Diffstat (limited to 'include/linux')
-rw-r--r--  include/linux/acpi.h | 70
-rw-r--r--  include/linux/aer.h | 4
-rw-r--r--  include/linux/arm-smccc.h | 2
-rw-r--r--  include/linux/arm_ffa.h | 79
-rw-r--r--  include/linux/array_size.h | 13
-rw-r--r--  include/linux/atomic/atomic-arch-fallback.h | 23
-rw-r--r--  include/linux/atomic/atomic-instrumented.h | 10
-rw-r--r--  include/linux/avf/virtchnl.h | 15
-rw-r--r--  include/linux/badblocks.h | 30
-rw-r--r--  include/linux/binfmts.h | 10
-rw-r--r--  include/linux/bitmap-str.h | 16
-rw-r--r--  include/linux/bitmap.h | 87
-rw-r--r--  include/linux/blk-mq.h | 2
-rw-r--r--  include/linux/blkdev.h | 11
-rw-r--r--  include/linux/bootmem_info.h | 2
-rw-r--r--  include/linux/bpf-cgroup-defs.h | 5
-rw-r--r--  include/linux/bpf-cgroup.h | 99
-rw-r--r--  include/linux/bpf.h | 66
-rw-r--r--  include/linux/bpf_mem_alloc.h | 1
-rw-r--r--  include/linux/bpf_verifier.h | 44
-rw-r--r--  include/linux/brcmphy.h | 10
-rw-r--r--  include/linux/btf.h | 1
-rw-r--r--  include/linux/btf_ids.h | 2
-rw-r--r--  include/linux/buffer_head.h | 86
-rw-r--r--  include/linux/buildid.h | 3
-rw-r--r--  include/linux/cacheinfo.h | 1
-rw-r--r--  include/linux/can/dev.h | 4
-rw-r--r--  include/linux/cdx/cdx_bus.h | 59
-rw-r--r--  include/linux/ceph/ceph_fs.h | 24
-rw-r--r--  include/linux/ceph/mon_client.h | 2
-rw-r--r--  include/linux/ceph/osd_client.h | 2
-rw-r--r--  include/linux/cgroup-defs.h | 7
-rw-r--r--  include/linux/cgroup.h | 12
-rw-r--r--  include/linux/cleanup.h | 39
-rw-r--r--  include/linux/clk-provider.h | 15
-rw-r--r--  include/linux/closure.h | 404
-rw-r--r--  include/linux/comedi/comedi_8254.h | 51
-rw-r--r--  include/linux/comedi/comedi_8255.h | 24
-rw-r--r--  include/linux/comedi/comedidev.h | 2
-rw-r--r--  include/linux/compat.h | 1
-rw-r--r--  include/linux/compiler-clang.h | 5
-rw-r--r--  include/linux/compiler-gcc.h | 2
-rw-r--r--  include/linux/compiler.h | 13
-rw-r--r--  include/linux/compiler_types.h | 32
-rw-r--r--  include/linux/console.h | 138
-rw-r--r--  include/linux/const.h | 8
-rw-r--r--  include/linux/cpu.h | 4
-rw-r--r--  include/linux/cpufreq.h | 8
-rw-r--r--  include/linux/cpuhotplug.h | 4
-rw-r--r--  include/linux/cpumask.h | 113
-rw-r--r--  include/linux/crash_core.h | 51
-rw-r--r--  include/linux/crash_dump.h | 8
-rw-r--r--  include/linux/cred.h | 66
-rw-r--r--  include/linux/crypto.h | 30
-rw-r--r--  include/linux/damon.h | 74
-rw-r--r--  include/linux/dax.h | 10
-rw-r--r--  include/linux/dcache.h | 1
-rw-r--r--  include/linux/device-mapper.h | 1
-rw-r--r--  include/linux/device.h | 8
-rw-r--r--  include/linux/dma-fence.h | 19
-rw-r--r--  include/linux/dmaengine.h | 2
-rw-r--r--  include/linux/dpll.h | 170
-rw-r--r--  include/linux/dsa/sja1105.h | 2
-rw-r--r--  include/linux/efi.h | 7
-rw-r--r--  include/linux/ethtool.h | 19
-rw-r--r--  include/linux/export-internal.h | 2
-rw-r--r--  include/linux/exportfs.h | 20
-rw-r--r--  include/linux/f2fs_fs.h | 70
-rw-r--r--  include/linux/fb.h | 5
-rw-r--r--  include/linux/fdtable.h | 17
-rw-r--r--  include/linux/filter.h | 67
-rw-r--r--  include/linux/firmware/meson/meson_sm.h | 2
-rw-r--r--  include/linux/firmware/qcom/qcom_qseecom.h | 52
-rw-r--r--  include/linux/firmware/qcom/qcom_scm.h | 123
-rw-r--r--  include/linux/firmware/xlnx-zynqmp.h | 12
-rw-r--r--  include/linux/fortify-string.h | 6
-rw-r--r--  include/linux/freelist.h | 129
-rw-r--r--  include/linux/fs.h | 181
-rw-r--r--  include/linux/fs_context.h | 2
-rw-r--r--  include/linux/fs_stack.h | 6
-rw-r--r--  include/linux/fscrypt.h | 82
-rw-r--r--  include/linux/fsnotify.h | 3
-rw-r--r--  include/linux/fw_table.h | 43
-rw-r--r--  include/linux/fwnode.h | 2
-rw-r--r--  include/linux/generic-radix-tree.h | 68
-rw-r--r--  include/linux/gfp.h | 12
-rw-r--r--  include/linux/gpio/consumer.h | 16
-rw-r--r--  include/linux/gpio/driver.h | 47
-rw-r--r--  include/linux/habanalabs/cpucp_if.h | 1417
-rw-r--r--  include/linux/habanalabs/hl_boot_if.h | 792
-rw-r--r--  include/linux/hid-sensor-ids.h | 4
-rw-r--r--  include/linux/hid.h | 4
-rw-r--r--  include/linux/highmem.h | 18
-rw-r--r--  include/linux/hisi_acc_qm.h | 39
-rw-r--r--  include/linux/hugetlb.h | 72
-rw-r--r--  include/linux/hugetlb_cgroup.h | 11
-rw-r--r--  include/linux/hw_random.h | 1
-rw-r--r--  include/linux/i2c.h | 11
-rw-r--r--  include/linux/i3c/master.h | 11
-rw-r--r--  include/linux/ieee80211.h | 135
-rw-r--r--  include/linux/if_team.h | 2
-rw-r--r--  include/linux/igmp.h | 2
-rw-r--r--  include/linux/iio/iio.h | 12
-rw-r--r--  include/linux/iio/sw_device.h | 3
-rw-r--r--  include/linux/iio/sw_trigger.h | 3
-rw-r--r--  include/linux/init.h | 3
-rw-r--r--  include/linux/instruction_pointer.h | 5
-rw-r--r--  include/linux/intel_tpmi.h | 6
-rw-r--r--  include/linux/interrupt.h | 8
-rw-r--r--  include/linux/io-pgtable.h | 4
-rw-r--r--  include/linux/io_uring.h | 19
-rw-r--r--  include/linux/io_uring_types.h | 15
-rw-r--r--  include/linux/iommu.h | 146
-rw-r--r--  include/linux/ioport.h | 12
-rw-r--r--  include/linux/iosys-map.h | 4
-rw-r--r--  include/linux/iov_iter.h | 274
-rw-r--r--  include/linux/iova_bitmap.h | 26
-rw-r--r--  include/linux/ipv6.h | 50
-rw-r--r--  include/linux/irq.h | 26
-rw-r--r--  include/linux/irqdomain.h | 2
-rw-r--r--  include/linux/iversion.h | 2
-rw-r--r--  include/linux/jbd2.h | 2
-rw-r--r--  include/linux/kasan.h | 10
-rw-r--r--  include/linux/kernel.h | 7
-rw-r--r--  include/linux/kernfs.h | 1
-rw-r--r--  include/linux/kexec.h | 4
-rw-r--r--  include/linux/key.h | 1
-rw-r--r--  include/linux/kprobes.h | 11
-rw-r--r--  include/linux/kstrtox.h | 5
-rw-r--r--  include/linux/kthread.h | 1
-rw-r--r--  include/linux/kvm_host.h | 2
-rw-r--r--  include/linux/libata.h | 39
-rw-r--r--  include/linux/linkmode.h | 18
-rw-r--r--  include/linux/list.h | 8
-rw-r--r--  include/linux/llist.h | 46
-rw-r--r--  include/linux/lockd/lockd.h | 2
-rw-r--r--  include/linux/logic_pio.h | 3
-rw-r--r--  include/linux/lsm_hook_defs.h | 10
-rw-r--r--  include/linux/lwq.h | 124
-rw-r--r--  include/linux/maple_tree.h | 11
-rw-r--r--  include/linux/mcb.h | 1
-rw-r--r--  include/linux/mei_cl_bus.h | 12
-rw-r--r--  include/linux/memblock.h | 9
-rw-r--r--  include/linux/memcontrol.h | 97
-rw-r--r--  include/linux/memory-tiers.h | 41
-rw-r--r--  include/linux/mempolicy.h | 58
-rw-r--r--  include/linux/mfd/abx500/ab8500.h | 6
-rw-r--r--  include/linux/mfd/core.h | 2
-rw-r--r--  include/linux/mfd/lpc_ich.h | 7
-rw-r--r--  include/linux/mfd/mt6358/registers.h | 23
-rw-r--r--  include/linux/mfd/stm32-timers.h | 11
-rw-r--r--  include/linux/micrel_phy.h | 4
-rw-r--r--  include/linux/migrate.h | 4
-rw-r--r--  include/linux/minmax.h | 129
-rw-r--r--  include/linux/mlx5/device.h | 3
-rw-r--r--  include/linux/mlx5/driver.h | 20
-rw-r--r--  include/linux/mlx5/fs.h | 1
-rw-r--r--  include/linux/mlx5/mlx5_ifc.h | 131
-rw-r--r--  include/linux/mlx5/port.h | 3
-rw-r--r--  include/linux/mm.h | 185
-rw-r--r--  include/linux/mm_inline.h | 20
-rw-r--r--  include/linux/mm_types.h | 93
-rw-r--r--  include/linux/mmc/host.h | 1
-rw-r--r--  include/linux/mmu_notifier.h | 9
-rw-r--r--  include/linux/mmzone.h | 29
-rw-r--r--  include/linux/mod_devicetable.h | 14
-rw-r--r--  include/linux/module_symbol.h | 2
-rw-r--r--  include/linux/moduleparam.h | 8
-rw-r--r--  include/linux/mount.h | 4
-rw-r--r--  include/linux/msi.h | 6
-rw-r--r--  include/linux/mtd/cfi.h | 2
-rw-r--r--  include/linux/mtd/jedec.h | 3
-rw-r--r--  include/linux/mtd/onfi.h | 1
-rw-r--r--  include/linux/mtd/qinfo.h | 2
-rw-r--r--  include/linux/mtd/rawnand.h | 2
-rw-r--r--  include/linux/mtd/spinand.h | 1
-rw-r--r--  include/linux/namei.h | 26
-rw-r--r--  include/linux/netdevice.h | 100
-rw-r--r--  include/linux/netfilter.h | 10
-rw-r--r--  include/linux/netfilter/nf_conntrack_sctp.h | 1
-rw-r--r--  include/linux/nfs4.h | 262
-rw-r--r--  include/linux/nfs_fs_sb.h | 1
-rw-r--r--  include/linux/nfs_page.h | 4
-rw-r--r--  include/linux/numa.h | 14
-rw-r--r--  include/linux/nvme-auth.h | 7
-rw-r--r--  include/linux/nvme-fc-driver.h | 6
-rw-r--r--  include/linux/nvme-keyring.h | 36
-rw-r--r--  include/linux/nvme-tcp.h | 6
-rw-r--r--  include/linux/nvme.h | 10
-rw-r--r--  include/linux/nvmem-consumer.h | 6
-rw-r--r--  include/linux/nvmem-provider.h | 4
-rw-r--r--  include/linux/objpool.h | 181
-rw-r--r--  include/linux/objtool.h | 13
-rw-r--r--  include/linux/oid_registry.h | 23
-rw-r--r--  include/linux/overflow.h | 35
-rw-r--r--  include/linux/page-flags.h | 19
-rw-r--r--  include/linux/pagemap.h | 33
-rw-r--r--  include/linux/parport.h | 2
-rw-r--r--  include/linux/pci.h | 63
-rw-r--r--  include/linux/pci_ids.h | 4
-rw-r--r--  include/linux/pds/pds_core_if.h | 1
-rw-r--r--  include/linux/percpu.h | 1
-rw-r--r--  include/linux/percpu_counter.h | 30
-rw-r--r--  include/linux/perf/arm_pmuv3.h | 9
-rw-r--r--  include/linux/perf_event.h | 4
-rw-r--r--  include/linux/pgtable.h | 10
-rw-r--r--  include/linux/phy.h | 1
-rw-r--r--  include/linux/phylink.h | 56
-rw-r--r--  include/linux/pinctrl/machine.h | 2
-rw-r--r--  include/linux/pipe_fs_i.h | 22
-rw-r--r--  include/linux/pktcdvd.h | 4
-rw-r--r--  include/linux/platform_data/cros_ec_proto.h | 2
-rw-r--r--  include/linux/platform_data/gpio-omap.h | 3
-rw-r--r--  include/linux/platform_data/gsc_hwmon.h | 2
-rw-r--r--  include/linux/platform_data/omap-twl4030.h | 3
-rw-r--r--  include/linux/platform_data/pca953x.h | 13
-rw-r--r--  include/linux/platform_data/x86/asus-wmi.h | 4
-rw-r--r--  include/linux/pm.h | 45
-rw-r--r--  include/linux/pm_domain.h | 11
-rw-r--r--  include/linux/pm_opp.h | 40
-rw-r--r--  include/linux/pnp.h | 8
-rw-r--r--  include/linux/posix-clock.h | 35
-rw-r--r--  include/linux/power_supply.h | 1
-rw-r--r--  include/linux/preempt.h | 15
-rw-r--r--  include/linux/property.h | 3
-rw-r--r--  include/linux/pseudo_fs.h | 2
-rw-r--r--  include/linux/quota.h | 4
-rw-r--r--  include/linux/quotaops.h | 2
-rw-r--r--  include/linux/raid/pq.h | 2
-rw-r--r--  include/linux/rcu_notifier.h | 32
-rw-r--r--  include/linux/rcupdate.h | 2
-rw-r--r--  include/linux/rcutiny.h | 2
-rw-r--r--  include/linux/rcutree.h | 17
-rw-r--r--  include/linux/reboot.h | 3
-rw-r--r--  include/linux/regulator/mt6358-regulator.h | 3
-rw-r--r--  include/linux/resctrl.h | 4
-rw-r--r--  include/linux/resume_user_mode.h | 2
-rw-r--r--  include/linux/rethook.h | 16
-rw-r--r--  include/linux/rmap.h | 4
-rw-r--r--  include/linux/rtc.h | 17
-rw-r--r--  include/linux/sched.h | 16
-rw-r--r--  include/linux/sched/coredump.h | 19
-rw-r--r--  include/linux/sched/deadline.h | 4
-rw-r--r--  include/linux/sched/mm.h | 4
-rw-r--r--  include/linux/sched/numa_balancing.h | 16
-rw-r--r--  include/linux/sched/rt.h | 4
-rw-r--r--  include/linux/sched/sd_flags.h | 7
-rw-r--r--  include/linux/sched/signal.h | 39
-rw-r--r--  include/linux/sched/smt.h | 2
-rw-r--r--  include/linux/sched/topology.h | 10
-rw-r--r--  include/linux/sched/types.h | 2
-rw-r--r--  include/linux/sched/vhost_task.h | 7
-rw-r--r--  include/linux/scmi_protocol.h | 43
-rw-r--r--  include/linux/security.h | 20
-rw-r--r--  include/linux/sed-opal-key.h | 26
-rw-r--r--  include/linux/seq_buf.h | 28
-rw-r--r--  include/linux/seq_file.h | 15
-rw-r--r--  include/linux/seqlock.h | 54
-rw-r--r--  include/linux/serial_core.h | 91
-rw-r--r--  include/linux/shmem_fs.h | 16
-rw-r--r--  include/linux/shrinker.h | 87
-rw-r--r--  include/linux/skbuff.h | 7
-rw-r--r--  include/linux/slab.h | 7
-rw-r--r--  include/linux/smp.h | 2
-rw-r--r--  include/linux/soc/mediatek/infracfg.h | 41
-rw-r--r--  include/linux/soc/mediatek/mtk_wed.h | 76
-rw-r--r--  include/linux/soc/qcom/llcc-qcom.h | 2
-rw-r--r--  include/linux/socket.h | 1
-rw-r--r--  include/linux/sockptr.h | 23
-rw-r--r--  include/linux/spi/spi.h | 28
-rw-r--r--  include/linux/spmi.h | 2
-rw-r--r--  include/linux/stmmac.h | 2
-rw-r--r--  include/linux/string.h | 48
-rw-r--r--  include/linux/string_helpers.h | 4
-rw-r--r--  include/linux/stringify.h | 2
-rw-r--r--  include/linux/sunrpc/svc.h | 45
-rw-r--r--  include/linux/sunrpc/svc_xprt.h | 2
-rw-r--r--  include/linux/sunrpc/xdr.h | 4
-rw-r--r--  include/linux/sunrpc/xprt.h | 3
-rw-r--r--  include/linux/superhyway.h | 107
-rw-r--r--  include/linux/surface_aggregator/controller.h | 2
-rw-r--r--  include/linux/swap.h | 1
-rw-r--r--  include/linux/swiotlb.h | 23
-rw-r--r--  include/linux/syscalls.h | 11
-rw-r--r--  include/linux/sysctl.h | 6
-rw-r--r--  include/linux/sysfs.h | 2
-rw-r--r--  include/linux/tcp.h | 61
-rw-r--r--  include/linux/thermal.h | 24
-rw-r--r--  include/linux/thunderbolt.h | 2
-rw-r--r--  include/linux/tick.h | 10
-rw-r--r--  include/linux/timer.h | 3
-rw-r--r--  include/linux/topology.h | 2
-rw-r--r--  include/linux/torture.h | 8
-rw-r--r--  include/linux/trace_events.h | 19
-rw-r--r--  include/linux/trace_seq.h | 2
-rw-r--r--  include/linux/tracefs.h | 73
-rw-r--r--  include/linux/tsm.h | 69
-rw-r--r--  include/linux/tty.h | 25
-rw-r--r--  include/linux/ucs2_string.h | 1
-rw-r--r--  include/linux/udp.h | 66
-rw-r--r--  include/linux/uio.h | 34
-rw-r--r--  include/linux/units.h | 4
-rw-r--r--  include/linux/usb.h | 16
-rw-r--r--  include/linux/usb/chipidea.h | 1
-rw-r--r--  include/linux/usb/composite.h | 8
-rw-r--r--  include/linux/usb/gadget.h | 9
-rw-r--r--  include/linux/usb/hcd.h | 17
-rw-r--r--  include/linux/usb/ljca.h | 145
-rw-r--r--  include/linux/usb/pd.h | 1
-rw-r--r--  include/linux/usb/pd_vdo.h | 1
-rw-r--r--  include/linux/usb/renesas_usbhs.h | 10
-rw-r--r--  include/linux/usb/typec.h | 37
-rw-r--r--  include/linux/usb/typec_dp.h | 28
-rw-r--r--  include/linux/usb/typec_tbt.h | 1
-rw-r--r--  include/linux/user_namespace.h | 8
-rw-r--r--  include/linux/userfaultfd_k.h | 28
-rw-r--r--  include/linux/verification.h | 1
-rw-r--r--  include/linux/virtio_net.h | 19
-rw-r--r--  include/linux/virtio_vsock.h | 10
-rw-r--r--  include/linux/wait.h | 9
-rw-r--r--  include/linux/watch_queue.h | 2
-rw-r--r--  include/linux/workqueue.h | 46
322 files changed, 8541 insertions(+), 2011 deletions(-)
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index a73246c3c35e..d37642d85225 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -15,6 +15,7 @@
#include <linux/mod_devicetable.h>
#include <linux/property.h>
#include <linux/uuid.h>
+#include <linux/fw_table.h>
struct irq_domain;
struct irq_domain_ops;
@@ -24,6 +25,16 @@ struct irq_domain_ops;
#endif
#include <acpi/acpi.h>
+#ifdef CONFIG_ACPI_TABLE_LIB
+#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, ACPI)
+#define __init_or_acpilib
+#define __initdata_or_acpilib
+#else
+#define EXPORT_SYMBOL_ACPI_LIB(x)
+#define __init_or_acpilib __init
+#define __initdata_or_acpilib __initdata
+#endif
+
#ifdef CONFIG_ACPI
#include <linux/list.h>
@@ -119,21 +130,8 @@ enum acpi_address_range_id {
/* Table Handlers */
-union acpi_subtable_headers {
- struct acpi_subtable_header common;
- struct acpi_hmat_structure hmat;
- struct acpi_prmt_module_header prmt;
- struct acpi_cedt_header cedt;
-};
-
typedef int (*acpi_tbl_table_handler)(struct acpi_table_header *table);
-typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *header,
- const unsigned long end);
-
-typedef int (*acpi_tbl_entry_handler_arg)(union acpi_subtable_headers *header,
- void *arg, const unsigned long end);
-
/* Debugger support */
struct acpi_debugger_ops {
@@ -207,14 +205,6 @@ static inline int acpi_debugger_notify_command_complete(void)
(!entry) || (unsigned long)entry + sizeof(*entry) > end || \
((struct acpi_subtable_header *)entry)->length < sizeof(*entry))
-struct acpi_subtable_proc {
- int id;
- acpi_tbl_entry_handler handler;
- acpi_tbl_entry_handler_arg handler_arg;
- void *arg;
- int count;
-};
-
void __iomem *__acpi_map_table(unsigned long phys, unsigned long size);
void __acpi_unmap_table(void __iomem *map, unsigned long size);
int early_acpi_boot_init(void);
@@ -229,16 +219,6 @@ void acpi_reserve_initial_tables (void);
void acpi_table_init_complete (void);
int acpi_table_init (void);
-#ifdef CONFIG_ACPI_TABLE_LIB
-#define EXPORT_SYMBOL_ACPI_LIB(x) EXPORT_SYMBOL_NS_GPL(x, ACPI)
-#define __init_or_acpilib
-#define __initdata_or_acpilib
-#else
-#define EXPORT_SYMBOL_ACPI_LIB(x)
-#define __init_or_acpilib __init
-#define __initdata_or_acpilib __initdata
-#endif
-
int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
int __init_or_acpilib acpi_table_parse_entries(char *id,
unsigned long table_size, int entry_id,
@@ -256,10 +236,15 @@ acpi_table_parse_cedt(enum acpi_cedt_type id,
int acpi_parse_mcfg (struct acpi_table_header *header);
void acpi_table_print_madt_entry (struct acpi_subtable_header *madt);
+static inline bool acpi_gicc_is_usable(struct acpi_madt_generic_interrupt *gicc)
+{
+ return gicc->flags & ACPI_MADT_ENABLED;
+}
+
/* the following numa functions are architecture-dependent */
void acpi_numa_slit_init (struct acpi_table_slit *slit);
-#if defined(CONFIG_X86) || defined(CONFIG_IA64) || defined(CONFIG_LOONGARCH)
+#if defined(CONFIG_X86) || defined(CONFIG_LOONGARCH)
void acpi_numa_processor_affinity_init (struct acpi_srat_cpu_affinity *pa);
#else
static inline void
@@ -787,6 +772,11 @@ static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
struct acpi_device;
+static inline bool acpi_dev_uid_match(struct acpi_device *adev, const char *uid2)
+{
+ return false;
+}
+
static inline bool
acpi_dev_hid_uid_match(struct acpi_device *adev, const char *hid2, const char *uid2)
{
@@ -1114,15 +1104,8 @@ static inline int acpi_get_lps0_constraint(struct device *dev)
return ACPI_STATE_UNKNOWN;
}
#endif /* CONFIG_SUSPEND && CONFIG_X86 */
-#ifndef CONFIG_IA64
void arch_reserve_mem_area(acpi_physical_address addr, size_t size);
#else
-static inline void arch_reserve_mem_area(acpi_physical_address addr,
- size_t size)
-{
-}
-#endif /* CONFIG_X86 */
-#else
#define acpi_os_set_prepare_sleep(func, pm1a_ctrl, pm1b_ctrl) do { } while (0)
#endif
@@ -1480,6 +1463,15 @@ static inline int lpit_read_residency_count_address(u64 *address)
}
#endif
+#ifdef CONFIG_ACPI_PROCESSOR_IDLE
+#ifndef arch_get_idle_state_flags
+static inline unsigned int arch_get_idle_state_flags(u32 arch_flags)
+{
+ return 0;
+}
+#endif
+#endif /* CONFIG_ACPI_PROCESSOR_IDLE */
+
#ifdef CONFIG_ACPI_PPTT
int acpi_pptt_cpu_is_thread(unsigned int cpu);
int find_acpi_cpu_topology(unsigned int cpu, int level);
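The new acpi_gicc_is_usable() helper replaces open-coded tests of the MADT GICC "enabled" flag. A minimal sketch (not part of this patch; the handler name is invented) of a MADT subtable handler using it, together with the handler types that now live in <linux/fw_table.h>:

#include <linux/acpi.h>

/* Sketch only: would be registered via
 * acpi_table_parse_madt(ACPI_MADT_TYPE_GENERIC_INTERRUPT, gicc_parse, 0). */
static int __init gicc_parse(union acpi_subtable_headers *header,
			     const unsigned long end)
{
	struct acpi_madt_generic_interrupt *gicc =
		(struct acpi_madt_generic_interrupt *)header;

	if (!acpi_gicc_is_usable(gicc))	/* checks ACPI_MADT_ENABLED */
		return 0;		/* skip unusable CPU interfaces */

	/* ... register the CPU interface ... */
	return 0;
}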
diff --git a/include/linux/aer.h b/include/linux/aer.h
index 2dd175f5debd..f6ea2f57d808 100644
--- a/include/linux/aer.h
+++ b/include/linux/aer.h
@@ -42,14 +42,16 @@ struct aer_capability_regs {
#if defined(CONFIG_PCIEAER)
int pci_aer_clear_nonfatal_status(struct pci_dev *dev);
+int pcie_aer_is_native(struct pci_dev *dev);
#else
static inline int pci_aer_clear_nonfatal_status(struct pci_dev *dev)
{
return -EINVAL;
}
+static inline int pcie_aer_is_native(struct pci_dev *dev) { return 0; }
#endif
-void cper_print_aer(struct pci_dev *dev, int aer_severity,
+void pci_print_aer(struct pci_dev *dev, int aer_severity,
struct aer_capability_regs *aer);
int cper_severity_to_aer(int cper_severity);
void aer_recover_queue(int domain, unsigned int bus, unsigned int devfn,
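cper_print_aer() is renamed to pci_print_aer(), and a !CONFIG_PCIEAER stub of pcie_aer_is_native() is added so callers no longer need their own #ifdefs. A hedged sketch of the resulting call pattern (the surrounding function is invented):

#include <linux/aer.h>
#include <linux/pci.h>

static void report_fatal_error(struct pci_dev *dev,
			       struct aer_capability_regs *regs)
{
	/* the stub returns 0 when AER is compiled out or firmware-first */
	if (!pcie_aer_is_native(dev))
		return;

	pci_print_aer(dev, AER_FATAL, regs);	/* was cper_print_aer() */
}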
diff --git a/include/linux/arm-smccc.h b/include/linux/arm-smccc.h
index 7c67c17321d4..083f85653716 100644
--- a/include/linux/arm-smccc.h
+++ b/include/linux/arm-smccc.h
@@ -67,6 +67,8 @@
#define ARM_SMCCC_VERSION_1_3 0x10003
#define ARM_SMCCC_1_3_SVE_HINT 0x10000
+#define ARM_SMCCC_CALL_HINTS ARM_SMCCC_1_3_SVE_HINT
+
#define ARM_SMCCC_VERSION_FUNC_ID \
ARM_SMCCC_CALL_VAL(ARM_SMCCC_FAST_CALL, \
diff --git a/include/linux/arm_ffa.h b/include/linux/arm_ffa.h
index cc060da51bec..1abedb5b2e48 100644
--- a/include/linux/arm_ffa.h
+++ b/include/linux/arm_ffa.h
@@ -6,6 +6,7 @@
#ifndef _LINUX_ARM_FFA_H
#define _LINUX_ARM_FFA_H
+#include <linux/bitfield.h>
#include <linux/device.h>
#include <linux/module.h>
#include <linux/types.h>
@@ -20,6 +21,7 @@
#define FFA_ERROR FFA_SMC_32(0x60)
#define FFA_SUCCESS FFA_SMC_32(0x61)
+#define FFA_FN64_SUCCESS FFA_SMC_64(0x61)
#define FFA_INTERRUPT FFA_SMC_32(0x62)
#define FFA_VERSION FFA_SMC_32(0x63)
#define FFA_FEATURES FFA_SMC_32(0x64)
@@ -54,6 +56,23 @@
#define FFA_MEM_FRAG_RX FFA_SMC_32(0x7A)
#define FFA_MEM_FRAG_TX FFA_SMC_32(0x7B)
#define FFA_NORMAL_WORLD_RESUME FFA_SMC_32(0x7C)
+#define FFA_NOTIFICATION_BITMAP_CREATE FFA_SMC_32(0x7D)
+#define FFA_NOTIFICATION_BITMAP_DESTROY FFA_SMC_32(0x7E)
+#define FFA_NOTIFICATION_BIND FFA_SMC_32(0x7F)
+#define FFA_NOTIFICATION_UNBIND FFA_SMC_32(0x80)
+#define FFA_NOTIFICATION_SET FFA_SMC_32(0x81)
+#define FFA_NOTIFICATION_GET FFA_SMC_32(0x82)
+#define FFA_NOTIFICATION_INFO_GET FFA_SMC_32(0x83)
+#define FFA_FN64_NOTIFICATION_INFO_GET FFA_SMC_64(0x83)
+#define FFA_RX_ACQUIRE FFA_SMC_32(0x84)
+#define FFA_SPM_ID_GET FFA_SMC_32(0x85)
+#define FFA_MSG_SEND2 FFA_SMC_32(0x86)
+#define FFA_SECONDARY_EP_REGISTER FFA_SMC_32(0x87)
+#define FFA_FN64_SECONDARY_EP_REGISTER FFA_SMC_64(0x87)
+#define FFA_MEM_PERM_GET FFA_SMC_32(0x88)
+#define FFA_FN64_MEM_PERM_GET FFA_SMC_64(0x88)
+#define FFA_MEM_PERM_SET FFA_SMC_32(0x89)
+#define FFA_FN64_MEM_PERM_SET FFA_SMC_64(0x89)
/*
* For some calls it is necessary to use SMC64 to pass or return 64-bit values.
@@ -76,6 +95,7 @@
#define FFA_RET_DENIED (-6)
#define FFA_RET_RETRY (-7)
#define FFA_RET_ABORTED (-8)
+#define FFA_RET_NO_DATA (-9)
/* FFA version encoding */
#define FFA_MAJOR_VERSION_MASK GENMASK(30, 16)
@@ -86,6 +106,7 @@
(FIELD_PREP(FFA_MAJOR_VERSION_MASK, (major)) | \
FIELD_PREP(FFA_MINOR_VERSION_MASK, (minor)))
#define FFA_VERSION_1_0 FFA_PACK_VERSION_INFO(1, 0)
+#define FFA_VERSION_1_1 FFA_PACK_VERSION_INFO(1, 1)
/**
* FF-A specification mentions explicitly about '4K pages'. This should
@@ -278,8 +299,8 @@ struct ffa_mem_region {
#define FFA_MEM_NON_SHAREABLE (0)
#define FFA_MEM_OUTER_SHAREABLE (2)
#define FFA_MEM_INNER_SHAREABLE (3)
- u8 attributes;
- u8 reserved_0;
+ /* Memory region attributes, upper byte MBZ pre v1.1 */
+ u16 attributes;
/*
* Clear memory region contents after unmapping it from the sender and
* before mapping it for any receiver.
@@ -317,27 +338,41 @@ struct ffa_mem_region {
* memory region.
*/
u64 tag;
- u32 reserved_1;
+ /* Size of each endpoint memory access descriptor, MBZ pre v1.1 */
+ u32 ep_mem_size;
/*
* The number of `ffa_mem_region_attributes` entries included in this
* transaction.
*/
u32 ep_count;
/*
- * An array of endpoint memory access descriptors.
- * Each one specifies a memory region offset, an endpoint and the
- * attributes with which this memory region should be mapped in that
- * endpoint's page table.
+ * 16-byte aligned offset from the base address of this descriptor
+ * to the first element of the endpoint memory access descriptor array
+ * Valid only from v1.1
*/
- struct ffa_mem_region_attributes ep_mem_access[];
+ u32 ep_mem_offset;
+ /* MBZ, valid only from v1.1 */
+ u32 reserved[3];
};
-#define COMPOSITE_OFFSET(x) \
- (offsetof(struct ffa_mem_region, ep_mem_access[x]))
#define CONSTITUENTS_OFFSET(x) \
(offsetof(struct ffa_composite_mem_region, constituents[x]))
-#define COMPOSITE_CONSTITUENTS_OFFSET(x, y) \
- (COMPOSITE_OFFSET(x) + CONSTITUENTS_OFFSET(y))
+
+static inline u32
+ffa_mem_desc_offset(struct ffa_mem_region *buf, int count, u32 ffa_version)
+{
+ u32 offset = count * sizeof(struct ffa_mem_region_attributes);
+ /*
+ * Prior to v1.1, the endpoint memory descriptor array started at
+ * offset 32 (i.e. the offset of ep_mem_offset in the current structure)
+ */
+ if (ffa_version <= FFA_VERSION_1_0)
+ offset += offsetof(struct ffa_mem_region, ep_mem_offset);
+ else
+ offset += sizeof(struct ffa_mem_region);
+
+ return offset;
+}
struct ffa_mem_ops_args {
bool use_txbuf;
@@ -367,10 +402,30 @@ struct ffa_mem_ops {
int (*memory_lend)(struct ffa_mem_ops_args *args);
};
+struct ffa_cpu_ops {
+ int (*run)(struct ffa_device *dev, u16 vcpu);
+};
+
+typedef void (*ffa_sched_recv_cb)(u16 vcpu, bool is_per_vcpu, void *cb_data);
+typedef void (*ffa_notifier_cb)(int notify_id, void *cb_data);
+
+struct ffa_notifier_ops {
+ int (*sched_recv_cb_register)(struct ffa_device *dev,
+ ffa_sched_recv_cb cb, void *cb_data);
+ int (*sched_recv_cb_unregister)(struct ffa_device *dev);
+ int (*notify_request)(struct ffa_device *dev, bool per_vcpu,
+ ffa_notifier_cb cb, void *cb_data, int notify_id);
+ int (*notify_relinquish)(struct ffa_device *dev, int notify_id);
+ int (*notify_send)(struct ffa_device *dev, int notify_id, bool per_vcpu,
+ u16 vcpu);
+};
+
struct ffa_ops {
const struct ffa_info_ops *info_ops;
const struct ffa_msg_ops *msg_ops;
const struct ffa_mem_ops *mem_ops;
+ const struct ffa_cpu_ops *cpu_ops;
+ const struct ffa_notifier_ops *notifier_ops;
};
#endif /* _LINUX_ARM_FFA_H */
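Because ffa_mem_desc_offset() keys off the negotiated version, one 16-byte endpoint descriptor yields an offset of 48 under v1.0 (16 plus the legacy 32-byte header) but 64 under v1.1 (16 plus the grown 48-byte header). A hedged sketch of the new notifier_ops as well; the callback and notification ID are illustrative only:

#include <linux/arm_ffa.h>

static void vm_notify_cb(int notify_id, void *cb_data)
{
	/* invoked when the partner partition signals 'notify_id' */
}

static int setup_notifications(struct ffa_device *dev)
{
	const struct ffa_notifier_ops *ops = dev->ops->notifier_ops;

	/* bind global (not per-vCPU) notification 0 to our callback */
	return ops->notify_request(dev, false, vm_notify_cb, NULL, 0);
}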
diff --git a/include/linux/array_size.h b/include/linux/array_size.h
new file mode 100644
index 000000000000..06d7d83196ca
--- /dev/null
+++ b/include/linux/array_size.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_ARRAY_SIZE_H
+#define _LINUX_ARRAY_SIZE_H
+
+#include <linux/compiler.h>
+
+/**
+ * ARRAY_SIZE - get the number of elements in array @arr
+ * @arr: array to be sized
+ */
+#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
+
+#endif /* _LINUX_ARRAY_SIZE_H */
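ARRAY_SIZE() only accepts true arrays; __must_be_array() turns a pointer argument into a compile error. A small usage sketch:

#include <linux/array_size.h>

static const int primes[] = { 2, 3, 5, 7, 11 };

static int nr_primes(void)
{
	return ARRAY_SIZE(primes);	/* 5; a pointer here would not build */
}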
diff --git a/include/linux/atomic/atomic-arch-fallback.h b/include/linux/atomic/atomic-arch-fallback.h
index 18f5744dfb5d..5e95faa959c4 100644
--- a/include/linux/atomic/atomic-arch-fallback.h
+++ b/include/linux/atomic/atomic-arch-fallback.h
@@ -428,6 +428,19 @@ extern void raw_cmpxchg128_relaxed_not_implemented(void);
#define raw_sync_cmpxchg arch_sync_cmpxchg
+#ifdef arch_sync_try_cmpxchg
+#define raw_sync_try_cmpxchg arch_sync_try_cmpxchg
+#else
+#define raw_sync_try_cmpxchg(_ptr, _oldp, _new) \
+({ \
+ typeof(*(_ptr)) *___op = (_oldp), ___o = *___op, ___r; \
+ ___r = raw_sync_cmpxchg((_ptr), ___o, (_new)); \
+ if (unlikely(___r != ___o)) \
+ *___op = ___r; \
+ likely(___r == ___o); \
+})
+#endif
+
/**
* raw_atomic_read() - atomic load with relaxed ordering
* @v: pointer to atomic_t
@@ -459,8 +472,6 @@ raw_atomic_read_acquire(const atomic_t *v)
{
#if defined(arch_atomic_read_acquire)
return arch_atomic_read_acquire(v);
-#elif defined(arch_atomic_read)
- return arch_atomic_read(v);
#else
int ret;
@@ -508,8 +519,6 @@ raw_atomic_set_release(atomic_t *v, int i)
{
#if defined(arch_atomic_set_release)
arch_atomic_set_release(v, i);
-#elif defined(arch_atomic_set)
- arch_atomic_set(v, i);
#else
if (__native_word(atomic_t)) {
smp_store_release(&(v)->counter, i);
@@ -2575,8 +2584,6 @@ raw_atomic64_read_acquire(const atomic64_t *v)
{
#if defined(arch_atomic64_read_acquire)
return arch_atomic64_read_acquire(v);
-#elif defined(arch_atomic64_read)
- return arch_atomic64_read(v);
#else
s64 ret;
@@ -2624,8 +2631,6 @@ raw_atomic64_set_release(atomic64_t *v, s64 i)
{
#if defined(arch_atomic64_set_release)
arch_atomic64_set_release(v, i);
-#elif defined(arch_atomic64_set)
- arch_atomic64_set(v, i);
#else
if (__native_word(atomic64_t)) {
smp_store_release(&(v)->counter, i);
@@ -4657,4 +4662,4 @@ raw_atomic64_dec_if_positive(atomic64_t *v)
}
#endif /* _LINUX_ATOMIC_FALLBACK_H */
-// 202b45c7db600ce36198eb1f1fc2c2d5268ace2d
+// eec048affea735b8464f58e6d96992101f8f85f1
diff --git a/include/linux/atomic/atomic-instrumented.h b/include/linux/atomic/atomic-instrumented.h
index d401b406ef7c..54d7bbe0aeaa 100644
--- a/include/linux/atomic/atomic-instrumented.h
+++ b/include/linux/atomic/atomic-instrumented.h
@@ -4998,6 +4998,14 @@ atomic_long_dec_if_positive(atomic_long_t *v)
raw_try_cmpxchg128_local(__ai_ptr, __ai_oldp, __VA_ARGS__); \
})
+#define sync_try_cmpxchg(ptr, ...) \
+({ \
+ typeof(ptr) __ai_ptr = (ptr); \
+ kcsan_mb(); \
+ instrument_atomic_read_write(__ai_ptr, sizeof(*__ai_ptr)); \
+ raw_sync_try_cmpxchg(__ai_ptr, __VA_ARGS__); \
+})
+
#endif /* _LINUX_ATOMIC_INSTRUMENTED_H */
-// 1568f875fef72097413caab8339120c065a39aa4
+// 2cc4bc990fef44d3836ec108f11b610f3f438184
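As with the other try_cmpxchg() forms, the fallback writes the observed value back through the old-value pointer on failure, so retry loops need no explicit re-read. A hedged sketch (the shared counter is illustrative):

#include <linux/atomic.h>

static int shared_counter;

static void shared_inc(void)
{
	int old = READ_ONCE(shared_counter);

	/* on failure, 'old' is refreshed with the current value */
	while (!sync_try_cmpxchg(&shared_counter, &old, old + 1))
		;
}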
diff --git a/include/linux/avf/virtchnl.h b/include/linux/avf/virtchnl.h
index d0807ad43f93..6b3acf15be5c 100644
--- a/include/linux/avf/virtchnl.h
+++ b/include/linux/avf/virtchnl.h
@@ -4,6 +4,10 @@
#ifndef _VIRTCHNL_H_
#define _VIRTCHNL_H_
+#include <linux/bitops.h>
+#include <linux/overflow.h>
+#include <uapi/linux/if_ether.h>
+
/* Description:
* This header file describes the Virtual Function (VF) - Physical Function
* (PF) communication protocol used by the drivers for all devices starting
@@ -240,6 +244,7 @@ VIRTCHNL_CHECK_STRUCT_LEN(16, virtchnl_vsi_resource);
#define VIRTCHNL_VF_OFFLOAD_REQ_QUEUES BIT(6)
/* used to negotiate communicating link speeds in Mbps */
#define VIRTCHNL_VF_CAP_ADV_LINK_SPEED BIT(7)
+#define VIRTCHNL_VF_OFFLOAD_CRC BIT(10)
#define VIRTCHNL_VF_OFFLOAD_VLAN_V2 BIT(15)
#define VIRTCHNL_VF_OFFLOAD_VLAN BIT(16)
#define VIRTCHNL_VF_OFFLOAD_RX_POLLING BIT(17)
@@ -295,7 +300,13 @@ VIRTCHNL_CHECK_STRUCT_LEN(24, virtchnl_txq_info);
/* VIRTCHNL_OP_CONFIG_RX_QUEUE
* VF sends this message to set up parameters for one RX queue.
* External data buffer contains one instance of virtchnl_rxq_info.
- * PF configures requested queue and returns a status code.
+ * PF configures requested queue and returns a status code. The
+ * crc_disable flag disables CRC stripping on the VF. Setting
+ * the crc_disable flag to 1 will disable CRC stripping for each
+ * queue in the VF where the flag is set. The VIRTCHNL_VF_OFFLOAD_CRC
+ * offload must have been set prior to sending this info or the PF
+ * will ignore the request. This flag should be set the same for
+ * all of the queues for a VF.
*/
/* Rx queue config info */
@@ -307,7 +318,7 @@ struct virtchnl_rxq_info {
u16 splithdr_enabled; /* deprecated with AVF 1.0 */
u32 databuffer_size;
u32 max_pkt_size;
- u8 pad0;
+ u8 crc_disable;
u8 rxdid;
u8 pad1[2];
u64 dma_ring_addr;
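A hedged sketch of a VF driver filling the new field; negotiated_caps is a made-up name for the driver's stored capability word:

#include <linux/avf/virtchnl.h>

static void fill_rxq(struct virtchnl_rxq_info *rxq, u32 negotiated_caps,
		     bool keep_crc)
{
	/* only meaningful once VIRTCHNL_VF_OFFLOAD_CRC was negotiated */
	if (negotiated_caps & VIRTCHNL_VF_OFFLOAD_CRC)
		rxq->crc_disable = keep_crc ? 1 : 0;	/* 1 = leave CRC on frames */
}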
diff --git a/include/linux/badblocks.h b/include/linux/badblocks.h
index 2426276b9bd3..670f2dae692f 100644
--- a/include/linux/badblocks.h
+++ b/include/linux/badblocks.h
@@ -15,6 +15,7 @@
#define BB_OFFSET(x) (((x) & BB_OFFSET_MASK) >> 9)
#define BB_LEN(x) (((x) & BB_LEN_MASK) + 1)
#define BB_ACK(x) (!!((x) & BB_ACK_MASK))
+#define BB_END(x) (BB_OFFSET(x) + BB_LEN(x))
#define BB_MAKE(a, l, ack) (((a)<<9) | ((l)-1) | ((u64)(!!(ack)) << 63))
/* Bad block numbers are stored sorted in a single page.
@@ -41,6 +42,12 @@ struct badblocks {
sector_t size; /* in sectors */
};
+struct badblocks_context {
+ sector_t start;
+ sector_t len;
+ int ack;
+};
+
int badblocks_check(struct badblocks *bb, sector_t s, int sectors,
sector_t *first_bad, int *bad_sectors);
int badblocks_set(struct badblocks *bb, sector_t s, int sectors,
@@ -63,4 +70,27 @@ static inline void devm_exit_badblocks(struct device *dev, struct badblocks *bb)
}
badblocks_exit(bb);
}
+
+static inline int badblocks_full(struct badblocks *bb)
+{
+ return (bb->count >= MAX_BADBLOCKS);
+}
+
+static inline int badblocks_empty(struct badblocks *bb)
+{
+ return (bb->count == 0);
+}
+
+static inline void set_changed(struct badblocks *bb)
+{
+ if (bb->changed != 1)
+ bb->changed = 1;
+}
+
+static inline void clear_changed(struct badblocks *bb)
+{
+ if (bb->changed != 0)
+ bb->changed = 0;
+}
+
#endif
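Each table entry packs the start sector, a 9-bit "length minus one", and an ack bit into one u64; the new BB_END() yields the first sector past the range. A worked example:

#include <linux/badblocks.h>
#include <linux/bug.h>

static void bb_encoding_demo(void)
{
	/* 8 bad sectors starting at sector 100, already acknowledged */
	u64 e = BB_MAKE(100, 8, 1);

	WARN_ON(BB_OFFSET(e) != 100);
	WARN_ON(BB_LEN(e) != 8);
	WARN_ON(!BB_ACK(e));
	WARN_ON(BB_END(e) != 108);	/* first sector after the bad range */
}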
diff --git a/include/linux/binfmts.h b/include/linux/binfmts.h
index 8d51f69f9f5e..70f97f685bff 100644
--- a/include/linux/binfmts.h
+++ b/include/linux/binfmts.h
@@ -90,6 +90,16 @@ struct linux_binfmt {
#endif
} __randomize_layout;
+#if IS_ENABLED(CONFIG_BINFMT_MISC)
+struct binfmt_misc {
+ struct list_head entries;
+ rwlock_t entries_lock;
+ bool enabled;
+} __randomize_layout;
+
+extern struct binfmt_misc init_binfmt_misc;
+#endif
+
extern void __register_binfmt(struct linux_binfmt *fmt, int insert);
/* Registration of default binfmt handlers */
diff --git a/include/linux/bitmap-str.h b/include/linux/bitmap-str.h
new file mode 100644
index 000000000000..17caeca94cab
--- /dev/null
+++ b/include/linux/bitmap-str.h
@@ -0,0 +1,16 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __LINUX_BITMAP_STR_H
+#define __LINUX_BITMAP_STR_H
+
+int bitmap_parse_user(const char __user *ubuf, unsigned int ulen, unsigned long *dst, int nbits);
+int bitmap_print_to_pagebuf(bool list, char *buf, const unsigned long *maskp, int nmaskbits);
+extern int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp,
+ int nmaskbits, loff_t off, size_t count);
+extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
+ int nmaskbits, loff_t off, size_t count);
+int bitmap_parse(const char *buf, unsigned int buflen, unsigned long *dst, int nbits);
+int bitmap_parselist(const char *buf, unsigned long *maskp, int nmaskbits);
+int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
+ unsigned long *dst, int nbits);
+
+#endif /* __LINUX_BITMAP_STR_H */
diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 03644237e1ef..99451431e4d6 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -6,10 +6,12 @@
#include <linux/align.h>
#include <linux/bitops.h>
+#include <linux/errno.h>
#include <linux/find.h>
#include <linux/limits.h>
#include <linux/string.h>
#include <linux/types.h>
+#include <linux/bitmap-str.h>
struct device;
@@ -200,14 +202,6 @@ bitmap_find_next_zero_area(unsigned long *map,
align_mask, 0);
}
-int bitmap_parse(const char *buf, unsigned int buflen,
- unsigned long *dst, int nbits);
-int bitmap_parse_user(const char __user *ubuf, unsigned int ulen,
- unsigned long *dst, int nbits);
-int bitmap_parselist(const char *buf, unsigned long *maskp,
- int nmaskbits);
-int bitmap_parselist_user(const char __user *ubuf, unsigned int ulen,
- unsigned long *dst, int nbits);
void bitmap_remap(unsigned long *dst, const unsigned long *src,
const unsigned long *old, const unsigned long *new, unsigned int nbits);
int bitmap_bitremap(int oldbit,
@@ -216,23 +210,6 @@ void bitmap_onto(unsigned long *dst, const unsigned long *orig,
const unsigned long *relmap, unsigned int bits);
void bitmap_fold(unsigned long *dst, const unsigned long *orig,
unsigned int sz, unsigned int nbits);
-int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order);
-void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order);
-int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order);
-
-#ifdef __BIG_ENDIAN
-void bitmap_copy_le(unsigned long *dst, const unsigned long *src, unsigned int nbits);
-#else
-#define bitmap_copy_le bitmap_copy
-#endif
-int bitmap_print_to_pagebuf(bool list, char *buf,
- const unsigned long *maskp, int nmaskbits);
-
-extern int bitmap_print_bitmask_to_buf(char *buf, const unsigned long *maskp,
- int nmaskbits, loff_t off, size_t count);
-
-extern int bitmap_print_list_to_buf(char *buf, const unsigned long *maskp,
- int nmaskbits, loff_t off, size_t count);
#define BITMAP_FIRST_WORD_MASK(start) (~0UL << ((start) & (BITS_PER_LONG - 1)))
#define BITMAP_LAST_WORD_MASK(nbits) (~0UL >> (-(nbits) & (BITS_PER_LONG - 1)))
@@ -519,6 +496,66 @@ static inline void bitmap_next_set_region(unsigned long *bitmap,
}
/**
+ * bitmap_release_region - release allocated bitmap region
+ * @bitmap: array of unsigned longs corresponding to the bitmap
+ * @pos: beginning of bit region to release
+ * @order: region size (log base 2 of number of bits) to release
+ *
+ * This is the complement to bitmap_find_free_region() and releases
+ * the found region (by clearing it in the bitmap).
+ */
+static inline void bitmap_release_region(unsigned long *bitmap, unsigned int pos, int order)
+{
+ bitmap_clear(bitmap, pos, BIT(order));
+}
+
+/**
+ * bitmap_allocate_region - allocate bitmap region
+ * @bitmap: array of unsigned longs corresponding to the bitmap
+ * @pos: beginning of bit region to allocate
+ * @order: region size (log base 2 of number of bits) to allocate
+ *
+ * Allocate (set bits in) a specified region of a bitmap.
+ *
+ * Returns: 0 on success, or %-EBUSY if specified region wasn't
+ * free (not all bits were zero).
+ */
+static inline int bitmap_allocate_region(unsigned long *bitmap, unsigned int pos, int order)
+{
+ unsigned int len = BIT(order);
+
+ if (find_next_bit(bitmap, pos + len, pos) < pos + len)
+ return -EBUSY;
+ bitmap_set(bitmap, pos, len);
+ return 0;
+}
+
+/**
+ * bitmap_find_free_region - find a contiguous aligned mem region
+ * @bitmap: array of unsigned longs corresponding to the bitmap
+ * @bits: number of bits in the bitmap
+ * @order: region size (log base 2 of number of bits) to find
+ *
+ * Find a region of free (zero) bits in a @bitmap of @bits bits and
+ * allocate them (set them to one). Only consider regions of length
+ * a power (@order) of two, aligned to that power of two, which
+ * makes the search algorithm much faster.
+ *
+ * Returns: the bit offset in bitmap of the allocated region,
+ * or -errno on failure.
+ */
+static inline int bitmap_find_free_region(unsigned long *bitmap, unsigned int bits, int order)
+{
+ unsigned int pos, end; /* scans bitmap by regions of size order */
+
+ for (pos = 0; (end = pos + BIT(order)) <= bits; pos = end) {
+ if (!bitmap_allocate_region(bitmap, pos, order))
+ return pos;
+ }
+ return -ENOMEM;
+}
+
+/**
* BITMAP_FROM_U64() - Represent u64 value in the format suitable for bitmap.
* @n: u64 value
*
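A hedged sketch of the now-inline region API: grab an order-2 (four-bit, four-bit-aligned) region from a small bitmap and release it again.

#include <linux/bitmap.h>

static void region_demo(void)
{
	DECLARE_BITMAP(map, 32) = { 0 };
	int pos;

	pos = bitmap_find_free_region(map, 32, 2);
	if (pos < 0)
		return;			/* -ENOMEM: no aligned region free */

	/* ... bits pos..pos+3 are now ours ... */

	bitmap_release_region(map, pos, 2);	/* clear them again */
}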
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 958ed7e89b30..1ab3081c82ed 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -32,8 +32,6 @@ typedef __u32 __bitwise req_flags_t;
#define RQF_FLUSH_SEQ ((__force req_flags_t)(1 << 4))
/* merge of different types, fail separately */
#define RQF_MIXED_MERGE ((__force req_flags_t)(1 << 5))
-/* track inflight for MQ */
-#define RQF_MQ_INFLIGHT ((__force req_flags_t)(1 << 6))
/* don't call prep for this one */
#define RQF_DONTPREP ((__force req_flags_t)(1 << 7))
/* use hctx->sched_tags */
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index eef450f25982..51fa7ffdee83 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -1479,14 +1479,25 @@ extern const struct blk_holder_ops fs_holder_ops;
#define sb_open_mode(flags) \
(BLK_OPEN_READ | (((flags) & SB_RDONLY) ? 0 : BLK_OPEN_WRITE))
+struct bdev_handle {
+ struct block_device *bdev;
+ void *holder;
+ blk_mode_t mode;
+};
+
struct block_device *blkdev_get_by_dev(dev_t dev, blk_mode_t mode, void *holder,
const struct blk_holder_ops *hops);
struct block_device *blkdev_get_by_path(const char *path, blk_mode_t mode,
void *holder, const struct blk_holder_ops *hops);
+struct bdev_handle *bdev_open_by_dev(dev_t dev, blk_mode_t mode, void *holder,
+ const struct blk_holder_ops *hops);
+struct bdev_handle *bdev_open_by_path(const char *path, blk_mode_t mode,
+ void *holder, const struct blk_holder_ops *hops);
int bd_prepare_to_claim(struct block_device *bdev, void *holder,
const struct blk_holder_ops *hops);
void bd_abort_claiming(struct block_device *bdev, void *holder);
void blkdev_put(struct block_device *bdev, void *holder);
+void bdev_release(struct bdev_handle *handle);
/* just for blk-cgroup, don't use elsewhere */
struct block_device *blkdev_get_no_open(dev_t dev);
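The handle bundles bdev, holder, and mode, so releasing no longer requires the caller to carry the holder separately. A hedged sketch of the open/release pairing (the surrounding function is invented):

#include <linux/blkdev.h>
#include <linux/err.h>

static int probe_disk(const char *path, void *holder)
{
	struct bdev_handle *handle;

	handle = bdev_open_by_path(path, BLK_OPEN_READ, holder, NULL);
	if (IS_ERR(handle))
		return PTR_ERR(handle);

	/* ... read from handle->bdev ... */

	bdev_release(handle);	/* replaces blkdev_put(bdev, holder) */
	return 0;
}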
diff --git a/include/linux/bootmem_info.h b/include/linux/bootmem_info.h
index e1a3c9c9754c..cffa38a73618 100644
--- a/include/linux/bootmem_info.h
+++ b/include/linux/bootmem_info.h
@@ -60,7 +60,7 @@ static inline void get_page_bootmem(unsigned long info, struct page *page,
static inline void free_bootmem_page(struct page *page)
{
- kmemleak_free_part(page_to_virt(page), PAGE_SIZE);
+ kmemleak_free_part_phys(PFN_PHYS(page_to_pfn(page)), PAGE_SIZE);
free_reserved_page(page);
}
#endif
diff --git a/include/linux/bpf-cgroup-defs.h b/include/linux/bpf-cgroup-defs.h
index 7b121bd780eb..0985221d5478 100644
--- a/include/linux/bpf-cgroup-defs.h
+++ b/include/linux/bpf-cgroup-defs.h
@@ -28,19 +28,24 @@ enum cgroup_bpf_attach_type {
CGROUP_INET6_BIND,
CGROUP_INET4_CONNECT,
CGROUP_INET6_CONNECT,
+ CGROUP_UNIX_CONNECT,
CGROUP_INET4_POST_BIND,
CGROUP_INET6_POST_BIND,
CGROUP_UDP4_SENDMSG,
CGROUP_UDP6_SENDMSG,
+ CGROUP_UNIX_SENDMSG,
CGROUP_SYSCTL,
CGROUP_UDP4_RECVMSG,
CGROUP_UDP6_RECVMSG,
+ CGROUP_UNIX_RECVMSG,
CGROUP_GETSOCKOPT,
CGROUP_SETSOCKOPT,
CGROUP_INET4_GETPEERNAME,
CGROUP_INET6_GETPEERNAME,
+ CGROUP_UNIX_GETPEERNAME,
CGROUP_INET4_GETSOCKNAME,
CGROUP_INET6_GETSOCKNAME,
+ CGROUP_UNIX_GETSOCKNAME,
CGROUP_INET_SOCK_RELEASE,
CGROUP_LSM_START,
CGROUP_LSM_END = CGROUP_LSM_START + CGROUP_LSM_NUM - 1,
diff --git a/include/linux/bpf-cgroup.h b/include/linux/bpf-cgroup.h
index 8506690dbb9c..a789266feac3 100644
--- a/include/linux/bpf-cgroup.h
+++ b/include/linux/bpf-cgroup.h
@@ -48,19 +48,24 @@ to_cgroup_bpf_attach_type(enum bpf_attach_type attach_type)
CGROUP_ATYPE(CGROUP_INET6_BIND);
CGROUP_ATYPE(CGROUP_INET4_CONNECT);
CGROUP_ATYPE(CGROUP_INET6_CONNECT);
+ CGROUP_ATYPE(CGROUP_UNIX_CONNECT);
CGROUP_ATYPE(CGROUP_INET4_POST_BIND);
CGROUP_ATYPE(CGROUP_INET6_POST_BIND);
CGROUP_ATYPE(CGROUP_UDP4_SENDMSG);
CGROUP_ATYPE(CGROUP_UDP6_SENDMSG);
+ CGROUP_ATYPE(CGROUP_UNIX_SENDMSG);
CGROUP_ATYPE(CGROUP_SYSCTL);
CGROUP_ATYPE(CGROUP_UDP4_RECVMSG);
CGROUP_ATYPE(CGROUP_UDP6_RECVMSG);
+ CGROUP_ATYPE(CGROUP_UNIX_RECVMSG);
CGROUP_ATYPE(CGROUP_GETSOCKOPT);
CGROUP_ATYPE(CGROUP_SETSOCKOPT);
CGROUP_ATYPE(CGROUP_INET4_GETPEERNAME);
CGROUP_ATYPE(CGROUP_INET6_GETPEERNAME);
+ CGROUP_ATYPE(CGROUP_UNIX_GETPEERNAME);
CGROUP_ATYPE(CGROUP_INET4_GETSOCKNAME);
CGROUP_ATYPE(CGROUP_INET6_GETSOCKNAME);
+ CGROUP_ATYPE(CGROUP_UNIX_GETSOCKNAME);
CGROUP_ATYPE(CGROUP_INET_SOCK_RELEASE);
default:
return CGROUP_BPF_ATTACH_TYPE_INVALID;
@@ -120,6 +125,7 @@ int __cgroup_bpf_run_filter_sk(struct sock *sk,
int __cgroup_bpf_run_filter_sock_addr(struct sock *sk,
struct sockaddr *uaddr,
+ int *uaddrlen,
enum cgroup_bpf_attach_type atype,
void *t_ctx,
u32 *flags);
@@ -137,11 +143,12 @@ int __cgroup_bpf_run_filter_sysctl(struct ctl_table_header *head,
enum cgroup_bpf_attach_type atype);
int __cgroup_bpf_run_filter_setsockopt(struct sock *sock, int *level,
- int *optname, char __user *optval,
+ int *optname, sockptr_t optval,
int *optlen, char **kernel_optval);
+
int __cgroup_bpf_run_filter_getsockopt(struct sock *sk, int level,
- int optname, char __user *optval,
- int __user *optlen, int max_optlen,
+ int optname, sockptr_t optval,
+ sockptr_t optlen, int max_optlen,
int retval);
int __cgroup_bpf_run_filter_getsockopt_kern(struct sock *sk, int level,
@@ -230,22 +237,22 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) \
BPF_CGROUP_RUN_SK_PROG(sk, CGROUP_INET6_POST_BIND)
-#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) \
+#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype) \
({ \
int __ret = 0; \
if (cgroup_bpf_enabled(atype)) \
- __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
- NULL, NULL); \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
+ atype, NULL, NULL); \
__ret; \
})
-#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) \
+#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx) \
({ \
int __ret = 0; \
if (cgroup_bpf_enabled(atype)) { \
lock_sock(sk); \
- __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
- t_ctx, NULL); \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
+ atype, t_ctx, NULL); \
release_sock(sk); \
} \
__ret; \
@@ -256,14 +263,14 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
* (at bit position 0) is to indicate CAP_NET_BIND_SERVICE capability check
* should be bypassed (BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE).
*/
-#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, bind_flags) \
+#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, uaddrlen, atype, bind_flags) \
({ \
u32 __flags = 0; \
int __ret = 0; \
if (cgroup_bpf_enabled(atype)) { \
lock_sock(sk); \
- __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, atype, \
- NULL, &__flags); \
+ __ret = __cgroup_bpf_run_filter_sock_addr(sk, uaddr, uaddrlen, \
+ atype, NULL, &__flags); \
release_sock(sk); \
if (__flags & BPF_RET_BIND_NO_CAP_NET_BIND_SERVICE) \
*bind_flags |= BIND_NO_CAP_NET_BIND_SERVICE; \
@@ -276,29 +283,38 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
cgroup_bpf_enabled(CGROUP_INET6_CONNECT)) && \
(sk)->sk_prot->pre_connect)
-#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET4_CONNECT)
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, CGROUP_INET4_CONNECT)
+
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT)
+
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET4_CONNECT, NULL)
+
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_INET6_CONNECT, NULL)
-#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG(sk, uaddr, CGROUP_INET6_CONNECT)
+#define BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_CONNECT, NULL)
-#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET4_CONNECT, NULL)
+#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_SENDMSG, t_ctx)
-#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_INET6_CONNECT, NULL)
+#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_SENDMSG, t_ctx)
-#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_SENDMSG, t_ctx)
+#define BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_SENDMSG, t_ctx)
-#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_SENDMSG, t_ctx)
+#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP4_RECVMSG, NULL)
-#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP4_RECVMSG, NULL)
+#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UDP6_RECVMSG, NULL)
-#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) \
- BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, CGROUP_UDP6_RECVMSG, NULL)
+#define BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, uaddr, uaddrlen) \
+ BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, CGROUP_UNIX_RECVMSG, NULL)
/* The SOCK_OPS"_SK" macro should be used when sock_ops->sk is not a
* fullsock and its parent fullsock cannot be traced by
@@ -377,7 +393,7 @@ static inline bool cgroup_bpf_sock_enabled(struct sock *sk,
({ \
int __ret = 0; \
if (cgroup_bpf_enabled(CGROUP_GETSOCKOPT)) \
- get_user(__ret, optlen); \
+ copy_from_sockptr(&__ret, optlen, sizeof(int)); \
__ret; \
})
@@ -477,24 +493,27 @@ static inline int bpf_percpu_cgroup_storage_update(struct bpf_map *map,
}
#define cgroup_bpf_enabled(atype) (0)
-#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, atype, t_ctx) ({ 0; })
-#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, atype) ({ 0; })
+#define BPF_CGROUP_RUN_SA_PROG_LOCK(sk, uaddr, uaddrlen, atype, t_ctx) ({ 0; })
+#define BPF_CGROUP_RUN_SA_PROG(sk, uaddr, uaddrlen, atype) ({ 0; })
#define BPF_CGROUP_PRE_CONNECT_ENABLED(sk) (0)
#define BPF_CGROUP_RUN_PROG_INET_INGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_EGRESS(sk,skb) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET_SOCK_RELEASE(sk) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, atype, flags) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET_BIND_LOCK(sk, uaddr, uaddrlen, atype, flags) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET4_POST_BIND(sk) ({ 0; })
#define BPF_CGROUP_RUN_PROG_INET6_POST_BIND(sk) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, t_ctx) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr) ({ 0; })
-#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET4_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_INET6_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UNIX_CONNECT_LOCK(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UDP4_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UDP6_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk, uaddr, uaddrlen, t_ctx) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UDP4_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UDP6_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
+#define BPF_CGROUP_RUN_PROG_UNIX_RECVMSG_LOCK(sk, uaddr, uaddrlen) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SOCK_OPS(sock_ops) ({ 0; })
#define BPF_CGROUP_RUN_PROG_DEVICE_CGROUP(atype, major, minor, access) ({ 0; })
#define BPF_CGROUP_RUN_PROG_SYSCTL(head,table,write,buf,count,pos) ({ 0; })
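Passing the address length by reference lets a sockaddr-rewriting program shrink or grow the address. A hedged sketch of the new AF_UNIX sendmsg hook from a caller's perspective (names are illustrative):

#include <linux/bpf-cgroup.h>
#include <linux/socket.h>

static int run_unix_sendmsg_hook(struct sock *sk, struct msghdr *msg)
{
	struct sockaddr *uaddr = (struct sockaddr *)msg->msg_name;
	int uaddrlen = msg->msg_namelen;
	int err;

	/* the attached program may rewrite both *uaddr and uaddrlen */
	err = BPF_CGROUP_RUN_PROG_UNIX_SENDMSG_LOCK(sk, uaddr, &uaddrlen, NULL);
	if (err)
		return err;

	msg->msg_namelen = uaddrlen;	/* propagate any rewrite */
	return 0;
}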
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 024e8b28c34b..b4825d3cdb29 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -55,8 +55,8 @@ struct cgroup;
extern struct idr btf_idr;
extern spinlock_t btf_idr_lock;
extern struct kobject *btf_kobj;
-extern struct bpf_mem_alloc bpf_global_ma;
-extern bool bpf_global_ma_set;
+extern struct bpf_mem_alloc bpf_global_ma, bpf_global_percpu_ma;
+extern bool bpf_global_ma_set, bpf_global_percpu_ma_set;
typedef u64 (*bpf_callback_t)(u64, u64, u64, u64, u64);
typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
@@ -180,14 +180,15 @@ enum btf_field_type {
BPF_TIMER = (1 << 1),
BPF_KPTR_UNREF = (1 << 2),
BPF_KPTR_REF = (1 << 3),
- BPF_KPTR = BPF_KPTR_UNREF | BPF_KPTR_REF,
- BPF_LIST_HEAD = (1 << 4),
- BPF_LIST_NODE = (1 << 5),
- BPF_RB_ROOT = (1 << 6),
- BPF_RB_NODE = (1 << 7),
+ BPF_KPTR_PERCPU = (1 << 4),
+ BPF_KPTR = BPF_KPTR_UNREF | BPF_KPTR_REF | BPF_KPTR_PERCPU,
+ BPF_LIST_HEAD = (1 << 5),
+ BPF_LIST_NODE = (1 << 6),
+ BPF_RB_ROOT = (1 << 7),
+ BPF_RB_NODE = (1 << 8),
BPF_GRAPH_NODE_OR_ROOT = BPF_LIST_NODE | BPF_LIST_HEAD |
BPF_RB_NODE | BPF_RB_ROOT,
- BPF_REFCOUNT = (1 << 8),
+ BPF_REFCOUNT = (1 << 9),
};
typedef void (*btf_dtor_kfunc_t)(void *);
@@ -300,6 +301,8 @@ static inline const char *btf_field_type_name(enum btf_field_type type)
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
return "kptr";
+ case BPF_KPTR_PERCPU:
+ return "percpu_kptr";
case BPF_LIST_HEAD:
return "bpf_list_head";
case BPF_LIST_NODE:
@@ -325,6 +328,7 @@ static inline u32 btf_field_type_size(enum btf_field_type type)
return sizeof(struct bpf_timer);
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
+ case BPF_KPTR_PERCPU:
return sizeof(u64);
case BPF_LIST_HEAD:
return sizeof(struct bpf_list_head);
@@ -351,6 +355,7 @@ static inline u32 btf_field_type_align(enum btf_field_type type)
return __alignof__(struct bpf_timer);
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
+ case BPF_KPTR_PERCPU:
return __alignof__(u64);
case BPF_LIST_HEAD:
return __alignof__(struct bpf_list_head);
@@ -389,6 +394,7 @@ static inline void bpf_obj_init_field(const struct btf_field *field, void *addr)
case BPF_TIMER:
case BPF_KPTR_UNREF:
case BPF_KPTR_REF:
+ case BPF_KPTR_PERCPU:
break;
default:
WARN_ON_ONCE(1);
@@ -1029,6 +1035,11 @@ struct btf_func_model {
*/
#define BPF_TRAMP_F_SHARE_IPMODIFY BIT(6)
+/* Indicate that the current trampoline is in a tail call context. It then has
+ * to cache and restore tail_call_cnt to avoid an infinite tail call loop.
+ */
+#define BPF_TRAMP_F_TAIL_CALL_CTX BIT(7)
+
/* Each call __bpf_prog_enter + call bpf_func + call __bpf_prog_exit is ~50
* bytes on x86.
*/
@@ -1307,7 +1318,7 @@ static inline int bpf_trampoline_unlink_prog(struct bpf_tramp_link *link,
static inline struct bpf_trampoline *bpf_trampoline_get(u64 key,
struct bpf_attach_target_info *tgt_info)
{
- return ERR_PTR(-EOPNOTSUPP);
+ return NULL;
}
static inline void bpf_trampoline_put(struct bpf_trampoline *tr) {}
#define DEFINE_BPF_DISPATCHER(name)
@@ -1378,6 +1389,7 @@ struct bpf_prog_aux {
u32 stack_depth;
u32 id;
u32 func_cnt; /* used by non-func prog as the number of func progs */
+ u32 real_func_cnt; /* includes hidden progs, only used for JIT and freeing progs */
u32 func_idx; /* 0 for non-func prog, the index in func array for func prog */
u32 attach_btf_id; /* in-kernel BTF type id to attach to */
u32 ctx_arg_info_size;
@@ -1398,6 +1410,8 @@ struct bpf_prog_aux {
bool sleepable;
bool tail_call_reachable;
bool xdp_has_frags;
+ bool exception_cb;
+ bool exception_boundary;
/* BTF_KIND_FUNC_PROTO for valid attach_btf_id */
const struct btf_type *attach_func_proto;
/* function name for valid attach_btf_id */
@@ -1420,6 +1434,7 @@ struct bpf_prog_aux {
int cgroup_atype; /* enum cgroup_bpf_attach_type */
struct bpf_map *cgroup_storage[MAX_BPF_CGROUP_STORAGE_TYPE];
char name[BPF_OBJ_NAME_LEN];
+ unsigned int (*bpf_exception_cb)(u64 cookie, u64 sp, u64 bp);
#ifdef CONFIG_SECURITY
void *security;
#endif
@@ -2043,6 +2058,7 @@ struct btf_record *btf_record_dup(const struct btf_record *rec);
bool btf_record_equal(const struct btf_record *rec_a, const struct btf_record *rec_b);
void bpf_obj_free_timer(const struct btf_record *rec, void *obj);
void bpf_obj_free_fields(const struct btf_record *rec, void *obj);
+void __bpf_obj_drop_impl(void *p, const struct btf_record *rec, bool percpu);
struct bpf_map *bpf_map_get(u32 ufd);
struct bpf_map *bpf_map_get_with_uref(u32 ufd);
@@ -2149,12 +2165,12 @@ static inline bool bpf_allow_uninit_stack(void)
static inline bool bpf_bypass_spec_v1(void)
{
- return perfmon_capable();
+ return cpu_mitigations_off() || perfmon_capable();
}
static inline bool bpf_bypass_spec_v4(void)
{
- return perfmon_capable();
+ return cpu_mitigations_off() || perfmon_capable();
}
int bpf_map_new_fd(struct bpf_map *map, int flags);
@@ -2407,9 +2423,11 @@ int btf_check_subprog_arg_match(struct bpf_verifier_env *env, int subprog,
int btf_check_subprog_call(struct bpf_verifier_env *env, int subprog,
struct bpf_reg_state *regs);
int btf_prepare_func_args(struct bpf_verifier_env *env, int subprog,
- struct bpf_reg_state *reg);
+ struct bpf_reg_state *reg, bool is_ex_cb);
int btf_check_type_match(struct bpf_verifier_log *log, const struct bpf_prog *prog,
struct btf *btf, const struct btf_type *t);
+const char *btf_find_decl_tag_value(const struct btf *btf, const struct btf_type *pt,
+ int comp_idx, const char *tag_key);
struct bpf_prog *bpf_prog_by_id(u32 id);
struct bpf_link *bpf_link_by_id(u32 id);
@@ -2461,6 +2479,9 @@ void bpf_dynptr_init(struct bpf_dynptr_kern *ptr, void *data,
enum bpf_dynptr_type type, u32 offset, u32 size);
void bpf_dynptr_set_null(struct bpf_dynptr_kern *ptr);
void bpf_dynptr_set_rdonly(struct bpf_dynptr_kern *ptr);
+
+bool dev_check_flush(void);
+bool cpu_map_check_flush(void);
#else /* !CONFIG_BPF_SYSCALL */
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
@@ -2905,6 +2926,22 @@ static inline int sock_map_bpf_prog_query(const union bpf_attr *attr,
#endif /* CONFIG_BPF_SYSCALL */
#endif /* CONFIG_NET && CONFIG_BPF_SYSCALL */
+static __always_inline void
+bpf_prog_inc_misses_counters(const struct bpf_prog_array *array)
+{
+ const struct bpf_prog_array_item *item;
+ struct bpf_prog *prog;
+
+ if (unlikely(!array))
+ return;
+
+ item = &array->items[0];
+ while ((prog = READ_ONCE(item->prog))) {
+ bpf_prog_inc_misses_counter(prog);
+ item++;
+ }
+}
+
#if defined(CONFIG_INET) && defined(CONFIG_BPF_SYSCALL)
void bpf_sk_reuseport_detach(struct sock *sk);
int bpf_fd_reuseport_array_lookup_elem(struct bpf_map *map, void *key,
@@ -3183,4 +3220,9 @@ static inline gfp_t bpf_memcg_flags(gfp_t flags)
return flags;
}
+static inline bool bpf_is_subprog(const struct bpf_prog *prog)
+{
+ return prog->aux->func_idx != 0;
+}
+
#endif /* _LINUX_BPF_H */
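bpf_prog_inc_misses_counters() is meant for dispatch paths that bail out before running any program (recursion, wrong context) but still want per-program miss accounting. A hedged sketch with a placeholder guard condition:

#include <linux/bpf.h>

static unsigned int dispatch(const struct bpf_prog_array *array, bool blocked)
{
	if (blocked) {
		/* count a miss for every program we would have run */
		bpf_prog_inc_misses_counters(array);
		return 0;
	}
	/* ... run the array normally ... */
	return 1;
}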
diff --git a/include/linux/bpf_mem_alloc.h b/include/linux/bpf_mem_alloc.h
index d644bbb298af..bb1223b21308 100644
--- a/include/linux/bpf_mem_alloc.h
+++ b/include/linux/bpf_mem_alloc.h
@@ -11,6 +11,7 @@ struct bpf_mem_caches;
struct bpf_mem_alloc {
struct bpf_mem_caches __percpu *caches;
struct bpf_mem_cache __percpu *cache;
+ bool percpu;
struct work_struct work;
};
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index b6e58dab8e27..24213a99cc79 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -300,6 +300,7 @@ struct bpf_func_state {
bool in_callback_fn;
struct tnum callback_ret_range;
bool in_async_callback_fn;
+ bool in_exception_callback_fn;
/* The following fields should be last. See copy_func_state() */
int acquired_refs;
@@ -372,10 +373,25 @@ struct bpf_verifier_state {
struct bpf_active_lock active_lock;
bool speculative;
bool active_rcu_lock;
+ /* If this state was ever pointed to by another state's loop_entry field,
+ * this flag is set to true. Used to avoid freeing such states
+ * while they are still in use.
+ */
+ bool used_as_loop_entry;
/* first and last insn idx of this verifier state */
u32 first_insn_idx;
u32 last_insn_idx;
+ /* If this state is part of a states loop, this field points to some
+ * parent of this state such that:
+ * - it is also a member of the same states loop;
+ * - DFS states traversal starting from initial state visits loop_entry
+ * state before this state.
+ * Used to compute topmost loop entry for state loops.
+ * State loops might appear because of open coded iterators logic.
+ * See get_loop_entry() for more information.
+ */
+ struct bpf_verifier_state *loop_entry;
/* jmp history recorded from first to last.
* backtracking is using it to go from last to first.
* For most states jmp_history_cnt is [0-3].
@@ -383,21 +399,21 @@ struct bpf_verifier_state {
*/
struct bpf_idx_pair *jmp_history;
u32 jmp_history_cnt;
+ u32 dfs_depth;
};
-#define bpf_get_spilled_reg(slot, frame) \
+#define bpf_get_spilled_reg(slot, frame, mask) \
(((slot < frame->allocated_stack / BPF_REG_SIZE) && \
- (frame->stack[slot].slot_type[0] == STACK_SPILL)) \
+ ((1 << frame->stack[slot].slot_type[0]) & (mask))) \
? &frame->stack[slot].spilled_ptr : NULL)
/* Iterate over 'frame', setting 'reg' to either NULL or a spilled register. */
-#define bpf_for_each_spilled_reg(iter, frame, reg) \
- for (iter = 0, reg = bpf_get_spilled_reg(iter, frame); \
+#define bpf_for_each_spilled_reg(iter, frame, reg, mask) \
+ for (iter = 0, reg = bpf_get_spilled_reg(iter, frame, mask); \
iter < frame->allocated_stack / BPF_REG_SIZE; \
- iter++, reg = bpf_get_spilled_reg(iter, frame))
+ iter++, reg = bpf_get_spilled_reg(iter, frame, mask))
-/* Invoke __expr over regsiters in __vst, setting __state and __reg */
-#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \
+#define bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, __mask, __expr) \
({ \
struct bpf_verifier_state *___vstate = __vst; \
int ___i, ___j; \
@@ -409,7 +425,7 @@ struct bpf_verifier_state {
__reg = &___regs[___j]; \
(void)(__expr); \
} \
- bpf_for_each_spilled_reg(___j, __state, __reg) { \
+ bpf_for_each_spilled_reg(___j, __state, __reg, __mask) { \
if (!__reg) \
continue; \
(void)(__expr); \
@@ -417,6 +433,10 @@ struct bpf_verifier_state {
} \
})
+/* Invoke __expr over registers in __vst, setting __state and __reg */
+#define bpf_for_each_reg_in_vstate(__vst, __state, __reg, __expr) \
+ bpf_for_each_reg_in_vstate_mask(__vst, __state, __reg, 1 << STACK_SPILL, __expr)
+
/* linked list of verifier states used to prune search */
struct bpf_verifier_state_list {
struct bpf_verifier_state state;
@@ -480,6 +500,7 @@ struct bpf_insn_aux_data {
bool zext_dst; /* this insn zero extends dst reg */
bool storage_get_func_atomic; /* bpf_*_storage_get() with atomic memory alloc */
bool is_iter_next; /* bpf_iter_<type>_next() kfunc call */
+ bool call_with_percpu_alloc_ptr; /* {this,per}_cpu_ptr() with prog percpu alloc */
u8 alu_state; /* used in combination with alu_limit */
/* below fields are initialized once */
@@ -540,7 +561,9 @@ struct bpf_subprog_info {
bool has_tail_call;
bool tail_call_reachable;
bool has_ld_abs;
+ bool is_cb;
bool is_async_cb;
+ bool is_exception_cb;
};
struct bpf_verifier_env;
@@ -587,6 +610,8 @@ struct bpf_verifier_env {
u32 used_map_cnt; /* number of used maps */
u32 used_btf_cnt; /* number of used BTF objects */
u32 id_gen; /* used to generate unique reg IDs */
+ u32 hidden_subprog_cnt; /* number of hidden subprogs */
+ int exception_callback_subprog;
bool explore_alu_limits;
bool allow_ptr_leaks;
bool allow_uninit_stack;
@@ -594,10 +619,11 @@ struct bpf_verifier_env {
bool bypass_spec_v1;
bool bypass_spec_v4;
bool seen_direct_write;
+ bool seen_exception;
struct bpf_insn_aux_data *insn_aux_data; /* array of per-insn state */
const struct bpf_line_info *prev_linfo;
struct bpf_verifier_log log;
- struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 1];
+ struct bpf_subprog_info subprog_info[BPF_MAX_SUBPROGS + 2]; /* max + 2 for the fake and exception subprogs */
union {
struct bpf_idmap idmap_scratch;
struct bpf_idset idset_scratch;
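
As an illustration of the new __mask parameter, here is a minimal sketch (a hypothetical verifier helper, not part of this diff) that visits the frame registers plus spilled and iterator stack slots:

#include <linux/bpf_verifier.h>

/*
 * Hypothetical helper, for illustration only: drop precision marks on
 * every frame register and on spill/iterator stack slots of the current
 * verifier state. The mask selects which stack slot types are visited.
 */
static void clear_precision_marks(struct bpf_verifier_env *env)
{
	struct bpf_func_state *state;
	struct bpf_reg_state *reg;

	bpf_for_each_reg_in_vstate_mask(env->cur_state, state, reg,
					(1 << STACK_SPILL) | (1 << STACK_ITER), ({
		reg->precise = false;
	}));
}
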
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index c55810a43541..1394ba302367 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -11,6 +11,7 @@
#define PHY_ID_BCM50610 0x0143bd60
#define PHY_ID_BCM50610M 0x0143bd70
+#define PHY_ID_BCM5221 0x004061e0
#define PHY_ID_BCM5241 0x0143bc30
#define PHY_ID_BCMAC131 0x0143bc70
#define PHY_ID_BCM5481 0x0143bca0
@@ -331,6 +332,15 @@
#define BCM54XX_WOL_INT_STATUS (MII_BCM54XX_EXP_SEL_WOL + 0x94)
+/* BCM5221 Registers */
+#define BCM5221_AEGSR 0x1C
+#define BCM5221_AEGSR_MDIX_STATUS BIT(13)
+#define BCM5221_AEGSR_MDIX_MAN_SWAP BIT(12)
+#define BCM5221_AEGSR_MDIX_DIS BIT(11)
+
+#define BCM5221_SHDW_AM4_EN_CLK_LPM BIT(2)
+#define BCM5221_SHDW_AM4_FORCE_LPM BIT(1)
+
/*****************************************************************************/
/* Fast Ethernet Transceiver definitions. */
/*****************************************************************************/
diff --git a/include/linux/btf.h b/include/linux/btf.h
index 928113a80a95..c2231c64d60b 100644
--- a/include/linux/btf.h
+++ b/include/linux/btf.h
@@ -74,6 +74,7 @@
#define KF_ITER_NEW (1 << 8) /* kfunc implements BPF iter constructor */
#define KF_ITER_NEXT (1 << 9) /* kfunc implements BPF iter next method */
#define KF_ITER_DESTROY (1 << 10) /* kfunc implements BPF iter destructor */
+#define KF_RCU_PROTECTED (1 << 11) /* kfunc should be protected by rcu cs when invoked */
/*
* Tag marking a kernel function as a kfunc. This is meant to minimize the
diff --git a/include/linux/btf_ids.h b/include/linux/btf_ids.h
index a3462a9b8e18..a9cb10b0e2e9 100644
--- a/include/linux/btf_ids.h
+++ b/include/linux/btf_ids.h
@@ -49,7 +49,7 @@ word \
____BTF_ID(symbol, word)
#define __ID(prefix) \
- __PASTE(prefix, __COUNTER__)
+ __PASTE(__PASTE(prefix, __COUNTER__), __LINE__)
/*
* The BTF_ID defines unique symbol for each ID pointing
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 4ede47649a81..5f23ee599889 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -171,7 +171,10 @@ static __always_inline int buffer_uptodate(const struct buffer_head *bh)
return test_bit_acquire(BH_Uptodate, &bh->b_state);
}
-#define bh_offset(bh) ((unsigned long)(bh)->b_data & ~PAGE_MASK)
+static inline unsigned long bh_offset(const struct buffer_head *bh)
+{
+ return (unsigned long)(bh)->b_data & (page_size(bh->b_page) - 1);
+}
/* If we *know* page->private refers to buffer_heads */
#define page_buffers(page) \
@@ -195,13 +198,11 @@ void touch_buffer(struct buffer_head *bh);
void folio_set_bh(struct buffer_head *bh, struct folio *folio,
unsigned long offset);
struct buffer_head *folio_alloc_buffers(struct folio *folio, unsigned long size,
- bool retry);
+ gfp_t gfp);
struct buffer_head *alloc_page_buffers(struct page *page, unsigned long size,
bool retry);
-void create_empty_buffers(struct page *, unsigned long,
- unsigned long b_state);
-void folio_create_empty_buffers(struct folio *folio, unsigned long blocksize,
- unsigned long b_state);
+struct buffer_head *create_empty_buffers(struct folio *folio,
+ unsigned long blocksize, unsigned long b_state);
void end_buffer_read_sync(struct buffer_head *bh, int uptodate);
void end_buffer_write_sync(struct buffer_head *bh, int uptodate);
void end_buffer_async_write(struct buffer_head *bh, int uptodate);
@@ -224,8 +225,8 @@ void __wait_on_buffer(struct buffer_head *);
wait_queue_head_t *bh_waitq_head(struct buffer_head *bh);
struct buffer_head *__find_get_block(struct block_device *bdev, sector_t block,
unsigned size);
-struct buffer_head *__getblk_gfp(struct block_device *bdev, sector_t block,
- unsigned size, gfp_t gfp);
+struct buffer_head *bdev_getblk(struct block_device *bdev, sector_t block,
+ unsigned size, gfp_t gfp);
void __brelse(struct buffer_head *);
void __bforget(struct buffer_head *);
void __breadahead(struct block_device *, sector_t block, unsigned int size);
@@ -335,17 +336,38 @@ sb_breadahead(struct super_block *sb, sector_t block)
__breadahead(sb->s_bdev, block, sb->s_blocksize);
}
-static inline struct buffer_head *
-sb_getblk(struct super_block *sb, sector_t block)
+static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
+ sector_t block, unsigned size)
{
- return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, __GFP_MOVABLE);
+ gfp_t gfp;
+
+ gfp = mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS);
+ gfp |= __GFP_NOFAIL;
+
+ return bdev_getblk(bdev, block, size, gfp);
}
+static inline struct buffer_head *__getblk(struct block_device *bdev,
+ sector_t block, unsigned size)
+{
+ gfp_t gfp;
-static inline struct buffer_head *
-sb_getblk_gfp(struct super_block *sb, sector_t block, gfp_t gfp)
+ gfp = mapping_gfp_constraint(bdev->bd_inode->i_mapping, ~__GFP_FS);
+ gfp |= __GFP_MOVABLE | __GFP_NOFAIL;
+
+ return bdev_getblk(bdev, block, size, gfp);
+}
+
+static inline struct buffer_head *sb_getblk(struct super_block *sb,
+ sector_t block)
+{
+ return __getblk(sb->s_bdev, block, sb->s_blocksize);
+}
+
+static inline struct buffer_head *sb_getblk_gfp(struct super_block *sb,
+ sector_t block, gfp_t gfp)
{
- return __getblk_gfp(sb->s_bdev, block, sb->s_blocksize, gfp);
+ return bdev_getblk(sb->s_bdev, block, sb->s_blocksize, gfp);
}
static inline struct buffer_head *
@@ -382,20 +404,6 @@ static inline void lock_buffer(struct buffer_head *bh)
__lock_buffer(bh);
}
-static inline struct buffer_head *getblk_unmovable(struct block_device *bdev,
- sector_t block,
- unsigned size)
-{
- return __getblk_gfp(bdev, block, size, 0);
-}
-
-static inline struct buffer_head *__getblk(struct block_device *bdev,
- sector_t block,
- unsigned size)
-{
- return __getblk_gfp(bdev, block, size, __GFP_MOVABLE);
-}
-
static inline void bh_readahead(struct buffer_head *bh, blk_opf_t op_flags)
{
if (!buffer_uptodate(bh) && trylock_buffer(bh)) {
@@ -447,6 +455,28 @@ __bread(struct block_device *bdev, sector_t block, unsigned size)
return __bread_gfp(bdev, block, size, __GFP_MOVABLE);
}
+/**
+ * get_nth_bh - Get a reference on the n'th buffer after this one.
+ * @bh: The buffer to start counting from.
+ * @count: How many buffers to skip.
+ *
+ * This is primarily useful for finding the nth buffer in a folio; in
+ * that case you pass the head buffer and the byte offset in the folio
+ * divided by the block size. It can be used for other purposes, but
+ * it will wrap at the end of the folio rather than returning NULL or
+ * proceeding to the next folio for you.
+ *
+ * Return: The requested buffer with an elevated refcount.
+ */
+static inline __must_check
+struct buffer_head *get_nth_bh(struct buffer_head *bh, unsigned int count)
+{
+ while (count--)
+ bh = bh->b_this_page;
+ get_bh(bh);
+ return bh;
+}
+
bool block_dirty_folio(struct address_space *mapping, struct folio *folio);
#ifdef CONFIG_BUFFER_HEAD
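
To illustrate the usage described in the get_nth_bh() kerneldoc, a minimal sketch (hypothetical helper, assuming the folio already has buffers attached):

#include <linux/buffer_head.h>

/*
 * Sketch: return the buffer covering @offset within @folio, with an
 * elevated refcount, by passing the head buffer and the byte offset
 * divided by the block size as described above.
 */
static struct buffer_head *bh_at_offset(struct folio *folio, size_t offset)
{
	struct buffer_head *head = folio_buffers(folio);

	return get_nth_bh(head, offset / head->b_size);
}
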
diff --git a/include/linux/buildid.h b/include/linux/buildid.h
index 3b7a0ff4642f..8a582d242f06 100644
--- a/include/linux/buildid.h
+++ b/include/linux/buildid.h
@@ -2,10 +2,11 @@
#ifndef _LINUX_BUILDID_H
#define _LINUX_BUILDID_H
-#include <linux/mm_types.h>
+#include <linux/types.h>
#define BUILD_ID_SIZE_MAX 20
+struct vm_area_struct;
int build_id_parse(struct vm_area_struct *vma, unsigned char *build_id,
__u32 *size);
int build_id_parse_buf(const void *buf, unsigned char *build_id, u32 buf_size);
diff --git a/include/linux/cacheinfo.h b/include/linux/cacheinfo.h
index a5cfd44fab45..d504eb4b49ab 100644
--- a/include/linux/cacheinfo.h
+++ b/include/linux/cacheinfo.h
@@ -73,6 +73,7 @@ struct cacheinfo {
struct cpu_cacheinfo {
struct cacheinfo *info_list;
+ unsigned int per_cpu_data_slice_size;
unsigned int num_levels;
unsigned int num_leaves;
bool cpu_map_populated;
diff --git a/include/linux/can/dev.h b/include/linux/can/dev.h
index 982ba245eb41..1b92aed49363 100644
--- a/include/linux/can/dev.h
+++ b/include/linux/can/dev.h
@@ -195,6 +195,10 @@ int can_restart_now(struct net_device *dev);
void can_bus_off(struct net_device *dev);
const char *can_get_state_str(const enum can_state state);
+void can_state_get_by_berr_counter(const struct net_device *dev,
+ const struct can_berr_counter *bec,
+ enum can_state *tx_state,
+ enum can_state *rx_state);
void can_change_state(struct net_device *dev, struct can_frame *cf,
enum can_state tx_state, enum can_state rx_state);
diff --git a/include/linux/cdx/cdx_bus.h b/include/linux/cdx/cdx_bus.h
index bead71b7bc73..94ad2c9017c9 100644
--- a/include/linux/cdx/cdx_bus.h
+++ b/include/linux/cdx/cdx_bus.h
@@ -21,13 +21,19 @@
struct cdx_controller;
enum {
+ CDX_DEV_BUS_MASTER_CONF,
CDX_DEV_RESET_CONF,
};
struct cdx_device_config {
u8 type;
+ bool bus_master_enable;
};
+typedef int (*cdx_bus_enable_cb)(struct cdx_controller *cdx, u8 bus_num);
+
+typedef int (*cdx_bus_disable_cb)(struct cdx_controller *cdx, u8 bus_num);
+
typedef int (*cdx_scan_cb)(struct cdx_controller *cdx);
typedef int (*cdx_dev_configure_cb)(struct cdx_controller *cdx,
@@ -35,6 +41,19 @@ typedef int (*cdx_dev_configure_cb)(struct cdx_controller *cdx,
struct cdx_device_config *dev_config);
/**
+ * CDX_DEVICE - macro used to describe a specific CDX device
+ * @vend: the 16 bit CDX Vendor ID
+ * @dev: the 16 bit CDX Device ID
+ *
+ * This macro is used to create a struct cdx_device_id that matches a
+ * specific device. The subvendor and subdevice fields will be set to
+ * CDX_ANY_ID.
+ */
+#define CDX_DEVICE(vend, dev) \
+ .vendor = (vend), .device = (dev), \
+ .subvendor = CDX_ANY_ID, .subdevice = CDX_ANY_ID
+
+/**
* CDX_DEVICE_DRIVER_OVERRIDE - macro used to describe a CDX device with
* override_only flags.
* @vend: the 16 bit CDX Vendor ID
@@ -42,18 +61,24 @@ typedef int (*cdx_dev_configure_cb)(struct cdx_controller *cdx,
* @driver_override: the 32 bit CDX Device override_only
*
* This macro is used to create a struct cdx_device_id that matches only a
- * driver_override device.
+ * driver_override device. The subvendor and subdevice fields will be set to
+ * CDX_ANY_ID.
*/
#define CDX_DEVICE_DRIVER_OVERRIDE(vend, dev, driver_override) \
- .vendor = (vend), .device = (dev), .override_only = (driver_override)
+ .vendor = (vend), .device = (dev), .subvendor = CDX_ANY_ID,\
+ .subdevice = CDX_ANY_ID, .override_only = (driver_override)
/**
* struct cdx_ops - Callbacks supported by CDX controller.
+ * @bus_enable: enable bus on the controller
+ * @bus_disable: disable bus on the controller
* @scan: scan the devices on the controller
* @dev_configure: configuration like reset, master_enable,
* msi_config etc for a CDX device
*/
struct cdx_ops {
+ cdx_bus_enable_cb bus_enable;
+ cdx_bus_disable_cb bus_disable;
cdx_scan_cb scan;
cdx_dev_configure_cb dev_configure;
};
@@ -63,12 +88,14 @@ struct cdx_ops {
* @dev: Linux device associated with the CDX controller.
* @priv: private data
* @id: Controller ID
+ * @controller_registered: controller registered with bus
* @ops: CDX controller ops
*/
struct cdx_controller {
struct device *dev;
void *priv;
u32 id;
+ bool controller_registered;
struct cdx_ops *ops;
};
@@ -78,6 +105,10 @@ struct cdx_controller {
* @cdx: CDX controller associated with the device
* @vendor: Vendor ID for CDX device
* @device: Device ID for CDX device
+ * @subsystem_vendor: Subsystem Vendor ID for CDX device
+ * @subsystem_device: Subsystem Device ID for CDX device
+ * @class: Class for the CDX device
+ * @revision: Revision of the CDX device
* @bus_num: Bus number for this CDX device
* @dev_num: Device number for this device
* @res: array of MMIO region entries
@@ -86,6 +117,8 @@ struct cdx_controller {
* @dma_mask: Default DMA mask
* @flags: CDX device flags
* @req_id: Requestor ID associated with CDX device
+ * @is_bus: Is this a bus device
+ * @enabled: Is this bus enabled
* @driver_override: driver name to force a match; do not set directly,
* because core frees it; use driver_set_override() to
* set or clear it.
@@ -95,6 +128,10 @@ struct cdx_device {
struct cdx_controller *cdx;
u16 vendor;
u16 device;
+ u16 subsystem_vendor;
+ u16 subsystem_device;
+ u32 class;
+ u8 revision;
u8 bus_num;
u8 dev_num;
struct resource res[MAX_CDX_DEV_RESOURCES];
@@ -102,6 +139,8 @@ struct cdx_device {
u64 dma_mask;
u16 flags;
u32 req_id;
+ bool is_bus;
+ bool enabled;
const char *driver_override;
};
@@ -170,4 +209,20 @@ extern struct bus_type cdx_bus_type;
*/
int cdx_dev_reset(struct device *dev);
+/**
+ * cdx_set_master - enables bus-mastering for CDX device
+ * @cdx_dev: the CDX device to enable
+ *
+ * Return: 0 for success, -errno on failure
+ */
+int cdx_set_master(struct cdx_device *cdx_dev);
+
+/**
+ * cdx_clear_master - disables bus-mastering for CDX device
+ * @cdx_dev: the CDX device to disable
+ *
+ * Return: 0 for success, -errno on failure
+ */
+int cdx_clear_master(struct cdx_device *cdx_dev);
+
#endif /* _CDX_BUS_H_ */
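
A hypothetical match table (the IDs are illustrative) showing how the new CDX_DEVICE() macro defaults subvendor/subdevice to CDX_ANY_ID:

#include <linux/cdx/cdx_bus.h>
#include <linux/mod_devicetable.h>
#include <linux/module.h>

/* Hypothetical vendor/device IDs, shown only to illustrate the macro. */
static const struct cdx_device_id foo_cdx_ids[] = {
	{ CDX_DEVICE(0x10ee, 0x8084) },	/* subvendor/subdevice = CDX_ANY_ID */
	{ },
};
MODULE_DEVICE_TABLE(cdx, foo_cdx_ids);
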
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index 5f2301ee88bc..f3b3593254b9 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -467,19 +467,17 @@ union ceph_mds_request_args {
} __attribute__ ((packed));
union ceph_mds_request_args_ext {
- union {
- union ceph_mds_request_args old;
- struct {
- __le32 mode;
- __le32 uid;
- __le32 gid;
- struct ceph_timespec mtime;
- struct ceph_timespec atime;
- __le64 size, old_size; /* old_size needed by truncate */
- __le32 mask; /* CEPH_SETATTR_* */
- struct ceph_timespec btime;
- } __attribute__ ((packed)) setattr_ext;
- };
+ union ceph_mds_request_args old;
+ struct {
+ __le32 mode;
+ __le32 uid;
+ __le32 gid;
+ struct ceph_timespec mtime;
+ struct ceph_timespec atime;
+ __le64 size, old_size; /* old_size needed by truncate */
+ __le32 mask; /* CEPH_SETATTR_* */
+ struct ceph_timespec btime;
+ } __attribute__ ((packed)) setattr_ext;
};
#define CEPH_MDS_FLAG_REPLAY 1 /* this is a replayed op */
diff --git a/include/linux/ceph/mon_client.h b/include/linux/ceph/mon_client.h
index b658961156a0..7a9a40163c0f 100644
--- a/include/linux/ceph/mon_client.h
+++ b/include/linux/ceph/mon_client.h
@@ -19,7 +19,7 @@ struct ceph_monmap {
struct ceph_fsid fsid;
u32 epoch;
u32 num_mon;
- struct ceph_entity_inst mon_inst[];
+ struct ceph_entity_inst mon_inst[] __counted_by(num_mon);
};
struct ceph_mon_client;
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index bf9823956758..b8610e9d2471 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -278,7 +278,7 @@ struct ceph_osd_request {
int r_attempts;
u32 r_map_dne_bound;
- struct ceph_osd_req_op r_ops[];
+ struct ceph_osd_req_op r_ops[] __counted_by(r_num_ops);
};
struct ceph_request_redirect {
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index f1b3151ac30b..4a6b6b77ccb6 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -115,6 +115,11 @@ enum {
* Enable recursive subtree protection
*/
CGRP_ROOT_MEMORY_RECURSIVE_PROT = (1 << 18),
+
+ /*
+ * Enable hugetlb accounting for the memory controller.
+ */
+ CGRP_ROOT_MEMORY_HUGETLB_ACCOUNTING = (1 << 19),
};
/* cftype->flags */
@@ -238,7 +243,7 @@ struct css_set {
* Lists running through all tasks using this cgroup group.
* mg_tasks lists tasks which belong to this cset but are in the
* process of being migrated out or in. Protected by
- * css_set_rwsem, but, during migration, once tasks are moved to
+ * css_set_lock, but, during migration, once tasks are moved to
* mg_tasks, it can be read safely while holding cgroup_mutex.
*/
struct list_head tasks;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index b307013b9c6c..0ef0af66080e 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -40,13 +40,11 @@ struct kernel_clone_args;
#define CGROUP_WEIGHT_DFL 100
#define CGROUP_WEIGHT_MAX 10000
-/* walk only threadgroup leaders */
-#define CSS_TASK_ITER_PROCS (1U << 0)
-/* walk all threaded css_sets in the domain */
-#define CSS_TASK_ITER_THREADED (1U << 1)
-
-/* internal flags */
-#define CSS_TASK_ITER_SKIPPED (1U << 16)
+enum {
+ CSS_TASK_ITER_PROCS = (1U << 0), /* walk only threadgroup leaders */
+ CSS_TASK_ITER_THREADED = (1U << 1), /* walk all threaded css_sets in the domain */
+ CSS_TASK_ITER_SKIPPED = (1U << 16), /* internal flags */
+};
/* a css_task_iter should be treated as an opaque object */
struct css_task_iter {
diff --git a/include/linux/cleanup.h b/include/linux/cleanup.h
index 53f1a7a932b0..9f1a9c455b68 100644
--- a/include/linux/cleanup.h
+++ b/include/linux/cleanup.h
@@ -7,8 +7,9 @@
/*
* DEFINE_FREE(name, type, free):
* simple helper macro that defines the required wrapper for a __free()
- * based cleanup function. @free is an expression using '_T' to access
- * the variable.
+ * based cleanup function. @free is an expression using '_T' to access the
+ * variable. @free should typically include a NULL test before calling a
+ * function, see the example below.
*
* __free(name):
* variable attribute to add a scoped based cleanup to the variable.
@@ -17,6 +18,9 @@
* like a non-atomic xchg(var, NULL), such that the cleanup function will
* be inhibited -- provided it sanely deals with a NULL value.
*
+ * NOTE: this has __must_check semantics so that it is harder to accidentally
+ * leak the resource.
+ *
* return_ptr(p):
* returns p while inhibiting the __free().
*
@@ -24,6 +28,8 @@
*
* DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
*
+ * void *alloc_obj(...)
+ * {
* struct obj *p __free(kfree) = kmalloc(...);
* if (!p)
* return NULL;
@@ -32,6 +38,24 @@
* return NULL;
*
* return_ptr(p);
+ * }
+ *
+ * NOTE: the DEFINE_FREE()'s @free expression includes a NULL test even though
+ * kfree() is fine to be called with a NULL value. This is on purpose. This way
+ * the compiler sees the end of our alloc_obj() function as:
+ *
+ * tmp = p;
+ * p = NULL;
+ * if (p)
+ * kfree(p);
+ * return tmp;
+ *
+ * And through the magic of value-propagation and dead-code-elimination, it
+ * eliminates the actual cleanup call and compiles into:
+ *
+ * return p;
+ *
+ * Without the NULL test it turns into a mess and the compiler can't help us.
*/
#define DEFINE_FREE(_name, _type, _free) \
@@ -39,8 +63,17 @@
#define __free(_name) __cleanup(__free_##_name)
+#define __get_and_null_ptr(p) \
+ ({ __auto_type __ptr = &(p); \
+ __auto_type __val = *__ptr; \
+ *__ptr = NULL; __val; })
+
+static inline __must_check
+const volatile void * __must_check_fn(const volatile void *val)
+{ return val; }
+
#define no_free_ptr(p) \
- ({ __auto_type __ptr = (p); (p) = NULL; __ptr; })
+ ((typeof(p)) __must_check_fn(__get_and_null_ptr(p)))
#define return_ptr(p) return no_free_ptr(p)
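
Putting the pieces together, a compilable variant of the example from the comment above (in-tree, the kfree cleanup class comes from slab.h; the struct and function names here are illustrative):

#include <linux/cleanup.h>
#include <linux/slab.h>	/* provides DEFINE_FREE(kfree, ...) in-tree */

struct obj { int x; };

static struct obj *alloc_obj(int x)
{
	struct obj *p __free(kfree) = kmalloc(sizeof(*p), GFP_KERNEL);

	if (!p)
		return NULL;
	p->x = x;
	return_ptr(p);	/* ownership leaves the scope; __free() is inhibited */
}

On the error paths, p still holds the allocation when the scope ends, so kfree() runs automatically; only return_ptr() hands the pointer out uncleaned.
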
diff --git a/include/linux/clk-provider.h b/include/linux/clk-provider.h
index ec32ec58c59f..ace3a4ce2fc9 100644
--- a/include/linux/clk-provider.h
+++ b/include/linux/clk-provider.h
@@ -74,7 +74,7 @@ void clk_hw_forward_rate_request(const struct clk_hw *core,
unsigned long parent_rate);
/**
- * struct clk_duty - Struture encoding the duty cycle ratio of a clock
+ * struct clk_duty - Structure encoding the duty cycle ratio of a clock
*
* @num: Numerator of the duty cycle ratio
* @den: Denominator of the duty cycle ratio
@@ -129,7 +129,7 @@ struct clk_duty {
* @restore_context: Restore the context of the clock after a restoration
* of power.
*
- * @recalc_rate Recalculate the rate of this clock, by querying hardware. The
+ * @recalc_rate: Recalculate the rate of this clock, by querying hardware. The
* parent rate is an input parameter. It is up to the caller to
* ensure that the prepare_mutex is held across this call. If the
* driver cannot figure out a rate for this clock, it must return
@@ -456,7 +456,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
* clock with the clock framework
* @dev: device that is registering this clock
* @name: name of this clock
- * @parent_name: name of clock's parent
+ * @parent_data: name of clock's parent
* @flags: framework-specific flags
* @fixed_rate: non-adjustable clock rate
* @fixed_accuracy: non-adjustable clock accuracy
@@ -471,7 +471,7 @@ struct clk *clk_register_fixed_rate(struct device *dev, const char *name,
* the clock framework
* @dev: device that is registering this clock
* @name: name of this clock
- * @parent_name: name of clock's parent
+ * @parent_data: name of clock's parent
* @flags: framework-specific flags
* @fixed_rate: non-adjustable clock rate
*/
@@ -649,7 +649,7 @@ struct clk_div_table {
* Clock with an adjustable divider affecting its output frequency. Implements
* .recalc_rate, .set_rate and .round_rate
*
- * Flags:
+ * @flags:
* CLK_DIVIDER_ONE_BASED - by default the divisor is the value read from the
* register plus one. If CLK_DIVIDER_ONE_BASED is set then the divider is
* the raw value read from the register, with the value of zero considered
@@ -1130,11 +1130,12 @@ struct clk_hw *clk_hw_register_fixed_factor_parent_hw(struct device *dev,
* @mwidth: width of the numerator bit field
* @nshift: shift to the denominator bit field
* @nwidth: width of the denominator bit field
+ * @approximation: clk driver's callback for calculating the divider clock
* @lock: register lock
*
* Clock with adjustable fractional divider affecting its output frequency.
*
- * Flags:
+ * @flags:
* CLK_FRAC_DIVIDER_ZERO_BASED - by default the numerator and denominator
* is the value read from the register. If CLK_FRAC_DIVIDER_ZERO_BASED
* is set then the numerator and denominator are both the value read
@@ -1191,7 +1192,7 @@ void clk_hw_unregister_fractional_divider(struct clk_hw *hw);
* Clock with an adjustable multiplier affecting its output frequency.
* Implements .recalc_rate, .set_rate and .round_rate
*
- * Flags:
+ * @flags:
* CLK_MULTIPLIER_ZERO_BYPASS - By default, the multiplier is the value read
* from the register, with 0 being a valid value effectively
* zeroing the output clock rate. If CLK_MULTIPLIER_ZERO_BYPASS is
diff --git a/include/linux/closure.h b/include/linux/closure.h
new file mode 100644
index 000000000000..722a586bb224
--- /dev/null
+++ b/include/linux/closure.h
@@ -0,0 +1,404 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_CLOSURE_H
+#define _LINUX_CLOSURE_H
+
+#include <linux/llist.h>
+#include <linux/sched.h>
+#include <linux/sched/task_stack.h>
+#include <linux/workqueue.h>
+
+/*
+ * Closure is perhaps the most overused and abused term in computer science, but
+ * since I've been unable to come up with anything better you're stuck with it
+ * again.
+ *
+ * What are closures?
+ *
+ * They embed a refcount. The basic idea is they count "things that are in
+ * progress" - in flight bios, some other thread that's doing something else -
+ * anything you might want to wait on.
+ *
+ * The refcount may be manipulated with closure_get() and closure_put().
+ * closure_put() is where many of the interesting things happen, when it causes
+ * the refcount to go to 0.
+ *
+ * Closures can be used to wait on things both synchronously and asynchronously,
+ * and synchronous and asynchronous use can be mixed without restriction. To
+ * wait synchronously, use closure_sync() - you will sleep until your closure's
+ * refcount hits 1.
+ *
+ * To wait asynchronously, use
+ * continue_at(cl, next_function, workqueue);
+ *
+ * passing it, as you might expect, the function to run when nothing is pending
+ * and the workqueue to run that function out of.
+ *
+ * continue_at() also, critically, requires a 'return' immediately following the
+ * location where this macro is referenced, to return to the calling function.
+ * There's good reason for this.
+ *
+ * To safely use closures asynchronously, they must always have a refcount while
+ * they are running owned by the thread that is running them. Otherwise, suppose
+ * you submit some bios and wish to have a function run when they all complete:
+ *
+ * foo_endio(struct bio *bio)
+ * {
+ * closure_put(cl);
+ * }
+ *
+ * closure_init(cl);
+ *
+ * do_stuff();
+ * closure_get(cl);
+ * bio1->bi_endio = foo_endio;
+ * bio_submit(bio1);
+ *
+ * do_more_stuff();
+ * closure_get(cl);
+ * bio2->bi_endio = foo_endio;
+ * bio_submit(bio2);
+ *
+ * continue_at(cl, complete_some_read, system_wq);
+ *
+ * If closure's refcount started at 0, complete_some_read() could run before the
+ * second bio was submitted - which is almost always not what you want! More
+ * importantly, it wouldn't be possible to say whether the original thread or
+ * complete_some_read()'s thread owned the closure - and whatever state it was
+ * associated with!
+ *
+ * So, closure_init() initializes a closure's refcount to 1 - and when a
+ * closure_fn is run, the refcount will be reset to 1 first.
+ *
+ * Then, the rule is - if you got the refcount with closure_get(), release it
+ * with closure_put() (i.e, in a bio->bi_endio function). If you have a refcount
+ * on a closure because you called closure_init() or you were run out of a
+ * closure - _always_ use continue_at(). Doing so consistently will help
+ * eliminate an entire class of particularly pernicious races.
+ *
+ * Lastly, you might have a wait list dedicated to a specific event, and have no
+ * need for specifying the condition - you just want to wait until someone runs
+ * closure_wake_up() on the appropriate wait list. In that case, just use
+ * closure_wait(). It will return either true or false, depending on whether the
+ * closure was already on a wait list or not - a closure can only be on one wait
+ * list at a time.
+ *
+ * Parents:
+ *
+ * closure_init() takes two arguments - it takes the closure to initialize, and
+ * a (possibly null) parent.
+ *
+ * If parent is non null, the new closure will have a refcount for its lifetime;
+ * a closure is considered to be "finished" when its refcount hits 0 and the
+ * function to run is null. Hence
+ *
+ * continue_at(cl, NULL, NULL);
+ *
+ * returns up the (spaghetti) stack of closures, precisely like normal return
+ * returns up the C stack. continue_at() with non null fn is better thought of
+ * as doing a tail call.
+ *
+ * All this implies that a closure should typically be embedded in a particular
+ * struct (which its refcount will normally control the lifetime of), and that
+ * struct can very much be thought of as a stack frame.
+ */
+
+struct closure;
+struct closure_syncer;
+typedef void (closure_fn) (struct closure *);
+extern struct dentry *bcache_debug;
+
+struct closure_waitlist {
+ struct llist_head list;
+};
+
+enum closure_state {
+ /*
+ * CLOSURE_WAITING: Set iff the closure is on a waitlist. Must be set by
+ * the thread that owns the closure, and cleared by the thread that's
+ * waking up the closure.
+ *
+ * The rest are for debugging and don't affect behaviour:
+ *
+ * CLOSURE_RUNNING: Set when a closure is running (i.e. by
+ * closure_init() and when closure_put() runs the next function), and
+ * must be cleared before remaining hits 0. Primarily to help guard
+ * against incorrect usage and accidentally transferring references.
+ * continue_at() and closure_return() clear it for you, if you're doing
+ * something unusual you can use closure_set_dead() which also helps
+ * annotate where references are being transferred.
+ */
+
+ CLOSURE_BITS_START = (1U << 26),
+ CLOSURE_DESTRUCTOR = (1U << 26),
+ CLOSURE_WAITING = (1U << 28),
+ CLOSURE_RUNNING = (1U << 30),
+};
+
+#define CLOSURE_GUARD_MASK \
+ ((CLOSURE_DESTRUCTOR|CLOSURE_WAITING|CLOSURE_RUNNING) << 1)
+
+#define CLOSURE_REMAINING_MASK (CLOSURE_BITS_START - 1)
+#define CLOSURE_REMAINING_INITIALIZER (1|CLOSURE_RUNNING)
+
+struct closure {
+ union {
+ struct {
+ struct workqueue_struct *wq;
+ struct closure_syncer *s;
+ struct llist_node list;
+ closure_fn *fn;
+ };
+ struct work_struct work;
+ };
+
+ struct closure *parent;
+
+ atomic_t remaining;
+
+#ifdef CONFIG_DEBUG_CLOSURES
+#define CLOSURE_MAGIC_DEAD 0xc054dead
+#define CLOSURE_MAGIC_ALIVE 0xc054a11e
+
+ unsigned int magic;
+ struct list_head all;
+ unsigned long ip;
+ unsigned long waiting_on;
+#endif
+};
+
+void closure_sub(struct closure *cl, int v);
+void closure_put(struct closure *cl);
+void __closure_wake_up(struct closure_waitlist *list);
+bool closure_wait(struct closure_waitlist *list, struct closure *cl);
+void __closure_sync(struct closure *cl);
+
+static inline unsigned closure_nr_remaining(struct closure *cl)
+{
+ return atomic_read(&cl->remaining) & CLOSURE_REMAINING_MASK;
+}
+
+/**
+ * closure_sync - sleep until a closure has nothing left to wait on
+ *
+ * Sleeps until the refcount hits 1 - the thread that's running the closure owns
+ * the last refcount.
+ */
+static inline void closure_sync(struct closure *cl)
+{
+ if (closure_nr_remaining(cl) != 1)
+ __closure_sync(cl);
+}
+
+#ifdef CONFIG_DEBUG_CLOSURES
+
+void closure_debug_create(struct closure *cl);
+void closure_debug_destroy(struct closure *cl);
+
+#else
+
+static inline void closure_debug_create(struct closure *cl) {}
+static inline void closure_debug_destroy(struct closure *cl) {}
+
+#endif
+
+static inline void closure_set_ip(struct closure *cl)
+{
+#ifdef CONFIG_DEBUG_CLOSURES
+ cl->ip = _THIS_IP_;
+#endif
+}
+
+static inline void closure_set_ret_ip(struct closure *cl)
+{
+#ifdef CONFIG_DEBUG_CLOSURES
+ cl->ip = _RET_IP_;
+#endif
+}
+
+static inline void closure_set_waiting(struct closure *cl, unsigned long f)
+{
+#ifdef CONFIG_DEBUG_CLOSURES
+ cl->waiting_on = f;
+#endif
+}
+
+static inline void closure_set_stopped(struct closure *cl)
+{
+ atomic_sub(CLOSURE_RUNNING, &cl->remaining);
+}
+
+static inline void set_closure_fn(struct closure *cl, closure_fn *fn,
+ struct workqueue_struct *wq)
+{
+ closure_set_ip(cl);
+ cl->fn = fn;
+ cl->wq = wq;
+ /* pairs with the atomic_dec() in closure_put() */
+ smp_mb__before_atomic();
+}
+
+static inline void closure_queue(struct closure *cl)
+{
+ struct workqueue_struct *wq = cl->wq;
+ /*
+ * Changes made to closure, work_struct, or a couple of other structs
+ * may leave work.func pointing to the wrong location.
+ */
+ BUILD_BUG_ON(offsetof(struct closure, fn)
+ != offsetof(struct work_struct, func));
+
+ if (wq) {
+ INIT_WORK(&cl->work, cl->work.func);
+ BUG_ON(!queue_work(wq, &cl->work));
+ } else
+ cl->fn(cl);
+}
+
+/**
+ * closure_get - increment a closure's refcount
+ */
+static inline void closure_get(struct closure *cl)
+{
+#ifdef CONFIG_DEBUG_CLOSURES
+ BUG_ON((atomic_inc_return(&cl->remaining) &
+ CLOSURE_REMAINING_MASK) <= 1);
+#else
+ atomic_inc(&cl->remaining);
+#endif
+}
+
+/**
+ * closure_init - Initialize a closure, setting the refcount to 1
+ * @cl: closure to initialize
+ * @parent: parent of the new closure. cl will take a refcount on it for its
+ * lifetime; may be NULL.
+ */
+static inline void closure_init(struct closure *cl, struct closure *parent)
+{
+ cl->fn = NULL;
+ cl->parent = parent;
+ if (parent)
+ closure_get(parent);
+
+ atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+
+ closure_debug_create(cl);
+ closure_set_ip(cl);
+}
+
+static inline void closure_init_stack(struct closure *cl)
+{
+ memset(cl, 0, sizeof(struct closure));
+ atomic_set(&cl->remaining, CLOSURE_REMAINING_INITIALIZER);
+}
+
+/**
+ * closure_wake_up - wake up all closures on a wait list,
+ * with memory barrier
+ */
+static inline void closure_wake_up(struct closure_waitlist *list)
+{
+ /* Memory barrier for the wait list */
+ smp_mb();
+ __closure_wake_up(list);
+}
+
+/**
+ * continue_at - jump to another function with barrier
+ *
+ * After @cl is no longer waiting on anything (i.e. all outstanding refs have
+ * been dropped with closure_put()), it will resume execution at @fn running out
+ * of @wq (or, if @wq is NULL, @fn will be called by closure_put() directly).
+ *
+ * This is because after calling continue_at() you no longer have a ref on @cl,
+ * and whatever @cl owns may be freed out from under you - a running closure fn
+ * has a ref on its own closure which continue_at() drops.
+ *
+ * Note you are expected to immediately return after using this macro.
+ */
+#define continue_at(_cl, _fn, _wq) \
+do { \
+ set_closure_fn(_cl, _fn, _wq); \
+ closure_sub(_cl, CLOSURE_RUNNING + 1); \
+} while (0)
+
+/**
+ * closure_return - finish execution of a closure
+ *
+ * This is used to indicate that @cl is finished: when all outstanding refs on
+ * @cl have been dropped @cl's ref on its parent closure (as passed to
+ * closure_init()) will be dropped, if one was specified - thus this can be
+ * thought of as returning to the parent closure.
+ */
+#define closure_return(_cl) continue_at((_cl), NULL, NULL)
+
+/**
+ * continue_at_nobarrier - jump to another function without barrier
+ *
+ * Causes @fn to be executed out of @cl, in @wq context (or called directly if
+ * @wq is NULL).
+ *
+ * The ref the caller of continue_at_nobarrier() had on @cl is now owned by @fn,
+ * thus it's not safe to touch anything protected by @cl after a
+ * continue_at_nobarrier().
+ */
+#define continue_at_nobarrier(_cl, _fn, _wq) \
+do { \
+ set_closure_fn(_cl, _fn, _wq); \
+ closure_queue(_cl); \
+} while (0)
+
+/**
+ * closure_return_with_destructor - finish execution of a closure,
+ * with destructor
+ *
+ * Works like closure_return(), except @destructor will be called when all
+ * outstanding refs on @cl have been dropped; @destructor may be used to safely
+ * free the memory occupied by @cl, and it is called with the ref on the parent
+ * closure still held - so @destructor could safely return an item to a
+ * freelist protected by @cl's parent.
+ */
+#define closure_return_with_destructor(_cl, _destructor) \
+do { \
+ set_closure_fn(_cl, _destructor, NULL); \
+ closure_sub(_cl, CLOSURE_RUNNING - CLOSURE_DESTRUCTOR + 1); \
+} while (0)
+
+/**
+ * closure_call - execute @fn out of a new, uninitialized closure
+ *
+ * Typically used when running out of one closure, and we want to run @fn
+ * asynchronously out of a new closure - @parent will then wait for @cl to
+ * finish.
+ */
+static inline void closure_call(struct closure *cl, closure_fn fn,
+ struct workqueue_struct *wq,
+ struct closure *parent)
+{
+ closure_init(cl, parent);
+ continue_at_nobarrier(cl, fn, wq);
+}
+
+#define __closure_wait_event(waitlist, _cond) \
+do { \
+ struct closure cl; \
+ \
+ closure_init_stack(&cl); \
+ \
+ while (1) { \
+ closure_wait(waitlist, &cl); \
+ if (_cond) \
+ break; \
+ closure_sync(&cl); \
+ } \
+ closure_wake_up(waitlist); \
+ closure_sync(&cl); \
+} while (0)
+
+#define closure_wait_event(waitlist, _cond) \
+do { \
+ if (!(_cond)) \
+ __closure_wait_event(waitlist, _cond); \
+} while (0)
+
+#endif /* _LINUX_CLOSURE_H */
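
As a concrete companion to the bio example in the header comment, a minimal sketch (hypothetical structures and stand-in helpers) of the closure_get()/closure_put()/continue_at() pattern:

#include <linux/closure.h>
#include <linux/bio.h>

struct read_op {
	struct closure cl;
	/* ... per-operation state ... */
};

static void read_endio(struct bio *bio)
{
	struct read_op *op = bio->bi_private;

	bio_put(bio);
	closure_put(&op->cl);	/* drops the ref taken before submission */
}

static void read_done(struct closure *cl)
{
	/* runs out of system_wq once both bios have completed */
}

static void submit_two_reads(struct read_op *op, struct bio *b1, struct bio *b2)
{
	closure_init(&op->cl, NULL);	/* refcount starts at 1, owned by us */

	closure_get(&op->cl);
	b1->bi_private = op;
	b1->bi_end_io = read_endio;
	submit_bio(b1);

	closure_get(&op->cl);
	b2->bi_private = op;
	b2->bi_end_io = read_endio;
	submit_bio(b2);

	/* transfers our ref; must be immediately followed by return */
	continue_at(&op->cl, read_done, system_wq);
}
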
diff --git a/include/linux/comedi/comedi_8254.h b/include/linux/comedi/comedi_8254.h
index d8264417e53c..d527f04400df 100644
--- a/include/linux/comedi/comedi_8254.h
+++ b/include/linux/comedi/comedi_8254.h
@@ -12,6 +12,8 @@
#define _COMEDI_8254_H
#include <linux/types.h>
+#include <linux/errno.h>
+#include <linux/err.h>
struct comedi_device;
struct comedi_insn;
@@ -57,10 +59,24 @@ struct comedi_subdevice;
/* counter maps zero to 0x10000 */
#define I8254_MAX_COUNT 0x10000
+struct comedi_8254;
+
+/**
+ * typedef comedi_8254_iocb_fn - call-back function type for 8254 register access
+ * @i8254: pointer to struct comedi_8254
+ * @dir: direction (0 = read, 1 = write)
+ * @reg: register number
+ * @val: value to write
+ *
+ * Return: Register value when reading, 0 when writing.
+ */
+typedef unsigned int comedi_8254_iocb_fn(struct comedi_8254 *i8254, int dir,
+ unsigned int reg, unsigned int val);
+
/**
* struct comedi_8254 - private data used by this module
- * @iobase: PIO base address of the registers (in/out)
- * @mmio: MMIO base address of the registers (read/write)
+ * @iocb: I/O call-back function for register access
+ * @context: context for register access (e.g. a base address)
* @iosize: I/O size used to access the registers (b/w/l)
* @regshift: register gap shift
* @osc_base: cascaded oscillator speed in ns
@@ -76,8 +92,8 @@ struct comedi_subdevice;
* @insn_config: driver specific (*insn_config) callback
*/
struct comedi_8254 {
- unsigned long iobase;
- void __iomem *mmio;
+ comedi_8254_iocb_fn *iocb;
+ unsigned long context;
unsigned int iosize;
unsigned int regshift;
unsigned int osc_base;
@@ -122,13 +138,24 @@ void comedi_8254_set_busy(struct comedi_8254 *i8254,
void comedi_8254_subdevice_init(struct comedi_subdevice *s,
struct comedi_8254 *i8254);
-struct comedi_8254 *comedi_8254_init(unsigned long iobase,
- unsigned int osc_base,
- unsigned int iosize,
- unsigned int regshift);
-struct comedi_8254 *comedi_8254_mm_init(void __iomem *mmio,
- unsigned int osc_base,
- unsigned int iosize,
- unsigned int regshift);
+#ifdef CONFIG_HAS_IOPORT
+struct comedi_8254 *comedi_8254_io_alloc(unsigned long iobase,
+ unsigned int osc_base,
+ unsigned int iosize,
+ unsigned int regshift);
+#else
+static inline struct comedi_8254 *comedi_8254_io_alloc(unsigned long iobase,
+ unsigned int osc_base,
+ unsigned int iosize,
+ unsigned int regshift)
+{
+ return ERR_PTR(-ENXIO);
+}
+#endif
+
+struct comedi_8254 *comedi_8254_mm_alloc(void __iomem *mmio,
+ unsigned int osc_base,
+ unsigned int iosize,
+ unsigned int regshift);
#endif /* _COMEDI_8254_H */
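
Because the !CONFIG_HAS_IOPORT stub returns ERR_PTR(-ENXIO), callers of the renamed allocators are expected to check with IS_ERR(); a hypothetical attach fragment (TIMER_BASE is an assumed register offset, not from this diff):

#include <linux/comedi/comedi_8254.h>
#include <linux/comedi/comedidev.h>

#define TIMER_BASE	0x08	/* hypothetical offset, for illustration */

static int foo_attach_pacer(struct comedi_device *dev, unsigned long iobase)
{
	struct comedi_8254 *pacer;

	pacer = comedi_8254_io_alloc(iobase + TIMER_BASE,
				     I8254_OSC_BASE_10MHZ, I8254_IO8, 0);
	if (IS_ERR(pacer))
		return PTR_ERR(pacer);	/* -ENXIO when port I/O is unavailable */

	dev->pacer = pacer;
	return 0;
}
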
diff --git a/include/linux/comedi/comedi_8255.h b/include/linux/comedi/comedi_8255.h
index b2a5bc6b3a49..d24a69da389b 100644
--- a/include/linux/comedi/comedi_8255.h
+++ b/include/linux/comedi/comedi_8255.h
@@ -10,6 +10,8 @@
#ifndef _COMEDI_8255_H
#define _COMEDI_8255_H
+#include <linux/errno.h>
+
#define I8255_SIZE 0x04
#define I8255_DATA_A_REG 0x00
@@ -27,16 +29,26 @@
struct comedi_device;
struct comedi_subdevice;
-int subdev_8255_init(struct comedi_device *dev, struct comedi_subdevice *s,
- int (*io)(struct comedi_device *dev, int dir, int port,
- int data, unsigned long regbase),
- unsigned long regbase);
+#ifdef CONFIG_HAS_IOPORT
+int subdev_8255_io_init(struct comedi_device *dev, struct comedi_subdevice *s,
+ unsigned long regbase);
+#else
+static inline int subdev_8255_io_init(struct comedi_device *dev,
+ struct comedi_subdevice *s,
+ unsigned long regbase)
+{
+ return -ENXIO;
+}
+#endif
int subdev_8255_mm_init(struct comedi_device *dev, struct comedi_subdevice *s,
- int (*io)(struct comedi_device *dev, int dir, int port,
- int data, unsigned long regbase),
unsigned long regbase);
+int subdev_8255_cb_init(struct comedi_device *dev, struct comedi_subdevice *s,
+ int (*io)(struct comedi_device *dev, int dir, int port,
+ int data, unsigned long context),
+ unsigned long context);
+
unsigned long subdev_8255_regbase(struct comedi_subdevice *s);
#endif
diff --git a/include/linux/comedi/comedidev.h b/include/linux/comedi/comedidev.h
index 0a1150900ef3..c08416a7364b 100644
--- a/include/linux/comedi/comedidev.h
+++ b/include/linux/comedi/comedidev.h
@@ -633,7 +633,7 @@ extern const struct comedi_lrange range_unknown;
*/
struct comedi_lrange {
int length;
- struct comedi_krange range[];
+ struct comedi_krange range[] __counted_by(length);
};
/**
diff --git a/include/linux/compat.h b/include/linux/compat.h
index 1cfa4f0f490a..233f61ec8afc 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -581,7 +581,6 @@ asmlinkage long compat_sys_io_pgetevents_time64(compat_aio_context_t ctx_id,
struct io_event __user *events,
struct __kernel_timespec __user *timeout,
const struct __compat_aio_sigset __user *usig);
-asmlinkage long compat_sys_lookup_dcookie(u32, u32, char __user *, compat_size_t);
asmlinkage long compat_sys_epoll_pwait(int epfd,
struct epoll_event __user *events,
int maxevents, int timeout,
diff --git a/include/linux/compiler-clang.h b/include/linux/compiler-clang.h
index 9b673fefcef8..ddab1ef22bee 100644
--- a/include/linux/compiler-clang.h
+++ b/include/linux/compiler-clang.h
@@ -14,11 +14,6 @@
#undef __cleanup
#define __cleanup(func) __maybe_unused __attribute__((__cleanup__(func)))
-/* same as gcc, this was present in clang-2.6 so we can assume it works
- * with any version that can compile the kernel
- */
-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
-
/* all clang versions usable with the kernel support KASAN ABI version 5 */
#define KASAN_ABI_VERSION 5
diff --git a/include/linux/compiler-gcc.h b/include/linux/compiler-gcc.h
index 7af9e34ec261..2ceba3fe4ec1 100644
--- a/include/linux/compiler-gcc.h
+++ b/include/linux/compiler-gcc.h
@@ -39,8 +39,6 @@
#define __noretpoline __attribute__((__indirect_branch__("keep")))
#endif
-#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
-
#if defined(LATENT_ENTROPY_PLUGIN) && !defined(__CHECKER__)
#define __latent_entropy __attribute__((latent_entropy))
#endif
diff --git a/include/linux/compiler.h b/include/linux/compiler.h
index d7779a18b24f..bb1339c7057b 100644
--- a/include/linux/compiler.h
+++ b/include/linux/compiler.h
@@ -177,10 +177,7 @@ void ftrace_likely_update(struct ftrace_likely_data *f, int val,
__asm__ ("" : "=r" (var) : "0" (var))
#endif
-/* Not-quite-unique ID. */
-#ifndef __UNIQUE_ID
-# define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __LINE__)
-#endif
+#define __UNIQUE_ID(prefix) __PASTE(__PASTE(__UNIQUE_ID_, prefix), __COUNTER__)
/**
* data_race - mark an expression as containing intentional data races
@@ -231,6 +228,14 @@ static inline void *offset_to_ptr(const int *off)
#define __must_be_array(a) BUILD_BUG_ON_ZERO(__same_type((a), &(a)[0]))
/*
+ * This returns a constant expression while determining if an argument is
+ * a constant expression, most importantly without evaluating the argument.
+ * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
+ */
+#define __is_constexpr(x) \
+ (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
+
+/*
* Whether 'type' is a signed type or an unsigned type. Supports scalar types,
* bool and also pointer types.
*/
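
How the __is_constexpr() trick works: when x is an integer constant expression, (long)(x) * 0l is a null pointer constant, the conditional operator yields type int *, and the sizeof comparison is true; otherwise the result type is void * (sizeof(void) is 1 under GNU C) and the comparison is false. Either way the result is itself a constant expression, e.g.:

#include <linux/compiler.h>
#include <linux/build_bug.h>

static int runtime_val;

static_assert(__is_constexpr(42));		/* constant expression */
static_assert(__is_constexpr(sizeof(long)));	/* also constant */
static_assert(!__is_constexpr(runtime_val));	/* 0, yet still constant */
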
diff --git a/include/linux/compiler_types.h b/include/linux/compiler_types.h
index c523c6683789..6f1ca49306d2 100644
--- a/include/linux/compiler_types.h
+++ b/include/linux/compiler_types.h
@@ -2,6 +2,15 @@
#ifndef __LINUX_COMPILER_TYPES_H
#define __LINUX_COMPILER_TYPES_H
+/*
+ * __has_builtin is supported on gcc >= 10, clang >= 3 and icc >= 21.
+ * In the meantime, to support gcc < 10, we implement __has_builtin
+ * by hand.
+ */
+#ifndef __has_builtin
+#define __has_builtin(x) (0)
+#endif
+
#ifndef __ASSEMBLY__
/*
@@ -134,17 +143,6 @@ static inline void __chk_io_ptr(const volatile void __iomem *ptr) { }
# define __preserve_most
#endif
-/* Builtins */
-
-/*
- * __has_builtin is supported on gcc >= 10, clang >= 3 and icc >= 21.
- * In the meantime, to support gcc < 10, we implement __has_builtin
- * by hand.
- */
-#ifndef __has_builtin
-#define __has_builtin(x) (0)
-#endif
-
/* Compiler specific macros. */
#ifdef __clang__
#include <linux/compiler-clang.h>
@@ -352,6 +350,18 @@ struct ftrace_likely_data {
# define __realloc_size(x, ...)
#endif
+/*
+ * When the size of an allocated object is needed, use the best available
+ * mechanism to find it. (For cases where sizeof() cannot be used.)
+ */
+#if __has_builtin(__builtin_dynamic_object_size)
+#define __struct_size(p) __builtin_dynamic_object_size(p, 0)
+#define __member_size(p) __builtin_dynamic_object_size(p, 1)
+#else
+#define __struct_size(p) __builtin_object_size(p, 0)
+#define __member_size(p) __builtin_object_size(p, 1)
+#endif
+
#ifndef asm_volatile_goto
#define asm_volatile_goto(x...) asm goto(x)
#endif
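
A small illustration (hypothetical struct, sketch only) of the two bounds: the ptr,0 form covers the whole object while the ptr,1 form stops at the member:

#include <linux/compiler_types.h>
#include <linux/string.h>

struct msg {
	int len;
	char body[60];
};

/* Sketch: bound the copy by whichever member size the compiler can prove. */
static void msg_fill(struct msg *m, const void *src, size_t n)
{
	if (n > __member_size(m->body))	/* typically sizeof(m->body) */
		n = __member_size(m->body);
	memcpy(m->body, src, n);
}

With __builtin_dynamic_object_size available, the bound can also cover sizes only known at run time, e.g. flexible-array allocations.
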
diff --git a/include/linux/console.h b/include/linux/console.h
index 7de11c763eb3..779d388af8a0 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -101,6 +101,13 @@ extern const struct consw dummy_con; /* dummy console buffer */
extern const struct consw vga_con; /* VGA text console */
extern const struct consw newport_con; /* SGI Newport console */
+struct screen_info;
+#ifdef CONFIG_VGA_CONSOLE
+void vgacon_register_screen(struct screen_info *si);
+#else
+static inline void vgacon_register_screen(struct screen_info *si) { }
+#endif
+
int con_is_bound(const struct consw *csw);
int do_unregister_con_driver(const struct consw *csw);
int do_take_over_console(const struct consw *sw, int first, int last, int deflt);
@@ -156,6 +163,8 @@ static inline int con_debug_leave(void)
* /dev/kmesg which requires a larger output buffer.
* @CON_SUSPENDED: Indicates if a console is suspended. If true, the
* printing callbacks must not be called.
+ * @CON_NBCON: Console can operate outside of the legacy style console_lock
+ * constraints.
*/
enum cons_flags {
CON_PRINTBUFFER = BIT(0),
@@ -166,6 +175,111 @@ enum cons_flags {
CON_BRL = BIT(5),
CON_EXTENDED = BIT(6),
CON_SUSPENDED = BIT(7),
+ CON_NBCON = BIT(8),
+};
+
+/**
+ * struct nbcon_state - console state for nbcon consoles
+ * @atom: Compound of the state fields for atomic operations
+ *
+ * @req_prio: The priority of a handover request
+ * @prio: The priority of the current owner
+ * @unsafe: Console is busy in a non takeover region
+ * @unsafe_takeover: A hostile takeover in an unsafe state happened in the
+ * past. The console cannot be safe until re-initialized.
+ * @cpu: The CPU on which the owner runs
+ *
+ * To be used for reading and preparing of the value stored in the nbcon
+ * state variable @console::nbcon_state.
+ *
+ * The @prio and @req_prio fields are particularly important to allow
+ * spin-waiting to timeout and give up without the risk of a waiter being
+ * assigned the lock after giving up.
+ */
+struct nbcon_state {
+ union {
+ unsigned int atom;
+ struct {
+ unsigned int prio : 2;
+ unsigned int req_prio : 2;
+ unsigned int unsafe : 1;
+ unsigned int unsafe_takeover : 1;
+ unsigned int cpu : 24;
+ };
+ };
+};
+
+/*
+ * The nbcon_state struct is used to easily create and interpret values that
+ * are stored in the @console::nbcon_state variable. Ensure this struct stays
+ * within the size boundaries of the atomic variable's underlying type in
+ * order to avoid any accidental truncation.
+ */
+static_assert(sizeof(struct nbcon_state) <= sizeof(int));
+
+/**
+ * nbcon_prio - console owner priority for nbcon consoles
+ * @NBCON_PRIO_NONE: Unused
+ * @NBCON_PRIO_NORMAL: Normal (non-emergency) usage
+ * @NBCON_PRIO_EMERGENCY: Emergency output (WARN/OOPS...)
+ * @NBCON_PRIO_PANIC: Panic output
+ * @NBCON_PRIO_MAX: The number of priority levels
+ *
+ * A higher priority context can takeover the console when it is
+ * in the safe state. The final attempt to flush consoles in panic()
+ * can be allowed to do so even in an unsafe state (Hope and pray).
+ */
+enum nbcon_prio {
+ NBCON_PRIO_NONE = 0,
+ NBCON_PRIO_NORMAL,
+ NBCON_PRIO_EMERGENCY,
+ NBCON_PRIO_PANIC,
+ NBCON_PRIO_MAX,
+};
+
+struct console;
+struct printk_buffers;
+
+/**
+ * struct nbcon_context - Context for console acquire/release
+ * @console: The associated console
+ * @spinwait_max_us: Limit for spin-wait acquire
+ * @prio: Priority of the context
+ * @allow_unsafe_takeover: Allow performing takeover even if unsafe. Can
+ * be used only with NBCON_PRIO_PANIC @prio. It
+ * might cause a system freeze when the console
+ * is used later.
+ * @backlog: Ringbuffer has pending records
+ * @pbufs: Pointer to the text buffer for this context
+ * @seq: The sequence number to print for this context
+ */
+struct nbcon_context {
+ /* members set by caller */
+ struct console *console;
+ unsigned int spinwait_max_us;
+ enum nbcon_prio prio;
+ unsigned int allow_unsafe_takeover : 1;
+
+ /* members set by emit */
+ unsigned int backlog : 1;
+
+ /* members set by acquire */
+ struct printk_buffers *pbufs;
+ u64 seq;
+};
+
+/**
+ * struct nbcon_write_context - Context handed to the nbcon write callbacks
+ * @ctxt: The core console context
+ * @outbuf: Pointer to the text buffer for output
+ * @len: Length to write
+ * @unsafe_takeover: If a hostile takeover in an unsafe state has occurred
+ */
+struct nbcon_write_context {
+ struct nbcon_context __private ctxt;
+ char *outbuf;
+ unsigned int len;
+ bool unsafe_takeover;
};
/**
@@ -187,6 +301,11 @@ enum cons_flags {
* @dropped: Number of unreported dropped ringbuffer records
* @data: Driver private data
* @node: hlist node for the console list
+ *
+ * @write_atomic: Write callback for atomic context
+ * @nbcon_state: State for nbcon consoles
+ * @nbcon_seq: Sequence number of the next record for nbcon to print
+ * @pbufs: Pointer to nbcon private buffer
*/
struct console {
char name[16];
@@ -206,6 +325,13 @@ struct console {
unsigned long dropped;
void *data;
struct hlist_node node;
+
+ /* nbcon console specific members */
+ bool (*write_atomic)(struct console *con,
+ struct nbcon_write_context *wctxt);
+ atomic_t __private nbcon_state;
+ atomic_long_t __private nbcon_seq;
+ struct printk_buffers *pbufs;
};
#ifdef CONFIG_LOCKDEP
@@ -332,6 +458,16 @@ static inline bool console_is_registered(const struct console *con)
lockdep_assert_console_list_lock_held(); \
hlist_for_each_entry(con, &console_list, node)
+#ifdef CONFIG_PRINTK
+extern bool nbcon_can_proceed(struct nbcon_write_context *wctxt);
+extern bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt);
+extern bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt);
+#else
+static inline bool nbcon_can_proceed(struct nbcon_write_context *wctxt) { return false; }
+static inline bool nbcon_enter_unsafe(struct nbcon_write_context *wctxt) { return false; }
+static inline bool nbcon_exit_unsafe(struct nbcon_write_context *wctxt) { return false; }
+#endif
+
extern int console_set_on_cmdline;
extern struct console *early_console;
@@ -340,7 +476,7 @@ enum con_flush_mode {
CONSOLE_REPLAY_ALL,
};
-extern int add_preferred_console(char *name, int idx, char *options);
+extern int add_preferred_console(const char *name, const short idx, char *options);
extern void console_force_preferred_locked(struct console *con);
extern void register_console(struct console *);
extern int unregister_console(struct console *);
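
The shape of an nbcon write_atomic callback under this API, as a hedged sketch (foo_write_atomic() and hw_putc() are stand-ins, not from this diff):

#include <linux/console.h>

/* Hypothetical device accessor. */
static void hw_putc(char c)
{
	/* write @c to the device FIFO */
}

static bool foo_write_atomic(struct console *con,
			     struct nbcon_write_context *wctxt)
{
	unsigned int i;

	if (!nbcon_enter_unsafe(wctxt))	/* ownership may already be lost */
		return false;

	for (i = 0; i < wctxt->len; i++)
		hw_putc(wctxt->outbuf[i]);

	return nbcon_exit_unsafe(wctxt);
}
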
diff --git a/include/linux/const.h b/include/linux/const.h
index 435ddd72d2c4..81b8aae5a855 100644
--- a/include/linux/const.h
+++ b/include/linux/const.h
@@ -3,12 +3,4 @@
#include <vdso/const.h>
-/*
- * This returns a constant expression while determining if an argument is
- * a constant expression, most importantly without evaluating the argument.
- * Glory to Martin Uecker <Martin.Uecker@med.uni-goettingen.de>
- */
-#define __is_constexpr(x) \
- (sizeof(int) == sizeof(*(8 ? ((void *)((long)(x) * 0l)) : (int *)8)))
-
#endif /* _LINUX_CONST_H */
diff --git a/include/linux/cpu.h b/include/linux/cpu.h
index 0abd60a7987b..fc8094419084 100644
--- a/include/linux/cpu.h
+++ b/include/linux/cpu.h
@@ -80,6 +80,8 @@ extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
const struct attribute_group **groups,
const char *fmt, ...);
+extern int arch_register_cpu(int cpu);
+extern void arch_unregister_cpu(int cpu);
#ifdef CONFIG_HOTPLUG_CPU
extern void unregister_cpu(struct cpu *cpu);
extern ssize_t arch_cpu_probe(const char *, size_t);
@@ -153,6 +155,8 @@ static inline int remove_cpu(unsigned int cpu) { return -EPERM; }
static inline void smp_shutdown_nonboot_cpus(unsigned int primary_cpu) { }
#endif /* !CONFIG_HOTPLUG_CPU */
+DEFINE_LOCK_GUARD_0(cpus_read_lock, cpus_read_lock(), cpus_read_unlock())
+
#ifdef CONFIG_PM_SLEEP_SMP
extern int freeze_secondary_cpus(int primary);
extern void thaw_secondary_cpus(void);
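
The new DEFINE_LOCK_GUARD_0() wrapper enables scope-based hotplug locking via cleanup.h, e.g. (minimal sketch):

#include <linux/cpu.h>
#include <linux/cleanup.h>

static void online_cpus_example(void)
{
	guard(cpus_read_lock)();	/* cpus_read_unlock() at scope exit */

	/* CPUs cannot go away while the guard is held. */
}
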
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 71d186d6933a..1c5ca92a0555 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -1193,14 +1193,6 @@ static inline int of_perf_domain_get_sharing_cpumask(int pcpu, const char *list_
}
#endif
-#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
-void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
- struct cpufreq_governor *old_gov);
-#else
-static inline void sched_cpufreq_governor_change(struct cpufreq_policy *policy,
- struct cpufreq_governor *old_gov) { }
-#endif
-
extern unsigned int arch_freq_get_on_cpu(int cpu);
#ifndef arch_set_freq_scale
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 06dda85f0424..d305db70674b 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -90,7 +90,6 @@ enum cpuhp_state {
CPUHP_FS_BUFF_DEAD,
CPUHP_PRINTK_DEAD,
CPUHP_MM_MEMCQ_DEAD,
- CPUHP_XFS_DEAD,
CPUHP_PERCPU_CNT_DEAD,
CPUHP_RADIX_DEAD,
CPUHP_PAGE_ALLOC,
@@ -173,6 +172,7 @@ enum cpuhp_state {
CPUHP_AP_ARM_L2X0_STARTING,
CPUHP_AP_EXYNOS4_MCT_TIMER_STARTING,
CPUHP_AP_ARM_ARCH_TIMER_STARTING,
+ CPUHP_AP_ARM_ARCH_TIMER_EVTSTRM_STARTING,
CPUHP_AP_ARM_GLOBAL_TIMER_STARTING,
CPUHP_AP_JCORE_TIMER_STARTING,
CPUHP_AP_ARM_TWD_STARTING,
@@ -190,6 +190,7 @@ enum cpuhp_state {
/* Must be the last timer callback */
CPUHP_AP_DUMMY_TIMER_STARTING,
CPUHP_AP_ARM_XEN_STARTING,
+ CPUHP_AP_ARM_XEN_RUNSTATE_STARTING,
CPUHP_AP_ARM_CORESIGHT_STARTING,
CPUHP_AP_ARM_CORESIGHT_CTI_STARTING,
CPUHP_AP_ARM64_ISNDEP_STARTING,
@@ -205,7 +206,6 @@ enum cpuhp_state {
CPUHP_AP_KVM_ONLINE,
CPUHP_AP_SCHED_WAIT_EMPTY,
CPUHP_AP_SMPBOOT_THREADS,
- CPUHP_AP_X86_VDSO_VMA_ONLINE,
CPUHP_AP_IRQ_AFFINITY_ONLINE,
CPUHP_AP_BLK_MQ_ONLINE,
CPUHP_AP_ARM_MVEBU_SYNC_CLOCKS,
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index f10fb87d49db..cfb545841a2c 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -4,7 +4,7 @@
/*
* Cpumasks provide a bitmap suitable for representing the
- * set of CPU's in a system, one bit position per CPU number. In general,
+ * set of CPUs in a system, one bit position per CPU number. In general,
* only nr_cpu_ids (<= NR_CPUS) bits are valid.
*/
#include <linux/kernel.h>
@@ -97,7 +97,7 @@ static inline void set_nr_cpu_ids(unsigned int nr)
*
* If !CONFIG_HOTPLUG_CPU, present == possible, and active == online.
*
- * The cpu_possible_mask is fixed at boot time, as the set of CPU id's
+ * The cpu_possible_mask is fixed at boot time, as the set of CPU IDs
* that it is possible might ever be plugged in at anytime during the
* life of that system boot. The cpu_present_mask is dynamic(*),
* representing which CPUs are currently plugged in. And
@@ -112,7 +112,7 @@ static inline void set_nr_cpu_ids(unsigned int nr)
* hotplug, it's a copy of cpu_possible_mask, hence fixed at boot.
*
* Subtleties:
- * 1) UP arch's (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
+ * 1) UP ARCHes (NR_CPUS == 1, CONFIG_SMP not defined) hardcode
* assumption that their single CPU is online. The UP
* cpu_{online,possible,present}_masks are placebos. Changing them
* will have no useful affect on the following num_*_cpus()
@@ -155,7 +155,7 @@ static __always_inline unsigned int cpumask_check(unsigned int cpu)
* cpumask_first - get the first cpu in a cpumask
* @srcp: the cpumask pointer
*
- * Returns >= nr_cpu_ids if no cpus set.
+ * Return: >= nr_cpu_ids if no cpus set.
*/
static inline unsigned int cpumask_first(const struct cpumask *srcp)
{
@@ -166,7 +166,7 @@ static inline unsigned int cpumask_first(const struct cpumask *srcp)
* cpumask_first_zero - get the first unset cpu in a cpumask
* @srcp: the cpumask pointer
*
- * Returns >= nr_cpu_ids if all cpus are set.
+ * Return: >= nr_cpu_ids if all cpus are set.
*/
static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
{
@@ -178,7 +178,7 @@ static inline unsigned int cpumask_first_zero(const struct cpumask *srcp)
* @srcp1: the first input
* @srcp2: the second input
*
- * Returns >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
+ * Return: >= nr_cpu_ids if no cpus set in both. See also cpumask_next_and().
*/
static inline
unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask *srcp2)
@@ -190,7 +190,7 @@ unsigned int cpumask_first_and(const struct cpumask *srcp1, const struct cpumask
* cpumask_last - get the last CPU in a cpumask
* @srcp: - the cpumask pointer
*
- * Returns >= nr_cpumask_bits if no CPUs set.
+ * Return: >= nr_cpumask_bits if no CPUs set.
*/
static inline unsigned int cpumask_last(const struct cpumask *srcp)
{
@@ -199,10 +199,10 @@ static inline unsigned int cpumask_last(const struct cpumask *srcp)
/**
* cpumask_next - get the next cpu in a cpumask
- * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @n: the cpu prior to the place to search (i.e. return will be > @n)
* @srcp: the cpumask pointer
*
- * Returns >= nr_cpu_ids if no further cpus set.
+ * Return: >= nr_cpu_ids if no further cpus set.
*/
static inline
unsigned int cpumask_next(int n, const struct cpumask *srcp)
@@ -215,10 +215,10 @@ unsigned int cpumask_next(int n, const struct cpumask *srcp)
/**
* cpumask_next_zero - get the next unset cpu in a cpumask
- * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @n: the cpu prior to the place to search (i.e. return will be > @n)
* @srcp: the cpumask pointer
*
- * Returns >= nr_cpu_ids if no further cpus unset.
+ * Return: >= nr_cpu_ids if no further cpus unset.
*/
static inline unsigned int cpumask_next_zero(int n, const struct cpumask *srcp)
{
@@ -254,11 +254,11 @@ unsigned int cpumask_any_distribute(const struct cpumask *srcp);
/**
* cpumask_next_and - get the next cpu in *src1p & *src2p
- * @n: the cpu prior to the place to search (ie. return will be > @n)
+ * @n: the cpu prior to the place to search (i.e. return will be > @n)
* @src1p: the first cpumask pointer
* @src2p: the second cpumask pointer
*
- * Returns >= nr_cpu_ids if no further cpus set in both.
+ * Return: >= nr_cpu_ids if no further cpus set in both.
*/
static inline
unsigned int cpumask_next_and(int n, const struct cpumask *src1p,
@@ -373,7 +373,7 @@ unsigned int __pure cpumask_next_wrap(int n, const struct cpumask *mask, int sta
* @cpu: the cpu to ignore.
*
* Often used to find any cpu but smp_processor_id() in a mask.
- * Returns >= nr_cpu_ids if no cpus set.
+ * Return: >= nr_cpu_ids if no cpus set.
*/
static inline
unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
@@ -388,11 +388,11 @@ unsigned int cpumask_any_but(const struct cpumask *mask, unsigned int cpu)
}
/**
- * cpumask_nth - get the first cpu in a cpumask
+ * cpumask_nth - get the Nth cpu in a cpumask
* @srcp: the cpumask pointer
- * @cpu: the N'th cpu to find, starting from 0
+ * @cpu: the Nth cpu to find, starting from 0
*
- * Returns >= nr_cpu_ids if such cpu doesn't exist.
+ * Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
static inline unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *srcp)
{
@@ -400,12 +400,12 @@ static inline unsigned int cpumask_nth(unsigned int cpu, const struct cpumask *s
}
/**
- * cpumask_nth_and - get the first cpu in 2 cpumasks
+ * cpumask_nth_and - get the Nth cpu in 2 cpumasks
* @srcp1: the cpumask pointer
* @srcp2: the cpumask pointer
- * @cpu: the N'th cpu to find, starting from 0
+ * @cpu: the Nth cpu to find, starting from 0
*
- * Returns >= nr_cpu_ids if such cpu doesn't exist.
+ * Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
static inline
unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
@@ -416,12 +416,12 @@ unsigned int cpumask_nth_and(unsigned int cpu, const struct cpumask *srcp1,
}
/**
- * cpumask_nth_andnot - get the first cpu set in 1st cpumask, and clear in 2nd.
+ * cpumask_nth_andnot - get the Nth cpu set in 1st cpumask, and clear in 2nd.
* @srcp1: the cpumask pointer
* @srcp2: the cpumask pointer
- * @cpu: the N'th cpu to find, starting from 0
+ * @cpu: the Nth cpu to find, starting from 0
*
- * Returns >= nr_cpu_ids if such cpu doesn't exist.
+ * Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
static inline
unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1,
@@ -436,9 +436,9 @@ unsigned int cpumask_nth_andnot(unsigned int cpu, const struct cpumask *srcp1,
* @srcp1: the cpumask pointer
* @srcp2: the cpumask pointer
* @srcp3: the cpumask pointer
- * @cpu: the N'th cpu to find, starting from 0
+ * @cpu: the Nth cpu to find, starting from 0
*
- * Returns >= nr_cpu_ids if such cpu doesn't exist.
+ * Return: >= nr_cpu_ids if such cpu doesn't exist.
*/
static __always_inline
unsigned int cpumask_nth_and_andnot(unsigned int cpu, const struct cpumask *srcp1,
@@ -497,7 +497,7 @@ static __always_inline void __cpumask_clear_cpu(int cpu, struct cpumask *dstp)
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
- * Returns true if @cpu is set in @cpumask, else returns false
+ * Return: true if @cpu is set in @cpumask, else returns false
*/
static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpumask)
{
@@ -509,9 +509,9 @@ static __always_inline bool cpumask_test_cpu(int cpu, const struct cpumask *cpum
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
- * Returns true if @cpu is set in old bitmap of @cpumask, else returns false
- *
* test_and_set_bit wrapper for cpumasks.
+ *
+ * Return: true if @cpu is set in old bitmap of @cpumask, else returns false
*/
static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cpumask)
{
@@ -523,9 +523,9 @@ static __always_inline bool cpumask_test_and_set_cpu(int cpu, struct cpumask *cp
* @cpu: cpu number (< nr_cpu_ids)
* @cpumask: the cpumask pointer
*
- * Returns true if @cpu is set in old bitmap of @cpumask, else returns false
- *
* test_and_clear_bit wrapper for cpumasks.
+ *
+ * Return: true if @cpu is set in old bitmap of @cpumask, else returns false
*/
static __always_inline bool cpumask_test_and_clear_cpu(int cpu, struct cpumask *cpumask)
{
@@ -560,7 +560,7 @@ static inline void cpumask_clear(struct cpumask *dstp)
* @src1p: the first input
* @src2p: the second input
*
- * If *@dstp is empty, returns false, else returns true
+ * Return: false if *@dstp is empty, else returns true
*/
static inline bool cpumask_and(struct cpumask *dstp,
const struct cpumask *src1p,
@@ -603,7 +603,7 @@ static inline void cpumask_xor(struct cpumask *dstp,
* @src1p: the first input
* @src2p: the second input
*
- * If *@dstp is empty, returns false, else returns true
+ * Return: false if *@dstp is empty, else returns true
*/
static inline bool cpumask_andnot(struct cpumask *dstp,
const struct cpumask *src1p,
@@ -617,6 +617,8 @@ static inline bool cpumask_andnot(struct cpumask *dstp,
* cpumask_equal - *src1p == *src2p
* @src1p: the first input
* @src2p: the second input
+ *
+ * Return: true if the cpumasks are equal, false if not
*/
static inline bool cpumask_equal(const struct cpumask *src1p,
const struct cpumask *src2p)
@@ -630,6 +632,9 @@ static inline bool cpumask_equal(const struct cpumask *src1p,
* @src1p: the first input
* @src2p: the second input
* @src3p: the third input
+ *
+ * Return: true if first cpumask ORed with second cpumask == third cpumask,
+ * otherwise false
*/
static inline bool cpumask_or_equal(const struct cpumask *src1p,
const struct cpumask *src2p,
@@ -643,6 +648,9 @@ static inline bool cpumask_or_equal(const struct cpumask *src1p,
* cpumask_intersects - (*src1p & *src2p) != 0
* @src1p: the first input
* @src2p: the second input
+ *
+ * Return: true if first cpumask ANDed with second cpumask is non-empty,
+ * otherwise false
*/
static inline bool cpumask_intersects(const struct cpumask *src1p,
const struct cpumask *src2p)
@@ -656,7 +664,7 @@ static inline bool cpumask_intersects(const struct cpumask *src1p,
* @src1p: the first input
* @src2p: the second input
*
- * Returns true if *@src1p is a subset of *@src2p, else returns false
+ * Return: true if *@src1p is a subset of *@src2p, else returns false
*/
static inline bool cpumask_subset(const struct cpumask *src1p,
const struct cpumask *src2p)
@@ -668,6 +676,8 @@ static inline bool cpumask_subset(const struct cpumask *src1p,
/**
* cpumask_empty - *srcp == 0
* @srcp: the cpumask to that all cpus < nr_cpu_ids are clear.
+ *
+ * Return: true if srcp is empty (has no bits set), else false
*/
static inline bool cpumask_empty(const struct cpumask *srcp)
{
@@ -677,6 +687,8 @@ static inline bool cpumask_empty(const struct cpumask *srcp)
/**
* cpumask_full - *srcp == 0xFFFFFFFF...
* @srcp: the cpumask to that all cpus < nr_cpu_ids are set.
+ *
+ * Return: true if srcp is full (has all bits set), else false
*/
static inline bool cpumask_full(const struct cpumask *srcp)
{
@@ -686,6 +698,8 @@ static inline bool cpumask_full(const struct cpumask *srcp)
/**
* cpumask_weight - Count of bits in *srcp
* @srcp: the cpumask to count bits (< nr_cpu_ids) in.
+ *
+ * Return: count of bits set in *srcp
*/
static inline unsigned int cpumask_weight(const struct cpumask *srcp)
{
@@ -696,6 +710,8 @@ static inline unsigned int cpumask_weight(const struct cpumask *srcp)
* cpumask_weight_and - Count of bits in (*srcp1 & *srcp2)
* @srcp1: the cpumask to count bits (< nr_cpu_ids) in.
* @srcp2: the cpumask to count bits (< nr_cpu_ids) in.
+ *
+ * Return: count of bits set in both *srcp1 and *srcp2
*/
static inline unsigned int cpumask_weight_and(const struct cpumask *srcp1,
const struct cpumask *srcp2)
@@ -744,7 +760,7 @@ static inline void cpumask_copy(struct cpumask *dstp,
* cpumask_any - pick a "random" cpu from *srcp
* @srcp: the input cpumask
*
- * Returns >= nr_cpu_ids if no cpus set.
+ * Return: >= nr_cpu_ids if no cpus set.
*/
#define cpumask_any(srcp) cpumask_first(srcp)
@@ -753,7 +769,7 @@ static inline void cpumask_copy(struct cpumask *dstp,
* @mask1: the first input cpumask
* @mask2: the second input cpumask
*
- * Returns >= nr_cpu_ids if no cpus set.
+ * Return: >= nr_cpu_ids if no cpus set.
*/
#define cpumask_any_and(mask1, mask2) cpumask_first_and((mask1), (mask2))
@@ -769,7 +785,7 @@ static inline void cpumask_copy(struct cpumask *dstp,
* @len: the length of the buffer
* @dstp: the cpumask to set.
*
- * Returns -errno, or 0 for success.
+ * Return: -errno, or 0 for success.
*/
static inline int cpumask_parse_user(const char __user *buf, int len,
struct cpumask *dstp)
@@ -783,7 +799,7 @@ static inline int cpumask_parse_user(const char __user *buf, int len,
* @len: the length of the buffer
* @dstp: the cpumask to set.
*
- * Returns -errno, or 0 for success.
+ * Return: -errno, or 0 for success.
*/
static inline int cpumask_parselist_user(const char __user *buf, int len,
struct cpumask *dstp)
@@ -797,7 +813,7 @@ static inline int cpumask_parselist_user(const char __user *buf, int len,
* @buf: the buffer to extract from
* @dstp: the cpumask to set.
*
- * Returns -errno, or 0 for success.
+ * Return: -errno, or 0 for success.
*/
static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
{
@@ -809,7 +825,7 @@ static inline int cpumask_parse(const char *buf, struct cpumask *dstp)
* @buf: the buffer to extract from
* @dstp: the cpumask to set.
*
- * Returns -errno, or 0 for success.
+ * Return: -errno, or 0 for success.
*/
static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
{
@@ -817,7 +833,9 @@ static inline int cpulist_parse(const char *buf, struct cpumask *dstp)
}
/**
- * cpumask_size - size to allocate for a 'struct cpumask' in bytes
+ * cpumask_size - calculate size to allocate for a 'struct cpumask' in bytes
+ *
+ * Return: size to allocate for a &struct cpumask in bytes
*/
static inline unsigned int cpumask_size(void)
{
@@ -831,7 +849,7 @@ static inline unsigned int cpumask_size(void)
* little more difficult, we typedef cpumask_var_t to an array or a
* pointer: doing &mask on an array is a noop, so it still works.
*
- * ie.
+ * i.e.
* cpumask_var_t tmpmask;
* if (!alloc_cpumask_var(&tmpmask, GFP_KERNEL))
* return -ENOMEM;
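Expanding the comment's fragment into the complete allocate/use/free pattern,
a minimal sketch (the function and its purpose are hypothetical):

	#include <linux/cpumask.h>
	#include <linux/gfp.h>

	static int count_online_in(const struct cpumask *src, unsigned int *count)
	{
		cpumask_var_t tmpmask;

		if (!zalloc_cpumask_var(&tmpmask, GFP_KERNEL))
			return -ENOMEM;

		cpumask_and(tmpmask, src, cpu_online_mask);
		*count = cpumask_weight(tmpmask);

		free_cpumask_var(tmpmask);
		return 0;
	}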
@@ -887,6 +905,8 @@ bool zalloc_cpumask_var_node(cpumask_var_t *mask, gfp_t flags, int node)
* a nop returning a constant 1 (in <linux/cpumask.h>).
*
* See alloc_cpumask_var_node.
+ *
+ * Return: %true if allocation succeeded, %false if not
*/
static inline
bool alloc_cpumask_var(cpumask_var_t *mask, gfp_t flags)
@@ -1025,7 +1045,7 @@ set_cpu_dying(unsigned int cpu, bool dying)
}
/**
- * to_cpumask - convert an NR_CPUS bitmap to a struct cpumask *
+ * to_cpumask - convert a NR_CPUS bitmap to a struct cpumask *
* @bitmap: the bitmap
*
* There are a few places where cpumask_var_t isn't appropriate and
@@ -1068,6 +1088,8 @@ static inline const struct cpumask *get_cpu_mask(unsigned int cpu)
* interface gives only a momentary snapshot and is not protected against
* concurrent CPU hotplug operations unless invoked from a cpuhp_lock held
* region.
+ *
+ * Return: momentary snapshot of the number of online CPUs
*/
static __always_inline unsigned int num_online_cpus(void)
{
@@ -1160,7 +1182,7 @@ static inline bool cpu_dying(unsigned int cpu)
* @mask: the cpumask to copy
* @buf: the buffer to copy into
*
- * Returns the length of the (null-terminated) @buf string, zero if
+ * Return: the length of the (null-terminated) @buf string, zero if
* nothing is copied.
*/
static inline ssize_t
@@ -1183,7 +1205,7 @@ cpumap_print_to_pagebuf(bool list, char *buf, const struct cpumask *mask)
* cpumask; Typically used by bin_attribute to export cpumask bitmask
* ABI.
*
- * Returns the length of how many bytes have been copied, excluding
+ * Return: the length of how many bytes have been copied, excluding
* terminating '\0'.
*/
static inline ssize_t
@@ -1204,6 +1226,9 @@ cpumap_print_bitmask_to_buf(char *buf, const struct cpumask *mask,
*
* Everything is same with the above cpumap_print_bitmask_to_buf()
* except the print format.
+ *
+ * Return: the length of how many bytes have been copied, excluding
+ * terminating '\0'.
*/
static inline ssize_t
cpumap_print_list_to_buf(char *buf, const struct cpumask *mask,
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
index 0c06561bf5ff..5126a4fecb44 100644
--- a/include/linux/crash_core.h
+++ b/include/linux/crash_core.h
@@ -5,6 +5,14 @@
#include <linux/linkage.h>
#include <linux/elfcore.h>
#include <linux/elf.h>
+#ifdef CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
+#include <asm/crash_core.h>
+#endif
+
+/* Location of a reserved region to hold the crash kernel. */
+extern struct resource crashk_res;
+extern struct resource crashk_low_res;
#define CRASH_CORE_NOTE_NAME "CORE"
#define CRASH_CORE_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4)
@@ -79,12 +87,43 @@ Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
void *data, size_t data_len);
void final_note(Elf_Word *buf);
+#ifdef CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
+#ifndef DEFAULT_CRASH_KERNEL_LOW_SIZE
+#define DEFAULT_CRASH_KERNEL_LOW_SIZE (128UL << 20)
+#endif
+#endif
+
int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
- unsigned long long *crash_size, unsigned long long *crash_base);
-int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
- unsigned long long *crash_size, unsigned long long *crash_base);
-int parse_crashkernel_low(char *cmdline, unsigned long long system_ram,
- unsigned long long *crash_size, unsigned long long *crash_base);
+ unsigned long long *crash_size, unsigned long long *crash_base,
+ unsigned long long *low_size, bool *high);
+
+#ifdef CONFIG_ARCH_HAS_GENERIC_CRASHKERNEL_RESERVATION
+#ifndef DEFAULT_CRASH_KERNEL_LOW_SIZE
+#define DEFAULT_CRASH_KERNEL_LOW_SIZE (128UL << 20)
+#endif
+#ifndef CRASH_ALIGN
+#define CRASH_ALIGN SZ_2M
+#endif
+#ifndef CRASH_ADDR_LOW_MAX
+#define CRASH_ADDR_LOW_MAX SZ_4G
+#endif
+#ifndef CRASH_ADDR_HIGH_MAX
+#define CRASH_ADDR_HIGH_MAX memblock_end_of_DRAM()
+#endif
+
+void __init reserve_crashkernel_generic(char *cmdline,
+ unsigned long long crash_size,
+ unsigned long long crash_base,
+ unsigned long long crash_low_size,
+ bool high);
+#else
+static inline void __init reserve_crashkernel_generic(char *cmdline,
+ unsigned long long crash_size,
+ unsigned long long crash_base,
+ unsigned long long crash_low_size,
+ bool high)
+{}
+#endif
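The reworked parse_crashkernel() now also reports the low-memory reservation
size and the ",high" flag, which feed directly into
reserve_crashkernel_generic(). A sketch of how an architecture's setup code
might wire the two together (the function name and the use of
memblock_phys_mem_size() are illustrative, not part of this patch):

	#include <linux/crash_core.h>
	#include <linux/memblock.h>

	static void __init my_arch_reserve_crashkernel(char *cmdline)
	{
		unsigned long long crash_size, crash_base, low_size = 0;
		bool high = false;

		if (parse_crashkernel(cmdline, memblock_phys_mem_size(),
				      &crash_size, &crash_base,
				      &low_size, &high))
			return;

		reserve_crashkernel_generic(cmdline, crash_size, crash_base,
					    low_size, high);
	}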
/* Alignment required for elf header segment */
#define ELF_CORE_HEADER_ALIGN 4096
@@ -92,7 +131,7 @@ int parse_crashkernel_low(char *cmdline, unsigned long long system_ram,
struct crash_mem {
unsigned int max_nr_ranges;
unsigned int nr_ranges;
- struct range ranges[];
+ struct range ranges[] __counted_by(max_nr_ranges);
};
extern int crash_exclude_mem_range(struct crash_mem *mem,
diff --git a/include/linux/crash_dump.h b/include/linux/crash_dump.h
index 0f3a656293b0..acc55626afdc 100644
--- a/include/linux/crash_dump.h
+++ b/include/linux/crash_dump.h
@@ -50,6 +50,7 @@ void vmcore_cleanup(void);
#define vmcore_elf64_check_arch(x) (elf_check_arch(x) || vmcore_elf_check_arch_cross(x))
#endif
+#ifndef is_kdump_kernel
/*
* is_kdump_kernel() checks whether this kernel is booting after a panic of
* previous kernel or not. This is determined by checking if previous kernel
@@ -64,6 +65,7 @@ static inline bool is_kdump_kernel(void)
{
return elfcorehdr_addr != ELFCORE_ADDR_MAX;
}
+#endif
/* is_vmcore_usable() checks if the kernel is booting after a panic and
* the vmcore region is usable.
@@ -75,7 +77,8 @@ static inline bool is_kdump_kernel(void)
static inline int is_vmcore_usable(void)
{
- return is_kdump_kernel() && elfcorehdr_addr != ELFCORE_ADDR_ERR ? 1 : 0;
+ return elfcorehdr_addr != ELFCORE_ADDR_ERR &&
+ elfcorehdr_addr != ELFCORE_ADDR_MAX ? 1 : 0;
}
/* vmcore_unusable() marks the vmcore as unusable,
@@ -84,8 +87,7 @@ static inline int is_vmcore_usable(void)
static inline void vmcore_unusable(void)
{
- if (is_kdump_kernel())
- elfcorehdr_addr = ELFCORE_ADDR_ERR;
+ elfcorehdr_addr = ELFCORE_ADDR_ERR;
}
/**
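With the new #ifndef guard, an architecture can supply its own
is_kdump_kernel() and the generic elfcorehdr-based definition steps aside. A
sketch of the override pattern in an arch header (the arch flag is
hypothetical):

	extern bool myarch_booted_for_kdump;	/* hypothetical arch state */

	static inline bool is_kdump_kernel(void)
	{
		return myarch_booted_for_kdump;
	}
	#define is_kdump_kernel is_kdump_kernel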
diff --git a/include/linux/cred.h b/include/linux/cred.h
index f923528d5cc4..af8d353a4b86 100644
--- a/include/linux/cred.h
+++ b/include/linux/cred.h
@@ -12,6 +12,7 @@
#include <linux/init.h>
#include <linux/key.h>
#include <linux/atomic.h>
+#include <linux/refcount.h>
#include <linux/uidgid.h>
#include <linux/sched.h>
#include <linux/sched/user.h>
@@ -23,7 +24,7 @@ struct inode;
* COW Supplementary groups list
*/
struct group_info {
- atomic_t usage;
+ refcount_t usage;
int ngroups;
kgid_t gid[];
} __randomize_layout;
@@ -39,7 +40,7 @@ struct group_info {
*/
static inline struct group_info *get_group_info(struct group_info *gi)
{
- atomic_inc(&gi->usage);
+ refcount_inc(&gi->usage);
return gi;
}
@@ -49,7 +50,7 @@ static inline struct group_info *get_group_info(struct group_info *gi)
*/
#define put_group_info(group_info) \
do { \
- if (atomic_dec_and_test(&(group_info)->usage)) \
+ if (refcount_dec_and_test(&(group_info)->usage)) \
groups_free(group_info); \
} while (0)
@@ -219,6 +220,20 @@ static inline bool cap_ambient_invariant_ok(const struct cred *cred)
}
/**
+ * get_new_cred_many - Get references on a new set of credentials
+ * @cred: The new credentials to reference
+ * @nr: Number of references to acquire
+ *
+ * Get references on the specified set of new credentials. The caller must
+ * release all acquired references.
+ */
+static inline struct cred *get_new_cred_many(struct cred *cred, int nr)
+{
+ atomic_add(nr, &cred->usage);
+ return cred;
+}
+
+/**
* get_new_cred - Get a reference on a new set of credentials
* @cred: The new credentials to reference
*
@@ -227,16 +242,16 @@ static inline bool cap_ambient_invariant_ok(const struct cred *cred)
*/
static inline struct cred *get_new_cred(struct cred *cred)
{
- atomic_inc(&cred->usage);
- return cred;
+ return get_new_cred_many(cred, 1);
}
/**
- * get_cred - Get a reference on a set of credentials
+ * get_cred_many - Get references on a set of credentials
* @cred: The credentials to reference
+ * @nr: Number of references to acquire
*
- * Get a reference on the specified set of credentials. The caller must
- * release the reference. If %NULL is passed, it is returned with no action.
+ * Get references on the specified set of credentials. The caller must release
+ * all acquired references. If %NULL is passed, it is returned with no action.
*
* This is used to deal with a committed set of credentials. Although the
* pointer is const, this will temporarily discard the const and increment the
@@ -244,14 +259,28 @@ static inline struct cred *get_new_cred(struct cred *cred)
* accidental alteration of a set of credentials that should be considered
* immutable.
*/
-static inline const struct cred *get_cred(const struct cred *cred)
+static inline const struct cred *get_cred_many(const struct cred *cred, int nr)
{
struct cred *nonconst_cred = (struct cred *) cred;
if (!cred)
return cred;
validate_creds(cred);
nonconst_cred->non_rcu = 0;
- return get_new_cred(nonconst_cred);
+ return get_new_cred_many(nonconst_cred, nr);
+}
+
+/*
+ * get_cred - Get a reference on a set of credentials
+ * @cred: The credentials to reference
+ *
+ * Get a reference on the specified set of credentials. The caller must
+ * release the reference. If %NULL is passed, it is returned with no action.
+ *
+ * This is used to deal with a committed set of credentials.
+ */
+static inline const struct cred *get_cred(const struct cred *cred)
+{
+ return get_cred_many(cred, 1);
}
static inline const struct cred *get_cred_rcu(const struct cred *cred)
@@ -269,6 +298,7 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred)
/**
* put_cred - Release a reference to a set of credentials
* @cred: The credentials to release
+ * @nr: Number of references to release
*
* Release a reference to a set of credentials, deleting them when the last ref
* is released. If %NULL is passed, nothing is done.
@@ -277,17 +307,29 @@ static inline const struct cred *get_cred_rcu(const struct cred *cred)
* on task_struct are attached by const pointers to prevent accidental
* alteration of otherwise immutable credential sets.
*/
-static inline void put_cred(const struct cred *_cred)
+static inline void put_cred_many(const struct cred *_cred, int nr)
{
struct cred *cred = (struct cred *) _cred;
if (cred) {
validate_creds(cred);
- if (atomic_dec_and_test(&(cred)->usage))
+ if (atomic_sub_and_test(nr, &cred->usage))
__put_cred(cred);
}
}
+/*
+ * put_cred - Release a reference to a set of credentials
+ * @cred: The credentials to release
+ *
+ * Release a reference to a set of credentials, deleting them when the last ref
+ * is released. If %NULL is passed, nothing is done.
+ */
+static inline void put_cred(const struct cred *cred)
+{
+ put_cred_many(cred, 1);
+}
+
/**
* current_cred - Access the current task's subjective credentials
*
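The _many variants batch what used to be one atomic operation per reference.
A sketch of taking and dropping several references at once (both helpers are
hypothetical):

	#include <linux/cred.h>

	/* Stash one credential pointer into @nr slots, taking all the
	 * references with a single atomic add instead of @nr increments.
	 */
	static void fill_cred_slots(const struct cred **slots, int nr)
	{
		const struct cred *cred = get_cred_many(current_cred(), nr);
		int i;

		for (i = 0; i < nr; i++)
			slots[i] = cred;
	}

	static void drop_cred_slots(const struct cred **slots, int nr)
	{
		/* All slots hold the same cred here; release in one go. */
		put_cred_many(slots[0], nr);
	}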
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index 31f6fee0c36c..b164da5e129e 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -24,6 +24,7 @@
#define CRYPTO_ALG_TYPE_CIPHER 0x00000001
#define CRYPTO_ALG_TYPE_COMPRESS 0x00000002
#define CRYPTO_ALG_TYPE_AEAD 0x00000003
+#define CRYPTO_ALG_TYPE_LSKCIPHER 0x00000004
#define CRYPTO_ALG_TYPE_SKCIPHER 0x00000005
#define CRYPTO_ALG_TYPE_AKCIPHER 0x00000006
#define CRYPTO_ALG_TYPE_SIG 0x00000007
@@ -35,8 +36,6 @@
#define CRYPTO_ALG_TYPE_SHASH 0x0000000e
#define CRYPTO_ALG_TYPE_AHASH 0x0000000f
-#define CRYPTO_ALG_TYPE_HASH_MASK 0x0000000e
-#define CRYPTO_ALG_TYPE_AHASH_MASK 0x0000000e
#define CRYPTO_ALG_TYPE_ACOMPRESS_MASK 0x0000000e
#define CRYPTO_ALG_LARVAL 0x00000010
@@ -111,7 +110,6 @@
* crypto_aead_walksize() (with the remainder going at the end), no chunk
* can cross a page boundary or a scatterlist element boundary.
* ahash:
- * - The result buffer must be aligned to the algorithm's alignmask.
* - crypto_ahash_finup() must not be used unless the algorithm implements
* ->finup() natively.
*/
@@ -279,18 +277,20 @@ struct compress_alg {
* @cra_ctxsize: Size of the operational context of the transformation. This
* value informs the kernel crypto API about the memory size
* needed to be allocated for the transformation context.
- * @cra_alignmask: Alignment mask for the input and output data buffer. The data
- * buffer containing the input data for the algorithm must be
- * aligned to this alignment mask. The data buffer for the
- * output data must be aligned to this alignment mask. Note that
- * the Crypto API will do the re-alignment in software, but
- * only under special conditions and there is a performance hit.
- * The re-alignment happens at these occasions for different
- * @cra_u types: cipher -- For both input data and output data
- * buffer; ahash -- For output hash destination buf; shash --
- * For output hash destination buf.
- * This is needed on hardware which is flawed by design and
- * cannot pick data from arbitrary addresses.
+ * @cra_alignmask: For cipher, skcipher, lskcipher, and aead algorithms this is
+ * 1 less than the alignment, in bytes, that the algorithm
+ * implementation requires for input and output buffers. When
+ * the crypto API is invoked with buffers that are not aligned
+ * to this alignment, the crypto API automatically utilizes
+ * appropriately aligned temporary buffers to comply with what
+ * the algorithm needs. (For scatterlists this happens only if
+ * the algorithm uses the skcipher_walk helper functions.) This
+ * misalignment handling carries a performance penalty, so it is
+ * preferred that algorithms do not set a nonzero alignmask.
+ * Also, crypto API users may wish to allocate buffers aligned
+ * to the alignmask of the algorithm being used, in order to
+ * avoid the API having to realign them. Note: the alignmask is
+ * not supported for hash algorithms and is always 0 for them.
* @cra_priority: Priority of this transformation implementation. In case
* multiple transformations with same @cra_name are available to
* the Crypto API, the kernel will use the one with highest
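Per the rewritten @cra_alignmask text, a crypto API user can pre-align
buffers so the API never needs a bounce copy. A minimal sketch using
PTR_ALIGN() (the helper is hypothetical; an alignmask is always one less than
a power of two, so @mask + 1 is a valid alignment):

	#include <crypto/skcipher.h>
	#include <linux/slab.h>

	/* Over-allocate by the alignmask, then align. @freeptr keeps the
	 * original pointer for a later kfree().
	 */
	static void *my_alloc_aligned(struct crypto_skcipher *tfm,
				      unsigned int len, gfp_t gfp,
				      void **freeptr)
	{
		unsigned int mask = crypto_skcipher_alignmask(tfm);
		void *buf = kmalloc(len + mask, gfp);

		*freeptr = buf;
		return buf ? PTR_ALIGN(buf, mask + 1) : NULL;
	}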
diff --git a/include/linux/damon.h b/include/linux/damon.h
index ae2664d1d5f1..ab2f17d9926b 100644
--- a/include/linux/damon.h
+++ b/include/linux/damon.h
@@ -40,9 +40,24 @@ struct damon_addr_range {
* @ar: The address range of the region.
* @sampling_addr: Address of the sample for the next access check.
* @nr_accesses: Access frequency of this region.
+ * @nr_accesses_bp: @nr_accesses in basis points (0.01%) that is updated
+ * for each sampling interval.
* @list: List head for siblings.
* @age: Age of this region.
*
+ * @nr_accesses is reset to zero for every &damon_attrs->aggr_interval and is
+ * increased for every &damon_attrs->sample_interval if an access to the region
+ * during the last sampling interval is found. The update of this field should
+ * not be done with direct access but with the helper function,
+ * damon_update_region_access_rate().
+ *
+ * @nr_accesses_bp is another representation of @nr_accesses in basis points
+ * (1 in 10,000) that is updated for every &damon_attrs->sample_interval in a
+ * manner similar to a moving sum. By the algorithm, this value becomes
+ * @nr_accesses * 10000 for every &struct damon_attrs->aggr_interval. It can
+ * be used when the aggregation interval is too long to wait for before
+ * reading the access monitoring results.
+ *
* @age is initially zero, increased for each aggregation interval, and reset
* to zero again if the access frequency is significantly changed. If two
* regions are merged into a new region, both @nr_accesses and @age of the new
@@ -52,6 +67,7 @@ struct damon_region {
struct damon_addr_range ar;
unsigned long sampling_addr;
unsigned int nr_accesses;
+ unsigned int nr_accesses_bp;
struct list_head list;
unsigned int age;
@@ -298,24 +314,24 @@ struct damos_access_pattern {
* struct damos - Represents a Data Access Monitoring-based Operation Scheme.
* @pattern: Access pattern of target regions.
* @action: &damo_action to be applied to the target regions.
+ * @apply_interval_us: The time between applying the @action.
* @quota: Control the aggressiveness of this scheme.
* @wmarks: Watermarks for automated (in)activation of this scheme.
* @filters: Additional set of &struct damos_filter for &action.
* @stat: Statistics of this scheme.
* @list: List head for siblings.
*
- * For each aggregation interval, DAMON finds regions which fit in the
+ * For each @apply_interval_us, DAMON finds regions which fit in the
* &pattern and applies &action to those. To avoid consuming too much
* CPU time or IO resources for the &action, &quota is used.
*
+ * If @apply_interval_us is zero, &damon_attrs->aggr_interval is used instead.
+ *
* To do the work only when needed, schemes can be activated for specific
* system situations using &wmarks. If all schemes that registered to the
* monitoring context are inactive, DAMON stops monitoring either, and just
* repeatedly checks the watermarks.
*
- * If all schemes that registered to a &struct damon_ctx are inactive, DAMON
- * stops monitoring and just repeatedly checks the watermarks.
- *
* Before applying the &action to a memory region, &struct damon_operations
* implementation could check pages of the region and skip &action to respect
* &filters
@@ -327,6 +343,14 @@ struct damos_access_pattern {
struct damos {
struct damos_access_pattern pattern;
enum damos_action action;
+ unsigned long apply_interval_us;
+/* private: internal use only */
+ /*
+ * number of sample intervals that should be passed before applying
+ * @action
+ */
+ unsigned long next_apply_sis;
+/* public: */
struct damos_quota quota;
struct damos_watermarks wmarks;
struct list_head filters;
@@ -472,13 +496,14 @@ struct damon_callback {
* regions.
*
* For each @sample_interval, DAMON checks whether each region is accessed or
- * not. It aggregates and keeps the access information (number of accesses to
- * each region) for @aggr_interval time. DAMON also checks whether the target
- * memory regions need update (e.g., by ``mmap()`` calls from the application,
- * in case of virtual memory monitoring) and applies the changes for each
- * @ops_update_interval. All time intervals are in micro-seconds.
- * Please refer to &struct damon_operations and &struct damon_callback for more
- * detail.
+ * not during the last @sample_interval. If such access is found, DAMON
+ * aggregates the information by increasing &damon_region->nr_accesses for
+ * @aggr_interval time. For each @aggr_interval, the count is reset. DAMON
+ * also checks whether the target memory regions need update (e.g., by
+ * ``mmap()`` calls from the application, in case of virtual memory monitoring)
+ * and applies the changes for each @ops_update_interval. All time intervals
+ * are in micro-seconds. Please refer to &struct damon_operations and &struct
+ * damon_callback for more detail.
*/
struct damon_attrs {
unsigned long sample_interval;
@@ -522,8 +547,18 @@ struct damon_ctx {
struct damon_attrs attrs;
/* private: internal use only */
- struct timespec64 last_aggregation;
- struct timespec64 last_ops_update;
+ /* number of sample intervals that passed since this context started */
+ unsigned long passed_sample_intervals;
+ /*
+ * number of sample intervals that should be passed before next
+ * aggregation
+ */
+ unsigned long next_aggregation_sis;
+ /*
+ * number of sample intervals that should be passed before next ops
+ * update
+ */
+ unsigned long next_ops_update_sis;
/* public: */
struct task_struct *kdamond;
@@ -608,6 +643,8 @@ void damon_add_region(struct damon_region *r, struct damon_target *t);
void damon_destroy_region(struct damon_region *r, struct damon_target *t);
int damon_set_regions(struct damon_target *t, struct damon_addr_range *ranges,
unsigned int nr_ranges);
+void damon_update_region_access_rate(struct damon_region *r, bool accessed,
+ struct damon_attrs *attrs);
struct damos_filter *damos_new_filter(enum damos_filter_type type,
bool matching);
@@ -615,7 +652,9 @@ void damos_add_filter(struct damos *s, struct damos_filter *f);
void damos_destroy_filter(struct damos_filter *f);
struct damos *damon_new_scheme(struct damos_access_pattern *pattern,
- enum damos_action action, struct damos_quota *quota,
+ enum damos_action action,
+ unsigned long apply_interval_us,
+ struct damos_quota *quota,
struct damos_watermarks *wmarks);
void damon_add_scheme(struct damon_ctx *ctx, struct damos *s);
void damon_destroy_scheme(struct damos *s);
@@ -642,6 +681,13 @@ static inline bool damon_target_has_pid(const struct damon_ctx *ctx)
return ctx->ops.id == DAMON_OPS_VADDR || ctx->ops.id == DAMON_OPS_FVADDR;
}
+static inline unsigned int damon_max_nr_accesses(const struct damon_attrs *attrs)
+{
+ /* {aggr,sample}_interval are unsigned long, hence could overflow */
+ return min(attrs->aggr_interval / attrs->sample_interval,
+ (unsigned long)UINT_MAX);
+}
+
int damon_start(struct damon_ctx **ctxs, int nr_ctxs, bool exclusive);
int damon_stop(struct damon_ctx **ctxs, int nr_ctxs);
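A worked example of the new damon_max_nr_accesses() bound: with a 5 ms
sampling interval and a 100 ms aggregation interval, a region can be seen
accessed at most 100000 / 5000 = 20 times per aggregation, and
@nr_accesses_bp converges to nr_accesses * 10000 (at most 200000 here) over
one aggregation interval. A sketch with example values only:

	#include <linux/bug.h>
	#include <linux/damon.h>

	static void damon_attrs_example(void)
	{
		struct damon_attrs attrs = {
			.sample_interval = 5000,	/* 5 ms */
			.aggr_interval = 100000,	/* 100 ms */
		};

		/* 100000 / 5000 == 20 checks per aggregation interval */
		WARN_ON(damon_max_nr_accesses(&attrs) != 20);
	}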
diff --git a/include/linux/dax.h b/include/linux/dax.h
index 22cd9902345d..b463502b16e1 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -159,8 +159,8 @@ int dax_writeback_mapping_range(struct address_space *mapping,
struct page *dax_layout_busy_page(struct address_space *mapping);
struct page *dax_layout_busy_page_range(struct address_space *mapping, loff_t start, loff_t end);
-dax_entry_t dax_lock_page(struct page *page);
-void dax_unlock_page(struct page *page, dax_entry_t cookie);
+dax_entry_t dax_lock_folio(struct folio *folio);
+void dax_unlock_folio(struct folio *folio, dax_entry_t cookie);
dax_entry_t dax_lock_mapping_entry(struct address_space *mapping,
unsigned long index, struct page **page);
void dax_unlock_mapping_entry(struct address_space *mapping,
@@ -182,14 +182,14 @@ static inline int dax_writeback_mapping_range(struct address_space *mapping,
return -EOPNOTSUPP;
}
-static inline dax_entry_t dax_lock_page(struct page *page)
+static inline dax_entry_t dax_lock_folio(struct folio *folio)
{
- if (IS_DAX(page->mapping->host))
+ if (IS_DAX(folio->mapping->host))
return ~0UL;
return 0;
}
-static inline void dax_unlock_page(struct page *page, dax_entry_t cookie)
+static inline void dax_unlock_folio(struct folio *folio, dax_entry_t cookie)
{
}
diff --git a/include/linux/dcache.h b/include/linux/dcache.h
index 6b351e009f59..3da2f0545d5d 100644
--- a/include/linux/dcache.h
+++ b/include/linux/dcache.h
@@ -251,6 +251,7 @@ extern struct dentry * d_make_root(struct inode *);
/* <clickety>-<click> the ramfs-type tree */
extern void d_genocide(struct dentry *);
+extern void d_mark_tmpfile(struct file *, struct inode *);
extern void d_tmpfile(struct file *, struct inode *);
extern struct dentry *d_find_alias(struct inode *);
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index 69d0435c7ebb..772ab4d74d94 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -165,6 +165,7 @@ void dm_error(const char *message);
struct dm_dev {
struct block_device *bdev;
+ struct bdev_handle *bdev_handle;
struct dax_device *dax_dev;
blk_mode_t mode;
char name[16];
diff --git a/include/linux/device.h b/include/linux/device.h
index 56d93a1ffb7b..d7a72a8749ea 100644
--- a/include/linux/device.h
+++ b/include/linux/device.h
@@ -389,8 +389,8 @@ void devm_remove_action(struct device *dev, void (*action)(void *), void *data);
void devm_release_action(struct device *dev, void (*action)(void *), void *data);
int __devm_add_action(struct device *dev, void (*action)(void *), void *data, const char *name);
-#define devm_add_action(release, action, data) \
- __devm_add_action(release, action, data, #action)
+#define devm_add_action(dev, action, data) \
+ __devm_add_action(dev, action, data, #action)
static inline int __devm_add_action_or_reset(struct device *dev, void (*action)(void *),
void *data, const char *name)
@@ -403,8 +403,8 @@ static inline int __devm_add_action_or_reset(struct device *dev, void (*action)(
return ret;
}
-#define devm_add_action_or_reset(release, action, data) \
- __devm_add_action_or_reset(release, action, data, #action)
+#define devm_add_action_or_reset(dev, action, data) \
+ __devm_add_action_or_reset(dev, action, data, #action)
/**
* devm_alloc_percpu - Resource-managed alloc_percpu
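The rename is purely cosmetic (the first argument was always the device), so
existing callers compile unchanged. The typical shape in a probe path, with
hypothetical names:

	#include <linux/device.h>

	static void my_disable_hw(void *data)
	{
		/* undo whatever the probe step below enabled */
	}

	/* On any later devm unwind my_disable_hw() runs; if registration
	 * itself fails, the _or_reset variant calls it immediately.
	 */
	static int my_probe_step(struct device *dev, void *hw)
	{
		return devm_add_action_or_reset(dev, my_disable_hw, hw);
	}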
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 0d678e9a7b24..ebe78bd3d121 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -568,6 +568,25 @@ static inline void dma_fence_set_error(struct dma_fence *fence,
fence->error = error;
}
+/**
+ * dma_fence_timestamp - helper to get the completion timestamp of a fence
+ * @fence: fence to get the timestamp from.
+ *
+ * After a fence is signaled the timestamp is updated with the signaling time,
+ * but setting the timestamp can race with tasks waiting for the signaling. This
+ * helper busy waits for the correct timestamp to appear.
+ */
+static inline ktime_t dma_fence_timestamp(struct dma_fence *fence)
+{
+ if (WARN_ON(!test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &fence->flags)))
+ return ktime_get();
+
+ while (!test_bit(DMA_FENCE_FLAG_TIMESTAMP_BIT, &fence->flags))
+ cpu_relax();
+
+ return fence->timestamp;
+}
+
signed long dma_fence_wait_timeout(struct dma_fence *,
bool intr, signed long timeout);
signed long dma_fence_wait_any_timeout(struct dma_fence **fences,
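A sketch of why the helper matters: reading fence->timestamp directly can
observe the pre-signal value, since DMA_FENCE_FLAG_SIGNALED_BIT and
DMA_FENCE_FLAG_TIMESTAMP_BIT are set in two steps, and dma_fence_timestamp()
waits out that window. Assumes @fence is already signaled and @submitted was
captured by the caller (the helper is hypothetical):

	#include <linux/dma-fence.h>
	#include <linux/ktime.h>

	static s64 fence_latency_us(struct dma_fence *fence, ktime_t submitted)
	{
		return ktime_us_delta(dma_fence_timestamp(fence), submitted);
	}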
diff --git a/include/linux/dmaengine.h b/include/linux/dmaengine.h
index c3656e590213..3df70d6131c8 100644
--- a/include/linux/dmaengine.h
+++ b/include/linux/dmaengine.h
@@ -517,8 +517,6 @@ static inline const char *dma_chan_name(struct dma_chan *chan)
return dev_name(&chan->dev->device);
}
-void dma_chan_cleanup(struct kref *kref);
-
/**
* typedef dma_filter_fn - callback filter for dma_request_channel
* @chan: channel to be reviewed
diff --git a/include/linux/dpll.h b/include/linux/dpll.h
new file mode 100644
index 000000000000..578fc5fa3750
--- /dev/null
+++ b/include/linux/dpll.h
@@ -0,0 +1,170 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Meta Platforms, Inc. and affiliates
+ * Copyright (c) 2023 Intel and affiliates
+ */
+
+#ifndef __DPLL_H__
+#define __DPLL_H__
+
+#include <uapi/linux/dpll.h>
+#include <linux/device.h>
+#include <linux/netlink.h>
+
+struct dpll_device;
+struct dpll_pin;
+
+struct dpll_device_ops {
+ int (*mode_get)(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_mode *mode, struct netlink_ext_ack *extack);
+ bool (*mode_supported)(const struct dpll_device *dpll, void *dpll_priv,
+ const enum dpll_mode mode,
+ struct netlink_ext_ack *extack);
+ int (*lock_status_get)(const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_lock_status *status,
+ struct netlink_ext_ack *extack);
+ int (*temp_get)(const struct dpll_device *dpll, void *dpll_priv,
+ s32 *temp, struct netlink_ext_ack *extack);
+};
+
+struct dpll_pin_ops {
+ int (*frequency_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ const u64 frequency,
+ struct netlink_ext_ack *extack);
+ int (*frequency_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u64 *frequency, struct netlink_ext_ack *extack);
+ int (*direction_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ const enum dpll_pin_direction direction,
+ struct netlink_ext_ack *extack);
+ int (*direction_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ enum dpll_pin_direction *direction,
+ struct netlink_ext_ack *extack);
+ int (*state_on_pin_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *parent_pin,
+ void *parent_pin_priv,
+ enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack);
+ int (*state_on_dpll_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv, enum dpll_pin_state *state,
+ struct netlink_ext_ack *extack);
+ int (*state_on_pin_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_pin *parent_pin,
+ void *parent_pin_priv,
+ const enum dpll_pin_state state,
+ struct netlink_ext_ack *extack);
+ int (*state_on_dpll_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll,
+ void *dpll_priv,
+ const enum dpll_pin_state state,
+ struct netlink_ext_ack *extack);
+ int (*prio_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ u32 *prio, struct netlink_ext_ack *extack);
+ int (*prio_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ const u32 prio, struct netlink_ext_ack *extack);
+ int (*phase_offset_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s64 *phase_offset,
+ struct netlink_ext_ack *extack);
+ int (*phase_adjust_get)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ s32 *phase_adjust,
+ struct netlink_ext_ack *extack);
+ int (*phase_adjust_set)(const struct dpll_pin *pin, void *pin_priv,
+ const struct dpll_device *dpll, void *dpll_priv,
+ const s32 phase_adjust,
+ struct netlink_ext_ack *extack);
+};
+
+struct dpll_pin_frequency {
+ u64 min;
+ u64 max;
+};
+
+#define DPLL_PIN_FREQUENCY_RANGE(_min, _max) \
+ { \
+ .min = _min, \
+ .max = _max, \
+ }
+
+#define DPLL_PIN_FREQUENCY(_val) DPLL_PIN_FREQUENCY_RANGE(_val, _val)
+#define DPLL_PIN_FREQUENCY_1PPS \
+ DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_1_HZ)
+#define DPLL_PIN_FREQUENCY_10MHZ \
+ DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_10_MHZ)
+#define DPLL_PIN_FREQUENCY_IRIG_B \
+ DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_10_KHZ)
+#define DPLL_PIN_FREQUENCY_DCF77 \
+ DPLL_PIN_FREQUENCY(DPLL_PIN_FREQUENCY_77_5_KHZ)
+
+struct dpll_pin_phase_adjust_range {
+ s32 min;
+ s32 max;
+};
+
+struct dpll_pin_properties {
+ const char *board_label;
+ const char *panel_label;
+ const char *package_label;
+ enum dpll_pin_type type;
+ unsigned long capabilities;
+ u32 freq_supported_num;
+ struct dpll_pin_frequency *freq_supported;
+ struct dpll_pin_phase_adjust_range phase_range;
+};
+
+#if IS_ENABLED(CONFIG_DPLL)
+size_t dpll_msg_pin_handle_size(struct dpll_pin *pin);
+int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin);
+#else
+static inline size_t dpll_msg_pin_handle_size(struct dpll_pin *pin)
+{
+ return 0;
+}
+
+static inline int dpll_msg_add_pin_handle(struct sk_buff *msg, struct dpll_pin *pin)
+{
+ return 0;
+}
+#endif
+
+struct dpll_device *
+dpll_device_get(u64 clock_id, u32 dev_driver_id, struct module *module);
+
+void dpll_device_put(struct dpll_device *dpll);
+
+int dpll_device_register(struct dpll_device *dpll, enum dpll_type type,
+ const struct dpll_device_ops *ops, void *priv);
+
+void dpll_device_unregister(struct dpll_device *dpll,
+ const struct dpll_device_ops *ops, void *priv);
+
+struct dpll_pin *
+dpll_pin_get(u64 clock_id, u32 dev_driver_id, struct module *module,
+ const struct dpll_pin_properties *prop);
+
+int dpll_pin_register(struct dpll_device *dpll, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv);
+
+void dpll_pin_unregister(struct dpll_device *dpll, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv);
+
+void dpll_pin_put(struct dpll_pin *pin);
+
+int dpll_pin_on_pin_register(struct dpll_pin *parent, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv);
+
+void dpll_pin_on_pin_unregister(struct dpll_pin *parent, struct dpll_pin *pin,
+ const struct dpll_pin_ops *ops, void *priv);
+
+int dpll_device_change_ntf(struct dpll_device *dpll);
+
+int dpll_pin_change_ntf(struct dpll_pin *pin);
+
+#endif
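A compressed sketch of the intended driver flow: obtain a reference for a
clock id, then attach ops and private data. All my_* names are hypothetical
and error handling is abbreviated; treat this as illustrative rather than as
the canonical registration sequence:

	#include <linux/dpll.h>
	#include <linux/err.h>
	#include <linux/module.h>

	static int my_mode_get(const struct dpll_device *dpll, void *dpll_priv,
			       enum dpll_mode *mode,
			       struct netlink_ext_ack *extack)
	{
		*mode = DPLL_MODE_AUTOMATIC;
		return 0;
	}

	static int my_lock_status_get(const struct dpll_device *dpll,
				      void *dpll_priv,
				      enum dpll_lock_status *status,
				      struct netlink_ext_ack *extack)
	{
		*status = DPLL_LOCK_STATUS_LOCKED;
		return 0;
	}

	static const struct dpll_device_ops my_dpll_ops = {
		.mode_get = my_mode_get,
		.lock_status_get = my_lock_status_get,
	};

	static struct dpll_device *my_dpll_register(u64 clock_id, void *priv)
	{
		struct dpll_device *dpll;
		int ret;

		dpll = dpll_device_get(clock_id, 0, THIS_MODULE);
		if (IS_ERR(dpll))
			return dpll;

		ret = dpll_device_register(dpll, DPLL_TYPE_PPS,
					   &my_dpll_ops, priv);
		if (ret) {
			dpll_device_put(dpll);
			return ERR_PTR(ret);
		}
		return dpll;
	}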
diff --git a/include/linux/dsa/sja1105.h b/include/linux/dsa/sja1105.h
index c177322f793d..b9dd35d4b8f5 100644
--- a/include/linux/dsa/sja1105.h
+++ b/include/linux/dsa/sja1105.h
@@ -28,7 +28,7 @@
/* Source and Destination MAC of follow-up meta frames.
* Whereas the choice of SMAC only affects the unique identification of the
* switch as sender of meta frames, the DMAC must be an address that is present
- * in the DSA master port's multicast MAC filter.
+ * in the DSA conduit port's multicast MAC filter.
* 01-80-C2-00-00-0E is a good choice for this, as all profiles of IEEE 1588
* over L2 use this address for some purpose already.
*/
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 80b21d1c6eaf..9cc5bf32f6f2 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -358,13 +358,10 @@ void efi_native_runtime_setup(void);
* where the UEFI SPEC breaks the line.
*/
#define NULL_GUID EFI_GUID(0x00000000, 0x0000, 0x0000, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00)
-#define MPS_TABLE_GUID EFI_GUID(0xeb9d2d2f, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define ACPI_TABLE_GUID EFI_GUID(0xeb9d2d30, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define ACPI_20_TABLE_GUID EFI_GUID(0x8868e871, 0xe4f1, 0x11d3, 0xbc, 0x22, 0x00, 0x80, 0xc7, 0x3c, 0x88, 0x81)
#define SMBIOS_TABLE_GUID EFI_GUID(0xeb9d2d31, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
#define SMBIOS3_TABLE_GUID EFI_GUID(0xf2fd1544, 0x9794, 0x4a2c, 0x99, 0x2e, 0xe5, 0xbb, 0xcf, 0x20, 0xe3, 0x94)
-#define SAL_SYSTEM_TABLE_GUID EFI_GUID(0xeb9d2d32, 0x2d88, 0x11d3, 0x9a, 0x16, 0x00, 0x90, 0x27, 0x3f, 0xc1, 0x4d)
-#define HCDP_TABLE_GUID EFI_GUID(0xf951938d, 0x620b, 0x42ef, 0x82, 0x79, 0xa8, 0x4b, 0x79, 0x61, 0x78, 0x98)
#define UGA_IO_PROTOCOL_GUID EFI_GUID(0x61a4d49e, 0x6f68, 0x4f1b, 0xb9, 0x22, 0xa8, 0x6e, 0xed, 0x0b, 0x07, 0xa2)
#define EFI_GLOBAL_VARIABLE_GUID EFI_GUID(0x8be4df61, 0x93ca, 0x11d2, 0xaa, 0x0d, 0x00, 0xe0, 0x98, 0x03, 0x2b, 0x8c)
#define UV_SYSTEM_TABLE_GUID EFI_GUID(0x3b13a7d4, 0x633e, 0x11dd, 0x93, 0xec, 0xda, 0x25, 0x56, 0xd8, 0x95, 0x93)
@@ -851,10 +848,6 @@ static inline int efi_range_is_wc(unsigned long start, unsigned long len)
return 1;
}
-#ifdef CONFIG_EFI_PCDP
-extern int __init efi_setup_pcdp_console(char *);
-#endif
-
/*
* We play games with efi_enabled so that the compiler will, if
* possible, remove EFI-related code altogether.
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 62b61527bcc4..226a36ed5aa1 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -1052,4 +1052,23 @@ static inline int ethtool_mm_frag_size_min_to_add(u32 val_min, u32 *val_add,
* next string.
*/
extern __printf(2, 3) void ethtool_sprintf(u8 **data, const char *fmt, ...);
+
+/* Link mode to forced speed capabilities maps */
+struct ethtool_forced_speed_map {
+ u32 speed;
+ __ETHTOOL_DECLARE_LINK_MODE_MASK(caps);
+
+ const u32 *cap_arr;
+ u32 arr_size;
+};
+
+#define ETHTOOL_FORCED_SPEED_MAP(prefix, value) \
+{ \
+ .speed = SPEED_##value, \
+ .cap_arr = prefix##_##value, \
+ .arr_size = ARRAY_SIZE(prefix##_##value), \
+}
+
+void
+ethtool_forced_speed_maps_init(struct ethtool_forced_speed_map *maps, u32 size);
#endif /* _LINUX_ETHTOOL_H */
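The macro token-pastes prefix##_##value, so the per-speed arrays must follow
that naming scheme. A sketch with hypothetical mydrv_* link-mode lists:

	#include <linux/ethtool.h>

	static const u32 mydrv_10000[] = {
		ETHTOOL_LINK_MODE_10000baseT_Full_BIT,
		ETHTOOL_LINK_MODE_10000baseSR_Full_BIT,
	};

	static const u32 mydrv_25000[] = {
		ETHTOOL_LINK_MODE_25000baseCR_Full_BIT,
		ETHTOOL_LINK_MODE_25000baseSR_Full_BIT,
	};

	/* Expands to .speed = SPEED_10000, .cap_arr = mydrv_10000, ... */
	static struct ethtool_forced_speed_map mydrv_speed_maps[] = {
		ETHTOOL_FORCED_SPEED_MAP(mydrv, 10000),
		ETHTOOL_FORCED_SPEED_MAP(mydrv, 25000),
	};

	static void mydrv_init_maps(void)
	{
		/* Fills each map's "caps" bitmap from its cap_arr once. */
		ethtool_forced_speed_maps_init(mydrv_speed_maps,
					       ARRAY_SIZE(mydrv_speed_maps));
	}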
diff --git a/include/linux/export-internal.h b/include/linux/export-internal.h
index 1c849db953a5..45fca09b2319 100644
--- a/include/linux/export-internal.h
+++ b/include/linux/export-internal.h
@@ -52,6 +52,8 @@
#ifdef CONFIG_IA64
#define KSYM_FUNC(name) @fptr(name)
+#elif defined(CONFIG_PARISC) && defined(CONFIG_64BIT)
+#define KSYM_FUNC(name) P%name
#else
#define KSYM_FUNC(name) name
#endif
diff --git a/include/linux/exportfs.h b/include/linux/exportfs.h
index 11fbd0ee1370..0388e8c20f52 100644
--- a/include/linux/exportfs.h
+++ b/include/linux/exportfs.h
@@ -105,6 +105,12 @@ enum fid_type {
FILEID_LUSTRE = 0x97,
/*
+ * 64 bit inode number, 32 bit subvolume, 32 bit generation number:
+ */
+ FILEID_BCACHEFS_WITHOUT_PARENT = 0xb1,
+ FILEID_BCACHEFS_WITH_PARENT = 0xb2,
+
+ /*
* 64 bit unique kernfs id
*/
FILEID_KERNFS = 0xfe,
@@ -224,9 +230,23 @@ struct export_operations {
atomic attribute updates
*/
#define EXPORT_OP_FLUSH_ON_CLOSE (0x20) /* fs flushes file data on close */
+#define EXPORT_OP_ASYNC_LOCK (0x40) /* fs can do async lock request */
unsigned long flags;
};
+/**
+ * exportfs_lock_op_is_async() - export op supports async lock operation
+ * @export_ops: the nfs export operations to check
+ *
+ * Returns true if the nfs export_operations structure has
+ * EXPORT_OP_ASYNC_LOCK set in its flags.
+ */
+static inline bool
+exportfs_lock_op_is_async(const struct export_operations *export_ops)
+{
+ return export_ops->flags & EXPORT_OP_ASYNC_LOCK;
+}
+
extern int exportfs_encode_inode_fh(struct inode *inode, struct fid *fid,
int *max_len, struct inode *parent,
int flags);
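A sketch of both sides of the new flag: a filesystem advertising async lock
handling, and a lock-service caller testing for it (the myfs_* names are
hypothetical and the ops table is elided to the one field):

	#include <linux/exportfs.h>
	#include <linux/fs.h>

	static const struct export_operations myfs_export_ops = {
		/* .encode_fh, .fh_to_dentry, ... elided */
		.flags = EXPORT_OP_ASYNC_LOCK,
	};

	static bool myfs_can_async_lock(const struct super_block *sb)
	{
		return sb->s_export_op &&
		       exportfs_lock_op_is_async(sb->s_export_op);
	}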
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index a82a4bb6ce68..039fe0ce8d83 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -13,10 +13,10 @@
#define F2FS_SUPER_OFFSET 1024 /* byte-size offset */
#define F2FS_MIN_LOG_SECTOR_SIZE 9 /* 9 bits for 512 bytes */
-#define F2FS_MAX_LOG_SECTOR_SIZE 12 /* 12 bits for 4096 bytes */
-#define F2FS_LOG_SECTORS_PER_BLOCK 3 /* log number for sector/blk */
-#define F2FS_BLKSIZE 4096 /* support only 4KB block */
-#define F2FS_BLKSIZE_BITS 12 /* bits for F2FS_BLKSIZE */
+#define F2FS_MAX_LOG_SECTOR_SIZE PAGE_SHIFT /* Max is Block Size */
+#define F2FS_LOG_SECTORS_PER_BLOCK (PAGE_SHIFT - 9) /* log number for sector/blk */
+#define F2FS_BLKSIZE PAGE_SIZE /* support only block == page */
+#define F2FS_BLKSIZE_BITS PAGE_SHIFT /* bits for F2FS_BLKSIZE */
#define F2FS_MAX_EXTENSION 64 /* # of extension entries */
#define F2FS_EXTENSION_LEN 8 /* max size of extension */
#define F2FS_BLK_ALIGN(x) (((x) + F2FS_BLKSIZE - 1) >> F2FS_BLKSIZE_BITS)
@@ -104,6 +104,7 @@ enum f2fs_error {
ERROR_CORRUPTED_VERITY_XATTR,
ERROR_CORRUPTED_XATTR,
ERROR_INVALID_NODE_REFERENCE,
+ ERROR_INCONSISTENT_NAT,
ERROR_MAX,
};
@@ -210,14 +211,14 @@ struct f2fs_checkpoint {
unsigned char sit_nat_version_bitmap[];
} __packed;
-#define CP_CHKSUM_OFFSET 4092 /* default chksum offset in checkpoint */
+#define CP_CHKSUM_OFFSET (F2FS_BLKSIZE - sizeof(__le32)) /* default chksum offset in checkpoint */
#define CP_MIN_CHKSUM_OFFSET \
(offsetof(struct f2fs_checkpoint, sit_nat_version_bitmap))
/*
* For orphan inode management
*/
-#define F2FS_ORPHANS_PER_BLOCK 1020
+#define F2FS_ORPHANS_PER_BLOCK ((F2FS_BLKSIZE - 4 * sizeof(__le32)) / sizeof(__le32))
#define GET_ORPHAN_BLOCKS(n) (((n) + F2FS_ORPHANS_PER_BLOCK - 1) / \
F2FS_ORPHANS_PER_BLOCK)
@@ -243,14 +244,31 @@ struct f2fs_extent {
#define F2FS_NAME_LEN 255
/* 200 bytes for inline xattrs by default */
#define DEFAULT_INLINE_XATTR_ADDRS 50
-#define DEF_ADDRS_PER_INODE 923 /* Address Pointers in an Inode */
+
+#define OFFSET_OF_END_OF_I_EXT 360
+#define SIZE_OF_I_NID 20
+
+struct node_footer {
+ __le32 nid; /* node id */
+ __le32 ino; /* inode number */
+ __le32 flag; /* include cold/fsync/dentry marks and offset */
+ __le64 cp_ver; /* checkpoint version */
+ __le32 next_blkaddr; /* next node page block address */
+} __packed;
+
+/* Address Pointers in an Inode */
+#define DEF_ADDRS_PER_INODE ((F2FS_BLKSIZE - OFFSET_OF_END_OF_I_EXT \
+ - SIZE_OF_I_NID \
+ - sizeof(struct node_footer)) / sizeof(__le32))
#define CUR_ADDRS_PER_INODE(inode) (DEF_ADDRS_PER_INODE - \
get_extra_isize(inode))
#define DEF_NIDS_PER_INODE 5 /* Node IDs in an Inode */
#define ADDRS_PER_INODE(inode) addrs_per_inode(inode)
-#define DEF_ADDRS_PER_BLOCK 1018 /* Address Pointers in a Direct Block */
+/* Address Pointers in a Direct Block */
+#define DEF_ADDRS_PER_BLOCK ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32))
#define ADDRS_PER_BLOCK(inode) addrs_per_block(inode)
-#define NIDS_PER_BLOCK 1018 /* Node IDs in an Indirect Block */
+/* Node IDs in an Indirect Block */
+#define NIDS_PER_BLOCK ((F2FS_BLKSIZE - sizeof(struct node_footer)) / sizeof(__le32))
#define ADDRS_PER_PAGE(page, inode) \
(IS_INODE(page) ? ADDRS_PER_INODE(inode) : ADDRS_PER_BLOCK(inode))
@@ -342,14 +360,6 @@ enum {
#define OFFSET_BIT_MASK GENMASK(OFFSET_BIT_SHIFT - 1, 0)
-struct node_footer {
- __le32 nid; /* node id */
- __le32 ino; /* inode number */
- __le32 flag; /* include cold/fsync/dentry marks and offset */
- __le64 cp_ver; /* checkpoint version */
- __le32 next_blkaddr; /* next node page block address */
-} __packed;
-
struct f2fs_node {
/* can be one of three types: inode, direct, and indirect types */
union {
@@ -363,7 +373,7 @@ struct f2fs_node {
/*
* For NAT entries
*/
-#define NAT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_nat_entry))
+#define NAT_ENTRY_PER_BLOCK (F2FS_BLKSIZE / sizeof(struct f2fs_nat_entry))
struct f2fs_nat_entry {
__u8 version; /* latest version of cached nat entry */
@@ -378,12 +388,13 @@ struct f2fs_nat_block {
/*
* For SIT entries
*
- * Each segment is 2MB in size by default so that a bitmap for validity of
- * there-in blocks should occupy 64 bytes, 512 bits.
+ * A validity bitmap of 64 bytes covers 512 blocks. For a 4K page size, this
+ * results in a segment size of 2MB. For 16K pages, the default segment size
+ * is 8MB.
* Not allow to change this.
*/
#define SIT_VBLOCK_MAP_SIZE 64
-#define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry))
+#define SIT_ENTRY_PER_BLOCK (F2FS_BLKSIZE / sizeof(struct f2fs_sit_entry))
/*
* F2FS uses 4 bytes to represent block address. As a result, supported size of
@@ -418,7 +429,7 @@ struct f2fs_sit_block {
* For segment summary
*
* One summary block contains exactly 512 summary entries, which represents
- * exactly 2MB segment by default. Not allow to change the basic units.
+ * exactly one segment by default. Changing the basic units is not allowed.
*
* NOTE: For initializing fields, you must use set_summary
*
@@ -429,12 +440,12 @@ struct f2fs_sit_block {
* from node's page's beginning to get a data block address.
* ex) data_blkaddr = (block_t)(nodepage_start_address + ofs_in_node)
*/
-#define ENTRIES_IN_SUM 512
+#define ENTRIES_IN_SUM (F2FS_BLKSIZE / 8)
#define SUMMARY_SIZE (7) /* sizeof(struct summary) */
#define SUM_FOOTER_SIZE (5) /* sizeof(struct summary_footer) */
#define SUM_ENTRY_SIZE (SUMMARY_SIZE * ENTRIES_IN_SUM)
-/* a summary entry for a 4KB-sized block in a segment */
+/* a summary entry for a block in a segment */
struct f2fs_summary {
__le32 nid; /* parent node id */
union {
@@ -518,7 +529,7 @@ struct f2fs_journal {
};
} __packed;
-/* 4KB-sized summary block structure */
+/* Block-sized summary block structure */
struct f2fs_summary_block {
struct f2fs_summary entries[ENTRIES_IN_SUM];
struct f2fs_journal journal;
@@ -559,11 +570,14 @@ typedef __le32 f2fs_hash_t;
* Note: there are more reserved space in inline dentry than in regular
* dentry, when converting inline dentry we should handle this carefully.
*/
-#define NR_DENTRY_IN_BLOCK 214 /* the number of dentry in a block */
+
+/* the number of dentry in a block */
+#define NR_DENTRY_IN_BLOCK ((BITS_PER_BYTE * F2FS_BLKSIZE) / \
+ ((SIZE_OF_DIR_ENTRY + F2FS_SLOT_LEN) * BITS_PER_BYTE + 1))
#define SIZE_OF_DIR_ENTRY 11 /* by byte */
#define SIZE_OF_DENTRY_BITMAP ((NR_DENTRY_IN_BLOCK + BITS_PER_BYTE - 1) / \
BITS_PER_BYTE)
-#define SIZE_OF_RESERVED (PAGE_SIZE - ((SIZE_OF_DIR_ENTRY + \
+#define SIZE_OF_RESERVED (F2FS_BLKSIZE - ((SIZE_OF_DIR_ENTRY + \
F2FS_SLOT_LEN) * \
NR_DENTRY_IN_BLOCK + SIZE_OF_DENTRY_BITMAP))
#define MIN_INLINE_DENTRY_SIZE 40 /* just include '.' and '..' entries */
@@ -576,7 +590,7 @@ struct f2fs_dir_entry {
__u8 file_type; /* file type */
} __packed;
-/* 4KB-sized directory entry block */
+/* Block-sized directory entry block */
struct f2fs_dentry_block {
/* validity bitmap for directory entries in each block */
__u8 dentry_bitmap[SIZE_OF_DENTRY_BITMAP];
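A quick consistency check, not part of the patch: on 4K pages the new
PAGE_SIZE-derived macros reduce exactly to the literals they replace
(struct node_footer is 4 + 4 + 4 + 8 + 4 = 24 bytes):

	#include <linux/build_bug.h>
	#include <linux/f2fs_fs.h>

	#if PAGE_SHIFT == 12	/* 4K pages */
	static_assert(sizeof(struct node_footer) == 24);
	static_assert(DEF_ADDRS_PER_INODE == 923);	/* (4096-360-20-24)/4 */
	static_assert(DEF_ADDRS_PER_BLOCK == 1018);	/* (4096-24)/4 */
	static_assert(F2FS_ORPHANS_PER_BLOCK == 1020);	/* (4096-16)/4 */
	static_assert(NR_DENTRY_IN_BLOCK == 214);	/* 32768/153 */
	static_assert(ENTRIES_IN_SUM == 512);		/* 4096/8 */
	#endif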
diff --git a/include/linux/fb.h b/include/linux/fb.h
index c14576458228..94e2c44c6569 100644
--- a/include/linux/fb.h
+++ b/include/linux/fb.h
@@ -591,8 +591,6 @@ extern ssize_t fb_sys_write(struct fb_info *info, const char __user *buf,
/* fbmem.c */
extern int register_framebuffer(struct fb_info *fb_info);
extern void unregister_framebuffer(struct fb_info *fb_info);
-extern int fb_prepare_logo(struct fb_info *fb_info, int rotate);
-extern int fb_show_logo(struct fb_info *fb_info, int rotate);
extern char* fb_get_buffer_offset(struct fb_info *info, struct fb_pixmap *buf, u32 size);
extern void fb_pad_unaligned_buffer(u8 *dst, u32 d_pitch, u8 *src, u32 idx,
u32 height, u32 shift_high, u32 shift_low, u32 mod);
@@ -603,9 +601,6 @@ extern int fb_get_color_depth(struct fb_var_screeninfo *var,
extern int fb_get_options(const char *name, char **option);
extern int fb_new_modelist(struct fb_info *info);
-extern bool fb_center_logo;
-extern int fb_logo_count;
-
static inline void lock_fb_info(struct fb_info *info)
{
mutex_lock(&info->lock);
diff --git a/include/linux/fdtable.h b/include/linux/fdtable.h
index e066816f3519..bc4c3287a65e 100644
--- a/include/linux/fdtable.h
+++ b/include/linux/fdtable.h
@@ -98,20 +98,9 @@ static inline struct file *files_lookup_fd_locked(struct files_struct *files, un
return files_lookup_fd_raw(files, fd);
}
-static inline struct file *files_lookup_fd_rcu(struct files_struct *files, unsigned int fd)
-{
- RCU_LOCKDEP_WARN(!rcu_read_lock_held(),
- "suspicious rcu_dereference_check() usage");
- return files_lookup_fd_raw(files, fd);
-}
-
-static inline struct file *lookup_fd_rcu(unsigned int fd)
-{
- return files_lookup_fd_rcu(current->files, fd);
-}
-
-struct file *task_lookup_fd_rcu(struct task_struct *task, unsigned int fd);
-struct file *task_lookup_next_fd_rcu(struct task_struct *task, unsigned int *fd);
+struct file *lookup_fdget_rcu(unsigned int fd);
+struct file *task_lookup_fdget_rcu(struct task_struct *task, unsigned int fd);
+struct file *task_lookup_next_fdget_rcu(struct task_struct *task, unsigned int *fd);
struct task_struct;
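The renamed fdget helpers acquire a reference on the returned file, so callers
drop it with fput() instead of stretching the RCU read section around every
use; a hedged sketch of the calling pattern:

	struct file *file;

	rcu_read_lock();
	file = task_lookup_fdget_rcu(task, fd);
	rcu_read_unlock();
	if (file) {
		/* use the referenced file outside the RCU section */
		fput(file);
	}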
diff --git a/include/linux/filter.h b/include/linux/filter.h
index 761af6b3cf2b..a4953fafc8cb 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -117,21 +117,25 @@ struct ctl_table_header;
/* ALU ops on immediates, bpf_add|sub|...: dst_reg += imm32 */
-#define BPF_ALU64_IMM(OP, DST, IMM) \
+#define BPF_ALU64_IMM_OFF(OP, DST, IMM, OFF) \
((struct bpf_insn) { \
.code = BPF_ALU64 | BPF_OP(OP) | BPF_K, \
.dst_reg = DST, \
.src_reg = 0, \
- .off = 0, \
+ .off = OFF, \
.imm = IMM })
+#define BPF_ALU64_IMM(OP, DST, IMM) \
+ BPF_ALU64_IMM_OFF(OP, DST, IMM, 0)
-#define BPF_ALU32_IMM(OP, DST, IMM) \
+#define BPF_ALU32_IMM_OFF(OP, DST, IMM, OFF) \
((struct bpf_insn) { \
.code = BPF_ALU | BPF_OP(OP) | BPF_K, \
.dst_reg = DST, \
.src_reg = 0, \
- .off = 0, \
+ .off = OFF, \
.imm = IMM })
+#define BPF_ALU32_IMM(OP, DST, IMM) \
+ BPF_ALU32_IMM_OFF(OP, DST, IMM, 0)
/* Endianness conversion, cpu_to_{l,b}e(), {l,b}e_to_cpu() */
@@ -143,6 +147,16 @@ struct ctl_table_header;
.off = 0, \
.imm = LEN })
+/* Byte Swap, bswap16/32/64 */
+
+#define BPF_BSWAP(DST, LEN) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU64 | BPF_END | BPF_SRC(BPF_TO_LE), \
+ .dst_reg = DST, \
+ .src_reg = 0, \
+ .off = 0, \
+ .imm = LEN })
+
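The new BPF_BSWAP form byte-swaps unconditionally for all three widths, unlike
the endianness-conversion form above, which is a no-op in one direction. The
semantics match the compiler builtins; a userspace illustration, not kernel
code:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t r1 = 0x1122334455667788ULL;

		printf("%#x\n",   __builtin_bswap16(r1));	/* imm=16: 0x8877 */
		printf("%#x\n",   __builtin_bswap32(r1));	/* imm=32: 0x88776655 */
		printf("%#llx\n", (unsigned long long)
				  __builtin_bswap64(r1));	/* 0x8877665544332211 */
		return 0;
	}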
/* Short form of mov, dst_reg = src_reg */
#define BPF_MOV64_REG(DST, SRC) \
@@ -179,6 +193,24 @@ struct ctl_table_header;
.off = 0, \
.imm = IMM })
+/* Short form of movsx, dst_reg = (s8,s16,s32)src_reg */
+
+#define BPF_MOVSX64_REG(DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU64 | BPF_MOV | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
+#define BPF_MOVSX32_REG(DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_ALU | BPF_MOV | BPF_X, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
/* Special form of mov32, used for doing explicit zero extension on dst. */
#define BPF_ZEXT_REG(DST) \
((struct bpf_insn) { \
@@ -263,6 +295,16 @@ static inline bool insn_is_zext(const struct bpf_insn *insn)
.off = OFF, \
.imm = 0 })
+/* Memory load, dst_reg = *(signed size *) (src_reg + off16) */
+
+#define BPF_LDX_MEMSX(SIZE, DST, SRC, OFF) \
+ ((struct bpf_insn) { \
+ .code = BPF_LDX | BPF_SIZE(SIZE) | BPF_MEMSX, \
+ .dst_reg = DST, \
+ .src_reg = SRC, \
+ .off = OFF, \
+ .imm = 0 })
+
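For the movsx and sign-extending-load forms, the off field selects the source
width (8, 16 or 32 bits); a small userspace sketch of the resulting semantics:

	#include <stdint.h>
	#include <stdio.h>

	int main(void)
	{
		uint64_t src = 0xf0;	/* register holds 0x...00f0 */

		/* BPF_MOVSX64_REG(dst, src, 8): sign-extend the low byte */
		printf("%lld\n", (long long)(int8_t)src);	/* -16 */
		/* BPF_MOVSX64_REG(dst, src, 16): sign-extend the low half */
		printf("%lld\n", (long long)(int16_t)src);	/* 240 */
		return 0;
	}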
/* Memory store, *(uint *) (dst_reg + off16) = src_reg */
#define BPF_STX_MEM(SIZE, DST, SRC, OFF) \
@@ -694,7 +736,7 @@ static inline void bpf_compute_and_save_data_end(
cb->data_end = skb->data + skb_headlen(skb);
}
-/* Restore data saved by bpf_compute_data_pointers(). */
+/* Restore data saved by bpf_compute_and_save_data_end(). */
static inline void bpf_restore_data_end(
struct sk_buff *skb, void *saved_data_end)
{
@@ -912,6 +954,8 @@ bool bpf_jit_needs_zext(void);
bool bpf_jit_supports_subprog_tailcalls(void);
bool bpf_jit_supports_kfunc_call(void);
bool bpf_jit_supports_far_kfunc_call(void);
+bool bpf_jit_supports_exceptions(void);
+void arch_bpf_stack_walk(bool (*consume_fn)(void *cookie, u64 ip, u64 sp, u64 bp), void *cookie);
bool bpf_helper_changes_pkt_data(void *func);
static inline bool bpf_dump_raw_ok(const struct cred *cred)
@@ -981,12 +1025,6 @@ int xdp_do_redirect_frame(struct net_device *dev,
struct bpf_prog *prog);
void xdp_do_flush(void);
-/* The xdp_do_flush_map() helper has been renamed to drop the _map suffix, as
- * it is no longer only flushing maps. Keep this define for compatibility
- * until all drivers are updated - do not use xdp_do_flush_map() in new code!
- */
-#define xdp_do_flush_map xdp_do_flush
-
void bpf_warn_invalid_xdp_action(struct net_device *dev, struct bpf_prog *prog, u32 act);
#ifdef CONFIG_INET
@@ -1127,6 +1165,7 @@ const char *__bpf_address_lookup(unsigned long addr, unsigned long *size,
bool is_bpf_text_address(unsigned long addr);
int bpf_get_kallsym(unsigned int symnum, unsigned long *value, char *type,
char *sym);
+struct bpf_prog *bpf_prog_ksym_find(unsigned long addr);
static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
@@ -1194,6 +1233,11 @@ static inline int bpf_get_kallsym(unsigned int symnum, unsigned long *value,
return -ERANGE;
}
+static inline struct bpf_prog *bpf_prog_ksym_find(unsigned long addr)
+{
+ return NULL;
+}
+
static inline const char *
bpf_address_lookup(unsigned long addr, unsigned long *size,
unsigned long *off, char **modname, char *sym)
@@ -1285,6 +1329,7 @@ struct bpf_sock_addr_kern {
*/
u64 tmp_reg;
void *t_ctx; /* Attach type specific context. */
+ u32 uaddrlen;
};
struct bpf_sock_ops_kern {
diff --git a/include/linux/firmware/meson/meson_sm.h b/include/linux/firmware/meson/meson_sm.h
index 95b0da2326a9..8eaf8922ab02 100644
--- a/include/linux/firmware/meson/meson_sm.h
+++ b/include/linux/firmware/meson/meson_sm.h
@@ -19,7 +19,7 @@ enum {
struct meson_sm_firmware;
int meson_sm_call(struct meson_sm_firmware *fw, unsigned int cmd_index,
- u32 *ret, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4);
+ s32 *ret, u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4);
int meson_sm_call_write(struct meson_sm_firmware *fw, void *buffer,
unsigned int b_size, unsigned int cmd_index, u32 arg0,
u32 arg1, u32 arg2, u32 arg3, u32 arg4);
diff --git a/include/linux/firmware/qcom/qcom_qseecom.h b/include/linux/firmware/qcom/qcom_qseecom.h
new file mode 100644
index 000000000000..5c28298a98be
--- /dev/null
+++ b/include/linux/firmware/qcom/qcom_qseecom.h
@@ -0,0 +1,52 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * Driver for Qualcomm Secure Execution Environment (SEE) interface (QSEECOM).
+ * Responsible for setting up and managing QSEECOM client devices.
+ *
+ * Copyright (C) 2023 Maximilian Luz <luzmaximilian@gmail.com>
+ */
+
+#ifndef __QCOM_QSEECOM_H
+#define __QCOM_QSEECOM_H
+
+#include <linux/auxiliary_bus.h>
+#include <linux/types.h>
+
+#include <linux/firmware/qcom/qcom_scm.h>
+
+/**
+ * struct qseecom_client - QSEECOM client device.
+ * @aux_dev: Underlying auxiliary device.
+ * @app_id: ID of the loaded application.
+ */
+struct qseecom_client {
+ struct auxiliary_device aux_dev;
+ u32 app_id;
+};
+
+/**
+ * qcom_qseecom_app_send() - Send to and receive data from a given QSEE app.
+ * @client: The QSEECOM client associated with the target app.
+ * @req: Request buffer sent to the app (must be DMA-mappable).
+ * @req_size: Size of the request buffer.
+ * @rsp: Response buffer, written to by the app (must be DMA-mappable).
+ * @rsp_size: Size of the response buffer.
+ *
+ * Sends a request to the QSEE app associated with the given client and reads
+ * back its response. The caller must provide two DMA memory regions, one for
+ * the request and one for the response, and fill out the @req region with the
+ * respective (app-specific) request data. The QSEE app reads this and returns
+ * its response in the @rsp region.
+ *
+ * Note: This is a convenience wrapper around qcom_scm_qseecom_app_send().
+ * Clients should prefer to use this wrapper.
+ *
+ * Return: Zero on success, nonzero on failure.
+ */
+static inline int qcom_qseecom_app_send(struct qseecom_client *client, void *req, size_t req_size,
+ void *rsp, size_t rsp_size)
+{
+ return qcom_scm_qseecom_app_send(client->app_id, req, req_size, rsp, rsp_size);
+}
+
+#endif /* __QCOM_QSEECOM_H */
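A hedged sketch of a client call, using kzalloc'd (and therefore DMA-mappable)
buffers; the request/response layouts and the command value are hypothetical
and app-specific:

	struct example_req { u32 cmd; } *req;
	struct example_rsp { u32 status; } *rsp;
	int ret = -ENOMEM;

	req = kzalloc(sizeof(*req), GFP_KERNEL);
	rsp = kzalloc(sizeof(*rsp), GFP_KERNEL);
	if (req && rsp) {
		req->cmd = 1;	/* hypothetical app command */
		ret = qcom_qseecom_app_send(client, req, sizeof(*req),
					    rsp, sizeof(*rsp));
	}
	kfree(rsp);
	kfree(req);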
diff --git a/include/linux/firmware/qcom/qcom_scm.h b/include/linux/firmware/qcom/qcom_scm.h
index 0c091a3f6d49..ccaf28846054 100644
--- a/include/linux/firmware/qcom/qcom_scm.h
+++ b/include/linux/firmware/qcom/qcom_scm.h
@@ -59,12 +59,12 @@ enum qcom_scm_ice_cipher {
#define QCOM_SCM_PERM_RW (QCOM_SCM_PERM_READ | QCOM_SCM_PERM_WRITE)
#define QCOM_SCM_PERM_RWX (QCOM_SCM_PERM_RW | QCOM_SCM_PERM_EXEC)
-extern bool qcom_scm_is_available(void);
+bool qcom_scm_is_available(void);
-extern int qcom_scm_set_cold_boot_addr(void *entry);
-extern int qcom_scm_set_warm_boot_addr(void *entry);
-extern void qcom_scm_cpu_power_down(u32 flags);
-extern int qcom_scm_set_remote_state(u32 state, u32 id);
+int qcom_scm_set_cold_boot_addr(void *entry);
+int qcom_scm_set_warm_boot_addr(void *entry);
+void qcom_scm_cpu_power_down(u32 flags);
+int qcom_scm_set_remote_state(u32 state, u32 id);
struct qcom_scm_pas_metadata {
void *ptr;
@@ -72,54 +72,69 @@ struct qcom_scm_pas_metadata {
ssize_t size;
};
-extern int qcom_scm_pas_init_image(u32 peripheral, const void *metadata,
- size_t size,
- struct qcom_scm_pas_metadata *ctx);
-extern void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx);
-extern int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr,
- phys_addr_t size);
-extern int qcom_scm_pas_auth_and_reset(u32 peripheral);
-extern int qcom_scm_pas_shutdown(u32 peripheral);
-extern bool qcom_scm_pas_supported(u32 peripheral);
-
-extern int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val);
-extern int qcom_scm_io_writel(phys_addr_t addr, unsigned int val);
-
-extern bool qcom_scm_restore_sec_cfg_available(void);
-extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
-extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
-extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
-extern int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size);
-extern int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
- u32 cp_nonpixel_start,
- u32 cp_nonpixel_size);
-extern int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz,
- u64 *src,
- const struct qcom_scm_vmperm *newvm,
- unsigned int dest_cnt);
-
-extern bool qcom_scm_ocmem_lock_available(void);
-extern int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset,
- u32 size, u32 mode);
-extern int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset,
- u32 size);
-
-extern bool qcom_scm_ice_available(void);
-extern int qcom_scm_ice_invalidate_key(u32 index);
-extern int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
- enum qcom_scm_ice_cipher cipher,
- u32 data_unit_size);
-
-extern bool qcom_scm_hdcp_available(void);
-extern int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt,
- u32 *resp);
-
-extern int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt);
-extern int qcom_scm_qsmmu500_wait_safe_toggle(bool en);
-
-extern int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
- u64 limit_node, u32 node_id, u64 version);
-extern int qcom_scm_lmh_profile_change(u32 profile_id);
-extern bool qcom_scm_lmh_dcvsh_available(void);
+int qcom_scm_pas_init_image(u32 peripheral, const void *metadata, size_t size,
+ struct qcom_scm_pas_metadata *ctx);
+void qcom_scm_pas_metadata_release(struct qcom_scm_pas_metadata *ctx);
+int qcom_scm_pas_mem_setup(u32 peripheral, phys_addr_t addr, phys_addr_t size);
+int qcom_scm_pas_auth_and_reset(u32 peripheral);
+int qcom_scm_pas_shutdown(u32 peripheral);
+bool qcom_scm_pas_supported(u32 peripheral);
+
+int qcom_scm_io_readl(phys_addr_t addr, unsigned int *val);
+int qcom_scm_io_writel(phys_addr_t addr, unsigned int val);
+
+bool qcom_scm_restore_sec_cfg_available(void);
+int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
+int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
+int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
+int qcom_scm_iommu_set_cp_pool_size(u32 spare, u32 size);
+int qcom_scm_mem_protect_video_var(u32 cp_start, u32 cp_size,
+ u32 cp_nonpixel_start, u32 cp_nonpixel_size);
+int qcom_scm_assign_mem(phys_addr_t mem_addr, size_t mem_sz, u64 *src,
+ const struct qcom_scm_vmperm *newvm,
+ unsigned int dest_cnt);
+
+bool qcom_scm_ocmem_lock_available(void);
+int qcom_scm_ocmem_lock(enum qcom_scm_ocmem_client id, u32 offset, u32 size,
+ u32 mode);
+int qcom_scm_ocmem_unlock(enum qcom_scm_ocmem_client id, u32 offset, u32 size);
+
+bool qcom_scm_ice_available(void);
+int qcom_scm_ice_invalidate_key(u32 index);
+int qcom_scm_ice_set_key(u32 index, const u8 *key, u32 key_size,
+ enum qcom_scm_ice_cipher cipher, u32 data_unit_size);
+
+bool qcom_scm_hdcp_available(void);
+int qcom_scm_hdcp_req(struct qcom_scm_hdcp_req *req, u32 req_cnt, u32 *resp);
+
+int qcom_scm_iommu_set_pt_format(u32 sec_id, u32 ctx_num, u32 pt_fmt);
+int qcom_scm_qsmmu500_wait_safe_toggle(bool en);
+
+int qcom_scm_lmh_dcvsh(u32 payload_fn, u32 payload_reg, u32 payload_val,
+ u64 limit_node, u32 node_id, u64 version);
+int qcom_scm_lmh_profile_change(u32 profile_id);
+bool qcom_scm_lmh_dcvsh_available(void);
+
+#ifdef CONFIG_QCOM_QSEECOM
+
+int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id);
+int qcom_scm_qseecom_app_send(u32 app_id, void *req, size_t req_size, void *rsp,
+ size_t rsp_size);
+
+#else /* CONFIG_QCOM_QSEECOM */
+
+static inline int qcom_scm_qseecom_app_get_id(const char *app_name, u32 *app_id)
+{
+ return -EINVAL;
+}
+
+static inline int qcom_scm_qseecom_app_send(u32 app_id, void *req,
+ size_t req_size, void *rsp,
+ size_t rsp_size)
+{
+ return -EINVAL;
+}
+
+#endif /* CONFIG_QCOM_QSEECOM */
#endif
diff --git a/include/linux/firmware/xlnx-zynqmp.h b/include/linux/firmware/xlnx-zynqmp.h
index e8b12ec8b060..d1ea3898564c 100644
--- a/include/linux/firmware/xlnx-zynqmp.h
+++ b/include/linux/firmware/xlnx-zynqmp.h
@@ -100,6 +100,18 @@
#define SD_ITAPDLY 0xFF180314
#define SD_OTAPDLYSEL 0xFF180318
+/**
+ * XPM_EVENT_ERROR_MASK_DDRMC_CR: Error event mask for DDRMC MC Correctable ECC Error.
+ */
+#define XPM_EVENT_ERROR_MASK_DDRMC_CR BIT(18)
+
+/**
+ * XPM_EVENT_ERROR_MASK_DDRMC_NCR: Error event mask for DDRMC MC Non-Correctable ECC Error.
+ */
+#define XPM_EVENT_ERROR_MASK_DDRMC_NCR BIT(19)
+#define XPM_EVENT_ERROR_MASK_NOC_NCR BIT(13)
+#define XPM_EVENT_ERROR_MASK_NOC_CR BIT(12)
+
enum pm_api_cb_id {
PM_INIT_SUSPEND_CB = 30,
PM_ACKNOWLEDGE_CB = 31,
diff --git a/include/linux/fortify-string.h b/include/linux/fortify-string.h
index da51a83b2829..79ef6ac4c021 100644
--- a/include/linux/fortify-string.h
+++ b/include/linux/fortify-string.h
@@ -93,13 +93,9 @@ extern char *__underlying_strncpy(char *p, const char *q, __kernel_size_t size)
#if __has_builtin(__builtin_dynamic_object_size)
#define POS __pass_dynamic_object_size(1)
#define POS0 __pass_dynamic_object_size(0)
-#define __struct_size(p) __builtin_dynamic_object_size(p, 0)
-#define __member_size(p) __builtin_dynamic_object_size(p, 1)
#else
#define POS __pass_object_size(1)
#define POS0 __pass_object_size(0)
-#define __struct_size(p) __builtin_object_size(p, 0)
-#define __member_size(p) __builtin_object_size(p, 1)
#endif
#define __compiletime_lessthan(bounds, length) ( \
@@ -643,7 +639,7 @@ __FORTIFY_INLINE bool fortify_memcpy_chk(__kernel_size_t size,
__q_size_field, #op), \
#op ": detected field-spanning write (size %zu) of single %s (size %zu)\n", \
__fortify_size, \
- "field \"" #p "\" at " __FILE__ ":" __stringify(__LINE__), \
+ "field \"" #p "\" at " FILE_LINE, \
__p_size_field); \
__underlying_##op(p, q, __fortify_size); \
})
diff --git a/include/linux/freelist.h b/include/linux/freelist.h
deleted file mode 100644
index fc1842b96469..000000000000
--- a/include/linux/freelist.h
+++ /dev/null
@@ -1,129 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause */
-#ifndef FREELIST_H
-#define FREELIST_H
-
-#include <linux/atomic.h>
-
-/*
- * Copyright: cameron@moodycamel.com
- *
- * A simple CAS-based lock-free free list. Not the fastest thing in the world
- * under heavy contention, but simple and correct (assuming nodes are never
- * freed until after the free list is destroyed), and fairly speedy under low
- * contention.
- *
- * Adapted from: https://moodycamel.com/blog/2014/solving-the-aba-problem-for-lock-free-free-lists
- */
-
-struct freelist_node {
- atomic_t refs;
- struct freelist_node *next;
-};
-
-struct freelist_head {
- struct freelist_node *head;
-};
-
-#define REFS_ON_FREELIST 0x80000000
-#define REFS_MASK 0x7FFFFFFF
-
-static inline void __freelist_add(struct freelist_node *node, struct freelist_head *list)
-{
- /*
- * Since the refcount is zero, and nobody can increase it once it's
- * zero (except us, and we run only one copy of this method per node at
- * a time, i.e. the single thread case), then we know we can safely
- * change the next pointer of the node; however, once the refcount is
- * back above zero, then other threads could increase it (happens under
- * heavy contention, when the refcount goes to zero in between a load
- * and a refcount increment of a node in try_get, then back up to
- * something non-zero, then the refcount increment is done by the other
- * thread) -- so if the CAS to add the node to the actual list fails,
- * decrese the refcount and leave the add operation to the next thread
- * who puts the refcount back to zero (which could be us, hence the
- * loop).
- */
- struct freelist_node *head = READ_ONCE(list->head);
-
- for (;;) {
- WRITE_ONCE(node->next, head);
- atomic_set_release(&node->refs, 1);
-
- if (!try_cmpxchg_release(&list->head, &head, node)) {
- /*
- * Hmm, the add failed, but we can only try again when
- * the refcount goes back to zero.
- */
- if (atomic_fetch_add_release(REFS_ON_FREELIST - 1, &node->refs) == 1)
- continue;
- }
- return;
- }
-}
-
-static inline void freelist_add(struct freelist_node *node, struct freelist_head *list)
-{
- /*
- * We know that the should-be-on-freelist bit is 0 at this point, so
- * it's safe to set it using a fetch_add.
- */
- if (!atomic_fetch_add_release(REFS_ON_FREELIST, &node->refs)) {
- /*
- * Oh look! We were the last ones referencing this node, and we
- * know we want to add it to the free list, so let's do it!
- */
- __freelist_add(node, list);
- }
-}
-
-static inline struct freelist_node *freelist_try_get(struct freelist_head *list)
-{
- struct freelist_node *prev, *next, *head = smp_load_acquire(&list->head);
- unsigned int refs;
-
- while (head) {
- prev = head;
- refs = atomic_read(&head->refs);
- if ((refs & REFS_MASK) == 0 ||
- !atomic_try_cmpxchg_acquire(&head->refs, &refs, refs+1)) {
- head = smp_load_acquire(&list->head);
- continue;
- }
-
- /*
- * Good, reference count has been incremented (it wasn't at
- * zero), which means we can read the next and not worry about
- * it changing between now and the time we do the CAS.
- */
- next = READ_ONCE(head->next);
- if (try_cmpxchg_acquire(&list->head, &head, next)) {
- /*
- * Yay, got the node. This means it was on the list,
- * which means should-be-on-freelist must be false no
- * matter the refcount (because nobody else knows it's
- * been taken off yet, it can't have been put back on).
- */
- WARN_ON_ONCE(atomic_read(&head->refs) & REFS_ON_FREELIST);
-
- /*
- * Decrease refcount twice, once for our ref, and once
- * for the list's ref.
- */
- atomic_fetch_add(-2, &head->refs);
-
- return head;
- }
-
- /*
- * OK, the head must have changed on us, but we still need to decrement
- * the refcount we increased.
- */
- refs = atomic_fetch_add(-1, &prev->refs);
- if (refs == REFS_ON_FREELIST + 1)
- __freelist_add(prev, list);
- }
-
- return NULL;
-}
-
-#endif /* FREELIST_H */
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 4aeb3fa11927..98b7a7a8c42e 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -67,7 +67,7 @@ struct swap_info_struct;
struct seq_file;
struct workqueue_struct;
struct iov_iter;
-struct fscrypt_info;
+struct fscrypt_inode_info;
struct fscrypt_operations;
struct fsverity_info;
struct fsverity_operations;
@@ -454,7 +454,7 @@ extern const struct address_space_operations empty_aops;
* It is also used to block modification of page cache contents through
* memory mappings.
* @gfp_mask: Memory allocation flags to use for allocating pages.
- * @i_mmap_writable: Number of VM_SHARED mappings.
+ * @i_mmap_writable: Number of VM_SHARED, VM_MAYWRITE mappings.
* @nr_thps: Number of THPs in the pagecache (non-shmem only).
* @i_mmap: Tree of private and shared mappings.
* @i_mmap_rwsem: Protects @i_mmap and @i_mmap_writable.
@@ -557,7 +557,7 @@ static inline int mapping_mapped(struct address_space *mapping)
/*
* Might pages of this file have been modified in userspace?
- * Note that i_mmap_writable counts all VM_SHARED vmas: do_mmap
+ * Note that i_mmap_writable counts all VM_SHARED, VM_MAYWRITE vmas: do_mmap
* marks vma as VM_SHARED if it is shared, and the file was opened for
 * writing, i.e. the vma may be mprotected writable even if currently readonly.
*
@@ -671,8 +671,8 @@ struct inode {
};
dev_t i_rdev;
loff_t i_size;
- struct timespec64 i_atime;
- struct timespec64 i_mtime;
+ struct timespec64 __i_atime;
+ struct timespec64 __i_mtime;
struct timespec64 __i_ctime; /* use inode_*_ctime accessors! */
spinlock_t i_lock; /* i_blocks, i_bytes, maybe i_size */
unsigned short i_bytes;
@@ -738,7 +738,7 @@ struct inode {
#endif
#ifdef CONFIG_FS_ENCRYPTION
- struct fscrypt_info *i_crypt_info;
+ struct fscrypt_inode_info *i_crypt_info;
#endif
#ifdef CONFIG_FS_VERITY
@@ -1042,7 +1042,10 @@ static inline struct file *get_file(struct file *f)
atomic_long_inc(&f->f_count);
return f;
}
-#define get_file_rcu(x) atomic_long_inc_not_zero(&(x)->f_count)
+
+struct file *get_file_rcu(struct file __rcu **f);
+struct file *get_file_active(struct file **f);
+
#define file_count(x) atomic_long_read(&(x)->f_count)
#define MAX_NON_LFS ((1UL<<31) - 1)
@@ -1119,7 +1122,7 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_NOATIME BIT(10) /* Do not update access times. */
#define SB_NODIRATIME BIT(11) /* Do not update directory access times */
#define SB_SILENT BIT(15)
-#define SB_POSIXACL BIT(16) /* VFS does not apply the umask */
+#define SB_POSIXACL BIT(16) /* Supports POSIX ACLs */
#define SB_INLINECRYPT BIT(17) /* Use blk-crypto for encrypted files */
#define SB_KERNMOUNT BIT(22) /* this is a kern_mount call */
#define SB_I_VERSION BIT(23) /* Update inode I_version field */
@@ -1166,6 +1169,7 @@ extern int send_sigurg(struct fown_struct *fown);
#define SB_I_PERSB_BDI 0x00000200 /* has a per-sb bdi */
#define SB_I_TS_EXPIRY_WARNED 0x00000400 /* warned about timestamp range expiry */
#define SB_I_RETIRED 0x00000800 /* superblock shouldn't be reused */
+#define SB_I_NOUMASK 0x00001000 /* VFS does not apply umask */
/* Possible states of 'frozen' field */
enum {
@@ -1206,7 +1210,7 @@ struct super_block {
#ifdef CONFIG_SECURITY
void *s_security;
#endif
- const struct xattr_handler **s_xattr;
+ const struct xattr_handler * const *s_xattr;
#ifdef CONFIG_FS_ENCRYPTION
const struct fscrypt_operations *s_cop;
struct fscrypt_keyring *s_master_keys; /* master crypto keys in use */
@@ -1221,6 +1225,7 @@ struct super_block {
struct hlist_bl_head s_roots; /* alternate root dentries for NFS */
struct list_head s_mounts; /* list of mounts; _not_ for fs use */
struct block_device *s_bdev;
+ struct bdev_handle *s_bdev_handle;
struct backing_dev_info *s_bdi;
struct mtd_info *s_mtd;
struct hlist_node s_instances;
@@ -1265,7 +1270,7 @@ struct super_block {
const struct dentry_operations *s_d_op; /* default d_op for dentries */
- struct shrinker s_shrink; /* per-sb shrinker handle */
+ struct shrinker *s_shrink; /* per-sb shrinker handle */
/* Number of inodes with nlink == 0 but still referenced */
atomic_long_t s_remove_count;
@@ -1508,56 +1513,84 @@ static inline bool fsuidgid_has_mapping(struct super_block *sb,
kgid_has_mapping(fs_userns, kgid);
}
-struct timespec64 current_mgtime(struct inode *inode);
struct timespec64 current_time(struct inode *inode);
struct timespec64 inode_set_ctime_current(struct inode *inode);
-/*
- * Multigrain timestamps
- *
- * Conditionally use fine-grained ctime and mtime timestamps when there
- * are users actively observing them via getattr. The primary use-case
- * for this is NFS clients that use the ctime to distinguish between
- * different states of the file, and that are often fooled by multiple
- * operations that occur in the same coarse-grained timer tick.
- *
- * The kernel always keeps normalized struct timespec64 values in the ctime,
- * which means that only the first 30 bits of the value are used. Use the
- * 31st bit of the ctime's tv_nsec field as a flag to indicate that the value
- * has been queried since it was last updated.
- */
-#define I_CTIME_QUERIED (1L<<30)
+static inline time64_t inode_get_atime_sec(const struct inode *inode)
+{
+ return inode->__i_atime.tv_sec;
+}
-/**
- * inode_get_ctime - fetch the current ctime from the inode
- * @inode: inode from which to fetch ctime
- *
- * Grab the current ctime tv_nsec field from the inode, mask off the
- * I_CTIME_QUERIED flag and return it. This is mostly intended for use by
- * internal consumers of the ctime that aren't concerned with ensuring a
- * fine-grained update on the next change (e.g. when preparing to store
- * the value in the backing store for later retrieval).
- *
- * This is safe to call regardless of whether the underlying filesystem
- * is using multigrain timestamps.
- */
-static inline struct timespec64 inode_get_ctime(const struct inode *inode)
+static inline long inode_get_atime_nsec(const struct inode *inode)
+{
+ return inode->__i_atime.tv_nsec;
+}
+
+static inline struct timespec64 inode_get_atime(const struct inode *inode)
+{
+ return inode->__i_atime;
+}
+
+static inline struct timespec64 inode_set_atime_to_ts(struct inode *inode,
+ struct timespec64 ts)
{
- struct timespec64 ctime;
+ inode->__i_atime = ts;
+ return ts;
+}
+
+static inline struct timespec64 inode_set_atime(struct inode *inode,
+ time64_t sec, long nsec)
+{
+ struct timespec64 ts = { .tv_sec = sec,
+ .tv_nsec = nsec };
+ return inode_set_atime_to_ts(inode, ts);
+}
- ctime.tv_sec = inode->__i_ctime.tv_sec;
- ctime.tv_nsec = inode->__i_ctime.tv_nsec & ~I_CTIME_QUERIED;
+static inline time64_t inode_get_mtime_sec(const struct inode *inode)
+{
+ return inode->__i_mtime.tv_sec;
+}
- return ctime;
+static inline long inode_get_mtime_nsec(const struct inode *inode)
+{
+ return inode->__i_mtime.tv_nsec;
+}
+
+static inline struct timespec64 inode_get_mtime(const struct inode *inode)
+{
+ return inode->__i_mtime;
+}
+
+static inline struct timespec64 inode_set_mtime_to_ts(struct inode *inode,
+ struct timespec64 ts)
+{
+ inode->__i_mtime = ts;
+ return ts;
+}
+
+static inline struct timespec64 inode_set_mtime(struct inode *inode,
+ time64_t sec, long nsec)
+{
+ struct timespec64 ts = { .tv_sec = sec,
+ .tv_nsec = nsec };
+ return inode_set_mtime_to_ts(inode, ts);
+}
+
+static inline time64_t inode_get_ctime_sec(const struct inode *inode)
+{
+ return inode->__i_ctime.tv_sec;
+}
+
+static inline long inode_get_ctime_nsec(const struct inode *inode)
+{
+ return inode->__i_ctime.tv_nsec;
+}
+
+static inline struct timespec64 inode_get_ctime(const struct inode *inode)
+{
+ return inode->__i_ctime;
}
-/**
- * inode_set_ctime_to_ts - set the ctime in the inode
- * @inode: inode in which to set the ctime
- * @ts: value to set in the ctime field
- *
- * Set the ctime in @inode to @ts
- */
static inline struct timespec64 inode_set_ctime_to_ts(struct inode *inode,
struct timespec64 ts)
{
@@ -1582,6 +1615,8 @@ static inline struct timespec64 inode_set_ctime(struct inode *inode,
return inode_set_ctime_to_ts(inode, ts);
}
+struct timespec64 simple_inode_init_ts(struct inode *inode);
+
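With direct access to __i_atime and __i_mtime discouraged, a filesystem that
updates all three timestamps now goes through the accessors; a sketch with a
hypothetical helper (the fs_stack.h hunk later in this patch is a real
conversion of the same shape):

	static void example_touch(struct inode *inode)
	{
		struct timespec64 now = current_time(inode);

		inode_set_atime_to_ts(inode, now);
		inode_set_mtime_to_ts(inode, now);
		inode_set_ctime_to_ts(inode, now);
	}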
/*
* Snapshotting support.
*/
@@ -2110,7 +2145,12 @@ static inline bool sb_rdonly(const struct super_block *sb) { return sb->s_flags
#define IS_NOQUOTA(inode) ((inode)->i_flags & S_NOQUOTA)
#define IS_APPEND(inode) ((inode)->i_flags & S_APPEND)
#define IS_IMMUTABLE(inode) ((inode)->i_flags & S_IMMUTABLE)
+
+#ifdef CONFIG_FS_POSIX_ACL
#define IS_POSIXACL(inode) __IS_FLG(inode, SB_POSIXACL)
+#else
+#define IS_POSIXACL(inode) 0
+#endif
#define IS_DEADDIR(inode) ((inode)->i_flags & S_DEAD)
#define IS_NOCMTIME(inode) ((inode)->i_flags & S_NOCMTIME)
@@ -2334,7 +2374,6 @@ struct file_system_type {
#define FS_USERNS_MOUNT 8 /* Can be mounted by userns root */
#define FS_DISALLOW_NOTIFY_PERM 16 /* Disable fanotify permission events */
#define FS_ALLOW_IDMAP 32 /* FS has been updated to handle vfs idmappings. */
-#define FS_MGTIME 64 /* FS uses multigrain timestamps */
#define FS_RENAME_DOES_D_MOVE 32768 /* FS will handle d_move() during rename() internally. */
int (*init_fs_context)(struct fs_context *);
const struct fs_parameter_spec *parameters;
@@ -2358,17 +2397,6 @@ struct file_system_type {
#define MODULE_ALIAS_FS(NAME) MODULE_ALIAS("fs-" NAME)
-/**
- * is_mgtime: is this inode using multigrain timestamps
- * @inode: inode to test for multigrain timestamps
- *
- * Return true if the inode uses multigrain timestamps, false otherwise.
- */
-static inline bool is_mgtime(const struct inode *inode)
-{
- return inode->i_sb->s_type->fs_flags & FS_MGTIME;
-}
-
extern struct dentry *mount_bdev(struct file_system_type *fs_type,
int flags, const char *dev_name, void *data,
int (*fill_super)(struct super_block *, void *, int));
@@ -2444,13 +2472,13 @@ struct audit_names;
struct filename {
const char *name; /* pointer to actual string */
const __user char *uptr; /* original userland pointer */
- int refcnt;
+ atomic_t refcnt;
struct audit_names *aname;
const char iname[];
};
static_assert(offsetof(struct filename, iname) % sizeof(long) == 0);
-static inline struct mnt_idmap *file_mnt_idmap(struct file *file)
+static inline struct mnt_idmap *file_mnt_idmap(const struct file *file)
{
return mnt_idmap(file->f_path.mnt);
}
@@ -2489,24 +2517,24 @@ struct file *dentry_open(const struct path *path, int flags,
const struct cred *creds);
struct file *dentry_create(const struct path *path, int flags, umode_t mode,
const struct cred *cred);
-struct file *backing_file_open(const struct path *path, int flags,
+struct file *backing_file_open(const struct path *user_path, int flags,
const struct path *real_path,
const struct cred *cred);
-struct path *backing_file_real_path(struct file *f);
+struct path *backing_file_user_path(struct file *f);
/*
- * file_real_path - get the path corresponding to f_inode
+ * file_user_path - get the path to display for memory mapped file
*
- * When opening a backing file for a stackable filesystem (e.g.,
- * overlayfs) f_path may be on the stackable filesystem and f_inode on
- * the underlying filesystem. When the path associated with f_inode is
- * needed, this helper should be used instead of accessing f_path
- * directly.
-*/
-static inline const struct path *file_real_path(struct file *f)
+ * When mmapping a file on a stackable filesystem (e.g., overlayfs), the file
+ * stored in ->vm_file is a backing file whose f_inode is on the underlying
+ * filesystem. When the mapped file path is displayed to the user (e.g. via
+ * /proc/<pid>/maps), this helper should be used to get the path to display
+ * to the user, which is the path of the fd that the user requested to map.
+ */
+static inline const struct path *file_user_path(struct file *f)
{
if (unlikely(f->f_mode & FMODE_BACKING))
- return backing_file_real_path(f);
+ return backing_file_user_path(f);
return &f->f_path;
}
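A hedged sketch of the intended call site: a /proc-style printer showing a
mapped file would resolve the display path through the helper rather than
touching f_path directly:

	/* file is vma->vm_file, m is a struct seq_file * */
	const struct path *path = file_user_path(file);

	seq_path(m, path, "\n");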
@@ -3054,7 +3082,6 @@ extern void page_put_link(void *);
extern int page_symlink(struct inode *inode, const char *symname, int len);
extern const struct inode_operations page_symlink_inode_operations;
extern void kfree_link(void *);
-void fill_mg_cmtime(struct kstat *stat, u32 request_mask, struct inode *inode);
void generic_fillattr(struct mnt_idmap *, u32, struct inode *, struct kstat *);
void generic_fill_statx_attr(struct inode *inode, struct kstat *stat);
extern int vfs_getattr_nosec(const struct path *, struct kstat *, u32, unsigned int);
diff --git a/include/linux/fs_context.h b/include/linux/fs_context.h
index 96332db693d5..c13e99cbbf81 100644
--- a/include/linux/fs_context.h
+++ b/include/linux/fs_context.h
@@ -136,6 +136,8 @@ extern struct fs_context *vfs_dup_fs_context(struct fs_context *fc);
extern int vfs_parse_fs_param(struct fs_context *fc, struct fs_parameter *param);
extern int vfs_parse_fs_string(struct fs_context *fc, const char *key,
const char *value, size_t v_size);
+int vfs_parse_monolithic_sep(struct fs_context *fc, void *data,
+ char *(*sep)(char **));
extern int generic_parse_monolithic(struct fs_context *fc, void *data);
extern int vfs_get_tree(struct fs_context *fc);
extern void put_fs_context(struct fs_context *fc);
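The sep callback lets a filesystem split monolithic mount data on its own
delimiter; a sketch of a comma-separated parser (example_sep and
example_parse_monolithic are hypothetical names):

	static char *example_sep(char **s)
	{
		return strsep(s, ",");
	}

	static int example_parse_monolithic(struct fs_context *fc, void *data)
	{
		return vfs_parse_monolithic_sep(fc, data, example_sep);
	}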
diff --git a/include/linux/fs_stack.h b/include/linux/fs_stack.h
index 010d39d0dc1c..2b1f74b24070 100644
--- a/include/linux/fs_stack.h
+++ b/include/linux/fs_stack.h
@@ -16,14 +16,14 @@ extern void fsstack_copy_inode_size(struct inode *dst, struct inode *src);
static inline void fsstack_copy_attr_atime(struct inode *dest,
const struct inode *src)
{
- dest->i_atime = src->i_atime;
+ inode_set_atime_to_ts(dest, inode_get_atime(src));
}
static inline void fsstack_copy_attr_times(struct inode *dest,
const struct inode *src)
{
- dest->i_atime = src->i_atime;
- dest->i_mtime = src->i_mtime;
+ inode_set_atime_to_ts(dest, inode_get_atime(src));
+ inode_set_mtime_to_ts(dest, inode_get_mtime(src));
inode_set_ctime_to_ts(dest, inode_get_ctime(src));
}
diff --git a/include/linux/fscrypt.h b/include/linux/fscrypt.h
index c895b12737a1..12f9e455d569 100644
--- a/include/linux/fscrypt.h
+++ b/include/linux/fscrypt.h
@@ -31,7 +31,7 @@
#define FSCRYPT_CONTENTS_ALIGNMENT 16
union fscrypt_policy;
-struct fscrypt_info;
+struct fscrypt_inode_info;
struct fs_parameter;
struct seq_file;
@@ -59,26 +59,55 @@ struct fscrypt_name {
#ifdef CONFIG_FS_ENCRYPTION
-/*
- * If set, the fscrypt bounce page pool won't be allocated (unless another
- * filesystem needs it). Set this if the filesystem always uses its own bounce
- * pages for writes and therefore won't need the fscrypt bounce page pool.
- */
-#define FS_CFLG_OWN_PAGES (1U << 1)
-
/* Crypto operations for filesystems */
struct fscrypt_operations {
- /* Set of optional flags; see above for allowed flags */
- unsigned int flags;
+ /*
+ * If set, then fs/crypto/ will allocate a global bounce page pool the
+ * first time an encryption key is set up for a file. The bounce page
+ * pool is required by the following functions:
+ *
+ * - fscrypt_encrypt_pagecache_blocks()
+ * - fscrypt_zeroout_range() for files not using inline crypto
+ *
+ * If the filesystem doesn't use those, it doesn't need to set this.
+ */
+ unsigned int needs_bounce_pages : 1;
/*
- * If set, this is a filesystem-specific key description prefix that
- * will be accepted for "logon" keys for v1 fscrypt policies, in
- * addition to the generic prefix "fscrypt:". This functionality is
- * deprecated, so new filesystems shouldn't set this field.
+ * If set, then fs/crypto/ will allow the use of encryption settings
+ * that assume inode numbers fit in 32 bits (i.e.
+ * FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64}), provided that the other
+ * prerequisites for these settings are also met. This is only useful
+ * if the filesystem wants to support inline encryption hardware that is
+ * limited to 32-bit or 64-bit data unit numbers and where programming
+ * keyslots is very slow.
*/
- const char *key_prefix;
+ unsigned int has_32bit_inodes : 1;
+
+ /*
+ * If set, then fs/crypto/ will allow users to select a crypto data unit
+ * size that is less than the filesystem block size. This is done via
+ * the log2_data_unit_size field of the fscrypt policy. This flag is
+ * not compatible with filesystems that encrypt variable-length blocks
+ * (i.e. blocks that aren't all equal to filesystem's block size), for
+ * example as a result of compression. It's also not compatible with
+ * the fscrypt_encrypt_block_inplace() and
+ * fscrypt_decrypt_block_inplace() functions.
+ */
+ unsigned int supports_subblock_data_units : 1;
+
+ /*
+ * This field exists only for backwards compatibility reasons and should
+ * only be set by the filesystems that are setting it already. It
+ * contains the filesystem-specific key description prefix that is
+ * accepted for "logon" keys for v1 fscrypt policies. This
+ * functionality is deprecated in favor of the generic prefix
+ * "fscrypt:", which itself is deprecated in favor of the filesystem
+ * keyring ioctls such as FS_IOC_ADD_ENCRYPTION_KEY. Filesystems that
+ * are newly adding fscrypt support should not set this field.
+ */
+ const char *legacy_key_prefix;
/*
* Get the fscrypt context of the given inode.
@@ -146,21 +175,6 @@ struct fscrypt_operations {
bool (*has_stable_inodes)(struct super_block *sb);
/*
- * Get the number of bits that the filesystem uses to represent inode
- * numbers and file logical block numbers.
- *
- * By default, both of these are assumed to be 64-bit. This function
- * can be implemented to declare that either or both of these numbers is
- * shorter, which may allow the use of the
- * FSCRYPT_POLICY_FLAG_IV_INO_LBLK_{32,64} flags and/or the use of
- * inline crypto hardware whose maximum DUN length is less than 64 bits
- * (e.g., eMMC v5.2 spec compliant hardware). This function only needs
- * to be implemented if support for one of these features is needed.
- */
- void (*get_ino_and_lblk_bits)(struct super_block *sb,
- int *ino_bits_ret, int *lblk_bits_ret);
-
- /*
* Return an array of pointers to the block devices to which the
* filesystem may write encrypted file contents, NULL if the filesystem
* only has a single such block device, or an ERR_PTR() on error.
@@ -178,7 +192,8 @@ struct fscrypt_operations {
unsigned int *num_devs);
};
-static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode)
+static inline struct fscrypt_inode_info *
+fscrypt_get_inode_info(const struct inode *inode)
{
/*
* Pairs with the cmpxchg_release() in fscrypt_setup_encryption_info().
@@ -390,7 +405,8 @@ static inline void fscrypt_set_ops(struct super_block *sb,
}
#else /* !CONFIG_FS_ENCRYPTION */
-static inline struct fscrypt_info *fscrypt_get_info(const struct inode *inode)
+static inline struct fscrypt_inode_info *
+fscrypt_get_inode_info(const struct inode *inode)
{
return NULL;
}
@@ -868,7 +884,7 @@ static inline bool fscrypt_inode_uses_fs_layer_crypto(const struct inode *inode)
*/
static inline bool fscrypt_has_encryption_key(const struct inode *inode)
{
- return fscrypt_get_info(inode) != NULL;
+ return fscrypt_get_inode_info(inode) != NULL;
}
/**
diff --git a/include/linux/fsnotify.h b/include/linux/fsnotify.h
index ed48e4f1e755..bcb6609b54b3 100644
--- a/include/linux/fsnotify.h
+++ b/include/linux/fsnotify.h
@@ -96,8 +96,7 @@ static inline int fsnotify_file(struct file *file, __u32 mask)
if (file->f_mode & FMODE_NONOTIFY)
return 0;
- /* Overlayfs internal files have fake f_path */
- path = file_real_path(file);
+ path = &file->f_path;
return fsnotify_parent(path->dentry, mask, path, FSNOTIFY_EVENT_PATH);
}
diff --git a/include/linux/fw_table.h b/include/linux/fw_table.h
new file mode 100644
index 000000000000..ff8fa58d5818
--- /dev/null
+++ b/include/linux/fw_table.h
@@ -0,0 +1,43 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/*
+ * fw_table.h - Parsing support for ACPI and ACPI-like tables provided by
+ * platform or device firmware
+ *
+ * Copyright (C) 2001 Paul Diefenbaugh <paul.s.diefenbaugh@intel.com>
+ * Copyright (C) 2023 Intel Corp.
+ */
+#ifndef _FW_TABLE_H_
+#define _FW_TABLE_H_
+
+union acpi_subtable_headers;
+
+typedef int (*acpi_tbl_entry_handler)(union acpi_subtable_headers *header,
+ const unsigned long end);
+
+typedef int (*acpi_tbl_entry_handler_arg)(union acpi_subtable_headers *header,
+ void *arg, const unsigned long end);
+
+struct acpi_subtable_proc {
+ int id;
+ acpi_tbl_entry_handler handler;
+ acpi_tbl_entry_handler_arg handler_arg;
+ void *arg;
+ int count;
+};
+
+#include <linux/acpi.h>
+#include <acpi/acpi.h>
+
+union acpi_subtable_headers {
+ struct acpi_subtable_header common;
+ struct acpi_hmat_structure hmat;
+ struct acpi_prmt_module_header prmt;
+ struct acpi_cedt_header cedt;
+};
+
+int acpi_parse_entries_array(char *id, unsigned long table_size,
+ struct acpi_table_header *table_header,
+ struct acpi_subtable_proc *proc,
+ int proc_num, unsigned int max_entries);
+
+#endif
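A hedged sketch of the calling convention (the subtable id and the handler are
placeholders; table_header would come from acpi_get_table()):

	static int __init example_handler(union acpi_subtable_headers *header,
					  const unsigned long end)
	{
		return 0;	/* non-negative: keep walking subtables */
	}

	struct acpi_subtable_proc proc[] = {
		{ .id = 0 /* hypothetical subtable type */,
		  .handler = example_handler, },
	};

	count = acpi_parse_entries_array(ACPI_SIG_CEDT,
					 sizeof(struct acpi_table_cedt),
					 table_header, proc, ARRAY_SIZE(proc), 0);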
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 5700451b300f..2a72f55d26eb 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -41,6 +41,8 @@ struct device;
struct fwnode_handle {
struct fwnode_handle *secondary;
const struct fwnode_operations *ops;
+
+ /* The below is used solely by device links, don't use otherwise */
struct device *dev;
struct list_head suppliers;
struct list_head consumers;
diff --git a/include/linux/generic-radix-tree.h b/include/linux/generic-radix-tree.h
index 107613f7d792..847413164738 100644
--- a/include/linux/generic-radix-tree.h
+++ b/include/linux/generic-radix-tree.h
@@ -38,6 +38,7 @@
#include <asm/page.h>
#include <linux/bug.h>
+#include <linux/limits.h>
#include <linux/log2.h>
#include <linux/math.h>
#include <linux/types.h>
@@ -116,6 +117,11 @@ static inline size_t __idx_to_offset(size_t idx, size_t obj_size)
#define __genradix_cast(_radix) (typeof((_radix)->type[0]) *)
#define __genradix_obj_size(_radix) sizeof((_radix)->type[0])
+#define __genradix_objs_per_page(_radix) \
+ (PAGE_SIZE / sizeof((_radix)->type[0]))
+#define __genradix_page_remainder(_radix) \
+ (PAGE_SIZE % sizeof((_radix)->type[0]))
+
#define __genradix_idx_to_offset(_radix, _idx) \
__idx_to_offset(_idx, __genradix_obj_size(_radix))
@@ -179,11 +185,35 @@ void *__genradix_iter_peek(struct genradix_iter *, struct __genradix *, size_t);
#define genradix_iter_peek(_iter, _radix) \
(__genradix_cast(_radix) \
__genradix_iter_peek(_iter, &(_radix)->tree, \
- PAGE_SIZE / __genradix_obj_size(_radix)))
+ __genradix_objs_per_page(_radix)))
+
+void *__genradix_iter_peek_prev(struct genradix_iter *, struct __genradix *,
+ size_t, size_t);
+
+/**
+ * genradix_iter_peek_prev - get first entry at or below iterator's current
+ * position
+ * @_iter: a genradix_iter
+ * @_radix: genradix being iterated over
+ *
+ * If no more entries exist at or below @_iter's current position, returns NULL
+ */
+#define genradix_iter_peek_prev(_iter, _radix) \
+ (__genradix_cast(_radix) \
+ __genradix_iter_peek_prev(_iter, &(_radix)->tree, \
+ __genradix_objs_per_page(_radix), \
+ __genradix_obj_size(_radix) + \
+ __genradix_page_remainder(_radix)))
static inline void __genradix_iter_advance(struct genradix_iter *iter,
size_t obj_size)
{
+ if (iter->offset + obj_size < iter->offset) {
+ iter->offset = SIZE_MAX;
+ iter->pos = SIZE_MAX;
+ return;
+ }
+
iter->offset += obj_size;
if (!is_power_of_2(obj_size) &&
@@ -196,6 +226,25 @@ static inline void __genradix_iter_advance(struct genradix_iter *iter,
#define genradix_iter_advance(_iter, _radix) \
__genradix_iter_advance(_iter, __genradix_obj_size(_radix))
+static inline void __genradix_iter_rewind(struct genradix_iter *iter,
+ size_t obj_size)
+{
+ if (iter->offset == 0 ||
+ iter->offset == SIZE_MAX) {
+ iter->offset = SIZE_MAX;
+ return;
+ }
+
+ if ((iter->offset & (PAGE_SIZE - 1)) == 0)
+ iter->offset -= PAGE_SIZE % obj_size;
+
+ iter->offset -= obj_size;
+ iter->pos--;
+}
+
+#define genradix_iter_rewind(_iter, _radix) \
+ __genradix_iter_rewind(_iter, __genradix_obj_size(_radix))
+
#define genradix_for_each_from(_radix, _iter, _p, _start) \
for (_iter = genradix_iter_init(_radix, _start); \
(_p = genradix_iter_peek(&_iter, _radix)) != NULL; \
@@ -213,6 +262,23 @@ static inline void __genradix_iter_advance(struct genradix_iter *iter,
#define genradix_for_each(_radix, _iter, _p) \
genradix_for_each_from(_radix, _iter, _p, 0)
+#define genradix_last_pos(_radix) \
+ (SIZE_MAX / PAGE_SIZE * __genradix_objs_per_page(_radix) - 1)
+
+/**
+ * genradix_for_each_reverse - iterate over entry in a genradix, reverse order
+ * @_radix: genradix to iterate over
+ * @_iter: a genradix_iter to track current position
+ * @_p: pointer to genradix entry type
+ *
+ * On every iteration, @_p will point to the current entry, and @_iter.pos
+ * will be the current entry's index.
+ */
+#define genradix_for_each_reverse(_radix, _iter, _p) \
+ for (_iter = genradix_iter_init(_radix, genradix_last_pos(_radix));\
+ (_p = genradix_iter_peek_prev(&_iter, _radix)) != NULL;\
+ genradix_iter_rewind(&_iter, _radix))
+
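Usage is symmetric with the forward iterator; a sketch assuming a genradix
declared with this header's GENRADIX() macro:

	GENRADIX(u64) r;
	struct genradix_iter iter;
	u64 *p;

	genradix_init(&r);
	/* ... populate entries ... */
	genradix_for_each_reverse(&r, iter, p)
		pr_info("pos %zu: %llu\n", iter.pos, *p);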
int __genradix_prealloc(struct __genradix *, size_t, gfp_t);
/**
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index 665f06675c83..de292a007138 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -8,6 +8,7 @@
#include <linux/topology.h>
struct vm_area_struct;
+struct mempolicy;
/* Convert GFP flags to their corresponding migrate type */
#define GFP_MOVABLE_MASK (__GFP_RECLAIMABLE|__GFP_MOVABLE)
@@ -262,7 +263,9 @@ static inline struct page *alloc_pages_node(int nid, gfp_t gfp_mask,
#ifdef CONFIG_NUMA
struct page *alloc_pages(gfp_t gfp, unsigned int order);
-struct folio *folio_alloc(gfp_t gfp, unsigned order);
+struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
+ struct mempolicy *mpol, pgoff_t ilx, int nid);
+struct folio *folio_alloc(gfp_t gfp, unsigned int order);
struct folio *vma_alloc_folio(gfp_t gfp, int order, struct vm_area_struct *vma,
unsigned long addr, bool hugepage);
#else
@@ -270,6 +273,11 @@ static inline struct page *alloc_pages(gfp_t gfp_mask, unsigned int order)
{
return alloc_pages_node(numa_node_id(), gfp_mask, order);
}
+static inline struct page *alloc_pages_mpol(gfp_t gfp, unsigned int order,
+ struct mempolicy *mpol, pgoff_t ilx, int nid)
+{
+ return alloc_pages(gfp, order);
+}
static inline struct folio *folio_alloc(gfp_t gfp, unsigned int order)
{
return __folio_alloc_node(gfp, order, numa_node_id());
@@ -320,11 +328,13 @@ extern void page_frag_free(void *addr);
#define free_page(addr) free_pages((addr), 0)
void page_alloc_init_cpuhp(void);
+int decay_pcp_high(struct zone *zone, struct per_cpu_pages *pcp);
void drain_zone_pages(struct zone *zone, struct per_cpu_pages *pcp);
void drain_all_pages(struct zone *zone);
void drain_local_pages(struct zone *zone);
void page_alloc_init_late(void);
+void setup_pcp_cacheinfo(void);
/*
* gfp_allowed_mask is set to GFP_BOOT_MASK during early boot to restrict what
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 1c4385a00f88..db2dfbae8edb 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -159,7 +159,6 @@ int gpiod_set_raw_array_value_cansleep(unsigned int array_size,
int gpiod_set_config(struct gpio_desc *desc, unsigned long config);
int gpiod_set_debounce(struct gpio_desc *desc, unsigned int debounce);
-int gpiod_set_transitory(struct gpio_desc *desc, bool transitory);
void gpiod_toggle_active_low(struct gpio_desc *desc);
int gpiod_is_active_low(const struct gpio_desc *desc);
@@ -494,13 +493,6 @@ static inline int gpiod_set_debounce(struct gpio_desc *desc, unsigned int deboun
return -ENOSYS;
}
-static inline int gpiod_set_transitory(struct gpio_desc *desc, bool transitory)
-{
- /* GPIO can never have been requested */
- WARN_ON(desc);
- return -ENOSYS;
-}
-
static inline void gpiod_toggle_active_low(struct gpio_desc *desc)
{
/* GPIO can never have been requested */
@@ -614,8 +606,6 @@ void acpi_dev_remove_driver_gpios(struct acpi_device *adev);
int devm_acpi_dev_add_driver_gpios(struct device *dev,
const struct acpi_gpio_mapping *gpios);
-struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin, char *label);
-
#else /* CONFIG_GPIOLIB && CONFIG_ACPI */
#include <linux/err.h>
@@ -633,12 +623,6 @@ static inline int devm_acpi_dev_add_driver_gpios(struct device *dev,
return -ENXIO;
}
-static inline struct gpio_desc *acpi_get_and_request_gpiod(char *path, unsigned int pin,
- char *label)
-{
- return ERR_PTR(-ENOSYS);
-}
-
#endif /* CONFIG_GPIOLIB && CONFIG_ACPI */
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 4f0c5d62c8f3..1d454dc944b3 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -3,6 +3,8 @@
#define __LINUX_GPIO_DRIVER_H
#include <linux/bits.h>
+#include <linux/cleanup.h>
+#include <linux/err.h>
#include <linux/irqchip/chained_irq.h>
#include <linux/irqdomain.h>
#include <linux/irqhandler.h>
@@ -529,8 +531,7 @@ struct gpio_chip {
#endif /* CONFIG_OF_GPIO */
};
-extern const char *gpiochip_is_requested(struct gpio_chip *gc,
- unsigned int offset);
+const char *gpiochip_is_requested(struct gpio_chip *gc, unsigned int offset);
/**
* for_each_requested_gpio_in_range - iterates over requested GPIOs in a given range
@@ -549,9 +550,9 @@ extern const char *gpiochip_is_requested(struct gpio_chip *gc,
for_each_requested_gpio_in_range(chip, i, 0, chip->ngpio, label)
/* add/remove chips */
-extern int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
- struct lock_class_key *lock_key,
- struct lock_class_key *request_key);
+int gpiochip_add_data_with_key(struct gpio_chip *gc, void *data,
+ struct lock_class_key *lock_key,
+ struct lock_class_key *request_key);
/**
* gpiochip_add_data() - register a gpio_chip
@@ -599,13 +600,26 @@ static inline int gpiochip_add(struct gpio_chip *gc)
{
return gpiochip_add_data(gc, NULL);
}
-extern void gpiochip_remove(struct gpio_chip *gc);
-extern int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc, void *data,
- struct lock_class_key *lock_key,
- struct lock_class_key *request_key);
+void gpiochip_remove(struct gpio_chip *gc);
+int devm_gpiochip_add_data_with_key(struct device *dev, struct gpio_chip *gc,
+ void *data, struct lock_class_key *lock_key,
+ struct lock_class_key *request_key);
+
+struct gpio_chip *gpiochip_find(void *data,
+ int (*match)(struct gpio_chip *gc, void *data));
+
+struct gpio_device *gpio_device_find(void *data,
+ int (*match)(struct gpio_chip *gc, void *data));
+struct gpio_device *gpio_device_find_by_label(const char *label);
+struct gpio_device *gpio_device_find_by_fwnode(const struct fwnode_handle *fwnode);
+
+struct gpio_device *gpio_device_get(struct gpio_device *gdev);
+void gpio_device_put(struct gpio_device *gdev);
+
+DEFINE_FREE(gpio_device_put, struct gpio_device *,
+	    if (!IS_ERR_OR_NULL(_T)) gpio_device_put(_T));
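The DEFINE_FREE() hook enables scope-based cleanup via cleanup.h; a hedged
sketch (the chip label is hypothetical):

	struct gpio_device *gdev __free(gpio_device_put) =
			gpio_device_find_by_label("example-chip");

	if (!gdev)
		return -ENODEV;

	/* use gdev; the reference is dropped automatically on scope exit */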
-extern struct gpio_chip *gpiochip_find(void *data,
- int (*match)(struct gpio_chip *gc, void *data));
+struct device *gpio_device_to_device(struct gpio_device *gdev);
bool gpiochip_line_is_irq(struct gpio_chip *gc, unsigned int offset);
int gpiochip_reqres_irq(struct gpio_chip *gc, unsigned int offset);
@@ -758,14 +772,23 @@ struct gpio_desc *gpiochip_request_own_desc(struct gpio_chip *gc,
enum gpiod_flags dflags);
void gpiochip_free_own_desc(struct gpio_desc *desc);
+struct gpio_desc *gpiochip_get_desc(struct gpio_chip *gc, unsigned int hwnum);
+struct gpio_desc *
+gpio_device_get_desc(struct gpio_device *gdev, unsigned int hwnum);
+
+struct gpio_chip *gpio_device_get_chip(struct gpio_device *gdev);
+
#ifdef CONFIG_GPIOLIB
/* lock/unlock as IRQ */
int gpiochip_lock_as_irq(struct gpio_chip *gc, unsigned int offset);
void gpiochip_unlock_as_irq(struct gpio_chip *gc, unsigned int offset);
-
struct gpio_chip *gpiod_to_chip(const struct gpio_desc *desc);
+struct gpio_device *gpiod_to_gpio_device(struct gpio_desc *desc);
+
+/* struct gpio_device getters */
+int gpio_device_get_base(struct gpio_device *gdev);
#else /* CONFIG_GPIOLIB */
diff --git a/include/linux/habanalabs/cpucp_if.h b/include/linux/habanalabs/cpucp_if.h
new file mode 100644
index 000000000000..86ea7c63a0d2
--- /dev/null
+++ b/include/linux/habanalabs/cpucp_if.h
@@ -0,0 +1,1417 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2020-2023 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef CPUCP_IF_H
+#define CPUCP_IF_H
+
+#include <linux/types.h>
+#include <linux/if_ether.h>
+
+#include "hl_boot_if.h"
+
+#define NUM_HBM_PSEUDO_CH 2
+#define NUM_HBM_CH_PER_DEV 8
+#define CPUCP_PKT_HBM_ECC_INFO_WR_PAR_SHIFT 0
+#define CPUCP_PKT_HBM_ECC_INFO_WR_PAR_MASK 0x00000001
+#define CPUCP_PKT_HBM_ECC_INFO_RD_PAR_SHIFT 1
+#define CPUCP_PKT_HBM_ECC_INFO_RD_PAR_MASK 0x00000002
+#define CPUCP_PKT_HBM_ECC_INFO_CA_PAR_SHIFT 2
+#define CPUCP_PKT_HBM_ECC_INFO_CA_PAR_MASK 0x00000004
+#define CPUCP_PKT_HBM_ECC_INFO_DERR_SHIFT 3
+#define CPUCP_PKT_HBM_ECC_INFO_DERR_MASK 0x00000008
+#define CPUCP_PKT_HBM_ECC_INFO_SERR_SHIFT 4
+#define CPUCP_PKT_HBM_ECC_INFO_SERR_MASK 0x00000010
+#define CPUCP_PKT_HBM_ECC_INFO_TYPE_SHIFT 5
+#define CPUCP_PKT_HBM_ECC_INFO_TYPE_MASK 0x00000020
+#define CPUCP_PKT_HBM_ECC_INFO_HBM_CH_SHIFT 6
+#define CPUCP_PKT_HBM_ECC_INFO_HBM_CH_MASK 0x000007C0
+
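A sketch of decoding the supplemental info word with the shift/mask pairs
above (the helper name is illustrative):

	static inline u32 hbm_ecc_info_channel(__le32 info)
	{
		return (le32_to_cpu(info) & CPUCP_PKT_HBM_ECC_INFO_HBM_CH_MASK)
				>> CPUCP_PKT_HBM_ECC_INFO_HBM_CH_SHIFT;
	}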
+#define PLL_MAP_MAX_BITS 128
+#define PLL_MAP_LEN (PLL_MAP_MAX_BITS / 8)
+
+enum eq_event_id {
+ EQ_EVENT_NIC_STS_REQUEST = 0,
+ EQ_EVENT_PWR_MODE_0,
+ EQ_EVENT_PWR_MODE_1,
+ EQ_EVENT_PWR_MODE_2,
+ EQ_EVENT_PWR_MODE_3,
+ EQ_EVENT_PWR_BRK_ENTRY,
+ EQ_EVENT_PWR_BRK_EXIT,
+ EQ_EVENT_HEARTBEAT,
+};
+
+/*
+ * info of the pkt queue pointers in the first async occurrence
+ */
+struct cpucp_pkt_sync_err {
+ __le32 pi;
+ __le32 ci;
+};
+
+struct hl_eq_hbm_ecc_data {
+ /* SERR counter */
+ __le32 sec_cnt;
+ /* DERR counter */
+ __le32 dec_cnt;
+ /* Supplemental Information according to the mask bits */
+ __le32 hbm_ecc_info;
+ /* Address in hbm where the ecc happened */
+ __le32 first_addr;
+ /* SERR continuous address counter */
+ __le32 sec_cont_cnt;
+ __le32 pad;
+};
+
+/*
+ * EVENT QUEUE
+ */
+
+struct hl_eq_header {
+ __le32 reserved;
+ __le32 ctl;
+};
+
+struct hl_eq_ecc_data {
+ __le64 ecc_address;
+ __le64 ecc_syndrom;
+ __u8 memory_wrapper_idx;
+ __u8 is_critical;
+ __le16 block_id;
+ __u8 pad[4];
+};
+
+enum hl_sm_sei_cause {
+ SM_SEI_SO_OVERFLOW,
+ SM_SEI_LBW_4B_UNALIGNED,
+ SM_SEI_AXI_RESPONSE_ERR
+};
+
+struct hl_eq_sm_sei_data {
+ __le32 sei_log;
+ /* enum hl_sm_sei_cause */
+ __u8 sei_cause;
+ __u8 pad[3];
+};
+
+enum hl_fw_alive_severity {
+ FW_ALIVE_SEVERITY_MINOR,
+ FW_ALIVE_SEVERITY_CRITICAL
+};
+
+struct hl_eq_fw_alive {
+ __le64 uptime_seconds;
+ __le32 process_id;
+ __le32 thread_id;
+ /* enum hl_fw_alive_severity */
+ __u8 severity;
+ __u8 pad[7];
+};
+
+struct hl_eq_intr_cause {
+ __le64 intr_cause_data;
+};
+
+struct hl_eq_pcie_drain_ind_data {
+ struct hl_eq_intr_cause intr_cause;
+ __le64 drain_wr_addr_lbw;
+ __le64 drain_rd_addr_lbw;
+ __le64 drain_wr_addr_hbw;
+ __le64 drain_rd_addr_hbw;
+};
+
+struct hl_eq_razwi_lbw_info_regs {
+ __le32 rr_aw_razwi_reg;
+ __le32 rr_aw_razwi_id_reg;
+ __le32 rr_ar_razwi_reg;
+ __le32 rr_ar_razwi_id_reg;
+};
+
+struct hl_eq_razwi_hbw_info_regs {
+ __le32 rr_aw_razwi_hi_reg;
+ __le32 rr_aw_razwi_lo_reg;
+ __le32 rr_aw_razwi_id_reg;
+ __le32 rr_ar_razwi_hi_reg;
+ __le32 rr_ar_razwi_lo_reg;
+ __le32 rr_ar_razwi_id_reg;
+};
+
+/* razwi_happened masks */
+#define RAZWI_HAPPENED_HBW 0x1
+#define RAZWI_HAPPENED_LBW 0x2
+#define RAZWI_HAPPENED_AW 0x4
+#define RAZWI_HAPPENED_AR 0x8
+
+struct hl_eq_razwi_info {
+ __le32 razwi_happened_mask;
+ union {
+ struct hl_eq_razwi_lbw_info_regs lbw;
+ struct hl_eq_razwi_hbw_info_regs hbw;
+ };
+ __le32 pad;
+};
+
+struct hl_eq_razwi_with_intr_cause {
+ struct hl_eq_razwi_info razwi_info;
+ struct hl_eq_intr_cause intr_cause;
+};
+
+#define HBM_CA_ERR_CMD_LIFO_LEN 8
+#define HBM_RD_ERR_DATA_LIFO_LEN 8
+#define HBM_WR_PAR_CMD_LIFO_LEN 11
+
+enum hl_hbm_sei_cause {
+ /* Command/address parity error event is split into 2 events due to
+ * size limitation: ODD suffix for odd HBM CK_t cycles and EVEN suffix
+ * for even HBM CK_t cycles
+ */
+ HBM_SEI_CMD_PARITY_EVEN,
+ HBM_SEI_CMD_PARITY_ODD,
+ /* Read errors can be reflected as a combination of SERR/DERR/parity
+ * errors. Therefore, we define one event for all read error types.
+	 * LKD will perform further processing.
+ */
+ HBM_SEI_READ_ERR,
+ HBM_SEI_WRITE_DATA_PARITY_ERR,
+ HBM_SEI_CATTRIP,
+ HBM_SEI_MEM_BIST_FAIL,
+ HBM_SEI_DFI,
+ HBM_SEI_INV_TEMP_READ_OUT,
+ HBM_SEI_BIST_FAIL,
+};
+
+/* Masks for parsing hl_hbm_sei_header fields */
+#define HBM_ECC_SERR_CNTR_MASK 0xFF
+#define HBM_ECC_DERR_CNTR_MASK 0xFF00
+#define HBM_RD_PARITY_CNTR_MASK 0xFF0000
+
+/* HBM index and MC index are known by the event_id */
+struct hl_hbm_sei_header {
+ union {
+ /* relevant only in case of HBM read error */
+ struct {
+ __u8 ecc_serr_cnt;
+ __u8 ecc_derr_cnt;
+ __u8 read_par_cnt;
+ __u8 reserved;
+ };
+ /* All other cases */
+ __le32 cnt;
+ };
+ __u8 sei_cause; /* enum hl_hbm_sei_cause */
+ __u8 mc_channel; /* range: 0-3 */
+ __u8 mc_pseudo_channel; /* range: 0-7 */
+ __u8 is_critical;
+};
+
+#define HBM_RD_ADDR_SID_SHIFT 0
+#define HBM_RD_ADDR_SID_MASK 0x1
+#define HBM_RD_ADDR_BG_SHIFT 1
+#define HBM_RD_ADDR_BG_MASK 0x6
+#define HBM_RD_ADDR_BA_SHIFT 3
+#define HBM_RD_ADDR_BA_MASK 0x18
+#define HBM_RD_ADDR_COL_SHIFT 5
+#define HBM_RD_ADDR_COL_MASK 0x7E0
+#define HBM_RD_ADDR_ROW_SHIFT 11
+#define HBM_RD_ADDR_ROW_MASK 0x3FFF800
+
+struct hbm_rd_addr {
+ union {
+ /* bit fields are only for FW use */
+ struct {
+ u32 dbg_rd_err_addr_sid:1;
+ u32 dbg_rd_err_addr_bg:2;
+ u32 dbg_rd_err_addr_ba:2;
+ u32 dbg_rd_err_addr_col:6;
+ u32 dbg_rd_err_addr_row:15;
+ u32 reserved:6;
+ };
+ __le32 rd_addr_val;
+ };
+};
+
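+/*
+ * Illustrative sketch (not part of this interface): the bit fields above are
+ * FW-only, so a host-side consumer would decode rd_addr_val with the
+ * HBM_RD_ADDR_* shift/mask pairs instead. The helper name is hypothetical.
+ */
+static inline u32 hbm_rd_addr_get_row(const struct hbm_rd_addr *addr)
+{
+	u32 val = le32_to_cpu(addr->rd_addr_val);
+
+	return (val & HBM_RD_ADDR_ROW_MASK) >> HBM_RD_ADDR_ROW_SHIFT;
+}
+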
+#define HBM_RD_ERR_BEAT_SHIFT 2
+/* dbg_rd_err_misc fields: */
+/* Read parity is calculated per DW on every beat */
+#define HBM_RD_ERR_PAR_ERR_BEAT0_SHIFT 0
+#define HBM_RD_ERR_PAR_ERR_BEAT0_MASK 0x3
+#define HBM_RD_ERR_PAR_DATA_BEAT0_SHIFT 8
+#define HBM_RD_ERR_PAR_DATA_BEAT0_MASK 0x300
+/* ECC is calculated per PC on every beat */
+#define HBM_RD_ERR_SERR_BEAT0_SHIFT 16
+#define HBM_RD_ERR_SERR_BEAT0_MASK 0x10000
+#define HBM_RD_ERR_DERR_BEAT0_SHIFT 24
+#define HBM_RD_ERR_DERR_BEAT0_MASK 0x100000
+
+struct hl_eq_hbm_sei_read_err_intr_info {
+ /* DFI_RD_ERR_REP_ADDR */
+ struct hbm_rd_addr dbg_rd_err_addr;
+ /* DFI_RD_ERR_REP_ERR */
+ union {
+ struct {
+ /* bit fields are only for FW use */
+ u32 dbg_rd_err_par:8;
+ u32 dbg_rd_err_par_data:8;
+ u32 dbg_rd_err_serr:4;
+ u32 dbg_rd_err_derr:4;
+ u32 reserved:8;
+ };
+ __le32 dbg_rd_err_misc;
+ };
+ /* DFI_RD_ERR_REP_DM */
+ __le32 dbg_rd_err_dm;
+ /* DFI_RD_ERR_REP_SYNDROME */
+ __le32 dbg_rd_err_syndrome;
+ /* DFI_RD_ERR_REP_DATA */
+ __le32 dbg_rd_err_data[HBM_RD_ERR_DATA_LIFO_LEN];
+};
+
+struct hl_eq_hbm_sei_ca_par_intr_info {
+ /* 14 LSBs */
+ __le16 dbg_row[HBM_CA_ERR_CMD_LIFO_LEN];
+ /* 18 LSBs */
+ __le32 dbg_col[HBM_CA_ERR_CMD_LIFO_LEN];
+};
+
+#define WR_PAR_LAST_CMD_COL_SHIFT 0
+#define WR_PAR_LAST_CMD_COL_MASK 0x3F
+#define WR_PAR_LAST_CMD_BG_SHIFT 6
+#define WR_PAR_LAST_CMD_BG_MASK 0xC0
+#define WR_PAR_LAST_CMD_BA_SHIFT 8
+#define WR_PAR_LAST_CMD_BA_MASK 0x300
+#define WR_PAR_LAST_CMD_SID_SHIFT 10
+#define WR_PAR_LAST_CMD_SID_MASK 0x400
+
+/* Row address isn't latched */
+struct hbm_sei_wr_cmd_address {
+ /* DFI_DERR_LAST_CMD */
+ union {
+ struct {
+ /* bit fields are only for FW use */
+ u32 col:6;
+ u32 bg:2;
+ u32 ba:2;
+ u32 sid:1;
+ u32 reserved:21;
+ };
+ __le32 dbg_wr_cmd_addr;
+ };
+};
+
+struct hl_eq_hbm_sei_wr_par_intr_info {
+ /* entry 0: WR command address from the 1st cycle prior to the error
+ * entry 1: WR command address from the 2nd cycle prior to the error
+ * and so on...
+ */
+ struct hbm_sei_wr_cmd_address dbg_last_wr_cmds[HBM_WR_PAR_CMD_LIFO_LEN];
+ /* derr[0:1] - 1st HBM cycle DERR output
+ * derr[2:3] - 2nd HBM cycle DERR output
+ */
+ __u8 dbg_derr;
+ /* extend to reach 8B */
+ __u8 pad[3];
+};
+
+/*
+ * This struct represents the following SEI causes:
+ * command parity, ECC double error, ECC single error, DFI error, cattrip,
+ * temperature read-out, read parity error and write parity error.
+ * Some only use the header while others have extra data.
+ */
+struct hl_eq_hbm_sei_data {
+ struct hl_hbm_sei_header hdr;
+ union {
+ struct hl_eq_hbm_sei_ca_par_intr_info ca_parity_even_info;
+ struct hl_eq_hbm_sei_ca_par_intr_info ca_parity_odd_info;
+ struct hl_eq_hbm_sei_read_err_intr_info read_err_info;
+ struct hl_eq_hbm_sei_wr_par_intr_info wr_parity_info;
+ };
+};
+
+/* Engine/farm arc interrupt type */
+enum hl_engine_arc_interrupt_type {
+ /* Qman/farm ARC DCCM QUEUE FULL interrupt type */
+ ENGINE_ARC_DCCM_QUEUE_FULL_IRQ = 1
+};
+
+/* Data structure specifies details of payload of DCCM QUEUE FULL interrupt */
+struct hl_engine_arc_dccm_queue_full_irq {
+ /* Queue index value which caused DCCM QUEUE FULL */
+ __le32 queue_index;
+ __le32 pad;
+};
+
+/* Data structure specifies details of QM/FARM ARC interrupt */
+struct hl_eq_engine_arc_intr_data {
+ /* ARC engine id e.g. DCORE0_TPC0_QM_ARC, DCORE0_TCP1_QM_ARC */
+ __le32 engine_id;
+ __le32 intr_type; /* enum hl_engine_arc_interrupt_type */
+ /* More info related to the interrupt e.g. queue index
+	 * in case of DCCM_QUEUE_FULL interrupt.
+ */
+ __le64 payload;
+ __le64 pad[5];
+};
+
+#define ADDR_DEC_ADDRESS_COUNT_MAX 4
+
+/* Data structure specifies details of ADDR_DEC interrupt */
+struct hl_eq_addr_dec_intr_data {
+ struct hl_eq_intr_cause intr_cause;
+ __le64 addr[ADDR_DEC_ADDRESS_COUNT_MAX];
+ __u8 addr_cnt;
+ __u8 pad[7];
+};
+
+struct hl_eq_entry {
+ struct hl_eq_header hdr;
+ union {
+ __le64 data_placeholder;
+ struct hl_eq_ecc_data ecc_data;
+ struct hl_eq_hbm_ecc_data hbm_ecc_data; /* Obsolete */
+ struct hl_eq_sm_sei_data sm_sei_data;
+ struct cpucp_pkt_sync_err pkt_sync_err;
+ struct hl_eq_fw_alive fw_alive;
+ struct hl_eq_intr_cause intr_cause;
+ struct hl_eq_pcie_drain_ind_data pcie_drain_ind_data;
+ struct hl_eq_razwi_info razwi_info;
+ struct hl_eq_razwi_with_intr_cause razwi_with_intr_cause;
+ struct hl_eq_hbm_sei_data sei_data; /* Gaudi2 HBM */
+ struct hl_eq_engine_arc_intr_data arc_data;
+ struct hl_eq_addr_dec_intr_data addr_dec;
+ __le64 data[7];
+ };
+};
+
+#define HL_EQ_ENTRY_SIZE sizeof(struct hl_eq_entry)
+
+#define EQ_CTL_READY_SHIFT 31
+#define EQ_CTL_READY_MASK 0x80000000
+
+#define EQ_CTL_EVENT_TYPE_SHIFT 16
+#define EQ_CTL_EVENT_TYPE_MASK 0x0FFF0000
+
+#define EQ_CTL_INDEX_SHIFT 0
+#define EQ_CTL_INDEX_MASK 0x0000FFFF
+
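+/*
+ * Illustrative sketch (not part of this interface): parsing the ctl word of
+ * an event queue entry with the masks above. Helper names are hypothetical.
+ */
+static inline bool hl_eq_entry_ready(const struct hl_eq_entry *entry)
+{
+	return !!(le32_to_cpu(entry->hdr.ctl) & EQ_CTL_READY_MASK);
+}
+
+static inline u32 hl_eq_entry_event_type(const struct hl_eq_entry *entry)
+{
+	return (le32_to_cpu(entry->hdr.ctl) & EQ_CTL_EVENT_TYPE_MASK) >>
+			EQ_CTL_EVENT_TYPE_SHIFT;
+}
+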
+enum pq_init_status {
+ PQ_INIT_STATUS_NA = 0,
+ PQ_INIT_STATUS_READY_FOR_CP,
+ PQ_INIT_STATUS_READY_FOR_HOST,
+ PQ_INIT_STATUS_READY_FOR_CP_SINGLE_MSI,
+ PQ_INIT_STATUS_LEN_NOT_POWER_OF_TWO_ERR,
+ PQ_INIT_STATUS_ILLEGAL_Q_ADDR_ERR
+};
+
+/*
+ * CpuCP Primary Queue Packets
+ *
+ * During normal operation, the host's kernel driver needs to send various
+ * messages to CpuCP, usually either to SET some value into a H/W peripheral or
+ * to GET the current value of some H/W peripheral. For example, SET the
+ * frequency of MME/TPC and GET the value of the thermal sensor.
+ *
+ * These messages can be initiated either by the User application or by the
+ * host's driver itself, e.g. power management code. In either case, the
+ * communication from the host's driver to CpuCP will *always* be in
+ * synchronous mode, meaning that the host will send a single message and poll
+ * until the message is acknowledged and the results are ready (if results are
+ * needed).
+ *
+ * This means that only a single message can be sent at a time and the host's
+ * driver must wait for its result before sending the next message. Having said
+ * that, because these are control messages which are sent at a relatively low
+ * frequency, this limitation seems acceptable. It's important to note that
+ * in case of multiple devices, messages to different devices *can* be sent
+ * at the same time.
+ *
+ * The message, inputs/outputs (if relevant) and fence object will be located
+ * on the device DDR at an address that will be determined by the host's driver.
+ * During device initialization phase, the host will pass to CpuCP that address.
+ * Most of the message types will contain inputs/outputs inside the message
+ * itself. The common part of each message will contain the opcode of the
+ * message (its type) and a field representing a fence object.
+ *
+ * When the host's driver wishes to send a message to CpuCP, it will write the
+ * message contents to the device DDR, clear the fence object and then write to
+ * the PSOC_ARC1_AUX_SW_INTR, to issue interrupt 121 to ARC Management CPU.
+ *
+ * Upon receiving the interrupt (#121), CpuCP will read the message from the
+ * DDR. In case the message is a SET operation, CpuCP will first perform the
+ * operation and then write to the fence object on the device DDR. In case the
+ * message is a GET operation, CpuCP will first fill the results section on the
+ * device DDR and then write to the fence object. If an error occurred, CpuCP
+ * will fill the rc field with the right error code.
+ *
+ * In the meantime, the host's driver will poll on the fence object. Once the
+ * host sees that the fence object is signaled, it will read the results from
+ * the device DDR (if relevant) and resume the code execution in the host's
+ * driver.
+ *
+ * To use QMAN packets, the opcode must be the QMAN opcode, shifted by 8
+ * so the value being put by the host's driver matches the value read by CpuCP
+ *
+ * Non-QMAN packets should be limited to values 1 through (2^8 - 1)
+ *
+ * Detailed description:
+ *
+ * CPUCP_PACKET_DISABLE_PCI_ACCESS -
+ * After receiving this packet the embedded CPU must NOT issue PCI
+ * transactions (read/write) towards the Host CPU. This also includes
+ * sending MSI-X interrupts.
+ * This packet is usually sent before the device is moved to D3Hot state.
+ *
+ * CPUCP_PACKET_ENABLE_PCI_ACCESS -
+ * After receiving this packet the embedded CPU is allowed to issue PCI
+ * transactions towards the Host CPU, including sending MSI-X interrupts.
+ * This packet is usually sent after the device is moved to D0 state.
+ *
+ * CPUCP_PACKET_TEMPERATURE_GET -
+ * Fetch the current temperature / Max / Max Hyst / Critical /
+ * Critical Hyst of a specified thermal sensor. The packet's
+ * arguments specify the desired sensor and the field to get.
+ *
+ * CPUCP_PACKET_VOLTAGE_GET -
+ * Fetch the voltage / Max / Min of a specified sensor. The packet's
+ * arguments specify the sensor and type.
+ *
+ * CPUCP_PACKET_CURRENT_GET -
+ * Fetch the current / Max / Min of a specified sensor. The packet's
+ * arguments specify the sensor and type.
+ *
+ * CPUCP_PACKET_FAN_SPEED_GET -
+ * Fetch the speed / Max / Min of a specified fan. The packet's
+ * arguments specify the sensor and type.
+ *
+ * CPUCP_PACKET_PWM_GET -
+ * Fetch the pwm value / mode of a specified pwm. The packet's
+ * arguments specify the sensor and type.
+ *
+ * CPUCP_PACKET_PWM_SET -
+ * Set the pwm value / mode of a specified pwm. The packet's
+ * arguments specify the sensor, type and value.
+ *
+ * CPUCP_PACKET_FREQUENCY_SET -
+ * Set the frequency of a specified PLL. The packet's arguments specify
+ * the PLL and the desired frequency. The actual frequency in the device
+ * might differ from the requested frequency.
+ *
+ * CPUCP_PACKET_FREQUENCY_GET -
+ * Fetch the frequency of a specified PLL. The packet's arguments specify
+ * the PLL.
+ *
+ * CPUCP_PACKET_LED_SET -
+ * Set the state of a specified led. The packet's arguments
+ * specify the led and the desired state.
+ *
+ * CPUCP_PACKET_I2C_WR -
+ * Write 32-bit value to I2C device. The packet's arguments specify the
+ * I2C bus, address and value.
+ *
+ * CPUCP_PACKET_I2C_RD -
+ * Read 32-bit value from I2C device. The packet's arguments specify the
+ * I2C bus and address.
+ *
+ * CPUCP_PACKET_INFO_GET -
+ * Fetch information from the device as specified in the packet's
+ * structure. The host's driver passes the max size it allows the CpuCP to
+ * write to the structure, to prevent data corruption in case of
+ * mismatched driver/FW versions.
+ *
+ * CPUCP_PACKET_FLASH_PROGRAM_REMOVED - this packet was removed
+ *
+ * CPUCP_PACKET_UNMASK_RAZWI_IRQ -
+ * Unmask the given IRQ. The IRQ number is specified in the value field.
+ * The packet is sent after receiving an interrupt and printing its
+ * relevant information.
+ *
+ * CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY -
+ * Unmask the given IRQs. The IRQs numbers are specified in an array right
+ * after the cpucp_packet structure, where its first element is the array
+ * length. The packet is sent after a soft reset was done in order to
+ * handle any interrupts that were sent during the reset process.
+ *
+ * CPUCP_PACKET_TEST -
+ * Test packet for CpuCP connectivity. The CPU will put the fence value
+ * in the result field.
+ *
+ * CPUCP_PACKET_FREQUENCY_CURR_GET -
+ * Fetch the current frequency of a specified PLL. The packet's arguments
+ * specify the PLL.
+ *
+ * CPUCP_PACKET_MAX_POWER_GET -
+ * Fetch the maximal power of the device.
+ *
+ * CPUCP_PACKET_MAX_POWER_SET -
+ * Set the maximal power of the device. The packet's arguments specify
+ * the power.
+ *
+ * CPUCP_PACKET_EEPROM_DATA_GET -
+ * Get EEPROM data from the CpuCP kernel. The buffer is specified in the
+ * addr field. The CPU will put the returned data size in the result
+ * field. In addition, the host's driver passes the max size it allows the
+ * CpuCP to write to the structure, to prevent data corruption in case of
+ * mismatched driver/FW versions.
+ *
+ * CPUCP_PACKET_NIC_INFO_GET -
+ * Fetch information from the device regarding the NIC. The host's driver
+ * passes the max size it allows the CpuCP to write to the structure, to
+ * prevent data corruption in case of mismatched driver/FW versions.
+ *
+ * CPUCP_PACKET_TEMPERATURE_SET -
+ * Set the value of the offset property of a specified thermal sensor.
+ * The packet's arguments specify the desired sensor and the field to
+ * set.
+ *
+ * CPUCP_PACKET_VOLTAGE_SET -
+ * Trigger the reset_history property of a specified voltage sensor.
+ * The packet's arguments specify the desired sensor and the field to
+ * set.
+ *
+ * CPUCP_PACKET_CURRENT_SET -
+ * Trigger the reset_history property of a specified current sensor.
+ * The packet's arguments specify the desired sensor and the field to
+ * set.
+ *
+ * CPUCP_PACKET_PCIE_THROUGHPUT_GET -
+ * Get throughput of PCIe.
+ * The packet's arguments specify the transaction direction (TX/RX).
+ * The window measurement is 10[msec], and the return value is in KB/sec.
+ *
+ * CPUCP_PACKET_PCIE_REPLAY_CNT_GET -
+ * Replay count measures the number of "replay" events, which is basically
+ * the number of retries done by PCIe.
+ *
+ * CPUCP_PACKET_TOTAL_ENERGY_GET -
+ * Total Energy is a measurement of energy from the time FW Linux
+ * is loaded. It is calculated by multiplying the average power
+ * by the time passed from armcp start. The units are millijoules.
+ *
+ * CPUCP_PACKET_PLL_INFO_GET -
+ * Fetch frequencies of PLL from the required PLL IP.
+ * The packet's arguments specify the device PLL type,
+ * taken from the device's pll_index enum.
+ * The result is composed of 4 outputs, each is 16-bit
+ * frequency in MHz.
+ *
+ * CPUCP_PACKET_POWER_GET -
+ * Fetch the present power consumption of the device (Current * Voltage).
+ *
+ * CPUCP_PACKET_NIC_PFC_SET -
+ * Enable/Disable the NIC PFC feature. The packet's arguments specify the
+ * NIC port, relevant lanes to configure and one bit indication for
+ * enable/disable.
+ *
+ * CPUCP_PACKET_NIC_FAULT_GET -
+ * Fetch the current indication for local/remote faults from the NIC MAC.
+ * The result is 32-bit value of the relevant register.
+ *
+ * CPUCP_PACKET_NIC_LPBK_SET -
+ * Enable/Disable the MAC loopback feature. The packet's arguments specify
+ * the NIC port, relevant lanes to configure and one bit indication for
+ * enable/disable.
+ *
+ * CPUCP_PACKET_NIC_MAC_INIT -
+ * Configure the NIC MAC channels. The packet's arguments specify the
+ * NIC port and the speed.
+ *
+ * CPUCP_PACKET_MSI_INFO_SET -
+ * Set the index number for each supported MSI type going from
+ * host to device.
+ *
+ * CPUCP_PACKET_NIC_XPCS91_REGS_GET -
+ * Fetch the un/correctable counters values from the NIC MAC.
+ *
+ * CPUCP_PACKET_NIC_STAT_REGS_GET -
+ * Fetch various NIC MAC counters from the NIC STAT.
+ *
+ * CPUCP_PACKET_NIC_STAT_REGS_CLR -
+ * Clear the various NIC MAC counters in the NIC STAT.
+ *
+ * CPUCP_PACKET_NIC_STAT_REGS_ALL_GET -
+ * Fetch all NIC MAC counters from the NIC STAT.
+ *
+ * CPUCP_PACKET_IS_IDLE_CHECK -
+ * Check if the device is IDLE with regard to the DMA/compute engines
+ * and QMANs. The f/w will return a bitmask where each bit represents
+ * a different engine or QMAN according to enum cpucp_idle_mask.
+ * The bit will be 1 if the engine is NOT idle.
+ *
+ * CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET -
+ * Fetch data of all replaced HBM rows and rows pending replacement.
+ *
+ * CPUCP_PACKET_HBM_PENDING_ROWS_STATUS -
+ * Fetch the status of HBM rows that are pending replacement and need a
+ * reboot to be replaced.
+ *
+ * CPUCP_PACKET_POWER_SET -
+ * Resets the power history of the device to 0.
+ *
+ * CPUCP_PACKET_ENGINE_CORE_ASID_SET -
+ * Packet to perform engine core ASID configuration
+ *
+ * CPUCP_PACKET_SEC_ATTEST_GET -
+ * Get the attestation data that is collected during various stages of the
+ * boot sequence. The attestation data is also hashed with some unique
+ * number (nonce) provided by the host to prevent replay attacks.
+ * A public key and certificate are also provided as part of the FW response.
+ *
+ * CPUCP_PACKET_MONITOR_DUMP_GET -
+ * Get monitors registers dump from the CpuCP kernel.
+ * The CPU will put the registers dump in a buffer allocated by the driver,
+ * whose address is passed via the CpuCP packet. In addition, the host's driver
+ * passes the max size it allows the CpuCP to write to the structure, to prevent
+ * data corruption in case of mismatched driver/FW versions.
+ * Obsolete.
+ *
+ * CPUCP_PACKET_GENERIC_PASSTHROUGH -
+ * Generic opcode for all firmware info that is only passed to host
+ * through the LKD, without getting parsed there.
+ *
+ * CPUCP_PACKET_ACTIVE_STATUS_SET -
+ * LKD sends the FW an indication of whether the device is free or in use.
+ * This indication is also reported to the BMC.
+ *
+ * CPUCP_PACKET_SOFT_RESET -
+ * Packet to perform soft-reset.
+ *
+ * CPUCP_PACKET_INTS_REGISTER -
+ * Packet to inform FW that queues have been established and LKD is ready to receive
+ * EQ events.
+ */
+
+enum cpucp_packet_id {
+ CPUCP_PACKET_DISABLE_PCI_ACCESS = 1, /* internal */
+ CPUCP_PACKET_ENABLE_PCI_ACCESS, /* internal */
+ CPUCP_PACKET_TEMPERATURE_GET, /* sysfs */
+ CPUCP_PACKET_VOLTAGE_GET, /* sysfs */
+ CPUCP_PACKET_CURRENT_GET, /* sysfs */
+ CPUCP_PACKET_FAN_SPEED_GET, /* sysfs */
+ CPUCP_PACKET_PWM_GET, /* sysfs */
+ CPUCP_PACKET_PWM_SET, /* sysfs */
+ CPUCP_PACKET_FREQUENCY_SET, /* sysfs */
+ CPUCP_PACKET_FREQUENCY_GET, /* sysfs */
+ CPUCP_PACKET_LED_SET, /* debugfs */
+ CPUCP_PACKET_I2C_WR, /* debugfs */
+ CPUCP_PACKET_I2C_RD, /* debugfs */
+ CPUCP_PACKET_INFO_GET, /* IOCTL */
+ CPUCP_PACKET_FLASH_PROGRAM_REMOVED,
+ CPUCP_PACKET_UNMASK_RAZWI_IRQ, /* internal */
+ CPUCP_PACKET_UNMASK_RAZWI_IRQ_ARRAY, /* internal */
+ CPUCP_PACKET_TEST, /* internal */
+ CPUCP_PACKET_FREQUENCY_CURR_GET, /* sysfs */
+ CPUCP_PACKET_MAX_POWER_GET, /* sysfs */
+ CPUCP_PACKET_MAX_POWER_SET, /* sysfs */
+ CPUCP_PACKET_EEPROM_DATA_GET, /* sysfs */
+ CPUCP_PACKET_NIC_INFO_GET, /* internal */
+ CPUCP_PACKET_TEMPERATURE_SET, /* sysfs */
+ CPUCP_PACKET_VOLTAGE_SET, /* sysfs */
+ CPUCP_PACKET_CURRENT_SET, /* sysfs */
+ CPUCP_PACKET_PCIE_THROUGHPUT_GET, /* internal */
+ CPUCP_PACKET_PCIE_REPLAY_CNT_GET, /* internal */
+ CPUCP_PACKET_TOTAL_ENERGY_GET, /* internal */
+ CPUCP_PACKET_PLL_INFO_GET, /* internal */
+ CPUCP_PACKET_NIC_STATUS, /* internal */
+ CPUCP_PACKET_POWER_GET, /* internal */
+ CPUCP_PACKET_NIC_PFC_SET, /* internal */
+ CPUCP_PACKET_NIC_FAULT_GET, /* internal */
+ CPUCP_PACKET_NIC_LPBK_SET, /* internal */
+ CPUCP_PACKET_NIC_MAC_CFG, /* internal */
+ CPUCP_PACKET_MSI_INFO_SET, /* internal */
+ CPUCP_PACKET_NIC_XPCS91_REGS_GET, /* internal */
+ CPUCP_PACKET_NIC_STAT_REGS_GET, /* internal */
+ CPUCP_PACKET_NIC_STAT_REGS_CLR, /* internal */
+ CPUCP_PACKET_NIC_STAT_REGS_ALL_GET, /* internal */
+ CPUCP_PACKET_IS_IDLE_CHECK, /* internal */
+ CPUCP_PACKET_HBM_REPLACED_ROWS_INFO_GET,/* internal */
+ CPUCP_PACKET_HBM_PENDING_ROWS_STATUS, /* internal */
+ CPUCP_PACKET_POWER_SET, /* internal */
+ CPUCP_PACKET_RESERVED, /* not used */
+ CPUCP_PACKET_ENGINE_CORE_ASID_SET, /* internal */
+ CPUCP_PACKET_RESERVED2, /* not used */
+ CPUCP_PACKET_SEC_ATTEST_GET, /* internal */
+ CPUCP_PACKET_RESERVED3, /* not used */
+ CPUCP_PACKET_RESERVED4, /* not used */
+ CPUCP_PACKET_MONITOR_DUMP_GET, /* debugfs */
+ CPUCP_PACKET_RESERVED5, /* not used */
+ CPUCP_PACKET_RESERVED6, /* not used */
+ CPUCP_PACKET_RESERVED7, /* not used */
+ CPUCP_PACKET_GENERIC_PASSTHROUGH, /* IOCTL */
+ CPUCP_PACKET_RESERVED8, /* not used */
+ CPUCP_PACKET_ACTIVE_STATUS_SET, /* internal */
+ CPUCP_PACKET_RESERVED9, /* not used */
+ CPUCP_PACKET_RESERVED10, /* not used */
+ CPUCP_PACKET_RESERVED11, /* not used */
+ CPUCP_PACKET_RESERVED12, /* internal */
+ CPUCP_PACKET_RESERVED13, /* internal */
+ CPUCP_PACKET_SOFT_RESET, /* internal */
+ CPUCP_PACKET_INTS_REGISTER, /* internal */
+ CPUCP_PACKET_ID_MAX /* must be last */
+};
+
+#define CPUCP_PACKET_FENCE_VAL 0xFE8CE7A5
+
+#define CPUCP_PKT_CTL_RC_SHIFT 12
+#define CPUCP_PKT_CTL_RC_MASK 0x0000F000
+
+#define CPUCP_PKT_CTL_OPCODE_SHIFT 16
+#define CPUCP_PKT_CTL_OPCODE_MASK 0x1FFF0000
+
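+/*
+ * Illustrative sketch (not part of this interface): composing a packet ctl
+ * word from an opcode and reading back the return code, per the protocol
+ * description above. Helper names are hypothetical.
+ */
+static inline u32 cpucp_pkt_ctl_from_opcode(enum cpucp_packet_id opcode)
+{
+	return (opcode << CPUCP_PKT_CTL_OPCODE_SHIFT) &
+			CPUCP_PKT_CTL_OPCODE_MASK;
+}
+
+static inline u32 cpucp_pkt_ctl_to_rc(u32 ctl)
+{
+	return (ctl & CPUCP_PKT_CTL_RC_MASK) >> CPUCP_PKT_CTL_RC_SHIFT;
+}
+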
+#define CPUCP_PKT_RES_PLL_OUT0_SHIFT 0
+#define CPUCP_PKT_RES_PLL_OUT0_MASK 0x000000000000FFFFull
+#define CPUCP_PKT_RES_PLL_OUT1_SHIFT 16
+#define CPUCP_PKT_RES_PLL_OUT1_MASK 0x00000000FFFF0000ull
+#define CPUCP_PKT_RES_PLL_OUT2_SHIFT 32
+#define CPUCP_PKT_RES_PLL_OUT2_MASK 0x0000FFFF00000000ull
+#define CPUCP_PKT_RES_PLL_OUT3_SHIFT 48
+#define CPUCP_PKT_RES_PLL_OUT3_MASK 0xFFFF000000000000ull
+
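+/*
+ * Illustrative sketch (not part of this interface): unpacking the four
+ * 16-bit PLL output frequencies (MHz) from the 64-bit result field above.
+ * The helper name is hypothetical; idx is in the range [0, 3].
+ */
+static inline u16 cpucp_pll_res_get_out(u64 result, int idx)
+{
+	/* the outputs are packed 16 bits apart, per the OUT0..OUT3 masks */
+	return (result >> (idx * 16)) & 0xFFFF;
+}
+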
+#define CPUCP_PKT_RES_EEPROM_OUT0_SHIFT 0
+#define CPUCP_PKT_RES_EEPROM_OUT0_MASK 0x000000000000FFFFull
+#define CPUCP_PKT_RES_EEPROM_OUT1_SHIFT 16
+#define CPUCP_PKT_RES_EEPROM_OUT1_MASK 0x0000000000FF0000ull
+
+#define CPUCP_PKT_VAL_PFC_IN1_SHIFT 0
+#define CPUCP_PKT_VAL_PFC_IN1_MASK 0x0000000000000001ull
+#define CPUCP_PKT_VAL_PFC_IN2_SHIFT 1
+#define CPUCP_PKT_VAL_PFC_IN2_MASK 0x000000000000001Eull
+
+#define CPUCP_PKT_VAL_LPBK_IN1_SHIFT 0
+#define CPUCP_PKT_VAL_LPBK_IN1_MASK 0x0000000000000001ull
+#define CPUCP_PKT_VAL_LPBK_IN2_SHIFT 1
+#define CPUCP_PKT_VAL_LPBK_IN2_MASK 0x000000000000001Eull
+
+#define CPUCP_PKT_VAL_MAC_CNT_IN1_SHIFT 0
+#define CPUCP_PKT_VAL_MAC_CNT_IN1_MASK 0x0000000000000001ull
+#define CPUCP_PKT_VAL_MAC_CNT_IN2_SHIFT 1
+#define CPUCP_PKT_VAL_MAC_CNT_IN2_MASK 0x00000000FFFFFFFEull
+
+/* heartbeat status bits */
+#define CPUCP_PKT_HB_STATUS_EQ_FAULT_SHIFT 0
+#define CPUCP_PKT_HB_STATUS_EQ_FAULT_MASK 0x00000001
+
+struct cpucp_packet {
+ union {
+ __le64 value; /* For SET packets */
+ __le64 result; /* For GET packets */
+ __le64 addr; /* For PQ */
+ };
+
+ __le32 ctl;
+
+ __le32 fence; /* Signal to host that message is completed */
+
+ union {
+ struct {/* For temperature/current/voltage/fan/pwm get/set */
+ __le16 sensor_index;
+ __le16 type;
+ };
+
+ struct { /* For I2C read/write */
+ __u8 i2c_bus;
+ __u8 i2c_addr;
+ __u8 i2c_reg;
+			/*
+			 * In legacy implementations, i2c_len was not present;
+			 * it was unused and just added as pad.
+			 * So if i2c_len is 0, it is treated as legacy
+			 * and 1 byte is read/written; if i2c_len is specified,
+			 * it is treated as the new multi-byte r/w support.
+			 */
+ __u8 i2c_len;
+ };
+
+ struct {/* For PLL info fetch */
+ __le16 pll_type;
+ /* TODO pll_reg is kept temporary before removal */
+ __le16 pll_reg;
+ };
+
+ /* For any general request */
+ __le32 index;
+
+ /* For frequency get/set */
+ __le32 pll_index;
+
+ /* For led set */
+ __le32 led_index;
+
+ /* For get CpuCP info/EEPROM data/NIC info */
+ __le32 data_max_size;
+
+ /*
+ * For any general status bitmask. Shall be used whenever the
+ * result cannot be used to hold general purpose data.
+ */
+ __le32 status_mask;
+
+ /* random, used once number, for security packets */
+ __le32 nonce;
+ };
+
+ union {
+ /* For NIC requests */
+ __le32 port_index;
+
+ /* For Generic packet sub index */
+ __le32 pkt_subidx;
+ };
+};
+
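+/*
+ * Illustrative sketch (not part of this interface): preparing a
+ * CPUCP_PACKET_TEMPERATURE_GET request. Field usage follows the protocol
+ * description above; the helper name is hypothetical.
+ */
+static inline void cpucp_pkt_prep_temp_get(struct cpucp_packet *pkt,
+					   u16 sensor, u16 type)
+{
+	memset(pkt, 0, sizeof(*pkt));
+	pkt->ctl = cpu_to_le32(CPUCP_PACKET_TEMPERATURE_GET <<
+				CPUCP_PKT_CTL_OPCODE_SHIFT);
+	pkt->sensor_index = cpu_to_le16(sensor);
+	pkt->type = cpu_to_le16(type);
+}
+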
+struct cpucp_unmask_irq_arr_packet {
+ struct cpucp_packet cpucp_pkt;
+ __le32 length;
+ __le32 irqs[];
+};
+
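+/*
+ * Illustrative sketch (not part of this interface): the total on-the-wire
+ * size of an unmask-IRQ-array packet, accounting for the flexible irqs[]
+ * array. The helper name is hypothetical.
+ */
+static inline size_t cpucp_unmask_irq_arr_pkt_size(u32 num_irqs)
+{
+	return sizeof(struct cpucp_unmask_irq_arr_packet) +
+			num_irqs * sizeof(__le32);
+}
+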
+struct cpucp_nic_status_packet {
+ struct cpucp_packet cpucp_pkt;
+ __le32 length;
+ __le32 data[];
+};
+
+struct cpucp_array_data_packet {
+ struct cpucp_packet cpucp_pkt;
+ __le32 length;
+ __le32 data[];
+};
+
+enum cpucp_led_index {
+ CPUCP_LED0_INDEX = 0,
+ CPUCP_LED1_INDEX,
+ CPUCP_LED2_INDEX,
+ CPUCP_LED_MAX_INDEX = CPUCP_LED2_INDEX
+};
+
+/*
+ * enum cpucp_packet_rc - Error return code
+ * @cpucp_packet_success -> in case of success.
+ * @cpucp_packet_invalid -> kept to support first generation platforms.
+ * @cpucp_packet_fault -> in case of a processing error, e.g. failing to
+ * get a device binding or semaphore.
+ * @cpucp_packet_invalid_pkt -> when the cpucp packet is unsupported.
+ * @cpucp_packet_invalid_params -> when a parameter check fails, e.g. on a
+ * buffer length or an attribute value.
+ * @cpucp_packet_rc_max -> indicates the size of the enum, so it must be last.
+ */
+enum cpucp_packet_rc {
+ cpucp_packet_success,
+ cpucp_packet_invalid,
+ cpucp_packet_fault,
+ cpucp_packet_invalid_pkt,
+ cpucp_packet_invalid_params,
+ cpucp_packet_rc_max
+};
+
+/*
+ * cpucp_temp_type should adhere to hwmon_temp_attributes
+ * defined in Linux kernel hwmon.h file
+ */
+enum cpucp_temp_type {
+ cpucp_temp_input,
+ cpucp_temp_min = 4,
+ cpucp_temp_min_hyst,
+ cpucp_temp_max = 6,
+ cpucp_temp_max_hyst,
+ cpucp_temp_crit,
+ cpucp_temp_crit_hyst,
+ cpucp_temp_offset = 19,
+ cpucp_temp_lowest = 21,
+ cpucp_temp_highest = 22,
+ cpucp_temp_reset_history = 23,
+ cpucp_temp_warn = 24,
+ cpucp_temp_max_crit = 25,
+ cpucp_temp_max_warn = 26,
+};
+
+enum cpucp_in_attributes {
+ cpucp_in_input,
+ cpucp_in_min,
+ cpucp_in_max,
+ cpucp_in_lowest = 6,
+ cpucp_in_highest = 7,
+ cpucp_in_reset_history,
+ cpucp_in_intr_alarm_a,
+ cpucp_in_intr_alarm_b,
+};
+
+enum cpucp_curr_attributes {
+ cpucp_curr_input,
+ cpucp_curr_min,
+ cpucp_curr_max,
+ cpucp_curr_lowest = 6,
+ cpucp_curr_highest = 7,
+ cpucp_curr_reset_history
+};
+
+enum cpucp_fan_attributes {
+ cpucp_fan_input,
+ cpucp_fan_min = 2,
+ cpucp_fan_max
+};
+
+enum cpucp_pwm_attributes {
+ cpucp_pwm_input,
+ cpucp_pwm_enable
+};
+
+enum cpucp_pcie_throughput_attributes {
+ cpucp_pcie_throughput_tx,
+ cpucp_pcie_throughput_rx
+};
+
+/* TODO temporary kept before removal */
+enum cpucp_pll_reg_attributes {
+ cpucp_pll_nr_reg,
+ cpucp_pll_nf_reg,
+ cpucp_pll_od_reg,
+ cpucp_pll_div_factor_reg,
+ cpucp_pll_div_sel_reg
+};
+
+/* TODO temporary kept before removal */
+enum cpucp_pll_type_attributes {
+ cpucp_pll_cpu,
+ cpucp_pll_pci,
+};
+
+/*
+ * cpucp_power_type aligns with hwmon_power_attributes
+ * defined in Linux kernel hwmon.h file
+ */
+enum cpucp_power_type {
+ CPUCP_POWER_INPUT = 8,
+ CPUCP_POWER_INPUT_HIGHEST = 9,
+ CPUCP_POWER_RESET_INPUT_HISTORY = 11
+};
+
+/*
+ * MSI type enumeration table for all ASICs and future SW versions.
+ * For future ASIC-LKD compatibility, we can only add new enumerations
+ * at the end of the table (before CPUCP_NUM_OF_MSI_TYPES).
+ * Changing the order of entries or removing entries is not allowed.
+ */
+enum cpucp_msi_type {
+ CPUCP_EVENT_QUEUE_MSI_TYPE,
+ CPUCP_NIC_PORT1_MSI_TYPE,
+ CPUCP_NIC_PORT3_MSI_TYPE,
+ CPUCP_NIC_PORT5_MSI_TYPE,
+ CPUCP_NIC_PORT7_MSI_TYPE,
+ CPUCP_NIC_PORT9_MSI_TYPE,
+ CPUCP_EVENT_QUEUE_ERR_MSI_TYPE,
+ CPUCP_NUM_OF_MSI_TYPES
+};
+
+/*
+ * PLL enumeration table used for all ASICs and future SW versions.
+ * For future ASIC-LKD compatibility, we can only add new enumerations
+ * at the end of the table.
+ * Changing the order of entries or removing entries is not allowed.
+ */
+enum pll_index {
+ CPU_PLL = 0,
+ PCI_PLL = 1,
+ NIC_PLL = 2,
+ DMA_PLL = 3,
+ MESH_PLL = 4,
+ MME_PLL = 5,
+ TPC_PLL = 6,
+ IF_PLL = 7,
+ SRAM_PLL = 8,
+ NS_PLL = 9,
+ HBM_PLL = 10,
+ MSS_PLL = 11,
+ DDR_PLL = 12,
+ VID_PLL = 13,
+ BANK_PLL = 14,
+ MMU_PLL = 15,
+ IC_PLL = 16,
+ MC_PLL = 17,
+ EMMC_PLL = 18,
+ D2D_PLL = 19,
+ CS_PLL = 20,
+ C2C_PLL = 21,
+ NCH_PLL = 22,
+ C2M_PLL = 23,
+ PLL_MAX
+};
+
+enum rl_index {
+ TPC_RL = 0,
+ MME_RL,
+ EDMA_RL,
+};
+
+enum pvt_index {
+ PVT_SW,
+ PVT_SE,
+ PVT_NW,
+ PVT_NE
+};
+
+/* Event Queue Packets */
+
+struct eq_generic_event {
+ __le64 data[7];
+};
+
+/*
+ * CpuCP info
+ */
+
+#define CARD_NAME_MAX_LEN 16
+#define CPUCP_MAX_SENSORS 128
+#define CPUCP_MAX_NICS 128
+#define CPUCP_LANES_PER_NIC 4
+#define CPUCP_NIC_QSFP_EEPROM_MAX_LEN 1024
+#define CPUCP_MAX_NIC_LANES (CPUCP_MAX_NICS * CPUCP_LANES_PER_NIC)
+#define CPUCP_NIC_MASK_ARR_LEN ((CPUCP_MAX_NICS + 63) / 64)
+#define CPUCP_NIC_POLARITY_ARR_LEN ((CPUCP_MAX_NIC_LANES + 63) / 64)
+#define CPUCP_HBM_ROW_REPLACE_MAX 32
+
+struct cpucp_sensor {
+ __le32 type;
+ __le32 flags;
+};
+
+/**
+ * enum cpucp_card_types - ASIC card type.
+ * @cpucp_card_type_pci: PCI card.
+ * @cpucp_card_type_pmc: PCI Mezzanine Card.
+ */
+enum cpucp_card_types {
+ cpucp_card_type_pci,
+ cpucp_card_type_pmc
+};
+
+#define CPUCP_SEC_CONF_ENABLED_SHIFT 0
+#define CPUCP_SEC_CONF_ENABLED_MASK 0x00000001
+
+#define CPUCP_SEC_CONF_FLASH_WP_SHIFT 1
+#define CPUCP_SEC_CONF_FLASH_WP_MASK 0x00000002
+
+#define CPUCP_SEC_CONF_EEPROM_WP_SHIFT 2
+#define CPUCP_SEC_CONF_EEPROM_WP_MASK 0x00000004
+
+/**
+ * struct cpucp_security_info - Security information.
+ * @config: configuration bit field
+ * @keys_num: number of stored keys
+ * @revoked_keys: revoked keys bit field
+ * @min_svn: minimal security version
+ */
+struct cpucp_security_info {
+ __u8 config;
+ __u8 keys_num;
+ __u8 revoked_keys;
+ __u8 min_svn;
+};
+
+/**
+ * struct cpucp_info - Info from CpuCP that is necessary to the host's driver
+ * @sensors: available sensors description.
+ * @kernel_version: CpuCP linux kernel version.
+ * @reserved: reserved field.
+ * @card_type: card configuration type.
+ * @card_location: in a server, each card has different connections topology
+ * depending on its location (relevant for PMC card type)
+ * @cpld_version: CPLD programmed F/W version.
+ * @infineon_version: Infineon main DC-DC version.
+ * @fuse_version: silicon production FUSE information.
+ * @thermal_version: thermald S/W version.
+ * @cpucp_version: CpuCP S/W version.
+ * @infineon_second_stage_version: Infineon 2nd stage DC-DC version.
+ * @dram_size: available DRAM size.
+ * @card_name: card name that will be displayed in HWMON subsystem on the host
+ * @tpc_binning_mask: TPC binning mask, 1 bit per TPC instance
+ * (0 = functional, 1 = binned)
+ * @decoder_binning_mask: Decoder binning mask, 1 bit per decoder instance
+ * (0 = functional, 1 = binned), maximum 1 per dcore
+ * @sram_binning: Categorize SRAM functionality
+ * (0 = fully functional, 1 = lower-half is not functional,
+ * 2 = upper-half is not functional)
+ * @sec_info: security information
+ * @pll_map: Bit map of supported PLLs for current ASIC version.
+ * @mme_binning_mask: MME binning mask,
+ * bits [0:6] <==> dcore0 mme fma
+ * bits [7:13] <==> dcore1 mme fma
+ * bits [14:20] <==> dcore0 mme ima
+ * bits [21:27] <==> dcore1 mme ima
+ * For each group, if the 6th bit is set then first 5 bits
+ * represent the col's idx [0-31], otherwise these bits are
+ * ignored, and col idx 32 is binned. 7th bit is don't care.
+ * @dram_binning_mask: DRAM binning mask, 1 bit per dram instance
+ * (0 = functional 1 = binned)
+ * @memory_repair_flag: eFuse flag indicating memory repair
+ * @edma_binning_mask: EDMA binning mask, 1 bit per EDMA instance
+ * (0 = functional 1 = binned)
+ * @xbar_binning_mask: Xbar binning mask, 1 bit per Xbar instance
+ * (0 = functional 1 = binned)
+ * @interposer_version: Interposer version programmed in eFuse
+ * @substrate_version: Substrate version programmed in eFuse
+ * @eq_health_check_supported: eq health check feature supported in FW.
+ * @fw_hbm_region_size: Size in bytes of FW reserved region in HBM.
+ * @fw_os_version: Firmware OS Version
+ */
+struct cpucp_info {
+ struct cpucp_sensor sensors[CPUCP_MAX_SENSORS];
+ __u8 kernel_version[VERSION_MAX_LEN];
+ __le32 reserved;
+ __le32 card_type;
+ __le32 card_location;
+ __le32 cpld_version;
+ __le32 infineon_version;
+ __u8 fuse_version[VERSION_MAX_LEN];
+ __u8 thermal_version[VERSION_MAX_LEN];
+ __u8 cpucp_version[VERSION_MAX_LEN];
+ __le32 infineon_second_stage_version;
+ __le64 dram_size;
+ char card_name[CARD_NAME_MAX_LEN];
+ __le64 tpc_binning_mask;
+ __le64 decoder_binning_mask;
+ __u8 sram_binning;
+ __u8 dram_binning_mask;
+ __u8 memory_repair_flag;
+ __u8 edma_binning_mask;
+ __u8 xbar_binning_mask;
+ __u8 interposer_version;
+ __u8 substrate_version;
+ __u8 eq_health_check_supported;
+ struct cpucp_security_info sec_info;
+ __le32 fw_hbm_region_size;
+ __u8 pll_map[PLL_MAP_LEN];
+ __le64 mme_binning_mask;
+ __u8 fw_os_version[VERSION_MAX_LEN];
+};
+
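+/*
+ * Illustrative sketch (not part of this interface): testing whether a PLL
+ * from enum pll_index is advertised in the cpucp_info pll_map bit map.
+ * The helper name is hypothetical.
+ */
+static inline bool cpucp_pll_supported(const struct cpucp_info *info, u32 pll)
+{
+	if (pll >= PLL_MAP_MAX_BITS)
+		return false;
+
+	return !!(info->pll_map[pll / 8] & (1 << (pll % 8)));
+}
+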
+struct cpucp_mac_addr {
+ __u8 mac_addr[ETH_ALEN];
+};
+
+enum cpucp_serdes_type {
+ TYPE_1_SERDES_TYPE,
+ TYPE_2_SERDES_TYPE,
+ HLS1_SERDES_TYPE,
+ HLS1H_SERDES_TYPE,
+ HLS2_SERDES_TYPE,
+ HLS2_TYPE_1_SERDES_TYPE,
+ MAX_NUM_SERDES_TYPE, /* number of types */
+ UNKNOWN_SERDES_TYPE = 0xFFFF /* serdes_type is u16 */
+};
+
+struct cpucp_nic_info {
+ struct cpucp_mac_addr mac_addrs[CPUCP_MAX_NICS];
+ __le64 link_mask[CPUCP_NIC_MASK_ARR_LEN];
+ __le64 pol_tx_mask[CPUCP_NIC_POLARITY_ARR_LEN];
+ __le64 pol_rx_mask[CPUCP_NIC_POLARITY_ARR_LEN];
+ __le64 link_ext_mask[CPUCP_NIC_MASK_ARR_LEN];
+ __u8 qsfp_eeprom[CPUCP_NIC_QSFP_EEPROM_MAX_LEN];
+ __le64 auto_neg_mask[CPUCP_NIC_MASK_ARR_LEN];
+ __le16 serdes_type; /* enum cpucp_serdes_type */
+ __le16 tx_swap_map[CPUCP_MAX_NICS];
+ __u8 reserved[6];
+};
+
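+/*
+ * Illustrative sketch (not part of this interface): the per-port NIC masks
+ * above are packed 64 ports per __le64 word. A hypothetical helper to test
+ * a port's link bit:
+ */
+static inline bool cpucp_nic_port_linked(const struct cpucp_nic_info *info,
+					 u32 port)
+{
+	if (port >= CPUCP_MAX_NICS)
+		return false;
+
+	return !!(le64_to_cpu(info->link_mask[port / 64]) &
+			(1ull << (port % 64)));
+}
+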
+#define PAGE_DISCARD_MAX 64
+
+struct page_discard_info {
+ __u8 num_entries;
+ __u8 reserved[7];
+ __le32 mmu_page_idx[PAGE_DISCARD_MAX];
+};
+
+/*
+ * struct frac_val - fractional value represented by "integer.frac".
+ * @integer: the integer part of the fractional value;
+ * @frac: the fractional part of the fractional value.
+ */
+struct frac_val {
+ union {
+ struct {
+ __le16 integer;
+ __le16 frac;
+ };
+ __le32 val;
+ };
+};
+
+/*
+ * struct ser_val - the SER (symbol error rate) value is represented by "integer * 10 ^ -exp".
+ * @integer: the integer part of the SER value;
+ * @exp: the exponent part of the SER value.
+ */
+struct ser_val {
+ __le16 integer;
+ __le16 exp;
+};
+
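+/*
+ * Illustrative sketch (not part of this interface): printing the two value
+ * encodings above. A frac_val of integer=12, frac=34 reads as "12.34"; a
+ * ser_val of integer=25, exp=9 reads as 25e-9. Helper names are hypothetical.
+ */
+static inline void frac_val_print(struct frac_val v)
+{
+	pr_info("%u.%u\n", le16_to_cpu(v.integer), le16_to_cpu(v.frac));
+}
+
+static inline void ser_val_print(struct ser_val v)
+{
+	/* the encoded value is integer * 10^-exp */
+	pr_info("%ue-%u\n", le16_to_cpu(v.integer), le16_to_cpu(v.exp));
+}
+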
+/*
+ * struct cpucp_nic_status - describes the status of a NIC port.
+ * @port: NIC port index.
+ * @bad_format_cnt: e.g. CRC.
+ * @responder_out_of_sequence_psn_cnt: e.g. NAK.
+ * @high_ber_reinit: link reinit due to high BER.
+ * @correctable_err_cnt: e.g. bit-flip.
+ * @uncorrectable_err_cnt: e.g. MAC errors.
+ * @retraining_cnt: re-training counter.
+ * @up: is port up.
+ * @pcs_link: has PCS link.
+ * @phy_ready: is PHY ready.
+ * @auto_neg: is Autoneg enabled.
+ * @timeout_retransmission_cnt: timeout retransmission events.
+ * @high_ber_cnt: high ber events.
+ * @pre_fec_ser: pre FEC SER value.
+ * @post_fec_ser: post FEC SER value.
+ * @bandwidth: measured bandwidth.
+ * @lat: measured latency.
+ */
+struct cpucp_nic_status {
+ __le32 port;
+ __le32 bad_format_cnt;
+ __le32 responder_out_of_sequence_psn_cnt;
+ __le32 high_ber_reinit;
+ __le32 correctable_err_cnt;
+ __le32 uncorrectable_err_cnt;
+ __le32 retraining_cnt;
+ __u8 up;
+ __u8 pcs_link;
+ __u8 phy_ready;
+ __u8 auto_neg;
+ __le32 timeout_retransmission_cnt;
+ __le32 high_ber_cnt;
+ struct ser_val pre_fec_ser;
+ struct ser_val post_fec_ser;
+ struct frac_val bandwidth;
+ struct frac_val lat;
+};
+
+enum cpucp_hbm_row_replace_cause {
+ REPLACE_CAUSE_DOUBLE_ECC_ERR,
+ REPLACE_CAUSE_MULTI_SINGLE_ECC_ERR,
+};
+
+struct cpucp_hbm_row_info {
+ __u8 hbm_idx;
+ __u8 pc;
+ __u8 sid;
+ __u8 bank_idx;
+ __le16 row_addr;
+ __u8 replaced_row_cause; /* enum cpucp_hbm_row_replace_cause */
+ __u8 pad;
+};
+
+struct cpucp_hbm_row_replaced_rows_info {
+ __le16 num_replaced_rows;
+ __u8 pad[6];
+ struct cpucp_hbm_row_info replaced_rows[CPUCP_HBM_ROW_REPLACE_MAX];
+};
+
+enum cpu_reset_status {
+ CPU_RST_STATUS_NA = 0,
+ CPU_RST_STATUS_SOFT_RST_DONE = 1,
+};
+
+#define SEC_PCR_DATA_BUF_SZ 256
+#define SEC_PCR_QUOTE_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */
+#define SEC_SIGNATURE_BUF_SZ 255 /* (256 - 1) 1 byte used for size */
+#define SEC_PUB_DATA_BUF_SZ 510 /* (512 - 2) 2 bytes used for size */
+#define SEC_CERTIFICATE_BUF_SZ 2046 /* (2048 - 2) 2 bytes used for size */
+
+/*
+ * struct cpucp_sec_attest_info - attestation report of the boot
+ * @pcr_data: raw values of the PCR registers
+ * @pcr_num_reg: number of PCR registers in the pcr_data array
+ * @pcr_reg_len: length of each PCR register in the pcr_data array (bytes)
+ * @nonce: number used only once; a random number provided by the host. It is
+ * also passed to the quote command as qualifying data.
+ * @pcr_quote_len: length of the attestation quote data (bytes)
+ * @pcr_quote: attestation report data structure
+ * @quote_sig_len: length of the attestation report signature (bytes)
+ * @quote_sig: signature structure of the attestation report
+ * @pub_data_len: length of the public data (bytes)
+ * @public_data: public key for the signed attestation
+ * (outPublic + name + qualifiedName)
+ * @certificate_len: length of the certificate (bytes)
+ * @certificate: certificate for the attestation signing key
+ */
+struct cpucp_sec_attest_info {
+ __u8 pcr_data[SEC_PCR_DATA_BUF_SZ];
+ __u8 pcr_num_reg;
+ __u8 pcr_reg_len;
+ __le16 pad0;
+ __le32 nonce;
+ __le16 pcr_quote_len;
+ __u8 pcr_quote[SEC_PCR_QUOTE_BUF_SZ];
+ __u8 quote_sig_len;
+ __u8 quote_sig[SEC_SIGNATURE_BUF_SZ];
+ __le16 pub_data_len;
+ __u8 public_data[SEC_PUB_DATA_BUF_SZ];
+ __le16 certificate_len;
+ __u8 certificate[SEC_CERTIFICATE_BUF_SZ];
+};
+
+/*
+ * struct cpucp_dev_info_signed - device information signed by a secured device
+ * @info: device information structure as defined above
+ * @nonce: number used only once; a random number provided by the host. This
+ * number is hashed and signed along with the device information.
+ * @info_sig_len: length of the attestation signature (bytes)
+ * @info_sig: signature of the info + nonce data.
+ * @pub_data_len: length of the public data (bytes)
+ * @public_data: public key for the signed info data
+ * (outPublic + name + qualifiedName)
+ * @certificate_len: length of the certificate (bytes)
+ * @certificate: certificate for the signing key
+ */
+struct cpucp_dev_info_signed {
+	struct cpucp_info info; /* assumed to be 64-bit aligned */
+ __le32 nonce;
+ __le32 pad0;
+ __u8 info_sig_len;
+ __u8 info_sig[SEC_SIGNATURE_BUF_SZ];
+ __le16 pub_data_len;
+ __u8 public_data[SEC_PUB_DATA_BUF_SZ];
+ __le16 certificate_len;
+ __u8 certificate[SEC_CERTIFICATE_BUF_SZ];
+};
+
+#define DCORE_MON_REGS_SZ 512
+/*
+ * struct dcore_monitor_regs_data - DCORE monitor regs data.
+ * the structure follows sync manager block layout. Obsolete.
+ * @mon_pay_addrl: array of payload address low bits.
+ * @mon_pay_addrh: array of payload address high bits.
+ * @mon_pay_data: array of payload data.
+ * @mon_arm: array of monitor arm.
+ * @mon_status: array of monitor status.
+ */
+struct dcore_monitor_regs_data {
+ __le32 mon_pay_addrl[DCORE_MON_REGS_SZ];
+ __le32 mon_pay_addrh[DCORE_MON_REGS_SZ];
+ __le32 mon_pay_data[DCORE_MON_REGS_SZ];
+ __le32 mon_arm[DCORE_MON_REGS_SZ];
+ __le32 mon_status[DCORE_MON_REGS_SZ];
+};
+
+/* contains SM data for each SYNC_MNGR (Obsolete) */
+struct cpucp_monitor_dump {
+ struct dcore_monitor_regs_data sync_mngr_w_s;
+ struct dcore_monitor_regs_data sync_mngr_e_s;
+ struct dcore_monitor_regs_data sync_mngr_w_n;
+ struct dcore_monitor_regs_data sync_mngr_e_n;
+};
+
+/*
+ * The type of the generic request (and other input arguments) is fetched from
+ * the user by reading the "pkt_subidx" field in struct cpucp_packet.
+ *
+ * HL_PASSTHROUGH_VERSIONS - Fetch all firmware versions.
+ */
+enum hl_passthrough_type {
+ HL_PASSTHROUGH_VERSIONS,
+};
+
+#endif /* CPUCP_IF_H */
diff --git a/include/linux/habanalabs/hl_boot_if.h b/include/linux/habanalabs/hl_boot_if.h
new file mode 100644
index 000000000000..93366d5621fd
--- /dev/null
+++ b/include/linux/habanalabs/hl_boot_if.h
@@ -0,0 +1,792 @@
+/* SPDX-License-Identifier: GPL-2.0
+ *
+ * Copyright 2018-2020 HabanaLabs, Ltd.
+ * All Rights Reserved.
+ *
+ */
+
+#ifndef HL_BOOT_IF_H
+#define HL_BOOT_IF_H
+
+#define LKD_HARD_RESET_MAGIC 0xED7BD694 /* deprecated - do not use */
+#define HL_POWER9_HOST_MAGIC 0x1DA30009
+
+#define BOOT_FIT_SRAM_OFFSET 0x200000
+
+#define VERSION_MAX_LEN 128
+
+enum cpu_boot_err {
+ CPU_BOOT_ERR_DRAM_INIT_FAIL = 0,
+ CPU_BOOT_ERR_FIT_CORRUPTED = 1,
+ CPU_BOOT_ERR_TS_INIT_FAIL = 2,
+ CPU_BOOT_ERR_DRAM_SKIPPED = 3,
+ CPU_BOOT_ERR_BMC_WAIT_SKIPPED = 4,
+ CPU_BOOT_ERR_NIC_DATA_NOT_RDY = 5,
+ CPU_BOOT_ERR_NIC_FW_FAIL = 6,
+ CPU_BOOT_ERR_SECURITY_NOT_RDY = 7,
+ CPU_BOOT_ERR_SECURITY_FAIL = 8,
+ CPU_BOOT_ERR_EFUSE_FAIL = 9,
+ CPU_BOOT_ERR_PRI_IMG_VER_FAIL = 10,
+ CPU_BOOT_ERR_SEC_IMG_VER_FAIL = 11,
+ CPU_BOOT_ERR_PLL_FAIL = 12,
+ CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL = 13,
+ CPU_BOOT_ERR_BOOT_FW_CRIT_ERR = 18,
+ CPU_BOOT_ERR_BINNING_FAIL = 19,
+ CPU_BOOT_ERR_TPM_FAIL = 20,
+ CPU_BOOT_ERR_TMP_THRESH_INIT_FAIL = 21,
+ CPU_BOOT_ERR_EEPROM_FAIL = 22,
+ CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL = 23,
+ CPU_BOOT_ERR_ENABLED = 31,
+ CPU_BOOT_ERR_SCND_EN = 63,
+ CPU_BOOT_ERR_LAST = 64 /* we have 2 registers of 32 bits */
+};
+
+/*
+ * Mask for fatal failures
+ * This mask contains all possible fatal failures, and a dynamic code
+ * will clear the non-relevant ones.
+ */
+#define CPU_BOOT_ERR_FATAL_MASK \
+ ((1 << CPU_BOOT_ERR_DRAM_INIT_FAIL) | \
+ (1 << CPU_BOOT_ERR_PLL_FAIL) | \
+ (1 << CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL) | \
+ (1 << CPU_BOOT_ERR_BINNING_FAIL) | \
+ (1 << CPU_BOOT_ERR_DRAM_SKIPPED) | \
+ (1 << CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL) | \
+ (1 << CPU_BOOT_ERR_EEPROM_FAIL))
+
+/*
+ * CPU error bits in BOOT_ERROR registers
+ *
+ * CPU_BOOT_ERR0_DRAM_INIT_FAIL DRAM initialization failed.
+ * DRAM is not reliable for use.
+ *
+ * CPU_BOOT_ERR0_FIT_CORRUPTED FIT data integrity verification of the
+ * image provided by the host has failed.
+ *
+ * CPU_BOOT_ERR0_TS_INIT_FAIL Thermal Sensor initialization failed.
+ * Boot continues as usual, but keep in
+ * mind this is a warning.
+ *
+ * CPU_BOOT_ERR0_DRAM_SKIPPED DRAM initialization has been skipped.
+ * Skipping DRAM initialization has been
+ * requested (e.g. strap, command, etc.)
+ * and FW skipped the DRAM initialization.
+ * Host can initialize the DRAM.
+ *
+ * CPU_BOOT_ERR0_BMC_WAIT_SKIPPED Waiting for BMC data will be skipped.
+ * Meaning the BMC data might not be
+ * available until reset.
+ *
+ * CPU_BOOT_ERR0_NIC_DATA_NOT_RDY NIC data from BMC is not ready.
+ * BMC has not provided the NIC data yet.
+ * Once provided this bit will be cleared.
+ *
+ * CPU_BOOT_ERR0_NIC_FW_FAIL NIC FW loading failed.
+ * The NIC FW loading and initialization
+ * failed. This means NICs are not usable.
+ *
+ * CPU_BOOT_ERR0_SECURITY_NOT_RDY Chip security initialization has been
+ * started, but is not ready yet - chip
+ * cannot be accessed.
+ *
+ * CPU_BOOT_ERR0_SECURITY_FAIL Security related tasks have failed.
+ * The tasks are security init (root of
+ * trust), boot authentication (chain of
+ * trust), data packets authentication.
+ *
+ * CPU_BOOT_ERR0_EFUSE_FAIL Reading from eFuse failed.
+ * The PCI device ID might be wrong.
+ *
+ * CPU_BOOT_ERR0_PRI_IMG_VER_FAIL Verification of primary image failed.
+ * It means that the ppboot checksum
+ * verification for the preboot primary
+ * image has failed to match expected
+ * checksum. Trying to program image again
+ * might solve this.
+ *
+ * CPU_BOOT_ERR0_SEC_IMG_VER_FAIL Verification of secondary image failed.
+ * It means that the ppboot checksum
+ * verification for the preboot secondary
+ * image has failed to match expected
+ * checksum. Trying to program image again
+ * might solve this.
+ *
+ * CPU_BOOT_ERR0_PLL_FAIL PLL settings failed, meaning that one
+ * of the PLLs remains in REF_CLK
+ *
+ * CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL Device is unusable and customer support
+ * should be contacted.
+ *
+ * CPU_BOOT_ERR0_BOOT_FW_CRIT_ERR Critical error was detected during
+ * the execution of ppboot or preboot.
+ * for example: stack overflow.
+ *
+ * CPU_BOOT_ERR0_BINNING_FAIL Binning settings failed, meaning
+ * malfunctioning components might still be
+ * in use.
+ *
+ * CPU_BOOT_ERR0_TPM_FAIL TPM verification flow failed.
+ *
+ * CPU_BOOT_ERR0_TMP_THRESH_INIT_FAIL Failed to set threshold for temperature
+ * sensor.
+ *
+ * CPU_BOOT_ERR_EEPROM_FAIL Failed reading EEPROM data. Defaults
+ * are used.
+ *
+ * CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL Failed scrubbing the Engines/ARCFarm
+ * memories. Boot disabled until reset.
+ *
+ * CPU_BOOT_ERR0_ENABLED Error registers enabled.
+ * This is a main indication that the
+ * running FW populates the error
+ * registers. Meaning the error bits are
+ * not garbage, but actual error statuses.
+ */
+#define CPU_BOOT_ERR0_DRAM_INIT_FAIL (1 << CPU_BOOT_ERR_DRAM_INIT_FAIL)
+#define CPU_BOOT_ERR0_FIT_CORRUPTED (1 << CPU_BOOT_ERR_FIT_CORRUPTED)
+#define CPU_BOOT_ERR0_TS_INIT_FAIL (1 << CPU_BOOT_ERR_TS_INIT_FAIL)
+#define CPU_BOOT_ERR0_DRAM_SKIPPED (1 << CPU_BOOT_ERR_DRAM_SKIPPED)
+#define CPU_BOOT_ERR0_BMC_WAIT_SKIPPED (1 << CPU_BOOT_ERR_BMC_WAIT_SKIPPED)
+#define CPU_BOOT_ERR0_NIC_DATA_NOT_RDY (1 << CPU_BOOT_ERR_NIC_DATA_NOT_RDY)
+#define CPU_BOOT_ERR0_NIC_FW_FAIL (1 << CPU_BOOT_ERR_NIC_FW_FAIL)
+#define CPU_BOOT_ERR0_SECURITY_NOT_RDY (1 << CPU_BOOT_ERR_SECURITY_NOT_RDY)
+#define CPU_BOOT_ERR0_SECURITY_FAIL (1 << CPU_BOOT_ERR_SECURITY_FAIL)
+#define CPU_BOOT_ERR0_EFUSE_FAIL (1 << CPU_BOOT_ERR_EFUSE_FAIL)
+#define CPU_BOOT_ERR0_PRI_IMG_VER_FAIL (1 << CPU_BOOT_ERR_PRI_IMG_VER_FAIL)
+#define CPU_BOOT_ERR0_SEC_IMG_VER_FAIL (1 << CPU_BOOT_ERR_SEC_IMG_VER_FAIL)
+#define CPU_BOOT_ERR0_PLL_FAIL (1 << CPU_BOOT_ERR_PLL_FAIL)
+#define CPU_BOOT_ERR0_DEVICE_UNUSABLE_FAIL (1 << CPU_BOOT_ERR_DEVICE_UNUSABLE_FAIL)
+#define CPU_BOOT_ERR0_BOOT_FW_CRIT_ERR (1 << CPU_BOOT_ERR_BOOT_FW_CRIT_ERR)
+#define CPU_BOOT_ERR0_BINNING_FAIL (1 << CPU_BOOT_ERR_BINNING_FAIL)
+#define CPU_BOOT_ERR0_TPM_FAIL (1 << CPU_BOOT_ERR_TPM_FAIL)
+#define CPU_BOOT_ERR0_TMP_THRESH_INIT_FAIL (1 << CPU_BOOT_ERR_TMP_THRESH_INIT_FAIL)
+#define CPU_BOOT_ERR0_EEPROM_FAIL (1 << CPU_BOOT_ERR_EEPROM_FAIL)
+#define CPU_BOOT_ERR0_ENG_ARC_MEM_SCRUB_FAIL (1 << CPU_BOOT_ERR_ENG_ARC_MEM_SCRUB_FAIL)
+#define CPU_BOOT_ERR0_ENABLED (1 << CPU_BOOT_ERR_ENABLED)
+#define CPU_BOOT_ERR1_ENABLED (1 << CPU_BOOT_ERR_ENABLED)
+
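+/*
+ * Illustrative sketch (not part of this interface): a host-side check of the
+ * first boot error register against the fatal mask above. The helper name is
+ * hypothetical; err0 is the value read from the first BOOT_ERROR register.
+ */
+static inline bool cpu_boot_err0_is_fatal(u32 err0)
+{
+	/* error bits are meaningful only once FW sets the ENABLED bit */
+	if (!(err0 & CPU_BOOT_ERR0_ENABLED))
+		return false;
+
+	return !!(err0 & CPU_BOOT_ERR_FATAL_MASK);
+}
+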
+enum cpu_boot_dev_sts {
+ CPU_BOOT_DEV_STS_SECURITY_EN = 0,
+ CPU_BOOT_DEV_STS_DEBUG_EN = 1,
+ CPU_BOOT_DEV_STS_WATCHDOG_EN = 2,
+ CPU_BOOT_DEV_STS_DRAM_INIT_EN = 3,
+ CPU_BOOT_DEV_STS_BMC_WAIT_EN = 4,
+ CPU_BOOT_DEV_STS_E2E_CRED_EN = 5,
+ CPU_BOOT_DEV_STS_HBM_CRED_EN = 6,
+ CPU_BOOT_DEV_STS_RL_EN = 7,
+ CPU_BOOT_DEV_STS_SRAM_SCR_EN = 8,
+ CPU_BOOT_DEV_STS_DRAM_SCR_EN = 9,
+ CPU_BOOT_DEV_STS_FW_HARD_RST_EN = 10,
+ CPU_BOOT_DEV_STS_PLL_INFO_EN = 11,
+ CPU_BOOT_DEV_STS_SP_SRAM_EN = 12,
+ CPU_BOOT_DEV_STS_CLK_GATE_EN = 13,
+ CPU_BOOT_DEV_STS_HBM_ECC_EN = 14,
+ CPU_BOOT_DEV_STS_PKT_PI_ACK_EN = 15,
+ CPU_BOOT_DEV_STS_FW_LD_COM_EN = 16,
+ CPU_BOOT_DEV_STS_FW_IATU_CONF_EN = 17,
+ CPU_BOOT_DEV_STS_FW_NIC_MAC_EN = 18,
+ CPU_BOOT_DEV_STS_DYN_PLL_EN = 19,
+ CPU_BOOT_DEV_STS_GIC_PRIVILEGED_EN = 20,
+ CPU_BOOT_DEV_STS_EQ_INDEX_EN = 21,
+ CPU_BOOT_DEV_STS_MULTI_IRQ_POLL_EN = 22,
+ CPU_BOOT_DEV_STS_FW_NIC_STAT_XPCS91_EN = 23,
+ CPU_BOOT_DEV_STS_FW_NIC_STAT_EXT_EN = 24,
+ CPU_BOOT_DEV_STS_IS_IDLE_CHECK_EN = 25,
+ CPU_BOOT_DEV_STS_MAP_HWMON_EN = 26,
+ CPU_BOOT_DEV_STS_ENABLED = 31,
+ CPU_BOOT_DEV_STS_SCND_EN = 63,
+ CPU_BOOT_DEV_STS_LAST = 64 /* we have 2 registers of 32 bits */
+};
+
+/*
+ * BOOT DEVICE STATUS bits in BOOT_DEVICE_STS registers
+ *
+ * CPU_BOOT_DEV_STS0_SECURITY_EN Security is Enabled.
+ * This is an indication for security
+ * enabled in FW, which means that
+ * all conditions for security are met:
+ * device is indicated as security enabled,
+ * registers are protected, and device
+ * uses keys for image verification.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_DEBUG_EN Debug is enabled.
+ * Enabled when JTAG or DEBUG is enabled
+ * in FW.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_WATCHDOG_EN Watchdog is enabled.
+ * Watchdog is enabled in FW.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_DRAM_INIT_EN DRAM initialization is enabled.
+ * DRAM initialization has been done in FW.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_BMC_WAIT_EN Waiting for BMC data enabled.
+ * If set, it means that during boot,
+ * FW waited for BMC data.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_E2E_CRED_EN E2E credits initialized.
+ * FW initialized E2E credits.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_HBM_CRED_EN HBM credits initialized.
+ * FW initialized HBM credits.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_RL_EN Rate limiter initialized.
+ * FW initialized rate limiter.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_SRAM_SCR_EN SRAM scrambler enabled.
+ * FW initialized SRAM scrambler.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_DRAM_SCR_EN DRAM scrambler enabled.
+ * FW initialized DRAM scrambler.
+ * Initialized in: u-boot
+ *
+ * CPU_BOOT_DEV_STS0_FW_HARD_RST_EN FW hard reset procedure is enabled.
+ * FW has the hard reset procedure
+ * implemented. This means that FW will
+ * perform hard reset procedure on
+ * receiving the halt-machine event.
+ * Initialized in: preboot, u-boot, linux
+ *
+ * CPU_BOOT_DEV_STS0_PLL_INFO_EN FW retrieval of PLL info is enabled.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_SP_SRAM_EN SP SRAM is initialized and available
+ * for use.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_CLK_GATE_EN Clock Gating enabled.
+ * FW initialized Clock Gating.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_HBM_ECC_EN HBM ECC handling Enabled.
+ * FW handles HBM ECC indications.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN Packets ack value used in the armcpd
+ * is set to the PI counter.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_FW_LD_COM_EN Flexible FW loading communication
+ * protocol is enabled.
+ * Initialized in: preboot
+ *
+ * CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN FW iATU configuration is enabled.
+ * If this bit is set, the iATU has been
+ * configured and is ready for use.
+ * Initialized in: ppboot
+ *
+ * CPU_BOOT_DEV_STS0_FW_NIC_MAC_EN NIC MAC channels init is done by FW and
+ * any access to them is done via the FW.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_DYN_PLL_EN Dynamic PLL configuration is enabled.
+ * FW sends to host a bitmap of supported
+ * PLLs.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN GIC access permission only from
+ * privileged entity. FW sets this status
+ * bit for the host. If this bit is set,
+ * the GIC cannot be accessed from the host.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_EQ_INDEX_EN Event Queue (EQ) index is a running
+ * index for each new event sent to host.
+ * This is used as a method in host to
+ * identify that the waiting event in
+ * queue is actually a new event which
+ * was not served before.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN Use multiple scratchpad interfaces to
+ * prevent IRQs overriding each other.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_FW_NIC_STAT_XPCS91_EN
+ * NIC STAT and XPCS91 access is restricted
+ * and is done via FW only.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_FW_NIC_STAT_EXT_EN
+ * NIC STAT get all is supported.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_IS_IDLE_CHECK_EN
+ * F/W checks if the device is idle by reading a defined set
+ * of registers. It returns a bitmask of all the engines,
+ * where a bit is set if the engine is not idle.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_MAP_HWMON_EN
+ * If set, the f/w supports proprietary
+ * HWMON enum mapping to cpucp enums.
+ * Initialized in: linux
+ *
+ * CPU_BOOT_DEV_STS0_ENABLED Device status register enabled.
+ * This is a main indication that the
+ * running FW populates the device status
+ * register. Meaning the device status
+ * bits are not garbage, but actual
+ * statuses.
+ * Initialized in: preboot
+ *
+ */
+#define CPU_BOOT_DEV_STS0_SECURITY_EN (1 << CPU_BOOT_DEV_STS_SECURITY_EN)
+#define CPU_BOOT_DEV_STS0_DEBUG_EN (1 << CPU_BOOT_DEV_STS_DEBUG_EN)
+#define CPU_BOOT_DEV_STS0_WATCHDOG_EN (1 << CPU_BOOT_DEV_STS_WATCHDOG_EN)
+#define CPU_BOOT_DEV_STS0_DRAM_INIT_EN (1 << CPU_BOOT_DEV_STS_DRAM_INIT_EN)
+#define CPU_BOOT_DEV_STS0_BMC_WAIT_EN (1 << CPU_BOOT_DEV_STS_BMC_WAIT_EN)
+#define CPU_BOOT_DEV_STS0_E2E_CRED_EN (1 << CPU_BOOT_DEV_STS_E2E_CRED_EN)
+#define CPU_BOOT_DEV_STS0_HBM_CRED_EN (1 << CPU_BOOT_DEV_STS_HBM_CRED_EN)
+#define CPU_BOOT_DEV_STS0_RL_EN (1 << CPU_BOOT_DEV_STS_RL_EN)
+#define CPU_BOOT_DEV_STS0_SRAM_SCR_EN (1 << CPU_BOOT_DEV_STS_SRAM_SCR_EN)
+#define CPU_BOOT_DEV_STS0_DRAM_SCR_EN (1 << CPU_BOOT_DEV_STS_DRAM_SCR_EN)
+#define CPU_BOOT_DEV_STS0_FW_HARD_RST_EN (1 << CPU_BOOT_DEV_STS_FW_HARD_RST_EN)
+#define CPU_BOOT_DEV_STS0_PLL_INFO_EN (1 << CPU_BOOT_DEV_STS_PLL_INFO_EN)
+#define CPU_BOOT_DEV_STS0_SP_SRAM_EN (1 << CPU_BOOT_DEV_STS_SP_SRAM_EN)
+#define CPU_BOOT_DEV_STS0_CLK_GATE_EN (1 << CPU_BOOT_DEV_STS_CLK_GATE_EN)
+#define CPU_BOOT_DEV_STS0_HBM_ECC_EN (1 << CPU_BOOT_DEV_STS_HBM_ECC_EN)
+#define CPU_BOOT_DEV_STS0_PKT_PI_ACK_EN (1 << CPU_BOOT_DEV_STS_PKT_PI_ACK_EN)
+#define CPU_BOOT_DEV_STS0_FW_LD_COM_EN (1 << CPU_BOOT_DEV_STS_FW_LD_COM_EN)
+#define CPU_BOOT_DEV_STS0_FW_IATU_CONF_EN (1 << CPU_BOOT_DEV_STS_FW_IATU_CONF_EN)
+#define CPU_BOOT_DEV_STS0_FW_NIC_MAC_EN (1 << CPU_BOOT_DEV_STS_FW_NIC_MAC_EN)
+#define CPU_BOOT_DEV_STS0_DYN_PLL_EN (1 << CPU_BOOT_DEV_STS_DYN_PLL_EN)
+#define CPU_BOOT_DEV_STS0_GIC_PRIVILEGED_EN (1 << CPU_BOOT_DEV_STS_GIC_PRIVILEGED_EN)
+#define CPU_BOOT_DEV_STS0_EQ_INDEX_EN (1 << CPU_BOOT_DEV_STS_EQ_INDEX_EN)
+#define CPU_BOOT_DEV_STS0_MULTI_IRQ_POLL_EN (1 << CPU_BOOT_DEV_STS_MULTI_IRQ_POLL_EN)
+#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_XPCS91_EN (1 << CPU_BOOT_DEV_STS_FW_NIC_STAT_XPCS91_EN)
+#define CPU_BOOT_DEV_STS0_FW_NIC_STAT_EXT_EN (1 << CPU_BOOT_DEV_STS_FW_NIC_STAT_EXT_EN)
+#define CPU_BOOT_DEV_STS0_IS_IDLE_CHECK_EN (1 << CPU_BOOT_DEV_STS_IS_IDLE_CHECK_EN)
+#define CPU_BOOT_DEV_STS0_MAP_HWMON_EN (1 << CPU_BOOT_DEV_STS_MAP_HWMON_EN)
+#define CPU_BOOT_DEV_STS0_ENABLED (1 << CPU_BOOT_DEV_STS_ENABLED)
+#define CPU_BOOT_DEV_STS1_ENABLED (1 << CPU_BOOT_DEV_STS_ENABLED)
+
+enum cpu_boot_status {
+ CPU_BOOT_STATUS_NA = 0, /* Default value after reset of chip */
+ CPU_BOOT_STATUS_IN_WFE = 1,
+ CPU_BOOT_STATUS_DRAM_RDY = 2,
+ CPU_BOOT_STATUS_SRAM_AVAIL = 3,
+ CPU_BOOT_STATUS_IN_BTL = 4, /* BTL is H/W FSM */
+ CPU_BOOT_STATUS_IN_PREBOOT = 5,
+ CPU_BOOT_STATUS_IN_SPL, /* deprecated - not reported */
+ CPU_BOOT_STATUS_IN_UBOOT = 7,
+ CPU_BOOT_STATUS_DRAM_INIT_FAIL, /* deprecated - will be removed */
+ CPU_BOOT_STATUS_FIT_CORRUPTED, /* deprecated - will be removed */
+ /* U-Boot console prompt activated, commands are not processed */
+ CPU_BOOT_STATUS_UBOOT_NOT_READY = 10,
+ /* Finished NICs init, reported after DRAM and NICs */
+ CPU_BOOT_STATUS_NIC_FW_RDY = 11,
+ CPU_BOOT_STATUS_TS_INIT_FAIL, /* deprecated - will be removed */
+ CPU_BOOT_STATUS_DRAM_SKIPPED, /* deprecated - will be removed */
+ CPU_BOOT_STATUS_BMC_WAITING_SKIPPED, /* deprecated - will be removed */
+ /* Last boot loader progress status, ready to receive commands */
+ CPU_BOOT_STATUS_READY_TO_BOOT = 15,
+ /* Internal Boot finished, ready for boot-fit */
+ CPU_BOOT_STATUS_WAITING_FOR_BOOT_FIT = 16,
+ /* Internal Security has been initialized, device can be accessed */
+ CPU_BOOT_STATUS_SECURITY_READY = 17,
+ /* FW component is preparing to shutdown and communication with host is not available */
+ CPU_BOOT_STATUS_FW_SHUTDOWN_PREP = 18,
+};
+
+enum kmd_msg {
+ KMD_MSG_NA = 0,
+ KMD_MSG_GOTO_WFE,
+ KMD_MSG_FIT_RDY,
+ KMD_MSG_SKIP_BMC,
+ RESERVED,
+ KMD_MSG_RST_DEV,
+ KMD_MSG_LAST
+};
+
+enum cpu_msg_status {
+ CPU_MSG_CLR = 0,
+ CPU_MSG_OK,
+ CPU_MSG_ERR,
+};
+
+/* communication registers mapping - consider ABI when changing */
+struct cpu_dyn_regs {
+ __le32 cpu_pq_base_addr_low;
+ __le32 cpu_pq_base_addr_high;
+ __le32 cpu_pq_length;
+ __le32 cpu_pq_init_status;
+ __le32 cpu_eq_base_addr_low;
+ __le32 cpu_eq_base_addr_high;
+ __le32 cpu_eq_length;
+ __le32 cpu_eq_ci;
+ __le32 cpu_cq_base_addr_low;
+ __le32 cpu_cq_base_addr_high;
+ __le32 cpu_cq_length;
+ __le32 cpu_pf_pq_pi;
+ __le32 cpu_boot_dev_sts0;
+ __le32 cpu_boot_dev_sts1;
+ __le32 cpu_boot_err0;
+ __le32 cpu_boot_err1;
+ __le32 cpu_boot_status;
+ __le32 fw_upd_sts;
+ __le32 fw_upd_cmd;
+ __le32 fw_upd_pending_sts;
+ __le32 fuse_ver_offset;
+ __le32 preboot_ver_offset;
+ __le32 uboot_ver_offset;
+ __le32 hw_state;
+ __le32 kmd_msg_to_cpu;
+ __le32 cpu_cmd_status_to_host;
+ __le32 gic_host_pi_upd_irq;
+ __le32 gic_tpc_qm_irq_ctrl;
+ __le32 gic_mme_qm_irq_ctrl;
+ __le32 gic_dma_qm_irq_ctrl;
+ __le32 gic_nic_qm_irq_ctrl;
+ __le32 gic_dma_core_irq_ctrl;
+ __le32 gic_host_halt_irq;
+ __le32 gic_host_ints_irq;
+ __le32 gic_host_soft_rst_irq;
+ __le32 gic_rot_qm_irq_ctrl;
+ __le32 cpu_rst_status;
+ __le32 eng_arc_irq_ctrl;
+ __le32 reserved1[20]; /* reserved for future use */
+};
+
+/* TODO: remove the desc magic after the code is updated to use message */
+/* HCDM - Habana Communications Descriptor Magic */
+#define HL_COMMS_DESC_MAGIC 0x4843444D
+#define HL_COMMS_DESC_VER 3
+
+/* HCMv - Habana Communications Message + header version */
+#define HL_COMMS_MSG_MAGIC_VALUE 0x48434D00
+#define HL_COMMS_MSG_MAGIC_MASK 0xFFFFFF00
+#define HL_COMMS_MSG_MAGIC_VER_MASK 0xFF
+
+#define HL_COMMS_MSG_MAGIC_VER(ver) (HL_COMMS_MSG_MAGIC_VALUE | \
+ ((ver) & HL_COMMS_MSG_MAGIC_VER_MASK))
+#define HL_COMMS_MSG_MAGIC_V0 HL_COMMS_DESC_MAGIC
+#define HL_COMMS_MSG_MAGIC_V1 HL_COMMS_MSG_MAGIC_VER(1)
+#define HL_COMMS_MSG_MAGIC_V2 HL_COMMS_MSG_MAGIC_VER(2)
+#define HL_COMMS_MSG_MAGIC_V3 HL_COMMS_MSG_MAGIC_VER(3)
+
+#define HL_COMMS_MSG_MAGIC HL_COMMS_MSG_MAGIC_V3
+
+#define HL_COMMS_MSG_MAGIC_VALIDATE_MAGIC(magic) \
+ (((magic) & HL_COMMS_MSG_MAGIC_MASK) == \
+ HL_COMMS_MSG_MAGIC_VALUE)
+
+#define HL_COMMS_MSG_MAGIC_VALIDATE_VERSION(magic, ver) \
+ (((magic) & HL_COMMS_MSG_MAGIC_VER_MASK) >= \
+ ((ver) & HL_COMMS_MSG_MAGIC_VER_MASK))
+
+#define HL_COMMS_MSG_MAGIC_VALIDATE(magic, ver) \
+ (HL_COMMS_MSG_MAGIC_VALIDATE_MAGIC((magic)) && \
+ HL_COMMS_MSG_MAGIC_VALIDATE_VERSION((magic), (ver)))
+
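As a sketch of the intended use of the validation macros, assuming 'hdr' points to a received struct comms_msg_header (illustrative only, not part of this header):

    u32 magic = le32_to_cpu(hdr->magic);

    /* accept any well-formed magic whose embedded version is >= V1 */
    if (!HL_COMMS_MSG_MAGIC_VALIDATE(magic, HL_COMMS_MSG_MAGIC_V1))
            return -EINVAL;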
+enum comms_msg_type {
+ HL_COMMS_DESC_TYPE = 0,
+ HL_COMMS_RESET_CAUSE_TYPE = 1,
+ HL_COMMS_FW_CFG_SKIP_TYPE = 2,
+ HL_COMMS_BINNING_CONF_TYPE = 3,
+};
+
+/*
+ * Binning information shared between LKD and FW
+ * @tpc_mask_l - TPC binning information lower 64 bit
+ * @dec_mask - Decoder binning information
+ * @dram_mask - DRAM binning information
+ * @edma_mask - EDMA binning information
+ * @mme_mask_l - MME binning information lower 32
+ * @mme_mask_h - MME binning information upper 32
+ * @rot_mask - Rotator binning information
+ * @xbar_mask - xBAR binning information
+ * @reserved - reserved field for future binning info w/o ABI change
+ * @tpc_mask_h - TPC binning information upper 64 bit
+ * @nic_mask - NIC binning information
+ */
+struct lkd_fw_binning_info {
+ __le64 tpc_mask_l;
+ __le32 dec_mask;
+ __le32 dram_mask;
+ __le32 edma_mask;
+ __le32 mme_mask_l;
+ __le32 mme_mask_h;
+ __le32 rot_mask;
+ __le32 xbar_mask;
+ __le32 reserved0;
+ __le64 tpc_mask_h;
+ __le64 nic_mask;
+ __le32 reserved1[8];
+};
+
+/* TODO: remove this struct after the code is updated to use message */
+/* this is the comms descriptor header - meta data */
+struct comms_desc_header {
+ __le32 magic; /* magic for validation */
+ __le32 crc32; /* CRC32 of the descriptor w/o header */
+ __le16 size; /* size of the descriptor w/o header */
+ __u8 version; /* descriptor version */
+ __u8 reserved[5]; /* pad to 64 bit */
+};
+
+/* this is the comms message header - meta data */
+struct comms_msg_header {
+ __le32 magic; /* magic for validation */
+ __le32 crc32; /* CRC32 of the message w/o header */
+ __le16 size; /* size of the message w/o header */
+ __u8 version; /* message payload version */
+ __u8 type; /* message type */
+ __u8 reserved[4]; /* pad to 64 bit */
+};
+
+enum lkd_fw_ascii_msg_lvls {
+ LKD_FW_ASCII_MSG_ERR = 0,
+ LKD_FW_ASCII_MSG_WRN = 1,
+ LKD_FW_ASCII_MSG_INF = 2,
+ LKD_FW_ASCII_MSG_DBG = 3,
+};
+
+#define LKD_FW_ASCII_MSG_MAX_LEN 128
+#define LKD_FW_ASCII_MSG_MAX 4 /* consider ABI when changing */
+
+struct lkd_fw_ascii_msg {
+ __u8 valid;
+ __u8 msg_lvl;
+ __u8 reserved[6];
+ char msg[LKD_FW_ASCII_MSG_MAX_LEN];
+};
+
+/* this is the main FW descriptor - consider ABI when changing */
+struct lkd_fw_comms_desc {
+ struct comms_desc_header header;
+ struct cpu_dyn_regs cpu_dyn_regs;
+ char fuse_ver[VERSION_MAX_LEN];
+ char cur_fw_ver[VERSION_MAX_LEN];
+ /* can be used for 1 more version w/o ABI change */
+ char reserved0[VERSION_MAX_LEN];
+ __le64 img_addr; /* address for next FW component load */
+ struct lkd_fw_binning_info binning_info;
+ struct lkd_fw_ascii_msg ascii_msg[LKD_FW_ASCII_MSG_MAX];
+ __le32 rsvd_mem_size_mb; /* reserved memory size [MB] for FW/SVE */
+ char reserved1[4];
+};
+
+enum comms_reset_cause {
+ HL_RESET_CAUSE_UNKNOWN = 0,
+ HL_RESET_CAUSE_HEARTBEAT = 1,
+ HL_RESET_CAUSE_TDR = 2,
+};
+
+/* TODO: remove define after struct name is aligned on all projects */
+#define lkd_msg_comms lkd_fw_comms_msg
+
+/* this is the comms message descriptor */
+struct lkd_fw_comms_msg {
+ struct comms_msg_header header;
+ /* union for future expansions of new messages */
+ union {
+ struct {
+ struct cpu_dyn_regs cpu_dyn_regs;
+ char fuse_ver[VERSION_MAX_LEN];
+ char cur_fw_ver[VERSION_MAX_LEN];
+ /* can be used for 1 more version w/o ABI change */
+ char reserved0[VERSION_MAX_LEN];
+ /* address for next FW component load */
+ __le64 img_addr;
+ struct lkd_fw_binning_info binning_info;
+ struct lkd_fw_ascii_msg ascii_msg[LKD_FW_ASCII_MSG_MAX];
+ /* reserved memory size [MB] for FW/SVE */
+ __le32 rsvd_mem_size_mb;
+ char reserved1[4];
+ };
+ struct {
+ __u8 reset_cause;
+ };
+ struct {
+ __u8 fw_cfg_skip; /* 1 - skip, 0 - don't skip */
+ };
+ struct lkd_fw_binning_info binning_conf;
+ };
+};
+
+/*
+ * LKD commands:
+ *
+ * COMMS_NOOP Used to clear the command register and no actual
+ * command is sent.
+ *
+ * COMMS_CLR_STS Clear status command - FW should clear the
+ * status register. Used for synchronization
+ * between the commands as part of the race free
+ * protocol.
+ *
+ * COMMS_RST_STATE Reset the current communication state which is
+ * kept by FW for proper responses.
+ * Should be used in the beginning of the
+ * communication cycle to clean any leftovers from
+ * previous communication attempts.
+ *
+ * COMMS_PREP_DESC Prepare descriptor for setting up the
+ * communication and other dynamic data:
+ * struct lkd_fw_comms_desc.
+ * This command has a parameter stating the next FW
+ * component size, so the FW can actually prepare a
+ * space for it and in the status response provide
+ * the descriptor offset. The offset of the next FW
+ * data component is part of the descriptor
+ * structure.
+ *
+ * COMMS_DATA_RDY The FW data has been uploaded and is ready for
+ * validation.
+ *
+ * COMMS_EXEC Execute the next FW component.
+ *
+ * COMMS_RST_DEV Reset the device.
+ *
+ * COMMS_GOTO_WFE Execute WFE command. Allowed only on non-secure
+ * devices.
+ *
+ * COMMS_SKIP_BMC Perform actions required for BMC-less servers.
+ * Do not wait for BMC response.
+ *
+ * COMMS_PREP_DESC_ELBI Same as COMMS_PREP_DESC only that the memory
+ * space is allocated in an ELBI-access-only
+ * address range.
+ *
+ */
+enum comms_cmd {
+ COMMS_NOOP = 0,
+ COMMS_CLR_STS = 1,
+ COMMS_RST_STATE = 2,
+ COMMS_PREP_DESC = 3,
+ COMMS_DATA_RDY = 4,
+ COMMS_EXEC = 5,
+ COMMS_RST_DEV = 6,
+ COMMS_GOTO_WFE = 7,
+ COMMS_SKIP_BMC = 8,
+ COMMS_PREP_DESC_ELBI = 10,
+ COMMS_INVLD_LAST
+};
+
+#define COMMS_COMMAND_SIZE_SHIFT 0
+#define COMMS_COMMAND_SIZE_MASK 0x1FFFFFF
+#define COMMS_COMMAND_CMD_SHIFT 27
+#define COMMS_COMMAND_CMD_MASK 0xF8000000
+
+/*
+ * LKD command to FW register structure
+ * @size - FW component size
+ * @cmd - command from enum comms_cmd
+ */
+struct comms_command {
+ union { /* bit fields are only for FW use */
+ struct {
+ u32 size :25; /* 32MB max. */
+ u32 reserved :2;
+ enum comms_cmd cmd :5; /* 32 commands */
+ };
+ __le32 val;
+ };
+};
+
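Since the bit fields are reserved for FW use, the host side would pack the register with the shift/mask defines above; a hedged sketch ('next_fw_size' and the final register write are assumptions, not part of this header):

    u32 val;

    val = (next_fw_size << COMMS_COMMAND_SIZE_SHIFT) &
          COMMS_COMMAND_SIZE_MASK;
    val |= (COMMS_PREP_DESC << COMMS_COMMAND_CMD_SHIFT) &
           COMMS_COMMAND_CMD_MASK;
    /* then write cpu_to_le32(val) to the LKD command register */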
+/*
+ * FW status
+ *
+ * COMMS_STS_NOOP Used to clear the status register and no actual
+ * status is provided.
+ *
+ * COMMS_STS_ACK Command has been received and recognized.
+ *
+ * COMMS_STS_OK Command execution has finished successfully.
+ *
+ * COMMS_STS_ERR Command execution was unsuccessful and resulted
+ * in error.
+ *
+ * COMMS_STS_VALID_ERR FW validation has failed.
+ *
+ * COMMS_STS_TIMEOUT_ERR Command execution has timed out.
+ */
+enum comms_sts {
+ COMMS_STS_NOOP = 0,
+ COMMS_STS_ACK = 1,
+ COMMS_STS_OK = 2,
+ COMMS_STS_ERR = 3,
+ COMMS_STS_VALID_ERR = 4,
+ COMMS_STS_TIMEOUT_ERR = 5,
+ COMMS_STS_INVLD_LAST
+};
+
+/* RAM types for FW component loading - defines the base address */
+enum comms_ram_types {
+ COMMS_SRAM = 0,
+ COMMS_DRAM = 1,
+};
+
+#define COMMS_STATUS_OFFSET_SHIFT 0
+#define COMMS_STATUS_OFFSET_MASK 0x03FFFFFF
+#define COMMS_STATUS_OFFSET_ALIGN_SHIFT 2
+#define COMMS_STATUS_RAM_TYPE_SHIFT 26
+#define COMMS_STATUS_RAM_TYPE_MASK 0x0C000000
+#define COMMS_STATUS_STATUS_SHIFT 28
+#define COMMS_STATUS_STATUS_MASK 0xF0000000
+
+/*
+ * FW status to LKD register structure
+ * @offset - an offset from the base of the ram_type shifted right by
+ * 2 bits (always aligned to 32 bits).
+ * Allows a maximum addressable offset of 256MB from RAM base.
+ * Example: for a real offset in RAM of 0x800000 (8MB), the value
+ * in the offset field is (0x800000 >> 2) = 0x200000.
+ * @ram_type - the RAM type that should be used for offset from
+ * enum comms_ram_types
+ * @status - status from enum comms_sts
+ */
+struct comms_status {
+ union { /* bit fields are only for FW use */
+ struct {
+ u32 offset :26;
+ enum comms_ram_types ram_type :2;
+ enum comms_sts status :4; /* 16 statuses */
+ };
+ __le32 val;
+ };
+};
+
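Decoding follows directly from the masks; a minimal sketch mirroring the 8MB example in the comment above (the 'status' source is illustrative):

    u32 sts = le32_to_cpu(status->val);
    u32 code = (sts & COMMS_STATUS_STATUS_MASK) >> COMMS_STATUS_STATUS_SHIFT;
    u32 ram = (sts & COMMS_STATUS_RAM_TYPE_MASK) >> COMMS_STATUS_RAM_TYPE_SHIFT;
    /* undo the >>2 encoding: a field value of 0x200000 yields 0x800000 (8MB) */
    u64 offset = (u64)((sts & COMMS_STATUS_OFFSET_MASK) >>
                       COMMS_STATUS_OFFSET_SHIFT) << COMMS_STATUS_OFFSET_ALIGN_SHIFT;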
+#define NAME_MAX_LEN 32 /* bytes */
+struct hl_module_data {
+ __u8 name[NAME_MAX_LEN];
+ __u8 version[VERSION_MAX_LEN];
+};
+
+/**
+ * struct hl_component_versions - versions associated with hl component.
+ * @struct_size: size of the whole struct (including the dynamic size of modules).
+ * @modules_offset: offset of the modules field in this struct.
+ * @component: version of the component itself.
+ * @fw_os: Firmware OS Version.
+ * @comp_name: Name of the component.
+ * @modules_counter: number of set bits in modules_mask.
+ * @reserved: reserved for future use.
+ * @modules: versions of the component's modules. Elaborated explanation in
+ * struct cpucp_versions.
+ */
+struct hl_component_versions {
+ __le16 struct_size;
+ __le16 modules_offset;
+ __u8 component[VERSION_MAX_LEN];
+ __u8 fw_os[VERSION_MAX_LEN];
+ __u8 comp_name[NAME_MAX_LEN];
+ __u8 modules_counter;
+ __u8 reserved[3];
+ struct hl_module_data modules[];
+};
+
+/* Max size of the FW versions FIT */
+#define HL_FW_VERSIONS_FIT_SIZE 4096
+
+#endif /* HL_BOOT_IF_H */
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
index 13b1e65fbdcc..6730ee900ee1 100644
--- a/include/linux/hid-sensor-ids.h
+++ b/include/linux/hid-sensor-ids.h
@@ -21,6 +21,10 @@
#define HID_USAGE_SENSOR_ALS 0x200041
#define HID_USAGE_SENSOR_DATA_LIGHT 0x2004d0
#define HID_USAGE_SENSOR_LIGHT_ILLUM 0x2004d1
+#define HID_USAGE_SENSOR_LIGHT_COLOR_TEMPERATURE 0x2004d2
+#define HID_USAGE_SENSOR_LIGHT_CHROMATICITY 0x2004d3
+#define HID_USAGE_SENSOR_LIGHT_CHROMATICITY_X 0x2004d4
+#define HID_USAGE_SENSOR_LIGHT_CHROMATICITY_Y 0x2004d5
/* PROX (200011) */
#define HID_USAGE_SENSOR_PROX 0x200011
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 964ca1f15e3f..5a8387a4a712 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -833,11 +833,11 @@ struct hid_driver {
void (*feature_mapping)(struct hid_device *hdev,
struct hid_field *field,
struct hid_usage *usage);
-#ifdef CONFIG_PM
+
int (*suspend)(struct hid_device *hdev, pm_message_t message);
int (*resume)(struct hid_device *hdev);
int (*reset_resume)(struct hid_device *hdev);
-#endif
+
/* private: */
struct device_driver driver;
};
diff --git a/include/linux/highmem.h b/include/linux/highmem.h
index 99c474de800d..4cacc0e43b51 100644
--- a/include/linux/highmem.h
+++ b/include/linux/highmem.h
@@ -551,10 +551,24 @@ static inline void folio_zero_range(struct folio *folio,
zero_user_segments(&folio->page, start, start + length, 0, 0);
}
-static inline void unmap_and_put_page(struct page *page, void *addr)
+/**
+ * folio_release_kmap - Unmap a folio and drop a refcount.
+ * @folio: The folio to release.
+ * @addr: The address previously returned by a call to kmap_local_folio().
+ *
+ * It is common, e.g. in directory handling, to kmap a folio. This function
+ * unmaps the folio and drops the refcount that was being held to keep the
+ * folio alive while we accessed it.
+ */
+static inline void folio_release_kmap(struct folio *folio, void *addr)
{
kunmap_local(addr);
- put_page(page);
+ folio_put(folio);
+}
+
+static inline void unmap_and_put_page(struct page *page, void *addr)
+{
+ folio_release_kmap(page_folio(page), addr);
}
#endif /* _LINUX_HIGHMEM_H */
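The pairing the new helper documents, as a short hypothetical sketch (the directory-parsing context is made up; only the two calls matter):

    void *kaddr = kmap_local_folio(folio, 0);

    /* ... read directory entries through kaddr ... */

    folio_release_kmap(folio, kaddr); /* kunmap_local() + folio_put() */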
diff --git a/include/linux/hisi_acc_qm.h b/include/linux/hisi_acc_qm.h
index 39fbfb4be944..ddc7ebb70523 100644
--- a/include/linux/hisi_acc_qm.h
+++ b/include/linux/hisi_acc_qm.h
@@ -144,6 +144,13 @@ enum qm_vf_state {
QM_NOT_READY,
};
+enum qm_misc_ctl_bits {
+ QM_DRIVER_REMOVING = 0x0,
+ QM_RST_SCHED,
+ QM_RESETTING,
+ QM_MODULE_PARAM,
+};
+
enum qm_cap_bits {
QM_SUPPORT_DB_ISOLATION = 0x0,
QM_SUPPORT_FUNC_QOS,
@@ -269,6 +276,7 @@ struct hisi_qm_poll_data {
struct hisi_qm *qm;
struct work_struct work;
u16 *qp_finish_id;
+ u16 eqe_num;
};
/**
@@ -285,6 +293,18 @@ struct qm_err_isolate {
struct list_head qm_hw_errs;
};
+struct qm_rsv_buf {
+ struct qm_sqc *sqc;
+ struct qm_cqc *cqc;
+ struct qm_eqc *eqc;
+ struct qm_aeqc *aeqc;
+ dma_addr_t sqc_dma;
+ dma_addr_t cqc_dma;
+ dma_addr_t eqc_dma;
+ dma_addr_t aeqc_dma;
+ struct qm_dma qcdma;
+};
+
struct hisi_qm {
enum qm_hw_ver ver;
enum qm_fun_type fun_type;
@@ -317,6 +337,7 @@ struct hisi_qm {
dma_addr_t cqc_dma;
dma_addr_t eqe_dma;
dma_addr_t aeqe_dma;
+ struct qm_rsv_buf xqc_buf;
struct hisi_qm_status status;
const struct hisi_qm_err_ini *err_ini;
@@ -471,6 +492,20 @@ static inline void hisi_qm_init_list(struct hisi_qm_list *qm_list)
mutex_init(&qm_list->lock);
}
+static inline void hisi_qm_add_list(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
+{
+ mutex_lock(&qm_list->lock);
+ list_add_tail(&qm->list, &qm_list->list);
+ mutex_unlock(&qm_list->lock);
+}
+
+static inline void hisi_qm_del_list(struct hisi_qm *qm, struct hisi_qm_list *qm_list)
+{
+ mutex_lock(&qm_list->lock);
+ list_del(&qm->list);
+ mutex_unlock(&qm_list->lock);
+}
+
int hisi_qm_init(struct hisi_qm *qm);
void hisi_qm_uninit(struct hisi_qm *qm);
int hisi_qm_start(struct hisi_qm *qm);
@@ -516,8 +551,8 @@ int hisi_qm_alloc_qps_node(struct hisi_qm_list *qm_list, int qp_num,
void hisi_qm_free_qps(struct hisi_qp **qps, int qp_num);
void hisi_qm_dev_shutdown(struct pci_dev *pdev);
void hisi_qm_wait_task_finish(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
-int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
-void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list);
+int hisi_qm_alg_register(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard);
+void hisi_qm_alg_unregister(struct hisi_qm *qm, struct hisi_qm_list *qm_list, int guard);
int hisi_qm_resume(struct device *dev);
int hisi_qm_suspend(struct device *dev);
void hisi_qm_pm_uninit(struct hisi_qm *qm);
diff --git a/include/linux/hugetlb.h b/include/linux/hugetlb.h
index 5b2626063f4f..d3acecc5db4b 100644
--- a/include/linux/hugetlb.h
+++ b/include/linux/hugetlb.h
@@ -30,7 +30,7 @@ void free_huge_folio(struct folio *folio);
#ifdef CONFIG_HUGETLB_PAGE
-#include <linux/mempolicy.h>
+#include <linux/pagemap.h>
#include <linux/shm.h>
#include <asm/tlbflush.h>
@@ -60,6 +60,7 @@ struct resv_map {
long adds_in_progress;
struct list_head region_cache;
long region_cache_count;
+ struct rw_semaphore rw_sema;
#ifdef CONFIG_CGROUP_HUGETLB
/*
* On private mappings, the counter to uncharge reservations is stored
@@ -138,7 +139,7 @@ struct page *hugetlb_follow_page_mask(struct vm_area_struct *vma,
void unmap_hugepage_range(struct vm_area_struct *,
unsigned long, unsigned long, struct page *,
zap_flags_t);
-void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+void __unmap_hugepage_range(struct mmu_gather *tlb,
struct vm_area_struct *vma,
unsigned long start, unsigned long end,
struct page *ref_page, zap_flags_t zap_flags);
@@ -245,6 +246,25 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
void adjust_range_if_pmd_sharing_possible(struct vm_area_struct *vma,
unsigned long *start, unsigned long *end);
+extern void __hugetlb_zap_begin(struct vm_area_struct *vma,
+ unsigned long *begin, unsigned long *end);
+extern void __hugetlb_zap_end(struct vm_area_struct *vma,
+ struct zap_details *details);
+
+static inline void hugetlb_zap_begin(struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+{
+ if (is_vm_hugetlb_page(vma))
+ __hugetlb_zap_begin(vma, start, end);
+}
+
+static inline void hugetlb_zap_end(struct vm_area_struct *vma,
+ struct zap_details *details)
+{
+ if (is_vm_hugetlb_page(vma))
+ __hugetlb_zap_end(vma, details);
+}
+
void hugetlb_vma_lock_read(struct vm_area_struct *vma);
void hugetlb_vma_unlock_read(struct vm_area_struct *vma);
void hugetlb_vma_lock_write(struct vm_area_struct *vma);
@@ -260,6 +280,7 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
unsigned long cp_flags);
bool is_hugetlb_entry_migration(pte_t pte);
+bool is_hugetlb_entry_hwpoisoned(pte_t pte);
void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
#else /* !CONFIG_HUGETLB_PAGE */
@@ -296,6 +317,18 @@ static inline void adjust_range_if_pmd_sharing_possible(
{
}
+static inline void hugetlb_zap_begin(
+ struct vm_area_struct *vma,
+ unsigned long *start, unsigned long *end)
+{
+}
+
+static inline void hugetlb_zap_end(
+ struct vm_area_struct *vma,
+ struct zap_details *details)
+{
+}
+
static inline struct page *hugetlb_follow_page_mask(
struct vm_area_struct *vma, unsigned long address, unsigned int flags,
unsigned int *page_mask)
@@ -441,7 +474,7 @@ static inline long hugetlb_change_protection(
return 0;
}
-static inline void __unmap_hugepage_range_final(struct mmu_gather *tlb,
+static inline void __unmap_hugepage_range(struct mmu_gather *tlb,
struct vm_area_struct *vma, unsigned long start,
unsigned long end, struct page *ref_page,
zap_flags_t zap_flags)
@@ -512,7 +545,6 @@ static inline struct hugetlbfs_sb_info *HUGETLBFS_SB(struct super_block *sb)
}
struct hugetlbfs_inode_info {
- struct shared_policy policy;
struct inode vfs_inode;
unsigned int seals;
};
@@ -716,8 +748,6 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
unsigned long addr, int avoid_reserve);
struct folio *alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
nodemask_t *nmask, gfp_t gfp_mask);
-struct folio *alloc_hugetlb_folio_vma(struct hstate *h, struct vm_area_struct *vma,
- unsigned long address);
int hugetlb_add_to_page_cache(struct folio *folio, struct address_space *mapping,
pgoff_t idx);
void restore_reserve_on_error(struct hstate *h, struct vm_area_struct *vma,
@@ -812,6 +842,12 @@ static inline unsigned int blocks_per_huge_page(struct hstate *h)
return huge_page_size(h) / 512;
}
+static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
+ struct address_space *mapping, pgoff_t idx)
+{
+ return filemap_lock_folio(mapping, idx << huge_page_order(h));
+}
+
#include <asm/hugetlb.h>
#ifndef is_hugepage_only_range
@@ -984,7 +1020,9 @@ static inline void huge_ptep_modify_prot_commit(struct vm_area_struct *vma,
unsigned long addr, pte_t *ptep,
pte_t old_pte, pte_t pte)
{
- set_huge_pte_at(vma->vm_mm, addr, ptep, pte);
+ unsigned long psize = huge_page_size(hstate_vma(vma));
+
+ set_huge_pte_at(vma->vm_mm, addr, ptep, pte, psize);
}
#endif
@@ -1006,6 +1044,12 @@ static inline struct hugepage_subpool *hugetlb_folio_subpool(struct folio *folio
return NULL;
}
+static inline struct folio *filemap_lock_hugetlb_folio(struct hstate *h,
+ struct address_space *mapping, pgoff_t idx)
+{
+ return NULL;
+}
+
static inline int isolate_or_dissolve_huge_page(struct page *page,
struct list_head *list)
{
@@ -1026,13 +1070,6 @@ alloc_hugetlb_folio_nodemask(struct hstate *h, int preferred_nid,
return NULL;
}
-static inline struct folio *alloc_hugetlb_folio_vma(struct hstate *h,
- struct vm_area_struct *vma,
- unsigned long address)
-{
- return NULL;
-}
-
static inline int __alloc_bootmem_huge_page(struct hstate *h)
{
return 0;
@@ -1173,7 +1210,7 @@ static inline pte_t huge_ptep_clear_flush(struct vm_area_struct *vma,
}
static inline void set_huge_pte_at(struct mm_struct *mm, unsigned long addr,
- pte_t *ptep, pte_t pte)
+ pte_t *ptep, pte_t pte, unsigned long sz)
{
}
@@ -1231,6 +1268,11 @@ static inline bool __vma_shareable_lock(struct vm_area_struct *vma)
return (vma->vm_flags & VM_MAYSHARE) && vma->vm_private_data;
}
+static inline bool __vma_private_lock(struct vm_area_struct *vma)
+{
+ return (!(vma->vm_flags & VM_MAYSHARE)) && vma->vm_private_data;
+}
+
/*
* Safe version of huge_pte_offset() to check the locks. See comments
* above huge_pte_offset().
diff --git a/include/linux/hugetlb_cgroup.h b/include/linux/hugetlb_cgroup.h
index 3d82d91f49ac..e5d64b8b59c2 100644
--- a/include/linux/hugetlb_cgroup.h
+++ b/include/linux/hugetlb_cgroup.h
@@ -22,13 +22,6 @@ struct resv_map;
struct file_region;
#ifdef CONFIG_CGROUP_HUGETLB
-/*
- * Minimum page order trackable by hugetlb cgroup.
- * At least 3 pages are necessary for all the tracking information.
- * The second tail page contains all of the hugetlb-specific fields.
- */
-#define HUGETLB_CGROUP_MIN_ORDER order_base_2(__NR_USED_SUBPAGE)
-
enum hugetlb_memory_event {
HUGETLB_MAX,
HUGETLB_NR_MEMORY_EVENTS,
@@ -68,8 +61,6 @@ static inline struct hugetlb_cgroup *
__hugetlb_cgroup_from_folio(struct folio *folio, bool rsvd)
{
VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
- if (folio_order(folio) < HUGETLB_CGROUP_MIN_ORDER)
- return NULL;
if (rsvd)
return folio->_hugetlb_cgroup_rsvd;
else
@@ -91,8 +82,6 @@ static inline void __set_hugetlb_cgroup(struct folio *folio,
struct hugetlb_cgroup *h_cg, bool rsvd)
{
VM_BUG_ON_FOLIO(!folio_test_hugetlb(folio), folio);
- if (folio_order(folio) < HUGETLB_CGROUP_MIN_ORDER)
- return;
if (rsvd)
folio->_hugetlb_cgroup_rsvd = h_cg;
else
diff --git a/include/linux/hw_random.h b/include/linux/hw_random.h
index 8a3115516a1b..136e9842120e 100644
--- a/include/linux/hw_random.h
+++ b/include/linux/hw_random.h
@@ -63,5 +63,6 @@ extern void hwrng_unregister(struct hwrng *rng);
extern void devm_hwrng_unregister(struct device *dve, struct hwrng *rng);
extern long hwrng_msleep(struct hwrng *rng, unsigned int msecs);
+extern long hwrng_yield(struct hwrng *rng);
#endif /* LINUX_HWRANDOM_H_ */
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 3430cc2b05a6..0dae9db27538 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -237,7 +237,6 @@ enum i2c_driver_flags {
* struct i2c_driver - represent an I2C device driver
* @class: What kind of i2c device we instantiate (for detect)
* @probe: Callback for device binding
- * @probe_new: Transitional callback for device binding - do not use
* @remove: Callback for device unbinding
* @shutdown: Callback for device shutdown
* @alert: Alert callback, for example for the SMBus alert protocol
@@ -272,16 +271,8 @@ enum i2c_driver_flags {
struct i2c_driver {
unsigned int class;
- union {
/* Standard driver model interfaces */
- int (*probe)(struct i2c_client *client);
- /*
- * Legacy callback that was part of a conversion of .probe().
- * Today it has the same semantic as .probe(). Don't use for new
- * code.
- */
- int (*probe_new)(struct i2c_client *client);
- };
+ int (*probe)(struct i2c_client *client);
void (*remove)(struct i2c_client *client);
diff --git a/include/linux/i3c/master.h b/include/linux/i3c/master.h
index 349d7d0dda73..24c1863b86e2 100644
--- a/include/linux/i3c/master.h
+++ b/include/linux/i3c/master.h
@@ -24,6 +24,12 @@
struct i2c_client;
+/* notifier actions. notifier call data is the struct i3c_bus */
+enum {
+ I3C_NOTIFY_BUS_ADD,
+ I3C_NOTIFY_BUS_REMOVE,
+};
+
struct i3c_master_controller;
struct i3c_bus;
struct i3c_device;
@@ -654,4 +660,9 @@ void i3c_master_queue_ibi(struct i3c_dev_desc *dev, struct i3c_ibi_slot *slot);
struct i3c_ibi_slot *i3c_master_get_free_ibi_slot(struct i3c_dev_desc *dev);
+void i3c_for_each_bus_locked(int (*fn)(struct i3c_bus *bus, void *data),
+ void *data);
+int i3c_register_notifier(struct notifier_block *nb);
+int i3c_unregister_notifier(struct notifier_block *nb);
+
#endif /* I3C_MASTER_H */
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index bd2f6e19c357..958771bac9c0 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -307,6 +307,13 @@ static inline u16 ieee80211_sn_sub(u16 sn1, u16 sn2)
#define IEEE80211_TRIGGER_TYPE_BQRP 0x6
#define IEEE80211_TRIGGER_TYPE_NFRP 0x7
+/* UL-bandwidth within common_info of trigger frame */
+#define IEEE80211_TRIGGER_ULBW_MASK 0xc0000
+#define IEEE80211_TRIGGER_ULBW_20MHZ 0x0
+#define IEEE80211_TRIGGER_ULBW_40MHZ 0x1
+#define IEEE80211_TRIGGER_ULBW_80MHZ 0x2
+#define IEEE80211_TRIGGER_ULBW_160_80P80MHZ 0x3
+
struct ieee80211_hdr {
__le16 frame_control;
__le16 duration_id;
@@ -951,17 +958,24 @@ struct ieee80211_wide_bw_chansw_ie {
* @dtim_count: DTIM Count
* @dtim_period: DTIM Period
* @bitmap_ctrl: Bitmap Control
+ * @required_octet: "Syntactic sugar" to force the struct size to the
+ * minimum valid size when carried in a non-S1G PPDU
* @virtual_map: Partial Virtual Bitmap
*
* This structure represents the payload of the "TIM element" as
- * described in IEEE Std 802.11-2020 section 9.4.2.5.
+ * described in IEEE Std 802.11-2020 section 9.4.2.5. Note that this
+ * definition is only applicable when the element is carried in a
+ * non-S1G PPDU. When the TIM is carried in an S1G PPDU, the Bitmap
+ * Control and Partial Virtual Bitmap may not be present.
*/
struct ieee80211_tim_ie {
u8 dtim_count;
u8 dtim_period;
u8 bitmap_ctrl;
- /* variable size: 1 - 251 bytes */
- u8 virtual_map[1];
+ union {
+ u8 required_octet;
+ DECLARE_FLEX_ARRAY(u8, virtual_map);
+ };
} __packed;
/**
@@ -1239,6 +1253,30 @@ struct ieee80211_twt_setup {
u8 params[];
} __packed;
+#define IEEE80211_TTLM_MAX_CNT 2
+#define IEEE80211_TTLM_CONTROL_DIRECTION 0x03
+#define IEEE80211_TTLM_CONTROL_DEF_LINK_MAP 0x04
+#define IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT 0x08
+#define IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT 0x10
+#define IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE 0x20
+
+#define IEEE80211_TTLM_DIRECTION_DOWN 0
+#define IEEE80211_TTLM_DIRECTION_UP 1
+#define IEEE80211_TTLM_DIRECTION_BOTH 2
+
+/**
+ * struct ieee80211_ttlm_elem - TID-To-Link Mapping element
+ *
+ * Defined in section 9.4.2.314 in P802.11be_D4
+ *
+ * @control: the first part of control field
+ * @optional: the second part of control field
+ */
+struct ieee80211_ttlm_elem {
+ u8 control;
+ u8 optional[];
+} __packed;
+
struct ieee80211_mgmt {
__le16 frame_control;
__le16 duration;
@@ -1674,6 +1712,8 @@ struct ieee80211_mcs_info {
#define IEEE80211_HT_MCS_TX_MAX_STREAMS 4
#define IEEE80211_HT_MCS_TX_UNEQUAL_MODULATION 0x10
+#define IEEE80211_HT_MCS_CHAINS(mcs) ((mcs) == 32 ? 1 : (1 + ((mcs) >> 3)))
+
/*
* 802.11n D5.0 20.3.5 / 20.6 says:
* - indices 0 to 7 and 32 are single spatial stream
@@ -3132,6 +3172,28 @@ ieee80211_eht_oper_size_ok(const u8 *data, u8 len)
return len >= needed;
}
+#define IEEE80211_BW_IND_DIS_SUBCH_PRESENT BIT(1)
+
+struct ieee80211_bandwidth_indication {
+ u8 params;
+ struct ieee80211_eht_operation_info info;
+} __packed;
+
+static inline bool
+ieee80211_bandwidth_indication_size_ok(const u8 *data, u8 len)
+{
+ const struct ieee80211_bandwidth_indication *bwi = (const void *)data;
+
+ if (len < sizeof(*bwi))
+ return false;
+
+ if (bwi->params & IEEE80211_BW_IND_DIS_SUBCH_PRESENT &&
+ len < sizeof(*bwi) + 2)
+ return false;
+
+ return true;
+}
+
#define LISTEN_INT_USF GENMASK(15, 14)
#define LISTEN_INT_UI GENMASK(13, 0)
@@ -3589,6 +3651,8 @@ enum ieee80211_eid_ext {
WLAN_EID_EXT_EHT_OPERATION = 106,
WLAN_EID_EXT_EHT_MULTI_LINK = 107,
WLAN_EID_EXT_EHT_CAPABILITY = 108,
+ WLAN_EID_EXT_TID_TO_LINK_MAPPING = 109,
+ WLAN_EID_EXT_BANDWIDTH_INDICATION = 135,
};
/* Action category code */
@@ -4356,6 +4420,35 @@ static inline bool ieee80211_is_public_action(struct ieee80211_hdr *hdr,
}
/**
+ * ieee80211_is_protected_dual_of_public_action - check if skb contains a
+ * protected dual of public action management frame
+ * @skb: the skb containing the frame, length will be checked
+ *
+ * Return: true if the skb contains a protected dual of public action
+ * management frame, false otherwise.
+ */
+static inline bool
+ieee80211_is_protected_dual_of_public_action(struct sk_buff *skb)
+{
+ u8 action;
+
+ if (!ieee80211_is_public_action((void *)skb->data, skb->len) ||
+ skb->len < IEEE80211_MIN_ACTION_SIZE + 1)
+ return false;
+
+ action = *(u8 *)(skb->data + IEEE80211_MIN_ACTION_SIZE);
+
+ return action != WLAN_PUB_ACTION_20_40_BSS_COEX &&
+ action != WLAN_PUB_ACTION_DSE_REG_LOC_ANN &&
+ action != WLAN_PUB_ACTION_MSMT_PILOT &&
+ action != WLAN_PUB_ACTION_TDLS_DISCOVER_RES &&
+ action != WLAN_PUB_ACTION_LOC_TRACK_NOTI &&
+ action != WLAN_PUB_ACTION_FTM_REQUEST &&
+ action != WLAN_PUB_ACTION_FTM_RESPONSE &&
+ action != WLAN_PUB_ACTION_FILS_DISCOVERY;
+}
+
+/**
* _ieee80211_is_group_privacy_action - check if frame is a group addressed
* privacy action frame
* @hdr: the frame
@@ -4426,12 +4519,11 @@ static inline bool ieee80211_check_tim(const struct ieee80211_tim_ie *tim,
/**
* ieee80211_get_tdls_action - get tdls packet action (or -1, if not tdls packet)
* @skb: the skb containing the frame, length will not be checked
- * @hdr_size: the size of the ieee80211_hdr that starts at skb->data
*
* This function assumes the frame is a data frame, and that the network header
* is in the correct place.
*/
-static inline int ieee80211_get_tdls_action(struct sk_buff *skb, u32 hdr_size)
+static inline int ieee80211_get_tdls_action(struct sk_buff *skb)
{
if (!skb_is_nonlinear(skb) &&
skb->len > (skb_network_offset(skb) + 2)) {
@@ -5125,6 +5217,39 @@ static inline bool ieee80211_mle_reconf_sta_prof_size_ok(const u8 *data,
fixed + prof->sta_info_len - 1 <= len;
}
+static inline bool ieee80211_tid_to_link_map_size_ok(const u8 *data, size_t len)
+{
+ const struct ieee80211_ttlm_elem *t2l = (const void *)data;
+ u8 control, fixed = sizeof(*t2l), elem_len = 0;
+
+ if (len < fixed)
+ return false;
+
+ control = t2l->control;
+
+ if (control & IEEE80211_TTLM_CONTROL_SWITCH_TIME_PRESENT)
+ elem_len += 2;
+ if (control & IEEE80211_TTLM_CONTROL_EXPECTED_DUR_PRESENT)
+ elem_len += 3;
+
+ if (!(control & IEEE80211_TTLM_CONTROL_DEF_LINK_MAP)) {
+ u8 bm_size;
+
+ elem_len += 1;
+ if (len < fixed + elem_len)
+ return false;
+
+ if (control & IEEE80211_TTLM_CONTROL_LINK_MAP_SIZE)
+ bm_size = 1;
+ else
+ bm_size = 2;
+
+ elem_len += hweight8(t2l->optional[0]) * bm_size;
+ }
+
+ return len >= fixed + elem_len;
+}
+
#define for_each_mle_subelement(_elem, _data, _len) \
if (ieee80211_mle_size_ok(_data, _len)) \
for_each_element(_elem, \
diff --git a/include/linux/if_team.h b/include/linux/if_team.h
index 1b9b15a492fa..cdc684e04a2f 100644
--- a/include/linux/if_team.h
+++ b/include/linux/if_team.h
@@ -189,6 +189,8 @@ struct team {
struct net_device *dev; /* associated netdevice */
struct team_pcpu_stats __percpu *pcpu_stats;
+ const struct header_ops *header_ops_cache;
+
struct mutex lock; /* used for overall locking, e.g. port lists write */
/*
diff --git a/include/linux/igmp.h b/include/linux/igmp.h
index ebf4349a53af..5171231f70a8 100644
--- a/include/linux/igmp.h
+++ b/include/linux/igmp.h
@@ -39,7 +39,7 @@ struct ip_sf_socklist {
unsigned int sl_max;
unsigned int sl_count;
struct rcu_head rcu;
- __be32 sl_addr[];
+ __be32 sl_addr[] __counted_by(sl_max);
};
#define IP_SFBLOCK 10 /* allocate this many at once */
diff --git a/include/linux/iio/iio.h b/include/linux/iio/iio.h
index 202e55b0a28b..d0ce3b71106a 100644
--- a/include/linux/iio/iio.h
+++ b/include/linux/iio/iio.h
@@ -427,6 +427,8 @@ struct iio_trigger; /* forward declaration */
* @write_event_config: set if the event is enabled.
* @read_event_value: read a configuration value associated with the event.
* @write_event_value: write a configuration value for the event.
+ * @read_event_label: function to request the label name for a specified
+ * event, for better event identification.
* @validate_trigger: function to validate the trigger when the
* current trigger gets changed.
* @update_scan_mode: function to configure device and scan buffer when
@@ -511,6 +513,12 @@ struct iio_info {
enum iio_event_direction dir,
enum iio_event_info info, int val, int val2);
+ int (*read_event_label)(struct iio_dev *indio_dev,
+ struct iio_chan_spec const *chan,
+ enum iio_event_type type,
+ enum iio_event_direction dir,
+ char *label);
+
int (*validate_trigger)(struct iio_dev *indio_dev,
struct iio_trigger *trig);
int (*update_scan_mode)(struct iio_dev *indio_dev,
@@ -556,7 +564,9 @@ struct iio_buffer_setup_ops {
* and owner
* @buffer: [DRIVER] any buffer present
* @scan_bytes: [INTERN] num bytes captured to be fed to buffer demux
- * @available_scan_masks: [DRIVER] optional array of allowed bitmasks
+ * @available_scan_masks: [DRIVER] optional array of allowed bitmasks. Sort the
+ * array in order of preference, the most preferred
+ * masks first.
* @masklength: [INTERN] the length of the mask established from
* channels
* @active_scan_mask: [INTERN] union of all scan masks requested by buffers
diff --git a/include/linux/iio/sw_device.h b/include/linux/iio/sw_device.h
index eff1e6b2595c..0f7fe7b522e3 100644
--- a/include/linux/iio/sw_device.h
+++ b/include/linux/iio/sw_device.h
@@ -51,9 +51,6 @@ void iio_unregister_sw_device_type(struct iio_sw_device_type *dt);
struct iio_sw_device *iio_sw_device_create(const char *, const char *);
void iio_sw_device_destroy(struct iio_sw_device *);
-int iio_sw_device_type_configfs_register(struct iio_sw_device_type *dt);
-void iio_sw_device_type_configfs_unregister(struct iio_sw_device_type *dt);
-
static inline
void iio_swd_group_init_type_name(struct iio_sw_device *d,
const char *name,
diff --git a/include/linux/iio/sw_trigger.h b/include/linux/iio/sw_trigger.h
index 47de2443e984..bc77f88df303 100644
--- a/include/linux/iio/sw_trigger.h
+++ b/include/linux/iio/sw_trigger.h
@@ -51,9 +51,6 @@ void iio_unregister_sw_trigger_type(struct iio_sw_trigger_type *tt);
struct iio_sw_trigger *iio_sw_trigger_create(const char *, const char *);
void iio_sw_trigger_destroy(struct iio_sw_trigger *);
-int iio_sw_trigger_type_configfs_register(struct iio_sw_trigger_type *tt);
-void iio_sw_trigger_type_configfs_unregister(struct iio_sw_trigger_type *tt);
-
static inline
void iio_swt_group_init_type_name(struct iio_sw_trigger *t,
const char *name,
diff --git a/include/linux/init.h b/include/linux/init.h
index 266c3e1640d4..01b52c9c7526 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -89,9 +89,6 @@
__latent_entropy
#define __meminitdata __section(".meminit.data")
#define __meminitconst __section(".meminit.rodata")
-#define __memexit __section(".memexit.text") __exitused __cold notrace
-#define __memexitdata __section(".memexit.data")
-#define __memexitconst __section(".memexit.rodata")
/* For assembly routines */
#define __HEAD .section ".head.text","ax"
diff --git a/include/linux/instruction_pointer.h b/include/linux/instruction_pointer.h
index cda1f706eaeb..aa0b3ffea935 100644
--- a/include/linux/instruction_pointer.h
+++ b/include/linux/instruction_pointer.h
@@ -2,7 +2,12 @@
#ifndef _LINUX_INSTRUCTION_POINTER_H
#define _LINUX_INSTRUCTION_POINTER_H
+#include <asm/linkage.h>
+
#define _RET_IP_ (unsigned long)__builtin_return_address(0)
+
+#ifndef _THIS_IP_
#define _THIS_IP_ ({ __label__ __here; __here: (unsigned long)&&__here; })
+#endif
#endif /* _LINUX_INSTRUCTION_POINTER_H */
diff --git a/include/linux/intel_tpmi.h b/include/linux/intel_tpmi.h
index 04d937ad4dc4..ee07393445f9 100644
--- a/include/linux/intel_tpmi.h
+++ b/include/linux/intel_tpmi.h
@@ -6,6 +6,12 @@
#ifndef _INTEL_TPMI_H_
#define _INTEL_TPMI_H_
+#include <linux/bitfield.h>
+
+#define TPMI_VERSION_INVALID 0xff
+#define TPMI_MINOR_VERSION(val) FIELD_GET(GENMASK(4, 0), val)
+#define TPMI_MAJOR_VERSION(val) FIELD_GET(GENMASK(7, 5), val)
+
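A worked example of the field layout, with a purely illustrative value: 0x25 is 0b001'00101, so bits 7:5 give a major version of 1 and bits 4:0 a minor version of 5:

    u8 ver = 0x25; /* illustrative value */

    if (ver != TPMI_VERSION_INVALID)
            pr_debug("TPMI v%lu.%lu\n", TPMI_MAJOR_VERSION(ver),
                     TPMI_MINOR_VERSION(ver)); /* "TPMI v1.5" */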
/**
* struct intel_tpmi_plat_info - Platform information for a TPMI device instance
* @package_id: CPU Package id
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index a92bce40b04b..76121c2bb4f8 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -566,11 +566,15 @@ enum
*
* _ RCU:
* 1) rcutree_migrate_callbacks() migrates the queue.
- * 2) rcu_report_dead() reports the final quiescent states.
+ * 2) rcutree_report_cpu_dead() reports the final quiescent states.
*
* _ IRQ_POLL: irq_poll_cpu_dead() migrates the queue
+ *
+ * _ (HR)TIMER_SOFTIRQ: (hr)timers_dead_cpu() migrates the queue
*/
-#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(RCU_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ))
+#define SOFTIRQ_HOTPLUG_SAFE_MASK (BIT(TIMER_SOFTIRQ) | BIT(IRQ_POLL_SOFTIRQ) |\
+ BIT(HRTIMER_SOFTIRQ) | BIT(RCU_SOFTIRQ))
+
/* map softirq index to softirq name. update 'softirq_to_name' in
* kernel/softirq.c when adding a new softirq.
diff --git a/include/linux/io-pgtable.h b/include/linux/io-pgtable.h
index 1b7a44b35616..25142a0e2fc2 100644
--- a/include/linux/io-pgtable.h
+++ b/include/linux/io-pgtable.h
@@ -166,6 +166,10 @@ struct io_pgtable_ops {
struct iommu_iotlb_gather *gather);
phys_addr_t (*iova_to_phys)(struct io_pgtable_ops *ops,
unsigned long iova);
+ int (*read_and_clear_dirty)(struct io_pgtable_ops *ops,
+ unsigned long iova, size_t size,
+ unsigned long flags,
+ struct iommu_dirty_bitmap *dirty);
};
/**
diff --git a/include/linux/io_uring.h b/include/linux/io_uring.h
index 106cdc55ff3b..aefb73eeeebf 100644
--- a/include/linux/io_uring.h
+++ b/include/linux/io_uring.h
@@ -20,8 +20,16 @@ enum io_uring_cmd_flags {
IO_URING_F_SQE128 = (1 << 8),
IO_URING_F_CQE32 = (1 << 9),
IO_URING_F_IOPOLL = (1 << 10),
+
+ /* set when uring wants to cancel a previously issued command */
+ IO_URING_F_CANCEL = (1 << 11),
+ IO_URING_F_COMPAT = (1 << 12),
};
+/* only top 8 bits of sqe->uring_cmd_flags for kernel internal use */
+#define IORING_URING_CMD_CANCELABLE (1U << 30)
+#define IORING_URING_CMD_POLLED (1U << 31)
+
struct io_uring_cmd {
struct file *file;
const struct io_uring_sqe *sqe;
@@ -82,6 +90,9 @@ static inline void io_uring_free(struct task_struct *tsk)
__io_uring_free(tsk);
}
int io_uring_cmd_sock(struct io_uring_cmd *cmd, unsigned int issue_flags);
+void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
+ unsigned int issue_flags);
+struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd);
#else
static inline int io_uring_cmd_import_fixed(u64 ubuf, unsigned long len, int rw,
struct iov_iter *iter, void *ioucmd)
@@ -122,6 +133,14 @@ static inline int io_uring_cmd_sock(struct io_uring_cmd *cmd,
{
return -EOPNOTSUPP;
}
+static inline void io_uring_cmd_mark_cancelable(struct io_uring_cmd *cmd,
+ unsigned int issue_flags)
+{
+}
+static inline struct task_struct *io_uring_cmd_get_task(struct io_uring_cmd *cmd)
+{
+ return NULL;
+}
#endif
#endif
diff --git a/include/linux/io_uring_types.h b/include/linux/io_uring_types.h
index 13d19b9be9f4..d3009d56af0b 100644
--- a/include/linux/io_uring_types.h
+++ b/include/linux/io_uring_types.h
@@ -265,6 +265,12 @@ struct io_ring_ctx {
*/
struct io_wq_work_list iopoll_list;
bool poll_multi_queue;
+
+ /*
+ * Any cancelable uring_cmd is added to this list in
+ * ->uring_cmd() by io_uring_cmd_mark_cancelable()
+ */
+ struct hlist_head cancelable_uring_cmd;
} ____cacheline_aligned_in_smp;
struct {
@@ -313,6 +319,13 @@ struct io_ring_ctx {
struct list_head cq_overflow_list;
struct io_hash_table cancel_table;
+ struct hlist_head waitid_list;
+
+#ifdef CONFIG_FUTEX
+ struct hlist_head futex_list;
+ struct io_alloc_cache futex_cache;
+#endif
+
const struct cred *sq_creds; /* cred used for __io_sq_thread() */
struct io_sq_data *sq_data; /* if using sq thread polling */
@@ -342,8 +355,6 @@ struct io_ring_ctx {
struct wait_queue_head rsrc_quiesce_wq;
unsigned rsrc_quiesce;
- struct list_head io_buffers_pages;
-
#if defined(CONFIG_UNIX)
struct socket *ring_sock;
#endif
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index c50a769d569a..8fb1b41b4d15 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -13,6 +13,7 @@
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
+#include <linux/iova_bitmap.h>
#include <uapi/linux/iommu.h>
#define IOMMU_READ (1 << 0)
@@ -37,6 +38,7 @@ struct bus_type;
struct device;
struct iommu_domain;
struct iommu_domain_ops;
+struct iommu_dirty_ops;
struct notifier_block;
struct iommu_sva;
struct iommu_fault_event;
@@ -65,6 +67,9 @@ struct iommu_domain_geometry {
#define __IOMMU_DOMAIN_SVA (1U << 4) /* Shared process address space */
+#define __IOMMU_DOMAIN_NESTED (1U << 6) /* User-managed address space nested
+ on a stage-2 translation */
+
#define IOMMU_DOMAIN_ALLOC_FLAGS ~__IOMMU_DOMAIN_DMA_FQ
/*
* This are the possible domain-types
@@ -91,10 +96,13 @@ struct iommu_domain_geometry {
__IOMMU_DOMAIN_DMA_API | \
__IOMMU_DOMAIN_DMA_FQ)
#define IOMMU_DOMAIN_SVA (__IOMMU_DOMAIN_SVA)
+#define IOMMU_DOMAIN_NESTED (__IOMMU_DOMAIN_NESTED)
struct iommu_domain {
unsigned type;
const struct iommu_domain_ops *ops;
+ const struct iommu_dirty_ops *dirty_ops;
+
unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */
struct iommu_domain_geometry geometry;
struct iommu_dma_cookie *iova_cookie;
@@ -133,6 +141,7 @@ enum iommu_cap {
* usefully support the non-strict DMA flush queue.
*/
IOMMU_CAP_DEFERRED_FLUSH,
+ IOMMU_CAP_DIRTY_TRACKING, /* IOMMU supports dirty tracking */
};
/* These are the possible reserved region types */
@@ -228,13 +237,109 @@ struct iommu_iotlb_gather {
};
/**
+ * struct iommu_dirty_bitmap - Dirty IOVA bitmap state
+ * @bitmap: IOVA bitmap
+ * @gather: Range information for a pending IOTLB flush
+ */
+struct iommu_dirty_bitmap {
+ struct iova_bitmap *bitmap;
+ struct iommu_iotlb_gather *gather;
+};
+
+/* Read but do not clear any dirty bits */
+#define IOMMU_DIRTY_NO_CLEAR (1 << 0)
+
+/**
+ * struct iommu_dirty_ops - domain specific dirty tracking operations
+ * @set_dirty_tracking: Enable or disable dirty tracking on the iommu domain
+ * @read_and_clear_dirty: Walk IOMMU page tables for dirtied PTEs marshalled
+ * into a bitmap, with each bit representing a page.
+ * Reads the dirty PTE bits and clears them from the
+ * IO page tables.
+ */
+struct iommu_dirty_ops {
+ int (*set_dirty_tracking)(struct iommu_domain *domain, bool enabled);
+ int (*read_and_clear_dirty)(struct iommu_domain *domain,
+ unsigned long iova, size_t size,
+ unsigned long flags,
+ struct iommu_dirty_bitmap *dirty);
+};
+
+/**
+ * struct iommu_user_data - iommu driver specific user space data info
+ * @type: The data type of the user buffer
+ * @uptr: Pointer to the user buffer for copy_from_user()
+ * @len: The length of the user buffer in bytes
+ *
+ * User space data is described by a uAPI defined in
+ * include/uapi/linux/iommufd.h; @type, @uptr and @len should simply be copied
+ * from an iommufd core uAPI struct.
+ */
+struct iommu_user_data {
+ unsigned int type;
+ void __user *uptr;
+ size_t len;
+};
+
+/**
+ * __iommu_copy_struct_from_user - Copy iommu driver specific user space data
+ * @dst_data: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @src_data: Pointer to a struct iommu_user_data for user space data info
+ * @data_type: The data type of the @dst_data. Must match with @src_data.type
+ * @data_len: Length of current user data structure, i.e. sizeof(struct _dst)
+ * @min_len: Initial length of user data structure for backward compatibility.
+ * This should be offsetofend using the last member in the user data
+ * struct that was initially added to include/uapi/linux/iommufd.h
+ */
+static inline int __iommu_copy_struct_from_user(
+ void *dst_data, const struct iommu_user_data *src_data,
+ unsigned int data_type, size_t data_len, size_t min_len)
+{
+ if (src_data->type != data_type)
+ return -EINVAL;
+ if (WARN_ON(!dst_data || !src_data))
+ return -EINVAL;
+ if (src_data->len < min_len || data_len < src_data->len)
+ return -EINVAL;
+ return copy_struct_from_user(dst_data, data_len, src_data->uptr,
+ src_data->len);
+}
+
+/**
+ * iommu_copy_struct_from_user - Copy iommu driver specific user space data
+ * @kdst: Pointer to an iommu driver specific user data that is defined in
+ * include/uapi/linux/iommufd.h
+ * @user_data: Pointer to a struct iommu_user_data for user space data info
+ * @data_type: The data type of the @kdst. Must match with @user_data->type
+ * @min_last: The last member of the data structure that @kdst pointed to in
+ * its initial version.
+ * Return: 0 for success, otherwise -error.
+ */
+#define iommu_copy_struct_from_user(kdst, user_data, data_type, min_last) \
+ __iommu_copy_struct_from_user(kdst, user_data, data_type, \
+ sizeof(*kdst), \
+ offsetofend(typeof(*kdst), min_last))
+
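A hedged sketch of the intended call pattern in a driver's @domain_alloc_user path; the struct name iommu_hwpt_vtd_s1, the type constant and the __reserved trailing member follow the iommufd uAPI style but are assumptions here, not something this hunk defines:

    struct iommu_hwpt_vtd_s1 s1_data;
    int ret;

    ret = iommu_copy_struct_from_user(&s1_data, user_data,
                                      IOMMU_HWPT_DATA_VTD_S1, __reserved);
    if (ret)
            return ERR_PTR(ret);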
+/**
* struct iommu_ops - iommu ops and capabilities
* @capable: check capability
* @hw_info: report iommu hardware information. The data buffer returned by this
* op is allocated in the iommu driver and freed by the caller after
* use. The information type is one of enum iommu_hw_info_type defined
* in include/uapi/linux/iommufd.h.
- * @domain_alloc: allocate iommu domain
+ * @domain_alloc: allocate and return an iommu domain on success, otherwise
+ * return NULL. The domain is not fully initialized until
+ * the caller, iommu_domain_alloc(), returns.
+ * @domain_alloc_user: Allocate an iommu domain corresponding to the input
+ * parameters as defined in include/uapi/linux/iommufd.h.
+ * Unlike @domain_alloc, it is called only by IOMMUFD and
+ * must fully initialize the new domain before return.
+ * Upon success, if the @user_data is valid and the @parent
+ * points to a kernel-managed domain, the new domain must be
+ * IOMMU_DOMAIN_NESTED type; otherwise, the @parent must be
+ * NULL while @user_data can optionally be provided, and the
+ * new domain must support __IOMMU_DOMAIN_PAGING.
+ * Upon failure, ERR_PTR must be returned.
* @probe_device: Add device to iommu driver handling
* @release_device: Remove device from iommu driver handling
* @probe_finalize: Do final setup work after the device is added to an IOMMU
@@ -267,6 +372,9 @@ struct iommu_ops {
/* Domain allocation and freeing by the iommu driver */
struct iommu_domain *(*domain_alloc)(unsigned iommu_domain_type);
+ struct iommu_domain *(*domain_alloc_user)(
+ struct device *dev, u32 flags, struct iommu_domain *parent,
+ const struct iommu_user_data *user_data);
struct iommu_device *(*probe_device)(struct device *dev);
void (*release_device)(struct device *dev);
@@ -632,6 +740,28 @@ static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
return gather && gather->queued;
}
+static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
+ struct iova_bitmap *bitmap,
+ struct iommu_iotlb_gather *gather)
+{
+ if (gather)
+ iommu_iotlb_gather_init(gather);
+
+ dirty->bitmap = bitmap;
+ dirty->gather = gather;
+}
+
+static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
+ unsigned long iova,
+ unsigned long length)
+{
+ if (dirty->bitmap)
+ iova_bitmap_set(dirty->bitmap, iova, length);
+
+ if (dirty->gather)
+ iommu_iotlb_gather_add_range(dirty->gather, iova, length);
+}
+
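How a driver's @read_and_clear_dirty walk might use the two helpers above, as a minimal sketch ('bitmap', 'iova', 'length' and the walk itself are hypothetical):

    struct iommu_iotlb_gather gather;
    struct iommu_dirty_bitmap dirty;

    iommu_dirty_bitmap_init(&dirty, bitmap, &gather);

    /* for each IOVA range whose PTE had its dirty bit set and cleared: */
    iommu_dirty_bitmap_record(&dirty, iova, length);

    iommu_iotlb_sync(domain, &gather); /* flush TLBs for the cleared PTEs */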
/* PCI device grouping function */
extern struct iommu_group *pci_device_group(struct device *dev);
/* Generic device grouping function */
@@ -737,6 +867,8 @@ struct iommu_fwspec {};
struct iommu_device {};
struct iommu_fault_param {};
struct iommu_iotlb_gather {};
+struct iommu_dirty_bitmap {};
+struct iommu_dirty_ops {};
static inline bool iommu_present(const struct bus_type *bus)
{
@@ -969,6 +1101,18 @@ static inline bool iommu_iotlb_gather_queued(struct iommu_iotlb_gather *gather)
return false;
}
+static inline void iommu_dirty_bitmap_init(struct iommu_dirty_bitmap *dirty,
+ struct iova_bitmap *bitmap,
+ struct iommu_iotlb_gather *gather)
+{
+}
+
+static inline void iommu_dirty_bitmap_record(struct iommu_dirty_bitmap *dirty,
+ unsigned long iova,
+ unsigned long length)
+{
+}
+
static inline void iommu_device_unregister(struct iommu_device *iommu)
{
}
diff --git a/include/linux/ioport.h b/include/linux/ioport.h
index 25d768d48970..14f5cfabbbc8 100644
--- a/include/linux/ioport.h
+++ b/include/linux/ioport.h
@@ -229,7 +229,7 @@ static inline unsigned long resource_ext_type(const struct resource *res)
return res->flags & IORESOURCE_EXT_TYPE_BITS;
}
/* True iff r1 completely contains r2 */
-static inline bool resource_contains(struct resource *r1, struct resource *r2)
+static inline bool resource_contains(const struct resource *r1, const struct resource *r2)
{
if (resource_type(r1) != resource_type(r2))
return false;
@@ -239,13 +239,13 @@ static inline bool resource_contains(struct resource *r1, struct resource *r2)
}
/* True if any part of r1 overlaps r2 */
-static inline bool resource_overlaps(struct resource *r1, struct resource *r2)
+static inline bool resource_overlaps(const struct resource *r1, const struct resource *r2)
{
return r1->start <= r2->end && r1->end >= r2->start;
}
-static inline bool
-resource_intersection(struct resource *r1, struct resource *r2, struct resource *r)
+static inline bool resource_intersection(const struct resource *r1, const struct resource *r2,
+ struct resource *r)
{
if (!resource_overlaps(r1, r2))
return false;
@@ -254,8 +254,8 @@ resource_intersection(struct resource *r1, struct resource *r2, struct resource
return true;
}
-static inline bool
-resource_union(struct resource *r1, struct resource *r2, struct resource *r)
+static inline bool resource_union(const struct resource *r1, const struct resource *r2,
+ struct resource *r)
{
if (!resource_overlaps(r1, r2))
return false;
diff --git a/include/linux/iosys-map.h b/include/linux/iosys-map.h
index cb71aa616bd3..1b06d074ade0 100644
--- a/include/linux/iosys-map.h
+++ b/include/linux/iosys-map.h
@@ -426,7 +426,7 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset,
* iosys_map_rd_field - Read a member from a struct in the iosys_map
*
* @map__: The iosys_map structure
- * @struct_offset__: Offset from the beggining of the map, where the struct
+ * @struct_offset__: Offset from the beginning of the map, where the struct
* is located
* @struct_type__: The struct describing the layout of the mapping
* @field__: Member of the struct to read
@@ -494,7 +494,7 @@ static inline void iosys_map_memset(struct iosys_map *dst, size_t offset,
* iosys_map_wr_field - Write to a member of a struct in the iosys_map
*
* @map__: The iosys_map structure
- * @struct_offset__: Offset from the beggining of the map, where the struct
+ * @struct_offset__: Offset from the beginning of the map, where the struct
* is located
* @struct_type__: The struct describing the layout of the mapping
* @field__: Member of the struct to read
diff --git a/include/linux/iov_iter.h b/include/linux/iov_iter.h
new file mode 100644
index 000000000000..270454a6703d
--- /dev/null
+++ b/include/linux/iov_iter.h
@@ -0,0 +1,274 @@
+/* SPDX-License-Identifier: GPL-2.0-or-later */
+/* I/O iterator iteration building functions.
+ *
+ * Copyright (C) 2023 Red Hat, Inc. All Rights Reserved.
+ * Written by David Howells (dhowells@redhat.com)
+ */
+
+#ifndef _LINUX_IOV_ITER_H
+#define _LINUX_IOV_ITER_H
+
+#include <linux/uio.h>
+#include <linux/bvec.h>
+
+typedef size_t (*iov_step_f)(void *iter_base, size_t progress, size_t len,
+ void *priv, void *priv2);
+typedef size_t (*iov_ustep_f)(void __user *iter_base, size_t progress, size_t len,
+ void *priv, void *priv2);
+
+/*
+ * Handle ITER_UBUF.
+ */
+static __always_inline
+size_t iterate_ubuf(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_ustep_f step)
+{
+ void __user *base = iter->ubuf;
+ size_t progress = 0, remain;
+
+ remain = step(base + iter->iov_offset, 0, len, priv, priv2);
+ progress = len - remain;
+ iter->iov_offset += progress;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_IOVEC.
+ */
+static __always_inline
+size_t iterate_iovec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_ustep_f step)
+{
+ const struct iovec *p = iter->__iov;
+ size_t progress = 0, skip = iter->iov_offset;
+
+ do {
+ size_t remain, consumed;
+ size_t part = min(len, p->iov_len - skip);
+
+ if (likely(part)) {
+ remain = step(p->iov_base + skip, progress, part, priv, priv2);
+ consumed = part - remain;
+ progress += consumed;
+ skip += consumed;
+ len -= consumed;
+ if (skip < p->iov_len)
+ break;
+ }
+ p++;
+ skip = 0;
+ } while (len);
+
+ iter->nr_segs -= p - iter->__iov;
+ iter->__iov = p;
+ iter->iov_offset = skip;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_KVEC.
+ */
+static __always_inline
+size_t iterate_kvec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ const struct kvec *p = iter->kvec;
+ size_t progress = 0, skip = iter->iov_offset;
+
+ do {
+ size_t remain, consumed;
+ size_t part = min(len, p->iov_len - skip);
+
+ if (likely(part)) {
+ remain = step(p->iov_base + skip, progress, part, priv, priv2);
+ consumed = part - remain;
+ progress += consumed;
+ skip += consumed;
+ len -= consumed;
+ if (skip < p->iov_len)
+ break;
+ }
+ p++;
+ skip = 0;
+ } while (len);
+
+ iter->nr_segs -= p - iter->kvec;
+ iter->kvec = p;
+ iter->iov_offset = skip;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_BVEC.
+ */
+static __always_inline
+size_t iterate_bvec(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ const struct bio_vec *p = iter->bvec;
+ size_t progress = 0, skip = iter->iov_offset;
+
+ do {
+ size_t remain, consumed;
+ size_t offset = p->bv_offset + skip, part;
+ void *kaddr = kmap_local_page(p->bv_page + offset / PAGE_SIZE);
+
+ part = min3(len,
+ (size_t)(p->bv_len - skip),
+ (size_t)(PAGE_SIZE - offset % PAGE_SIZE));
+ remain = step(kaddr + offset % PAGE_SIZE, progress, part, priv, priv2);
+ kunmap_local(kaddr);
+ consumed = part - remain;
+ len -= consumed;
+ progress += consumed;
+ skip += consumed;
+ if (skip >= p->bv_len) {
+ skip = 0;
+ p++;
+ }
+ if (remain)
+ break;
+ } while (len);
+
+ iter->nr_segs -= p - iter->bvec;
+ iter->bvec = p;
+ iter->iov_offset = skip;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_XARRAY.
+ */
+static __always_inline
+size_t iterate_xarray(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ struct folio *folio;
+ size_t progress = 0;
+ loff_t start = iter->xarray_start + iter->iov_offset;
+ pgoff_t index = start / PAGE_SIZE;
+ XA_STATE(xas, iter->xarray, index);
+
+ rcu_read_lock();
+ xas_for_each(&xas, folio, ULONG_MAX) {
+ size_t remain, consumed, offset, part, flen;
+
+ if (xas_retry(&xas, folio))
+ continue;
+ if (WARN_ON(xa_is_value(folio)))
+ break;
+ if (WARN_ON(folio_test_hugetlb(folio)))
+ break;
+
+ offset = offset_in_folio(folio, start + progress);
+ flen = min(folio_size(folio) - offset, len);
+
+ while (flen) {
+ void *base = kmap_local_folio(folio, offset);
+
+ part = min_t(size_t, flen,
+ PAGE_SIZE - offset_in_page(offset));
+ remain = step(base, progress, part, priv, priv2);
+ kunmap_local(base);
+
+ consumed = part - remain;
+ progress += consumed;
+ len -= consumed;
+
+ if (remain || len == 0)
+ goto out;
+ flen -= consumed;
+ offset += consumed;
+ }
+ }
+
+out:
+ rcu_read_unlock();
+ iter->iov_offset += progress;
+ iter->count -= progress;
+ return progress;
+}
+
+/*
+ * Handle ITER_DISCARD.
+ */
+static __always_inline
+size_t iterate_discard(struct iov_iter *iter, size_t len, void *priv, void *priv2,
+ iov_step_f step)
+{
+ size_t progress = len;
+
+ iter->count -= progress;
+ return progress;
+}
+
+/**
+ * iterate_and_advance2 - Iterate over an iterator
+ * @iter: The iterator to iterate over.
+ * @len: The amount to iterate over.
+ * @priv: Data for the step functions.
+ * @priv2: More data for the step functions.
+ * @ustep: Function for UBUF/IOVEC iterators; given __user addresses.
+ * @step: Function for other iterators; given kernel addresses.
+ *
+ * Iterate over the next part of an iterator, up to the specified length. The
+ * buffer is presented in segments, which for kernel iteration are broken up by
+ * physical pages and mapped, with the mapped address being presented.
+ *
+ * Two step functions, @step and @ustep, must be provided, one for handling
+ * mapped kernel addresses and the other for user addresses, which have the
+ * potential to fault since no pinning is performed.
+ *
+ * The step functions are passed the address and length of the segment, @priv,
+ * @priv2 and the amount of data so far iterated over (which can, for example,
+ * be added to @priv to point to the right part of a second buffer). The step
+ * functions should return the amount of the segment they didn't process (ie. 0
+ * indicates complete processing).
+ *
+ * This function returns the amount of data processed (ie. 0 means nothing was
+ * processed and the value of @len means processed to completion).
+ */
+static __always_inline
+size_t iterate_and_advance2(struct iov_iter *iter, size_t len, void *priv,
+ void *priv2, iov_ustep_f ustep, iov_step_f step)
+{
+ if (unlikely(iter->count < len))
+ len = iter->count;
+ if (unlikely(!len))
+ return 0;
+
+ if (likely(iter_is_ubuf(iter)))
+ return iterate_ubuf(iter, len, priv, priv2, ustep);
+ if (likely(iter_is_iovec(iter)))
+ return iterate_iovec(iter, len, priv, priv2, ustep);
+ if (iov_iter_is_bvec(iter))
+ return iterate_bvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_kvec(iter))
+ return iterate_kvec(iter, len, priv, priv2, step);
+ if (iov_iter_is_xarray(iter))
+ return iterate_xarray(iter, len, priv, priv2, step);
+ return iterate_discard(iter, len, priv, priv2, step);
+}
+
+/**
+ * iterate_and_advance - Iterate over an iterator
+ * @iter: The iterator to iterate over.
+ * @len: The amount to iterate over.
+ * @priv: Data for the step functions.
+ * @ustep: Function for UBUF/IOVEC iterators; given __user addresses.
+ * @step: Function for other iterators; given kernel addresses.
+ *
+ * As iterate_and_advance2(), but priv2 is always NULL.
+ */
+static __always_inline
+size_t iterate_and_advance(struct iov_iter *iter, size_t len, void *priv,
+ iov_ustep_f ustep, iov_step_f step)
+{
+ return iterate_and_advance2(iter, len, priv, NULL, ustep, step);
+}
+
+#endif /* _LINUX_IOV_ITER_H */
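
A usage sketch for the interface above, roughly how lib/iov_iter.c implements
iov_iter_zero(); the function names here are hypothetical. Note that each step
function returns the number of bytes it could NOT process, so 0 means the
whole segment was handled.

	static size_t example_zero_ustep(void __user *base, size_t progress,
					 size_t len, void *priv, void *priv2)
	{
		/* clear_user() returns the number of bytes not cleared,
		 * which is exactly the contract of a ustep function. */
		return clear_user(base, len);
	}

	static size_t example_zero_step(void *base, size_t progress, size_t len,
					void *priv, void *priv2)
	{
		memset(base, 0, len);
		return 0;	/* whole segment processed */
	}

	static size_t example_zero_iter(struct iov_iter *iter, size_t len)
	{
		return iterate_and_advance(iter, len, NULL,
					   example_zero_ustep, example_zero_step);
	}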
diff --git a/include/linux/iova_bitmap.h b/include/linux/iova_bitmap.h
index c006cf0a25f3..1c338f5e5b7a 100644
--- a/include/linux/iova_bitmap.h
+++ b/include/linux/iova_bitmap.h
@@ -7,6 +7,7 @@
#define _IOVA_BITMAP_H_
#include <linux/types.h>
+#include <linux/errno.h>
struct iova_bitmap;
@@ -14,6 +15,7 @@ typedef int (*iova_bitmap_fn_t)(struct iova_bitmap *bitmap,
unsigned long iova, size_t length,
void *opaque);
+#if IS_ENABLED(CONFIG_IOMMUFD_DRIVER)
struct iova_bitmap *iova_bitmap_alloc(unsigned long iova, size_t length,
unsigned long page_size,
u64 __user *data);
@@ -22,5 +24,29 @@ int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque,
iova_bitmap_fn_t fn);
void iova_bitmap_set(struct iova_bitmap *bitmap,
unsigned long iova, size_t length);
+#else
+static inline struct iova_bitmap *iova_bitmap_alloc(unsigned long iova,
+ size_t length,
+ unsigned long page_size,
+ u64 __user *data)
+{
+ return NULL;
+}
+
+static inline void iova_bitmap_free(struct iova_bitmap *bitmap)
+{
+}
+
+static inline int iova_bitmap_for_each(struct iova_bitmap *bitmap, void *opaque,
+ iova_bitmap_fn_t fn)
+{
+ return -EOPNOTSUPP;
+}
+
+static inline void iova_bitmap_set(struct iova_bitmap *bitmap,
+ unsigned long iova, size_t length)
+{
+}
+#endif
#endif
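
A sketch of how the stubs above keep callers buildable without
CONFIG_IOMMUFD_DRIVER; both functions are hypothetical, the real callers live
in the vfio/iommufd drivers.

	/* Hypothetical callback: mark the whole reported range dirty. */
	static int example_set_all(struct iova_bitmap *bitmap, unsigned long iova,
				   size_t length, void *opaque)
	{
		iova_bitmap_set(bitmap, iova, length);
		return 0;
	}

	static int example_report_dirty(unsigned long iova, size_t length,
					unsigned long page_size, u64 __user *data)
	{
		struct iova_bitmap *bitmap;
		int ret;

		bitmap = iova_bitmap_alloc(iova, length, page_size, data);
		if (IS_ERR(bitmap))
			return PTR_ERR(bitmap);

		ret = iova_bitmap_for_each(bitmap, NULL, example_set_all);
		iova_bitmap_free(bitmap);
		return ret;	/* the stub for_each yields -EOPNOTSUPP */
	}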
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index af8a771a053c..5e605e384aac 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -82,6 +82,7 @@ struct ipv6_devconf {
__u32 ioam6_id_wide;
__u8 ioam6_enabled;
__u8 ndisc_evict_nocarrier;
+ __u8 ra_honor_pio_life;
struct ctl_table_header *sysctl_header;
};
@@ -213,28 +214,9 @@ struct ipv6_pinfo {
__be32 flow_label;
__u32 frag_size;
- /*
- * Packed in 16bits.
- * Omit one shift by putting the signed field at MSB.
- */
-#if defined(__BIG_ENDIAN_BITFIELD)
- __s16 hop_limit:9;
- __u16 __unused_1:7;
-#else
- __u16 __unused_1:7;
- __s16 hop_limit:9;
-#endif
+ s16 hop_limit;
+ u8 mcast_hops;
-#if defined(__BIG_ENDIAN_BITFIELD)
- /* Packed in 16bits. */
- __s16 mcast_hops:9;
- __u16 __unused_2:6,
- mc_loop:1;
-#else
- __u16 mc_loop:1,
- __unused_2:6;
- __s16 mcast_hops:9;
-#endif
int ucast_oif;
int mcast_oif;
@@ -262,21 +244,11 @@ struct ipv6_pinfo {
} rxopt;
/* sockopt flags */
- __u16 recverr:1,
- sndflow:1,
- repflow:1,
- pmtudisc:3,
- padding:1, /* 1 bit hole */
- srcprefs:3, /* 001: prefer temporary address
+ __u8 srcprefs; /* 001: prefer temporary address
* 010: prefer public address
* 100: prefer care-of address
*/
- dontfrag:1,
- autoflowlabel:1,
- autoflowlabel_set:1,
- mc_all:1,
- recverr_rfc4884:1,
- rtalert_isolate:1;
+ __u8 pmtudisc;
__u8 min_hopcount;
__u8 tclass;
__be32 rcv_flowinfo;
@@ -293,6 +265,18 @@ struct ipv6_pinfo {
struct inet6_cork cork;
};
+/* We currently use available bits from inet_sk(sk)->inet_flags;
+ * this could change in the future.
+ */
+#define inet6_test_bit(nr, sk) \
+ test_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
+#define inet6_set_bit(nr, sk) \
+ set_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
+#define inet6_clear_bit(nr, sk) \
+ clear_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags)
+#define inet6_assign_bit(nr, sk, val) \
+ assign_bit(INET_FLAGS_##nr, &inet_sk(sk)->inet_flags, val)
+
/* WARNING: don't change the layout of the members in {raw,udp,tcp}6_sock! */
struct raw6_sock {
/* inet_sock has to be the first member of raw6_sock */
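
A short sketch of how the new helpers replace the removed bitfields. It
assumes a flag named INET_FLAGS_SNDFLOW exists in the inet_flags enum, since
the macros paste the INET_FLAGS_ prefix onto their first argument.

	/* Formerly "np->sndflow = val" / "np->sndflow" on struct ipv6_pinfo. */
	static void example_set_sndflow(struct sock *sk, bool val)
	{
		/* atomic bitop; no lock_sock() needed */
		inet6_assign_bit(SNDFLOW, sk, val);
	}

	static bool example_get_sndflow(struct sock *sk)
	{
		return inet6_test_bit(SNDFLOW, sk);
	}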
diff --git a/include/linux/irq.h b/include/linux/irq.h
index d8a6fdce9373..90081afa10ce 100644
--- a/include/linux/irq.h
+++ b/include/linux/irq.h
@@ -215,8 +215,6 @@ struct irq_data {
* IRQD_SINGLE_TARGET - IRQ allows only a single affinity target
* IRQD_DEFAULT_TRIGGER_SET - Expected trigger already been set
* IRQD_CAN_RESERVE - Can use reservation mode
- * IRQD_MSI_NOMASK_QUIRK - Non-maskable MSI quirk for affinity change
- * required
* IRQD_HANDLE_ENFORCE_IRQCTX - Enforce that handle_irq_*() is only invoked
* from actual interrupt context.
* IRQD_AFFINITY_ON_ACTIVATE - Affinity is set on activation. Don't call
@@ -247,11 +245,10 @@ enum {
IRQD_SINGLE_TARGET = BIT(24),
IRQD_DEFAULT_TRIGGER_SET = BIT(25),
IRQD_CAN_RESERVE = BIT(26),
- IRQD_MSI_NOMASK_QUIRK = BIT(27),
- IRQD_HANDLE_ENFORCE_IRQCTX = BIT(28),
- IRQD_AFFINITY_ON_ACTIVATE = BIT(29),
- IRQD_IRQ_ENABLED_ON_SUSPEND = BIT(30),
- IRQD_RESEND_WHEN_IN_PROGRESS = BIT(31),
+ IRQD_HANDLE_ENFORCE_IRQCTX = BIT(27),
+ IRQD_AFFINITY_ON_ACTIVATE = BIT(28),
+ IRQD_IRQ_ENABLED_ON_SUSPEND = BIT(29),
+ IRQD_RESEND_WHEN_IN_PROGRESS = BIT(30),
};
#define __irqd_to_state(d) ACCESS_PRIVATE((d)->common, state_use_accessors)
@@ -426,21 +423,6 @@ static inline bool irqd_can_reserve(struct irq_data *d)
return __irqd_to_state(d) & IRQD_CAN_RESERVE;
}
-static inline void irqd_set_msi_nomask_quirk(struct irq_data *d)
-{
- __irqd_to_state(d) |= IRQD_MSI_NOMASK_QUIRK;
-}
-
-static inline void irqd_clr_msi_nomask_quirk(struct irq_data *d)
-{
- __irqd_to_state(d) &= ~IRQD_MSI_NOMASK_QUIRK;
-}
-
-static inline bool irqd_msi_nomask_quirk(struct irq_data *d)
-{
- return __irqd_to_state(d) & IRQD_MSI_NOMASK_QUIRK;
-}
-
static inline void irqd_set_affinity_on_activate(struct irq_data *d)
{
__irqd_to_state(d) |= IRQD_AFFINITY_ON_ACTIVATE;
diff --git a/include/linux/irqdomain.h b/include/linux/irqdomain.h
index 51c254b7fec2..ee0a82c60508 100644
--- a/include/linux/irqdomain.h
+++ b/include/linux/irqdomain.h
@@ -174,7 +174,7 @@ struct irq_domain {
irq_hw_number_t hwirq_max;
unsigned int revmap_size;
struct radix_tree_root revmap_tree;
- struct irq_data __rcu *revmap[];
+ struct irq_data __rcu *revmap[] __counted_by(revmap_size);
};
/* Irq domain flags */
diff --git a/include/linux/iversion.h b/include/linux/iversion.h
index f174ff1b59ee..8f972eaca2ed 100644
--- a/include/linux/iversion.h
+++ b/include/linux/iversion.h
@@ -256,7 +256,7 @@ inode_peek_iversion(const struct inode *inode)
* For filesystems without any sort of change attribute, the best we can
* do is fake one up from the ctime:
*/
-static inline u64 time_to_chattr(struct timespec64 *t)
+static inline u64 time_to_chattr(const struct timespec64 *t)
{
u64 chattr = t->tv_sec;
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index 52772c826c86..6dcbb4eb80fb 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -886,7 +886,7 @@ struct journal_s
* Journal head shrinker, reclaim buffer's journal head which
* has been written back.
*/
- struct shrinker j_shrinker;
+ struct shrinker *j_shrinker;
/**
* @j_checkpoint_jh_count:
diff --git a/include/linux/kasan.h b/include/linux/kasan.h
index 3df5499f7936..72cb693b075b 100644
--- a/include/linux/kasan.h
+++ b/include/linux/kasan.h
@@ -54,7 +54,7 @@ extern p4d_t kasan_early_shadow_p4d[MAX_PTRS_PER_P4D];
int kasan_populate_early_shadow(const void *shadow_start,
const void *shadow_end);
-#ifndef __HAVE_ARCH_SHADOW_MAP
+#ifndef kasan_mem_to_shadow
static inline void *kasan_mem_to_shadow(const void *addr)
{
return (void *)((unsigned long)addr >> KASAN_SHADOW_SCALE_SHIFT)
@@ -285,8 +285,10 @@ static inline bool kasan_check_byte(const void *address)
#if defined(CONFIG_KASAN) && defined(CONFIG_KASAN_STACK)
void kasan_unpoison_task_stack(struct task_struct *task);
+asmlinkage void kasan_unpoison_task_stack_below(const void *watermark);
#else
static inline void kasan_unpoison_task_stack(struct task_struct *task) {}
+static inline void kasan_unpoison_task_stack_below(const void *watermark) {}
#endif
#ifdef CONFIG_KASAN_GENERIC
@@ -466,10 +468,10 @@ static inline void kasan_free_module_shadow(const struct vm_struct *vm) {}
#endif /* (CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS) && !CONFIG_KASAN_VMALLOC */
-#ifdef CONFIG_KASAN_INLINE
+#if defined(CONFIG_KASAN_GENERIC) || defined(CONFIG_KASAN_SW_TAGS)
void kasan_non_canonical_hook(unsigned long addr);
-#else /* CONFIG_KASAN_INLINE */
+#else /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
static inline void kasan_non_canonical_hook(unsigned long addr) { }
-#endif /* CONFIG_KASAN_INLINE */
+#endif /* CONFIG_KASAN_GENERIC || CONFIG_KASAN_SW_TAGS */
#endif /* LINUX_KASAN_H */
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index cee8fe87e9f4..d9ad21058eed 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -13,6 +13,7 @@
#include <linux/stdarg.h>
#include <linux/align.h>
+#include <linux/array_size.h>
#include <linux/limits.h>
#include <linux/linkage.h>
#include <linux/stddef.h>
@@ -50,12 +51,6 @@
#define READ 0
#define WRITE 1
-/**
- * ARRAY_SIZE - get the number of elements in array @arr
- * @arr: array to be sized
- */
-#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
-
#define PTR_IF(cond, ptr) ((cond) ? (ptr) : NULL)
#define u64_to_user_ptr(x) ( \
diff --git a/include/linux/kernfs.h b/include/linux/kernfs.h
index 2a36f3218b51..99aaa050ccb7 100644
--- a/include/linux/kernfs.h
+++ b/include/linux/kernfs.h
@@ -316,6 +316,7 @@ struct kernfs_ops {
struct poll_table_struct *pt);
int (*mmap)(struct kernfs_open_file *of, struct vm_area_struct *vma);
+ loff_t (*llseek)(struct kernfs_open_file *of, loff_t offset, int whence);
};
/*
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index 32c78078552c..8227455192b7 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -22,10 +22,6 @@
#include <uapi/linux/kexec.h>
#include <linux/verification.h>
-/* Location of a reserved region to hold the crash kernel.
- */
-extern struct resource crashk_res;
-extern struct resource crashk_low_res;
extern note_buf_t __percpu *crash_notes;
#ifdef CONFIG_KEXEC_CORE
diff --git a/include/linux/key.h b/include/linux/key.h
index 938d7ecfb495..943a432da3ae 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -515,6 +515,7 @@ extern void key_init(void);
#define key_init() do { } while(0)
#define key_free_user_ns(ns) do { } while(0)
#define key_remove_domain(d) do { } while(0)
+#define key_lookup(k) NULL
#endif /* CONFIG_KEYS */
#endif /* __KERNEL__ */
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index 85a64cb95d75..365eb092e9c4 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -26,8 +26,7 @@
#include <linux/rcupdate.h>
#include <linux/mutex.h>
#include <linux/ftrace.h>
-#include <linux/refcount.h>
-#include <linux/freelist.h>
+#include <linux/objpool.h>
#include <linux/rethook.h>
#include <asm/kprobes.h>
@@ -141,7 +140,7 @@ static inline bool kprobe_ftrace(struct kprobe *p)
*/
struct kretprobe_holder {
struct kretprobe *rp;
- refcount_t ref;
+ struct objpool_head pool;
};
struct kretprobe {
@@ -154,7 +153,6 @@ struct kretprobe {
#ifdef CONFIG_KRETPROBE_ON_RETHOOK
struct rethook *rh;
#else
- struct freelist_head freelist;
struct kretprobe_holder *rph;
#endif
};
@@ -165,10 +163,7 @@ struct kretprobe_instance {
#ifdef CONFIG_KRETPROBE_ON_RETHOOK
struct rethook_node node;
#else
- union {
- struct freelist_node freelist;
- struct rcu_head rcu;
- };
+ struct rcu_head rcu;
struct llist_node llist;
struct kretprobe_holder *rph;
kprobe_opcode_t *ret_addr;
diff --git a/include/linux/kstrtox.h b/include/linux/kstrtox.h
index 529974e22ea7..7fcf29a4e0de 100644
--- a/include/linux/kstrtox.h
+++ b/include/linux/kstrtox.h
@@ -147,9 +147,4 @@ extern long simple_strtol(const char *,char **,unsigned int);
extern unsigned long long simple_strtoull(const char *,char **,unsigned int);
extern long long simple_strtoll(const char *,char **,unsigned int);
-static inline int strtobool(const char *s, bool *res)
-{
- return kstrtobool(s, res);
-}
-
#endif /* _LINUX_KSTRTOX_H */
diff --git a/include/linux/kthread.h b/include/linux/kthread.h
index 2c30ade43bc8..b11f53c1ba2e 100644
--- a/include/linux/kthread.h
+++ b/include/linux/kthread.h
@@ -86,6 +86,7 @@ void free_kthread_struct(struct task_struct *k);
void kthread_bind(struct task_struct *k, unsigned int cpu);
void kthread_bind_mask(struct task_struct *k, const struct cpumask *mask);
int kthread_stop(struct task_struct *k);
+int kthread_stop_put(struct task_struct *k);
bool kthread_should_stop(void);
bool kthread_should_park(void);
bool kthread_should_stop_or_park(void);
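
kthread_stop_put() folds the common stop-then-release pattern into one call; a
minimal sketch with a hypothetical caller:

	/* Stop a worker thread the caller holds a task reference on. */
	static int example_stop_worker(struct task_struct *worker)
	{
		/* Was: ret = kthread_stop(worker); put_task_struct(worker); */
		return kthread_stop_put(worker);
	}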
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index fb6c6109fdca..4944136efaa2 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -664,7 +664,7 @@ struct kvm_irq_routing_table {
* Array indexed by gsi. Each entry contains list of irq chips
* the gsi is connected to.
*/
- struct hlist_head map[];
+ struct hlist_head map[] __counted_by(nr_rt_entries);
};
#endif
diff --git a/include/linux/libata.h b/include/linux/libata.h
index 52d58b13e5ee..1dbb14daccfa 100644
--- a/include/linux/libata.h
+++ b/include/linux/libata.h
@@ -192,6 +192,7 @@ enum {
ATA_PFLAG_UNLOADING = (1 << 9), /* driver is being unloaded */
ATA_PFLAG_UNLOADED = (1 << 10), /* driver is unloaded */
+ ATA_PFLAG_RESUMING = (1 << 16), /* port is being resumed */
ATA_PFLAG_SUSPENDED = (1 << 17), /* port is suspended (power) */
ATA_PFLAG_PM_PENDING = (1 << 18), /* PM operation pending */
ATA_PFLAG_INIT_GTM_VALID = (1 << 19), /* initial gtm data valid */
@@ -222,6 +223,10 @@ enum {
ATA_HOST_PARALLEL_SCAN = (1 << 2), /* Ports on this host can be scanned in parallel */
ATA_HOST_IGNORE_ATA = (1 << 3), /* Ignore ATA devices on this host. */
+ ATA_HOST_NO_PART = (1 << 4), /* Host does not support partial */
+ ATA_HOST_NO_SSC = (1 << 5), /* Host does not support slumber */
+ ATA_HOST_NO_DEVSLP = (1 << 6), /* Host does not support devslp */
+
/* bits 24:31 of host->flags are reserved for LLD specific flags */
/* various lengths of time */
@@ -255,7 +260,7 @@ enum {
* advised to wait only for the following duration before
* doing SRST.
*/
- ATA_TMOUT_PMP_SRST_WAIT = 5000,
+ ATA_TMOUT_PMP_SRST_WAIT = 10000,
/* When the LPM policy is set to ATA_LPM_MAX_POWER, there might
* be a spurious PHY event, so ignore the first PHY event that
@@ -314,9 +319,10 @@ enum {
ATA_EH_ENABLE_LINK = (1 << 3),
ATA_EH_PARK = (1 << 5), /* unload heads and stop I/O */
ATA_EH_GET_SUCCESS_SENSE = (1 << 6), /* Get sense data for successful cmd */
+ ATA_EH_SET_ACTIVE = (1 << 7), /* Set a device to active power mode */
ATA_EH_PERDEV_MASK = ATA_EH_REVALIDATE | ATA_EH_PARK |
- ATA_EH_GET_SUCCESS_SENSE,
+ ATA_EH_GET_SUCCESS_SENSE | ATA_EH_SET_ACTIVE,
ATA_EH_ALL_ACTIONS = ATA_EH_REVALIDATE | ATA_EH_RESET |
ATA_EH_ENABLE_LINK,
@@ -353,7 +359,7 @@ enum {
/* This should match the actual table size of
* ata_eh_cmd_timeout_table in libata-eh.c.
*/
- ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 7,
+ ATA_EH_CMD_TIMEOUT_TABLE_SIZE = 8,
/* Horkage types. May be set by libata or controller on drives
(some horkage may be drive/controller pair dependent */
@@ -650,7 +656,7 @@ struct ata_cpr {
struct ata_cpr_log {
u8 nr_cpr;
- struct ata_cpr cpr[];
+ struct ata_cpr cpr[] __counted_by(nr_cpr);
};
struct ata_device {
@@ -1144,6 +1150,7 @@ extern int ata_std_bios_param(struct scsi_device *sdev,
struct block_device *bdev,
sector_t capacity, int geom[]);
extern void ata_scsi_unlock_native_capacity(struct scsi_device *sdev);
+extern int ata_scsi_slave_alloc(struct scsi_device *sdev);
extern int ata_scsi_slave_config(struct scsi_device *sdev);
extern void ata_scsi_slave_destroy(struct scsi_device *sdev);
extern int ata_scsi_change_queue_depth(struct scsi_device *sdev,
@@ -1392,6 +1399,7 @@ extern const struct attribute_group *ata_common_sdev_groups[];
.this_id = ATA_SHT_THIS_ID, \
.emulated = ATA_SHT_EMULATED, \
.proc_name = drv_name, \
+ .slave_alloc = ata_scsi_slave_alloc, \
.slave_destroy = ata_scsi_slave_destroy, \
.bios_param = ata_std_bios_param, \
.unlock_native_capacity = ata_scsi_unlock_native_capacity,\
@@ -1553,6 +1561,11 @@ void ata_port_desc(struct ata_port *ap, const char *fmt, ...);
extern void ata_port_pbar_desc(struct ata_port *ap, int bar, ssize_t offset,
const char *name);
#endif
+static inline void ata_port_desc_misc(struct ata_port *ap, int irq)
+{
+ ata_port_desc(ap, "irq %d", irq);
+ ata_port_desc(ap, "lpm-pol %d", ap->target_lpm_policy);
+}
static inline bool ata_tag_internal(unsigned int tag)
{
@@ -1873,23 +1886,21 @@ static inline unsigned long ata_deadline(unsigned long from_jiffies,
change in future hardware and specs, secondly 0xFF means 'no DMA' but is
> UDMA_0. Dyma ddreigiau */
-static inline int ata_using_mwdma(struct ata_device *adev)
+static inline bool ata_using_mwdma(struct ata_device *adev)
{
- if (adev->dma_mode >= XFER_MW_DMA_0 && adev->dma_mode <= XFER_MW_DMA_4)
- return 1;
- return 0;
+ return adev->dma_mode >= XFER_MW_DMA_0 &&
+ adev->dma_mode <= XFER_MW_DMA_4;
}
-static inline int ata_using_udma(struct ata_device *adev)
+static inline bool ata_using_udma(struct ata_device *adev)
{
- if (adev->dma_mode >= XFER_UDMA_0 && adev->dma_mode <= XFER_UDMA_7)
- return 1;
- return 0;
+ return adev->dma_mode >= XFER_UDMA_0 &&
+ adev->dma_mode <= XFER_UDMA_7;
}
-static inline int ata_dma_enabled(struct ata_device *adev)
+static inline bool ata_dma_enabled(struct ata_device *adev)
{
- return (adev->dma_mode == 0xFF ? 0 : 1);
+ return adev->dma_mode != 0xFF;
}
/**************************************************************************
diff --git a/include/linux/linkmode.h b/include/linux/linkmode.h
index 15e0e0209da4..7303b4bc2ce0 100644
--- a/include/linux/linkmode.h
+++ b/include/linux/linkmode.h
@@ -43,15 +43,6 @@ static inline void linkmode_set_bit(int nr, volatile unsigned long *addr)
__set_bit(nr, addr);
}
-static inline void linkmode_set_bit_array(const int *array, int array_size,
- unsigned long *addr)
-{
- int i;
-
- for (i = 0; i < array_size; i++)
- linkmode_set_bit(array[i], addr);
-}
-
static inline void linkmode_clear_bit(int nr, volatile unsigned long *addr)
{
__clear_bit(nr, addr);
@@ -71,6 +62,15 @@ static inline int linkmode_test_bit(int nr, const volatile unsigned long *addr)
return test_bit(nr, addr);
}
+static inline void linkmode_set_bit_array(const int *array, int array_size,
+ unsigned long *addr)
+{
+ int i;
+
+ for (i = 0; i < array_size; i++)
+ linkmode_set_bit(array[i], addr);
+}
+
static inline int linkmode_equal(const unsigned long *src1,
const unsigned long *src2)
{
diff --git a/include/linux/list.h b/include/linux/list.h
index 164b4d0e9d2a..1837caedf723 100644
--- a/include/linux/list.h
+++ b/include/linux/list.h
@@ -687,6 +687,14 @@ static inline void list_splice_tail_init(struct list_head *list,
for (pos = (head)->next; !list_is_head(pos, (head)); pos = pos->next)
/**
+ * list_for_each_reverse - iterate backwards over a list
+ * @pos: the &struct list_head to use as a loop cursor.
+ * @head: the head for your list.
+ */
+#define list_for_each_reverse(pos, head) \
+ for (pos = (head)->prev; pos != (head); pos = pos->prev)
+
+/**
* list_for_each_rcu - Iterate over a list in an RCU-safe fashion
* @pos: the &struct list_head to use as a loop cursor.
* @head: the head for your list.
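
A small usage sketch for the new reverse iterator; the item structure and
function are hypothetical.

	struct example_item {
		struct list_head node;
		int val;
	};

	static void example_walk_backwards(struct list_head *head)
	{
		struct list_head *pos;

		list_for_each_reverse(pos, head) {
			struct example_item *it =
				list_entry(pos, struct example_item, node);

			/* newest first, if items were added with list_add_tail() */
			pr_info("val=%d\n", it->val);
		}
	}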
diff --git a/include/linux/llist.h b/include/linux/llist.h
index 85bda2d02d65..2c982ff7475a 100644
--- a/include/linux/llist.h
+++ b/include/linux/llist.h
@@ -74,6 +74,33 @@ static inline void init_llist_head(struct llist_head *list)
}
/**
+ * init_llist_node - initialize lock-less list node
+ * @node: the node to be initialised
+ *
+ * In cases where there is a need to test if a node is on
+ * a list or not, this initialises the node to clearly
+ * not be on any list.
+ */
+static inline void init_llist_node(struct llist_node *node)
+{
+ node->next = node;
+}
+
+/**
+ * llist_on_list - test if a lock-less list node is on a list
+ * @node: the node to test
+ *
+ * When a node is on a list the ->next pointer will be NULL or
+ * some other node. It can never point to itself. We use that
+ * in init_llist_node() to record that a node is not on any list,
+ * and here to test whether it is on any list.
+ */
+static inline bool llist_on_list(const struct llist_node *node)
+{
+ return node->next != node;
+}
+
+/**
* llist_entry - get the struct of this entry
* @ptr: the &struct llist_node pointer.
* @type: the type of the struct this is embedded in.
@@ -249,6 +276,25 @@ static inline struct llist_node *__llist_del_all(struct llist_head *head)
extern struct llist_node *llist_del_first(struct llist_head *head);
+/**
+ * llist_del_first_init - delete first entry from lock-less list and mark it as being off-list
+ * @head: the head of lock-less list to delete from.
+ *
+ * This behaves the same as llist_del_first() except that init_llist_node() is called
+ * on the returned node so that llist_on_list() will report false for the node.
+ */
+static inline struct llist_node *llist_del_first_init(struct llist_head *head)
+{
+ struct llist_node *n = llist_del_first(head);
+
+ if (n)
+ init_llist_node(n);
+ return n;
+}
+
+extern bool llist_del_first_this(struct llist_head *head,
+ struct llist_node *this);
+
struct llist_node *llist_reverse_order(struct llist_node *head);
#endif /* LLIST_H */
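
Pulling the new helpers together: init_llist_node() stores a self-pointer,
llist_on_list() tests for it, and llist_del_first_init() restores it on
removal. A sketch with a hypothetical work item (whose lnode must be set up
with init_llist_node() before first use):

	struct example_work {
		struct llist_node lnode;
		/* payload */
	};

	static void example_queue_once(struct llist_head *q, struct example_work *w)
	{
		if (!llist_on_list(&w->lnode))	/* skip if already queued */
			llist_add(&w->lnode, q);
	}

	static struct example_work *example_pop(struct llist_head *q)
	{
		struct llist_node *n = llist_del_first_init(q);

		/* n, if non-NULL, now reports false from llist_on_list() */
		return n ? container_of(n, struct example_work, lnode) : NULL;
	}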
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index 0f016d69c996..9f565416d186 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -282,7 +282,7 @@ __be32 nlmsvc_testlock(struct svc_rqst *, struct nlm_file *,
struct nlm_host *, struct nlm_lock *,
struct nlm_lock *, struct nlm_cookie *);
__be32 nlmsvc_cancel_blocked(struct net *net, struct nlm_file *, struct nlm_lock *);
-void nlmsvc_retry_blocked(void);
+void nlmsvc_retry_blocked(struct svc_rqst *rqstp);
void nlmsvc_traverse_blocks(struct nlm_host *, struct nlm_file *,
nlm_host_match_fn_t match);
void nlmsvc_grant_reply(struct nlm_cookie *, __be32);
diff --git a/include/linux/logic_pio.h b/include/linux/logic_pio.h
index 54945aa824b4..babf4e3c28ba 100644
--- a/include/linux/logic_pio.h
+++ b/include/linux/logic_pio.h
@@ -39,9 +39,6 @@ struct logic_pio_host_ops {
#ifdef CONFIG_INDIRECT_PIO
u8 logic_inb(unsigned long addr);
-void logic_outb(u8 value, unsigned long addr);
-void logic_outw(u16 value, unsigned long addr);
-void logic_outl(u32 value, unsigned long addr);
u16 logic_inw(unsigned long addr);
u32 logic_inl(unsigned long addr);
void logic_outb(u8 value, unsigned long addr);
diff --git a/include/linux/lsm_hook_defs.h b/include/linux/lsm_hook_defs.h
index ac962c4cb44b..99b8176c3738 100644
--- a/include/linux/lsm_hook_defs.h
+++ b/include/linux/lsm_hook_defs.h
@@ -43,17 +43,17 @@ LSM_HOOK(int, 0, capset, struct cred *new, const struct cred *old,
const kernel_cap_t *permitted)
LSM_HOOK(int, 0, capable, const struct cred *cred, struct user_namespace *ns,
int cap, unsigned int opts)
-LSM_HOOK(int, 0, quotactl, int cmds, int type, int id, struct super_block *sb)
+LSM_HOOK(int, 0, quotactl, int cmds, int type, int id, const struct super_block *sb)
LSM_HOOK(int, 0, quota_on, struct dentry *dentry)
LSM_HOOK(int, 0, syslog, int type)
LSM_HOOK(int, 0, settime, const struct timespec64 *ts,
const struct timezone *tz)
LSM_HOOK(int, 0, vm_enough_memory, struct mm_struct *mm, long pages)
LSM_HOOK(int, 0, bprm_creds_for_exec, struct linux_binprm *bprm)
-LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, struct file *file)
+LSM_HOOK(int, 0, bprm_creds_from_file, struct linux_binprm *bprm, const struct file *file)
LSM_HOOK(int, 0, bprm_check_security, struct linux_binprm *bprm)
-LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, struct linux_binprm *bprm)
-LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, struct linux_binprm *bprm)
+LSM_HOOK(void, LSM_RET_VOID, bprm_committing_creds, const struct linux_binprm *bprm)
+LSM_HOOK(void, LSM_RET_VOID, bprm_committed_creds, const struct linux_binprm *bprm)
LSM_HOOK(int, 0, fs_context_submount, struct fs_context *fc, struct super_block *reference)
LSM_HOOK(int, 0, fs_context_dup, struct fs_context *fc,
struct fs_context *src_sc)
@@ -66,7 +66,7 @@ LSM_HOOK(void, LSM_RET_VOID, sb_free_mnt_opts, void *mnt_opts)
LSM_HOOK(int, 0, sb_eat_lsm_opts, char *orig, void **mnt_opts)
LSM_HOOK(int, 0, sb_mnt_opts_compat, struct super_block *sb, void *mnt_opts)
LSM_HOOK(int, 0, sb_remount, struct super_block *sb, void *mnt_opts)
-LSM_HOOK(int, 0, sb_kern_mount, struct super_block *sb)
+LSM_HOOK(int, 0, sb_kern_mount, const struct super_block *sb)
LSM_HOOK(int, 0, sb_show_options, struct seq_file *m, struct super_block *sb)
LSM_HOOK(int, 0, sb_statfs, struct dentry *dentry)
LSM_HOOK(int, 0, sb_mount, const char *dev_name, const struct path *path,
diff --git a/include/linux/lwq.h b/include/linux/lwq.h
new file mode 100644
index 000000000000..d081d5cf8e33
--- /dev/null
+++ b/include/linux/lwq.h
@@ -0,0 +1,124 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+
+#ifndef LWQ_H
+#define LWQ_H
+/*
+ * Light-weight single-linked queue built from llist
+ *
+ * Entries can be enqueued from any context with no locking.
+ * Entries can be dequeued from process context with integrated locking.
+ *
+ * This is particularly suitable when work items are queued in
+ * BH or IRQ context, and where work items are handled one at a time
+ * by dedicated threads.
+ */
+#include <linux/container_of.h>
+#include <linux/spinlock.h>
+#include <linux/llist.h>
+
+struct lwq_node {
+ struct llist_node node;
+};
+
+struct lwq {
+ spinlock_t lock;
+ struct llist_node *ready; /* entries to be dequeued */
+ struct llist_head new; /* entries being enqueued */
+};
+
+/**
+ * lwq_init - initialise a lwq
+ * @q: the lwq object
+ */
+static inline void lwq_init(struct lwq *q)
+{
+ spin_lock_init(&q->lock);
+ q->ready = NULL;
+ init_llist_head(&q->new);
+}
+
+/**
+ * lwq_empty - test if lwq contains any entry
+ * @q: the lwq object
+ *
+ * This empty test contains an acquire barrier so that if a wakeup
+ * is sent when lwq_enqueue() returns %true, it is safe to go to sleep after
+ * a test on lwq_empty().
+ */
+static inline bool lwq_empty(struct lwq *q)
+{
+ /* acquire ensures ordering wrt lwq_enqueue() */
+ return smp_load_acquire(&q->ready) == NULL && llist_empty(&q->new);
+}
+
+struct llist_node *__lwq_dequeue(struct lwq *q);
+/**
+ * lwq_dequeue - dequeue first (oldest) entry from lwq
+ * @q: the queue to dequeue from
+ * @type: the type of object to return
+ * @member: the member in the returned object which is an lwq_node.
+ *
+ * Remove a single object from the lwq and return it. This will take
+ * a spinlock and so must always be called in the same context, typically
+ * process context.
+ */
+#define lwq_dequeue(q, type, member) \
+ ({ struct llist_node *_n = __lwq_dequeue(q); \
+ _n ? container_of(_n, type, member.node) : NULL; })
+
+struct llist_node *lwq_dequeue_all(struct lwq *q);
+
+/**
+ * lwq_for_each_safe - iterate over detached queue allowing deletion
+ * @_n: iterator variable
+ * @_t1: temporary struct llist_node **
+ * @_t2: temporary struct llist_node *
+ * @_l: address of llist_node pointer from lwq_dequeue_all()
+ * @_member: member in _n where lwq_node is found.
+ *
+ * Iterate over members in a dequeued list. If the iterator variable
+ * is set to NULL, the iterator removes that entry from the queue.
+ */
+#define lwq_for_each_safe(_n, _t1, _t2, _l, _member) \
+ for (_t1 = (_l); \
+ *(_t1) ? (_n = container_of(*(_t1), typeof(*(_n)), _member.node),\
+ _t2 = ((*_t1)->next), \
+ true) \
+ : false; \
+ (_n) ? (_t1 = &(_n)->_member.node.next, 0) \
+ : ((*(_t1) = (_t2)), 0))
+
+/**
+ * lwq_enqueue - add a new item to the end of the queue
+ * @n: the lwq_node embedded in the item to be added
+ * @q: the lwq to append to.
+ *
+ * No locking is needed to append to the queue so this can
+ * be called from any context.
+ * Return %true if the list may have previously been empty.
+ */
+static inline bool lwq_enqueue(struct lwq_node *n, struct lwq *q)
+{
+	/* acquire ensures ordering wrt lwq_dequeue */
+ return llist_add(&n->node, &q->new) &&
+ smp_load_acquire(&q->ready) == NULL;
+}
+
+/**
+ * lwq_enqueue_batch - add a list of new items to the end of the queue
+ * @n: the lwq_node embedded in the first item to be added
+ * @q: the lwq to append to.
+ *
+ * No locking is needed to append to the queue so this can
+ * be called from any context.
+ * Return %true if the list may have previously been empty.
+ */
+static inline bool lwq_enqueue_batch(struct llist_node *n, struct lwq *q)
+{
+ struct llist_node *e = n;
+
+	/* acquire ensures ordering wrt lwq_dequeue */
+ return llist_add_batch(llist_reverse_order(n), e, &q->new) &&
+ smp_load_acquire(&q->ready) == NULL;
+}
+#endif /* LWQ_H */
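
A minimal producer/consumer sketch of the whole API; all names are
hypothetical and lwq_init() must run before first use.

	struct example_job {
		struct lwq_node q;
		int arg;
	};

	static void example_handle(int arg);	/* hypothetical handler */

	static struct lwq example_queue;	/* lwq_init(&example_queue) at setup */
	static struct task_struct *example_worker_task;

	static void example_submit(struct example_job *j)	/* any context */
	{
		if (lwq_enqueue(&j->q, &example_queue))
			/* queue may have been empty: kick the consumer */
			wake_up_process(example_worker_task);
	}

	static void example_worker(void)	/* process context only */
	{
		struct example_job *j;

		while ((j = lwq_dequeue(&example_queue, struct example_job, q)))
			example_handle(j->arg);
	}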
diff --git a/include/linux/maple_tree.h b/include/linux/maple_tree.h
index e41c70ac7744..d01e850b570f 100644
--- a/include/linux/maple_tree.h
+++ b/include/linux/maple_tree.h
@@ -428,6 +428,8 @@ struct ma_wr_state {
#define MAS_ROOT ((struct maple_enode *)5UL)
#define MAS_NONE ((struct maple_enode *)9UL)
#define MAS_PAUSE ((struct maple_enode *)17UL)
+#define MAS_OVERFLOW ((struct maple_enode *)33UL)
+#define MAS_UNDERFLOW ((struct maple_enode *)65UL)
#define MA_ERROR(err) \
((struct maple_enode *)(((unsigned long)err << 2) | 2UL))
@@ -511,6 +513,15 @@ static inline bool mas_is_paused(const struct ma_state *mas)
return mas->node == MAS_PAUSE;
}
+/* Check if the mas is pointing to a node or not */
+static inline bool mas_is_active(struct ma_state *mas)
+{
+ if ((unsigned long)mas->node >= MAPLE_RESERVED_RANGE)
+ return true;
+
+ return false;
+}
+
/**
* mas_reset() - Reset a Maple Tree operation state.
* @mas: Maple Tree operation state.
diff --git a/include/linux/mcb.h b/include/linux/mcb.h
index 1e5893138afe..0b971b24a804 100644
--- a/include/linux/mcb.h
+++ b/include/linux/mcb.h
@@ -63,7 +63,6 @@ static inline struct mcb_bus *to_mcb_bus(struct device *dev)
struct mcb_device {
struct device dev;
struct mcb_bus *bus;
- bool is_added;
struct mcb_driver *driver;
u16 id;
int inst;
diff --git a/include/linux/mei_cl_bus.h b/include/linux/mei_cl_bus.h
index fd6e0620658d..b38a56a13f39 100644
--- a/include/linux/mei_cl_bus.h
+++ b/include/linux/mei_cl_bus.h
@@ -31,11 +31,11 @@ typedef void (*mei_cldev_cb_t)(struct mei_cl_device *cldev);
* @rx_work: async work to execute Rx event callback
* @rx_cb: Drivers register this callback to get asynchronous ME
* Rx buffer pending notifications.
- * @notif_work: async work to execute FW notif event callback
+ * @notif_work: async work to execute FW notify event callback
* @notif_cb: Drivers register this callback to get asynchronous ME
* FW notification pending notifications.
*
- * @do_match: wheather device can be matched with a driver
+ * @do_match: whether the device can be matched with a driver
* @is_added: device is already scanned
* @priv_data: client private data
*/
@@ -94,15 +94,23 @@ void mei_cldev_driver_unregister(struct mei_cl_driver *cldrv);
ssize_t mei_cldev_send(struct mei_cl_device *cldev, const u8 *buf,
size_t length);
+ssize_t mei_cldev_send_timeout(struct mei_cl_device *cldev, const u8 *buf,
+ size_t length, unsigned long timeout);
ssize_t mei_cldev_recv(struct mei_cl_device *cldev, u8 *buf, size_t length);
ssize_t mei_cldev_recv_nonblock(struct mei_cl_device *cldev, u8 *buf,
size_t length);
+ssize_t mei_cldev_recv_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
+ unsigned long timeout);
ssize_t mei_cldev_send_vtag(struct mei_cl_device *cldev, const u8 *buf,
size_t length, u8 vtag);
+ssize_t mei_cldev_send_vtag_timeout(struct mei_cl_device *cldev, const u8 *buf,
+ size_t length, u8 vtag, unsigned long timeout);
ssize_t mei_cldev_recv_vtag(struct mei_cl_device *cldev, u8 *buf, size_t length,
u8 *vtag);
ssize_t mei_cldev_recv_nonblock_vtag(struct mei_cl_device *cldev, u8 *buf,
size_t length, u8 *vtag);
+ssize_t mei_cldev_recv_vtag_timeout(struct mei_cl_device *cldev, u8 *buf, size_t length,
+ u8 *vtag, unsigned long timeout);
int mei_cldev_register_rx_cb(struct mei_cl_device *cldev, mei_cldev_cb_t rx_cb);
int mei_cldev_register_notif_cb(struct mei_cl_device *cldev,
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index 1c1072e3ca06..ae3bde302f70 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -40,6 +40,8 @@ extern unsigned long long max_possible_pfn;
* via a driver, and never indicated in the firmware-provided memory map as
* system RAM. This corresponds to IORESOURCE_SYSRAM_DRIVER_MANAGED in the
* kernel resource tree.
+ * @MEMBLOCK_RSRV_NOINIT: memory region for which struct pages are
+ * not initialized (only for reserved regions).
*/
enum memblock_flags {
MEMBLOCK_NONE = 0x0, /* No special request */
@@ -47,6 +49,7 @@ enum memblock_flags {
MEMBLOCK_MIRROR = 0x2, /* mirrored region */
MEMBLOCK_NOMAP = 0x4, /* don't add to kernel direct mapping */
MEMBLOCK_DRIVER_MANAGED = 0x8, /* always detected via a driver */
+ MEMBLOCK_RSRV_NOINIT = 0x10, /* don't initialize struct pages */
};
/**
@@ -125,6 +128,7 @@ int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
+int memblock_reserved_mark_noinit(phys_addr_t base, phys_addr_t size);
void memblock_free_all(void);
void memblock_free(void *ptr, size_t size);
@@ -259,6 +263,11 @@ static inline bool memblock_is_nomap(struct memblock_region *m)
return m->flags & MEMBLOCK_NOMAP;
}
+static inline bool memblock_is_reserved_noinit(struct memblock_region *m)
+{
+ return m->flags & MEMBLOCK_RSRV_NOINIT;
+}
+
static inline bool memblock_is_driver_managed(struct memblock_region *m)
{
return m->flags & MEMBLOCK_DRIVER_MANAGED;
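
A sketch of the intended early-boot sequence for MEMBLOCK_RSRV_NOINIT (size
and caller hypothetical): allocate/reserve first, then mark the region, so its
struct pages are left for the owner to initialize later.

	static phys_addr_t __init example_alloc_noinit(void)
	{
		phys_addr_t base = memblock_phys_alloc(SZ_1G, SZ_1G);

		if (base)
			/* struct pages for [base, base + SZ_1G) stay uninitialized */
			memblock_reserved_mark_noinit(base, SZ_1G);
		return base;
	}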
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index ab94ad4597d0..7bdcf3020d7a 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -21,6 +21,7 @@
#include <linux/vmstat.h>
#include <linux/writeback.h>
#include <linux/page-flags.h>
+#include <linux/shrinker.h>
struct mem_cgroup;
struct obj_cgroup;
@@ -88,17 +89,6 @@ struct mem_cgroup_reclaim_iter {
unsigned int generation;
};
-/*
- * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
- * shrinkers, which have elements charged to this memcg.
- */
-struct shrinker_info {
- struct rcu_head rcu;
- atomic_long_t *nr_deferred;
- unsigned long *map;
- int map_nr_max;
-};
-
struct lruvec_stats_percpu {
/* Local (CPU and cgroup) state */
long state[NR_VM_NODE_STAT_ITEMS];
@@ -153,7 +143,7 @@ struct mem_cgroup_threshold_ary {
/* Size of entries[] */
unsigned int size;
/* Array of thresholds */
- struct mem_cgroup_threshold entries[];
+ struct mem_cgroup_threshold entries[] __counted_by(size);
};
struct mem_cgroup_thresholds {
@@ -299,7 +289,13 @@ struct mem_cgroup {
#ifdef CONFIG_MEMCG_KMEM
int kmemcg_id;
- struct obj_cgroup __rcu *objcg;
+ /*
+	 * memcg->objcg is wiped out as a part of the objcg reparenting
+	 * process. memcg->orig_objcg preserves a pointer (and a reference)
+	 * to the original objcg until the end of life of the memcg.
+ */
+ struct obj_cgroup __rcu *objcg;
+ struct obj_cgroup *orig_objcg;
/* list of inherited objcgs, protected by objcg_lock */
struct list_head objcg_list;
#endif
@@ -662,6 +658,8 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
page_counter_read(&memcg->memory);
}
+void mem_cgroup_commit_charge(struct folio *folio, struct mem_cgroup *memcg);
+
int __mem_cgroup_charge(struct folio *folio, struct mm_struct *mm, gfp_t gfp);
/**
@@ -686,6 +684,9 @@ static inline int mem_cgroup_charge(struct folio *folio, struct mm_struct *mm,
return __mem_cgroup_charge(folio, mm, gfp);
}
+int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg, gfp_t gfp,
+ long nr_pages);
+
int mem_cgroup_swapin_charge_folio(struct folio *folio, struct mm_struct *mm,
gfp_t gfp, swp_entry_t entry);
void mem_cgroup_swapin_uncharge_swap(swp_entry_t entry);
@@ -713,6 +714,10 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
__mem_cgroup_uncharge_list(page_list);
}
+void mem_cgroup_cancel_charge(struct mem_cgroup *memcg, unsigned int nr_pages);
+
+void mem_cgroup_replace_folio(struct folio *old, struct folio *new);
+
void mem_cgroup_migrate(struct folio *old, struct folio *new);
/**
@@ -769,6 +774,8 @@ struct mem_cgroup *mem_cgroup_from_task(struct task_struct *p);
struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm);
+struct mem_cgroup *get_mem_cgroup_from_current(void);
+
struct lruvec *folio_lruvec_lock(struct folio *folio);
struct lruvec *folio_lruvec_lock_irq(struct folio *folio);
struct lruvec *folio_lruvec_lock_irqsave(struct folio *folio,
@@ -920,7 +927,7 @@ unsigned long mem_cgroup_get_zone_lru_size(struct lruvec *lruvec,
return READ_ONCE(mz->lru_zone_size[zone_idx][lru]);
}
-void mem_cgroup_handle_over_high(void);
+void mem_cgroup_handle_over_high(gfp_t gfp_mask);
unsigned long mem_cgroup_get_max(struct mem_cgroup *memcg);
@@ -1080,15 +1087,6 @@ static inline void count_memcg_events(struct mem_cgroup *memcg,
local_irq_restore(flags);
}
-static inline void count_memcg_page_event(struct page *page,
- enum vm_event_item idx)
-{
- struct mem_cgroup *memcg = page_memcg(page);
-
- if (memcg)
- count_memcg_events(memcg, idx, 1);
-}
-
static inline void count_memcg_folio_events(struct folio *folio,
enum vm_event_item idx, unsigned long nr)
{
@@ -1249,12 +1247,23 @@ static inline bool mem_cgroup_below_min(struct mem_cgroup *target,
return false;
}
+static inline void mem_cgroup_commit_charge(struct folio *folio,
+ struct mem_cgroup *memcg)
+{
+}
+
static inline int mem_cgroup_charge(struct folio *folio,
struct mm_struct *mm, gfp_t gfp)
{
return 0;
}
+static inline int mem_cgroup_hugetlb_try_charge(struct mem_cgroup *memcg,
+ gfp_t gfp, long nr_pages)
+{
+ return 0;
+}
+
static inline int mem_cgroup_swapin_charge_folio(struct folio *folio,
struct mm_struct *mm, gfp_t gfp, swp_entry_t entry)
{
@@ -1273,6 +1282,16 @@ static inline void mem_cgroup_uncharge_list(struct list_head *page_list)
{
}
+static inline void mem_cgroup_cancel_charge(struct mem_cgroup *memcg,
+ unsigned int nr_pages)
+{
+}
+
+static inline void mem_cgroup_replace_folio(struct folio *old,
+ struct folio *new)
+{
+}
+
static inline void mem_cgroup_migrate(struct folio *old, struct folio *new)
{
}
@@ -1310,6 +1329,11 @@ static inline struct mem_cgroup *get_mem_cgroup_from_mm(struct mm_struct *mm)
return NULL;
}
+static inline struct mem_cgroup *get_mem_cgroup_from_current(void)
+{
+ return NULL;
+}
+
static inline
struct mem_cgroup *mem_cgroup_from_css(struct cgroup_subsys_state *css)
{
@@ -1458,7 +1482,7 @@ static inline void mem_cgroup_unlock_pages(void)
rcu_read_unlock();
}
-static inline void mem_cgroup_handle_over_high(void)
+static inline void mem_cgroup_handle_over_high(gfp_t gfp_mask)
{
}
@@ -1565,11 +1589,6 @@ static inline void __count_memcg_events(struct mem_cgroup *memcg,
{
}
-static inline void count_memcg_page_event(struct page *page,
- int idx)
-{
-}
-
static inline void count_memcg_folio_events(struct folio *folio,
enum vm_event_item idx, unsigned long nr)
{
@@ -1763,9 +1782,27 @@ bool mem_cgroup_kmem_disabled(void);
int __memcg_kmem_charge_page(struct page *page, gfp_t gfp, int order);
void __memcg_kmem_uncharge_page(struct page *page, int order);
-struct obj_cgroup *get_obj_cgroup_from_current(void);
+/*
+ * The returned objcg pointer is safe to use without additional
+ * protection within a scope. The scope is defined either by
+ * the current task (similar to the "current" global variable)
+ * or by a set_active_memcg() pair.
+ * Please use obj_cgroup_get() to get a reference if the pointer
+ * needs to be used outside of the local scope.
+ */
+struct obj_cgroup *current_obj_cgroup(void);
struct obj_cgroup *get_obj_cgroup_from_folio(struct folio *folio);
+static inline struct obj_cgroup *get_obj_cgroup_from_current(void)
+{
+ struct obj_cgroup *objcg = current_obj_cgroup();
+
+ if (objcg)
+ obj_cgroup_get(objcg);
+
+ return objcg;
+}
+
int obj_cgroup_charge(struct obj_cgroup *objcg, gfp_t gfp, size_t size);
void obj_cgroup_uncharge(struct obj_cgroup *objcg, size_t size);
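
A sketch of the scoping rule the comment above describes; the stash structure
is hypothetical.

	/* Scope-local use: current_obj_cgroup() needs no extra reference. */
	static int example_charge_current(size_t size, gfp_t gfp)
	{
		struct obj_cgroup *objcg = current_obj_cgroup();

		return objcg ? obj_cgroup_charge(objcg, gfp, size) : 0;
	}

	/* Keeping the pointer past the current scope: take a reference. */
	struct example_stash {
		struct obj_cgroup *objcg;
	};

	static void example_remember(struct example_stash *s)
	{
		/* paired with obj_cgroup_put() when the stash is torn down */
		s->objcg = get_obj_cgroup_from_current();
	}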
diff --git a/include/linux/memory-tiers.h b/include/linux/memory-tiers.h
index 437441cdf78f..1e39d27bee41 100644
--- a/include/linux/memory-tiers.h
+++ b/include/linux/memory-tiers.h
@@ -6,6 +6,7 @@
#include <linux/nodemask.h>
#include <linux/kref.h>
#include <linux/mmzone.h>
+#include <linux/notifier.h>
/*
* Each tier cover a abstrace distance chunk size of 128
*/
@@ -22,7 +23,9 @@
struct memory_tier;
struct memory_dev_type {
/* list of memory types that are part of same tier as this type */
- struct list_head tier_sibiling;
+ struct list_head tier_sibling;
+ /* list of memory types that are managed by one driver */
+ struct list_head list;
/* abstract distance for this specific memory type */
int adistance;
/* Nodes of same abstract distance */
@@ -30,12 +33,21 @@ struct memory_dev_type {
struct kref kref;
};
+struct node_hmem_attrs;
+
#ifdef CONFIG_NUMA
extern bool numa_demotion_enabled;
+extern struct memory_dev_type *default_dram_type;
struct memory_dev_type *alloc_memory_type(int adistance);
void put_memory_type(struct memory_dev_type *memtype);
void init_node_memory_type(int node, struct memory_dev_type *default_type);
void clear_node_memory_type(int node, struct memory_dev_type *memtype);
+int register_mt_adistance_algorithm(struct notifier_block *nb);
+int unregister_mt_adistance_algorithm(struct notifier_block *nb);
+int mt_calc_adistance(int node, int *adist);
+int mt_set_default_dram_perf(int nid, struct node_hmem_attrs *perf,
+ const char *source);
+int mt_perf_to_adistance(struct node_hmem_attrs *perf, int *adist);
#ifdef CONFIG_MIGRATION
int next_demotion_node(int node);
void node_get_allowed_targets(pg_data_t *pgdat, nodemask_t *targets);
@@ -60,6 +72,7 @@ static inline bool node_is_toptier(int node)
#else
#define numa_demotion_enabled false
+#define default_dram_type NULL
/*
* CONFIG_NUMA implementation returns non NULL error.
*/
@@ -97,5 +110,31 @@ static inline bool node_is_toptier(int node)
{
return true;
}
+
+static inline int register_mt_adistance_algorithm(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int unregister_mt_adistance_algorithm(struct notifier_block *nb)
+{
+ return 0;
+}
+
+static inline int mt_calc_adistance(int node, int *adist)
+{
+ return NOTIFY_DONE;
+}
+
+static inline int mt_set_default_dram_perf(int nid, struct node_hmem_attrs *perf,
+ const char *source)
+{
+ return -EIO;
+}
+
+static inline int mt_perf_to_adistance(struct node_hmem_attrs *perf, int *adist)
+{
+ return -EIO;
+}
#endif /* CONFIG_NUMA */
#endif /* _LINUX_MEMORY_TIERS_H */
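
A sketch of plugging an abstract-distance algorithm into the new notifier
interface. The callback convention assumed here (node id as the notifier
action, an int *adist as the data) is modeled on the ACPI HMAT user of this
API; the ownership test is hypothetical.

	static bool example_node_is_ours(unsigned long nid);	/* hypothetical */

	static int example_calc_adistance(struct notifier_block *self,
					  unsigned long nid, void *data)
	{
		int *adist = data;

		if (!example_node_is_ours(nid))
			return NOTIFY_OK;

		*adist = MEMTIER_ADISTANCE_DRAM * 2;	/* below the default DRAM tier */
		return NOTIFY_STOP;
	}

	static struct notifier_block example_adist_nb = {
		.notifier_call = example_calc_adistance,
	};

	static int __init example_init(void)
	{
		return register_mt_adistance_algorithm(&example_adist_nb);
	}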
diff --git a/include/linux/mempolicy.h b/include/linux/mempolicy.h
index d232de7cdc56..931b118336f4 100644
--- a/include/linux/mempolicy.h
+++ b/include/linux/mempolicy.h
@@ -17,6 +17,8 @@
struct mm_struct;
+#define NO_INTERLEAVE_INDEX (-1UL) /* use task il_prev for interleaving */
+
#ifdef CONFIG_NUMA
/*
@@ -89,8 +91,6 @@ static inline struct mempolicy *mpol_dup(struct mempolicy *pol)
return pol;
}
-#define vma_policy(vma) ((vma)->vm_policy)
-
static inline void mpol_get(struct mempolicy *pol)
{
if (pol)
@@ -107,35 +107,30 @@ static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
/*
* Tree of shared policies for a shared memory region.
- * Maintain the policies in a pseudo mm that contains vmas. The vmas
- * carry the policy. As a special twist the pseudo mm is indexed in pages, not
- * bytes, so that we can work with shared memory segments bigger than
- * unsigned long.
*/
-
-struct sp_node {
- struct rb_node nd;
- unsigned long start, end;
- struct mempolicy *policy;
-};
-
struct shared_policy {
struct rb_root root;
rwlock_t lock;
};
+struct sp_node {
+ struct rb_node nd;
+ pgoff_t start, end;
+ struct mempolicy *policy;
+};
int vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst);
void mpol_shared_policy_init(struct shared_policy *sp, struct mempolicy *mpol);
-int mpol_set_shared_policy(struct shared_policy *info,
- struct vm_area_struct *vma,
- struct mempolicy *new);
-void mpol_free_shared_policy(struct shared_policy *p);
+int mpol_set_shared_policy(struct shared_policy *sp,
+ struct vm_area_struct *vma, struct mempolicy *mpol);
+void mpol_free_shared_policy(struct shared_policy *sp);
struct mempolicy *mpol_shared_policy_lookup(struct shared_policy *sp,
- unsigned long idx);
+ pgoff_t idx);
struct mempolicy *get_task_policy(struct task_struct *p);
struct mempolicy *__get_vma_policy(struct vm_area_struct *vma,
- unsigned long addr);
+ unsigned long addr, pgoff_t *ilx);
+struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
+ unsigned long addr, int order, pgoff_t *ilx);
bool vma_policy_mof(struct vm_area_struct *vma);
extern void numa_default_policy(void);
@@ -149,8 +144,6 @@ extern int huge_node(struct vm_area_struct *vma,
extern bool init_nodemask_of_mempolicy(nodemask_t *mask);
extern bool mempolicy_in_oom_domain(struct task_struct *tsk,
const nodemask_t *mask);
-extern nodemask_t *policy_nodemask(gfp_t gfp, struct mempolicy *policy);
-
extern unsigned int mempolicy_slab_node(void);
extern enum zone_type policy_zone;
@@ -174,7 +167,7 @@ extern void mpol_to_str(char *buffer, int maxlen, struct mempolicy *pol);
/* Check if a vma is migratable */
extern bool vma_migratable(struct vm_area_struct *vma);
-extern int mpol_misplaced(struct page *, struct vm_area_struct *, unsigned long);
+int mpol_misplaced(struct folio *, struct vm_area_struct *, unsigned long);
extern void mpol_put_task_policy(struct task_struct *);
static inline bool mpol_is_preferred_many(struct mempolicy *pol)
@@ -188,12 +181,17 @@ extern bool apply_policy_zone(struct mempolicy *policy, enum zone_type zone);
struct mempolicy {};
+static inline struct mempolicy *get_task_policy(struct task_struct *p)
+{
+ return NULL;
+}
+
static inline bool mpol_equal(struct mempolicy *a, struct mempolicy *b)
{
return true;
}
-static inline void mpol_put(struct mempolicy *p)
+static inline void mpol_put(struct mempolicy *pol)
{
}
@@ -212,17 +210,22 @@ static inline void mpol_shared_policy_init(struct shared_policy *sp,
{
}
-static inline void mpol_free_shared_policy(struct shared_policy *p)
+static inline void mpol_free_shared_policy(struct shared_policy *sp)
{
}
static inline struct mempolicy *
-mpol_shared_policy_lookup(struct shared_policy *sp, unsigned long idx)
+mpol_shared_policy_lookup(struct shared_policy *sp, pgoff_t idx)
{
return NULL;
}
-#define vma_policy(vma) NULL
+static inline struct mempolicy *get_vma_policy(struct vm_area_struct *vma,
+ unsigned long addr, int order, pgoff_t *ilx)
+{
+ *ilx = 0;
+ return NULL;
+}
static inline int
vma_dup_policy(struct vm_area_struct *src, struct vm_area_struct *dst)
@@ -278,7 +281,8 @@ static inline int mpol_parse_str(char *str, struct mempolicy **mpol)
}
#endif
-static inline int mpol_misplaced(struct page *page, struct vm_area_struct *vma,
+static inline int mpol_misplaced(struct folio *folio,
+ struct vm_area_struct *vma,
unsigned long address)
{
return -1; /* no node preference */
diff --git a/include/linux/mfd/abx500/ab8500.h b/include/linux/mfd/abx500/ab8500.h
index 09fb3c56e7d7..76d326ea8eba 100644
--- a/include/linux/mfd/abx500/ab8500.h
+++ b/include/linux/mfd/abx500/ab8500.h
@@ -499,13 +499,7 @@ static inline int is_ab9540_2p0_or_earlier(struct ab8500 *ab)
void ab8500_override_turn_on_stat(u8 mask, u8 set);
-#ifdef CONFIG_AB8500_DEBUG
-extern int prcmu_abb_read(u8 slave, u8 reg, u8 *value, u8 size);
-void ab8500_dump_all_banks(struct device *dev);
-void ab8500_debug_register_interrupt(int line);
-#else
static inline void ab8500_dump_all_banks(struct device *dev) {}
static inline void ab8500_debug_register_interrupt(int line) {}
-#endif
#endif /* MFD_AB8500_H */
diff --git a/include/linux/mfd/core.h b/include/linux/mfd/core.h
index 47e7a3a61ce6..e8bcad641d8c 100644
--- a/include/linux/mfd/core.h
+++ b/include/linux/mfd/core.h
@@ -92,7 +92,7 @@ struct mfd_cell {
* (above) when matching OF nodes with devices that have identical
* compatible strings
*/
- const u64 of_reg;
+ u64 of_reg;
/* Set to 'true' to use 'of_reg' (above) - allows for of_reg=0 */
bool use_of_reg;
diff --git a/include/linux/mfd/lpc_ich.h b/include/linux/mfd/lpc_ich.h
index ea4a4b1b246a..1fbda1f8967d 100644
--- a/include/linux/mfd/lpc_ich.h
+++ b/include/linux/mfd/lpc_ich.h
@@ -15,7 +15,7 @@
#define ICH_RES_GPE0 1
/* GPIO compatibility */
-enum {
+enum lpc_gpio_versions {
ICH_I3100_GPIO,
ICH_V5_GPIO,
ICH_V6_GPIO,
@@ -26,11 +26,14 @@ enum {
AVOTON_GPIO,
};
+struct lpc_ich_gpio_info;
+
struct lpc_ich_info {
char name[32];
unsigned int iTCO_version;
- unsigned int gpio_version;
+ enum lpc_gpio_versions gpio_version;
enum intel_spi_type spi_type;
+ const struct lpc_ich_gpio_info *gpio_info;
u8 use_gpio;
};
diff --git a/include/linux/mfd/mt6358/registers.h b/include/linux/mfd/mt6358/registers.h
index 3d33517f178c..d83e87298ac4 100644
--- a/include/linux/mfd/mt6358/registers.h
+++ b/include/linux/mfd/mt6358/registers.h
@@ -262,6 +262,12 @@
#define MT6358_LDO_VBIF28_CON3 0x1db0
#define MT6358_VCAMA1_ANA_CON0 0x1e08
#define MT6358_VCAMA2_ANA_CON0 0x1e0c
+#define MT6358_VFE28_ANA_CON0 0x1e10
+#define MT6358_VCN28_ANA_CON0 0x1e14
+#define MT6358_VBIF28_ANA_CON0 0x1e18
+#define MT6358_VAUD28_ANA_CON0 0x1e1c
+#define MT6358_VAUX18_ANA_CON0 0x1e20
+#define MT6358_VXO22_ANA_CON0 0x1e24
#define MT6358_VCN33_ANA_CON0 0x1e28
#define MT6358_VSIM1_ANA_CON0 0x1e2c
#define MT6358_VSIM2_ANA_CON0 0x1e30
@@ -288,4 +294,21 @@
#define MT6358_AUD_TOP_INT_CON0 0x2228
#define MT6358_AUD_TOP_INT_STATUS0 0x2234
+/*
+ * MT6366 has no VCAM*, but has other regulators in its place. The names
+ * keep the MT6358 prefix for ease of use in the regulator driver.
+ */
+#define MT6358_LDO_VSRAM_CON5 0x1bf8
+#define MT6358_LDO_VM18_CON0 MT6358_LDO_VCAMA1_CON0
+#define MT6358_LDO_VM18_CON1 MT6358_LDO_VCAMA1_CON1
+#define MT6358_LDO_VM18_CON2 MT6358_LDO_VCAMA1_CON2
+#define MT6358_LDO_VMDDR_CON0 MT6358_LDO_VCAMA2_CON0
+#define MT6358_LDO_VMDDR_CON1 MT6358_LDO_VCAMA2_CON1
+#define MT6358_LDO_VMDDR_CON2 MT6358_LDO_VCAMA2_CON2
+#define MT6358_LDO_VSRAM_CORE_CON0 MT6358_LDO_VCAMD_CON0
+#define MT6358_LDO_VSRAM_CORE_DBG0 0x1cb6
+#define MT6358_LDO_VSRAM_CORE_DBG1 0x1cb8
+#define MT6358_VM18_ANA_CON0 MT6358_VCAMA1_ANA_CON0
+#define MT6358_VMDDR_ANA_CON0 MT6358_VCAMD_ANA_CON0
+
#endif /* __MFD_MT6358_REGISTERS_H__ */
diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h
index 1b94325febb3..ca35af30745f 100644
--- a/include/linux/mfd/stm32-timers.h
+++ b/include/linux/mfd/stm32-timers.h
@@ -102,6 +102,15 @@ enum stm32_timers_dmas {
STM32_TIMERS_MAX_DMAS,
};
+/* STM32 Timer may have either a single global interrupt or 4 interrupt lines */
+enum stm32_timers_irqs {
+ STM32_TIMERS_IRQ_GLOBAL_BRK, /* global or brk IRQ */
+ STM32_TIMERS_IRQ_UP,
+ STM32_TIMERS_IRQ_TRG_COM,
+ STM32_TIMERS_IRQ_CC,
+ STM32_TIMERS_MAX_IRQS,
+};
+
/**
* struct stm32_timers_dma - STM32 timer DMA handling.
* @completion: end of DMA transfer completion
@@ -123,6 +132,8 @@ struct stm32_timers {
struct regmap *regmap;
u32 max_arr;
struct stm32_timers_dma dma; /* Only to be used by the parent */
+ unsigned int nr_irqs;
+ int irq[STM32_TIMERS_MAX_IRQS];
};
#if IS_REACHABLE(CONFIG_MFD_STM32_TIMERS)
diff --git a/include/linux/micrel_phy.h b/include/linux/micrel_phy.h
index 4e27ca7c49de..591bf5b5e8dc 100644
--- a/include/linux/micrel_phy.h
+++ b/include/linux/micrel_phy.h
@@ -64,6 +64,10 @@
#define KSZ886X_BMCR_DISABLE_TRANSMIT BIT(1)
#define KSZ886X_BMCR_DISABLE_LED BIT(0)
+/* PHY Special Control/Status Register (Reg 31) */
#define KSZ886X_CTRL_MDIX_STAT BIT(4)
+#define KSZ886X_CTRL_FORCE_LINK BIT(3)
+#define KSZ886X_CTRL_PWRSAVE BIT(2)
+#define KSZ886X_CTRL_REMOTE_LOOPBACK BIT(1)
#endif /* _MICREL_PHY_H */
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index 711dd9412561..2ce13e8a309b 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -142,10 +142,10 @@ const struct movable_operations *page_movable_ops(struct page *page)
}
#ifdef CONFIG_NUMA_BALANCING
-int migrate_misplaced_page(struct page *page, struct vm_area_struct *vma,
+int migrate_misplaced_folio(struct folio *folio, struct vm_area_struct *vma,
int node);
#else
-static inline int migrate_misplaced_page(struct page *page,
+static inline int migrate_misplaced_folio(struct folio *folio,
struct vm_area_struct *vma, int node)
{
return -EAGAIN; /* can't migrate now */
diff --git a/include/linux/minmax.h b/include/linux/minmax.h
index 83aebc244cba..2ec559284a9f 100644
--- a/include/linux/minmax.h
+++ b/include/linux/minmax.h
@@ -2,60 +2,77 @@
#ifndef _LINUX_MINMAX_H
#define _LINUX_MINMAX_H
+#include <linux/build_bug.h>
+#include <linux/compiler.h>
#include <linux/const.h>
#include <linux/types.h>
/*
* min()/max()/clamp() macros must accomplish three things:
*
- * - avoid multiple evaluations of the arguments (so side-effects like
+ * - Avoid multiple evaluations of the arguments (so side-effects like
* "x++" happen only once) when non-constant.
- * - perform strict type-checking (to generate warnings instead of
- * nasty runtime surprises). See the "unnecessary" pointer comparison
- * in __typecheck().
- * - retain result as a constant expressions when called with only
+ * - Retain the result as a constant expression when called with only
* constant expressions (to avoid tripping VLA warnings in stack
* allocation usage).
+ * - Perform signed vs unsigned type-checking (to generate compile
+ * errors instead of nasty runtime surprises).
+ * - Unsigned char/short are always promoted to signed int and can be
+ * compared against signed or unsigned arguments.
+ * - Unsigned arguments can be compared against non-negative signed constants.
+ * - Comparison of a signed argument against an unsigned constant fails
+ * even if the constant is below __INT_MAX__ and could be cast to int.
*/
#define __typecheck(x, y) \
(!!(sizeof((typeof(x) *)1 == (typeof(y) *)1)))
-#define __no_side_effects(x, y) \
- (__is_constexpr(x) && __is_constexpr(y))
+/* is_signed_type() isn't a constexpr for pointer types */
+#define __is_signed(x) \
+ __builtin_choose_expr(__is_constexpr(is_signed_type(typeof(x))), \
+ is_signed_type(typeof(x)), 0)
-#define __safe_cmp(x, y) \
- (__typecheck(x, y) && __no_side_effects(x, y))
+/* True for a non-negative signed int constant */
+#define __is_noneg_int(x) \
+ (__builtin_choose_expr(__is_constexpr(x) && __is_signed(x), x, -1) >= 0)
-#define __cmp(x, y, op) ((x) op (y) ? (x) : (y))
+#define __types_ok(x, y) \
+ (__is_signed(x) == __is_signed(y) || \
+ __is_signed((x) + 0) == __is_signed((y) + 0) || \
+ __is_noneg_int(x) || __is_noneg_int(y))
-#define __cmp_once(x, y, unique_x, unique_y, op) ({ \
- typeof(x) unique_x = (x); \
- typeof(y) unique_y = (y); \
- __cmp(unique_x, unique_y, op); })
+#define __cmp_op_min <
+#define __cmp_op_max >
-#define __careful_cmp(x, y, op) \
- __builtin_choose_expr(__safe_cmp(x, y), \
- __cmp(x, y, op), \
- __cmp_once(x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y), op))
+#define __cmp(op, x, y) ((x) __cmp_op_##op (y) ? (x) : (y))
+
+#define __cmp_once(op, x, y, unique_x, unique_y) ({ \
+ typeof(x) unique_x = (x); \
+ typeof(y) unique_y = (y); \
+ static_assert(__types_ok(x, y), \
+ #op "(" #x ", " #y ") signedness error, fix types or consider u" #op "() before " #op "_t()"); \
+ __cmp(op, unique_x, unique_y); })
+
+#define __careful_cmp(op, x, y) \
+ __builtin_choose_expr(__is_constexpr((x) - (y)), \
+ __cmp(op, x, y), \
+ __cmp_once(op, x, y, __UNIQUE_ID(__x), __UNIQUE_ID(__y)))
#define __clamp(val, lo, hi) \
((val) >= (hi) ? (hi) : ((val) <= (lo) ? (lo) : (val)))
-#define __clamp_once(val, lo, hi, unique_val, unique_lo, unique_hi) ({ \
- typeof(val) unique_val = (val); \
- typeof(lo) unique_lo = (lo); \
- typeof(hi) unique_hi = (hi); \
- __clamp(unique_val, unique_lo, unique_hi); })
-
-#define __clamp_input_check(lo, hi) \
- (BUILD_BUG_ON_ZERO(__builtin_choose_expr( \
- __is_constexpr((lo) > (hi)), (lo) > (hi), false)))
+#define __clamp_once(val, lo, hi, unique_val, unique_lo, unique_hi) ({ \
+ typeof(val) unique_val = (val); \
+ typeof(lo) unique_lo = (lo); \
+ typeof(hi) unique_hi = (hi); \
+ static_assert(__builtin_choose_expr(__is_constexpr((lo) > (hi)), \
+ (lo) <= (hi), true), \
+ "clamp() low limit " #lo " greater than high limit " #hi); \
+ static_assert(__types_ok(val, lo), "clamp() 'lo' signedness error"); \
+ static_assert(__types_ok(val, hi), "clamp() 'hi' signedness error"); \
+ __clamp(unique_val, unique_lo, unique_hi); })
#define __careful_clamp(val, lo, hi) ({ \
- __clamp_input_check(lo, hi) + \
- __builtin_choose_expr(__typecheck(val, lo) && __typecheck(val, hi) && \
- __typecheck(hi, lo) && __is_constexpr(val) && \
- __is_constexpr(lo) && __is_constexpr(hi), \
+ __builtin_choose_expr(__is_constexpr((val) - (lo) + (hi)), \
__clamp(val, lo, hi), \
__clamp_once(val, lo, hi, __UNIQUE_ID(__val), \
__UNIQUE_ID(__lo), __UNIQUE_ID(__hi))); })
@@ -65,14 +82,31 @@
* @x: first value
* @y: second value
*/
-#define min(x, y) __careful_cmp(x, y, <)
+#define min(x, y) __careful_cmp(min, x, y)
/**
* max - return maximum of two values of the same or compatible types
* @x: first value
* @y: second value
*/
-#define max(x, y) __careful_cmp(x, y, >)
+#define max(x, y) __careful_cmp(max, x, y)
+
+/**
+ * umin - return minimum of two non-negative values
+ * Signed types are zero extended to match a larger unsigned type.
+ * @x: first value
+ * @y: second value
+ */
+#define umin(x, y) \
+ __careful_cmp(min, (x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull)
+
+/**
+ * umax - return maximum of two non-negative values
+ * @x: first value
+ * @y: second value
+ */
+#define umax(x, y) \
+ __careful_cmp(max, (x) + 0u + 0ul + 0ull, (y) + 0u + 0ul + 0ull)
/**
* min3 - return minimum of three values
@@ -124,7 +158,7 @@
* @x: first value
* @y: second value
*/
-#define min_t(type, x, y) __careful_cmp((type)(x), (type)(y), <)
+#define min_t(type, x, y) __careful_cmp(min, (type)(x), (type)(y))
/**
* max_t - return maximum of two values, using the specified type
@@ -132,28 +166,7 @@
* @x: first value
* @y: second value
*/
-#define max_t(type, x, y) __careful_cmp((type)(x), (type)(y), >)
-
-/*
- * Remove a const qualifier from integer types
- * _Generic(foo, type-name: association, ..., default: association) performs a
- * comparison against the foo type (not the qualified type).
- * Do not use the const keyword in the type-name as it will not match the
- * unqualified type of foo.
- */
-#define __unconst_integer_type_cases(type) \
- unsigned type: (unsigned type)0, \
- signed type: (signed type)0
-
-#define __unconst_integer_typeof(x) typeof( \
- _Generic((x), \
- char: (char)0, \
- __unconst_integer_type_cases(char), \
- __unconst_integer_type_cases(short), \
- __unconst_integer_type_cases(int), \
- __unconst_integer_type_cases(long), \
- __unconst_integer_type_cases(long long), \
- default: (x)))
+#define max_t(type, x, y) __careful_cmp(max, (type)(x), (type)(y))
/*
* Do not check the array parameter using __must_be_array().
@@ -169,13 +182,13 @@
* 'int *buff' and 'int buff[N]' types.
*
* The array can be an array of const items.
- * typeof() keeps the const qualifier. Use __unconst_integer_typeof() in order
+ * typeof() keeps the const qualifier. Use __unqual_scalar_typeof() in order
* to discard the const qualifier for the __element variable.
*/
#define __minmax_array(op, array, len) ({ \
typeof(&(array)[0]) __array = (array); \
typeof(len) __len = (len); \
- __unconst_integer_typeof(__array[0]) __element = __array[--__len]; \
+ __unqual_scalar_typeof(__array[0]) __element = __array[--__len];\
while (__len--) \
__element = op(__element, __array[__len]); \
__element; })
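
As a hedged illustration of the stricter rules above (a hypothetical caller, not part of this patch): mixing a signed and an unsigned argument now trips the static_assert in __cmp_once(), while umin()/umax() zero-extend both operands to a common unsigned type.

	/* Sketch only; 'buf_len' and 'requested' are illustration names. */
	static size_t pick_copy_len(size_t buf_len, int requested)
	{
		if (requested < 0)
			return 0;

		/*
		 * min(buf_len, requested) would fail to build here with a
		 * "signedness error" static_assert. umin() adds 0u/0ul/0ull
		 * to both operands, so the known non-negative 'requested'
		 * is safely widened to an unsigned type first.
		 */
		return umin(buf_len, requested);
	}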
diff --git a/include/linux/mlx5/device.h b/include/linux/mlx5/device.h
index 4d5be378fa8c..820bca965fb6 100644
--- a/include/linux/mlx5/device.h
+++ b/include/linux/mlx5/device.h
@@ -366,6 +366,9 @@ enum mlx5_driver_event {
MLX5_DRIVER_EVENT_UPLINK_NETDEV,
MLX5_DRIVER_EVENT_MACSEC_SA_ADDED,
MLX5_DRIVER_EVENT_MACSEC_SA_DELETED,
+ MLX5_DRIVER_EVENT_SF_PEER_DEVLINK,
+ MLX5_DRIVER_EVENT_AFFILIATION_DONE,
+ MLX5_DRIVER_EVENT_AFFILIATION_REMOVED,
};
enum {
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 3033bbaeac81..d2b8d4a74a30 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -155,6 +155,8 @@ enum {
MLX5_REG_MCC = 0x9062,
MLX5_REG_MCDA = 0x9063,
MLX5_REG_MCAM = 0x907f,
+ MLX5_REG_MSECQ = 0x9155,
+ MLX5_REG_MSEES = 0x9156,
MLX5_REG_MIRC = 0x9162,
MLX5_REG_SBCAM = 0xB01F,
MLX5_REG_RESOURCE_DUMP = 0xC000,
@@ -613,6 +615,7 @@ struct mlx5_priv {
int adev_idx;
int sw_vhca_id;
struct mlx5_events *events;
+ struct mlx5_vhca_events *vhca_events;
struct mlx5_flow_steering *steering;
struct mlx5_mpfs *mpfs;
@@ -621,6 +624,7 @@ struct mlx5_priv {
struct mlx5_lag *lag;
u32 flags;
struct mlx5_devcom_dev *devc;
+ struct mlx5_devcom_comp_dev *hca_devcom_comp;
struct mlx5_fw_reset *fw_reset;
struct mlx5_core_roce roce;
struct mlx5_fc_stats fc_stats;
@@ -1027,6 +1031,8 @@ bool mlx5_cmd_is_down(struct mlx5_core_dev *dev);
void mlx5_core_uplink_netdev_set(struct mlx5_core_dev *mdev, struct net_device *netdev);
void mlx5_core_uplink_netdev_event_replay(struct mlx5_core_dev *mdev);
+void mlx5_core_mp_event_replay(struct mlx5_core_dev *dev, u32 event, void *data);
+
void mlx5_health_cleanup(struct mlx5_core_dev *dev);
int mlx5_health_init(struct mlx5_core_dev *dev);
void mlx5_start_health_poll(struct mlx5_core_dev *dev);
@@ -1037,10 +1043,6 @@ void mlx5_trigger_health_work(struct mlx5_core_dev *dev);
int mlx5_frag_buf_alloc_node(struct mlx5_core_dev *dev, int size,
struct mlx5_frag_buf *buf, int node);
void mlx5_frag_buf_free(struct mlx5_core_dev *dev, struct mlx5_frag_buf *buf);
-struct mlx5_cmd_mailbox *mlx5_alloc_cmd_mailbox_chain(struct mlx5_core_dev *dev,
- gfp_t flags, int npages);
-void mlx5_free_cmd_mailbox_chain(struct mlx5_core_dev *dev,
- struct mlx5_cmd_mailbox *head);
int mlx5_core_create_mkey(struct mlx5_core_dev *dev, u32 *mkey, u32 *in,
int inlen);
int mlx5_core_destroy_mkey(struct mlx5_core_dev *dev, u32 mkey);
@@ -1054,8 +1056,6 @@ void mlx5_pagealloc_start(struct mlx5_core_dev *dev);
void mlx5_pagealloc_stop(struct mlx5_core_dev *dev);
void mlx5_pages_debugfs_init(struct mlx5_core_dev *dev);
void mlx5_pages_debugfs_cleanup(struct mlx5_core_dev *dev);
-void mlx5_core_req_pages_handler(struct mlx5_core_dev *dev, u16 func_id,
- s32 npages, bool ec_function);
int mlx5_satisfy_startup_pages(struct mlx5_core_dev *dev, int boot);
int mlx5_reclaim_startup_pages(struct mlx5_core_dev *dev);
void mlx5_register_debugfs(void);
@@ -1095,8 +1095,6 @@ int mlx5_core_create_psv(struct mlx5_core_dev *dev, u32 pdn,
int mlx5_core_destroy_psv(struct mlx5_core_dev *dev, int psv_num);
__be32 mlx5_core_get_terminate_scatter_list_mkey(struct mlx5_core_dev *dev);
void mlx5_core_put_rsc(struct mlx5_core_rsc_common *common);
-int mlx5_query_odp_caps(struct mlx5_core_dev *dev,
- struct mlx5_odp_caps *odp_caps);
int mlx5_init_rl_table(struct mlx5_core_dev *dev);
void mlx5_cleanup_rl_table(struct mlx5_core_dev *dev);
@@ -1198,12 +1196,6 @@ int mlx5_sriov_blocking_notifier_register(struct mlx5_core_dev *mdev,
void mlx5_sriov_blocking_notifier_unregister(struct mlx5_core_dev *mdev,
int vf_id,
struct notifier_block *nb);
-#ifdef CONFIG_MLX5_CORE_IPOIB
-struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
- struct ib_device *ibdev,
- const char *name,
- void (*setup)(struct net_device *));
-#endif /* CONFIG_MLX5_CORE_IPOIB */
int mlx5_rdma_rn_get_params(struct mlx5_core_dev *mdev,
struct ib_device *device,
struct rdma_netdev_alloc_params *params);
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 1e00c2436377..6f7725238abc 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -67,6 +67,7 @@ enum {
MLX5_FLOW_TABLE_TERMINATION = BIT(2),
MLX5_FLOW_TABLE_UNMANAGED = BIT(3),
MLX5_FLOW_TABLE_OTHER_VPORT = BIT(4),
+ MLX5_FLOW_TABLE_UPLINK_VPORT = BIT(5),
};
#define LEFTOVERS_RULE_NUM 2
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index fc3db401f8a2..4df6d1c12437 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -312,6 +312,7 @@ enum {
MLX5_CMD_OP_QUERY_VHCA_STATE = 0xb0d,
MLX5_CMD_OP_MODIFY_VHCA_STATE = 0xb0e,
MLX5_CMD_OP_SYNC_CRYPTO = 0xb12,
+ MLX5_CMD_OP_ALLOW_OTHER_VHCA_ACCESS = 0xb16,
MLX5_CMD_OP_MAX
};
@@ -1934,6 +1935,14 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 match_definer_format_supported[0x40];
};
+enum {
+ MLX5_CROSS_VHCA_OBJ_TO_OBJ_SUPPORTED_LOCAL_FLOW_TABLE_TO_REMOTE_FLOW_TABLE_MISS = 0x80000,
+};
+
+enum {
+ MLX5_ALLOWED_OBJ_FOR_OTHER_VHCA_ACCESS_FLOW_TABLE = 0x200,
+};
+
struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_0[0x80];
@@ -1948,9 +1957,15 @@ struct mlx5_ifc_cmd_hca_cap_2_bits {
u8 reserved_at_c0[0x8];
u8 migration_multi_load[0x1];
u8 migration_tracking_state[0x1];
- u8 reserved_at_ca[0x16];
+ u8 reserved_at_ca[0x6];
+ u8 migration_in_chunks[0x1];
+ u8 reserved_at_d1[0xf];
+
+ u8 cross_vhca_object_to_object_supported[0x20];
+
+ u8 allowed_object_for_other_vhca_access[0x40];
- u8 reserved_at_e0[0xc0];
+ u8 reserved_at_140[0x60];
u8 flow_table_type_2_type[0x8];
u8 reserved_at_1a8[0x3];
@@ -6369,6 +6384,28 @@ struct mlx5_ifc_general_obj_out_cmd_hdr_bits {
u8 reserved_at_60[0x20];
};
+struct mlx5_ifc_allow_other_vhca_access_in_bits {
+ u8 opcode[0x10];
+ u8 uid[0x10];
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+ u8 reserved_at_40[0x50];
+ u8 object_type_to_be_accessed[0x10];
+ u8 object_id_to_be_accessed[0x20];
+ u8 reserved_at_c0[0x40];
+ union {
+ u8 access_key_raw[0x100];
+ u8 access_key[8][0x20];
+ };
+};
+
+struct mlx5_ifc_allow_other_vhca_access_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+ u8 syndrome[0x20];
+ u8 reserved_at_40[0x40];
+};
+
struct mlx5_ifc_modify_header_arg_bits {
u8 reserved_at_0[0x80];
@@ -6391,6 +6428,24 @@ struct mlx5_ifc_create_match_definer_out_bits {
struct mlx5_ifc_general_obj_out_cmd_hdr_bits general_obj_out_cmd_hdr;
};
+struct mlx5_ifc_alias_context_bits {
+ u8 vhca_id_to_be_accessed[0x10];
+ u8 reserved_at_10[0xd];
+ u8 status[0x3];
+ u8 object_id_to_be_accessed[0x20];
+ u8 reserved_at_40[0x40];
+ union {
+ u8 access_key_raw[0x100];
+ u8 access_key[8][0x20];
+ };
+ u8 metadata[0x80];
+};
+
+struct mlx5_ifc_create_alias_obj_in_bits {
+ struct mlx5_ifc_general_obj_in_cmd_hdr_bits hdr;
+ struct mlx5_ifc_alias_context_bits alias_ctx;
+};
+
enum {
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_OUTER_HEADERS = 0x0,
MLX5_QUERY_FLOW_GROUP_OUT_MATCH_CRITERIA_ENABLE_MISC_PARAMETERS = 0x1,
@@ -10176,7 +10231,9 @@ struct mlx5_ifc_mcam_access_reg_bits2 {
u8 mirc[0x1];
u8 regs_97_to_96[0x2];
- u8 regs_95_to_64[0x20];
+ u8 regs_95_to_87[0x09];
+ u8 synce_registers[0x2];
+ u8 regs_84_to_64[0x15];
u8 regs_63_to_32[0x20];
@@ -10572,6 +10629,7 @@ enum {
MLX5_INITIAL_SEG_HEALTH_SYNDROME_EQ_INV = 0xe,
MLX5_INITIAL_SEG_HEALTH_SYNDROME_FFSER_ERR = 0xf,
MLX5_INITIAL_SEG_HEALTH_SYNDROME_HIGH_TEMP_ERR = 0x10,
+ MLX5_INITIAL_SEG_HEALTH_SYNDROME_ICM_PCI_POISONED_ERR = 0x12,
};
struct mlx5_ifc_initial_seg_bits {
@@ -11917,6 +11975,7 @@ enum {
MLX5_GENERAL_OBJECT_TYPES_FLOW_METER_ASO = 0x24,
MLX5_GENERAL_OBJECT_TYPES_MACSEC = 0x27,
MLX5_GENERAL_OBJECT_TYPES_INT_KEK = 0x47,
+ MLX5_GENERAL_OBJECT_TYPES_FLOW_TABLE_ALIAS = 0xff15,
};
enum {
@@ -12392,7 +12451,8 @@ struct mlx5_ifc_query_vhca_migration_state_in_bits {
u8 op_mod[0x10];
u8 incremental[0x1];
- u8 reserved_at_41[0xf];
+ u8 chunk[0x1];
+ u8 reserved_at_42[0xe];
u8 vhca_id[0x10];
u8 reserved_at_60[0x20];
@@ -12408,7 +12468,11 @@ struct mlx5_ifc_query_vhca_migration_state_out_bits {
u8 required_umem_size[0x20];
- u8 reserved_at_a0[0x160];
+ u8 reserved_at_a0[0x20];
+
+ u8 remaining_total_size[0x40];
+
+ u8 reserved_at_100[0x100];
};
struct mlx5_ifc_save_vhca_state_in_bits {
@@ -12440,7 +12504,7 @@ struct mlx5_ifc_save_vhca_state_out_bits {
u8 actual_image_size[0x20];
- u8 reserved_at_60[0x20];
+ u8 next_required_umem_size[0x20];
};
struct mlx5_ifc_load_vhca_state_in_bits {
@@ -12549,4 +12613,59 @@ struct mlx5_ifc_modify_page_track_obj_in_bits {
struct mlx5_ifc_page_track_bits obj_context;
};
+struct mlx5_ifc_msecq_reg_bits {
+ u8 reserved_at_0[0x20];
+
+ u8 reserved_at_20[0x12];
+ u8 network_option[0x2];
+ u8 local_ssm_code[0x4];
+ u8 local_enhanced_ssm_code[0x8];
+
+ u8 local_clock_identity[0x40];
+
+ u8 reserved_at_80[0x180];
+};
+
+enum {
+ MLX5_MSEES_FIELD_SELECT_ENABLE = BIT(0),
+ MLX5_MSEES_FIELD_SELECT_ADMIN_STATUS = BIT(1),
+ MLX5_MSEES_FIELD_SELECT_ADMIN_FREQ_MEASURE = BIT(2),
+};
+
+enum mlx5_msees_admin_status {
+ MLX5_MSEES_ADMIN_STATUS_FREE_RUNNING = 0x0,
+ MLX5_MSEES_ADMIN_STATUS_TRACK = 0x1,
+};
+
+enum mlx5_msees_oper_status {
+ MLX5_MSEES_OPER_STATUS_FREE_RUNNING = 0x0,
+ MLX5_MSEES_OPER_STATUS_SELF_TRACK = 0x1,
+ MLX5_MSEES_OPER_STATUS_OTHER_TRACK = 0x2,
+ MLX5_MSEES_OPER_STATUS_HOLDOVER = 0x3,
+ MLX5_MSEES_OPER_STATUS_FAIL_HOLDOVER = 0x4,
+ MLX5_MSEES_OPER_STATUS_FAIL_FREE_RUNNING = 0x5,
+};
+
+struct mlx5_ifc_msees_reg_bits {
+ u8 reserved_at_0[0x8];
+ u8 local_port[0x8];
+ u8 pnat[0x2];
+ u8 lp_msb[0x2];
+ u8 reserved_at_14[0xc];
+
+ u8 field_select[0x20];
+
+ u8 admin_status[0x4];
+ u8 oper_status[0x4];
+ u8 ho_acq[0x1];
+ u8 reserved_at_49[0xc];
+ u8 admin_freq_measure[0x1];
+ u8 oper_freq_measure[0x1];
+ u8 failure_reason[0x9];
+
+ u8 frequency_diff[0x20];
+
+ u8 reserved_at_80[0x180];
+};
+
#endif /* MLX5_IFC_H */
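
A hedged sketch of how the new MSEES register layout might be queried: MLX5_ST_SZ_DW()/MLX5_SET()/MLX5_GET() and mlx5_core_access_reg() are the established mlx5 accessors, but the caller below is hypothetical and not part of this patch.

	static int query_msees_oper_status(struct mlx5_core_dev *mdev,
					   u8 local_port, u8 *oper_status)
	{
		u32 out[MLX5_ST_SZ_DW(msees_reg)] = {};
		u32 in[MLX5_ST_SZ_DW(msees_reg)] = {};
		int err;

		MLX5_SET(msees_reg, in, local_port, local_port);

		/* Last argument 0 selects a read (query) access. */
		err = mlx5_core_access_reg(mdev, in, sizeof(in), out,
					   sizeof(out), MLX5_REG_MSEES, 0, 0);
		if (err)
			return err;

		*oper_status = MLX5_GET(msees_reg, out, oper_status);
		return 0;
	}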
diff --git a/include/linux/mlx5/port.h b/include/linux/mlx5/port.h
index 98b2e1e149f9..26092c78a985 100644
--- a/include/linux/mlx5/port.h
+++ b/include/linux/mlx5/port.h
@@ -115,8 +115,9 @@ enum mlx5e_ext_link_mode {
MLX5E_100GAUI_1_100GBASE_CR_KR = 11,
MLX5E_200GAUI_4_200GBASE_CR4_KR4 = 12,
MLX5E_200GAUI_2_200GBASE_CR2_KR2 = 13,
- MLX5E_400GAUI_8 = 15,
+ MLX5E_400GAUI_8_400GBASE_CR8 = 15,
MLX5E_400GAUI_4_400GBASE_CR4_KR4 = 16,
+ MLX5E_800GAUI_8_800GBASE_CR8_KR8 = 19,
MLX5E_EXT_LINK_MODES_NUMBER,
};
diff --git a/include/linux/mm.h b/include/linux/mm.h
index bf5d0b1b16f4..418d26608ece 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -362,8 +362,6 @@ extern unsigned int kobjsize(const void *objp);
# define VM_SAO VM_ARCH_1 /* Strong Access Ordering (powerpc) */
#elif defined(CONFIG_PARISC)
# define VM_GROWSUP VM_ARCH_1
-#elif defined(CONFIG_IA64)
-# define VM_GROWSUP VM_ARCH_1
#elif defined(CONFIG_SPARC64)
# define VM_SPARC_ADI VM_ARCH_1 /* Uses ADI tag for access control */
# define VM_ARCH_CLEAR VM_SPARC_ADI
@@ -619,7 +617,7 @@ struct vm_operations_struct {
* policy.
*/
struct mempolicy *(*get_policy)(struct vm_area_struct *vma,
- unsigned long addr);
+ unsigned long addr, pgoff_t *ilx);
#endif
/*
* Called by vm_normal_page() for special PTEs to find the
@@ -937,6 +935,17 @@ static inline bool vma_is_accessible(struct vm_area_struct *vma)
return vma->vm_flags & VM_ACCESS_FLAGS;
}
+static inline bool is_shared_maywrite(vm_flags_t vm_flags)
+{
+ return (vm_flags & (VM_SHARED | VM_MAYWRITE)) ==
+ (VM_SHARED | VM_MAYWRITE);
+}
+
+static inline bool vma_is_shared_maywrite(struct vm_area_struct *vma)
+{
+ return is_shared_maywrite(vma->vm_flags);
+}
+
static inline
struct vm_area_struct *vma_find(struct vma_iterator *vmi, unsigned long max)
{
@@ -1337,7 +1346,6 @@ void set_pte_range(struct vm_fault *vmf, struct folio *folio,
struct page *page, unsigned int nr, unsigned long addr);
vm_fault_t finish_fault(struct vm_fault *vmf);
-vm_fault_t finish_mkwrite_fault(struct vm_fault *vmf);
#endif
/*
@@ -1686,26 +1694,26 @@ static inline bool __cpupid_match_pid(pid_t task_pid, int cpupid)
#define cpupid_match_pid(task, cpupid) __cpupid_match_pid(task->pid, cpupid)
#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
-static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
+static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
{
- return xchg(&page->_last_cpupid, cpupid & LAST_CPUPID_MASK);
+ return xchg(&folio->_last_cpupid, cpupid & LAST_CPUPID_MASK);
}
-static inline int page_cpupid_last(struct page *page)
+static inline int folio_last_cpupid(struct folio *folio)
{
- return page->_last_cpupid;
+ return folio->_last_cpupid;
}
static inline void page_cpupid_reset_last(struct page *page)
{
page->_last_cpupid = -1 & LAST_CPUPID_MASK;
}
#else
-static inline int page_cpupid_last(struct page *page)
+static inline int folio_last_cpupid(struct folio *folio)
{
- return (page->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
+ return (folio->flags >> LAST_CPUPID_PGSHIFT) & LAST_CPUPID_MASK;
}
-extern int page_cpupid_xchg_last(struct page *page, int cpupid);
+int folio_xchg_last_cpupid(struct folio *folio, int cpupid);
static inline void page_cpupid_reset_last(struct page *page)
{
@@ -1713,11 +1721,12 @@ static inline void page_cpupid_reset_last(struct page *page)
}
#endif /* LAST_CPUPID_NOT_IN_PAGE_FLAGS */
-static inline int xchg_page_access_time(struct page *page, int time)
+static inline int folio_xchg_access_time(struct folio *folio, int time)
{
int last_time;
- last_time = page_cpupid_xchg_last(page, time >> PAGE_ACCESS_TIME_BUCKETS);
+ last_time = folio_xchg_last_cpupid(folio,
+ time >> PAGE_ACCESS_TIME_BUCKETS);
return last_time << PAGE_ACCESS_TIME_BUCKETS;
}
@@ -1726,24 +1735,24 @@ static inline void vma_set_access_pid_bit(struct vm_area_struct *vma)
unsigned int pid_bit;
pid_bit = hash_32(current->pid, ilog2(BITS_PER_LONG));
- if (vma->numab_state && !test_bit(pid_bit, &vma->numab_state->access_pids[1])) {
- __set_bit(pid_bit, &vma->numab_state->access_pids[1]);
+ if (vma->numab_state && !test_bit(pid_bit, &vma->numab_state->pids_active[1])) {
+ __set_bit(pid_bit, &vma->numab_state->pids_active[1]);
}
}
#else /* !CONFIG_NUMA_BALANCING */
-static inline int page_cpupid_xchg_last(struct page *page, int cpupid)
+static inline int folio_xchg_last_cpupid(struct folio *folio, int cpupid)
{
- return page_to_nid(page); /* XXX */
+ return folio_nid(folio); /* XXX */
}
-static inline int xchg_page_access_time(struct page *page, int time)
+static inline int folio_xchg_access_time(struct folio *folio, int time)
{
return 0;
}
-static inline int page_cpupid_last(struct page *page)
+static inline int folio_last_cpupid(struct folio *folio)
{
- return page_to_nid(page); /* XXX */
+ return folio_nid(folio); /* XXX */
}
static inline int cpupid_to_nid(int cpupid)
@@ -2327,6 +2336,8 @@ struct folio *vm_normal_folio(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
struct page *vm_normal_page(struct vm_area_struct *vma, unsigned long addr,
pte_t pte);
+struct folio *vm_normal_folio_pmd(struct vm_area_struct *vma,
+ unsigned long addr, pmd_t pmd);
struct page *vm_normal_page_pmd(struct vm_area_struct *vma, unsigned long addr,
pmd_t pmd);
@@ -2413,8 +2424,6 @@ extern int access_process_vm(struct task_struct *tsk, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
extern int access_remote_vm(struct mm_struct *mm, unsigned long addr,
void *buf, int len, unsigned int gup_flags);
-extern int __access_remote_vm(struct mm_struct *mm, unsigned long addr,
- void *buf, int len, unsigned int gup_flags);
long get_user_pages_remote(struct mm_struct *mm,
unsigned long start, unsigned long nr_pages,
@@ -2425,6 +2434,9 @@ long pin_user_pages_remote(struct mm_struct *mm,
unsigned int gup_flags, struct page **pages,
int *locked);
+/*
+ * Retrieves a single page alongside its VMA. Does not support FOLL_NOWAIT.
+ */
static inline struct page *get_user_page_vma_remote(struct mm_struct *mm,
unsigned long addr,
int gup_flags,
@@ -2432,12 +2444,15 @@ static inline struct page *get_user_page_vma_remote(struct mm_struct *mm,
{
struct page *page;
struct vm_area_struct *vma;
- int got = get_user_pages_remote(mm, addr, 1, gup_flags, &page, NULL);
+ int got;
+
+ if (WARN_ON_ONCE(unlikely(gup_flags & FOLL_NOWAIT)))
+ return ERR_PTR(-EINVAL);
+
+ got = get_user_pages_remote(mm, addr, 1, gup_flags, &page, NULL);
if (got < 0)
return ERR_PTR(got);
- if (got == 0)
- return NULL;
vma = vma_lookup(mm, addr);
if (WARN_ON_ONCE(!vma)) {
@@ -2480,7 +2495,7 @@ int get_cmdline(struct task_struct *task, char *buffer, int buflen);
extern unsigned long move_page_tables(struct vm_area_struct *vma,
unsigned long old_addr, struct vm_area_struct *new_vma,
unsigned long new_addr, unsigned long len,
- bool need_rmap_locks);
+ bool need_rmap_locks, bool for_stack);
/*
* Flags used by change_protection(). For now we make it a bitmap so
@@ -2628,14 +2643,6 @@ static inline void setmax_mm_hiwater_rss(unsigned long *maxrss,
*maxrss = hiwater_rss;
}
-#if defined(SPLIT_RSS_COUNTING)
-void sync_mm_rss(struct mm_struct *mm);
-#else
-static inline void sync_mm_rss(struct mm_struct *mm)
-{
-}
-#endif
-
#ifndef CONFIG_ARCH_HAS_PTE_SPECIAL
static inline int pte_special(pte_t pte)
{
@@ -3057,6 +3064,22 @@ static inline spinlock_t *pud_lock(struct mm_struct *mm, pud_t *pud)
return ptl;
}
+static inline void pagetable_pud_ctor(struct ptdesc *ptdesc)
+{
+ struct folio *folio = ptdesc_folio(ptdesc);
+
+ __folio_set_pgtable(folio);
+ lruvec_stat_add_folio(folio, NR_PAGETABLE);
+}
+
+static inline void pagetable_pud_dtor(struct ptdesc *ptdesc)
+{
+ struct folio *folio = ptdesc_folio(ptdesc);
+
+ __folio_clear_pgtable(folio);
+ lruvec_stat_sub_folio(folio, NR_PAGETABLE);
+}
+
extern void __init pagecache_init(void);
extern void free_initmem(void);
@@ -3221,22 +3244,73 @@ extern int vma_expand(struct vma_iterator *vmi, struct vm_area_struct *vma,
struct vm_area_struct *next);
extern int vma_shrink(struct vma_iterator *vmi, struct vm_area_struct *vma,
unsigned long start, unsigned long end, pgoff_t pgoff);
-extern struct vm_area_struct *vma_merge(struct vma_iterator *vmi,
- struct mm_struct *, struct vm_area_struct *prev, unsigned long addr,
- unsigned long end, unsigned long vm_flags, struct anon_vma *,
- struct file *, pgoff_t, struct mempolicy *, struct vm_userfaultfd_ctx,
- struct anon_vma_name *);
extern struct anon_vma *find_mergeable_anon_vma(struct vm_area_struct *);
-extern int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *,
- unsigned long addr, int new_below);
-extern int split_vma(struct vma_iterator *vmi, struct vm_area_struct *,
- unsigned long addr, int new_below);
extern int insert_vm_struct(struct mm_struct *, struct vm_area_struct *);
extern void unlink_file_vma(struct vm_area_struct *);
extern struct vm_area_struct *copy_vma(struct vm_area_struct **,
unsigned long addr, unsigned long len, pgoff_t pgoff,
bool *need_rmap_locks);
extern void exit_mmap(struct mm_struct *);
+struct vm_area_struct *vma_modify(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ unsigned long vm_flags,
+ struct mempolicy *policy,
+ struct vm_userfaultfd_ctx uffd_ctx,
+ struct anon_vma_name *anon_name);
+
+/* We are about to modify the VMA's flags. */
+static inline struct vm_area_struct
+*vma_modify_flags(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ unsigned long new_flags)
+{
+ return vma_modify(vmi, prev, vma, start, end, new_flags,
+ vma_policy(vma), vma->vm_userfaultfd_ctx,
+ anon_vma_name(vma));
+}
+
+/* We are about to modify the VMA's flags and/or anon_name. */
+static inline struct vm_area_struct
+*vma_modify_flags_name(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start,
+ unsigned long end,
+ unsigned long new_flags,
+ struct anon_vma_name *new_name)
+{
+ return vma_modify(vmi, prev, vma, start, end, new_flags,
+ vma_policy(vma), vma->vm_userfaultfd_ctx, new_name);
+}
+
+/* We are about to modify the VMA's memory policy. */
+static inline struct vm_area_struct
+*vma_modify_policy(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ struct mempolicy *new_pol)
+{
+ return vma_modify(vmi, prev, vma, start, end, vma->vm_flags,
+ new_pol, vma->vm_userfaultfd_ctx, anon_vma_name(vma));
+}
+
+/* We are about to modify the VMA's flags and/or uffd context. */
+static inline struct vm_area_struct
+*vma_modify_flags_uffd(struct vma_iterator *vmi,
+ struct vm_area_struct *prev,
+ struct vm_area_struct *vma,
+ unsigned long start, unsigned long end,
+ unsigned long new_flags,
+ struct vm_userfaultfd_ctx new_ctx)
+{
+ return vma_modify(vmi, prev, vma, start, end, new_flags,
+ vma_policy(vma), new_ctx, anon_vma_name(vma));
+}
static inline int check_data_rlimit(unsigned long rlim,
unsigned long new,
@@ -3308,8 +3382,7 @@ static inline void mm_populate(unsigned long addr, unsigned long len)
static inline void mm_populate(unsigned long addr, unsigned long len) {}
#endif
-/* These take the mm semaphore themselves */
-extern int __must_check vm_brk(unsigned long, unsigned long);
+/* This takes the mm semaphore itself */
extern int __must_check vm_brk_flags(unsigned long, unsigned long, unsigned long);
extern int vm_munmap(unsigned long, size_t);
extern unsigned long __must_check vm_mmap(struct file *, unsigned long,
@@ -4000,25 +4073,26 @@ static inline void mem_dump_obj(void *object) {}
#endif
/**
- * seal_check_future_write - Check for F_SEAL_FUTURE_WRITE flag and handle it
+ * seal_check_write - Check for F_SEAL_WRITE or F_SEAL_FUTURE_WRITE flags and
+ * handle them.
* @seals: the seals to check
* @vma: the vma to operate on
*
- * Check whether F_SEAL_FUTURE_WRITE is set; if so, do proper check/handling on
- * the vma flags. Return 0 if check pass, or <0 for errors.
+ * Check whether F_SEAL_WRITE or F_SEAL_FUTURE_WRITE are set; if so, do proper
+ * check/handling on the vma flags. Return 0 if the check passes, or <0 on error.
*/
-static inline int seal_check_future_write(int seals, struct vm_area_struct *vma)
+static inline int seal_check_write(int seals, struct vm_area_struct *vma)
{
- if (seals & F_SEAL_FUTURE_WRITE) {
+ if (seals & (F_SEAL_WRITE | F_SEAL_FUTURE_WRITE)) {
/*
* New PROT_WRITE and MAP_SHARED mmaps are not allowed when
- * "future write" seal active.
+ * write seals are active.
*/
if ((vma->vm_flags & VM_SHARED) && (vma->vm_flags & VM_WRITE))
return -EPERM;
/*
- * Since an F_SEAL_FUTURE_WRITE sealed memfd can be mapped as
+ * Since an F_SEAL_[FUTURE_]WRITE sealed memfd can be mapped as
* MAP_SHARED and read-only, take care to not allow mprotect to
* revert protections on such mappings. Do this only for shared
* mappings. For private mappings, don't need to mask
@@ -4062,4 +4136,11 @@ static inline void accept_memory(phys_addr_t start, phys_addr_t end)
#endif
+static inline bool pfn_is_unaccepted_memory(unsigned long pfn)
+{
+ phys_addr_t paddr = pfn << PAGE_SHIFT;
+
+ return range_contains_unaccepted_memory(paddr, paddr + PAGE_SIZE);
+}
+
#endif /* _LINUX_MM_H */
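
The get_user_page_vma_remote() change above removes the NULL return and rejects FOLL_NOWAIT up front, so callers see either a valid page or an ERR_PTR(). A hedged sketch of the resulting calling convention (hypothetical caller):

	static int read_remote_byte(struct mm_struct *mm, unsigned long addr,
				    u8 *val)
	{
		struct vm_area_struct *vma;
		struct page *page;
		u8 *kaddr;

		if (mmap_read_lock_killable(mm))
			return -EINTR;

		/* Never NULL any more: either a valid page or an ERR_PTR(). */
		page = get_user_page_vma_remote(mm, addr, 0, &vma);
		if (IS_ERR(page)) {
			mmap_read_unlock(mm);
			return PTR_ERR(page);
		}

		kaddr = kmap_local_page(page);
		*val = kaddr[offset_in_page(addr)];
		kunmap_local(kaddr);

		put_page(page);
		mmap_read_unlock(mm);
		return 0;
	}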
diff --git a/include/linux/mm_inline.h b/include/linux/mm_inline.h
index 8148b30a9df1..9ae7def16cb2 100644
--- a/include/linux/mm_inline.h
+++ b/include/linux/mm_inline.h
@@ -4,6 +4,7 @@
#include <linux/atomic.h>
#include <linux/huge_mm.h>
+#include <linux/mm_types.h>
#include <linux/swap.h>
#include <linux/string.h>
#include <linux/userfaultfd_k.h>
@@ -352,15 +353,6 @@ void lruvec_del_folio(struct lruvec *lruvec, struct folio *folio)
}
#ifdef CONFIG_ANON_VMA_NAME
-/*
- * mmap_lock should be read-locked when calling anon_vma_name(). Caller should
- * either keep holding the lock while using the returned pointer or it should
- * raise anon_vma_name refcount before releasing the lock.
- */
-extern struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
-extern struct anon_vma_name *anon_vma_name_alloc(const char *name);
-extern void anon_vma_name_free(struct kref *kref);
-
/* mmap_lock should be read-locked */
static inline void anon_vma_name_get(struct anon_vma_name *anon_name)
{
@@ -415,16 +407,6 @@ static inline bool anon_vma_name_eq(struct anon_vma_name *anon_name1,
}
#else /* CONFIG_ANON_VMA_NAME */
-static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
-{
- return NULL;
-}
-
-static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
-{
- return NULL;
-}
-
static inline void anon_vma_name_get(struct anon_vma_name *anon_name) {}
static inline void anon_vma_name_put(struct anon_vma_name *anon_name) {}
static inline void dup_anon_vma_name(struct vm_area_struct *orig_vma,
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index 36c5b43999e6..957ce38768b2 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -125,18 +125,7 @@ struct page {
struct page_pool *pp;
unsigned long _pp_mapping_pad;
unsigned long dma_addr;
- union {
- /**
- * dma_addr_upper: might require a 64-bit
- * value on 32-bit architectures.
- */
- unsigned long dma_addr_upper;
- /**
- * For frag page support, not supported in
- * 32-bit architectures with 64-bit DMA.
- */
- atomic_long_t pp_frag_count;
- };
+ atomic_long_t pp_frag_count;
};
struct { /* Tail pages of compound page */
unsigned long compound_head; /* Bit zero is set */
@@ -199,6 +188,10 @@ struct page {
not kmapped, ie. highmem) */
#endif /* WANT_PAGE_VIRTUAL */
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+ int _last_cpupid;
+#endif
+
#ifdef CONFIG_KMSAN
/*
* KMSAN metadata for this page:
@@ -210,10 +203,6 @@ struct page {
struct page *kmsan_shadow;
struct page *kmsan_origin;
#endif
-
-#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
- int _last_cpupid;
-#endif
} _struct_page_alignment;
/*
@@ -272,6 +261,8 @@ typedef struct {
* @_refcount: Do not access this member directly. Use folio_ref_count()
* to find how many references there are to this folio.
* @memcg_data: Memory Control Group data.
+ * @virtual: Virtual address in the kernel direct map.
+ * @_last_cpupid: IDs of last CPU and last process that accessed the folio.
* @_entire_mapcount: Do not use directly, call folio_entire_mapcount().
* @_nr_pages_mapped: Do not use directly, call folio_mapcount().
* @_pincount: Do not use directly, call folio_maybe_dma_pinned().
@@ -318,6 +309,12 @@ struct folio {
#ifdef CONFIG_MEMCG
unsigned long memcg_data;
#endif
+#if defined(WANT_PAGE_VIRTUAL)
+ void *virtual;
+#endif
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+ int _last_cpupid;
+#endif
/* private: the union with struct page is transitional */
};
struct page page;
@@ -373,6 +370,12 @@ FOLIO_MATCH(_refcount, _refcount);
#ifdef CONFIG_MEMCG
FOLIO_MATCH(memcg_data, memcg_data);
#endif
+#if defined(WANT_PAGE_VIRTUAL)
+FOLIO_MATCH(virtual, virtual);
+#endif
+#ifdef LAST_CPUPID_NOT_IN_PAGE_FLAGS
+FOLIO_MATCH(_last_cpupid, _last_cpupid);
+#endif
#undef FOLIO_MATCH
#define FOLIO_MATCH(pg, fl) \
static_assert(offsetof(struct folio, fl) == \
@@ -546,14 +549,62 @@ struct anon_vma_name {
char name[];
};
+#ifdef CONFIG_ANON_VMA_NAME
+/*
+ * mmap_lock should be read-locked when calling anon_vma_name(). Caller should
+ * either keep holding the lock while using the returned pointer or it should
+ * raise anon_vma_name refcount before releasing the lock.
+ */
+struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma);
+struct anon_vma_name *anon_vma_name_alloc(const char *name);
+void anon_vma_name_free(struct kref *kref);
+#else /* CONFIG_ANON_VMA_NAME */
+static inline struct anon_vma_name *anon_vma_name(struct vm_area_struct *vma)
+{
+ return NULL;
+}
+
+static inline struct anon_vma_name *anon_vma_name_alloc(const char *name)
+{
+ return NULL;
+}
+#endif
+
struct vma_lock {
struct rw_semaphore lock;
};
struct vma_numab_state {
+ /*
+ * Initialised as time in 'jiffies' after which VMA
+ * should be scanned. Delays first scan of new VMA by at
+ * least sysctl_numa_balancing_scan_delay:
+ */
unsigned long next_scan;
- unsigned long next_pid_reset;
- unsigned long access_pids[2];
+
+ /*
+ * Time in jiffies when pids_active[] is reset to
+ * detect phase change behaviour:
+ */
+ unsigned long pids_active_reset;
+
+ /*
+ * Approximate tracking of PIDs that trapped a NUMA hinting
+ * fault. May produce false positives due to hash collisions.
+ *
+ * [0] Previous PID tracking
+ * [1] Current PID tracking
+ *
+ * Window moves after pids_active_reset has expired approximately
+ * every VMA_PID_RESET_PERIOD jiffies:
+ */
+ unsigned long pids_active[2];
+
+ /*
+ * MM scan sequence ID when the VMA was last completely scanned.
+ * A VMA is not eligible for scanning if prev_scan_seq == numa_scan_seq
+ */
+ int prev_scan_seq;
};
/*
@@ -662,6 +713,12 @@ struct vm_area_struct {
struct vm_userfaultfd_ctx vm_userfaultfd_ctx;
} __randomize_layout;
+#ifdef CONFIG_NUMA
+#define vma_policy(vma) ((vma)->vm_policy)
+#else
+#define vma_policy(vma) NULL
+#endif
+
#ifdef CONFIG_SCHED_MM_CID
struct mm_cid {
u64 time;
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 62a6847a3b6f..2f445c651742 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -526,6 +526,7 @@ struct mmc_host {
/* Host Software Queue support */
bool hsq_enabled;
+ int hsq_depth;
u32 err_stats[MMC_ERR_MAX];
unsigned long private[] ____cacheline_aligned;
diff --git a/include/linux/mmu_notifier.h b/include/linux/mmu_notifier.h
index 6e3c857606f1..f349e08a9dfe 100644
--- a/include/linux/mmu_notifier.h
+++ b/include/linux/mmu_notifier.h
@@ -459,7 +459,14 @@ mmu_notifier_invalidate_range_start(struct mmu_notifier_range *range)
lock_map_release(&__mmu_notifier_invalidate_range_start_map);
}
-static inline int
+/*
+ * This version of mmu_notifier_invalidate_range_start() avoids blocking, but it
+ * can return an error if a notifier can't proceed without blocking, in which
+ * case you're not allowed to modify PTEs in the specified range.
+ *
+ * This is mainly intended for OOM handling.
+ */
+static inline int __must_check
mmu_notifier_invalidate_range_start_nonblock(struct mmu_notifier_range *range)
{
int ret = 0;
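
Given the new __must_check annotation, a hedged sketch of the expected OOM-reaper-style pattern (hypothetical caller, not from this patch):

	static bool reap_range_nonblock(struct mmu_notifier_range *range)
	{
		/*
		 * A nonzero return means some notifier could not proceed
		 * without blocking; PTEs in the range must be left alone.
		 */
		if (mmu_notifier_invalidate_range_start_nonblock(range))
			return false;

		/* ... unmap/zap PTEs in [range->start, range->end) ... */

		mmu_notifier_invalidate_range_end(range);
		return true;
	}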
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 4106fbc5b4b3..3c25226beeed 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -639,8 +639,6 @@ struct lruvec {
#endif
};
-/* Isolate unmapped pages */
-#define ISOLATE_UNMAPPED ((__force isolate_mode_t)0x2)
/* Isolate for asynchronous migration */
#define ISOLATE_ASYNC_MIGRATE ((__force isolate_mode_t)0x4)
/* Isolate unevictable pages */
@@ -676,15 +674,34 @@ enum zone_watermarks {
#define high_wmark_pages(z) (z->_watermark[WMARK_HIGH] + z->watermark_boost)
#define wmark_pages(z, i) (z->_watermark[i] + z->watermark_boost)
+/*
+ * Flags used in pcp->flags field.
+ *
+ * PCPF_PREV_FREE_HIGH_ORDER: the previous page freeing was of a
+ * high-order page. Used to avoid draining the PCP for an occasional
+ * high-order page freeing.
+ *
+ * PCPF_FREE_HIGH_BATCH: preserve "pcp->batch" pages in the PCP before
+ * draining it for consecutive high-order page freeing without
+ * allocation, if the CPU's data cache slice is large enough. This
+ * reduces zone lock contention and keeps cache-hot pages reusable.
+ */
+#define PCPF_PREV_FREE_HIGH_ORDER BIT(0)
+#define PCPF_FREE_HIGH_BATCH BIT(1)
+
struct per_cpu_pages {
spinlock_t lock; /* Protects lists field */
int count; /* number of pages in the list */
int high; /* high watermark, emptying needed */
+ int high_min; /* min high watermark */
+ int high_max; /* max high watermark */
int batch; /* chunk size for buddy add/remove */
- short free_factor; /* batch scaling factor during free */
+ u8 flags; /* protected by pcp->lock */
+ u8 alloc_factor; /* batch scaling factor during allocate */
#ifdef CONFIG_NUMA
- short expire; /* When 0, remote pagesets are drained */
+ u8 expire; /* When 0, remote pagesets are drained */
#endif
+ short free_count; /* consecutive free count */
/* Lists of pages, one per migrate type stored on the pcp-lists */
struct list_head lists[NR_PCP_LISTS];
@@ -837,7 +854,8 @@ struct zone {
* the high and batch values are copied to individual pagesets for
* faster access
*/
- int pageset_high;
+ int pageset_high_min;
+ int pageset_high_max;
int pageset_batch;
#ifndef CONFIG_SPARSEMEM
@@ -998,6 +1016,7 @@ enum zone_flags {
* Cleared when kswapd is woken.
*/
ZONE_RECLAIM_ACTIVE, /* kswapd may be scanning the zone. */
+ ZONE_BELOW_HIGH, /* zone is below high watermark. */
};
static inline unsigned long zone_managed_pages(struct zone *zone)
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index b0678b093cb2..f458469c5ce5 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -935,6 +935,12 @@ enum {
* struct cdx_device_id - CDX device identifier
* @vendor: Vendor ID
* @device: Device ID
+ * @subvendor: Subsystem vendor ID (or CDX_ANY_ID)
+ * @subdevice: Subsystem device ID (or CDX_ANY_ID)
+ * @class: Device class
+ * Most drivers do not need to specify class/class_mask
+ * as vendor/device is normally sufficient.
+ * @class_mask: Limit which sub-fields of the class field are compared.
* @override_only: Match only when dev->driver_override is this driver.
*
* Type of entries in the "device Id" table for CDX devices supported by
@@ -943,7 +949,15 @@ enum {
struct cdx_device_id {
__u16 vendor;
__u16 device;
+ __u16 subvendor;
+ __u16 subdevice;
+ __u32 class;
+ __u32 class_mask;
__u32 override_only;
};
+struct vchiq_device_id {
+ char name[32];
+};
+
#endif /* LINUX_MOD_DEVICETABLE_H */
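
A hedged sketch of a match table using the new cdx_device_id fields (hypothetical driver; CDX_ANY_ID is assumed to come from <linux/cdx/cdx_bus.h>, and the IDs below are illustration-only):

	static const struct cdx_device_id my_cdx_ids[] = {
		/* Match one exact vendor/device, any subsystem IDs. */
		{ .vendor = 0x10ee, .device = 0x0001,
		  .subvendor = CDX_ANY_ID, .subdevice = CDX_ANY_ID,
		  .class = 0x0, .class_mask = 0x0 },	/* class not compared */
		{ },
	};
	MODULE_DEVICE_TABLE(cdx, my_cdx_ids);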
diff --git a/include/linux/module_symbol.h b/include/linux/module_symbol.h
index 1269543d0634..77c9895b9ddb 100644
--- a/include/linux/module_symbol.h
+++ b/include/linux/module_symbol.h
@@ -3,7 +3,7 @@
#define _LINUX_MODULE_SYMBOL_H
/* This ignores the intensely annoying "mapping symbols" found in ELF files. */
-static inline int is_mapping_symbol(const char *str)
+static inline bool is_mapping_symbol(const char *str)
{
if (str[0] == '.' && str[1] == 'L')
return true;
diff --git a/include/linux/moduleparam.h b/include/linux/moduleparam.h
index 962cd41a2cb5..4fa9726bc328 100644
--- a/include/linux/moduleparam.h
+++ b/include/linux/moduleparam.h
@@ -276,7 +276,7 @@ struct kparam_array
read-only sections (which is part of respective UNIX ABI on these
platforms). So 'const' makes no sense and even causes compile failures
with some compilers. */
-#if defined(CONFIG_ALPHA) || defined(CONFIG_IA64) || defined(CONFIG_PPC64)
+#if defined(CONFIG_ALPHA) || defined(CONFIG_PPC64)
#define __moduleparam_const
#else
#define __moduleparam_const const
@@ -293,7 +293,11 @@ struct kparam_array
= { __param_str_##name, THIS_MODULE, ops, \
VERIFY_OCTAL_PERMISSIONS(perm), level, flags, { arg } }
-/* Obsolete - use module_param_cb() */
+/*
+ * Useful for describing a set/get pair used only once (i.e. for this
+ * parameter). For repeated set/get pairs (i.e. the same struct
+ * kernel_param_ops), use module_param_cb() instead.
+ */
#define module_param_call(name, _set, _get, arg, perm) \
static const struct kernel_param_ops __param_ops_##name = \
{ .flags = 0, .set = _set, .get = _get }; \
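
For the clarified comment above, a minimal sketch of a set/get pair used only once (hypothetical parameter name, not from this patch):

	static int threshold;

	static int threshold_set(const char *val, const struct kernel_param *kp)
	{
		int err = param_set_int(val, kp);

		if (!err)
			pr_info("threshold is now %d\n", threshold);
		return err;
	}

	/* The ops struct is generated once, for this parameter only. */
	module_param_call(threshold, threshold_set, param_get_int,
			  &threshold, 0644);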
diff --git a/include/linux/mount.h b/include/linux/mount.h
index 4f40b40306d0..ac3dd2876197 100644
--- a/include/linux/mount.h
+++ b/include/linux/mount.h
@@ -92,8 +92,8 @@ extern bool __mnt_is_readonly(struct vfsmount *mnt);
extern bool mnt_may_suid(struct vfsmount *mnt);
extern struct vfsmount *clone_private_mount(const struct path *path);
-extern int __mnt_want_write(struct vfsmount *);
-extern void __mnt_drop_write(struct vfsmount *);
+int mnt_get_write_access(struct vfsmount *mnt);
+void mnt_put_write_access(struct vfsmount *mnt);
extern struct vfsmount *fc_mount(struct fs_context *fc);
extern struct vfsmount *vfs_create_mount(struct fs_context *fc);
diff --git a/include/linux/msi.h b/include/linux/msi.h
index a50ea79522f8..ddace8c34dcf 100644
--- a/include/linux/msi.h
+++ b/include/linux/msi.h
@@ -547,12 +547,6 @@ enum {
MSI_FLAG_ALLOC_SIMPLE_MSI_DESCS = (1 << 5),
/* Free MSI descriptors */
MSI_FLAG_FREE_MSI_DESCS = (1 << 6),
- /*
- * Quirk to handle MSI implementations which do not provide
- * masking. Currently known to affect x86, but has to be partially
- * handled in the core MSI code.
- */
- MSI_FLAG_NOMASK_QUIRK = (1 << 7),
/* Mask for the generic functionality */
MSI_GENERIC_FLAGS_MASK = GENMASK(15, 0),
diff --git a/include/linux/mtd/cfi.h b/include/linux/mtd/cfi.h
index d88bb56c18e2..947410faf9e2 100644
--- a/include/linux/mtd/cfi.h
+++ b/include/linux/mtd/cfi.h
@@ -287,7 +287,7 @@ struct cfi_private {
unsigned long chipshift; /* Because they're of the same type */
const char *im_name; /* inter_module name for cmdset_setup */
unsigned long quirks;
- struct flchip chips[]; /* per-chip data structure for each chip */
+ struct flchip chips[] __counted_by(numchips); /* per-chip data structure for each chip */
};
uint32_t cfi_build_cmd_addr(uint32_t cmd_ofs,
diff --git a/include/linux/mtd/jedec.h b/include/linux/mtd/jedec.h
index 0b6b59f7cfbd..56047a4e54c9 100644
--- a/include/linux/mtd/jedec.h
+++ b/include/linux/mtd/jedec.h
@@ -21,6 +21,9 @@ struct jedec_ecc_info {
/* JEDEC features */
#define JEDEC_FEATURE_16_BIT_BUS (1 << 0)
+/* JEDEC Optional Commands */
+#define JEDEC_OPT_CMD_READ_CACHE BIT(1)
+
struct nand_jedec_params {
/* rev info and features block */
/* 'J' 'E' 'S' 'D' */
diff --git a/include/linux/mtd/onfi.h b/include/linux/mtd/onfi.h
index a7376f9beddf..55ab2e4d62f9 100644
--- a/include/linux/mtd/onfi.h
+++ b/include/linux/mtd/onfi.h
@@ -55,6 +55,7 @@
#define ONFI_SUBFEATURE_PARAM_LEN 4
/* ONFI optional commands SET/GET FEATURES supported? */
+#define ONFI_OPT_CMD_READ_CACHE BIT(1)
#define ONFI_OPT_CMD_SET_GET_FEATURES BIT(2)
struct nand_onfi_params {
diff --git a/include/linux/mtd/qinfo.h b/include/linux/mtd/qinfo.h
index 2e3f43788d48..0421f12156b5 100644
--- a/include/linux/mtd/qinfo.h
+++ b/include/linux/mtd/qinfo.h
@@ -24,7 +24,7 @@ struct lpddr_private {
struct qinfo_chip *qinfo;
int numchips;
unsigned long chipshift;
- struct flchip chips[];
+ struct flchip chips[] __counted_by(numchips);
};
/* qinfo_query_info structure contains request information for
diff --git a/include/linux/mtd/rawnand.h b/include/linux/mtd/rawnand.h
index 90a141ba2a5a..c29ace15a053 100644
--- a/include/linux/mtd/rawnand.h
+++ b/include/linux/mtd/rawnand.h
@@ -225,6 +225,7 @@ struct gpio_desc;
* struct nand_parameters - NAND generic parameters from the parameter page
* @model: Model name
* @supports_set_get_features: The NAND chip supports setting/getting features
+ * @supports_read_cache: The NAND chip supports read cache operations
* @set_feature_list: Bitmap of features that can be set
* @get_feature_list: Bitmap of features that can be get
* @onfi: ONFI specific parameters
@@ -233,6 +234,7 @@ struct nand_parameters {
/* Generic parameters */
const char *model;
bool supports_set_get_features;
+ bool supports_read_cache;
DECLARE_BITMAP(set_feature_list, ONFI_FEATURE_NUMBER);
DECLARE_BITMAP(get_feature_list, ONFI_FEATURE_NUMBER);
diff --git a/include/linux/mtd/spinand.h b/include/linux/mtd/spinand.h
index 3e285c09d16d..badb4c1ac079 100644
--- a/include/linux/mtd/spinand.h
+++ b/include/linux/mtd/spinand.h
@@ -263,6 +263,7 @@ struct spinand_manufacturer {
extern const struct spinand_manufacturer alliancememory_spinand_manufacturer;
extern const struct spinand_manufacturer ato_spinand_manufacturer;
extern const struct spinand_manufacturer esmt_c8_spinand_manufacturer;
+extern const struct spinand_manufacturer foresee_spinand_manufacturer;
extern const struct spinand_manufacturer gigadevice_spinand_manufacturer;
extern const struct spinand_manufacturer macronix_spinand_manufacturer;
extern const struct spinand_manufacturer micron_spinand_manufacturer;
diff --git a/include/linux/namei.h b/include/linux/namei.h
index 1463cbda4888..3100371b5e32 100644
--- a/include/linux/namei.h
+++ b/include/linux/namei.h
@@ -92,6 +92,30 @@ extern struct dentry *lock_rename(struct dentry *, struct dentry *);
extern struct dentry *lock_rename_child(struct dentry *, struct dentry *);
extern void unlock_rename(struct dentry *, struct dentry *);
+/**
+ * mode_strip_umask - handle vfs umask stripping
+ * @dir: parent directory of the new inode
+ * @mode: mode of the new inode to be created in @dir
+ *
+ * In most filesystems, umask stripping depends on whether or not the
+ * filesystem supports POSIX ACLs. If the filesystem doesn't support them,
+ * umask stripping is done directly here. If the filesystem does support
+ * POSIX ACLs, umask stripping is deferred until the filesystem calls
+ * posix_acl_create().
+ *
+ * Some filesystems (like NFSv4) also want to avoid umask stripping by the
+ * VFS, but don't support POSIX ACLs. Those filesystems can set SB_I_NOUMASK
+ * to get this effect without declaring that they support POSIX ACLs.
+ *
+ * Returns: mode
+ */
+static inline umode_t __must_check mode_strip_umask(const struct inode *dir, umode_t mode)
+{
+ if (!IS_POSIXACL(dir) && !(dir->i_sb->s_iflags & SB_I_NOUMASK))
+ mode &= ~current_umask();
+ return mode;
+}
+
extern int __must_check nd_jump_link(const struct path *path);
static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
@@ -112,7 +136,7 @@ static inline void nd_terminate_link(void *name, size_t len, size_t maxlen)
static inline bool
retry_estale(const long error, const unsigned int flags)
{
- return error == -ESTALE && !(flags & LOOKUP_REVAL);
+ return unlikely(error == -ESTALE && !(flags & LOOKUP_REVAL));
}
#endif /* _LINUX_NAMEI_H */
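
A hedged sketch of where mode_strip_umask() sits in an inode-creation path (hypothetical VFS-style helper, not part of this patch):

	static inline umode_t prepare_create_mode(const struct inode *dir,
						  umode_t mode)
	{
		/*
		 * Strips current_umask() only when neither POSIX ACLs nor
		 * SB_I_NOUMASK defer the stripping to the filesystem.
		 */
		mode = mode_strip_umask(dir, mode);

		/* ... sgid/type-bit handling would follow here ... */
		return mode;
	}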
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 0896aaa91dd7..a16c9cc063fe 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -79,6 +79,8 @@ struct xdp_buff;
struct xdp_frame;
struct xdp_metadata_ops;
struct xdp_md;
+/* DPLL specific */
+struct dpll_pin;
typedef u32 xdp_features_t;
@@ -480,6 +482,29 @@ static inline bool napi_prefer_busy_poll(struct napi_struct *n)
return test_bit(NAPI_STATE_PREFER_BUSY_POLL, &n->state);
}
+/**
+ * napi_is_scheduled - test if NAPI is scheduled
+ * @n: NAPI context
+ *
+ * This check is "best-effort". With no locking implemented,
+ * a NAPI can be scheduled or terminated right after this check,
+ * so the result may be imprecise.
+ *
+ * NAPI_STATE_SCHED is an internal state; napi_is_scheduled
+ * should not normally be used, and napi_schedule should be
+ * used instead.
+ *
+ * Use only if the driver really needs to check whether a NAPI
+ * is scheduled, for example in the context of a delayed timer
+ * that can be skipped if a NAPI is already scheduled.
+ *
+ * Return: true if NAPI is scheduled, false otherwise.
+ */
+static inline bool napi_is_scheduled(struct napi_struct *n)
+{
+ return test_bit(NAPI_STATE_SCHED, &n->state);
+}
+
bool napi_schedule_prep(struct napi_struct *n);
/**
@@ -488,11 +513,18 @@ bool napi_schedule_prep(struct napi_struct *n);
*
* Schedule NAPI poll routine to be called if it is not already
* running.
+ * Return true if the NAPI was scheduled, false otherwise.
+ * Refer to napi_schedule_prep() for the reasons why a NAPI
+ * might not be scheduled.
*/
-static inline void napi_schedule(struct napi_struct *n)
+static inline bool napi_schedule(struct napi_struct *n)
{
- if (napi_schedule_prep(n))
+ if (napi_schedule_prep(n)) {
__napi_schedule(n);
+ return true;
+ }
+
+ return false;
}
/**
@@ -507,16 +539,6 @@ static inline void napi_schedule_irqoff(struct napi_struct *n)
__napi_schedule_irqoff(n);
}
-/* Try to reschedule poll. Called by dev->poll() after napi_complete(). */
-static inline bool napi_reschedule(struct napi_struct *napi)
-{
- if (napi_schedule_prep(napi)) {
- __napi_schedule(napi);
- return true;
- }
- return false;
-}
-
/**
* napi_complete_done - NAPI processing complete
* @n: NAPI context
@@ -917,6 +939,7 @@ struct net_device_path {
u8 queue;
u16 wcid;
u8 bss;
+ u8 amsdu;
} mtk_wdma;
};
};
@@ -1287,9 +1310,7 @@ struct netdev_net_notifier {
* struct net_device *dev,
* const unsigned char *addr, u16 vid)
* Deletes the FDB entry from dev coresponding to addr.
- * int (*ndo_fdb_del_bulk)(struct ndmsg *ndm, struct nlattr *tb[],
- * struct net_device *dev,
- * u16 vid,
+ * int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh, struct net_device *dev,
* struct netlink_ext_ack *extack);
* int (*ndo_fdb_dump)(struct sk_buff *skb, struct netlink_callback *cb,
* struct net_device *dev, struct net_device *filter_dev,
@@ -1564,10 +1585,8 @@ struct net_device_ops {
struct net_device *dev,
const unsigned char *addr,
u16 vid, struct netlink_ext_ack *extack);
- int (*ndo_fdb_del_bulk)(struct ndmsg *ndm,
- struct nlattr *tb[],
+ int (*ndo_fdb_del_bulk)(struct nlmsghdr *nlh,
struct net_device *dev,
- u16 vid,
struct netlink_ext_ack *extack);
int (*ndo_fdb_dump)(struct sk_buff *skb,
struct netlink_callback *cb,
@@ -1590,6 +1609,10 @@ struct net_device_ops {
int (*ndo_mdb_dump)(struct net_device *dev,
struct sk_buff *skb,
struct netlink_callback *cb);
+ int (*ndo_mdb_get)(struct net_device *dev,
+ struct nlattr *tb[], u32 portid,
+ u32 seq,
+ struct netlink_ext_ack *extack);
int (*ndo_bridge_setlink)(struct net_device *dev,
struct nlmsghdr *nlh,
u16 flags,
@@ -2049,6 +2072,9 @@ enum netdev_ml_priv_type {
* SET_NETDEV_DEVLINK_PORT macro. This pointer is static
* during the time netdevice is registered.
*
+ * @dpll_pin: Pointer to the SyncE source pin of a DPLL subsystem,
+ * where the clock is recovered.
+ *
* FIXME: cleanup struct net_device such that network protocol info
* moves out.
*/
@@ -2405,6 +2431,10 @@ struct net_device {
struct rtnl_hw_stats64 *offload_xstats_l3;
struct devlink_port *devlink_port;
+
+#if IS_ENABLED(CONFIG_DPLL)
+ struct dpll_pin *dpll_pin;
+#endif
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
@@ -3940,6 +3970,18 @@ int dev_get_mac_address(struct sockaddr *sa, struct net *net, char *dev_name);
int dev_get_port_parent_id(struct net_device *dev,
struct netdev_phys_item_id *ppid, bool recurse);
bool netdev_port_same_parent_id(struct net_device *a, struct net_device *b);
+void netdev_dpll_pin_set(struct net_device *dev, struct dpll_pin *dpll_pin);
+void netdev_dpll_pin_clear(struct net_device *dev);
+
+static inline struct dpll_pin *netdev_dpll_pin(const struct net_device *dev)
+{
+#if IS_ENABLED(CONFIG_DPLL)
+ return dev->dpll_pin;
+#else
+ return NULL;
+#endif
+}
+
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev, bool *again);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
@@ -3980,32 +4022,19 @@ static __always_inline bool __is_skb_forwardable(const struct net_device *dev,
return false;
}
-struct net_device_core_stats __percpu *netdev_core_stats_alloc(struct net_device *dev);
-
-static inline struct net_device_core_stats __percpu *dev_core_stats(struct net_device *dev)
-{
- /* This READ_ONCE() pairs with the write in netdev_core_stats_alloc() */
- struct net_device_core_stats __percpu *p = READ_ONCE(dev->core_stats);
-
- if (likely(p))
- return p;
-
- return netdev_core_stats_alloc(dev);
-}
+void netdev_core_stats_inc(struct net_device *dev, u32 offset);
#define DEV_CORE_STATS_INC(FIELD) \
static inline void dev_core_stats_##FIELD##_inc(struct net_device *dev) \
{ \
- struct net_device_core_stats __percpu *p; \
- \
- p = dev_core_stats(dev); \
- if (p) \
- this_cpu_inc(p->FIELD); \
+ netdev_core_stats_inc(dev, \
+ offsetof(struct net_device_core_stats, FIELD)); \
}
DEV_CORE_STATS_INC(rx_dropped)
DEV_CORE_STATS_INC(tx_dropped)
DEV_CORE_STATS_INC(rx_nohandler)
DEV_CORE_STATS_INC(rx_otherhost_dropped)
+#undef DEV_CORE_STATS_INC
static __always_inline int ____dev_forward_skb(struct net_device *dev,
struct sk_buff *skb,
@@ -5214,5 +5243,6 @@ extern struct net_device *blackhole_netdev;
#define DEV_STATS_INC(DEV, FIELD) atomic_long_inc(&(DEV)->stats.__##FIELD)
#define DEV_STATS_ADD(DEV, FIELD, VAL) \
atomic_long_add((VAL), &(DEV)->stats.__##FIELD)
+#define DEV_STATS_READ(DEV, FIELD) atomic_long_read(&(DEV)->stats.__##FIELD)
#endif /* _LINUX_NETDEVICE_H */
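
With napi_reschedule() removed and napi_schedule() now returning bool, a hedged sketch of the replacement pattern (hypothetical driver; the struct and timer are illustration-only):

	struct my_priv {
		struct napi_struct napi;
		struct timer_list poll_timer;
	};

	static void my_poll_timer_fn(struct timer_list *t)
	{
		struct my_priv *priv = from_timer(priv, t, poll_timer);

		/* Former napi_reschedule() callers test the return directly. */
		if (!napi_schedule(&priv->napi))
			mod_timer(&priv->poll_timer,
				  jiffies + msecs_to_jiffies(10));
	}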
diff --git a/include/linux/netfilter.h b/include/linux/netfilter.h
index d68644b7c299..80900d910992 100644
--- a/include/linux/netfilter.h
+++ b/include/linux/netfilter.h
@@ -22,6 +22,16 @@ static inline int NF_DROP_GETERR(int verdict)
return -(verdict >> NF_VERDICT_QBITS);
}
+static __always_inline int
+NF_DROP_REASON(struct sk_buff *skb, enum skb_drop_reason reason, u32 err)
+{
+ BUILD_BUG_ON(err > 0xffff);
+
+ kfree_skb_reason(skb, reason);
+
+ return ((err << 16) | NF_STOLEN);
+}
+
static inline int nf_inet_addr_cmp(const union nf_inet_addr *a1,
const union nf_inet_addr *a2)
{
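
A hedged sketch of a netfilter hook using the new NF_DROP_REASON() helper to free the skb with a precise drop reason while still propagating an errno (hypothetical hook; assumes <linux/ip.h> for struct iphdr):

	static unsigned int my_nf_hook(void *priv, struct sk_buff *skb,
				       const struct nf_hook_state *state)
	{
		if (!pskb_may_pull(skb, sizeof(struct iphdr)))
			return NF_DROP_REASON(skb, SKB_DROP_REASON_PKT_TOO_SMALL,
					      EINVAL);

		return NF_ACCEPT;
	}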
diff --git a/include/linux/netfilter/nf_conntrack_sctp.h b/include/linux/netfilter/nf_conntrack_sctp.h
index 625f491b95de..fb31312825ae 100644
--- a/include/linux/netfilter/nf_conntrack_sctp.h
+++ b/include/linux/netfilter/nf_conntrack_sctp.h
@@ -9,6 +9,7 @@ struct ip_ct_sctp {
enum sctp_conntrack state;
__be32 vtag[IP_CT_DIR_MAX];
+ u8 init[IP_CT_DIR_MAX];
u8 last_dir;
u8 flags;
};
diff --git a/include/linux/nfs4.h b/include/linux/nfs4.h
index 730003c4f4af..c11c4db34639 100644
--- a/include/linux/nfs4.h
+++ b/include/linux/nfs4.h
@@ -150,7 +150,7 @@ enum nfs_opnum4 {
OP_WRITE_SAME = 70,
OP_CLONE = 71,
- /* xattr support (RFC8726) */
+ /* xattr support (RFC8276) */
OP_GETXATTR = 72,
OP_SETXATTR = 73,
OP_LISTXATTRS = 74,
@@ -389,79 +389,203 @@ enum lock_type4 {
NFS4_WRITEW_LT = 4
};
+/*
+ * Symbol names and values are from RFC 7531 Section 2.
+ * "XDR Description of NFSv4.0"
+ */
+enum {
+ FATTR4_SUPPORTED_ATTRS = 0,
+ FATTR4_TYPE = 1,
+ FATTR4_FH_EXPIRE_TYPE = 2,
+ FATTR4_CHANGE = 3,
+ FATTR4_SIZE = 4,
+ FATTR4_LINK_SUPPORT = 5,
+ FATTR4_SYMLINK_SUPPORT = 6,
+ FATTR4_NAMED_ATTR = 7,
+ FATTR4_FSID = 8,
+ FATTR4_UNIQUE_HANDLES = 9,
+ FATTR4_LEASE_TIME = 10,
+ FATTR4_RDATTR_ERROR = 11,
+ FATTR4_ACL = 12,
+ FATTR4_ACLSUPPORT = 13,
+ FATTR4_ARCHIVE = 14,
+ FATTR4_CANSETTIME = 15,
+ FATTR4_CASE_INSENSITIVE = 16,
+ FATTR4_CASE_PRESERVING = 17,
+ FATTR4_CHOWN_RESTRICTED = 18,
+ FATTR4_FILEHANDLE = 19,
+ FATTR4_FILEID = 20,
+ FATTR4_FILES_AVAIL = 21,
+ FATTR4_FILES_FREE = 22,
+ FATTR4_FILES_TOTAL = 23,
+ FATTR4_FS_LOCATIONS = 24,
+ FATTR4_HIDDEN = 25,
+ FATTR4_HOMOGENEOUS = 26,
+ FATTR4_MAXFILESIZE = 27,
+ FATTR4_MAXLINK = 28,
+ FATTR4_MAXNAME = 29,
+ FATTR4_MAXREAD = 30,
+ FATTR4_MAXWRITE = 31,
+ FATTR4_MIMETYPE = 32,
+ FATTR4_MODE = 33,
+ FATTR4_NO_TRUNC = 34,
+ FATTR4_NUMLINKS = 35,
+ FATTR4_OWNER = 36,
+ FATTR4_OWNER_GROUP = 37,
+ FATTR4_QUOTA_AVAIL_HARD = 38,
+ FATTR4_QUOTA_AVAIL_SOFT = 39,
+ FATTR4_QUOTA_USED = 40,
+ FATTR4_RAWDEV = 41,
+ FATTR4_SPACE_AVAIL = 42,
+ FATTR4_SPACE_FREE = 43,
+ FATTR4_SPACE_TOTAL = 44,
+ FATTR4_SPACE_USED = 45,
+ FATTR4_SYSTEM = 46,
+ FATTR4_TIME_ACCESS = 47,
+ FATTR4_TIME_ACCESS_SET = 48,
+ FATTR4_TIME_BACKUP = 49,
+ FATTR4_TIME_CREATE = 50,
+ FATTR4_TIME_DELTA = 51,
+ FATTR4_TIME_METADATA = 52,
+ FATTR4_TIME_MODIFY = 53,
+ FATTR4_TIME_MODIFY_SET = 54,
+ FATTR4_MOUNTED_ON_FILEID = 55,
+};
+
+/*
+ * Symbol names and values are from RFC 5662 Section 2.
+ * "XDR Description of NFSv4.1"
+ */
+enum {
+ FATTR4_DIR_NOTIF_DELAY = 56,
+ FATTR4_DIRENT_NOTIF_DELAY = 57,
+ FATTR4_DACL = 58,
+ FATTR4_SACL = 59,
+ FATTR4_CHANGE_POLICY = 60,
+ FATTR4_FS_STATUS = 61,
+ FATTR4_FS_LAYOUT_TYPES = 62,
+ FATTR4_LAYOUT_HINT = 63,
+ FATTR4_LAYOUT_TYPES = 64,
+ FATTR4_LAYOUT_BLKSIZE = 65,
+ FATTR4_LAYOUT_ALIGNMENT = 66,
+ FATTR4_FS_LOCATIONS_INFO = 67,
+ FATTR4_MDSTHRESHOLD = 68,
+ FATTR4_RETENTION_GET = 69,
+ FATTR4_RETENTION_SET = 70,
+ FATTR4_RETENTEVT_GET = 71,
+ FATTR4_RETENTEVT_SET = 72,
+ FATTR4_RETENTION_HOLD = 73,
+ FATTR4_MODE_SET_MASKED = 74,
+ FATTR4_SUPPATTR_EXCLCREAT = 75,
+ FATTR4_FS_CHARSET_CAP = 76,
+};
+
+/*
+ * Symbol names and values are from RFC 7863 Section 2.
+ * "XDR Description of NFSv4.2"
+ */
+enum {
+ FATTR4_CLONE_BLKSIZE = 77,
+ FATTR4_SPACE_FREED = 78,
+ FATTR4_CHANGE_ATTR_TYPE = 79,
+ FATTR4_SEC_LABEL = 80,
+};
+
+/*
+ * Symbol names and values are from RFC 8275 Section 5.
+ * "The mode_umask Attribute"
+ */
+enum {
+ FATTR4_MODE_UMASK = 81,
+};
+
+/*
+ * Symbol names and values are from RFC 8276 Section 8.6.
+ * "Numeric Values Assigned to Protocol Extensions"
+ */
+enum {
+ FATTR4_XATTR_SUPPORT = 82,
+};
+
+/*
+ * The following internal definitions enable processing the above
+ * attribute bits within 32-bit word boundaries.
+ */
/* Mandatory Attributes */
-#define FATTR4_WORD0_SUPPORTED_ATTRS (1UL << 0)
-#define FATTR4_WORD0_TYPE (1UL << 1)
-#define FATTR4_WORD0_FH_EXPIRE_TYPE (1UL << 2)
-#define FATTR4_WORD0_CHANGE (1UL << 3)
-#define FATTR4_WORD0_SIZE (1UL << 4)
-#define FATTR4_WORD0_LINK_SUPPORT (1UL << 5)
-#define FATTR4_WORD0_SYMLINK_SUPPORT (1UL << 6)
-#define FATTR4_WORD0_NAMED_ATTR (1UL << 7)
-#define FATTR4_WORD0_FSID (1UL << 8)
-#define FATTR4_WORD0_UNIQUE_HANDLES (1UL << 9)
-#define FATTR4_WORD0_LEASE_TIME (1UL << 10)
-#define FATTR4_WORD0_RDATTR_ERROR (1UL << 11)
+#define FATTR4_WORD0_SUPPORTED_ATTRS BIT(FATTR4_SUPPORTED_ATTRS)
+#define FATTR4_WORD0_TYPE BIT(FATTR4_TYPE)
+#define FATTR4_WORD0_FH_EXPIRE_TYPE BIT(FATTR4_FH_EXPIRE_TYPE)
+#define FATTR4_WORD0_CHANGE BIT(FATTR4_CHANGE)
+#define FATTR4_WORD0_SIZE BIT(FATTR4_SIZE)
+#define FATTR4_WORD0_LINK_SUPPORT BIT(FATTR4_LINK_SUPPORT)
+#define FATTR4_WORD0_SYMLINK_SUPPORT BIT(FATTR4_SYMLINK_SUPPORT)
+#define FATTR4_WORD0_NAMED_ATTR BIT(FATTR4_NAMED_ATTR)
+#define FATTR4_WORD0_FSID BIT(FATTR4_FSID)
+#define FATTR4_WORD0_UNIQUE_HANDLES BIT(FATTR4_UNIQUE_HANDLES)
+#define FATTR4_WORD0_LEASE_TIME BIT(FATTR4_LEASE_TIME)
+#define FATTR4_WORD0_RDATTR_ERROR BIT(FATTR4_RDATTR_ERROR)
/* Mandatory in NFSv4.1 */
-#define FATTR4_WORD2_SUPPATTR_EXCLCREAT (1UL << 11)
+#define FATTR4_WORD2_SUPPATTR_EXCLCREAT BIT(FATTR4_SUPPATTR_EXCLCREAT - 64)
/* Recommended Attributes */
-#define FATTR4_WORD0_ACL (1UL << 12)
-#define FATTR4_WORD0_ACLSUPPORT (1UL << 13)
-#define FATTR4_WORD0_ARCHIVE (1UL << 14)
-#define FATTR4_WORD0_CANSETTIME (1UL << 15)
-#define FATTR4_WORD0_CASE_INSENSITIVE (1UL << 16)
-#define FATTR4_WORD0_CASE_PRESERVING (1UL << 17)
-#define FATTR4_WORD0_CHOWN_RESTRICTED (1UL << 18)
-#define FATTR4_WORD0_FILEHANDLE (1UL << 19)
-#define FATTR4_WORD0_FILEID (1UL << 20)
-#define FATTR4_WORD0_FILES_AVAIL (1UL << 21)
-#define FATTR4_WORD0_FILES_FREE (1UL << 22)
-#define FATTR4_WORD0_FILES_TOTAL (1UL << 23)
-#define FATTR4_WORD0_FS_LOCATIONS (1UL << 24)
-#define FATTR4_WORD0_HIDDEN (1UL << 25)
-#define FATTR4_WORD0_HOMOGENEOUS (1UL << 26)
-#define FATTR4_WORD0_MAXFILESIZE (1UL << 27)
-#define FATTR4_WORD0_MAXLINK (1UL << 28)
-#define FATTR4_WORD0_MAXNAME (1UL << 29)
-#define FATTR4_WORD0_MAXREAD (1UL << 30)
-#define FATTR4_WORD0_MAXWRITE (1UL << 31)
-#define FATTR4_WORD1_MIMETYPE (1UL << 0)
-#define FATTR4_WORD1_MODE (1UL << 1)
-#define FATTR4_WORD1_NO_TRUNC (1UL << 2)
-#define FATTR4_WORD1_NUMLINKS (1UL << 3)
-#define FATTR4_WORD1_OWNER (1UL << 4)
-#define FATTR4_WORD1_OWNER_GROUP (1UL << 5)
-#define FATTR4_WORD1_QUOTA_HARD (1UL << 6)
-#define FATTR4_WORD1_QUOTA_SOFT (1UL << 7)
-#define FATTR4_WORD1_QUOTA_USED (1UL << 8)
-#define FATTR4_WORD1_RAWDEV (1UL << 9)
-#define FATTR4_WORD1_SPACE_AVAIL (1UL << 10)
-#define FATTR4_WORD1_SPACE_FREE (1UL << 11)
-#define FATTR4_WORD1_SPACE_TOTAL (1UL << 12)
-#define FATTR4_WORD1_SPACE_USED (1UL << 13)
-#define FATTR4_WORD1_SYSTEM (1UL << 14)
-#define FATTR4_WORD1_TIME_ACCESS (1UL << 15)
-#define FATTR4_WORD1_TIME_ACCESS_SET (1UL << 16)
-#define FATTR4_WORD1_TIME_BACKUP (1UL << 17)
-#define FATTR4_WORD1_TIME_CREATE (1UL << 18)
-#define FATTR4_WORD1_TIME_DELTA (1UL << 19)
-#define FATTR4_WORD1_TIME_METADATA (1UL << 20)
-#define FATTR4_WORD1_TIME_MODIFY (1UL << 21)
-#define FATTR4_WORD1_TIME_MODIFY_SET (1UL << 22)
-#define FATTR4_WORD1_MOUNTED_ON_FILEID (1UL << 23)
-#define FATTR4_WORD1_DACL (1UL << 26)
-#define FATTR4_WORD1_SACL (1UL << 27)
-#define FATTR4_WORD1_FS_LAYOUT_TYPES (1UL << 30)
-#define FATTR4_WORD2_LAYOUT_TYPES (1UL << 0)
-#define FATTR4_WORD2_LAYOUT_BLKSIZE (1UL << 1)
-#define FATTR4_WORD2_MDSTHRESHOLD (1UL << 4)
-#define FATTR4_WORD2_CLONE_BLKSIZE (1UL << 13)
-#define FATTR4_WORD2_CHANGE_ATTR_TYPE (1UL << 15)
-#define FATTR4_WORD2_SECURITY_LABEL (1UL << 16)
-#define FATTR4_WORD2_MODE_UMASK (1UL << 17)
-#define FATTR4_WORD2_XATTR_SUPPORT (1UL << 18)
+#define FATTR4_WORD0_ACL BIT(FATTR4_ACL)
+#define FATTR4_WORD0_ACLSUPPORT BIT(FATTR4_ACLSUPPORT)
+#define FATTR4_WORD0_ARCHIVE BIT(FATTR4_ARCHIVE)
+#define FATTR4_WORD0_CANSETTIME BIT(FATTR4_CANSETTIME)
+#define FATTR4_WORD0_CASE_INSENSITIVE BIT(FATTR4_CASE_INSENSITIVE)
+#define FATTR4_WORD0_CASE_PRESERVING BIT(FATTR4_CASE_PRESERVING)
+#define FATTR4_WORD0_CHOWN_RESTRICTED BIT(FATTR4_CHOWN_RESTRICTED)
+#define FATTR4_WORD0_FILEHANDLE BIT(FATTR4_FILEHANDLE)
+#define FATTR4_WORD0_FILEID BIT(FATTR4_FILEID)
+#define FATTR4_WORD0_FILES_AVAIL BIT(FATTR4_FILES_AVAIL)
+#define FATTR4_WORD0_FILES_FREE BIT(FATTR4_FILES_FREE)
+#define FATTR4_WORD0_FILES_TOTAL BIT(FATTR4_FILES_TOTAL)
+#define FATTR4_WORD0_FS_LOCATIONS BIT(FATTR4_FS_LOCATIONS)
+#define FATTR4_WORD0_HIDDEN BIT(FATTR4_HIDDEN)
+#define FATTR4_WORD0_HOMOGENEOUS BIT(FATTR4_HOMOGENEOUS)
+#define FATTR4_WORD0_MAXFILESIZE BIT(FATTR4_MAXFILESIZE)
+#define FATTR4_WORD0_MAXLINK BIT(FATTR4_MAXLINK)
+#define FATTR4_WORD0_MAXNAME BIT(FATTR4_MAXNAME)
+#define FATTR4_WORD0_MAXREAD BIT(FATTR4_MAXREAD)
+#define FATTR4_WORD0_MAXWRITE BIT(FATTR4_MAXWRITE)
+
+#define FATTR4_WORD1_MIMETYPE BIT(FATTR4_MIMETYPE - 32)
+#define FATTR4_WORD1_MODE BIT(FATTR4_MODE - 32)
+#define FATTR4_WORD1_NO_TRUNC BIT(FATTR4_NO_TRUNC - 32)
+#define FATTR4_WORD1_NUMLINKS BIT(FATTR4_NUMLINKS - 32)
+#define FATTR4_WORD1_OWNER BIT(FATTR4_OWNER - 32)
+#define FATTR4_WORD1_OWNER_GROUP BIT(FATTR4_OWNER_GROUP - 32)
+#define FATTR4_WORD1_QUOTA_HARD BIT(FATTR4_QUOTA_AVAIL_HARD - 32)
+#define FATTR4_WORD1_QUOTA_SOFT BIT(FATTR4_QUOTA_AVAIL_SOFT - 32)
+#define FATTR4_WORD1_QUOTA_USED BIT(FATTR4_QUOTA_USED - 32)
+#define FATTR4_WORD1_RAWDEV BIT(FATTR4_RAWDEV - 32)
+#define FATTR4_WORD1_SPACE_AVAIL BIT(FATTR4_SPACE_AVAIL - 32)
+#define FATTR4_WORD1_SPACE_FREE BIT(FATTR4_SPACE_FREE - 32)
+#define FATTR4_WORD1_SPACE_TOTAL BIT(FATTR4_SPACE_TOTAL - 32)
+#define FATTR4_WORD1_SPACE_USED BIT(FATTR4_SPACE_USED - 32)
+#define FATTR4_WORD1_SYSTEM BIT(FATTR4_SYSTEM - 32)
+#define FATTR4_WORD1_TIME_ACCESS BIT(FATTR4_TIME_ACCESS - 32)
+#define FATTR4_WORD1_TIME_ACCESS_SET BIT(FATTR4_TIME_ACCESS_SET - 32)
+#define FATTR4_WORD1_TIME_BACKUP BIT(FATTR4_TIME_BACKUP - 32)
+#define FATTR4_WORD1_TIME_CREATE BIT(FATTR4_TIME_CREATE - 32)
+#define FATTR4_WORD1_TIME_DELTA BIT(FATTR4_TIME_DELTA - 32)
+#define FATTR4_WORD1_TIME_METADATA BIT(FATTR4_TIME_METADATA - 32)
+#define FATTR4_WORD1_TIME_MODIFY BIT(FATTR4_TIME_MODIFY - 32)
+#define FATTR4_WORD1_TIME_MODIFY_SET BIT(FATTR4_TIME_MODIFY_SET - 32)
+#define FATTR4_WORD1_MOUNTED_ON_FILEID BIT(FATTR4_MOUNTED_ON_FILEID - 32)
+#define FATTR4_WORD1_DACL BIT(FATTR4_DACL - 32)
+#define FATTR4_WORD1_SACL BIT(FATTR4_SACL - 32)
+#define FATTR4_WORD1_FS_LAYOUT_TYPES BIT(FATTR4_FS_LAYOUT_TYPES - 32)
+
+#define FATTR4_WORD2_LAYOUT_TYPES BIT(FATTR4_LAYOUT_TYPES - 64)
+#define FATTR4_WORD2_LAYOUT_BLKSIZE BIT(FATTR4_LAYOUT_BLKSIZE - 64)
+#define FATTR4_WORD2_MDSTHRESHOLD BIT(FATTR4_MDSTHRESHOLD - 64)
+#define FATTR4_WORD2_CLONE_BLKSIZE BIT(FATTR4_CLONE_BLKSIZE - 64)
+#define FATTR4_WORD2_CHANGE_ATTR_TYPE BIT(FATTR4_CHANGE_ATTR_TYPE - 64)
+#define FATTR4_WORD2_SECURITY_LABEL BIT(FATTR4_SEC_LABEL - 64)
+#define FATTR4_WORD2_MODE_UMASK BIT(FATTR4_MODE_UMASK - 64)
+#define FATTR4_WORD2_XATTR_SUPPORT BIT(FATTR4_XATTR_SUPPORT - 64)
/* MDS threshold bitmap bits */
#define THRESHOLD_RD (1UL << 0)
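For illustration, how the attribute enums and the FATTR4_WORDn_* masks above relate (a hypothetical helper, not part of this patch): attribute number N lands in 32-bit bitmap word N / 32 at bit N % 32, which is exactly what the word definitions encode.

static inline bool fattr4_attr_set(const u32 *bitmap, unsigned int attr)
{
	/* e.g. FATTR4_MODE (33) -> word 1, bit 1, i.e. FATTR4_WORD1_MODE */
	return bitmap[attr / 32] & (1U << (attr % 32));
}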
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index 20eeba8b009d..cd628c4b011e 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -48,6 +48,7 @@ struct nfs_client {
#define NFS_CS_NOPING 6 /* - don't ping on connect */
#define NFS_CS_DS 7 /* - Server is a DS */
#define NFS_CS_REUSEPORT 8 /* - reuse src port on reconnect */
+#define NFS_CS_PNFS 9 /* - Server used for pnfs */
struct sockaddr_storage cl_addr; /* server identifier */
size_t cl_addrlen;
char * cl_hostname; /* hostname of server */
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index aa9f4c6ebe26..1c315f854ea8 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -157,7 +157,9 @@ extern void nfs_unlock_request(struct nfs_page *req);
extern void nfs_unlock_and_release_request(struct nfs_page *);
extern struct nfs_page *nfs_page_group_lock_head(struct nfs_page *req);
extern int nfs_page_group_lock_subrequests(struct nfs_page *head);
-extern void nfs_join_page_group(struct nfs_page *head, struct inode *inode);
+extern void nfs_join_page_group(struct nfs_page *head,
+ struct nfs_commit_info *cinfo,
+ struct inode *inode);
extern int nfs_page_group_lock(struct nfs_page *);
extern void nfs_page_group_unlock(struct nfs_page *);
extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
diff --git a/include/linux/numa.h b/include/linux/numa.h
index 59df211d051f..a904861de800 100644
--- a/include/linux/numa.h
+++ b/include/linux/numa.h
@@ -12,6 +12,7 @@
#define MAX_NUMNODES (1 << NODES_SHIFT)
#define NUMA_NO_NODE (-1)
+#define NUMA_NO_MEMBLK (-1)
/* optionally keep NUMA memory info available post init */
#ifdef CONFIG_NUMA_KEEP_MEMINFO
@@ -25,7 +26,7 @@
#include <asm/sparsemem.h>
/* Generic implementation available */
-int numa_map_to_online_node(int node);
+int numa_nearest_node(int node, unsigned int state);
#ifndef memory_add_physaddr_to_nid
static inline int memory_add_physaddr_to_nid(u64 start)
@@ -43,11 +44,18 @@ static inline int phys_to_target_node(u64 start)
return 0;
}
#endif
+#ifndef numa_fill_memblks
+static inline int __init numa_fill_memblks(u64 start, u64 end)
+{
+ return NUMA_NO_MEMBLK;
+}
+#endif
#else /* !CONFIG_NUMA */
-static inline int numa_map_to_online_node(int node)
+static inline int numa_nearest_node(int node, unsigned int state)
{
return NUMA_NO_NODE;
}
+
static inline int memory_add_physaddr_to_nid(u64 start)
{
return 0;
@@ -58,6 +66,8 @@ static inline int phys_to_target_node(u64 start)
}
#endif
+#define numa_map_to_online_node(node) numa_nearest_node(node, N_ONLINE)
+
#ifdef CONFIG_HAVE_ARCH_NODE_DEV_GROUP
extern const struct attribute_group arch_node_dev_group;
#endif
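For illustration, a minimal sketch of the renamed helper (hypothetical caller, assumes <linux/slab.h>): numa_map_to_online_node() is now a thin wrapper around numa_nearest_node(), which can also look up the nearest node in other node states.

static void *alloc_near_demo(int nid, size_t size)
{
	/* prefer the closest node that actually has memory */
	return kmalloc_node(size, GFP_KERNEL,
			    numa_nearest_node(nid, N_MEMORY));
}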
diff --git a/include/linux/nvme-auth.h b/include/linux/nvme-auth.h
index dcb8030062dd..c1d0bc5d9624 100644
--- a/include/linux/nvme-auth.h
+++ b/include/linux/nvme-auth.h
@@ -9,9 +9,9 @@
#include <crypto/kpp.h>
struct nvme_dhchap_key {
- u8 *key;
size_t len;
u8 hash;
+ u8 key[];
};
u32 nvme_auth_get_seqnum(void);
@@ -24,10 +24,13 @@ const char *nvme_auth_digest_name(u8 hmac_id);
size_t nvme_auth_hmac_hash_len(u8 hmac_id);
u8 nvme_auth_hmac_id(const char *hmac_name);
+u32 nvme_auth_key_struct_size(u32 key_len);
struct nvme_dhchap_key *nvme_auth_extract_key(unsigned char *secret,
u8 key_hash);
void nvme_auth_free_key(struct nvme_dhchap_key *key);
-u8 *nvme_auth_transform_key(struct nvme_dhchap_key *key, char *nqn);
+struct nvme_dhchap_key *nvme_auth_alloc_key(u32 len, u8 hash);
+struct nvme_dhchap_key *nvme_auth_transform_key(
+ struct nvme_dhchap_key *key, char *nqn);
int nvme_auth_generate_key(u8 *secret, struct nvme_dhchap_key **ret_key);
int nvme_auth_augmented_challenge(u8 hmac_id, u8 *skey, size_t skey_len,
u8 *challenge, u8 *aug, size_t hlen);
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index f6ef8cf5d774..4109f1bd6128 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -53,10 +53,10 @@
struct nvmefc_ls_req {
void *rqstaddr;
dma_addr_t rqstdma;
- __le32 rqstlen;
+ u32 rqstlen;
void *rspaddr;
dma_addr_t rspdma;
- __le32 rsplen;
+ u32 rsplen;
u32 timeout;
void *private;
@@ -120,7 +120,7 @@ struct nvmefc_ls_req {
struct nvmefc_ls_rsp {
void *rspbuf;
dma_addr_t rspdma;
- __le32 rsplen;
+ u16 rsplen;
void (*done)(struct nvmefc_ls_rsp *rsp);
void *nvme_fc_private; /* LLDD is not to access !! */
diff --git a/include/linux/nvme-keyring.h b/include/linux/nvme-keyring.h
new file mode 100644
index 000000000000..4efea9dd967c
--- /dev/null
+++ b/include/linux/nvme-keyring.h
@@ -0,0 +1,36 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Copyright (c) 2023 Hannes Reinecke, SUSE Labs
+ */
+
+#ifndef _NVME_KEYRING_H
+#define _NVME_KEYRING_H
+
+#ifdef CONFIG_NVME_KEYRING
+
+key_serial_t nvme_tls_psk_default(struct key *keyring,
+ const char *hostnqn, const char *subnqn);
+
+key_serial_t nvme_keyring_id(void);
+int nvme_keyring_init(void);
+void nvme_keyring_exit(void);
+
+#else
+
+static inline key_serial_t nvme_tls_psk_default(struct key *keyring,
+ const char *hostnqn, const char *subnqn)
+{
+ return 0;
+}
+static inline key_serial_t nvme_keyring_id(void)
+{
+ return 0;
+}
+static inline int nvme_keyring_init(void)
+{
+ return 0;
+}
+static inline void nvme_keyring_exit(void) {}
+
+#endif /* !CONFIG_NVME_KEYRING */
+#endif /* _NVME_KEYRING_H */
diff --git a/include/linux/nvme-tcp.h b/include/linux/nvme-tcp.h
index 57ebe1267f7f..e07e8978d691 100644
--- a/include/linux/nvme-tcp.h
+++ b/include/linux/nvme-tcp.h
@@ -18,6 +18,12 @@ enum nvme_tcp_pfv {
NVME_TCP_PFV_1_0 = 0x0,
};
+enum nvme_tcp_tls_cipher {
+ NVME_TCP_TLS_CIPHER_INVALID = 0,
+ NVME_TCP_TLS_CIPHER_SHA256 = 1,
+ NVME_TCP_TLS_CIPHER_SHA384 = 2,
+};
+
enum nvme_tcp_fatal_error_status {
NVME_TCP_FES_INVALID_PDU_HDR = 0x01,
NVME_TCP_FES_PDU_SEQ_ERR = 0x02,
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 26dd3f859d9d..a7ba74babad7 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -108,6 +108,13 @@ enum {
NVMF_RDMA_CMS_RDMA_CM = 1, /* Sockets based endpoint addressing */
};
+/* TSAS SECTYPE for TCP transport */
+enum {
+ NVMF_TCP_SECTYPE_NONE = 0, /* No Security */
+ NVMF_TCP_SECTYPE_TLS12 = 1, /* TLSv1.2, NVMe-oF 1.1 and NVMe-TCP 3.6.1.1 */
+ NVMF_TCP_SECTYPE_TLS13 = 2, /* TLSv1.3, NVMe-oF 1.1 and NVMe-TCP 3.6.1.1 */
+};
+
#define NVME_AQ_DEPTH 32
#define NVME_NR_AEN_COMMANDS 1
#define NVME_AQ_BLK_MQ_DEPTH (NVME_AQ_DEPTH - NVME_NR_AEN_COMMANDS)
@@ -1493,6 +1500,9 @@ struct nvmf_disc_rsp_page_entry {
__u16 pkey;
__u8 resv10[246];
} rdma;
+ struct tcp {
+ __u8 sectype;
+ } tcp;
} tsas;
};
diff --git a/include/linux/nvmem-consumer.h b/include/linux/nvmem-consumer.h
index 4523e4e83319..6ec4b9743e25 100644
--- a/include/linux/nvmem-consumer.h
+++ b/include/linux/nvmem-consumer.h
@@ -127,6 +127,12 @@ static inline int nvmem_cell_write(struct nvmem_cell *cell,
return -EOPNOTSUPP;
}
+static inline int nvmem_cell_read_u8(struct device *dev,
+ const char *cell_id, u8 *val)
+{
+ return -EOPNOTSUPP;
+}
+
static inline int nvmem_cell_read_u16(struct device *dev,
const char *cell_id, u16 *val)
{
diff --git a/include/linux/nvmem-provider.h b/include/linux/nvmem-provider.h
index dae26295e6be..e3930835235b 100644
--- a/include/linux/nvmem-provider.h
+++ b/include/linux/nvmem-provider.h
@@ -82,13 +82,13 @@ struct nvmem_cell_info {
* @owner: Pointer to exporter module. Used for refcounting.
* @cells: Optional array of pre-defined NVMEM cells.
* @ncells: Number of elements in cells.
+ * @add_legacy_fixed_of_cells: Read fixed NVMEM cells from old OF syntax.
* @keepout: Optional array of keepout ranges (sorted ascending by start).
* @nkeepout: Number of elements in the keepout array.
* @type: Type of the nvmem storage
* @read_only: Device is read-only.
 * @root_only: Device is accessible to root only.
* @of_node: If given, this will be used instead of the parent's of_node.
- * @no_of_node: Device should not use the parent's of_node even if it's !NULL.
* @reg_read: Callback to read data.
* @reg_write: Callback to write data.
* @size: Device size.
@@ -112,6 +112,7 @@ struct nvmem_config {
struct module *owner;
const struct nvmem_cell_info *cells;
int ncells;
+ bool add_legacy_fixed_of_cells;
const struct nvmem_keepout *keepout;
unsigned int nkeepout;
enum nvmem_type type;
@@ -120,7 +121,6 @@ struct nvmem_config {
bool ignore_wp;
struct nvmem_layout *layout;
struct device_node *of_node;
- bool no_of_node;
nvmem_reg_read_t reg_read;
nvmem_reg_write_t reg_write;
int size;
diff --git a/include/linux/objpool.h b/include/linux/objpool.h
new file mode 100644
index 000000000000..15aff4a17f0c
--- /dev/null
+++ b/include/linux/objpool.h
@@ -0,0 +1,181 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+#ifndef _LINUX_OBJPOOL_H
+#define _LINUX_OBJPOOL_H
+
+#include <linux/types.h>
+#include <linux/refcount.h>
+
+/*
+ * objpool: ring-array based lockless MPMC queue
+ *
+ * Copyright: wuqiang.matt@bytedance.com, mhiramat@kernel.org
+ *
+ * objpool is a scalable implementation of a high performance queue for
+ * object allocation and reclamation, such as kretprobe instances.
+ *
+ * By leveraging a percpu ring-array to mitigate hot spots of memory
+ * contention, it delivers near-linear scalability in highly parallel
+ * scenarios. The objpool is best suited for the following cases:
+ * 1) Memory allocation or reclamation is prohibited or too expensive
+ * 2) Consumers are of different priorities, such as irqs and threads
+ *
+ * Limitations:
+ * 1) The maximum number of objects (capacity) is fixed after objpool creation
+ * 2) All pre-allocated objects are managed in a percpu ring array,
+ *    which consumes more memory than a linked list
+ */
+
+/**
+ * struct objpool_slot - percpu ring array of objpool
+ * @head: head sequence of the local ring array (to retrieve at)
+ * @tail: tail sequence of the local ring array (to append at)
+ * @last: the last sequence number marked as ready for retrieve
+ * @mask: bits mask for modulo capacity to compute array indexes
+ * @entries: object entries on this slot
+ *
+ * Represents a cpu-local, array-based ring buffer whose size is fixed
+ * during object pool initialization. On a NUMA system the percpu slot
+ * is allocated from node-local memory and kept compact in contiguous
+ * memory: the objects assigned to this CPU are stored immediately after
+ * the body of the objpool_slot.
+ *
+ * The real size of the ring array is far smaller than the value range
+ * of head and tail, which are typed as uint32_t: [0, 2^32). Only the
+ * lower bits (mask) of head and tail are used as the actual position in
+ * the ring array, so the ring array acts like a small sliding window
+ * that always moves forward within [0, 2^32).
+ */
+struct objpool_slot {
+ uint32_t head;
+ uint32_t tail;
+ uint32_t last;
+ uint32_t mask;
+ void *entries[];
+} __packed;
+
+struct objpool_head;
+
+/*
+ * Caller-specified callback for initial object setup; it is called only
+ * once per object, just after the object's memory is allocated.
+ */
+typedef int (*objpool_init_obj_cb)(void *obj, void *context);
+
+/* caller-specified cleanup callback for objpool destruction */
+typedef int (*objpool_fini_cb)(struct objpool_head *head, void *context);
+
+/**
+ * struct objpool_head - object pooling metadata
+ * @obj_size: object size, aligned to sizeof(void *)
+ * @nr_objs: total objs (to be pre-allocated with objpool)
+ * @nr_cpus: local copy of nr_cpu_ids
+ * @capacity: max objs that can be managed by one objpool_slot
+ * @gfp: gfp flags for kmalloc & vmalloc
+ * @ref: refcount of objpool
+ * @flags: flags for objpool management
+ * @cpu_slots: pointer to the array of objpool_slot
+ * @release: resource cleanup callback
+ * @context: caller-provided context
+ */
+struct objpool_head {
+ int obj_size;
+ int nr_objs;
+ int nr_cpus;
+ int capacity;
+ gfp_t gfp;
+ refcount_t ref;
+ unsigned long flags;
+ struct objpool_slot **cpu_slots;
+ objpool_fini_cb release;
+ void *context;
+};
+
+#define OBJPOOL_NR_OBJECT_MAX (1UL << 24) /* maximum number of total objects */
+#define OBJPOOL_OBJECT_SIZE_MAX (1UL << 16) /* maximum size of an object */
+
+/**
+ * objpool_init() - initialize objpool and pre-allocated objects
+ * @pool: the object pool to be initialized, declared by caller
+ * @nr_objs: total objects to be pre-allocated by this object pool
+ * @object_size: size of an object (should be > 0)
+ * @gfp: flags for memory allocation (via kmalloc or vmalloc)
+ * @context: user context for object initialization callback
+ * @objinit: object initialization callback for extra setup
+ * @release: cleanup callback for extra cleanup task
+ *
+ * return value: 0 for success, otherwise error code
+ *
+ * All pre-allocated objects are zeroed after memory allocation. The
+ * caller can perform extra initialization in the objinit callback,
+ * which is invoked just after slot allocation and only once per object.
+ * After that the objpool never touches the contents of the objects, so
+ * it is the caller's duty to reinitialize an object after each pop
+ * (object allocation) and to clear it before each push (object
+ * reclamation).
+ */
+int objpool_init(struct objpool_head *pool, int nr_objs, int object_size,
+ gfp_t gfp, void *context, objpool_init_obj_cb objinit,
+ objpool_fini_cb release);
+
+/**
+ * objpool_pop() - allocate an object from objpool
+ * @pool: object pool
+ *
+ * return value: object ptr or NULL if failed
+ */
+void *objpool_pop(struct objpool_head *pool);
+
+/**
+ * objpool_push() - reclaim the object and return back to objpool
+ * @obj: object ptr to be pushed to objpool
+ * @pool: object pool
+ *
+ * return: 0 or error code (it fails only when the caller pushes the
+ * same object multiple times, or pushes an object that does not belong
+ * to the objpool)
+ */
+int objpool_push(void *obj, struct objpool_head *pool);
+
+/**
+ * objpool_drop() - discard the object and deref objpool
+ * @obj: object ptr to be discarded
+ * @pool: object pool
+ *
+ * return: 0 if objpool was released; -EAGAIN if there are still
+ * outstanding objects
+ *
+ * objpool_drop() is normally used to release outstanding objects after
+ * objpool cleanup (objpool_fini()). Consider this example: a kretprobe
+ * is unregistered and objpool_fini() is called to release all remaining
+ * objects, but some objects are still in use by unfinished kretprobes
+ * (e.g. inside a blockable function such as sys_accept). Only when the
+ * last outstanding object is dropped via objpool_drop() can the whole
+ * objpool be released.
+ */
+int objpool_drop(void *obj, struct objpool_head *pool);
+
+/**
+ * objpool_free() - forcibly release the objpool (all objects are freed)
+ * @pool: object pool to be released
+ */
+void objpool_free(struct objpool_head *pool);
+
+/**
+ * objpool_fini() - deref object pool (also releasing unused objects)
+ * @pool: object pool to be dereferenced
+ *
+ * objpool_fini() tries to release all remaining free objects and then
+ * drops an extra reference of the objpool. If all objects have already
+ * been returned to the objpool (the synchronous use case), the objpool
+ * itself is freed as well. If there are still outstanding objects (the
+ * asynchronous use case, such as a blockable kretprobe), the objpool is
+ * not released until the last outstanding object is dropped; the caller
+ * must also ensure that no objpool_push() is concurrently in flight.
+ * Normally RCU is required to guarantee that all ongoing objpool_push()
+ * calls have finished before objpool_fini() is invoked, as test_objpool,
+ * kretprobe and rethook do.
+ */
+void objpool_fini(struct objpool_head *pool);
+
+#endif /* _LINUX_OBJPOOL_H */
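For illustration, a minimal synchronous use of the new objpool API declared above (hypothetical caller code, not part of this patch; "struct my_obj" and the demo functions are invented names):

struct my_obj {
	int id;
};

static int my_objinit(void *obj, void *context)
{
	((struct my_obj *)obj)->id = -1;	/* extra per-object setup */
	return 0;
}

static int my_pool_demo(void)
{
	struct objpool_head pool;
	struct my_obj *obj;
	int ret;

	ret = objpool_init(&pool, 16, sizeof(struct my_obj),
			   GFP_KERNEL, NULL, my_objinit, NULL);
	if (ret)
		return ret;

	obj = objpool_pop(&pool);		/* NULL when the pool is exhausted */
	if (obj)
		objpool_push(obj, &pool);	/* return the object to the pool */

	objpool_fini(&pool);			/* all objects returned: pool is freed */
	return 0;
}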
diff --git a/include/linux/objtool.h b/include/linux/objtool.h
index 03f82c2c2ebf..33212e93f4a6 100644
--- a/include/linux/objtool.h
+++ b/include/linux/objtool.h
@@ -48,13 +48,13 @@
#define ANNOTATE_NOENDBR \
"986: \n\t" \
".pushsection .discard.noendbr\n\t" \
- ".long 986b - .\n\t" \
+ ".long 986b\n\t" \
".popsection\n\t"
#define ASM_REACHABLE \
"998:\n\t" \
".pushsection .discard.reachable\n\t" \
- ".long 998b - .\n\t" \
+ ".long 998b\n\t" \
".popsection\n\t"
#else /* __ASSEMBLY__ */
@@ -66,7 +66,7 @@
#define ANNOTATE_INTRA_FUNCTION_CALL \
999: \
.pushsection .discard.intra_function_calls; \
- .long 999b - .; \
+ .long 999b; \
.popsection;
/*
@@ -118,7 +118,7 @@
.macro ANNOTATE_NOENDBR
.Lhere_\@:
.pushsection .discard.noendbr
- .long .Lhere_\@ - .
+ .long .Lhere_\@
.popsection
.endm
@@ -130,7 +130,8 @@
* it will be ignored.
*/
.macro VALIDATE_UNRET_BEGIN
-#if defined(CONFIG_NOINSTR_VALIDATION) && defined(CONFIG_CPU_UNRET_ENTRY)
+#if defined(CONFIG_NOINSTR_VALIDATION) && \
+ (defined(CONFIG_CPU_UNRET_ENTRY) || defined(CONFIG_CPU_SRSO))
.Lhere_\@:
.pushsection .discard.validate_unret
.long .Lhere_\@ - .
@@ -141,7 +142,7 @@
.macro REACHABLE
.Lhere_\@:
.pushsection .discard.reachable
- .long .Lhere_\@ - .
+ .long .Lhere_\@
.popsection
.endm
diff --git a/include/linux/oid_registry.h b/include/linux/oid_registry.h
index f86a08ba0207..3921fbed0b28 100644
--- a/include/linux/oid_registry.h
+++ b/include/linux/oid_registry.h
@@ -17,12 +17,10 @@
* build_OID_registry.pl to generate the data for look_up_OID().
*/
enum OID {
- OID_id_dsa_with_sha1, /* 1.2.840.10030.4.3 */
OID_id_dsa, /* 1.2.840.10040.4.1 */
OID_id_ecPublicKey, /* 1.2.840.10045.2.1 */
OID_id_prime192v1, /* 1.2.840.10045.3.1.1 */
OID_id_prime256v1, /* 1.2.840.10045.3.1.7 */
- OID_id_ecdsa_with_sha1, /* 1.2.840.10045.4.1 */
OID_id_ecdsa_with_sha224, /* 1.2.840.10045.4.3.1 */
OID_id_ecdsa_with_sha256, /* 1.2.840.10045.4.3.2 */
OID_id_ecdsa_with_sha384, /* 1.2.840.10045.4.3.3 */
@@ -30,10 +28,6 @@ enum OID {
/* PKCS#1 {iso(1) member-body(2) us(840) rsadsi(113549) pkcs(1) pkcs-1(1)} */
OID_rsaEncryption, /* 1.2.840.113549.1.1.1 */
- OID_md2WithRSAEncryption, /* 1.2.840.113549.1.1.2 */
- OID_md3WithRSAEncryption, /* 1.2.840.113549.1.1.3 */
- OID_md4WithRSAEncryption, /* 1.2.840.113549.1.1.4 */
- OID_sha1WithRSAEncryption, /* 1.2.840.113549.1.1.5 */
OID_sha256WithRSAEncryption, /* 1.2.840.113549.1.1.11 */
OID_sha384WithRSAEncryption, /* 1.2.840.113549.1.1.12 */
OID_sha512WithRSAEncryption, /* 1.2.840.113549.1.1.13 */
@@ -49,11 +43,6 @@ enum OID {
OID_smimeCapabilites, /* 1.2.840.113549.1.9.15 */
OID_smimeAuthenticatedAttrs, /* 1.2.840.113549.1.9.16.2.11 */
- /* {iso(1) member-body(2) us(840) rsadsi(113549) digestAlgorithm(2)} */
- OID_md2, /* 1.2.840.113549.2.2 */
- OID_md4, /* 1.2.840.113549.2.4 */
- OID_md5, /* 1.2.840.113549.2.5 */
-
OID_mskrb5, /* 1.2.840.48018.1.2.2 */
OID_krb5, /* 1.2.840.113554.1.2.2 */
OID_krb5u2u, /* 1.2.840.113554.1.2.2.3 */
@@ -75,7 +64,6 @@ enum OID {
OID_PKU2U, /* 1.3.5.1.5.2.7 */
OID_Scram, /* 1.3.6.1.5.5.14 */
OID_certAuthInfoAccess, /* 1.3.6.1.5.5.7.1.1 */
- OID_sha1, /* 1.3.14.3.2.26 */
OID_id_ansip384r1, /* 1.3.132.0.34 */
OID_sha256, /* 2.16.840.1.101.3.4.2.1 */
OID_sha384, /* 2.16.840.1.101.3.4.2.2 */
@@ -141,6 +129,17 @@ enum OID {
OID_TPMImportableKey, /* 2.23.133.10.1.4 */
OID_TPMSealedData, /* 2.23.133.10.1.5 */
+ /* CSOR FIPS-202 SHA-3 */
+ OID_sha3_256, /* 2.16.840.1.101.3.4.2.8 */
+ OID_sha3_384, /* 2.16.840.1.101.3.4.2.9 */
+ OID_sha3_512, /* 2.16.840.1.101.3.4.2.10 */
+ OID_id_ecdsa_with_sha3_256, /* 2.16.840.1.101.3.4.3.10 */
+ OID_id_ecdsa_with_sha3_384, /* 2.16.840.1.101.3.4.3.11 */
+ OID_id_ecdsa_with_sha3_512, /* 2.16.840.1.101.3.4.3.12 */
+ OID_id_rsassa_pkcs1_v1_5_with_sha3_256, /* 2.16.840.1.101.3.4.3.14 */
+ OID_id_rsassa_pkcs1_v1_5_with_sha3_384, /* 2.16.840.1.101.3.4.3.15 */
+ OID_id_rsassa_pkcs1_v1_5_with_sha3_512, /* 2.16.840.1.101.3.4.3.16 */
+
OID__NR
};
diff --git a/include/linux/overflow.h b/include/linux/overflow.h
index f9b60313eaea..7b5cf4a5cd19 100644
--- a/include/linux/overflow.h
+++ b/include/linux/overflow.h
@@ -309,4 +309,39 @@ static inline size_t __must_check size_sub(size_t minuend, size_t subtrahend)
#define struct_size_t(type, member, count) \
struct_size((type *)NULL, member, count)
+/**
+ * _DEFINE_FLEX() - helper macro for DEFINE_FLEX() family.
+ * Enables caller macro to pass (different) initializer.
+ *
+ * @type: structure type name, including "struct" keyword.
+ * @name: Name for a variable to define.
+ * @member: Name of the array member.
+ * @count: Number of elements in the array; must be compile-time const.
+ * @initializer: initializer expression (could be empty for no init).
+ */
+#define _DEFINE_FLEX(type, name, member, count, initializer) \
+ _Static_assert(__builtin_constant_p(count), \
+ "onstack flex array members require compile-time const count"); \
+ union { \
+ u8 bytes[struct_size_t(type, member, count)]; \
+ type obj; \
+ } name##_u initializer; \
+ type *name = (type *)&name##_u
+
+/**
+ * DEFINE_FLEX() - Define an on-stack instance of structure with a trailing
+ * flexible array member.
+ *
+ * @type: structure type name, including "struct" keyword.
+ * @name: Name for a variable to define.
+ * @member: Name of the array member.
+ * @count: Number of elements in the array; must be compile-time const.
+ *
+ * Define a zeroed, on-stack, instance of @type structure with a trailing
+ * flexible array member.
+ * Use __struct_size(@name) to get compile-time size of it afterwards.
+ */
+#define DEFINE_FLEX(type, name, member, count) \
+ _DEFINE_FLEX(type, name, member, count, = {})
+
#endif /* __LINUX_OVERFLOW_H */
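A usage sketch for DEFINE_FLEX() (hypothetical "struct report", not part of this patch):

struct report {
	size_t nr;
	u32 data[];		/* trailing flexible array member */
};

static void report_demo(void)
{
	/* zeroed, on-stack instance with room for 8 elements */
	DEFINE_FLEX(struct report, r, data, 8);

	r->nr = 8;		/* __struct_size(r) covers header + 8 u32s */
}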
diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 5c02720c53a5..a88e64acebfe 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -693,6 +693,25 @@ TESTPAGEFLAG_FALSE(Ksm, ksm)
u64 stable_page_flags(struct page *page);
/**
+ * folio_xor_flags_has_waiters - Change some folio flags.
+ * @folio: The folio.
+ * @mask: Bits set in this word will be changed.
+ *
+ * This must only be used for flags which are changed with the folio
+ * lock held. For example, it is unsafe to use for PG_dirty as that
+ * can be set without the folio lock held. It can also only be used
+ * on flags which are in the range 0-6 as some of the implementations
+ * only affect those bits.
+ *
+ * Return: Whether there are tasks waiting on the folio.
+ */
+static inline bool folio_xor_flags_has_waiters(struct folio *folio,
+ unsigned long mask)
+{
+ return xor_unlock_is_negative_byte(mask, folio_flags(folio, 0));
+}
+
+/**
* folio_test_uptodate - Is this folio up to date?
* @folio: The folio.
*
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 351c3b7f93a1..bcc1ea44b4e8 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -789,9 +789,6 @@ static inline pgoff_t folio_next_index(struct folio *folio)
*/
static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
{
- /* HugeTLBfs indexes the page cache in units of hpage_size */
- if (folio_test_hugetlb(folio))
- return &folio->page;
return folio_page(folio, index & (folio_nr_pages(folio) - 1));
}
@@ -807,9 +804,6 @@ static inline struct page *folio_file_page(struct folio *folio, pgoff_t index)
*/
static inline bool folio_contains(struct folio *folio, pgoff_t index)
{
- /* HugeTLBfs indexes the page cache in units of hpage_size */
- if (folio_test_hugetlb(folio))
- return folio->index == index;
return index - folio_index(folio) < folio_nr_pages(folio);
}
@@ -867,10 +861,9 @@ static inline struct folio *read_mapping_folio(struct address_space *mapping,
}
/*
- * Get index of the page within radix-tree (but not for hugetlb pages).
- * (TODO: remove once hugetlb pages will have ->index in PAGE_SIZE)
+ * Get the offset in PAGE_SIZE (even for hugetlb pages).
*/
-static inline pgoff_t page_to_index(struct page *page)
+static inline pgoff_t page_to_pgoff(struct page *page)
{
struct page *head;
@@ -885,19 +878,6 @@ static inline pgoff_t page_to_index(struct page *page)
return head->index + page - head;
}
-extern pgoff_t hugetlb_basepage_index(struct page *page);
-
-/*
- * Get the offset in PAGE_SIZE (even for hugetlb pages).
- * (TODO: hugetlb pages should have ->index in PAGE_SIZE)
- */
-static inline pgoff_t page_to_pgoff(struct page *page)
-{
- if (unlikely(PageHuge(page)))
- return hugetlb_basepage_index(page);
- return page_to_index(page);
-}
-
/*
* Return byte-offset into filesystem object for page.
*/
@@ -934,24 +914,16 @@ static inline loff_t folio_file_pos(struct folio *folio)
/*
* Get the offset in PAGE_SIZE (even for hugetlb folios).
- * (TODO: hugetlb folios should have ->index in PAGE_SIZE)
*/
static inline pgoff_t folio_pgoff(struct folio *folio)
{
- if (unlikely(folio_test_hugetlb(folio)))
- return hugetlb_basepage_index(&folio->page);
return folio->index;
}
-extern pgoff_t linear_hugepage_index(struct vm_area_struct *vma,
- unsigned long address);
-
static inline pgoff_t linear_page_index(struct vm_area_struct *vma,
unsigned long address)
{
pgoff_t pgoff;
- if (unlikely(is_vm_hugetlb_page(vma)))
- return linear_hugepage_index(vma, address);
pgoff = (address - vma->vm_start) >> PAGE_SHIFT;
pgoff += vma->vm_pgoff;
return pgoff;
@@ -1129,6 +1101,7 @@ static inline void wait_on_page_locked(struct page *page)
folio_wait_locked(page_folio(page));
}
+void folio_end_read(struct folio *folio, bool success);
void wait_on_page_writeback(struct page *page);
void folio_wait_writeback(struct folio *folio);
int folio_wait_writeback_killable(struct folio *folio);
diff --git a/include/linux/parport.h b/include/linux/parport.h
index 999eddd619b7..fff39bc30629 100644
--- a/include/linux/parport.h
+++ b/include/linux/parport.h
@@ -180,8 +180,6 @@ struct ieee1284_info {
struct semaphore irq;
};
-#define PARPORT_NAME_MAX_LEN 15
-
/* A parallel port */
struct parport {
unsigned long base; /* base address */
diff --git a/include/linux/pci.h b/include/linux/pci.h
index 8c7c2c3c6c65..60ca768bc867 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -713,6 +713,30 @@ static inline bool pci_is_bridge(struct pci_dev *dev)
dev->hdr_type == PCI_HEADER_TYPE_CARDBUS;
}
+/**
+ * pci_is_vga - check if the PCI device is a VGA device
+ * @pdev: the PCI device to check
+ *
+ * The PCI Code and ID Assignment spec, r1.15, secs 1.4 and 1.1, define
+ * VGA Base Class and Sub-Classes:
+ *
+ * 03 00 PCI_CLASS_DISPLAY_VGA VGA-compatible or 8514-compatible
+ * 00 01 PCI_CLASS_NOT_DEFINED_VGA VGA-compatible (before Class Code)
+ *
+ * Return true if the PCI device is a VGA device and uses the legacy VGA
+ * resources ([mem 0xa0000-0xbffff], [io 0x3b0-0x3bb], [io 0x3c0-0x3df] and
+ * aliases).
+ */
+static inline bool pci_is_vga(struct pci_dev *pdev)
+{
+ if ((pdev->class >> 8) == PCI_CLASS_DISPLAY_VGA)
+ return true;
+
+ if ((pdev->class >> 8) == PCI_CLASS_NOT_DEFINED_VGA)
+ return true;
+
+ return false;
+}
+
#define for_each_pci_bridge(dev, bus) \
list_for_each_entry(dev, &bus->devices, bus_list) \
if (!pci_is_bridge(dev)) {} else
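For illustration, a hypothetical caller combining pci_is_vga() with pci_get_base_class(), which is also added elsewhere in this patch:

static void vga_scan_demo(void)
{
	struct pci_dev *pdev = NULL;

	/* iterate all display-class devices; the iterator manages refcounts */
	while ((pdev = pci_get_base_class(PCI_BASE_CLASS_DISPLAY, pdev))) {
		if (pci_is_vga(pdev))
			dev_info(&pdev->dev, "decodes legacy VGA resources\n");
	}
}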
@@ -1181,6 +1205,8 @@ struct pci_dev *pci_get_slot(struct pci_bus *bus, unsigned int devfn);
struct pci_dev *pci_get_domain_bus_and_slot(int domain, unsigned int bus,
unsigned int devfn);
struct pci_dev *pci_get_class(unsigned int class, struct pci_dev *from);
+struct pci_dev *pci_get_base_class(unsigned int class, struct pci_dev *from);
+
int pci_dev_present(const struct pci_device_id *ids);
int pci_bus_read_config_byte(struct pci_bus *bus, unsigned int devfn,
@@ -1624,6 +1650,8 @@ struct msix_entry {
u16 entry; /* Driver uses to specify entry, OS writes */
};
+struct msi_domain_template;
+
#ifdef CONFIG_PCI_MSI
int pci_msi_vec_count(struct pci_dev *dev);
void pci_disable_msi(struct pci_dev *dev);
@@ -1656,6 +1684,11 @@ void pci_msix_free_irq(struct pci_dev *pdev, struct msi_map map);
void pci_free_irq_vectors(struct pci_dev *dev);
int pci_irq_vector(struct pci_dev *dev, unsigned int nr);
const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev, int vec);
+bool pci_create_ims_domain(struct pci_dev *pdev, const struct msi_domain_template *template,
+ unsigned int hwsize, void *data);
+struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev, union msi_instance_cookie *icookie,
+ const struct irq_affinity_desc *affdesc);
+void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map);
#else
static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
@@ -1719,6 +1752,25 @@ static inline const struct cpumask *pci_irq_get_affinity(struct pci_dev *pdev,
{
return cpu_possible_mask;
}
+
+static inline bool pci_create_ims_domain(struct pci_dev *pdev,
+ const struct msi_domain_template *template,
+ unsigned int hwsize, void *data)
+{ return false; }
+
+static inline struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev,
+ union msi_instance_cookie *icookie,
+ const struct irq_affinity_desc *affdesc)
+{
+ struct msi_map map = { .index = -ENOSYS, };
+
+ return map;
+}
+
+static inline void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map)
+{
+}
+
#endif
/**
@@ -1924,6 +1976,9 @@ static inline struct pci_dev *pci_get_class(unsigned int class,
struct pci_dev *from)
{ return NULL; }
+static inline struct pci_dev *pci_get_base_class(unsigned int class,
+ struct pci_dev *from)
+{ return NULL; }
static inline int pci_dev_present(const struct pci_device_id *ids)
{ return 0; }
@@ -2616,14 +2671,6 @@ static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
void pci_uevent_ers(struct pci_dev *pdev, enum pci_ers_result err_type);
#endif
-struct msi_domain_template;
-
-bool pci_create_ims_domain(struct pci_dev *pdev, const struct msi_domain_template *template,
- unsigned int hwsize, void *data);
-struct msi_map pci_ims_alloc_irq(struct pci_dev *pdev, union msi_instance_cookie *icookie,
- const struct irq_affinity_desc *affdesc);
-void pci_ims_free_irq(struct pci_dev *pdev, struct msi_map map);
-
#include <linux/dma-mapping.h>
#define pci_printk(level, pdev, fmt, arg...) \
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index 5fb3d4c393a9..275799b5f535 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -180,6 +180,8 @@
#define PCI_DEVICE_ID_BERKOM_A4T 0xffa4
#define PCI_DEVICE_ID_BERKOM_SCITEL_QUADRO 0xffa8
+#define PCI_VENDOR_ID_ITTIM 0x0b48
+
#define PCI_VENDOR_ID_COMPAQ 0x0e11
#define PCI_DEVICE_ID_COMPAQ_TOKENRING 0x0508
#define PCI_DEVICE_ID_COMPAQ_TACHYON 0xa0fc
@@ -579,6 +581,8 @@
#define PCI_DEVICE_ID_AMD_1AH_M00H_DF_F3 0x12c3
#define PCI_DEVICE_ID_AMD_1AH_M20H_DF_F3 0x16fb
#define PCI_DEVICE_ID_AMD_MI200_DF_F3 0x14d3
+#define PCI_DEVICE_ID_AMD_MI300_DF_F3 0x152b
+#define PCI_DEVICE_ID_AMD_VANGOGH_USB 0x163a
#define PCI_DEVICE_ID_AMD_CNB17H_F3 0x1703
#define PCI_DEVICE_ID_AMD_LANCE 0x2000
#define PCI_DEVICE_ID_AMD_LANCE_HOME 0x2001
diff --git a/include/linux/pds/pds_core_if.h b/include/linux/pds/pds_core_if.h
index e838a2b90440..17a87c1a55d7 100644
--- a/include/linux/pds/pds_core_if.h
+++ b/include/linux/pds/pds_core_if.h
@@ -79,6 +79,7 @@ enum pds_core_status_code {
PDS_RC_EVFID = 31, /* VF ID does not exist */
PDS_RC_BAD_FW = 32, /* FW file is invalid or corrupted */
PDS_RC_ECLIENT = 33, /* No such client id */
+ PDS_RC_BAD_PCI = 255, /* Broken PCI when reading status */
};
/**
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 68fac2e7cbe6..8c677f185901 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -132,6 +132,7 @@ extern void __init setup_per_cpu_areas(void);
extern void __percpu *__alloc_percpu_gfp(size_t size, size_t align, gfp_t gfp) __alloc_size(1);
extern void __percpu *__alloc_percpu(size_t size, size_t align) __alloc_size(1);
extern void free_percpu(void __percpu *__pdata);
+extern size_t pcpu_alloc_size(void __percpu *__pdata);
DEFINE_FREE(free_percpu, void __percpu *, free_percpu(_T))
diff --git a/include/linux/percpu_counter.h b/include/linux/percpu_counter.h
index d01351b1526f..3a44dd1e33d2 100644
--- a/include/linux/percpu_counter.h
+++ b/include/linux/percpu_counter.h
@@ -57,6 +57,8 @@ void percpu_counter_add_batch(struct percpu_counter *fbc, s64 amount,
s32 batch);
s64 __percpu_counter_sum(struct percpu_counter *fbc);
int __percpu_counter_compare(struct percpu_counter *fbc, s64 rhs, s32 batch);
+bool __percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit,
+ s64 amount, s32 batch);
void percpu_counter_sync(struct percpu_counter *fbc);
static inline int percpu_counter_compare(struct percpu_counter *fbc, s64 rhs)
@@ -69,6 +71,13 @@ static inline void percpu_counter_add(struct percpu_counter *fbc, s64 amount)
percpu_counter_add_batch(fbc, amount, percpu_counter_batch);
}
+static inline bool
+percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount)
+{
+ return __percpu_counter_limited_add(fbc, limit, amount,
+ percpu_counter_batch);
+}
+
/*
* With percpu_counter_add_local() and percpu_counter_sub_local(), counts
* are accumulated in local per cpu counter and not in fbc->count until
@@ -185,6 +194,27 @@ percpu_counter_add(struct percpu_counter *fbc, s64 amount)
local_irq_restore(flags);
}
+static inline bool
+percpu_counter_limited_add(struct percpu_counter *fbc, s64 limit, s64 amount)
+{
+ unsigned long flags;
+ bool good = false;
+ s64 count;
+
+ if (amount == 0)
+ return true;
+
+ local_irq_save(flags);
+ count = fbc->count + amount;
+ if ((amount > 0 && count <= limit) ||
+ (amount < 0 && count >= limit)) {
+ fbc->count = count;
+ good = true;
+ }
+ local_irq_restore(flags);
+ return good;
+}
+
/* non-SMP percpu_counter_add_local is the same with percpu_counter_add */
static inline void
percpu_counter_add_local(struct percpu_counter *fbc, s64 amount)
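For illustration, a sketch of the new limited-add helper (hypothetical caller; names invented): the add is applied only when the resulting count stays within the limit, so a "charge against a quota" does not overshoot.

static int charge_pages(struct percpu_counter *used, s64 limit, s64 nr_pages)
{
	if (!percpu_counter_limited_add(used, limit, nr_pages))
		return -ENOSPC;	/* would exceed the limit: fail the charge */
	return 0;
}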
diff --git a/include/linux/perf/arm_pmuv3.h b/include/linux/perf/arm_pmuv3.h
index e3899bd77f5c..9c226adf938a 100644
--- a/include/linux/perf/arm_pmuv3.h
+++ b/include/linux/perf/arm_pmuv3.h
@@ -234,9 +234,12 @@
/*
* Event filters for PMUv3
*/
-#define ARMV8_PMU_EXCLUDE_EL1 (1U << 31)
-#define ARMV8_PMU_EXCLUDE_EL0 (1U << 30)
-#define ARMV8_PMU_INCLUDE_EL2 (1U << 27)
+#define ARMV8_PMU_EXCLUDE_EL1 (1U << 31)
+#define ARMV8_PMU_EXCLUDE_EL0 (1U << 30)
+#define ARMV8_PMU_EXCLUDE_NS_EL1 (1U << 29)
+#define ARMV8_PMU_EXCLUDE_NS_EL0 (1U << 28)
+#define ARMV8_PMU_INCLUDE_EL2 (1U << 27)
+#define ARMV8_PMU_EXCLUDE_EL3 (1U << 26)
/*
* PMUSERENR: user enable reg
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index e85cd1c0eaf3..afb028c54f33 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -704,6 +704,7 @@ struct perf_event {
/* The cumulative AND of all event_caps for events in this group. */
int group_caps;
+ unsigned int group_generation;
struct perf_event *group_leader;
/*
* event->pmu will always point to pmu in which this event belongs.
@@ -878,6 +879,7 @@ struct perf_event_pmu_context {
unsigned int embedded : 1;
unsigned int nr_events;
+ unsigned int nr_cgroups;
atomic_t refcount; /* event <-> epc */
struct rcu_head rcu_head;
@@ -1573,7 +1575,7 @@ extern int sysctl_perf_cpu_time_max_percent;
extern void perf_sample_event_took(u64 sample_len_ns);
-int perf_proc_update_handler(struct ctl_table *table, int write,
+int perf_event_max_sample_rate_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
int perf_cpu_time_max_percent_handler(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos);
diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h
index 1fba072b3dac..af7639c3b0a3 100644
--- a/include/linux/pgtable.h
+++ b/include/linux/pgtable.h
@@ -206,6 +206,14 @@ static inline int pmd_young(pmd_t pmd)
#endif
#ifndef set_ptes
+
+#ifndef pte_next_pfn
+static inline pte_t pte_next_pfn(pte_t pte)
+{
+ return __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
+}
+#endif
+
/**
* set_ptes - Map consecutive pages to a contiguous range of addresses.
* @mm: Address space to map the pages into.
@@ -231,7 +239,7 @@ static inline void set_ptes(struct mm_struct *mm, unsigned long addr,
if (--nr == 0)
break;
ptep++;
- pte = __pte(pte_val(pte) + (1UL << PFN_PTE_SHIFT));
+ pte = pte_next_pfn(pte);
}
arch_leave_lazy_mmu_mode();
}
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 1351b802ffcf..3cc52826f18e 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -1736,6 +1736,7 @@ void phy_detach(struct phy_device *phydev);
void phy_start(struct phy_device *phydev);
void phy_stop(struct phy_device *phydev);
int phy_config_aneg(struct phy_device *phydev);
+int _phy_start_aneg(struct phy_device *phydev);
int phy_start_aneg(struct phy_device *phydev);
int phy_aneg_done(struct phy_device *phydev);
int phy_speed_down(struct phy_device *phydev, bool sync);
diff --git a/include/linux/phylink.h b/include/linux/phylink.h
index 2b886ea654bb..875439ab45de 100644
--- a/include/linux/phylink.h
+++ b/include/linux/phylink.h
@@ -227,7 +227,7 @@ void phylink_limit_mac_speed(struct phylink_config *config, u32 max_speed);
/**
* struct phylink_mac_ops - MAC operations structure.
- * @validate: Validate and update the link configuration.
+ * @mac_get_caps: Get MAC capabilities for interface mode.
* @mac_select_pcs: Select a PCS for the interface mode.
* @mac_prepare: prepare for a major reconfiguration of the interface.
* @mac_config: configure the MAC for the selected mode and state.
@@ -238,9 +238,8 @@ void phylink_limit_mac_speed(struct phylink_config *config, u32 max_speed);
* The individual methods are described more fully below.
*/
struct phylink_mac_ops {
- void (*validate)(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state);
+ unsigned long (*mac_get_caps)(struct phylink_config *config,
+ phy_interface_t interface);
struct phylink_pcs *(*mac_select_pcs)(struct phylink_config *config,
phy_interface_t interface);
int (*mac_prepare)(struct phylink_config *config, unsigned int mode,
@@ -259,39 +258,17 @@ struct phylink_mac_ops {
#if 0 /* For kernel-doc purposes only. */
/**
- * validate - Validate and update the link configuration
+ * mac_get_caps: Get MAC capabilities for interface mode.
* @config: a pointer to a &struct phylink_config.
- * @supported: ethtool bitmask for supported link modes.
- * @state: a pointer to a &struct phylink_link_state.
- *
- * Clear bits in the @supported and @state->advertising masks that
- * are not supportable by the MAC.
- *
- * Note that the PHY may be able to transform from one connection
- * technology to another, so, eg, don't clear 1000BaseX just
- * because the MAC is unable to BaseX mode. This is more about
- * clearing unsupported speeds and duplex settings. The port modes
- * should not be cleared; phylink_set_port_modes() will help with this.
- *
- * When @config->supported_interfaces has been set, phylink will iterate
- * over the supported interfaces to determine the full capability of the
- * MAC. The validation function must not print errors if @state->interface
- * is set to an unexpected value.
+ * @interface: PHY interface mode.
*
- * When @config->supported_interfaces is empty, phylink will call this
- * function with @state->interface set to %PHY_INTERFACE_MODE_NA, and
- * expects the MAC driver to return all supported link modes.
- *
- * If the @state->interface mode is not supported, then the @supported
- * mask must be cleared.
- *
- * This member is optional; if not set, the generic validator will be
- * used making use of @config->mac_capabilities and
- * @config->supported_interfaces to determine which link modes are
- * supported.
+ * Optional method. When not provided, config->mac_capabilities will be
+ * used. When implemented, this returns the MAC capabilities for the
+ * specified interface mode where the MAC driver requires some special
+ * handling (e.g. not supporting half-duplex in certain interface modes).
*/
-void validate(struct phylink_config *config, unsigned long *supported,
- struct phylink_link_state *state);
+unsigned long mac_get_caps(struct phylink_config *config,
+ phy_interface_t interface);
/**
* mac_select_pcs: Select a PCS for the interface mode.
* @config: a pointer to a &struct phylink_config.
@@ -636,17 +613,6 @@ void pcs_link_up(struct phylink_pcs *pcs, unsigned int neg_mode,
phy_interface_t interface, int speed, int duplex);
#endif
-void phylink_caps_to_linkmodes(unsigned long *linkmodes, unsigned long caps);
-unsigned long phylink_get_capabilities(phy_interface_t interface,
- unsigned long mac_capabilities,
- int rate_matching);
-void phylink_validate_mask_caps(unsigned long *supported,
- struct phylink_link_state *state,
- unsigned long caps);
-void phylink_generic_validate(struct phylink_config *config,
- unsigned long *supported,
- struct phylink_link_state *state);
-
struct phylink *phylink_create(struct phylink_config *,
const struct fwnode_handle *,
phy_interface_t,
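For illustration, a sketch of the new optional method that replaces the validate() callback (hypothetical MAC driver; the 2500BASE-X quirk is invented):

static unsigned long my_mac_get_caps(struct phylink_config *config,
				     phy_interface_t interface)
{
	/* hypothetical quirk: 2500BASE-X is full duplex only */
	if (interface == PHY_INTERFACE_MODE_2500BASEX)
		return MAC_SYM_PAUSE | MAC_2500FD;

	return MAC_SYM_PAUSE | MAC_10FD | MAC_100FD | MAC_1000FD;
}

static const struct phylink_mac_ops my_mac_ops = {
	.mac_get_caps	= my_mac_get_caps,
	/* .mac_select_pcs, .mac_config, .mac_link_up, ... */
};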
diff --git a/include/linux/pinctrl/machine.h b/include/linux/pinctrl/machine.h
index 0639b36f43c5..ee8803f6ad07 100644
--- a/include/linux/pinctrl/machine.h
+++ b/include/linux/pinctrl/machine.h
@@ -11,7 +11,7 @@
#ifndef __LINUX_PINCTRL_MACHINE_H
#define __LINUX_PINCTRL_MACHINE_H
-#include <linux/kernel.h> /* ARRAY_SIZE() */
+#include <linux/array_size.h>
#include <linux/pinctrl/pinctrl-state.h>
diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h
index 608a9eb86bff..8ff23bf5a819 100644
--- a/include/linux/pipe_fs_i.h
+++ b/include/linux/pipe_fs_i.h
@@ -62,9 +62,6 @@ struct pipe_inode_info {
unsigned int tail;
unsigned int max_usage;
unsigned int ring_size;
-#ifdef CONFIG_WATCH_QUEUE
- bool note_loss;
-#endif
unsigned int nr_accounted;
unsigned int readers;
unsigned int writers;
@@ -72,6 +69,9 @@ struct pipe_inode_info {
unsigned int r_counter;
unsigned int w_counter;
bool poll_usage;
+#ifdef CONFIG_WATCH_QUEUE
+ bool note_loss;
+#endif
struct page *tmp_page;
struct fasync_struct *fasync_readers;
struct fasync_struct *fasync_writers;
@@ -125,6 +125,22 @@ struct pipe_buf_operations {
};
/**
+ * pipe_has_watch_queue - Check whether the pipe is a watch_queue,
+ * i.e. it was created with O_NOTIFICATION_PIPE
+ * @pipe: The pipe to check
+ *
+ * Return: true if pipe is a watch queue, false otherwise.
+ */
+static inline bool pipe_has_watch_queue(const struct pipe_inode_info *pipe)
+{
+#ifdef CONFIG_WATCH_QUEUE
+ return pipe->watch_queue != NULL;
+#else
+ return false;
+#endif
+}
+
+/**
* pipe_empty - Return true if the pipe is empty
* @head: The pipe ring head pointer
* @tail: The pipe ring tail pointer
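For illustration, the intended use of pipe_has_watch_queue() (hypothetical caller; -EXDEV mirrors what splice returns for notification pipes):

static long my_pipe_op(struct pipe_inode_info *pipe)
{
	if (pipe_has_watch_queue(pipe))
		return -EXDEV;	/* operation unsupported on notification pipes */
	/* ... */
	return 0;
}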
diff --git a/include/linux/pktcdvd.h b/include/linux/pktcdvd.h
index 80cb00db42a4..79594aeb160d 100644
--- a/include/linux/pktcdvd.h
+++ b/include/linux/pktcdvd.h
@@ -154,7 +154,9 @@ struct packet_stacked_data
struct pktcdvd_device
{
- struct block_device *bdev; /* dev attached */
+ struct bdev_handle *bdev_handle; /* dev attached */
+ /* handle acquired for bdev during pkt_open_dev() */
+ struct bdev_handle *open_bdev_handle;
dev_t pkt_dev; /* our dev */
struct packet_settings settings;
struct packet_stats stats;
diff --git a/include/linux/platform_data/cros_ec_proto.h b/include/linux/platform_data/cros_ec_proto.h
index 4f9f756bc17c..8865e350c12a 100644
--- a/include/linux/platform_data/cros_ec_proto.h
+++ b/include/linux/platform_data/cros_ec_proto.h
@@ -258,7 +258,7 @@ bool cros_ec_check_features(struct cros_ec_dev *ec, int feature);
int cros_ec_get_sensor_count(struct cros_ec_dev *ec);
-int cros_ec_cmd(struct cros_ec_device *ec_dev, unsigned int version, int command, void *outdata,
+int cros_ec_cmd(struct cros_ec_device *ec_dev, unsigned int version, int command, const void *outdata,
size_t outsize, void *indata, size_t insize);
/**
diff --git a/include/linux/platform_data/gpio-omap.h b/include/linux/platform_data/gpio-omap.h
index f377817ce75c..cdd8cfb424f5 100644
--- a/include/linux/platform_data/gpio-omap.h
+++ b/include/linux/platform_data/gpio-omap.h
@@ -144,9 +144,6 @@
#define OMAP_MAX_GPIO_LINES 192
-#define OMAP_MPUIO(nr) (OMAP_MAX_GPIO_LINES + (nr))
-#define OMAP_GPIO_IS_MPUIO(nr) ((nr) >= OMAP_MAX_GPIO_LINES)
-
#ifndef __ASSEMBLER__
struct omap_gpio_reg_offs {
u16 revision;
diff --git a/include/linux/platform_data/gsc_hwmon.h b/include/linux/platform_data/gsc_hwmon.h
index f2781aa7eff8..70e8a6bec0f6 100644
--- a/include/linux/platform_data/gsc_hwmon.h
+++ b/include/linux/platform_data/gsc_hwmon.h
@@ -40,6 +40,6 @@ struct gsc_hwmon_platform_data {
unsigned int resolution;
unsigned int vreference;
unsigned int fan_base;
- struct gsc_hwmon_channel channels[];
+ struct gsc_hwmon_channel channels[] __counted_by(nchannels);
};
#endif
diff --git a/include/linux/platform_data/omap-twl4030.h b/include/linux/platform_data/omap-twl4030.h
index 0dd851ea1c72..7fcb55fe21c9 100644
--- a/include/linux/platform_data/omap-twl4030.h
+++ b/include/linux/platform_data/omap-twl4030.h
@@ -37,9 +37,6 @@ struct omap_tw4030_pdata {
bool has_digimic0;
bool has_digimic1;
u8 has_linein;
-
- /* Jack detect GPIO or <= 0 if it is not implemented */
- int jack_detect;
};
#endif /* _OMAP_TWL4030_H_ */
diff --git a/include/linux/platform_data/pca953x.h b/include/linux/platform_data/pca953x.h
index 96c1a14ab365..3c3787c4d96c 100644
--- a/include/linux/platform_data/pca953x.h
+++ b/include/linux/platform_data/pca953x.h
@@ -11,21 +11,8 @@ struct pca953x_platform_data {
/* number of the first GPIO */
unsigned gpio_base;
- /* initial polarity inversion setting */
- u32 invert;
-
/* interrupt base */
int irq_base;
-
- void *context; /* param to setup/teardown */
-
- int (*setup)(struct i2c_client *client,
- unsigned gpio, unsigned ngpio,
- void *context);
- void (*teardown)(struct i2c_client *client,
- unsigned gpio, unsigned ngpio,
- void *context);
- const char *const *names;
};
#endif /* _LINUX_PCA953X_H */
diff --git a/include/linux/platform_data/x86/asus-wmi.h b/include/linux/platform_data/x86/asus-wmi.h
index 16e99a1c37fc..63e630276499 100644
--- a/include/linux/platform_data/x86/asus-wmi.h
+++ b/include/linux/platform_data/x86/asus-wmi.h
@@ -58,6 +58,10 @@
#define ASUS_WMI_DEVID_KBD_BACKLIGHT 0x00050021
#define ASUS_WMI_DEVID_LIGHT_SENSOR 0x00050022 /* ?? */
#define ASUS_WMI_DEVID_LIGHTBAR 0x00050025
+/* This can only be used to disable the screen, not to re-enable it */
+#define ASUS_WMI_DEVID_SCREENPAD_POWER 0x00050031
+/* Writing a brightness re-enables the screen if disabled */
+#define ASUS_WMI_DEVID_SCREENPAD_LIGHT 0x00050032
#define ASUS_WMI_DEVID_FAN_BOOST_MODE 0x00110018
#define ASUS_WMI_DEVID_THROTTLE_THERMAL_POLICY 0x00120075
diff --git a/include/linux/pm.h b/include/linux/pm.h
index 1400c37b29c7..92a4f69de0e8 100644
--- a/include/linux/pm.h
+++ b/include/linux/pm.h
@@ -374,24 +374,39 @@ const struct dev_pm_ops name = { \
RUNTIME_PM_OPS(runtime_suspend_fn, runtime_resume_fn, idle_fn) \
}
-#ifdef CONFIG_PM
-#define _EXPORT_DEV_PM_OPS(name, license, ns) \
+#define _EXPORT_PM_OPS(name, license, ns) \
const struct dev_pm_ops name; \
__EXPORT_SYMBOL(name, license, ns); \
const struct dev_pm_ops name
-#define EXPORT_PM_FN_GPL(name) EXPORT_SYMBOL_GPL(name)
-#define EXPORT_PM_FN_NS_GPL(name, ns) EXPORT_SYMBOL_NS_GPL(name, ns)
-#else
-#define _EXPORT_DEV_PM_OPS(name, license, ns) \
+
+#define _DISCARD_PM_OPS(name, license, ns) \
static __maybe_unused const struct dev_pm_ops __static_##name
+
+#ifdef CONFIG_PM
+#define _EXPORT_DEV_PM_OPS(name, license, ns) _EXPORT_PM_OPS(name, license, ns)
+#define EXPORT_PM_FN_GPL(name) EXPORT_SYMBOL_GPL(name)
+#define EXPORT_PM_FN_NS_GPL(name, ns) EXPORT_SYMBOL_NS_GPL(name, ns)
+#else
+#define _EXPORT_DEV_PM_OPS(name, license, ns) _DISCARD_PM_OPS(name, license, ns)
#define EXPORT_PM_FN_GPL(name)
#define EXPORT_PM_FN_NS_GPL(name, ns)
#endif
-#define EXPORT_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "", "")
-#define EXPORT_GPL_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "GPL", "")
-#define EXPORT_NS_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "", #ns)
-#define EXPORT_NS_GPL_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "GPL", #ns)
+#ifdef CONFIG_PM_SLEEP
+#define _EXPORT_DEV_SLEEP_PM_OPS(name, license, ns) _EXPORT_PM_OPS(name, license, ns)
+#else
+#define _EXPORT_DEV_SLEEP_PM_OPS(name, license, ns) _DISCARD_PM_OPS(name, license, ns)
+#endif
+
+#define EXPORT_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "", "")
+#define EXPORT_GPL_DEV_PM_OPS(name) _EXPORT_DEV_PM_OPS(name, "GPL", "")
+#define EXPORT_NS_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "", #ns)
+#define EXPORT_NS_GPL_DEV_PM_OPS(name, ns) _EXPORT_DEV_PM_OPS(name, "GPL", #ns)
+
+#define EXPORT_DEV_SLEEP_PM_OPS(name) _EXPORT_DEV_SLEEP_PM_OPS(name, "", "")
+#define EXPORT_GPL_DEV_SLEEP_PM_OPS(name) _EXPORT_DEV_SLEEP_PM_OPS(name, "GPL", "")
+#define EXPORT_NS_DEV_SLEEP_PM_OPS(name, ns) _EXPORT_DEV_SLEEP_PM_OPS(name, "", #ns)
+#define EXPORT_NS_GPL_DEV_SLEEP_PM_OPS(name, ns) _EXPORT_DEV_SLEEP_PM_OPS(name, "GPL", #ns)
/*
* Use this if you want to use the same suspend and resume callbacks for suspend
@@ -404,19 +419,19 @@ const struct dev_pm_ops name = { \
_DEFINE_DEV_PM_OPS(name, suspend_fn, resume_fn, NULL, NULL, NULL)
#define EXPORT_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
- EXPORT_DEV_PM_OPS(name) = { \
+ EXPORT_DEV_SLEEP_PM_OPS(name) = { \
SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
}
#define EXPORT_GPL_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn) \
- EXPORT_GPL_DEV_PM_OPS(name) = { \
+ EXPORT_GPL_DEV_SLEEP_PM_OPS(name) = { \
SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
}
#define EXPORT_NS_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn, ns) \
- EXPORT_NS_DEV_PM_OPS(name, ns) = { \
+ EXPORT_NS_DEV_SLEEP_PM_OPS(name, ns) = { \
SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
}
#define EXPORT_NS_GPL_SIMPLE_DEV_PM_OPS(name, suspend_fn, resume_fn, ns) \
- EXPORT_NS_GPL_DEV_PM_OPS(name, ns) = { \
+ EXPORT_NS_GPL_DEV_SLEEP_PM_OPS(name, ns) = { \
SYSTEM_SLEEP_PM_OPS(suspend_fn, resume_fn) \
}
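For illustration, how a driver picks up the reworked macros (hypothetical driver code): with the new sleep-only exports, the resulting dev_pm_ops is discarded entirely when CONFIG_PM_SLEEP is disabled, rather than only when CONFIG_PM is.

static int my_suspend(struct device *dev) { return 0; }
static int my_resume(struct device *dev) { return 0; }

EXPORT_SIMPLE_DEV_PM_OPS(my_driver_pm_ops, my_suspend, my_resume);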
@@ -719,6 +734,7 @@ extern void dev_pm_put_subsys_data(struct device *dev);
* @activate: Called before executing probe routines for bus types and drivers.
* @sync: Called after successful driver probe.
* @dismiss: Called after unsuccessful driver probe and after driver removal.
+ * @set_performance_state: Called to request a new performance state.
*
* Power domains provide callbacks that are executed during system suspend,
* hibernation, system resume and during runtime PM transitions instead of
@@ -731,6 +747,7 @@ struct dev_pm_domain {
int (*activate)(struct device *dev);
void (*sync)(struct device *dev);
void (*dismiss)(struct device *dev);
+ int (*set_performance_state)(struct device *dev, unsigned int state);
};
/*
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index f776fb93eaa0..34663d0d5c55 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -61,6 +61,10 @@
* GENPD_FLAG_MIN_RESIDENCY: Enable the genpd governor to consider its
* components' next wakeup when determining the
* optimal idle state.
+ *
+ * GENPD_FLAG_OPP_TABLE_FW: The genpd provider supports performance states,
+ *				but the corresponding OPP tables are not
+ *				described in DT; they are provided directly by FW.
*/
#define GENPD_FLAG_PM_CLK (1U << 0)
#define GENPD_FLAG_IRQ_SAFE (1U << 1)
@@ -69,6 +73,7 @@
#define GENPD_FLAG_CPU_DOMAIN (1U << 4)
#define GENPD_FLAG_RPM_ALWAYS_ON (1U << 5)
#define GENPD_FLAG_MIN_RESIDENCY (1U << 6)
+#define GENPD_FLAG_OPP_TABLE_FW (1U << 7)
enum gpd_status {
GENPD_STATE_ON = 0, /* PM domain is on */
@@ -430,6 +435,7 @@ struct device *dev_pm_domain_attach_by_name(struct device *dev,
void dev_pm_domain_detach(struct device *dev, bool power_off);
int dev_pm_domain_start(struct device *dev);
void dev_pm_domain_set(struct device *dev, struct dev_pm_domain *pd);
+int dev_pm_domain_set_performance_state(struct device *dev, unsigned int state);
#else
static inline int dev_pm_domain_attach(struct device *dev, bool power_on)
{
@@ -452,6 +458,11 @@ static inline int dev_pm_domain_start(struct device *dev)
}
static inline void dev_pm_domain_set(struct device *dev,
struct dev_pm_domain *pd) {}
+static inline int dev_pm_domain_set_performance_state(struct device *dev,
+ unsigned int state)
+{
+ return 0;
+}
#endif
#endif /* _LINUX_PM_DOMAIN_H */
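
A consumer-side sketch of the new hook (the state value is arbitrary); with genpd compiled out, the stub above makes this a no-op returning 0:

static int my_boost(struct device *dev)
{
	int ret;

	ret = dev_pm_domain_set_performance_state(dev, 3);
	if (ret)
		dev_err(dev, "failed to set performance state: %d\n", ret);
	return ret;
}
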
diff --git a/include/linux/pm_opp.h b/include/linux/pm_opp.h
index 91f87d7e807c..ccd97bcef269 100644
--- a/include/linux/pm_opp.h
+++ b/include/linux/pm_opp.h
@@ -92,6 +92,18 @@ struct dev_pm_opp_config {
struct device ***virt_devs;
};
+/**
+ * struct dev_pm_opp_data - The data to use to initialize an OPP.
+ * @level: The performance level for the OPP.
+ * @freq: The clock rate in Hz for the OPP.
+ * @u_volt: The voltage in uV for the OPP.
+ */
+struct dev_pm_opp_data {
+ unsigned int level;
+ unsigned long freq;
+ unsigned long u_volt;
+};
+
#if defined(CONFIG_PM_OPP)
struct opp_table *dev_pm_opp_get_opp_table(struct device *dev);
@@ -144,6 +156,9 @@ struct dev_pm_opp *dev_pm_opp_find_level_exact(struct device *dev,
struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
unsigned int *level);
+struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev,
+ unsigned long *level);
+
struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
unsigned int *bw, int index);
@@ -152,8 +167,8 @@ struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
void dev_pm_opp_put(struct dev_pm_opp *opp);
-int dev_pm_opp_add(struct device *dev, unsigned long freq,
- unsigned long u_volt);
+int dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *opp);
+
void dev_pm_opp_remove(struct device *dev, unsigned long freq);
void dev_pm_opp_remove_all_dynamic(struct device *dev);
@@ -308,6 +323,12 @@ static inline struct dev_pm_opp *dev_pm_opp_find_level_ceil(struct device *dev,
return ERR_PTR(-EOPNOTSUPP);
}
+static inline struct dev_pm_opp *dev_pm_opp_find_level_floor(struct device *dev,
+ unsigned long *level)
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
static inline struct dev_pm_opp *dev_pm_opp_find_bw_ceil(struct device *dev,
unsigned int *bw, int index)
{
@@ -322,8 +343,8 @@ static inline struct dev_pm_opp *dev_pm_opp_find_bw_floor(struct device *dev,
static inline void dev_pm_opp_put(struct dev_pm_opp *opp) {}
-static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
- unsigned long u_volt)
+static inline int
+dev_pm_opp_add_dynamic(struct device *dev, struct dev_pm_opp_data *opp)
{
return -EOPNOTSUPP;
}
@@ -519,6 +540,17 @@ static inline int dev_pm_opp_of_find_icc_paths(struct device *dev, struct opp_ta
/* OPP Configuration helpers */
+static inline int dev_pm_opp_add(struct device *dev, unsigned long freq,
+ unsigned long u_volt)
+{
+ struct dev_pm_opp_data data = {
+ .freq = freq,
+ .u_volt = u_volt,
+ };
+
+ return dev_pm_opp_add_dynamic(dev, &data);
+}
+
/* Regulators helpers */
static inline int dev_pm_opp_set_regulators(struct device *dev,
const char * const names[])
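
A sketch of the descriptor-based registration that replaces dev_pm_opp_add() (frequency and voltage values invented), mirroring the inline compatibility wrapper shown above:

static int my_register_opp(struct device *dev)
{
	struct dev_pm_opp_data data = {
		.level = 1,
		.freq = 800000000,	/* 800 MHz */
		.u_volt = 900000,	/* 900 mV */
	};

	return dev_pm_opp_add_dynamic(dev, &data);
}
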
diff --git a/include/linux/pnp.h b/include/linux/pnp.h
index c2a7cfbca713..267fb8a4fb6e 100644
--- a/include/linux/pnp.h
+++ b/include/linux/pnp.h
@@ -291,7 +291,7 @@ static inline void pnp_set_drvdata(struct pnp_dev *pdev, void *data)
struct pnp_fixup {
char id[7];
- void (*quirk_function) (struct pnp_dev * dev); /* fixup function */
+ void (*quirk_function) (struct pnp_dev *dev); /* fixup function */
};
/* config parameters */
@@ -419,8 +419,8 @@ struct pnp_protocol {
/* protocol specific suspend/resume */
bool (*can_wakeup) (struct pnp_dev *dev);
- int (*suspend) (struct pnp_dev * dev, pm_message_t state);
- int (*resume) (struct pnp_dev * dev);
+ int (*suspend) (struct pnp_dev *dev, pm_message_t state);
+ int (*resume) (struct pnp_dev *dev);
/* used by pnp layer only (look but don't touch) */
unsigned char number; /* protocol number */
@@ -492,7 +492,7 @@ static inline int pnp_start_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_stop_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_activate_dev(struct pnp_dev *dev) { return -ENODEV; }
static inline int pnp_disable_dev(struct pnp_dev *dev) { return -ENODEV; }
-static inline int pnp_range_reserved(resource_size_t start, resource_size_t end) { return 0;}
+static inline int pnp_range_reserved(resource_size_t start, resource_size_t end) { return 0; }
/* protocol helpers */
static inline int pnp_is_active(struct pnp_dev *dev) { return 0; }
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
index 468328b1e1dd..ef8619f48920 100644
--- a/include/linux/posix-clock.h
+++ b/include/linux/posix-clock.h
@@ -14,6 +14,7 @@
#include <linux/rwsem.h>
struct posix_clock;
+struct posix_clock_context;
/**
* struct posix_clock_operations - functional interface to the clock
@@ -50,18 +51,18 @@ struct posix_clock_operations {
/*
* Optional character device methods:
*/
- long (*ioctl) (struct posix_clock *pc,
- unsigned int cmd, unsigned long arg);
+ long (*ioctl)(struct posix_clock_context *pccontext, unsigned int cmd,
+ unsigned long arg);
- int (*open) (struct posix_clock *pc, fmode_t f_mode);
+ int (*open)(struct posix_clock_context *pccontext, fmode_t f_mode);
- __poll_t (*poll) (struct posix_clock *pc,
- struct file *file, poll_table *wait);
+ __poll_t (*poll)(struct posix_clock_context *pccontext, struct file *file,
+ poll_table *wait);
- int (*release) (struct posix_clock *pc);
+ int (*release)(struct posix_clock_context *pccontext);
- ssize_t (*read) (struct posix_clock *pc,
- uint flags, char __user *buf, size_t cnt);
+ ssize_t (*read)(struct posix_clock_context *pccontext, uint flags,
+ char __user *buf, size_t cnt);
};
/**
@@ -91,6 +92,24 @@ struct posix_clock {
};
/**
+ * struct posix_clock_context - represents clock file operations context
+ *
+ * @clk: Pointer to the clock
+ * @private_clkdata: Pointer to user data
+ *
+ * Drivers should use struct posix_clock_context in their character
+ * device file operation methods to access the posix clock.
+ *
+ * A driver can store a private data structure during the open operation
+ * if it has information that is needed in other file operations.
+ */
+struct posix_clock_context {
+ struct posix_clock *clk;
+ void *private_clkdata;
+};
+
+/**
* posix_clock_register() - register a new clock
* @clk: Pointer to the clock. Caller must provide 'ops' field
* @dev: Pointer to the initialized device. Caller must provide
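
A hypothetical open/release pair illustrating the per-open private data pattern described above (struct my_state and the callbacks are invented; assumes <linux/slab.h>):

struct my_state {
	u32 last_cmd;
};

static int my_clock_open(struct posix_clock_context *pccontext, fmode_t f_mode)
{
	struct my_state *st = kzalloc(sizeof(*st), GFP_KERNEL);

	if (!st)
		return -ENOMEM;
	pccontext->private_clkdata = st;
	return 0;
}

static int my_clock_release(struct posix_clock_context *pccontext)
{
	kfree(pccontext->private_clkdata);
	pccontext->private_clkdata = NULL;
	return 0;
}
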
diff --git a/include/linux/power_supply.h b/include/linux/power_supply.h
index a427f13c757f..c0992a77feea 100644
--- a/include/linux/power_supply.h
+++ b/include/linux/power_supply.h
@@ -767,7 +767,6 @@ struct power_supply_battery_info {
int bti_resistance_tolerance;
};
-extern struct atomic_notifier_head power_supply_notifier;
extern int power_supply_reg_notifier(struct notifier_block *nb);
extern void power_supply_unreg_notifier(struct notifier_block *nb);
#if IS_ENABLED(CONFIG_POWER_SUPPLY)
diff --git a/include/linux/preempt.h b/include/linux/preempt.h
index 1424670df161..9aa6358a1a16 100644
--- a/include/linux/preempt.h
+++ b/include/linux/preempt.h
@@ -99,14 +99,21 @@ static __always_inline unsigned char interrupt_context_level(void)
return level;
}
+/*
+ * These macro definitions avoid redundant invocations of preempt_count()
+ * because such invocations would result in redundant loads given that
+ * preempt_count() is commonly implemented with READ_ONCE().
+ */
+
#define nmi_count() (preempt_count() & NMI_MASK)
#define hardirq_count() (preempt_count() & HARDIRQ_MASK)
#ifdef CONFIG_PREEMPT_RT
# define softirq_count() (current->softirq_disable_cnt & SOFTIRQ_MASK)
+# define irq_count() ((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | softirq_count())
#else
# define softirq_count() (preempt_count() & SOFTIRQ_MASK)
+# define irq_count() (preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_MASK))
#endif
-#define irq_count() (nmi_count() | hardirq_count() | softirq_count())
/*
* Macros to retrieve the current execution context:
@@ -119,7 +126,11 @@ static __always_inline unsigned char interrupt_context_level(void)
#define in_nmi() (nmi_count())
#define in_hardirq() (hardirq_count())
#define in_serving_softirq() (softirq_count() & SOFTIRQ_OFFSET)
-#define in_task() (!(in_nmi() | in_hardirq() | in_serving_softirq()))
+#ifdef CONFIG_PREEMPT_RT
+# define in_task() (!((preempt_count() & (NMI_MASK | HARDIRQ_MASK)) | in_serving_softirq()))
+#else
+# define in_task() (!(preempt_count() & (NMI_MASK | HARDIRQ_MASK | SOFTIRQ_OFFSET)))
+#endif
/*
* The following macros are deprecated and should not be used in new code:
diff --git a/include/linux/property.h b/include/linux/property.h
index 8c3c6685a2ae..9f2585d705a8 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -10,6 +10,7 @@
#ifndef _LINUX_PROPERTY_H_
#define _LINUX_PROPERTY_H_
+#include <linux/args.h>
#include <linux/bits.h>
#include <linux/fwnode.h>
#include <linux/stddef.h>
@@ -288,7 +289,7 @@ struct software_node_ref_args {
#define SOFTWARE_NODE_REFERENCE(_ref_, ...) \
(const struct software_node_ref_args) { \
.node = _ref_, \
- .nargs = ARRAY_SIZE(((u64[]){ 0, ##__VA_ARGS__ })) - 1, \
+ .nargs = COUNT_ARGS(__VA_ARGS__), \
.args = { __VA_ARGS__ }, \
}
diff --git a/include/linux/pseudo_fs.h b/include/linux/pseudo_fs.h
index eceda1d1407a..730f77381d55 100644
--- a/include/linux/pseudo_fs.h
+++ b/include/linux/pseudo_fs.h
@@ -5,7 +5,7 @@
struct pseudo_fs_context {
const struct super_operations *ops;
- const struct xattr_handler **xattr;
+ const struct xattr_handler * const *xattr;
const struct dentry_operations *dops;
unsigned long magic;
};
diff --git a/include/linux/quota.h b/include/linux/quota.h
index fd692b4a41d5..07071e64abf3 100644
--- a/include/linux/quota.h
+++ b/include/linux/quota.h
@@ -285,7 +285,9 @@ static inline void dqstats_dec(unsigned int type)
#define DQ_FAKE_B 3 /* no limits only usage */
#define DQ_READ_B 4 /* dquot was read into memory */
#define DQ_ACTIVE_B 5 /* dquot is active (dquot_release not called) */
-#define DQ_LASTSET_B 6 /* Following 6 bits (see QIF_) are reserved\
+#define DQ_RELEASING_B 6 /* dquot is in releasing_dquots list waiting
+ * to be cleaned up */
+#define DQ_LASTSET_B 7 /* Following 6 bits (see QIF_) are reserved\
* for the mask of entries set via SETQUOTA\
* quotactl. They are set under dq_data_lock\
* and the quota format handling dquot can\
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 11a4becff3a9..4fa4ef0a173a 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -57,7 +57,7 @@ static inline bool dquot_is_busy(struct dquot *dquot)
{
if (test_bit(DQ_MOD_B, &dquot->dq_flags))
return true;
- if (atomic_read(&dquot->dq_count) > 1)
+ if (atomic_read(&dquot->dq_count) > 0)
return true;
return false;
}
diff --git a/include/linux/raid/pq.h b/include/linux/raid/pq.h
index 006e18decfad..98030accf641 100644
--- a/include/linux/raid/pq.h
+++ b/include/linux/raid/pq.h
@@ -84,8 +84,6 @@ extern const struct raid6_calls raid6_intx1;
extern const struct raid6_calls raid6_intx2;
extern const struct raid6_calls raid6_intx4;
extern const struct raid6_calls raid6_intx8;
-extern const struct raid6_calls raid6_intx16;
-extern const struct raid6_calls raid6_intx32;
extern const struct raid6_calls raid6_mmxx1;
extern const struct raid6_calls raid6_mmxx2;
extern const struct raid6_calls raid6_sse1x1;
diff --git a/include/linux/rcu_notifier.h b/include/linux/rcu_notifier.h
new file mode 100644
index 000000000000..ebf371364581
--- /dev/null
+++ b/include/linux/rcu_notifier.h
@@ -0,0 +1,32 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Read-Copy Update notifiers, initially RCU CPU stall notifier.
+ * Separate from rcupdate.h to avoid #include loops.
+ *
+ * Copyright (C) 2023 Paul E. McKenney.
+ */
+
+#ifndef __LINUX_RCU_NOTIFIER_H
+#define __LINUX_RCU_NOTIFIER_H
+
+// Actions for RCU CPU stall notifier calls.
+#define RCU_STALL_NOTIFY_NORM 1
+#define RCU_STALL_NOTIFY_EXP 2
+
+#ifdef CONFIG_RCU_STALL_COMMON
+
+#include <linux/notifier.h>
+#include <linux/types.h>
+
+int rcu_stall_chain_notifier_register(struct notifier_block *n);
+int rcu_stall_chain_notifier_unregister(struct notifier_block *n);
+
+#else // #ifdef CONFIG_RCU_STALL_COMMON
+
+// No RCU CPU stall warnings in Tiny RCU.
+static inline int rcu_stall_chain_notifier_register(struct notifier_block *n) { return -EEXIST; }
+static inline int rcu_stall_chain_notifier_unregister(struct notifier_block *n) { return -ENOENT; }
+
+#endif // #else // #ifdef CONFIG_RCU_STALL_COMMON
+
+#endif /* __LINUX_RCU_NOTIFIER_H */
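
A registration sketch for the new notifier chain (callback and messages invented):

#include <linux/notifier.h>
#include <linux/rcu_notifier.h>

static int my_stall_cb(struct notifier_block *nb, unsigned long action,
		       void *data)
{
	pr_warn("RCU stall notified (%s grace period)\n",
		action == RCU_STALL_NOTIFY_EXP ? "expedited" : "normal");
	return NOTIFY_OK;
}

static struct notifier_block my_stall_nb = {
	.notifier_call = my_stall_cb,
};

/* in module init: rcu_stall_chain_notifier_register(&my_stall_nb); */
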
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index 5e5f920ade90..f7206b2623c9 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -122,8 +122,6 @@ static inline void call_rcu_hurry(struct rcu_head *head, rcu_callback_t func)
void rcu_init(void);
extern int rcu_scheduler_active;
void rcu_sched_clock_irq(int user);
-void rcu_report_dead(unsigned int cpu);
-void rcutree_migrate_callbacks(int cpu);
#ifdef CONFIG_TASKS_RCU_GENERIC
void rcu_init_tasks_generic(void);
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index 7b949292908a..d9ac7b136aea 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -171,6 +171,6 @@ static inline void rcu_all_qs(void) { barrier(); }
#define rcutree_offline_cpu NULL
#define rcutree_dead_cpu NULL
#define rcutree_dying_cpu NULL
-static inline void rcu_cpu_starting(unsigned int cpu) { }
+static inline void rcutree_report_cpu_starting(unsigned int cpu) { }
#endif /* __LINUX_RCUTINY_H */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 126f6b418f6a..254244202ea9 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -37,7 +37,6 @@ void synchronize_rcu_expedited(void);
void kvfree_call_rcu(struct rcu_head *head, void *ptr);
void rcu_barrier(void);
-bool rcu_eqs_special_set(int cpu);
void rcu_momentary_dyntick_idle(void);
void kfree_rcu_scheduler_running(void);
bool rcu_gp_might_be_stalled(void);
@@ -111,9 +110,21 @@ void rcu_all_qs(void);
/* RCUtree hotplug events */
int rcutree_prepare_cpu(unsigned int cpu);
int rcutree_online_cpu(unsigned int cpu);
-int rcutree_offline_cpu(unsigned int cpu);
+void rcutree_report_cpu_starting(unsigned int cpu);
+
+#ifdef CONFIG_HOTPLUG_CPU
int rcutree_dead_cpu(unsigned int cpu);
int rcutree_dying_cpu(unsigned int cpu);
-void rcu_cpu_starting(unsigned int cpu);
+int rcutree_offline_cpu(unsigned int cpu);
+#else
+#define rcutree_dead_cpu NULL
+#define rcutree_dying_cpu NULL
+#define rcutree_offline_cpu NULL
+#endif
+
+void rcutree_migrate_callbacks(int cpu);
+
+/* Called from hotplug and also from the arm64 early secondary boot failure path */
+void rcutree_report_cpu_dead(void);
#endif /* __LINUX_RCUTREE_H */
diff --git a/include/linux/reboot.h b/include/linux/reboot.h
index 2b6bb593be5b..c4cc3b89ced1 100644
--- a/include/linux/reboot.h
+++ b/include/linux/reboot.h
@@ -129,11 +129,14 @@ enum sys_off_mode {
* @cb_data: User's callback data.
* @cmd: Command string. Currently used only by the sys-off restart mode,
* NULL otherwise.
+ * @dev: Device of the sys-off handler. Only if known (devm_register_*),
+ * NULL otherwise.
*/
struct sys_off_data {
int mode;
void *cb_data;
const char *cmd;
+ struct device *dev;
};
struct sys_off_handler *
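
A hypothetical handler consuming the new @dev member; it assumes registration through one of the devm_register_* helpers so that @dev is populated:

static int my_power_off(struct sys_off_data *data)
{
	if (data->dev)
		dev_info(data->dev, "powering off (cmd: %s)\n",
			 data->cmd ?: "none");
	/* ... trigger the hardware power-off ... */
	return NOTIFY_DONE;
}
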
diff --git a/include/linux/regulator/mt6358-regulator.h b/include/linux/regulator/mt6358-regulator.h
index c71a6a9fce7a..562386f9b80e 100644
--- a/include/linux/regulator/mt6358-regulator.h
+++ b/include/linux/regulator/mt6358-regulator.h
@@ -86,6 +86,9 @@ enum {
MT6366_ID_VMC,
MT6366_ID_VAUD28,
MT6366_ID_VSIM2,
+ MT6366_ID_VM18,
+ MT6366_ID_VMDDR,
+ MT6366_ID_VSRAM_CORE,
MT6366_ID_RG_MAX,
};
diff --git a/include/linux/resctrl.h b/include/linux/resctrl.h
index 8334eeacfec5..66942d7fba7f 100644
--- a/include/linux/resctrl.h
+++ b/include/linux/resctrl.h
@@ -94,7 +94,7 @@ struct rdt_domain {
* zero CBM.
* @shareable_bits: Bitmask of shareable resource with other
* executing entities
- * @arch_has_sparse_bitmaps: True if a bitmap like f00f is valid.
+ * @arch_has_sparse_bitmasks: True if a bitmask like f00f is valid.
* @arch_has_per_cpu_cfg: True if QOS_CFG register for this cache
* level has CPU scope.
*/
@@ -102,7 +102,7 @@ struct resctrl_cache {
unsigned int cbm_len;
unsigned int min_cbm_bits;
unsigned int shareable_bits;
- bool arch_has_sparse_bitmaps;
+ bool arch_has_sparse_bitmasks;
bool arch_has_per_cpu_cfg;
};
diff --git a/include/linux/resume_user_mode.h b/include/linux/resume_user_mode.h
index 285189454449..f8f3e958e9cf 100644
--- a/include/linux/resume_user_mode.h
+++ b/include/linux/resume_user_mode.h
@@ -55,7 +55,7 @@ static inline void resume_user_mode_work(struct pt_regs *regs)
}
#endif
- mem_cgroup_handle_over_high();
+ mem_cgroup_handle_over_high(GFP_KERNEL);
blkcg_maybe_throttle_current();
rseq_handle_notify_resume(NULL, regs);
diff --git a/include/linux/rethook.h b/include/linux/rethook.h
index 26b6f3c81a76..ce69b2b7bc35 100644
--- a/include/linux/rethook.h
+++ b/include/linux/rethook.h
@@ -6,11 +6,10 @@
#define _LINUX_RETHOOK_H
#include <linux/compiler.h>
-#include <linux/freelist.h>
+#include <linux/objpool.h>
#include <linux/kallsyms.h>
#include <linux/llist.h>
#include <linux/rcupdate.h>
-#include <linux/refcount.h>
struct rethook_node;
@@ -30,14 +29,12 @@ typedef void (*rethook_handler_t) (struct rethook_node *, void *, unsigned long,
struct rethook {
void *data;
rethook_handler_t handler;
- struct freelist_head pool;
- refcount_t ref;
+ struct objpool_head pool;
struct rcu_head rcu;
};
/**
* struct rethook_node - The rethook shadow-stack entry node.
- * @freelist: The freelist, linked to struct rethook::pool.
* @rcu: The rcu_head for deferred freeing.
* @llist: The llist, linked to a struct task_struct::rethooks.
* @rethook: The pointer to the struct rethook.
@@ -48,20 +45,16 @@ struct rethook {
* on each entry of the shadow stack.
*/
struct rethook_node {
- union {
- struct freelist_node freelist;
- struct rcu_head rcu;
- };
+ struct rcu_head rcu;
struct llist_node llist;
struct rethook *rethook;
unsigned long ret_addr;
unsigned long frame;
};
-struct rethook *rethook_alloc(void *data, rethook_handler_t handler);
+struct rethook *rethook_alloc(void *data, rethook_handler_t handler, int size, int num);
void rethook_stop(struct rethook *rh);
void rethook_free(struct rethook *rh);
-void rethook_add_node(struct rethook *rh, struct rethook_node *node);
struct rethook_node *rethook_try_get(struct rethook *rh);
void rethook_recycle(struct rethook_node *node);
void rethook_hook(struct rethook_node *node, struct pt_regs *regs, bool mcount);
@@ -98,4 +91,3 @@ void rethook_flush_task(struct task_struct *tk);
#endif
#endif
-
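
With the objpool backend the pool is sized up front. A sketch (node type, handler and data invented; assumes struct rethook_node is embedded as the first member, as in existing users):

struct my_node {
	struct rethook_node node;	/* embedded bookkeeping, kept first */
	unsigned long cookie;
};

static struct rethook *my_setup(void *my_data, rethook_handler_t my_handler)
{
	/* 16 preallocated nodes of the caller-defined size */
	return rethook_alloc(my_data, my_handler,
			     sizeof(struct my_node), 16);
}
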
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 51cc21ebb568..b26fe858fd44 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -189,7 +189,7 @@ typedef int __bitwise rmap_t;
/*
* rmap interfaces called when adding or removing pte of page
*/
-void page_move_anon_rmap(struct page *, struct vm_area_struct *);
+void folio_move_anon_rmap(struct folio *, struct vm_area_struct *);
void page_add_anon_rmap(struct page *, struct vm_area_struct *,
unsigned long address, rmap_t flags);
void page_add_new_anon_rmap(struct page *, struct vm_area_struct *,
@@ -203,7 +203,7 @@ void folio_add_file_rmap_range(struct folio *, struct page *, unsigned int nr,
void page_remove_rmap(struct page *, struct vm_area_struct *,
bool compound);
-void hugepage_add_anon_rmap(struct page *, struct vm_area_struct *,
+void hugepage_add_anon_rmap(struct folio *, struct vm_area_struct *,
unsigned long address, rmap_t flags);
void hugepage_add_new_anon_rmap(struct folio *, struct vm_area_struct *,
unsigned long address);
diff --git a/include/linux/rtc.h b/include/linux/rtc.h
index 4c0bcbeb1f00..5f8e438a0312 100644
--- a/include/linux/rtc.h
+++ b/include/linux/rtc.h
@@ -225,6 +225,23 @@ static inline bool is_leap_year(unsigned int year)
return (!(year % 4) && (year % 100)) || !(year % 400);
}
+/**
+ * rtc_bound_alarmtime() - Return alarm time bound by rtc limit
+ * @rtc: Pointer to rtc device structure
+ * @requested: Requested alarm timeout
+ *
+ * Return: Alarm timeout bound by maximum alarm time supported by rtc.
+ */
+static inline ktime_t rtc_bound_alarmtime(struct rtc_device *rtc,
+ ktime_t requested)
+{
+ if (rtc->alarm_offset_max &&
+ rtc->alarm_offset_max * MSEC_PER_SEC < ktime_to_ms(requested))
+ return ms_to_ktime(rtc->alarm_offset_max * MSEC_PER_SEC);
+
+ return requested;
+}
+
#define devm_rtc_register_device(device) \
__devm_rtc_register_device(THIS_MODULE, device)
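
For illustration, bounding a requested two-minute alarm (values invented; the helper returns the request unchanged when it already fits):

	ktime_t want = ms_to_ktime(120 * MSEC_PER_SEC);
	ktime_t alarm = rtc_bound_alarmtime(rtc, want);

	/* alarm == want unless rtc->alarm_offset_max is smaller */
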
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 77f01ac385f7..292c31697248 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -63,7 +63,6 @@ struct robust_list_head;
struct root_domain;
struct rq;
struct sched_attr;
-struct sched_param;
struct seq_file;
struct sighand_struct;
struct signal_struct;
@@ -370,6 +369,10 @@ extern struct root_domain def_root_domain;
extern struct mutex sched_domains_mutex;
#endif
+struct sched_param {
+ int sched_priority;
+};
+
struct sched_info {
#ifdef CONFIG_SCHED_INFO
/* Cumulative counters: */
@@ -750,10 +753,8 @@ struct task_struct {
#endif
unsigned int __state;
-#ifdef CONFIG_PREEMPT_RT
/* saved state for "spinlock sleepers" */
unsigned int saved_state;
-#endif
/*
* This begins the randomizable portion of task_struct. Only
@@ -875,6 +876,7 @@ struct task_struct {
struct mm_struct *mm;
struct mm_struct *active_mm;
+ struct address_space *faults_disabled_mapping;
int exit_state;
int exit_code;
@@ -911,6 +913,9 @@ struct task_struct {
* ->sched_remote_wakeup gets used, so it can be in this word.
*/
unsigned sched_remote_wakeup:1;
+#ifdef CONFIG_RT_MUTEXES
+ unsigned sched_rt_mutex:1;
+#endif
/* Bit to tell LSMs we're in execve(): */
unsigned in_execve:1;
@@ -1002,7 +1007,6 @@ struct task_struct {
/* PID/PID hash table linkage. */
struct pid *thread_pid;
struct hlist_node pid_links[PIDTYPE_MAX];
- struct list_head thread_group;
struct list_head thread_node;
struct completion *vfork_done;
@@ -1443,6 +1447,10 @@ struct task_struct {
struct mem_cgroup *active_memcg;
#endif
+#ifdef CONFIG_MEMCG_KMEM
+ struct obj_cgroup *objcg;
+#endif
+
#ifdef CONFIG_BLK_CGROUP
struct gendisk *throttle_disk;
#endif
diff --git a/include/linux/sched/coredump.h b/include/linux/sched/coredump.h
index 0ee96ea7a0e9..02f5090ffea2 100644
--- a/include/linux/sched/coredump.h
+++ b/include/linux/sched/coredump.h
@@ -71,6 +71,7 @@ static inline int get_dumpable(struct mm_struct *mm)
#define MMF_UNSTABLE 22 /* mm is unstable for copy_from_user */
#define MMF_HUGE_ZERO_PAGE 23 /* mm has ever used the global huge zero page */
#define MMF_DISABLE_THP 24 /* disable THP for all VMAs */
+#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
#define MMF_OOM_REAP_QUEUED 25 /* mm was queued for oom_reaper */
#define MMF_MULTIPROCESS 26 /* mm is shared between processes */
/*
@@ -85,10 +86,22 @@ static inline int get_dumpable(struct mm_struct *mm)
#define MMF_HAS_MDWE 28
#define MMF_HAS_MDWE_MASK (1 << MMF_HAS_MDWE)
-#define MMF_DISABLE_THP_MASK (1 << MMF_DISABLE_THP)
+
+#define MMF_HAS_MDWE_NO_INHERIT 29
+
+#define MMF_VM_MERGE_ANY 30
+#define MMF_VM_MERGE_ANY_MASK (1 << MMF_VM_MERGE_ANY)
#define MMF_INIT_MASK (MMF_DUMPABLE_MASK | MMF_DUMP_FILTER_MASK |\
- MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK)
+ MMF_DISABLE_THP_MASK | MMF_HAS_MDWE_MASK |\
+ MMF_VM_MERGE_ANY_MASK)
+
+static inline unsigned long mmf_init_flags(unsigned long flags)
+{
+ if (flags & (1UL << MMF_HAS_MDWE_NO_INHERIT))
+ flags &= ~((1UL << MMF_HAS_MDWE) |
+ (1UL << MMF_HAS_MDWE_NO_INHERIT));
+ return flags & MMF_INIT_MASK;
+}
-#define MMF_VM_MERGE_ANY 29
#endif /* _LINUX_SCHED_COREDUMP_H */
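
A worked example of the helper's effect (values illustrative): MDWE enabled with the no-inherit variant does not survive into the child's flags:

	unsigned long parent_flags = MMF_HAS_MDWE_MASK |
				     (1UL << MMF_HAS_MDWE_NO_INHERIT);
	unsigned long child_flags = mmf_init_flags(parent_flags);

	/* child_flags now has neither MDWE bit set */
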
diff --git a/include/linux/sched/deadline.h b/include/linux/sched/deadline.h
index 7c83d4d5a971..df3aca89d4f5 100644
--- a/include/linux/sched/deadline.h
+++ b/include/linux/sched/deadline.h
@@ -1,4 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SCHED_DEADLINE_H
+#define _LINUX_SCHED_DEADLINE_H
/*
* SCHED_DEADLINE tasks has negative priorities, reflecting
@@ -34,3 +36,5 @@ extern void dl_add_task_root_domain(struct task_struct *p);
extern void dl_clear_root_domain(struct root_domain *rd);
#endif /* CONFIG_SMP */
+
+#endif /* _LINUX_SCHED_DEADLINE_H */
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 8d89c8c4fac1..9a19f1b42f64 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -403,6 +403,10 @@ DECLARE_PER_CPU(struct mem_cgroup *, int_active_memcg);
* __GFP_ACCOUNT allocations till the end of the scope will be charged to the
* given memcg.
*
+ * Please make sure that the caller holds a reference to the passed memcg
+ * structure, so that its lifetime is guaranteed to exceed the scope between
+ * the two set_active_memcg() calls.
+ *
* NOTE: This function can nest. Users must save the return value and
* reset the previous value after their own charging scope is over.
*/
diff --git a/include/linux/sched/numa_balancing.h b/include/linux/sched/numa_balancing.h
index 3988762efe15..52b22c5c396d 100644
--- a/include/linux/sched/numa_balancing.h
+++ b/include/linux/sched/numa_balancing.h
@@ -15,13 +15,23 @@
#define TNF_FAULT_LOCAL 0x08
#define TNF_MIGRATE_FAIL 0x10
+enum numa_vmaskip_reason {
+ NUMAB_SKIP_UNSUITABLE,
+ NUMAB_SKIP_SHARED_RO,
+ NUMAB_SKIP_INACCESSIBLE,
+ NUMAB_SKIP_SCAN_DELAY,
+ NUMAB_SKIP_PID_INACTIVE,
+ NUMAB_SKIP_IGNORE_PID,
+ NUMAB_SKIP_SEQ_COMPLETED,
+};
+
#ifdef CONFIG_NUMA_BALANCING
extern void task_numa_fault(int last_node, int node, int pages, int flags);
extern pid_t task_numa_group_id(struct task_struct *p);
extern void set_numabalancing_state(bool enabled);
extern void task_numa_free(struct task_struct *p, bool final);
-extern bool should_numa_migrate_memory(struct task_struct *p, struct page *page,
- int src_nid, int dst_cpu);
+bool should_numa_migrate_memory(struct task_struct *p, struct folio *folio,
+ int src_nid, int dst_cpu);
#else
static inline void task_numa_fault(int last_node, int node, int pages,
int flags)
@@ -38,7 +48,7 @@ static inline void task_numa_free(struct task_struct *p, bool final)
{
}
static inline bool should_numa_migrate_memory(struct task_struct *p,
- struct page *page, int src_nid, int dst_cpu)
+ struct folio *folio, int src_nid, int dst_cpu)
{
return true;
}
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index 994c25640e15..b2b9e6eb9683 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -30,6 +30,10 @@ static inline bool task_is_realtime(struct task_struct *tsk)
}
#ifdef CONFIG_RT_MUTEXES
+extern void rt_mutex_pre_schedule(void);
+extern void rt_mutex_schedule(void);
+extern void rt_mutex_post_schedule(void);
+
/*
* Must hold either p->pi_lock or task_rq(p)->lock.
*/
diff --git a/include/linux/sched/sd_flags.h b/include/linux/sched/sd_flags.h
index fad77b5172e2..a8b28647aafc 100644
--- a/include/linux/sched/sd_flags.h
+++ b/include/linux/sched/sd_flags.h
@@ -110,6 +110,13 @@ SD_FLAG(SD_ASYM_CPUCAPACITY_FULL, SDF_SHARED_PARENT | SDF_NEEDS_GROUPS)
SD_FLAG(SD_SHARE_CPUCAPACITY, SDF_SHARED_CHILD | SDF_NEEDS_GROUPS)
/*
+ * Domain members share a CPU cluster (LLC tags or L2 cache)
+ *
+ * NEEDS_GROUPS: Clusters are shared between groups.
+ */
+SD_FLAG(SD_CLUSTER, SDF_NEEDS_GROUPS)
+
+/*
* Domain members share CPU package resources (i.e. caches)
*
* SHARED_CHILD: Set from the base domain up until spanned CPUs no longer share
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 0014d3adaf84..3499c1a8b929 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -303,20 +303,11 @@ static inline void kernel_signal_stop(void)
schedule();
}
-#ifdef __ia64__
-# define ___ARCH_SI_IA64(_a1, _a2, _a3) , _a1, _a2, _a3
-#else
-# define ___ARCH_SI_IA64(_a1, _a2, _a3)
-#endif
-int force_sig_fault_to_task(int sig, int code, void __user *addr
- ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
- , struct task_struct *t);
-int force_sig_fault(int sig, int code, void __user *addr
- ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr));
-int send_sig_fault(int sig, int code, void __user *addr
- ___ARCH_SI_IA64(int imm, unsigned int flags, unsigned long isr)
- , struct task_struct *t);
+int force_sig_fault_to_task(int sig, int code, void __user *addr,
+ struct task_struct *t);
+int force_sig_fault(int sig, int code, void __user *addr);
+int send_sig_fault(int sig, int code, void __user *addr, struct task_struct *t);
int force_sig_mceerr(int code, void __user *, short);
int send_sig_mceerr(int code, void __user *, short, struct task_struct *);
@@ -656,7 +647,8 @@ extern bool current_is_single_threaded(void);
while ((t = next_thread(t)) != g)
#define __for_each_thread(signal, t) \
- list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node)
+ list_for_each_entry_rcu(t, &(signal)->thread_head, thread_node, \
+ lockdep_is_held(&tasklist_lock))
#define for_each_thread(p, t) \
__for_each_thread((p)->signal, t)
@@ -715,15 +707,26 @@ bool same_thread_group(struct task_struct *p1, struct task_struct *p2)
return p1->signal == p2->signal;
}
-static inline struct task_struct *next_thread(const struct task_struct *p)
+/*
+ * returns NULL if p is the last thread in the thread group
+ */
+static inline struct task_struct *__next_thread(struct task_struct *p)
+{
+ return list_next_or_null_rcu(&p->signal->thread_head,
+ &p->thread_node,
+ struct task_struct,
+ thread_node);
+}
+
+static inline struct task_struct *next_thread(struct task_struct *p)
{
- return list_entry_rcu(p->thread_group.next,
- struct task_struct, thread_group);
+ return __next_thread(p) ?: p->group_leader;
}
static inline int thread_group_empty(struct task_struct *p)
{
- return list_empty(&p->thread_group);
+ return thread_group_leader(p) &&
+ list_is_last(&p->thread_node, &p->signal->thread_head);
}
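
The NULL-terminating helper enables iteration without a sentinel compare; a sketch under RCU, starting from the group leader (inspect_thread() is invented):

	struct task_struct *t = p;	/* p: group leader */

	rcu_read_lock();
	do {
		inspect_thread(t);
	} while ((t = __next_thread(t)));
	rcu_read_unlock();
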
#define delay_group_leader(p) \
diff --git a/include/linux/sched/smt.h b/include/linux/sched/smt.h
index 59d3736c454c..fb1e295e7e63 100644
--- a/include/linux/sched/smt.h
+++ b/include/linux/sched/smt.h
@@ -17,4 +17,4 @@ static inline bool sched_smt_active(void) { return false; }
void arch_smt_update(void);
-#endif
+#endif /* _LINUX_SCHED_SMT_H */
diff --git a/include/linux/sched/topology.h b/include/linux/sched/topology.h
index 67b573d5bf28..de545ba85218 100644
--- a/include/linux/sched/topology.h
+++ b/include/linux/sched/topology.h
@@ -45,7 +45,7 @@ static inline int cpu_smt_flags(void)
#ifdef CONFIG_SCHED_CLUSTER
static inline int cpu_cluster_flags(void)
{
- return SD_SHARE_PKG_RESOURCES;
+ return SD_CLUSTER | SD_SHARE_PKG_RESOURCES;
}
#endif
@@ -109,8 +109,6 @@ struct sched_domain {
u64 max_newidle_lb_cost;
unsigned long last_decay_max_lb_cost;
- u64 avg_scan_cost; /* select_idle_sibling */
-
#ifdef CONFIG_SCHEDSTATS
/* load_balance() stats */
unsigned int lb_count[CPU_MAX_IDLE_TYPES];
@@ -179,6 +177,7 @@ cpumask_var_t *alloc_sched_domains(unsigned int ndoms);
void free_sched_domains(cpumask_var_t doms[], unsigned int ndoms);
bool cpus_share_cache(int this_cpu, int that_cpu);
+bool cpus_share_resources(int this_cpu, int that_cpu);
typedef const struct cpumask *(*sched_domain_mask_f)(int cpu);
typedef int (*sched_domain_flags_f)(void);
@@ -232,6 +231,11 @@ static inline bool cpus_share_cache(int this_cpu, int that_cpu)
return true;
}
+static inline bool cpus_share_resources(int this_cpu, int that_cpu)
+{
+ return true;
+}
+
#endif /* !CONFIG_SMP */
#if defined(CONFIG_ENERGY_MODEL) && defined(CONFIG_CPU_FREQ_GOV_SCHEDUTIL)
diff --git a/include/linux/sched/types.h b/include/linux/sched/types.h
index 3c3e049224ae..969aaf5ef9d6 100644
--- a/include/linux/sched/types.h
+++ b/include/linux/sched/types.h
@@ -20,4 +20,4 @@ struct task_cputime {
unsigned long long sum_exec_runtime;
};
-#endif
+#endif /* _LINUX_SCHED_TYPES_H */
diff --git a/include/linux/sched/vhost_task.h b/include/linux/sched/vhost_task.h
index 837a23624a66..bc60243d43b3 100644
--- a/include/linux/sched/vhost_task.h
+++ b/include/linux/sched/vhost_task.h
@@ -1,7 +1,6 @@
/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _LINUX_VHOST_TASK_H
-#define _LINUX_VHOST_TASK_H
-
+#ifndef _LINUX_SCHED_VHOST_TASK_H
+#define _LINUX_SCHED_VHOST_TASK_H
struct vhost_task;
@@ -11,4 +10,4 @@ void vhost_task_start(struct vhost_task *vtsk);
void vhost_task_stop(struct vhost_task *vtsk);
void vhost_task_wake(struct vhost_task *vtsk);
-#endif
+#endif /* _LINUX_SCHED_VHOST_TASK_H */
diff --git a/include/linux/scmi_protocol.h b/include/linux/scmi_protocol.h
index e6fe4f73ffe6..f2f05fb42d28 100644
--- a/include/linux/scmi_protocol.h
+++ b/include/linux/scmi_protocol.h
@@ -58,6 +58,8 @@ struct scmi_clock_info {
u64 step_size;
} range;
};
+ int num_parents;
+ u32 *parents;
};
enum scmi_power_scale {
@@ -80,6 +82,11 @@ struct scmi_protocol_handle;
* @rate_set: set the clock rate of a clock
* @enable: enables the specified clock
* @disable: disables the specified clock
+ * @state_get: get the status of the specified clock
+ * @config_oem_get: get the value of an OEM-specific clock config
+ * @config_oem_set: set the value of an OEM-specific clock config
+ * @parent_get: get the parent id of a clock
+ * @parent_set: set the parent of a clock
*/
struct scmi_clk_proto_ops {
int (*count_get)(const struct scmi_protocol_handle *ph);
@@ -90,22 +97,36 @@ struct scmi_clk_proto_ops {
u64 *rate);
int (*rate_set)(const struct scmi_protocol_handle *ph, u32 clk_id,
u64 rate);
- int (*enable)(const struct scmi_protocol_handle *ph, u32 clk_id);
- int (*disable)(const struct scmi_protocol_handle *ph, u32 clk_id);
- int (*enable_atomic)(const struct scmi_protocol_handle *ph, u32 clk_id);
- int (*disable_atomic)(const struct scmi_protocol_handle *ph,
- u32 clk_id);
+ int (*enable)(const struct scmi_protocol_handle *ph, u32 clk_id,
+ bool atomic);
+ int (*disable)(const struct scmi_protocol_handle *ph, u32 clk_id,
+ bool atomic);
+ int (*state_get)(const struct scmi_protocol_handle *ph, u32 clk_id,
+ bool *enabled, bool atomic);
+ int (*config_oem_get)(const struct scmi_protocol_handle *ph, u32 clk_id,
+ u8 oem_type, u32 *oem_val, u32 *attributes,
+ bool atomic);
+ int (*config_oem_set)(const struct scmi_protocol_handle *ph, u32 clk_id,
+ u8 oem_type, u32 oem_val, bool atomic);
+ int (*parent_get)(const struct scmi_protocol_handle *ph, u32 clk_id, u32 *parent_id);
+ int (*parent_set)(const struct scmi_protocol_handle *ph, u32 clk_id, u32 parent_id);
+};
+
+struct scmi_perf_domain_info {
+ char name[SCMI_MAX_STR_SIZE];
+ bool set_perf;
};
/**
* struct scmi_perf_proto_ops - represents the various operations provided
* by SCMI Performance Protocol
*
+ * @num_domains_get: gets the number of supported performance domains
+ * @info_get: gets the information of a performance domain
* @limits_set: sets limits on the performance level of a domain
* @limits_get: gets limits on the performance level of a domain
* @level_set: sets the performance level of a domain
* @level_get: gets the performance level of a domain
- * @device_domain_id: gets the scmi domain id for a given device
* @transition_latency_get: gets the DVFS transition latency for a given device
* @device_opps_add: adds all the OPPs for a given device
* @freq_set: sets the frequency for a given device using sustained frequency
@@ -120,6 +141,9 @@ struct scmi_clk_proto_ops {
* or in some other (abstract) scale
*/
struct scmi_perf_proto_ops {
+ int (*num_domains_get)(const struct scmi_protocol_handle *ph);
+ const struct scmi_perf_domain_info __must_check *(*info_get)
+ (const struct scmi_protocol_handle *ph, u32 domain);
int (*limits_set)(const struct scmi_protocol_handle *ph, u32 domain,
u32 max_perf, u32 min_perf);
int (*limits_get)(const struct scmi_protocol_handle *ph, u32 domain,
@@ -128,11 +152,10 @@ struct scmi_perf_proto_ops {
u32 level, bool poll);
int (*level_get)(const struct scmi_protocol_handle *ph, u32 domain,
u32 *level, bool poll);
- int (*device_domain_id)(struct device *dev);
int (*transition_latency_get)(const struct scmi_protocol_handle *ph,
- struct device *dev);
+ u32 domain);
int (*device_opps_add)(const struct scmi_protocol_handle *ph,
- struct device *dev);
+ struct device *dev, u32 domain);
int (*freq_set)(const struct scmi_protocol_handle *ph, u32 domain,
unsigned long rate, bool poll);
int (*freq_get)(const struct scmi_protocol_handle *ph, u32 domain,
@@ -140,7 +163,7 @@ struct scmi_perf_proto_ops {
int (*est_power_get)(const struct scmi_protocol_handle *ph, u32 domain,
unsigned long *rate, unsigned long *power);
bool (*fast_switch_possible)(const struct scmi_protocol_handle *ph,
- struct device *dev);
+ u32 domain);
enum scmi_power_scale (*power_scale_get)(const struct scmi_protocol_handle *ph);
};
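
A domain-enumeration sketch against the reworked ops (perf_ops/ph come from the protocol handle; info_get() is assumed to return an ERR_PTR on failure, hence the IS_ERR() check):

	int i, num = perf_ops->num_domains_get(ph);

	for (i = 0; i < num; i++) {
		const struct scmi_perf_domain_info *info;

		info = perf_ops->info_get(ph, i);
		if (IS_ERR(info))
			continue;
		if (info->set_perf)
			pr_debug("perf domain %d (%s) allows level set\n",
				 i, info->name);
	}
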
diff --git a/include/linux/security.h b/include/linux/security.h
index 5f16eecde00b..1d1df326c881 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -151,7 +151,7 @@ extern int cap_capset(struct cred *new, const struct cred *old,
const kernel_cap_t *effective,
const kernel_cap_t *inheritable,
const kernel_cap_t *permitted);
-extern int cap_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file);
+extern int cap_bprm_creds_from_file(struct linux_binprm *bprm, const struct file *file);
int cap_inode_setxattr(struct dentry *dentry, const char *name,
const void *value, size_t size, int flags);
int cap_inode_removexattr(struct mnt_idmap *idmap,
@@ -284,16 +284,16 @@ int security_capable(const struct cred *cred,
struct user_namespace *ns,
int cap,
unsigned int opts);
-int security_quotactl(int cmds, int type, int id, struct super_block *sb);
+int security_quotactl(int cmds, int type, int id, const struct super_block *sb);
int security_quota_on(struct dentry *dentry);
int security_syslog(int type);
int security_settime64(const struct timespec64 *ts, const struct timezone *tz);
int security_vm_enough_memory_mm(struct mm_struct *mm, long pages);
int security_bprm_creds_for_exec(struct linux_binprm *bprm);
-int security_bprm_creds_from_file(struct linux_binprm *bprm, struct file *file);
+int security_bprm_creds_from_file(struct linux_binprm *bprm, const struct file *file);
int security_bprm_check(struct linux_binprm *bprm);
-void security_bprm_committing_creds(struct linux_binprm *bprm);
-void security_bprm_committed_creds(struct linux_binprm *bprm);
+void security_bprm_committing_creds(const struct linux_binprm *bprm);
+void security_bprm_committed_creds(const struct linux_binprm *bprm);
int security_fs_context_submount(struct fs_context *fc, struct super_block *reference);
int security_fs_context_dup(struct fs_context *fc, struct fs_context *src_fc);
int security_fs_context_parse_param(struct fs_context *fc, struct fs_parameter *param);
@@ -304,7 +304,7 @@ void security_free_mnt_opts(void **mnt_opts);
int security_sb_eat_lsm_opts(char *options, void **mnt_opts);
int security_sb_mnt_opts_compat(struct super_block *sb, void *mnt_opts);
int security_sb_remount(struct super_block *sb, void *mnt_opts);
-int security_sb_kern_mount(struct super_block *sb);
+int security_sb_kern_mount(const struct super_block *sb);
int security_sb_show_options(struct seq_file *m, struct super_block *sb);
int security_sb_statfs(struct dentry *dentry);
int security_sb_mount(const char *dev_name, const struct path *path,
@@ -581,7 +581,7 @@ static inline int security_capable(const struct cred *cred,
}
static inline int security_quotactl(int cmds, int type, int id,
- struct super_block *sb)
+ const struct super_block *sb)
{
return 0;
}
@@ -613,7 +613,7 @@ static inline int security_bprm_creds_for_exec(struct linux_binprm *bprm)
}
static inline int security_bprm_creds_from_file(struct linux_binprm *bprm,
- struct file *file)
+ const struct file *file)
{
return cap_bprm_creds_from_file(bprm, file);
}
@@ -623,11 +623,11 @@ static inline int security_bprm_check(struct linux_binprm *bprm)
return 0;
}
-static inline void security_bprm_committing_creds(struct linux_binprm *bprm)
+static inline void security_bprm_committing_creds(const struct linux_binprm *bprm)
{
}
-static inline void security_bprm_committed_creds(struct linux_binprm *bprm)
+static inline void security_bprm_committed_creds(const struct linux_binprm *bprm)
{
}
diff --git a/include/linux/sed-opal-key.h b/include/linux/sed-opal-key.h
new file mode 100644
index 000000000000..0ca03054e8f6
--- /dev/null
+++ b/include/linux/sed-opal-key.h
@@ -0,0 +1,26 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * SED key operations.
+ *
+ * Copyright (C) 2023 IBM Corporation
+ *
+ * These are the accessor functions (read/write) for SED Opal
+ * keys. Specific keystores can provide overrides.
+ *
+ */
+
+#include <linux/kernel.h>
+
+#ifdef CONFIG_PSERIES_PLPKS_SED
+int sed_read_key(char *keyname, char *key, u_int *keylen);
+int sed_write_key(char *keyname, char *key, u_int keylen);
+#else
+static inline
+int sed_read_key(char *keyname, char *key, u_int *keylen)
+{
+	return -EOPNOTSUPP;
+}
+
+static inline
+int sed_write_key(char *keyname, char *key, u_int keylen)
+{
+	return -EOPNOTSUPP;
+}
+#endif
diff --git a/include/linux/seq_buf.h b/include/linux/seq_buf.h
index 515d7fcb9634..5fb1f12c33f9 100644
--- a/include/linux/seq_buf.h
+++ b/include/linux/seq_buf.h
@@ -14,19 +14,25 @@
* @buffer: pointer to the buffer
* @size: size of the buffer
* @len: the amount of data inside the buffer
- * @readpos: The next position to read in the buffer.
*/
struct seq_buf {
char *buffer;
size_t size;
size_t len;
- loff_t readpos;
};
+#define DECLARE_SEQ_BUF(NAME, SIZE) \
+ char __ ## NAME ## _buffer[SIZE] = ""; \
+ struct seq_buf NAME = { \
+		.buffer = __ ## NAME ## _buffer,	\
+ .size = SIZE, \
+ }
+
static inline void seq_buf_clear(struct seq_buf *s)
{
s->len = 0;
- s->readpos = 0;
+ if (s->size)
+ s->buffer[0] = '\0';
}
static inline void
@@ -39,7 +45,7 @@ seq_buf_init(struct seq_buf *s, char *buf, unsigned int size)
/*
* seq_buf have a buffer that might overflow. When this happens
- * the len and size are set to be equal.
+ * len is set to be greater than size.
*/
static inline bool
seq_buf_has_overflowed(struct seq_buf *s)
@@ -72,8 +78,8 @@ static inline unsigned int seq_buf_used(struct seq_buf *s)
}
/**
- * seq_buf_terminate - Make sure buffer is nul terminated
- * @s: the seq_buf descriptor to terminate.
+ * seq_buf_str - get %NUL-terminated C string from seq_buf
+ * @s: the seq_buf handle
*
* This makes sure that the buffer in @s is nul terminated and
* safe to read as a string.
@@ -84,16 +90,20 @@ static inline unsigned int seq_buf_used(struct seq_buf *s)
*
* After this function is called, s->buffer is safe to use
* in string operations.
+ *
+ * Returns @s->buffer after making sure it is terminated.
*/
-static inline void seq_buf_terminate(struct seq_buf *s)
+static inline const char *seq_buf_str(struct seq_buf *s)
{
if (WARN_ON(s->size == 0))
- return;
+ return "";
if (seq_buf_buffer_left(s))
s->buffer[s->len] = 0;
else
s->buffer[s->size - 1] = 0;
+
+ return s->buffer;
}
/**
@@ -143,7 +153,7 @@ extern __printf(2, 0)
int seq_buf_vprintf(struct seq_buf *s, const char *fmt, va_list args);
extern int seq_buf_print_seq(struct seq_file *m, struct seq_buf *s);
extern int seq_buf_to_user(struct seq_buf *s, char __user *ubuf,
- int cnt);
+ size_t start, int cnt);
extern int seq_buf_puts(struct seq_buf *s, const char *str);
extern int seq_buf_putc(struct seq_buf *s, unsigned char c);
extern int seq_buf_putmem(struct seq_buf *s, const void *mem, unsigned int len);
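
A small sketch combining the new declaration macro with the string accessor:

	DECLARE_SEQ_BUF(sb, 32);

	seq_buf_puts(&sb, "hello ");
	seq_buf_puts(&sb, "world");
	pr_info("%s\n", seq_buf_str(&sb));	/* always NUL-terminated */
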
diff --git a/include/linux/seq_file.h b/include/linux/seq_file.h
index 386ab580b839..234bcdb1fba4 100644
--- a/include/linux/seq_file.h
+++ b/include/linux/seq_file.h
@@ -207,6 +207,21 @@ static const struct file_operations __name ## _fops = { \
.release = single_release, \
}
+#define DEFINE_SHOW_STORE_ATTRIBUTE(__name) \
+static int __name ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __name ## _show, inode->i_private); \
+} \
+ \
+static const struct file_operations __name ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __name ## _open, \
+ .read = seq_read, \
+ .write = __name ## _write, \
+ .llseek = seq_lseek, \
+ .release = single_release, \
+}
+
#define DEFINE_PROC_SHOW_ATTRIBUTE(__name) \
static int __name ## _open(struct inode *inode, struct file *file) \
{ \
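
The new macro pairs a generated open with a caller-supplied __name##_write. A hypothetical read/write debugfs attribute (names invented):

static int myval;

static int myval_show(struct seq_file *m, void *v)
{
	seq_printf(m, "%d\n", myval);
	return 0;
}

static ssize_t myval_write(struct file *file, const char __user *ubuf,
			   size_t count, loff_t *ppos)
{
	int ret = kstrtoint_from_user(ubuf, count, 0, &myval);

	return ret ? ret : count;
}

DEFINE_SHOW_STORE_ATTRIBUTE(myval);

/* then: debugfs_create_file("myval", 0644, parent, NULL, &myval_fops); */
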
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 987a59d977c5..e92f9d5577ba 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -191,11 +191,9 @@ static inline void seqcount_lockdep_reader_access(const seqcount_t *s)
* @lockname: "LOCKNAME" part of seqcount_LOCKNAME_t
* @locktype: LOCKNAME canonical C data type
* @preemptible: preemptibility of above locktype
- * @lockmember: argument for lockdep_assert_held()
- * @lockbase: associated lock release function (prefix only)
- * @lock_acquire: associated lock acquisition function (full call)
+ * @lockbase: prefix for associated lock/unlock
*/
-#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockmember, lockbase, lock_acquire) \
+#define SEQCOUNT_LOCKNAME(lockname, locktype, preemptible, lockbase) \
typedef struct seqcount_##lockname { \
seqcount_t seqcount; \
__SEQ_LOCK(locktype *lock); \
@@ -207,6 +205,12 @@ __seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \
return &s->seqcount; \
} \
\
+static __always_inline const seqcount_t * \
+__seqprop_##lockname##_const_ptr(const seqcount_##lockname##_t *s) \
+{ \
+ return &s->seqcount; \
+} \
+ \
static __always_inline unsigned \
__seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
{ \
@@ -216,7 +220,7 @@ __seqprop_##lockname##_sequence(const seqcount_##lockname##_t *s) \
return seq; \
\
if (preemptible && unlikely(seq & 1)) { \
- __SEQ_LOCK(lock_acquire); \
+ __SEQ_LOCK(lockbase##_lock(s->lock)); \
__SEQ_LOCK(lockbase##_unlock(s->lock)); \
\
/* \
@@ -242,7 +246,7 @@ __seqprop_##lockname##_preemptible(const seqcount_##lockname##_t *s) \
static __always_inline void \
__seqprop_##lockname##_assert(const seqcount_##lockname##_t *s) \
{ \
- __SEQ_LOCK(lockdep_assert_held(lockmember)); \
+ __SEQ_LOCK(lockdep_assert_held(s->lock)); \
}
/*
@@ -254,6 +258,11 @@ static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
return s;
}
+static inline const seqcount_t *__seqprop_const_ptr(const seqcount_t *s)
+{
+ return s;
+}
+
static inline unsigned __seqprop_sequence(const seqcount_t *s)
{
return READ_ONCE(s->sequence);
@@ -271,10 +280,10 @@ static inline void __seqprop_assert(const seqcount_t *s)
#define __SEQ_RT IS_ENABLED(CONFIG_PREEMPT_RT)
-SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, s->lock, raw_spin, raw_spin_lock(s->lock))
-SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, s->lock, spin, spin_lock(s->lock))
-SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, s->lock, read, read_lock(s->lock))
-SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex, mutex_lock(s->lock))
+SEQCOUNT_LOCKNAME(raw_spinlock, raw_spinlock_t, false, raw_spin)
+SEQCOUNT_LOCKNAME(spinlock, spinlock_t, __SEQ_RT, spin)
+SEQCOUNT_LOCKNAME(rwlock, rwlock_t, __SEQ_RT, read)
+SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
/*
* SEQCNT_LOCKNAME_ZERO - static initializer for seqcount_LOCKNAME_t
@@ -294,19 +303,20 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex
#define SEQCNT_WW_MUTEX_ZERO(name, lock) SEQCOUNT_LOCKNAME_ZERO(name, lock)
#define __seqprop_case(s, lockname, prop) \
- seqcount_##lockname##_t: __seqprop_##lockname##_##prop((void *)(s))
+ seqcount_##lockname##_t: __seqprop_##lockname##_##prop
#define __seqprop(s, prop) _Generic(*(s), \
- seqcount_t: __seqprop_##prop((void *)(s)), \
+ seqcount_t: __seqprop_##prop, \
__seqprop_case((s), raw_spinlock, prop), \
__seqprop_case((s), spinlock, prop), \
__seqprop_case((s), rwlock, prop), \
__seqprop_case((s), mutex, prop))
-#define seqprop_ptr(s) __seqprop(s, ptr)
-#define seqprop_sequence(s) __seqprop(s, sequence)
-#define seqprop_preemptible(s) __seqprop(s, preemptible)
-#define seqprop_assert(s) __seqprop(s, assert)
+#define seqprop_ptr(s) __seqprop(s, ptr)(s)
+#define seqprop_const_ptr(s) __seqprop(s, const_ptr)(s)
+#define seqprop_sequence(s) __seqprop(s, sequence)(s)
+#define seqprop_preemptible(s) __seqprop(s, preemptible)(s)
+#define seqprop_assert(s) __seqprop(s, assert)(s)
/**
* __read_seqcount_begin() - begin a seqcount_t read section w/o barrier
@@ -355,7 +365,7 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex
*/
#define read_seqcount_begin(s) \
({ \
- seqcount_lockdep_reader_access(seqprop_ptr(s)); \
+ seqcount_lockdep_reader_access(seqprop_const_ptr(s)); \
raw_read_seqcount_begin(s); \
})
@@ -421,7 +431,7 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, s->lock, mutex
* Return: true if a read section retry is required, else false
*/
#define __read_seqcount_retry(s, start) \
- do___read_seqcount_retry(seqprop_ptr(s), start)
+ do___read_seqcount_retry(seqprop_const_ptr(s), start)
static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
{
@@ -441,7 +451,7 @@ static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
* Return: true if a read section retry is required, else false
*/
#define read_seqcount_retry(s, start) \
- do_read_seqcount_retry(seqprop_ptr(s), start)
+ do_read_seqcount_retry(seqprop_const_ptr(s), start)
static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
{
@@ -512,8 +522,8 @@ do { \
static inline void do_write_seqcount_begin_nested(seqcount_t *s, int subclass)
{
- do_raw_write_seqcount_begin(s);
seqcount_acquire(&s->dep_map, subclass, 0, _RET_IP_);
+ do_raw_write_seqcount_begin(s);
}
/**
@@ -574,7 +584,7 @@ static inline void do_write_seqcount_end(seqcount_t *s)
* via WRITE_ONCE): a) to ensure the writes become visible to other threads
* atomically, avoiding compiler optimizations; b) to document which writes are
* meant to propagate to the reader critical section. This is necessary because
- * neither writes before and after the barrier are enclosed in a seq-writer
+ * neither writes before nor after the barrier are enclosed in a seq-writer
* critical section that would ensure readers are aware of ongoing writes::
*
* seqcount_t seq;
@@ -864,7 +874,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
}
/*
- * For all seqlock_t write side functions, use the the internal
+ * For all seqlock_t write side functions, use the internal
* do_write_seqcount_begin() instead of generic write_seqcount_begin().
* This way, no redundant lockdep_assert_held() checks are added.
*/
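
Reader-side usage is unchanged by the const-propagation rework; a spinlock-associated seqcount still reads like this sketch (names invented):

static DEFINE_SPINLOCK(my_lock);
static seqcount_spinlock_t my_seq = SEQCNT_SPINLOCK_ZERO(my_seq, &my_lock);

static u64 read_shared(const u64 *shared)
{
	unsigned seq;
	u64 val;

	do {
		seq = read_seqcount_begin(&my_seq);	/* const-qualified path */
		val = *shared;
	} while (read_seqcount_retry(&my_seq, seq));

	return val;
}
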
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index bb6f073bc159..89f7b6c63598 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -588,6 +588,85 @@ struct uart_port {
void *private_data; /* generic platform data pointer */
};
+/**
+ * uart_port_lock - Lock the UART port
+ * @up: Pointer to UART port structure
+ */
+static inline void uart_port_lock(struct uart_port *up)
+{
+ spin_lock(&up->lock);
+}
+
+/**
+ * uart_port_lock_irq - Lock the UART port and disable interrupts
+ * @up: Pointer to UART port structure
+ */
+static inline void uart_port_lock_irq(struct uart_port *up)
+{
+ spin_lock_irq(&up->lock);
+}
+
+/**
+ * uart_port_lock_irqsave - Lock the UART port, save and disable interrupts
+ * @up: Pointer to UART port structure
+ * @flags: Pointer to interrupt flags storage
+ */
+static inline void uart_port_lock_irqsave(struct uart_port *up, unsigned long *flags)
+{
+ spin_lock_irqsave(&up->lock, *flags);
+}
+
+/**
+ * uart_port_trylock - Try to lock the UART port
+ * @up: Pointer to UART port structure
+ *
+ * Returns: True if lock was acquired, false otherwise
+ */
+static inline bool uart_port_trylock(struct uart_port *up)
+{
+ return spin_trylock(&up->lock);
+}
+
+/**
+ * uart_port_trylock_irqsave - Try to lock the UART port, save and disable interrupts
+ * @up: Pointer to UART port structure
+ * @flags: Pointer to interrupt flags storage
+ *
+ * Returns: True if lock was acquired, false otherwise
+ */
+static inline bool uart_port_trylock_irqsave(struct uart_port *up, unsigned long *flags)
+{
+ return spin_trylock_irqsave(&up->lock, *flags);
+}
+
+/**
+ * uart_port_unlock - Unlock the UART port
+ * @up: Pointer to UART port structure
+ */
+static inline void uart_port_unlock(struct uart_port *up)
+{
+ spin_unlock(&up->lock);
+}
+
+/**
+ * uart_port_unlock_irq - Unlock the UART port and re-enable interrupts
+ * @up: Pointer to UART port structure
+ */
+static inline void uart_port_unlock_irq(struct uart_port *up)
+{
+ spin_unlock_irq(&up->lock);
+}
+
+/**
+ * uart_port_unlock_irqrestore - Unlock the UART port, restore interrupts
+ * @up: Pointer to UART port structure
+ * @flags: The saved interrupt flags for restore
+ */
+static inline void uart_port_unlock_irqrestore(struct uart_port *up, unsigned long flags)
+{
+ spin_unlock_irqrestore(&up->lock, flags);
+}
+
static inline int serial_port_in(struct uart_port *up, int offset)
{
return up->serial_in(up, offset);
@@ -956,14 +1035,14 @@ static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
u8 sysrq_ch;
if (!port->has_sysrq) {
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
return;
}
sysrq_ch = port->sysrq_ch;
port->sysrq_ch = 0;
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
if (sysrq_ch)
handle_sysrq(sysrq_ch);
@@ -975,14 +1054,14 @@ static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port
u8 sysrq_ch;
if (!port->has_sysrq) {
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
return;
}
sysrq_ch = port->sysrq_ch;
port->sysrq_ch = 0;
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
if (sysrq_ch)
handle_sysrq(sysrq_ch);
@@ -998,12 +1077,12 @@ static inline int uart_prepare_sysrq_char(struct uart_port *port, u8 ch)
}
static inline void uart_unlock_and_check_sysrq(struct uart_port *port)
{
- spin_unlock(&port->lock);
+ uart_port_unlock(port);
}
static inline void uart_unlock_and_check_sysrq_irqrestore(struct uart_port *port,
unsigned long flags)
{
- spin_unlock_irqrestore(&port->lock, flags);
+ uart_port_unlock_irqrestore(port, flags);
}
#endif /* CONFIG_MAGIC_SYSRQ_SERIAL */
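
A driver-path sketch converted to the new wrappers (my_uart_poke is invented); behavior is identical to the open-coded spinlock calls they wrap:

static void my_uart_poke(struct uart_port *port)
{
	unsigned long flags;

	uart_port_lock_irqsave(port, &flags);
	/* ... touch port registers ... */
	uart_port_unlock_irqrestore(port, flags);
}
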
diff --git a/include/linux/shmem_fs.h b/include/linux/shmem_fs.h
index 6b0c626620f5..2caa6b86106a 100644
--- a/include/linux/shmem_fs.h
+++ b/include/linux/shmem_fs.h
@@ -23,18 +23,22 @@ struct shmem_inode_info {
unsigned long flags;
unsigned long alloced; /* data pages alloced to file */
unsigned long swapped; /* subtotal assigned to swap */
- pgoff_t fallocend; /* highest fallocate endindex */
- struct list_head shrinklist; /* shrinkable hpage inodes */
- struct list_head swaplist; /* chain of maybes on swap */
+ union {
+ struct offset_ctx dir_offsets; /* stable directory offsets */
+ struct {
+ struct list_head shrinklist; /* shrinkable hpage inodes */
+ struct list_head swaplist; /* chain of maybes on swap */
+ };
+ };
+ struct timespec64 i_crtime; /* file creation time */
struct shared_policy policy; /* NUMA memory alloc policy */
struct simple_xattrs xattrs; /* list of xattrs */
+ pgoff_t fallocend; /* highest fallocate endindex */
+ unsigned int fsflags; /* for FS_IOC_[SG]ETFLAGS */
atomic_t stop_eviction; /* hold when working on inode */
- struct timespec64 i_crtime; /* file creation time */
- unsigned int fsflags; /* flags for FS_IOC_[SG]ETFLAGS */
#ifdef CONFIG_TMPFS_QUOTA
struct dquot *i_dquot[MAXQUOTAS];
#endif
- struct offset_ctx dir_offsets; /* stable entry offsets */
struct inode vfs_inode;
};
diff --git a/include/linux/shrinker.h b/include/linux/shrinker.h
index 224293b2dd06..1a00be90d93a 100644
--- a/include/linux/shrinker.h
+++ b/include/linux/shrinker.h
@@ -4,6 +4,25 @@
#include <linux/atomic.h>
#include <linux/types.h>
+#include <linux/refcount.h>
+#include <linux/completion.h>
+
+#define SHRINKER_UNIT_BITS BITS_PER_LONG
+
+/*
+ * Bitmap and deferred work of shrinker::id corresponding to memcg-aware
+ * shrinkers, which have elements charged to the memcg.
+ */
+struct shrinker_info_unit {
+ atomic_long_t nr_deferred[SHRINKER_UNIT_BITS];
+ DECLARE_BITMAP(map, SHRINKER_UNIT_BITS);
+};
+
+struct shrinker_info {
+ struct rcu_head rcu;
+ int map_nr_max;
+ struct shrinker_info_unit *unit[];
+};
/*
* This struct is used to pass information from page reclaim to the shrinkers.
@@ -70,6 +89,19 @@ struct shrinker {
int seeks; /* seeks to recreate an obj */
unsigned flags;
+ /*
+ * The reference count of this shrinker. A registered shrinker has an
+ * initial refcount of 1; lookup operations may then take extra
+ * references via shrinker_try_get(). During unregistration the
+ * initial refcount is dropped, and the shrinker is freed
+ * asynchronously via RCU once its refcount reaches 0.
+ */
+ refcount_t refcount;
+ struct completion done; /* used to wait for refcount to reach 0 */
+ struct rcu_head rcu;
+
+ void *private_data;
+
/* These are for internal use */
struct list_head list;
#ifdef CONFIG_MEMCG
@@ -86,48 +118,39 @@ struct shrinker {
};
#define DEFAULT_SEEKS 2 /* A good number if you don't know better. */
-/* Flags */
-#define SHRINKER_REGISTERED (1 << 0)
-#define SHRINKER_NUMA_AWARE (1 << 1)
-#define SHRINKER_MEMCG_AWARE (1 << 2)
+/* Internal flags */
+#define SHRINKER_REGISTERED BIT(0)
+#define SHRINKER_ALLOCATED BIT(1)
+
+/* Flags for users to use */
+#define SHRINKER_NUMA_AWARE BIT(2)
+#define SHRINKER_MEMCG_AWARE BIT(3)
/*
 * It just makes sense when the shrinker is also MEMCG_AWARE for now;
 * a non-MEMCG_AWARE shrinker should not have this flag set.
*/
-#define SHRINKER_NONSLAB (1 << 3)
+#define SHRINKER_NONSLAB BIT(4)
-extern int __printf(2, 3) prealloc_shrinker(struct shrinker *shrinker,
- const char *fmt, ...);
-extern void register_shrinker_prepared(struct shrinker *shrinker);
-extern int __printf(2, 3) register_shrinker(struct shrinker *shrinker,
- const char *fmt, ...);
-extern void unregister_shrinker(struct shrinker *shrinker);
-extern void free_prealloced_shrinker(struct shrinker *shrinker);
-extern void synchronize_shrinkers(void);
+__printf(2, 3)
+struct shrinker *shrinker_alloc(unsigned int flags, const char *fmt, ...);
+void shrinker_register(struct shrinker *shrinker);
+void shrinker_free(struct shrinker *shrinker);
-#ifdef CONFIG_SHRINKER_DEBUG
-extern int shrinker_debugfs_add(struct shrinker *shrinker);
-extern struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
- int *debugfs_id);
-extern void shrinker_debugfs_remove(struct dentry *debugfs_entry,
- int debugfs_id);
-extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker,
- const char *fmt, ...);
-#else /* CONFIG_SHRINKER_DEBUG */
-static inline int shrinker_debugfs_add(struct shrinker *shrinker)
-{
- return 0;
-}
-static inline struct dentry *shrinker_debugfs_detach(struct shrinker *shrinker,
- int *debugfs_id)
+static inline bool shrinker_try_get(struct shrinker *shrinker)
{
- *debugfs_id = -1;
- return NULL;
+ return refcount_inc_not_zero(&shrinker->refcount);
}
-static inline void shrinker_debugfs_remove(struct dentry *debugfs_entry,
- int debugfs_id)
+
+static inline void shrinker_put(struct shrinker *shrinker)
{
+ if (refcount_dec_and_test(&shrinker->refcount))
+ complete(&shrinker->done);
}
+
+#ifdef CONFIG_SHRINKER_DEBUG
+extern int __printf(2, 3) shrinker_debugfs_rename(struct shrinker *shrinker,
+ const char *fmt, ...);
+#else /* CONFIG_SHRINKER_DEBUG */
static inline __printf(2, 3)
int shrinker_debugfs_rename(struct shrinker *shrinker, const char *fmt, ...)
{
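Editor's sketch: taken together, the hunks above replace the prealloc/register/unregister trio with an allocate-register-free lifecycle on a refcounted, RCU-freed shrinker object. A minimal sketch of the new registration flow under that reading; the callbacks, cache name, and module hooks are hypothetical:

#include <linux/shrinker.h>

static unsigned long demo_count(struct shrinker *shrink,
				struct shrink_control *sc)
{
	return 0;		/* sketch: nothing reclaimable */
}

static unsigned long demo_scan(struct shrinker *shrink,
			       struct shrink_control *sc)
{
	return SHRINK_STOP;	/* sketch: never frees anything */
}

static struct shrinker *demo_shrinker;

static int demo_init(void)
{
	demo_shrinker = shrinker_alloc(0, "demo-cache");
	if (!demo_shrinker)
		return -ENOMEM;

	demo_shrinker->count_objects = demo_count;
	demo_shrinker->scan_objects = demo_scan;
	demo_shrinker->seeks = DEFAULT_SEEKS;

	shrinker_register(demo_shrinker);	/* refcount becomes live */
	return 0;
}

static void demo_exit(void)
{
	shrinker_free(demo_shrinker);	/* unregisters and frees via RCU */
}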
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index 4174c4b82d13..27998f73183e 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -1309,7 +1309,7 @@ struct sk_buff_fclones {
*
* Returns true if skb is a fast clone, and its clone is not freed.
* Some drivers call skb_orphan() in their ndo_start_xmit(),
- * so we also check that this didnt happen.
+ * so we also check that this didn't happen.
*/
static inline bool skb_fclone_busy(const struct sock *sk,
const struct sk_buff *skb)
@@ -2016,7 +2016,7 @@ static inline struct sk_buff *skb_share_check(struct sk_buff *skb, gfp_t pri)
* Copy shared buffers into a new sk_buff. We effectively do COW on
* packets to handle cases where we have a local reader and forward
* and a couple of other messy ones. The normal one is tcpdumping
- * a packet thats being forwarded.
+ * a packet that's being forwarded.
*/
/**
@@ -3679,6 +3679,9 @@ static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int l
return __skb_put_padto(skb, len, true);
}
+bool csum_and_copy_from_iter_full(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i)
+ __must_check;
+
static inline int skb_add_data(struct sk_buff *skb,
struct iov_iter *from, int copy)
{
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 8228d1276a2f..d6d6ffeeb9a2 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -245,8 +245,9 @@ DEFINE_FREE(kfree, void *, if (_T) kfree(_T))
size_t ksize(const void *objp);
#ifdef CONFIG_PRINTK
-bool kmem_valid_obj(void *object);
-void kmem_dump_obj(void *object);
+bool kmem_dump_obj(void *object);
+#else
+static inline bool kmem_dump_obj(void *object) { return false; }
#endif
/*
@@ -763,6 +764,8 @@ static inline __alloc_size(1, 2) void *kvcalloc(size_t n, size_t size, gfp_t fla
extern void *kvrealloc(const void *p, size_t oldsize, size_t newsize, gfp_t flags)
__realloc_size(3);
extern void kvfree(const void *addr);
+DEFINE_FREE(kvfree, void *, if (_T) kvfree(_T))
+
extern void kvfree_sensitive(const void *addr, size_t len);
unsigned int kmem_cache_size(struct kmem_cache *s);
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 91ea4a67f8ca..e87520dc2959 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -53,7 +53,7 @@ int smp_call_function_single(int cpuid, smp_call_func_t func, void *info,
void on_each_cpu_cond_mask(smp_cond_func_t cond_func, smp_call_func_t func,
void *info, bool wait, const struct cpumask *mask);
-int smp_call_function_single_async(int cpu, struct __call_single_data *csd);
+int smp_call_function_single_async(int cpu, call_single_data_t *csd);
/*
* Cpus stopping functions in panic. All have default weak definitions.
diff --git a/include/linux/soc/mediatek/infracfg.h b/include/linux/soc/mediatek/infracfg.h
index 07f67b3d8e97..6c6cccc848f4 100644
--- a/include/linux/soc/mediatek/infracfg.h
+++ b/include/linux/soc/mediatek/infracfg.h
@@ -2,6 +2,47 @@
#ifndef __SOC_MEDIATEK_INFRACFG_H
#define __SOC_MEDIATEK_INFRACFG_H
+#define MT8365_INFRA_TOPAXI_PROTECTEN_STA1 0x228
+#define MT8365_INFRA_TOPAXI_PROTECTEN_SET 0x2a0
+#define MT8365_INFRA_TOPAXI_PROTECTEN_CLR 0x2a4
+#define MT8365_INFRA_TOPAXI_PROTECTEN_MM_M0 BIT(1)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_MDMCU_M1 BIT(2)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_MMAPB_S BIT(6)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_MM2INFRA_AXI_GALS_SLV_0 BIT(10)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_MM2INFRA_AXI_GALS_SLV_1 BIT(11)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_AP2CONN_AHB BIT(13)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_CONN2INFRA_AHB BIT(14)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_MFG_M0 BIT(21)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_INFRA2MFG BIT(22)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_STA1 0x258
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_SET 0x2a8
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_CLR 0x2ac
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_APU2AP BIT(2)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_MM2INFRA_AXI_GALS_MST_0 BIT(16)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_MM2INFRA_AXI_GALS_MST_1 BIT(17)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_CONN2INFRA_AXI_GALS_MST BIT(18)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_CAM2MM_AXI_GALS_MST BIT(19)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_APU_CBIP_GALS_MST BIT(20)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_INFRA2CONN_AHB_GALS_SLV BIT(21)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_PWRDNREQ_INFRA_GALS_ADB BIT(24)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_PWRDNREQ_MP1_L2C_AFIFO BIT(27)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_AUDIO_BUS_AUDIO_M BIT(28)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_AUDIO_BUS_DSP_M BIT(30)
+#define MT8365_INFRA_TOPAXI_PROTECTEN_1_AUDIO_BUS_DSP_S BIT(31)
+
+#define MT8365_INFRA_NAO_TOPAXI_SI0_STA 0x0
+#define MT8365_INFRA_NAO_TOPAXI_SI0_CTRL_UPDATED BIT(24)
+#define MT8365_INFRA_NAO_TOPAXI_SI2_STA 0x28
+#define MT8365_INFRA_NAO_TOPAXI_SI2_CTRL_UPDATED BIT(14)
+#define MT8365_INFRA_TOPAXI_SI0_CTL 0x200
+#define MT8365_INFRA_TOPAXI_SI0_WAY_EN_MMAPB_S BIT(6)
+#define MT8365_INFRA_TOPAXI_SI2_CTL 0x234
+#define MT8365_INFRA_TOPAXI_SI2_WAY_EN_PERI_M1 BIT(5)
+
+#define MT8365_SMI_COMMON_CLAMP_EN 0x3c0
+#define MT8365_SMI_COMMON_CLAMP_EN_SET 0x3c4
+#define MT8365_SMI_COMMON_CLAMP_EN_CLR 0x3c8
+
#define MT8195_TOP_AXI_PROT_EN_STA1 0x228
#define MT8195_TOP_AXI_PROT_EN_1_STA1 0x258
#define MT8195_TOP_AXI_PROT_EN_SET 0x2a0
diff --git a/include/linux/soc/mediatek/mtk_wed.h b/include/linux/soc/mediatek/mtk_wed.h
index b2b28180dff7..a476648858a6 100644
--- a/include/linux/soc/mediatek/mtk_wed.h
+++ b/include/linux/soc/mediatek/mtk_wed.h
@@ -10,6 +10,7 @@
#define MTK_WED_TX_QUEUES 2
#define MTK_WED_RX_QUEUES 2
+#define MTK_WED_RX_PAGE_QUEUES 3
#define WED_WO_STA_REC 0x6
@@ -45,7 +46,7 @@ enum mtk_wed_wo_cmd {
MTK_WED_WO_CMD_WED_END
};
-struct mtk_rxbm_desc {
+struct mtk_wed_bm_desc {
__le32 buf0;
__le32 token;
} __packed __aligned(4);
@@ -76,6 +77,11 @@ struct mtk_wed_wo_rx_stats {
__le32 rx_drop_cnt;
};
+struct mtk_wed_buf {
+ void *p;
+ dma_addr_t phy_addr;
+};
+
struct mtk_wed_device {
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
const struct mtk_wed_ops *ops;
@@ -94,17 +100,20 @@ struct mtk_wed_device {
struct mtk_wed_ring txfree_ring;
struct mtk_wed_ring tx_wdma[MTK_WED_TX_QUEUES];
struct mtk_wed_ring rx_wdma[MTK_WED_RX_QUEUES];
+ struct mtk_wed_ring rx_rro_ring[MTK_WED_RX_QUEUES];
+ struct mtk_wed_ring rx_page_ring[MTK_WED_RX_PAGE_QUEUES];
+ struct mtk_wed_ring ind_cmd_ring;
struct {
int size;
- void **pages;
+ struct mtk_wed_buf *pages;
struct mtk_wdma_desc *desc;
dma_addr_t desc_phys;
} tx_buf_ring;
struct {
int size;
- struct mtk_rxbm_desc *desc;
+ struct mtk_wed_bm_desc *desc;
dma_addr_t desc_phys;
} rx_buf_ring;
@@ -114,6 +123,13 @@ struct mtk_wed_device {
dma_addr_t fdbk_phys;
} rro;
+ struct {
+ int size;
+ struct mtk_wed_buf *pages;
+ struct mtk_wed_bm_desc *desc;
+ dma_addr_t desc_phys;
+ } hw_rro;
+
/* filled by driver: */
struct {
union {
@@ -123,6 +139,7 @@ struct mtk_wed_device {
enum mtk_wed_bus_tye bus_type;
void __iomem *base;
u32 phy_base;
+ u32 id;
u32 wpdma_phys;
u32 wpdma_int;
@@ -131,18 +148,35 @@ struct mtk_wed_device {
u32 wpdma_txfree;
u32 wpdma_rx_glo;
u32 wpdma_rx;
+ u32 wpdma_rx_rro[MTK_WED_RX_QUEUES];
+ u32 wpdma_rx_pg;
bool wcid_512;
+ bool hw_rro;
+ bool msi;
u16 token_start;
unsigned int nbuf;
unsigned int rx_nbuf;
unsigned int rx_npkt;
unsigned int rx_size;
+ unsigned int amsdu_max_len;
u8 tx_tbit[MTK_WED_TX_QUEUES];
u8 rx_tbit[MTK_WED_RX_QUEUES];
+ u8 rro_rx_tbit[MTK_WED_RX_QUEUES];
+ u8 rx_pg_tbit[MTK_WED_RX_PAGE_QUEUES];
u8 txfree_tbit;
+ u8 amsdu_max_subframes;
+
+ struct {
+ u8 se_group_nums;
+ u16 win_size;
+ u16 particular_sid;
+ u32 ack_sn_addr;
+ dma_addr_t particular_se_phys;
+ dma_addr_t addr_elem_phys[1024];
+ } ind_cmd;
u32 (*init_buf)(void *ptr, dma_addr_t phys, int token_id);
int (*offload_enable)(struct mtk_wed_device *wed);
@@ -182,6 +216,14 @@ struct mtk_wed_ops {
void (*irq_set_mask)(struct mtk_wed_device *dev, u32 mask);
int (*setup_tc)(struct mtk_wed_device *wed, struct net_device *dev,
enum tc_setup_type type, void *type_data);
+ void (*start_hw_rro)(struct mtk_wed_device *dev, u32 irq_mask,
+ bool reset);
+ void (*rro_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
+ void __iomem *regs);
+ void (*msdu_pg_rx_ring_setup)(struct mtk_wed_device *dev, int ring,
+ void __iomem *regs);
+ int (*ind_rx_ring_setup)(struct mtk_wed_device *dev,
+ void __iomem *regs);
};
extern const struct mtk_wed_ops __rcu *mtk_soc_wed_ops;
@@ -206,16 +248,27 @@ mtk_wed_device_attach(struct mtk_wed_device *dev)
return ret;
}
-static inline bool
-mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
+static inline bool mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
{
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ if (dev->version == 3)
+ return dev->wlan.hw_rro;
+
return dev->version != 1;
#else
return false;
#endif
}
+static inline bool mtk_wed_is_amsdu_supported(struct mtk_wed_device *dev)
+{
+#ifdef CONFIG_NET_MEDIATEK_SOC_WED
+ return dev->version == 3;
+#else
+ return false;
+#endif
+}
+
#ifdef CONFIG_NET_MEDIATEK_SOC_WED
#define mtk_wed_device_active(_dev) !!(_dev)->ops
#define mtk_wed_device_detach(_dev) (_dev)->ops->detach(_dev)
@@ -242,6 +295,15 @@ mtk_wed_get_rx_capa(struct mtk_wed_device *dev)
#define mtk_wed_device_dma_reset(_dev) (_dev)->ops->reset_dma(_dev)
#define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) \
(_dev)->ops->setup_tc(_dev, _netdev, _type, _type_data)
+#define mtk_wed_device_start_hw_rro(_dev, _mask, _reset) \
+ (_dev)->ops->start_hw_rro(_dev, _mask, _reset)
+#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) \
+ (_dev)->ops->rro_rx_ring_setup(_dev, _ring, _regs)
+#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) \
+ (_dev)->ops->msdu_pg_rx_ring_setup(_dev, _ring, _regs)
+#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) \
+ (_dev)->ops->ind_rx_ring_setup(_dev, _regs)
+
#else
static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
{
@@ -261,6 +323,10 @@ static inline bool mtk_wed_device_active(struct mtk_wed_device *dev)
#define mtk_wed_device_stop(_dev) do {} while (0)
#define mtk_wed_device_dma_reset(_dev) do {} while (0)
#define mtk_wed_device_setup_tc(_dev, _netdev, _type, _type_data) -EOPNOTSUPP
+#define mtk_wed_device_start_hw_rro(_dev, _mask, _reset) do {} while (0)
+#define mtk_wed_device_rro_rx_ring_setup(_dev, _ring, _regs) -ENODEV
+#define mtk_wed_device_msdu_pg_rx_ring_setup(_dev, _ring, _regs) -ENODEV
+#define mtk_wed_device_ind_rx_ring_setup(_dev, _regs) -ENODEV
#endif
#endif
diff --git a/include/linux/soc/qcom/llcc-qcom.h b/include/linux/soc/qcom/llcc-qcom.h
index 93417ba1ead4..1a886666bbb6 100644
--- a/include/linux/soc/qcom/llcc-qcom.h
+++ b/include/linux/soc/qcom/llcc-qcom.h
@@ -30,7 +30,7 @@
#define LLCC_NPU 23
#define LLCC_WLHW 24
#define LLCC_PIMEM 25
-#define LLCC_DRE 26
+#define LLCC_ECC 26
#define LLCC_CVP 28
#define LLCC_MODPE 29
#define LLCC_APTCM 30
diff --git a/include/linux/socket.h b/include/linux/socket.h
index 39b74d83c7c4..cfcb7e2c3813 100644
--- a/include/linux/socket.h
+++ b/include/linux/socket.h
@@ -383,6 +383,7 @@ struct ucred {
#define SOL_MPTCP 284
#define SOL_MCTP 285
#define SOL_SMC 286
+#define SOL_VSOCK 287
/* IPX options */
#define IPX_TYPE 1
diff --git a/include/linux/sockptr.h b/include/linux/sockptr.h
index bae5e2369b4f..307961b41541 100644
--- a/include/linux/sockptr.h
+++ b/include/linux/sockptr.h
@@ -55,6 +55,29 @@ static inline int copy_from_sockptr(void *dst, sockptr_t src, size_t size)
return copy_from_sockptr_offset(dst, src, 0, size);
}
+static inline int copy_struct_from_sockptr(void *dst, size_t ksize,
+ sockptr_t src, size_t usize)
+{
+ size_t size = min(ksize, usize);
+ size_t rest = max(ksize, usize) - size;
+
+ if (!sockptr_is_kernel(src))
+ return copy_struct_from_user(dst, ksize, src.user, size);
+
+ if (usize < ksize) {
+ memset(dst + size, 0, rest);
+ } else if (usize > ksize) {
+ char *p = src.kernel;
+
+ while (rest--) {
+ if (*p++)
+ return -E2BIG;
+ }
+ }
+ memcpy(dst, src.kernel, size);
+ return 0;
+}
+
static inline int copy_to_sockptr_offset(sockptr_t dst, size_t offset,
const void *src, size_t size)
{
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 7f8b478fdeb3..86825c88b576 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -867,6 +867,7 @@ extern int devm_spi_register_controller(struct device *dev,
extern void spi_unregister_controller(struct spi_controller *ctlr);
#if IS_ENABLED(CONFIG_ACPI)
+extern struct spi_controller *acpi_spi_find_controller_by_adev(struct acpi_device *adev);
extern struct spi_device *acpi_spi_device_alloc(struct spi_controller *ctlr,
struct acpi_device *adev,
int index);
@@ -1086,8 +1087,6 @@ struct spi_transfer {
* @state: for use by whichever driver currently owns the message
* @resources: for resource management when the SPI message is processed
 * @prepared: spi_prepare_message was called for this message
- * @t: for use with spi_message_alloc() when message and transfers have
- * been allocated together
*
* A @spi_message is used to execute an atomic sequence of data transfers,
* each represented by a struct spi_transfer. The sequence is "atomic"
@@ -1142,9 +1141,6 @@ struct spi_message {
/* List of spi_res resources when the SPI message is processed */
struct list_head resources;
-
- /* For embedding transfers into the memory of the message */
- struct spi_transfer t[];
};
static inline void spi_message_init_no_memset(struct spi_message *m)
@@ -1203,17 +1199,21 @@ struct spi_transfer *xfers, unsigned int num_xfers)
*/
static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags)
{
- struct spi_message *m;
+ struct spi_message_with_transfers {
+ struct spi_message m;
+ struct spi_transfer t[];
+ } *mwt;
+ unsigned i;
+
+ mwt = kzalloc(struct_size(mwt, t, ntrans), flags);
+ if (!mwt)
+ return NULL;
- m = kzalloc(struct_size(m, t, ntrans), flags);
- if (m) {
- unsigned i;
+ spi_message_init_no_memset(&mwt->m);
+ for (i = 0; i < ntrans; i++)
+ spi_message_add_tail(&mwt->t[i], &mwt->m);
- spi_message_init_no_memset(m);
- for (i = 0; i < ntrans; i++)
- spi_message_add_tail(&m->t[i], m);
- }
- return m;
+ return &mwt->m;
}
static inline void spi_message_free(struct spi_message *m)
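Editor's sketch: the rework above moves the flexible transfer array out of struct spi_message and into a wrapper type local to spi_message_alloc(), so the embedded transfers are now reachable only through the message's transfer list. A sketch of a caller under that model; the device, buffer, and helper name are hypothetical:

#include <linux/spi/spi.h>

static int demo_two_transfers(struct spi_device *spi, void *buf, size_t len)
{
	struct spi_message *m;
	struct spi_transfer *t;
	int ret;

	m = spi_message_alloc(2, GFP_KERNEL);	/* message + 2 transfers */
	if (!m)
		return -ENOMEM;

	/* Transfers are already queued; walk the list to fill them in. */
	list_for_each_entry(t, &m->transfers, transfer_list) {
		t->rx_buf = buf;	/* sketch: same buffer for both */
		t->len = len;
	}

	ret = spi_sync(spi, m);
	spi_message_free(m);
	return ret;
}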
diff --git a/include/linux/spmi.h b/include/linux/spmi.h
index eac1956a8727..2a4ce4144f9f 100644
--- a/include/linux/spmi.h
+++ b/include/linux/spmi.h
@@ -166,7 +166,7 @@ static inline void spmi_driver_unregister(struct spmi_driver *sdrv)
struct device_node;
-struct spmi_device *spmi_device_from_of(struct device_node *np);
+struct spmi_device *spmi_find_device_by_of_node(struct device_node *np);
int spmi_register_read(struct spmi_device *sdev, u8 addr, u8 *buf);
int spmi_ext_register_read(struct spmi_device *sdev, u8 addr, u8 *buf,
size_t len);
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index ce89cc3e4913..0b4658a7eceb 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -139,6 +139,7 @@ struct stmmac_rxq_cfg {
struct stmmac_txq_cfg {
u32 weight;
+ bool coe_unsupported;
u8 mode_to_use;
/* Credit Base Shaper parameters */
u32 send_slope;
@@ -302,7 +303,6 @@ struct plat_stmmacenet_data {
unsigned int eee_usecs_rate;
struct pci_dev *pdev;
int int_snapshot_num;
- int ext_snapshot_num;
int msi_mac_vec;
int msi_wol_vec;
int msi_lpi_vec;
diff --git a/include/linux/string.h b/include/linux/string.h
index dbfc66400050..ce137830a0b9 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -2,10 +2,13 @@
#ifndef _LINUX_STRING_H_
#define _LINUX_STRING_H_
+#include <linux/array_size.h>
#include <linux/compiler.h> /* for inline */
#include <linux/types.h> /* for size_t */
#include <linux/stddef.h> /* for NULL */
+#include <linux/err.h> /* for ERR_PTR() */
#include <linux/errno.h> /* for E2BIG */
+#include <linux/overflow.h> /* for check_mul_overflow() */
#include <linux/stdarg.h>
#include <uapi/linux/string.h>
@@ -14,6 +17,44 @@ extern void *memdup_user(const void __user *, size_t);
extern void *vmemdup_user(const void __user *, size_t);
extern void *memdup_user_nul(const void __user *, size_t);
+/**
+ * memdup_array_user - duplicate array from user space
+ * @src: source address in user space
+ * @n: number of array members to copy
+ * @size: size of one array member
+ *
+ * Return: an ERR_PTR() on failure. Result is physically
+ * contiguous, to be freed by kfree().
+ */
+static inline void *memdup_array_user(const void __user *src, size_t n, size_t size)
+{
+ size_t nbytes;
+
+ if (check_mul_overflow(n, size, &nbytes))
+ return ERR_PTR(-EOVERFLOW);
+
+ return memdup_user(src, nbytes);
+}
+
+/**
+ * vmemdup_array_user - duplicate array from user space
+ * @src: source address in user space
+ * @n: number of array members to copy
+ * @size: size of one array member
+ *
+ * Return: an ERR_PTR() on failure. Result may not be
+ * physically contiguous. Use kvfree() to free.
+ */
+static inline void *vmemdup_array_user(const void __user *src, size_t n, size_t size)
+{
+ size_t nbytes;
+
+ if (check_mul_overflow(n, size, &nbytes))
+ return ERR_PTR(-EOVERFLOW);
+
+ return vmemdup_user(src, nbytes);
+}
+
/*
* Include machine specific inline routines
*/
@@ -277,10 +318,12 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
*/
#define strtomem_pad(dest, src, pad) do { \
const size_t _dest_len = __builtin_object_size(dest, 1); \
+ const size_t _src_len = __builtin_object_size(src, 1); \
\
BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
_dest_len == (size_t)-1); \
- memcpy_and_pad(dest, _dest_len, src, strnlen(src, _dest_len), pad); \
+ memcpy_and_pad(dest, _dest_len, src, \
+ strnlen(src, min(_src_len, _dest_len)), pad); \
} while (0)
/**
@@ -298,10 +341,11 @@ void memcpy_and_pad(void *dest, size_t dest_len, const void *src, size_t count,
*/
#define strtomem(dest, src) do { \
const size_t _dest_len = __builtin_object_size(dest, 1); \
+ const size_t _src_len = __builtin_object_size(src, 1); \
\
BUILD_BUG_ON(!__builtin_constant_p(_dest_len) || \
_dest_len == (size_t)-1); \
- memcpy(dest, src, min(_dest_len, strnlen(src, _dest_len))); \
+ memcpy(dest, src, strnlen(src, min(_src_len, _dest_len))); \
} while (0)
/**
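Editor's sketch: both new helpers fold the multiply-and-copy pattern into one call so the n * size overflow check cannot be forgotten at call sites. A sketch of the intended ioctl-style usage, with the table type and the sanity bound invented for illustration:

#include <linux/err.h>
#include <linux/slab.h>
#include <linux/string.h>

static int demo_load_table(const u32 __user *uptr, size_t nr)
{
	u32 *table;

	if (nr > 1024)		/* hypothetical upper bound */
		return -EINVAL;

	table = memdup_array_user(uptr, nr, sizeof(*table));
	if (IS_ERR(table))	/* -EOVERFLOW, -ENOMEM, or -EFAULT */
		return PTR_ERR(table);

	/* ... use table[0..nr-1] ... */
	kfree(table);
	return 0;
}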
diff --git a/include/linux/string_helpers.h b/include/linux/string_helpers.h
index 9d1f5bb74dd5..58fb1f90eda5 100644
--- a/include/linux/string_helpers.h
+++ b/include/linux/string_helpers.h
@@ -24,8 +24,8 @@ enum string_size_units {
STRING_UNITS_2, /* use binary powers of 2^10 */
};
-void string_get_size(u64 size, u64 blk_size, enum string_size_units units,
- char *buf, int len);
+int string_get_size(u64 size, u64 blk_size, enum string_size_units units,
+ char *buf, int len);
int parse_int_array_user(const char __user *from, size_t count, int **array);
diff --git a/include/linux/stringify.h b/include/linux/stringify.h
index 841cec8ed525..0e84cbe65270 100644
--- a/include/linux/stringify.h
+++ b/include/linux/stringify.h
@@ -9,4 +9,6 @@
#define __stringify_1(x...) #x
#define __stringify(x...) __stringify_1(x)
+#define FILE_LINE __FILE__ ":" __stringify(__LINE__)
+
#endif /* !__LINUX_STRINGIFY_H */
diff --git a/include/linux/sunrpc/svc.h b/include/linux/sunrpc/svc.h
index dbf5b21feafe..b10f987509cc 100644
--- a/include/linux/sunrpc/svc.h
+++ b/include/linux/sunrpc/svc.h
@@ -17,6 +17,7 @@
#include <linux/sunrpc/xdr.h>
#include <linux/sunrpc/auth.h>
#include <linux/sunrpc/svcauth.h>
+#include <linux/lwq.h>
#include <linux/wait.h>
#include <linux/mm.h>
#include <linux/pagevec.h>
@@ -33,10 +34,10 @@
*/
struct svc_pool {
unsigned int sp_id; /* pool id; also node id on NUMA */
- spinlock_t sp_lock; /* protects all fields */
- struct list_head sp_sockets; /* pending sockets */
- unsigned int sp_nrthreads; /* # of threads in pool */
+ struct lwq sp_xprts; /* pending transports */
+ atomic_t sp_nrthreads; /* # of threads in pool */
struct list_head sp_all_threads; /* all server threads */
+ struct llist_head sp_idle_threads; /* idle server threads */
/* statistics on pool operation */
struct percpu_counter sp_messages_arrived;
@@ -49,7 +50,8 @@ struct svc_pool {
/* bits for sp_flags */
enum {
SP_TASK_PENDING, /* still work to do even if no xprt is queued */
- SP_CONGESTED, /* all threads are busy, none idle */
+ SP_NEED_VICTIM, /* One thread needs to agree to exit */
+ SP_VICTIM_REMAINS, /* One thread needs to actually exit */
};
@@ -88,12 +90,9 @@ struct svc_serv {
int (*sv_threadfn)(void *data);
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
- struct list_head sv_cb_list; /* queue for callback requests
+ struct lwq sv_cb_list; /* queue for callback requests
* that arrive over the same
* connection */
- spinlock_t sv_cb_lock; /* protects the svc_cb_list */
- wait_queue_head_t sv_cb_waitq; /* sleep here if there are no
- * entries in the svc_cb_list */
bool sv_bc_enabled; /* service uses backchannel */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
};
@@ -186,6 +185,7 @@ extern u32 svc_max_payload(const struct svc_rqst *rqstp);
*/
struct svc_rqst {
struct list_head rq_all; /* all threads list */
+ struct llist_node rq_idle; /* On the idle list */
struct rcu_head rq_rcu_head; /* for RCU deferred kfree */
struct svc_xprt * rq_xprt; /* transport ptr */
@@ -251,6 +251,7 @@ struct svc_rqst {
* net namespace
*/
void ** rq_lease_breaker; /* The v4 client breaking a lease */
+ unsigned int rq_status_counter; /* RPC processing counter */
};
/* bits for rq_flags */
@@ -261,8 +262,7 @@ enum {
RQ_DROPME, /* drop current reply */
RQ_SPLICE_OK, /* turned off in gss privacy to prevent
* encrypting page cache pages */
- RQ_VICTIM, /* about to be shut down */
- RQ_BUSY, /* request is busy */
+ RQ_VICTIM, /* Have agreed to shut down */
RQ_DATA, /* request has data */
};
@@ -301,6 +301,28 @@ static inline struct sockaddr *svc_daddr(const struct svc_rqst *rqst)
return (struct sockaddr *) &rqst->rq_daddr;
}
+/**
+ * svc_thread_should_stop - check if this thread should stop
+ * @rqstp: the thread that might need to stop
+ *
+ * To stop an svc thread, the pool flags SP_NEED_VICTIM and SP_VICTIM_REMAINS
+ * are set. The first thread which sees SP_NEED_VICTIM clears it, becoming
+ * the victim using this function. It should then promptly call
+ * svc_exit_thread() to complete the process, clearing SP_VICTIM_REMAINS
+ * so the task waiting for a thread to exit can wake and continue.
+ *
+ * Return values:
+ * %true: caller should invoke svc_exit_thread()
+ * %false: caller should do nothing
+ */
+static inline bool svc_thread_should_stop(struct svc_rqst *rqstp)
+{
+ if (test_and_clear_bit(SP_NEED_VICTIM, &rqstp->rq_pool->sp_flags))
+ set_bit(RQ_VICTIM, &rqstp->rq_flags);
+
+ return test_bit(RQ_VICTIM, &rqstp->rq_flags);
+}
+
struct svc_deferred_req {
u32 prot; /* protocol (UDP or TCP) */
struct svc_xprt *xprt;
@@ -413,8 +435,7 @@ struct svc_serv * svc_create_pooled(struct svc_program *, unsigned int,
int svc_set_num_threads(struct svc_serv *, struct svc_pool *, int);
int svc_pool_stats_open(struct svc_serv *serv, struct file *file);
void svc_process(struct svc_rqst *rqstp);
-int bc_svc_process(struct svc_serv *, struct rpc_rqst *,
- struct svc_rqst *);
+void svc_process_bc(struct rpc_rqst *req, struct svc_rqst *rqstp);
int svc_register(const struct svc_serv *, struct net *, const int,
const unsigned short, const unsigned short);
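Editor's sketch: the svc_thread_should_stop() kerneldoc above describes a two-flag handshake: test_and_clear_bit() on SP_NEED_VICTIM elects exactly one victim, and SP_VICTIM_REMAINS stays set until that thread actually exits. A sketch of the loop shape a service thread function might take under this protocol; the thread body is elided and the function name is hypothetical:

#include <linux/sunrpc/svc.h>

static int demo_svc_thread(void *data)
{
	struct svc_rqst *rqstp = data;

	while (!svc_thread_should_stop(rqstp)) {
		/* ... wait for and process one RPC request ... */
	}

	svc_exit_thread(rqstp);	/* clears SP_VICTIM_REMAINS, wakes waiter */
	return 0;
}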
diff --git a/include/linux/sunrpc/svc_xprt.h b/include/linux/sunrpc/svc_xprt.h
index fa55d12dc765..8e20cd60e2e7 100644
--- a/include/linux/sunrpc/svc_xprt.h
+++ b/include/linux/sunrpc/svc_xprt.h
@@ -54,7 +54,7 @@ struct svc_xprt {
const struct svc_xprt_ops *xpt_ops;
struct kref xpt_ref;
struct list_head xpt_list;
- struct list_head xpt_ready;
+ struct lwq_node xpt_ready;
unsigned long xpt_flags;
struct svc_serv *xpt_server; /* service for transport */
diff --git a/include/linux/sunrpc/xdr.h b/include/linux/sunrpc/xdr.h
index 5b4fb3c791bc..2f8dc47f1eb0 100644
--- a/include/linux/sunrpc/xdr.h
+++ b/include/linux/sunrpc/xdr.h
@@ -779,7 +779,9 @@ xdr_stream_decode_uint32_array(struct xdr_stream *xdr,
if (unlikely(xdr_stream_decode_u32(xdr, &len) < 0))
return -EBADMSG;
- p = xdr_inline_decode(xdr, size_mul(len, sizeof(*p)));
+ if (U32_MAX >= SIZE_MAX / sizeof(*p) && len > SIZE_MAX / sizeof(*p))
+ return -EBADMSG;
+ p = xdr_inline_decode(xdr, len * sizeof(*p));
if (unlikely(!p))
return -EBADMSG;
if (array == NULL)
diff --git a/include/linux/sunrpc/xprt.h b/include/linux/sunrpc/xprt.h
index 4ecc89301eb7..f85d3a0daca2 100644
--- a/include/linux/sunrpc/xprt.h
+++ b/include/linux/sunrpc/xprt.h
@@ -57,6 +57,7 @@ struct xprt_class;
struct seq_file;
struct svc_serv;
struct net;
+#include <linux/lwq.h>
/*
* This describes a complete RPC request
@@ -121,7 +122,7 @@ struct rpc_rqst {
int rq_ntrans;
#if defined(CONFIG_SUNRPC_BACKCHANNEL)
- struct list_head rq_bc_list; /* Callback service list */
+ struct lwq_node rq_bc_list; /* Callback service list */
unsigned long rq_bc_pa_state; /* Backchannel prealloc state */
struct list_head rq_bc_pa_list; /* Backchannel prealloc list */
#endif /* CONFIG_SUNRPC_BACKCHANNEL */
diff --git a/include/linux/superhyway.h b/include/linux/superhyway.h
deleted file mode 100644
index 8d3376775813..000000000000
--- a/include/linux/superhyway.h
+++ /dev/null
@@ -1,107 +0,0 @@
-/*
- * include/linux/superhyway.h
- *
- * SuperHyway Bus definitions
- *
- * Copyright (C) 2004, 2005 Paul Mundt <lethal@linux-sh.org>
- *
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- */
-#ifndef __LINUX_SUPERHYWAY_H
-#define __LINUX_SUPERHYWAY_H
-
-#include <linux/device.h>
-
-/*
- * SuperHyway IDs
- */
-#define SUPERHYWAY_DEVICE_ID_SH5_DMAC 0x0183
-
-struct superhyway_vcr_info {
- u8 perr_flags; /* P-port Error flags */
- u8 merr_flags; /* Module Error flags */
- u16 mod_vers; /* Module Version */
- u16 mod_id; /* Module ID */
- u8 bot_mb; /* Bottom Memory block */
- u8 top_mb; /* Top Memory block */
-};
-
-struct superhyway_ops {
- int (*read_vcr)(unsigned long base, struct superhyway_vcr_info *vcr);
- int (*write_vcr)(unsigned long base, struct superhyway_vcr_info vcr);
-};
-
-struct superhyway_bus {
- struct superhyway_ops *ops;
-};
-
-extern struct superhyway_bus superhyway_channels[];
-
-struct superhyway_device_id {
- unsigned int id;
- unsigned long driver_data;
-};
-
-struct superhyway_device;
-extern struct bus_type superhyway_bus_type;
-
-struct superhyway_driver {
- char *name;
-
- const struct superhyway_device_id *id_table;
- struct device_driver drv;
-
- int (*probe)(struct superhyway_device *dev, const struct superhyway_device_id *id);
- void (*remove)(struct superhyway_device *dev);
-};
-
-#define to_superhyway_driver(d) container_of((d), struct superhyway_driver, drv)
-
-struct superhyway_device {
- char name[32];
-
- struct device dev;
-
- struct superhyway_device_id id;
- struct superhyway_driver *drv;
- struct superhyway_bus *bus;
-
- int num_resources;
- struct resource *resource;
- struct superhyway_vcr_info vcr;
-};
-
-#define to_superhyway_device(d) container_of((d), struct superhyway_device, dev)
-
-#define superhyway_get_drvdata(d) dev_get_drvdata(&(d)->dev)
-#define superhyway_set_drvdata(d,p) dev_set_drvdata(&(d)->dev, (p))
-
-static inline int
-superhyway_read_vcr(struct superhyway_device *dev, unsigned long base,
- struct superhyway_vcr_info *vcr)
-{
- return dev->bus->ops->read_vcr(base, vcr);
-}
-
-static inline int
-superhyway_write_vcr(struct superhyway_device *dev, unsigned long base,
- struct superhyway_vcr_info vcr)
-{
- return dev->bus->ops->write_vcr(base, vcr);
-}
-
-extern int superhyway_scan_bus(struct superhyway_bus *);
-
-/* drivers/sh/superhyway/superhyway.c */
-int superhyway_register_driver(struct superhyway_driver *);
-void superhyway_unregister_driver(struct superhyway_driver *);
-int superhyway_add_device(unsigned long base, struct superhyway_device *, struct superhyway_bus *);
-int superhyway_add_devices(struct superhyway_bus *bus, struct superhyway_device **devices, int nr_devices);
-
-/* drivers/sh/superhyway/superhyway-sysfs.c */
-extern const struct attribute_group *superhyway_dev_groups[];
-
-#endif /* __LINUX_SUPERHYWAY_H */
-
diff --git a/include/linux/surface_aggregator/controller.h b/include/linux/surface_aggregator/controller.h
index cb7980805920..5b67f0f47d80 100644
--- a/include/linux/surface_aggregator/controller.h
+++ b/include/linux/surface_aggregator/controller.h
@@ -44,7 +44,7 @@ struct ssam_event {
u8 command_id;
u8 instance_id;
u16 length;
- u8 data[];
+ u8 data[] __counted_by(length);
};
/**
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 493487ed7c38..f6dd6575b905 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -298,6 +298,7 @@ struct swap_info_struct {
unsigned int __percpu *cluster_next_cpu; /*percpu index for next allocation */
struct percpu_cluster __percpu *percpu_cluster; /* per cpu's swap location */
struct rb_root swap_extent_root;/* root of the swap extent rbtree */
+ struct bdev_handle *bdev_handle;/* open handle of the bdev */
struct block_device *bdev; /* swap device or bdev of swap file */
struct file *swap_file; /* seldom referenced */
unsigned int old_block_size; /* seldom referenced */
diff --git a/include/linux/swiotlb.h b/include/linux/swiotlb.h
index b4536626f8ff..ecde0312dd52 100644
--- a/include/linux/swiotlb.h
+++ b/include/linux/swiotlb.h
@@ -172,14 +172,23 @@ static inline bool is_swiotlb_buffer(struct device *dev, phys_addr_t paddr)
if (!mem)
return false;
- if (IS_ENABLED(CONFIG_SWIOTLB_DYNAMIC)) {
- /* Pairs with smp_wmb() in swiotlb_find_slots() and
- * swiotlb_dyn_alloc(), which modify the RCU lists.
- */
- smp_rmb();
- return swiotlb_find_pool(dev, paddr);
- }
+#ifdef CONFIG_SWIOTLB_DYNAMIC
+ /*
+ * All SWIOTLB buffer addresses must have been returned by
+ * swiotlb_tbl_map_single() and passed to a device driver.
+ * If a SWIOTLB address is checked on another CPU, then it was
+ * presumably loaded by the device driver from an unspecified private
+ * data structure. Make sure that this load is ordered before reading
+ * dev->dma_uses_io_tlb here and mem->pools in swiotlb_find_pool().
+ *
+ * This barrier pairs with smp_mb() in swiotlb_find_slots().
+ */
+ smp_rmb();
+ return READ_ONCE(dev->dma_uses_io_tlb) &&
+ swiotlb_find_pool(dev, paddr);
+#else
return paddr >= mem->defpool.start && paddr < mem->defpool.end;
+#endif
}
static inline bool is_swiotlb_force_bounce(struct device *dev)
diff --git a/include/linux/syscalls.h b/include/linux/syscalls.h
index 22bc6bc147f8..fd9d12de7e92 100644
--- a/include/linux/syscalls.h
+++ b/include/linux/syscalls.h
@@ -355,7 +355,6 @@ asmlinkage long sys_lremovexattr(const char __user *path,
const char __user *name);
asmlinkage long sys_fremovexattr(int fd, const char __user *name);
asmlinkage long sys_getcwd(char __user *buf, unsigned long size);
-asmlinkage long sys_lookup_dcookie(u64 cookie64, char __user *buf, size_t len);
asmlinkage long sys_eventfd2(unsigned int count, int flags);
asmlinkage long sys_epoll_create1(int flags);
asmlinkage long sys_epoll_ctl(int epfd, int op, int fd,
@@ -549,6 +548,16 @@ asmlinkage long sys_set_robust_list(struct robust_list_head __user *head,
asmlinkage long sys_futex_waitv(struct futex_waitv *waiters,
unsigned int nr_futexes, unsigned int flags,
struct __kernel_timespec __user *timeout, clockid_t clockid);
+
+asmlinkage long sys_futex_wake(void __user *uaddr, unsigned long mask, int nr, unsigned int flags);
+
+asmlinkage long sys_futex_wait(void __user *uaddr, unsigned long val, unsigned long mask,
+ unsigned int flags, struct __kernel_timespec __user *timespec,
+ clockid_t clockid);
+
+asmlinkage long sys_futex_requeue(struct futex_waitv __user *waiters,
+ unsigned int flags, int nr_wake, int nr_requeue);
+
asmlinkage long sys_nanosleep(struct __kernel_timespec __user *rqtp,
struct __kernel_timespec __user *rmtp);
asmlinkage long sys_nanosleep_time32(struct old_timespec32 __user *rqtp,
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index 09d7429d67c0..61b40ea81f4d 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -242,6 +242,7 @@ extern void __register_sysctl_init(const char *path, struct ctl_table *table,
extern struct ctl_table_header *register_sysctl_mount_point(const char *path);
void do_sysctl_args(void);
+bool sysctl_is_alias(char *param);
int do_proc_douintvec(struct ctl_table *table, int write,
void *buffer, size_t *lenp, loff_t *ppos,
int (*conv)(unsigned long *lvalp,
@@ -287,6 +288,11 @@ static inline void setup_sysctl_set(struct ctl_table_set *p,
static inline void do_sysctl_args(void)
{
}
+
+static inline bool sysctl_is_alias(char *param)
+{
+ return false;
+}
#endif /* CONFIG_SYSCTL */
int sysctl_max_threads(struct ctl_table *table, int write, void *buffer,
diff --git a/include/linux/sysfs.h b/include/linux/sysfs.h
index fd3fe5c8c17f..b717a70219f6 100644
--- a/include/linux/sysfs.h
+++ b/include/linux/sysfs.h
@@ -181,6 +181,8 @@ struct bin_attribute {
char *, loff_t, size_t);
ssize_t (*write)(struct file *, struct kobject *, struct bin_attribute *,
char *, loff_t, size_t);
+ loff_t (*llseek)(struct file *, struct kobject *, struct bin_attribute *,
+ loff_t, int);
int (*mmap)(struct file *, struct kobject *, struct bin_attribute *attr,
struct vm_area_struct *vma);
};
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index 3c5efeeb024f..ec4e9367f5b0 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -152,6 +152,7 @@ struct tcp_request_sock {
u64 snt_synack; /* first SYNACK sent time */
bool tfo_listener;
bool is_mptcp;
+ s8 req_usec_ts;
#if IS_ENABLED(CONFIG_MPTCP)
bool drop_req;
#endif
@@ -165,6 +166,11 @@ struct tcp_request_sock {
* after data-in-SYN.
*/
u8 syn_tos;
+#ifdef CONFIG_TCP_AO
+ u8 ao_keyid;
+ u8 ao_rcv_next;
+ u8 maclen;
+#endif
};
static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
@@ -172,6 +178,19 @@ static inline struct tcp_request_sock *tcp_rsk(const struct request_sock *req)
return (struct tcp_request_sock *)req;
}
+static inline bool tcp_rsk_used_ao(const struct request_sock *req)
+{
+ /* The real length of MAC is saved in the request socket,
+ * signing anything with zero-length makes no sense, so here is
+ * a little hack..
+ */
+#ifndef CONFIG_TCP_AO
+ return false;
+#else
+ return tcp_rsk(req)->maclen != 0;
+#endif
+}
+
#define TCP_RMEM_TO_WIN_SCALE 8
struct tcp_sock {
@@ -257,7 +276,8 @@ struct tcp_sock {
u8 compressed_ack;
u8 dup_ack_counter:2,
tlp_retrans:1, /* TLP is a retransmission */
- unused:5;
+ tcp_usec_ts:1, /* TSval values in usec */
+ unused:4;
u32 chrono_start; /* Start time in jiffies of a TCP chrono */
u32 chrono_stat[3]; /* Time in jiffies for chrono_stat stats */
u8 chrono_type:2, /* current chronograph type */
@@ -377,6 +397,14 @@ struct tcp_sock {
* Total data bytes retransmitted
*/
u32 total_retrans; /* Total retransmits for entire connection */
+ u32 rto_stamp; /* Start time (ms) of last CA_Loss recovery */
+ u16 total_rto; /* Total number of RTO timeouts, including
+ * SYN/SYN-ACK and recurring timeouts.
+ */
+ u16 total_rto_recoveries; /* Total number of RTO recoveries,
+ * including any unfinished recovery.
+ */
+ u32 total_rto_time; /* ms spent in (completed) RTO recoveries. */
u32 urg_seq; /* Seq of received urgent pointer */
unsigned int keepalive_time; /* time before keep alive takes place */
@@ -437,13 +465,18 @@ struct tcp_sock {
bool syn_smc; /* SYN includes SMC */
#endif
-#ifdef CONFIG_TCP_MD5SIG
-/* TCP AF-Specific parts; only used by MD5 Signature support so far */
+#if defined(CONFIG_TCP_MD5SIG) || defined(CONFIG_TCP_AO)
+/* TCP AF-Specific parts; only used by TCP-AO/MD5 Signature support so far */
const struct tcp_sock_af_ops *af_specific;
+#ifdef CONFIG_TCP_MD5SIG
/* TCP MD5 Signature Option information */
struct tcp_md5sig_info __rcu *md5sig_info;
#endif
+#ifdef CONFIG_TCP_AO
+ struct tcp_ao_info __rcu *ao_info;
+#endif
+#endif
/* TCP fastopen related information */
struct tcp_fastopen_request *fastopen_req;
@@ -463,15 +496,17 @@ enum tsq_enum {
TCP_MTU_REDUCED_DEFERRED, /* tcp_v{4|6}_err() could not call
* tcp_v{4|6}_mtu_reduced()
*/
+ TCP_ACK_DEFERRED, /* TX pure ack is deferred */
};
enum tsq_flags {
- TSQF_THROTTLED = (1UL << TSQ_THROTTLED),
- TSQF_QUEUED = (1UL << TSQ_QUEUED),
- TCPF_TSQ_DEFERRED = (1UL << TCP_TSQ_DEFERRED),
- TCPF_WRITE_TIMER_DEFERRED = (1UL << TCP_WRITE_TIMER_DEFERRED),
- TCPF_DELACK_TIMER_DEFERRED = (1UL << TCP_DELACK_TIMER_DEFERRED),
- TCPF_MTU_REDUCED_DEFERRED = (1UL << TCP_MTU_REDUCED_DEFERRED),
+ TSQF_THROTTLED = BIT(TSQ_THROTTLED),
+ TSQF_QUEUED = BIT(TSQ_QUEUED),
+ TCPF_TSQ_DEFERRED = BIT(TCP_TSQ_DEFERRED),
+ TCPF_WRITE_TIMER_DEFERRED = BIT(TCP_WRITE_TIMER_DEFERRED),
+ TCPF_DELACK_TIMER_DEFERRED = BIT(TCP_DELACK_TIMER_DEFERRED),
+ TCPF_MTU_REDUCED_DEFERRED = BIT(TCP_MTU_REDUCED_DEFERRED),
+ TCPF_ACK_DEFERRED = BIT(TCP_ACK_DEFERRED),
};
#define tcp_sk(ptr) container_of_const(ptr, struct tcp_sock, inet_conn.icsk_inet.sk)
@@ -497,6 +532,9 @@ struct tcp_timewait_sock {
#ifdef CONFIG_TCP_MD5SIG
struct tcp_md5sig_key *tw_md5_key;
#endif
+#ifdef CONFIG_TCP_AO
+ struct tcp_ao_info __rcu *ao_info;
+#endif
};
static inline struct tcp_timewait_sock *tcp_twsk(const struct sock *sk)
@@ -566,4 +604,9 @@ void tcp_sock_set_quickack(struct sock *sk, int val);
int tcp_sock_set_syncnt(struct sock *sk, int val);
int tcp_sock_set_user_timeout(struct sock *sk, int val);
+static inline bool dst_tcp_usec_ts(const struct dst_entry *dst)
+{
+ return dst_feature(dst, RTAX_FEATURE_TCP_USEC_TS);
+}
+
#endif /* _LINUX_TCP_H */
diff --git a/include/linux/thermal.h b/include/linux/thermal.h
index c99440aac1a1..cee814d5d1ac 100644
--- a/include/linux/thermal.h
+++ b/include/linux/thermal.h
@@ -80,8 +80,8 @@ struct thermal_zone_device_ops {
int (*set_trip_hyst) (struct thermal_zone_device *, int, int);
int (*get_crit_temp) (struct thermal_zone_device *, int *);
int (*set_emul_temp) (struct thermal_zone_device *, int);
- int (*get_trend) (struct thermal_zone_device *, struct thermal_trip *,
- enum thermal_trend *);
+ int (*get_trend) (struct thermal_zone_device *,
+ const struct thermal_trip *, enum thermal_trend *);
void (*hot)(struct thermal_zone_device *);
void (*critical)(struct thermal_zone_device *);
};
@@ -122,7 +122,6 @@ struct thermal_cooling_device {
* @devdata: private pointer for device private data
* @trips: an array of struct thermal_trip
* @num_trips: number of trip points the thermal zone supports
- * @trips_disabled; bitmap for disabled trips
* @passive_delay_jiffies: number of jiffies to wait between polls when
* performing passive cooling.
* @polling_delay_jiffies: number of jiffies to wait between polls when
@@ -163,7 +162,6 @@ struct thermal_zone_device {
void *devdata;
struct thermal_trip *trips;
int num_trips;
- unsigned long trips_disabled; /* bitmap for disabled trips */
unsigned long passive_delay_jiffies;
unsigned long polling_delay_jiffies;
int temperature;
@@ -201,7 +199,8 @@ struct thermal_governor {
char name[THERMAL_NAME_LENGTH];
int (*bind_to_tz)(struct thermal_zone_device *tz);
void (*unbind_from_tz)(struct thermal_zone_device *tz);
- int (*throttle)(struct thermal_zone_device *tz, int trip);
+ int (*throttle)(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip);
struct list_head governor_list;
};
@@ -288,6 +287,9 @@ int thermal_zone_set_trip(struct thermal_zone_device *tz, int trip_id,
int for_each_thermal_trip(struct thermal_zone_device *tz,
int (*cb)(struct thermal_trip *, void *),
void *data);
+int thermal_zone_for_each_trip(struct thermal_zone_device *tz,
+ int (*cb)(struct thermal_trip *, void *),
+ void *data);
int thermal_zone_get_num_trips(struct thermal_zone_device *tz);
int thermal_zone_get_crit_temp(struct thermal_zone_device *tz, int *temp);
@@ -322,18 +324,22 @@ const char *thermal_zone_device_type(struct thermal_zone_device *tzd);
int thermal_zone_device_id(struct thermal_zone_device *tzd);
struct device *thermal_zone_device(struct thermal_zone_device *tzd);
+int thermal_bind_cdev_to_trip(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip,
+ struct thermal_cooling_device *cdev,
+ unsigned long upper, unsigned long lower,
+ unsigned int weight);
int thermal_zone_bind_cooling_device(struct thermal_zone_device *, int,
struct thermal_cooling_device *,
unsigned long, unsigned long,
unsigned int);
+int thermal_unbind_cdev_from_trip(struct thermal_zone_device *tz,
+ const struct thermal_trip *trip,
+ struct thermal_cooling_device *cdev);
int thermal_zone_unbind_cooling_device(struct thermal_zone_device *, int,
struct thermal_cooling_device *);
void thermal_zone_device_update(struct thermal_zone_device *,
enum thermal_notify_event);
-void thermal_zone_device_exec(struct thermal_zone_device *tz,
- void (*cb)(struct thermal_zone_device *,
- unsigned long),
- unsigned long data);
struct thermal_cooling_device *thermal_cooling_device_register(const char *,
void *, const struct thermal_cooling_device_ops *);
diff --git a/include/linux/thunderbolt.h b/include/linux/thunderbolt.h
index 02333f47c994..6151c210d987 100644
--- a/include/linux/thunderbolt.h
+++ b/include/linux/thunderbolt.h
@@ -175,7 +175,7 @@ void tb_unregister_property_dir(const char *key, struct tb_property_dir *dir);
* enum tb_link_width - Thunderbolt/USB4 link width
* @TB_LINK_WIDTH_SINGLE: Single lane link
* @TB_LINK_WIDTH_DUAL: Dual lane symmetric link
- * @TB_LINK_WIDTH_ASYM_TX: Dual lane asymmetric Gen 4 link with 3 trasmitters
+ * @TB_LINK_WIDTH_ASYM_TX: Dual lane asymmetric Gen 4 link with 3 transmitters
* @TB_LINK_WIDTH_ASYM_RX: Dual lane asymmetric Gen 4 link with 3 receivers
*/
enum tb_link_width {
diff --git a/include/linux/tick.h b/include/linux/tick.h
index 9459fef5b857..716d17f31c45 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -140,14 +140,6 @@ extern unsigned long tick_nohz_get_idle_calls(void);
extern unsigned long tick_nohz_get_idle_calls_cpu(int cpu);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
-
-static inline void tick_nohz_idle_stop_tick_protected(void)
-{
- local_irq_disable();
- tick_nohz_idle_stop_tick();
- local_irq_enable();
-}
-
#else /* !CONFIG_NO_HZ_COMMON */
#define tick_nohz_enabled (0)
static inline int tick_nohz_tick_stopped(void) { return 0; }
@@ -170,8 +162,6 @@ static inline ktime_t tick_nohz_get_sleep_length(ktime_t *delta_next)
}
static inline u64 get_cpu_idle_time_us(int cpu, u64 *unused) { return -1; }
static inline u64 get_cpu_iowait_time_us(int cpu, u64 *unused) { return -1; }
-
-static inline void tick_nohz_idle_stop_tick_protected(void) { }
#endif /* !CONFIG_NO_HZ_COMMON */
#ifdef CONFIG_NO_HZ_FULL
diff --git a/include/linux/timer.h b/include/linux/timer.h
index 9162f275819a..26a545bb0153 100644
--- a/include/linux/timer.h
+++ b/include/linux/timer.h
@@ -77,8 +77,7 @@ struct timer_list {
.entry = { .next = TIMER_ENTRY_STATIC }, \
.function = (_function), \
.flags = (_flags), \
- __TIMER_LOCKDEP_MAP_INITIALIZER( \
- __FILE__ ":" __stringify(__LINE__)) \
+ __TIMER_LOCKDEP_MAP_INITIALIZER(FILE_LINE) \
}
#define DEFINE_TIMER(_name, _function) \
diff --git a/include/linux/topology.h b/include/linux/topology.h
index fea32377f7c7..52f5850730b3 100644
--- a/include/linux/topology.h
+++ b/include/linux/topology.h
@@ -251,7 +251,7 @@ extern const struct cpumask *sched_numa_hop_mask(unsigned int node, unsigned int
#else
static __always_inline int sched_numa_find_nth_cpu(const struct cpumask *cpus, int cpu, int node)
{
- return cpumask_nth(cpu, cpus);
+ return cpumask_nth_and(cpu, cpus, cpu_online_mask);
}
static inline const struct cpumask *
diff --git a/include/linux/torture.h b/include/linux/torture.h
index bb466eec01e4..c98d0c83d117 100644
--- a/include/linux/torture.h
+++ b/include/linux/torture.h
@@ -81,7 +81,8 @@ static inline void torture_random_init(struct torture_random_state *trsp)
}
/* Definitions for high-resolution-timer sleeps. */
-int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, struct torture_random_state *trsp);
+int torture_hrtimeout_ns(ktime_t baset_ns, u32 fuzzt_ns, const enum hrtimer_mode mode,
+ struct torture_random_state *trsp);
int torture_hrtimeout_us(u32 baset_us, u32 fuzzt_ns, struct torture_random_state *trsp);
int torture_hrtimeout_ms(u32 baset_ms, u32 fuzzt_us, struct torture_random_state *trsp);
int torture_hrtimeout_jiffies(u32 baset_j, struct torture_random_state *trsp);
@@ -120,10 +121,15 @@ void _torture_stop_kthread(char *m, struct task_struct **tp);
#define torture_stop_kthread(n, tp) \
_torture_stop_kthread("Stopping " #n " task", &(tp))
+/* Scheduler-related definitions. */
#ifdef CONFIG_PREEMPTION
#define torture_preempt_schedule() __preempt_schedule()
#else
#define torture_preempt_schedule() do { } while (0)
#endif
+#if IS_ENABLED(CONFIG_RCU_TORTURE_TEST) || IS_MODULE(CONFIG_RCU_TORTURE_TEST) || IS_ENABLED(CONFIG_LOCK_TORTURE_TEST) || IS_MODULE(CONFIG_LOCK_TORTURE_TEST)
+long torture_sched_setaffinity(pid_t pid, const struct cpumask *in_mask);
+#endif
+
#endif /* __LINUX_TORTURE_H */
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index eb5c3add939b..d68ff9b1247f 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -62,13 +62,13 @@ void trace_event_printf(struct trace_iterator *iter, const char *fmt, ...);
/* Used to find the offset and length of dynamic fields in trace events */
struct trace_dynamic_info {
#ifdef CONFIG_CPU_BIG_ENDIAN
- u16 offset;
u16 len;
+ u16 offset;
#else
- u16 len;
u16 offset;
+ u16 len;
#endif
-};
+} __packed;
/*
* The trace entry - the most basic unit of tracing. This is what
@@ -492,6 +492,7 @@ enum {
EVENT_FILE_FL_TRIGGER_COND_BIT,
EVENT_FILE_FL_PID_FILTER_BIT,
EVENT_FILE_FL_WAS_ENABLED_BIT,
+ EVENT_FILE_FL_FREED_BIT,
};
extern struct trace_event_file *trace_get_event_file(const char *instance,
@@ -630,6 +631,7 @@ extern int __kprobe_event_add_fields(struct dynevent_cmd *cmd, ...);
* TRIGGER_COND - When set, one or more triggers has an associated filter
* PID_FILTER - When set, the event is filtered based on pid
* WAS_ENABLED - Set when enabled to know to clear trace on module removal
+ * FREED - File descriptor is freed; all fields should be considered invalid
*/
enum {
EVENT_FILE_FL_ENABLED = (1 << EVENT_FILE_FL_ENABLED_BIT),
@@ -643,14 +645,14 @@ enum {
EVENT_FILE_FL_TRIGGER_COND = (1 << EVENT_FILE_FL_TRIGGER_COND_BIT),
EVENT_FILE_FL_PID_FILTER = (1 << EVENT_FILE_FL_PID_FILTER_BIT),
EVENT_FILE_FL_WAS_ENABLED = (1 << EVENT_FILE_FL_WAS_ENABLED_BIT),
+ EVENT_FILE_FL_FREED = (1 << EVENT_FILE_FL_FREED_BIT),
};
struct trace_event_file {
struct list_head list;
struct trace_event_call *event_call;
struct event_filter __rcu *filter;
- struct eventfs_file *ef;
- struct dentry *dir;
+ struct eventfs_inode *ei;
struct trace_array *tr;
struct trace_subsystem_dir *system;
struct list_head triggers;
@@ -672,6 +674,7 @@ struct trace_event_file {
* caching and such. Which is mostly OK ;-)
*/
unsigned long flags;
+ atomic_t ref; /* ref count for opened files */
atomic_t sm_ref; /* soft-mode reference counter */
atomic_t tm_ref; /* trigger-mode reference counter */
};
@@ -762,7 +765,8 @@ struct bpf_raw_event_map *bpf_get_raw_tracepoint(const char *name);
void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp);
int bpf_get_perf_event_info(const struct perf_event *event, u32 *prog_id,
u32 *fd_type, const char **buf,
- u64 *probe_offset, u64 *probe_addr);
+ u64 *probe_offset, u64 *probe_addr,
+ unsigned long *missed);
int bpf_kprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
int bpf_uprobe_multi_link_attach(const union bpf_attr *attr, struct bpf_prog *prog);
#else
@@ -802,7 +806,7 @@ static inline void bpf_put_raw_tracepoint(struct bpf_raw_event_map *btp)
static inline int bpf_get_perf_event_info(const struct perf_event *event,
u32 *prog_id, u32 *fd_type,
const char **buf, u64 *probe_offset,
- u64 *probe_addr)
+ u64 *probe_addr, unsigned long *missed)
{
return -EOPNOTSUPP;
}
@@ -878,6 +882,7 @@ extern void perf_kprobe_destroy(struct perf_event *event);
extern int bpf_get_kprobe_info(const struct perf_event *event,
u32 *fd_type, const char **symbol,
u64 *probe_offset, u64 *probe_addr,
+ unsigned long *missed,
bool perf_type_tracepoint);
#endif
#ifdef CONFIG_UPROBE_EVENTS
diff --git a/include/linux/trace_seq.h b/include/linux/trace_seq.h
index 6be92bf559fe..3691e0e76a1a 100644
--- a/include/linux/trace_seq.h
+++ b/include/linux/trace_seq.h
@@ -14,6 +14,7 @@
struct trace_seq {
char buffer[PAGE_SIZE];
struct seq_buf seq;
+ size_t readpos;
int full;
};
@@ -22,6 +23,7 @@ trace_seq_init(struct trace_seq *s)
{
seq_buf_init(&s->seq, s->buffer, PAGE_SIZE);
s->full = 0;
+ s->readpos = 0;
}
/**
diff --git a/include/linux/tracefs.h b/include/linux/tracefs.h
index 009072792fa3..7a5fe17b6bf9 100644
--- a/include/linux/tracefs.h
+++ b/include/linux/tracefs.h
@@ -23,26 +23,69 @@ struct file_operations;
struct eventfs_file;
-struct dentry *eventfs_create_events_dir(const char *name,
- struct dentry *parent);
-
-struct eventfs_file *eventfs_add_subsystem_dir(const char *name,
- struct dentry *parent);
+/**
+ * eventfs_callback - A callback function to create dynamic files in eventfs
+ * @name: The name of the file that is to be created
+ * @mode: return the file mode for the file (RW access, etc)
+ * @data: data to pass to the created file ops
+ * @fops: the file operations of the created file
+ *
+ * The eventfs files are dynamically created. The struct eventfs_entry array
+ * is passed to eventfs_create_dir() or eventfs_create_events_dir() that will
+ * be used to create the files within those directories. When a lookup
+ * or access to a file within the directory is made, the struct eventfs_entry
+ * array is used to find a callback() with the matching name that is being
+ * referenced (for lookups, the entire array is iterated and each callback
+ * will be called).
+ *
+ * The callback will be called with @name for the name of the file to create.
+ * The callback can return less than 1 to indicate that no file should be
+ * created.
+ *
+ * If a file is to be created, then @mode should be populated with the file
+ * mode (permissions) to create the file with. This is used to set the
+ * created inode's i_mode field.
+ *
+ * The @data should be set to the data passed to the other file operations
+ * (read, write, etc). Note, @data will also point to the data passed in
+ * to eventfs_create_dir() or eventfs_create_events_dir(), but the callback
+ * can replace the data if it chooses to. Otherwise, the original data
+ * will be used for the file operation functions.
+ *
+ * The @fops should be set to the file operations that will be used to create
+ * the inode.
+ *
+ * NB. This callback is called while holding internal locks of the eventfs
+ * system. The callback must not call any code that might also call into
+ * the tracefs or eventfs system or it will risk creating a deadlock.
+ */
+typedef int (*eventfs_callback)(const char *name, umode_t *mode, void **data,
+ const struct file_operations **fops);
-struct eventfs_file *eventfs_add_dir(const char *name,
- struct eventfs_file *ef_parent);
+/**
+ * struct eventfs_entry - dynamically created eventfs file call back handler
+ * @name: The name of the dynamic file in an eventfs directory
+ * @callback: The callback to get the fops of the file when it is created
+ *
+ * See the eventfs_callback() typedef for how to set up @callback.
+ */
+struct eventfs_entry {
+ const char *name;
+ eventfs_callback callback;
+};
-int eventfs_add_file(const char *name, umode_t mode,
- struct eventfs_file *ef_parent, void *data,
- const struct file_operations *fops);
+struct eventfs_inode;
-int eventfs_add_events_file(const char *name, umode_t mode,
- struct dentry *parent, void *data,
- const struct file_operations *fops);
+struct eventfs_inode *eventfs_create_events_dir(const char *name, struct dentry *parent,
+ const struct eventfs_entry *entries,
+ int size, void *data);
-void eventfs_remove(struct eventfs_file *ef);
+struct eventfs_inode *eventfs_create_dir(const char *name, struct eventfs_inode *parent,
+ const struct eventfs_entry *entries,
+ int size, void *data);
-void eventfs_remove_events_dir(struct dentry *dentry);
+void eventfs_remove_events_dir(struct eventfs_inode *ei);
+void eventfs_remove_dir(struct eventfs_inode *ei);
struct dentry *tracefs_create_file(const char *name, umode_t mode,
struct dentry *parent, void *data,
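Editor's sketch: under the reworked API a directory's contents are described up front by an eventfs_entry array and its files are materialized lazily through the callbacks. A sketch of registering one dynamic file under that model; the names, data, and stub fops are hypothetical:

#include <linux/string.h>
#include <linux/tracefs.h>

static const struct file_operations demo_enable_fops;	/* stub for sketch */

static int demo_callback(const char *name, umode_t *mode, void **data,
			 const struct file_operations **fops)
{
	if (strcmp(name, "enable") != 0)
		return 0;	/* <= 0 means: do not create this file */

	*mode = 0644;
	*fops = &demo_enable_fops;
	return 1;
}

static const struct eventfs_entry demo_entries[] = {
	{ .name = "enable", .callback = demo_callback },
};

/* Then, with a parent eventfs_inode in hand:
 *	ei = eventfs_create_dir("demo", parent, demo_entries,
 *				ARRAY_SIZE(demo_entries), demo_data);
 */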
diff --git a/include/linux/tsm.h b/include/linux/tsm.h
new file mode 100644
index 000000000000..de8324a2223c
--- /dev/null
+++ b/include/linux/tsm.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef __TSM_H
+#define __TSM_H
+
+#include <linux/sizes.h>
+#include <linux/types.h>
+
+#define TSM_INBLOB_MAX 64
+#define TSM_OUTBLOB_MAX SZ_32K
+
+/*
+ * Privilege level is a nested permission concept to allow confidential
+ * guests to partition address space; 4 levels are supported.
+ */
+#define TSM_PRIVLEVEL_MAX 3
+
+/**
+ * struct tsm_desc - option descriptor for generating tsm report blobs
+ * @privlevel: optional privilege level to associate with @outblob
+ * @inblob_len: sizeof @inblob
+ * @inblob: arbitrary input data
+ */
+struct tsm_desc {
+ unsigned int privlevel;
+ size_t inblob_len;
+ u8 inblob[TSM_INBLOB_MAX];
+};
+
+/**
+ * struct tsm_report - track state of report generation relative to options
+ * @desc: input parameters to @report_new()
+ * @outblob_len: sizeof(@outblob)
+ * @outblob: generated evidence to provide to the attestation agent
+ * @auxblob_len: sizeof(@auxblob)
+ * @auxblob: (optional) auxiliary data to the report (e.g. certificate data)
+ */
+struct tsm_report {
+ struct tsm_desc desc;
+ size_t outblob_len;
+ u8 *outblob;
+ size_t auxblob_len;
+ u8 *auxblob;
+};
+
+/**
+ * struct tsm_ops - attributes and operations for tsm instances
+ * @name: tsm id reflected in /sys/kernel/config/tsm/report/$report/provider
+ * @privlevel_floor: conveys the base privlevel for nested scenarios
+ * @report_new: Populate @report with the report blob and auxblob
+ * (optional), return 0 on successful population, or -errno otherwise
+ *
+ * Implementation-specific ops; only one provider is expected to be
+ * registered at a time, i.e. one of "sev-guest", "tdx-guest", etc.
+ */
+struct tsm_ops {
+ const char *name;
+ const unsigned int privlevel_floor;
+ int (*report_new)(struct tsm_report *report, void *data);
+};
+
+extern const struct config_item_type tsm_report_default_type;
+
+/* publish @privlevel, @privlevel_floor, and @auxblob attributes */
+extern const struct config_item_type tsm_report_extra_type;
+
+int tsm_register(const struct tsm_ops *ops, void *priv,
+ const struct config_item_type *type);
+int tsm_unregister(const struct tsm_ops *ops);
+#endif /* __TSM_H */
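A minimal provider sketch against this interface might look as follows; demo_report_new() and the "demo-guest" name are hypothetical, and a real backend would fill in the evidence blobs:

static int demo_report_new(struct tsm_report *report, void *data)
{
	/* A real provider would derive evidence from report->desc.inblob,
	 * allocate report->outblob (and optionally report->auxblob), and
	 * set the corresponding lengths before returning 0.
	 */
	return -ENXIO;	/* placeholder: no hardware backend in this sketch */
}

static const struct tsm_ops demo_tsm_ops = {
	.name = "demo-guest",
	.report_new = demo_report_new,
};

/* ret = tsm_register(&demo_tsm_ops, NULL, &tsm_report_default_type); */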
diff --git a/include/linux/tty.h b/include/linux/tty.h
index f002d0f25db7..4b6340ac2af2 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -390,14 +390,12 @@ int vcs_init(void);
extern const struct class tty_class;
/**
- * tty_kref_get - get a tty reference
- * @tty: tty device
+ * tty_kref_get - get a tty reference
+ * @tty: tty device
*
- * Return a new reference to a tty object. The caller must hold
- * sufficient locks/counts to ensure that their existing reference cannot
- * go away
+ * Returns: a new reference to a tty object. The caller must hold sufficient
+ * locks/counts to ensure that their existing reference cannot go away
*/
-
static inline struct tty_struct *tty_kref_get(struct tty_struct *tty)
{
if (tty)
@@ -416,8 +414,8 @@ unsigned int tty_chars_in_buffer(struct tty_struct *tty);
unsigned int tty_write_room(struct tty_struct *tty);
void tty_driver_flush_buffer(struct tty_struct *tty);
void tty_unthrottle(struct tty_struct *tty);
-int tty_throttle_safe(struct tty_struct *tty);
-int tty_unthrottle_safe(struct tty_struct *tty);
+bool tty_throttle_safe(struct tty_struct *tty);
+bool tty_unthrottle_safe(struct tty_struct *tty);
int tty_do_resize(struct tty_struct *tty, struct winsize *ws);
int tty_get_icount(struct tty_struct *tty,
struct serial_icounter_struct *icount);
@@ -435,14 +433,13 @@ void tty_encode_baud_rate(struct tty_struct *tty, speed_t ibaud,
speed_t obaud);
/**
- * tty_get_baud_rate - get tty bit rates
- * @tty: tty to query
+ * tty_get_baud_rate - get tty bit rates
+ * @tty: tty to query
*
- * Returns the baud rate as an integer for this terminal. The
- * termios lock must be held by the caller and the terminal bit
- * flags may be updated.
+ * Returns: the baud rate as an integer for this terminal. The termios lock
+ * must be held by the caller and the terminal bit flags may be updated.
*
- * Locking: none
+ * Locking: none
*/
static inline speed_t tty_get_baud_rate(struct tty_struct *tty)
{
diff --git a/include/linux/ucs2_string.h b/include/linux/ucs2_string.h
index cf3ada3e820e..c499ae809c7d 100644
--- a/include/linux/ucs2_string.h
+++ b/include/linux/ucs2_string.h
@@ -10,6 +10,7 @@ typedef u16 ucs2_char_t;
unsigned long ucs2_strnlen(const ucs2_char_t *s, size_t maxlength);
unsigned long ucs2_strlen(const ucs2_char_t *s);
unsigned long ucs2_strsize(const ucs2_char_t *data, unsigned long maxlength);
+ssize_t ucs2_strscpy(ucs2_char_t *dst, const ucs2_char_t *src, size_t count);
int ucs2_strncmp(const ucs2_char_t *a, const ucs2_char_t *b, size_t len);
unsigned long ucs2_utf8size(const ucs2_char_t *src);
diff --git a/include/linux/udp.h b/include/linux/udp.h
index 43c1fb2d2c21..d04188714dca 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -32,25 +32,30 @@ static inline u32 udp_hashfn(const struct net *net, u32 num, u32 mask)
return (num + net_hash_mix(net)) & mask;
}
+enum {
+ UDP_FLAGS_CORK, /* Cork is required */
+ UDP_FLAGS_NO_CHECK6_TX, /* Send zero UDP6 checksums on TX? */
+ UDP_FLAGS_NO_CHECK6_RX, /* Allow zero UDP6 checksums on RX? */
+ UDP_FLAGS_GRO_ENABLED, /* Request GRO aggregation */
+ UDP_FLAGS_ACCEPT_FRAGLIST,
+ UDP_FLAGS_ACCEPT_L4,
+ UDP_FLAGS_ENCAP_ENABLED, /* This socket enabled encap */
+ UDP_FLAGS_UDPLITE_SEND_CC, /* set via udplite setsockopt */
+ UDP_FLAGS_UDPLITE_RECV_CC, /* set via udplite setsockopt */
+};
+
struct udp_sock {
/* inet_sock has to be the first member */
struct inet_sock inet;
#define udp_port_hash inet.sk.__sk_common.skc_u16hashes[0]
#define udp_portaddr_hash inet.sk.__sk_common.skc_u16hashes[1]
#define udp_portaddr_node inet.sk.__sk_common.skc_portaddr_node
+
+ unsigned long udp_flags;
+
int pending; /* Any pending frames ? */
- unsigned int corkflag; /* Cork is required */
__u8 encap_type; /* Is this an Encapsulation socket? */
- unsigned char no_check6_tx:1,/* Send zero UDP6 checksums on TX? */
- no_check6_rx:1,/* Allow zero UDP6 checksums on RX? */
- encap_enabled:1, /* This socket enabled encap
- * processing; UDP tunnels and
- * different encapsulation layer set
- * this
- */
- gro_enabled:1, /* Request GRO aggregation */
- accept_udp_l4:1,
- accept_udp_fraglist:1;
+
/*
* Following member retains the information to create a UDP header
* when the socket is uncorked.
@@ -62,12 +67,6 @@ struct udp_sock {
*/
__u16 pcslen;
__u16 pcrlen;
-/* indicator bits used by pcflag: */
-#define UDPLITE_BIT 0x1 /* set by udplite proto init function */
-#define UDPLITE_SEND_CC 0x2 /* set via udplite setsockopt */
-#define UDPLITE_RECV_CC 0x4 /* set via udplite setsocktopt */
- __u8 pcflag; /* marks socket as UDP-Lite if > 0 */
- __u8 unused[3];
/*
* For encapsulation sockets.
*/
@@ -95,28 +94,39 @@ struct udp_sock {
int forward_threshold;
};
+#define udp_test_bit(nr, sk) \
+ test_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
+#define udp_set_bit(nr, sk) \
+ set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
+#define udp_test_and_set_bit(nr, sk) \
+ test_and_set_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
+#define udp_clear_bit(nr, sk) \
+ clear_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags)
+#define udp_assign_bit(nr, sk, val) \
+ assign_bit(UDP_FLAGS_##nr, &udp_sk(sk)->udp_flags, val)
+
#define UDP_MAX_SEGMENTS (1 << 6UL)
#define udp_sk(ptr) container_of_const(ptr, struct udp_sock, inet.sk)
static inline void udp_set_no_check6_tx(struct sock *sk, bool val)
{
- udp_sk(sk)->no_check6_tx = val;
+ udp_assign_bit(NO_CHECK6_TX, sk, val);
}
static inline void udp_set_no_check6_rx(struct sock *sk, bool val)
{
- udp_sk(sk)->no_check6_rx = val;
+ udp_assign_bit(NO_CHECK6_RX, sk, val);
}
-static inline bool udp_get_no_check6_tx(struct sock *sk)
+static inline bool udp_get_no_check6_tx(const struct sock *sk)
{
- return udp_sk(sk)->no_check6_tx;
+ return udp_test_bit(NO_CHECK6_TX, sk);
}
-static inline bool udp_get_no_check6_rx(struct sock *sk)
+static inline bool udp_get_no_check6_rx(const struct sock *sk)
{
- return udp_sk(sk)->no_check6_rx;
+ return udp_test_bit(NO_CHECK6_RX, sk);
}
static inline void udp_cmsg_recv(struct msghdr *msg, struct sock *sk,
@@ -135,10 +145,12 @@ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
if (!skb_is_gso(skb))
return false;
- if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 && !udp_sk(sk)->accept_udp_l4)
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
+ !udp_test_bit(ACCEPT_L4, sk))
return true;
- if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST && !udp_sk(sk)->accept_udp_fraglist)
+ if (skb_shinfo(skb)->gso_type & SKB_GSO_FRAGLIST &&
+ !udp_test_bit(ACCEPT_FRAGLIST, sk))
return true;
return false;
@@ -146,8 +158,8 @@ static inline bool udp_unexpected_gso(struct sock *sk, struct sk_buff *skb)
static inline void udp_allow_gso(struct sock *sk)
{
- udp_sk(sk)->accept_udp_l4 = 1;
- udp_sk(sk)->accept_udp_fraglist = 1;
+ udp_set_bit(ACCEPT_L4, sk);
+ udp_set_bit(ACCEPT_FRAGLIST, sk);
}
#define udp_portaddr_for_each_entry(__sk, list) \
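The udp_*_bit() macros above paste the UDP_FLAGS_ prefix onto the short flag name, so callers operate atomically on udp_sk(sk)->udp_flags instead of the old non-atomic bitfields. A small usage sketch (the demo_* helpers are illustrative):

static inline void demo_enable_udp_gro(struct sock *sk)
{
	udp_set_bit(GRO_ENABLED, sk);	/* expands to UDP_FLAGS_GRO_ENABLED */
}

static inline bool demo_udp_gro_enabled(const struct sock *sk)
{
	return udp_test_bit(GRO_ENABLED, sk);
}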
diff --git a/include/linux/uio.h b/include/linux/uio.h
index 42bce38a8e87..b6214cbf2a43 100644
--- a/include/linux/uio.h
+++ b/include/linux/uio.h
@@ -21,12 +21,12 @@ struct kvec {
enum iter_type {
/* iter types */
+ ITER_UBUF,
ITER_IOVEC,
- ITER_KVEC,
ITER_BVEC,
+ ITER_KVEC,
ITER_XARRAY,
ITER_DISCARD,
- ITER_UBUF,
};
#define ITER_SOURCE 1 // == WRITE
@@ -43,11 +43,7 @@ struct iov_iter {
bool copy_mc;
bool nofault;
bool data_source;
- bool user_backed;
- union {
- size_t iov_offset;
- int last_offset;
- };
+ size_t iov_offset;
/*
* Hack alert: overlay ubuf_iovec with iovec + count, so
* that the members resolve correctly regardless of the type
@@ -143,7 +139,7 @@ static inline unsigned char iov_iter_rw(const struct iov_iter *i)
static inline bool user_backed_iter(const struct iov_iter *i)
{
- return i->user_backed;
+ return iter_is_ubuf(i) || iter_is_iovec(i);
}
/*
@@ -342,27 +338,6 @@ iov_iter_npages_cap(struct iov_iter *i, int maxpages, size_t max_bytes)
return npages;
}
-struct csum_state {
- __wsum csum;
- size_t off;
-};
-
-size_t csum_and_copy_to_iter(const void *addr, size_t bytes, void *csstate, struct iov_iter *i);
-size_t csum_and_copy_from_iter(void *addr, size_t bytes, __wsum *csum, struct iov_iter *i);
-
-static __always_inline __must_check
-bool csum_and_copy_from_iter_full(void *addr, size_t bytes,
- __wsum *csum, struct iov_iter *i)
-{
- size_t copied = csum_and_copy_from_iter(addr, bytes, csum, i);
- if (likely(copied == bytes))
- return true;
- iov_iter_revert(i, copied);
- return false;
-}
-size_t hash_and_copy_to_iter(const void *addr, size_t bytes, void *hashp,
- struct iov_iter *i);
-
struct iovec *iovec_from_user(const struct iovec __user *uvector,
unsigned long nr_segs, unsigned long fast_segs,
struct iovec *fast_iov, bool compat);
@@ -383,7 +358,6 @@ static inline void iov_iter_ubuf(struct iov_iter *i, unsigned int direction,
*i = (struct iov_iter) {
.iter_type = ITER_UBUF,
.copy_mc = false,
- .user_backed = true,
.data_source = direction,
.ubuf = buf,
.count = count,
diff --git a/include/linux/units.h b/include/linux/units.h
index 2793a41e73a2..ff1bd6b5f5b3 100644
--- a/include/linux/units.h
+++ b/include/linux/units.h
@@ -31,6 +31,10 @@
#define MICROWATT_PER_MILLIWATT 1000UL
#define MICROWATT_PER_WATT 1000000UL
+#define BYTES_PER_KBIT (KILO / BITS_PER_BYTE)
+#define BYTES_PER_MBIT (MEGA / BITS_PER_BYTE)
+#define BYTES_PER_GBIT (GIGA / BITS_PER_BYTE)
+
#define ABSOLUTE_ZERO_MILLICELSIUS -273150
static inline long milli_kelvin_to_millicelsius(long t)
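With KILO == 1000 and BITS_PER_BYTE == 8, BYTES_PER_KBIT works out to 125, so a rate conversion is a single multiply. A hypothetical helper using the new constants:

static inline unsigned long demo_kbps_to_bytes_per_sec(unsigned long kbps)
{
	return kbps * BYTES_PER_KBIT;	/* e.g. 100 kbit/s -> 12500 bytes/s */
}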
diff --git a/include/linux/usb.h b/include/linux/usb.h
index a21074861f91..8c61643acd49 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -1823,22 +1823,6 @@ void *usb_alloc_coherent(struct usb_device *dev, size_t size,
void usb_free_coherent(struct usb_device *dev, size_t size,
void *addr, dma_addr_t dma);
-#if 0
-struct urb *usb_buffer_map(struct urb *urb);
-void usb_buffer_dmasync(struct urb *urb);
-void usb_buffer_unmap(struct urb *urb);
-#endif
-
-struct scatterlist;
-int usb_buffer_map_sg(const struct usb_device *dev, int is_in,
- struct scatterlist *sg, int nents);
-#if 0
-void usb_buffer_dmasync_sg(const struct usb_device *dev, int is_in,
- struct scatterlist *sg, int n_hw_ents);
-#endif
-void usb_buffer_unmap_sg(const struct usb_device *dev, int is_in,
- struct scatterlist *sg, int n_hw_ents);
-
/*-------------------------------------------------------------------*
* SYNCHRONOUS CALL SUPPORT *
*-------------------------------------------------------------------*/
diff --git a/include/linux/usb/chipidea.h b/include/linux/usb/chipidea.h
index 0b4f2d5faa08..5a7f96684ea2 100644
--- a/include/linux/usb/chipidea.h
+++ b/include/linux/usb/chipidea.h
@@ -64,6 +64,7 @@ struct ci_hdrc_platform_data {
#define CI_HDRC_PMQOS BIT(15)
#define CI_HDRC_PHY_VBUS_CONTROL BIT(16)
#define CI_HDRC_HAS_PORTSC_PEC_MISSED BIT(17)
+#define CI_HDRC_FORCE_VBUS_ACTIVE_ALWAYS BIT(18)
enum usb_dr_mode dr_mode;
#define CI_HDRC_CONTROLLER_RESET_EVENT 0
#define CI_HDRC_CONTROLLER_STOPPED_EVENT 1
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 6014340ba980..af3cd2aae4bc 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -35,6 +35,14 @@
* are ready. The control transfer will then be kept from completing till
* all the function drivers that requested USB_GADGET_DELAYED_STATUS
* invoke usb_composite_setup_continue().
+ *
+ * NOTE: USB_GADGET_DELAYED_STATUS must not be used in UDC drivers: they
+ * must delay completing the status stage for 0-length control transfers
+ * regardless of whether USB_GADGET_DELAYED_STATUS is returned from
+ * the gadget driver's setup() callback.
+ * Currently, a number of UDC drivers rely on USB_GADGET_DELAYED_STATUS,
+ * which is a bug. These drivers must be fixed and USB_GADGET_DELAYED_STATUS
+ * must be contained within the composite framework.
*/
#define USB_GADGET_DELAYED_STATUS 0x7fff /* Impossibly large value */
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index 75bda0783395..6532beb587b1 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -711,6 +711,15 @@ static inline int usb_gadget_check_config(struct usb_gadget *gadget)
* get_interface. Setting a configuration (or interface) is where
* endpoints should be activated or (config 0) shut down.
*
+ * The gadget driver's setup() callback does not have to queue a response to
+ * ep0 within the setup() call, the driver can do it after setup() returns.
+ * The UDC driver must wait until such a response is queued before proceeding
+ * with the data/status stages of the control transfer.
+ *
+ * NOTE: Currently, a number of UDC drivers rely on USB_GADGET_DELAYED_STATUS
+ * being returned from the setup() callback, which is a bug. See the comment
+ * next to USB_GADGET_DELAYED_STATUS for details.
+ *
* (Note that only the default control endpoint is supported. Neither
* hosts nor devices generally support control traffic except to ep0.)
*
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 61d4f0b793dc..00724b4f6e12 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -484,8 +484,25 @@ extern int usb_hcd_pci_probe(struct pci_dev *dev,
extern void usb_hcd_pci_remove(struct pci_dev *dev);
extern void usb_hcd_pci_shutdown(struct pci_dev *dev);
+#ifdef CONFIG_USB_PCI_AMD
extern int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev);
+static inline bool usb_hcd_amd_resume_bug(struct pci_dev *dev,
+ const struct hc_driver *driver)
+{
+ if (!usb_hcd_amd_remote_wakeup_quirk(dev))
+ return false;
+ if (driver->flags & (HCD_USB11 | HCD_USB3))
+ return true;
+ return false;
+}
+#else /* CONFIG_USB_PCI_AMD */
+static inline bool usb_hcd_amd_resume_bug(struct pci_dev *dev,
+ const struct hc_driver *driver)
+{
+ return false;
+}
+#endif
extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
#endif /* CONFIG_USB_PCI */
diff --git a/include/linux/usb/ljca.h b/include/linux/usb/ljca.h
new file mode 100644
index 000000000000..47661feda96c
--- /dev/null
+++ b/include/linux/usb/ljca.h
@@ -0,0 +1,145 @@
+/* SPDX-License-Identifier: GPL-2.0-only */
+/*
+ * Copyright (c) 2023, Intel Corporation. All rights reserved.
+ */
+#ifndef _LINUX_USB_LJCA_H_
+#define _LINUX_USB_LJCA_H_
+
+#include <linux/auxiliary_bus.h>
+#include <linux/list.h>
+#include <linux/spinlock.h>
+#include <linux/types.h>
+
+#define LJCA_MAX_GPIO_NUM 64
+
+#define auxiliary_dev_to_ljca_client(auxiliary_dev) \
+ container_of(auxiliary_dev, struct ljca_client, auxdev)
+
+struct ljca_adapter;
+
+/**
+ * typedef ljca_event_cb_t - event callback function signature
+ *
+ * @context: the execution context supplied by whoever registered this callback
+ * @cmd: the command from device for this event
+ * @evt_data: the event data payload
+ * @len: the event data payload length
+ *
+ * The callback function is called in interrupt context and the data payload is
+ * only valid during the call. If the user needs to access the data later, it
+ * must copy it.
+ */
+typedef void (*ljca_event_cb_t)(void *context, u8 cmd, const void *evt_data, int len);
+
+/**
+ * struct ljca_client - represent a ljca client device
+ *
+ * @type: ljca client type
+ * @id: ljca client id within same client type
+ * @link: ljca client on the same ljca adapter
+ * @auxdev: auxiliary device object
+ * @adapter: ljca adapter the ljca client sits on
+ * @context: the execution context of the event callback
+ * @event_cb: the ljca client driver registers this callback to receive
+ * notifications of pending firmware asynchronous rx buffers
+ * @event_cb_lock: spinlock to protect event callback
+ */
+struct ljca_client {
+ u8 type;
+ u8 id;
+ struct list_head link;
+ struct auxiliary_device auxdev;
+ struct ljca_adapter *adapter;
+
+ void *context;
+ ljca_event_cb_t event_cb;
+ /* lock to protect event_cb */
+ spinlock_t event_cb_lock;
+};
+
+/**
+ * struct ljca_gpio_info - ljca gpio client device info
+ *
+ * @num: ljca gpio client device pin number
+ * @valid_pin_map: ljca gpio client device valid pin mapping
+ */
+struct ljca_gpio_info {
+ unsigned int num;
+ DECLARE_BITMAP(valid_pin_map, LJCA_MAX_GPIO_NUM);
+};
+
+/**
+ * struct ljca_i2c_info - ljca i2c client device info
+ *
+ * @id: ljca i2c client device identification number
+ * @capacity: ljca i2c client device capacity
+ * @intr_pin: ljca i2c client device interrupt pin number if exists
+ */
+struct ljca_i2c_info {
+ u8 id;
+ u8 capacity;
+ u8 intr_pin;
+};
+
+/**
+ * struct ljca_spi_info - ljca spi client device info
+ *
+ * @id: ljca spi client device identification number
+ * @capacity: ljca spi client device capacity
+ */
+struct ljca_spi_info {
+ u8 id;
+ u8 capacity;
+};
+
+/**
+ * ljca_register_event_cb - register a callback function to receive events
+ *
+ * @client: ljca client device
+ * @event_cb: callback function
+ * @context: execution context of event callback
+ *
+ * Return: 0 in case of success, negative value in case of error
+ */
+int ljca_register_event_cb(struct ljca_client *client, ljca_event_cb_t event_cb, void *context);
+
+/**
+ * ljca_unregister_event_cb - unregister the callback function for an event
+ *
+ * @client: ljca client device
+ */
+void ljca_unregister_event_cb(struct ljca_client *client);
+
+/**
+ * ljca_transfer - issue a LJCA command and wait for a response
+ *
+ * @client: ljca client device
+ * @cmd: the command to be sent to the device
+ * @obuf: the buffer to be sent to the device; it can be NULL if the user
+ * doesn't need to transmit data with this command
+ * @obuf_len: the size of the buffer to be sent to the device; it should
+ * be 0 when obuf is NULL
+ * @ibuf: any data associated with the response will be copied here; it can be
+ * NULL if the user doesn't need the response data
+ * @ibuf_len: must be initialized to the input buffer size
+ *
+ * Return: the actual length of response data for success, negative value for errors
+ */
+int ljca_transfer(struct ljca_client *client, u8 cmd, const u8 *obuf,
+ u8 obuf_len, u8 *ibuf, u8 ibuf_len);
+
+/**
+ * ljca_transfer_noack - issue a LJCA command without a response
+ *
+ * @client: ljca client device
+ * @cmd: the command to be sent to the device
+ * @obuf: the buffer to be sent to the device; it can be NULL if the user
+ * doesn't need to transmit data with this command
+ * @obuf_len: the size of the buffer to be sent to the device
+ *
+ * Return: 0 for success, negative value for errors
+ */
+int ljca_transfer_noack(struct ljca_client *client, u8 cmd, const u8 *obuf,
+ u8 obuf_len);
+
+#endif
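Tying the pieces together, a hypothetical client driver could register for events and issue one command; demo_handle_event(), the 0x01 command byte, and the buffer size are illustrative only:

static void demo_handle_event(void *context, u8 cmd, const void *evt_data, int len)
{
	/* Runs in interrupt context; copy evt_data if it is needed later. */
}

static int demo_client_init(struct ljca_client *client)
{
	u8 resp[8];
	int ret;

	ret = ljca_register_event_cb(client, demo_handle_event, client);
	if (ret)
		return ret;

	ret = ljca_transfer(client, 0x01 /* hypothetical status cmd */,
			    NULL, 0, resp, sizeof(resp));
	return ret < 0 ? ret : 0;	/* >= 0 is the response length */
}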
diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h
index c59fb79a42e8..eb626af0e4e7 100644
--- a/include/linux/usb/pd.h
+++ b/include/linux/usb/pd.h
@@ -228,6 +228,7 @@ enum pd_pdo_type {
#define PDO_FIXED_UNCHUNK_EXT BIT(24) /* Unchunked Extended Message supported (Source) */
#define PDO_FIXED_FRS_CURR_MASK (BIT(24) | BIT(23)) /* FR_Swap Current (Sink) */
#define PDO_FIXED_FRS_CURR_SHIFT 23
+#define PDO_FIXED_PEAK_CURR_SHIFT 20
#define PDO_FIXED_VOLT_SHIFT 10 /* 50mV units */
#define PDO_FIXED_CURR_SHIFT 0 /* 10mA units */
diff --git a/include/linux/usb/pd_vdo.h b/include/linux/usb/pd_vdo.h
index b057250704e8..3a747938cdab 100644
--- a/include/linux/usb/pd_vdo.h
+++ b/include/linux/usb/pd_vdo.h
@@ -376,6 +376,7 @@
| ((vbm) & 0x3) << 9 | (sbu) << 8 | (sbut) << 7 | ((cur) & 0x3) << 5 \
| (vbt) << 4 | (sopp) << 3 | ((spd) & 0x7))
+#define VDO_TYPEC_CABLE_SPEED(vdo) ((vdo) & 0x7)
#define VDO_TYPEC_CABLE_TYPE(vdo) (((vdo) >> 18) & 0x3)
/*
diff --git a/include/linux/usb/renesas_usbhs.h b/include/linux/usb/renesas_usbhs.h
index d418c55523a7..372898d9eeb0 100644
--- a/include/linux/usb/renesas_usbhs.h
+++ b/include/linux/usb/renesas_usbhs.h
@@ -5,16 +5,6 @@
* Copyright (C) 2011 Renesas Solutions Corp.
* Copyright (C) 2019 Renesas Electronics Corporation
* Kuninori Morimoto <kuninori.morimoto.gx@renesas.com>
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- *
*/
#ifndef RENESAS_USB_H
#define RENESAS_USB_H
diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h
index 8fa781207970..a05d6f6f2536 100644
--- a/include/linux/usb/typec.h
+++ b/include/linux/usb/typec.h
@@ -202,6 +202,8 @@ struct typec_cable_desc {
* @accessory: Audio, Debug or none.
* @identity: Discover Identity command data
* @pd_revision: USB Power Delivery Specification Revision if supported
+ * @attach: Notification about attached USB device
+ * @deattach: Notification about removed USB device
*
* Details about a partner that is attached to USB Type-C port. If @identity
* member exists when partner is registered, a directory named "identity" is
@@ -217,6 +219,9 @@ struct typec_partner_desc {
enum typec_accessory accessory;
struct usb_pd_identity *identity;
u16 pd_revision; /* 0300H = "3.0" */
+
+ void (*attach)(struct typec_partner *partner, struct device *dev);
+ void (*deattach)(struct typec_partner *partner, struct device *dev);
};
/**
@@ -335,4 +340,36 @@ int typec_port_set_usb_power_delivery(struct typec_port *port, struct usb_power_
int typec_partner_set_usb_power_delivery(struct typec_partner *partner,
struct usb_power_delivery *pd);
+/**
+ * struct typec_connector - Representation of Type-C port for external drivers
+ * @attach: notification about device attachment
+ * @deattach: notification about device removal
+ *
+ * Drivers that control the USB and other ports (DisplayPort, etc.) connected
+ * to the Type-C connectors can use these callbacks to inform the Type-C
+ * connector class about connections and disconnections. The typec-port
+ * drivers can then use that information to power on or off the parts that
+ * are or are not needed; for example, in USB mode, if a USB2 device is
+ * enumerated, the USB3 components (retimers, phys, and so on) do not need
+ * to be powered on.
+ *
+ * The attached (enumerated) devices will be linked with the typec-partner device.
+ */
+struct typec_connector {
+ void (*attach)(struct typec_connector *con, struct device *dev);
+ void (*deattach)(struct typec_connector *con, struct device *dev);
+};
+
+static inline void typec_attach(struct typec_connector *con, struct device *dev)
+{
+ if (con && con->attach)
+ con->attach(con, dev);
+}
+
+static inline void typec_deattach(struct typec_connector *con, struct device *dev)
+{
+ if (con && con->deattach)
+ con->deattach(con, dev);
+}
+
#endif /* __LINUX_USB_TYPEC_H */
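As a sketch, a port driver that hands a struct typec_connector to, say, its USB host controller could have enumeration results reported back like this (the demo_* wrappers are hypothetical):

static void demo_report_device_added(struct typec_connector *con,
				     struct device *dev)
{
	typec_attach(con, dev);		/* no-op if con or ->attach is NULL */
}

static void demo_report_device_removed(struct typec_connector *con,
				       struct device *dev)
{
	typec_deattach(con, dev);
}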
diff --git a/include/linux/usb/typec_dp.h b/include/linux/usb/typec_dp.h
index 8d09c2f0a9b8..1f358098522d 100644
--- a/include/linux/usb/typec_dp.h
+++ b/include/linux/usb/typec_dp.h
@@ -67,8 +67,10 @@ enum {
#define DP_CAP_UFP_D 1
#define DP_CAP_DFP_D 2
#define DP_CAP_DFP_D_AND_UFP_D 3
-#define DP_CAP_DP_SIGNALING BIT(2) /* Always set */
-#define DP_CAP_GEN2 BIT(3) /* Reserved after v1.0b */
+#define DP_CAP_DP_SIGNALLING(_cap_) (((_cap_) & GENMASK(5, 2)) >> 2)
+#define DP_CAP_SIGNALLING_HBR3 1
+#define DP_CAP_SIGNALLING_UHBR10 2
+#define DP_CAP_SIGNALLING_UHBR20 3
#define DP_CAP_RECEPTACLE BIT(6)
#define DP_CAP_USB BIT(7)
#define DP_CAP_DFP_D_PIN_ASSIGN(_cap_) (((_cap_) & GENMASK(15, 8)) >> 8)
@@ -78,6 +80,13 @@ enum {
DP_CAP_UFP_D_PIN_ASSIGN(_cap_) : DP_CAP_DFP_D_PIN_ASSIGN(_cap_))
#define DP_CAP_PIN_ASSIGN_DFP_D(_cap_) ((_cap_ & DP_CAP_RECEPTACLE) ? \
DP_CAP_DFP_D_PIN_ASSIGN(_cap_) : DP_CAP_UFP_D_PIN_ASSIGN(_cap_))
+#define DP_CAP_UHBR_13_5_SUPPORT BIT(26)
+#define DP_CAP_CABLE_TYPE(_cap_) (((_cap_) & GENMASK(29, 28)) >> 28)
+#define DP_CAP_CABLE_TYPE_PASSIVE 0
+#define DP_CAP_CABLE_TYPE_RE_TIMER 1
+#define DP_CAP_CABLE_TYPE_RE_DRIVER 2
+#define DP_CAP_CABLE_TYPE_OPTICAL 3
+#define DP_CAP_DPAM_VERSION BIT(30)
/* DisplayPort Status Update VDO bits */
#define DP_STATUS_CONNECTION(_status_) ((_status_) & 3)
@@ -97,13 +106,24 @@ enum {
#define DP_CONF_CURRENTLY(_conf_) ((_conf_) & 3)
#define DP_CONF_UFP_U_AS_DFP_D BIT(0)
#define DP_CONF_UFP_U_AS_UFP_D BIT(1)
-#define DP_CONF_SIGNALING_DP BIT(2)
-#define DP_CONF_SIGNALING_GEN_2 BIT(3) /* Reserved after v1.0b */
+#define DP_CONF_SIGNALLING_MASK GENMASK(5, 2)
+#define DP_CONF_SIGNALLING_SHIFT 2
+#define DP_CONF_SIGNALLING_HBR3 1
+#define DP_CONF_SIGNALLING_UHBR10 2
+#define DP_CONF_SIGNALLING_UHBR20 3
#define DP_CONF_PIN_ASSIGNEMENT_SHIFT 8
#define DP_CONF_PIN_ASSIGNEMENT_MASK GENMASK(15, 8)
/* Helper for setting/getting the pin assignment value to the configuration */
#define DP_CONF_SET_PIN_ASSIGN(_a_) ((_a_) << 8)
#define DP_CONF_GET_PIN_ASSIGN(_conf_) (((_conf_) & GENMASK(15, 8)) >> 8)
+#define DP_CONF_UHBR13_5_SUPPORT BIT(26)
+#define DP_CONF_CABLE_TYPE_MASK GENMASK(29, 28)
+#define DP_CONF_CABLE_TYPE_SHIFT 28
+#define DP_CONF_CABLE_TYPE_PASSIVE 0
+#define DP_CONF_CABLE_TYPE_RE_TIMER 1
+#define DP_CONF_CABLE_TYPE_RE_DRIVER 2
+#define DP_CONF_CABLE_TYPE_OPTICAL 3
+#define DP_CONF_DPAM_VERSION BIT(30)
#endif /* __USB_TYPEC_DP_H */
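A short sketch of decoding the new capability fields from a raw Discover Modes VDO; the demo_* helpers are illustrative:

static inline bool demo_cable_is_optical(u32 cap)
{
	return DP_CAP_CABLE_TYPE(cap) == DP_CAP_CABLE_TYPE_OPTICAL;
}

static inline bool demo_supports_uhbr20(u32 cap)
{
	return DP_CAP_DP_SIGNALLING(cap) == DP_CAP_SIGNALLING_UHBR20;
}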
diff --git a/include/linux/usb/typec_tbt.h b/include/linux/usb/typec_tbt.h
index 63dd44b72e0c..c7a2153bd6f5 100644
--- a/include/linux/usb/typec_tbt.h
+++ b/include/linux/usb/typec_tbt.h
@@ -46,6 +46,7 @@ struct typec_thunderbolt_data {
#define TBT_CABLE_OPTICAL BIT(21)
#define TBT_CABLE_RETIMER BIT(22)
#define TBT_CABLE_LINK_TRAINING BIT(23)
+#define TBT_CABLE_ACTIVE_PASSIVE BIT(25)
#define TBT_SET_CABLE_SPEED(_s_) (((_s_) & GENMASK(2, 0)) << 16)
#define TBT_SET_CABLE_ROUNDED(_g_) (((_g_) & GENMASK(1, 0)) << 19)
diff --git a/include/linux/user_namespace.h b/include/linux/user_namespace.h
index 45f09bec02c4..6030a8235617 100644
--- a/include/linux/user_namespace.h
+++ b/include/linux/user_namespace.h
@@ -65,6 +65,10 @@ enum rlimit_type {
UCOUNT_RLIMIT_COUNTS,
};
+#if IS_ENABLED(CONFIG_BINFMT_MISC)
+struct binfmt_misc;
+#endif
+
struct user_namespace {
struct uid_gid_map uid_map;
struct uid_gid_map gid_map;
@@ -102,6 +106,10 @@ struct user_namespace {
struct ucounts *ucounts;
long ucount_max[UCOUNT_COUNTS];
long rlimit_max[UCOUNT_RLIMIT_COUNTS];
+
+#if IS_ENABLED(CONFIG_BINFMT_MISC)
+ struct binfmt_misc *binfmt_misc;
+#endif
} __randomize_layout;
struct ucounts {
diff --git a/include/linux/userfaultfd_k.h b/include/linux/userfaultfd_k.h
index ac8c6854097c..f2dc19f40d05 100644
--- a/include/linux/userfaultfd_k.h
+++ b/include/linux/userfaultfd_k.h
@@ -161,11 +161,22 @@ static inline bool userfaultfd_armed(struct vm_area_struct *vma)
}
static inline bool vma_can_userfault(struct vm_area_struct *vma,
- unsigned long vm_flags)
+ unsigned long vm_flags,
+ bool wp_async)
{
+ vm_flags &= __VM_UFFD_FLAGS;
+
if ((vm_flags & VM_UFFD_MINOR) &&
(!is_vm_hugetlb_page(vma) && !vma_is_shmem(vma)))
return false;
+
+ /*
+ * If wp async enabled, and WP is the only mode enabled, allow any
+ * memory type.
+ */
+ if (wp_async && (vm_flags == VM_UFFD_WP))
+ return true;
+
#ifndef CONFIG_PTE_MARKER_UFFD_WP
/*
* If user requested uffd-wp but not enabled pte markers for
@@ -175,6 +186,8 @@ static inline bool vma_can_userfault(struct vm_area_struct *vma,
if ((vm_flags & VM_UFFD_WP) && !vma_is_anonymous(vma))
return false;
#endif
+
+ /* By default, allow any of anon|shmem|hugetlb */
return vma_is_anonymous(vma) || is_vm_hugetlb_page(vma) ||
vma_is_shmem(vma);
}
@@ -197,6 +210,7 @@ extern int userfaultfd_unmap_prep(struct vm_area_struct *vma,
extern void userfaultfd_unmap_complete(struct mm_struct *mm,
struct list_head *uf);
extern bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma);
+extern bool userfaultfd_wp_async(struct vm_area_struct *vma);
#else /* CONFIG_USERFAULTFD */
@@ -207,6 +221,13 @@ static inline vm_fault_t handle_userfault(struct vm_fault *vmf,
return VM_FAULT_SIGBUS;
}
+static inline long uffd_wp_range(struct vm_area_struct *vma,
+ unsigned long start, unsigned long len,
+ bool enable_wp)
+{
+ return false;
+}
+
static inline bool is_mergeable_vm_userfaultfd_ctx(struct vm_area_struct *vma,
struct vm_userfaultfd_ctx vm_ctx)
{
@@ -297,6 +318,11 @@ static inline bool userfaultfd_wp_unpopulated(struct vm_area_struct *vma)
return false;
}
+static inline bool userfaultfd_wp_async(struct vm_area_struct *vma)
+{
+ return false;
+}
+
#endif /* CONFIG_USERFAULTFD */
static inline bool userfaultfd_wp_use_markers(struct vm_area_struct *vma)
diff --git a/include/linux/verification.h b/include/linux/verification.h
index f34e50ebcf60..cb2d47f28091 100644
--- a/include/linux/verification.h
+++ b/include/linux/verification.h
@@ -8,6 +8,7 @@
#ifndef _LINUX_VERIFICATION_H
#define _LINUX_VERIFICATION_H
+#include <linux/errno.h>
#include <linux/types.h>
/*
diff --git a/include/linux/virtio_net.h b/include/linux/virtio_net.h
index 7b4dd69555e4..27cc1d464321 100644
--- a/include/linux/virtio_net.h
+++ b/include/linux/virtio_net.h
@@ -3,8 +3,8 @@
#define _LINUX_VIRTIO_NET_H
#include <linux/if_vlan.h>
+#include <linux/udp.h>
#include <uapi/linux/tcp.h>
-#include <uapi/linux/udp.h>
#include <uapi/linux/virtio_net.h>
static inline bool virtio_net_hdr_match_proto(__be16 protocol, __u8 gso_type)
@@ -151,9 +151,22 @@ retry:
unsigned int nh_off = p_off;
struct skb_shared_info *shinfo = skb_shinfo(skb);
- /* UFO may not include transport header in gso_size. */
- if (gso_type & SKB_GSO_UDP)
+ switch (gso_type & ~SKB_GSO_TCP_ECN) {
+ case SKB_GSO_UDP:
+ /* UFO may not include transport header in gso_size. */
nh_off -= thlen;
+ break;
+ case SKB_GSO_UDP_L4:
+ if (!(hdr->flags & VIRTIO_NET_HDR_F_NEEDS_CSUM))
+ return -EINVAL;
+ if (skb->csum_offset != offsetof(struct udphdr, check))
+ return -EINVAL;
+ if (skb->len - p_off > gso_size * UDP_MAX_SEGMENTS)
+ return -EINVAL;
+ if (gso_type != SKB_GSO_UDP_L4)
+ return -EINVAL;
+ break;
+ }
/* Kernel has a special handling for GSO_BY_FRAGS. */
if (gso_size == GSO_BY_FRAGS)
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index c58453699ee9..ebb3ce63d64d 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -12,6 +12,7 @@
struct virtio_vsock_skb_cb {
bool reply;
bool tap_delivered;
+ u32 offset;
};
#define VIRTIO_VSOCK_SKB_CB(skb) ((struct virtio_vsock_skb_cb *)((skb)->cb))
@@ -159,6 +160,15 @@ struct virtio_transport {
/* Takes ownership of the packet */
int (*send_pkt)(struct sk_buff *skb);
+
+ /* Used in MSG_ZEROCOPY mode. Checks that the provided number of
+ * buffers can be transmitted in zerocopy mode. If this callback is
+ * not implemented for the current transport, the transport needs no
+ * extra checks and can perform zerocopy transmission by default.
+ */
+ bool (*can_msgzerocopy)(int bufs_num);
};
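A possible implementation sketch, assuming a transport whose ring can hold at most a fixed number of descriptors per packet; DEMO_RING_SIZE is a stand-in for the transport's real queue geometry:

#define DEMO_RING_SIZE	128	/* illustrative ring capacity */

static bool demo_can_msgzerocopy(int bufs_num)
{
	return bufs_num <= DEMO_RING_SIZE;
}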
ssize_t
diff --git a/include/linux/wait.h b/include/linux/wait.h
index 5ec7739400f4..3473b663176f 100644
--- a/include/linux/wait.h
+++ b/include/linux/wait.h
@@ -19,10 +19,9 @@ int default_wake_function(struct wait_queue_entry *wq_entry, unsigned mode, int
/* wait_queue_entry::flags */
#define WQ_FLAG_EXCLUSIVE 0x01
#define WQ_FLAG_WOKEN 0x02
-#define WQ_FLAG_BOOKMARK 0x04
-#define WQ_FLAG_CUSTOM 0x08
-#define WQ_FLAG_DONE 0x10
-#define WQ_FLAG_PRIORITY 0x20
+#define WQ_FLAG_CUSTOM 0x04
+#define WQ_FLAG_DONE 0x08
+#define WQ_FLAG_PRIORITY 0x10
/*
* A single wait-queue entry structure:
@@ -212,8 +211,6 @@ __remove_wait_queue(struct wait_queue_head *wq_head, struct wait_queue_entry *wq
int __wake_up(struct wait_queue_head *wq_head, unsigned int mode, int nr, void *key);
void __wake_up_on_current_cpu(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
-void __wake_up_locked_key_bookmark(struct wait_queue_head *wq_head,
- unsigned int mode, void *key, wait_queue_entry_t *bookmark);
void __wake_up_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked_sync_key(struct wait_queue_head *wq_head, unsigned int mode, void *key);
void __wake_up_locked(struct wait_queue_head *wq_head, unsigned int mode, int nr);
diff --git a/include/linux/watch_queue.h b/include/linux/watch_queue.h
index 45cd42f55d49..429c7b6afead 100644
--- a/include/linux/watch_queue.h
+++ b/include/linux/watch_queue.h
@@ -32,7 +32,7 @@ struct watch_filter {
DECLARE_BITMAP(type_filter, WATCH_TYPE__NR);
};
u32 nr_filters; /* Number of filters */
- struct watch_type_filter filters[];
+ struct watch_type_filter filters[] __counted_by(nr_filters);
};
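With __counted_by(nr_filters), bounds-checking instrumentation (FORTIFY_SOURCE, UBSAN) treats nr_filters as the live bound of filters[], so the count should be set before the array is touched. A sketch, assuming a caller-supplied count nr_filt:

static struct watch_filter *demo_alloc_watch_filter(u32 nr_filt)
{
	struct watch_filter *wfilter;

	wfilter = kzalloc(struct_size(wfilter, filters, nr_filt), GFP_KERNEL);
	if (wfilter)
		wfilter->nr_filters = nr_filt; /* set bound before filters[] use */
	return wfilter;
}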
struct watch_queue {
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index 1c1d06804d45..24b1e5070f4d 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -274,18 +274,16 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
* to generate better code.
*/
#ifdef CONFIG_LOCKDEP
-#define __INIT_WORK(_work, _func, _onstack) \
+#define __INIT_WORK_KEY(_work, _func, _onstack, _key) \
do { \
- static struct lock_class_key __key; \
- \
__init_work((_work), _onstack); \
(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
- lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, &__key, 0); \
+ lockdep_init_map(&(_work)->lockdep_map, "(work_completion)"#_work, (_key), 0); \
INIT_LIST_HEAD(&(_work)->entry); \
(_work)->func = (_func); \
} while (0)
#else
-#define __INIT_WORK(_work, _func, _onstack) \
+#define __INIT_WORK_KEY(_work, _func, _onstack, _key) \
do { \
__init_work((_work), _onstack); \
(_work)->data = (atomic_long_t) WORK_DATA_INIT(); \
@@ -294,12 +292,22 @@ static inline unsigned int work_static(struct work_struct *work) { return 0; }
} while (0)
#endif
+#define __INIT_WORK(_work, _func, _onstack) \
+ do { \
+ static __maybe_unused struct lock_class_key __key; \
+ \
+ __INIT_WORK_KEY(_work, _func, _onstack, &__key); \
+ } while (0)
+
#define INIT_WORK(_work, _func) \
__INIT_WORK((_work), (_func), 0)
#define INIT_WORK_ONSTACK(_work, _func) \
__INIT_WORK((_work), (_func), 1)
+#define INIT_WORK_ONSTACK_KEY(_work, _func, _key) \
+ __INIT_WORK_KEY((_work), (_func), 1, _key)
+
#define __INIT_DELAYED_WORK(_work, _func, _tflags) \
do { \
INIT_WORK(&(_work)->work, (_func)); \
@@ -693,8 +701,32 @@ static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
return fn(arg);
}
#else
-long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
-long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
+long work_on_cpu_key(int cpu, long (*fn)(void *),
+ void *arg, struct lock_class_key *key);
+/*
+ * A new key is defined for each caller to make sure the work
+ * associated with the function doesn't share its locking class.
+ */
+#define work_on_cpu(_cpu, _fn, _arg) \
+({ \
+ static struct lock_class_key __key; \
+ \
+ work_on_cpu_key(_cpu, _fn, _arg, &__key); \
+})
+
+long work_on_cpu_safe_key(int cpu, long (*fn)(void *),
+ void *arg, struct lock_class_key *key);
+
+/*
+ * A new key is defined for each caller to make sure the work
+ * associated with the function doesn't share its locking class.
+ */
+#define work_on_cpu_safe(_cpu, _fn, _arg) \
+({ \
+ static struct lock_class_key __key; \
+ \
+ work_on_cpu_safe_key(_cpu, _fn, _arg, &__key); \
+})
#endif /* CONFIG_SMP */
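Usage is unchanged; each expansion of work_on_cpu() now gets its own static lock_class_key, so unrelated callers no longer share a lockdep class. A sketch with a hypothetical callback:

static long demo_on_target_cpu(void *arg)
{
	return 0;	/* runs on the requested CPU */
}

/* long ret = work_on_cpu(3, demo_on_target_cpu, NULL); */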
#ifdef CONFIG_FREEZER