Diffstat (limited to 'include')
-rw-r--r--include/acpi/acconfig.h1
-rw-r--r--include/acpi/acpi_bus.h13
-rw-r--r--include/acpi/acpixf.h2
-rw-r--r--include/acpi/actbl2.h11
-rw-r--r--include/acpi/cppc_acpi.h3
-rw-r--r--include/asm-generic/bug.h20
-rw-r--r--include/asm-generic/extable.h26
-rw-r--r--include/asm-generic/mm_hooks.h6
-rw-r--r--include/asm-generic/pgtable.h25
-rw-r--r--include/asm-generic/set_memory.h12
-rw-r--r--include/asm-generic/uaccess.h135
-rw-r--r--include/asm-generic/vmlinux.lds.h6
-rw-r--r--include/clocksource/arm_arch_timer.h34
-rw-r--r--include/crypto/gf128mul.h87
-rw-r--r--include/crypto/internal/acompress.h3
-rw-r--r--include/crypto/internal/scompress.h3
-rw-r--r--include/crypto/kpp.h6
-rw-r--r--include/crypto/public_key.h15
-rw-r--r--include/crypto/xts.h2
-rw-r--r--include/drm/bridge/analogix_dp.h3
-rw-r--r--include/drm/bridge/dw_hdmi.h101
-rw-r--r--include/drm/drmP.h330
-rw-r--r--include/drm/drm_atomic.h350
-rw-r--r--include/drm/drm_atomic_helper.h47
-rw-r--r--include/drm/drm_auth.h17
-rw-r--r--include/drm/drm_connector.h154
-rw-r--r--include/drm/drm_crtc.h101
-rw-r--r--include/drm/drm_crtc_helper.h42
-rw-r--r--include/drm/drm_debugfs.h101
-rw-r--r--include/drm/drm_dp_helper.h66
-rw-r--r--include/drm/drm_dp_mst_helper.h15
-rw-r--r--include/drm/drm_drv.h102
-rw-r--r--include/drm/drm_edid.h8
-rw-r--r--include/drm/drm_fb_helper.h16
-rw-r--r--include/drm/drm_file.h375
-rw-r--r--include/drm/drm_fourcc.h6
-rw-r--r--include/drm/drm_framebuffer.h49
-rw-r--r--include/drm/drm_gem.h110
-rw-r--r--include/drm/drm_gem_cma_helper.h26
-rw-r--r--include/drm/drm_global.h8
-rw-r--r--include/drm/drm_hashtab.h20
-rw-r--r--include/drm/drm_ioctl.h188
-rw-r--r--include/drm/drm_irq.h1
-rw-r--r--include/drm/drm_mem_util.h9
-rw-r--r--include/drm/drm_mm.h5
-rw-r--r--include/drm/drm_mode_config.h167
-rw-r--r--include/drm/drm_mode_object.h36
-rw-r--r--include/drm/drm_modeset_helper_vtables.h70
-rw-r--r--include/drm/drm_modeset_lock.h5
-rw-r--r--include/drm/drm_of.h37
-rw-r--r--include/drm/drm_panel.h2
-rw-r--r--include/drm/drm_pci.h75
-rw-r--r--include/drm/drm_plane.h43
-rw-r--r--include/drm/drm_plane_helper.h6
-rw-r--r--include/drm/drm_prime.h80
-rw-r--r--include/drm/drm_print.h3
-rw-r--r--include/drm/drm_property.h35
-rw-r--r--include/drm/drm_scdc_helper.h161
-rw-r--r--include/drm/drm_simple_kms_helper.h2
-rw-r--r--include/drm/drm_sysfs.h12
-rw-r--r--include/drm/drm_vma_manager.h1
-rw-r--r--include/drm/i915_pciids.h11
-rw-r--r--include/drm/tinydrm/tinydrm.h4
-rw-r--r--include/drm/ttm/ttm_bo_api.h71
-rw-r--r--include/drm/ttm/ttm_bo_driver.h11
-rw-r--r--include/drm/ttm/ttm_placement.h1
-rw-r--r--include/dt-bindings/clock/gxbb-clkc.h14
-rw-r--r--include/dt-bindings/clock/r7s72100-clock.h9
-rw-r--r--include/dt-bindings/clock/r8a73a4-clock.h1
-rw-r--r--include/dt-bindings/clock/r8a7790-clock.h1
-rw-r--r--include/dt-bindings/clock/r8a7791-clock.h1
-rw-r--r--include/dt-bindings/clock/r8a7792-clock.h2
-rw-r--r--include/dt-bindings/clock/r8a7793-clock.h5
-rw-r--r--include/dt-bindings/clock/r8a7794-clock.h2
-rw-r--r--include/dt-bindings/genpd/k2g.h90
-rw-r--r--include/dt-bindings/gpio/gpio.h12
-rw-r--r--include/dt-bindings/mfd/stm32f7-rcc.h112
-rw-r--r--include/dt-bindings/pinctrl/hisi.h15
-rw-r--r--include/dt-bindings/pinctrl/mt7623-pinfunc.h30
-rw-r--r--include/dt-bindings/power/imx7-power.h16
-rw-r--r--include/dt-bindings/power/r8a7795-sysc.h2
-rw-r--r--include/dt-bindings/reset/altr,rst-mgr-a10sr.h33
-rw-r--r--include/dt-bindings/reset/imx7-reset.h62
-rw-r--r--include/keys/system_keyring.h18
-rw-r--r--include/kvm/arm_arch_timer.h2
-rw-r--r--include/kvm/arm_pmu.h7
-rw-r--r--include/kvm/arm_vgic.h14
-rw-r--r--include/linux/acpi.h72
-rw-r--r--include/linux/acpi_iort.h6
-rw-r--r--include/linux/amba/pl080.h50
-rw-r--r--include/linux/amba/pl330.h35
-rw-r--r--include/linux/amigaffs.h144
-rw-r--r--include/linux/ata.h5
-rw-r--r--include/linux/atmel_serial.h169
-rw-r--r--include/linux/atomic.h46
-rw-r--r--include/linux/audit.h7
-rw-r--r--include/linux/backing-dev-defs.h8
-rw-r--r--include/linux/backing-dev.h16
-rw-r--r--include/linux/bcma/bcma_driver_pci.h2
-rw-r--r--include/linux/bio.h13
-rw-r--r--include/linux/blk-mq.h30
-rw-r--r--include/linux/blk_types.h47
-rw-r--r--include/linux/blkdev.h97
-rw-r--r--include/linux/bpf.h36
-rw-r--r--include/linux/bpf_types.h36
-rw-r--r--include/linux/bpf_verifier.h5
-rw-r--r--include/linux/brcmphy.h3
-rw-r--r--include/linux/buffer_head.h2
-rw-r--r--include/linux/bug.h2
-rw-r--r--include/linux/can/core.h11
-rw-r--r--include/linux/can/dev/peak_canfd.h308
-rw-r--r--include/linux/can/platform/ti_hecc.h44
-rw-r--r--include/linux/ccp.h68
-rw-r--r--include/linux/cdev.h5
-rw-r--r--include/linux/ceph/ceph_features.h4
-rw-r--r--include/linux/ceph/ceph_fs.h14
-rw-r--r--include/linux/ceph/cls_lock_client.h5
-rw-r--r--include/linux/ceph/libceph.h8
-rw-r--r--include/linux/ceph/mdsmap.h7
-rw-r--r--include/linux/ceph/osd_client.h7
-rw-r--r--include/linux/ceph/pagelist.h6
-rw-r--r--include/linux/cgroup-defs.h12
-rw-r--r--include/linux/cgroup.h8
-rw-r--r--include/linux/clk.h4
-rw-r--r--include/linux/clockchips.h1
-rw-r--r--include/linux/clocksource.h2
-rw-r--r--include/linux/cma.h6
-rw-r--r--include/linux/coda_psdev.h1
-rw-r--r--include/linux/compat.h14
-rw-r--r--include/linux/console.h2
-rw-r--r--include/linux/coresight.h2
-rw-r--r--include/linux/cpufreq.h7
-rw-r--r--include/linux/cpuhotplug.h2
-rw-r--r--include/linux/cpumask.h14
-rw-r--r--include/linux/cpuset.h4
-rw-r--r--include/linux/crash_core.h69
-rw-r--r--include/linux/crypto.h2
-rw-r--r--include/linux/cryptohash.h5
-rw-r--r--include/linux/dax.h34
-rw-r--r--include/linux/debugfs.h2
-rw-r--r--include/linux/dell-led.h6
-rw-r--r--include/linux/devfreq.h30
-rw-r--r--include/linux/device-mapper.h47
-rw-r--r--include/linux/dma-buf.h22
-rw-r--r--include/linux/dma-fence-array.h2
-rw-r--r--include/linux/dma-fence.h4
-rw-r--r--include/linux/dma-iommu.h6
-rw-r--r--include/linux/dma-mapping.h12
-rw-r--r--include/linux/dma_remapping.h1
-rw-r--r--include/linux/edac.h30
-rw-r--r--include/linux/efi-bgrt.h5
-rw-r--r--include/linux/efi.h5
-rw-r--r--include/linux/elevator.h12
-rw-r--r--include/linux/elf.h2
-rw-r--r--include/linux/etherdevice.h15
-rw-r--r--include/linux/ethtool.h2
-rw-r--r--include/linux/extcon.h21
-rw-r--r--include/linux/f2fs_fs.h17
-rw-r--r--include/linux/fcntl.h6
-rw-r--r--include/linux/filter.h20
-rw-r--r--include/linux/firmware/meson/meson_sm.h4
-rw-r--r--include/linux/flex_array.h67
-rw-r--r--include/linux/fpga/altera-pr-ip-core.h29
-rw-r--r--include/linux/fpga/fpga-mgr.h4
-rw-r--r--include/linux/fs.h28
-rw-r--r--include/linux/fscrypt_common.h11
-rw-r--r--include/linux/fscrypt_notsupp.h9
-rw-r--r--include/linux/fscrypt_supp.h74
-rw-r--r--include/linux/fsnotify_backend.h95
-rw-r--r--include/linux/ftrace.h94
-rw-r--r--include/linux/fwnode.h12
-rw-r--r--include/linux/genhd.h12
-rw-r--r--include/linux/gfp.h22
-rw-r--r--include/linux/gpio/consumer.h12
-rw-r--r--include/linux/gpio/driver.h6
-rw-r--r--include/linux/gpio/gpio-reg.h13
-rw-r--r--include/linux/hdmi.h1
-rw-r--r--include/linux/hid-sensor-hub.h2
-rw-r--r--include/linux/hid-sensor-ids.h8
-rw-r--r--include/linux/hid.h5
-rw-r--r--include/linux/hiddev.h12
-rw-r--r--include/linux/host1x.h1
-rw-r--r--include/linux/hrtimer.h6
-rw-r--r--include/linux/hwmon.h2
-rw-r--r--include/linux/hyperv.h127
-rw-r--r--include/linux/i2c.h15
-rw-r--r--include/linux/i2c/i2c-hid.h6
-rw-r--r--include/linux/ide.h2
-rw-r--r--include/linux/ieee80211.h77
-rw-r--r--include/linux/if_bridge.h1
-rw-r--r--include/linux/inet.h6
-rw-r--r--include/linux/inetdevice.h4
-rw-r--r--include/linux/init.h4
-rw-r--r--include/linux/init_task.h17
-rw-r--r--include/linux/input/eeti_ts.h10
-rw-r--r--include/linux/input/matrix_keypad.h3
-rw-r--r--include/linux/intel-iommu.h18
-rw-r--r--include/linux/interrupt.h2
-rw-r--r--include/linux/io.h21
-rw-r--r--include/linux/iomap.h1
-rw-r--r--include/linux/iommu.h58
-rw-r--r--include/linux/iova.h91
-rw-r--r--include/linux/ipc.h7
-rw-r--r--include/linux/ipv6.h2
-rw-r--r--include/linux/irqchip/arm-gic-v3.h14
-rw-r--r--include/linux/irqchip/mips-gic.h1
-rw-r--r--include/linux/jbd2.h2
-rw-r--r--include/linux/jiffies.h11
-rw-r--r--include/linux/kernel.h2
-rw-r--r--include/linux/kexec.h57
-rw-r--r--include/linux/key-type.h8
-rw-r--r--include/linux/key.h39
-rw-r--r--include/linux/kobject.h2
-rw-r--r--include/linux/kprobes.h3
-rw-r--r--include/linux/kref.h6
-rw-r--r--include/linux/ksm.h5
-rw-r--r--include/linux/kvm_host.h92
-rw-r--r--include/linux/leds-pca9532.h4
-rw-r--r--include/linux/leds.h14
-rw-r--r--include/linux/libnvdimm.h8
-rw-r--r--include/linux/lightnvm.h13
-rw-r--r--include/linux/livepatch.h68
-rw-r--r--include/linux/lockd/bind.h24
-rw-r--r--include/linux/lockd/lockd.h2
-rw-r--r--include/linux/lockdep.h3
-rw-r--r--include/linux/lsm_hooks.h34
-rw-r--r--include/linux/mailbox/brcm-message.h14
-rw-r--r--include/linux/memblock.h2
-rw-r--r--include/linux/memcontrol.h201
-rw-r--r--include/linux/mfd/arizona/pdata.h7
-rw-r--r--include/linux/mfd/axp20x.h44
-rw-r--r--include/linux/mfd/cros_ec.h18
-rw-r--r--include/linux/mfd/cros_ec_commands.h4
-rw-r--r--include/linux/mfd/da9062/core.h29
-rw-r--r--include/linux/mfd/da9062/registers.h5
-rw-r--r--include/linux/mfd/intel_soc_pmic_bxtwc.h (renamed from include/linux/mfd/intel_bxtwc.h)4
-rw-r--r--include/linux/mfd/motorola-cpcap.h5
-rw-r--r--include/linux/mfd/mxs-lradc.h187
-rw-r--r--include/linux/mfd/stm32-timers.h2
-rw-r--r--include/linux/mfd/sun4i-gpadc.h6
-rw-r--r--include/linux/mfd/syscon/atmel-smc.h237
-rw-r--r--include/linux/mfd/syscon/exynos5-pmu.h33
-rw-r--r--include/linux/mfd/syscon/imx7-iomuxc-gpr.h4
-rw-r--r--include/linux/mfd/ti-lmu-register.h280
-rw-r--r--include/linux/mfd/ti-lmu.h87
-rw-r--r--include/linux/mfd/wm831x/core.h9
-rw-r--r--include/linux/mg_disk.h45
-rw-r--r--include/linux/migrate.h5
-rw-r--r--include/linux/mlx4/device.h5
-rw-r--r--include/linux/mlx5/driver.h28
-rw-r--r--include/linux/mlx5/fs.h20
-rw-r--r--include/linux/mlx5/mlx5_ifc.h153
-rw-r--r--include/linux/mlx5/qp.h10
-rw-r--r--include/linux/mm.h41
-rw-r--r--include/linux/mm_types.h5
-rw-r--r--include/linux/mmc/card.h10
-rw-r--r--include/linux/mmc/host.h6
-rw-r--r--include/linux/mmzone.h12
-rw-r--r--include/linux/mod_devicetable.h10
-rw-r--r--include/linux/module.h12
-rw-r--r--include/linux/mpls.h5
-rw-r--r--include/linux/mtd/mtd.h5
-rw-r--r--include/linux/net.h3
-rw-r--r--include/linux/netdev_features.h8
-rw-r--r--include/linux/netdevice.h62
-rw-r--r--include/linux/netfilter/nfnetlink.h5
-rw-r--r--include/linux/netfilter_bridge/ebtables.h6
-rw-r--r--include/linux/netlink.h38
-rw-r--r--include/linux/nfs_fs.h17
-rw-r--r--include/linux/nfs_fs_sb.h2
-rw-r--r--include/linux/nfs_page.h5
-rw-r--r--include/linux/nfs_xdr.h3
-rw-r--r--include/linux/nvme-fc-driver.h104
-rw-r--r--include/linux/nvme-fc.h68
-rw-r--r--include/linux/nvme.h13
-rw-r--r--include/linux/of.h7
-rw-r--r--include/linux/of_device.h17
-rw-r--r--include/linux/of_gpio.h1
-rw-r--r--include/linux/of_irq.h2
-rw-r--r--include/linux/of_mdio.h4
-rw-r--r--include/linux/of_pci.h11
-rw-r--r--include/linux/of_platform.h11
-rw-r--r--include/linux/page-isolation.h5
-rw-r--r--include/linux/pagemap.h4
-rw-r--r--include/linux/pci-ecam.h3
-rw-r--r--include/linux/pci-ep-cfs.h41
-rw-r--r--include/linux/pci-epc.h144
-rw-r--r--include/linux/pci-epf.h162
-rw-r--r--include/linux/pci.h111
-rw-r--r--include/linux/pci_ids.h2
-rw-r--r--include/linux/pe.h177
-rw-r--r--include/linux/percpu-refcount.h1
-rw-r--r--include/linux/percpu.h1
-rw-r--r--include/linux/perf/arm_pmu.h29
-rw-r--r--include/linux/perf_event.h17
-rw-r--r--include/linux/phy.h109
-rw-r--r--include/linux/pinctrl/pinconf-generic.h3
-rw-r--r--include/linux/platform_data/iommu-omap.h20
-rw-r--r--include/linux/platform_data/isl9305.h2
-rw-r--r--include/linux/platform_data/itco_wdt.h4
-rw-r--r--include/linux/platform_data/pn544.h43
-rw-r--r--include/linux/platform_data/st21nfca.h33
-rw-r--r--include/linux/pm_domain.h2
-rw-r--r--include/linux/pm_wakeup.h25
-rw-r--r--include/linux/pmem.h23
-rw-r--r--include/linux/poll.h56
-rw-r--r--include/linux/posix-clock.h10
-rw-r--r--include/linux/posix-timers.h20
-rw-r--r--include/linux/power/bq24190_charger.h16
-rw-r--r--include/linux/printk.h4
-rw-r--r--include/linux/proc_ns.h2
-rw-r--r--include/linux/property.h26
-rw-r--r--include/linux/pstore.h156
-rw-r--r--include/linux/ptr_ring.h63
-rw-r--r--include/linux/qcom_scm.h6
-rw-r--r--include/linux/qed/common_hsi.h30
-rw-r--r--include/linux/qed/eth_common.h3
-rw-r--r--include/linux/qed/fcoe_common.h180
-rw-r--r--include/linux/qed/iscsi_common.h241
-rw-r--r--include/linux/qed/qed_eth_if.h32
-rw-r--r--include/linux/qed/qed_if.h64
-rw-r--r--include/linux/qed/qed_iscsi_if.h2
-rw-r--r--include/linux/qed/qed_roce_if.h2
-rw-r--r--include/linux/qed/rdma_common.h3
-rw-r--r--include/linux/qed/roce_common.h17
-rw-r--r--include/linux/qed/storage_common.h30
-rw-r--r--include/linux/qed/tcp_common.h1
-rw-r--r--include/linux/quotaops.h1
-rw-r--r--include/linux/ras.h13
-rw-r--r--include/linux/rcu_node_tree.h99
-rw-r--r--include/linux/rcu_segcblist.h90
-rw-r--r--include/linux/rculist.h3
-rw-r--r--include/linux/rcupdate.h22
-rw-r--r--include/linux/rcutiny.h24
-rw-r--r--include/linux/rcutree.h5
-rw-r--r--include/linux/refcount.h19
-rw-r--r--include/linux/regulator/arizona-ldo1.h24
-rw-r--r--include/linux/regulator/arizona-micsupp.h21
-rw-r--r--include/linux/regulator/consumer.h1
-rw-r--r--include/linux/regulator/driver.h18
-rw-r--r--include/linux/regulator/machine.h3
-rw-r--r--include/linux/regulator/pfuze100.h1
-rw-r--r--include/linux/reservation.h20
-rw-r--r--include/linux/rhashtable.h68
-rw-r--r--include/linux/ring_buffer.h2
-rw-r--r--include/linux/rmap.h47
-rw-r--r--include/linux/rodata_test.h1
-rw-r--r--include/linux/rpmsg/qcom_smd.h2
-rw-r--r--include/linux/sbitmap.h55
-rw-r--r--include/linux/sched.h27
-rw-r--r--include/linux/sched/mm.h38
-rw-r--r--include/linux/sched/rt.h23
-rw-r--r--include/linux/sched/signal.h1
-rw-r--r--include/linux/security.h20
-rw-r--r--include/linux/sem.h3
-rw-r--r--include/linux/serdev.h68
-rw-r--r--include/linux/serial_core.h1
-rw-r--r--include/linux/serio.h1
-rw-r--r--include/linux/signal.h4
-rw-r--r--include/linux/skbuff.h7
-rw-r--r--include/linux/slab.h6
-rw-r--r--include/linux/smp.h12
-rw-r--r--include/linux/soc/qcom/smd.h139
-rw-r--r--include/linux/soc/qcom/wcnss_ctrl.h11
-rw-r--r--include/linux/soc/samsung/exynos-regs-pmu.h16
-rw-r--r--include/linux/sock_diag.h1
-rw-r--r--include/linux/spi/spi.h10
-rw-r--r--include/linux/splice.h4
-rw-r--r--include/linux/srcu.h84
-rw-r--r--include/linux/srcuclassic.h115
-rw-r--r--include/linux/srcutiny.h93
-rw-r--r--include/linux/srcutree.h150
-rw-r--r--include/linux/stacktrace.h9
-rw-r--r--include/linux/stmmac.h41
-rw-r--r--include/linux/string.h18
-rw-r--r--include/linux/suspend.h7
-rw-r--r--include/linux/swap.h5
-rw-r--r--include/linux/sysctl.h1
-rw-r--r--include/linux/t10-pi.h8
-rw-r--r--include/linux/tcp.h14
-rw-r--r--include/linux/tee_drv.h277
-rw-r--r--include/linux/thread_info.h16
-rw-r--r--include/linux/tick.h1
-rw-r--r--include/linux/time.h3
-rw-r--r--include/linux/timekeeping.h20
-rw-r--r--include/linux/tpm.h3
-rw-r--r--include/linux/trace_events.h11
-rw-r--r--include/linux/tracepoint.h19
-rw-r--r--include/linux/tty.h13
-rw-r--r--include/linux/types.h2
-rw-r--r--include/linux/uaccess.h198
-rw-r--r--include/linux/udp.h2
-rw-r--r--include/linux/uio_driver.h13
-rw-r--r--include/linux/usb.h73
-rw-r--r--include/linux/usb/composite.h6
-rw-r--r--include/linux/usb/gadget.h2
-rw-r--r--include/linux/usb/hcd.h7
-rw-r--r--include/linux/usb/of.h5
-rw-r--r--include/linux/usb/otg-fsm.h15
-rw-r--r--include/linux/usb/serial.h42
-rw-r--r--include/linux/usb/typec.h243
-rw-r--r--include/linux/usb/usbnet.h12
-rw-r--r--include/linux/usb/xhci-dbgp.h29
-rw-r--r--include/linux/virtio.h13
-rw-r--r--include/linux/virtio_config.h25
-rw-r--r--include/linux/virtio_ring.h3
-rw-r--r--include/linux/virtio_vsock.h1
-rw-r--r--include/linux/vm_event_item.h2
-rw-r--r--include/linux/vmalloc.h20
-rw-r--r--include/linux/vme.h12
-rw-r--r--include/linux/workqueue.h5
-rw-r--r--include/linux/writeback.h1
-rw-r--r--include/media/cec-edid.h104
-rw-r--r--include/media/cec-notifier.h111
-rw-r--r--include/media/cec.h119
-rw-r--r--include/media/davinci/vpif_types.h1
-rw-r--r--include/media/rc-map.h79
-rw-r--r--include/media/soc_camera.h14
-rw-r--r--include/media/tveeprom.h3
-rw-r--r--include/media/v4l2-ioctl.h17
-rw-r--r--include/media/videobuf2-core.h12
-rw-r--r--include/media/videobuf2-memops.h3
-rw-r--r--include/misc/charlcd.h42
-rw-r--r--include/net/6lowpan.h15
-rw-r--r--include/net/addrconf.h28
-rw-r--r--include/net/af_rxrpc.h2
-rw-r--r--include/net/af_vsock.h13
-rw-r--r--include/net/bluetooth/l2cap.h2
-rw-r--r--include/net/bluetooth/rfcomm.h8
-rw-r--r--include/net/bonding.h35
-rw-r--r--include/net/busy_poll.h102
-rw-r--r--include/net/cfg80211.h299
-rw-r--r--include/net/devlink.h261
-rw-r--r--include/net/dsa.h39
-rw-r--r--include/net/esp.h19
-rw-r--r--include/net/fib_rules.h7
-rw-r--r--include/net/flow.h2
-rw-r--r--include/net/flow_dissector.h8
-rw-r--r--include/net/flowcache.h6
-rw-r--r--include/net/genetlink.h20
-rw-r--r--include/net/ip.h2
-rw-r--r--include/net/ip6_route.h1
-rw-r--r--include/net/ip6_tunnel.h2
-rw-r--r--include/net/ip_fib.h38
-rw-r--r--include/net/ip_tunnels.h5
-rw-r--r--include/net/ip_vs.h28
-rw-r--r--include/net/mac80211.h180
-rw-r--r--include/net/mpls_iptunnel.h7
-rw-r--r--include/net/ndisc.h2
-rw-r--r--include/net/neighbour.h7
-rw-r--r--include/net/net_namespace.h4
-rw-r--r--include/net/netfilter/ipv4/nf_conntrack_ipv4.h1
-rw-r--r--include/net/netfilter/ipv6/nf_conntrack_ipv6.h1
-rw-r--r--include/net/netfilter/nf_conntrack.h32
-rw-r--r--include/net/netfilter/nf_conntrack_core.h2
-rw-r--r--include/net/netfilter/nf_conntrack_ecache.h4
-rw-r--r--include/net/netfilter/nf_conntrack_expect.h6
-rw-r--r--include/net/netfilter/nf_conntrack_extend.h29
-rw-r--r--include/net/netfilter/nf_conntrack_helper.h31
-rw-r--r--include/net/netfilter/nf_conntrack_l4proto.h3
-rw-r--r--include/net/netfilter/nf_conntrack_synproxy.h2
-rw-r--r--include/net/netfilter/nf_conntrack_timeout.h3
-rw-r--r--include/net/netfilter/nf_nat.h2
-rw-r--r--include/net/netfilter/nf_nat_helper.h36
-rw-r--r--include/net/netfilter/nf_queue.h3
-rw-r--r--include/net/netfilter/nf_tables.h17
-rw-r--r--include/net/netfilter/nft_fib.h2
-rw-r--r--include/net/netlink.h36
-rw-r--r--include/net/netns/can.h40
-rw-r--r--include/net/netns/ipv4.h4
-rw-r--r--include/net/netns/mpls.h3
-rw-r--r--include/net/nfc/nfc.h1
-rw-r--r--include/net/pkt_sched.h2
-rw-r--r--include/net/protocol.h7
-rw-r--r--include/net/route.h6
-rw-r--r--include/net/rtnetlink.h6
-rw-r--r--include/net/sch_generic.h5
-rw-r--r--include/net/sctp/sm.h16
-rw-r--r--include/net/sctp/structs.h3
-rw-r--r--include/net/sctp/ulpevent.h8
-rw-r--r--include/net/secure_seq.h10
-rw-r--r--include/net/sock.h16
-rw-r--r--include/net/tc_act/tc_pedit.h45
-rw-r--r--include/net/tc_act/tc_vlan.h8
-rw-r--r--include/net/tcp.h42
-rw-r--r--include/net/udp.h1
-rw-r--r--include/net/xfrm.h119
-rw-r--r--include/rdma/ib.h2
-rw-r--r--include/rdma/ib_cm.h14
-rw-r--r--include/rdma/ib_hdrs.h66
-rw-r--r--include/rdma/ib_mad.h40
-rw-r--r--include/rdma/ib_marshall.h6
-rw-r--r--include/rdma/ib_pack.h2
-rw-r--r--include/rdma/ib_sa.h304
-rw-r--r--include/rdma/ib_umem.h8
-rw-r--r--include/rdma/ib_umem_odp.h6
-rw-r--r--include/rdma/ib_verbs.h325
-rw-r--r--include/rdma/opa_addr.h79
-rw-r--r--include/rdma/opa_port_info.h3
-rw-r--r--include/rdma/opa_vnic.h141
-rw-r--r--include/rdma/rdma_cm.h4
-rw-r--r--include/rdma/rdma_cm_ib.h2
-rw-r--r--include/rdma/rdma_vt.h11
-rw-r--r--include/rdma/rdmavt_qp.h19
-rw-r--r--include/rdma/uverbs_std_types.h114
-rw-r--r--include/rdma/uverbs_types.h172
-rw-r--r--include/scsi/libfc.h3
-rw-r--r--include/scsi/libiscsi.h3
-rw-r--r--include/scsi/libsas.h1
-rw-r--r--include/scsi/scsi_device.h2
-rw-r--r--include/scsi/scsi_driver.h1
-rw-r--r--include/scsi/scsi_eh.h5
-rw-r--r--include/scsi/scsi_host.h5
-rw-r--r--include/scsi/scsi_request.h2
-rw-r--r--include/scsi/scsi_transport_fc.h1
-rw-r--r--include/scsi/sg.h1
-rw-r--r--include/soc/fsl/qman.h109
-rw-r--r--include/soc/tegra/flowctrl.h82
-rw-r--r--include/soc/tegra/pmc.h29
-rw-r--r--include/sound/cs35l35.h108
-rw-r--r--include/sound/hda_register.h30
-rw-r--r--include/sound/hdaudio.h28
-rw-r--r--include/sound/soc.h14
-rw-r--r--include/trace/events/block.h61
-rw-r--r--include/trace/events/bpf.h10
-rw-r--r--include/trace/events/btrfs.h187
-rw-r--r--include/trace/events/ext4.h74
-rw-r--r--include/trace/events/f2fs.h62
-rw-r--r--include/trace/events/fs_dax.h130
-rw-r--r--include/trace/events/iommu.h1
-rw-r--r--include/trace/events/rxrpc.h101
-rw-r--r--include/trace/events/sched.h16
-rw-r--r--include/trace/events/v4l2.h1
-rw-r--r--include/trace/events/xen.h28
-rw-r--r--include/uapi/asm-generic/socket.h6
-rw-r--r--include/uapi/asm-generic/unistd.h3
-rw-r--r--include/uapi/drm/amdgpu_drm.h69
-rw-r--r--include/uapi/drm/drm.h3
-rw-r--r--include/uapi/drm/drm_fourcc.h59
-rw-r--r--include/uapi/drm/drm_mode.h4
-rw-r--r--include/uapi/drm/etnaviv_drm.h8
-rw-r--r--include/uapi/drm/i915_drm.h65
-rw-r--r--include/uapi/drm/msm_drm.h1
-rw-r--r--include/uapi/drm/vmwgfx_drm.h24
-rw-r--r--include/uapi/linux/Kbuild2
-rw-r--r--include/uapi/linux/aspeed-lpc-ctrl.h61
-rw-r--r--include/uapi/linux/bpf.h31
-rw-r--r--include/uapi/linux/btrfs.h10
-rw-r--r--include/uapi/linux/can/vxcan.h12
-rw-r--r--include/uapi/linux/capability.h2
-rw-r--r--include/uapi/linux/cec.h2
-rw-r--r--include/uapi/linux/cryptouser.h10
-rw-r--r--include/uapi/linux/devlink.h74
-rw-r--r--include/uapi/linux/elf-em.h1
-rw-r--r--include/uapi/linux/elf.h4
-rw-r--r--include/uapi/linux/ethtool.h2
-rw-r--r--include/uapi/linux/eventpoll.h21
-rw-r--r--include/uapi/linux/fs.h15
-rw-r--r--include/uapi/linux/fsmap.h112
-rw-r--r--include/uapi/linux/gtp.h3
-rw-r--r--include/uapi/linux/if_arp.h1
-rw-r--r--include/uapi/linux/if_link.h12
-rw-r--r--include/uapi/linux/if_packet.h1
-rw-r--r--include/uapi/linux/if_tunnel.h3
-rw-r--r--include/uapi/linux/input-event-codes.h1
-rw-r--r--include/uapi/linux/input.h11
-rw-r--r--include/uapi/linux/ipmi.h2
-rw-r--r--include/uapi/linux/ipv6.h2
-rw-r--r--include/uapi/linux/keyctl.h8
-rw-r--r--include/uapi/linux/kvm.h25
-rw-r--r--include/uapi/linux/lightnvm.h4
-rw-r--r--include/uapi/linux/media-bus-format.h13
-rw-r--r--include/uapi/linux/mpls_iptunnel.h2
-rw-r--r--include/uapi/linux/nbd-netlink.h98
-rw-r--r--include/uapi/linux/nbd.h6
-rw-r--r--include/uapi/linux/ndctl.h1
-rw-r--r--include/uapi/linux/netfilter/nf_conntrack_common.h22
-rw-r--r--include/uapi/linux/netfilter/nf_tables.h28
-rw-r--r--include/uapi/linux/netlink.h48
-rw-r--r--include/uapi/linux/netlink_diag.h10
-rw-r--r--include/uapi/linux/nl80211.h116
-rw-r--r--include/uapi/linux/nubus.h4
-rw-r--r--include/uapi/linux/openvswitch.h27
-rw-r--r--include/uapi/linux/pci_regs.h3
-rw-r--r--include/uapi/linux/pcitest.h19
-rw-r--r--include/uapi/linux/perf_event.h49
-rw-r--r--include/uapi/linux/pkt_cls.h20
-rw-r--r--include/uapi/linux/pkt_sched.h8
-rw-r--r--include/uapi/linux/pps.h19
-rw-r--r--include/uapi/linux/raid/md_p.h45
-rw-r--r--include/uapi/linux/rtnetlink.h4
-rw-r--r--include/uapi/linux/sctp.h32
-rw-r--r--include/uapi/linux/serio.h1
-rw-r--r--include/uapi/linux/snmp.h2
-rw-r--r--include/uapi/linux/stat.h8
-rw-r--r--include/uapi/linux/switchtec_ioctl.h132
-rw-r--r--include/uapi/linux/sysctl.h1
-rw-r--r--include/uapi/linux/tee.h346
-rw-r--r--include/uapi/linux/usb/ch9.h3
-rw-r--r--include/uapi/linux/usb/functionfs.h2
-rw-r--r--include/uapi/linux/vfio.h18
-rw-r--r--include/uapi/linux/vfio_ccw.h24
-rw-r--r--include/uapi/linux/videodev2.h25
-rw-r--r--include/uapi/linux/vsockmon.h60
-rw-r--r--include/uapi/linux/xfrm.h8
-rw-r--r--include/uapi/rdma/ib_user_verbs.h11
-rw-r--r--include/uapi/rdma/vmw_pvrdma-abi.h4
-rw-r--r--include/uapi/sound/asound.h4
-rw-r--r--include/uapi/sound/firewire.h10
-rw-r--r--include/video/imx-ipu-v3.h39
-rw-r--r--include/video/udlfb.h2
-rw-r--r--include/xen/arm/page-coherent.h26
-rw-r--r--include/xen/interface/io/9pfs.h36
-rw-r--r--include/xen/interface/io/displif.h854
-rw-r--r--include/xen/interface/io/kbdif.h458
-rw-r--r--include/xen/interface/io/ring.h143
-rw-r--r--include/xen/interface/io/sndif.h793
-rw-r--r--include/xen/page.h2
-rw-r--r--include/xen/xen-ops.h19
619 files changed, 17882 insertions, 4569 deletions
diff --git a/include/acpi/acconfig.h b/include/acpi/acconfig.h
index 07740072da55..6db3b4668b1a 100644
--- a/include/acpi/acconfig.h
+++ b/include/acpi/acconfig.h
@@ -78,6 +78,7 @@
#define ACPI_MAX_EXTPARSE_CACHE_DEPTH 96 /* Parse tree objects */
#define ACPI_MAX_OBJECT_CACHE_DEPTH 96 /* Interpreter operand objects */
#define ACPI_MAX_NAMESPACE_CACHE_DEPTH 96 /* Namespace objects */
+#define ACPI_MAX_COMMENT_CACHE_DEPTH 96 /* Comments for the -ca option */
/*
* Should the subsystem abort the loading of an ACPI table if the
diff --git a/include/acpi/acpi_bus.h b/include/acpi/acpi_bus.h
index ef0ae8aaa567..197f3fffc9a7 100644
--- a/include/acpi/acpi_bus.h
+++ b/include/acpi/acpi_bus.h
@@ -88,6 +88,7 @@ acpi_evaluate_dsm_typed(acpi_handle handle, const u8 *uuid, u64 rev, u64 func,
}
bool acpi_dev_found(const char *hid);
+bool acpi_dev_present(const char *hid, const char *uid, s64 hrv);
#ifdef CONFIG_ACPI
@@ -386,6 +387,7 @@ struct acpi_data_node {
const char *name;
acpi_handle handle;
struct fwnode_handle fwnode;
+ struct fwnode_handle *parent;
struct acpi_device_data data;
struct list_head sibling;
struct kobject kobj;
@@ -575,7 +577,7 @@ struct acpi_pci_root {
bool acpi_dma_supported(struct acpi_device *adev);
enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev);
-void acpi_dma_configure(struct device *dev, enum dev_dma_attr attr);
+int acpi_dma_configure(struct device *dev, enum dev_dma_attr attr);
void acpi_dma_deconfigure(struct device *dev);
struct acpi_device *acpi_find_child_device(struct acpi_device *parent,
@@ -586,6 +588,15 @@ struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle);
int acpi_enable_wakeup_device_power(struct acpi_device *dev, int state);
int acpi_disable_wakeup_device_power(struct acpi_device *dev);
+#ifdef CONFIG_X86
+bool acpi_device_always_present(struct acpi_device *adev);
+#else
+static inline bool acpi_device_always_present(struct acpi_device *adev)
+{
+ return false;
+}
+#endif
+
#ifdef CONFIG_PM
acpi_status acpi_add_pm_notifier(struct acpi_device *adev, struct device *dev,
void (*work_func)(struct work_struct *work));
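
The new acpi_dev_present() complements acpi_dev_found(): it matches on _HID plus an optional _UID and _HRV, and only reports devices whose status says they are actually present. A minimal usage sketch, assuming a hypothetical platform driver and HID string:

#include <linux/acpi.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
        /* A NULL uid and hrv == -1 mean "match any _UID / any _HRV". */
        if (!acpi_dev_present("ABCD0001", NULL, -1))
                return -ENODEV;

        dev_info(&pdev->dev, "companion ACPI device is present\n");
        return 0;
}
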
diff --git a/include/acpi/acpixf.h b/include/acpi/acpixf.h
index 3795386ea706..15c86ce4df53 100644
--- a/include/acpi/acpixf.h
+++ b/include/acpi/acpixf.h
@@ -46,7 +46,7 @@
/* Current ACPICA subsystem version in YYYYMMDD format */
-#define ACPI_CA_VERSION 0x20170119
+#define ACPI_CA_VERSION 0x20170303
#include <acpi/acconfig.h>
#include <acpi/actypes.h>
diff --git a/include/acpi/actbl2.h b/include/acpi/actbl2.h
index 7aee9fb3bd1f..faa9f2c0d5de 100644
--- a/include/acpi/actbl2.h
+++ b/include/acpi/actbl2.h
@@ -87,6 +87,7 @@
#define ACPI_SIG_WDAT "WDAT" /* Watchdog Action Table */
#define ACPI_SIG_WDDT "WDDT" /* Watchdog Timer Description Table */
#define ACPI_SIG_WDRT "WDRT" /* Watchdog Resource Table */
+#define ACPI_SIG_XXXX "XXXX" /* Intermediate AML header for ASL/ASL+ converter */
#ifdef ACPI_UNDEFINED_TABLES
/*
@@ -783,6 +784,15 @@ struct acpi_iort_smmu {
#define ACPI_IORT_SMMU_DVM_SUPPORTED (1)
#define ACPI_IORT_SMMU_COHERENT_WALK (1<<1)
+/* Global interrupt format */
+
+struct acpi_iort_smmu_gsi {
+ u32 nsg_irpt;
+ u32 nsg_irpt_flags;
+ u32 nsg_cfg_irpt;
+ u32 nsg_cfg_irpt_flags;
+};
+
struct acpi_iort_smmu_v3 {
u64 base_address; /* SMMUv3 base address */
u32 flags;
@@ -1294,6 +1304,7 @@ struct acpi_table_tpm2 {
#define ACPI_TPM2_MEMORY_MAPPED 6
#define ACPI_TPM2_COMMAND_BUFFER 7
#define ACPI_TPM2_COMMAND_BUFFER_WITH_START_METHOD 8
+#define ACPI_TPM2_COMMAND_BUFFER_WITH_SMC 11
/*******************************************************************************
*
diff --git a/include/acpi/cppc_acpi.h b/include/acpi/cppc_acpi.h
index 427a7c3e6c75..2010c0516f27 100644
--- a/include/acpi/cppc_acpi.h
+++ b/include/acpi/cppc_acpi.h
@@ -103,6 +103,7 @@ struct cppc_perf_caps {
u32 highest_perf;
u32 nominal_perf;
u32 lowest_perf;
+ u32 lowest_nonlinear_perf;
};
struct cppc_perf_ctrls {
@@ -115,7 +116,7 @@ struct cppc_perf_fb_ctrs {
u64 reference;
u64 delivered;
u64 reference_perf;
- u64 ctr_wrap_time;
+ u64 wraparound_time;
};
/* Per CPU container for runtime CPPC management. */
diff --git a/include/asm-generic/bug.h b/include/asm-generic/bug.h
index 6f96247226a4..d6f4aed479a1 100644
--- a/include/asm-generic/bug.h
+++ b/include/asm-generic/bug.h
@@ -5,7 +5,9 @@
#ifdef CONFIG_GENERIC_BUG
#define BUGFLAG_WARNING (1 << 0)
-#define BUGFLAG_TAINT(taint) (BUGFLAG_WARNING | ((taint) << 8))
+#define BUGFLAG_ONCE (1 << 1)
+#define BUGFLAG_DONE (1 << 2)
+#define BUGFLAG_TAINT(taint) ((taint) << 8)
#define BUG_GET_TAINT(bug) ((bug)->flags >> 8)
#endif
@@ -55,6 +57,18 @@ struct bug_entry {
#define BUG_ON(condition) do { if (unlikely(condition)) BUG(); } while (0)
#endif
+#ifdef __WARN_FLAGS
+#define __WARN_TAINT(taint) __WARN_FLAGS(BUGFLAG_TAINT(taint))
+#define __WARN_ONCE_TAINT(taint) __WARN_FLAGS(BUGFLAG_ONCE|BUGFLAG_TAINT(taint))
+
+#define WARN_ON_ONCE(condition) ({ \
+ int __ret_warn_on = !!(condition); \
+ if (unlikely(__ret_warn_on)) \
+ __WARN_ONCE_TAINT(TAINT_WARN); \
+ unlikely(__ret_warn_on); \
+})
+#endif
+
/*
* WARN(), WARN_ON(), WARN_ON_ONCE, and so on can be used to report
* significant issues that need prompt attention if they should ever
@@ -97,7 +111,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
#endif
#ifndef WARN
-#define WARN(condition, format...) ({ \
+#define WARN(condition, format...) ({ \
int __ret_warn_on = !!(condition); \
if (unlikely(__ret_warn_on)) \
__WARN_printf(format); \
@@ -112,6 +126,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
unlikely(__ret_warn_on); \
})
+#ifndef WARN_ON_ONCE
#define WARN_ON_ONCE(condition) ({ \
static bool __section(.data.unlikely) __warned; \
int __ret_warn_once = !!(condition); \
@@ -122,6 +137,7 @@ void __warn(const char *file, int line, void *caller, unsigned taint,
} \
unlikely(__ret_warn_once); \
})
+#endif
#define WARN_ONCE(condition, format...) ({ \
static bool __section(.data.unlikely) __warned; \
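
With __WARN_FLAGS, WARN_ON_ONCE() records the "already warned" state in the bug_entry flags (BUGFLAG_ONCE/BUGFLAG_DONE) instead of a per-callsite static variable, but call sites look the same either way. A minimal sketch, with an invented example_* helper:

#include <linux/bug.h>
#include <linux/errno.h>

static int example_submit(void *desc)
{
        /*
         * WARN_ON_ONCE() still evaluates to the condition, so it can gate
         * an early-return path while warning (and tainting) only once.
         */
        if (WARN_ON_ONCE(!desc))
                return -EINVAL;

        return 0;
}
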
diff --git a/include/asm-generic/extable.h b/include/asm-generic/extable.h
new file mode 100644
index 000000000000..ca14c6664027
--- /dev/null
+++ b/include/asm-generic/extable.h
@@ -0,0 +1,26 @@
+#ifndef __ASM_GENERIC_EXTABLE_H
+#define __ASM_GENERIC_EXTABLE_H
+
+/*
+ * The exception table consists of pairs of addresses: the first is the
+ * address of an instruction that is allowed to fault, and the second is
+ * the address at which the program should continue. No registers are
+ * modified, so it is entirely up to the continuation code to figure out
+ * what to do.
+ *
+ * All the routines below use bits of fixup code that are out of line
+ * with the main instruction path. This means when everything is well,
+ * we don't even have to jump over them. Further, they do not intrude
+ * on our cache or tlb entries.
+ */
+
+struct exception_table_entry
+{
+ unsigned long insn, fixup;
+};
+
+
+struct pt_regs;
+extern int fixup_exception(struct pt_regs *regs);
+
+#endif
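
The (insn, fixup) pairs above are searched by address when a whitelisted instruction faults. A rough, architecture-neutral sketch of the idea; the real fixup_exception() implementations sort the table and bisect it, and the example_* name below is invented:

#include <asm/extable.h>
#include <asm/ptrace.h>

extern struct exception_table_entry __start___ex_table[];
extern struct exception_table_entry __stop___ex_table[];

int example_fixup_exception(struct pt_regs *regs)
{
        const struct exception_table_entry *e;

        /* Linear scan for clarity only. */
        for (e = __start___ex_table; e < __stop___ex_table; e++) {
                if (e->insn == instruction_pointer(regs)) {
                        /* Continue at the out-of-line fixup code. */
                        instruction_pointer_set(regs, e->fixup);
                        return 1;
                }
        }

        return 0;
}
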
diff --git a/include/asm-generic/mm_hooks.h b/include/asm-generic/mm_hooks.h
index cc5d9a1405df..41e5b6784b97 100644
--- a/include/asm-generic/mm_hooks.h
+++ b/include/asm-generic/mm_hooks.h
@@ -32,10 +32,4 @@ static inline bool arch_vma_access_permitted(struct vm_area_struct *vma,
/* by default, allow everything */
return true;
}
-
-static inline bool arch_pte_access_permitted(pte_t pte, bool write)
-{
- /* by default, allow everything */
- return true;
-}
#endif /* _ASM_GENERIC_MM_HOOKS_H */
diff --git a/include/asm-generic/pgtable.h b/include/asm-generic/pgtable.h
index 1fad160f35de..7dfa767dc680 100644
--- a/include/asm-generic/pgtable.h
+++ b/include/asm-generic/pgtable.h
@@ -341,6 +341,31 @@ static inline int pte_unused(pte_t pte)
}
#endif
+#ifndef pte_access_permitted
+#define pte_access_permitted(pte, write) \
+ (pte_present(pte) && (!(write) || pte_write(pte)))
+#endif
+
+#ifndef pmd_access_permitted
+#define pmd_access_permitted(pmd, write) \
+ (pmd_present(pmd) && (!(write) || pmd_write(pmd)))
+#endif
+
+#ifndef pud_access_permitted
+#define pud_access_permitted(pud, write) \
+ (pud_present(pud) && (!(write) || pud_write(pud)))
+#endif
+
+#ifndef p4d_access_permitted
+#define p4d_access_permitted(p4d, write) \
+ (p4d_present(p4d) && (!(write) || p4d_write(p4d)))
+#endif
+
+#ifndef pgd_access_permitted
+#define pgd_access_permitted(pgd, write) \
+ (pgd_present(pgd) && (!(write) || pgd_write(pgd)))
+#endif
+
#ifndef __HAVE_ARCH_PMD_SAME
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
static inline int pmd_same(pmd_t pmd_a, pmd_t pmd_b)
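
The *_access_permitted() helpers collapse the "present and, for a write, writable" test into a single macro that architectures may override (for example to honour protection keys). A hedged sketch of how a lockless walker might use the PTE-level variant; the example_* name is invented:

#include <linux/mm.h>
#include <asm/pgtable.h>

static struct page *example_follow_pte(pte_t pte, bool write)
{
        /* Reject entries that are not present or lack write permission. */
        if (!pte_access_permitted(pte, write))
                return NULL;

        return pte_page(pte);
}
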
diff --git a/include/asm-generic/set_memory.h b/include/asm-generic/set_memory.h
new file mode 100644
index 000000000000..83e81f8996b2
--- /dev/null
+++ b/include/asm-generic/set_memory.h
@@ -0,0 +1,12 @@
+#ifndef __ASM_SET_MEMORY_H
+#define __ASM_SET_MEMORY_H
+
+/*
+ * Functions to change memory attributes.
+ */
+int set_memory_ro(unsigned long addr, int numpages);
+int set_memory_rw(unsigned long addr, int numpages);
+int set_memory_x(unsigned long addr, int numpages);
+int set_memory_nx(unsigned long addr, int numpages);
+
+#endif
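
The header only centralises declarations for the per-architecture set_memory_*() implementations that several arches already provide. A hedged usage sketch, marking a freshly allocated page read-only once it has been initialised; the example_* names are invented, and the calls only make sense on mappings the architecture allows to be changed:

#include <asm/set_memory.h>
#include <linux/errno.h>
#include <linux/gfp.h>

static unsigned long example_page;

static int example_seal_page(void)
{
        example_page = get_zeroed_page(GFP_KERNEL);
        if (!example_page)
                return -ENOMEM;

        /* ... fill in the page ..., then drop write permission on it. */
        return set_memory_ro(example_page, 1);
}
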
diff --git a/include/asm-generic/uaccess.h b/include/asm-generic/uaccess.h
index cc6bb319e464..bbe4bb438e39 100644
--- a/include/asm-generic/uaccess.h
+++ b/include/asm-generic/uaccess.h
@@ -6,7 +6,6 @@
* on any machine that has kernel and user data in the same
* address space, e.g. all NOMMU machines.
*/
-#include <linux/sched.h>
#include <linux/string.h>
#include <asm/segment.h>
@@ -35,9 +34,6 @@ static inline void set_fs(mm_segment_t fs)
#define segment_eq(a, b) ((a).seg == (b).seg)
#endif
-#define VERIFY_READ 0
-#define VERIFY_WRITE 1
-
#define access_ok(type, addr, size) __access_ok((unsigned long)(addr),(size))
/*
@@ -52,87 +48,6 @@ static inline int __access_ok(unsigned long addr, unsigned long size)
#endif
/*
- * The exception table consists of pairs of addresses: the first is the
- * address of an instruction that is allowed to fault, and the second is
- * the address at which the program should continue. No registers are
- * modified, so it is entirely up to the continuation code to figure out
- * what to do.
- *
- * All the routines below use bits of fixup code that are out of line
- * with the main instruction path. This means when everything is well,
- * we don't even have to jump over them. Further, they do not intrude
- * on our cache or tlb entries.
- */
-
-struct exception_table_entry
-{
- unsigned long insn, fixup;
-};
-
-/*
- * architectures with an MMU should override these two
- */
-#ifndef __copy_from_user
-static inline __must_check long __copy_from_user(void *to,
- const void __user * from, unsigned long n)
-{
- if (__builtin_constant_p(n)) {
- switch(n) {
- case 1:
- *(u8 *)to = *(u8 __force *)from;
- return 0;
- case 2:
- *(u16 *)to = *(u16 __force *)from;
- return 0;
- case 4:
- *(u32 *)to = *(u32 __force *)from;
- return 0;
-#ifdef CONFIG_64BIT
- case 8:
- *(u64 *)to = *(u64 __force *)from;
- return 0;
-#endif
- default:
- break;
- }
- }
-
- memcpy(to, (const void __force *)from, n);
- return 0;
-}
-#endif
-
-#ifndef __copy_to_user
-static inline __must_check long __copy_to_user(void __user *to,
- const void *from, unsigned long n)
-{
- if (__builtin_constant_p(n)) {
- switch(n) {
- case 1:
- *(u8 __force *)to = *(u8 *)from;
- return 0;
- case 2:
- *(u16 __force *)to = *(u16 *)from;
- return 0;
- case 4:
- *(u32 __force *)to = *(u32 *)from;
- return 0;
-#ifdef CONFIG_64BIT
- case 8:
- *(u64 __force *)to = *(u64 *)from;
- return 0;
-#endif
- default:
- break;
- }
- }
-
- memcpy((void __force *)to, from, n);
- return 0;
-}
-#endif
-
-/*
* These are the main single-value transfer routines. They automatically
* use the right size if we just have the right pointer type.
* This version just falls back to copy_{from,to}_user, which should
@@ -171,8 +86,7 @@ static inline __must_check long __copy_to_user(void __user *to,
static inline int __put_user_fn(size_t size, void __user *ptr, void *x)
{
- size = __copy_to_user(ptr, x, size);
- return size ? -EFAULT : size;
+ return unlikely(raw_copy_to_user(ptr, x, size)) ? -EFAULT : 0;
}
#define __put_user_fn(sz, u, k) __put_user_fn(sz, u, k)
@@ -187,28 +101,28 @@ extern int __put_user_bad(void) __attribute__((noreturn));
__chk_user_ptr(ptr); \
switch (sizeof(*(ptr))) { \
case 1: { \
- unsigned char __x; \
+ unsigned char __x = 0; \
__gu_err = __get_user_fn(sizeof (*(ptr)), \
ptr, &__x); \
(x) = *(__force __typeof__(*(ptr)) *) &__x; \
break; \
}; \
case 2: { \
- unsigned short __x; \
+ unsigned short __x = 0; \
__gu_err = __get_user_fn(sizeof (*(ptr)), \
ptr, &__x); \
(x) = *(__force __typeof__(*(ptr)) *) &__x; \
break; \
}; \
case 4: { \
- unsigned int __x; \
+ unsigned int __x = 0; \
__gu_err = __get_user_fn(sizeof (*(ptr)), \
ptr, &__x); \
(x) = *(__force __typeof__(*(ptr)) *) &__x; \
break; \
}; \
case 8: { \
- unsigned long long __x; \
+ unsigned long long __x = 0; \
__gu_err = __get_user_fn(sizeof (*(ptr)), \
ptr, &__x); \
(x) = *(__force __typeof__(*(ptr)) *) &__x; \
@@ -233,12 +147,7 @@ extern int __put_user_bad(void) __attribute__((noreturn));
#ifndef __get_user_fn
static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
{
- size_t n = __copy_from_user(x, ptr, size);
- if (unlikely(n)) {
- memset(x + (size - n), 0, n);
- return -EFAULT;
- }
- return 0;
+ return unlikely(raw_copy_from_user(x, ptr, size)) ? -EFAULT : 0;
}
#define __get_user_fn(sz, u, k) __get_user_fn(sz, u, k)
@@ -247,36 +156,6 @@ static inline int __get_user_fn(size_t size, const void __user *ptr, void *x)
extern int __get_user_bad(void) __attribute__((noreturn));
-#ifndef __copy_from_user_inatomic
-#define __copy_from_user_inatomic __copy_from_user
-#endif
-
-#ifndef __copy_to_user_inatomic
-#define __copy_to_user_inatomic __copy_to_user
-#endif
-
-static inline long copy_from_user(void *to,
- const void __user * from, unsigned long n)
-{
- unsigned long res = n;
- might_fault();
- if (likely(access_ok(VERIFY_READ, from, n)))
- res = __copy_from_user(to, from, n);
- if (unlikely(res))
- memset(to + (n - res), 0, res);
- return res;
-}
-
-static inline long copy_to_user(void __user *to,
- const void *from, unsigned long n)
-{
- might_fault();
- if (access_ok(VERIFY_WRITE, to, n))
- return __copy_to_user(to, from, n);
- else
- return n;
-}
-
/*
* Copy a null terminated string from userspace.
*/
@@ -348,4 +227,6 @@ clear_user(void __user *to, unsigned long n)
return __clear_user(to, n);
}
+#include <asm/extable.h>
+
#endif /* __ASM_GENERIC_UACCESS_H */
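
After this rework the single-value transfer routines are built on raw_copy_{to,from}_user(), and the temporaries in __get_user() are zero-initialised so a faulting read cannot hand stale kernel stack contents back to the caller. Call sites are unchanged; a minimal sketch with invented names:

#include <linux/errno.h>
#include <linux/types.h>
#include <linux/uaccess.h>

static long example_bump_counter(u32 __user *uptr)
{
        u32 val;

        /* get_user()/put_user() return 0 on success or -EFAULT. */
        if (get_user(val, uptr))
                return -EFAULT;

        return put_user(val + 1, uptr);
}
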
diff --git a/include/asm-generic/vmlinux.lds.h b/include/asm-generic/vmlinux.lds.h
index 143db9c523e2..314a0b9219c6 100644
--- a/include/asm-generic/vmlinux.lds.h
+++ b/include/asm-generic/vmlinux.lds.h
@@ -287,8 +287,6 @@
*(.rodata1) \
} \
\
- BUG_TABLE \
- \
/* PCI quirks */ \
.pci_fixup : AT(ADDR(.pci_fixup) - LOAD_OFFSET) { \
VMLINUX_SYMBOL(__start_pci_fixups_early) = .; \
@@ -568,7 +566,6 @@
IRQCHIP_OF_MATCH_TABLE() \
ACPI_PROBE_TABLE(irqchip) \
ACPI_PROBE_TABLE(clksrc) \
- ACPI_PROBE_TABLE(iort) \
EARLYCON_TABLE()
#define INIT_TEXT \
@@ -857,7 +854,8 @@
READ_MOSTLY_DATA(cacheline) \
DATA_DATA \
CONSTRUCTORS \
- }
+ } \
+ BUG_TABLE
#define INIT_TEXT_SECTION(inittext_align) \
. = ALIGN(inittext_align); \
diff --git a/include/clocksource/arm_arch_timer.h b/include/clocksource/arm_arch_timer.h
index caedb74c9210..cc805b72994a 100644
--- a/include/clocksource/arm_arch_timer.h
+++ b/include/clocksource/arm_arch_timer.h
@@ -16,9 +16,13 @@
#ifndef __CLKSOURCE_ARM_ARCH_TIMER_H
#define __CLKSOURCE_ARM_ARCH_TIMER_H
+#include <linux/bitops.h>
#include <linux/timecounter.h>
#include <linux/types.h>
+#define ARCH_TIMER_TYPE_CP15 BIT(0)
+#define ARCH_TIMER_TYPE_MEM BIT(1)
+
#define ARCH_TIMER_CTRL_ENABLE (1 << 0)
#define ARCH_TIMER_CTRL_IT_MASK (1 << 1)
#define ARCH_TIMER_CTRL_IT_STAT (1 << 2)
@@ -34,11 +38,27 @@ enum arch_timer_reg {
ARCH_TIMER_REG_TVAL,
};
+enum arch_timer_ppi_nr {
+ ARCH_TIMER_PHYS_SECURE_PPI,
+ ARCH_TIMER_PHYS_NONSECURE_PPI,
+ ARCH_TIMER_VIRT_PPI,
+ ARCH_TIMER_HYP_PPI,
+ ARCH_TIMER_MAX_TIMER_PPI
+};
+
+enum arch_timer_spi_nr {
+ ARCH_TIMER_PHYS_SPI,
+ ARCH_TIMER_VIRT_SPI,
+ ARCH_TIMER_MAX_TIMER_SPI
+};
+
#define ARCH_TIMER_PHYS_ACCESS 0
#define ARCH_TIMER_VIRT_ACCESS 1
#define ARCH_TIMER_MEM_PHYS_ACCESS 2
#define ARCH_TIMER_MEM_VIRT_ACCESS 3
+#define ARCH_TIMER_MEM_MAX_FRAMES 8
+
#define ARCH_TIMER_USR_PCT_ACCESS_EN (1 << 0) /* physical counter */
#define ARCH_TIMER_USR_VCT_ACCESS_EN (1 << 1) /* virtual counter */
#define ARCH_TIMER_VIRT_EVT_EN (1 << 2)
@@ -54,6 +74,20 @@ struct arch_timer_kvm_info {
int virtual_irq;
};
+struct arch_timer_mem_frame {
+ bool valid;
+ phys_addr_t cntbase;
+ size_t size;
+ int phys_irq;
+ int virt_irq;
+};
+
+struct arch_timer_mem {
+ phys_addr_t cntctlbase;
+ size_t size;
+ struct arch_timer_mem_frame frame[ARCH_TIMER_MEM_MAX_FRAMES];
+};
+
#ifdef CONFIG_ARM_ARCH_TIMER
extern u32 arch_timer_get_rate(void);
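
struct arch_timer_mem describes a memory-mapped timer block (the CNTCTLBase frame plus up to ARCH_TIMER_MEM_MAX_FRAMES timer frames), and callers are expected to pick a usable frame out of it. A hedged sketch of such a selection helper; the example_* name is invented:

#include <clocksource/arm_arch_timer.h>

static struct arch_timer_mem_frame *
example_pick_frame(struct arch_timer_mem *timer_mem)
{
        int i;

        for (i = 0; i < ARCH_TIMER_MEM_MAX_FRAMES; i++) {
                struct arch_timer_mem_frame *frame = &timer_mem->frame[i];

                /* A usable frame has a base address and at least one IRQ. */
                if (frame->valid &&
                    (frame->phys_irq > 0 || frame->virt_irq > 0))
                        return frame;
        }

        return NULL;
}
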
diff --git a/include/crypto/gf128mul.h b/include/crypto/gf128mul.h
index 592d47e565a8..0977fb18ff68 100644
--- a/include/crypto/gf128mul.h
+++ b/include/crypto/gf128mul.h
@@ -43,12 +43,13 @@
---------------------------------------------------------------------------
Issue Date: 31/01/2006
- An implementation of field multiplication in Galois Field GF(128)
+ An implementation of field multiplication in Galois Field GF(2^128)
*/
#ifndef _CRYPTO_GF128MUL_H
#define _CRYPTO_GF128MUL_H
+#include <asm/byteorder.h>
#include <crypto/b128ops.h>
#include <linux/slab.h>
@@ -65,7 +66,7 @@
* are left and the lsb's are right. char b[16] is an array and b[0] is
* the first octet.
*
- * 80000000 00000000 00000000 00000000 .... 00000000 00000000 00000000
+ * 10000000 00000000 00000000 00000000 .... 00000000 00000000 00000000
* b[0] b[1] b[2] b[3] b[13] b[14] b[15]
*
* Every bit is a coefficient of some power of X. We can store the bits
@@ -85,15 +86,17 @@
* Both of the above formats are easy to implement on big-endian
* machines.
*
- * EME (which is patent encumbered) uses the ble format (bits are stored
- * in big endian order and the bytes in little endian). The above buffer
- * represents X^7 in this case and the primitive polynomial is b[0] = 0x87.
+ * XTS and EME (the latter of which is patent encumbered) use the ble
+ * format (bits are stored in big endian order and the bytes in little
+ * endian). The above buffer represents X^7 in this case and the
+ * primitive polynomial is b[0] = 0x87.
*
* The common machine word-size is smaller than 128 bits, so to make
* an efficient implementation we must split into machine word sizes.
- * This file uses one 32bit for the moment. Machine endianness comes into
- * play. The lle format in relation to machine endianness is discussed
- * below by the original author of gf128mul Dr Brian Gladman.
+ * This implementation uses 64-bit words for the moment. Machine
+ * endianness comes into play. The lle format in relation to machine
+ * endianness is discussed below by the original author of gf128mul Dr
+ * Brian Gladman.
*
* Let's look at the bbe and ble format on a little endian machine.
*
@@ -127,10 +130,10 @@
* machines this will automatically aligned to wordsize and on a 64-bit
* machine also.
*/
-/* Multiply a GF128 field element by x. Field elements are held in arrays
- of bytes in which field bits 8n..8n + 7 are held in byte[n], with lower
- indexed bits placed in the more numerically significant bit positions
- within bytes.
+/* Multiply a GF(2^128) field element by x. Field elements are
+ held in arrays of bytes in which field bits 8n..8n + 7 are held in
+ byte[n], with lower indexed bits placed in the more numerically
+ significant bit positions within bytes.
On little endian machines the bit indexes translate into the bit
positions within four 32-bit words in the following way
@@ -161,8 +164,58 @@ void gf128mul_lle(be128 *a, const be128 *b);
void gf128mul_bbe(be128 *a, const be128 *b);
-/* multiply by x in ble format, needed by XTS */
-void gf128mul_x_ble(be128 *a, const be128 *b);
+/*
+ * The following functions multiply a field element by x in
+ * the polynomial field representation. They use 64-bit word operations
+ * to gain speed but compensate for machine endianness and hence work
+ * correctly on both styles of machine.
+ *
+ * They are defined here for performance.
+ */
+
+static inline u64 gf128mul_mask_from_bit(u64 x, int which)
+{
+ /* a constant-time version of 'x & ((u64)1 << which) ? (u64)-1 : 0' */
+ return ((s64)(x << (63 - which)) >> 63);
+}
+
+static inline void gf128mul_x_lle(be128 *r, const be128 *x)
+{
+ u64 a = be64_to_cpu(x->a);
+ u64 b = be64_to_cpu(x->b);
+
+ /* equivalent to gf128mul_table_le[(b << 7) & 0xff] << 48
+ * (see crypto/gf128mul.c): */
+ u64 _tt = gf128mul_mask_from_bit(b, 0) & ((u64)0xe1 << 56);
+
+ r->b = cpu_to_be64((b >> 1) | (a << 63));
+ r->a = cpu_to_be64((a >> 1) ^ _tt);
+}
+
+static inline void gf128mul_x_bbe(be128 *r, const be128 *x)
+{
+ u64 a = be64_to_cpu(x->a);
+ u64 b = be64_to_cpu(x->b);
+
+ /* equivalent to gf128mul_table_be[a >> 63] (see crypto/gf128mul.c): */
+ u64 _tt = gf128mul_mask_from_bit(a, 63) & 0x87;
+
+ r->a = cpu_to_be64((a << 1) | (b >> 63));
+ r->b = cpu_to_be64((b << 1) ^ _tt);
+}
+
+/* needed by XTS */
+static inline void gf128mul_x_ble(le128 *r, const le128 *x)
+{
+ u64 a = le64_to_cpu(x->a);
+ u64 b = le64_to_cpu(x->b);
+
+ /* equivalent to gf128mul_table_be[b >> 63] (see crypto/gf128mul.c): */
+ u64 _tt = gf128mul_mask_from_bit(a, 63) & 0x87;
+
+ r->a = cpu_to_le64((a << 1) | (b >> 63));
+ r->b = cpu_to_le64((b << 1) ^ _tt);
+}
/* 4k table optimization */
@@ -172,8 +225,8 @@ struct gf128mul_4k {
struct gf128mul_4k *gf128mul_init_4k_lle(const be128 *g);
struct gf128mul_4k *gf128mul_init_4k_bbe(const be128 *g);
-void gf128mul_4k_lle(be128 *a, struct gf128mul_4k *t);
-void gf128mul_4k_bbe(be128 *a, struct gf128mul_4k *t);
+void gf128mul_4k_lle(be128 *a, const struct gf128mul_4k *t);
+void gf128mul_4k_bbe(be128 *a, const struct gf128mul_4k *t);
static inline void gf128mul_free_4k(struct gf128mul_4k *t)
{
@@ -194,6 +247,6 @@ struct gf128mul_64k {
*/
struct gf128mul_64k *gf128mul_init_64k_bbe(const be128 *g);
void gf128mul_free_64k(struct gf128mul_64k *t);
-void gf128mul_64k_bbe(be128 *a, struct gf128mul_64k *t);
+void gf128mul_64k_bbe(be128 *a, const struct gf128mul_64k *t);
#endif /* _CRYPTO_GF128MUL_H */
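
Since XTS consumes one multiplication by x per data block, exposing gf128mul_x_ble() as an inline avoids an out-of-line call in that inner loop. A hedged sketch of how a tweak sequence might be generated with it; the example_* name is invented:

#include <crypto/gf128mul.h>

static void example_fill_tweaks(const le128 *first_tweak, le128 *out,
                                unsigned int nblocks)
{
        le128 t = *first_tweak;
        unsigned int i;

        for (i = 0; i < nblocks; i++) {
                out[i] = t;
                /* Multiply by x in ble format to get the next block's tweak. */
                gf128mul_x_ble(&t, &t);
        }
}
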
diff --git a/include/crypto/internal/acompress.h b/include/crypto/internal/acompress.h
index 1de2b5af12d7..51052f65cefc 100644
--- a/include/crypto/internal/acompress.h
+++ b/include/crypto/internal/acompress.h
@@ -78,4 +78,7 @@ int crypto_register_acomp(struct acomp_alg *alg);
*/
int crypto_unregister_acomp(struct acomp_alg *alg);
+int crypto_register_acomps(struct acomp_alg *algs, int count);
+void crypto_unregister_acomps(struct acomp_alg *algs, int count);
+
#endif
diff --git a/include/crypto/internal/scompress.h b/include/crypto/internal/scompress.h
index 3fda3c5655a0..ccad9b2c9bd6 100644
--- a/include/crypto/internal/scompress.h
+++ b/include/crypto/internal/scompress.h
@@ -133,4 +133,7 @@ int crypto_register_scomp(struct scomp_alg *alg);
*/
int crypto_unregister_scomp(struct scomp_alg *alg);
+int crypto_register_scomps(struct scomp_alg *algs, int count);
+void crypto_unregister_scomps(struct scomp_alg *algs, int count);
+
#endif
diff --git a/include/crypto/kpp.h b/include/crypto/kpp.h
index 4307a2f2365f..ce8e1f79374b 100644
--- a/include/crypto/kpp.h
+++ b/include/crypto/kpp.h
@@ -74,7 +74,7 @@ struct crypto_kpp {
* @base: Common crypto API algorithm data structure
*/
struct kpp_alg {
- int (*set_secret)(struct crypto_kpp *tfm, void *buffer,
+ int (*set_secret)(struct crypto_kpp *tfm, const void *buffer,
unsigned int len);
int (*generate_public_key)(struct kpp_request *req);
int (*compute_shared_secret)(struct kpp_request *req);
@@ -273,8 +273,8 @@ struct kpp_secret {
*
* Return: zero on success; error code in case of error
*/
-static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm, void *buffer,
- unsigned int len)
+static inline int crypto_kpp_set_secret(struct crypto_kpp *tfm,
+ const void *buffer, unsigned int len)
{
struct kpp_alg *alg = crypto_kpp_alg(tfm);
diff --git a/include/crypto/public_key.h b/include/crypto/public_key.h
index 882ca0e1e7a5..e0b681a717ba 100644
--- a/include/crypto/public_key.h
+++ b/include/crypto/public_key.h
@@ -50,9 +50,20 @@ struct key;
struct key_type;
union key_payload;
-extern int restrict_link_by_signature(struct key *trust_keyring,
+extern int restrict_link_by_signature(struct key *dest_keyring,
const struct key_type *type,
- const union key_payload *payload);
+ const union key_payload *payload,
+ struct key *trust_keyring);
+
+extern int restrict_link_by_key_or_keyring(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *trusted);
+
+extern int restrict_link_by_key_or_keyring_chain(struct key *trust_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *trusted);
extern int verify_signature(const struct key *key,
const struct public_key_signature *sig);
diff --git a/include/crypto/xts.h b/include/crypto/xts.h
index 77b630672b2c..c0bde308b28a 100644
--- a/include/crypto/xts.h
+++ b/include/crypto/xts.h
@@ -11,7 +11,7 @@ struct blkcipher_desc;
#define XTS_BLOCK_SIZE 16
struct xts_crypt_req {
- be128 *tbuf;
+ le128 *tbuf;
unsigned int tbuflen;
void *tweak_ctx;
diff --git a/include/drm/bridge/analogix_dp.h b/include/drm/bridge/analogix_dp.h
index f6f0c062205c..c99d6eaef1ac 100644
--- a/include/drm/bridge/analogix_dp.h
+++ b/include/drm/bridge/analogix_dp.h
@@ -49,4 +49,7 @@ int analogix_dp_bind(struct device *dev, struct drm_device *drm_dev,
struct analogix_dp_plat_data *plat_data);
void analogix_dp_unbind(struct device *dev, struct device *master, void *data);
+int analogix_dp_start_crc(struct drm_connector *connector);
+int analogix_dp_stop_crc(struct drm_connector *connector);
+
#endif /* _ANALOGIX_DP_H_ */
diff --git a/include/drm/bridge/dw_hdmi.h b/include/drm/bridge/dw_hdmi.h
index b080a171a23f..ed599bea3f6c 100644
--- a/include/drm/bridge/dw_hdmi.h
+++ b/include/drm/bridge/dw_hdmi.h
@@ -14,6 +14,67 @@
struct dw_hdmi;
+/**
+ * DOC: Supported input formats and encodings
+ *
+ * Depending on the Hardware configuration of the Controller IP, it supports
+ * a subset of the following input formats and encodings on its internal
+ * 48bit bus.
+ *
+ * +----------------------+----------------------------------+------------------------------+
+ * + Format Name + Format Code + Encodings +
+ * +----------------------+----------------------------------+------------------------------+
+ * + RGB 4:4:4 8bit + ``MEDIA_BUS_FMT_RGB888_1X24`` + ``V4L2_YCBCR_ENC_DEFAULT`` +
+ * +----------------------+----------------------------------+------------------------------+
+ * + RGB 4:4:4 10bits + ``MEDIA_BUS_FMT_RGB101010_1X30`` + ``V4L2_YCBCR_ENC_DEFAULT`` +
+ * +----------------------+----------------------------------+------------------------------+
+ * + RGB 4:4:4 12bits + ``MEDIA_BUS_FMT_RGB121212_1X36`` + ``V4L2_YCBCR_ENC_DEFAULT`` +
+ * +----------------------+----------------------------------+------------------------------+
+ * + RGB 4:4:4 16bits + ``MEDIA_BUS_FMT_RGB161616_1X48`` + ``V4L2_YCBCR_ENC_DEFAULT`` +
+ * +----------------------+----------------------------------+------------------------------+
+ * + YCbCr 4:4:4 8bit + ``MEDIA_BUS_FMT_YUV8_1X24`` + ``V4L2_YCBCR_ENC_601`` +
+ * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * + + + or ``V4L2_YCBCR_ENC_XV601`` +
+ * + + + or ``V4L2_YCBCR_ENC_XV709`` +
+ * +----------------------+----------------------------------+------------------------------+
+ * + YCbCr 4:4:4 10bits + ``MEDIA_BUS_FMT_YUV10_1X30`` + ``V4L2_YCBCR_ENC_601`` +
+ * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * + + + or ``V4L2_YCBCR_ENC_XV601`` +
+ * + + + or ``V4L2_YCBCR_ENC_XV709`` +
+ * +----------------------+----------------------------------+------------------------------+
+ * + YCbCr 4:4:4 12bits + ``MEDIA_BUS_FMT_YUV12_1X36`` + ``V4L2_YCBCR_ENC_601`` +
+ * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * + + + or ``V4L2_YCBCR_ENC_XV601`` +
+ * + + + or ``V4L2_YCBCR_ENC_XV709`` +
+ * +----------------------+----------------------------------+------------------------------+
+ * + YCbCr 4:4:4 16bits + ``MEDIA_BUS_FMT_YUV16_1X48`` + ``V4L2_YCBCR_ENC_601`` +
+ * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * + + + or ``V4L2_YCBCR_ENC_XV601`` +
+ * + + + or ``V4L2_YCBCR_ENC_XV709`` +
+ * +----------------------+----------------------------------+------------------------------+
+ * + YCbCr 4:2:2 8bit + ``MEDIA_BUS_FMT_UYVY8_1X16`` + ``V4L2_YCBCR_ENC_601`` +
+ * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * +----------------------+----------------------------------+------------------------------+
+ * + YCbCr 4:2:2 10bits + ``MEDIA_BUS_FMT_UYVY10_1X20`` + ``V4L2_YCBCR_ENC_601`` +
+ * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * +----------------------+----------------------------------+------------------------------+
+ * + YCbCr 4:2:2 12bits + ``MEDIA_BUS_FMT_UYVY12_1X24`` + ``V4L2_YCBCR_ENC_601`` +
+ * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * +----------------------+----------------------------------+------------------------------+
+ * + YCbCr 4:2:0 8bit + ``MEDIA_BUS_FMT_UYYVYY8_0_5X24`` + ``V4L2_YCBCR_ENC_601`` +
+ * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * +----------------------+----------------------------------+------------------------------+
+ * + YCbCr 4:2:0 10bits + ``MEDIA_BUS_FMT_UYYVYY10_0_5X30``+ ``V4L2_YCBCR_ENC_601`` +
+ * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * +----------------------+----------------------------------+------------------------------+
+ * + YCbCr 4:2:0 12bits + ``MEDIA_BUS_FMT_UYYVYY12_0_5X36``+ ``V4L2_YCBCR_ENC_601`` +
+ * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * +----------------------+----------------------------------+------------------------------+
+ * + YCbCr 4:2:0 16bits + ``MEDIA_BUS_FMT_UYYVYY16_0_5X48``+ ``V4L2_YCBCR_ENC_601`` +
+ * + + + or ``V4L2_YCBCR_ENC_709`` +
+ * +----------------------+----------------------------------+------------------------------+
+ */
+
enum {
DW_HDMI_RES_8,
DW_HDMI_RES_10,
@@ -21,12 +82,6 @@ enum {
DW_HDMI_RES_MAX,
};
-enum dw_hdmi_devtype {
- IMX6Q_HDMI,
- IMX6DL_HDMI,
- RK3288_HDMI,
-};
-
enum dw_hdmi_phy_type {
DW_HDMI_PHY_DWC_HDMI_TX_PHY = 0x00,
DW_HDMI_PHY_DWC_MHL_PHY_HEAC = 0xb2,
@@ -57,13 +112,35 @@ struct dw_hdmi_phy_config {
u16 vlev_ctr; /* voltage level control */
};
+struct dw_hdmi_phy_ops {
+ int (*init)(struct dw_hdmi *hdmi, void *data,
+ struct drm_display_mode *mode);
+ void (*disable)(struct dw_hdmi *hdmi, void *data);
+ enum drm_connector_status (*read_hpd)(struct dw_hdmi *hdmi, void *data);
+ void (*update_hpd)(struct dw_hdmi *hdmi, void *data,
+ bool force, bool disabled, bool rxsense);
+ void (*setup_hpd)(struct dw_hdmi *hdmi, void *data);
+};
+
struct dw_hdmi_plat_data {
- enum dw_hdmi_devtype dev_type;
+ struct regmap *regm;
+ enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
+ struct drm_display_mode *mode);
+ unsigned long input_bus_format;
+ unsigned long input_bus_encoding;
+
+ /* Vendor PHY support */
+ const struct dw_hdmi_phy_ops *phy_ops;
+ const char *phy_name;
+ void *phy_data;
+
+ /* Synopsys PHY support */
const struct dw_hdmi_mpll_config *mpll_cfg;
const struct dw_hdmi_curr_ctrl *cur_ctr;
const struct dw_hdmi_phy_config *phy_config;
- enum drm_mode_status (*mode_valid)(struct drm_connector *connector,
- struct drm_display_mode *mode);
+ int (*configure_phy)(struct dw_hdmi *hdmi,
+ const struct dw_hdmi_plat_data *pdata,
+ unsigned long mpixelclock);
};
int dw_hdmi_probe(struct platform_device *pdev,
@@ -73,8 +150,14 @@ void dw_hdmi_unbind(struct device *dev);
int dw_hdmi_bind(struct platform_device *pdev, struct drm_encoder *encoder,
const struct dw_hdmi_plat_data *plat_data);
+void dw_hdmi_setup_rx_sense(struct device *dev, bool hpd, bool rx_sense);
+
void dw_hdmi_set_sample_rate(struct dw_hdmi *hdmi, unsigned int rate);
void dw_hdmi_audio_enable(struct dw_hdmi *hdmi);
void dw_hdmi_audio_disable(struct dw_hdmi *hdmi);
+/* PHY configuration */
+void dw_hdmi_phy_i2c_write(struct dw_hdmi *hdmi, unsigned short data,
+ unsigned char addr);
+
#endif /* __IMX_HDMI_H__ */
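
The new struct dw_hdmi_phy_ops lets SoC glue drivers plug a vendor-specific PHY into dw-hdmi instead of the Synopsys one. A hedged sketch of how a platform might wire it up; every example_* identifier is invented, and a real implementation would program actual PHY registers and report real HPD state:

#include <drm/bridge/dw_hdmi.h>
#include <drm/drm_connector.h>
#include <drm/drm_modes.h>

static int example_phy_init(struct dw_hdmi *hdmi, void *data,
                            struct drm_display_mode *mode)
{
        /* Configure the vendor PHY for the requested mode. */
        return 0;
}

static void example_phy_disable(struct dw_hdmi *hdmi, void *data)
{
        /* Power the vendor PHY back down. */
}

static enum drm_connector_status example_phy_read_hpd(struct dw_hdmi *hdmi,
                                                      void *data)
{
        return connector_status_connected;
}

static void example_phy_update_hpd(struct dw_hdmi *hdmi, void *data,
                                   bool force, bool disabled, bool rxsense)
{
        /* React to forced/disabled state and RX sense changes. */
}

static void example_phy_setup_hpd(struct dw_hdmi *hdmi, void *data)
{
        /* Enable hot-plug detection interrupts in the vendor PHY. */
}

static const struct dw_hdmi_phy_ops example_phy_ops = {
        .init = example_phy_init,
        .disable = example_phy_disable,
        .read_hpd = example_phy_read_hpd,
        .update_hpd = example_phy_update_hpd,
        .setup_hpd = example_phy_setup_hpd,
};

static const struct dw_hdmi_plat_data example_plat_data = {
        .phy_ops = &example_phy_ops,
        .phy_name = "example-vendor-phy",
};
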
diff --git a/include/drm/drmP.h b/include/drm/drmP.h
index 6105c050d7bc..e1daa4f343cd 100644
--- a/include/drm/drmP.h
+++ b/include/drm/drmP.h
@@ -47,17 +47,16 @@
#include <linux/miscdevice.h>
#include <linux/mm.h>
#include <linux/mutex.h>
-#include <linux/pci.h>
#include <linux/platform_device.h>
#include <linux/poll.h>
#include <linux/ratelimit.h>
-#include <linux/rbtree.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/types.h>
#include <linux/vmalloc.h>
#include <linux/workqueue.h>
#include <linux/dma-fence.h>
+#include <linux/module.h>
#include <asm/mman.h>
#include <asm/pgalloc.h>
@@ -75,26 +74,33 @@
#include <drm/drm_mm.h>
#include <drm/drm_os_linux.h>
#include <drm/drm_sarea.h>
-#include <drm/drm_vma_manager.h>
#include <drm/drm_drv.h>
+#include <drm/drm_prime.h>
+#include <drm/drm_pci.h>
+#include <drm/drm_file.h>
+#include <drm/drm_debugfs.h>
+#include <drm/drm_ioctl.h>
+#include <drm/drm_sysfs.h>
struct module;
-struct drm_file;
struct drm_device;
struct drm_agp_head;
struct drm_local_map;
struct drm_device_dma;
-struct drm_dma_handle;
struct drm_gem_object;
struct drm_master;
struct drm_vblank_crtc;
+struct drm_vma_offset_manager;
struct device_node;
struct videomode;
struct reservation_object;
struct dma_buf_attachment;
+struct pci_dev;
+struct pci_controller;
+
/*
* The following categories are defined:
*
@@ -313,194 +319,16 @@ struct dma_buf_attachment;
#define DRM_IF_VERSION(maj, min) (maj << 16 | min)
-/**
- * Ioctl function type.
- *
- * \param inode device inode.
- * \param file_priv DRM file private pointer.
- * \param cmd command.
- * \param arg argument.
- */
-typedef int drm_ioctl_t(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-
-typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
- unsigned long arg);
-
-#define DRM_IOCTL_NR(n) _IOC_NR(n)
-#define DRM_MAJOR 226
-
-#define DRM_AUTH 0x1
-#define DRM_MASTER 0x2
-#define DRM_ROOT_ONLY 0x4
-#define DRM_CONTROL_ALLOW 0x8
-#define DRM_UNLOCKED 0x10
-#define DRM_RENDER_ALLOW 0x20
-
-struct drm_ioctl_desc {
- unsigned int cmd;
- int flags;
- drm_ioctl_t *func;
- const char *name;
-};
-
-/**
- * Creates a driver or general drm_ioctl_desc array entry for the given
- * ioctl, for use by drm_ioctl().
- */
-
-#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \
- [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = { \
- .cmd = DRM_IOCTL_##ioctl, \
- .func = _func, \
- .flags = _flags, \
- .name = #ioctl \
- }
-
-/* Event queued up for userspace to read */
-struct drm_pending_event {
- struct completion *completion;
- void (*completion_release)(struct completion *completion);
- struct drm_event *event;
- struct dma_fence *fence;
- struct list_head link;
- struct list_head pending_link;
- struct drm_file *file_priv;
- pid_t pid; /* pid of requester, no guarantee it's valid by the time
- we deliver the event, for tracing only */
-};
-
-struct drm_prime_file_private {
- struct mutex lock;
- struct rb_root dmabufs;
- struct rb_root handles;
-};
-
-/** File private data */
-struct drm_file {
- unsigned authenticated :1;
- /* true when the client has asked us to expose stereo 3D mode flags */
- unsigned stereo_allowed :1;
- /*
- * true if client understands CRTC primary planes and cursor planes
- * in the plane list
- */
- unsigned universal_planes:1;
- /* true if client understands atomic properties */
- unsigned atomic:1;
- /*
- * This client is the creator of @master.
- * Protected by struct drm_device::master_mutex.
- */
- unsigned is_master:1;
-
- struct pid *pid;
- drm_magic_t magic;
- struct list_head lhead;
- struct drm_minor *minor;
- unsigned long lock_count;
-
- /** Mapping of mm object handles to object pointers. */
- struct idr object_idr;
- /** Lock for synchronization of access to object_idr. */
- spinlock_t table_lock;
-
- struct file *filp;
- void *driver_priv;
-
- struct drm_master *master; /* master this node is currently associated with
- N.B. not always dev->master */
- /**
- * fbs - List of framebuffers associated with this file.
- *
- * Protected by fbs_lock. Note that the fbs list holds a reference on
- * the fb object to prevent it from untimely disappearing.
- */
- struct list_head fbs;
- struct mutex fbs_lock;
-
- /** User-created blob properties; this retains a reference on the
- * property. */
- struct list_head blobs;
-
- wait_queue_head_t event_wait;
- struct list_head pending_event_list;
- struct list_head event_list;
- int event_space;
-
- struct mutex event_read_lock;
-
- struct drm_prime_file_private prime;
-};
-
-/**
- * Lock data.
- */
-struct drm_lock_data {
- struct drm_hw_lock *hw_lock; /**< Hardware lock */
- /** Private of lock holder's file (NULL=kernel) */
- struct drm_file *file_priv;
- wait_queue_head_t lock_queue; /**< Queue of blocked processes */
- unsigned long lock_time; /**< Time of last lock in jiffies */
- spinlock_t spinlock;
- uint32_t kernel_waiters;
- uint32_t user_waiters;
- int idle_has_lock;
-};
/* Flags and return codes for get_vblank_timestamp() driver function. */
#define DRM_CALLED_FROM_VBLIRQ 1
#define DRM_VBLANKTIME_SCANOUTPOS_METHOD (1 << 0)
-#define DRM_VBLANKTIME_IN_VBLANK (1 << 1)
/* get_scanout_position() return flags */
#define DRM_SCANOUTPOS_VALID (1 << 0)
#define DRM_SCANOUTPOS_IN_VBLANK (1 << 1)
#define DRM_SCANOUTPOS_ACCURATE (1 << 2)
-enum drm_minor_type {
- DRM_MINOR_PRIMARY,
- DRM_MINOR_CONTROL,
- DRM_MINOR_RENDER,
- DRM_MINOR_CNT,
-};
-
-/**
- * Info file list entry. This structure represents a debugfs or proc file to
- * be created by the drm core
- */
-struct drm_info_list {
- const char *name; /** file name */
- int (*show)(struct seq_file*, void*); /** show callback */
- u32 driver_features; /**< Required driver features for this entry */
- void *data;
-};
-
-/**
- * debugfs node structure. This structure represents a debugfs file.
- */
-struct drm_info_node {
- struct list_head list;
- struct drm_minor *minor;
- const struct drm_info_list *info_ent;
- struct dentry *dent;
-};
-
-/**
- * DRM minor structure. This structure represents a drm minor number.
- */
-struct drm_minor {
- int index; /**< Minor device number */
- int type; /**< Control or render */
- struct device *kdev; /**< Linux device */
- struct drm_device *dev;
-
- struct dentry *debugfs_root;
-
- struct list_head debugfs_list;
- struct mutex debugfs_lock; /* Protects debugfs_list. */
-};
-
/**
* DRM device structure. This structure represent a complete card that
* may contain multiple heads.
@@ -611,7 +439,6 @@ struct drm_device {
struct pci_controller *hose;
#endif
- struct platform_device *platformdev; /**< Platform device struture */
struct virtio_device *virtdev;
struct drm_sg_mem *sg; /**< Scatter gather memory */
@@ -675,154 +502,19 @@ static inline int drm_device_is_unplugged(struct drm_device *dev)
return ret;
}
-static inline bool drm_is_render_client(const struct drm_file *file_priv)
-{
- return file_priv->minor->type == DRM_MINOR_RENDER;
-}
-
-static inline bool drm_is_control_client(const struct drm_file *file_priv)
-{
- return file_priv->minor->type == DRM_MINOR_CONTROL;
-}
-
-static inline bool drm_is_primary_client(const struct drm_file *file_priv)
-{
- return file_priv->minor->type == DRM_MINOR_PRIMARY;
-}
-
/******************************************************************/
/** \name Internal function definitions */
/*@{*/
/* Driver support (drm_drv.h) */
-extern int drm_ioctl_permit(u32 flags, struct drm_file *file_priv);
-extern long drm_ioctl(struct file *filp,
- unsigned int cmd, unsigned long arg);
-#ifdef CONFIG_COMPAT
-extern long drm_compat_ioctl(struct file *filp,
- unsigned int cmd, unsigned long arg);
-#else
-/* Let drm_compat_ioctl be assigned to .compat_ioctl unconditionally */
-#define drm_compat_ioctl NULL
-#endif
-extern bool drm_ioctl_flags(unsigned int nr, unsigned int *flags);
-
-/* File Operations (drm_fops.c) */
-int drm_open(struct inode *inode, struct file *filp);
-ssize_t drm_read(struct file *filp, char __user *buffer,
- size_t count, loff_t *offset);
-int drm_release(struct inode *inode, struct file *filp);
-unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
-int drm_event_reserve_init_locked(struct drm_device *dev,
- struct drm_file *file_priv,
- struct drm_pending_event *p,
- struct drm_event *e);
-int drm_event_reserve_init(struct drm_device *dev,
- struct drm_file *file_priv,
- struct drm_pending_event *p,
- struct drm_event *e);
-void drm_event_cancel_free(struct drm_device *dev,
- struct drm_pending_event *p);
-void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e);
-void drm_send_event(struct drm_device *dev, struct drm_pending_event *e);
-
-/* Misc. IOCTL support (drm_ioctl.c) */
-int drm_noop(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
-int drm_invalid_op(struct drm_device *dev, void *data,
- struct drm_file *file_priv);
/*
* These are exported to drivers so that they can implement fencing using
* DMA quiscent + idle. DMA quiescent usually requires the hardware lock.
*/
- /* Debugfs support */
-#if defined(CONFIG_DEBUG_FS)
-extern int drm_debugfs_create_files(const struct drm_info_list *files,
- int count, struct dentry *root,
- struct drm_minor *minor);
-extern int drm_debugfs_remove_files(const struct drm_info_list *files,
- int count, struct drm_minor *minor);
-#else
-static inline int drm_debugfs_create_files(const struct drm_info_list *files,
- int count, struct dentry *root,
- struct drm_minor *minor)
-{
- return 0;
-}
-
-static inline int drm_debugfs_remove_files(const struct drm_info_list *files,
- int count, struct drm_minor *minor)
-{
- return 0;
-}
-#endif
-
-struct dma_buf_export_info;
-
-extern struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
- struct drm_gem_object *obj,
- int flags);
-extern int drm_gem_prime_handle_to_fd(struct drm_device *dev,
- struct drm_file *file_priv, uint32_t handle, uint32_t flags,
- int *prime_fd);
-extern struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
- struct dma_buf *dma_buf);
-extern int drm_gem_prime_fd_to_handle(struct drm_device *dev,
- struct drm_file *file_priv, int prime_fd, uint32_t *handle);
-struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
- struct dma_buf_export_info *exp_info);
-extern void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
-
-extern int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
- dma_addr_t *addrs, int max_pages);
-extern struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages);
-extern void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
-
-
-extern struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size,
- size_t align);
-extern void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah);
-
- /* sysfs support (drm_sysfs.c) */
-extern void drm_sysfs_hotplug_event(struct drm_device *dev);
-
-
/*@}*/
-extern int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
-extern void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
-#ifdef CONFIG_PCI
-extern int drm_get_pci_dev(struct pci_dev *pdev,
- const struct pci_device_id *ent,
- struct drm_driver *driver);
-extern int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master);
-#else
-static inline int drm_get_pci_dev(struct pci_dev *pdev,
- const struct pci_device_id *ent,
- struct drm_driver *driver)
-{
- return -ENOSYS;
-}
-
-static inline int drm_pci_set_busid(struct drm_device *dev,
- struct drm_master *master)
-{
- return -ENOSYS;
-}
-#endif
-
-#define DRM_PCIE_SPEED_25 1
-#define DRM_PCIE_SPEED_50 2
-#define DRM_PCIE_SPEED_80 4
-
-extern int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask);
-extern int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw);
-
-/* platform section */
-extern int drm_platform_init(struct drm_driver *driver, struct platform_device *platform_device);
-
/* returns true if currently okay to sleep */
static __inline__ bool drm_can_sleep(void)
{
diff --git a/include/drm/drm_atomic.h b/include/drm/drm_atomic.h
index 052ab161b239..788daf756f48 100644
--- a/include/drm/drm_atomic.h
+++ b/include/drm/drm_atomic.h
@@ -138,12 +138,12 @@ struct drm_crtc_commit {
struct __drm_planes_state {
struct drm_plane *ptr;
- struct drm_plane_state *state;
+ struct drm_plane_state *state, *old_state, *new_state;
};
struct __drm_crtcs_state {
struct drm_crtc *ptr;
- struct drm_crtc_state *state;
+ struct drm_crtc_state *state, *old_state, *new_state;
struct drm_crtc_commit *commit;
s32 __user *out_fence_ptr;
unsigned last_vblank_count;
@@ -151,7 +151,7 @@ struct __drm_crtcs_state {
struct __drm_connnectors_state {
struct drm_connector *ptr;
- struct drm_connector_state *state;
+ struct drm_connector_state *state, *old_state, *new_state;
};
/**
@@ -160,7 +160,6 @@ struct __drm_connnectors_state {
* @dev: parent DRM device
* @allow_modeset: allow full modeset
* @legacy_cursor_update: hint to enforce legacy cursor IOCTL semantics
- * @legacy_set_config: Disable conflicting encoders instead of failing with -EINVAL.
* @planes: pointer to array of structures with per-plane data
* @crtcs: pointer to array of CRTC pointers
* @num_connector: size of the @connectors and @connector_states arrays
@@ -173,7 +172,6 @@ struct drm_atomic_state {
struct drm_device *dev;
bool allow_modeset : 1;
bool legacy_cursor_update : 1;
- bool legacy_set_config : 1;
struct __drm_planes_state *planes;
struct __drm_crtcs_state *crtcs;
int num_connector;
@@ -277,6 +275,9 @@ int drm_atomic_connector_set_property(struct drm_connector *connector,
*
* This function returns the crtc state for the given crtc, or NULL
* if the crtc is not part of the global atomic state.
+ *
+ * This function is deprecated, @drm_atomic_get_old_crtc_state or
+ * @drm_atomic_get_new_crtc_state should be used instead.
*/
static inline struct drm_crtc_state *
drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
@@ -286,12 +287,44 @@ drm_atomic_get_existing_crtc_state(struct drm_atomic_state *state,
}
/**
+ * drm_atomic_get_old_crtc_state - get old crtc state, if it exists
+ * @state: global atomic state object
+ * @crtc: crtc to grab
+ *
+ * This function returns the old crtc state for the given crtc, or
+ * NULL if the crtc is not part of the global atomic state.
+ */
+static inline struct drm_crtc_state *
+drm_atomic_get_old_crtc_state(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ return state->crtcs[drm_crtc_index(crtc)].old_state;
+}
+/**
+ * drm_atomic_get_new_crtc_state - get new crtc state, if it exists
+ * @state: global atomic state object
+ * @crtc: crtc to grab
+ *
+ * This function returns the new crtc state for the given crtc, or
+ * NULL if the crtc is not part of the global atomic state.
+ */
+static inline struct drm_crtc_state *
+drm_atomic_get_new_crtc_state(struct drm_atomic_state *state,
+ struct drm_crtc *crtc)
+{
+ return state->crtcs[drm_crtc_index(crtc)].new_state;
+}
+
+/**
* drm_atomic_get_existing_plane_state - get plane state, if it exists
* @state: global atomic state object
* @plane: plane to grab
*
* This function returns the plane state for the given plane, or NULL
* if the plane is not part of the global atomic state.
+ *
+ * This function is deprecated, @drm_atomic_get_old_plane_state or
+ * @drm_atomic_get_new_plane_state should be used instead.
*/
static inline struct drm_plane_state *
drm_atomic_get_existing_plane_state(struct drm_atomic_state *state,
@@ -301,12 +334,45 @@ drm_atomic_get_existing_plane_state(struct drm_atomic_state *state,
}
/**
+ * drm_atomic_get_old_plane_state - get plane state, if it exists
+ * @state: global atomic state object
+ * @plane: plane to grab
+ *
+ * This function returns the old plane state for the given plane, or
+ * NULL if the plane is not part of the global atomic state.
+ */
+static inline struct drm_plane_state *
+drm_atomic_get_old_plane_state(struct drm_atomic_state *state,
+ struct drm_plane *plane)
+{
+ return state->planes[drm_plane_index(plane)].old_state;
+}
+
+/**
+ * drm_atomic_get_new_plane_state - get plane state, if it exists
+ * @state: global atomic state object
+ * @plane: plane to grab
+ *
+ * This function returns the new plane state for the given plane, or
+ * NULL if the plane is not part of the global atomic state.
+ */
+static inline struct drm_plane_state *
+drm_atomic_get_new_plane_state(struct drm_atomic_state *state,
+ struct drm_plane *plane)
+{
+ return state->planes[drm_plane_index(plane)].new_state;
+}
+
+/**
* drm_atomic_get_existing_connector_state - get connector state, if it exists
* @state: global atomic state object
* @connector: connector to grab
*
* This function returns the connector state for the given connector,
* or NULL if the connector is not part of the global atomic state.
+ *
+ * This function is deprecated, @drm_atomic_get_old_connector_state or
+ * @drm_atomic_get_new_connector_state should be used instead.
*/
static inline struct drm_connector_state *
drm_atomic_get_existing_connector_state(struct drm_atomic_state *state,
@@ -321,6 +387,46 @@ drm_atomic_get_existing_connector_state(struct drm_atomic_state *state,
}
/**
+ * drm_atomic_get_old_connector_state - get connector state, if it exists
+ * @state: global atomic state object
+ * @connector: connector to grab
+ *
+ * This function returns the old connector state for the given connector,
+ * or NULL if the connector is not part of the global atomic state.
+ */
+static inline struct drm_connector_state *
+drm_atomic_get_old_connector_state(struct drm_atomic_state *state,
+ struct drm_connector *connector)
+{
+ int index = drm_connector_index(connector);
+
+ if (index >= state->num_connector)
+ return NULL;
+
+ return state->connectors[index].old_state;
+}
+
+/**
+ * drm_atomic_get_new_connector_state - get connector state, if it exists
+ * @state: global atomic state object
+ * @connector: connector to grab
+ *
+ * This function returns the new connector state for the given connector,
+ * or NULL if the connector is not part of the global atomic state.
+ */
+static inline struct drm_connector_state *
+drm_atomic_get_new_connector_state(struct drm_atomic_state *state,
+ struct drm_connector *connector)
+{
+ int index = drm_connector_index(connector);
+
+ if (index >= state->num_connector)
+ return NULL;
+
+ return state->connectors[index].new_state;
+}
+
+/**
* __drm_atomic_get_current_plane_state - get current plane state
* @state: global atomic state object
* @plane: plane to grab
@@ -390,6 +496,23 @@ int __must_check drm_atomic_nonblocking_commit(struct drm_atomic_state *state);
void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
+/**
+ * for_each_connector_in_state - iterate over all connectors in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @connector: &struct drm_connector iteration cursor
+ * @connector_state: &struct drm_connector_state iteration cursor
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all connectors in an atomic update. Note that before the
+ * software state is committed (by calling drm_atomic_helper_swap_state()), this
+ * points to the new state, while afterwards it points to the old state. Due to
+ * this tricky confusion this macro is deprecated.
+ *
+ * FIXME:
+ *
+ * Replace all usage of this with one of the explicit iterators below and then
+ * remove this macro.
+ */
#define for_each_connector_in_state(__state, connector, connector_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->num_connector && \
@@ -398,6 +521,86 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(__i)++) \
for_each_if (connector)
+/**
+ * for_each_oldnew_connector_in_state - iterate over all connectors in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @connector: &struct drm_connector iteration cursor
+ * @old_connector_state: &struct drm_connector_state iteration cursor for the
+ * old state
+ * @new_connector_state: &struct drm_connector_state iteration cursor for the
+ * new state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all connectors in an atomic update, tracking both old and
+ * new state. This is useful in places where the state delta needs to be
+ * considered, for example in atomic check functions.
+ */
+#define for_each_oldnew_connector_in_state(__state, connector, old_connector_state, new_connector_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_connector && \
+ ((connector) = (__state)->connectors[__i].ptr, \
+ (old_connector_state) = (__state)->connectors[__i].old_state, \
+ (new_connector_state) = (__state)->connectors[__i].new_state, 1); \
+ (__i)++) \
+ for_each_if (connector)
+
+/**
+ * for_each_old_connector_in_state - iterate over all connectors in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @connector: &struct drm_connector iteration cursor
+ * @old_connector_state: &struct drm_connector_state iteration cursor for the
+ * old state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all connectors in an atomic update, tracking only the old
+ * state. This is useful in disable functions, where we need the old state the
+ * hardware is still in.
+ */
+#define for_each_old_connector_in_state(__state, connector, old_connector_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_connector && \
+ ((connector) = (__state)->connectors[__i].ptr, \
+ (old_connector_state) = (__state)->connectors[__i].old_state, 1); \
+ (__i)++) \
+ for_each_if (connector)
+
+/**
+ * for_each_new_connector_in_state - iterate over all connectors in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @connector: &struct drm_connector iteration cursor
+ * @new_connector_state: &struct drm_connector_state iteration cursor for the
+ * new state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all connectors in an atomic update, tracking only the new
+ * state. This is useful in enable functions, where we need the new state the
+ * hardware should be in when the atomic commit operation has completed.
+ */
+#define for_each_new_connector_in_state(__state, connector, new_connector_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->num_connector && \
+ ((connector) = (__state)->connectors[__i].ptr, \
+ (new_connector_state) = (__state)->connectors[__i].new_state, 1); \
+ (__i)++) \
+ for_each_if (connector)
+
+/**
+ * for_each_crtc_in_state - iterate over all CRTCs in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @crtc: &struct drm_crtc iteration cursor
+ * @crtc_state: &struct drm_crtc_state iteration cursor
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all CRTCs in an atomic update. Note that before the
+ * software state is committed (by calling drm_atomic_helper_swap_state()), this
+ * points to the new state, while afterwards it points to the old state. Due to
+ * this tricky confusion this macro is deprecated.
+ *
+ * FIXME:
+ *
+ * Replace all usage of this with one of the explicit iterators below and then
+ * remove this macro.
+ */
#define for_each_crtc_in_state(__state, crtc, crtc_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->dev->mode_config.num_crtc && \
@@ -406,6 +609,82 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
(__i)++) \
for_each_if (crtc_state)
+/**
+ * for_each_oldnew_crtc_in_state - iterate over all CRTCs in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @crtc: &struct drm_crtc iteration cursor
+ * @old_crtc_state: &struct drm_crtc_state iteration cursor for the old state
+ * @new_crtc_state: &struct drm_crtc_state iteration cursor for the new state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all CRTCs in an atomic update, tracking both old and
+ * new state. This is useful in places where the state delta needs to be
+ * considered, for example in atomic check functions.
+ */
+#define for_each_oldnew_crtc_in_state(__state, crtc, old_crtc_state, new_crtc_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->dev->mode_config.num_crtc && \
+ ((crtc) = (__state)->crtcs[__i].ptr, \
+ (old_crtc_state) = (__state)->crtcs[__i].old_state, \
+ (new_crtc_state) = (__state)->crtcs[__i].new_state, 1); \
+ (__i)++) \
+ for_each_if (crtc)
+
+/**
+ * for_each_old_crtc_in_state - iterate over all CRTCs in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @crtc: &struct drm_crtc iteration cursor
+ * @old_crtc_state: &struct drm_crtc_state iteration cursor for the old state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all CRTCs in an atomic update, tracking only the old
+ * state. This is useful in disable functions, where we need the old state the
+ * hardware is still in.
+ */
+#define for_each_old_crtc_in_state(__state, crtc, old_crtc_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->dev->mode_config.num_crtc && \
+ ((crtc) = (__state)->crtcs[__i].ptr, \
+ (old_crtc_state) = (__state)->crtcs[__i].old_state, 1); \
+ (__i)++) \
+ for_each_if (crtc)
+
+/**
+ * for_each_new_crtc_in_state - iterate over all CRTCs in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @crtc: &struct drm_crtc iteration cursor
+ * @new_crtc_state: &struct drm_crtc_state iteration cursor for the new state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all CRTCs in an atomic update, tracking only the new
+ * state. This is useful in enable functions, where we need the new state the
+ * hardware should be in when the atomic commit operation has completed.
+ */
+#define for_each_new_crtc_in_state(__state, crtc, new_crtc_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->dev->mode_config.num_crtc && \
+ ((crtc) = (__state)->crtcs[__i].ptr, \
+ (new_crtc_state) = (__state)->crtcs[__i].new_state, 1); \
+ (__i)++) \
+ for_each_if (crtc)
+
+/**
+ * for_each_plane_in_state - iterate over all planes in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @plane: &struct drm_plane iteration cursor
+ * @plane_state: &struct drm_plane_state iteration cursor
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all planes in an atomic update. Note that before the
+ * software state is committed (by calling drm_atomic_helper_swap_state()), this
+ * points to the new state, while afterwards it points to the old state. Due to
+ * this tricky confusion this macro is deprecated.
+ *
+ * FIXME:
+ *
+ * Replace all usage of this with one of the explicit iterators below and then
+ * remove this macro.
+ */
#define for_each_plane_in_state(__state, plane, plane_state, __i) \
for ((__i) = 0; \
(__i) < (__state)->dev->mode_config.num_total_plane && \
@@ -415,12 +694,71 @@ void drm_state_dump(struct drm_device *dev, struct drm_printer *p);
for_each_if (plane_state)
/**
+ * for_each_oldnew_plane_in_state - iterate over all planes in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @plane: &struct drm_plane iteration cursor
+ * @old_plane_state: &struct drm_plane_state iteration cursor for the old state
+ * @new_plane_state: &struct drm_plane_state iteration cursor for the new state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all planes in an atomic update, tracking both old and
+ * new state. This is useful in places where the state delta needs to be
+ * considered, for example in atomic check functions.
+ */
+#define for_each_oldnew_plane_in_state(__state, plane, old_plane_state, new_plane_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->dev->mode_config.num_total_plane && \
+ ((plane) = (__state)->planes[__i].ptr, \
+ (old_plane_state) = (__state)->planes[__i].old_state, \
+ (new_plane_state) = (__state)->planes[__i].new_state, 1); \
+ (__i)++) \
+ for_each_if (plane)
+
+/**
+ * for_each_old_plane_in_state - iterate over all planes in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @plane: &struct drm_plane iteration cursor
+ * @old_plane_state: &struct drm_plane_state iteration cursor for the old state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all planes in an atomic update, tracking only the old
+ * state. This is useful in disable functions, where we need the old state the
+ * hardware is still in.
+ */
+#define for_each_old_plane_in_state(__state, plane, old_plane_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->dev->mode_config.num_total_plane && \
+ ((plane) = (__state)->planes[__i].ptr, \
+ (old_plane_state) = (__state)->planes[__i].old_state, 1); \
+ (__i)++) \
+ for_each_if (plane)
+
+/**
+ * for_each_new_plane_in_state - iterate over all planes in an atomic update
+ * @__state: &struct drm_atomic_state pointer
+ * @plane: &struct drm_plane iteration cursor
+ * @new_plane_state: &struct drm_plane_state iteration cursor for the new state
+ * @__i: int iteration cursor, for macro-internal use
+ *
+ * This iterates over all planes in an atomic update, tracking only the new
+ * state. This is useful in enable functions, where we need the new state the
+ * hardware should be in when the atomic commit operation has completed.
+ */
+#define for_each_new_plane_in_state(__state, plane, new_plane_state, __i) \
+ for ((__i) = 0; \
+ (__i) < (__state)->dev->mode_config.num_total_plane && \
+ ((plane) = (__state)->planes[__i].ptr, \
+ (new_plane_state) = (__state)->planes[__i].new_state, 1); \
+ (__i)++) \
+ for_each_if (plane)
+
+/**
* drm_atomic_crtc_needs_modeset - compute combined modeset need
* @state: &drm_crtc_state for the CRTC
*
* To give drivers flexibility &struct drm_crtc_state has 3 booleans to track
* whether the state CRTC changed enough to need a full modeset cycle:
- * connectors_changed, mode_changed and active_changed. This helper simply
+ * planes_changed, mode_changed and active_changed. This helper simply
* combines these three to compute the overall need for a modeset for @state.
*
* The atomic helper code sets these booleans, but drivers can and should
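A sketch of how a driver's atomic_check might use the new explicit old/new iterators instead of the deprecated for_each_crtc_in_state(); my_atomic_check is a hypothetical name, the accessors and macros are the ones added above.

#include <drm/drm_atomic.h>

static int my_atomic_check(struct drm_device *dev,
			   struct drm_atomic_state *state)
{
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state, *new_crtc_state;
	int i;

	for_each_oldnew_crtc_in_state(state, crtc, old_crtc_state,
				      new_crtc_state, i) {
		/* the delta is explicit, no guessing what ->state points at */
		if (!old_crtc_state->active && new_crtc_state->active) {
			/* CRTC is being enabled in this commit */
		}

		if (drm_atomic_crtc_needs_modeset(new_crtc_state)) {
			/* full modeset path */
		}
	}

	return 0;
}

The same pattern keeps working after drm_atomic_helper_swap_state(): drm_atomic_get_old_crtc_state() and drm_atomic_get_new_crtc_state() mean the same thing on both sides of the swap, which is the point of the new accessors.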
diff --git a/include/drm/drm_atomic_helper.h b/include/drm/drm_atomic_helper.h
index d066e9491ae3..f0a8678ae98e 100644
--- a/include/drm/drm_atomic_helper.h
+++ b/include/drm/drm_atomic_helper.h
@@ -94,17 +94,23 @@ int drm_atomic_helper_update_plane(struct drm_plane *plane,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h);
-int drm_atomic_helper_disable_plane(struct drm_plane *plane);
+ uint32_t src_w, uint32_t src_h,
+ struct drm_modeset_acquire_ctx *ctx);
+int drm_atomic_helper_disable_plane(struct drm_plane *plane,
+ struct drm_modeset_acquire_ctx *ctx);
int __drm_atomic_helper_disable_plane(struct drm_plane *plane,
struct drm_plane_state *plane_state);
-int drm_atomic_helper_set_config(struct drm_mode_set *set);
+int drm_atomic_helper_set_config(struct drm_mode_set *set,
+ struct drm_modeset_acquire_ctx *ctx);
int __drm_atomic_helper_set_config(struct drm_mode_set *set,
struct drm_atomic_state *state);
int drm_atomic_helper_disable_all(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
+void drm_atomic_helper_shutdown(struct drm_device *dev);
struct drm_atomic_state *drm_atomic_helper_suspend(struct drm_device *dev);
+int drm_atomic_helper_commit_duplicated_state(struct drm_atomic_state *state,
+ struct drm_modeset_acquire_ctx *ctx);
int drm_atomic_helper_resume(struct drm_device *dev,
struct drm_atomic_state *state);
@@ -120,13 +126,15 @@ int drm_atomic_helper_connector_set_property(struct drm_connector *connector,
int drm_atomic_helper_page_flip(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
- uint32_t flags);
+ uint32_t flags,
+ struct drm_modeset_acquire_ctx *ctx);
int drm_atomic_helper_page_flip_target(
struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
uint32_t flags,
- uint32_t target);
+ uint32_t target,
+ struct drm_modeset_acquire_ctx *ctx);
int drm_atomic_helper_connector_dpms(struct drm_connector *connector,
int mode);
struct drm_encoder *
@@ -168,7 +176,8 @@ void drm_atomic_helper_connector_destroy_state(struct drm_connector *connector,
struct drm_connector_state *state);
int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
u16 *red, u16 *green, u16 *blue,
- uint32_t size);
+ uint32_t size,
+ struct drm_modeset_acquire_ctx *ctx);
/**
* drm_atomic_crtc_for_each_plane - iterate over planes currently attached to CRTC
@@ -218,10 +227,10 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
__drm_atomic_get_current_plane_state((crtc_state)->state, \
plane)))
-/*
+/**
* drm_atomic_plane_disabling - check whether a plane is being disabled
- * @plane: plane object
- * @old_state: previous atomic state
+ * @old_plane_state: old atomic plane state
+ * @new_plane_state: new atomic plane state
*
* Checks the atomic state of a plane to determine whether it's being disabled
* or not. This also WARNs if it detects an invalid state (both CRTC and FB
@@ -231,28 +240,18 @@ int drm_atomic_helper_legacy_gamma_set(struct drm_crtc *crtc,
* True if the plane is being disabled, false otherwise.
*/
static inline bool
-drm_atomic_plane_disabling(struct drm_plane *plane,
- struct drm_plane_state *old_state)
+drm_atomic_plane_disabling(struct drm_plane_state *old_plane_state,
+ struct drm_plane_state *new_plane_state)
{
/*
* When disabling a plane, CRTC and FB should always be NULL together.
* Anything else should be considered a bug in the atomic core, so we
* gently warn about it.
*/
- WARN_ON((plane->state->crtc == NULL && plane->state->fb != NULL) ||
- (plane->state->crtc != NULL && plane->state->fb == NULL));
+ WARN_ON((new_plane_state->crtc == NULL && new_plane_state->fb != NULL) ||
+ (new_plane_state->crtc != NULL && new_plane_state->fb == NULL));
- /*
- * When using the transitional helpers, old_state may be NULL. If so,
- * we know nothing about the current state and have to assume that it
- * might be enabled.
- *
- * When using the atomic helpers, old_state won't be NULL. Therefore
- * this check assumes that either the driver will have reconstructed
- * the correct state in ->reset() or that the driver will have taken
- * appropriate measures to disable all planes.
- */
- return (!old_state || old_state->crtc) && !plane->state->crtc;
+ return old_plane_state->crtc && !new_plane_state->crtc;
}
#endif /* DRM_ATOMIC_HELPER_H_ */
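A sketch of the updated drm_atomic_plane_disabling() usage with the two-state signature, as seen from a plane helper's atomic_update; my_plane_atomic_update is hypothetical, and at this point in the commit plane->state already holds the new state.

#include <drm/drm_atomic_helper.h>

static void my_plane_atomic_update(struct drm_plane *plane,
				   struct drm_plane_state *old_state)
{
	if (drm_atomic_plane_disabling(old_state, plane->state)) {
		/* plane had a CRTC before and has none now: turn it off */
		return;
	}

	/* otherwise program the new scanout parameters from plane->state */
}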
diff --git a/include/drm/drm_auth.h b/include/drm/drm_auth.h
index 1eb4a52cad8d..81a40c2a9a3e 100644
--- a/include/drm/drm_auth.h
+++ b/include/drm/drm_auth.h
@@ -28,6 +28,23 @@
#ifndef _DRM_AUTH_H_
#define _DRM_AUTH_H_
+/*
+ * Legacy DRI1 locking data structure. Only here instead of in drm_legacy.h for
+ * include ordering reasons.
+ *
+ * DO NOT USE.
+ */
+struct drm_lock_data {
+ struct drm_hw_lock *hw_lock;
+ struct drm_file *file_priv;
+ wait_queue_head_t lock_queue;
+ unsigned long lock_time;
+ spinlock_t spinlock;
+ uint32_t kernel_waiters;
+ uint32_t user_waiters;
+ int idle_has_lock;
+};
+
/**
* struct drm_master - drm master structure
*
diff --git a/include/drm/drm_connector.h b/include/drm/drm_connector.h
index e5e1eddd19fb..4eeda120e46d 100644
--- a/include/drm/drm_connector.h
+++ b/include/drm/drm_connector.h
@@ -32,6 +32,7 @@
struct drm_device;
struct drm_connector_helper_funcs;
+struct drm_modeset_acquire_ctx;
struct drm_device;
struct drm_crtc;
struct drm_encoder;
@@ -87,6 +88,70 @@ enum subpixel_order {
SubPixelVerticalRGB,
SubPixelVerticalBGR,
SubPixelNone,
+
+};
+
+/**
+ * struct drm_scrambling: sink's scrambling support.
+ */
+struct drm_scrambling {
+ /**
+ * @supported: scrambling supported for rates > 340 Mhz.
+ */
+ bool supported;
+ /**
+ * @low_rates: scrambling supported for rates <= 340 Mhz.
+ */
+ bool low_rates;
+};
+
+/**
+ * struct drm_scdc - Information about scdc capabilities of an HDMI 2.0 sink
+ *
+ * Provides SCDC register support and capabilities-related information on an
+ * HDMI 2.0 sink. In case of an HDMI 1.4 sink, all parameters must be 0.
+ */
+struct drm_scdc {
+ /**
+ * @supported: status control & data channel present.
+ */
+ bool supported;
+ /**
+ * @read_request: sink is capable of generating scdc read request.
+ */
+ bool read_request;
+ /**
+ * @scrambling: sink's scrambling capabilities
+ */
+ struct drm_scrambling scrambling;
+};
+
+
+/**
+ * struct drm_hdmi_info - runtime information about the connected HDMI sink
+ *
+ * Describes if a given display supports advanced HDMI 2.0 features.
+ * This information is available in CEA-861-F extension blocks (like HF-VSDB).
+ */
+struct drm_hdmi_info {
+ /** @scdc: sink's scdc support and capabilities */
+ struct drm_scdc scdc;
+};
+
+/**
+ * enum drm_link_status - connector's link_status property value
+ *
+ * This enum is used as the connector's link status property value.
+ * It is set to the values defined in uapi.
+ *
+ * @DRM_LINK_STATUS_GOOD: DP Link is Good as a result of successful
+ * link training
+ * @DRM_LINK_STATUS_BAD: DP Link is BAD as a result of link training
+ * failure
+ */
+enum drm_link_status {
+ DRM_LINK_STATUS_GOOD = DRM_MODE_LINK_STATUS_GOOD,
+ DRM_LINK_STATUS_BAD = DRM_MODE_LINK_STATUS_BAD,
};
/**
@@ -160,6 +225,10 @@ struct drm_display_info {
#define DRM_BUS_FLAG_PIXDATA_POSEDGE (1<<2)
/* drive data on neg. edge */
#define DRM_BUS_FLAG_PIXDATA_NEGEDGE (1<<3)
+/* data is transmitted MSB to LSB on the bus */
+#define DRM_BUS_FLAG_DATA_MSB_TO_LSB (1<<4)
+/* data is transmitted LSB to MSB on the bus */
+#define DRM_BUS_FLAG_DATA_LSB_TO_MSB (1<<5)
/**
* @bus_flags: Additional information (like pixel signal polarity) for
@@ -188,6 +257,11 @@ struct drm_display_info {
* @cea_rev: CEA revision of the HDMI sink.
*/
u8 cea_rev;
+
+ /**
+ * @hdmi: advanced features of an HDMI sink.
+ */
+ struct drm_hdmi_info hdmi;
};
int drm_display_info_set_bus_formats(struct drm_display_info *info,
@@ -243,6 +317,12 @@ struct drm_connector_state {
struct drm_encoder *best_encoder;
+ /**
+ * @link_status: Connector link_status to keep track of whether link is
+ * GOOD or BAD to notify userspace if retraining is necessary.
+ */
+ enum drm_link_status link_status;
+
struct drm_atomic_state *state;
struct drm_tv_connector_state tv;
@@ -303,6 +383,11 @@ struct drm_connector_funcs {
* the helper library vtable purely for historical reasons. The only DRM
* core entry point to probe connector state is @fill_modes.
*
+ * Note that the helper library will already hold
+ * &drm_mode_config.connection_mutex. Drivers which need to grab additional
+ * locks to avoid races with concurrent modeset changes need to use
+ * &drm_connector_helper_funcs.detect_ctx instead.
+ *
* RETURNS:
*
* drm_connector_status indicating the connector's status.
@@ -581,7 +666,6 @@ struct drm_cmdline_mode {
* @bad_edid_counter: track sinks that give us an EDID with invalid checksum
* @edid_corrupt: indicates whether the last read EDID was corrupt
* @debugfs_entry: debugfs directory for this connector
- * @state: current atomic state for this connector
* @has_tile: is this connector connected to a tiled monitor
* @tile_group: tile group for the connected monitor
* @tile_is_single_monitor: whether the tile is one monitor housing
@@ -749,6 +833,21 @@ struct drm_connector {
struct dentry *debugfs_entry;
+ /**
+ * @state:
+ *
+ * Current atomic state for this connector.
+ *
+ * This is protected by @drm_mode_config.connection_mutex. Note that
+ * nonblocking atomic commits access the current connector state without
+ * taking locks. Either by going through the &struct drm_atomic_state
+ * pointers, see for_each_connector_in_state(),
+ * for_each_oldnew_connector_in_state(),
+ * for_each_old_connector_in_state() and
+ * for_each_new_connector_in_state(). Or through careful ordering of
+ * atomic commit operations as implemented in the atomic helpers, see
+ * &struct drm_crtc_commit.
+ */
struct drm_connector_state *state;
/* DisplayID bits */
@@ -795,25 +894,50 @@ static inline struct drm_connector *drm_connector_lookup(struct drm_device *dev,
}
/**
- * drm_connector_reference - incr the connector refcnt
- * @connector: connector
+ * drm_connector_get - acquire a connector reference
+ * @connector: DRM connector
*
* This function increments the connector's refcount.
*/
+static inline void drm_connector_get(struct drm_connector *connector)
+{
+ drm_mode_object_get(&connector->base);
+}
+
+/**
+ * drm_connector_put - release a connector reference
+ * @connector: DRM connector
+ *
+ * This function decrements the connector's reference count and frees the
+ * object if the reference count drops to zero.
+ */
+static inline void drm_connector_put(struct drm_connector *connector)
+{
+ drm_mode_object_put(&connector->base);
+}
+
+/**
+ * drm_connector_reference - acquire a connector reference
+ * @connector: DRM connector
+ *
+ * This is a compatibility alias for drm_connector_get() and should not be
+ * used by new code.
+ */
static inline void drm_connector_reference(struct drm_connector *connector)
{
- drm_mode_object_reference(&connector->base);
+ drm_connector_get(connector);
}
/**
- * drm_connector_unreference - unref a connector
- * @connector: connector to unref
+ * drm_connector_unreference - release a connector reference
+ * @connector: DRM connector
*
- * This function decrements the connector's refcount and frees it if it drops to zero.
+ * This is a compatibility alias for drm_connector_put() and should not be
+ * used by new code.
*/
static inline void drm_connector_unreference(struct drm_connector *connector)
{
- drm_mode_object_unreference(&connector->base);
+ drm_connector_put(connector);
}
const char *drm_get_connector_status_name(enum drm_connector_status status);
@@ -837,6 +961,8 @@ int drm_mode_connector_set_path_property(struct drm_connector *connector,
int drm_mode_connector_set_tile_property(struct drm_connector *connector);
int drm_mode_connector_update_edid_property(struct drm_connector *connector,
const struct edid *edid);
+void drm_mode_connector_set_link_status_property(struct drm_connector *connector,
+ uint64_t link_status);
/**
* struct drm_tile_group - Tile group metadata
@@ -882,7 +1008,7 @@ void drm_mode_put_tile_group(struct drm_device *dev,
*
* This iterator tracks state needed to be able to walk the connector_list
* within struct drm_mode_config. Only use together with
- * drm_connector_list_iter_get(), drm_connector_list_iter_put() and
+ * drm_connector_list_iter_begin(), drm_connector_list_iter_end() and
* drm_connector_list_iter_next() respectively the convenience macro
* drm_for_each_connector_iter().
*/
@@ -892,11 +1018,11 @@ struct drm_connector_list_iter {
struct drm_connector *conn;
};
-void drm_connector_list_iter_get(struct drm_device *dev,
- struct drm_connector_list_iter *iter);
+void drm_connector_list_iter_begin(struct drm_device *dev,
+ struct drm_connector_list_iter *iter);
struct drm_connector *
drm_connector_list_iter_next(struct drm_connector_list_iter *iter);
-void drm_connector_list_iter_put(struct drm_connector_list_iter *iter);
+void drm_connector_list_iter_end(struct drm_connector_list_iter *iter);
/**
* drm_for_each_connector_iter - connector_list iterator macro
@@ -904,8 +1030,8 @@ void drm_connector_list_iter_put(struct drm_connector_list_iter *iter);
* @iter: &struct drm_connector_list_iter
*
* Note that @connector is only valid within the list body, if you want to use
- * @connector after calling drm_connector_list_iter_put() then you need to grab
- * your own reference first using drm_connector_reference().
+ * @connector after calling drm_connector_list_iter_end() then you need to grab
+ * your own reference first using drm_connector_get().
*/
#define drm_for_each_connector_iter(connector, iter) \
while ((connector = drm_connector_list_iter_next(iter)))
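A sketch of the renamed list-iteration helpers together with drm_connector_get()/drm_connector_put(); my_find_hdmi_connector is a hypothetical helper.

#include <drm/drm_connector.h>

static struct drm_connector *my_find_hdmi_connector(struct drm_device *dev)
{
	struct drm_connector_list_iter iter;
	struct drm_connector *connector, *found = NULL;

	drm_connector_list_iter_begin(dev, &iter);
	drm_for_each_connector_iter(connector, &iter) {
		if (connector->connector_type == DRM_MODE_CONNECTOR_HDMIA) {
			/* keep the connector alive past the iterator */
			drm_connector_get(connector);
			found = connector;
			break;
		}
	}
	drm_connector_list_iter_end(&iter);

	return found;	/* caller drops it again with drm_connector_put() */
}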
diff --git a/include/drm/drm_crtc.h b/include/drm/drm_crtc.h
index 8f0b195e4a59..a8176a836e25 100644
--- a/include/drm/drm_crtc.h
+++ b/include/drm/drm_crtc.h
@@ -155,10 +155,17 @@ struct drm_crtc_state {
* Target vertical blank period when a page flip
* should take effect.
*/
-
u32 target_vblank;
/**
+ * @pageflip_flags:
+ *
+ * DRM_MODE_PAGE_FLIP_* flags, as passed to the page flip ioctl.
+ * Zero in any other case.
+ */
+ u32 pageflip_flags;
+
+ /**
* @event:
*
* Optional pointer to a DRM event to signal upon completion of the
@@ -197,6 +204,12 @@ struct drm_crtc_state {
* drm_crtc_arm_vblank_event(). See the documentation of that function
* for a detailed discussion of the constraints it needs to be used
* safely.
+ *
+ * If the device can't notify of flip completion in a race-free way
+ * at all, then the event should be armed just after the page flip is
+ * committed. In the worst case the driver will send the event to
+ * userspace one frame too late. This doesn't allow for a real atomic
+ * update, but it should avoid tearing.
*/
struct drm_pending_vblank_event *event;
@@ -309,7 +322,8 @@ struct drm_crtc_funcs {
* hooks.
*/
int (*gamma_set)(struct drm_crtc *crtc, u16 *r, u16 *g, u16 *b,
- uint32_t size);
+ uint32_t size,
+ struct drm_modeset_acquire_ctx *ctx);
/**
* @destroy:
@@ -334,7 +348,8 @@ struct drm_crtc_funcs {
*
* 0 on success or a negative error code on failure.
*/
- int (*set_config)(struct drm_mode_set *set);
+ int (*set_config)(struct drm_mode_set *set,
+ struct drm_modeset_acquire_ctx *ctx);
/**
* @page_flip:
@@ -392,7 +407,8 @@ struct drm_crtc_funcs {
int (*page_flip)(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
- uint32_t flags);
+ uint32_t flags,
+ struct drm_modeset_acquire_ctx *ctx);
/**
* @page_flip_target:
@@ -410,7 +426,8 @@ struct drm_crtc_funcs {
int (*page_flip_target)(struct drm_crtc *crtc,
struct drm_framebuffer *fb,
struct drm_pending_vblank_event *event,
- uint32_t flags, uint32_t target);
+ uint32_t flags, uint32_t target,
+ struct drm_modeset_acquire_ctx *ctx);
/**
* @set_property:
@@ -577,9 +594,12 @@ struct drm_crtc_funcs {
* When CRC generation is enabled, the driver should call
* drm_crtc_add_crc_entry() at each frame, providing any information
* that characterizes the frame contents in the crcN arguments, as
- * provided from the configured source. Drivers must accept a "auto"
+ * provided from the configured source. Drivers must accept an "auto"
* source name that will select a default source for this CRTC.
*
+ * Note that "auto" can depend upon the current modeset configuration,
+ * e.g. it could pick an encoder or output specific CRC sampling point.
+ *
* This callback is optional if the driver does not support any CRC
* generation functionality.
*
@@ -601,6 +621,50 @@ struct drm_crtc_funcs {
*/
void (*atomic_print_state)(struct drm_printer *p,
const struct drm_crtc_state *state);
+
+ /**
+ * @get_vblank_counter:
+ *
+ * Driver callback for fetching a raw hardware vblank counter for the
+ * CRTC. It's meant to be used by new drivers as the replacement of
+ * &drm_driver.get_vblank_counter hook.
+ *
+ * This callback is optional. If a device doesn't have a hardware
+ * counter, the driver can simply leave the hook as NULL. The DRM core
+ * will account for missed vblank events while interrupts were disabled
+ * based on system timestamps.
+ *
+ * Wraparound handling and loss of events due to modesetting is dealt
+ * with in the DRM core code, as long as drivers call
+ * drm_crtc_vblank_off() and drm_crtc_vblank_on() when disabling or
+ * enabling a CRTC.
+ *
+ * Returns:
+ *
+ * Raw vblank counter value.
+ */
+ u32 (*get_vblank_counter)(struct drm_crtc *crtc);
+
+ /**
+ * @enable_vblank:
+ *
+ * Enable vblank interrupts for the CRTC. It's meant to be used by
+ * new drivers as the replacement of &drm_driver.enable_vblank hook.
+ *
+ * Returns:
+ *
+ * Zero on success, appropriate errno if the vblank interrupt cannot
+ * be enabled.
+ */
+ int (*enable_vblank)(struct drm_crtc *crtc);
+
+ /**
+ * @disable_vblank:
+ *
+ * Disable vblank interrupts for the CRTC. It's meant to be used by
+ * new drivers as the replacement of &drm_driver.disable_vblank hook.
+ */
+ void (*disable_vblank)(struct drm_crtc *crtc);
};
/**
@@ -639,10 +703,12 @@ struct drm_crtc {
/**
* @mutex:
*
- * This provides a read lock for the overall crtc state (mode, dpms
+ * This provides a read lock for the overall CRTC state (mode, dpms
* state, ...) and a write lock for everything which can be update
- * without a full modeset (fb, cursor data, crtc properties ...). A full
+ * without a full modeset (fb, cursor data, CRTC properties ...). A full
* modeset also need to grab &drm_mode_config.connection_mutex.
+ *
+ * For atomic drivers specifically this protects @state.
*/
struct drm_modeset_lock mutex;
@@ -688,6 +754,14 @@ struct drm_crtc {
* @state:
*
* Current atomic state for this CRTC.
+ *
+ * This is protected by @mutex. Note that nonblocking atomic commits
+ * access the current CRTC state without taking locks. Either by going
+ * through the &struct drm_atomic_state pointers, see
+ * for_each_crtc_in_state(), for_each_oldnew_crtc_in_state(),
+ * for_each_old_crtc_in_state() and for_each_new_crtc_in_state(). Or
+ * through careful ordering of atomic commit operations as implemented
+ * in the atomic helpers, see &struct drm_crtc_commit.
*/
struct drm_crtc_state *state;
@@ -709,15 +783,6 @@ struct drm_crtc {
*/
spinlock_t commit_lock;
- /**
- * @acquire_ctx:
- *
- * Per-CRTC implicit acquire context used by atomic drivers for legacy
- * IOCTLs, so that atomic drivers can get at the locking acquire
- * context.
- */
- struct drm_modeset_acquire_ctx *acquire_ctx;
-
#ifdef CONFIG_DEBUG_FS
/**
* @debugfs_entry:
@@ -725,6 +790,7 @@ struct drm_crtc {
* Debugfs directory for this CRTC.
*/
struct dentry *debugfs_entry;
+#endif
/**
* @crc:
@@ -732,7 +798,6 @@ struct drm_crtc {
* Configuration settings of CRC capture.
*/
struct drm_crtc_crc crc;
-#endif
/**
* @fence_context:
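A sketch of the new per-CRTC vblank hooks wired into drm_crtc_funcs; the my_* callbacks and my_read_hw_frame_counter() are hypothetical stand-ins for the driver's register accessors.

#include <drm/drm_crtc.h>
#include <drm/drm_atomic_helper.h>

static u32 my_crtc_get_vblank_counter(struct drm_crtc *crtc)
{
	return my_read_hw_frame_counter(crtc);	/* hypothetical register read */
}

static int my_crtc_enable_vblank(struct drm_crtc *crtc)
{
	/* unmask the vblank interrupt for this pipe */
	return 0;
}

static void my_crtc_disable_vblank(struct drm_crtc *crtc)
{
	/* mask the vblank interrupt for this pipe */
}

static const struct drm_crtc_funcs my_crtc_funcs = {
	.set_config		= drm_atomic_helper_set_config,
	.page_flip		= drm_atomic_helper_page_flip,
	.destroy		= drm_crtc_cleanup,
	.reset			= drm_atomic_helper_crtc_reset,
	.atomic_duplicate_state	= drm_atomic_helper_crtc_duplicate_state,
	.atomic_destroy_state	= drm_atomic_helper_crtc_destroy_state,
	.get_vblank_counter	= my_crtc_get_vblank_counter,
	.enable_vblank		= my_crtc_enable_vblank,
	.disable_vblank		= my_crtc_disable_vblank,
};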
diff --git a/include/drm/drm_crtc_helper.h b/include/drm/drm_crtc_helper.h
index d026f5017c33..76e237bd989b 100644
--- a/include/drm/drm_crtc_helper.h
+++ b/include/drm/drm_crtc_helper.h
@@ -43,18 +43,19 @@
#include <drm/drm_modeset_helper_vtables.h>
#include <drm/drm_modeset_helper.h>
-extern void drm_helper_disable_unused_functions(struct drm_device *dev);
-extern int drm_crtc_helper_set_config(struct drm_mode_set *set);
-extern bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
- struct drm_display_mode *mode,
- int x, int y,
- struct drm_framebuffer *old_fb);
-extern bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
-extern bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
+void drm_helper_disable_unused_functions(struct drm_device *dev);
+int drm_crtc_helper_set_config(struct drm_mode_set *set,
+ struct drm_modeset_acquire_ctx *ctx);
+bool drm_crtc_helper_set_mode(struct drm_crtc *crtc,
+ struct drm_display_mode *mode,
+ int x, int y,
+ struct drm_framebuffer *old_fb);
+bool drm_helper_crtc_in_use(struct drm_crtc *crtc);
+bool drm_helper_encoder_in_use(struct drm_encoder *encoder);
-extern int drm_helper_connector_dpms(struct drm_connector *connector, int mode);
+int drm_helper_connector_dpms(struct drm_connector *connector, int mode);
-extern void drm_helper_resume_force_mode(struct drm_device *dev);
+void drm_helper_resume_force_mode(struct drm_device *dev);
int drm_helper_crtc_mode_set(struct drm_crtc *crtc, struct drm_display_mode *mode,
struct drm_display_mode *adjusted_mode, int x, int y,
@@ -63,15 +64,18 @@ int drm_helper_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
struct drm_framebuffer *old_fb);
/* drm_probe_helper.c */
-extern int drm_helper_probe_single_connector_modes(struct drm_connector
- *connector, uint32_t maxX,
- uint32_t maxY);
-extern void drm_kms_helper_poll_init(struct drm_device *dev);
-extern void drm_kms_helper_poll_fini(struct drm_device *dev);
-extern bool drm_helper_hpd_irq_event(struct drm_device *dev);
-extern void drm_kms_helper_hotplug_event(struct drm_device *dev);
+int drm_helper_probe_single_connector_modes(struct drm_connector
+ *connector, uint32_t maxX,
+ uint32_t maxY);
+int drm_helper_probe_detect(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force);
+void drm_kms_helper_poll_init(struct drm_device *dev);
+void drm_kms_helper_poll_fini(struct drm_device *dev);
+bool drm_helper_hpd_irq_event(struct drm_device *dev);
+void drm_kms_helper_hotplug_event(struct drm_device *dev);
-extern void drm_kms_helper_poll_disable(struct drm_device *dev);
-extern void drm_kms_helper_poll_enable(struct drm_device *dev);
+void drm_kms_helper_poll_disable(struct drm_device *dev);
+void drm_kms_helper_poll_enable(struct drm_device *dev);
#endif
diff --git a/include/drm/drm_debugfs.h b/include/drm/drm_debugfs.h
new file mode 100644
index 000000000000..ac0f75df1ac9
--- /dev/null
+++ b/include/drm/drm_debugfs.h
@@ -0,0 +1,101 @@
+/*
+ * Internal Header for the Direct Rendering Manager
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009-2010, Code Aurora Forum.
+ * All rights reserved.
+ *
+ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
+ * Author: Gareth Hughes <gareth@valinux.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_DEBUGFS_H_
+#define _DRM_DEBUGFS_H_
+
+/**
+ * struct drm_info_list - debugfs info list entry
+ *
+ * This structure represents a debugfs file to be created by the drm
+ * core.
+ */
+struct drm_info_list {
+ /** @name: file name */
+ const char *name;
+ /**
+ * @show:
+ *
+ * Show callback. &seq_file->private will be set to the &struct
+ * drm_info_node corresponding to the instance of this info on a given
+ * &struct drm_minor.
+ */
+ int (*show)(struct seq_file*, void*);
+ /** @driver_features: Required driver features for this entry */
+ u32 driver_features;
+ /** @data: Driver-private data, should not be device-specific. */
+ void *data;
+};
+
+/**
+ * struct drm_info_node - Per-minor debugfs node structure
+ *
+ * This structure represents a debugfs file, as an instantiation of a &struct
+ * drm_info_list on a &struct drm_minor.
+ *
+ * FIXME:
+ *
+ * No it doesn't make a whole lot of sense that we duplicate debugfs entries for
+ * both the render and the primary nodes, but that's how this has organically
+ * grown. It should probably be fixed, with a compatibility link, if needed.
+ */
+struct drm_info_node {
+ /** @minor: &struct drm_minor for this node. */
+ struct drm_minor *minor;
+ /** @info_ent: template for this node. */
+ const struct drm_info_list *info_ent;
+ /* private: */
+ struct list_head list;
+ struct dentry *dent;
+};
+
+#if defined(CONFIG_DEBUG_FS)
+int drm_debugfs_create_files(const struct drm_info_list *files,
+ int count, struct dentry *root,
+ struct drm_minor *minor);
+int drm_debugfs_remove_files(const struct drm_info_list *files,
+ int count, struct drm_minor *minor);
+#else
+static inline int drm_debugfs_create_files(const struct drm_info_list *files,
+ int count, struct dentry *root,
+ struct drm_minor *minor)
+{
+ return 0;
+}
+
+static inline int drm_debugfs_remove_files(const struct drm_info_list *files,
+ int count, struct drm_minor *minor)
+{
+ return 0;
+}
+#endif
+
+#endif /* _DRM_DEBUGFS_H_ */
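A sketch of registering one debugfs info file with these helpers; my_gem_info and my_debugfs_init are hypothetical, but the seq_file->private contents follow the @show documentation above.

#include <drm/drm_debugfs.h>
#include <linux/seq_file.h>

static int my_gem_info(struct seq_file *m, void *unused)
{
	struct drm_info_node *node = m->private;
	struct drm_device *dev = node->minor->dev;

	seq_printf(m, "driver: %s\n", dev->driver->name);
	return 0;
}

static const struct drm_info_list my_debugfs_list[] = {
	/* name, show, driver_features, data */
	{ "my_gem_info", my_gem_info, 0, NULL },
};

int my_debugfs_init(struct drm_minor *minor)
{
	return drm_debugfs_create_files(my_debugfs_list,
					ARRAY_SIZE(my_debugfs_list),
					minor->debugfs_root, minor);
}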
diff --git a/include/drm/drm_dp_helper.h b/include/drm/drm_dp_helper.h
index 04681359a6f5..c0bd0d7651a9 100644
--- a/include/drm/drm_dp_helper.h
+++ b/include/drm/drm_dp_helper.h
@@ -417,6 +417,63 @@
#define DP_TEST_LANE_COUNT 0x220
#define DP_TEST_PATTERN 0x221
+# define DP_NO_TEST_PATTERN 0x0
+# define DP_COLOR_RAMP 0x1
+# define DP_BLACK_AND_WHITE_VERTICAL_LINES 0x2
+# define DP_COLOR_SQUARE 0x3
+
+#define DP_TEST_H_TOTAL_HI 0x222
+#define DP_TEST_H_TOTAL_LO 0x223
+
+#define DP_TEST_V_TOTAL_HI 0x224
+#define DP_TEST_V_TOTAL_LO 0x225
+
+#define DP_TEST_H_START_HI 0x226
+#define DP_TEST_H_START_LO 0x227
+
+#define DP_TEST_V_START_HI 0x228
+#define DP_TEST_V_START_LO 0x229
+
+#define DP_TEST_HSYNC_HI 0x22A
+# define DP_TEST_HSYNC_POLARITY (1 << 7)
+# define DP_TEST_HSYNC_WIDTH_HI_MASK (127 << 0)
+#define DP_TEST_HSYNC_WIDTH_LO 0x22B
+
+#define DP_TEST_VSYNC_HI 0x22C
+# define DP_TEST_VSYNC_POLARITY (1 << 7)
+# define DP_TEST_VSYNC_WIDTH_HI_MASK (127 << 0)
+#define DP_TEST_VSYNC_WIDTH_LO 0x22D
+
+#define DP_TEST_H_WIDTH_HI 0x22E
+#define DP_TEST_H_WIDTH_LO 0x22F
+
+#define DP_TEST_V_HEIGHT_HI 0x230
+#define DP_TEST_V_HEIGHT_LO 0x231
+
+#define DP_TEST_MISC0 0x232
+# define DP_TEST_SYNC_CLOCK (1 << 0)
+# define DP_TEST_COLOR_FORMAT_MASK (3 << 1)
+# define DP_TEST_COLOR_FORMAT_SHIFT 1
+# define DP_COLOR_FORMAT_RGB (0 << 1)
+# define DP_COLOR_FORMAT_YCbCr422 (1 << 1)
+# define DP_COLOR_FORMAT_YCbCr444 (2 << 1)
+# define DP_TEST_DYNAMIC_RANGE_CEA (1 << 3)
+# define DP_TEST_YCBCR_COEFFICIENTS (1 << 4)
+# define DP_YCBCR_COEFFICIENTS_ITU601 (0 << 4)
+# define DP_YCBCR_COEFFICIENTS_ITU709 (1 << 4)
+# define DP_TEST_BIT_DEPTH_MASK (7 << 5)
+# define DP_TEST_BIT_DEPTH_SHIFT 5
+# define DP_TEST_BIT_DEPTH_6 (0 << 5)
+# define DP_TEST_BIT_DEPTH_8 (1 << 5)
+# define DP_TEST_BIT_DEPTH_10 (2 << 5)
+# define DP_TEST_BIT_DEPTH_12 (3 << 5)
+# define DP_TEST_BIT_DEPTH_16 (4 << 5)
+
+#define DP_TEST_MISC1 0x233
+# define DP_TEST_REFRESH_DENOMINATOR (1 << 0)
+# define DP_TEST_INTERLACED (1 << 1)
+
+#define DP_TEST_REFRESH_RATE_NUMERATOR 0x234
#define DP_TEST_CRC_R_CR 0x240
#define DP_TEST_CRC_G_Y 0x242
@@ -732,7 +789,10 @@ struct drm_dp_aux_msg {
* @name: user-visible name of this AUX channel and the I2C-over-AUX adapter
* @ddc: I2C adapter that can be used for I2C-over-AUX communication
* @dev: pointer to struct device that is the parent for this AUX channel
+ * @crtc: backpointer to the crtc that is currently using this AUX channel
* @hw_mutex: internal mutex used for locking transfers
+ * @crc_work: worker that captures CRCs for each frame
+ * @crc_count: counter of captured frame CRCs
* @transfer: transfers a message representing a single AUX transaction
*
* The .dev field should be set to a pointer to the device that implements
@@ -768,7 +828,10 @@ struct drm_dp_aux {
const char *name;
struct i2c_adapter ddc;
struct device *dev;
+ struct drm_crtc *crtc;
struct mutex hw_mutex;
+ struct work_struct crc_work;
+ u8 crc_count;
ssize_t (*transfer)(struct drm_dp_aux *aux,
struct drm_dp_aux_msg *msg);
/**
@@ -847,4 +910,7 @@ void drm_dp_aux_init(struct drm_dp_aux *aux);
int drm_dp_aux_register(struct drm_dp_aux *aux);
void drm_dp_aux_unregister(struct drm_dp_aux *aux);
+int drm_dp_start_crc(struct drm_dp_aux *aux, struct drm_crtc *crtc);
+int drm_dp_stop_crc(struct drm_dp_aux *aux);
+
#endif /* _DRM_DP_HELPER_H_ */
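
A small hedged sketch of the new CRC helpers: the driver hands its &struct drm_dp_aux and the capturing CRTC to drm_dp_start_crc(), which kicks off the @crc_work worker documented above. foo_set_crc_enabled() is a hypothetical wrapper.

static int foo_set_crc_enabled(struct drm_dp_aux *aux,
                               struct drm_crtc *crtc, bool enable)
{
        if (enable)
                return drm_dp_start_crc(aux, crtc);

        return drm_dp_stop_crc(aux);
}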
diff --git a/include/drm/drm_dp_mst_helper.h b/include/drm/drm_dp_mst_helper.h
index f4b4d154b98e..5b024764666c 100644
--- a/include/drm/drm_dp_mst_helper.h
+++ b/include/drm/drm_dp_mst_helper.h
@@ -479,18 +479,6 @@ struct drm_dp_mst_topology_mgr {
* @pbn_div: PBN to slots divisor.
*/
int pbn_div;
- /**
- * @total_slots: Total slots that can be allocated.
- */
- int total_slots;
- /**
- * @avail_slots: Still available slots that can be allocated.
- */
- int avail_slots;
- /**
- * @total_pbn: Total PBN count.
- */
- int total_pbn;
/**
* @qlock: protects @tx_msg_downq, the &drm_dp_mst_branch.txslost and
@@ -579,7 +567,8 @@ struct edid *drm_dp_mst_get_edid(struct drm_connector *connector, struct drm_dp_
int drm_dp_calc_pbn_mode(int clock, int bpp);
-bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port, int pbn, int *slots);
+bool drm_dp_mst_allocate_vcpi(struct drm_dp_mst_topology_mgr *mgr,
+ struct drm_dp_mst_port *port, int pbn, int slots);
int drm_dp_mst_get_vcpi_slots(struct drm_dp_mst_topology_mgr *mgr, struct drm_dp_mst_port *port);
diff --git a/include/drm/drm_drv.h b/include/drm/drm_drv.h
index 5699f42195fe..53b98321df9b 100644
--- a/include/drm/drm_drv.h
+++ b/include/drm/drm_drv.h
@@ -64,7 +64,6 @@ struct drm_mode_create_dumb;
* structure for GEM drivers.
*/
struct drm_driver {
-
/**
* @load:
*
@@ -76,14 +75,94 @@ struct drm_driver {
* See drm_dev_init() and drm_dev_register() for proper and
* race-free way to set up a &struct drm_device.
*
+ * This is deprecated, do not use!
+ *
* Returns:
*
* Zero on success, non-zero value on failure.
*/
int (*load) (struct drm_device *, unsigned long flags);
+
+ /**
+ * @open:
+ *
+ * Driver callback when a new &struct drm_file is opened. Useful for
+ * setting up driver-private data structures like buffer allocators,
+ * execution contexts or similar things. Such driver-private resources
+ * must be released again in @postclose.
+ *
+ * Since the display/modeset side of DRM can only be owned by exactly
+ * one &struct drm_file (see &drm_file.is_master and &drm_device.master)
+ * there should never be a need to set up any modeset related resources
+ * in this callback. Doing so would be a driver design bug.
+ *
+ * Returns:
+ *
+ * 0 on success, a negative error code on failure, which will be
+ * propagated to userspace as the result of the open() system call.
+ */
int (*open) (struct drm_device *, struct drm_file *);
+
+ /**
+ * @preclose:
+ *
+ * One of the driver callbacks when a new &struct drm_file is closed.
+ * Useful for tearing down driver-private data structures allocated in
+ * @open like buffer allocators, execution contexts or similar things.
+ *
+ * Since the display/modeset side of DRM can only be owned by exactly
+ * one &struct drm_file (see &drm_file.is_master and &drm_device.master)
+ * there should never be a need to tear down any modeset related
+ * resources in this callback. Doing so would be a driver design bug.
+ *
+ * FIXME: It is not really clear why there's both @preclose and
+ * @postclose. Without a really good reason, use @postclose only.
+ */
void (*preclose) (struct drm_device *, struct drm_file *file_priv);
+
+ /**
+ * @postclose:
+ *
+ * One of the driver callbacks when a new &struct drm_file is closed.
+ * Useful for tearing down driver-private data structures allocated in
+ * @open like buffer allocators, execution contexts or similar things.
+ *
+ * Since the display/modeset side of DRM can only be owned by exactly
+ * one &struct drm_file (see &drm_file.is_master and &drm_device.master)
+ * there should never be a need to tear down any modeset related
+ * resources in this callback. Doing so would be a driver design bug.
+ *
+ * FIXME: It is not really clear why there's both @preclose and
+ * @postclose. Without a really good reason, use @postclose only.
+ */
void (*postclose) (struct drm_device *, struct drm_file *);
+
+ /**
+ * @lastclose:
+ *
+ * Called when the last &struct drm_file has been closed and there's
+ * currently no userspace client for the &struct drm_device.
+ *
+ * Modern drivers should only use this to force-restore the fbdev
+ * framebuffer using drm_fb_helper_restore_fbdev_mode_unlocked().
+ * Anything else would indicate there's something seriously wrong.
+ * Modern drivers can also use this to execute delayed power switching
+ * state changes, e.g. in conjunction with the :ref:`vga_switcheroo`
+ * infrastructure.
+ *
+ * This is called after @preclose and @postclose have been called.
+ *
+ * NOTE:
+ *
+ * All legacy drivers use this callback to de-initialize the hardware.
+ * This is purely because of the shadow-attach model, where the DRM
+ * kernel driver does not really own the hardware. Instead ownership is
+ * handled with the help of userspace through an inherently racy dance
+ * to set/unset the VT into raw mode.
+ *
+ * Legacy drivers initialize the hardware in the @firstopen callback,
+ * which isn't even called for modern drivers.
+ */
void (*lastclose) (struct drm_device *);
/**
@@ -120,16 +199,18 @@ struct drm_driver {
*
* Driver callback for fetching a raw hardware vblank counter for the
* CRTC specified with the pipe argument. If a device doesn't have a
- * hardware counter, the driver can simply use
- * drm_vblank_no_hw_counter() function. The DRM core will account for
- * missed vblank events while interrupts where disabled based on system
- * timestamps.
+ * hardware counter, the driver can simply leave the hook as NULL.
+ * The DRM core will account for missed vblank events while interrupts
+ * were disabled based on system timestamps.
*
* Wraparound handling and loss of events due to modesetting is dealt
* with in the DRM core code, as long as drivers call
* drm_crtc_vblank_off() and drm_crtc_vblank_on() when disabling or
* enabling a CRTC.
*
+ * This is deprecated and should not be used by new drivers.
+ * Use &drm_crtc_funcs.get_vblank_counter instead.
+ *
* Returns:
*
* Raw vblank counter value.
@@ -142,6 +223,9 @@ struct drm_driver {
* Enable vblank interrupts for the CRTC specified with the pipe
* argument.
*
+ * This is deprecated and should not be used by new drivers.
+ * Use &drm_crtc_funcs.enable_vblank instead.
+ *
* Returns:
*
* Zero on success, appropriate errno if the given @crtc's vblank
@@ -154,6 +238,9 @@ struct drm_driver {
*
* Disable vblank interrupts for the CRTC specified with the pipe
* argument.
+ *
+ * This is deprecated and should not be used by new drivers.
+ * Use &drm_crtc_funcs.disable_vblank instead.
*/
void (*disable_vblank) (struct drm_device *dev, unsigned int pipe);
@@ -294,7 +381,6 @@ struct drm_driver {
void (*master_drop)(struct drm_device *dev, struct drm_file *file_priv);
int (*debugfs_init)(struct drm_minor *minor);
- void (*debugfs_cleanup)(struct drm_minor *minor);
/**
* @gem_free_object: deconstructor for drm_gem_objects
@@ -436,11 +522,11 @@ struct drm_driver {
int dev_priv_size;
};
-extern __printf(6, 7)
+__printf(6, 7)
void drm_dev_printk(const struct device *dev, const char *level,
unsigned int category, const char *function_name,
const char *prefix, const char *format, ...);
-extern __printf(3, 4)
+__printf(3, 4)
void drm_printk(const char *level, unsigned int category,
const char *format, ...);
extern unsigned int drm_debug;
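
A hedged sketch of the @open/@postclose pairing documented above: allocate driver-private per-file state in open, release it in postclose. struct foo_file and the foo_* names are hypothetical.

struct foo_file {
        struct idr context_idr;
};

static int foo_driver_open(struct drm_device *dev, struct drm_file *file)
{
        struct foo_file *fpriv;

        fpriv = kzalloc(sizeof(*fpriv), GFP_KERNEL);
        if (!fpriv)
                return -ENOMEM;

        idr_init(&fpriv->context_idr);
        file->driver_priv = fpriv;
        return 0;
}

static void foo_driver_postclose(struct drm_device *dev, struct drm_file *file)
{
        struct foo_file *fpriv = file->driver_priv;

        idr_destroy(&fpriv->context_idr);
        kfree(fpriv);
}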
diff --git a/include/drm/drm_edid.h b/include/drm/drm_edid.h
index 577d5063e63d..7b9f48b62e07 100644
--- a/include/drm/drm_edid.h
+++ b/include/drm/drm_edid.h
@@ -332,11 +332,12 @@ int drm_av_sync_delay(struct drm_connector *connector,
const struct drm_display_mode *mode);
#ifdef CONFIG_DRM_LOAD_EDID_FIRMWARE
-int drm_load_edid_firmware(struct drm_connector *connector);
+struct edid *drm_load_edid_firmware(struct drm_connector *connector);
#else
-static inline int drm_load_edid_firmware(struct drm_connector *connector)
+static inline struct edid *
+drm_load_edid_firmware(struct drm_connector *connector)
{
- return 0;
+ return ERR_PTR(-ENOENT);
}
#endif
@@ -475,5 +476,4 @@ void drm_edid_get_monitor_name(struct edid *edid, char *name,
struct drm_display_mode *drm_mode_find_dmt(struct drm_device *dev,
int hsize, int vsize, int fresh,
bool rb);
-
#endif /* __DRM_EDID_H__ */
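
With the new return type a caller checks the returned pointer instead of an int. A hedged sketch, assuming the usual drm_mode_connector_update_edid_property() and drm_add_edid_modes() helpers; foo_connector_get_modes() is hypothetical.

static int foo_connector_get_modes(struct drm_connector *connector)
{
        struct edid *edid;
        int count = 0;

        edid = drm_load_edid_firmware(connector);
        if (!IS_ERR_OR_NULL(edid)) {
                drm_mode_connector_update_edid_property(connector, edid);
                count = drm_add_edid_modes(connector, edid);
                kfree(edid);
        }

        return count;
}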
diff --git a/include/drm/drm_fb_helper.h b/include/drm/drm_fb_helper.h
index 6f5acebb266a..119e5e4609c7 100644
--- a/include/drm/drm_fb_helper.h
+++ b/include/drm/drm_fb_helper.h
@@ -230,7 +230,8 @@ struct drm_fb_helper {
.fb_blank = drm_fb_helper_blank, \
.fb_pan_display = drm_fb_helper_pan_display, \
.fb_debug_enter = drm_fb_helper_debug_enter, \
- .fb_debug_leave = drm_fb_helper_debug_leave
+ .fb_debug_leave = drm_fb_helper_debug_leave, \
+ .fb_ioctl = drm_fb_helper_ioctl
#ifdef CONFIG_DRM_FBDEV_EMULATION
void drm_fb_helper_prepare(struct drm_device *dev, struct drm_fb_helper *helper,
@@ -249,7 +250,6 @@ int drm_fb_helper_restore_fbdev_mode_unlocked(struct drm_fb_helper *fb_helper);
struct fb_info *drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper);
void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper);
-void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper);
void drm_fb_helper_fill_var(struct fb_info *info, struct drm_fb_helper *fb_helper,
uint32_t fb_width, uint32_t fb_height);
void drm_fb_helper_fill_fix(struct fb_info *info, uint32_t pitch,
@@ -285,6 +285,9 @@ void drm_fb_helper_set_suspend_unlocked(struct drm_fb_helper *fb_helper,
int drm_fb_helper_setcmap(struct fb_cmap *cmap, struct fb_info *info);
+int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
+ unsigned long arg);
+
int drm_fb_helper_hotplug_event(struct drm_fb_helper *fb_helper);
int drm_fb_helper_initial_config(struct drm_fb_helper *fb_helper, int bpp_sel);
int drm_fb_helper_single_add_all_connectors(struct drm_fb_helper *fb_helper);
@@ -354,9 +357,6 @@ drm_fb_helper_alloc_fbi(struct drm_fb_helper *fb_helper)
static inline void drm_fb_helper_unregister_fbi(struct drm_fb_helper *fb_helper)
{
}
-static inline void drm_fb_helper_release_fbi(struct drm_fb_helper *fb_helper)
-{
-}
static inline void drm_fb_helper_fill_var(struct fb_info *info,
struct drm_fb_helper *fb_helper,
@@ -375,6 +375,12 @@ static inline int drm_fb_helper_setcmap(struct fb_cmap *cmap,
return 0;
}
+static inline int drm_fb_helper_ioctl(struct fb_info *info, unsigned int cmd,
+ unsigned long arg)
+{
+ return 0;
+}
+
static inline void drm_fb_helper_unlink_fbi(struct drm_fb_helper *fb_helper)
{
}
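
Drivers that use the default ops macro pick up the new .fb_ioctl handler automatically. A hedged sketch with a hypothetical foo_fb_ops, filled out with the existing cfb drawing helpers.

static struct fb_ops foo_fb_ops = {
        .owner = THIS_MODULE,
        DRM_FB_HELPER_DEFAULT_OPS,
        .fb_fillrect = drm_fb_helper_cfb_fillrect,
        .fb_copyarea = drm_fb_helper_cfb_copyarea,
        .fb_imageblit = drm_fb_helper_cfb_imageblit,
};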
diff --git a/include/drm/drm_file.h b/include/drm/drm_file.h
new file mode 100644
index 000000000000..5dd27ae5c47c
--- /dev/null
+++ b/include/drm/drm_file.h
@@ -0,0 +1,375 @@
+/*
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009-2010, Code Aurora Forum.
+ * All rights reserved.
+ *
+ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
+ * Author: Gareth Hughes <gareth@valinux.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_FILE_H_
+#define _DRM_FILE_H_
+
+#include <linux/types.h>
+#include <linux/completion.h>
+
+#include <uapi/drm/drm.h>
+
+#include <drm/drm_prime.h>
+
+struct dma_fence;
+struct drm_file;
+struct drm_device;
+
+/*
+ * FIXME: Not sure we want to have drm_minor here in the end, but to avoid
+ * header include loops we need it here for now.
+ */
+
+enum drm_minor_type {
+ DRM_MINOR_PRIMARY,
+ DRM_MINOR_CONTROL,
+ DRM_MINOR_RENDER,
+};
+
+/**
+ * struct drm_minor - DRM device minor structure
+ *
+ * This structure represents a DRM minor number for device nodes in /dev.
+ * Entirely opaque to drivers and should never be inspected directly.
+ * Drivers should instead only interact with &struct drm_file and of course
+ * &struct drm_device, which is also where driver-private data and resources
+ * can be attached.
+ */
+struct drm_minor {
+ /* private: */
+ int index; /* Minor device number */
+ int type; /* Control or render */
+ struct device *kdev; /* Linux device */
+ struct drm_device *dev;
+
+ struct dentry *debugfs_root;
+
+ struct list_head debugfs_list;
+ struct mutex debugfs_lock; /* Protects debugfs_list. */
+};
+
+/**
+ * struct drm_pending_event - Event queued up for userspace to read
+ *
+ * This represents a DRM event. Drivers can use this as a generic completion
+ * mechanism, which supports kernel-internal &struct completion, &struct dma_fence
+ * and also the DRM-specific &struct drm_event delivery mechanism.
+ */
+struct drm_pending_event {
+ /**
+ * @completion:
+ *
+ * Optional pointer to a kernel internal completion signalled when
+ * drm_send_event() is called, useful to internally synchronize with
+ * nonblocking operations.
+ */
+ struct completion *completion;
+
+ /**
+ * @completion_release:
+ *
+ * Optional callback currently only used by the atomic modeset helpers
+ * to clean up the reference count for the structure @completion is
+ * stored in.
+ */
+ void (*completion_release)(struct completion *completion);
+
+ /**
+ * @event:
+ *
+ * Pointer to the actual event that should be sent to userspace to be
+ * read using drm_read(). Can be optional, since nowadays events are
+ * also used to signal kernel internal threads with @completion or DMA
+ * transactions using @fence.
+ */
+ struct drm_event *event;
+
+ /**
+ * @fence:
+ *
+ * Optional DMA fence to unblock other hardware transactions which
+ * depend upon the nonblocking DRM operation this event represents.
+ */
+ struct dma_fence *fence;
+
+ /**
+ * @file_priv:
+ *
+ * &struct drm_file where @event should be delivered to. Only set when
+ * @event is set.
+ */
+ struct drm_file *file_priv;
+
+ /**
+ * @link:
+ *
+ * Double-linked list to keep track of this event. Can be used by the
+ * driver up to the point when it calls drm_send_event(), after that
+ * this list entry is owned by the core for its own book-keeping.
+ */
+ struct list_head link;
+
+ /**
+ * @pending_link:
+ *
+ * Entry on &drm_file.pending_event_list, to keep track of all pending
+ * events for @file_priv, to allow correct unwinding of them when
+ * userspace closes the file before the event is delivered.
+ */
+ struct list_head pending_link;
+};
+
+/**
+ * struct drm_file - DRM file private data
+ *
+ * This structure tracks DRM state per open file descriptor.
+ */
+struct drm_file {
+ /**
+ * @authenticated:
+ *
+ * Whether the client is allowed to submit rendering, which for legacy
+ * nodes means it must be authenticated.
+ *
+ * See also the :ref:`section on primary nodes and authentication
+ * <drm_primary_node>`.
+ */
+ unsigned authenticated :1;
+
+ /**
+ * @stereo_allowed:
+ *
+ * True when the client has asked us to expose stereo 3D mode flags.
+ */
+ unsigned stereo_allowed :1;
+
+ /**
+ * @universal_planes:
+ *
+ * True if client understands CRTC primary planes and cursor planes
+ * in the plane list. Automatically set when @atomic is set.
+ */
+ unsigned universal_planes:1;
+
+ /** @atomic: True if client understands atomic properties. */
+ unsigned atomic:1;
+
+ /**
+ * @is_master:
+ *
+ * This client is the creator of @master. Protected by
+ * &drm_device.master_mutex.
+ *
+ * See also the :ref:`section on primary nodes and authentication
+ * <drm_primary_node>`.
+ */
+ unsigned is_master:1;
+
+ /**
+ * @master:
+ *
+ * Master this node is currently associated with. Only relevant if
+ * drm_is_primary_client() returns true. Note that this only
+ * matches &drm_device.master if the master is the currently active one.
+ *
+ * See also @authenticated and @is_master and the :ref:`section on
+ * primary nodes and authentication <drm_primary_node>`.
+ */
+ struct drm_master *master;
+
+ /** @pid: Process that opened this file. */
+ struct pid *pid;
+
+ /** @magic: Authentication magic, see @authenticated. */
+ drm_magic_t magic;
+
+ /**
+ * @lhead:
+ *
+ * List of all open files of a DRM device, linked into
+ * &drm_device.filelist. Protected by &drm_device.filelist_mutex.
+ */
+ struct list_head lhead;
+
+ /** @minor: &struct drm_minor for this file. */
+ struct drm_minor *minor;
+
+ /**
+ * @object_idr:
+ *
+ * Mapping of mm object handles to object pointers. Used by the GEM
+ * subsystem. Protected by @table_lock.
+ */
+ struct idr object_idr;
+
+ /** @table_lock: Protects @object_idr. */
+ spinlock_t table_lock;
+
+ /** @filp: Pointer to the core file structure. */
+ struct file *filp;
+
+ /**
+ * @driver_priv:
+ *
+ * Optional pointer for driver private data. Can be allocated in
+ * &drm_driver.open and should be freed in &drm_driver.postclose.
+ */
+ void *driver_priv;
+
+ /**
+ * @fbs:
+ *
+ * List of &struct drm_framebuffer associated with this file, using the
+ * &drm_framebuffer.filp_head entry.
+ *
+ * Protected by @fbs_lock. Note that the @fbs list holds a reference on
+ * the framebuffer object to prevent it from untimely disappearing.
+ */
+ struct list_head fbs;
+
+ /** @fbs_lock: Protects @fbs. */
+ struct mutex fbs_lock;
+
+ /**
+ * @blobs:
+ *
+ * User-created blob properties; this retains a reference on the
+ * property.
+ *
+ * Protected by &drm_mode_config.blob_lock.
+ */
+ struct list_head blobs;
+
+ /** @event_wait: Waitqueue for new events added to @event_list. */
+ wait_queue_head_t event_wait;
+
+ /**
+ * @pending_event_list:
+ *
+ * List of pending &struct drm_pending_event, used to clean up pending
+ * events in case this file gets closed before the event is signalled.
+ * Uses the &drm_pending_event.pending_link entry.
+ *
+ * Protected by &drm_device.event_lock.
+ */
+ struct list_head pending_event_list;
+
+ /**
+ * @event_list:
+ *
+ * List of &struct drm_pending_event, ready for delivery to userspace
+ * through drm_read(). Uses the &drm_pending_event.link entry.
+ *
+ * Protected by &drm_device.event_lock.
+ */
+ struct list_head event_list;
+
+ /**
+ * @event_space:
+ *
+ * Available event space to prevent userspace from
+ * exhausting kernel memory. Currently limited to the fairly arbitrary
+ * value of 4KB.
+ */
+ int event_space;
+
+ /** @event_read_lock: Serializes drm_read(). */
+ struct mutex event_read_lock;
+
+ /**
+ * @prime:
+ *
+ * Per-file buffer caches used by the PRIME buffer sharing code.
+ */
+ struct drm_prime_file_private prime;
+
+ /* private: */
+ unsigned long lock_count; /* DRI1 legacy lock count */
+};
+
+/**
+ * drm_is_primary_client - is this an open file of the primary node
+ * @file_priv: DRM file
+ *
+ * Returns true if this is an open file of the primary node, i.e.
+ * &drm_file.minor of @file_priv is a primary minor.
+ *
+ * See also the :ref:`section on primary nodes and authentication
+ * <drm_primary_node>`.
+ */
+static inline bool drm_is_primary_client(const struct drm_file *file_priv)
+{
+ return file_priv->minor->type == DRM_MINOR_PRIMARY;
+}
+
+/**
+ * drm_is_render_client - is this an open file of the render node
+ * @file_priv: DRM file
+ *
+ * Returns true if this is an open file of the render node, i.e.
+ * &drm_file.minor of @file_priv is a render minor.
+ *
+ * See also the :ref:`section on render nodes <drm_render_node>`.
+ */
+static inline bool drm_is_render_client(const struct drm_file *file_priv)
+{
+ return file_priv->minor->type == DRM_MINOR_RENDER;
+}
+
+/**
+ * drm_is_control_client - is this an open file of the control node
+ * @file_priv: DRM file
+ *
+ * Control nodes are deprecated and in the process of getting removed from the
+ * DRM userspace API. Do not ever use!
+ */
+static inline bool drm_is_control_client(const struct drm_file *file_priv)
+{
+ return file_priv->minor->type == DRM_MINOR_CONTROL;
+}
+
+int drm_open(struct inode *inode, struct file *filp);
+ssize_t drm_read(struct file *filp, char __user *buffer,
+ size_t count, loff_t *offset);
+int drm_release(struct inode *inode, struct file *filp);
+unsigned int drm_poll(struct file *filp, struct poll_table_struct *wait);
+int drm_event_reserve_init_locked(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_pending_event *p,
+ struct drm_event *e);
+int drm_event_reserve_init(struct drm_device *dev,
+ struct drm_file *file_priv,
+ struct drm_pending_event *p,
+ struct drm_event *e);
+void drm_event_cancel_free(struct drm_device *dev,
+ struct drm_pending_event *p);
+void drm_send_event_locked(struct drm_device *dev, struct drm_pending_event *e);
+void drm_send_event(struct drm_device *dev, struct drm_pending_event *e);
+
+#endif /* _DRM_FILE_H_ */
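
A hedged sketch of the &struct drm_pending_event flow: reserve event space against the file while it is still open, then deliver the event from completion (or interrupt) context. struct foo_event and the foo_* functions are hypothetical; drm_event_reserve_init() and drm_send_event() are declared above.

struct foo_event {
        struct drm_pending_event base;
        struct drm_event_vblank event;
};

static int foo_queue_event(struct drm_device *dev, struct drm_file *file_priv,
                           struct foo_event **out)
{
        struct foo_event *e;
        int ret;

        e = kzalloc(sizeof(*e), GFP_KERNEL);
        if (!e)
                return -ENOMEM;

        e->event.base.type = DRM_EVENT_VBLANK;
        e->event.base.length = sizeof(e->event);

        /* Charges &drm_file.event_space and links onto @pending_event_list. */
        ret = drm_event_reserve_init(dev, file_priv, &e->base, &e->event.base);
        if (ret) {
                kfree(e);
                return ret;
        }

        *out = e;
        return 0;
}

static void foo_complete_event(struct drm_device *dev, struct foo_event *e)
{
        /* Moves the event onto &drm_file.event_list for drm_read(). */
        drm_send_event(dev, &e->base);
}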
diff --git a/include/drm/drm_fourcc.h b/include/drm/drm_fourcc.h
index fcc08da850c8..6942e84b6edd 100644
--- a/include/drm/drm_fourcc.h
+++ b/include/drm/drm_fourcc.h
@@ -25,6 +25,9 @@
#include <linux/types.h>
#include <uapi/drm/drm_fourcc.h>
+struct drm_device;
+struct drm_mode_fb_cmd2;
+
/**
* struct drm_format_info - information about a DRM format
* @format: 4CC format identifier (DRM_FORMAT_*)
@@ -55,6 +58,9 @@ struct drm_format_name_buf {
const struct drm_format_info *__drm_format_info(u32 format);
const struct drm_format_info *drm_format_info(u32 format);
+const struct drm_format_info *
+drm_get_format_info(struct drm_device *dev,
+ const struct drm_mode_fb_cmd2 *mode_cmd);
uint32_t drm_mode_legacy_fb_format(uint32_t bpp, uint32_t depth);
int drm_format_num_planes(uint32_t format);
int drm_format_plane_cpp(uint32_t format, int plane);
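
A minimal sketch of the new drm_get_format_info() used in an fb-create path; foo_check_size() is hypothetical.

static int foo_check_size(struct drm_device *dev,
                          const struct drm_mode_fb_cmd2 *mode_cmd)
{
        const struct drm_format_info *info;

        info = drm_get_format_info(dev, mode_cmd);
        if (!info)
                return -EINVAL;

        /* Plane 0 pitch must cover at least width * bytes-per-pixel. */
        if (mode_cmd->pitches[0] < mode_cmd->width * info->cpp[0])
                return -EINVAL;

        return 0;
}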
diff --git a/include/drm/drm_framebuffer.h b/include/drm/drm_framebuffer.h
index dd1e3e99dcff..5244f059d23a 100644
--- a/include/drm/drm_framebuffer.h
+++ b/include/drm/drm_framebuffer.h
@@ -101,8 +101,8 @@ struct drm_framebuffer_funcs {
* cleanup (like releasing the reference(s) on the backing GEM bo(s))
* should be deferred. In cases like this, the driver would like to
* hold a ref to the fb even though it has already been removed from
- * userspace perspective. See drm_framebuffer_reference() and
- * drm_framebuffer_unreference().
+ * userspace perspective. See drm_framebuffer_get() and
+ * drm_framebuffer_put().
*
* The refcount is stored inside the mode object @base.
*/
@@ -204,25 +204,50 @@ void drm_framebuffer_cleanup(struct drm_framebuffer *fb);
void drm_framebuffer_unregister_private(struct drm_framebuffer *fb);
/**
- * drm_framebuffer_reference - incr the fb refcnt
- * @fb: framebuffer
+ * drm_framebuffer_get - acquire a framebuffer reference
+ * @fb: DRM framebuffer
+ *
+ * This function increments the framebuffer's reference count.
+ */
+static inline void drm_framebuffer_get(struct drm_framebuffer *fb)
+{
+ drm_mode_object_get(&fb->base);
+}
+
+/**
+ * drm_framebuffer_put - release a framebuffer reference
+ * @fb: DRM framebuffer
+ *
+ * This function decrements the framebuffer's reference count and frees the
+ * framebuffer if the reference count drops to zero.
+ */
+static inline void drm_framebuffer_put(struct drm_framebuffer *fb)
+{
+ drm_mode_object_put(&fb->base);
+}
+
+/**
+ * drm_framebuffer_reference - acquire a framebuffer reference
+ * @fb: DRM framebuffer
*
- * This functions increments the fb's refcount.
+ * This is a compatibility alias for drm_framebuffer_get() and should not be
+ * used by new code.
*/
static inline void drm_framebuffer_reference(struct drm_framebuffer *fb)
{
- drm_mode_object_reference(&fb->base);
+ drm_framebuffer_get(fb);
}
/**
- * drm_framebuffer_unreference - unref a framebuffer
- * @fb: framebuffer to unref
+ * drm_framebuffer_unreference - release a framebuffer reference
+ * @fb: DRM framebuffer
*
- * This functions decrements the fb's refcount and frees it if it drops to zero.
+ * This is a compatibility alias for drm_framebuffer_put() and should not be
+ * used by new code.
*/
static inline void drm_framebuffer_unreference(struct drm_framebuffer *fb)
{
- drm_mode_object_unreference(&fb->base);
+ drm_framebuffer_put(fb);
}
/**
@@ -248,9 +273,9 @@ static inline void drm_framebuffer_assign(struct drm_framebuffer **p,
struct drm_framebuffer *fb)
{
if (fb)
- drm_framebuffer_reference(fb);
+ drm_framebuffer_get(fb);
if (*p)
- drm_framebuffer_unreference(*p);
+ drm_framebuffer_put(*p);
*p = fb;
}
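
A hedged sketch of the delayed-cleanup pattern mentioned in the framebuffer @destroy comment, using the new get/put names. struct foo_flip_work and the foo_* functions are hypothetical.

struct foo_flip_work {
        struct work_struct work;
        struct drm_framebuffer *old_fb;
};

static void foo_queue_unref(struct foo_flip_work *flip,
                            struct drm_framebuffer *old_fb)
{
        drm_framebuffer_get(old_fb);    /* keep the fb alive until the work runs */
        flip->old_fb = old_fb;
        schedule_work(&flip->work);
}

static void foo_flip_work_fn(struct work_struct *work)
{
        struct foo_flip_work *flip =
                container_of(work, struct foo_flip_work, work);

        drm_framebuffer_put(flip->old_fb);
}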
diff --git a/include/drm/drm_gem.h b/include/drm/drm_gem.h
index 449a41b56ffc..663d80358057 100644
--- a/include/drm/drm_gem.h
+++ b/include/drm/drm_gem.h
@@ -34,6 +34,10 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
+#include <linux/kref.h>
+
+#include <drm/drm_vma_manager.h>
+
/**
* struct drm_gem_object - GEM buffer object
*
@@ -48,9 +52,9 @@ struct drm_gem_object {
*
* Reference count of this object
*
- * Please use drm_gem_object_reference() to acquire and
- * drm_gem_object_unreference() or drm_gem_object_unreference_unlocked()
- * to release a reference to a GEM buffer object.
+ * Please use drm_gem_object_get() to acquire and drm_gem_object_put()
+ * or drm_gem_object_put_unlocked() to release a reference to a GEM
+ * buffer object.
*/
struct kref refcount;
@@ -174,6 +178,32 @@ struct drm_gem_object {
struct dma_buf_attachment *import_attach;
};
+/**
+ * DEFINE_DRM_GEM_FOPS() - macro to generate file operations for GEM drivers
+ * @name: name for the generated structure
+ *
+ * This macro autogenerates a suitable &struct file_operations for GEM based
+ * drivers, which can be assigned to &drm_driver.fops. Note that this structure
+ * cannot be shared between drivers, because it contains a reference to the
+ * current module using THIS_MODULE.
+ *
+ * Note that the declaration is already marked as static - if you need a
+ * non-static version of this you're probably doing it wrong and will break the
+ * THIS_MODULE reference by accident.
+ */
+#define DEFINE_DRM_GEM_FOPS(name) \
+ static const struct file_operations name = {\
+ .owner = THIS_MODULE,\
+ .open = drm_open,\
+ .release = drm_release,\
+ .unlocked_ioctl = drm_ioctl,\
+ .compat_ioctl = drm_compat_ioctl,\
+ .poll = drm_poll,\
+ .read = drm_read,\
+ .llseek = noop_llseek,\
+ .mmap = drm_gem_mmap,\
+ }
+
void drm_gem_object_release(struct drm_gem_object *obj);
void drm_gem_object_free(struct kref *kref);
int drm_gem_object_init(struct drm_device *dev,
@@ -187,42 +217,90 @@ int drm_gem_mmap_obj(struct drm_gem_object *obj, unsigned long obj_size,
int drm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
/**
- * drm_gem_object_reference - acquire a GEM BO reference
+ * drm_gem_object_get - acquire a GEM buffer object reference
* @obj: GEM buffer object
*
- * This acquires additional reference to @obj. It is illegal to call this
- * without already holding a reference. No locks required.
+ * This function acquires an additional reference to @obj. It is illegal to
+ * call this without already holding a reference. No locks required.
*/
-static inline void
-drm_gem_object_reference(struct drm_gem_object *obj)
+static inline void drm_gem_object_get(struct drm_gem_object *obj)
{
kref_get(&obj->refcount);
}
/**
- * __drm_gem_object_unreference - raw function to release a GEM BO reference
+ * __drm_gem_object_put - raw function to release a GEM buffer object reference
* @obj: GEM buffer object
*
* This function is meant to be used by drivers which are not encumbered with
* &drm_device.struct_mutex legacy locking and which are using the
* gem_free_object_unlocked callback. It avoids all the locking checks and
- * locking overhead of drm_gem_object_unreference() and
- * drm_gem_object_unreference_unlocked().
+ * locking overhead of drm_gem_object_put() and drm_gem_object_put_unlocked().
*
* Drivers should never call this directly in their code. Instead they should
- * wrap it up into a ``driver_gem_object_unreference(struct driver_gem_object
- * *obj)`` wrapper function, and use that. Shared code should never call this, to
+ * wrap it up into a ``driver_gem_object_put(struct driver_gem_object *obj)``
+ * wrapper function, and use that. Shared code should never call this, to
* avoid breaking drivers by accident which still depend upon
* &drm_device.struct_mutex locking.
*/
static inline void
-__drm_gem_object_unreference(struct drm_gem_object *obj)
+__drm_gem_object_put(struct drm_gem_object *obj)
{
kref_put(&obj->refcount, drm_gem_object_free);
}
-void drm_gem_object_unreference_unlocked(struct drm_gem_object *obj);
-void drm_gem_object_unreference(struct drm_gem_object *obj);
+void drm_gem_object_put_unlocked(struct drm_gem_object *obj);
+void drm_gem_object_put(struct drm_gem_object *obj);
+
+/**
+ * drm_gem_object_reference - acquire a GEM buffer object reference
+ * @obj: GEM buffer object
+ *
+ * This is a compatibility alias for drm_gem_object_get() and should not be
+ * used by new code.
+ */
+static inline void drm_gem_object_reference(struct drm_gem_object *obj)
+{
+ drm_gem_object_get(obj);
+}
+
+/**
+ * __drm_gem_object_unreference - raw function to release a GEM buffer object
+ * reference
+ * @obj: GEM buffer object
+ *
+ * This is a compatibility alias for __drm_gem_object_put() and should not be
+ * used by new code.
+ */
+static inline void __drm_gem_object_unreference(struct drm_gem_object *obj)
+{
+ __drm_gem_object_put(obj);
+}
+
+/**
+ * drm_gem_object_unreference_unlocked - release a GEM buffer object reference
+ * @obj: GEM buffer object
+ *
+ * This is a compatibility alias for drm_gem_object_put_unlocked() and should
+ * not be used by new code.
+ */
+static inline void
+drm_gem_object_unreference_unlocked(struct drm_gem_object *obj)
+{
+ drm_gem_object_put_unlocked(obj);
+}
+
+/**
+ * drm_gem_object_unreference - release a GEM buffer object reference
+ * @obj: GEM buffer object
+ *
+ * This is a compatibility alias for drm_gem_object_put() and should not be
+ * used by new code.
+ */
+static inline void drm_gem_object_unreference(struct drm_gem_object *obj)
+{
+ drm_gem_object_put(obj);
+}
int drm_gem_handle_create(struct drm_file *file_priv,
struct drm_gem_object *obj,
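
A short sketch of DEFINE_DRM_GEM_FOPS() wired into a driver structure; the foo names and feature flags are illustrative only.

DEFINE_DRM_GEM_FOPS(foo_driver_fops);

static struct drm_driver foo_driver = {
        .driver_features = DRIVER_GEM | DRIVER_MODESET,
        .fops = &foo_driver_fops,
        .name = "foo",
        .desc = "Example GEM driver",
        .date = "20170101",
        .major = 1,
        .minor = 0,
};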
diff --git a/include/drm/drm_gem_cma_helper.h b/include/drm/drm_gem_cma_helper.h
index 2abcd5190cc1..f962d33667cf 100644
--- a/include/drm/drm_gem_cma_helper.h
+++ b/include/drm/drm_gem_cma_helper.h
@@ -26,6 +26,32 @@ to_drm_gem_cma_obj(struct drm_gem_object *gem_obj)
return container_of(gem_obj, struct drm_gem_cma_object, base);
}
+/**
+ * DEFINE_DRM_GEM_CMA_FOPS() - macro to generate file operations for CMA drivers
+ * @name: name for the generated structure
+ *
+ * This macro autogenerates a suitable &struct file_operations for CMA based
+ * drivers, which can be assigned to &drm_driver.fops. Note that this structure
+ * cannot be shared between drivers, because it contains a reference to the
+ * current module using THIS_MODULE.
+ *
+ * Note that the declaration is already marked as static - if you need a
+ * non-static version of this you're probably doing it wrong and will break the
+ * THIS_MODULE reference by accident.
+ */
+#define DEFINE_DRM_GEM_CMA_FOPS(name) \
+ static const struct file_operations name = {\
+ .owner = THIS_MODULE,\
+ .open = drm_open,\
+ .release = drm_release,\
+ .unlocked_ioctl = drm_ioctl,\
+ .compat_ioctl = drm_compat_ioctl,\
+ .poll = drm_poll,\
+ .read = drm_read,\
+ .llseek = noop_llseek,\
+ .mmap = drm_gem_cma_mmap,\
+ }
+
/* free GEM object */
void drm_gem_cma_free_object(struct drm_gem_object *gem_obj);
diff --git a/include/drm/drm_global.h b/include/drm/drm_global.h
index a06805eaf649..3a830602a2e4 100644
--- a/include/drm/drm_global.h
+++ b/include/drm/drm_global.h
@@ -45,9 +45,9 @@ struct drm_global_reference {
void (*release) (struct drm_global_reference *);
};
-extern void drm_global_init(void);
-extern void drm_global_release(void);
-extern int drm_global_item_ref(struct drm_global_reference *ref);
-extern void drm_global_item_unref(struct drm_global_reference *ref);
+void drm_global_init(void);
+void drm_global_release(void);
+int drm_global_item_ref(struct drm_global_reference *ref);
+void drm_global_item_unref(struct drm_global_reference *ref);
#endif
diff --git a/include/drm/drm_hashtab.h b/include/drm/drm_hashtab.h
index fce2ef3fdfff..bb95ff011baf 100644
--- a/include/drm/drm_hashtab.h
+++ b/include/drm/drm_hashtab.h
@@ -49,17 +49,17 @@ struct drm_open_hash {
u8 order;
};
-extern int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
-extern int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
-extern int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
- unsigned long seed, int bits, int shift,
- unsigned long add);
-extern int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
+int drm_ht_create(struct drm_open_hash *ht, unsigned int order);
+int drm_ht_insert_item(struct drm_open_hash *ht, struct drm_hash_item *item);
+int drm_ht_just_insert_please(struct drm_open_hash *ht, struct drm_hash_item *item,
+ unsigned long seed, int bits, int shift,
+ unsigned long add);
+int drm_ht_find_item(struct drm_open_hash *ht, unsigned long key, struct drm_hash_item **item);
-extern void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
-extern int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
-extern int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
-extern void drm_ht_remove(struct drm_open_hash *ht);
+void drm_ht_verbose_list(struct drm_open_hash *ht, unsigned long key);
+int drm_ht_remove_key(struct drm_open_hash *ht, unsigned long key);
+int drm_ht_remove_item(struct drm_open_hash *ht, struct drm_hash_item *item);
+void drm_ht_remove(struct drm_open_hash *ht);
/*
* RCU-safe interface
diff --git a/include/drm/drm_ioctl.h b/include/drm/drm_ioctl.h
new file mode 100644
index 000000000000..ee03b3c44b3b
--- /dev/null
+++ b/include/drm/drm_ioctl.h
@@ -0,0 +1,188 @@
+/*
+ * Internal Header for the Direct Rendering Manager
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009-2010, Code Aurora Forum.
+ * All rights reserved.
+ *
+ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
+ * Author: Gareth Hughes <gareth@valinux.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_IOCTL_H_
+#define _DRM_IOCTL_H_
+
+#include <linux/types.h>
+#include <linux/bitops.h>
+
+#include <asm/ioctl.h>
+
+struct drm_device;
+struct drm_file;
+struct file;
+
+/**
+ * drm_ioctl_t - DRM ioctl function type.
+ * @dev: DRM device
+ * @data: private pointer of the ioctl call
+ * @file_priv: DRM file this ioctl was made on
+ *
+ * This is the DRM ioctl typedef. Note that drm_ioctl() has already copied @data
+ * into kernel-space, and will also copy it back, depending upon the read/write
+ * settings in the ioctl command code.
+ */
+typedef int drm_ioctl_t(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+/**
+ * drm_ioctl_compat_t - compatibility DRM ioctl function type.
+ * @filp: file pointer
+ * @cmd: ioctl command code
+ * @arg: ioctl argument
+ *
+ * Just a typedef to make declaring an array of compatibility handlers easier.
+ * New drivers shouldn't screw up the structure layout for their ioctl
+ * structures and hence never need this.
+ */
+typedef int drm_ioctl_compat_t(struct file *filp, unsigned int cmd,
+ unsigned long arg);
+
+#define DRM_IOCTL_NR(n) _IOC_NR(n)
+#define DRM_MAJOR 226
+
+/**
+ * enum drm_ioctl_flags - DRM ioctl flags
+ *
+ * Various flags that can be set in &drm_ioctl_desc.flags to control how
+ * userspace can use a given ioctl.
+ */
+enum drm_ioctl_flags {
+ /**
+ * @DRM_AUTH:
+ *
+ * This is for ioctls which are used for rendering, and require that the
+ * file descriptor is either for a render node, or if it's a
+ * legacy/primary node, then it must be authenticated.
+ */
+ DRM_AUTH = BIT(0),
+ /**
+ * @DRM_MASTER:
+ *
+ * This must be set for any ioctl which can change the modeset or
+ * display state. Userspace must call the ioctl through a primary node,
+ * while it is the active master.
+ *
+ * Note that read-only modeset ioctls can also be called by
+ * unauthenticated clients, or when a master is not the currently active
+ * one.
+ */
+ DRM_MASTER = BIT(1),
+ /**
+ * @DRM_ROOT_ONLY:
+ *
+ * Anything that could potentially wreck a master file descriptor needs
+ * to have this flag set. Currently that's only the SETMASTER and
+ * DROPMASTER ioctls, which e.g. logind can call to force a non-behaving
+ * master (display compositor) into compliance.
+ *
+ * This is equivalent to callers with the SYSADMIN capability.
+ */
+ DRM_ROOT_ONLY = BIT(2),
+ /**
+ * @DRM_CONTROL_ALLOW:
+ *
+ * Deprecated, do not use. Control nodes are in the process of getting
+ * removed.
+ */
+ DRM_CONTROL_ALLOW = BIT(3),
+ /**
+ * @DRM_UNLOCKED:
+ *
+ * Whether &drm_ioctl_desc.func should be called with the DRM BKL held
+ * or not. Enforced as the default for all modern drivers, hence there
+ * should never be a need to set this flag.
+ */
+ DRM_UNLOCKED = BIT(4),
+ /**
+ * @DRM_RENDER_ALLOW:
+ *
+ * This is used for all ioctls needed for rendering only, for drivers
+ * which support render nodes. This should be all new render drivers,
+ * and hence it should always be set for any ioctl with DRM_AUTH set.
+ * Note though that read-only query ioctls might have this set, but not
+ * DRM_AUTH, because they do not require authentication.
+ */
+ DRM_RENDER_ALLOW = BIT(5),
+};
+
+/**
+ * struct drm_ioctl_desc - DRM driver ioctl entry
+ * @cmd: ioctl command number, without flags
+ * @flags: a bitmask of &enum drm_ioctl_flags
+ * @func: handler for this ioctl
+ * @name: user-readable name for debug output
+ *
+ * For convenience it's easier to create these using the DRM_IOCTL_DEF_DRV()
+ * macro.
+ */
+struct drm_ioctl_desc {
+ unsigned int cmd;
+ enum drm_ioctl_flags flags;
+ drm_ioctl_t *func;
+ const char *name;
+};
+
+/**
+ * DRM_IOCTL_DEF_DRV() - helper macro to fill out a &struct drm_ioctl_desc
+ * @ioctl: ioctl command suffix
+ * @_func: handler for the ioctl
+ * @_flags: a bitmask of &enum drm_ioctl_flags
+ *
+ * Small helper macro to create a &struct drm_ioctl_desc entry. The ioctl
+ * command number is constructed by prepending ``DRM_IOCTL\_`` and passing that
+ * to DRM_IOCTL_NR().
+ */
+#define DRM_IOCTL_DEF_DRV(ioctl, _func, _flags) \
+ [DRM_IOCTL_NR(DRM_IOCTL_##ioctl) - DRM_COMMAND_BASE] = { \
+ .cmd = DRM_IOCTL_##ioctl, \
+ .func = _func, \
+ .flags = _flags, \
+ .name = #ioctl \
+ }
+
+int drm_ioctl_permit(u32 flags, struct drm_file *file_priv);
+long drm_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#ifdef CONFIG_COMPAT
+long drm_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg);
+#else
+/* Let drm_compat_ioctl be assigned to .compat_ioctl unconditionally */
+#define drm_compat_ioctl NULL
+#endif
+bool drm_ioctl_flags(unsigned int nr, unsigned int *flags);
+
+int drm_noop(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+int drm_invalid_op(struct drm_device *dev, void *data,
+ struct drm_file *file_priv);
+
+#endif /* _DRM_IOCTL_H_ */
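
A hedged sketch of a driver ioctl table built with DRM_IOCTL_DEF_DRV(). The FOO_* commands and foo_*_ioctl handlers are hypothetical and would normally be defined in the driver's uapi header; the table is then wired up via &drm_driver.ioctls and &drm_driver.num_ioctls.

static const struct drm_ioctl_desc foo_ioctls[] = {
        DRM_IOCTL_DEF_DRV(FOO_CREATE_BO, foo_create_bo_ioctl,
                          DRM_AUTH | DRM_RENDER_ALLOW),
        DRM_IOCTL_DEF_DRV(FOO_MMAP_BO, foo_mmap_bo_ioctl,
                          DRM_AUTH | DRM_RENDER_ALLOW),
};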
diff --git a/include/drm/drm_irq.h b/include/drm/drm_irq.h
index 2fb880462a57..cf0be6594c8c 100644
--- a/include/drm/drm_irq.h
+++ b/include/drm/drm_irq.h
@@ -152,7 +152,6 @@ void drm_crtc_vblank_reset(struct drm_crtc *crtc);
void drm_crtc_vblank_on(struct drm_crtc *crtc);
void drm_vblank_cleanup(struct drm_device *dev);
u32 drm_accurate_vblank_count(struct drm_crtc *crtc);
-u32 drm_vblank_no_hw_counter(struct drm_device *dev, unsigned int pipe);
int drm_calc_vbltimestamp_from_scanoutpos(struct drm_device *dev,
unsigned int pipe, int *max_error,
diff --git a/include/drm/drm_mem_util.h b/include/drm/drm_mem_util.h
index 70d4e221a3ad..d0f6cf2e5324 100644
--- a/include/drm/drm_mem_util.h
+++ b/include/drm/drm_mem_util.h
@@ -37,8 +37,7 @@ static __inline__ void *drm_calloc_large(size_t nmemb, size_t size)
if (size * nmemb <= PAGE_SIZE)
return kcalloc(nmemb, size, GFP_KERNEL);
- return __vmalloc(size * nmemb,
- GFP_KERNEL | __GFP_HIGHMEM | __GFP_ZERO, PAGE_KERNEL);
+ return vzalloc(size * nmemb);
}
/* Modeled after cairo's malloc_ab, it's like calloc but without the zeroing. */
@@ -50,8 +49,7 @@ static __inline__ void *drm_malloc_ab(size_t nmemb, size_t size)
if (size * nmemb <= PAGE_SIZE)
return kmalloc(nmemb * size, GFP_KERNEL);
- return __vmalloc(size * nmemb,
- GFP_KERNEL | __GFP_HIGHMEM, PAGE_KERNEL);
+ return vmalloc(size * nmemb);
}
static __inline__ void *drm_malloc_gfp(size_t nmemb, size_t size, gfp_t gfp)
@@ -69,8 +67,7 @@ static __inline__ void *drm_malloc_gfp(size_t nmemb, size_t size, gfp_t gfp)
return ptr;
}
- return __vmalloc(size * nmemb,
- gfp | __GFP_HIGHMEM, PAGE_KERNEL);
+ return __vmalloc(size * nmemb, gfp, PAGE_KERNEL);
}
static __inline void drm_free_large(void *ptr)
diff --git a/include/drm/drm_mm.h b/include/drm/drm_mm.h
index 2ef16bf25826..49b292e98fec 100644
--- a/include/drm/drm_mm.h
+++ b/include/drm/drm_mm.h
@@ -460,10 +460,13 @@ __drm_mm_interval_first(const struct drm_mm *mm, u64 start, u64 last);
* but using the internal interval tree to accelerate the search for the
* starting node, and so not safe against removal of elements. It assumes
* that @end is within (or is the upper limit of) the drm_mm allocator.
+ * If [@start, @end] are beyond the range of the drm_mm, the iterator may walk
+ * over the special _unallocated_ &drm_mm.head_node, and may even continue
+ * indefinitely.
*/
#define drm_mm_for_each_node_in_range(node__, mm__, start__, end__) \
for (node__ = __drm_mm_interval_first((mm__), (start__), (end__)-1); \
- node__ && node__->start < (end__); \
+ node__->start < (end__); \
node__ = list_next_entry(node__, node_list))
void drm_mm_scan_init_with_range(struct drm_mm_scan *scan,
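
Given the new caveat above, callers must keep [start, end] inside the allocator's range. A minimal sketch; foo_print_range() is hypothetical.

static void foo_print_range(struct drm_mm *mm, u64 start, u64 end)
{
        struct drm_mm_node *node;

        /* The caller guarantees [start, end] lies within the drm_mm,
         * otherwise the iterator may walk the unallocated head node.
         */
        drm_mm_for_each_node_in_range(node, mm, start, end)
                pr_info("node [%llx + %llx]\n",
                        (unsigned long long)node->start,
                        (unsigned long long)node->size);
}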
diff --git a/include/drm/drm_mode_config.h b/include/drm/drm_mode_config.h
index 26ff46ab26fb..42981711189b 100644
--- a/include/drm/drm_mode_config.h
+++ b/include/drm/drm_mode_config.h
@@ -34,6 +34,7 @@ struct drm_file;
struct drm_device;
struct drm_atomic_state;
struct drm_mode_fb_cmd2;
+struct drm_format_info;
/**
* struct drm_mode_config_funcs - basic driver provided mode setting functions
@@ -70,6 +71,19 @@ struct drm_mode_config_funcs {
const struct drm_mode_fb_cmd2 *mode_cmd);
/**
+ * @get_format_info:
+ *
+ * Allows a driver to return custom format information for special
+ * fb layouts (e.g. ones with auxiliary compression control planes).
+ *
+ * RETURNS:
+ *
+ * The format information specific to the given fb metadata, or
+ * NULL if none is found.
+ */
+ const struct drm_format_info *(*get_format_info)(const struct drm_mode_fb_cmd2 *mode_cmd);
+
+ /**
* @output_poll_changed:
*
* Callback used by helpers to inform the driver of output configuration
@@ -267,7 +281,7 @@ struct drm_mode_config_funcs {
* passed-in &drm_atomic_state. This hook is called when the caller
* encountered a &drm_modeset_lock deadlock and needs to drop all
* already acquired locks as part of the deadlock avoidance dance
- * implemented in drm_modeset_lock_backoff().
+ * implemented in drm_modeset_backoff().
*
* Any duplicated state must be invalidated since a concurrent atomic
* update might change it, and the drm atomic interfaces always apply
@@ -285,29 +299,14 @@ struct drm_mode_config_funcs {
* itself. Note that the core first calls drm_atomic_state_clear() to
* avoid code duplicate between the clear and free hooks.
*
- * Drivers that implement this must call drm_atomic_state_default_free()
- * to release common resources.
+ * Drivers that implement this must call
+ * drm_atomic_state_default_release() to release common resources.
*/
void (*atomic_state_free)(struct drm_atomic_state *state);
};
/**
* struct drm_mode_config - Mode configuration control structure
- * @mutex: mutex protecting KMS related lists and structures
- * @connection_mutex: ww mutex protecting connector state and routing
- * @acquire_ctx: global implicit acquire context used by atomic drivers for
- * legacy IOCTLs
- * @fb_lock: mutex to protect fb state and lists
- * @num_fb: number of fbs available
- * @fb_list: list of framebuffers available
- * @num_encoder: number of encoders on this device
- * @encoder_list: list of encoder objects
- * @num_overlay_plane: number of overlay planes on this device
- * @num_total_plane: number of universal (i.e. with primary/curso) planes on this device
- * @plane_list: list of plane objects
- * @num_crtc: number of CRTCs on this device
- * @crtc_list: list of CRTC objects
- * @property_list: list of property objects
* @min_width: minimum pixel width on this device
* @min_height: minimum pixel height on this device
* @max_width: maximum pixel width on this device
@@ -318,9 +317,6 @@ struct drm_mode_config_funcs {
* @poll_running: track polling status for this device
* @delayed_event: track delayed poll uevent deliver for this device
* @output_poll_work: delayed work for polling in process context
- * @property_blob_list: list of all the blob property objects
- * @blob_lock: mutex for blob property allocation and management
- * @*_property: core property tracking
* @preferred_depth: preferred RBG pixel depth, used by fb helpers
* @prefer_shadow: hint to userspace to prefer shadow-fb rendering
* @cursor_width: hint to userspace for max cursor width
@@ -332,9 +328,37 @@ struct drm_mode_config_funcs {
* global restrictions are also here, e.g. dimension restrictions.
*/
struct drm_mode_config {
- struct mutex mutex; /* protects configuration (mode lists etc.) */
- struct drm_modeset_lock connection_mutex; /* protects connector->encoder and encoder->crtc links */
- struct drm_modeset_acquire_ctx *acquire_ctx; /* for legacy _lock_all() / _unlock_all() */
+ /**
+ * @mutex:
+ *
+ * This is the big scary modeset BKL which protects everything that
+ * isn't protected otherwise. Scope is unclear and fuzzy, try to remove
+ * anything from under its protection and move it into better-scoped
+ * locks.
+ *
+ * The one important thing this protects is the use of @acquire_ctx.
+ */
+ struct mutex mutex;
+
+ /**
+ * @connection_mutex:
+ *
+ * This protects connector state and the connector to encoder to CRTC
+ * routing chain.
+ *
+ * For atomic drivers specifically this protects &drm_connector.state.
+ */
+ struct drm_modeset_lock connection_mutex;
+
+ /**
+ * @acquire_ctx:
+ *
+ * Global implicit acquire context used by atomic drivers for legacy
+ * IOCTLs. Deprecated, since implicit locking contexts make it
+ * impossible to use driver-private &struct drm_modeset_lock. Users of
+ * this must hold @mutex.
+ */
+ struct drm_modeset_acquire_ctx *acquire_ctx;
/**
* @idr_mutex:
@@ -360,8 +384,11 @@ struct drm_mode_config {
*/
struct idr tile_idr;
- struct mutex fb_lock; /* proctects global and per-file fb lists */
+ /** @fb_lock: Mutex to protect the global @fb_list and @num_fb. */
+ struct mutex fb_lock;
+ /** @num_fb: Number of entries on @fb_list. */
int num_fb;
+ /** @fb_list: List of all &struct drm_framebuffer. */
struct list_head fb_list;
/**
@@ -379,27 +406,80 @@ struct drm_mode_config {
*/
struct ida connector_ida;
/**
- * @connector_list: List of connector objects. Protected by
- * @connector_list_lock. Only use drm_for_each_connector_iter() and
+ * @connector_list:
+ *
+ * List of connector objects linked with &drm_connector.head. Protected
+ * by @connector_list_lock. Only use drm_for_each_connector_iter() and
* &struct drm_connector_list_iter to walk this list.
*/
struct list_head connector_list;
+ /**
+ * @num_encoder:
+ *
+ * Number of encoders on this device. This is invariant over the
+ * lifetime of a device and hence doesn't need any locks.
+ */
int num_encoder;
+ /**
+ * @encoder_list:
+ *
+ * List of encoder objects linked with &drm_encoder.head. This is
+ * invariant over the lifetime of a device and hence doesn't need any
+ * locks.
+ */
struct list_head encoder_list;
- /*
- * Track # of overlay planes separately from # of total planes. By
- * default we only advertise overlay planes to userspace; if userspace
- * sets the "universal plane" capability bit, we'll go ahead and
- * expose all planes.
+ /**
+ * @num_overlay_plane:
+ *
+ * Number of overlay planes on this device, excluding primary and cursor
+ * planes.
+ *
+ * Track number of overlay planes separately from number of total
+ * planes. By default we only advertise overlay planes to userspace; if
+ * userspace sets the "universal plane" capability bit, we'll go ahead
+ * and expose all planes. This is invariant over the lifetime of a
+ * device and hence doesn't need any locks.
*/
int num_overlay_plane;
+ /**
+ * @num_total_plane:
+ *
+ * Number of universal (i.e. with primary/cursor) planes on this device.
+ * This is invariant over the lifetime of a device and hence doesn't
+ * need any locks.
+ */
int num_total_plane;
+ /**
+ * @plane_list:
+ *
+ * List of plane objects linked with &drm_plane.head. This is invariant
+ * over the lifetime of a device and hence doesn't need any locks.
+ */
struct list_head plane_list;
+ /**
+ * @num_crtc:
+ *
+ * Number of CRTCs on this device linked with &drm_crtc.head. This is
+ * invariant over the lifetime of a device and hence doesn't need any
+ * locks.
+ */
int num_crtc;
+ /**
+ * @crtc_list:
+ *
+ * List of CRTC objects linked with &drm_crtc.head. This is invariant
+ * over the lifetime of a device and hence doesn't need any locks.
+ */
struct list_head crtc_list;
+ /**
+ * @property_list:
+ *
+ * List of property type objects linked with &drm_property.head. This is
+ * invariant over the lifetime of a device and hence doesn't need any
+ * locks.
+ */
struct list_head property_list;
int min_width, min_height;
@@ -413,10 +493,24 @@ struct drm_mode_config {
bool delayed_event;
struct delayed_work output_poll_work;
+ /**
+ * @blob_lock:
+ *
+ * Mutex for blob property allocation and management, protects
+ * @property_blob_list and &drm_file.blobs.
+ */
struct mutex blob_lock;
- /* pointers to standard properties */
+ /**
+ * @property_blob_list:
+ *
+ * List of all the blob property objects linked with
+ * &drm_property_blob.head. Protected by @blob_lock.
+ */
struct list_head property_blob_list;
+
+ /* pointers to standard properties */
+
/**
* @edid_property: Default connector property to hold the EDID of the
* currently connected sink, if any.
@@ -439,6 +533,11 @@ struct drm_mode_config {
*/
struct drm_property *tile_property;
/**
+ * @link_status_property: Default connector property for link status
+ * of a connector
+ */
+ struct drm_property *link_status_property;
+ /**
* @plane_type_property: Default plane property to differentiate
* CURSOR, PRIMARY and OVERLAY legacy uses of planes.
*/
@@ -661,7 +760,7 @@ struct drm_mode_config {
/* cursor size */
uint32_t cursor_width, cursor_height;
- struct drm_mode_config_helper_funcs *helper_private;
+ const struct drm_mode_config_helper_funcs *helper_private;
};
void drm_mode_config_init(struct drm_device *dev);
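
A hedged sketch of a &drm_mode_config_funcs.get_format_info implementation for a driver with a hypothetical compression modifier; FOO_MODIFIER_CCS and foo_ccs_info are made-up names.

static const struct drm_format_info foo_ccs_info = {
        .format = DRM_FORMAT_XRGB8888,
        .depth = 24,
        .num_planes = 2,
        .cpp = { 4, 1, 0 },
        .hsub = 1,
        .vsub = 1,
};

static const struct drm_format_info *
foo_get_format_info(const struct drm_mode_fb_cmd2 *mode_cmd)
{
        if (mode_cmd->modifier[0] == FOO_MODIFIER_CCS &&
            mode_cmd->pixel_format == DRM_FORMAT_XRGB8888)
                return &foo_ccs_info;

        /* NULL means: fall back to the core format table. */
        return NULL;
}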
diff --git a/include/drm/drm_mode_object.h b/include/drm/drm_mode_object.h
index 2c017adf6d74..a767b4a30a6d 100644
--- a/include/drm/drm_mode_object.h
+++ b/include/drm/drm_mode_object.h
@@ -45,10 +45,10 @@ struct drm_device;
* drm_object_attach_property() before the object is visible to userspace.
*
* - For objects with dynamic lifetimes (as indicated by a non-NULL @free_cb) it
- * provides reference counting through drm_mode_object_reference() and
- * drm_mode_object_unreference(). This is used by &drm_framebuffer,
- * &drm_connector and &drm_property_blob. These objects provide specialized
- * reference counting wrappers.
+ * provides reference counting through drm_mode_object_get() and
+ * drm_mode_object_put(). This is used by &drm_framebuffer, &drm_connector
+ * and &drm_property_blob. These objects provide specialized reference
+ * counting wrappers.
*/
struct drm_mode_object {
uint32_t id;
@@ -114,8 +114,32 @@ struct drm_object_properties {
struct drm_mode_object *drm_mode_object_find(struct drm_device *dev,
uint32_t id, uint32_t type);
-void drm_mode_object_reference(struct drm_mode_object *obj);
-void drm_mode_object_unreference(struct drm_mode_object *obj);
+void drm_mode_object_get(struct drm_mode_object *obj);
+void drm_mode_object_put(struct drm_mode_object *obj);
+
+/**
+ * drm_mode_object_reference - acquire a mode object reference
+ * @obj: DRM mode object
+ *
+ * This is a compatibility alias for drm_mode_object_get() and should not be
+ * used by new code.
+ */
+static inline void drm_mode_object_reference(struct drm_mode_object *obj)
+{
+ drm_mode_object_get(obj);
+}
+
+/**
+ * drm_mode_object_unreference - release a mode object reference
+ * @obj: DRM mode object
+ *
+ * This is a compatibility alias for drm_mode_object_put() and should not be
+ * used by new code.
+ */
+static inline void drm_mode_object_unreference(struct drm_mode_object *obj)
+{
+ drm_mode_object_put(obj);
+}
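For readers unfamiliar with the new naming, a minimal usage sketch of the get/put pair follows; the helper function is invented purely for illustration and is not part of this patch:

/* Hypothetical helper: pin @obj while it is inspected, then drop the ref. */
static void example_inspect_mode_object(struct drm_mode_object *obj)
{
	drm_mode_object_get(obj);
	/* ... obj may be dereferenced safely here ... */
	drm_mode_object_put(obj);
}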
int drm_object_property_set_value(struct drm_mode_object *obj,
struct drm_property *property,
diff --git a/include/drm/drm_modeset_helper_vtables.h b/include/drm/drm_modeset_helper_vtables.h
index 091c42205667..c01c328f6cc8 100644
--- a/include/drm/drm_modeset_helper_vtables.h
+++ b/include/drm/drm_modeset_helper_vtables.h
@@ -747,6 +747,10 @@ struct drm_connector_helper_funcs {
* This callback is used by the probe helpers in e.g.
* drm_helper_probe_single_connector_modes().
*
+ * To avoid races with concurrent connector state updates, the helper
+ * libraries always call this with the &drm_mode_config.connection_mutex
+ * held. Because of this it's safe to inspect &drm_connector->state.
+ *
* RETURNS:
*
* The number of modes added by calling drm_mode_probed_add().
@@ -754,6 +758,34 @@ struct drm_connector_helper_funcs {
int (*get_modes)(struct drm_connector *connector);
/**
+ * @detect_ctx:
+ *
+ * Check to see if anything is attached to the connector. The parameter
+ * force is set to false whilst polling, true when checking the
+ * connector due to a user request. force can be used by the driver to
+ * avoid expensive, destructive operations during automated probing.
+ *
+ * This callback is optional; if it is not implemented the connector is
+ * considered to always be attached.
+ *
+ * This is the atomic version of &drm_connector_funcs.detect.
+ *
+ * To avoid races against concurrent connector state updates, the
+ * helper libraries always call this with ctx set to a valid context,
+ * and &drm_mode_config.connection_mutex will always be locked with
+ * the ctx parameter set to this ctx. This allows taking additional
+ * locks as required.
+ *
+ * RETURNS:
+ *
+ * &drm_connector_status indicating the connector's status, or the
+ * -EDEADLK error code returned by drm_modeset_lock() when the probe has
+ * to be restarted. A minimal driver-side sketch follows this structure
+ * definition.
+ */
+ int (*detect_ctx)(struct drm_connector *connector,
+ struct drm_modeset_acquire_ctx *ctx,
+ bool force);
+
+ /**
* @mode_valid:
*
* Callback to validate a mode for a connector, irrespective of the
@@ -771,6 +803,10 @@ struct drm_connector_helper_funcs {
* CRTC helpers will not call this function. Drivers therefore must
* still fully validate any mode passed in in a modeset request.
*
+ * To avoid races with concurrent connector state updates, the helper
+ * libraries always call this with the &drm_mode_config.connection_mutex
+ * held. Because of this it's safe to inspect &drm_connector->state.
+ *
* RETURNS:
*
* Either &drm_mode_status.MODE_OK or one of the failure reasons in &enum
@@ -836,6 +872,40 @@ struct drm_connector_helper_funcs {
*/
struct drm_encoder *(*atomic_best_encoder)(struct drm_connector *connector,
struct drm_connector_state *connector_state);
+
+ /**
+ * @atomic_check:
+ *
+ * This hook is used to validate connector state. This function is
+ * called from &drm_atomic_helper_check_modeset, and is called when
+ * a connector property is set, or a modeset on the crtc is forced.
+ *
+ * Because &drm_atomic_helper_check_modeset may be called multiple times,
+ * this function should handle being called multiple times as well.
+ *
+ * This function is also allowed to inspect any other object's state and
+ * can add more state objects to the atomic commit if needed. Care must
+ * be taken though to ensure that state check and compute functions for
+ * these added states are all called, and derived state in other objects
+ * all updated. Again the recommendation is to just call check helpers
+ * until a maximal configuration is reached.
+ *
+ * NOTE:
+ *
+ * This function is called in the check phase of an atomic update. The
+ * driver is not allowed to change anything outside of the free-standing
+ * state objects passed-in or assembled in the overall &drm_atomic_state
+ * update tracking structure.
+ *
+ * RETURNS:
+ *
+ * 0 on success, -EINVAL if the state or the transition can't be
+ * supported, -ENOMEM on memory allocation failure and -EDEADLK if an
+ * attempt to obtain another state object ran into a &drm_modeset_lock
+ * deadlock.
+ */
+ int (*atomic_check)(struct drm_connector *connector,
+ struct drm_connector_state *state);
};
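As promised in the @detect_ctx documentation above, here is a hedged sketch of a driver-side implementation. Everything prefixed foo_ (the device struct, connector_to_foo(), the hw_lock member, the detect_is_expensive flag and foo_hpd_asserted()) is invented for illustration; only the drm_modeset_lock() call and the -EDEADLK propagation reflect the documented contract:

static int foo_connector_detect_ctx(struct drm_connector *connector,
				    struct drm_modeset_acquire_ctx *ctx,
				    bool force)
{
	struct foo_device *foo = connector_to_foo(connector);
	int ret;

	/* connection_mutex is already held in @ctx; more locks may be taken. */
	ret = drm_modeset_lock(&foo->hw_lock, ctx);
	if (ret)
		return ret;	/* typically -EDEADLK, the probe is restarted */

	if (!force && foo->detect_is_expensive)
		return connector->status;	/* keep the last known state */

	return foo_hpd_asserted(foo) ? connector_status_connected :
				       connector_status_disconnected;
}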
/**
diff --git a/include/drm/drm_modeset_lock.h b/include/drm/drm_modeset_lock.h
index 96d39fbd12ca..4b27c2bb955c 100644
--- a/include/drm/drm_modeset_lock.h
+++ b/include/drm/drm_modeset_lock.h
@@ -121,12 +121,7 @@ struct drm_plane;
void drm_modeset_lock_all(struct drm_device *dev);
void drm_modeset_unlock_all(struct drm_device *dev);
-void drm_modeset_lock_crtc(struct drm_crtc *crtc,
- struct drm_plane *plane);
-void drm_modeset_unlock_crtc(struct drm_crtc *crtc);
void drm_warn_on_modeset_not_all_locked(struct drm_device *dev);
-struct drm_modeset_acquire_ctx *
-drm_modeset_legacy_acquire_ctx(struct drm_crtc *crtc);
int drm_modeset_lock_all_ctx(struct drm_device *dev,
struct drm_modeset_acquire_ctx *ctx);
diff --git a/include/drm/drm_of.h b/include/drm/drm_of.h
index 26a64805cc15..104dd517fdbe 100644
--- a/include/drm/drm_of.h
+++ b/include/drm/drm_of.h
@@ -8,21 +8,27 @@ struct component_match;
struct device;
struct drm_device;
struct drm_encoder;
+struct drm_panel;
+struct drm_bridge;
struct device_node;
#ifdef CONFIG_OF
-extern uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
- struct device_node *port);
-extern void drm_of_component_match_add(struct device *master,
- struct component_match **matchptr,
- int (*compare)(struct device *, void *),
- struct device_node *node);
-extern int drm_of_component_probe(struct device *dev,
- int (*compare_of)(struct device *, void *),
- const struct component_master_ops *m_ops);
-extern int drm_of_encoder_active_endpoint(struct device_node *node,
- struct drm_encoder *encoder,
- struct of_endpoint *endpoint);
+uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
+ struct device_node *port);
+void drm_of_component_match_add(struct device *master,
+ struct component_match **matchptr,
+ int (*compare)(struct device *, void *),
+ struct device_node *node);
+int drm_of_component_probe(struct device *dev,
+ int (*compare_of)(struct device *, void *),
+ const struct component_master_ops *m_ops);
+int drm_of_encoder_active_endpoint(struct device_node *node,
+ struct drm_encoder *encoder,
+ struct of_endpoint *endpoint);
+int drm_of_find_panel_or_bridge(const struct device_node *np,
+ int port, int endpoint,
+ struct drm_panel **panel,
+ struct drm_bridge **bridge);
#else
static inline uint32_t drm_of_find_possible_crtcs(struct drm_device *dev,
struct device_node *port)
@@ -52,6 +58,13 @@ static inline int drm_of_encoder_active_endpoint(struct device_node *node,
{
return -EINVAL;
}
+static inline int drm_of_find_panel_or_bridge(const struct device_node *np,
+ int port, int endpoint,
+ struct drm_panel **panel,
+ struct drm_bridge **bridge)
+{
+ return -EINVAL;
+}
#endif
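A hedged sketch of how a driver's probe path might use the new drm_of_find_panel_or_bridge() helper; the function name, the port/endpoint numbers and the logging are illustrative only:

#include <drm/drm_of.h>
#include <drm/drm_panel.h>

static int example_find_output(struct device *dev)
{
	struct drm_panel *panel = NULL;
	struct drm_bridge *bridge = NULL;
	int ret;

	/* Look at port 1, endpoint 0 of this device's OF node. */
	ret = drm_of_find_panel_or_bridge(dev->of_node, 1, 0, &panel, &bridge);
	if (ret)
		return ret;	/* commonly -EPROBE_DEFER or -ENODEV */

	if (panel)
		dev_info(dev, "output is a panel\n");
	else
		dev_info(dev, "output is a bridge\n");

	return 0;
}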
static inline int drm_of_encoder_active_endpoint_id(struct device_node *node,
diff --git a/include/drm/drm_panel.h b/include/drm/drm_panel.h
index 4b76cf2d5a7b..1b364b0100f4 100644
--- a/include/drm/drm_panel.h
+++ b/include/drm/drm_panel.h
@@ -192,7 +192,7 @@ void drm_panel_remove(struct drm_panel *panel);
int drm_panel_attach(struct drm_panel *panel, struct drm_connector *connector);
int drm_panel_detach(struct drm_panel *panel);
-#ifdef CONFIG_OF
+#if defined(CONFIG_OF) && defined(CONFIG_DRM_PANEL)
struct drm_panel *of_drm_find_panel(const struct device_node *np);
#else
static inline struct drm_panel *of_drm_find_panel(const struct device_node *np)
diff --git a/include/drm/drm_pci.h b/include/drm/drm_pci.h
new file mode 100644
index 000000000000..4579fac1080c
--- /dev/null
+++ b/include/drm/drm_pci.h
@@ -0,0 +1,75 @@
+/*
+ * Internal Header for the Direct Rendering Manager
+ *
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009-2010, Code Aurora Forum.
+ * All rights reserved.
+ *
+ * Author: Rickard E. (Rik) Faith <faith@valinux.com>
+ * Author: Gareth Hughes <gareth@valinux.com>
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * VA LINUX SYSTEMS AND/OR ITS SUPPLIERS BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef _DRM_PCI_H_
+#define _DRM_PCI_H_
+
+#include <linux/pci.h>
+
+struct drm_dma_handle;
+struct drm_device;
+struct drm_driver;
+struct drm_master;
+
+struct drm_dma_handle *drm_pci_alloc(struct drm_device *dev, size_t size,
+ size_t align);
+void drm_pci_free(struct drm_device *dev, struct drm_dma_handle * dmah);
+
+int drm_pci_init(struct drm_driver *driver, struct pci_driver *pdriver);
+void drm_pci_exit(struct drm_driver *driver, struct pci_driver *pdriver);
+#ifdef CONFIG_PCI
+int drm_get_pci_dev(struct pci_dev *pdev,
+ const struct pci_device_id *ent,
+ struct drm_driver *driver);
+int drm_pci_set_busid(struct drm_device *dev, struct drm_master *master);
+#else
+static inline int drm_get_pci_dev(struct pci_dev *pdev,
+ const struct pci_device_id *ent,
+ struct drm_driver *driver)
+{
+ return -ENOSYS;
+}
+
+static inline int drm_pci_set_busid(struct drm_device *dev,
+ struct drm_master *master)
+{
+ return -ENOSYS;
+}
+#endif
+
+#define DRM_PCIE_SPEED_25 1
+#define DRM_PCIE_SPEED_50 2
+#define DRM_PCIE_SPEED_80 4
+
+int drm_pcie_get_speed_cap_mask(struct drm_device *dev, u32 *speed_mask);
+int drm_pcie_get_max_link_width(struct drm_device *dev, u32 *mlw);
+
+#endif /* _DRM_PCI_H_ */
diff --git a/include/drm/drm_plane.h b/include/drm/drm_plane.h
index 20867b4371ab..9ab3e7044812 100644
--- a/include/drm/drm_plane.h
+++ b/include/drm/drm_plane.h
@@ -29,6 +29,7 @@
struct drm_crtc;
struct drm_printer;
+struct drm_modeset_acquire_ctx;
/**
* struct drm_plane_state - mutable plane state
@@ -184,7 +185,8 @@ struct drm_plane_funcs {
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h);
+ uint32_t src_w, uint32_t src_h,
+ struct drm_modeset_acquire_ctx *ctx);
/**
* @disable_plane:
@@ -201,7 +203,8 @@ struct drm_plane_funcs {
*
* 0 on success or a negative error code on failure.
*/
- int (*disable_plane)(struct drm_plane *plane);
+ int (*disable_plane)(struct drm_plane *plane,
+ struct drm_modeset_acquire_ctx *ctx);
/**
* @destroy:
@@ -456,7 +459,6 @@ enum drm_plane_type {
* @funcs: helper functions
* @properties: property tracking for this plane
* @type: type of plane (overlay, primary, cursor)
- * @state: current atomic state for this plane
* @zpos_property: zpos property for this plane
* @rotation_property: rotation property for this plane
* @helper_private: mid-layer private data
@@ -473,6 +475,8 @@ struct drm_plane {
* Protects modeset plane state, together with the &drm_crtc.mutex of
* CRTC this plane is linked to (when active, getting activated or
* getting disabled).
+ *
+ * For atomic drivers specifically this protects @state.
*/
struct drm_modeset_lock mutex;
@@ -502,6 +506,19 @@ struct drm_plane {
const struct drm_plane_helper_funcs *helper_private;
+ /**
+ * @state:
+ *
+ * Current atomic state for this plane.
+ *
+ * This is protected by @mutex. Note that nonblocking atomic commits
+ * access the current plane state without taking locks, either by going
+ * through the &struct drm_atomic_state pointers (see
+ * for_each_plane_in_state(), for_each_oldnew_plane_in_state(),
+ * for_each_old_plane_in_state() and for_each_new_plane_in_state()), or
+ * through careful ordering of atomic commit operations as implemented
+ * in the atomic helpers, see &struct drm_crtc_commit.
+ */
struct drm_plane_state *state;
struct drm_property *zpos_property;
@@ -510,7 +527,7 @@ struct drm_plane {
#define obj_to_plane(x) container_of(x, struct drm_plane, base)
-extern __printf(8, 9)
+__printf(8, 9)
int drm_universal_plane_init(struct drm_device *dev,
struct drm_plane *plane,
uint32_t possible_crtcs,
@@ -519,13 +536,13 @@ int drm_universal_plane_init(struct drm_device *dev,
unsigned int format_count,
enum drm_plane_type type,
const char *name, ...);
-extern int drm_plane_init(struct drm_device *dev,
- struct drm_plane *plane,
- uint32_t possible_crtcs,
- const struct drm_plane_funcs *funcs,
- const uint32_t *formats, unsigned int format_count,
- bool is_primary);
-extern void drm_plane_cleanup(struct drm_plane *plane);
+int drm_plane_init(struct drm_device *dev,
+ struct drm_plane *plane,
+ uint32_t possible_crtcs,
+ const struct drm_plane_funcs *funcs,
+ const uint32_t *formats, unsigned int format_count,
+ bool is_primary);
+void drm_plane_cleanup(struct drm_plane *plane);
/**
* drm_plane_index - find the index of a registered plane
@@ -538,8 +555,8 @@ static inline unsigned int drm_plane_index(struct drm_plane *plane)
{
return plane->index;
}
-extern struct drm_plane * drm_plane_from_index(struct drm_device *dev, int idx);
-extern void drm_plane_force_disable(struct drm_plane *plane);
+struct drm_plane *drm_plane_from_index(struct drm_device *dev, int idx);
+void drm_plane_force_disable(struct drm_plane *plane);
int drm_mode_plane_set_obj_prop(struct drm_plane *plane,
struct drm_property *property,
diff --git a/include/drm/drm_plane_helper.h b/include/drm/drm_plane_helper.h
index c18959685c06..7c8a00ceadb7 100644
--- a/include/drm/drm_plane_helper.h
+++ b/include/drm/drm_plane_helper.h
@@ -61,8 +61,10 @@ int drm_primary_helper_update(struct drm_plane *plane,
int crtc_x, int crtc_y,
unsigned int crtc_w, unsigned int crtc_h,
uint32_t src_x, uint32_t src_y,
- uint32_t src_w, uint32_t src_h);
-int drm_primary_helper_disable(struct drm_plane *plane);
+ uint32_t src_w, uint32_t src_h,
+ struct drm_modeset_acquire_ctx *ctx);
+int drm_primary_helper_disable(struct drm_plane *plane,
+ struct drm_modeset_acquire_ctx *ctx);
void drm_primary_helper_destroy(struct drm_plane *plane);
extern const struct drm_plane_funcs drm_primary_helper_funcs;
diff --git a/include/drm/drm_prime.h b/include/drm/drm_prime.h
new file mode 100644
index 000000000000..0b2a235c4be0
--- /dev/null
+++ b/include/drm/drm_prime.h
@@ -0,0 +1,80 @@
+/*
+ * Copyright © 2012 Red Hat
+ * Copyright 1999 Precision Insight, Inc., Cedar Park, Texas.
+ * Copyright 2000 VA Linux Systems, Inc., Sunnyvale, California.
+ * Copyright (c) 2009-2010, Code Aurora Forum.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the next
+ * paragraph) shall be included in all copies or substantial portions of the
+ * Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
+ * IN THE SOFTWARE.
+ *
+ * Authors:
+ * Dave Airlie <airlied@redhat.com>
+ * Rob Clark <rob.clark@linaro.org>
+ *
+ */
+
+#ifndef __DRM_PRIME_H__
+#define __DRM_PRIME_H__
+
+#include <linux/mutex.h>
+#include <linux/rbtree.h>
+#include <linux/scatterlist.h>
+
+/**
+ * struct drm_prime_file_private - per-file tracking for PRIME
+ *
+ * This just contains the internal &struct dma_buf and handle caches for each
+ * &struct drm_file used by the PRIME core code.
+ */
+struct drm_prime_file_private {
+/* private: */
+ struct mutex lock;
+ struct rb_root dmabufs;
+ struct rb_root handles;
+};
+
+struct dma_buf_export_info;
+struct dma_buf;
+
+struct drm_device;
+struct drm_gem_object;
+struct drm_file;
+
+struct dma_buf *drm_gem_prime_export(struct drm_device *dev,
+ struct drm_gem_object *obj,
+ int flags);
+int drm_gem_prime_handle_to_fd(struct drm_device *dev,
+ struct drm_file *file_priv, uint32_t handle, uint32_t flags,
+ int *prime_fd);
+struct drm_gem_object *drm_gem_prime_import(struct drm_device *dev,
+ struct dma_buf *dma_buf);
+int drm_gem_prime_fd_to_handle(struct drm_device *dev,
+ struct drm_file *file_priv, int prime_fd, uint32_t *handle);
+struct dma_buf *drm_gem_dmabuf_export(struct drm_device *dev,
+ struct dma_buf_export_info *exp_info);
+void drm_gem_dmabuf_release(struct dma_buf *dma_buf);
+
+int drm_prime_sg_to_page_addr_arrays(struct sg_table *sgt, struct page **pages,
+ dma_addr_t *addrs, int max_pages);
+struct sg_table *drm_prime_pages_to_sg(struct page **pages, unsigned int nr_pages);
+void drm_prime_gem_destroy(struct drm_gem_object *obj, struct sg_table *sg);
+
+#endif /* __DRM_PRIME_H__ */
diff --git a/include/drm/drm_print.h b/include/drm/drm_print.h
index 7d98763c0444..ca4d7c6321f2 100644
--- a/include/drm/drm_print.h
+++ b/include/drm/drm_print.h
@@ -26,6 +26,8 @@
#ifndef DRM_PRINT_H_
#define DRM_PRINT_H_
+#include <linux/compiler.h>
+#include <linux/printk.h>
#include <linux/seq_file.h>
#include <linux/device.h>
@@ -75,6 +77,7 @@ void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf);
void __drm_printfn_info(struct drm_printer *p, struct va_format *vaf);
void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf);
+__printf(2, 3)
void drm_printf(struct drm_printer *p, const char *f, ...);
diff --git a/include/drm/drm_property.h b/include/drm/drm_property.h
index f66fdb47551c..13e8c17d1c79 100644
--- a/include/drm/drm_property.h
+++ b/include/drm/drm_property.h
@@ -200,9 +200,8 @@ struct drm_property {
* Blobs are used to store bigger values than what fits directly into the 64
* bits available for a &drm_property.
*
- * Blobs are reference counted using drm_property_reference_blob() and
- * drm_property_unreference_blob(). They are created using
- * drm_property_create_blob().
+ * Blobs are reference counted using drm_property_blob_get() and
+ * drm_property_blob_put(). They are created using drm_property_create_blob().
*/
struct drm_property_blob {
struct drm_mode_object base;
@@ -274,8 +273,34 @@ int drm_property_replace_global_blob(struct drm_device *dev,
const void *data,
struct drm_mode_object *obj_holds_id,
struct drm_property *prop_holds_id);
-struct drm_property_blob *drm_property_reference_blob(struct drm_property_blob *blob);
-void drm_property_unreference_blob(struct drm_property_blob *blob);
+struct drm_property_blob *drm_property_blob_get(struct drm_property_blob *blob);
+void drm_property_blob_put(struct drm_property_blob *blob);
+
+/**
+ * drm_property_reference_blob - acquire a blob property reference
+ * @blob: DRM blob property
+ *
+ * This is a compatibility alias for drm_property_blob_get() and should not be
+ * used by new code.
+ */
+static inline struct drm_property_blob *
+drm_property_reference_blob(struct drm_property_blob *blob)
+{
+ return drm_property_blob_get(blob);
+}
+
+/**
+ * drm_property_unreference_blob - release a blob property reference
+ * @blob: DRM blob property
+ *
+ * This is a compatibility alias for drm_property_blob_put() and should not be
+ * used by new code.
+ */
+static inline void
+drm_property_unreference_blob(struct drm_property_blob *blob)
+{
+ drm_property_blob_put(blob);
+}
/**
* drm_connector_find - find property object
diff --git a/include/drm/drm_scdc_helper.h b/include/drm/drm_scdc_helper.h
new file mode 100644
index 000000000000..c25122bb490a
--- /dev/null
+++ b/include/drm/drm_scdc_helper.h
@@ -0,0 +1,161 @@
+/*
+ * Copyright (c) 2015 NVIDIA Corporation. All rights reserved.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sub license,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice (including the
+ * next paragraph) shall be included in all copies or substantial portions
+ * of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL
+ * THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ */
+
+#ifndef DRM_SCDC_HELPER_H
+#define DRM_SCDC_HELPER_H
+
+#include <linux/i2c.h>
+#include <linux/types.h>
+
+#define SCDC_SINK_VERSION 0x01
+
+#define SCDC_SOURCE_VERSION 0x02
+
+#define SCDC_UPDATE_0 0x10
+#define SCDC_READ_REQUEST_TEST (1 << 2)
+#define SCDC_CED_UPDATE (1 << 1)
+#define SCDC_STATUS_UPDATE (1 << 0)
+
+#define SCDC_UPDATE_1 0x11
+
+#define SCDC_TMDS_CONFIG 0x20
+#define SCDC_TMDS_BIT_CLOCK_RATIO_BY_40 (1 << 1)
+#define SCDC_TMDS_BIT_CLOCK_RATIO_BY_10 (0 << 1)
+#define SCDC_SCRAMBLING_ENABLE (1 << 0)
+
+#define SCDC_SCRAMBLER_STATUS 0x21
+#define SCDC_SCRAMBLING_STATUS (1 << 0)
+
+#define SCDC_CONFIG_0 0x30
+#define SCDC_READ_REQUEST_ENABLE (1 << 0)
+
+#define SCDC_STATUS_FLAGS_0 0x40
+#define SCDC_CH2_LOCK (1 << 3)
+#define SCDC_CH1_LOCK (1 << 2)
+#define SCDC_CH0_LOCK (1 << 1)
+#define SCDC_CH_LOCK_MASK (SCDC_CH2_LOCK | SCDC_CH1_LOCK | SCDC_CH0_LOCK)
+#define SCDC_CLOCK_DETECT (1 << 0)
+
+#define SCDC_STATUS_FLAGS_1 0x41
+
+#define SCDC_ERR_DET_0_L 0x50
+#define SCDC_ERR_DET_0_H 0x51
+#define SCDC_ERR_DET_1_L 0x52
+#define SCDC_ERR_DET_1_H 0x53
+#define SCDC_ERR_DET_2_L 0x54
+#define SCDC_ERR_DET_2_H 0x55
+#define SCDC_CHANNEL_VALID (1 << 7)
+
+#define SCDC_ERR_DET_CHECKSUM 0x56
+
+#define SCDC_TEST_CONFIG_0 0xc0
+#define SCDC_TEST_READ_REQUEST (1 << 7)
+#define SCDC_TEST_READ_REQUEST_DELAY(x) ((x) & 0x7f)
+
+#define SCDC_MANUFACTURER_IEEE_OUI 0xd0
+#define SCDC_MANUFACTURER_IEEE_OUI_SIZE 3
+
+#define SCDC_DEVICE_ID 0xd3
+#define SCDC_DEVICE_ID_SIZE 8
+
+#define SCDC_DEVICE_HARDWARE_REVISION 0xdb
+#define SCDC_GET_DEVICE_HARDWARE_REVISION_MAJOR(x) (((x) >> 4) & 0xf)
+#define SCDC_GET_DEVICE_HARDWARE_REVISION_MINOR(x) (((x) >> 0) & 0xf)
+
+#define SCDC_DEVICE_SOFTWARE_MAJOR_REVISION 0xdc
+#define SCDC_DEVICE_SOFTWARE_MINOR_REVISION 0xdd
+
+#define SCDC_MANUFACTURER_SPECIFIC 0xde
+#define SCDC_MANUFACTURER_SPECIFIC_SIZE 34
+
+ssize_t drm_scdc_read(struct i2c_adapter *adapter, u8 offset, void *buffer,
+ size_t size);
+ssize_t drm_scdc_write(struct i2c_adapter *adapter, u8 offset,
+ const void *buffer, size_t size);
+
+/**
+ * drm_scdc_readb - read a single byte from SCDC
+ * @adapter: I2C adapter
+ * @offset: offset of register to read
+ * @value: return location for the register value
+ *
+ * Reads a single byte from SCDC. This is a convenience wrapper around the
+ * drm_scdc_read() function.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_scdc_readb(struct i2c_adapter *adapter, u8 offset,
+ u8 *value)
+{
+ return drm_scdc_read(adapter, offset, value, sizeof(*value));
+}
+
+/**
+ * drm_scdc_writeb - write a single byte to SCDC
+ * @adapter: I2C adapter
+ * @offset: offset of register to read
+ * @value: return location for the register value
+ *
+ * Writes a single byte to SCDC. This is a convenience wrapper around the
+ * drm_scdc_write() function.
+ *
+ * Returns:
+ * 0 on success or a negative error code on failure.
+ */
+static inline int drm_scdc_writeb(struct i2c_adapter *adapter, u8 offset,
+ u8 value)
+{
+ return drm_scdc_write(adapter, offset, &value, sizeof(value));
+}
+
+bool drm_scdc_get_scrambling_status(struct i2c_adapter *adapter);
+
+/**
+ * drm_scdc_set_scrambling - enable scrambling
+ * @adapter: I2C adapter for DDC channel
+ * @enable: whether to enable or disable scrambling
+ *
+ * Writes the TMDS config register over SCDC channel, and:
+ * enables scrambling when enable = 1
+ * disables scrambling when enable = 0
+ *
+ * Returns:
+ * True if scrambling is set/reset successfully, false otherwise.
+ */
+bool drm_scdc_set_scrambling(struct i2c_adapter *adapter, bool enable);
+
+/**
+ * drm_scdc_set_high_tmds_clock_ratio - set TMDS clock ratio
+ * @adapter: I2C adapter for DDC channel
+ * @set: set or reset the high clock ratio
+ *
+ * Writes to the TMDS config register over SCDC channel, and:
+ * sets TMDS clock ratio to 1/40 when set = 1
+ * sets TMDS clock ratio to 1/10 when set = 0
+ *
+ * Returns:
+ * True if write is successful, false otherwise.
+ */
+bool drm_scdc_set_high_tmds_clock_ratio(struct i2c_adapter *adapter, bool set);
+#endif
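To show how the helpers above are meant to combine, here is a hedged sketch of configuring scrambling for a TMDS character rate above 340 MHz; the wrapper name and the decision to drive both writes from one flag are assumptions, not part of the patch:

#include <drm/drm_scdc_helper.h>

/* Hypothetical wrapper: the 1/40 clock ratio and scrambling go together. */
static void example_scdc_configure(struct i2c_adapter *ddc, bool above_340mhz)
{
	drm_scdc_set_high_tmds_clock_ratio(ddc, above_340mhz);
	drm_scdc_set_scrambling(ddc, above_340mhz);
}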
diff --git a/include/drm/drm_simple_kms_helper.h b/include/drm/drm_simple_kms_helper.h
index fffbb95a0915..2d36538e4a17 100644
--- a/include/drm/drm_simple_kms_helper.h
+++ b/include/drm/drm_simple_kms_helper.h
@@ -72,7 +72,7 @@ struct drm_simple_display_pipe_funcs {
* the hardware lacks vblank support entirely.
*/
void (*update)(struct drm_simple_display_pipe *pipe,
- struct drm_plane_state *plane_state);
+ struct drm_plane_state *old_plane_state);
/**
* @prepare_fb:
diff --git a/include/drm/drm_sysfs.h b/include/drm/drm_sysfs.h
index 1d8e033fde67..70c9a1074aca 100644
--- a/include/drm/drm_sysfs.h
+++ b/include/drm/drm_sysfs.h
@@ -1,12 +1,12 @@
#ifndef _DRM_SYSFS_H_
#define _DRM_SYSFS_H_
-/**
- * This minimalistic include file is intended for users (read TTM) that
- * don't want to include the full drmP.h file.
- */
+struct drm_device;
+struct device;
-extern int drm_class_device_register(struct device *dev);
-extern void drm_class_device_unregister(struct device *dev);
+int drm_class_device_register(struct device *dev);
+void drm_class_device_unregister(struct device *dev);
+
+void drm_sysfs_hotplug_event(struct drm_device *dev);
#endif
diff --git a/include/drm/drm_vma_manager.h b/include/drm/drm_vma_manager.h
index 9c03895dc479..d84d52f6d2b1 100644
--- a/include/drm/drm_vma_manager.h
+++ b/include/drm/drm_vma_manager.h
@@ -25,7 +25,6 @@
#include <drm/drm_mm.h>
#include <linux/mm.h>
-#include <linux/module.h>
#include <linux/rbtree.h>
#include <linux/spinlock.h>
#include <linux/types.h>
diff --git a/include/drm/i915_pciids.h b/include/drm/i915_pciids.h
index a1dd21d6b723..27e0dbaa6c0e 100644
--- a/include/drm/i915_pciids.h
+++ b/include/drm/i915_pciids.h
@@ -47,6 +47,14 @@
0x030000, 0xff0000, \
(unsigned long) info }
+#define INTEL_I810_IDS(info) \
+ INTEL_VGA_DEVICE(0x7121, info), /* I810 */ \
+ INTEL_VGA_DEVICE(0x7123, info), /* I810_DC100 */ \
+ INTEL_VGA_DEVICE(0x7125, info) /* I810_E */
+
+#define INTEL_I815_IDS(info) \
+ INTEL_VGA_DEVICE(0x1132, info) /* I815 */
+
#define INTEL_I830_IDS(info) \
INTEL_VGA_DEVICE(0x3577, info)
@@ -265,7 +273,8 @@
INTEL_VGA_DEVICE(0x1923, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x1926, info), /* ULT GT3 */ \
INTEL_VGA_DEVICE(0x1927, info), /* ULT GT3 */ \
- INTEL_VGA_DEVICE(0x192B, info) /* Halo GT3 */ \
+ INTEL_VGA_DEVICE(0x192B, info), /* Halo GT3 */ \
+ INTEL_VGA_DEVICE(0x192D, info) /* SRV GT3 */
#define INTEL_SKL_GT4_IDS(info) \
INTEL_VGA_DEVICE(0x1932, info), /* DT GT4 */ \
diff --git a/include/drm/tinydrm/tinydrm.h b/include/drm/tinydrm/tinydrm.h
index cf9ca207b8b1..00b800df4d1b 100644
--- a/include/drm/tinydrm/tinydrm.h
+++ b/include/drm/tinydrm/tinydrm.h
@@ -58,8 +58,7 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe)
.gem_prime_mmap = drm_gem_cma_prime_mmap, \
.dumb_create = drm_gem_cma_dumb_create, \
.dumb_map_offset = drm_gem_cma_dumb_map_offset, \
- .dumb_destroy = drm_gem_dumb_destroy, \
- .fops = &tinydrm_fops
+ .dumb_destroy = drm_gem_dumb_destroy
/**
* TINYDRM_MODE - tinydrm display mode
@@ -84,7 +83,6 @@ pipe_to_tinydrm(struct drm_simple_display_pipe *pipe)
.type = DRM_MODE_TYPE_DRIVER, \
.clock = 1 /* pass validation */
-extern const struct file_operations tinydrm_fops;
void tinydrm_lastclose(struct drm_device *drm);
void tinydrm_gem_cma_free_object(struct drm_gem_object *gem_obj);
struct drm_gem_object *
diff --git a/include/drm/ttm/ttm_bo_api.h b/include/drm/ttm/ttm_bo_api.h
index 8f619f499e55..fa07be197945 100644
--- a/include/drm/ttm/ttm_bo_api.h
+++ b/include/drm/ttm/ttm_bo_api.h
@@ -440,6 +440,60 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
unsigned struct_size);
/**
+ * ttm_bo_init_reserved
+ *
+ * @bdev: Pointer to a ttm_bo_device struct.
+ * @bo: Pointer to a ttm_buffer_object to be initialized.
+ * @size: Requested size of buffer object.
+ * @type: Requested type of buffer object.
+ * @placement: Initial placement for the buffer object.
+ * @page_alignment: Data alignment in pages.
+ * @interruptible: If needing to sleep to wait for GPU resources,
+ * sleep interruptible.
+ * @persistent_swap_storage: Usually the swap storage is deleted for buffers
+ * pinned in physical memory. If this behaviour is not desired, this member
+ * holds a pointer to a persistent shmem object. Typically, this would
+ * point to the shmem object backing a GEM object if TTM is used to back a
+ * GEM user interface.
+ * @acc_size: Accounted size for this object.
+ * @sg: Scatter/gather table describing the backing pages, or NULL.
+ * @resv: Pointer to a reservation_object, or NULL to let ttm allocate one.
+ * @destroy: Destroy function. Use NULL for kfree().
+ *
+ * This function initializes a pre-allocated struct ttm_buffer_object.
+ * As this object may be part of a larger structure, this function,
+ * together with the @destroy function,
+ * enables driver-specific objects derived from a ttm_buffer_object.
+ *
+ * On successful return, the caller owns an object kref to @bo. The kref and
+ * list_kref are usually set to 1, but note that in some situations, other
+ * tasks may already be holding references to @bo as well.
+ * Furthermore, if resv == NULL, the buffer's reservation lock will be held,
+ * and it is the caller's responsibility to call ttm_bo_unreserve.
+ *
+ * If a failure occurs, the function will call the @destroy function, or
+ * kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
+ * illegal and will likely cause memory corruption.
+ *
+ * Returns
+ * -ENOMEM: Out of memory.
+ * -EINVAL: Invalid placement flags.
+ * -ERESTARTSYS: Interrupted by signal while sleeping waiting for resources.
+ */
+
+extern int ttm_bo_init_reserved(struct ttm_bo_device *bdev,
+ struct ttm_buffer_object *bo,
+ unsigned long size,
+ enum ttm_bo_type type,
+ struct ttm_placement *placement,
+ uint32_t page_alignment,
+ bool interruptible,
+ struct file *persistent_swap_storage,
+ size_t acc_size,
+ struct sg_table *sg,
+ struct reservation_object *resv,
+ void (*destroy) (struct ttm_buffer_object *));
+
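A hedged sketch of the calling convention described above: with @resv passed as NULL the object comes back reserved, so the caller finishes its setup and then drops the reservation. The surrounding names (foo_bo, its tbo member, placement, acc_size, page_alignment and foo_bo_destroy()) are placeholders:

	int ret;

	ret = ttm_bo_init_reserved(bdev, &foo_bo->tbo, size,
				   ttm_bo_type_device, &placement,
				   page_alignment, true /* interruptible */,
				   NULL /* persistent_swap_storage */,
				   acc_size, NULL /* sg */,
				   NULL /* resv: let ttm allocate one */,
				   foo_bo_destroy);
	if (ret)
		return ret;	/* the BO was already released via foo_bo_destroy() */

	/* ... driver-specific setup while the new BO is still reserved ... */

	ttm_bo_unreserve(&foo_bo->tbo);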
+/**
* ttm_bo_init
*
* @bdev: Pointer to a ttm_bo_device struct.
@@ -463,7 +517,11 @@ size_t ttm_bo_dma_acc_size(struct ttm_bo_device *bdev,
* As this object may be part of a larger structure, this function,
* together with the @destroy function,
* enables driver-specific objects derived from a ttm_buffer_object.
- * On successful return, the object kref and list_kref are set to 1.
+ *
+ * On successful return, the caller owns an object kref to @bo. The kref and
+ * list_kref are usually set to 1, but note that in some situations, other
+ * tasks may already be holding references to @bo as well.
+ *
* If a failure occurs, the function will call the @destroy function, or
* kfree() if @destroy is NULL. Thus, after a failure, dereferencing @bo is
* illegal and will likely cause memory corruption.
@@ -653,6 +711,17 @@ extern int ttm_fbdev_mmap(struct vm_area_struct *vma,
struct ttm_buffer_object *bo);
/**
+ * ttm_bo_default_io_mem_pfn - get a pfn for a page offset
+ *
+ * @bo: the BO we need to look up the pfn for
+ * @page_offset: offset inside the BO to look up.
+ *
+ * Calculate the PFN for iomem-based mappings during a page fault.
+ */
+unsigned long ttm_bo_default_io_mem_pfn(struct ttm_buffer_object *bo,
+ unsigned long page_offset);
+
+/**
* ttm_bo_mmap - mmap out of the ttm device address space.
*
* @filp: filp as input from the mmap method.
diff --git a/include/drm/ttm/ttm_bo_driver.h b/include/drm/ttm/ttm_bo_driver.h
index 8145773c582c..6bbd34d25a8d 100644
--- a/include/drm/ttm/ttm_bo_driver.h
+++ b/include/drm/ttm/ttm_bo_driver.h
@@ -42,7 +42,7 @@
#include <linux/spinlock.h>
#include <linux/reservation.h>
-#define TTM_MAX_BO_PRIORITY 16U
+#define TTM_MAX_BO_PRIORITY 4U
struct ttm_backend_func {
/**
@@ -462,6 +462,15 @@ struct ttm_bo_driver {
struct ttm_mem_reg *mem);
void (*io_mem_free)(struct ttm_bo_device *bdev,
struct ttm_mem_reg *mem);
+
+ /**
+ * Return the pfn for a given page_offset inside the BO.
+ *
+ * @bo: the BO to look up the pfn for
+ * @page_offset: the offset to look up
+ */
+ unsigned long (*io_mem_pfn)(struct ttm_buffer_object *bo,
+ unsigned long page_offset);
};
/**
diff --git a/include/drm/ttm/ttm_placement.h b/include/drm/ttm/ttm_placement.h
index 932be0c8086e..e88a8e39767b 100644
--- a/include/drm/ttm/ttm_placement.h
+++ b/include/drm/ttm/ttm_placement.h
@@ -63,6 +63,7 @@
#define TTM_PL_FLAG_CACHED (1 << 16)
#define TTM_PL_FLAG_UNCACHED (1 << 17)
#define TTM_PL_FLAG_WC (1 << 18)
+#define TTM_PL_FLAG_CONTIGUOUS (1 << 19)
#define TTM_PL_FLAG_NO_EVICT (1 << 21)
#define TTM_PL_FLAG_TOPDOWN (1 << 22)
diff --git a/include/dt-bindings/clock/gxbb-clkc.h b/include/dt-bindings/clock/gxbb-clkc.h
index 692846c7941b..3190e30b9398 100644
--- a/include/dt-bindings/clock/gxbb-clkc.h
+++ b/include/dt-bindings/clock/gxbb-clkc.h
@@ -10,12 +10,18 @@
#define CLKID_FCLK_DIV2 4
#define CLKID_FCLK_DIV3 5
#define CLKID_FCLK_DIV4 6
+#define CLKID_GP0_PLL 9
#define CLKID_CLK81 12
#define CLKID_MPLL2 15
-#define CLKID_SPI 34
#define CLKID_I2C 22
#define CLKID_SAR_ADC 23
+#define CLKID_RNG0 25
+#define CLKID_SPI 34
#define CLKID_ETH 36
+#define CLKID_AIU_GLUE 38
+#define CLKID_I2S_OUT 40
+#define CLKID_MIXER_IFACE 44
+#define CLKID_AIU 47
#define CLKID_USB0 50
#define CLKID_USB1 51
#define CLKID_USB 55
@@ -24,11 +30,17 @@
#define CLKID_USB0_DDR_BRIDGE 65
#define CLKID_SANA 69
#define CLKID_GCLK_VENCI_INT0 77
+#define CLKID_AOCLK_GATE 80
#define CLKID_AO_I2C 93
#define CLKID_SD_EMMC_A 94
#define CLKID_SD_EMMC_B 95
#define CLKID_SD_EMMC_C 96
#define CLKID_SAR_ADC_CLK 97
#define CLKID_SAR_ADC_SEL 98
+#define CLKID_MALI_0_SEL 100
+#define CLKID_MALI_0 102
+#define CLKID_MALI_1_SEL 103
+#define CLKID_MALI_1 105
+#define CLKID_MALI 106
#endif /* __GXBB_CLKC_H */
diff --git a/include/dt-bindings/clock/r7s72100-clock.h b/include/dt-bindings/clock/r7s72100-clock.h
index ce09915c298f..bc256d31099a 100644
--- a/include/dt-bindings/clock/r7s72100-clock.h
+++ b/include/dt-bindings/clock/r7s72100-clock.h
@@ -29,6 +29,9 @@
#define R7S72100_CLK_OSTM0 1
#define R7S72100_CLK_OSTM1 0
+/* MSTP6 */
+#define R7S72100_CLK_RTC 0
+
/* MSTP7 */
#define R7S72100_CLK_ETHER 4
@@ -49,7 +52,9 @@
#define R7S72100_CLK_SPI4 3
/* MSTP12 */
-#define R7S72100_CLK_SDHI0 3
-#define R7S72100_CLK_SDHI1 2
+#define R7S72100_CLK_SDHI00 3
+#define R7S72100_CLK_SDHI01 2
+#define R7S72100_CLK_SDHI10 1
+#define R7S72100_CLK_SDHI11 0
#endif /* __DT_BINDINGS_CLOCK_R7S72100_H__ */
diff --git a/include/dt-bindings/clock/r8a73a4-clock.h b/include/dt-bindings/clock/r8a73a4-clock.h
index dd11ecdf837e..4b3668157257 100644
--- a/include/dt-bindings/clock/r8a73a4-clock.h
+++ b/include/dt-bindings/clock/r8a73a4-clock.h
@@ -54,6 +54,7 @@
#define R8A73A4_CLK_IIC3 11
#define R8A73A4_CLK_IIC4 10
#define R8A73A4_CLK_IIC5 9
+#define R8A73A4_CLK_INTC_SYS 8
#define R8A73A4_CLK_IRQC 7
/* MSTP5 */
diff --git a/include/dt-bindings/clock/r8a7790-clock.h b/include/dt-bindings/clock/r8a7790-clock.h
index fa5e8da809f2..20641fa68e73 100644
--- a/include/dt-bindings/clock/r8a7790-clock.h
+++ b/include/dt-bindings/clock/r8a7790-clock.h
@@ -82,6 +82,7 @@
/* MSTP4 */
#define R8A7790_CLK_IRQC 7
+#define R8A7790_CLK_INTC_SYS 8
/* MSTP5 */
#define R8A7790_CLK_AUDIO_DMAC1 1
diff --git a/include/dt-bindings/clock/r8a7791-clock.h b/include/dt-bindings/clock/r8a7791-clock.h
index ffa11379b3f0..adc50dc31ab3 100644
--- a/include/dt-bindings/clock/r8a7791-clock.h
+++ b/include/dt-bindings/clock/r8a7791-clock.h
@@ -72,6 +72,7 @@
/* MSTP4 */
#define R8A7791_CLK_IRQC 7
+#define R8A7791_CLK_INTC_SYS 8
/* MSTP5 */
#define R8A7791_CLK_AUDIO_DMAC1 1
diff --git a/include/dt-bindings/clock/r8a7792-clock.h b/include/dt-bindings/clock/r8a7792-clock.h
index 9a8b392ceb00..5be90bc23bd7 100644
--- a/include/dt-bindings/clock/r8a7792-clock.h
+++ b/include/dt-bindings/clock/r8a7792-clock.h
@@ -17,7 +17,6 @@
#define R8A7792_CLK_PLL3 3
#define R8A7792_CLK_LB 4
#define R8A7792_CLK_QSPI 5
-#define R8A7792_CLK_Z 6
/* MSTP0 */
#define R8A7792_CLK_MSIOF0 0
@@ -45,6 +44,7 @@
/* MSTP4 */
#define R8A7792_CLK_IRQC 7
+#define R8A7792_CLK_INTC_SYS 8
/* MSTP5 */
#define R8A7792_CLK_AUDIO_DMAC0 2
diff --git a/include/dt-bindings/clock/r8a7793-clock.h b/include/dt-bindings/clock/r8a7793-clock.h
index efcbc594fe82..7318d45d4e7e 100644
--- a/include/dt-bindings/clock/r8a7793-clock.h
+++ b/include/dt-bindings/clock/r8a7793-clock.h
@@ -77,10 +77,11 @@
/* MSTP4 */
#define R8A7793_CLK_IRQC 7
+#define R8A7793_CLK_INTC_SYS 8
/* MSTP5 */
-#define R8A7793_CLK_AUDIO_DMAC1 1
-#define R8A7793_CLK_AUDIO_DMAC0 2
+#define R8A7793_CLK_AUDIO_DMAC1 1
+#define R8A7793_CLK_AUDIO_DMAC0 2
#define R8A7793_CLK_ADSP_MOD 6
#define R8A7793_CLK_THERMAL 22
#define R8A7793_CLK_PWM 23
diff --git a/include/dt-bindings/clock/r8a7794-clock.h b/include/dt-bindings/clock/r8a7794-clock.h
index 88e64846cf37..93e99c3ffc8d 100644
--- a/include/dt-bindings/clock/r8a7794-clock.h
+++ b/include/dt-bindings/clock/r8a7794-clock.h
@@ -64,6 +64,7 @@
/* MSTP4 */
#define R8A7794_CLK_IRQC 7
+#define R8A7794_CLK_INTC_SYS 8
/* MSTP5 */
#define R8A7794_CLK_AUDIO_DMAC0 2
@@ -81,6 +82,7 @@
#define R8A7794_CLK_SCIF2 19
#define R8A7794_CLK_SCIF1 20
#define R8A7794_CLK_SCIF0 21
+#define R8A7794_CLK_DU1 23
#define R8A7794_CLK_DU0 24
/* MSTP8 */
diff --git a/include/dt-bindings/genpd/k2g.h b/include/dt-bindings/genpd/k2g.h
new file mode 100644
index 000000000000..1f31f17e19eb
--- /dev/null
+++ b/include/dt-bindings/genpd/k2g.h
@@ -0,0 +1,90 @@
+/*
+ * TI K2G SoC Device definitions
+ *
+ * Copyright (C) 2015-2017 Texas Instruments Incorporated - http://www.ti.com/
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DT_BINDINGS_GENPD_K2G_H
+#define _DT_BINDINGS_GENPD_K2G_H
+
+/* Documented in http://processors.wiki.ti.com/index.php/TISCI */
+
+#define K2G_DEV_PMMC0 0x0000
+#define K2G_DEV_MLB0 0x0001
+#define K2G_DEV_DSS0 0x0002
+#define K2G_DEV_MCBSP0 0x0003
+#define K2G_DEV_MCASP0 0x0004
+#define K2G_DEV_MCASP1 0x0005
+#define K2G_DEV_MCASP2 0x0006
+#define K2G_DEV_DCAN0 0x0008
+#define K2G_DEV_DCAN1 0x0009
+#define K2G_DEV_EMIF0 0x000a
+#define K2G_DEV_MMCHS0 0x000b
+#define K2G_DEV_MMCHS1 0x000c
+#define K2G_DEV_GPMC0 0x000d
+#define K2G_DEV_ELM0 0x000e
+#define K2G_DEV_SPI0 0x0010
+#define K2G_DEV_SPI1 0x0011
+#define K2G_DEV_SPI2 0x0012
+#define K2G_DEV_SPI3 0x0013
+#define K2G_DEV_ICSS0 0x0014
+#define K2G_DEV_ICSS1 0x0015
+#define K2G_DEV_USB0 0x0016
+#define K2G_DEV_USB1 0x0017
+#define K2G_DEV_NSS0 0x0018
+#define K2G_DEV_PCIE0 0x0019
+#define K2G_DEV_GPIO0 0x001b
+#define K2G_DEV_GPIO1 0x001c
+#define K2G_DEV_TIMER64_0 0x001d
+#define K2G_DEV_TIMER64_1 0x001e
+#define K2G_DEV_TIMER64_2 0x001f
+#define K2G_DEV_TIMER64_3 0x0020
+#define K2G_DEV_TIMER64_4 0x0021
+#define K2G_DEV_TIMER64_5 0x0022
+#define K2G_DEV_TIMER64_6 0x0023
+#define K2G_DEV_MSGMGR0 0x0025
+#define K2G_DEV_BOOTCFG0 0x0026
+#define K2G_DEV_ARM_BOOTROM0 0x0027
+#define K2G_DEV_DSP_BOOTROM0 0x0029
+#define K2G_DEV_DEBUGSS0 0x002b
+#define K2G_DEV_UART0 0x002c
+#define K2G_DEV_UART1 0x002d
+#define K2G_DEV_UART2 0x002e
+#define K2G_DEV_EHRPWM0 0x002f
+#define K2G_DEV_EHRPWM1 0x0030
+#define K2G_DEV_EHRPWM2 0x0031
+#define K2G_DEV_EHRPWM3 0x0032
+#define K2G_DEV_EHRPWM4 0x0033
+#define K2G_DEV_EHRPWM5 0x0034
+#define K2G_DEV_EQEP0 0x0035
+#define K2G_DEV_EQEP1 0x0036
+#define K2G_DEV_EQEP2 0x0037
+#define K2G_DEV_ECAP0 0x0038
+#define K2G_DEV_ECAP1 0x0039
+#define K2G_DEV_I2C0 0x003a
+#define K2G_DEV_I2C1 0x003b
+#define K2G_DEV_I2C2 0x003c
+#define K2G_DEV_EDMA0 0x003f
+#define K2G_DEV_SEMAPHORE0 0x0040
+#define K2G_DEV_INTC0 0x0041
+#define K2G_DEV_GIC0 0x0042
+#define K2G_DEV_QSPI0 0x0043
+#define K2G_DEV_ARM_64B_COUNTER0 0x0044
+#define K2G_DEV_TETRIS0 0x0045
+#define K2G_DEV_CGEM0 0x0046
+#define K2G_DEV_MSMC0 0x0047
+#define K2G_DEV_CBASS0 0x0049
+#define K2G_DEV_BOARD0 0x004c
+#define K2G_DEV_EDMA1 0x004f
+
+#endif
diff --git a/include/dt-bindings/gpio/gpio.h b/include/dt-bindings/gpio/gpio.h
index c673d2c87c60..b4f54da694eb 100644
--- a/include/dt-bindings/gpio/gpio.h
+++ b/include/dt-bindings/gpio/gpio.h
@@ -17,11 +17,15 @@
#define GPIO_PUSH_PULL 0
#define GPIO_SINGLE_ENDED 2
+/* Bit 2 expresses open drain or open source */
+#define GPIO_LINE_OPEN_SOURCE 0
+#define GPIO_LINE_OPEN_DRAIN 4
+
/*
- * Open Drain/Collector is the combination of single-ended active low,
- * Open Source/Emitter is the combination of single-ended active high.
+ * Open Drain/Collector is the combination of single-ended and open drain.
+ * Open Source/Emitter is the combination of single-ended and open source.
*/
-#define GPIO_OPEN_DRAIN (GPIO_SINGLE_ENDED | GPIO_ACTIVE_LOW)
-#define GPIO_OPEN_SOURCE (GPIO_SINGLE_ENDED | GPIO_ACTIVE_HIGH)
+#define GPIO_OPEN_DRAIN (GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_DRAIN)
+#define GPIO_OPEN_SOURCE (GPIO_SINGLE_ENDED | GPIO_LINE_OPEN_SOURCE)
#endif
diff --git a/include/dt-bindings/mfd/stm32f7-rcc.h b/include/dt-bindings/mfd/stm32f7-rcc.h
new file mode 100644
index 000000000000..e36cc69959c7
--- /dev/null
+++ b/include/dt-bindings/mfd/stm32f7-rcc.h
@@ -0,0 +1,112 @@
+/*
+ * This header provides constants for the STM32F7 RCC IP
+ */
+
+#ifndef _DT_BINDINGS_MFD_STM32F7_RCC_H
+#define _DT_BINDINGS_MFD_STM32F7_RCC_H
+
+/* AHB1 */
+#define STM32F7_RCC_AHB1_GPIOA 0
+#define STM32F7_RCC_AHB1_GPIOB 1
+#define STM32F7_RCC_AHB1_GPIOC 2
+#define STM32F7_RCC_AHB1_GPIOD 3
+#define STM32F7_RCC_AHB1_GPIOE 4
+#define STM32F7_RCC_AHB1_GPIOF 5
+#define STM32F7_RCC_AHB1_GPIOG 6
+#define STM32F7_RCC_AHB1_GPIOH 7
+#define STM32F7_RCC_AHB1_GPIOI 8
+#define STM32F7_RCC_AHB1_GPIOJ 9
+#define STM32F7_RCC_AHB1_GPIOK 10
+#define STM32F7_RCC_AHB1_CRC 12
+#define STM32F7_RCC_AHB1_BKPSRAM 18
+#define STM32F7_RCC_AHB1_DTCMRAM 20
+#define STM32F7_RCC_AHB1_DMA1 21
+#define STM32F7_RCC_AHB1_DMA2 22
+#define STM32F7_RCC_AHB1_DMA2D 23
+#define STM32F7_RCC_AHB1_ETHMAC 25
+#define STM32F7_RCC_AHB1_ETHMACTX 26
+#define STM32F7_RCC_AHB1_ETHMACRX 27
+#define STM32F7_RCC_AHB1_ETHMACPTP 28
+#define STM32F7_RCC_AHB1_OTGHS 29
+#define STM32F7_RCC_AHB1_OTGHSULPI 30
+
+#define STM32F7_AHB1_RESET(bit) (STM32F7_RCC_AHB1_##bit + (0x10 * 8))
+#define STM32F7_AHB1_CLOCK(bit) (STM32F7_RCC_AHB1_##bit)
+
+
+/* AHB2 */
+#define STM32F7_RCC_AHB2_DCMI 0
+#define STM32F7_RCC_AHB2_CRYP 4
+#define STM32F7_RCC_AHB2_HASH 5
+#define STM32F7_RCC_AHB2_RNG 6
+#define STM32F7_RCC_AHB2_OTGFS 7
+
+#define STM32F7_AHB2_RESET(bit) (STM32F7_RCC_AHB2_##bit + (0x14 * 8))
+#define STM32F7_AHB2_CLOCK(bit) (STM32F7_RCC_AHB2_##bit + 0x20)
+
+/* AHB3 */
+#define STM32F7_RCC_AHB3_FMC 0
+#define STM32F7_RCC_AHB3_QSPI 1
+
+#define STM32F7_AHB3_RESET(bit) (STM32F7_RCC_AHB3_##bit + (0x18 * 8))
+#define STM32F7_AHB3_CLOCK(bit) (STM32F7_RCC_AHB3_##bit + 0x40)
+
+/* APB1 */
+#define STM32F7_RCC_APB1_TIM2 0
+#define STM32F7_RCC_APB1_TIM3 1
+#define STM32F7_RCC_APB1_TIM4 2
+#define STM32F7_RCC_APB1_TIM5 3
+#define STM32F7_RCC_APB1_TIM6 4
+#define STM32F7_RCC_APB1_TIM7 5
+#define STM32F7_RCC_APB1_TIM12 6
+#define STM32F7_RCC_APB1_TIM13 7
+#define STM32F7_RCC_APB1_TIM14 8
+#define STM32F7_RCC_APB1_LPTIM1 9
+#define STM32F7_RCC_APB1_WWDG 11
+#define STM32F7_RCC_APB1_SPI2 14
+#define STM32F7_RCC_APB1_SPI3 15
+#define STM32F7_RCC_APB1_SPDIFRX 16
+#define STM32F7_RCC_APB1_UART2 17
+#define STM32F7_RCC_APB1_UART3 18
+#define STM32F7_RCC_APB1_UART4 19
+#define STM32F7_RCC_APB1_UART5 20
+#define STM32F7_RCC_APB1_I2C1 21
+#define STM32F7_RCC_APB1_I2C2 22
+#define STM32F7_RCC_APB1_I2C3 23
+#define STM32F7_RCC_APB1_I2C4 24
+#define STM32F7_RCC_APB1_CAN1 25
+#define STM32F7_RCC_APB1_CAN2 26
+#define STM32F7_RCC_APB1_CEC 27
+#define STM32F7_RCC_APB1_PWR 28
+#define STM32F7_RCC_APB1_DAC 29
+#define STM32F7_RCC_APB1_UART7 30
+#define STM32F7_RCC_APB1_UART8 31
+
+#define STM32F7_APB1_RESET(bit) (STM32F7_RCC_APB1_##bit + (0x20 * 8))
+#define STM32F7_APB1_CLOCK(bit) (STM32F7_RCC_APB1_##bit + 0x80)
+
+/* APB2 */
+#define STM32F7_RCC_APB2_TIM1 0
+#define STM32F7_RCC_APB2_TIM8 1
+#define STM32F7_RCC_APB2_USART1 4
+#define STM32F7_RCC_APB2_USART6 5
+#define STM32F7_RCC_APB2_ADC1 8
+#define STM32F7_RCC_APB2_ADC2 9
+#define STM32F7_RCC_APB2_ADC3 10
+#define STM32F7_RCC_APB2_SDMMC1 11
+#define STM32F7_RCC_APB2_SPI1 12
+#define STM32F7_RCC_APB2_SPI4 13
+#define STM32F7_RCC_APB2_SYSCFG 14
+#define STM32F7_RCC_APB2_TIM9 16
+#define STM32F7_RCC_APB2_TIM10 17
+#define STM32F7_RCC_APB2_TIM11 18
+#define STM32F7_RCC_APB2_SPI5 20
+#define STM32F7_RCC_APB2_SPI6 21
+#define STM32F7_RCC_APB2_SAI1 22
+#define STM32F7_RCC_APB2_SAI2 23
+#define STM32F7_RCC_APB2_LTDC 26
+
+#define STM32F7_APB2_RESET(bit) (STM32F7_RCC_APB2_##bit + (0x24 * 8))
+#define STM32F7_APB2_CLOCK(bit) (STM32F7_RCC_APB2_##bit + 0xA0)
+
+#endif /* _DT_BINDINGS_MFD_STM32F7_RCC_H */
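A hedged compile-time check illustrating how the RESET() macros above fold the RCC register offset into the index; the UART2 example and the use of _Static_assert are purely for demonstration and not part of the patch:

#include <dt-bindings/mfd/stm32f7-rcc.h>

/* UART2 is bit 17 of APB1; its reset index adds the 0x20 register offset
 * expressed in bits (offset * 8). */
_Static_assert(STM32F7_APB1_RESET(UART2) == 17 + (0x20 * 8),
	       "APB1 reset index = bit position + register offset * 8");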
diff --git a/include/dt-bindings/pinctrl/hisi.h b/include/dt-bindings/pinctrl/hisi.h
index 38f1ea879ea1..0359bfdc9119 100644
--- a/include/dt-bindings/pinctrl/hisi.h
+++ b/include/dt-bindings/pinctrl/hisi.h
@@ -56,4 +56,19 @@
#define DRIVE4_08MA (4 << 4)
#define DRIVE4_10MA (6 << 4)
+/* drive strength definition for hi3660 */
+#define DRIVE6_MASK (15 << 4)
+#define DRIVE6_04MA (0 << 4)
+#define DRIVE6_12MA (4 << 4)
+#define DRIVE6_19MA (8 << 4)
+#define DRIVE6_27MA (10 << 4)
+#define DRIVE6_32MA (15 << 4)
+#define DRIVE7_02MA (0 << 4)
+#define DRIVE7_04MA (1 << 4)
+#define DRIVE7_06MA (2 << 4)
+#define DRIVE7_08MA (3 << 4)
+#define DRIVE7_10MA (4 << 4)
+#define DRIVE7_12MA (5 << 4)
+#define DRIVE7_14MA (6 << 4)
+#define DRIVE7_16MA (7 << 4)
#endif
diff --git a/include/dt-bindings/pinctrl/mt7623-pinfunc.h b/include/dt-bindings/pinctrl/mt7623-pinfunc.h
index 2f00bdc42442..436a87be864a 100644
--- a/include/dt-bindings/pinctrl/mt7623-pinfunc.h
+++ b/include/dt-bindings/pinctrl/mt7623-pinfunc.h
@@ -185,6 +185,12 @@
#define MT7623_PIN_56_SPI0_MO_FUNC_SPI0_MO (MTK_PIN_NO(56) | 1)
#define MT7623_PIN_56_SPI0_MO_FUNC_SPI0_MI (MTK_PIN_NO(56) | 2)
+#define MT7623_PIN_57_SDA1_FUNC_GPIO57 (MTK_PIN_NO(57) | 0)
+#define MT7623_PIN_57_SDA1_FUNC_SDA1 (MTK_PIN_NO(57) | 1)
+
+#define MT7623_PIN_58_SCL1_FUNC_GPIO58 (MTK_PIN_NO(58) | 0)
+#define MT7623_PIN_58_SCL1_FUNC_SCL1 (MTK_PIN_NO(58) | 1)
+
#define MT7623_PIN_60_WB_RSTB_FUNC_GPIO60 (MTK_PIN_NO(60) | 0)
#define MT7623_PIN_60_WB_RSTB_FUNC_WB_RSTB (MTK_PIN_NO(60) | 1)
@@ -244,6 +250,22 @@
#define MT7623_PIN_76_SCL0_FUNC_GPIO76 (MTK_PIN_NO(76) | 0)
#define MT7623_PIN_76_SCL0_FUNC_SCL0 (MTK_PIN_NO(76) | 1)
+#define MT7623_PIN_79_URXD0_FUNC_GPIO79 (MTK_PIN_NO(79) | 0)
+#define MT7623_PIN_79_URXD0_FUNC_URXD0 (MTK_PIN_NO(79) | 1)
+#define MT7623_PIN_79_URXD0_FUNC_UTXD0 (MTK_PIN_NO(79) | 2)
+
+#define MT7623_PIN_80_UTXD0_FUNC_GPIO80 (MTK_PIN_NO(80) | 0)
+#define MT7623_PIN_80_UTXD0_FUNC_UTXD0 (MTK_PIN_NO(80) | 1)
+#define MT7623_PIN_80_UTXD0_FUNC_URXD0 (MTK_PIN_NO(80) | 2)
+
+#define MT7623_PIN_81_URXD1_FUNC_GPIO81 (MTK_PIN_NO(81) | 0)
+#define MT7623_PIN_81_URXD1_FUNC_URXD1 (MTK_PIN_NO(81) | 1)
+#define MT7623_PIN_81_URXD1_FUNC_UTXD1 (MTK_PIN_NO(81) | 2)
+
+#define MT7623_PIN_82_UTXD1_FUNC_GPIO82 (MTK_PIN_NO(82) | 0)
+#define MT7623_PIN_82_UTXD1_FUNC_UTXD1 (MTK_PIN_NO(82) | 1)
+#define MT7623_PIN_82_UTXD1_FUNC_URXD1 (MTK_PIN_NO(82) | 2)
+
#define MT7623_PIN_83_LCM_RST_FUNC_GPIO83 (MTK_PIN_NO(83) | 0)
#define MT7623_PIN_83_LCM_RST_FUNC_LCM_RST (MTK_PIN_NO(83) | 1)
@@ -351,10 +373,10 @@
#define MT7623_PIN_122_GPIO122_FUNC_SDA2 (MTK_PIN_NO(122) | 4)
#define MT7623_PIN_122_GPIO122_FUNC_URXD0 (MTK_PIN_NO(122) | 5)
-#define MT7623_PIN_123_GPIO123_FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
-#define MT7623_PIN_123_GPIO123_FUNC_TEST (MTK_PIN_NO(123) | 1)
-#define MT7623_PIN_123_GPIO123_FUNC_SCL2 (MTK_PIN_NO(123) | 4)
-#define MT7623_PIN_123_GPIO123_FUNC_UTXD0 (MTK_PIN_NO(123) | 5)
+#define MT7623_PIN_123_HTPLG_FUNC_GPIO123 (MTK_PIN_NO(123) | 0)
+#define MT7623_PIN_123_HTPLG_FUNC_HTPLG (MTK_PIN_NO(123) | 1)
+#define MT7623_PIN_123_HTPLG_FUNC_SCL2 (MTK_PIN_NO(123) | 4)
+#define MT7623_PIN_123_HTPLG_FUNC_UTXD0 (MTK_PIN_NO(123) | 5)
#define MT7623_PIN_124_GPIO124_FUNC_GPIO124 (MTK_PIN_NO(124) | 0)
#define MT7623_PIN_124_GPIO124_FUNC_TEST (MTK_PIN_NO(124) | 1)
diff --git a/include/dt-bindings/power/imx7-power.h b/include/dt-bindings/power/imx7-power.h
new file mode 100644
index 000000000000..3a181e410517
--- /dev/null
+++ b/include/dt-bindings/power/imx7-power.h
@@ -0,0 +1,16 @@
+/*
+ * Copyright (C) 2017 Impinj
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __DT_BINDINGS_IMX7_POWER_H__
+#define __DT_BINDINGS_IMX7_POWER_H__
+
+#define IMX7_POWER_DOMAIN_MIPI_PHY 0
+#define IMX7_POWER_DOMAIN_PCIE_PHY 1
+#define IMX7_POWER_DOMAIN_USB_HSIC_PHY 2
+
+#endif
diff --git a/include/dt-bindings/power/r8a7795-sysc.h b/include/dt-bindings/power/r8a7795-sysc.h
index ee2e26ba605e..ad679eeda137 100644
--- a/include/dt-bindings/power/r8a7795-sysc.h
+++ b/include/dt-bindings/power/r8a7795-sysc.h
@@ -33,7 +33,7 @@
#define R8A7795_PD_CA53_SCU 21
#define R8A7795_PD_3DG_E 22
#define R8A7795_PD_A3IR 24
-#define R8A7795_PD_A2VC0 25
+#define R8A7795_PD_A2VC0 25 /* ES1.x only */
#define R8A7795_PD_A2VC1 26
/* Always-on power area */
diff --git a/include/dt-bindings/reset/altr,rst-mgr-a10sr.h b/include/dt-bindings/reset/altr,rst-mgr-a10sr.h
new file mode 100644
index 000000000000..9855925e5256
--- /dev/null
+++ b/include/dt-bindings/reset/altr,rst-mgr-a10sr.h
@@ -0,0 +1,33 @@
+/*
+ * Copyright Intel Corporation (C) 2017. All Rights Reserved
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ *
+ * Reset binding definitions for Altera Arria10 MAX5 System Resource Chip
+ *
+ * Adapted from altr,rst-mgr-a10.h
+ */
+
+#ifndef _DT_BINDINGS_RESET_ALTR_RST_MGR_A10SR_H
+#define _DT_BINDINGS_RESET_ALTR_RST_MGR_A10SR_H
+
+/* Peripheral PHY resets */
+#define A10SR_RESET_ENET_HPS 0
+#define A10SR_RESET_PCIE 1
+#define A10SR_RESET_FILE 2
+#define A10SR_RESET_BQSPI 3
+#define A10SR_RESET_USB 4
+
+#define A10SR_RESET_NUM 5
+
+#endif
diff --git a/include/dt-bindings/reset/imx7-reset.h b/include/dt-bindings/reset/imx7-reset.h
new file mode 100644
index 000000000000..63948170c7b2
--- /dev/null
+++ b/include/dt-bindings/reset/imx7-reset.h
@@ -0,0 +1,62 @@
+/*
+ * Copyright (C) 2017 Impinj, Inc.
+ *
+ * Author: Andrey Smirnov <andrew.smirnov@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef DT_BINDING_RESET_IMX7_H
+#define DT_BINDING_RESET_IMX7_H
+
+#define IMX7_RESET_A7_CORE_POR_RESET0 0
+#define IMX7_RESET_A7_CORE_POR_RESET1 1
+#define IMX7_RESET_A7_CORE_RESET0 2
+#define IMX7_RESET_A7_CORE_RESET1 3
+#define IMX7_RESET_A7_DBG_RESET0 4
+#define IMX7_RESET_A7_DBG_RESET1 5
+#define IMX7_RESET_A7_ETM_RESET0 6
+#define IMX7_RESET_A7_ETM_RESET1 7
+#define IMX7_RESET_A7_SOC_DBG_RESET 8
+#define IMX7_RESET_A7_L2RESET 9
+#define IMX7_RESET_SW_M4C_RST 10
+#define IMX7_RESET_SW_M4P_RST 11
+#define IMX7_RESET_EIM_RST 12
+#define IMX7_RESET_HSICPHY_PORT_RST 13
+#define IMX7_RESET_USBPHY1_POR 14
+#define IMX7_RESET_USBPHY1_PORT_RST 15
+#define IMX7_RESET_USBPHY2_POR 16
+#define IMX7_RESET_USBPHY2_PORT_RST 17
+#define IMX7_RESET_MIPI_PHY_MRST 18
+#define IMX7_RESET_MIPI_PHY_SRST 19
+
+/*
+ * IMX7_RESET_PCIEPHY is a logical reset line combining PCIEPHY_BTN
+ * and PCIEPHY_G_RST
+ */
+#define IMX7_RESET_PCIEPHY 20
+#define IMX7_RESET_PCIEPHY_PERST 21
+
+/*
+ * IMX7_RESET_PCIE_CTRL_APPS_EN is not strictly a reset line, but it
+ * can be used to inhibit the PCIe LTSSM, so, in a way, it can be thought
+ * of as one.
+ */
+#define IMX7_RESET_PCIE_CTRL_APPS_EN 22
+#define IMX7_RESET_DDRC_PRST 23
+#define IMX7_RESET_DDRC_CORE_RST 24
+
+#define IMX7_RESET_NUM 25
+
+#endif
diff --git a/include/keys/system_keyring.h b/include/keys/system_keyring.h
index fbd4647767e9..359c2f936004 100644
--- a/include/keys/system_keyring.h
+++ b/include/keys/system_keyring.h
@@ -18,7 +18,8 @@
extern int restrict_link_by_builtin_trusted(struct key *keyring,
const struct key_type *type,
- const union key_payload *payload);
+ const union key_payload *payload,
+ struct key *restriction_key);
#else
#define restrict_link_by_builtin_trusted restrict_link_reject
@@ -28,11 +29,24 @@ extern int restrict_link_by_builtin_trusted(struct key *keyring,
extern int restrict_link_by_builtin_and_secondary_trusted(
struct key *keyring,
const struct key_type *type,
- const union key_payload *payload);
+ const union key_payload *payload,
+ struct key *restriction_key);
#else
#define restrict_link_by_builtin_and_secondary_trusted restrict_link_by_builtin_trusted
#endif
+#ifdef CONFIG_SYSTEM_BLACKLIST_KEYRING
+extern int mark_hash_blacklisted(const char *hash);
+extern int is_hash_blacklisted(const u8 *hash, size_t hash_len,
+ const char *type);
+#else
+static inline int is_hash_blacklisted(const u8 *hash, size_t hash_len,
+ const char *type)
+{
+ return 0;
+}
+#endif
+
#ifdef CONFIG_IMA_BLACKLIST_KEYRING
extern struct key *ima_blacklist_keyring;
diff --git a/include/kvm/arm_arch_timer.h b/include/kvm/arm_arch_timer.h
index fe797d6ef89d..295584f31a4e 100644
--- a/include/kvm/arm_arch_timer.h
+++ b/include/kvm/arm_arch_timer.h
@@ -63,6 +63,8 @@ int kvm_timer_vcpu_reset(struct kvm_vcpu *vcpu,
void kvm_timer_vcpu_init(struct kvm_vcpu *vcpu);
void kvm_timer_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_timer_sync_hwstate(struct kvm_vcpu *vcpu);
+bool kvm_timer_should_notify_user(struct kvm_vcpu *vcpu);
+void kvm_timer_update_run(struct kvm_vcpu *vcpu);
void kvm_timer_vcpu_terminate(struct kvm_vcpu *vcpu);
u64 kvm_arm_timer_get_reg(struct kvm_vcpu *, u64 regid);
diff --git a/include/kvm/arm_pmu.h b/include/kvm/arm_pmu.h
index 92e7e97ca8ff..1ab4633adf4f 100644
--- a/include/kvm/arm_pmu.h
+++ b/include/kvm/arm_pmu.h
@@ -50,6 +50,8 @@ void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu);
void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu);
+bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu);
+void kvm_pmu_update_run(struct kvm_vcpu *vcpu);
void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val);
void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu, u64 data,
@@ -85,6 +87,11 @@ static inline void kvm_pmu_enable_counter(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_overflow_set(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_flush_hwstate(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_sync_hwstate(struct kvm_vcpu *vcpu) {}
+static inline bool kvm_pmu_should_notify_user(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+static inline void kvm_pmu_update_run(struct kvm_vcpu *vcpu) {}
static inline void kvm_pmu_software_increment(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_handle_pmcr(struct kvm_vcpu *vcpu, u64 val) {}
static inline void kvm_pmu_set_counter_event_type(struct kvm_vcpu *vcpu,
diff --git a/include/kvm/arm_vgic.h b/include/kvm/arm_vgic.h
index c0b3d999c266..97b8d3728b31 100644
--- a/include/kvm/arm_vgic.h
+++ b/include/kvm/arm_vgic.h
@@ -148,7 +148,6 @@ struct vgic_its {
gpa_t vgic_its_base;
bool enabled;
- bool initialized;
struct vgic_io_device iodev;
struct kvm_device *dev;
@@ -162,6 +161,9 @@ struct vgic_its {
u32 creadr;
u32 cwriter;
+ /* migration ABI revision in use */
+ u32 abi_rev;
+
/* Protects the device and collection lists */
struct mutex its_lock;
struct list_head device_list;
@@ -225,8 +227,6 @@ struct vgic_dist {
struct vgic_v2_cpu_if {
u32 vgic_hcr;
u32 vgic_vmcr;
- u32 vgic_misr; /* Saved only */
- u64 vgic_eisr; /* Saved only */
u64 vgic_elrsr; /* Saved only */
u32 vgic_apr;
u32 vgic_lr[VGIC_V2_MAX_LRS];
@@ -236,8 +236,6 @@ struct vgic_v3_cpu_if {
u32 vgic_hcr;
u32 vgic_vmcr;
u32 vgic_sre; /* Restored only, change ignored */
- u32 vgic_misr; /* Saved only */
- u32 vgic_eisr; /* Saved only */
u32 vgic_elrsr; /* Saved only */
u32 vgic_ap0r[4];
u32 vgic_ap1r[4];
@@ -264,8 +262,6 @@ struct vgic_cpu {
*/
struct list_head ap_list_head;
- u64 live_lrs;
-
/*
* Members below are used with GICv3 emulation only and represent
* parts of the redistributor.
@@ -289,6 +285,7 @@ extern struct static_key_false vgic_v2_cpuif_trap;
int kvm_vgic_addr(struct kvm *kvm, unsigned long type, u64 *addr, bool write);
void kvm_vgic_early_init(struct kvm *kvm);
+int kvm_vgic_vcpu_init(struct kvm_vcpu *vcpu);
int kvm_vgic_create(struct kvm *kvm, u32 type);
void kvm_vgic_destroy(struct kvm *kvm);
void kvm_vgic_vcpu_early_init(struct kvm_vcpu *vcpu);
@@ -307,6 +304,9 @@ bool kvm_vgic_map_is_active(struct kvm_vcpu *vcpu, unsigned int virt_irq);
int kvm_vgic_vcpu_pending_irq(struct kvm_vcpu *vcpu);
+void kvm_vgic_load(struct kvm_vcpu *vcpu);
+void kvm_vgic_put(struct kvm_vcpu *vcpu);
+
#define irqchip_in_kernel(k) (!!((k)->arch.vgic.in_kernel))
#define vgic_initialized(k) ((k)->arch.vgic.initialized)
#define vgic_ready(k) ((k)->arch.vgic.ready)
diff --git a/include/linux/acpi.h b/include/linux/acpi.h
index 9b05886f9773..137e4a3d89c5 100644
--- a/include/linux/acpi.h
+++ b/include/linux/acpi.h
@@ -233,10 +233,6 @@ int acpi_numa_init (void);
int acpi_table_init (void);
int acpi_table_parse(char *id, acpi_tbl_table_handler handler);
-int __init acpi_parse_entries(char *id, unsigned long table_size,
- acpi_tbl_entry_handler handler,
- struct acpi_table_header *table_header,
- int entry_id, unsigned int max_entries);
int __init acpi_table_parse_entries(char *id, unsigned long table_size,
int entry_id,
acpi_tbl_entry_handler handler,
@@ -595,6 +591,13 @@ enum acpi_reconfig_event {
int acpi_reconfig_notifier_register(struct notifier_block *nb);
int acpi_reconfig_notifier_unregister(struct notifier_block *nb);
+#ifdef CONFIG_ACPI_GTDT
+int acpi_gtdt_init(struct acpi_table_header *table, int *platform_timer_count);
+int acpi_gtdt_map_ppi(int type);
+bool acpi_gtdt_c3stop(int type);
+int acpi_arch_timer_mem_init(struct arch_timer_mem *timer_mem, int *timer_count);
+#endif
+
#else /* !CONFIG_ACPI */
#define acpi_disabled 1
@@ -611,6 +614,11 @@ static inline bool acpi_dev_found(const char *hid)
return false;
}
+static inline bool acpi_dev_present(const char *hid, const char *uid, s64 hrv)
+{
+ return false;
+}
+
static inline bool is_acpi_node(struct fwnode_handle *fwnode)
{
return false;
@@ -762,8 +770,11 @@ static inline enum dev_dma_attr acpi_get_dma_attr(struct acpi_device *adev)
return DEV_DMA_NOT_SUPPORTED;
}
-static inline void acpi_dma_configure(struct device *dev,
- enum dev_dma_attr attr) { }
+static inline int acpi_dma_configure(struct device *dev,
+ enum dev_dma_attr attr)
+{
+ return 0;
+}
static inline void acpi_dma_deconfigure(struct device *dev) { }
@@ -949,6 +960,10 @@ static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev)
adev->driver_gpios = NULL;
}
+int devm_acpi_dev_add_driver_gpios(struct device *dev,
+ const struct acpi_gpio_mapping *gpios);
+void devm_acpi_dev_remove_driver_gpios(struct device *dev);
+
int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index);
#else
static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
@@ -958,6 +973,13 @@ static inline int acpi_dev_add_driver_gpios(struct acpi_device *adev,
}
static inline void acpi_dev_remove_driver_gpios(struct acpi_device *adev) {}
+static inline int devm_acpi_dev_add_driver_gpios(struct device *dev,
+ const struct acpi_gpio_mapping *gpios)
+{
+ return -ENXIO;
+}
+static inline void devm_acpi_dev_remove_driver_gpios(struct device *dev) {}
+
static inline int acpi_dev_gpio_irq_get(struct acpi_device *adev, int index)
{
return -ENXIO;
@@ -997,8 +1019,16 @@ int acpi_node_prop_read(struct fwnode_handle *fwnode, const char *propname,
int acpi_dev_prop_read(struct acpi_device *adev, const char *propname,
enum dev_prop_type proptype, void *val, size_t nval);
-struct fwnode_handle *acpi_get_next_subnode(struct device *dev,
- struct fwnode_handle *subnode);
+struct fwnode_handle *acpi_get_next_subnode(struct fwnode_handle *fwnode,
+ struct fwnode_handle *child);
+struct fwnode_handle *acpi_node_get_parent(struct fwnode_handle *fwnode);
+
+struct fwnode_handle *acpi_graph_get_next_endpoint(struct fwnode_handle *fwnode,
+ struct fwnode_handle *prev);
+int acpi_graph_get_remote_endpoint(struct fwnode_handle *fwnode,
+ struct fwnode_handle **remote,
+ struct fwnode_handle **port,
+ struct fwnode_handle **endpoint);
struct acpi_probe_entry;
typedef bool (*acpi_probe_entry_validate_subtbl)(struct acpi_subtable_header *,
@@ -1115,12 +1145,34 @@ static inline int acpi_dev_prop_read(struct acpi_device *adev,
return -ENXIO;
}
-static inline struct fwnode_handle *acpi_get_next_subnode(struct device *dev,
- struct fwnode_handle *subnode)
+static inline struct fwnode_handle *
+acpi_get_next_subnode(struct fwnode_handle *fwnode, struct fwnode_handle *child)
{
return NULL;
}
+static inline struct fwnode_handle *
+acpi_node_get_parent(struct fwnode_handle *fwnode)
+{
+ return NULL;
+}
+
+static inline struct fwnode_handle *
+acpi_graph_get_next_endpoint(struct fwnode_handle *fwnode,
+ struct fwnode_handle *prev)
+{
+ return ERR_PTR(-ENXIO);
+}
+
+static inline int
+acpi_graph_get_remote_endpoint(struct fwnode_handle *fwnode,
+ struct fwnode_handle **remote,
+ struct fwnode_handle **port,
+ struct fwnode_handle **endpoint)
+{
+ return -ENXIO;
+}
+
#define ACPI_DECLARE_PROBE_ENTRY(table, name, table_id, subtable, valid, data, fn) \
static const void * __acpi_table_##name[] \
__attribute__((unused)) \
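The devm_* variants of the driver-GPIO mapping calls added above tie the mapping's lifetime to the device, so drivers no longer need an explicit removal in their error and remove paths. A minimal sketch, assuming a hypothetical driver with a single "reset" GPIO at _CRS index 0 (mydrv_* names and the mapping contents are illustrative, not from this patch):

	static const struct acpi_gpio_params reset_gpio = { 0, 0, false };

	static const struct acpi_gpio_mapping mydrv_acpi_gpios[] = {
		{ "reset-gpios", &reset_gpio, 1 },
		{ }
	};

	static int mydrv_probe(struct platform_device *pdev)
	{
		/* mapping is dropped automatically when the driver detaches */
		int ret = devm_acpi_dev_add_driver_gpios(&pdev->dev, mydrv_acpi_gpios);

		if (ret)
			dev_dbg(&pdev->dev, "no ACPI GpioIo mapping installed\n");

		/* gpiod_get(&pdev->dev, "reset", ...) can now resolve by name */
		return 0;
	}
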
diff --git a/include/linux/acpi_iort.h b/include/linux/acpi_iort.h
index 77e08099e554..3ff9acea8616 100644
--- a/include/linux/acpi_iort.h
+++ b/include/linux/acpi_iort.h
@@ -34,6 +34,8 @@ void acpi_iort_init(void);
bool iort_node_match(u8 type);
u32 iort_msi_map_rid(struct device *dev, u32 req_id);
struct irq_domain *iort_get_device_domain(struct device *dev, u32 req_id);
+void acpi_configure_pmsi_domain(struct device *dev);
+int iort_pmsi_get_dev_id(struct device *dev, u32 *dev_id);
/* IOMMU interface */
void iort_set_dma_mask(struct device *dev);
const struct iommu_ops *iort_iommu_configure(struct device *dev);
@@ -45,6 +47,7 @@ static inline u32 iort_msi_map_rid(struct device *dev, u32 req_id)
static inline struct irq_domain *iort_get_device_domain(struct device *dev,
u32 req_id)
{ return NULL; }
+static inline void acpi_configure_pmsi_domain(struct device *dev) { }
/* IOMMU interface */
static inline void iort_set_dma_mask(struct device *dev) { }
static inline
@@ -52,7 +55,4 @@ const struct iommu_ops *iort_iommu_configure(struct device *dev)
{ return NULL; }
#endif
-#define IORT_ACPI_DECLARE(name, table_id, fn) \
- ACPI_DECLARE_PROBE_ENTRY(iort, name, table_id, 0, NULL, 0, fn)
-
#endif /* __ACPI_IORT_H__ */
diff --git a/include/linux/amba/pl080.h b/include/linux/amba/pl080.h
index 91b84a7f0539..580b5323a717 100644
--- a/include/linux/amba/pl080.h
+++ b/include/linux/amba/pl080.h
@@ -38,24 +38,16 @@
#define PL080_SOFT_LSREQ (0x2C)
#define PL080_CONFIG (0x30)
-#define PL080_CONFIG_M2_BE (1 << 2)
-#define PL080_CONFIG_M1_BE (1 << 1)
-#define PL080_CONFIG_ENABLE (1 << 0)
+#define PL080_CONFIG_M2_BE BIT(2)
+#define PL080_CONFIG_M1_BE BIT(1)
+#define PL080_CONFIG_ENABLE BIT(0)
#define PL080_SYNC (0x34)
/* Per channel configuration registers */
-#define PL080_Cx_STRIDE (0x20)
+/* Per channel configuration registers */
#define PL080_Cx_BASE(x) ((0x100 + (x * 0x20)))
-#define PL080_Cx_SRC_ADDR(x) ((0x100 + (x * 0x20)))
-#define PL080_Cx_DST_ADDR(x) ((0x104 + (x * 0x20)))
-#define PL080_Cx_LLI(x) ((0x108 + (x * 0x20)))
-#define PL080_Cx_CONTROL(x) ((0x10C + (x * 0x20)))
-#define PL080_Cx_CONFIG(x) ((0x110 + (x * 0x20)))
-#define PL080S_Cx_CONTROL2(x) ((0x110 + (x * 0x20)))
-#define PL080S_Cx_CONFIG(x) ((0x114 + (x * 0x20)))
-
#define PL080_CH_SRC_ADDR (0x00)
#define PL080_CH_DST_ADDR (0x04)
#define PL080_CH_LLI (0x08)
@@ -66,18 +58,18 @@
#define PL080_LLI_ADDR_MASK (0x3fffffff << 2)
#define PL080_LLI_ADDR_SHIFT (2)
-#define PL080_LLI_LM_AHB2 (1 << 0)
+#define PL080_LLI_LM_AHB2 BIT(0)
-#define PL080_CONTROL_TC_IRQ_EN (1 << 31)
+#define PL080_CONTROL_TC_IRQ_EN BIT(31)
#define PL080_CONTROL_PROT_MASK (0x7 << 28)
#define PL080_CONTROL_PROT_SHIFT (28)
-#define PL080_CONTROL_PROT_CACHE (1 << 30)
-#define PL080_CONTROL_PROT_BUFF (1 << 29)
-#define PL080_CONTROL_PROT_SYS (1 << 28)
-#define PL080_CONTROL_DST_INCR (1 << 27)
-#define PL080_CONTROL_SRC_INCR (1 << 26)
-#define PL080_CONTROL_DST_AHB2 (1 << 25)
-#define PL080_CONTROL_SRC_AHB2 (1 << 24)
+#define PL080_CONTROL_PROT_CACHE BIT(30)
+#define PL080_CONTROL_PROT_BUFF BIT(29)
+#define PL080_CONTROL_PROT_SYS BIT(28)
+#define PL080_CONTROL_DST_INCR BIT(27)
+#define PL080_CONTROL_SRC_INCR BIT(26)
+#define PL080_CONTROL_DST_AHB2 BIT(25)
+#define PL080_CONTROL_SRC_AHB2 BIT(24)
#define PL080_CONTROL_DWIDTH_MASK (0x7 << 21)
#define PL080_CONTROL_DWIDTH_SHIFT (21)
#define PL080_CONTROL_SWIDTH_MASK (0x7 << 18)
@@ -103,20 +95,20 @@
#define PL080_WIDTH_16BIT (0x1)
#define PL080_WIDTH_32BIT (0x2)
-#define PL080N_CONFIG_ITPROT (1 << 20)
-#define PL080N_CONFIG_SECPROT (1 << 19)
-#define PL080_CONFIG_HALT (1 << 18)
-#define PL080_CONFIG_ACTIVE (1 << 17) /* RO */
-#define PL080_CONFIG_LOCK (1 << 16)
-#define PL080_CONFIG_TC_IRQ_MASK (1 << 15)
-#define PL080_CONFIG_ERR_IRQ_MASK (1 << 14)
+#define PL080N_CONFIG_ITPROT BIT(20)
+#define PL080N_CONFIG_SECPROT BIT(19)
+#define PL080_CONFIG_HALT BIT(18)
+#define PL080_CONFIG_ACTIVE BIT(17) /* RO */
+#define PL080_CONFIG_LOCK BIT(16)
+#define PL080_CONFIG_TC_IRQ_MASK BIT(15)
+#define PL080_CONFIG_ERR_IRQ_MASK BIT(14)
#define PL080_CONFIG_FLOW_CONTROL_MASK (0x7 << 11)
#define PL080_CONFIG_FLOW_CONTROL_SHIFT (11)
#define PL080_CONFIG_DST_SEL_MASK (0xf << 6)
#define PL080_CONFIG_DST_SEL_SHIFT (6)
#define PL080_CONFIG_SRC_SEL_MASK (0xf << 1)
#define PL080_CONFIG_SRC_SEL_SHIFT (1)
-#define PL080_CONFIG_ENABLE (1 << 0)
+#define PL080_CONFIG_ENABLE BIT(0)
#define PL080_FLOW_MEM2MEM (0x0)
#define PL080_FLOW_MEM2PER (0x1)
diff --git a/include/linux/amba/pl330.h b/include/linux/amba/pl330.h
deleted file mode 100644
index fe93758e8403..000000000000
--- a/include/linux/amba/pl330.h
+++ /dev/null
@@ -1,35 +0,0 @@
-/* linux/include/linux/amba/pl330.h
- *
- * Copyright (C) 2010 Samsung Electronics Co. Ltd.
- * Jaswinder Singh <jassi.brar@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef __AMBA_PL330_H_
-#define __AMBA_PL330_H_
-
-#include <linux/dmaengine.h>
-
-struct dma_pl330_platdata {
- /*
- * Number of valid peripherals connected to DMAC.
- * This may be different from the value read from
- * CR0, as the PL330 implementation might have 'holes'
- * in the peri list or the peri could also be reached
- * from another DMAC which the platform prefers.
- */
- u8 nr_valid_peri;
- /* Array of valid peripherals */
- u8 *peri_id;
- /* Operational capabilities */
- dma_cap_mask_t cap_mask;
- /* Bytes to allocate for MC buffer */
- unsigned mcbuf_sz;
-};
-
-extern bool pl330_filter(struct dma_chan *chan, void *param);
-#endif /* __AMBA_PL330_H_ */
diff --git a/include/linux/amigaffs.h b/include/linux/amigaffs.h
deleted file mode 100644
index 43b41c06aa37..000000000000
--- a/include/linux/amigaffs.h
+++ /dev/null
@@ -1,144 +0,0 @@
-#ifndef AMIGAFFS_H
-#define AMIGAFFS_H
-
-#include <linux/types.h>
-#include <asm/byteorder.h>
-
-#define FS_OFS 0x444F5300
-#define FS_FFS 0x444F5301
-#define FS_INTLOFS 0x444F5302
-#define FS_INTLFFS 0x444F5303
-#define FS_DCOFS 0x444F5304
-#define FS_DCFFS 0x444F5305
-#define MUFS_FS 0x6d754653 /* 'muFS' */
-#define MUFS_OFS 0x6d754600 /* 'muF\0' */
-#define MUFS_FFS 0x6d754601 /* 'muF\1' */
-#define MUFS_INTLOFS 0x6d754602 /* 'muF\2' */
-#define MUFS_INTLFFS 0x6d754603 /* 'muF\3' */
-#define MUFS_DCOFS 0x6d754604 /* 'muF\4' */
-#define MUFS_DCFFS 0x6d754605 /* 'muF\5' */
-
-#define T_SHORT 2
-#define T_LIST 16
-#define T_DATA 8
-
-#define ST_LINKFILE -4
-#define ST_FILE -3
-#define ST_ROOT 1
-#define ST_USERDIR 2
-#define ST_SOFTLINK 3
-#define ST_LINKDIR 4
-
-#define AFFS_ROOT_BMAPS 25
-
-struct affs_date {
- __be32 days;
- __be32 mins;
- __be32 ticks;
-};
-
-struct affs_short_date {
- __be16 days;
- __be16 mins;
- __be16 ticks;
-};
-
-struct affs_root_head {
- __be32 ptype;
- __be32 spare1;
- __be32 spare2;
- __be32 hash_size;
- __be32 spare3;
- __be32 checksum;
- __be32 hashtable[1];
-};
-
-struct affs_root_tail {
- __be32 bm_flag;
- __be32 bm_blk[AFFS_ROOT_BMAPS];
- __be32 bm_ext;
- struct affs_date root_change;
- u8 disk_name[32];
- __be32 spare1;
- __be32 spare2;
- struct affs_date disk_change;
- struct affs_date disk_create;
- __be32 spare3;
- __be32 spare4;
- __be32 dcache;
- __be32 stype;
-};
-
-struct affs_head {
- __be32 ptype;
- __be32 key;
- __be32 block_count;
- __be32 spare1;
- __be32 first_data;
- __be32 checksum;
- __be32 table[1];
-};
-
-struct affs_tail {
- __be32 spare1;
- __be16 uid;
- __be16 gid;
- __be32 protect;
- __be32 size;
- u8 comment[92];
- struct affs_date change;
- u8 name[32];
- __be32 spare2;
- __be32 original;
- __be32 link_chain;
- __be32 spare[5];
- __be32 hash_chain;
- __be32 parent;
- __be32 extension;
- __be32 stype;
-};
-
-struct slink_front
-{
- __be32 ptype;
- __be32 key;
- __be32 spare1[3];
- __be32 checksum;
- u8 symname[1]; /* depends on block size */
-};
-
-struct affs_data_head
-{
- __be32 ptype;
- __be32 key;
- __be32 sequence;
- __be32 size;
- __be32 next;
- __be32 checksum;
- u8 data[1]; /* depends on block size */
-};
-
-/* Permission bits */
-
-#define FIBF_OTR_READ 0x8000
-#define FIBF_OTR_WRITE 0x4000
-#define FIBF_OTR_EXECUTE 0x2000
-#define FIBF_OTR_DELETE 0x1000
-#define FIBF_GRP_READ 0x0800
-#define FIBF_GRP_WRITE 0x0400
-#define FIBF_GRP_EXECUTE 0x0200
-#define FIBF_GRP_DELETE 0x0100
-
-#define FIBF_HIDDEN 0x0080
-#define FIBF_SCRIPT 0x0040
-#define FIBF_PURE 0x0020 /* no use under linux */
-#define FIBF_ARCHIVED 0x0010 /* never set, always cleared on write */
-#define FIBF_NOREAD 0x0008 /* 0 means allowed */
-#define FIBF_NOWRITE 0x0004 /* 0 means allowed */
-#define FIBF_NOEXECUTE 0x0002 /* 0 means allowed, ignored under linux */
-#define FIBF_NODELETE 0x0001 /* 0 means allowed */
-
-#define FIBF_OWNER 0x000F /* Bits pertaining to owner */
-#define FIBF_MASK 0xEE0E /* Bits modified by Linux */
-
-#endif
diff --git a/include/linux/ata.h b/include/linux/ata.h
index af6859b3a93d..ad7d9ee89ff0 100644
--- a/include/linux/ata.h
+++ b/include/linux/ata.h
@@ -817,11 +817,6 @@ static inline bool ata_id_sct_error_recovery_ctrl(const u16 *id)
return id[ATA_ID_SCT_CMD_XPORT] & (1 << 3) ? true : false;
}
-static inline bool ata_id_sct_write_same(const u16 *id)
-{
- return id[ATA_ID_SCT_CMD_XPORT] & (1 << 2) ? true : false;
-}
-
static inline bool ata_id_sct_long_sector_access(const u16 *id)
{
return id[ATA_ID_SCT_CMD_XPORT] & (1 << 1) ? true : false;
diff --git a/include/linux/atmel_serial.h b/include/linux/atmel_serial.h
deleted file mode 100644
index bd2560502f3c..000000000000
--- a/include/linux/atmel_serial.h
+++ /dev/null
@@ -1,169 +0,0 @@
-/*
- * include/linux/atmel_serial.h
- *
- * Copyright (C) 2005 Ivan Kokshaysky
- * Copyright (C) SAN People
- *
- * USART registers.
- * Based on AT91RM9200 datasheet revision E.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; either version 2 of the License, or
- * (at your option) any later version.
- */
-
-#ifndef ATMEL_SERIAL_H
-#define ATMEL_SERIAL_H
-
-#define ATMEL_US_CR 0x00 /* Control Register */
-#define ATMEL_US_RSTRX BIT(2) /* Reset Receiver */
-#define ATMEL_US_RSTTX BIT(3) /* Reset Transmitter */
-#define ATMEL_US_RXEN BIT(4) /* Receiver Enable */
-#define ATMEL_US_RXDIS BIT(5) /* Receiver Disable */
-#define ATMEL_US_TXEN BIT(6) /* Transmitter Enable */
-#define ATMEL_US_TXDIS BIT(7) /* Transmitter Disable */
-#define ATMEL_US_RSTSTA BIT(8) /* Reset Status Bits */
-#define ATMEL_US_STTBRK BIT(9) /* Start Break */
-#define ATMEL_US_STPBRK BIT(10) /* Stop Break */
-#define ATMEL_US_STTTO BIT(11) /* Start Time-out */
-#define ATMEL_US_SENDA BIT(12) /* Send Address */
-#define ATMEL_US_RSTIT BIT(13) /* Reset Iterations */
-#define ATMEL_US_RSTNACK BIT(14) /* Reset Non Acknowledge */
-#define ATMEL_US_RETTO BIT(15) /* Rearm Time-out */
-#define ATMEL_US_DTREN BIT(16) /* Data Terminal Ready Enable */
-#define ATMEL_US_DTRDIS BIT(17) /* Data Terminal Ready Disable */
-#define ATMEL_US_RTSEN BIT(18) /* Request To Send Enable */
-#define ATMEL_US_RTSDIS BIT(19) /* Request To Send Disable */
-#define ATMEL_US_TXFCLR BIT(24) /* Transmit FIFO Clear */
-#define ATMEL_US_RXFCLR BIT(25) /* Receive FIFO Clear */
-#define ATMEL_US_TXFLCLR BIT(26) /* Transmit FIFO Lock Clear */
-#define ATMEL_US_FIFOEN BIT(30) /* FIFO enable */
-#define ATMEL_US_FIFODIS BIT(31) /* FIFO disable */
-
-#define ATMEL_US_MR 0x04 /* Mode Register */
-#define ATMEL_US_USMODE GENMASK(3, 0) /* Mode of the USART */
-#define ATMEL_US_USMODE_NORMAL 0
-#define ATMEL_US_USMODE_RS485 1
-#define ATMEL_US_USMODE_HWHS 2
-#define ATMEL_US_USMODE_MODEM 3
-#define ATMEL_US_USMODE_ISO7816_T0 4
-#define ATMEL_US_USMODE_ISO7816_T1 6
-#define ATMEL_US_USMODE_IRDA 8
-#define ATMEL_US_USCLKS GENMASK(5, 4) /* Clock Selection */
-#define ATMEL_US_USCLKS_MCK (0 << 4)
-#define ATMEL_US_USCLKS_MCK_DIV8 (1 << 4)
-#define ATMEL_US_USCLKS_SCK (3 << 4)
-#define ATMEL_US_CHRL GENMASK(7, 6) /* Character Length */
-#define ATMEL_US_CHRL_5 (0 << 6)
-#define ATMEL_US_CHRL_6 (1 << 6)
-#define ATMEL_US_CHRL_7 (2 << 6)
-#define ATMEL_US_CHRL_8 (3 << 6)
-#define ATMEL_US_SYNC BIT(8) /* Synchronous Mode Select */
-#define ATMEL_US_PAR GENMASK(11, 9) /* Parity Type */
-#define ATMEL_US_PAR_EVEN (0 << 9)
-#define ATMEL_US_PAR_ODD (1 << 9)
-#define ATMEL_US_PAR_SPACE (2 << 9)
-#define ATMEL_US_PAR_MARK (3 << 9)
-#define ATMEL_US_PAR_NONE (4 << 9)
-#define ATMEL_US_PAR_MULTI_DROP (6 << 9)
-#define ATMEL_US_NBSTOP GENMASK(13, 12) /* Number of Stop Bits */
-#define ATMEL_US_NBSTOP_1 (0 << 12)
-#define ATMEL_US_NBSTOP_1_5 (1 << 12)
-#define ATMEL_US_NBSTOP_2 (2 << 12)
-#define ATMEL_US_CHMODE GENMASK(15, 14) /* Channel Mode */
-#define ATMEL_US_CHMODE_NORMAL (0 << 14)
-#define ATMEL_US_CHMODE_ECHO (1 << 14)
-#define ATMEL_US_CHMODE_LOC_LOOP (2 << 14)
-#define ATMEL_US_CHMODE_REM_LOOP (3 << 14)
-#define ATMEL_US_MSBF BIT(16) /* Bit Order */
-#define ATMEL_US_MODE9 BIT(17) /* 9-bit Character Length */
-#define ATMEL_US_CLKO BIT(18) /* Clock Output Select */
-#define ATMEL_US_OVER BIT(19) /* Oversampling Mode */
-#define ATMEL_US_INACK BIT(20) /* Inhibit Non Acknowledge */
-#define ATMEL_US_DSNACK BIT(21) /* Disable Successive NACK */
-#define ATMEL_US_MAX_ITER GENMASK(26, 24) /* Max Iterations */
-#define ATMEL_US_FILTER BIT(28) /* Infrared Receive Line Filter */
-
-#define ATMEL_US_IER 0x08 /* Interrupt Enable Register */
-#define ATMEL_US_RXRDY BIT(0) /* Receiver Ready */
-#define ATMEL_US_TXRDY BIT(1) /* Transmitter Ready */
-#define ATMEL_US_RXBRK BIT(2) /* Break Received / End of Break */
-#define ATMEL_US_ENDRX BIT(3) /* End of Receiver Transfer */
-#define ATMEL_US_ENDTX BIT(4) /* End of Transmitter Transfer */
-#define ATMEL_US_OVRE BIT(5) /* Overrun Error */
-#define ATMEL_US_FRAME BIT(6) /* Framing Error */
-#define ATMEL_US_PARE BIT(7) /* Parity Error */
-#define ATMEL_US_TIMEOUT BIT(8) /* Receiver Time-out */
-#define ATMEL_US_TXEMPTY BIT(9) /* Transmitter Empty */
-#define ATMEL_US_ITERATION BIT(10) /* Max number of Repetitions Reached */
-#define ATMEL_US_TXBUFE BIT(11) /* Transmission Buffer Empty */
-#define ATMEL_US_RXBUFF BIT(12) /* Reception Buffer Full */
-#define ATMEL_US_NACK BIT(13) /* Non Acknowledge */
-#define ATMEL_US_RIIC BIT(16) /* Ring Indicator Input Change */
-#define ATMEL_US_DSRIC BIT(17) /* Data Set Ready Input Change */
-#define ATMEL_US_DCDIC BIT(18) /* Data Carrier Detect Input Change */
-#define ATMEL_US_CTSIC BIT(19) /* Clear to Send Input Change */
-#define ATMEL_US_RI BIT(20) /* RI */
-#define ATMEL_US_DSR BIT(21) /* DSR */
-#define ATMEL_US_DCD BIT(22) /* DCD */
-#define ATMEL_US_CTS BIT(23) /* CTS */
-
-#define ATMEL_US_IDR 0x0c /* Interrupt Disable Register */
-#define ATMEL_US_IMR 0x10 /* Interrupt Mask Register */
-#define ATMEL_US_CSR 0x14 /* Channel Status Register */
-#define ATMEL_US_RHR 0x18 /* Receiver Holding Register */
-#define ATMEL_US_THR 0x1c /* Transmitter Holding Register */
-#define ATMEL_US_SYNH BIT(15) /* Transmit/Receive Sync */
-
-#define ATMEL_US_BRGR 0x20 /* Baud Rate Generator Register */
-#define ATMEL_US_CD GENMASK(15, 0) /* Clock Divider */
-#define ATMEL_US_FP_OFFSET 16 /* Fractional Part */
-#define ATMEL_US_FP_MASK 0x7
-
-#define ATMEL_US_RTOR 0x24 /* Receiver Time-out Register for USART */
-#define ATMEL_UA_RTOR 0x28 /* Receiver Time-out Register for UART */
-#define ATMEL_US_TO GENMASK(15, 0) /* Time-out Value */
-
-#define ATMEL_US_TTGR 0x28 /* Transmitter Timeguard Register */
-#define ATMEL_US_TG GENMASK(7, 0) /* Timeguard Value */
-
-#define ATMEL_US_FIDI 0x40 /* FI DI Ratio Register */
-#define ATMEL_US_NER 0x44 /* Number of Errors Register */
-#define ATMEL_US_IF 0x4c /* IrDA Filter Register */
-
-#define ATMEL_US_CMPR 0x90 /* Comparaison Register */
-#define ATMEL_US_FMR 0xa0 /* FIFO Mode Register */
-#define ATMEL_US_TXRDYM(data) (((data) & 0x3) << 0) /* TX Ready Mode */
-#define ATMEL_US_RXRDYM(data) (((data) & 0x3) << 4) /* RX Ready Mode */
-#define ATMEL_US_ONE_DATA 0x0
-#define ATMEL_US_TWO_DATA 0x1
-#define ATMEL_US_FOUR_DATA 0x2
-#define ATMEL_US_FRTSC BIT(7) /* FIFO RTS pin Control */
-#define ATMEL_US_TXFTHRES(thr) (((thr) & 0x3f) << 8) /* TX FIFO Threshold */
-#define ATMEL_US_RXFTHRES(thr) (((thr) & 0x3f) << 16) /* RX FIFO Threshold */
-#define ATMEL_US_RXFTHRES2(thr) (((thr) & 0x3f) << 24) /* RX FIFO Threshold2 */
-
-#define ATMEL_US_FLR 0xa4 /* FIFO Level Register */
-#define ATMEL_US_TXFL(reg) (((reg) >> 0) & 0x3f) /* TX FIFO Level */
-#define ATMEL_US_RXFL(reg) (((reg) >> 16) & 0x3f) /* RX FIFO Level */
-
-#define ATMEL_US_FIER 0xa8 /* FIFO Interrupt Enable Register */
-#define ATMEL_US_FIDR 0xac /* FIFO Interrupt Disable Register */
-#define ATMEL_US_FIMR 0xb0 /* FIFO Interrupt Mask Register */
-#define ATMEL_US_FESR 0xb4 /* FIFO Event Status Register */
-#define ATMEL_US_TXFEF BIT(0) /* Transmit FIFO Empty Flag */
-#define ATMEL_US_TXFFF BIT(1) /* Transmit FIFO Full Flag */
-#define ATMEL_US_TXFTHF BIT(2) /* Transmit FIFO Threshold Flag */
-#define ATMEL_US_RXFEF BIT(3) /* Receive FIFO Empty Flag */
-#define ATMEL_US_RXFFF BIT(4) /* Receive FIFO Full Flag */
-#define ATMEL_US_RXFTHF BIT(5) /* Receive FIFO Threshold Flag */
-#define ATMEL_US_TXFPTEF BIT(6) /* Transmit FIFO Pointer Error Flag */
-#define ATMEL_US_RXFPTEF BIT(7) /* Receive FIFO Pointer Error Flag */
-#define ATMEL_US_TXFLOCK BIT(8) /* Transmit FIFO Lock (FESR only) */
-#define ATMEL_US_RXFTHF2 BIT(9) /* Receive FIFO Threshold Flag 2 */
-
-#define ATMEL_US_NAME 0xf0 /* Ip Name */
-#define ATMEL_US_VERSION 0xfc /* Ip Version */
-
-#endif
diff --git a/include/linux/atomic.h b/include/linux/atomic.h
index e71835bf60a9..c56be7410130 100644
--- a/include/linux/atomic.h
+++ b/include/linux/atomic.h
@@ -423,6 +423,29 @@
#endif
#endif /* atomic_cmpxchg_relaxed */
+#ifndef atomic_try_cmpxchg
+
+#define __atomic_try_cmpxchg(type, _p, _po, _n) \
+({ \
+ typeof(_po) __po = (_po); \
+ typeof(*(_po)) __r, __o = *__po; \
+ __r = atomic_cmpxchg##type((_p), __o, (_n)); \
+ if (unlikely(__r != __o)) \
+ *__po = __r; \
+ likely(__r == __o); \
+})
+
+#define atomic_try_cmpxchg(_p, _po, _n) __atomic_try_cmpxchg(, _p, _po, _n)
+#define atomic_try_cmpxchg_relaxed(_p, _po, _n) __atomic_try_cmpxchg(_relaxed, _p, _po, _n)
+#define atomic_try_cmpxchg_acquire(_p, _po, _n) __atomic_try_cmpxchg(_acquire, _p, _po, _n)
+#define atomic_try_cmpxchg_release(_p, _po, _n) __atomic_try_cmpxchg(_release, _p, _po, _n)
+
+#else /* atomic_try_cmpxchg */
+#define atomic_try_cmpxchg_relaxed atomic_try_cmpxchg
+#define atomic_try_cmpxchg_acquire atomic_try_cmpxchg
+#define atomic_try_cmpxchg_release atomic_try_cmpxchg
+#endif /* atomic_try_cmpxchg */
+
/* cmpxchg_relaxed */
#ifndef cmpxchg_relaxed
#define cmpxchg_relaxed cmpxchg
@@ -996,6 +1019,29 @@ static inline int atomic_dec_if_positive(atomic_t *v)
#endif
#endif /* atomic64_cmpxchg_relaxed */
+#ifndef atomic64_try_cmpxchg
+
+#define __atomic64_try_cmpxchg(type, _p, _po, _n) \
+({ \
+ typeof(_po) __po = (_po); \
+ typeof(*(_po)) __r, __o = *__po; \
+ __r = atomic64_cmpxchg##type((_p), __o, (_n)); \
+ if (unlikely(__r != __o)) \
+ *__po = __r; \
+ likely(__r == __o); \
+})
+
+#define atomic64_try_cmpxchg(_p, _po, _n) __atomic64_try_cmpxchg(, _p, _po, _n)
+#define atomic64_try_cmpxchg_relaxed(_p, _po, _n) __atomic64_try_cmpxchg(_relaxed, _p, _po, _n)
+#define atomic64_try_cmpxchg_acquire(_p, _po, _n) __atomic64_try_cmpxchg(_acquire, _p, _po, _n)
+#define atomic64_try_cmpxchg_release(_p, _po, _n) __atomic64_try_cmpxchg(_release, _p, _po, _n)
+
+#else /* atomic64_try_cmpxchg */
+#define atomic64_try_cmpxchg_relaxed atomic64_try_cmpxchg
+#define atomic64_try_cmpxchg_acquire atomic64_try_cmpxchg
+#define atomic64_try_cmpxchg_release atomic64_try_cmpxchg
+#endif /* atomic64_try_cmpxchg */
+
#ifndef atomic64_andnot
static inline void atomic64_andnot(long long i, atomic64_t *v)
{
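The try_cmpxchg() family returns a boolean success indication and, on failure, writes the value it actually observed back through the expected-value pointer, so a retry loop does not need a separate atomic_read() per iteration. A minimal sketch of the intended calling pattern (the function name is illustrative, not from this patch):

	static inline void atomic_add_sketch(atomic_t *v, int a)
	{
		int old = atomic_read(v);

		/* on failure, 'old' has been refreshed with the current value */
		do {
		} while (!atomic_try_cmpxchg(v, &old, old + a));
	}
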
diff --git a/include/linux/audit.h b/include/linux/audit.h
index 504e784b7ffa..2150bdccfbab 100644
--- a/include/linux/audit.h
+++ b/include/linux/audit.h
@@ -163,8 +163,7 @@ extern void audit_log_task_info(struct audit_buffer *ab,
extern int audit_update_lsm_rules(void);
/* Private API (for audit.c only) */
-extern int audit_rule_change(int type, __u32 portid, int seq,
- void *data, size_t datasz);
+extern int audit_rule_change(int type, int seq, void *data, size_t datasz);
extern int audit_list_rules_send(struct sk_buff *request_skb, int seq);
extern u32 audit_enabled;
@@ -332,7 +331,7 @@ static inline void audit_ptrace(struct task_struct *t)
/* Private API (for audit.c only) */
extern unsigned int audit_serial(void);
extern int auditsc_get_stamp(struct audit_context *ctx,
- struct timespec *t, unsigned int *serial);
+ struct timespec64 *t, unsigned int *serial);
extern int audit_set_loginuid(kuid_t loginuid);
static inline kuid_t audit_get_loginuid(struct task_struct *tsk)
@@ -511,7 +510,7 @@ static inline void __audit_seccomp(unsigned long syscall, long signr, int code)
static inline void audit_seccomp(unsigned long syscall, long signr, int code)
{ }
static inline int auditsc_get_stamp(struct audit_context *ctx,
- struct timespec *t, unsigned int *serial)
+ struct timespec64 *t, unsigned int *serial)
{
return 0;
}
diff --git a/include/linux/backing-dev-defs.h b/include/linux/backing-dev-defs.h
index ad955817916d..866c433e7d32 100644
--- a/include/linux/backing-dev-defs.h
+++ b/include/linux/backing-dev-defs.h
@@ -21,6 +21,7 @@ struct dentry;
*/
enum wb_state {
WB_registered, /* bdi_register() was done */
+ WB_shutting_down, /* wb_shutdown() in progress */
WB_writeback_running, /* Writeback is in progress */
WB_has_dirty_io, /* Dirty inodes on ->b_{dirty|io|more_io} */
};
@@ -54,7 +55,9 @@ struct bdi_writeback_congested {
atomic_t refcnt; /* nr of attached wb's and blkg */
#ifdef CONFIG_CGROUP_WRITEBACK
- struct backing_dev_info *bdi; /* the associated bdi */
+ struct backing_dev_info *__bdi; /* the associated bdi, set to NULL
+ * on bdi unregistration. For memcg-wb
+ * internal use only! */
int blkcg_id; /* ID of the associated blkcg */
struct rb_node rb_node; /* on bdi->cgwb_congestion_tree */
#endif
@@ -143,7 +146,7 @@ struct backing_dev_info {
congested_fn *congested_fn; /* Function pointer if device is md/dm */
void *congested_data; /* Pointer to aux data for congested func */
- char *name;
+ const char *name;
struct kref refcnt; /* Reference counter for the structure */
unsigned int capabilities; /* Device capabilities */
@@ -161,7 +164,6 @@ struct backing_dev_info {
#ifdef CONFIG_CGROUP_WRITEBACK
struct radix_tree_root cgwb_tree; /* radix tree of active cgroup wbs */
struct rb_root cgwb_congested_tree; /* their congested states */
- atomic_t usage_cnt; /* counts both cgwbs and cgwb_contested's */
#else
struct bdi_writeback_congested *wb_congested;
#endif
diff --git a/include/linux/backing-dev.h b/include/linux/backing-dev.h
index c52a48cb9a66..557d84063934 100644
--- a/include/linux/backing-dev.h
+++ b/include/linux/backing-dev.h
@@ -17,8 +17,6 @@
#include <linux/backing-dev-defs.h>
#include <linux/slab.h>
-int __must_check bdi_init(struct backing_dev_info *bdi);
-
static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
{
kref_get(&bdi->refcnt);
@@ -27,16 +25,18 @@ static inline struct backing_dev_info *bdi_get(struct backing_dev_info *bdi)
void bdi_put(struct backing_dev_info *bdi);
-__printf(3, 4)
-int bdi_register(struct backing_dev_info *bdi, struct device *parent,
- const char *fmt, ...);
-int bdi_register_dev(struct backing_dev_info *bdi, dev_t dev);
+__printf(2, 3)
+int bdi_register(struct backing_dev_info *bdi, const char *fmt, ...);
+int bdi_register_va(struct backing_dev_info *bdi, const char *fmt,
+ va_list args);
int bdi_register_owner(struct backing_dev_info *bdi, struct device *owner);
void bdi_unregister(struct backing_dev_info *bdi);
-int __must_check bdi_setup_and_register(struct backing_dev_info *, char *);
-void bdi_destroy(struct backing_dev_info *bdi);
struct backing_dev_info *bdi_alloc_node(gfp_t gfp_mask, int node_id);
+static inline struct backing_dev_info *bdi_alloc(gfp_t gfp_mask)
+{
+ return bdi_alloc_node(gfp_mask, NUMA_NO_NODE);
+}
void wb_start_writeback(struct bdi_writeback *wb, long nr_pages,
bool range_cyclic, enum wb_reason reason);
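With bdi_init(), bdi_setup_and_register() and bdi_destroy() gone, a backing_dev_info is now always heap-allocated and reference counted. A rough sketch of the resulting lifecycle for a driver (the "mydev%d" name, 'instance' and the exact teardown order are illustrative assumptions):

	struct backing_dev_info *bdi;
	int err;

	bdi = bdi_alloc(GFP_KERNEL);			/* refcount starts at 1 */
	if (!bdi)
		return -ENOMEM;

	err = bdi_register(bdi, "mydev%d", instance);	/* no struct device parent */
	if (err) {
		bdi_put(bdi);
		return err;
	}

	/* ... I/O ... */

	bdi_unregister(bdi);
	bdi_put(bdi);					/* replaces bdi_destroy() */
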
diff --git a/include/linux/bcma/bcma_driver_pci.h b/include/linux/bcma/bcma_driver_pci.h
index 9657f11d48a7..bca6a5e4ca3d 100644
--- a/include/linux/bcma/bcma_driver_pci.h
+++ b/include/linux/bcma/bcma_driver_pci.h
@@ -80,7 +80,7 @@ struct pci_dev;
#define BCMA_CORE_PCI_MDIODATA_DEV_TX 0x1e /* SERDES TX Dev */
#define BCMA_CORE_PCI_MDIODATA_DEV_RX 0x1f /* SERDES RX Dev */
#define BCMA_CORE_PCI_PCIEIND_ADDR 0x0130 /* indirect access to the internal register */
-#define BCMA_CORE_PCI_PCIEIND_DATA 0x0134 /* Data to/from the internal regsiter */
+#define BCMA_CORE_PCI_PCIEIND_DATA 0x0134 /* Data to/from the internal register */
#define BCMA_CORE_PCI_CLKREQENCTRL 0x0138 /* >= rev 6, Clkreq rdma control */
#define BCMA_CORE_PCI_PCICFG0 0x0400 /* PCI config space 0 (rev >= 8) */
#define BCMA_CORE_PCI_PCICFG1 0x0500 /* PCI config space 1 (rev >= 8) */
diff --git a/include/linux/bio.h b/include/linux/bio.h
index 8e521194f6fc..d1b04b0e99cf 100644
--- a/include/linux/bio.h
+++ b/include/linux/bio.h
@@ -183,7 +183,7 @@ static inline void bio_advance_iter(struct bio *bio, struct bvec_iter *iter,
#define bio_iter_last(bvec, iter) ((iter).bi_size == (bvec).bv_len)
-static inline unsigned __bio_segments(struct bio *bio, struct bvec_iter *bvec)
+static inline unsigned bio_segments(struct bio *bio)
{
unsigned segs = 0;
struct bio_vec bv;
@@ -205,17 +205,12 @@ static inline unsigned __bio_segments(struct bio *bio, struct bvec_iter *bvec)
break;
}
- __bio_for_each_segment(bv, bio, iter, *bvec)
+ bio_for_each_segment(bv, bio, iter)
segs++;
return segs;
}
-static inline unsigned bio_segments(struct bio *bio)
-{
- return __bio_segments(bio, &bio->bi_iter);
-}
-
/*
* get a reference to a bio, so it won't disappear. the intended use is
* something like:
@@ -383,14 +378,12 @@ extern struct bio_set *bioset_create_nobvec(unsigned int, unsigned int);
extern void bioset_free(struct bio_set *);
extern mempool_t *biovec_create_pool(int pool_entries);
-extern struct bio *bio_alloc_bioset(gfp_t, int, struct bio_set *);
+extern struct bio *bio_alloc_bioset(gfp_t, unsigned int, struct bio_set *);
extern void bio_put(struct bio *);
extern void __bio_clone_fast(struct bio *, struct bio *);
extern struct bio *bio_clone_fast(struct bio *, gfp_t, struct bio_set *);
extern struct bio *bio_clone_bioset(struct bio *, gfp_t, struct bio_set *bs);
-extern struct bio *bio_clone_bioset_partial(struct bio *, gfp_t,
- struct bio_set *, int, int);
extern struct bio_set *fs_bio_set;
diff --git a/include/linux/blk-mq.h b/include/linux/blk-mq.h
index 9382c5da7a2e..c47aa248c640 100644
--- a/include/linux/blk-mq.h
+++ b/include/linux/blk-mq.h
@@ -15,7 +15,7 @@ struct blk_mq_hw_ctx {
unsigned long state; /* BLK_MQ_S_* flags */
} ____cacheline_aligned_in_smp;
- struct work_struct run_work;
+ struct delayed_work run_work;
cpumask_var_t cpumask;
int next_cpu;
int next_cpu_batch;
@@ -51,15 +51,17 @@ struct blk_mq_hw_ctx {
atomic_t nr_active;
- struct delayed_work delayed_run_work;
- struct delayed_work delay_work;
-
struct hlist_node cpuhp_dead;
struct kobject kobj;
unsigned long poll_considered;
unsigned long poll_invoked;
unsigned long poll_success;
+
+#ifdef CONFIG_BLK_DEBUG_FS
+ struct dentry *debugfs_dir;
+ struct dentry *sched_debugfs_dir;
+#endif
};
struct blk_mq_tag_set {
@@ -82,7 +84,6 @@ struct blk_mq_tag_set {
struct blk_mq_queue_data {
struct request *rq;
- struct list_head *list;
bool last;
};
@@ -90,9 +91,9 @@ typedef int (queue_rq_fn)(struct blk_mq_hw_ctx *, const struct blk_mq_queue_data
typedef enum blk_eh_timer_return (timeout_fn)(struct request *, bool);
typedef int (init_hctx_fn)(struct blk_mq_hw_ctx *, void *, unsigned int);
typedef void (exit_hctx_fn)(struct blk_mq_hw_ctx *, unsigned int);
-typedef int (init_request_fn)(void *, struct request *, unsigned int,
+typedef int (init_request_fn)(struct blk_mq_tag_set *set, struct request *,
unsigned int, unsigned int);
-typedef void (exit_request_fn)(void *, struct request *, unsigned int,
+typedef void (exit_request_fn)(struct blk_mq_tag_set *set, struct request *,
unsigned int);
typedef int (reinit_request_fn)(void *, struct request *);
@@ -143,6 +144,14 @@ struct blk_mq_ops {
reinit_request_fn *reinit_request;
map_queues_fn *map_queues;
+
+#ifdef CONFIG_BLK_DEBUG_FS
+ /*
+ * Used by the debugfs implementation to show driver-specific
+ * information about a request.
+ */
+ void (*show_rq)(struct seq_file *m, struct request *rq);
+#endif
};
enum {
@@ -153,7 +162,6 @@ enum {
BLK_MQ_F_SHOULD_MERGE = 1 << 0,
BLK_MQ_F_TAG_SHARED = 1 << 1,
BLK_MQ_F_SG_MERGE = 1 << 2,
- BLK_MQ_F_DEFER_ISSUE = 1 << 4,
BLK_MQ_F_BLOCKING = 1 << 5,
BLK_MQ_F_NO_SCHED = 1 << 6,
BLK_MQ_F_ALLOC_POLICY_START_BIT = 8,
@@ -163,6 +171,7 @@ enum {
BLK_MQ_S_TAG_ACTIVE = 1,
BLK_MQ_S_SCHED_RESTART = 2,
BLK_MQ_S_TAG_WAITING = 3,
+ BLK_MQ_S_START_ON_RUN = 4,
BLK_MQ_MAX_DEPTH = 10240,
@@ -230,7 +239,7 @@ void blk_mq_add_to_requeue_list(struct request *rq, bool at_head,
void blk_mq_kick_requeue_list(struct request_queue *q);
void blk_mq_delay_kick_requeue_list(struct request_queue *q, unsigned long msecs);
void blk_mq_abort_requeue_list(struct request_queue *q);
-void blk_mq_complete_request(struct request *rq, int error);
+void blk_mq_complete_request(struct request *rq);
bool blk_mq_queue_stopped(struct request_queue *q);
void blk_mq_stop_hw_queue(struct blk_mq_hw_ctx *hctx);
@@ -240,13 +249,14 @@ void blk_mq_start_hw_queues(struct request_queue *q);
void blk_mq_start_stopped_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_start_stopped_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_run_hw_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
+void blk_mq_run_hw_queue(struct blk_mq_hw_ctx *hctx, bool async);
void blk_mq_run_hw_queues(struct request_queue *q, bool async);
void blk_mq_delay_queue(struct blk_mq_hw_ctx *hctx, unsigned long msecs);
void blk_mq_tagset_busy_iter(struct blk_mq_tag_set *tagset,
busy_tag_iter_fn *fn, void *priv);
void blk_mq_freeze_queue(struct request_queue *q);
void blk_mq_unfreeze_queue(struct request_queue *q);
-void blk_mq_freeze_queue_start(struct request_queue *q);
+void blk_freeze_queue_start(struct request_queue *q);
void blk_mq_freeze_queue_wait(struct request_queue *q);
int blk_mq_freeze_queue_wait_timeout(struct request_queue *q,
unsigned long timeout);
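The new ->show_rq() hook gives block drivers a place to dump driver-private request state into the blk-mq debugfs files. A sketch of what a driver might wire up (the mydrv_* names and the pdu layout are hypothetical):

	static void mydrv_show_rq(struct seq_file *m, struct request *rq)
	{
		struct mydrv_cmd *cmd = blk_mq_rq_to_pdu(rq);

		seq_printf(m, "mydrv: state=%u retries=%u\n",
			   cmd->state, cmd->retries);
	}

	static const struct blk_mq_ops mydrv_mq_ops = {
		.queue_rq = mydrv_queue_rq,
	#ifdef CONFIG_BLK_DEBUG_FS
		.show_rq  = mydrv_show_rq,
	#endif
		/* remaining callbacks omitted */
	};
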
diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h
index d703acb55d0f..61339bc44400 100644
--- a/include/linux/blk_types.h
+++ b/include/linux/blk_types.h
@@ -17,6 +17,10 @@ struct io_context;
struct cgroup_subsys_state;
typedef void (bio_end_io_t) (struct bio *);
+struct blk_issue_stat {
+ u64 stat;
+};
+
/*
* main unit of I/O for the block layer and lower layers (ie drivers and
* stacking drivers)
@@ -29,7 +33,7 @@ struct bio {
* top bits REQ_OP. Use
* accessors.
*/
- unsigned short bi_flags; /* status, command, etc */
+ unsigned short bi_flags; /* status, etc and bvec pool number */
unsigned short bi_ioprio;
struct bvec_iter bi_iter;
@@ -58,6 +62,10 @@ struct bio {
*/
struct io_context *bi_ioc;
struct cgroup_subsys_state *bi_css;
+#ifdef CONFIG_BLK_DEV_THROTTLING_LOW
+ void *bi_cg_private;
+ struct blk_issue_stat bi_issue_stat;
+#endif
#endif
union {
#if defined(CONFIG_BLK_DEV_INTEGRITY)
@@ -102,12 +110,9 @@ struct bio {
#define BIO_REFFED 8 /* bio has elevated ->bi_cnt */
#define BIO_THROTTLED 9 /* This bio has already been subjected to
* throttling rules. Don't do it again. */
-
-/*
- * Flags starting here get preserved by bio_reset() - this includes
- * BVEC_POOL_IDX()
- */
-#define BIO_RESET_BITS 10
+#define BIO_TRACE_COMPLETION 10 /* bio_endio() should trace the final completion
+ * of this bio. */
+/* See BVEC_POOL_OFFSET below before adding new flags */
/*
* We support 6 different bvec pools, the last one is magic in that it
@@ -117,13 +122,22 @@ struct bio {
#define BVEC_POOL_MAX (BVEC_POOL_NR - 1)
/*
- * Top 4 bits of bio flags indicate the pool the bvecs came from. We add
+ * Top 3 bits of bio flags indicate the pool the bvecs came from. We add
* 1 to the actual index so that 0 indicates that there are no bvecs to be
* freed.
*/
-#define BVEC_POOL_BITS (4)
+#define BVEC_POOL_BITS (3)
#define BVEC_POOL_OFFSET (16 - BVEC_POOL_BITS)
#define BVEC_POOL_IDX(bio) ((bio)->bi_flags >> BVEC_POOL_OFFSET)
+#if (1<< BVEC_POOL_BITS) < (BVEC_POOL_NR+1)
+# error "BVEC_POOL_BITS is too small"
+#endif
+
+/*
+ * Flags starting here get preserved by bio_reset() - this includes
+ * only BVEC_POOL_IDX()
+ */
+#define BIO_RESET_BITS BVEC_POOL_OFFSET
/*
* Operations and flags common to the bio and request structures.
@@ -160,7 +174,7 @@ enum req_opf {
/* write the same sector many times */
REQ_OP_WRITE_SAME = 7,
/* write the zero filled sector many times */
- REQ_OP_WRITE_ZEROES = 8,
+ REQ_OP_WRITE_ZEROES = 9,
/* SCSI passthrough using struct scsi_request */
REQ_OP_SCSI_IN = 32,
@@ -187,6 +201,10 @@ enum req_flag_bits {
__REQ_PREFLUSH, /* request for cache flush */
__REQ_RAHEAD, /* read ahead, can fail anytime */
__REQ_BACKGROUND, /* background IO */
+
+ /* command specific flags for REQ_OP_WRITE_ZEROES: */
+ __REQ_NOUNMAP, /* do not free blocks when zeroing */
+
__REQ_NR_BITS, /* stops here */
};
@@ -204,6 +222,8 @@ enum req_flag_bits {
#define REQ_RAHEAD (1ULL << __REQ_RAHEAD)
#define REQ_BACKGROUND (1ULL << __REQ_BACKGROUND)
+#define REQ_NOUNMAP (1ULL << __REQ_NOUNMAP)
+
#define REQ_FAILFAST_MASK \
(REQ_FAILFAST_DEV | REQ_FAILFAST_TRANSPORT | REQ_FAILFAST_DRIVER)
@@ -283,12 +303,6 @@ static inline bool blk_qc_t_is_internal(blk_qc_t cookie)
return (cookie & BLK_QC_T_INTERNAL) != 0;
}
-struct blk_issue_stat {
- u64 time;
-};
-
-#define BLK_RQ_STAT_BATCH 64
-
struct blk_rq_stat {
s64 mean;
u64 min;
@@ -296,7 +310,6 @@ struct blk_rq_stat {
s32 nr_samples;
s32 nr_batch;
u64 batch;
- s64 time;
};
#endif /* __LINUX_BLK_TYPES_H */
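With BVEC_POOL_BITS shrunk from 4 to 3, the resulting bi_flags layout works out as follows (derived from the definitions above, shown only as a worked example):

	BVEC_POOL_OFFSET = 16 - 3 = 13  ->  bits 15..13 hold the bvec pool index
	BIO_RESET_BITS   = 13           ->  bits 12..0 (the BIO_* flags, including
	                                    the new BIO_TRACE_COMPLETION at bit 10)
	                                    are cleared by bio_reset()
	1 << 3 = 8 >= BVEC_POOL_NR + 1 = 7, so the new build-time check passes
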
diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h
index 01a696b0a4d3..b5d1e27631ee 100644
--- a/include/linux/blkdev.h
+++ b/include/linux/blkdev.h
@@ -40,15 +40,20 @@ struct blkcg_gq;
struct blk_flush_queue;
struct pr_ops;
struct rq_wb;
+struct blk_queue_stats;
+struct blk_stat_callback;
#define BLKDEV_MIN_RQ 4
#define BLKDEV_MAX_RQ 128 /* Default maximum */
+/* Must be consistent with blk_mq_poll_stats_bkt() */
+#define BLK_MQ_POLL_STATS_BKTS 16
+
/*
* Maximum number of blkcg policies allowed to be registered concurrently.
* Defined here to simplify include dependency.
*/
-#define BLKCG_MAX_POLS 2
+#define BLKCG_MAX_POLS 3
typedef void (rq_end_io_fn)(struct request *, int);
@@ -173,6 +178,7 @@ struct request {
struct rb_node rb_node; /* sort/lookup */
struct bio_vec special_vec;
void *completion_data;
+ int error_count; /* for legacy drivers, don't use */
};
/*
@@ -213,16 +219,14 @@ struct request {
unsigned short ioprio;
- void *special; /* opaque pointer available for LLD use */
+ unsigned int timeout;
- int errors;
+ void *special; /* opaque pointer available for LLD use */
unsigned int extra_len; /* length of alignment and padding */
unsigned long deadline;
struct list_head timeout_list;
- unsigned int timeout;
- int retries;
/*
* completion callback.
@@ -337,7 +341,6 @@ struct queue_limits {
unsigned char misaligned;
unsigned char discard_misaligned;
unsigned char cluster;
- unsigned char discard_zeroes_data;
unsigned char raid_partial_stripes_expensive;
enum blk_zoned_model zoned;
};
@@ -388,6 +391,7 @@ struct request_queue {
int nr_rqs[2]; /* # allocated [a]sync rqs */
int nr_rqs_elvpriv; /* # allocated rqs w/ elvpriv */
+ struct blk_queue_stats *stats;
struct rq_wb *rq_wb;
/*
@@ -505,8 +509,6 @@ struct request_queue {
unsigned int nr_sorted;
unsigned int in_flight[2];
- struct blk_rq_stat rq_stats[2];
-
/*
* Number of active block driver functions for which blk_drain_queue()
* must wait. Must be incremented around functions that unlock the
@@ -516,6 +518,10 @@ struct request_queue {
unsigned int rq_timeout;
int poll_nsec;
+
+ struct blk_stat_callback *poll_cb;
+ struct blk_rq_stat poll_stat[BLK_MQ_POLL_STATS_BKTS];
+
struct timer_list timeout;
struct work_struct timeout_work;
struct list_head timeout_list;
@@ -573,7 +579,7 @@ struct request_queue {
#ifdef CONFIG_BLK_DEBUG_FS
struct dentry *debugfs_dir;
- struct dentry *mq_debugfs_dir;
+ struct dentry *sched_debugfs_dir;
#endif
bool mq_sysfs_init_done;
@@ -610,6 +616,8 @@ struct request_queue {
#define QUEUE_FLAG_FLUSH_NQ 25 /* flush not queueable */
#define QUEUE_FLAG_DAX 26 /* device supports DAX */
#define QUEUE_FLAG_STATS 27 /* track rq completion times */
+#define QUEUE_FLAG_POLL_STATS 28 /* collecting stats for hybrid polling */
+#define QUEUE_FLAG_REGISTERED 29 /* queue has been registered to a disk */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_STACKABLE) | \
@@ -918,6 +926,7 @@ extern int blk_register_queue(struct gendisk *disk);
extern void blk_unregister_queue(struct gendisk *disk);
extern blk_qc_t generic_make_request(struct bio *bio);
extern void blk_rq_init(struct request_queue *q, struct request *rq);
+extern void blk_init_request_from_bio(struct request *req, struct bio *bio);
extern void blk_put_request(struct request *);
extern void __blk_put_request(struct request_queue *, struct request *);
extern struct request *blk_get_request(struct request_queue *, int, gfp_t);
@@ -963,7 +972,7 @@ extern int blk_rq_map_kern(struct request_queue *, struct request *, void *, uns
extern int blk_rq_map_user_iov(struct request_queue *, struct request *,
struct rq_map_data *, const struct iov_iter *,
gfp_t);
-extern int blk_execute_rq(struct request_queue *, struct gendisk *,
+extern void blk_execute_rq(struct request_queue *, struct gendisk *,
struct request *, int);
extern void blk_execute_rq_nowait(struct request_queue *, struct gendisk *,
struct request *, int, rq_end_io_fn *);
@@ -1081,20 +1090,6 @@ static inline unsigned int blk_rq_count_bios(struct request *rq)
}
/*
- * blk_rq_set_prio - associate a request with prio from ioc
- * @rq: request of interest
- * @ioc: target iocontext
- *
- * Assocate request prio with ioc prio so request based drivers
- * can leverage priority information.
- */
-static inline void blk_rq_set_prio(struct request *rq, struct io_context *ioc)
-{
- if (ioc)
- rq->ioprio = ioc->ioprio;
-}
-
-/*
* Request issue related functions.
*/
extern struct request *blk_peek_request(struct request_queue *q);
@@ -1120,13 +1115,10 @@ extern void blk_finish_request(struct request *rq, int error);
extern bool blk_end_request(struct request *rq, int error,
unsigned int nr_bytes);
extern void blk_end_request_all(struct request *rq, int error);
-extern bool blk_end_request_cur(struct request *rq, int error);
-extern bool blk_end_request_err(struct request *rq, int error);
extern bool __blk_end_request(struct request *rq, int error,
unsigned int nr_bytes);
extern void __blk_end_request_all(struct request *rq, int error);
extern bool __blk_end_request_cur(struct request *rq, int error);
-extern bool __blk_end_request_err(struct request *rq, int error);
extern void blk_complete_request(struct request *);
extern void __blk_complete_request(struct request *);
@@ -1329,23 +1321,27 @@ static inline struct request *blk_map_queue_find_tag(struct blk_queue_tag *bqt,
return bqt->tag_index[tag];
}
+extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
+extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
+ sector_t nr_sects, gfp_t gfp_mask, struct page *page);
#define BLKDEV_DISCARD_SECURE (1 << 0) /* issue a secure erase */
-#define BLKDEV_DISCARD_ZERO (1 << 1) /* must reliably zero data */
-extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, unsigned long flags);
extern int __blkdev_issue_discard(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, int flags,
struct bio **biop);
-extern int blkdev_issue_write_same(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, struct page *page);
+
+#define BLKDEV_ZERO_NOUNMAP (1 << 0) /* do not free blocks */
+#define BLKDEV_ZERO_NOFALLBACK (1 << 1) /* don't write explicit zeroes */
+
extern int __blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
sector_t nr_sects, gfp_t gfp_mask, struct bio **biop,
- bool discard);
+ unsigned flags);
extern int blkdev_issue_zeroout(struct block_device *bdev, sector_t sector,
- sector_t nr_sects, gfp_t gfp_mask, bool discard);
+ sector_t nr_sects, gfp_t gfp_mask, unsigned flags);
+
static inline int sb_issue_discard(struct super_block *sb, sector_t block,
sector_t nr_blocks, gfp_t gfp_mask, unsigned long flags)
{
@@ -1359,7 +1355,7 @@ static inline int sb_issue_zeroout(struct super_block *sb, sector_t block,
return blkdev_issue_zeroout(sb->s_bdev,
block << (sb->s_blocksize_bits - 9),
nr_blocks << (sb->s_blocksize_bits - 9),
- gfp_mask, true);
+ gfp_mask, 0);
}
extern int blk_verify_command(unsigned char *cmd, fmode_t has_write_perm);
@@ -1529,19 +1525,6 @@ static inline int bdev_discard_alignment(struct block_device *bdev)
return q->limits.discard_alignment;
}
-static inline unsigned int queue_discard_zeroes_data(struct request_queue *q)
-{
- if (q->limits.max_discard_sectors && q->limits.discard_zeroes_data == 1)
- return 1;
-
- return 0;
-}
-
-static inline unsigned int bdev_discard_zeroes_data(struct block_device *bdev)
-{
- return queue_discard_zeroes_data(bdev_get_queue(bdev));
-}
-
static inline unsigned int bdev_write_same(struct block_device *bdev)
{
struct request_queue *q = bdev_get_queue(bdev);
@@ -1726,6 +1709,7 @@ int kblockd_schedule_work(struct work_struct *work);
int kblockd_schedule_work_on(int cpu, struct work_struct *work);
int kblockd_schedule_delayed_work(struct delayed_work *dwork, unsigned long delay);
int kblockd_schedule_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
+int kblockd_mod_delayed_work_on(int cpu, struct delayed_work *dwork, unsigned long delay);
#ifdef CONFIG_BLK_CGROUP
/*
@@ -1939,28 +1923,12 @@ static inline bool integrity_req_gap_front_merge(struct request *req,
#endif /* CONFIG_BLK_DEV_INTEGRITY */
-/**
- * struct blk_dax_ctl - control and output parameters for ->direct_access
- * @sector: (input) offset relative to a block_device
- * @addr: (output) kernel virtual address for @sector populated by driver
- * @pfn: (output) page frame number for @addr populated by driver
- * @size: (input) number of bytes requested
- */
-struct blk_dax_ctl {
- sector_t sector;
- void *addr;
- long size;
- pfn_t pfn;
-};
-
struct block_device_operations {
int (*open) (struct block_device *, fmode_t);
void (*release) (struct gendisk *, fmode_t);
int (*rw_page)(struct block_device *, sector_t, struct page *, bool);
int (*ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
int (*compat_ioctl) (struct block_device *, fmode_t, unsigned, unsigned long);
- long (*direct_access)(struct block_device *, sector_t, void **, pfn_t *,
- long);
unsigned int (*check_events) (struct gendisk *disk,
unsigned int clearing);
/* ->media_changed() is DEPRECATED, use ->check_events() instead */
@@ -1979,9 +1947,8 @@ extern int __blkdev_driver_ioctl(struct block_device *, fmode_t, unsigned int,
extern int bdev_read_page(struct block_device *, sector_t, struct page *);
extern int bdev_write_page(struct block_device *, sector_t, struct page *,
struct writeback_control *);
-extern long bdev_direct_access(struct block_device *, struct blk_dax_ctl *);
extern int bdev_dax_supported(struct super_block *, int);
-extern bool bdev_dax_capable(struct block_device *);
+int bdev_dax_pgoff(struct block_device *, sector_t, size_t, pgoff_t *pgoff);
#else /* CONFIG_BLOCK */
struct block_device;
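blkdev_issue_zeroout() now takes BLKDEV_ZERO_* flags instead of the old 'discard' bool. A short sketch of the two common cases (the wrapper function is illustrative, not from this patch):

	static int zero_range_sketch(struct block_device *bdev, sector_t sector,
				     sector_t nr_sects, bool keep_allocated)
	{
		/* 0: blocks may be unmapped, with a fallback to explicitly
		 * writing zeroes when the device offers no offload.
		 * BLKDEV_ZERO_NOUNMAP: zero the range but keep it allocated.
		 */
		unsigned flags = keep_allocated ? BLKDEV_ZERO_NOUNMAP : 0;

		return blkdev_issue_zeroout(bdev, sector, nr_sects, GFP_KERNEL,
					    flags);
	}
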
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index 909fc033173a..6bb38d76faf4 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -35,6 +35,7 @@ struct bpf_map_ops {
void *(*map_fd_get_ptr)(struct bpf_map *map, struct file *map_file,
int fd);
void (*map_fd_put_ptr)(void *ptr);
+ u32 (*map_gen_lookup)(struct bpf_map *map, struct bpf_insn *insn_buf);
};
struct bpf_map {
@@ -49,12 +50,7 @@ struct bpf_map {
const struct bpf_map_ops *ops;
struct work_struct work;
atomic_t usercnt;
-};
-
-struct bpf_map_type_list {
- struct list_head list_node;
- const struct bpf_map_ops *ops;
- enum bpf_map_type type;
+ struct bpf_map *inner_map_meta;
};
/* function argument constraints */
@@ -167,12 +163,8 @@ struct bpf_verifier_ops {
const struct bpf_insn *src,
struct bpf_insn *dst,
struct bpf_prog *prog);
-};
-
-struct bpf_prog_type_list {
- struct list_head list_node;
- const struct bpf_verifier_ops *ops;
- enum bpf_prog_type type;
+ int (*test_run)(struct bpf_prog *prog, const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
};
struct bpf_prog_aux {
@@ -231,11 +223,21 @@ typedef unsigned long (*bpf_ctx_copy_t)(void *dst, const void *src,
u64 bpf_event_output(struct bpf_map *map, u64 flags, void *meta, u64 meta_size,
void *ctx, u64 ctx_size, bpf_ctx_copy_t ctx_copy);
+int bpf_prog_test_run_xdp(struct bpf_prog *prog, const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+int bpf_prog_test_run_skb(struct bpf_prog *prog, const union bpf_attr *kattr,
+ union bpf_attr __user *uattr);
+
#ifdef CONFIG_BPF_SYSCALL
DECLARE_PER_CPU(int, bpf_prog_active);
-void bpf_register_prog_type(struct bpf_prog_type_list *tl);
-void bpf_register_map_type(struct bpf_map_type_list *tl);
+#define BPF_PROG_TYPE(_id, _ops) \
+ extern const struct bpf_verifier_ops _ops;
+#define BPF_MAP_TYPE(_id, _ops) \
+ extern const struct bpf_map_ops _ops;
+#include <linux/bpf_types.h>
+#undef BPF_PROG_TYPE
+#undef BPF_MAP_TYPE
struct bpf_prog *bpf_prog_get(u32 ufd);
struct bpf_prog *bpf_prog_get_type(u32 ufd, enum bpf_prog_type type);
@@ -275,6 +277,8 @@ int bpf_stackmap_copy(struct bpf_map *map, void *key, void *value);
int bpf_fd_array_map_update_elem(struct bpf_map *map, struct file *map_file,
void *key, void *value, u64 map_flags);
void bpf_fd_array_map_clear(struct bpf_map *map);
+int bpf_fd_htab_map_update_elem(struct bpf_map *map, struct file *map_file,
+ void *key, void *value, u64 map_flags);
/* memcpy that is used with 8-byte aligned pointers, power-of-8 size and
* forced to use 'long' read/writes to try to atomically copy long counters.
@@ -295,10 +299,6 @@ static inline void bpf_long_memcpy(void *dst, const void *src, u32 size)
/* verify correctness of eBPF program */
int bpf_check(struct bpf_prog **fp, union bpf_attr *attr);
#else
-static inline void bpf_register_prog_type(struct bpf_prog_type_list *tl)
-{
-}
-
static inline struct bpf_prog *bpf_prog_get(u32 ufd)
{
return ERR_PTR(-EOPNOTSUPP);
diff --git a/include/linux/bpf_types.h b/include/linux/bpf_types.h
new file mode 100644
index 000000000000..03bf223f18be
--- /dev/null
+++ b/include/linux/bpf_types.h
@@ -0,0 +1,36 @@
+/* internal file - do not include directly */
+
+#ifdef CONFIG_NET
+BPF_PROG_TYPE(BPF_PROG_TYPE_SOCKET_FILTER, sk_filter_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_CLS, tc_cls_act_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_SCHED_ACT, tc_cls_act_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_XDP, xdp_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SKB, cg_skb_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_CGROUP_SOCK, cg_sock_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_IN, lwt_inout_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_OUT, lwt_inout_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_LWT_XMIT, lwt_xmit_prog_ops)
+#endif
+#ifdef CONFIG_BPF_EVENTS
+BPF_PROG_TYPE(BPF_PROG_TYPE_KPROBE, kprobe_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_TRACEPOINT, tracepoint_prog_ops)
+BPF_PROG_TYPE(BPF_PROG_TYPE_PERF_EVENT, perf_event_prog_ops)
+#endif
+
+BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY, array_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_ARRAY, percpu_array_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_PROG_ARRAY, prog_array_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_PERF_EVENT_ARRAY, perf_event_array_map_ops)
+#ifdef CONFIG_CGROUPS
+BPF_MAP_TYPE(BPF_MAP_TYPE_CGROUP_ARRAY, cgroup_array_map_ops)
+#endif
+BPF_MAP_TYPE(BPF_MAP_TYPE_HASH, htab_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_PERCPU_HASH, htab_percpu_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_HASH, htab_lru_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_LRU_PERCPU_HASH, htab_lru_percpu_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_LPM_TRIE, trie_map_ops)
+#ifdef CONFIG_PERF_EVENTS
+BPF_MAP_TYPE(BPF_MAP_TYPE_STACK_TRACE, stack_map_ops)
+#endif
+BPF_MAP_TYPE(BPF_MAP_TYPE_ARRAY_OF_MAPS, array_of_maps_map_ops)
+BPF_MAP_TYPE(BPF_MAP_TYPE_HASH_OF_MAPS, htab_of_maps_map_ops)
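This header replaces the old runtime bpf_register_prog_type()/bpf_register_map_type() lists: each consumer defines BPF_PROG_TYPE()/BPF_MAP_TYPE() to whatever it needs and then includes the file. A sketch of how a lookup table can be generated this way (it approximates, but is not copied from, the syscall-side code):

	static const struct bpf_verifier_ops * const bpf_prog_types[] = {
	#define BPF_PROG_TYPE(_id, _ops) [_id] = &_ops,
	#define BPF_MAP_TYPE(_id, _ops)
	#include <linux/bpf_types.h>
	#undef BPF_PROG_TYPE
	#undef BPF_MAP_TYPE
	};
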
diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index a13b031dc6b8..5efb4db44e1e 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -66,7 +66,10 @@ struct bpf_verifier_state_list {
};
struct bpf_insn_aux_data {
- enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
+ union {
+ enum bpf_reg_type ptr_type; /* pointer type for load/store insns */
+ struct bpf_map *map_ptr; /* pointer for call insn into lookup_elem */
+ };
};
#define MAX_USED_MAPS 64 /* max number of maps accessed by one eBPF program */
diff --git a/include/linux/brcmphy.h b/include/linux/brcmphy.h
index 55e517130311..abcda9b458ab 100644
--- a/include/linux/brcmphy.h
+++ b/include/linux/brcmphy.h
@@ -25,6 +25,9 @@
#define PHY_ID_BCM57780 0x03625d90
#define PHY_ID_BCM7250 0xae025280
+#define PHY_ID_BCM7260 0xae025190
+#define PHY_ID_BCM7268 0xae025090
+#define PHY_ID_BCM7271 0xae0253b0
#define PHY_ID_BCM7278 0xae0251a0
#define PHY_ID_BCM7364 0xae025260
#define PHY_ID_BCM7366 0x600d8490
diff --git a/include/linux/buffer_head.h b/include/linux/buffer_head.h
index 79591c3660cc..bd029e52ef5e 100644
--- a/include/linux/buffer_head.h
+++ b/include/linux/buffer_head.h
@@ -196,8 +196,6 @@ void ll_rw_block(int, int, int, struct buffer_head * bh[]);
int sync_dirty_buffer(struct buffer_head *bh);
int __sync_dirty_buffer(struct buffer_head *bh, int op_flags);
void write_dirty_buffer(struct buffer_head *bh, int op_flags);
-int _submit_bh(int op, int op_flags, struct buffer_head *bh,
- unsigned long bio_flags);
int submit_bh(int, int, struct buffer_head *);
void write_boundary_block(struct block_device *bdev,
sector_t bblock, unsigned blocksize);
diff --git a/include/linux/bug.h b/include/linux/bug.h
index 5828489309bb..687b557fc5eb 100644
--- a/include/linux/bug.h
+++ b/include/linux/bug.h
@@ -105,7 +105,7 @@ static inline int is_warning_bug(const struct bug_entry *bug)
return bug->flags & BUGFLAG_WARNING;
}
-const struct bug_entry *find_bug(unsigned long bugaddr);
+struct bug_entry *find_bug(unsigned long bugaddr);
enum bug_trap_type report_bug(unsigned long bug_addr, struct pt_regs *regs);
diff --git a/include/linux/can/core.h b/include/linux/can/core.h
index df08a41d5be5..c9a17bb1221c 100644
--- a/include/linux/can/core.h
+++ b/include/linux/can/core.h
@@ -5,7 +5,7 @@
*
* Authors: Oliver Hartkopp <oliver.hartkopp@volkswagen.de>
* Urs Thuermann <urs.thuermann@volkswagen.de>
- * Copyright (c) 2002-2007 Volkswagen Group Electronic Research
+ * Copyright (c) 2002-2017 Volkswagen Group Electronic Research
* All rights reserved.
*
*/
@@ -17,7 +17,7 @@
#include <linux/skbuff.h>
#include <linux/netdevice.h>
-#define CAN_VERSION "20120528"
+#define CAN_VERSION "20170425"
/* increment this number each time you change some user-space interface */
#define CAN_ABI_VERSION "9"
@@ -45,12 +45,13 @@ struct can_proto {
extern int can_proto_register(const struct can_proto *cp);
extern void can_proto_unregister(const struct can_proto *cp);
-int can_rx_register(struct net_device *dev, canid_t can_id, canid_t mask,
+int can_rx_register(struct net *net, struct net_device *dev,
+ canid_t can_id, canid_t mask,
void (*func)(struct sk_buff *, void *),
void *data, char *ident, struct sock *sk);
-extern void can_rx_unregister(struct net_device *dev, canid_t can_id,
- canid_t mask,
+extern void can_rx_unregister(struct net *net, struct net_device *dev,
+ canid_t can_id, canid_t mask,
void (*func)(struct sk_buff *, void *),
void *data);
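can_rx_register() and can_rx_unregister() now take the owning network namespace explicitly instead of assuming init_net. A hedged sketch of how a caller adapts (the callback, CAN ID and ident string are placeholders):

static void example_can_rcv(struct sk_buff *skb, void *data)
{
	/* handle one matching CAN frame */
}

static int example_can_attach(struct net_device *dev)
{
	/* the namespace is now taken from the device rather than implied */
	return can_rx_register(dev_net(dev), dev, 0x123, CAN_SFF_MASK,
			       example_can_rcv, NULL, "example", NULL);
}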
diff --git a/include/linux/can/dev/peak_canfd.h b/include/linux/can/dev/peak_canfd.h
new file mode 100644
index 000000000000..46dceef2cfa6
--- /dev/null
+++ b/include/linux/can/dev/peak_canfd.h
@@ -0,0 +1,308 @@
+/*
+ * CAN driver for PEAK System micro-CAN based adapters
+ *
+ * Copyright (C) 2003-2011 PEAK System-Technik GmbH
+ * Copyright (C) 2011-2013 Stephane Grosjean <s.grosjean@peak-system.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published
+ * by the Free Software Foundation; version 2 of the License.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ */
+#ifndef PUCAN_H
+#define PUCAN_H
+
+/* uCAN commands opcodes list (low-order 10 bits) */
+#define PUCAN_CMD_NOP 0x000
+#define PUCAN_CMD_RESET_MODE 0x001
+#define PUCAN_CMD_NORMAL_MODE 0x002
+#define PUCAN_CMD_LISTEN_ONLY_MODE 0x003
+#define PUCAN_CMD_TIMING_SLOW 0x004
+#define PUCAN_CMD_TIMING_FAST 0x005
+#define PUCAN_CMD_SET_STD_FILTER 0x006
+#define PUCAN_CMD_RESERVED2 0x007
+#define PUCAN_CMD_FILTER_STD 0x008
+#define PUCAN_CMD_TX_ABORT 0x009
+#define PUCAN_CMD_WR_ERR_CNT 0x00a
+#define PUCAN_CMD_SET_EN_OPTION 0x00b
+#define PUCAN_CMD_CLR_DIS_OPTION 0x00c
+#define PUCAN_CMD_RX_BARRIER 0x010
+#define PUCAN_CMD_END_OF_COLLECTION 0x3ff
+
+/* uCAN received messages list */
+#define PUCAN_MSG_CAN_RX 0x0001
+#define PUCAN_MSG_ERROR 0x0002
+#define PUCAN_MSG_STATUS 0x0003
+#define PUCAN_MSG_BUSLOAD 0x0004
+
+#define PUCAN_MSG_CACHE_CRITICAL 0x0102
+
+/* uCAN transmitted messages */
+#define PUCAN_MSG_CAN_TX 0x1000
+
+/* uCAN command common header */
+struct __packed pucan_command {
+ __le16 opcode_channel;
+ u16 args[3];
+};
+
+/* return the opcode from the opcode_channel field of a command */
+static inline u16 pucan_cmd_get_opcode(struct pucan_command *c)
+{
+ return le16_to_cpu(c->opcode_channel) & 0x3ff;
+}
+
+#define PUCAN_TSLOW_BRP_BITS 10
+#define PUCAN_TSLOW_TSGEG1_BITS 8
+#define PUCAN_TSLOW_TSGEG2_BITS 7
+#define PUCAN_TSLOW_SJW_BITS 7
+
+#define PUCAN_TSLOW_BRP_MASK ((1 << PUCAN_TSLOW_BRP_BITS) - 1)
+#define PUCAN_TSLOW_TSEG1_MASK ((1 << PUCAN_TSLOW_TSGEG1_BITS) - 1)
+#define PUCAN_TSLOW_TSEG2_MASK ((1 << PUCAN_TSLOW_TSGEG2_BITS) - 1)
+#define PUCAN_TSLOW_SJW_MASK ((1 << PUCAN_TSLOW_SJW_BITS) - 1)
+
+/* uCAN TIMING_SLOW command fields */
+#define PUCAN_TSLOW_SJW_T(s, t) (((s) & PUCAN_TSLOW_SJW_MASK) | \
+ ((!!(t)) << 7))
+#define PUCAN_TSLOW_TSEG2(t) ((t) & PUCAN_TSLOW_TSEG2_MASK)
+#define PUCAN_TSLOW_TSEG1(t) ((t) & PUCAN_TSLOW_TSEG1_MASK)
+#define PUCAN_TSLOW_BRP(b) ((b) & PUCAN_TSLOW_BRP_MASK)
+
+struct __packed pucan_timing_slow {
+ __le16 opcode_channel;
+
+ u8 ewl; /* Error Warning limit */
+ u8 sjw_t; /* Sync Jump Width + Triple sampling */
+ u8 tseg2; /* Timing SEGment 2 */
+ u8 tseg1; /* Timing SEGment 1 */
+
+ __le16 brp; /* BaudRate Prescaler */
+};
+
+#define PUCAN_TFAST_BRP_BITS 10
+#define PUCAN_TFAST_TSGEG1_BITS 5
+#define PUCAN_TFAST_TSGEG2_BITS 4
+#define PUCAN_TFAST_SJW_BITS 4
+
+#define PUCAN_TFAST_BRP_MASK ((1 << PUCAN_TFAST_BRP_BITS) - 1)
+#define PUCAN_TFAST_TSEG1_MASK ((1 << PUCAN_TFAST_TSGEG1_BITS) - 1)
+#define PUCAN_TFAST_TSEG2_MASK ((1 << PUCAN_TFAST_TSGEG2_BITS) - 1)
+#define PUCAN_TFAST_SJW_MASK ((1 << PUCAN_TFAST_SJW_BITS) - 1)
+
+/* uCAN TIMING_FAST command fields */
+#define PUCAN_TFAST_SJW(s) ((s) & PUCAN_TFAST_SJW_MASK)
+#define PUCAN_TFAST_TSEG2(t) ((t) & PUCAN_TFAST_TSEG2_MASK)
+#define PUCAN_TFAST_TSEG1(t) ((t) & PUCAN_TFAST_TSEG1_MASK)
+#define PUCAN_TFAST_BRP(b) ((b) & PUCAN_TFAST_BRP_MASK)
+
+struct __packed pucan_timing_fast {
+ __le16 opcode_channel;
+
+ u8 unused;
+ u8 sjw; /* Sync Jump Width */
+ u8 tseg2; /* Timing SEGment 2 */
+ u8 tseg1; /* Timing SEGment 1 */
+
+ __le16 brp; /* BaudRate Prescaler */
+};
+
+/* uCAN FILTER_STD command fields */
+#define PUCAN_FLTSTD_ROW_IDX_BITS 6
+
+struct __packed pucan_filter_std {
+ __le16 opcode_channel;
+
+ __le16 idx;
+ __le32 mask; /* CAN-ID bitmask in idx range */
+};
+
+#define PUCAN_FLTSTD_ROW_IDX_MAX ((1 << PUCAN_FLTSTD_ROW_IDX_BITS) - 1)
+
+/* uCAN SET_STD_FILTER command fields */
+struct __packed pucan_std_filter {
+ __le16 opcode_channel;
+
+ u8 unused;
+ u8 idx;
+ __le32 mask; /* CAN-ID bitmask in idx range */
+};
+
+/* uCAN TX_ABORT commands fields */
+#define PUCAN_TX_ABORT_FLUSH 0x0001
+
+struct __packed pucan_tx_abort {
+ __le16 opcode_channel;
+
+ __le16 flags;
+ u32 unused;
+};
+
+/* uCAN WR_ERR_CNT command fields */
+#define PUCAN_WRERRCNT_TE 0x4000 /* Tx error cntr write Enable */
+#define PUCAN_WRERRCNT_RE 0x8000 /* Rx error cntr write Enable */
+
+struct __packed pucan_wr_err_cnt {
+ __le16 opcode_channel;
+
+ __le16 sel_mask;
+ u8 tx_counter; /* Tx error counter new value */
+ u8 rx_counter; /* Rx error counter new value */
+
+ u16 unused;
+};
+
+/* uCAN SET_EN/CLR_DIS _OPTION command fields */
+#define PUCAN_OPTION_ERROR 0x0001
+#define PUCAN_OPTION_BUSLOAD 0x0002
+#define PUCAN_OPTION_CANDFDISO 0x0004
+
+struct __packed pucan_options {
+ __le16 opcode_channel;
+
+ __le16 options;
+ u32 unused;
+};
+
+/* uCAN received messages global format */
+struct __packed pucan_msg {
+ __le16 size;
+ __le16 type;
+ __le32 ts_low;
+ __le32 ts_high;
+};
+
+/* uCAN flags for CAN/CANFD messages */
+#define PUCAN_MSG_SELF_RECEIVE 0x80
+#define PUCAN_MSG_ERROR_STATE_IND 0x40 /* error state indicator */
+#define PUCAN_MSG_BITRATE_SWITCH 0x20 /* bitrate switch */
+#define PUCAN_MSG_EXT_DATA_LEN 0x10 /* extended data length */
+#define PUCAN_MSG_SINGLE_SHOT 0x08
+#define PUCAN_MSG_LOOPED_BACK 0x04
+#define PUCAN_MSG_EXT_ID 0x02
+#define PUCAN_MSG_RTR 0x01
+
+struct __packed pucan_rx_msg {
+ __le16 size;
+ __le16 type;
+ __le32 ts_low;
+ __le32 ts_high;
+ __le32 tag_low;
+ __le32 tag_high;
+ u8 channel_dlc;
+ u8 client;
+ __le16 flags;
+ __le32 can_id;
+ u8 d[0];
+};
+
+/* uCAN error types */
+#define PUCAN_ERMSG_BIT_ERROR 0
+#define PUCAN_ERMSG_FORM_ERROR 1
+#define PUCAN_ERMSG_STUFF_ERROR 2
+#define PUCAN_ERMSG_OTHER_ERROR 3
+#define PUCAN_ERMSG_ERR_CNT_DEC 4
+
+struct __packed pucan_error_msg {
+ __le16 size;
+ __le16 type;
+ __le32 ts_low;
+ __le32 ts_high;
+ u8 channel_type_d;
+ u8 code_g;
+ u8 tx_err_cnt;
+ u8 rx_err_cnt;
+};
+
+static inline int pucan_error_get_channel(const struct pucan_error_msg *msg)
+{
+ return msg->channel_type_d & 0x0f;
+}
+
+#define PUCAN_RX_BARRIER 0x10
+#define PUCAN_BUS_PASSIVE 0x20
+#define PUCAN_BUS_WARNING 0x40
+#define PUCAN_BUS_BUSOFF 0x80
+
+struct __packed pucan_status_msg {
+ __le16 size;
+ __le16 type;
+ __le32 ts_low;
+ __le32 ts_high;
+ u8 channel_p_w_b;
+ u8 unused[3];
+};
+
+static inline int pucan_status_get_channel(const struct pucan_status_msg *msg)
+{
+ return msg->channel_p_w_b & 0x0f;
+}
+
+static inline int pucan_status_is_rx_barrier(const struct pucan_status_msg *msg)
+{
+ return msg->channel_p_w_b & PUCAN_RX_BARRIER;
+}
+
+static inline int pucan_status_is_passive(const struct pucan_status_msg *msg)
+{
+ return msg->channel_p_w_b & PUCAN_BUS_PASSIVE;
+}
+
+static inline int pucan_status_is_warning(const struct pucan_status_msg *msg)
+{
+ return msg->channel_p_w_b & PUCAN_BUS_WARNING;
+}
+
+static inline int pucan_status_is_busoff(const struct pucan_status_msg *msg)
+{
+ return msg->channel_p_w_b & PUCAN_BUS_BUSOFF;
+}
+
+/* uCAN transmitted message format */
+#define PUCAN_MSG_CHANNEL_DLC(c, d) (((c) & 0xf) | ((d) << 4))
+
+struct __packed pucan_tx_msg {
+ __le16 size;
+ __le16 type;
+ __le32 tag_low;
+ __le32 tag_high;
+ u8 channel_dlc;
+ u8 client;
+ __le16 flags;
+ __le32 can_id;
+ u8 d[0];
+};
+
+/* build the cmd opcode_channel field with respect to the correct endianness */
+static inline __le16 pucan_cmd_opcode_channel(int index, int opcode)
+{
+ return cpu_to_le16(((index) << 12) | ((opcode) & 0x3ff));
+}
+
+/* return the channel number part from any received message channel_dlc field */
+static inline int pucan_msg_get_channel(const struct pucan_rx_msg *msg)
+{
+ return msg->channel_dlc & 0xf;
+}
+
+/* return the dlc value from any received message channel_dlc field */
+static inline int pucan_msg_get_dlc(const struct pucan_rx_msg *msg)
+{
+ return msg->channel_dlc >> 4;
+}
+
+static inline int pucan_ermsg_get_channel(const struct pucan_error_msg *msg)
+{
+ return msg->channel_type_d & 0x0f;
+}
+
+static inline int pucan_stmsg_get_channel(const struct pucan_status_msg *msg)
+{
+ return msg->channel_p_w_b & 0x0f;
+}
+
+#endif
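To illustrate how the helpers above compose, here is a rough sketch of building a mode-switch command and decoding a received frame; the surrounding driver plumbing (queuing the command, mapping channels to netdevs) is omitted and the function names are hypothetical:

static void pucan_example_build_cmd(struct pucan_command *cmd, int ch)
{
	/* low 10 bits carry the opcode, the channel index sits above them */
	cmd->opcode_channel = pucan_cmd_opcode_channel(ch,
						       PUCAN_CMD_NORMAL_MODE);
}

static void pucan_example_decode_rx(const struct pucan_rx_msg *rx)
{
	int channel = pucan_msg_get_channel(rx);	/* selects the netdev */
	int dlc = pucan_msg_get_dlc(rx);		/* CAN data length code */

	(void)channel;
	(void)dlc;
}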
diff --git a/include/linux/can/platform/ti_hecc.h b/include/linux/can/platform/ti_hecc.h
deleted file mode 100644
index a52f47ca6c8a..000000000000
--- a/include/linux/can/platform/ti_hecc.h
+++ /dev/null
@@ -1,44 +0,0 @@
-#ifndef _CAN_PLATFORM_TI_HECC_H
-#define _CAN_PLATFORM_TI_HECC_H
-
-/*
- * TI HECC (High End CAN Controller) driver platform header
- *
- * Copyright (C) 2009 Texas Instruments Incorporated - http://www.ti.com/
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License as
- * published by the Free Software Foundation version 2.
- *
- * This program is distributed as is WITHOUT ANY WARRANTY of any
- * kind, whether express or implied; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- */
-
-/**
- * struct hecc_platform_data - HECC Platform Data
- *
- * @scc_hecc_offset: mostly 0 - should really never change
- * @scc_ram_offset: SCC RAM offset
- * @hecc_ram_offset: HECC RAM offset
- * @mbx_offset: Mailbox RAM offset
- * @int_line: Interrupt line to use - 0 or 1
- * @version: version for future use
- * @transceiver_switch: platform specific callback fn for transceiver control
- *
- * Platform data structure to get all platform specific settings.
- * this structure also accounts the fact that the IP may have different
- * RAM and mailbox offsets for different SOC's
- */
-struct ti_hecc_platform_data {
- u32 scc_hecc_offset;
- u32 scc_ram_offset;
- u32 hecc_ram_offset;
- u32 mbx_offset;
- u32 int_line;
- u32 version;
- void (*transceiver_switch) (int);
-};
-#endif /* !_CAN_PLATFORM_TI_HECC_H */
diff --git a/include/linux/ccp.h b/include/linux/ccp.h
index c41b8d99dd0e..3285c944194a 100644
--- a/include/linux/ccp.h
+++ b/include/linux/ccp.h
@@ -123,6 +123,10 @@ enum ccp_aes_mode {
CCP_AES_MODE_CFB,
CCP_AES_MODE_CTR,
CCP_AES_MODE_CMAC,
+ CCP_AES_MODE_GHASH,
+ CCP_AES_MODE_GCTR,
+ CCP_AES_MODE_GCM,
+ CCP_AES_MODE_GMAC,
CCP_AES_MODE__LAST,
};
@@ -137,6 +141,9 @@ enum ccp_aes_action {
CCP_AES_ACTION_ENCRYPT,
CCP_AES_ACTION__LAST,
};
+/* Overloaded field */
+#define CCP_AES_GHASHAAD CCP_AES_ACTION_DECRYPT
+#define CCP_AES_GHASHFINAL CCP_AES_ACTION_ENCRYPT
/**
* struct ccp_aes_engine - CCP AES operation
@@ -181,6 +188,8 @@ struct ccp_aes_engine {
struct scatterlist *cmac_key; /* K1/K2 cmac key required for
* final cmac cmd */
u32 cmac_key_len; /* In bytes */
+
+ u32 aad_len; /* In bytes */
};
/***** XTS-AES engine *****/
@@ -249,6 +258,8 @@ enum ccp_sha_type {
CCP_SHA_TYPE_1 = 1,
CCP_SHA_TYPE_224,
CCP_SHA_TYPE_256,
+ CCP_SHA_TYPE_384,
+ CCP_SHA_TYPE_512,
CCP_SHA_TYPE__LAST,
};
@@ -290,6 +301,60 @@ struct ccp_sha_engine {
* final sha cmd */
};
+/***** 3DES engine *****/
+enum ccp_des3_mode {
+ CCP_DES3_MODE_ECB = 0,
+ CCP_DES3_MODE_CBC,
+ CCP_DES3_MODE_CFB,
+ CCP_DES3_MODE__LAST,
+};
+
+enum ccp_des3_type {
+ CCP_DES3_TYPE_168 = 1,
+ CCP_DES3_TYPE__LAST,
+};

+
+enum ccp_des3_action {
+ CCP_DES3_ACTION_DECRYPT = 0,
+ CCP_DES3_ACTION_ENCRYPT,
+ CCP_DES3_ACTION__LAST,
+};
+
+/**
+ * struct ccp_des3_engine - CCP 3DES operation
+ * @type: Type of 3DES operation
+ * @mode: cipher mode
+ * @action: 3DES operation (decrypt/encrypt)
+ * @key: key to be used for this 3DES operation
+ * @key_len: length of key (in bytes)
+ * @iv: IV to be used for this 3DES operation
+ * @iv_len: length in bytes of iv
+ * @src: input data to be used for this operation
+ * @src_len: length of input data used for this operation (in bytes)
+ * @dst: output data produced by this operation
+ *
+ * Variables required to be set when calling ccp_enqueue_cmd():
+ * - type, mode, action, key, key_len, src, dst, src_len
+ * - iv, iv_len for any mode other than ECB
+ *
+ * The iv variable is used as both input and output. On completion of the
+ * 3DES operation the new IV overwrites the old IV.
+ */
+struct ccp_des3_engine {
+ enum ccp_des3_type type;
+ enum ccp_des3_mode mode;
+ enum ccp_des3_action action;
+
+ struct scatterlist *key;
+ u32 key_len; /* In bytes */
+
+ struct scatterlist *iv;
+ u32 iv_len; /* In bytes */
+
+ struct scatterlist *src, *dst;
+ u64 src_len; /* In bytes */
+};
+
/***** RSA engine *****/
/**
* struct ccp_rsa_engine - CCP RSA operation
@@ -539,7 +604,7 @@ struct ccp_ecc_engine {
enum ccp_engine {
CCP_ENGINE_AES = 0,
CCP_ENGINE_XTS_AES_128,
- CCP_ENGINE_RSVD1,
+ CCP_ENGINE_DES3,
CCP_ENGINE_SHA,
CCP_ENGINE_RSA,
CCP_ENGINE_PASSTHRU,
@@ -587,6 +652,7 @@ struct ccp_cmd {
union {
struct ccp_aes_engine aes;
struct ccp_xts_aes_engine xts;
+ struct ccp_des3_engine des3;
struct ccp_sha_engine sha;
struct ccp_rsa_engine rsa;
struct ccp_passthru_engine passthru;
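A rough sketch of driving the new 3DES engine through ccp_enqueue_cmd(); the ccp_cmd members outside this hunk (engine, the u union) follow the existing ccp_cmd layout, and the scatterlists are assumed to be prepared by the caller:

static int example_des3_cbc_encrypt(struct scatterlist *key, u32 key_len,
				    struct scatterlist *iv,
				    struct scatterlist *src,
				    struct scatterlist *dst, u64 len)
{
	struct ccp_cmd cmd;

	memset(&cmd, 0, sizeof(cmd));
	cmd.engine = CCP_ENGINE_DES3;
	cmd.u.des3.type = CCP_DES3_TYPE_168;
	cmd.u.des3.mode = CCP_DES3_MODE_CBC;
	cmd.u.des3.action = CCP_DES3_ACTION_ENCRYPT;
	cmd.u.des3.key = key;
	cmd.u.des3.key_len = key_len;	/* 24 bytes for 3DES-168 */
	cmd.u.des3.iv = iv;
	cmd.u.des3.iv_len = 8;		/* DES block size, required for CBC */
	cmd.u.des3.src = src;
	cmd.u.des3.dst = dst;
	cmd.u.des3.src_len = len;

	return ccp_enqueue_cmd(&cmd);
}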
diff --git a/include/linux/cdev.h b/include/linux/cdev.h
index f8763615a5f2..408bc09ce497 100644
--- a/include/linux/cdev.h
+++ b/include/linux/cdev.h
@@ -4,6 +4,7 @@
#include <linux/kobject.h>
#include <linux/kdev_t.h>
#include <linux/list.h>
+#include <linux/device.h>
struct file_operations;
struct inode;
@@ -26,6 +27,10 @@ void cdev_put(struct cdev *p);
int cdev_add(struct cdev *, dev_t, unsigned);
+void cdev_set_parent(struct cdev *p, struct kobject *kobj);
+int cdev_device_add(struct cdev *cdev, struct device *dev);
+void cdev_device_del(struct cdev *cdev, struct device *dev);
+
void cdev_del(struct cdev *);
void cd_forget(struct inode *);
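The new cdev_device_add()/cdev_device_del() pair registers a cdev together with the struct device that owns it, so drivers no longer open-code the racy cdev_add() + device_add() sequence. A minimal sketch, assuming a driver that embeds both objects (my_fops and my_device are placeholders):

static const struct file_operations my_fops;	/* placeholder fops */

struct my_device {
	struct cdev cdev;
	struct device dev;
};

static int my_device_register(struct my_device *md, dev_t devt)
{
	int err;

	device_initialize(&md->dev);
	md->dev.devt = devt;		/* cdev_device_add() keys off dev->devt */
	cdev_init(&md->cdev, &my_fops);

	err = cdev_device_add(&md->cdev, &md->dev);
	if (err)
		put_device(&md->dev);
	return err;
}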
diff --git a/include/linux/ceph/ceph_features.h b/include/linux/ceph/ceph_features.h
index ae2f66833762..fd8b2953c78f 100644
--- a/include/linux/ceph/ceph_features.h
+++ b/include/linux/ceph/ceph_features.h
@@ -105,8 +105,10 @@ static inline u64 ceph_sanitize_features(u64 features)
*/
#define CEPH_FEATURES_SUPPORTED_DEFAULT \
(CEPH_FEATURE_NOSRCADDR | \
+ CEPH_FEATURE_FLOCK | \
CEPH_FEATURE_SUBSCRIBE2 | \
CEPH_FEATURE_RECONNECT_SEQ | \
+ CEPH_FEATURE_DIRLAYOUTHASH | \
CEPH_FEATURE_PGID64 | \
CEPH_FEATURE_PGPOOL3 | \
CEPH_FEATURE_OSDENC | \
@@ -114,11 +116,13 @@ static inline u64 ceph_sanitize_features(u64 features)
CEPH_FEATURE_MSG_AUTH | \
CEPH_FEATURE_CRUSH_TUNABLES2 | \
CEPH_FEATURE_REPLY_CREATE_INODE | \
+ CEPH_FEATURE_MDSENC | \
CEPH_FEATURE_OSDHASHPSPOOL | \
CEPH_FEATURE_OSD_CACHEPOOL | \
CEPH_FEATURE_CRUSH_V2 | \
CEPH_FEATURE_EXPORT_PEER | \
CEPH_FEATURE_OSDMAP_ENC | \
+ CEPH_FEATURE_MDS_INLINE_DATA | \
CEPH_FEATURE_CRUSH_TUNABLES3 | \
CEPH_FEATURE_OSD_PRIMARY_AFFINITY | \
CEPH_FEATURE_MSGR_KEEPALIVE2 | \
diff --git a/include/linux/ceph/ceph_fs.h b/include/linux/ceph/ceph_fs.h
index f4b2ee18f38c..ad078ebe25d6 100644
--- a/include/linux/ceph/ceph_fs.h
+++ b/include/linux/ceph/ceph_fs.h
@@ -365,6 +365,19 @@ extern const char *ceph_mds_op_name(int op);
#define CEPH_READDIR_FRAG_END (1<<0)
#define CEPH_READDIR_FRAG_COMPLETE (1<<8)
#define CEPH_READDIR_HASH_ORDER (1<<9)
+#define CEPH_READDIR_OFFSET_HASH (1<<10)
+
+/*
+ * open request flags
+ */
+#define CEPH_O_RDONLY 00000000
+#define CEPH_O_WRONLY 00000001
+#define CEPH_O_RDWR 00000002
+#define CEPH_O_CREAT 00000100
+#define CEPH_O_EXCL 00000200
+#define CEPH_O_TRUNC 00001000
+#define CEPH_O_DIRECTORY 00200000
+#define CEPH_O_NOFOLLOW 00400000
union ceph_mds_request_args {
struct {
@@ -384,6 +397,7 @@ union ceph_mds_request_args {
__le32 max_entries; /* how many dentries to grab */
__le32 max_bytes;
__le16 flags;
+ __le32 offset_hash;
} __attribute__ ((packed)) readdir;
struct {
__le32 mode;
diff --git a/include/linux/ceph/cls_lock_client.h b/include/linux/ceph/cls_lock_client.h
index 84884d8d4710..0594d3bba774 100644
--- a/include/linux/ceph/cls_lock_client.h
+++ b/include/linux/ceph/cls_lock_client.h
@@ -37,6 +37,11 @@ int ceph_cls_break_lock(struct ceph_osd_client *osdc,
struct ceph_object_locator *oloc,
char *lock_name, char *cookie,
struct ceph_entity_name *locker);
+int ceph_cls_set_cookie(struct ceph_osd_client *osdc,
+ struct ceph_object_id *oid,
+ struct ceph_object_locator *oloc,
+ char *lock_name, u8 type, char *old_cookie,
+ char *tag, char *new_cookie);
void ceph_free_lockers(struct ceph_locker *lockers, u32 num_lockers);
diff --git a/include/linux/ceph/libceph.h b/include/linux/ceph/libceph.h
index 88cd5dc8e238..3229ae6c7846 100644
--- a/include/linux/ceph/libceph.h
+++ b/include/linux/ceph/libceph.h
@@ -14,6 +14,7 @@
#include <linux/wait.h>
#include <linux/writeback.h>
#include <linux/slab.h>
+#include <linux/refcount.h>
#include <linux/ceph/types.h>
#include <linux/ceph/messenger.h>
@@ -161,7 +162,7 @@ struct ceph_client {
* dirtied.
*/
struct ceph_snap_context {
- atomic_t nref;
+ refcount_t nref;
u64 seq;
u32 num_snaps;
u64 snaps[];
@@ -262,10 +263,7 @@ int ceph_print_client_options(struct seq_file *m, struct ceph_client *client);
extern void ceph_destroy_options(struct ceph_options *opt);
extern int ceph_compare_options(struct ceph_options *new_opt,
struct ceph_client *client);
-extern struct ceph_client *ceph_create_client(struct ceph_options *opt,
- void *private,
- u64 supported_features,
- u64 required_features);
+struct ceph_client *ceph_create_client(struct ceph_options *opt, void *private);
struct ceph_entity_addr *ceph_client_addr(struct ceph_client *client);
u64 ceph_client_gid(struct ceph_client *client);
extern void ceph_destroy_client(struct ceph_client *client);
diff --git a/include/linux/ceph/mdsmap.h b/include/linux/ceph/mdsmap.h
index 8ed5dc505fbb..d5f783f3226a 100644
--- a/include/linux/ceph/mdsmap.h
+++ b/include/linux/ceph/mdsmap.h
@@ -25,6 +25,7 @@ struct ceph_mdsmap {
u32 m_session_autoclose; /* seconds */
u64 m_max_file_size;
u32 m_max_mds; /* size of m_addr, m_state arrays */
+ int m_num_mds;
struct ceph_mds_info *m_info;
/* which object pools file data can be stored in */
@@ -40,7 +41,7 @@ struct ceph_mdsmap {
static inline struct ceph_entity_addr *
ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w)
{
- if (w >= m->m_max_mds)
+ if (w >= m->m_num_mds)
return NULL;
return &m->m_info[w].addr;
}
@@ -48,14 +49,14 @@ ceph_mdsmap_get_addr(struct ceph_mdsmap *m, int w)
static inline int ceph_mdsmap_get_state(struct ceph_mdsmap *m, int w)
{
BUG_ON(w < 0);
- if (w >= m->m_max_mds)
+ if (w >= m->m_num_mds)
return CEPH_MDS_STATE_DNE;
return m->m_info[w].state;
}
static inline bool ceph_mdsmap_is_laggy(struct ceph_mdsmap *m, int w)
{
- if (w >= 0 && w < m->m_max_mds)
+ if (w >= 0 && w < m->m_num_mds)
return m->m_info[w].laggy;
return false;
}
diff --git a/include/linux/ceph/osd_client.h b/include/linux/ceph/osd_client.h
index c125b5d9e13c..85650b415e73 100644
--- a/include/linux/ceph/osd_client.h
+++ b/include/linux/ceph/osd_client.h
@@ -5,6 +5,7 @@
#include <linux/kref.h>
#include <linux/mempool.h>
#include <linux/rbtree.h>
+#include <linux/refcount.h>
#include <linux/ceph/types.h>
#include <linux/ceph/osdmap.h>
@@ -27,7 +28,7 @@ typedef void (*ceph_osdc_callback_t)(struct ceph_osd_request *);
/* a given osd we're communicating with */
struct ceph_osd {
- atomic_t o_ref;
+ refcount_t o_ref;
struct ceph_osd_client *o_osdc;
int o_osd;
int o_incarnation;
@@ -186,12 +187,12 @@ struct ceph_osd_request {
struct timespec r_mtime; /* ditto */
u64 r_data_offset; /* ditto */
bool r_linger; /* don't resend on failure */
+ bool r_abort_on_full; /* return ENOSPC when full */
/* internal */
unsigned long r_stamp; /* jiffies, send or check time */
unsigned long r_start_stamp; /* jiffies */
int r_attempts;
- struct ceph_eversion r_replay_version; /* aka reassert_version */
u32 r_last_force_resend;
u32 r_map_dne_bound;
@@ -266,6 +267,7 @@ struct ceph_osd_client {
struct rb_root osds; /* osds */
struct list_head osd_lru; /* idle osds */
spinlock_t osd_lru_lock;
+ u32 epoch_barrier;
struct ceph_osd homeless_osd;
atomic64_t last_tid; /* tid of last request */
u64 last_linger_id;
@@ -304,6 +306,7 @@ extern void ceph_osdc_handle_reply(struct ceph_osd_client *osdc,
struct ceph_msg *msg);
extern void ceph_osdc_handle_map(struct ceph_osd_client *osdc,
struct ceph_msg *msg);
+void ceph_osdc_update_epoch_barrier(struct ceph_osd_client *osdc, u32 eb);
extern void osd_req_op_init(struct ceph_osd_request *osd_req,
unsigned int which, u16 opcode, u32 flags);
diff --git a/include/linux/ceph/pagelist.h b/include/linux/ceph/pagelist.h
index 13d71fe18b0c..75a7db21457d 100644
--- a/include/linux/ceph/pagelist.h
+++ b/include/linux/ceph/pagelist.h
@@ -2,7 +2,7 @@
#define __FS_CEPH_PAGELIST_H
#include <asm/byteorder.h>
-#include <linux/atomic.h>
+#include <linux/refcount.h>
#include <linux/list.h>
#include <linux/types.h>
@@ -13,7 +13,7 @@ struct ceph_pagelist {
size_t room;
struct list_head free_list;
size_t num_pages_free;
- atomic_t refcnt;
+ refcount_t refcnt;
};
struct ceph_pagelist_cursor {
@@ -30,7 +30,7 @@ static inline void ceph_pagelist_init(struct ceph_pagelist *pl)
pl->room = 0;
INIT_LIST_HEAD(&pl->free_list);
pl->num_pages_free = 0;
- atomic_set(&pl->refcnt, 1);
+ refcount_set(&pl->refcnt, 1);
}
extern void ceph_pagelist_release(struct ceph_pagelist *pl);
diff --git a/include/linux/cgroup-defs.h b/include/linux/cgroup-defs.h
index 6a3f850cabab..21745946cae1 100644
--- a/include/linux/cgroup-defs.h
+++ b/include/linux/cgroup-defs.h
@@ -13,6 +13,7 @@
#include <linux/wait.h>
#include <linux/mutex.h>
#include <linux/rcupdate.h>
+#include <linux/refcount.h>
#include <linux/percpu-refcount.h>
#include <linux/percpu-rwsem.h>
#include <linux/workqueue.h>
@@ -106,9 +107,6 @@ struct cgroup_subsys_state {
/* reference count - access via css_[try]get() and css_put() */
struct percpu_ref refcnt;
- /* PI: the parent css */
- struct cgroup_subsys_state *parent;
-
/* siblings list anchored at the parent's ->children */
struct list_head sibling;
struct list_head children;
@@ -138,6 +136,12 @@ struct cgroup_subsys_state {
/* percpu_ref killing and RCU release */
struct rcu_head rcu_head;
struct work_struct destroy_work;
+
+ /*
+ * PI: the parent css. Placed here for cache proximity to following
+ * fields of the containing structure.
+ */
+ struct cgroup_subsys_state *parent;
};
/*
@@ -156,7 +160,7 @@ struct css_set {
struct cgroup_subsys_state *subsys[CGROUP_SUBSYS_COUNT];
/* reference count */
- atomic_t refcount;
+ refcount_t refcount;
/* the default cgroup associated with this css_set */
struct cgroup *dfl_cgrp;
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index af9c86e958bd..ed2573e149fa 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -17,11 +17,11 @@
#include <linux/seq_file.h>
#include <linux/kernfs.h>
#include <linux/jump_label.h>
-#include <linux/nsproxy.h>
#include <linux/types.h>
#include <linux/ns_common.h>
#include <linux/nsproxy.h>
#include <linux/user_namespace.h>
+#include <linux/refcount.h>
#include <linux/cgroup-defs.h>
@@ -661,7 +661,7 @@ static inline void cgroup_sk_free(struct sock_cgroup_data *skcd) {}
#endif /* CONFIG_CGROUP_DATA */
struct cgroup_namespace {
- atomic_t count;
+ refcount_t count;
struct ns_common ns;
struct user_namespace *user_ns;
struct ucounts *ucounts;
@@ -696,12 +696,12 @@ copy_cgroup_ns(unsigned long flags, struct user_namespace *user_ns,
static inline void get_cgroup_ns(struct cgroup_namespace *ns)
{
if (ns)
- atomic_inc(&ns->count);
+ refcount_inc(&ns->count);
}
static inline void put_cgroup_ns(struct cgroup_namespace *ns)
{
- if (ns && atomic_dec_and_test(&ns->count))
+ if (ns && refcount_dec_and_test(&ns->count))
free_cgroup_ns(ns);
}
diff --git a/include/linux/clk.h b/include/linux/clk.h
index e9d36b3e49de..024cd07870d0 100644
--- a/include/linux/clk.h
+++ b/include/linux/clk.h
@@ -132,8 +132,8 @@ int clk_get_phase(struct clk *clk);
* @q: clk compared against p
*
* Returns true if the two struct clk pointers both point to the same hardware
- * clock node. Put differently, returns true if struct clk *p and struct clk *q
- * share the same struct clk_core object.
+ * clock node. Put differently, returns true if @p and @q
+ * share the same &struct clk_core object.
*
* Returns false otherwise. Note that two NULL clks are treated as matching.
*/
diff --git a/include/linux/clockchips.h b/include/linux/clockchips.h
index 6d7edc3082f9..acc9ce05e5f0 100644
--- a/include/linux/clockchips.h
+++ b/include/linux/clockchips.h
@@ -182,7 +182,6 @@ extern u64 clockevent_delta2ns(unsigned long latch, struct clock_event_device *e
extern void clockevents_register_device(struct clock_event_device *dev);
extern int clockevents_unbind_device(struct clock_event_device *ced, int cpu);
-extern void clockevents_config(struct clock_event_device *dev, u32 freq);
extern void clockevents_config_and_register(struct clock_event_device *dev,
u32 freq, unsigned long min_delta,
unsigned long max_delta);
diff --git a/include/linux/clocksource.h b/include/linux/clocksource.h
index cfc75848a35d..f2b10d9ebd04 100644
--- a/include/linux/clocksource.h
+++ b/include/linux/clocksource.h
@@ -120,7 +120,7 @@ struct clocksource {
#define CLOCK_SOURCE_RESELECT 0x100
/* simplify initialization of mask field */
-#define CLOCKSOURCE_MASK(bits) (u64)((bits) < 64 ? ((1ULL<<(bits))-1) : -1)
+#define CLOCKSOURCE_MASK(bits) GENMASK_ULL((bits) - 1, 0)
static inline u32 clocksource_freq2mult(u32 freq, u32 shift_constant, u64 from)
{
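The GENMASK_ULL() form yields the same values as the old ternary while avoiding the undefined 1ULL << 64 shift that the bits == 64 special case existed to dodge. A quick sketch of how the macro is typically consumed when registering a clocksource (the device specifics are placeholders):

static u64 example_cs_read(struct clocksource *cs)
{
	return 0;	/* read the hardware counter here */
}

static struct clocksource example_cs = {
	.name	= "example",
	.rating	= 200,
	.read	= example_cs_read,
	.mask	= CLOCKSOURCE_MASK(56),	/* 56-bit counter: GENMASK_ULL(55, 0) */
	.flags	= CLOCK_SOURCE_IS_CONTINUOUS,
};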
diff --git a/include/linux/cma.h b/include/linux/cma.h
index 03f32d0bd1d8..3e8fbf5a5c73 100644
--- a/include/linux/cma.h
+++ b/include/linux/cma.h
@@ -21,15 +21,19 @@ struct cma;
extern unsigned long totalcma_pages;
extern phys_addr_t cma_get_base(const struct cma *cma);
extern unsigned long cma_get_size(const struct cma *cma);
+extern const char *cma_get_name(const struct cma *cma);
extern int __init cma_declare_contiguous(phys_addr_t base,
phys_addr_t size, phys_addr_t limit,
phys_addr_t alignment, unsigned int order_per_bit,
- bool fixed, struct cma **res_cma);
+ bool fixed, const char *name, struct cma **res_cma);
extern int cma_init_reserved_mem(phys_addr_t base, phys_addr_t size,
unsigned int order_per_bit,
+ const char *name,
struct cma **res_cma);
extern struct page *cma_alloc(struct cma *cma, size_t count, unsigned int align,
gfp_t gfp_mask);
extern bool cma_release(struct cma *cma, const struct page *pages, unsigned int count);
+
+extern int cma_for_each_area(int (*it)(struct cma *cma, void *data), void *data);
#endif
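cma_for_each_area() walks every reserved area and hands each one to the callback, which pairs naturally with the new per-area names. A hedged sketch (the printing is illustrative; a non-zero return from the callback is assumed to stop the walk):

static int example_cma_report_one(struct cma *cma, void *data)
{
	unsigned long *total = data;
	phys_addr_t base = cma_get_base(cma);

	*total += cma_get_size(cma);
	pr_info("CMA area %s: base %pa, %lu bytes\n",
		cma_get_name(cma), &base, cma_get_size(cma));
	return 0;
}

static void example_cma_report(void)
{
	unsigned long total = 0;

	cma_for_each_area(example_cma_report_one, &total);
	pr_info("total CMA reserved: %lu bytes\n", total);
}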
diff --git a/include/linux/coda_psdev.h b/include/linux/coda_psdev.h
index 5b8721efa948..31e4e1f1547c 100644
--- a/include/linux/coda_psdev.h
+++ b/include/linux/coda_psdev.h
@@ -15,7 +15,6 @@ struct venus_comm {
struct list_head vc_processing;
int vc_inuse;
struct super_block *vc_sb;
- struct backing_dev_info bdi;
struct mutex vc_mutex;
};
diff --git a/include/linux/compat.h b/include/linux/compat.h
index aef47be2a5c1..1c5f3152cbb5 100644
--- a/include/linux/compat.h
+++ b/include/linux/compat.h
@@ -295,6 +295,13 @@ struct compat_old_sigaction {
};
#endif
+struct compat_keyctl_kdf_params {
+ compat_uptr_t hashname;
+ compat_uptr_t otherinfo;
+ __u32 otherinfolen;
+ __u32 __spare[8];
+};
+
struct compat_statfs;
struct compat_statfs64;
struct compat_old_linux_dirent;
@@ -528,11 +535,6 @@ asmlinkage long compat_sys_old_readdir(unsigned int fd,
asmlinkage long compat_sys_getdents(unsigned int fd,
struct compat_linux_dirent __user *dirent,
unsigned int count);
-#ifdef __ARCH_WANT_COMPAT_SYS_GETDENTS64
-asmlinkage long compat_sys_getdents64(unsigned int fd,
- struct linux_dirent64 __user *dirent,
- unsigned int count);
-#endif
asmlinkage long compat_sys_vmsplice(int fd, const struct compat_iovec __user *,
unsigned int nr_segs, unsigned int flags);
asmlinkage long compat_sys_open(const char __user *filename, int flags,
@@ -723,6 +725,8 @@ asmlinkage long compat_sys_sched_rr_get_interval(compat_pid_t pid,
asmlinkage long compat_sys_fanotify_mark(int, unsigned int, __u32, __u32,
int, const char __user *);
+asmlinkage long compat_sys_arch_prctl(int option, unsigned long arg2);
+
/*
* For most but not all architectures, "am I in a compat syscall?" and
* "am I a compat task?" are the same question. For architectures on which
diff --git a/include/linux/console.h b/include/linux/console.h
index 5949d1855589..b8920a031a3e 100644
--- a/include/linux/console.h
+++ b/include/linux/console.h
@@ -212,4 +212,6 @@ extern bool vgacon_text_force(void);
static inline bool vgacon_text_force(void) { return false; }
#endif
+extern void console_init(void);
+
#endif /* _LINUX_CONSOLE_H */
diff --git a/include/linux/coresight.h b/include/linux/coresight.h
index 2a5982c37dfb..035c16c9a505 100644
--- a/include/linux/coresight.h
+++ b/include/linux/coresight.h
@@ -201,7 +201,7 @@ struct coresight_ops_sink {
void *sink_config);
unsigned long (*reset_buffer)(struct coresight_device *csdev,
struct perf_output_handle *handle,
- void *sink_config, bool *lost);
+ void *sink_config);
void (*update_buffer)(struct coresight_device *csdev,
struct perf_output_handle *handle,
void *sink_config);
diff --git a/include/linux/cpufreq.h b/include/linux/cpufreq.h
index 87165f06a307..a5ce0bbeadb5 100644
--- a/include/linux/cpufreq.h
+++ b/include/linux/cpufreq.h
@@ -120,6 +120,13 @@ struct cpufreq_policy {
bool fast_switch_possible;
bool fast_switch_enabled;
+ /*
+ * Preferred average time interval between consecutive invocations of
+ * the driver to set the frequency for this policy. To be set by the
+ * scaling driver (0, which is the default, means no preference).
+ */
+ unsigned int transition_delay_us;
+
/* Cached frequency lookup from cpufreq_driver_resolve_freq. */
unsigned int cached_target_freq;
int cached_resolved_idx;
diff --git a/include/linux/cpuhotplug.h b/include/linux/cpuhotplug.h
index 62d240e962f0..0f2a80377520 100644
--- a/include/linux/cpuhotplug.h
+++ b/include/linux/cpuhotplug.h
@@ -94,6 +94,7 @@ enum cpuhp_state {
CPUHP_AP_ARM_VFP_STARTING,
CPUHP_AP_ARM64_DEBUG_MONITORS_STARTING,
CPUHP_AP_PERF_ARM_HW_BREAKPOINT_STARTING,
+ CPUHP_AP_PERF_ARM_ACPI_STARTING,
CPUHP_AP_PERF_ARM_STARTING,
CPUHP_AP_ARM_L2X0_STARTING,
CPUHP_AP_ARM_ARCH_TIMER_STARTING,
@@ -137,6 +138,7 @@ enum cpuhp_state {
CPUHP_AP_PERF_ARM_CCN_ONLINE,
CPUHP_AP_PERF_ARM_L2X0_ONLINE,
CPUHP_AP_PERF_ARM_QCOM_L2_ONLINE,
+ CPUHP_AP_PERF_ARM_QCOM_L3_ONLINE,
CPUHP_AP_WORKQUEUE_ONLINE,
CPUHP_AP_RCUTREE_ONLINE,
CPUHP_AP_ONLINE_DYN,
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 96f1e88b767c..2404ad238c0b 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -40,9 +40,9 @@ extern int nr_cpu_ids;
#ifdef CONFIG_CPUMASK_OFFSTACK
/* Assuming NR_CPUS is huge, a runtime limit is more efficient. Also,
* not all bits may be allocated. */
-#define nr_cpumask_bits nr_cpu_ids
+#define nr_cpumask_bits ((unsigned int)nr_cpu_ids)
#else
-#define nr_cpumask_bits NR_CPUS
+#define nr_cpumask_bits ((unsigned int)NR_CPUS)
#endif
/*
@@ -667,6 +667,11 @@ void alloc_bootmem_cpumask_var(cpumask_var_t *mask);
void free_cpumask_var(cpumask_var_t mask);
void free_bootmem_cpumask_var(cpumask_var_t mask);
+static inline bool cpumask_available(cpumask_var_t mask)
+{
+ return mask != NULL;
+}
+
#else
typedef struct cpumask cpumask_var_t[1];
@@ -708,6 +713,11 @@ static inline void free_cpumask_var(cpumask_var_t mask)
static inline void free_bootmem_cpumask_var(cpumask_var_t mask)
{
}
+
+static inline bool cpumask_available(cpumask_var_t mask)
+{
+ return true;
+}
#endif /* CONFIG_CPUMASK_OFFSTACK */
/* It's common to want to use cpu_all_mask in struct member initializers,
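cpumask_available() gives callers one spelling for "has this cpumask_var_t been allocated yet": with CONFIG_CPUMASK_OFFSTACK it is a real NULL check on the pointer, otherwise it is constant true. A small sketch of the intended use:

static cpumask_var_t example_mask;

static int example_track_cpu(int cpu)
{
	if (!cpumask_available(example_mask) &&
	    !zalloc_cpumask_var(&example_mask, GFP_KERNEL))
		return -ENOMEM;

	cpumask_set_cpu(cpu, example_mask);
	return 0;
}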
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 611fce58d670..119a3f9604b0 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -42,7 +42,7 @@ static inline void cpuset_dec(void)
extern int cpuset_init(void);
extern void cpuset_init_smp(void);
-extern void cpuset_update_active_cpus(bool cpu_online);
+extern void cpuset_update_active_cpus(void);
extern void cpuset_cpus_allowed(struct task_struct *p, struct cpumask *mask);
extern void cpuset_cpus_allowed_fallback(struct task_struct *p);
extern nodemask_t cpuset_mems_allowed(struct task_struct *p);
@@ -155,7 +155,7 @@ static inline bool cpusets_enabled(void) { return false; }
static inline int cpuset_init(void) { return 0; }
static inline void cpuset_init_smp(void) {}
-static inline void cpuset_update_active_cpus(bool cpu_online)
+static inline void cpuset_update_active_cpus(void)
{
partition_sched_domains(1, NULL, NULL);
}
diff --git a/include/linux/crash_core.h b/include/linux/crash_core.h
new file mode 100644
index 000000000000..541a197ba4a2
--- /dev/null
+++ b/include/linux/crash_core.h
@@ -0,0 +1,69 @@
+#ifndef LINUX_CRASH_CORE_H
+#define LINUX_CRASH_CORE_H
+
+#include <linux/linkage.h>
+#include <linux/elfcore.h>
+#include <linux/elf.h>
+
+#define CRASH_CORE_NOTE_NAME "CORE"
+#define CRASH_CORE_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4)
+#define CRASH_CORE_NOTE_NAME_BYTES ALIGN(sizeof(CRASH_CORE_NOTE_NAME), 4)
+#define CRASH_CORE_NOTE_DESC_BYTES ALIGN(sizeof(struct elf_prstatus), 4)
+
+#define CRASH_CORE_NOTE_BYTES ((CRASH_CORE_NOTE_HEAD_BYTES * 2) + \
+ CRASH_CORE_NOTE_NAME_BYTES + \
+ CRASH_CORE_NOTE_DESC_BYTES)
+
+#define VMCOREINFO_BYTES (4096)
+#define VMCOREINFO_NOTE_NAME "VMCOREINFO"
+#define VMCOREINFO_NOTE_NAME_BYTES ALIGN(sizeof(VMCOREINFO_NOTE_NAME), 4)
+#define VMCOREINFO_NOTE_SIZE ((CRASH_CORE_NOTE_HEAD_BYTES * 2) + \
+ VMCOREINFO_NOTE_NAME_BYTES + \
+ VMCOREINFO_BYTES)
+
+typedef u32 note_buf_t[CRASH_CORE_NOTE_BYTES/4];
+
+void crash_save_vmcoreinfo(void);
+void arch_crash_save_vmcoreinfo(void);
+__printf(1, 2)
+void vmcoreinfo_append_str(const char *fmt, ...);
+phys_addr_t paddr_vmcoreinfo_note(void);
+
+#define VMCOREINFO_OSRELEASE(value) \
+ vmcoreinfo_append_str("OSRELEASE=%s\n", value)
+#define VMCOREINFO_PAGESIZE(value) \
+ vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
+#define VMCOREINFO_SYMBOL(name) \
+ vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)
+#define VMCOREINFO_SIZE(name) \
+ vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
+ (unsigned long)sizeof(name))
+#define VMCOREINFO_STRUCT_SIZE(name) \
+ vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
+ (unsigned long)sizeof(struct name))
+#define VMCOREINFO_OFFSET(name, field) \
+ vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \
+ (unsigned long)offsetof(struct name, field))
+#define VMCOREINFO_LENGTH(name, value) \
+ vmcoreinfo_append_str("LENGTH(%s)=%lu\n", #name, (unsigned long)value)
+#define VMCOREINFO_NUMBER(name) \
+ vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name)
+#define VMCOREINFO_CONFIG(name) \
+ vmcoreinfo_append_str("CONFIG_%s=y\n", #name)
+
+extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
+extern size_t vmcoreinfo_size;
+extern size_t vmcoreinfo_max_size;
+
+Elf_Word *append_elf_note(Elf_Word *buf, char *name, unsigned int type,
+ void *data, size_t data_len);
+void final_note(Elf_Word *buf);
+
+int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
+ unsigned long long *crash_size, unsigned long long *crash_base);
+int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
+ unsigned long long *crash_size, unsigned long long *crash_base);
+int parse_crashkernel_low(char *cmdline, unsigned long long system_ram,
+ unsigned long long *crash_size, unsigned long long *crash_base);
+
+#endif /* LINUX_CRASH_CORE_H */
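Each VMCOREINFO_* helper appends one "KEY=value" line to the vmcoreinfo note that crash/makedumpfile later parses. A short sketch of how core or arch code typically emits entries with them (the particular symbols below mirror common generic ones and are not added by this header):

static void example_fill_vmcoreinfo(void)
{
	VMCOREINFO_PAGESIZE(PAGE_SIZE);
	VMCOREINFO_SYMBOL(_stext);
	VMCOREINFO_STRUCT_SIZE(list_head);
	VMCOREINFO_OFFSET(list_head, next);
	VMCOREINFO_LENGTH(zone.free_area, MAX_ORDER);
}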
diff --git a/include/linux/crypto.h b/include/linux/crypto.h
index c0b0cf3d2d2f..84da9978e951 100644
--- a/include/linux/crypto.h
+++ b/include/linux/crypto.h
@@ -123,7 +123,7 @@
/*
* Miscellaneous stuff.
*/
-#define CRYPTO_MAX_ALG_NAME 64
+#define CRYPTO_MAX_ALG_NAME 128
/*
* The macro CRYPTO_MINALIGN_ATTR (along with the void * type in the actual
diff --git a/include/linux/cryptohash.h b/include/linux/cryptohash.h
index 3252799832cf..df4d3e943d28 100644
--- a/include/linux/cryptohash.h
+++ b/include/linux/cryptohash.h
@@ -10,9 +10,4 @@
void sha_init(__u32 *buf);
void sha_transform(__u32 *digest, const char *data, __u32 *W);
-#define MD5_DIGEST_WORDS 4
-#define MD5_MESSAGE_BYTES 64
-
-void md5_transform(__u32 *hash, __u32 const *in);
-
#endif
diff --git a/include/linux/dax.h b/include/linux/dax.h
index d8a3dc042e1c..d3158e74a59e 100644
--- a/include/linux/dax.h
+++ b/include/linux/dax.h
@@ -7,6 +7,28 @@
#include <asm/pgtable.h>
struct iomap_ops;
+struct dax_device;
+struct dax_operations {
+ /*
+ * direct_access: translate a device-relative
+ * logical-page-offset into an absolute physical pfn. Return the
+ * number of pages available for DAX at that pfn.
+ */
+ long (*direct_access)(struct dax_device *, pgoff_t, long,
+ void **, pfn_t *);
+};
+
+int dax_read_lock(void);
+void dax_read_unlock(int id);
+struct dax_device *dax_get_by_host(const char *host);
+struct dax_device *alloc_dax(void *private, const char *host,
+ const struct dax_operations *ops);
+void put_dax(struct dax_device *dax_dev);
+bool dax_alive(struct dax_device *dax_dev);
+void kill_dax(struct dax_device *dax_dev);
+void *dax_get_private(struct dax_device *dax_dev);
+long dax_direct_access(struct dax_device *dax_dev, pgoff_t pgoff, long nr_pages,
+ void **kaddr, pfn_t *pfn);
/*
* We use lowest available bit in exceptional entry for locking, one bit for
@@ -48,17 +70,13 @@ void dax_wake_mapping_entry_waiter(struct address_space *mapping,
pgoff_t index, void *entry, bool wake_all);
#ifdef CONFIG_FS_DAX
-struct page *read_dax_sector(struct block_device *bdev, sector_t n);
-int __dax_zero_page_range(struct block_device *bdev, sector_t sector,
+int __dax_zero_page_range(struct block_device *bdev,
+ struct dax_device *dax_dev, sector_t sector,
unsigned int offset, unsigned int length);
#else
-static inline struct page *read_dax_sector(struct block_device *bdev,
- sector_t n)
-{
- return ERR_PTR(-ENXIO);
-}
static inline int __dax_zero_page_range(struct block_device *bdev,
- sector_t sector, unsigned int offset, unsigned int length)
+ struct dax_device *dax_dev, sector_t sector,
+ unsigned int offset, unsigned int length)
{
return -ENXIO;
}
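The new dax_device object is used under dax_read_lock(), which pins it against kill_dax() while a translation is in flight. A minimal sketch of the calling convention declared above:

static long example_dax_peek(struct dax_device *dax_dev, pgoff_t pgoff)
{
	void *kaddr;
	pfn_t pfn;
	long avail;
	int id;

	id = dax_read_lock();
	if (!dax_alive(dax_dev)) {
		dax_read_unlock(id);
		return -ENXIO;
	}

	/* ask for one page; returns how many pages are mappable at pgoff */
	avail = dax_direct_access(dax_dev, pgoff, 1, &kaddr, &pfn);
	dax_read_unlock(id);

	return avail;
}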
diff --git a/include/linux/debugfs.h b/include/linux/debugfs.h
index 7dff776e6d16..9174b0d28582 100644
--- a/include/linux/debugfs.h
+++ b/include/linux/debugfs.h
@@ -74,7 +74,7 @@ static const struct file_operations __fops = { \
.release = simple_attr_release, \
.read = debugfs_attr_read, \
.write = debugfs_attr_write, \
- .llseek = generic_file_llseek, \
+ .llseek = no_llseek, \
}
#if defined(CONFIG_DEBUG_FS)
diff --git a/include/linux/dell-led.h b/include/linux/dell-led.h
index 7009b8bec77b..3f033c48071e 100644
--- a/include/linux/dell-led.h
+++ b/include/linux/dell-led.h
@@ -1,10 +1,6 @@
#ifndef __DELL_LED_H__
#define __DELL_LED_H__
-enum {
- DELL_LED_MICMUTE,
-};
-
-int dell_app_wmi_led_set(int whichled, int on);
+int dell_micmute_led_set(int on);
#endif
diff --git a/include/linux/devfreq.h b/include/linux/devfreq.h
index e0acb0e5243b..6c220e4ebb6b 100644
--- a/include/linux/devfreq.h
+++ b/include/linux/devfreq.h
@@ -27,6 +27,7 @@
#define DEVFREQ_POSTCHANGE (1)
struct devfreq;
+struct devfreq_governor;
/**
* struct devfreq_dev_status - Data given from devfreq user device to
@@ -101,35 +102,6 @@ struct devfreq_dev_profile {
};
/**
- * struct devfreq_governor - Devfreq policy governor
- * @node: list node - contains registered devfreq governors
- * @name: Governor's name
- * @immutable: Immutable flag for governor. If the value is 1,
- * this govenror is never changeable to other governor.
- * @get_target_freq: Returns desired operating frequency for the device.
- * Basically, get_target_freq will run
- * devfreq_dev_profile.get_dev_status() to get the
- * status of the device (load = busy_time / total_time).
- * If no_central_polling is set, this callback is called
- * only with update_devfreq() notified by OPP.
- * @event_handler: Callback for devfreq core framework to notify events
- * to governors. Events include per device governor
- * init and exit, opp changes out of devfreq, suspend
- * and resume of per device devfreq during device idle.
- *
- * Note that the callbacks are called with devfreq->lock locked by devfreq.
- */
-struct devfreq_governor {
- struct list_head node;
-
- const char name[DEVFREQ_NAME_LEN];
- const unsigned int immutable;
- int (*get_target_freq)(struct devfreq *this, unsigned long *freq);
- int (*event_handler)(struct devfreq *devfreq,
- unsigned int event, void *data);
-};
-
-/**
* struct devfreq - Device devfreq structure
* @node: list node - contains the devices with devfreq that have been
* registered.
diff --git a/include/linux/device-mapper.h b/include/linux/device-mapper.h
index a7e6903866fd..f4c639c0c362 100644
--- a/include/linux/device-mapper.h
+++ b/include/linux/device-mapper.h
@@ -22,11 +22,13 @@ struct bio_vec;
/*
* Type of table, mapped_device's mempool and request_queue
*/
-#define DM_TYPE_NONE 0
-#define DM_TYPE_BIO_BASED 1
-#define DM_TYPE_REQUEST_BASED 2
-#define DM_TYPE_MQ_REQUEST_BASED 3
-#define DM_TYPE_DAX_BIO_BASED 4
+enum dm_queue_mode {
+ DM_TYPE_NONE = 0,
+ DM_TYPE_BIO_BASED = 1,
+ DM_TYPE_REQUEST_BASED = 2,
+ DM_TYPE_MQ_REQUEST_BASED = 3,
+ DM_TYPE_DAX_BIO_BASED = 4,
+};
typedef enum { STATUSTYPE_INFO, STATUSTYPE_TABLE } status_type_t;
@@ -128,13 +130,15 @@ typedef int (*dm_busy_fn) (struct dm_target *ti);
* < 0 : error
* >= 0 : the number of bytes accessible at the address
*/
-typedef long (*dm_direct_access_fn) (struct dm_target *ti, sector_t sector,
- void **kaddr, pfn_t *pfn, long size);
+typedef long (*dm_dax_direct_access_fn) (struct dm_target *ti, pgoff_t pgoff,
+ long nr_pages, void **kaddr, pfn_t *pfn);
+#define PAGE_SECTORS (PAGE_SIZE / 512)
void dm_error(const char *message);
struct dm_dev {
struct block_device *bdev;
+ struct dax_device *dax_dev;
fmode_t mode;
char name[16];
};
@@ -176,7 +180,7 @@ struct target_type {
dm_busy_fn busy;
dm_iterate_devices_fn iterate_devices;
dm_io_hints_fn io_hints;
- dm_direct_access_fn direct_access;
+ dm_dax_direct_access_fn direct_access;
/* For internal device-mapper use. */
struct list_head list;
@@ -221,6 +225,18 @@ struct target_type {
*/
typedef unsigned (*dm_num_write_bios_fn) (struct dm_target *ti, struct bio *bio);
+/*
+ * A target implements own bio data integrity.
+ */
+#define DM_TARGET_INTEGRITY 0x00000010
+#define dm_target_has_integrity(type) ((type)->features & DM_TARGET_INTEGRITY)
+
+/*
+ * A target passes integrity data to the lower device.
+ */
+#define DM_TARGET_PASSES_INTEGRITY 0x00000020
+#define dm_target_passes_integrity(type) ((type)->features & DM_TARGET_PASSES_INTEGRITY)
+
struct dm_target {
struct dm_table *table;
struct target_type *type;
@@ -255,6 +271,12 @@ struct dm_target {
unsigned num_write_same_bios;
/*
+ * The number of WRITE ZEROES bios that will be submitted to the target.
+ * The bio number can be accessed with dm_bio_get_target_bio_nr.
+ */
+ unsigned num_write_zeroes_bios;
+
+ /*
* The minimum number of extra bytes allocated in each io for the
* target to use.
*/
@@ -290,11 +312,6 @@ struct dm_target {
* on max_io_len boundary.
*/
bool split_discard_bios:1;
-
- /*
- * Set if this target does not return zeroes on discarded blocks.
- */
- bool discard_zeroes_data_unsupported:1;
};
/* Each target can link one of these into the table */
@@ -464,7 +481,7 @@ void dm_table_add_target_callbacks(struct dm_table *t, struct dm_target_callback
* Useful for "hybrid" target (supports both bio-based
* and request-based).
*/
-void dm_table_set_type(struct dm_table *t, unsigned type);
+void dm_table_set_type(struct dm_table *t, enum dm_queue_mode type);
/*
* Finally call this to make the table ready for use.
@@ -578,6 +595,7 @@ extern struct ratelimit_state dm_ratelimit_state;
/*
* Definitions of return values from target end_io function.
*/
+#define DM_ENDIO_DONE 0
#define DM_ENDIO_INCOMPLETE 1
#define DM_ENDIO_REQUEUE 2
@@ -588,6 +606,7 @@ extern struct ratelimit_state dm_ratelimit_state;
#define DM_MAPIO_REMAPPED 1
#define DM_MAPIO_REQUEUE DM_ENDIO_REQUEUE
#define DM_MAPIO_DELAY_REQUEUE 3
+#define DM_MAPIO_KILL 4
#define dm_sector_div64(x, y)( \
{ \
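Targets that forward DAX requests now implement the page-offset based prototype instead of the old sector/size one. A rough sketch of the shape (the per-target context and any offset remapping are placeholders; dm-linear implements the real remapping version of this):

struct example_target_ctx {
	struct dm_dev *dev;	/* opened with dm_get_device() elsewhere */
};

static long example_dax_direct_access(struct dm_target *ti, pgoff_t pgoff,
				      long nr_pages, void **kaddr, pfn_t *pfn)
{
	struct example_target_ctx *ec = ti->private;

	/* a real target remaps pgoff by its start offset before forwarding */
	return dax_direct_access(ec->dev->dax_dev, pgoff, nr_pages,
				 kaddr, pfn);
}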
diff --git a/include/linux/dma-buf.h b/include/linux/dma-buf.h
index bfb3704fc6fc..79f27d60ec66 100644
--- a/include/linux/dma-buf.h
+++ b/include/linux/dma-buf.h
@@ -39,13 +39,13 @@ struct dma_buf_attachment;
/**
* struct dma_buf_ops - operations possible on struct dma_buf
- * @kmap_atomic: maps a page from the buffer into kernel address
- * space, users may not block until the subsequent unmap call.
- * This callback must not sleep.
- * @kunmap_atomic: [optional] unmaps a atomically mapped page from the buffer.
- * This Callback must not sleep.
- * @kmap: maps a page from the buffer into kernel address space.
- * @kunmap: [optional] unmaps a page from the buffer.
+ * @map_atomic: maps a page from the buffer into kernel address
+ * space, users may not block until the subsequent unmap call.
+ * This callback must not sleep.
+ * @unmap_atomic: [optional] unmaps an atomically mapped page from the buffer.
+ * This callback must not sleep.
+ * @map: maps a page from the buffer into kernel address space.
+ * @unmap: [optional] unmaps a page from the buffer.
* @vmap: [optional] creates a virtual mapping for the buffer into kernel
* address space. Same restrictions as for vmap and friends apply.
* @vunmap: [optional] unmaps a vmap from the buffer
@@ -206,10 +206,10 @@ struct dma_buf_ops {
* to be restarted.
*/
int (*end_cpu_access)(struct dma_buf *, enum dma_data_direction);
- void *(*kmap_atomic)(struct dma_buf *, unsigned long);
- void (*kunmap_atomic)(struct dma_buf *, unsigned long, void *);
- void *(*kmap)(struct dma_buf *, unsigned long);
- void (*kunmap)(struct dma_buf *, unsigned long, void *);
+ void *(*map_atomic)(struct dma_buf *, unsigned long);
+ void (*unmap_atomic)(struct dma_buf *, unsigned long, void *);
+ void *(*map)(struct dma_buf *, unsigned long);
+ void (*unmap)(struct dma_buf *, unsigned long, void *);
/**
* @mmap:
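With the kmap_* names dropped, an exporter's ops table now wires up the shorter map/unmap hooks. A sketch with placeholder callbacks (the mandatory attach/map_dma_buf/release members are omitted for brevity):

static void *example_map(struct dma_buf *dmabuf, unsigned long page_num)
{
	/* return a kernel mapping of page page_num; may sleep */
	return NULL;
}

static void example_unmap(struct dma_buf *dmabuf, unsigned long page_num,
			  void *vaddr)
{
	/* tear down the mapping returned by example_map() */
}

static const struct dma_buf_ops example_dmabuf_ops = {
	.map	= example_map,
	.unmap	= example_unmap,
};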
diff --git a/include/linux/dma-fence-array.h b/include/linux/dma-fence-array.h
index 5900945f962d..332a5420243c 100644
--- a/include/linux/dma-fence-array.h
+++ b/include/linux/dma-fence-array.h
@@ -83,4 +83,6 @@ struct dma_fence_array *dma_fence_array_create(int num_fences,
u64 context, unsigned seqno,
bool signal_on_any);
+bool dma_fence_match_context(struct dma_fence *fence, u64 context);
+
#endif /* __LINUX_DMA_FENCE_ARRAY_H */
diff --git a/include/linux/dma-fence.h b/include/linux/dma-fence.h
index 6048fa404e57..a5195a7d6f77 100644
--- a/include/linux/dma-fence.h
+++ b/include/linux/dma-fence.h
@@ -229,7 +229,7 @@ static inline struct dma_fence *dma_fence_get_rcu(struct dma_fence *fence)
*
* Function returns NULL if no refcount could be obtained, or the fence.
* This function handles acquiring a reference to a fence that may be
- * reallocated within the RCU grace period (such as with SLAB_DESTROY_BY_RCU),
+ * reallocated within the RCU grace period (such as with SLAB_TYPESAFE_BY_RCU),
* so long as the caller is using RCU on the pointer to the fence.
*
* An alternative mechanism is to employ a seqlock to protect a bunch of
@@ -257,7 +257,7 @@ dma_fence_get_rcu_safe(struct dma_fence * __rcu *fencep)
* have successfully acquire a reference to it. If it no
* longer matches, we are holding a reference to some other
* reallocated pointer. This is possible if the allocator
- * is using a freelist like SLAB_DESTROY_BY_RCU where the
+ * is using a freelist like SLAB_TYPESAFE_BY_RCU where the
* fence remains valid for the RCU grace period, but it
* may be reallocated. When using such allocators, we are
* responsible for ensuring the reference we get is to
diff --git a/include/linux/dma-iommu.h b/include/linux/dma-iommu.h
index 5725c94b1f12..4eac2670bfa1 100644
--- a/include/linux/dma-iommu.h
+++ b/include/linux/dma-iommu.h
@@ -20,6 +20,7 @@
#include <asm/errno.h>
#ifdef CONFIG_IOMMU_DMA
+#include <linux/dma-mapping.h>
#include <linux/iommu.h>
#include <linux/msi.h>
@@ -71,6 +72,7 @@ int iommu_dma_mapping_error(struct device *dev, dma_addr_t dma_addr);
/* The DMA API isn't _quite_ the whole story, though... */
void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg);
+void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list);
#else
@@ -100,6 +102,10 @@ static inline void iommu_dma_map_msi_msg(int irq, struct msi_msg *msg)
{
}
+static inline void iommu_dma_get_resv_regions(struct device *dev, struct list_head *list)
+{
+}
+
#endif /* CONFIG_IOMMU_DMA */
#endif /* __KERNEL__ */
#endif /* __DMA_IOMMU_H */
diff --git a/include/linux/dma-mapping.h b/include/linux/dma-mapping.h
index 0977317c6835..4f3eecedca2d 100644
--- a/include/linux/dma-mapping.h
+++ b/include/linux/dma-mapping.h
@@ -728,6 +728,18 @@ dma_mark_declared_memory_occupied(struct device *dev,
}
#endif /* CONFIG_HAVE_GENERIC_DMA_COHERENT */
+#ifdef CONFIG_HAS_DMA
+int dma_configure(struct device *dev);
+void dma_deconfigure(struct device *dev);
+#else
+static inline int dma_configure(struct device *dev)
+{
+ return 0;
+}
+
+static inline void dma_deconfigure(struct device *dev) {}
+#endif
+
/*
* Managed DMA API
*/
diff --git a/include/linux/dma_remapping.h b/include/linux/dma_remapping.h
index 187c10299722..90884072fa73 100644
--- a/include/linux/dma_remapping.h
+++ b/include/linux/dma_remapping.h
@@ -39,6 +39,7 @@ extern int iommu_calculate_agaw(struct intel_iommu *iommu);
extern int iommu_calculate_max_sagaw(struct intel_iommu *iommu);
extern int dmar_disabled;
extern int intel_iommu_enabled;
+extern int intel_iommu_tboot_noforce;
#else
static inline int iommu_calculate_agaw(struct intel_iommu *iommu)
{
diff --git a/include/linux/edac.h b/include/linux/edac.h
index 5b6adf964248..8ae0f45fafd6 100644
--- a/include/linux/edac.h
+++ b/include/linux/edac.h
@@ -28,12 +28,10 @@ struct device;
#define EDAC_OPSTATE_INT 2
extern int edac_op_state;
-extern int edac_err_assert;
-extern atomic_t edac_handlers;
-extern int edac_handler_set(void);
-extern void edac_atomic_assert_error(void);
-extern struct bus_type *edac_get_sysfs_subsys(void);
+struct bus_type *edac_get_sysfs_subsys(void);
+int edac_get_report_status(void);
+void edac_set_report_status(int new);
enum {
EDAC_REPORTING_ENABLED,
@@ -41,28 +39,6 @@ enum {
EDAC_REPORTING_FORCE
};
-extern int edac_report_status;
-#ifdef CONFIG_EDAC
-static inline int get_edac_report_status(void)
-{
- return edac_report_status;
-}
-
-static inline void set_edac_report_status(int new)
-{
- edac_report_status = new;
-}
-#else
-static inline int get_edac_report_status(void)
-{
- return EDAC_REPORTING_DISABLED;
-}
-
-static inline void set_edac_report_status(int new)
-{
-}
-#endif
-
static inline void opstate_init(void)
{
switch (edac_op_state) {
diff --git a/include/linux/efi-bgrt.h b/include/linux/efi-bgrt.h
index 2fd3993c370b..e6f624b53c3d 100644
--- a/include/linux/efi-bgrt.h
+++ b/include/linux/efi-bgrt.h
@@ -6,6 +6,7 @@
#ifdef CONFIG_ACPI_BGRT
void efi_bgrt_init(struct acpi_table_header *table);
+int __init acpi_parse_bgrt(struct acpi_table_header *table);
/* The BGRT data itself; only valid if bgrt_image != NULL. */
extern size_t bgrt_image_size;
@@ -14,6 +15,10 @@ extern struct acpi_table_bgrt bgrt_tab;
#else /* !CONFIG_ACPI_BGRT */
static inline void efi_bgrt_init(struct acpi_table_header *table) {}
+static inline int __init acpi_parse_bgrt(struct acpi_table_header *table)
+{
+ return 0;
+}
#endif /* !CONFIG_ACPI_BGRT */
diff --git a/include/linux/efi.h b/include/linux/efi.h
index 94d34e0be24f..ec36f42a2add 100644
--- a/include/linux/efi.h
+++ b/include/linux/efi.h
@@ -1435,9 +1435,6 @@ static inline int efi_runtime_map_copy(void *buf, size_t bufsz)
/* prototypes shared between arch specific and generic stub code */
-#define pr_efi(sys_table, msg) efi_printk(sys_table, "EFI stub: "msg)
-#define pr_efi_err(sys_table, msg) efi_printk(sys_table, "EFI stub: ERROR: "msg)
-
void efi_printk(efi_system_table_t *sys_table_arg, char *str);
void efi_free(efi_system_table_t *sys_table_arg, unsigned long size,
@@ -1471,7 +1468,7 @@ efi_status_t handle_cmdline_files(efi_system_table_t *sys_table_arg,
unsigned long *load_addr,
unsigned long *load_size);
-efi_status_t efi_parse_options(char *cmdline);
+efi_status_t efi_parse_options(char const *cmdline);
efi_status_t efi_setup_gop(efi_system_table_t *sys_table_arg,
struct screen_info *si, efi_guid_t *proto,
diff --git a/include/linux/elevator.h b/include/linux/elevator.h
index 22d39e8d4de1..9ec5e22846e0 100644
--- a/include/linux/elevator.h
+++ b/include/linux/elevator.h
@@ -8,6 +8,9 @@
struct io_cq;
struct elevator_type;
+#ifdef CONFIG_BLK_DEBUG_FS
+struct blk_mq_debugfs_attr;
+#endif
/*
* Return values from elevator merger
@@ -93,6 +96,8 @@ struct blk_mq_hw_ctx;
struct elevator_mq_ops {
int (*init_sched)(struct request_queue *, struct elevator_type *);
void (*exit_sched)(struct elevator_queue *);
+ int (*init_hctx)(struct blk_mq_hw_ctx *, unsigned int);
+ void (*exit_hctx)(struct blk_mq_hw_ctx *, unsigned int);
bool (*allow_merge)(struct request_queue *, struct request *, struct bio *);
bool (*bio_merge)(struct blk_mq_hw_ctx *, struct bio *);
@@ -104,7 +109,7 @@ struct elevator_mq_ops {
void (*insert_requests)(struct blk_mq_hw_ctx *, struct list_head *, bool);
struct request *(*dispatch_request)(struct blk_mq_hw_ctx *);
bool (*has_work)(struct blk_mq_hw_ctx *);
- void (*completed_request)(struct blk_mq_hw_ctx *, struct request *);
+ void (*completed_request)(struct request *);
void (*started_request)(struct request *);
void (*requeue_request)(struct request *);
struct request *(*former_request)(struct request_queue *, struct request *);
@@ -142,6 +147,10 @@ struct elevator_type
char elevator_name[ELV_NAME_MAX];
struct module *elevator_owner;
bool uses_mq;
+#ifdef CONFIG_BLK_DEBUG_FS
+ const struct blk_mq_debugfs_attr *queue_debugfs_attrs;
+ const struct blk_mq_debugfs_attr *hctx_debugfs_attrs;
+#endif
/* managed by elevator core */
char icq_cache_name[ELV_NAME_MAX + 5]; /* elvname + "_io_cq" */
@@ -212,7 +221,6 @@ extern ssize_t elv_iosched_store(struct request_queue *, const char *, size_t);
extern int elevator_init(struct request_queue *, char *);
extern void elevator_exit(struct request_queue *, struct elevator_queue *);
-extern int elevator_change(struct request_queue *, const char *);
extern bool elv_bio_merge_ok(struct request *, struct bio *);
extern struct elevator_queue *elevator_alloc(struct request_queue *,
struct elevator_type *);
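As a rough sketch of how an mq scheduler might use the new per-hardware-queue hooks (hypothetical scheduler: struct my_hctx_data and the function names are invented, and hctx->sched_data is assumed to be the usual per-hctx scheduler pointer):

struct my_hctx_data {
        unsigned long dispatched;       /* example per-queue statistic */
};

static int my_sched_init_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
{
        hctx->sched_data = kzalloc(sizeof(struct my_hctx_data), GFP_KERNEL);
        return hctx->sched_data ? 0 : -ENOMEM;
}

static void my_sched_exit_hctx(struct blk_mq_hw_ctx *hctx, unsigned int idx)
{
        kfree(hctx->sched_data);
        hctx->sched_data = NULL;
}

These would be wired into struct elevator_mq_ops as .init_hctx and .exit_hctx.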
diff --git a/include/linux/elf.h b/include/linux/elf.h
index 20fa8d8ae313..ba069e8f4f78 100644
--- a/include/linux/elf.h
+++ b/include/linux/elf.h
@@ -29,6 +29,7 @@ extern Elf32_Dyn _DYNAMIC [];
#define elf_note elf32_note
#define elf_addr_t Elf32_Off
#define Elf_Half Elf32_Half
+#define Elf_Word Elf32_Word
#else
@@ -39,6 +40,7 @@ extern Elf64_Dyn _DYNAMIC [];
#define elf_note elf64_note
#define elf_addr_t Elf64_Off
#define Elf_Half Elf64_Half
+#define Elf_Word Elf64_Word
#endif
diff --git a/include/linux/etherdevice.h b/include/linux/etherdevice.h
index c62b709b1ce0..2d9f80848d4b 100644
--- a/include/linux/etherdevice.h
+++ b/include/linux/etherdevice.h
@@ -447,21 +447,6 @@ static inline void eth_addr_dec(u8 *addr)
}
/**
- * ether_addr_greater - Compare two Ethernet addresses
- * @addr1: Pointer to a six-byte array containing the Ethernet address
- * @addr2: Pointer other six-byte array containing the Ethernet address
- *
- * Compare two Ethernet addresses, returns true addr1 is greater than addr2
- */
-static inline bool ether_addr_greater(const u8 *addr1, const u8 *addr2)
-{
- u64 u1 = ether_addr_to_u64(addr1);
- u64 u2 = ether_addr_to_u64(addr2);
-
- return u1 > u2;
-}
-
-/**
* is_etherdev_addr - Tell if given Ethernet address belongs to the device.
* @dev: Pointer to a device structure
* @addr: Pointer to a six-byte array containing the Ethernet address
diff --git a/include/linux/ethtool.h b/include/linux/ethtool.h
index 9ded8c6d8176..83cc9863444b 100644
--- a/include/linux/ethtool.h
+++ b/include/linux/ethtool.h
@@ -60,6 +60,7 @@ enum ethtool_phys_id_state {
enum {
ETH_RSS_HASH_TOP_BIT, /* Configurable RSS hash function - Toeplitz */
ETH_RSS_HASH_XOR_BIT, /* Configurable RSS hash function - Xor */
+ ETH_RSS_HASH_CRC32_BIT, /* Configurable RSS hash function - Crc32 */
/*
* Add your fresh new hash function bits above and remember to update
@@ -73,6 +74,7 @@ enum {
#define ETH_RSS_HASH_TOP __ETH_RSS_HASH(TOP)
#define ETH_RSS_HASH_XOR __ETH_RSS_HASH(XOR)
+#define ETH_RSS_HASH_CRC32 __ETH_RSS_HASH(CRC32)
#define ETH_RSS_HASH_UNKNOWN 0
#define ETH_RSS_HASH_NO_CHANGE 0
diff --git a/include/linux/extcon.h b/include/linux/extcon.h
index 7010fb01a81a..7e206a9f88db 100644
--- a/include/linux/extcon.h
+++ b/include/linux/extcon.h
@@ -236,11 +236,11 @@ extern int extcon_set_property_capability(struct extcon_dev *edev,
unsigned int id, unsigned int prop);
/*
- * Following APIs are to monitor every action of a notifier.
- * Registrar gets notified for every external port of a connection device.
- * Probably this could be used to debug an action of notifier; however,
- * we do not recommend to use this for normal 'notifiee' device drivers who
- * want to be notified by a specific external port of the notifier.
+ * The following APIs monitor status changes of the external connectors.
+ * extcon_register_notifier(*edev, id, *nb) : Register a notifier block
+ * for a specific external connector of the extcon.
+ * extcon_register_notifier_all(*edev, *nb) : Register a notifier block
+ * for all supported external connectors of the extcon.
*/
extern int extcon_register_notifier(struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
@@ -253,6 +253,17 @@ extern void devm_extcon_unregister_notifier(struct device *dev,
struct extcon_dev *edev, unsigned int id,
struct notifier_block *nb);
+extern int extcon_register_notifier_all(struct extcon_dev *edev,
+ struct notifier_block *nb);
+extern int extcon_unregister_notifier_all(struct extcon_dev *edev,
+ struct notifier_block *nb);
+extern int devm_extcon_register_notifier_all(struct device *dev,
+ struct extcon_dev *edev,
+ struct notifier_block *nb);
+extern void devm_extcon_unregister_notifier_all(struct device *dev,
+ struct extcon_dev *edev,
+ struct notifier_block *nb);
+
/*
* Following API get the extcon device from devicetree.
* This function use phandle of devicetree to get extcon device directly.
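A minimal sketch of a consumer of the new *_notifier_all variants (hypothetical driver; the callback and probe function are invented):

static int my_extcon_evt(struct notifier_block *nb, unsigned long event,
                         void *ptr)
{
        /* ptr points at the extcon_dev whose state changed */
        return NOTIFY_OK;
}

static struct notifier_block my_extcon_nb = {
        .notifier_call = my_extcon_evt,
};

static int my_extcon_probe(struct device *dev, struct extcon_dev *edev)
{
        /* one registration covers every connector the extcon supports */
        return devm_extcon_register_notifier_all(dev, edev, &my_extcon_nb);
}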
diff --git a/include/linux/f2fs_fs.h b/include/linux/f2fs_fs.h
index e2d239ed4c60..b6feed6547ce 100644
--- a/include/linux/f2fs_fs.h
+++ b/include/linux/f2fs_fs.h
@@ -32,9 +32,9 @@
/* 0, 1(node nid), 2(meta nid) are reserved node id */
#define F2FS_RESERVED_NODE_NUM 3
-#define F2FS_ROOT_INO(sbi) (sbi->root_ino_num)
-#define F2FS_NODE_INO(sbi) (sbi->node_ino_num)
-#define F2FS_META_INO(sbi) (sbi->meta_ino_num)
+#define F2FS_ROOT_INO(sbi) ((sbi)->root_ino_num)
+#define F2FS_NODE_INO(sbi) ((sbi)->node_ino_num)
+#define F2FS_META_INO(sbi) ((sbi)->meta_ino_num)
#define F2FS_IO_SIZE(sbi) (1 << (sbi)->write_io_size_bits) /* Blocks */
#define F2FS_IO_SIZE_KB(sbi) (1 << ((sbi)->write_io_size_bits + 2)) /* KB */
@@ -114,6 +114,7 @@ struct f2fs_super_block {
/*
* For checkpoint
*/
+#define CP_TRIMMED_FLAG 0x00000100
#define CP_NAT_BITS_FLAG 0x00000080
#define CP_CRC_RECOVERY_FLAG 0x00000040
#define CP_FASTBOOT_FLAG 0x00000020
@@ -161,7 +162,7 @@ struct f2fs_checkpoint {
*/
#define F2FS_ORPHANS_PER_BLOCK 1020
-#define GET_ORPHAN_BLOCKS(n) ((n + F2FS_ORPHANS_PER_BLOCK - 1) / \
+#define GET_ORPHAN_BLOCKS(n) (((n) + F2FS_ORPHANS_PER_BLOCK - 1) / \
F2FS_ORPHANS_PER_BLOCK)
struct f2fs_orphan_block {
@@ -302,6 +303,12 @@ struct f2fs_nat_block {
#define SIT_ENTRY_PER_BLOCK (PAGE_SIZE / sizeof(struct f2fs_sit_entry))
/*
+ * F2FS uses 4 bytes to represent a block address. As a result, the supported
+ * disk size is 16 TB, which equals 16 * 1024 * 1024 / 2 segments.
+ */
+#define F2FS_MAX_SEGMENT ((16 * 1024 * 1024) / 2)
+
+/*
* Note that f2fs_sit_entry->vblocks has the following bit-field information.
* [15:10] : allocation type such as CURSEG_XXXX_TYPE
* [9:0] : valid block count
@@ -449,7 +456,7 @@ typedef __le32 f2fs_hash_t;
#define F2FS_SLOT_LEN 8
#define F2FS_SLOT_LEN_BITS 3
-#define GET_DENTRY_SLOTS(x) ((x + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS)
+#define GET_DENTRY_SLOTS(x) (((x) + F2FS_SLOT_LEN - 1) >> F2FS_SLOT_LEN_BITS)
/* MAX level for dir lookup */
#define MAX_DIR_HASH_DEPTH 63
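For reference, the (16 * 1024 * 1024) / 2 figure follows from the usual f2fs geometry (assuming 4 KiB blocks and 2 MiB segments): a 32-bit block address covers 2^32 blocks * 4 KiB = 16 TiB, and each 2 MiB segment holds 512 blocks, so the maximum segment count is 2^32 / 512 = 8,388,608 = (16 * 1024 * 1024) / 2.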
diff --git a/include/linux/fcntl.h b/include/linux/fcntl.h
index 76ce329e656d..1b48d9c9a561 100644
--- a/include/linux/fcntl.h
+++ b/include/linux/fcntl.h
@@ -3,6 +3,12 @@
#include <uapi/linux/fcntl.h>
+/* list of all valid flags for the open/openat flags argument: */
+#define VALID_OPEN_FLAGS \
+ (O_RDONLY | O_WRONLY | O_RDWR | O_CREAT | O_EXCL | O_NOCTTY | O_TRUNC | \
+ O_APPEND | O_NDELAY | O_NONBLOCK | O_NDELAY | __O_SYNC | O_DSYNC | \
+ FASYNC | O_DIRECT | O_LARGEFILE | O_DIRECTORY | O_NOFOLLOW | \
+ O_NOATIME | O_CLOEXEC | O_PATH | __O_TMPFILE)
#ifndef force_o_largefile
#define force_o_largefile() (BITS_PER_LONG != 32)
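A sketch of the intended use of VALID_OPEN_FLAGS (hypothetical caller): reject unknown open(2) flags up front rather than silently ignoring them.

static int my_check_open_flags(int flags)
{
        if (flags & ~VALID_OPEN_FLAGS)
                return -EINVAL;
        return 0;
}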
diff --git a/include/linux/filter.h b/include/linux/filter.h
index fbf7b39e8103..56197f82af45 100644
--- a/include/linux/filter.h
+++ b/include/linux/filter.h
@@ -7,6 +7,7 @@
#include <stdarg.h>
#include <linux/atomic.h>
+#include <linux/refcount.h>
#include <linux/compat.h>
#include <linux/skbuff.h>
#include <linux/linkage.h>
@@ -18,7 +19,9 @@
#include <net/sch_generic.h>
-#include <asm/cacheflush.h>
+#ifdef CONFIG_ARCH_HAS_SET_MEMORY
+#include <asm/set_memory.h>
+#endif
#include <uapi/linux/filter.h>
#include <uapi/linux/bpf.h>
@@ -412,8 +415,7 @@ struct bpf_prog {
locked:1, /* Program image locked? */
gpl_compatible:1, /* Is filter GPL compatible? */
cb_access:1, /* Is control block accessed? */
- dst_needed:1, /* Do we need dst entry? */
- xdp_adjust_head:1; /* Adjusting pkt head? */
+ dst_needed:1; /* Do we need dst entry? */
kmemcheck_bitfield_end(meta);
enum bpf_prog_type type; /* Type of BPF program */
u32 len; /* Number of filter blocks */
@@ -430,7 +432,7 @@ struct bpf_prog {
};
struct sk_filter {
- atomic_t refcnt;
+ refcount_t refcnt;
struct rcu_head rcu;
struct bpf_prog *prog;
};
@@ -693,6 +695,11 @@ static inline bool bpf_jit_is_ebpf(void)
# endif
}
+static inline bool ebpf_jit_enabled(void)
+{
+ return bpf_jit_enable && bpf_jit_is_ebpf();
+}
+
static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
return fp->jited && bpf_jit_is_ebpf();
@@ -753,6 +760,11 @@ void bpf_prog_kallsyms_del(struct bpf_prog *fp);
#else /* CONFIG_BPF_JIT */
+static inline bool ebpf_jit_enabled(void)
+{
+ return false;
+}
+
static inline bool bpf_prog_ebpf_jited(const struct bpf_prog *fp)
{
return false;
diff --git a/include/linux/firmware/meson/meson_sm.h b/include/linux/firmware/meson/meson_sm.h
index 8e953c6f394a..37a5eaea69dd 100644
--- a/include/linux/firmware/meson/meson_sm.h
+++ b/include/linux/firmware/meson/meson_sm.h
@@ -25,7 +25,7 @@ int meson_sm_call(unsigned int cmd_index, u32 *ret, u32 arg0, u32 arg1,
u32 arg2, u32 arg3, u32 arg4);
int meson_sm_call_write(void *buffer, unsigned int b_size, unsigned int cmd_index,
u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4);
-int meson_sm_call_read(void *buffer, unsigned int cmd_index, u32 arg0, u32 arg1,
- u32 arg2, u32 arg3, u32 arg4);
+int meson_sm_call_read(void *buffer, unsigned int bsize, unsigned int cmd_index,
+ u32 arg0, u32 arg1, u32 arg2, u32 arg3, u32 arg4);
#endif /* _MESON_SM_FW_H_ */
diff --git a/include/linux/flex_array.h b/include/linux/flex_array.h
index b6efb0c64408..11366b3ff0b4 100644
--- a/include/linux/flex_array.h
+++ b/include/linux/flex_array.h
@@ -61,16 +61,83 @@ struct flex_array {
FLEX_ARRAY_ELEMENTS_PER_PART(__element_size)); \
}
+/**
+ * flex_array_alloc() - Creates a flexible array.
+ * @element_size: individual object size.
+ * @total: maximum number of objects which can be stored.
+ * @flags: GFP flags
+ *
+ * Return: Returns a pointer to the newly allocated struct flex_array, or
+ * NULL on failure.
+ */
struct flex_array *flex_array_alloc(int element_size, unsigned int total,
gfp_t flags);
+
+/**
+ * flex_array_prealloc() - Ensures that memory for the elements indexed in the
+ * range defined by start and nr_elements has been allocated.
+ * @fa: array to allocate memory to.
+ * @start: index of the first element to preallocate.
+ * @nr_elements: number of elements to be allocated.
+ * @flags: GFP flags
+ *
+ */
int flex_array_prealloc(struct flex_array *fa, unsigned int start,
unsigned int nr_elements, gfp_t flags);
+
+/**
+ * flex_array_free() - Removes all elements of a flexible array.
+ * @fa: array to be freed.
+ */
void flex_array_free(struct flex_array *fa);
+
+/**
+ * flex_array_free_parts() - Removes all elements of a flexible array, but
+ * leaves the array itself in place.
+ * @fa: array to be emptied.
+ */
void flex_array_free_parts(struct flex_array *fa);
+
+/**
+ * flex_array_put() - Stores data into a flexible array.
+ * @fa: array where element is to be stored.
+ * @element_nr: position to copy, must be less than the maximum specified when
+ * the array was created.
+ * @src: data source to be copied into the array.
+ * @flags: GFP flags
+ *
+ * Return: Returns zero on success, a negative error code otherwise.
+ */
int flex_array_put(struct flex_array *fa, unsigned int element_nr, void *src,
gfp_t flags);
+
+/**
+ * flex_array_clear() - Clears an individual element in the array, sets the
+ * given element to FLEX_ARRAY_FREE.
+ * @element_nr: element position to clear.
+ * @fa: array to which element to be cleared belongs.
+ *
+ * Return: Returns zero on success, -EINVAL otherwise.
+ */
int flex_array_clear(struct flex_array *fa, unsigned int element_nr);
+
+/**
+ * flex_array_get() - Retrieves data from a flexible array.
+ *
+ * @element_nr: Element position to retrieve data from.
+ * @fa: array from which data is to be retrieved.
+ *
+ * Return: Returns a pointer to the data element, or NULL if that
+ * particular element has never been allocated.
+ */
void *flex_array_get(struct flex_array *fa, unsigned int element_nr);
+
+/**
+ * flex_array_shrink() - Reduces the allocated size of an array.
+ * @fa: array to shrink.
+ *
+ * Return: Returns number of pages of memory actually freed.
+ *
+ */
int flex_array_shrink(struct flex_array *fa);
#define flex_array_put_ptr(fa, nr, src, gfp) \
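To tie the kernel-doc above together, a minimal usage sketch (struct foo is made up and error handling is abbreviated):

struct foo {
        int bar;
};

static int my_flex_array_example(void)
{
        struct flex_array *fa;
        struct foo item = { .bar = 1 };
        struct foo *p;
        int err;

        fa = flex_array_alloc(sizeof(struct foo), 128, GFP_KERNEL);
        if (!fa)
                return -ENOMEM;

        /* back elements 0..127 with real pages before storing into them */
        err = flex_array_prealloc(fa, 0, 128, GFP_KERNEL);
        if (!err)
                err = flex_array_put(fa, 10, &item, GFP_KERNEL);
        if (!err) {
                p = flex_array_get(fa, 10);     /* NULL if never allocated */
                if (p)
                        pr_info("bar=%d\n", p->bar);
        }

        flex_array_free(fa);
        return err;
}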
diff --git a/include/linux/fpga/altera-pr-ip-core.h b/include/linux/fpga/altera-pr-ip-core.h
new file mode 100644
index 000000000000..3810a9033f49
--- /dev/null
+++ b/include/linux/fpga/altera-pr-ip-core.h
@@ -0,0 +1,29 @@
+/*
+ * Driver for Altera Partial Reconfiguration IP Core
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Based on socfpga-a10.c Copyright (C) 2015-2016 Altera Corporation
+ * by Alan Tull <atull@opensource.altera.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _ALT_PR_IP_CORE_H
+#define _ALT_PR_IP_CORE_H
+#include <linux/io.h>
+
+int alt_pr_register(struct device *dev, void __iomem *reg_base);
+int alt_pr_unregister(struct device *dev);
+
+#endif /* _ALT_PR_IP_CORE_H */
diff --git a/include/linux/fpga/fpga-mgr.h b/include/linux/fpga/fpga-mgr.h
index 57beb5d09bfc..b4ac24c4411d 100644
--- a/include/linux/fpga/fpga-mgr.h
+++ b/include/linux/fpga/fpga-mgr.h
@@ -70,17 +70,21 @@ enum fpga_mgr_states {
*/
#define FPGA_MGR_PARTIAL_RECONFIG BIT(0)
#define FPGA_MGR_EXTERNAL_CONFIG BIT(1)
+#define FPGA_MGR_ENCRYPTED_BITSTREAM BIT(2)
/**
* struct fpga_image_info - information specific to a FPGA image
* @flags: boolean flags as defined above
* @enable_timeout_us: maximum time to enable traffic through bridge (uSec)
* @disable_timeout_us: maximum time to disable traffic through bridge (uSec)
+ * @config_complete_timeout_us: maximum time for FPGA to switch to operating
+ * status in the write_complete op.
*/
struct fpga_image_info {
u32 flags;
u32 enable_timeout_us;
u32 disable_timeout_us;
+ u32 config_complete_timeout_us;
};
/**
diff --git a/include/linux/fs.h b/include/linux/fs.h
index 7251f7bb45e8..0ad325ed71e8 100644
--- a/include/linux/fs.h
+++ b/include/linux/fs.h
@@ -29,7 +29,6 @@
#include <linux/lockdep.h>
#include <linux/percpu-rwsem.h>
#include <linux/workqueue.h>
-#include <linux/percpu-rwsem.h>
#include <linux/delayed_call.h>
#include <asm/byteorder.h>
@@ -250,9 +249,8 @@ enum positive_aop_returns {
AOP_TRUNCATED_PAGE = 0x80001,
};
-#define AOP_FLAG_UNINTERRUPTIBLE 0x0001 /* will not do a short write */
-#define AOP_FLAG_CONT_EXPAND 0x0002 /* called from cont_expand */
-#define AOP_FLAG_NOFS 0x0004 /* used by filesystem to direct
+#define AOP_FLAG_CONT_EXPAND 0x0001 /* called from cont_expand */
+#define AOP_FLAG_NOFS 0x0002 /* used by filesystem to direct
* helper code (eg buffer layer)
* to clear GFP_FS from alloc */
@@ -546,6 +544,8 @@ is_uncached_acl(struct posix_acl *acl)
#define IOP_XATTR 0x0008
#define IOP_DEFAULT_READLINK 0x0010
+struct fsnotify_mark_connector;
+
/*
* Keep mostly read-only and often accessed (especially for
* the RCU path lookup and 'stat' data) fields at the beginning
@@ -645,7 +645,7 @@ struct inode {
#ifdef CONFIG_FSNOTIFY
__u32 i_fsnotify_mask; /* all events this inode cares about */
- struct hlist_head i_fsnotify_marks;
+ struct fsnotify_mark_connector __rcu *i_fsnotify_marks;
#endif
#if IS_ENABLED(CONFIG_FS_ENCRYPTION)
@@ -909,6 +909,8 @@ static inline struct file *get_file(struct file *f)
#define FL_OFDLCK 1024 /* lock is "owned" by struct file */
#define FL_LAYOUT 2048 /* outstanding pNFS layout */
+#define FL_CLOSE_POSIX (FL_POSIX | FL_CLOSE)
+
/*
* Special return value from posix_lock_file() and vfs_lock_file() for
* asynchronous locking.
@@ -2121,6 +2123,9 @@ extern int vfs_ustat(dev_t, struct kstatfs *);
extern int freeze_super(struct super_block *super);
extern int thaw_super(struct super_block *super);
extern bool our_mnt(struct vfsmount *mnt);
+extern __printf(2, 3)
+int super_setup_bdi_name(struct super_block *sb, char *fmt, ...);
+extern int super_setup_bdi(struct super_block *sb);
extern int current_umask(void);
@@ -2921,17 +2926,19 @@ extern int vfs_statx_fd(unsigned int, struct kstat *, u32, unsigned int);
static inline int vfs_stat(const char __user *filename, struct kstat *stat)
{
- return vfs_statx(AT_FDCWD, filename, 0, stat, STATX_BASIC_STATS);
+ return vfs_statx(AT_FDCWD, filename, AT_NO_AUTOMOUNT,
+ stat, STATX_BASIC_STATS);
}
static inline int vfs_lstat(const char __user *name, struct kstat *stat)
{
- return vfs_statx(AT_FDCWD, name, AT_SYMLINK_NOFOLLOW,
+ return vfs_statx(AT_FDCWD, name, AT_SYMLINK_NOFOLLOW | AT_NO_AUTOMOUNT,
stat, STATX_BASIC_STATS);
}
static inline int vfs_fstatat(int dfd, const char __user *filename,
struct kstat *stat, int flags)
{
- return vfs_statx(dfd, filename, flags, stat, STATX_BASIC_STATS);
+ return vfs_statx(dfd, filename, flags | AT_NO_AUTOMOUNT,
+ stat, STATX_BASIC_STATS);
}
static inline int vfs_fstat(int fd, struct kstat *stat)
{
@@ -2996,9 +3003,10 @@ extern const struct file_operations simple_dir_operations;
extern const struct inode_operations simple_dir_inode_operations;
extern void make_empty_dir_inode(struct inode *inode);
extern bool is_empty_dir_inode(struct inode *inode);
-struct tree_descr { char *name; const struct file_operations *ops; int mode; };
+struct tree_descr { const char *name; const struct file_operations *ops; int mode; };
struct dentry *d_alloc_name(struct dentry *, const char *);
-extern int simple_fill_super(struct super_block *, unsigned long, struct tree_descr *);
+extern int simple_fill_super(struct super_block *, unsigned long,
+ const struct tree_descr *);
extern int simple_pin_fs(struct file_system_type *, struct vfsmount **mount, int *count);
extern void simple_release_fs(struct vfsmount **mount, int *count);
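With the constification above, a pseudo-filesystem's file table can now live in rodata. A sketch (MY_FS_MAGIC and my_status_fops are hypothetical):

static const struct tree_descr my_files[] = {
        /* inode numbers below 2 are reserved, so real entries start at [2] */
        [2] = { "status", &my_status_fops, 0444 },
        { "" }  /* terminator */
};

static int my_fill_super(struct super_block *sb, void *data, int silent)
{
        return simple_fill_super(sb, MY_FS_MAGIC, my_files);
}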
diff --git a/include/linux/fscrypt_common.h b/include/linux/fscrypt_common.h
index 10c1abfbac6c..0a30c106c1e5 100644
--- a/include/linux/fscrypt_common.h
+++ b/include/linux/fscrypt_common.h
@@ -46,17 +46,6 @@ struct fscrypt_symlink_data {
char encrypted_path[1];
} __packed;
-/**
- * This function is used to calculate the disk space required to
- * store a filename of length l in encrypted symlink format.
- */
-static inline u32 fscrypt_symlink_data_len(u32 l)
-{
- if (l < FS_CRYPTO_BLOCK_SIZE)
- l = FS_CRYPTO_BLOCK_SIZE;
- return (l + sizeof(struct fscrypt_symlink_data) - 1);
-}
-
struct fscrypt_str {
unsigned char *name;
u32 len;
diff --git a/include/linux/fscrypt_notsupp.h b/include/linux/fscrypt_notsupp.h
index 3511ca798804..ec406aed2f2f 100644
--- a/include/linux/fscrypt_notsupp.h
+++ b/include/linux/fscrypt_notsupp.h
@@ -147,6 +147,15 @@ static inline int fscrypt_fname_usr_to_disk(struct inode *inode,
return -EOPNOTSUPP;
}
+static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
+ const u8 *de_name, u32 de_name_len)
+{
+ /* Encryption support disabled; use standard comparison */
+ if (de_name_len != fname->disk_name.len)
+ return false;
+ return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
+}
+
/* bio.c */
static inline void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *ctx,
struct bio *bio)
diff --git a/include/linux/fscrypt_supp.h b/include/linux/fscrypt_supp.h
index a140f47e9b27..cd4e82c17304 100644
--- a/include/linux/fscrypt_supp.h
+++ b/include/linux/fscrypt_supp.h
@@ -57,6 +57,80 @@ extern int fscrypt_fname_disk_to_usr(struct inode *, u32, u32,
extern int fscrypt_fname_usr_to_disk(struct inode *, const struct qstr *,
struct fscrypt_str *);
+#define FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE 32
+
+/* Extracts the second-to-last ciphertext block; see explanation below */
+#define FSCRYPT_FNAME_DIGEST(name, len) \
+ ((name) + round_down((len) - FS_CRYPTO_BLOCK_SIZE - 1, \
+ FS_CRYPTO_BLOCK_SIZE))
+
+#define FSCRYPT_FNAME_DIGEST_SIZE FS_CRYPTO_BLOCK_SIZE
+
+/**
+ * fscrypt_digested_name - alternate identifier for an on-disk filename
+ *
+ * When userspace lists an encrypted directory without access to the key,
+ * filenames whose ciphertext is longer than FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE
+ * bytes are shown in this abbreviated form (base64-encoded) rather than as the
+ * full ciphertext (base64-encoded). This is necessary to allow supporting
+ * filenames up to NAME_MAX bytes, since base64 encoding expands the length.
+ *
+ * To make it possible for filesystems to still find the correct directory entry
+ * despite not knowing the full on-disk name, we encode any filesystem-specific
+ * 'hash' and/or 'minor_hash' which the filesystem may need for its lookups,
+ * followed by the second-to-last ciphertext block of the filename. Due to the
+ * use of the CBC-CTS encryption mode, the second-to-last ciphertext block
+ * depends on the full plaintext. (Note that ciphertext stealing causes the
+ * last two blocks to appear "flipped".) This makes accidental collisions very
+ * unlikely: just a 1 in 2^128 chance for two filenames to collide even if they
+ * share the same filesystem-specific hashes.
+ *
+ * However, this scheme isn't immune to intentional collisions, which can be
+ * created by anyone able to create arbitrary plaintext filenames and view them
+ * without the key. Making the "digest" be a real cryptographic hash like
+ * SHA-256 over the full ciphertext would prevent this, although it would be
+ * less efficient and harder to implement, especially since the filesystem would
+ * need to calculate it for each directory entry examined during a search.
+ */
+struct fscrypt_digested_name {
+ u32 hash;
+ u32 minor_hash;
+ u8 digest[FSCRYPT_FNAME_DIGEST_SIZE];
+};
+
+/**
+ * fscrypt_match_name() - test whether the given name matches a directory entry
+ * @fname: the name being searched for
+ * @de_name: the name from the directory entry
+ * @de_name_len: the length of @de_name in bytes
+ *
+ * Normally @fname->disk_name will be set, and in that case we simply compare
+ * that to the name stored in the directory entry. The only exception is that
+ * if we don't have the key for an encrypted directory and a filename in it is
+ * very long, then we won't have the full disk_name and we'll instead need to
+ * match against the fscrypt_digested_name.
+ *
+ * Return: %true if the name matches, otherwise %false.
+ */
+static inline bool fscrypt_match_name(const struct fscrypt_name *fname,
+ const u8 *de_name, u32 de_name_len)
+{
+ if (unlikely(!fname->disk_name.name)) {
+ const struct fscrypt_digested_name *n =
+ (const void *)fname->crypto_buf.name;
+ if (WARN_ON_ONCE(fname->usr_fname->name[0] != '_'))
+ return false;
+ if (de_name_len <= FSCRYPT_FNAME_MAX_UNDIGESTED_SIZE)
+ return false;
+ return !memcmp(FSCRYPT_FNAME_DIGEST(de_name, de_name_len),
+ n->digest, FSCRYPT_FNAME_DIGEST_SIZE);
+ }
+
+ if (de_name_len != fname->disk_name.len)
+ return false;
+ return !memcmp(de_name, fname->disk_name.name, fname->disk_name.len);
+}
+
/* bio.c */
extern void fscrypt_decrypt_bio_pages(struct fscrypt_ctx *, struct bio *);
extern void fscrypt_pullback_bio_page(struct page **, bool);
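A sketch of how a filesystem's directory search might use fscrypt_match_name() (struct my_dirent and the function are made up; real filesystems use their own on-disk dirent layout):

struct my_dirent {              /* hypothetical on-disk entry */
        u8 name_len;
        u8 name[255];
};

static struct my_dirent *my_find_entry(const struct fscrypt_name *fname,
                                       struct my_dirent *de, unsigned int nr)
{
        unsigned int i;

        for (i = 0; i < nr; i++, de++) {
                /* handles both the plain and the digested-name cases */
                if (fscrypt_match_name(fname, de->name, de->name_len))
                        return de;
        }
        return NULL;
}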
diff --git a/include/linux/fsnotify_backend.h b/include/linux/fsnotify_backend.h
index e6e689b5569e..c6c69318752b 100644
--- a/include/linux/fsnotify_backend.h
+++ b/include/linux/fsnotify_backend.h
@@ -80,6 +80,7 @@ struct fsnotify_event;
struct fsnotify_mark;
struct fsnotify_event_private_data;
struct fsnotify_fname;
+struct fsnotify_iter_info;
/*
* Each group much define these ops. The fsnotify infrastructure will call
@@ -98,10 +99,13 @@ struct fsnotify_ops {
struct fsnotify_mark *inode_mark,
struct fsnotify_mark *vfsmount_mark,
u32 mask, const void *data, int data_type,
- const unsigned char *file_name, u32 cookie);
+ const unsigned char *file_name, u32 cookie,
+ struct fsnotify_iter_info *iter_info);
void (*free_group_priv)(struct fsnotify_group *group);
void (*freeing_mark)(struct fsnotify_mark *mark, struct fsnotify_group *group);
void (*free_event)(struct fsnotify_event *event);
+ /* called on final put+free to free memory */
+ void (*free_mark)(struct fsnotify_mark *mark);
};
/*
@@ -163,6 +167,8 @@ struct fsnotify_group {
struct fsnotify_event *overflow_event; /* Event we queue when the
* notification list is too
* full */
+ atomic_t user_waits; /* Number of tasks waiting for user
+ * response */
/* groups can define private fields here or use the void *private */
union {
@@ -195,6 +201,30 @@ struct fsnotify_group {
#define FSNOTIFY_EVENT_INODE 2
/*
+ * Inode / vfsmount point to this structure which tracks all marks attached to
+ * the inode / vfsmount. The reference to inode / vfsmount is held by this
+ * structure. We destroy this structure when there are no more marks attached
+ * to it. The structure is protected by fsnotify_mark_srcu.
+ */
+struct fsnotify_mark_connector {
+ spinlock_t lock;
+#define FSNOTIFY_OBJ_TYPE_INODE 0x01
+#define FSNOTIFY_OBJ_TYPE_VFSMOUNT 0x02
+#define FSNOTIFY_OBJ_ALL_TYPES (FSNOTIFY_OBJ_TYPE_INODE | \
+ FSNOTIFY_OBJ_TYPE_VFSMOUNT)
+ unsigned int flags; /* Type of object [lock] */
+ union { /* Object pointer [lock] */
+ struct inode *inode;
+ struct vfsmount *mnt;
+ };
+ union {
+ struct hlist_head list;
+ /* Used listing heads to free after srcu period expires */
+ struct fsnotify_mark_connector *destroy_next;
+ };
+};
+
+/*
* A mark is simply an object attached to an in core inode which allows an
* fsnotify listener to indicate they are either no longer interested in events
* of a type matching mask or only interested in those events.
@@ -223,22 +253,16 @@ struct fsnotify_mark {
struct list_head g_list;
/* Protects inode / mnt pointers, flags, masks */
spinlock_t lock;
- /* List of marks for inode / vfsmount [obj_lock] */
+ /* List of marks for inode / vfsmount [connector->lock, mark ref] */
struct hlist_node obj_list;
- union { /* Object pointer [mark->lock, group->mark_mutex] */
- struct inode *inode; /* inode this mark is associated with */
- struct vfsmount *mnt; /* vfsmount this mark is associated with */
- };
+ /* Head of list of marks for an object [mark ref] */
+ struct fsnotify_mark_connector *connector;
/* Events types to ignore [mark->lock, group->mark_mutex] */
__u32 ignored_mask;
-#define FSNOTIFY_MARK_FLAG_INODE 0x01
-#define FSNOTIFY_MARK_FLAG_VFSMOUNT 0x02
-#define FSNOTIFY_MARK_FLAG_OBJECT_PINNED 0x04
-#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x08
-#define FSNOTIFY_MARK_FLAG_ALIVE 0x10
-#define FSNOTIFY_MARK_FLAG_ATTACHED 0x20
+#define FSNOTIFY_MARK_FLAG_IGNORED_SURV_MODIFY 0x01
+#define FSNOTIFY_MARK_FLAG_ALIVE 0x02
+#define FSNOTIFY_MARK_FLAG_ATTACHED 0x04
unsigned int flags; /* flags [mark->lock] */
- void (*free_mark)(struct fsnotify_mark *mark); /* called on final put+free */
};
#ifdef CONFIG_FSNOTIFY
@@ -315,23 +339,18 @@ extern struct fsnotify_event *fsnotify_remove_first_event(struct fsnotify_group
/* functions used to manipulate the marks attached to inodes */
-/* run all marks associated with a vfsmount and update mnt->mnt_fsnotify_mask */
-extern void fsnotify_recalc_vfsmount_mask(struct vfsmount *mnt);
-/* run all marks associated with an inode and update inode->i_fsnotify_mask */
-extern void fsnotify_recalc_inode_mask(struct inode *inode);
-extern void fsnotify_init_mark(struct fsnotify_mark *mark, void (*free_mark)(struct fsnotify_mark *mark));
-/* find (and take a reference) to a mark associated with group and inode */
-extern struct fsnotify_mark *fsnotify_find_inode_mark(struct fsnotify_group *group, struct inode *inode);
-/* find (and take a reference) to a mark associated with group and vfsmount */
-extern struct fsnotify_mark *fsnotify_find_vfsmount_mark(struct fsnotify_group *group, struct vfsmount *mnt);
-/* set the ignored_mask of a mark */
-extern void fsnotify_set_mark_ignored_mask_locked(struct fsnotify_mark *mark, __u32 mask);
-/* set the mask of a mark (might pin the object into memory */
-extern void fsnotify_set_mark_mask_locked(struct fsnotify_mark *mark, __u32 mask);
-/* attach the mark to both the group and the inode */
-extern int fsnotify_add_mark(struct fsnotify_mark *mark, struct fsnotify_group *group,
- struct inode *inode, struct vfsmount *mnt, int allow_dups);
-extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark, struct fsnotify_group *group,
+/* Calculate mask of events for a list of marks */
+extern void fsnotify_recalc_mask(struct fsnotify_mark_connector *conn);
+extern void fsnotify_init_mark(struct fsnotify_mark *mark,
+ struct fsnotify_group *group);
+/* Find mark belonging to given group in the list of marks */
+extern struct fsnotify_mark *fsnotify_find_mark(
+ struct fsnotify_mark_connector __rcu **connp,
+ struct fsnotify_group *group);
+/* attach the mark to the inode or vfsmount */
+extern int fsnotify_add_mark(struct fsnotify_mark *mark, struct inode *inode,
+ struct vfsmount *mnt, int allow_dups);
+extern int fsnotify_add_mark_locked(struct fsnotify_mark *mark,
struct inode *inode, struct vfsmount *mnt, int allow_dups);
/* given a group and a mark, flag mark to be freed when all references are dropped */
extern void fsnotify_destroy_mark(struct fsnotify_mark *mark,
@@ -340,15 +359,23 @@ extern void fsnotify_destroy_mark(struct fsnotify_mark *mark,
extern void fsnotify_detach_mark(struct fsnotify_mark *mark);
/* free mark */
extern void fsnotify_free_mark(struct fsnotify_mark *mark);
+/* run all the marks in a group, and clear all of the marks attached to given object type */
+extern void fsnotify_clear_marks_by_group(struct fsnotify_group *group, unsigned int type);
/* run all the marks in a group, and clear all of the vfsmount marks */
-extern void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group);
+static inline void fsnotify_clear_vfsmount_marks_by_group(struct fsnotify_group *group)
+{
+ fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_VFSMOUNT);
+}
/* run all the marks in a group, and clear all of the inode marks */
-extern void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group);
-/* run all the marks in a group, and clear all of the marks where mark->flags & flags is true*/
-extern void fsnotify_clear_marks_by_group_flags(struct fsnotify_group *group, unsigned int flags);
+static inline void fsnotify_clear_inode_marks_by_group(struct fsnotify_group *group)
+{
+ fsnotify_clear_marks_by_group(group, FSNOTIFY_OBJ_TYPE_INODE);
+}
extern void fsnotify_get_mark(struct fsnotify_mark *mark);
extern void fsnotify_put_mark(struct fsnotify_mark *mark);
extern void fsnotify_unmount_inodes(struct super_block *sb);
+extern void fsnotify_finish_user_wait(struct fsnotify_iter_info *iter_info);
+extern bool fsnotify_prepare_user_wait(struct fsnotify_iter_info *iter_info);
/* put here because inotify does some weird stuff when destroying watches */
extern void fsnotify_init_event(struct fsnotify_event *event,
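A sketch of attaching an inode mark with the reworked API (hypothetical backend; my_mark_cache is an invented kmem_cache and error handling is abbreviated):

static struct kmem_cache *my_mark_cache;        /* created elsewhere */

static int my_add_inode_mark(struct fsnotify_group *group,
                             struct inode *inode, __u32 mask)
{
        struct fsnotify_mark *mark;
        int ret;

        mark = kmem_cache_zalloc(my_mark_cache, GFP_KERNEL);
        if (!mark)
                return -ENOMEM;

        fsnotify_init_mark(mark, group);        /* group is now given at init time */
        mark->mask = mask;

        ret = fsnotify_add_mark(mark, inode, NULL, 0);  /* inode mark, no dups */
        if (ret)
                fsnotify_put_mark(mark);
        return ret;
}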
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 3633e8beff39..473f088aabea 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -42,8 +42,10 @@
/* Main tracing buffer and events set up */
#ifdef CONFIG_TRACING
void trace_init(void);
+void early_trace_init(void);
#else
static inline void trace_init(void) { }
+static inline void early_trace_init(void) { }
#endif
struct module;
@@ -70,7 +72,7 @@ ftrace_func_t ftrace_ops_get_func(struct ftrace_ops *ops);
* CONTROL, SAVE_REGS, SAVE_REGS_IF_SUPPORTED, RECURSION_SAFE, STUB and
* IPMODIFY are a kind of attribute flags which can be set only before
* registering the ftrace_ops, and can not be modified while registered.
- * Changing those attribute flags after regsitering ftrace_ops will
+ * Changing those attribute flags after registering ftrace_ops will
* cause unexpected results.
*
* ENABLED - set/unset when ftrace_ops is registered/unregistered
@@ -144,6 +146,10 @@ struct ftrace_ops_hash {
struct ftrace_hash *filter_hash;
struct mutex regex_lock;
};
+
+void ftrace_free_init_mem(void);
+#else
+static inline void ftrace_free_init_mem(void) { }
#endif
/*
@@ -260,6 +266,7 @@ static inline int ftrace_nr_registered_ops(void)
}
static inline void clear_ftrace_function(void) { }
static inline void ftrace_kill(void) { }
+static inline void ftrace_free_init_mem(void) { }
#endif /* CONFIG_FUNCTION_TRACER */
#ifdef CONFIG_STACK_TRACER
@@ -279,15 +286,45 @@ int
stack_trace_sysctl(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);
-#endif
-struct ftrace_func_command {
- struct list_head list;
- char *name;
- int (*func)(struct ftrace_hash *hash,
- char *func, char *cmd,
- char *params, int enable);
-};
+/* DO NOT MODIFY THIS VARIABLE DIRECTLY! */
+DECLARE_PER_CPU(int, disable_stack_tracer);
+
+/**
+ * stack_tracer_disable - temporarily disable the stack tracer
+ *
+ * There are a few locations (namely in RCU) where stack tracing
+ * cannot be executed. This function is used to disable stack
+ * tracing during those critical sections.
+ *
+ * This function must be called with preemption or interrupts
+ * disabled and stack_tracer_enable() must be called shortly after
+ * while preemption or interrupts are still disabled.
+ */
+static inline void stack_tracer_disable(void)
+{
+ /* Preemption or interrupts must be disabled */
+ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
+ WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
+ this_cpu_inc(disable_stack_tracer);
+}
+
+/**
+ * stack_tracer_enable - re-enable the stack tracer
+ *
+ * After stack_tracer_disable() is called, stack_tracer_enable()
+ * must be called shortly afterward.
+ */
+static inline void stack_tracer_enable(void)
+{
+ if (IS_ENABLED(CONFIG_DEBUG_PREEMPT))
+ WARN_ON_ONCE(!preempt_count() || !irqs_disabled());
+ this_cpu_dec(disable_stack_tracer);
+}
+#else
+static inline void stack_tracer_disable(void) { }
+static inline void stack_tracer_enable(void) { }
+#endif
#ifdef CONFIG_DYNAMIC_FTRACE
@@ -315,30 +352,6 @@ void ftrace_bug(int err, struct dyn_ftrace *rec);
struct seq_file;
-struct ftrace_probe_ops {
- void (*func)(unsigned long ip,
- unsigned long parent_ip,
- void **data);
- int (*init)(struct ftrace_probe_ops *ops,
- unsigned long ip, void **data);
- void (*free)(struct ftrace_probe_ops *ops,
- unsigned long ip, void **data);
- int (*print)(struct seq_file *m,
- unsigned long ip,
- struct ftrace_probe_ops *ops,
- void *data);
-};
-
-extern int
-register_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
- void *data);
-extern void
-unregister_ftrace_function_probe(char *glob, struct ftrace_probe_ops *ops,
- void *data);
-extern void
-unregister_ftrace_function_probe_func(char *glob, struct ftrace_probe_ops *ops);
-extern void unregister_ftrace_function_probe_all(char *glob);
-
extern int ftrace_text_reserved(const void *start, const void *end);
extern int ftrace_nr_registered_ops(void);
@@ -400,9 +413,6 @@ void ftrace_set_global_notrace(unsigned char *buf, int len, int reset);
void ftrace_free_filter(struct ftrace_ops *ops);
void ftrace_ops_set_global_filter(struct ftrace_ops *ops);
-int register_ftrace_command(struct ftrace_func_command *cmd);
-int unregister_ftrace_command(struct ftrace_func_command *cmd);
-
enum {
FTRACE_UPDATE_CALLS = (1 << 0),
FTRACE_DISABLE_CALLS = (1 << 1),
@@ -433,8 +443,8 @@ enum {
FTRACE_ITER_FILTER = (1 << 0),
FTRACE_ITER_NOTRACE = (1 << 1),
FTRACE_ITER_PRINTALL = (1 << 2),
- FTRACE_ITER_DO_HASH = (1 << 3),
- FTRACE_ITER_HASH = (1 << 4),
+ FTRACE_ITER_DO_PROBES = (1 << 3),
+ FTRACE_ITER_PROBE = (1 << 4),
FTRACE_ITER_ENABLED = (1 << 5),
};
@@ -618,14 +628,6 @@ static inline void ftrace_enable_daemon(void) { }
static inline void ftrace_module_init(struct module *mod) { }
static inline void ftrace_module_enable(struct module *mod) { }
static inline void ftrace_release_mod(struct module *mod) { }
-static inline __init int register_ftrace_command(struct ftrace_func_command *cmd)
-{
- return -EINVAL;
-}
-static inline __init int unregister_ftrace_command(char *cmd_name)
-{
- return -EINVAL;
-}
static inline int ftrace_text_reserved(const void *start, const void *end)
{
return 0;
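A usage sketch of the stack_tracer_disable()/stack_tracer_enable() pair documented above (hypothetical caller; the point is only the required ordering with preemption disabled):

static void my_critical_section(void)
{
        preempt_disable();              /* required before disabling the tracer */
        stack_tracer_disable();

        /* ... work that must not recurse into the stack tracer ... */

        stack_tracer_enable();
        preempt_enable();
}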
diff --git a/include/linux/fwnode.h b/include/linux/fwnode.h
index 8bd28ce6d76e..3dff2398a5f0 100644
--- a/include/linux/fwnode.h
+++ b/include/linux/fwnode.h
@@ -27,4 +27,16 @@ struct fwnode_handle {
struct fwnode_handle *secondary;
};
+/**
+ * struct fwnode_endpoint - Fwnode graph endpoint
+ * @port: Port number
+ * @id: Endpoint id
+ * @local_fwnode: reference to the related fwnode
+ */
+struct fwnode_endpoint {
+ unsigned int port;
+ unsigned int id;
+ const struct fwnode_handle *local_fwnode;
+};
+
#endif
diff --git a/include/linux/genhd.h b/include/linux/genhd.h
index 76f39754e7b0..acff9437e5c3 100644
--- a/include/linux/genhd.h
+++ b/include/linux/genhd.h
@@ -159,11 +159,11 @@ struct badblocks;
#if defined(CONFIG_BLK_DEV_INTEGRITY)
struct blk_integrity {
- struct blk_integrity_profile *profile;
- unsigned char flags;
- unsigned char tuple_size;
- unsigned char interval_exp;
- unsigned char tag_size;
+ const struct blk_integrity_profile *profile;
+ unsigned char flags;
+ unsigned char tuple_size;
+ unsigned char interval_exp;
+ unsigned char tag_size;
};
#endif /* CONFIG_BLK_DEV_INTEGRITY */
@@ -722,11 +722,9 @@ static inline void part_nr_sects_write(struct hd_struct *part, sector_t size)
#if defined(CONFIG_BLK_DEV_INTEGRITY)
extern void blk_integrity_add(struct gendisk *);
extern void blk_integrity_del(struct gendisk *);
-extern void blk_integrity_revalidate(struct gendisk *);
#else /* CONFIG_BLK_DEV_INTEGRITY */
static inline void blk_integrity_add(struct gendisk *disk) { }
static inline void blk_integrity_del(struct gendisk *disk) { }
-static inline void blk_integrity_revalidate(struct gendisk *disk) { }
#endif /* CONFIG_BLK_DEV_INTEGRITY */
#else /* CONFIG_BLOCK */
diff --git a/include/linux/gfp.h b/include/linux/gfp.h
index db373b9d3223..2b1a44f5bdb6 100644
--- a/include/linux/gfp.h
+++ b/include/linux/gfp.h
@@ -40,6 +40,11 @@ struct vm_area_struct;
#define ___GFP_DIRECT_RECLAIM 0x400000u
#define ___GFP_WRITE 0x800000u
#define ___GFP_KSWAPD_RECLAIM 0x1000000u
+#ifdef CONFIG_LOCKDEP
+#define ___GFP_NOLOCKDEP 0x4000000u
+#else
+#define ___GFP_NOLOCKDEP 0
+#endif
/* If the above are modified, __GFP_BITS_SHIFT may need updating */
/*
@@ -179,8 +184,11 @@ struct vm_area_struct;
#define __GFP_NOTRACK ((__force gfp_t)___GFP_NOTRACK)
#define __GFP_NOTRACK_FALSE_POSITIVE (__GFP_NOTRACK)
+/* Disable lockdep for GFP context tracking */
+#define __GFP_NOLOCKDEP ((__force gfp_t)___GFP_NOLOCKDEP)
+
/* Room for N __GFP_FOO bits */
-#define __GFP_BITS_SHIFT 25
+#define __GFP_BITS_SHIFT (25 + IS_ENABLED(CONFIG_LOCKDEP))
#define __GFP_BITS_MASK ((__force gfp_t)((1 << __GFP_BITS_SHIFT) - 1))
/*
@@ -202,8 +210,16 @@ struct vm_area_struct;
*
* GFP_NOIO will use direct reclaim to discard clean pages or slab pages
* that do not require the starting of any physical IO.
+ * Please try to avoid using this flag directly and instead use
+ * memalloc_noio_{save,restore} to mark the whole scope which cannot
+ * perform any IO with a short explanation why. All allocation requests
+ * will inherit GFP_NOIO implicitly.
*
* GFP_NOFS will use direct reclaim but will not use any filesystem interfaces.
+ * Please try to avoid using this flag directly and instead use
+ * memalloc_nofs_{save,restore} to mark the whole scope which cannot/shouldn't
+ * recurse into the FS layer with a short explanation why. All allocation
+ * requests will inherit GFP_NOFS implicitly.
*
* GFP_USER is for userspace allocations that also need to be directly
* accessibly by the kernel or hardware. It is typically used by hardware
@@ -297,8 +313,8 @@ static inline bool gfpflags_allow_blocking(const gfp_t gfp_flags)
/*
* GFP_ZONE_TABLE is a word size bitstring that is used for looking up the
- * zone to use given the lowest 4 bits of gfp_t. Entries are ZONE_SHIFT long
- * and there are 16 of them to cover all possible combinations of
+ * zone to use given the lowest 4 bits of gfp_t. Entries are GFP_ZONES_SHIFT
+ * bits long and there are 16 of them to cover all possible combinations of
* __GFP_DMA, __GFP_DMA32, __GFP_MOVABLE and __GFP_HIGHMEM.
*
* The zone fallback order is MOVABLE=>HIGHMEM=>NORMAL=>DMA32=>DMA.
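A sketch of the scope-based style the comment above recommends (hypothetical filesystem path; memalloc_nofs_save()/memalloc_nofs_restore() are the helpers named in the comment):

static void *my_alloc_in_fs_context(size_t size)
{
        unsigned int nofs_flags;
        void *p;

        nofs_flags = memalloc_nofs_save();      /* allocations below act as GFP_NOFS */
        p = kmalloc(size, GFP_KERNEL);
        memalloc_nofs_restore(nofs_flags);

        return p;
}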
diff --git a/include/linux/gpio/consumer.h b/include/linux/gpio/consumer.h
index 933d93656605..8f702fcbe485 100644
--- a/include/linux/gpio/consumer.h
+++ b/include/linux/gpio/consumer.h
@@ -170,14 +170,14 @@ static inline struct gpio_desc *__must_check
gpiod_get_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
- return ERR_PTR(-ENOSYS);
+ return NULL;
}
static inline struct gpio_desc *__must_check
gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags)
{
- return ERR_PTR(-ENOSYS);
+ return NULL;
}
static inline struct gpio_descs *__must_check
@@ -191,7 +191,7 @@ static inline struct gpio_descs *__must_check
gpiod_get_array_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
- return ERR_PTR(-ENOSYS);
+ return NULL;
}
static inline void gpiod_put(struct gpio_desc *desc)
@@ -231,14 +231,14 @@ static inline struct gpio_desc *__must_check
devm_gpiod_get_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
- return ERR_PTR(-ENOSYS);
+ return NULL;
}
static inline struct gpio_desc *__must_check
devm_gpiod_get_index_optional(struct device *dev, const char *con_id,
unsigned int index, enum gpiod_flags flags)
{
- return ERR_PTR(-ENOSYS);
+ return NULL;
}
static inline struct gpio_descs *__must_check
@@ -252,7 +252,7 @@ static inline struct gpio_descs *__must_check
devm_gpiod_get_array_optional(struct device *dev, const char *con_id,
enum gpiod_flags flags)
{
- return ERR_PTR(-ENOSYS);
+ return NULL;
}
static inline void devm_gpiod_put(struct device *dev, struct gpio_desc *desc)
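The change above means the same consumer code works whether or not GPIOLIB is built in: an absent optional GPIO is reported as NULL, not as an error. A sketch (hypothetical probe; "reset" is an example con_id):

static int my_gpio_probe(struct device *dev)
{
        struct gpio_desc *reset;

        reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
        if (IS_ERR(reset))
                return PTR_ERR(reset);          /* a real error, e.g. -EPROBE_DEFER */

        if (reset)                              /* NULL just means "not wired up" */
                gpiod_set_value_cansleep(reset, 1);

        return 0;
}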
diff --git a/include/linux/gpio/driver.h b/include/linux/gpio/driver.h
index 846f3b989480..393582867afd 100644
--- a/include/linux/gpio/driver.h
+++ b/include/linux/gpio/driver.h
@@ -168,7 +168,7 @@ struct gpio_chip {
unsigned int irq_base;
irq_flow_handler_t irq_handler;
unsigned int irq_default_type;
- int irq_chained_parent;
+ unsigned int irq_chained_parent;
bool irq_nested;
bool irq_need_valid_mask;
unsigned long *irq_valid_mask;
@@ -244,12 +244,12 @@ int bgpio_init(struct gpio_chip *gc, struct device *dev,
void gpiochip_set_chained_irqchip(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
- int parent_irq,
+ unsigned int parent_irq,
irq_flow_handler_t parent_handler);
void gpiochip_set_nested_irqchip(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
- int parent_irq);
+ unsigned int parent_irq);
int gpiochip_irqchip_add_key(struct gpio_chip *gpiochip,
struct irq_chip *irqchip,
diff --git a/include/linux/gpio/gpio-reg.h b/include/linux/gpio/gpio-reg.h
new file mode 100644
index 000000000000..90e0b9060e6d
--- /dev/null
+++ b/include/linux/gpio/gpio-reg.h
@@ -0,0 +1,13 @@
+#ifndef GPIO_REG_H
+#define GPIO_REG_H
+
+struct device;
+struct irq_domain;
+
+struct gpio_chip *gpio_reg_init(struct device *dev, void __iomem *reg,
+ int base, int num, const char *label, u32 direction, u32 def_out,
+ const char *const *names, struct irq_domain *irqdom, const int *irqs);
+
+int gpio_reg_resume(struct gpio_chip *gc);
+
+#endif
diff --git a/include/linux/hdmi.h b/include/linux/hdmi.h
index edbb4fc674ed..d271ff23984f 100644
--- a/include/linux/hdmi.h
+++ b/include/linux/hdmi.h
@@ -35,6 +35,7 @@ enum hdmi_infoframe_type {
};
#define HDMI_IEEE_OUI 0x000c03
+#define HDMI_FORUM_IEEE_OUI 0xc45dd8
#define HDMI_INFOFRAME_HEADER_SIZE 4
#define HDMI_AVI_INFOFRAME_SIZE 13
#define HDMI_SPD_INFOFRAME_SIZE 25
diff --git a/include/linux/hid-sensor-hub.h b/include/linux/hid-sensor-hub.h
index 7ef111d3ecc5..f32d7c392c1e 100644
--- a/include/linux/hid-sensor-hub.h
+++ b/include/linux/hid-sensor-hub.h
@@ -231,6 +231,8 @@ struct hid_sensor_common {
unsigned usage_id;
atomic_t data_ready;
atomic_t user_requested_state;
+ int poll_interval;
+ int raw_hystersis;
struct iio_trigger *trigger;
int timestamp_ns_scale;
struct hid_sensor_hub_attribute_info poll;
diff --git a/include/linux/hid-sensor-ids.h b/include/linux/hid-sensor-ids.h
index 30c7dc45e45f..761f86242473 100644
--- a/include/linux/hid-sensor-ids.h
+++ b/include/linux/hid-sensor-ids.h
@@ -45,6 +45,14 @@
#define HID_USAGE_SENSOR_DATA_ATMOSPHERIC_PRESSURE 0x200430
#define HID_USAGE_SENSOR_ATMOSPHERIC_PRESSURE 0x200431
+/* Temperature (200033) */
+#define HID_USAGE_SENSOR_TEMPERATURE 0x200033
+#define HID_USAGE_SENSOR_DATA_ENVIRONMENTAL_TEMPERATURE 0x200434
+
+/* Humidity (200032) */
+#define HID_USAGE_SENSOR_HUMIDITY 0x200032
+#define HID_USAGE_SENSOR_ATMOSPHERIC_HUMIDITY 0x200433
+
/* Gyro 3D: (200076) */
#define HID_USAGE_SENSOR_GYRO_3D 0x200076
#define HID_USAGE_SENSOR_DATA_ANGL_VELOCITY 0x200456
diff --git a/include/linux/hid.h b/include/linux/hid.h
index 28f38e2b8f30..5be325d890d9 100644
--- a/include/linux/hid.h
+++ b/include/linux/hid.h
@@ -268,6 +268,8 @@ struct hid_item {
#define HID_CP_APPLICATIONLAUNCHBUTTONS 0x000c0180
#define HID_CP_GENERICGUIAPPLICATIONCONTROLS 0x000c0200
+#define HID_DG_DEVICECONFIG 0x000d000e
+#define HID_DG_DEVICESETTINGS 0x000d0023
#define HID_DG_CONFIDENCE 0x000d0047
#define HID_DG_WIDTH 0x000d0048
#define HID_DG_HEIGHT 0x000d0049
@@ -322,7 +324,7 @@ struct hid_item {
#define HID_QUIRK_MULTI_INPUT 0x00000040
#define HID_QUIRK_HIDINPUT_FORCE 0x00000080
#define HID_QUIRK_NO_EMPTY_INPUT 0x00000100
-#define HID_QUIRK_NO_INIT_INPUT_REPORTS 0x00000200
+/* 0x00000200 reserved for backward compatibility, was NO_INIT_INPUT_REPORTS */
#define HID_QUIRK_ALWAYS_POLL 0x00000400
#define HID_QUIRK_SKIP_OUTPUT_REPORTS 0x00010000
#define HID_QUIRK_SKIP_OUTPUT_REPORT_ID 0x00020000
@@ -541,7 +543,6 @@ struct hid_device { /* device report descriptor */
struct list_head inputs; /* The list of inputs */
void *hiddev; /* The hiddev structure */
void *hidraw;
- int minor; /* Hiddev minor number */
int open; /* is the device open by anyone? */
char name[128]; /* Device name */
diff --git a/include/linux/hiddev.h b/include/linux/hiddev.h
index a5dd8148660b..921622222957 100644
--- a/include/linux/hiddev.h
+++ b/include/linux/hiddev.h
@@ -32,6 +32,18 @@
* In-kernel definitions.
*/
+struct hiddev {
+ int minor;
+ int exist;
+ int open;
+ struct mutex existancelock;
+ wait_queue_head_t wait;
+ struct hid_device *hid;
+ struct list_head list;
+ spinlock_t list_lock;
+ bool initialized;
+};
+
struct hid_device;
struct hid_usage;
struct hid_field;
diff --git a/include/linux/host1x.h b/include/linux/host1x.h
index 1ffbf2a8cb99..3d04aa1dc83e 100644
--- a/include/linux/host1x.h
+++ b/include/linux/host1x.h
@@ -26,6 +26,7 @@ enum host1x_class {
HOST1X_CLASS_HOST1X = 0x1,
HOST1X_CLASS_GR2D = 0x51,
HOST1X_CLASS_GR2D_SB = 0x52,
+ HOST1X_CLASS_VIC = 0x5D,
HOST1X_CLASS_GR3D = 0x60,
};
diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 249e579ecd4c..8c5b10eb7265 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -276,8 +276,6 @@ static inline int hrtimer_is_hres_active(struct hrtimer *timer)
return timer->base->cpu_base->hres_active;
}
-extern void hrtimer_peek_ahead_timers(void);
-
/*
* The resolution of the clocks. The resolution value is returned in
* the clock_getres() system call to give application programmers an
@@ -300,8 +298,6 @@ extern unsigned int hrtimer_resolution;
#define hrtimer_resolution (unsigned int)LOW_RES_NSEC
-static inline void hrtimer_peek_ahead_timers(void) { }
-
static inline int hrtimer_is_hres_active(struct hrtimer *timer)
{
return 0;
@@ -456,7 +452,7 @@ static inline u64 hrtimer_forward_now(struct hrtimer *timer,
}
/* Precise sleep: */
-extern long hrtimer_nanosleep(struct timespec *rqtp,
+extern long hrtimer_nanosleep(struct timespec64 *rqtp,
struct timespec __user *rmtp,
const enum hrtimer_mode mode,
const clockid_t clockid);
diff --git a/include/linux/hwmon.h b/include/linux/hwmon.h
index 88b673749121..ceb751987c40 100644
--- a/include/linux/hwmon.h
+++ b/include/linux/hwmon.h
@@ -337,7 +337,7 @@ struct hwmon_ops {
int (*read)(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long *val);
int (*read_string)(struct device *dev, enum hwmon_sensor_types type,
- u32 attr, int channel, char **str);
+ u32 attr, int channel, const char **str);
int (*write)(struct device *dev, enum hwmon_sensor_types type,
u32 attr, int channel, long val);
};
diff --git a/include/linux/hyperv.h b/include/linux/hyperv.h
index 970771a5f739..e09fc8290c2f 100644
--- a/include/linux/hyperv.h
+++ b/include/linux/hyperv.h
@@ -491,6 +491,12 @@ struct vmbus_channel_rescind_offer {
u32 child_relid;
} __packed;
+static inline u32
+hv_ringbuffer_pending_size(const struct hv_ring_buffer_info *rbi)
+{
+ return rbi->ring_buffer->pending_send_sz;
+}
+
/*
* Request Offer -- no parameters, SynIC message contains the partition ID
* Set Snoop -- no parameters, SynIC message contains the partition ID
@@ -524,10 +530,10 @@ struct vmbus_channel_open_channel {
u32 target_vp;
/*
- * The upstream ring buffer begins at offset zero in the memory
- * described by RingBufferGpadlHandle. The downstream ring buffer
- * follows it at this offset (in pages).
- */
+ * The upstream ring buffer begins at offset zero in the memory
+ * described by RingBufferGpadlHandle. The downstream ring buffer
+ * follows it at this offset (in pages).
+ */
u32 downstream_ringbuffer_pageoffset;
/* User-specific data to be passed along to the server endpoint. */
@@ -1013,7 +1019,7 @@ extern int vmbus_open(struct vmbus_channel *channel,
u32 recv_ringbuffersize,
void *userdata,
u32 userdatalen,
- void(*onchannel_callback)(void *context),
+ void (*onchannel_callback)(void *context),
void *context);
extern void vmbus_close(struct vmbus_channel *channel);
@@ -1155,6 +1161,17 @@ static inline void *hv_get_drvdata(struct hv_device *dev)
return dev_get_drvdata(&dev->device);
}
+struct hv_ring_buffer_debug_info {
+ u32 current_interrupt_mask;
+ u32 current_read_index;
+ u32 current_write_index;
+ u32 bytes_avail_toread;
+ u32 bytes_avail_towrite;
+};
+
+void hv_ringbuffer_get_debuginfo(const struct hv_ring_buffer_info *ring_info,
+ struct hv_ring_buffer_debug_info *debug_info);
+
/* Vmbus interface */
#define vmbus_driver_register(driver) \
__vmbus_driver_register(driver, THIS_MODULE, KBUILD_MODNAME)
@@ -1428,7 +1445,7 @@ struct hyperv_service_callback {
char *log_msg;
uuid_le data;
struct vmbus_channel *channel;
- void (*callback) (void *context);
+ void (*callback)(void *context);
};
#define MAX_SRV_VER 0x7ffffff
@@ -1504,16 +1521,6 @@ static inline void hv_signal_on_read(struct vmbus_channel *channel)
cached_write_sz = hv_get_cached_bytes_to_write(rbi);
if (cached_write_sz < pending_sz)
vmbus_setevent(channel);
-
- return;
-}
-
-static inline void
-init_cached_read_index(struct vmbus_channel *channel)
-{
- struct hv_ring_buffer_info *rbi = &channel->inbound;
-
- rbi->cached_read_index = rbi->ring_buffer->read_index;
}
/*
@@ -1549,76 +1556,48 @@ static inline u32 hv_end_read(struct hv_ring_buffer_info *rbi)
/*
* An API to support in-place processing of incoming VMBUS packets.
*/
-#define VMBUS_PKT_TRAILER 8
-static inline struct vmpacket_descriptor *
-get_next_pkt_raw(struct vmbus_channel *channel)
+/* Get data payload associated with descriptor */
+static inline void *hv_pkt_data(const struct vmpacket_descriptor *desc)
{
- struct hv_ring_buffer_info *ring_info = &channel->inbound;
- u32 priv_read_loc = ring_info->priv_read_index;
- void *ring_buffer = hv_get_ring_buffer(ring_info);
- u32 dsize = ring_info->ring_datasize;
- /*
- * delta is the difference between what is available to read and
- * what was already consumed in place. We commit read index after
- * the whole batch is processed.
- */
- u32 delta = priv_read_loc >= ring_info->ring_buffer->read_index ?
- priv_read_loc - ring_info->ring_buffer->read_index :
- (dsize - ring_info->ring_buffer->read_index) + priv_read_loc;
- u32 bytes_avail_toread = (hv_get_bytes_to_read(ring_info) - delta);
-
- if (bytes_avail_toread < sizeof(struct vmpacket_descriptor))
- return NULL;
-
- return ring_buffer + priv_read_loc;
+ return (void *)((unsigned long)desc + (desc->offset8 << 3));
}
-/*
- * A helper function to step through packets "in-place"
- * This API is to be called after each successful call
- * get_next_pkt_raw().
- */
-static inline void put_pkt_raw(struct vmbus_channel *channel,
- struct vmpacket_descriptor *desc)
+/* Get data size associated with descriptor */
+static inline u32 hv_pkt_datalen(const struct vmpacket_descriptor *desc)
{
- struct hv_ring_buffer_info *ring_info = &channel->inbound;
- u32 packetlen = desc->len8 << 3;
- u32 dsize = ring_info->ring_datasize;
-
- /*
- * Include the packet trailer.
- */
- ring_info->priv_read_index += packetlen + VMBUS_PKT_TRAILER;
- ring_info->priv_read_index %= dsize;
+ return (desc->len8 << 3) - (desc->offset8 << 3);
}
+
+struct vmpacket_descriptor *
+hv_pkt_iter_first(struct vmbus_channel *channel);
+
+struct vmpacket_descriptor *
+__hv_pkt_iter_next(struct vmbus_channel *channel,
+ const struct vmpacket_descriptor *pkt);
+
+void hv_pkt_iter_close(struct vmbus_channel *channel);
+
/*
- * This call commits the read index and potentially signals the host.
- * Here is the pattern for using the "in-place" consumption APIs:
- *
- * init_cached_read_index();
- *
- * while (get_next_pkt_raw() {
- * process the packet "in-place";
- * put_pkt_raw();
- * }
- * if (packets processed in place)
- * commit_rd_index();
+ * Get next packet descriptor from iterator
+ * If at end of list, return NULL and update host.
*/
-static inline void commit_rd_index(struct vmbus_channel *channel)
+static inline struct vmpacket_descriptor *
+hv_pkt_iter_next(struct vmbus_channel *channel,
+ const struct vmpacket_descriptor *pkt)
{
- struct hv_ring_buffer_info *ring_info = &channel->inbound;
- /*
- * Make sure all reads are done before we update the read index since
- * the writer may start writing to the read area once the read index
- * is updated.
- */
- virt_rmb();
- ring_info->ring_buffer->read_index = ring_info->priv_read_index;
+ struct vmpacket_descriptor *nxt;
+
+ nxt = __hv_pkt_iter_next(channel, pkt);
+ if (!nxt)
+ hv_pkt_iter_close(channel);
- hv_signal_on_read(channel);
+ return nxt;
}
+#define foreach_vmbus_pkt(pkt, channel) \
+ for (pkt = hv_pkt_iter_first(channel); pkt; \
+ pkt = hv_pkt_iter_next(channel, pkt))
#endif /* _HYPERV_H */
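A sketch of a channel callback using the new in-place iterator (hypothetical device driver; my_handle_packet is invented):

static void my_handle_packet(void *data, u32 len)
{
        /* consume the payload */
}

static void my_onchannel_callback(void *context)
{
        struct vmbus_channel *channel = context;
        const struct vmpacket_descriptor *desc;

        foreach_vmbus_pkt(desc, channel) {
                void *data = hv_pkt_data(desc);
                u32 len = hv_pkt_datalen(desc);

                my_handle_packet(data, len);
        }
        /* hv_pkt_iter_next() closes the iterator and signals the host at the end */
}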
diff --git a/include/linux/i2c.h b/include/linux/i2c.h
index 6b183521c616..72d0ece70ed3 100644
--- a/include/linux/i2c.h
+++ b/include/linux/i2c.h
@@ -38,6 +38,7 @@
extern struct bus_type i2c_bus_type;
extern struct device_type i2c_adapter_type;
+extern struct device_type i2c_client_type;
/* --- General options ------------------------------------------------ */
@@ -149,6 +150,7 @@ enum i2c_alert_protocol {
* @detect: Callback for device detection
* @address_list: The I2C addresses to probe (for detect)
* @clients: List of detected clients we created (for i2c-core use only)
+ * @disable_i2c_core_irq_mapping: Tell the i2c-core to not do irq-mapping
*
* The driver.owner field should be set to the module owner of this driver.
* The driver.name field should be set to the name of this driver.
@@ -212,6 +214,8 @@ struct i2c_driver {
int (*detect)(struct i2c_client *, struct i2c_board_info *);
const unsigned short *address_list;
struct list_head clients;
+
+ bool disable_i2c_core_irq_mapping;
};
#define to_i2c_driver(d) container_of(d, struct i2c_driver, driver)
@@ -303,6 +307,8 @@ static inline int i2c_slave_event(struct i2c_client *client,
* @of_node: pointer to OpenFirmware device node
* @fwnode: device node supplied by the platform firmware
* @properties: additional device properties for the device
+ * @resources: resources associated with the device
+ * @num_resources: number of resources in the @resources array
* @irq: stored in i2c_client.irq
*
* I2C doesn't actually support hardware probing, although controllers and
@@ -325,6 +331,8 @@ struct i2c_board_info {
struct device_node *of_node;
struct fwnode_handle *fwnode;
const struct property_entry *properties;
+ const struct resource *resources;
+ unsigned int num_resources;
int irq;
};
@@ -824,11 +832,18 @@ static inline const struct of_device_id
#if IS_ENABLED(CONFIG_ACPI)
u32 i2c_acpi_find_bus_speed(struct device *dev);
+struct i2c_client *i2c_acpi_new_device(struct device *dev, int index,
+ struct i2c_board_info *info);
#else
static inline u32 i2c_acpi_find_bus_speed(struct device *dev)
{
return 0;
}
+static inline struct i2c_client *i2c_acpi_new_device(struct device *dev,
+ int index, struct i2c_board_info *info)
+{
+ return NULL;
+}
#endif /* CONFIG_ACPI */
#endif /* _LINUX_I2C_H */
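A hedged sketch of a driver instantiating a companion client from the second ACPI I2cSerialBus resource with the new helper; the device names are made up, and whether &client->dev is the right lookup device depends on the platform's ACPI description:

#include <linux/i2c.h>

static int example_probe(struct i2c_client *client,
			 const struct i2c_device_id *id)
{
	struct i2c_board_info info = { };
	struct i2c_client *companion;

	strlcpy(info.type, "example-companion", sizeof(info.type));

	/* index 1 = the second I2cSerialBus resource of the ACPI node */
	companion = i2c_acpi_new_device(&client->dev, 1, &info);
	if (!companion)
		return -ENODEV;	/* also the !CONFIG_ACPI stub's path */

	i2c_set_clientdata(client, companion);
	return 0;
}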
diff --git a/include/linux/i2c/i2c-hid.h b/include/linux/i2c/i2c-hid.h
index 7aa901d92058..1fb088239d12 100644
--- a/include/linux/i2c/i2c-hid.h
+++ b/include/linux/i2c/i2c-hid.h
@@ -14,9 +14,13 @@
#include <linux/types.h>
+struct regulator;
+
/**
* struct i2chid_platform_data - used by hid over i2c implementation.
* @hid_descriptor_address: i2c register where the HID descriptor is stored.
+ * @supply: regulator for powering on the device.
+ * @post_power_delay_ms: delay after powering on before device is usable.
*
* Note that it is the responsibility of the platform driver (or the acpi 5.0
* driver, or the flattened device tree) to setup the irq related to the gpio in
@@ -31,6 +35,8 @@
*/
struct i2c_hid_platform_data {
u16 hid_descriptor_address;
+ struct regulator *supply;
+ int post_power_delay_ms;
};
#endif /* __LINUX_I2C_HID_H */
diff --git a/include/linux/ide.h b/include/linux/ide.h
index 2f51c1724b5a..6980ca322074 100644
--- a/include/linux/ide.h
+++ b/include/linux/ide.h
@@ -88,7 +88,7 @@ static inline bool ata_pm_request(struct request *rq)
ide_req(rq)->type == ATA_PRIV_PM_RESUME);
}
-/* Error codes returned in rq->errors to the higher part of the driver. */
+/* Error codes returned in result to the higher part of the driver. */
enum {
IDE_DRV_ERROR_GENERAL = 101,
IDE_DRV_ERROR_FILEMARK = 102,
diff --git a/include/linux/ieee80211.h b/include/linux/ieee80211.h
index 0dd9498c694f..69033353d0d1 100644
--- a/include/linux/ieee80211.h
+++ b/include/linux/ieee80211.h
@@ -7,7 +7,7 @@
* Copyright (c) 2005, Devicescape Software, Inc.
* Copyright (c) 2006, Michael Wu <flamingice@sourmilk.net>
* Copyright (c) 2013 - 2014 Intel Mobile Communications GmbH
- * Copyright (c) 2016 Intel Deutschland GmbH
+ * Copyright (c) 2016 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -1411,6 +1411,8 @@ struct ieee80211_ht_operation {
#define IEEE80211_HT_OP_MODE_PROTECTION_NONHT_MIXED 3
#define IEEE80211_HT_OP_MODE_NON_GF_STA_PRSNT 0x0004
#define IEEE80211_HT_OP_MODE_NON_HT_STA_PRSNT 0x0010
+#define IEEE80211_HT_OP_MODE_CCFS2_SHIFT 5
+#define IEEE80211_HT_OP_MODE_CCFS2_MASK 0x1fe0
/* for stbc_param */
#define IEEE80211_HT_STBC_PARAM_DUAL_BEACON 0x0040
@@ -1525,14 +1527,14 @@ enum ieee80211_vht_chanwidth {
* This structure is the "VHT operation element" as
* described in 802.11ac D3.0 8.4.2.161
* @chan_width: Operating channel width
+ * @center_freq_seg0_idx: center freq segment 0 index
* @center_freq_seg1_idx: center freq segment 1 index
- * @center_freq_seg2_idx: center freq segment 2 index
* @basic_mcs_set: VHT Basic MCS rate set
*/
struct ieee80211_vht_operation {
u8 chan_width;
+ u8 center_freq_seg0_idx;
u8 center_freq_seg1_idx;
- u8 center_freq_seg2_idx;
__le16 basic_mcs_set;
} __packed;
@@ -1721,6 +1723,9 @@ enum ieee80211_statuscode {
WLAN_STATUS_REJECT_DSE_BAND = 96,
WLAN_STATUS_DENIED_WITH_SUGGESTED_BAND_AND_CHANNEL = 99,
WLAN_STATUS_DENIED_DUE_TO_SPECTRUM_MANAGEMENT = 103,
+ /* 802.11ai */
+ WLAN_STATUS_FILS_AUTHENTICATION_FAILURE = 108,
+ WLAN_STATUS_UNKNOWN_AUTHENTICATION_SERVER = 109,
};
@@ -2102,6 +2107,12 @@ enum ieee80211_key_len {
#define FILS_NONCE_LEN 16
#define FILS_MAX_KEK_LEN 64
+#define FILS_ERP_MAX_USERNAME_LEN 16
+#define FILS_ERP_MAX_REALM_LEN 253
+#define FILS_ERP_MAX_RRK_LEN 64
+
+#define PMK_MAX_LEN 48
+
/* Public action codes */
enum ieee80211_pub_actioncode {
WLAN_PUB_ACTION_EXT_CHANSW_ANN = 4,
@@ -2166,37 +2177,37 @@ enum ieee80211_tdls_actioncode {
#define WLAN_BSS_COEX_INFORMATION_REQUEST BIT(0)
/**
- * enum - mesh synchronization method identifier
+ * enum ieee80211_mesh_sync_method - mesh synchronization method identifier
*
* @IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET: the default synchronization method
* @IEEE80211_SYNC_METHOD_VENDOR: a vendor specific synchronization method
* that will be specified in a vendor specific information element
*/
-enum {
+enum ieee80211_mesh_sync_method {
IEEE80211_SYNC_METHOD_NEIGHBOR_OFFSET = 1,
IEEE80211_SYNC_METHOD_VENDOR = 255,
};
/**
- * enum - mesh path selection protocol identifier
+ * enum ieee80211_mesh_path_protocol - mesh path selection protocol identifier
*
* @IEEE80211_PATH_PROTOCOL_HWMP: the default path selection protocol
* @IEEE80211_PATH_PROTOCOL_VENDOR: a vendor specific protocol that will
* be specified in a vendor specific information element
*/
-enum {
+enum ieee80211_mesh_path_protocol {
IEEE80211_PATH_PROTOCOL_HWMP = 1,
IEEE80211_PATH_PROTOCOL_VENDOR = 255,
};
/**
- * enum - mesh path selection metric identifier
+ * enum ieee80211_mesh_path_metric - mesh path selection metric identifier
*
* @IEEE80211_PATH_METRIC_AIRTIME: the default path selection metric
* @IEEE80211_PATH_METRIC_VENDOR: a vendor specific metric that will be
* specified in a vendor specific information element
*/
-enum {
+enum ieee80211_mesh_path_metric {
IEEE80211_PATH_METRIC_AIRTIME = 1,
IEEE80211_PATH_METRIC_VENDOR = 255,
};
@@ -2305,6 +2316,32 @@ struct ieee80211_timeout_interval_ie {
__le32 value;
} __packed;
+/**
+ * enum ieee80211_idle_options - BSS idle options
+ * @WLAN_IDLE_OPTIONS_PROTECTED_KEEP_ALIVE: the station should send an RSN
+ * protected frame to the AP to reset the idle timer at the AP for
+ * the station.
+ */
+enum ieee80211_idle_options {
+ WLAN_IDLE_OPTIONS_PROTECTED_KEEP_ALIVE = BIT(0),
+};
+
+/**
+ * struct ieee80211_bss_max_idle_period_ie
+ *
+ * This structure refers to "BSS Max idle period element"
+ *
+ * @max_idle_period: indicates the time period during which a station can
+ * refrain from transmitting frames to its associated AP without being
+ * disassociated. In units of 1000 TUs.
+ * @idle_options: indicates the options associated with the BSS idle capability
+ * as specified in &enum ieee80211_idle_options.
+ */
+struct ieee80211_bss_max_idle_period_ie {
+ __le16 max_idle_period;
+ u8 idle_options;
+} __packed;
+
/* BACK action code */
enum ieee80211_back_actioncode {
WLAN_ACTION_ADDBA_REQ = 0,
@@ -2345,13 +2382,21 @@ enum ieee80211_sa_query_action {
#define WLAN_CIPHER_SUITE_SMS4 SUITE(0x001472, 1)
/* AKM suite selectors */
-#define WLAN_AKM_SUITE_8021X SUITE(0x000FAC, 1)
-#define WLAN_AKM_SUITE_PSK SUITE(0x000FAC, 2)
-#define WLAN_AKM_SUITE_8021X_SHA256 SUITE(0x000FAC, 5)
-#define WLAN_AKM_SUITE_PSK_SHA256 SUITE(0x000FAC, 6)
-#define WLAN_AKM_SUITE_TDLS SUITE(0x000FAC, 7)
-#define WLAN_AKM_SUITE_SAE SUITE(0x000FAC, 8)
-#define WLAN_AKM_SUITE_FT_OVER_SAE SUITE(0x000FAC, 9)
+#define WLAN_AKM_SUITE_8021X SUITE(0x000FAC, 1)
+#define WLAN_AKM_SUITE_PSK SUITE(0x000FAC, 2)
+#define WLAN_AKM_SUITE_FT_8021X SUITE(0x000FAC, 3)
+#define WLAN_AKM_SUITE_FT_PSK SUITE(0x000FAC, 4)
+#define WLAN_AKM_SUITE_8021X_SHA256 SUITE(0x000FAC, 5)
+#define WLAN_AKM_SUITE_PSK_SHA256 SUITE(0x000FAC, 6)
+#define WLAN_AKM_SUITE_TDLS SUITE(0x000FAC, 7)
+#define WLAN_AKM_SUITE_SAE SUITE(0x000FAC, 8)
+#define WLAN_AKM_SUITE_FT_OVER_SAE SUITE(0x000FAC, 9)
+#define WLAN_AKM_SUITE_8021X_SUITE_B SUITE(0x000FAC, 11)
+#define WLAN_AKM_SUITE_8021X_SUITE_B_192 SUITE(0x000FAC, 12)
+#define WLAN_AKM_SUITE_FILS_SHA256 SUITE(0x000FAC, 14)
+#define WLAN_AKM_SUITE_FILS_SHA384 SUITE(0x000FAC, 15)
+#define WLAN_AKM_SUITE_FT_FILS_SHA256 SUITE(0x000FAC, 16)
+#define WLAN_AKM_SUITE_FT_FILS_SHA384 SUITE(0x000FAC, 17)
#define WLAN_MAX_KEY_LEN 32
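A small worked example for the new BSS Max Idle Period element: max_idle_period is in units of 1000 TUs, and one TU is 1024 microseconds, so each unit corresponds to 1024 ms. The helper name is illustrative:

#include <linux/ieee80211.h>

static u32 example_max_idle_ms(const struct ieee80211_bss_max_idle_period_ie *ie)
{
	/* WLAN_IDLE_OPTIONS_PROTECTED_KEEP_ALIVE in ie->idle_options means
	 * the keep-alive frame sent to reset the AP's timer must be RSN
	 * protected */
	return le16_to_cpu(ie->max_idle_period) * 1024;	/* 1000 TU = 1024 ms */
}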
diff --git a/include/linux/if_bridge.h b/include/linux/if_bridge.h
index c5847dc75a93..0c16866a7aac 100644
--- a/include/linux/if_bridge.h
+++ b/include/linux/if_bridge.h
@@ -48,6 +48,7 @@ struct br_ip_list {
#define BR_MCAST_FLOOD BIT(11)
#define BR_MULTICAST_TO_UNICAST BIT(12)
#define BR_VLAN_TUNNEL BIT(13)
+#define BR_BCAST_FLOOD BIT(14)
#define BR_DEFAULT_AGEING_TIME (300 * HZ)
diff --git a/include/linux/inet.h b/include/linux/inet.h
index 4cca05c9678e..636ebe87e6f8 100644
--- a/include/linux/inet.h
+++ b/include/linux/inet.h
@@ -43,6 +43,8 @@
#define _LINUX_INET_H
#include <linux/types.h>
+#include <net/net_namespace.h>
+#include <linux/socket.h>
/*
* These mimic similar macros defined in user-space for inet_ntop(3).
@@ -54,4 +56,8 @@
extern __be32 in_aton(const char *str);
extern int in4_pton(const char *src, int srclen, u8 *dst, int delim, const char **end);
extern int in6_pton(const char *src, int srclen, u8 *dst, int delim, const char **end);
+
+extern int inet_pton_with_scope(struct net *net, unsigned short af,
+ const char *src, const char *port, struct sockaddr_storage *addr);
+
#endif /* _LINUX_INET_H */
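A usage sketch for the new helper (the wrapper name is illustrative): AF_UNSPEC lets it infer IPv4 vs IPv6 from the string, and the net namespace is used to resolve an IPv6 scope id such as "...%eth0". The includes added above make init_net and AF_UNSPEC visible to users of this header.

#include <linux/inet.h>

static int example_parse_addr(const char *addr_str, const char *port_str,
			      struct sockaddr_storage *ss)
{
	/* returns 0 on success and fills *ss with the address and port */
	return inet_pton_with_scope(&init_net, AF_UNSPEC,
				    addr_str, port_str, ss);
}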
diff --git a/include/linux/inetdevice.h b/include/linux/inetdevice.h
index ee971f335a8b..a2e9d6ea1349 100644
--- a/include/linux/inetdevice.h
+++ b/include/linux/inetdevice.h
@@ -153,8 +153,8 @@ struct in_ifaddr {
int register_inetaddr_notifier(struct notifier_block *nb);
int unregister_inetaddr_notifier(struct notifier_block *nb);
-void inet_netconf_notify_devconf(struct net *net, int type, int ifindex,
- struct ipv4_devconf *devconf);
+void inet_netconf_notify_devconf(struct net *net, int event, int type,
+ int ifindex, struct ipv4_devconf *devconf);
struct net_device *__ip_dev_find(struct net *net, __be32 addr, bool devref);
static inline struct net_device *ip_dev_find(struct net *net, __be32 addr)
diff --git a/include/linux/init.h b/include/linux/init.h
index 79af0962fd52..94769d687cf0 100644
--- a/include/linux/init.h
+++ b/include/linux/init.h
@@ -39,7 +39,7 @@
/* These are for everybody (although not all archs will actually
discard it in modules) */
-#define __init __section(.init.text) __cold notrace __latent_entropy
+#define __init __section(.init.text) __cold __inittrace __latent_entropy
#define __initdata __section(.init.data)
#define __initconst __section(.init.rodata)
#define __exitdata __section(.exit.data)
@@ -68,8 +68,10 @@
#ifdef MODULE
#define __exitused
+#define __inittrace notrace
#else
#define __exitused __used
+#define __inittrace
#endif
#define __exit __section(.exit.text) __exitused __cold notrace
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 91d9049f0039..e049526bc188 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -15,6 +15,7 @@
#include <linux/sched/autogroup.h>
#include <net/net_namespace.h>
#include <linux/sched/rt.h>
+#include <linux/livepatch.h>
#include <linux/mm_types.h>
#include <asm/thread_info.h>
@@ -181,6 +182,7 @@ extern struct cred init_cred;
#ifdef CONFIG_RT_MUTEXES
# define INIT_RT_MUTEXES(tsk) \
.pi_waiters = RB_ROOT, \
+ .pi_top_task = NULL, \
.pi_waiters_leftmost = NULL,
#else
# define INIT_RT_MUTEXES(tsk)
@@ -202,6 +204,13 @@ extern struct cred init_cred;
# define INIT_KASAN(tsk)
#endif
+#ifdef CONFIG_LIVEPATCH
+# define INIT_LIVEPATCH(tsk) \
+ .patch_state = KLP_UNDEFINED,
+#else
+# define INIT_LIVEPATCH(tsk)
+#endif
+
#ifdef CONFIG_THREAD_INFO_IN_TASK
# define INIT_TASK_TI(tsk) \
.thread_info = INIT_THREAD_INFO(tsk), \
@@ -210,6 +219,12 @@ extern struct cred init_cred;
# define INIT_TASK_TI(tsk)
#endif
+#ifdef CONFIG_SECURITY
+#define INIT_TASK_SECURITY .security = NULL,
+#else
+#define INIT_TASK_SECURITY
+#endif
+
/*
* INIT_TASK is used to set up the first task table, touch at
* your own risk!. Base=0, limit=0x1fffff (=2MB)
@@ -288,6 +303,8 @@ extern struct cred init_cred;
INIT_VTIME(tsk) \
INIT_NUMA_BALANCING(tsk) \
INIT_KASAN(tsk) \
+ INIT_LIVEPATCH(tsk) \
+ INIT_TASK_SECURITY \
}
diff --git a/include/linux/input/eeti_ts.h b/include/linux/input/eeti_ts.h
deleted file mode 100644
index 16625d799b6f..000000000000
--- a/include/linux/input/eeti_ts.h
+++ /dev/null
@@ -1,10 +0,0 @@
-#ifndef LINUX_INPUT_EETI_TS_H
-#define LINUX_INPUT_EETI_TS_H
-
-struct eeti_ts_platform_data {
- int irq_gpio;
- unsigned int irq_active_high;
-};
-
-#endif /* LINUX_INPUT_EETI_TS_H */
-
diff --git a/include/linux/input/matrix_keypad.h b/include/linux/input/matrix_keypad.h
index 37b04a0fdea4..6174733a57eb 100644
--- a/include/linux/input/matrix_keypad.h
+++ b/include/linux/input/matrix_keypad.h
@@ -49,6 +49,8 @@ struct matrix_keymap_data {
* @wakeup: controls whether the device should be set up as wakeup
* source
* @no_autorepeat: disable key autorepeat
+ * @drive_inactive_cols: drive inactive columns during scan, rather than
+ * making them inputs.
*
 * This structure represents platform-specific data that is used by
* matrix_keypad driver to perform proper initialization.
@@ -73,6 +75,7 @@ struct matrix_keypad_platform_data {
bool active_low;
bool wakeup;
bool no_autorepeat;
+ bool drive_inactive_cols;
};
int matrix_keypad_build_keymap(const struct matrix_keymap_data *keymap_data,
diff --git a/include/linux/intel-iommu.h b/include/linux/intel-iommu.h
index c573a52ae440..485a5b48f038 100644
--- a/include/linux/intel-iommu.h
+++ b/include/linux/intel-iommu.h
@@ -30,6 +30,8 @@
#include <linux/mmu_notifier.h>
#include <linux/list.h>
#include <linux/iommu.h>
+#include <linux/io-64-nonatomic-lo-hi.h>
+
#include <asm/cacheflush.h>
#include <asm/iommu.h>
@@ -72,24 +74,8 @@
#define OFFSET_STRIDE (9)
-#ifdef CONFIG_64BIT
#define dmar_readq(a) readq(a)
#define dmar_writeq(a,v) writeq(v,a)
-#else
-static inline u64 dmar_readq(void __iomem *addr)
-{
- u32 lo, hi;
- lo = readl(addr);
- hi = readl(addr + 4);
- return (((u64) hi) << 32) + lo;
-}
-
-static inline void dmar_writeq(void __iomem *addr, u64 val)
-{
- writel((u32)val, addr);
- writel((u32)(val >> 32), addr + 4);
-}
-#endif
#define DMAR_VER_MAJOR(v) (((v) & 0xf0) >> 4)
#define DMAR_VER_MINOR(v) ((v) & 0x0f)
diff --git a/include/linux/interrupt.h b/include/linux/interrupt.h
index 53144e78a369..a6fba4804672 100644
--- a/include/linux/interrupt.h
+++ b/include/linux/interrupt.h
@@ -155,7 +155,7 @@ extern int __must_check
request_percpu_irq(unsigned int irq, irq_handler_t handler,
const char *devname, void __percpu *percpu_dev_id);
-extern void free_irq(unsigned int, void *);
+extern const void *free_irq(unsigned int, void *);
extern void free_percpu_irq(unsigned int, void __percpu *);
struct device;
diff --git a/include/linux/io.h b/include/linux/io.h
index 82ef36eac8a1..2195d9ea4aaa 100644
--- a/include/linux/io.h
+++ b/include/linux/io.h
@@ -90,6 +90,27 @@ void devm_memunmap(struct device *dev, void *addr);
void *__devm_memremap_pages(struct device *dev, struct resource *res);
+#ifdef CONFIG_PCI
+/*
+ * The PCI specifications (Rev 3.0, 3.2.5 "Transaction Ordering and
+ * Posting") mandate non-posted configuration transactions. There is
+ * no ioremap API in the kernel that can guarantee non-posted write
+ * semantics across arches so provide a default implementation for
+ * mapping PCI config space that defaults to ioremap_nocache(); arches
+ * should override it if they have memory mapping implementations that
+ * guarantee non-posted write semantics to make the memory mapping
+ * compliant with the PCI specification.
+ */
+#ifndef pci_remap_cfgspace
+#define pci_remap_cfgspace pci_remap_cfgspace
+static inline void __iomem *pci_remap_cfgspace(phys_addr_t offset,
+ size_t size)
+{
+ return ioremap_nocache(offset, size);
+}
+#endif
+#endif
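A host-bridge probe fragment showing the intended use; the surrounding driver and error handling are omitted (sketch only):

#include <linux/io.h>
#include <linux/ioport.h>

static void __iomem *example_map_ecam(struct resource *res)
{
	/* non-posted config-space mapping where the arch guarantees it;
	 * otherwise this falls back to plain ioremap_nocache() */
	return pci_remap_cfgspace(res->start, resource_size(res));
}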
+
/*
* Some systems do not have legacy ISA devices.
* /dev/port is not a valid interface on these systems.
diff --git a/include/linux/iomap.h b/include/linux/iomap.h
index 7291810067eb..f753e788da31 100644
--- a/include/linux/iomap.h
+++ b/include/linux/iomap.h
@@ -41,6 +41,7 @@ struct iomap {
u16 type; /* type of mapping */
u16 flags; /* flags for mapping */
struct block_device *bdev; /* block device for I/O */
+ struct dax_device *dax_dev; /* dax_dev for dax operations */
};
/*
diff --git a/include/linux/iommu.h b/include/linux/iommu.h
index 2e4de0deee53..2cb54adc4a33 100644
--- a/include/linux/iommu.h
+++ b/include/linux/iommu.h
@@ -19,12 +19,12 @@
#ifndef __LINUX_IOMMU_H
#define __LINUX_IOMMU_H
+#include <linux/scatterlist.h>
+#include <linux/device.h>
+#include <linux/types.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/of.h>
-#include <linux/types.h>
-#include <linux/scatterlist.h>
-#include <trace/events/iommu.h>
#define IOMMU_READ (1 << 0)
#define IOMMU_WRITE (1 << 1)
@@ -32,10 +32,13 @@
#define IOMMU_NOEXEC (1 << 3)
#define IOMMU_MMIO (1 << 4) /* e.g. things like MSI doorbells */
/*
- * This is to make the IOMMU API setup privileged
- * mapppings accessible by the master only at higher
- * privileged execution level and inaccessible at
- * less privileged levels.
+ * Where the bus hardware includes a privilege level as part of its access type
+ * markings, and certain devices are capable of issuing transactions marked as
+ * either 'supervisor' or 'user', the IOMMU_PRIV flag requests that the other
+ * given permission flags only apply to accesses at the higher privilege level,
+ * and that unprivileged transactions should have as little access as possible.
+ * This would usually imply the same permissions as kernel mappings on the CPU,
+ * if the IOMMU page table format is equivalent.
*/
#define IOMMU_PRIV (1 << 5)
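For a master that tags some transactions as supervisor accesses, a privileged-only mapping would be requested roughly like this (sketch; domain, iova and paddr are placeholders):

#include <linux/iommu.h>
#include <linux/sizes.h>

static int example_map_priv(struct iommu_domain *domain,
			    unsigned long iova, phys_addr_t paddr)
{
	/* supervisor transactions get read/write access; user-privilege
	 * transactions from the same master get no access to this range */
	return iommu_map(domain, iova, paddr, SZ_64K,
			 IOMMU_READ | IOMMU_WRITE | IOMMU_PRIV);
}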
@@ -336,46 +339,9 @@ extern int iommu_domain_window_enable(struct iommu_domain *domain, u32 wnd_nr,
phys_addr_t offset, u64 size,
int prot);
extern void iommu_domain_window_disable(struct iommu_domain *domain, u32 wnd_nr);
-/**
- * report_iommu_fault() - report about an IOMMU fault to the IOMMU framework
- * @domain: the iommu domain where the fault has happened
- * @dev: the device where the fault has happened
- * @iova: the faulting address
- * @flags: mmu fault flags (e.g. IOMMU_FAULT_READ/IOMMU_FAULT_WRITE/...)
- *
- * This function should be called by the low-level IOMMU implementations
- * whenever IOMMU faults happen, to allow high-level users, that are
- * interested in such events, to know about them.
- *
- * This event may be useful for several possible use cases:
- * - mere logging of the event
- * - dynamic TLB/PTE loading
- * - if restarting of the faulting device is required
- *
- * Returns 0 on success and an appropriate error code otherwise (if dynamic
- * PTE/TLB loading will one day be supported, implementations will be able
- * to tell whether it succeeded or not according to this return value).
- *
- * Specifically, -ENOSYS is returned if a fault handler isn't installed
- * (though fault handlers can also return -ENOSYS, in case they want to
- * elicit the default behavior of the IOMMU drivers).
- */
-static inline int report_iommu_fault(struct iommu_domain *domain,
- struct device *dev, unsigned long iova, int flags)
-{
- int ret = -ENOSYS;
-
- /*
- * if upper layers showed interest and installed a fault handler,
- * invoke it.
- */
- if (domain->handler)
- ret = domain->handler(domain, dev, iova, flags,
- domain->handler_token);
- trace_io_page_fault(dev, iova, flags);
- return ret;
-}
+extern int report_iommu_fault(struct iommu_domain *domain, struct device *dev,
+ unsigned long iova, int flags);
static inline size_t iommu_map_sg(struct iommu_domain *domain,
unsigned long iova, struct scatterlist *sg,
diff --git a/include/linux/iova.h b/include/linux/iova.h
index f27bb2c62fca..e0a892ae45c0 100644
--- a/include/linux/iova.h
+++ b/include/linux/iova.h
@@ -82,6 +82,7 @@ static inline unsigned long iova_pfn(struct iova_domain *iovad, dma_addr_t iova)
return iova >> iova_shift(iovad);
}
+#if IS_ENABLED(CONFIG_IOMMU_IOVA)
int iova_cache_get(void);
void iova_cache_put(void);
@@ -106,5 +107,95 @@ void put_iova_domain(struct iova_domain *iovad);
struct iova *split_and_remove_iova(struct iova_domain *iovad,
struct iova *iova, unsigned long pfn_lo, unsigned long pfn_hi);
void free_cpu_cached_iovas(unsigned int cpu, struct iova_domain *iovad);
+#else
+static inline int iova_cache_get(void)
+{
+ return -ENOTSUPP;
+}
+
+static inline void iova_cache_put(void)
+{
+}
+
+static inline struct iova *alloc_iova_mem(void)
+{
+ return NULL;
+}
+
+static inline void free_iova_mem(struct iova *iova)
+{
+}
+
+static inline void free_iova(struct iova_domain *iovad, unsigned long pfn)
+{
+}
+
+static inline void __free_iova(struct iova_domain *iovad, struct iova *iova)
+{
+}
+
+static inline struct iova *alloc_iova(struct iova_domain *iovad,
+ unsigned long size,
+ unsigned long limit_pfn,
+ bool size_aligned)
+{
+ return NULL;
+}
+
+static inline void free_iova_fast(struct iova_domain *iovad,
+ unsigned long pfn,
+ unsigned long size)
+{
+}
+
+static inline unsigned long alloc_iova_fast(struct iova_domain *iovad,
+ unsigned long size,
+ unsigned long limit_pfn)
+{
+ return 0;
+}
+
+static inline struct iova *reserve_iova(struct iova_domain *iovad,
+ unsigned long pfn_lo,
+ unsigned long pfn_hi)
+{
+ return NULL;
+}
+
+static inline void copy_reserved_iova(struct iova_domain *from,
+ struct iova_domain *to)
+{
+}
+
+static inline void init_iova_domain(struct iova_domain *iovad,
+ unsigned long granule,
+ unsigned long start_pfn,
+ unsigned long pfn_32bit)
+{
+}
+
+static inline struct iova *find_iova(struct iova_domain *iovad,
+ unsigned long pfn)
+{
+ return NULL;
+}
+
+static inline void put_iova_domain(struct iova_domain *iovad)
+{
+}
+
+static inline struct iova *split_and_remove_iova(struct iova_domain *iovad,
+ struct iova *iova,
+ unsigned long pfn_lo,
+ unsigned long pfn_hi)
+{
+ return NULL;
+}
+
+static inline void free_cpu_cached_iovas(unsigned int cpu,
+ struct iova_domain *iovad)
+{
+}
+#endif
#endif
diff --git a/include/linux/ipc.h b/include/linux/ipc.h
index 9d84942ae2e5..71fd92d81b26 100644
--- a/include/linux/ipc.h
+++ b/include/linux/ipc.h
@@ -8,8 +8,7 @@
#define IPCMNI 32768 /* <= MAX_INT limit for ipc arrays (including sysctl changes) */
/* used by in-kernel data structures */
-struct kern_ipc_perm
-{
+struct kern_ipc_perm {
spinlock_t lock;
bool deleted;
int id;
@@ -18,9 +17,9 @@ struct kern_ipc_perm
kgid_t gid;
kuid_t cuid;
kgid_t cgid;
- umode_t mode;
+ umode_t mode;
unsigned long seq;
void *security;
-};
+} ____cacheline_aligned_in_smp;
#endif /* _LINUX_IPC_H */
diff --git a/include/linux/ipv6.h b/include/linux/ipv6.h
index 71be5b330d21..e1b442996f81 100644
--- a/include/linux/ipv6.h
+++ b/include/linux/ipv6.h
@@ -37,6 +37,7 @@ struct ipv6_devconf {
__s32 accept_ra_rtr_pref;
__s32 rtr_probe_interval;
#ifdef CONFIG_IPV6_ROUTE_INFO
+ __s32 accept_ra_rt_info_min_plen;
__s32 accept_ra_rt_info_max_plen;
#endif
#endif
@@ -70,6 +71,7 @@ struct ipv6_devconf {
#endif
__u32 enhanced_dad;
__u32 addr_gen_mode;
+ __s32 disable_policy;
struct ctl_table_header *sysctl_header;
};
diff --git a/include/linux/irqchip/arm-gic-v3.h b/include/linux/irqchip/arm-gic-v3.h
index 97cbca19430d..fffb91202bc9 100644
--- a/include/linux/irqchip/arm-gic-v3.h
+++ b/include/linux/irqchip/arm-gic-v3.h
@@ -132,6 +132,9 @@
#define GIC_BASER_SHAREABILITY(reg, type) \
(GIC_BASER_##type << reg##_SHAREABILITY_SHIFT)
+/* encode a size field of width @w containing @n - 1 units */
+#define GIC_ENCODE_SZ(n, w) (((unsigned long)(n) - 1) & GENMASK_ULL(((w) - 1), 0))
+
#define GICR_PROPBASER_SHAREABILITY_SHIFT (10)
#define GICR_PROPBASER_INNER_CACHEABILITY_SHIFT (7)
#define GICR_PROPBASER_OUTER_CACHEABILITY_SHIFT (56)
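GIC_ENCODE_SZ() stores "number of units minus one", truncated to the field width; a quick worked example with made-up field sizes:

#include <linux/bitops.h>
#include <linux/irqchip/arm-gic-v3.h>

/* an 8-entry table described by a 5-bit size field encodes as 7 */
static const unsigned long example_sz = GIC_ENCODE_SZ(8, 5);	/* 0x7 */
/* oversized values are silently truncated: GIC_ENCODE_SZ(256, 5) == 0x1f */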
@@ -156,6 +159,8 @@
#define GICR_PROPBASER_RaWaWb GIC_BASER_CACHEABILITY(GICR_PROPBASER, INNER, RaWaWb)
#define GICR_PROPBASER_IDBITS_MASK (0x1f)
+#define GICR_PROPBASER_ADDRESS(x) ((x) & GENMASK_ULL(51, 12))
+#define GICR_PENDBASER_ADDRESS(x) ((x) & GENMASK_ULL(51, 16))
#define GICR_PENDBASER_SHAREABILITY_SHIFT (10)
#define GICR_PENDBASER_INNER_CACHEABILITY_SHIFT (7)
@@ -232,12 +237,18 @@
#define GITS_CTLR_QUIESCENT (1U << 31)
#define GITS_TYPER_PLPIS (1UL << 0)
+#define GITS_TYPER_ITT_ENTRY_SIZE_SHIFT 4
#define GITS_TYPER_IDBITS_SHIFT 8
#define GITS_TYPER_DEVBITS_SHIFT 13
#define GITS_TYPER_DEVBITS(r) ((((r) >> GITS_TYPER_DEVBITS_SHIFT) & 0x1f) + 1)
#define GITS_TYPER_PTA (1UL << 19)
#define GITS_TYPER_HWCOLLCNT_SHIFT 24
+#define GITS_IIDR_REV_SHIFT 12
+#define GITS_IIDR_REV_MASK (0xf << GITS_IIDR_REV_SHIFT)
+#define GITS_IIDR_REV(r) (((r) >> GITS_IIDR_REV_SHIFT) & 0xf)
+#define GITS_IIDR_PRODUCTID_SHIFT 24
+
#define GITS_CBASER_VALID (1ULL << 63)
#define GITS_CBASER_SHAREABILITY_SHIFT (10)
#define GITS_CBASER_INNER_CACHEABILITY_SHIFT (59)
@@ -290,6 +301,7 @@
#define GITS_BASER_TYPE(r) (((r) >> GITS_BASER_TYPE_SHIFT) & 7)
#define GITS_BASER_ENTRY_SIZE_SHIFT (48)
#define GITS_BASER_ENTRY_SIZE(r) ((((r) >> GITS_BASER_ENTRY_SIZE_SHIFT) & 0x1f) + 1)
+#define GITS_BASER_ENTRY_SIZE_MASK GENMASK_ULL(52, 48)
#define GITS_BASER_SHAREABILITY_SHIFT (10)
#define GITS_BASER_InnerShareable \
GIC_BASER_SHAREABILITY(GITS_BASER, InnerShareable)
@@ -337,9 +349,11 @@
#define E_ITS_INT_UNMAPPED_INTERRUPT 0x010307
#define E_ITS_CLEAR_UNMAPPED_INTERRUPT 0x010507
#define E_ITS_MAPD_DEVICE_OOR 0x010801
+#define E_ITS_MAPD_ITTSIZE_OOR 0x010802
#define E_ITS_MAPC_PROCNUM_OOR 0x010902
#define E_ITS_MAPC_COLLECTION_OOR 0x010903
#define E_ITS_MAPTI_UNMAPPED_DEVICE 0x010a04
+#define E_ITS_MAPTI_ID_OOR 0x010a05
#define E_ITS_MAPTI_PHYSICALID_OOR 0x010a06
#define E_ITS_INV_UNMAPPED_INTERRUPT 0x010c07
#define E_ITS_INVALL_UNMAPPED_COLLECTION 0x010d09
diff --git a/include/linux/irqchip/mips-gic.h b/include/linux/irqchip/mips-gic.h
index 7b49c71c968b..2b0e56619e53 100644
--- a/include/linux/irqchip/mips-gic.h
+++ b/include/linux/irqchip/mips-gic.h
@@ -258,7 +258,6 @@ extern unsigned int gic_present;
extern void gic_init(unsigned long gic_base_addr,
unsigned long gic_addrspace_size, unsigned int cpu_vec,
unsigned int irqbase);
-extern void gic_clocksource_init(unsigned int);
extern u64 gic_read_count(void);
extern unsigned int gic_get_count_width(void);
extern u64 gic_read_compare(void);
diff --git a/include/linux/jbd2.h b/include/linux/jbd2.h
index dfaa1f4dcb0c..606b6bce3a5b 100644
--- a/include/linux/jbd2.h
+++ b/include/linux/jbd2.h
@@ -491,6 +491,8 @@ struct jbd2_journal_handle
unsigned long h_start_jiffies;
unsigned int h_requested_credits;
+
+ unsigned int saved_alloc_context;
};
diff --git a/include/linux/jiffies.h b/include/linux/jiffies.h
index 624215cebee5..36872fbb815d 100644
--- a/include/linux/jiffies.h
+++ b/include/linux/jiffies.h
@@ -1,6 +1,7 @@
#ifndef _LINUX_JIFFIES_H
#define _LINUX_JIFFIES_H
+#include <linux/cache.h>
#include <linux/math64.h>
#include <linux/kernel.h>
#include <linux/types.h>
@@ -63,19 +64,13 @@ extern int register_refined_jiffies(long clock_tick_rate);
/* TICK_USEC is the time between ticks in usec assuming fake USER_HZ */
#define TICK_USEC ((1000000UL + USER_HZ/2) / USER_HZ)
-/* some arch's have a small-data section that can be accessed register-relative
- * but that can only take up to, say, 4-byte variables. jiffies being part of
- * an 8-byte variable may not be correctly accessed unless we force the issue
- */
-#define __jiffy_data __attribute__((section(".data")))
-
/*
* The 64-bit value is not atomic - you MUST NOT read it
* without sampling the sequence number in jiffies_lock.
* get_jiffies_64() will do this for you as appropriate.
*/
-extern u64 __jiffy_data jiffies_64;
-extern unsigned long volatile __jiffy_data jiffies;
+extern u64 __cacheline_aligned_in_smp jiffies_64;
+extern unsigned long volatile __cacheline_aligned_in_smp jiffies;
#if (BITS_PER_LONG < 64)
u64 get_jiffies_64(void);
diff --git a/include/linux/kernel.h b/include/linux/kernel.h
index 4c26dc3a8295..13bc08aba704 100644
--- a/include/linux/kernel.h
+++ b/include/linux/kernel.h
@@ -47,6 +47,7 @@
/* @a is a power of 2 value */
#define ALIGN(x, a) __ALIGN_KERNEL((x), (a))
+#define ALIGN_DOWN(x, a) __ALIGN_KERNEL((x) - ((a) - 1), (a))
#define __ALIGN_MASK(x, mask) __ALIGN_KERNEL_MASK((x), (mask))
#define PTR_ALIGN(p, a) ((typeof(p))ALIGN((unsigned long)(p), (a)))
#define IS_ALIGNED(x, a) (((x) & ((typeof(x))(a) - 1)) == 0)
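ALIGN_DOWN() complements ALIGN(): both take a power-of-two alignment, one rounds up and the other rounds down. Worked values, for illustration only:

#include <linux/kernel.h>

/* with a 4 KiB alignment:
 *   ALIGN(0x1234, 0x1000)      == 0x2000   (round up)
 *   ALIGN_DOWN(0x1234, 0x1000) == 0x1000   (round down)
 * values that are already aligned are returned unchanged by both */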
@@ -438,6 +439,7 @@ extern int get_option(char **str, int *pint);
extern char *get_options(const char *str, int nints, int *ints);
extern unsigned long long memparse(const char *ptr, char **retptr);
extern bool parse_option_str(const char *str, const char *option);
+extern char *next_arg(char *args, char **param, char **val);
extern int core_kernel_text(unsigned long addr);
extern int core_kernel_data(unsigned long addr);
diff --git a/include/linux/kexec.h b/include/linux/kexec.h
index d419d0e51fe5..c9481ebcbc0c 100644
--- a/include/linux/kexec.h
+++ b/include/linux/kexec.h
@@ -14,17 +14,15 @@
#if !defined(__ASSEMBLY__)
+#include <linux/crash_core.h>
#include <asm/io.h>
#include <uapi/linux/kexec.h>
#ifdef CONFIG_KEXEC_CORE
#include <linux/list.h>
-#include <linux/linkage.h>
#include <linux/compat.h>
#include <linux/ioport.h>
-#include <linux/elfcore.h>
-#include <linux/elf.h>
#include <linux/module.h>
#include <asm/kexec.h>
@@ -62,19 +60,15 @@
#define KEXEC_CRASH_MEM_ALIGN PAGE_SIZE
#endif
-#define KEXEC_NOTE_HEAD_BYTES ALIGN(sizeof(struct elf_note), 4)
-#define KEXEC_CORE_NOTE_NAME "CORE"
-#define KEXEC_CORE_NOTE_NAME_BYTES ALIGN(sizeof(KEXEC_CORE_NOTE_NAME), 4)
-#define KEXEC_CORE_NOTE_DESC_BYTES ALIGN(sizeof(struct elf_prstatus), 4)
+#define KEXEC_CORE_NOTE_NAME CRASH_CORE_NOTE_NAME
+
/*
* The per-cpu notes area is a list of notes terminated by a "NULL"
* note header. For kdump, the code in vmcore.c runs in the context
* of the second kernel to combine them into one note.
*/
#ifndef KEXEC_NOTE_BYTES
-#define KEXEC_NOTE_BYTES ( (KEXEC_NOTE_HEAD_BYTES * 2) + \
- KEXEC_CORE_NOTE_NAME_BYTES + \
- KEXEC_CORE_NOTE_DESC_BYTES )
+#define KEXEC_NOTE_BYTES CRASH_CORE_NOTE_BYTES
#endif
/*
@@ -256,33 +250,6 @@ extern void crash_kexec(struct pt_regs *);
int kexec_should_crash(struct task_struct *);
int kexec_crash_loaded(void);
void crash_save_cpu(struct pt_regs *regs, int cpu);
-void crash_save_vmcoreinfo(void);
-void arch_crash_save_vmcoreinfo(void);
-__printf(1, 2)
-void vmcoreinfo_append_str(const char *fmt, ...);
-phys_addr_t paddr_vmcoreinfo_note(void);
-
-#define VMCOREINFO_OSRELEASE(value) \
- vmcoreinfo_append_str("OSRELEASE=%s\n", value)
-#define VMCOREINFO_PAGESIZE(value) \
- vmcoreinfo_append_str("PAGESIZE=%ld\n", value)
-#define VMCOREINFO_SYMBOL(name) \
- vmcoreinfo_append_str("SYMBOL(%s)=%lx\n", #name, (unsigned long)&name)
-#define VMCOREINFO_SIZE(name) \
- vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
- (unsigned long)sizeof(name))
-#define VMCOREINFO_STRUCT_SIZE(name) \
- vmcoreinfo_append_str("SIZE(%s)=%lu\n", #name, \
- (unsigned long)sizeof(struct name))
-#define VMCOREINFO_OFFSET(name, field) \
- vmcoreinfo_append_str("OFFSET(%s.%s)=%lu\n", #name, #field, \
- (unsigned long)offsetof(struct name, field))
-#define VMCOREINFO_LENGTH(name, value) \
- vmcoreinfo_append_str("LENGTH(%s)=%lu\n", #name, (unsigned long)value)
-#define VMCOREINFO_NUMBER(name) \
- vmcoreinfo_append_str("NUMBER(%s)=%ld\n", #name, (long)name)
-#define VMCOREINFO_CONFIG(name) \
- vmcoreinfo_append_str("CONFIG_%s=y\n", #name)
extern struct kimage *kexec_image;
extern struct kimage *kexec_crash_image;
@@ -303,31 +270,15 @@ extern int kexec_load_disabled;
#define KEXEC_FILE_FLAGS (KEXEC_FILE_UNLOAD | KEXEC_FILE_ON_CRASH | \
KEXEC_FILE_NO_INITRAMFS)
-#define VMCOREINFO_BYTES (4096)
-#define VMCOREINFO_NOTE_NAME "VMCOREINFO"
-#define VMCOREINFO_NOTE_NAME_BYTES ALIGN(sizeof(VMCOREINFO_NOTE_NAME), 4)
-#define VMCOREINFO_NOTE_SIZE (KEXEC_NOTE_HEAD_BYTES*2 + VMCOREINFO_BYTES \
- + VMCOREINFO_NOTE_NAME_BYTES)
-
/* Location of a reserved region to hold the crash kernel.
*/
extern struct resource crashk_res;
extern struct resource crashk_low_res;
-typedef u32 note_buf_t[KEXEC_NOTE_BYTES/4];
extern note_buf_t __percpu *crash_notes;
-extern u32 vmcoreinfo_note[VMCOREINFO_NOTE_SIZE/4];
-extern size_t vmcoreinfo_size;
-extern size_t vmcoreinfo_max_size;
/* flag to track if kexec reboot is in progress */
extern bool kexec_in_progress;
-int __init parse_crashkernel(char *cmdline, unsigned long long system_ram,
- unsigned long long *crash_size, unsigned long long *crash_base);
-int parse_crashkernel_high(char *cmdline, unsigned long long system_ram,
- unsigned long long *crash_size, unsigned long long *crash_base);
-int parse_crashkernel_low(char *cmdline, unsigned long long system_ram,
- unsigned long long *crash_size, unsigned long long *crash_base);
int crash_shrink_memory(unsigned long new_size);
size_t crash_get_memory_size(void);
void crash_free_reserved_phys_range(unsigned long begin, unsigned long end);
diff --git a/include/linux/key-type.h b/include/linux/key-type.h
index eaee981c5558..8496cf64575c 100644
--- a/include/linux/key-type.h
+++ b/include/linux/key-type.h
@@ -147,6 +147,14 @@ struct key_type {
*/
request_key_actor_t request_key;
+ /* Look up a keyring access restriction (optional)
+ *
+ * - NULL is a valid return value (meaning the requested restriction
+ * is known but will never block addition of a key)
+ * - should return -EINVAL if the restriction is unknown
+ */
+ struct key_restriction *(*lookup_restriction)(const char *params);
+
/* internal fields */
struct list_head link; /* link in types list */
struct lock_class_key lock_class; /* key->sem lock class */
diff --git a/include/linux/key.h b/include/linux/key.h
index e45212f2777e..0c9b93b0d1f7 100644
--- a/include/linux/key.h
+++ b/include/linux/key.h
@@ -23,6 +23,7 @@
#include <linux/rwsem.h>
#include <linux/atomic.h>
#include <linux/assoc_array.h>
+#include <linux/refcount.h>
#ifdef __KERNEL__
#include <linux/uidgid.h>
@@ -126,6 +127,17 @@ static inline bool is_key_possessed(const key_ref_t key_ref)
return (unsigned long) key_ref & 1UL;
}
+typedef int (*key_restrict_link_func_t)(struct key *dest_keyring,
+ const struct key_type *type,
+ const union key_payload *payload,
+ struct key *restriction_key);
+
+struct key_restriction {
+ key_restrict_link_func_t check;
+ struct key *key;
+ struct key_type *keytype;
+};
+
/*****************************************************************************/
/*
* authentication token / access credential / keyring
@@ -135,7 +147,7 @@ static inline bool is_key_possessed(const key_ref_t key_ref)
* - Kerberos TGTs and tickets
*/
struct key {
- atomic_t usage; /* number of references */
+ refcount_t usage; /* number of references */
key_serial_t serial; /* key serial number */
union {
struct list_head graveyard_link;
@@ -205,18 +217,17 @@ struct key {
};
/* This is set on a keyring to restrict the addition of a link to a key
- * to it. If this method isn't provided then it is assumed that the
+ * to it. If this structure isn't provided then it is assumed that the
* keyring is open to any addition. It is ignored for non-keyring
- * keys.
+ * keys. Only set this value using keyring_restrict(), keyring_alloc(),
+ * or key_alloc().
*
* This is intended for use with rings of trusted keys whereby addition
* to the keyring needs to be controlled. KEY_ALLOC_BYPASS_RESTRICTION
* overrides this, allowing the kernel to add extra keys without
* restriction.
*/
- int (*restrict_link)(struct key *keyring,
- const struct key_type *type,
- const union key_payload *payload);
+ struct key_restriction *restrict_link;
};
extern struct key *key_alloc(struct key_type *type,
@@ -225,9 +236,7 @@ extern struct key *key_alloc(struct key_type *type,
const struct cred *cred,
key_perm_t perm,
unsigned long flags,
- int (*restrict_link)(struct key *,
- const struct key_type *,
- const union key_payload *));
+ struct key_restriction *restrict_link);
#define KEY_ALLOC_IN_QUOTA 0x0000 /* add to quota, reject if would overrun */
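A sketch of what a keyring restriction looks like under the new scheme; the check function and the "asymmetric keys only" policy are illustrative, not from this patch:

#include <linux/key-type.h>
#include <linux/string.h>
#include <linux/errno.h>

static int example_restrict_check(struct key *dest_keyring,
				  const struct key_type *type,
				  const union key_payload *payload,
				  struct key *restriction_key)
{
	/* example policy: only asymmetric keys may be linked */
	if (strcmp(type->name, "asymmetric") != 0)
		return -EOPNOTSUPP;
	return 0;
}

static struct key_restriction example_restriction = {
	.check	= example_restrict_check,
	/* .key / .keytype can pin objects needed by ->check() */
};

/* passed to key_alloc()/keyring_alloc() as the restrict_link argument */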
@@ -242,7 +251,7 @@ extern void key_put(struct key *key);
static inline struct key *__key_get(struct key *key)
{
- atomic_inc(&key->usage);
+ refcount_inc(&key->usage);
return key;
}
@@ -303,14 +312,13 @@ extern struct key *keyring_alloc(const char *description, kuid_t uid, kgid_t gid
const struct cred *cred,
key_perm_t perm,
unsigned long flags,
- int (*restrict_link)(struct key *,
- const struct key_type *,
- const union key_payload *),
+ struct key_restriction *restrict_link,
struct key *dest);
extern int restrict_link_reject(struct key *keyring,
const struct key_type *type,
- const union key_payload *payload);
+ const union key_payload *payload,
+ struct key *restriction_key);
extern int keyring_clear(struct key *keyring);
@@ -321,6 +329,9 @@ extern key_ref_t keyring_search(key_ref_t keyring,
extern int keyring_add_key(struct key *keyring,
struct key *key);
+extern int keyring_restrict(key_ref_t keyring, const char *type,
+ const char *restriction);
+
extern struct key *key_lookup(key_serial_t id);
static inline key_serial_t key_serial(const struct key *key)
diff --git a/include/linux/kobject.h b/include/linux/kobject.h
index e6284591599e..ca85cb80e99a 100644
--- a/include/linux/kobject.h
+++ b/include/linux/kobject.h
@@ -108,6 +108,8 @@ extern int __must_check kobject_rename(struct kobject *, const char *new_name);
extern int __must_check kobject_move(struct kobject *, struct kobject *);
extern struct kobject *kobject_get(struct kobject *kobj);
+extern struct kobject * __must_check kobject_get_unless_zero(
+ struct kobject *kobj);
extern void kobject_put(struct kobject *kobj);
extern const void *kobject_namespace(struct kobject *kobj);
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h
index c328e4f7dcad..30f90c1a0aaf 100644
--- a/include/linux/kprobes.h
+++ b/include/linux/kprobes.h
@@ -267,6 +267,8 @@ extern int arch_init_kprobes(void);
extern void show_registers(struct pt_regs *regs);
extern void kprobes_inc_nmissed_count(struct kprobe *p);
extern bool arch_within_kprobe_blacklist(unsigned long addr);
+extern bool arch_function_offset_within_entry(unsigned long offset);
+extern bool function_offset_within_entry(kprobe_opcode_t *addr, const char *sym, unsigned long offset);
extern bool within_kprobe_blacklist(unsigned long addr);
@@ -379,6 +381,7 @@ static inline struct kprobe_ctlblk *get_kprobe_ctlblk(void)
return this_cpu_ptr(&kprobe_ctlblk);
}
+kprobe_opcode_t *kprobe_lookup_name(const char *name, unsigned int offset);
int register_kprobe(struct kprobe *p);
void unregister_kprobe(struct kprobe *p);
int register_kprobes(struct kprobe **kps, int num);
diff --git a/include/linux/kref.h b/include/linux/kref.h
index f4156f88f557..29220724bf1c 100644
--- a/include/linux/kref.h
+++ b/include/linux/kref.h
@@ -66,8 +66,6 @@ static inline void kref_get(struct kref *kref)
*/
static inline int kref_put(struct kref *kref, void (*release)(struct kref *kref))
{
- WARN_ON(release == NULL);
-
if (refcount_dec_and_test(&kref->refcount)) {
release(kref);
return 1;
@@ -79,8 +77,6 @@ static inline int kref_put_mutex(struct kref *kref,
void (*release)(struct kref *kref),
struct mutex *lock)
{
- WARN_ON(release == NULL);
-
if (refcount_dec_and_mutex_lock(&kref->refcount, lock)) {
release(kref);
return 1;
@@ -92,8 +88,6 @@ static inline int kref_put_lock(struct kref *kref,
void (*release)(struct kref *kref),
spinlock_t *lock)
{
- WARN_ON(release == NULL);
-
if (refcount_dec_and_lock(&kref->refcount, lock)) {
release(kref);
return 1;
diff --git a/include/linux/ksm.h b/include/linux/ksm.h
index e1cfda4bee58..78b44a024eaa 100644
--- a/include/linux/ksm.h
+++ b/include/linux/ksm.h
@@ -61,7 +61,7 @@ static inline void set_page_stable_node(struct page *page,
struct page *ksm_might_need_to_copy(struct page *page,
struct vm_area_struct *vma, unsigned long address);
-int rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
+void rmap_walk_ksm(struct page *page, struct rmap_walk_control *rwc);
void ksm_migrate_page(struct page *newpage, struct page *oldpage);
#else /* !CONFIG_KSM */
@@ -94,10 +94,9 @@ static inline int page_referenced_ksm(struct page *page,
return 0;
}
-static inline int rmap_walk_ksm(struct page *page,
+static inline void rmap_walk_ksm(struct page *page,
struct rmap_walk_control *rwc)
{
- return 0;
}
static inline void ksm_migrate_page(struct page *newpage, struct page *oldpage)
diff --git a/include/linux/kvm_host.h b/include/linux/kvm_host.h
index d0250744507a..8c0664309815 100644
--- a/include/linux/kvm_host.h
+++ b/include/linux/kvm_host.h
@@ -115,14 +115,17 @@ static inline bool is_error_page(struct page *page)
return IS_ERR(page);
}
+#define KVM_REQUEST_MASK GENMASK(7,0)
+#define KVM_REQUEST_NO_WAKEUP BIT(8)
+#define KVM_REQUEST_WAIT BIT(9)
/*
* Architecture-independent vcpu->requests bit members
* Bits 4-7 are reserved for more arch-independent bits.
*/
-#define KVM_REQ_TLB_FLUSH 0
-#define KVM_REQ_MMU_RELOAD 1
-#define KVM_REQ_PENDING_TIMER 2
-#define KVM_REQ_UNHALT 3
+#define KVM_REQ_TLB_FLUSH (0 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_MMU_RELOAD (1 | KVM_REQUEST_WAIT | KVM_REQUEST_NO_WAKEUP)
+#define KVM_REQ_PENDING_TIMER 2
+#define KVM_REQ_UNHALT 3
#define KVM_USERSPACE_IRQ_SOURCE_ID 0
#define KVM_IRQFD_RESAMPLE_IRQ_SOURCE_ID 1
@@ -268,6 +271,12 @@ struct kvm_vcpu {
static inline int kvm_vcpu_exiting_guest_mode(struct kvm_vcpu *vcpu)
{
+ /*
+ * The memory barrier ensures a previous write to vcpu->requests cannot
+ * be reordered with the read of vcpu->mode. It pairs with the general
+ * memory barrier following the write of vcpu->mode in VCPU RUN.
+ */
+ smp_mb__before_atomic();
return cmpxchg(&vcpu->mode, IN_GUEST_MODE, EXITING_GUEST_MODE);
}
@@ -375,8 +384,6 @@ struct kvm {
struct mutex slots_lock;
struct mm_struct *mm; /* userspace tied to this vm */
struct kvm_memslots *memslots[KVM_ADDRESS_SPACE_NUM];
- struct srcu_struct srcu;
- struct srcu_struct irq_srcu;
struct kvm_vcpu *vcpus[KVM_MAX_VCPUS];
/*
@@ -403,7 +410,7 @@ struct kvm {
struct kvm_vm_stat stat;
struct kvm_arch arch;
refcount_t users_count;
-#ifdef KVM_COALESCED_MMIO_PAGE_OFFSET
+#ifdef CONFIG_KVM_MMIO
struct kvm_coalesced_mmio_ring *coalesced_mmio_ring;
spinlock_t ring_lock;
struct list_head coalesced_zones;
@@ -429,6 +436,8 @@ struct kvm {
struct list_head devices;
struct dentry *debugfs_dentry;
struct kvm_stat_data **debugfs_stat_data;
+ struct srcu_struct srcu;
+ struct srcu_struct irq_srcu;
};
#define kvm_err(fmt, ...) \
@@ -490,6 +499,17 @@ static inline struct kvm_vcpu *kvm_get_vcpu_by_id(struct kvm *kvm, int id)
return NULL;
}
+static inline int kvm_vcpu_get_idx(struct kvm_vcpu *vcpu)
+{
+ struct kvm_vcpu *tmp;
+ int idx;
+
+ kvm_for_each_vcpu(idx, tmp, vcpu->kvm)
+ if (tmp == vcpu)
+ return idx;
+ BUG();
+}
+
#define kvm_for_each_memslot(memslot, slots) \
for (memslot = &slots->memslots[0]; \
memslot < slots->memslots + KVM_MEM_SLOTS_NUM && memslot->npages;\
@@ -502,10 +522,10 @@ int __must_check vcpu_load(struct kvm_vcpu *vcpu);
void vcpu_put(struct kvm_vcpu *vcpu);
#ifdef __KVM_HAVE_IOAPIC
-void kvm_vcpu_request_scan_ioapic(struct kvm *kvm);
+void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm);
void kvm_arch_post_irq_routing_update(struct kvm *kvm);
#else
-static inline void kvm_vcpu_request_scan_ioapic(struct kvm *kvm)
+static inline void kvm_arch_post_irq_ack_notifier_list_update(struct kvm *kvm)
{
}
static inline void kvm_arch_post_irq_routing_update(struct kvm *kvm)
@@ -641,18 +661,18 @@ int kvm_read_guest_page(struct kvm *kvm, gfn_t gfn, void *data, int offset,
int kvm_read_guest_atomic(struct kvm *kvm, gpa_t gpa, void *data,
unsigned long len);
int kvm_read_guest(struct kvm *kvm, gpa_t gpa, void *data, unsigned long len);
-int kvm_vcpu_read_guest_cached(struct kvm_vcpu *vcpu, struct gfn_to_hva_cache *ghc,
- void *data, unsigned long len);
+int kvm_read_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ void *data, unsigned long len);
int kvm_write_guest_page(struct kvm *kvm, gfn_t gfn, const void *data,
int offset, int len);
int kvm_write_guest(struct kvm *kvm, gpa_t gpa, const void *data,
unsigned long len);
-int kvm_vcpu_write_guest_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
- void *data, unsigned long len);
-int kvm_vcpu_write_guest_offset_cached(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
- void *data, int offset, unsigned long len);
-int kvm_vcpu_gfn_to_hva_cache_init(struct kvm_vcpu *v, struct gfn_to_hva_cache *ghc,
- gpa_t gpa, unsigned long len);
+int kvm_write_guest_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ void *data, unsigned long len);
+int kvm_write_guest_offset_cached(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ void *data, int offset, unsigned long len);
+int kvm_gfn_to_hva_cache_init(struct kvm *kvm, struct gfn_to_hva_cache *ghc,
+ gpa_t gpa, unsigned long len);
int kvm_clear_guest_page(struct kvm *kvm, gfn_t gfn, int offset, int len);
int kvm_clear_guest(struct kvm *kvm, gpa_t gpa, unsigned long len);
struct kvm_memory_slot *gfn_to_memslot(struct kvm *kvm, gfn_t gfn);
@@ -682,7 +702,7 @@ void kvm_vcpu_mark_page_dirty(struct kvm_vcpu *vcpu, gfn_t gfn);
void kvm_vcpu_block(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu);
void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu);
-void kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
+bool kvm_vcpu_wake_up(struct kvm_vcpu *vcpu);
void kvm_vcpu_kick(struct kvm_vcpu *vcpu);
int kvm_vcpu_yield_to(struct kvm_vcpu *target);
void kvm_vcpu_on_spin(struct kvm_vcpu *vcpu);
@@ -767,8 +787,6 @@ void kvm_arch_check_processor_compat(void *rtn);
int kvm_arch_vcpu_runnable(struct kvm_vcpu *vcpu);
int kvm_arch_vcpu_should_kick(struct kvm_vcpu *vcpu);
-void *kvm_kvzalloc(unsigned long size);
-
#ifndef __KVM_HAVE_ARCH_VM_ALLOC
static inline struct kvm *kvm_arch_alloc_vm(void)
{
@@ -877,22 +895,6 @@ void kvm_unregister_irq_ack_notifier(struct kvm *kvm,
int kvm_request_irq_source_id(struct kvm *kvm);
void kvm_free_irq_source_id(struct kvm *kvm, int irq_source_id);
-#ifdef CONFIG_KVM_DEVICE_ASSIGNMENT
-int kvm_iommu_map_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
-void kvm_iommu_unmap_pages(struct kvm *kvm, struct kvm_memory_slot *slot);
-#else
-static inline int kvm_iommu_map_pages(struct kvm *kvm,
- struct kvm_memory_slot *slot)
-{
- return 0;
-}
-
-static inline void kvm_iommu_unmap_pages(struct kvm *kvm,
- struct kvm_memory_slot *slot)
-{
-}
-#endif
-
/*
* search_memslots() and __gfn_to_memslot() are here because they are
* used in non-modular code in arch/powerpc/kvm/book3s_hv_rm_mmu.c.
@@ -1025,6 +1027,7 @@ static inline int mmu_notifier_retry(struct kvm *kvm, unsigned long mmu_seq)
#define KVM_MAX_IRQ_ROUTES 1024
#endif
+bool kvm_arch_can_set_irq_routing(struct kvm *kvm);
int kvm_set_irq_routing(struct kvm *kvm,
const struct kvm_irq_routing_entry *entries,
unsigned nr,
@@ -1092,13 +1095,23 @@ static inline void kvm_make_request(int req, struct kvm_vcpu *vcpu)
* caller. Paired with the smp_mb__after_atomic in kvm_check_request.
*/
smp_wmb();
- set_bit(req, &vcpu->requests);
+ set_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
+}
+
+static inline bool kvm_test_request(int req, struct kvm_vcpu *vcpu)
+{
+ return test_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
+}
+
+static inline void kvm_clear_request(int req, struct kvm_vcpu *vcpu)
+{
+ clear_bit(req & KVM_REQUEST_MASK, &vcpu->requests);
}
static inline bool kvm_check_request(int req, struct kvm_vcpu *vcpu)
{
- if (test_bit(req, &vcpu->requests)) {
- clear_bit(req, &vcpu->requests);
+ if (kvm_test_request(req, vcpu)) {
+ kvm_clear_request(req, vcpu);
/*
* Ensure the rest of the request is visible to kvm_check_request's
@@ -1165,7 +1178,6 @@ int kvm_register_device_ops(struct kvm_device_ops *ops, u32 type);
void kvm_unregister_device_ops(u32 type);
extern struct kvm_device_ops kvm_mpic_ops;
-extern struct kvm_device_ops kvm_xics_ops;
extern struct kvm_device_ops kvm_arm_vgic_v2_ops;
extern struct kvm_device_ops kvm_arm_vgic_v3_ops;
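Under the new encoding the low byte of a request selects the bit in vcpu->requests, while KVM_REQUEST_WAIT / KVM_REQUEST_NO_WAKEUP only steer kick and wakeup behaviour; the accessors mask with KVM_REQUEST_MASK. A two-sided sketch (the flush hook stands in for an arch handler):

#include <linux/kvm_host.h>

static void example_flush_tlb(struct kvm_vcpu *vcpu)
{
	/* stand-in for the arch-specific TLB flush */
}

static void example_request_flush(struct kvm_vcpu *vcpu)
{
	/* requester side: set the request bit, then kick the vcpu */
	kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
	kvm_vcpu_kick(vcpu);
}

static void example_vcpu_run_step(struct kvm_vcpu *vcpu)
{
	/* vcpu side, in the run loop: test-and-clear the request */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		example_flush_tlb(vcpu);
}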
diff --git a/include/linux/leds-pca9532.h b/include/linux/leds-pca9532.h
index d215b4561180..5e240b2b4d58 100644
--- a/include/linux/leds-pca9532.h
+++ b/include/linux/leds-pca9532.h
@@ -22,7 +22,8 @@ enum pca9532_state {
PCA9532_OFF = 0x0,
PCA9532_ON = 0x1,
PCA9532_PWM0 = 0x2,
- PCA9532_PWM1 = 0x3
+ PCA9532_PWM1 = 0x3,
+ PCA9532_KEEP = 0xff,
};
struct pca9532_led {
@@ -44,4 +45,3 @@ struct pca9532_platform_data {
};
#endif /* __LINUX_PCA9532_H */
-
diff --git a/include/linux/leds.h b/include/linux/leds.h
index 38c0bd7ca107..64c56d454f7d 100644
--- a/include/linux/leds.h
+++ b/include/linux/leds.h
@@ -122,10 +122,16 @@ struct led_classdev {
struct mutex led_access;
};
-extern int led_classdev_register(struct device *parent,
- struct led_classdev *led_cdev);
-extern int devm_led_classdev_register(struct device *parent,
- struct led_classdev *led_cdev);
+extern int of_led_classdev_register(struct device *parent,
+ struct device_node *np,
+ struct led_classdev *led_cdev);
+#define led_classdev_register(parent, led_cdev) \
+ of_led_classdev_register(parent, NULL, led_cdev)
+extern int devm_of_led_classdev_register(struct device *parent,
+ struct device_node *np,
+ struct led_classdev *led_cdev);
+#define devm_led_classdev_register(parent, led_cdev) \
+ devm_of_led_classdev_register(parent, NULL, led_cdev)
extern void led_classdev_unregister(struct led_classdev *led_cdev);
extern void devm_led_classdev_unregister(struct device *parent,
struct led_classdev *led_cdev);
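A sketch of a platform LED driver using the new of_ variant so the class device is tied to a specific child DT node rather than the device's own node; the names, the single-child assumption and the empty brightness hook are illustrative:

#include <linux/leds.h>
#include <linux/of.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

static void example_led_set(struct led_classdev *cdev, enum led_brightness b)
{
	/* drive the hardware here */
}

static int example_led_probe(struct platform_device *pdev)
{
	struct device_node *child = of_get_next_child(pdev->dev.of_node, NULL);
	struct led_classdev *cdev;

	cdev = devm_kzalloc(&pdev->dev, sizeof(*cdev), GFP_KERNEL);
	if (!cdev)
		return -ENOMEM;

	cdev->name = "example:green:status";
	cdev->brightness_set = example_led_set;

	/* registering against @child links the classdev to that node
	 * (of_node reference handling elided in this sketch) */
	return devm_of_led_classdev_register(&pdev->dev, child, cdev);
}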
diff --git a/include/linux/libnvdimm.h b/include/linux/libnvdimm.h
index 77e7af32543f..6c807017128d 100644
--- a/include/linux/libnvdimm.h
+++ b/include/linux/libnvdimm.h
@@ -20,9 +20,11 @@
enum {
/* when a dimm supports both PMEM and BLK access a label is required */
- NDD_ALIASING = 1 << 0,
+ NDD_ALIASING = 0,
/* unarmed memory devices may not persist writes */
- NDD_UNARMED = 1 << 1,
+ NDD_UNARMED = 1,
+ /* locked memory devices should not be accessed */
+ NDD_LOCKED = 2,
/* need to set a limit somewhere, but yes, this is likely overkill */
ND_IOCTL_MAX_BUFLEN = SZ_4M,
@@ -120,7 +122,7 @@ static inline struct nd_blk_region_desc *to_blk_region_desc(
}
int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length);
-void nvdimm_clear_from_poison_list(struct nvdimm_bus *nvdimm_bus,
+void nvdimm_forget_poison(struct nvdimm_bus *nvdimm_bus,
phys_addr_t start, unsigned int len);
struct nvdimm_bus *nvdimm_bus_register(struct device *parent,
struct nvdimm_bus_descriptor *nfit_desc);
diff --git a/include/linux/lightnvm.h b/include/linux/lightnvm.h
index ca45e4a088a9..7dfa56ebbc6d 100644
--- a/include/linux/lightnvm.h
+++ b/include/linux/lightnvm.h
@@ -56,7 +56,6 @@ typedef int (nvm_get_l2p_tbl_fn)(struct nvm_dev *, u64, u32,
typedef int (nvm_op_bb_tbl_fn)(struct nvm_dev *, struct ppa_addr, u8 *);
typedef int (nvm_op_set_bb_fn)(struct nvm_dev *, struct ppa_addr *, int, int);
typedef int (nvm_submit_io_fn)(struct nvm_dev *, struct nvm_rq *);
-typedef int (nvm_erase_blk_fn)(struct nvm_dev *, struct nvm_rq *);
typedef void *(nvm_create_dma_pool_fn)(struct nvm_dev *, char *);
typedef void (nvm_destroy_dma_pool_fn)(void *);
typedef void *(nvm_dev_dma_alloc_fn)(struct nvm_dev *, void *, gfp_t,
@@ -70,7 +69,6 @@ struct nvm_dev_ops {
nvm_op_set_bb_fn *set_bb_tbl;
nvm_submit_io_fn *submit_io;
- nvm_erase_blk_fn *erase_block;
nvm_create_dma_pool_fn *create_dma_pool;
nvm_destroy_dma_pool_fn *destroy_dma_pool;
@@ -125,7 +123,7 @@ enum {
/* NAND Access Modes */
NVM_IO_SUSPEND = 0x80,
NVM_IO_SLC_MODE = 0x100,
- NVM_IO_SCRAMBLE_DISABLE = 0x200,
+ NVM_IO_SCRAMBLE_ENABLE = 0x200,
/* Block Types */
NVM_BLK_T_FREE = 0x0,
@@ -438,7 +436,8 @@ static inline int ppa_cmp_blk(struct ppa_addr ppa1, struct ppa_addr ppa2)
typedef blk_qc_t (nvm_tgt_make_rq_fn)(struct request_queue *, struct bio *);
typedef sector_t (nvm_tgt_capacity_fn)(void *);
-typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *);
+typedef void *(nvm_tgt_init_fn)(struct nvm_tgt_dev *, struct gendisk *,
+ int flags);
typedef void (nvm_tgt_exit_fn)(void *);
typedef int (nvm_tgt_sysfs_init_fn)(struct gendisk *);
typedef void (nvm_tgt_sysfs_exit_fn)(struct gendisk *);
@@ -479,10 +478,10 @@ extern int nvm_set_tgt_bb_tbl(struct nvm_tgt_dev *, struct ppa_addr *,
int, int);
extern int nvm_max_phys_sects(struct nvm_tgt_dev *);
extern int nvm_submit_io(struct nvm_tgt_dev *, struct nvm_rq *);
-extern int nvm_set_rqd_ppalist(struct nvm_dev *, struct nvm_rq *,
+extern int nvm_erase_sync(struct nvm_tgt_dev *, struct ppa_addr *, int);
+extern int nvm_set_rqd_ppalist(struct nvm_tgt_dev *, struct nvm_rq *,
const struct ppa_addr *, int, int);
-extern void nvm_free_rqd_ppalist(struct nvm_dev *, struct nvm_rq *);
-extern int nvm_erase_blk(struct nvm_tgt_dev *, struct ppa_addr *, int);
+extern void nvm_free_rqd_ppalist(struct nvm_tgt_dev *, struct nvm_rq *);
extern int nvm_get_l2p_tbl(struct nvm_tgt_dev *, u64, u32, nvm_l2p_update_fn *,
void *);
extern int nvm_get_area(struct nvm_tgt_dev *, sector_t *, sector_t);
diff --git a/include/linux/livepatch.h b/include/linux/livepatch.h
index 9072f04db616..194991ef9347 100644
--- a/include/linux/livepatch.h
+++ b/include/linux/livepatch.h
@@ -23,15 +23,16 @@
#include <linux/module.h>
#include <linux/ftrace.h>
+#include <linux/completion.h>
#if IS_ENABLED(CONFIG_LIVEPATCH)
#include <asm/livepatch.h>
-enum klp_state {
- KLP_DISABLED,
- KLP_ENABLED
-};
+/* task patch states */
+#define KLP_UNDEFINED -1
+#define KLP_UNPATCHED 0
+#define KLP_PATCHED 1
/**
* struct klp_func - function structure for live patching
@@ -39,10 +40,29 @@ enum klp_state {
* @new_func: pointer to the patched function code
* @old_sympos: a hint indicating which symbol position the old function
* can be found (optional)
+ * @immediate: patch the func immediately, bypassing safety mechanisms
* @old_addr: the address of the function being patched
* @kobj: kobject for sysfs resources
- * @state: tracks function-level patch application state
* @stack_node: list node for klp_ops func_stack list
+ * @old_size: size of the old function
+ * @new_size: size of the new function
+ * @patched: the func has been added to the klp_ops list
+ * @transition: the func is currently being applied or reverted
+ *
+ * The patched and transition variables define the func's patching state. When
+ * patching, a func is always in one of the following states:
+ *
+ * patched=0 transition=0: unpatched
+ * patched=0 transition=1: unpatched, temporary starting state
+ * patched=1 transition=1: patched, may be visible to some tasks
+ * patched=1 transition=0: patched, visible to all tasks
+ *
+ * And when unpatching, it goes in the reverse order:
+ *
+ * patched=1 transition=0: patched, visible to all tasks
+ * patched=1 transition=1: patched, may be visible to some tasks
+ * patched=0 transition=1: unpatched, temporary ending state
+ * patched=0 transition=0: unpatched
*/
struct klp_func {
/* external */
@@ -56,12 +76,15 @@ struct klp_func {
* in kallsyms for the given object is used.
*/
unsigned long old_sympos;
+ bool immediate;
/* internal */
unsigned long old_addr;
struct kobject kobj;
- enum klp_state state;
struct list_head stack_node;
+ unsigned long old_size, new_size;
+ bool patched;
+ bool transition;
};
/**
@@ -70,8 +93,8 @@ struct klp_func {
* @funcs: function entries for functions to be patched in the object
* @kobj: kobject for sysfs resources
* @mod: kernel module associated with the patched object
- * (NULL for vmlinux)
- * @state: tracks object-level patch application state
+ * (NULL for vmlinux)
+ * @patched: the object's funcs have been added to the klp_ops list
*/
struct klp_object {
/* external */
@@ -81,26 +104,30 @@ struct klp_object {
/* internal */
struct kobject kobj;
struct module *mod;
- enum klp_state state;
+ bool patched;
};
/**
* struct klp_patch - patch structure for live patching
* @mod: reference to the live patch module
* @objs: object entries for kernel objects to be patched
+ * @immediate: patch all funcs immediately, bypassing safety mechanisms
* @list: list node for global list of registered patches
* @kobj: kobject for sysfs resources
- * @state: tracks patch-level application state
+ * @enabled: the patch is enabled (but operation may be incomplete)
+ * @finish: for waiting till it is safe to remove the patch module
*/
struct klp_patch {
/* external */
struct module *mod;
struct klp_object *objs;
+ bool immediate;
/* internal */
struct list_head list;
struct kobject kobj;
- enum klp_state state;
+ bool enabled;
+ struct completion finish;
};
#define klp_for_each_object(patch, obj) \
@@ -123,10 +150,27 @@ void arch_klp_init_object_loaded(struct klp_patch *patch,
int klp_module_coming(struct module *mod);
void klp_module_going(struct module *mod);
+void klp_copy_process(struct task_struct *child);
+void klp_update_patch_state(struct task_struct *task);
+
+static inline bool klp_patch_pending(struct task_struct *task)
+{
+ return test_tsk_thread_flag(task, TIF_PATCH_PENDING);
+}
+
+static inline bool klp_have_reliable_stack(void)
+{
+ return IS_ENABLED(CONFIG_STACKTRACE) &&
+ IS_ENABLED(CONFIG_HAVE_RELIABLE_STACKTRACE);
+}
+
#else /* !CONFIG_LIVEPATCH */
static inline int klp_module_coming(struct module *mod) { return 0; }
-static inline void klp_module_going(struct module *mod) { }
+static inline void klp_module_going(struct module *mod) {}
+static inline bool klp_patch_pending(struct task_struct *task) { return false; }
+static inline void klp_update_patch_state(struct task_struct *task) {}
+static inline void klp_copy_process(struct task_struct *child) {}
#endif /* CONFIG_LIVEPATCH */
diff --git a/include/linux/lockd/bind.h b/include/linux/lockd/bind.h
index 140edab64446..05728396a1a1 100644
--- a/include/linux/lockd/bind.h
+++ b/include/linux/lockd/bind.h
@@ -18,6 +18,7 @@
/* Dummy declarations */
struct svc_rqst;
+struct rpc_task;
/*
* This is the set of functions for lockd->nfsd communication
@@ -43,6 +44,7 @@ struct nlmclnt_initdata {
u32 nfs_version;
int noresvport;
struct net *net;
+ const struct nlmclnt_operations *nlmclnt_ops;
};
/*
@@ -52,8 +54,26 @@ struct nlmclnt_initdata {
extern struct nlm_host *nlmclnt_init(const struct nlmclnt_initdata *nlm_init);
extern void nlmclnt_done(struct nlm_host *host);
-extern int nlmclnt_proc(struct nlm_host *host, int cmd,
- struct file_lock *fl);
+/*
+ * NLM client operations provide a means to modify RPC processing of NLM
+ * requests. Callbacks receive a pointer to data passed into the call to
+ * nlmclnt_proc().
+ */
+struct nlmclnt_operations {
+ /* Called on successful allocation of nlm_rqst, use for allocation or
+ * reference counting. */
+ void (*nlmclnt_alloc_call)(void *);
+
+ /* Called in rpc_task_prepare for unlock. A return value of true
+ * indicates the callback has put the task to sleep on a waitqueue
+ * and NLM should not call rpc_call_start(). */
+ bool (*nlmclnt_unlock_prepare)(struct rpc_task*, void *);
+
+ /* Called when the nlm_rqst is freed, callbacks should clean up here */
+ void (*nlmclnt_release_call)(void *);
+};
+
+extern int nlmclnt_proc(struct nlm_host *host, int cmd, struct file_lock *fl, void *data);
extern int lockd_up(struct net *net);
extern void lockd_down(struct net *net);
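A hedged sketch of how an NLM user might wire up these callbacks; the context structure and function names are illustrative, only nlmclnt_operations and nlmclnt_proc() come from this header:

#include <linux/slab.h>
#include <linux/refcount.h>
#include <linux/lockd/bind.h>

struct my_lock_ctx {			/* cookie handed to nlmclnt_proc() */
	refcount_t ref;
};

static void my_nlm_alloc_call(void *data)
{
	/* pin the cookie for the lifetime of the nlm_rqst */
	refcount_inc(&((struct my_lock_ctx *)data)->ref);
}

static void my_nlm_release_call(void *data)
{
	struct my_lock_ctx *ctx = data;

	if (refcount_dec_and_test(&ctx->ref))
		kfree(ctx);
}

static const struct nlmclnt_operations my_nlmclnt_ops = {
	.nlmclnt_alloc_call	= my_nlm_alloc_call,
	.nlmclnt_release_call	= my_nlm_release_call,
};

The operations are advertised once via nlmclnt_initdata.nlmclnt_ops at nlmclnt_init() time, and the per-request cookie is the new fourth argument to nlmclnt_proc().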
diff --git a/include/linux/lockd/lockd.h b/include/linux/lockd/lockd.h
index b37dee3acaba..41f7b6a04d69 100644
--- a/include/linux/lockd/lockd.h
+++ b/include/linux/lockd/lockd.h
@@ -69,6 +69,7 @@ struct nlm_host {
char *h_addrbuf; /* address eyecatcher */
struct net *net; /* host net */
char nodename[UNX_MAXNODENAME + 1];
+ const struct nlmclnt_operations *h_nlmclnt_ops; /* Callback ops for NLM users */
};
/*
@@ -142,6 +143,7 @@ struct nlm_rqst {
struct nlm_block * a_block;
unsigned int a_retries; /* Retry count */
u8 a_owner[NLMCLNT_OHSIZE];
+ void * a_callback_data; /* sent to nlmclnt_operations callbacks */
};
/*
diff --git a/include/linux/lockdep.h b/include/linux/lockdep.h
index 1e327bb80838..fffe49f188e6 100644
--- a/include/linux/lockdep.h
+++ b/include/linux/lockdep.h
@@ -361,6 +361,8 @@ static inline void lock_set_subclass(struct lockdep_map *lock,
lock_set_class(lock, lock->name, lock->key, subclass, ip);
}
+extern void lock_downgrade(struct lockdep_map *lock, unsigned long ip);
+
extern void lockdep_set_current_reclaim_state(gfp_t gfp_mask);
extern void lockdep_clear_current_reclaim_state(void);
extern void lockdep_trace_alloc(gfp_t mask);
@@ -411,6 +413,7 @@ static inline void lockdep_on(void)
# define lock_acquire(l, s, t, r, c, n, i) do { } while (0)
# define lock_release(l, n, i) do { } while (0)
+# define lock_downgrade(l, i) do { } while (0)
# define lock_set_class(l, n, k, s, i) do { } while (0)
# define lock_set_subclass(l, s, i) do { } while (0)
# define lockdep_set_current_reclaim_state(g) do { } while (0)
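lock_downgrade() is the lockdep side of a write-to-read lock downgrade; a small sketch of the pattern it annotates (the rwsem and function are illustrative):

#include <linux/rwsem.h>

static DECLARE_RWSEM(my_sem);

static void publish_then_scan(void)
{
	down_write(&my_sem);
	/* ... update the protected structure ... */

	/* downgrade_write() keeps the lock held for reading and is the
	 * caller that reports lock_downgrade() to lockdep */
	downgrade_write(&my_sem);

	/* ... readers may now run concurrently with this scan ... */
	up_read(&my_sem);
}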
diff --git a/include/linux/lsm_hooks.h b/include/linux/lsm_hooks.h
index e29d4c62a3c8..080f34e66017 100644
--- a/include/linux/lsm_hooks.h
+++ b/include/linux/lsm_hooks.h
@@ -533,8 +533,13 @@
* manual page for definitions of the @clone_flags.
* @clone_flags contains the flags indicating what should be shared.
* Return 0 if permission is granted.
+ * @task_alloc:
+ * @task task being allocated.
+ * @clone_flags contains the flags indicating what should be shared.
+ * Handle allocation of task-related resources.
+ * Return 0 on success, negative values on failure.
* @task_free:
- * @task task being freed
+ * @task task about to be freed.
* Handle release of task-related resources. (Note that this can be called
* from interrupt context.)
* @cred_alloc_blank:
@@ -630,10 +635,19 @@
* Check permission before getting the ioprio value of @p.
* @p contains the task_struct of process.
* Return 0 if permission is granted.
+ * @task_prlimit:
+ * Check permission before getting and/or setting the resource limits of
+ * another task.
+ * @cred points to the cred structure for the current task.
+ * @tcred points to the cred structure for the target task.
+ * @flags contains the LSM_PRLIMIT_* flag bits indicating whether the
+ * resource limits are being read, modified, or both.
+ * Return 0 if permission is granted.
* @task_setrlimit:
- * Check permission before setting the resource limits of the current
- * process for @resource to @new_rlim. The old resource limit values can
- * be examined by dereferencing (current->signal->rlim + resource).
+ * Check permission before setting the resource limits of process @p
+ * for @resource to @new_rlim. The old resource limit values can
+ * be examined by dereferencing (p->signal->rlim + resource).
+ * @p points to the task_struct for the target task's group leader.
* @resource contains the resource whose limit is being set.
* @new_rlim contains the new limits for @resource.
* Return 0 if permission is granted.
@@ -1473,6 +1487,7 @@ union security_list_options {
int (*file_open)(struct file *file, const struct cred *cred);
int (*task_create)(unsigned long clone_flags);
+ int (*task_alloc)(struct task_struct *task, unsigned long clone_flags);
void (*task_free)(struct task_struct *task);
int (*cred_alloc_blank)(struct cred *cred, gfp_t gfp);
void (*cred_free)(struct cred *cred);
@@ -1494,6 +1509,8 @@ union security_list_options {
int (*task_setnice)(struct task_struct *p, int nice);
int (*task_setioprio)(struct task_struct *p, int ioprio);
int (*task_getioprio)(struct task_struct *p);
+ int (*task_prlimit)(const struct cred *cred, const struct cred *tcred,
+ unsigned int flags);
int (*task_setrlimit)(struct task_struct *p, unsigned int resource,
struct rlimit *new_rlim);
int (*task_setscheduler)(struct task_struct *p);
@@ -1737,6 +1754,7 @@ struct security_hook_heads {
struct list_head file_receive;
struct list_head file_open;
struct list_head task_create;
+ struct list_head task_alloc;
struct list_head task_free;
struct list_head cred_alloc_blank;
struct list_head cred_free;
@@ -1755,6 +1773,7 @@ struct security_hook_heads {
struct list_head task_setnice;
struct list_head task_setioprio;
struct list_head task_getioprio;
+ struct list_head task_prlimit;
struct list_head task_setrlimit;
struct list_head task_setscheduler;
struct list_head task_getscheduler;
@@ -1908,6 +1927,13 @@ static inline void security_delete_hooks(struct security_hook_list *hooks,
}
#endif /* CONFIG_SECURITY_SELINUX_DISABLE */
+/* Currently required to handle SELinux runtime hook disable. */
+#ifdef CONFIG_SECURITY_WRITABLE_HOOKS
+#define __lsm_ro_after_init
+#else
+#define __lsm_ro_after_init __ro_after_init
+#endif /* CONFIG_SECURITY_WRITABLE_HOOKS */
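A hedged sketch of how a security module would use the new marker and hooks; the module name and policy are illustrative, only __lsm_ro_after_init, LSM_HOOK_INIT() and the hook prototypes come from this header:

#include <linux/lsm_hooks.h>

static int mylsm_task_alloc(struct task_struct *task, unsigned long clone_flags)
{
	/* allocate/attach per-task security state here */
	return 0;
}

static int mylsm_task_prlimit(const struct cred *cred, const struct cred *tcred,
			      unsigned int flags)
{
	/* illustrative policy: only self-directed prlimit() is allowed */
	return cred == tcred ? 0 : -EPERM;
}

static struct security_hook_list mylsm_hooks[] __lsm_ro_after_init = {
	LSM_HOOK_INIT(task_alloc, mylsm_task_alloc),
	LSM_HOOK_INIT(task_prlimit, mylsm_task_prlimit),
};

Unless a module needs runtime hook removal (the SELinux-disable case covered by CONFIG_SECURITY_WRITABLE_HOOKS), the table ends up read-only after init.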
+
extern int __init security_module_enable(const char *module);
extern void __init capability_add_hooks(void);
#ifdef CONFIG_SECURITY_YAMA
diff --git a/include/linux/mailbox/brcm-message.h b/include/linux/mailbox/brcm-message.h
index 6b55c938b401..c20b4843fc2d 100644
--- a/include/linux/mailbox/brcm-message.h
+++ b/include/linux/mailbox/brcm-message.h
@@ -16,6 +16,7 @@
enum brcm_message_type {
BRCM_MESSAGE_UNKNOWN = 0,
+ BRCM_MESSAGE_BATCH,
BRCM_MESSAGE_SPU,
BRCM_MESSAGE_SBA,
BRCM_MESSAGE_MAX,
@@ -23,24 +24,29 @@ enum brcm_message_type {
struct brcm_sba_command {
u64 cmd;
+ u64 *cmd_dma;
+ dma_addr_t cmd_dma_addr;
#define BRCM_SBA_CMD_TYPE_A BIT(0)
#define BRCM_SBA_CMD_TYPE_B BIT(1)
#define BRCM_SBA_CMD_TYPE_C BIT(2)
#define BRCM_SBA_CMD_HAS_RESP BIT(3)
#define BRCM_SBA_CMD_HAS_OUTPUT BIT(4)
u64 flags;
- dma_addr_t input;
- size_t input_len;
dma_addr_t resp;
size_t resp_len;
- dma_addr_t output;
- size_t output_len;
+ dma_addr_t data;
+ size_t data_len;
};
struct brcm_message {
enum brcm_message_type type;
union {
struct {
+ struct brcm_message *msgs;
+ unsigned int msgs_queued;
+ unsigned int msgs_count;
+ } batch;
+ struct {
struct scatterlist *src;
struct scatterlist *dst;
} spu;
diff --git a/include/linux/memblock.h b/include/linux/memblock.h
index bdfc65af4152..4ce24a376262 100644
--- a/include/linux/memblock.h
+++ b/include/linux/memblock.h
@@ -93,6 +93,7 @@ int memblock_mark_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_clear_hotplug(phys_addr_t base, phys_addr_t size);
int memblock_mark_mirror(phys_addr_t base, phys_addr_t size);
int memblock_mark_nomap(phys_addr_t base, phys_addr_t size);
+int memblock_clear_nomap(phys_addr_t base, phys_addr_t size);
ulong choose_memblock_flags(void);
/* Low level functions */
@@ -335,6 +336,7 @@ phys_addr_t memblock_mem_size(unsigned long limit_pfn);
phys_addr_t memblock_start_of_DRAM(void);
phys_addr_t memblock_end_of_DRAM(void);
void memblock_enforce_memory_limit(phys_addr_t memory_limit);
+void memblock_cap_memory_range(phys_addr_t base, phys_addr_t size);
void memblock_mem_limit_remove_map(phys_addr_t limit);
bool memblock_is_memory(phys_addr_t addr);
int memblock_is_map_memory(phys_addr_t addr);
diff --git a/include/linux/memcontrol.h b/include/linux/memcontrol.h
index bb7250c45cb8..899949bbb2f9 100644
--- a/include/linux/memcontrol.h
+++ b/include/linux/memcontrol.h
@@ -35,48 +35,43 @@ struct page;
struct mm_struct;
struct kmem_cache;
-/*
- * The corresponding mem_cgroup_stat_names is defined in mm/memcontrol.c,
- * These two lists should keep in accord with each other.
- */
-enum mem_cgroup_stat_index {
- /*
- * For MEM_CONTAINER_TYPE_ALL, usage = pagecache + rss.
- */
- MEM_CGROUP_STAT_CACHE, /* # of pages charged as cache */
- MEM_CGROUP_STAT_RSS, /* # of pages charged as anon rss */
- MEM_CGROUP_STAT_RSS_HUGE, /* # of pages charged as anon huge */
- MEM_CGROUP_STAT_FILE_MAPPED, /* # of pages charged as file rss */
- MEM_CGROUP_STAT_DIRTY, /* # of dirty pages in page cache */
- MEM_CGROUP_STAT_WRITEBACK, /* # of pages under writeback */
- MEM_CGROUP_STAT_SWAP, /* # of pages, swapped out */
- MEM_CGROUP_STAT_NSTATS,
- /* default hierarchy stats */
- MEMCG_KERNEL_STACK_KB = MEM_CGROUP_STAT_NSTATS,
+/* Cgroup-specific page state, on top of universal node page state */
+enum memcg_stat_item {
+ MEMCG_CACHE = NR_VM_NODE_STAT_ITEMS,
+ MEMCG_RSS,
+ MEMCG_RSS_HUGE,
+ MEMCG_SWAP,
+ MEMCG_SOCK,
+ /* XXX: why are these zone and not node counters? */
+ MEMCG_KERNEL_STACK_KB,
MEMCG_SLAB_RECLAIMABLE,
MEMCG_SLAB_UNRECLAIMABLE,
- MEMCG_SOCK,
MEMCG_NR_STAT,
};
+/* Cgroup-specific events, on top of universal VM events */
+enum memcg_event_item {
+ MEMCG_LOW = NR_VM_EVENT_ITEMS,
+ MEMCG_HIGH,
+ MEMCG_MAX,
+ MEMCG_OOM,
+ MEMCG_NR_EVENTS,
+};
+
struct mem_cgroup_reclaim_cookie {
pg_data_t *pgdat;
int priority;
unsigned int generation;
};
-enum mem_cgroup_events_index {
- MEM_CGROUP_EVENTS_PGPGIN, /* # of pages paged in */
- MEM_CGROUP_EVENTS_PGPGOUT, /* # of pages paged out */
- MEM_CGROUP_EVENTS_PGFAULT, /* # of page-faults */
- MEM_CGROUP_EVENTS_PGMAJFAULT, /* # of major page-faults */
- MEM_CGROUP_EVENTS_NSTATS,
- /* default hierarchy events */
- MEMCG_LOW = MEM_CGROUP_EVENTS_NSTATS,
- MEMCG_HIGH,
- MEMCG_MAX,
- MEMCG_OOM,
- MEMCG_NR_EVENTS,
+#ifdef CONFIG_MEMCG
+
+#define MEM_CGROUP_ID_SHIFT 16
+#define MEM_CGROUP_ID_MAX USHRT_MAX
+
+struct mem_cgroup_id {
+ int id;
+ atomic_t ref;
};
/*
@@ -92,16 +87,6 @@ enum mem_cgroup_events_target {
MEM_CGROUP_NTARGETS,
};
-#ifdef CONFIG_MEMCG
-
-#define MEM_CGROUP_ID_SHIFT 16
-#define MEM_CGROUP_ID_MAX USHRT_MAX
-
-struct mem_cgroup_id {
- int id;
- atomic_t ref;
-};
-
struct mem_cgroup_stat_cpu {
long count[MEMCG_NR_STAT];
unsigned long events[MEMCG_NR_EVENTS];
@@ -283,17 +268,10 @@ static inline bool mem_cgroup_disabled(void)
return !cgroup_subsys_enabled(memory_cgrp_subsys);
}
-/**
- * mem_cgroup_events - count memory events against a cgroup
- * @memcg: the memory cgroup
- * @idx: the event index
- * @nr: the number of events to account for
- */
-static inline void mem_cgroup_events(struct mem_cgroup *memcg,
- enum mem_cgroup_events_index idx,
- unsigned int nr)
+static inline void mem_cgroup_event(struct mem_cgroup *memcg,
+ enum memcg_event_item event)
{
- this_cpu_add(memcg->stat->events[idx], nr);
+ this_cpu_inc(memcg->stat->events[event]);
cgroup_file_notify(&memcg->events_file);
}
@@ -494,8 +472,42 @@ extern int do_swap_account;
void lock_page_memcg(struct page *page);
void unlock_page_memcg(struct page *page);
+static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx)
+{
+ long val = 0;
+ int cpu;
+
+ for_each_possible_cpu(cpu)
+ val += per_cpu(memcg->stat->count[idx], cpu);
+
+ if (val < 0)
+ val = 0;
+
+ return val;
+}
+
+static inline void mod_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx, int val)
+{
+ if (!mem_cgroup_disabled())
+ this_cpu_add(memcg->stat->count[idx], val);
+}
+
+static inline void inc_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx)
+{
+ mod_memcg_state(memcg, idx, 1);
+}
+
+static inline void dec_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx)
+{
+ mod_memcg_state(memcg, idx, -1);
+}
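A hedged sketch of the intended calling pattern for the new helpers (the choice of MEMCG_CACHE is illustrative):

#include <linux/memcontrol.h>

/* charge side: per-cpu add, a no-op when memcg is disabled */
static void account_cache_pages(struct mem_cgroup *memcg, int nr_pages)
{
	mod_memcg_state(memcg, MEMCG_CACHE, nr_pages);
}

/* read side: sums the per-cpu counters, clamping transient negatives */
static unsigned long cached_pages(struct mem_cgroup *memcg)
{
	return memcg_page_state(memcg, MEMCG_CACHE);
}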
+
/**
- * mem_cgroup_update_page_stat - update page state statistics
+ * mod_memcg_page_state - update page state statistics
* @page: the page
* @idx: page state item to account
* @val: number of pages (positive or negative)
@@ -506,28 +518,28 @@ void unlock_page_memcg(struct page *page);
*
* lock_page(page) or lock_page_memcg(page)
* if (TestClearPageState(page))
- * mem_cgroup_update_page_stat(page, state, -1);
+ * mod_memcg_page_state(page, state, -1);
* unlock_page(page) or unlock_page_memcg(page)
+ *
+ * Kernel pages are an exception to this, since they'll never move.
*/
-static inline void mem_cgroup_update_page_stat(struct page *page,
- enum mem_cgroup_stat_index idx, int val)
+static inline void mod_memcg_page_state(struct page *page,
+ enum memcg_stat_item idx, int val)
{
- VM_BUG_ON(!(rcu_read_lock_held() || PageLocked(page)));
-
if (page->mem_cgroup)
- this_cpu_add(page->mem_cgroup->stat->count[idx], val);
+ mod_memcg_state(page->mem_cgroup, idx, val);
}
-static inline void mem_cgroup_inc_page_stat(struct page *page,
- enum mem_cgroup_stat_index idx)
+static inline void inc_memcg_page_state(struct page *page,
+ enum memcg_stat_item idx)
{
- mem_cgroup_update_page_stat(page, idx, 1);
+ mod_memcg_page_state(page, idx, 1);
}
-static inline void mem_cgroup_dec_page_stat(struct page *page,
- enum mem_cgroup_stat_index idx)
+static inline void dec_memcg_page_state(struct page *page,
+ enum memcg_stat_item idx)
{
- mem_cgroup_update_page_stat(page, idx, -1);
+ mod_memcg_page_state(page, idx, -1);
}
unsigned long mem_cgroup_soft_limit_reclaim(pg_data_t *pgdat, int order,
@@ -544,20 +556,8 @@ static inline void mem_cgroup_count_vm_event(struct mm_struct *mm,
rcu_read_lock();
memcg = mem_cgroup_from_task(rcu_dereference(mm->owner));
- if (unlikely(!memcg))
- goto out;
-
- switch (idx) {
- case PGFAULT:
- this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGFAULT]);
- break;
- case PGMAJFAULT:
- this_cpu_inc(memcg->stat->events[MEM_CGROUP_EVENTS_PGMAJFAULT]);
- break;
- default:
- BUG();
- }
-out:
+ if (likely(memcg))
+ this_cpu_inc(memcg->stat->events[idx]);
rcu_read_unlock();
}
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
@@ -576,9 +576,8 @@ static inline bool mem_cgroup_disabled(void)
return true;
}
-static inline void mem_cgroup_events(struct mem_cgroup *memcg,
- enum mem_cgroup_events_index idx,
- unsigned int nr)
+static inline void mem_cgroup_event(struct mem_cgroup *memcg,
+ enum memcg_event_item event)
{
}
@@ -740,19 +739,41 @@ static inline bool mem_cgroup_oom_synchronize(bool wait)
return false;
}
-static inline void mem_cgroup_update_page_stat(struct page *page,
- enum mem_cgroup_stat_index idx,
- int nr)
+static inline unsigned long memcg_page_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx)
+{
+ return 0;
+}
+
+static inline void mod_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx,
+ int nr)
+{
+}
+
+static inline void inc_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx)
+{
+}
+
+static inline void dec_memcg_state(struct mem_cgroup *memcg,
+ enum memcg_stat_item idx)
+{
+}
+
+static inline void mod_memcg_page_state(struct page *page,
+ enum memcg_stat_item idx,
+ int nr)
{
}
-static inline void mem_cgroup_inc_page_stat(struct page *page,
- enum mem_cgroup_stat_index idx)
+static inline void inc_memcg_page_state(struct page *page,
+ enum memcg_stat_item idx)
{
}
-static inline void mem_cgroup_dec_page_stat(struct page *page,
- enum mem_cgroup_stat_index idx)
+static inline void dec_memcg_page_state(struct page *page,
+ enum memcg_stat_item idx)
{
}
@@ -872,7 +893,7 @@ static inline int memcg_cache_id(struct mem_cgroup *memcg)
* @val: number of pages (positive or negative)
*/
static inline void memcg_kmem_update_page_stat(struct page *page,
- enum mem_cgroup_stat_index idx, int val)
+ enum memcg_stat_item idx, int val)
{
if (memcg_kmem_enabled() && page->mem_cgroup)
this_cpu_add(page->mem_cgroup->stat->count[idx], val);
@@ -901,7 +922,7 @@ static inline void memcg_put_cache_ids(void)
}
static inline void memcg_kmem_update_page_stat(struct page *page,
- enum mem_cgroup_stat_index idx, int val)
+ enum memcg_stat_item idx, int val)
{
}
#endif /* CONFIG_MEMCG && !CONFIG_SLOB */
diff --git a/include/linux/mfd/arizona/pdata.h b/include/linux/mfd/arizona/pdata.h
index 64faeeff698c..bfeecf179895 100644
--- a/include/linux/mfd/arizona/pdata.h
+++ b/include/linux/mfd/arizona/pdata.h
@@ -12,6 +12,8 @@
#define _ARIZONA_PDATA_H
#include <dt-bindings/mfd/arizona.h>
+#include <linux/regulator/arizona-ldo1.h>
+#include <linux/regulator/arizona-micsupp.h>
#define ARIZONA_GPN_DIR_MASK 0x8000 /* GPN_DIR */
#define ARIZONA_GPN_DIR_SHIFT 15 /* GPN_DIR */
@@ -76,13 +78,12 @@ struct arizona_micd_range {
struct arizona_pdata {
int reset; /** GPIO controlling /RESET, if any */
- int ldoena; /** GPIO controlling LODENA, if any */
/** Regulator configuration for MICVDD */
- struct regulator_init_data *micvdd;
+ struct arizona_micsupp_pdata micvdd;
/** Regulator configuration for LDO1 */
- struct regulator_init_data *ldo1;
+ struct arizona_ldo1_pdata ldo1;
/** If a direct 32kHz clock is provided on an MCLK specify it here */
int clk32k_src;
diff --git a/include/linux/mfd/axp20x.h b/include/linux/mfd/axp20x.h
index 0d9a1ff38393..cde56cfe8446 100644
--- a/include/linux/mfd/axp20x.h
+++ b/include/linux/mfd/axp20x.h
@@ -20,6 +20,7 @@ enum axp20x_variants {
AXP221_ID,
AXP223_ID,
AXP288_ID,
+ AXP803_ID,
AXP806_ID,
AXP809_ID,
NR_AXP20X_VARIANTS,
@@ -228,13 +229,13 @@ enum axp20x_variants {
#define AXP20X_OCV_MAX 0xf
/* AXP22X specific registers */
-#define AXP22X_PMIC_ADC_H 0x56
-#define AXP22X_PMIC_ADC_L 0x57
+#define AXP22X_PMIC_TEMP_H 0x56
+#define AXP22X_PMIC_TEMP_L 0x57
#define AXP22X_TS_ADC_H 0x58
#define AXP22X_TS_ADC_L 0x59
#define AXP22X_BATLOW_THRES1 0xe6
-/* AXP288 specific registers */
+/* AXP288/AXP803 specific registers */
#define AXP288_POWER_REASON 0x02
#define AXP288_BC_GLOBAL 0x2c
#define AXP288_BC_VBUS_CNTL 0x2d
@@ -475,6 +476,43 @@ enum axp288_irqs {
AXP288_IRQ_BC_USB_CHNG,
};
+enum axp803_irqs {
+ AXP803_IRQ_ACIN_OVER_V = 1,
+ AXP803_IRQ_ACIN_PLUGIN,
+ AXP803_IRQ_ACIN_REMOVAL,
+ AXP803_IRQ_VBUS_OVER_V,
+ AXP803_IRQ_VBUS_PLUGIN,
+ AXP803_IRQ_VBUS_REMOVAL,
+ AXP803_IRQ_BATT_PLUGIN,
+ AXP803_IRQ_BATT_REMOVAL,
+ AXP803_IRQ_BATT_ENT_ACT_MODE,
+ AXP803_IRQ_BATT_EXIT_ACT_MODE,
+ AXP803_IRQ_CHARG,
+ AXP803_IRQ_CHARG_DONE,
+ AXP803_IRQ_BATT_CHG_TEMP_HIGH,
+ AXP803_IRQ_BATT_CHG_TEMP_HIGH_END,
+ AXP803_IRQ_BATT_CHG_TEMP_LOW,
+ AXP803_IRQ_BATT_CHG_TEMP_LOW_END,
+ AXP803_IRQ_BATT_ACT_TEMP_HIGH,
+ AXP803_IRQ_BATT_ACT_TEMP_HIGH_END,
+ AXP803_IRQ_BATT_ACT_TEMP_LOW,
+ AXP803_IRQ_BATT_ACT_TEMP_LOW_END,
+ AXP803_IRQ_DIE_TEMP_HIGH,
+ AXP803_IRQ_GPADC,
+ AXP803_IRQ_LOW_PWR_LVL1,
+ AXP803_IRQ_LOW_PWR_LVL2,
+ AXP803_IRQ_TIMER,
+ AXP803_IRQ_PEK_RIS_EDGE,
+ AXP803_IRQ_PEK_FAL_EDGE,
+ AXP803_IRQ_PEK_SHORT,
+ AXP803_IRQ_PEK_LONG,
+ AXP803_IRQ_PEK_OVER_OFF,
+ AXP803_IRQ_GPIO1_INPUT,
+ AXP803_IRQ_GPIO0_INPUT,
+ AXP803_IRQ_BC_USB_CHNG,
+ AXP803_IRQ_MV_CHNG,
+};
+
enum axp806_irqs {
AXP806_IRQ_DIE_TEMP_HIGH_LV1,
AXP806_IRQ_DIE_TEMP_HIGH_LV2,
diff --git a/include/linux/mfd/cros_ec.h b/include/linux/mfd/cros_ec.h
index 3eef9fb9968a..28baee63eaf6 100644
--- a/include/linux/mfd/cros_ec.h
+++ b/include/linux/mfd/cros_ec.h
@@ -305,4 +305,22 @@ extern struct attribute_group cros_ec_attr_group;
extern struct attribute_group cros_ec_lightbar_attr_group;
extern struct attribute_group cros_ec_vbc_attr_group;
+/* ACPI GPE handler */
+#ifdef CONFIG_ACPI
+
+int cros_ec_acpi_install_gpe_handler(struct device *dev);
+void cros_ec_acpi_remove_gpe_handler(void);
+void cros_ec_acpi_clear_gpe(void);
+
+#else /* CONFIG_ACPI */
+
+static inline int cros_ec_acpi_install_gpe_handler(struct device *dev)
+{
+ return -ENODEV;
+}
+static inline void cros_ec_acpi_remove_gpe_handler(void) {}
+static inline void cros_ec_acpi_clear_gpe(void) {}
+
+#endif /* CONFIG_ACPI */
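A hedged sketch of how the EC platform driver's probe/teardown paths would use these (names and error handling are illustrative):

#include <linux/device.h>
#include <linux/mfd/cros_ec.h>

static int my_cros_ec_probe(struct device *dev)
{
	int ret;

	ret = cros_ec_acpi_install_gpe_handler(dev);
	if (ret < 0)
		dev_warn(dev, "ACPI GPE handler not installed: %d\n", ret);

	/* ... rest of probe; the !CONFIG_ACPI stub simply returns -ENODEV ... */
	return 0;
}

static void my_cros_ec_remove(struct device *dev)
{
	cros_ec_acpi_clear_gpe();
	cros_ec_acpi_remove_gpe_handler();
}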
+
#endif /* __LINUX_MFD_CROS_EC_H */
diff --git a/include/linux/mfd/cros_ec_commands.h b/include/linux/mfd/cros_ec_commands.h
index f1ef6388c233..c93e7e0300ef 100644
--- a/include/linux/mfd/cros_ec_commands.h
+++ b/include/linux/mfd/cros_ec_commands.h
@@ -2041,6 +2041,9 @@ enum ec_mkbp_event {
/* The state of the switches have changed. */
EC_MKBP_EVENT_SWITCH = 4,
+ /* EC sent a sysrq command */
+ EC_MKBP_EVENT_SYSRQ = 6,
+
/* Number of MKBP events */
EC_MKBP_EVENT_COUNT,
};
@@ -2053,6 +2056,7 @@ union ec_response_get_next_data {
uint32_t buttons;
uint32_t switches;
+ uint32_t sysrq;
} __packed;
struct ec_response_get_next_event {
diff --git a/include/linux/mfd/da9062/core.h b/include/linux/mfd/da9062/core.h
index 376ba84366a0..74d33a01ddae 100644
--- a/include/linux/mfd/da9062/core.h
+++ b/include/linux/mfd/da9062/core.h
@@ -1,5 +1,5 @@
/*
- * Copyright (C) 2015 Dialog Semiconductor Ltd.
+ * Copyright (C) 2015-2017 Dialog Semiconductor
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -18,7 +18,31 @@
#include <linux/interrupt.h>
#include <linux/mfd/da9062/registers.h>
-/* Interrupts */
+enum da9062_compatible_types {
+ COMPAT_TYPE_DA9061 = 1,
+ COMPAT_TYPE_DA9062,
+};
+
+enum da9061_irqs {
+ /* IRQ A */
+ DA9061_IRQ_ONKEY,
+ DA9061_IRQ_WDG_WARN,
+ DA9061_IRQ_SEQ_RDY,
+ /* IRQ B*/
+ DA9061_IRQ_TEMP,
+ DA9061_IRQ_LDO_LIM,
+ DA9061_IRQ_DVC_RDY,
+ DA9061_IRQ_VDD_WARN,
+ /* IRQ C */
+ DA9061_IRQ_GPI0,
+ DA9061_IRQ_GPI1,
+ DA9061_IRQ_GPI2,
+ DA9061_IRQ_GPI3,
+ DA9061_IRQ_GPI4,
+
+ DA9061_NUM_IRQ,
+};
+
enum da9062_irqs {
/* IRQ A */
DA9062_IRQ_ONKEY,
@@ -45,6 +69,7 @@ struct da9062 {
struct device *dev;
struct regmap *regmap;
struct regmap_irq_chip_data *regmap_irq;
+ enum da9062_compatible_types chip_type;
};
#endif /* __MFD_DA9062_CORE_H__ */
diff --git a/include/linux/mfd/da9062/registers.h b/include/linux/mfd/da9062/registers.h
index 97790d1b02c5..18d576aed902 100644
--- a/include/linux/mfd/da9062/registers.h
+++ b/include/linux/mfd/da9062/registers.h
@@ -1,6 +1,5 @@
/*
- * registers.h - REGISTERS H for DA9062
- * Copyright (C) 2015 Dialog Semiconductor Ltd.
+ * Copyright (C) 2015-2017 Dialog Semiconductor
*
* This program is free software; you can redistribute it and/or
* modify it under the terms of the GNU General Public License
@@ -18,6 +17,8 @@
#define DA9062_PMIC_DEVICE_ID 0x62
#define DA9062_PMIC_VARIANT_MRC_AA 0x01
+#define DA9062_PMIC_VARIANT_VRC_DA9061 0x01
+#define DA9062_PMIC_VARIANT_VRC_DA9062 0x02
#define DA9062_I2C_PAGE_SEL_SHIFT 1
diff --git a/include/linux/mfd/intel_bxtwc.h b/include/linux/mfd/intel_soc_pmic_bxtwc.h
index 1a0ee9d6efe9..0c351bc85d2d 100644
--- a/include/linux/mfd/intel_bxtwc.h
+++ b/include/linux/mfd/intel_soc_pmic_bxtwc.h
@@ -1,5 +1,5 @@
/*
- * intel_bxtwc.h - Header file for Intel Broxton Whiskey Cove PMIC
+ * Header file for Intel Broxton Whiskey Cove PMIC
*
* Copyright (C) 2015 Intel Corporation. All rights reserved.
*
@@ -13,8 +13,6 @@
* more details.
*/
-#include <linux/mfd/intel_soc_pmic.h>
-
#ifndef __INTEL_BXTWC_H__
#define __INTEL_BXTWC_H__
diff --git a/include/linux/mfd/motorola-cpcap.h b/include/linux/mfd/motorola-cpcap.h
index b4031c2b2214..aefc49cb7ba9 100644
--- a/include/linux/mfd/motorola-cpcap.h
+++ b/include/linux/mfd/motorola-cpcap.h
@@ -14,6 +14,9 @@
* published by the Free Software Foundation.
*/
+#include <linux/device.h>
+#include <linux/regmap.h>
+
#define CPCAP_VENDOR_ST 0
#define CPCAP_VENDOR_TI 1
@@ -290,3 +293,5 @@ static inline int cpcap_get_vendor(struct device *dev,
return 0;
}
+
+extern int cpcap_sense_virq(struct regmap *regmap, int virq);
diff --git a/include/linux/mfd/mxs-lradc.h b/include/linux/mfd/mxs-lradc.h
new file mode 100644
index 000000000000..661a4521f723
--- /dev/null
+++ b/include/linux/mfd/mxs-lradc.h
@@ -0,0 +1,187 @@
+/*
+ * Freescale MXS Low Resolution Analog-to-Digital Converter driver
+ *
+ * Copyright (c) 2012 DENX Software Engineering, GmbH.
+ * Copyright (c) 2016 Ksenija Stanojevic <ksenija.stanojevic@gmail.com>
+ *
+ * Author: Marek Vasut <marex@denx.de>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __MFD_MXS_LRADC_H
+#define __MFD_MXS_LRADC_H
+
+#include <linux/bitops.h>
+#include <linux/io.h>
+#include <linux/stmp_device.h>
+
+#define LRADC_MAX_DELAY_CHANS 4
+#define LRADC_MAX_MAPPED_CHANS 8
+#define LRADC_MAX_TOTAL_CHANS 16
+
+#define LRADC_DELAY_TIMER_HZ 2000
+
+#define LRADC_CTRL0 0x00
+# define LRADC_CTRL0_MX28_TOUCH_DETECT_ENABLE BIT(23)
+# define LRADC_CTRL0_MX28_TOUCH_SCREEN_TYPE BIT(22)
+# define LRADC_CTRL0_MX28_YNNSW /* YM */ BIT(21)
+# define LRADC_CTRL0_MX28_YPNSW /* YP */ BIT(20)
+# define LRADC_CTRL0_MX28_YPPSW /* YP */ BIT(19)
+# define LRADC_CTRL0_MX28_XNNSW /* XM */ BIT(18)
+# define LRADC_CTRL0_MX28_XNPSW /* XM */ BIT(17)
+# define LRADC_CTRL0_MX28_XPPSW /* XP */ BIT(16)
+
+# define LRADC_CTRL0_MX23_TOUCH_DETECT_ENABLE BIT(20)
+# define LRADC_CTRL0_MX23_YM BIT(19)
+# define LRADC_CTRL0_MX23_XM BIT(18)
+# define LRADC_CTRL0_MX23_YP BIT(17)
+# define LRADC_CTRL0_MX23_XP BIT(16)
+
+# define LRADC_CTRL0_MX28_PLATE_MASK \
+ (LRADC_CTRL0_MX28_TOUCH_DETECT_ENABLE | \
+ LRADC_CTRL0_MX28_YNNSW | LRADC_CTRL0_MX28_YPNSW | \
+ LRADC_CTRL0_MX28_YPPSW | LRADC_CTRL0_MX28_XNNSW | \
+ LRADC_CTRL0_MX28_XNPSW | LRADC_CTRL0_MX28_XPPSW)
+
+# define LRADC_CTRL0_MX23_PLATE_MASK \
+ (LRADC_CTRL0_MX23_TOUCH_DETECT_ENABLE | \
+ LRADC_CTRL0_MX23_YM | LRADC_CTRL0_MX23_XM | \
+ LRADC_CTRL0_MX23_YP | LRADC_CTRL0_MX23_XP)
+
+#define LRADC_CTRL1 0x10
+#define LRADC_CTRL1_TOUCH_DETECT_IRQ_EN BIT(24)
+#define LRADC_CTRL1_LRADC_IRQ_EN(n) (1 << ((n) + 16))
+#define LRADC_CTRL1_MX28_LRADC_IRQ_EN_MASK (0x1fff << 16)
+#define LRADC_CTRL1_MX23_LRADC_IRQ_EN_MASK (0x01ff << 16)
+#define LRADC_CTRL1_LRADC_IRQ_EN_OFFSET 16
+#define LRADC_CTRL1_TOUCH_DETECT_IRQ BIT(8)
+#define LRADC_CTRL1_LRADC_IRQ(n) BIT(n)
+#define LRADC_CTRL1_MX28_LRADC_IRQ_MASK 0x1fff
+#define LRADC_CTRL1_MX23_LRADC_IRQ_MASK 0x01ff
+#define LRADC_CTRL1_LRADC_IRQ_OFFSET 0
+
+#define LRADC_CTRL2 0x20
+#define LRADC_CTRL2_DIVIDE_BY_TWO_OFFSET 24
+#define LRADC_CTRL2_TEMPSENSE_PWD BIT(15)
+
+#define LRADC_STATUS 0x40
+#define LRADC_STATUS_TOUCH_DETECT_RAW BIT(0)
+
+#define LRADC_CH(n) (0x50 + (0x10 * (n)))
+#define LRADC_CH_ACCUMULATE BIT(29)
+#define LRADC_CH_NUM_SAMPLES_MASK (0x1f << 24)
+#define LRADC_CH_NUM_SAMPLES_OFFSET 24
+#define LRADC_CH_NUM_SAMPLES(x) \
+ ((x) << LRADC_CH_NUM_SAMPLES_OFFSET)
+#define LRADC_CH_VALUE_MASK 0x3ffff
+#define LRADC_CH_VALUE_OFFSET 0
+
+#define LRADC_DELAY(n) (0xd0 + (0x10 * (n)))
+#define LRADC_DELAY_TRIGGER_LRADCS_MASK (0xffUL << 24)
+#define LRADC_DELAY_TRIGGER_LRADCS_OFFSET 24
+#define LRADC_DELAY_TRIGGER(x) \
+ (((x) << LRADC_DELAY_TRIGGER_LRADCS_OFFSET) & \
+ LRADC_DELAY_TRIGGER_LRADCS_MASK)
+#define LRADC_DELAY_KICK BIT(20)
+#define LRADC_DELAY_TRIGGER_DELAYS_MASK (0xf << 16)
+#define LRADC_DELAY_TRIGGER_DELAYS_OFFSET 16
+#define LRADC_DELAY_TRIGGER_DELAYS(x) \
+ (((x) << LRADC_DELAY_TRIGGER_DELAYS_OFFSET) & \
+ LRADC_DELAY_TRIGGER_DELAYS_MASK)
+#define LRADC_DELAY_LOOP_COUNT_MASK (0x1f << 11)
+#define LRADC_DELAY_LOOP_COUNT_OFFSET 11
+#define LRADC_DELAY_LOOP(x) \
+ (((x) << LRADC_DELAY_LOOP_COUNT_OFFSET) & \
+ LRADC_DELAY_LOOP_COUNT_MASK)
+#define LRADC_DELAY_DELAY_MASK 0x7ff
+#define LRADC_DELAY_DELAY_OFFSET 0
+#define LRADC_DELAY_DELAY(x) \
+ (((x) << LRADC_DELAY_DELAY_OFFSET) & \
+ LRADC_DELAY_DELAY_MASK)
+
+#define LRADC_CTRL4 0x140
+#define LRADC_CTRL4_LRADCSELECT_MASK(n) (0xf << ((n) * 4))
+#define LRADC_CTRL4_LRADCSELECT_OFFSET(n) ((n) * 4)
+#define LRADC_CTRL4_LRADCSELECT(n, x) \
+ (((x) << LRADC_CTRL4_LRADCSELECT_OFFSET(n)) & \
+ LRADC_CTRL4_LRADCSELECT_MASK(n))
+
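A hedged sketch of the attribute-based table creation plus the new declare-act helper; the namespace, sizing and match spec are illustrative:

#include <linux/mlx5/fs.h>
#include <linux/mm.h>
#include <linux/err.h>

static struct mlx5_flow_table *create_my_table(struct mlx5_flow_namespace *ns)
{
	struct mlx5_flow_table_attr ft_attr = {
		.prio    = 0,
		.max_fte = 1024,	/* illustrative table size */
		.level   = 0,
	};

	return mlx5_create_flow_table(ns, &ft_attr);
}

static int add_my_rule(struct mlx5_flow_table *ft,
		       struct mlx5_flow_destination *dst)
{
	MLX5_DECLARE_FLOW_ACT(flow_act);	/* FWD_DEST + default flow tag */
	struct mlx5_flow_handle *rule;
	struct mlx5_flow_spec *spec;

	spec = kvzalloc(sizeof(*spec), GFP_KERNEL);	/* empty = match-all in group */
	if (!spec)
		return -ENOMEM;

	rule = mlx5_add_flow_rules(ft, spec, &flow_act, dst, 1);
	kvfree(spec);
	return PTR_ERR_OR_ZERO(rule);
}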
+#define LRADC_RESOLUTION 12
+#define LRADC_SINGLE_SAMPLE_MASK ((1 << LRADC_RESOLUTION) - 1)
+
+#define BUFFER_VCHANS_LIMITED 0x3f
+#define BUFFER_VCHANS_ALL 0xff
+
+ /*
+ * Certain LRADC channels are shared between touchscreen
+ * and/or touch-buttons and generic LRADC block. Therefore when using
+ * either of these, these channels are not available for the regular
+ * sampling. The shared channels are as follows:
+ *
+ * CH0 -- Touch button #0
+ * CH1 -- Touch button #1
+ * CH2 -- Touch screen XPUL
+ * CH3 -- Touch screen YPLL
+ * CH4 -- Touch screen XNUL
+ * CH5 -- Touch screen YNLR
+ * CH6 -- Touch screen WIPER (5-wire only)
+ *
+ * The bit fields below represents which parts of the LRADC block are
+ * switched into special mode of operation. These channels can not
+ * be sampled as regular LRADC channels. The driver will refuse any
+ * attempt to sample these channels.
+ */
+#define CHAN_MASK_TOUCHBUTTON (BIT(1) | BIT(0))
+#define CHAN_MASK_TOUCHSCREEN_4WIRE (0xf << 2)
+#define CHAN_MASK_TOUCHSCREEN_5WIRE (0x1f << 2)
+
+enum mxs_lradc_id {
+ IMX23_LRADC,
+ IMX28_LRADC,
+};
+
+enum mxs_lradc_ts_wires {
+ MXS_LRADC_TOUCHSCREEN_NONE = 0,
+ MXS_LRADC_TOUCHSCREEN_4WIRE,
+ MXS_LRADC_TOUCHSCREEN_5WIRE,
+};
+
+/**
+ * struct mxs_lradc
+ * @soc: soc type (IMX23 or IMX28)
+ * @clk: 2 kHz clock for delay units
+ * @buffer_vchans: channels that can be used during buffered capture
+ * @touchscreen_wire: touchscreen type (4-wire or 5-wire)
+ * @use_touchbutton: true if the touch buttons are used
+ */
+struct mxs_lradc {
+ enum mxs_lradc_id soc;
+ struct clk *clk;
+ u8 buffer_vchans;
+
+ enum mxs_lradc_ts_wires touchscreen_wire;
+ bool use_touchbutton;
+};
+
+static inline u32 mxs_lradc_irq_mask(struct mxs_lradc *lradc)
+{
+ switch (lradc->soc) {
+ case IMX23_LRADC:
+ return LRADC_CTRL1_MX23_LRADC_IRQ_MASK;
+ case IMX28_LRADC:
+ return LRADC_CTRL1_MX28_LRADC_IRQ_MASK;
+ default:
+ return 0;
+ }
+}
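A hedged sketch of how a child driver (which owns the MMIO mapping; it is not part of this shared struct) might use the mask helper in its interrupt handler:

#include <linux/interrupt.h>
#include <linux/mfd/mxs-lradc.h>

struct my_lradc_priv {			/* illustrative per-driver wrapper */
	struct mxs_lradc *lradc;
	void __iomem *base;
};

static irqreturn_t my_lradc_irq(int irq, void *data)
{
	struct my_lradc_priv *priv = data;
	u32 reg = readl(priv->base + LRADC_CTRL1);
	u32 mask = mxs_lradc_irq_mask(priv->lradc);

	if (!(reg & mask))
		return IRQ_NONE;

	/* clear only the status bits, leaving the enable bits untouched */
	writel(reg & mask, priv->base + LRADC_CTRL1 + STMP_OFFSET_REG_CLR);
	return IRQ_HANDLED;
}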
+
+#endif /* __MXS_LRADC_H */
diff --git a/include/linux/mfd/stm32-timers.h b/include/linux/mfd/stm32-timers.h
index d0300045f04a..4a0abbc10ef6 100644
--- a/include/linux/mfd/stm32-timers.h
+++ b/include/linux/mfd/stm32-timers.h
@@ -21,6 +21,7 @@
#define TIM_CCMR1 0x18 /* Capt/Comp 1 Mode Reg */
#define TIM_CCMR2 0x1C /* Capt/Comp 2 Mode Reg */
#define TIM_CCER 0x20 /* Capt/Comp Enable Reg */
+#define TIM_CNT 0x24 /* Counter */
#define TIM_PSC 0x28 /* Prescaler */
#define TIM_ARR 0x2c /* Auto-Reload Register */
#define TIM_CCR1 0x34 /* Capt/Comp Register 1 */
@@ -30,6 +31,7 @@
#define TIM_BDTR 0x44 /* Break and Dead-Time Reg */
#define TIM_CR1_CEN BIT(0) /* Counter Enable */
+#define TIM_CR1_DIR BIT(4) /* Counter Direction */
#define TIM_CR1_ARPE BIT(7) /* Auto-reload Preload Ena */
#define TIM_CR2_MMS (BIT(4) | BIT(5) | BIT(6)) /* Master mode selection */
#define TIM_SMCR_SMS (BIT(0) | BIT(1) | BIT(2)) /* Slave mode selection */
diff --git a/include/linux/mfd/sun4i-gpadc.h b/include/linux/mfd/sun4i-gpadc.h
index d7a29f246d64..139872c2e0fe 100644
--- a/include/linux/mfd/sun4i-gpadc.h
+++ b/include/linux/mfd/sun4i-gpadc.h
@@ -28,6 +28,7 @@
#define SUN4I_GPADC_CTRL1_TP_MODE_EN BIT(4)
#define SUN4I_GPADC_CTRL1_TP_ADC_SELECT BIT(3)
#define SUN4I_GPADC_CTRL1_ADC_CHAN_SELECT(x) (GENMASK(2, 0) & (x))
+#define SUN4I_GPADC_CTRL1_ADC_CHAN_MASK GENMASK(2, 0)
/* TP_CTRL1 bits for sun6i SOCs */
#define SUN6I_GPADC_CTRL1_TOUCH_PAN_CALI_EN BIT(7)
@@ -35,6 +36,11 @@
#define SUN6I_GPADC_CTRL1_TP_MODE_EN BIT(5)
#define SUN6I_GPADC_CTRL1_TP_ADC_SELECT BIT(4)
#define SUN6I_GPADC_CTRL1_ADC_CHAN_SELECT(x) (GENMASK(3, 0) & BIT(x))
+#define SUN6I_GPADC_CTRL1_ADC_CHAN_MASK GENMASK(3, 0)
+
+/* TP_CTRL1 bits for sun8i SoCs */
+#define SUN8I_GPADC_CTRL1_CHOP_TEMP_EN BIT(8)
+#define SUN8I_GPADC_CTRL1_GPADC_CALI_EN BIT(7)
#define SUN4I_GPADC_CTRL2 0x08
diff --git a/include/linux/mfd/syscon/atmel-smc.h b/include/linux/mfd/syscon/atmel-smc.h
index be6ebe64eebe..afa266169800 100644
--- a/include/linux/mfd/syscon/atmel-smc.h
+++ b/include/linux/mfd/syscon/atmel-smc.h
@@ -17,157 +17,92 @@
#include <linux/kernel.h>
#include <linux/regmap.h>
-#define AT91SAM9_SMC_GENERIC 0x00
-#define AT91SAM9_SMC_GENERIC_BLK_SZ 0x10
-
-#define SAMA5_SMC_GENERIC 0x600
-#define SAMA5_SMC_GENERIC_BLK_SZ 0x14
-
-#define AT91SAM9_SMC_SETUP(o) ((o) + 0x00)
-#define AT91SAM9_SMC_NWESETUP(x) (x)
-#define AT91SAM9_SMC_NCS_WRSETUP(x) ((x) << 8)
-#define AT91SAM9_SMC_NRDSETUP(x) ((x) << 16)
-#define AT91SAM9_SMC_NCS_NRDSETUP(x) ((x) << 24)
-
-#define AT91SAM9_SMC_PULSE(o) ((o) + 0x04)
-#define AT91SAM9_SMC_NWEPULSE(x) (x)
-#define AT91SAM9_SMC_NCS_WRPULSE(x) ((x) << 8)
-#define AT91SAM9_SMC_NRDPULSE(x) ((x) << 16)
-#define AT91SAM9_SMC_NCS_NRDPULSE(x) ((x) << 24)
-
-#define AT91SAM9_SMC_CYCLE(o) ((o) + 0x08)
-#define AT91SAM9_SMC_NWECYCLE(x) (x)
-#define AT91SAM9_SMC_NRDCYCLE(x) ((x) << 16)
-
-#define AT91SAM9_SMC_MODE(o) ((o) + 0x0c)
-#define SAMA5_SMC_MODE(o) ((o) + 0x10)
-#define AT91_SMC_READMODE BIT(0)
-#define AT91_SMC_READMODE_NCS (0 << 0)
-#define AT91_SMC_READMODE_NRD (1 << 0)
-#define AT91_SMC_WRITEMODE BIT(1)
-#define AT91_SMC_WRITEMODE_NCS (0 << 1)
-#define AT91_SMC_WRITEMODE_NWE (1 << 1)
-#define AT91_SMC_EXNWMODE GENMASK(5, 4)
-#define AT91_SMC_EXNWMODE_DISABLE (0 << 4)
-#define AT91_SMC_EXNWMODE_FROZEN (2 << 4)
-#define AT91_SMC_EXNWMODE_READY (3 << 4)
-#define AT91_SMC_BAT BIT(8)
-#define AT91_SMC_BAT_SELECT (0 << 8)
-#define AT91_SMC_BAT_WRITE (1 << 8)
-#define AT91_SMC_DBW GENMASK(13, 12)
-#define AT91_SMC_DBW_8 (0 << 12)
-#define AT91_SMC_DBW_16 (1 << 12)
-#define AT91_SMC_DBW_32 (2 << 12)
-#define AT91_SMC_TDF GENMASK(19, 16)
-#define AT91_SMC_TDF_(x) ((((x) - 1) << 16) & AT91_SMC_TDF)
-#define AT91_SMC_TDF_MAX 16
-#define AT91_SMC_TDFMODE_OPTIMIZED BIT(20)
-#define AT91_SMC_PMEN BIT(24)
-#define AT91_SMC_PS GENMASK(29, 28)
-#define AT91_SMC_PS_4 (0 << 28)
-#define AT91_SMC_PS_8 (1 << 28)
-#define AT91_SMC_PS_16 (2 << 28)
-#define AT91_SMC_PS_32 (3 << 28)
-
-
-/*
- * This function converts a setup timing expressed in nanoseconds into an
- * encoded value that can be written in the SMC_SETUP register.
- *
- * The following formula is described in atmel datasheets (section
- * "SMC Setup Register"):
- *
- * setup length = (128* SETUP[5] + SETUP[4:0])
- *
- * where setup length is the timing expressed in cycles.
- */
-static inline u32 at91sam9_smc_setup_ns_to_cycles(unsigned int clk_rate,
- u32 timing_ns)
-{
- u32 clk_period = DIV_ROUND_UP(NSEC_PER_SEC, clk_rate);
- u32 coded_cycles = 0;
- u32 cycles;
-
- cycles = DIV_ROUND_UP(timing_ns, clk_period);
- if (cycles / 32) {
- coded_cycles |= 1 << 5;
- if (cycles < 128)
- cycles = 0;
- }
-
- coded_cycles |= cycles % 32;
-
- return coded_cycles;
-}
-
-/*
- * This function converts a pulse timing expressed in nanoseconds into an
- * encoded value that can be written in the SMC_PULSE register.
- *
- * The following formula is described in atmel datasheets (section
- * "SMC Pulse Register"):
- *
- * pulse length = (256* PULSE[6] + PULSE[5:0])
- *
- * where pulse length is the timing expressed in cycles.
+#define ATMEL_SMC_SETUP(cs) (((cs) * 0x10))
+#define ATMEL_HSMC_SETUP(cs) (0x600 + ((cs) * 0x14))
+#define ATMEL_SMC_PULSE(cs) (((cs) * 0x10) + 0x4)
+#define ATMEL_HSMC_PULSE(cs) (0x600 + ((cs) * 0x14) + 0x4)
+#define ATMEL_SMC_CYCLE(cs) (((cs) * 0x10) + 0x8)
+#define ATMEL_HSMC_CYCLE(cs) (0x600 + ((cs) * 0x14) + 0x8)
+#define ATMEL_SMC_NWE_SHIFT 0
+#define ATMEL_SMC_NCS_WR_SHIFT 8
+#define ATMEL_SMC_NRD_SHIFT 16
+#define ATMEL_SMC_NCS_RD_SHIFT 24
+
+#define ATMEL_SMC_MODE(cs) (((cs) * 0x10) + 0xc)
+#define ATMEL_HSMC_MODE(cs) (0x600 + ((cs) * 0x14) + 0x10)
+#define ATMEL_SMC_MODE_READMODE_MASK BIT(0)
+#define ATMEL_SMC_MODE_READMODE_NCS (0 << 0)
+#define ATMEL_SMC_MODE_READMODE_NRD (1 << 0)
+#define ATMEL_SMC_MODE_WRITEMODE_MASK BIT(1)
+#define ATMEL_SMC_MODE_WRITEMODE_NCS (0 << 1)
+#define ATMEL_SMC_MODE_WRITEMODE_NWE (1 << 1)
+#define ATMEL_SMC_MODE_EXNWMODE_MASK GENMASK(5, 4)
+#define ATMEL_SMC_MODE_EXNWMODE_DISABLE (0 << 4)
+#define ATMEL_SMC_MODE_EXNWMODE_FROZEN (2 << 4)
+#define ATMEL_SMC_MODE_EXNWMODE_READY (3 << 4)
+#define ATMEL_SMC_MODE_BAT_MASK BIT(8)
+#define ATMEL_SMC_MODE_BAT_SELECT (0 << 8)
+#define ATMEL_SMC_MODE_BAT_WRITE (1 << 8)
+#define ATMEL_SMC_MODE_DBW_MASK GENMASK(13, 12)
+#define ATMEL_SMC_MODE_DBW_8 (0 << 12)
+#define ATMEL_SMC_MODE_DBW_16 (1 << 12)
+#define ATMEL_SMC_MODE_DBW_32 (2 << 12)
+#define ATMEL_SMC_MODE_TDF_MASK GENMASK(19, 16)
+#define ATMEL_SMC_MODE_TDF(x) (((x) - 1) << 16)
+#define ATMEL_SMC_MODE_TDF_MAX 16
+#define ATMEL_SMC_MODE_TDF_MIN 1
+#define ATMEL_SMC_MODE_TDFMODE_OPTIMIZED BIT(20)
+#define ATMEL_SMC_MODE_PMEN BIT(24)
+#define ATMEL_SMC_MODE_PS_MASK GENMASK(29, 28)
+#define ATMEL_SMC_MODE_PS_4 (0 << 28)
+#define ATMEL_SMC_MODE_PS_8 (1 << 28)
+#define ATMEL_SMC_MODE_PS_16 (2 << 28)
+#define ATMEL_SMC_MODE_PS_32 (3 << 28)
+
+#define ATMEL_HSMC_TIMINGS(cs) (0x600 + ((cs) * 0x14) + 0xc)
+#define ATMEL_HSMC_TIMINGS_OCMS BIT(12)
+#define ATMEL_HSMC_TIMINGS_RBNSEL(x) ((x) << 28)
+#define ATMEL_HSMC_TIMINGS_NFSEL BIT(31)
+#define ATMEL_HSMC_TIMINGS_TCLR_SHIFT 0
+#define ATMEL_HSMC_TIMINGS_TADL_SHIFT 4
+#define ATMEL_HSMC_TIMINGS_TAR_SHIFT 8
+#define ATMEL_HSMC_TIMINGS_TRR_SHIFT 16
+#define ATMEL_HSMC_TIMINGS_TWB_SHIFT 24
+
+/**
+ * struct atmel_smc_cs_conf - SMC CS config as described in the datasheet.
+ * @setup: NCS/NWE/NRD setup timings (not applicable to at91rm9200)
+ * @pulse: NCS/NWE/NRD pulse timings (not applicable to at91rm9200)
+ * @cycle: NWE/NRD cycle timings (not applicable to at91rm9200)
+ * @timings: advanced NAND related timings (only applicable to HSMC)
+ * @mode: all kinds of config parameters (see the field definitions above).
+ *	   The mode fields are different on at91rm9200
*/
-static inline u32 at91sam9_smc_pulse_ns_to_cycles(unsigned int clk_rate,
- u32 timing_ns)
-{
- u32 clk_period = DIV_ROUND_UP(NSEC_PER_SEC, clk_rate);
- u32 coded_cycles = 0;
- u32 cycles;
-
- cycles = DIV_ROUND_UP(timing_ns, clk_period);
- if (cycles / 64) {
- coded_cycles |= 1 << 6;
- if (cycles < 256)
- cycles = 0;
- }
-
- coded_cycles |= cycles % 64;
-
- return coded_cycles;
-}
-
-/*
- * This function converts a cycle timing expressed in nanoseconds into an
- * encoded value that can be written in the SMC_CYCLE register.
- *
- * The following formula is described in atmel datasheets (section
- * "SMC Cycle Register"):
- *
- * cycle length = (CYCLE[8:7]*256 + CYCLE[6:0])
- *
- * where cycle length is the timing expressed in cycles.
- */
-static inline u32 at91sam9_smc_cycle_ns_to_cycles(unsigned int clk_rate,
- u32 timing_ns)
-{
- u32 clk_period = DIV_ROUND_UP(NSEC_PER_SEC, clk_rate);
- u32 coded_cycles = 0;
- u32 cycles;
-
- cycles = DIV_ROUND_UP(timing_ns, clk_period);
- if (cycles / 128) {
- coded_cycles = cycles / 256;
- cycles %= 256;
- if (cycles >= 128) {
- coded_cycles++;
- cycles = 0;
- }
-
- if (coded_cycles > 0x3) {
- coded_cycles = 0x3;
- cycles = 0x7f;
- }
-
- coded_cycles <<= 7;
- }
-
- coded_cycles |= cycles % 128;
-
- return coded_cycles;
-}
+struct atmel_smc_cs_conf {
+ u32 setup;
+ u32 pulse;
+ u32 cycle;
+ u32 timings;
+ u32 mode;
+};
+
+void atmel_smc_cs_conf_init(struct atmel_smc_cs_conf *conf);
+int atmel_smc_cs_conf_set_timing(struct atmel_smc_cs_conf *conf,
+ unsigned int shift,
+ unsigned int ncycles);
+int atmel_smc_cs_conf_set_setup(struct atmel_smc_cs_conf *conf,
+ unsigned int shift, unsigned int ncycles);
+int atmel_smc_cs_conf_set_pulse(struct atmel_smc_cs_conf *conf,
+ unsigned int shift, unsigned int ncycles);
+int atmel_smc_cs_conf_set_cycle(struct atmel_smc_cs_conf *conf,
+ unsigned int shift, unsigned int ncycles);
+void atmel_smc_cs_conf_apply(struct regmap *regmap, int cs,
+ const struct atmel_smc_cs_conf *conf);
+void atmel_hsmc_cs_conf_apply(struct regmap *regmap, int cs,
+ const struct atmel_smc_cs_conf *conf);
+void atmel_smc_cs_conf_get(struct regmap *regmap, int cs,
+ struct atmel_smc_cs_conf *conf);
+void atmel_hsmc_cs_conf_get(struct regmap *regmap, int cs,
+ struct atmel_smc_cs_conf *conf);
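A hedged configuration sketch for one chip select; the cycle counts are illustrative and the setters' return values (range checks) are ignored for brevity:

#include <linux/mfd/syscon/atmel-smc.h>

static void my_setup_cs(struct regmap *smc, int cs)
{
	struct atmel_smc_cs_conf conf;

	atmel_smc_cs_conf_init(&conf);
	atmel_smc_cs_conf_set_setup(&conf, ATMEL_SMC_NWE_SHIFT, 1);
	atmel_smc_cs_conf_set_pulse(&conf, ATMEL_SMC_NWE_SHIFT, 3);
	atmel_smc_cs_conf_set_cycle(&conf, ATMEL_SMC_NWE_SHIFT, 5);
	conf.mode |= ATMEL_SMC_MODE_READMODE_NRD | ATMEL_SMC_MODE_WRITEMODE_NWE |
		     ATMEL_SMC_MODE_TDF(2);

	atmel_smc_cs_conf_apply(smc, cs, &conf);	/* or the _hsmc_ variant */
}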
#endif /* _LINUX_MFD_SYSCON_ATMEL_SMC_H_ */
diff --git a/include/linux/mfd/syscon/exynos5-pmu.h b/include/linux/mfd/syscon/exynos5-pmu.h
index c28ff21ca4d2..b4942a32b81d 100644
--- a/include/linux/mfd/syscon/exynos5-pmu.h
+++ b/include/linux/mfd/syscon/exynos5-pmu.h
@@ -12,41 +12,8 @@
#ifndef _LINUX_MFD_SYSCON_PMU_EXYNOS5_H_
#define _LINUX_MFD_SYSCON_PMU_EXYNOS5_H_
-/* Exynos5 PMU register definitions */
-#define EXYNOS5_HDMI_PHY_CONTROL (0x700)
-#define EXYNOS5_USBDRD_PHY_CONTROL (0x704)
-
-/* Exynos5250 specific register definitions */
-#define EXYNOS5_USBHOST_PHY_CONTROL (0x708)
-#define EXYNOS5_EFNAND_PHY_CONTROL (0x70c)
-#define EXYNOS5_MIPI_PHY0_CONTROL (0x710)
-#define EXYNOS5_MIPI_PHY1_CONTROL (0x714)
-#define EXYNOS5_ADC_PHY_CONTROL (0x718)
-#define EXYNOS5_MTCADC_PHY_CONTROL (0x71c)
-#define EXYNOS5_DPTX_PHY_CONTROL (0x720)
-#define EXYNOS5_SATA_PHY_CONTROL (0x724)
-
-/* Exynos5420 specific register definitions */
-#define EXYNOS5420_USBDRD1_PHY_CONTROL (0x708)
-#define EXYNOS5420_USBHOST_PHY_CONTROL (0x70c)
-#define EXYNOS5420_MIPI_PHY0_CONTROL (0x714)
-#define EXYNOS5420_MIPI_PHY1_CONTROL (0x718)
-#define EXYNOS5420_MIPI_PHY2_CONTROL (0x71c)
-#define EXYNOS5420_ADC_PHY_CONTROL (0x720)
-#define EXYNOS5420_MTCADC_PHY_CONTROL (0x724)
-#define EXYNOS5420_DPTX_PHY_CONTROL (0x728)
-
-/* Exynos5433 specific register definitions */
-#define EXYNOS5433_USBHOST30_PHY_CONTROL (0x728)
-#define EXYNOS5433_MIPI_PHY0_CONTROL (0x710)
-#define EXYNOS5433_MIPI_PHY1_CONTROL (0x714)
-#define EXYNOS5433_MIPI_PHY2_CONTROL (0x718)
-
#define EXYNOS5_PHY_ENABLE BIT(0)
#define EXYNOS5_MIPI_PHY_S_RESETN BIT(1)
#define EXYNOS5_MIPI_PHY_M_RESETN BIT(2)
-#define EXYNOS5433_PAD_RETENTION_AUD_OPTION (0x3028)
-#define EXYNOS5433_PAD_INITIATE_WAKEUP_FROM_LOWPWR BIT(28)
-
#endif /* _LINUX_MFD_SYSCON_PMU_EXYNOS5_H_ */
diff --git a/include/linux/mfd/syscon/imx7-iomuxc-gpr.h b/include/linux/mfd/syscon/imx7-iomuxc-gpr.h
index 4585d6105d68..abbd52466573 100644
--- a/include/linux/mfd/syscon/imx7-iomuxc-gpr.h
+++ b/include/linux/mfd/syscon/imx7-iomuxc-gpr.h
@@ -44,4 +44,8 @@
#define IMX7D_GPR5_CSI_MUX_CONTROL_MIPI (0x1 << 4)
+#define IMX7D_GPR12_PCIE_PHY_REFCLK_SEL BIT(5)
+
+#define IMX7D_GPR22_PCIE_PHY_PLL_LOCKED BIT(31)
+
#endif /* __LINUX_IMX7_IOMUXC_GPR_H */
diff --git a/include/linux/mfd/ti-lmu-register.h b/include/linux/mfd/ti-lmu-register.h
new file mode 100644
index 000000000000..2125c7c02818
--- /dev/null
+++ b/include/linux/mfd/ti-lmu-register.h
@@ -0,0 +1,280 @@
+/*
+ * TI LMU (Lighting Management Unit) Device Register Map
+ *
+ * Copyright 2017 Texas Instruments
+ *
+ * Author: Milo Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MFD_TI_LMU_REGISTER_H__
+#define __MFD_TI_LMU_REGISTER_H__
+
+#include <linux/bitops.h>
+
+/* LM3532 */
+#define LM3532_REG_OUTPUT_CFG 0x10
+#define LM3532_ILED1_CFG_MASK 0x03
+#define LM3532_ILED2_CFG_MASK 0x0C
+#define LM3532_ILED3_CFG_MASK 0x30
+#define LM3532_ILED1_CFG_SHIFT 0
+#define LM3532_ILED2_CFG_SHIFT 2
+#define LM3532_ILED3_CFG_SHIFT 4
+
+#define LM3532_REG_RAMPUP 0x12
+#define LM3532_REG_RAMPDN LM3532_REG_RAMPUP
+#define LM3532_RAMPUP_MASK 0x07
+#define LM3532_RAMPUP_SHIFT 0
+#define LM3532_RAMPDN_MASK 0x38
+#define LM3532_RAMPDN_SHIFT 3
+
+#define LM3532_REG_ENABLE 0x1D
+
+#define LM3532_REG_PWM_A_CFG 0x13
+#define LM3532_PWM_A_MASK 0x05 /* zone 0 */
+#define LM3532_PWM_ZONE_0 BIT(2)
+
+#define LM3532_REG_PWM_B_CFG 0x14
+#define LM3532_PWM_B_MASK 0x09 /* zone 1 */
+#define LM3532_PWM_ZONE_1 BIT(3)
+
+#define LM3532_REG_PWM_C_CFG 0x15
+#define LM3532_PWM_C_MASK 0x11 /* zone 2 */
+#define LM3532_PWM_ZONE_2 BIT(4)
+
+#define LM3532_REG_ZONE_CFG_A 0x16
+#define LM3532_REG_ZONE_CFG_B 0x18
+#define LM3532_REG_ZONE_CFG_C 0x1A
+#define LM3532_ZONE_MASK (BIT(2) | BIT(3) | BIT(4))
+#define LM3532_ZONE_0 0
+#define LM3532_ZONE_1 BIT(2)
+#define LM3532_ZONE_2 BIT(3)
+
+#define LM3532_REG_BRT_A 0x70 /* zone 0 */
+#define LM3532_REG_BRT_B 0x76 /* zone 1 */
+#define LM3532_REG_BRT_C 0x7C /* zone 2 */
+
+#define LM3532_MAX_REG 0x7E
+
+/* LM3631 */
+#define LM3631_REG_DEVCTRL 0x00
+#define LM3631_LCD_EN_MASK BIT(1)
+#define LM3631_BL_EN_MASK BIT(0)
+
+#define LM3631_REG_BRT_LSB 0x01
+#define LM3631_REG_BRT_MSB 0x02
+
+#define LM3631_REG_BL_CFG 0x06
+#define LM3631_BL_CHANNEL_MASK BIT(3)
+#define LM3631_BL_DUAL_CHANNEL 0
+#define LM3631_BL_SINGLE_CHANNEL BIT(3)
+#define LM3631_MAP_MASK BIT(5)
+#define LM3631_EXPONENTIAL_MAP 0
+
+#define LM3631_REG_BRT_MODE 0x08
+#define LM3631_MODE_MASK (BIT(1) | BIT(2) | BIT(3))
+#define LM3631_DEFAULT_MODE (BIT(1) | BIT(3))
+
+#define LM3631_REG_SLOPE 0x09
+#define LM3631_SLOPE_MASK 0xF0
+#define LM3631_SLOPE_SHIFT 4
+
+#define LM3631_REG_LDO_CTRL1 0x0A
+#define LM3631_EN_OREF_MASK BIT(0)
+#define LM3631_EN_VNEG_MASK BIT(1)
+#define LM3631_EN_VPOS_MASK BIT(2)
+
+#define LM3631_REG_LDO_CTRL2 0x0B
+#define LM3631_EN_CONT_MASK BIT(0)
+
+#define LM3631_REG_VOUT_CONT 0x0C
+#define LM3631_VOUT_CONT_MASK (BIT(6) | BIT(7))
+
+#define LM3631_REG_VOUT_BOOST 0x0C
+#define LM3631_REG_VOUT_POS 0x0D
+#define LM3631_REG_VOUT_NEG 0x0E
+#define LM3631_REG_VOUT_OREF 0x0F
+#define LM3631_VOUT_MASK 0x3F
+
+#define LM3631_REG_ENTIME_VCONT 0x0B
+#define LM3631_ENTIME_CONT_MASK 0x70
+
+#define LM3631_REG_ENTIME_VOREF 0x0F
+#define LM3631_REG_ENTIME_VPOS 0x10
+#define LM3631_REG_ENTIME_VNEG 0x11
+#define LM3631_ENTIME_MASK 0xF0
+#define LM3631_ENTIME_SHIFT 4
+
+#define LM3631_MAX_REG 0x16
+
+/* LM3632 */
+#define LM3632_REG_CONFIG1 0x02
+#define LM3632_OVP_MASK (BIT(5) | BIT(6) | BIT(7))
+#define LM3632_OVP_25V BIT(6)
+
+#define LM3632_REG_CONFIG2 0x03
+#define LM3632_SWFREQ_MASK BIT(7)
+#define LM3632_SWFREQ_1MHZ BIT(7)
+
+#define LM3632_REG_BRT_LSB 0x04
+#define LM3632_REG_BRT_MSB 0x05
+
+#define LM3632_REG_IO_CTRL 0x09
+#define LM3632_PWM_MASK BIT(6)
+#define LM3632_I2C_MODE 0
+#define LM3632_PWM_MODE BIT(6)
+
+#define LM3632_REG_ENABLE 0x0A
+#define LM3632_BL_EN_MASK BIT(0)
+#define LM3632_BL_CHANNEL_MASK (BIT(3) | BIT(4))
+#define LM3632_BL_SINGLE_CHANNEL BIT(4)
+#define LM3632_BL_DUAL_CHANNEL BIT(3)
+
+#define LM3632_REG_BIAS_CONFIG 0x0C
+#define LM3632_EXT_EN_MASK BIT(0)
+#define LM3632_EN_VNEG_MASK BIT(1)
+#define LM3632_EN_VPOS_MASK BIT(2)
+
+#define LM3632_REG_VOUT_BOOST 0x0D
+#define LM3632_REG_VOUT_POS 0x0E
+#define LM3632_REG_VOUT_NEG 0x0F
+#define LM3632_VOUT_MASK 0x3F
+
+#define LM3632_MAX_REG 0x10
+
+/* LM3633 */
+#define LM3633_REG_HVLED_OUTPUT_CFG 0x10
+#define LM3633_HVLED1_CFG_MASK BIT(0)
+#define LM3633_HVLED2_CFG_MASK BIT(1)
+#define LM3633_HVLED3_CFG_MASK BIT(2)
+#define LM3633_HVLED1_CFG_SHIFT 0
+#define LM3633_HVLED2_CFG_SHIFT 1
+#define LM3633_HVLED3_CFG_SHIFT 2
+
+#define LM3633_REG_BANK_SEL 0x11
+
+#define LM3633_REG_BL0_RAMP 0x12
+#define LM3633_REG_BL1_RAMP 0x13
+#define LM3633_BL_RAMPUP_MASK 0xF0
+#define LM3633_BL_RAMPUP_SHIFT 4
+#define LM3633_BL_RAMPDN_MASK 0x0F
+#define LM3633_BL_RAMPDN_SHIFT 0
+
+#define LM3633_REG_BL_RAMP_CONF 0x1B
+#define LM3633_BL_RAMP_MASK 0x0F
+#define LM3633_BL_RAMP_EACH 0x05
+
+#define LM3633_REG_PTN0_RAMP 0x1C
+#define LM3633_REG_PTN1_RAMP 0x1D
+#define LM3633_PTN_RAMPUP_MASK 0x70
+#define LM3633_PTN_RAMPUP_SHIFT 4
+#define LM3633_PTN_RAMPDN_MASK 0x07
+#define LM3633_PTN_RAMPDN_SHIFT 0
+
+#define LM3633_REG_LED_MAPPING_MODE 0x1F
+#define LM3633_LED_EXPONENTIAL BIT(1)
+
+#define LM3633_REG_IMAX_HVLED_A 0x20
+#define LM3633_REG_IMAX_HVLED_B 0x21
+#define LM3633_REG_IMAX_LVLED_BASE 0x22
+
+#define LM3633_REG_BL_FEEDBACK_ENABLE 0x28
+
+#define LM3633_REG_ENABLE 0x2B
+#define LM3633_LED_BANK_OFFSET 2
+
+#define LM3633_REG_PATTERN 0x2C
+
+#define LM3633_REG_BOOST_CFG 0x2D
+#define LM3633_OVP_MASK (BIT(1) | BIT(2))
+#define LM3633_OVP_40V 0x6
+
+#define LM3633_REG_PWM_CFG 0x2F
+#define LM3633_PWM_A_MASK BIT(0)
+#define LM3633_PWM_B_MASK BIT(1)
+
+#define LM3633_REG_BRT_HVLED_A_LSB 0x40
+#define LM3633_REG_BRT_HVLED_A_MSB 0x41
+#define LM3633_REG_BRT_HVLED_B_LSB 0x42
+#define LM3633_REG_BRT_HVLED_B_MSB 0x43
+
+#define LM3633_REG_BRT_LVLED_BASE 0x44
+
+#define LM3633_REG_PTN_DELAY 0x50
+
+#define LM3633_REG_PTN_LOWTIME 0x51
+
+#define LM3633_REG_PTN_HIGHTIME 0x52
+
+#define LM3633_REG_PTN_LOWBRT 0x53
+
+#define LM3633_REG_PTN_HIGHBRT LM3633_REG_BRT_LVLED_BASE
+
+#define LM3633_REG_BL_OPEN_FAULT_STATUS 0xB0
+
+#define LM3633_REG_BL_SHORT_FAULT_STATUS 0xB2
+
+#define LM3633_REG_MONITOR_ENABLE 0xB4
+
+#define LM3633_MAX_REG 0xB4
+
+/* LM3695 */
+#define LM3695_REG_GP 0x10
+#define LM3695_BL_CHANNEL_MASK BIT(3)
+#define LM3695_BL_DUAL_CHANNEL 0
+#define LM3695_BL_SINGLE_CHANNEL BIT(3)
+#define LM3695_BRT_RW_MASK BIT(2)
+#define LM3695_BL_EN_MASK BIT(0)
+
+#define LM3695_REG_BRT_LSB 0x13
+#define LM3695_REG_BRT_MSB 0x14
+
+#define LM3695_MAX_REG 0x14
+
+/* LM3697 */
+#define LM3697_REG_HVLED_OUTPUT_CFG 0x10
+#define LM3697_HVLED1_CFG_MASK BIT(0)
+#define LM3697_HVLED2_CFG_MASK BIT(1)
+#define LM3697_HVLED3_CFG_MASK BIT(2)
+#define LM3697_HVLED1_CFG_SHIFT 0
+#define LM3697_HVLED2_CFG_SHIFT 1
+#define LM3697_HVLED3_CFG_SHIFT 2
+
+#define LM3697_REG_BL0_RAMP 0x11
+#define LM3697_REG_BL1_RAMP 0x12
+#define LM3697_RAMPUP_MASK 0xF0
+#define LM3697_RAMPUP_SHIFT 4
+#define LM3697_RAMPDN_MASK 0x0F
+#define LM3697_RAMPDN_SHIFT 0
+
+#define LM3697_REG_RAMP_CONF 0x14
+#define LM3697_RAMP_MASK 0x0F
+#define LM3697_RAMP_EACH 0x05
+
+#define LM3697_REG_PWM_CFG 0x1C
+#define LM3697_PWM_A_MASK BIT(0)
+#define LM3697_PWM_B_MASK BIT(1)
+
+#define LM3697_REG_IMAX_A 0x17
+#define LM3697_REG_IMAX_B 0x18
+
+#define LM3697_REG_FEEDBACK_ENABLE 0x19
+
+#define LM3697_REG_BRT_A_LSB 0x20
+#define LM3697_REG_BRT_A_MSB 0x21
+#define LM3697_REG_BRT_B_LSB 0x22
+#define LM3697_REG_BRT_B_MSB 0x23
+
+#define LM3697_REG_ENABLE 0x24
+
+#define LM3697_REG_OPEN_FAULT_STATUS 0xB0
+
+#define LM3697_REG_SHORT_FAULT_STATUS 0xB2
+
+#define LM3697_REG_MONITOR_ENABLE 0xB4
+
+#define LM3697_MAX_REG 0xB4
+#endif
diff --git a/include/linux/mfd/ti-lmu.h b/include/linux/mfd/ti-lmu.h
new file mode 100644
index 000000000000..09d5f30384e5
--- /dev/null
+++ b/include/linux/mfd/ti-lmu.h
@@ -0,0 +1,87 @@
+/*
+ * TI LMU (Lighting Management Unit) Devices
+ *
+ * Copyright 2017 Texas Instruments
+ *
+ * Author: Milo Kim <milo.kim@ti.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __MFD_TI_LMU_H__
+#define __MFD_TI_LMU_H__
+
+#include <linux/gpio.h>
+#include <linux/notifier.h>
+#include <linux/regmap.h>
+
+/* Notifier event */
+#define LMU_EVENT_MONITOR_DONE 0x01
+
+enum ti_lmu_id {
+ LM3532,
+ LM3631,
+ LM3632,
+ LM3633,
+ LM3695,
+ LM3697,
+ LMU_MAX_ID,
+};
+
+enum ti_lmu_max_current {
+ LMU_IMAX_5mA,
+ LMU_IMAX_6mA,
+ LMU_IMAX_7mA = 0x03,
+ LMU_IMAX_8mA,
+ LMU_IMAX_9mA,
+ LMU_IMAX_10mA = 0x07,
+ LMU_IMAX_11mA,
+ LMU_IMAX_12mA,
+ LMU_IMAX_13mA,
+ LMU_IMAX_14mA,
+ LMU_IMAX_15mA = 0x0D,
+ LMU_IMAX_16mA,
+ LMU_IMAX_17mA,
+ LMU_IMAX_18mA,
+ LMU_IMAX_19mA,
+ LMU_IMAX_20mA = 0x13,
+ LMU_IMAX_21mA,
+ LMU_IMAX_22mA,
+ LMU_IMAX_23mA = 0x17,
+ LMU_IMAX_24mA,
+ LMU_IMAX_25mA,
+ LMU_IMAX_26mA,
+ LMU_IMAX_27mA = 0x1C,
+ LMU_IMAX_28mA,
+ LMU_IMAX_29mA,
+ LMU_IMAX_30mA,
+};
+
+enum lm363x_regulator_id {
+ LM3631_BOOST, /* Boost output */
+ LM3631_LDO_CONT, /* Display panel controller */
+ LM3631_LDO_OREF, /* Gamma reference */
+ LM3631_LDO_POS, /* Positive display bias output */
+ LM3631_LDO_NEG, /* Negative display bias output */
+ LM3632_BOOST, /* Boost output */
+ LM3632_LDO_POS, /* Positive display bias output */
+ LM3632_LDO_NEG, /* Negative display bias output */
+};
+
+/**
+ * struct ti_lmu
+ *
+ * @dev: Parent device pointer
+ * @regmap: Used for I2C communication when accessing registers
+ * @en_gpio: GPIO for HWEN pin [Optional]
+ * @notifier: Notifier for reporting hwmon event
+ */
+struct ti_lmu {
+ struct device *dev;
+ struct regmap *regmap;
+ int en_gpio;
+ struct blocking_notifier_head notifier;
+};
+#endif
diff --git a/include/linux/mfd/wm831x/core.h b/include/linux/mfd/wm831x/core.h
index 76c22648436f..b49fa67612f1 100644
--- a/include/linux/mfd/wm831x/core.h
+++ b/include/linux/mfd/wm831x/core.h
@@ -21,6 +21,8 @@
#include <linux/list.h>
#include <linux/regmap.h>
#include <linux/mfd/wm831x/auxadc.h>
+#include <linux/mfd/wm831x/pdata.h>
+#include <linux/of.h>
/*
* Register values.
@@ -367,6 +369,9 @@ struct wm831x {
struct regmap *regmap;
+ struct wm831x_pdata pdata;
+ enum wm831x_parent type;
+
int irq; /* Our chip IRQ */
struct mutex irq_lock;
struct irq_domain *irq_domain;
@@ -412,7 +417,7 @@ int wm831x_set_bits(struct wm831x *wm831x, unsigned short reg,
int wm831x_bulk_read(struct wm831x *wm831x, unsigned short reg,
int count, u16 *buf);
-int wm831x_device_init(struct wm831x *wm831x, unsigned long id, int irq);
+int wm831x_device_init(struct wm831x *wm831x, int irq);
void wm831x_device_exit(struct wm831x *wm831x);
int wm831x_device_suspend(struct wm831x *wm831x);
void wm831x_device_shutdown(struct wm831x *wm831x);
@@ -427,4 +432,6 @@ static inline int wm831x_irq(struct wm831x *wm831x, int irq)
extern struct regmap_config wm831x_regmap_config;
+extern const struct of_device_id wm831x_of_match[];
+
#endif
diff --git a/include/linux/mg_disk.h b/include/linux/mg_disk.h
deleted file mode 100644
index e11f4d9f1c2e..000000000000
--- a/include/linux/mg_disk.h
+++ /dev/null
@@ -1,45 +0,0 @@
-/*
- * include/linux/mg_disk.c
- *
- * Private data for mflash platform driver
- *
- * (c) 2008 mGine Co.,LTD
- * (c) 2008 unsik Kim <donari75@gmail.com>
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef __MG_DISK_H__
-#define __MG_DISK_H__
-
-/* name for platform device */
-#define MG_DEV_NAME "mg_disk"
-
-/* names of GPIO resource */
-#define MG_RST_PIN "mg_rst"
-/* except MG_BOOT_DEV, reset-out pin should be assigned */
-#define MG_RSTOUT_PIN "mg_rstout"
-
-/* device attribution */
-/* use mflash as boot device */
-#define MG_BOOT_DEV (1 << 0)
-/* use mflash as storage device */
-#define MG_STORAGE_DEV (1 << 1)
-/* same as MG_STORAGE_DEV, but bootloader already done reset sequence */
-#define MG_STORAGE_DEV_SKIP_RST (1 << 2)
-
-/* private driver data */
-struct mg_drv_data {
- /* disk resource */
- u32 use_polling;
-
- /* device attribution */
- u32 dev_attr;
-
- /* internally used */
- void *host;
-};
-
-#endif
diff --git a/include/linux/migrate.h b/include/linux/migrate.h
index fa76b516fa47..48e24844b3c5 100644
--- a/include/linux/migrate.h
+++ b/include/linux/migrate.h
@@ -33,8 +33,9 @@ extern char *migrate_reason_names[MR_TYPES];
#ifdef CONFIG_MIGRATION
extern void putback_movable_pages(struct list_head *l);
-extern int migrate_page(struct address_space *,
- struct page *, struct page *, enum migrate_mode);
+extern int migrate_page(struct address_space *mapping,
+ struct page *newpage, struct page *page,
+ enum migrate_mode mode);
extern int migrate_pages(struct list_head *l, new_page_t new, free_page_t free,
unsigned long private, enum migrate_mode mode, int reason);
extern int isolate_movable_page(struct page *page, isolate_mode_t mode);
diff --git a/include/linux/mlx4/device.h b/include/linux/mlx4/device.h
index 1beb1ec2fbdf..d5bed0875d30 100644
--- a/include/linux/mlx4/device.h
+++ b/include/linux/mlx4/device.h
@@ -108,7 +108,7 @@ enum {
MLX4_MFUNC_EQE_MASK = (MLX4_MFUNC_MAX_EQES - 1)
};
-/* Driver supports 3 diffrent device methods to manage traffic steering:
+/* Driver supports 3 different device methods to manage traffic steering:
* -device managed - High level API for ib and eth flow steering. FW is
* managing flow steering tables.
* - B0 steering mode - Common low level API for ib and (if supported) eth.
@@ -1011,8 +1011,7 @@ struct mlx4_mad_ifc {
#define mlx4_foreach_ib_transport_port(port, dev) \
for ((port) = 1; (port) <= (dev)->caps.num_ports; (port)++) \
if (((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_IB) || \
- ((dev)->caps.flags & MLX4_DEV_CAP_FLAG_IBOE) || \
- ((dev)->caps.flags2 & MLX4_DEV_CAP_FLAG2_ROCE_V1_V2))
+ ((dev)->caps.port_mask[port] == MLX4_PORT_TYPE_ETH))
#define MLX4_INVALID_SLAVE_ID 0xFF
#define MLX4_SINK_COUNTER_INDEX(dev) (dev->caps.max_counters - 1)
diff --git a/include/linux/mlx5/driver.h b/include/linux/mlx5/driver.h
index 2fcff6b4503f..bcdf739ee41a 100644
--- a/include/linux/mlx5/driver.h
+++ b/include/linux/mlx5/driver.h
@@ -540,6 +540,7 @@ struct mlx5_fc_stats {
struct workqueue_struct *wq;
struct delayed_work work;
unsigned long next_query;
+ unsigned long sampling_interval; /* jiffies */
};
struct mlx5_eswitch;
@@ -728,6 +729,7 @@ struct mlx5e_resources {
u32 pdn;
struct mlx5_td td;
struct mlx5_core_mkey mkey;
+ struct mlx5_sq_bfreg bfreg;
};
struct mlx5_core_dev {
@@ -890,12 +892,7 @@ static inline u16 cmdif_rev(struct mlx5_core_dev *dev)
static inline void *mlx5_vzalloc(unsigned long size)
{
- void *rtn;
-
- rtn = kzalloc(size, GFP_KERNEL | __GFP_NOWARN);
- if (!rtn)
- rtn = vzalloc(size);
- return rtn;
+ return kvzalloc(size, GFP_KERNEL);
}
static inline u32 mlx5_base_mkey(const u32 key)
@@ -1100,6 +1097,25 @@ struct net_device *mlx5_lag_get_roce_netdev(struct mlx5_core_dev *dev);
struct mlx5_uars_page *mlx5_get_uars_page(struct mlx5_core_dev *mdev);
void mlx5_put_uars_page(struct mlx5_core_dev *mdev, struct mlx5_uars_page *up);
+#ifndef CONFIG_MLX5_CORE_IPOIB
+static inline
+struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
+ struct ib_device *ibdev,
+ const char *name,
+ void (*setup)(struct net_device *))
+{
+ return ERR_PTR(-EOPNOTSUPP);
+}
+
+static inline void mlx5_rdma_netdev_free(struct net_device *netdev) {}
+#else
+struct net_device *mlx5_rdma_netdev_alloc(struct mlx5_core_dev *mdev,
+ struct ib_device *ibdev,
+ const char *name,
+ void (*setup)(struct net_device *));
+void mlx5_rdma_netdev_free(struct net_device *netdev);
+#endif /* CONFIG_MLX5_CORE_IPOIB */
+
struct mlx5_profile {
u64 mask;
u8 log_max_qp;
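
Editorial note: mlx5_vzalloc() now simply delegates to the generic kvzalloc(), and the IPoIB netdev helpers gain stubs so callers compile with CONFIG_MLX5_CORE_IPOIB disabled. A hedged caller sketch (the wrapper function and fallback policy are assumptions):

#include <linux/err.h>
#include <linux/mlx5/driver.h>

struct ib_device;

static struct net_device *
example_alloc_rdma_netdev(struct mlx5_core_dev *mdev, struct ib_device *ibdev,
			  void (*setup)(struct net_device *))
{
	struct net_device *netdev;

	/* the stub variant returns ERR_PTR(-EOPNOTSUPP), so the same call
	 * site works whether or not the IPoIB offload code is built in */
	netdev = mlx5_rdma_netdev_alloc(mdev, ibdev, "ib%d", setup);
	if (IS_ERR(netdev))
		return NULL;	/* caller falls back to the generic datapath */

	return netdev;
}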
diff --git a/include/linux/mlx5/fs.h b/include/linux/mlx5/fs.h
index 949b24b6c479..1b166d2e19c5 100644
--- a/include/linux/mlx5/fs.h
+++ b/include/linux/mlx5/fs.h
@@ -104,12 +104,18 @@ mlx5_create_auto_grouped_flow_table(struct mlx5_flow_namespace *ns,
u32 level,
u32 flags);
+struct mlx5_flow_table_attr {
+ int prio;
+ int max_fte;
+ u32 level;
+ u32 flags;
+ u32 underlay_qpn;
+};
+
struct mlx5_flow_table *
mlx5_create_flow_table(struct mlx5_flow_namespace *ns,
- int prio,
- int num_flow_table_entries,
- u32 level,
- u32 flags);
+ struct mlx5_flow_table_attr *ft_attr);
+
struct mlx5_flow_table *
mlx5_create_vport_flow_table(struct mlx5_flow_namespace *ns,
int prio,
@@ -134,8 +140,13 @@ struct mlx5_flow_act {
u32 action;
u32 flow_tag;
u32 encap_id;
+ u32 modify_id;
};
+#define MLX5_DECLARE_FLOW_ACT(name) \
+ struct mlx5_flow_act name = {MLX5_FLOW_CONTEXT_ACTION_FWD_DEST,\
+ MLX5_FS_DEFAULT_FLOW_TAG, 0, 0}
+
/* Single destination per rule.
* Group ID is implied by the match criteria.
*/
@@ -156,5 +167,4 @@ struct mlx5_fc *mlx5_fc_create(struct mlx5_core_dev *dev, bool aging);
void mlx5_fc_destroy(struct mlx5_core_dev *dev, struct mlx5_fc *counter);
void mlx5_fc_query_cached(struct mlx5_fc *counter,
u64 *bytes, u64 *packets, u64 *lastuse);
-
#endif
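
Editorial note: a hypothetical conversion of a mlx5_create_flow_table() caller to the new attribute struct; unset members such as underlay_qpn default to zero, and MLX5_DECLARE_FLOW_ACT() provides a forward action with the default flow tag for rule creation. The values below are illustrative only.

#include <linux/mlx5/fs.h>

static struct mlx5_flow_table *
example_create_table(struct mlx5_flow_namespace *ns)
{
	struct mlx5_flow_table_attr ft_attr = {};

	ft_attr.prio = 0;
	ft_attr.max_fte = 64;	/* was the num_flow_table_entries argument */
	ft_attr.level = 1;

	return mlx5_create_flow_table(ns, &ft_attr);
}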
diff --git a/include/linux/mlx5/mlx5_ifc.h b/include/linux/mlx5/mlx5_ifc.h
index 838242697541..32de0724b400 100644
--- a/include/linux/mlx5/mlx5_ifc.h
+++ b/include/linux/mlx5/mlx5_ifc.h
@@ -227,6 +227,8 @@ enum {
MLX5_CMD_OP_MODIFY_FLOW_TABLE = 0x93c,
MLX5_CMD_OP_ALLOC_ENCAP_HEADER = 0x93d,
MLX5_CMD_OP_DEALLOC_ENCAP_HEADER = 0x93e,
+ MLX5_CMD_OP_ALLOC_MODIFY_HEADER_CONTEXT = 0x940,
+ MLX5_CMD_OP_DEALLOC_MODIFY_HEADER_CONTEXT = 0x941,
MLX5_CMD_OP_MAX
};
@@ -234,7 +236,7 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 outer_dmac[0x1];
u8 outer_smac[0x1];
u8 outer_ether_type[0x1];
- u8 reserved_at_3[0x1];
+ u8 outer_ip_version[0x1];
u8 outer_first_prio[0x1];
u8 outer_first_cfi[0x1];
u8 outer_first_vid[0x1];
@@ -263,7 +265,7 @@ struct mlx5_ifc_flow_table_fields_supported_bits {
u8 inner_dmac[0x1];
u8 inner_smac[0x1];
u8 inner_ether_type[0x1];
- u8 reserved_at_23[0x1];
+ u8 inner_ip_version[0x1];
u8 inner_first_prio[0x1];
u8 inner_first_cfi[0x1];
u8 inner_first_vid[0x1];
@@ -302,7 +304,8 @@ struct mlx5_ifc_flow_table_prop_layout_bits {
u8 reserved_at_20[0x2];
u8 log_max_ft_size[0x6];
- u8 reserved_at_28[0x10];
+ u8 log_max_modify_header_context[0x8];
+ u8 max_modify_header_actions[0x8];
u8 max_ft_level[0x8];
u8 reserved_at_40[0x20];
@@ -368,7 +371,7 @@ struct mlx5_ifc_fte_match_set_lyr_2_4_bits {
u8 cvlan_tag[0x1];
u8 svlan_tag[0x1];
u8 frag[0x1];
- u8 reserved_at_93[0x4];
+ u8 ip_version[0x4];
u8 tcp_flags[0x9];
u8 tcp_sport[0x10];
@@ -869,7 +872,8 @@ struct mlx5_ifc_cmd_hca_cap_bits {
u8 compact_address_vector[0x1];
u8 striding_rq[0x1];
- u8 reserved_at_202[0x2];
+ u8 reserved_at_202[0x1];
+ u8 ipoib_enhanced_offloads[0x1];
u8 ipoib_basic_offloads[0x1];
u8 reserved_at_205[0xa];
u8 drain_sigerr[0x1];
@@ -1452,7 +1456,9 @@ struct mlx5_ifc_ib_port_cntrs_grp_data_layout_bits {
u8 vl_15_dropped[0x10];
- u8 reserved_at_a0[0xa0];
+ u8 reserved_at_a0[0x80];
+
+ u8 port_xmit_wait[0x20];
};
struct mlx5_ifc_eth_per_traffic_grp_data_layout_bits {
@@ -2190,6 +2196,7 @@ enum {
MLX5_FLOW_CONTEXT_ACTION_COUNT = 0x8,
MLX5_FLOW_CONTEXT_ACTION_ENCAP = 0x10,
MLX5_FLOW_CONTEXT_ACTION_DECAP = 0x20,
+ MLX5_FLOW_CONTEXT_ACTION_MOD_HDR = 0x40,
};
struct mlx5_ifc_flow_context_bits {
@@ -2211,7 +2218,9 @@ struct mlx5_ifc_flow_context_bits {
u8 encap_id[0x20];
- u8 reserved_at_e0[0x120];
+ u8 modify_header_id[0x20];
+
+ u8 reserved_at_100[0x100];
struct mlx5_ifc_fte_match_param_bits match_value;
@@ -2287,7 +2296,9 @@ struct mlx5_ifc_tisc_bits {
u8 reserved_at_120[0x8];
u8 transport_domain[0x18];
- u8 reserved_at_140[0x3c0];
+ u8 reserved_at_140[0x8];
+ u8 underlay_qpn[0x18];
+ u8 reserved_at_160[0x3a0];
};
enum {
@@ -4534,6 +4545,109 @@ struct mlx5_ifc_dealloc_encap_header_in_bits {
u8 reserved_60[0x20];
};
+struct mlx5_ifc_set_action_in_bits {
+ u8 action_type[0x4];
+ u8 field[0xc];
+ u8 reserved_at_10[0x3];
+ u8 offset[0x5];
+ u8 reserved_at_18[0x3];
+ u8 length[0x5];
+
+ u8 data[0x20];
+};
+
+struct mlx5_ifc_add_action_in_bits {
+ u8 action_type[0x4];
+ u8 field[0xc];
+ u8 reserved_at_10[0x10];
+
+ u8 data[0x20];
+};
+
+union mlx5_ifc_set_action_in_add_action_in_auto_bits {
+ struct mlx5_ifc_set_action_in_bits set_action_in;
+ struct mlx5_ifc_add_action_in_bits add_action_in;
+ u8 reserved_at_0[0x40];
+};
+
+enum {
+ MLX5_ACTION_TYPE_SET = 0x1,
+ MLX5_ACTION_TYPE_ADD = 0x2,
+};
+
+enum {
+ MLX5_ACTION_IN_FIELD_OUT_SMAC_47_16 = 0x1,
+ MLX5_ACTION_IN_FIELD_OUT_SMAC_15_0 = 0x2,
+ MLX5_ACTION_IN_FIELD_OUT_ETHERTYPE = 0x3,
+ MLX5_ACTION_IN_FIELD_OUT_DMAC_47_16 = 0x4,
+ MLX5_ACTION_IN_FIELD_OUT_DMAC_15_0 = 0x5,
+ MLX5_ACTION_IN_FIELD_OUT_IP_DSCP = 0x6,
+ MLX5_ACTION_IN_FIELD_OUT_TCP_FLAGS = 0x7,
+ MLX5_ACTION_IN_FIELD_OUT_TCP_SPORT = 0x8,
+ MLX5_ACTION_IN_FIELD_OUT_TCP_DPORT = 0x9,
+ MLX5_ACTION_IN_FIELD_OUT_IP_TTL = 0xa,
+ MLX5_ACTION_IN_FIELD_OUT_UDP_SPORT = 0xb,
+ MLX5_ACTION_IN_FIELD_OUT_UDP_DPORT = 0xc,
+ MLX5_ACTION_IN_FIELD_OUT_SIPV6_127_96 = 0xd,
+ MLX5_ACTION_IN_FIELD_OUT_SIPV6_95_64 = 0xe,
+ MLX5_ACTION_IN_FIELD_OUT_SIPV6_63_32 = 0xf,
+ MLX5_ACTION_IN_FIELD_OUT_SIPV6_31_0 = 0x10,
+ MLX5_ACTION_IN_FIELD_OUT_DIPV6_127_96 = 0x11,
+ MLX5_ACTION_IN_FIELD_OUT_DIPV6_95_64 = 0x12,
+ MLX5_ACTION_IN_FIELD_OUT_DIPV6_63_32 = 0x13,
+ MLX5_ACTION_IN_FIELD_OUT_DIPV6_31_0 = 0x14,
+ MLX5_ACTION_IN_FIELD_OUT_SIPV4 = 0x15,
+ MLX5_ACTION_IN_FIELD_OUT_DIPV4 = 0x16,
+};
+
+struct mlx5_ifc_alloc_modify_header_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 modify_header_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
+struct mlx5_ifc_alloc_modify_header_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 reserved_at_40[0x20];
+
+ u8 table_type[0x8];
+ u8 reserved_at_68[0x10];
+ u8 num_of_actions[0x8];
+
+ union mlx5_ifc_set_action_in_add_action_in_auto_bits actions[0];
+};
+
+struct mlx5_ifc_dealloc_modify_header_context_out_bits {
+ u8 status[0x8];
+ u8 reserved_at_8[0x18];
+
+ u8 syndrome[0x20];
+
+ u8 reserved_at_40[0x40];
+};
+
+struct mlx5_ifc_dealloc_modify_header_context_in_bits {
+ u8 opcode[0x10];
+ u8 reserved_at_10[0x10];
+
+ u8 reserved_at_20[0x10];
+ u8 op_mod[0x10];
+
+ u8 modify_header_id[0x20];
+
+ u8 reserved_at_60[0x20];
+};
+
struct mlx5_ifc_query_dct_out_bits {
u8 status[0x8];
u8 reserved_at_8[0x18];
@@ -4623,17 +4737,17 @@ struct mlx5_ifc_query_cong_statistics_out_bits {
u8 reserved_at_40[0x40];
- u8 cur_flows[0x20];
+ u8 rp_cur_flows[0x20];
u8 sum_flows[0x20];
- u8 cnp_ignored_high[0x20];
+ u8 rp_cnp_ignored_high[0x20];
- u8 cnp_ignored_low[0x20];
+ u8 rp_cnp_ignored_low[0x20];
- u8 cnp_handled_high[0x20];
+ u8 rp_cnp_handled_high[0x20];
- u8 cnp_handled_low[0x20];
+ u8 rp_cnp_handled_low[0x20];
u8 reserved_at_140[0x100];
@@ -4643,13 +4757,13 @@ struct mlx5_ifc_query_cong_statistics_out_bits {
u8 accumulators_period[0x20];
- u8 ecn_marked_roce_packets_high[0x20];
+ u8 np_ecn_marked_roce_packets_high[0x20];
- u8 ecn_marked_roce_packets_low[0x20];
+ u8 np_ecn_marked_roce_packets_low[0x20];
- u8 cnps_sent_high[0x20];
+ u8 np_cnp_sent_high[0x20];
- u8 cnps_sent_low[0x20];
+ u8 np_cnp_sent_low[0x20];
u8 reserved_at_320[0x560];
};
@@ -5013,6 +5127,7 @@ struct mlx5_ifc_modify_rq_out_bits {
enum {
MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_VSD = 1ULL << 1,
+ MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_SCATTER_FCS = 1ULL << 2,
MLX5_MODIFY_RQ_IN_MODIFY_BITMASK_RQ_COUNTER_SET_ID = 1ULL << 3,
};
@@ -8108,7 +8223,9 @@ struct mlx5_ifc_set_flow_table_root_in_bits {
u8 reserved_at_a0[0x8];
u8 table_id[0x18];
- u8 reserved_at_c0[0x140];
+ u8 reserved_at_c0[0x8];
+ u8 underlay_qpn[0x18];
+ u8 reserved_at_e0[0x120];
};
enum {
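
Editorial note: a hedged sketch of composing one modify-header "set" action against the new set_action_in layout, using the MLX5_SET() accessor from linux/mlx5/device.h. The offset/length semantics shown are assumptions about the device interface, not taken from this patch.

#include <linux/mlx5/device.h>
#include <linux/mlx5/mlx5_ifc.h>

/* "action" points at one element of the actions[] array later passed in an
 * ALLOC_MODIFY_HEADER_CONTEXT command. */
static void example_build_set_ttl(void *action, u8 new_ttl)
{
	MLX5_SET(set_action_in, action, action_type, MLX5_ACTION_TYPE_SET);
	MLX5_SET(set_action_in, action, field,
		 MLX5_ACTION_IN_FIELD_OUT_IP_TTL);
	MLX5_SET(set_action_in, action, offset, 0);	/* assumed: field start */
	MLX5_SET(set_action_in, action, length, 8);	/* assumed: TTL is 8 bits */
	MLX5_SET(set_action_in, action, data, new_ttl);
}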
diff --git a/include/linux/mlx5/qp.h b/include/linux/mlx5/qp.h
index 3096370fe831..bef80d0a0e30 100644
--- a/include/linux/mlx5/qp.h
+++ b/include/linux/mlx5/qp.h
@@ -295,6 +295,16 @@ struct mlx5_av {
u8 rgid[16];
};
+struct mlx5_ib_ah {
+ struct ib_ah ibah;
+ struct mlx5_av av;
+};
+
+static inline struct mlx5_ib_ah *to_mah(struct ib_ah *ibah)
+{
+ return container_of(ibah, struct mlx5_ib_ah, ibah);
+}
+
struct mlx5_wqe_datagram_seg {
struct mlx5_av av;
};
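
Editorial note: to_mah() is the usual container_of() downcast from the core ib_ah handle to the mlx5-private wrapper that carries the address vector. A minimal, illustrative use:

#include <linux/string.h>
#include <linux/mlx5/qp.h>

static void example_copy_av(struct ib_ah *ibah, struct mlx5_av *av)
{
	/* copy the cached address vector out of the driver-private AH */
	memcpy(av, &to_mah(ibah)->av, sizeof(*av));
}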
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 00a8fa7e366a..7cb17c6b97de 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -432,6 +432,10 @@ static inline int pud_devmap(pud_t pud)
{
return 0;
}
+static inline int pgd_devmap(pgd_t pgd)
+{
+ return 0;
+}
#endif
/*
@@ -514,6 +518,28 @@ static inline int is_vmalloc_or_module_addr(const void *x)
}
#endif
+extern void *kvmalloc_node(size_t size, gfp_t flags, int node);
+static inline void *kvmalloc(size_t size, gfp_t flags)
+{
+ return kvmalloc_node(size, flags, NUMA_NO_NODE);
+}
+static inline void *kvzalloc_node(size_t size, gfp_t flags, int node)
+{
+ return kvmalloc_node(size, flags | __GFP_ZERO, node);
+}
+static inline void *kvzalloc(size_t size, gfp_t flags)
+{
+ return kvmalloc(size, flags | __GFP_ZERO);
+}
+
+static inline void *kvmalloc_array(size_t n, size_t size, gfp_t flags)
+{
+ if (size != 0 && n > SIZE_MAX / size)
+ return NULL;
+
+ return kvmalloc(n * size, flags);
+}
+
extern void kvfree(const void *addr);
static inline atomic_t *compound_mapcount_ptr(struct page *page)

@@ -758,19 +784,11 @@ static inline enum zone_type page_zonenum(const struct page *page)
}
#ifdef CONFIG_ZONE_DEVICE
-void get_zone_device_page(struct page *page);
-void put_zone_device_page(struct page *page);
static inline bool is_zone_device_page(const struct page *page)
{
return page_zonenum(page) == ZONE_DEVICE;
}
#else
-static inline void get_zone_device_page(struct page *page)
-{
-}
-static inline void put_zone_device_page(struct page *page)
-{
-}
static inline bool is_zone_device_page(const struct page *page)
{
return false;
@@ -786,9 +804,6 @@ static inline void get_page(struct page *page)
*/
VM_BUG_ON_PAGE(page_ref_count(page) <= 0, page);
page_ref_inc(page);
-
- if (unlikely(is_zone_device_page(page)))
- get_zone_device_page(page);
}
static inline void put_page(struct page *page)
@@ -797,9 +812,6 @@ static inline void put_page(struct page *page)
if (put_page_testzero(page))
__put_page(page);
-
- if (unlikely(is_zone_device_page(page)))
- put_zone_device_page(page);
}
#if defined(CONFIG_SPARSEMEM) && !defined(CONFIG_SPARSEMEM_VMEMMAP)
@@ -2497,7 +2509,6 @@ extern long copy_huge_page_from_user(struct page *dst_page,
#endif /* CONFIG_TRANSPARENT_HUGEPAGE || CONFIG_HUGETLBFS */
extern struct page_ext_operations debug_guardpage_ops;
-extern struct page_ext_operations page_poisoning_ops;
#ifdef CONFIG_DEBUG_PAGEALLOC
extern unsigned int _debug_guardpage_minorder;
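
Editorial note: usage sketch (not part of the patch) for the kvmalloc() family introduced above. The helpers try kmalloc first and transparently fall back to vmalloc; the result is always released with kvfree().

#include <linux/mm.h>
#include <linux/slab.h>

static u32 *example_alloc_table(size_t nr_entries)
{
	/* kvmalloc_array() guards against nr_entries * sizeof(u32) overflow */
	return kvmalloc_array(nr_entries, sizeof(u32), GFP_KERNEL);
}

static void example_free_table(u32 *table)
{
	kvfree(table);	/* correct for both the kmalloc and vmalloc cases */
}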
diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h
index f60f45fe226f..45cdb27791a3 100644
--- a/include/linux/mm_types.h
+++ b/include/linux/mm_types.h
@@ -367,6 +367,11 @@ struct mm_struct {
#endif
unsigned long mmap_base; /* base of mmap area */
unsigned long mmap_legacy_base; /* base of mmap area in bottom-up allocations */
+#ifdef CONFIG_HAVE_ARCH_COMPAT_MMAP_BASES
+	/* Base addresses for compatible mmap() */
+ unsigned long mmap_compat_base;
+ unsigned long mmap_compat_legacy_base;
+#endif
unsigned long task_size; /* size of task vm space */
unsigned long highest_vm_end; /* highest vma end address */
pgd_t * pgd;
diff --git a/include/linux/mmc/card.h b/include/linux/mmc/card.h
index 77e61e0a216a..aad015e0152b 100644
--- a/include/linux/mmc/card.h
+++ b/include/linux/mmc/card.h
@@ -89,6 +89,7 @@ struct mmc_ext_csd {
unsigned int boot_ro_lock; /* ro lock support */
bool boot_ro_lockable;
bool ffu_capable; /* Firmware upgrade support */
+ bool cmdq_en; /* Command Queue enabled */
bool cmdq_support; /* Command Queue supported */
unsigned int cmdq_depth; /* Command Queue depth */
#define MMC_FIRMWARE_LEN 8
@@ -208,6 +209,7 @@ struct sdio_cis {
struct mmc_host;
struct sdio_func;
struct sdio_func_tuple;
+struct mmc_queue_req;
#define SDIO_MAX_FUNCS 7
@@ -267,6 +269,8 @@ struct mmc_card {
#define MMC_QUIRK_TRIM_BROKEN (1<<12) /* Skip trim */
#define MMC_QUIRK_BROKEN_HPI (1<<13) /* Disable broken HPI support */
+ bool reenable_cmdq; /* Re-enable Command Queue */
+
unsigned int erase_size; /* erase size in sectors */
unsigned int erase_shift; /* if erase unit is power 2 */
unsigned int pref_erase; /* in sectors */
@@ -300,6 +304,10 @@ struct mmc_card {
struct dentry *debugfs_root;
struct mmc_part part[MMC_NUM_PHY_PARTITION]; /* physical partitions */
unsigned int nr_parts;
+
+ struct mmc_queue_req *mqrq; /* Shared queue structure */
+ unsigned int bouncesz; /* Bounce buffer size */
+ int qdepth; /* Shared queue depth */
};
static inline bool mmc_large_sector(struct mmc_card *card)
@@ -307,6 +315,8 @@ static inline bool mmc_large_sector(struct mmc_card *card)
return card->ext_csd.data_sector_size == 4096;
}
+bool mmc_card_is_blockaddr(struct mmc_card *card);
+
#define mmc_card_mmc(c) ((c)->type == MMC_TYPE_MMC)
#define mmc_card_sd(c) ((c)->type == MMC_TYPE_SD)
#define mmc_card_sdio(c) ((c)->type == MMC_TYPE_SDIO)
diff --git a/include/linux/mmc/host.h b/include/linux/mmc/host.h
index 83f1c4a9f03b..21385ac0c9b1 100644
--- a/include/linux/mmc/host.h
+++ b/include/linux/mmc/host.h
@@ -17,6 +17,7 @@
#include <linux/mmc/core.h>
#include <linux/mmc/card.h>
#include <linux/mmc/pm.h>
+#include <linux/dma-direction.h>
struct mmc_ios {
unsigned int clock; /* clock rate */
@@ -499,6 +500,11 @@ static inline bool mmc_can_retune(struct mmc_host *host)
return host->can_retune == 1;
}
+static inline enum dma_data_direction mmc_get_dma_dir(struct mmc_data *data)
+{
+ return data->flags & MMC_DATA_WRITE ? DMA_TO_DEVICE : DMA_FROM_DEVICE;
+}
+
int mmc_send_tuning(struct mmc_host *host, u32 opcode, int *cmd_error);
int mmc_abort_tuning(struct mmc_host *host, u32 opcode);
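
Editorial note: a sketch of the intended host-driver use of mmc_get_dma_dir(), deriving the DMA direction from the request's data flags when mapping the scatterlist (the wrapper function is illustrative).

#include <linux/dma-mapping.h>
#include <linux/mmc/host.h>

static int example_map_mmc_data(struct device *dev, struct mmc_data *data)
{
	/* returns the number of mapped segments, or 0 on failure */
	return dma_map_sg(dev, data->sg, data->sg_len,
			  mmc_get_dma_dir(data));
}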
diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 8e02b3750fe0..ebaccd4e7d8c 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -35,7 +35,7 @@
*/
#define PAGE_ALLOC_COSTLY_ORDER 3
-enum {
+enum migratetype {
MIGRATE_UNMOVABLE,
MIGRATE_MOVABLE,
MIGRATE_RECLAIMABLE,
@@ -74,6 +74,11 @@ extern char * const migratetype_names[MIGRATE_TYPES];
# define is_migrate_cma_page(_page) false
#endif
+static inline bool is_migrate_movable(int mt)
+{
+ return is_migrate_cma(mt) || mt == MIGRATE_MOVABLE;
+}
+
#define for_each_migratetype_order(order, type) \
for (order = 0; order < MAX_ORDER; order++) \
for (type = 0; type < MIGRATE_TYPES; type++)
@@ -149,7 +154,6 @@ enum node_stat_item {
NR_UNEVICTABLE, /* " " " " " */
NR_ISOLATED_ANON, /* Temporary isolated pages from anon lru */
NR_ISOLATED_FILE, /* Temporary isolated pages from file lru */
- NR_PAGES_SCANNED, /* pages scanned since last reclaim */
WORKINGSET_REFAULT,
WORKINGSET_ACTIVATE,
WORKINGSET_NODERECLAIM,
@@ -226,6 +230,8 @@ struct lruvec {
struct zone_reclaim_stat reclaim_stat;
/* Evictions & activations on the inactive file list */
atomic_long_t inactive_age;
+ /* Refaults at the time of last reclaim cycle */
+ unsigned long refaults;
#ifdef CONFIG_MEMCG
struct pglist_data *pgdat;
#endif
@@ -630,6 +636,8 @@ typedef struct pglist_data {
int kswapd_order;
enum zone_type kswapd_classzone_idx;
+ int kswapd_failures; /* Number of 'reclaimed == 0' runs */
+
#ifdef CONFIG_COMPACTION
int kcompactd_max_order;
enum zone_type kcompactd_classzone_idx;
diff --git a/include/linux/mod_devicetable.h b/include/linux/mod_devicetable.h
index 8850fcaf50db..566fda587fcf 100644
--- a/include/linux/mod_devicetable.h
+++ b/include/linux/mod_devicetable.h
@@ -428,6 +428,16 @@ struct i2c_device_id {
kernel_ulong_t driver_data; /* Data private to the driver */
};
+/* pci_epf */
+
+#define PCI_EPF_NAME_SIZE 20
+#define PCI_EPF_MODULE_PREFIX "pci_epf:"
+
+struct pci_epf_device_id {
+ char name[PCI_EPF_NAME_SIZE];
+ kernel_ulong_t driver_data;
+};
+
/* spi */
#define SPI_NAME_SIZE 32
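
Editorial note: a hypothetical ID table for a PCI endpoint-function driver using the new pci_epf_device_id. Matching is by name, with module aliases carrying the "pci_epf:" prefix; the function name below is made up for illustration.

#include <linux/mod_devicetable.h>

static const struct pci_epf_device_id example_epf_ids[] = {
	{ .name = "example_epf" },
	{ },	/* sentinel */
};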
diff --git a/include/linux/module.h b/include/linux/module.h
index 0297c5cd7cdf..21f56393602f 100644
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -493,6 +493,7 @@ static inline int module_is_live(struct module *mod)
struct module *__module_text_address(unsigned long addr);
struct module *__module_address(unsigned long addr);
bool is_module_address(unsigned long addr);
+bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr);
bool is_module_percpu_address(unsigned long addr);
bool is_module_text_address(unsigned long addr);
@@ -582,7 +583,7 @@ extern bool try_module_get(struct module *module);
extern void module_put(struct module *module);
#else /*!CONFIG_MODULE_UNLOAD*/
-static inline int try_module_get(struct module *module)
+static inline bool try_module_get(struct module *module)
{
return !module || module_is_live(module);
}
@@ -660,6 +661,11 @@ static inline bool is_module_percpu_address(unsigned long addr)
return false;
}
+static inline bool __is_module_percpu_address(unsigned long addr, unsigned long *can_addr)
+{
+ return false;
+}
+
static inline bool is_module_text_address(unsigned long addr)
{
return false;
@@ -674,9 +680,9 @@ static inline void __module_get(struct module *module)
{
}
-static inline int try_module_get(struct module *module)
+static inline bool try_module_get(struct module *module)
{
- return 1;
+ return true;
}
static inline void module_put(struct module *module)
diff --git a/include/linux/mpls.h b/include/linux/mpls.h
index 9999145bc190..384fb22b6c43 100644
--- a/include/linux/mpls.h
+++ b/include/linux/mpls.h
@@ -3,4 +3,9 @@
#include <uapi/linux/mpls.h>
+#define MPLS_TTL_MASK (MPLS_LS_TTL_MASK >> MPLS_LS_TTL_SHIFT)
+#define MPLS_BOS_MASK (MPLS_LS_S_MASK >> MPLS_LS_S_SHIFT)
+#define MPLS_TC_MASK (MPLS_LS_TC_MASK >> MPLS_LS_TC_SHIFT)
+#define MPLS_LABEL_MASK (MPLS_LS_LABEL_MASK >> MPLS_LS_LABEL_SHIFT)
+
#endif /* _LINUX_MPLS_H */
diff --git a/include/linux/mtd/mtd.h b/include/linux/mtd/mtd.h
index eebdc63cf6af..79b176eca04a 100644
--- a/include/linux/mtd/mtd.h
+++ b/include/linux/mtd/mtd.h
@@ -334,11 +334,6 @@ struct mtd_info {
int (*_get_device) (struct mtd_info *mtd);
void (*_put_device) (struct mtd_info *mtd);
- /* Backing device capabilities for this device
- * - provides mmap capabilities
- */
- struct backing_dev_info *backing_dev_info;
-
struct notifier_block reboot_notifier; /* default mode before reboot */
/* ECC status information */
diff --git a/include/linux/net.h b/include/linux/net.h
index 0620f5e18c96..abcfa46a2bd9 100644
--- a/include/linux/net.h
+++ b/include/linux/net.h
@@ -298,6 +298,9 @@ int kernel_sendpage(struct socket *sock, struct page *page, int offset,
int kernel_sock_ioctl(struct socket *sock, int cmd, unsigned long arg);
int kernel_sock_shutdown(struct socket *sock, enum sock_shutdown_cmd how);
+/* Routine returns the IP overhead imposed by a (caller-protected) socket. */
+u32 kernel_sock_ip_overhead(struct sock *sk);
+
#define MODULE_ALIAS_NETPROTO(proto) \
MODULE_ALIAS("net-pf-" __stringify(proto))
diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h
index 9a0419594e84..1d4737cffc71 100644
--- a/include/linux/netdev_features.h
+++ b/include/linux/netdev_features.h
@@ -54,8 +54,9 @@ enum {
*/
NETIF_F_GSO_TUNNEL_REMCSUM_BIT, /* ... TUNNEL with TSO & REMCSUM */
NETIF_F_GSO_SCTP_BIT, /* ... SCTP fragmentation */
+ NETIF_F_GSO_ESP_BIT, /* ... ESP with TSO */
/**/NETIF_F_GSO_LAST = /* last bit, see GSO_MASK */
- NETIF_F_GSO_SCTP_BIT,
+ NETIF_F_GSO_ESP_BIT,
NETIF_F_FCOE_CRC_BIT, /* FCoE CRC32 */
NETIF_F_SCTP_CRC_BIT, /* SCTP checksum offload */
@@ -73,6 +74,8 @@ enum {
NETIF_F_HW_L2FW_DOFFLOAD_BIT, /* Allow L2 Forwarding in Hardware */
NETIF_F_HW_TC_BIT, /* Offload TC infrastructure */
+ NETIF_F_HW_ESP_BIT, /* Hardware ESP transformation offload */
+ NETIF_F_HW_ESP_TX_CSUM_BIT, /* ESP with TX checksum offload */
/*
* Add your fresh new feature above and remember to update
@@ -129,11 +132,14 @@ enum {
#define NETIF_F_GSO_PARTIAL __NETIF_F(GSO_PARTIAL)
#define NETIF_F_GSO_TUNNEL_REMCSUM __NETIF_F(GSO_TUNNEL_REMCSUM)
#define NETIF_F_GSO_SCTP __NETIF_F(GSO_SCTP)
+#define NETIF_F_GSO_ESP __NETIF_F(GSO_ESP)
#define NETIF_F_HW_VLAN_STAG_FILTER __NETIF_F(HW_VLAN_STAG_FILTER)
#define NETIF_F_HW_VLAN_STAG_RX __NETIF_F(HW_VLAN_STAG_RX)
#define NETIF_F_HW_VLAN_STAG_TX __NETIF_F(HW_VLAN_STAG_TX)
#define NETIF_F_HW_L2FW_DOFFLOAD __NETIF_F(HW_L2FW_DOFFLOAD)
#define NETIF_F_HW_TC __NETIF_F(HW_TC)
+#define NETIF_F_HW_ESP __NETIF_F(HW_ESP)
+#define NETIF_F_HW_ESP_TX_CSUM __NETIF_F(HW_ESP_TX_CSUM)
#define for_each_netdev_feature(mask_addr, bit) \
for_each_set_bit(bit, (unsigned long *)mask_addr, NETDEV_FEATURE_COUNT)
diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h
index 97456b2539e4..9c23bd2efb56 100644
--- a/include/linux/netdevice.h
+++ b/include/linux/netdevice.h
@@ -41,7 +41,6 @@
#include <linux/ethtool.h>
#include <net/net_namespace.h>
-#include <net/dsa.h>
#ifdef CONFIG_DCB
#include <net/dcbnl.h>
#endif
@@ -57,6 +56,8 @@
struct netpoll_info;
struct device;
struct phy_device;
+struct dsa_switch_tree;
+
/* 802.11 specific */
struct wireless_dev;
/* 802.15.4 specific */
@@ -236,8 +237,7 @@ struct netdev_hw_addr_list {
netdev_hw_addr_list_for_each(ha, &(dev)->mc)
struct hh_cache {
- u16 hh_len;
- u16 __pad;
+ unsigned int hh_len;
seqlock_t hh_lock;
/* cached hardware header; allow for machine alignment needs. */
@@ -786,11 +786,11 @@ struct tc_cls_u32_offload;
struct tc_to_netdev {
unsigned int type;
union {
- u8 tc;
struct tc_cls_u32_offload *cls_u32;
struct tc_cls_flower_offload *cls_flower;
struct tc_cls_matchall_offload *cls_mall;
struct tc_cls_bpf_offload *cls_bpf;
+ struct tc_mqprio_qopt *mqprio;
};
bool egress_dev;
};
@@ -813,16 +813,31 @@ enum xdp_netdev_command {
XDP_QUERY_PROG,
};
+struct netlink_ext_ack;
+
struct netdev_xdp {
enum xdp_netdev_command command;
union {
/* XDP_SETUP_PROG */
- struct bpf_prog *prog;
+ struct {
+ struct bpf_prog *prog;
+ struct netlink_ext_ack *extack;
+ };
/* XDP_QUERY_PROG */
bool prog_attached;
};
};
+#ifdef CONFIG_XFRM_OFFLOAD
+struct xfrmdev_ops {
+ int (*xdo_dev_state_add) (struct xfrm_state *x);
+ void (*xdo_dev_state_delete) (struct xfrm_state *x);
+ void (*xdo_dev_state_free) (struct xfrm_state *x);
+ bool (*xdo_dev_offload_ok) (struct sk_buff *skb,
+ struct xfrm_state *x);
+};
+#endif
+
/*
* This structure defines the management hooks for network devices.
* The following hooks can be defined; unless noted otherwise, they are
@@ -1696,6 +1711,10 @@ struct net_device {
const struct ndisc_ops *ndisc_ops;
#endif
+#ifdef CONFIG_XFRM
+ const struct xfrmdev_ops *xfrmdev_ops;
+#endif
+
const struct header_ops *header_ops;
unsigned int flags;
@@ -1715,7 +1734,7 @@ struct net_device {
unsigned int max_mtu;
unsigned short type;
unsigned short hard_header_len;
- unsigned short min_header_len;
+ unsigned char min_header_len;
unsigned short needed_headroom;
unsigned short needed_tailroom;
@@ -1776,6 +1795,7 @@ struct net_device {
unsigned int real_num_rx_queues;
#endif
+ struct bpf_prog __rcu *xdp_prog;
unsigned long gro_flush_timeout;
rx_handler_func_t __rcu *rx_handler;
void __rcu *rx_handler_data;
@@ -1894,6 +1914,13 @@ struct net_device {
};
#define to_net_dev(d) container_of(d, struct net_device, dev)
+static inline bool netif_elide_gro(const struct net_device *dev)
+{
+ if (!(dev->features & NETIF_F_GRO) || dev->xdp_prog)
+ return true;
+ return false;
+}
+
#define NETDEV_ALIGN 32
static inline
@@ -2004,15 +2031,6 @@ void dev_net_set(struct net_device *dev, struct net *net)
write_pnet(&dev->nd_net, net);
}
-static inline bool netdev_uses_dsa(struct net_device *dev)
-{
-#if IS_ENABLED(CONFIG_NET_DSA)
- if (dev->dsa_ptr != NULL)
- return dsa_uses_tagged_protocol(dev->dsa_ptr);
-#endif
- return false;
-}
-
/**
* netdev_priv - access network device private data
* @dev: network device
@@ -3278,7 +3296,8 @@ int dev_get_phys_port_id(struct net_device *dev,
int dev_get_phys_port_name(struct net_device *dev,
char *name, size_t len);
int dev_change_proto_down(struct net_device *dev, bool proto_down);
-int dev_change_xdp_fd(struct net_device *dev, int fd, u32 flags);
+int dev_change_xdp_fd(struct net_device *dev, struct netlink_ext_ack *extack,
+ int fd, u32 flags);
struct sk_buff *validate_xmit_skb_list(struct sk_buff *skb, struct net_device *dev);
struct sk_buff *dev_hard_start_xmit(struct sk_buff *skb, struct net_device *dev,
struct netdev_queue *txq, int *ret);
@@ -3305,6 +3324,7 @@ static __always_inline int ____dev_forward_skb(struct net_device *dev,
void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
extern int netdev_budget;
+extern unsigned int netdev_budget_usecs;
/* Called by rtnetlink.c:rtnl_unlock() */
void netdev_run_todo(void);
@@ -3394,10 +3414,10 @@ static inline void netif_dormant_off(struct net_device *dev)
}
/**
- * netif_dormant - test if carrier present
+ * netif_dormant - test if device is dormant
* @dev: network device
*
- * Check if carrier is present on device
+ * Check if device is dormant.
*/
static inline bool netif_dormant(const struct net_device *dev)
{
@@ -4078,6 +4098,7 @@ static inline bool net_gso_ok(netdev_features_t features, int gso_type)
BUILD_BUG_ON(SKB_GSO_PARTIAL != (NETIF_F_GSO_PARTIAL >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_TUNNEL_REMCSUM != (NETIF_F_GSO_TUNNEL_REMCSUM >> NETIF_F_GSO_SHIFT));
BUILD_BUG_ON(SKB_GSO_SCTP != (NETIF_F_GSO_SCTP >> NETIF_F_GSO_SHIFT));
+ BUILD_BUG_ON(SKB_GSO_ESP != (NETIF_F_GSO_ESP >> NETIF_F_GSO_SHIFT));
return (features & feature) == feature;
}
@@ -4180,6 +4201,11 @@ static inline bool netif_is_ovs_master(const struct net_device *dev)
return dev->priv_flags & IFF_OPENVSWITCH;
}
+static inline bool netif_is_ovs_port(const struct net_device *dev)
+{
+ return dev->priv_flags & IFF_OVS_DATAPATH;
+}
+
static inline bool netif_is_team_master(const struct net_device *dev)
{
return dev->priv_flags & IFF_TEAM;
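
Editorial note: a hedged driver-side sketch of the new ESP offload hooks: populate a struct xfrmdev_ops and attach it to the netdev together with NETIF_F_HW_ESP. All foo_* names are placeholders, and the no-op callback bodies stand in for real hardware programming.

#include <linux/netdevice.h>

#ifdef CONFIG_XFRM_OFFLOAD
struct xfrm_state;

static int foo_xdo_state_add(struct xfrm_state *x) { return 0; }
static void foo_xdo_state_delete(struct xfrm_state *x) { }
static void foo_xdo_state_free(struct xfrm_state *x) { }
static bool foo_xdo_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
{
	return true;	/* a real driver checks what the hardware can handle */
}

static const struct xfrmdev_ops foo_xfrmdev_ops = {
	.xdo_dev_state_add	= foo_xdo_state_add,
	.xdo_dev_state_delete	= foo_xdo_state_delete,
	.xdo_dev_state_free	= foo_xdo_state_free,
	.xdo_dev_offload_ok	= foo_xdo_offload_ok,
};

static void foo_setup_esp_offload(struct net_device *netdev)
{
	netdev->xfrmdev_ops = &foo_xfrmdev_ops;
	netdev->features |= NETIF_F_HW_ESP;
	netdev->hw_features |= NETIF_F_HW_ESP;
}
#endif /* CONFIG_XFRM_OFFLOAD */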
diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h
index 1b49209dd5c7..996711d8a7b4 100644
--- a/include/linux/netfilter/nfnetlink.h
+++ b/include/linux/netfilter/nfnetlink.h
@@ -41,6 +41,11 @@ int nfnetlink_set_err(struct net *net, u32 portid, u32 group, int error);
int nfnetlink_unicast(struct sk_buff *skb, struct net *net, u32 portid,
int flags);
+static inline u16 nfnl_msg_type(u8 subsys, u8 msg_type)
+{
+ return subsys << 8 | msg_type;
+}
+
void nfnl_lock(__u8 subsys_id);
void nfnl_unlock(__u8 subsys_id);
#ifdef CONFIG_PROVE_LOCKING
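
Editorial note: a small usage sketch for the new nfnl_msg_type() helper, which packs the subsystem ID into the high byte of nlmsg_type; the conntrack constants are the existing ones, the wrapper is illustrative.

#include <linux/netfilter/nfnetlink.h>
#include <linux/netfilter/nfnetlink_conntrack.h>

static void example_set_type(struct nlmsghdr *nlh)
{
	nlh->nlmsg_type = nfnl_msg_type(NFNL_SUBSYS_CTNETLINK,
					IPCTNL_MSG_CT_NEW);
}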
diff --git a/include/linux/netfilter_bridge/ebtables.h b/include/linux/netfilter_bridge/ebtables.h
index 984b2112c77b..a30efb437e6d 100644
--- a/include/linux/netfilter_bridge/ebtables.h
+++ b/include/linux/netfilter_bridge/ebtables.h
@@ -109,8 +109,10 @@ struct ebt_table {
#define EBT_ALIGN(s) (((s) + (__alignof__(struct _xt_align)-1)) & \
~(__alignof__(struct _xt_align)-1))
extern struct ebt_table *ebt_register_table(struct net *net,
- const struct ebt_table *table);
-extern void ebt_unregister_table(struct net *net, struct ebt_table *table);
+ const struct ebt_table *table,
+ const struct nf_hook_ops *);
+extern void ebt_unregister_table(struct net *net, struct ebt_table *table,
+ const struct nf_hook_ops *);
extern unsigned int ebt_do_table(struct sk_buff *skb,
const struct nf_hook_state *state,
struct ebt_table *table);
diff --git a/include/linux/netlink.h b/include/linux/netlink.h
index da14ab61f363..5fff5ba5964e 100644
--- a/include/linux/netlink.h
+++ b/include/linux/netlink.h
@@ -62,11 +62,47 @@ netlink_kernel_create(struct net *net, int unit, struct netlink_kernel_cfg *cfg)
return __netlink_kernel_create(net, unit, THIS_MODULE, cfg);
}
+/* this can be increased when necessary - don't expose to userland */
+#define NETLINK_MAX_COOKIE_LEN 20
+
+/**
+ * struct netlink_ext_ack - netlink extended ACK report struct
+ * @_msg: message string to report - don't access directly, use
+ * %NL_SET_ERR_MSG
+ * @bad_attr: attribute with error
+ * @cookie: cookie data to return to userspace (for success)
+ * @cookie_len: actual cookie data length
+ */
+struct netlink_ext_ack {
+ const char *_msg;
+ const struct nlattr *bad_attr;
+ u8 cookie[NETLINK_MAX_COOKIE_LEN];
+ u8 cookie_len;
+};
+
+/* Always use this macro, this allows later putting the
+ * message into a separate section or such for things
+ * like translation or listing all possible messages.
+ * Currently string formatting is not supported (due
+ * to the lack of an output buffer.)
+ */
+#define NL_SET_ERR_MSG(extack, msg) do { \
+ static const char __msg[] = (msg); \
+ struct netlink_ext_ack *__extack = (extack); \
+ \
+ if (__extack) \
+ __extack->_msg = __msg; \
+} while (0)
+
+#define NL_SET_ERR_MSG_MOD(extack, msg) \
+ NL_SET_ERR_MSG((extack), KBUILD_MODNAME ": " msg)
+
extern void netlink_kernel_release(struct sock *sk);
extern int __netlink_change_ngroups(struct sock *sk, unsigned int groups);
extern int netlink_change_ngroups(struct sock *sk, unsigned int groups);
extern void __netlink_clear_multicast_users(struct sock *sk, unsigned int group);
-extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err);
+extern void netlink_ack(struct sk_buff *in_skb, struct nlmsghdr *nlh, int err,
+ const struct netlink_ext_ack *extack);
extern int netlink_has_listeners(struct sock *sk, unsigned int group);
extern int netlink_unicast(struct sock *ssk, struct sk_buff *skb, __u32 portid, int nonblock);
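
Editorial note: a sketch of how a message handler reports an extended ACK with the new NL_SET_ERR_MSG() macro; the handler name and the validation check are illustrative, and the extack pointer is what netlink_ack() now forwards to userspace.

#include <linux/netlink.h>

static int example_doit(struct sk_buff *skb, struct nlmsghdr *nlh,
			struct netlink_ext_ack *extack)
{
	if (nlh->nlmsg_len < NLMSG_HDRLEN + 4) {
		NL_SET_ERR_MSG(extack, "message payload too short");
		return -EINVAL;
	}
	return 0;
}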
diff --git a/include/linux/nfs_fs.h b/include/linux/nfs_fs.h
index 287f34161086..bb0eb2c9acca 100644
--- a/include/linux/nfs_fs.h
+++ b/include/linux/nfs_fs.h
@@ -76,6 +76,7 @@ struct nfs_open_context {
#define NFS_CONTEXT_ERROR_WRITE (0)
#define NFS_CONTEXT_RESEND_WRITES (1)
#define NFS_CONTEXT_BAD (2)
+#define NFS_CONTEXT_UNLOCK (3)
int error;
struct list_head list;
@@ -499,25 +500,13 @@ extern int nfs_updatepage(struct file *, struct page *, unsigned int, unsigned
*/
extern int nfs_sync_inode(struct inode *inode);
extern int nfs_wb_all(struct inode *inode);
-extern int nfs_wb_single_page(struct inode *inode, struct page *page, bool launder);
+extern int nfs_wb_page(struct inode *inode, struct page *page);
extern int nfs_wb_page_cancel(struct inode *inode, struct page* page);
extern int nfs_commit_inode(struct inode *, int);
-extern struct nfs_commit_data *nfs_commitdata_alloc(void);
+extern struct nfs_commit_data *nfs_commitdata_alloc(bool never_fail);
extern void nfs_commit_free(struct nfs_commit_data *data);
static inline int
-nfs_wb_launder_page(struct inode *inode, struct page *page)
-{
- return nfs_wb_single_page(inode, page, true);
-}
-
-static inline int
-nfs_wb_page(struct inode *inode, struct page *page)
-{
- return nfs_wb_single_page(inode, page, false);
-}
-
-static inline int
nfs_have_writebacks(struct inode *inode)
{
return NFS_I(inode)->nrequests != 0;
diff --git a/include/linux/nfs_fs_sb.h b/include/linux/nfs_fs_sb.h
index b34097c67848..e418a1096662 100644
--- a/include/linux/nfs_fs_sb.h
+++ b/include/linux/nfs_fs_sb.h
@@ -133,7 +133,6 @@ struct nfs_server {
struct rpc_clnt * client_acl; /* ACL RPC client handle */
struct nlm_host *nlm_host; /* NLM client handle */
struct nfs_iostats __percpu *io_stats; /* I/O statistics */
- struct backing_dev_info backing_dev_info;
atomic_long_t writeback; /* number of writeback pages */
int flags; /* various flags */
unsigned int caps; /* server capabilities */
@@ -222,6 +221,7 @@ struct nfs_server {
u32 mountd_version;
unsigned short mountd_port;
unsigned short mountd_protocol;
+ struct rpc_wait_queue uoc_rpcwaitq;
};
/* Server capabilities */
diff --git a/include/linux/nfs_page.h b/include/linux/nfs_page.h
index 957049f72290..247cc3d3498f 100644
--- a/include/linux/nfs_page.h
+++ b/include/linux/nfs_page.h
@@ -64,7 +64,6 @@ struct nfs_pageio_ops {
};
struct nfs_rw_ops {
- const fmode_t rw_mode;
struct nfs_pgio_header *(*rw_alloc_header)(void);
void (*rw_free_header)(struct nfs_pgio_header *);
int (*rw_done)(struct rpc_task *, struct nfs_pgio_header *,
@@ -124,7 +123,8 @@ extern void nfs_pageio_init(struct nfs_pageio_descriptor *desc,
const struct nfs_pgio_completion_ops *compl_ops,
const struct nfs_rw_ops *rw_ops,
size_t bsize,
- int how);
+ int how,
+ gfp_t gfp_flags);
extern int nfs_pageio_add_request(struct nfs_pageio_descriptor *,
struct nfs_page *);
extern int nfs_pageio_resend(struct nfs_pageio_descriptor *,
@@ -141,6 +141,7 @@ extern int nfs_page_group_lock(struct nfs_page *, bool);
extern void nfs_page_group_lock_wait(struct nfs_page *);
extern void nfs_page_group_unlock(struct nfs_page *);
extern bool nfs_page_group_sync_on_bit(struct nfs_page *, unsigned int);
+extern bool nfs_async_iocounter_wait(struct rpc_task *, struct nfs_lock_context *);
/*
* Lock the page of an asynchronous request
diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h
index 348f7c158084..b28c83475ee8 100644
--- a/include/linux/nfs_xdr.h
+++ b/include/linux/nfs_xdr.h
@@ -1383,6 +1383,7 @@ struct nfs42_copy_res {
struct nfs42_write_res write_res;
bool consecutive;
bool synchronous;
+ struct nfs_commitres commit_res;
};
struct nfs42_seek_args {
@@ -1427,6 +1428,7 @@ struct nfs_pgio_header {
struct list_head pages;
struct nfs_page *req;
struct nfs_writeverf verf; /* Used for writes */
+ fmode_t rw_mode;
struct pnfs_layout_segment *lseg;
loff_t io_start;
const struct rpc_call_ops *mds_ops;
@@ -1550,6 +1552,7 @@ struct nfs_rpc_ops {
const struct inode_operations *dir_inode_ops;
const struct inode_operations *file_inode_ops;
const struct file_operations *file_ops;
+ const struct nlmclnt_operations *nlmclnt_ops;
int (*getroot) (struct nfs_server *, struct nfs_fh *,
struct nfs_fsinfo *);
diff --git a/include/linux/nvme-fc-driver.h b/include/linux/nvme-fc-driver.h
index f21471f7ee40..0db37158a61d 100644
--- a/include/linux/nvme-fc-driver.h
+++ b/include/linux/nvme-fc-driver.h
@@ -137,9 +137,9 @@ enum nvmefc_fcp_datadir {
* transferred. Should equal payload_length on success.
* @rcv_rsplen: length, in bytes, of the FCP RSP IU received.
* @status: Completion status of the FCP operation. must be 0 upon success,
- * NVME_SC_FC_xxx value upon failure. Note: this is NOT a
- * reflection of the NVME CQE completion status. Only the status
- * of the FCP operation at the NVME-FC level.
+ * negative errno value upon failure (ex: -EIO). Note: this is
+ * NOT a reflection of the NVME CQE completion status. Only the
+ * status of the FCP operation at the NVME-FC level.
*/
struct nvmefc_fcp_req {
void *cmdaddr;
@@ -533,9 +533,6 @@ enum {
* rsp as well
*/
NVMET_FCOP_RSP = 4, /* send rsp frame */
- NVMET_FCOP_ABORT = 5, /* abort exchange via ABTS */
- NVMET_FCOP_BA_ACC = 6, /* send BA_ACC */
- NVMET_FCOP_BA_RJT = 7, /* send BA_RJT */
};
/**
@@ -572,8 +569,6 @@ enum {
* upon compeletion of the operation. The nvmet-fc layer will also set a
* private pointer for its own use in the done routine.
*
- * Note: the LLDD must never fail a NVMET_FCOP_ABORT request !!
- *
* Values set by the NVMET-FC layer prior to calling the LLDD fcp_op
* entrypoint.
* @op: Indicates the FCP IU operation to perform (see NVMET_FCOP_xxx)
@@ -655,6 +650,22 @@ enum {
* on. The transport should pick a cpu to schedule the work
* on.
*/
+ NVMET_FCTGTFEAT_CMD_IN_ISR = (1 << 2),
+ /* Bit 2: When 0, the LLDD is calling the cmd rcv handler
+ * in a non-isr context, allowing the transport to finish
+ * op completion in the calling context. When 1, the LLDD
+ * is calling the cmd rcv handler in an ISR context,
+ * requiring the transport to transition to a workqueue
+ * for op completion.
+ */
+ NVMET_FCTGTFEAT_OPDONE_IN_ISR = (1 << 3),
+ /* Bit 3: When 0, the LLDD is calling the op done handler
+ * in a non-isr context, allowing the transport to finish
+ * op completion in the calling context. When 1, the LLDD
+ * is calling the op done handler in an ISR context,
+ * requiring the transport to transition to a workqueue
+ * for op completion.
+ */
};
@@ -725,12 +736,12 @@ struct nvmet_fc_target_port {
* be freed/released.
* Entrypoint is Mandatory.
*
- * @fcp_op: Called to perform a data transfer, transmit a response, or
- * abort an FCP opertion. The nvmefc_tgt_fcp_req structure is the same
- * LLDD-supplied exchange structure specified in the
- * nvmet_fc_rcv_fcp_req() call made when the FCP CMD IU was received.
- * The op field in the structure shall indicate the operation for
- * the LLDD to perform relative to the io.
+ * @fcp_op: Called to perform a data transfer or transmit a response.
+ * The nvmefc_tgt_fcp_req structure is the same LLDD-supplied
+ * exchange structure specified in the nvmet_fc_rcv_fcp_req() call
+ * made when the FCP CMD IU was received. The op field in the
+ * structure shall indicate the operation for the LLDD to perform
+ * relative to the io.
* NVMET_FCOP_READDATA operation: the LLDD is to send the
* payload data (described by sglist) to the host in 1 or
* more FC sequences (preferrably 1). Note: the fc-nvme layer
@@ -752,29 +763,31 @@ struct nvmet_fc_target_port {
* successfully, the LLDD is to update the nvmefc_tgt_fcp_req
* transferred_length field and may subsequently transmit the
* FCP_RSP iu payload (described by rspbuf, rspdma, rsplen).
- * The LLDD is to await FCP_CONF reception to confirm the RSP
- * reception by the host. The LLDD may retramsit the FCP_RSP iu
- * if necessary per FC-NVME. Upon reception of FCP_CONF, or upon
- * FCP_CONF failure, the LLDD is to set the nvmefc_tgt_fcp_req
- * fcp_error field and consider the operation complete..
+ * If FCP_CONF is supported, the LLDD is to await FCP_CONF
+ * reception to confirm the RSP reception by the host. The LLDD
+ * may retransmit the FCP_RSP iu if necessary per FC-NVME. Upon
+ * transmission of the FCP_RSP iu if FCP_CONF is not supported,
+ * or upon success/failure of FCP_CONF if it is supported, the
+ * LLDD is to set the nvmefc_tgt_fcp_req fcp_error field and
+ * consider the operation complete.
* NVMET_FCOP_RSP: the LLDD is to transmit the FCP_RSP iu payload
- * (described by rspbuf, rspdma, rsplen). The LLDD is to await
- * FCP_CONF reception to confirm the RSP reception by the host.
- * The LLDD may retramsit the FCP_RSP iu if necessary per FC-NVME.
- * Upon reception of FCP_CONF, or upon FCP_CONF failure, the
+ * (described by rspbuf, rspdma, rsplen). If FCP_CONF is
+ * supported, the LLDD is to await FCP_CONF reception to confirm
+ * the RSP reception by the host. The LLDD may retransmit the
+ * FCP_RSP iu if FCP_CONF is not received per FC-NVME. Upon
+ * transmission of the FCP_RSP iu if FCP_CONF is not supported,
+ * or upon success/failure of FCP_CONF if it is supported, the
* LLDD is to set the nvmefc_tgt_fcp_req fcp_error field and
- * consider the operation complete..
- * NVMET_FCOP_ABORT: the LLDD is to terminate the exchange
- * corresponding to the fcp operation. The LLDD shall send
- * ABTS and follow FC exchange abort-multi rules, including
- * ABTS retries and possible logout.
+ * consider the operation complete.
* Upon completing the indicated operation, the LLDD is to set the
* status fields for the operation (tranferred_length and fcp_error
- * status) in the request, then all the "done" routine
- * indicated in the fcp request. Upon return from the "done"
- * routine for either a NVMET_FCOP_RSP or NVMET_FCOP_ABORT operation
- * the fc-nvme layer will not longer reference the fcp request,
- * allowing the LLDD to free/release the fcp request.
+ * status) in the request, then call the "done" routine
+ * indicated in the fcp request. After the operation completes,
+ * regardless of whether the FCP_RSP iu was successfully transmitted,
+ * the LLDD-supplied exchange structure must remain valid until the
+ * transport calls the fcp_req_release() callback to return ownership
+ * of the exchange structure back to the LLDD so that it may be used
+ * for another fcp command.
* Note: when calling the done routine for READDATA or WRITEDATA
* operations, the fc-nvme layer may immediate convert, in the same
* thread and before returning to the LLDD, the fcp operation to
@@ -786,6 +799,22 @@ struct nvmet_fc_target_port {
* Returns 0 on success, -<errno> on failure (Ex: -EIO)
* Entrypoint is Mandatory.
*
+ * @fcp_abort: Called by the transport to abort an active command.
+ * The command may be in-between operations (nothing active in LLDD)
+ * or may have an active WRITEDATA operation pending. The LLDD is to
+ * initiate the ABTS process for the command and return from the
+ * callback. The ABTS does not need to be complete on the command.
+ * The fcp_abort callback inherently cannot fail. After the
+ * fcp_abort() callback completes, the transport will wait for any
+ * outstanding operation (if there was one) to complete, then will
+ * call the fcp_req_release() callback to return the command's
+ * exchange context back to the LLDD.
+ *
+ * @fcp_req_release: Called by the transport to return a nvmefc_tgt_fcp_req
+ * to the LLDD after all operations on the fcp operation are complete.
+ * This may be due to the command completing or upon completion of
+ * abort cleanup.
+ *
* @max_hw_queues: indicates the maximum number of hw queues the LLDD
* supports for cpu affinitization.
* Value is Mandatory. Must be at least 1.
@@ -820,7 +849,11 @@ struct nvmet_fc_target_template {
int (*xmt_ls_rsp)(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_ls_req *tls_req);
int (*fcp_op)(struct nvmet_fc_target_port *tgtport,
- struct nvmefc_tgt_fcp_req *);
+ struct nvmefc_tgt_fcp_req *fcpreq);
+ void (*fcp_abort)(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *fcpreq);
+ void (*fcp_req_release)(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *fcpreq);
u32 max_hw_queues;
u16 max_sgl_segments;
@@ -848,4 +881,7 @@ int nvmet_fc_rcv_fcp_req(struct nvmet_fc_target_port *tgtport,
struct nvmefc_tgt_fcp_req *fcpreq,
void *cmdiubuf, u32 cmdiubuf_len);
+void nvmet_fc_rcv_fcp_abort(struct nvmet_fc_target_port *tgtport,
+ struct nvmefc_tgt_fcp_req *fcpreq);
+
#endif /* _NVME_FC_DRIVER_H */
diff --git a/include/linux/nvme-fc.h b/include/linux/nvme-fc.h
index 4b45226bd604..e997c4a49a88 100644
--- a/include/linux/nvme-fc.h
+++ b/include/linux/nvme-fc.h
@@ -16,8 +16,7 @@
*/
/*
- * This file contains definitions relative to FC-NVME r1.11 and a few
- * newer items
+ * This file contains definitions relative to FC-NVME r1.14 (16-020vB).
*/
#ifndef _NVME_FC_H
@@ -47,8 +46,15 @@ struct nvme_fc_cmd_iu {
#define NVME_FC_SIZEOF_ZEROS_RSP 12
+enum {
+ FCNVME_SC_SUCCESS = 0,
+ FCNVME_SC_INVALID_FIELD = 1,
+ FCNVME_SC_INVALID_CONNID = 2,
+};
+
struct nvme_fc_ersp_iu {
- __u8 rsvd0[2];
+ __u8 status_code;
+ __u8 rsvd1;
__be16 iu_len;
__be32 rsn;
__be32 xfrd_len;
@@ -58,7 +64,7 @@ struct nvme_fc_ersp_iu {
};
-/* FC-NVME r1.03/16-119v0 NVME Link Services */
+/* FC-NVME Link Services */
enum {
FCNVME_LS_RSVD = 0,
FCNVME_LS_RJT = 1,
@@ -68,7 +74,7 @@ enum {
FCNVME_LS_DISCONNECT = 5,
};
-/* FC-NVME r1.03/16-119v0 NVME Link Service Descriptors */
+/* FC-NVME Link Service Descriptors */
enum {
FCNVME_LSDESC_RSVD = 0x0,
FCNVME_LSDESC_RQST = 0x1,
@@ -92,7 +98,6 @@ static inline __be32 fcnvme_lsdesc_len(size_t sz)
return cpu_to_be32(sz - (2 * sizeof(u32)));
}
-
struct fcnvme_ls_rqst_w0 {
u8 ls_cmd; /* FCNVME_LS_xxx */
u8 zeros[3];
@@ -106,8 +111,53 @@ struct fcnvme_lsdesc_rqst {
__be32 rsvd12;
};
+/* FC-NVME LS RJT reason_code values */
+enum fcnvme_ls_rjt_reason {
+ FCNVME_RJT_RC_NONE = 0,
+ /* no reason - not to be sent */
+
+ FCNVME_RJT_RC_INVAL = 0x01,
+ /* invalid NVMe_LS command code */
+
+ FCNVME_RJT_RC_LOGIC = 0x03,
+ /* logical error */
+
+ FCNVME_RJT_RC_UNAB = 0x09,
+ /* unable to perform command request */
+
+ FCNVME_RJT_RC_UNSUP = 0x0b,
+ /* command not supported */
+
+ FCNVME_RJT_RC_INPROG = 0x0e,
+ /* command already in progress */
+ FCNVME_RJT_RC_INV_ASSOC = 0x40,
+ /* Invalid Association ID*/
+ FCNVME_RJT_RC_INV_CONN = 0x41,
+ /* Invalid Connection ID*/
+
+ FCNVME_RJT_RC_VENDOR = 0xff,
+ /* vendor specific error */
+};
+
+/* FC-NVME LS RJT reason_explanation values */
+enum fcnvme_ls_rjt_explan {
+ FCNVME_RJT_EXP_NONE = 0x00,
+ /* No additional explanation */
+
+ FCNVME_RJT_EXP_OXID_RXID = 0x17,
+ /* invalid OX_ID-RX_ID combination */
+
+ FCNVME_RJT_EXP_INSUF_RES = 0x29,
+ /* insufficient resources */
+
+ FCNVME_RJT_EXP_UNAB_DATA = 0x2a,
+ /* unable to supply requested data */
+
+ FCNVME_RJT_EXP_INV_LEN = 0x2d,
+ /* Invalid payload length */
+};
/* FCNVME_LSDESC_RJT */
struct fcnvme_lsdesc_rjt {
@@ -119,15 +169,15 @@ struct fcnvme_lsdesc_rjt {
* Reject reason and explanaction codes are generic
* to ELs's from LS-3.
*/
- u8 reason_code;
- u8 reason_explanation;
+ u8 reason_code; /* fcnvme_ls_rjt_reason */
+ u8 reason_explanation; /* fcnvme_ls_rjt_explan */
u8 vendor;
__be32 rsvd12;
};
-#define FCNVME_ASSOC_HOSTID_LEN 64
+#define FCNVME_ASSOC_HOSTID_LEN 16
#define FCNVME_ASSOC_HOSTNQN_LEN 256
#define FCNVME_ASSOC_SUBNQN_LEN 256
diff --git a/include/linux/nvme.h b/include/linux/nvme.h
index 9061780b141f..b625bacf37ef 100644
--- a/include/linux/nvme.h
+++ b/include/linux/nvme.h
@@ -245,6 +245,7 @@ enum {
NVME_CTRL_ONCS_WRITE_ZEROES = 1 << 3,
NVME_CTRL_VWC_PRESENT = 1 << 0,
NVME_CTRL_OACS_SEC_SUPP = 1 << 0,
+ NVME_CTRL_OACS_DBBUF_SUPP = 1 << 7,
};
struct nvme_lbaf {
@@ -603,6 +604,7 @@ enum nvme_admin_opcode {
nvme_admin_download_fw = 0x11,
nvme_admin_ns_attach = 0x15,
nvme_admin_keep_alive = 0x18,
+ nvme_admin_dbbuf = 0x7C,
nvme_admin_format_nvm = 0x80,
nvme_admin_security_send = 0x81,
nvme_admin_security_recv = 0x82,
@@ -874,6 +876,16 @@ struct nvmf_property_get_command {
__u8 resv4[16];
};
+struct nvme_dbbuf {
+ __u8 opcode;
+ __u8 flags;
+ __u16 command_id;
+ __u32 rsvd1[5];
+ __le64 prp1;
+ __le64 prp2;
+ __u32 rsvd12[6];
+};
+
struct nvme_command {
union {
struct nvme_common_command common;
@@ -893,6 +905,7 @@ struct nvme_command {
struct nvmf_connect_command connect;
struct nvmf_property_set_command prop_set;
struct nvmf_property_get_command prop_get;
+ struct nvme_dbbuf dbbuf;
};
};
diff --git a/include/linux/of.h b/include/linux/of.h
index 21e6323de0f3..50fcdb54087f 100644
--- a/include/linux/of.h
+++ b/include/linux/of.h
@@ -159,6 +159,8 @@ static inline struct device_node *to_of_node(struct fwnode_handle *fwnode)
container_of(fwnode, struct device_node, fwnode) : NULL;
}
+#define of_fwnode_handle(node) (&(node)->fwnode)
+
static inline bool of_have_populated_dt(void)
{
return of_root != NULL;
@@ -292,6 +294,9 @@ extern int of_property_count_elems_of_size(const struct device_node *np,
extern int of_property_read_u32_index(const struct device_node *np,
const char *propname,
u32 index, u32 *out_value);
+extern int of_property_read_u64_index(const struct device_node *np,
+ const char *propname,
+ u32 index, u64 *out_value);
extern int of_property_read_variable_u8_array(const struct device_node *np,
const char *propname, u8 *out_values,
size_t sz_min, size_t sz_max);
@@ -602,6 +607,8 @@ static inline struct device_node *of_find_node_with_property(
return NULL;
}
+#define of_fwnode_handle(node) NULL
+
static inline bool of_have_populated_dt(void)
{
return false;
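
Editorial note: a sketch for the new indexed u64 accessor; "example,ranges" is a made-up property name used only for illustration.

#include <linux/of.h>

static u64 example_read_second_u64(const struct device_node *np)
{
	u64 val = 0;

	/* read element 1 of a multi-u64 property; 0 is returned on error here */
	if (of_property_read_u64_index(np, "example,ranges", 1, &val))
		return 0;

	return val;
}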
diff --git a/include/linux/of_device.h b/include/linux/of_device.h
index c12dace043f3..b4ad8b4f8506 100644
--- a/include/linux/of_device.h
+++ b/include/linux/of_device.h
@@ -34,8 +34,7 @@ extern void of_device_unregister(struct platform_device *ofdev);
extern const void *of_device_get_match_data(const struct device *dev);
-extern ssize_t of_device_get_modalias(struct device *dev,
- char *str, ssize_t len);
+extern ssize_t of_device_modalias(struct device *dev, char *str, ssize_t len);
extern int of_device_request_module(struct device *dev);
extern void of_device_uevent(struct device *dev, struct kobj_uevent_env *env);
@@ -55,7 +54,8 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
return of_node_get(cpu_dev->of_node);
}
-void of_dma_configure(struct device *dev, struct device_node *np);
+int of_dma_configure(struct device *dev, struct device_node *np);
+void of_dma_deconfigure(struct device *dev);
#else /* CONFIG_OF */
static inline int of_driver_match_device(struct device *dev,
@@ -72,8 +72,8 @@ static inline const void *of_device_get_match_data(const struct device *dev)
return NULL;
}
-static inline int of_device_get_modalias(struct device *dev,
- char *str, ssize_t len)
+static inline int of_device_modalias(struct device *dev,
+ char *str, ssize_t len)
{
return -ENODEV;
}
@@ -103,7 +103,12 @@ static inline struct device_node *of_cpu_device_node_get(int cpu)
{
return NULL;
}
-static inline void of_dma_configure(struct device *dev, struct device_node *np)
+
+static inline int of_dma_configure(struct device *dev, struct device_node *np)
+{
+ return 0;
+}
+static inline void of_dma_deconfigure(struct device *dev)
{}
#endif /* CONFIG_OF */
diff --git a/include/linux/of_gpio.h b/include/linux/of_gpio.h
index 3f87ea5b8bee..1e089d5a182b 100644
--- a/include/linux/of_gpio.h
+++ b/include/linux/of_gpio.h
@@ -30,6 +30,7 @@ struct device_node;
enum of_gpio_flags {
OF_GPIO_ACTIVE_LOW = 0x1,
OF_GPIO_SINGLE_ENDED = 0x2,
+ OF_GPIO_OPEN_DRAIN = 0x4,
};
#ifdef CONFIG_OF_GPIO
diff --git a/include/linux/of_irq.h b/include/linux/of_irq.h
index 1e0deb8e8494..ec6b11deb773 100644
--- a/include/linux/of_irq.h
+++ b/include/linux/of_irq.h
@@ -8,7 +8,7 @@
#include <linux/ioport.h>
#include <linux/of.h>
-typedef int (*of_irq_init_cb_t)(struct device_node *, struct device_node *);
+typedef int const (*of_irq_init_cb_t)(struct device_node *, struct device_node *);
/*
* Workarounds only applied to 32bit powermac machines
diff --git a/include/linux/of_mdio.h b/include/linux/of_mdio.h
index a58cca8bcb29..ba35ba520487 100644
--- a/include/linux/of_mdio.h
+++ b/include/linux/of_mdio.h
@@ -12,7 +12,7 @@
#include <linux/phy.h>
#include <linux/of.h>
-#ifdef CONFIG_OF
+#if IS_ENABLED(CONFIG_OF_MDIO)
extern int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np);
extern struct phy_device *of_phy_find_device(struct device_node *phy_np);
extern struct phy_device *of_phy_connect(struct net_device *dev,
@@ -32,7 +32,7 @@ extern int of_phy_register_fixed_link(struct device_node *np);
extern void of_phy_deregister_fixed_link(struct device_node *np);
extern bool of_phy_is_fixed_link(struct device_node *np);
-#else /* CONFIG_OF */
+#else /* CONFIG_OF_MDIO */
static inline int of_mdiobus_register(struct mii_bus *mdio, struct device_node *np)
{
/*
diff --git a/include/linux/of_pci.h b/include/linux/of_pci.h
index 0e0974eceb80..518c8d20647a 100644
--- a/include/linux/of_pci.h
+++ b/include/linux/of_pci.h
@@ -85,15 +85,4 @@ static inline int of_pci_get_host_bridge_resources(struct device_node *dev,
}
#endif
-#if defined(CONFIG_OF) && defined(CONFIG_PCI_MSI)
-int of_pci_msi_chip_add(struct msi_controller *chip);
-void of_pci_msi_chip_remove(struct msi_controller *chip);
-struct msi_controller *of_pci_find_msi_chip_by_node(struct device_node *of_node);
-#else
-static inline int of_pci_msi_chip_add(struct msi_controller *chip) { return -EINVAL; }
-static inline void of_pci_msi_chip_remove(struct msi_controller *chip) { }
-static inline struct msi_controller *
-of_pci_find_msi_chip_by_node(struct device_node *of_node) { return NULL; }
-#endif
-
#endif
diff --git a/include/linux/of_platform.h b/include/linux/of_platform.h
index 956a1006aefc..dc8224ae28d5 100644
--- a/include/linux/of_platform.h
+++ b/include/linux/of_platform.h
@@ -76,6 +76,10 @@ extern int of_platform_default_populate(struct device_node *root,
const struct of_dev_auxdata *lookup,
struct device *parent);
extern void of_platform_depopulate(struct device *parent);
+
+extern int devm_of_platform_populate(struct device *dev);
+
+extern void devm_of_platform_depopulate(struct device *dev);
#else
static inline int of_platform_populate(struct device_node *root,
const struct of_device_id *matches,
@@ -91,6 +95,13 @@ static inline int of_platform_default_populate(struct device_node *root,
return -ENODEV;
}
static inline void of_platform_depopulate(struct device *parent) { }
+
+static inline int devm_of_platform_populate(struct device *dev)
+{
+ return -ENODEV;
+}
+
+static inline void devm_of_platform_depopulate(struct device *dev) { }
#endif
#if defined(CONFIG_OF_DYNAMIC) && defined(CONFIG_OF_ADDRESS)
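
Editorial note: a minimal probe sketch for the new devm variant. Children populated this way are depopulated automatically when the driver unbinds, so no matching call is needed in .remove; the driver name is illustrative.

#include <linux/of_platform.h>
#include <linux/platform_device.h>

static int example_probe(struct platform_device *pdev)
{
	/* register child platform devices described below this node in DT */
	return devm_of_platform_populate(&pdev->dev);
}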
diff --git a/include/linux/page-isolation.h b/include/linux/page-isolation.h
index 047d64706f2a..d4cd2014fa6f 100644
--- a/include/linux/page-isolation.h
+++ b/include/linux/page-isolation.h
@@ -33,10 +33,7 @@ bool has_unmovable_pages(struct zone *zone, struct page *page, int count,
bool skip_hwpoisoned_pages);
void set_pageblock_migratetype(struct page *page, int migratetype);
int move_freepages_block(struct zone *zone, struct page *page,
- int migratetype);
-int move_freepages(struct zone *zone,
- struct page *start_page, struct page *end_page,
- int migratetype);
+ int migratetype, int *num_movable);
/*
* Changes migrate type in [start_pfn, end_pfn) to be MIGRATE_ISOLATE.
diff --git a/include/linux/pagemap.h b/include/linux/pagemap.h
index 84943e8057ef..316a19f6b635 100644
--- a/include/linux/pagemap.h
+++ b/include/linux/pagemap.h
@@ -148,7 +148,7 @@ static inline int page_cache_get_speculative(struct page *page)
#ifdef CONFIG_TINY_RCU
# ifdef CONFIG_PREEMPT_COUNT
- VM_BUG_ON(!in_atomic());
+ VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
/*
* Preempt must be disabled here - we rely on rcu_read_lock doing
@@ -186,7 +186,7 @@ static inline int page_cache_add_speculative(struct page *page, int count)
#if !defined(CONFIG_SMP) && defined(CONFIG_TREE_RCU)
# ifdef CONFIG_PREEMPT_COUNT
- VM_BUG_ON(!in_atomic());
+ VM_BUG_ON(!in_atomic() && !irqs_disabled());
# endif
VM_BUG_ON_PAGE(page_count(page) == 0, page);
page_ref_add(page, count);
diff --git a/include/linux/pci-ecam.h b/include/linux/pci-ecam.h
index f0d2b9451270..809c2f1873ac 100644
--- a/include/linux/pci-ecam.h
+++ b/include/linux/pci-ecam.h
@@ -16,6 +16,7 @@
#ifndef DRIVERS_PCI_ECAM_H
#define DRIVERS_PCI_ECAM_H
+#include <linux/pci.h>
#include <linux/kernel.h>
#include <linux/platform_device.h>
@@ -68,7 +69,7 @@ extern struct pci_ecam_ops xgene_v1_pcie_ecam_ops; /* APM X-Gene PCIe v1 */
extern struct pci_ecam_ops xgene_v2_pcie_ecam_ops; /* APM X-Gene PCIe v2.x */
#endif
-#ifdef CONFIG_PCI_HOST_GENERIC
+#ifdef CONFIG_PCI_HOST_COMMON
/* for DT-based PCI controllers that support ECAM */
int pci_host_common_probe(struct platform_device *pdev,
struct pci_ecam_ops *ops);
diff --git a/include/linux/pci-ep-cfs.h b/include/linux/pci-ep-cfs.h
new file mode 100644
index 000000000000..263b89ea5705
--- /dev/null
+++ b/include/linux/pci-ep-cfs.h
@@ -0,0 +1,41 @@
+/**
+ * PCI Endpoint ConfigFS header file
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_PCI_EP_CFS_H
+#define __LINUX_PCI_EP_CFS_H
+
+#include <linux/configfs.h>
+
+#ifdef CONFIG_PCI_ENDPOINT_CONFIGFS
+struct config_group *pci_ep_cfs_add_epc_group(const char *name);
+void pci_ep_cfs_remove_epc_group(struct config_group *group);
+struct config_group *pci_ep_cfs_add_epf_group(const char *name);
+void pci_ep_cfs_remove_epf_group(struct config_group *group);
+#else
+static inline struct config_group *pci_ep_cfs_add_epc_group(const char *name)
+{
+ return 0;
+}
+
+static inline void pci_ep_cfs_remove_epc_group(struct config_group *group)
+{
+}
+
+static inline struct config_group *pci_ep_cfs_add_epf_group(const char *name)
+{
+ return 0;
+}
+
+static inline void pci_ep_cfs_remove_epf_group(struct config_group *group)
+{
+}
+#endif
+#endif /* __LINUX_PCI_EP_CFS_H */
diff --git a/include/linux/pci-epc.h b/include/linux/pci-epc.h
new file mode 100644
index 000000000000..af5edbf3eea3
--- /dev/null
+++ b/include/linux/pci-epc.h
@@ -0,0 +1,144 @@
+/**
+ * PCI Endpoint *Controller* (EPC) header file
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_PCI_EPC_H
+#define __LINUX_PCI_EPC_H
+
+#include <linux/pci-epf.h>
+
+struct pci_epc;
+
+enum pci_epc_irq_type {
+ PCI_EPC_IRQ_UNKNOWN,
+ PCI_EPC_IRQ_LEGACY,
+ PCI_EPC_IRQ_MSI,
+};
+
+/**
+ * struct pci_epc_ops - set of function pointers for performing EPC operations
+ * @write_header: ops to populate configuration space header
+ * @set_bar: ops to configure the BAR
+ * @clear_bar: ops to reset the BAR
+ * @map_addr: ops to map CPU address to PCI address
+ * @unmap_addr: ops to unmap CPU address and PCI address
+ * @set_msi: ops to set the requested number of MSI interrupts in the MSI
+ * capability register
+ * @get_msi: ops to get the number of MSI interrupts allocated by the RC from
+ * the MSI capability register
+ * @raise_irq: ops to raise a legacy or MSI interrupt
+ * @start: ops to start the PCI link
+ * @stop: ops to stop the PCI link
+ * @owner: the module owner containing the ops
+ */
+struct pci_epc_ops {
+ int (*write_header)(struct pci_epc *pci_epc,
+ struct pci_epf_header *hdr);
+ int (*set_bar)(struct pci_epc *epc, enum pci_barno bar,
+ dma_addr_t bar_phys, size_t size, int flags);
+ void (*clear_bar)(struct pci_epc *epc, enum pci_barno bar);
+ int (*map_addr)(struct pci_epc *epc, phys_addr_t addr,
+ u64 pci_addr, size_t size);
+ void (*unmap_addr)(struct pci_epc *epc, phys_addr_t addr);
+ int (*set_msi)(struct pci_epc *epc, u8 interrupts);
+ int (*get_msi)(struct pci_epc *epc);
+ int (*raise_irq)(struct pci_epc *pci_epc,
+ enum pci_epc_irq_type type, u8 interrupt_num);
+ int (*start)(struct pci_epc *epc);
+ void (*stop)(struct pci_epc *epc);
+ struct module *owner;
+};
+
+/**
+ * struct pci_epc_mem - address space of the endpoint controller
+ * @phys_base: physical base address of the PCI address space
+ * @size: the size of the PCI address space
+ * @bitmap: bitmap to manage the PCI address space
+ * @pages: number of bits representing the address region
+ */
+struct pci_epc_mem {
+ phys_addr_t phys_base;
+ size_t size;
+ unsigned long *bitmap;
+ int pages;
+};
+
+/**
+ * struct pci_epc - represents the PCI EPC device
+ * @dev: PCI EPC device
+ * @pci_epf: list of endpoint functions present in this EPC device
+ * @ops: function pointers for performing endpoint operations
+ * @mem: address space of the endpoint controller
+ * @max_functions: max number of functions that can be configured in this EPC
+ * @group: configfs group representing the PCI EPC device
+ * @lock: spinlock to protect pci_epc ops
+ */
+struct pci_epc {
+ struct device dev;
+ struct list_head pci_epf;
+ const struct pci_epc_ops *ops;
+ struct pci_epc_mem *mem;
+ u8 max_functions;
+ struct config_group *group;
+ /* spinlock to protect against concurrent access of EP controller */
+ spinlock_t lock;
+};
+
+#define to_pci_epc(device) container_of((device), struct pci_epc, dev)
+
+#define pci_epc_create(dev, ops) \
+ __pci_epc_create((dev), (ops), THIS_MODULE)
+#define devm_pci_epc_create(dev, ops) \
+ __devm_pci_epc_create((dev), (ops), THIS_MODULE)
+
+static inline void epc_set_drvdata(struct pci_epc *epc, void *data)
+{
+ dev_set_drvdata(&epc->dev, data);
+}
+
+static inline void *epc_get_drvdata(struct pci_epc *epc)
+{
+ return dev_get_drvdata(&epc->dev);
+}
+
+struct pci_epc *
+__devm_pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
+ struct module *owner);
+struct pci_epc *
+__pci_epc_create(struct device *dev, const struct pci_epc_ops *ops,
+ struct module *owner);
+void devm_pci_epc_destroy(struct device *dev, struct pci_epc *epc);
+void pci_epc_destroy(struct pci_epc *epc);
+int pci_epc_add_epf(struct pci_epc *epc, struct pci_epf *epf);
+void pci_epc_linkup(struct pci_epc *epc);
+void pci_epc_remove_epf(struct pci_epc *epc, struct pci_epf *epf);
+int pci_epc_write_header(struct pci_epc *epc, struct pci_epf_header *hdr);
+int pci_epc_set_bar(struct pci_epc *epc, enum pci_barno bar,
+ dma_addr_t bar_phys, size_t size, int flags);
+void pci_epc_clear_bar(struct pci_epc *epc, int bar);
+int pci_epc_map_addr(struct pci_epc *epc, phys_addr_t phys_addr,
+ u64 pci_addr, size_t size);
+void pci_epc_unmap_addr(struct pci_epc *epc, phys_addr_t phys_addr);
+int pci_epc_set_msi(struct pci_epc *epc, u8 interrupts);
+int pci_epc_get_msi(struct pci_epc *epc);
+int pci_epc_raise_irq(struct pci_epc *epc, enum pci_epc_irq_type type,
+ u8 interrupt_num);
+int pci_epc_start(struct pci_epc *epc);
+void pci_epc_stop(struct pci_epc *epc);
+struct pci_epc *pci_epc_get(const char *epc_name);
+void pci_epc_put(struct pci_epc *epc);
+
+int pci_epc_mem_init(struct pci_epc *epc, phys_addr_t phys_addr, size_t size);
+void pci_epc_mem_exit(struct pci_epc *epc);
+void __iomem *pci_epc_mem_alloc_addr(struct pci_epc *epc,
+ phys_addr_t *phys_addr, size_t size);
+void pci_epc_mem_free_addr(struct pci_epc *epc, phys_addr_t phys_addr,
+ void __iomem *virt_addr, size_t size);
+#endif /* __LINUX_PCI_EPC_H */
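
For orientation, a hypothetical EPC platform driver built only from the declarations above might start out like this (the window address and size, and the single implemented op, are illustrative; a real driver fills in the whole pci_epc_ops set, registers a platform_driver and calls pci_epc_mem_exit() on teardown):

#include <linux/err.h>
#include <linux/module.h>
#include <linux/pci-epc.h>
#include <linux/platform_device.h>
#include <linux/sizes.h>

static int foo_epc_start(struct pci_epc *epc)
{
	/* A real driver would start the link / LTSSM here. */
	return 0;
}

static const struct pci_epc_ops foo_epc_ops = {
	.start	= foo_epc_start,
	/* .write_header, .set_bar, .map_addr, .raise_irq, ... for real HW */
};

static int foo_epc_probe(struct platform_device *pdev)
{
	struct pci_epc *epc;

	epc = devm_pci_epc_create(&pdev->dev, &foo_epc_ops);
	if (IS_ERR(epc))
		return PTR_ERR(epc);

	epc->max_functions = 1;
	epc_set_drvdata(epc, pdev);

	/* Hand a (made-up) 64 KiB outbound window to the address allocator. */
	return pci_epc_mem_init(epc, 0x20000000, SZ_64K);
}
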
diff --git a/include/linux/pci-epf.h b/include/linux/pci-epf.h
new file mode 100644
index 000000000000..0d529cb90143
--- /dev/null
+++ b/include/linux/pci-epf.h
@@ -0,0 +1,162 @@
+/**
+ * PCI Endpoint *Function* (EPF) header file
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ * This program is free software: you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 of
+ * the License as published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_PCI_EPF_H
+#define __LINUX_PCI_EPF_H
+
+#include <linux/device.h>
+#include <linux/mod_devicetable.h>
+
+struct pci_epf;
+
+enum pci_interrupt_pin {
+ PCI_INTERRUPT_UNKNOWN,
+ PCI_INTERRUPT_INTA,
+ PCI_INTERRUPT_INTB,
+ PCI_INTERRUPT_INTC,
+ PCI_INTERRUPT_INTD,
+};
+
+enum pci_barno {
+ BAR_0,
+ BAR_1,
+ BAR_2,
+ BAR_3,
+ BAR_4,
+ BAR_5,
+};
+
+/**
+ * struct pci_epf_header - represents standard configuration header
+ * @vendorid: identifies device manufacturer
+ * @deviceid: identifies a particular device
+ * @revid: specifies a device-specific revision identifier
+ * @progif_code: identifies a specific register-level programming interface
+ * @subclass_code: identifies more specifically the function of the device
+ * @baseclass_code: broadly classifies the type of function the device performs
+ * @cache_line_size: specifies the system cacheline size in units of DWORDs
+ * @subsys_vendor_id: vendor of the add-in card or subsystem
+ * @subsys_id: id specific to vendor
+ * @interrupt_pin: interrupt pin the device (or device function) uses
+ */
+struct pci_epf_header {
+ u16 vendorid;
+ u16 deviceid;
+ u8 revid;
+ u8 progif_code;
+ u8 subclass_code;
+ u8 baseclass_code;
+ u8 cache_line_size;
+ u16 subsys_vendor_id;
+ u16 subsys_id;
+ enum pci_interrupt_pin interrupt_pin;
+};
+
+/**
+ * struct pci_epf_ops - set of function pointers for performing EPF operations
+ * @bind: ops to perform when a EPC device has been bound to EPF device
+ * @unbind: ops to perform when a binding has been lost between a EPC device
+ * and EPF device
+ * @linkup: ops to perform when the EPC device has established a connection with
+ * a host system
+ */
+struct pci_epf_ops {
+ int (*bind)(struct pci_epf *epf);
+ void (*unbind)(struct pci_epf *epf);
+ void (*linkup)(struct pci_epf *epf);
+};
+
+/**
+ * struct pci_epf_driver - represents the PCI EPF driver
+ * @probe: ops to perform when a new EPF device has been bound to the EPF driver
+ * @remove: ops to perform when the binding between the EPF device and EPF
+ * driver is broken
+ * @driver: PCI EPF driver
+ * @ops: set of function pointers for performing EPF operations
+ * @owner: the owner of the module that registers the PCI EPF driver
+ * @group: configfs group corresponding to the PCI EPF driver
+ * @id_table: identifies EPF devices for probing
+ */
+struct pci_epf_driver {
+ int (*probe)(struct pci_epf *epf);
+ int (*remove)(struct pci_epf *epf);
+
+ struct device_driver driver;
+ struct pci_epf_ops *ops;
+ struct module *owner;
+ struct config_group *group;
+ const struct pci_epf_device_id *id_table;
+};
+
+#define to_pci_epf_driver(drv) (container_of((drv), struct pci_epf_driver, \
+ driver))
+
+/**
+ * struct pci_epf_bar - represents the BAR of EPF device
+ * @phys_addr: physical address that should be mapped to the BAR
+ * @size: the size of the address space present in BAR
+ */
+struct pci_epf_bar {
+ dma_addr_t phys_addr;
+ size_t size;
+};
+
+/**
+ * struct pci_epf - represents the PCI EPF device
+ * @dev: the PCI EPF device
+ * @name: the name of the PCI EPF device
+ * @header: represents standard configuration header
+ * @bar: represents the BAR of EPF device
+ * @msi_interrupts: number of MSI interrupts required by this function
+ * @func_no: unique function number within this endpoint device
+ * @epc: the EPC device to which this EPF device is bound
+ * @driver: the EPF driver to which this EPF device is bound
+ * @list: to add pci_epf as a list of PCI endpoint functions to pci_epc
+ */
+struct pci_epf {
+ struct device dev;
+ const char *name;
+ struct pci_epf_header *header;
+ struct pci_epf_bar bar[6];
+ u8 msi_interrupts;
+ u8 func_no;
+
+ struct pci_epc *epc;
+ struct pci_epf_driver *driver;
+ struct list_head list;
+};
+
+#define to_pci_epf(epf_dev) container_of((epf_dev), struct pci_epf, dev)
+
+#define pci_epf_register_driver(driver) \
+ __pci_epf_register_driver((driver), THIS_MODULE)
+
+static inline void epf_set_drvdata(struct pci_epf *epf, void *data)
+{
+ dev_set_drvdata(&epf->dev, data);
+}
+
+static inline void *epf_get_drvdata(struct pci_epf *epf)
+{
+ return dev_get_drvdata(&epf->dev);
+}
+
+struct pci_epf *pci_epf_create(const char *name);
+void pci_epf_destroy(struct pci_epf *epf);
+int __pci_epf_register_driver(struct pci_epf_driver *driver,
+ struct module *owner);
+void pci_epf_unregister_driver(struct pci_epf_driver *driver);
+void *pci_epf_alloc_space(struct pci_epf *epf, size_t size, enum pci_barno bar);
+void pci_epf_free_space(struct pci_epf *epf, void *addr, enum pci_barno bar);
+int pci_epf_bind(struct pci_epf *epf);
+void pci_epf_unbind(struct pci_epf *epf);
+void pci_epf_linkup(struct pci_epf *epf);
+#endif /* __LINUX_PCI_EPF_H */
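
On the function side, the smallest useful EPF driver sketch looks roughly like the following (the IDs and names are placeholders; it assumes pci_epf_alloc_space() records the allocation in epf->bar[], and it omits MSI setup, the id_table and linkup handling):

#include <linux/module.h>
#include <linux/pci-epc.h>
#include <linux/pci-epf.h>
#include <linux/sizes.h>

static struct pci_epf_header foo_epf_header = {
	.vendorid	= 0x104c,		/* placeholder IDs */
	.deviceid	= 0xb500,
	.baseclass_code	= 0xff,
	.interrupt_pin	= PCI_INTERRUPT_INTA,
};

static int foo_epf_probe(struct pci_epf *epf)
{
	epf->header = &foo_epf_header;
	return 0;
}

static int foo_epf_bind(struct pci_epf *epf)
{
	struct pci_epc *epc = epf->epc;
	void *base;
	int ret;

	ret = pci_epc_write_header(epc, epf->header);
	if (ret)
		return ret;

	/* Back BAR 0 with 4 KiB of memory and expose it through the EPC. */
	base = pci_epf_alloc_space(epf, SZ_4K, BAR_0);
	if (!base)
		return -ENOMEM;
	epf_set_drvdata(epf, base);

	return pci_epc_set_bar(epc, BAR_0, epf->bar[BAR_0].phys_addr,
			       epf->bar[BAR_0].size, 0);
}

static void foo_epf_unbind(struct pci_epf *epf)
{
	pci_epc_clear_bar(epf->epc, BAR_0);
	pci_epf_free_space(epf, epf_get_drvdata(epf), BAR_0);
}

static struct pci_epf_ops foo_epf_ops = {
	.bind	= foo_epf_bind,
	.unbind	= foo_epf_unbind,
};

static struct pci_epf_driver foo_epf_driver = {
	.probe	= foo_epf_probe,
	.ops	= &foo_epf_ops,
	.driver	= { .name = "pci_epf_foo" },
};

static int __init foo_epf_init(void)
{
	return pci_epf_register_driver(&foo_epf_driver);
}
module_init(foo_epf_init);

static void __exit foo_epf_exit(void)
{
	pci_epf_unregister_driver(&foo_epf_driver);
}
module_exit(foo_epf_exit);
MODULE_LICENSE("GPL v2");
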
diff --git a/include/linux/pci.h b/include/linux/pci.h
index eb3da1a04e6c..33c2b0b77429 100644
--- a/include/linux/pci.h
+++ b/include/linux/pci.h
@@ -28,6 +28,7 @@
#include <linux/kobject.h>
#include <linux/atomic.h>
#include <linux/device.h>
+#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/resource_ext.h>
#include <uapi/linux/pci.h>
@@ -178,6 +179,10 @@ enum pci_dev_flags {
PCI_DEV_FLAGS_NO_PM_RESET = (__force pci_dev_flags_t) (1 << 7),
/* Get VPD from function 0 VPD */
PCI_DEV_FLAGS_VPD_REF_F0 = (__force pci_dev_flags_t) (1 << 8),
+ /* a non-root bridge where translation occurs, stop alias search here */
+ PCI_DEV_FLAGS_BRIDGE_XLATE_ROOT = (__force pci_dev_flags_t) (1 << 9),
+ /* Do not use FLR even if device advertises PCI_AF_CAP */
+ PCI_DEV_FLAGS_NO_FLR_RESET = (__force pci_dev_flags_t) (1 << 10),
};
enum pci_irq_reroute_variant {
@@ -358,6 +363,7 @@ struct pci_dev {
unsigned int is_virtfn:1;
unsigned int reset_fn:1;
unsigned int is_hotplug_bridge:1;
+ unsigned int is_thunderbolt:1; /* Thunderbolt controller */
unsigned int __aer_firmware_first_valid:1;
unsigned int __aer_firmware_first:1;
unsigned int broken_intx_masking:1;
@@ -396,6 +402,8 @@ struct pci_dev {
phys_addr_t rom; /* Physical address of ROM if it's not from the BAR */
size_t romlen; /* Length of ROM if it's not from the BAR */
char *driver_override; /* Driver name to force a match */
+
+ unsigned long priv_flags; /* Private flags for the pci driver */
};
static inline struct pci_dev *pci_physfn(struct pci_dev *dev)
@@ -940,32 +948,12 @@ int pci_generic_config_write32(struct pci_bus *bus, unsigned int devfn,
struct pci_ops *pci_bus_set_ops(struct pci_bus *bus, struct pci_ops *ops);
-static inline int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val)
-{
- return pci_bus_read_config_byte(dev->bus, dev->devfn, where, val);
-}
-static inline int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val)
-{
- return pci_bus_read_config_word(dev->bus, dev->devfn, where, val);
-}
-static inline int pci_read_config_dword(const struct pci_dev *dev, int where,
- u32 *val)
-{
- return pci_bus_read_config_dword(dev->bus, dev->devfn, where, val);
-}
-static inline int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val)
-{
- return pci_bus_write_config_byte(dev->bus, dev->devfn, where, val);
-}
-static inline int pci_write_config_word(const struct pci_dev *dev, int where, u16 val)
-{
- return pci_bus_write_config_word(dev->bus, dev->devfn, where, val);
-}
-static inline int pci_write_config_dword(const struct pci_dev *dev, int where,
- u32 val)
-{
- return pci_bus_write_config_dword(dev->bus, dev->devfn, where, val);
-}
+int pci_read_config_byte(const struct pci_dev *dev, int where, u8 *val);
+int pci_read_config_word(const struct pci_dev *dev, int where, u16 *val);
+int pci_read_config_dword(const struct pci_dev *dev, int where, u32 *val);
+int pci_write_config_byte(const struct pci_dev *dev, int where, u8 val);
+int pci_write_config_word(const struct pci_dev *dev, int where, u16 val);
+int pci_write_config_dword(const struct pci_dev *dev, int where, u32 val);
int pcie_capability_read_word(struct pci_dev *dev, int pos, u16 *val);
int pcie_capability_read_dword(struct pci_dev *dev, int pos, u32 *val);
@@ -1052,6 +1040,7 @@ int pcie_get_mps(struct pci_dev *dev);
int pcie_set_mps(struct pci_dev *dev, int mps);
int pcie_get_minimum_link(struct pci_dev *dev, enum pci_bus_speed *speed,
enum pcie_link_width *width);
+void pcie_flr(struct pci_dev *dev);
int __pci_reset_function(struct pci_dev *dev);
int __pci_reset_function_locked(struct pci_dev *dev);
int pci_reset_function(struct pci_dev *dev);
@@ -1072,6 +1061,11 @@ int pci_select_bars(struct pci_dev *dev, unsigned long flags);
bool pci_device_is_present(struct pci_dev *pdev);
void pci_ignore_hotplug(struct pci_dev *dev);
+int __printf(6, 7) pci_request_irq(struct pci_dev *dev, unsigned int nr,
+ irq_handler_t handler, irq_handler_t thread_fn, void *dev_id,
+ const char *fmt, ...);
+void pci_free_irq(struct pci_dev *dev, unsigned int nr, void *dev_id);
+
/* ROM control related routines */
int pci_enable_rom(struct pci_dev *pdev);
void pci_disable_rom(struct pci_dev *pdev);
@@ -1199,6 +1193,11 @@ unsigned long pci_address_to_pio(phys_addr_t addr);
phys_addr_t pci_pio_to_address(unsigned long pio);
int pci_remap_iospace(const struct resource *res, phys_addr_t phys_addr);
void pci_unmap_iospace(struct resource *res);
+void __iomem *devm_pci_remap_cfgspace(struct device *dev,
+ resource_size_t offset,
+ resource_size_t size);
+void __iomem *devm_pci_remap_cfg_resource(struct device *dev,
+ struct resource *res);
static inline pci_bus_addr_t pci_bus_address(struct pci_dev *pdev, int bar)
{
@@ -1297,11 +1296,8 @@ struct msix_entry {
#ifdef CONFIG_PCI_MSI
int pci_msi_vec_count(struct pci_dev *dev);
-void pci_msi_shutdown(struct pci_dev *dev);
void pci_disable_msi(struct pci_dev *dev);
int pci_msix_vec_count(struct pci_dev *dev);
-int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec);
-void pci_msix_shutdown(struct pci_dev *dev);
void pci_disable_msix(struct pci_dev *dev);
void pci_restore_msi_state(struct pci_dev *dev);
int pci_msi_enabled(void);
@@ -1327,13 +1323,8 @@ int pci_irq_get_node(struct pci_dev *pdev, int vec);
#else
static inline int pci_msi_vec_count(struct pci_dev *dev) { return -ENOSYS; }
-static inline void pci_msi_shutdown(struct pci_dev *dev) { }
static inline void pci_disable_msi(struct pci_dev *dev) { }
static inline int pci_msix_vec_count(struct pci_dev *dev) { return -ENOSYS; }
-static inline int pci_enable_msix(struct pci_dev *dev,
- struct msix_entry *entries, int nvec)
-{ return -ENOSYS; }
-static inline void pci_msix_shutdown(struct pci_dev *dev) { }
static inline void pci_disable_msix(struct pci_dev *dev) { }
static inline void pci_restore_msi_state(struct pci_dev *dev) { }
static inline int pci_msi_enabled(void) { return 0; }
@@ -1626,6 +1617,36 @@ static inline int pci_get_new_domain_nr(void) { return -ENOSYS; }
#include <asm/pci.h>
+/* These two functions provide almost identical functionality. Depending
+ * on the architecture, one will be implemented as a wrapper around the
+ * other (in drivers/pci/mmap.c).
+ *
+ * pci_mmap_resource_range() maps a specific BAR, and vm->vm_pgoff
+ * is expected to be an offset within that region.
+ *
+ * pci_mmap_page_range() is the legacy architecture-specific interface,
+ * which accepts a "user visible" resource address converted by
+ * pci_resource_to_user(), as used in the legacy mmap() interface in
+ * /proc/bus/pci/.
+ */
+int pci_mmap_resource_range(struct pci_dev *dev, int bar,
+ struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine);
+int pci_mmap_page_range(struct pci_dev *pdev, int bar,
+ struct vm_area_struct *vma,
+ enum pci_mmap_state mmap_state, int write_combine);
+
+#ifndef arch_can_pci_mmap_wc
+#define arch_can_pci_mmap_wc() 0
+#endif
+
+#ifndef arch_can_pci_mmap_io
+#define arch_can_pci_mmap_io() 0
+#define pci_iobar_pfn(pdev, bar, vma) (-EINVAL)
+#else
+int pci_iobar_pfn(struct pci_dev *pdev, int bar, struct vm_area_struct *vma);
+#endif
+
#ifndef pci_root_bus_fwnode
#define pci_root_bus_fwnode(bus) NULL
#endif
@@ -2160,6 +2181,28 @@ static inline bool pci_ari_enabled(struct pci_bus *bus)
return bus->self && bus->self->ari_enabled;
}
+/**
+ * pci_is_thunderbolt_attached - whether device is on a Thunderbolt daisy chain
+ * @pdev: PCI device to check
+ *
+ * Walk upwards from @pdev and check for each encountered bridge if it's part
+ * of a Thunderbolt controller. Reaching the host bridge means @pdev is not
+ * Thunderbolt-attached. (But rather soldered to the mainboard usually.)
+ */
+static inline bool pci_is_thunderbolt_attached(struct pci_dev *pdev)
+{
+ struct pci_dev *parent = pdev;
+
+ if (pdev->is_thunderbolt)
+ return true;
+
+ while ((parent = pci_upstream_bridge(parent)))
+ if (parent->is_thunderbolt)
+ return true;
+
+ return false;
+}
+
/* provide the legacy pci_dma_* API */
#include <linux/pci-dma-compat.h>
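
As a rough usage note for the new pci_request_irq()/pci_free_irq() helpers and pci_is_thunderbolt_attached() above, a driver's IRQ setup could look like this sketch (vector count, handler body and the "foo" names are illustrative):

#include <linux/interrupt.h>
#include <linux/pci.h>

static irqreturn_t foo_irq_handler(int irq, void *dev_id)
{
	/* dev_id is the last fixed argument passed to pci_request_irq(). */
	return IRQ_HANDLED;
}

static int foo_setup_irqs(struct pci_dev *pdev)
{
	int nvec, i, ret;

	/* Let the PCI core pick MSI-X, MSI or INTx with up to 4 vectors. */
	nvec = pci_alloc_irq_vectors(pdev, 1, 4, PCI_IRQ_ALL_TYPES);
	if (nvec < 0)
		return nvec;

	for (i = 0; i < nvec; i++) {
		/* printf-style naming; undone per vector with pci_free_irq(). */
		ret = pci_request_irq(pdev, i, foo_irq_handler, NULL, pdev,
				      "foo-queue%d", i);
		if (ret)
			return ret;
	}

	if (pci_is_thunderbolt_attached(pdev))
		dev_info(&pdev->dev, "device is on a Thunderbolt chain\n");

	return 0;
}
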
diff --git a/include/linux/pci_ids.h b/include/linux/pci_ids.h
index a4f77feecbb0..5f6b71d15393 100644
--- a/include/linux/pci_ids.h
+++ b/include/linux/pci_ids.h
@@ -862,6 +862,8 @@
#define PCI_DEVICE_ID_TI_X620 0xac8d
#define PCI_DEVICE_ID_TI_X420 0xac8e
#define PCI_DEVICE_ID_TI_XX20_FM 0xac8f
+#define PCI_DEVICE_ID_TI_DRA74x 0xb500
+#define PCI_DEVICE_ID_TI_DRA72x 0xb501
#define PCI_VENDOR_ID_SONY 0x104d
diff --git a/include/linux/pe.h b/include/linux/pe.h
index e170b95e763b..143ce75be5f0 100644
--- a/include/linux/pe.h
+++ b/include/linux/pe.h
@@ -23,34 +23,6 @@
#define MZ_MAGIC 0x5a4d /* "MZ" */
-struct mz_hdr {
- uint16_t magic; /* MZ_MAGIC */
- uint16_t lbsize; /* size of last used block */
- uint16_t blocks; /* pages in file, 0x3 */
- uint16_t relocs; /* relocations */
- uint16_t hdrsize; /* header size in "paragraphs" */
- uint16_t min_extra_pps; /* .bss */
- uint16_t max_extra_pps; /* runtime limit for the arena size */
- uint16_t ss; /* relative stack segment */
- uint16_t sp; /* initial %sp register */
- uint16_t checksum; /* word checksum */
- uint16_t ip; /* initial %ip register */
- uint16_t cs; /* initial %cs relative to load segment */
- uint16_t reloc_table_offset; /* offset of the first relocation */
- uint16_t overlay_num; /* overlay number. set to 0. */
- uint16_t reserved0[4]; /* reserved */
- uint16_t oem_id; /* oem identifier */
- uint16_t oem_info; /* oem specific */
- uint16_t reserved1[10]; /* reserved */
- uint32_t peaddr; /* address of pe header */
- char message[64]; /* message to print */
-};
-
-struct mz_reloc {
- uint16_t offset;
- uint16_t segment;
-};
-
#define PE_MAGIC 0x00004550 /* "PE\0\0" */
#define PE_OPT_MAGIC_PE32 0x010b
#define PE_OPT_MAGIC_PE32_ROM 0x0107
@@ -62,6 +34,7 @@ struct mz_reloc {
#define IMAGE_FILE_MACHINE_AMD64 0x8664
#define IMAGE_FILE_MACHINE_ARM 0x01c0
#define IMAGE_FILE_MACHINE_ARMV7 0x01c4
+#define IMAGE_FILE_MACHINE_ARM64 0xaa64
#define IMAGE_FILE_MACHINE_EBC 0x0ebc
#define IMAGE_FILE_MACHINE_I386 0x014c
#define IMAGE_FILE_MACHINE_IA64 0x0200
@@ -98,17 +71,6 @@ struct mz_reloc {
#define IMAGE_FILE_UP_SYSTEM_ONLY 0x4000
#define IMAGE_FILE_BYTES_REVERSED_HI 0x8000
-struct pe_hdr {
- uint32_t magic; /* PE magic */
- uint16_t machine; /* machine type */
- uint16_t sections; /* number of sections */
- uint32_t timestamp; /* time_t */
- uint32_t symbol_table; /* symbol table offset */
- uint32_t symbols; /* number of symbols */
- uint16_t opt_hdr_size; /* size of optional header */
- uint16_t flags; /* flags */
-};
-
#define IMAGE_FILE_OPT_ROM_MAGIC 0x107
#define IMAGE_FILE_OPT_PE32_MAGIC 0x10b
#define IMAGE_FILE_OPT_PE32_PLUS_MAGIC 0x20b
@@ -134,6 +96,95 @@ struct pe_hdr {
#define IMAGE_DLLCHARACTERISTICS_WDM_DRIVER 0x2000
#define IMAGE_DLLCHARACTERISTICS_TERMINAL_SERVER_AWARE 0x8000
+/* they actually defined 0x00000000 as well, but I think we'll skip that one. */
+#define IMAGE_SCN_RESERVED_0 0x00000001
+#define IMAGE_SCN_RESERVED_1 0x00000002
+#define IMAGE_SCN_RESERVED_2 0x00000004
+#define IMAGE_SCN_TYPE_NO_PAD 0x00000008 /* don't pad - obsolete */
+#define IMAGE_SCN_RESERVED_3 0x00000010
+#define IMAGE_SCN_CNT_CODE 0x00000020 /* .text */
+#define IMAGE_SCN_CNT_INITIALIZED_DATA 0x00000040 /* .data */
+#define IMAGE_SCN_CNT_UNINITIALIZED_DATA 0x00000080 /* .bss */
+#define IMAGE_SCN_LNK_OTHER 0x00000100 /* reserved */
+#define IMAGE_SCN_LNK_INFO 0x00000200 /* .drectve comments */
+#define IMAGE_SCN_RESERVED_4 0x00000400
+#define IMAGE_SCN_LNK_REMOVE 0x00000800 /* .o only - scn to be rm'd*/
+#define IMAGE_SCN_LNK_COMDAT 0x00001000 /* .o only - COMDAT data */
+#define IMAGE_SCN_RESERVED_5 0x00002000 /* spec omits this */
+#define IMAGE_SCN_RESERVED_6 0x00004000 /* spec omits this */
+#define IMAGE_SCN_GPREL 0x00008000 /* global pointer referenced data */
+/* spec lists 0x20000 twice, I suspect they meant 0x10000 for one of them */
+#define IMAGE_SCN_MEM_PURGEABLE 0x00010000 /* reserved for "future" use */
+#define IMAGE_SCN_16BIT 0x00020000 /* reserved for "future" use */
+#define IMAGE_SCN_LOCKED 0x00040000 /* reserved for "future" use */
+#define IMAGE_SCN_PRELOAD 0x00080000 /* reserved for "future" use */
+/* and here they just stuck a 1-byte integer in the middle of a bitfield */
+#define IMAGE_SCN_ALIGN_1BYTES 0x00100000 /* it does what it says on the box */
+#define IMAGE_SCN_ALIGN_2BYTES 0x00200000
+#define IMAGE_SCN_ALIGN_4BYTES 0x00300000
+#define IMAGE_SCN_ALIGN_8BYTES 0x00400000
+#define IMAGE_SCN_ALIGN_16BYTES 0x00500000
+#define IMAGE_SCN_ALIGN_32BYTES 0x00600000
+#define IMAGE_SCN_ALIGN_64BYTES 0x00700000
+#define IMAGE_SCN_ALIGN_128BYTES 0x00800000
+#define IMAGE_SCN_ALIGN_256BYTES 0x00900000
+#define IMAGE_SCN_ALIGN_512BYTES 0x00a00000
+#define IMAGE_SCN_ALIGN_1024BYTES 0x00b00000
+#define IMAGE_SCN_ALIGN_2048BYTES 0x00c00000
+#define IMAGE_SCN_ALIGN_4096BYTES 0x00d00000
+#define IMAGE_SCN_ALIGN_8192BYTES 0x00e00000
+#define IMAGE_SCN_LNK_NRELOC_OVFL 0x01000000 /* extended relocations */
+#define IMAGE_SCN_MEM_DISCARDABLE 0x02000000 /* scn can be discarded */
+#define IMAGE_SCN_MEM_NOT_CACHED 0x04000000 /* cannot be cached */
+#define IMAGE_SCN_MEM_NOT_PAGED 0x08000000 /* not pageable */
+#define IMAGE_SCN_MEM_SHARED 0x10000000 /* can be shared */
+#define IMAGE_SCN_MEM_EXECUTE 0x20000000 /* can be executed as code */
+#define IMAGE_SCN_MEM_READ 0x40000000 /* readable */
+#define IMAGE_SCN_MEM_WRITE 0x80000000 /* writeable */
+
+#define IMAGE_DEBUG_TYPE_CODEVIEW 2
+
+#ifndef __ASSEMBLY__
+
+struct mz_hdr {
+ uint16_t magic; /* MZ_MAGIC */
+ uint16_t lbsize; /* size of last used block */
+ uint16_t blocks; /* pages in file, 0x3 */
+ uint16_t relocs; /* relocations */
+ uint16_t hdrsize; /* header size in "paragraphs" */
+ uint16_t min_extra_pps; /* .bss */
+ uint16_t max_extra_pps; /* runtime limit for the arena size */
+ uint16_t ss; /* relative stack segment */
+ uint16_t sp; /* initial %sp register */
+ uint16_t checksum; /* word checksum */
+ uint16_t ip; /* initial %ip register */
+ uint16_t cs; /* initial %cs relative to load segment */
+ uint16_t reloc_table_offset; /* offset of the first relocation */
+ uint16_t overlay_num; /* overlay number. set to 0. */
+ uint16_t reserved0[4]; /* reserved */
+ uint16_t oem_id; /* oem identifier */
+ uint16_t oem_info; /* oem specific */
+ uint16_t reserved1[10]; /* reserved */
+ uint32_t peaddr; /* address of pe header */
+ char message[64]; /* message to print */
+};
+
+struct mz_reloc {
+ uint16_t offset;
+ uint16_t segment;
+};
+
+struct pe_hdr {
+ uint32_t magic; /* PE magic */
+ uint16_t machine; /* machine type */
+ uint16_t sections; /* number of sections */
+ uint32_t timestamp; /* time_t */
+ uint32_t symbol_table; /* symbol table offset */
+ uint32_t symbols; /* number of symbols */
+ uint16_t opt_hdr_size; /* size of optional header */
+ uint16_t flags; /* flags */
+};
+
/* the fact that pe32 isn't padded where pe32+ is 64-bit means union won't
* work right. vomit. */
struct pe32_opt_hdr {
@@ -243,52 +294,6 @@ struct section_header {
uint32_t flags;
};
-/* they actually defined 0x00000000 as well, but I think we'll skip that one. */
-#define IMAGE_SCN_RESERVED_0 0x00000001
-#define IMAGE_SCN_RESERVED_1 0x00000002
-#define IMAGE_SCN_RESERVED_2 0x00000004
-#define IMAGE_SCN_TYPE_NO_PAD 0x00000008 /* don't pad - obsolete */
-#define IMAGE_SCN_RESERVED_3 0x00000010
-#define IMAGE_SCN_CNT_CODE 0x00000020 /* .text */
-#define IMAGE_SCN_CNT_INITIALIZED_DATA 0x00000040 /* .data */
-#define IMAGE_SCN_CNT_UNINITIALIZED_DATA 0x00000080 /* .bss */
-#define IMAGE_SCN_LNK_OTHER 0x00000100 /* reserved */
-#define IMAGE_SCN_LNK_INFO 0x00000200 /* .drectve comments */
-#define IMAGE_SCN_RESERVED_4 0x00000400
-#define IMAGE_SCN_LNK_REMOVE 0x00000800 /* .o only - scn to be rm'd*/
-#define IMAGE_SCN_LNK_COMDAT 0x00001000 /* .o only - COMDAT data */
-#define IMAGE_SCN_RESERVED_5 0x00002000 /* spec omits this */
-#define IMAGE_SCN_RESERVED_6 0x00004000 /* spec omits this */
-#define IMAGE_SCN_GPREL 0x00008000 /* global pointer referenced data */
-/* spec lists 0x20000 twice, I suspect they meant 0x10000 for one of them */
-#define IMAGE_SCN_MEM_PURGEABLE 0x00010000 /* reserved for "future" use */
-#define IMAGE_SCN_16BIT 0x00020000 /* reserved for "future" use */
-#define IMAGE_SCN_LOCKED 0x00040000 /* reserved for "future" use */
-#define IMAGE_SCN_PRELOAD 0x00080000 /* reserved for "future" use */
-/* and here they just stuck a 1-byte integer in the middle of a bitfield */
-#define IMAGE_SCN_ALIGN_1BYTES 0x00100000 /* it does what it says on the box */
-#define IMAGE_SCN_ALIGN_2BYTES 0x00200000
-#define IMAGE_SCN_ALIGN_4BYTES 0x00300000
-#define IMAGE_SCN_ALIGN_8BYTES 0x00400000
-#define IMAGE_SCN_ALIGN_16BYTES 0x00500000
-#define IMAGE_SCN_ALIGN_32BYTES 0x00600000
-#define IMAGE_SCN_ALIGN_64BYTES 0x00700000
-#define IMAGE_SCN_ALIGN_128BYTES 0x00800000
-#define IMAGE_SCN_ALIGN_256BYTES 0x00900000
-#define IMAGE_SCN_ALIGN_512BYTES 0x00a00000
-#define IMAGE_SCN_ALIGN_1024BYTES 0x00b00000
-#define IMAGE_SCN_ALIGN_2048BYTES 0x00c00000
-#define IMAGE_SCN_ALIGN_4096BYTES 0x00d00000
-#define IMAGE_SCN_ALIGN_8192BYTES 0x00e00000
-#define IMAGE_SCN_LNK_NRELOC_OVFL 0x01000000 /* extended relocations */
-#define IMAGE_SCN_MEM_DISCARDABLE 0x02000000 /* scn can be discarded */
-#define IMAGE_SCN_MEM_NOT_CACHED 0x04000000 /* cannot be cached */
-#define IMAGE_SCN_MEM_NOT_PAGED 0x08000000 /* not pageable */
-#define IMAGE_SCN_MEM_SHARED 0x10000000 /* can be shared */
-#define IMAGE_SCN_MEM_EXECUTE 0x20000000 /* can be executed as code */
-#define IMAGE_SCN_MEM_READ 0x40000000 /* readable */
-#define IMAGE_SCN_MEM_WRITE 0x80000000 /* writeable */
-
enum x64_coff_reloc_type {
IMAGE_REL_AMD64_ABSOLUTE = 0,
IMAGE_REL_AMD64_ADDR64,
@@ -445,4 +450,6 @@ struct win_certificate {
uint16_t cert_type;
};
+#endif /* !__ASSEMBLY__ */
+
#endif /* __LINUX_PE_H */
diff --git a/include/linux/percpu-refcount.h b/include/linux/percpu-refcount.h
index 3a481a49546e..c13dceb87b60 100644
--- a/include/linux/percpu-refcount.h
+++ b/include/linux/percpu-refcount.h
@@ -99,6 +99,7 @@ int __must_check percpu_ref_init(struct percpu_ref *ref,
void percpu_ref_exit(struct percpu_ref *ref);
void percpu_ref_switch_to_atomic(struct percpu_ref *ref,
percpu_ref_func_t *confirm_switch);
+void percpu_ref_switch_to_atomic_sync(struct percpu_ref *ref);
void percpu_ref_switch_to_percpu(struct percpu_ref *ref);
void percpu_ref_kill_and_confirm(struct percpu_ref *ref,
percpu_ref_func_t *confirm_kill);
diff --git a/include/linux/percpu.h b/include/linux/percpu.h
index 56939d3f6e53..491b3f5a5f8a 100644
--- a/include/linux/percpu.h
+++ b/include/linux/percpu.h
@@ -110,6 +110,7 @@ extern int __init pcpu_page_first_chunk(size_t reserved_size,
#endif
extern void __percpu *__alloc_reserved_percpu(size_t size, size_t align);
+extern bool __is_kernel_percpu_address(unsigned long addr, unsigned long *can_addr);
extern bool is_kernel_percpu_address(unsigned long addr);
#if !defined(CONFIG_SMP) || !defined(CONFIG_HAVE_SETUP_PER_CPU_AREA)
diff --git a/include/linux/perf/arm_pmu.h b/include/linux/perf/arm_pmu.h
index 8462da266089..1360dd6d5e61 100644
--- a/include/linux/perf/arm_pmu.h
+++ b/include/linux/perf/arm_pmu.h
@@ -75,6 +75,8 @@ struct pmu_hw_events {
* already have to allocate this struct per cpu.
*/
struct arm_pmu *percpu_pmu;
+
+ int irq;
};
enum armpmu_attr_groups {
@@ -88,7 +90,6 @@ struct arm_pmu {
struct pmu pmu;
cpumask_t active_irqs;
cpumask_t supported_cpus;
- int *irq_affinity;
char *name;
irqreturn_t (*handle_irq)(int irq_num, void *dev);
void (*enable)(struct perf_event *event);
@@ -104,12 +105,8 @@ struct arm_pmu {
void (*start)(struct arm_pmu *);
void (*stop)(struct arm_pmu *);
void (*reset)(void *);
- int (*request_irq)(struct arm_pmu *, irq_handler_t handler);
- void (*free_irq)(struct arm_pmu *);
int (*map_event)(struct perf_event *event);
int num_events;
- atomic_t active_events;
- struct mutex reserve_mutex;
u64 max_period;
bool secure_access; /* 32-bit ARM only */
#define ARMV8_PMUV3_MAX_COMMON_EVENTS 0x40
@@ -120,6 +117,9 @@ struct arm_pmu {
struct notifier_block cpu_pm_nb;
/* the attr_groups array must be NULL-terminated */
const struct attribute_group *attr_groups[ARMPMU_NR_ATTR_GROUPS + 1];
+
+ /* Only to be used by ACPI probing code */
+ unsigned long acpi_cpuid;
};
#define to_arm_pmu(p) (container_of(p, struct arm_pmu, pmu))
@@ -135,10 +135,12 @@ int armpmu_map_event(struct perf_event *event,
[PERF_COUNT_HW_CACHE_RESULT_MAX],
u32 raw_event_mask);
+typedef int (*armpmu_init_fn)(struct arm_pmu *);
+
struct pmu_probe_info {
unsigned int cpuid;
unsigned int mask;
- int (*init)(struct arm_pmu *);
+ armpmu_init_fn init;
};
#define PMU_PROBE(_cpuid, _mask, _fn) \
@@ -160,6 +162,21 @@ int arm_pmu_device_probe(struct platform_device *pdev,
const struct of_device_id *of_table,
const struct pmu_probe_info *probe_table);
+#ifdef CONFIG_ACPI
+int arm_pmu_acpi_probe(armpmu_init_fn init_fn);
+#else
+static inline int arm_pmu_acpi_probe(armpmu_init_fn init_fn) { return 0; }
+#endif
+
+/* Internal functions only for core arm_pmu code */
+struct arm_pmu *armpmu_alloc(void);
+void armpmu_free(struct arm_pmu *pmu);
+int armpmu_register(struct arm_pmu *pmu);
+int armpmu_request_irqs(struct arm_pmu *armpmu);
+void armpmu_free_irqs(struct arm_pmu *armpmu);
+int armpmu_request_irq(struct arm_pmu *armpmu, int cpu);
+void armpmu_free_irq(struct arm_pmu *armpmu, int cpu);
+
#define ARMV8_PMU_PDEV_NAME "armv8-pmu"
#endif /* CONFIG_ARM_PMU */
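
A hypothetical sketch of how a CPU PMU driver plugs into the probing hooks above, including the new ACPI path (compatible string, init body and initcall level are placeholders):

#include <linux/init.h>
#include <linux/perf/arm_pmu.h>
#include <linux/platform_device.h>

static int foo_pmu_init(struct arm_pmu *pmu)
{
	pmu->name = "foo_pmu";
	/* A real init also fills in handle_irq, map_event, start/stop, ... */
	return 0;
}

static const struct of_device_id foo_pmu_of_ids[] = {
	{ .compatible = "vendor,foo-pmu", .data = foo_pmu_init },
	{ }
};

static const struct pmu_probe_info foo_pmu_probe_table[] = {
	/* PMU_PROBE(cpuid, mask, foo_pmu_init) entries for CPUID matching */
	{ /* sentinel */ }
};

static int foo_pmu_probe(struct platform_device *pdev)
{
	/* DT/platform path: match against the tables above. */
	return arm_pmu_device_probe(pdev, foo_pmu_of_ids, foo_pmu_probe_table);
}

static struct platform_driver foo_pmu_driver = {
	.probe	= foo_pmu_probe,
	.driver	= {
		.name		= "foo-pmu",
		.of_match_table	= foo_pmu_of_ids,
	},
};
builtin_platform_driver(foo_pmu_driver);

static int __init foo_pmu_acpi_init(void)
{
	/* ACPI path: register the same per-PMU init function once at boot. */
	return arm_pmu_acpi_probe(foo_pmu_init);
}
device_initcall(foo_pmu_acpi_init);
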
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 000fdb211c7d..24a635887f28 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -165,6 +165,13 @@ struct hw_perf_event {
struct list_head bp_list;
};
#endif
+ struct { /* amd_iommu */
+ u8 iommu_bank;
+ u8 iommu_cntr;
+ u16 padding;
+ u64 conf;
+ u64 conf1;
+ };
};
/*
* If the event is a per task event, this will point to the task in
@@ -801,6 +808,7 @@ struct perf_output_handle {
struct ring_buffer *rb;
unsigned long wakeup;
unsigned long size;
+ u64 aux_flags;
union {
void *addr;
unsigned long head;
@@ -849,10 +857,11 @@ perf_cgroup_from_task(struct task_struct *task, struct perf_event_context *ctx)
extern void *perf_aux_output_begin(struct perf_output_handle *handle,
struct perf_event *event);
extern void perf_aux_output_end(struct perf_output_handle *handle,
- unsigned long size, bool truncated);
+ unsigned long size);
extern int perf_aux_output_skip(struct perf_output_handle *handle,
unsigned long size);
extern void *perf_get_aux(struct perf_output_handle *handle);
+extern void perf_aux_output_flag(struct perf_output_handle *handle, u64 flags);
extern int perf_pmu_register(struct pmu *pmu, const char *name, int type);
extern void perf_pmu_unregister(struct pmu *pmu);
@@ -1112,6 +1121,7 @@ extern int perf_unregister_guest_info_callbacks(struct perf_guest_info_callbacks
extern void perf_event_exec(void);
extern void perf_event_comm(struct task_struct *tsk, bool exec);
+extern void perf_event_namespaces(struct task_struct *tsk);
extern void perf_event_fork(struct task_struct *tsk);
/* Callchains */
@@ -1267,8 +1277,8 @@ static inline void *
perf_aux_output_begin(struct perf_output_handle *handle,
struct perf_event *event) { return NULL; }
static inline void
-perf_aux_output_end(struct perf_output_handle *handle, unsigned long size,
- bool truncated) { }
+perf_aux_output_end(struct perf_output_handle *handle, unsigned long size)
+ { }
static inline int
perf_aux_output_skip(struct perf_output_handle *handle,
unsigned long size) { return -EINVAL; }
@@ -1315,6 +1325,7 @@ static inline int perf_unregister_guest_info_callbacks
static inline void perf_event_mmap(struct vm_area_struct *vma) { }
static inline void perf_event_exec(void) { }
static inline void perf_event_comm(struct task_struct *tsk, bool exec) { }
+static inline void perf_event_namespaces(struct task_struct *tsk) { }
static inline void perf_event_fork(struct task_struct *tsk) { }
static inline void perf_event_init(void) { }
static inline int perf_swevent_get_recursion_context(void) { return -1; }
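
A short sketch of how an AUX-capable PMU driver adapts to the reworked output interface above (the surrounding driver logic is assumed):

#include <linux/perf_event.h>

static void foo_pmu_finish_aux(struct perf_output_handle *handle,
			       unsigned long collected, bool lost_data)
{
	/* Truncation is now signalled via a flag instead of an argument... */
	if (lost_data)
		perf_aux_output_flag(handle, PERF_AUX_FLAG_TRUNCATED);

	/* ...and perf_aux_output_end() only takes the number of bytes. */
	perf_aux_output_end(handle, collected);
}
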
diff --git a/include/linux/phy.h b/include/linux/phy.h
index 43a774873aa9..e76e4adbc7c7 100644
--- a/include/linux/phy.h
+++ b/include/linux/phy.h
@@ -217,6 +217,13 @@ struct mii_bus {
* matching its address
*/
int irq[PHY_MAX_ADDR];
+
+ /* GPIO reset pulse width in microseconds */
+ int reset_delay_us;
+ /* Number of reset GPIOs */
+ int num_reset_gpios;
+ /* Array of RESET GPIO descriptors */
+ struct gpio_desc **reset_gpiod;
};
#define to_mii_bus(d) container_of(d, struct mii_bus, dev)
@@ -587,23 +594,29 @@ struct phy_driver {
*/
void (*link_change_notify)(struct phy_device *dev);
- /* A function provided by a phy specific driver to override the
- * the PHY driver framework support for reading a MMD register
- * from the PHY. If not supported, return -1. This function is
- * optional for PHY specific drivers, if not provided then the
- * default MMD read function is used by the PHY framework.
+ /*
+ * Phy specific driver override for reading a MMD register.
+ * This function is optional for PHY specific drivers. When
+ * not provided, the default MMD read function will be used
+ * by phy_read_mmd(), which will use either a direct read for
+ * Clause 45 PHYs or an indirect read for Clause 22 PHYs.
+ * devnum is the MMD device number within the PHY device,
+ * regnum is the register within the selected MMD device.
*/
- int (*read_mmd_indirect)(struct phy_device *dev, int ptrad,
- int devnum, int regnum);
-
- /* A function provided by a phy specific driver to override the
- * the PHY driver framework support for writing a MMD register
- * from the PHY. This function is optional for PHY specific drivers,
- * if not provided then the default MMD read function is used by
- * the PHY framework.
+ int (*read_mmd)(struct phy_device *dev, int devnum, u16 regnum);
+
+ /*
+ * Phy specific driver override for writing a MMD register.
+ * This function is optional for PHY specific drivers. When
+ * not provided, the default MMD write function will be used
+ * by phy_write_mmd(), which will use either a direct write for
+ * Clause 45 PHYs, or an indirect write for Clause 22 PHYs.
+ * devnum is the MMD device number within the PHY device,
+ * regnum is the register within the selected MMD device.
+ * val is the value to be written.
*/
- void (*write_mmd_indirect)(struct phy_device *dev, int ptrad,
- int devnum, int regnum, u32 val);
+ int (*write_mmd)(struct phy_device *dev, int devnum, u16 regnum,
+ u16 val);
/* Get the size and type of the eeprom contained within a plug-in
* module */
@@ -651,25 +664,7 @@ struct phy_fixup {
*
* Same rules as for phy_read();
*/
-static inline int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum)
-{
- if (!phydev->is_c45)
- return -EOPNOTSUPP;
-
- return mdiobus_read(phydev->mdio.bus, phydev->mdio.addr,
- MII_ADDR_C45 | (devad << 16) | (regnum & 0xffff));
-}
-
-/**
- * phy_read_mmd_indirect - reads data from the MMD registers
- * @phydev: The PHY device bus
- * @prtad: MMD Address
- * @addr: PHY address on the MII bus
- *
- * Description: it reads data from the MMD registers (clause 22 to access to
- * clause 45) of the specified phy address.
- */
-int phy_read_mmd_indirect(struct phy_device *phydev, int prtad, int devad);
+int phy_read_mmd(struct phy_device *phydev, int devad, u32 regnum);
/**
* phy_read - Convenience function for reading a given PHY register
@@ -752,35 +747,29 @@ static inline bool phy_is_pseudo_fixed_link(struct phy_device *phydev)
*
* Same rules as for phy_write();
*/
-static inline int phy_write_mmd(struct phy_device *phydev, int devad,
- u32 regnum, u16 val)
-{
- if (!phydev->is_c45)
- return -EOPNOTSUPP;
-
- regnum = MII_ADDR_C45 | ((devad & 0x1f) << 16) | (regnum & 0xffff);
-
- return mdiobus_write(phydev->mdio.bus, phydev->mdio.addr, regnum, val);
-}
-
-/**
- * phy_write_mmd_indirect - writes data to the MMD registers
- * @phydev: The PHY device
- * @prtad: MMD Address
- * @devad: MMD DEVAD
- * @data: data to write in the MMD register
- *
- * Description: Write data from the MMD registers of the specified
- * phy address.
- */
-void phy_write_mmd_indirect(struct phy_device *phydev, int prtad,
- int devad, u32 data);
+int phy_write_mmd(struct phy_device *phydev, int devad, u32 regnum, u16 val);
struct phy_device *phy_device_create(struct mii_bus *bus, int addr, int phy_id,
bool is_c45,
struct phy_c45_device_ids *c45_ids);
+#if IS_ENABLED(CONFIG_PHYLIB)
struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45);
int phy_device_register(struct phy_device *phy);
+void phy_device_free(struct phy_device *phydev);
+#else
+static inline
+struct phy_device *get_phy_device(struct mii_bus *bus, int addr, bool is_c45)
+{
+ return NULL;
+}
+
+static inline int phy_device_register(struct phy_device *phy)
+{
+ return 0;
+}
+
+static inline void phy_device_free(struct phy_device *phydev) { }
+#endif /* CONFIG_PHYLIB */
void phy_device_remove(struct phy_device *phydev);
int phy_init_hw(struct phy_device *phydev);
int phy_suspend(struct phy_device *phydev);
@@ -852,6 +841,7 @@ void phy_change_work(struct work_struct *work);
void phy_mac_interrupt(struct phy_device *phydev, int new_link);
void phy_start_machine(struct phy_device *phydev);
void phy_stop_machine(struct phy_device *phydev);
+void phy_trigger_machine(struct phy_device *phydev, bool sync);
int phy_ethtool_sset(struct phy_device *phydev, struct ethtool_cmd *cmd);
int phy_ethtool_gset(struct phy_device *phydev, struct ethtool_cmd *cmd);
int phy_ethtool_ksettings_get(struct phy_device *phydev,
@@ -861,7 +851,6 @@ int phy_ethtool_ksettings_set(struct phy_device *phydev,
int phy_mii_ioctl(struct phy_device *phydev, struct ifreq *ifr, int cmd);
int phy_start_interrupts(struct phy_device *phydev);
void phy_print_status(struct phy_device *phydev);
-void phy_device_free(struct phy_device *phydev);
int phy_set_max_speed(struct phy_device *phydev, u32 max_speed);
int phy_register_fixup(const char *bus_id, u32 phy_uid, u32 phy_uid_mask,
@@ -888,8 +877,10 @@ int phy_ethtool_set_link_ksettings(struct net_device *ndev,
const struct ethtool_link_ksettings *cmd);
int phy_ethtool_nway_reset(struct net_device *ndev);
+#if IS_ENABLED(CONFIG_PHYLIB)
int __init mdio_bus_init(void);
void mdio_bus_exit(void);
+#endif
extern struct bus_type mdio_bus_type;
@@ -900,7 +891,7 @@ struct mdio_board_info {
const void *platform_data;
};
-#if IS_ENABLED(CONFIG_PHYLIB)
+#if IS_ENABLED(CONFIG_MDIO_DEVICE)
int mdiobus_register_board_info(const struct mdio_board_info *info,
unsigned int n);
#else
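
With the indirect helpers gone, both Clause 22 and Clause 45 PHYs go through the same phy_read_mmd()/phy_write_mmd() calls; a PHY driver sketch using them (the register address and bit are made up):

#include <linux/mdio.h>
#include <linux/phy.h>

static int foo_phy_config_vendor_reg(struct phy_device *phydev)
{
	int val;

	/* Read a vendor register in MMD 1 (PMA/PMD); address is illustrative. */
	val = phy_read_mmd(phydev, MDIO_MMD_PMAPMD, 0x8000);
	if (val < 0)
		return val;

	/* Clear an illustrative control bit and write the register back. */
	return phy_write_mmd(phydev, MDIO_MMD_PMAPMD, 0x8000, val & ~0x8000);
}
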
diff --git a/include/linux/pinctrl/pinconf-generic.h b/include/linux/pinctrl/pinconf-generic.h
index 7620eb127cff..279e3c5326e3 100644
--- a/include/linux/pinctrl/pinconf-generic.h
+++ b/include/linux/pinctrl/pinconf-generic.h
@@ -42,6 +42,8 @@
* @PIN_CONFIG_BIAS_PULL_UP: the pin will be pulled up (usually with high
* impedance to VDD). If the argument is != 0 pull-up is enabled,
* if it is 0, pull-up is total, i.e. the pin is connected to VDD.
+ * @PIN_CONFIG_BIDIRECTIONAL: the pin will be configured to allow simultaneous
+ * input and output operations.
* @PIN_CONFIG_DRIVE_OPEN_DRAIN: the pin will be driven with open drain (open
* collector) which means it is usually wired with other output ports
* which are then pulled up with an external resistor. Setting this
@@ -96,6 +98,7 @@ enum pin_config_param {
PIN_CONFIG_BIAS_PULL_DOWN,
PIN_CONFIG_BIAS_PULL_PIN_DEFAULT,
PIN_CONFIG_BIAS_PULL_UP,
+ PIN_CONFIG_BIDIRECTIONAL,
PIN_CONFIG_DRIVE_OPEN_DRAIN,
PIN_CONFIG_DRIVE_OPEN_SOURCE,
PIN_CONFIG_DRIVE_PUSH_PULL,
diff --git a/include/linux/platform_data/iommu-omap.h b/include/linux/platform_data/iommu-omap.h
index 0496d171700a..e8b12dbf6170 100644
--- a/include/linux/platform_data/iommu-omap.h
+++ b/include/linux/platform_data/iommu-omap.h
@@ -12,28 +12,8 @@
#include <linux/platform_device.h>
-#define MMU_REG_SIZE 256
-
-/**
- * struct iommu_arch_data - omap iommu private data
- * @name: name of the iommu device
- * @iommu_dev: handle of the iommu device
- *
- * This is an omap iommu private data object, which binds an iommu user
- * to its iommu device. This object should be placed at the iommu user's
- * dev_archdata so generic IOMMU API can be used without having to
- * utilize omap-specific plumbing anymore.
- */
-struct omap_iommu_arch_data {
- const char *name;
- struct omap_iommu *iommu_dev;
-};
-
struct iommu_platform_data {
- const char *name;
const char *reset_name;
- int nr_tlb_entries;
-
int (*assert_reset)(struct platform_device *pdev, const char *name);
int (*deassert_reset)(struct platform_device *pdev, const char *name);
};
diff --git a/include/linux/platform_data/isl9305.h b/include/linux/platform_data/isl9305.h
index 1419133fa69e..4ac1a070af0a 100644
--- a/include/linux/platform_data/isl9305.h
+++ b/include/linux/platform_data/isl9305.h
@@ -24,7 +24,7 @@
struct regulator_init_data;
struct isl9305_pdata {
- struct regulator_init_data *init_data[ISL9305_MAX_REGULATOR];
+ struct regulator_init_data *init_data[ISL9305_MAX_REGULATOR + 1];
};
#endif
diff --git a/include/linux/platform_data/itco_wdt.h b/include/linux/platform_data/itco_wdt.h
index f16542c77ff7..0e95527edf25 100644
--- a/include/linux/platform_data/itco_wdt.h
+++ b/include/linux/platform_data/itco_wdt.h
@@ -14,6 +14,10 @@
struct itco_wdt_platform_data {
char name[32];
unsigned int version;
+ /* private data to be passed to update_no_reboot_bit API */
+ void *no_reboot_priv;
+ /* pointer for platform specific no reboot update function */
+ int (*update_no_reboot_bit)(void *priv, bool set);
};
#endif /* _ITCO_WDT_H_ */
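
The new fields let the platform glue (typically an MFD driver) pass a NO_REBOOT toggle down to the iTCO watchdog; a hypothetical pdata instance:

#include <linux/platform_data/itco_wdt.h>

static int foo_update_no_reboot_bit(void *priv, bool set)
{
	/* Flip the NO_REBOOT bit through whatever 'priv' points at. */
	return 0;
}

static struct itco_wdt_platform_data foo_wdt_pdata = {
	.name			= "Foo SoC TCO",
	.version		= 4,
	.no_reboot_priv		= NULL,	/* would point at regmap/bus context */
	.update_no_reboot_bit	= foo_update_no_reboot_bit,
};
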
diff --git a/include/linux/platform_data/pn544.h b/include/linux/platform_data/pn544.h
deleted file mode 100644
index 5ce1ab983f44..000000000000
--- a/include/linux/platform_data/pn544.h
+++ /dev/null
@@ -1,43 +0,0 @@
-/*
- * Driver include for the PN544 NFC chip.
- *
- * Copyright (C) Nokia Corporation
- *
- * Author: Jari Vanhala <ext-jari.vanhala@nokia.com>
- * Contact: Matti Aaltoenn <matti.j.aaltonen@nokia.com>
- *
- * This program is free software; you can redistribute it and/or
- * modify it under the terms of the GNU General Public License
- * version 2 as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _PN544_H_
-#define _PN544_H_
-
-#include <linux/i2c.h>
-
-enum {
- NFC_GPIO_ENABLE,
- NFC_GPIO_FW_RESET,
- NFC_GPIO_IRQ
-};
-
-/* board config */
-struct pn544_nfc_platform_data {
- int (*request_resources) (struct i2c_client *client);
- void (*free_resources) (void);
- void (*enable) (int fw);
- int (*test) (void);
- void (*disable) (void);
- int (*get_gpio)(int type);
-};
-
-#endif /* _PN544_H_ */
diff --git a/include/linux/platform_data/st21nfca.h b/include/linux/platform_data/st21nfca.h
deleted file mode 100644
index cc2bdafb0c69..000000000000
--- a/include/linux/platform_data/st21nfca.h
+++ /dev/null
@@ -1,33 +0,0 @@
-/*
- * Driver include for the ST21NFCA NFC chip.
- *
- * Copyright (C) 2014 STMicroelectronics SAS. All rights reserved.
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms and conditions of the GNU General Public License,
- * version 2, as published by the Free Software Foundation.
- *
- * This program is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
- * GNU General Public License for more details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this program; if not, see <http://www.gnu.org/licenses/>.
- */
-
-#ifndef _ST21NFCA_HCI_H_
-#define _ST21NFCA_HCI_H_
-
-#include <linux/i2c.h>
-
-#define ST21NFCA_HCI_DRIVER_NAME "st21nfca_hci"
-
-struct st21nfca_nfc_platform_data {
- unsigned int gpio_ena;
- unsigned int irq_polarity;
- bool is_ese_present;
- bool is_uicc_present;
-};
-
-#endif /* _ST21NFCA_HCI_H_ */
diff --git a/include/linux/pm_domain.h b/include/linux/pm_domain.h
index 5339ed5bd6f9..b7803a251044 100644
--- a/include/linux/pm_domain.h
+++ b/include/linux/pm_domain.h
@@ -20,6 +20,7 @@
/* Defines used for the flags field in the struct generic_pm_domain */
#define GENPD_FLAG_PM_CLK (1U << 0) /* PM domain uses PM clk */
#define GENPD_FLAG_IRQ_SAFE (1U << 1) /* PM domain operates in atomic */
+#define GENPD_FLAG_ALWAYS_ON (1U << 2) /* PM domain is always powered on */
enum gpd_status {
GPD_STATE_ACTIVE = 0, /* PM domain is active */
@@ -117,6 +118,7 @@ struct generic_pm_domain_data {
struct pm_domain_data base;
struct gpd_timing_data td;
struct notifier_block nb;
+ void *data;
};
#ifdef CONFIG_PM_GENERIC_DOMAINS
diff --git a/include/linux/pm_wakeup.h b/include/linux/pm_wakeup.h
index a3447932df1f..4c2cba7ec1d4 100644
--- a/include/linux/pm_wakeup.h
+++ b/include/linux/pm_wakeup.h
@@ -106,8 +106,8 @@ extern void __pm_stay_awake(struct wakeup_source *ws);
extern void pm_stay_awake(struct device *dev);
extern void __pm_relax(struct wakeup_source *ws);
extern void pm_relax(struct device *dev);
-extern void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec);
-extern void pm_wakeup_event(struct device *dev, unsigned int msec);
+extern void pm_wakeup_ws_event(struct wakeup_source *ws, unsigned int msec, bool hard);
+extern void pm_wakeup_dev_event(struct device *dev, unsigned int msec, bool hard);
#else /* !CONFIG_PM_SLEEP */
@@ -182,9 +182,11 @@ static inline void __pm_relax(struct wakeup_source *ws) {}
static inline void pm_relax(struct device *dev) {}
-static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec) {}
+static inline void pm_wakeup_ws_event(struct wakeup_source *ws,
+ unsigned int msec, bool hard) {}
-static inline void pm_wakeup_event(struct device *dev, unsigned int msec) {}
+static inline void pm_wakeup_dev_event(struct device *dev, unsigned int msec,
+ bool hard) {}
#endif /* !CONFIG_PM_SLEEP */
@@ -201,4 +203,19 @@ static inline void wakeup_source_trash(struct wakeup_source *ws)
wakeup_source_drop(ws);
}
+static inline void __pm_wakeup_event(struct wakeup_source *ws, unsigned int msec)
+{
+ return pm_wakeup_ws_event(ws, msec, false);
+}
+
+static inline void pm_wakeup_event(struct device *dev, unsigned int msec)
+{
+ return pm_wakeup_dev_event(dev, msec, false);
+}
+
+static inline void pm_wakeup_hard_event(struct device *dev)
+{
+ return pm_wakeup_dev_event(dev, 0, true);
+}
+
#endif /* _LINUX_PM_WAKEUP_H */
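
The old __pm_wakeup_event()/pm_wakeup_event() calls are now thin wrappers that pass hard == false; a driver that wants the new behaviour opts in explicitly, e.g. (handler context is assumed):

#include <linux/pm_wakeup.h>

static void foo_report_wake(struct device *dev)
{
	/* Same behaviour as before: hard == false via the wrapper above. */
	pm_wakeup_event(dev, 100);

	/* New: hard == true, see pm_wakeup_hard_event() above. */
	pm_wakeup_hard_event(dev);
}
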
diff --git a/include/linux/pmem.h b/include/linux/pmem.h
index e856c2cb0fe8..71ecf3d46aac 100644
--- a/include/linux/pmem.h
+++ b/include/linux/pmem.h
@@ -31,12 +31,6 @@ static inline void arch_memcpy_to_pmem(void *dst, const void *src, size_t n)
BUG();
}
-static inline int arch_memcpy_from_pmem(void *dst, const void *src, size_t n)
-{
- BUG();
- return -EFAULT;
-}
-
static inline size_t arch_copy_from_iter_pmem(void *addr, size_t bytes,
struct iov_iter *i)
{
@@ -65,23 +59,6 @@ static inline bool arch_has_pmem_api(void)
return IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API);
}
-/*
- * memcpy_from_pmem - read from persistent memory with error handling
- * @dst: destination buffer
- * @src: source buffer
- * @size: transfer length
- *
- * Returns 0 on success negative error code on failure.
- */
-static inline int memcpy_from_pmem(void *dst, void const *src, size_t size)
-{
- if (arch_has_pmem_api())
- return arch_memcpy_from_pmem(dst, src, size);
- else
- memcpy(dst, src, size);
- return 0;
-}
-
/**
* memcpy_to_pmem - copy data to persistent memory
* @dst: destination buffer for the copy
diff --git a/include/linux/poll.h b/include/linux/poll.h
index a46d6755035e..75ffc5729e4c 100644
--- a/include/linux/poll.h
+++ b/include/linux/poll.h
@@ -98,64 +98,8 @@ extern int poll_schedule_timeout(struct poll_wqueues *pwq, int state,
ktime_t *expires, unsigned long slack);
extern u64 select_estimate_accuracy(struct timespec64 *tv);
-
-static inline int poll_schedule(struct poll_wqueues *pwq, int state)
-{
- return poll_schedule_timeout(pwq, state, NULL, 0);
-}
-
-/*
- * Scalable version of the fd_set.
- */
-
-typedef struct {
- unsigned long *in, *out, *ex;
- unsigned long *res_in, *res_out, *res_ex;
-} fd_set_bits;
-
-/*
- * How many longwords for "nr" bits?
- */
-#define FDS_BITPERLONG (8*sizeof(long))
-#define FDS_LONGS(nr) (((nr)+FDS_BITPERLONG-1)/FDS_BITPERLONG)
-#define FDS_BYTES(nr) (FDS_LONGS(nr)*sizeof(long))
-
-/*
- * We do a VERIFY_WRITE here even though we are only reading this time:
- * we'll write to it eventually..
- *
- * Use "unsigned long" accesses to let user-mode fd_set's be long-aligned.
- */
-static inline
-int get_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
-{
- nr = FDS_BYTES(nr);
- if (ufdset)
- return copy_from_user(fdset, ufdset, nr) ? -EFAULT : 0;
-
- memset(fdset, 0, nr);
- return 0;
-}
-
-static inline unsigned long __must_check
-set_fd_set(unsigned long nr, void __user *ufdset, unsigned long *fdset)
-{
- if (ufdset)
- return __copy_to_user(ufdset, fdset, FDS_BYTES(nr));
- return 0;
-}
-
-static inline
-void zero_fd_set(unsigned long nr, unsigned long *fdset)
-{
- memset(fdset, 0, FDS_BYTES(nr));
-}
-
#define MAX_INT64_SECONDS (((s64)(~((u64)0)>>1)/HZ)-1)
-extern int do_select(int n, fd_set_bits *fds, struct timespec64 *end_time);
-extern int do_sys_poll(struct pollfd __user * ufds, unsigned int nfds,
- struct timespec64 *end_time);
extern int core_sys_select(int n, fd_set __user *inp, fd_set __user *outp,
fd_set __user *exp, struct timespec64 *end_time);
diff --git a/include/linux/posix-clock.h b/include/linux/posix-clock.h
index 34c4498b800f..83b22ae9ae12 100644
--- a/include/linux/posix-clock.h
+++ b/include/linux/posix-clock.h
@@ -59,23 +59,23 @@ struct posix_clock_operations {
int (*clock_adjtime)(struct posix_clock *pc, struct timex *tx);
- int (*clock_gettime)(struct posix_clock *pc, struct timespec *ts);
+ int (*clock_gettime)(struct posix_clock *pc, struct timespec64 *ts);
- int (*clock_getres) (struct posix_clock *pc, struct timespec *ts);
+ int (*clock_getres) (struct posix_clock *pc, struct timespec64 *ts);
int (*clock_settime)(struct posix_clock *pc,
- const struct timespec *ts);
+ const struct timespec64 *ts);
int (*timer_create) (struct posix_clock *pc, struct k_itimer *kit);
int (*timer_delete) (struct posix_clock *pc, struct k_itimer *kit);
void (*timer_gettime)(struct posix_clock *pc,
- struct k_itimer *kit, struct itimerspec *tsp);
+ struct k_itimer *kit, struct itimerspec64 *tsp);
int (*timer_settime)(struct posix_clock *pc,
struct k_itimer *kit, int flags,
- struct itimerspec *tsp, struct itimerspec *old);
+ struct itimerspec64 *tsp, struct itimerspec64 *old);
/*
* Optional character device methods:
*/
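
After the timespec64 conversion a clock driver's callbacks take the new types directly, as in this sketch (the hardware read is faked with the system clock):

#include <linux/module.h>
#include <linux/posix-clock.h>
#include <linux/timekeeping.h>

static int foo_clock_gettime(struct posix_clock *pc, struct timespec64 *ts)
{
	/* A real driver reads its own hardware counter here. */
	ktime_get_real_ts64(ts);
	return 0;
}

static const struct posix_clock_operations foo_clock_ops = {
	.owner		= THIS_MODULE,
	.clock_gettime	= foo_clock_gettime,
};
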
diff --git a/include/linux/posix-timers.h b/include/linux/posix-timers.h
index 64aa189efe21..8c1e43ab14a9 100644
--- a/include/linux/posix-timers.h
+++ b/include/linux/posix-timers.h
@@ -87,22 +87,22 @@ struct k_itimer {
};
struct k_clock {
- int (*clock_getres) (const clockid_t which_clock, struct timespec *tp);
+ int (*clock_getres) (const clockid_t which_clock, struct timespec64 *tp);
int (*clock_set) (const clockid_t which_clock,
- const struct timespec *tp);
- int (*clock_get) (const clockid_t which_clock, struct timespec * tp);
+ const struct timespec64 *tp);
+ int (*clock_get) (const clockid_t which_clock, struct timespec64 *tp);
int (*clock_adj) (const clockid_t which_clock, struct timex *tx);
int (*timer_create) (struct k_itimer *timer);
int (*nsleep) (const clockid_t which_clock, int flags,
- struct timespec *, struct timespec __user *);
+ struct timespec64 *, struct timespec __user *);
long (*nsleep_restart) (struct restart_block *restart_block);
- int (*timer_set) (struct k_itimer * timr, int flags,
- struct itimerspec * new_setting,
- struct itimerspec * old_setting);
- int (*timer_del) (struct k_itimer * timr);
+ int (*timer_set) (struct k_itimer *timr, int flags,
+ struct itimerspec64 *new_setting,
+ struct itimerspec64 *old_setting);
+ int (*timer_del) (struct k_itimer *timr);
#define TIMER_RETRY 1
- void (*timer_get) (struct k_itimer * timr,
- struct itimerspec * cur_setting);
+ void (*timer_get) (struct k_itimer *timr,
+ struct itimerspec64 *cur_setting);
};
extern struct k_clock clock_posix_cpu;
diff --git a/include/linux/power/bq24190_charger.h b/include/linux/power/bq24190_charger.h
deleted file mode 100644
index 9f0283721cbc..000000000000
--- a/include/linux/power/bq24190_charger.h
+++ /dev/null
@@ -1,16 +0,0 @@
-/*
- * Platform data for the TI bq24190 battery charger driver.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-#ifndef _BQ24190_CHARGER_H_
-#define _BQ24190_CHARGER_H_
-
-struct bq24190_platform_data {
- unsigned int gpio_int; /* GPIO pin that's connected to INT# */
-};
-
-#endif
diff --git a/include/linux/printk.h b/include/linux/printk.h
index 571257e0f53d..e10f27468322 100644
--- a/include/linux/printk.h
+++ b/include/linux/printk.h
@@ -198,7 +198,7 @@ extern void wake_up_klogd(void);
char *log_buf_addr_get(void);
u32 log_buf_len_get(void);
-void log_buf_kexec_setup(void);
+void log_buf_vmcoreinfo_setup(void);
void __init setup_log_buf(int early);
__printf(1, 2) void dump_stack_set_arch_desc(const char *fmt, ...);
void dump_stack_print_info(const char *log_lvl);
@@ -246,7 +246,7 @@ static inline u32 log_buf_len_get(void)
return 0;
}
-static inline void log_buf_kexec_setup(void)
+static inline void log_buf_vmcoreinfo_setup(void)
{
}
diff --git a/include/linux/proc_ns.h b/include/linux/proc_ns.h
index 12cb8bd81d2d..58ab28d81fc2 100644
--- a/include/linux/proc_ns.h
+++ b/include/linux/proc_ns.h
@@ -14,6 +14,7 @@ struct inode;
struct proc_ns_operations {
const char *name;
+ const char *real_ns_name;
int type;
struct ns_common *(*get)(struct task_struct *task);
void (*put)(struct ns_common *ns);
@@ -26,6 +27,7 @@ extern const struct proc_ns_operations netns_operations;
extern const struct proc_ns_operations utsns_operations;
extern const struct proc_ns_operations ipcns_operations;
extern const struct proc_ns_operations pidns_operations;
+extern const struct proc_ns_operations pidns_for_children_operations;
extern const struct proc_ns_operations userns_operations;
extern const struct proc_ns_operations mntns_operations;
extern const struct proc_ns_operations cgroupns_operations;
diff --git a/include/linux/property.h b/include/linux/property.h
index 64e3a9c6d95f..2f482616a2f2 100644
--- a/include/linux/property.h
+++ b/include/linux/property.h
@@ -33,6 +33,8 @@ enum dev_dma_attr {
DEV_DMA_COHERENT,
};
+struct fwnode_handle *dev_fwnode(struct device *dev);
+
bool device_property_present(struct device *dev, const char *propname);
int device_property_read_u8_array(struct device *dev, const char *propname,
u8 *val, size_t nval);
@@ -70,6 +72,15 @@ int fwnode_property_read_string(struct fwnode_handle *fwnode,
int fwnode_property_match_string(struct fwnode_handle *fwnode,
const char *propname, const char *string);
+struct fwnode_handle *fwnode_get_parent(struct fwnode_handle *fwnode);
+struct fwnode_handle *fwnode_get_next_parent(struct fwnode_handle *fwnode);
+struct fwnode_handle *fwnode_get_next_child_node(struct fwnode_handle *fwnode,
+ struct fwnode_handle *child);
+
+#define fwnode_for_each_child_node(fwnode, child) \
+ for (child = fwnode_get_next_child_node(fwnode, NULL); child; \
+ child = fwnode_get_next_child_node(fwnode, child))
+
struct fwnode_handle *device_get_next_child_node(struct device *dev,
struct fwnode_handle *child);
@@ -77,9 +88,12 @@ struct fwnode_handle *device_get_next_child_node(struct device *dev,
for (child = device_get_next_child_node(dev, NULL); child; \
child = device_get_next_child_node(dev, child))
+struct fwnode_handle *fwnode_get_named_child_node(struct fwnode_handle *fwnode,
+ const char *childname);
struct fwnode_handle *device_get_named_child_node(struct device *dev,
const char *childname);
+void fwnode_handle_get(struct fwnode_handle *fwnode);
void fwnode_handle_put(struct fwnode_handle *fwnode);
unsigned int device_get_child_node_count(struct device *dev);
@@ -258,4 +272,16 @@ int device_get_phy_mode(struct device *dev);
void *device_get_mac_address(struct device *dev, char *addr, int alen);
+struct fwnode_handle *fwnode_graph_get_next_endpoint(
+ struct fwnode_handle *fwnode, struct fwnode_handle *prev);
+struct fwnode_handle *fwnode_graph_get_remote_port_parent(
+ struct fwnode_handle *fwnode);
+struct fwnode_handle *fwnode_graph_get_remote_port(
+ struct fwnode_handle *fwnode);
+struct fwnode_handle *fwnode_graph_get_remote_endpoint(
+ struct fwnode_handle *fwnode);
+
+int fwnode_graph_parse_endpoint(struct fwnode_handle *fwnode,
+ struct fwnode_endpoint *endpoint);
+
#endif /* _LINUX_PROPERTY_H_ */
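
Illustrative sketch (not part of this patch) of the new fwnode child iteration; the "reg" property and the helper name are made up for the example.

#include <linux/property.h>

/* Illustrative only: count child nodes that carry a "reg" property. */
static unsigned int demo_count_addressable_children(struct device *dev)
{
	struct fwnode_handle *child;
	unsigned int n = 0;

	fwnode_for_each_child_node(dev_fwnode(dev), child)
		if (fwnode_property_present(child, "reg"))
			n++;

	return n;
}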
diff --git a/include/linux/pstore.h b/include/linux/pstore.h
index 0da29cae009b..e2233f50f428 100644
--- a/include/linux/pstore.h
+++ b/include/linux/pstore.h
@@ -30,7 +30,9 @@
#include <linux/time.h>
#include <linux/types.h>
-/* types */
+struct module;
+
+/* pstore record types (see fs/pstore/inode.c for filename templates) */
enum pstore_type_id {
PSTORE_TYPE_DMESG = 0,
PSTORE_TYPE_MCE = 1,
@@ -45,42 +47,146 @@ enum pstore_type_id {
PSTORE_TYPE_UNKNOWN = 255
};
-struct module;
+struct pstore_info;
+/**
+ * struct pstore_record - details of a pstore record entry
+ * @psi: pstore backend driver information
+ * @type: pstore record type
+ * @id: per-type unique identifier for record
+ * @time: timestamp of the record
+ * @buf: pointer to record contents
+ * @size: size of @buf
+ * @ecc_notice_size:
+ * ECC information for @buf
+ *
+ * Valid for PSTORE_TYPE_DMESG @type:
+ *
+ * @count: Oops count since boot
+ * @reason: kdump reason for notification
+ * @part: position in a multipart record
+ * @compressed: whether the buffer is compressed
+ *
+ */
+struct pstore_record {
+ struct pstore_info *psi;
+ enum pstore_type_id type;
+ u64 id;
+ struct timespec time;
+ char *buf;
+ ssize_t size;
+ ssize_t ecc_notice_size;
+ int count;
+ enum kmsg_dump_reason reason;
+ unsigned int part;
+ bool compressed;
+};
+
+/**
+ * struct pstore_info - backend pstore driver structure
+ *
+ * @owner: module which is responsible for this backend driver
+ * @name: name of the backend driver
+ *
+ * @buf_lock: spinlock to serialize access to @buf
+ * @buf: preallocated crash dump buffer
+ * @bufsize: size of @buf available for crash dump writes
+ *
+ * @read_mutex: serializes @open, @read, @close, and @erase callbacks
+ * @flags: bitfield of frontends the backend can accept writes for
+ * @data: backend-private pointer passed back during callbacks
+ *
+ * Callbacks:
+ *
+ * @open:
+ * Notify backend that pstore is starting a full read of backend
+ * records. Followed by one or more @read calls, and a final @close.
+ *
+ * @psi: in: pointer to the struct pstore_info for the backend
+ *
+ * Returns 0 on success, and non-zero on error.
+ *
+ * @close:
+ * Notify backend that pstore has finished a full read of backend
+ * records. Always preceded by an @open call and one or more @read
+ * calls.
+ *
+ * @psi: in: pointer to the struct pstore_info for the backend
+ *
+ * Returns 0 on success, and non-zero on error. (Though pstore will
+ * ignore the error.)
+ *
+ * @read:
+ * Read next available backend record. Called after a successful
+ * @open.
+ *
+ * @record:
+ * pointer to record to populate. @buf should be allocated
+ * by the backend and filled. At least @type and @id should
+ * be populated, since these are used when creating pstorefs
+ * file names.
+ *
+ * Returns record size on success, zero when no more records are
+ * available, or negative on error.
+ *
+ * @write:
+ * A newly generated record needs to be written to backend storage.
+ *
+ * @record:
+ * pointer to record metadata. When @type is PSTORE_TYPE_DMESG,
+ * @buf will be pointing to the preallocated @psi.buf, since
+ * memory allocation may be broken during an Oops. Regardless,
+ * @buf must be processed or copied before returning. The
+ * backend is also expected to write @id with something that
+ * can help identify this record to a future @erase callback.
+ *
+ * Returns 0 on success, and non-zero on error.
+ *
+ * @write_user:
+ * Perform a frontend write to a backend record, using a specified
+ * buffer that is coming directly from userspace, instead of the
+ * @record @buf.
+ *
+ * @record: pointer to record metadata.
+ * @buf: pointer to userspace contents to write to backend
+ *
+ * Returns 0 on success, and non-zero on error.
+ *
+ * @erase:
+ * Delete a record from backend storage. Different backends
+ * identify records differently, so entire original record is
+ * passed back to assist in identification of what the backend
+ * should remove from storage.
+ *
+ * @record: pointer to record metadata.
+ *
+ * Returns 0 on success, and non-zero on error.
+ *
+ */
struct pstore_info {
struct module *owner;
char *name;
- spinlock_t buf_lock; /* serialize access to 'buf' */
+
+ spinlock_t buf_lock;
char *buf;
size_t bufsize;
- struct mutex read_mutex; /* serialize open/read/close */
+
+ struct mutex read_mutex;
+
int flags;
+ void *data;
+
int (*open)(struct pstore_info *psi);
int (*close)(struct pstore_info *psi);
- ssize_t (*read)(u64 *id, enum pstore_type_id *type,
- int *count, struct timespec *time, char **buf,
- bool *compressed, ssize_t *ecc_notice_size,
- struct pstore_info *psi);
- int (*write)(enum pstore_type_id type,
- enum kmsg_dump_reason reason, u64 *id,
- unsigned int part, int count, bool compressed,
- size_t size, struct pstore_info *psi);
- int (*write_buf)(enum pstore_type_id type,
- enum kmsg_dump_reason reason, u64 *id,
- unsigned int part, const char *buf, bool compressed,
- size_t size, struct pstore_info *psi);
- int (*write_buf_user)(enum pstore_type_id type,
- enum kmsg_dump_reason reason, u64 *id,
- unsigned int part, const char __user *buf,
- bool compressed, size_t size, struct pstore_info *psi);
- int (*erase)(enum pstore_type_id type, u64 id,
- int count, struct timespec time,
- struct pstore_info *psi);
- void *data;
+ ssize_t (*read)(struct pstore_record *record);
+ int (*write)(struct pstore_record *record);
+ int (*write_user)(struct pstore_record *record,
+ const char __user *buf);
+ int (*erase)(struct pstore_record *record);
};
+/* Supported frontends */
#define PSTORE_FLAGS_DMESG (1 << 0)
-#define PSTORE_FLAGS_FRAGILE PSTORE_FLAGS_DMESG
#define PSTORE_FLAGS_CONSOLE (1 << 1)
#define PSTORE_FLAGS_FTRACE (1 << 2)
#define PSTORE_FLAGS_PMSG (1 << 3)
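
A hedged sketch (not part of this patch) of how a backend looks with the record-based callbacks; the backend name and the static buffer stand in for real persistent storage, and only the callback signatures come from this header.

#include <linux/module.h>
#include <linux/pstore.h>
#include <linux/string.h>

static char demo_store[4096];		/* stand-in for real persistent storage */
static ssize_t demo_store_len;

/* Illustrative only: persist one dmesg record into the static buffer. */
static int demo_pstore_write(struct pstore_record *record)
{
	if (record->size > sizeof(demo_store))
		return -EFBIG;
	memcpy(demo_store, record->buf, record->size);
	demo_store_len = record->size;
	record->id = 1;			/* something @erase can recognize later */
	return 0;
}

static struct pstore_info demo_psinfo = {
	.owner	= THIS_MODULE,
	.name	= "demo",
	.flags	= PSTORE_FLAGS_DMESG,
	.write	= demo_pstore_write,
};

/* pstore_register(&demo_psinfo) would then hook this backend up. */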
diff --git a/include/linux/ptr_ring.h b/include/linux/ptr_ring.h
index 6c70444da3b9..6b2e0dd88569 100644
--- a/include/linux/ptr_ring.h
+++ b/include/linux/ptr_ring.h
@@ -34,11 +34,13 @@
struct ptr_ring {
int producer ____cacheline_aligned_in_smp;
spinlock_t producer_lock;
- int consumer ____cacheline_aligned_in_smp;
+ int consumer_head ____cacheline_aligned_in_smp; /* next valid entry */
+ int consumer_tail; /* next entry to invalidate */
spinlock_t consumer_lock;
/* Shared consumer/producer data */
/* Read-only by both the producer and the consumer */
int size ____cacheline_aligned_in_smp; /* max entries in queue */
+ int batch; /* number of entries to consume in a batch */
void **queue;
};
@@ -170,7 +172,7 @@ static inline int ptr_ring_produce_bh(struct ptr_ring *r, void *ptr)
static inline void *__ptr_ring_peek(struct ptr_ring *r)
{
if (likely(r->size))
- return r->queue[r->consumer];
+ return r->queue[r->consumer_head];
return NULL;
}
@@ -231,9 +233,38 @@ static inline bool ptr_ring_empty_bh(struct ptr_ring *r)
/* Must only be called after __ptr_ring_peek returned !NULL */
static inline void __ptr_ring_discard_one(struct ptr_ring *r)
{
- r->queue[r->consumer++] = NULL;
- if (unlikely(r->consumer >= r->size))
- r->consumer = 0;
+ /* Fundamentally, what we want to do is update consumer
+ * index and zero out the entry so producer can reuse it.
+ * Doing it naively at each consume would be as simple as:
+ * r->queue[r->consumer++] = NULL;
+ * if (unlikely(r->consumer >= r->size))
+ * r->consumer = 0;
+ * but that is suboptimal when the ring is full as producer is writing
+ * out new entries in the same cache line. Defer these updates until a
+ * batch of entries has been consumed.
+ */
+ int head = r->consumer_head++;
+
+ /* Once we have processed enough entries invalidate them in
+ * the ring all at once so producer can reuse their space in the ring.
+ * We also do this when we reach end of the ring - not mandatory
+ * but helps keep the implementation simple.
+ */
+ if (unlikely(r->consumer_head - r->consumer_tail >= r->batch ||
+ r->consumer_head >= r->size)) {
+ /* Zero out entries in the reverse order: this way we touch the
+ * cache line that producer might currently be reading the last;
+ * producer won't make progress and touch other cache lines
+ * besides the first one until we write out all entries.
+ */
+ while (likely(head >= r->consumer_tail))
+ r->queue[head--] = NULL;
+ r->consumer_tail = r->consumer_head;
+ }
+ if (unlikely(r->consumer_head >= r->size)) {
+ r->consumer_head = 0;
+ r->consumer_tail = 0;
+ }
}
static inline void *__ptr_ring_consume(struct ptr_ring *r)
@@ -345,14 +376,27 @@ static inline void **__ptr_ring_init_queue_alloc(int size, gfp_t gfp)
return kzalloc(ALIGN(size * sizeof(void *), SMP_CACHE_BYTES), gfp);
}
+static inline void __ptr_ring_set_size(struct ptr_ring *r, int size)
+{
+ r->size = size;
+ r->batch = SMP_CACHE_BYTES * 2 / sizeof(*(r->queue));
+ /* We need to set batch at least to 1 to make logic
+ * in __ptr_ring_discard_one work correctly.
+ * Batching too much (because ring is small) would cause a lot of
+ * burstiness. Needs tuning, for now disable batching.
+ */
+ if (r->batch > r->size / 2 || !r->batch)
+ r->batch = 1;
+}
+
static inline int ptr_ring_init(struct ptr_ring *r, int size, gfp_t gfp)
{
r->queue = __ptr_ring_init_queue_alloc(size, gfp);
if (!r->queue)
return -ENOMEM;
- r->size = size;
- r->producer = r->consumer = 0;
+ __ptr_ring_set_size(r, size);
+ r->producer = r->consumer_head = r->consumer_tail = 0;
spin_lock_init(&r->producer_lock);
spin_lock_init(&r->consumer_lock);
@@ -373,9 +417,10 @@ static inline void **__ptr_ring_swap_queue(struct ptr_ring *r, void **queue,
else if (destroy)
destroy(ptr);
- r->size = size;
+ __ptr_ring_set_size(r, size);
r->producer = producer;
- r->consumer = 0;
+ r->consumer_head = 0;
+ r->consumer_tail = 0;
old = r->queue;
r->queue = queue;
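
Callers are unaffected by the consumer_head/consumer_tail split; the usual produce/consume pattern still looks like this (illustrative sketch, not part of this patch).

#include <linux/ptr_ring.h>

/* Illustrative only: round-trip one pointer through a small ring. */
static int demo_ptr_ring(void *item)
{
	struct ptr_ring ring;
	void *out;
	int err;

	err = ptr_ring_init(&ring, 16, GFP_KERNEL);	/* batch is sized internally */
	if (err)
		return err;

	err = ptr_ring_produce(&ring, item);		/* -ENOSPC if full */
	out = ptr_ring_consume(&ring);			/* NULL if empty */

	ptr_ring_cleanup(&ring, NULL);
	return err ?: (out == item ? 0 : -EINVAL);
}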
diff --git a/include/linux/qcom_scm.h b/include/linux/qcom_scm.h
index d32f6f1a5225..e5380471c2cd 100644
--- a/include/linux/qcom_scm.h
+++ b/include/linux/qcom_scm.h
@@ -40,6 +40,9 @@ extern int qcom_scm_pas_shutdown(u32 peripheral);
extern void qcom_scm_cpu_power_down(u32 flags);
extern u32 qcom_scm_get_version(void);
extern int qcom_scm_set_remote_state(u32 state, u32 id);
+extern int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare);
+extern int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size);
+extern int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare);
#else
static inline
int qcom_scm_set_cold_boot_addr(void *entry, const cpumask_t *cpus)
@@ -67,5 +70,8 @@ static inline void qcom_scm_cpu_power_down(u32 flags) {}
static inline u32 qcom_scm_get_version(void) { return 0; }
static inline u32
qcom_scm_set_remote_state(u32 state,u32 id) { return -ENODEV; }
+static inline int qcom_scm_restore_sec_cfg(u32 device_id, u32 spare) { return -ENODEV; }
+static inline int qcom_scm_iommu_secure_ptbl_size(u32 spare, size_t *size) { return -ENODEV; }
+static inline int qcom_scm_iommu_secure_ptbl_init(u64 addr, u32 size, u32 spare) { return -ENODEV; }
#endif
#endif
diff --git a/include/linux/qed/common_hsi.h b/include/linux/qed/common_hsi.h
index 52966b9bfde3..fbab6e0514f0 100644
--- a/include/linux/qed/common_hsi.h
+++ b/include/linux/qed/common_hsi.h
@@ -100,8 +100,8 @@
#define MAX_NUM_LL2_TX_STATS_COUNTERS 32
#define FW_MAJOR_VERSION 8
-#define FW_MINOR_VERSION 10
-#define FW_REVISION_VERSION 10
+#define FW_MINOR_VERSION 15
+#define FW_REVISION_VERSION 3
#define FW_ENGINEERING_VERSION 0
/***********************/
@@ -187,6 +187,9 @@
/* DEMS */
#define DQ_DEMS_LEGACY 0
+#define DQ_DEMS_TOE_MORE_TO_SEND 3
+#define DQ_DEMS_TOE_LOCAL_ADV_WND 4
+#define DQ_DEMS_ROCE_CQ_CONS 7
/* XCM agg val selection */
#define DQ_XCM_AGG_VAL_SEL_WORD2 0
@@ -214,6 +217,9 @@
#define DQ_XCM_ISCSI_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
#define DQ_XCM_ISCSI_EXP_STAT_SN_CMD DQ_XCM_AGG_VAL_SEL_REG6
#define DQ_XCM_ROCE_SQ_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_TX_BD_PROD_CMD DQ_XCM_AGG_VAL_SEL_WORD4
+#define DQ_XCM_TOE_MORE_TO_SEND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG3
+#define DQ_XCM_TOE_LOCAL_ADV_WND_SEQ_CMD DQ_XCM_AGG_VAL_SEL_REG4
/* UCM agg val selection (HW) */
#define DQ_UCM_AGG_VAL_SEL_WORD0 0
@@ -269,6 +275,8 @@
#define DQ_XCM_ISCSI_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
#define DQ_XCM_ISCSI_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
#define DQ_XCM_ISCSI_PROC_ONLY_CLEANUP_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF23)
+#define DQ_XCM_TOE_DQ_FLUSH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF19)
+#define DQ_XCM_TOE_SLOW_PATH_CMD BIT(DQ_XCM_AGG_FLG_SHIFT_CF22)
/* UCM agg counter flag selection (HW) */
#define DQ_UCM_AGG_FLG_SHIFT_CF0 0
@@ -285,6 +293,9 @@
#define DQ_UCM_ETH_PMD_RX_ARM_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
#define DQ_UCM_ROCE_CQ_ARM_SE_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4)
#define DQ_UCM_ROCE_CQ_ARM_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
+#define DQ_UCM_TOE_TIMER_STOP_ALL_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF3)
+#define DQ_UCM_TOE_SLOW_PATH_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF4)
+#define DQ_UCM_TOE_DQ_CF_CMD BIT(DQ_UCM_AGG_FLG_SHIFT_CF5)
/* TCM agg counter flag selection (HW) */
#define DQ_TCM_AGG_FLG_SHIFT_CF0 0
@@ -301,6 +312,9 @@
#define DQ_TCM_FCOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
#define DQ_TCM_ISCSI_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
#define DQ_TCM_ISCSI_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
+#define DQ_TCM_TOE_FLUSH_Q0_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
+#define DQ_TCM_TOE_TIMER_STOP_ALL_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF3)
+#define DQ_TCM_IWARP_POST_RQ_CF_CMD BIT(DQ_TCM_AGG_FLG_SHIFT_CF1)
/* PWM address mapping */
#define DQ_PWM_OFFSET_DPM_BASE 0x0
@@ -689,6 +703,16 @@ struct iscsi_eqe_data {
#define ISCSI_EQE_DATA_RESERVED0_SHIFT 7
};
+struct rdma_eqe_destroy_qp {
+ __le32 cid;
+ u8 reserved[4];
+};
+
+union rdma_eqe_data {
+ struct regpair async_handle;
+ struct rdma_eqe_destroy_qp rdma_destroy_qp_data;
+};
+
struct malicious_vf_eqe_data {
u8 vf_id;
u8 err_id;
@@ -705,9 +729,9 @@ union event_ring_data {
u8 bytes[8];
struct vf_pf_channel_eqe_data vf_pf_channel;
struct iscsi_eqe_data iscsi_info;
+ union rdma_eqe_data rdma_data;
struct malicious_vf_eqe_data malicious_vf;
struct initial_cleanup_eqe_data vf_init_cleanup;
- struct regpair roce_handle;
};
/* Event Ring Entry */
diff --git a/include/linux/qed/eth_common.h b/include/linux/qed/eth_common.h
index 4b402fb0eaad..34d93eb5bfba 100644
--- a/include/linux/qed/eth_common.h
+++ b/include/linux/qed/eth_common.h
@@ -49,6 +49,9 @@
#define ETH_RX_CQE_PAGE_SIZE_BYTES 4096
#define ETH_RX_NUM_NEXT_PAGE_BDS 2
+#define ETH_MAX_TUNN_LSO_INNER_IPV4_OFFSET 253
+#define ETH_MAX_TUNN_LSO_INNER_IPV6_OFFSET 251
+
#define ETH_TX_MIN_BDS_PER_NON_LSO_PKT 1
#define ETH_TX_MAX_BDS_PER_NON_LSO_PACKET 18
#define ETH_TX_MAX_BDS_PER_LSO_PACKET 255
diff --git a/include/linux/qed/fcoe_common.h b/include/linux/qed/fcoe_common.h
index 2e417a45c5f7..947a635d04bb 100644
--- a/include/linux/qed/fcoe_common.h
+++ b/include/linux/qed/fcoe_common.h
@@ -109,13 +109,6 @@ struct fcoe_conn_terminate_ramrod_data {
struct regpair terminate_params_addr;
};
-struct fcoe_fast_sgl_ctx {
- struct regpair sgl_start_addr;
- __le32 sgl_byte_offset;
- __le16 task_reuse_cnt;
- __le16 init_offset_in_first_sge;
-};
-
struct fcoe_slow_sgl_ctx {
struct regpair base_sgl_addr;
__le16 curr_sge_off;
@@ -124,23 +117,16 @@ struct fcoe_slow_sgl_ctx {
__le16 reserved;
};
-struct fcoe_sge {
- struct regpair sge_addr;
- __le16 size;
- __le16 reserved0;
- u8 reserved1[3];
- u8 is_valid_sge;
-};
-
-union fcoe_data_desc_ctx {
- struct fcoe_fast_sgl_ctx fast;
- struct fcoe_slow_sgl_ctx slow;
- struct fcoe_sge single_sge;
-};
-
union fcoe_dix_desc_ctx {
struct fcoe_slow_sgl_ctx dix_sgl;
- struct fcoe_sge cached_dix_sge;
+ struct scsi_sge cached_dix_sge;
+};
+
+struct fcoe_fast_sgl_ctx {
+ struct regpair sgl_start_addr;
+ __le32 sgl_byte_offset;
+ __le16 task_reuse_cnt;
+ __le16 init_offset_in_first_sge;
};
struct fcoe_fcp_cmd_payload {
@@ -172,57 +158,6 @@ enum fcoe_mode_type {
MAX_FCOE_MODE_TYPE
};
-struct fcoe_mstorm_fcoe_task_st_ctx_fp {
- __le16 flags;
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_RSRV0_MASK 0x7FFF
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_RSRV0_SHIFT 0
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_MP_INCLUDE_FC_HEADER_MASK 0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_FP_MP_INCLUDE_FC_HEADER_SHIFT 15
- __le16 difDataResidue;
- __le16 parent_id;
- __le16 single_sge_saved_offset;
- __le32 data_2_trns_rem;
- __le32 offset_in_io;
- union fcoe_dix_desc_ctx dix_desc;
- union fcoe_data_desc_ctx data_desc;
-};
-
-struct fcoe_mstorm_fcoe_task_st_ctx_non_fp {
- __le16 flags;
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HOST_INTERFACE_MASK 0x3
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HOST_INTERFACE_SHIFT 0
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_TO_PEER_MASK 0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_TO_PEER_SHIFT 2
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_APP_TAG_MASK 0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_APP_TAG_SHIFT 3
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_INTERVAL_SIZE_LOG_MASK 0xF
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_INTERVAL_SIZE_LOG_SHIFT 4
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_BLOCK_SIZE_MASK 0x3
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_BLOCK_SIZE_SHIFT 8
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RESERVED_MASK 0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RESERVED_SHIFT 10
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HAS_FIRST_PACKET_ARRIVED_MASK 0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_HAS_FIRST_PACKET_ARRIVED_SHIFT 11
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_REF_TAG_MASK 0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_VALIDATE_DIX_REF_TAG_SHIFT 12
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_CACHED_SGE_FLG_MASK 0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIX_CACHED_SGE_FLG_SHIFT 13
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_OFFSET_IN_IO_VALID_MASK 0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_OFFSET_IN_IO_VALID_SHIFT 14
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_SUPPORTED_MASK 0x1
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_DIF_SUPPORTED_SHIFT 15
- u8 tx_rx_sgl_mode;
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE_MASK 0x7
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_TX_SGL_MODE_SHIFT 0
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE_MASK 0x7
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RX_SGL_MODE_SHIFT 3
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RSRV1_MASK 0x3
-#define FCOE_MSTORM_FCOE_TASK_ST_CTX_NON_FP_RSRV1_SHIFT 6
- u8 rsrv2;
- __le32 num_prm_zero_read;
- struct regpair rsp_buf_addr;
-};
-
struct fcoe_rx_stat {
struct regpair fcoe_rx_byte_cnt;
struct regpair fcoe_rx_data_pkt_cnt;
@@ -236,16 +171,6 @@ struct fcoe_rx_stat {
__le32 rsrv;
};
-enum fcoe_sgl_mode {
- FCOE_SLOW_SGL,
- FCOE_SINGLE_FAST_SGE,
- FCOE_2_FAST_SGE,
- FCOE_3_FAST_SGE,
- FCOE_4_FAST_SGE,
- FCOE_MUL_FAST_SGES,
- MAX_FCOE_SGL_MODE
-};
-
struct fcoe_stat_ramrod_data {
struct regpair stat_params_addr;
};
@@ -328,22 +253,24 @@ union fcoe_tx_info_union_ctx {
struct ystorm_fcoe_task_st_ctx {
u8 task_type;
u8 sgl_mode;
-#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x7
+#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1
#define YSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 0
-#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x1F
-#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 3
+#define YSTORM_FCOE_TASK_ST_CTX_RSRV_MASK 0x7F
+#define YSTORM_FCOE_TASK_ST_CTX_RSRV_SHIFT 1
u8 cached_dix_sge;
u8 expect_first_xfer;
__le32 num_pbf_zero_write;
union protection_info_union_ctx protection_info_union;
__le32 data_2_trns_rem;
+ struct scsi_sgl_params sgl_params;
+ u8 reserved1[12];
union fcoe_tx_info_union_ctx tx_info_union;
union fcoe_dix_desc_ctx dix_desc;
- union fcoe_data_desc_ctx data_desc;
+ struct scsi_cached_sges data_desc;
__le16 ox_id;
__le16 rx_id;
__le32 task_rety_identifier;
- __le32 reserved1[2];
+ u8 reserved2[8];
};
struct ystorm_fcoe_task_ag_ctx {
@@ -484,22 +411,22 @@ struct tstorm_fcoe_task_ag_ctx {
struct fcoe_tstorm_fcoe_task_st_ctx_read_write {
union fcoe_cleanup_addr_exp_ro_union cleanup_addr_exp_ro_union;
__le16 flags;
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x7
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_MASK 0x1
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RX_SGL_MODE_SHIFT 0
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 3
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_EXP_FIRST_FRAME_SHIFT 1
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 4
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_ACTIVE_SHIFT 2
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 5
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SEQ_TIMEOUT_SHIFT 3
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 6
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_SINGLE_PKT_IN_EX_SHIFT 4
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_MASK 0x1
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 7
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_OOO_RX_SEQ_STAT_SHIFT 5
#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_MASK 0x3
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 8
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0x3F
-#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 10
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_CQ_ADD_ADV_SHIFT 6
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_MASK 0xFF
+#define FCOE_TSTORM_FCOE_TASK_ST_CTX_READ_WRITE_RSRV1_SHIFT 8
__le16 seq_cnt;
u8 seq_id;
u8 ooo_rx_seq_id;
@@ -582,8 +509,34 @@ struct mstorm_fcoe_task_ag_ctx {
};
struct mstorm_fcoe_task_st_ctx {
- struct fcoe_mstorm_fcoe_task_st_ctx_non_fp non_fp;
- struct fcoe_mstorm_fcoe_task_st_ctx_fp fp;
+ struct regpair rsp_buf_addr;
+ __le32 rsrv[2];
+ struct scsi_sgl_params sgl_params;
+ __le32 data_2_trns_rem;
+ __le32 data_buffer_offset;
+ __le16 parent_id;
+ __le16 flags;
+#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_MASK 0xF
+#define MSTORM_FCOE_TASK_ST_CTX_INTERVAL_SIZE_LOG_SHIFT 0
+#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_MASK 0x3
+#define MSTORM_FCOE_TASK_ST_CTX_HOST_INTERFACE_SHIFT 4
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_TO_PEER_SHIFT 6
+#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_MP_INCLUDE_FC_HEADER_SHIFT 7
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_MASK 0x3
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_BLOCK_SIZE_SHIFT 8
+#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_VALIDATE_DIX_REF_TAG_SHIFT 10
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIX_CACHED_SGE_FLG_SHIFT 11
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_DIF_SUPPORTED_SHIFT 12
+#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_MASK 0x1
+#define MSTORM_FCOE_TASK_ST_CTX_TX_SGL_MODE_SHIFT 13
+#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_MASK 0x3
+#define MSTORM_FCOE_TASK_ST_CTX_RESERVED_SHIFT 14
+ struct scsi_cached_sges data_desc;
};
struct ustorm_fcoe_task_ag_ctx {
@@ -646,6 +599,7 @@ struct ustorm_fcoe_task_ag_ctx {
struct fcoe_task_context {
struct ystorm_fcoe_task_st_ctx ystorm_st_context;
+ struct regpair ystorm_st_padding[2];
struct tdif_task_context tdif_context;
struct ystorm_fcoe_task_ag_ctx ystorm_ag_context;
struct tstorm_fcoe_task_ag_ctx tstorm_ag_context;
@@ -668,20 +622,20 @@ struct fcoe_tx_stat {
struct fcoe_wqe {
__le16 task_id;
__le16 flags;
-#define FCOE_WQE_REQ_TYPE_MASK 0xF
-#define FCOE_WQE_REQ_TYPE_SHIFT 0
-#define FCOE_WQE_SGL_MODE_MASK 0x7
-#define FCOE_WQE_SGL_MODE_SHIFT 4
-#define FCOE_WQE_CONTINUATION_MASK 0x1
-#define FCOE_WQE_CONTINUATION_SHIFT 7
-#define FCOE_WQE_INVALIDATE_PTU_MASK 0x1
-#define FCOE_WQE_INVALIDATE_PTU_SHIFT 8
-#define FCOE_WQE_SUPER_IO_MASK 0x1
-#define FCOE_WQE_SUPER_IO_SHIFT 9
-#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1
-#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 10
-#define FCOE_WQE_RESERVED0_MASK 0x1F
-#define FCOE_WQE_RESERVED0_SHIFT 11
+#define FCOE_WQE_REQ_TYPE_MASK 0xF
+#define FCOE_WQE_REQ_TYPE_SHIFT 0
+#define FCOE_WQE_SGL_MODE_MASK 0x1
+#define FCOE_WQE_SGL_MODE_SHIFT 4
+#define FCOE_WQE_CONTINUATION_MASK 0x1
+#define FCOE_WQE_CONTINUATION_SHIFT 5
+#define FCOE_WQE_SEND_AUTO_RSP_MASK 0x1
+#define FCOE_WQE_SEND_AUTO_RSP_SHIFT 6
+#define FCOE_WQE_RESERVED_MASK 0x1
+#define FCOE_WQE_RESERVED_SHIFT 7
+#define FCOE_WQE_NUM_SGES_MASK 0xF
+#define FCOE_WQE_NUM_SGES_SHIFT 8
+#define FCOE_WQE_RESERVED1_MASK 0xF
+#define FCOE_WQE_RESERVED1_SHIFT 12
union fcoe_additional_info_union additional_info_union;
};
diff --git a/include/linux/qed/iscsi_common.h b/include/linux/qed/iscsi_common.h
index 4c5747babcf6..69949f8e354b 100644
--- a/include/linux/qed/iscsi_common.h
+++ b/include/linux/qed/iscsi_common.h
@@ -39,17 +39,9 @@
/* iSCSI HSI constants */
#define ISCSI_DEFAULT_MTU (1500)
-/* Current iSCSI HSI version number composed of two fields (16 bit) */
-#define ISCSI_HSI_MAJOR_VERSION (0)
-#define ISCSI_HSI_MINOR_VERSION (0)
-
/* KWQ (kernel work queue) layer codes */
#define ISCSI_SLOW_PATH_LAYER_CODE (6)
-/* CQE completion status */
-#define ISCSI_EQE_COMPLETION_SUCCESS (0x0)
-#define ISCSI_EQE_RST_CONN_RCVD (0x1)
-
/* iSCSI parameter defaults */
#define ISCSI_DEFAULT_HEADER_DIGEST (0)
#define ISCSI_DEFAULT_DATA_DIGEST (0)
@@ -68,6 +60,10 @@
#define ISCSI_MIN_VAL_MAX_OUTSTANDING_R2T (1)
#define ISCSI_MAX_VAL_MAX_OUTSTANDING_R2T (0xff)
+#define ISCSI_AHS_CNTL_SIZE 4
+
+#define ISCSI_WQE_NUM_SGES_SLOWIO (0xf)
+
/* iSCSI reserved params */
#define ISCSI_ITT_ALL_ONES (0xffffffff)
#define ISCSI_TTT_ALL_ONES (0xffffffff)
@@ -173,19 +169,6 @@ struct iscsi_async_msg_hdr {
__le32 reserved7;
};
-struct iscsi_sge {
- struct regpair sge_addr;
- __le16 sge_len;
- __le16 reserved0;
- __le32 reserved1;
-};
-
-struct iscsi_cached_sge_ctx {
- struct iscsi_sge sge;
- struct regpair reserved;
- __le32 dsgl_curr_offset[2];
-};
-
struct iscsi_cmd_hdr {
__le16 reserved1;
u8 flags_attr;
@@ -229,8 +212,13 @@ struct iscsi_common_hdr {
#define ISCSI_COMMON_HDR_DATA_SEG_LEN_SHIFT 0
#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_COMMON_HDR_TOTAL_AHS_LEN_SHIFT 24
- __le32 lun_reserved[4];
- __le32 data[6];
+ struct regpair lun_reserved;
+ __le32 itt;
+ __le32 ttt;
+ __le32 cmdstat_sn;
+ __le32 exp_statcmd_sn;
+ __le32 max_cmd_sn;
+ __le32 data[3];
};
struct iscsi_conn_offload_params {
@@ -246,8 +234,10 @@ struct iscsi_conn_offload_params {
#define ISCSI_CONN_OFFLOAD_PARAMS_TCP_ON_CHIP_1B_SHIFT 0
#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_MASK 0x1
#define ISCSI_CONN_OFFLOAD_PARAMS_TARGET_MODE_SHIFT 1
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x3F
-#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 2
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_MASK 0x1
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESTRICTED_MODE_SHIFT 2
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_MASK 0x1F
+#define ISCSI_CONN_OFFLOAD_PARAMS_RESERVED1_SHIFT 3
u8 pbl_page_size_log;
u8 pbe_page_size_log;
u8 default_cq;
@@ -278,8 +268,12 @@ struct iscsi_conn_update_ramrod_params {
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_INITIAL_R2T_SHIFT 2
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_MASK 0x1
#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_IMMEDIATE_DATA_SHIFT 3
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0xF
-#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 4
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_BLOCK_SIZE_SHIFT 4
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_MASK 0x1
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_DIF_ON_HOST_EN_SHIFT 5
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_MASK 0x3
+#define ISCSI_CONN_UPDATE_RAMROD_PARAMS_RESERVED1_SHIFT 6
u8 reserved0[3];
__le32 max_seq_size;
__le32 max_send_pdu_length;
@@ -312,7 +306,7 @@ struct iscsi_ext_cdb_cmd_hdr {
__le32 expected_transfer_length;
__le32 cmd_sn;
__le32 exp_stat_sn;
- struct iscsi_sge cdb_sge;
+ struct scsi_sge cdb_sge;
};
struct iscsi_login_req_hdr {
@@ -519,8 +513,8 @@ struct iscsi_logout_response_hdr {
__le32 exp_cmd_sn;
__le32 max_cmd_sn;
__le32 reserved4;
- __le16 time2retain;
- __le16 time2wait;
+ __le16 time_2_retain;
+ __le16 time_2_wait;
__le32 reserved5[1];
};
@@ -602,7 +596,7 @@ struct iscsi_tmf_response_hdr {
#define ISCSI_TMF_RESPONSE_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair reserved0;
__le32 itt;
- __le32 rtt;
+ __le32 reserved1;
__le32 stat_sn;
__le32 exp_cmd_sn;
__le32 max_cmd_sn;
@@ -641,7 +635,7 @@ struct iscsi_reject_hdr {
#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_MASK 0xFF
#define ISCSI_REJECT_HDR_TOTAL_AHS_LEN_SHIFT 24
struct regpair reserved0;
- __le32 reserved1;
+ __le32 all_ones;
__le32 reserved2;
__le32 stat_sn;
__le32 exp_cmd_sn;
@@ -688,7 +682,9 @@ struct iscsi_cqe_solicited {
__le16 itid;
u8 task_type;
u8 fw_dbg_field;
- __le32 reserved1[2];
+ u8 caused_conn_err;
+ u8 reserved0[3];
+ __le32 reserved1[1];
union iscsi_task_hdr iscsi_hdr;
};
@@ -727,35 +723,6 @@ enum iscsi_cqe_unsolicited_type {
MAX_ISCSI_CQE_UNSOLICITED_TYPE
};
-struct iscsi_virt_sgl_ctx {
- struct regpair sgl_base;
- struct regpair dsgl_base;
- __le32 sgl_initial_offset;
- __le32 dsgl_initial_offset;
- __le32 dsgl_curr_offset[2];
-};
-
-struct iscsi_sgl_var_params {
- u8 sgl_ptr;
- u8 dsgl_ptr;
- __le16 sge_offset;
- __le16 dsge_offset;
-};
-
-struct iscsi_phys_sgl_ctx {
- struct regpair sgl_base;
- struct regpair dsgl_base;
- u8 sgl_size;
- u8 dsgl_size;
- __le16 reserved;
- struct iscsi_sgl_var_params var_params[2];
-};
-
-union iscsi_data_desc_ctx {
- struct iscsi_virt_sgl_ctx virt_sgl;
- struct iscsi_phys_sgl_ctx phys_sgl;
- struct iscsi_cached_sge_ctx cached_sge;
-};
struct iscsi_debug_modes {
u8 flags;
@@ -771,8 +738,10 @@ struct iscsi_debug_modes {
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_REJECT_OR_ASYNC_SHIFT 4
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_MASK 0x1
#define ISCSI_DEBUG_MODES_ASSERT_IF_RECV_NOP_SHIFT 5
-#define ISCSI_DEBUG_MODES_RESERVED0_MASK 0x3
-#define ISCSI_DEBUG_MODES_RESERVED0_SHIFT 6
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DATA_DIGEST_ERROR_SHIFT 6
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_MASK 0x1
+#define ISCSI_DEBUG_MODES_ASSERT_IF_DIF_ERROR_SHIFT 7
};
struct iscsi_dif_flags {
@@ -806,7 +775,6 @@ enum iscsi_eqe_opcode {
ISCSI_EVENT_TYPE_ASYN_FIN_WAIT2,
ISCSI_EVENT_TYPE_ISCSI_CONN_ERROR,
ISCSI_EVENT_TYPE_TCP_CONN_ERROR,
- ISCSI_EVENT_TYPE_ASYN_DELETE_OOO_ISLES,
MAX_ISCSI_EQE_OPCODE
};
@@ -856,31 +824,11 @@ enum iscsi_error_types {
ISCSI_CONN_ERROR_PROTOCOL_ERR_DIF_TX,
ISCSI_CONN_ERROR_SENSE_DATA_LENGTH,
ISCSI_CONN_ERROR_DATA_PLACEMENT_ERROR,
+ ISCSI_CONN_ERROR_INVALID_ITT,
ISCSI_ERROR_UNKNOWN,
MAX_ISCSI_ERROR_TYPES
};
-struct iscsi_mflags {
- u8 mflags;
-#define ISCSI_MFLAGS_SLOW_IO_MASK 0x1
-#define ISCSI_MFLAGS_SLOW_IO_SHIFT 0
-#define ISCSI_MFLAGS_SINGLE_SGE_MASK 0x1
-#define ISCSI_MFLAGS_SINGLE_SGE_SHIFT 1
-#define ISCSI_MFLAGS_RESERVED_MASK 0x3F
-#define ISCSI_MFLAGS_RESERVED_SHIFT 2
-};
-
-struct iscsi_sgl {
- struct regpair sgl_addr;
- __le16 updated_sge_size;
- __le16 updated_sge_offset;
- __le32 byte_offset;
-};
-
-union iscsi_mstorm_sgl {
- struct iscsi_sgl sgl_struct;
- struct iscsi_sge single_sge;
-};
enum iscsi_ramrod_cmd_id {
ISCSI_RAMROD_CMD_ID_UNUSED = 0,
@@ -896,10 +844,10 @@ enum iscsi_ramrod_cmd_id {
struct iscsi_reg1 {
__le32 reg1_map;
-#define ISCSI_REG1_NUM_FAST_SGES_MASK 0x7
-#define ISCSI_REG1_NUM_FAST_SGES_SHIFT 0
-#define ISCSI_REG1_RESERVED1_MASK 0x1FFFFFFF
-#define ISCSI_REG1_RESERVED1_SHIFT 3
+#define ISCSI_REG1_NUM_SGES_MASK 0xF
+#define ISCSI_REG1_NUM_SGES_SHIFT 0
+#define ISCSI_REG1_RESERVED1_MASK 0xFFFFFFF
+#define ISCSI_REG1_RESERVED1_SHIFT 4
};
union iscsi_seq_num {
@@ -967,22 +915,33 @@ struct iscsi_spe_func_init {
};
struct ystorm_iscsi_task_state {
- union iscsi_data_desc_ctx sgl_ctx_union;
- __le32 buffer_offset[2];
- __le16 bytes_nxt_dif;
- __le16 rxmit_bytes_nxt_dif;
- union iscsi_seq_num seq_num_union;
- u8 dif_bytes_leftover;
- u8 rxmit_dif_bytes_leftover;
- __le16 reuse_count;
- struct iscsi_dif_flags dif_flags;
- u8 local_comp;
+ struct scsi_cached_sges data_desc;
+ struct scsi_sgl_params sgl_params;
__le32 exp_r2t_sn;
- __le32 sgl_offset[2];
+ __le32 buffer_offset;
+ union iscsi_seq_num seq_num;
+ struct iscsi_dif_flags dif_flags;
+ u8 flags;
+#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_MASK 0x1
+#define YSTORM_ISCSI_TASK_STATE_LOCAL_COMP_SHIFT 0
+#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_MASK 0x1
+#define YSTORM_ISCSI_TASK_STATE_SLOW_IO_SHIFT 1
+#define YSTORM_ISCSI_TASK_STATE_RESERVED0_MASK 0x3F
+#define YSTORM_ISCSI_TASK_STATE_RESERVED0_SHIFT 2
+};
+
+struct ystorm_iscsi_task_rxmit_opt {
+ __le32 fast_rxmit_sge_offset;
+ __le32 scan_start_buffer_offset;
+ __le32 fast_rxmit_buffer_offset;
+ u8 scan_start_sgl_index;
+ u8 fast_rxmit_sgl_index;
+ __le16 reserved;
};
struct ystorm_iscsi_task_st_ctx {
struct ystorm_iscsi_task_state state;
+ struct ystorm_iscsi_task_rxmit_opt rxmit_opt;
union iscsi_task_hdr pdu_hdr;
};
@@ -1152,25 +1111,16 @@ struct ustorm_iscsi_task_ag_ctx {
};
struct mstorm_iscsi_task_st_ctx {
- union iscsi_mstorm_sgl sgl_union;
- struct iscsi_dif_flags dif_flags;
- struct iscsi_mflags flags;
- u8 sgl_size;
- u8 host_sge_index;
- __le16 dix_cur_sge_offset;
- __le16 dix_cur_sge_size;
- __le32 data_offset_rtid;
- u8 dif_offset;
- u8 dix_sgl_size;
- u8 dix_sge_index;
+ struct scsi_cached_sges data_desc;
+ struct scsi_sgl_params sgl_params;
+ __le32 rem_task_size;
+ __le32 data_buffer_offset;
u8 task_type;
+ struct iscsi_dif_flags dif_flags;
+ u8 reserved0[2];
struct regpair sense_db;
- struct regpair dix_sgl_cur_sge;
- __le32 rem_task_size;
- __le16 reuse_count;
- __le16 dif_data_residue;
- u8 reserved0[4];
- __le32 reserved1[1];
+ __le32 expected_itt;
+ __le32 reserved1;
};
struct ustorm_iscsi_task_st_ctx {
@@ -1184,7 +1134,7 @@ struct ustorm_iscsi_task_st_ctx {
#define USTORM_ISCSI_TASK_ST_CTX_AHS_EXIST_SHIFT 0
#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_MASK 0x7F
#define USTORM_ISCSI_TASK_ST_CTX_RESERVED1_SHIFT 1
- u8 reserved2;
+ struct iscsi_dif_flags dif_flags;
__le16 reserved3;
__le32 reserved4;
__le32 reserved5;
@@ -1207,10 +1157,10 @@ struct ustorm_iscsi_task_st_ctx {
#define USTORM_ISCSI_TASK_ST_CTX_LOCAL_COMP_SHIFT 2
#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_MASK 0x1
#define USTORM_ISCSI_TASK_ST_CTX_Q0_R2TQE_WRITE_SHIFT 3
-#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_TOTALDATAACKED_DONE_SHIFT 4
-#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_MASK 0x1
-#define USTORM_ISCSI_TASK_ST_CTX_HQSCANNED_DONE_SHIFT 5
+#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_TOTAL_DATA_ACKED_DONE_SHIFT 4
+#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_MASK 0x1
+#define USTORM_ISCSI_TASK_ST_CTX_HQ_SCANNED_DONE_SHIFT 5
#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_MASK 0x1
#define USTORM_ISCSI_TASK_ST_CTX_R2T2RECV_DONE_SHIFT 6
#define USTORM_ISCSI_TASK_ST_CTX_RESERVED0_MASK 0x1
@@ -1220,7 +1170,6 @@ struct ustorm_iscsi_task_st_ctx {
struct iscsi_task_context {
struct ystorm_iscsi_task_st_ctx ystorm_st_context;
- struct regpair ystorm_st_padding[2];
struct ystorm_iscsi_task_ag_ctx ystorm_ag_context;
struct regpair ystorm_ag_padding[2];
struct tdif_task_context tdif_context;
@@ -1272,32 +1221,22 @@ struct iscsi_uhqe {
#define ISCSI_UHQE_TASK_ID_LO_SHIFT 24
};
-struct iscsi_wqe_field {
- __le32 contlen_cdbsize_field;
-#define ISCSI_WQE_FIELD_CONT_LEN_MASK 0xFFFFFF
-#define ISCSI_WQE_FIELD_CONT_LEN_SHIFT 0
-#define ISCSI_WQE_FIELD_CDB_SIZE_MASK 0xFF
-#define ISCSI_WQE_FIELD_CDB_SIZE_SHIFT 24
-};
-
-union iscsi_wqe_field_union {
- struct iscsi_wqe_field cont_field;
- __le32 prev_tid;
-};
struct iscsi_wqe {
__le16 task_id;
u8 flags;
#define ISCSI_WQE_WQE_TYPE_MASK 0x7
#define ISCSI_WQE_WQE_TYPE_SHIFT 0
-#define ISCSI_WQE_NUM_FAST_SGES_MASK 0x7
-#define ISCSI_WQE_NUM_FAST_SGES_SHIFT 3
-#define ISCSI_WQE_PTU_INVALIDATE_MASK 0x1
-#define ISCSI_WQE_PTU_INVALIDATE_SHIFT 6
+#define ISCSI_WQE_NUM_SGES_MASK 0xF
+#define ISCSI_WQE_NUM_SGES_SHIFT 3
#define ISCSI_WQE_RESPONSE_MASK 0x1
#define ISCSI_WQE_RESPONSE_SHIFT 7
struct iscsi_dif_flags prot_flags;
- union iscsi_wqe_field_union cont_prevtid_union;
+ __le32 contlen_cdbsize;
+#define ISCSI_WQE_CONT_LEN_MASK 0xFFFFFF
+#define ISCSI_WQE_CONT_LEN_SHIFT 0
+#define ISCSI_WQE_CDB_SIZE_MASK 0xFF
+#define ISCSI_WQE_CDB_SIZE_SHIFT 24
};
enum iscsi_wqe_type {
@@ -1318,17 +1257,15 @@ struct iscsi_xhqe {
u8 total_ahs_length;
u8 opcode;
u8 flags;
-#define ISCSI_XHQE_NUM_FAST_SGES_MASK 0x7
-#define ISCSI_XHQE_NUM_FAST_SGES_SHIFT 0
-#define ISCSI_XHQE_FINAL_MASK 0x1
-#define ISCSI_XHQE_FINAL_SHIFT 3
-#define ISCSI_XHQE_SUPER_IO_MASK 0x1
-#define ISCSI_XHQE_SUPER_IO_SHIFT 4
-#define ISCSI_XHQE_STATUS_BIT_MASK 0x1
-#define ISCSI_XHQE_STATUS_BIT_SHIFT 5
-#define ISCSI_XHQE_RESERVED_MASK 0x3
-#define ISCSI_XHQE_RESERVED_SHIFT 6
- union iscsi_seq_num seq_num_union;
+#define ISCSI_XHQE_FINAL_MASK 0x1
+#define ISCSI_XHQE_FINAL_SHIFT 0
+#define ISCSI_XHQE_STATUS_BIT_MASK 0x1
+#define ISCSI_XHQE_STATUS_BIT_SHIFT 1
+#define ISCSI_XHQE_NUM_SGES_MASK 0xF
+#define ISCSI_XHQE_NUM_SGES_SHIFT 2
+#define ISCSI_XHQE_RESERVED0_MASK 0x3
+#define ISCSI_XHQE_RESERVED0_SHIFT 6
+ union iscsi_seq_num seq_num;
__le16 reserved1;
};
diff --git a/include/linux/qed/qed_eth_if.h b/include/linux/qed/qed_eth_if.h
index 4cd1f0ccfa36..d66d16a559e1 100644
--- a/include/linux/qed/qed_eth_if.h
+++ b/include/linux/qed/qed_eth_if.h
@@ -158,15 +158,27 @@ struct qed_tunn_params {
struct qed_eth_cb_ops {
struct qed_common_cb_ops common;
void (*force_mac) (void *dev, u8 *mac, bool forced);
+ void (*ports_update)(void *dev, u16 vxlan_port, u16 geneve_port);
};
#define QED_MAX_PHC_DRIFT_PPB 291666666
enum qed_ptp_filter_type {
- QED_PTP_FILTER_L2,
- QED_PTP_FILTER_IPV4,
- QED_PTP_FILTER_IPV4_IPV6,
- QED_PTP_FILTER_L2_IPV4_IPV6
+ QED_PTP_FILTER_NONE,
+ QED_PTP_FILTER_ALL,
+ QED_PTP_FILTER_V1_L4_EVENT,
+ QED_PTP_FILTER_V1_L4_GEN,
+ QED_PTP_FILTER_V2_L4_EVENT,
+ QED_PTP_FILTER_V2_L4_GEN,
+ QED_PTP_FILTER_V2_L2_EVENT,
+ QED_PTP_FILTER_V2_L2_GEN,
+ QED_PTP_FILTER_V2_EVENT,
+ QED_PTP_FILTER_V2_GEN
+};
+
+enum qed_ptp_hwtstamp_tx_type {
+ QED_PTP_HWTSTAMP_TX_OFF,
+ QED_PTP_HWTSTAMP_TX_ON,
};
#ifdef CONFIG_DCB
@@ -229,8 +241,8 @@ struct qed_eth_dcbnl_ops {
#endif
struct qed_eth_ptp_ops {
- int (*hwtstamp_tx_on)(struct qed_dev *);
- int (*cfg_rx_filters)(struct qed_dev *, enum qed_ptp_filter_type);
+ int (*cfg_filters)(struct qed_dev *, enum qed_ptp_filter_type,
+ enum qed_ptp_hwtstamp_tx_type);
int (*read_rx_ts)(struct qed_dev *, u64 *);
int (*read_tx_ts)(struct qed_dev *, u64 *);
int (*read_cc)(struct qed_dev *, u64 *);
@@ -301,6 +313,14 @@ struct qed_eth_ops {
int (*tunn_config)(struct qed_dev *cdev,
struct qed_tunn_params *params);
+
+ int (*ntuple_filter_config)(struct qed_dev *cdev, void *cookie,
+ dma_addr_t mapping, u16 length,
+ u16 vport_id, u16 rx_queue_id,
+ bool add_filter);
+
+ int (*configure_arfs_searcher)(struct qed_dev *cdev,
+ bool en_searcher);
};
const struct qed_eth_ops *qed_get_eth_ops(void);
diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h
index fde56c436f71..c70ac13a97e6 100644
--- a/include/linux/qed/qed_if.h
+++ b/include/linux/qed/qed_if.h
@@ -144,6 +144,7 @@ struct qed_dcbx_operational_params {
bool enabled;
bool ieee;
bool cee;
+ bool local;
u32 err;
};
@@ -178,6 +179,12 @@ struct qed_eth_pf_params {
* to update_pf_params routine invoked before slowpath start
*/
u16 num_cons;
+
+ /* To enable arfs, previous to HW-init a positive number needs to be
+ * set [as filters require allocated searcher ILT memory].
+ * This will set the maximal number of configured steering-filters.
+ */
+ u32 num_arfs_filters;
};
struct qed_fcoe_pf_params {
@@ -263,7 +270,6 @@ struct qed_rdma_pf_params {
* the doorbell BAR).
*/
u32 min_dpis; /* number of requested DPIs */
- u32 num_mrs; /* number of requested memory regions */
u32 num_qps; /* number of requested Queue Pairs */
u32 num_srqs; /* number of requested SRQ */
u8 roce_edpm_mode; /* see QED_ROCE_EDPM_MODE_ENABLE */
@@ -300,6 +306,11 @@ struct qed_sb_info {
struct qed_dev *cdev;
};
+enum qed_dev_type {
+ QED_DEV_TYPE_BB,
+ QED_DEV_TYPE_AH,
+};
+
struct qed_dev_info {
unsigned long pci_mem_start;
unsigned long pci_mem_end;
@@ -325,6 +336,13 @@ struct qed_dev_info {
u16 mtu;
bool wol_support;
+
+ enum qed_dev_type dev_type;
+
+ /* Output parameters for qede */
+ bool vxlan_enable;
+ bool gre_enable;
+ bool geneve_enable;
};
enum qed_sb_type {
@@ -421,6 +439,7 @@ struct qed_int_info {
};
struct qed_common_cb_ops {
+ void (*arfs_filter_op)(void *dev, void *fltr, u8 fw_rc);
void (*link_update)(void *dev,
struct qed_link_output *link);
void (*dcbx_aen)(void *dev, struct qed_dcbx_get *get, u32 mib_type);
@@ -616,7 +635,7 @@ struct qed_common_ops {
* @return 0 on success, error otherwise.
*/
int (*set_coalesce)(struct qed_dev *cdev, u16 rx_coal, u16 tx_coal,
- u8 qid, u16 sb_id);
+ u16 qid, u16 sb_id);
/**
* @brief set_led - Configure LED mode
@@ -752,7 +771,7 @@ enum qed_mf_mode {
QED_MF_NPAR,
};
-struct qed_eth_stats {
+struct qed_eth_stats_common {
u64 no_buff_discards;
u64 packet_too_big_discard;
u64 ttl0_discard;
@@ -784,11 +803,6 @@ struct qed_eth_stats {
u64 rx_256_to_511_byte_packets;
u64 rx_512_to_1023_byte_packets;
u64 rx_1024_to_1518_byte_packets;
- u64 rx_1519_to_1522_byte_packets;
- u64 rx_1519_to_2047_byte_packets;
- u64 rx_2048_to_4095_byte_packets;
- u64 rx_4096_to_9216_byte_packets;
- u64 rx_9217_to_16383_byte_packets;
u64 rx_crc_errors;
u64 rx_mac_crtl_frames;
u64 rx_pause_frames;
@@ -805,14 +819,8 @@ struct qed_eth_stats {
u64 tx_256_to_511_byte_packets;
u64 tx_512_to_1023_byte_packets;
u64 tx_1024_to_1518_byte_packets;
- u64 tx_1519_to_2047_byte_packets;
- u64 tx_2048_to_4095_byte_packets;
- u64 tx_4096_to_9216_byte_packets;
- u64 tx_9217_to_16383_byte_packets;
u64 tx_pause_frames;
u64 tx_pfc_frames;
- u64 tx_lpi_entry_count;
- u64 tx_total_collisions;
u64 brb_truncates;
u64 brb_discards;
u64 rx_mac_bytes;
@@ -827,6 +835,34 @@ struct qed_eth_stats {
u64 tx_mac_ctrl_frames;
};
+struct qed_eth_stats_bb {
+ u64 rx_1519_to_1522_byte_packets;
+ u64 rx_1519_to_2047_byte_packets;
+ u64 rx_2048_to_4095_byte_packets;
+ u64 rx_4096_to_9216_byte_packets;
+ u64 rx_9217_to_16383_byte_packets;
+ u64 tx_1519_to_2047_byte_packets;
+ u64 tx_2048_to_4095_byte_packets;
+ u64 tx_4096_to_9216_byte_packets;
+ u64 tx_9217_to_16383_byte_packets;
+ u64 tx_lpi_entry_count;
+ u64 tx_total_collisions;
+};
+
+struct qed_eth_stats_ah {
+ u64 rx_1519_to_max_byte_packets;
+ u64 tx_1519_to_max_byte_packets;
+};
+
+struct qed_eth_stats {
+ struct qed_eth_stats_common common;
+
+ union {
+ struct qed_eth_stats_bb bb;
+ struct qed_eth_stats_ah ah;
+ };
+};
+
#define QED_SB_IDX 0x0002
#define RX_PI 0
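
Since the stats are now a common part plus a per-ASIC union, a consumer is expected to key off dev_type; a hedged sketch (the helper name and the choice of counters are illustrative):

#include <linux/qed/qed_if.h>

/* Illustrative only: pick the chip-specific large-packet RX counters. */
static u64 demo_rx_jumbo_packets(struct qed_dev_info *info,
				 struct qed_eth_stats *stats)
{
	if (info->dev_type == QED_DEV_TYPE_BB)
		return stats->bb.rx_1519_to_2047_byte_packets +
		       stats->bb.rx_2048_to_4095_byte_packets +
		       stats->bb.rx_4096_to_9216_byte_packets +
		       stats->bb.rx_9217_to_16383_byte_packets;

	return stats->ah.rx_1519_to_max_byte_packets;
}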
diff --git a/include/linux/qed/qed_iscsi_if.h b/include/linux/qed/qed_iscsi_if.h
index f70bb81b8b6a..3414649133d2 100644
--- a/include/linux/qed/qed_iscsi_if.h
+++ b/include/linux/qed/qed_iscsi_if.h
@@ -67,6 +67,8 @@ struct qed_dev_iscsi_info {
void __iomem *primary_dbq_rq_addr;
void __iomem *secondary_bdq_rq_addr;
+
+ u8 num_cqs;
};
struct qed_iscsi_id_params {
diff --git a/include/linux/qed/qed_roce_if.h b/include/linux/qed/qed_roce_if.h
index f742d4312c9d..cbb2ff0ce4bc 100644
--- a/include/linux/qed/qed_roce_if.h
+++ b/include/linux/qed/qed_roce_if.h
@@ -240,6 +240,7 @@ struct qed_rdma_add_user_out_params {
u64 dpi_addr;
u64 dpi_phys_addr;
u32 dpi_size;
+ u16 wid_count;
};
enum roce_mode {
@@ -533,6 +534,7 @@ enum qed_rdma_type {
struct qed_dev_rdma_info {
struct qed_dev_info common;
enum qed_rdma_type rdma_type;
+ u8 user_dpm_enabled;
};
struct qed_rdma_ops {
diff --git a/include/linux/qed/rdma_common.h b/include/linux/qed/rdma_common.h
index f773aa5e746f..72c770f9f666 100644
--- a/include/linux/qed/rdma_common.h
+++ b/include/linux/qed/rdma_common.h
@@ -52,7 +52,8 @@
#define RDMA_MAX_PDS (64 * 1024)
#define RDMA_NUM_STATISTIC_COUNTERS MAX_NUM_VPORTS
-#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
+#define RDMA_NUM_STATISTIC_COUNTERS_K2 MAX_NUM_VPORTS_K2
+#define RDMA_NUM_STATISTIC_COUNTERS_BB MAX_NUM_VPORTS_BB
#define RDMA_TASK_TYPE (PROTOCOLID_ROCE)
diff --git a/include/linux/qed/roce_common.h b/include/linux/qed/roce_common.h
index bad02df213df..866f063026de 100644
--- a/include/linux/qed/roce_common.h
+++ b/include/linux/qed/roce_common.h
@@ -38,4 +38,21 @@
#define ROCE_MAX_QPS (32 * 1024)
+enum roce_async_events_type {
+ ROCE_ASYNC_EVENT_NONE = 0,
+ ROCE_ASYNC_EVENT_COMM_EST = 1,
+ ROCE_ASYNC_EVENT_SQ_DRAINED,
+ ROCE_ASYNC_EVENT_SRQ_LIMIT,
+ ROCE_ASYNC_EVENT_LAST_WQE_REACHED,
+ ROCE_ASYNC_EVENT_CQ_ERR,
+ ROCE_ASYNC_EVENT_LOCAL_INVALID_REQUEST_ERR,
+ ROCE_ASYNC_EVENT_LOCAL_CATASTROPHIC_ERR,
+ ROCE_ASYNC_EVENT_LOCAL_ACCESS_ERR,
+ ROCE_ASYNC_EVENT_QP_CATASTROPHIC_ERR,
+ ROCE_ASYNC_EVENT_CQ_OVERFLOW_ERR,
+ ROCE_ASYNC_EVENT_SRQ_EMPTY,
+ ROCE_ASYNC_EVENT_DESTROY_QP_DONE,
+ MAX_ROCE_ASYNC_EVENTS_TYPE
+};
+
#endif /* __ROCE_COMMON__ */
diff --git a/include/linux/qed/storage_common.h b/include/linux/qed/storage_common.h
index 03f3e37ab059..08df82a096b6 100644
--- a/include/linux/qed/storage_common.h
+++ b/include/linux/qed/storage_common.h
@@ -40,6 +40,8 @@
#define BDQ_ID_IMM_DATA (1)
#define BDQ_NUM_IDS (2)
+#define SCSI_NUM_SGES_SLOW_SGL_THR 8
+
#define BDQ_MAX_EXTERNAL_RING_SIZE (1 << 15)
struct scsi_bd {
@@ -52,6 +54,16 @@ struct scsi_bdq_ram_drv_data {
__le16 reserved0[3];
};
+struct scsi_sge {
+ struct regpair sge_addr;
+ __le32 sge_len;
+ __le32 reserved;
+};
+
+struct scsi_cached_sges {
+ struct scsi_sge sge[4];
+};
+
struct scsi_drv_cmdq {
__le16 cmdq_cons;
__le16 reserved0;
@@ -99,11 +111,19 @@ struct scsi_ram_per_bdq_resource_drv_data {
struct scsi_bdq_ram_drv_data drv_data_per_bdq_id[BDQ_NUM_IDS];
};
-struct scsi_sge {
- struct regpair sge_addr;
- __le16 sge_len;
- __le16 reserved0;
- __le32 reserved1;
+enum scsi_sgl_mode {
+ SCSI_TX_SLOW_SGL,
+ SCSI_FAST_SGL,
+ MAX_SCSI_SGL_MODE
+};
+
+struct scsi_sgl_params {
+ struct regpair sgl_addr;
+ __le32 sgl_total_length;
+ __le32 sge_offset;
+ __le16 sgl_num_sges;
+ u8 sgl_index;
+ u8 reserved;
};
struct scsi_terminate_extra_params {
diff --git a/include/linux/qed/tcp_common.h b/include/linux/qed/tcp_common.h
index 46fe7856f1b2..a5e843268f0e 100644
--- a/include/linux/qed/tcp_common.h
+++ b/include/linux/qed/tcp_common.h
@@ -173,6 +173,7 @@ enum tcp_seg_placement_event {
TCP_EVENT_ADD_ISLE_RIGHT,
TCP_EVENT_ADD_ISLE_LEFT,
TCP_EVENT_JOIN,
+ TCP_EVENT_DELETE_ISLES,
TCP_EVENT_NOP,
MAX_TCP_SEG_PLACEMENT_EVENT
};
diff --git a/include/linux/quotaops.h b/include/linux/quotaops.h
index 799a63d0e1a8..9c6f768b7d32 100644
--- a/include/linux/quotaops.h
+++ b/include/linux/quotaops.h
@@ -162,7 +162,6 @@ static inline bool sb_has_quota_active(struct super_block *sb, int type)
* Operations supported for diskquotas.
*/
extern const struct dquot_operations dquot_operations;
-extern const struct quotactl_ops dquot_quotactl_ops;
extern const struct quotactl_ops dquot_quotactl_sysfile_ops;
#else
diff --git a/include/linux/ras.h b/include/linux/ras.h
index 2aceeafd6fe5..ffb147185e8d 100644
--- a/include/linux/ras.h
+++ b/include/linux/ras.h
@@ -1,14 +1,25 @@
#ifndef __RAS_H__
#define __RAS_H__
+#include <asm/errno.h>
+
#ifdef CONFIG_DEBUG_FS
int ras_userspace_consumers(void);
void ras_debugfs_init(void);
int ras_add_daemon_trace(void);
#else
static inline int ras_userspace_consumers(void) { return 0; }
-static inline void ras_debugfs_init(void) { return; }
+static inline void ras_debugfs_init(void) { }
static inline int ras_add_daemon_trace(void) { return 0; }
#endif
+#ifdef CONFIG_RAS_CEC
+void __init cec_init(void);
+int __init parse_cec_param(char *str);
+int cec_add_elem(u64 pfn);
+#else
+static inline void __init cec_init(void) { }
+static inline int cec_add_elem(u64 pfn) { return -ENODEV; }
#endif
+
+#endif /* __RAS_H__ */
diff --git a/include/linux/rcu_node_tree.h b/include/linux/rcu_node_tree.h
new file mode 100644
index 000000000000..4b766b61e1a0
--- /dev/null
+++ b/include/linux/rcu_node_tree.h
@@ -0,0 +1,99 @@
+/*
+ * RCU node combining tree definitions. These are used to compute
+ * global attributes while avoiding common-case global contention. A key
+ * property that these computations rely on is a tournament-style approach
+ * where only one of the tasks contending a lower level in the tree need
+ * advance to the next higher level. If properly configured, this allows
+ * unlimited scalability while maintaining a constant level of contention
+ * on the root node.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright IBM Corporation, 2017
+ *
+ * Author: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ */
+
+#ifndef __LINUX_RCU_NODE_TREE_H
+#define __LINUX_RCU_NODE_TREE_H
+
+/*
+ * Define shape of hierarchy based on NR_CPUS, CONFIG_RCU_FANOUT, and
+ * CONFIG_RCU_FANOUT_LEAF.
+ * In theory, it should be possible to add more levels straightforwardly.
+ * In practice, this did work well going from three levels to four.
+ * Of course, your mileage may vary.
+ */
+
+#ifdef CONFIG_RCU_FANOUT
+#define RCU_FANOUT CONFIG_RCU_FANOUT
+#else /* #ifdef CONFIG_RCU_FANOUT */
+# ifdef CONFIG_64BIT
+# define RCU_FANOUT 64
+# else
+# define RCU_FANOUT 32
+# endif
+#endif /* #else #ifdef CONFIG_RCU_FANOUT */
+
+#ifdef CONFIG_RCU_FANOUT_LEAF
+#define RCU_FANOUT_LEAF CONFIG_RCU_FANOUT_LEAF
+#else /* #ifdef CONFIG_RCU_FANOUT_LEAF */
+#define RCU_FANOUT_LEAF 16
+#endif /* #else #ifdef CONFIG_RCU_FANOUT_LEAF */
+
+#define RCU_FANOUT_1 (RCU_FANOUT_LEAF)
+#define RCU_FANOUT_2 (RCU_FANOUT_1 * RCU_FANOUT)
+#define RCU_FANOUT_3 (RCU_FANOUT_2 * RCU_FANOUT)
+#define RCU_FANOUT_4 (RCU_FANOUT_3 * RCU_FANOUT)
+
+#if NR_CPUS <= RCU_FANOUT_1
+# define RCU_NUM_LVLS 1
+# define NUM_RCU_LVL_0 1
+# define NUM_RCU_NODES NUM_RCU_LVL_0
+# define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0 }
+# define RCU_NODE_NAME_INIT { "rcu_node_0" }
+# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0" }
+#elif NR_CPUS <= RCU_FANOUT_2
+# define RCU_NUM_LVLS 2
+# define NUM_RCU_LVL_0 1
+# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
+# define NUM_RCU_NODES (NUM_RCU_LVL_0 + NUM_RCU_LVL_1)
+# define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0, NUM_RCU_LVL_1 }
+# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1" }
+# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1" }
+#elif NR_CPUS <= RCU_FANOUT_3
+# define RCU_NUM_LVLS 3
+# define NUM_RCU_LVL_0 1
+# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
+# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
+# define NUM_RCU_NODES (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2)
+# define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2 }
+# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1", "rcu_node_2" }
+# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2" }
+#elif NR_CPUS <= RCU_FANOUT_4
+# define RCU_NUM_LVLS 4
+# define NUM_RCU_LVL_0 1
+# define NUM_RCU_LVL_1 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_3)
+# define NUM_RCU_LVL_2 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_2)
+# define NUM_RCU_LVL_3 DIV_ROUND_UP(NR_CPUS, RCU_FANOUT_1)
+# define NUM_RCU_NODES (NUM_RCU_LVL_0 + NUM_RCU_LVL_1 + NUM_RCU_LVL_2 + NUM_RCU_LVL_3)
+# define NUM_RCU_LVL_INIT { NUM_RCU_LVL_0, NUM_RCU_LVL_1, NUM_RCU_LVL_2, NUM_RCU_LVL_3 }
+# define RCU_NODE_NAME_INIT { "rcu_node_0", "rcu_node_1", "rcu_node_2", "rcu_node_3" }
+# define RCU_FQS_NAME_INIT { "rcu_node_fqs_0", "rcu_node_fqs_1", "rcu_node_fqs_2", "rcu_node_fqs_3" }
+#else
+# error "CONFIG_RCU_FANOUT insufficient for NR_CPUS"
+#endif /* #if (NR_CPUS) <= RCU_FANOUT_1 */
+
+#endif /* __LINUX_RCU_NODE_TREE_H */
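
A worked example (not in the patch) of the shape these macros compute, assuming the 64-bit defaults:

/*
 * RCU_FANOUT = 64, RCU_FANOUT_LEAF = 16, so
 *   RCU_FANOUT_1 = 16, RCU_FANOUT_2 = 1024, RCU_FANOUT_3 = 65536.
 * For NR_CPUS = 512: 512 > 16 and 512 <= 1024, so RCU_NUM_LVLS = 2,
 *   NUM_RCU_LVL_1 = DIV_ROUND_UP(512, 16) = 32 leaf nodes,
 *   NUM_RCU_NODES = 1 root + 32 leaves = 33.
 */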
diff --git a/include/linux/rcu_segcblist.h b/include/linux/rcu_segcblist.h
new file mode 100644
index 000000000000..ba4d2621d9ca
--- /dev/null
+++ b/include/linux/rcu_segcblist.h
@@ -0,0 +1,90 @@
+/*
+ * RCU segmented callback lists
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright IBM Corporation, 2017
+ *
+ * Authors: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
+ */
+
+#ifndef __INCLUDE_LINUX_RCU_SEGCBLIST_H
+#define __INCLUDE_LINUX_RCU_SEGCBLIST_H
+
+/* Simple unsegmented callback lists. */
+struct rcu_cblist {
+ struct rcu_head *head;
+ struct rcu_head **tail;
+ long len;
+ long len_lazy;
+};
+
+#define RCU_CBLIST_INITIALIZER(n) { .head = NULL, .tail = &n.head }
+
+/* Complicated segmented callback lists. ;-) */
+
+/*
+ * Index values for segments in rcu_segcblist structure.
+ *
+ * The segments are as follows:
+ *
+ * [head, *tails[RCU_DONE_TAIL]):
+ * Callbacks whose grace period has elapsed, and thus can be invoked.
+ * [*tails[RCU_DONE_TAIL], *tails[RCU_WAIT_TAIL]):
+ * Callbacks waiting for the current GP from the current CPU's viewpoint.
+ * [*tails[RCU_WAIT_TAIL], *tails[RCU_NEXT_READY_TAIL]):
+ * Callbacks that arrived before the next GP started, again from
+ * the current CPU's viewpoint. These can be handled by the next GP.
+ * [*tails[RCU_NEXT_READY_TAIL], *tails[RCU_NEXT_TAIL]):
+ * Callbacks that might have arrived after the next GP started.
+ * There is some uncertainty as to when a given GP starts and
+ * ends, but a CPU knows the exact times if it is the one starting
+ * or ending the GP. Other CPUs know that the previous GP ends
+ * before the next one starts.
+ *
+ * Note that RCU_WAIT_TAIL cannot be empty unless RCU_NEXT_READY_TAIL is also
+ * empty.
+ *
+ * The ->gp_seq[] array contains the grace-period number at which the
+ * corresponding segment of callbacks will be ready to invoke. A given
+ * element of this array is meaningful only when the corresponding segment
+ * is non-empty, and it is never valid for RCU_DONE_TAIL (whose callbacks
+ * are already ready to invoke) or for RCU_NEXT_TAIL (whose callbacks have
+ * not yet been assigned a grace-period number).
+ */
+#define RCU_DONE_TAIL 0 /* Also RCU_WAIT head. */
+#define RCU_WAIT_TAIL 1 /* Also RCU_NEXT_READY head. */
+#define RCU_NEXT_READY_TAIL 2 /* Also RCU_NEXT head. */
+#define RCU_NEXT_TAIL 3
+#define RCU_CBLIST_NSEGS 4
+
+struct rcu_segcblist {
+ struct rcu_head *head;
+ struct rcu_head **tails[RCU_CBLIST_NSEGS];
+ unsigned long gp_seq[RCU_CBLIST_NSEGS];
+ long len;
+ long len_lazy;
+};
+
+#define RCU_SEGCBLIST_INITIALIZER(n) \
+{ \
+ .head = NULL, \
+ .tails[RCU_DONE_TAIL] = &n.head, \
+ .tails[RCU_WAIT_TAIL] = &n.head, \
+ .tails[RCU_NEXT_READY_TAIL] = &n.head, \
+ .tails[RCU_NEXT_TAIL] = &n.head, \
+}
+
+#endif /* __INCLUDE_LINUX_RCU_SEGCBLIST_H */
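A minimal sketch of how a statically defined segmented list starts out, assuming nothing beyond the declarations above; the names are illustrative. In an empty list all four ->tails[] entries point at ->head, so every segment is empty until callbacks are enqueued at RCU_NEXT_TAIL.

/* Sketch only, not part of the patch. */
static struct rcu_segcblist example_cbs =
	RCU_SEGCBLIST_INITIALIZER(example_cbs);

static bool example_cbs_empty(void)
{
	/* No callbacks in any segment when ->head is NULL. */
	return example_cbs.head == NULL;
}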
diff --git a/include/linux/rculist.h b/include/linux/rculist.h
index 4f7a9561b8c4..b1fd8bf85fdc 100644
--- a/include/linux/rculist.h
+++ b/include/linux/rculist.h
@@ -509,7 +509,8 @@ static inline void hlist_add_tail_rcu(struct hlist_node *n,
{
struct hlist_node *i, *last = NULL;
- for (i = hlist_first_rcu(h); i; i = hlist_next_rcu(i))
+ /* Note: write side code, so rcu accessors are not needed. */
+ for (i = h->first; i; i = i->next)
last = i;
if (last) {
diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
index de88b33c0974..e1e5d002fdb9 100644
--- a/include/linux/rcupdate.h
+++ b/include/linux/rcupdate.h
@@ -97,6 +97,7 @@ void do_trace_rcu_torture_read(const char *rcutorturename,
unsigned long secs,
unsigned long c_old,
unsigned long c);
+bool rcu_irq_enter_disabled(void);
#else
static inline void rcutorture_get_gp_data(enum rcutorture_type test_type,
int *flags,
@@ -113,6 +114,10 @@ static inline void rcutorture_record_test_transition(void)
static inline void rcutorture_record_progress(unsigned long vernum)
{
}
+static inline bool rcu_irq_enter_disabled(void)
+{
+ return false;
+}
#ifdef CONFIG_RCU_TRACE
void do_trace_rcu_torture_read(const char *rcutorturename,
struct rcu_head *rhp,
@@ -363,15 +368,20 @@ static inline void rcu_init_nohz(void)
#ifdef CONFIG_TASKS_RCU
#define TASKS_RCU(x) x
extern struct srcu_struct tasks_rcu_exit_srcu;
-#define rcu_note_voluntary_context_switch(t) \
+#define rcu_note_voluntary_context_switch_lite(t) \
do { \
- rcu_all_qs(); \
if (READ_ONCE((t)->rcu_tasks_holdout)) \
WRITE_ONCE((t)->rcu_tasks_holdout, false); \
} while (0)
+#define rcu_note_voluntary_context_switch(t) \
+ do { \
+ rcu_all_qs(); \
+ rcu_note_voluntary_context_switch_lite(t); \
+ } while (0)
#else /* #ifdef CONFIG_TASKS_RCU */
#define TASKS_RCU(x) do { } while (0)
-#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
+#define rcu_note_voluntary_context_switch_lite(t) do { } while (0)
+#define rcu_note_voluntary_context_switch(t) rcu_all_qs()
#endif /* #else #ifdef CONFIG_TASKS_RCU */
/**
@@ -1127,11 +1137,11 @@ do { \
* if the UNLOCK and LOCK are executed by the same CPU or if the
* UNLOCK and LOCK operate on the same lock variable.
*/
-#ifdef CONFIG_PPC
+#ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE
#define smp_mb__after_unlock_lock() smp_mb() /* Full ordering for lock. */
-#else /* #ifdef CONFIG_PPC */
+#else /* #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */
#define smp_mb__after_unlock_lock() do { } while (0)
-#endif /* #else #ifdef CONFIG_PPC */
+#endif /* #else #ifdef CONFIG_ARCH_WEAK_RELEASE_ACQUIRE */
#endif /* __LINUX_RCUPDATE_H */
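A short sketch of where smp_mb__after_unlock_lock() is meant to sit, assuming the usual UNLOCK+LOCK hand-off pattern; the lock names are illustrative and the helper is a no-op except on CONFIG_ARCH_WEAK_RELEASE_ACQUIRE=y architectures.

#include <linux/spinlock.h>

/* Sketch only: make the UNLOCK of 'src' plus the LOCK of 'dst' order all
 * earlier accesses against all later ones, even on weakly ordered archs.
 */
static void example_handoff(spinlock_t *src, spinlock_t *dst)
{
	spin_unlock(src);
	spin_lock(dst);
	smp_mb__after_unlock_lock();	/* Full barrier on weak archs, no-op elsewhere. */
}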
diff --git a/include/linux/rcutiny.h b/include/linux/rcutiny.h
index b452953e21c8..74d9c3a1feee 100644
--- a/include/linux/rcutiny.h
+++ b/include/linux/rcutiny.h
@@ -33,6 +33,11 @@ static inline int rcu_dynticks_snap(struct rcu_dynticks *rdtp)
return 0;
}
+static inline bool rcu_eqs_special_set(int cpu)
+{
+ return false; /* Never flag non-existent other CPUs! */
+}
+
static inline unsigned long get_state_synchronize_rcu(void)
{
return 0;
@@ -87,10 +92,11 @@ static inline void kfree_call_rcu(struct rcu_head *head,
call_rcu(head, func);
}
-static inline void rcu_note_context_switch(void)
-{
- rcu_sched_qs();
-}
+#define rcu_note_context_switch(preempt) \
+ do { \
+ rcu_sched_qs(); \
+ rcu_note_voluntary_context_switch_lite(current); \
+ } while (0)
/*
* Take advantage of the fact that there is only one CPU, which
@@ -212,14 +218,14 @@ static inline void exit_rcu(void)
{
}
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
+#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU)
extern int rcu_scheduler_active __read_mostly;
void rcu_scheduler_starting(void);
-#else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#else /* #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
static inline void rcu_scheduler_starting(void)
{
}
-#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+#endif /* #else #if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_SRCU) */
#if defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE)
@@ -237,6 +243,10 @@ static inline bool rcu_is_watching(void)
#endif /* #else defined(CONFIG_DEBUG_LOCK_ALLOC) || defined(CONFIG_RCU_TRACE) */
+static inline void rcu_request_urgent_qs_task(struct task_struct *t)
+{
+}
+
static inline void rcu_all_qs(void)
{
barrier(); /* Avoid RCU read-side critical sections leaking across. */
diff --git a/include/linux/rcutree.h b/include/linux/rcutree.h
index 63a4e4cf40a5..0bacb6b2af69 100644
--- a/include/linux/rcutree.h
+++ b/include/linux/rcutree.h
@@ -30,7 +30,7 @@
#ifndef __LINUX_RCUTREE_H
#define __LINUX_RCUTREE_H
-void rcu_note_context_switch(void);
+void rcu_note_context_switch(bool preempt);
int rcu_needs_cpu(u64 basem, u64 *nextevt);
void rcu_cpu_stall_reset(void);
@@ -41,7 +41,7 @@ void rcu_cpu_stall_reset(void);
*/
static inline void rcu_virt_note_context_switch(int cpu)
{
- rcu_note_context_switch();
+ rcu_note_context_switch(false);
}
void synchronize_rcu_bh(void);
@@ -108,6 +108,7 @@ void rcu_scheduler_starting(void);
extern int rcu_scheduler_active __read_mostly;
bool rcu_is_watching(void);
+void rcu_request_urgent_qs_task(struct task_struct *t);
void rcu_all_qs(void);
diff --git a/include/linux/refcount.h b/include/linux/refcount.h
index 0023fee4bbbc..b34aa649d204 100644
--- a/include/linux/refcount.h
+++ b/include/linux/refcount.h
@@ -6,17 +6,36 @@
#include <linux/spinlock.h>
#include <linux/kernel.h>
+/**
+ * refcount_t - variant of atomic_t specialized for reference counts
+ * @refs: atomic_t counter field
+ *
+ * The counter saturates at UINT_MAX and will not move once
+ * there. This avoids wrapping the counter and causing 'spurious'
+ * use-after-free bugs.
+ */
typedef struct refcount_struct {
atomic_t refs;
} refcount_t;
#define REFCOUNT_INIT(n) { .refs = ATOMIC_INIT(n), }
+/**
+ * refcount_set - set a refcount's value
+ * @r: the refcount
+ * @n: value to which the refcount will be set
+ */
static inline void refcount_set(refcount_t *r, unsigned int n)
{
atomic_set(&r->refs, n);
}
+/**
+ * refcount_read - get a refcount's value
+ * @r: the refcount
+ *
+ * Return: the refcount's value
+ */
static inline unsigned int refcount_read(const refcount_t *r)
{
return atomic_read(&r->refs);
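A minimal sketch of a reference-counted object built on the helpers documented above; refcount_inc() and refcount_dec_and_test() live elsewhere in this header, and the object layout is illustrative.

#include <linux/refcount.h>
#include <linux/slab.h>

struct example_obj {
	refcount_t ref;
	/* payload ... */
};

static struct example_obj *example_obj_get(struct example_obj *obj)
{
	refcount_inc(&obj->ref);		/* Saturates at UINT_MAX instead of wrapping. */
	return obj;
}

static void example_obj_put(struct example_obj *obj)
{
	if (refcount_dec_and_test(&obj->ref))	/* True when the count reaches zero. */
		kfree(obj);
}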
diff --git a/include/linux/regulator/arizona-ldo1.h b/include/linux/regulator/arizona-ldo1.h
new file mode 100644
index 000000000000..c685f1277c63
--- /dev/null
+++ b/include/linux/regulator/arizona-ldo1.h
@@ -0,0 +1,24 @@
+/*
+ * Platform data for Arizona LDO1 regulator
+ *
+ * Copyright 2017 Cirrus Logic
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ARIZONA_LDO1_H
+#define ARIZONA_LDO1_H
+
+struct regulator_init_data;
+
+struct arizona_ldo1_pdata {
+ /** GPIO controlling LDOENA, if any */
+ int ldoena;
+
+ /** Regulator configuration for LDO1 */
+ const struct regulator_init_data *init_data;
+};
+
+#endif
diff --git a/include/linux/regulator/arizona-micsupp.h b/include/linux/regulator/arizona-micsupp.h
new file mode 100644
index 000000000000..616842619c00
--- /dev/null
+++ b/include/linux/regulator/arizona-micsupp.h
@@ -0,0 +1,21 @@
+/*
+ * Platform data for Arizona micsupp regulator
+ *
+ * Copyright 2017 Cirrus Logic
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef ARIZONA_MICSUPP_H
+#define ARIZONA_MICSUPP_H
+
+struct regulator_init_data;
+
+struct arizona_micsupp_pdata {
+ /** Regulator configuration for micsupp */
+ const struct regulator_init_data *init_data;
+};
+
+#endif
diff --git a/include/linux/regulator/consumer.h b/include/linux/regulator/consumer.h
index ea0fffa5faeb..df176d7c2b87 100644
--- a/include/linux/regulator/consumer.h
+++ b/include/linux/regulator/consumer.h
@@ -119,6 +119,7 @@ struct regmap;
#define REGULATOR_EVENT_ABORT_VOLTAGE_CHANGE 0x200
#define REGULATOR_EVENT_PRE_DISABLE 0x400
#define REGULATOR_EVENT_ABORT_DISABLE 0x800
+#define REGULATOR_EVENT_ENABLE 0x1000
/*
* Regulator errors that can be queried using regulator_get_error_flags
diff --git a/include/linux/regulator/driver.h b/include/linux/regulator/driver.h
index dac8e7b16bc6..94417b4226bd 100644
--- a/include/linux/regulator/driver.h
+++ b/include/linux/regulator/driver.h
@@ -292,6 +292,14 @@ enum regulator_type {
* set_active_discharge
* @active_discharge_reg: Register for control when using regmap
* set_active_discharge
+ * @soft_start_reg: Register for control when using regmap set_soft_start
+ * @soft_start_mask: Mask for control when using regmap set_soft_start
+ * @soft_start_val_on: Enabling value for control when using regmap
+ * set_soft_start
+ * @pull_down_reg: Register for control when using regmap set_pull_down
+ * @pull_down_mask: Mask for control when using regmap set_pull_down
+ * @pull_down_val_on: Enabling value for control when using regmap
+ * set_pull_down
*
* @enable_time: Time taken for initial enable of regulator (in uS).
* @off_on_delay: guard time (in uS), before re-enabling a regulator
@@ -345,6 +353,12 @@ struct regulator_desc {
unsigned int active_discharge_off;
unsigned int active_discharge_mask;
unsigned int active_discharge_reg;
+ unsigned int soft_start_reg;
+ unsigned int soft_start_mask;
+ unsigned int soft_start_val_on;
+ unsigned int pull_down_reg;
+ unsigned int pull_down_mask;
+ unsigned int pull_down_val_on;
unsigned int enable_time;
@@ -429,6 +443,8 @@ struct regulator_dev {
struct regulator_enable_gpio *ena_pin;
unsigned int ena_gpio_state:1;
+ unsigned int is_switch:1;
+
/* time when this regulator was disabled last time */
unsigned long last_off_jiffy;
};
@@ -476,6 +492,8 @@ int regulator_set_voltage_time_sel(struct regulator_dev *rdev,
unsigned int new_selector);
int regulator_set_bypass_regmap(struct regulator_dev *rdev, bool enable);
int regulator_get_bypass_regmap(struct regulator_dev *rdev, bool *enable);
+int regulator_set_soft_start_regmap(struct regulator_dev *rdev);
+int regulator_set_pull_down_regmap(struct regulator_dev *rdev);
int regulator_set_active_discharge_regmap(struct regulator_dev *rdev,
bool enable);
diff --git a/include/linux/regulator/machine.h b/include/linux/regulator/machine.h
index c9f795e9a2ee..117699d1f7df 100644
--- a/include/linux/regulator/machine.h
+++ b/include/linux/regulator/machine.h
@@ -108,6 +108,8 @@ struct regulator_state {
* @initial_state: Suspend state to set by default.
* @initial_mode: Mode to set at startup.
* @ramp_delay: Time to settle down after voltage change (unit: uV/us)
+ * @settling_time: Time to settle down after voltage change when voltage
+ * change is non-linear (unit: microseconds).
* @active_discharge: Enable/disable active discharge. The enum
* regulator_active_discharge values are used for
* initialisation.
@@ -149,6 +151,7 @@ struct regulation_constraints {
unsigned int initial_mode;
unsigned int ramp_delay;
+ unsigned int settling_time;
unsigned int enable_time;
unsigned int active_discharge;
diff --git a/include/linux/regulator/pfuze100.h b/include/linux/regulator/pfuze100.h
index 70c6c66c5bcf..e0ccf46f66cf 100644
--- a/include/linux/regulator/pfuze100.h
+++ b/include/linux/regulator/pfuze100.h
@@ -48,6 +48,7 @@
#define PFUZE200_VGEN4 10
#define PFUZE200_VGEN5 11
#define PFUZE200_VGEN6 12
+#define PFUZE200_COIN 13
#define PFUZE3000_SW1A 0
#define PFUZE3000_SW1B 1
diff --git a/include/linux/reservation.h b/include/linux/reservation.h
index 2b5a4679daea..156cfd330b66 100644
--- a/include/linux/reservation.h
+++ b/include/linux/reservation.h
@@ -167,6 +167,26 @@ reservation_object_lock(struct reservation_object *obj,
}
/**
+ * reservation_object_trylock - trylock the reservation object
+ * @obj: the reservation object
+ *
+ * Tries to lock the reservation object for exclusive access and modification.
+ * Note that the lock is only against other writers; readers will run
+ * concurrently with a writer under RCU. The seqlock is used to notify readers
+ * if they overlap with a writer.
+ *
+ * Also note that since no context is provided, no deadlock protection is
+ * possible.
+ *
+ * Returns true if the lock was acquired, false otherwise.
+ */
+static inline bool __must_check
+reservation_object_trylock(struct reservation_object *obj)
+{
+ return ww_mutex_trylock(&obj->lock);
+}
+
+/**
* reservation_object_unlock - unlock the reservation object
* @obj: the reservation object
*
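A sketch of how a caller might use the new trylock, under the caveat in its comment that no acquire context means no deadlock avoidance; the function name is illustrative.

#include <linux/reservation.h>

/* Sketch only: opportunistically update a reservation object, backing off
 * when another writer already holds it.
 */
static int example_try_update(struct reservation_object *resv)
{
	if (!reservation_object_trylock(resv))
		return -EBUSY;

	/* ... publish new fences here ... */

	reservation_object_unlock(resv);
	return 0;
}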
diff --git a/include/linux/rhashtable.h b/include/linux/rhashtable.h
index 092292b6675e..7d56a7ea2b2e 100644
--- a/include/linux/rhashtable.h
+++ b/include/linux/rhashtable.h
@@ -49,6 +49,21 @@
/* Base bits plus 1 bit for nulls marker */
#define RHT_HASH_RESERVED_SPACE (RHT_BASE_BITS + 1)
+/* Maximum chain length before rehash
+ *
+ * The maximum (not average) chain length grows with the size of the hash
+ * table, at a rate of (log N)/(log log N).
+ *
+ * The value of 16 is selected so that even if the hash table grew to
+ * 2^32 you would not expect the maximum chain length to exceed it
+ * unless we are under attack (or extremely unlucky).
+ *
+ * As this limit is only to detect attacks, we don't need to set it to a
+ * lower value as you'd need the chain length to vastly exceed 16 to have
+ * any real effect on the system.
+ */
+#define RHT_ELASTICITY 16u
+
struct rhash_head {
struct rhash_head __rcu *next;
};
@@ -110,29 +125,25 @@ struct rhashtable;
* @key_len: Length of key
* @key_offset: Offset of key in struct to be hashed
* @head_offset: Offset of rhash_head in struct to be hashed
- * @insecure_max_entries: Maximum number of entries (may be exceeded)
* @max_size: Maximum size while expanding
* @min_size: Minimum size while shrinking
- * @nulls_base: Base value to generate nulls marker
- * @insecure_elasticity: Set to true to disable chain length checks
- * @automatic_shrinking: Enable automatic shrinking of tables
* @locks_mul: Number of bucket locks to allocate per cpu (default: 128)
+ * @automatic_shrinking: Enable automatic shrinking of tables
+ * @nulls_base: Base value to generate nulls marker
* @hashfn: Hash function (default: jhash2 if !(key_len % 4), or jhash)
* @obj_hashfn: Function to hash object
* @obj_cmpfn: Function to compare key with object
*/
struct rhashtable_params {
- size_t nelem_hint;
- size_t key_len;
- size_t key_offset;
- size_t head_offset;
- unsigned int insecure_max_entries;
+ u16 nelem_hint;
+ u16 key_len;
+ u16 key_offset;
+ u16 head_offset;
unsigned int max_size;
- unsigned int min_size;
- u32 nulls_base;
- bool insecure_elasticity;
+ u16 min_size;
bool automatic_shrinking;
- size_t locks_mul;
+ u8 locks_mul;
+ u32 nulls_base;
rht_hashfn_t hashfn;
rht_obj_hashfn_t obj_hashfn;
rht_obj_cmpfn_t obj_cmpfn;
@@ -143,8 +154,8 @@ struct rhashtable_params {
* @tbl: Bucket table
* @nelems: Number of elements in table
* @key_len: Key length for hashfn
- * @elasticity: Maximum chain length before rehash
* @p: Configuration parameters
+ * @max_elems: Maximum number of elements in table
* @rhlist: True if this is an rhltable
* @run_work: Deferred worker to expand/shrink asynchronously
* @mutex: Mutex to protect current/future table swapping
@@ -154,8 +165,8 @@ struct rhashtable {
struct bucket_table __rcu *tbl;
atomic_t nelems;
unsigned int key_len;
- unsigned int elasticity;
struct rhashtable_params p;
+ unsigned int max_elems;
bool rhlist;
struct work_struct run_work;
struct mutex mutex;
@@ -318,8 +329,7 @@ static inline bool rht_grow_above_100(const struct rhashtable *ht,
static inline bool rht_grow_above_max(const struct rhashtable *ht,
const struct bucket_table *tbl)
{
- return ht->p.insecure_max_entries &&
- atomic_read(&ht->nelems) >= ht->p.insecure_max_entries;
+ return atomic_read(&ht->nelems) >= ht->max_elems;
}
/* The bucket lock is selected based on the hash and protects mutations
@@ -726,7 +736,7 @@ slow_path:
return rhashtable_insert_slow(ht, key, obj);
}
- elasticity = ht->elasticity;
+ elasticity = RHT_ELASTICITY;
pprev = rht_bucket_insert(ht, tbl, hash);
data = ERR_PTR(-ENOMEM);
if (!pprev)
@@ -916,6 +926,28 @@ static inline int rhashtable_lookup_insert_fast(
}
/**
+ * rhashtable_lookup_get_insert_fast - lookup and insert object into hash table
+ * @ht: hash table
+ * @obj: pointer to hash head inside object
+ * @params: hash table parameters
+ *
+ * Just like rhashtable_lookup_insert_fast(), but this function returns the
+ * object if it exists, NULL if it did not and the insertion was successful,
+ * and an ERR_PTR otherwise.
+ */
+static inline void *rhashtable_lookup_get_insert_fast(
+ struct rhashtable *ht, struct rhash_head *obj,
+ const struct rhashtable_params params)
+{
+ const char *key = rht_obj(ht, obj);
+
+ BUG_ON(ht->p.obj_hashfn);
+
+ return __rhashtable_insert_fast(ht, key + ht->p.key_offset, obj, params,
+ false);
+}
+
+/**
* rhashtable_lookup_insert_key - search and insert object to hash table
* with explicit key
* @ht: hash table
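A sketch of how a caller might consume the three possible returns of rhashtable_lookup_get_insert_fast(); the entry layout and parameters are assumptions for illustration, not part of this patch.

#include <linux/rhashtable.h>

struct example_entry {
	u32 key;
	struct rhash_head node;
};

static const struct rhashtable_params example_params = {
	.key_len	= sizeof(u32),
	.key_offset	= offsetof(struct example_entry, key),
	.head_offset	= offsetof(struct example_entry, node),
};

static struct example_entry *example_get_or_insert(struct rhashtable *ht,
						   struct example_entry *new)
{
	void *old;

	old = rhashtable_lookup_get_insert_fast(ht, &new->node, example_params);
	if (IS_ERR(old))
		return old;	/* Insertion failed, e.g. -ENOMEM. */
	if (old)
		return old;	/* Existing entry; 'new' was not inserted. */
	return new;		/* NULL: 'new' is now in the table. */
}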
diff --git a/include/linux/ring_buffer.h b/include/linux/ring_buffer.h
index b6d4568795a7..ee9b461af095 100644
--- a/include/linux/ring_buffer.h
+++ b/include/linux/ring_buffer.h
@@ -185,7 +185,7 @@ size_t ring_buffer_page_len(void *page);
void *ring_buffer_alloc_read_page(struct ring_buffer *buffer, int cpu);
-void ring_buffer_free_read_page(struct ring_buffer *buffer, void *data);
+void ring_buffer_free_read_page(struct ring_buffer *buffer, int cpu, void *data);
int ring_buffer_read_page(struct ring_buffer *buffer, void **data_page,
size_t len, int cpu, int full);
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 8c89e902df3e..43ef2c30cb0f 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -83,19 +83,17 @@ struct anon_vma_chain {
};
enum ttu_flags {
- TTU_UNMAP = 1, /* unmap mode */
- TTU_MIGRATION = 2, /* migration mode */
- TTU_MUNLOCK = 4, /* munlock mode */
- TTU_LZFREE = 8, /* lazy free mode */
- TTU_SPLIT_HUGE_PMD = 16, /* split huge PMD if any */
-
- TTU_IGNORE_MLOCK = (1 << 8), /* ignore mlock */
- TTU_IGNORE_ACCESS = (1 << 9), /* don't age */
- TTU_IGNORE_HWPOISON = (1 << 10),/* corrupted page is recoverable */
- TTU_BATCH_FLUSH = (1 << 11), /* Batch TLB flushes where possible
+ TTU_MIGRATION = 0x1, /* migration mode */
+ TTU_MUNLOCK = 0x2, /* munlock mode */
+
+ TTU_SPLIT_HUGE_PMD = 0x4, /* split huge PMD if any */
+ TTU_IGNORE_MLOCK = 0x8, /* ignore mlock */
+ TTU_IGNORE_ACCESS = 0x10, /* don't age */
+ TTU_IGNORE_HWPOISON = 0x20, /* corrupted page is recoverable */
+ TTU_BATCH_FLUSH = 0x40, /* Batch TLB flushes where possible
* and caller guarantees they will
* do a final flush if necessary */
- TTU_RMAP_LOCKED = (1 << 12) /* do not grab rmap lock:
+ TTU_RMAP_LOCKED = 0x80 /* do not grab rmap lock:
* caller holds it */
};
@@ -193,9 +191,7 @@ static inline void page_dup_rmap(struct page *page, bool compound)
int page_referenced(struct page *, int is_locked,
struct mem_cgroup *memcg, unsigned long *vm_flags);
-#define TTU_ACTION(x) ((x) & TTU_ACTION_MASK)
-
-int try_to_unmap(struct page *, enum ttu_flags flags);
+bool try_to_unmap(struct page *, enum ttu_flags flags);
/* Avoid racy checks */
#define PVMW_SYNC (1 << 0)
@@ -239,7 +235,7 @@ int page_mkclean(struct page *);
* called in munlock()/munmap() path to check for other vmas holding
* the page mlocked.
*/
-int try_to_munlock(struct page *);
+void try_to_munlock(struct page *);
void remove_migration_ptes(struct page *old, struct page *new, bool locked);
@@ -261,15 +257,19 @@ int page_mapped_in_vma(struct page *page, struct vm_area_struct *vma);
*/
struct rmap_walk_control {
void *arg;
- int (*rmap_one)(struct page *page, struct vm_area_struct *vma,
+ /*
+ * Return false if page table scanning in rmap_walk should be stopped.
+ * Otherwise, return true.
+ */
+ bool (*rmap_one)(struct page *page, struct vm_area_struct *vma,
unsigned long addr, void *arg);
int (*done)(struct page *page);
struct anon_vma *(*anon_lock)(struct page *page);
bool (*invalid_vma)(struct vm_area_struct *vma, void *arg);
};
-int rmap_walk(struct page *page, struct rmap_walk_control *rwc);
-int rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
+void rmap_walk(struct page *page, struct rmap_walk_control *rwc);
+void rmap_walk_locked(struct page *page, struct rmap_walk_control *rwc);
#else /* !CONFIG_MMU */
@@ -285,7 +285,7 @@ static inline int page_referenced(struct page *page, int is_locked,
return 0;
}
-#define try_to_unmap(page, refs) SWAP_FAIL
+#define try_to_unmap(page, refs) false
static inline int page_mkclean(struct page *page)
{
@@ -295,13 +295,4 @@ static inline int page_mkclean(struct page *page)
#endif /* CONFIG_MMU */
-/*
- * Return values of try_to_unmap
- */
-#define SWAP_SUCCESS 0
-#define SWAP_AGAIN 1
-#define SWAP_FAIL 2
-#define SWAP_MLOCK 3
-#define SWAP_LZFREE 4
-
#endif /* _LINUX_RMAP_H */
diff --git a/include/linux/rodata_test.h b/include/linux/rodata_test.h
index ea05f6c51413..84766bcdd01f 100644
--- a/include/linux/rodata_test.h
+++ b/include/linux/rodata_test.h
@@ -14,7 +14,6 @@
#define _RODATA_TEST_H
#ifdef CONFIG_DEBUG_RODATA_TEST
-extern const int rodata_test_data;
void rodata_test(void);
#else
static inline void rodata_test(void) {}
diff --git a/include/linux/rpmsg/qcom_smd.h b/include/linux/rpmsg/qcom_smd.h
index 8ec8b6439b25..f27917e0a101 100644
--- a/include/linux/rpmsg/qcom_smd.h
+++ b/include/linux/rpmsg/qcom_smd.h
@@ -6,7 +6,7 @@
struct qcom_smd_edge;
-#if IS_ENABLED(CONFIG_RPMSG_QCOM_SMD) || IS_ENABLED(CONFIG_QCOM_SMD)
+#if IS_ENABLED(CONFIG_RPMSG_QCOM_SMD)
struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
struct device_node *node);
diff --git a/include/linux/sbitmap.h b/include/linux/sbitmap.h
index d4e0a204c118..a1904aadbc45 100644
--- a/include/linux/sbitmap.h
+++ b/include/linux/sbitmap.h
@@ -176,6 +176,25 @@ void sbitmap_resize(struct sbitmap *sb, unsigned int depth);
int sbitmap_get(struct sbitmap *sb, unsigned int alloc_hint, bool round_robin);
/**
+ * sbitmap_get_shallow() - Try to allocate a free bit from a &struct sbitmap,
+ * limiting the depth used from each word.
+ * @sb: Bitmap to allocate from.
+ * @alloc_hint: Hint for where to start searching for a free bit.
+ * @shallow_depth: The maximum number of bits to allocate from a single word.
+ *
+ * This rather specific operation allows for having multiple users with
+ * different allocation limits. E.g., there can be a high-priority class that
+ * uses sbitmap_get() and a low-priority class that uses sbitmap_get_shallow()
+ * with a @shallow_depth of (1 << (@sb->shift - 1)). Then, the low-priority
+ * class can only allocate half of the total bits in the bitmap, preventing it
+ * from starving out the high-priority class.
+ *
+ * Return: Non-negative allocated bit number if successful, -1 otherwise.
+ */
+int sbitmap_get_shallow(struct sbitmap *sb, unsigned int alloc_hint,
+ unsigned long shallow_depth);
+
+/**
* sbitmap_any_bit_set() - Check for a set bit in a &struct sbitmap.
* @sb: Bitmap to check.
*
@@ -326,6 +345,19 @@ void sbitmap_queue_resize(struct sbitmap_queue *sbq, unsigned int depth);
int __sbitmap_queue_get(struct sbitmap_queue *sbq);
/**
+ * __sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
+ * sbitmap_queue, limiting the depth used from each word, with preemption
+ * already disabled.
+ * @sbq: Bitmap queue to allocate from.
+ * @shallow_depth: The maximum number of bits to allocate from a single word.
+ * See sbitmap_get_shallow().
+ *
+ * Return: Non-negative allocated bit number if successful, -1 otherwise.
+ */
+int __sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
+ unsigned int shallow_depth);
+
+/**
* sbitmap_queue_get() - Try to allocate a free bit from a &struct
* sbitmap_queue.
* @sbq: Bitmap queue to allocate from.
@@ -346,6 +378,29 @@ static inline int sbitmap_queue_get(struct sbitmap_queue *sbq,
}
/**
+ * sbitmap_queue_get_shallow() - Try to allocate a free bit from a &struct
+ * sbitmap_queue, limiting the depth used from each word.
+ * @sbq: Bitmap queue to allocate from.
+ * @cpu: Output parameter; will contain the CPU we ran on (e.g., to be passed to
+ * sbitmap_queue_clear()).
+ * @shallow_depth: The maximum number of bits to allocate from a single word.
+ * See sbitmap_get_shallow().
+ *
+ * Return: Non-negative allocated bit number if successful, -1 otherwise.
+ */
+static inline int sbitmap_queue_get_shallow(struct sbitmap_queue *sbq,
+ unsigned int *cpu,
+ unsigned int shallow_depth)
+{
+ int nr;
+
+ *cpu = get_cpu();
+ nr = __sbitmap_queue_get_shallow(sbq, shallow_depth);
+ put_cpu();
+ return nr;
+}
+
+/**
* sbitmap_queue_clear() - Free an allocated bit and wake up waiters on a
* &struct sbitmap_queue.
* @sbq: Bitmap to free from.
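A minimal sketch of the low-priority-class use case described in the sbitmap_get_shallow() comment above; the function name is illustrative and the queue is assumed to have been initialized elsewhere.

#include <linux/sbitmap.h>

/* Sketch only: a low-priority allocator limited to half the bits per word. */
static int example_get_low_prio_tag(struct sbitmap_queue *sbq, unsigned int *cpu)
{
	unsigned int shallow = 1U << (sbq->sb.shift - 1);

	return sbitmap_queue_get_shallow(sbq, cpu, shallow);	/* -1 if exhausted. */
}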
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 4cf9a59a4d08..2b69fc650201 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -186,7 +186,7 @@ extern long io_schedule_timeout(long timeout);
extern void io_schedule(void);
/**
- * struct prev_cputime - snaphsot of system and user cputime
+ * struct prev_cputime - snapshot of system and user cputime
* @utime: time spent in user mode
* @stime: time spent in system mode
* @lock: protects the above two fields
@@ -779,6 +779,8 @@ struct task_struct {
/* PI waiters blocked on a rt_mutex held by this task: */
struct rb_root pi_waiters;
struct rb_node *pi_waiters_leftmost;
+ /* Updated under owner's pi_lock and rq lock */
+ struct task_struct *pi_top_task;
/* Deadlock detection and priority inheritance handling: */
struct rt_mutex_waiter *pi_blocked_on;
#endif
@@ -1042,6 +1044,13 @@ struct task_struct {
/* A live task holds one reference: */
atomic_t stack_refcount;
#endif
+#ifdef CONFIG_LIVEPATCH
+ int patch_state;
+#endif
+#ifdef CONFIG_SECURITY
+ /* Used by LSM modules for access restriction: */
+ void *security;
+#endif
/* CPU-specific state of this task: */
struct thread_struct thread;
@@ -1215,9 +1224,9 @@ extern struct pid *cad_pid;
#define PF_USED_ASYNC 0x00004000 /* Used async_schedule*(), used by module init */
#define PF_NOFREEZE 0x00008000 /* This thread should not be frozen */
#define PF_FROZEN 0x00010000 /* Frozen for system suspend */
-#define PF_FSTRANS 0x00020000 /* Inside a filesystem transaction */
-#define PF_KSWAPD 0x00040000 /* I am kswapd */
-#define PF_MEMALLOC_NOIO 0x00080000 /* Allocating memory without IO involved */
+#define PF_KSWAPD 0x00020000 /* I am kswapd */
+#define PF_MEMALLOC_NOFS 0x00040000 /* All allocation requests will inherit GFP_NOFS */
+#define PF_MEMALLOC_NOIO 0x00080000 /* All allocation requests will inherit GFP_NOIO */
#define PF_LESS_THROTTLE 0x00100000 /* Throttle me less: I clean memory */
#define PF_KTHREAD 0x00200000 /* I am a kernel thread */
#define PF_RANDOMIZE 0x00400000 /* Randomize virtual address space */
@@ -1260,7 +1269,6 @@ extern struct pid *cad_pid;
#define PFA_NO_NEW_PRIVS 0 /* May not gain new privileges. */
#define PFA_SPREAD_PAGE 1 /* Spread page cache over cpuset */
#define PFA_SPREAD_SLAB 2 /* Spread some slab caches over cpuset */
-#define PFA_LMK_WAITING 3 /* Lowmemorykiller is waiting */
#define TASK_PFA_TEST(name, func) \
@@ -1286,14 +1294,11 @@ TASK_PFA_TEST(SPREAD_SLAB, spread_slab)
TASK_PFA_SET(SPREAD_SLAB, spread_slab)
TASK_PFA_CLEAR(SPREAD_SLAB, spread_slab)
-TASK_PFA_TEST(LMK_WAITING, lmk_waiting)
-TASK_PFA_SET(LMK_WAITING, lmk_waiting)
-
static inline void
-tsk_restore_flags(struct task_struct *task, unsigned long orig_flags, unsigned long flags)
+current_restore_flags(unsigned long orig_flags, unsigned long flags)
{
- task->flags &= ~flags;
- task->flags |= orig_flags & flags;
+ current->flags &= ~flags;
+ current->flags |= orig_flags & flags;
}
extern int cpuset_cpumask_can_shrink(const struct cpumask *cur, const struct cpumask *trial);
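A sketch of the save/modify/restore pattern that current_restore_flags() replaces tsk_restore_flags() for; the PF_MEMALLOC use here is an assumed example, not taken from this patch.

#include <linux/sched.h>

static void example_reclaim_section(void)
{
	unsigned long pflags = current->flags;

	current->flags |= PF_MEMALLOC;
	/* ... allocation-heavy work that must not recurse into reclaim ... */
	current_restore_flags(pflags, PF_MEMALLOC);	/* Restore only the touched bit. */
}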
diff --git a/include/linux/sched/mm.h b/include/linux/sched/mm.h
index 830953ebb391..2b24a6974847 100644
--- a/include/linux/sched/mm.h
+++ b/include/linux/sched/mm.h
@@ -149,13 +149,21 @@ static inline bool in_vfork(struct task_struct *tsk)
return ret;
}
-/* __GFP_IO isn't allowed if PF_MEMALLOC_NOIO is set in current->flags
- * __GFP_FS is also cleared as it implies __GFP_IO.
+/*
+ * Applies per-task gfp context to the given allocation flags.
+ * PF_MEMALLOC_NOIO implies GFP_NOIO
+ * PF_MEMALLOC_NOFS implies GFP_NOFS
*/
-static inline gfp_t memalloc_noio_flags(gfp_t flags)
+static inline gfp_t current_gfp_context(gfp_t flags)
{
+ /*
+ * NOIO implies both NOIO and NOFS, and it is a weaker context,
+ * so always make sure it takes precedence
+ */
if (unlikely(current->flags & PF_MEMALLOC_NOIO))
flags &= ~(__GFP_IO | __GFP_FS);
+ else if (unlikely(current->flags & PF_MEMALLOC_NOFS))
+ flags &= ~__GFP_FS;
return flags;
}
@@ -171,4 +179,28 @@ static inline void memalloc_noio_restore(unsigned int flags)
current->flags = (current->flags & ~PF_MEMALLOC_NOIO) | flags;
}
+static inline unsigned int memalloc_nofs_save(void)
+{
+ unsigned int flags = current->flags & PF_MEMALLOC_NOFS;
+ current->flags |= PF_MEMALLOC_NOFS;
+ return flags;
+}
+
+static inline void memalloc_nofs_restore(unsigned int flags)
+{
+ current->flags = (current->flags & ~PF_MEMALLOC_NOFS) | flags;
+}
+
+static inline unsigned int memalloc_noreclaim_save(void)
+{
+ unsigned int flags = current->flags & PF_MEMALLOC;
+ current->flags |= PF_MEMALLOC;
+ return flags;
+}
+
+static inline void memalloc_noreclaim_restore(unsigned int flags)
+{
+ current->flags = (current->flags & ~PF_MEMALLOC) | flags;
+}
+
#endif /* _LINUX_SCHED_MM_H */
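A sketch of the filesystem-side scope the new memalloc_nofs_save()/memalloc_nofs_restore() pair is intended for; the function name is illustrative. Inside the scope, current_gfp_context() strips __GFP_FS, so a plain GFP_KERNEL allocation behaves like GFP_NOFS.

#include <linux/sched/mm.h>
#include <linux/slab.h>

static void *example_fs_transaction_alloc(size_t size)
{
	unsigned int nofs_flags = memalloc_nofs_save();
	void *p;

	p = kmalloc(size, GFP_KERNEL);	/* Effectively GFP_NOFS in this scope. */

	memalloc_nofs_restore(nofs_flags);
	return p;
}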
diff --git a/include/linux/sched/rt.h b/include/linux/sched/rt.h
index 3bd668414f61..f93329aba31a 100644
--- a/include/linux/sched/rt.h
+++ b/include/linux/sched/rt.h
@@ -18,27 +18,20 @@ static inline int rt_task(struct task_struct *p)
}
#ifdef CONFIG_RT_MUTEXES
-extern int rt_mutex_getprio(struct task_struct *p);
-extern void rt_mutex_setprio(struct task_struct *p, int prio);
-extern int rt_mutex_get_effective_prio(struct task_struct *task, int newprio);
-extern struct task_struct *rt_mutex_get_top_task(struct task_struct *task);
+/*
+ * Must hold either p->pi_lock or task_rq(p)->lock.
+ */
+static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *p)
+{
+ return p->pi_top_task;
+}
+extern void rt_mutex_setprio(struct task_struct *p, struct task_struct *pi_task);
extern void rt_mutex_adjust_pi(struct task_struct *p);
static inline bool tsk_is_pi_blocked(struct task_struct *tsk)
{
return tsk->pi_blocked_on != NULL;
}
#else
-static inline int rt_mutex_getprio(struct task_struct *p)
-{
- return p->normal_prio;
-}
-
-static inline int rt_mutex_get_effective_prio(struct task_struct *task,
- int newprio)
-{
- return newprio;
-}
-
static inline struct task_struct *rt_mutex_get_top_task(struct task_struct *task)
{
return NULL;
diff --git a/include/linux/sched/signal.h b/include/linux/sched/signal.h
index 2cf446704cd4..c06d63b3a583 100644
--- a/include/linux/sched/signal.h
+++ b/include/linux/sched/signal.h
@@ -293,7 +293,6 @@ extern int kill_pid_info_as_cred(int, struct siginfo *, struct pid *,
const struct cred *, u32);
extern int kill_pgrp(struct pid *pid, int sig, int priv);
extern int kill_pid(struct pid *pid, int sig, int priv);
-extern int kill_proc_info(int, struct siginfo *, pid_t);
extern __must_check bool do_notify_parent(struct task_struct *, int);
extern void __wake_up_parent(struct task_struct *p, struct task_struct *parent);
extern void force_sig(int, struct task_struct *);
diff --git a/include/linux/security.h b/include/linux/security.h
index 96899fad7016..af675b576645 100644
--- a/include/linux/security.h
+++ b/include/linux/security.h
@@ -133,6 +133,10 @@ extern unsigned long dac_mmap_min_addr;
/* setfsuid or setfsgid, id0 == fsuid or fsgid */
#define LSM_SETID_FS 8
+/* Flags for security_task_prlimit(). */
+#define LSM_PRLIMIT_READ 1
+#define LSM_PRLIMIT_WRITE 2
+
/* forward declares to avoid warnings */
struct sched_param;
struct request_sock;
@@ -304,6 +308,7 @@ int security_file_send_sigiotask(struct task_struct *tsk,
int security_file_receive(struct file *file);
int security_file_open(struct file *file, const struct cred *cred);
int security_task_create(unsigned long clone_flags);
+int security_task_alloc(struct task_struct *task, unsigned long clone_flags);
void security_task_free(struct task_struct *task);
int security_cred_alloc_blank(struct cred *cred, gfp_t gfp);
void security_cred_free(struct cred *cred);
@@ -324,6 +329,8 @@ void security_task_getsecid(struct task_struct *p, u32 *secid);
int security_task_setnice(struct task_struct *p, int nice);
int security_task_setioprio(struct task_struct *p, int ioprio);
int security_task_getioprio(struct task_struct *p);
+int security_task_prlimit(const struct cred *cred, const struct cred *tcred,
+ unsigned int flags);
int security_task_setrlimit(struct task_struct *p, unsigned int resource,
struct rlimit *new_rlim);
int security_task_setscheduler(struct task_struct *p);
@@ -855,6 +862,12 @@ static inline int security_task_create(unsigned long clone_flags)
return 0;
}
+static inline int security_task_alloc(struct task_struct *task,
+ unsigned long clone_flags)
+{
+ return 0;
+}
+
static inline void security_task_free(struct task_struct *task)
{ }
@@ -949,6 +962,13 @@ static inline int security_task_getioprio(struct task_struct *p)
return 0;
}
+static inline int security_task_prlimit(const struct cred *cred,
+ const struct cred *tcred,
+ unsigned int flags)
+{
+ return 0;
+}
+
static inline int security_task_setrlimit(struct task_struct *p,
unsigned int resource,
struct rlimit *new_rlim)
diff --git a/include/linux/sem.h b/include/linux/sem.h
index 4fc222f8755d..9edec926e9d9 100644
--- a/include/linux/sem.h
+++ b/include/linux/sem.h
@@ -10,8 +10,7 @@ struct task_struct;
/* One sem_array data structure for each set of semaphores in the system. */
struct sem_array {
- struct kern_ipc_perm ____cacheline_aligned_in_smp
- sem_perm; /* permissions .. see ipc.h */
+ struct kern_ipc_perm sem_perm; /* permissions .. see ipc.h */
time_t sem_ctime; /* last change time */
struct sem *sem_base; /* ptr to first semaphore in array */
struct list_head pending_alter; /* pending operations */
diff --git a/include/linux/serdev.h b/include/linux/serdev.h
index 9519da6253a8..cda76c6506ca 100644
--- a/include/linux/serdev.h
+++ b/include/linux/serdev.h
@@ -15,6 +15,8 @@
#include <linux/types.h>
#include <linux/device.h>
+#include <linux/termios.h>
+#include <linux/delay.h>
struct serdev_controller;
struct serdev_device;
@@ -39,12 +41,16 @@ struct serdev_device_ops {
* @nr: Device number on serdev bus.
* @ctrl: serdev controller managing this device.
* @ops: Device operations.
+ * @write_comp:	Completion used by serdev_device_write() internally
+ * @write_lock:	Lock to serialize access when writing data
*/
struct serdev_device {
struct device dev;
int nr;
struct serdev_controller *ctrl;
const struct serdev_device_ops *ops;
+ struct completion write_comp;
+ struct mutex write_lock;
};
static inline struct serdev_device *to_serdev_device(struct device *d)
@@ -81,6 +87,9 @@ struct serdev_controller_ops {
void (*close)(struct serdev_controller *);
void (*set_flow_control)(struct serdev_controller *, bool);
unsigned int (*set_baudrate)(struct serdev_controller *, unsigned int);
+ void (*wait_until_sent)(struct serdev_controller *, long);
+ int (*get_tiocm)(struct serdev_controller *);
+ int (*set_tiocm)(struct serdev_controller *, unsigned int, unsigned int);
};
/**
@@ -165,7 +174,7 @@ static inline void serdev_controller_write_wakeup(struct serdev_controller *ctrl
if (!serdev || !serdev->ops->write_wakeup)
return;
- serdev->ops->write_wakeup(ctrl->serdev);
+ serdev->ops->write_wakeup(serdev);
}
static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,
@@ -177,7 +186,7 @@ static inline int serdev_controller_receive_buf(struct serdev_controller *ctrl,
if (!serdev || !serdev->ops->receive_buf)
return -EINVAL;
- return serdev->ops->receive_buf(ctrl->serdev, data, count);
+ return serdev->ops->receive_buf(serdev, data, count);
}
#if IS_ENABLED(CONFIG_SERIAL_DEV_BUS)
@@ -186,7 +195,11 @@ int serdev_device_open(struct serdev_device *);
void serdev_device_close(struct serdev_device *);
unsigned int serdev_device_set_baudrate(struct serdev_device *, unsigned int);
void serdev_device_set_flow_control(struct serdev_device *, bool);
-int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_t);
+void serdev_device_wait_until_sent(struct serdev_device *, long);
+int serdev_device_get_tiocm(struct serdev_device *);
+int serdev_device_set_tiocm(struct serdev_device *, int, int);
+void serdev_device_write_wakeup(struct serdev_device *);
+int serdev_device_write(struct serdev_device *, const unsigned char *, size_t, unsigned long);
void serdev_device_write_flush(struct serdev_device *);
int serdev_device_write_room(struct serdev_device *);
@@ -223,7 +236,17 @@ static inline unsigned int serdev_device_set_baudrate(struct serdev_device *sdev
return 0;
}
static inline void serdev_device_set_flow_control(struct serdev_device *sdev, bool enable) {}
-static inline int serdev_device_write_buf(struct serdev_device *sdev, const unsigned char *buf, size_t count)
+static inline void serdev_device_wait_until_sent(struct serdev_device *sdev, long timeout) {}
+static inline int serdev_device_get_tiocm(struct serdev_device *serdev)
+{
+ return -ENOTSUPP;
+}
+static inline int serdev_device_set_tiocm(struct serdev_device *serdev, int set, int clear)
+{
+ return -ENOTSUPP;
+}
+static inline int serdev_device_write(struct serdev_device *sdev, const unsigned char *buf,
+ size_t count, unsigned long timeout)
{
return -ENODEV;
}
@@ -238,6 +261,36 @@ static inline int serdev_device_write_room(struct serdev_device *sdev)
#endif /* CONFIG_SERIAL_DEV_BUS */
+static inline bool serdev_device_get_cts(struct serdev_device *serdev)
+{
+ int status = serdev_device_get_tiocm(serdev);
+ return !!(status & TIOCM_CTS);
+}
+
+static inline int serdev_device_wait_for_cts(struct serdev_device *serdev, bool state, int timeout_ms)
+{
+ unsigned long timeout;
+ bool signal;
+
+ timeout = jiffies + msecs_to_jiffies(timeout_ms);
+ while (time_is_after_jiffies(timeout)) {
+ signal = serdev_device_get_cts(serdev);
+ if (signal == state)
+ return 0;
+ usleep_range(1000, 2000);
+ }
+
+ return -ETIMEDOUT;
+}
+
+static inline int serdev_device_set_rts(struct serdev_device *serdev, bool enable)
+{
+ if (enable)
+ return serdev_device_set_tiocm(serdev, TIOCM_RTS, 0);
+ else
+ return serdev_device_set_tiocm(serdev, 0, TIOCM_RTS);
+}
+
/*
* serdev hooks into TTY core
*/
@@ -259,4 +312,11 @@ static inline struct device *serdev_tty_port_register(struct tty_port *port,
static inline void serdev_tty_port_unregister(struct tty_port *port) {}
#endif /* CONFIG_SERIAL_DEV_CTRL_TTYPORT */
+static inline int serdev_device_write_buf(struct serdev_device *serdev,
+ const unsigned char *data,
+ size_t count)
+{
+ return serdev_device_write(serdev, data, count, 0);
+}
+
#endif /*_LINUX_SERDEV_H */
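A sketch of a client driver combining the new modem-control helpers with the timed write; the timeout values are illustrative, and the write timeout is assumed to be in jiffies.

#include <linux/serdev.h>
#include <linux/jiffies.h>

static int example_send_with_handshake(struct serdev_device *serdev,
				       const unsigned char *buf, size_t len)
{
	int ret;

	ret = serdev_device_set_rts(serdev, true);
	if (ret)
		return ret;

	ret = serdev_device_wait_for_cts(serdev, true, 100);	/* Up to 100 ms for CTS. */
	if (ret)
		return ret;

	/* Blocks until the buffer is accepted or the (assumed jiffies) timeout expires. */
	return serdev_device_write(serdev, buf, len, msecs_to_jiffies(500));
}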
diff --git a/include/linux/serial_core.h b/include/linux/serial_core.h
index 58484fb35cc8..64d892f1e5cd 100644
--- a/include/linux/serial_core.h
+++ b/include/linux/serial_core.h
@@ -247,6 +247,7 @@ struct uart_port {
unsigned char suspended;
unsigned char irq_wake;
unsigned char unused[2];
+ const char *name; /* port name */
struct attribute_group *attr_group; /* port specific attributes */
const struct attribute_group **tty_groups; /* all attributes (serial core use only) */
struct serial_rs485 rs485;
diff --git a/include/linux/serio.h b/include/linux/serio.h
index c733cff44e18..138a5efe863a 100644
--- a/include/linux/serio.h
+++ b/include/linux/serio.h
@@ -77,6 +77,7 @@ struct serio_driver {
irqreturn_t (*interrupt)(struct serio *, unsigned char, unsigned int);
int (*connect)(struct serio *, struct serio_driver *drv);
int (*reconnect)(struct serio *);
+ int (*fast_reconnect)(struct serio *);
void (*disconnect)(struct serio *);
void (*cleanup)(struct serio *);
diff --git a/include/linux/signal.h b/include/linux/signal.h
index 94ad6eea9550..1f5a16620693 100644
--- a/include/linux/signal.h
+++ b/include/linux/signal.h
@@ -390,10 +390,6 @@ int unhandled_signal(struct task_struct *tsk, int sig);
#define sig_kernel_ignore(sig) siginmask(sig, SIG_KERNEL_IGNORE_MASK)
#define sig_kernel_stop(sig) siginmask(sig, SIG_KERNEL_STOP_MASK)
-#define sig_user_defined(t, signr) \
- (((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_DFL) && \
- ((t)->sighand->action[(signr)-1].sa.sa_handler != SIG_IGN))
-
#define sig_fatal(t, signr) \
(!siginmask(signr, SIG_KERNEL_IGNORE_MASK|SIG_KERNEL_STOP_MASK) && \
(t)->sighand->action[(signr)-1].sa.sa_handler == SIG_DFL)
diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h
index c776abd86937..a098d95b3d84 100644
--- a/include/linux/skbuff.h
+++ b/include/linux/skbuff.h
@@ -413,14 +413,15 @@ struct ubuf_info {
* the end of the header data, ie. at skb->end.
*/
struct skb_shared_info {
+ unsigned short _unused;
unsigned char nr_frags;
__u8 tx_flags;
unsigned short gso_size;
/* Warning: this field is not always filled in (UFO)! */
unsigned short gso_segs;
- unsigned short gso_type;
struct sk_buff *frag_list;
struct skb_shared_hwtstamps hwtstamps;
+ unsigned int gso_type;
u32 tskey;
__be32 ip6_frag_id;
@@ -491,6 +492,8 @@ enum {
SKB_GSO_TUNNEL_REMCSUM = 1 << 14,
SKB_GSO_SCTP = 1 << 15,
+
+ SKB_GSO_ESP = 1 << 16,
};
#if BITS_PER_LONG > 32
@@ -3113,7 +3116,7 @@ struct sk_buff *pskb_extract(struct sk_buff *skb, int off, int to_copy,
static inline int memcpy_from_msg(void *data, struct msghdr *msg, int len)
{
- return copy_from_iter(data, len, &msg->msg_iter) == len ? 0 : -EFAULT;
+ return copy_from_iter_full(data, len, &msg->msg_iter) ? 0 : -EFAULT;
}
static inline int memcpy_to_msg(struct msghdr *msg, void *data, int len)
diff --git a/include/linux/slab.h b/include/linux/slab.h
index 3c37a8c51921..04a7f7993e67 100644
--- a/include/linux/slab.h
+++ b/include/linux/slab.h
@@ -28,7 +28,7 @@
#define SLAB_STORE_USER 0x00010000UL /* DEBUG: Store the last owner for bug hunting */
#define SLAB_PANIC 0x00040000UL /* Panic if kmem_cache_create() fails */
/*
- * SLAB_DESTROY_BY_RCU - **WARNING** READ THIS!
+ * SLAB_TYPESAFE_BY_RCU - **WARNING** READ THIS!
*
* This delays freeing the SLAB page by a grace period, it does _NOT_
* delay object freeing. This means that if you do kmem_cache_free()
@@ -61,8 +61,10 @@
*
* rcu_read_lock before reading the address, then rcu_read_unlock after
* taking the spinlock within the structure expected at that address.
+ *
+ * Note that SLAB_TYPESAFE_BY_RCU was originally named SLAB_DESTROY_BY_RCU.
*/
-#define SLAB_DESTROY_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
+#define SLAB_TYPESAFE_BY_RCU 0x00080000UL /* Defer freeing slabs to RCU */
#define SLAB_MEM_SPREAD 0x00100000UL /* Spread some memory over cpuset */
#define SLAB_TRACE 0x00200000UL /* Trace allocations and frees */
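A sketch of the lock-then-revalidate lookup pattern the SLAB_TYPESAFE_BY_RCU comment above describes; the item layout and the RCU-protected lookup helper are assumptions for illustration.

#include <linux/rcupdate.h>
#include <linux/spinlock.h>

struct example_item {
	spinlock_t lock;
	u32 key;
};

/* Hypothetical helper: fetches a candidate from an RCU-protected table. */
static struct example_item *example_find_candidate(u32 key);

static struct example_item *example_lookup_stable(u32 key)
{
	struct example_item *item;

	rcu_read_lock();
	item = example_find_candidate(key);
	if (item)
		spin_lock(&item->lock);	/* Safe: cache created with SLAB_TYPESAFE_BY_RCU. */
	rcu_read_unlock();

	if (!item)
		return NULL;
	if (item->key == key)
		return item;		/* Still the object we looked up; caller unlocks. */

	spin_unlock(&item->lock);	/* Object was recycled for another key. */
	return NULL;
}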
diff --git a/include/linux/smp.h b/include/linux/smp.h
index 8e0cb7a0f836..68123c1fe549 100644
--- a/include/linux/smp.h
+++ b/include/linux/smp.h
@@ -120,6 +120,13 @@ extern unsigned int setup_max_cpus;
extern void __init setup_nr_cpu_ids(void);
extern void __init smp_init(void);
+extern int __boot_cpu_id;
+
+static inline int get_boot_cpu_id(void)
+{
+ return __boot_cpu_id;
+}
+
#else /* !SMP */
static inline void smp_send_stop(void) { }
@@ -158,6 +165,11 @@ static inline void smp_init(void) { up_late_init(); }
static inline void smp_init(void) { }
#endif
+static inline int get_boot_cpu_id(void)
+{
+ return 0;
+}
+
#endif /* !SMP */
/*
diff --git a/include/linux/soc/qcom/smd.h b/include/linux/soc/qcom/smd.h
deleted file mode 100644
index f148e0ffbec7..000000000000
--- a/include/linux/soc/qcom/smd.h
+++ /dev/null
@@ -1,139 +0,0 @@
-#ifndef __QCOM_SMD_H__
-#define __QCOM_SMD_H__
-
-#include <linux/device.h>
-#include <linux/mod_devicetable.h>
-
-struct qcom_smd;
-struct qcom_smd_channel;
-struct qcom_smd_lookup;
-
-/**
- * struct qcom_smd_id - struct used for matching a smd device
- * @name: name of the channel
- */
-struct qcom_smd_id {
- char name[20];
-};
-
-/**
- * struct qcom_smd_device - smd device struct
- * @dev: the device struct
- * @channel: handle to the smd channel for this device
- */
-struct qcom_smd_device {
- struct device dev;
- struct qcom_smd_channel *channel;
-};
-
-typedef int (*qcom_smd_cb_t)(struct qcom_smd_channel *, const void *, size_t);
-
-/**
- * struct qcom_smd_driver - smd driver struct
- * @driver: underlying device driver
- * @smd_match_table: static channel match table
- * @probe: invoked when the smd channel is found
- * @remove: invoked when the smd channel is closed
- * @callback: invoked when an inbound message is received on the channel,
- * should return 0 on success or -EBUSY if the data cannot be
- * consumed at this time
- */
-struct qcom_smd_driver {
- struct device_driver driver;
- const struct qcom_smd_id *smd_match_table;
-
- int (*probe)(struct qcom_smd_device *dev);
- void (*remove)(struct qcom_smd_device *dev);
- qcom_smd_cb_t callback;
-};
-
-#if IS_ENABLED(CONFIG_QCOM_SMD)
-
-int qcom_smd_driver_register(struct qcom_smd_driver *drv);
-void qcom_smd_driver_unregister(struct qcom_smd_driver *drv);
-
-struct qcom_smd_channel *qcom_smd_open_channel(struct qcom_smd_channel *channel,
- const char *name,
- qcom_smd_cb_t cb);
-void qcom_smd_close_channel(struct qcom_smd_channel *channel);
-void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel);
-void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data);
-int qcom_smd_send(struct qcom_smd_channel *channel, const void *data, int len);
-
-
-struct qcom_smd_edge *qcom_smd_register_edge(struct device *parent,
- struct device_node *node);
-int qcom_smd_unregister_edge(struct qcom_smd_edge *edge);
-
-#else
-
-static inline int qcom_smd_driver_register(struct qcom_smd_driver *drv)
-{
- return -ENXIO;
-}
-
-static inline void qcom_smd_driver_unregister(struct qcom_smd_driver *drv)
-{
- /* This shouldn't be possible */
- WARN_ON(1);
-}
-
-static inline struct qcom_smd_channel *
-qcom_smd_open_channel(struct qcom_smd_channel *channel,
- const char *name,
- qcom_smd_cb_t cb)
-{
- /* This shouldn't be possible */
- WARN_ON(1);
- return NULL;
-}
-
-static inline void qcom_smd_close_channel(struct qcom_smd_channel *channel)
-{
- /* This shouldn't be possible */
- WARN_ON(1);
-}
-
-static inline void *qcom_smd_get_drvdata(struct qcom_smd_channel *channel)
-{
- /* This shouldn't be possible */
- WARN_ON(1);
- return NULL;
-}
-
-static inline void qcom_smd_set_drvdata(struct qcom_smd_channel *channel, void *data)
-{
- /* This shouldn't be possible */
- WARN_ON(1);
-}
-
-static inline int qcom_smd_send(struct qcom_smd_channel *channel,
- const void *data, int len)
-{
- /* This shouldn't be possible */
- WARN_ON(1);
- return -ENXIO;
-}
-
-static inline struct qcom_smd_edge *
-qcom_smd_register_edge(struct device *parent,
- struct device_node *node)
-{
- return ERR_PTR(-ENXIO);
-}
-
-static inline int qcom_smd_unregister_edge(struct qcom_smd_edge *edge)
-{
- /* This shouldn't be possible */
- WARN_ON(1);
- return -ENXIO;
-}
-
-#endif
-
-#define module_qcom_smd_driver(__smd_driver) \
- module_driver(__smd_driver, qcom_smd_driver_register, \
- qcom_smd_driver_unregister)
-
-
-#endif
diff --git a/include/linux/soc/qcom/wcnss_ctrl.h b/include/linux/soc/qcom/wcnss_ctrl.h
index eab64976a73b..a4dd4d7c711d 100644
--- a/include/linux/soc/qcom/wcnss_ctrl.h
+++ b/include/linux/soc/qcom/wcnss_ctrl.h
@@ -1,16 +1,19 @@
#ifndef __WCNSS_CTRL_H__
#define __WCNSS_CTRL_H__
-#include <linux/soc/qcom/smd.h>
+#include <linux/rpmsg.h>
#if IS_ENABLED(CONFIG_QCOM_WCNSS_CTRL)
-struct qcom_smd_channel *qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb);
+struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss, const char *name,
+ rpmsg_rx_cb_t cb, void *priv);
#else
-static inline struct qcom_smd_channel*
-qcom_wcnss_open_channel(void *wcnss, const char *name, qcom_smd_cb_t cb)
+static inline struct rpmsg_endpoint *qcom_wcnss_open_channel(void *wcnss,
+ const char *name,
+ rpmsg_rx_cb_t cb,
+ void *priv)
{
WARN_ON(1);
return ERR_PTR(-ENXIO);
diff --git a/include/linux/soc/samsung/exynos-regs-pmu.h b/include/linux/soc/samsung/exynos-regs-pmu.h
index 49df0a01a2cc..bebdde5dccd6 100644
--- a/include/linux/soc/samsung/exynos-regs-pmu.h
+++ b/include/linux/soc/samsung/exynos-regs-pmu.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2010-2012 Samsung Electronics Co., Ltd.
+ * Copyright (c) 2010-2015 Samsung Electronics Co., Ltd.
* http://www.samsung.com
*
* EXYNOS - Power management unit definition
@@ -50,6 +50,14 @@
#define S5P_WAKEUP_MASK 0x0608
#define S5P_WAKEUP_MASK2 0x0614
+/* MIPI_PHYn_CONTROL, valid for Exynos3250, Exynos4, Exynos5250 and Exynos5433 */
+#define EXYNOS4_MIPI_PHY_CONTROL(n) (0x0710 + (n) * 4)
+/* Phy enable bit, common for all phy registers, not only MIPI */
+#define EXYNOS4_PHY_ENABLE (1 << 0)
+#define EXYNOS4_MIPI_PHY_SRESETN (1 << 1)
+#define EXYNOS4_MIPI_PHY_MRESETN (1 << 2)
+#define EXYNOS4_MIPI_PHY_RESET_MASK (3 << 1)
+
#define S5P_INFORM0 0x0800
#define S5P_INFORM1 0x0804
#define S5P_INFORM5 0x0814
@@ -342,6 +350,8 @@
#define EXYNOS5_AUTO_WDTRESET_DISABLE 0x0408
#define EXYNOS5_MASK_WDTRESET_REQUEST 0x040C
+#define EXYNOS5_USBDRD_PHY_CONTROL 0x0704
+#define EXYNOS5_DPTX_PHY_CONTROL 0x0720
#define EXYNOS5_USE_RETENTION BIT(4)
#define EXYNOS5_SYS_WDTRESET (1 << 20)
@@ -495,6 +505,9 @@
#define EXYNOS5420_KFC_CORE_RESET(_nr) \
((EXYNOS5420_KFC_CORE_RESET0 | EXYNOS5420_KFC_ETM_RESET0) << (_nr))
+#define EXYNOS5420_USBDRD1_PHY_CONTROL 0x0708
+#define EXYNOS5420_MIPI_PHY_CONTROL(n) (0x0714 + (n) * 4)
+#define EXYNOS5420_DPTX_PHY_CONTROL 0x0728
#define EXYNOS5420_ARM_CORE2_SYS_PWR_REG 0x1020
#define EXYNOS5420_DIS_IRQ_ARM_CORE2_LOCAL_SYS_PWR_REG 0x1024
#define EXYNOS5420_DIS_IRQ_ARM_CORE2_CENTRAL_SYS_PWR_REG 0x1028
@@ -632,6 +645,7 @@
| EXYNOS5420_KFC_USE_STANDBY_WFI3)
/* For EXYNOS5433 */
+#define EXYNOS5433_USBHOST30_PHY_CONTROL (0x0728)
#define EXYNOS5433_PAD_RETENTION_AUD_OPTION (0x3028)
#define EXYNOS5433_PAD_RETENTION_MMC2_OPTION (0x30C8)
#define EXYNOS5433_PAD_RETENTION_TOP_OPTION (0x3108)
diff --git a/include/linux/sock_diag.h b/include/linux/sock_diag.h
index a0596ca0e80a..a2f8109bb215 100644
--- a/include/linux/sock_diag.h
+++ b/include/linux/sock_diag.h
@@ -24,6 +24,7 @@ void sock_diag_unregister(const struct sock_diag_handler *h);
void sock_diag_register_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
void sock_diag_unregister_inet_compat(int (*fn)(struct sk_buff *skb, struct nlmsghdr *nlh));
+u64 sock_gen_cookie(struct sock *sk);
int sock_diag_check_cookie(struct sock *sk, const __u32 *cookie);
void sock_diag_save_cookie(struct sock *sk, __u32 *cookie);
diff --git a/include/linux/spi/spi.h b/include/linux/spi/spi.h
index 75c6bd0ac605..935bd2854ff1 100644
--- a/include/linux/spi/spi.h
+++ b/include/linux/spi/spi.h
@@ -23,6 +23,7 @@
#include <linux/scatterlist.h>
struct dma_chan;
+struct property_entry;
struct spi_master;
struct spi_transfer;
struct spi_flash_read_message;
@@ -375,6 +376,8 @@ static inline void spi_unregister_driver(struct spi_driver *sdrv)
* @unprepare_message: undo any work done by prepare_message().
* @spi_flash_read: to support spi-controller hardwares that provide
* accelerated interface to read from flash devices.
+ * @spi_flash_can_dma: analogous to can_dma() interface, but for
+ * controllers implementing spi_flash_read.
* @flash_read_supported: spi device supports flash read
* @cs_gpios: Array of GPIOs to use as chip select lines; one per CS
* number. Any individual value may be -ENOENT for CS lines that
@@ -538,6 +541,8 @@ struct spi_master {
struct spi_message *message);
int (*spi_flash_read)(struct spi_device *spi,
struct spi_flash_read_message *msg);
+ bool (*spi_flash_can_dma)(struct spi_device *spi,
+ struct spi_flash_read_message *msg);
bool (*flash_read_supported)(struct spi_device *spi);
/*
@@ -891,7 +896,7 @@ static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags
unsigned i;
struct spi_transfer *t = (struct spi_transfer *)(m + 1);
- INIT_LIST_HEAD(&m->transfers);
+ spi_message_init_no_memset(m);
for (i = 0; i < ntrans; i++, t++)
spi_message_add_tail(t, m);
}
@@ -1209,6 +1214,7 @@ int spi_flash_read(struct spi_device *spi,
* @modalias: Initializes spi_device.modalias; identifies the driver.
* @platform_data: Initializes spi_device.platform_data; the particular
* data stored there is driver-specific.
+ * @properties: Additional device properties for the device.
* @controller_data: Initializes spi_device.controller_data; some
* controllers need hints about hardware setup, e.g. for DMA.
* @irq: Initializes spi_device.irq; depends on how the board is wired.
@@ -1241,10 +1247,12 @@ struct spi_board_info {
*
* platform_data goes to spi_device.dev.platform_data,
* controller_data goes to spi_device.controller_data,
+ * device properties are copied and attached to spi_device,
* irq is copied too
*/
char modalias[SPI_NAME_SIZE];
const void *platform_data;
+ const struct property_entry *properties;
void *controller_data;
int irq;
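A sketch of board code using the new @properties member; the property names, values and modalias are illustrative. Per the comment above, the properties are copied and attached to the spi_device when the board info is instantiated.

#include <linux/spi/spi.h>
#include <linux/property.h>

static const struct property_entry example_spi_properties[] = {
	PROPERTY_ENTRY_U32("spi-max-frequency", 10000000),
	PROPERTY_ENTRY_BOOL("wakeup-source"),
	{ }
};

static const struct spi_board_info example_board_info = {
	.modalias	= "example-sensor",
	.bus_num	= 0,
	.chip_select	= 1,
	.properties	= example_spi_properties,
};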
diff --git a/include/linux/splice.h b/include/linux/splice.h
index 00a21166e268..db42746bdfea 100644
--- a/include/linux/splice.h
+++ b/include/linux/splice.h
@@ -20,6 +20,8 @@
#define SPLICE_F_MORE (0x04) /* expect more data */
#define SPLICE_F_GIFT (0x08) /* pages passed in are a gift */
+#define SPLICE_F_ALL (SPLICE_F_MOVE|SPLICE_F_NONBLOCK|SPLICE_F_MORE|SPLICE_F_GIFT)
+
/*
* Passed to the actors
*/
@@ -55,7 +57,6 @@ struct splice_pipe_desc {
struct partial_page *partial; /* pages[] may not be contig */
int nr_pages; /* number of populated pages in map */
unsigned int nr_pages_max; /* pages[] & partial[] arrays size */
- unsigned int flags; /* splice flags */
const struct pipe_buf_operations *ops;/* ops associated with output pipe */
void (*spd_release)(struct splice_pipe_desc *, unsigned int);
};
@@ -82,7 +83,6 @@ extern ssize_t splice_direct_to_actor(struct file *, struct splice_desc *,
*/
extern int splice_grow_spd(const struct pipe_inode_info *, struct splice_pipe_desc *);
extern void splice_shrink_spd(struct splice_pipe_desc *);
-extern void spd_release_page(struct splice_pipe_desc *, unsigned int);
extern const struct pipe_buf_operations page_cache_pipe_buf_ops;
extern const struct pipe_buf_operations default_pipe_buf_ops;
diff --git a/include/linux/srcu.h b/include/linux/srcu.h
index a598cf3ac70c..167ad8831aaf 100644
--- a/include/linux/srcu.h
+++ b/include/linux/srcu.h
@@ -22,7 +22,7 @@
* Lai Jiangshan <laijs@cn.fujitsu.com>
*
* For detailed explanation of Read-Copy Update mechanism see -
- * Documentation/RCU/ *.txt
+ * Documentation/RCU/ *.txt
*
*/
@@ -32,35 +32,9 @@
#include <linux/mutex.h>
#include <linux/rcupdate.h>
#include <linux/workqueue.h>
+#include <linux/rcu_segcblist.h>
-struct srcu_array {
- unsigned long lock_count[2];
- unsigned long unlock_count[2];
-};
-
-struct rcu_batch {
- struct rcu_head *head, **tail;
-};
-
-#define RCU_BATCH_INIT(name) { NULL, &(name.head) }
-
-struct srcu_struct {
- unsigned long completed;
- struct srcu_array __percpu *per_cpu_ref;
- spinlock_t queue_lock; /* protect ->batch_queue, ->running */
- bool running;
- /* callbacks just queued */
- struct rcu_batch batch_queue;
- /* callbacks try to do the first check_zero */
- struct rcu_batch batch_check0;
- /* callbacks done with the first check_zero and the flip */
- struct rcu_batch batch_check1;
- struct rcu_batch batch_done;
- struct delayed_work work;
-#ifdef CONFIG_DEBUG_LOCK_ALLOC
- struct lockdep_map dep_map;
-#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-};
+struct srcu_struct;
#ifdef CONFIG_DEBUG_LOCK_ALLOC
@@ -82,46 +56,15 @@ int init_srcu_struct(struct srcu_struct *sp);
#define __SRCU_DEP_MAP_INIT(srcu_name)
#endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
-void process_srcu(struct work_struct *work);
-
-#define __SRCU_STRUCT_INIT(name) \
- { \
- .completed = -300, \
- .per_cpu_ref = &name##_srcu_array, \
- .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \
- .running = false, \
- .batch_queue = RCU_BATCH_INIT(name.batch_queue), \
- .batch_check0 = RCU_BATCH_INIT(name.batch_check0), \
- .batch_check1 = RCU_BATCH_INIT(name.batch_check1), \
- .batch_done = RCU_BATCH_INIT(name.batch_done), \
- .work = __DELAYED_WORK_INITIALIZER(name.work, process_srcu, 0),\
- __SRCU_DEP_MAP_INIT(name) \
- }
-
-/*
- * Define and initialize a srcu struct at build time.
- * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it.
- *
- * Note that although DEFINE_STATIC_SRCU() hides the name from other
- * files, the per-CPU variable rules nevertheless require that the
- * chosen name be globally unique. These rules also prohibit use of
- * DEFINE_STATIC_SRCU() within a function. If these rules are too
- * restrictive, declare the srcu_struct manually. For example, in
- * each file:
- *
- * static struct srcu_struct my_srcu;
- *
- * Then, before the first use of each my_srcu, manually initialize it:
- *
- * init_srcu_struct(&my_srcu);
- *
- * See include/linux/percpu-defs.h for the rules on per-CPU variables.
- */
-#define __DEFINE_SRCU(name, is_static) \
- static DEFINE_PER_CPU(struct srcu_array, name##_srcu_array);\
- is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
-#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
-#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
+#ifdef CONFIG_TINY_SRCU
+#include <linux/srcutiny.h>
+#elif defined(CONFIG_TREE_SRCU)
+#include <linux/srcutree.h>
+#elif defined(CONFIG_CLASSIC_SRCU)
+#include <linux/srcuclassic.h>
+#else
+#error "Unknown SRCU implementation specified to kernel configuration"
+#endif
/**
* call_srcu() - Queue a callback for invocation after an SRCU grace period
@@ -147,9 +90,6 @@ void cleanup_srcu_struct(struct srcu_struct *sp);
int __srcu_read_lock(struct srcu_struct *sp) __acquires(sp);
void __srcu_read_unlock(struct srcu_struct *sp, int idx) __releases(sp);
void synchronize_srcu(struct srcu_struct *sp);
-void synchronize_srcu_expedited(struct srcu_struct *sp);
-unsigned long srcu_batches_completed(struct srcu_struct *sp);
-void srcu_barrier(struct srcu_struct *sp);
#ifdef CONFIG_DEBUG_LOCK_ALLOC
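
Whichever SRCU implementation the configuration selects above, the common reader/updater API in this header is unchanged. As a minimal usage sketch (illustrative only, not part of the patch; the protected structure and names are hypothetical):

#include <linux/slab.h>
#include <linux/srcu.h>

struct cfg {
	int value;
};

static struct cfg __rcu *cur_cfg;	/* SRCU-protected pointer */

DEFINE_STATIC_SRCU(cfg_srcu);		/* build-time initialized SRCU domain */

static int read_value(void)
{
	struct cfg *c;
	int idx, v = -1;

	idx = srcu_read_lock(&cfg_srcu);	/* SRCU readers may block */
	c = srcu_dereference(cur_cfg, &cfg_srcu);
	if (c)
		v = c->value;
	srcu_read_unlock(&cfg_srcu, idx);
	return v;
}

static void update_value(int v)		/* updates serialized by the caller */
{
	struct cfg *newc = kzalloc(sizeof(*newc), GFP_KERNEL);
	struct cfg *old;

	if (!newc)
		return;
	newc->value = v;
	old = rcu_dereference_protected(cur_cfg, 1);
	rcu_assign_pointer(cur_cfg, newc);
	synchronize_srcu(&cfg_srcu);		/* wait for pre-existing readers */
	kfree(old);
}
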
diff --git a/include/linux/srcuclassic.h b/include/linux/srcuclassic.h
new file mode 100644
index 000000000000..5753f7322262
--- /dev/null
+++ b/include/linux/srcuclassic.h
@@ -0,0 +1,115 @@
+/*
+ * Sleepable Read-Copy Update mechanism for mutual exclusion,
+ * classic v4.11 variant.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright (C) IBM Corporation, 2017
+ *
+ * Author: Paul McKenney <paulmck@us.ibm.com>
+ */
+
+#ifndef _LINUX_SRCU_CLASSIC_H
+#define _LINUX_SRCU_CLASSIC_H
+
+struct srcu_array {
+ unsigned long lock_count[2];
+ unsigned long unlock_count[2];
+};
+
+struct rcu_batch {
+ struct rcu_head *head, **tail;
+};
+
+#define RCU_BATCH_INIT(name) { NULL, &(name.head) }
+
+struct srcu_struct {
+ unsigned long completed;
+ struct srcu_array __percpu *per_cpu_ref;
+ spinlock_t queue_lock; /* protect ->batch_queue, ->running */
+ bool running;
+ /* callbacks just queued */
+ struct rcu_batch batch_queue;
+ /* callbacks try to do the first check_zero */
+ struct rcu_batch batch_check0;
+ /* callbacks done with the first check_zero and the flip */
+ struct rcu_batch batch_check1;
+ struct rcu_batch batch_done;
+ struct delayed_work work;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+};
+
+void process_srcu(struct work_struct *work);
+
+#define __SRCU_STRUCT_INIT(name) \
+ { \
+ .completed = -300, \
+ .per_cpu_ref = &name##_srcu_array, \
+ .queue_lock = __SPIN_LOCK_UNLOCKED(name.queue_lock), \
+ .running = false, \
+ .batch_queue = RCU_BATCH_INIT(name.batch_queue), \
+ .batch_check0 = RCU_BATCH_INIT(name.batch_check0), \
+ .batch_check1 = RCU_BATCH_INIT(name.batch_check1), \
+ .batch_done = RCU_BATCH_INIT(name.batch_done), \
+ .work = __DELAYED_WORK_INITIALIZER(name.work, process_srcu, 0),\
+ __SRCU_DEP_MAP_INIT(name) \
+ }
+
+/*
+ * Define and initialize a srcu struct at build time.
+ * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it.
+ *
+ * Note that although DEFINE_STATIC_SRCU() hides the name from other
+ * files, the per-CPU variable rules nevertheless require that the
+ * chosen name be globally unique. These rules also prohibit use of
+ * DEFINE_STATIC_SRCU() within a function. If these rules are too
+ * restrictive, declare the srcu_struct manually. For example, in
+ * each file:
+ *
+ * static struct srcu_struct my_srcu;
+ *
+ * Then, before the first use of each my_srcu, manually initialize it:
+ *
+ * init_srcu_struct(&my_srcu);
+ *
+ * See include/linux/percpu-defs.h for the rules on per-CPU variables.
+ */
+#define __DEFINE_SRCU(name, is_static) \
+ static DEFINE_PER_CPU(struct srcu_array, name##_srcu_array);\
+ is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
+#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
+#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
+
+void synchronize_srcu_expedited(struct srcu_struct *sp);
+void srcu_barrier(struct srcu_struct *sp);
+unsigned long srcu_batches_completed(struct srcu_struct *sp);
+
+static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
+ struct srcu_struct *sp, int *flags,
+ unsigned long *gpnum,
+ unsigned long *completed)
+{
+ if (test_type != SRCU_FLAVOR)
+ return;
+ *flags = 0;
+ *completed = sp->completed;
+ *gpnum = *completed;
+ if (sp->batch_queue.head || sp->batch_check0.head || sp->batch_check1.head)
+ (*gpnum)++;
+}
+
+#endif
diff --git a/include/linux/srcutiny.h b/include/linux/srcutiny.h
new file mode 100644
index 000000000000..42311ee0334f
--- /dev/null
+++ b/include/linux/srcutiny.h
@@ -0,0 +1,93 @@
+/*
+ * Sleepable Read-Copy Update mechanism for mutual exclusion,
+ * tiny variant.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright (C) IBM Corporation, 2017
+ *
+ * Author: Paul McKenney <paulmck@us.ibm.com>
+ */
+
+#ifndef _LINUX_SRCU_TINY_H
+#define _LINUX_SRCU_TINY_H
+
+#include <linux/swait.h>
+
+struct srcu_struct {
+ int srcu_lock_nesting[2]; /* srcu_read_lock() nesting depth. */
+ struct swait_queue_head srcu_wq;
+ /* Last srcu_read_unlock() wakes GP. */
+ unsigned long srcu_gp_seq; /* GP seq # for callback tagging. */
+ struct rcu_segcblist srcu_cblist;
+ /* Pending SRCU callbacks. */
+ int srcu_idx; /* Current reader array element. */
+ bool srcu_gp_running; /* GP workqueue running? */
+ bool srcu_gp_waiting; /* GP waiting for readers? */
+ struct work_struct srcu_work; /* For driving grace periods. */
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+};
+
+void srcu_drive_gp(struct work_struct *wp);
+
+#define __SRCU_STRUCT_INIT(name) \
+{ \
+ .srcu_wq = __SWAIT_QUEUE_HEAD_INITIALIZER(name.srcu_wq), \
+ .srcu_cblist = RCU_SEGCBLIST_INITIALIZER(name.srcu_cblist), \
+ .srcu_work = __WORK_INITIALIZER(name.srcu_work, srcu_drive_gp), \
+ __SRCU_DEP_MAP_INIT(name) \
+}
+
+/*
+ * This odd _STATIC_ arrangement is needed for API compatibility with
+ * Tree SRCU, which needs some per-CPU data.
+ */
+#define DEFINE_SRCU(name) \
+ struct srcu_struct name = __SRCU_STRUCT_INIT(name)
+#define DEFINE_STATIC_SRCU(name) \
+ static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
+
+void synchronize_srcu(struct srcu_struct *sp);
+
+static inline void synchronize_srcu_expedited(struct srcu_struct *sp)
+{
+ synchronize_srcu(sp);
+}
+
+static inline void srcu_barrier(struct srcu_struct *sp)
+{
+ synchronize_srcu(sp);
+}
+
+static inline unsigned long srcu_batches_completed(struct srcu_struct *sp)
+{
+ return 0;
+}
+
+static inline void srcutorture_get_gp_data(enum rcutorture_type test_type,
+ struct srcu_struct *sp, int *flags,
+ unsigned long *gpnum,
+ unsigned long *completed)
+{
+ if (test_type != SRCU_FLAVOR)
+ return;
+ *flags = 0;
+ *completed = sp->srcu_gp_seq;
+ *gpnum = *completed;
+}
+
+#endif
diff --git a/include/linux/srcutree.h b/include/linux/srcutree.h
new file mode 100644
index 000000000000..32e86d85fd11
--- /dev/null
+++ b/include/linux/srcutree.h
@@ -0,0 +1,150 @@
+/*
+ * Sleepable Read-Copy Update mechanism for mutual exclusion,
+ * tree variant.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; either version 2 of the License, or
+ * (at your option) any later version.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program; if not, you can access it online at
+ * http://www.gnu.org/licenses/gpl-2.0.html.
+ *
+ * Copyright (C) IBM Corporation, 2017
+ *
+ * Author: Paul McKenney <paulmck@us.ibm.com>
+ */
+
+#ifndef _LINUX_SRCU_TREE_H
+#define _LINUX_SRCU_TREE_H
+
+#include <linux/rcu_node_tree.h>
+#include <linux/completion.h>
+
+struct srcu_node;
+struct srcu_struct;
+
+/*
+ * Per-CPU structure feeding into leaf srcu_node, similar in function
+ * to rcu_node.
+ */
+struct srcu_data {
+ /* Read-side state. */
+ unsigned long srcu_lock_count[2]; /* Locks per CPU. */
+ unsigned long srcu_unlock_count[2]; /* Unlocks per CPU. */
+
+ /* Update-side state. */
+ spinlock_t lock ____cacheline_internodealigned_in_smp;
+ struct rcu_segcblist srcu_cblist; /* List of callbacks.*/
+ unsigned long srcu_gp_seq_needed; /* Furthest future GP needed. */
+ unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
+ bool srcu_cblist_invoking; /* Invoking these CBs? */
+ struct delayed_work work; /* Context for CB invoking. */
+ struct rcu_head srcu_barrier_head; /* For srcu_barrier() use. */
+ struct srcu_node *mynode; /* Leaf srcu_node. */
+ unsigned long grpmask; /* Mask for leaf srcu_node */
+ /* ->srcu_data_have_cbs[]. */
+ int cpu;
+ struct srcu_struct *sp;
+};
+
+/*
+ * Node in SRCU combining tree, similar in function to rcu_data.
+ */
+struct srcu_node {
+ spinlock_t lock;
+ unsigned long srcu_have_cbs[4]; /* GP seq for children */
+ /* having CBs, but only */
+ /* is > ->srcu_gp_seq. */
+ unsigned long srcu_data_have_cbs[4]; /* Which srcu_data structs */
+ /* have CBs for given GP? */
+ unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
+ struct srcu_node *srcu_parent; /* Next up in tree. */
+ int grplo; /* Least CPU for node. */
+ int grphi; /* Biggest CPU for node. */
+};
+
+/*
+ * Per-SRCU-domain structure, similar in function to rcu_state.
+ */
+struct srcu_struct {
+ struct srcu_node node[NUM_RCU_NODES]; /* Combining tree. */
+ struct srcu_node *level[RCU_NUM_LVLS + 1];
+ /* First node at each level. */
+ struct mutex srcu_cb_mutex; /* Serialize CB preparation. */
+ spinlock_t gp_lock; /* protect ->srcu_cblist */
+ struct mutex srcu_gp_mutex; /* Serialize GP work. */
+ unsigned int srcu_idx; /* Current rdr array element. */
+ unsigned long srcu_gp_seq; /* Grace-period seq #. */
+ unsigned long srcu_gp_seq_needed; /* Latest gp_seq needed. */
+ unsigned long srcu_gp_seq_needed_exp; /* Furthest future exp GP. */
+ unsigned long srcu_last_gp_end; /* Last GP end timestamp (ns) */
+ struct srcu_data __percpu *sda; /* Per-CPU srcu_data array. */
+ unsigned long srcu_barrier_seq; /* srcu_barrier seq #. */
+ struct mutex srcu_barrier_mutex; /* Serialize barrier ops. */
+ struct completion srcu_barrier_completion;
+ /* Awaken barrier rq at end. */
+ atomic_t srcu_barrier_cpu_cnt; /* # CPUs not yet posting a */
+ /* callback for the barrier */
+ /* operation. */
+ struct delayed_work work;
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+ struct lockdep_map dep_map;
+#endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
+};
+
+/* Values for state variable (bottom bits of ->srcu_gp_seq). */
+#define SRCU_STATE_IDLE 0
+#define SRCU_STATE_SCAN1 1
+#define SRCU_STATE_SCAN2 2
+
+void process_srcu(struct work_struct *work);
+
+#define __SRCU_STRUCT_INIT(name) \
+ { \
+ .sda = &name##_srcu_data, \
+ .gp_lock = __SPIN_LOCK_UNLOCKED(name.gp_lock), \
+ .srcu_gp_seq_needed = 0 - 1, \
+ __SRCU_DEP_MAP_INIT(name) \
+ }
+
+/*
+ * Define and initialize a srcu struct at build time.
+ * Do -not- call init_srcu_struct() nor cleanup_srcu_struct() on it.
+ *
+ * Note that although DEFINE_STATIC_SRCU() hides the name from other
+ * files, the per-CPU variable rules nevertheless require that the
+ * chosen name be globally unique. These rules also prohibit use of
+ * DEFINE_STATIC_SRCU() within a function. If these rules are too
+ * restrictive, declare the srcu_struct manually. For example, in
+ * each file:
+ *
+ * static struct srcu_struct my_srcu;
+ *
+ * Then, before the first use of each my_srcu, manually initialize it:
+ *
+ * init_srcu_struct(&my_srcu);
+ *
+ * See include/linux/percpu-defs.h for the rules on per-CPU variables.
+ */
+#define __DEFINE_SRCU(name, is_static) \
+ static DEFINE_PER_CPU(struct srcu_data, name##_srcu_data);\
+ is_static struct srcu_struct name = __SRCU_STRUCT_INIT(name)
+#define DEFINE_SRCU(name) __DEFINE_SRCU(name, /* not static */)
+#define DEFINE_STATIC_SRCU(name) __DEFINE_SRCU(name, static)
+
+void synchronize_srcu_expedited(struct srcu_struct *sp);
+void srcu_barrier(struct srcu_struct *sp);
+unsigned long srcu_batches_completed(struct srcu_struct *sp);
+
+void srcutorture_get_gp_data(enum rcutorture_type test_type,
+ struct srcu_struct *sp, int *flags,
+ unsigned long *gpnum, unsigned long *completed);
+
+#endif
diff --git a/include/linux/stacktrace.h b/include/linux/stacktrace.h
index 0a34489a46b6..4205f71a5f0e 100644
--- a/include/linux/stacktrace.h
+++ b/include/linux/stacktrace.h
@@ -18,6 +18,8 @@ extern void save_stack_trace_regs(struct pt_regs *regs,
struct stack_trace *trace);
extern void save_stack_trace_tsk(struct task_struct *tsk,
struct stack_trace *trace);
+extern int save_stack_trace_tsk_reliable(struct task_struct *tsk,
+ struct stack_trace *trace);
extern void print_stack_trace(struct stack_trace *trace, int spaces);
extern int snprint_stack_trace(char *buf, size_t size,
@@ -29,12 +31,13 @@ extern void save_stack_trace_user(struct stack_trace *trace);
# define save_stack_trace_user(trace) do { } while (0)
#endif
-#else
+#else /* !CONFIG_STACKTRACE */
# define save_stack_trace(trace) do { } while (0)
# define save_stack_trace_tsk(tsk, trace) do { } while (0)
# define save_stack_trace_user(trace) do { } while (0)
# define print_stack_trace(trace, spaces) do { } while (0)
# define snprint_stack_trace(buf, size, trace, spaces) do { } while (0)
-#endif
+# define save_stack_trace_tsk_reliable(tsk, trace) ({ -ENOSYS; })
+#endif /* CONFIG_STACKTRACE */
-#endif
+#endif /* __LINUX_STACKTRACE_H */
diff --git a/include/linux/stmmac.h b/include/linux/stmmac.h
index fc273e9d5f67..3921cb9dfadb 100644
--- a/include/linux/stmmac.h
+++ b/include/linux/stmmac.h
@@ -28,6 +28,9 @@
#include <linux/platform_device.h>
+#define MTL_MAX_RX_QUEUES 8
+#define MTL_MAX_TX_QUEUES 8
+
#define STMMAC_RX_COE_NONE 0
#define STMMAC_RX_COE_TYPE1 1
#define STMMAC_RX_COE_TYPE2 2
@@ -44,6 +47,18 @@
#define STMMAC_CSR_150_250M 0x4 /* MDC = clk_scr_i/102 */
#define STMMAC_CSR_250_300M 0x5 /* MDC = clk_scr_i/122 */
+/* MTL algorithms identifiers */
+#define MTL_TX_ALGORITHM_WRR 0x0
+#define MTL_TX_ALGORITHM_WFQ 0x1
+#define MTL_TX_ALGORITHM_DWRR 0x2
+#define MTL_TX_ALGORITHM_SP 0x3
+#define MTL_RX_ALGORITHM_SP 0x4
+#define MTL_RX_ALGORITHM_WSP 0x5
+
+/* RX/TX Queue Mode */
+#define MTL_QUEUE_AVB 0x0
+#define MTL_QUEUE_DCB 0x1
+
/* The MDC clock could be set higher than the IEEE 802.3
 * specified frequency limit of 2.5 MHz, by programming a clock divider
* of value different than the above defined values. The resultant MDIO
@@ -109,6 +124,26 @@ struct stmmac_axi {
bool axi_rb;
};
+struct stmmac_rxq_cfg {
+ u8 mode_to_use;
+ u8 chan;
+ u8 pkt_route;
+ bool use_prio;
+ u32 prio;
+};
+
+struct stmmac_txq_cfg {
+ u8 weight;
+ u8 mode_to_use;
+ /* Credit Base Shaper parameters */
+ u32 send_slope;
+ u32 idle_slope;
+ u32 high_credit;
+ u32 low_credit;
+ bool use_prio;
+ u32 prio;
+};
+
struct plat_stmmacenet_data {
int bus_id;
int phy_addr;
@@ -133,6 +168,12 @@ struct plat_stmmacenet_data {
int unicast_filter_entries;
int tx_fifo_size;
int rx_fifo_size;
+ u8 rx_queues_to_use;
+ u8 tx_queues_to_use;
+ u8 rx_sched_algorithm;
+ u8 tx_sched_algorithm;
+ struct stmmac_rxq_cfg rx_queues_cfg[MTL_MAX_RX_QUEUES];
+ struct stmmac_txq_cfg tx_queues_cfg[MTL_MAX_TX_QUEUES];
void (*fix_mac_speed)(void *priv, unsigned int speed);
int (*init)(struct platform_device *pdev, void *priv);
void (*exit)(struct platform_device *pdev, void *priv);
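
The new rx/tx queue fields are consumed from platform data by the stmmac core. A hypothetical glue-layer sketch of populating them follows; the queue counts, algorithms and Credit Based Shaper values are illustrative only:

#include <linux/stmmac.h>

static void example_setup_mtl_queues(struct plat_stmmacenet_data *plat)
{
	plat->tx_queues_to_use = 2;
	plat->rx_queues_to_use = 2;
	plat->tx_sched_algorithm = MTL_TX_ALGORITHM_WRR;
	plat->rx_sched_algorithm = MTL_RX_ALGORITHM_SP;

	plat->tx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
	plat->tx_queues_cfg[0].weight = 0x10;

	plat->tx_queues_cfg[1].mode_to_use = MTL_QUEUE_AVB;
	plat->tx_queues_cfg[1].weight = 0x1;
	plat->tx_queues_cfg[1].send_slope = 0x1c00;	/* CBS parameters */
	plat->tx_queues_cfg[1].idle_slope = 0x200;
	plat->tx_queues_cfg[1].high_credit = 0x1000;
	plat->tx_queues_cfg[1].low_credit = 0x3fff;

	plat->rx_queues_cfg[0].mode_to_use = MTL_QUEUE_DCB;
	plat->rx_queues_cfg[0].chan = 0;
	plat->rx_queues_cfg[1].mode_to_use = MTL_QUEUE_DCB;
	plat->rx_queues_cfg[1].chan = 1;
}
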
diff --git a/include/linux/string.h b/include/linux/string.h
index 26b6f6a66f83..537918f8a98e 100644
--- a/include/linux/string.h
+++ b/include/linux/string.h
@@ -114,6 +114,14 @@ extern int memcmp(const void *,const void *,__kernel_size_t);
#ifndef __HAVE_ARCH_MEMCHR
extern void * memchr(const void *,int,__kernel_size_t);
#endif
+#ifndef __HAVE_ARCH_MEMCPY_MCSAFE
+static inline __must_check int memcpy_mcsafe(void *dst, const void *src,
+ size_t cnt)
+{
+ memcpy(dst, src, cnt);
+ return 0;
+}
+#endif
void *memchr_inv(const void *s, int c, size_t n);
char *strreplace(char *s, char old, char new);
@@ -135,6 +143,16 @@ static inline int strtobool(const char *s, bool *res)
}
int match_string(const char * const *array, size_t n, const char *string);
+int __sysfs_match_string(const char * const *array, size_t n, const char *s);
+
+/**
+ * sysfs_match_string - matches given string in an array
+ * @_a: array of strings
+ * @_s: string to match with
+ *
+ * Helper for __sysfs_match_string(). Calculates the size of @_a automatically.
+ */
+#define sysfs_match_string(_a, _s) __sysfs_match_string(_a, ARRAY_SIZE(_a), _s)
#ifdef CONFIG_BINARY_PRINTF
int vbin_printf(u32 *bin_buf, size_t size, const char *fmt, va_list args);
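
sysfs_match_string() is aimed at sysfs store callbacks, where it returns the index of the matching entry or a negative errno. A minimal sketch of the intended use; the mode list and the attribute are hypothetical:

#include <linux/device.h>
#include <linux/string.h>

static const char * const led_modes[] = { "off", "on", "blink" };

static ssize_t mode_store(struct device *dev, struct device_attribute *attr,
			  const char *buf, size_t count)
{
	int idx;

	idx = sysfs_match_string(led_modes, buf);	/* index or -EINVAL */
	if (idx < 0)
		return idx;

	/* ... apply led_modes[idx] to the hypothetical hardware ... */
	return count;
}
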
diff --git a/include/linux/suspend.h b/include/linux/suspend.h
index d9718378a8be..0b1cf32edfd7 100644
--- a/include/linux/suspend.h
+++ b/include/linux/suspend.h
@@ -189,6 +189,8 @@ struct platform_suspend_ops {
struct platform_freeze_ops {
int (*begin)(void);
int (*prepare)(void);
+ void (*wake)(void);
+ void (*sync)(void);
void (*restore)(void);
void (*end)(void);
};
@@ -428,7 +430,8 @@ extern unsigned int pm_wakeup_irq;
extern bool pm_wakeup_pending(void);
extern void pm_system_wakeup(void);
-extern void pm_wakeup_clear(void);
+extern void pm_system_cancel_wakeup(void);
+extern void pm_wakeup_clear(bool reset);
extern void pm_system_irq_wakeup(unsigned int irq_number);
extern bool pm_get_wakeup_count(unsigned int *count, bool block);
extern bool pm_save_wakeup_count(unsigned int count);
@@ -478,7 +481,7 @@ static inline int unregister_pm_notifier(struct notifier_block *nb)
static inline bool pm_wakeup_pending(void) { return false; }
static inline void pm_system_wakeup(void) {}
-static inline void pm_wakeup_clear(void) {}
+static inline void pm_wakeup_clear(bool reset) {}
static inline void pm_system_irq_wakeup(unsigned int irq_number) {}
static inline void lock_system_sleep(void) {}
diff --git a/include/linux/swap.h b/include/linux/swap.h
index 45e91dd6716d..ba5882419a7d 100644
--- a/include/linux/swap.h
+++ b/include/linux/swap.h
@@ -279,7 +279,7 @@ extern void lru_add_drain_cpu(int cpu);
extern void lru_add_drain_all(void);
extern void rotate_reclaimable_page(struct page *page);
extern void deactivate_file_page(struct page *page);
-extern void deactivate_page(struct page *page);
+extern void mark_page_lazyfree(struct page *page);
extern void swap_setup(void);
extern void add_page_to_unevictable_list(struct page *page);
@@ -411,9 +411,6 @@ struct backing_dev_info;
extern int init_swap_address_space(unsigned int type, unsigned long nr_pages);
extern void exit_swap_address_space(unsigned int type);
-extern int get_swap_slots(int n, swp_entry_t *slots);
-extern void swapcache_free_batch(swp_entry_t *entries, int n);
-
#else /* CONFIG_SWAP */
#define swap_address_space(entry) (NULL)
diff --git a/include/linux/sysctl.h b/include/linux/sysctl.h
index b7e82049fec7..80d07816def0 100644
--- a/include/linux/sysctl.h
+++ b/include/linux/sysctl.h
@@ -180,7 +180,6 @@ extern void setup_sysctl_set(struct ctl_table_set *p,
int (*is_seen)(struct ctl_table_set *));
extern void retire_sysctl_set(struct ctl_table_set *set);
-void register_sysctl_root(struct ctl_table_root *root);
struct ctl_table_header *__register_sysctl_table(
struct ctl_table_set *set,
const char *path, struct ctl_table *table);
diff --git a/include/linux/t10-pi.h b/include/linux/t10-pi.h
index 9fba9dd33544..9375d23a24e7 100644
--- a/include/linux/t10-pi.h
+++ b/include/linux/t10-pi.h
@@ -34,9 +34,9 @@ struct t10_pi_tuple {
};
-extern struct blk_integrity_profile t10_pi_type1_crc;
-extern struct blk_integrity_profile t10_pi_type1_ip;
-extern struct blk_integrity_profile t10_pi_type3_crc;
-extern struct blk_integrity_profile t10_pi_type3_ip;
+extern const struct blk_integrity_profile t10_pi_type1_crc;
+extern const struct blk_integrity_profile t10_pi_type1_ip;
+extern const struct blk_integrity_profile t10_pi_type3_crc;
+extern const struct blk_integrity_profile t10_pi_type3_ip;
#endif
diff --git a/include/linux/tcp.h b/include/linux/tcp.h
index cfc2d9506ce8..b6d5adcee8fc 100644
--- a/include/linux/tcp.h
+++ b/include/linux/tcp.h
@@ -233,12 +233,14 @@ struct tcp_sock {
u8 syn_data:1, /* SYN includes data */
syn_fastopen:1, /* SYN includes Fast Open option */
syn_fastopen_exp:1,/* SYN includes Fast Open exp. option */
+ syn_fastopen_ch:1, /* Active TFO re-enabling probe */
syn_data_acked:1,/* data in SYN is acked by SYN-ACK */
save_syn:1, /* Save headers of SYN packet */
is_cwnd_limited:1;/* forward progress limited by snd_cwnd? */
u32 tlp_high_seq; /* snd_nxt at the time of TLP retransmit. */
/* RTT measurement */
+ struct skb_mstamp tcp_mstamp; /* most recent packet received/sent */
u32 srtt_us; /* smoothed round trip time << 3 in usecs */
u32 mdev_us; /* medium deviation */
u32 mdev_max_us; /* maximal mdev for the last rtt period */
@@ -331,16 +333,16 @@ struct tcp_sock {
/* Receiver side RTT estimation */
struct {
- u32 rtt;
- u32 seq;
- u32 time;
+ u32 rtt_us;
+ u32 seq;
+ struct skb_mstamp time;
} rcv_rtt_est;
/* Receiver queue space */
struct {
- int space;
- u32 seq;
- u32 time;
+ int space;
+ u32 seq;
+ struct skb_mstamp time;
} rcvq_space;
/* TCP-specific MTU probe information. */
diff --git a/include/linux/tee_drv.h b/include/linux/tee_drv.h
new file mode 100644
index 000000000000..0f175b8f6456
--- /dev/null
+++ b/include/linux/tee_drv.h
@@ -0,0 +1,277 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __TEE_DRV_H
+#define __TEE_DRV_H
+
+#include <linux/types.h>
+#include <linux/idr.h>
+#include <linux/list.h>
+#include <linux/tee.h>
+
+/*
+ * The file describes the API provided by the generic TEE driver to the
+ * specific TEE driver.
+ */
+
+#define TEE_SHM_MAPPED 0x1 /* Memory mapped by the kernel */
+#define TEE_SHM_DMA_BUF 0x2 /* Memory with dma-buf handle */
+
+struct tee_device;
+struct tee_shm;
+struct tee_shm_pool;
+
+/**
+ * struct tee_context - driver specific context on file pointer data
+ * @teedev: pointer to this drivers struct tee_device
+ * @list_shm: List of shared memory object owned by this context
+ * @data: driver specific context data, managed by the driver
+ */
+struct tee_context {
+ struct tee_device *teedev;
+ struct list_head list_shm;
+ void *data;
+};
+
+struct tee_param_memref {
+ size_t shm_offs;
+ size_t size;
+ struct tee_shm *shm;
+};
+
+struct tee_param_value {
+ u64 a;
+ u64 b;
+ u64 c;
+};
+
+struct tee_param {
+ u64 attr;
+ union {
+ struct tee_param_memref memref;
+ struct tee_param_value value;
+ } u;
+};
+
+/**
+ * struct tee_driver_ops - driver operations vtable
+ * @get_version: returns version of driver
+ * @open: called when the device file is opened
+ * @release: release this open file
+ * @open_session: open a new session
+ * @close_session: close a session
+ * @invoke_func: invoke a trusted function
+ * @cancel_req: request cancel of an ongoing invoke or open
+ * @supp_recv: called for supplicant to get a command
+ * @supp_send: called for supplicant to send a response
+ */
+struct tee_driver_ops {
+ void (*get_version)(struct tee_device *teedev,
+ struct tee_ioctl_version_data *vers);
+ int (*open)(struct tee_context *ctx);
+ void (*release)(struct tee_context *ctx);
+ int (*open_session)(struct tee_context *ctx,
+ struct tee_ioctl_open_session_arg *arg,
+ struct tee_param *param);
+ int (*close_session)(struct tee_context *ctx, u32 session);
+ int (*invoke_func)(struct tee_context *ctx,
+ struct tee_ioctl_invoke_arg *arg,
+ struct tee_param *param);
+ int (*cancel_req)(struct tee_context *ctx, u32 cancel_id, u32 session);
+ int (*supp_recv)(struct tee_context *ctx, u32 *func, u32 *num_params,
+ struct tee_param *param);
+ int (*supp_send)(struct tee_context *ctx, u32 ret, u32 num_params,
+ struct tee_param *param);
+};
+
+/**
+ * struct tee_desc - Describes the TEE driver to the subsystem
+ * @name: name of driver
+ * @ops: driver operations vtable
+ * @owner: module providing the driver
+ * @flags: Extra properties of driver, defined by TEE_DESC_* below
+ */
+#define TEE_DESC_PRIVILEGED 0x1
+struct tee_desc {
+ const char *name;
+ const struct tee_driver_ops *ops;
+ struct module *owner;
+ u32 flags;
+};
+
+/**
+ * tee_device_alloc() - Allocate a new struct tee_device instance
+ * @teedesc: Descriptor for this driver
+ * @dev: Parent device for this device
+ * @pool: Shared memory pool, NULL if not used
+ * @driver_data: Private driver data for this device
+ *
+ * Allocates a new struct tee_device instance. The device is
+ * removed by tee_device_unregister().
+ *
+ * @returns a pointer to a 'struct tee_device' or an ERR_PTR on failure
+ */
+struct tee_device *tee_device_alloc(const struct tee_desc *teedesc,
+ struct device *dev,
+ struct tee_shm_pool *pool,
+ void *driver_data);
+
+/**
+ * tee_device_register() - Registers a TEE device
+ * @teedev: Device to register
+ *
+ * tee_device_unregister() needs to be called to remove the @teedev if
+ * this function fails.
+ *
+ * @returns < 0 on failure
+ */
+int tee_device_register(struct tee_device *teedev);
+
+/**
+ * tee_device_unregister() - Removes a TEE device
+ * @teedev: Device to unregister
+ *
+ * This function should be called to remove the @teedev even if
+ * tee_device_register() hasn't been called yet. Does nothing if
+ * @teedev is NULL.
+ */
+void tee_device_unregister(struct tee_device *teedev);
+
+/**
+ * struct tee_shm_pool_mem_info - holds information needed to create a shared
+ * memory pool
+ * @vaddr: Virtual address of start of pool
+ * @paddr: Physical address of start of pool
+ * @size: Size in bytes of the pool
+ */
+struct tee_shm_pool_mem_info {
+ unsigned long vaddr;
+ phys_addr_t paddr;
+ size_t size;
+};
+
+/**
+ * tee_shm_pool_alloc_res_mem() - Create a shared memory pool from reserved
+ * memory range
+ * @priv_info: Information for driver private shared memory pool
+ * @dmabuf_info: Information for dma-buf shared memory pool
+ *
+ * Start and end of pools must be page aligned.
+ *
+ * Allocation with the flag TEE_SHM_DMA_BUF set will use the range supplied
+ * in @dmabuf_info, others will use the range provided by @priv_info.
+ *
+ * @returns pointer to a 'struct tee_shm_pool' or an ERR_PTR on failure.
+ */
+struct tee_shm_pool *
+tee_shm_pool_alloc_res_mem(struct tee_shm_pool_mem_info *priv_info,
+ struct tee_shm_pool_mem_info *dmabuf_info);
+
+/**
+ * tee_shm_pool_free() - Free a shared memory pool
+ * @pool: The shared memory pool to free
+ *
+ * There must be no remaining shared memory allocated from this pool when
+ * this function is called.
+ */
+void tee_shm_pool_free(struct tee_shm_pool *pool);
+
+/**
+ * tee_get_drvdata() - Return driver_data pointer
+ * @returns the driver_data pointer supplied to tee_device_alloc().
+ */
+void *tee_get_drvdata(struct tee_device *teedev);
+
+/**
+ * tee_shm_alloc() - Allocate shared memory
+ * @ctx: Context that allocates the shared memory
+ * @size: Requested size of shared memory
+ * @flags: Flags setting properties for the requested shared memory.
+ *
+ * Memory allocated as global shared memory is automatically freed when the
+ * TEE file pointer is closed. The @flags field uses the bits defined by
+ * TEE_SHM_* above. TEE_SHM_MAPPED must currently always be set. If
+ * TEE_SHM_DMA_BUF is set, global shared memory will be allocated and
+ * associated with a dma-buf handle, else driver private memory is used.
+ *
+ * @returns a pointer to 'struct tee_shm'
+ */
+struct tee_shm *tee_shm_alloc(struct tee_context *ctx, size_t size, u32 flags);
+
+/**
+ * tee_shm_free() - Free shared memory
+ * @shm: Handle to shared memory to free
+ */
+void tee_shm_free(struct tee_shm *shm);
+
+/**
+ * tee_shm_put() - Decrease reference count on a shared memory handle
+ * @shm: Shared memory handle
+ */
+void tee_shm_put(struct tee_shm *shm);
+
+/**
+ * tee_shm_va2pa() - Get physical address of a virtual address
+ * @shm: Shared memory handle
+ * @va: Virtual address to translate
+ * @pa: Returned physical address
+ * @returns 0 on success and < 0 on failure
+ */
+int tee_shm_va2pa(struct tee_shm *shm, void *va, phys_addr_t *pa);
+
+/**
+ * tee_shm_pa2va() - Get virtual address of a physical address
+ * @shm: Shared memory handle
+ * @pa: Physical address to translate
+ * @va: Returned virtual address
+ * @returns 0 on success and < 0 on failure
+ */
+int tee_shm_pa2va(struct tee_shm *shm, phys_addr_t pa, void **va);
+
+/**
+ * tee_shm_get_va() - Get virtual address of a shared memory plus an offset
+ * @shm: Shared memory handle
+ * @offs: Offset from start of this shared memory
+ * @returns virtual address of the shared memory + offs if offs is within
+ * the bounds of this shared memory, else an ERR_PTR
+ */
+void *tee_shm_get_va(struct tee_shm *shm, size_t offs);
+
+/**
+ * tee_shm_get_pa() - Get physical address of a shared memory plus an offset
+ * @shm: Shared memory handle
+ * @offs: Offset from start of this shared memory
+ * @pa: Physical address to return
+ * @returns 0 if offs is within the bounds of this shared memory, else an
+ * error code.
+ */
+int tee_shm_get_pa(struct tee_shm *shm, size_t offs, phys_addr_t *pa);
+
+/**
+ * tee_shm_get_id() - Get id of a shared memory object
+ * @shm: Shared memory handle
+ * @returns id
+ */
+int tee_shm_get_id(struct tee_shm *shm);
+
+/**
+ * tee_shm_get_from_id() - Find shared memory object and increase reference
+ * count
+ * @ctx: Context owning the shared memory
+ * @id: Id of shared memory object
+ * @returns a pointer to 'struct tee_shm' on success or an ERR_PTR on failure
+ */
+struct tee_shm *tee_shm_get_from_id(struct tee_context *ctx, int id);
+
+#endif /*__TEE_DRV_H*/
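
A minimal sketch of how a specific TEE driver might register itself through this interface; everything here is hypothetical (back-end name, op bodies, NULL shared-memory pool) and only the registration flow with tee_device_alloc()/tee_device_register() is taken from the header above:

#include <linux/module.h>
#include <linux/tee_drv.h>

static void example_get_version(struct tee_device *teedev,
				struct tee_ioctl_version_data *vers)
{
	/* fill in *vers for the specific TEE */
}

static int example_open(struct tee_context *ctx) { return 0; }
static void example_release(struct tee_context *ctx) { }

static const struct tee_driver_ops example_ops = {
	.get_version	= example_get_version,
	.open		= example_open,
	.release	= example_release,
	/* .open_session/.close_session/.invoke_func/... for a real TEE */
};

static const struct tee_desc example_desc = {
	.name	= "example-tee",
	.ops	= &example_ops,
	.owner	= THIS_MODULE,
};

static struct tee_device *example_teedev;

static int __init example_init(void)
{
	/* NULL pool: this sketch registers no shared-memory pool */
	example_teedev = tee_device_alloc(&example_desc, NULL, NULL, NULL);
	if (IS_ERR(example_teedev))
		return PTR_ERR(example_teedev);
	return tee_device_register(example_teedev);
}

static void __exit example_exit(void)
{
	tee_device_unregister(example_teedev);
}

module_init(example_init);
module_exit(example_exit);
MODULE_LICENSE("GPL");
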
diff --git a/include/linux/thread_info.h b/include/linux/thread_info.h
index 58373875e8ee..d7d3ea637dd0 100644
--- a/include/linux/thread_info.h
+++ b/include/linux/thread_info.h
@@ -22,6 +22,18 @@
#endif
#include <linux/bitops.h>
+
+/*
+ * For per-arch arch_within_stack_frames() implementations, defined in
+ * asm/thread_info.h.
+ */
+enum {
+ BAD_STACK = -1,
+ NOT_STACK = 0,
+ GOOD_FRAME,
+ GOOD_STACK,
+};
+
#include <asm/thread_info.h>
#ifdef __KERNEL__
@@ -101,6 +113,10 @@ static inline void check_object_size(const void *ptr, unsigned long n,
{ }
#endif /* CONFIG_HARDENED_USERCOPY */
+#ifndef arch_setup_new_exec
+static inline void arch_setup_new_exec(void) { }
+#endif
+
#endif /* __KERNEL__ */
#endif /* _LINUX_THREAD_INFO_H */
diff --git a/include/linux/tick.h b/include/linux/tick.h
index a04fea19676f..fe01e68bf520 100644
--- a/include/linux/tick.h
+++ b/include/linux/tick.h
@@ -117,6 +117,7 @@ extern void tick_nohz_idle_enter(void);
extern void tick_nohz_idle_exit(void);
extern void tick_nohz_irq_exit(void);
extern ktime_t tick_nohz_get_sleep_length(void);
+extern unsigned long tick_nohz_get_idle_calls(void);
extern u64 get_cpu_idle_time_us(int cpu, u64 *last_update_time);
extern u64 get_cpu_iowait_time_us(int cpu, u64 *last_update_time);
#else /* !CONFIG_NO_HZ_COMMON */
diff --git a/include/linux/time.h b/include/linux/time.h
index 23f0f5ce3090..c0543f5f25de 100644
--- a/include/linux/time.h
+++ b/include/linux/time.h
@@ -151,9 +151,6 @@ static inline bool timespec_inject_offset_valid(const struct timespec *ts)
return true;
}
-#define CURRENT_TIME (current_kernel_time())
-#define CURRENT_TIME_SEC ((struct timespec) { get_seconds(), 0 })
-
/* Some architectures do not supply their own clocksource.
* This is mainly the case in architectures that get their
* inter-tick times by reading the counter on their interval
diff --git a/include/linux/timekeeping.h b/include/linux/timekeeping.h
index b598cbc7b576..ddc229ff6d1e 100644
--- a/include/linux/timekeeping.h
+++ b/include/linux/timekeeping.h
@@ -19,21 +19,6 @@ extern void do_gettimeofday(struct timeval *tv);
extern int do_settimeofday64(const struct timespec64 *ts);
extern int do_sys_settimeofday64(const struct timespec64 *tv,
const struct timezone *tz);
-static inline int do_sys_settimeofday(const struct timespec *tv,
- const struct timezone *tz)
-{
- struct timespec64 ts64;
-
- if (!tv)
- return do_sys_settimeofday64(NULL, tz);
-
- if (!timespec_valid(tv))
- return -EINVAL;
-
- ts64 = timespec_to_timespec64(*tv);
- return do_sys_settimeofday64(&ts64, tz);
-}
-
/*
* Kernel time accessors
*/
@@ -273,6 +258,11 @@ static inline void timekeeping_clocktai(struct timespec *ts)
*ts = ktime_to_timespec(ktime_get_clocktai());
}
+static inline void timekeeping_clocktai64(struct timespec64 *ts)
+{
+ *ts = ktime_to_timespec64(ktime_get_clocktai());
+}
+
/*
* RTC specific
*/
diff --git a/include/linux/tpm.h b/include/linux/tpm.h
index da158f06e0b2..5a090f5ab335 100644
--- a/include/linux/tpm.h
+++ b/include/linux/tpm.h
@@ -48,7 +48,8 @@ struct tpm_class_ops {
u8 (*status) (struct tpm_chip *chip);
bool (*update_timeouts)(struct tpm_chip *chip,
unsigned long *timeout_cap);
-
+ int (*request_locality)(struct tpm_chip *chip, int loc);
+ void (*relinquish_locality)(struct tpm_chip *chip, int loc);
};
#if defined(CONFIG_TCG_TPM) || defined(CONFIG_TCG_TPM_MODULE)
diff --git a/include/linux/trace_events.h b/include/linux/trace_events.h
index 0af63c4381b9..a556805eff8a 100644
--- a/include/linux/trace_events.h
+++ b/include/linux/trace_events.h
@@ -138,16 +138,7 @@ enum print_line_t {
TRACE_TYPE_NO_CONSUME = 3 /* Handled but ask to not consume */
};
-/*
- * Several functions return TRACE_TYPE_PARTIAL_LINE if the trace_seq
- * overflowed, and TRACE_TYPE_HANDLED otherwise. This helper function
- * simplifies those functions and keeps them in sync.
- */
-static inline enum print_line_t trace_handle_return(struct trace_seq *s)
-{
- return trace_seq_has_overflowed(s) ?
- TRACE_TYPE_PARTIAL_LINE : TRACE_TYPE_HANDLED;
-}
+enum print_line_t trace_handle_return(struct trace_seq *s);
void tracing_generic_entry_update(struct trace_entry *entry,
unsigned long flags,
diff --git a/include/linux/tracepoint.h b/include/linux/tracepoint.h
index f72fcfe0e66a..cc48cb2ce209 100644
--- a/include/linux/tracepoint.h
+++ b/include/linux/tracepoint.h
@@ -128,7 +128,7 @@ extern void syscall_unregfunc(void);
* as "(void *, void)". The DECLARE_TRACE_NOARGS() will pass in just
* "void *data", where as the DECLARE_TRACE() will pass in "void *data, proto".
*/
-#define __DO_TRACE(tp, proto, args, cond, prercu, postrcu) \
+#define __DO_TRACE(tp, proto, args, cond, rcucheck) \
do { \
struct tracepoint_func *it_func_ptr; \
void *it_func; \
@@ -136,7 +136,11 @@ extern void syscall_unregfunc(void);
\
if (!(cond)) \
return; \
- prercu; \
+ if (rcucheck) { \
+ if (WARN_ON_ONCE(rcu_irq_enter_disabled())) \
+ return; \
+ rcu_irq_enter_irqson(); \
+ } \
rcu_read_lock_sched_notrace(); \
it_func_ptr = rcu_dereference_sched((tp)->funcs); \
if (it_func_ptr) { \
@@ -147,20 +151,19 @@ extern void syscall_unregfunc(void);
} while ((++it_func_ptr)->func); \
} \
rcu_read_unlock_sched_notrace(); \
- postrcu; \
+ if (rcucheck) \
+ rcu_irq_exit_irqson(); \
} while (0)
#ifndef MODULE
-#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) \
+#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args) \
static inline void trace_##name##_rcuidle(proto) \
{ \
if (static_key_false(&__tracepoint_##name.key)) \
__DO_TRACE(&__tracepoint_##name, \
TP_PROTO(data_proto), \
TP_ARGS(data_args), \
- TP_CONDITION(cond), \
- rcu_irq_enter_irqson(), \
- rcu_irq_exit_irqson()); \
+ TP_CONDITION(cond), 1); \
}
#else
#define __DECLARE_TRACE_RCU(name, proto, args, cond, data_proto, data_args)
@@ -186,7 +189,7 @@ extern void syscall_unregfunc(void);
__DO_TRACE(&__tracepoint_##name, \
TP_PROTO(data_proto), \
TP_ARGS(data_args), \
- TP_CONDITION(cond),,); \
+ TP_CONDITION(cond), 0); \
if (IS_ENABLED(CONFIG_LOCKDEP) && (cond)) { \
rcu_read_lock_sched_notrace(); \
rcu_dereference_sched(__tracepoint_##name.funcs);\
diff --git a/include/linux/tty.h b/include/linux/tty.h
index 1017e904c0a3..d07cd2105a6c 100644
--- a/include/linux/tty.h
+++ b/include/linux/tty.h
@@ -390,7 +390,6 @@ static inline bool tty_throttled(struct tty_struct *tty)
}
#ifdef CONFIG_TTY
-extern void console_init(void);
extern void tty_kref_put(struct tty_struct *tty);
extern struct pid *tty_get_pgrp(struct tty_struct *tty);
extern void tty_vhangup_self(void);
@@ -402,8 +401,6 @@ extern struct tty_struct *get_current_tty(void);
extern int __init tty_init(void);
extern const char *tty_name(const struct tty_struct *tty);
#else
-static inline void console_init(void)
-{ }
static inline void tty_kref_put(struct tty_struct *tty)
{ }
static inline struct pid *tty_get_pgrp(struct tty_struct *tty)
@@ -478,9 +475,13 @@ extern int tty_do_resize(struct tty_struct *tty, struct winsize *ws);
extern int is_current_pgrp_orphaned(void);
extern void tty_hangup(struct tty_struct *tty);
extern void tty_vhangup(struct tty_struct *tty);
+extern void tty_vhangup_session(struct tty_struct *tty);
extern int tty_hung_up_p(struct file *filp);
extern void do_SAK(struct tty_struct *tty);
extern void __do_SAK(struct tty_struct *tty);
+extern void tty_open_proc_set_tty(struct file *filp, struct tty_struct *tty);
+extern int tty_signal_session_leader(struct tty_struct *tty, int exit_session);
+extern void session_clear_tty(struct pid *session);
extern void no_tty(void);
extern void tty_buffer_free_all(struct tty_port *port);
extern void tty_buffer_flush(struct tty_struct *tty, struct tty_ldisc *ld);
@@ -528,6 +529,8 @@ extern void tty_ldisc_flush(struct tty_struct *tty);
extern long tty_ioctl(struct file *file, unsigned int cmd, unsigned long arg);
extern int tty_mode_ioctl(struct tty_struct *tty, struct file *file,
unsigned int cmd, unsigned long arg);
+extern long tty_jobctrl_ioctl(struct tty_struct *tty, struct tty_struct *real_tty,
+ struct file *file, unsigned int cmd, unsigned long arg);
extern int tty_perform_flush(struct tty_struct *tty, unsigned long arg);
extern void tty_default_fops(struct file_operations *fops);
extern struct tty_struct *alloc_tty_struct(struct tty_driver *driver, int idx);
@@ -669,7 +672,11 @@ extern int tty_ldisc_receive_buf(struct tty_ldisc *ld, const unsigned char *p,
/* n_tty.c */
extern void n_tty_inherit_ops(struct tty_ldisc_ops *ops);
+#ifdef CONFIG_TTY
extern void __init n_tty_init(void);
+#else
+static inline void n_tty_init(void) { }
+#endif
/* tty_audit.c */
#ifdef CONFIG_AUDIT
diff --git a/include/linux/types.h b/include/linux/types.h
index 1e7bd24848fc..258099a4ed82 100644
--- a/include/linux/types.h
+++ b/include/linux/types.h
@@ -209,7 +209,7 @@ struct ustat {
* naturally due ABI requirements, but some architectures (like CRIS) have
* weird ABI and we need to ask it explicitly.
*
- * The alignment is required to guarantee that bits 0 and 1 of @next will be
+ * The alignment is required to guarantee that bit 0 of @next will be
* clear under normal conditions -- as long as we use call_rcu(),
* call_rcu_bh(), call_rcu_sched(), or call_srcu() to queue callback.
*
diff --git a/include/linux/uaccess.h b/include/linux/uaccess.h
index f30c187ed785..201418d5e15c 100644
--- a/include/linux/uaccess.h
+++ b/include/linux/uaccess.h
@@ -2,8 +2,199 @@
#define __LINUX_UACCESS_H__
#include <linux/sched.h>
+#include <linux/thread_info.h>
+#include <linux/kasan-checks.h>
+
+#define VERIFY_READ 0
+#define VERIFY_WRITE 1
+
+#define uaccess_kernel() segment_eq(get_fs(), KERNEL_DS)
+
#include <asm/uaccess.h>
+/*
+ * Architectures should provide two primitives (raw_copy_{to,from}_user())
+ * and get rid of their private instances of copy_{to,from}_user() and
+ * __copy_{to,from}_user{,_inatomic}().
+ *
+ * raw_copy_{to,from}_user(to, from, size) should copy up to size bytes and
+ * return the amount left to copy. They should assume that access_ok() has
+ * already been checked (and succeeded); they should *not* zero-pad anything.
+ * No KASAN or object size checks either - those belong here.
+ *
+ * Both of these functions should attempt to copy size bytes starting at from
+ * into the area starting at to. They must not fetch or store anything
+ * outside of those areas. Return value must be between 0 (everything
+ * copied successfully) and size (nothing copied).
+ *
+ * If raw_copy_{to,from}_user(to, from, size) returns N, size - N bytes starting
+ * at to must become equal to the bytes fetched from the corresponding area
+ * starting at from. All data past to + size - N must be left unmodified.
+ *
+ * If copying succeeds, the return value must be 0. If some data cannot be
+ * fetched, it is permitted to copy less than had been fetched; the only
+ * hard requirement is that not storing anything at all (i.e. returning size)
+ * should happen only when nothing could be copied. In other words, you don't
+ * have to squeeze as much as possible - it is allowed, but not necessary.
+ *
+ * For raw_copy_from_user(), 'to' always points to kernel memory and no faults
+ * on store should happen. Interpretation of 'from' is affected by set_fs().
+ * For raw_copy_to_user() it's the other way round.
+ *
+ * Both can be inlined - it's up to each architecture whether it wants to bother
+ * with that. They should not be used directly; they are used to implement
+ * the 6 functions (copy_{to,from}_user(), __copy_{to,from}_user{,_inatomic}())
+ * that are used instead. Out of those, __... ones are inlined. Plain
+ * copy_{to,from}_user() might or might not be inlined. If you want them
+ * inlined, have asm/uaccess.h define INLINE_COPY_{TO,FROM}_USER.
+ *
+ * NOTE: only copy_from_user() zero-pads the destination in case of short copy.
+ * Neither __copy_from_user() nor __copy_from_user_inatomic() zero anything
+ * at all; their callers absolutely must check the return value.
+ *
+ * Biarch ones should also provide raw_copy_in_user() - similar to the above,
+ * but both source and destination are __user pointers (affected by set_fs()
+ * as usual) and both source and destination can trigger faults.
+ */
+
+static __always_inline unsigned long
+__copy_from_user_inatomic(void *to, const void __user *from, unsigned long n)
+{
+ kasan_check_write(to, n);
+ check_object_size(to, n, false);
+ return raw_copy_from_user(to, from, n);
+}
+
+static __always_inline unsigned long
+__copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ might_fault();
+ kasan_check_write(to, n);
+ check_object_size(to, n, false);
+ return raw_copy_from_user(to, from, n);
+}
+
+/**
+ * __copy_to_user_inatomic: - Copy a block of data into user space, with less checking.
+ * @to: Destination address, in user space.
+ * @from: Source address, in kernel space.
+ * @n: Number of bytes to copy.
+ *
+ * Context: User context only.
+ *
+ * Copy data from kernel space to user space. Caller must check
+ * the specified block with access_ok() before calling this function.
+ * The caller should also make sure the user space address is pinned
+ * so that the copy does not fault and sleep.
+ */
+static __always_inline unsigned long
+__copy_to_user_inatomic(void __user *to, const void *from, unsigned long n)
+{
+ kasan_check_read(from, n);
+ check_object_size(from, n, true);
+ return raw_copy_to_user(to, from, n);
+}
+
+static __always_inline unsigned long
+__copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ might_fault();
+ kasan_check_read(from, n);
+ check_object_size(from, n, true);
+ return raw_copy_to_user(to, from, n);
+}
+
+#ifdef INLINE_COPY_FROM_USER
+static inline unsigned long
+_copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ unsigned long res = n;
+ if (likely(access_ok(VERIFY_READ, from, n)))
+ res = raw_copy_from_user(to, from, n);
+ if (unlikely(res))
+ memset(to + (n - res), 0, res);
+ return res;
+}
+#else
+extern unsigned long
+_copy_from_user(void *, const void __user *, unsigned long);
+#endif
+
+#ifdef INLINE_COPY_TO_USER
+static inline unsigned long
+_copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ if (access_ok(VERIFY_WRITE, to, n))
+ n = raw_copy_to_user(to, from, n);
+ return n;
+}
+#else
+extern unsigned long
+_copy_to_user(void __user *, const void *, unsigned long);
+#endif
+
+extern void __compiletime_error("usercopy buffer size is too small")
+__bad_copy_user(void);
+
+static inline void copy_user_overflow(int size, unsigned long count)
+{
+ WARN(1, "Buffer overflow detected (%d < %lu)!\n", size, count);
+}
+
+static __always_inline unsigned long __must_check
+copy_from_user(void *to, const void __user *from, unsigned long n)
+{
+ int sz = __compiletime_object_size(to);
+
+ might_fault();
+ kasan_check_write(to, n);
+
+ if (likely(sz < 0 || sz >= n)) {
+ check_object_size(to, n, false);
+ n = _copy_from_user(to, from, n);
+ } else if (!__builtin_constant_p(n))
+ copy_user_overflow(sz, n);
+ else
+ __bad_copy_user();
+
+ return n;
+}
+
+static __always_inline unsigned long __must_check
+copy_to_user(void __user *to, const void *from, unsigned long n)
+{
+ int sz = __compiletime_object_size(from);
+
+ kasan_check_read(from, n);
+ might_fault();
+
+ if (likely(sz < 0 || sz >= n)) {
+ check_object_size(from, n, true);
+ n = _copy_to_user(to, from, n);
+ } else if (!__builtin_constant_p(n))
+ copy_user_overflow(sz, n);
+ else
+ __bad_copy_user();
+
+ return n;
+}
+#ifdef CONFIG_COMPAT
+static __always_inline unsigned long __must_check
+__copy_in_user(void __user *to, const void *from, unsigned long n)
+{
+ might_fault();
+ return raw_copy_in_user(to, from, n);
+}
+static __always_inline unsigned long __must_check
+copy_in_user(void __user *to, const void *from, unsigned long n)
+{
+ might_fault();
+ if (access_ok(VERIFY_WRITE, to, n) && access_ok(VERIFY_READ, from, n))
+ n = raw_copy_in_user(to, from, n);
+ return n;
+}
+#endif
+
static __always_inline void pagefault_disabled_inc(void)
{
current->pagefault_disabled++;
@@ -12,7 +203,6 @@ static __always_inline void pagefault_disabled_inc(void)
static __always_inline void pagefault_disabled_dec(void)
{
current->pagefault_disabled--;
- WARN_ON(current->pagefault_disabled < 0);
}
/*
@@ -67,12 +257,6 @@ static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
return __copy_from_user_inatomic(to, from, n);
}
-static inline unsigned long __copy_from_user_nocache(void *to,
- const void __user *from, unsigned long n)
-{
- return __copy_from_user(to, from, n);
-}
-
#endif /* ARCH_HAS_NOCACHE_UACCESS */
/*
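
Per the contract documented above, copy_from_user()/copy_to_user() return the number of bytes NOT copied, so callers treat any nonzero result as -EFAULT. A hypothetical ioctl handler showing the usual pattern (illustrative only):

#include <linux/fs.h>
#include <linux/types.h>
#include <linux/uaccess.h>

struct example_args {
	__u32 in;
	__u32 out;
};

static long example_ioctl(struct file *file, unsigned int cmd,
			  unsigned long arg)
{
	struct example_args args;

	if (copy_from_user(&args, (void __user *)arg, sizeof(args)))
		return -EFAULT;		/* short copy: bail out */

	args.out = args.in + 1;		/* do something with the request */

	if (copy_to_user((void __user *)arg, &args, sizeof(args)))
		return -EFAULT;
	return 0;
}
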
diff --git a/include/linux/udp.h b/include/linux/udp.h
index c0f530809d1f..6cb4061a720d 100644
--- a/include/linux/udp.h
+++ b/include/linux/udp.h
@@ -115,6 +115,6 @@ static inline bool udp_get_no_check6_rx(struct sock *sk)
#define udp_portaddr_for_each_entry_rcu(__sk, list) \
hlist_for_each_entry_rcu(__sk, list, __sk_common.skc_portaddr_node)
-#define IS_UDPLITE(__sk) (udp_sk(__sk)->pcflag)
+#define IS_UDPLITE(__sk) (__sk->sk_protocol == IPPROTO_UDPLITE)
#endif /* _LINUX_UDP_H */
diff --git a/include/linux/uio_driver.h b/include/linux/uio_driver.h
index 32c0e83d6239..3c85c81b0027 100644
--- a/include/linux/uio_driver.h
+++ b/include/linux/uio_driver.h
@@ -23,11 +23,13 @@ struct uio_map;
/**
* struct uio_mem - description of a UIO memory region
* @name: name of the memory region for identification
- * @addr: address of the device's memory (phys_addr is used since
- * addr can be logical, virtual, or physical & phys_addr_t
- * should always be large enough to handle any of the
- * address types)
- * @size: size of IO
+ * @addr: address of the device's memory rounded to page
+ * size (phys_addr is used since addr can be
+ * logical, virtual, or physical & phys_addr_t
+ * should always be large enough to handle any of
+ * the address types)
+ * @offs: offset of device memory within the page
+ * @size: size of IO (multiple of page size)
* @memtype: type of memory addr points to
* @internal_addr: ioremap-ped version of addr, for driver internal use
* @map: for use by the UIO core only.
@@ -35,6 +37,7 @@ struct uio_map;
struct uio_mem {
const char *name;
phys_addr_t addr;
+ unsigned long offs;
resource_size_t size;
int memtype;
void __iomem *internal_addr;
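
With the new @offs field, a UIO driver can expose a register block that does not start on a page boundary: @addr is rounded down to a page and @offs carries the remainder, as documented above. A hypothetical sketch (driver, resource and region name are illustrative):

#include <linux/mm.h>
#include <linux/uio_driver.h>

static void example_fill_mem(struct uio_info *info, phys_addr_t regs,
			     resource_size_t len)
{
	struct uio_mem *mem = &info->mem[0];

	mem->name = "registers";
	mem->addr = regs & PAGE_MASK;			/* page-aligned base */
	mem->offs = regs & ~PAGE_MASK;			/* offset within page */
	mem->size = PAGE_ALIGN((regs & ~PAGE_MASK) + len); /* whole pages */
	mem->memtype = UIO_MEM_PHYS;
}
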
diff --git a/include/linux/usb.h b/include/linux/usb.h
index 7e68259360de..cb9fbd54386e 100644
--- a/include/linux/usb.h
+++ b/include/linux/usb.h
@@ -99,6 +99,76 @@ enum usb_interface_condition {
USB_INTERFACE_UNBINDING,
};
+int __must_check
+usb_find_common_endpoints(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **bulk_in,
+ struct usb_endpoint_descriptor **bulk_out,
+ struct usb_endpoint_descriptor **int_in,
+ struct usb_endpoint_descriptor **int_out);
+
+int __must_check
+usb_find_common_endpoints_reverse(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **bulk_in,
+ struct usb_endpoint_descriptor **bulk_out,
+ struct usb_endpoint_descriptor **int_in,
+ struct usb_endpoint_descriptor **int_out);
+
+static inline int __must_check
+usb_find_bulk_in_endpoint(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **bulk_in)
+{
+ return usb_find_common_endpoints(alt, bulk_in, NULL, NULL, NULL);
+}
+
+static inline int __must_check
+usb_find_bulk_out_endpoint(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **bulk_out)
+{
+ return usb_find_common_endpoints(alt, NULL, bulk_out, NULL, NULL);
+}
+
+static inline int __must_check
+usb_find_int_in_endpoint(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **int_in)
+{
+ return usb_find_common_endpoints(alt, NULL, NULL, int_in, NULL);
+}
+
+static inline int __must_check
+usb_find_int_out_endpoint(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **int_out)
+{
+ return usb_find_common_endpoints(alt, NULL, NULL, NULL, int_out);
+}
+
+static inline int __must_check
+usb_find_last_bulk_in_endpoint(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **bulk_in)
+{
+ return usb_find_common_endpoints_reverse(alt, bulk_in, NULL, NULL, NULL);
+}
+
+static inline int __must_check
+usb_find_last_bulk_out_endpoint(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **bulk_out)
+{
+ return usb_find_common_endpoints_reverse(alt, NULL, bulk_out, NULL, NULL);
+}
+
+static inline int __must_check
+usb_find_last_int_in_endpoint(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **int_in)
+{
+ return usb_find_common_endpoints_reverse(alt, NULL, NULL, int_in, NULL);
+}
+
+static inline int __must_check
+usb_find_last_int_out_endpoint(struct usb_host_interface *alt,
+ struct usb_endpoint_descriptor **int_out)
+{
+ return usb_find_common_endpoints_reverse(alt, NULL, NULL, NULL, int_out);
+}
+
/**
* struct usb_interface - what usb device drivers talk to
* @altsetting: array of interface structures, one for each alternate
@@ -248,7 +318,7 @@ void usb_put_intf(struct usb_interface *intf);
* struct usb_interface (which persists only as long as its configuration
* is installed). The altsetting arrays can be accessed through these
* structures at any time, permitting comparison of configurations and
- * providing support for the /proc/bus/usb/devices pseudo-file.
+ * providing support for the /sys/kernel/debug/usb/devices pseudo-file.
*/
struct usb_interface_cache {
unsigned num_altsetting; /* number of alternate settings */
@@ -354,6 +424,7 @@ struct usb_devmap {
*/
struct usb_bus {
struct device *controller; /* host/master side hardware */
+ struct device *sysdev; /* as seen from firmware or bus */
int busnum; /* Bus number (in order of reg) */
const char *bus_name; /* stable id (PCI slot_name etc) */
u8 uses_dma; /* Does the host controller use DMA? */
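
The usb_find_common_endpoints() helpers above replace open-coded descriptor walks in driver probe() routines; they return 0 when all requested endpoints were found and -ENXIO otherwise. A hypothetical probe fragment as a sketch:

#include <linux/usb.h>

static int example_probe(struct usb_interface *intf,
			 const struct usb_device_id *id)
{
	struct usb_endpoint_descriptor *bulk_in, *bulk_out;
	int ret;

	ret = usb_find_common_endpoints(intf->cur_altsetting,
					&bulk_in, &bulk_out, NULL, NULL);
	if (ret)
		return ret;	/* a requested endpoint is missing */

	/* bulk_in->bEndpointAddress / bulk_out->bEndpointAddress can now be
	 * used to build the pipes for this hypothetical driver. */
	return 0;
}
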
diff --git a/include/linux/usb/composite.h b/include/linux/usb/composite.h
index 4616a49a1c2e..f665d2ceac20 100644
--- a/include/linux/usb/composite.h
+++ b/include/linux/usb/composite.h
@@ -451,6 +451,7 @@ static inline struct usb_composite_driver *to_cdriver(
* sure doing that won't hurt too much.
*
* One notion for how to handle Wireless USB devices involves:
+ *
* (a) a second gadget here, discovery mechanism TBD, but likely
* needing separate "register/unregister WUSB gadget" calls;
* (b) updates to usb_gadget to include flags "is it wireless",
@@ -503,8 +504,9 @@ struct usb_composite_dev {
/* protects deactivations and delayed_status counts*/
spinlock_t lock;
- unsigned setup_pending:1;
- unsigned os_desc_pending:1;
+ /* public: */
+ unsigned int setup_pending:1;
+ unsigned int os_desc_pending:1;
};
extern int usb_string_id(struct usb_composite_dev *c);
diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h
index e4516e9ded0f..fbc22a39e7bc 100644
--- a/include/linux/usb/gadget.h
+++ b/include/linux/usb/gadget.h
@@ -188,7 +188,7 @@ struct usb_ep_caps {
* @caps:The structure describing types and directions supported by the endpoint.
* @maxpacket:The maximum packet size used on this endpoint. The initial
* value can sometimes be reduced (hardware allowing), according to
- * the endpoint descriptor used to configure the endpoint.
+ * the endpoint descriptor used to configure the endpoint.
* @maxpacket_limit:The maximum packet size value which can be handled by this
* endpoint. It's set once by UDC driver when endpoint is initialized, and
* should not be changed. Should not be confused with maxpacket.
diff --git a/include/linux/usb/hcd.h b/include/linux/usb/hcd.h
index 40edf6a8533e..a469999a106d 100644
--- a/include/linux/usb/hcd.h
+++ b/include/linux/usb/hcd.h
@@ -437,6 +437,9 @@ extern int usb_hcd_alloc_bandwidth(struct usb_device *udev,
struct usb_host_interface *new_alt);
extern int usb_hcd_get_frame_number(struct usb_device *udev);
+struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver,
+ struct device *sysdev, struct device *dev, const char *bus_name,
+ struct usb_hcd *primary_hcd);
extern struct usb_hcd *usb_create_hcd(const struct hc_driver *driver,
struct device *dev, const char *bus_name);
extern struct usb_hcd *usb_create_shared_hcd(const struct hc_driver *driver,
@@ -453,7 +456,7 @@ extern int usb_hcd_find_raw_port_number(struct usb_hcd *hcd, int port1);
struct platform_device;
extern void usb_hcd_platform_shutdown(struct platform_device *dev);
-#ifdef CONFIG_PCI
+#ifdef CONFIG_USB_PCI
struct pci_dev;
struct pci_device_id;
extern int usb_hcd_pci_probe(struct pci_dev *dev,
@@ -466,7 +469,7 @@ extern int usb_hcd_amd_remote_wakeup_quirk(struct pci_dev *dev);
#ifdef CONFIG_PM
extern const struct dev_pm_ops usb_hcd_pci_pm_ops;
#endif
-#endif /* CONFIG_PCI */
+#endif /* CONFIG_USB_PCI */
/* pci-ish (pdev null is ok) buffer alloc/mapping support */
void usb_init_pool_max(void);
diff --git a/include/linux/usb/of.h b/include/linux/usb/of.h
index 5ff9032ee1b4..4031f47629ec 100644
--- a/include/linux/usb/of.h
+++ b/include/linux/usb/of.h
@@ -18,6 +18,7 @@ int of_usb_update_otg_caps(struct device_node *np,
struct usb_otg_caps *otg_caps);
struct device_node *usb_of_get_child_node(struct device_node *parent,
int portnum);
+struct device *usb_of_get_companion_dev(struct device *dev);
#else
static inline enum usb_dr_mode
of_usb_get_dr_mode_by_phy(struct device_node *np, int arg0)
@@ -38,6 +39,10 @@ static inline struct device_node *usb_of_get_child_node
{
return NULL;
}
+static inline struct device *usb_of_get_companion_dev(struct device *dev)
+{
+ return NULL;
+}
#endif
#if IS_ENABLED(CONFIG_OF) && IS_ENABLED(CONFIG_USB_SUPPORT)
diff --git a/include/linux/usb/otg-fsm.h b/include/linux/usb/otg-fsm.h
index 7a0350535cb1..a0a8f878503c 100644
--- a/include/linux/usb/otg-fsm.h
+++ b/include/linux/usb/otg-fsm.h
@@ -21,21 +21,6 @@
#include <linux/mutex.h>
#include <linux/errno.h>
-#undef VERBOSE
-
-#ifdef VERBOSE
-#define VDBG(fmt, args...) pr_debug("[%s] " fmt , \
- __func__, ## args)
-#else
-#define VDBG(stuff...) do {} while (0)
-#endif
-
-#ifdef VERBOSE
-#define MPC_LOC printk("Current Location [%s]:[%d]\n", __FILE__, __LINE__)
-#else
-#define MPC_LOC do {} while (0)
-#endif
-
#define PROTO_UNDEF (0)
#define PROTO_HOST (1)
#define PROTO_GADGET (2)
diff --git a/include/linux/usb/serial.h b/include/linux/usb/serial.h
index 704a1ab8240c..e2f0ab07eea5 100644
--- a/include/linux/usb/serial.h
+++ b/include/linux/usb/serial.h
@@ -20,7 +20,7 @@
#include <linux/kfifo.h>
/* The maximum number of ports one device can grab at once */
-#define MAX_NUM_PORTS 8
+#define MAX_NUM_PORTS 16
/* parity check flag */
#define RELEVANT_IFLAG(iflag) (iflag & (IGNBRK|BRKINT|IGNPAR|PARMRK|INPCK))
@@ -159,10 +159,10 @@ struct usb_serial {
unsigned char minors_reserved:1;
unsigned char num_ports;
unsigned char num_port_pointers;
- char num_interrupt_in;
- char num_interrupt_out;
- char num_bulk_in;
- char num_bulk_out;
+ unsigned char num_interrupt_in;
+ unsigned char num_interrupt_out;
+ unsigned char num_bulk_in;
+ unsigned char num_bulk_out;
struct usb_serial_port *port[MAX_NUM_PORTS];
struct kref kref;
struct mutex disc_mutex;
@@ -181,6 +181,17 @@ static inline void usb_set_serial_data(struct usb_serial *serial, void *data)
serial->private = data;
}
+struct usb_serial_endpoints {
+ unsigned char num_bulk_in;
+ unsigned char num_bulk_out;
+ unsigned char num_interrupt_in;
+ unsigned char num_interrupt_out;
+ struct usb_endpoint_descriptor *bulk_in[MAX_NUM_PORTS];
+ struct usb_endpoint_descriptor *bulk_out[MAX_NUM_PORTS];
+ struct usb_endpoint_descriptor *interrupt_in[MAX_NUM_PORTS];
+ struct usb_endpoint_descriptor *interrupt_out[MAX_NUM_PORTS];
+};
+
/**
* usb_serial_driver - describes a usb serial driver
* @description: pointer to a string that describes this driver. This string
@@ -188,12 +199,17 @@ static inline void usb_set_serial_data(struct usb_serial *serial, void *data)
* @id_table: pointer to a list of usb_device_id structures that define all
* of the devices this structure can support.
* @num_ports: the number of different ports this device will have.
+ * @num_bulk_in: minimum number of bulk-in endpoints
+ * @num_bulk_out: minimum number of bulk-out endpoints
+ * @num_interrupt_in: minimum number of interrupt-in endpoints
+ * @num_interrupt_out: minimum number of interrupt-out endpoints
* @bulk_in_size: minimum number of bytes to allocate for bulk-in buffer
* (0 = end-point size)
* @bulk_out_size: bytes to allocate for bulk-out buffer (0 = end-point size)
* @calc_num_ports: pointer to a function to determine how many ports this
- * device has dynamically. It will be called after the probe()
- * callback is called, but before attach()
+ * device has dynamically. It can also be used to verify the number of
+ * endpoints or to modify the port-endpoint mapping. It will be called
+ * after the probe() callback is called, but before attach().
* @probe: pointer to the driver's probe function.
* This will be called when the device is inserted into the system,
* but before the device has been fully initialized by the usb_serial
@@ -227,19 +243,26 @@ static inline void usb_set_serial_data(struct usb_serial *serial, void *data)
struct usb_serial_driver {
const char *description;
const struct usb_device_id *id_table;
- char num_ports;
struct list_head driver_list;
struct device_driver driver;
struct usb_driver *usb_driver;
struct usb_dynids dynids;
+ unsigned char num_ports;
+
+ unsigned char num_bulk_in;
+ unsigned char num_bulk_out;
+ unsigned char num_interrupt_in;
+ unsigned char num_interrupt_out;
+
size_t bulk_in_size;
size_t bulk_out_size;
int (*probe)(struct usb_serial *serial, const struct usb_device_id *id);
int (*attach)(struct usb_serial *serial);
- int (*calc_num_ports) (struct usb_serial *serial);
+ int (*calc_num_ports)(struct usb_serial *serial,
+ struct usb_serial_endpoints *epds);
void (*disconnect)(struct usb_serial *serial);
void (*release)(struct usb_serial *serial);
@@ -356,7 +379,6 @@ extern void usb_serial_handle_dcd_change(struct usb_serial_port *usb_port,
extern int usb_serial_bus_register(struct usb_serial_driver *device);
extern void usb_serial_bus_deregister(struct usb_serial_driver *device);
-extern struct usb_serial_driver usb_serial_generic_device;
extern struct bus_type usb_serial_bus_type;
extern struct tty_driver *usb_serial_tty_driver;
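A hedged sketch of the reworked calc_num_ports callback, assuming it still returns the number of ports (or a negative errno); my_calc_num_ports is a hypothetical driver function. The point of the new struct usb_serial_endpoints argument is that the endpoint counts collected by the core are now available for validation.

	static int my_calc_num_ports(struct usb_serial *serial,
				     struct usb_serial_endpoints *epds)
	{
		/* Refuse to bind unless at least one bulk pair was found. */
		if (!epds->num_bulk_in || !epds->num_bulk_out)
			return -ENODEV;

		/* One port per bulk-in/bulk-out pair. */
		return min(epds->num_bulk_in, epds->num_bulk_out);
	}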
diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h
new file mode 100644
index 000000000000..ec78204964ab
--- /dev/null
+++ b/include/linux/usb/typec.h
@@ -0,0 +1,243 @@
+
+#ifndef __LINUX_USB_TYPEC_H
+#define __LINUX_USB_TYPEC_H
+
+#include <linux/types.h>
+
+/* XXX: Once we have a header for USB Power Delivery, this belongs there */
+#define ALTMODE_MAX_MODES 6
+
+/* USB Type-C Specification releases */
+#define USB_TYPEC_REV_1_0 0x100 /* 1.0 */
+#define USB_TYPEC_REV_1_1 0x110 /* 1.1 */
+#define USB_TYPEC_REV_1_2 0x120 /* 1.2 */
+
+struct typec_altmode;
+struct typec_partner;
+struct typec_cable;
+struct typec_plug;
+struct typec_port;
+
+struct fwnode_handle;
+
+enum typec_port_type {
+ TYPEC_PORT_DFP,
+ TYPEC_PORT_UFP,
+ TYPEC_PORT_DRP,
+};
+
+enum typec_plug_type {
+ USB_PLUG_NONE,
+ USB_PLUG_TYPE_A,
+ USB_PLUG_TYPE_B,
+ USB_PLUG_TYPE_C,
+ USB_PLUG_CAPTIVE,
+};
+
+enum typec_data_role {
+ TYPEC_DEVICE,
+ TYPEC_HOST,
+};
+
+enum typec_role {
+ TYPEC_SINK,
+ TYPEC_SOURCE,
+};
+
+enum typec_pwr_opmode {
+ TYPEC_PWR_MODE_USB,
+ TYPEC_PWR_MODE_1_5A,
+ TYPEC_PWR_MODE_3_0A,
+ TYPEC_PWR_MODE_PD,
+};
+
+enum typec_accessory {
+ TYPEC_ACCESSORY_NONE,
+ TYPEC_ACCESSORY_AUDIO,
+ TYPEC_ACCESSORY_DEBUG,
+};
+
+#define TYPEC_MAX_ACCESSORY 3
+
+/*
+ * struct usb_pd_identity - USB Power Delivery identity data
+ * @id_header: ID Header VDO
+ * @cert_stat: Cert Stat VDO
+ * @product: Product VDO
+ *
+ * USB power delivery Discover Identity command response data.
+ *
+ * REVISIT: This is USB Power Delivery specific information, so this structure
+ * probably belongs in a USB Power Delivery header file once we have one.
+ */
+struct usb_pd_identity {
+ u32 id_header;
+ u32 cert_stat;
+ u32 product;
+};
+
+int typec_partner_set_identity(struct typec_partner *partner);
+int typec_cable_set_identity(struct typec_cable *cable);
+
+/*
+ * struct typec_mode_desc - Individual Mode of an Alternate Mode
+ * @index: Index of the Mode within the SVID
+ * @vdo: VDO returned by Discover Modes USB PD command
+ * @desc: Optional human readable description of the mode
+ * @roles: Only for ports. DRP if the mode is available in both roles
+ *
+ * Description of a mode of an Alternate Mode which a connector, cable plug or
+ * partner supports. Every mode will have its own sysfs group. The details are
+ * the VDO returned by the Discover Modes command, a description of the mode and
+ * an active flag telling whether the mode has been entered or not.
+ */
+struct typec_mode_desc {
+ int index;
+ u32 vdo;
+ char *desc;
+ /* Only used with ports */
+ enum typec_port_type roles;
+};
+
+/*
+ * struct typec_altmode_desc - USB Type-C Alternate Mode Descriptor
+ * @svid: Standard or Vendor ID
+ * @n_modes: Number of modes
+ * @modes: Array of modes supported by the Alternate Mode
+ *
+ * Representation of an Alternate Mode that has SVID assigned by USB-IF. The
+ * array of modes will list the modes of a particular SVID that are supported by
+ * a connector, partner or cable plug.
+ */
+struct typec_altmode_desc {
+ u16 svid;
+ int n_modes;
+ struct typec_mode_desc modes[ALTMODE_MAX_MODES];
+};
+
+struct typec_altmode
+*typec_partner_register_altmode(struct typec_partner *partner,
+ struct typec_altmode_desc *desc);
+struct typec_altmode
+*typec_plug_register_altmode(struct typec_plug *plug,
+ struct typec_altmode_desc *desc);
+struct typec_altmode
+*typec_port_register_altmode(struct typec_port *port,
+ struct typec_altmode_desc *desc);
+void typec_unregister_altmode(struct typec_altmode *altmode);
+
+struct typec_port *typec_altmode2port(struct typec_altmode *alt);
+
+void typec_altmode_update_active(struct typec_altmode *alt, int mode,
+ bool active);
+
+enum typec_plug_index {
+ TYPEC_PLUG_SOP_P,
+ TYPEC_PLUG_SOP_PP,
+};
+
+/*
+ * struct typec_plug_desc - USB Type-C Cable Plug Descriptor
+ * @index: SOP Prime for the plug connected to DFP and SOP Double Prime for the
+ * plug connected to UFP
+ *
+ * Represents USB Type-C Cable Plug.
+ */
+struct typec_plug_desc {
+ enum typec_plug_index index;
+};
+
+/*
+ * struct typec_cable_desc - USB Type-C Cable Descriptor
+ * @type: The plug type from USB PD Cable VDO
+ * @active: Is the cable active or passive
+ * @identity: Result of Discover Identity command
+ *
+ * Represents a USB Type-C cable attached to a USB Type-C port.
+ */
+struct typec_cable_desc {
+ enum typec_plug_type type;
+ unsigned int active:1;
+ struct usb_pd_identity *identity;
+};
+
+/*
+ * struct typec_partner_desc - USB Type-C Partner Descriptor
+ * @usb_pd: USB Power Delivery support
+ * @accessory: Audio, Debug or none.
+ * @identity: Discover Identity command data
+ *
+ * Details about a partner that is attached to a USB Type-C port. If the
+ * @identity member exists when the partner is registered, a directory named
+ * "identity" is created in sysfs for the partner device.
+ */
+struct typec_partner_desc {
+ unsigned int usb_pd:1;
+ enum typec_accessory accessory;
+ struct usb_pd_identity *identity;
+};
+
+/*
+ * struct typec_capability - USB Type-C Port Capabilities
+ * @type: DFP (Host-only), UFP (Device-only) or DRP (Dual Role)
+ * @revision: USB Type-C Specification release. Binary coded decimal
+ * @pd_revision: USB Power Delivery Specification revision if supported
+ * @prefer_role: Initial role preference
+ * @accessory: Supported Accessory Modes
+ * @fwnode: Optional fwnode of the port
+ * @try_role: Set data role preference for DRP port
+ * @dr_set: Set Data Role
+ * @pr_set: Set Power Role
+ * @vconn_set: Set VCONN Role
+ * @activate_mode: Enter/exit given Alternate Mode
+ *
+ * Static capabilities of a single USB Type-C port.
+ */
+struct typec_capability {
+ enum typec_port_type type;
+ u16 revision; /* 0120H = "1.2" */
+ u16 pd_revision; /* 0300H = "3.0" */
+ int prefer_role;
+ enum typec_accessory accessory[TYPEC_MAX_ACCESSORY];
+
+ struct fwnode_handle *fwnode;
+
+ int (*try_role)(const struct typec_capability *,
+ int role);
+
+ int (*dr_set)(const struct typec_capability *,
+ enum typec_data_role);
+ int (*pr_set)(const struct typec_capability *,
+ enum typec_role);
+ int (*vconn_set)(const struct typec_capability *,
+ enum typec_role);
+
+ int (*activate_mode)(const struct typec_capability *,
+ int mode, int activate);
+};
+
+/* Specific to try_role(). Indicates the user wants to clear the preference. */
+#define TYPEC_NO_PREFERRED_ROLE (-1)
+
+struct typec_port *typec_register_port(struct device *parent,
+ const struct typec_capability *cap);
+void typec_unregister_port(struct typec_port *port);
+
+struct typec_partner *typec_register_partner(struct typec_port *port,
+ struct typec_partner_desc *desc);
+void typec_unregister_partner(struct typec_partner *partner);
+
+struct typec_cable *typec_register_cable(struct typec_port *port,
+ struct typec_cable_desc *desc);
+void typec_unregister_cable(struct typec_cable *cable);
+
+struct typec_plug *typec_register_plug(struct typec_cable *cable,
+ struct typec_plug_desc *desc);
+void typec_unregister_plug(struct typec_plug *plug);
+
+void typec_set_data_role(struct typec_port *port, enum typec_data_role role);
+void typec_set_pwr_role(struct typec_port *port, enum typec_role role);
+void typec_set_vconn_role(struct typec_port *port, enum typec_role role);
+void typec_set_pwr_opmode(struct typec_port *port, enum typec_pwr_opmode mode);
+
+#endif /* __LINUX_USB_TYPEC_H */
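A minimal sketch of port registration against this new class interface; struct my_port_drv, the DRP capability values and the ERR_PTR-style error handling are assumptions for illustration only.

	static const struct typec_capability my_cap = {
		.type		= TYPEC_PORT_DRP,
		.revision	= USB_TYPEC_REV_1_2,
		.prefer_role	= TYPEC_NO_PREFERRED_ROLE,
	};

	/* struct my_port_drv is hypothetical; it only needs a typec_port pointer. */
	static int my_port_probe(struct device *parent, struct my_port_drv *drv)
	{
		drv->port = typec_register_port(parent, &my_cap);
		if (IS_ERR(drv->port))
			return PTR_ERR(drv->port);

		/* Report the initial roles once the connection state is known. */
		typec_set_data_role(drv->port, TYPEC_DEVICE);
		typec_set_pwr_role(drv->port, TYPEC_SINK);
		return 0;
	}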
diff --git a/include/linux/usb/usbnet.h b/include/linux/usb/usbnet.h
index 6e0ce8c7b8cb..7dffa5624ea6 100644
--- a/include/linux/usb/usbnet.h
+++ b/include/linux/usb/usbnet.h
@@ -64,6 +64,8 @@ struct usbnet {
struct usb_anchor deferred;
struct tasklet_struct bh;
+ struct pcpu_sw_netstats __percpu *stats64;
+
struct work_struct kevent;
unsigned long flags;
# define EVENT_TX_HALT 0
@@ -261,10 +263,10 @@ extern void usbnet_pause_rx(struct usbnet *);
extern void usbnet_resume_rx(struct usbnet *);
extern void usbnet_purge_paused_rxq(struct usbnet *);
-extern int usbnet_get_settings(struct net_device *net,
- struct ethtool_cmd *cmd);
-extern int usbnet_set_settings(struct net_device *net,
- struct ethtool_cmd *cmd);
+extern int usbnet_get_link_ksettings(struct net_device *net,
+ struct ethtool_link_ksettings *cmd);
+extern int usbnet_set_link_ksettings(struct net_device *net,
+ const struct ethtool_link_ksettings *cmd);
extern u32 usbnet_get_link(struct net_device *net);
extern u32 usbnet_get_msglevel(struct net_device *);
extern void usbnet_set_msglevel(struct net_device *, u32);
@@ -278,5 +280,7 @@ extern int usbnet_status_start(struct usbnet *dev, gfp_t mem_flags);
extern void usbnet_status_stop(struct usbnet *dev);
extern void usbnet_update_max_qlen(struct usbnet *dev);
+extern void usbnet_get_stats64(struct net_device *dev,
+ struct rtnl_link_stats64 *stats);
#endif /* __LINUX_USB_USBNET_H */
diff --git a/include/linux/usb/xhci-dbgp.h b/include/linux/usb/xhci-dbgp.h
new file mode 100644
index 000000000000..80c1cca1f529
--- /dev/null
+++ b/include/linux/usb/xhci-dbgp.h
@@ -0,0 +1,29 @@
+/*
+ * Standalone xHCI debug capability driver
+ *
+ * Copyright (C) 2016 Intel Corporation
+ *
+ * Author: Lu Baolu <baolu.lu@linux.intel.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __LINUX_XHCI_DBGP_H
+#define __LINUX_XHCI_DBGP_H
+
+#ifdef CONFIG_EARLY_PRINTK_USB_XDBC
+int __init early_xdbc_parse_parameter(char *s);
+int __init early_xdbc_setup_hardware(void);
+void __init early_xdbc_register_console(void);
+#else
+static inline int __init early_xdbc_setup_hardware(void)
+{
+ return -ENODEV;
+}
+static inline void __init early_xdbc_register_console(void)
+{
+}
+#endif /* CONFIG_EARLY_PRINTK_USB_XDBC */
+#endif /* __LINUX_XHCI_DBGP_H */
diff --git a/include/linux/virtio.h b/include/linux/virtio.h
index 7edfbdb55a99..28b0e965360f 100644
--- a/include/linux/virtio.h
+++ b/include/linux/virtio.h
@@ -44,6 +44,12 @@ int virtqueue_add_inbuf(struct virtqueue *vq,
void *data,
gfp_t gfp);
+int virtqueue_add_inbuf_ctx(struct virtqueue *vq,
+ struct scatterlist sg[], unsigned int num,
+ void *data,
+ void *ctx,
+ gfp_t gfp);
+
int virtqueue_add_sgs(struct virtqueue *vq,
struct scatterlist *sgs[],
unsigned int out_sgs,
@@ -59,6 +65,9 @@ bool virtqueue_notify(struct virtqueue *vq);
void *virtqueue_get_buf(struct virtqueue *vq, unsigned int *len);
+void *virtqueue_get_buf_ctx(struct virtqueue *vq, unsigned int *len,
+ void **ctx);
+
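A short sketch of the ctx round trip: whatever pointer was handed to virtqueue_add_inbuf_ctx() as @ctx comes back through virtqueue_get_buf_ctx() together with the used buffer. my_recv_done and my_process are hypothetical driver functions.

	/* Hypothetical consumer of the buffer and its stored context. */
	static void my_process(void *buf, unsigned int len, void *ctx);

	static void my_recv_done(struct virtqueue *vq)
	{
		unsigned int len;
		void *ctx, *buf;

		/* Drain all used buffers, recovering the per-buffer context. */
		while ((buf = virtqueue_get_buf_ctx(vq, &len, &ctx)) != NULL)
			my_process(buf, len, ctx);
	}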
void virtqueue_disable_cb(struct virtqueue *vq);
bool virtqueue_enable_cb(struct virtqueue *vq);
@@ -156,9 +165,13 @@ int virtio_device_restore(struct virtio_device *dev);
* @feature_table_legacy: same as feature_table but when working in legacy mode.
* @feature_table_size_legacy: number of entries in feature table legacy array.
* @probe: the function to call when a device is found. Returns 0 or -errno.
+ * @scan: optional function to call after successful probe; intended
+ * for virtio-scsi to invoke a scan.
* @remove: the function to call when a device is removed.
* @config_changed: optional function to call when the device configuration
* changes; may be called in interrupt context.
+ * @freeze: optional function to call during suspend/hibernation.
+ * @restore: optional function to call on resume.
*/
struct virtio_driver {
struct device_driver driver;
diff --git a/include/linux/virtio_config.h b/include/linux/virtio_config.h
index 8355bab175e1..0133d8a12ccd 100644
--- a/include/linux/virtio_config.h
+++ b/include/linux/virtio_config.h
@@ -72,7 +72,8 @@ struct virtio_config_ops {
void (*reset)(struct virtio_device *vdev);
int (*find_vqs)(struct virtio_device *, unsigned nvqs,
struct virtqueue *vqs[], vq_callback_t *callbacks[],
- const char * const names[], struct irq_affinity *desc);
+ const char * const names[], const bool *ctx,
+ struct irq_affinity *desc);
void (*del_vqs)(struct virtio_device *);
u64 (*get_features)(struct virtio_device *vdev);
int (*finalize_features)(struct virtio_device *vdev);
@@ -173,12 +174,32 @@ struct virtqueue *virtio_find_single_vq(struct virtio_device *vdev,
vq_callback_t *callbacks[] = { c };
const char *names[] = { n };
struct virtqueue *vq;
- int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL);
+ int err = vdev->config->find_vqs(vdev, 1, &vq, callbacks, names, NULL,
+ NULL);
if (err < 0)
return ERR_PTR(err);
return vq;
}
+static inline
+int virtio_find_vqs(struct virtio_device *vdev, unsigned nvqs,
+ struct virtqueue *vqs[], vq_callback_t *callbacks[],
+ const char * const names[],
+ struct irq_affinity *desc)
+{
+ return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, NULL, desc);
+}
+
+static inline
+int virtio_find_vqs_ctx(struct virtio_device *vdev, unsigned nvqs,
+ struct virtqueue *vqs[], vq_callback_t *callbacks[],
+ const char * const names[], const bool *ctx,
+ struct irq_affinity *desc)
+{
+ return vdev->config->find_vqs(vdev, nvqs, vqs, callbacks, names, ctx,
+ desc);
+}
+
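A hedged sketch of a driver that does not need per-buffer context and keeps the old calling convention via the new wrapper, which forwards a NULL ctx array; my_setup_vqs, my_cb and the queue names are illustrative.

	/* Hypothetical virtqueue callback. */
	static void my_cb(struct virtqueue *vq);

	static int my_setup_vqs(struct virtio_device *vdev, struct virtqueue *vqs[2])
	{
		static const char * const names[] = { "rx", "tx" };
		vq_callback_t *callbacks[] = { my_cb, my_cb };

		return virtio_find_vqs(vdev, 2, vqs, callbacks, names, NULL);
	}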
/**
* virtio_device_ready - enable vq use in probe function
* @vdev: the device
diff --git a/include/linux/virtio_ring.h b/include/linux/virtio_ring.h
index e8d36938f09a..270cfa81830e 100644
--- a/include/linux/virtio_ring.h
+++ b/include/linux/virtio_ring.h
@@ -71,6 +71,7 @@ struct virtqueue *vring_create_virtqueue(unsigned int index,
struct virtio_device *vdev,
bool weak_barriers,
bool may_reduce_num,
+ bool ctx,
bool (*notify)(struct virtqueue *vq),
void (*callback)(struct virtqueue *vq),
const char *name);
@@ -80,6 +81,7 @@ struct virtqueue *__vring_new_virtqueue(unsigned int index,
struct vring vring,
struct virtio_device *vdev,
bool weak_barriers,
+ bool ctx,
bool (*notify)(struct virtqueue *),
void (*callback)(struct virtqueue *),
const char *name);
@@ -93,6 +95,7 @@ struct virtqueue *vring_new_virtqueue(unsigned int index,
unsigned int vring_align,
struct virtio_device *vdev,
bool weak_barriers,
+ bool ctx,
void *pages,
bool (*notify)(struct virtqueue *vq),
void (*callback)(struct virtqueue *vq),
diff --git a/include/linux/virtio_vsock.h b/include/linux/virtio_vsock.h
index 584f9a647ad4..ab13f0743da8 100644
--- a/include/linux/virtio_vsock.h
+++ b/include/linux/virtio_vsock.h
@@ -153,5 +153,6 @@ void virtio_transport_free_pkt(struct virtio_vsock_pkt *pkt);
void virtio_transport_inc_tx_pkt(struct virtio_vsock_sock *vvs, struct virtio_vsock_pkt *pkt);
u32 virtio_transport_get_credit(struct virtio_vsock_sock *vvs, u32 wanted);
void virtio_transport_put_credit(struct virtio_vsock_sock *vvs, u32 credit);
+void virtio_transport_deliver_tap_pkt(struct virtio_vsock_pkt *pkt);
#endif /* _LINUX_VIRTIO_VSOCK_H */
diff --git a/include/linux/vm_event_item.h b/include/linux/vm_event_item.h
index a80b7b59cf33..d84ae90ccd5c 100644
--- a/include/linux/vm_event_item.h
+++ b/include/linux/vm_event_item.h
@@ -25,7 +25,7 @@ enum vm_event_item { PGPGIN, PGPGOUT, PSWPIN, PSWPOUT,
FOR_ALL_ZONES(PGALLOC),
FOR_ALL_ZONES(ALLOCSTALL),
FOR_ALL_ZONES(PGSCAN_SKIP),
- PGFREE, PGACTIVATE, PGDEACTIVATE,
+ PGFREE, PGACTIVATE, PGDEACTIVATE, PGLAZYFREE,
PGFAULT, PGMAJFAULT,
PGLAZYFREED,
PGREFILL,
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index d68edffbf142..0328ce003992 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -6,6 +6,7 @@
#include <linux/list.h>
#include <linux/llist.h>
#include <asm/page.h> /* pgprot_t */
+#include <asm/pgtable.h> /* PAGE_KERNEL */
#include <linux/rbtree.h>
struct vm_area_struct; /* vma defining user mapping in mm_types.h */
@@ -80,6 +81,25 @@ extern void *__vmalloc_node_range(unsigned long size, unsigned long align,
unsigned long start, unsigned long end, gfp_t gfp_mask,
pgprot_t prot, unsigned long vm_flags, int node,
const void *caller);
+#ifndef CONFIG_MMU
+extern void *__vmalloc_node_flags(unsigned long size, int node, gfp_t flags);
+#else
+extern void *__vmalloc_node(unsigned long size, unsigned long align,
+ gfp_t gfp_mask, pgprot_t prot,
+ int node, const void *caller);
+
+/*
+ * We really want to have this inlined due to caller tracking. This
+ * function is used by the high-level vmalloc APIs and so we want to track
+ * their callers and inlining will achieve that.
+ */
+static inline void *__vmalloc_node_flags(unsigned long size,
+ int node, gfp_t flags)
+{
+ return __vmalloc_node(size, 1, flags, PAGE_KERNEL,
+ node, __builtin_return_address(0));
+}
+#endif
extern void vfree(const void *addr);
extern void vfree_atomic(const void *addr);
diff --git a/include/linux/vme.h b/include/linux/vme.h
index ec5e8bf6118e..25874da3f2e1 100644
--- a/include/linux/vme.h
+++ b/include/linux/vme.h
@@ -92,7 +92,7 @@ extern struct bus_type vme_bus_type;
#define VME_SLOT_ALL -2
/**
- * Structure representing a VME device
+ * struct vme_dev - Structure representing a VME device
* @num: The device number
* @bridge: Pointer to the bridge device this device is on
* @dev: Internal device structure
@@ -107,6 +107,16 @@ struct vme_dev {
struct list_head bridge_list;
};
+/**
+ * struct vme_driver - Structure representing a VME driver
+ * @name: Driver name, should be unique among VME drivers and usually the same
+ * as the module name.
+ * @match: Callback used to determine whether probe should be run.
+ * @probe: Callback for device binding, called when new device is detected.
+ * @remove: Callback, called on device removal.
+ * @driver: Underlying generic device driver structure.
+ * @devices: List of VME devices (struct vme_dev) associated with this driver.
+ */
struct vme_driver {
const char *name;
int (*match)(struct vme_dev *);
diff --git a/include/linux/workqueue.h b/include/linux/workqueue.h
index bde063cefd04..c102ef65cb64 100644
--- a/include/linux/workqueue.h
+++ b/include/linux/workqueue.h
@@ -608,8 +608,13 @@ static inline long work_on_cpu(int cpu, long (*fn)(void *), void *arg)
{
return fn(arg);
}
+static inline long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg)
+{
+ return fn(arg);
+}
#else
long work_on_cpu(int cpu, long (*fn)(void *), void *arg);
+long work_on_cpu_safe(int cpu, long (*fn)(void *), void *arg);
#endif /* CONFIG_SMP */
#ifdef CONFIG_FREEZER
diff --git a/include/linux/writeback.h b/include/linux/writeback.h
index a3c0cbd7c888..d5815794416c 100644
--- a/include/linux/writeback.h
+++ b/include/linux/writeback.h
@@ -237,6 +237,7 @@ static inline void inode_attach_wb(struct inode *inode, struct page *page)
static inline void inode_detach_wb(struct inode *inode)
{
if (inode->i_wb) {
+ WARN_ON_ONCE(!(inode->i_state & I_CLEAR));
wb_put(inode->i_wb);
inode->i_wb = NULL;
}
diff --git a/include/media/cec-edid.h b/include/media/cec-edid.h
deleted file mode 100644
index bdf731ecba1a..000000000000
--- a/include/media/cec-edid.h
+++ /dev/null
@@ -1,104 +0,0 @@
-/*
- * cec-edid - HDMI Consumer Electronics Control & EDID helpers
- *
- * Copyright 2016 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
- *
- * This program is free software; you may redistribute it and/or modify
- * it under the terms of the GNU General Public License as published by
- * the Free Software Foundation; version 2 of the License.
- *
- * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
- * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
- * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
- * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
- * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
- * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
- * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
- * SOFTWARE.
- */
-
-#ifndef _MEDIA_CEC_EDID_H
-#define _MEDIA_CEC_EDID_H
-
-#include <linux/types.h>
-
-#define CEC_PHYS_ADDR_INVALID 0xffff
-#define cec_phys_addr_exp(pa) \
- ((pa) >> 12), ((pa) >> 8) & 0xf, ((pa) >> 4) & 0xf, (pa) & 0xf
-
-/**
- * cec_get_edid_phys_addr() - find and return the physical address
- *
- * @edid: pointer to the EDID data
- * @size: size in bytes of the EDID data
- * @offset: If not %NULL then the location of the physical address
- * bytes in the EDID will be returned here. This is set to 0
- * if there is no physical address found.
- *
- * Return: the physical address or CEC_PHYS_ADDR_INVALID if there is none.
- */
-u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
- unsigned int *offset);
-
-/**
- * cec_set_edid_phys_addr() - find and set the physical address
- *
- * @edid: pointer to the EDID data
- * @size: size in bytes of the EDID data
- * @phys_addr: the new physical address
- *
- * This function finds the location of the physical address in the EDID
- * and fills in the given physical address and updates the checksum
- * at the end of the EDID block. It does nothing if the EDID doesn't
- * contain a physical address.
- */
-void cec_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr);
-
-/**
- * cec_phys_addr_for_input() - calculate the PA for an input
- *
- * @phys_addr: the physical address of the parent
- * @input: the number of the input port, must be between 1 and 15
- *
- * This function calculates a new physical address based on the input
- * port number. For example:
- *
- * PA = 0.0.0.0 and input = 2 becomes 2.0.0.0
- *
- * PA = 3.0.0.0 and input = 1 becomes 3.1.0.0
- *
- * PA = 3.2.1.0 and input = 5 becomes 3.2.1.5
- *
- * PA = 3.2.1.3 and input = 5 becomes f.f.f.f since it maxed out the depth.
- *
- * Return: the new physical address or CEC_PHYS_ADDR_INVALID.
- */
-u16 cec_phys_addr_for_input(u16 phys_addr, u8 input);
-
-/**
- * cec_phys_addr_validate() - validate a physical address from an EDID
- *
- * @phys_addr: the physical address to validate
- * @parent: if not %NULL, then this is filled with the parents PA.
- * @port: if not %NULL, then this is filled with the input port.
- *
- * This validates a physical address as read from an EDID. If the
- * PA is invalid (such as 1.0.1.0 since '0' is only allowed at the end),
- * then it will return -EINVAL.
- *
- * The parent PA is passed into %parent and the input port is passed into
- * %port. For example:
- *
- * PA = 0.0.0.0: has parent 0.0.0.0 and input port 0.
- *
- * PA = 1.0.0.0: has parent 0.0.0.0 and input port 1.
- *
- * PA = 3.2.0.0: has parent 3.0.0.0 and input port 2.
- *
- * PA = f.f.f.f: has parent f.f.f.f and input port 0.
- *
- * Return: 0 if the PA is valid, -EINVAL if not.
- */
-int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port);
-
-#endif /* _MEDIA_CEC_EDID_H */
diff --git a/include/media/cec-notifier.h b/include/media/cec-notifier.h
new file mode 100644
index 000000000000..eb50ce54b759
--- /dev/null
+++ b/include/media/cec-notifier.h
@@ -0,0 +1,111 @@
+/*
+ * cec-notifier.h - notify CEC drivers of physical address changes
+ *
+ * Copyright 2016 Russell King <rmk+kernel@arm.linux.org.uk>
+ * Copyright 2016-2017 Cisco Systems, Inc. and/or its affiliates. All rights reserved.
+ *
+ * This program is free software; you may redistribute it and/or modify
+ * it under the terms of the GNU General Public License as published by
+ * the Free Software Foundation; version 2 of the License.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef LINUX_CEC_NOTIFIER_H
+#define LINUX_CEC_NOTIFIER_H
+
+#include <linux/types.h>
+#include <media/cec.h>
+
+struct device;
+struct edid;
+struct cec_adapter;
+struct cec_notifier;
+
+#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+
+/**
+ * cec_notifier_get - find or create a new cec_notifier for the given device.
+ * @dev: device that sends the events.
+ *
+ * If a notifier for device @dev already exists, then increase the refcount
+ * and return that notifier.
+ *
+ * If it doesn't exist, then allocate a new notifier struct and return a
+ * pointer to that new struct.
+ *
+ * Return NULL if the memory could not be allocated.
+ */
+struct cec_notifier *cec_notifier_get(struct device *dev);
+
+/**
+ * cec_notifier_put - decrease refcount and delete when the refcount reaches 0.
+ * @n: notifier
+ */
+void cec_notifier_put(struct cec_notifier *n);
+
+/**
+ * cec_notifier_set_phys_addr - set a new physical address.
+ * @n: the CEC notifier
+ * @pa: the CEC physical address
+ *
+ * Set a new CEC physical address.
+ */
+void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa);
+
+/**
+ * cec_notifier_set_phys_addr_from_edid - parse the PA from the EDID and set it.
+ * @n: the CEC notifier
+ * @edid: the struct edid pointer
+ *
+ * Parses the EDID to obtain the new CEC physical address and set it.
+ */
+void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n,
+ const struct edid *edid);
+
+/**
+ * cec_notifier_register - register a callback with the notifier
+ * @n: the CEC notifier
+ * @adap: the CEC adapter, passed as argument to the callback function
+ * @callback: the callback function
+ */
+void cec_notifier_register(struct cec_notifier *n,
+ struct cec_adapter *adap,
+ void (*callback)(struct cec_adapter *adap, u16 pa));
+
+/**
+ * cec_notifier_unregister - unregister the callback from the notifier.
+ * @n: the CEC notifier
+ */
+void cec_notifier_unregister(struct cec_notifier *n);
+
+#else
+static inline struct cec_notifier *cec_notifier_get(struct device *dev)
+{
+ /* A non-NULL pointer is expected on success */
+ return (struct cec_notifier *)0xdeadfeed;
+}
+
+static inline void cec_notifier_put(struct cec_notifier *n)
+{
+}
+
+static inline void cec_notifier_set_phys_addr(struct cec_notifier *n, u16 pa)
+{
+}
+
+static inline void cec_notifier_set_phys_addr_from_edid(struct cec_notifier *n,
+ const struct edid *edid)
+{
+}
+
+#endif
+
+#endif
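A minimal sketch of the intended flow in an HDMI transmitter driver: get (or create) the notifier at bind time, feed it every new EDID, and drop the reference at unbind. struct my_encoder and the callback names are hypothetical.

	/* struct my_encoder is hypothetical; it holds a cec_notifier pointer. */
	static int my_bind(struct my_encoder *enc, struct device *dev)
	{
		enc->notifier = cec_notifier_get(dev);
		if (!enc->notifier)
			return -ENOMEM;
		return 0;
	}

	static void my_edid_update(struct my_encoder *enc, const struct edid *edid)
	{
		cec_notifier_set_phys_addr_from_edid(enc->notifier, edid);
	}

	static void my_unbind(struct my_encoder *enc)
	{
		cec_notifier_put(enc->notifier);
	}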
diff --git a/include/media/cec.h b/include/media/cec.h
index 96a0aa770d61..b8eb895731d5 100644
--- a/include/media/cec.h
+++ b/include/media/cec.h
@@ -29,7 +29,7 @@
#include <linux/timer.h>
#include <linux/cec-funcs.h>
#include <media/rc-core.h>
-#include <media/cec-edid.h>
+#include <media/cec-notifier.h>
/**
* struct cec_devnode - cec device node
@@ -173,6 +173,10 @@ struct cec_adapter {
bool passthrough;
struct cec_log_addrs log_addrs;
+#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+ struct cec_notifier *notifier;
+#endif
+
struct dentry *cec_dir;
struct dentry *status_file;
@@ -184,6 +188,11 @@ struct cec_adapter {
char input_drv[32];
};
+static inline void *cec_get_drvdata(const struct cec_adapter *adap)
+{
+ return adap->priv;
+}
+
static inline bool cec_has_log_addr(const struct cec_adapter *adap, u8 log_addr)
{
return adap->log_addrs.log_addr_mask & (1 << log_addr);
@@ -194,7 +203,10 @@ static inline bool cec_is_sink(const struct cec_adapter *adap)
return adap->phys_addr == 0;
}
-#if IS_ENABLED(CONFIG_MEDIA_CEC_SUPPORT)
+#define cec_phys_addr_exp(pa) \
+ ((pa) >> 12), ((pa) >> 8) & 0xf, ((pa) >> 4) & 0xf, (pa) & 0xf
+
+#if IS_ENABLED(CONFIG_CEC_CORE)
struct cec_adapter *cec_allocate_adapter(const struct cec_adap_ops *ops,
void *priv, const char *name, u32 caps, u8 available_las);
int cec_register_adapter(struct cec_adapter *adap, struct device *parent);
@@ -213,6 +225,86 @@ void cec_transmit_done(struct cec_adapter *adap, u8 status, u8 arb_lost_cnt,
u8 nack_cnt, u8 low_drive_cnt, u8 error_cnt);
void cec_received_msg(struct cec_adapter *adap, struct cec_msg *msg);
+/**
+ * cec_get_edid_phys_addr() - find and return the physical address
+ *
+ * @edid: pointer to the EDID data
+ * @size: size in bytes of the EDID data
+ * @offset: If not %NULL then the location of the physical address
+ * bytes in the EDID will be returned here. This is set to 0
+ * if there is no physical address found.
+ *
+ * Return: the physical address or CEC_PHYS_ADDR_INVALID if there is none.
+ */
+u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
+ unsigned int *offset);
+
+/**
+ * cec_set_edid_phys_addr() - find and set the physical address
+ *
+ * @edid: pointer to the EDID data
+ * @size: size in bytes of the EDID data
+ * @phys_addr: the new physical address
+ *
+ * This function finds the location of the physical address in the EDID
+ * and fills in the given physical address and updates the checksum
+ * at the end of the EDID block. It does nothing if the EDID doesn't
+ * contain a physical address.
+ */
+void cec_set_edid_phys_addr(u8 *edid, unsigned int size, u16 phys_addr);
+
+/**
+ * cec_phys_addr_for_input() - calculate the PA for an input
+ *
+ * @phys_addr: the physical address of the parent
+ * @input: the number of the input port, must be between 1 and 15
+ *
+ * This function calculates a new physical address based on the input
+ * port number. For example:
+ *
+ * PA = 0.0.0.0 and input = 2 becomes 2.0.0.0
+ *
+ * PA = 3.0.0.0 and input = 1 becomes 3.1.0.0
+ *
+ * PA = 3.2.1.0 and input = 5 becomes 3.2.1.5
+ *
+ * PA = 3.2.1.3 and input = 5 becomes f.f.f.f since it maxed out the depth.
+ *
+ * Return: the new physical address or CEC_PHYS_ADDR_INVALID.
+ */
+u16 cec_phys_addr_for_input(u16 phys_addr, u8 input);
+
+/**
+ * cec_phys_addr_validate() - validate a physical address from an EDID
+ *
+ * @phys_addr: the physical address to validate
+ * @parent: if not %NULL, then this is filled with the parent's PA.
+ * @port: if not %NULL, then this is filled with the input port.
+ *
+ * This validates a physical address as read from an EDID. If the
+ * PA is invalid (such as 1.0.1.0 since '0' is only allowed at the end),
+ * then it will return -EINVAL.
+ *
+ * The parent PA is passed into %parent and the input port is passed into
+ * %port. For example:
+ *
+ * PA = 0.0.0.0: has parent 0.0.0.0 and input port 0.
+ *
+ * PA = 1.0.0.0: has parent 0.0.0.0 and input port 1.
+ *
+ * PA = 3.2.0.0: has parent 3.0.0.0 and input port 2.
+ *
+ * PA = f.f.f.f: has parent f.f.f.f and input port 0.
+ *
+ * Return: 0 if the PA is valid, -EINVAL if not.
+ */
+int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port);
+
+#ifdef CONFIG_MEDIA_CEC_NOTIFIER
+void cec_register_cec_notifier(struct cec_adapter *adap,
+ struct cec_notifier *notifier);
+#endif
+
#else
static inline int cec_register_adapter(struct cec_adapter *adap,
@@ -234,6 +326,29 @@ static inline void cec_s_phys_addr(struct cec_adapter *adap, u16 phys_addr,
{
}
+static inline u16 cec_get_edid_phys_addr(const u8 *edid, unsigned int size,
+ unsigned int *offset)
+{
+ if (offset)
+ *offset = 0;
+ return CEC_PHYS_ADDR_INVALID;
+}
+
+static inline void cec_set_edid_phys_addr(u8 *edid, unsigned int size,
+ u16 phys_addr)
+{
+}
+
+static inline u16 cec_phys_addr_for_input(u16 phys_addr, u8 input)
+{
+ return CEC_PHYS_ADDR_INVALID;
+}
+
+static inline int cec_phys_addr_validate(u16 phys_addr, u16 *parent, u16 *port)
+{
+ return 0;
+}
+
#endif
#endif /* _MEDIA_CEC_H */
diff --git a/include/media/davinci/vpif_types.h b/include/media/davinci/vpif_types.h
index c49c306cba61..385597da20dc 100644
--- a/include/media/davinci/vpif_types.h
+++ b/include/media/davinci/vpif_types.h
@@ -53,6 +53,7 @@ struct vpif_display_config {
int (*set_clock)(int, int);
struct vpif_subdev_info *subdevinfo;
int subdev_count;
+ int i2c_adapter_id;
struct vpif_display_chan_config chan_config[VPIF_DISPLAY_MAX_CHANNELS];
const char *card_name;
struct v4l2_async_subdev **asd; /* Flat array, arranged in groups */
diff --git a/include/media/rc-map.h b/include/media/rc-map.h
index a704749280d2..1a815a572fa1 100644
--- a/include/media/rc-map.h
+++ b/include/media/rc-map.h
@@ -27,7 +27,8 @@
* @RC_TYPE_NECX: Extended NEC protocol
* @RC_TYPE_NEC32: NEC 32 bit protocol
* @RC_TYPE_SANYO: Sanyo protocol
- * @RC_TYPE_MCE_KBD: RC6-ish MCE keyboard/mouse
+ * @RC_TYPE_MCIR2_KBD: RC6-ish MCE keyboard
+ * @RC_TYPE_MCIR2_MSE: RC6-ish MCE mouse
* @RC_TYPE_RC6_0: Philips RC6-0-16 protocol
* @RC_TYPE_RC6_6A_20: Philips RC6-6A-20 protocol
* @RC_TYPE_RC6_6A_24: Philips RC6-6A-24 protocol
@@ -51,48 +52,51 @@ enum rc_type {
RC_TYPE_NECX = 10,
RC_TYPE_NEC32 = 11,
RC_TYPE_SANYO = 12,
- RC_TYPE_MCE_KBD = 13,
- RC_TYPE_RC6_0 = 14,
- RC_TYPE_RC6_6A_20 = 15,
- RC_TYPE_RC6_6A_24 = 16,
- RC_TYPE_RC6_6A_32 = 17,
- RC_TYPE_RC6_MCE = 18,
- RC_TYPE_SHARP = 19,
- RC_TYPE_XMP = 20,
- RC_TYPE_CEC = 21,
+ RC_TYPE_MCIR2_KBD = 13,
+ RC_TYPE_MCIR2_MSE = 14,
+ RC_TYPE_RC6_0 = 15,
+ RC_TYPE_RC6_6A_20 = 16,
+ RC_TYPE_RC6_6A_24 = 17,
+ RC_TYPE_RC6_6A_32 = 18,
+ RC_TYPE_RC6_MCE = 19,
+ RC_TYPE_SHARP = 20,
+ RC_TYPE_XMP = 21,
+ RC_TYPE_CEC = 22,
};
#define RC_BIT_NONE 0ULL
-#define RC_BIT_UNKNOWN (1ULL << RC_TYPE_UNKNOWN)
-#define RC_BIT_OTHER (1ULL << RC_TYPE_OTHER)
-#define RC_BIT_RC5 (1ULL << RC_TYPE_RC5)
-#define RC_BIT_RC5X_20 (1ULL << RC_TYPE_RC5X_20)
-#define RC_BIT_RC5_SZ (1ULL << RC_TYPE_RC5_SZ)
-#define RC_BIT_JVC (1ULL << RC_TYPE_JVC)
-#define RC_BIT_SONY12 (1ULL << RC_TYPE_SONY12)
-#define RC_BIT_SONY15 (1ULL << RC_TYPE_SONY15)
-#define RC_BIT_SONY20 (1ULL << RC_TYPE_SONY20)
-#define RC_BIT_NEC (1ULL << RC_TYPE_NEC)
-#define RC_BIT_NECX (1ULL << RC_TYPE_NECX)
-#define RC_BIT_NEC32 (1ULL << RC_TYPE_NEC32)
-#define RC_BIT_SANYO (1ULL << RC_TYPE_SANYO)
-#define RC_BIT_MCE_KBD (1ULL << RC_TYPE_MCE_KBD)
-#define RC_BIT_RC6_0 (1ULL << RC_TYPE_RC6_0)
-#define RC_BIT_RC6_6A_20 (1ULL << RC_TYPE_RC6_6A_20)
-#define RC_BIT_RC6_6A_24 (1ULL << RC_TYPE_RC6_6A_24)
-#define RC_BIT_RC6_6A_32 (1ULL << RC_TYPE_RC6_6A_32)
-#define RC_BIT_RC6_MCE (1ULL << RC_TYPE_RC6_MCE)
-#define RC_BIT_SHARP (1ULL << RC_TYPE_SHARP)
-#define RC_BIT_XMP (1ULL << RC_TYPE_XMP)
-#define RC_BIT_CEC (1ULL << RC_TYPE_CEC)
+#define RC_BIT_UNKNOWN BIT_ULL(RC_TYPE_UNKNOWN)
+#define RC_BIT_OTHER BIT_ULL(RC_TYPE_OTHER)
+#define RC_BIT_RC5 BIT_ULL(RC_TYPE_RC5)
+#define RC_BIT_RC5X_20 BIT_ULL(RC_TYPE_RC5X_20)
+#define RC_BIT_RC5_SZ BIT_ULL(RC_TYPE_RC5_SZ)
+#define RC_BIT_JVC BIT_ULL(RC_TYPE_JVC)
+#define RC_BIT_SONY12 BIT_ULL(RC_TYPE_SONY12)
+#define RC_BIT_SONY15 BIT_ULL(RC_TYPE_SONY15)
+#define RC_BIT_SONY20 BIT_ULL(RC_TYPE_SONY20)
+#define RC_BIT_NEC BIT_ULL(RC_TYPE_NEC)
+#define RC_BIT_NECX BIT_ULL(RC_TYPE_NECX)
+#define RC_BIT_NEC32 BIT_ULL(RC_TYPE_NEC32)
+#define RC_BIT_SANYO BIT_ULL(RC_TYPE_SANYO)
+#define RC_BIT_MCIR2_KBD BIT_ULL(RC_TYPE_MCIR2_KBD)
+#define RC_BIT_MCIR2_MSE BIT_ULL(RC_TYPE_MCIR2_MSE)
+#define RC_BIT_RC6_0 BIT_ULL(RC_TYPE_RC6_0)
+#define RC_BIT_RC6_6A_20 BIT_ULL(RC_TYPE_RC6_6A_20)
+#define RC_BIT_RC6_6A_24 BIT_ULL(RC_TYPE_RC6_6A_24)
+#define RC_BIT_RC6_6A_32 BIT_ULL(RC_TYPE_RC6_6A_32)
+#define RC_BIT_RC6_MCE BIT_ULL(RC_TYPE_RC6_MCE)
+#define RC_BIT_SHARP BIT_ULL(RC_TYPE_SHARP)
+#define RC_BIT_XMP BIT_ULL(RC_TYPE_XMP)
+#define RC_BIT_CEC BIT_ULL(RC_TYPE_CEC)
#define RC_BIT_ALL (RC_BIT_UNKNOWN | RC_BIT_OTHER | \
RC_BIT_RC5 | RC_BIT_RC5X_20 | RC_BIT_RC5_SZ | \
RC_BIT_JVC | \
RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20 | \
RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32 | \
- RC_BIT_SANYO | RC_BIT_MCE_KBD | RC_BIT_RC6_0 | \
- RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 | \
+ RC_BIT_SANYO | \
+ RC_BIT_MCIR2_KBD | RC_BIT_MCIR2_MSE | \
+ RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 | \
RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE | RC_BIT_SHARP | \
RC_BIT_XMP | RC_BIT_CEC)
/* All rc protocols for which we have decoders */
@@ -101,8 +105,8 @@ enum rc_type {
RC_BIT_JVC | \
RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20 | \
RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32 | \
- RC_BIT_SANYO | RC_BIT_MCE_KBD | RC_BIT_RC6_0 | \
- RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 | \
+ RC_BIT_SANYO | RC_BIT_MCIR2_KBD | RC_BIT_MCIR2_MSE | \
+ RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 | \
RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE | RC_BIT_SHARP | \
RC_BIT_XMP)
@@ -111,7 +115,7 @@ enum rc_type {
RC_BIT_JVC | \
RC_BIT_SONY12 | RC_BIT_SONY15 | RC_BIT_SONY20 | \
RC_BIT_NEC | RC_BIT_NECX | RC_BIT_NEC32 | \
- RC_BIT_SANYO | \
+ RC_BIT_SANYO | RC_BIT_MCIR2_KBD | RC_BIT_MCIR2_MSE | \
RC_BIT_RC6_0 | RC_BIT_RC6_6A_20 | RC_BIT_RC6_6A_24 | \
RC_BIT_RC6_6A_32 | RC_BIT_RC6_MCE | \
RC_BIT_SHARP)
@@ -255,7 +259,6 @@ struct rc_map *rc_map_get(const char *name);
#define RC_MAP_KWORLD_PC150U "rc-kworld-pc150u"
#define RC_MAP_KWORLD_PLUS_TV_ANALOG "rc-kworld-plus-tv-analog"
#define RC_MAP_LEADTEK_Y04G0051 "rc-leadtek-y04g0051"
-#define RC_MAP_LIRC "rc-lirc"
#define RC_MAP_LME2510 "rc-lme2510"
#define RC_MAP_MANLI "rc-manli"
#define RC_MAP_MEDION_X10 "rc-medion-x10"
diff --git a/include/media/soc_camera.h b/include/media/soc_camera.h
index 1a15c3e4efd3..4d8cb0796bc6 100644
--- a/include/media/soc_camera.h
+++ b/include/media/soc_camera.h
@@ -17,7 +17,6 @@
#include <linux/mutex.h>
#include <linux/pm.h>
#include <linux/videodev2.h>
-#include <media/videobuf-core.h>
#include <media/videobuf2-v4l2.h>
#include <media/v4l2-async.h>
#include <media/v4l2-ctrls.h>
@@ -55,10 +54,7 @@ struct soc_camera_device {
/* Asynchronous subdevice management */
struct soc_camera_async_client *sasc;
/* video buffer queue */
- union {
- struct videobuf_queue vb_vidq;
- struct vb2_queue vb2_vidq;
- };
+ struct vb2_queue vb2_vidq;
};
/* Host supports programmable stride */
@@ -114,11 +110,8 @@ struct soc_camera_host_ops {
int (*set_liveselection)(struct soc_camera_device *, struct v4l2_selection *);
int (*set_fmt)(struct soc_camera_device *, struct v4l2_format *);
int (*try_fmt)(struct soc_camera_device *, struct v4l2_format *);
- void (*init_videobuf)(struct videobuf_queue *,
- struct soc_camera_device *);
int (*init_videobuf2)(struct vb2_queue *,
struct soc_camera_device *);
- int (*reqbufs)(struct soc_camera_device *, struct v4l2_requestbuffers *);
int (*querycap)(struct soc_camera_host *, struct v4l2_capability *);
int (*set_bus_param)(struct soc_camera_device *);
int (*get_parm)(struct soc_camera_device *, struct v4l2_streamparm *);
@@ -396,11 +389,6 @@ static inline struct soc_camera_device *soc_camera_from_vb2q(const struct vb2_qu
return container_of(vq, struct soc_camera_device, vb2_vidq);
}
-static inline struct soc_camera_device *soc_camera_from_vbq(const struct videobuf_queue *vq)
-{
- return container_of(vq, struct soc_camera_device, vb_vidq);
-}
-
static inline u32 soc_camera_grp_id(const struct soc_camera_device *icd)
{
return (icd->iface << 8) | (icd->devnum + 1);
diff --git a/include/media/tveeprom.h b/include/media/tveeprom.h
index c56501ee0484..630bcf3d8885 100644
--- a/include/media/tveeprom.h
+++ b/include/media/tveeprom.h
@@ -94,13 +94,12 @@ struct tveeprom {
* of the eeprom previously filled at
* @eeprom_data field.
*
- * @c: I2C client struct
* @tvee: Struct to where the eeprom parsed data will be filled;
* @eeprom_data: Array with the contents of the eeprom_data. It should
* contain 256 bytes filled with the contents of the
* eeprom read from the Hauppauge device.
*/
-void tveeprom_hauppauge_analog(struct i2c_client *c, struct tveeprom *tvee,
+void tveeprom_hauppauge_analog(struct tveeprom *tvee,
unsigned char *eeprom_data);
/**
diff --git a/include/media/v4l2-ioctl.h b/include/media/v4l2-ioctl.h
index 6cd94e5ee113..bd5312118013 100644
--- a/include/media/v4l2-ioctl.h
+++ b/include/media/v4l2-ioctl.h
@@ -44,6 +44,9 @@ struct v4l2_fh;
* @vidioc_enum_fmt_sdr_out: pointer to the function that implements
* :ref:`VIDIOC_ENUM_FMT <vidioc_enum_fmt>` ioctl logic
* for Software Defined Radio output
+ * @vidioc_enum_fmt_meta_cap: pointer to the function that implements
+ * :ref:`VIDIOC_ENUM_FMT <vidioc_enum_fmt>` ioctl logic
+ * for metadata capture
* @vidioc_g_fmt_vid_cap: pointer to the function that implements
* :ref:`VIDIOC_G_FMT <vidioc_g_fmt>` ioctl logic for video capture
* in single plane mode
@@ -74,6 +77,8 @@ struct v4l2_fh;
* @vidioc_g_fmt_sdr_out: pointer to the function that implements
* :ref:`VIDIOC_G_FMT <vidioc_g_fmt>` ioctl logic for Software Defined
* Radio output
+ * @vidioc_g_fmt_meta_cap: pointer to the function that implements
+ * :ref:`VIDIOC_G_FMT <vidioc_g_fmt>` ioctl logic for metadata capture
* @vidioc_s_fmt_vid_cap: pointer to the function that implements
* :ref:`VIDIOC_S_FMT <vidioc_g_fmt>` ioctl logic for video capture
* in single plane mode
@@ -104,6 +109,8 @@ struct v4l2_fh;
* @vidioc_s_fmt_sdr_out: pointer to the function that implements
* :ref:`VIDIOC_S_FMT <vidioc_g_fmt>` ioctl logic for Software Defined
* Radio output
+ * @vidioc_s_fmt_meta_cap: pointer to the function that implements
+ * :ref:`VIDIOC_S_FMT <vidioc_g_fmt>` ioctl logic for metadata capture
* @vidioc_try_fmt_vid_cap: pointer to the function that implements
* :ref:`VIDIOC_TRY_FMT <vidioc_g_fmt>` ioctl logic for video capture
* in single plane mode
@@ -136,6 +143,8 @@ struct v4l2_fh;
* @vidioc_try_fmt_sdr_out: pointer to the function that implements
* :ref:`VIDIOC_TRY_FMT <vidioc_g_fmt>` ioctl logic for Software Defined
* Radio output
+ * @vidioc_try_fmt_meta_cap: pointer to the function that implements
+ * :ref:`VIDIOC_TRY_FMT <vidioc_g_fmt>` ioctl logic for metadata capture
* @vidioc_reqbufs: pointer to the function that implements
* :ref:`VIDIOC_REQBUFS <vidioc_reqbufs>` ioctl
* @vidioc_querybuf: pointer to the function that implements
@@ -306,6 +315,8 @@ struct v4l2_ioctl_ops {
struct v4l2_fmtdesc *f);
int (*vidioc_enum_fmt_sdr_out)(struct file *file, void *fh,
struct v4l2_fmtdesc *f);
+ int (*vidioc_enum_fmt_meta_cap)(struct file *file, void *fh,
+ struct v4l2_fmtdesc *f);
/* VIDIOC_G_FMT handlers */
int (*vidioc_g_fmt_vid_cap)(struct file *file, void *fh,
@@ -332,6 +343,8 @@ struct v4l2_ioctl_ops {
struct v4l2_format *f);
int (*vidioc_g_fmt_sdr_out)(struct file *file, void *fh,
struct v4l2_format *f);
+ int (*vidioc_g_fmt_meta_cap)(struct file *file, void *fh,
+ struct v4l2_format *f);
/* VIDIOC_S_FMT handlers */
int (*vidioc_s_fmt_vid_cap)(struct file *file, void *fh,
@@ -358,6 +371,8 @@ struct v4l2_ioctl_ops {
struct v4l2_format *f);
int (*vidioc_s_fmt_sdr_out)(struct file *file, void *fh,
struct v4l2_format *f);
+ int (*vidioc_s_fmt_meta_cap)(struct file *file, void *fh,
+ struct v4l2_format *f);
/* VIDIOC_TRY_FMT handlers */
int (*vidioc_try_fmt_vid_cap)(struct file *file, void *fh,
@@ -384,6 +399,8 @@ struct v4l2_ioctl_ops {
struct v4l2_format *f);
int (*vidioc_try_fmt_sdr_out)(struct file *file, void *fh,
struct v4l2_format *f);
+ int (*vidioc_try_fmt_meta_cap)(struct file *file, void *fh,
+ struct v4l2_format *f);
/* Buffer handlers */
int (*vidioc_reqbufs)(struct file *file, void *fh,
diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h
index ac5898a55fd9..cb97c224be73 100644
--- a/include/media/videobuf2-core.h
+++ b/include/media/videobuf2-core.h
@@ -305,16 +305,16 @@ struct vb2_buffer {
* buffer in \*num_planes, the size of each plane should be
* set in the sizes\[\] array and optional per-plane
* allocator specific device in the alloc_devs\[\] array.
- * When called from VIDIOC_REQBUFS,() \*num_planes == 0,
+ * When called from VIDIOC_REQBUFS(), \*num_planes == 0,
* the driver has to use the currently configured format to
* determine the plane sizes and \*num_buffers is the total
* number of buffers that are being allocated. When called
- * from VIDIOC_CREATE_BUFS,() \*num_planes != 0 and it
+ * from VIDIOC_CREATE_BUFS(), \*num_planes != 0 and it
* describes the requested number of planes and sizes\[\]
- * contains the requested plane sizes. If either
- * \*num_planes or the requested sizes are invalid callback
- * must return %-EINVAL. In this case \*num_buffers are
- * being allocated additionally to q->num_buffers.
+ * contains the requested plane sizes. In this case
+ * \*num_buffers are being allocated additionally to
+ * q->num_buffers. If either \*num_planes or the requested
+ * sizes are invalid callback must return %-EINVAL.
* @wait_prepare: release any locks taken while calling vb2 functions;
* it is called before an ioctl needs to wait for a new
* buffer to arrive; required to avoid a deadlock in
diff --git a/include/media/videobuf2-memops.h b/include/media/videobuf2-memops.h
index 36565c7acb54..a6ed091b79ce 100644
--- a/include/media/videobuf2-memops.h
+++ b/include/media/videobuf2-memops.h
@@ -16,6 +16,7 @@
#include <media/videobuf2-v4l2.h>
#include <linux/mm.h>
+#include <linux/refcount.h>
/**
* struct vb2_vmarea_handler - common vma refcount tracking handler
@@ -25,7 +26,7 @@
* @arg: argument for @put callback
*/
struct vb2_vmarea_handler {
- atomic_t *refcount;
+ refcount_t *refcount;
void (*put)(void *arg);
void *arg;
};
diff --git a/include/misc/charlcd.h b/include/misc/charlcd.h
new file mode 100644
index 000000000000..23f61850f363
--- /dev/null
+++ b/include/misc/charlcd.h
@@ -0,0 +1,42 @@
+/*
+ * Character LCD driver for Linux
+ *
+ * Copyright (C) 2000-2008, Willy Tarreau <w@1wt.eu>
+ * Copyright (C) 2016-2017 Glider bvba
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+struct charlcd {
+ const struct charlcd_ops *ops;
+ const unsigned char *char_conv; /* Optional */
+
+ int ifwidth; /* 4-bit or 8-bit (default) */
+ int height;
+ int width;
+ int bwidth; /* Default set by charlcd_alloc() */
+ int hwidth; /* Default set by charlcd_alloc() */
+
+ void *drvdata; /* Set by charlcd_alloc() */
+};
+
+struct charlcd_ops {
+ /* Required */
+ void (*write_cmd)(struct charlcd *lcd, int cmd);
+ void (*write_data)(struct charlcd *lcd, int data);
+
+ /* Optional */
+ void (*write_cmd_raw4)(struct charlcd *lcd, int cmd); /* 4-bit only */
+ void (*clear_fast)(struct charlcd *lcd);
+ void (*backlight)(struct charlcd *lcd, int on);
+};
+
+struct charlcd *charlcd_alloc(unsigned int drvdata_size);
+
+int charlcd_register(struct charlcd *lcd);
+int charlcd_unregister(struct charlcd *lcd);
+
+void charlcd_poke(struct charlcd *lcd);
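A hedged sketch of a display driver hooking into this interface; my_lcd_ops, struct my_lcd_priv and the 16x2 geometry are assumptions, and charlcd_alloc() is assumed to return NULL on allocation failure.

	static int my_lcd_probe(struct platform_device *pdev)
	{
		struct charlcd *lcd;

		/* Reserve room for the hypothetical struct my_lcd_priv as drvdata. */
		lcd = charlcd_alloc(sizeof(struct my_lcd_priv));
		if (!lcd)
			return -ENOMEM;

		lcd->ops = &my_lcd_ops;
		lcd->width = 16;
		lcd->height = 2;

		return charlcd_register(lcd);
	}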
diff --git a/include/net/6lowpan.h b/include/net/6lowpan.h
index 5ab4c9901ccc..a71378007e61 100644
--- a/include/net/6lowpan.h
+++ b/include/net/6lowpan.h
@@ -198,6 +198,21 @@ static inline void lowpan_iphc_uncompress_eui64_lladdr(struct in6_addr *ipaddr,
ipaddr->s6_addr[8] ^= 0x02;
}
+static inline void lowpan_iphc_uncompress_eui48_lladdr(struct in6_addr *ipaddr,
+ const void *lladdr)
+{
+ /* fe:80::XXXX:XXff:feXX:XXXX
+ * \_________________/
+ * hwaddr
+ */
+ ipaddr->s6_addr[0] = 0xFE;
+ ipaddr->s6_addr[1] = 0x80;
+ memcpy(&ipaddr->s6_addr[8], lladdr, 3);
+ ipaddr->s6_addr[11] = 0xFF;
+ ipaddr->s6_addr[12] = 0xFE;
+ memcpy(&ipaddr->s6_addr[13], lladdr + 3, 3);
+}
+
#ifdef DEBUG
/* print data in line */
static inline void raw_dump_inline(const char *caller, char *msg,
diff --git a/include/net/addrconf.h b/include/net/addrconf.h
index 17c6fd84e287..b43a4eec3cec 100644
--- a/include/net/addrconf.h
+++ b/include/net/addrconf.h
@@ -20,6 +20,8 @@
#define ADDRCONF_TIMER_FUZZ (HZ / 4)
#define ADDRCONF_TIMER_FUZZ_MAX (HZ)
+#define ADDRCONF_NOTIFY_PRIORITY 0
+
#include <linux/in.h>
#include <linux/in6.h>
@@ -103,12 +105,24 @@ int addrconf_prefix_rcv_add_addr(struct net *net, struct net_device *dev,
u32 addr_flags, bool sllao, bool tokenized,
__u32 valid_lft, u32 prefered_lft);
+static inline void addrconf_addr_eui48_base(u8 *eui, const char *const addr)
+{
+ memcpy(eui, addr, 3);
+ eui[3] = 0xFF;
+ eui[4] = 0xFE;
+ memcpy(eui + 5, addr + 3, 3);
+}
+
+static inline void addrconf_addr_eui48(u8 *eui, const char *const addr)
+{
+ addrconf_addr_eui48_base(eui, addr);
+ eui[0] ^= 2;
+}
+
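A worked example of the helper above, using the hypothetical MAC address 02:11:22:33:44:55: addrconf_addr_eui48() inserts ff:fe in the middle and flips the universal/local bit, yielding the interface identifier 00:11:22:ff:fe:33:44:55.

	static void example_eui48(void)
	{
		/* Hypothetical MAC address, for illustration only. */
		static const char mac[ETH_ALEN] = { 0x02, 0x11, 0x22, 0x33, 0x44, 0x55 };
		u8 eui[8];

		addrconf_addr_eui48(eui, mac);
		/* eui == { 0x00, 0x11, 0x22, 0xff, 0xfe, 0x33, 0x44, 0x55 } */
	}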
static inline int addrconf_ifid_eui48(u8 *eui, struct net_device *dev)
{
if (dev->addr_len != ETH_ALEN)
return -1;
- memcpy(eui, dev->dev_addr, 3);
- memcpy(eui + 5, dev->dev_addr + 3, 3);
/*
* The zSeries OSA network cards can be shared among various
@@ -123,14 +137,16 @@ static inline int addrconf_ifid_eui48(u8 *eui, struct net_device *dev)
* case. Hence the resulting interface identifier has local
* scope according to RFC2373.
*/
+
+ addrconf_addr_eui48_base(eui, dev->dev_addr);
+
if (dev->dev_id) {
eui[3] = (dev->dev_id >> 8) & 0xFF;
eui[4] = dev->dev_id & 0xFF;
} else {
- eui[3] = 0xFF;
- eui[4] = 0xFE;
eui[0] ^= 2;
}
+
return 0;
}
@@ -262,8 +278,8 @@ int register_inet6addr_notifier(struct notifier_block *nb);
int unregister_inet6addr_notifier(struct notifier_block *nb);
int inet6addr_notifier_call_chain(unsigned long val, void *v);
-void inet6_netconf_notify_devconf(struct net *net, int type, int ifindex,
- struct ipv6_devconf *devconf);
+void inet6_netconf_notify_devconf(struct net *net, int event, int type,
+ int ifindex, struct ipv6_devconf *devconf);
/**
* __in6_dev_get - get inet6_dev pointer from netdevice
diff --git a/include/net/af_rxrpc.h b/include/net/af_rxrpc.h
index 1061a472a3e3..b5f5187f488c 100644
--- a/include/net/af_rxrpc.h
+++ b/include/net/af_rxrpc.h
@@ -39,7 +39,7 @@ int rxrpc_kernel_send_data(struct socket *, struct rxrpc_call *,
struct msghdr *, size_t);
int rxrpc_kernel_recv_data(struct socket *, struct rxrpc_call *,
void *, size_t, size_t *, bool, u32 *);
-void rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
+bool rxrpc_kernel_abort_call(struct socket *, struct rxrpc_call *,
u32, int, const char *);
void rxrpc_kernel_end_call(struct socket *, struct rxrpc_call *);
void rxrpc_kernel_get_peer(struct socket *, struct rxrpc_call *,
diff --git a/include/net/af_vsock.h b/include/net/af_vsock.h
index f32ed9ac181a..f9fb566e75cf 100644
--- a/include/net/af_vsock.h
+++ b/include/net/af_vsock.h
@@ -188,4 +188,17 @@ struct sock *vsock_find_connected_socket(struct sockaddr_vm *src,
void vsock_remove_sock(struct vsock_sock *vsk);
void vsock_for_each_connected_socket(void (*fn)(struct sock *sk));
+/**** TAP ****/
+
+struct vsock_tap {
+ struct net_device *dev;
+ struct module *module;
+ struct list_head list;
+};
+
+int vsock_init_tap(void);
+int vsock_add_tap(struct vsock_tap *vt);
+int vsock_remove_tap(struct vsock_tap *vt);
+void vsock_deliver_tap(struct sk_buff *build_skb(void *opaque), void *opaque);
+
#endif /* __AF_VSOCK_H__ */
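
A hedged sketch of how a monitoring driver might attach a tap; only vsock_add_tap()/vsock_remove_tap() and the struct layout come from this header, the surrounding net_device open/stop handlers are assumptions.

    #include <linux/netdevice.h>
    #include <linux/module.h>
    #include <net/af_vsock.h>

    static struct vsock_tap my_tap;

    static int my_tap_open(struct net_device *dev)
    {
            my_tap.dev = dev;
            my_tap.module = THIS_MODULE;

            /* traffic passed through vsock_deliver_tap() is now handed to this tap */
            return vsock_add_tap(&my_tap);
    }

    static int my_tap_stop(struct net_device *dev)
    {
            return vsock_remove_tap(&my_tap);
    }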
diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h
index 5ee3c689c863..0697fd413087 100644
--- a/include/net/bluetooth/l2cap.h
+++ b/include/net/bluetooth/l2cap.h
@@ -282,7 +282,7 @@ struct l2cap_conn_rsp {
#define L2CAP_CR_BAD_KEY_SIZE 0x0007
#define L2CAP_CR_ENCRYPTION 0x0008
#define L2CAP_CR_INVALID_SCID 0x0009
-#define L2CAP_CR_SCID_IN_USE 0x0010
+#define L2CAP_CR_SCID_IN_USE 0x000A
/* connect/create channel status */
#define L2CAP_CS_NO_INFO 0x0000
diff --git a/include/net/bluetooth/rfcomm.h b/include/net/bluetooth/rfcomm.h
index 4190af53a46a..da4acefe39c8 100644
--- a/include/net/bluetooth/rfcomm.h
+++ b/include/net/bluetooth/rfcomm.h
@@ -21,6 +21,8 @@
SOFTWARE IS DISCLAIMED.
*/
+#include <linux/refcount.h>
+
#ifndef __RFCOMM_H
#define __RFCOMM_H
@@ -174,7 +176,7 @@ struct rfcomm_dlc {
struct mutex lock;
unsigned long state;
unsigned long flags;
- atomic_t refcnt;
+ refcount_t refcnt;
u8 dlci;
u8 addr;
u8 priority;
@@ -247,12 +249,12 @@ struct rfcomm_dlc *rfcomm_dlc_exists(bdaddr_t *src, bdaddr_t *dst, u8 channel);
static inline void rfcomm_dlc_hold(struct rfcomm_dlc *d)
{
- atomic_inc(&d->refcnt);
+ refcount_inc(&d->refcnt);
}
static inline void rfcomm_dlc_put(struct rfcomm_dlc *d)
{
- if (atomic_dec_and_test(&d->refcnt))
+ if (refcount_dec_and_test(&d->refcnt))
rfcomm_dlc_free(d);
}
diff --git a/include/net/bonding.h b/include/net/bonding.h
index 3c857778a6ca..b00508d22e0a 100644
--- a/include/net/bonding.h
+++ b/include/net/bonding.h
@@ -153,7 +153,8 @@ struct slave {
unsigned long last_link_up;
unsigned long last_rx;
unsigned long target_last_arp_rx[BOND_MAX_ARP_TARGETS];
- s8 link; /* one of BOND_LINK_XXXX */
+ s8 link; /* one of BOND_LINK_XXXX */
+ s8 link_new_state; /* one of BOND_LINK_XXXX */
s8 new_link;
u8 backup:1, /* indicates backup slave. Value corresponds with
BOND_STATE_ACTIVE and BOND_STATE_BACKUP */
@@ -165,7 +166,7 @@ struct slave {
u32 link_failure_count;
u32 speed;
u16 queue_id;
- u8 perm_hwaddr[ETH_ALEN];
+ u8 perm_hwaddr[MAX_ADDR_LEN];
struct ad_slave_info *ad_info;
struct tlb_slave_info tlb_info;
#ifdef CONFIG_NET_POLL_CONTROLLER
@@ -401,6 +402,16 @@ static inline bool bond_slave_can_tx(struct slave *slave)
bond_is_active_slave(slave);
}
+static inline void bond_hw_addr_copy(u8 *dst, const u8 *src, unsigned int len)
+{
+ if (len == ETH_ALEN) {
+ ether_addr_copy(dst, src);
+ return;
+ }
+
+ memcpy(dst, src, len);
+}
+
#define BOND_PRI_RESELECT_ALWAYS 0
#define BOND_PRI_RESELECT_BETTER 1
#define BOND_PRI_RESELECT_FAILURE 2
@@ -504,13 +515,17 @@ static inline bool bond_is_slave_inactive(struct slave *slave)
return slave->inactive;
}
-static inline void bond_set_slave_link_state(struct slave *slave, int state,
- bool notify)
+static inline void bond_propose_link_state(struct slave *slave, int state)
{
- if (slave->link == state)
+ slave->link_new_state = state;
+}
+
+static inline void bond_commit_link_state(struct slave *slave, bool notify)
+{
+ if (slave->link == slave->link_new_state)
return;
- slave->link = state;
+ slave->link = slave->link_new_state;
if (notify) {
bond_queue_slave_event(slave);
bond_lower_state_changed(slave);
@@ -523,6 +538,13 @@ static inline void bond_set_slave_link_state(struct slave *slave, int state,
}
}
+static inline void bond_set_slave_link_state(struct slave *slave, int state,
+ bool notify)
+{
+ bond_propose_link_state(slave, state);
+ bond_commit_link_state(slave, notify);
+}
+
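
The split lets the link monitor first record a proposed state for each slave and only touch slave->link (and send notifications) later, e.g. once RTNL is held, while bond_set_slave_link_state() keeps the old one-shot behaviour by doing both at once. Roughly, as a usage sketch rather than code from the patch:

    /* inspection phase: remember the desired state, do not apply yet */
    bond_propose_link_state(slave, BOND_LINK_FAIL);

    /* commit phase (e.g. after taking RTNL): apply and notify */
    bond_commit_link_state(slave, true);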
static inline void bond_slave_link_notify(struct bonding *bond)
{
struct list_head *iter;
@@ -592,6 +614,7 @@ struct bond_vlan_tag *bond_verify_device_path(struct net_device *start_dev,
int level);
int bond_update_slave_arr(struct bonding *bond, struct slave *skipslave);
void bond_slave_arr_work_rearm(struct bonding *bond, unsigned long delay);
+void bond_work_init_all(struct bonding *bond);
#ifdef CONFIG_PROC_FS
void bond_create_proc_entry(struct bonding *bond);
diff --git a/include/net/busy_poll.h b/include/net/busy_poll.h
index c0452de83086..8ffd434676b7 100644
--- a/include/net/busy_poll.h
+++ b/include/net/busy_poll.h
@@ -35,83 +35,101 @@ struct napi_struct;
extern unsigned int sysctl_net_busy_read __read_mostly;
extern unsigned int sysctl_net_busy_poll __read_mostly;
+/* 0 - Reserved to indicate value not set
+ * 1..NR_CPUS - Reserved for sender_cpu
+ * NR_CPUS+1..~0 - Region available for NAPI IDs
+ */
+#define MIN_NAPI_ID ((unsigned int)(NR_CPUS + 1))
+
static inline bool net_busy_loop_on(void)
{
return sysctl_net_busy_poll;
}
-static inline u64 busy_loop_us_clock(void)
+static inline bool sk_can_busy_loop(const struct sock *sk)
{
- return local_clock() >> 10;
+ return sk->sk_ll_usec && !signal_pending(current);
}
-static inline unsigned long sk_busy_loop_end_time(struct sock *sk)
-{
- return busy_loop_us_clock() + ACCESS_ONCE(sk->sk_ll_usec);
-}
+bool sk_busy_loop_end(void *p, unsigned long start_time);
-/* in poll/select we use the global sysctl_net_ll_poll value */
-static inline unsigned long busy_loop_end_time(void)
+void napi_busy_loop(unsigned int napi_id,
+ bool (*loop_end)(void *, unsigned long),
+ void *loop_end_arg);
+
+#else /* CONFIG_NET_RX_BUSY_POLL */
+static inline unsigned long net_busy_loop_on(void)
{
- return busy_loop_us_clock() + ACCESS_ONCE(sysctl_net_busy_poll);
+ return 0;
}
-static inline bool sk_can_busy_loop(const struct sock *sk)
+static inline bool sk_can_busy_loop(struct sock *sk)
{
- return sk->sk_ll_usec && sk->sk_napi_id && !signal_pending(current);
+ return false;
}
+#endif /* CONFIG_NET_RX_BUSY_POLL */
-static inline bool busy_loop_timeout(unsigned long end_time)
+static inline unsigned long busy_loop_current_time(void)
{
- unsigned long now = busy_loop_us_clock();
-
- return time_after(now, end_time);
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ return (unsigned long)(local_clock() >> 10);
+#else
+ return 0;
+#endif
}
-bool sk_busy_loop(struct sock *sk, int nonblock);
-
-/* used in the NIC receive handler to mark the skb */
-static inline void skb_mark_napi_id(struct sk_buff *skb,
- struct napi_struct *napi)
+/* in poll/select we use the global sysctl_net_busy_poll value */
+static inline bool busy_loop_timeout(unsigned long start_time)
{
- skb->napi_id = napi->napi_id;
-}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ unsigned long bp_usec = READ_ONCE(sysctl_net_busy_poll);
+ if (bp_usec) {
+ unsigned long end_time = start_time + bp_usec;
+ unsigned long now = busy_loop_current_time();
-#else /* CONFIG_NET_RX_BUSY_POLL */
-static inline unsigned long net_busy_loop_on(void)
-{
- return 0;
+ return time_after(now, end_time);
+ }
+#endif
+ return true;
}
-static inline unsigned long busy_loop_end_time(void)
+static inline bool sk_busy_loop_timeout(struct sock *sk,
+ unsigned long start_time)
{
- return 0;
-}
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ unsigned long bp_usec = READ_ONCE(sk->sk_ll_usec);
-static inline bool sk_can_busy_loop(struct sock *sk)
-{
- return false;
-}
+ if (bp_usec) {
+ unsigned long end_time = start_time + bp_usec;
+ unsigned long now = busy_loop_current_time();
-static inline void skb_mark_napi_id(struct sk_buff *skb,
- struct napi_struct *napi)
-{
+ return time_after(now, end_time);
+ }
+#endif
+ return true;
}
-static inline bool busy_loop_timeout(unsigned long end_time)
+static inline void sk_busy_loop(struct sock *sk, int nonblock)
{
- return true;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ unsigned int napi_id = READ_ONCE(sk->sk_napi_id);
+
+ if (napi_id >= MIN_NAPI_ID)
+ napi_busy_loop(napi_id, nonblock ? NULL : sk_busy_loop_end, sk);
+#endif
}
-static inline bool sk_busy_loop(struct sock *sk, int nonblock)
+/* used in the NIC receive handler to mark the skb */
+static inline void skb_mark_napi_id(struct sk_buff *skb,
+ struct napi_struct *napi)
{
- return false;
+#ifdef CONFIG_NET_RX_BUSY_POLL
+ skb->napi_id = napi->napi_id;
+#endif
}
-#endif /* CONFIG_NET_RX_BUSY_POLL */
-
/* used in the protocol handler to propagate the napi_id to the socket */
static inline void sk_mark_napi_id(struct sock *sk, const struct sk_buff *skb)
{
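
For orientation (this is not in the patch): after the rework, sk_busy_loop() is a thin wrapper that resolves the socket's NAPI ID and calls napi_busy_loop() with sk_busy_loop_end() as the termination callback. Other callers can supply their own loop_end predicate; a minimal sketch, assuming CONFIG_NET_RX_BUSY_POLL and a caller that simply honours the global budget:

    #include <net/busy_poll.h>

    static bool my_loop_end(void *arg, unsigned long start_time)
    {
            /* stop spinning once the sysctl_net_busy_poll budget has elapsed */
            return busy_loop_timeout(start_time);
    }

    static void my_busy_poll(unsigned int napi_id)
    {
            if (napi_id >= MIN_NAPI_ID)
                    napi_busy_loop(napi_id, my_loop_end, NULL);
    }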
diff --git a/include/net/cfg80211.h b/include/net/cfg80211.h
index ead1aa6d003e..b083e6cbae8c 100644
--- a/include/net/cfg80211.h
+++ b/include/net/cfg80211.h
@@ -363,6 +363,8 @@ static inline void wiphy_read_of_freq_limits(struct wiphy *wiphy)
/**
* struct vif_params - describes virtual interface parameters
+ * @flags: monitor interface flags, unchanged if 0, otherwise
+ * %MONITOR_FLAG_CHANGED will be set
* @use_4addr: use 4-address frames
* @macaddr: address to use for this virtual interface.
* If this parameter is set to zero address the driver may
@@ -370,13 +372,17 @@ static inline void wiphy_read_of_freq_limits(struct wiphy *wiphy)
* This feature is only fully supported by drivers that enable the
* %NL80211_FEATURE_MAC_ON_CREATE flag. Others may support creating
* only p2p devices with specified MAC.
- * @vht_mumimo_groups: MU-MIMO groupID. used for monitoring only
- * packets belonging to that MU-MIMO groupID.
+ * @vht_mumimo_groups: MU-MIMO groupID, used for monitoring MU-MIMO packets
+ * belonging to that MU-MIMO groupID; %NULL if not changed
+ * @vht_mumimo_follow_addr: MU-MIMO follow address, used for monitoring
+ * MU-MIMO packets going to the specified station; %NULL if not changed
*/
struct vif_params {
+ u32 flags;
int use_4addr;
u8 macaddr[ETH_ALEN];
- u8 vht_mumimo_groups[VHT_MUMIMO_GROUPS_DATA_LEN];
+ const u8 *vht_mumimo_groups;
+ const u8 *vht_mumimo_follow_addr;
};
/**
@@ -1007,9 +1013,9 @@ enum rate_info_flags {
* @RATE_INFO_BW_160: 160 MHz bandwidth
*/
enum rate_info_bw {
+ RATE_INFO_BW_20 = 0,
RATE_INFO_BW_5,
RATE_INFO_BW_10,
- RATE_INFO_BW_20,
RATE_INFO_BW_40,
RATE_INFO_BW_80,
RATE_INFO_BW_160,
@@ -1211,6 +1217,7 @@ static inline int cfg80211_get_station(struct net_device *dev,
* Monitor interface configuration flags. Note that these must be the bits
* according to the nl80211 flags.
*
+ * @MONITOR_FLAG_CHANGED: set if the flags were changed
* @MONITOR_FLAG_FCSFAIL: pass frames with bad FCS
* @MONITOR_FLAG_PLCPFAIL: pass frames with bad PLCP
* @MONITOR_FLAG_CONTROL: pass control frames
@@ -1219,6 +1226,7 @@ static inline int cfg80211_get_station(struct net_device *dev,
* @MONITOR_FLAG_ACTIVE: active monitor, ACKs frames on its MAC address
*/
enum monitor_flags {
+ MONITOR_FLAG_CHANGED = 1<<__NL80211_MNTR_FLAG_INVALID,
MONITOR_FLAG_FCSFAIL = 1<<NL80211_MNTR_FLAG_FCSFAIL,
MONITOR_FLAG_PLCPFAIL = 1<<NL80211_MNTR_FLAG_PLCPFAIL,
MONITOR_FLAG_CONTROL = 1<<NL80211_MNTR_FLAG_CONTROL,
@@ -1605,11 +1613,15 @@ static inline void get_random_mask_addr(u8 *buf, const u8 *addr, const u8 *mask)
/**
* struct cfg80211_match_set - sets of attributes to match
*
- * @ssid: SSID to be matched; may be zero-length for no match (RSSI only)
+ * @ssid: SSID to be matched; may be zero-length in case of BSSID match
+ * or no match (RSSI only)
+ * @bssid: BSSID to be matched; may be all-zero BSSID in case of SSID match
+ * or no match (RSSI only)
* @rssi_thold: don't report scan results below this threshold (in s32 dBm)
*/
struct cfg80211_match_set {
struct cfg80211_ssid ssid;
+ u8 bssid[ETH_ALEN];
s32 rssi_thold;
};
@@ -1641,6 +1653,7 @@ struct cfg80211_bss_select_adjust {
/**
* struct cfg80211_sched_scan_request - scheduled scan request description
*
+ * @reqid: identifies this request.
* @ssids: SSIDs to scan for (passed in the probe_reqs in active scans)
* @n_ssids: number of SSIDs
* @n_channels: total number of channels to scan
@@ -1653,6 +1666,7 @@ struct cfg80211_bss_select_adjust {
* (others are filtered out).
* If omitted, all results are passed.
* @n_match_sets: number of match sets
+ * @report_results: indicates that results were reported for this request
* @wiphy: the wiphy this was for
* @dev: the interface
* @scan_start: start time of the scheduled scan
@@ -1669,6 +1683,8 @@ struct cfg80211_bss_select_adjust {
* @rcu_head: RCU callback used to free the struct
* @owner_nlportid: netlink portid of owner (if this is a request
* owned by a particular socket)
+ * @nl_owner_dead: netlink owner socket was closed - this request needs to be freed
+ * @list: for keeping list of requests.
* @delay: delay in seconds to use before starting the first scan
* cycle. The driver may ignore this parameter and start
* immediately (or at any other time), if this feature is not
@@ -1685,6 +1701,7 @@ struct cfg80211_bss_select_adjust {
* comparisons.
*/
struct cfg80211_sched_scan_request {
+ u64 reqid;
struct cfg80211_ssid *ssids;
int n_ssids;
u32 n_channels;
@@ -1710,8 +1727,11 @@ struct cfg80211_sched_scan_request {
struct wiphy *wiphy;
struct net_device *dev;
unsigned long scan_start;
+ bool report_results;
struct rcu_head rcu_head;
u32 owner_nlportid;
+ bool nl_owner_dead;
+ struct list_head list;
/* keep last */
struct ieee80211_channel *channels[0];
@@ -2073,6 +2093,19 @@ struct cfg80211_bss_selection {
* the BSSID of the current association, i.e., to the value that is
* included in the Current AP address field of the Reassociation Request
* frame.
+ * @fils_erp_username: EAP re-authentication protocol (ERP) username part of the
+ * NAI or %NULL if not specified. This is used to construct FILS wrapped
+ * data IE.
+ * @fils_erp_username_len: Length of @fils_erp_username in octets.
+ * @fils_erp_realm: EAP re-authentication protocol (ERP) realm part of NAI or
+ * %NULL if not specified. This specifies the domain name of the ER server and
+ * is used to construct FILS wrapped data IE.
+ * @fils_erp_realm_len: Length of @fils_erp_realm in octets.
+ * @fils_erp_next_seq_num: The next sequence number to use in the FILS ERP
+ * messages. This is also used to construct FILS wrapped data IE.
+ * @fils_erp_rrk: ERP re-authentication Root Key (rRK) used to derive additional
+ * keys in FILS or %NULL if not specified.
+ * @fils_erp_rrk_len: Length of @fils_erp_rrk in octets.
*/
struct cfg80211_connect_params {
struct ieee80211_channel *channel;
@@ -2098,6 +2131,13 @@ struct cfg80211_connect_params {
bool pbss;
struct cfg80211_bss_selection bss_select;
const u8 *prev_bssid;
+ const u8 *fils_erp_username;
+ size_t fils_erp_username_len;
+ const u8 *fils_erp_realm;
+ size_t fils_erp_realm_len;
+ u16 fils_erp_next_seq_num;
+ const u8 *fils_erp_rrk;
+ size_t fils_erp_rrk_len;
};
/**
@@ -2136,12 +2176,27 @@ enum wiphy_params_flags {
* This structure is passed to the set/del_pmksa() method for PMKSA
* caching.
*
- * @bssid: The AP's BSSID.
- * @pmkid: The PMK material itself.
+ * @bssid: The AP's BSSID (may be %NULL).
+ * @pmkid: The identifier to refer a PMKSA.
+ * @pmk: The PMK for the PMKSA identified by @pmkid. This is used for key
+ * derivation by a FILS STA. Otherwise, %NULL.
+ * @pmk_len: Length of the @pmk. The length of @pmk can differ depending on
+ * the hash algorithm used to generate this.
+ * @ssid: SSID to specify the ESS within which a PMKSA is valid when using FILS
+ * cache identifier (may be %NULL).
+ * @ssid_len: Length of the @ssid in octets.
+ * @cache_id: 2-octet cache identifier advertised by a FILS AP identifying the
+ * scope of PMKSA. This is valid only if @ssid_len is non-zero (may be
+ * %NULL).
*/
struct cfg80211_pmksa {
const u8 *bssid;
const u8 *pmkid;
+ const u8 *pmk;
+ size_t pmk_len;
+ const u8 *ssid;
+ size_t ssid_len;
+ const u8 *cache_id;
};
/**
@@ -2633,8 +2688,7 @@ struct cfg80211_nan_func {
* indication of requesting reassociation.
* In both the driver-initiated and new connect() call initiated roaming
* cases, the result of roaming is indicated with a call to
- * cfg80211_roamed() or cfg80211_roamed_bss().
- * (invoked with the wireless_dev mutex held)
+ * cfg80211_roamed(). (invoked with the wireless_dev mutex held)
* @update_connect_params: Update the connect parameters while connected to a
* BSS. The updated parameters can be used by driver/firmware for
* subsequent BSS selection (roaming) decisions and to form the
@@ -2712,15 +2766,20 @@ struct cfg80211_nan_func {
* the current level is above/below the configured threshold; this may
* need some care when the configuration is changed (without first being
* disabled.)
+ * @set_cqm_rssi_range_config: Configure two RSSI thresholds in the
+ * connection quality monitor. An event is to be sent only when the
+ * signal level is found to be outside the two values. The driver should
+ * set %NL80211_EXT_FEATURE_CQM_RSSI_LIST if this method is implemented.
+ * If it is provided then there's no point providing @set_cqm_rssi_config.
* @set_cqm_txe_config: Configure connection quality monitor TX error
* thresholds.
* @sched_scan_start: Tell the driver to start a scheduled scan.
- * @sched_scan_stop: Tell the driver to stop an ongoing scheduled scan. This
- * call must stop the scheduled scan and be ready for starting a new one
- * before it returns, i.e. @sched_scan_start may be called immediately
- * after that again and should not fail in that case. The driver should
- * not call cfg80211_sched_scan_stopped() for a requested stop (when this
- * method returns 0.)
+ * @sched_scan_stop: Tell the driver to stop an ongoing scheduled scan with
+ * given request id. This call must stop the scheduled scan and be ready
+ * for starting a new one before it returns, i.e. @sched_scan_start may be
+ * called immediately after that again and should not fail in that case.
+ * The driver should not call cfg80211_sched_scan_stopped() for a requested
+ * stop (when this method returns 0).
*
* @mgmt_frame_register: Notify driver that a management frame type was
* registered. The callback is allowed to sleep.
@@ -2826,13 +2885,12 @@ struct cfg80211_ops {
const char *name,
unsigned char name_assign_type,
enum nl80211_iftype type,
- u32 *flags,
struct vif_params *params);
int (*del_virtual_intf)(struct wiphy *wiphy,
struct wireless_dev *wdev);
int (*change_virtual_intf)(struct wiphy *wiphy,
struct net_device *dev,
- enum nl80211_iftype type, u32 *flags,
+ enum nl80211_iftype type,
struct vif_params *params);
int (*add_key)(struct wiphy *wiphy, struct net_device *netdev,
@@ -3001,6 +3059,10 @@ struct cfg80211_ops {
struct net_device *dev,
s32 rssi_thold, u32 rssi_hyst);
+ int (*set_cqm_rssi_range_config)(struct wiphy *wiphy,
+ struct net_device *dev,
+ s32 rssi_low, s32 rssi_high);
+
int (*set_cqm_txe_config)(struct wiphy *wiphy,
struct net_device *dev,
u32 rate, u32 pkts, u32 intvl);
@@ -3015,7 +3077,8 @@ struct cfg80211_ops {
int (*sched_scan_start)(struct wiphy *wiphy,
struct net_device *dev,
struct cfg80211_sched_scan_request *request);
- int (*sched_scan_stop)(struct wiphy *wiphy, struct net_device *dev);
+ int (*sched_scan_stop)(struct wiphy *wiphy, struct net_device *dev,
+ u64 reqid);
int (*set_rekey_data)(struct wiphy *wiphy, struct net_device *dev,
struct cfg80211_gtk_rekey_data *data);
@@ -3160,7 +3223,7 @@ enum wiphy_flags {
WIPHY_FLAG_CONTROL_PORT_PROTOCOL = BIT(7),
WIPHY_FLAG_IBSS_RSN = BIT(8),
WIPHY_FLAG_MESH_AUTH = BIT(10),
- WIPHY_FLAG_SUPPORTS_SCHED_SCAN = BIT(11),
+ /* use hole at 11 */
/* use hole at 12 */
WIPHY_FLAG_SUPPORTS_FW_ROAM = BIT(13),
WIPHY_FLAG_AP_UAPSD = BIT(14),
@@ -3498,6 +3561,8 @@ struct wiphy_iftype_ext_capab {
* this variable determines its size
* @max_scan_ssids: maximum number of SSIDs the device can scan for in
* any given scan
+ * @max_sched_scan_reqs: maximum number of scheduled scan requests that
+ * the device can run concurrently.
* @max_sched_scan_ssids: maximum number of SSIDs the device can scan
* for in any given scheduled scan
* @max_match_sets: maximum number of match sets the device can handle
@@ -3634,6 +3699,7 @@ struct wiphy {
int bss_priv_size;
u8 max_scan_ssids;
+ u8 max_sched_scan_reqs;
u8 max_sched_scan_ssids;
u8 max_match_sets;
u16 max_scan_ie_len;
@@ -3871,6 +3937,7 @@ void wiphy_free(struct wiphy *wiphy);
struct cfg80211_conn;
struct cfg80211_internal_bss;
struct cfg80211_cached_keys;
+struct cfg80211_cqm_config;
/**
* struct wireless_dev - wireless device state
@@ -3934,6 +4001,8 @@ struct cfg80211_cached_keys;
* @event_list: (private) list for internal event processing
* @event_lock: (private) lock for event list
* @owner_nlportid: (private) owner socket port ID
+ * @nl_owner_dead: (private) owner socket went away
+ * @cqm_config: (private) nl80211 RSSI monitor state
*/
struct wireless_dev {
struct wiphy *wiphy;
@@ -3982,12 +4051,13 @@ struct wireless_dev {
u32 ap_unexpected_nlportid;
+ u32 owner_nlportid;
+ bool nl_owner_dead;
+
bool cac_started;
unsigned long cac_start_time;
unsigned int cac_time_ms;
- u32 owner_nlportid;
-
#ifdef CONFIG_CFG80211_WEXT
/* wext data */
struct {
@@ -4002,6 +4072,8 @@ struct wireless_dev {
bool prev_bssid_valid;
} wext;
#endif
+
+ struct cfg80211_cqm_config *cqm_config;
};
static inline u8 *wdev_address(struct wireless_dev *wdev)
@@ -4494,31 +4566,34 @@ void cfg80211_scan_done(struct cfg80211_scan_request *request,
* cfg80211_sched_scan_results - notify that new scan results are available
*
* @wiphy: the wiphy which got scheduled scan results
+ * @reqid: identifier for the related scheduled scan request
*/
-void cfg80211_sched_scan_results(struct wiphy *wiphy);
+void cfg80211_sched_scan_results(struct wiphy *wiphy, u64 reqid);
/**
* cfg80211_sched_scan_stopped - notify that the scheduled scan has stopped
*
* @wiphy: the wiphy on which the scheduled scan stopped
+ * @reqid: identifier for the related scheduled scan request
*
* The driver can call this function to inform cfg80211 that the
* scheduled scan had to be stopped, for whatever reason. The driver
* is then called back via the sched_scan_stop operation when done.
*/
-void cfg80211_sched_scan_stopped(struct wiphy *wiphy);
+void cfg80211_sched_scan_stopped(struct wiphy *wiphy, u64 reqid);
/**
* cfg80211_sched_scan_stopped_rtnl - notify that the scheduled scan has stopped
*
* @wiphy: the wiphy on which the scheduled scan stopped
+ * @reqid: identifier for the related scheduled scan request
*
* The driver can call this function to inform cfg80211 that the
* scheduled scan had to be stopped, for whatever reason. The driver
* is then called back via the sched_scan_stop operation when done.
* This function should be called with rtnl locked.
*/
-void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy);
+void cfg80211_sched_scan_stopped_rtnl(struct wiphy *wiphy, u64 reqid);
/**
* cfg80211_inform_bss_frame_data - inform cfg80211 of a received BSS frame
@@ -4651,12 +4726,22 @@ cfg80211_inform_bss(struct wiphy *wiphy,
gfp);
}
+/**
+ * cfg80211_get_bss - get a BSS reference
+ * @wiphy: the wiphy this BSS struct belongs to
+ * @channel: the channel to search on (or %NULL)
+ * @bssid: the desired BSSID (or %NULL)
+ * @ssid: the desired SSID (or %NULL)
+ * @ssid_len: length of the SSID (or 0)
+ * @bss_type: type of BSS, see &enum ieee80211_bss_type
+ * @privacy: privacy filter, see &enum ieee80211_privacy
+ */
struct cfg80211_bss *cfg80211_get_bss(struct wiphy *wiphy,
struct ieee80211_channel *channel,
const u8 *bssid,
const u8 *ssid, size_t ssid_len,
enum ieee80211_bss_type bss_type,
- enum ieee80211_privacy);
+ enum ieee80211_privacy privacy);
static inline struct cfg80211_bss *
cfg80211_get_ibss(struct wiphy *wiphy,
struct ieee80211_channel *channel,
@@ -5123,6 +5208,78 @@ static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
#endif
/**
+ * struct cfg80211_connect_resp_params - Connection response params
+ * @status: Status code, %WLAN_STATUS_SUCCESS for successful connection, use
+ * %WLAN_STATUS_UNSPECIFIED_FAILURE if your device cannot give you
+ * the real status code for failures. If this call is used to report a
+ * failure due to a timeout (e.g., not receiving an Authentication frame
+ * from the AP) instead of an explicit rejection by the AP, -1 is used to
+ * indicate that this is a failure, but without a status code.
+ * @timeout_reason is used to report the reason for the timeout in that
+ * case.
+ * @bssid: The BSSID of the AP (may be %NULL)
+ * @bss: Entry of bss to which the STA got connected; can be obtained through
+ * cfg80211_get_bss() (may be %NULL). Only one parameter among @bssid and
+ * @bss needs to be specified.
+ * @req_ie: Association request IEs (may be %NULL)
+ * @req_ie_len: Association request IEs length
+ * @resp_ie: Association response IEs (may be %NULL)
+ * @resp_ie_len: Association response IEs length
+ * @fils_kek: KEK derived from a successful FILS connection (may be %NULL)
+ * @fils_kek_len: Length of @fils_kek in octets
+ * @update_erp_next_seq_num: Boolean value to specify whether the value in
+ * @fils_erp_next_seq_num is valid.
+ * @fils_erp_next_seq_num: The next sequence number to use in ERP message in
+ * FILS Authentication. This value should be specified irrespective of the
+ * status for a FILS connection.
+ * @pmk: A new PMK if derived from a successful FILS connection (may be %NULL).
+ * @pmk_len: Length of @pmk in octets
+ * @pmkid: A new PMKID if derived from a successful FILS connection or the PMKID
+ * used for this FILS connection (may be %NULL).
+ * @timeout_reason: Reason for connection timeout. This is used when the
+ * connection fails due to a timeout instead of an explicit rejection from
+ * the AP. %NL80211_TIMEOUT_UNSPECIFIED is used when the timeout reason is
+ * not known. This value is used only if @status < 0 to indicate that the
+ * failure is due to a timeout and not due to explicit rejection by the AP.
+ * This value is ignored in other cases (@status >= 0).
+ */
+struct cfg80211_connect_resp_params {
+ int status;
+ const u8 *bssid;
+ struct cfg80211_bss *bss;
+ const u8 *req_ie;
+ size_t req_ie_len;
+ const u8 *resp_ie;
+ size_t resp_ie_len;
+ const u8 *fils_kek;
+ size_t fils_kek_len;
+ bool update_erp_next_seq_num;
+ u16 fils_erp_next_seq_num;
+ const u8 *pmk;
+ size_t pmk_len;
+ const u8 *pmkid;
+ enum nl80211_timeout_reason timeout_reason;
+};
+
+/**
+ * cfg80211_connect_done - notify cfg80211 of connection result
+ *
+ * @dev: network device
+ * @params: connection response parameters
+ * @gfp: allocation flags
+ *
+ * It should be called by the underlying driver once execution of the connection
+ * request from connect() has been completed. This is similar to
+ * cfg80211_connect_bss(), but takes a structure pointer for connection response
+ * parameters. Only one of the functions among cfg80211_connect_bss(),
+ * cfg80211_connect_result(), cfg80211_connect_timeout(),
+ * and cfg80211_connect_done() should be called.
+ */
+void cfg80211_connect_done(struct net_device *dev,
+ struct cfg80211_connect_resp_params *params,
+ gfp_t gfp);
+
+/**
* cfg80211_connect_bss - notify cfg80211 of connection result
*
* @dev: network device
@@ -5152,13 +5309,31 @@ static inline void cfg80211_testmode_event(struct sk_buff *skb, gfp_t gfp)
* It should be called by the underlying driver once execution of the connection
* request from connect() has been completed. This is similar to
* cfg80211_connect_result(), but with the option of identifying the exact bss
- * entry for the connection. Only one of these functions should be called.
+ * entry for the connection. Only one of the functions among
+ * cfg80211_connect_bss(), cfg80211_connect_result(),
+ * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called.
*/
-void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
- struct cfg80211_bss *bss, const u8 *req_ie,
- size_t req_ie_len, const u8 *resp_ie,
- size_t resp_ie_len, int status, gfp_t gfp,
- enum nl80211_timeout_reason timeout_reason);
+static inline void
+cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
+ struct cfg80211_bss *bss, const u8 *req_ie,
+ size_t req_ie_len, const u8 *resp_ie,
+ size_t resp_ie_len, int status, gfp_t gfp,
+ enum nl80211_timeout_reason timeout_reason)
+{
+ struct cfg80211_connect_resp_params params;
+
+ memset(&params, 0, sizeof(params));
+ params.status = status;
+ params.bssid = bssid;
+ params.bss = bss;
+ params.req_ie = req_ie;
+ params.req_ie_len = req_ie_len;
+ params.resp_ie = resp_ie;
+ params.resp_ie_len = resp_ie_len;
+ params.timeout_reason = timeout_reason;
+
+ cfg80211_connect_done(dev, &params, gfp);
+}
/**
* cfg80211_connect_result - notify cfg80211 of connection result
@@ -5177,7 +5352,8 @@ void cfg80211_connect_bss(struct net_device *dev, const u8 *bssid,
* It should be called by the underlying driver once execution of the connection
* request from connect() has been completed. This is similar to
* cfg80211_connect_bss() which allows the exact bss entry to be specified. Only
- * one of these functions should be called.
+ * one of the functions among cfg80211_connect_bss(), cfg80211_connect_result(),
+ * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called.
*/
static inline void
cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
@@ -5204,7 +5380,9 @@ cfg80211_connect_result(struct net_device *dev, const u8 *bssid,
* in a sequence where no explicit authentication/association rejection was
* received from the AP. This could happen, e.g., due to not being able to send
* out the Authentication or Association Request frame or timing out while
- * waiting for the response.
+ * waiting for the response. Only one of the functions among
+ * cfg80211_connect_bss(), cfg80211_connect_result(),
+ * cfg80211_connect_timeout(), and cfg80211_connect_done() should be called.
*/
static inline void
cfg80211_connect_timeout(struct net_device *dev, const u8 *bssid,
@@ -5216,51 +5394,46 @@ cfg80211_connect_timeout(struct net_device *dev, const u8 *bssid,
}
/**
- * cfg80211_roamed - notify cfg80211 of roaming
+ * struct cfg80211_roam_info - driver initiated roaming information
*
- * @dev: network device
* @channel: the channel of the new AP
- * @bssid: the BSSID of the new AP
+ * @bss: entry of bss to which the STA roamed (may be %NULL if @bssid is set)
+ * @bssid: the BSSID of the new AP (may be %NULL if @bss is set)
* @req_ie: association request IEs (may be %NULL)
* @req_ie_len: association request IEs length
* @resp_ie: association response IEs (may be %NULL)
* @resp_ie_len: assoc response IEs length
- * @gfp: allocation flags
- *
- * It should be called by the underlying driver whenever it roamed
- * from one AP to another while connected.
*/
-void cfg80211_roamed(struct net_device *dev,
- struct ieee80211_channel *channel,
- const u8 *bssid,
- const u8 *req_ie, size_t req_ie_len,
- const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp);
+struct cfg80211_roam_info {
+ struct ieee80211_channel *channel;
+ struct cfg80211_bss *bss;
+ const u8 *bssid;
+ const u8 *req_ie;
+ size_t req_ie_len;
+ const u8 *resp_ie;
+ size_t resp_ie_len;
+};
/**
- * cfg80211_roamed_bss - notify cfg80211 of roaming
+ * cfg80211_roamed - notify cfg80211 of roaming
*
* @dev: network device
- * @bss: entry of bss to which STA got roamed
- * @req_ie: association request IEs (maybe be %NULL)
- * @req_ie_len: association request IEs length
- * @resp_ie: association response IEs (may be %NULL)
- * @resp_ie_len: assoc response IEs length
+ * @info: information about the new BSS; see &struct cfg80211_roam_info.
* @gfp: allocation flags
*
- * This is just a wrapper to notify cfg80211 of roaming event with driver
- * passing bss to avoid a race in timeout of the bss entry. It should be
- * called by the underlying driver whenever it roamed from one AP to another
- * while connected. Drivers which have roaming implemented in firmware
- * may use this function to avoid a race in bss entry timeout where the bss
- * entry of the new AP is seen in the driver, but gets timed out by the time
- * it is accessed in __cfg80211_roamed() due to delay in scheduling
+ * This function may be called with the driver passing either the BSSID of the
+ * new AP or passing the bss entry to avoid a race in timeout of the bss entry.
+ * It should be called by the underlying driver whenever it roamed from one AP
+ * to another while connected. Drivers which have roaming implemented in
+ * firmware should pass the bss entry to avoid a race in bss entry timeout where
+ * the bss entry of the new AP is seen in the driver, but gets timed out by the
+ * time it is accessed in __cfg80211_roamed() due to delay in scheduling
* rdev->event_work. In case of any failures, the reference is released
- * either in cfg80211_roamed_bss() or in __cfg80211_romed(), Otherwise,
- * it will be released while diconneting from the current bss.
+ * either in cfg80211_roamed() or in __cfg80211_roamed(). Otherwise, it will be
+ * released while disconnecting from the current bss.
*/
-void cfg80211_roamed_bss(struct net_device *dev, struct cfg80211_bss *bss,
- const u8 *req_ie, size_t req_ie_len,
- const u8 *resp_ie, size_t resp_ie_len, gfp_t gfp);
+void cfg80211_roamed(struct net_device *dev, struct cfg80211_roam_info *info,
+ gfp_t gfp);
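
A driver converting from the removed cfg80211_roamed_bss() now fills the info structure instead; a minimal sketch (the local variables are placeholders, not from the patch):

    struct cfg80211_roam_info info = {
            .bss         = new_bss,         /* preferred; or set .channel and .bssid */
            .req_ie      = assoc_req_ies,
            .req_ie_len  = assoc_req_ies_len,
            .resp_ie     = assoc_resp_ies,
            .resp_ie_len = assoc_resp_ies_len,
    };

    cfg80211_roamed(netdev, &info, GFP_KERNEL);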
/**
* cfg80211_disconnected - notify cfg80211 that connection was dropped
diff --git a/include/net/devlink.h b/include/net/devlink.h
index d29e5fc82582..ed7687bbf5d0 100644
--- a/include/net/devlink.h
+++ b/include/net/devlink.h
@@ -25,6 +25,8 @@ struct devlink {
struct list_head list;
struct list_head port_list;
struct list_head sb_list;
+ struct list_head dpipe_table_list;
+ struct devlink_dpipe_headers *dpipe_headers;
const struct devlink_ops *ops;
struct device *dev;
possible_net_t _net;
@@ -49,6 +51,178 @@ struct devlink_sb_pool_info {
enum devlink_sb_threshold_type threshold_type;
};
+/**
+ * struct devlink_dpipe_field - dpipe field object
+ * @name: field name
+ * @id: index inside the headers field array
+ * @bitwidth: bitwidth
+ * @mapping_type: mapping type
+ */
+struct devlink_dpipe_field {
+ const char *name;
+ unsigned int id;
+ unsigned int bitwidth;
+ enum devlink_dpipe_field_mapping_type mapping_type;
+};
+
+/**
+ * struct devlink_dpipe_header - dpipe header object
+ * @name: header name
+ * @id: index, global/local determined by global bit
+ * @fields: fields
+ * @fields_count: number of fields
+ * @global: indicates if the header is shared, like most protocol headers,
+ * or driver specific
+ */
+struct devlink_dpipe_header {
+ const char *name;
+ unsigned int id;
+ struct devlink_dpipe_field *fields;
+ unsigned int fields_count;
+ bool global;
+};
+
+/**
+ * struct devlink_dpipe_match - represents match operation
+ * @type: type of match
+ * @header_index: header index (packets can have several headers of same
+ * type like in case of tunnels)
+ * @header: header
+ * @field_id: field index
+ */
+struct devlink_dpipe_match {
+ enum devlink_dpipe_match_type type;
+ unsigned int header_index;
+ struct devlink_dpipe_header *header;
+ unsigned int field_id;
+};
+
+/**
+ * struct devlink_dpipe_action - represents action operation
+ * @type: type of action
+ * @header_index: header index (packets can have several headers of same
+ * type like in case of tunnels)
+ * @header: header
+ * @field_id: field index
+ */
+struct devlink_dpipe_action {
+ enum devlink_dpipe_action_type type;
+ unsigned int header_index;
+ struct devlink_dpipe_header *header;
+ unsigned int field_id;
+};
+
+/**
+ * struct devlink_dpipe_value - represents value of match/action
+ * @action: action
+ * @match: match
+ * @mapping_value: in case the field has some mapping this value
+ * specifies the mapping value
+ * @mapping_valid: specify if mapping value is valid
+ * @value_size: value size
+ * @value: value
+ * @mask: bit mask
+ */
+struct devlink_dpipe_value {
+ union {
+ struct devlink_dpipe_action *action;
+ struct devlink_dpipe_match *match;
+ };
+ unsigned int mapping_value;
+ bool mapping_valid;
+ unsigned int value_size;
+ void *value;
+ void *mask;
+};
+
+/**
+ * struct devlink_dpipe_entry - table entry object
+ * @index: index of the entry in the table
+ * @match_values: match values
+ * @match_values_count: count of match tuples
+ * @action_values: actions values
+ * @action_values_count: count of actions values
+ * @counter: value of counter
+ * @counter_valid: indicates if the counter value read from hardware is valid
+ */
+struct devlink_dpipe_entry {
+ u64 index;
+ struct devlink_dpipe_value *match_values;
+ unsigned int match_values_count;
+ struct devlink_dpipe_value *action_values;
+ unsigned int action_values_count;
+ u64 counter;
+ bool counter_valid;
+};
+
+/**
+ * struct devlink_dpipe_dump_ctx - context provided to driver in order
+ * to dump
+ * @info: info
+ * @cmd: devlink command
+ * @skb: skb
+ * @nest: top attribute
+ * @hdr: hdr
+ */
+struct devlink_dpipe_dump_ctx {
+ struct genl_info *info;
+ enum devlink_command cmd;
+ struct sk_buff *skb;
+ struct nlattr *nest;
+ void *hdr;
+};
+
+struct devlink_dpipe_table_ops;
+
+/**
+ * struct devlink_dpipe_table - table object
+ * @priv: private
+ * @name: table name
+ * @size: maximum number of entries
+ * @counters_enabled: indicates if counters are active
+ * @counter_control_extern: indicates if counter control is in dpipe or
+ * an external tool
+ * @table_ops: table operations
+ * @rcu: rcu
+ */
+struct devlink_dpipe_table {
+ void *priv;
+ struct list_head list;
+ const char *name;
+ u64 size;
+ bool counters_enabled;
+ bool counter_control_extern;
+ struct devlink_dpipe_table_ops *table_ops;
+ struct rcu_head rcu;
+};
+
+/**
+ * struct devlink_dpipe_table_ops - dpipe_table ops
+ * @actions_dump: dumps all of the table's actions
+ * @matches_dump: dumps all of the table's matches
+ * @entries_dump: dumps all active entries in the table
+ * @counters_set_update: when changing the counter status, a hardware sync
+ * may be needed to allocate/free counter related
+ * resources
+ */
+struct devlink_dpipe_table_ops {
+ int (*actions_dump)(void *priv, struct sk_buff *skb);
+ int (*matches_dump)(void *priv, struct sk_buff *skb);
+ int (*entries_dump)(void *priv, bool counters_enabled,
+ struct devlink_dpipe_dump_ctx *dump_ctx);
+ int (*counters_set_update)(void *priv, bool enable);
+};
+
+/**
+ * struct devlink_dpipe_headers - dpipe headers
+ * @headers: header array; can be shared (global bit) or driver specific
+ * @headers_count: count of headers
+ */
+struct devlink_dpipe_headers {
+ struct devlink_dpipe_header **headers;
+ unsigned int headers_count;
+};
+
struct devlink_ops {
int (*port_type_set)(struct devlink_port *devlink_port,
enum devlink_port_type port_type);
@@ -94,6 +268,8 @@ struct devlink_ops {
int (*eswitch_mode_set)(struct devlink *devlink, u16 mode);
int (*eswitch_inline_mode_get)(struct devlink *devlink, u8 *p_inline_mode);
int (*eswitch_inline_mode_set)(struct devlink *devlink, u8 inline_mode);
+ int (*eswitch_encap_mode_get)(struct devlink *devlink, u8 *p_encap_mode);
+ int (*eswitch_encap_mode_set)(struct devlink *devlink, u8 encap_mode);
};
static inline void *devlink_priv(struct devlink *devlink)
@@ -132,6 +308,26 @@ int devlink_sb_register(struct devlink *devlink, unsigned int sb_index,
u16 egress_pools_count, u16 ingress_tc_count,
u16 egress_tc_count);
void devlink_sb_unregister(struct devlink *devlink, unsigned int sb_index);
+int devlink_dpipe_table_register(struct devlink *devlink,
+ const char *table_name,
+ struct devlink_dpipe_table_ops *table_ops,
+ void *priv, u64 size,
+ bool counter_control_extern);
+void devlink_dpipe_table_unregister(struct devlink *devlink,
+ const char *table_name);
+int devlink_dpipe_headers_register(struct devlink *devlink,
+ struct devlink_dpipe_headers *dpipe_headers);
+void devlink_dpipe_headers_unregister(struct devlink *devlink);
+bool devlink_dpipe_table_counter_enabled(struct devlink *devlink,
+ const char *table_name);
+int devlink_dpipe_entry_ctx_prepare(struct devlink_dpipe_dump_ctx *dump_ctx);
+int devlink_dpipe_entry_ctx_append(struct devlink_dpipe_dump_ctx *dump_ctx,
+ struct devlink_dpipe_entry *entry);
+int devlink_dpipe_entry_ctx_close(struct devlink_dpipe_dump_ctx *dump_ctx);
+int devlink_dpipe_action_put(struct sk_buff *skb,
+ struct devlink_dpipe_action *action);
+int devlink_dpipe_match_put(struct sk_buff *skb,
+ struct devlink_dpipe_match *match);
#else
@@ -200,6 +396,71 @@ static inline void devlink_sb_unregister(struct devlink *devlink,
{
}
+static inline int
+devlink_dpipe_table_register(struct devlink *devlink,
+ const char *table_name,
+ struct devlink_dpipe_table_ops *table_ops,
+ void *priv, u64 size,
+ bool counter_control_extern)
+{
+ return 0;
+}
+
+static inline void devlink_dpipe_table_unregister(struct devlink *devlink,
+ const char *table_name)
+{
+}
+
+static inline int devlink_dpipe_headers_register(struct devlink *devlink,
+ struct devlink_dpipe_headers *
+ dpipe_headers)
+{
+ return 0;
+}
+
+static inline void devlink_dpipe_headers_unregister(struct devlink *devlink)
+{
+}
+
+static inline bool devlink_dpipe_table_counter_enabled(struct devlink *devlink,
+ const char *table_name)
+{
+ return false;
+}
+
+static inline int
+devlink_dpipe_entry_ctx_prepare(struct devlink_dpipe_dump_ctx *dump_ctx)
+{
+ return 0;
+}
+
+static inline int
+devlink_dpipe_entry_ctx_append(struct devlink_dpipe_dump_ctx *dump_ctx,
+ struct devlink_dpipe_entry *entry)
+{
+ return 0;
+}
+
+static inline int
+devlink_dpipe_entry_ctx_close(struct devlink_dpipe_dump_ctx *dump_ctx)
+{
+ return 0;
+}
+
+static inline int
+devlink_dpipe_action_put(struct sk_buff *skb,
+ struct devlink_dpipe_action *action)
+{
+ return 0;
+}
+
+static inline int
+devlink_dpipe_match_put(struct sk_buff *skb,
+ struct devlink_dpipe_match *match)
+{
+ return 0;
+}
+
#endif
#endif /* _NET_DEVLINK_H_ */
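
To tie the dpipe pieces together, here is a rough sketch of a driver exposing one table; the ops implementation, table name and size are invented, and only the functions declared above are used. A real driver would also fill the match/action values before appending each entry.

    #include <net/devlink.h>

    static int my_table_entries_dump(void *priv, bool counters_enabled,
                                     struct devlink_dpipe_dump_ctx *dump_ctx)
    {
            struct devlink_dpipe_entry entry = {};
            int err;

            err = devlink_dpipe_entry_ctx_prepare(dump_ctx);
            if (err)
                    return err;

            /* in a real driver: loop over hardware rows and fill entry.index,
             * entry.match_values/action_values and (optionally) entry.counter
             */
            err = devlink_dpipe_entry_ctx_append(dump_ctx, &entry);
            if (err)
                    return err;

            return devlink_dpipe_entry_ctx_close(dump_ctx);
    }

    static struct devlink_dpipe_table_ops my_table_ops = {
            .entries_dump = my_table_entries_dump,
            /* .actions_dump, .matches_dump and .counters_set_update omitted here */
    };

    static int my_dpipe_init(struct devlink *devlink, void *priv)
    {
            /* 64-entry table, counters controlled through dpipe itself */
            return devlink_dpipe_table_register(devlink, "my_table",
                                                &my_table_ops, priv, 64, false);
    }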
diff --git a/include/net/dsa.h b/include/net/dsa.h
index 4e13e695f025..8e24677b1c62 100644
--- a/include/net/dsa.h
+++ b/include/net/dsa.h
@@ -19,6 +19,7 @@
#include <linux/workqueue.h>
#include <linux/of.h>
#include <linux/ethtool.h>
+#include <net/devlink.h>
struct tc_action;
struct phy_device;
@@ -31,6 +32,8 @@ enum dsa_tag_protocol {
DSA_TAG_PROTO_EDSA,
DSA_TAG_PROTO_BRCM,
DSA_TAG_PROTO_QCA,
+ DSA_TAG_PROTO_MTK,
+ DSA_TAG_PROTO_LAN9303,
DSA_TAG_LAST, /* MUST BE LAST */
};
@@ -122,7 +125,7 @@ struct dsa_switch_tree {
* protocol to use.
*/
struct net_device *master_netdev;
- int (*rcv)(struct sk_buff *skb,
+ struct sk_buff * (*rcv)(struct sk_buff *skb,
struct net_device *dev,
struct packet_type *pt,
struct net_device *orig_dev);
@@ -182,6 +185,7 @@ struct dsa_port {
unsigned int ageing_time;
u8 stp_state;
struct net_device *bridge_dev;
+ struct devlink_port devlink_port;
};
struct dsa_switch {
@@ -233,6 +237,13 @@ struct dsa_switch {
u32 phys_mii_mask;
struct mii_bus *slave_mii_bus;
+ /* Ageing Time limits in msecs */
+ unsigned int ageing_time_min;
+ unsigned int ageing_time_max;
+
+ /* devlink used to represent this switch device */
+ struct devlink *devlink;
+
/* Dynamically allocated ports, keep last */
size_t num_ports;
struct dsa_port ports[];
@@ -248,6 +259,11 @@ static inline bool dsa_is_dsa_port(struct dsa_switch *ds, int p)
return !!((ds->dsa_port_mask) & (1 << p));
}
+static inline bool dsa_is_normal_port(struct dsa_switch *ds, int p)
+{
+ return !dsa_is_cpu_port(ds, p) && !dsa_is_dsa_port(ds, p);
+}
+
static inline bool dsa_is_port_initialized(struct dsa_switch *ds, int p)
{
return ds->enabled_port_mask & (1 << p) && ds->ports[p].netdev;
@@ -287,7 +303,7 @@ struct dsa_notifier_bridge_info {
struct dsa_switch_ops {
/*
- * Probing and setup.
+ * Legacy probing.
*/
const char *(*probe)(struct device *dsa_dev,
struct device *host_dev, int sw_addr,
@@ -442,6 +458,14 @@ struct dsa_switch_ops {
bool ingress);
void (*port_mirror_del)(struct dsa_switch *ds, int port,
struct dsa_mall_mirror_tc_entry *mirror);
+
+ /*
+ * Cross-chip operations
+ */
+ int (*crosschip_bridge_join)(struct dsa_switch *ds, int sw_index,
+ int port, struct net_device *br);
+ void (*crosschip_bridge_leave)(struct dsa_switch *ds, int sw_index,
+ int port, struct net_device *br);
};
struct dsa_switch_driver {
@@ -449,9 +473,11 @@ struct dsa_switch_driver {
const struct dsa_switch_ops *ops;
};
+/* Legacy driver registration */
void register_switch_driver(struct dsa_switch_driver *type);
void unregister_switch_driver(struct dsa_switch_driver *type);
struct mii_bus *dsa_host_dev_to_mii_bus(struct device *dev);
+
struct net_device *dsa_dev_to_net_device(struct device *dev);
static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst)
@@ -459,6 +485,15 @@ static inline bool dsa_uses_tagged_protocol(struct dsa_switch_tree *dst)
return dst->rcv != NULL;
}
+static inline bool netdev_uses_dsa(struct net_device *dev)
+{
+#if IS_ENABLED(CONFIG_NET_DSA)
+ if (dev->dsa_ptr != NULL)
+ return dsa_uses_tagged_protocol(dev->dsa_ptr);
+#endif
+ return false;
+}
+
struct dsa_switch *dsa_switch_alloc(struct device *dev, size_t n);
void dsa_unregister_switch(struct dsa_switch *ds);
int dsa_register_switch(struct dsa_switch *ds, struct device *dev);
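
For the non-legacy path implied by the last three prototypes, a new driver would allocate and register a switch roughly as below; this is only a sketch, ds->ops and DSA_MAX_PORTS are assumed from the rest of the header, and the ops contents are driver specific.

    #include <net/dsa.h>

    static const struct dsa_switch_ops my_switch_ops = {
            /* .get_tag_protocol, .setup, .port_enable, ... driver specific */
    };

    static int my_switch_probe(struct device *dev)
    {
            struct dsa_switch *ds;

            ds = dsa_switch_alloc(dev, DSA_MAX_PORTS);
            if (!ds)
                    return -ENOMEM;

            ds->ops = &my_switch_ops;
            /* ageing_time_min/max could be set here if the hardware has limits */

            return dsa_register_switch(ds, dev);
    }

The matching remove path would call dsa_unregister_switch(ds).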
diff --git a/include/net/esp.h b/include/net/esp.h
index a43be85aedc4..c41994d1bfef 100644
--- a/include/net/esp.h
+++ b/include/net/esp.h
@@ -10,4 +10,23 @@ static inline struct ip_esp_hdr *ip_esp_hdr(const struct sk_buff *skb)
return (struct ip_esp_hdr *)skb_transport_header(skb);
}
+struct esp_info {
+ struct ip_esp_hdr *esph;
+ __be64 seqno;
+ int tfclen;
+ int tailen;
+ int plen;
+ int clen;
+ int len;
+ int nfrags;
+ __u8 proto;
+ bool inplace;
+};
+
+int esp_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp);
+int esp_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp);
+int esp_input_done2(struct sk_buff *skb, int err);
+int esp6_output_head(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp);
+int esp6_output_tail(struct xfrm_state *x, struct sk_buff *skb, struct esp_info *esp);
+int esp6_input_done2(struct sk_buff *skb, int err);
#endif
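
The head/tail split above lets a caller (for instance an ESP offload fallback path) build the outer headers, run the crypto, and then finish the packet. The calling pattern is roughly the sketch below; the field assignments are illustrative only, and x/skb are the usual xfrm state and packet. The esp6_* variants follow the same pattern for IPv6.

    struct esp_info esp = {};
    int err;

    esp.proto = proto;      /* inner protocol number, supplied by the caller */
    esp.tfclen = 0;         /* no TFC padding in this sketch */

    err = esp_output_head(x, skb, &esp);
    if (err < 0)
            return err;

    /* encryption of the payload happens here */

    return esp_output_tail(x, skb, &esp);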
diff --git a/include/net/fib_rules.h b/include/net/fib_rules.h
index 8dbfdf728cd8..76c7300626d6 100644
--- a/include/net/fib_rules.h
+++ b/include/net/fib_rules.h
@@ -141,7 +141,10 @@ int fib_rules_lookup(struct fib_rules_ops *, struct flowi *, int flags,
struct fib_lookup_arg *);
int fib_default_rule_add(struct fib_rules_ops *, u32 pref, u32 table,
u32 flags);
+bool fib_rule_matchall(const struct fib_rule *rule);
-int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh);
-int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh);
+int fib_nl_newrule(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack);
+int fib_nl_delrule(struct sk_buff *skb, struct nlmsghdr *nlh,
+ struct netlink_ext_ack *extack);
#endif
diff --git a/include/net/flow.h b/include/net/flow.h
index 6984f1913dc1..bae198b3039e 100644
--- a/include/net/flow.h
+++ b/include/net/flow.h
@@ -202,7 +202,7 @@ static inline struct flowi *flowidn_to_flowi(struct flowidn *fldn)
typedef unsigned long flow_compare_t;
-static inline size_t flow_key_size(u16 family)
+static inline unsigned int flow_key_size(u16 family)
{
switch (family) {
case AF_INET:
diff --git a/include/net/flow_dissector.h b/include/net/flow_dissector.h
index ac9703018a3a..8d21d448daa9 100644
--- a/include/net/flow_dissector.h
+++ b/include/net/flow_dissector.h
@@ -41,6 +41,13 @@ struct flow_dissector_key_vlan {
u16 padding;
};
+struct flow_dissector_key_mpls {
+ u32 mpls_ttl:8,
+ mpls_bos:1,
+ mpls_tc:3,
+ mpls_label:20;
+};
+
struct flow_dissector_key_keyid {
__be32 keyid;
};
@@ -169,6 +176,7 @@ enum flow_dissector_key_id {
FLOW_DISSECTOR_KEY_ENC_IPV6_ADDRS, /* struct flow_dissector_key_ipv6_addrs */
FLOW_DISSECTOR_KEY_ENC_CONTROL, /* struct flow_dissector_key_control */
FLOW_DISSECTOR_KEY_ENC_PORTS, /* struct flow_dissector_key_ports */
+ FLOW_DISSECTOR_KEY_MPLS, /* struct flow_dissector_key_mpls */
FLOW_DISSECTOR_KEY_MAX,
};
diff --git a/include/net/flowcache.h b/include/net/flowcache.h
index 9caf3bfc8d2d..51eb971e8973 100644
--- a/include/net/flowcache.h
+++ b/include/net/flowcache.h
@@ -8,7 +8,7 @@
struct flow_cache_percpu {
struct hlist_head *hash_table;
- int hash_count;
+ unsigned int hash_count;
u32 hash_rnd;
int hash_rnd_recalc;
struct tasklet_struct flush_tasklet;
@@ -18,8 +18,8 @@ struct flow_cache {
u32 hash_shift;
struct flow_cache_percpu __percpu *percpu;
struct hlist_node node;
- int low_watermark;
- int high_watermark;
+ unsigned int low_watermark;
+ unsigned int high_watermark;
struct timer_list rnd_timer;
};
#endif /* _NET_FLOWCACHE_H */
diff --git a/include/net/genetlink.h b/include/net/genetlink.h
index a34275be3600..68b88192b00c 100644
--- a/include/net/genetlink.h
+++ b/include/net/genetlink.h
@@ -84,6 +84,7 @@ struct nlattr **genl_family_attrbuf(const struct genl_family *family);
* @attrs: netlink attributes
* @_net: network namespace
* @user_ptr: user pointers
+ * @extack: extended ACK report struct
*/
struct genl_info {
u32 snd_seq;
@@ -94,6 +95,7 @@ struct genl_info {
struct nlattr ** attrs;
possible_net_t _net;
void * user_ptr[2];
+ struct netlink_ext_ack *extack;
};
static inline struct net *genl_info_net(struct genl_info *info)
@@ -106,6 +108,16 @@ static inline void genl_info_net_set(struct genl_info *info, struct net *net)
write_pnet(&info->_net, net);
}
+#define GENL_SET_ERR_MSG(info, msg) NL_SET_ERR_MSG((info)->extack, msg)
+
+static inline int genl_err_attr(struct genl_info *info, int err,
+ struct nlattr *attr)
+{
+ info->extack->bad_attr = attr;
+
+ return err;
+}
+
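
A sketch of how a family's .doit handler could use the new extack helpers; the command, attribute and limit names are made up:

    #include <net/genetlink.h>

    static int my_cmd_doit(struct sk_buff *skb, struct genl_info *info)
    {
            struct nlattr *attr = info->attrs[MY_ATTR_VALUE];

            if (!attr) {
                    GENL_SET_ERR_MSG(info, "missing value attribute");
                    return -EINVAL;
            }

            if (nla_get_u32(attr) > MY_VALUE_MAX)
                    return genl_err_attr(info, -ERANGE, attr); /* points extack at the offender */

            return 0;
    }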
/**
* struct genl_ops - generic netlink operations
* @cmd: command identifier
@@ -162,14 +174,16 @@ genlmsg_nlhdr(void *user_hdr, const struct genl_family *family)
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
- * */
+ * @extack: extended ACK report struct
+ */
static inline int genlmsg_parse(const struct nlmsghdr *nlh,
const struct genl_family *family,
struct nlattr *tb[], int maxtype,
- const struct nla_policy *policy)
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
{
return nlmsg_parse(nlh, family->hdrsize + GENL_HDRLEN, tb, maxtype,
- policy);
+ policy, extack);
}
/**
diff --git a/include/net/ip.h b/include/net/ip.h
index bf264a8db1ce..821cedcc8e73 100644
--- a/include/net/ip.h
+++ b/include/net/ip.h
@@ -33,6 +33,8 @@
#include <net/flow.h>
#include <net/flow_dissector.h>
+#define IPV4_MAX_PMTU 65535U /* RFC 2675, Section 5.1 */
+
struct sock;
struct inet_skb_parm {
diff --git a/include/net/ip6_route.h b/include/net/ip6_route.h
index 9dc2c182a263..f5e625f53367 100644
--- a/include/net/ip6_route.h
+++ b/include/net/ip6_route.h
@@ -84,6 +84,7 @@ struct dst_entry *ip6_route_lookup(struct net *net, struct flowi6 *fl6,
struct rt6_info *ip6_pol_route(struct net *net, struct fib6_table *table,
int ifindex, struct flowi6 *fl6, int flags);
+void ip6_route_init_special_entries(void);
int ip6_route_init(void);
void ip6_route_cleanup(void);
diff --git a/include/net/ip6_tunnel.h b/include/net/ip6_tunnel.h
index 1b1cf33cbfb0..08fbc7f7d8d7 100644
--- a/include/net/ip6_tunnel.h
+++ b/include/net/ip6_tunnel.h
@@ -33,6 +33,8 @@ struct __ip6_tnl_parm {
__be16 o_flags;
__be32 i_key;
__be32 o_key;
+
+ __u32 fwmark;
};
/* IPv6 tunnel */
diff --git a/include/net/ip_fib.h b/include/net/ip_fib.h
index 368bb4024b78..6692c5758b33 100644
--- a/include/net/ip_fib.h
+++ b/include/net/ip_fib.h
@@ -213,6 +213,11 @@ struct fib_entry_notifier_info {
u32 tb_id;
};
+struct fib_rule_notifier_info {
+ struct fib_notifier_info info; /* must be first */
+ struct fib_rule *rule;
+};
+
struct fib_nh_notifier_info {
struct fib_notifier_info info; /* must be first */
struct fib_nh *fib_nh;
@@ -232,9 +237,21 @@ enum fib_event_type {
int register_fib_notifier(struct notifier_block *nb,
void (*cb)(struct notifier_block *nb));
int unregister_fib_notifier(struct notifier_block *nb);
+int call_fib_notifier(struct notifier_block *nb, struct net *net,
+ enum fib_event_type event_type,
+ struct fib_notifier_info *info);
int call_fib_notifiers(struct net *net, enum fib_event_type event_type,
struct fib_notifier_info *info);
+void fib_notify(struct net *net, struct notifier_block *nb);
+#ifdef CONFIG_IP_MULTIPLE_TABLES
+void fib_rules_notify(struct net *net, struct notifier_block *nb);
+#else
+static inline void fib_rules_notify(struct net *net, struct notifier_block *nb)
+{
+}
+#endif
+
struct fib_table {
struct hlist_node tb_hlist;
u32 tb_id;
@@ -299,6 +316,11 @@ static inline int fib_lookup(struct net *net, const struct flowi4 *flp,
return err;
}
+static inline bool fib4_rule_default(const struct fib_rule *rule)
+{
+ return true;
+}
+
#else /* CONFIG_IP_MULTIPLE_TABLES */
int __net_init fib4_rules_init(struct net *net);
void __net_exit fib4_rules_exit(struct net *net);
@@ -343,6 +365,8 @@ out:
return err;
}
+bool fib4_rule_default(const struct fib_rule *rule);
+
#endif /* CONFIG_IP_MULTIPLE_TABLES */
/* Exported by fib_frontend.c */
@@ -371,17 +395,13 @@ int fib_sync_down_dev(struct net_device *dev, unsigned long event, bool force);
int fib_sync_down_addr(struct net_device *dev, __be32 local);
int fib_sync_up(struct net_device *dev, unsigned int nh_flags);
-extern u32 fib_multipath_secret __read_mostly;
-
-static inline int fib_multipath_hash(__be32 saddr, __be32 daddr)
-{
- return jhash_2words((__force u32)saddr, (__force u32)daddr,
- fib_multipath_secret) >> 1;
-}
-
+#ifdef CONFIG_IP_ROUTE_MULTIPATH
+int fib_multipath_hash(const struct fib_info *fi, const struct flowi4 *fl4,
+ const struct sk_buff *skb);
+#endif
void fib_select_multipath(struct fib_result *res, int hash);
void fib_select_path(struct net *net, struct fib_result *res,
- struct flowi4 *fl4, int mp_hash);
+ struct flowi4 *fl4, const struct sk_buff *skb);
/* Exported by fib_trie.c */
void fib_trie_init(void);
diff --git a/include/net/ip_tunnels.h b/include/net/ip_tunnels.h
index 95056796657c..520809912f03 100644
--- a/include/net/ip_tunnels.h
+++ b/include/net/ip_tunnels.h
@@ -132,6 +132,7 @@ struct ip_tunnel {
unsigned int prl_count; /* # of entries in PRL */
unsigned int ip_tnl_net_id;
struct gro_cells gro_cells;
+ __u32 fwmark;
bool collect_md;
bool ignore_df;
};
@@ -273,9 +274,9 @@ int ip_tunnel_rcv(struct ip_tunnel *tunnel, struct sk_buff *skb,
const struct tnl_ptk_info *tpi, struct metadata_dst *tun_dst,
bool log_ecn_error);
int ip_tunnel_changelink(struct net_device *dev, struct nlattr *tb[],
- struct ip_tunnel_parm *p);
+ struct ip_tunnel_parm *p, __u32 fwmark);
int ip_tunnel_newlink(struct net_device *dev, struct nlattr *tb[],
- struct ip_tunnel_parm *p);
+ struct ip_tunnel_parm *p, __u32 fwmark);
void ip_tunnel_setup(struct net_device *dev, unsigned int net_id);
struct ip_tunnel_encap_ops {
diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h
index 7bdfa7d78363..4f4f786255ef 100644
--- a/include/net/ip_vs.h
+++ b/include/net/ip_vs.h
@@ -12,6 +12,8 @@
#include <linux/list.h> /* for struct list_head */
#include <linux/spinlock.h> /* for struct rwlock_t */
#include <linux/atomic.h> /* for struct atomic_t */
+#include <linux/refcount.h> /* for struct refcount_t */
+
#include <linux/compiler.h>
#include <linux/timer.h>
#include <linux/bug.h>
@@ -525,7 +527,7 @@ struct ip_vs_conn {
struct netns_ipvs *ipvs;
/* counter and timer */
- atomic_t refcnt; /* reference count */
+ refcount_t refcnt; /* reference count */
struct timer_list timer; /* Expiration timer */
volatile unsigned long timeout; /* timeout */
@@ -667,7 +669,7 @@ struct ip_vs_dest {
atomic_t conn_flags; /* flags to copy to conn */
atomic_t weight; /* server weight */
- atomic_t refcnt; /* reference counter */
+ refcount_t refcnt; /* reference counter */
struct ip_vs_stats stats; /* statistics */
unsigned long idle_start; /* start time, jiffies */
@@ -1211,14 +1213,14 @@ struct ip_vs_conn * ip_vs_conn_out_get_proto(struct netns_ipvs *ipvs, int af,
*/
static inline bool __ip_vs_conn_get(struct ip_vs_conn *cp)
{
- return atomic_inc_not_zero(&cp->refcnt);
+ return refcount_inc_not_zero(&cp->refcnt);
}
/* put back the conn without restarting its timer */
static inline void __ip_vs_conn_put(struct ip_vs_conn *cp)
{
smp_mb__before_atomic();
- atomic_dec(&cp->refcnt);
+ refcount_dec(&cp->refcnt);
}
void ip_vs_conn_put(struct ip_vs_conn *cp);
void ip_vs_conn_fill_cport(struct ip_vs_conn *cp, __be16 cport);
@@ -1347,8 +1349,6 @@ int ip_vs_protocol_init(void);
void ip_vs_protocol_cleanup(void);
void ip_vs_protocol_timeout_change(struct netns_ipvs *ipvs, int flags);
int *ip_vs_create_timeout_table(int *table, int size);
-int ip_vs_set_state_timeout(int *table, int num, const char *const *names,
- const char *name, int to);
void ip_vs_tcpudp_debug_packet(int af, struct ip_vs_protocol *pp,
const struct sk_buff *skb, int offset,
const char *msg);
@@ -1410,18 +1410,18 @@ void ip_vs_try_bind_dest(struct ip_vs_conn *cp);
static inline void ip_vs_dest_hold(struct ip_vs_dest *dest)
{
- atomic_inc(&dest->refcnt);
+ refcount_inc(&dest->refcnt);
}
static inline void ip_vs_dest_put(struct ip_vs_dest *dest)
{
smp_mb__before_atomic();
- atomic_dec(&dest->refcnt);
+ refcount_dec(&dest->refcnt);
}
static inline void ip_vs_dest_put_and_free(struct ip_vs_dest *dest)
{
- if (atomic_dec_and_test(&dest->refcnt))
+ if (refcount_dec_and_test(&dest->refcnt))
kfree(dest);
}
@@ -1553,13 +1553,9 @@ static inline void ip_vs_notrack(struct sk_buff *skb)
enum ip_conntrack_info ctinfo;
struct nf_conn *ct = nf_ct_get(skb, &ctinfo);
- if (!ct || !nf_ct_is_untracked(ct)) {
- struct nf_conn *untracked;
-
+ if (ct) {
nf_conntrack_put(&ct->ct_general);
- untracked = nf_ct_untracked_get();
- nf_conntrack_get(&untracked->ct_general);
- nf_ct_set(skb, untracked, IP_CT_NEW);
+ nf_ct_set(skb, NULL, IP_CT_UNTRACKED);
}
#endif
}
@@ -1618,7 +1614,7 @@ static inline bool ip_vs_conn_uses_conntrack(struct ip_vs_conn *cp,
if (!(cp->flags & IP_VS_CONN_F_NFCT))
return false;
ct = nf_ct_get(skb, &ctinfo);
- if (ct && !nf_ct_is_untracked(ct))
+ if (ct)
return true;
#endif
return false;
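The ip_vs.h hunks above are the usual atomic_t-to-refcount_t conversion: gets become refcount_inc()/refcount_inc_not_zero(), puts become refcount_dec()/refcount_dec_and_test(), and the counter saturates rather than wrapping on a reference-count bug. A minimal sketch of the same pattern on a hypothetical object (my_obj is not IPVS code):

#include <linux/refcount.h>
#include <linux/slab.h>

struct my_obj {
	refcount_t refcnt;
};

static bool my_obj_get(struct my_obj *obj)
{
	/* fails once the count has already dropped to zero */
	return refcount_inc_not_zero(&obj->refcnt);
}

static void my_obj_put(struct my_obj *obj)
{
	/* free only when the last reference goes away */
	if (refcount_dec_and_test(&obj->refcnt))
		kfree(obj);
}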
diff --git a/include/net/mac80211.h b/include/net/mac80211.h
index a3bab3c5ecfb..76ed24a201eb 100644
--- a/include/net/mac80211.h
+++ b/include/net/mac80211.h
@@ -5,7 +5,7 @@
* Copyright 2006-2007 Jiri Benc <jbenc@suse.cz>
* Copyright 2007-2010 Johannes Berg <johannes@sipsolutions.net>
* Copyright 2013-2014 Intel Mobile Communications GmbH
- * Copyright (C) 2015 - 2016 Intel Deutschland GmbH
+ * Copyright (C) 2015 - 2017 Intel Deutschland GmbH
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License version 2 as
@@ -299,6 +299,8 @@ struct ieee80211_vif_chanctx_switch {
* context had been assigned.
* @BSS_CHANGED_OCB: OCB join status changed
* @BSS_CHANGED_MU_GROUPS: VHT MU-MIMO group id or user position changed
+ * @BSS_CHANGED_KEEP_ALIVE: keep alive options (idle period or protected
+ * keep alive) changed.
*/
enum ieee80211_bss_change {
BSS_CHANGED_ASSOC = 1<<0,
@@ -325,6 +327,7 @@ enum ieee80211_bss_change {
BSS_CHANGED_BANDWIDTH = 1<<21,
BSS_CHANGED_OCB = 1<<22,
BSS_CHANGED_MU_GROUPS = 1<<23,
+ BSS_CHANGED_KEEP_ALIVE = 1<<24,
/* when adding here, make sure to change ieee80211_reconfig */
};
@@ -501,6 +504,10 @@ struct ieee80211_mu_group_data {
* implies disabled. As with the cfg80211 callback, a change here should
* cause an event to be sent indicating where the current value is in
* relation to the newly configured threshold.
+ * @cqm_rssi_low: Connection quality monitor RSSI lower threshold, a zero value
+ * implies disabled. This is an alternative mechanism to the single
+ * threshold event and can't be enabled simultaneously with it.
+ * @cqm_rssi_high: Connection quality monitor RSSI upper threshold.
* @cqm_rssi_hyst: Connection quality monitor RSSI hysteresis
 * @arp_addr_list: List of IPv4 addresses for hardware ARP filtering. The
 * hardware may filter ARP queries targeted for other addresses than listed here.
@@ -529,6 +536,13 @@ struct ieee80211_mu_group_data {
* @allow_p2p_go_ps: indication for AP or P2P GO interface, whether it's allowed
* to use P2P PS mechanism or not. AP/P2P GO is not allowed to use P2P PS
* if it has associated clients without P2P PS support.
+ * @max_idle_period: the time period during which the station can refrain from
+ * transmitting frames to its associated AP without being disassociated.
+ * In units of 1000 TUs. Zero value indicates that the AP did not include
+ * a (valid) BSS Max Idle Period Element.
+ * @protected_keep_alive: if set, indicates that the station should send an RSN
+ * protected frame to the AP to reset the idle timer at the AP for the
+ * station.
*/
struct ieee80211_bss_conf {
const u8 *bssid;
@@ -553,6 +567,8 @@ struct ieee80211_bss_conf {
u16 ht_operation_mode;
s32 cqm_rssi_thold;
u32 cqm_rssi_hyst;
+ s32 cqm_rssi_low;
+ s32 cqm_rssi_high;
struct cfg80211_chan_def chandef;
struct ieee80211_mu_group_data mu_group;
__be32 arp_addr_list[IEEE80211_BSS_ARP_ADDR_LIST_LEN];
@@ -567,6 +583,8 @@ struct ieee80211_bss_conf {
enum nl80211_tx_power_setting txpower_type;
struct ieee80211_p2p_noa_attr p2p_noa_attr;
bool allow_p2p_go_ps;
+ u16 max_idle_period;
+ bool protected_keep_alive;
};
/**
@@ -943,6 +961,19 @@ struct ieee80211_tx_info {
};
/**
+ * struct ieee80211_tx_status - extended tx status info for rate control
+ *
+ * @sta: Station that the packet was transmitted for
+ * @info: Basic tx status information
+ * @skb: Packet skb (can be NULL if not provided by the driver)
+ */
+struct ieee80211_tx_status {
+ struct ieee80211_sta *sta;
+ struct ieee80211_tx_info *info;
+ struct sk_buff *skb;
+};
+
+/**
* struct ieee80211_scan_ies - descriptors for different blocks of IEs
*
* This structure is used to point to different blocks of IEs in HW scan
@@ -1039,16 +1070,8 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* (including FCS) was received.
* @RX_FLAG_MACTIME_PLCP_START: The timestamp passed in the RX status (@mactime
* field) is valid and contains the time the SYNC preamble was received.
- * @RX_FLAG_SHORTPRE: Short preamble was used for this frame
- * @RX_FLAG_HT: HT MCS was used and rate_idx is MCS index
- * @RX_FLAG_VHT: VHT MCS was used and rate_index is MCS index
- * @RX_FLAG_40MHZ: HT40 (40 MHz) was used
- * @RX_FLAG_SHORT_GI: Short guard interval was used
* @RX_FLAG_NO_SIGNAL_VAL: The signal strength value is not present.
* Valid only for data frames (mainly A-MPDU)
- * @RX_FLAG_HT_GF: This frame was received in a HT-greenfield transmission, if
- * the driver fills this value it should add %IEEE80211_RADIOTAP_MCS_HAVE_FMT
- * to hw.radiotap_mcs_details to advertise that fact
* @RX_FLAG_AMPDU_DETAILS: A-MPDU details are known, in particular the reference
* number (@ampdu_reference) must be populated and be a distinct number for
* each A-MPDU
@@ -1061,7 +1084,6 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* is stored in the @ampdu_delimiter_crc field)
 * @RX_FLAG_MIC_STRIPPED: The MIC was stripped from this packet. Decryption was
* done by the hardware
- * @RX_FLAG_LDPC: LDPC was used
* @RX_FLAG_ONLY_MONITOR: Report frame only to monitor interfaces without
* processing it in any regular way.
* This is useful if drivers offload some frames but still want to report
@@ -1070,9 +1092,6 @@ ieee80211_tx_info_clear_status(struct ieee80211_tx_info *info)
* monitor interfaces.
* This is useful if drivers offload some frames but still want to report
* them for sniffing purposes.
- * @RX_FLAG_STBC_MASK: STBC 2 bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3
- * @RX_FLAG_10MHZ: 10 MHz (half channel) was used
- * @RX_FLAG_5MHZ: 5 MHz (quarter channel) was used
* @RX_FLAG_AMSDU_MORE: Some drivers may prefer to report separate A-MSDU
 * subframes instead of one huge frame for performance reasons.
 * All but the last MSDU from an A-MSDU should have this flag set. E.g.
@@ -1100,50 +1119,52 @@ enum mac80211_rx_flags {
RX_FLAG_FAILED_FCS_CRC = BIT(5),
RX_FLAG_FAILED_PLCP_CRC = BIT(6),
RX_FLAG_MACTIME_START = BIT(7),
- RX_FLAG_SHORTPRE = BIT(8),
- RX_FLAG_HT = BIT(9),
- RX_FLAG_40MHZ = BIT(10),
- RX_FLAG_SHORT_GI = BIT(11),
- RX_FLAG_NO_SIGNAL_VAL = BIT(12),
- RX_FLAG_HT_GF = BIT(13),
- RX_FLAG_AMPDU_DETAILS = BIT(14),
- RX_FLAG_PN_VALIDATED = BIT(15),
- RX_FLAG_DUP_VALIDATED = BIT(16),
- RX_FLAG_AMPDU_LAST_KNOWN = BIT(17),
- RX_FLAG_AMPDU_IS_LAST = BIT(18),
- RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(19),
- RX_FLAG_AMPDU_DELIM_CRC_KNOWN = BIT(20),
- RX_FLAG_MACTIME_END = BIT(21),
- RX_FLAG_VHT = BIT(22),
- RX_FLAG_LDPC = BIT(23),
- RX_FLAG_ONLY_MONITOR = BIT(24),
- RX_FLAG_SKIP_MONITOR = BIT(25),
- RX_FLAG_STBC_MASK = BIT(26) | BIT(27),
- RX_FLAG_10MHZ = BIT(28),
- RX_FLAG_5MHZ = BIT(29),
- RX_FLAG_AMSDU_MORE = BIT(30),
- RX_FLAG_RADIOTAP_VENDOR_DATA = BIT(31),
- RX_FLAG_MIC_STRIPPED = BIT_ULL(32),
- RX_FLAG_ALLOW_SAME_PN = BIT_ULL(33),
- RX_FLAG_ICV_STRIPPED = BIT_ULL(34),
+ RX_FLAG_NO_SIGNAL_VAL = BIT(8),
+ RX_FLAG_AMPDU_DETAILS = BIT(9),
+ RX_FLAG_PN_VALIDATED = BIT(10),
+ RX_FLAG_DUP_VALIDATED = BIT(11),
+ RX_FLAG_AMPDU_LAST_KNOWN = BIT(12),
+ RX_FLAG_AMPDU_IS_LAST = BIT(13),
+ RX_FLAG_AMPDU_DELIM_CRC_ERROR = BIT(14),
+ RX_FLAG_AMPDU_DELIM_CRC_KNOWN = BIT(15),
+ RX_FLAG_MACTIME_END = BIT(16),
+ RX_FLAG_ONLY_MONITOR = BIT(17),
+ RX_FLAG_SKIP_MONITOR = BIT(18),
+ RX_FLAG_AMSDU_MORE = BIT(19),
+ RX_FLAG_RADIOTAP_VENDOR_DATA = BIT(20),
+ RX_FLAG_MIC_STRIPPED = BIT(21),
+ RX_FLAG_ALLOW_SAME_PN = BIT(22),
+ RX_FLAG_ICV_STRIPPED = BIT(23),
};
-#define RX_FLAG_STBC_SHIFT 26
-
/**
- * enum mac80211_rx_vht_flags - receive VHT flags
+ * enum mac80211_rx_encoding_flags - MCS & bandwidth flags
*
- * These flags are used with the @vht_flag member of
- * &struct ieee80211_rx_status.
- * @RX_VHT_FLAG_80MHZ: 80 MHz was used
- * @RX_VHT_FLAG_160MHZ: 160 MHz was used
- * @RX_VHT_FLAG_BF: packet was beamformed
- */
+ * @RX_ENC_FLAG_SHORTPRE: Short preamble was used for this frame
+ * @RX_ENC_FLAG_SHORT_GI: Short guard interval was used
+ * @RX_ENC_FLAG_HT_GF: This frame was received in a HT-greenfield transmission,
+ * if the driver fills this value it should add
+ * %IEEE80211_RADIOTAP_MCS_HAVE_FMT
+ * to hw.radiotap_mcs_details to advertise that fact
+ * @RX_ENC_FLAG_LDPC: LDPC was used
+ * @RX_ENC_FLAG_STBC_MASK: STBC 2 bit bitmask. 1 - Nss=1, 2 - Nss=2, 3 - Nss=3
+ * @RX_ENC_FLAG_BF: packet was beamformed
+ */
+enum mac80211_rx_encoding_flags {
+ RX_ENC_FLAG_SHORTPRE = BIT(0),
+ RX_ENC_FLAG_SHORT_GI = BIT(2),
+ RX_ENC_FLAG_HT_GF = BIT(3),
+ RX_ENC_FLAG_STBC_MASK = BIT(4) | BIT(5),
+ RX_ENC_FLAG_LDPC = BIT(6),
+ RX_ENC_FLAG_BF = BIT(7),
+};
-enum mac80211_rx_vht_flags {
- RX_VHT_FLAG_80MHZ = BIT(0),
- RX_VHT_FLAG_160MHZ = BIT(1),
- RX_VHT_FLAG_BF = BIT(2),
+#define RX_ENC_FLAG_STBC_SHIFT 4
+
+enum mac80211_rx_encoding {
+ RX_ENC_LEGACY = 0,
+ RX_ENC_HT,
+ RX_ENC_VHT,
};
/**
@@ -1173,9 +1194,11 @@ enum mac80211_rx_vht_flags {
* @antenna: antenna used
* @rate_idx: index of data rate into band's supported rates or MCS index if
* HT or VHT is used (%RX_FLAG_HT/%RX_FLAG_VHT)
- * @vht_nss: number of streams (VHT only)
+ * @nss: number of streams (VHT and HE only)
* @flag: %RX_FLAG_\*
- * @vht_flag: %RX_VHT_FLAG_\*
+ * @encoding: &enum mac80211_rx_encoding
+ * @bw: &enum rate_info_bw
+ * @enc_flags: uses bits from &enum mac80211_rx_encoding_flags
* @rx_flags: internal RX flags for mac80211
* @ampdu_reference: A-MPDU reference number, must be a different value for
* each A-MPDU but the same for each subframe within one A-MPDU
@@ -1186,11 +1209,12 @@ struct ieee80211_rx_status {
u64 boottime_ns;
u32 device_timestamp;
u32 ampdu_reference;
- u64 flag;
+ u32 flag;
u16 freq;
- u8 vht_flag;
+ u8 enc_flags;
+ u8 encoding:2, bw:3;
u8 rate_idx;
- u8 vht_nss;
+ u8 nss;
u8 rx_flags;
u8 band;
u8 antenna;
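With the rate and bandwidth bits moved out of @flag, a driver now fills @encoding, @bw and @enc_flags instead of RX_FLAG_HT/RX_FLAG_VHT and the old width/GI flags. A hedged sketch for an HT40 short-GI frame; mydrv_fill_rx_status() and mcs_index are illustrative, and RATE_INFO_BW_40 is assumed from the cfg80211 rate_info_bw enum referenced in the @bw documentation above:

static void mydrv_fill_rx_status(struct sk_buff *skb, u8 mcs_index)
{
	struct ieee80211_rx_status *rx_status = IEEE80211_SKB_RXCB(skb);

	rx_status->encoding = RX_ENC_HT;		/* was RX_FLAG_HT */
	rx_status->bw = RATE_INFO_BW_40;		/* was RX_FLAG_40MHZ */
	rx_status->enc_flags |= RX_ENC_FLAG_SHORT_GI;	/* was RX_FLAG_SHORT_GI */
	rx_status->rate_idx = mcs_index;		/* MCS index, as before */
}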
@@ -4200,6 +4224,23 @@ void ieee80211_tx_status(struct ieee80211_hw *hw,
struct sk_buff *skb);
/**
+ * ieee80211_tx_status_ext - extended transmit status callback
+ *
+ * This function can be used as a replacement for ieee80211_tx_status
+ * in drivers that may want to provide extra information that does not
+ * fit into &struct ieee80211_tx_info.
+ *
+ * Calls to this function for a single hardware must be synchronized
+ * against each other. Calls to this function, ieee80211_tx_status_ni()
+ * and ieee80211_tx_status_irqsafe() may not be mixed for a single hardware.
+ *
+ * @hw: the hardware the frame was transmitted by
+ * @status: tx status information
+ */
+void ieee80211_tx_status_ext(struct ieee80211_hw *hw,
+ struct ieee80211_tx_status *status);
+
+/**
* ieee80211_tx_status_noskb - transmit status callback without skb
*
* This function can be used as a replacement for ieee80211_tx_status
@@ -4215,9 +4256,17 @@ void ieee80211_tx_status(struct ieee80211_hw *hw,
* (NULL for multicast packets)
* @info: tx status information
*/
-void ieee80211_tx_status_noskb(struct ieee80211_hw *hw,
- struct ieee80211_sta *sta,
- struct ieee80211_tx_info *info);
+static inline void ieee80211_tx_status_noskb(struct ieee80211_hw *hw,
+ struct ieee80211_sta *sta,
+ struct ieee80211_tx_info *info)
+{
+ struct ieee80211_tx_status status = {
+ .sta = sta,
+ .info = info,
+ };
+
+ ieee80211_tx_status_ext(hw, &status);
+}
/**
* ieee80211_tx_status_ni - transmit status callback (in process context)
@@ -5438,9 +5487,6 @@ void ieee80211_stop_rx_ba_session_offl(struct ieee80211_vif *vif,
* RTS threshold
* @short_preamble: whether mac80211 will request short-preamble transmission
* if the selected rate supports it
- * @max_rate_idx: user-requested maximum (legacy) rate
- * (deprecated; this will be removed once drivers get updated to use
- * rate_idx_mask)
* @rate_idx_mask: user-requested (legacy) rate mask
* @rate_idx_mcs_mask: user-requested MCS rate mask (NULL if not in use)
* @bss: whether this frame is sent out in AP or IBSS mode
@@ -5452,7 +5498,6 @@ struct ieee80211_tx_rate_control {
struct sk_buff *skb;
struct ieee80211_tx_rate reported_rate;
bool rts, short_preamble;
- u8 max_rate_idx;
u32 rate_idx_mask;
u8 *rate_idx_mcs_mask;
bool bss;
@@ -5474,10 +5519,9 @@ struct rate_control_ops {
void (*free_sta)(void *priv, struct ieee80211_sta *sta,
void *priv_sta);
- void (*tx_status_noskb)(void *priv,
- struct ieee80211_supported_band *sband,
- struct ieee80211_sta *sta, void *priv_sta,
- struct ieee80211_tx_info *info);
+ void (*tx_status_ext)(void *priv,
+ struct ieee80211_supported_band *sband,
+ void *priv_sta, struct ieee80211_tx_status *st);
void (*tx_status)(void *priv, struct ieee80211_supported_band *sband,
struct ieee80211_sta *sta, void *priv_sta,
struct sk_buff *skb);
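Since ieee80211_tx_status_noskb() is now just an inline wrapper, a driver that has extra status data (or no skb at all) reports through ieee80211_tx_status_ext() directly. A hedged driver-side sketch; mydrv_report_tx() is hypothetical, while the mac80211 calls are the ones declared above:

static void mydrv_report_tx(struct ieee80211_hw *hw, struct sk_buff *skb,
			    struct ieee80211_sta *sta, bool acked)
{
	struct ieee80211_tx_info *info = IEEE80211_SKB_CB(skb);
	struct ieee80211_tx_status status = {
		.sta = sta,
		.info = info,
		.skb = skb,	/* may be left NULL if the skb is not available */
	};

	ieee80211_tx_info_clear_status(info);
	if (acked)
		info->flags |= IEEE80211_TX_STAT_ACK;

	ieee80211_tx_status_ext(hw, &status);
}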
diff --git a/include/net/mpls_iptunnel.h b/include/net/mpls_iptunnel.h
index 179253f9dcfd..9d22bf67ac86 100644
--- a/include/net/mpls_iptunnel.h
+++ b/include/net/mpls_iptunnel.h
@@ -14,11 +14,12 @@
#ifndef _NET_MPLS_IPTUNNEL_H
#define _NET_MPLS_IPTUNNEL_H 1
-#define MAX_NEW_LABELS 2
-
struct mpls_iptunnel_encap {
- u32 label[MAX_NEW_LABELS];
u8 labels;
+ u8 ttl_propagate;
+ u8 default_ttl;
+ u8 reserved1;
+ u32 label[0];
};
static inline struct mpls_iptunnel_encap *mpls_lwtunnel_encap(struct lwtunnel_state *lwtstate)
diff --git a/include/net/ndisc.h b/include/net/ndisc.h
index 8a0214654b6b..1036c902d2c9 100644
--- a/include/net/ndisc.h
+++ b/include/net/ndisc.h
@@ -439,8 +439,10 @@ void ndisc_update(const struct net_device *dev, struct neighbour *neigh,
* IGMP
*/
int igmp6_init(void);
+int igmp6_late_init(void);
void igmp6_cleanup(void);
+void igmp6_late_cleanup(void);
int igmp6_event_query(struct sk_buff *skb);
diff --git a/include/net/neighbour.h b/include/net/neighbour.h
index 5ebf69491160..e4dd3a214034 100644
--- a/include/net/neighbour.h
+++ b/include/net/neighbour.h
@@ -314,7 +314,8 @@ static inline struct neighbour *neigh_create(struct neigh_table *tbl,
}
void neigh_destroy(struct neighbour *neigh);
int __neigh_event_send(struct neighbour *neigh, struct sk_buff *skb);
-int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags);
+int neigh_update(struct neighbour *neigh, const u8 *lladdr, u8 new, u32 flags,
+ u32 nlmsg_pid);
void __neigh_set_probe_once(struct neighbour *neigh);
void neigh_changeaddr(struct neigh_table *tbl, struct net_device *dev);
int neigh_ifdown(struct neigh_table *tbl, struct net_device *dev);
@@ -449,7 +450,7 @@ static inline int neigh_hh_bridge(struct hh_cache *hh, struct sk_buff *skb)
static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb)
{
unsigned int seq;
- int hh_len;
+ unsigned int hh_len;
do {
seq = read_seqbegin(&hh->hh_lock);
@@ -458,7 +459,7 @@ static inline int neigh_hh_output(const struct hh_cache *hh, struct sk_buff *skb
/* this is inlined by gcc */
memcpy(skb->data - HH_DATA_MOD, hh->hh_data, HH_DATA_MOD);
} else {
- int hh_alen = HH_DATA_ALIGN(hh_len);
+ unsigned int hh_alen = HH_DATA_ALIGN(hh_len);
memcpy(skb->data - hh_alen, hh->hh_data, hh_alen);
}
diff --git a/include/net/net_namespace.h b/include/net/net_namespace.h
index af8fe8a909dc..fe80bb48ab1f 100644
--- a/include/net/net_namespace.h
+++ b/include/net/net_namespace.h
@@ -27,6 +27,7 @@
#include <net/netns/nftables.h>
#include <net/netns/xfrm.h>
#include <net/netns/mpls.h>
+#include <net/netns/can.h>
#include <linux/ns_common.h>
#include <linux/idr.h>
#include <linux/skbuff.h>
@@ -141,6 +142,9 @@ struct net {
#if IS_ENABLED(CONFIG_MPLS)
struct netns_mpls mpls;
#endif
+#if IS_ENABLED(CONFIG_CAN)
+ struct netns_can can;
+#endif
struct sock *diag_nlsk;
atomic_t fnhe_genid;
};
diff --git a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
index 6ff32815641b..919e4e8af327 100644
--- a/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
+++ b/include/net/netfilter/ipv4/nf_conntrack_ipv4.h
@@ -14,7 +14,6 @@ extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv4;
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp4;
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp4;
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite4;
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmp;
#ifdef CONFIG_NF_CT_PROTO_DCCP
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp4;
diff --git a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
index c59b82456f89..eaea968f8657 100644
--- a/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
+++ b/include/net/netfilter/ipv6/nf_conntrack_ipv6.h
@@ -5,7 +5,6 @@ extern struct nf_conntrack_l3proto nf_conntrack_l3proto_ipv6;
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_tcp6;
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udp6;
-extern struct nf_conntrack_l4proto nf_conntrack_l4proto_udplite6;
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_icmpv6;
#ifdef CONFIG_NF_CT_PROTO_DCCP
extern struct nf_conntrack_l4proto nf_conntrack_l4proto_dccp6;
diff --git a/include/net/netfilter/nf_conntrack.h b/include/net/netfilter/nf_conntrack.h
index 19605878da47..8ece3612d0cd 100644
--- a/include/net/netfilter/nf_conntrack.h
+++ b/include/net/netfilter/nf_conntrack.h
@@ -50,25 +50,6 @@ union nf_conntrack_expect_proto {
#define NF_CT_ASSERT(x)
#endif
-struct nf_conntrack_helper;
-
-/* Must be kept in sync with the classes defined by helpers */
-#define NF_CT_MAX_EXPECT_CLASSES 4
-
-/* nf_conn feature for connections that have a helper */
-struct nf_conn_help {
- /* Helper. if any */
- struct nf_conntrack_helper __rcu *helper;
-
- struct hlist_head expectations;
-
- /* Current number of expected connections */
- u8 expecting[NF_CT_MAX_EXPECT_CLASSES];
-
- /* private helper information. */
- char data[];
-};
-
#include <net/netfilter/ipv4/nf_conntrack_ipv4.h>
#include <net/netfilter/ipv6/nf_conntrack_ipv6.h>
@@ -243,14 +224,6 @@ extern s32 (*nf_ct_nat_offset)(const struct nf_conn *ct,
enum ip_conntrack_dir dir,
u32 seq);
-/* Fake conntrack entry for untracked connections */
-DECLARE_PER_CPU_ALIGNED(struct nf_conn, nf_conntrack_untracked);
-static inline struct nf_conn *nf_ct_untracked_get(void)
-{
- return raw_cpu_ptr(&nf_conntrack_untracked);
-}
-void nf_ct_untracked_status_or(unsigned long bits);
-
/* Iterate over all conntracks: if iter returns true, it's deleted. */
void nf_ct_iterate_cleanup(struct net *net,
int (*iter)(struct nf_conn *i, void *data),
@@ -281,11 +254,6 @@ static inline int nf_ct_is_dying(const struct nf_conn *ct)
return test_bit(IPS_DYING_BIT, &ct->status);
}
-static inline int nf_ct_is_untracked(const struct nf_conn *ct)
-{
- return test_bit(IPS_UNTRACKED_BIT, &ct->status);
-}
-
/* Packet is received from loopback */
static inline bool nf_is_loopback_packet(const struct sk_buff *skb)
{
diff --git a/include/net/netfilter/nf_conntrack_core.h b/include/net/netfilter/nf_conntrack_core.h
index 84ec7ca5f195..81d7f8a30945 100644
--- a/include/net/netfilter/nf_conntrack_core.h
+++ b/include/net/netfilter/nf_conntrack_core.h
@@ -65,7 +65,7 @@ static inline int nf_conntrack_confirm(struct sk_buff *skb)
struct nf_conn *ct = (struct nf_conn *)skb_nfct(skb);
int ret = NF_ACCEPT;
- if (ct && !nf_ct_is_untracked(ct)) {
+ if (ct) {
if (!nf_ct_is_confirmed(ct))
ret = __nf_conntrack_confirm(skb);
if (likely(ret == NF_ACCEPT))
diff --git a/include/net/netfilter/nf_conntrack_ecache.h b/include/net/netfilter/nf_conntrack_ecache.h
index 12d967b58726..2a10c6570fcc 100644
--- a/include/net/netfilter/nf_conntrack_ecache.h
+++ b/include/net/netfilter/nf_conntrack_ecache.h
@@ -20,11 +20,11 @@ enum nf_ct_ecache_state {
struct nf_conntrack_ecache {
unsigned long cache; /* bitops want long */
- unsigned long missed; /* missed events */
+ u16 missed; /* missed events */
u16 ctmask; /* bitmask of ct events to be delivered */
u16 expmask; /* bitmask of expect events to be delivered */
+ enum nf_ct_ecache_state state:8;/* ecache state */
u32 portid; /* netlink portid of destroyer */
- enum nf_ct_ecache_state state; /* ecache state */
};
static inline struct nf_conntrack_ecache *
diff --git a/include/net/netfilter/nf_conntrack_expect.h b/include/net/netfilter/nf_conntrack_expect.h
index 5ed33ea4718e..2ba54feaccd8 100644
--- a/include/net/netfilter/nf_conntrack_expect.h
+++ b/include/net/netfilter/nf_conntrack_expect.h
@@ -5,6 +5,8 @@
#ifndef _NF_CONNTRACK_EXPECT_H
#define _NF_CONNTRACK_EXPECT_H
+#include <linux/refcount.h>
+
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_zones.h>
@@ -37,7 +39,7 @@ struct nf_conntrack_expect {
struct timer_list timeout;
/* Usage count. */
- atomic_t use;
+ refcount_t use;
/* Flags */
unsigned int flags;
@@ -71,6 +73,7 @@ struct nf_conntrack_expect_policy {
};
#define NF_CT_EXPECT_CLASS_DEFAULT 0
+#define NF_CT_EXPECT_MAX_CNT 255
int nf_conntrack_expect_pernet_init(struct net *net);
void nf_conntrack_expect_pernet_fini(struct net *net);
@@ -102,6 +105,7 @@ static inline void nf_ct_unlink_expect(struct nf_conntrack_expect *exp)
void nf_ct_remove_expectations(struct nf_conn *ct);
void nf_ct_unexpect_related(struct nf_conntrack_expect *exp);
+bool nf_ct_remove_expect(struct nf_conntrack_expect *exp);
/* Allocate space for an expectation: this is mandatory before calling
nf_ct_expect_related. You will have to call put afterwards. */
diff --git a/include/net/netfilter/nf_conntrack_extend.h b/include/net/netfilter/nf_conntrack_extend.h
index 1c3035dda31f..4944bc9153cf 100644
--- a/include/net/netfilter/nf_conntrack_extend.h
+++ b/include/net/netfilter/nf_conntrack_extend.h
@@ -43,8 +43,8 @@ enum nf_ct_ext_id {
/* Extensions: optional stuff which isn't permanently in struct. */
struct nf_ct_ext {
struct rcu_head rcu;
- u16 offset[NF_CT_EXT_NUM];
- u16 len;
+ u8 offset[NF_CT_EXT_NUM];
+ u8 len;
char data[0];
};
@@ -69,12 +69,7 @@ static inline void *__nf_ct_ext_find(const struct nf_conn *ct, u8 id)
((id##_TYPE *)__nf_ct_ext_find((ext), (id)))
/* Destroy all relationships */
-void __nf_ct_ext_destroy(struct nf_conn *ct);
-static inline void nf_ct_ext_destroy(struct nf_conn *ct)
-{
- if (ct->ext)
- __nf_ct_ext_destroy(ct);
-}
+void nf_ct_ext_destroy(struct nf_conn *ct);
/* Free operation. If you want to free an object referenced from the private area,
* please implement __nf_ct_ext_free() and call it.
@@ -86,15 +81,7 @@ static inline void nf_ct_ext_free(struct nf_conn *ct)
}
/* Add this type, returns pointer to data or NULL. */
-void *__nf_ct_ext_add_length(struct nf_conn *ct, enum nf_ct_ext_id id,
- size_t var_alloc_len, gfp_t gfp);
-
-#define nf_ct_ext_add(ct, id, gfp) \
- ((id##_TYPE *)__nf_ct_ext_add_length((ct), (id), 0, (gfp)))
-#define nf_ct_ext_add_length(ct, id, len, gfp) \
- ((id##_TYPE *)__nf_ct_ext_add_length((ct), (id), (len), (gfp)))
-
-#define NF_CT_EXT_F_PREALLOC 0x0001
+void *nf_ct_ext_add(struct nf_conn *ct, enum nf_ct_ext_id id, gfp_t gfp);
struct nf_ct_ext_type {
/* Destroys relationships (can be NULL). */
@@ -102,15 +89,11 @@ struct nf_ct_ext_type {
enum nf_ct_ext_id id;
- unsigned int flags;
-
/* Length and min alignment. */
u8 len;
u8 align;
- /* initial size of nf_ct_ext. */
- u8 alloc_size;
};
-int nf_ct_extend_register(struct nf_ct_ext_type *type);
-void nf_ct_extend_unregister(struct nf_ct_ext_type *type);
+int nf_ct_extend_register(const struct nf_ct_ext_type *type);
+void nf_ct_extend_unregister(const struct nf_ct_ext_type *type);
#endif /* _NF_CONNTRACK_EXTEND_H */
diff --git a/include/net/netfilter/nf_conntrack_helper.h b/include/net/netfilter/nf_conntrack_helper.h
index 1eaac1f4cd6a..e04fa7691e5d 100644
--- a/include/net/netfilter/nf_conntrack_helper.h
+++ b/include/net/netfilter/nf_conntrack_helper.h
@@ -29,9 +29,6 @@ struct nf_conntrack_helper {
struct module *me; /* pointer to self */
const struct nf_conntrack_expect_policy *expect_policy;
- /* length of internal data, ie. sizeof(struct nf_ct_*_master) */
- size_t data_len;
-
/* Tuple of things we will help (compared against server response) */
struct nf_conntrack_tuple tuple;
@@ -49,9 +46,33 @@ struct nf_conntrack_helper {
unsigned int expect_class_max;
unsigned int flags;
- unsigned int queue_num; /* For user-space helpers. */
+
+ /* For user-space helpers: */
+ unsigned int queue_num;
+ /* length of userspace private data stored in nf_conn_help->data */
+ u16 data_len;
};
+/* Must be kept in sync with the classes defined by helpers */
+#define NF_CT_MAX_EXPECT_CLASSES 4
+
+/* nf_conn feature for connections that have a helper */
+struct nf_conn_help {
+ /* Helper, if any */
+ struct nf_conntrack_helper __rcu *helper;
+
+ struct hlist_head expectations;
+
+ /* Current number of expected connections */
+ u8 expecting[NF_CT_MAX_EXPECT_CLASSES];
+
+ /* private helper information. */
+ char data[32] __aligned(8);
+};
+
+#define NF_CT_HELPER_BUILD_BUG_ON(structsize) \
+ BUILD_BUG_ON((structsize) > FIELD_SIZEOF(struct nf_conn_help, data))
+
struct nf_conntrack_helper *__nf_conntrack_helper_find(const char *name,
u16 l3num, u8 protonum);
@@ -62,7 +83,7 @@ void nf_ct_helper_init(struct nf_conntrack_helper *helper,
u16 l3num, u16 protonum, const char *name,
u16 default_port, u16 spec_port, u32 id,
const struct nf_conntrack_expect_policy *exp_pol,
- u32 expect_class_max, u32 data_len,
+ u32 expect_class_max,
int (*help)(struct sk_buff *skb, unsigned int protoff,
struct nf_conn *ct,
enum ip_conntrack_info ctinfo),
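With data_len gone from nf_ct_helper_init() for in-kernel helpers, private helper state now lives in the fixed 32-byte nf_conn_help::data area and its size is asserted at build time. A hedged sketch; struct my_helper_master and my_helper_module_init() are hypothetical:

struct my_helper_master {
	u32 seq_offset;
	u16 flags;
};

static int __init my_helper_module_init(void)
{
	/* compile-time guard against outgrowing nf_conn_help::data */
	NF_CT_HELPER_BUILD_BUG_ON(sizeof(struct my_helper_master));

	/* ... set up and register the helper as before ... */
	return 0;
}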
diff --git a/include/net/netfilter/nf_conntrack_l4proto.h b/include/net/netfilter/nf_conntrack_l4proto.h
index 85e993e278d5..7032e044bbe2 100644
--- a/include/net/netfilter/nf_conntrack_l4proto.h
+++ b/include/net/netfilter/nf_conntrack_l4proto.h
@@ -58,6 +58,9 @@ struct nf_conntrack_l4proto {
unsigned int dataoff,
u_int8_t pf, unsigned int hooknum);
+ /* called by gc worker if table is full */
+ bool (*can_early_drop)(const struct nf_conn *ct);
+
/* Print out the per-protocol part of the tuple. Return like seq_* */
void (*print_tuple)(struct seq_file *s,
const struct nf_conntrack_tuple *);
diff --git a/include/net/netfilter/nf_conntrack_synproxy.h b/include/net/netfilter/nf_conntrack_synproxy.h
index b0ca402c1f72..a2fcb5271726 100644
--- a/include/net/netfilter/nf_conntrack_synproxy.h
+++ b/include/net/netfilter/nf_conntrack_synproxy.h
@@ -52,6 +52,8 @@ struct synproxy_stats {
struct synproxy_net {
struct nf_conn *tmpl;
struct synproxy_stats __percpu *stats;
+ unsigned int hook_ref4;
+ unsigned int hook_ref6;
};
extern unsigned int synproxy_net_id;
diff --git a/include/net/netfilter/nf_conntrack_timeout.h b/include/net/netfilter/nf_conntrack_timeout.h
index 5cc5e9e6171a..d40b89355fdd 100644
--- a/include/net/netfilter/nf_conntrack_timeout.h
+++ b/include/net/netfilter/nf_conntrack_timeout.h
@@ -4,6 +4,7 @@
#include <net/net_namespace.h>
#include <linux/netfilter/nf_conntrack_common.h>
#include <linux/netfilter/nf_conntrack_tuple_common.h>
+#include <linux/refcount.h>
#include <net/netfilter/nf_conntrack.h>
#include <net/netfilter/nf_conntrack_extend.h>
@@ -12,7 +13,7 @@
struct ctnl_timeout {
struct list_head head;
struct rcu_head rcu_head;
- atomic_t refcnt;
+ refcount_t refcnt;
char name[CTNL_TIMEOUT_NAME_MAX];
__u16 l3num;
struct nf_conntrack_l4proto *l4proto;
diff --git a/include/net/netfilter/nf_nat.h b/include/net/netfilter/nf_nat.h
index c327a431a6f3..05c82a1a4267 100644
--- a/include/net/netfilter/nf_nat.h
+++ b/include/net/netfilter/nf_nat.h
@@ -67,7 +67,7 @@ static inline bool nf_nat_oif_changed(unsigned int hooknum,
{
#if IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV4) || \
IS_ENABLED(CONFIG_NF_NAT_MASQUERADE_IPV6)
- return nat->masq_index && hooknum == NF_INET_POST_ROUTING &&
+ return nat && nat->masq_index && hooknum == NF_INET_POST_ROUTING &&
CTINFO2DIR(ctinfo) == IP_CT_DIR_ORIGINAL &&
nat->masq_index != out->ifindex;
#else
diff --git a/include/net/netfilter/nf_nat_helper.h b/include/net/netfilter/nf_nat_helper.h
index 01bcc6bfbcc9..fbfa5acf4f14 100644
--- a/include/net/netfilter/nf_nat_helper.h
+++ b/include/net/netfilter/nf_nat_helper.h
@@ -7,31 +7,31 @@
struct sk_buff;
/* These return true or false. */
-int __nf_nat_mangle_tcp_packet(struct sk_buff *skb, struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff, unsigned int match_offset,
- unsigned int match_len, const char *rep_buffer,
- unsigned int rep_len, bool adjust);
+bool __nf_nat_mangle_tcp_packet(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff, unsigned int match_offset,
+ unsigned int match_len, const char *rep_buffer,
+ unsigned int rep_len, bool adjust);
-static inline int nf_nat_mangle_tcp_packet(struct sk_buff *skb,
- struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff,
- unsigned int match_offset,
- unsigned int match_len,
- const char *rep_buffer,
- unsigned int rep_len)
+static inline bool nf_nat_mangle_tcp_packet(struct sk_buff *skb,
+ struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff,
+ unsigned int match_offset,
+ unsigned int match_len,
+ const char *rep_buffer,
+ unsigned int rep_len)
{
return __nf_nat_mangle_tcp_packet(skb, ct, ctinfo, protoff,
match_offset, match_len,
rep_buffer, rep_len, true);
}
-int nf_nat_mangle_udp_packet(struct sk_buff *skb, struct nf_conn *ct,
- enum ip_conntrack_info ctinfo,
- unsigned int protoff, unsigned int match_offset,
- unsigned int match_len, const char *rep_buffer,
- unsigned int rep_len);
+bool nf_nat_mangle_udp_packet(struct sk_buff *skb, struct nf_conn *ct,
+ enum ip_conntrack_info ctinfo,
+ unsigned int protoff, unsigned int match_offset,
+ unsigned int match_len, const char *rep_buffer,
+ unsigned int rep_len);
/* Setup NAT on this expected conntrack so it follows master, but goes
* to port ct->master->saved_proto. */
diff --git a/include/net/netfilter/nf_queue.h b/include/net/netfilter/nf_queue.h
index 09948d10e38e..4454719ff849 100644
--- a/include/net/netfilter/nf_queue.h
+++ b/include/net/netfilter/nf_queue.h
@@ -24,8 +24,7 @@ struct nf_queue_entry {
struct nf_queue_handler {
int (*outfn)(struct nf_queue_entry *entry,
unsigned int queuenum);
- void (*nf_hook_drop)(struct net *net,
- const struct nf_hook_entry *hooks);
+ unsigned int (*nf_hook_drop)(struct net *net);
};
void nf_register_queue_handler(struct net *net, const struct nf_queue_handler *qh);
diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h
index 0136028652bd..028faec8fc27 100644
--- a/include/net/netfilter/nf_tables.h
+++ b/include/net/netfilter/nf_tables.h
@@ -413,10 +413,11 @@ static inline struct nft_set *nft_set_container_of(const void *priv)
return (void *)priv - offsetof(struct nft_set, data);
}
-struct nft_set *nf_tables_set_lookup(const struct nft_table *table,
- const struct nlattr *nla, u8 genmask);
-struct nft_set *nf_tables_set_lookup_byid(const struct net *net,
- const struct nlattr *nla, u8 genmask);
+struct nft_set *nft_set_lookup(const struct net *net,
+ const struct nft_table *table,
+ const struct nlattr *nla_set_name,
+ const struct nlattr *nla_set_id,
+ u8 genmask);
static inline unsigned long nft_set_gc_interval(const struct nft_set *set)
{
@@ -910,6 +911,11 @@ static inline struct nft_base_chain *nft_base_chain(const struct nft_chain *chai
return container_of(chain, struct nft_base_chain, chain);
}
+static inline bool nft_is_base_chain(const struct nft_chain *chain)
+{
+ return chain->flags & NFT_BASE_CHAIN;
+}
+
int __nft_release_basechain(struct nft_ctx *ctx);
unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv);
@@ -1044,7 +1050,8 @@ struct nft_object_type {
unsigned int maxattr;
struct module *owner;
const struct nla_policy *policy;
- int (*init)(const struct nlattr * const tb[],
+ int (*init)(const struct nft_ctx *ctx,
+ const struct nlattr *const tb[],
struct nft_object *obj);
void (*destroy)(struct nft_object *obj);
int (*dump)(struct sk_buff *skb,
diff --git a/include/net/netfilter/nft_fib.h b/include/net/netfilter/nft_fib.h
index 5ceb2205e4e3..381af9469e6a 100644
--- a/include/net/netfilter/nft_fib.h
+++ b/include/net/netfilter/nft_fib.h
@@ -32,6 +32,6 @@ void nft_fib6_eval_type(const struct nft_expr *expr, struct nft_regs *regs,
void nft_fib6_eval(const struct nft_expr *expr, struct nft_regs *regs,
const struct nft_pktinfo *pkt);
-void nft_fib_store_result(void *reg, enum nft_fib_result r,
+void nft_fib_store_result(void *reg, const struct nft_fib *priv,
const struct nft_pktinfo *pkt, int index);
#endif
diff --git a/include/net/netlink.h b/include/net/netlink.h
index b239fcd33d80..01709172b3d3 100644
--- a/include/net/netlink.h
+++ b/include/net/netlink.h
@@ -233,14 +233,17 @@ struct nl_info {
};
int netlink_rcv_skb(struct sk_buff *skb,
- int (*cb)(struct sk_buff *, struct nlmsghdr *));
+ int (*cb)(struct sk_buff *, struct nlmsghdr *,
+ struct netlink_ext_ack *));
int nlmsg_notify(struct sock *sk, struct sk_buff *skb, u32 portid,
unsigned int group, int report, gfp_t flags);
int nla_validate(const struct nlattr *head, int len, int maxtype,
- const struct nla_policy *policy);
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack);
int nla_parse(struct nlattr **tb, int maxtype, const struct nlattr *head,
- int len, const struct nla_policy *policy);
+ int len, const struct nla_policy *policy,
+ struct netlink_ext_ack *extack);
int nla_policy_len(const struct nla_policy *, int);
struct nlattr *nla_find(const struct nlattr *head, int len, int attrtype);
size_t nla_strlcpy(char *dst, const struct nlattr *nla, size_t dstsize);
@@ -374,18 +377,20 @@ nlmsg_next(const struct nlmsghdr *nlh, int *remaining)
* @tb: destination array with maxtype+1 elements
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
+ * @extack: extended ACK report struct
*
* See nla_parse()
*/
static inline int nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen,
struct nlattr *tb[], int maxtype,
- const struct nla_policy *policy)
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
{
if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
return -EINVAL;
return nla_parse(tb, maxtype, nlmsg_attrdata(nlh, hdrlen),
- nlmsg_attrlen(nlh, hdrlen), policy);
+ nlmsg_attrlen(nlh, hdrlen), policy, extack);
}
/**
@@ -409,16 +414,19 @@ static inline struct nlattr *nlmsg_find_attr(const struct nlmsghdr *nlh,
 * @hdrlen: length of family-specific header
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
+ * @extack: extended ACK report struct
*/
static inline int nlmsg_validate(const struct nlmsghdr *nlh,
int hdrlen, int maxtype,
- const struct nla_policy *policy)
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
{
if (nlh->nlmsg_len < nlmsg_msg_size(hdrlen))
return -EINVAL;
return nla_validate(nlmsg_attrdata(nlh, hdrlen),
- nlmsg_attrlen(nlh, hdrlen), maxtype, policy);
+ nlmsg_attrlen(nlh, hdrlen), maxtype, policy,
+ extack);
}
/**
@@ -739,14 +747,17 @@ nla_find_nested(const struct nlattr *nla, int attrtype)
* @maxtype: maximum attribute type to be expected
* @nla: attribute containing the nested attributes
* @policy: validation policy
+ * @extack: extended ACK report struct
*
* See nla_parse()
*/
static inline int nla_parse_nested(struct nlattr *tb[], int maxtype,
const struct nlattr *nla,
- const struct nla_policy *policy)
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
{
- return nla_parse(tb, maxtype, nla_data(nla), nla_len(nla), policy);
+ return nla_parse(tb, maxtype, nla_data(nla), nla_len(nla), policy,
+ extack);
}
/**
@@ -1252,6 +1263,7 @@ static inline void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start)
* @start: container attribute
* @maxtype: maximum attribute type to be expected
* @policy: validation policy
+ * @extack: extended ACK report struct
*
* Validates all attributes in the nested attribute stream against the
* specified policy. Attributes with a type exceeding maxtype will be
@@ -1260,9 +1272,11 @@ static inline void nla_nest_cancel(struct sk_buff *skb, struct nlattr *start)
* Returns 0 on success or a negative error code.
*/
static inline int nla_validate_nested(const struct nlattr *start, int maxtype,
- const struct nla_policy *policy)
+ const struct nla_policy *policy,
+ struct netlink_ext_ack *extack)
{
- return nla_validate(nla_data(start), nla_len(start), maxtype, policy);
+ return nla_validate(nla_data(start), nla_len(start), maxtype, policy,
+ extack);
}
/**
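Every nla_parse()/nla_validate() variant above gains a struct netlink_ext_ack * so the offending attribute can be reported back to userspace. A hedged caller sketch; MY_ATTR_MAX and my_policy are hypothetical:

static int my_parse_nested(const struct nlattr *nest,
			   struct netlink_ext_ack *extack)
{
	struct nlattr *tb[MY_ATTR_MAX + 1];
	int err;

	err = nla_parse_nested(tb, MY_ATTR_MAX, nest, my_policy, extack);
	if (err < 0)
		return err;	/* bad attribute is recorded in extack */

	/* ... use tb[] ... */
	return 0;
}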
diff --git a/include/net/netns/can.h b/include/net/netns/can.h
new file mode 100644
index 000000000000..b106e6ae2e5b
--- /dev/null
+++ b/include/net/netns/can.h
@@ -0,0 +1,40 @@
+/*
+ * can in net namespaces
+ */
+
+#ifndef __NETNS_CAN_H__
+#define __NETNS_CAN_H__
+
+#include <linux/spinlock.h>
+
+struct dev_rcv_lists;
+struct s_stats;
+struct s_pstats;
+
+struct netns_can {
+#if IS_ENABLED(CONFIG_PROC_FS)
+ struct proc_dir_entry *proc_dir;
+ struct proc_dir_entry *pde_version;
+ struct proc_dir_entry *pde_stats;
+ struct proc_dir_entry *pde_reset_stats;
+ struct proc_dir_entry *pde_rcvlist_all;
+ struct proc_dir_entry *pde_rcvlist_fil;
+ struct proc_dir_entry *pde_rcvlist_inv;
+ struct proc_dir_entry *pde_rcvlist_sff;
+ struct proc_dir_entry *pde_rcvlist_eff;
+ struct proc_dir_entry *pde_rcvlist_err;
+ struct proc_dir_entry *bcmproc_dir;
+#endif
+
+ /* receive filters subscribed for 'all' CAN devices */
+ struct dev_rcv_lists *can_rx_alldev_list;
+ spinlock_t can_rcvlists_lock;
+ struct timer_list can_stattimer;/* timer for statistics update */
+ struct s_stats *can_stats; /* packet statistics */
+ struct s_pstats *can_pstats; /* receive list statistics */
+
+ /* CAN GW per-net gateway jobs */
+ struct hlist_head cgw_list;
+};
+
+#endif /* __NETNS_CAN_H__ */
diff --git a/include/net/netns/ipv4.h b/include/net/netns/ipv4.h
index 622d2da27135..cd686c4fb32d 100644
--- a/include/net/netns/ipv4.h
+++ b/include/net/netns/ipv4.h
@@ -33,7 +33,6 @@ struct inet_timewait_death_row {
atomic_t tw_count;
struct inet_hashinfo *hashinfo ____cacheline_aligned_in_smp;
- int sysctl_tw_recycle;
int sysctl_max_tw_buckets;
};
@@ -96,6 +95,8 @@ struct netns_ipv4 {
/* Shall we try to damage output packets if routing dev changes? */
int sysctl_ip_dynaddr;
int sysctl_ip_early_demux;
+ int sysctl_tcp_early_demux;
+ int sysctl_udp_early_demux;
int sysctl_fwmark_reflect;
int sysctl_tcp_fwmark_accept;
@@ -152,6 +153,7 @@ struct netns_ipv4 {
#endif
#ifdef CONFIG_IP_ROUTE_MULTIPATH
int sysctl_fib_multipath_use_neigh;
+ int sysctl_fib_multipath_hash_policy;
#endif
unsigned int fib_seq; /* protected by rtnl_mutex */
diff --git a/include/net/netns/mpls.h b/include/net/netns/mpls.h
index d29203651c01..6608b3693385 100644
--- a/include/net/netns/mpls.h
+++ b/include/net/netns/mpls.h
@@ -9,8 +9,11 @@ struct mpls_route;
struct ctl_table_header;
struct netns_mpls {
+ int ip_ttl_propagate;
+ int default_ttl;
size_t platform_labels;
struct mpls_route __rcu * __rcu *platform_label;
+
struct ctl_table_header *ctl;
};
diff --git a/include/net/nfc/nfc.h b/include/net/nfc/nfc.h
index 1a3de8b34ad2..bbdc73a3239d 100644
--- a/include/net/nfc/nfc.h
+++ b/include/net/nfc/nfc.h
@@ -27,6 +27,7 @@
#include <linux/device.h>
#include <linux/skbuff.h>
+#define nfc_dbg(dev, fmt, ...) dev_dbg((dev), "NFC: " fmt, ##__VA_ARGS__)
#define nfc_info(dev, fmt, ...) dev_info((dev), "NFC: " fmt, ##__VA_ARGS__)
#define nfc_err(dev, fmt, ...) dev_err((dev), "NFC: " fmt, ##__VA_ARGS__)
diff --git a/include/net/pkt_sched.h b/include/net/pkt_sched.h
index f1b76b8e6d2d..bec46f63f10c 100644
--- a/include/net/pkt_sched.h
+++ b/include/net/pkt_sched.h
@@ -92,7 +92,7 @@ int unregister_qdisc(struct Qdisc_ops *qops);
void qdisc_get_default(char *id, size_t len);
int qdisc_set_default(const char *id);
-void qdisc_hash_add(struct Qdisc *q);
+void qdisc_hash_add(struct Qdisc *q, bool invisible);
void qdisc_hash_del(struct Qdisc *q);
struct Qdisc *qdisc_lookup(struct net_device *dev, u32 handle);
struct Qdisc *qdisc_lookup_class(struct net_device *dev, u32 handle);
diff --git a/include/net/protocol.h b/include/net/protocol.h
index bf36ca34af7a..65ba335b0e7e 100644
--- a/include/net/protocol.h
+++ b/include/net/protocol.h
@@ -40,6 +40,7 @@
/* This is used to register protocols. */
struct net_protocol {
void (*early_demux)(struct sk_buff *skb);
+ void (*early_demux_handler)(struct sk_buff *skb);
int (*handler)(struct sk_buff *skb);
void (*err_handler)(struct sk_buff *skb, u32 info);
unsigned int no_policy:1,
@@ -54,7 +55,7 @@ struct net_protocol {
#if IS_ENABLED(CONFIG_IPV6)
struct inet6_protocol {
void (*early_demux)(struct sk_buff *skb);
-
+ void (*early_demux_handler)(struct sk_buff *skb);
int (*handler)(struct sk_buff *skb);
void (*err_handler)(struct sk_buff *skb,
@@ -92,12 +93,12 @@ struct inet_protosw {
#define INET_PROTOSW_PERMANENT 0x02 /* Permanent protocols are unremovable. */
#define INET_PROTOSW_ICSK 0x04 /* Is this an inet_connection_sock? */
-extern const struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS];
+extern struct net_protocol __rcu *inet_protos[MAX_INET_PROTOS];
extern const struct net_offload __rcu *inet_offloads[MAX_INET_PROTOS];
extern const struct net_offload __rcu *inet6_offloads[MAX_INET_PROTOS];
#if IS_ENABLED(CONFIG_IPV6)
-extern const struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS];
+extern struct inet6_protocol __rcu *inet6_protos[MAX_INET_PROTOS];
#endif
int inet_add_protocol(const struct net_protocol *prot, unsigned char num);
diff --git a/include/net/route.h b/include/net/route.h
index c0874c87c173..2cc0e14c6359 100644
--- a/include/net/route.h
+++ b/include/net/route.h
@@ -113,13 +113,13 @@ struct in_device;
int ip_rt_init(void);
void rt_cache_flush(struct net *net);
void rt_flush_dev(struct net_device *dev);
-struct rtable *__ip_route_output_key_hash(struct net *, struct flowi4 *flp,
- int mp_hash);
+struct rtable *__ip_route_output_key_hash(struct net *net, struct flowi4 *flp,
+ const struct sk_buff *skb);
static inline struct rtable *__ip_route_output_key(struct net *net,
struct flowi4 *flp)
{
- return __ip_route_output_key_hash(net, flp, -1);
+ return __ip_route_output_key_hash(net, flp, NULL);
}
struct rtable *ip_route_output_flow(struct net *, struct flowi4 *flp,
diff --git a/include/net/rtnetlink.h b/include/net/rtnetlink.h
index 106de5f7bf06..78fa5fe32947 100644
--- a/include/net/rtnetlink.h
+++ b/include/net/rtnetlink.h
@@ -4,7 +4,8 @@
#include <linux/rtnetlink.h>
#include <net/netlink.h>
-typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *);
+typedef int (*rtnl_doit_func)(struct sk_buff *, struct nlmsghdr *,
+ struct netlink_ext_ack *);
typedef int (*rtnl_dumpit_func)(struct sk_buff *, struct netlink_callback *);
typedef u16 (*rtnl_calcit_func)(struct sk_buff *, struct nlmsghdr *);
@@ -158,7 +159,8 @@ struct net_device *rtnl_create_link(struct net *net, const char *ifname,
int rtnl_delete_link(struct net_device *dev);
int rtnl_configure_link(struct net_device *dev, const struct ifinfomsg *ifm);
-int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len);
+int rtnl_nla_parse_ifla(struct nlattr **tb, const struct nlattr *head, int len,
+ struct netlink_ext_ack *exterr);
#define MODULE_ALIAS_RTNL_LINK(kind) MODULE_ALIAS("rtnl-link-" kind)
diff --git a/include/net/sch_generic.h b/include/net/sch_generic.h
index aeec4086afb2..22e52093bfda 100644
--- a/include/net/sch_generic.h
+++ b/include/net/sch_generic.h
@@ -66,6 +66,7 @@ struct Qdisc {
#define TCQ_F_NOPARENT 0x40 /* root of its hierarchy :
* qdisc_tree_decrease_qlen() should stop.
*/
+#define TCQ_F_INVISIBLE 0x80 /* invisible by default in dump */
u32 limit;
const struct Qdisc_ops *ops;
struct qdisc_size_table __rcu *stab;
@@ -203,14 +204,14 @@ struct tcf_proto_ops {
const struct tcf_proto *,
struct tcf_result *);
int (*init)(struct tcf_proto*);
- bool (*destroy)(struct tcf_proto*, bool);
+ void (*destroy)(struct tcf_proto*);
unsigned long (*get)(struct tcf_proto*, u32 handle);
int (*change)(struct net *net, struct sk_buff *,
struct tcf_proto*, unsigned long,
u32 handle, struct nlattr **,
unsigned long *, bool);
- int (*delete)(struct tcf_proto*, unsigned long);
+ int (*delete)(struct tcf_proto*, unsigned long, bool*);
void (*walk)(struct tcf_proto*, struct tcf_walker *arg);
/* rtnetlink specific */
diff --git a/include/net/sctp/sm.h b/include/net/sctp/sm.h
index b6f682ec184a..47113f2c4b0a 100644
--- a/include/net/sctp/sm.h
+++ b/include/net/sctp/sm.h
@@ -293,6 +293,22 @@ struct sctp_chunk *sctp_process_strreset_inreq(
struct sctp_association *asoc,
union sctp_params param,
struct sctp_ulpevent **evp);
+struct sctp_chunk *sctp_process_strreset_tsnreq(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp);
+struct sctp_chunk *sctp_process_strreset_addstrm_out(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp);
+struct sctp_chunk *sctp_process_strreset_addstrm_in(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp);
+struct sctp_chunk *sctp_process_strreset_resp(
+ struct sctp_association *asoc,
+ union sctp_params param,
+ struct sctp_ulpevent **evp);
/* Prototypes for statetable processing. */
diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h
index 138f8615acf0..a8b38e123f97 100644
--- a/include/net/sctp/structs.h
+++ b/include/net/sctp/structs.h
@@ -1315,6 +1315,8 @@ struct sctp_inithdr_host {
struct sctp_stream_out {
__u16 ssn;
__u8 state;
+ __u64 abandoned_unsent[SCTP_PR_INDEX(MAX) + 1];
+ __u64 abandoned_sent[SCTP_PR_INDEX(MAX) + 1];
};
struct sctp_stream_in {
@@ -1887,6 +1889,7 @@ struct sctp_association {
__u32 strreset_outseq; /* Update after receiving response */
__u32 strreset_inseq; /* Update after receiving request */
+ __u32 strreset_result[2]; /* save the results of last 2 responses */
struct sctp_chunk *strreset_chunk; /* save request chunk */
diff --git a/include/net/sctp/ulpevent.h b/include/net/sctp/ulpevent.h
index 324b5965fc4d..1060494ac230 100644
--- a/include/net/sctp/ulpevent.h
+++ b/include/net/sctp/ulpevent.h
@@ -132,6 +132,14 @@ struct sctp_ulpevent *sctp_ulpevent_make_stream_reset_event(
const struct sctp_association *asoc, __u16 flags,
__u16 stream_num, __u16 *stream_list, gfp_t gfp);
+struct sctp_ulpevent *sctp_ulpevent_make_assoc_reset_event(
+ const struct sctp_association *asoc, __u16 flags,
+ __u32 local_tsn, __u32 remote_tsn, gfp_t gfp);
+
+struct sctp_ulpevent *sctp_ulpevent_make_stream_change_event(
+ const struct sctp_association *asoc, __u16 flags,
+ __u32 strchange_instrms, __u32 strchange_outstrms, gfp_t gfp);
+
void sctp_ulpevent_read_sndrcvinfo(const struct sctp_ulpevent *event,
struct msghdr *);
void sctp_ulpevent_read_rcvinfo(const struct sctp_ulpevent *event,
diff --git a/include/net/secure_seq.h b/include/net/secure_seq.h
index 0caee631a836..b94006f6fbdd 100644
--- a/include/net/secure_seq.h
+++ b/include/net/secure_seq.h
@@ -6,10 +6,12 @@
u32 secure_ipv4_port_ephemeral(__be32 saddr, __be32 daddr, __be16 dport);
u32 secure_ipv6_port_ephemeral(const __be32 *saddr, const __be32 *daddr,
__be16 dport);
-u32 secure_tcp_sequence_number(__be32 saddr, __be32 daddr,
- __be16 sport, __be16 dport, u32 *tsoff);
-u32 secure_tcpv6_sequence_number(const __be32 *saddr, const __be32 *daddr,
- __be16 sport, __be16 dport, u32 *tsoff);
+u32 secure_tcp_seq(__be32 saddr, __be32 daddr,
+ __be16 sport, __be16 dport);
+u32 secure_tcp_ts_off(__be32 saddr, __be32 daddr);
+u32 secure_tcpv6_seq(const __be32 *saddr, const __be32 *daddr,
+ __be16 sport, __be16 dport);
+u32 secure_tcpv6_ts_off(const __be32 *saddr, const __be32 *daddr);
u64 secure_dccp_sequence_number(__be32 saddr, __be32 daddr,
__be16 sport, __be16 dport);
u64 secure_dccpv6_sequence_number(__be32 *saddr, __be32 *daddr,
diff --git a/include/net/sock.h b/include/net/sock.h
index 03252d53975d..f33e3d134e0b 100644
--- a/include/net/sock.h
+++ b/include/net/sock.h
@@ -995,7 +995,7 @@ struct smc_hashinfo;
struct module;
/*
- * caches using SLAB_DESTROY_BY_RCU should let .next pointer from nulls nodes
+ * caches using SLAB_TYPESAFE_BY_RCU should let .next pointer from nulls nodes
* un-modified. Special care is taken when initializing object to zero.
*/
static inline void sk_prot_clear_nulls(struct sock *sk, int size)
@@ -1783,11 +1783,8 @@ __sk_dst_set(struct sock *sk, struct dst_entry *dst)
sk_tx_queue_clear(sk);
sk->sk_dst_pending_confirm = 0;
- /*
- * This can be called while sk is owned by the caller only,
- * with no state that can be checked in a rcu_dereference_check() cond
- */
- old_dst = rcu_dereference_raw(sk->sk_dst_cache);
+ old_dst = rcu_dereference_protected(sk->sk_dst_cache,
+ lockdep_sock_is_held(sk));
rcu_assign_pointer(sk->sk_dst_cache, dst);
dst_release(old_dst);
}
@@ -2242,6 +2239,7 @@ sock_recv_timestamp(struct msghdr *msg, struct sock *sk, struct sk_buff *skb)
void __sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb);
+#define SK_DEFAULT_STAMP (-1L * NSEC_PER_SEC)
static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
struct sk_buff *skb)
{
@@ -2252,8 +2250,10 @@ static inline void sock_recv_ts_and_drops(struct msghdr *msg, struct sock *sk,
if (sk->sk_flags & FLAGS_TS_OR_DROPS || sk->sk_tsflags & TSFLAGS_ANY)
__sock_recv_ts_and_drops(msg, sk, skb);
- else
+ else if (unlikely(sock_flag(sk, SOCK_TIMESTAMP)))
sk->sk_stamp = skb->tstamp;
+ else if (unlikely(sk->sk_stamp == SK_DEFAULT_STAMP))
+ sk->sk_stamp = 0;
}
void __sock_tx_timestamp(__u16 tsflags, __u8 *tx_flags);
@@ -2365,6 +2365,8 @@ bool sk_ns_capable(const struct sock *sk,
bool sk_capable(const struct sock *sk, int cap);
bool sk_net_capable(const struct sock *sk, int cap);
+void sk_get_meminfo(const struct sock *sk, u32 *meminfo);
+
extern __u32 sysctl_wmem_max;
extern __u32 sysctl_rmem_max;
diff --git a/include/net/tc_act/tc_pedit.h b/include/net/tc_act/tc_pedit.h
index dfbd6ee0bc7c..a46c3f2ace70 100644
--- a/include/net/tc_act/tc_pedit.h
+++ b/include/net/tc_act/tc_pedit.h
@@ -2,6 +2,7 @@
#define __NET_TC_PED_H
#include <net/act_api.h>
+#include <linux/tc_act/tc_pedit.h>
struct tcf_pedit_key_ex {
enum pedit_header_type htype;
@@ -17,4 +18,48 @@ struct tcf_pedit {
};
#define to_pedit(a) ((struct tcf_pedit *)a)
+static inline bool is_tcf_pedit(const struct tc_action *a)
+{
+#ifdef CONFIG_NET_CLS_ACT
+ if (a->ops && a->ops->type == TCA_ACT_PEDIT)
+ return true;
+#endif
+ return false;
+}
+
+static inline int tcf_pedit_nkeys(const struct tc_action *a)
+{
+ return to_pedit(a)->tcfp_nkeys;
+}
+
+static inline u32 tcf_pedit_htype(const struct tc_action *a, int index)
+{
+ if (to_pedit(a)->tcfp_keys_ex)
+ return to_pedit(a)->tcfp_keys_ex[index].htype;
+
+ return TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK;
+}
+
+static inline u32 tcf_pedit_cmd(const struct tc_action *a, int index)
+{
+ if (to_pedit(a)->tcfp_keys_ex)
+ return to_pedit(a)->tcfp_keys_ex[index].cmd;
+
+ return __PEDIT_CMD_MAX;
+}
+
+static inline u32 tcf_pedit_mask(const struct tc_action *a, int index)
+{
+ return to_pedit(a)->tcfp_keys[index].mask;
+}
+
+static inline u32 tcf_pedit_val(const struct tc_action *a, int index)
+{
+ return to_pedit(a)->tcfp_keys[index].val;
+}
+
+static inline u32 tcf_pedit_offset(const struct tc_action *a, int index)
+{
+ return to_pedit(a)->tcfp_keys[index].off;
+}
#endif /* __NET_TC_PED_H */
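The accessors added above let an offload driver inspect a pedit action without reaching into tcf_pedit internals. A hedged sketch of such a walk; offload_pedit_example() is illustrative and the hardware-programming step is a placeholder:

static int offload_pedit_example(const struct tc_action *a)
{
	int k;

	if (!is_tcf_pedit(a))
		return -EOPNOTSUPP;

	for (k = 0; k < tcf_pedit_nkeys(a); k++) {
		u32 htype = tcf_pedit_htype(a, k);
		u32 cmd = tcf_pedit_cmd(a, k);
		u32 mask = tcf_pedit_mask(a, k);
		u32 val = tcf_pedit_val(a, k);
		u32 off = tcf_pedit_offset(a, k);

		/* program (htype, cmd, off, mask, val) into hardware here */
	}
	return 0;
}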
diff --git a/include/net/tc_act/tc_vlan.h b/include/net/tc_act/tc_vlan.h
index 48cca321ee6c..c2090df944ff 100644
--- a/include/net/tc_act/tc_vlan.h
+++ b/include/net/tc_act/tc_vlan.h
@@ -13,9 +13,6 @@
#include <net/act_api.h>
#include <linux/tc_act/tc_vlan.h>
-#define VLAN_F_POP 0x1
-#define VLAN_F_PUSH 0x2
-
struct tcf_vlan {
struct tc_action common;
int tcfv_action;
@@ -49,4 +46,9 @@ static inline __be16 tcf_vlan_push_proto(const struct tc_action *a)
return to_vlan(a)->tcfv_push_proto;
}
+static inline u8 tcf_vlan_push_prio(const struct tc_action *a)
+{
+ return to_vlan(a)->tcfv_push_prio;
+}
+
#endif /* __NET_TC_VLAN_H */
diff --git a/include/net/tcp.h b/include/net/tcp.h
index 6ec4ea652f3f..38a7427ae902 100644
--- a/include/net/tcp.h
+++ b/include/net/tcp.h
@@ -78,6 +78,9 @@ void tcp_time_wait(struct sock *sk, int state, int timeo);
/* Maximal number of ACKs sent quickly to accelerate slow-start. */
#define TCP_MAX_QUICKACKS 16U
+/* Maximal window scale value according to RFC 1323 */
+#define TCP_MAX_WSCALE 14U
+
/* urg_data states */
#define TCP_URG_VALID 0x0100
#define TCP_URG_NOTYET 0x0200
@@ -406,11 +409,7 @@ void tcp_clear_retrans(struct tcp_sock *tp);
void tcp_update_metrics(struct sock *sk);
void tcp_init_metrics(struct sock *sk);
void tcp_metrics_init(void);
-bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst,
- bool paws_check, bool timestamps);
-bool tcp_remember_stamp(struct sock *sk);
-bool tcp_tw_remember_stamp(struct inet_timewait_sock *tw);
-void tcp_fetch_timewait_stamp(struct sock *sk, struct dst_entry *dst);
+bool tcp_peer_is_proven(struct request_sock *req, struct dst_entry *dst);
void tcp_disable_fack(struct tcp_sock *tp);
void tcp_close(struct sock *sk, long timeout);
void tcp_init_sock(struct sock *sk);
@@ -471,7 +470,7 @@ void inet_sk_rx_dst_set(struct sock *sk, const struct sk_buff *skb);
/* From syncookies.c */
struct sock *tcp_get_cookie_sock(struct sock *sk, struct sk_buff *skb,
struct request_sock *req,
- struct dst_entry *dst);
+ struct dst_entry *dst, u32 tsoff);
int __cookie_v4_check(const struct iphdr *iph, const struct tcphdr *th,
u32 cookie);
struct sock *cookie_v4_check(struct sock *sk, struct sk_buff *skb);
@@ -1005,7 +1004,7 @@ void tcp_rate_skb_sent(struct sock *sk, struct sk_buff *skb);
void tcp_rate_skb_delivered(struct sock *sk, struct sk_buff *skb,
struct rate_sample *rs);
void tcp_rate_gen(struct sock *sk, u32 delivered, u32 lost,
- struct skb_mstamp *now, struct rate_sample *rs);
+ struct rate_sample *rs);
void tcp_rate_check_app_limited(struct sock *sk);
/* These functions determine how the current flow behaves in respect of SACK
@@ -1235,10 +1234,12 @@ void tcp_cwnd_restart(struct sock *sk, s32 delta);
static inline void tcp_slow_start_after_idle_check(struct sock *sk)
{
+ const struct tcp_congestion_ops *ca_ops = inet_csk(sk)->icsk_ca_ops;
struct tcp_sock *tp = tcp_sk(sk);
s32 delta;
- if (!sysctl_tcp_slow_start_after_idle || tp->packets_out)
+ if (!sysctl_tcp_slow_start_after_idle || tp->packets_out ||
+ ca_ops->cong_control)
return;
delta = tcp_time_stamp - tp->lsndtime;
if (delta > inet_csk(sk)->icsk_rto)
@@ -1252,9 +1253,11 @@ void tcp_select_initial_window(int __space, __u32 mss, __u32 *rcv_wnd,
static inline int tcp_win_from_space(int space)
{
- return sysctl_tcp_adv_win_scale<=0 ?
- (space>>(-sysctl_tcp_adv_win_scale)) :
- space - (space>>sysctl_tcp_adv_win_scale);
+ int tcp_adv_win_scale = sysctl_tcp_adv_win_scale;
+
+ return tcp_adv_win_scale <= 0 ?
+ (space>>(-tcp_adv_win_scale)) :
+ space - (space>>tcp_adv_win_scale);
}
/* Note: caller must be prepared to deal with negative returns */
@@ -1505,6 +1508,12 @@ struct tcp_fastopen_context {
struct rcu_head rcu;
};
+extern unsigned int sysctl_tcp_fastopen_blackhole_timeout;
+void tcp_fastopen_active_disable(struct sock *sk);
+bool tcp_fastopen_active_should_disable(struct sock *sk);
+void tcp_fastopen_active_disable_ofo_check(struct sock *sk);
+void tcp_fastopen_active_timeout_reset(void);
+
/* Latencies incurred by various limits for a sender. They are
* chronograph-like stats that are mutually exclusive.
*/
@@ -1814,9 +1823,9 @@ struct tcp_request_sock_ops {
__u16 *mss);
#endif
struct dst_entry *(*route_req)(const struct sock *sk, struct flowi *fl,
- const struct request_sock *req,
- bool *strict);
- __u32 (*init_seq)(const struct sk_buff *skb, u32 *tsoff);
+ const struct request_sock *req);
+ u32 (*init_seq)(const struct sk_buff *skb);
+ u32 (*init_ts_off)(const struct sk_buff *skb);
int (*send_synack)(const struct sock *sk, struct dst_entry *dst,
struct flowi *fl, struct request_sock *req,
struct tcp_fastopen_cookie *foc,
@@ -1847,10 +1856,9 @@ void tcp_v4_init(void);
void tcp_init(void);
/* tcp_recovery.c */
-extern void tcp_rack_mark_lost(struct sock *sk, const struct skb_mstamp *now);
+extern void tcp_rack_mark_lost(struct sock *sk);
extern void tcp_rack_advance(struct tcp_sock *tp, u8 sacked, u32 end_seq,
- const struct skb_mstamp *xmit_time,
- const struct skb_mstamp *ack_time);
+ const struct skb_mstamp *xmit_time);
extern void tcp_rack_reo_timeout(struct sock *sk);
/*
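The tcp_win_from_space() hunk above copies sysctl_tcp_adv_win_scale into a local so the sign test and the shift cannot observe two different values if the sysctl changes concurrently. A minimal userspace sketch of the formula (win_from_space() and the sample numbers are hypothetical, not kernel code):

#include <stdio.h>

/* mirrors tcp_win_from_space(): a non-positive scale shifts the buffer
 * space down directly, a positive scale reserves 1/2^scale for overhead */
static int win_from_space(int space, int adv_win_scale)
{
	return adv_win_scale <= 0 ?
	       (space >> (-adv_win_scale)) :
	       space - (space >> adv_win_scale);
}

int main(void)
{
	int space = 65536;
	int scale;

	for (scale = -2; scale <= 2; scale++)
		printf("scale=%2d window=%d\n", scale, win_from_space(space, scale));
	return 0;
}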
diff --git a/include/net/udp.h b/include/net/udp.h
index c9d8b8e848e0..3391dbd73959 100644
--- a/include/net/udp.h
+++ b/include/net/udp.h
@@ -372,4 +372,5 @@ void udp_encap_enable(void);
#if IS_ENABLED(CONFIG_IPV6)
void udpv6_encap_enable(void);
#endif
+
#endif /* _UDP_H */
diff --git a/include/net/xfrm.h b/include/net/xfrm.h
index 14d82bf16692..6793a30c66b1 100644
--- a/include/net/xfrm.h
+++ b/include/net/xfrm.h
@@ -120,6 +120,13 @@ struct xfrm_state_walk {
struct xfrm_address_filter *filter;
};
+struct xfrm_state_offload {
+ struct net_device *dev;
+ unsigned long offload_handle;
+ unsigned int num_exthdrs;
+ u8 flags;
+};
+
/* Full description of state of transformer. */
struct xfrm_state {
possible_net_t xs_net;
@@ -207,6 +214,8 @@ struct xfrm_state {
struct xfrm_lifetime_cur curlft;
struct tasklet_hrtimer mtimer;
+ struct xfrm_state_offload xso;
+
/* used to fix curlft->add_time when changing date */
long saved_tmo;
@@ -222,6 +231,8 @@ struct xfrm_state {
struct xfrm_mode *inner_mode_iaf;
struct xfrm_mode *outer_mode;
+ const struct xfrm_type_offload *type_offload;
+
/* Security context */
struct xfrm_sec_ctx *security;
@@ -314,12 +325,14 @@ void km_state_expired(struct xfrm_state *x, int hard, u32 portid);
int __xfrm_state_delete(struct xfrm_state *x);
struct xfrm_state_afinfo {
- unsigned int family;
- unsigned int proto;
- __be16 eth_proto;
- struct module *owner;
- const struct xfrm_type *type_map[IPPROTO_MAX];
- struct xfrm_mode *mode_map[XFRM_MODE_MAX];
+ unsigned int family;
+ unsigned int proto;
+ __be16 eth_proto;
+ struct module *owner;
+ const struct xfrm_type *type_map[IPPROTO_MAX];
+ const struct xfrm_type_offload *type_offload_map[IPPROTO_MAX];
+ struct xfrm_mode *mode_map[XFRM_MODE_MAX];
+
int (*init_flags)(struct xfrm_state *x);
void (*init_tempsel)(struct xfrm_selector *sel,
const struct flowi *fl);
@@ -380,6 +393,18 @@ struct xfrm_type {
int xfrm_register_type(const struct xfrm_type *type, unsigned short family);
int xfrm_unregister_type(const struct xfrm_type *type, unsigned short family);
+struct xfrm_type_offload {
+ char *description;
+ struct module *owner;
+ u8 proto;
+ void (*encap)(struct xfrm_state *, struct sk_buff *pskb);
+ int (*input_tail)(struct xfrm_state *x, struct sk_buff *skb);
+ int (*xmit)(struct xfrm_state *, struct sk_buff *pskb, netdev_features_t features);
+};
+
+int xfrm_register_type_offload(const struct xfrm_type_offload *type, unsigned short family);
+int xfrm_unregister_type_offload(const struct xfrm_type_offload *type, unsigned short family);
+
struct xfrm_mode {
/*
* Remove encapsulation header.
@@ -428,6 +453,16 @@ struct xfrm_mode {
*/
int (*output)(struct xfrm_state *x, struct sk_buff *skb);
+ /*
+ * Adjust pointers into the packet and do GSO segmentation.
+ */
+ struct sk_buff *(*gso_segment)(struct xfrm_state *x, struct sk_buff *skb, netdev_features_t features);
+
+ /*
+ * Adjust pointers into the packet when IPsec is done at layer2.
+ */
+ void (*xmit)(struct xfrm_state *x, struct sk_buff *skb);
+
struct xfrm_state_afinfo *afinfo;
struct module *owner;
unsigned int encap;
@@ -586,7 +621,6 @@ struct xfrm_migrate {
struct xfrm_mgr {
struct list_head list;
- char *id;
int (*notify)(struct xfrm_state *x, const struct km_event *c);
int (*acquire)(struct xfrm_state *x, struct xfrm_tmpl *, struct xfrm_policy *xp);
struct xfrm_policy *(*compile_policy)(struct sock *sk, int opt, u8 *data, int len, int *dir);
@@ -817,12 +851,12 @@ static inline void xfrm_state_hold(struct xfrm_state *x)
}
static inline bool addr_match(const void *token1, const void *token2,
- int prefixlen)
+ unsigned int prefixlen)
{
const __be32 *a1 = token1;
const __be32 *a2 = token2;
- int pdw;
- int pbi;
+ unsigned int pdw;
+ unsigned int pbi;
pdw = prefixlen >> 5; /* num of whole u32 in prefix */
pbi = prefixlen & 0x1f; /* num of bits in incomplete u32 in prefix */
@@ -846,9 +880,9 @@ static inline bool addr_match(const void *token1, const void *token2,
static inline bool addr4_match(__be32 a1, __be32 a2, u8 prefixlen)
{
/* C99 6.5.7 (3): u32 << 32 is undefined behaviour */
- if (prefixlen == 0)
+ if (sizeof(long) == 4 && prefixlen == 0)
return true;
- return !((a1 ^ a2) & htonl(0xFFFFFFFFu << (32 - prefixlen)));
+ return !((a1 ^ a2) & htonl(~0UL << (32 - prefixlen)));
}
static __inline__
@@ -1533,6 +1567,7 @@ struct xfrmk_spdinfo {
struct xfrm_state *xfrm_find_acq_byseq(struct net *net, u32 mark, u32 seq);
int xfrm_state_delete(struct xfrm_state *x);
int xfrm_state_flush(struct net *net, u8 proto, bool task_valid);
+int xfrm_dev_state_flush(struct net *net, struct net_device *dev, bool task_valid);
void xfrm_sad_getinfo(struct net *net, struct xfrmk_sadinfo *si);
void xfrm_spd_getinfo(struct net *net, struct xfrmk_spdinfo *si);
u32 xfrm_replay_seqhi(struct xfrm_state *x, __be32 net_seq);
@@ -1615,6 +1650,11 @@ static inline int xfrm4_udp_encap_rcv(struct sock *sk, struct sk_buff *skb)
}
#endif
+struct dst_entry *__xfrm_dst_lookup(struct net *net, int tos, int oif,
+ const xfrm_address_t *saddr,
+ const xfrm_address_t *daddr,
+ int family);
+
struct xfrm_policy *xfrm_policy_alloc(struct net *net, gfp_t gfp);
void xfrm_policy_walk_init(struct xfrm_policy_walk *walk, u8 type);
@@ -1820,6 +1860,61 @@ static inline struct xfrm_offload *xfrm_offload(struct sk_buff *skb)
}
#endif
+#ifdef CONFIG_XFRM_OFFLOAD
+void __net_init xfrm_dev_init(void);
+int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features);
+int xfrm_dev_state_add(struct net *net, struct xfrm_state *x,
+ struct xfrm_user_offload *xuo);
+bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x);
+
+static inline void xfrm_dev_state_delete(struct xfrm_state *x)
+{
+ struct xfrm_state_offload *xso = &x->xso;
+
+ if (xso->dev)
+ xso->dev->xfrmdev_ops->xdo_dev_state_delete(x);
+}
+
+static inline void xfrm_dev_state_free(struct xfrm_state *x)
+{
+ struct xfrm_state_offload *xso = &x->xso;
+ struct net_device *dev = xso->dev;
+
+ if (dev && dev->xfrmdev_ops) {
+ dev->xfrmdev_ops->xdo_dev_state_free(x);
+ xso->dev = NULL;
+ dev_put(dev);
+ }
+}
+#else
+static inline void __net_init xfrm_dev_init(void)
+{
+}
+
+static inline int validate_xmit_xfrm(struct sk_buff *skb, netdev_features_t features)
+{
+ return 0;
+}
+
+static inline int xfrm_dev_state_add(struct net *net, struct xfrm_state *x, struct xfrm_user_offload *xuo)
+{
+ return 0;
+}
+
+static inline void xfrm_dev_state_delete(struct xfrm_state *x)
+{
+}
+
+static inline void xfrm_dev_state_free(struct xfrm_state *x)
+{
+}
+
+static inline bool xfrm_dev_offload_ok(struct sk_buff *skb, struct xfrm_state *x)
+{
+ return false;
+}
+#endif
+
static inline int xfrm_mark_get(struct nlattr **attrs, struct xfrm_mark *m)
{
if (attrs[XFRMA_MARK])
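The addr4_match() hunk above widens the mask to unsigned long so the shift is defined for prefixlen == 0 on 64-bit builds, leaving the early return only for 32-bit long. A standalone sketch of that masking (host-order addresses, hypothetical values; byte-order conversion omitted):

#include <stdio.h>
#include <stdint.h>

/* prefix match on host-order IPv4 addresses, mirroring the fixed mask logic */
static int prefix_match(uint32_t a1, uint32_t a2, unsigned int prefixlen)
{
	if (sizeof(long) == 4 && prefixlen == 0)
		return 1;	/* u32 << 32 would be undefined behaviour */
	return !((a1 ^ a2) & (uint32_t)(~0UL << (32 - prefixlen)));
}

int main(void)
{
	uint32_t a = 0x0a000001;	/* 10.0.0.1 */
	uint32_t b = 0x0a0000ff;	/* 10.0.0.255 */

	printf("/24 match: %d\n", prefix_match(a, b, 24));
	printf("/32 match: %d\n", prefix_match(a, b, 32));
	return 0;
}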
diff --git a/include/rdma/ib.h b/include/rdma/ib.h
index 9b4c22a36931..66dbed0c146d 100644
--- a/include/rdma/ib.h
+++ b/include/rdma/ib.h
@@ -100,7 +100,7 @@ struct sockaddr_ib {
*/
static inline bool ib_safe_file_access(struct file *filp)
{
- return filp->f_cred == current_cred() && segment_eq(get_fs(), USER_DS);
+ return filp->f_cred == current_cred() && !uaccess_kernel();
}
#endif /* _RDMA_IB_H */
diff --git a/include/rdma/ib_cm.h b/include/rdma/ib_cm.h
index b49258b16f4e..7979cb04f529 100644
--- a/include/rdma/ib_cm.h
+++ b/include/rdma/ib_cm.h
@@ -117,8 +117,8 @@ struct ib_cm_req_event_param {
u8 port;
- struct ib_sa_path_rec *primary_path;
- struct ib_sa_path_rec *alternate_path;
+ struct sa_path_rec *primary_path;
+ struct sa_path_rec *alternate_path;
__be64 remote_ca_guid;
u32 remote_qkey;
@@ -197,7 +197,7 @@ struct ib_cm_mra_event_param {
};
struct ib_cm_lap_event_param {
- struct ib_sa_path_rec *alternate_path;
+ struct sa_path_rec *alternate_path;
};
enum ib_cm_apr_status {
@@ -363,8 +363,8 @@ struct ib_cm_id *ib_cm_insert_listen(struct ib_device *device,
__be64 service_id);
struct ib_cm_req_param {
- struct ib_sa_path_rec *primary_path;
- struct ib_sa_path_rec *alternate_path;
+ struct sa_path_rec *primary_path;
+ struct sa_path_rec *alternate_path;
__be64 service_id;
u32 qp_num;
enum ib_qp_type qp_type;
@@ -521,7 +521,7 @@ int ib_send_cm_mra(struct ib_cm_id *cm_id,
* @private_data_len: Size of the private data buffer, in bytes.
*/
int ib_send_cm_lap(struct ib_cm_id *cm_id,
- struct ib_sa_path_rec *alternate_path,
+ struct sa_path_rec *alternate_path,
const void *private_data,
u8 private_data_len);
@@ -565,7 +565,7 @@ int ib_send_cm_apr(struct ib_cm_id *cm_id,
u8 private_data_len);
struct ib_cm_sidr_req_param {
- struct ib_sa_path_rec *path;
+ struct sa_path_rec *path;
__be64 service_id;
int timeout_ms;
const void *private_data;
diff --git a/include/rdma/ib_hdrs.h b/include/rdma/ib_hdrs.h
index c755325f0831..5519f31f043a 100644
--- a/include/rdma/ib_hdrs.h
+++ b/include/rdma/ib_hdrs.h
@@ -74,6 +74,12 @@
#define IB_GRH_FLOW_MASK 0xFFFFF
#define IB_GRH_FLOW_SHIFT 0
#define IB_GRH_NEXT_HDR 0x1B
+#define IB_FECN_SHIFT 31
+#define IB_FECN_MASK 1
+#define IB_FECN_SMASK BIT(IB_FECN_SHIFT)
+#define IB_BECN_SHIFT 30
+#define IB_BECN_MASK 1
+#define IB_BECN_SMASK BIT(IB_BECN_SHIFT)
#define IB_AETH_CREDIT_SHIFT 24
#define IB_AETH_CREDIT_MASK 0x1F
@@ -181,4 +187,64 @@ static inline void put_ib_ateth_compare(u64 val, struct ib_atomic_eth *ateth)
ib_u64_put(val, &ateth->compare_data);
}
+/*
+ * 9B/IB Packet Format
+ */
+#define IB_LNH_MASK 3
+#define IB_SC_MASK 0xf
+#define IB_SC_SHIFT 12
+#define IB_SL_MASK 0xf
+#define IB_SL_SHIFT 4
+
+static inline u8 ib_get_lnh(struct ib_header *hdr)
+{
+ return (be16_to_cpu(hdr->lrh[0]) & IB_LNH_MASK);
+}
+
+static inline u8 ib_get_sc(struct ib_header *hdr)
+{
+ return ((be16_to_cpu(hdr->lrh[0]) >> IB_SC_SHIFT) & IB_SC_MASK);
+}
+
+static inline u8 ib_get_sl(struct ib_header *hdr)
+{
+ return ((be16_to_cpu(hdr->lrh[0]) >> IB_SL_SHIFT) & IB_SL_MASK);
+}
+
+static inline u16 ib_get_dlid(struct ib_header *hdr)
+{
+ return (be16_to_cpu(hdr->lrh[1]));
+}
+
+static inline u16 ib_get_slid(struct ib_header *hdr)
+{
+ return (be16_to_cpu(hdr->lrh[3]));
+}
+
+/*
+ * BTH
+ */
+#define IB_BTH_OPCODE_MASK 0xff
+#define IB_BTH_OPCODE_SHIFT 24
+#define IB_BTH_PAD_MASK 3
+#define IB_BTH_PKEY_MASK 0xffff
+#define IB_BTH_PAD_SHIFT 20
+
+static inline u8 ib_bth_get_pad(struct ib_other_headers *ohdr)
+{
+ return ((be32_to_cpu(ohdr->bth[0]) >> IB_BTH_PAD_SHIFT) &
+ IB_BTH_PAD_MASK);
+}
+
+static inline u16 ib_bth_get_pkey(struct ib_other_headers *ohdr)
+{
+ return (be32_to_cpu(ohdr->bth[0]) & IB_BTH_PKEY_MASK);
+}
+
+static inline u8 ib_bth_get_opcode(struct ib_other_headers *ohdr)
+{
+ return ((be32_to_cpu(ohdr->bth[0]) >> IB_BTH_OPCODE_SHIFT) &
+ IB_BTH_OPCODE_MASK);
+}
+
#endif /* IB_HDRS_H */
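A self-contained illustration of the LRH/BTH accessors added above; the header words are hypothetical and already in host order (the kernel helpers do the be16/be32 conversion themselves):

#include <stdio.h>
#include <stdint.h>

#define IB_LNH_MASK		3
#define IB_SC_MASK		0xf
#define IB_SC_SHIFT		12
#define IB_SL_MASK		0xf
#define IB_SL_SHIFT		4
#define IB_BTH_OPCODE_MASK	0xff
#define IB_BTH_OPCODE_SHIFT	24
#define IB_BTH_PAD_MASK		3
#define IB_BTH_PAD_SHIFT	20
#define IB_BTH_PKEY_MASK	0xffff

int main(void)
{
	uint16_t lrh0 = 0x7012;		/* hypothetical be16_to_cpu(hdr->lrh[0]) */
	uint32_t bth0 = 0x640affff;	/* hypothetical be32_to_cpu(ohdr->bth[0]) */

	printf("lnh=%u sc=%u sl=%u\n",
	       lrh0 & IB_LNH_MASK,
	       (lrh0 >> IB_SC_SHIFT) & IB_SC_MASK,
	       (lrh0 >> IB_SL_SHIFT) & IB_SL_MASK);
	printf("opcode=0x%x pad=%u pkey=0x%x\n",
	       (bth0 >> IB_BTH_OPCODE_SHIFT) & IB_BTH_OPCODE_MASK,
	       (bth0 >> IB_BTH_PAD_SHIFT) & IB_BTH_PAD_MASK,
	       bth0 & IB_BTH_PKEY_MASK);
	return 0;
}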
diff --git a/include/rdma/ib_mad.h b/include/rdma/ib_mad.h
index 981214b3790c..d67b11b72029 100644
--- a/include/rdma/ib_mad.h
+++ b/include/rdma/ib_mad.h
@@ -262,6 +262,33 @@ struct ib_class_port_info {
__be32 trap_qkey;
};
+#define OPA_CLASS_PORT_INFO_PR_SUPPORT BIT(26)
+
+struct opa_class_port_info {
+ u8 base_version;
+ u8 class_version;
+ __be16 cap_mask;
+ __be32 cap_mask2_resp_time;
+
+ u8 redirect_gid[16];
+ __be32 redirect_tc_fl;
+ __be32 redirect_lid;
+ __be32 redirect_sl_qp;
+ __be32 redirect_qkey;
+
+ u8 trap_gid[16];
+ __be32 trap_tc_fl;
+ __be32 trap_lid;
+ __be32 trap_hl_qp;
+ __be32 trap_qkey;
+
+ __be16 trap_pkey;
+ __be16 redirect_pkey;
+
+ u8 trap_sl_rsvd;
+ u8 reserved[3];
+} __packed;
+
/**
* ib_get_cpi_resp_time - Returns the resp_time value from
* cap_mask2_resp_time in ib_class_port_info.
@@ -315,6 +342,17 @@ static inline void ib_set_cpi_capmask2(struct ib_class_port_info *cpi,
IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE);
}
+/**
+ * opa_get_cpi_capmask2 - Returns the capmask2 value from
+ * cap_mask2_resp_time in ib_class_port_info.
+ * @cpi: A struct opa_class_port_info mad.
+ */
+static inline u32 opa_get_cpi_capmask2(struct opa_class_port_info *cpi)
+{
+ return (be32_to_cpu(cpi->cap_mask2_resp_time) >>
+ IB_CLASS_PORT_INFO_RESP_TIME_FIELD_SIZE);
+}
+
struct ib_mad_notice_attr {
u8 generic_type;
u8 prod_type_msb;
@@ -673,7 +711,7 @@ struct ib_mad_agent *ib_register_mad_snoop(struct ib_device *device,
* After invoking this routine, MAD services are no longer usable by the
* client on the associated QP.
*/
-int ib_unregister_mad_agent(struct ib_mad_agent *mad_agent);
+void ib_unregister_mad_agent(struct ib_mad_agent *mad_agent);
/**
* ib_post_send_mad - Posts MAD(s) to the send queue of the QP associated
diff --git a/include/rdma/ib_marshall.h b/include/rdma/ib_marshall.h
index db037205c9e8..68cef3bd50fb 100644
--- a/include/rdma/ib_marshall.h
+++ b/include/rdma/ib_marshall.h
@@ -42,12 +42,12 @@ void ib_copy_qp_attr_to_user(struct ib_uverbs_qp_attr *dst,
struct ib_qp_attr *src);
void ib_copy_ah_attr_to_user(struct ib_uverbs_ah_attr *dst,
- struct ib_ah_attr *src);
+ struct rdma_ah_attr *src);
void ib_copy_path_rec_to_user(struct ib_user_path_rec *dst,
- struct ib_sa_path_rec *src);
+ struct sa_path_rec *src);
-void ib_copy_path_rec_from_user(struct ib_sa_path_rec *dst,
+void ib_copy_path_rec_from_user(struct sa_path_rec *dst,
struct ib_user_path_rec *src);
#endif /* IB_USER_MARSHALL_H */
diff --git a/include/rdma/ib_pack.h b/include/rdma/ib_pack.h
index b13419ce99ff..36655899ee02 100644
--- a/include/rdma/ib_pack.h
+++ b/include/rdma/ib_pack.h
@@ -80,6 +80,8 @@ enum {
IB_OPCODE_UD = 0x60,
/* per IBTA 1.3 vol 1 Table 38, A10.3.2 */
IB_OPCODE_CNP = 0x80,
+ /* Manufacturer specific */
+ IB_OPCODE_MSP = 0xe0,
/* operations -- just used to define real constants */
IB_OPCODE_SEND_FIRST = 0x00,
diff --git a/include/rdma/ib_sa.h b/include/rdma/ib_sa.h
index fd0e53219f93..f5f70e345318 100644
--- a/include/rdma/ib_sa.h
+++ b/include/rdma/ib_sa.h
@@ -43,6 +43,8 @@
#include <rdma/ib_verbs.h>
#include <rdma/ib_mad.h>
+#include <rdma/ib_addr.h>
+#include <rdma/opa_addr.h>
enum {
IB_SA_CLASS_VERSION = 2, /* IB spec version 1.1/1.2 */
@@ -56,6 +58,7 @@ enum {
IB_SA_METHOD_GET_TRACE_TBL = 0x13
};
+#define OPA_SA_CLASS_VERSION 0x80
enum {
IB_SA_ATTR_CLASS_PORTINFO = 0x01,
IB_SA_ATTR_NOTICE = 0x02,
@@ -147,13 +150,45 @@ enum ib_sa_mc_join_states {
#define IB_SA_PATH_REC_PACKET_LIFE_TIME IB_SA_COMP_MASK(21)
#define IB_SA_PATH_REC_PREFERENCE IB_SA_COMP_MASK(22)
-struct ib_sa_path_rec {
+enum sa_path_rec_type {
+ SA_PATH_REC_TYPE_IB,
+ SA_PATH_REC_TYPE_ROCE_V1,
+ SA_PATH_REC_TYPE_ROCE_V2,
+ SA_PATH_REC_TYPE_OPA
+};
+
+struct sa_path_rec_ib {
__be64 service_id;
- union ib_gid dgid;
- union ib_gid sgid;
__be16 dlid;
__be16 slid;
u8 raw_traffic;
+};
+
+struct sa_path_rec_roce {
+ u8 dmac[ETH_ALEN];
+ /* ignored in IB */
+ int ifindex;
+ /* ignored in IB */
+ struct net *net;
+
+};
+
+struct sa_path_rec_opa {
+ __be64 service_id;
+ __be32 dlid;
+ __be32 slid;
+ u8 raw_traffic;
+ u8 l2_8B;
+ u8 l2_10B;
+ u8 l2_9B;
+ u8 l2_16B;
+ u8 qos_type;
+ u8 qos_priority;
+};
+
+struct sa_path_rec {
+ union ib_gid dgid;
+ union ib_gid sgid;
/* reserved */
__be32 flow_label;
u8 hop_limit;
@@ -170,17 +205,109 @@ struct ib_sa_path_rec {
u8 packet_life_time_selector;
u8 packet_life_time;
u8 preference;
- u8 dmac[ETH_ALEN];
- /* ignored in IB */
- int ifindex;
- /* ignored in IB */
- struct net *net;
- enum ib_gid_type gid_type;
+ union {
+ struct sa_path_rec_ib ib;
+ struct sa_path_rec_roce roce;
+ struct sa_path_rec_opa opa;
+ };
+ enum sa_path_rec_type rec_type;
};
-static inline struct net_device *ib_get_ndev_from_path(struct ib_sa_path_rec *rec)
+static inline enum ib_gid_type
+ sa_conv_pathrec_to_gid_type(struct sa_path_rec *rec)
+{
+ switch (rec->rec_type) {
+ case SA_PATH_REC_TYPE_ROCE_V1:
+ return IB_GID_TYPE_ROCE;
+ case SA_PATH_REC_TYPE_ROCE_V2:
+ return IB_GID_TYPE_ROCE_UDP_ENCAP;
+ default:
+ return IB_GID_TYPE_IB;
+ }
+}
+
+static inline enum sa_path_rec_type
+ sa_conv_gid_to_pathrec_type(enum ib_gid_type type)
+{
+ switch (type) {
+ case IB_GID_TYPE_ROCE:
+ return SA_PATH_REC_TYPE_ROCE_V1;
+ case IB_GID_TYPE_ROCE_UDP_ENCAP:
+ return SA_PATH_REC_TYPE_ROCE_V2;
+ default:
+ return SA_PATH_REC_TYPE_IB;
+ }
+}
+
+static inline void path_conv_opa_to_ib(struct sa_path_rec *ib,
+ struct sa_path_rec *opa)
+{
+ if ((be32_to_cpu(opa->opa.dlid) >=
+ be16_to_cpu(IB_MULTICAST_LID_BASE)) ||
+ (be32_to_cpu(opa->opa.slid) >=
+ be16_to_cpu(IB_MULTICAST_LID_BASE))) {
+ /* Create OPA GID and zero out the LID */
+ ib->dgid.global.interface_id
+ = OPA_MAKE_ID(be32_to_cpu(opa->opa.dlid));
+ ib->dgid.global.subnet_prefix
+ = opa->dgid.global.subnet_prefix;
+ ib->sgid.global.interface_id
+ = OPA_MAKE_ID(be32_to_cpu(opa->opa.slid));
+ ib->sgid.global.subnet_prefix
+ = opa->sgid.global.subnet_prefix;
+ ib->ib.dlid = 0;
+
+ ib->ib.slid = 0;
+ } else {
+ ib->ib.dlid = htons(ntohl(opa->opa.dlid));
+ ib->ib.slid = htons(ntohl(opa->opa.slid));
+ }
+ ib->ib.service_id = opa->opa.service_id;
+ ib->ib.raw_traffic = opa->opa.raw_traffic;
+}
+
+static inline void path_conv_ib_to_opa(struct sa_path_rec *opa,
+ struct sa_path_rec *ib)
{
- return rec->net ? dev_get_by_index(rec->net, rec->ifindex) : NULL;
+ __be32 slid, dlid;
+
+ if ((ib_is_opa_gid(&ib->sgid)) ||
+ (ib_is_opa_gid(&ib->dgid))) {
+ slid = htonl(opa_get_lid_from_gid(&ib->sgid));
+ dlid = htonl(opa_get_lid_from_gid(&ib->dgid));
+ } else {
+ slid = htonl(ntohs(ib->ib.slid));
+ dlid = htonl(ntohs(ib->ib.dlid));
+ }
+ opa->opa.slid = slid;
+ opa->opa.dlid = dlid;
+ opa->opa.service_id = ib->ib.service_id;
+ opa->opa.raw_traffic = ib->ib.raw_traffic;
+}
+
+/* Convert from OPA to IB path record */
+static inline void sa_convert_path_opa_to_ib(struct sa_path_rec *dest,
+ struct sa_path_rec *src)
+{
+ if (src->rec_type != SA_PATH_REC_TYPE_OPA)
+ return;
+
+ *dest = *src;
+ dest->rec_type = SA_PATH_REC_TYPE_IB;
+ path_conv_opa_to_ib(dest, src);
+}
+
+/* Convert from IB to OPA path record */
+static inline void sa_convert_path_ib_to_opa(struct sa_path_rec *dest,
+ struct sa_path_rec *src)
+{
+ if (src->rec_type != SA_PATH_REC_TYPE_IB)
+ return;
+
+ /* Do a structure copy and overwrite the relevant fields */
+ *dest = *src;
+ dest->rec_type = SA_PATH_REC_TYPE_OPA;
+ path_conv_ib_to_opa(dest, src);
}
#define IB_SA_MCMEMBER_REC_MGID IB_SA_COMP_MASK( 0)
@@ -322,11 +449,11 @@ void ib_sa_cancel_query(int id, struct ib_sa_query *query);
int ib_sa_path_rec_get(struct ib_sa_client *client,
struct ib_device *device, u8 port_num,
- struct ib_sa_path_rec *rec,
+ struct sa_path_rec *rec,
ib_sa_comp_mask comp_mask,
int timeout_ms, gfp_t gfp_mask,
void (*callback)(int status,
- struct ib_sa_path_rec *resp,
+ struct sa_path_rec *resp,
void *context),
void *context,
struct ib_sa_query **query);
@@ -420,27 +547,27 @@ int ib_init_ah_from_mcmember(struct ib_device *device, u8 port_num,
struct ib_sa_mcmember_rec *rec,
struct net_device *ndev,
enum ib_gid_type gid_type,
- struct ib_ah_attr *ah_attr);
+ struct rdma_ah_attr *ah_attr);
/**
* ib_init_ah_from_path - Initialize address handle attributes based on an SA
* path record.
*/
int ib_init_ah_from_path(struct ib_device *device, u8 port_num,
- struct ib_sa_path_rec *rec,
- struct ib_ah_attr *ah_attr);
+ struct sa_path_rec *rec,
+ struct rdma_ah_attr *ah_attr);
/**
 * ib_sa_pack_path - Convert a path record from struct ib_sa_path_rec
* to IB MAD wire format.
*/
-void ib_sa_pack_path(struct ib_sa_path_rec *rec, void *attribute);
+void ib_sa_pack_path(struct sa_path_rec *rec, void *attribute);
/**
* ib_sa_unpack_path - Convert a path record from MAD format to struct
* ib_sa_path_rec.
*/
-void ib_sa_unpack_path(void *attribute, struct ib_sa_path_rec *rec);
+void ib_sa_unpack_path(void *attribute, struct sa_path_rec *rec);
/* Support GuidInfoRecord */
int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
@@ -454,14 +581,137 @@ int ib_sa_guid_info_rec_query(struct ib_sa_client *client,
void *context,
struct ib_sa_query **sa_query);
-/* Support get SA ClassPortInfo */
-int ib_sa_classport_info_rec_query(struct ib_sa_client *client,
- struct ib_device *device, u8 port_num,
- int timeout_ms, gfp_t gfp_mask,
- void (*callback)(int status,
- struct ib_class_port_info *resp,
- void *context),
- void *context,
- struct ib_sa_query **sa_query);
+bool ib_sa_sendonly_fullmem_support(struct ib_sa_client *client,
+ struct ib_device *device,
+ u8 port_num);
+
+static inline bool sa_path_is_roce(struct sa_path_rec *rec)
+{
+ return ((rec->rec_type == SA_PATH_REC_TYPE_ROCE_V1) ||
+ (rec->rec_type == SA_PATH_REC_TYPE_ROCE_V2));
+}
+
+static inline void sa_path_set_service_id(struct sa_path_rec *rec,
+ __be64 service_id)
+{
+ if (rec->rec_type == SA_PATH_REC_TYPE_IB)
+ rec->ib.service_id = service_id;
+ else if (rec->rec_type == SA_PATH_REC_TYPE_OPA)
+ rec->opa.service_id = service_id;
+}
+
+static inline void sa_path_set_slid(struct sa_path_rec *rec, __be32 slid)
+{
+ if (rec->rec_type == SA_PATH_REC_TYPE_IB)
+ rec->ib.slid = htons(ntohl(slid));
+ else if (rec->rec_type == SA_PATH_REC_TYPE_OPA)
+ rec->opa.slid = slid;
+}
+
+static inline void sa_path_set_dlid(struct sa_path_rec *rec, __be32 dlid)
+{
+ if (rec->rec_type == SA_PATH_REC_TYPE_IB)
+ rec->ib.dlid = htons(ntohl(dlid));
+ else if (rec->rec_type == SA_PATH_REC_TYPE_OPA)
+ rec->opa.dlid = dlid;
+}
+
+static inline void sa_path_set_raw_traffic(struct sa_path_rec *rec,
+ u8 raw_traffic)
+{
+ if (rec->rec_type == SA_PATH_REC_TYPE_IB)
+ rec->ib.raw_traffic = raw_traffic;
+ else if (rec->rec_type == SA_PATH_REC_TYPE_OPA)
+ rec->opa.raw_traffic = raw_traffic;
+}
+
+static inline __be64 sa_path_get_service_id(struct sa_path_rec *rec)
+{
+ if (rec->rec_type == SA_PATH_REC_TYPE_IB)
+ return rec->ib.service_id;
+ else if (rec->rec_type == SA_PATH_REC_TYPE_OPA)
+ return rec->opa.service_id;
+ return 0;
+}
+
+static inline __be32 sa_path_get_slid(struct sa_path_rec *rec)
+{
+ if (rec->rec_type == SA_PATH_REC_TYPE_IB)
+ return htonl(ntohs(rec->ib.slid));
+ else if (rec->rec_type == SA_PATH_REC_TYPE_OPA)
+ return rec->opa.slid;
+ return 0;
+}
+
+static inline __be32 sa_path_get_dlid(struct sa_path_rec *rec)
+{
+ if (rec->rec_type == SA_PATH_REC_TYPE_IB)
+ return htonl(ntohs(rec->ib.dlid));
+ else if (rec->rec_type == SA_PATH_REC_TYPE_OPA)
+ return rec->opa.dlid;
+ return 0;
+}
+
+static inline u8 sa_path_get_raw_traffic(struct sa_path_rec *rec)
+{
+ if (rec->rec_type == SA_PATH_REC_TYPE_IB)
+ return rec->ib.raw_traffic;
+ else if (rec->rec_type == SA_PATH_REC_TYPE_OPA)
+ return rec->opa.raw_traffic;
+ return 0;
+}
+
+static inline void sa_path_set_dmac(struct sa_path_rec *rec, u8 *dmac)
+{
+ if (sa_path_is_roce(rec))
+ memcpy(rec->roce.dmac, dmac, ETH_ALEN);
+}
+
+static inline void sa_path_set_dmac_zero(struct sa_path_rec *rec)
+{
+ if (sa_path_is_roce(rec))
+ eth_zero_addr(rec->roce.dmac);
+}
+
+static inline void sa_path_set_ifindex(struct sa_path_rec *rec, int ifindex)
+{
+ if (sa_path_is_roce(rec))
+ rec->roce.ifindex = ifindex;
+}
+
+static inline void sa_path_set_ndev(struct sa_path_rec *rec, struct net *net)
+{
+ if (sa_path_is_roce(rec))
+ rec->roce.net = net;
+}
+
+static inline u8 *sa_path_get_dmac(struct sa_path_rec *rec)
+{
+ if (sa_path_is_roce(rec))
+ return rec->roce.dmac;
+ return NULL;
+}
+
+static inline int sa_path_get_ifindex(struct sa_path_rec *rec)
+{
+ if (sa_path_is_roce(rec))
+ return rec->roce.ifindex;
+ return 0;
+}
+
+static inline struct net *sa_path_get_ndev(struct sa_path_rec *rec)
+{
+ if (sa_path_is_roce(rec))
+ return rec->roce.net;
+ return NULL;
+}
+
+static inline struct net_device *ib_get_ndev_from_path(struct sa_path_rec *rec)
+{
+ return sa_path_get_ndev(rec) ?
+ dev_get_by_index(sa_path_get_ndev(rec),
+ sa_path_get_ifindex(rec))
+ : NULL;
+}
#endif /* IB_SA_H */
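A sketch of how a consumer might fill the union-based sa_path_rec through the accessors above; example_fill_roce_path() and its arguments are placeholders and the snippet only compiles in-kernel against this header:

#include <rdma/ib_sa.h>

static void example_fill_roce_path(struct sa_path_rec *rec, u8 *dmac,
				   int ifindex, struct net *net)
{
	memset(rec, 0, sizeof(*rec));
	rec->rec_type = SA_PATH_REC_TYPE_ROCE_V2;

	/* RoCE-only fields; these setters are no-ops for IB/OPA records */
	sa_path_set_dmac(rec, dmac);
	sa_path_set_ifindex(rec, ifindex);
	sa_path_set_ndev(rec, net);

	/* dlid/slid/service_id setters, conversely, only act on IB/OPA records */
	sa_path_set_dlid(rec, cpu_to_be32(0));
}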
diff --git a/include/rdma/ib_umem.h b/include/rdma/ib_umem.h
index 2d83cfd7e6ce..23159dd5be18 100644
--- a/include/rdma/ib_umem.h
+++ b/include/rdma/ib_umem.h
@@ -44,7 +44,7 @@ struct ib_umem {
struct ib_ucontext *context;
size_t length;
unsigned long address;
- int page_size;
+ int page_shift;
int writable;
int hugetlb;
struct work_struct work;
@@ -60,7 +60,7 @@ struct ib_umem {
/* Returns the offset of the umem start relative to the first page. */
static inline int ib_umem_offset(struct ib_umem *umem)
{
- return umem->address & ((unsigned long)umem->page_size - 1);
+ return umem->address & (BIT(umem->page_shift) - 1);
}
/* Returns the first page of an ODP umem. */
@@ -72,12 +72,12 @@ static inline unsigned long ib_umem_start(struct ib_umem *umem)
/* Returns the address of the page after the last one of an ODP umem. */
static inline unsigned long ib_umem_end(struct ib_umem *umem)
{
- return PAGE_ALIGN(umem->address + umem->length);
+ return ALIGN(umem->address + umem->length, BIT(umem->page_shift));
}
static inline size_t ib_umem_num_pages(struct ib_umem *umem)
{
- return (ib_umem_end(umem) - ib_umem_start(umem)) >> PAGE_SHIFT;
+ return (ib_umem_end(umem) - ib_umem_start(umem)) >> umem->page_shift;
}
#ifdef CONFIG_INFINIBAND_USER_MEM
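The ib_umem hunks above replace the page_size member with page_shift; a standalone sketch of the resulting arithmetic (values are hypothetical, and the start address is assumed to be rounded down to a page boundary as ib_umem_start() does):

#include <stdio.h>

int main(void)
{
	unsigned long address = 0x1003c00;
	unsigned long length  = 0x2500;
	int page_shift = 12;				/* 4 KiB pages */
	unsigned long page_size = 1UL << page_shift;

	unsigned long offset = address & (page_size - 1);	/* ib_umem_offset() */
	unsigned long start  = address & ~(page_size - 1);
	unsigned long end    = (address + length + page_size - 1) & ~(page_size - 1);

	printf("offset=%#lx num_pages=%lu\n", offset, (end - start) >> page_shift);
	return 0;
}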
diff --git a/include/rdma/ib_umem_odp.h b/include/rdma/ib_umem_odp.h
index 542cd8b3414c..fb67554aabd6 100644
--- a/include/rdma/ib_umem_odp.h
+++ b/include/rdma/ib_umem_odp.h
@@ -84,7 +84,8 @@ struct ib_umem_odp {
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
-int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem);
+int ib_umem_odp_get(struct ib_ucontext *context, struct ib_umem *umem,
+ int access);
struct ib_umem *ib_alloc_odp_umem(struct ib_ucontext *context,
unsigned long addr,
size_t size);
@@ -154,7 +155,8 @@ static inline int ib_umem_mmu_notifier_retry(struct ib_umem *item,
#else /* CONFIG_INFINIBAND_ON_DEMAND_PAGING */
static inline int ib_umem_odp_get(struct ib_ucontext *context,
- struct ib_umem *umem)
+ struct ib_umem *umem,
+ int access)
{
return -EINVAL;
}
diff --git a/include/rdma/ib_verbs.h b/include/rdma/ib_verbs.h
index 99e4423eb2b8..f0cb4906478a 100644
--- a/include/rdma/ib_verbs.h
+++ b/include/rdma/ib_verbs.h
@@ -55,6 +55,7 @@
#include <net/ip.h>
#include <linux/string.h>
#include <linux/slab.h>
+#include <linux/netdevice.h>
#include <linux/if_link.h>
#include <linux/atomic.h>
@@ -224,6 +225,7 @@ enum ib_device_cap_flags {
IB_DEVICE_VIRTUAL_FUNCTION = (1ULL << 33),
/* Deprecated. Please use IB_RAW_PACKET_CAP_SCATTER_FCS. */
IB_DEVICE_RAW_SCATTER_FCS = (1ULL << 34),
+ IB_DEVICE_RDMA_NETDEV_OPA_VNIC = (1ULL << 35),
};
enum ib_signature_prot_cap {
@@ -431,7 +433,8 @@ enum ib_port_speed {
IB_SPEED_QDR = 4,
IB_SPEED_FDR10 = 8,
IB_SPEED_FDR = 16,
- IB_SPEED_EDR = 32
+ IB_SPEED_EDR = 32,
+ IB_SPEED_HDR = 64
};
/**
@@ -498,6 +501,7 @@ static inline struct rdma_hw_stats *rdma_alloc_hw_stats_struct(
/* Address format 0x000FF000 */
#define RDMA_CORE_CAP_AF_IB 0x00001000
#define RDMA_CORE_CAP_ETH_AH 0x00002000
+#define RDMA_CORE_CAP_OPA_AH 0x00004000
/* Protocol 0xFFF00000 */
#define RDMA_CORE_CAP_PROT_IB 0x00100000
@@ -836,15 +840,38 @@ struct ib_mr_status {
*/
__attribute_const__ enum ib_rate mult_to_ib_rate(int mult);
+enum rdma_ah_attr_type {
+ RDMA_AH_ATTR_TYPE_IB,
+ RDMA_AH_ATTR_TYPE_ROCE,
+ RDMA_AH_ATTR_TYPE_OPA,
+};
+
struct ib_ah_attr {
- struct ib_global_route grh;
u16 dlid;
- u8 sl;
u8 src_path_bits;
+};
+
+struct roce_ah_attr {
+ u8 dmac[ETH_ALEN];
+};
+
+struct opa_ah_attr {
+ u32 dlid;
+ u8 src_path_bits;
+};
+
+struct rdma_ah_attr {
+ struct ib_global_route grh;
+ u8 sl;
u8 static_rate;
- u8 ah_flags;
u8 port_num;
- u8 dmac[ETH_ALEN];
+ u8 ah_flags;
+ enum rdma_ah_attr_type type;
+ union {
+ struct ib_ah_attr ib;
+ struct roce_ah_attr roce;
+ struct opa_ah_attr opa;
+ };
};
enum ib_wc_status {
@@ -1163,8 +1190,8 @@ struct ib_qp_attr {
u32 dest_qp_num;
int qp_access_flags;
struct ib_qp_cap cap;
- struct ib_ah_attr ah_attr;
- struct ib_ah_attr alt_ah_attr;
+ struct rdma_ah_attr ah_attr;
+ struct rdma_ah_attr alt_ah_attr;
u16 pkey_index;
u16 alt_pkey_index;
u8 en_sqd_async_notify;
@@ -1336,6 +1363,7 @@ enum ib_access_flags {
IB_ACCESS_MW_BIND = (1<<4),
IB_ZERO_BASED = (1<<5),
IB_ACCESS_ON_DEMAND = (1<<6),
+ IB_ACCESS_HUGETLB = (1<<7),
};
/*
@@ -1357,6 +1385,17 @@ struct ib_fmr_attr {
struct ib_umem;
+enum rdma_remove_reason {
+ /* Userspace requested uobject deletion. Call could fail */
+ RDMA_REMOVE_DESTROY,
+ /* Context deletion. This call should delete the actual object itself */
+ RDMA_REMOVE_CLOSE,
+ /* Driver is being hot-unplugged. This call should delete the actual object itself */
+ RDMA_REMOVE_DRIVER_REMOVE,
+ /* Context is being cleaned-up, but commit was just completed */
+ RDMA_REMOVE_DURING_CLEANUP,
+};
+
struct ib_rdmacg_object {
#ifdef CONFIG_CGROUP_RDMA
struct rdma_cgroup *cg; /* owner rdma cgroup */
@@ -1365,19 +1404,16 @@ struct ib_rdmacg_object {
struct ib_ucontext {
struct ib_device *device;
- struct list_head pd_list;
- struct list_head mr_list;
- struct list_head mw_list;
- struct list_head cq_list;
- struct list_head qp_list;
- struct list_head srq_list;
- struct list_head ah_list;
- struct list_head xrcd_list;
- struct list_head rule_list;
- struct list_head wq_list;
- struct list_head rwq_ind_tbl_list;
+ struct ib_uverbs_file *ufile;
int closing;
+ /* locking the uobjects_list */
+ struct mutex uobjects_lock;
+ struct list_head uobjects;
+ /* protects cleanup process from other actions */
+ struct rw_semaphore cleanup_rwsem;
+ enum rdma_remove_reason cleanup_reason;
+
struct pid *tgid;
#ifdef CONFIG_INFINIBAND_ON_DEMAND_PAGING
struct rb_root umem_tree;
@@ -1407,9 +1443,16 @@ struct ib_uobject {
struct ib_rdmacg_object cg_obj; /* rdmacg object */
int id; /* index into kernel idr */
struct kref ref;
- struct rw_semaphore mutex; /* protects .live */
+ atomic_t usecnt; /* protects exclusive access */
struct rcu_head rcu; /* kfree_rcu() overhead */
- int live;
+
+ const struct uverbs_obj_type *type;
+};
+
+struct ib_uobject_file {
+ struct ib_uobject uobj;
+ /* ufile contains the lock between context release and file close */
+ struct ib_uverbs_file *ufile;
};
struct ib_udata {
@@ -1447,6 +1490,7 @@ struct ib_ah {
struct ib_device *device;
struct ib_pd *pd;
struct ib_uobject *uobject;
+ enum rdma_ah_attr_type type;
};
typedef void (*ib_comp_handler)(struct ib_cq *cq, void *cq_context);
@@ -1662,6 +1706,7 @@ enum ib_flow_spec_type {
IB_FLOW_SPEC_INNER = 0x100,
/* Actions */
IB_FLOW_SPEC_ACTION_TAG = 0x1000,
+ IB_FLOW_SPEC_ACTION_DROP = 0x1001,
};
#define IB_FLOW_SPEC_LAYER_MASK 0xF0
#define IB_FLOW_SPEC_SUPPORT_LAYERS 8
@@ -1790,6 +1835,11 @@ struct ib_flow_spec_action_tag {
u32 tag_id;
};
+struct ib_flow_spec_action_drop {
+ enum ib_flow_spec_type type;
+ u16 size;
+};
+
union ib_flow_spec {
struct {
u32 type;
@@ -1802,6 +1852,7 @@ union ib_flow_spec {
struct ib_flow_spec_ipv6 ipv6;
struct ib_flow_spec_tunnel tunnel;
struct ib_flow_spec_action_tag flow_tag;
+ struct ib_flow_spec_action_drop drop;
};
struct ib_flow_attr {
@@ -1862,6 +1913,34 @@ struct ib_port_immutable {
u32 max_mad_size;
};
+/* rdma netdev type - specifies protocol type */
+enum rdma_netdev_t {
+ RDMA_NETDEV_OPA_VNIC,
+ RDMA_NETDEV_IPOIB,
+};
+
+/**
+ * struct rdma_netdev - rdma netdev
+ * For cases where netstack interfacing is required.
+ */
+struct rdma_netdev {
+ void *clnt_priv;
+ struct ib_device *hca;
+ u8 port_num;
+
+ /* control functions */
+ void (*set_id)(struct net_device *netdev, int id);
+ /* send packet */
+ int (*send)(struct net_device *dev, struct sk_buff *skb,
+ struct ib_ah *address, u32 dqpn);
+ /* multicast */
+ int (*attach_mcast)(struct net_device *dev, struct ib_device *hca,
+ union ib_gid *gid, u16 mlid,
+ int set_qkey, u32 qkey);
+ int (*detach_mcast)(struct net_device *dev, struct ib_device *hca,
+ union ib_gid *gid, u16 mlid);
+};
+
struct ib_device {
/* Do not access @dma_device directly from ULP nor from HW drivers. */
struct device *dma_device;
@@ -1977,12 +2056,12 @@ struct ib_device {
struct ib_udata *udata);
int (*dealloc_pd)(struct ib_pd *pd);
struct ib_ah * (*create_ah)(struct ib_pd *pd,
- struct ib_ah_attr *ah_attr,
+ struct rdma_ah_attr *ah_attr,
struct ib_udata *udata);
int (*modify_ah)(struct ib_ah *ah,
- struct ib_ah_attr *ah_attr);
+ struct rdma_ah_attr *ah_attr);
int (*query_ah)(struct ib_ah *ah,
- struct ib_ah_attr *ah_attr);
+ struct rdma_ah_attr *ah_attr);
int (*destroy_ah)(struct ib_ah *ah);
struct ib_srq * (*create_srq)(struct ib_pd *pd,
struct ib_srq_init_attr *srq_init_attr,
@@ -2115,6 +2194,20 @@ struct ib_device {
struct ib_rwq_ind_table_init_attr *init_attr,
struct ib_udata *udata);
int (*destroy_rwq_ind_table)(struct ib_rwq_ind_table *wq_ind_table);
+ /**
+ * rdma netdev operations
+ *
+ * Driver implementing alloc_rdma_netdev must return -EOPNOTSUPP if it
+ * doesn't support the specified rdma netdev type.
+ */
+ struct net_device *(*alloc_rdma_netdev)(
+ struct ib_device *device,
+ u8 port_num,
+ enum rdma_netdev_t type,
+ const char *name,
+ unsigned char name_assign_type,
+ void (*setup)(struct net_device *));
+ void (*free_rdma_netdev)(struct net_device *netdev);
struct module *owner;
struct device dev;
@@ -2537,6 +2630,21 @@ static inline bool rdma_cap_eth_ah(const struct ib_device *device, u8 port_num)
}
/**
+ * rdma_cap_opa_ah - Check if the port of device supports
+ * OPA Address handles
+ * @device: Device to check
+ * @port_num: Port number to check
+ *
+ * Return: true if we are running on an OPA device which supports
+ * the extended OPA addressing.
+ */
+static inline bool rdma_cap_opa_ah(struct ib_device *device, u8 port_num)
+{
+ return (device->port_immutable[port_num].core_cap_flags &
+ RDMA_CORE_CAP_OPA_AH) == RDMA_CORE_CAP_OPA_AH;
+}
+
+/**
* rdma_max_mad_size - Return the max MAD size required by this RDMA Port.
*
* @device: Device
@@ -2636,14 +2744,14 @@ struct ib_pd *__ib_alloc_pd(struct ib_device *device, unsigned int flags,
void ib_dealloc_pd(struct ib_pd *pd);
/**
- * ib_create_ah - Creates an address handle for the given address vector.
+ * rdma_create_ah - Creates an address handle for the given address vector.
* @pd: The protection domain associated with the address handle.
* @ah_attr: The attributes of the address vector.
*
* The address handle is used to reference a local or global destination
* in all UD QP post sends.
*/
-struct ib_ah *ib_create_ah(struct ib_pd *pd, struct ib_ah_attr *ah_attr);
+struct ib_ah *rdma_create_ah(struct ib_pd *pd, struct rdma_ah_attr *ah_attr);
/**
* ib_get_gids_from_rdma_hdr - Get sgid and dgid from GRH or IPv4 header
@@ -2676,7 +2784,7 @@ int ib_get_rdma_header_version(const union rdma_network_hdr *hdr);
*/
int ib_init_ah_from_wc(struct ib_device *device, u8 port_num,
const struct ib_wc *wc, const struct ib_grh *grh,
- struct ib_ah_attr *ah_attr);
+ struct rdma_ah_attr *ah_attr);
/**
* ib_create_ah_from_wc - Creates an address handle associated with the
@@ -2694,28 +2802,28 @@ struct ib_ah *ib_create_ah_from_wc(struct ib_pd *pd, const struct ib_wc *wc,
const struct ib_grh *grh, u8 port_num);
/**
- * ib_modify_ah - Modifies the address vector associated with an address
+ * rdma_modify_ah - Modifies the address vector associated with an address
* handle.
* @ah: The address handle to modify.
* @ah_attr: The new address vector attributes to associate with the
* address handle.
*/
-int ib_modify_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
+int rdma_modify_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
/**
- * ib_query_ah - Queries the address vector associated with an address
+ * rdma_query_ah - Queries the address vector associated with an address
* handle.
* @ah: The address handle to query.
* @ah_attr: The address vector attributes associated with the address
* handle.
*/
-int ib_query_ah(struct ib_ah *ah, struct ib_ah_attr *ah_attr);
+int rdma_query_ah(struct ib_ah *ah, struct rdma_ah_attr *ah_attr);
/**
- * ib_destroy_ah - Destroys an address handle.
+ * rdma_destroy_ah - Destroys an address handle.
* @ah: The address handle to destroy.
*/
-int ib_destroy_ah(struct ib_ah *ah);
+int rdma_destroy_ah(struct ib_ah *ah);
/**
* ib_create_srq - Creates a SRQ associated with the specified protection
@@ -3380,5 +3488,156 @@ void ib_drain_sq(struct ib_qp *qp);
void ib_drain_qp(struct ib_qp *qp);
int ib_resolve_eth_dmac(struct ib_device *device,
- struct ib_ah_attr *ah_attr);
+ struct rdma_ah_attr *ah_attr);
+
+static inline u8 *rdma_ah_retrieve_dmac(struct rdma_ah_attr *attr)
+{
+ if (attr->type == RDMA_AH_ATTR_TYPE_ROCE)
+ return attr->roce.dmac;
+ return NULL;
+}
+
+static inline void rdma_ah_set_dlid(struct rdma_ah_attr *attr, u32 dlid)
+{
+ if (attr->type == RDMA_AH_ATTR_TYPE_IB)
+ attr->ib.dlid = (u16)dlid;
+ else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
+ attr->opa.dlid = dlid;
+}
+
+static inline u32 rdma_ah_get_dlid(const struct rdma_ah_attr *attr)
+{
+ if (attr->type == RDMA_AH_ATTR_TYPE_IB)
+ return attr->ib.dlid;
+ else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
+ return attr->opa.dlid;
+ return 0;
+}
+
+static inline void rdma_ah_set_sl(struct rdma_ah_attr *attr, u8 sl)
+{
+ attr->sl = sl;
+}
+
+static inline u8 rdma_ah_get_sl(const struct rdma_ah_attr *attr)
+{
+ return attr->sl;
+}
+
+static inline void rdma_ah_set_path_bits(struct rdma_ah_attr *attr,
+ u8 src_path_bits)
+{
+ if (attr->type == RDMA_AH_ATTR_TYPE_IB)
+ attr->ib.src_path_bits = src_path_bits;
+ else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
+ attr->opa.src_path_bits = src_path_bits;
+}
+
+static inline u8 rdma_ah_get_path_bits(const struct rdma_ah_attr *attr)
+{
+ if (attr->type == RDMA_AH_ATTR_TYPE_IB)
+ return attr->ib.src_path_bits;
+ else if (attr->type == RDMA_AH_ATTR_TYPE_OPA)
+ return attr->opa.src_path_bits;
+ return 0;
+}
+
+static inline void rdma_ah_set_port_num(struct rdma_ah_attr *attr, u8 port_num)
+{
+ attr->port_num = port_num;
+}
+
+static inline u8 rdma_ah_get_port_num(const struct rdma_ah_attr *attr)
+{
+ return attr->port_num;
+}
+
+static inline void rdma_ah_set_static_rate(struct rdma_ah_attr *attr,
+ u8 static_rate)
+{
+ attr->static_rate = static_rate;
+}
+
+static inline u8 rdma_ah_get_static_rate(const struct rdma_ah_attr *attr)
+{
+ return attr->static_rate;
+}
+
+static inline void rdma_ah_set_ah_flags(struct rdma_ah_attr *attr,
+ enum ib_ah_flags flag)
+{
+ attr->ah_flags = flag;
+}
+
+static inline enum ib_ah_flags
+ rdma_ah_get_ah_flags(const struct rdma_ah_attr *attr)
+{
+ return attr->ah_flags;
+}
+
+static inline const struct ib_global_route
+ *rdma_ah_read_grh(const struct rdma_ah_attr *attr)
+{
+ return &attr->grh;
+}
+
+/*To retrieve and modify the grh */
+static inline struct ib_global_route
+ *rdma_ah_retrieve_grh(struct rdma_ah_attr *attr)
+{
+ return &attr->grh;
+}
+
+static inline void rdma_ah_set_dgid_raw(struct rdma_ah_attr *attr, void *dgid)
+{
+ struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
+
+ memcpy(grh->dgid.raw, dgid, sizeof(grh->dgid));
+}
+
+static inline void rdma_ah_set_subnet_prefix(struct rdma_ah_attr *attr,
+ __be64 prefix)
+{
+ struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
+
+ grh->dgid.global.subnet_prefix = prefix;
+}
+
+static inline void rdma_ah_set_interface_id(struct rdma_ah_attr *attr,
+ __be64 if_id)
+{
+ struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
+
+ grh->dgid.global.interface_id = if_id;
+}
+
+static inline void rdma_ah_set_grh(struct rdma_ah_attr *attr,
+ union ib_gid *dgid, u32 flow_label,
+ u8 sgid_index, u8 hop_limit,
+ u8 traffic_class)
+{
+ struct ib_global_route *grh = rdma_ah_retrieve_grh(attr);
+
+ attr->ah_flags = IB_AH_GRH;
+ if (dgid)
+ grh->dgid = *dgid;
+ grh->flow_label = flow_label;
+ grh->sgid_index = sgid_index;
+ grh->hop_limit = hop_limit;
+ grh->traffic_class = traffic_class;
+}
+
+/*Get AH type */
+static inline enum rdma_ah_attr_type rdma_ah_find_type(struct ib_device *dev,
+ u32 port_num)
+{
+ if ((rdma_protocol_roce(dev, port_num)) ||
+ (rdma_protocol_iwarp(dev, port_num)))
+ return RDMA_AH_ATTR_TYPE_ROCE;
+ else if ((rdma_protocol_ib(dev, port_num)) &&
+ (rdma_cap_opa_ah(dev, port_num)))
+ return RDMA_AH_ATTR_TYPE_OPA;
+ else
+ return RDMA_AH_ATTR_TYPE_IB;
+}
#endif /* IB_VERBS_H */
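A sketch of the intended use of the new rdma_ah_attr accessors; example_make_ah() and its parameter values are placeholders:

#include <rdma/ib_verbs.h>

static struct ib_ah *example_make_ah(struct ib_pd *pd, struct ib_device *dev,
				     u8 port_num, union ib_gid *dgid)
{
	struct rdma_ah_attr attr = {};

	attr.type = rdma_ah_find_type(dev, port_num);
	rdma_ah_set_port_num(&attr, port_num);
	rdma_ah_set_sl(&attr, 0);
	rdma_ah_set_dlid(&attr, 1);		/* only stored for IB/OPA types */
	rdma_ah_set_grh(&attr, dgid, 0, 0, 64, 0);

	return rdma_create_ah(pd, &attr);
}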
diff --git a/include/rdma/opa_addr.h b/include/rdma/opa_addr.h
new file mode 100644
index 000000000000..eace28f1555d
--- /dev/null
+++ b/include/rdma/opa_addr.h
@@ -0,0 +1,79 @@
+/*
+ * Copyright(c) 2017 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+#ifndef OPA_ADDR_H
+#define OPA_ADDR_H
+
+#define OPA_SPECIAL_OUI (0x00066AULL)
+#define OPA_MAKE_ID(x) (cpu_to_be64(OPA_SPECIAL_OUI << 40 | (x)))
+
+/**
+ * ib_is_opa_gid: Returns true if the top 24 bits of the gid
+ * contain the OPA_SPECIAL_OUI identifier. This identifies that
+ * the provided gid is a special purpose GID meant to carry
+ * extended LID information.
+ *
+ * @gid: The Global identifier
+ */
+static inline bool ib_is_opa_gid(union ib_gid *gid)
+{
+ return ((be64_to_cpu(gid->global.interface_id) >> 40) ==
+ OPA_SPECIAL_OUI);
+}
+
+/**
+ * opa_get_lid_from_gid: Returns the last 32 bits of the gid.
+ * OPA devices use one of the gids in the gid table to also
+ * store the lid.
+ *
+ * @gid: The Global identifier
+ */
+static inline u32 opa_get_lid_from_gid(union ib_gid *gid)
+{
+ return be64_to_cpu(gid->global.interface_id) & 0xFFFFFFFF;
+}
+#endif /* OPA_ADDR_H */
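A userspace sketch of the OPA extended-LID encoding defined above: the special OUI sits in the top 24 bits of the GID interface_id and the 32-bit LID in the low bits (byte-order conversion omitted, LID value hypothetical):

#include <stdio.h>
#include <stdint.h>

#define OPA_SPECIAL_OUI 0x00066AULL

int main(void)
{
	uint32_t lid = 0x00012345;	/* hypothetical extended LID */
	uint64_t iface_id = (OPA_SPECIAL_OUI << 40) | lid;

	printf("is_opa_gid=%d lid=0x%08x\n",
	       (iface_id >> 40) == OPA_SPECIAL_OUI,
	       (uint32_t)(iface_id & 0xFFFFFFFF));
	return 0;
}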
diff --git a/include/rdma/opa_port_info.h b/include/rdma/opa_port_info.h
index 9303e0e4f508..b4f0ac02f283 100644
--- a/include/rdma/opa_port_info.h
+++ b/include/rdma/opa_port_info.h
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2014 Intel Corporation. All rights reserved.
+ * Copyright (c) 2014-2017 Intel Corporation. All rights reserved.
*
* This software is available to you under a choice of one of two
* licenses. You may choose to be licensed under the terms of the GNU
@@ -127,6 +127,7 @@
#define OPA_LINK_WIDTH_3X 0x0004
#define OPA_LINK_WIDTH_4X 0x0008
+#define OPA_CAP_MASK3_IsEthOnFabricSupported (1 << 13)
#define OPA_CAP_MASK3_IsSnoopSupported (1 << 7)
#define OPA_CAP_MASK3_IsAsyncSC2VLSupported (1 << 6)
#define OPA_CAP_MASK3_IsAddrRangeConfigSupported (1 << 5)
diff --git a/include/rdma/opa_vnic.h b/include/rdma/opa_vnic.h
new file mode 100644
index 000000000000..39d6890616a6
--- /dev/null
+++ b/include/rdma/opa_vnic.h
@@ -0,0 +1,141 @@
+#ifndef _OPA_VNIC_H
+#define _OPA_VNIC_H
+/*
+ * Copyright(c) 2017 Intel Corporation.
+ *
+ * This file is provided under a dual BSD/GPLv2 license. When using or
+ * redistributing this file, you may do so under either license.
+ *
+ * GPL LICENSE SUMMARY
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of version 2 of the GNU General Public License as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but
+ * WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * BSD LICENSE
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions
+ * are met:
+ *
+ * - Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * - Redistributions in binary form must reproduce the above copyright
+ * notice, this list of conditions and the following disclaimer in
+ * the documentation and/or other materials provided with the
+ * distribution.
+ * - Neither the name of Intel Corporation nor the names of its
+ * contributors may be used to endorse or promote products derived
+ * from this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ *
+ */
+
+/*
+ * This file contains Intel Omni-Path (OPA) Virtual Network Interface
+ * Controller (VNIC) specific declarations.
+ */
+
+#include <rdma/ib_verbs.h>
+
+/* VNIC uses 16B header format */
+#define OPA_VNIC_L2_TYPE 0x2
+
+/* 16 header bytes + 2 reserved bytes */
+#define OPA_VNIC_L2_HDR_LEN (16 + 2)
+
+#define OPA_VNIC_L4_HDR_LEN 2
+
+#define OPA_VNIC_HDR_LEN (OPA_VNIC_L2_HDR_LEN + \
+ OPA_VNIC_L4_HDR_LEN)
+
+#define OPA_VNIC_L4_ETHR 0x78
+
+#define OPA_VNIC_ICRC_LEN 4
+#define OPA_VNIC_TAIL_LEN 1
+#define OPA_VNIC_ICRC_TAIL_LEN (OPA_VNIC_ICRC_LEN + OPA_VNIC_TAIL_LEN)
+
+#define OPA_VNIC_SKB_MDATA_LEN 4
+#define OPA_VNIC_SKB_MDATA_ENCAP_ERR 0x1
+
+/* opa vnic rdma netdev's private data structure */
+struct opa_vnic_rdma_netdev {
+ struct rdma_netdev rn; /* keep this first */
+ /* followed by device private data */
+ char *dev_priv[0];
+};
+
+static inline void *opa_vnic_priv(const struct net_device *dev)
+{
+ struct rdma_netdev *rn = netdev_priv(dev);
+
+ return rn->clnt_priv;
+}
+
+static inline void *opa_vnic_dev_priv(const struct net_device *dev)
+{
+ struct opa_vnic_rdma_netdev *oparn = netdev_priv(dev);
+
+ return oparn->dev_priv;
+}
+
+/* opa_vnic skb meta data structure */
+struct opa_vnic_skb_mdata {
+ u8 vl;
+ u8 entropy;
+ u8 flags;
+ u8 rsvd;
+} __packed;
+
+/* OPA VNIC group statistics */
+struct opa_vnic_grp_stats {
+ u64 unicast;
+ u64 mcastbcast;
+ u64 untagged;
+ u64 vlan;
+ u64 s_64;
+ u64 s_65_127;
+ u64 s_128_255;
+ u64 s_256_511;
+ u64 s_512_1023;
+ u64 s_1024_1518;
+ u64 s_1519_max;
+};
+
+struct opa_vnic_stats {
+ /* standard netdev statistics */
+ struct rtnl_link_stats64 netstats;
+
+ /* OPA VNIC statistics */
+ struct opa_vnic_grp_stats tx_grp;
+ struct opa_vnic_grp_stats rx_grp;
+ u64 tx_dlid_zero;
+ u64 tx_drop_state;
+ u64 rx_drop_state;
+ u64 rx_runt;
+ u64 rx_oversize;
+};
+
+static inline bool rdma_cap_opa_vnic(struct ib_device *device)
+{
+ return !!(device->attrs.device_cap_flags &
+ IB_DEVICE_RDMA_NETDEV_OPA_VNIC);
+}
+
+#endif /* _OPA_VNIC_H */
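A sketch of the private-data layering this header expects: struct rdma_netdev sits at the front of netdev_priv() and the VNIC client keeps its own state behind clnt_priv. example_vnic_state and example_count_tx are placeholders:

#include <rdma/opa_vnic.h>

struct example_vnic_state {
	struct opa_vnic_stats stats;
};

static void example_count_tx(struct net_device *netdev)
{
	struct rdma_netdev *rn = netdev_priv(netdev);
	struct example_vnic_state *state = rn->clnt_priv;

	state->stats.tx_grp.unicast++;
}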
diff --git a/include/rdma/rdma_cm.h b/include/rdma/rdma_cm.h
index d3968b561f86..3d2eed3c4e75 100644
--- a/include/rdma/rdma_cm.h
+++ b/include/rdma/rdma_cm.h
@@ -85,7 +85,7 @@ struct rdma_addr {
struct rdma_route {
struct rdma_addr addr;
- struct ib_sa_path_rec *path_rec;
+ struct sa_path_rec *path_rec;
int num_paths;
};
@@ -106,7 +106,7 @@ struct rdma_conn_param {
struct rdma_ud_param {
const void *private_data;
u8 private_data_len;
- struct ib_ah_attr ah_attr;
+ struct rdma_ah_attr ah_attr;
u32 qp_num;
u32 qkey;
};
diff --git a/include/rdma/rdma_cm_ib.h b/include/rdma/rdma_cm_ib.h
index 2389c3b45404..6947a6ba2557 100644
--- a/include/rdma/rdma_cm_ib.h
+++ b/include/rdma/rdma_cm_ib.h
@@ -46,7 +46,7 @@
* connection and replaces the call to rdma_resolve_route.
*/
int rdma_set_ib_paths(struct rdma_cm_id *id,
- struct ib_sa_path_rec *path_rec, int num_paths);
+ struct sa_path_rec *path_rec, int num_paths);
/* Global qkey for UDP QPs and multicast groups. */
#define RDMA_UDP_QKEY 0x01234567
diff --git a/include/rdma/rdma_vt.h b/include/rdma/rdma_vt.h
index 8fc1ca7b6f23..4878aaf7bdff 100644
--- a/include/rdma/rdma_vt.h
+++ b/include/rdma/rdma_vt.h
@@ -170,7 +170,7 @@ struct rvt_pd {
/* Address handle */
struct rvt_ah {
struct ib_ah ibah;
- struct ib_ah_attr attr;
+ struct rdma_ah_attr attr;
atomic_t refcount;
u8 vl;
u8 log_pmtu;
@@ -311,10 +311,10 @@ struct rvt_driver_provided {
unsigned (*free_all_qps)(struct rvt_dev_info *rdi);
/* Driver specific AH validation */
- int (*check_ah)(struct ib_device *, struct ib_ah_attr *);
+ int (*check_ah)(struct ib_device *, struct rdma_ah_attr *);
/* Inform the driver a new AH has been created */
- void (*notify_new_ah)(struct ib_device *, struct ib_ah_attr *,
+ void (*notify_new_ah)(struct ib_device *, struct rdma_ah_attr *,
struct rvt_ah *);
/* Let the driver pick the next queue pair number*/
@@ -506,7 +506,7 @@ struct rvt_dev_info *rvt_alloc_device(size_t size, int nports);
void rvt_dealloc_device(struct rvt_dev_info *rdi);
int rvt_register_device(struct rvt_dev_info *rvd);
void rvt_unregister_device(struct rvt_dev_info *rvd);
-int rvt_check_ah(struct ib_device *ibdev, struct ib_ah_attr *ah_attr);
+int rvt_check_ah(struct ib_device *ibdev, struct rdma_ah_attr *ah_attr);
int rvt_init_port(struct rvt_dev_info *rdi, struct rvt_ibport *port,
int port_index, u16 *pkey_table);
int rvt_fast_reg_mr(struct rvt_qp *qp, struct ib_mr *ibmr, u32 key,
@@ -516,6 +516,7 @@ int rvt_rkey_ok(struct rvt_qp *qp, struct rvt_sge *sge,
u32 len, u64 vaddr, u32 rkey, int acc);
int rvt_lkey_ok(struct rvt_lkey_table *rkt, struct rvt_pd *pd,
struct rvt_sge *isge, struct ib_sge *sge, int acc);
-struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid);
+struct rvt_mcast *rvt_mcast_find(struct rvt_ibport *ibp, union ib_gid *mgid,
+ u16 lid);
#endif /* DEF_RDMA_VT_H */
diff --git a/include/rdma/rdmavt_qp.h b/include/rdma/rdmavt_qp.h
index f3816396c76a..be6472e5b06b 100644
--- a/include/rdma/rdmavt_qp.h
+++ b/include/rdma/rdmavt_qp.h
@@ -2,7 +2,7 @@
#define DEF_RDMAVT_INCQP_H
/*
- * Copyright(c) 2016 Intel Corporation.
+ * Copyright(c) 2016, 2017 Intel Corporation.
*
* This file is provided under a dual BSD/GPLv2 license. When using or
* redistributing this file, you may do so under either license.
@@ -269,8 +269,8 @@ struct rvt_qp {
struct ib_qp ibqp;
void *priv; /* Driver private data */
/* read mostly fields above and below */
- struct ib_ah_attr remote_ah_attr;
- struct ib_ah_attr alt_ah_attr;
+ struct rdma_ah_attr remote_ah_attr;
+ struct rdma_ah_attr alt_ah_attr;
struct rvt_qp __rcu *next; /* link list for QPN hash table */
struct rvt_swqe *s_wq; /* send work queue */
struct rvt_mmap_info *ip;
@@ -324,6 +324,7 @@ struct rvt_qp {
u8 r_state; /* opcode of last packet received */
u8 r_flags;
u8 r_head_ack_queue; /* index into s_ack_queue[] */
+ u8 r_adefered; /* defered ack count */
struct list_head rspwait; /* link for waiting to respond */
@@ -435,9 +436,14 @@ struct rvt_mcast_qp {
struct rvt_qp *qp;
};
+struct rvt_mcast_addr {
+ union ib_gid mgid;
+ u16 lid;
+};
+
struct rvt_mcast {
struct rb_node rb_node;
- union ib_gid mgid;
+ struct rvt_mcast_addr mcast_addr;
struct list_head qp_list;
wait_queue_head_t wait;
atomic_t refcount;
@@ -526,7 +532,6 @@ static inline void rvt_qp_wqe_reserve(
struct rvt_qp *qp,
struct rvt_swqe *wqe)
{
- wqe->wr.send_flags |= RVT_SEND_RESERVE_USED;
atomic_inc(&qp->s_reserved_used);
}
@@ -550,7 +555,6 @@ static inline void rvt_qp_wqe_unreserve(
struct rvt_swqe *wqe)
{
if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED)) {
- wqe->wr.send_flags &= ~RVT_SEND_RESERVE_USED;
atomic_dec(&qp->s_reserved_used);
/* insure no compiler re-order up to s_last change */
smp_mb__after_atomic();
@@ -574,6 +578,7 @@ extern const enum ib_wc_opcode ib_rvt_wc_opcode[];
static inline void rvt_qp_swqe_complete(
struct rvt_qp *qp,
struct rvt_swqe *wqe,
+ enum ib_wc_opcode opcode,
enum ib_wc_status status)
{
if (unlikely(wqe->wr.send_flags & RVT_SEND_RESERVE_USED))
@@ -586,7 +591,7 @@ static inline void rvt_qp_swqe_complete(
memset(&wc, 0, sizeof(wc));
wc.wr_id = wqe->wr.wr_id;
wc.status = status;
- wc.opcode = ib_rvt_wc_opcode[wqe->wr.opcode];
+ wc.opcode = opcode;
wc.qp = &qp->ibqp;
wc.byte_len = wqe->length;
rvt_cq_enter(ibcq_to_rvtcq(qp->ibqp.send_cq), &wc,
diff --git a/include/rdma/uverbs_std_types.h b/include/rdma/uverbs_std_types.h
new file mode 100644
index 000000000000..7771ce966952
--- /dev/null
+++ b/include/rdma/uverbs_std_types.h
@@ -0,0 +1,114 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _UVERBS_STD_TYPES__
+#define _UVERBS_STD_TYPES__
+
+#include <rdma/uverbs_types.h>
+
+extern const struct uverbs_obj_fd_type uverbs_type_attrs_comp_channel;
+extern const struct uverbs_obj_idr_type uverbs_type_attrs_cq;
+extern const struct uverbs_obj_idr_type uverbs_type_attrs_qp;
+extern const struct uverbs_obj_idr_type uverbs_type_attrs_rwq_ind_table;
+extern const struct uverbs_obj_idr_type uverbs_type_attrs_wq;
+extern const struct uverbs_obj_idr_type uverbs_type_attrs_srq;
+extern const struct uverbs_obj_idr_type uverbs_type_attrs_ah;
+extern const struct uverbs_obj_idr_type uverbs_type_attrs_flow;
+extern const struct uverbs_obj_idr_type uverbs_type_attrs_mr;
+extern const struct uverbs_obj_idr_type uverbs_type_attrs_mw;
+extern const struct uverbs_obj_idr_type uverbs_type_attrs_pd;
+extern const struct uverbs_obj_idr_type uverbs_type_attrs_xrcd;
+
+static inline struct ib_uobject *__uobj_get(const struct uverbs_obj_type *type,
+ bool write,
+ struct ib_ucontext *ucontext,
+ int id)
+{
+ return rdma_lookup_get_uobject(type, ucontext, id, write);
+}
+
+#define uobj_get_type(_type) uverbs_type_attrs_##_type.type
+
+#define uobj_get_read(_type, _id, _ucontext) \
+ __uobj_get(&(_type), false, _ucontext, _id)
+
+#define uobj_get_obj_read(_type, _id, _ucontext) \
+({ \
+ struct ib_uobject *uobj = \
+ __uobj_get(&uobj_get_type(_type), \
+ false, _ucontext, _id); \
+ \
+ (struct ib_##_type *)(IS_ERR(uobj) ? NULL : uobj->object); \
+})
+
+#define uobj_get_write(_type, _id, _ucontext) \
+ __uobj_get(&(_type), true, _ucontext, _id)
+
+static inline void uobj_put_read(struct ib_uobject *uobj)
+{
+ rdma_lookup_put_uobject(uobj, false);
+}
+
+#define uobj_put_obj_read(_obj) \
+ uobj_put_read((_obj)->uobject)
+
+static inline void uobj_put_write(struct ib_uobject *uobj)
+{
+ rdma_lookup_put_uobject(uobj, true);
+}
+
+static inline int __must_check uobj_remove_commit(struct ib_uobject *uobj)
+{
+ return rdma_remove_commit_uobject(uobj);
+}
+
+static inline void uobj_alloc_commit(struct ib_uobject *uobj)
+{
+ rdma_alloc_commit_uobject(uobj);
+}
+
+static inline void uobj_alloc_abort(struct ib_uobject *uobj)
+{
+ rdma_alloc_abort_uobject(uobj);
+}
+
+static inline struct ib_uobject *__uobj_alloc(const struct uverbs_obj_type *type,
+ struct ib_ucontext *ucontext)
+{
+ return rdma_alloc_begin_uobject(type, ucontext);
+}
+
+#define uobj_alloc(_type, ucontext) \
+ __uobj_alloc(&(_type), ucontext)
+
+#endif
+
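The helpers above are thin wrappers around the rdma_*_uobject() calls declared in uverbs_types.h. As a rough illustration of how a command handler might use the lookup macros, here is a minimal sketch; the function name, its arguments and the error handling are made up for the example and are not part of this patch:

	/* Illustrative sketch only -- not from the patch. */
	#include <rdma/uverbs_std_types.h>

	static int example_use_cq(struct ib_ucontext *ucontext, u32 cq_handle)
	{
		/* Shared (read) lookup; the macro returns NULL if the lookup fails. */
		struct ib_cq *cq = uobj_get_obj_read(cq, cq_handle, ucontext);

		if (!cq)
			return -EINVAL;

		/* ... operate on the CQ while the read lock is held ... */

		/* Release the shared lock taken by uobj_get_obj_read(). */
		uobj_put_obj_read(cq);
		return 0;
	}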
diff --git a/include/rdma/uverbs_types.h b/include/rdma/uverbs_types.h
new file mode 100644
index 000000000000..351ea185df44
--- /dev/null
+++ b/include/rdma/uverbs_types.h
@@ -0,0 +1,172 @@
+/*
+ * Copyright (c) 2017, Mellanox Technologies inc. All rights reserved.
+ *
+ * This software is available to you under a choice of one of two
+ * licenses. You may choose to be licensed under the terms of the GNU
+ * General Public License (GPL) Version 2, available from the file
+ * COPYING in the main directory of this source tree, or the
+ * OpenIB.org BSD license below:
+ *
+ * Redistribution and use in source and binary forms, with or
+ * without modification, are permitted provided that the following
+ * conditions are met:
+ *
+ * - Redistributions of source code must retain the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer.
+ *
+ * - Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following
+ * disclaimer in the documentation and/or other materials
+ * provided with the distribution.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+ * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+ * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
+ * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
+ * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
+ * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+ * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ * SOFTWARE.
+ */
+
+#ifndef _UVERBS_TYPES_
+#define _UVERBS_TYPES_
+
+#include <linux/kernel.h>
+#include <rdma/ib_verbs.h>
+
+struct uverbs_obj_type;
+
+struct uverbs_obj_type_class {
+ /*
+ * Get an ib_uobject that corresponds to the given id from ucontext.
+ * These functions could create or destroy objects if required.
+ * The action will be finalized only when commit, abort or put fops are
+ * called.
+ * The flow of the different actions is:
+ * [alloc]: Starts with alloc_begin. The handler's logic is then
+ * executed. If the handler is successful, alloc_commit
+ * is called and the object is inserted to the repository.
+ * Once alloc_commit completes the object is visible to
+ * other threads and userspace.
+ * Otherwise, alloc_abort is called and the object is
+ * destroyed.
+ * [lookup]: Starts with lookup_get which fetches and locks the
+ * object. After the handler finished using the object, it
+ * needs to call lookup_put to unlock it. The exclusive
+ * flag indicates if the object is locked for exclusive
+ * access.
+ * [remove]: Starts with lookup_get with exclusive flag set. This
+ * locks the object for exclusive access. If the handler
+ * code completed successfully, remove_commit is called
+ * and the ib_uobject is removed from the context's
+ * uobjects repository and put. The object itself is
+ * destroyed as well. Once remove succeeds new krefs to
+ * the object cannot be acquired by other threads or
+ * userspace and the hardware driver is removed from the
+ * object. Other krefs on the object may still exist.
+ * If the handler code failed, lookup_put should be
+ * called. This callback is used when the context
+ * is destroyed as well (process termination,
+ * reset flow).
+ */
+ struct ib_uobject *(*alloc_begin)(const struct uverbs_obj_type *type,
+ struct ib_ucontext *ucontext);
+ void (*alloc_commit)(struct ib_uobject *uobj);
+ void (*alloc_abort)(struct ib_uobject *uobj);
+
+ struct ib_uobject *(*lookup_get)(const struct uverbs_obj_type *type,
+ struct ib_ucontext *ucontext, int id,
+ bool exclusive);
+ void (*lookup_put)(struct ib_uobject *uobj, bool exclusive);
+ /*
+ * Must be called with the exclusive lock held. If successful uobj is
+ * invalid on return. On failure uobject is left completely
+ * unchanged
+ */
+ int __must_check (*remove_commit)(struct ib_uobject *uobj,
+ enum rdma_remove_reason why);
+ u8 needs_kfree_rcu;
+};
+
+struct uverbs_obj_type {
+ const struct uverbs_obj_type_class * const type_class;
+ size_t obj_size;
+ unsigned int destroy_order;
+};
+
+/*
+ * Object type classes which support a detached state (the object is still
+ * alive but not attached to any context) need to make sure that:
+ * (a) no call through to a driver after a detach is called
+ * (b) detach isn't called concurrently with context_cleanup
+ */
+
+struct uverbs_obj_idr_type {
+ /*
+ * In idr based objects, uverbs_obj_type_class points to generic
+ * idr operations. In order to specialize the underlying types (e.g. CQs,
+ * QPs, etc.), we add destroy_object specific callbacks.
+ */
+ struct uverbs_obj_type type;
+
+ /* Free driver resources from the uobject, make the driver uncallable,
+ * and move the uobject to the detached state. If the object was
+ * destroyed by the user's request, a failure should leave the uobject
+ * completely unchanged.
+ */
+ int __must_check (*destroy_object)(struct ib_uobject *uobj,
+ enum rdma_remove_reason why);
+};
+
+struct ib_uobject *rdma_lookup_get_uobject(const struct uverbs_obj_type *type,
+ struct ib_ucontext *ucontext,
+ int id, bool exclusive);
+void rdma_lookup_put_uobject(struct ib_uobject *uobj, bool exclusive);
+struct ib_uobject *rdma_alloc_begin_uobject(const struct uverbs_obj_type *type,
+ struct ib_ucontext *ucontext);
+void rdma_alloc_abort_uobject(struct ib_uobject *uobj);
+int __must_check rdma_remove_commit_uobject(struct ib_uobject *uobj);
+int rdma_alloc_commit_uobject(struct ib_uobject *uobj);
+
+struct uverbs_obj_fd_type {
+ /*
+ * In fd based objects, uverbs_obj_type_class points to generic
+ * fd operations. In order to specialize the underlying types (e.g.
+ * completion_channel), we use fops, name and flags for fd creation.
+ * context_closed is called when the context is closed, either because
+ * the driver is removed or because the process terminated.
+ */
+ struct uverbs_obj_type type;
+ int (*context_closed)(struct ib_uobject_file *uobj_file,
+ enum rdma_remove_reason why);
+ const struct file_operations *fops;
+ const char *name;
+ int flags;
+};
+
+extern const struct uverbs_obj_type_class uverbs_idr_class;
+extern const struct uverbs_obj_type_class uverbs_fd_class;
+
+#define UVERBS_BUILD_BUG_ON(cond) (sizeof(char[1 - 2 * !!(cond)]) - \
+ sizeof(char))
+#define UVERBS_TYPE_ALLOC_FD(_size, _order) \
+ { \
+ .destroy_order = _order, \
+ .type_class = &uverbs_fd_class, \
+ .obj_size = (_size) + \
+ UVERBS_BUILD_BUG_ON((_size) < \
+ sizeof(struct ib_uobject_file)),\
+ }
+#define UVERBS_TYPE_ALLOC_IDR_SZ(_size, _order) \
+ { \
+ .destroy_order = _order, \
+ .type_class = &uverbs_idr_class, \
+ .obj_size = (_size) + \
+ UVERBS_BUILD_BUG_ON((_size) < \
+ sizeof(struct ib_uobject)), \
+ }
+#define UVERBS_TYPE_ALLOC_IDR(_order) \
+ UVERBS_TYPE_ALLOC_IDR_SZ(sizeof(struct ib_uobject), _order)
+#endif
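For the idr-backed objects, the expected pattern is to embed a struct uverbs_obj_type built by one of the UVERBS_TYPE_ALLOC_* macros and to supply a destroy_object callback. A hedged sketch with a hypothetical "foo" object; the name and the empty destructor are placeholders, not from the patch:

	/* Illustrative sketch only -- "foo" is a made-up object type. */
	#include <rdma/uverbs_types.h>

	static int uverbs_free_foo(struct ib_uobject *uobject,
				   enum rdma_remove_reason why)
	{
		/* Tear down whatever hangs off uobject->object here. */
		return 0;
	}

	const struct uverbs_obj_idr_type uverbs_type_attrs_foo = {
		/* idr-backed uobject of the default size, destroy order 0 */
		.type = UVERBS_TYPE_ALLOC_IDR(0),
		.destroy_object = uverbs_free_foo,
	};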
diff --git a/include/scsi/libfc.h b/include/scsi/libfc.h
index da5033dd8cbc..2109844be53d 100644
--- a/include/scsi/libfc.h
+++ b/include/scsi/libfc.h
@@ -23,6 +23,7 @@
#include <linux/timer.h>
#include <linux/if.h>
#include <linux/percpu.h>
+#include <linux/refcount.h>
#include <scsi/scsi_transport.h>
#include <scsi/scsi_transport_fc.h>
@@ -321,7 +322,7 @@ struct fc_seq_els_data {
*/
struct fc_fcp_pkt {
spinlock_t scsi_pkt_lock;
- atomic_t ref_cnt;
+ refcount_t ref_cnt;
/* SCSI command and data transfer information */
u32 data_len;
diff --git a/include/scsi/libiscsi.h b/include/scsi/libiscsi.h
index 583875ea136a..c9bd935f4fd1 100644
--- a/include/scsi/libiscsi.h
+++ b/include/scsi/libiscsi.h
@@ -29,6 +29,7 @@
#include <linux/timer.h>
#include <linux/workqueue.h>
#include <linux/kfifo.h>
+#include <linux/refcount.h>
#include <scsi/iscsi_proto.h>
#include <scsi/iscsi_if.h>
#include <scsi/scsi_transport_iscsi.h>
@@ -139,7 +140,7 @@ struct iscsi_task {
/* state set/tested under session->lock */
int state;
- atomic_t refcount;
+ refcount_t refcount;
struct list_head running; /* running cmd list */
void *dd_data; /* driver/transport data */
};
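Both reference-count fields above move from atomic_t to refcount_t, which saturates and WARNs on overflow instead of silently wrapping. A minimal sketch of the usual get/put pattern with the refcount API, using a hypothetical structure rather than the actual libfc/libiscsi code:

	/* Illustrative sketch only -- hypothetical structure, not libfc/libiscsi. */
	#include <linux/refcount.h>
	#include <linux/slab.h>

	struct example_pkt {
		refcount_t ref_cnt;
		/* ... payload ... */
	};

	static struct example_pkt *example_pkt_alloc(gfp_t gfp)
	{
		struct example_pkt *pkt = kzalloc(sizeof(*pkt), gfp);

		if (pkt)
			refcount_set(&pkt->ref_cnt, 1);	/* start with one reference */
		return pkt;
	}

	static void example_pkt_get(struct example_pkt *pkt)
	{
		refcount_inc(&pkt->ref_cnt);	/* WARNs and saturates on overflow */
	}

	static void example_pkt_put(struct example_pkt *pkt)
	{
		if (refcount_dec_and_test(&pkt->ref_cnt))
			kfree(pkt);		/* last reference gone */
	}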
diff --git a/include/scsi/libsas.h b/include/scsi/libsas.h
index dae99d7d2bc0..dd0f72c95abe 100644
--- a/include/scsi/libsas.h
+++ b/include/scsi/libsas.h
@@ -693,7 +693,6 @@ extern int sas_bios_param(struct scsi_device *,
sector_t capacity, int *hsc);
extern struct scsi_transport_template *
sas_domain_attach_transport(struct sas_domain_function_template *);
-extern void sas_domain_release_transport(struct scsi_transport_template *);
int sas_discover_root_expander(struct domain_device *);
diff --git a/include/scsi/scsi_device.h b/include/scsi/scsi_device.h
index 080c7ce9bae8..05641aebd181 100644
--- a/include/scsi/scsi_device.h
+++ b/include/scsi/scsi_device.h
@@ -316,7 +316,7 @@ extern int scsi_unregister_device_handler(struct scsi_device_handler *scsi_dh);
void scsi_attach_vpd(struct scsi_device *sdev);
extern struct scsi_device *scsi_device_from_queue(struct request_queue *q);
-extern int scsi_device_get(struct scsi_device *);
+extern int __must_check scsi_device_get(struct scsi_device *);
extern void scsi_device_put(struct scsi_device *);
extern struct scsi_device *scsi_device_lookup(struct Scsi_Host *,
uint, uint, u64);
diff --git a/include/scsi/scsi_driver.h b/include/scsi/scsi_driver.h
index 891a658aa867..a5534ccad859 100644
--- a/include/scsi/scsi_driver.h
+++ b/include/scsi/scsi_driver.h
@@ -16,6 +16,7 @@ struct scsi_driver {
void (*uninit_command)(struct scsi_cmnd *);
int (*done)(struct scsi_cmnd *);
int (*eh_action)(struct scsi_cmnd *, int);
+ void (*eh_reset)(struct scsi_cmnd *);
};
#define to_scsi_driver(drv) \
container_of((drv), struct scsi_driver, gendrv)
diff --git a/include/scsi/scsi_eh.h b/include/scsi/scsi_eh.h
index 98d366b55770..64d30d80dadb 100644
--- a/include/scsi/scsi_eh.h
+++ b/include/scsi/scsi_eh.h
@@ -23,14 +23,15 @@ static inline bool scsi_sense_is_deferred(const struct scsi_sense_hdr *sshdr)
return ((sshdr->response_code >= 0x70) && (sshdr->response_code & 1));
}
-extern int scsi_get_sense_info_fld(const u8 * sense_buffer, int sb_len,
- u64 * info_out);
+extern bool scsi_get_sense_info_fld(const u8 *sense_buffer, int sb_len,
+ u64 *info_out);
extern int scsi_ioctl_reset(struct scsi_device *, int __user *);
struct scsi_eh_save {
/* saved state */
int result;
+ int eh_eflags;
enum dma_data_direction data_direction;
unsigned underflow;
unsigned char cmd_len;
diff --git a/include/scsi/scsi_host.h b/include/scsi/scsi_host.h
index 3cd8c3bec638..afb04811b7b9 100644
--- a/include/scsi/scsi_host.h
+++ b/include/scsi/scsi_host.h
@@ -452,11 +452,6 @@ struct scsi_host_template {
unsigned no_write_same:1;
/*
- * True if asynchronous aborts are not supported
- */
- unsigned no_async_abort:1;
-
- /*
* Countdown for host blocking with no commands outstanding.
*/
unsigned int max_host_blocked;
diff --git a/include/scsi/scsi_request.h b/include/scsi/scsi_request.h
index ba0aeb980f7e..f0c76f9dc285 100644
--- a/include/scsi/scsi_request.h
+++ b/include/scsi/scsi_request.h
@@ -9,8 +9,10 @@ struct scsi_request {
unsigned char __cmd[BLK_MAX_CDB];
unsigned char *cmd;
unsigned short cmd_len;
+ int result;
unsigned int sense_len;
unsigned int resid_len; /* residual count */
+ int retries;
void *sense;
};
diff --git a/include/scsi/scsi_transport_fc.h b/include/scsi/scsi_transport_fc.h
index b21b8aa58c4d..6e208bb32c78 100644
--- a/include/scsi/scsi_transport_fc.h
+++ b/include/scsi/scsi_transport_fc.h
@@ -162,6 +162,7 @@ enum fc_tgtid_binding_type {
#define FC_PORT_ROLE_FCP_TARGET 0x01
#define FC_PORT_ROLE_FCP_INITIATOR 0x02
#define FC_PORT_ROLE_IP_PORT 0x04
+#define FC_PORT_ROLE_FCP_DUMMY_INITIATOR 0x08
/* The following are for compatibility */
#define FC_RPORT_ROLE_UNKNOWN FC_PORT_ROLE_UNKNOWN
diff --git a/include/scsi/sg.h b/include/scsi/sg.h
index 3afec7032448..20bc71c3e0b8 100644
--- a/include/scsi/sg.h
+++ b/include/scsi/sg.h
@@ -197,7 +197,6 @@ typedef struct sg_req_info { /* used by SG_GET_REQUEST_TABLE ioctl() */
#define SG_DEFAULT_RETRIES 0
/* Defaults, commented if they differ from original sg driver */
-#define SG_DEF_FORCE_LOW_DMA 0 /* was 1 -> memory below 16MB on i386 */
#define SG_DEF_FORCE_PACK_ID 0
#define SG_DEF_KEEP_ORPHAN 0
#define SG_DEF_RESERVED_SIZE SG_SCATTER_SZ /* load time option */
diff --git a/include/soc/fsl/qman.h b/include/soc/fsl/qman.h
index 3d4df74a96de..d4dfefdee6c1 100644
--- a/include/soc/fsl/qman.h
+++ b/include/soc/fsl/qman.h
@@ -36,8 +36,11 @@
/* Hardware constants */
#define QM_CHANNEL_SWPORTAL0 0
#define QMAN_CHANNEL_POOL1 0x21
+#define QMAN_CHANNEL_CAAM 0x80
#define QMAN_CHANNEL_POOL1_REV3 0x401
+#define QMAN_CHANNEL_CAAM_REV3 0x840
extern u16 qm_channel_pool1;
+extern u16 qm_channel_caam;
/* Portal processing (interrupt) sources */
#define QM_PIRQ_CSCI 0x00100000 /* Congestion State Change */
@@ -165,6 +168,7 @@ static inline void qm_fd_set_param(struct qm_fd *fd, enum qm_fd_format fmt,
#define qm_fd_set_contig_big(fd, len) \
qm_fd_set_param(fd, qm_fd_contig_big, 0, len)
#define qm_fd_set_sg_big(fd, len) qm_fd_set_param(fd, qm_fd_sg_big, 0, len)
+#define qm_fd_set_compound(fd, len) qm_fd_set_param(fd, qm_fd_compound, 0, len)
static inline void qm_fd_clear_fd(struct qm_fd *fd)
{
@@ -639,6 +643,7 @@ struct qm_mcc_initcgr {
#define QM_CGR_WE_MODE 0x0001
#define QMAN_CGR_FLAG_USE_INIT 0x00000001
+#define QMAN_CGR_MODE_FRAME 0x00000001
/* Portal and Frame Queues */
/* Represents a managed portal */
@@ -791,6 +796,84 @@ struct qman_cgr {
#define QMAN_INITFQ_FLAG_SCHED 0x00000001 /* schedule rather than park */
#define QMAN_INITFQ_FLAG_LOCAL 0x00000004 /* set dest portal */
+/*
+ * For qman_volatile_dequeue(); Choose one PRECEDENCE. EXACT is optional. Use
+ * NUMFRAMES(n) (6-bit) or NUMFRAMES_TILLEMPTY to fill in the frame-count. Use
+ * FQID(n) to fill in the frame queue ID.
+ */
+#define QM_VDQCR_PRECEDENCE_VDQCR 0x0
+#define QM_VDQCR_PRECEDENCE_SDQCR 0x80000000
+#define QM_VDQCR_EXACT 0x40000000
+#define QM_VDQCR_NUMFRAMES_MASK 0x3f000000
+#define QM_VDQCR_NUMFRAMES_SET(n) (((n) & 0x3f) << 24)
+#define QM_VDQCR_NUMFRAMES_GET(n) (((n) >> 24) & 0x3f)
+#define QM_VDQCR_NUMFRAMES_TILLEMPTY QM_VDQCR_NUMFRAMES_SET(0)
+
+#define QMAN_VOLATILE_FLAG_WAIT 0x00000001 /* wait if VDQCR is in use */
+#define QMAN_VOLATILE_FLAG_WAIT_INT 0x00000002 /* if wait, interruptible? */
+#define QMAN_VOLATILE_FLAG_FINISH 0x00000004 /* wait till VDQCR completes */
+
+/* "Query FQ Non-Programmable Fields" */
+struct qm_mcr_queryfq_np {
+ u8 verb;
+ u8 result;
+ u8 __reserved1;
+ u8 state; /* QM_MCR_NP_STATE_*** */
+ u32 fqd_link; /* 24-bit, _res2[24-31] */
+ u16 odp_seq; /* 14-bit, _res3[14-15] */
+ u16 orp_nesn; /* 14-bit, _res4[14-15] */
+ u16 orp_ea_hseq; /* 15-bit, _res5[15] */
+ u16 orp_ea_tseq; /* 15-bit, _res6[15] */
+ u32 orp_ea_hptr; /* 24-bit, _res7[24-31] */
+ u32 orp_ea_tptr; /* 24-bit, _res8[24-31] */
+ u32 pfdr_hptr; /* 24-bit, _res9[24-31] */
+ u32 pfdr_tptr; /* 24-bit, _res10[24-31] */
+ u8 __reserved2[5];
+ u8 is; /* 1-bit, _res12[1-7] */
+ u16 ics_surp;
+ u32 byte_cnt;
+ u32 frm_cnt; /* 24-bit, _res13[24-31] */
+ u32 __reserved3;
+ u16 ra1_sfdr; /* QM_MCR_NP_RA1_*** */
+ u16 ra2_sfdr; /* QM_MCR_NP_RA2_*** */
+ u16 __reserved4;
+ u16 od1_sfdr; /* QM_MCR_NP_OD1_*** */
+ u16 od2_sfdr; /* QM_MCR_NP_OD2_*** */
+ u16 od3_sfdr; /* QM_MCR_NP_OD3_*** */
+} __packed;
+
+#define QM_MCR_NP_STATE_FE 0x10
+#define QM_MCR_NP_STATE_R 0x08
+#define QM_MCR_NP_STATE_MASK 0x07 /* Reads FQD::STATE; */
+#define QM_MCR_NP_STATE_OOS 0x00
+#define QM_MCR_NP_STATE_RETIRED 0x01
+#define QM_MCR_NP_STATE_TEN_SCHED 0x02
+#define QM_MCR_NP_STATE_TRU_SCHED 0x03
+#define QM_MCR_NP_STATE_PARKED 0x04
+#define QM_MCR_NP_STATE_ACTIVE 0x05
+#define QM_MCR_NP_PTR_MASK 0x07ff /* for RA[12] & OD[123] */
+#define QM_MCR_NP_RA1_NRA(v) (((v) >> 14) & 0x3) /* FQD::NRA */
+#define QM_MCR_NP_RA2_IT(v) (((v) >> 14) & 0x1) /* FQD::IT */
+#define QM_MCR_NP_OD1_NOD(v) (((v) >> 14) & 0x3) /* FQD::NOD */
+#define QM_MCR_NP_OD3_NPC(v) (((v) >> 14) & 0x3) /* FQD::NPC */
+
+enum qm_mcr_queryfq_np_masks {
+ qm_mcr_fqd_link_mask = BIT(24) - 1,
+ qm_mcr_odp_seq_mask = BIT(14) - 1,
+ qm_mcr_orp_nesn_mask = BIT(14) - 1,
+ qm_mcr_orp_ea_hseq_mask = BIT(15) - 1,
+ qm_mcr_orp_ea_tseq_mask = BIT(15) - 1,
+ qm_mcr_orp_ea_hptr_mask = BIT(24) - 1,
+ qm_mcr_orp_ea_tptr_mask = BIT(24) - 1,
+ qm_mcr_pfdr_hptr_mask = BIT(24) - 1,
+ qm_mcr_pfdr_tptr_mask = BIT(24) - 1,
+ qm_mcr_is_mask = BIT(1) - 1,
+ qm_mcr_frm_cnt_mask = BIT(24) - 1,
+};
+
+#define qm_mcr_np_get(np, field) \
+ ((np)->field & (qm_mcr_##field##_mask))
+
/* Portal Management */
/**
* qman_p_irqsource_add - add processing sources to be interrupt-driven
@@ -963,6 +1046,25 @@ int qman_retire_fq(struct qman_fq *fq, u32 *flags);
*/
int qman_oos_fq(struct qman_fq *fq);
+/*
+ * qman_volatile_dequeue - Issue a volatile dequeue command
+ * @fq: the frame queue object to dequeue from
+ * @flags: a bit-mask of QMAN_VOLATILE_FLAG_*** options
+ * @vdqcr: bit mask of QM_VDQCR_*** options, as per qm_dqrr_vdqcr_set()
+ *
+ * Attempts to lock access to the portal's VDQCR volatile dequeue functionality.
+ * The function will block and sleep if QMAN_VOLATILE_FLAG_WAIT is specified and
+ * the VDQCR is already in use, otherwise returns non-zero for failure. If
+ * QMAN_VOLATILE_FLAG_FINISH is specified, the function will only return once
+ * the VDQCR command has finished executing (i.e. once the callback for the last
+ * DQRR entry resulting from the VDQCR command has been called). If not using
+ * the FINISH flag, completion can be determined either by detecting the
+ * presence of the QM_DQRR_STAT_UNSCHEDULED and QM_DQRR_STAT_DQCR_EXPIRED bits
+ * in the "stat" parameter passed to the FQ's dequeue callback, or by waiting
+ * for the QMAN_FQ_STATE_VDQCR bit to disappear.
+ */
+int qman_volatile_dequeue(struct qman_fq *fq, u32 flags, u32 vdqcr);
+
/**
* qman_enqueue - Enqueue a frame to a frame queue
* @fq: the frame queue object to enqueue to
@@ -994,6 +1096,13 @@ int qman_alloc_fqid_range(u32 *result, u32 count);
*/
int qman_release_fqid(u32 fqid);
+/**
+ * qman_query_fq_np - Queries non-programmable FQD fields
+ * @fq: the frame queue object to be queried
+ * @np: storage for the queried FQD fields
+ */
+int qman_query_fq_np(struct qman_fq *fq, struct qm_mcr_queryfq_np *np);
+
/* Pool-channel management */
/**
* qman_alloc_pool_range - Allocate a contiguous range of pool-channel IDs
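A hedged sketch of how the new query interface might be combined with qm_mcr_np_get(); the wrapper function below is illustrative only and not part of the patch:

	/* Illustrative sketch only -- reads the frame count of a FQ. */
	#include <soc/fsl/qman.h>

	static int example_read_frame_count(struct qman_fq *fq, u32 *frm_cnt)
	{
		struct qm_mcr_queryfq_np np;
		int ret;

		ret = qman_query_fq_np(fq, &np);
		if (ret)
			return ret;

		/* qm_mcr_np_get() applies the field's width mask (24 bits here). */
		*frm_cnt = qm_mcr_np_get(&np, frm_cnt);
		return 0;
	}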
diff --git a/include/soc/tegra/flowctrl.h b/include/soc/tegra/flowctrl.h
new file mode 100644
index 000000000000..8f86aea4024b
--- /dev/null
+++ b/include/soc/tegra/flowctrl.h
@@ -0,0 +1,82 @@
+/*
+ * Functions and macros to control the flowcontroller
+ *
+ * Copyright (c) 2010-2012, NVIDIA Corporation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License
+ * along with this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __SOC_TEGRA_FLOWCTRL_H__
+#define __SOC_TEGRA_FLOWCTRL_H__
+
+#define FLOW_CTRL_HALT_CPU0_EVENTS 0x0
+#define FLOW_CTRL_WAITEVENT (2 << 29)
+#define FLOW_CTRL_WAIT_FOR_INTERRUPT (4 << 29)
+#define FLOW_CTRL_JTAG_RESUME (1 << 28)
+#define FLOW_CTRL_SCLK_RESUME (1 << 27)
+#define FLOW_CTRL_HALT_CPU_IRQ (1 << 10)
+#define FLOW_CTRL_HALT_CPU_FIQ (1 << 8)
+#define FLOW_CTRL_HALT_LIC_IRQ (1 << 11)
+#define FLOW_CTRL_HALT_LIC_FIQ (1 << 10)
+#define FLOW_CTRL_HALT_GIC_IRQ (1 << 9)
+#define FLOW_CTRL_HALT_GIC_FIQ (1 << 8)
+#define FLOW_CTRL_CPU0_CSR 0x8
+#define FLOW_CTRL_CSR_INTR_FLAG (1 << 15)
+#define FLOW_CTRL_CSR_EVENT_FLAG (1 << 14)
+#define FLOW_CTRL_CSR_ENABLE_EXT_CRAIL (1 << 13)
+#define FLOW_CTRL_CSR_ENABLE_EXT_NCPU (1 << 12)
+#define FLOW_CTRL_CSR_ENABLE_EXT_MASK ( \
+ FLOW_CTRL_CSR_ENABLE_EXT_NCPU | \
+ FLOW_CTRL_CSR_ENABLE_EXT_CRAIL)
+#define FLOW_CTRL_CSR_ENABLE (1 << 0)
+#define FLOW_CTRL_HALT_CPU1_EVENTS 0x14
+#define FLOW_CTRL_CPU1_CSR 0x18
+
+#define TEGRA20_FLOW_CTRL_CSR_WFE_CPU0 (1 << 4)
+#define TEGRA20_FLOW_CTRL_CSR_WFE_BITMAP (3 << 4)
+#define TEGRA20_FLOW_CTRL_CSR_WFI_BITMAP 0
+
+#define TEGRA30_FLOW_CTRL_CSR_WFI_CPU0 (1 << 8)
+#define TEGRA30_FLOW_CTRL_CSR_WFE_BITMAP (0xF << 4)
+#define TEGRA30_FLOW_CTRL_CSR_WFI_BITMAP (0xF << 8)
+
+#ifndef __ASSEMBLY__
+#ifdef CONFIG_SOC_TEGRA_FLOWCTRL
+u32 flowctrl_read_cpu_csr(unsigned int cpuid);
+void flowctrl_write_cpu_csr(unsigned int cpuid, u32 value);
+void flowctrl_write_cpu_halt(unsigned int cpuid, u32 value);
+
+void flowctrl_cpu_suspend_enter(unsigned int cpuid);
+void flowctrl_cpu_suspend_exit(unsigned int cpuid);
+#else
+static inline u32 flowctrl_read_cpu_csr(unsigned int cpuid)
+{
+ return 0;
+}
+
+static inline void flowctrl_write_cpu_csr(unsigned int cpuid, u32 value)
+{
+}
+
+static inline void flowctrl_write_cpu_halt(unsigned int cpuid, u32 value) {}
+
+static inline void flowctrl_cpu_suspend_enter(unsigned int cpuid)
+{
+}
+
+static inline void flowctrl_cpu_suspend_exit(unsigned int cpuid)
+{
+}
+#endif /* CONFIG_SOC_TEGRA_FLOWCTRL */
+#endif /* __ASSEMBLY__ */
+#endif /* __SOC_TEGRA_FLOWCTRL_H__ */
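As a rough illustration of the register interface, a caller could park a secondary CPU by writing a halt event; the helper name and the chosen event bits are illustrative, not a configuration taken from this patch:

	/* Illustrative sketch only -- example bit choice, not from the patch. */
	#include <soc/tegra/flowctrl.h>

	static void example_park_cpu(unsigned int cpuid)
	{
		/* Halt on a wait-for-interrupt event, resumable by CPU IRQ or FIQ. */
		flowctrl_write_cpu_halt(cpuid, FLOW_CTRL_WAIT_FOR_INTERRUPT |
					       FLOW_CTRL_HALT_CPU_IRQ |
					       FLOW_CTRL_HALT_CPU_FIQ);
	}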
diff --git a/include/soc/tegra/pmc.h b/include/soc/tegra/pmc.h
index 2f271d1b9cea..1c3982bc558f 100644
--- a/include/soc/tegra/pmc.h
+++ b/include/soc/tegra/pmc.h
@@ -26,12 +26,6 @@
struct clk;
struct reset_control;
-#ifdef CONFIG_PM_SLEEP
-enum tegra_suspend_mode tegra_pmc_get_suspend_mode(void);
-void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode);
-void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode);
-#endif /* CONFIG_PM_SLEEP */
-
#ifdef CONFIG_SMP
bool tegra_pmc_cpu_is_powered(unsigned int cpuid);
int tegra_pmc_cpu_power_on(unsigned int cpuid);
@@ -144,7 +138,7 @@ enum tegra_io_pad_voltage {
TEGRA_IO_PAD_3300000UV,
};
-#ifdef CONFIG_ARCH_TEGRA
+#ifdef CONFIG_SOC_TEGRA_PMC
int tegra_powergate_is_powered(unsigned int id);
int tegra_powergate_power_on(unsigned int id);
int tegra_powergate_power_off(unsigned int id);
@@ -163,6 +157,11 @@ int tegra_io_pad_get_voltage(enum tegra_io_pad id);
/* deprecated, use tegra_io_pad_power_{enable,disable}() instead */
int tegra_io_rail_power_on(unsigned int id);
int tegra_io_rail_power_off(unsigned int id);
+
+enum tegra_suspend_mode tegra_pmc_get_suspend_mode(void);
+void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode);
+void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode);
+
#else
static inline int tegra_powergate_is_powered(unsigned int id)
{
@@ -221,6 +220,20 @@ static inline int tegra_io_rail_power_off(unsigned int id)
{
return -ENOSYS;
}
-#endif /* CONFIG_ARCH_TEGRA */
+
+static inline enum tegra_suspend_mode tegra_pmc_get_suspend_mode(void)
+{
+ return TEGRA_SUSPEND_NONE;
+}
+
+static inline void tegra_pmc_set_suspend_mode(enum tegra_suspend_mode mode)
+{
+}
+
+static inline void tegra_pmc_enter_suspend_mode(enum tegra_suspend_mode mode)
+{
+}
+
+#endif /* CONFIG_SOC_TEGRA_PMC */
#endif /* __SOC_TEGRA_PMC_H__ */
diff --git a/include/sound/cs35l35.h b/include/sound/cs35l35.h
new file mode 100644
index 000000000000..29da899e17e4
--- /dev/null
+++ b/include/sound/cs35l35.h
@@ -0,0 +1,108 @@
+/*
+ * linux/sound/cs35l35.h -- Platform data for CS35l35
+ *
+ * Copyright (c) 2016 Cirrus Logic Inc.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+#ifndef __CS35L35_H
+#define __CS35L35_H
+
+struct classh_cfg {
+ /*
+ * Class H Algorithm Control Variables
+ * You can either have it done
+ * automatically or you can adjust
+ * these variables for tuning.
+ *
+ * If you do not enable the internal algorithm,
+ * you will get a set of mixer controls for
+ * Class H tuning.
+ *
+ * Section 4.3 of the datasheet
+ */
+ bool classh_bst_override;
+ bool classh_algo_enable;
+ int classh_bst_max_limit;
+ int classh_mem_depth;
+ int classh_release_rate;
+ int classh_headroom;
+ int classh_wk_fet_disable;
+ int classh_wk_fet_delay;
+ int classh_wk_fet_thld;
+ int classh_vpch_auto;
+ int classh_vpch_rate;
+ int classh_vpch_man;
+};
+
+struct monitor_cfg {
+ /*
+ * Signal Monitor Data:
+ * highly configurable signal monitoring with
+ * flexible data positioning and different types
+ * of monitoring data.
+ *
+ * Section 4.8.2 - 4.8.4 of the datasheet
+ */
+ bool is_present;
+ bool imon_specs;
+ bool vmon_specs;
+ bool vpmon_specs;
+ bool vbstmon_specs;
+ bool vpbrstat_specs;
+ bool zerofill_specs;
+ u8 imon_dpth;
+ u8 imon_loc;
+ u8 imon_frm;
+ u8 imon_scale;
+ u8 vmon_dpth;
+ u8 vmon_loc;
+ u8 vmon_frm;
+ u8 vpmon_dpth;
+ u8 vpmon_loc;
+ u8 vpmon_frm;
+ u8 vbstmon_dpth;
+ u8 vbstmon_loc;
+ u8 vbstmon_frm;
+ u8 vpbrstat_dpth;
+ u8 vpbrstat_loc;
+ u8 vpbrstat_frm;
+ u8 zerofill_dpth;
+ u8 zerofill_loc;
+ u8 zerofill_frm;
+};
+
+struct cs35l35_platform_data {
+
+ /* Stereo (2 Device) */
+ bool stereo;
+ /* serial port drive strength */
+ int sp_drv_str;
+ /* serial port drive in unused slots */
+ int sp_drv_unused;
+ /* Boost Power Down with FET */
+ bool bst_pdn_fet_on;
+ /* Boost Voltage : used if ClassH Algo Enabled */
+ int bst_vctl;
+ /* Boost Converter Peak Current CTRL */
+ int bst_ipk;
+ /* Amp Gain Zero Cross */
+ bool gain_zc;
+ /* Audio Input Location */
+ int aud_channel;
+ /* Advisory Input Location */
+ int adv_channel;
+ /* Shared Boost for stereo */
+ bool shared_bst;
+ /* Specifies this amp is using an external boost supply */
+ bool ext_bst;
+ /* ClassH Algorithm */
+ struct classh_cfg classh_algo;
+ /* Monitor Config */
+ struct monitor_cfg mon_cfg;
+};
+
+#endif /* __CS35L35_H */
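A board file would typically provide this platform data statically. The sketch below uses placeholder values chosen purely for illustration; they are not recommended settings and do not come from this patch:

	/* Illustrative sketch only -- placeholder values, not tuned settings. */
	#include <sound/cs35l35.h>

	static struct cs35l35_platform_data example_cs35l35_pdata = {
		.stereo = true,			/* two devices sharing the bus */
		.bst_ipk = 2,			/* boost peak-current selection */
		.gain_zc = true,		/* apply gain changes on zero cross */
		.classh_algo = {
			.classh_algo_enable = true,	/* let the part tune Class H */
		},
	};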
diff --git a/include/sound/hda_register.h b/include/sound/hda_register.h
index 0013063db7f2..15fc6daf9096 100644
--- a/include/sound/hda_register.h
+++ b/include/sound/hda_register.h
@@ -106,8 +106,26 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define AZX_REG_HSW_EM4 0x100c
#define AZX_REG_HSW_EM5 0x1010
-/* Skylake/Broxton display HD-A controller Extended Mode registers */
-#define AZX_REG_SKL_EM4L 0x1040
+/* Skylake/Broxton vendor-specific registers */
+#define AZX_REG_VS_EM1 0x1000
+#define AZX_REG_VS_INRC 0x1004
+#define AZX_REG_VS_OUTRC 0x1008
+#define AZX_REG_VS_FIFOTRK 0x100C
+#define AZX_REG_VS_FIFOTRK2 0x1010
+#define AZX_REG_VS_EM2 0x1030
+#define AZX_REG_VS_EM3L 0x1038
+#define AZX_REG_VS_EM3U 0x103C
+#define AZX_REG_VS_EM4L 0x1040
+#define AZX_REG_VS_EM4U 0x1044
+#define AZX_REG_VS_LTRC 0x1048
+#define AZX_REG_VS_D0I3C 0x104A
+#define AZX_REG_VS_PCE 0x104B
+#define AZX_REG_VS_L2MAGC 0x1050
+#define AZX_REG_VS_L2LAHPT 0x1054
+#define AZX_REG_VS_SDXDPIB_XBASE 0x1084
+#define AZX_REG_VS_SDXDPIB_XINTERVAL 0x20
+#define AZX_REG_VS_SDXEFIFOS_XBASE 0x1094
+#define AZX_REG_VS_SDXEFIFOS_XINTERVAL 0x20
/* PCI space */
#define AZX_PCIREG_TCSEL 0x44
@@ -243,9 +261,11 @@ enum { SDI0, SDI1, SDI2, SDI3, SDO0, SDO1, SDO2, SDO3 };
#define AZX_REG_ML_LOUTPAY 0x20
#define AZX_REG_ML_LINPAY 0x30
-#define AZX_MLCTL_SPA (1<<16)
-#define AZX_MLCTL_CPA 23
-
+#define ML_LCTL_SCF_MASK 0xF
+#define AZX_MLCTL_SPA (0x1 << 16)
+#define AZX_MLCTL_CPA (0x1 << 23)
+#define AZX_MLCTL_SPA_SHIFT 16
+#define AZX_MLCTL_CPA_SHIFT 23
/* registers for DMA Resume Capability Structure */
#define AZX_DRSM_CAP_ID 0x5
diff --git a/include/sound/hdaudio.h b/include/sound/hdaudio.h
index 56004ec8d441..96546b30e900 100644
--- a/include/sound/hdaudio.h
+++ b/include/sound/hdaudio.h
@@ -368,24 +368,32 @@ void snd_hdac_bus_free_stream_pages(struct hdac_bus *bus);
/*
* macros for easy use
*/
-#define _snd_hdac_chip_write(type, chip, reg, value) \
- ((chip)->io_ops->reg_write ## type(value, (chip)->remap_addr + (reg)))
-#define _snd_hdac_chip_read(type, chip, reg) \
- ((chip)->io_ops->reg_read ## type((chip)->remap_addr + (reg)))
+#define _snd_hdac_chip_writeb(chip, reg, value) \
+ ((chip)->io_ops->reg_writeb(value, (chip)->remap_addr + (reg)))
+#define _snd_hdac_chip_readb(chip, reg) \
+ ((chip)->io_ops->reg_readb((chip)->remap_addr + (reg)))
+#define _snd_hdac_chip_writew(chip, reg, value) \
+ ((chip)->io_ops->reg_writew(value, (chip)->remap_addr + (reg)))
+#define _snd_hdac_chip_readw(chip, reg) \
+ ((chip)->io_ops->reg_readw((chip)->remap_addr + (reg)))
+#define _snd_hdac_chip_writel(chip, reg, value) \
+ ((chip)->io_ops->reg_writel(value, (chip)->remap_addr + (reg)))
+#define _snd_hdac_chip_readl(chip, reg) \
+ ((chip)->io_ops->reg_readl((chip)->remap_addr + (reg)))
/* read/write a register, pass without AZX_REG_ prefix */
#define snd_hdac_chip_writel(chip, reg, value) \
- _snd_hdac_chip_write(l, chip, AZX_REG_ ## reg, value)
+ _snd_hdac_chip_writel(chip, AZX_REG_ ## reg, value)
#define snd_hdac_chip_writew(chip, reg, value) \
- _snd_hdac_chip_write(w, chip, AZX_REG_ ## reg, value)
+ _snd_hdac_chip_writew(chip, AZX_REG_ ## reg, value)
#define snd_hdac_chip_writeb(chip, reg, value) \
- _snd_hdac_chip_write(b, chip, AZX_REG_ ## reg, value)
+ _snd_hdac_chip_writeb(chip, AZX_REG_ ## reg, value)
#define snd_hdac_chip_readl(chip, reg) \
- _snd_hdac_chip_read(l, chip, AZX_REG_ ## reg)
+ _snd_hdac_chip_readl(chip, AZX_REG_ ## reg)
#define snd_hdac_chip_readw(chip, reg) \
- _snd_hdac_chip_read(w, chip, AZX_REG_ ## reg)
+ _snd_hdac_chip_readw(chip, AZX_REG_ ## reg)
#define snd_hdac_chip_readb(chip, reg) \
- _snd_hdac_chip_read(b, chip, AZX_REG_ ## reg)
+ _snd_hdac_chip_readb(chip, AZX_REG_ ## reg)
/* update a register, pass without AZX_REG_ prefix */
#define snd_hdac_chip_updatel(chip, reg, mask, val) \
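Callers keep passing the short register names without the AZX_REG_ prefix; only the internal per-width helpers change. A hedged sketch of a call site (the wrapper function is illustrative only):

	/* Illustrative sketch only -- not from the patch. */
	#include <sound/hdaudio.h>
	#include <sound/hda_register.h>

	static u32 example_read_gctl(struct hdac_bus *bus)
	{
		/* Expands to AZX_REG_GCTL via _snd_hdac_chip_readl(). */
		return snd_hdac_chip_readl(bus, GCTL);
	}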
diff --git a/include/sound/soc.h b/include/sound/soc.h
index cdfb55f7aede..5170fd81e1fd 100644
--- a/include/sound/soc.h
+++ b/include/sound/soc.h
@@ -434,6 +434,8 @@ int snd_soc_codec_set_sysclk(struct snd_soc_codec *codec, int clk_id,
int source, unsigned int freq, int dir);
int snd_soc_codec_set_pll(struct snd_soc_codec *codec, int pll_id, int source,
unsigned int freq_in, unsigned int freq_out);
+int snd_soc_codec_set_jack(struct snd_soc_codec *codec,
+ struct snd_soc_jack *jack, void *data);
int snd_soc_register_card(struct snd_soc_card *card);
int snd_soc_unregister_card(struct snd_soc_card *card);
@@ -497,7 +499,15 @@ void snd_soc_runtime_deactivate(struct snd_soc_pcm_runtime *rtd, int stream);
int snd_soc_runtime_set_dai_fmt(struct snd_soc_pcm_runtime *rtd,
unsigned int dai_fmt);
+#ifdef CONFIG_DMI
int snd_soc_set_dmi_name(struct snd_soc_card *card, const char *flavour);
+#else
+static inline int snd_soc_set_dmi_name(struct snd_soc_card *card,
+ const char *flavour)
+{
+ return 0;
+}
+#endif
/* Utility functions to get clock rates from various things */
int snd_soc_calc_frame_size(int sample_size, int channels, int tdm_slots);
@@ -721,6 +731,7 @@ struct snd_soc_jack_gpio {
/* private: */
struct snd_soc_jack *jack;
struct delayed_work work;
+ struct notifier_block pm_notifier;
struct gpio_desc *desc;
void *data;
@@ -812,7 +823,6 @@ struct snd_soc_component {
unsigned int ignore_pmdown_time:1; /* pmdown_time is ignored at stop */
unsigned int registered_as_component:1;
- unsigned int auxiliary:1; /* for auxiliary component of the card */
unsigned int suspended:1; /* is in suspend PM state */
struct list_head list;
@@ -913,6 +923,8 @@ struct snd_soc_codec_driver {
int clk_id, int source, unsigned int freq, int dir);
int (*set_pll)(struct snd_soc_codec *codec, int pll_id, int source,
unsigned int freq_in, unsigned int freq_out);
+ int (*set_jack)(struct snd_soc_codec *codec,
+ struct snd_soc_jack *jack, void *data);
/* codec IO */
struct regmap *(*get_regmap)(struct device *);
diff --git a/include/trace/events/block.h b/include/trace/events/block.h
index a88ed13446ff..d0dbe60d8a6d 100644
--- a/include/trace/events/block.h
+++ b/include/trace/events/block.h
@@ -61,7 +61,16 @@ DEFINE_EVENT(block_buffer, block_dirty_buffer,
TP_ARGS(bh)
);
-DECLARE_EVENT_CLASS(block_rq_with_error,
+/**
+ * block_rq_requeue - place block IO request back on a queue
+ * @q: queue holding operation
+ * @rq: block IO operation request
+ *
+ * The block operation request @rq is being placed back into queue
+ * @q. For some reason the request was not completed and needs to be
+ * put back in the queue.
+ */
+TRACE_EVENT(block_rq_requeue,
TP_PROTO(struct request_queue *q, struct request *rq),
@@ -71,7 +80,6 @@ DECLARE_EVENT_CLASS(block_rq_with_error,
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
- __field( int, errors )
__array( char, rwbs, RWBS_LEN )
__dynamic_array( char, cmd, 1 )
),
@@ -80,7 +88,6 @@ DECLARE_EVENT_CLASS(block_rq_with_error,
__entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
__entry->sector = blk_rq_trace_sector(rq);
__entry->nr_sector = blk_rq_trace_nr_sectors(rq);
- __entry->errors = rq->errors;
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, blk_rq_bytes(rq));
__get_str(cmd)[0] = '\0';
@@ -90,46 +97,13 @@ DECLARE_EVENT_CLASS(block_rq_with_error,
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->rwbs, __get_str(cmd),
(unsigned long long)__entry->sector,
- __entry->nr_sector, __entry->errors)
-);
-
-/**
- * block_rq_abort - abort block operation request
- * @q: queue containing the block operation request
- * @rq: block IO operation request
- *
- * Called immediately after pending block IO operation request @rq in
- * queue @q is aborted. The fields in the operation request @rq
- * can be examined to determine which device and sectors the pending
- * operation would access.
- */
-DEFINE_EVENT(block_rq_with_error, block_rq_abort,
-
- TP_PROTO(struct request_queue *q, struct request *rq),
-
- TP_ARGS(q, rq)
-);
-
-/**
- * block_rq_requeue - place block IO request back on a queue
- * @q: queue holding operation
- * @rq: block IO operation request
- *
- * The block operation request @rq is being placed back into queue
- * @q. For some reason the request was not completed and needs to be
- * put back in the queue.
- */
-DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
-
- TP_PROTO(struct request_queue *q, struct request *rq),
-
- TP_ARGS(q, rq)
+ __entry->nr_sector, 0)
);
/**
* block_rq_complete - block IO operation completed by device driver
- * @q: queue containing the block operation request
* @rq: block operations request
+ * @error: status code
* @nr_bytes: number of completed bytes
*
* The block_rq_complete tracepoint event indicates that some portion
@@ -140,16 +114,15 @@ DEFINE_EVENT(block_rq_with_error, block_rq_requeue,
*/
TRACE_EVENT(block_rq_complete,
- TP_PROTO(struct request_queue *q, struct request *rq,
- unsigned int nr_bytes),
+ TP_PROTO(struct request *rq, int error, unsigned int nr_bytes),
- TP_ARGS(q, rq, nr_bytes),
+ TP_ARGS(rq, error, nr_bytes),
TP_STRUCT__entry(
__field( dev_t, dev )
__field( sector_t, sector )
__field( unsigned int, nr_sector )
- __field( int, errors )
+ __field( int, error )
__array( char, rwbs, RWBS_LEN )
__dynamic_array( char, cmd, 1 )
),
@@ -158,7 +131,7 @@ TRACE_EVENT(block_rq_complete,
__entry->dev = rq->rq_disk ? disk_devt(rq->rq_disk) : 0;
__entry->sector = blk_rq_pos(rq);
__entry->nr_sector = nr_bytes >> 9;
- __entry->errors = rq->errors;
+ __entry->error = error;
blk_fill_rwbs(__entry->rwbs, rq->cmd_flags, nr_bytes);
__get_str(cmd)[0] = '\0';
@@ -168,7 +141,7 @@ TRACE_EVENT(block_rq_complete,
MAJOR(__entry->dev), MINOR(__entry->dev),
__entry->rwbs, __get_str(cmd),
(unsigned long long)__entry->sector,
- __entry->nr_sector, __entry->errors)
+ __entry->nr_sector, __entry->error)
);
DECLARE_EVENT_CLASS(block_rq,
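With the new prototype the completion path passes the status code explicitly instead of reading the removed rq->errors field. A hedged sketch of a call site; the wrapper function is illustrative only:

	/* Illustrative sketch only -- not from the patch. */
	#include <linux/blkdev.h>
	#include <trace/events/block.h>

	static void example_account_completion(struct request *rq, int error,
					       unsigned int nr_bytes)
	{
		/* Tracepoint now takes the request, its error code and byte count. */
		trace_block_rq_complete(rq, error, nr_bytes);
	}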
diff --git a/include/trace/events/bpf.h b/include/trace/events/bpf.h
index c3a53fd47ff1..52c8425d144b 100644
--- a/include/trace/events/bpf.h
+++ b/include/trace/events/bpf.h
@@ -321,11 +321,14 @@ TRACE_EVENT(bpf_map_next_key,
__dynamic_array(u8, key, map->key_size)
__dynamic_array(u8, nxt, map->key_size)
__field(bool, key_trunc)
+ __field(bool, key_null)
__field(int, ufd)
),
TP_fast_assign(
- memcpy(__get_dynamic_array(key), key, map->key_size);
+ if (key)
+ memcpy(__get_dynamic_array(key), key, map->key_size);
+ __entry->key_null = !key;
memcpy(__get_dynamic_array(nxt), key_next, map->key_size);
__entry->type = map->map_type;
__entry->key_len = min(map->key_size, 16U);
@@ -336,8 +339,9 @@ TRACE_EVENT(bpf_map_next_key,
TP_printk("map type=%s ufd=%d key=[%s%s] next=[%s%s]",
__print_symbolic(__entry->type, __MAP_TYPE_SYM_TAB),
__entry->ufd,
- __print_hex(__get_dynamic_array(key), __entry->key_len),
- __entry->key_trunc ? " ..." : "",
+ __entry->key_null ? "NULL" : __print_hex(__get_dynamic_array(key),
+ __entry->key_len),
+ __entry->key_trunc && !__entry->key_null ? " ..." : "",
__print_hex(__get_dynamic_array(nxt), __entry->key_len),
__entry->key_trunc ? " ..." : "")
);
diff --git a/include/trace/events/btrfs.h b/include/trace/events/btrfs.h
index a3c3cab643a9..e37973526153 100644
--- a/include/trace/events/btrfs.h
+++ b/include/trace/events/btrfs.h
@@ -12,6 +12,7 @@ struct btrfs_root;
struct btrfs_fs_info;
struct btrfs_inode;
struct extent_map;
+struct btrfs_file_extent_item;
struct btrfs_ordered_extent;
struct btrfs_delayed_ref_node;
struct btrfs_delayed_tree_ref;
@@ -24,6 +25,7 @@ struct extent_buffer;
struct btrfs_work;
struct __btrfs_workqueue;
struct btrfs_qgroup_extent_record;
+struct btrfs_qgroup;
#define show_ref_type(type) \
__print_symbolic(type, \
@@ -54,6 +56,12 @@ struct btrfs_qgroup_extent_record;
(obj >= BTRFS_ROOT_TREE_OBJECTID && \
obj <= BTRFS_QUOTA_TREE_OBJECTID)) ? __show_root_type(obj) : "-"
+#define show_fi_type(type) \
+ __print_symbolic(type, \
+ { BTRFS_FILE_EXTENT_INLINE, "INLINE" }, \
+ { BTRFS_FILE_EXTENT_REG, "REG" }, \
+ { BTRFS_FILE_EXTENT_PREALLOC, "PREALLOC"})
+
#define BTRFS_GROUP_FLAGS \
{ BTRFS_BLOCK_GROUP_DATA, "DATA"}, \
{ BTRFS_BLOCK_GROUP_SYSTEM, "SYSTEM"}, \
@@ -213,7 +221,7 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
__entry->block_start = map->block_start;
__entry->block_len = map->block_len;
__entry->flags = map->flags;
- __entry->refs = atomic_read(&map->refs);
+ __entry->refs = refcount_read(&map->refs);
__entry->compress_type = map->compress_type;
),
@@ -232,6 +240,138 @@ TRACE_EVENT_CONDITION(btrfs_get_extent,
__entry->refs, __entry->compress_type)
);
+/* file extent item */
+DECLARE_EVENT_CLASS(btrfs__file_extent_item_regular,
+
+ TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
+ struct btrfs_file_extent_item *fi, u64 start),
+
+ TP_ARGS(bi, l, fi, start),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, root_obj )
+ __field( u64, ino )
+ __field( loff_t, isize )
+ __field( u64, disk_isize )
+ __field( u64, num_bytes )
+ __field( u64, ram_bytes )
+ __field( u64, disk_bytenr )
+ __field( u64, disk_num_bytes )
+ __field( u64, extent_offset )
+ __field( u8, extent_type )
+ __field( u8, compression )
+ __field( u64, extent_start )
+ __field( u64, extent_end )
+ ),
+
+ TP_fast_assign_btrfs(bi->root->fs_info,
+ __entry->root_obj = bi->root->objectid;
+ __entry->ino = btrfs_ino(bi);
+ __entry->isize = bi->vfs_inode.i_size;
+ __entry->disk_isize = bi->disk_i_size;
+ __entry->num_bytes = btrfs_file_extent_num_bytes(l, fi);
+ __entry->ram_bytes = btrfs_file_extent_ram_bytes(l, fi);
+ __entry->disk_bytenr = btrfs_file_extent_disk_bytenr(l, fi);
+ __entry->disk_num_bytes = btrfs_file_extent_disk_num_bytes(l, fi);
+ __entry->extent_offset = btrfs_file_extent_offset(l, fi);
+ __entry->extent_type = btrfs_file_extent_type(l, fi);
+ __entry->compression = btrfs_file_extent_compression(l, fi);
+ __entry->extent_start = start;
+ __entry->extent_end = (start + __entry->num_bytes);
+ ),
+
+ TP_printk_btrfs(
+ "root=%llu(%s) inode=%llu size=%llu disk_isize=%llu "
+ "file extent range=[%llu %llu] "
+ "(num_bytes=%llu ram_bytes=%llu disk_bytenr=%llu "
+ "disk_num_bytes=%llu extent_offset=%llu type=%s "
+ "compression=%u",
+ show_root_type(__entry->root_obj), __entry->ino,
+ __entry->isize,
+ __entry->disk_isize, __entry->extent_start,
+ __entry->extent_end, __entry->num_bytes, __entry->ram_bytes,
+ __entry->disk_bytenr, __entry->disk_num_bytes,
+ __entry->extent_offset, show_fi_type(__entry->extent_type),
+ __entry->compression)
+);
+
+DECLARE_EVENT_CLASS(
+ btrfs__file_extent_item_inline,
+
+ TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
+ struct btrfs_file_extent_item *fi, int slot, u64 start),
+
+ TP_ARGS(bi, l, fi, slot, start),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, root_obj )
+ __field( u64, ino )
+ __field( loff_t, isize )
+ __field( u64, disk_isize )
+ __field( u8, extent_type )
+ __field( u8, compression )
+ __field( u64, extent_start )
+ __field( u64, extent_end )
+ ),
+
+ TP_fast_assign_btrfs(
+ bi->root->fs_info,
+ __entry->root_obj = bi->root->objectid;
+ __entry->ino = btrfs_ino(bi);
+ __entry->isize = bi->vfs_inode.i_size;
+ __entry->disk_isize = bi->disk_i_size;
+ __entry->extent_type = btrfs_file_extent_type(l, fi);
+ __entry->compression = btrfs_file_extent_compression(l, fi);
+ __entry->extent_start = start;
+ __entry->extent_end = (start + btrfs_file_extent_inline_len(l, slot, fi));
+ ),
+
+ TP_printk_btrfs(
+ "root=%llu(%s) inode=%llu size=%llu disk_isize=%llu "
+ "file extent range=[%llu %llu] "
+ "extent_type=%s compression=%u",
+ show_root_type(__entry->root_obj), __entry->ino, __entry->isize,
+ __entry->disk_isize, __entry->extent_start,
+ __entry->extent_end, show_fi_type(__entry->extent_type),
+ __entry->compression)
+);
+
+DEFINE_EVENT(
+ btrfs__file_extent_item_regular, btrfs_get_extent_show_fi_regular,
+
+ TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
+ struct btrfs_file_extent_item *fi, u64 start),
+
+ TP_ARGS(bi, l, fi, start)
+);
+
+DEFINE_EVENT(
+ btrfs__file_extent_item_regular, btrfs_truncate_show_fi_regular,
+
+ TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
+ struct btrfs_file_extent_item *fi, u64 start),
+
+ TP_ARGS(bi, l, fi, start)
+);
+
+DEFINE_EVENT(
+ btrfs__file_extent_item_inline, btrfs_get_extent_show_fi_inline,
+
+ TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
+ struct btrfs_file_extent_item *fi, int slot, u64 start),
+
+ TP_ARGS(bi, l, fi, slot, start)
+);
+
+DEFINE_EVENT(
+ btrfs__file_extent_item_inline, btrfs_truncate_show_fi_inline,
+
+ TP_PROTO(struct btrfs_inode *bi, struct extent_buffer *l,
+ struct btrfs_file_extent_item *fi, int slot, u64 start),
+
+ TP_ARGS(bi, l, fi, slot, start)
+);
+
#define show_ordered_flags(flags) \
__print_flags(flags, "|", \
{ (1 << BTRFS_ORDERED_IO_DONE), "IO_DONE" }, \
@@ -275,7 +415,7 @@ DECLARE_EVENT_CLASS(btrfs__ordered_extent,
__entry->bytes_left = ordered->bytes_left;
__entry->flags = ordered->flags;
__entry->compress_type = ordered->compress_type;
- __entry->refs = atomic_read(&ordered->refs);
+ __entry->refs = refcount_read(&ordered->refs);
__entry->root_objectid =
BTRFS_I(inode)->root->root_key.objectid;
__entry->truncated_len = ordered->truncated_len;
@@ -1475,6 +1615,49 @@ TRACE_EVENT(qgroup_update_counters,
__entry->cur_new_count)
);
+TRACE_EVENT(qgroup_update_reserve,
+
+ TP_PROTO(struct btrfs_fs_info *fs_info, struct btrfs_qgroup *qgroup,
+ s64 diff),
+
+ TP_ARGS(fs_info, qgroup, diff),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, qgid )
+ __field( u64, cur_reserved )
+ __field( s64, diff )
+ ),
+
+ TP_fast_assign_btrfs(fs_info,
+ __entry->qgid = qgroup->qgroupid;
+ __entry->cur_reserved = qgroup->reserved;
+ __entry->diff = diff;
+ ),
+
+ TP_printk_btrfs("qgid=%llu cur_reserved=%llu diff=%lld",
+ __entry->qgid, __entry->cur_reserved, __entry->diff)
+);
+
+TRACE_EVENT(qgroup_meta_reserve,
+
+ TP_PROTO(struct btrfs_root *root, s64 diff),
+
+ TP_ARGS(root, diff),
+
+ TP_STRUCT__entry_btrfs(
+ __field( u64, refroot )
+ __field( s64, diff )
+ ),
+
+ TP_fast_assign_btrfs(root->fs_info,
+ __entry->refroot = root->objectid;
+ __entry->diff = diff;
+ ),
+
+ TP_printk_btrfs("refroot=%llu(%s) diff=%lld",
+ show_root_type(__entry->refroot), __entry->diff)
+);
+
#endif /* _TRACE_BTRFS_H */
/* This part must be outside protection */
diff --git a/include/trace/events/ext4.h b/include/trace/events/ext4.h
index 09c71e9aaebf..dfae175ddebc 100644
--- a/include/trace/events/ext4.h
+++ b/include/trace/events/ext4.h
@@ -15,6 +15,7 @@ struct ext4_inode_info;
struct mpage_da_data;
struct ext4_map_blocks;
struct extent_status;
+struct ext4_fsmap;
#define EXT4_I(inode) (container_of(inode, struct ext4_inode_info, vfs_inode))
@@ -2529,6 +2530,79 @@ TRACE_EVENT(ext4_es_shrink,
__entry->scan_time, __entry->nr_skipped, __entry->retried)
);
+/* fsmap traces */
+DECLARE_EVENT_CLASS(ext4_fsmap_class,
+ TP_PROTO(struct super_block *sb, u32 keydev, u32 agno, u64 bno, u64 len,
+ u64 owner),
+ TP_ARGS(sb, keydev, agno, bno, len, owner),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(dev_t, keydev)
+ __field(u32, agno)
+ __field(u64, bno)
+ __field(u64, len)
+ __field(u64, owner)
+ ),
+ TP_fast_assign(
+ __entry->dev = sb->s_bdev->bd_dev;
+ __entry->keydev = new_decode_dev(keydev);
+ __entry->agno = agno;
+ __entry->bno = bno;
+ __entry->len = len;
+ __entry->owner = owner;
+ ),
+ TP_printk("dev %d:%d keydev %d:%d agno %u bno %llu len %llu owner %lld\n",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ MAJOR(__entry->keydev), MINOR(__entry->keydev),
+ __entry->agno,
+ __entry->bno,
+ __entry->len,
+ __entry->owner)
+)
+#define DEFINE_FSMAP_EVENT(name) \
+DEFINE_EVENT(ext4_fsmap_class, name, \
+ TP_PROTO(struct super_block *sb, u32 keydev, u32 agno, u64 bno, u64 len, \
+ u64 owner), \
+ TP_ARGS(sb, keydev, agno, bno, len, owner))
+DEFINE_FSMAP_EVENT(ext4_fsmap_low_key);
+DEFINE_FSMAP_EVENT(ext4_fsmap_high_key);
+DEFINE_FSMAP_EVENT(ext4_fsmap_mapping);
+
+DECLARE_EVENT_CLASS(ext4_getfsmap_class,
+ TP_PROTO(struct super_block *sb, struct ext4_fsmap *fsmap),
+ TP_ARGS(sb, fsmap),
+ TP_STRUCT__entry(
+ __field(dev_t, dev)
+ __field(dev_t, keydev)
+ __field(u64, block)
+ __field(u64, len)
+ __field(u64, owner)
+ __field(u64, flags)
+ ),
+ TP_fast_assign(
+ __entry->dev = sb->s_bdev->bd_dev;
+ __entry->keydev = new_decode_dev(fsmap->fmr_device);
+ __entry->block = fsmap->fmr_physical;
+ __entry->len = fsmap->fmr_length;
+ __entry->owner = fsmap->fmr_owner;
+ __entry->flags = fsmap->fmr_flags;
+ ),
+ TP_printk("dev %d:%d keydev %d:%d block %llu len %llu owner %lld flags 0x%llx\n",
+ MAJOR(__entry->dev), MINOR(__entry->dev),
+ MAJOR(__entry->keydev), MINOR(__entry->keydev),
+ __entry->block,
+ __entry->len,
+ __entry->owner,
+ __entry->flags)
+)
+#define DEFINE_GETFSMAP_EVENT(name) \
+DEFINE_EVENT(ext4_getfsmap_class, name, \
+ TP_PROTO(struct super_block *sb, struct ext4_fsmap *fsmap), \
+ TP_ARGS(sb, fsmap))
+DEFINE_GETFSMAP_EVENT(ext4_getfsmap_low_key);
+DEFINE_GETFSMAP_EVENT(ext4_getfsmap_high_key);
+DEFINE_GETFSMAP_EVENT(ext4_getfsmap_mapping);
+
#endif /* _TRACE_EXT4_H */
/* This part must be outside protection */
diff --git a/include/trace/events/f2fs.h b/include/trace/events/f2fs.h
index c80fcad0a6c9..15da88c5c3a4 100644
--- a/include/trace/events/f2fs.h
+++ b/include/trace/events/f2fs.h
@@ -15,6 +15,8 @@ TRACE_DEFINE_ENUM(META);
TRACE_DEFINE_ENUM(META_FLUSH);
TRACE_DEFINE_ENUM(INMEM);
TRACE_DEFINE_ENUM(INMEM_DROP);
+TRACE_DEFINE_ENUM(INMEM_INVALIDATE);
+TRACE_DEFINE_ENUM(INMEM_REVOKE);
TRACE_DEFINE_ENUM(IPU);
TRACE_DEFINE_ENUM(OPU);
TRACE_DEFINE_ENUM(CURSEG_HOT_DATA);
@@ -42,6 +44,7 @@ TRACE_DEFINE_ENUM(CP_FASTBOOT);
TRACE_DEFINE_ENUM(CP_SYNC);
TRACE_DEFINE_ENUM(CP_RECOVERY);
TRACE_DEFINE_ENUM(CP_DISCARD);
+TRACE_DEFINE_ENUM(CP_TRIMMED);
#define show_block_type(type) \
__print_symbolic(type, \
@@ -51,12 +54,13 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
{ META_FLUSH, "META_FLUSH" }, \
{ INMEM, "INMEM" }, \
{ INMEM_DROP, "INMEM_DROP" }, \
+ { INMEM_INVALIDATE, "INMEM_INVALIDATE" }, \
{ INMEM_REVOKE, "INMEM_REVOKE" }, \
{ IPU, "IN-PLACE" }, \
{ OPU, "OUT-OF-PLACE" })
-#define F2FS_OP_FLAGS (REQ_RAHEAD | REQ_SYNC | REQ_PREFLUSH | REQ_META |\
- REQ_PRIO)
+#define F2FS_OP_FLAGS (REQ_RAHEAD | REQ_SYNC | REQ_META | REQ_PRIO | \
+ REQ_PREFLUSH | REQ_FUA)
#define F2FS_BIO_FLAG_MASK(t) (t & F2FS_OP_FLAGS)
#define show_bio_type(op,op_flags) show_bio_op(op), \
@@ -75,16 +79,13 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
{ REQ_OP_WRITE_ZEROES, "WRITE_ZEROES" })
#define show_bio_op_flags(flags) \
- __print_symbolic(F2FS_BIO_FLAG_MASK(flags), \
- { REQ_RAHEAD, "(RA)" }, \
- { REQ_SYNC, "(S)" }, \
- { REQ_SYNC | REQ_PRIO, "(SP)" }, \
- { REQ_META, "(M)" }, \
- { REQ_META | REQ_PRIO, "(MP)" }, \
- { REQ_SYNC | REQ_PREFLUSH , "(SF)" }, \
- { REQ_SYNC | REQ_META | REQ_PRIO, "(SMP)" }, \
- { REQ_PREFLUSH | REQ_META | REQ_PRIO, "(FMP)" }, \
- { 0, " \b" })
+ __print_flags(F2FS_BIO_FLAG_MASK(flags), "|", \
+ { REQ_RAHEAD, "R" }, \
+ { REQ_SYNC, "S" }, \
+ { REQ_META, "M" }, \
+ { REQ_PRIO, "P" }, \
+ { REQ_PREFLUSH, "PF" }, \
+ { REQ_FUA, "FUA" })
#define show_data_type(type) \
__print_symbolic(type, \
@@ -117,12 +118,14 @@ TRACE_DEFINE_ENUM(CP_DISCARD);
{ GC_CB, "Cost-Benefit" })
#define show_cpreason(type) \
- __print_symbolic(type, \
+ __print_flags(type, "|", \
{ CP_UMOUNT, "Umount" }, \
{ CP_FASTBOOT, "Fastboot" }, \
{ CP_SYNC, "Sync" }, \
{ CP_RECOVERY, "Recovery" }, \
- { CP_DISCARD, "Discard" })
+ { CP_DISCARD, "Discard" }, \
+ { CP_TRIMMED, "Trimmed" })
struct victim_sel_policy;
struct f2fs_map_blocks;
@@ -769,7 +772,7 @@ DECLARE_EVENT_CLASS(f2fs__submit_page_bio,
),
TP_printk("dev = (%d,%d), ino = %lu, page_index = 0x%lx, "
- "oldaddr = 0x%llx, newaddr = 0x%llx, rw = %s%s, type = %s",
+ "oldaddr = 0x%llx, newaddr = 0x%llx, rw = %s(%s), type = %s",
show_dev_ino(__entry),
(unsigned long)__entry->index,
(unsigned long long)__entry->old_blkaddr,
@@ -822,7 +825,7 @@ DECLARE_EVENT_CLASS(f2fs__bio,
__entry->size = bio->bi_iter.bi_size;
),
- TP_printk("dev = (%d,%d)/(%d,%d), rw = %s%s, %s, sector = %lld, size = %u",
+ TP_printk("dev = (%d,%d)/(%d,%d), rw = %s(%s), %s, sector = %lld, size = %u",
show_dev(__entry->target),
show_dev(__entry->dev),
show_bio_type(__entry->op, __entry->op_flags),
@@ -1126,7 +1129,7 @@ TRACE_EVENT(f2fs_write_checkpoint,
__entry->msg)
);
-TRACE_EVENT(f2fs_issue_discard,
+DECLARE_EVENT_CLASS(f2fs_discard,
TP_PROTO(struct block_device *dev, block_t blkstart, block_t blklen),
@@ -1150,6 +1153,20 @@ TRACE_EVENT(f2fs_issue_discard,
(unsigned long long)__entry->blklen)
);
+DEFINE_EVENT(f2fs_discard, f2fs_queue_discard,
+
+ TP_PROTO(struct block_device *dev, block_t blkstart, block_t blklen),
+
+ TP_ARGS(dev, blkstart, blklen)
+);
+
+DEFINE_EVENT(f2fs_discard, f2fs_issue_discard,
+
+ TP_PROTO(struct block_device *dev, block_t blkstart, block_t blklen),
+
+ TP_ARGS(dev, blkstart, blklen)
+);
+
TRACE_EVENT(f2fs_issue_reset_zone,
TP_PROTO(struct block_device *dev, block_t blkstart),
@@ -1174,26 +1191,29 @@ TRACE_EVENT(f2fs_issue_reset_zone,
TRACE_EVENT(f2fs_issue_flush,
TP_PROTO(struct block_device *dev, unsigned int nobarrier,
- unsigned int flush_merge),
+ unsigned int flush_merge, int ret),
- TP_ARGS(dev, nobarrier, flush_merge),
+ TP_ARGS(dev, nobarrier, flush_merge, ret),
TP_STRUCT__entry(
__field(dev_t, dev)
__field(unsigned int, nobarrier)
__field(unsigned int, flush_merge)
+ __field(int, ret)
),
TP_fast_assign(
__entry->dev = dev->bd_dev;
__entry->nobarrier = nobarrier;
__entry->flush_merge = flush_merge;
+ __entry->ret = ret;
),
- TP_printk("dev = (%d,%d), %s %s",
+ TP_printk("dev = (%d,%d), %s %s, ret = %d",
show_dev(__entry->dev),
__entry->nobarrier ? "skip (nobarrier)" : "issue",
- __entry->flush_merge ? " with flush_merge" : "")
+ __entry->flush_merge ? " with flush_merge" : "",
+ __entry->ret)
);
TRACE_EVENT(f2fs_lookup_extent_tree_start,
diff --git a/include/trace/events/fs_dax.h b/include/trace/events/fs_dax.h
index c566ddc87f73..08bb3ed18dcc 100644
--- a/include/trace/events/fs_dax.h
+++ b/include/trace/events/fs_dax.h
@@ -150,6 +150,136 @@ DEFINE_EVENT(dax_pmd_insert_mapping_class, name, \
DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping);
DEFINE_PMD_INSERT_MAPPING_EVENT(dax_pmd_insert_mapping_fallback);
+DECLARE_EVENT_CLASS(dax_pte_fault_class,
+ TP_PROTO(struct inode *inode, struct vm_fault *vmf, int result),
+ TP_ARGS(inode, vmf, result),
+ TP_STRUCT__entry(
+ __field(unsigned long, ino)
+ __field(unsigned long, vm_flags)
+ __field(unsigned long, address)
+ __field(pgoff_t, pgoff)
+ __field(dev_t, dev)
+ __field(unsigned int, flags)
+ __field(int, result)
+ ),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->vm_flags = vmf->vma->vm_flags;
+ __entry->address = vmf->address;
+ __entry->flags = vmf->flags;
+ __entry->pgoff = vmf->pgoff;
+ __entry->result = result;
+ ),
+ TP_printk("dev %d:%d ino %#lx %s %s address %#lx pgoff %#lx %s",
+ MAJOR(__entry->dev),
+ MINOR(__entry->dev),
+ __entry->ino,
+ __entry->vm_flags & VM_SHARED ? "shared" : "private",
+ __print_flags(__entry->flags, "|", FAULT_FLAG_TRACE),
+ __entry->address,
+ __entry->pgoff,
+ __print_flags(__entry->result, "|", VM_FAULT_RESULT_TRACE)
+ )
+)
+
+#define DEFINE_PTE_FAULT_EVENT(name) \
+DEFINE_EVENT(dax_pte_fault_class, name, \
+ TP_PROTO(struct inode *inode, struct vm_fault *vmf, int result), \
+ TP_ARGS(inode, vmf, result))
+
+DEFINE_PTE_FAULT_EVENT(dax_pte_fault);
+DEFINE_PTE_FAULT_EVENT(dax_pte_fault_done);
+DEFINE_PTE_FAULT_EVENT(dax_pfn_mkwrite_no_entry);
+DEFINE_PTE_FAULT_EVENT(dax_pfn_mkwrite);
+DEFINE_PTE_FAULT_EVENT(dax_load_hole);
+
+TRACE_EVENT(dax_insert_mapping,
+ TP_PROTO(struct inode *inode, struct vm_fault *vmf, void *radix_entry),
+ TP_ARGS(inode, vmf, radix_entry),
+ TP_STRUCT__entry(
+ __field(unsigned long, ino)
+ __field(unsigned long, vm_flags)
+ __field(unsigned long, address)
+ __field(void *, radix_entry)
+ __field(dev_t, dev)
+ __field(int, write)
+ ),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->vm_flags = vmf->vma->vm_flags;
+ __entry->address = vmf->address;
+ __entry->write = vmf->flags & FAULT_FLAG_WRITE;
+ __entry->radix_entry = radix_entry;
+ ),
+ TP_printk("dev %d:%d ino %#lx %s %s address %#lx radix_entry %#lx",
+ MAJOR(__entry->dev),
+ MINOR(__entry->dev),
+ __entry->ino,
+ __entry->vm_flags & VM_SHARED ? "shared" : "private",
+ __entry->write ? "write" : "read",
+ __entry->address,
+ (unsigned long)__entry->radix_entry
+ )
+)
+
+DECLARE_EVENT_CLASS(dax_writeback_range_class,
+ TP_PROTO(struct inode *inode, pgoff_t start_index, pgoff_t end_index),
+ TP_ARGS(inode, start_index, end_index),
+ TP_STRUCT__entry(
+ __field(unsigned long, ino)
+ __field(pgoff_t, start_index)
+ __field(pgoff_t, end_index)
+ __field(dev_t, dev)
+ ),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->start_index = start_index;
+ __entry->end_index = end_index;
+ ),
+ TP_printk("dev %d:%d ino %#lx pgoff %#lx-%#lx",
+ MAJOR(__entry->dev),
+ MINOR(__entry->dev),
+ __entry->ino,
+ __entry->start_index,
+ __entry->end_index
+ )
+)
+
+#define DEFINE_WRITEBACK_RANGE_EVENT(name) \
+DEFINE_EVENT(dax_writeback_range_class, name, \
+ TP_PROTO(struct inode *inode, pgoff_t start_index, pgoff_t end_index),\
+ TP_ARGS(inode, start_index, end_index))
+
+DEFINE_WRITEBACK_RANGE_EVENT(dax_writeback_range);
+DEFINE_WRITEBACK_RANGE_EVENT(dax_writeback_range_done);
+
+TRACE_EVENT(dax_writeback_one,
+ TP_PROTO(struct inode *inode, pgoff_t pgoff, pgoff_t pglen),
+ TP_ARGS(inode, pgoff, pglen),
+ TP_STRUCT__entry(
+ __field(unsigned long, ino)
+ __field(pgoff_t, pgoff)
+ __field(pgoff_t, pglen)
+ __field(dev_t, dev)
+ ),
+ TP_fast_assign(
+ __entry->dev = inode->i_sb->s_dev;
+ __entry->ino = inode->i_ino;
+ __entry->pgoff = pgoff;
+ __entry->pglen = pglen;
+ ),
+ TP_printk("dev %d:%d ino %#lx pgoff %#lx pglen %#lx",
+ MAJOR(__entry->dev),
+ MINOR(__entry->dev),
+ __entry->ino,
+ __entry->pgoff,
+ __entry->pglen
+ )
+)
+
#endif /* _TRACE_FS_DAX_H */
/* This part must be outside protection */
diff --git a/include/trace/events/iommu.h b/include/trace/events/iommu.h
index 2c7befb10f13..99254ed89212 100644
--- a/include/trace/events/iommu.h
+++ b/include/trace/events/iommu.h
@@ -11,7 +11,6 @@
#define _TRACE_IOMMU_H
#include <linux/tracepoint.h>
-#include <linux/pci.h>
struct device;
diff --git a/include/trace/events/rxrpc.h b/include/trace/events/rxrpc.h
index 39123c06a566..29a3d53a4015 100644
--- a/include/trace/events/rxrpc.h
+++ b/include/trace/events/rxrpc.h
@@ -683,6 +683,57 @@ TRACE_EVENT(rxrpc_rx_ack,
__entry->n_acks)
);
+TRACE_EVENT(rxrpc_rx_abort,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
+ u32 abort_code),
+
+ TP_ARGS(call, serial, abort_code),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(rxrpc_serial_t, serial )
+ __field(u32, abort_code )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->serial = serial;
+ __entry->abort_code = abort_code;
+ ),
+
+ TP_printk("c=%p ABORT %08x ac=%d",
+ __entry->call,
+ __entry->serial,
+ __entry->abort_code)
+ );
+
+TRACE_EVENT(rxrpc_rx_rwind_change,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
+ u32 rwind, bool wake),
+
+ TP_ARGS(call, serial, rwind, wake),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(rxrpc_serial_t, serial )
+ __field(u32, rwind )
+ __field(bool, wake )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->serial = serial;
+ __entry->rwind = rwind;
+ __entry->wake = wake;
+ ),
+
+ TP_printk("c=%p %08x rw=%u%s",
+ __entry->call,
+ __entry->serial,
+ __entry->rwind,
+ __entry->wake ? " wake" : "")
+ );
+
TRACE_EVENT(rxrpc_tx_data,
TP_PROTO(struct rxrpc_call *call, rxrpc_seq_t seq,
rxrpc_serial_t serial, u8 flags, bool retrans, bool lose),
@@ -1087,6 +1138,56 @@ TRACE_EVENT(rxrpc_improper_term,
__entry->abort_code)
);
+TRACE_EVENT(rxrpc_rx_eproto,
+ TP_PROTO(struct rxrpc_call *call, rxrpc_serial_t serial,
+ const char *why),
+
+ TP_ARGS(call, serial, why),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(rxrpc_serial_t, serial )
+ __field(const char *, why )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->serial = serial;
+ __entry->why = why;
+ ),
+
+ TP_printk("c=%p EPROTO %08x %s",
+ __entry->call,
+ __entry->serial,
+ __entry->why)
+ );
+
+TRACE_EVENT(rxrpc_connect_call,
+ TP_PROTO(struct rxrpc_call *call),
+
+ TP_ARGS(call),
+
+ TP_STRUCT__entry(
+ __field(struct rxrpc_call *, call )
+ __field(unsigned long, user_call_ID )
+ __field(u32, cid )
+ __field(u32, call_id )
+ ),
+
+ TP_fast_assign(
+ __entry->call = call;
+ __entry->user_call_ID = call->user_call_ID;
+ __entry->cid = call->cid;
+ __entry->call_id = call->call_id;
+ ),
+
+ TP_printk("c=%p u=%p %08x:%08x",
+ __entry->call,
+ (void *)__entry->user_call_ID,
+ __entry->cid,
+ __entry->call_id)
+ );
+
#endif /* _TRACE_RXRPC_H */
/* This part must be outside protection */
diff --git a/include/trace/events/sched.h b/include/trace/events/sched.h
index 9e3ef6c99e4b..ae1409ffe99a 100644
--- a/include/trace/events/sched.h
+++ b/include/trace/events/sched.h
@@ -70,7 +70,7 @@ DECLARE_EVENT_CLASS(sched_wakeup_template,
TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
- __entry->prio = p->prio;
+ __entry->prio = p->prio; /* XXX SCHED_DEADLINE */
__entry->success = 1; /* rudiment, kill when possible */
__entry->target_cpu = task_cpu(p);
),
@@ -147,6 +147,7 @@ TRACE_EVENT(sched_switch,
memcpy(__entry->prev_comm, prev->comm, TASK_COMM_LEN);
__entry->next_pid = next->pid;
__entry->next_prio = next->prio;
+ /* XXX SCHED_DEADLINE */
),
TP_printk("prev_comm=%s prev_pid=%d prev_prio=%d prev_state=%s%s ==> next_comm=%s next_pid=%d next_prio=%d",
@@ -181,7 +182,7 @@ TRACE_EVENT(sched_migrate_task,
TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
- __entry->prio = p->prio;
+ __entry->prio = p->prio; /* XXX SCHED_DEADLINE */
__entry->orig_cpu = task_cpu(p);
__entry->dest_cpu = dest_cpu;
),
@@ -206,7 +207,7 @@ DECLARE_EVENT_CLASS(sched_process_template,
TP_fast_assign(
memcpy(__entry->comm, p->comm, TASK_COMM_LEN);
__entry->pid = p->pid;
- __entry->prio = p->prio;
+ __entry->prio = p->prio; /* XXX SCHED_DEADLINE */
),
TP_printk("comm=%s pid=%d prio=%d",
@@ -253,7 +254,7 @@ TRACE_EVENT(sched_process_wait,
TP_fast_assign(
memcpy(__entry->comm, current->comm, TASK_COMM_LEN);
__entry->pid = pid_nr(pid);
- __entry->prio = current->prio;
+ __entry->prio = current->prio; /* XXX SCHED_DEADLINE */
),
TP_printk("comm=%s pid=%d prio=%d",
@@ -413,9 +414,9 @@ DEFINE_EVENT(sched_stat_runtime, sched_stat_runtime,
*/
TRACE_EVENT(sched_pi_setprio,
- TP_PROTO(struct task_struct *tsk, int newprio),
+ TP_PROTO(struct task_struct *tsk, struct task_struct *pi_task),
- TP_ARGS(tsk, newprio),
+ TP_ARGS(tsk, pi_task),
TP_STRUCT__entry(
__array( char, comm, TASK_COMM_LEN )
@@ -428,7 +429,8 @@ TRACE_EVENT(sched_pi_setprio,
memcpy(__entry->comm, tsk->comm, TASK_COMM_LEN);
__entry->pid = tsk->pid;
__entry->oldprio = tsk->prio;
- __entry->newprio = newprio;
+ __entry->newprio = pi_task ? pi_task->prio : tsk->prio;
+ /* XXX SCHED_DEADLINE bits missing */
),
TP_printk("comm=%s pid=%d oldprio=%d newprio=%d",
diff --git a/include/trace/events/v4l2.h b/include/trace/events/v4l2.h
index ee7754c6e4a1..b3a85b3df53e 100644
--- a/include/trace/events/v4l2.h
+++ b/include/trace/events/v4l2.h
@@ -29,6 +29,7 @@
EM( V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE, "VIDEO_OUTPUT_MPLANE" ) \
EM( V4L2_BUF_TYPE_SDR_CAPTURE, "SDR_CAPTURE" ) \
EM( V4L2_BUF_TYPE_SDR_OUTPUT, "SDR_OUTPUT" ) \
+ EM( V4L2_BUF_TYPE_META_CAPTURE, "META_CAPTURE" ) \
EMe(V4L2_BUF_TYPE_PRIVATE, "PRIVATE" )
SHOW_TYPE
diff --git a/include/trace/events/xen.h b/include/trace/events/xen.h
index bce990f5a35d..31acce9019a6 100644
--- a/include/trace/events/xen.h
+++ b/include/trace/events/xen.h
@@ -241,21 +241,21 @@ TRACE_EVENT(xen_mmu_set_pud,
(int)sizeof(pudval_t) * 2, (unsigned long long)__entry->pudval)
);
-TRACE_EVENT(xen_mmu_set_pgd,
- TP_PROTO(pgd_t *pgdp, pgd_t *user_pgdp, pgd_t pgdval),
- TP_ARGS(pgdp, user_pgdp, pgdval),
+TRACE_EVENT(xen_mmu_set_p4d,
+ TP_PROTO(p4d_t *p4dp, p4d_t *user_p4dp, p4d_t p4dval),
+ TP_ARGS(p4dp, user_p4dp, p4dval),
TP_STRUCT__entry(
- __field(pgd_t *, pgdp)
- __field(pgd_t *, user_pgdp)
- __field(pgdval_t, pgdval)
- ),
- TP_fast_assign(__entry->pgdp = pgdp;
- __entry->user_pgdp = user_pgdp;
- __entry->pgdval = pgdval.pgd),
- TP_printk("pgdp %p user_pgdp %p pgdval %0*llx (raw %0*llx)",
- __entry->pgdp, __entry->user_pgdp,
- (int)sizeof(pgdval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->pgdval)),
- (int)sizeof(pgdval_t) * 2, (unsigned long long)__entry->pgdval)
+ __field(p4d_t *, p4dp)
+ __field(p4d_t *, user_p4dp)
+ __field(p4dval_t, p4dval)
+ ),
+ TP_fast_assign(__entry->p4dp = p4dp;
+ __entry->user_p4dp = user_p4dp;
+ __entry->p4dval = p4d_val(p4dval)),
+ TP_printk("p4dp %p user_p4dp %p p4dval %0*llx (raw %0*llx)",
+ __entry->p4dp, __entry->user_p4dp,
+ (int)sizeof(p4dval_t) * 2, (unsigned long long)pgd_val(native_make_pgd(__entry->p4dval)),
+ (int)sizeof(p4dval_t) * 2, (unsigned long long)__entry->p4dval)
);
TRACE_EVENT(xen_mmu_pud_clear,
diff --git a/include/uapi/asm-generic/socket.h b/include/uapi/asm-generic/socket.h
index 2c748ddad5f8..2b488565599d 100644
--- a/include/uapi/asm-generic/socket.h
+++ b/include/uapi/asm-generic/socket.h
@@ -94,4 +94,10 @@
#define SCM_TIMESTAMPING_OPT_STATS 54
+#define SO_MEMINFO 55
+
+#define SO_INCOMING_NAPI_ID 56
+
+#define SO_COOKIE 57
+
#endif /* __ASM_GENERIC_SOCKET_H */
diff --git a/include/uapi/asm-generic/unistd.h b/include/uapi/asm-generic/unistd.h
index a076cf1a3a23..061185a5eb51 100644
--- a/include/uapi/asm-generic/unistd.h
+++ b/include/uapi/asm-generic/unistd.h
@@ -194,8 +194,7 @@ __SYSCALL(__NR_quotactl, sys_quotactl)
/* fs/readdir.c */
#define __NR_getdents64 61
-#define __ARCH_WANT_COMPAT_SYS_GETDENTS64
-__SC_COMP(__NR_getdents64, sys_getdents64, compat_sys_getdents64)
+__SYSCALL(__NR_getdents64, sys_getdents64)
/* fs/read_write.c */
#define __NR3264_lseek 62
diff --git a/include/uapi/drm/amdgpu_drm.h b/include/uapi/drm/amdgpu_drm.h
index 5797283c2d79..516a9f285730 100644
--- a/include/uapi/drm/amdgpu_drm.h
+++ b/include/uapi/drm/amdgpu_drm.h
@@ -209,6 +209,7 @@ struct drm_amdgpu_gem_userptr {
__u32 handle;
};
+/* SI-CI-VI: */
/* same meaning as the GB_TILE_MODE and GL_MACRO_TILE_MODE fields */
#define AMDGPU_TILING_ARRAY_MODE_SHIFT 0
#define AMDGPU_TILING_ARRAY_MODE_MASK 0xf
@@ -227,10 +228,15 @@ struct drm_amdgpu_gem_userptr {
#define AMDGPU_TILING_NUM_BANKS_SHIFT 21
#define AMDGPU_TILING_NUM_BANKS_MASK 0x3
+/* GFX9 and later: */
+#define AMDGPU_TILING_SWIZZLE_MODE_SHIFT 0
+#define AMDGPU_TILING_SWIZZLE_MODE_MASK 0x1f
+
+/* Set/Get helpers for tiling flags. */
#define AMDGPU_TILING_SET(field, value) \
- (((value) & AMDGPU_TILING_##field##_MASK) << AMDGPU_TILING_##field##_SHIFT)
+ (((__u64)(value) & AMDGPU_TILING_##field##_MASK) << AMDGPU_TILING_##field##_SHIFT)
#define AMDGPU_TILING_GET(value, field) \
- (((value) >> AMDGPU_TILING_##field##_SHIFT) & AMDGPU_TILING_##field##_MASK)
+ (((__u64)(value) >> AMDGPU_TILING_##field##_SHIFT) & AMDGPU_TILING_##field##_MASK)
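
As a quick illustration of how these set/get helpers compose (a hedged userspace sketch; the include path and the swizzle value 9 are placeholders, not recommendations):

#include <stdint.h>
#include <drm/amdgpu_drm.h>

static uint64_t example_tiling_word(void)
{
	/* Pack a GFX9-style swizzle mode into the tiling word and read it back. */
	uint64_t tiling = AMDGPU_TILING_SET(SWIZZLE_MODE, 9);
	uint32_t mode = AMDGPU_TILING_GET(tiling, SWIZZLE_MODE);

	(void)mode;	/* mode == 9; the __u64 cast keeps wide fields from truncating */
	return tiling;
}
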
#define AMDGPU_GEM_METADATA_OP_SET_METADATA 1
#define AMDGPU_GEM_METADATA_OP_GET_METADATA 2
@@ -350,6 +356,8 @@ struct drm_amdgpu_gem_op {
#define AMDGPU_VA_OP_MAP 1
#define AMDGPU_VA_OP_UNMAP 2
+#define AMDGPU_VA_OP_CLEAR 3
+#define AMDGPU_VA_OP_REPLACE 4
/* Delay the page table update till the next CS */
#define AMDGPU_VM_DELAY_UPDATE (1 << 0)
@@ -361,6 +369,20 @@ struct drm_amdgpu_gem_op {
#define AMDGPU_VM_PAGE_WRITEABLE (1 << 2)
/* executable mapping, new for VI */
#define AMDGPU_VM_PAGE_EXECUTABLE (1 << 3)
+/* partially resident texture */
+#define AMDGPU_VM_PAGE_PRT (1 << 4)
+/* MTYPE flags use bit 5 to 8 */
+#define AMDGPU_VM_MTYPE_MASK (0xf << 5)
+/* Default MTYPE. Pre-AI must use this. Recommended for newer ASICs. */
+#define AMDGPU_VM_MTYPE_DEFAULT (0 << 5)
+/* Use NC MTYPE instead of default MTYPE */
+#define AMDGPU_VM_MTYPE_NC (1 << 5)
+/* Use WC MTYPE instead of default MTYPE */
+#define AMDGPU_VM_MTYPE_WC (2 << 5)
+/* Use CC MTYPE instead of default MTYPE */
+#define AMDGPU_VM_MTYPE_CC (3 << 5)
+/* Use UC MTYPE instead of default MTYPE */
+#define AMDGPU_VM_MTYPE_UC (4 << 5)
struct drm_amdgpu_gem_va {
/** GEM object handle */
@@ -383,7 +405,8 @@ struct drm_amdgpu_gem_va {
#define AMDGPU_HW_IP_DMA 2
#define AMDGPU_HW_IP_UVD 3
#define AMDGPU_HW_IP_VCE 4
-#define AMDGPU_HW_IP_NUM 5
+#define AMDGPU_HW_IP_UVD_ENC 5
+#define AMDGPU_HW_IP_NUM 6
#define AMDGPU_HW_IP_INSTANCE_MAX_COUNT 1
@@ -422,9 +445,12 @@ union drm_amdgpu_cs {
/* This IB should be submitted to CE */
#define AMDGPU_IB_FLAG_CE (1<<0)
-/* CE Preamble */
+/* Preamble flag, which means the IB could be dropped if no context switch */
#define AMDGPU_IB_FLAG_PREAMBLE (1<<1)
+/* Preempt flag, IB should set Pre_enb bit if PREEMPT flag detected */
+#define AMDGPU_IB_FLAG_PREEMPT (1<<2)
+
struct drm_amdgpu_cs_chunk_ib {
__u32 _pad;
/** AMDGPU_IB_FLAG_* */
@@ -500,6 +526,10 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_FW_SMC 0x0a
/* Subquery id: Query SDMA firmware version */
#define AMDGPU_INFO_FW_SDMA 0x0b
+ /* Subquery id: Query PSP SOS firmware version */
+ #define AMDGPU_INFO_FW_SOS 0x0c
+ /* Subquery id: Query PSP ASD firmware version */
+ #define AMDGPU_INFO_FW_ASD 0x0d
/* number of bytes moved for TTM migration */
#define AMDGPU_INFO_NUM_BYTES_MOVED 0x0f
/* the used VRAM size */
@@ -530,6 +560,22 @@ struct drm_amdgpu_cs_chunk_data {
#define AMDGPU_INFO_VBIOS_IMAGE 0x2
/* Query UVD handles */
#define AMDGPU_INFO_NUM_HANDLES 0x1C
+/* Query sensor related information */
+#define AMDGPU_INFO_SENSOR 0x1D
+ /* Subquery id: Query GPU shader clock */
+ #define AMDGPU_INFO_SENSOR_GFX_SCLK 0x1
+ /* Subquery id: Query GPU memory clock */
+ #define AMDGPU_INFO_SENSOR_GFX_MCLK 0x2
+ /* Subquery id: Query GPU temperature */
+ #define AMDGPU_INFO_SENSOR_GPU_TEMP 0x3
+ /* Subquery id: Query GPU load */
+ #define AMDGPU_INFO_SENSOR_GPU_LOAD 0x4
+ /* Subquery id: Query average GPU power */
+ #define AMDGPU_INFO_SENSOR_GPU_AVG_POWER 0x5
+ /* Subquery id: Query northbridge voltage */
+ #define AMDGPU_INFO_SENSOR_VDDNB 0x6
+ /* Subquery id: Query graphics voltage */
+ #define AMDGPU_INFO_SENSOR_VDDGFX 0x7
#define AMDGPU_INFO_MMR_SE_INDEX_SHIFT 0
#define AMDGPU_INFO_MMR_SE_INDEX_MASK 0xff
@@ -593,6 +639,10 @@ struct drm_amdgpu_info {
__u32 type;
__u32 offset;
} vbios_info;
+
+ struct {
+ __u32 type;
+ } sensor_info;
};
};
@@ -704,6 +754,16 @@ struct drm_amdgpu_info_device {
__u32 vram_bit_width;
/* vce harvesting instance */
__u32 vce_harvest_config;
+ /* gfx double offchip LDS buffers */
+ __u32 gc_double_offchip_lds_buf;
+ /* NGG Primitive Buffer */
+ __u64 prim_buf_gpu_addr;
+ /* NGG Position Buffer */
+ __u64 pos_buf_gpu_addr;
+ /* NGG Control Sideband */
+ __u64 cntl_sb_buf_gpu_addr;
+ /* NGG Parameter Cache */
+ __u64 param_buf_gpu_addr;
};
struct drm_amdgpu_info_hw_ip {
@@ -755,6 +815,7 @@ struct drm_amdgpu_info_vce_clock_table {
#define AMDGPU_FAMILY_KV 125 /* Kaveri, Kabini, Mullins */
#define AMDGPU_FAMILY_VI 130 /* Iceland, Tonga */
#define AMDGPU_FAMILY_CZ 135 /* Carrizo, Stoney */
+#define AMDGPU_FAMILY_AI 141 /* Vega10 */
#if defined(__cplusplus)
}
diff --git a/include/uapi/drm/drm.h b/include/uapi/drm/drm.h
index b2c52843bc70..42d9f64ce416 100644
--- a/include/uapi/drm/drm.h
+++ b/include/uapi/drm/drm.h
@@ -647,6 +647,7 @@ struct drm_gem_open {
#define DRM_CAP_CURSOR_HEIGHT 0x9
#define DRM_CAP_ADDFB2_MODIFIERS 0x10
#define DRM_CAP_PAGE_FLIP_TARGET 0x11
+#define DRM_CAP_CRTC_IN_VBLANK_EVENT 0x12
/** DRM_IOCTL_GET_CAP ioctl argument type */
struct drm_get_cap {
@@ -851,7 +852,7 @@ struct drm_event_vblank {
__u32 tv_sec;
__u32 tv_usec;
__u32 sequence;
- __u32 reserved;
+ __u32 crtc_id; /* 0 on older kernels that do not support this */
};
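
A minimal way for userspace to learn whether crtc_id is populated is to query the new capability first; a hedged sketch using libdrm's drmGetCap() (assumed available) follows.

#include <stdint.h>
#include <xf86drm.h>		/* drmGetCap(), from libdrm (assumption) */
#include <drm/drm.h>

static int vblank_event_has_crtc_id(int fd)
{
	uint64_t cap = 0;

	if (drmGetCap(fd, DRM_CAP_CRTC_IN_VBLANK_EVENT, &cap) != 0)
		return 0;		/* old kernel: crtc_id will read as 0 */
	return cap == 1;
}
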
/* typedef area */
diff --git a/include/uapi/drm/drm_fourcc.h b/include/uapi/drm/drm_fourcc.h
index ef20abb8119b..55e301047b3e 100644
--- a/include/uapi/drm/drm_fourcc.h
+++ b/include/uapi/drm/drm_fourcc.h
@@ -114,6 +114,20 @@ extern "C" {
#define DRM_FORMAT_AYUV fourcc_code('A', 'Y', 'U', 'V') /* [31:0] A:Y:Cb:Cr 8:8:8:8 little endian */
/*
+ * 2 plane RGB + A
+ * index 0 = RGB plane, same format as the corresponding non _A8 format has
+ * index 1 = A plane, [7:0] A
+ */
+#define DRM_FORMAT_XRGB8888_A8 fourcc_code('X', 'R', 'A', '8')
+#define DRM_FORMAT_XBGR8888_A8 fourcc_code('X', 'B', 'A', '8')
+#define DRM_FORMAT_RGBX8888_A8 fourcc_code('R', 'X', 'A', '8')
+#define DRM_FORMAT_BGRX8888_A8 fourcc_code('B', 'X', 'A', '8')
+#define DRM_FORMAT_RGB888_A8 fourcc_code('R', '8', 'A', '8')
+#define DRM_FORMAT_BGR888_A8 fourcc_code('B', '8', 'A', '8')
+#define DRM_FORMAT_RGB565_A8 fourcc_code('R', '5', 'A', '8')
+#define DRM_FORMAT_BGR565_A8 fourcc_code('B', '5', 'A', '8')
+
+/*
* 2 plane YCbCr
* index 0 = Y plane, [7:0] Y
* index 1 = Cr:Cb plane, [15:0] Cr:Cb little endian
@@ -292,6 +306,51 @@ extern "C" {
*/
#define DRM_FORMAT_MOD_VIVANTE_SPLIT_SUPER_TILED fourcc_mod_code(VIVANTE, 4)
+
+/* NVIDIA Tegra frame buffer modifiers */
+
+/*
+ * Some modifiers take parameters, for example the number of vertical GOBs in
+ * a block. Reserve the lower 32 bits for parameters
+ */
+#define __fourcc_mod_tegra_mode_shift 32
+#define fourcc_mod_tegra_code(val, params) \
+ fourcc_mod_code(NV, ((((__u64)val) << __fourcc_mod_tegra_mode_shift) | params))
+#define fourcc_mod_tegra_mod(m) \
+ (m & ~((1ULL << __fourcc_mod_tegra_mode_shift) - 1))
+#define fourcc_mod_tegra_param(m) \
+ (m & ((1ULL << __fourcc_mod_tegra_mode_shift) - 1))
+
+/*
+ * Tegra Tiled Layout, used by Tegra 2, 3 and 4.
+ *
+ * Pixels are arranged in simple tiles of 16 x 16 bytes.
+ */
+#define NV_FORMAT_MOD_TEGRA_TILED fourcc_mod_tegra_code(1, 0)
+
+/*
+ * Tegra 16Bx2 Block Linear layout, used by TK1/TX1
+ *
+ * Pixels are arranged in 64x8 Groups Of Bytes (GOBs). GOBs are then stacked
+ * vertically by a power of 2 (1 to 32 GOBs) to form a block.
+ *
+ * Within a GOB, data is ordered as 16B x 2-line sectors laid out in a Z-shape.
+ *
+ * Parameter 'v' is the log2 encoding of the number of GOBs stacked vertically.
+ * Valid values are:
+ *
+ * 0 == ONE_GOB
+ * 1 == TWO_GOBS
+ * 2 == FOUR_GOBS
+ * 3 == EIGHT_GOBS
+ * 4 == SIXTEEN_GOBS
+ * 5 == THIRTYTWO_GOBS
+ *
+ * Chapter 20 "Pixel Memory Formats" of the Tegra X1 TRM describes this format
+ * in full detail.
+ */
+#define NV_FORMAT_MOD_TEGRA_16BX2_BLOCK(v) fourcc_mod_tegra_code(2, v)
+
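
To make the parameter encoding concrete, here is a hedged userspace sketch (the choice of 4 vertically stacked GOBs is arbitrary):

#include <stdint.h>
#include <drm/drm_fourcc.h>

static void tegra_modifier_example(void)
{
	/* 16Bx2 block linear with 2^2 == 4 GOBs stacked vertically. */
	uint64_t mod = NV_FORMAT_MOD_TEGRA_16BX2_BLOCK(2);

	uint64_t kind = fourcc_mod_tegra_mod(mod);		/* modifier with the parameter masked off */
	uint64_t vgobs_log2 = fourcc_mod_tegra_param(mod);	/* == 2 */

	(void)kind;
	(void)vgobs_log2;
}
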
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/drm/drm_mode.h b/include/uapi/drm/drm_mode.h
index ce7efe2e8a5e..8c67fc03d53d 100644
--- a/include/uapi/drm/drm_mode.h
+++ b/include/uapi/drm/drm_mode.h
@@ -123,6 +123,10 @@ extern "C" {
#define DRM_MODE_DIRTY_ON 1
#define DRM_MODE_DIRTY_ANNOTATE 2
+/* Link Status options */
+#define DRM_MODE_LINK_STATUS_GOOD 0
+#define DRM_MODE_LINK_STATUS_BAD 1
+
struct drm_mode_modeinfo {
__u32 clock;
__u16 hdisplay;
diff --git a/include/uapi/drm/etnaviv_drm.h b/include/uapi/drm/etnaviv_drm.h
index 2584c1cca42f..76f6f78a352b 100644
--- a/include/uapi/drm/etnaviv_drm.h
+++ b/include/uapi/drm/etnaviv_drm.h
@@ -154,6 +154,12 @@ struct drm_etnaviv_gem_submit_bo {
* one or more cmdstream buffers. This allows for conditional execution
* (context-restore), and IB buffers needed for per tile/bin draw cmds.
*/
+#define ETNA_SUBMIT_NO_IMPLICIT 0x0001
+#define ETNA_SUBMIT_FENCE_FD_IN 0x0002
+#define ETNA_SUBMIT_FENCE_FD_OUT 0x0004
+#define ETNA_SUBMIT_FLAGS (ETNA_SUBMIT_NO_IMPLICIT | \
+ ETNA_SUBMIT_FENCE_FD_IN | \
+ ETNA_SUBMIT_FENCE_FD_OUT)
#define ETNA_PIPE_3D 0x00
#define ETNA_PIPE_2D 0x01
#define ETNA_PIPE_VG 0x02
@@ -167,6 +173,8 @@ struct drm_etnaviv_gem_submit {
__u64 bos; /* in, ptr to array of submit_bo's */
__u64 relocs; /* in, ptr to array of submit_reloc's */
__u64 stream; /* in, ptr to cmdstream */
+ __u32 flags; /* in, mask of ETNA_SUBMIT_x */
+ __s32 fence_fd; /* in/out, fence fd (see ETNA_SUBMIT_FENCE_FD_x) */
};
/* The normal way to synchronize with the GPU is just to CPU_PREP on
diff --git a/include/uapi/drm/i915_drm.h b/include/uapi/drm/i915_drm.h
index 57093b455db6..3554495bef13 100644
--- a/include/uapi/drm/i915_drm.h
+++ b/include/uapi/drm/i915_drm.h
@@ -246,6 +246,7 @@ typedef struct _drm_i915_sarea {
#define DRM_I915_OVERLAY_PUT_IMAGE 0x27
#define DRM_I915_OVERLAY_ATTRS 0x28
#define DRM_I915_GEM_EXECBUFFER2 0x29
+#define DRM_I915_GEM_EXECBUFFER2_WR DRM_I915_GEM_EXECBUFFER2
#define DRM_I915_GET_SPRITE_COLORKEY 0x2a
#define DRM_I915_SET_SPRITE_COLORKEY 0x2b
#define DRM_I915_GEM_WAIT 0x2c
@@ -280,6 +281,7 @@ typedef struct _drm_i915_sarea {
#define DRM_IOCTL_I915_GEM_INIT DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_INIT, struct drm_i915_gem_init)
#define DRM_IOCTL_I915_GEM_EXECBUFFER DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER, struct drm_i915_gem_execbuffer)
#define DRM_IOCTL_I915_GEM_EXECBUFFER2 DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2, struct drm_i915_gem_execbuffer2)
+#define DRM_IOCTL_I915_GEM_EXECBUFFER2_WR DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_EXECBUFFER2_WR, struct drm_i915_gem_execbuffer2)
#define DRM_IOCTL_I915_GEM_PIN DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_PIN, struct drm_i915_gem_pin)
#define DRM_IOCTL_I915_GEM_UNPIN DRM_IOW(DRM_COMMAND_BASE + DRM_I915_GEM_UNPIN, struct drm_i915_gem_unpin)
#define DRM_IOCTL_I915_GEM_BUSY DRM_IOWR(DRM_COMMAND_BASE + DRM_I915_GEM_BUSY, struct drm_i915_gem_busy)
@@ -397,6 +399,19 @@ typedef struct drm_i915_irq_wait {
#define I915_PARAM_HAS_SCHEDULER 41
#define I915_PARAM_HUC_STATUS 42
+/* Query whether DRM_I915_GEM_EXECBUFFER2 supports the ability to opt-out of
+ * synchronisation with implicit fencing on individual objects.
+ * See EXEC_OBJECT_ASYNC.
+ */
+#define I915_PARAM_HAS_EXEC_ASYNC 43
+
+/* Query whether DRM_I915_GEM_EXECBUFFER2 supports explicit fence support -
+ * both being able to pass in a sync_file fd to wait upon before executing,
+ * and being able to return a new sync_file fd that is signaled when the
+ * current request is complete. See I915_EXEC_FENCE_IN and I915_EXEC_FENCE_OUT.
+ */
+#define I915_PARAM_HAS_EXEC_FENCE 44
+
typedef struct drm_i915_getparam {
__s32 param;
/*
@@ -737,8 +752,29 @@ struct drm_i915_gem_exec_object2 {
#define EXEC_OBJECT_SUPPORTS_48B_ADDRESS (1<<3)
#define EXEC_OBJECT_PINNED (1<<4)
#define EXEC_OBJECT_PAD_TO_SIZE (1<<5)
+/* The kernel implicitly tracks GPU activity on all GEM objects, and
+ * synchronises operations with outstanding rendering. This includes
+ * rendering on other devices if exported via dma-buf. However, sometimes
+ * this tracking is too coarse and the user knows better. For example,
+ * if the object is split into non-overlapping ranges shared between different
+ * clients or engines (i.e. suballocating objects), the implicit tracking
+ * by the kernel assumes that each operation affects the whole object rather
+ * than an individual range, causing needless synchronisation between clients.
+ * The kernel will also forgo any CPU cache flushes prior to rendering from
+ * the object as the client is expected to be also handling such domain
+ * tracking.
+ *
+ * The kernel maintains the implicit tracking in order to manage resources
+ * used by the GPU - this flag only disables the synchronisation prior to
+ * rendering with this object in this execbuf.
+ *
+ * Opting out of implicit synchronisation requires the user to do its own
+ * explicit tracking to avoid rendering corruption. See, for example,
+ * I915_PARAM_HAS_EXEC_FENCE to order execbufs and execute them asynchronously.
+ */
+#define EXEC_OBJECT_ASYNC (1<<6)
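
For example, a hedged sketch of how a userspace driver might opt a single object out of implicit sync (the handle is assumed to come from an earlier GEM create call):

#include <string.h>
#include <drm/i915_drm.h>

static void mark_object_async(struct drm_i915_gem_exec_object2 *obj,
			      unsigned int handle)
{
	/* The caller takes over ordering via explicit fences (I915_EXEC_FENCE_*). */
	memset(obj, 0, sizeof(*obj));
	obj->handle = handle;
	obj->flags = EXEC_OBJECT_ASYNC;
}
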
/* All remaining bits are MBZ and RESERVED FOR FUTURE USE */
-#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_PAD_TO_SIZE<<1)
+#define __EXEC_OBJECT_UNKNOWN_FLAGS -(EXEC_OBJECT_ASYNC<<1)
__u64 flags;
union {
@@ -828,7 +864,32 @@ struct drm_i915_gem_execbuffer2 {
*/
#define I915_EXEC_RESOURCE_STREAMER (1<<15)
-#define __I915_EXEC_UNKNOWN_FLAGS -(I915_EXEC_RESOURCE_STREAMER<<1)
+/* Setting I915_EXEC_FENCE_IN implies that lower_32_bits(rsvd2) represent
+ * a sync_file fd to wait upon (in a nonblocking manner) prior to executing
+ * the batch.
+ *
+ * Returns -EINVAL if the sync_file fd cannot be found.
+ */
+#define I915_EXEC_FENCE_IN (1<<16)
+
+/* Setting I915_EXEC_FENCE_OUT causes the ioctl to return a sync_file fd
+ * in the upper_32_bits(rsvd2) upon success. Ownership of the fd is given
+ * to the caller, and it should be closed after use. (The fd is a regular
+ * file descriptor and will be cleaned up on process termination. It holds
+ * a reference to the request, but nothing else.)
+ *
+ * The sync_file fd can be combined with other sync_file and passed either
+ * to execbuf using I915_EXEC_FENCE_IN, to atomic KMS ioctls (so that a flip
+ * will only occur after this request completes), or to other devices.
+ *
+ * Using I915_EXEC_FENCE_OUT requires use of
+ * DRM_IOCTL_I915_GEM_EXECBUFFER2_WR ioctl so that the result is written
+ * back to userspace. Failure to do so will cause the out-fence to always
+ * be reported as zero, and the real fence fd to be leaked.
+ */
+#define I915_EXEC_FENCE_OUT (1<<17)
+
+#define __I915_EXEC_UNKNOWN_FLAGS (-(I915_EXEC_FENCE_OUT<<1))
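
Putting the two flags together, a hedged sketch of a submission that waits on one sync_file and returns another (the execbuffer is assumed to already describe valid buffers and a batch):

#include <stdint.h>
#include <sys/ioctl.h>
#include <drm/i915_drm.h>

static int execbuf_with_fences(int fd, struct drm_i915_gem_execbuffer2 *eb,
			       int in_fence_fd, int *out_fence_fd)
{
	eb->flags |= I915_EXEC_FENCE_IN | I915_EXEC_FENCE_OUT;
	eb->rsvd2 = (__u64)(uint32_t)in_fence_fd;	/* lower 32 bits: fd to wait on */

	/* The _WR variant is required so the out-fence is copied back. */
	if (ioctl(fd, DRM_IOCTL_I915_GEM_EXECBUFFER2_WR, eb) != 0)
		return -1;

	*out_fence_fd = (int)(eb->rsvd2 >> 32);		/* upper 32 bits: new sync_file */
	return 0;
}
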
#define I915_EXEC_CONTEXT_ID_MASK (0xffffffff)
#define i915_execbuffer2_set_context_id(eb2, context) \
diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h
index 4d5d6a2bc59e..a4a189a240d7 100644
--- a/include/uapi/drm/msm_drm.h
+++ b/include/uapi/drm/msm_drm.h
@@ -72,6 +72,7 @@ struct drm_msm_timespec {
#define MSM_PARAM_CHIP_ID 0x03
#define MSM_PARAM_MAX_FREQ 0x04
#define MSM_PARAM_TIMESTAMP 0x05
+#define MSM_PARAM_GMEM_BASE 0x06
struct drm_msm_param {
__u32 pipe; /* in, MSM_PIPE_x */
diff --git a/include/uapi/drm/vmwgfx_drm.h b/include/uapi/drm/vmwgfx_drm.h
index d325a4107916..d9dfde9aa757 100644
--- a/include/uapi/drm/vmwgfx_drm.h
+++ b/include/uapi/drm/vmwgfx_drm.h
@@ -41,6 +41,7 @@ extern "C" {
#define DRM_VMW_GET_PARAM 0
#define DRM_VMW_ALLOC_DMABUF 1
#define DRM_VMW_UNREF_DMABUF 2
+#define DRM_VMW_HANDLE_CLOSE 2
#define DRM_VMW_CURSOR_BYPASS 3
/* guarded by DRM_VMW_PARAM_NUM_STREAMS != 0*/
#define DRM_VMW_CONTROL_STREAM 4
@@ -1092,6 +1093,29 @@ union drm_vmw_extended_context_arg {
struct drm_vmw_context_arg rep;
};
+/*************************************************************************/
+/*
+ * DRM_VMW_HANDLE_CLOSE - Close a user-space handle and release its
+ * underlying resource.
+ *
+ * Note that this ioctl is overlaid on the DRM_VMW_UNREF_DMABUF Ioctl.
+ * The ioctl arguments therefore need to be identical in layout.
+ *
+ */
+
+/**
+ * struct drm_vmw_handle_close_arg
+ *
+ * @handle: Handle to close.
+ *
+ * Argument to the DRM_VMW_HANDLE_CLOSE Ioctl.
+ */
+struct drm_vmw_handle_close_arg {
+ __u32 handle;
+ __u32 pad64;
+};
+
+
#if defined(__cplusplus)
}
#endif
diff --git a/include/uapi/linux/Kbuild b/include/uapi/linux/Kbuild
index f8d9fed17ba9..662c592b74dd 100644
--- a/include/uapi/linux/Kbuild
+++ b/include/uapi/linux/Kbuild
@@ -333,6 +333,7 @@ header-y += parport.h
header-y += patchkey.h
header-y += pci.h
header-y += pci_regs.h
+header-y += pcitest.h
header-y += perf_event.h
header-y += personality.h
header-y += pfkeyv2.h
@@ -477,6 +478,7 @@ header-y += virtio_types.h
header-y += virtio_vsock.h
header-y += virtio_crypto.h
header-y += vm_sockets.h
+header-y += vsockmon.h
header-y += vt.h
header-y += vtpm_proxy.h
header-y += wait.h
diff --git a/include/uapi/linux/aspeed-lpc-ctrl.h b/include/uapi/linux/aspeed-lpc-ctrl.h
new file mode 100644
index 000000000000..c328c976c684
--- /dev/null
+++ b/include/uapi/linux/aspeed-lpc-ctrl.h
@@ -0,0 +1,61 @@
+/*
+ * Copyright 2017 IBM Corp.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public License
+ * as published by the Free Software Foundation; either version
+ * 2 of the License, or (at your option) any later version.
+ */
+
+#ifndef _UAPI_LINUX_ASPEED_LPC_CTRL_H
+#define _UAPI_LINUX_ASPEED_LPC_CTRL_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/* Window types */
+#define ASPEED_LPC_CTRL_WINDOW_FLASH 1
+#define ASPEED_LPC_CTRL_WINDOW_MEMORY 2
+
+/*
+ * This driver provides a window for the host to access a BMC resource
+ * across the BMC <-> Host LPC bus.
+ *
+ * window_type: The BMC resource that the host will access through the
+ * window. BMC flash and BMC RAM.
+ *
+ * window_id: For each window type there may be multiple windows,
+ * these are referenced by ID.
+ *
+ * flags: Reserved for future use, this field is expected to be
+ * zeroed.
+ *
+ * addr: Address on the host LPC bus that the specified window should
+ * be mapped. This address must be power of two aligned.
+ *
+ * offset: Offset into the BMC window that should be mapped to the
+ * host (at addr). This must be a multiple of size.
+ *
+ * size: The size of the mapping. The smallest possible size is 64K.
+ * This must be power of two aligned.
+ *
+ */
+
+struct aspeed_lpc_ctrl_mapping {
+ __u8 window_type;
+ __u8 window_id;
+ __u16 flags;
+ __u32 addr;
+ __u32 offset;
+ __u32 size;
+};
+
+#define __ASPEED_LPC_CTRL_IOCTL_MAGIC 0xb2
+
+#define ASPEED_LPC_CTRL_IOCTL_GET_SIZE _IOWR(__ASPEED_LPC_CTRL_IOCTL_MAGIC, \
+ 0x00, struct aspeed_lpc_ctrl_mapping)
+
+#define ASPEED_LPC_CTRL_IOCTL_MAP _IOW(__ASPEED_LPC_CTRL_IOCTL_MAGIC, \
+ 0x01, struct aspeed_lpc_ctrl_mapping)
+
+#endif /* _UAPI_LINUX_ASPEED_LPC_CTRL_H */
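
To illustrate the mapping flow end to end, a hedged sketch follows; the device node name and the address/size values are assumptions, not values taken from this header.

#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/aspeed-lpc-ctrl.h>

static int map_flash_window(void)
{
	struct aspeed_lpc_ctrl_mapping map = {
		.window_type = ASPEED_LPC_CTRL_WINDOW_FLASH,
		.window_id = 0,
		.flags = 0,		/* reserved, must be zero */
		.addr = 0x0e000000,	/* power-of-two aligned host LPC address (placeholder) */
		.offset = 0,		/* multiple of size */
		.size = 0x10000,	/* 64K minimum, power-of-two aligned */
	};
	int fd, ret;

	fd = open("/dev/aspeed-lpc-ctrl", O_RDWR);	/* device node name is an assumption */
	if (fd < 0)
		return -1;
	ret = ioctl(fd, ASPEED_LPC_CTRL_IOCTL_MAP, &map);
	close(fd);
	return ret;
}
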
diff --git a/include/uapi/linux/bpf.h b/include/uapi/linux/bpf.h
index 0539a0ceef38..945a1f5f63c5 100644
--- a/include/uapi/linux/bpf.h
+++ b/include/uapi/linux/bpf.h
@@ -81,6 +81,7 @@ enum bpf_cmd {
BPF_OBJ_GET,
BPF_PROG_ATTACH,
BPF_PROG_DETACH,
+ BPF_PROG_TEST_RUN,
};
enum bpf_map_type {
@@ -96,6 +97,8 @@ enum bpf_map_type {
BPF_MAP_TYPE_LRU_HASH,
BPF_MAP_TYPE_LRU_PERCPU_HASH,
BPF_MAP_TYPE_LPM_TRIE,
+ BPF_MAP_TYPE_ARRAY_OF_MAPS,
+ BPF_MAP_TYPE_HASH_OF_MAPS,
};
enum bpf_prog_type {
@@ -152,6 +155,7 @@ union bpf_attr {
__u32 value_size; /* size of value in bytes */
__u32 max_entries; /* max number of entries in a map */
__u32 map_flags; /* prealloc or not */
+ __u32 inner_map_fd; /* fd pointing to the inner map */
};
struct { /* anonymous struct used by BPF_MAP_*_ELEM commands */
@@ -186,6 +190,17 @@ union bpf_attr {
__u32 attach_type;
__u32 attach_flags;
};
+
+ struct { /* anonymous struct used by BPF_PROG_TEST_RUN command */
+ __u32 prog_fd;
+ __u32 retval;
+ __u32 data_size_in;
+ __u32 data_size_out;
+ __aligned_u64 data_in;
+ __aligned_u64 data_out;
+ __u32 repeat;
+ __u32 duration;
+ } test;
} __attribute__((aligned(8)));
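
A hedged userspace sketch of driving BPF_PROG_TEST_RUN through the bpf(2) syscall (prog_fd and the packet buffers are assumed to exist already):

#include <string.h>
#include <unistd.h>
#include <sys/syscall.h>
#include <linux/bpf.h>

static int test_run_once(int prog_fd, void *pkt, __u32 pkt_len,
			 void *out, __u32 out_len, __u32 *retval)
{
	union bpf_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.test.prog_fd = prog_fd;
	attr.test.data_in = (__u64)(unsigned long)pkt;
	attr.test.data_size_in = pkt_len;
	attr.test.data_out = (__u64)(unsigned long)out;
	attr.test.data_size_out = out_len;
	attr.test.repeat = 1;			/* run the program once */

	if (syscall(__NR_bpf, BPF_PROG_TEST_RUN, &attr, sizeof(attr)) != 0)
		return -1;
	*retval = attr.test.retval;		/* program return value */
	return 0;
}
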
/* BPF helper function descriptions:
@@ -456,6 +471,17 @@ union bpf_attr {
* Return:
* > 0 length of the string including the trailing NUL on success
* < 0 error
+ *
+ * u64 bpf_get_socket_cookie(skb)
+ * Get the cookie for the socket stored inside sk_buff.
+ * @skb: pointer to skb
+ * Return: 8-byte non-decreasing number on success or 0 if the socket
+ * field is missing inside sk_buff
+ *
+ * u32 bpf_get_socket_uid(skb)
+ * Get the owner uid of the socket stored inside sk_buff.
+ * @skb: pointer to skb
+ * Return: uid of the socket owner on success or overflowuid if failed.
*/
#define __BPF_FUNC_MAPPER(FN) \
FN(unspec), \
@@ -503,7 +529,9 @@ union bpf_attr {
FN(get_numa_node_id), \
FN(skb_change_head), \
FN(xdp_adjust_head), \
- FN(probe_read_str),
+ FN(probe_read_str), \
+ FN(get_socket_cookie), \
+ FN(get_socket_uid),
/* integer value in 'imm' field of BPF_CALL instruction selects which helper
* function eBPF program intends to call
@@ -574,6 +602,7 @@ struct __sk_buff {
__u32 tc_classid;
__u32 data;
__u32 data_end;
+ __u32 napi_id;
};
struct bpf_tunnel_key {
diff --git a/include/uapi/linux/btrfs.h b/include/uapi/linux/btrfs.h
index dcfc3a5a9cb1..a456e5309238 100644
--- a/include/uapi/linux/btrfs.h
+++ b/include/uapi/linux/btrfs.h
@@ -291,10 +291,10 @@ struct btrfs_ioctl_feature_flags {
struct btrfs_balance_args {
__u64 profiles;
union {
- __le64 usage;
+ __u64 usage;
struct {
- __le32 usage_min;
- __le32 usage_max;
+ __u32 usage_min;
+ __u32 usage_max;
};
};
__u64 devid;
@@ -324,8 +324,8 @@ struct btrfs_balance_args {
* Process chunks that cross stripes_min..stripes_max devices,
* BTRFS_BALANCE_ARGS_STRIPES_RANGE
*/
- __le32 stripes_min;
- __le32 stripes_max;
+ __u32 stripes_min;
+ __u32 stripes_max;
__u64 unused[6];
} __attribute__ ((__packed__));
diff --git a/include/uapi/linux/can/vxcan.h b/include/uapi/linux/can/vxcan.h
new file mode 100644
index 000000000000..ffb0b7156f7e
--- /dev/null
+++ b/include/uapi/linux/can/vxcan.h
@@ -0,0 +1,12 @@
+#ifndef _UAPI_CAN_VXCAN_H
+#define _UAPI_CAN_VXCAN_H
+
+enum {
+ VXCAN_INFO_UNSPEC,
+ VXCAN_INFO_PEER,
+
+ __VXCAN_INFO_MAX
+#define VXCAN_INFO_MAX (__VXCAN_INFO_MAX - 1)
+};
+
+#endif
diff --git a/include/uapi/linux/capability.h b/include/uapi/linux/capability.h
index 49bc06295398..6fe14d001f68 100644
--- a/include/uapi/linux/capability.h
+++ b/include/uapi/linux/capability.h
@@ -205,7 +205,7 @@ struct vfs_cap_data {
#define CAP_SYS_MODULE 16
/* Allow ioperm/iopl access */
-/* Allow sending USB messages to any device via /proc/bus/usb */
+/* Allow sending USB messages to any device via /dev/bus/usb */
#define CAP_SYS_RAWIO 17
diff --git a/include/uapi/linux/cec.h b/include/uapi/linux/cec.h
index 14b6f24b189e..a0dfe27bc6c7 100644
--- a/include/uapi/linux/cec.h
+++ b/include/uapi/linux/cec.h
@@ -223,7 +223,7 @@ static inline int cec_msg_status_is_ok(const struct cec_msg *msg)
#define CEC_LOG_ADDR_BACKUP_2 13
#define CEC_LOG_ADDR_SPECIFIC 14
#define CEC_LOG_ADDR_UNREGISTERED 15 /* as initiator address */
-#define CEC_LOG_ADDR_BROADCAST 15 /* ad destination address */
+#define CEC_LOG_ADDR_BROADCAST 15 /* as destination address */
/* The logical address types that the CEC device wants to claim */
#define CEC_LOG_ADDR_TYPE_TV 0
diff --git a/include/uapi/linux/cryptouser.h b/include/uapi/linux/cryptouser.h
index 11d21fce14d6..b4def5c630e7 100644
--- a/include/uapi/linux/cryptouser.h
+++ b/include/uapi/linux/cryptouser.h
@@ -31,7 +31,7 @@ enum {
#define CRYPTO_MSG_MAX (__CRYPTO_MSG_MAX - 1)
#define CRYPTO_NR_MSGTYPES (CRYPTO_MSG_MAX + 1 - CRYPTO_MSG_BASE)
-#define CRYPTO_MAX_NAME CRYPTO_MAX_ALG_NAME
+#define CRYPTO_MAX_NAME 64
/* Netlink message attributes. */
enum crypto_attr_type_t {
@@ -53,9 +53,9 @@ enum crypto_attr_type_t {
};
struct crypto_user_alg {
- char cru_name[CRYPTO_MAX_ALG_NAME];
- char cru_driver_name[CRYPTO_MAX_ALG_NAME];
- char cru_module_name[CRYPTO_MAX_ALG_NAME];
+ char cru_name[CRYPTO_MAX_NAME];
+ char cru_driver_name[CRYPTO_MAX_NAME];
+ char cru_module_name[CRYPTO_MAX_NAME];
__u32 cru_type;
__u32 cru_mask;
__u32 cru_refcnt;
@@ -73,7 +73,7 @@ struct crypto_report_hash {
};
struct crypto_report_cipher {
- char type[CRYPTO_MAX_ALG_NAME];
+ char type[CRYPTO_MAX_NAME];
unsigned int blocksize;
unsigned int min_keysize;
unsigned int max_keysize;
diff --git a/include/uapi/linux/devlink.h b/include/uapi/linux/devlink.h
index 0f1f3a12e23c..b0e807ac53bb 100644
--- a/include/uapi/linux/devlink.h
+++ b/include/uapi/linux/devlink.h
@@ -65,8 +65,12 @@ enum devlink_command {
#define DEVLINK_CMD_ESWITCH_MODE_SET /* obsolete, never use this! */ \
DEVLINK_CMD_ESWITCH_SET
- /* add new commands above here */
+ DEVLINK_CMD_DPIPE_TABLE_GET,
+ DEVLINK_CMD_DPIPE_ENTRIES_GET,
+ DEVLINK_CMD_DPIPE_HEADERS_GET,
+ DEVLINK_CMD_DPIPE_TABLE_COUNTERS_SET,
+ /* add new commands above here */
__DEVLINK_CMD_MAX,
DEVLINK_CMD_MAX = __DEVLINK_CMD_MAX - 1
};
@@ -115,6 +119,11 @@ enum devlink_eswitch_inline_mode {
DEVLINK_ESWITCH_INLINE_MODE_TRANSPORT,
};
+enum devlink_eswitch_encap_mode {
+ DEVLINK_ESWITCH_ENCAP_MODE_NONE,
+ DEVLINK_ESWITCH_ENCAP_MODE_BASIC,
+};
+
enum devlink_attr {
/* don't change the order or add anything between, this is ABI! */
DEVLINK_ATTR_UNSPEC,
@@ -148,10 +157,73 @@ enum devlink_attr {
DEVLINK_ATTR_ESWITCH_MODE, /* u16 */
DEVLINK_ATTR_ESWITCH_INLINE_MODE, /* u8 */
+ DEVLINK_ATTR_DPIPE_TABLES, /* nested */
+ DEVLINK_ATTR_DPIPE_TABLE, /* nested */
+ DEVLINK_ATTR_DPIPE_TABLE_NAME, /* string */
+ DEVLINK_ATTR_DPIPE_TABLE_SIZE, /* u64 */
+ DEVLINK_ATTR_DPIPE_TABLE_MATCHES, /* nested */
+ DEVLINK_ATTR_DPIPE_TABLE_ACTIONS, /* nested */
+ DEVLINK_ATTR_DPIPE_TABLE_COUNTERS_ENABLED, /* u8 */
+
+ DEVLINK_ATTR_DPIPE_ENTRIES, /* nested */
+ DEVLINK_ATTR_DPIPE_ENTRY, /* nested */
+ DEVLINK_ATTR_DPIPE_ENTRY_INDEX, /* u64 */
+ DEVLINK_ATTR_DPIPE_ENTRY_MATCH_VALUES, /* nested */
+ DEVLINK_ATTR_DPIPE_ENTRY_ACTION_VALUES, /* nested */
+ DEVLINK_ATTR_DPIPE_ENTRY_COUNTER, /* u64 */
+
+ DEVLINK_ATTR_DPIPE_MATCH, /* nested */
+ DEVLINK_ATTR_DPIPE_MATCH_VALUE, /* nested */
+ DEVLINK_ATTR_DPIPE_MATCH_TYPE, /* u32 */
+
+ DEVLINK_ATTR_DPIPE_ACTION, /* nested */
+ DEVLINK_ATTR_DPIPE_ACTION_VALUE, /* nested */
+ DEVLINK_ATTR_DPIPE_ACTION_TYPE, /* u32 */
+
+ DEVLINK_ATTR_DPIPE_VALUE,
+ DEVLINK_ATTR_DPIPE_VALUE_MASK,
+ DEVLINK_ATTR_DPIPE_VALUE_MAPPING, /* u32 */
+
+ DEVLINK_ATTR_DPIPE_HEADERS, /* nested */
+ DEVLINK_ATTR_DPIPE_HEADER, /* nested */
+ DEVLINK_ATTR_DPIPE_HEADER_NAME, /* string */
+ DEVLINK_ATTR_DPIPE_HEADER_ID, /* u32 */
+ DEVLINK_ATTR_DPIPE_HEADER_FIELDS, /* nested */
+ DEVLINK_ATTR_DPIPE_HEADER_GLOBAL, /* u8 */
+ DEVLINK_ATTR_DPIPE_HEADER_INDEX, /* u32 */
+
+ DEVLINK_ATTR_DPIPE_FIELD, /* nested */
+ DEVLINK_ATTR_DPIPE_FIELD_NAME, /* string */
+ DEVLINK_ATTR_DPIPE_FIELD_ID, /* u32 */
+ DEVLINK_ATTR_DPIPE_FIELD_BITWIDTH, /* u32 */
+ DEVLINK_ATTR_DPIPE_FIELD_MAPPING_TYPE, /* u32 */
+
+ DEVLINK_ATTR_PAD,
+
+ DEVLINK_ATTR_ESWITCH_ENCAP_MODE, /* u8 */
+
/* add new attributes above here, update the policy in devlink.c */
__DEVLINK_ATTR_MAX,
DEVLINK_ATTR_MAX = __DEVLINK_ATTR_MAX - 1
};
+/* Mapping between internal resource described by the field and system
+ * structure
+ */
+enum devlink_dpipe_field_mapping_type {
+ DEVLINK_DPIPE_FIELD_MAPPING_TYPE_NONE,
+ DEVLINK_DPIPE_FIELD_MAPPING_TYPE_IFINDEX,
+};
+
+/* Match type - specify the type of the match */
+enum devlink_dpipe_match_type {
+ DEVLINK_DPIPE_MATCH_TYPE_FIELD_EXACT,
+};
+
+/* Action type - specify the action type */
+enum devlink_dpipe_action_type {
+ DEVLINK_DPIPE_ACTION_TYPE_FIELD_MODIFY,
+};
+
#endif /* _UAPI_LINUX_DEVLINK_H_ */
diff --git a/include/uapi/linux/elf-em.h b/include/uapi/linux/elf-em.h
index cb5d1a519202..9cd1de954c0a 100644
--- a/include/uapi/linux/elf-em.h
+++ b/include/uapi/linux/elf-em.h
@@ -42,7 +42,6 @@
#define EM_TILEGX 191 /* Tilera TILE-Gx */
#define EM_BPF 247 /* Linux BPF - in-kernel virtual machine */
#define EM_FRV 0x5441 /* Fujitsu FR-V */
-#define EM_AVR32 0x18ad /* Atmel AVR32 */
/*
* This is an interim value that we will use until the committee comes
diff --git a/include/uapi/linux/elf.h b/include/uapi/linux/elf.h
index b59ee077a596..b5280db9ef6a 100644
--- a/include/uapi/linux/elf.h
+++ b/include/uapi/linux/elf.h
@@ -409,6 +409,8 @@ typedef struct elf64_shdr {
#define NT_S390_TDB 0x308 /* s390 transaction diagnostic block */
#define NT_S390_VXRS_LOW 0x309 /* s390 vector registers 0-15 upper half */
#define NT_S390_VXRS_HIGH 0x30a /* s390 vector registers 16-31 */
+#define NT_S390_GS_CB 0x30b /* s390 guarded storage registers */
+#define NT_S390_GS_BC 0x30c /* s390 guarded storage broadcast control block */
#define NT_ARM_VFP 0x400 /* ARM VFP/NEON registers */
#define NT_ARM_TLS 0x401 /* ARM TLS register */
#define NT_ARM_HW_BREAK 0x402 /* ARM hardware breakpoint registers */
@@ -417,7 +419,7 @@ typedef struct elf64_shdr {
#define NT_METAG_CBUF 0x500 /* Metag catch buffer registers */
#define NT_METAG_RPIPE 0x501 /* Metag read pipeline state */
#define NT_METAG_TLS 0x502 /* Metag TLS pointer */
-
+#define NT_ARC_V2 0x600 /* ARCv2 accumulator/extra registers */
/* Note header in a PT_NOTE section */
typedef struct elf32_note {
diff --git a/include/uapi/linux/ethtool.h b/include/uapi/linux/ethtool.h
index 3dc91a46e8b8..d179d7767f51 100644
--- a/include/uapi/linux/ethtool.h
+++ b/include/uapi/linux/ethtool.h
@@ -1487,12 +1487,14 @@ enum ethtool_link_mode_bit_indices {
*/
/* The forced speed, in units of 1Mb. All values 0 to INT_MAX are legal. */
+/* Update drivers/net/phy/phy.c:phy_speed_to_str() when adding new values */
#define SPEED_10 10
#define SPEED_100 100
#define SPEED_1000 1000
#define SPEED_2500 2500
#define SPEED_5000 5000
#define SPEED_10000 10000
+#define SPEED_14000 14000
#define SPEED_20000 20000
#define SPEED_25000 25000
#define SPEED_40000 40000
diff --git a/include/uapi/linux/eventpoll.h b/include/uapi/linux/eventpoll.h
index 1c3154913a39..f4d5c998cc2b 100644
--- a/include/uapi/linux/eventpoll.h
+++ b/include/uapi/linux/eventpoll.h
@@ -26,8 +26,21 @@
#define EPOLL_CTL_DEL 2
#define EPOLL_CTL_MOD 3
+/* Epoll event masks */
+#define EPOLLIN 0x00000001
+#define EPOLLPRI 0x00000002
+#define EPOLLOUT 0x00000004
+#define EPOLLERR 0x00000008
+#define EPOLLHUP 0x00000010
+#define EPOLLRDNORM 0x00000040
+#define EPOLLRDBAND 0x00000080
+#define EPOLLWRNORM 0x00000100
+#define EPOLLWRBAND 0x00000200
+#define EPOLLMSG 0x00000400
+#define EPOLLRDHUP 0x00002000
+
/* Set exclusive wakeup mode for the target file descriptor */
-#define EPOLLEXCLUSIVE (1 << 28)
+#define EPOLLEXCLUSIVE (1U << 28)
/*
* Request the handling of system wakeup events so as to prevent system suspends
@@ -39,13 +52,13 @@
*
* Requires CAP_BLOCK_SUSPEND
*/
-#define EPOLLWAKEUP (1 << 29)
+#define EPOLLWAKEUP (1U << 29)
/* Set the One Shot behaviour for the target file descriptor */
-#define EPOLLONESHOT (1 << 30)
+#define EPOLLONESHOT (1U << 30)
/* Set the Edge Triggered behaviour for the target file descriptor */
-#define EPOLLET (1 << 31)
+#define EPOLLET (1U << 31)
/*
* On x86-64 make the 64bit structure have the same alignment as the
diff --git a/include/uapi/linux/fs.h b/include/uapi/linux/fs.h
index 048a85e9f017..24e61a54feaa 100644
--- a/include/uapi/linux/fs.h
+++ b/include/uapi/linux/fs.h
@@ -279,12 +279,25 @@ struct fscrypt_policy {
__u8 filenames_encryption_mode;
__u8 flags;
__u8 master_key_descriptor[FS_KEY_DESCRIPTOR_SIZE];
-} __packed;
+};
#define FS_IOC_SET_ENCRYPTION_POLICY _IOR('f', 19, struct fscrypt_policy)
#define FS_IOC_GET_ENCRYPTION_PWSALT _IOW('f', 20, __u8[16])
#define FS_IOC_GET_ENCRYPTION_POLICY _IOW('f', 21, struct fscrypt_policy)
+/* Parameters for passing an encryption key into the kernel keyring */
+#define FS_KEY_DESC_PREFIX "fscrypt:"
+#define FS_KEY_DESC_PREFIX_SIZE 8
+
+/* Structure that userspace passes to the kernel keyring */
+#define FS_MAX_KEY_SIZE 64
+
+struct fscrypt_key {
+ __u32 mode;
+ __u8 raw[FS_MAX_KEY_SIZE];
+ __u32 size;
+};
+
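
As a hedged illustration of how userspace feeds such a key to the kernel: the "logon" key type and the descriptor formatting below are assumptions based on how ext4/f2fs consume these keys, and add_key() comes from libkeyutils.

#include <string.h>
#include <keyutils.h>	/* add_key(), KEY_SPEC_SESSION_KEYRING (libkeyutils, assumed) */
#include <linux/fs.h>

static key_serial_t add_fscrypt_key(const char *desc,	/* "fscrypt:" + hex descriptor (assumed format) */
				    const __u8 *raw, __u32 size)
{
	struct fscrypt_key key;

	memset(&key, 0, sizeof(key));
	key.size = size;			/* must be <= FS_MAX_KEY_SIZE */
	memcpy(key.raw, raw, size);

	return add_key("logon", desc, &key, sizeof(key),
		       KEY_SPEC_SESSION_KEYRING);
}
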
/*
* Inode flags (FS_IOC_GETFLAGS / FS_IOC_SETFLAGS)
*
diff --git a/include/uapi/linux/fsmap.h b/include/uapi/linux/fsmap.h
new file mode 100644
index 000000000000..7e8e5f0bd6d2
--- /dev/null
+++ b/include/uapi/linux/fsmap.h
@@ -0,0 +1,112 @@
+/*
+ * FS_IOC_GETFSMAP ioctl infrastructure.
+ *
+ * Copyright (C) 2017 Oracle. All Rights Reserved.
+ *
+ * Author: Darrick J. Wong <darrick.wong@oracle.com>
+ */
+#ifndef _LINUX_FSMAP_H
+#define _LINUX_FSMAP_H
+
+#include <linux/types.h>
+
+/*
+ * Structure for FS_IOC_GETFSMAP.
+ *
+ * The memory layout for this call consists of the scalar values defined in
+ * struct fsmap_head, followed by two struct fsmap that describe
+ * the lower and upper bound of mappings to return, followed by an
+ * array of struct fsmap mappings.
+ *
+ * fmh_iflags control the output of the call, whereas fmh_oflags report
+ * on the overall record output. fmh_count should be set to the
+ * length of the fmh_recs array, and fmh_entries will be set to the
+ * number of entries filled out during each call. If fmh_count is
+ * zero, the number of reverse mappings will be returned in
+ * fmh_entries, though no mappings will be returned. fmh_reserved
+ * must be set to zero.
+ *
+ * The two elements in the fmh_keys array are used to constrain the
+ * output. The first element in the array should represent the
+ * lowest disk mapping ("low key") that the user wants to learn
+ * about. If this value is all zeroes, the filesystem will return
+ * the first entry it knows about. For a subsequent call, the
+ * contents of fsmap_head.fmh_recs[fsmap_head.fmh_count - 1] should be
+ * copied into fmh_keys[0] to have the kernel start where it left off.
+ *
+ * The second element in the fmh_keys array should represent the
+ * highest disk mapping ("high key") that the user wants to learn
+ * about. If this value is all ones, the filesystem will not stop
+ * until it runs out of mappings to return or runs out of space in
+ * fmh_recs.
+ *
+ * fmr_device can be either a 32-bit cookie representing a device, or
+ * a 32-bit dev_t if the FMH_OF_DEV_T flag is set. fmr_physical,
+ * fmr_offset, and fmr_length are expressed in units of bytes.
+ * fmr_owner is either an inode number, or a special value if
+ * FMR_OF_SPECIAL_OWNER is set in fmr_flags.
+ */
+struct fsmap {
+ __u32 fmr_device; /* device id */
+ __u32 fmr_flags; /* mapping flags */
+ __u64 fmr_physical; /* device offset of segment */
+ __u64 fmr_owner; /* owner id */
+ __u64 fmr_offset; /* file offset of segment */
+ __u64 fmr_length; /* length of segment */
+ __u64 fmr_reserved[3]; /* must be zero */
+};
+
+struct fsmap_head {
+ __u32 fmh_iflags; /* control flags */
+ __u32 fmh_oflags; /* output flags */
+ __u32 fmh_count; /* # of entries in array incl. input */
+ __u32 fmh_entries; /* # of entries filled in (output). */
+ __u64 fmh_reserved[6]; /* must be zero */
+
+ struct fsmap fmh_keys[2]; /* low and high keys for the mapping search */
+ struct fsmap fmh_recs[]; /* returned records */
+};
+
+/* Size of an fsmap_head with room for nr records. */
+static inline size_t
+fsmap_sizeof(
+ unsigned int nr)
+{
+ return sizeof(struct fsmap_head) + nr * sizeof(struct fsmap);
+}
+
+/* Start the next fsmap query at the end of the current query results. */
+static inline void
+fsmap_advance(
+ struct fsmap_head *head)
+{
+ head->fmh_keys[0] = head->fmh_recs[head->fmh_entries - 1];
+}
+
+/* fmh_iflags values - set by FS_IOC_GETFSMAP caller in the header. */
+/* no flags defined yet */
+#define FMH_IF_VALID 0
+
+/* fmh_oflags values - returned in the header segment only. */
+#define FMH_OF_DEV_T 0x1 /* fmr_device values will be dev_t */
+
+/* fmr_flags values - returned for each non-header segment */
+#define FMR_OF_PREALLOC 0x1 /* segment = unwritten pre-allocation */
+#define FMR_OF_ATTR_FORK 0x2 /* segment = attribute fork */
+#define FMR_OF_EXTENT_MAP 0x4 /* segment = extent map */
+#define FMR_OF_SHARED 0x8 /* segment = shared with another file */
+#define FMR_OF_SPECIAL_OWNER 0x10 /* owner is a special value */
+#define FMR_OF_LAST 0x20 /* segment is the last in the FS */
+
+/* Each FS gets to define its own special owner codes. */
+#define FMR_OWNER(type, code) (((__u64)type << 32) | \
+ ((__u64)code & 0xFFFFFFFFULL))
+#define FMR_OWNER_TYPE(owner) ((__u32)((__u64)owner >> 32))
+#define FMR_OWNER_CODE(owner) ((__u32)(((__u64)owner & 0xFFFFFFFFULL)))
+#define FMR_OWN_FREE FMR_OWNER(0, 1) /* free space */
+#define FMR_OWN_UNKNOWN FMR_OWNER(0, 2) /* unknown owner */
+#define FMR_OWN_METADATA FMR_OWNER(0, 3) /* metadata */
+
+#define FS_IOC_GETFSMAP _IOWR('X', 59, struct fsmap_head)
+
+#endif /* _LINUX_FSMAP_H */
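
A hedged userspace sketch of the query loop described above (fd is assumed to be open on the target filesystem, handle_rec() is a caller-supplied consumer, and the batch size of 128 records is arbitrary):

#include <stdlib.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/fsmap.h>

static int dump_fsmap(int fd, void (*handle_rec)(const struct fsmap *))
{
	unsigned int i, nr = 128;
	struct fsmap_head *head = malloc(fsmap_sizeof(nr));

	if (!head)
		return -1;
	memset(head, 0, fsmap_sizeof(nr));
	head->fmh_count = nr;
	/* Low key stays all zeroes; a high key of all ones means "no upper bound". */
	memset(&head->fmh_keys[1], 0xFF, sizeof(struct fsmap));

	do {
		if (ioctl(fd, FS_IOC_GETFSMAP, head) != 0) {
			free(head);
			return -1;
		}
		for (i = 0; i < head->fmh_entries; i++)
			handle_rec(&head->fmh_recs[i]);
		if (head->fmh_entries)
			fsmap_advance(head);	/* continue where the last batch ended */
	} while (head->fmh_entries == nr);	/* a short batch means we are done */

	free(head);
	return 0;
}
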
diff --git a/include/uapi/linux/gtp.h b/include/uapi/linux/gtp.h
index 72a04a0e8cce..57d1edb8efd9 100644
--- a/include/uapi/linux/gtp.h
+++ b/include/uapi/linux/gtp.h
@@ -19,7 +19,8 @@ enum gtp_attrs {
GTPA_LINK,
GTPA_VERSION,
GTPA_TID, /* for GTPv0 only */
- GTPA_SGSN_ADDRESS,
+ GTPA_PEER_ADDRESS, /* Remote GSN peer, either SGSN or GGSN */
+#define GTPA_SGSN_ADDRESS GTPA_PEER_ADDRESS /* maintain legacy attr name */
GTPA_MS_ADDRESS,
GTPA_FLOW,
GTPA_NET_NS_FD,
diff --git a/include/uapi/linux/if_arp.h b/include/uapi/linux/if_arp.h
index 4d024d75d64b..cf73510b9238 100644
--- a/include/uapi/linux/if_arp.h
+++ b/include/uapi/linux/if_arp.h
@@ -95,6 +95,7 @@
#define ARPHRD_IP6GRE 823 /* GRE over IPv6 */
#define ARPHRD_NETLINK 824 /* Netlink header */
#define ARPHRD_6LOWPAN 825 /* IPv6 over LoWPAN */
+#define ARPHRD_VSOCKMON 826 /* Vsock monitor header */
#define ARPHRD_VOID 0xFFFF /* Void type, nothing is known */
#define ARPHRD_NONE 0xFFFE /* zero header length */
diff --git a/include/uapi/linux/if_link.h b/include/uapi/linux/if_link.h
index 320fc1e747ee..8e56ac70e0d1 100644
--- a/include/uapi/linux/if_link.h
+++ b/include/uapi/linux/if_link.h
@@ -323,6 +323,7 @@ enum {
IFLA_BRPORT_MCAST_FLOOD,
IFLA_BRPORT_MCAST_TO_UCAST,
IFLA_BRPORT_VLAN_TUNNEL,
+ IFLA_BRPORT_BCAST_FLOOD,
__IFLA_BRPORT_MAX
};
#define IFLA_BRPORT_MAX (__IFLA_BRPORT_MAX - 1)
@@ -538,11 +539,18 @@ enum {
#define IFLA_PPP_MAX (__IFLA_PPP_MAX - 1)
/* GTP section */
+
+enum ifla_gtp_role {
+ GTP_ROLE_GGSN = 0,
+ GTP_ROLE_SGSN,
+};
+
enum {
IFLA_GTP_UNSPEC,
IFLA_GTP_FD0,
IFLA_GTP_FD1,
IFLA_GTP_PDP_HASHSIZE,
+ IFLA_GTP_ROLE,
__IFLA_GTP_MAX,
};
#define IFLA_GTP_MAX (__IFLA_GTP_MAX - 1)
@@ -880,7 +888,9 @@ enum {
/* XDP section */
#define XDP_FLAGS_UPDATE_IF_NOEXIST (1U << 0)
-#define XDP_FLAGS_MASK (XDP_FLAGS_UPDATE_IF_NOEXIST)
+#define XDP_FLAGS_SKB_MODE (2U << 0)
+#define XDP_FLAGS_MASK (XDP_FLAGS_UPDATE_IF_NOEXIST | \
+ XDP_FLAGS_SKB_MODE)
enum {
IFLA_XDP_UNSPEC,
diff --git a/include/uapi/linux/if_packet.h b/include/uapi/linux/if_packet.h
index 9e7edfd8141e..4df96a7dd4fa 100644
--- a/include/uapi/linux/if_packet.h
+++ b/include/uapi/linux/if_packet.h
@@ -66,6 +66,7 @@ struct sockaddr_ll {
#define PACKET_FANOUT_CBPF 6
#define PACKET_FANOUT_EBPF 7
#define PACKET_FANOUT_FLAG_ROLLOVER 0x1000
+#define PACKET_FANOUT_FLAG_UNIQUEID 0x2000
#define PACKET_FANOUT_FLAG_DEFRAG 0x8000
struct tpacket_stats {
diff --git a/include/uapi/linux/if_tunnel.h b/include/uapi/linux/if_tunnel.h
index 92f3c8677523..6792d1967d31 100644
--- a/include/uapi/linux/if_tunnel.h
+++ b/include/uapi/linux/if_tunnel.h
@@ -75,6 +75,7 @@ enum {
IFLA_IPTUN_ENCAP_SPORT,
IFLA_IPTUN_ENCAP_DPORT,
IFLA_IPTUN_COLLECT_METADATA,
+ IFLA_IPTUN_FWMARK,
__IFLA_IPTUN_MAX,
};
#define IFLA_IPTUN_MAX (__IFLA_IPTUN_MAX - 1)
@@ -132,6 +133,7 @@ enum {
IFLA_GRE_ENCAP_DPORT,
IFLA_GRE_COLLECT_METADATA,
IFLA_GRE_IGNORE_DF,
+ IFLA_GRE_FWMARK,
__IFLA_GRE_MAX,
};
@@ -147,6 +149,7 @@ enum {
IFLA_VTI_OKEY,
IFLA_VTI_LOCAL,
IFLA_VTI_REMOTE,
+ IFLA_VTI_FWMARK,
__IFLA_VTI_MAX,
};
diff --git a/include/uapi/linux/input-event-codes.h b/include/uapi/linux/input-event-codes.h
index 3af60ee69053..f5a8d96e1e09 100644
--- a/include/uapi/linux/input-event-codes.h
+++ b/include/uapi/linux/input-event-codes.h
@@ -641,6 +641,7 @@
* e.g. teletext or data broadcast application (MHEG, MHP, HbbTV, etc.)
*/
#define KEY_DATA 0x277
+#define KEY_ONSCREEN_KEYBOARD 0x278
#define BTN_TRIGGER_HAPPY 0x2c0
#define BTN_TRIGGER_HAPPY1 0x2c0
diff --git a/include/uapi/linux/input.h b/include/uapi/linux/input.h
index e794f7bee22f..f561c0eb7d63 100644
--- a/include/uapi/linux/input.h
+++ b/include/uapi/linux/input.h
@@ -61,9 +61,14 @@ struct input_id {
* Note that input core does not clamp reported values to the
* [minimum, maximum] limits, such task is left to userspace.
*
- * Resolution for main axes (ABS_X, ABS_Y, ABS_Z) is reported in
- * units per millimeter (units/mm), resolution for rotational axes
- * (ABS_RX, ABS_RY, ABS_RZ) is reported in units per radian.
+ * The default resolution for main axes (ABS_X, ABS_Y, ABS_Z)
+ * is reported in units per millimeter (units/mm), resolution
+ * for rotational axes (ABS_RX, ABS_RY, ABS_RZ) is reported
+ * in units per radian.
+ * When INPUT_PROP_ACCELEROMETER is set the resolution changes.
+ * The main axes (ABS_X, ABS_Y, ABS_Z) are then reported in
+ * units per g (units/g) and in units per degree per second
+ * (units/deg/s) for rotational axes (ABS_RX, ABS_RY, ABS_RZ).
*/
struct input_absinfo {
__s32 value;
diff --git a/include/uapi/linux/ipmi.h b/include/uapi/linux/ipmi.h
index 7b26a62e5707..b9095a27a08a 100644
--- a/include/uapi/linux/ipmi.h
+++ b/include/uapi/linux/ipmi.h
@@ -355,7 +355,7 @@ struct ipmi_cmdspec {
#define IPMICTL_REGISTER_FOR_CMD _IOR(IPMI_IOC_MAGIC, 14, \
struct ipmi_cmdspec)
/*
- * Unregister a regsitered command. error values:
+ * Unregister a registered command. error values:
* - EFAULT - an address supplied was invalid.
* - ENOENT - The netfn/cmd was not found registered for this user.
*/
diff --git a/include/uapi/linux/ipv6.h b/include/uapi/linux/ipv6.h
index 8ef9e75e004e..2ae59178189d 100644
--- a/include/uapi/linux/ipv6.h
+++ b/include/uapi/linux/ipv6.h
@@ -183,6 +183,8 @@ enum {
DEVCONF_SEG6_REQUIRE_HMAC,
DEVCONF_ENHANCED_DAD,
DEVCONF_ADDR_GEN_MODE,
+ DEVCONF_DISABLE_POLICY,
+ DEVCONF_ACCEPT_RA_RT_INFO_MIN_PLEN,
DEVCONF_MAX
};
diff --git a/include/uapi/linux/keyctl.h b/include/uapi/linux/keyctl.h
index 86eddd6241f3..201c6644b237 100644
--- a/include/uapi/linux/keyctl.h
+++ b/include/uapi/linux/keyctl.h
@@ -60,6 +60,7 @@
#define KEYCTL_INVALIDATE 21 /* invalidate a key */
#define KEYCTL_GET_PERSISTENT 22 /* get a user's persistent keyring */
#define KEYCTL_DH_COMPUTE 23 /* Compute Diffie-Hellman values */
+#define KEYCTL_RESTRICT_KEYRING 29 /* Restrict keys allowed to link to a keyring */
/* keyctl structures */
struct keyctl_dh_params {
@@ -68,4 +69,11 @@ struct keyctl_dh_params {
__s32 base;
};
+struct keyctl_kdf_params {
+ char *hashname;
+ char *otherinfo;
+ __u32 otherinfolen;
+ __u32 __spare[8];
+};
+
#endif /* _LINUX_KEYCTL_H */
diff --git a/include/uapi/linux/kvm.h b/include/uapi/linux/kvm.h
index f51d5082a377..577429a95ad8 100644
--- a/include/uapi/linux/kvm.h
+++ b/include/uapi/linux/kvm.h
@@ -702,6 +702,10 @@ struct kvm_ppc_resize_hpt {
#define KVM_VM_PPC_HV 1
#define KVM_VM_PPC_PR 2
+/* on MIPS, 0 forces trap & emulate, 1 forces VZ ASE */
+#define KVM_VM_MIPS_TE 0
+#define KVM_VM_MIPS_VZ 1
+
#define KVM_S390_SIE_PAGE_OFFSET 1
/*
@@ -883,6 +887,14 @@ struct kvm_ppc_resize_hpt {
#define KVM_CAP_PPC_MMU_RADIX 134
#define KVM_CAP_PPC_MMU_HASH_V3 135
#define KVM_CAP_IMMEDIATE_EXIT 136
+#define KVM_CAP_MIPS_VZ 137
+#define KVM_CAP_MIPS_TE 138
+#define KVM_CAP_MIPS_64BIT 139
+#define KVM_CAP_S390_GS 140
+#define KVM_CAP_S390_AIS 141
+#define KVM_CAP_SPAPR_TCE_VFIO 142
+#define KVM_CAP_X86_GUEST_MWAIT 143
+#define KVM_CAP_ARM_USER_IRQ 144
#ifdef KVM_CAP_IRQ_ROUTING
@@ -1087,6 +1099,7 @@ struct kvm_device_attr {
#define KVM_DEV_VFIO_GROUP 1
#define KVM_DEV_VFIO_GROUP_ADD 1
#define KVM_DEV_VFIO_GROUP_DEL 2
+#define KVM_DEV_VFIO_GROUP_SET_SPAPR_TCE 3
enum kvm_device_type {
KVM_DEV_TYPE_FSL_MPIC_20 = 1,
@@ -1108,6 +1121,11 @@ enum kvm_device_type {
KVM_DEV_TYPE_MAX,
};
+struct kvm_vfio_spapr_tce {
+ __s32 groupfd;
+ __s32 tablefd;
+};
+
/*
* ioctls for VM fds
*/
@@ -1354,4 +1372,11 @@ struct kvm_assigned_msix_entry {
#define KVM_X2APIC_API_USE_32BIT_IDS (1ULL << 0)
#define KVM_X2APIC_API_DISABLE_BROADCAST_QUIRK (1ULL << 1)
+/* Available with KVM_CAP_ARM_USER_IRQ */
+
+/* Bits for run->s.regs.device_irq_level */
+#define KVM_ARM_DEV_EL1_VTIMER (1 << 0)
+#define KVM_ARM_DEV_EL1_PTIMER (1 << 1)
+#define KVM_ARM_DEV_PMU (1 << 2)
+
#endif /* __LINUX_KVM_H */
diff --git a/include/uapi/linux/lightnvm.h b/include/uapi/linux/lightnvm.h
index fd19f36b3129..c8aec4b9e73b 100644
--- a/include/uapi/linux/lightnvm.h
+++ b/include/uapi/linux/lightnvm.h
@@ -85,6 +85,10 @@ struct nvm_ioctl_create_conf {
};
};
+enum {
+ NVM_TARGET_FACTORY = 1 << 0, /* Init target in factory mode */
+};
+
struct nvm_ioctl_create {
char dev[DISK_NAME_LEN]; /* open-channel SSD device */
char tgttype[NVM_TTYPE_NAME_MAX]; /* target type name */
diff --git a/include/uapi/linux/media-bus-format.h b/include/uapi/linux/media-bus-format.h
index 2168759c1287..ef6fb307d2ce 100644
--- a/include/uapi/linux/media-bus-format.h
+++ b/include/uapi/linux/media-bus-format.h
@@ -33,7 +33,7 @@
#define MEDIA_BUS_FMT_FIXED 0x0001
-/* RGB - next is 0x1018 */
+/* RGB - next is 0x101b */
#define MEDIA_BUS_FMT_RGB444_1X12 0x1016
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_BE 0x1001
#define MEDIA_BUS_FMT_RGB444_2X8_PADHI_LE 0x1002
@@ -57,8 +57,11 @@
#define MEDIA_BUS_FMT_RGB888_1X7X4_JEIDA 0x1012
#define MEDIA_BUS_FMT_ARGB8888_1X32 0x100d
#define MEDIA_BUS_FMT_RGB888_1X32_PADHI 0x100f
+#define MEDIA_BUS_FMT_RGB101010_1X30 0x1018
+#define MEDIA_BUS_FMT_RGB121212_1X36 0x1019
+#define MEDIA_BUS_FMT_RGB161616_1X48 0x101a
-/* YUV (including grey) - next is 0x2026 */
+/* YUV (including grey) - next is 0x202c */
#define MEDIA_BUS_FMT_Y8_1X8 0x2001
#define MEDIA_BUS_FMT_UV8_1X8 0x2015
#define MEDIA_BUS_FMT_UYVY8_1_5X8 0x2002
@@ -90,12 +93,18 @@
#define MEDIA_BUS_FMT_YVYU10_1X20 0x200e
#define MEDIA_BUS_FMT_VUY8_1X24 0x2024
#define MEDIA_BUS_FMT_YUV8_1X24 0x2025
+#define MEDIA_BUS_FMT_UYYVYY8_0_5X24 0x2026
#define MEDIA_BUS_FMT_UYVY12_1X24 0x2020
#define MEDIA_BUS_FMT_VYUY12_1X24 0x2021
#define MEDIA_BUS_FMT_YUYV12_1X24 0x2022
#define MEDIA_BUS_FMT_YVYU12_1X24 0x2023
#define MEDIA_BUS_FMT_YUV10_1X30 0x2016
+#define MEDIA_BUS_FMT_UYYVYY10_0_5X30 0x2027
#define MEDIA_BUS_FMT_AYUV8_1X32 0x2017
+#define MEDIA_BUS_FMT_UYYVYY12_0_5X36 0x2028
+#define MEDIA_BUS_FMT_YUV12_1X36 0x2029
+#define MEDIA_BUS_FMT_YUV16_1X48 0x202a
+#define MEDIA_BUS_FMT_UYYVYY16_0_5X48 0x202b
/* Bayer - next is 0x3021 */
#define MEDIA_BUS_FMT_SBGGR8_1X8 0x3001
diff --git a/include/uapi/linux/mpls_iptunnel.h b/include/uapi/linux/mpls_iptunnel.h
index d80a0498f77e..f5e45095b0bb 100644
--- a/include/uapi/linux/mpls_iptunnel.h
+++ b/include/uapi/linux/mpls_iptunnel.h
@@ -16,11 +16,13 @@
/* MPLS tunnel attributes
* [RTA_ENCAP] = {
* [MPLS_IPTUNNEL_DST]
+ * [MPLS_IPTUNNEL_TTL]
* }
*/
enum {
MPLS_IPTUNNEL_UNSPEC,
MPLS_IPTUNNEL_DST,
+ MPLS_IPTUNNEL_TTL,
__MPLS_IPTUNNEL_MAX,
};
#define MPLS_IPTUNNEL_MAX (__MPLS_IPTUNNEL_MAX - 1)
diff --git a/include/uapi/linux/nbd-netlink.h b/include/uapi/linux/nbd-netlink.h
new file mode 100644
index 000000000000..6f7ca3d63a65
--- /dev/null
+++ b/include/uapi/linux/nbd-netlink.h
@@ -0,0 +1,98 @@
+/*
+ * Copyright (C) 2017 Facebook. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or
+ * modify it under the terms of the GNU General Public
+ * License v2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ * General Public License for more details.
+ *
+ * You should have received a copy of the GNU General Public
+ * License along with this program; if not, write to the
+ * Free Software Foundation, Inc., 59 Temple Place - Suite 330,
+ * Boston, MA 021110-1307, USA.
+ */
+#ifndef _UAPILINUX_NBD_NETLINK_H
+#define _UAPILINUX_NBD_NETLINK_H
+
+#define NBD_GENL_FAMILY_NAME "nbd"
+#define NBD_GENL_VERSION 0x1
+#define NBD_GENL_MCAST_GROUP_NAME "nbd_mc_group"
+
+/* Configuration policy attributes, used for CONNECT */
+enum {
+ NBD_ATTR_UNSPEC,
+ NBD_ATTR_INDEX,
+ NBD_ATTR_SIZE_BYTES,
+ NBD_ATTR_BLOCK_SIZE_BYTES,
+ NBD_ATTR_TIMEOUT,
+ NBD_ATTR_SERVER_FLAGS,
+ NBD_ATTR_CLIENT_FLAGS,
+ NBD_ATTR_SOCKETS,
+ NBD_ATTR_DEAD_CONN_TIMEOUT,
+ NBD_ATTR_DEVICE_LIST,
+ __NBD_ATTR_MAX,
+};
+#define NBD_ATTR_MAX (__NBD_ATTR_MAX - 1)
+
+/*
+ * This is the format for multiple devices with NBD_ATTR_DEVICE_LIST
+ *
+ * [NBD_ATTR_DEVICE_LIST]
+ * [NBD_DEVICE_ITEM]
+ * [NBD_DEVICE_INDEX]
+ * [NBD_DEVICE_CONNECTED]
+ */
+enum {
+ NBD_DEVICE_ITEM_UNSPEC,
+ NBD_DEVICE_ITEM,
+ __NBD_DEVICE_ITEM_MAX,
+};
+#define NBD_DEVICE_ITEM_MAX (__NBD_DEVICE_ITEM_MAX - 1)
+
+enum {
+ NBD_DEVICE_UNSPEC,
+ NBD_DEVICE_INDEX,
+ NBD_DEVICE_CONNECTED,
+ __NBD_DEVICE_MAX,
+};
+#define NBD_DEVICE_ATTR_MAX (__NBD_DEVICE_MAX - 1)
+
+/*
+ * This is the format for multiple sockets with NBD_ATTR_SOCKETS
+ *
+ * [NBD_ATTR_SOCKETS]
+ * [NBD_SOCK_ITEM]
+ * [NBD_SOCK_FD]
+ * [NBD_SOCK_ITEM]
+ * [NBD_SOCK_FD]
+ */
+enum {
+ NBD_SOCK_ITEM_UNSPEC,
+ NBD_SOCK_ITEM,
+ __NBD_SOCK_ITEM_MAX,
+};
+#define NBD_SOCK_ITEM_MAX (__NBD_SOCK_ITEM_MAX - 1)
+
+enum {
+ NBD_SOCK_UNSPEC,
+ NBD_SOCK_FD,
+ __NBD_SOCK_MAX,
+};
+#define NBD_SOCK_MAX (__NBD_SOCK_MAX - 1)
+
+enum {
+ NBD_CMD_UNSPEC,
+ NBD_CMD_CONNECT,
+ NBD_CMD_DISCONNECT,
+ NBD_CMD_RECONFIGURE,
+ NBD_CMD_LINK_DEAD,
+ NBD_CMD_STATUS,
+ __NBD_CMD_MAX,
+};
+#define NBD_CMD_MAX (__NBD_CMD_MAX - 1)
+
+#endif /* _UAPILINUX_NBD_NETLINK_H */
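For the nested layouts described in the comments above, a rough userspace sketch (not part of the patch) of an NBD_CMD_CONNECT request follows. It assumes libnl-genl-3 for the generic-netlink plumbing and omits error handling; sock_fd is a socket already connected to the NBD server.

#include <stdint.h>
#include <netlink/netlink.h>
#include <netlink/genl/genl.h>
#include <netlink/genl/ctrl.h>
#include <linux/nbd-netlink.h>

int nbd_genl_connect(int index, int sock_fd, uint64_t size_bytes)
{
        struct nl_sock *sk = nl_socket_alloc();
        struct nlattr *socks, *item;
        struct nl_msg *msg;
        int family, err;

        genl_connect(sk);
        family = genl_ctrl_resolve(sk, NBD_GENL_FAMILY_NAME);

        msg = nlmsg_alloc();
        genlmsg_put(msg, NL_AUTO_PORT, NL_AUTO_SEQ, family, 0, 0,
                    NBD_CMD_CONNECT, NBD_GENL_VERSION);
        nla_put_u32(msg, NBD_ATTR_INDEX, index);
        nla_put_u64(msg, NBD_ATTR_SIZE_BYTES, size_bytes);

        /* NBD_ATTR_SOCKETS carries a list of nested NBD_SOCK_ITEMs. */
        socks = nla_nest_start(msg, NBD_ATTR_SOCKETS);
        item = nla_nest_start(msg, NBD_SOCK_ITEM);
        nla_put_u32(msg, NBD_SOCK_FD, sock_fd);
        nla_nest_end(msg, item);
        nla_nest_end(msg, socks);

        err = nl_send_sync(sk, msg);    /* send the request, wait for the ACK */
        nl_socket_free(sk);
        return err;
}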
diff --git a/include/uapi/linux/nbd.h b/include/uapi/linux/nbd.h
index c91c642ea900..155e33f81913 100644
--- a/include/uapi/linux/nbd.h
+++ b/include/uapi/linux/nbd.h
@@ -37,7 +37,7 @@ enum {
NBD_CMD_TRIM = 4
};
-/* values for flags field */
+/* values for flags field, these are server interaction specific. */
#define NBD_FLAG_HAS_FLAGS (1 << 0) /* nbd-server supports flags */
#define NBD_FLAG_READ_ONLY (1 << 1) /* device is read-only */
#define NBD_FLAG_SEND_FLUSH (1 << 2) /* can flush writeback cache */
@@ -45,6 +45,10 @@ enum {
#define NBD_FLAG_SEND_TRIM (1 << 5) /* send trim/discard */
#define NBD_FLAG_CAN_MULTI_CONN (1 << 8) /* Server supports multiple connections per export. */
+/* These are client behavior specific flags. */
+#define NBD_CFLAG_DESTROY_ON_DISCONNECT (1 << 0) /* delete the nbd device on
+ disconnect. */
+
/* userspace doesn't need the nbd_device structure */
/* These are sent over the network in the request/reply magic fields */
diff --git a/include/uapi/linux/ndctl.h b/include/uapi/linux/ndctl.h
index ede5c6a62164..7ad3863cb88b 100644
--- a/include/uapi/linux/ndctl.h
+++ b/include/uapi/linux/ndctl.h
@@ -169,6 +169,7 @@ enum {
enum {
ND_ARS_VOLATILE = 1,
ND_ARS_PERSISTENT = 2,
+ ND_CONFIG_LOCKED = 1,
};
static inline const char *nvdimm_bus_cmd_name(unsigned cmd)
diff --git a/include/uapi/linux/netfilter/nf_conntrack_common.h b/include/uapi/linux/netfilter/nf_conntrack_common.h
index 6a8e33dd4ecb..dc947e59d03a 100644
--- a/include/uapi/linux/netfilter/nf_conntrack_common.h
+++ b/include/uapi/linux/netfilter/nf_conntrack_common.h
@@ -28,12 +28,14 @@ enum ip_conntrack_info {
/* only for userspace compatibility */
#ifndef __KERNEL__
IP_CT_NEW_REPLY = IP_CT_NUMBER,
+#else
+ IP_CT_UNTRACKED = 7,
#endif
};
#define NF_CT_STATE_INVALID_BIT (1 << 0)
#define NF_CT_STATE_BIT(ctinfo) (1 << ((ctinfo) % IP_CT_IS_REPLY + 1))
-#define NF_CT_STATE_UNTRACKED_BIT (1 << (IP_CT_NUMBER + 1))
+#define NF_CT_STATE_UNTRACKED_BIT (1 << (IP_CT_UNTRACKED + 1))
/* Bitset representing status of connection. */
enum ip_conntrack_status {
@@ -82,10 +84,6 @@ enum ip_conntrack_status {
IPS_DYING_BIT = 9,
IPS_DYING = (1 << IPS_DYING_BIT),
- /* Bits that cannot be altered from userland. */
- IPS_UNCHANGEABLE_MASK = (IPS_NAT_DONE_MASK | IPS_NAT_MASK |
- IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING),
-
/* Connection has fixed timeout. */
IPS_FIXED_TIMEOUT_BIT = 10,
IPS_FIXED_TIMEOUT = (1 << IPS_FIXED_TIMEOUT_BIT),
@@ -94,13 +92,22 @@ enum ip_conntrack_status {
IPS_TEMPLATE_BIT = 11,
IPS_TEMPLATE = (1 << IPS_TEMPLATE_BIT),
- /* Conntrack is a fake untracked entry */
+ /* Conntrack is a fake untracked entry. Obsolete and not used anymore */
IPS_UNTRACKED_BIT = 12,
IPS_UNTRACKED = (1 << IPS_UNTRACKED_BIT),
/* Conntrack got a helper explicitly attached via CT target. */
IPS_HELPER_BIT = 13,
IPS_HELPER = (1 << IPS_HELPER_BIT),
+
+ /* Be careful here, modifying these bits can make things messy,
+ * so don't let users modify them directly.
+ */
+ IPS_UNCHANGEABLE_MASK = (IPS_NAT_DONE_MASK | IPS_NAT_MASK |
+ IPS_EXPECTED | IPS_CONFIRMED | IPS_DYING |
+ IPS_SEQ_ADJUST | IPS_TEMPLATE),
+
+ __IPS_MAX_BIT = 14,
};
/* Connection tracking event types */
@@ -117,6 +124,9 @@ enum ip_conntrack_events {
IPCT_NATSEQADJ = IPCT_SEQADJ,
IPCT_SECMARK, /* new security mark has been set */
IPCT_LABEL, /* new connlabel has been set */
+#ifdef __KERNEL__
+ __IPCT_MAX
+#endif
};
enum ip_conntrack_expect_events {
diff --git a/include/uapi/linux/netfilter/nf_tables.h b/include/uapi/linux/netfilter/nf_tables.h
index 05215d30fe5c..683f6f88fcac 100644
--- a/include/uapi/linux/netfilter/nf_tables.h
+++ b/include/uapi/linux/netfilter/nf_tables.h
@@ -816,6 +816,17 @@ enum nft_rt_keys {
};
/**
+ * enum nft_hash_types - nf_tables hash expression types
+ *
+ * @NFT_HASH_JENKINS: Jenkins Hash
+ * @NFT_HASH_SYM: Symmetric Hash
+ */
+enum nft_hash_types {
+ NFT_HASH_JENKINS,
+ NFT_HASH_SYM,
+};
+
+/**
* enum nft_hash_attributes - nf_tables hash expression netlink attributes
*
* @NFTA_HASH_SREG: source register (NLA_U32)
@@ -824,6 +835,7 @@ enum nft_rt_keys {
* @NFTA_HASH_MODULUS: modulus value (NLA_U32)
* @NFTA_HASH_SEED: seed value (NLA_U32)
* @NFTA_HASH_OFFSET: add this offset value to hash result (NLA_U32)
+ * @NFTA_HASH_TYPE: hash operation (NLA_U32: nft_hash_types)
*/
enum nft_hash_attributes {
NFTA_HASH_UNSPEC,
@@ -833,6 +845,7 @@ enum nft_hash_attributes {
NFTA_HASH_MODULUS,
NFTA_HASH_SEED,
NFTA_HASH_OFFSET,
+ NFTA_HASH_TYPE,
__NFTA_HASH_MAX,
};
#define NFTA_HASH_MAX (__NFTA_HASH_MAX - 1)
@@ -888,6 +901,7 @@ enum nft_rt_attributes {
* @NFT_CT_BYTES: conntrack bytes
* @NFT_CT_AVGPKT: conntrack average bytes per packet
* @NFT_CT_ZONE: conntrack zone
+ * @NFT_CT_EVENTMASK: ctnetlink events to be generated for this conntrack
*/
enum nft_ct_keys {
NFT_CT_STATE,
@@ -908,6 +922,7 @@ enum nft_ct_keys {
NFT_CT_BYTES,
NFT_CT_AVGPKT,
NFT_CT_ZONE,
+ NFT_CT_EVENTMASK,
};
/**
@@ -1244,12 +1259,23 @@ enum nft_fib_flags {
NFTA_FIB_F_MARK = 1 << 2, /* use skb->mark */
NFTA_FIB_F_IIF = 1 << 3, /* restrict to iif */
NFTA_FIB_F_OIF = 1 << 4, /* restrict to oif */
+ NFTA_FIB_F_PRESENT = 1 << 5, /* check existence only */
+};
+
+enum nft_ct_helper_attributes {
+ NFTA_CT_HELPER_UNSPEC,
+ NFTA_CT_HELPER_NAME,
+ NFTA_CT_HELPER_L3PROTO,
+ NFTA_CT_HELPER_L4PROTO,
+ __NFTA_CT_HELPER_MAX,
};
+#define NFTA_CT_HELPER_MAX (__NFTA_CT_HELPER_MAX - 1)
#define NFT_OBJECT_UNSPEC 0
#define NFT_OBJECT_COUNTER 1
#define NFT_OBJECT_QUOTA 2
-#define __NFT_OBJECT_MAX 3
+#define NFT_OBJECT_CT_HELPER 3
+#define __NFT_OBJECT_MAX 4
#define NFT_OBJECT_MAX (__NFT_OBJECT_MAX - 1)
/**
diff --git a/include/uapi/linux/netlink.h b/include/uapi/linux/netlink.h
index f3946a27bd07..f86127a46cfc 100644
--- a/include/uapi/linux/netlink.h
+++ b/include/uapi/linux/netlink.h
@@ -50,12 +50,12 @@ struct nlmsghdr {
/* Flags values */
-#define NLM_F_REQUEST 1 /* It is request message. */
-#define NLM_F_MULTI 2 /* Multipart message, terminated by NLMSG_DONE */
-#define NLM_F_ACK 4 /* Reply with ack, with zero or error code */
-#define NLM_F_ECHO 8 /* Echo this request */
-#define NLM_F_DUMP_INTR 16 /* Dump was inconsistent due to sequence change */
-#define NLM_F_DUMP_FILTERED 32 /* Dump was filtered as requested */
+#define NLM_F_REQUEST 0x01 /* It is request message. */
+#define NLM_F_MULTI 0x02 /* Multipart message, terminated by NLMSG_DONE */
+#define NLM_F_ACK 0x04 /* Reply with ack, with zero or error code */
+#define NLM_F_ECHO 0x08 /* Echo this request */
+#define NLM_F_DUMP_INTR 0x10 /* Dump was inconsistent due to sequence change */
+#define NLM_F_DUMP_FILTERED 0x20 /* Dump was filtered as requested */
/* Modifiers to GET request */
#define NLM_F_ROOT 0x100 /* specify tree root */
@@ -69,6 +69,10 @@ struct nlmsghdr {
#define NLM_F_CREATE 0x400 /* Create, if it does not exist */
#define NLM_F_APPEND 0x800 /* Add to end of list */
+/* Flags for ACK message */
+#define NLM_F_CAPPED 0x100 /* request was capped */
+#define NLM_F_ACK_TLVS 0x200 /* extended ACK TLVs were included */
+
/*
4.4BSD ADD NLM_F_CREATE|NLM_F_EXCL
4.4BSD CHANGE NLM_F_REPLACE
@@ -101,6 +105,37 @@ struct nlmsghdr {
struct nlmsgerr {
int error;
struct nlmsghdr msg;
+ /*
+ * followed by the message contents unless NETLINK_CAP_ACK was set
+ * or the ACK indicates success (error == 0)
+ * message length is aligned with NLMSG_ALIGN()
+ */
+ /*
+ * followed by TLVs defined in enum nlmsgerr_attrs
+ * if NETLINK_EXT_ACK was set
+ */
+};
+
+/**
+ * enum nlmsgerr_attrs - nlmsgerr attributes
+ * @NLMSGERR_ATTR_UNUSED: unused
+ * @NLMSGERR_ATTR_MSG: error message string (string)
+ * @NLMSGERR_ATTR_OFFS: offset of the invalid attribute in the original
+ * message, counting from the beginning of the header (u32)
+ * @NLMSGERR_ATTR_COOKIE: arbitrary subsystem specific cookie to
+ * be used - in the success case - to identify a created
+ * object or operation or similar (binary)
+ * @__NLMSGERR_ATTR_MAX: number of attributes
+ * @NLMSGERR_ATTR_MAX: highest attribute number
+ */
+enum nlmsgerr_attrs {
+ NLMSGERR_ATTR_UNUSED,
+ NLMSGERR_ATTR_MSG,
+ NLMSGERR_ATTR_OFFS,
+ NLMSGERR_ATTR_COOKIE,
+
+ __NLMSGERR_ATTR_MAX,
+ NLMSGERR_ATTR_MAX = __NLMSGERR_ATTR_MAX - 1
};
#define NETLINK_ADD_MEMBERSHIP 1
@@ -115,6 +150,7 @@ struct nlmsgerr {
#define NETLINK_LISTEN_ALL_NSID 8
#define NETLINK_LIST_MEMBERSHIPS 9
#define NETLINK_CAP_ACK 10
+#define NETLINK_EXT_ACK 11
struct nl_pktinfo {
__u32 group;
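A minimal userspace sketch (not part of the patch) of how the extended ACK additions above could be consumed: the socket opts in with NETLINK_EXT_ACK, and when an NLMSG_ERROR message arrives with NLM_F_ACK_TLVS set, the TLVs defined in enum nlmsgerr_attrs follow the echoed request. The offset arithmetic mirrors what existing netlink libraries do, but treat it as an illustration only.

#include <stdio.h>
#include <sys/socket.h>
#include <linux/netlink.h>

#ifndef SOL_NETLINK
#define SOL_NETLINK 270
#endif

/* Done once, right after the netlink socket is created. */
static void enable_ext_ack(int fd)
{
        int one = 1;

        setsockopt(fd, SOL_NETLINK, NETLINK_EXT_ACK, &one, sizeof(one));
}

/* Print NLMSGERR_ATTR_MSG from an NLMSG_ERROR message, if present. */
static void dump_ext_ack(const struct nlmsghdr *nlh)
{
        const struct nlmsgerr *err = NLMSG_DATA(nlh);
        unsigned int hlen = sizeof(*err);
        const struct nlattr *attr;
        int rem;

        if (!(nlh->nlmsg_flags & NLM_F_ACK_TLVS))
                return;                         /* no TLVs were appended */

        /* Skip the echoed request unless the kernel capped it. */
        if (!(nlh->nlmsg_flags & NLM_F_CAPPED))
                hlen += err->msg.nlmsg_len - NLMSG_HDRLEN;

        attr = (const struct nlattr *)((const char *)err + NLMSG_ALIGN(hlen));
        rem = NLMSG_PAYLOAD(nlh, 0) - NLMSG_ALIGN(hlen);

        while (rem >= (int)sizeof(*attr) && attr->nla_len >= sizeof(*attr)) {
                if ((attr->nla_type & NLA_TYPE_MASK) == NLMSGERR_ATTR_MSG)
                        printf("extack: %s\n", (const char *)attr + NLA_HDRLEN);
                rem -= NLA_ALIGN(attr->nla_len);
                attr = (const struct nlattr *)((const char *)attr +
                                               NLA_ALIGN(attr->nla_len));
        }
}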
diff --git a/include/uapi/linux/netlink_diag.h b/include/uapi/linux/netlink_diag.h
index 76b4d87c83a8..6dcd4de3397b 100644
--- a/include/uapi/linux/netlink_diag.h
+++ b/include/uapi/linux/netlink_diag.h
@@ -38,6 +38,7 @@ enum {
NETLINK_DIAG_GROUPS,
NETLINK_DIAG_RX_RING,
NETLINK_DIAG_TX_RING,
+ NETLINK_DIAG_FLAGS,
__NETLINK_DIAG_MAX,
};
@@ -52,5 +53,14 @@ enum {
/* deprecated since 4.6 */
#define NDIAG_SHOW_RING_CFG 0x00000004 /* show ring configuration */
#endif
+#define NDIAG_SHOW_FLAGS 0x00000008 /* show flags of a netlink socket */
+
+/* flags */
+#define NDIAG_FLAG_CB_RUNNING 0x00000001
+#define NDIAG_FLAG_PKTINFO 0x00000002
+#define NDIAG_FLAG_BROADCAST_ERROR 0x00000004
+#define NDIAG_FLAG_NO_ENOBUFS 0x00000008
+#define NDIAG_FLAG_LISTEN_ALL_NSID 0x00000010
+#define NDIAG_FLAG_CAP_ACK 0x00000020
#endif
diff --git a/include/uapi/linux/nl80211.h b/include/uapi/linux/nl80211.h
index 5ed257c4cd4e..b8c44b98f12d 100644
--- a/include/uapi/linux/nl80211.h
+++ b/include/uapi/linux/nl80211.h
@@ -173,6 +173,42 @@
*/
/**
+ * DOC: FILS shared key authentication offload
+ *
+ * FILS shared key authentication offload can be advertized by drivers by
+ * setting @NL80211_EXT_FEATURE_FILS_SK_OFFLOAD flag. The drivers that support
+ * FILS shared key authentication offload should be able to construct the
+ * authentication and association frames for FILS shared key authentication and
+ * eventually do a key derivation as per IEEE 802.11ai. The following additional
+ * parameters should be given to the driver in %NL80211_CMD_CONNECT.
+ * %NL80211_ATTR_FILS_ERP_USERNAME - used to construct keyname_nai
+ * %NL80211_ATTR_FILS_ERP_REALM - used to construct keyname_nai
+ * %NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM - used to construct erp message
+ * %NL80211_ATTR_FILS_ERP_RRK - used to generate the rIK and rMSK
+ * rIK should be used to generate an authentication tag on the ERP message and
+ * rMSK should be used to derive a PMKSA.
+ * rIK, rMSK should be generated and keyname_nai, sequence number should be used
+ * as specified in IETF RFC 6696.
+ *
+ * When FILS shared key authentication is completed, the driver needs to provide
+ * the following additional parameters to userspace.
+ * %NL80211_ATTR_FILS_KEK - used for key renewal
+ * %NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM - used in further EAP-RP exchanges
+ * %NL80211_ATTR_PMKID - used to identify the PMKSA used/generated
+ * %NL80211_ATTR_PMK - used to update the PMKSA cache in userspace
+ * The PMKSA can be maintained persistently in userspace so that it can be
+ * reused later, even across reboots or Wi-Fi being turned off and on.
+ *
+ * %NL80211_ATTR_FILS_CACHE_ID is the cache identifier advertized by a FILS
+ * capable AP supporting PMK caching. It specifies the scope within which the
+ * PMKSAs are cached in an ESS. %NL80211_CMD_SET_PMKSA and
+ * %NL80211_CMD_DEL_PMKSA are enhanced to allow support for PMKSA caching based
+ * on FILS cache identifier. Additionally %NL80211_ATTR_PMK is used with
+ * %NL80211_CMD_SET_PMKSA to specify the PMK corresponding to a PMKSA for the
+ * driver to use in a FILS shared key connection with PMKSA caching.
+ */
+
+/**
* enum nl80211_commands - supported nl80211 commands
*
* @NL80211_CMD_UNSPEC: unspecified command to catch errors
@@ -351,7 +387,9 @@
* are used. Extra IEs can also be passed from the userspace by
* using the %NL80211_ATTR_IE attribute. The first cycle of the
* scheduled scan can be delayed by %NL80211_ATTR_SCHED_SCAN_DELAY
- * is supplied.
+ * is supplied. If the device supports multiple concurrent scheduled
+ * scans, they are allowed only when the caller provides the flag attribute
+ * %NL80211_ATTR_SCHED_SCAN_MULTI to indicate user-space support for it.
* @NL80211_CMD_STOP_SCHED_SCAN: stop a scheduled scan. Returns -ENOENT if
* scheduled scan is not running. The caller may assume that as soon
* as the call returns, it is safe to start a new scheduled scan again.
@@ -370,10 +408,18 @@
* @NL80211_CMD_NEW_SURVEY_RESULTS: survey data notification (as a reply to
* NL80211_CMD_GET_SURVEY and on the "scan" multicast group)
*
- * @NL80211_CMD_SET_PMKSA: Add a PMKSA cache entry, using %NL80211_ATTR_MAC
- * (for the BSSID) and %NL80211_ATTR_PMKID.
+ * @NL80211_CMD_SET_PMKSA: Add a PMKSA cache entry using %NL80211_ATTR_MAC
+ * (for the BSSID), %NL80211_ATTR_PMKID, and optionally %NL80211_ATTR_PMK
+ * (PMK is used for PTKSA derivation in case of FILS shared key offload) or
+ * using %NL80211_ATTR_SSID, %NL80211_ATTR_FILS_CACHE_ID,
+ * %NL80211_ATTR_PMKID, and %NL80211_ATTR_PMK in case of FILS
+ * authentication where %NL80211_ATTR_FILS_CACHE_ID is the identifier
+ * advertized by a FILS capable AP identifying the scope of PMKSA in an
+ * ESS.
* @NL80211_CMD_DEL_PMKSA: Delete a PMKSA cache entry, using %NL80211_ATTR_MAC
- * (for the BSSID) and %NL80211_ATTR_PMKID.
+ * (for the BSSID) and %NL80211_ATTR_PMKID or using %NL80211_ATTR_SSID,
+ * %NL80211_ATTR_FILS_CACHE_ID, and %NL80211_ATTR_PMKID in case of FILS
+ * authentication.
* @NL80211_CMD_FLUSH_PMKSA: Flush all PMKSA cache entries.
*
* @NL80211_CMD_REG_CHANGE: indicates to userspace the regulatory domain
@@ -2012,6 +2058,36 @@ enum nl80211_commands {
* u32 attribute with an &enum nl80211_timeout_reason value. This is used,
* e.g., with %NL80211_CMD_CONNECT event.
*
+ * @NL80211_ATTR_FILS_ERP_USERNAME: EAP Re-authentication Protocol (ERP)
+ * username part of NAI used to refer keys rRK and rIK. This is used with
+ * %NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_FILS_ERP_REALM: EAP Re-authentication Protocol (ERP) realm part
+ * of NAI specifying the domain name of the ER server. This is used with
+ * %NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM: Unsigned 16-bit ERP next sequence number
+ * to use in ERP messages. This is used in generating the FILS wrapped data
+ * for FILS authentication and is used with %NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_FILS_ERP_RRK: ERP re-authentication Root Key (rRK) for the
+ * NAI specified by %NL80211_ATTR_FILS_ERP_USERNAME and
+ * %NL80211_ATTR_FILS_ERP_REALM. This is used for generating rIK and rMSK
+ * from successful FILS authentication and is used with
+ * %NL80211_CMD_CONNECT.
+ *
+ * @NL80211_ATTR_FILS_CACHE_ID: A 2-octet identifier advertized by a FILS AP
+ * identifying the scope of PMKSAs. This is used with
+ * @NL80211_CMD_SET_PMKSA and @NL80211_CMD_DEL_PMKSA.
+ *
+ * @NL80211_ATTR_PMK: PMK for the PMKSA identified by %NL80211_ATTR_PMKID.
+ * This is used with @NL80211_CMD_SET_PMKSA.
+ *
+ * @NL80211_ATTR_SCHED_SCAN_MULTI: flag attribute which user-space shall use to
+ * indicate that it supports multiple active scheduled scan requests.
+ * @NL80211_ATTR_SCHED_SCAN_MAX_REQS: indicates maximum number of scheduled
+ * scan request that may be active for the device (u32).
+ *
* @NUM_NL80211_ATTR: total number of nl80211_attrs available
* @NL80211_ATTR_MAX: highest attribute number currently defined
* @__NL80211_ATTR_AFTER_LAST: internal use
@@ -2423,6 +2499,17 @@ enum nl80211_attrs {
NL80211_ATTR_TIMEOUT_REASON,
+ NL80211_ATTR_FILS_ERP_USERNAME,
+ NL80211_ATTR_FILS_ERP_REALM,
+ NL80211_ATTR_FILS_ERP_NEXT_SEQ_NUM,
+ NL80211_ATTR_FILS_ERP_RRK,
+ NL80211_ATTR_FILS_CACHE_ID,
+
+ NL80211_ATTR_PMK,
+
+ NL80211_ATTR_SCHED_SCAN_MULTI,
+ NL80211_ATTR_SCHED_SCAN_MAX_REQS,
+
/* add attributes here, update the policy in nl80211.c */
__NL80211_ATTR_AFTER_LAST,
@@ -3107,6 +3194,7 @@ enum nl80211_reg_rule_attr {
* @__NL80211_SCHED_SCAN_MATCH_ATTR_INVALID: attribute number 0 is reserved
* @NL80211_SCHED_SCAN_MATCH_ATTR_SSID: SSID to be used for matching,
* only report BSS with matching SSID.
+ * (This cannot be used together with BSSID.)
* @NL80211_SCHED_SCAN_MATCH_ATTR_RSSI: RSSI threshold (in dBm) for reporting a
* BSS in scan results. Filtering is turned off if not specified. Note that
* if this attribute is in a match set of its own, then it is treated as
@@ -3122,6 +3210,8 @@ enum nl80211_reg_rule_attr {
* BSS-es in the specified band is to be adjusted before doing
* RSSI-based BSS selection. The attribute value is a packed structure
* value as specified by &struct nl80211_bss_select_rssi_adjust.
+ * @NL80211_SCHED_SCAN_MATCH_ATTR_BSSID: BSSID to be used for matching
+ * (this cannot be used together with SSID).
* @NL80211_SCHED_SCAN_MATCH_ATTR_MAX: highest scheduled scan filter
* attribute number currently defined
* @__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST: internal use
@@ -3133,6 +3223,7 @@ enum nl80211_sched_scan_match_attr {
NL80211_SCHED_SCAN_MATCH_ATTR_RSSI,
NL80211_SCHED_SCAN_MATCH_ATTR_RELATIVE_RSSI,
NL80211_SCHED_SCAN_MATCH_ATTR_RSSI_ADJUST,
+ NL80211_SCHED_SCAN_MATCH_ATTR_BSSID,
/* keep last */
__NL80211_SCHED_SCAN_MATCH_ATTR_AFTER_LAST,
@@ -3942,7 +4033,10 @@ enum nl80211_ps_state {
* @__NL80211_ATTR_CQM_INVALID: invalid
* @NL80211_ATTR_CQM_RSSI_THOLD: RSSI threshold in dBm. This value specifies
* the threshold for the RSSI level at which an event will be sent. Zero
- * to disable.
+ * to disable. Alternatively, if %NL80211_EXT_FEATURE_CQM_RSSI_LIST is
+ * set, multiple values can be supplied as a low-to-high sorted array of
+ * threshold values in dBm. Events will be sent when the RSSI value
+ * crosses any of the thresholds.
* @NL80211_ATTR_CQM_RSSI_HYST: RSSI hysteresis in dBm. This value specifies
* the minimum amount the RSSI level must change after an event before a
* new event may be issued (to reduce effects of RSSI oscillation).
@@ -4753,6 +4847,11 @@ enum nl80211_feature_flags {
* @NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI: The driver supports sched_scan
* for reporting BSSs with better RSSI than the current connected BSS
* (%NL80211_ATTR_SCHED_SCAN_RELATIVE_RSSI).
+ * @NL80211_EXT_FEATURE_CQM_RSSI_LIST: With this driver the
+ * %NL80211_ATTR_CQM_RSSI_THOLD attribute accepts a list of zero or more
+ * RSSI threshold values to monitor rather than exactly one threshold.
+ * @NL80211_EXT_FEATURE_FILS_SK_OFFLOAD: Driver SME supports FILS shared key
+ * authentication with %NL80211_CMD_CONNECT.
*
* @NUM_NL80211_EXT_FEATURES: number of extended features.
* @MAX_NL80211_EXT_FEATURES: highest extended feature index.
@@ -4771,6 +4870,8 @@ enum nl80211_ext_feature_index {
NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA,
NL80211_EXT_FEATURE_MGMT_TX_RANDOM_TA_CONNECTED,
NL80211_EXT_FEATURE_SCHED_SCAN_RELATIVE_RSSI,
+ NL80211_EXT_FEATURE_CQM_RSSI_LIST,
+ NL80211_EXT_FEATURE_FILS_SK_OFFLOAD,
/* add new features before the definition below */
NUM_NL80211_EXT_FEATURES,
@@ -4906,12 +5007,17 @@ enum nl80211_smps_mode {
* change to the channel status.
* @NL80211_RADAR_NOP_FINISHED: The Non-Occupancy Period for this channel is
* over, channel becomes usable.
+ * @NL80211_RADAR_PRE_CAC_EXPIRED: The Channel Availability Check done on this
+ * non-operating channel has expired and is no longer valid. A new CAC must
+ * be done on this channel before starting the operation. This is not
+ * applicable for the ETSI DFS domain where pre-CAC is valid forever.
*/
enum nl80211_radar_event {
NL80211_RADAR_DETECTED,
NL80211_RADAR_CAC_FINISHED,
NL80211_RADAR_CAC_ABORTED,
NL80211_RADAR_NOP_FINISHED,
+ NL80211_RADAR_PRE_CAC_EXPIRED,
};
/**
diff --git a/include/uapi/linux/nubus.h b/include/uapi/linux/nubus.h
index 77513d2b5638..ac516064f0ee 100644
--- a/include/uapi/linux/nubus.h
+++ b/include/uapi/linux/nubus.h
@@ -113,13 +113,15 @@ enum nubus_drhw {
NUBUS_DRHW_SIGMA_CLRMAX = 0x0007, /* Sigma Design ColorMax */
NUBUS_DRHW_APPLE_SE30 = 0x0009, /* Apple SE/30 video */
NUBUS_DRHW_APPLE_HRVC = 0x0013, /* Mac II High-Res Video Card */
+ NUBUS_DRHW_APPLE_MVC = 0x0014, /* Mac II Monochrome Video Card */
NUBUS_DRHW_APPLE_PVC = 0x0017, /* Mac II Portrait Video Card */
NUBUS_DRHW_APPLE_RBV1 = 0x0018, /* IIci RBV video */
NUBUS_DRHW_APPLE_MDC = 0x0019, /* Macintosh Display Card */
+ NUBUS_DRHW_APPLE_VSC = 0x0020, /* Duo MiniDock ViSC framebuffer */
NUBUS_DRHW_APPLE_SONORA = 0x0022, /* Sonora built-in video */
+ NUBUS_DRHW_APPLE_JET = 0x0029, /* Jet framebuffer (DuoDock) */
NUBUS_DRHW_APPLE_24AC = 0x002b, /* Mac 24AC Video Card */
NUBUS_DRHW_APPLE_VALKYRIE = 0x002e,
- NUBUS_DRHW_APPLE_JET = 0x0029, /* Jet framebuffer (DuoDock) */
NUBUS_DRHW_SMAC_GFX = 0x0105, /* SuperMac GFX */
NUBUS_DRHW_RASTER_CB264 = 0x013B, /* RasterOps ColorBoard 264 */
NUBUS_DRHW_MICRON_XCEED = 0x0146, /* Micron Exceed color */
diff --git a/include/uapi/linux/openvswitch.h b/include/uapi/linux/openvswitch.h
index 7f41f7d0000f..61b7d36dfe34 100644
--- a/include/uapi/linux/openvswitch.h
+++ b/include/uapi/linux/openvswitch.h
@@ -578,10 +578,25 @@ enum ovs_sample_attr {
OVS_SAMPLE_ATTR_PROBABILITY, /* u32 number */
OVS_SAMPLE_ATTR_ACTIONS, /* Nested OVS_ACTION_ATTR_* attributes. */
__OVS_SAMPLE_ATTR_MAX,
+
+#ifdef __KERNEL__
+ OVS_SAMPLE_ATTR_ARG /* struct sample_arg */
+#endif
};
#define OVS_SAMPLE_ATTR_MAX (__OVS_SAMPLE_ATTR_MAX - 1)
+#ifdef __KERNEL__
+struct sample_arg {
+ bool exec; /* When true, actions in sample will not
+ * change flow keys. False otherwise.
+ */
+ u32 probability; /* Same value as
+ * 'OVS_SAMPLE_ATTR_PROBABILITY'.
+ */
+};
+#endif
+
/**
* enum ovs_userspace_attr - Attributes for %OVS_ACTION_ATTR_USERSPACE action.
* @OVS_USERSPACE_ATTR_PID: u32 Netlink PID to which the %OVS_PACKET_CMD_ACTION
@@ -678,6 +693,17 @@ struct ovs_action_hash {
* nothing if the connection is already committed will check that the current
* packet is in conntrack entry's original direction. If directionality does
* not match, will delete the existing conntrack entry and commit a new one.
+ * @OVS_CT_ATTR_EVENTMASK: Mask of bits indicating which conntrack event types
+ * (enum ip_conntrack_events IPCT_*) should be reported. For any bit set to
+ * zero, the corresponding event type is not generated. Default behavior
+ * depends on system configuration, but typically all event types are
+ * generated, hence listening on NFNLGRP_CONNTRACK_UPDATE events may get a lot
+ * of events. Explicitly passing this attribute allows limiting the updates
+ * received to the events of interest. The bit 1 << IPCT_NEW, 1 <<
+ * IPCT_RELATED, and 1 << IPCT_DESTROY must be set to ones for those events to
+ * be received on NFNLGRP_CONNTRACK_NEW and NFNLGRP_CONNTRACK_DESTROY groups,
+ * respectively. Remaining bits control the changes for which an event is
+ * delivered on the NFNLGRP_CONNTRACK_UPDATE group.
*/
enum ovs_ct_attr {
OVS_CT_ATTR_UNSPEC,
@@ -689,6 +715,7 @@ enum ovs_ct_attr {
related connections. */
OVS_CT_ATTR_NAT, /* Nested OVS_NAT_ATTR_* */
OVS_CT_ATTR_FORCE_COMMIT, /* No argument */
+ OVS_CT_ATTR_EVENTMASK, /* u32 mask of IPCT_* events. */
__OVS_CT_ATTR_MAX
};
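The eventmask semantics above boil down to a plain bitmap over enum ip_conntrack_events. A tiny illustration (not part of the patch), assuming the uapi conntrack header for the IPCT_* values:

#include <linux/types.h>
#include <linux/netfilter/nf_conntrack_common.h>

/* OVS_CT_ATTR_EVENTMASK value asking only for new/destroy conntrack events. */
static inline __u32 ovs_ct_eventmask_new_destroy(void)
{
        return (1u << IPCT_NEW) | (1u << IPCT_DESTROY);
}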
diff --git a/include/uapi/linux/pci_regs.h b/include/uapi/linux/pci_regs.h
index 634c9c44ed6c..d56bb0051009 100644
--- a/include/uapi/linux/pci_regs.h
+++ b/include/uapi/linux/pci_regs.h
@@ -114,7 +114,7 @@
#define PCI_SUBSYSTEM_ID 0x2e
#define PCI_ROM_ADDRESS 0x30 /* Bits 31..11 are address, 10..1 reserved */
#define PCI_ROM_ADDRESS_ENABLE 0x01
-#define PCI_ROM_ADDRESS_MASK (~0x7ffUL)
+#define PCI_ROM_ADDRESS_MASK (~0x7ffU)
#define PCI_CAPABILITY_LIST 0x34 /* Offset of first capability list entry */
@@ -630,6 +630,7 @@
#define PCI_EXP_DEVCTL2_COMP_TIMEOUT 0x000f /* Completion Timeout Value */
#define PCI_EXP_DEVCTL2_ARI 0x0020 /* Alternative Routing-ID */
#define PCI_EXP_DEVCTL2_ATOMIC_REQ 0x0040 /* Set Atomic requests */
+#define PCI_EXP_DEVCTL2_ATOMIC_EGRESS_BLOCK 0x0080 /* Block atomic egress */
#define PCI_EXP_DEVCTL2_IDO_REQ_EN 0x0100 /* Allow IDO for requests */
#define PCI_EXP_DEVCTL2_IDO_CMP_EN 0x0200 /* Allow IDO for completions */
#define PCI_EXP_DEVCTL2_LTR_EN 0x0400 /* Enable LTR mechanism */
diff --git a/include/uapi/linux/pcitest.h b/include/uapi/linux/pcitest.h
new file mode 100644
index 000000000000..a6aa10c45ad1
--- /dev/null
+++ b/include/uapi/linux/pcitest.h
@@ -0,0 +1,19 @@
+/**
+ * pcitest.h - PCI test uapi defines
+ *
+ * Copyright (C) 2017 Texas Instruments
+ * Author: Kishon Vijay Abraham I <kishon@ti.com>
+ *
+ */
+
+#ifndef __UAPI_LINUX_PCITEST_H
+#define __UAPI_LINUX_PCITEST_H
+
+#define PCITEST_BAR _IO('P', 0x1)
+#define PCITEST_LEGACY_IRQ _IO('P', 0x2)
+#define PCITEST_MSI _IOW('P', 0x3, int)
+#define PCITEST_WRITE _IOW('P', 0x4, unsigned long)
+#define PCITEST_READ _IOW('P', 0x5, unsigned long)
+#define PCITEST_COPY _IOW('P', 0x6, unsigned long)
+
+#endif /* __UAPI_LINUX_PCITEST_H */
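A hypothetical user of the new ioctls could look like the sketch below (not part of the patch). The device node name and the exact return-value conventions are assumptions about the matching endpoint-test driver.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/pcitest.h>

int main(void)
{
        int fd = open("/dev/pci-endpoint-test.0", O_RDWR);  /* assumed node name */

        if (fd < 0)
                return 1;

        /* Each call asks the driver to run one test; the result is reported
         * back through the ioctl return value. */
        printf("BAR0:      %ld\n", (long)ioctl(fd, PCITEST_BAR, 0));
        printf("MSI 1:     %ld\n", (long)ioctl(fd, PCITEST_MSI, 1));
        printf("write 1k:  %ld\n", (long)ioctl(fd, PCITEST_WRITE, 1024UL));

        close(fd);
        return 0;
}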
diff --git a/include/uapi/linux/perf_event.h b/include/uapi/linux/perf_event.h
index c66a485a24ac..b1c0b187acfe 100644
--- a/include/uapi/linux/perf_event.h
+++ b/include/uapi/linux/perf_event.h
@@ -344,7 +344,8 @@ struct perf_event_attr {
use_clockid : 1, /* use @clockid for time fields */
context_switch : 1, /* context switch data */
write_backward : 1, /* Write ring buffer from end to beginning */
- __reserved_1 : 36;
+ namespaces : 1, /* include namespaces data */
+ __reserved_1 : 35;
union {
__u32 wakeup_events; /* wakeup every n events */
@@ -610,6 +611,23 @@ struct perf_event_header {
__u16 size;
};
+struct perf_ns_link_info {
+ __u64 dev;
+ __u64 ino;
+};
+
+enum {
+ NET_NS_INDEX = 0,
+ UTS_NS_INDEX = 1,
+ IPC_NS_INDEX = 2,
+ PID_NS_INDEX = 3,
+ USER_NS_INDEX = 4,
+ MNT_NS_INDEX = 5,
+ CGROUP_NS_INDEX = 6,
+
+ NR_NAMESPACES, /* number of available namespaces */
+};
+
enum perf_event_type {
/*
@@ -862,6 +880,18 @@ enum perf_event_type {
*/
PERF_RECORD_SWITCH_CPU_WIDE = 15,
+ /*
+ * struct {
+ * struct perf_event_header header;
+ * u32 pid;
+ * u32 tid;
+ * u64 nr_namespaces;
+ * { u64 dev, inode; } [nr_namespaces];
+ * struct sample_id sample_id;
+ * };
+ */
+ PERF_RECORD_NAMESPACES = 16,
+
PERF_RECORD_MAX, /* non-ABI */
};
@@ -885,12 +915,14 @@ enum perf_callchain_context {
*/
#define PERF_AUX_FLAG_TRUNCATED 0x01 /* record was truncated to fit */
#define PERF_AUX_FLAG_OVERWRITE 0x02 /* snapshot from overwrite mode */
+#define PERF_AUX_FLAG_PARTIAL 0x04 /* record contains gaps */
#define PERF_FLAG_FD_NO_GROUP (1UL << 0)
#define PERF_FLAG_FD_OUTPUT (1UL << 1)
#define PERF_FLAG_PID_CGROUP (1UL << 2) /* pid=cgroup id, per-cpu mode only */
#define PERF_FLAG_FD_CLOEXEC (1UL << 3) /* O_CLOEXEC */
+#if defined(__LITTLE_ENDIAN_BITFIELD)
union perf_mem_data_src {
__u64 val;
struct {
@@ -902,6 +934,21 @@ union perf_mem_data_src {
mem_rsvd:31;
};
};
+#elif defined(__BIG_ENDIAN_BITFIELD)
+union perf_mem_data_src {
+ __u64 val;
+ struct {
+ __u64 mem_rsvd:31,
+ mem_dtlb:7, /* tlb access */
+ mem_lock:2, /* lock instr */
+ mem_snoop:5, /* snoop mode */
+ mem_lvl:14, /* memory hierarchy level */
+ mem_op:5; /* type of opcode */
+ };
+};
+#else
+#error "Unknown endianness"
+#endif
/* type of opcode (load/store/prefetch,code) */
#define PERF_MEM_OP_NA 0x01 /* not available */
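The new attribute bit and record type fit the usual perf_event_open() flow; a minimal sketch (not part of the patch) that asks only for side-band records, including the new namespace records, might look like this:

#include <string.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/syscall.h>
#include <linux/perf_event.h>

static int open_sideband_with_namespaces(pid_t pid)
{
        struct perf_event_attr attr;

        memset(&attr, 0, sizeof(attr));
        attr.size = sizeof(attr);
        attr.type = PERF_TYPE_SOFTWARE;
        attr.config = PERF_COUNT_SW_DUMMY;      /* no samples, side-band only */
        attr.sample_type = PERF_SAMPLE_TID | PERF_SAMPLE_TIME;
        attr.namespaces = 1;                    /* request PERF_RECORD_NAMESPACES */
        attr.comm = 1;
        attr.task = 1;

        return syscall(__NR_perf_event_open, &attr, pid,
                       -1 /* any cpu */, -1 /* no group */, 0);
}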
diff --git a/include/uapi/linux/pkt_cls.h b/include/uapi/linux/pkt_cls.h
index 7a69f2a4ca0c..d613be3b3239 100644
--- a/include/uapi/linux/pkt_cls.h
+++ b/include/uapi/linux/pkt_cls.h
@@ -37,7 +37,20 @@ enum {
#define TC_ACT_QUEUED 5
#define TC_ACT_REPEAT 6
#define TC_ACT_REDIRECT 7
-#define TC_ACT_JUMP 0x10000000
+
+/* There is a special kind of action called an "extended action",
+ * which needs a value parameter. These have a local opcode located in
+ * the highest nibble, starting from 1. The rest of the bits
+ * are used to carry the value. These two parts together make
+ * a combined opcode.
+ */
+#define __TC_ACT_EXT_SHIFT 28
+#define __TC_ACT_EXT(local) ((local) << __TC_ACT_EXT_SHIFT)
+#define TC_ACT_EXT_VAL_MASK ((1 << __TC_ACT_EXT_SHIFT) - 1)
+#define TC_ACT_EXT_CMP(combined, opcode) \
+ (((combined) & (~TC_ACT_EXT_VAL_MASK)) == opcode)
+
+#define TC_ACT_JUMP __TC_ACT_EXT(1)
/* Action type identifiers*/
enum {
@@ -432,6 +445,11 @@ enum {
TCA_FLOWER_KEY_ARP_THA, /* ETH_ALEN */
TCA_FLOWER_KEY_ARP_THA_MASK, /* ETH_ALEN */
+ TCA_FLOWER_KEY_MPLS_TTL, /* u8 - 8 bits */
+ TCA_FLOWER_KEY_MPLS_BOS, /* u8 - 1 bit */
+ TCA_FLOWER_KEY_MPLS_TC, /* u8 - 3 bits */
+ TCA_FLOWER_KEY_MPLS_LABEL, /* be32 - 20 bits */
+
__TCA_FLOWER_MAX,
};
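As a small illustration (not part of the patch) of the combined-opcode encoding described in the comment above, TC_ACT_JUMP with a skip count of 3 decomposes as follows:

#include <assert.h>
#include <linux/pkt_cls.h>

int main(void)
{
        /* jump over the next three actions, encoded as one combined opcode */
        int combined = TC_ACT_JUMP | 3;

        assert(TC_ACT_EXT_CMP(combined, TC_ACT_JUMP));          /* opcode part */
        assert((combined & TC_ACT_EXT_VAL_MASK) == 3);          /* value part */
        return 0;
}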
diff --git a/include/uapi/linux/pkt_sched.h b/include/uapi/linux/pkt_sched.h
index df7451d35131..099bf5528fed 100644
--- a/include/uapi/linux/pkt_sched.h
+++ b/include/uapi/linux/pkt_sched.h
@@ -617,6 +617,14 @@ struct tc_drr_stats {
#define TC_QOPT_BITMASK 15
#define TC_QOPT_MAX_QUEUE 16
+enum {
+ TC_MQPRIO_HW_OFFLOAD_NONE, /* no offload requested */
+ TC_MQPRIO_HW_OFFLOAD_TCS, /* offload TCs, no queue counts */
+ __TC_MQPRIO_HW_OFFLOAD_MAX
+};
+
+#define TC_MQPRIO_HW_OFFLOAD_MAX (__TC_MQPRIO_HW_OFFLOAD_MAX - 1)
+
struct tc_mqprio_qopt {
__u8 num_tc;
__u8 prio_tc_map[TC_QOPT_BITMASK + 1];
diff --git a/include/uapi/linux/pps.h b/include/uapi/linux/pps.h
index a9bb1d93451a..c1cb3825a8bc 100644
--- a/include/uapi/linux/pps.h
+++ b/include/uapi/linux/pps.h
@@ -55,6 +55,12 @@ struct pps_ktime {
__s32 nsec;
__u32 flags;
};
+
+struct pps_ktime_compat {
+ __s64 sec;
+ __s32 nsec;
+ __u32 flags;
+} __attribute__((packed, aligned(4)));
#define PPS_TIME_INVALID (1<<0) /* used to specify timeout==NULL */
struct pps_kinfo {
@@ -65,6 +71,14 @@ struct pps_kinfo {
int current_mode; /* current mode bits */
};
+struct pps_kinfo_compat {
+ __u32 assert_sequence; /* seq. num. of assert event */
+ __u32 clear_sequence; /* seq. num. of clear event */
+ struct pps_ktime_compat assert_tu; /* time of assert event */
+ struct pps_ktime_compat clear_tu; /* time of clear event */
+ int current_mode; /* current mode bits */
+};
+
struct pps_kparams {
int api_version; /* API version # */
int mode; /* mode bits */
@@ -114,6 +128,11 @@ struct pps_fdata {
struct pps_ktime timeout;
};
+struct pps_fdata_compat {
+ struct pps_kinfo_compat info;
+ struct pps_ktime_compat timeout;
+};
+
struct pps_bind_args {
int tsformat; /* format of time stamps */
int edge; /* selected event type */
diff --git a/include/uapi/linux/raid/md_p.h b/include/uapi/linux/raid/md_p.h
index 9930f3e9040f..d500bd224979 100644
--- a/include/uapi/linux/raid/md_p.h
+++ b/include/uapi/linux/raid/md_p.h
@@ -242,10 +242,18 @@ struct mdp_superblock_1 {
__le32 chunksize; /* in 512byte sectors */
__le32 raid_disks;
- __le32 bitmap_offset; /* sectors after start of superblock that bitmap starts
- * NOTE: signed, so bitmap can be before superblock
- * only meaningful of feature_map[0] is set.
- */
+ union {
+ __le32 bitmap_offset; /* sectors after start of superblock that bitmap starts
+ * NOTE: signed, so bitmap can be before superblock
+ * only meaningful if feature_map[0] is set.
+ */
+
+ /* only meaningful when feature_map[MD_FEATURE_PPL] is set */
+ struct {
+ __le16 offset; /* sectors from start of superblock that ppl starts (signed) */
+ __le16 size; /* ppl size in sectors */
+ } ppl;
+ };
/* These are only valid with feature bit '4' */
__le32 new_level; /* new level we are reshaping to */
@@ -318,6 +326,7 @@ struct mdp_superblock_1 {
*/
#define MD_FEATURE_CLUSTERED 256 /* clustered MD */
#define MD_FEATURE_JOURNAL 512 /* support write cache */
+#define MD_FEATURE_PPL 1024 /* support PPL */
#define MD_FEATURE_ALL (MD_FEATURE_BITMAP_OFFSET \
|MD_FEATURE_RECOVERY_OFFSET \
|MD_FEATURE_RESHAPE_ACTIVE \
@@ -328,6 +337,7 @@ struct mdp_superblock_1 {
|MD_FEATURE_RECOVERY_BITMAP \
|MD_FEATURE_CLUSTERED \
|MD_FEATURE_JOURNAL \
+ |MD_FEATURE_PPL \
)
struct r5l_payload_header {
@@ -388,4 +398,31 @@ struct r5l_meta_block {
#define R5LOG_VERSION 0x1
#define R5LOG_MAGIC 0x6433c509
+
+struct ppl_header_entry {
+ __le64 data_sector; /* raid sector of the new data */
+ __le32 pp_size; /* length of partial parity */
+ __le32 data_size; /* length of data */
+ __le32 parity_disk; /* member disk containing parity */
+ __le32 checksum; /* checksum of partial parity data for this
+ * entry (~crc32c) */
+} __attribute__ ((__packed__));
+
+#define PPL_HEADER_SIZE 4096
+#define PPL_HDR_RESERVED 512
+#define PPL_HDR_ENTRY_SPACE \
+ (PPL_HEADER_SIZE - PPL_HDR_RESERVED - 4 * sizeof(__le32) - sizeof(__le64))
+#define PPL_HDR_MAX_ENTRIES \
+ (PPL_HDR_ENTRY_SPACE / sizeof(struct ppl_header_entry))
+
+struct ppl_header {
+ __u8 reserved[PPL_HDR_RESERVED];/* reserved space, fill with 0xff */
+ __le32 signature; /* signature (family number of volume) */
+ __le32 padding; /* zero pad */
+ __le64 generation; /* generation number of the header */
+ __le32 entries_count; /* number of entries in entry array */
+ __le32 checksum; /* checksum of the header (~crc32c) */
+ struct ppl_header_entry entries[PPL_HDR_MAX_ENTRIES];
+} __attribute__ ((__packed__));
+
#endif
diff --git a/include/uapi/linux/rtnetlink.h b/include/uapi/linux/rtnetlink.h
index 6546917d605a..cce061382e40 100644
--- a/include/uapi/linux/rtnetlink.h
+++ b/include/uapi/linux/rtnetlink.h
@@ -122,6 +122,8 @@ enum {
RTM_NEWNETCONF = 80,
#define RTM_NEWNETCONF RTM_NEWNETCONF
+ RTM_DELNETCONF,
+#define RTM_DELNETCONF RTM_DELNETCONF
RTM_GETNETCONF = 82,
#define RTM_GETNETCONF RTM_GETNETCONF
@@ -319,6 +321,7 @@ enum rtattr_type_t {
RTA_EXPIRES,
RTA_PAD,
RTA_UID,
+ RTA_TTL_PROPAGATE,
__RTA_MAX
};
@@ -545,6 +548,7 @@ enum {
TCA_STATS2,
TCA_STAB,
TCA_PAD,
+ TCA_DUMP_INVISIBLE,
__TCA_MAX
};
diff --git a/include/uapi/linux/sctp.h b/include/uapi/linux/sctp.h
index d3ae381fcf33..ced9d8b97426 100644
--- a/include/uapi/linux/sctp.h
+++ b/include/uapi/linux/sctp.h
@@ -115,6 +115,8 @@ typedef __s32 sctp_assoc_t;
#define SCTP_PR_SUPPORTED 113
#define SCTP_DEFAULT_PRINFO 114
#define SCTP_PR_ASSOC_STATUS 115
+#define SCTP_PR_STREAM_STATUS 116
+#define SCTP_RECONFIG_SUPPORTED 117
#define SCTP_ENABLE_STREAM_RESET 118
#define SCTP_RESET_STREAMS 119
#define SCTP_RESET_ASSOC 120
@@ -502,6 +504,28 @@ struct sctp_stream_reset_event {
__u16 strreset_stream_list[];
};
+#define SCTP_ASSOC_RESET_DENIED 0x0004
+#define SCTP_ASSOC_RESET_FAILED 0x0008
+struct sctp_assoc_reset_event {
+ __u16 assocreset_type;
+ __u16 assocreset_flags;
+ __u32 assocreset_length;
+ sctp_assoc_t assocreset_assoc_id;
+ __u32 assocreset_local_tsn;
+ __u32 assocreset_remote_tsn;
+};
+
+#define SCTP_ASSOC_CHANGE_DENIED 0x0004
+#define SCTP_ASSOC_CHANGE_FAILED 0x0008
+struct sctp_stream_change_event {
+ __u16 strchange_type;
+ __u16 strchange_flags;
+ __u32 strchange_length;
+ sctp_assoc_t strchange_assoc_id;
+ __u16 strchange_instrms;
+ __u16 strchange_outstrms;
+};
+
/*
* Described in Section 7.3
* Ancillary Data and Notification Interest Options
@@ -518,6 +542,8 @@ struct sctp_event_subscribe {
__u8 sctp_authentication_event;
__u8 sctp_sender_dry_event;
__u8 sctp_stream_reset_event;
+ __u8 sctp_assoc_reset_event;
+ __u8 sctp_stream_change_event;
};
/*
@@ -543,6 +569,8 @@ union sctp_notification {
struct sctp_authkey_event sn_authkey_event;
struct sctp_sender_dry_event sn_sender_dry_event;
struct sctp_stream_reset_event sn_strreset_event;
+ struct sctp_assoc_reset_event sn_assocreset_event;
+ struct sctp_stream_change_event sn_strchange_event;
};
/* Section 5.3.1
@@ -572,6 +600,10 @@ enum sctp_sn_type {
#define SCTP_SENDER_DRY_EVENT SCTP_SENDER_DRY_EVENT
SCTP_STREAM_RESET_EVENT,
#define SCTP_STREAM_RESET_EVENT SCTP_STREAM_RESET_EVENT
+ SCTP_ASSOC_RESET_EVENT,
+#define SCTP_ASSOC_RESET_EVENT SCTP_ASSOC_RESET_EVENT
+ SCTP_STREAM_CHANGE_EVENT,
+#define SCTP_STREAM_CHANGE_EVENT SCTP_STREAM_CHANGE_EVENT
};
/* Notification error codes used to fill up the error fields in some
diff --git a/include/uapi/linux/serio.h b/include/uapi/linux/serio.h
index ccd0ccd00f47..ac217c6f0151 100644
--- a/include/uapi/linux/serio.h
+++ b/include/uapi/linux/serio.h
@@ -80,5 +80,6 @@
#define SERIO_WACOM_IV 0x3e
#define SERIO_EGALAX 0x3f
#define SERIO_PULSE8_CEC 0x40
+#define SERIO_RAINSHADOW_CEC 0x41
#endif /* _UAPI_SERIO_H */
diff --git a/include/uapi/linux/snmp.h b/include/uapi/linux/snmp.h
index 3b2bed7ca9a4..95cffcb21dfd 100644
--- a/include/uapi/linux/snmp.h
+++ b/include/uapi/linux/snmp.h
@@ -177,7 +177,6 @@ enum
LINUX_MIB_TIMEWAITED, /* TimeWaited */
LINUX_MIB_TIMEWAITRECYCLED, /* TimeWaitRecycled */
LINUX_MIB_TIMEWAITKILLED, /* TimeWaitKilled */
- LINUX_MIB_PAWSPASSIVEREJECTED, /* PAWSPassiveRejected */
LINUX_MIB_PAWSACTIVEREJECTED, /* PAWSActiveRejected */
LINUX_MIB_PAWSESTABREJECTED, /* PAWSEstabRejected */
LINUX_MIB_DELAYEDACKS, /* DelayedACKs */
@@ -260,6 +259,7 @@ enum
LINUX_MIB_TCPFASTOPENPASSIVEFAIL, /* TCPFastOpenPassiveFail */
LINUX_MIB_TCPFASTOPENLISTENOVERFLOW, /* TCPFastOpenListenOverflow */
LINUX_MIB_TCPFASTOPENCOOKIEREQD, /* TCPFastOpenCookieReqd */
+ LINUX_MIB_TCPFASTOPENBLACKHOLE, /* TCPFastOpenBlackholeDetect */
LINUX_MIB_TCPSPURIOUS_RTX_HOSTQUEUES, /* TCPSpuriousRtxHostQueues */
LINUX_MIB_BUSYPOLLRXPACKETS, /* BusyPollRxPackets */
LINUX_MIB_TCPAUTOCORKING, /* TCPAutoCorking */
diff --git a/include/uapi/linux/stat.h b/include/uapi/linux/stat.h
index d538897b8e08..17b10304c393 100644
--- a/include/uapi/linux/stat.h
+++ b/include/uapi/linux/stat.h
@@ -48,17 +48,13 @@
* tv_sec holds the number of seconds before (negative) or after (positive)
* 00:00:00 1st January 1970 UTC.
*
- * tv_nsec holds a number of nanoseconds before (0..-999,999,999 if tv_sec is
- * negative) or after (0..999,999,999 if tv_sec is positive) the tv_sec time.
- *
- * Note that if both tv_sec and tv_nsec are non-zero, then the two values must
- * either be both positive or both negative.
+ * tv_nsec holds a number of nanoseconds (0..999,999,999) after the tv_sec time.
*
* __reserved is held in case we need a yet finer resolution.
*/
struct statx_timestamp {
__s64 tv_sec;
- __s32 tv_nsec;
+ __u32 tv_nsec;
__s32 __reserved;
};
diff --git a/include/uapi/linux/switchtec_ioctl.h b/include/uapi/linux/switchtec_ioctl.h
new file mode 100644
index 000000000000..3e824e1a6495
--- /dev/null
+++ b/include/uapi/linux/switchtec_ioctl.h
@@ -0,0 +1,132 @@
+/*
+ * Microsemi Switchtec PCIe Driver
+ * Copyright (c) 2017, Microsemi Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms and conditions of the GNU General Public License,
+ * version 2, as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ */
+
+#ifndef _UAPI_LINUX_SWITCHTEC_IOCTL_H
+#define _UAPI_LINUX_SWITCHTEC_IOCTL_H
+
+#include <linux/types.h>
+
+#define SWITCHTEC_IOCTL_PART_CFG0 0
+#define SWITCHTEC_IOCTL_PART_CFG1 1
+#define SWITCHTEC_IOCTL_PART_IMG0 2
+#define SWITCHTEC_IOCTL_PART_IMG1 3
+#define SWITCHTEC_IOCTL_PART_NVLOG 4
+#define SWITCHTEC_IOCTL_PART_VENDOR0 5
+#define SWITCHTEC_IOCTL_PART_VENDOR1 6
+#define SWITCHTEC_IOCTL_PART_VENDOR2 7
+#define SWITCHTEC_IOCTL_PART_VENDOR3 8
+#define SWITCHTEC_IOCTL_PART_VENDOR4 9
+#define SWITCHTEC_IOCTL_PART_VENDOR5 10
+#define SWITCHTEC_IOCTL_PART_VENDOR6 11
+#define SWITCHTEC_IOCTL_PART_VENDOR7 12
+#define SWITCHTEC_IOCTL_NUM_PARTITIONS 13
+
+struct switchtec_ioctl_flash_info {
+ __u64 flash_length;
+ __u32 num_partitions;
+ __u32 padding;
+};
+
+struct switchtec_ioctl_flash_part_info {
+ __u32 flash_partition;
+ __u32 address;
+ __u32 length;
+ __u32 active;
+};
+
+struct switchtec_ioctl_event_summary {
+ __u64 global;
+ __u64 part_bitmap;
+ __u32 local_part;
+ __u32 padding;
+ __u32 part[48];
+ __u32 pff[48];
+};
+
+#define SWITCHTEC_IOCTL_EVENT_STACK_ERROR 0
+#define SWITCHTEC_IOCTL_EVENT_PPU_ERROR 1
+#define SWITCHTEC_IOCTL_EVENT_ISP_ERROR 2
+#define SWITCHTEC_IOCTL_EVENT_SYS_RESET 3
+#define SWITCHTEC_IOCTL_EVENT_FW_EXC 4
+#define SWITCHTEC_IOCTL_EVENT_FW_NMI 5
+#define SWITCHTEC_IOCTL_EVENT_FW_NON_FATAL 6
+#define SWITCHTEC_IOCTL_EVENT_FW_FATAL 7
+#define SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP 8
+#define SWITCHTEC_IOCTL_EVENT_TWI_MRPC_COMP_ASYNC 9
+#define SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP 10
+#define SWITCHTEC_IOCTL_EVENT_CLI_MRPC_COMP_ASYNC 11
+#define SWITCHTEC_IOCTL_EVENT_GPIO_INT 12
+#define SWITCHTEC_IOCTL_EVENT_PART_RESET 13
+#define SWITCHTEC_IOCTL_EVENT_MRPC_COMP 14
+#define SWITCHTEC_IOCTL_EVENT_MRPC_COMP_ASYNC 15
+#define SWITCHTEC_IOCTL_EVENT_DYN_PART_BIND_COMP 16
+#define SWITCHTEC_IOCTL_EVENT_AER_IN_P2P 17
+#define SWITCHTEC_IOCTL_EVENT_AER_IN_VEP 18
+#define SWITCHTEC_IOCTL_EVENT_DPC 19
+#define SWITCHTEC_IOCTL_EVENT_CTS 20
+#define SWITCHTEC_IOCTL_EVENT_HOTPLUG 21
+#define SWITCHTEC_IOCTL_EVENT_IER 22
+#define SWITCHTEC_IOCTL_EVENT_THRESH 23
+#define SWITCHTEC_IOCTL_EVENT_POWER_MGMT 24
+#define SWITCHTEC_IOCTL_EVENT_TLP_THROTTLING 25
+#define SWITCHTEC_IOCTL_EVENT_FORCE_SPEED 26
+#define SWITCHTEC_IOCTL_EVENT_CREDIT_TIMEOUT 27
+#define SWITCHTEC_IOCTL_EVENT_LINK_STATE 28
+#define SWITCHTEC_IOCTL_MAX_EVENTS 29
+
+#define SWITCHTEC_IOCTL_EVENT_LOCAL_PART_IDX -1
+#define SWITCHTEC_IOCTL_EVENT_IDX_ALL -2
+
+#define SWITCHTEC_IOCTL_EVENT_FLAG_CLEAR (1 << 0)
+#define SWITCHTEC_IOCTL_EVENT_FLAG_EN_POLL (1 << 1)
+#define SWITCHTEC_IOCTL_EVENT_FLAG_EN_LOG (1 << 2)
+#define SWITCHTEC_IOCTL_EVENT_FLAG_EN_CLI (1 << 3)
+#define SWITCHTEC_IOCTL_EVENT_FLAG_EN_FATAL (1 << 4)
+#define SWITCHTEC_IOCTL_EVENT_FLAG_DIS_POLL (1 << 5)
+#define SWITCHTEC_IOCTL_EVENT_FLAG_DIS_LOG (1 << 6)
+#define SWITCHTEC_IOCTL_EVENT_FLAG_DIS_CLI (1 << 7)
+#define SWITCHTEC_IOCTL_EVENT_FLAG_DIS_FATAL (1 << 8)
+#define SWITCHTEC_IOCTL_EVENT_FLAG_UNUSED (~0x1ff)
+
+struct switchtec_ioctl_event_ctl {
+ __u32 event_id;
+ __s32 index;
+ __u32 flags;
+ __u32 occurred;
+ __u32 count;
+ __u32 data[5];
+};
+
+#define SWITCHTEC_IOCTL_PFF_VEP 100
+struct switchtec_ioctl_pff_port {
+ __u32 pff;
+ __u32 partition;
+ __u32 port;
+};
+
+#define SWITCHTEC_IOCTL_FLASH_INFO \
+ _IOR('W', 0x40, struct switchtec_ioctl_flash_info)
+#define SWITCHTEC_IOCTL_FLASH_PART_INFO \
+ _IOWR('W', 0x41, struct switchtec_ioctl_flash_part_info)
+#define SWITCHTEC_IOCTL_EVENT_SUMMARY \
+ _IOR('W', 0x42, struct switchtec_ioctl_event_summary)
+#define SWITCHTEC_IOCTL_EVENT_CTL \
+ _IOWR('W', 0x43, struct switchtec_ioctl_event_ctl)
+#define SWITCHTEC_IOCTL_PFF_TO_PORT \
+ _IOWR('W', 0x44, struct switchtec_ioctl_pff_port)
+#define SWITCHTEC_IOCTL_PORT_TO_PFF \
+ _IOWR('W', 0x45, struct switchtec_ioctl_pff_port)
+
+#endif
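A hypothetical userspace consumer of the new interface might query the flash layout as in the sketch below (not part of the patch); "/dev/switchtec0" is an assumed device node name.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <linux/switchtec_ioctl.h>

int main(void)
{
        struct switchtec_ioctl_flash_info info;
        struct switchtec_ioctl_flash_part_info part = {
                .flash_partition = SWITCHTEC_IOCTL_PART_IMG0,
        };
        int fd = open("/dev/switchtec0", O_RDWR);

        if (fd < 0 || ioctl(fd, SWITCHTEC_IOCTL_FLASH_INFO, &info))
                return 1;

        printf("flash: %llu bytes, %u partitions\n",
               (unsigned long long)info.flash_length, info.num_partitions);

        if (!ioctl(fd, SWITCHTEC_IOCTL_FLASH_PART_INFO, &part))
                printf("IMG0: addr 0x%x len 0x%x active %u\n",
                       part.address, part.length, part.active);

        close(fd);
        return 0;
}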
diff --git a/include/uapi/linux/sysctl.h b/include/uapi/linux/sysctl.h
index d2b12152e358..e13d48058b8d 100644
--- a/include/uapi/linux/sysctl.h
+++ b/include/uapi/linux/sysctl.h
@@ -568,6 +568,7 @@ enum {
NET_IPV6_PROXY_NDP=23,
NET_IPV6_ACCEPT_SOURCE_ROUTE=25,
NET_IPV6_ACCEPT_RA_FROM_LOCAL=26,
+ NET_IPV6_ACCEPT_RA_RT_INFO_MIN_PLEN=27,
__NET_IPV6_MAX
};
diff --git a/include/uapi/linux/tee.h b/include/uapi/linux/tee.h
new file mode 100644
index 000000000000..370d8845ab21
--- /dev/null
+++ b/include/uapi/linux/tee.h
@@ -0,0 +1,346 @@
+/*
+ * Copyright (c) 2015-2016, Linaro Limited
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are met:
+ *
+ * 1. Redistributions of source code must retain the above copyright notice,
+ * this list of conditions and the following disclaimer.
+ *
+ * 2. Redistributions in binary form must reproduce the above copyright notice,
+ * this list of conditions and the following disclaimer in the documentation
+ * and/or other materials provided with the distribution.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+ * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+ * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
+ * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+ * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+ * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+ * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+ * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+ * POSSIBILITY OF SUCH DAMAGE.
+ */
+
+#ifndef __TEE_H
+#define __TEE_H
+
+#include <linux/ioctl.h>
+#include <linux/types.h>
+
+/*
+ * This file describes the API provided by a TEE driver to user space.
+ *
+ * Each TEE driver defines a TEE specific protocol which is used for the
+ * data passed back and forth using TEE_IOC_CMD.
+ */
+
+/* Helpers to make the ioctl defines */
+#define TEE_IOC_MAGIC 0xa4
+#define TEE_IOC_BASE 0
+
+/* Flags relating to shared memory */
+#define TEE_IOCTL_SHM_MAPPED 0x1 /* memory mapped in normal world */
+#define TEE_IOCTL_SHM_DMA_BUF 0x2 /* dma-buf handle on shared memory */
+
+#define TEE_MAX_ARG_SIZE 1024
+
+#define TEE_GEN_CAP_GP (1 << 0)/* GlobalPlatform compliant TEE */
+
+/*
+ * TEE Implementation ID
+ */
+#define TEE_IMPL_ID_OPTEE 1
+
+/*
+ * OP-TEE specific capabilities
+ */
+#define TEE_OPTEE_CAP_TZ (1 << 0)
+
+/**
+ * struct tee_ioctl_version_data - TEE version
+ * @impl_id: [out] TEE implementation id
+ * @impl_caps: [out] Implementation specific capabilities
+ * @gen_caps: [out] Generic capabilities, defined by TEE_GEN_CAPS_* above
+ *
+ * Identifies the TEE implementation, @impl_id is one of TEE_IMPL_ID_* above.
+ * @impl_caps is implementation specific, for example TEE_OPTEE_CAP_*
+ * is valid when @impl_id == TEE_IMPL_ID_OPTEE.
+ */
+struct tee_ioctl_version_data {
+ __u32 impl_id;
+ __u32 impl_caps;
+ __u32 gen_caps;
+};
+
+/**
+ * TEE_IOC_VERSION - query version of TEE
+ *
+ * Takes a tee_ioctl_version_data struct and returns with the TEE version
+ * data filled in.
+ */
+#define TEE_IOC_VERSION _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 0, \
+ struct tee_ioctl_version_data)
+
+/**
+ * struct tee_ioctl_shm_alloc_data - Shared memory allocate argument
+ * @size: [in/out] Size of shared memory to allocate
+ * @flags: [in/out] Flags to/from allocation.
+ * @id: [out] Identifier of the shared memory
+ *
+ * The flags field should currently be zero as input. Updated by the call
+ * with actual flags as defined by TEE_IOCTL_SHM_* above.
+ * This structure is used as argument for TEE_IOC_SHM_ALLOC below.
+ */
+struct tee_ioctl_shm_alloc_data {
+ __u64 size;
+ __u32 flags;
+ __s32 id;
+};
+
+/**
+ * TEE_IOC_SHM_ALLOC - allocate shared memory
+ *
+ * Allocates shared memory between the user space process and secure OS.
+ *
+ * Returns a file descriptor on success or < 0 on failure
+ *
+ * The returned file descriptor is used to map the shared memory into user
+ * space. The shared memory is freed when the descriptor is closed and the
+ * memory is unmapped.
+ */
+#define TEE_IOC_SHM_ALLOC _IOWR(TEE_IOC_MAGIC, TEE_IOC_BASE + 1, \
+ struct tee_ioctl_shm_alloc_data)
+
+/**
+ * struct tee_ioctl_buf_data - Variable sized buffer
+ * @buf_ptr: [in] A __user pointer to a buffer
+ * @buf_len: [in] Length of the buffer above
+ *
+ * Used as argument for TEE_IOC_OPEN_SESSION, TEE_IOC_INVOKE,
+ * TEE_IOC_SUPPL_RECV, and TEE_IOC_SUPPL_SEND below.
+ */
+struct tee_ioctl_buf_data {
+ __u64 buf_ptr;
+ __u64 buf_len;
+};
+
+/*
+ * Attributes for struct tee_ioctl_param, selects field in the union
+ */
+#define TEE_IOCTL_PARAM_ATTR_TYPE_NONE 0 /* parameter not used */
+
+/*
+ * These define value parameters (struct tee_ioctl_param_value)
+ */
+#define TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INPUT 1
+#define TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_OUTPUT 2
+#define TEE_IOCTL_PARAM_ATTR_TYPE_VALUE_INOUT 3 /* input and output */
+
+/*
+ * These define shared memory reference parameters (struct
+ * tee_ioctl_param_memref)
+ */
+#define TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INPUT 5
+#define TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_OUTPUT 6
+#define TEE_IOCTL_PARAM_ATTR_TYPE_MEMREF_INOUT 7 /* input and output */
+
+/*
+ * Mask for the type part of the attribute, leaves room for more types
+ */
+#define TEE_IOCTL_PARAM_ATTR_TYPE_MASK 0xff
+
+/*
+ * Matches TEEC_LOGIN_* in GP TEE Client API
+ * Are only defined for GP compliant TEEs
+ */
+#define TEE_IOCTL_LOGIN_PUBLIC 0
+#define TEE_IOCTL_LOGIN_USER 1
+#define TEE_IOCTL_LOGIN_GROUP 2
+#define TEE_IOCTL_LOGIN_APPLICATION 4
+#define TEE_IOCTL_LOGIN_USER_APPLICATION 5
+#define TEE_IOCTL_LOGIN_GROUP_APPLICATION 6
+
+/**
+ * struct tee_ioctl_param - parameter
+ * @attr: attributes
+ * @a: if a memref, offset into the shared memory object, else a value parameter
+ * @b: if a memref, size of the buffer, else a value parameter
+ * @c: if a memref, shared memory identifier, else a value parameter
+ *
+ * @attr & TEE_PARAM_ATTR_TYPE_MASK indicates if memref or value is used in
+ * the union. TEE_PARAM_ATTR_TYPE_VALUE_* indicates value and
+ * TEE_PARAM_ATTR_TYPE_MEMREF_* indicates memref. TEE_PARAM_ATTR_TYPE_NONE
+ * indicates that none of the members are used.
+ *
+ * Shared memory is allocated with TEE_IOC_SHM_ALLOC which returns an
+ * identifier representing the shared memory object. A memref can reference
+ * a part of a shared memory by specifying an offset (@a) and size (@b) of
+ * the object. To supply the entire shared memory object set the offset
+ * (@a) to 0 and size (@b) to the previously returned size of the object.
+ */
+struct tee_ioctl_param {
+ __u64 attr;
+ __u64 a;
+ __u64 b;
+ __u64 c;
+};
+
+#define TEE_IOCTL_UUID_LEN 16
+
+/**
+ * struct tee_ioctl_open_session_arg - Open session argument
+ * @uuid: [in] UUID of the Trusted Application
+ * @clnt_uuid: [in] UUID of client
+ * @clnt_login: [in] Login class of client, TEE_IOCTL_LOGIN_* above
+ * @cancel_id: [in] Cancellation id, a unique value to identify this request
+ * @session: [out] Session id
+ * @ret: [out] return value
+ * @ret_origin [out] origin of the return value
+ * @num_params [in] number of parameters following this struct
+ */
+struct tee_ioctl_open_session_arg {
+ __u8 uuid[TEE_IOCTL_UUID_LEN];
+ __u8 clnt_uuid[TEE_IOCTL_UUID_LEN];
+ __u32 clnt_login;
+ __u32 cancel_id;
+ __u32 session;
+ __u32 ret;
+ __u32 ret_origin;
+ __u32 num_params;
+ /* num_params tells the actual number of elements in params */
+ struct tee_ioctl_param params[];
+};
+
+/**
+ * TEE_IOC_OPEN_SESSION - opens a session to a Trusted Application
+ *
+ * Takes a struct tee_ioctl_buf_data which contains a struct
+ * tee_ioctl_open_session_arg followed by any array of struct
+ * tee_ioctl_param
+ */
+#define TEE_IOC_OPEN_SESSION _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 2, \
+ struct tee_ioctl_buf_data)
+
+/**
+ * struct tee_ioctl_invoke_func_arg - Invokes a function in a Trusted
+ * Application
+ * @func: [in] Trusted Application function, specific to the TA
+ * @session: [in] Session id
+ * @cancel_id: [in] Cancellation id, a unique value to identify this request
+ * @ret: [out] return value
+ * @ret_origin [out] origin of the return value
+ * @num_params [in] number of parameters following this struct
+ */
+struct tee_ioctl_invoke_arg {
+ __u32 func;
+ __u32 session;
+ __u32 cancel_id;
+ __u32 ret;
+ __u32 ret_origin;
+ __u32 num_params;
+ /* num_params tells the actual number of elements in params */
+ struct tee_ioctl_param params[];
+};
+
+/**
+ * TEE_IOC_INVOKE - Invokes a function in a Trusted Application
+ *
+ * Takes a struct tee_ioctl_buf_data which contains a struct
+ * tee_invoke_func_arg followed by any array of struct tee_param
+ */
+#define TEE_IOC_INVOKE _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 3, \
+ struct tee_ioctl_buf_data)
+
+/**
+ * struct tee_ioctl_cancel_arg - Cancels an open session or invoke ioctl
+ * @cancel_id: [in] Cancellation id, a unique value to identify this request
+ * @session: [in] Session id, if the session is opened, else set to 0
+ */
+struct tee_ioctl_cancel_arg {
+ __u32 cancel_id;
+ __u32 session;
+};
+
+/**
+ * TEE_IOC_CANCEL - Cancels an open session or invoke
+ */
+#define TEE_IOC_CANCEL _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 4, \
+ struct tee_ioctl_cancel_arg)
+
+/**
+ * struct tee_ioctl_close_session_arg - Closes an open session
+ * @session: [in] Session id
+ */
+struct tee_ioctl_close_session_arg {
+ __u32 session;
+};
+
+/**
+ * TEE_IOC_CLOSE_SESSION - Closes a session
+ */
+#define TEE_IOC_CLOSE_SESSION _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 5, \
+ struct tee_ioctl_close_session_arg)
+
+/**
+ * struct tee_iocl_supp_recv_arg - Receive a request for a supplicant function
+ * @func: [in] supplicant function
+ * @num_params [in/out] number of parameters following this struct
+ *
+ * On input, @num_params is the number of params that tee-supplicant has room
+ * to receive; on output, it is the number of params that tee-supplicant
+ * actually received.
+ */
+struct tee_iocl_supp_recv_arg {
+ __u32 func;
+ __u32 num_params;
+ /* num_params tells the actual number of elements in params */
+ struct tee_ioctl_param params[];
+};
+
+/**
+ * TEE_IOC_SUPPL_RECV - Receive a request for a supplicant function
+ *
+ * Takes a struct tee_ioctl_buf_data which contains a struct
+ * tee_iocl_supp_recv_arg followed by any array of struct tee_param
+ */
+#define TEE_IOC_SUPPL_RECV _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 6, \
+ struct tee_ioctl_buf_data)
+
+/**
+ * struct tee_iocl_supp_send_arg - Send a response to a received request
+ * @ret: [out] return value
+ * @num_params [in] number of parameters following this struct
+ */
+struct tee_iocl_supp_send_arg {
+ __u32 ret;
+ __u32 num_params;
+ /* num_params tells the actual number of elements in params */
+ struct tee_ioctl_param params[];
+};
+
+/**
+ * TEE_IOC_SUPPL_SEND - Send a response to a received request
+ *
+ * Takes a struct tee_ioctl_buf_data which contains a struct
+ * tee_iocl_supp_send_arg followed by any array of struct tee_param
+ */
+#define TEE_IOC_SUPPL_SEND _IOR(TEE_IOC_MAGIC, TEE_IOC_BASE + 7, \
+ struct tee_ioctl_buf_data)
+
+/*
+ * Five syscalls are used when communicating with the TEE driver.
+ * open(): opens the device associated with the driver
+ * ioctl(): as described above operating on the file descriptor from open()
+ * close(): two cases
+ * - closes the device file descriptor
+ * - closes a file descriptor connected to allocated shared memory
+ * mmap(): maps shared memory into user space using information from struct
+ * tee_ioctl_shm_alloc_data
+ * munmap(): unmaps previously shared memory
+ */
+
+#endif /*__TEE_H*/
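The open()/ioctl()/mmap() flow summarised at the end of the header can be sketched as below (not part of the patch); "/dev/tee0" is an assumed device node name and error handling is abbreviated.

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <sys/mman.h>
#include <linux/tee.h>

int main(void)
{
        struct tee_ioctl_version_data vers;
        struct tee_ioctl_shm_alloc_data shm = { .size = 4096 };
        int fd, shm_fd;
        void *p;

        fd = open("/dev/tee0", O_RDWR);
        if (fd < 0 || ioctl(fd, TEE_IOC_VERSION, &vers))
                return 1;
        printf("impl_id=%u gen_caps=0x%x\n", vers.impl_id, vers.gen_caps);

        /* TEE_IOC_SHM_ALLOC returns a new fd; mmap() it to reach the memory. */
        shm_fd = ioctl(fd, TEE_IOC_SHM_ALLOC, &shm);
        if (shm_fd < 0)
                return 1;
        p = mmap(NULL, (size_t)shm.size, PROT_READ | PROT_WRITE, MAP_SHARED,
                 shm_fd, 0);

        /* ... reference the buffer via shm.id in struct tee_ioctl_param ... */

        munmap(p, (size_t)shm.size);
        close(shm_fd);
        close(fd);
        return 0;
}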
diff --git a/include/uapi/linux/usb/ch9.h b/include/uapi/linux/usb/ch9.h
index 2c5d7c4a69e3..ce1169af39d7 100644
--- a/include/uapi/linux/usb/ch9.h
+++ b/include/uapi/linux/usb/ch9.h
@@ -224,7 +224,8 @@ struct usb_ctrlrequest {
* through the Linux-USB APIs, they are not converted to cpu byte
* order; it is the responsibility of the client code to do this.
* The single exception is when device and configuration descriptors (but
- * not other descriptors) are read from usbfs (i.e. /proc/bus/usb/BBB/DDD);
+ * not other descriptors) are read from character devices
+ * (i.e. /dev/bus/usb/BBB/DDD);
* in this case the fields are converted to host endianness by the kernel.
*/
diff --git a/include/uapi/linux/usb/functionfs.h b/include/uapi/linux/usb/functionfs.h
index b2a31a55a612..062606f02309 100644
--- a/include/uapi/linux/usb/functionfs.h
+++ b/include/uapi/linux/usb/functionfs.h
@@ -158,7 +158,7 @@ struct usb_ext_prop_desc {
* |-----+-----------------------+------+-------------------------------------|
* | 0 | bFirstInterfaceNumber | U8 | index of the interface or of the 1st|
* | | | | interface in an IAD group |
- * | 1 | Reserved | U8 | 0 |
+ * | 1 | Reserved | U8 | 1 |
* | 2 | CompatibleID | U8[8]| compatible ID string |
* | 10 | SubCompatibleID | U8[8]| subcompatible ID string |
* | 18 | Reserved | U8[6]| 0 |
diff --git a/include/uapi/linux/vfio.h b/include/uapi/linux/vfio.h
index 519eff362c1c..ae461050661a 100644
--- a/include/uapi/linux/vfio.h
+++ b/include/uapi/linux/vfio.h
@@ -198,6 +198,7 @@ struct vfio_device_info {
#define VFIO_DEVICE_FLAGS_PCI (1 << 1) /* vfio-pci device */
#define VFIO_DEVICE_FLAGS_PLATFORM (1 << 2) /* vfio-platform device */
#define VFIO_DEVICE_FLAGS_AMBA (1 << 3) /* vfio-amba device */
+#define VFIO_DEVICE_FLAGS_CCW (1 << 4) /* vfio-ccw device */
__u32 num_regions; /* Max region index + 1 */
__u32 num_irqs; /* Max IRQ index + 1 */
};
@@ -212,6 +213,7 @@ struct vfio_device_info {
#define VFIO_DEVICE_API_PCI_STRING "vfio-pci"
#define VFIO_DEVICE_API_PLATFORM_STRING "vfio-platform"
#define VFIO_DEVICE_API_AMBA_STRING "vfio-amba"
+#define VFIO_DEVICE_API_CCW_STRING "vfio-ccw"
/**
* VFIO_DEVICE_GET_REGION_INFO - _IOWR(VFIO_TYPE, VFIO_BASE + 8,
@@ -446,6 +448,22 @@ enum {
VFIO_PCI_NUM_IRQS
};
+/*
+ * The vfio-ccw bus driver makes use of the following fixed region and
+ * IRQ index mapping. Unimplemented regions return a size of zero.
+ * Unimplemented IRQ types return a count of zero.
+ */
+
+enum {
+ VFIO_CCW_CONFIG_REGION_INDEX,
+ VFIO_CCW_NUM_REGIONS
+};
+
+enum {
+ VFIO_CCW_IO_IRQ_INDEX,
+ VFIO_CCW_NUM_IRQS
+};
+
/**
* VFIO_DEVICE_GET_PCI_HOT_RESET_INFO - _IORW(VFIO_TYPE, VFIO_BASE + 12,
* struct vfio_pci_hot_reset_info)
diff --git a/include/uapi/linux/vfio_ccw.h b/include/uapi/linux/vfio_ccw.h
new file mode 100644
index 000000000000..34a7f6f9e065
--- /dev/null
+++ b/include/uapi/linux/vfio_ccw.h
@@ -0,0 +1,24 @@
+/*
+ * Interfaces for vfio-ccw
+ *
+ * Copyright IBM Corp. 2017
+ *
+ * Author(s): Dong Jia Shi <bjsdjshi@linux.vnet.ibm.com>
+ */
+
+#ifndef _VFIO_CCW_H_
+#define _VFIO_CCW_H_
+
+#include <linux/types.h>
+
+struct ccw_io_region {
+#define ORB_AREA_SIZE 12
+ __u8 orb_area[ORB_AREA_SIZE];
+#define SCSW_AREA_SIZE 12
+ __u8 scsw_area[SCSW_AREA_SIZE];
+#define IRB_AREA_SIZE 96
+ __u8 irb_area[IRB_AREA_SIZE];
+ __u32 ret_code;
+} __packed;
+
+#endif
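
To show how the new region and structure are meant to be used together, here is a hedged user-space sketch (not part of the patch): it locates the vfio-ccw I/O region through VFIO_DEVICE_GET_REGION_INFO and submits a channel program, assuming the VFIO container/group setup and the 12-byte ORB contents exist elsewhere; waiting on the I/O IRQ eventfd before reading back the IRB is omitted.

#include <string.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/vfio.h>
#include <linux/vfio_ccw.h>

static int ccw_submit(int device_fd, const void *orb)
{
	struct vfio_region_info info = {
		.argsz = sizeof(info),
		.index = VFIO_CCW_CONFIG_REGION_INDEX,
	};
	struct ccw_io_region region;

	if (ioctl(device_fd, VFIO_DEVICE_GET_REGION_INFO, &info) < 0)
		return -1;

	memset(&region, 0, sizeof(region));
	memcpy(region.orb_area, orb, ORB_AREA_SIZE);

	/* Writing the region triggers the I/O ... */
	if (pwrite(device_fd, &region, sizeof(region), info.offset) != sizeof(region))
		return -1;
	/* ... and reading it back yields ret_code plus the IRB data. */
	if (pread(device_fd, &region, sizeof(region), info.offset) != sizeof(region))
		return -1;

	return (int)region.ret_code;
}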
diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h
index 45184a2ef66c..2b8feb86d09e 100644
--- a/include/uapi/linux/videodev2.h
+++ b/include/uapi/linux/videodev2.h
@@ -143,6 +143,7 @@ enum v4l2_buf_type {
V4L2_BUF_TYPE_VIDEO_OUTPUT_MPLANE = 10,
V4L2_BUF_TYPE_SDR_CAPTURE = 11,
V4L2_BUF_TYPE_SDR_OUTPUT = 12,
+ V4L2_BUF_TYPE_META_CAPTURE = 13,
/* Deprecated, do not use */
V4L2_BUF_TYPE_PRIVATE = 0x80,
};
@@ -362,8 +363,7 @@ enum v4l2_quantization {
/*
* The default for R'G'B' quantization is always full range, except
* for the BT2020 colorspace. For Y'CbCr the quantization is always
- * limited range, except for COLORSPACE_JPEG, XV601 or XV709: those
- * are full range.
+ * limited range, except for COLORSPACE_JPEG: this is full range.
*/
V4L2_QUANTIZATION_DEFAULT = 0,
V4L2_QUANTIZATION_FULL_RANGE = 1,
@@ -378,8 +378,7 @@ enum v4l2_quantization {
#define V4L2_MAP_QUANTIZATION_DEFAULT(is_rgb_or_hsv, colsp, ycbcr_enc) \
(((is_rgb_or_hsv) && (colsp) == V4L2_COLORSPACE_BT2020) ? \
V4L2_QUANTIZATION_LIM_RANGE : \
- (((is_rgb_or_hsv) || (ycbcr_enc) == V4L2_YCBCR_ENC_XV601 || \
- (ycbcr_enc) == V4L2_YCBCR_ENC_XV709 || (colsp) == V4L2_COLORSPACE_JPEG) ? \
+ (((is_rgb_or_hsv) || (colsp) == V4L2_COLORSPACE_JPEG) ? \
V4L2_QUANTIZATION_FULL_RANGE : V4L2_QUANTIZATION_LIM_RANGE))
enum v4l2_priority {
@@ -453,6 +452,7 @@ struct v4l2_capability {
#define V4L2_CAP_SDR_CAPTURE 0x00100000 /* Is a SDR capture device */
#define V4L2_CAP_EXT_PIX_FORMAT 0x00200000 /* Supports the extended pixel format */
#define V4L2_CAP_SDR_OUTPUT 0x00400000 /* Is a SDR output device */
+#define V4L2_CAP_META_CAPTURE 0x00800000 /* Is a metadata capture device */
#define V4L2_CAP_READWRITE 0x01000000 /* read/write systemcalls */
#define V4L2_CAP_ASYNCIO 0x02000000 /* async I/O */
@@ -661,6 +661,7 @@ struct v4l2_pix_format {
#define V4L2_PIX_FMT_Y12I v4l2_fourcc('Y', '1', '2', 'I') /* Greyscale 12-bit L/R interleaved */
#define V4L2_PIX_FMT_Z16 v4l2_fourcc('Z', '1', '6', ' ') /* Depth data 16-bit */
#define V4L2_PIX_FMT_MT21C v4l2_fourcc('M', 'T', '2', '1') /* Mediatek compressed block mode */
+#define V4L2_PIX_FMT_INZI v4l2_fourcc('I', 'N', 'Z', 'I') /* Intel Planar Greyscale 10-bit and Depth 16-bit */
/* SDR formats - used only for Software Defined Radio devices */
#define V4L2_SDR_FMT_CU8 v4l2_fourcc('C', 'U', '0', '8') /* IQ u8 */
@@ -675,6 +676,10 @@ struct v4l2_pix_format {
#define V4L2_TCH_FMT_TU16 v4l2_fourcc('T', 'U', '1', '6') /* 16-bit unsigned touch data */
#define V4L2_TCH_FMT_TU08 v4l2_fourcc('T', 'U', '0', '8') /* 8-bit unsigned touch data */
+/* Meta-data formats */
+#define V4L2_META_FMT_VSP1_HGO v4l2_fourcc('V', 'S', 'P', 'H') /* R-Car VSP1 1-D Histogram */
+#define V4L2_META_FMT_VSP1_HGT v4l2_fourcc('V', 'S', 'P', 'T') /* R-Car VSP1 2-D Histogram */
+
/* priv field value to indicate that subsequent fields are valid. */
#define V4L2_PIX_FMT_PRIV_MAGIC 0xfeedcafe
@@ -1654,6 +1659,7 @@ struct v4l2_querymenu {
#define V4L2_CTRL_FLAG_VOLATILE 0x0080
#define V4L2_CTRL_FLAG_HAS_PAYLOAD 0x0100
#define V4L2_CTRL_FLAG_EXECUTE_ON_WRITE 0x0200
+#define V4L2_CTRL_FLAG_MODIFY_LAYOUT 0x0400
/* Query flags, to be ORed with the control ID */
#define V4L2_CTRL_FLAG_NEXT_CTRL 0x80000000
@@ -2087,6 +2093,16 @@ struct v4l2_sdr_format {
} __attribute__ ((packed));
/**
+ * struct v4l2_meta_format - metadata format definition
+ * @dataformat: little endian four character code (fourcc)
+ * @buffersize: maximum size in bytes required for data
+ */
+struct v4l2_meta_format {
+ __u32 dataformat;
+ __u32 buffersize;
+} __attribute__ ((packed));
+
+/**
* struct v4l2_format - stream data format
* @type: enum v4l2_buf_type; type of the data stream
* @pix: definition of an image format
@@ -2105,6 +2121,7 @@ struct v4l2_format {
struct v4l2_vbi_format vbi; /* V4L2_BUF_TYPE_VBI_CAPTURE */
struct v4l2_sliced_vbi_format sliced; /* V4L2_BUF_TYPE_SLICED_VBI_CAPTURE */
struct v4l2_sdr_format sdr; /* V4L2_BUF_TYPE_SDR_CAPTURE */
+ struct v4l2_meta_format meta; /* V4L2_BUF_TYPE_META_CAPTURE */
__u8 raw_data[200]; /* user-defined */
} fmt;
};
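
A short sketch (an assumption, not part of the patch) of how an application might probe the new metadata buffer type: check V4L2_CAP_META_CAPTURE in the device capabilities, then read back the negotiated v4l2_meta_format with VIDIOC_G_FMT.

#include <stdio.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/videodev2.h>

static int query_meta_format(int fd)
{
	struct v4l2_capability cap;
	struct v4l2_format fmt;

	if (ioctl(fd, VIDIOC_QUERYCAP, &cap) < 0 ||
	    !(cap.device_caps & V4L2_CAP_META_CAPTURE))
		return -1;

	memset(&fmt, 0, sizeof(fmt));
	fmt.type = V4L2_BUF_TYPE_META_CAPTURE;
	if (ioctl(fd, VIDIOC_G_FMT, &fmt) < 0)
		return -1;

	printf("meta fourcc 0x%08x, buffersize %u\n",
	       fmt.fmt.meta.dataformat, fmt.fmt.meta.buffersize);
	return 0;
}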
diff --git a/include/uapi/linux/vsockmon.h b/include/uapi/linux/vsockmon.h
new file mode 100644
index 000000000000..a08b522ef597
--- /dev/null
+++ b/include/uapi/linux/vsockmon.h
@@ -0,0 +1,60 @@
+#ifndef _UAPI_VSOCKMON_H
+#define _UAPI_VSOCKMON_H
+
+#include <linux/virtio_vsock.h>
+
+/*
+ * vsockmon is the AF_VSOCK packet capture device. Packets captured have the
+ * following layout:
+ *
+ * +-----------------------------------+
+ * | vsockmon header |
+ * | (struct af_vsockmon_hdr) |
+ * +-----------------------------------+
+ * | transport header |
+ * | (af_vsockmon_hdr->len bytes long) |
+ * +-----------------------------------+
+ * | payload |
+ * | (until end of packet) |
+ * +-----------------------------------+
+ *
+ * The vsockmon header is a transport-independent description of the packet.
+ * It duplicates some of the information from the transport header so that
+ * no transport-specific knowledge is necessary to process packets.
+ *
+ * The transport header is useful for low-level transport-specific packet
+ * analysis. Transport type is given in af_vsockmon_hdr->transport and
+ * transport header length is given in af_vsockmon_hdr->len.
+ *
+ * If af_vsockmon_hdr->op is AF_VSOCK_OP_PAYLOAD then the payload follows the
+ * transport header. Other ops do not have a payload.
+ */
+
+struct af_vsockmon_hdr {
+ __le64 src_cid;
+ __le64 dst_cid;
+ __le32 src_port;
+ __le32 dst_port;
+ __le16 op; /* enum af_vsockmon_op */
+ __le16 transport; /* enum af_vsockmon_transport */
+ __le16 len; /* Transport header length */
+ __u8 reserved[2];
+};
+
+enum af_vsockmon_op {
+ AF_VSOCK_OP_UNKNOWN = 0,
+ AF_VSOCK_OP_CONNECT = 1,
+ AF_VSOCK_OP_DISCONNECT = 2,
+ AF_VSOCK_OP_CONTROL = 3,
+ AF_VSOCK_OP_PAYLOAD = 4,
+};
+
+enum af_vsockmon_transport {
+ AF_VSOCK_TRANSPORT_UNKNOWN = 0,
+ AF_VSOCK_TRANSPORT_NO_INFO = 1, /* No transport information */
+
+ /* Transport header type: struct virtio_vsock_hdr */
+ AF_VSOCK_TRANSPORT_VIRTIO = 2,
+};
+
+#endif
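
The layout comment translates directly into a parser. Below is a sketch (not part of the patch) that walks one captured packet; the le16toh()/le32toh()/le64toh() helpers from <endian.h> are assumed to be available for the little-endian fields.

#include <endian.h>
#include <stddef.h>
#include <stdio.h>
#include <linux/vsockmon.h>

static void dump_vsock_pkt(const unsigned char *pkt, size_t len)
{
	const struct af_vsockmon_hdr *hdr = (const void *)pkt;
	size_t payload_off;

	if (len < sizeof(*hdr))
		return;

	/* The transport header (hdr->len bytes) follows the vsockmon header. */
	payload_off = sizeof(*hdr) + le16toh(hdr->len);
	if (len < payload_off)
		return;

	printf("%llu:%u -> %llu:%u op %u, payload %zu bytes\n",
	       (unsigned long long)le64toh(hdr->src_cid), le32toh(hdr->src_port),
	       (unsigned long long)le64toh(hdr->dst_cid), le32toh(hdr->dst_port),
	       le16toh(hdr->op),
	       le16toh(hdr->op) == AF_VSOCK_OP_PAYLOAD ? len - payload_off : 0);
}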
diff --git a/include/uapi/linux/xfrm.h b/include/uapi/linux/xfrm.h
index 1fc62b239f1b..2b384ff09fa0 100644
--- a/include/uapi/linux/xfrm.h
+++ b/include/uapi/linux/xfrm.h
@@ -303,6 +303,7 @@ enum xfrm_attr_type_t {
XFRMA_PROTO, /* __u8 */
XFRMA_ADDRESS_FILTER, /* struct xfrm_address_filter */
XFRMA_PAD,
+ XFRMA_OFFLOAD_DEV, /* struct xfrm_state_offload */
__XFRMA_MAX
#define XFRMA_MAX (__XFRMA_MAX - 1)
@@ -494,6 +495,13 @@ struct xfrm_address_filter {
__u8 dplen;
};
+struct xfrm_user_offload {
+ int ifindex;
+ __u8 flags;
+};
+#define XFRM_OFFLOAD_IPV6 1
+#define XFRM_OFFLOAD_INBOUND 2
+
#ifndef __KERNEL__
/* backwards compatibility for userspace */
#define XFRMGRP_ACQUIRE 1
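
As a small illustration (an assumption, not from the patch), the new structure is simply filled in by user space and attached to an XFRM state message as the XFRMA_OFFLOAD_DEV attribute; the netlink framing itself is omitted here.

#include <net/if.h>
#include <linux/xfrm.h>

/* Build the payload for an XFRMA_OFFLOAD_DEV attribute. */
static struct xfrm_user_offload make_offload(const char *ifname, int inbound, int ipv6)
{
	struct xfrm_user_offload xuo = {
		.ifindex = (int)if_nametoindex(ifname),
		.flags = (inbound ? XFRM_OFFLOAD_INBOUND : 0) |
			 (ipv6 ? XFRM_OFFLOAD_IPV6 : 0),
	};

	return xuo;
}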
diff --git a/include/uapi/rdma/ib_user_verbs.h b/include/uapi/rdma/ib_user_verbs.h
index 997f904c7692..477d629f539d 100644
--- a/include/uapi/rdma/ib_user_verbs.h
+++ b/include/uapi/rdma/ib_user_verbs.h
@@ -947,6 +947,17 @@ struct ib_uverbs_flow_spec_action_tag {
__u32 reserved1;
};
+struct ib_uverbs_flow_spec_action_drop {
+ union {
+ struct ib_uverbs_flow_spec_hdr hdr;
+ struct {
+ __u32 type;
+ __u16 size;
+ __u16 reserved;
+ };
+ };
+};
+
struct ib_uverbs_flow_tunnel_filter {
__be32 tunnel_id;
};
diff --git a/include/uapi/rdma/vmw_pvrdma-abi.h b/include/uapi/rdma/vmw_pvrdma-abi.h
index 5016abc9ee97..c8c1d2d6df4d 100644
--- a/include/uapi/rdma/vmw_pvrdma-abi.h
+++ b/include/uapi/rdma/vmw_pvrdma-abi.h
@@ -222,7 +222,7 @@ struct pvrdma_sq_wqe_hdr {
__u32 opcode; /* operation type */
__u32 send_flags; /* wr flags */
union {
- __u32 imm_data;
+ __be32 imm_data;
__u32 invalidate_rkey;
} ex;
__u32 reserved;
@@ -273,7 +273,7 @@ struct pvrdma_cqe {
__u32 opcode;
__u32 status;
__u32 byte_len;
- __u32 imm_data;
+ __be32 imm_data;
__u32 src_qp;
__u32 wc_flags;
__u32 vendor_err;
diff --git a/include/uapi/sound/asound.h b/include/uapi/sound/asound.h
index be353a78c303..fd41697cb4d3 100644
--- a/include/uapi/sound/asound.h
+++ b/include/uapi/sound/asound.h
@@ -107,9 +107,11 @@ enum {
SNDRV_HWDEP_IFACE_FW_DIGI00X, /* Digidesign Digi 002/003 family */
SNDRV_HWDEP_IFACE_FW_TASCAM, /* TASCAM FireWire series */
SNDRV_HWDEP_IFACE_LINE6, /* Line6 USB processors */
+ SNDRV_HWDEP_IFACE_FW_MOTU, /* MOTU FireWire series */
+ SNDRV_HWDEP_IFACE_FW_FIREFACE, /* RME Fireface series */
/* Don't forget to change the following: */
- SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_LINE6
+ SNDRV_HWDEP_IFACE_LAST = SNDRV_HWDEP_IFACE_FW_FIREFACE
};
struct snd_hwdep_info {
diff --git a/include/uapi/sound/firewire.h b/include/uapi/sound/firewire.h
index db79a12fcc78..622900488bdc 100644
--- a/include/uapi/sound/firewire.h
+++ b/include/uapi/sound/firewire.h
@@ -10,6 +10,7 @@
#define SNDRV_FIREWIRE_EVENT_DICE_NOTIFICATION 0xd1ce004e
#define SNDRV_FIREWIRE_EVENT_EFW_RESPONSE 0x4e617475
#define SNDRV_FIREWIRE_EVENT_DIGI00X_MESSAGE 0x746e736c
+#define SNDRV_FIREWIRE_EVENT_MOTU_NOTIFICATION 0x64776479
struct snd_firewire_event_common {
unsigned int type; /* SNDRV_FIREWIRE_EVENT_xxx */
@@ -46,12 +47,18 @@ struct snd_firewire_event_digi00x_message {
__u32 message; /* Digi00x-specific message */
};
+struct snd_firewire_event_motu_notification {
+ unsigned int type;
+ __u32 message; /* MOTU-specific bits. */
+};
+
union snd_firewire_event {
struct snd_firewire_event_common common;
struct snd_firewire_event_lock_status lock_status;
struct snd_firewire_event_dice_notification dice_notification;
struct snd_firewire_event_efw_response efw_response;
struct snd_firewire_event_digi00x_message digi00x_message;
+ struct snd_firewire_event_motu_notification motu_notification;
};
@@ -65,7 +72,8 @@ union snd_firewire_event {
#define SNDRV_FIREWIRE_TYPE_OXFW 4
#define SNDRV_FIREWIRE_TYPE_DIGI00X 5
#define SNDRV_FIREWIRE_TYPE_TASCAM 6
-/* RME, MOTU, ... */
+#define SNDRV_FIREWIRE_TYPE_MOTU 7
+#define SNDRV_FIREWIRE_TYPE_FIREFACE 8
struct snd_firewire_get_info {
unsigned int type; /* SNDRV_FIREWIRE_TYPE_xxx */
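
A user-space sketch (an assumption, not part of the patch) of picking up the new notification from the hwdep character device; only the type check is shown.

#include <stdio.h>
#include <unistd.h>
#include <sound/firewire.h>

static void poll_firewire_event(int hwdep_fd)
{
	union snd_firewire_event ev;
	ssize_t n = read(hwdep_fd, &ev, sizeof(ev));

	if (n < (ssize_t)sizeof(ev.common))
		return;

	if (ev.common.type == SNDRV_FIREWIRE_EVENT_MOTU_NOTIFICATION)
		printf("MOTU notification: 0x%08x\n",
		       (unsigned int)ev.motu_notification.message);
}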
diff --git a/include/video/imx-ipu-v3.h b/include/video/imx-ipu-v3.h
index 53cd07ccaa4c..8cb07680fb41 100644
--- a/include/video/imx-ipu-v3.h
+++ b/include/video/imx-ipu-v3.h
@@ -161,6 +161,28 @@ enum ipu_channel_irq {
#define IPUV3_CHANNEL_MEM_BG_ASYNC_ALPHA 52
#define IPUV3_NUM_CHANNELS 64
+static inline int ipu_channel_alpha_channel(int ch_num)
+{
+ switch (ch_num) {
+ case IPUV3_CHANNEL_G_MEM_IC_PRP_VF:
+ return IPUV3_CHANNEL_G_MEM_IC_PRP_VF_ALPHA;
+ case IPUV3_CHANNEL_G_MEM_IC_PP:
+ return IPUV3_CHANNEL_G_MEM_IC_PP_ALPHA;
+ case IPUV3_CHANNEL_MEM_FG_SYNC:
+ return IPUV3_CHANNEL_MEM_FG_SYNC_ALPHA;
+ case IPUV3_CHANNEL_MEM_FG_ASYNC:
+ return IPUV3_CHANNEL_MEM_FG_ASYNC_ALPHA;
+ case IPUV3_CHANNEL_MEM_BG_SYNC:
+ return IPUV3_CHANNEL_MEM_BG_SYNC_ALPHA;
+ case IPUV3_CHANNEL_MEM_BG_ASYNC:
+ return IPUV3_CHANNEL_MEM_BG_ASYNC_ALPHA;
+ case IPUV3_CHANNEL_MEM_VDI_PLANE1_COMB:
+ return IPUV3_CHANNEL_MEM_VDI_PLANE1_COMB_ALPHA;
+ default:
+ return -EINVAL;
+ }
+}
+
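A caller might use the new helper to resolve the companion alpha channel before requesting it from the IDMAC. This is only a sketch assuming the existing ipu_idmac_get() API, not code from the patch:

#include <linux/err.h>
#include <video/imx-ipu-v3.h>

static int request_alpha_channel(struct ipu_soc *ipu, int ch_num,
				 struct ipuv3_channel **alpha_ch)
{
	int alpha = ipu_channel_alpha_channel(ch_num);

	if (alpha < 0)
		return alpha;	/* this channel has no alpha companion */

	*alpha_ch = ipu_idmac_get(ipu, alpha);
	return PTR_ERR_OR_ZERO(*alpha_ch);
}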
int ipu_map_irq(struct ipu_soc *ipu, int irq);
int ipu_idmac_channel_irq(struct ipu_soc *ipu, struct ipuv3_channel *channel,
enum ipu_channel_irq irq);
@@ -300,7 +322,7 @@ struct ipu_dp *ipu_dp_get(struct ipu_soc *ipu, unsigned int flow);
void ipu_dp_put(struct ipu_dp *);
int ipu_dp_enable(struct ipu_soc *ipu);
int ipu_dp_enable_channel(struct ipu_dp *dp);
-void ipu_dp_disable_channel(struct ipu_dp *dp);
+void ipu_dp_disable_channel(struct ipu_dp *dp, bool sync);
void ipu_dp_disable(struct ipu_soc *ipu);
int ipu_dp_setup_channel(struct ipu_dp *dp,
enum ipu_color_space in, enum ipu_color_space out);
@@ -309,6 +331,21 @@ int ipu_dp_set_global_alpha(struct ipu_dp *dp, bool enable, u8 alpha,
bool bg_chan);
/*
+ * IPU Prefetch Resolve Gasket (prg) functions
+ */
+int ipu_prg_max_active_channels(void);
+bool ipu_prg_present(struct ipu_soc *ipu);
+bool ipu_prg_format_supported(struct ipu_soc *ipu, uint32_t format,
+ uint64_t modifier);
+int ipu_prg_enable(struct ipu_soc *ipu);
+void ipu_prg_disable(struct ipu_soc *ipu);
+void ipu_prg_channel_disable(struct ipuv3_channel *ipu_chan);
+int ipu_prg_channel_configure(struct ipuv3_channel *ipu_chan,
+ unsigned int axi_id, unsigned int width,
+ unsigned int height, unsigned int stride,
+ u32 format, unsigned long *eba);
+
+/*
* IPU CMOS Sensor Interface (csi) functions
*/
struct ipu_csi;
diff --git a/include/video/udlfb.h b/include/video/udlfb.h
index f9466fa54ba4..3ea90aea5617 100644
--- a/include/video/udlfb.h
+++ b/include/video/udlfb.h
@@ -92,6 +92,6 @@ struct dlfb_data {
/* remove these once align.h patch is taken into kernel */
#define DL_ALIGN_UP(x, a) ALIGN(x, a)
-#define DL_ALIGN_DOWN(x, a) ALIGN(x-(a-1), a)
+#define DL_ALIGN_DOWN(x, a) ALIGN_DOWN(x, a)
#endif
diff --git a/include/xen/arm/page-coherent.h b/include/xen/arm/page-coherent.h
index 95ce6ac3a971..b1b4ecdf329a 100644
--- a/include/xen/arm/page-coherent.h
+++ b/include/xen/arm/page-coherent.h
@@ -2,8 +2,16 @@
#define _ASM_ARM_XEN_PAGE_COHERENT_H
#include <asm/page.h>
+#include <asm/dma-mapping.h>
#include <linux/dma-mapping.h>
+static inline const struct dma_map_ops *xen_get_dma_ops(struct device *dev)
+{
+ if (dev && dev->archdata.dev_dma_ops)
+ return dev->archdata.dev_dma_ops;
+ return get_arch_dma_ops(NULL);
+}
+
void __xen_dma_map_page(struct device *hwdev, struct page *page,
dma_addr_t dev_addr, unsigned long offset, size_t size,
enum dma_data_direction dir, unsigned long attrs);
@@ -19,13 +27,13 @@ void __xen_dma_sync_single_for_device(struct device *hwdev,
static inline void *xen_alloc_coherent_pages(struct device *hwdev, size_t size,
dma_addr_t *dma_handle, gfp_t flags, unsigned long attrs)
{
- return __generic_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
+ return xen_get_dma_ops(hwdev)->alloc(hwdev, size, dma_handle, flags, attrs);
}
static inline void xen_free_coherent_pages(struct device *hwdev, size_t size,
void *cpu_addr, dma_addr_t dma_handle, unsigned long attrs)
{
- __generic_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
+ xen_get_dma_ops(hwdev)->free(hwdev, size, cpu_addr, dma_handle, attrs);
}
static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
@@ -49,7 +57,7 @@ static inline void xen_dma_map_page(struct device *hwdev, struct page *page,
* specific function.
*/
if (local)
- __generic_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
+ xen_get_dma_ops(hwdev)->map_page(hwdev, page, offset, size, dir, attrs);
else
__xen_dma_map_page(hwdev, page, dev_addr, offset, size, dir, attrs);
}
@@ -67,8 +75,8 @@ static inline void xen_dma_unmap_page(struct device *hwdev, dma_addr_t handle,
* specific function.
*/
if (pfn_valid(pfn)) {
- if (__generic_dma_ops(hwdev)->unmap_page)
- __generic_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
+ if (xen_get_dma_ops(hwdev)->unmap_page)
+ xen_get_dma_ops(hwdev)->unmap_page(hwdev, handle, size, dir, attrs);
} else
__xen_dma_unmap_page(hwdev, handle, size, dir, attrs);
}
@@ -78,8 +86,8 @@ static inline void xen_dma_sync_single_for_cpu(struct device *hwdev,
{
unsigned long pfn = PFN_DOWN(handle);
if (pfn_valid(pfn)) {
- if (__generic_dma_ops(hwdev)->sync_single_for_cpu)
- __generic_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
+ if (xen_get_dma_ops(hwdev)->sync_single_for_cpu)
+ xen_get_dma_ops(hwdev)->sync_single_for_cpu(hwdev, handle, size, dir);
} else
__xen_dma_sync_single_for_cpu(hwdev, handle, size, dir);
}
@@ -89,8 +97,8 @@ static inline void xen_dma_sync_single_for_device(struct device *hwdev,
{
unsigned long pfn = PFN_DOWN(handle);
if (pfn_valid(pfn)) {
- if (__generic_dma_ops(hwdev)->sync_single_for_device)
- __generic_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
+ if (xen_get_dma_ops(hwdev)->sync_single_for_device)
+ xen_get_dma_ops(hwdev)->sync_single_for_device(hwdev, handle, size, dir);
} else
__xen_dma_sync_single_for_device(hwdev, handle, size, dir);
}
diff --git a/include/xen/interface/io/9pfs.h b/include/xen/interface/io/9pfs.h
new file mode 100644
index 000000000000..5b6c19dae5e2
--- /dev/null
+++ b/include/xen/interface/io/9pfs.h
@@ -0,0 +1,36 @@
+/*
+ * 9pfs.h -- Xen 9PFS transport
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (C) 2017 Stefano Stabellini <stefano@aporeto.com>
+ */
+
+#ifndef __XEN_PUBLIC_IO_9PFS_H__
+#define __XEN_PUBLIC_IO_9PFS_H__
+
+#include "xen/interface/io/ring.h"
+
+/*
+ * See docs/misc/9pfs.markdown in xen.git for the full specification:
+ * https://xenbits.xen.org/docs/unstable/misc/9pfs.html
+ */
+DEFINE_XEN_FLEX_RING_AND_INTF(xen_9pfs);
+
+#endif
diff --git a/include/xen/interface/io/displif.h b/include/xen/interface/io/displif.h
new file mode 100644
index 000000000000..596578d9be3e
--- /dev/null
+++ b/include/xen/interface/io/displif.h
@@ -0,0 +1,854 @@
+/******************************************************************************
+ * displif.h
+ *
+ * Unified display device I/O interface for Xen guest OSes.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (C) 2016-2017 EPAM Systems Inc.
+ *
+ * Authors: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ * Oleksandr Grytsov <oleksandr_grytsov@epam.com>
+ */
+
+#ifndef __XEN_PUBLIC_IO_DISPLIF_H__
+#define __XEN_PUBLIC_IO_DISPLIF_H__
+
+#include "ring.h"
+#include "../grant_table.h"
+
+/*
+ ******************************************************************************
+ * Protocol version
+ ******************************************************************************
+ */
+#define XENDISPL_PROTOCOL_VERSION "1"
+
+/*
+ ******************************************************************************
+ * Main features provided by the protocol
+ ******************************************************************************
+ * This protocol aims to provide a unified protocol which fits more
+ * sophisticated use-cases than a framebuffer device can handle. At the
+ * moment basic functionality is supported with the intention to be extended:
+ * o multiple dynamically allocated/destroyed framebuffers
+ * o buffers of arbitrary sizes
+ * o buffer allocation at either back or front end
+ * o better configuration options including multiple display support
+ *
+ * Note: existing fbif can be used together with displif running at the
+ * same time, e.g. on Linux one provides framebuffer and another DRM/KMS
+ *
+ * Note: display resolution (XenStore's "resolution" property) defines
+ * visible area of the virtual display. At the same time resolution of
+ * the display and frame buffers may differ: buffers can be smaller, equal
+ * or bigger than the visible area. This is to enable use-cases, where backend
+ * may do some post-processing of the display and frame buffers supplied,
+ * e.g. those buffers can be just a part of the final composition.
+ *
+ ******************************************************************************
+ * Direction of improvements
+ ******************************************************************************
+ * Future extensions to the existing protocol may include:
+ * o display/connector cloning
+ * o allocation of objects other than display buffers
+ * o plane/overlay support
+ * o scaling support
+ * o rotation support
+ *
+ ******************************************************************************
+ * Feature and Parameter Negotiation
+ ******************************************************************************
+ *
+ * Front->back notifications: when enqueuing a new request, sending a
+ * notification can be made conditional on xendispl_req (i.e., the generic
+ * hold-off mechanism provided by the ring macros). Backends must set
+ * xendispl_req appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
+ *
+ * Back->front notifications: when enqueuing a new response, sending a
+ * notification can be made conditional on xendispl_resp (i.e., the generic
+ * hold-off mechanism provided by the ring macros). Frontends must set
+ * xendispl_resp appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
+ *
+ * The two halves of a para-virtual display driver utilize nodes within
+ * XenStore to communicate capabilities and to negotiate operating parameters.
+ * This section enumerates these nodes which reside in the respective front and
+ * backend portions of XenStore, following the XenBus convention.
+ *
+ * All data in XenStore is stored as strings. Nodes specifying numeric
+ * values are encoded in decimal. Integer value ranges listed below are
+ * expressed as fixed sized integer types capable of storing the conversion
+ * of a properly formatted node string, without loss of information.
+ *
+ ******************************************************************************
+ * Example configuration
+ ******************************************************************************
+ *
+ * Note: depending on the use-case backend can expose more display connectors
+ * than the underlying HW physically has by employing SW graphics compositors
+ *
+ * This is an example of backend and frontend configuration:
+ *
+ *--------------------------------- Backend -----------------------------------
+ *
+ * /local/domain/0/backend/vdispl/1/0/frontend-id = "1"
+ * /local/domain/0/backend/vdispl/1/0/frontend = "/local/domain/1/device/vdispl/0"
+ * /local/domain/0/backend/vdispl/1/0/state = "4"
+ * /local/domain/0/backend/vdispl/1/0/versions = "1,2"
+ *
+ *--------------------------------- Frontend ----------------------------------
+ *
+ * /local/domain/1/device/vdispl/0/backend-id = "0"
+ * /local/domain/1/device/vdispl/0/backend = "/local/domain/0/backend/vdispl/1/0"
+ * /local/domain/1/device/vdispl/0/state = "4"
+ * /local/domain/1/device/vdispl/0/version = "1"
+ * /local/domain/1/device/vdispl/0/be-alloc = "1"
+ *
+ *-------------------------- Connector 0 configuration ------------------------
+ *
+ * /local/domain/1/device/vdispl/0/0/resolution = "1920x1080"
+ * /local/domain/1/device/vdispl/0/0/req-ring-ref = "2832"
+ * /local/domain/1/device/vdispl/0/0/req-event-channel = "15"
+ * /local/domain/1/device/vdispl/0/0/evt-ring-ref = "387"
+ * /local/domain/1/device/vdispl/0/0/evt-event-channel = "16"
+ *
+ *-------------------------- Connector 1 configuration ------------------------
+ *
+ * /local/domain/1/device/vdispl/0/1/resolution = "800x600"
+ * /local/domain/1/device/vdispl/0/1/req-ring-ref = "2833"
+ * /local/domain/1/device/vdispl/0/1/req-event-channel = "17"
+ * /local/domain/1/device/vdispl/0/1/evt-ring-ref = "388"
+ * /local/domain/1/device/vdispl/0/1/evt-event-channel = "18"
+ *
+ ******************************************************************************
+ * Backend XenBus Nodes
+ ******************************************************************************
+ *
+ *----------------------------- Protocol version ------------------------------
+ *
+ * versions
+ * Values: <string>
+ *
+ * List of XENDISPL_LIST_SEPARATOR separated protocol versions supported
+ * by the backend. For example "1,2,3".
+ *
+ ******************************************************************************
+ * Frontend XenBus Nodes
+ ******************************************************************************
+ *
+ *-------------------------------- Addressing ---------------------------------
+ *
+ * dom-id
+ * Values: <uint16_t>
+ *
+ * Domain identifier.
+ *
+ * dev-id
+ * Values: <uint16_t>
+ *
+ * Device identifier.
+ *
+ * conn-idx
+ * Values: <uint8_t>
+ *
+ * Zero-based contiguous index of the connector.
+ * /local/domain/<dom-id>/device/vdispl/<dev-id>/<conn-idx>/...
+ *
+ *----------------------------- Protocol version ------------------------------
+ *
+ * version
+ * Values: <string>
+ *
+ * Protocol version, chosen among the ones supported by the backend.
+ *
+ *------------------------- Backend buffer allocation -------------------------
+ *
+ * be-alloc
+ * Values: "0", "1"
+ *
+ * If value is set to "1", then backend can be a buffer provider/allocator
+ * for this domain during XENDISPL_OP_DBUF_CREATE operation (see below
+ * for negotiation).
+ * If value is not "1" or omitted frontend must allocate buffers itself.
+ *
+ *----------------------------- Connector settings ----------------------------
+ *
+ * resolution
+ * Values: <width, uint32_t>x<height, uint32_t>
+ *
+ * Width and height of the connector in pixels separated by
+ * XENDISPL_RESOLUTION_SEPARATOR. This defines visible area of the
+ * display.
+ *
+ *------------------ Connector Request Transport Parameters -------------------
+ *
+ * This communication path is used to deliver requests from frontend to backend
+ * and get the corresponding responses from backend to frontend,
+ * set up per connector.
+ *
+ * req-event-channel
+ * Values: <uint32_t>
+ *
+ * The identifier of the Xen connector's control event channel
+ * used to signal activity in the ring buffer.
+ *
+ * req-ring-ref
+ * Values: <uint32_t>
+ *
+ * The Xen grant reference granting permission for the backend to map
+ * a sole page of connector's control ring buffer.
+ *
+ *------------------- Connector Event Transport Parameters --------------------
+ *
+ * This communication path is used to deliver asynchronous events from backend
+ * to frontend, set up per connector.
+ *
+ * evt-event-channel
+ * Values: <uint32_t>
+ *
+ * The identifier of the Xen connector's event channel
+ * used to signal activity in the ring buffer.
+ *
+ * evt-ring-ref
+ * Values: <uint32_t>
+ *
+ * The Xen grant reference granting permission for the backend to map
+ * a sole page of connector's event ring buffer.
+ */
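
To make the node layout above concrete, here is a hedged frontend-side sketch (not part of this header) that reads one connector's request-ring parameters with the Linux xenbus helpers; the node names match the entries documented above.

#include <linux/kernel.h>
#include <xen/xenbus.h>

static int vdispl_read_conn_params(struct xenbus_device *xb_dev, int conn,
				   u32 *req_ring_ref, u32 *req_evtchn)
{
	char dir[128];
	int ret;

	/* e.g. "device/vdispl/0/0" for connector 0 */
	snprintf(dir, sizeof(dir), "%s/%d", xb_dev->nodename, conn);

	ret = xenbus_scanf(XBT_NIL, dir, "req-ring-ref", "%u", req_ring_ref);
	if (ret < 0)
		return ret;

	ret = xenbus_scanf(XBT_NIL, dir, "req-event-channel", "%u", req_evtchn);
	return ret < 0 ? ret : 0;
}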
+
+/*
+ ******************************************************************************
+ * STATE DIAGRAMS
+ ******************************************************************************
+ *
+ * Tool stack creates front and back state nodes with initial state
+ * XenbusStateInitialising.
+ * Tool stack creates and sets up frontend display configuration
+ * nodes per domain.
+ *
+ *-------------------------------- Normal flow --------------------------------
+ *
+ * Front Back
+ * ================================= =====================================
+ * XenbusStateInitialising XenbusStateInitialising
+ * o Query backend device identification
+ * data.
+ * o Open and validate backend device.
+ * |
+ * |
+ * V
+ * XenbusStateInitWait
+ *
+ * o Query frontend configuration
+ * o Allocate and initialize
+ * event channels per configured
+ * connector.
+ * o Publish transport parameters
+ * that will be in effect during
+ * this connection.
+ * |
+ * |
+ * V
+ * XenbusStateInitialised
+ *
+ * o Query frontend transport parameters.
+ * o Connect to the event channels.
+ * |
+ * |
+ * V
+ * XenbusStateConnected
+ *
+ * o Create and initialize OS
+ * virtual display connectors
+ * as per configuration.
+ * |
+ * |
+ * V
+ * XenbusStateConnected
+ *
+ * XenbusStateUnknown
+ * XenbusStateClosed
+ * XenbusStateClosing
+ * o Remove virtual display device
+ * o Remove event channels
+ * |
+ * |
+ * V
+ * XenbusStateClosed
+ *
+ *------------------------------- Recovery flow -------------------------------
+ *
+ * In case of frontend unrecoverable errors backend handles that as
+ * if frontend goes into the XenbusStateClosed state.
+ *
+ * In case of backend unrecoverable errors frontend tries removing
+ * the virtualized device. If this is possible at the moment of error,
+ * then frontend goes into the XenbusStateInitialising state and is ready for
+ * new connection with backend. If the virtualized device is still in use and
+ * cannot be removed, then frontend goes into the XenbusStateReconfiguring state
+ * until either the virtualized device is removed or backend initiates a new
+ * connection. On the virtualized device removal frontend goes into the
+ * XenbusStateInitialising state.
+ *
+ * Note on XenbusStateReconfiguring state of the frontend: if backend has
+ * unrecoverable errors then frontend cannot send requests to the backend
+ * and thus cannot provide functionality of the virtualized device anymore.
+ * After backend is back to normal the virtualized device may still hold some
+ * state: configuration in use, allocated buffers, client application state etc.
+ * In most cases, this will require frontend to implement complex recovery
+ * reconnect logic. Instead, by going into XenbusStateReconfiguring state,
+ * frontend will make sure no new clients of the virtualized device are
+ * accepted, allow existing client(s) to exit gracefully by signaling error
+ * state etc.
+ * Once all the clients are gone frontend can reinitialize the virtualized
+ * device and get into XenbusStateInitialising state again signaling the
+ * backend that a new connection can be made.
+ *
+ * There are multiple conditions possible under which frontend will go from
+ * XenbusStateReconfiguring into XenbusStateInitialising, some of them are OS
+ * specific. For example:
+ * 1. The underlying OS framework may provide callbacks to signal that the last
+ * client of the virtualized device has gone and the device can be removed
+ * 2. Frontend can schedule a deferred work (timer/tasklet/workqueue)
+ * to periodically check if this is the right time to re-try removal of
+ * the virtualized device.
+ * 3. By any other means.
+ *
+ ******************************************************************************
+ * REQUEST CODES
+ ******************************************************************************
+ * Request codes [0; 15] are reserved and must not be used
+ */
+
+#define XENDISPL_OP_DBUF_CREATE 0x10
+#define XENDISPL_OP_DBUF_DESTROY 0x11
+#define XENDISPL_OP_FB_ATTACH 0x12
+#define XENDISPL_OP_FB_DETACH 0x13
+#define XENDISPL_OP_SET_CONFIG 0x14
+#define XENDISPL_OP_PG_FLIP 0x15
+
+/*
+ ******************************************************************************
+ * EVENT CODES
+ ******************************************************************************
+ */
+#define XENDISPL_EVT_PG_FLIP 0x00
+
+/*
+ ******************************************************************************
+ * XENSTORE FIELD AND PATH NAME STRINGS, HELPERS
+ ******************************************************************************
+ */
+#define XENDISPL_DRIVER_NAME "vdispl"
+
+#define XENDISPL_LIST_SEPARATOR ","
+#define XENDISPL_RESOLUTION_SEPARATOR "x"
+
+#define XENDISPL_FIELD_BE_VERSIONS "versions"
+#define XENDISPL_FIELD_FE_VERSION "version"
+#define XENDISPL_FIELD_REQ_RING_REF "req-ring-ref"
+#define XENDISPL_FIELD_REQ_CHANNEL "req-event-channel"
+#define XENDISPL_FIELD_EVT_RING_REF "evt-ring-ref"
+#define XENDISPL_FIELD_EVT_CHANNEL "evt-event-channel"
+#define XENDISPL_FIELD_RESOLUTION "resolution"
+#define XENDISPL_FIELD_BE_ALLOC "be-alloc"
+
+/*
+ ******************************************************************************
+ * STATUS RETURN CODES
+ ******************************************************************************
+ *
+ * Status return code is zero on success and -XEN_EXX on failure.
+ *
+ ******************************************************************************
+ * Assumptions
+ ******************************************************************************
+ * o usage of grant reference 0 as invalid grant reference:
+ * grant reference 0 is valid, but never exposed to a PV driver,
+ * because it is already in use/reserved by the PV console.
+ * o all references in this document to page sizes must be treated
+ * as pages of size XEN_PAGE_SIZE unless otherwise noted.
+ *
+ ******************************************************************************
+ * Description of the protocol between frontend and backend driver
+ ******************************************************************************
+ *
+ * The two halves of a Para-virtual display driver communicate with
+ * each other using shared pages and event channels.
+ * Shared page contains a ring with request/response packets.
+ *
+ * All reserved fields in the structures below must be 0.
+ * A display buffer's cookie of value 0 is treated as invalid.
+ * A framebuffer's cookie of value 0 is treated as invalid.
+ *
+ * For all request/response/event packets that use cookies:
+ * dbuf_cookie - uint64_t, unique to guest domain value used by the backend
+ * to map remote display buffer to its local one
+ * fb_cookie - uint64_t, unique to guest domain value used by the backend
+ * to map remote framebuffer to its local one
+ *
+ *---------------------------------- Requests ---------------------------------
+ *
+ * All requests/responses, which are not connector specific, must be sent over
+ * control ring of the connector which has the index value of 0:
+ * /local/domain/<dom-id>/device/vdispl/<dev-id>/0/req-ring-ref
+ *
+ * All request packets have the same length (64 octets).
+ * All request packets have a common header:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | operation | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * id - uint16_t, private guest value, echoed in response
+ * operation - uint8_t, operation code, XENDISPL_OP_???
+ *
+ * Request dbuf creation - request creation of a display buffer.
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id |_OP_DBUF_CREATE | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | dbuf_cookie low 32-bit | 12
+ * +----------------+----------------+----------------+----------------+
+ * | dbuf_cookie high 32-bit | 16
+ * +----------------+----------------+----------------+----------------+
+ * | width | 20
+ * +----------------+----------------+----------------+----------------+
+ * | height | 24
+ * +----------------+----------------+----------------+----------------+
+ * | bpp | 28
+ * +----------------+----------------+----------------+----------------+
+ * | buffer_sz | 32
+ * +----------------+----------------+----------------+----------------+
+ * | flags | 36
+ * +----------------+----------------+----------------+----------------+
+ * | gref_directory | 40
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 44
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 64
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Must be sent over control ring of the connector which has the index
+ * value of 0:
+ * /local/domain/<dom-id>/device/vdispl/<dev-id>/0/req-ring-ref
+ * All unused bits in flags field must be set to 0.
+ *
+ * An attempt to create multiple display buffers with the same dbuf_cookie is
+ * an error. dbuf_cookie can be re-used after destroying the corresponding
+ * display buffer.
+ *
+ * Width and height of the display buffers can be smaller, equal or bigger
+ * than the connector's resolution. Depth/pixel format of the individual
+ * buffers can differ as well.
+ *
+ * width - uint32_t, width in pixels
+ * height - uint32_t, height in pixels
+ * bpp - uint32_t, bits per pixel
+ * buffer_sz - uint32_t, buffer size to be allocated, octets
+ * flags - uint32_t, flags of the operation
+ * o XENDISPL_DBUF_FLG_REQ_ALLOC - if set, then backend is requested
+ * to allocate the buffer with the parameters provided in this request.
+ * Page directory is handled as follows:
+ * Frontend on request:
+ * o allocates pages for the directory (gref_directory,
+ * gref_dir_next_page(s))
+ * o grants permissions for the pages of the directory to the backend
+ * o sets gref_dir_next_page fields
+ * Backend on response:
+ * o grants permissions for the pages of the buffer allocated to
+ * the frontend
+ * o fills in page directory with grant references
+ * (gref[] in struct xendispl_page_directory)
+ * gref_directory - grant_ref_t, a reference to the first shared page
+ * describing shared buffer references. At least one page exists. If shared
+ * buffer size (buffer_sz) exceeds what can be addressed by this single page,
+ * then reference to the next page must be supplied (see gref_dir_next_page
+ * below)
+ */
+
+#define XENDISPL_DBUF_FLG_REQ_ALLOC (1 << 0)
+
+struct xendispl_dbuf_create_req {
+ uint64_t dbuf_cookie;
+ uint32_t width;
+ uint32_t height;
+ uint32_t bpp;
+ uint32_t buffer_sz;
+ uint32_t flags;
+ grant_ref_t gref_directory;
+};
+
+/*
+ * Shared page for XENDISPL_OP_DBUF_CREATE buffer descriptor (gref_directory in
+ * the request) employs a list of pages, describing all pages of the shared
+ * data buffer:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | gref_dir_next_page | 4
+ * +----------------+----------------+----------------+----------------+
+ * | gref[0] | 8
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | gref[i] | i*4+8
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | gref[N - 1] | N*4+8
+ * +----------------+----------------+----------------+----------------+
+ *
+ * gref_dir_next_page - grant_ref_t, reference to the next page describing
+ * page directory. Must be 0 if there are no more pages in the list.
+ * gref[i] - grant_ref_t, reference to a shared page of the buffer
+ * allocated at XENDISPL_OP_DBUF_CREATE
+ *
+ * Number of grant_ref_t entries in the whole page directory is not
+ * passed, but instead can be calculated as:
+ * num_grefs_total = (XENDISPL_OP_DBUF_CREATE.buffer_sz + XEN_PAGE_SIZE - 1) /
+ * XEN_PAGE_SIZE
+ */
+
+struct xendispl_page_directory {
+ grant_ref_t gref_dir_next_page;
+ grant_ref_t gref[1]; /* Variable length */
+};
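
A small sketch (an assumption, not part of this header) of the gref bookkeeping implied by the formula above, using XEN_PAGE_SIZE from <xen/page.h>; the XENDISPL_GREFS_PER_DIR_PAGE name is made up for the example.

#include <linux/kernel.h>
#include <xen/page.h>
#include <xen/interface/io/displif.h>

/* Total grefs needed for a buffer, exactly as documented above. */
static unsigned int dbuf_num_grefs(u32 buffer_sz)
{
	return (buffer_sz + XEN_PAGE_SIZE - 1) / XEN_PAGE_SIZE;
}

/* Grefs that fit in one directory page after gref_dir_next_page. */
#define XENDISPL_GREFS_PER_DIR_PAGE \
	((XEN_PAGE_SIZE - offsetof(struct xendispl_page_directory, gref)) / \
	 sizeof(grant_ref_t))

/* Directory pages needed to describe the whole buffer. */
static unsigned int dbuf_num_dir_pages(u32 buffer_sz)
{
	return DIV_ROUND_UP(dbuf_num_grefs(buffer_sz), XENDISPL_GREFS_PER_DIR_PAGE);
}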
+
+/*
+ * Request dbuf destruction - destroy a previously allocated display buffer:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id |_OP_DBUF_DESTROY| reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | dbuf_cookie low 32-bit | 12
+ * +----------------+----------------+----------------+----------------+
+ * | dbuf_cookie high 32-bit | 16
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 20
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 64
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Must be sent over control ring of the connector which has the index
+ * value of 0:
+ * /local/domain/<dom-id>/device/vdispl/<dev-id>/0/req-ring-ref
+ */
+
+struct xendispl_dbuf_destroy_req {
+ uint64_t dbuf_cookie;
+};
+
+/*
+ * Request framebuffer attachment - request attachment of a framebuffer to
+ * previously created display buffer.
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | _OP_FB_ATTACH | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | dbuf_cookie low 32-bit | 12
+ * +----------------+----------------+----------------+----------------+
+ * | dbuf_cookie high 32-bit | 16
+ * +----------------+----------------+----------------+----------------+
+ * | fb_cookie low 32-bit | 20
+ * +----------------+----------------+----------------+----------------+
+ * | fb_cookie high 32-bit | 24
+ * +----------------+----------------+----------------+----------------+
+ * | width | 28
+ * +----------------+----------------+----------------+----------------+
+ * | height | 32
+ * +----------------+----------------+----------------+----------------+
+ * | pixel_format | 36
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 64
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Must be sent over control ring of the connector which has the index
+ * value of 0:
+ * /local/domain/<dom-id>/device/vdispl/<dev-id>/0/req-ring-ref
+ * Width and height can be smaller, equal or bigger than the connector's
+ * resolution.
+ *
+ * An attempt to create multiple frame buffers with the same fb_cookie is
+ * an error. fb_cookie can be re-used after destroying the corresponding
+ * frame buffer.
+ *
+ * width - uint32_t, width in pixels
+ * height - uint32_t, height in pixels
+ * pixel_format - uint32_t, pixel format of the framebuffer, FOURCC code
+ */
+
+struct xendispl_fb_attach_req {
+ uint64_t dbuf_cookie;
+ uint64_t fb_cookie;
+ uint32_t width;
+ uint32_t height;
+ uint32_t pixel_format;
+};
+
+/*
+ * Request framebuffer detach - detach a previously
+ * attached framebuffer from the display buffer in request:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | _OP_FB_DETACH | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | fb_cookie low 32-bit | 12
+ * +----------------+----------------+----------------+----------------+
+ * | fb_cookie high 32-bit | 16
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 20
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 64
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Must be sent over control ring of the connector which has the index
+ * value of 0:
+ * /local/domain/<dom-id>/device/vdispl/<dev-id>/0/req-ring-ref
+ */
+
+struct xendispl_fb_detach_req {
+ uint64_t fb_cookie;
+};
+
+/*
+ * Request configuration set/reset - request to set or reset
+ * the configuration/mode of the display:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | _OP_SET_CONFIG | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | fb_cookie low 32-bit | 12
+ * +----------------+----------------+----------------+----------------+
+ * | fb_cookie high 32-bit | 16
+ * +----------------+----------------+----------------+----------------+
+ * | x | 20
+ * +----------------+----------------+----------------+----------------+
+ * | y | 24
+ * +----------------+----------------+----------------+----------------+
+ * | width | 28
+ * +----------------+----------------+----------------+----------------+
+ * | height | 32
+ * +----------------+----------------+----------------+----------------+
+ * | bpp | 36
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 64
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Pass all zeros to reset, otherwise command is treated as
+ * configuration set.
+ * Framebuffer's cookie defines which framebuffer/dbuf must be
+ * displayed while enabling display (applying configuration).
+ * x, y, width and height are bound by the connector's resolution and must not
+ * exceed it.
+ *
+ * x - uint32_t, starting position in pixels by X axis
+ * y - uint32_t, starting position in pixels by Y axis
+ * width - uint32_t, width in pixels
+ * height - uint32_t, height in pixels
+ * bpp - uint32_t, bits per pixel
+ */
+
+struct xendispl_set_config_req {
+ uint64_t fb_cookie;
+ uint32_t x;
+ uint32_t y;
+ uint32_t width;
+ uint32_t height;
+ uint32_t bpp;
+};
+
+/*
+ * Request page flip - request to flip a page identified by the framebuffer
+ * cookie:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | _OP_PG_FLIP | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | fb_cookie low 32-bit | 12
+ * +----------------+----------------+----------------+----------------+
+ * | fb_cookie high 32-bit | 16
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 20
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 64
+ * +----------------+----------------+----------------+----------------+
+ */
+
+struct xendispl_page_flip_req {
+ uint64_t fb_cookie;
+};
+
+/*
+ *---------------------------------- Responses --------------------------------
+ *
+ * All response packets have the same length (64 octets).
+ *
+ * All response packets have a common header:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | status | 8
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 12
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 64
+ * +----------------+----------------+----------------+----------------+
+ *
+ * id - uint16_t, private guest value, echoed from request
+ * status - int32_t, response status, zero on success and -XEN_EXX on failure
+ *
+ *----------------------------------- Events ----------------------------------
+ *
+ * Events are sent via a shared page allocated by the front and propagated by
+ * the evt-event-channel/evt-ring-ref XenStore entries.
+ * All event packets have the same length (64 octets).
+ * All event packets have a common header:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | type | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ *
+ * id - uint16_t, event id, may be used by front
+ * type - uint8_t, type of the event
+ *
+ *
+ * Page flip complete event - event from back to front on page flip completed:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | _EVT_PG_FLIP | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | fb_cookie low 32-bit | 12
+ * +----------------+----------------+----------------+----------------+
+ * | fb_cookie high 32-bit | 16
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 20
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 64
+ * +----------------+----------------+----------------+----------------+
+ */
+
+struct xendispl_pg_flip_evt {
+ uint64_t fb_cookie;
+};
+
+struct xendispl_req {
+ uint16_t id;
+ uint8_t operation;
+ uint8_t reserved[5];
+ union {
+ struct xendispl_dbuf_create_req dbuf_create;
+ struct xendispl_dbuf_destroy_req dbuf_destroy;
+ struct xendispl_fb_attach_req fb_attach;
+ struct xendispl_fb_detach_req fb_detach;
+ struct xendispl_set_config_req set_config;
+ struct xendispl_page_flip_req pg_flip;
+ uint8_t reserved[56];
+ } op;
+};
+
+struct xendispl_resp {
+ uint16_t id;
+ uint8_t operation;
+ uint8_t reserved;
+ int32_t status;
+ uint8_t reserved1[56];
+};
+
+struct xendispl_evt {
+ uint16_t id;
+ uint8_t type;
+ uint8_t reserved[5];
+ union {
+ struct xendispl_pg_flip_evt pg_flip;
+ uint8_t reserved[56];
+ } op;
+};
+
+DEFINE_RING_TYPES(xen_displif, struct xendispl_req, struct xendispl_resp);
+
+/*
+ ******************************************************************************
+ * Back to front events delivery
+ ******************************************************************************
+ * In order to deliver asynchronous events from back to front a shared page is
+ * allocated by front and its granted reference propagated to back via
+ * XenStore entries (evt-ring-ref/evt-event-channel).
+ * This page has a common header used by both front and back to synchronize
+ * access to and control of the event ring buffer, with the back being the
+ * producer of events and the front the consumer. The rest of the page after
+ * the header is used for event packets.
+ *
+ * Upon reception of events the front may confirm their reception either per
+ * event, per group of events, or not at all.
+ */
+
+struct xendispl_event_page {
+ uint32_t in_cons;
+ uint32_t in_prod;
+ uint8_t reserved[56];
+};
+
+#define XENDISPL_EVENT_PAGE_SIZE XEN_PAGE_SIZE
+#define XENDISPL_IN_RING_OFFS (sizeof(struct xendispl_event_page))
+#define XENDISPL_IN_RING_SIZE (XENDISPL_EVENT_PAGE_SIZE - XENDISPL_IN_RING_OFFS)
+#define XENDISPL_IN_RING_LEN (XENDISPL_IN_RING_SIZE / sizeof(struct xendispl_evt))
+#define XENDISPL_IN_RING(page) \
+ ((struct xendispl_evt *)((char *)(page) + XENDISPL_IN_RING_OFFS))
+#define XENDISPL_IN_RING_REF(page, idx) \
+ (XENDISPL_IN_RING((page))[(idx) % XENDISPL_IN_RING_LEN])
+
+#endif /* __XEN_PUBLIC_IO_DISPLIF_H__ */
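
A frontend-side sketch (an assumption, not part of this header) of draining the event ring with the helpers above; handle_event() is a hypothetical consumer and the event-channel notification back to the backend is left out.

#include <linux/types.h>
#include <asm/barrier.h>
#include <xen/interface/io/displif.h>

static void handle_event(const struct xendispl_evt *evt)
{
	/* Hypothetical consumer, e.g. complete a page flip on _EVT_PG_FLIP. */
}

static void vdispl_drain_events(struct xendispl_event_page *page)
{
	u32 cons = page->in_cons;
	u32 prod = page->in_prod;

	/* Read in_prod before the ring contents it guards. */
	virt_rmb();

	while (cons != prod) {
		handle_event(&XENDISPL_IN_RING_REF(page, cons));
		cons++;
	}

	page->in_cons = cons;
	/* Publish the new consumer index before notifying the backend. */
	virt_wmb();
}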
diff --git a/include/xen/interface/io/kbdif.h b/include/xen/interface/io/kbdif.h
index 8066c7849fbe..2a9510ade701 100644
--- a/include/xen/interface/io/kbdif.h
+++ b/include/xen/interface/io/kbdif.h
@@ -26,43 +26,432 @@
#ifndef __XEN_PUBLIC_IO_KBDIF_H__
#define __XEN_PUBLIC_IO_KBDIF_H__
-/* In events (backend -> frontend) */
+/*
+ *****************************************************************************
+ * Feature and Parameter Negotiation
+ *****************************************************************************
+ *
+ * The two halves of a para-virtual driver utilize nodes within
+ * XenStore to communicate capabilities and to negotiate operating parameters.
+ * This section enumerates these nodes which reside in the respective front and
+ * backend portions of XenStore, following XenBus convention.
+ *
+ * All data in XenStore is stored as strings. Nodes specifying numeric
+ * values are encoded in decimal. Integer value ranges listed below are
+ * expressed as fixed sized integer types capable of storing the conversion
+ * of a properly formatted node string, without loss of information.
+ *
+ *****************************************************************************
+ * Backend XenBus Nodes
+ *****************************************************************************
+ *
+ *---------------------------- Features supported ----------------------------
+ *
+ * A capable backend advertises supported features by publishing the
+ * corresponding entries in XenStore with 1 as the value of the entry.
+ * If a feature is not supported then 0 must be set or the feature entry omitted.
+ *
+ * feature-abs-pointer
+ * Values: <uint>
+ *
+ * Backends which support reporting of absolute coordinates for a pointer
+ * device should set this to 1.
+ *
+ * feature-multi-touch
+ * Values: <uint>
+ *
+ * Backends which support reporting of multi-touch events
+ * should set this to 1.
+ *
+ *------------------------- Pointer Device Parameters ------------------------
+ *
+ * width
+ * Values: <uint>
+ *
+ * Maximum X coordinate (width) to be used by the frontend
+ * while reporting input events, pixels, [0; UINT32_MAX].
+ *
+ * height
+ * Values: <uint>
+ *
+ * Maximum Y coordinate (height) to be used by the frontend
+ * while reporting input events, pixels, [0; UINT32_MAX].
+ *
+ *****************************************************************************
+ * Frontend XenBus Nodes
+ *****************************************************************************
+ *
+ *------------------------------ Feature request -----------------------------
+ *
+ * A capable frontend requests features from the backend by setting the
+ * corresponding entries to 1 in XenStore. Requests for features not advertised
+ * as supported by the backend have no effect.
+ *
+ * request-abs-pointer
+ * Values: <uint>
+ *
+ * Request backend to report absolute pointer coordinates
+ * (XENKBD_TYPE_POS) instead of relative ones (XENKBD_TYPE_MOTION).
+ *
+ * request-multi-touch
+ * Values: <uint>
+ *
+ * Request backend to report multi-touch events.
+ *
+ *----------------------- Request Transport Parameters -----------------------
+ *
+ * event-channel
+ * Values: <uint>
+ *
+ * The identifier of the Xen event channel used to signal activity
+ * in the ring buffer.
+ *
+ * page-gref
+ * Values: <uint>
+ *
+ * The Xen grant reference granting permission for the backend to map
+ * a sole page in a single page sized event ring buffer.
+ *
+ * page-ref
+ * Values: <uint>
+ *
+ * OBSOLETE, not recommended for use.
+ * PFN of the shared page.
+ *
+ *----------------------- Multi-touch Device Parameters -----------------------
+ *
+ * multi-touch-num-contacts
+ * Values: <uint>
+ *
+ * Number of simultaneous touches reported.
+ *
+ * multi-touch-width
+ * Values: <uint>
+ *
+ * Width of the touch area to be used by the frontend
+ * while reporting input events, pixels, [0; UINT32_MAX].
+ *
+ * multi-touch-height
+ * Values: <uint>
+ *
+ * Height of the touch area to be used by the frontend
+ * while reporting input events, pixels, [0; UINT32_MAX].
+ */
/*
- * Frontends should ignore unknown in events.
+ * EVENT CODES.
*/
-/* Pointer movement event */
-#define XENKBD_TYPE_MOTION 1
-/* Event type 2 currently not used */
-/* Key event (includes pointer buttons) */
-#define XENKBD_TYPE_KEY 3
+#define XENKBD_TYPE_MOTION 1
+#define XENKBD_TYPE_RESERVED 2
+#define XENKBD_TYPE_KEY 3
+#define XENKBD_TYPE_POS 4
+#define XENKBD_TYPE_MTOUCH 5
+
+/* Multi-touch event sub-codes */
+
+#define XENKBD_MT_EV_DOWN 0
+#define XENKBD_MT_EV_UP 1
+#define XENKBD_MT_EV_MOTION 2
+#define XENKBD_MT_EV_SYN 3
+#define XENKBD_MT_EV_SHAPE 4
+#define XENKBD_MT_EV_ORIENT 5
+
+/*
+ * CONSTANTS, XENSTORE FIELD AND PATH NAME STRINGS, HELPERS.
+ */
+
+#define XENKBD_DRIVER_NAME "vkbd"
+
+#define XENKBD_FIELD_FEAT_ABS_POINTER "feature-abs-pointer"
+#define XENKBD_FIELD_FEAT_MTOUCH "feature-multi-touch"
+#define XENKBD_FIELD_REQ_ABS_POINTER "request-abs-pointer"
+#define XENKBD_FIELD_REQ_MTOUCH "request-multi-touch"
+#define XENKBD_FIELD_RING_GREF "page-gref"
+#define XENKBD_FIELD_EVT_CHANNEL "event-channel"
+#define XENKBD_FIELD_WIDTH "width"
+#define XENKBD_FIELD_HEIGHT "height"
+#define XENKBD_FIELD_MT_WIDTH "multi-touch-width"
+#define XENKBD_FIELD_MT_HEIGHT "multi-touch-height"
+#define XENKBD_FIELD_MT_NUM_CONTACTS "multi-touch-num-contacts"
+
+/* OBSOLETE, not recommended for use */
+#define XENKBD_FIELD_RING_REF "page-ref"
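For illustration only (not part of this interface): a Linux frontend could use the field names above to negotiate multi-touch support along these lines; xenbus_read_unsigned() and xenbus_printf() are the standard XenBus helpers and error handling is omitted.

#include <xen/xenbus.h>

static void xenkbd_negotiate_mtouch(struct xenbus_device *dev)
{
	/* The backend advertises support in its own XenStore directory. */
	if (xenbus_read_unsigned(dev->otherend,
				 XENKBD_FIELD_FEAT_MTOUCH, 0))
		/* Request the feature in the frontend's directory. */
		xenbus_printf(XBT_NIL, dev->nodename,
			      XENKBD_FIELD_REQ_MTOUCH, "%u", 1);
}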
+
/*
- * Pointer position event
- * Capable backend sets feature-abs-pointer in xenstore.
- * Frontend requests ot instead of XENKBD_TYPE_MOTION by setting
- * request-abs-update in xenstore.
+ *****************************************************************************
+ * Description of the protocol between frontend and backend driver.
+ *****************************************************************************
+ *
+ * The two halves of a Para-virtual driver communicate with
+ * each other using a shared page and an event channel.
+ * Shared page contains a ring with event structures.
+ *
+ * All reserved fields in the structures below must be 0.
+ *
+ *****************************************************************************
+ * Backend to frontend events
+ *****************************************************************************
+ *
+ * Frontends should ignore unknown in events.
+ * All event packets have the same length (40 octets).
+ * All event packets have a common header:
+ *
+ * 0 octet
+ * +-----------------+
+ * | type |
+ * +-----------------+
+ * type - uint8_t, event code, XENKBD_TYPE_???
+ *
+ *
+ * Pointer relative movement event
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_MOTION | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | rel_x | 8
+ * +----------------+----------------+----------------+----------------+
+ * | rel_y | 12
+ * +----------------+----------------+----------------+----------------+
+ * | rel_z | 16
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 20
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ *
+ * rel_x - int32_t, relative X motion
+ * rel_y - int32_t, relative Y motion
+ * rel_z - int32_t, relative Z motion (wheel)
*/
-#define XENKBD_TYPE_POS 4
struct xenkbd_motion {
- uint8_t type; /* XENKBD_TYPE_MOTION */
- int32_t rel_x; /* relative X motion */
- int32_t rel_y; /* relative Y motion */
- int32_t rel_z; /* relative Z motion (wheel) */
+ uint8_t type;
+ int32_t rel_x;
+ int32_t rel_y;
+ int32_t rel_z;
};
+/*
+ * Key event (includes pointer buttons)
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_KEY | pressed | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | keycode | 8
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 12
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ *
+ * pressed - uint8_t, 1 if pressed; 0 otherwise
+ * keycode - uint32_t, KEY_* from linux/input.h
+ */
+
struct xenkbd_key {
- uint8_t type; /* XENKBD_TYPE_KEY */
- uint8_t pressed; /* 1 if pressed; 0 otherwise */
- uint32_t keycode; /* KEY_* from linux/input.h */
+ uint8_t type;
+ uint8_t pressed;
+ uint32_t keycode;
};
+/*
+ * Pointer absolute position event
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_POS | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | abs_x | 8
+ * +----------------+----------------+----------------+----------------+
+ * | abs_y | 12
+ * +----------------+----------------+----------------+----------------+
+ * | rel_z | 16
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 20
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ *
+ * abs_x - int32_t, absolute X position (in FB pixels)
+ * abs_y - int32_t, absolute Y position (in FB pixels)
+ * rel_z - int32_t, relative Z motion (wheel)
+ */
+
struct xenkbd_position {
- uint8_t type; /* XENKBD_TYPE_POS */
- int32_t abs_x; /* absolute X position (in FB pixels) */
- int32_t abs_y; /* absolute Y position (in FB pixels) */
- int32_t rel_z; /* relative Z motion (wheel) */
+ uint8_t type;
+ int32_t abs_x;
+ int32_t abs_y;
+ int32_t rel_z;
+};
+
+/*
+ * Multi-touch event and its sub-types
+ *
+ * All multi-touch event packets have a common header:
+ *
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_MTOUCH | event_type | contact_id | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ *
+ * event_type - uint8_t, multi-touch event sub-type, XENKBD_MT_EV_???
+ * contact_id - uint8_t, ID of the contact
+ *
+ * Touch interactions can consist of one or more contacts.
+ * For each contact, a series of events is generated, starting
+ * with a down event, followed by zero or more motion events,
+ * and ending with an up event. Events relating to the same
+ * contact point can be identified by the contact ID of the sequence.
+ * A contact ID may be reused after a XENKBD_MT_EV_UP event and
+ * is in the [0; multi-touch-num-contacts - 1] range.
+ *
+ * For further information please refer to documentation on Wayland [1],
+ * Linux [2] and Windows [3] multi-touch support.
+ *
+ * [1] https://cgit.freedesktop.org/wayland/wayland/tree/protocol/wayland.xml
+ * [2] https://www.kernel.org/doc/Documentation/input/multi-touch-protocol.txt
+ * [3] https://msdn.microsoft.com/en-us/library/jj151564(v=vs.85).aspx
+ *
+ *
+ * Multi-touch down event - sent when a new touch is made: touch is assigned
+ * a unique contact ID, sent with this and consequent events related
+ * to this touch.
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_MTOUCH | _MT_EV_DOWN | contact_id | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | abs_x | 12
+ * +----------------+----------------+----------------+----------------+
+ * | abs_y | 16
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 20
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ *
+ * abs_x - int32_t, absolute X position, in pixels
+ * abs_y - int32_t, absolute Y position, in pixels
+ *
+ * Multi-touch contact release event
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_MTOUCH | _MT_EV_UP | contact_id | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Multi-touch motion event
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_MTOUCH | _MT_EV_MOTION | contact_id | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | abs_x | 12
+ * +----------------+----------------+----------------+----------------+
+ * | abs_y | 16
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 20
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ *
+ * abs_x - int32_t, absolute X position, in pixels,
+ * abs_y - int32_t, absolute Y position, in pixels,
+ *
+ * Multi-touch input synchronization event - shows end of a set of events
+ * which logically belong together.
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_MTOUCH | _MT_EV_SYN | contact_id | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Multi-touch shape event - the shape of the touch point has changed.
+ * Shape is approximated by an ellipse through the major and minor axis
+ * lengths: major is the longer diameter of the ellipse and minor is the
+ * shorter one. Center of the ellipse is reported via
+ * XENKBD_MT_EV_DOWN/XENKBD_MT_EV_MOTION events.
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_MTOUCH | _MT_EV_SHAPE | contact_id | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | major | 12
+ * +----------------+----------------+----------------+----------------+
+ * | minor | 16
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 20
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ *
+ * major - uint32_t, length of the major axis, pixels
+ * minor - uint32_t, length of the minor axis, pixels
+ *
+ * Multi-touch orientation event - the shape of the touch point has changed
+ * its orientation: reported as the clockwise angle between the major axis
+ * of the ellipse and the positive Y axis, in degrees, [-180; +180].
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | _TYPE_MTOUCH | _MT_EV_ORIENT | contact_id | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | orientation | reserved | 12
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 16
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 40
+ * +----------------+----------------+----------------+----------------+
+ *
+ * orientation - int16_t, clockwise angle of the major axis
+ */
+
+struct xenkbd_mtouch {
+ uint8_t type; /* XENKBD_TYPE_MTOUCH */
+ uint8_t event_type; /* XENKBD_MT_EV_??? */
+ uint8_t contact_id;
+ uint8_t reserved[5]; /* reserved for future use */
+ union {
+ struct {
+ int32_t abs_x; /* absolute X position, pixels */
+ int32_t abs_y; /* absolute Y position, pixels */
+ } pos;
+ struct {
+ uint32_t major; /* length of the major axis, pixels */
+ uint32_t minor; /* length of the minor axis, pixels */
+ } shape;
+ int16_t orientation; /* clockwise angle of the major axis */
+ } u;
};
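As a sketch of how the multi-touch fields above could be consumed (not part of this interface), a Linux frontend might feed such an event into the input MT core roughly as follows, assuming "dev" is an already registered struct input_dev with MT slots set up:

#include <linux/input/mt.h>

static void xenkbd_handle_mtouch(struct input_dev *dev,
				 const struct xenkbd_mtouch *mt)
{
	input_mt_slot(dev, mt->contact_id);

	switch (mt->event_type) {
	case XENKBD_MT_EV_DOWN:
		input_mt_report_slot_state(dev, MT_TOOL_FINGER, true);
		/* fall through to report the coordinates */
	case XENKBD_MT_EV_MOTION:
		input_report_abs(dev, ABS_MT_POSITION_X, mt->u.pos.abs_x);
		input_report_abs(dev, ABS_MT_POSITION_Y, mt->u.pos.abs_y);
		break;
	case XENKBD_MT_EV_UP:
		input_mt_report_slot_state(dev, MT_TOOL_FINGER, false);
		break;
	case XENKBD_MT_EV_SHAPE:
		input_report_abs(dev, ABS_MT_TOUCH_MAJOR, mt->u.shape.major);
		input_report_abs(dev, ABS_MT_TOUCH_MINOR, mt->u.shape.minor);
		break;
	case XENKBD_MT_EV_ORIENT:
		input_report_abs(dev, ABS_MT_ORIENTATION, mt->u.orientation);
		break;
	case XENKBD_MT_EV_SYN:
		input_mt_sync_frame(dev);
		input_sync(dev);
		break;
	}
}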
#define XENKBD_IN_EVENT_SIZE 40
@@ -72,15 +461,26 @@ union xenkbd_in_event {
struct xenkbd_motion motion;
struct xenkbd_key key;
struct xenkbd_position pos;
+ struct xenkbd_mtouch mtouch;
char pad[XENKBD_IN_EVENT_SIZE];
};
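A minimal dispatch sketch for the union above (illustrative only; the handlers other than xenkbd_handle_mtouch(), sketched earlier, are hypothetical):

/* Hypothetical frontend callbacks, for illustration only. */
void handle_motion(int32_t rel_x, int32_t rel_y, int32_t rel_z);
void handle_key(uint32_t keycode, uint8_t pressed);
void handle_pos(int32_t abs_x, int32_t abs_y, int32_t rel_z);

static void xenkbd_handle_event(struct input_dev *dev,
				const union xenkbd_in_event *ev)
{
	switch (ev->type) {
	case XENKBD_TYPE_MOTION:
		handle_motion(ev->motion.rel_x, ev->motion.rel_y,
			      ev->motion.rel_z);
		break;
	case XENKBD_TYPE_KEY:
		handle_key(ev->key.keycode, ev->key.pressed);
		break;
	case XENKBD_TYPE_POS:
		handle_pos(ev->pos.abs_x, ev->pos.abs_y, ev->pos.rel_z);
		break;
	case XENKBD_TYPE_MTOUCH:
		xenkbd_handle_mtouch(dev, &ev->mtouch);
		break;
	default:
		/* Frontends must ignore unknown in events. */
		break;
	}
}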
-/* Out events (frontend -> backend) */
-
/*
+ *****************************************************************************
+ * Frontend to backend events
+ *****************************************************************************
+ *
* Out events may be sent only when requested by backend, and receipt
* of an unknown out event is an error.
* No out events currently defined.
+ *
+ * All event packets have the same length (40 octets).
+ * All event packets have a common header:
+ * 0 octet
+ * +-----------------+
+ * | type |
+ * +-----------------+
+ * type - uint8_t, event code
*/
#define XENKBD_OUT_EVENT_SIZE 40
@@ -90,7 +490,11 @@ union xenkbd_out_event {
char pad[XENKBD_OUT_EVENT_SIZE];
};
-/* shared page */
+/*
+ *****************************************************************************
+ * Shared page
+ *****************************************************************************
+ */
#define XENKBD_IN_RING_SIZE 2048
#define XENKBD_IN_RING_LEN (XENKBD_IN_RING_SIZE / XENKBD_IN_EVENT_SIZE)
@@ -113,4 +517,4 @@ struct xenkbd_page {
uint32_t out_cons, out_prod;
};
-#endif
+#endif /* __XEN_PUBLIC_IO_KBDIF_H__ */
diff --git a/include/xen/interface/io/ring.h b/include/xen/interface/io/ring.h
index 21f4fbd55e48..c79456855539 100644
--- a/include/xen/interface/io/ring.h
+++ b/include/xen/interface/io/ring.h
@@ -283,4 +283,147 @@ struct __name##_back_ring { \
(_work_to_do) = RING_HAS_UNCONSUMED_RESPONSES(_r); \
} while (0)
+
+/*
+ * DEFINE_XEN_FLEX_RING_AND_INTF defines two monodirectional rings and
+ * functions to check if there is data on the rings, and to read from and
+ * write to them.
+ *
+ * DEFINE_XEN_FLEX_RING is similar to DEFINE_XEN_FLEX_RING_AND_INTF, but
+ * does not define the indexes page. As different protocols can have
+ * extensions to the basic format, this macro allows them to define their
+ * own struct.
+ *
+ * XEN_FLEX_RING_SIZE
+ * Convenience macro to calculate the size of one of the two rings
+ * from the overall order.
+ *
+ * $NAME_mask
+ * Function to apply the size mask to an index, to reduce the index
+ * to the range [0; size - 1].
+ *
+ * $NAME_read_packet
+ * Function to read data from the ring. The amount of data to read is
+ * specified by the "size" argument.
+ *
+ * $NAME_write_packet
+ * Function to write data to the ring. The amount of data to write is
+ * specified by the "size" argument.
+ *
+ * $NAME_get_ring_ptr
+ * Convenience function that returns a pointer to read/write to the
+ * ring at the right location.
+ *
+ * $NAME_data_intf
+ * Indexes page, shared between frontend and backend. It also
+ * contains the array of grant refs.
+ *
+ * $NAME_queued
+ * Function to calculate how many bytes are currently on the ring,
+ * ready to be read. It can also be used to calculate how much free
+ * space is currently on the ring (XEN_FLEX_RING_SIZE() -
+ * $NAME_queued()).
+ */
+
+#ifndef XEN_PAGE_SHIFT
+/* The PAGE_SIZE for ring protocols and hypercall interfaces is always
+ * 4K, regardless of the architecture and the page granularity chosen by
+ * the operating system.
+ */
+#define XEN_PAGE_SHIFT 12
+#endif
+#define XEN_FLEX_RING_SIZE(order) \
+ (1UL << ((order) + XEN_PAGE_SHIFT - 1))
+
+#define DEFINE_XEN_FLEX_RING(name) \
+static inline RING_IDX name##_mask(RING_IDX idx, RING_IDX ring_size) \
+{ \
+ return idx & (ring_size - 1); \
+} \
+ \
+static inline unsigned char *name##_get_ring_ptr(unsigned char *buf, \
+ RING_IDX idx, \
+ RING_IDX ring_size) \
+{ \
+ return buf + name##_mask(idx, ring_size); \
+} \
+ \
+static inline void name##_read_packet(void *opaque, \
+ const unsigned char *buf, \
+ size_t size, \
+ RING_IDX masked_prod, \
+ RING_IDX *masked_cons, \
+ RING_IDX ring_size) \
+{ \
+ if (*masked_cons < masked_prod || \
+ size <= ring_size - *masked_cons) { \
+ memcpy(opaque, buf + *masked_cons, size); \
+ } else { \
+ memcpy(opaque, buf + *masked_cons, ring_size - *masked_cons); \
+ memcpy((unsigned char *)opaque + ring_size - *masked_cons, buf, \
+ size - (ring_size - *masked_cons)); \
+ } \
+ *masked_cons = name##_mask(*masked_cons + size, ring_size); \
+} \
+ \
+static inline void name##_write_packet(unsigned char *buf, \
+ const void *opaque, \
+ size_t size, \
+ RING_IDX *masked_prod, \
+ RING_IDX masked_cons, \
+ RING_IDX ring_size) \
+{ \
+ if (*masked_prod < masked_cons || \
+ size <= ring_size - *masked_prod) { \
+ memcpy(buf + *masked_prod, opaque, size); \
+ } else { \
+ memcpy(buf + *masked_prod, opaque, ring_size - *masked_prod); \
+ memcpy(buf, (unsigned char *)opaque + (ring_size - *masked_prod), \
+ size - (ring_size - *masked_prod)); \
+ } \
+ *masked_prod = name##_mask(*masked_prod + size, ring_size); \
+} \
+ \
+static inline RING_IDX name##_queued(RING_IDX prod, \
+ RING_IDX cons, \
+ RING_IDX ring_size) \
+{ \
+ RING_IDX size; \
+ \
+ if (prod == cons) \
+ return 0; \
+ \
+ prod = name##_mask(prod, ring_size); \
+ cons = name##_mask(cons, ring_size); \
+ \
+ if (prod == cons) \
+ return ring_size; \
+ \
+ if (prod > cons) \
+ size = prod - cons; \
+ else \
+ size = ring_size - (cons - prod); \
+ return size; \
+} \
+ \
+struct name##_data { \
+ unsigned char *in; /* half of the allocation */ \
+ unsigned char *out; /* half of the allocation */ \
+}
+
+#define DEFINE_XEN_FLEX_RING_AND_INTF(name) \
+struct name##_data_intf { \
+ RING_IDX in_cons, in_prod; \
+ \
+ uint8_t pad1[56]; \
+ \
+ RING_IDX out_cons, out_prod; \
+ \
+ uint8_t pad2[56]; \
+ \
+ RING_IDX ring_order; \
+ grant_ref_t ref[]; \
+}; \
+DEFINE_XEN_FLEX_RING(name)
+
#endif /* __XEN_PUBLIC_IO_RING_H__ */
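For illustration (not part of this patch), a hypothetical protocol could instantiate the flexible ring and pull a fixed-size packet off its "in" half along these lines; "demo" and demo_recv() are made-up names and the Linux virt_* barriers are assumed:

DEFINE_XEN_FLEX_RING_AND_INTF(demo);

static void demo_recv(struct demo_data_intf *intf, unsigned char *in_buf,
		      void *pkt, size_t pkt_size, RING_IDX ring_size)
{
	RING_IDX cons = intf->in_cons;
	RING_IDX prod = intf->in_prod;
	RING_IDX masked_prod, masked_cons;

	/* Read the indexes before touching the ring contents. */
	virt_rmb();

	if (demo_queued(prod, cons, ring_size) < pkt_size)
		return;		/* not enough data on the ring yet */

	masked_prod = demo_mask(prod, ring_size);
	masked_cons = demo_mask(cons, ring_size);
	demo_read_packet(pkt, in_buf, pkt_size, masked_prod, &masked_cons,
			 ring_size);

	/* Ensure the data is consumed before publishing the new index. */
	virt_mb();
	intf->in_cons = cons + pkt_size;
}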
diff --git a/include/xen/interface/io/sndif.h b/include/xen/interface/io/sndif.h
new file mode 100644
index 000000000000..5c918276835e
--- /dev/null
+++ b/include/xen/interface/io/sndif.h
@@ -0,0 +1,793 @@
+/******************************************************************************
+ * sndif.h
+ *
+ * Unified sound-device I/O interface for Xen guest OSes.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a copy
+ * of this software and associated documentation files (the "Software"), to
+ * deal in the Software without restriction, including without limitation the
+ * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
+ * sell copies of the Software, and to permit persons to whom the Software is
+ * furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
+ * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
+ * DEALINGS IN THE SOFTWARE.
+ *
+ * Copyright (C) 2013-2015 GlobalLogic Inc.
+ * Copyright (C) 2016-2017 EPAM Systems Inc.
+ *
+ * Authors: Oleksandr Andrushchenko <oleksandr_andrushchenko@epam.com>
+ * Oleksandr Grytsov <oleksandr_grytsov@epam.com>
+ * Oleksandr Dmytryshyn <oleksandr.dmytryshyn@globallogic.com>
+ * Iurii Konovalenko <iurii.konovalenko@globallogic.com>
+ */
+
+#ifndef __XEN_PUBLIC_IO_SNDIF_H__
+#define __XEN_PUBLIC_IO_SNDIF_H__
+
+#include "ring.h"
+#include "../grant_table.h"
+
+/*
+ ******************************************************************************
+ * Feature and Parameter Negotiation
+ ******************************************************************************
+ *
+ * Front->back notifications: when enqueuing a new request, sending a
+ * notification can be made conditional on req_event (i.e., the generic
+ * hold-off mechanism provided by the ring macros). Backends must set
+ * req_event appropriately (e.g., using RING_FINAL_CHECK_FOR_REQUESTS()).
+ *
+ * Back->front notifications: when enqueuing a new response, sending a
+ * notification can be made conditional on rsp_event (i.e., the generic
+ * hold-off mechanism provided by the ring macros). Frontends must set
+ * rsp_event appropriately (e.g., using RING_FINAL_CHECK_FOR_RESPONSES()).
+ *
+ * The two halves of a para-virtual sound card driver utilize nodes within
+ * XenStore to communicate capabilities and to negotiate operating parameters.
+ * This section enumerates these nodes which reside in the respective front and
+ * backend portions of XenStore, following the XenBus convention.
+ *
+ * All data in XenStore is stored as strings. Nodes specifying numeric
+ * values are encoded in decimal. Integer value ranges listed below are
+ * expressed as fixed sized integer types capable of storing the conversion
+ * of a properly formatted node string, without loss of information.
+ *
+ ******************************************************************************
+ * Example configuration
+ ******************************************************************************
+ *
+ * Note: depending on the use case, the backend can expose more sound cards
+ * and PCM devices/streams than the underlying HW physically has, by
+ * employing SW mixers, configuring virtual sound streams, channels etc.
+ *
+ * This is an example of backend and frontend configuration:
+ *
+ *--------------------------------- Backend -----------------------------------
+ *
+ * /local/domain/0/backend/vsnd/1/0/frontend-id = "1"
+ * /local/domain/0/backend/vsnd/1/0/frontend = "/local/domain/1/device/vsnd/0"
+ * /local/domain/0/backend/vsnd/1/0/state = "4"
+ * /local/domain/0/backend/vsnd/1/0/versions = "1,2"
+ *
+ *--------------------------------- Frontend ----------------------------------
+ *
+ * /local/domain/1/device/vsnd/0/backend-id = "0"
+ * /local/domain/1/device/vsnd/0/backend = "/local/domain/0/backend/vsnd/1/0"
+ * /local/domain/1/device/vsnd/0/state = "4"
+ * /local/domain/1/device/vsnd/0/version = "1"
+ *
+ *----------------------------- Card configuration ----------------------------
+ *
+ * /local/domain/1/device/vsnd/0/short-name = "Card short name"
+ * /local/domain/1/device/vsnd/0/long-name = "Card long name"
+ * /local/domain/1/device/vsnd/0/sample-rates = "8000,32000,44100,48000,96000"
+ * /local/domain/1/device/vsnd/0/sample-formats = "s8,u8,s16_le,s16_be"
+ * /local/domain/1/device/vsnd/0/buffer-size = "262144"
+ *
+ *------------------------------- PCM device 0 --------------------------------
+ *
+ * /local/domain/1/device/vsnd/0/0/name = "General analog"
+ * /local/domain/1/device/vsnd/0/0/channels-max = "5"
+ *
+ *----------------------------- Stream 0, playback ----------------------------
+ *
+ * /local/domain/1/device/vsnd/0/0/0/type = "p"
+ * /local/domain/1/device/vsnd/0/0/0/sample-formats = "s8,u8"
+ * /local/domain/1/device/vsnd/0/0/0/unique-id = "0"
+ *
+ * /local/domain/1/device/vsnd/0/0/0/ring-ref = "386"
+ * /local/domain/1/device/vsnd/0/0/0/event-channel = "15"
+ *
+ *------------------------------ Stream 1, capture ----------------------------
+ *
+ * /local/domain/1/device/vsnd/0/0/1/type = "c"
+ * /local/domain/1/device/vsnd/0/0/1/channels-max = "2"
+ * /local/domain/1/device/vsnd/0/0/1/unique-id = "1"
+ *
+ * /local/domain/1/device/vsnd/0/0/1/ring-ref = "384"
+ * /local/domain/1/device/vsnd/0/0/1/event-channel = "13"
+ *
+ *------------------------------- PCM device 1 --------------------------------
+ *
+ * /local/domain/1/device/vsnd/0/1/name = "HDMI-0"
+ * /local/domain/1/device/vsnd/0/1/sample-rates = "8000,32000,44100"
+ *
+ *------------------------------ Stream 0, capture ----------------------------
+ *
+ * /local/domain/1/device/vsnd/0/1/0/type = "c"
+ * /local/domain/1/device/vsnd/0/1/0/unique-id = "2"
+ *
+ * /local/domain/1/device/vsnd/0/1/0/ring-ref = "387"
+ * /local/domain/1/device/vsnd/0/1/0/event-channel = "151"
+ *
+ *------------------------------- PCM device 2 --------------------------------
+ *
+ * /local/domain/1/device/vsnd/0/2/name = "SPDIF"
+ *
+ *----------------------------- Stream 0, playback ----------------------------
+ *
+ * /local/domain/1/device/vsnd/0/2/0/type = "p"
+ * /local/domain/1/device/vsnd/0/2/0/unique-id = "3"
+ *
+ * /local/domain/1/device/vsnd/0/2/0/ring-ref = "389"
+ * /local/domain/1/device/vsnd/0/2/0/event-channel = "152"
+ *
+ ******************************************************************************
+ * Backend XenBus Nodes
+ ******************************************************************************
+ *
+ *----------------------------- Protocol version ------------------------------
+ *
+ * versions
+ * Values: <string>
+ *
+ * List of XENSND_LIST_SEPARATOR separated protocol versions supported
+ * by the backend. For example "1,2,3".
+ *
+ ******************************************************************************
+ * Frontend XenBus Nodes
+ ******************************************************************************
+ *
+ *-------------------------------- Addressing ---------------------------------
+ *
+ * dom-id
+ * Values: <uint16_t>
+ *
+ * Domain identifier.
+ *
+ * dev-id
+ * Values: <uint16_t>
+ *
+ * Device identifier.
+ *
+ * pcm-dev-idx
+ * Values: <uint8_t>
+ *
+ * Zero-based contiguous index of the PCM device.
+ *
+ * stream-idx
+ * Values: <uint8_t>
+ *
+ * Zero-based contiguous index of the stream within the PCM device.
+ *
+ * The following pattern is used for addressing:
+ * /local/domain/<dom-id>/device/vsnd/<dev-id>/<pcm-dev-idx>/<stream-idx>/...
+ *
+ *----------------------------- Protocol version ------------------------------
+ *
+ * version
+ * Values: <string>
+ *
+ * Protocol version, chosen among the ones supported by the backend.
+ *
+ *------------------------------- PCM settings --------------------------------
+ *
+ * Every virtualized sound frontend has a set of PCM devices and streams,
+ * each of which can be individually configured. Part of the PCM
+ * configuration can be defined at a higher level of the hierarchy and be
+ * fully or partially re-used by the underlying layers. These configuration
+ * values are:
+ * o number of channels (min/max)
+ * o supported sample rates
+ * o supported sample formats.
+ * E.g. one can define these values for the whole card, device or stream.
+ * Every underlying layer in turn can re-define some or all of them to better
+ * fit its needs. For example, a card may define the number of channels to
+ * be in the [1; 8] range, while some particular stream may be limited to
+ * [1; 2] only.
+ * The rule is that the underlying layer must be a subset of the upper layer
+ * range.
+ *
+ * channels-min
+ * Values: <uint8_t>
+ *
+ * The minimum number of channels supported, [1; channels-max].
+ * Optional; if not set or omitted, a value of 1 is used.
+ *
+ * channels-max
+ * Values: <uint8_t>
+ *
+ * The maximum number of channels supported.
+ * Must be at least <channels-min>.
+ *
+ * sample-rates
+ * Values: <list of uint32_t>
+ *
+ * List of supported sample rates separated by XENSND_LIST_SEPARATOR.
+ * Sample rates are expressed as a list of decimal values w/o any
+ * ordering requirement.
+ *
+ * sample-formats
+ * Values: <list of XENSND_PCM_FORMAT_XXX_STR>
+ *
+ * List of supported sample formats separated by XENSND_LIST_SEPARATOR.
+ * Items must not exceed XENSND_SAMPLE_FORMAT_MAX_LEN length.
+ *
+ * buffer-size
+ * Values: <uint32_t>
+ *
+ * The maximum size in octets of the buffer to allocate per stream.
+ *
+ *----------------------- Virtual sound card settings -------------------------
+ * short-name
+ * Values: <char[32]>
+ *
+ * Short name of the virtual sound card. Optional.
+ *
+ * long-name
+ * Values: <char[80]>
+ *
+ * Long name of the virtual sound card. Optional.
+ *
+ *----------------------------- Device settings -------------------------------
+ * name
+ * Values: <char[80]>
+ *
+ * Name of the sound device within the virtual sound card. Optional.
+ *
+ *----------------------------- Stream settings -------------------------------
+ *
+ * type
+ * Values: "p", "c"
+ *
+ * Stream type: "p" - playback stream, "c" - capture stream
+ *
+ * If both capture and playback are needed then two streams need to be
+ * defined under the same device.
+ *
+ * unique-id
+ * Values: <uint32_t>
+ *
+ * After stream initialization it is assigned a unique ID (within the front
+ * driver), so every stream of the frontend can be identified by the
+ * backend by this ID. This is not equal to stream-idx, as the latter is
+ * zero based within the device, while this index is contiguous within the
+ * driver.
+ *
+ *-------------------- Stream Request Transport Parameters --------------------
+ *
+ * event-channel
+ * Values: <uint32_t>
+ *
+ * The identifier of the Xen event channel used to signal activity
+ * in the ring buffer.
+ *
+ * ring-ref
+ * Values: <uint32_t>
+ *
+ * The Xen grant reference granting permission for the backend to map
+ * a sole page in a single page sized ring buffer.
+ *
+ ******************************************************************************
+ * STATE DIAGRAMS
+ ******************************************************************************
+ *
+ * Tool stack creates front and back state nodes with initial state
+ * XenbusStateInitialising.
+ * Tool stack creates and sets up frontend sound configuration nodes per domain.
+ *
+ * Front Back
+ * ================================= =====================================
+ * XenbusStateInitialising XenbusStateInitialising
+ * o Query backend device identification
+ * data.
+ * o Open and validate backend device.
+ * |
+ * |
+ * V
+ * XenbusStateInitWait
+ *
+ * o Query frontend configuration
+ * o Allocate and initialize
+ * event channels per configured
+ * playback/capture stream.
+ * o Publish transport parameters
+ * that will be in effect during
+ * this connection.
+ * |
+ * |
+ * V
+ * XenbusStateInitialised
+ *
+ * o Query frontend transport parameters.
+ * o Connect to the event channels.
+ * |
+ * |
+ * V
+ * XenbusStateConnected
+ *
+ * o Create and initialize OS
+ * virtual sound device instances
+ * as per configuration.
+ * |
+ * |
+ * V
+ * XenbusStateConnected
+ *
+ * XenbusStateUnknown
+ * XenbusStateClosed
+ * XenbusStateClosing
+ * o Remove virtual sound device
+ * o Remove event channels
+ * |
+ * |
+ * V
+ * XenbusStateClosed
+ *
+ *------------------------------- Recovery flow -------------------------------
+ *
+ * In case of unrecoverable frontend errors the backend handles that as
+ * if the frontend had gone into the XenbusStateClosed state.
+ *
+ * In case of unrecoverable backend errors the frontend tries to remove
+ * the virtualized device. If this is possible at the moment of error,
+ * then the frontend goes into the XenbusStateInitialising state and is
+ * ready for a new connection with the backend. If the virtualized device is
+ * still in use and cannot be removed, then the frontend goes into the
+ * XenbusStateReconfiguring state until either the virtualized device is
+ * removed or the backend initiates a new connection. Upon removal of the
+ * virtualized device the frontend goes into the XenbusStateInitialising
+ * state.
+ *
+ * Note on XenbusStateReconfiguring state of the frontend: if backend has
+ * unrecoverable errors then frontend cannot send requests to the backend
+ * and thus cannot provide functionality of the virtualized device anymore.
+ * After backend is back to normal the virtualized device may still hold some
+ * state: configuration in use, allocated buffers, client application state etc.
+ * So, in most cases, this will require frontend to implement complex recovery
+ * reconnect logic. Instead, by going into XenbusStateReconfiguring state,
+ * frontend will make sure no new clients of the virtualized device are
+ * accepted, allow existing client(s) to exit gracefully by signaling error
+ * state etc.
+ * Once all the clients are gone frontend can reinitialize the virtualized
+ * device and get into XenbusStateInitialising state again signaling the
+ * backend that a new connection can be made.
+ *
+ * There are multiple conditions possible under which frontend will go from
+ * XenbusStateReconfiguring into XenbusStateInitialising, some of them are OS
+ * specific. For example:
+ * 1. The underlying OS framework may provide callbacks to signal that the last
+ * client of the virtualized device has gone and the device can be removed
+ * 2. Frontend can schedule deferred work (a timer/tasklet/workqueue)
+ * to periodically check if this is the right time to re-try removal of
+ * the virtualized device.
+ * 3. By any other means.
+ *
+ ******************************************************************************
+ * PCM FORMATS
+ ******************************************************************************
+ *
+ * XENSND_PCM_FORMAT_<format>[_<endian>]
+ *
+ * format: <S/U/F><bits> or <name>
+ * S - signed, U - unsigned, F - float
+ * bits - 8, 16, 24, 32
+ * name - MU_LAW, GSM, etc.
+ *
+ * endian: <LE/BE>, may be absent
+ * LE - Little endian, BE - Big endian
+ */
+#define XENSND_PCM_FORMAT_S8 0
+#define XENSND_PCM_FORMAT_U8 1
+#define XENSND_PCM_FORMAT_S16_LE 2
+#define XENSND_PCM_FORMAT_S16_BE 3
+#define XENSND_PCM_FORMAT_U16_LE 4
+#define XENSND_PCM_FORMAT_U16_BE 5
+#define XENSND_PCM_FORMAT_S24_LE 6
+#define XENSND_PCM_FORMAT_S24_BE 7
+#define XENSND_PCM_FORMAT_U24_LE 8
+#define XENSND_PCM_FORMAT_U24_BE 9
+#define XENSND_PCM_FORMAT_S32_LE 10
+#define XENSND_PCM_FORMAT_S32_BE 11
+#define XENSND_PCM_FORMAT_U32_LE 12
+#define XENSND_PCM_FORMAT_U32_BE 13
+#define XENSND_PCM_FORMAT_F32_LE 14 /* 4-byte float, IEEE-754 32-bit, */
+#define XENSND_PCM_FORMAT_F32_BE 15 /* range -1.0 to 1.0 */
+#define XENSND_PCM_FORMAT_F64_LE 16 /* 8-byte float, IEEE-754 64-bit, */
+#define XENSND_PCM_FORMAT_F64_BE 17 /* range -1.0 to 1.0 */
+#define XENSND_PCM_FORMAT_IEC958_SUBFRAME_LE 18
+#define XENSND_PCM_FORMAT_IEC958_SUBFRAME_BE 19
+#define XENSND_PCM_FORMAT_MU_LAW 20
+#define XENSND_PCM_FORMAT_A_LAW 21
+#define XENSND_PCM_FORMAT_IMA_ADPCM 22
+#define XENSND_PCM_FORMAT_MPEG 23
+#define XENSND_PCM_FORMAT_GSM 24
+
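For illustration only (an assumption, not part of this interface): a Linux frontend built on ALSA might translate the values above with a simple lookup table, e.g.:

#include <sound/pcm.h>

/* Partial, illustrative mapping of protocol format codes to ALSA formats. */
static const snd_pcm_format_t xensnd_to_alsa_format[] = {
	[XENSND_PCM_FORMAT_S8]     = SNDRV_PCM_FORMAT_S8,
	[XENSND_PCM_FORMAT_U8]     = SNDRV_PCM_FORMAT_U8,
	[XENSND_PCM_FORMAT_S16_LE] = SNDRV_PCM_FORMAT_S16_LE,
	[XENSND_PCM_FORMAT_S16_BE] = SNDRV_PCM_FORMAT_S16_BE,
	[XENSND_PCM_FORMAT_S24_LE] = SNDRV_PCM_FORMAT_S24_LE,
	[XENSND_PCM_FORMAT_S32_LE] = SNDRV_PCM_FORMAT_S32_LE,
	[XENSND_PCM_FORMAT_F32_LE] = SNDRV_PCM_FORMAT_FLOAT_LE,
	[XENSND_PCM_FORMAT_MU_LAW] = SNDRV_PCM_FORMAT_MU_LAW,
	[XENSND_PCM_FORMAT_A_LAW]  = SNDRV_PCM_FORMAT_A_LAW,
};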
+/*
+ ******************************************************************************
+ * REQUEST CODES
+ ******************************************************************************
+ */
+#define XENSND_OP_OPEN 0
+#define XENSND_OP_CLOSE 1
+#define XENSND_OP_READ 2
+#define XENSND_OP_WRITE 3
+#define XENSND_OP_SET_VOLUME 4
+#define XENSND_OP_GET_VOLUME 5
+#define XENSND_OP_MUTE 6
+#define XENSND_OP_UNMUTE 7
+
+/*
+ ******************************************************************************
+ * XENSTORE FIELD AND PATH NAME STRINGS, HELPERS
+ ******************************************************************************
+ */
+#define XENSND_DRIVER_NAME "vsnd"
+
+#define XENSND_LIST_SEPARATOR ","
+/* Field names */
+#define XENSND_FIELD_BE_VERSIONS "versions"
+#define XENSND_FIELD_FE_VERSION "version"
+#define XENSND_FIELD_VCARD_SHORT_NAME "short-name"
+#define XENSND_FIELD_VCARD_LONG_NAME "long-name"
+#define XENSND_FIELD_RING_REF "ring-ref"
+#define XENSND_FIELD_EVT_CHNL "event-channel"
+#define XENSND_FIELD_DEVICE_NAME "name"
+#define XENSND_FIELD_TYPE "type"
+#define XENSND_FIELD_STREAM_UNIQUE_ID "unique-id"
+#define XENSND_FIELD_CHANNELS_MIN "channels-min"
+#define XENSND_FIELD_CHANNELS_MAX "channels-max"
+#define XENSND_FIELD_SAMPLE_RATES "sample-rates"
+#define XENSND_FIELD_SAMPLE_FORMATS "sample-formats"
+#define XENSND_FIELD_BUFFER_SIZE "buffer-size"
+
+/* Stream type field values. */
+#define XENSND_STREAM_TYPE_PLAYBACK "p"
+#define XENSND_STREAM_TYPE_CAPTURE "c"
+/* Sample rate max string length */
+#define XENSND_SAMPLE_RATE_MAX_LEN 11
+/* Sample format field values */
+#define XENSND_SAMPLE_FORMAT_MAX_LEN 24
+
+#define XENSND_PCM_FORMAT_S8_STR "s8"
+#define XENSND_PCM_FORMAT_U8_STR "u8"
+#define XENSND_PCM_FORMAT_S16_LE_STR "s16_le"
+#define XENSND_PCM_FORMAT_S16_BE_STR "s16_be"
+#define XENSND_PCM_FORMAT_U16_LE_STR "u16_le"
+#define XENSND_PCM_FORMAT_U16_BE_STR "u16_be"
+#define XENSND_PCM_FORMAT_S24_LE_STR "s24_le"
+#define XENSND_PCM_FORMAT_S24_BE_STR "s24_be"
+#define XENSND_PCM_FORMAT_U24_LE_STR "u24_le"
+#define XENSND_PCM_FORMAT_U24_BE_STR "u24_be"
+#define XENSND_PCM_FORMAT_S32_LE_STR "s32_le"
+#define XENSND_PCM_FORMAT_S32_BE_STR "s32_be"
+#define XENSND_PCM_FORMAT_U32_LE_STR "u32_le"
+#define XENSND_PCM_FORMAT_U32_BE_STR "u32_be"
+#define XENSND_PCM_FORMAT_F32_LE_STR "float_le"
+#define XENSND_PCM_FORMAT_F32_BE_STR "float_be"
+#define XENSND_PCM_FORMAT_F64_LE_STR "float64_le"
+#define XENSND_PCM_FORMAT_F64_BE_STR "float64_be"
+#define XENSND_PCM_FORMAT_IEC958_SUBFRAME_LE_STR "iec958_subframe_le"
+#define XENSND_PCM_FORMAT_IEC958_SUBFRAME_BE_STR "iec958_subframe_be"
+#define XENSND_PCM_FORMAT_MU_LAW_STR "mu_law"
+#define XENSND_PCM_FORMAT_A_LAW_STR "a_law"
+#define XENSND_PCM_FORMAT_IMA_ADPCM_STR "ima_adpcm"
+#define XENSND_PCM_FORMAT_MPEG_STR "mpeg"
+#define XENSND_PCM_FORMAT_GSM_STR "gsm"
+
+
+/*
+ ******************************************************************************
+ * STATUS RETURN CODES
+ ******************************************************************************
+ *
+ * Status return code is zero on success and -XEN_EXX on failure.
+ *
+ ******************************************************************************
+ * Assumptions
+ ******************************************************************************
+ * o usage of grant reference 0 as invalid grant reference:
+ * grant reference 0 is valid, but never exposed to a PV driver,
+ * because it is already in use/reserved by the PV console.
+ * o all references in this document to page sizes must be treated
+ * as pages of size XEN_PAGE_SIZE unless otherwise noted.
+ *
+ ******************************************************************************
+ * Description of the protocol between frontend and backend driver
+ ******************************************************************************
+ *
+ * The two halves of a Para-virtual sound driver communicate with
+ * each other using shared pages and event channels.
+ * Shared page contains a ring with request/response packets.
+ *
+ * Packets, used for input/output operations, e.g. read/write, set/get volume,
+ * etc., provide offset/length fields in order to allow asynchronous protocol
+ * operation with buffer space sharing: part of the buffer allocated at
+ * XENSND_OP_OPEN can be used for audio samples and part, for example,
+ * for volume control.
+ *
+ * All reserved fields in the structures below must be 0.
+ *
+ *---------------------------------- Requests ---------------------------------
+ *
+ * All request packets have the same length (32 octets).
+ * All request packets have a common header:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | operation | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * id - uint16_t, private guest value, echoed in response
+ * operation - uint8_t, operation code, XENSND_OP_???
+ *
+ * For all packets which use offset and length:
+ * offset - uint32_t, read or write data offset within the shared buffer,
+ * passed with XENSND_OP_OPEN request, octets,
+ * [0; XENSND_OP_OPEN.buffer_sz - 1].
+ * length - uint32_t, read or write data length, octets
+ *
+ * Request open - open a PCM stream for playback or capture:
+ *
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | XENSND_OP_OPEN | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | pcm_rate | 12
+ * +----------------+----------------+----------------+----------------+
+ * | pcm_format | pcm_channels | reserved | 16
+ * +----------------+----------------+----------------+----------------+
+ * | buffer_sz | 20
+ * +----------------+----------------+----------------+----------------+
+ * | gref_directory | 24
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 28
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 32
+ * +----------------+----------------+----------------+----------------+
+ *
+ * pcm_rate - uint32_t, stream data rate, Hz
+ * pcm_format - uint8_t, XENSND_PCM_FORMAT_XXX value
+ * pcm_channels - uint8_t, number of channels of this stream,
+ * [channels-min; channels-max]
+ * buffer_sz - uint32_t, buffer size to be allocated, octets
+ * gref_directory - grant_ref_t, a reference to the first shared page
+ * describing shared buffer references. At least one page exists. If shared
+ * buffer size (buffer_sz) exceeds what can be addressed by this single page,
+ * then reference to the next page must be supplied (see gref_dir_next_page
+ * below)
+ */
+
+struct xensnd_open_req {
+ uint32_t pcm_rate;
+ uint8_t pcm_format;
+ uint8_t pcm_channels;
+ uint16_t reserved;
+ uint32_t buffer_sz;
+ grant_ref_t gref_directory;
+};
+
+/*
+ * Shared page for XENSND_OP_OPEN buffer descriptor (gref_directory in the
+ * request) employs a list of pages, describing all pages of the shared data
+ * buffer:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | gref_dir_next_page | 4
+ * +----------------+----------------+----------------+----------------+
+ * | gref[0] | 8
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | gref[i] | i*4+8
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | gref[N - 1] | N*4+8
+ * +----------------+----------------+----------------+----------------+
+ *
+ * gref_dir_next_page - grant_ref_t, reference to the next page describing
+ * page directory. Must be 0 if there are no more pages in the list.
+ * gref[i] - grant_ref_t, reference to a shared page of the buffer
+ * allocated at XENSND_OP_OPEN
+ *
+ * Number of grant_ref_t entries in the whole page directory is not
+ * passed, but instead can be calculated as:
+ * num_grefs_total = (XENSND_OP_OPEN.buffer_sz + XEN_PAGE_SIZE - 1) /
+ * XEN_PAGE_SIZE
+ */
+
+struct xensnd_page_directory {
+ grant_ref_t gref_dir_next_page;
+ grant_ref_t gref[1]; /* Variable length */
+};
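As a sketch only (assumptions: XEN_PAGE_SIZE from xen/page.h and the kernel's DIV_ROUND_UP helper), the number of directory pages needed for a given buffer_sz follows directly from the formula above:

#include <linux/kernel.h>
#include <linux/stddef.h>
#include <xen/page.h>

static size_t xensnd_num_dir_pages(uint32_t buffer_sz)
{
	/* grant_ref_t entries that fit in one directory page */
	size_t grefs_per_page =
		(XEN_PAGE_SIZE - offsetof(struct xensnd_page_directory, gref)) /
		sizeof(grant_ref_t);
	/* num_grefs_total from the formula above */
	size_t num_grefs_total = DIV_ROUND_UP(buffer_sz, XEN_PAGE_SIZE);

	return DIV_ROUND_UP(num_grefs_total, grefs_per_page);
}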
+
+/*
+ * Request close - close an opened pcm stream:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | XENSND_OP_CLOSE| reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 32
+ * +----------------+----------------+----------------+----------------+
+ *
+ * Request read/write - used for read (for capture) or write (for playback):
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | operation | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | offset | 12
+ * +----------------+----------------+----------------+----------------+
+ * | length | 16
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 20
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 32
+ * +----------------+----------------+----------------+----------------+
+ *
+ * operation - XENSND_OP_READ for read or XENSND_OP_WRITE for write
+ */
+
+struct xensnd_rw_req {
+ uint32_t offset;
+ uint32_t length;
+};
+
+/*
+ * Request set/get volume - set/get channels' volume of the stream given:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | operation | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | offset | 12
+ * +----------------+----------------+----------------+----------------+
+ * | length | 16
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 32
+ * +----------------+----------------+----------------+----------------+
+ *
+ * operation - XENSND_OP_SET_VOLUME for volume set
+ * or XENSND_OP_GET_VOLUME for volume get
+ * Buffer passed with XENSND_OP_OPEN is used to exchange volume
+ * values:
+ *
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | channel[0] | 4
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | channel[i] | i*4
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | channel[N - 1] | (N-1)*4
+ * +----------------+----------------+----------------+----------------+
+ *
+ * N = XENSND_OP_OPEN.pcm_channels
+ * i - uint8_t, index of a channel
+ * channel[i] - int32_t, volume of the i-th channel
+ * Volume is expressed as a signed value in steps of 0.001 dB,
+ * with 0 being 0 dB.
+ *
+ * Request mute/unmute - mute/unmute stream:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | operation | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 8
+ * +----------------+----------------+----------------+----------------+
+ * | offset | 12
+ * +----------------+----------------+----------------+----------------+
+ * | length | 16
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 32
+ * +----------------+----------------+----------------+----------------+
+ *
+ * operation - XENSND_OP_MUTE for mute or XENSND_OP_UNMUTE for unmute
+ * Buffer passed with XENSND_OP_OPEN is used to exchange mute/unmute
+ * values:
+ *
+ * 0 octet
+ * +----------------+----------------+----------------+----------------+
+ * | channel[0] | 4
+ * +----------------+----------------+----------------+----------------+
+ * +/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | channel[i] | i*4
+ * +----------------+----------------+----------------+----------------+
+ * +/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | channel[N - 1] | (N-1)*4
+ * +----------------+----------------+----------------+----------------+
+ *
+ * N = XENSND_OP_OPEN.pcm_channels
+ * i - uint8_t, index of a channel
+ * channel[i] - uint8_t, non-zero if i-th channel needs to be muted/unmuted
+ *
+ *------------------------------------ N.B. -----------------------------------
+ *
+ * The 'struct xensnd_rw_req' is also used for XENSND_OP_SET_VOLUME,
+ * XENSND_OP_GET_VOLUME, XENSND_OP_MUTE, XENSND_OP_UNMUTE.
+ */
+
+/*
+ *---------------------------------- Responses --------------------------------
+ *
+ * All response packets have the same length (32 octets).
+ *
+ * Response for all requests:
+ * 0 1 2 3 octet
+ * +----------------+----------------+----------------+----------------+
+ * | id | operation | reserved | 4
+ * +----------------+----------------+----------------+----------------+
+ * | status | 8
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 12
+ * +----------------+----------------+----------------+----------------+
+ * |/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/\/|
+ * +----------------+----------------+----------------+----------------+
+ * | reserved | 32
+ * +----------------+----------------+----------------+----------------+
+ *
+ * id - uint16_t, copied from the request
+ * operation - uint8_t, XENSND_OP_* - copied from request
+ * status - int32_t, response status, zero on success and -XEN_EXX on failure
+ */
+
+struct xensnd_req {
+ uint16_t id;
+ uint8_t operation;
+ uint8_t reserved[5];
+ union {
+ struct xensnd_open_req open;
+ struct xensnd_rw_req rw;
+ uint8_t reserved[24];
+ } op;
+};
+
+struct xensnd_resp {
+ uint16_t id;
+ uint8_t operation;
+ uint8_t reserved;
+ int32_t status;
+ uint8_t reserved1[24];
+};
+
+DEFINE_RING_TYPES(xen_sndif, struct xensnd_req, struct xensnd_resp);
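As an illustrative sketch of how a frontend might drive this ring (not part of this interface): the front_ring type comes from DEFINE_RING_TYPES() above, "shbuf" is assumed to be the buffer shared at XENSND_OP_OPEN, and notify_remote_via_evtchn() is the standard Xen event-channel helper.

#include <linux/string.h>
#include <xen/events.h>

static void xensnd_queue_set_volume(struct xen_sndif_front_ring *ring,
				    uint8_t *shbuf, uint32_t offset,
				    const int32_t *volume, uint8_t channels,
				    uint16_t id, int evtchn)
{
	struct xensnd_req *req;
	int notify;

	/* Volume values are exchanged via the shared buffer (see above). */
	memcpy(shbuf + offset, volume, channels * sizeof(*volume));

	req = RING_GET_REQUEST(ring, ring->req_prod_pvt);
	memset(req, 0, sizeof(*req));	/* all reserved fields must be 0 */
	req->id = id;
	req->operation = XENSND_OP_SET_VOLUME;
	req->op.rw.offset = offset;
	req->op.rw.length = channels * sizeof(*volume);

	ring->req_prod_pvt++;
	RING_PUSH_REQUESTS_AND_CHECK_NOTIFY(ring, notify);
	if (notify)
		notify_remote_via_evtchn(evtchn);
}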
+
+#endif /* __XEN_PUBLIC_IO_SNDIF_H__ */
diff --git a/include/xen/page.h b/include/xen/page.h
index 9dc46cb8a0fd..064194f6453e 100644
--- a/include/xen/page.h
+++ b/include/xen/page.h
@@ -38,7 +38,7 @@ struct xen_memory_region {
unsigned long n_pfns;
};
-#define XEN_EXTRA_MEM_MAX_REGIONS 128 /* == E820MAX */
+#define XEN_EXTRA_MEM_MAX_REGIONS 128 /* == E820_MAX_ENTRIES_ZEROPAGE */
extern __initdata
struct xen_memory_region xen_extra_mem[XEN_EXTRA_MEM_MAX_REGIONS];
diff --git a/include/xen/xen-ops.h b/include/xen/xen-ops.h
index b5486e648607..c44a2ee8c8f8 100644
--- a/include/xen/xen-ops.h
+++ b/include/xen/xen-ops.h
@@ -22,6 +22,8 @@ void xen_timer_resume(void);
void xen_arch_resume(void);
void xen_arch_suspend(void);
+void xen_reboot(int reason);
+
void xen_resume_notifier_register(struct notifier_block *nb);
void xen_resume_notifier_unregister(struct notifier_block *nb);
@@ -34,11 +36,25 @@ u64 xen_steal_clock(int cpu);
int xen_setup_shutdown_event(void);
extern unsigned long *xen_contiguous_bitmap;
+
+#ifdef CONFIG_XEN_PV
int xen_create_contiguous_region(phys_addr_t pstart, unsigned int order,
unsigned int address_bits,
dma_addr_t *dma_handle);
void xen_destroy_contiguous_region(phys_addr_t pstart, unsigned int order);
+#else
+static inline int xen_create_contiguous_region(phys_addr_t pstart,
+ unsigned int order,
+ unsigned int address_bits,
+ dma_addr_t *dma_handle)
+{
+ return 0;
+}
+
+static inline void xen_destroy_contiguous_region(phys_addr_t pstart,
+ unsigned int order) { }
+#endif
struct vm_area_struct;
@@ -120,6 +136,9 @@ efi_status_t xen_efi_update_capsule(efi_capsule_header_t **capsules,
efi_status_t xen_efi_query_capsule_caps(efi_capsule_header_t **capsules,
unsigned long count, u64 *max_size,
int *reset_type);
+void xen_efi_reset_system(int reset_type, efi_status_t status,
+ unsigned long data_size, efi_char16_t *data);
+
#ifdef CONFIG_PREEMPT