107 files changed, 5916 insertions, 1237 deletions
diff --git a/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml b/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml new file mode 100644 index 000000000000..57e4c87cb00b --- /dev/null +++ b/Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml @@ -0,0 +1,45 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/net/bluetooth/nxp,88w8987-bt.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: NXP Bluetooth chips + +description: + This binding describes UART-attached NXP bluetooth chips. These chips + are dual-radio chips supporting WiFi and Bluetooth. The bluetooth + works on standard H4 protocol over 4-wire UART. The RTS and CTS lines + are used during FW download. To enable power save mode, the host + asserts break signal over UART-TX line to put the chip into power save + state. De-asserting break wakes up the BT chip. + +maintainers: + - Neeraj Sanjay Kale <neeraj.sanjaykale@nxp.com> + +properties: + compatible: + enum: + - nxp,88w8987-bt + - nxp,88w8997-bt + + fw-init-baudrate: + description: + Chip baudrate after FW is downloaded and initialized. + This property depends on the module vendor's + configuration. If this property is not specified, + 115200 is set as default. + +required: + - compatible + +additionalProperties: false + +examples: + - | + serial { + bluetooth { + compatible = "nxp,88w8987-bt"; + fw-init-baudrate = <3000000>; + }; + }; diff --git a/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml b/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml index a6a6b0e4df7a..68f78b90d23a 100644 --- a/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml +++ b/Documentation/devicetree/bindings/net/bluetooth/qualcomm-bluetooth.yaml @@ -23,6 +23,7 @@ properties: - qcom,wcn3998-bt - qcom,qca6390-bt - qcom,wcn6750-bt + - qcom,wcn6855-bt enable-gpios: maxItems: 1 @@ -133,6 +134,22 @@ allOf: - vddrfa1p7-supply - vddrfa1p2-supply - vddasd-supply + - if: + properties: + compatible: + contains: + enum: + - qcom,wcn6855-bt + then: + required: + - enable-gpios + - swctrl-gpios + - vddio-supply + - vddbtcxmx-supply + - vddrfacmn-supply + - vddrfa0p8-supply + - vddrfa1p2-supply + - vddrfa1p7-supply examples: - | diff --git a/Documentation/devicetree/bindings/net/marvell-bluetooth.yaml b/Documentation/devicetree/bindings/net/marvell-bluetooth.yaml index 6aa7a078faa2..188a42ca6ceb 100644 --- a/Documentation/devicetree/bindings/net/marvell-bluetooth.yaml +++ b/Documentation/devicetree/bindings/net/marvell-bluetooth.yaml @@ -15,11 +15,29 @@ maintainers: properties: compatible: - const: mrvl,88w8897 + enum: + - mrvl,88w8897 + - mrvl,88w8997 + + max-speed: + description: see Documentation/devicetree/bindings/serial/serial.yaml required: - compatible +allOf: + - if: + properties: + compatible: + contains: + const: mrvl,88w8997 + then: + properties: + max-speed: true + else: + properties: + max-speed: false + additionalProperties: false examples: diff --git a/Documentation/devicetree/bindings/net/realtek-bluetooth.yaml b/Documentation/devicetree/bindings/net/realtek-bluetooth.yaml index 143b5667abad..8cc2b9924680 100644 --- a/Documentation/devicetree/bindings/net/realtek-bluetooth.yaml +++ b/Documentation/devicetree/bindings/net/realtek-bluetooth.yaml @@ -4,24 +4,30 @@ $id: http://devicetree.org/schemas/net/realtek-bluetooth.yaml# $schema: http://devicetree.org/meta-schemas/core.yaml# -title: RTL8723BS/RTL8723CS/RTL8822CS 
Bluetooth +title: RTL8723BS/RTL8723CS/RTL8821CS/RTL8822CS Bluetooth maintainers: - Vasily Khoruzhick <anarsoul@gmail.com> - Alistair Francis <alistair@alistair23.me> description: - RTL8723CS/RTL8723CS/RTL8822CS is WiFi + BT chip. WiFi part is connected over - SDIO, while BT is connected over serial. It speaks H5 protocol with few - extra commands to upload firmware and change module speed. + RTL8723CS/RTL8723CS/RTL8821CS/RTL8822CS is a WiFi + BT chip. WiFi part + is connected over SDIO, while BT is connected over serial. It speaks + H5 protocol with few extra commands to upload firmware and change + module speed. properties: compatible: - enum: - - realtek,rtl8723bs-bt - - realtek,rtl8723cs-bt - - realtek,rtl8723ds-bt - - realtek,rtl8822cs-bt + oneOf: + - enum: + - realtek,rtl8723bs-bt + - realtek,rtl8723cs-bt + - realtek,rtl8723ds-bt + - realtek,rtl8822cs-bt + - items: + - enum: + - realtek,rtl8821cs-bt + - const: realtek,rtl8822cs-bt device-wake-gpios: maxItems: 1 diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst index cd0973d4ba01..2540c70952ff 100644 --- a/Documentation/networking/ethtool-netlink.rst +++ b/Documentation/networking/ethtool-netlink.rst @@ -1099,6 +1099,10 @@ such that the corresponding bit in ``ethtool_ops::supported_coalesce_params`` is not set), regardless of their values. Driver may impose additional constraints on coalescing parameters and their values. +Compared to requests issued via the ``ioctl()`` netlink version of this request +will try harder to make sure that values specified by the user have been applied +and may call the driver twice. + PAUSE_GET ========= diff --git a/MAINTAINERS b/MAINTAINERS index 6ac562e0381e..c2f3ecdb47e0 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -23237,6 +23237,13 @@ L: linux-mm@kvack.org S: Maintained F: mm/zswap.c +NXP BLUETOOTH WIRELESS DRIVERS +M: Amitkumar Karwar <amitkumar.karwar@nxp.com> +M: Neeraj Kale <neeraj.sanjaykale@nxp.com> +S: Maintained +F: Documentation/devicetree/bindings/net/bluetooth/nxp,88w8987-bt.yaml +F: drivers/bluetooth/btnxpuart.c + THE REST M: Linus Torvalds <torvalds@linux-foundation.org> L: linux-kernel@vger.kernel.org diff --git a/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rgxx3.dtsi b/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rgxx3.dtsi index 41262a69d33e..8fadd8afb190 100644 --- a/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rgxx3.dtsi +++ b/arch/arm64/boot/dts/rockchip/rk3566-anbernic-rgxx3.dtsi @@ -716,7 +716,7 @@ status = "okay"; bluetooth { - compatible = "realtek,rtl8821cs-bt"; + compatible = "realtek,rtl8821cs-bt", "realtek,rtl8822cs-bt"; device-wake-gpios = <&gpio4 4 GPIO_ACTIVE_HIGH>; enable-gpios = <&gpio4 3 GPIO_ACTIVE_HIGH>; host-wake-gpios = <&gpio4 5 GPIO_ACTIVE_HIGH>; diff --git a/drivers/bluetooth/Kconfig b/drivers/bluetooth/Kconfig index 5a1a7bec3c42..bc211c324206 100644 --- a/drivers/bluetooth/Kconfig +++ b/drivers/bluetooth/Kconfig @@ -363,6 +363,7 @@ config BT_HCIBLUECARD config BT_HCIVHCI tristate "HCI VHCI (Virtual HCI device) driver" + select WANT_DEV_COREDUMP help Bluetooth Virtual HCI device driver. This driver is required if you want to use HCI Emulation software. @@ -465,4 +466,17 @@ config BT_VIRTIO Say Y here to compile support for HCI over Virtio into the kernel or say M to compile as a module. +config BT_NXPUART + tristate "NXP protocol support" + depends on SERIAL_DEV_BUS + select CRC32 + select CRC8 + help + NXP is serial driver required for NXP Bluetooth + devices with UART interface. 
+ + Say Y here to compile support for NXP Bluetooth UART device into + the kernel, or say M here to compile as a module (btnxpuart). + + endmenu diff --git a/drivers/bluetooth/Makefile b/drivers/bluetooth/Makefile index e0b261f24fc9..7a5967e9ac48 100644 --- a/drivers/bluetooth/Makefile +++ b/drivers/bluetooth/Makefile @@ -29,6 +29,7 @@ obj-$(CONFIG_BT_QCA) += btqca.o obj-$(CONFIG_BT_MTK) += btmtk.o obj-$(CONFIG_BT_VIRTIO) += virtio_bt.o +obj-$(CONFIG_BT_NXPUART) += btnxpuart.o obj-$(CONFIG_BT_HCIUART_NOKIA) += hci_nokia.o diff --git a/drivers/bluetooth/btbcm.c b/drivers/bluetooth/btbcm.c index 43e98a598bd9..de2ea589aa49 100644 --- a/drivers/bluetooth/btbcm.c +++ b/drivers/bluetooth/btbcm.c @@ -6,6 +6,7 @@ * Copyright (C) 2015 Intel Corporation */ +#include <linux/efi.h> #include <linux/module.h> #include <linux/firmware.h> #include <linux/dmi.h> @@ -34,6 +35,43 @@ /* For kmalloc-ing the fw-name array instead of putting it on the stack */ typedef char bcm_fw_name[BCM_FW_NAME_LEN]; +#ifdef CONFIG_EFI +static int btbcm_set_bdaddr_from_efi(struct hci_dev *hdev) +{ + efi_guid_t guid = EFI_GUID(0x74b00bd9, 0x805a, 0x4d61, 0xb5, 0x1f, + 0x43, 0x26, 0x81, 0x23, 0xd1, 0x13); + bdaddr_t efi_bdaddr, bdaddr; + efi_status_t status; + unsigned long len; + int ret; + + if (!efi_rt_services_supported(EFI_RT_SUPPORTED_GET_VARIABLE)) + return -EOPNOTSUPP; + + len = sizeof(efi_bdaddr); + status = efi.get_variable(L"BDADDR", &guid, NULL, &len, &efi_bdaddr); + if (status != EFI_SUCCESS) + return -ENXIO; + + if (len != sizeof(efi_bdaddr)) + return -EIO; + + baswap(&bdaddr, &efi_bdaddr); + + ret = btbcm_set_bdaddr(hdev, &bdaddr); + if (ret) + return ret; + + bt_dev_info(hdev, "BCM: Using EFI device address (%pMR)", &bdaddr); + return 0; +} +#else +static int btbcm_set_bdaddr_from_efi(struct hci_dev *hdev) +{ + return -EOPNOTSUPP; +} +#endif + int btbcm_check_bdaddr(struct hci_dev *hdev) { struct hci_rp_read_bd_addr *bda; @@ -87,9 +125,12 @@ int btbcm_check_bdaddr(struct hci_dev *hdev) !bacmp(&bda->bdaddr, BDADDR_BCM4345C5) || !bacmp(&bda->bdaddr, BDADDR_BCM43430A0) || !bacmp(&bda->bdaddr, BDADDR_BCM43341B)) { - bt_dev_info(hdev, "BCM: Using default device address (%pMR)", - &bda->bdaddr); - set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); + /* Try falling back to BDADDR EFI variable */ + if (btbcm_set_bdaddr_from_efi(hdev) != 0) { + bt_dev_info(hdev, "BCM: Using default device address (%pMR)", + &bda->bdaddr); + set_bit(HCI_QUIRK_INVALID_BDADDR, &hdev->quirks); + } } kfree_skb(skb); diff --git a/drivers/bluetooth/btintel.c b/drivers/bluetooth/btintel.c index af774688f1c0..d9349ba48281 100644 --- a/drivers/bluetooth/btintel.c +++ b/drivers/bluetooth/btintel.c @@ -43,6 +43,12 @@ struct cmd_write_boot_params { u8 fw_build_yy; } __packed; +static struct { + const char *driver_name; + u8 hw_variant; + u32 fw_build_num; +} coredump_info; + int btintel_check_bdaddr(struct hci_dev *hdev) { struct hci_rp_read_bd_addr *bda; @@ -315,6 +321,9 @@ int btintel_version_info(struct hci_dev *hdev, struct intel_version *ver) return -EINVAL; } + coredump_info.hw_variant = ver->hw_variant; + coredump_info.fw_build_num = ver->fw_build_num; + bt_dev_info(hdev, "%s revision %u.%u build %u week %u %u", variant, ver->fw_revision >> 4, ver->fw_revision & 0x0f, ver->fw_build_num, ver->fw_build_ww, @@ -509,6 +518,9 @@ static int btintel_version_info_tlv(struct hci_dev *hdev, return -EINVAL; } + coredump_info.hw_variant = INTEL_HW_VARIANT(version->cnvi_bt); + coredump_info.fw_build_num = version->build_num; + bt_dev_info(hdev, "%s timestamp 
%u.%u buildtype %u build %u", variant, 2000 + (version->timestamp >> 8), version->timestamp & 0xff, version->build_type, version->build_num); @@ -1462,6 +1474,59 @@ int btintel_set_quality_report(struct hci_dev *hdev, bool enable) } EXPORT_SYMBOL_GPL(btintel_set_quality_report); +static void btintel_coredump(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, 0xfc4e, 0, NULL, HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Coredump failed (%ld)", PTR_ERR(skb)); + return; + } + + kfree_skb(skb); +} + +static void btintel_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb) +{ + char buf[80]; + + snprintf(buf, sizeof(buf), "Controller Name: 0x%X\n", + coredump_info.hw_variant); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Firmware Version: 0x%X\n", + coredump_info.fw_build_num); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Driver: %s\n", coredump_info.driver_name); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Vendor: Intel\n"); + skb_put_data(skb, buf, strlen(buf)); +} + +static int btintel_register_devcoredump_support(struct hci_dev *hdev) +{ + struct intel_debug_features features; + int err; + + err = btintel_read_debug_features(hdev, &features); + if (err) { + bt_dev_info(hdev, "Error reading debug features"); + return err; + } + + if (!(features.page1[0] & 0x3f)) { + bt_dev_dbg(hdev, "Telemetry exception format not supported"); + return -EOPNOTSUPP; + } + + hci_devcd_register(hdev, btintel_coredump, btintel_dmp_hdr, NULL); + + return err; +} + static const struct firmware *btintel_legacy_rom_get_fw(struct hci_dev *hdev, struct intel_version *ver) { @@ -2597,6 +2662,7 @@ static int btintel_setup_combined(struct hci_dev *hdev) btintel_set_msft_opcode(hdev, ver.hw_variant); err = btintel_bootloader_setup(hdev, &ver); + btintel_register_devcoredump_support(hdev); break; default: bt_dev_err(hdev, "Unsupported Intel hw variant (%u)", @@ -2670,6 +2736,7 @@ static int btintel_setup_combined(struct hci_dev *hdev) btintel_set_msft_opcode(hdev, ver.hw_variant); err = btintel_bootloader_setup(hdev, &ver); + btintel_register_devcoredump_support(hdev); break; case 0x17: case 0x18: @@ -2684,15 +2751,15 @@ static int btintel_setup_combined(struct hci_dev *hdev) */ set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); - /* Valid LE States quirk for GfP */ - if (INTEL_HW_VARIANT(ver_tlv.cnvi_bt) == 0x18) - set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); + /* Apply LE States quirk from solar onwards */ + set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); /* Setup MSFT Extension support */ btintel_set_msft_opcode(hdev, INTEL_HW_VARIANT(ver_tlv.cnvi_bt)); err = btintel_bootloader_setup_tlv(hdev, &ver_tlv); + btintel_register_devcoredump_support(hdev); break; default: bt_dev_err(hdev, "Unsupported Intel hw variant (%u)", @@ -2742,7 +2809,7 @@ static int btintel_shutdown_combined(struct hci_dev *hdev) return 0; } -int btintel_configure_setup(struct hci_dev *hdev) +int btintel_configure_setup(struct hci_dev *hdev, const char *driver_name) { hdev->manufacturer = 2; hdev->setup = btintel_setup_combined; @@ -2751,6 +2818,8 @@ int btintel_configure_setup(struct hci_dev *hdev) hdev->set_diag = btintel_set_diag_combined; hdev->set_bdaddr = btintel_set_bdaddr; + coredump_info.driver_name = driver_name; + return 0; } EXPORT_SYMBOL_GPL(btintel_configure_setup); diff --git a/drivers/bluetooth/btintel.h b/drivers/bluetooth/btintel.h index 8fdb65b66315..d6a1dc8d8a82 100644 --- 
a/drivers/bluetooth/btintel.h +++ b/drivers/bluetooth/btintel.h @@ -143,6 +143,13 @@ struct btintel_loc_aware_reg { __le32 delta; } __packed; +#define INTEL_TLV_TYPE_ID 0x01 + +#define INTEL_TLV_SYSTEM_EXCEPTION 0x00 +#define INTEL_TLV_FATAL_EXCEPTION 0x01 +#define INTEL_TLV_DEBUG_EXCEPTION 0x02 +#define INTEL_TLV_TEST_EXCEPTION 0xDE + #define INTEL_HW_PLATFORM(cnvx_bt) ((u8)(((cnvx_bt) & 0x0000ff00) >> 8)) #define INTEL_HW_VARIANT(cnvx_bt) ((u8)(((cnvx_bt) & 0x003f0000) >> 16)) #define INTEL_CNVX_TOP_TYPE(cnvx_top) ((cnvx_top) & 0x00000fff) @@ -212,7 +219,7 @@ int btintel_read_boot_params(struct hci_dev *hdev, struct intel_boot_params *params); int btintel_download_firmware(struct hci_dev *dev, struct intel_version *ver, const struct firmware *fw, u32 *boot_param); -int btintel_configure_setup(struct hci_dev *hdev); +int btintel_configure_setup(struct hci_dev *hdev, const char *driver_name); void btintel_bootup(struct hci_dev *hdev, const void *ptr, unsigned int len); void btintel_secure_send_result(struct hci_dev *hdev, const void *ptr, unsigned int len); @@ -293,7 +300,8 @@ static inline int btintel_download_firmware(struct hci_dev *dev, return -EOPNOTSUPP; } -static inline int btintel_configure_setup(struct hci_dev *hdev) +static inline int btintel_configure_setup(struct hci_dev *hdev, + const char *driver_name) { return -ENODEV; } diff --git a/drivers/bluetooth/btmrvl_sdio.c b/drivers/bluetooth/btmrvl_sdio.c index ba057ebfda5c..d76c799553aa 100644 --- a/drivers/bluetooth/btmrvl_sdio.c +++ b/drivers/bluetooth/btmrvl_sdio.c @@ -40,7 +40,7 @@ static struct memory_type_mapping mem_type_mapping_tbl[] = { {"EXTLAST", NULL, 0, 0xFE}, }; -static const struct of_device_id btmrvl_sdio_of_match_table[] = { +static const struct of_device_id btmrvl_sdio_of_match_table[] __maybe_unused = { { .compatible = "marvell,sd8897-bt" }, { .compatible = "marvell,sd8997-bt" }, { } diff --git a/drivers/bluetooth/btmtkuart.c b/drivers/bluetooth/btmtkuart.c index c98691cdbbd5..7680c67cdb35 100644 --- a/drivers/bluetooth/btmtkuart.c +++ b/drivers/bluetooth/btmtkuart.c @@ -959,16 +959,16 @@ static void btmtkuart_remove(struct serdev_device *serdev) hci_free_dev(hdev); } -static const struct btmtkuart_data mt7622_data = { +static const struct btmtkuart_data mt7622_data __maybe_unused = { .fwname = FIRMWARE_MT7622, }; -static const struct btmtkuart_data mt7663_data = { +static const struct btmtkuart_data mt7663_data __maybe_unused = { .flags = BTMTKUART_FLAG_STANDALONE_HW, .fwname = FIRMWARE_MT7663, }; -static const struct btmtkuart_data mt7668_data = { +static const struct btmtkuart_data mt7668_data __maybe_unused = { .flags = BTMTKUART_FLAG_STANDALONE_HW, .fwname = FIRMWARE_MT7668, }; diff --git a/drivers/bluetooth/btnxpuart.c b/drivers/bluetooth/btnxpuart.c new file mode 100644 index 000000000000..3a34d7c1475b --- /dev/null +++ b/drivers/bluetooth/btnxpuart.c @@ -0,0 +1,1352 @@ +// SPDX-License-Identifier: GPL-2.0-or-later +/* + * NXP Bluetooth driver + * Copyright 2023 NXP + */ + +#include <linux/module.h> +#include <linux/kernel.h> + +#include <linux/serdev.h> +#include <linux/of.h> +#include <linux/skbuff.h> +#include <asm/unaligned.h> +#include <linux/firmware.h> +#include <linux/string.h> +#include <linux/crc8.h> +#include <linux/crc32.h> +#include <linux/string_helpers.h> + +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> + +#include "h4_recv.h" + +#define MANUFACTURER_NXP 37 + +#define BTNXPUART_TX_STATE_ACTIVE 1 +#define BTNXPUART_FW_DOWNLOADING 2 +#define 
BTNXPUART_CHECK_BOOT_SIGNATURE 3 +#define BTNXPUART_SERDEV_OPEN 4 + +#define FIRMWARE_W8987 "nxp/uartuart8987_bt.bin" +#define FIRMWARE_W8997 "nxp/uartuart8997_bt_v4.bin" +#define FIRMWARE_W9098 "nxp/uartuart9098_bt_v1.bin" +#define FIRMWARE_IW416 "nxp/uartiw416_bt_v0.bin" +#define FIRMWARE_IW612 "nxp/uartspi_n61x_v1.bin.se" +#define FIRMWARE_HELPER "nxp/helper_uart_3000000.bin" + +#define CHIP_ID_W9098 0x5c03 +#define CHIP_ID_IW416 0x7201 +#define CHIP_ID_IW612 0x7601 + +#define HCI_NXP_PRI_BAUDRATE 115200 +#define HCI_NXP_SEC_BAUDRATE 3000000 + +#define MAX_FW_FILE_NAME_LEN 50 + +/* Default ps timeout period in milliseconds */ +#define PS_DEFAULT_TIMEOUT_PERIOD_MS 2000 + +/* wakeup methods */ +#define WAKEUP_METHOD_DTR 0 +#define WAKEUP_METHOD_BREAK 1 +#define WAKEUP_METHOD_EXT_BREAK 2 +#define WAKEUP_METHOD_RTS 3 +#define WAKEUP_METHOD_INVALID 0xff + +/* power save mode status */ +#define PS_MODE_DISABLE 0 +#define PS_MODE_ENABLE 1 + +/* Power Save Commands to ps_work_func */ +#define PS_CMD_EXIT_PS 1 +#define PS_CMD_ENTER_PS 2 + +/* power save state */ +#define PS_STATE_AWAKE 0 +#define PS_STATE_SLEEP 1 + +/* Bluetooth vendor command : Sleep mode */ +#define HCI_NXP_AUTO_SLEEP_MODE 0xfc23 +/* Bluetooth vendor command : Wakeup method */ +#define HCI_NXP_WAKEUP_METHOD 0xfc53 +/* Bluetooth vendor command : Set operational baudrate */ +#define HCI_NXP_SET_OPER_SPEED 0xfc09 +/* Bluetooth vendor command: Independent Reset */ +#define HCI_NXP_IND_RESET 0xfcfc + +/* Bluetooth Power State : Vendor cmd params */ +#define BT_PS_ENABLE 0x02 +#define BT_PS_DISABLE 0x03 + +/* Bluetooth Host Wakeup Methods */ +#define BT_HOST_WAKEUP_METHOD_NONE 0x00 +#define BT_HOST_WAKEUP_METHOD_DTR 0x01 +#define BT_HOST_WAKEUP_METHOD_BREAK 0x02 +#define BT_HOST_WAKEUP_METHOD_GPIO 0x03 + +/* Bluetooth Chip Wakeup Methods */ +#define BT_CTRL_WAKEUP_METHOD_DSR 0x00 +#define BT_CTRL_WAKEUP_METHOD_BREAK 0x01 +#define BT_CTRL_WAKEUP_METHOD_GPIO 0x02 +#define BT_CTRL_WAKEUP_METHOD_EXT_BREAK 0x04 +#define BT_CTRL_WAKEUP_METHOD_RTS 0x05 + +struct ps_data { + u8 target_ps_mode; /* ps mode to be set */ + u8 cur_psmode; /* current ps_mode */ + u8 ps_state; /* controller's power save state */ + u8 ps_cmd; + u8 h2c_wakeupmode; + u8 cur_h2c_wakeupmode; + u8 c2h_wakeupmode; + u8 c2h_wakeup_gpio; + u8 h2c_wakeup_gpio; + bool driver_sent_cmd; + u16 h2c_ps_interval; + u16 c2h_ps_interval; + struct hci_dev *hdev; + struct work_struct work; + struct timer_list ps_timer; +}; + +struct wakeup_cmd_payload { + u8 c2h_wakeupmode; + u8 c2h_wakeup_gpio; + u8 h2c_wakeupmode; + u8 h2c_wakeup_gpio; +} __packed; + +struct psmode_cmd_payload { + u8 ps_cmd; + __le16 c2h_ps_interval; +} __packed; + +struct btnxpuart_data { + const char *helper_fw_name; + const char *fw_name; +}; + +struct btnxpuart_dev { + struct hci_dev *hdev; + struct serdev_device *serdev; + + struct work_struct tx_work; + unsigned long tx_state; + struct sk_buff_head txq; + struct sk_buff *rx_skb; + + const struct firmware *fw; + u8 fw_name[MAX_FW_FILE_NAME_LEN]; + u32 fw_dnld_v1_offset; + u32 fw_v1_sent_bytes; + u32 fw_v3_offset_correction; + u32 fw_v1_expected_len; + wait_queue_head_t fw_dnld_done_wait_q; + wait_queue_head_t check_boot_sign_wait_q; + + u32 new_baudrate; + u32 current_baudrate; + u32 fw_init_baudrate; + bool timeout_changed; + bool baudrate_changed; + bool helper_downloaded; + + struct ps_data psdata; + struct btnxpuart_data *nxp_data; +}; + +#define NXP_V1_FW_REQ_PKT 0xa5 +#define NXP_V1_CHIP_VER_PKT 0xaa +#define NXP_V3_FW_REQ_PKT 0xa7 +#define 
NXP_V3_CHIP_VER_PKT 0xab + +#define NXP_ACK_V1 0x5a +#define NXP_NAK_V1 0xbf +#define NXP_ACK_V3 0x7a +#define NXP_NAK_V3 0x7b +#define NXP_CRC_ERROR_V3 0x7c + +#define HDR_LEN 16 + +#define NXP_RECV_CHIP_VER_V1 \ + .type = NXP_V1_CHIP_VER_PKT, \ + .hlen = 4, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = 4 + +#define NXP_RECV_FW_REQ_V1 \ + .type = NXP_V1_FW_REQ_PKT, \ + .hlen = 4, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = 4 + +#define NXP_RECV_CHIP_VER_V3 \ + .type = NXP_V3_CHIP_VER_PKT, \ + .hlen = 4, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = 4 + +#define NXP_RECV_FW_REQ_V3 \ + .type = NXP_V3_FW_REQ_PKT, \ + .hlen = 9, \ + .loff = 0, \ + .lsize = 0, \ + .maxlen = 9 + +struct v1_data_req { + __le16 len; + __le16 len_comp; +} __packed; + +struct v1_start_ind { + __le16 chip_id; + __le16 chip_id_comp; +} __packed; + +struct v3_data_req { + __le16 len; + __le32 offset; + __le16 error; + u8 crc; +} __packed; + +struct v3_start_ind { + __le16 chip_id; + u8 loader_ver; + u8 crc; +} __packed; + +/* UART register addresses of BT chip */ +#define CLKDIVADDR 0x7f00008f +#define UARTDIVADDR 0x7f000090 +#define UARTMCRADDR 0x7f000091 +#define UARTREINITADDR 0x7f000092 +#define UARTICRADDR 0x7f000093 +#define UARTFCRADDR 0x7f000094 + +#define MCR 0x00000022 +#define INIT 0x00000001 +#define ICR 0x000000c7 +#define FCR 0x000000c7 + +#define POLYNOMIAL8 0x07 + +struct uart_reg { + __le32 address; + __le32 value; +} __packed; + +struct uart_config { + struct uart_reg clkdiv; + struct uart_reg uartdiv; + struct uart_reg mcr; + struct uart_reg re_init; + struct uart_reg icr; + struct uart_reg fcr; + __be32 crc; +} __packed; + +struct nxp_bootloader_cmd { + __le32 header; + __le32 arg; + __le32 payload_len; + __be32 crc; +} __packed; + +static u8 crc8_table[CRC8_TABLE_SIZE]; + +/* Default configurations */ +#define DEFAULT_H2C_WAKEUP_MODE WAKEUP_METHOD_BREAK +#define DEFAULT_PS_MODE PS_MODE_DISABLE +#define FW_INIT_BAUDRATE HCI_NXP_PRI_BAUDRATE + +static struct sk_buff *nxp_drv_send_cmd(struct hci_dev *hdev, u16 opcode, + u32 plen, + void *param) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct ps_data *psdata = &nxpdev->psdata; + struct sk_buff *skb; + + /* set flag to prevent nxp_enqueue from parsing values from this command and + * calling hci_cmd_sync_queue() again. 
+ */ + psdata->driver_sent_cmd = true; + skb = __hci_cmd_sync(hdev, opcode, plen, param, HCI_CMD_TIMEOUT); + psdata->driver_sent_cmd = false; + + return skb; +} + +static void btnxpuart_tx_wakeup(struct btnxpuart_dev *nxpdev) +{ + if (schedule_work(&nxpdev->tx_work)) + set_bit(BTNXPUART_TX_STATE_ACTIVE, &nxpdev->tx_state); +} + +/* NXP Power Save Feature */ +static void ps_start_timer(struct btnxpuart_dev *nxpdev) +{ + struct ps_data *psdata = &nxpdev->psdata; + + if (!psdata) + return; + + if (psdata->cur_psmode == PS_MODE_ENABLE) + mod_timer(&psdata->ps_timer, jiffies + msecs_to_jiffies(psdata->h2c_ps_interval)); +} + +static void ps_cancel_timer(struct btnxpuart_dev *nxpdev) +{ + struct ps_data *psdata = &nxpdev->psdata; + + flush_work(&psdata->work); + del_timer_sync(&psdata->ps_timer); +} + +static void ps_control(struct hci_dev *hdev, u8 ps_state) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct ps_data *psdata = &nxpdev->psdata; + int status; + + if (psdata->ps_state == ps_state || + !test_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state)) + return; + + switch (psdata->cur_h2c_wakeupmode) { + case WAKEUP_METHOD_DTR: + if (ps_state == PS_STATE_AWAKE) + status = serdev_device_set_tiocm(nxpdev->serdev, TIOCM_DTR, 0); + else + status = serdev_device_set_tiocm(nxpdev->serdev, 0, TIOCM_DTR); + break; + case WAKEUP_METHOD_BREAK: + default: + if (ps_state == PS_STATE_AWAKE) + status = serdev_device_break_ctl(nxpdev->serdev, 0); + else + status = serdev_device_break_ctl(nxpdev->serdev, -1); + bt_dev_dbg(hdev, "Set UART break: %s, status=%d", + str_on_off(ps_state == PS_STATE_SLEEP), status); + break; + } + if (!status) + psdata->ps_state = ps_state; + if (ps_state == PS_STATE_AWAKE) + btnxpuart_tx_wakeup(nxpdev); +} + +static void ps_work_func(struct work_struct *work) +{ + struct ps_data *data = container_of(work, struct ps_data, work); + + if (data->ps_cmd == PS_CMD_ENTER_PS && data->cur_psmode == PS_MODE_ENABLE) + ps_control(data->hdev, PS_STATE_SLEEP); + else if (data->ps_cmd == PS_CMD_EXIT_PS) + ps_control(data->hdev, PS_STATE_AWAKE); +} + +static void ps_timeout_func(struct timer_list *t) +{ + struct ps_data *data = from_timer(data, t, ps_timer); + struct hci_dev *hdev = data->hdev; + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + + if (test_bit(BTNXPUART_TX_STATE_ACTIVE, &nxpdev->tx_state)) { + ps_start_timer(nxpdev); + } else { + data->ps_cmd = PS_CMD_ENTER_PS; + schedule_work(&data->work); + } +} + +static int ps_init_work(struct hci_dev *hdev) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct ps_data *psdata = &nxpdev->psdata; + + psdata->h2c_ps_interval = PS_DEFAULT_TIMEOUT_PERIOD_MS; + psdata->ps_state = PS_STATE_AWAKE; + psdata->target_ps_mode = DEFAULT_PS_MODE; + psdata->hdev = hdev; + psdata->c2h_wakeupmode = BT_HOST_WAKEUP_METHOD_NONE; + psdata->c2h_wakeup_gpio = 0xff; + + switch (DEFAULT_H2C_WAKEUP_MODE) { + case WAKEUP_METHOD_DTR: + psdata->h2c_wakeupmode = WAKEUP_METHOD_DTR; + break; + case WAKEUP_METHOD_BREAK: + default: + psdata->h2c_wakeupmode = WAKEUP_METHOD_BREAK; + break; + } + psdata->cur_psmode = PS_MODE_DISABLE; + psdata->cur_h2c_wakeupmode = WAKEUP_METHOD_INVALID; + INIT_WORK(&psdata->work, ps_work_func); + + return 0; +} + +static void ps_init_timer(struct hci_dev *hdev) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct ps_data *psdata = &nxpdev->psdata; + + timer_setup(&psdata->ps_timer, ps_timeout_func, 0); +} + +static void ps_wakeup(struct btnxpuart_dev *nxpdev) +{ + struct ps_data *psdata = 
&nxpdev->psdata; + + if (psdata->ps_state != PS_STATE_AWAKE) { + psdata->ps_cmd = PS_CMD_EXIT_PS; + schedule_work(&psdata->work); + } +} + +static int send_ps_cmd(struct hci_dev *hdev, void *data) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct ps_data *psdata = &nxpdev->psdata; + struct psmode_cmd_payload pcmd; + struct sk_buff *skb; + u8 *status; + + if (psdata->target_ps_mode == PS_MODE_ENABLE) + pcmd.ps_cmd = BT_PS_ENABLE; + else + pcmd.ps_cmd = BT_PS_DISABLE; + pcmd.c2h_ps_interval = __cpu_to_le16(psdata->c2h_ps_interval); + + skb = nxp_drv_send_cmd(hdev, HCI_NXP_AUTO_SLEEP_MODE, sizeof(pcmd), &pcmd); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Setting Power Save mode failed (%ld)", PTR_ERR(skb)); + return PTR_ERR(skb); + } + + status = skb_pull_data(skb, 1); + if (status) { + if (!*status) + psdata->cur_psmode = psdata->target_ps_mode; + else + psdata->target_ps_mode = psdata->cur_psmode; + if (psdata->cur_psmode == PS_MODE_ENABLE) + ps_start_timer(nxpdev); + else + ps_wakeup(nxpdev); + bt_dev_dbg(hdev, "Power Save mode response: status=%d, ps_mode=%d", + *status, psdata->cur_psmode); + } + kfree_skb(skb); + + return 0; +} + +static int send_wakeup_method_cmd(struct hci_dev *hdev, void *data) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct ps_data *psdata = &nxpdev->psdata; + struct wakeup_cmd_payload pcmd; + struct sk_buff *skb; + u8 *status; + + pcmd.c2h_wakeupmode = psdata->c2h_wakeupmode; + pcmd.c2h_wakeup_gpio = psdata->c2h_wakeup_gpio; + switch (psdata->h2c_wakeupmode) { + case WAKEUP_METHOD_DTR: + pcmd.h2c_wakeupmode = BT_CTRL_WAKEUP_METHOD_DSR; + break; + case WAKEUP_METHOD_BREAK: + default: + pcmd.h2c_wakeupmode = BT_CTRL_WAKEUP_METHOD_BREAK; + break; + } + pcmd.h2c_wakeup_gpio = 0xff; + + skb = nxp_drv_send_cmd(hdev, HCI_NXP_WAKEUP_METHOD, sizeof(pcmd), &pcmd); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Setting wake-up method failed (%ld)", PTR_ERR(skb)); + return PTR_ERR(skb); + } + + status = skb_pull_data(skb, 1); + if (status) { + if (*status == 0) + psdata->cur_h2c_wakeupmode = psdata->h2c_wakeupmode; + else + psdata->h2c_wakeupmode = psdata->cur_h2c_wakeupmode; + bt_dev_dbg(hdev, "Set Wakeup Method response: status=%d, h2c_wakeupmode=%d", + *status, psdata->cur_h2c_wakeupmode); + } + kfree_skb(skb); + + return 0; +} + +static void ps_init(struct hci_dev *hdev) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct ps_data *psdata = &nxpdev->psdata; + + serdev_device_set_tiocm(nxpdev->serdev, 0, TIOCM_RTS); + usleep_range(5000, 10000); + serdev_device_set_tiocm(nxpdev->serdev, TIOCM_RTS, 0); + usleep_range(5000, 10000); + + switch (psdata->h2c_wakeupmode) { + case WAKEUP_METHOD_DTR: + serdev_device_set_tiocm(nxpdev->serdev, 0, TIOCM_DTR); + serdev_device_set_tiocm(nxpdev->serdev, TIOCM_DTR, 0); + break; + case WAKEUP_METHOD_BREAK: + default: + serdev_device_break_ctl(nxpdev->serdev, -1); + usleep_range(5000, 10000); + serdev_device_break_ctl(nxpdev->serdev, 0); + usleep_range(5000, 10000); + break; + } + if (psdata->cur_h2c_wakeupmode != psdata->h2c_wakeupmode) + hci_cmd_sync_queue(hdev, send_wakeup_method_cmd, NULL, NULL); + if (psdata->cur_psmode != psdata->target_ps_mode) + hci_cmd_sync_queue(hdev, send_ps_cmd, NULL, NULL); +} + +/* NXP Firmware Download Feature */ +static int nxp_download_firmware(struct hci_dev *hdev) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + int err = 0; + + nxpdev->fw_dnld_v1_offset = 0; + nxpdev->fw_v1_sent_bytes = 0; + nxpdev->fw_v1_expected_len = HDR_LEN; + 
nxpdev->fw_v3_offset_correction = 0; + nxpdev->baudrate_changed = false; + nxpdev->timeout_changed = false; + nxpdev->helper_downloaded = false; + + serdev_device_set_baudrate(nxpdev->serdev, HCI_NXP_PRI_BAUDRATE); + serdev_device_set_flow_control(nxpdev->serdev, false); + nxpdev->current_baudrate = HCI_NXP_PRI_BAUDRATE; + + /* Wait till FW is downloaded and CTS becomes low */ + err = wait_event_interruptible_timeout(nxpdev->fw_dnld_done_wait_q, + !test_bit(BTNXPUART_FW_DOWNLOADING, + &nxpdev->tx_state), + msecs_to_jiffies(60000)); + if (err == 0) { + bt_dev_err(hdev, "FW Download Timeout."); + return -ETIMEDOUT; + } + + serdev_device_set_flow_control(nxpdev->serdev, true); + err = serdev_device_wait_for_cts(nxpdev->serdev, 1, 60000); + if (err < 0) { + bt_dev_err(hdev, "CTS is still high. FW Download failed."); + return err; + } + release_firmware(nxpdev->fw); + memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name)); + + /* Allow the downloaded FW to initialize */ + usleep_range(800 * USEC_PER_MSEC, 1 * USEC_PER_SEC); + + return 0; +} + +static void nxp_send_ack(u8 ack, struct hci_dev *hdev) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + u8 ack_nak[2]; + int len = 1; + + ack_nak[0] = ack; + if (ack == NXP_ACK_V3) { + ack_nak[1] = crc8(crc8_table, ack_nak, 1, 0xff); + len = 2; + } + serdev_device_write_buf(nxpdev->serdev, ack_nak, len); +} + +static bool nxp_fw_change_baudrate(struct hci_dev *hdev, u16 req_len) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct nxp_bootloader_cmd nxp_cmd5; + struct uart_config uart_config; + + if (req_len == sizeof(nxp_cmd5)) { + nxp_cmd5.header = __cpu_to_le32(5); + nxp_cmd5.arg = 0; + nxp_cmd5.payload_len = __cpu_to_le32(sizeof(uart_config)); + /* FW expects swapped CRC bytes */ + nxp_cmd5.crc = __cpu_to_be32(crc32_be(0UL, (char *)&nxp_cmd5, + sizeof(nxp_cmd5) - 4)); + + serdev_device_write_buf(nxpdev->serdev, (u8 *)&nxp_cmd5, sizeof(nxp_cmd5)); + nxpdev->fw_v3_offset_correction += req_len; + } else if (req_len == sizeof(uart_config)) { + uart_config.clkdiv.address = __cpu_to_le32(CLKDIVADDR); + uart_config.clkdiv.value = __cpu_to_le32(0x00c00000); + uart_config.uartdiv.address = __cpu_to_le32(UARTDIVADDR); + uart_config.uartdiv.value = __cpu_to_le32(1); + uart_config.mcr.address = __cpu_to_le32(UARTMCRADDR); + uart_config.mcr.value = __cpu_to_le32(MCR); + uart_config.re_init.address = __cpu_to_le32(UARTREINITADDR); + uart_config.re_init.value = __cpu_to_le32(INIT); + uart_config.icr.address = __cpu_to_le32(UARTICRADDR); + uart_config.icr.value = __cpu_to_le32(ICR); + uart_config.fcr.address = __cpu_to_le32(UARTFCRADDR); + uart_config.fcr.value = __cpu_to_le32(FCR); + /* FW expects swapped CRC bytes */ + uart_config.crc = __cpu_to_be32(crc32_be(0UL, (char *)&uart_config, + sizeof(uart_config) - 4)); + + serdev_device_write_buf(nxpdev->serdev, (u8 *)&uart_config, sizeof(uart_config)); + serdev_device_wait_until_sent(nxpdev->serdev, 0); + nxpdev->fw_v3_offset_correction += req_len; + return true; + } + return false; +} + +static bool nxp_fw_change_timeout(struct hci_dev *hdev, u16 req_len) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct nxp_bootloader_cmd nxp_cmd7; + + if (req_len != sizeof(nxp_cmd7)) + return false; + + nxp_cmd7.header = __cpu_to_le32(7); + nxp_cmd7.arg = __cpu_to_le32(0x70); + nxp_cmd7.payload_len = 0; + /* FW expects swapped CRC bytes */ + nxp_cmd7.crc = __cpu_to_be32(crc32_be(0UL, (char *)&nxp_cmd7, + sizeof(nxp_cmd7) - 4)); + serdev_device_write_buf(nxpdev->serdev, (u8 *)&nxp_cmd7, 
sizeof(nxp_cmd7)); + serdev_device_wait_until_sent(nxpdev->serdev, 0); + nxpdev->fw_v3_offset_correction += req_len; + return true; +} + +static u32 nxp_get_data_len(const u8 *buf) +{ + struct nxp_bootloader_cmd *hdr = (struct nxp_bootloader_cmd *)buf; + + return __le32_to_cpu(hdr->payload_len); +} + +static bool is_fw_downloading(struct btnxpuart_dev *nxpdev) +{ + return test_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); +} + +static bool process_boot_signature(struct btnxpuart_dev *nxpdev) +{ + if (test_bit(BTNXPUART_CHECK_BOOT_SIGNATURE, &nxpdev->tx_state)) { + clear_bit(BTNXPUART_CHECK_BOOT_SIGNATURE, &nxpdev->tx_state); + wake_up_interruptible(&nxpdev->check_boot_sign_wait_q); + return false; + } + return is_fw_downloading(nxpdev); +} + +static int nxp_request_firmware(struct hci_dev *hdev, const char *fw_name) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + int err = 0; + + if (!strlen(nxpdev->fw_name)) { + snprintf(nxpdev->fw_name, MAX_FW_FILE_NAME_LEN, "%s", fw_name); + + bt_dev_dbg(hdev, "Request Firmware: %s", nxpdev->fw_name); + err = request_firmware(&nxpdev->fw, nxpdev->fw_name, &hdev->dev); + if (err < 0) { + bt_dev_err(hdev, "Firmware file %s not found", nxpdev->fw_name); + clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); + } + } + return err; +} + +/* for legacy chipsets with V1 bootloader */ +static int nxp_recv_chip_ver_v1(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct v1_start_ind *req; + __u16 chip_id; + + req = skb_pull_data(skb, sizeof(*req)); + if (!req) + goto free_skb; + + chip_id = le16_to_cpu(req->chip_id ^ req->chip_id_comp); + if (chip_id == 0xffff) { + nxpdev->fw_dnld_v1_offset = 0; + nxpdev->fw_v1_sent_bytes = 0; + nxpdev->fw_v1_expected_len = HDR_LEN; + release_firmware(nxpdev->fw); + memset(nxpdev->fw_name, 0, sizeof(nxpdev->fw_name)); + nxp_send_ack(NXP_ACK_V1, hdev); + } + +free_skb: + kfree_skb(skb); + return 0; +} + +static int nxp_recv_fw_req_v1(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct btnxpuart_data *nxp_data = nxpdev->nxp_data; + struct v1_data_req *req; + __u16 len; + + if (!process_boot_signature(nxpdev)) + goto free_skb; + + req = skb_pull_data(skb, sizeof(*req)); + if (!req) + goto free_skb; + + len = __le16_to_cpu(req->len ^ req->len_comp); + if (len != 0xffff) { + bt_dev_dbg(hdev, "ERR: Send NAK"); + nxp_send_ack(NXP_NAK_V1, hdev); + goto free_skb; + } + nxp_send_ack(NXP_ACK_V1, hdev); + + len = __le16_to_cpu(req->len); + + if (!nxp_data->helper_fw_name) { + if (!nxpdev->timeout_changed) { + nxpdev->timeout_changed = nxp_fw_change_timeout(hdev, + len); + goto free_skb; + } + if (!nxpdev->baudrate_changed) { + nxpdev->baudrate_changed = nxp_fw_change_baudrate(hdev, + len); + if (nxpdev->baudrate_changed) { + serdev_device_set_baudrate(nxpdev->serdev, + HCI_NXP_SEC_BAUDRATE); + serdev_device_set_flow_control(nxpdev->serdev, true); + nxpdev->current_baudrate = HCI_NXP_SEC_BAUDRATE; + } + goto free_skb; + } + } + + if (!nxp_data->helper_fw_name || nxpdev->helper_downloaded) { + if (nxp_request_firmware(hdev, nxp_data->fw_name)) + goto free_skb; + } else if (nxp_data->helper_fw_name && !nxpdev->helper_downloaded) { + if (nxp_request_firmware(hdev, nxp_data->helper_fw_name)) + goto free_skb; + } + + if (!len) { + bt_dev_dbg(hdev, "FW Downloaded Successfully: %zu bytes", + nxpdev->fw->size); + if (nxp_data->helper_fw_name && !nxpdev->helper_downloaded) { + nxpdev->helper_downloaded = true; + 
serdev_device_wait_until_sent(nxpdev->serdev, 0); + serdev_device_set_baudrate(nxpdev->serdev, + HCI_NXP_SEC_BAUDRATE); + serdev_device_set_flow_control(nxpdev->serdev, true); + } else { + clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); + wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q); + } + goto free_skb; + } + if (len & 0x01) { + /* The CRC did not match at the other end. + * Simply send the same bytes again. + */ + len = nxpdev->fw_v1_sent_bytes; + bt_dev_dbg(hdev, "CRC error. Resend %d bytes of FW.", len); + } else { + nxpdev->fw_dnld_v1_offset += nxpdev->fw_v1_sent_bytes; + + /* The FW bin file is made up of many blocks of + * 16 byte header and payload data chunks. If the + * FW has requested a header, read the payload length + * info from the header, before sending the header. + * In the next iteration, the FW should request the + * payload data chunk, which should be equal to the + * payload length read from header. If there is a + * mismatch, clearly the driver and FW are out of sync, + * and we need to re-send the previous header again. + */ + if (len == nxpdev->fw_v1_expected_len) { + if (len == HDR_LEN) + nxpdev->fw_v1_expected_len = nxp_get_data_len(nxpdev->fw->data + + nxpdev->fw_dnld_v1_offset); + else + nxpdev->fw_v1_expected_len = HDR_LEN; + } else if (len == HDR_LEN) { + /* FW download out of sync. Send previous chunk again */ + nxpdev->fw_dnld_v1_offset -= nxpdev->fw_v1_sent_bytes; + nxpdev->fw_v1_expected_len = HDR_LEN; + } + } + + if (nxpdev->fw_dnld_v1_offset + len <= nxpdev->fw->size) + serdev_device_write_buf(nxpdev->serdev, nxpdev->fw->data + + nxpdev->fw_dnld_v1_offset, len); + nxpdev->fw_v1_sent_bytes = len; + +free_skb: + kfree_skb(skb); + return 0; +} + +static char *nxp_get_fw_name_from_chipid(struct hci_dev *hdev, u16 chipid) +{ + char *fw_name = NULL; + + switch (chipid) { + case CHIP_ID_W9098: + fw_name = FIRMWARE_W9098; + break; + case CHIP_ID_IW416: + fw_name = FIRMWARE_IW416; + break; + case CHIP_ID_IW612: + fw_name = FIRMWARE_IW612; + break; + default: + bt_dev_err(hdev, "Unknown chip signature %04x", chipid); + break; + } + return fw_name; +} + +static int nxp_recv_chip_ver_v3(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct v3_start_ind *req = skb_pull_data(skb, sizeof(*req)); + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + u16 chip_id; + + if (!process_boot_signature(nxpdev)) + goto free_skb; + + chip_id = le16_to_cpu(req->chip_id); + if (!nxp_request_firmware(hdev, nxp_get_fw_name_from_chipid(hdev, + chip_id))) + nxp_send_ack(NXP_ACK_V3, hdev); + +free_skb: + kfree_skb(skb); + return 0; +} + +static int nxp_recv_fw_req_v3(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct v3_data_req *req; + __u16 len; + __u32 offset; + + if (!process_boot_signature(nxpdev)) + goto free_skb; + + req = skb_pull_data(skb, sizeof(*req)); + if (!req || !nxpdev->fw) + goto free_skb; + + nxp_send_ack(NXP_ACK_V3, hdev); + + len = __le16_to_cpu(req->len); + + if (!nxpdev->timeout_changed) { + nxpdev->timeout_changed = nxp_fw_change_timeout(hdev, len); + goto free_skb; + } + + if (!nxpdev->baudrate_changed) { + nxpdev->baudrate_changed = nxp_fw_change_baudrate(hdev, len); + if (nxpdev->baudrate_changed) { + serdev_device_set_baudrate(nxpdev->serdev, + HCI_NXP_SEC_BAUDRATE); + serdev_device_set_flow_control(nxpdev->serdev, true); + nxpdev->current_baudrate = HCI_NXP_SEC_BAUDRATE; + } + goto free_skb; + } + + if (req->len == 0) { + bt_dev_dbg(hdev, "FW Downloaded Successfully: %zu bytes", 
+ nxpdev->fw->size); + clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); + wake_up_interruptible(&nxpdev->fw_dnld_done_wait_q); + goto free_skb; + } + if (req->error) + bt_dev_dbg(hdev, "FW Download received err 0x%02x from chip", + req->error); + + offset = __le32_to_cpu(req->offset); + if (offset < nxpdev->fw_v3_offset_correction) { + /* This scenario should ideally never occur. But if it ever does, + * FW is out of sync and needs a power cycle. + */ + bt_dev_err(hdev, "Something went wrong during FW download"); + bt_dev_err(hdev, "Please power cycle and try again"); + goto free_skb; + } + + serdev_device_write_buf(nxpdev->serdev, nxpdev->fw->data + offset - + nxpdev->fw_v3_offset_correction, len); + +free_skb: + kfree_skb(skb); + return 0; +} + +static int nxp_set_baudrate_cmd(struct hci_dev *hdev, void *data) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + __le32 new_baudrate = __cpu_to_le32(nxpdev->new_baudrate); + struct ps_data *psdata = &nxpdev->psdata; + struct sk_buff *skb; + u8 *status; + + if (!psdata) + return 0; + + skb = nxp_drv_send_cmd(hdev, HCI_NXP_SET_OPER_SPEED, 4, (u8 *)&new_baudrate); + if (IS_ERR(skb)) { + bt_dev_err(hdev, "Setting baudrate failed (%ld)", PTR_ERR(skb)); + return PTR_ERR(skb); + } + + status = (u8 *)skb_pull_data(skb, 1); + if (status) { + if (*status == 0) { + serdev_device_set_baudrate(nxpdev->serdev, nxpdev->new_baudrate); + nxpdev->current_baudrate = nxpdev->new_baudrate; + } + bt_dev_dbg(hdev, "Set baudrate response: status=%d, baudrate=%d", + *status, nxpdev->new_baudrate); + } + kfree_skb(skb); + + return 0; +} + +static int nxp_set_ind_reset(struct hci_dev *hdev, void *data) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct sk_buff *skb; + u8 *status; + u8 pcmd = 0; + int err = 0; + + skb = nxp_drv_send_cmd(hdev, HCI_NXP_IND_RESET, 1, &pcmd); + if (IS_ERR(skb)) + return PTR_ERR(skb); + + status = skb_pull_data(skb, 1); + if (!status || *status) + goto free_skb; + + set_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); + err = nxp_download_firmware(hdev); + if (err < 0) + goto free_skb; + serdev_device_set_baudrate(nxpdev->serdev, nxpdev->fw_init_baudrate); + nxpdev->current_baudrate = nxpdev->fw_init_baudrate; + if (nxpdev->current_baudrate != HCI_NXP_SEC_BAUDRATE) { + nxpdev->new_baudrate = HCI_NXP_SEC_BAUDRATE; + nxp_set_baudrate_cmd(hdev, NULL); + } + hci_cmd_sync_queue(hdev, send_wakeup_method_cmd, NULL, NULL); + hci_cmd_sync_queue(hdev, send_ps_cmd, NULL, NULL); + +free_skb: + kfree_skb(skb); + return err; +} + +/* NXP protocol */ +static int nxp_check_boot_sign(struct btnxpuart_dev *nxpdev) +{ + serdev_device_set_baudrate(nxpdev->serdev, HCI_NXP_PRI_BAUDRATE); + serdev_device_set_flow_control(nxpdev->serdev, true); + set_bit(BTNXPUART_CHECK_BOOT_SIGNATURE, &nxpdev->tx_state); + + return wait_event_interruptible_timeout(nxpdev->check_boot_sign_wait_q, + !test_bit(BTNXPUART_CHECK_BOOT_SIGNATURE, + &nxpdev->tx_state), + msecs_to_jiffies(1000)); +} + +static int nxp_setup(struct hci_dev *hdev) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + int err = 0; + + set_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); + init_waitqueue_head(&nxpdev->fw_dnld_done_wait_q); + init_waitqueue_head(&nxpdev->check_boot_sign_wait_q); + + if (nxp_check_boot_sign(nxpdev)) { + bt_dev_dbg(hdev, "Need FW Download."); + err = nxp_download_firmware(hdev); + if (err < 0) + return err; + } else { + bt_dev_dbg(hdev, "FW already running."); + clear_bit(BTNXPUART_FW_DOWNLOADING, &nxpdev->tx_state); + } + + 
device_property_read_u32(&nxpdev->serdev->dev, "fw-init-baudrate", + &nxpdev->fw_init_baudrate); + if (!nxpdev->fw_init_baudrate) + nxpdev->fw_init_baudrate = FW_INIT_BAUDRATE; + serdev_device_set_baudrate(nxpdev->serdev, nxpdev->fw_init_baudrate); + nxpdev->current_baudrate = nxpdev->fw_init_baudrate; + + if (nxpdev->current_baudrate != HCI_NXP_SEC_BAUDRATE) { + nxpdev->new_baudrate = HCI_NXP_SEC_BAUDRATE; + hci_cmd_sync_queue(hdev, nxp_set_baudrate_cmd, NULL, NULL); + } + + ps_init(hdev); + + return 0; +} + +static int btnxpuart_queue_skb(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + + /* Prepend skb with frame type */ + memcpy(skb_push(skb, 1), &hci_skb_pkt_type(skb), 1); + skb_queue_tail(&nxpdev->txq, skb); + btnxpuart_tx_wakeup(nxpdev); + return 0; +} + +static int nxp_enqueue(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + struct ps_data *psdata = &nxpdev->psdata; + struct hci_command_hdr *hdr; + struct psmode_cmd_payload ps_parm; + struct wakeup_cmd_payload wakeup_parm; + __le32 baudrate_parm; + + /* if vendor commands are received from user space (e.g. hcitool), update + * driver flags accordingly and ask driver to re-send the command to FW. + * In case the payload for any command does not match expected payload + * length, let the firmware and user space program handle it, or throw + * an error. + */ + if (bt_cb(skb)->pkt_type == HCI_COMMAND_PKT && !psdata->driver_sent_cmd) { + hdr = (struct hci_command_hdr *)skb->data; + if (hdr->plen != (skb->len - HCI_COMMAND_HDR_SIZE)) + return btnxpuart_queue_skb(hdev, skb); + + switch (__le16_to_cpu(hdr->opcode)) { + case HCI_NXP_AUTO_SLEEP_MODE: + if (hdr->plen == sizeof(ps_parm)) { + memcpy(&ps_parm, skb->data + HCI_COMMAND_HDR_SIZE, hdr->plen); + if (ps_parm.ps_cmd == BT_PS_ENABLE) + psdata->target_ps_mode = PS_MODE_ENABLE; + else if (ps_parm.ps_cmd == BT_PS_DISABLE) + psdata->target_ps_mode = PS_MODE_DISABLE; + psdata->c2h_ps_interval = __le16_to_cpu(ps_parm.c2h_ps_interval); + hci_cmd_sync_queue(hdev, send_ps_cmd, NULL, NULL); + goto free_skb; + } + break; + case HCI_NXP_WAKEUP_METHOD: + if (hdr->plen == sizeof(wakeup_parm)) { + memcpy(&wakeup_parm, skb->data + HCI_COMMAND_HDR_SIZE, hdr->plen); + psdata->c2h_wakeupmode = wakeup_parm.c2h_wakeupmode; + psdata->c2h_wakeup_gpio = wakeup_parm.c2h_wakeup_gpio; + psdata->h2c_wakeup_gpio = wakeup_parm.h2c_wakeup_gpio; + switch (wakeup_parm.h2c_wakeupmode) { + case BT_CTRL_WAKEUP_METHOD_DSR: + psdata->h2c_wakeupmode = WAKEUP_METHOD_DTR; + break; + case BT_CTRL_WAKEUP_METHOD_BREAK: + default: + psdata->h2c_wakeupmode = WAKEUP_METHOD_BREAK; + break; + } + hci_cmd_sync_queue(hdev, send_wakeup_method_cmd, NULL, NULL); + goto free_skb; + } + break; + case HCI_NXP_SET_OPER_SPEED: + if (hdr->plen == sizeof(baudrate_parm)) { + memcpy(&baudrate_parm, skb->data + HCI_COMMAND_HDR_SIZE, hdr->plen); + nxpdev->new_baudrate = __le32_to_cpu(baudrate_parm); + hci_cmd_sync_queue(hdev, nxp_set_baudrate_cmd, NULL, NULL); + goto free_skb; + } + break; + case HCI_NXP_IND_RESET: + if (hdr->plen == 1) { + hci_cmd_sync_queue(hdev, nxp_set_ind_reset, NULL, NULL); + goto free_skb; + } + break; + default: + break; + } + } + + return btnxpuart_queue_skb(hdev, skb); + +free_skb: + kfree_skb(skb); + return 0; +} + +static struct sk_buff *nxp_dequeue(void *data) +{ + struct btnxpuart_dev *nxpdev = (struct btnxpuart_dev *)data; + + ps_wakeup(nxpdev); + ps_start_timer(nxpdev); + return skb_dequeue(&nxpdev->txq); +} + 
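For context (an illustration, not part of the patch): the comment above nxp_enqueue() notes that these vendor opcodes may also arrive from user space, e.g. via hcitool, in which case the driver updates its power-save state and re-sends the command through hci_cmd_sync_queue(). A minimal user-space sketch of that path using BlueZ's libbluetooth helpers (hci_open_dev()/hci_send_cmd()); the controller index 0 (hci0) and the 2000 ms interval are assumed values for illustration only:

/* Illustrative only: enable auto sleep mode via the vendor command
 * HCI_NXP_AUTO_SLEEP_MODE (opcode 0xfc23 = OGF 0x3f, OCF 0x023).
 * nxp_enqueue() intercepts this command, updates target_ps_mode and
 * c2h_ps_interval, and queues send_ps_cmd() via hci_cmd_sync_queue().
 */
#include <stdio.h>
#include <stdint.h>
#include <bluetooth/bluetooth.h>
#include <bluetooth/hci.h>
#include <bluetooth/hci_lib.h>

int main(void)
{
	struct {
		uint8_t  ps_cmd;          /* 0x02 = BT_PS_ENABLE */
		uint16_t c2h_ps_interval; /* little-endian, in ms */
	} __attribute__((packed)) cmd = { 0x02, htobs(2000) };
	int dd = hci_open_dev(0);     /* hci0 assumed to be the btnxpuart controller */

	if (dd < 0) {
		perror("hci_open_dev");
		return 1;
	}
	/* OGF 0x3f (vendor), OCF 0x023 -> opcode 0xfc23 */
	if (hci_send_cmd(dd, 0x3f, 0x023, sizeof(cmd), &cmd) < 0)
		perror("hci_send_cmd");
	hci_close_dev(dd);
	return 0;
}

The equivalent hcitool invocation would be "hcitool -i hci0 cmd 0x3f 0x023 0x02 0xd0 0x07" (BT_PS_ENABLE followed by the 0x07d0 interval in little-endian byte order).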
+/* btnxpuart based on serdev */ +static void btnxpuart_tx_work(struct work_struct *work) +{ + struct btnxpuart_dev *nxpdev = container_of(work, struct btnxpuart_dev, + tx_work); + struct serdev_device *serdev = nxpdev->serdev; + struct hci_dev *hdev = nxpdev->hdev; + struct sk_buff *skb; + int len; + + while ((skb = nxp_dequeue(nxpdev))) { + len = serdev_device_write_buf(serdev, skb->data, skb->len); + hdev->stat.byte_tx += len; + + skb_pull(skb, len); + if (skb->len > 0) { + skb_queue_head(&nxpdev->txq, skb); + break; + } + + switch (hci_skb_pkt_type(skb)) { + case HCI_COMMAND_PKT: + hdev->stat.cmd_tx++; + break; + case HCI_ACLDATA_PKT: + hdev->stat.acl_tx++; + break; + case HCI_SCODATA_PKT: + hdev->stat.sco_tx++; + break; + } + + kfree_skb(skb); + } + clear_bit(BTNXPUART_TX_STATE_ACTIVE, &nxpdev->tx_state); +} + +static int btnxpuart_open(struct hci_dev *hdev) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + int err = 0; + + err = serdev_device_open(nxpdev->serdev); + if (err) { + bt_dev_err(hdev, "Unable to open UART device %s", + dev_name(&nxpdev->serdev->dev)); + } else { + set_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state); + } + return err; +} + +static int btnxpuart_close(struct hci_dev *hdev) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + + ps_wakeup(nxpdev); + serdev_device_close(nxpdev->serdev); + clear_bit(BTNXPUART_SERDEV_OPEN, &nxpdev->tx_state); + return 0; +} + +static int btnxpuart_flush(struct hci_dev *hdev) +{ + struct btnxpuart_dev *nxpdev = hci_get_drvdata(hdev); + + /* Flush any pending characters */ + serdev_device_write_flush(nxpdev->serdev); + skb_queue_purge(&nxpdev->txq); + + cancel_work_sync(&nxpdev->tx_work); + + kfree_skb(nxpdev->rx_skb); + nxpdev->rx_skb = NULL; + + return 0; +} + +static const struct h4_recv_pkt nxp_recv_pkts[] = { + { H4_RECV_ACL, .recv = hci_recv_frame }, + { H4_RECV_SCO, .recv = hci_recv_frame }, + { H4_RECV_EVENT, .recv = hci_recv_frame }, + { NXP_RECV_CHIP_VER_V1, .recv = nxp_recv_chip_ver_v1 }, + { NXP_RECV_FW_REQ_V1, .recv = nxp_recv_fw_req_v1 }, + { NXP_RECV_CHIP_VER_V3, .recv = nxp_recv_chip_ver_v3 }, + { NXP_RECV_FW_REQ_V3, .recv = nxp_recv_fw_req_v3 }, +}; + +static int btnxpuart_receive_buf(struct serdev_device *serdev, const u8 *data, + size_t count) +{ + struct btnxpuart_dev *nxpdev = serdev_device_get_drvdata(serdev); + + ps_start_timer(nxpdev); + + nxpdev->rx_skb = h4_recv_buf(nxpdev->hdev, nxpdev->rx_skb, data, count, + nxp_recv_pkts, ARRAY_SIZE(nxp_recv_pkts)); + if (IS_ERR(nxpdev->rx_skb)) { + int err = PTR_ERR(nxpdev->rx_skb); + /* Safe to ignore out-of-sync bootloader signatures */ + if (is_fw_downloading(nxpdev)) + return count; + bt_dev_err(nxpdev->hdev, "Frame reassembly failed (%d)", err); + nxpdev->rx_skb = NULL; + return err; + } + nxpdev->hdev->stat.byte_rx += count; + return count; +} + +static void btnxpuart_write_wakeup(struct serdev_device *serdev) +{ + serdev_device_write_wakeup(serdev); +} + +static const struct serdev_device_ops btnxpuart_client_ops = { + .receive_buf = btnxpuart_receive_buf, + .write_wakeup = btnxpuart_write_wakeup, +}; + +static int nxp_serdev_probe(struct serdev_device *serdev) +{ + struct hci_dev *hdev; + struct btnxpuart_dev *nxpdev; + + nxpdev = devm_kzalloc(&serdev->dev, sizeof(*nxpdev), GFP_KERNEL); + if (!nxpdev) + return -ENOMEM; + + nxpdev->nxp_data = (struct btnxpuart_data *)device_get_match_data(&serdev->dev); + + nxpdev->serdev = serdev; + serdev_device_set_drvdata(serdev, nxpdev); + + serdev_device_set_client_ops(serdev, &btnxpuart_client_ops); + + 
INIT_WORK(&nxpdev->tx_work, btnxpuart_tx_work); + skb_queue_head_init(&nxpdev->txq); + + crc8_populate_msb(crc8_table, POLYNOMIAL8); + + /* Initialize and register HCI device */ + hdev = hci_alloc_dev(); + if (!hdev) { + dev_err(&serdev->dev, "Can't allocate HCI device\n"); + return -ENOMEM; + } + + nxpdev->hdev = hdev; + + hdev->bus = HCI_UART; + hci_set_drvdata(hdev, nxpdev); + + hdev->manufacturer = MANUFACTURER_NXP; + hdev->open = btnxpuart_open; + hdev->close = btnxpuart_close; + hdev->flush = btnxpuart_flush; + hdev->setup = nxp_setup; + hdev->send = nxp_enqueue; + SET_HCIDEV_DEV(hdev, &serdev->dev); + + if (hci_register_dev(hdev) < 0) { + dev_err(&serdev->dev, "Can't register HCI device\n"); + hci_free_dev(hdev); + return -ENODEV; + } + + ps_init_work(hdev); + ps_init_timer(hdev); + + return 0; +} + +static void nxp_serdev_remove(struct serdev_device *serdev) +{ + struct btnxpuart_dev *nxpdev = serdev_device_get_drvdata(serdev); + struct hci_dev *hdev = nxpdev->hdev; + + /* Restore FW baudrate to fw_init_baudrate if changed. + * This will ensure FW baudrate is in sync with + * driver baudrate in case this driver is re-inserted. + */ + if (nxpdev->current_baudrate != nxpdev->fw_init_baudrate) { + nxpdev->new_baudrate = nxpdev->fw_init_baudrate; + nxp_set_baudrate_cmd(hdev, NULL); + } + + ps_cancel_timer(nxpdev); + hci_unregister_dev(hdev); + hci_free_dev(hdev); +} + +static struct btnxpuart_data w8987_data = { + .helper_fw_name = NULL, + .fw_name = FIRMWARE_W8987, +}; + +static struct btnxpuart_data w8997_data = { + .helper_fw_name = FIRMWARE_HELPER, + .fw_name = FIRMWARE_W8997, +}; + +static const struct of_device_id nxpuart_of_match_table[] = { + { .compatible = "nxp,88w8987-bt", .data = &w8987_data }, + { .compatible = "nxp,88w8997-bt", .data = &w8997_data }, + { } +}; +MODULE_DEVICE_TABLE(of, nxpuart_of_match_table); + +static struct serdev_device_driver nxp_serdev_driver = { + .probe = nxp_serdev_probe, + .remove = nxp_serdev_remove, + .driver = { + .name = "btnxpuart", + .of_match_table = of_match_ptr(nxpuart_of_match_table), + }, +}; + +module_serdev_device_driver(nxp_serdev_driver); + +MODULE_AUTHOR("Neeraj Sanjay Kale <neeraj.sanjaykale@nxp.com>"); +MODULE_DESCRIPTION("NXP Bluetooth Serial driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/bluetooth/btqca.c b/drivers/bluetooth/btqca.c index c9064d34d830..fd0941fe8608 100644 --- a/drivers/bluetooth/btqca.c +++ b/drivers/bluetooth/btqca.c @@ -614,6 +614,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, config.type = ELF_TYPE_PATCH; snprintf(config.fwname, sizeof(config.fwname), "qca/msbtfw%02x.mbn", rom_ver); + } else if (soc_type == QCA_WCN6855) { + snprintf(config.fwname, sizeof(config.fwname), + "qca/hpbtfw%02x.tlv", rom_ver); } else { snprintf(config.fwname, sizeof(config.fwname), "qca/rampatch_%08x.bin", soc_ver); @@ -648,6 +651,9 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, else if (soc_type == QCA_WCN6750) snprintf(config.fwname, sizeof(config.fwname), "qca/msnv%02x.bin", rom_ver); + else if (soc_type == QCA_WCN6855) + snprintf(config.fwname, sizeof(config.fwname), + "qca/hpnv%02x.bin", rom_ver); else snprintf(config.fwname, sizeof(config.fwname), "qca/nvm_%08x.bin", soc_ver); @@ -685,11 +691,17 @@ int qca_uart_setup(struct hci_dev *hdev, uint8_t baudrate, return err; } - if (soc_type == QCA_WCN3991 || soc_type == QCA_WCN6750) { + switch (soc_type) { + case QCA_WCN3991: + case QCA_WCN6750: + case QCA_WCN6855: /* get fw build info */ err = qca_read_fw_build_info(hdev); if (err < 0) 
return err; + break; + default: + break; } bt_dev_info(hdev, "QCA setup on UART is completed"); diff --git a/drivers/bluetooth/btqca.h b/drivers/bluetooth/btqca.h index 61e9a50e66ae..b884095bcd9d 100644 --- a/drivers/bluetooth/btqca.h +++ b/drivers/bluetooth/btqca.h @@ -147,6 +147,7 @@ enum qca_btsoc_type { QCA_WCN3991, QCA_QCA6390, QCA_WCN6750, + QCA_WCN6855, }; #if IS_ENABLED(CONFIG_BT_QCA) @@ -168,6 +169,10 @@ static inline bool qca_is_wcn6750(enum qca_btsoc_type soc_type) { return soc_type == QCA_WCN6750; } +static inline bool qca_is_wcn6855(enum qca_btsoc_type soc_type) +{ + return soc_type == QCA_WCN6855; +} #else @@ -206,6 +211,11 @@ static inline bool qca_is_wcn6750(enum qca_btsoc_type soc_type) return false; } +static inline bool qca_is_wcn6855(enum qca_btsoc_type soc_type) +{ + return false; +} + static inline int qca_send_pre_shutdown_cmd(struct hci_dev *hdev) { return -EOPNOTSUPP; diff --git a/drivers/bluetooth/btrtl.c b/drivers/bluetooth/btrtl.c index 69c3fe649ca7..2915c82d719d 100644 --- a/drivers/bluetooth/btrtl.c +++ b/drivers/bluetooth/btrtl.c @@ -17,19 +17,26 @@ #define VERSION "0.1" +#define RTL_CHIP_8723CS_CG 3 +#define RTL_CHIP_8723CS_VF 4 +#define RTL_CHIP_8723CS_XX 5 #define RTL_EPATCH_SIGNATURE "Realtech" +#define RTL_EPATCH_SIGNATURE_V2 "RTBTCore" +#define RTL_ROM_LMP_8703B 0x8703 #define RTL_ROM_LMP_8723A 0x1200 #define RTL_ROM_LMP_8723B 0x8723 #define RTL_ROM_LMP_8821A 0x8821 #define RTL_ROM_LMP_8761A 0x8761 #define RTL_ROM_LMP_8822B 0x8822 #define RTL_ROM_LMP_8852A 0x8852 +#define RTL_ROM_LMP_8851B 0x8851 #define RTL_CONFIG_MAGIC 0x8723ab55 #define IC_MATCH_FL_LMPSUBV (1 << 0) #define IC_MATCH_FL_HCIREV (1 << 1) #define IC_MATCH_FL_HCIVER (1 << 2) #define IC_MATCH_FL_HCIBUS (1 << 3) +#define IC_MATCH_FL_CHIP_TYPE (1 << 4) #define IC_INFO(lmps, hcir, hciv, bus) \ .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_HCIREV | \ IC_MATCH_FL_HCIVER | IC_MATCH_FL_HCIBUS, \ @@ -38,6 +45,14 @@ .hci_ver = (hciv), \ .hci_bus = (bus) +#define RTL_CHIP_SUBVER (&(struct rtl_vendor_cmd) {{0x10, 0x38, 0x04, 0x28, 0x80}}) +#define RTL_CHIP_REV (&(struct rtl_vendor_cmd) {{0x10, 0x3A, 0x04, 0x28, 0x80}}) +#define RTL_SEC_PROJ (&(struct rtl_vendor_cmd) {{0x10, 0xA4, 0x0D, 0x00, 0xb0}}) + +#define RTL_PATCH_SNIPPETS 0x01 +#define RTL_PATCH_DUMMY_HEADER 0x02 +#define RTL_PATCH_SECURITY_HEADER 0x03 + enum btrtl_chip_id { CHIP_ID_8723A, CHIP_ID_8723B, @@ -51,6 +66,7 @@ enum btrtl_chip_id { CHIP_ID_8852A = 18, CHIP_ID_8852B = 20, CHIP_ID_8852C = 25, + CHIP_ID_8851B = 36, }; struct id_table { @@ -59,6 +75,7 @@ struct id_table { __u16 hci_rev; __u8 hci_ver; __u8 hci_bus; + __u8 chip_type; bool config_needed; bool has_rom_version; bool has_msft_ext; @@ -75,6 +92,8 @@ struct btrtl_device_info { int cfg_len; bool drop_fw; int project_id; + u8 key_id; + struct list_head patch_subsecs; }; static const struct id_table ic_id_table[] = { @@ -99,6 +118,39 @@ static const struct id_table ic_id_table[] = { .fw_name = "rtl_bt/rtl8723b_fw.bin", .cfg_name = "rtl_bt/rtl8723b_config" }, + /* 8723CS-CG */ + { .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_CHIP_TYPE | + IC_MATCH_FL_HCIBUS, + .lmp_subver = RTL_ROM_LMP_8703B, + .chip_type = RTL_CHIP_8723CS_CG, + .hci_bus = HCI_UART, + .config_needed = true, + .has_rom_version = true, + .fw_name = "rtl_bt/rtl8723cs_cg_fw.bin", + .cfg_name = "rtl_bt/rtl8723cs_cg_config" }, + + /* 8723CS-VF */ + { .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_CHIP_TYPE | + IC_MATCH_FL_HCIBUS, + .lmp_subver = RTL_ROM_LMP_8703B, + .chip_type = RTL_CHIP_8723CS_VF, + .hci_bus = 
HCI_UART, + .config_needed = true, + .has_rom_version = true, + .fw_name = "rtl_bt/rtl8723cs_vf_fw.bin", + .cfg_name = "rtl_bt/rtl8723cs_vf_config" }, + + /* 8723CS-XX */ + { .match_flags = IC_MATCH_FL_LMPSUBV | IC_MATCH_FL_CHIP_TYPE | + IC_MATCH_FL_HCIBUS, + .lmp_subver = RTL_ROM_LMP_8703B, + .chip_type = RTL_CHIP_8723CS_XX, + .hci_bus = HCI_UART, + .config_needed = true, + .has_rom_version = true, + .fw_name = "rtl_bt/rtl8723cs_xx_fw.bin", + .cfg_name = "rtl_bt/rtl8723cs_xx_config" }, + /* 8723D */ { IC_INFO(RTL_ROM_LMP_8723B, 0xd, 0x8, HCI_USB), .config_needed = true, @@ -128,6 +180,14 @@ static const struct id_table ic_id_table[] = { .fw_name = "rtl_bt/rtl8821c_fw.bin", .cfg_name = "rtl_bt/rtl8821c_config" }, + /* 8821CS */ + { IC_INFO(RTL_ROM_LMP_8821A, 0xc, 0x8, HCI_UART), + .config_needed = true, + .has_rom_version = true, + .has_msft_ext = true, + .fw_name = "rtl_bt/rtl8821cs_fw.bin", + .cfg_name = "rtl_bt/rtl8821cs_config" }, + /* 8761A */ { IC_INFO(RTL_ROM_LMP_8761A, 0xa, 0x6, HCI_USB), .config_needed = false, @@ -190,6 +250,14 @@ static const struct id_table ic_id_table[] = { .fw_name = "rtl_bt/rtl8852au_fw.bin", .cfg_name = "rtl_bt/rtl8852au_config" }, + /* 8852B with UART interface */ + { IC_INFO(RTL_ROM_LMP_8852A, 0xb, 0xb, HCI_UART), + .config_needed = true, + .has_rom_version = true, + .has_msft_ext = true, + .fw_name = "rtl_bt/rtl8852bs_fw.bin", + .cfg_name = "rtl_bt/rtl8852bs_config" }, + /* 8852B */ { IC_INFO(RTL_ROM_LMP_8852A, 0xb, 0xb, HCI_USB), .config_needed = false, @@ -205,10 +273,19 @@ static const struct id_table ic_id_table[] = { .has_msft_ext = true, .fw_name = "rtl_bt/rtl8852cu_fw.bin", .cfg_name = "rtl_bt/rtl8852cu_config" }, + + /* 8851B */ + { IC_INFO(RTL_ROM_LMP_8851B, 0xb, 0xc, HCI_USB), + .config_needed = false, + .has_rom_version = true, + .has_msft_ext = false, + .fw_name = "rtl_bt/rtl8851bu_fw.bin", + .cfg_name = "rtl_bt/rtl8851bu_config" }, }; static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev, - u8 hci_ver, u8 hci_bus) + u8 hci_ver, u8 hci_bus, + u8 chip_type) { int i; @@ -225,6 +302,9 @@ static const struct id_table *btrtl_match_ic(u16 lmp_subver, u16 hci_rev, if ((ic_id_table[i].match_flags & IC_MATCH_FL_HCIBUS) && (ic_id_table[i].hci_bus != hci_bus)) continue; + if ((ic_id_table[i].match_flags & IC_MATCH_FL_CHIP_TYPE) && + (ic_id_table[i].chip_type != chip_type)) + continue; break; } @@ -284,6 +364,227 @@ static int rtl_read_rom_version(struct hci_dev *hdev, u8 *version) return 0; } +static int btrtl_vendor_read_reg16(struct hci_dev *hdev, + struct rtl_vendor_cmd *cmd, u8 *rp) +{ + struct sk_buff *skb; + int err = 0; + + skb = __hci_cmd_sync(hdev, 0xfc61, sizeof(*cmd), cmd, + HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + err = PTR_ERR(skb); + rtl_dev_err(hdev, "RTL: Read reg16 failed (%d)", err); + return err; + } + + if (skb->len != 3 || skb->data[0]) { + bt_dev_err(hdev, "RTL: Read reg16 length mismatch"); + kfree_skb(skb); + return -EIO; + } + + if (rp) + memcpy(rp, skb->data + 1, 2); + + kfree_skb(skb); + + return 0; +} + +static void *rtl_iov_pull_data(struct rtl_iovec *iov, u32 len) +{ + void *data = iov->data; + + if (iov->len < len) + return NULL; + + iov->data += len; + iov->len -= len; + + return data; +} + +static void btrtl_insert_ordered_subsec(struct rtl_subsection *node, + struct btrtl_device_info *btrtl_dev) +{ + struct list_head *pos; + struct list_head *next; + struct rtl_subsection *subsec; + + list_for_each_safe(pos, next, &btrtl_dev->patch_subsecs) { + subsec = list_entry(pos, struct rtl_subsection, list); + 
if (subsec->prio >= node->prio) + break; + } + __list_add(&node->list, pos->prev, pos); +} + +static int btrtl_parse_section(struct hci_dev *hdev, + struct btrtl_device_info *btrtl_dev, u32 opcode, + u8 *data, u32 len) +{ + struct rtl_section_hdr *hdr; + struct rtl_subsection *subsec; + struct rtl_common_subsec *common_subsec; + struct rtl_sec_hdr *sec_hdr; + int i; + u8 *ptr; + u16 num_subsecs; + u32 subsec_len; + int rc = 0; + struct rtl_iovec iov = { + .data = data, + .len = len, + }; + + hdr = rtl_iov_pull_data(&iov, sizeof(*hdr)); + if (!hdr) + return -EINVAL; + num_subsecs = le16_to_cpu(hdr->num); + + for (i = 0; i < num_subsecs; i++) { + common_subsec = rtl_iov_pull_data(&iov, sizeof(*common_subsec)); + if (!common_subsec) + break; + subsec_len = le32_to_cpu(common_subsec->len); + + rtl_dev_dbg(hdev, "subsec, eco 0x%02x, len %08x", + common_subsec->eco, subsec_len); + + ptr = rtl_iov_pull_data(&iov, subsec_len); + if (!ptr) + break; + + if (common_subsec->eco != btrtl_dev->rom_version + 1) + continue; + + switch (opcode) { + case RTL_PATCH_SECURITY_HEADER: + sec_hdr = (void *)common_subsec; + if (sec_hdr->key_id != btrtl_dev->key_id) + continue; + break; + } + + subsec = kzalloc(sizeof(*subsec), GFP_KERNEL); + if (!subsec) + return -ENOMEM; + subsec->opcode = opcode; + subsec->prio = common_subsec->prio; + subsec->len = subsec_len; + subsec->data = ptr; + btrtl_insert_ordered_subsec(subsec, btrtl_dev); + rc += subsec_len; + } + + return rc; +} + +static int rtlbt_parse_firmware_v2(struct hci_dev *hdev, + struct btrtl_device_info *btrtl_dev, + unsigned char **_buf) +{ + struct rtl_epatch_header_v2 *hdr; + int rc; + u8 reg_val[2]; + u8 key_id; + u32 num_sections; + struct rtl_section *section; + struct rtl_subsection *entry, *tmp; + u32 section_len; + u32 opcode; + int len = 0; + int i; + u8 *ptr; + struct rtl_iovec iov = { + .data = btrtl_dev->fw_data, + .len = btrtl_dev->fw_len - 7, /* Cut the tail */ + }; + + rc = btrtl_vendor_read_reg16(hdev, RTL_SEC_PROJ, reg_val); + if (rc < 0) + return -EIO; + key_id = reg_val[0]; + + rtl_dev_dbg(hdev, "%s: key id %u", __func__, key_id); + + btrtl_dev->key_id = key_id; + + hdr = rtl_iov_pull_data(&iov, sizeof(*hdr)); + if (!hdr) + return -EINVAL; + num_sections = le32_to_cpu(hdr->num_sections); + + rtl_dev_dbg(hdev, "FW version %08x-%08x", *((u32 *)hdr->fw_version), + *((u32 *)(hdr->fw_version + 4))); + + for (i = 0; i < num_sections; i++) { + section = rtl_iov_pull_data(&iov, sizeof(*section)); + if (!section) + break; + section_len = le32_to_cpu(section->len); + opcode = le32_to_cpu(section->opcode); + + rtl_dev_dbg(hdev, "opcode 0x%04x", section->opcode); + + ptr = rtl_iov_pull_data(&iov, section_len); + if (!ptr) + break; + + switch (opcode) { + case RTL_PATCH_SNIPPETS: + rc = btrtl_parse_section(hdev, btrtl_dev, opcode, + ptr, section_len); + break; + case RTL_PATCH_SECURITY_HEADER: + /* If key_id from chip is zero, ignore all security + * headers. + */ + if (!key_id) + break; + rc = btrtl_parse_section(hdev, btrtl_dev, opcode, + ptr, section_len); + break; + case RTL_PATCH_DUMMY_HEADER: + rc = btrtl_parse_section(hdev, btrtl_dev, opcode, + ptr, section_len); + break; + default: + rc = 0; + break; + } + if (rc < 0) { + rtl_dev_err(hdev, "RTL: Parse section (%u) err %d", + opcode, rc); + return rc; + } + len += rc; + } + + if (!len) + return -ENODATA; + + /* Allocate mem and copy all found subsecs. 
*/ + ptr = kvmalloc(len, GFP_KERNEL); + if (!ptr) + return -ENOMEM; + + len = 0; + list_for_each_entry_safe(entry, tmp, &btrtl_dev->patch_subsecs, list) { + rtl_dev_dbg(hdev, "RTL: opcode %08x, addr %p, len 0x%x", + entry->opcode, entry->data, entry->len); + memcpy(ptr + len, entry->data, entry->len); + len += entry->len; + } + + if (!len) + return -EPERM; + + *_buf = ptr; + return len; +} + static int rtlbt_parse_firmware(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev, unsigned char **_buf) @@ -307,6 +608,7 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, { RTL_ROM_LMP_8723B, 1 }, { RTL_ROM_LMP_8821A, 2 }, { RTL_ROM_LMP_8761A, 3 }, + { RTL_ROM_LMP_8703B, 7 }, { RTL_ROM_LMP_8822B, 8 }, { RTL_ROM_LMP_8723B, 9 }, /* 8723D */ { RTL_ROM_LMP_8821A, 10 }, /* 8821C */ @@ -315,9 +617,21 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, { RTL_ROM_LMP_8852A, 18 }, /* 8852A */ { RTL_ROM_LMP_8852A, 20 }, /* 8852B */ { RTL_ROM_LMP_8852A, 25 }, /* 8852C */ + { RTL_ROM_LMP_8851B, 36 }, /* 8851B */ }; - min_size = sizeof(struct rtl_epatch_header) + sizeof(extension_sig) + 3; + if (btrtl_dev->fw_len <= 8) + return -EINVAL; + + if (!memcmp(btrtl_dev->fw_data, RTL_EPATCH_SIGNATURE, 8)) + min_size = sizeof(struct rtl_epatch_header) + + sizeof(extension_sig) + 3; + else if (!memcmp(btrtl_dev->fw_data, RTL_EPATCH_SIGNATURE_V2, 8)) + min_size = sizeof(struct rtl_epatch_header_v2) + + sizeof(extension_sig) + 3; + else + return -EINVAL; + if (btrtl_dev->fw_len < min_size) return -EINVAL; @@ -382,12 +696,14 @@ static int rtlbt_parse_firmware(struct hci_dev *hdev, return -EINVAL; } - epatch_info = (struct rtl_epatch_header *)btrtl_dev->fw_data; - if (memcmp(epatch_info->signature, RTL_EPATCH_SIGNATURE, 8) != 0) { + if (memcmp(btrtl_dev->fw_data, RTL_EPATCH_SIGNATURE, 8) != 0) { + if (!memcmp(btrtl_dev->fw_data, RTL_EPATCH_SIGNATURE_V2, 8)) + return rtlbt_parse_firmware_v2(hdev, btrtl_dev, _buf); rtl_dev_err(hdev, "bad EPATCH signature"); return -EINVAL; } + epatch_info = (struct rtl_epatch_header *)btrtl_dev->fw_data; num_patches = le16_to_cpu(epatch_info->num_patches); BT_DBG("fw_version=%x, num_patches=%d", le32_to_cpu(epatch_info->fw_version), num_patches); @@ -451,6 +767,7 @@ static int rtl_download_firmware(struct hci_dev *hdev, int frag_len = RTL_FRAG_LEN; int ret = 0; int i; + int j = 0; struct sk_buff *skb; struct hci_rp_read_local_version *rp; @@ -461,17 +778,16 @@ static int rtl_download_firmware(struct hci_dev *hdev, for (i = 0; i < frag_num; i++) { struct sk_buff *skb; - BT_DBG("download fw (%d/%d)", i, frag_num); - - if (i > 0x7f) - dl_cmd->index = (i & 0x7f) + 1; - else - dl_cmd->index = i; + dl_cmd->index = j++; + if (dl_cmd->index == 0x7f) + j = 1; if (i == (frag_num - 1)) { dl_cmd->index |= 0x80; /* data end */ frag_len = fw_len % RTL_FRAG_LEN; } + rtl_dev_dbg(hdev, "download fw (%d/%d). 
index = %d", i, + frag_num, dl_cmd->index); memcpy(dl_cmd->data, data, frag_len); /* Send download command */ @@ -587,10 +903,60 @@ out: return ret; } +static bool rtl_has_chip_type(u16 lmp_subver) +{ + switch (lmp_subver) { + case RTL_ROM_LMP_8703B: + return true; + default: + break; + } + + return false; +} + +static int rtl_read_chip_type(struct hci_dev *hdev, u8 *type) +{ + struct rtl_chip_type_evt *chip_type; + struct sk_buff *skb; + const unsigned char cmd_buf[] = {0x00, 0x94, 0xa0, 0x00, 0xb0}; + + /* Read RTL chip type command */ + skb = __hci_cmd_sync(hdev, 0xfc61, 5, cmd_buf, HCI_INIT_TIMEOUT); + if (IS_ERR(skb)) { + rtl_dev_err(hdev, "Read chip type failed (%ld)", + PTR_ERR(skb)); + return PTR_ERR(skb); + } + + chip_type = skb_pull_data(skb, sizeof(*chip_type)); + if (!chip_type) { + rtl_dev_err(hdev, "RTL chip type event length mismatch"); + kfree_skb(skb); + return -EIO; + } + + rtl_dev_info(hdev, "chip_type status=%x type=%x", + chip_type->status, chip_type->type); + + *type = chip_type->type & 0x0f; + + kfree_skb(skb); + return 0; +} + void btrtl_free(struct btrtl_device_info *btrtl_dev) { + struct rtl_subsection *entry, *tmp; + kvfree(btrtl_dev->fw_data); kvfree(btrtl_dev->cfg_data); + + list_for_each_entry_safe(entry, tmp, &btrtl_dev->patch_subsecs, list) { + list_del(&entry->list); + kfree(entry); + } + kfree(btrtl_dev); } EXPORT_SYMBOL_GPL(btrtl_free); @@ -603,10 +969,11 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev, struct hci_rp_read_local_version *resp; char cfg_name[40]; u16 hci_rev, lmp_subver; - u8 hci_ver; + u8 hci_ver, lmp_ver, chip_type = 0; int ret; u16 opcode; u8 cmd[2]; + u8 reg_val[2]; btrtl_dev = kzalloc(sizeof(*btrtl_dev), GFP_KERNEL); if (!btrtl_dev) { @@ -614,6 +981,31 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev, goto err_alloc; } + INIT_LIST_HEAD(&btrtl_dev->patch_subsecs); + +check_version: + ret = btrtl_vendor_read_reg16(hdev, RTL_CHIP_SUBVER, reg_val); + if (ret < 0) + goto err_free; + lmp_subver = get_unaligned_le16(reg_val); + + if (lmp_subver == RTL_ROM_LMP_8822B) { + ret = btrtl_vendor_read_reg16(hdev, RTL_CHIP_REV, reg_val); + if (ret < 0) + goto err_free; + hci_rev = get_unaligned_le16(reg_val); + + /* 8822E */ + if (hci_rev == 0x000e) { + hci_ver = 0x0c; + lmp_ver = 0x0c; + btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, + hci_ver, hdev->bus, + chip_type); + goto next; + } + } + skb = btrtl_read_local_version(hdev); if (IS_ERR(skb)) { ret = PTR_ERR(skb); @@ -621,19 +1013,32 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev, } resp = (struct hci_rp_read_local_version *)skb->data; - rtl_dev_info(hdev, "examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x lmp_subver=%04x", - resp->hci_ver, resp->hci_rev, - resp->lmp_ver, resp->lmp_subver); - hci_ver = resp->hci_ver; - hci_rev = le16_to_cpu(resp->hci_rev); + hci_ver = resp->hci_ver; + hci_rev = le16_to_cpu(resp->hci_rev); + lmp_ver = resp->lmp_ver; lmp_subver = le16_to_cpu(resp->lmp_subver); + kfree_skb(skb); + + if (rtl_has_chip_type(lmp_subver)) { + ret = rtl_read_chip_type(hdev, &chip_type); + if (ret) + goto err_free; + } + btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver, - hdev->bus); + hdev->bus, chip_type); - if (!btrtl_dev->ic_info) +next: + rtl_dev_info(hdev, "examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x lmp_subver=%04x", + hci_ver, hci_rev, + lmp_ver, lmp_subver); + + if (!btrtl_dev->ic_info && !btrtl_dev->drop_fw) btrtl_dev->drop_fw = true; + else + btrtl_dev->drop_fw = false; if (btrtl_dev->drop_fw) 
{ opcode = hci_opcode_pack(0x3f, 0x66); @@ -642,41 +1047,25 @@ struct btrtl_device_info *btrtl_initialize(struct hci_dev *hdev, skb = bt_skb_alloc(sizeof(cmd), GFP_KERNEL); if (!skb) - goto out_free; + goto err_free; skb_put_data(skb, cmd, sizeof(cmd)); hci_skb_pkt_type(skb) = HCI_COMMAND_PKT; - hdev->send(hdev, skb); + ret = hdev->send(hdev, skb); + if (ret < 0) { + bt_dev_err(hdev, "sending frame failed (%d)", ret); + kfree_skb(skb); + goto err_free; + } /* Ensure the above vendor command is sent to controller and * process has done. */ msleep(200); - /* Read the local version again. Expect to have the vanilla - * version as cold boot. - */ - skb = btrtl_read_local_version(hdev); - if (IS_ERR(skb)) { - ret = PTR_ERR(skb); - goto err_free; - } - - resp = (struct hci_rp_read_local_version *)skb->data; - rtl_dev_info(hdev, "examining hci_ver=%02x hci_rev=%04x lmp_ver=%02x lmp_subver=%04x", - resp->hci_ver, resp->hci_rev, - resp->lmp_ver, resp->lmp_subver); - - hci_ver = resp->hci_ver; - hci_rev = le16_to_cpu(resp->hci_rev); - lmp_subver = le16_to_cpu(resp->lmp_subver); - - btrtl_dev->ic_info = btrtl_match_ic(lmp_subver, hci_rev, hci_ver, - hdev->bus); + goto check_version; } -out_free: - kfree_skb(skb); if (!btrtl_dev->ic_info) { rtl_dev_info(hdev, "unknown IC info, lmp subver %04x, hci rev %04x, hci ver %04x", @@ -755,6 +1144,8 @@ int btrtl_download_firmware(struct hci_dev *hdev, case RTL_ROM_LMP_8761A: case RTL_ROM_LMP_8822B: case RTL_ROM_LMP_8852A: + case RTL_ROM_LMP_8703B: + case RTL_ROM_LMP_8851B: return btrtl_setup_rtl8723b(hdev, btrtl_dev); default: rtl_dev_info(hdev, "assuming no firmware upload needed"); @@ -779,6 +1170,7 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev) case CHIP_ID_8852A: case CHIP_ID_8852B: case CHIP_ID_8852C: + case CHIP_ID_8851B: set_bit(HCI_QUIRK_VALID_LE_STATES, &hdev->quirks); set_bit(HCI_QUIRK_WIDEBAND_SPEECH_SUPPORTED, &hdev->quirks); @@ -795,6 +1187,22 @@ void btrtl_set_quirks(struct hci_dev *hdev, struct btrtl_device_info *btrtl_dev) rtl_dev_dbg(hdev, "WBS supported not enabled."); break; } + + if (!btrtl_dev->ic_info) + return; + + switch (btrtl_dev->ic_info->lmp_subver) { + case RTL_ROM_LMP_8703B: + /* 8723CS reports two pages for local ext features, + * but it doesn't support any features from page 2 - + * it either responds with garbage or with error status + */ + set_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2, + &hdev->quirks); + break; + default: + break; + } } EXPORT_SYMBOL_GPL(btrtl_set_quirks); @@ -953,6 +1361,12 @@ MODULE_FIRMWARE("rtl_bt/rtl8723b_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8723b_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8723bs_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8723bs_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8723cs_cg_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8723cs_cg_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8723cs_vf_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8723cs_vf_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8723cs_xx_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8723cs_xx_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8723ds_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8723ds_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8761a_fw.bin"); @@ -963,7 +1377,11 @@ MODULE_FIRMWARE("rtl_bt/rtl8822b_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8822b_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8852au_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8852au_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8852bs_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8852bs_config.bin"); MODULE_FIRMWARE("rtl_bt/rtl8852bu_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8852bu_config.bin"); 
MODULE_FIRMWARE("rtl_bt/rtl8852cu_fw.bin"); MODULE_FIRMWARE("rtl_bt/rtl8852cu_config.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8851bu_fw.bin"); +MODULE_FIRMWARE("rtl_bt/rtl8851bu_config.bin"); diff --git a/drivers/bluetooth/btrtl.h b/drivers/bluetooth/btrtl.h index ebf0101c959b..adb4c2c9abc5 100644 --- a/drivers/bluetooth/btrtl.h +++ b/drivers/bluetooth/btrtl.h @@ -14,6 +14,11 @@ struct btrtl_device_info; +struct rtl_chip_type_evt { + __u8 status; + __u8 type; +} __packed; + struct rtl_download_cmd { __u8 index; __u8 data[RTL_FRAG_LEN]; @@ -44,7 +49,58 @@ struct rtl_vendor_config_entry { struct rtl_vendor_config { __le32 signature; __le16 total_len; - struct rtl_vendor_config_entry entry[]; + __u8 entry[]; +} __packed; + +struct rtl_epatch_header_v2 { + __u8 signature[8]; + __u8 fw_version[8]; + __le32 num_sections; +} __packed; + +struct rtl_section { + __le32 opcode; + __le32 len; + u8 data[]; +} __packed; + +struct rtl_section_hdr { + __le16 num; + __le16 reserved; +} __packed; + +struct rtl_common_subsec { + __u8 eco; + __u8 prio; + __u8 cb[2]; + __le32 len; + __u8 data[]; +}; + +struct rtl_sec_hdr { + __u8 eco; + __u8 prio; + __u8 key_id; + __u8 reserved; + __le32 len; + __u8 data[]; +} __packed; + +struct rtl_subsection { + struct list_head list; + u32 opcode; + u32 len; + u8 prio; + u8 *data; +}; + +struct rtl_iovec { + u8 *data; + u32 len; +}; + +struct rtl_vendor_cmd { + __u8 param[5]; } __packed; enum { diff --git a/drivers/bluetooth/btsdio.c b/drivers/bluetooth/btsdio.c index 51000320e1ea..f19d31ee37ea 100644 --- a/drivers/bluetooth/btsdio.c +++ b/drivers/bluetooth/btsdio.c @@ -354,7 +354,6 @@ static void btsdio_remove(struct sdio_func *func) BT_DBG("func %p", func); - cancel_work_sync(&data->work); if (!data) return; diff --git a/drivers/bluetooth/btusb.c b/drivers/bluetooth/btusb.c index 5c536151ef83..2a8e2bb038f5 100644 --- a/drivers/bluetooth/btusb.c +++ b/drivers/bluetooth/btusb.c @@ -540,6 +540,10 @@ static const struct usb_device_id blacklist_table[] = { /* Realtek 8852BE Bluetooth devices */ { USB_DEVICE(0x0cb8, 0xc559), .driver_info = BTUSB_REALTEK | BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x0bda, 0x887b), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, + { USB_DEVICE(0x13d3, 0x3571), .driver_info = BTUSB_REALTEK | + BTUSB_WIDEBAND_SPEECH }, /* Realtek Bluetooth devices */ { USB_VENDOR_AND_INTERFACE_INFO(0x0bda, 0xe0, 0x01, 0x01), @@ -558,6 +562,9 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x043e, 0x310c), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x04ca, 0x3801), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, /* Additional MediaTek MT7668 Bluetooth devices */ { USB_DEVICE(0x043e, 0x3109), .driver_info = BTUSB_MEDIATEK | @@ -612,6 +619,9 @@ static const struct usb_device_id blacklist_table[] = { { USB_DEVICE(0x0489, 0xe0e2), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, + { USB_DEVICE(0x0489, 0xe0e4), .driver_info = BTUSB_MEDIATEK | + BTUSB_WIDEBAND_SPEECH | + BTUSB_VALID_LE_STATES }, { USB_DEVICE(0x0489, 0xe0f2), .driver_info = BTUSB_MEDIATEK | BTUSB_WIDEBAND_SPEECH | BTUSB_VALID_LE_STATES }, @@ -723,6 +733,16 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = { {} }; +struct qca_dump_info { + /* fields for dump collection */ + u16 id_vendor; + u16 id_product; + u32 fw_version; + u32 controller_id; + u32 ram_dump_size; + u16 ram_dump_seqno; +}; + #define BTUSB_MAX_ISOC_FRAMES 10 #define 
BTUSB_INTR_RUNNING 0 @@ -742,6 +762,7 @@ static const struct dmi_system_id btusb_needs_reset_resume_table[] = { #define BTUSB_WAKEUP_AUTOSUSPEND 14 #define BTUSB_USE_ALT3_FOR_WBS 15 #define BTUSB_ALT6_CONTINUOUS_TX 16 +#define BTUSB_HW_SSR_ACTIVE 17 struct btusb_data { struct hci_dev *hdev; @@ -804,6 +825,8 @@ struct btusb_data { int oob_wake_irq; /* irq for out-of-band wake-on-bt */ unsigned cmd_timeout_cnt; + + struct qca_dump_info qca_dump; }; static void btusb_reset(struct hci_dev *hdev) @@ -894,6 +917,11 @@ static void btusb_qca_cmd_timeout(struct hci_dev *hdev) struct btusb_data *data = hci_get_drvdata(hdev); struct gpio_desc *reset_gpio = data->reset_gpio; + if (test_bit(BTUSB_HW_SSR_ACTIVE, &data->flags)) { + bt_dev_info(hdev, "Ramdump in progress, defer cmd_timeout"); + return; + } + if (++data->cmd_timeout_cnt < 5) return; @@ -2376,16 +2404,47 @@ static int btusb_recv_bulk_intel(struct btusb_data *data, void *buffer, return btusb_recv_bulk(data, buffer, count); } +static int btusb_intel_diagnostics(struct hci_dev *hdev, struct sk_buff *skb) +{ + struct intel_tlv *tlv = (void *)&skb->data[5]; + + /* The first event is always an event type TLV */ + if (tlv->type != INTEL_TLV_TYPE_ID) + goto recv_frame; + + switch (tlv->val[0]) { + case INTEL_TLV_SYSTEM_EXCEPTION: + case INTEL_TLV_FATAL_EXCEPTION: + case INTEL_TLV_DEBUG_EXCEPTION: + case INTEL_TLV_TEST_EXCEPTION: + /* Generate devcoredump from exception */ + if (!hci_devcd_init(hdev, skb->len)) { + hci_devcd_append(hdev, skb); + hci_devcd_complete(hdev); + } else { + bt_dev_err(hdev, "Failed to generate devcoredump"); + kfree_skb(skb); + } + return 0; + default: + bt_dev_err(hdev, "Invalid exception type %02X", tlv->val[0]); + } + +recv_frame: + return hci_recv_frame(hdev, skb); +} + static int btusb_recv_event_intel(struct hci_dev *hdev, struct sk_buff *skb) { - if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) { - struct hci_event_hdr *hdr = (void *)skb->data; + struct hci_event_hdr *hdr = (void *)skb->data; + const char diagnostics_hdr[] = { 0x87, 0x80, 0x03 }; - if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff && - hdr->plen > 0) { - const void *ptr = skb->data + HCI_EVENT_HDR_SIZE + 1; - unsigned int len = skb->len - HCI_EVENT_HDR_SIZE - 1; + if (skb->len > HCI_EVENT_HDR_SIZE && hdr->evt == 0xff && + hdr->plen > 0) { + const void *ptr = skb->data + HCI_EVENT_HDR_SIZE + 1; + unsigned int len = skb->len - HCI_EVENT_HDR_SIZE - 1; + if (btintel_test_flag(hdev, INTEL_BOOTLOADER)) { switch (skb->data[2]) { case 0x02: /* When switching to the operational firmware @@ -2404,6 +2463,15 @@ static int btusb_recv_event_intel(struct hci_dev *hdev, struct sk_buff *skb) break; } } + + /* Handle all diagnostics events separately. May still call + * hci_recv_frame. 
+ */ + if (len >= sizeof(diagnostics_hdr) && + memcmp(&skb->data[2], diagnostics_hdr, + sizeof(diagnostics_hdr)) == 0) { + return btusb_intel_diagnostics(hdev, skb); + } } return hci_recv_frame(hdev, skb); @@ -3244,6 +3312,202 @@ static int btusb_set_bdaddr_wcn6855(struct hci_dev *hdev, return 0; } +#define QCA_MEMDUMP_ACL_HANDLE 0x2EDD +#define QCA_MEMDUMP_SIZE_MAX 0x100000 +#define QCA_MEMDUMP_VSE_CLASS 0x01 +#define QCA_MEMDUMP_MSG_TYPE 0x08 +#define QCA_MEMDUMP_PKT_SIZE 248 +#define QCA_LAST_SEQUENCE_NUM 0xffff + +struct qca_dump_hdr { + u8 vse_class; + u8 msg_type; + __le16 seqno; + u8 reserved; + union { + u8 data[0]; + struct { + __le32 ram_dump_size; + u8 data0[0]; + } __packed; + }; +} __packed; + + +static void btusb_dump_hdr_qca(struct hci_dev *hdev, struct sk_buff *skb) +{ + char buf[128]; + struct btusb_data *btdata = hci_get_drvdata(hdev); + + snprintf(buf, sizeof(buf), "Controller Name: 0x%x\n", + btdata->qca_dump.controller_id); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Firmware Version: 0x%x\n", + btdata->qca_dump.fw_version); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Driver: %s\nVendor: qca\n", + btusb_driver.name); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "VID: 0x%x\nPID:0x%x\n", + btdata->qca_dump.id_vendor, btdata->qca_dump.id_product); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Lmp Subversion: 0x%x\n", + hdev->lmp_subver); + skb_put_data(skb, buf, strlen(buf)); +} + +static void btusb_coredump_qca(struct hci_dev *hdev) +{ + static const u8 param[] = { 0x26 }; + struct sk_buff *skb; + + skb = __hci_cmd_sync(hdev, 0xfc0c, 1, param, HCI_CMD_TIMEOUT); + if (IS_ERR(skb)) + bt_dev_err(hdev, "%s: trigger crash failed (%ld)", __func__, PTR_ERR(skb)); + kfree_skb(skb); +} + +/* + * ==0: not a dump pkt. + * < 0: fails to handle a dump pkt + * > 0: otherwise. 
+ */ +static int handle_dump_pkt_qca(struct hci_dev *hdev, struct sk_buff *skb) +{ + int ret = 1; + u8 pkt_type; + u8 *sk_ptr; + unsigned int sk_len; + u16 seqno; + u32 dump_size; + + struct hci_event_hdr *event_hdr; + struct hci_acl_hdr *acl_hdr; + struct qca_dump_hdr *dump_hdr; + struct btusb_data *btdata = hci_get_drvdata(hdev); + struct usb_device *udev = btdata->udev; + + pkt_type = hci_skb_pkt_type(skb); + sk_ptr = skb->data; + sk_len = skb->len; + + if (pkt_type == HCI_ACLDATA_PKT) { + acl_hdr = hci_acl_hdr(skb); + if (le16_to_cpu(acl_hdr->handle) != QCA_MEMDUMP_ACL_HANDLE) + return 0; + sk_ptr += HCI_ACL_HDR_SIZE; + sk_len -= HCI_ACL_HDR_SIZE; + event_hdr = (struct hci_event_hdr *)sk_ptr; + } else { + event_hdr = hci_event_hdr(skb); + } + + if ((event_hdr->evt != HCI_VENDOR_PKT) + || (event_hdr->plen != (sk_len - HCI_EVENT_HDR_SIZE))) + return 0; + + sk_ptr += HCI_EVENT_HDR_SIZE; + sk_len -= HCI_EVENT_HDR_SIZE; + + dump_hdr = (struct qca_dump_hdr *)sk_ptr; + if ((sk_len < offsetof(struct qca_dump_hdr, data)) + || (dump_hdr->vse_class != QCA_MEMDUMP_VSE_CLASS) + || (dump_hdr->msg_type != QCA_MEMDUMP_MSG_TYPE)) + return 0; + + /*it is dump pkt now*/ + seqno = le16_to_cpu(dump_hdr->seqno); + if (seqno == 0) { + set_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags); + dump_size = le32_to_cpu(dump_hdr->ram_dump_size); + if (!dump_size || (dump_size > QCA_MEMDUMP_SIZE_MAX)) { + ret = -EILSEQ; + bt_dev_err(hdev, "Invalid memdump size(%u)", + dump_size); + goto out; + } + + ret = hci_devcd_init(hdev, dump_size); + if (ret < 0) { + bt_dev_err(hdev, "memdump init error(%d)", ret); + goto out; + } + + btdata->qca_dump.ram_dump_size = dump_size; + btdata->qca_dump.ram_dump_seqno = 0; + sk_ptr += offsetof(struct qca_dump_hdr, data0); + sk_len -= offsetof(struct qca_dump_hdr, data0); + + usb_disable_autosuspend(udev); + bt_dev_info(hdev, "%s memdump size(%u)\n", + (pkt_type == HCI_ACLDATA_PKT) ? 
"ACL" : "event", + dump_size); + } else { + sk_ptr += offsetof(struct qca_dump_hdr, data); + sk_len -= offsetof(struct qca_dump_hdr, data); + } + + if (!btdata->qca_dump.ram_dump_size) { + ret = -EINVAL; + bt_dev_err(hdev, "memdump is not active"); + goto out; + } + + if ((seqno > btdata->qca_dump.ram_dump_seqno + 1) && (seqno != QCA_LAST_SEQUENCE_NUM)) { + dump_size = QCA_MEMDUMP_PKT_SIZE * (seqno - btdata->qca_dump.ram_dump_seqno - 1); + hci_devcd_append_pattern(hdev, 0x0, dump_size); + bt_dev_err(hdev, + "expected memdump seqno(%u) is not received(%u)\n", + btdata->qca_dump.ram_dump_seqno, seqno); + btdata->qca_dump.ram_dump_seqno = seqno; + kfree_skb(skb); + return ret; + } + + skb_pull(skb, skb->len - sk_len); + hci_devcd_append(hdev, skb); + btdata->qca_dump.ram_dump_seqno++; + if (seqno == QCA_LAST_SEQUENCE_NUM) { + bt_dev_info(hdev, + "memdump done: pkts(%u), total(%u)\n", + btdata->qca_dump.ram_dump_seqno, btdata->qca_dump.ram_dump_size); + + hci_devcd_complete(hdev); + goto out; + } + return ret; + +out: + if (btdata->qca_dump.ram_dump_size) + usb_enable_autosuspend(udev); + btdata->qca_dump.ram_dump_size = 0; + btdata->qca_dump.ram_dump_seqno = 0; + clear_bit(BTUSB_HW_SSR_ACTIVE, &btdata->flags); + + if (ret < 0) + kfree_skb(skb); + return ret; +} + +static int btusb_recv_acl_qca(struct hci_dev *hdev, struct sk_buff *skb) +{ + if (handle_dump_pkt_qca(hdev, skb)) + return 0; + return hci_recv_frame(hdev, skb); +} + +static int btusb_recv_evt_qca(struct hci_dev *hdev, struct sk_buff *skb) +{ + if (handle_dump_pkt_qca(hdev, skb)) + return 0; + return hci_recv_frame(hdev, skb); +} + + #define QCA_DFU_PACKET_LEN 4096 #define QCA_GET_TARGET_VERSION 0x09 @@ -3578,6 +3842,9 @@ static int btusb_setup_qca(struct hci_dev *hdev) if (err < 0) return err; + btdata->qca_dump.fw_version = le32_to_cpu(ver.patch_version); + btdata->qca_dump.controller_id = le32_to_cpu(ver.rom_version); + if (!(status & QCA_SYSCFG_UPDATED)) { err = btusb_setup_qca_load_nvm(hdev, &ver, info); if (err < 0) @@ -3831,13 +4098,9 @@ static int btusb_probe(struct usb_interface *intf, BT_DBG("intf %p id %p", intf, id); - /* interface numbers are hardcoded in the spec */ - if (intf->cur_altsetting->desc.bInterfaceNumber != 0) { - if (!(id->driver_info & BTUSB_IFNUM_2)) - return -ENODEV; - if (intf->cur_altsetting->desc.bInterfaceNumber != 2) - return -ENODEV; - } + if ((id->driver_info & BTUSB_IFNUM_2) && + (intf->cur_altsetting->desc.bInterfaceNumber != 2)) + return -ENODEV; ifnum_base = intf->cur_altsetting->desc.bInterfaceNumber; @@ -4012,7 +4275,7 @@ static int btusb_probe(struct usb_interface *intf, /* Combined Intel Device setup to support multiple setup routine */ if (id->driver_info & BTUSB_INTEL_COMBINED) { - err = btintel_configure_setup(hdev); + err = btintel_configure_setup(hdev, btusb_driver.name); if (err) goto out_free_dev; @@ -4071,6 +4334,11 @@ static int btusb_probe(struct usb_interface *intf, } if (id->driver_info & BTUSB_QCA_WCN6855) { + data->qca_dump.id_vendor = id->idVendor; + data->qca_dump.id_product = id->idProduct; + data->recv_event = btusb_recv_evt_qca; + data->recv_acl = btusb_recv_acl_qca; + hci_devcd_register(hdev, btusb_coredump_qca, btusb_dump_hdr_qca, NULL); data->setup_on_usb = btusb_setup_qca; hdev->shutdown = btusb_shutdown_qca; hdev->set_bdaddr = btusb_set_bdaddr_wcn6855; @@ -4102,6 +4370,9 @@ static int btusb_probe(struct usb_interface *intf, if (id->driver_info & BTUSB_ACTIONS_SEMI) { /* Support is advertised, but not implemented */ set_bit(HCI_QUIRK_BROKEN_ERR_DATA_REPORTING, 
&hdev->quirks); + set_bit(HCI_QUIRK_BROKEN_READ_TRANSMIT_POWER, &hdev->quirks); + set_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks); + set_bit(HCI_QUIRK_BROKEN_EXT_SCAN, &hdev->quirks); } if (!reset) @@ -4389,6 +4660,17 @@ done: } #endif +#ifdef CONFIG_DEV_COREDUMP +static void btusb_coredump(struct device *dev) +{ + struct btusb_data *data = dev_get_drvdata(dev); + struct hci_dev *hdev = data->hdev; + + if (hdev->dump.coredump) + hdev->dump.coredump(hdev); +} +#endif + static struct usb_driver btusb_driver = { .name = "btusb", .probe = btusb_probe, @@ -4400,6 +4682,14 @@ static struct usb_driver btusb_driver = { .id_table = btusb_table, .supports_autosuspend = 1, .disable_hub_initiated_lpm = 1, + +#ifdef CONFIG_DEV_COREDUMP + .drvwrap = { + .driver = { + .coredump = btusb_coredump, + }, + }, +#endif }; module_usb_driver(btusb_driver); diff --git a/drivers/bluetooth/hci_bcm.c b/drivers/bluetooth/hci_bcm.c index 2b6c0e1922cb..83bf5d4330c4 100644 --- a/drivers/bluetooth/hci_bcm.c +++ b/drivers/bluetooth/hci_bcm.c @@ -55,12 +55,14 @@ * @drive_rts_on_open: drive RTS signal on ->open() when platform requires it * @no_uart_clock_set: UART clock set command for >3Mbps mode is unavailable * @max_autobaud_speed: max baudrate supported by device in autobaud mode + * @max_speed: max baudrate supported */ struct bcm_device_data { bool no_early_set_baudrate; bool drive_rts_on_open; bool no_uart_clock_set; u32 max_autobaud_speed; + u32 max_speed; }; /** @@ -888,7 +890,7 @@ unlock: #endif /* Some firmware reports an IRQ which does not work (wrong pin in fw table?) */ -static struct gpiod_lookup_table asus_tf103c_irq_gpios = { +static struct gpiod_lookup_table irq_on_int33fc02_pin17_gpios = { .dev_id = "serial0-0", .table = { GPIO_LOOKUP("INT33FC:02", 17, "host-wakeup-alt", GPIO_ACTIVE_HIGH), @@ -898,12 +900,31 @@ static struct gpiod_lookup_table asus_tf103c_irq_gpios = { static const struct dmi_system_id bcm_broken_irq_dmi_table[] = { { + .ident = "Acer Iconia One 7 B1-750", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Insyde"), + DMI_MATCH(DMI_PRODUCT_NAME, "VESPA2"), + }, + .driver_data = &irq_on_int33fc02_pin17_gpios, + }, + { .ident = "Asus TF103C", .matches = { DMI_MATCH(DMI_SYS_VENDOR, "ASUSTeK COMPUTER INC."), DMI_MATCH(DMI_PRODUCT_NAME, "TF103C"), }, - .driver_data = &asus_tf103c_irq_gpios, + .driver_data = &irq_on_int33fc02_pin17_gpios, + }, + { + .ident = "Lenovo Yoga Tablet 2 830F/L / 1050F/L", + .matches = { + DMI_MATCH(DMI_SYS_VENDOR, "Intel Corp."), + DMI_MATCH(DMI_PRODUCT_NAME, "VALLEYVIEW C0 PLATFORM"), + DMI_MATCH(DMI_BOARD_NAME, "BYT-T FFD8"), + /* Partial match on beginning of BIOS version */ + DMI_MATCH(DMI_BIOS_VERSION, "BLADE_21"), + }, + .driver_data = &irq_on_int33fc02_pin17_gpios, }, { .ident = "Meegopad T08", @@ -1300,6 +1321,12 @@ static const struct hci_uart_proto bcm_proto = { }; #ifdef CONFIG_ACPI + +/* bcm43430a0/a1 BT does not support 48MHz UART clock, limit to 2000000 baud */ +static struct bcm_device_data bcm43430_device_data = { + .max_speed = 2000000, +}; + static const struct acpi_device_id bcm_acpi_match[] = { { "BCM2E00" }, { "BCM2E01" }, @@ -1414,19 +1441,19 @@ static const struct acpi_device_id bcm_acpi_match[] = { { "BCM2E71" }, { "BCM2E72" }, { "BCM2E73" }, - { "BCM2E74" }, - { "BCM2E75" }, + { "BCM2E74", (long)&bcm43430_device_data }, + { "BCM2E75", (long)&bcm43430_device_data }, { "BCM2E76" }, { "BCM2E77" }, { "BCM2E78" }, { "BCM2E79" }, { "BCM2E7A" }, - { "BCM2E7B" }, + { "BCM2E7B", (long)&bcm43430_device_data }, { "BCM2E7C" }, { "BCM2E7D" }, { 
"BCM2E7E" }, { "BCM2E7F" }, - { "BCM2E80" }, + { "BCM2E80", (long)&bcm43430_device_data }, { "BCM2E81" }, { "BCM2E82" }, { "BCM2E83" }, @@ -1435,7 +1462,7 @@ static const struct acpi_device_id bcm_acpi_match[] = { { "BCM2E86" }, { "BCM2E87" }, { "BCM2E88" }, - { "BCM2E89" }, + { "BCM2E89", (long)&bcm43430_device_data }, { "BCM2E8A" }, { "BCM2E8B" }, { "BCM2E8C" }, @@ -1444,29 +1471,30 @@ static const struct acpi_device_id bcm_acpi_match[] = { { "BCM2E90" }, { "BCM2E92" }, { "BCM2E93" }, - { "BCM2E94" }, + { "BCM2E94", (long)&bcm43430_device_data }, { "BCM2E95" }, { "BCM2E96" }, { "BCM2E97" }, { "BCM2E98" }, - { "BCM2E99" }, + { "BCM2E99", (long)&bcm43430_device_data }, { "BCM2E9A" }, - { "BCM2E9B" }, + { "BCM2E9B", (long)&bcm43430_device_data }, { "BCM2E9C" }, { "BCM2E9D" }, + { "BCM2E9F", (long)&bcm43430_device_data }, { "BCM2EA0" }, { "BCM2EA1" }, - { "BCM2EA2" }, - { "BCM2EA3" }, + { "BCM2EA2", (long)&bcm43430_device_data }, + { "BCM2EA3", (long)&bcm43430_device_data }, { "BCM2EA4" }, { "BCM2EA5" }, { "BCM2EA6" }, { "BCM2EA7" }, { "BCM2EA8" }, { "BCM2EA9" }, - { "BCM2EAA" }, - { "BCM2EAB" }, - { "BCM2EAC" }, + { "BCM2EAA", (long)&bcm43430_device_data }, + { "BCM2EAB", (long)&bcm43430_device_data }, + { "BCM2EAC", (long)&bcm43430_device_data }, { }, }; MODULE_DEVICE_TABLE(acpi, bcm_acpi_match); @@ -1535,6 +1563,8 @@ static int bcm_serdev_probe(struct serdev_device *serdev) bcmdev->no_early_set_baudrate = data->no_early_set_baudrate; bcmdev->drive_rts_on_open = data->drive_rts_on_open; bcmdev->no_uart_clock_set = data->no_uart_clock_set; + if (data->max_speed && bcmdev->oper_speed > data->max_speed) + bcmdev->oper_speed = data->max_speed; } return hci_uart_register_device(&bcmdev->serdev_hu, &bcm_proto); diff --git a/drivers/bluetooth/hci_h5.c b/drivers/bluetooth/hci_h5.c index 6455bc4fb5bb..fefc37b98b4a 100644 --- a/drivers/bluetooth/hci_h5.c +++ b/drivers/bluetooth/hci_h5.c @@ -463,6 +463,8 @@ static int h5_rx_3wire_hdr(struct hci_uart *hu, unsigned char c) if (H5_HDR_RELIABLE(hdr) && H5_HDR_SEQ(hdr) != h5->tx_ack) { bt_dev_err(hu->hdev, "Out-of-order packet arrived (%u != %u)", H5_HDR_SEQ(hdr), h5->tx_ack); + set_bit(H5_TX_ACK_REQ, &h5->flags); + hci_uart_tx_wakeup(hu); h5_reset_rx(h5); return 0; } @@ -936,6 +938,8 @@ static int h5_btrtl_setup(struct h5 *h5) err = btrtl_download_firmware(h5->hu->hdev, btrtl_dev); /* Give the device some time before the hci-core sends it a reset */ usleep_range(10000, 20000); + if (err) + goto out_free; btrtl_set_quirks(h5->hu->hdev, btrtl_dev); @@ -1100,6 +1104,8 @@ static const struct of_device_id rtl_bluetooth_of_match[] = { .data = (const void *)&h5_data_rtl8822cs }, { .compatible = "realtek,rtl8723bs-bt", .data = (const void *)&h5_data_rtl8723bs }, + { .compatible = "realtek,rtl8723cs-bt", + .data = (const void *)&h5_data_rtl8723bs }, { .compatible = "realtek,rtl8723ds-bt", .data = (const void *)&h5_data_rtl8723bs }, #endif diff --git a/drivers/bluetooth/hci_ldisc.c b/drivers/bluetooth/hci_ldisc.c index 865112e96ff9..efdda2c3fce8 100644 --- a/drivers/bluetooth/hci_ldisc.c +++ b/drivers/bluetooth/hci_ldisc.c @@ -323,9 +323,9 @@ void hci_uart_set_flow_control(struct hci_uart *hu, bool enable) /* Disable hardware flow control */ ktermios = tty->termios; ktermios.c_cflag &= ~CRTSCTS; - status = tty_set_termios(tty, &ktermios); + tty_set_termios(tty, &ktermios); BT_DBG("Disabling hardware flow control: %s", - status ? "failed" : "success"); + (tty->termios.c_cflag & CRTSCTS) ? 
"failed" : "success"); /* Clear RTS to prevent the device from sending */ /* Most UARTs need OUT2 to enable interrupts */ @@ -357,9 +357,9 @@ void hci_uart_set_flow_control(struct hci_uart *hu, bool enable) /* Re-enable hardware flow control */ ktermios = tty->termios; ktermios.c_cflag |= CRTSCTS; - status = tty_set_termios(tty, &ktermios); + tty_set_termios(tty, &ktermios); BT_DBG("Enabling hardware flow control: %s", - status ? "failed" : "success"); + !(tty->termios.c_cflag & CRTSCTS) ? "failed" : "success"); } } diff --git a/drivers/bluetooth/hci_ll.c b/drivers/bluetooth/hci_ll.c index 5abc01a2acf7..4a0b5c3160c2 100644 --- a/drivers/bluetooth/hci_ll.c +++ b/drivers/bluetooth/hci_ll.c @@ -786,7 +786,7 @@ MODULE_DEVICE_TABLE(of, hci_ti_of_match); static struct serdev_device_driver hci_ti_drv = { .driver = { .name = "hci-ti", - .of_match_table = of_match_ptr(hci_ti_of_match), + .of_match_table = hci_ti_of_match, }, .probe = hci_ti_probe, .remove = hci_ti_remove, diff --git a/drivers/bluetooth/hci_mrvl.c b/drivers/bluetooth/hci_mrvl.c index fbc3f7c3a5c7..e08222395772 100644 --- a/drivers/bluetooth/hci_mrvl.c +++ b/drivers/bluetooth/hci_mrvl.c @@ -27,10 +27,12 @@ #define MRVL_ACK 0x5A #define MRVL_NAK 0xBF #define MRVL_RAW_DATA 0x1F +#define MRVL_SET_BAUDRATE 0xFC09 enum { STATE_CHIP_VER_PENDING, STATE_FW_REQ_PENDING, + STATE_FW_LOADED, }; struct mrvl_data { @@ -254,6 +256,14 @@ static int mrvl_recv(struct hci_uart *hu, const void *data, int count) if (!test_bit(HCI_UART_REGISTERED, &hu->flags)) return -EUNATCH; + /* We might receive some noise when there is no firmware loaded. Therefore, + * we drop data if the firmware is not loaded yet and if there is no fw load + * request pending. + */ + if (!test_bit(STATE_FW_REQ_PENDING, &mrvl->flags) && + !test_bit(STATE_FW_LOADED, &mrvl->flags)) + return count; + mrvl->rx_skb = h4_recv_buf(hu->hdev, mrvl->rx_skb, data, count, mrvl_recv_pkts, ARRAY_SIZE(mrvl_recv_pkts)); @@ -354,6 +364,7 @@ static int mrvl_load_firmware(struct hci_dev *hdev, const char *name) static int mrvl_setup(struct hci_uart *hu) { int err; + struct mrvl_data *mrvl = hu->priv; hci_uart_set_flow_control(hu, true); @@ -367,9 +378,9 @@ static int mrvl_setup(struct hci_uart *hu) hci_uart_wait_until_sent(hu); if (hu->serdev) - serdev_device_set_baudrate(hu->serdev, 3000000); + serdev_device_set_baudrate(hu->serdev, hu->oper_speed); else - hci_uart_set_baudrate(hu, 3000000); + hci_uart_set_baudrate(hu, hu->oper_speed); hci_uart_set_flow_control(hu, false); @@ -377,13 +388,54 @@ static int mrvl_setup(struct hci_uart *hu) if (err) return err; + set_bit(STATE_FW_LOADED, &mrvl->flags); + + return 0; +} + +static int mrvl_set_baudrate(struct hci_uart *hu, unsigned int speed) +{ + int err; + struct mrvl_data *mrvl = hu->priv; + __le32 speed_le = cpu_to_le32(speed); + + /* The firmware might be loaded by the Wifi driver over SDIO. We wait + * up to 10s for the CTS to go up. Afterward, we know that the firmware + * is ready. 
+ */ + err = serdev_device_wait_for_cts(hu->serdev, true, 10000); + if (err) { + bt_dev_err(hu->hdev, "Wait for CTS failed with %d\n", err); + return err; + } + + set_bit(STATE_FW_LOADED, &mrvl->flags); + + err = __hci_cmd_sync_status(hu->hdev, MRVL_SET_BAUDRATE, + sizeof(speed_le), &speed_le, + HCI_INIT_TIMEOUT); + if (err) { + bt_dev_err(hu->hdev, "send command failed: %d", err); + return err; + } + + serdev_device_set_baudrate(hu->serdev, speed); + + /* We forcefully have to send a command to the bluetooth module so that + * the driver detects it after a baudrate change. This is foreseen by + * hci_serdev by setting HCI_UART_VND_DETECT which then causes a dummy + * local version read. + */ + set_bit(HCI_UART_VND_DETECT, &hu->hdev_flags); + return 0; } -static const struct hci_uart_proto mrvl_proto = { +static const struct hci_uart_proto mrvl_proto_8897 = { .id = HCI_UART_MRVL, .name = "Marvell", .init_speed = 115200, + .oper_speed = 3000000, .open = mrvl_open, .close = mrvl_close, .flush = mrvl_flush, @@ -393,18 +445,37 @@ static const struct hci_uart_proto mrvl_proto = { .dequeue = mrvl_dequeue, }; +static const struct hci_uart_proto mrvl_proto_8997 = { + .id = HCI_UART_MRVL, + .name = "Marvell 8997", + .init_speed = 115200, + .oper_speed = 3000000, + .open = mrvl_open, + .close = mrvl_close, + .flush = mrvl_flush, + .set_baudrate = mrvl_set_baudrate, + .recv = mrvl_recv, + .enqueue = mrvl_enqueue, + .dequeue = mrvl_dequeue, +}; + static int mrvl_serdev_probe(struct serdev_device *serdev) { struct mrvl_serdev *mrvldev; + const struct hci_uart_proto *mrvl_proto = device_get_match_data(&serdev->dev); mrvldev = devm_kzalloc(&serdev->dev, sizeof(*mrvldev), GFP_KERNEL); if (!mrvldev) return -ENOMEM; + mrvldev->hu.oper_speed = mrvl_proto->oper_speed; + if (mrvl_proto->set_baudrate) + of_property_read_u32(serdev->dev.of_node, "max-speed", &mrvldev->hu.oper_speed); + mrvldev->hu.serdev = serdev; serdev_device_set_drvdata(serdev, mrvldev); - return hci_uart_register_device(&mrvldev->hu, &mrvl_proto); + return hci_uart_register_device(&mrvldev->hu, mrvl_proto); } static void mrvl_serdev_remove(struct serdev_device *serdev) @@ -414,13 +485,12 @@ static void mrvl_serdev_remove(struct serdev_device *serdev) hci_uart_unregister_device(&mrvldev->hu); } -#ifdef CONFIG_OF -static const struct of_device_id mrvl_bluetooth_of_match[] = { - { .compatible = "mrvl,88w8897" }, +static const struct of_device_id __maybe_unused mrvl_bluetooth_of_match[] = { + { .compatible = "mrvl,88w8897", .data = &mrvl_proto_8897}, + { .compatible = "mrvl,88w8997", .data = &mrvl_proto_8997}, { }, }; MODULE_DEVICE_TABLE(of, mrvl_bluetooth_of_match); -#endif static struct serdev_device_driver mrvl_serdev_driver = { .probe = mrvl_serdev_probe, @@ -435,12 +505,12 @@ int __init mrvl_init(void) { serdev_device_driver_register(&mrvl_serdev_driver); - return hci_uart_register_proto(&mrvl_proto); + return hci_uart_register_proto(&mrvl_proto_8897); } int __exit mrvl_deinit(void) { serdev_device_driver_unregister(&mrvl_serdev_driver); - return hci_uart_unregister_proto(&mrvl_proto); + return hci_uart_unregister_proto(&mrvl_proto_8897); } diff --git a/drivers/bluetooth/hci_qca.c b/drivers/bluetooth/hci_qca.c index 3df8c3606e93..1b064504b388 100644 --- a/drivers/bluetooth/hci_qca.c +++ b/drivers/bluetooth/hci_qca.c @@ -1317,7 +1317,8 @@ static int qca_set_baudrate(struct hci_dev *hdev, uint8_t baudrate) /* Give the controller time to process the request */ if (qca_is_wcn399x(qca_soc_type(hu)) || - qca_is_wcn6750(qca_soc_type(hu))) + 
qca_is_wcn6750(qca_soc_type(hu)) || + qca_is_wcn6855(qca_soc_type(hu))) usleep_range(1000, 10000); else msleep(300); @@ -1394,7 +1395,8 @@ static unsigned int qca_get_speed(struct hci_uart *hu, static int qca_check_speeds(struct hci_uart *hu) { if (qca_is_wcn399x(qca_soc_type(hu)) || - qca_is_wcn6750(qca_soc_type(hu))) { + qca_is_wcn6750(qca_soc_type(hu)) || + qca_is_wcn6855(qca_soc_type(hu))) { if (!qca_get_speed(hu, QCA_INIT_SPEED) && !qca_get_speed(hu, QCA_OPER_SPEED)) return -EINVAL; @@ -1428,7 +1430,8 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type) * changing the baudrate of chip and host. */ if (qca_is_wcn399x(soc_type) || - qca_is_wcn6750(soc_type)) + qca_is_wcn6750(soc_type) || + qca_is_wcn6855(soc_type)) hci_uart_set_flow_control(hu, true); if (soc_type == QCA_WCN3990) { @@ -1446,7 +1449,8 @@ static int qca_set_speed(struct hci_uart *hu, enum qca_speed_type speed_type) error: if (qca_is_wcn399x(soc_type) || - qca_is_wcn6750(soc_type)) + qca_is_wcn6750(soc_type) || + qca_is_wcn6855(soc_type)) hci_uart_set_flow_control(hu, false); if (soc_type == QCA_WCN3990) { @@ -1682,7 +1686,8 @@ static int qca_power_on(struct hci_dev *hdev) return 0; if (qca_is_wcn399x(soc_type) || - qca_is_wcn6750(soc_type)) { + qca_is_wcn6750(soc_type) || + qca_is_wcn6855(soc_type)) { ret = qca_regulator_init(hu); } else { qcadev = serdev_device_get_drvdata(hu->serdev); @@ -1723,7 +1728,8 @@ static int qca_setup(struct hci_uart *hu) bt_dev_info(hdev, "setting up %s", qca_is_wcn399x(soc_type) ? "wcn399x" : - (soc_type == QCA_WCN6750) ? "wcn6750" : "ROME/QCA6390"); + (soc_type == QCA_WCN6750) ? "wcn6750" : + (soc_type == QCA_WCN6855) ? "wcn6855" : "ROME/QCA6390"); qca->memdump_state = QCA_MEMDUMP_IDLE; @@ -1735,7 +1741,8 @@ retry: clear_bit(QCA_SSR_TRIGGERED, &qca->flags); if (qca_is_wcn399x(soc_type) || - qca_is_wcn6750(soc_type)) { + qca_is_wcn6750(soc_type) || + qca_is_wcn6855(soc_type)) { set_bit(HCI_QUIRK_USE_BDADDR_PROPERTY, &hdev->quirks); hci_set_aosp_capable(hdev); @@ -1757,7 +1764,8 @@ retry: } if (!(qca_is_wcn399x(soc_type) || - qca_is_wcn6750(soc_type))) { + qca_is_wcn6750(soc_type) || + qca_is_wcn6855(soc_type))) { /* Get QCA version information */ ret = qca_read_soc_version(hdev, &ver, soc_type); if (ret) @@ -1827,7 +1835,7 @@ static const struct hci_uart_proto qca_proto = { .dequeue = qca_dequeue, }; -static const struct qca_device_data qca_soc_data_wcn3990 = { +static const struct qca_device_data qca_soc_data_wcn3990 __maybe_unused = { .soc_type = QCA_WCN3990, .vregs = (struct qca_vreg []) { { "vddio", 15000 }, @@ -1838,7 +1846,7 @@ static const struct qca_device_data qca_soc_data_wcn3990 = { .num_vregs = 4, }; -static const struct qca_device_data qca_soc_data_wcn3991 = { +static const struct qca_device_data qca_soc_data_wcn3991 __maybe_unused = { .soc_type = QCA_WCN3991, .vregs = (struct qca_vreg []) { { "vddio", 15000 }, @@ -1850,7 +1858,7 @@ static const struct qca_device_data qca_soc_data_wcn3991 = { .capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES, }; -static const struct qca_device_data qca_soc_data_wcn3998 = { +static const struct qca_device_data qca_soc_data_wcn3998 __maybe_unused = { .soc_type = QCA_WCN3998, .vregs = (struct qca_vreg []) { { "vddio", 10000 }, @@ -1861,12 +1869,12 @@ static const struct qca_device_data qca_soc_data_wcn3998 = { .num_vregs = 4, }; -static const struct qca_device_data qca_soc_data_qca6390 = { +static const struct qca_device_data qca_soc_data_qca6390 __maybe_unused = { .soc_type = QCA_QCA6390, .num_vregs = 0, 
}; -static const struct qca_device_data qca_soc_data_wcn6750 = { +static const struct qca_device_data qca_soc_data_wcn6750 __maybe_unused = { .soc_type = QCA_WCN6750, .vregs = (struct qca_vreg []) { { "vddio", 5000 }, @@ -1883,6 +1891,20 @@ static const struct qca_device_data qca_soc_data_wcn6750 = { .capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES, }; +static const struct qca_device_data qca_soc_data_wcn6855 __maybe_unused = { + .soc_type = QCA_WCN6855, + .vregs = (struct qca_vreg []) { + { "vddio", 5000 }, + { "vddbtcxmx", 126000 }, + { "vddrfacmn", 12500 }, + { "vddrfa0p8", 102000 }, + { "vddrfa1p7", 302000 }, + { "vddrfa1p2", 257000 }, + }, + .num_vregs = 6, + .capabilities = QCA_CAP_WIDEBAND_SPEECH | QCA_CAP_VALID_LE_STATES, +}; + static void qca_power_shutdown(struct hci_uart *hu) { struct qca_serdev *qcadev; @@ -1912,7 +1934,7 @@ static void qca_power_shutdown(struct hci_uart *hu) host_set_baudrate(hu, 2400); qca_send_power_pulse(hu, false); qca_regulator_disable(qcadev); - } else if (soc_type == QCA_WCN6750) { + } else if (soc_type == QCA_WCN6750 || soc_type == QCA_WCN6855) { gpiod_set_value_cansleep(qcadev->bt_en, 0); msleep(100); qca_regulator_disable(qcadev); @@ -2047,7 +2069,8 @@ static int qca_serdev_probe(struct serdev_device *serdev) if (data && (qca_is_wcn399x(data->soc_type) || - qca_is_wcn6750(data->soc_type))) { + qca_is_wcn6750(data->soc_type) || + qca_is_wcn6855(data->soc_type))) { qcadev->btsoc_type = data->soc_type; qcadev->bt_power = devm_kzalloc(&serdev->dev, sizeof(struct qca_power), @@ -2067,14 +2090,18 @@ static int qca_serdev_probe(struct serdev_device *serdev) qcadev->bt_en = devm_gpiod_get_optional(&serdev->dev, "enable", GPIOD_OUT_LOW); - if (IS_ERR_OR_NULL(qcadev->bt_en) && data->soc_type == QCA_WCN6750) { + if (IS_ERR_OR_NULL(qcadev->bt_en) && + (data->soc_type == QCA_WCN6750 || + data->soc_type == QCA_WCN6855)) { dev_err(&serdev->dev, "failed to acquire BT_EN gpio\n"); power_ctrl_enabled = false; } qcadev->sw_ctrl = devm_gpiod_get_optional(&serdev->dev, "swctrl", GPIOD_IN); - if (IS_ERR_OR_NULL(qcadev->sw_ctrl) && data->soc_type == QCA_WCN6750) + if (IS_ERR_OR_NULL(qcadev->sw_ctrl) && + (data->soc_type == QCA_WCN6750 || + data->soc_type == QCA_WCN6855)) dev_warn(&serdev->dev, "failed to acquire SW_CTRL gpio\n"); qcadev->susclk = devm_clk_get_optional(&serdev->dev, NULL); @@ -2150,8 +2177,9 @@ static void qca_serdev_remove(struct serdev_device *serdev) struct qca_power *power = qcadev->bt_power; if ((qca_is_wcn399x(qcadev->btsoc_type) || - qca_is_wcn6750(qcadev->btsoc_type)) && - power->vregs_on) + qca_is_wcn6750(qcadev->btsoc_type) || + qca_is_wcn6855(qcadev->btsoc_type)) && + power->vregs_on) qca_power_shutdown(&qcadev->serdev_hu); else if (qcadev->susclk) clk_disable_unprepare(qcadev->susclk); @@ -2335,6 +2363,7 @@ static const struct of_device_id qca_bluetooth_of_match[] = { { .compatible = "qcom,wcn3991-bt", .data = &qca_soc_data_wcn3991}, { .compatible = "qcom,wcn3998-bt", .data = &qca_soc_data_wcn3998}, { .compatible = "qcom,wcn6750-bt", .data = &qca_soc_data_wcn6750}, + { .compatible = "qcom,wcn6855-bt", .data = &qca_soc_data_wcn6855}, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, qca_bluetooth_of_match); diff --git a/drivers/bluetooth/hci_vhci.c b/drivers/bluetooth/hci_vhci.c index c443c3b0a4da..40e2b9fa11a2 100644 --- a/drivers/bluetooth/hci_vhci.c +++ b/drivers/bluetooth/hci_vhci.c @@ -278,6 +278,104 @@ static int vhci_setup(struct hci_dev *hdev) return 0; } +static void vhci_coredump(struct hci_dev *hdev) +{ + /* No need to do 
anything */ +} + +static void vhci_coredump_hdr(struct hci_dev *hdev, struct sk_buff *skb) +{ + char buf[80]; + + snprintf(buf, sizeof(buf), "Controller Name: vhci_ctrl\n"); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Firmware Version: vhci_fw\n"); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Driver: vhci_drv\n"); + skb_put_data(skb, buf, strlen(buf)); + + snprintf(buf, sizeof(buf), "Vendor: vhci\n"); + skb_put_data(skb, buf, strlen(buf)); +} + +#define MAX_COREDUMP_LINE_LEN 40 + +struct devcoredump_test_data { + enum devcoredump_state state; + unsigned int timeout; + char data[MAX_COREDUMP_LINE_LEN]; +}; + +static inline void force_devcd_timeout(struct hci_dev *hdev, + unsigned int timeout) +{ +#ifdef CONFIG_DEV_COREDUMP + hdev->dump.timeout = msecs_to_jiffies(timeout * 1000); +#endif +} + +static ssize_t force_devcd_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct vhci_data *data = file->private_data; + struct hci_dev *hdev = data->hdev; + struct sk_buff *skb = NULL; + struct devcoredump_test_data dump_data; + size_t data_size; + int ret; + + if (count < offsetof(struct devcoredump_test_data, data) || + count > sizeof(dump_data)) + return -EINVAL; + + if (copy_from_user(&dump_data, user_buf, count)) + return -EFAULT; + + data_size = count - offsetof(struct devcoredump_test_data, data); + skb = alloc_skb(data_size, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + skb_put_data(skb, &dump_data.data, data_size); + + hci_devcd_register(hdev, vhci_coredump, vhci_coredump_hdr, NULL); + + /* Force the devcoredump timeout */ + if (dump_data.timeout) + force_devcd_timeout(hdev, dump_data.timeout); + + ret = hci_devcd_init(hdev, skb->len); + if (ret) { + BT_ERR("Failed to generate devcoredump"); + kfree_skb(skb); + return ret; + } + + hci_devcd_append(hdev, skb); + + switch (dump_data.state) { + case HCI_DEVCOREDUMP_DONE: + hci_devcd_complete(hdev); + break; + case HCI_DEVCOREDUMP_ABORT: + hci_devcd_abort(hdev); + break; + case HCI_DEVCOREDUMP_TIMEOUT: + /* Do nothing */ + break; + default: + return -EINVAL; + } + + return count; +} + +static const struct file_operations force_devcoredump_fops = { + .open = simple_open, + .write = force_devcd_write, +}; + static int __vhci_create_device(struct vhci_data *data, __u8 opcode) { struct hci_dev *hdev; @@ -355,6 +453,9 @@ static int __vhci_create_device(struct vhci_data *data, __u8 opcode) debugfs_create_file("aosp_capable", 0644, hdev->debugfs, data, &aosp_capable_fops); + debugfs_create_file("force_devcoredump", 0644, hdev->debugfs, data, + &force_devcoredump_fops); + hci_skb_pkt_type(skb) = HCI_VENDOR_PKT; skb_put_u8(skb, 0xff); diff --git a/drivers/net/Kconfig b/drivers/net/Kconfig index c34bd432da27..d0a1ed216d15 100644 --- a/drivers/net/Kconfig +++ b/drivers/net/Kconfig @@ -402,6 +402,8 @@ config TUN_VNET_CROSS_LE config VETH tristate "Virtual ethernet pair device" + select PAGE_POOL + select PAGE_POOL_STATS help This device is a local ethernet tunnel. Devices are created in pairs. 
When one end receives the packet it appears on its pair and vice diff --git a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c index 12083b9679b5..6ea5521074d3 100644 --- a/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c +++ b/drivers/net/ethernet/broadcom/bnx2x/bnx2x_cmn.c @@ -1935,8 +1935,7 @@ u16 bnx2x_select_queue(struct net_device *dev, struct sk_buff *skb, /* Skip VLAN tag if present */ if (ether_type == ETH_P_8021Q) { - struct vlan_ethhdr *vhdr = - (struct vlan_ethhdr *)skb->data; + struct vlan_ethhdr *vhdr = skb_vlan_eth_hdr(skb); ether_type = ntohs(vhdr->h_vlan_encapsulated_proto); } diff --git a/drivers/net/ethernet/emulex/benet/be_main.c b/drivers/net/ethernet/emulex/benet/be_main.c index aed1b622f51f..7e408bcc88de 100644 --- a/drivers/net/ethernet/emulex/benet/be_main.c +++ b/drivers/net/ethernet/emulex/benet/be_main.c @@ -1124,7 +1124,7 @@ static struct sk_buff *be_lancer_xmit_workarounds(struct be_adapter *adapter, struct be_wrb_params *wrb_params) { - struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; + struct vlan_ethhdr *veh = skb_vlan_eth_hdr(skb); unsigned int eth_hdr_len; struct iphdr *ip; diff --git a/drivers/net/ethernet/engleder/tsnep.h b/drivers/net/ethernet/engleder/tsnep.h index 058c2bcf31a7..11b29f56aaf9 100644 --- a/drivers/net/ethernet/engleder/tsnep.h +++ b/drivers/net/ethernet/engleder/tsnep.h @@ -18,6 +18,7 @@ #define TSNEP "tsnep" #define TSNEP_RING_SIZE 256 +#define TSNEP_RING_MASK (TSNEP_RING_SIZE - 1) #define TSNEP_RING_RX_REFILL 16 #define TSNEP_RING_RX_REUSE (TSNEP_RING_SIZE - TSNEP_RING_SIZE / 4) #define TSNEP_RING_ENTRIES_PER_PAGE (PAGE_SIZE / TSNEP_DESC_SIZE) @@ -69,6 +70,7 @@ struct tsnep_tx_entry { union { struct sk_buff *skb; struct xdp_frame *xdpf; + bool zc; }; size_t len; DEFINE_DMA_UNMAP_ADDR(dma); @@ -87,6 +89,7 @@ struct tsnep_tx { int read; u32 owner_counter; int increment_owner_counter; + struct xsk_buff_pool *xsk_pool; u32 packets; u32 bytes; @@ -100,7 +103,10 @@ struct tsnep_rx_entry { u32 properties; - struct page *page; + union { + struct page *page; + struct xdp_buff *xdp; + }; size_t len; dma_addr_t dma; }; @@ -120,6 +126,9 @@ struct tsnep_rx { u32 owner_counter; int increment_owner_counter; struct page_pool *page_pool; + struct page **page_buffer; + struct xsk_buff_pool *xsk_pool; + struct xdp_buff **xdp_batch; u32 packets; u32 bytes; @@ -128,6 +137,7 @@ struct tsnep_rx { u32 alloc_failed; struct xdp_rxq_info xdp_rxq; + struct xdp_rxq_info xdp_rxq_zc; }; struct tsnep_queue { @@ -213,6 +223,8 @@ int tsnep_rxnfc_del_rule(struct tsnep_adapter *adapter, int tsnep_xdp_setup_prog(struct tsnep_adapter *adapter, struct bpf_prog *prog, struct netlink_ext_ack *extack); +int tsnep_xdp_setup_pool(struct tsnep_adapter *adapter, + struct xsk_buff_pool *pool, u16 queue_id); #if IS_ENABLED(CONFIG_TSNEP_SELFTESTS) int tsnep_ethtool_get_test_count(void); @@ -241,5 +253,7 @@ static inline void tsnep_ethtool_self_test(struct net_device *dev, void tsnep_get_system_time(struct tsnep_adapter *adapter, u64 *time); int tsnep_set_irq_coalesce(struct tsnep_queue *queue, u32 usecs); u32 tsnep_get_irq_coalesce(struct tsnep_queue *queue); +int tsnep_enable_xsk(struct tsnep_queue *queue, struct xsk_buff_pool *pool); +void tsnep_disable_xsk(struct tsnep_queue *queue); #endif /* _TSNEP_H */ diff --git a/drivers/net/ethernet/engleder/tsnep_main.c b/drivers/net/ethernet/engleder/tsnep_main.c index ed1b6102cfeb..84751bb303a6 100644 --- a/drivers/net/ethernet/engleder/tsnep_main.c +++ 
b/drivers/net/ethernet/engleder/tsnep_main.c @@ -28,11 +28,16 @@ #include <linux/iopoll.h> #include <linux/bpf.h> #include <linux/bpf_trace.h> +#include <net/xdp_sock_drv.h> #define TSNEP_RX_OFFSET (max(NET_SKB_PAD, XDP_PACKET_HEADROOM) + NET_IP_ALIGN) #define TSNEP_HEADROOM ALIGN(TSNEP_RX_OFFSET, 4) #define TSNEP_MAX_RX_BUF_SIZE (PAGE_SIZE - TSNEP_HEADROOM - \ SKB_DATA_ALIGN(sizeof(struct skb_shared_info))) +/* XSK buffer shall store at least Q-in-Q frame */ +#define TSNEP_XSK_RX_BUF_SIZE (ALIGN(TSNEP_RX_INLINE_METADATA_SIZE + \ + ETH_FRAME_LEN + ETH_FCS_LEN + \ + VLAN_HLEN * 2, 4)) #ifdef CONFIG_ARCH_DMA_ADDR_T_64BIT #define DMA_ADDR_HIGH(dma_addr) ((u32)(((dma_addr) >> 32) & 0xFFFFFFFF)) @@ -49,6 +54,8 @@ #define TSNEP_TX_TYPE_SKB_FRAG BIT(1) #define TSNEP_TX_TYPE_XDP_TX BIT(2) #define TSNEP_TX_TYPE_XDP_NDO BIT(3) +#define TSNEP_TX_TYPE_XDP (TSNEP_TX_TYPE_XDP_TX | TSNEP_TX_TYPE_XDP_NDO) +#define TSNEP_TX_TYPE_XSK BIT(4) #define TSNEP_XDP_TX BIT(0) #define TSNEP_XDP_REDIRECT BIT(1) @@ -265,7 +272,7 @@ static void tsnep_tx_ring_cleanup(struct tsnep_tx *tx) } } -static int tsnep_tx_ring_init(struct tsnep_tx *tx) +static int tsnep_tx_ring_create(struct tsnep_tx *tx) { struct device *dmadev = tx->adapter->dmadev; struct tsnep_tx_entry *entry; @@ -288,11 +295,12 @@ static int tsnep_tx_ring_init(struct tsnep_tx *tx) entry->desc = (struct tsnep_tx_desc *) (((u8 *)entry->desc_wb) + TSNEP_DESC_OFFSET); entry->desc_dma = tx->page_dma[i] + TSNEP_DESC_SIZE * j; + entry->owner_user_flag = false; } } for (i = 0; i < TSNEP_RING_SIZE; i++) { entry = &tx->entry[i]; - next_entry = &tx->entry[(i + 1) % TSNEP_RING_SIZE]; + next_entry = &tx->entry[(i + 1) & TSNEP_RING_MASK]; entry->desc->next = __cpu_to_le64(next_entry->desc_dma); } @@ -303,13 +311,60 @@ alloc_failed: return retval; } +static void tsnep_tx_init(struct tsnep_tx *tx) +{ + dma_addr_t dma; + + dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER; + iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW); + iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH); + tx->write = 0; + tx->read = 0; + tx->owner_counter = 1; + tx->increment_owner_counter = TSNEP_RING_SIZE - 1; +} + +static void tsnep_tx_enable(struct tsnep_tx *tx) +{ + struct netdev_queue *nq; + + nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); + + __netif_tx_lock_bh(nq); + netif_tx_wake_queue(nq); + __netif_tx_unlock_bh(nq); +} + +static void tsnep_tx_disable(struct tsnep_tx *tx, struct napi_struct *napi) +{ + struct netdev_queue *nq; + u32 val; + + nq = netdev_get_tx_queue(tx->adapter->netdev, tx->queue_index); + + __netif_tx_lock_bh(nq); + netif_tx_stop_queue(nq); + __netif_tx_unlock_bh(nq); + + /* wait until TX is done in hardware */ + readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val, + ((val & TSNEP_CONTROL_TX_ENABLE) == 0), 10000, + 1000000); + + /* wait until TX is also done in software */ + while (READ_ONCE(tx->read) != tx->write) { + napi_schedule(napi); + napi_synchronize(napi); + } +} + static void tsnep_tx_activate(struct tsnep_tx *tx, int index, int length, bool last) { struct tsnep_tx_entry *entry = &tx->entry[index]; entry->properties = 0; - /* xdpf is union with skb */ + /* xdpf and zc are union with skb */ if (entry->skb) { entry->properties = length & TSNEP_DESC_LENGTH_MASK; entry->properties |= TSNEP_DESC_INTERRUPT_FLAG; @@ -381,7 +436,7 @@ static int tsnep_tx_map(struct sk_buff *skb, struct tsnep_tx *tx, int count) int i; for (i = 0; i < count; i++) { - entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE]; + entry = 
&tx->entry[(tx->write + i) & TSNEP_RING_MASK]; if (!i) { len = skb_headlen(skb); @@ -419,7 +474,7 @@ static int tsnep_tx_unmap(struct tsnep_tx *tx, int index, int count) int i; for (i = 0; i < count; i++) { - entry = &tx->entry[(index + i) % TSNEP_RING_SIZE]; + entry = &tx->entry[(index + i) & TSNEP_RING_MASK]; if (entry->len) { if (entry->type & TSNEP_TX_TYPE_SKB) @@ -481,9 +536,9 @@ static netdev_tx_t tsnep_xmit_frame_ring(struct sk_buff *skb, skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS; for (i = 0; i < count; i++) - tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length, + tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length, i == count - 1); - tx->write = (tx->write + count) % TSNEP_RING_SIZE; + tx->write = (tx->write + count) & TSNEP_RING_MASK; skb_tx_timestamp(skb); @@ -516,7 +571,7 @@ static int tsnep_xdp_tx_map(struct xdp_frame *xdpf, struct tsnep_tx *tx, frag = NULL; len = xdpf->len; for (i = 0; i < count; i++) { - entry = &tx->entry[(tx->write + i) % TSNEP_RING_SIZE]; + entry = &tx->entry[(tx->write + i) & TSNEP_RING_MASK]; if (type & TSNEP_TX_TYPE_XDP_NDO) { data = unlikely(frag) ? skb_frag_address(frag) : xdpf->data; @@ -589,9 +644,9 @@ static bool tsnep_xdp_xmit_frame_ring(struct xdp_frame *xdpf, length = retval; for (i = 0; i < count; i++) - tsnep_tx_activate(tx, (tx->write + i) % TSNEP_RING_SIZE, length, + tsnep_tx_activate(tx, (tx->write + i) & TSNEP_RING_MASK, length, i == count - 1); - tx->write = (tx->write + count) % TSNEP_RING_SIZE; + tx->write = (tx->write + count) & TSNEP_RING_MASK; /* descriptor properties shall be valid before hardware is notified */ dma_wmb(); @@ -627,10 +682,69 @@ static bool tsnep_xdp_xmit_back(struct tsnep_adapter *adapter, return xmit; } +static int tsnep_xdp_tx_map_zc(struct xdp_desc *xdpd, struct tsnep_tx *tx) +{ + struct tsnep_tx_entry *entry; + dma_addr_t dma; + + entry = &tx->entry[tx->write]; + entry->zc = true; + + dma = xsk_buff_raw_get_dma(tx->xsk_pool, xdpd->addr); + xsk_buff_raw_dma_sync_for_device(tx->xsk_pool, dma, xdpd->len); + + entry->type = TSNEP_TX_TYPE_XSK; + entry->len = xdpd->len; + + entry->desc->tx = __cpu_to_le64(dma); + + return xdpd->len; +} + +static void tsnep_xdp_xmit_frame_ring_zc(struct xdp_desc *xdpd, + struct tsnep_tx *tx) +{ + int length; + + length = tsnep_xdp_tx_map_zc(xdpd, tx); + + tsnep_tx_activate(tx, tx->write, length, true); + tx->write = (tx->write + 1) & TSNEP_RING_MASK; +} + +static void tsnep_xdp_xmit_zc(struct tsnep_tx *tx) +{ + int desc_available = tsnep_tx_desc_available(tx); + struct xdp_desc *descs = tx->xsk_pool->tx_descs; + int batch, i; + + /* ensure that TX ring is not filled up by XDP, always MAX_SKB_FRAGS + * will be available for normal TX path and queue is stopped there if + * necessary + */ + if (desc_available <= (MAX_SKB_FRAGS + 1)) + return; + desc_available -= MAX_SKB_FRAGS + 1; + + batch = xsk_tx_peek_release_desc_batch(tx->xsk_pool, desc_available); + for (i = 0; i < batch; i++) + tsnep_xdp_xmit_frame_ring_zc(&descs[i], tx); + + if (batch) { + /* descriptor properties shall be valid before hardware is + * notified + */ + dma_wmb(); + + tsnep_xdp_xmit_flush(tx); + } +} + static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) { struct tsnep_tx_entry *entry; struct netdev_queue *nq; + int xsk_frames = 0; int budget = 128; int length; int count; @@ -657,7 +771,7 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) if ((entry->type & TSNEP_TX_TYPE_SKB) && skb_shinfo(entry->skb)->nr_frags > 0) count += skb_shinfo(entry->skb)->nr_frags; - 
else if (!(entry->type & TSNEP_TX_TYPE_SKB) && + else if ((entry->type & TSNEP_TX_TYPE_XDP) && xdp_frame_has_frags(entry->xdpf)) count += xdp_get_shared_info_from_frame(entry->xdpf)->nr_frags; @@ -686,12 +800,14 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) if (entry->type & TSNEP_TX_TYPE_SKB) napi_consume_skb(entry->skb, napi_budget); - else + else if (entry->type & TSNEP_TX_TYPE_XDP) xdp_return_frame_rx_napi(entry->xdpf); - /* xdpf is union with skb */ + else + xsk_frames++; + /* xdpf and zc are union with skb */ entry->skb = NULL; - tx->read = (tx->read + count) % TSNEP_RING_SIZE; + tx->read = (tx->read + count) & TSNEP_RING_MASK; tx->packets++; tx->bytes += length + ETH_FCS_LEN; @@ -699,6 +815,14 @@ static bool tsnep_tx_poll(struct tsnep_tx *tx, int napi_budget) budget--; } while (likely(budget)); + if (tx->xsk_pool) { + if (xsk_frames) + xsk_tx_completed(tx->xsk_pool, xsk_frames); + if (xsk_uses_need_wakeup(tx->xsk_pool)) + xsk_set_tx_need_wakeup(tx->xsk_pool); + tsnep_xdp_xmit_zc(tx); + } + if ((tsnep_tx_desc_available(tx) >= ((MAX_SKB_FRAGS + 1) * 2)) && netif_tx_queue_stopped(nq)) { netif_tx_wake_queue(nq); @@ -731,38 +855,21 @@ static bool tsnep_tx_pending(struct tsnep_tx *tx) return pending; } -static int tsnep_tx_open(struct tsnep_adapter *adapter, void __iomem *addr, - int queue_index, struct tsnep_tx *tx) +static int tsnep_tx_open(struct tsnep_tx *tx) { - dma_addr_t dma; int retval; - memset(tx, 0, sizeof(*tx)); - tx->adapter = adapter; - tx->addr = addr; - tx->queue_index = queue_index; - - retval = tsnep_tx_ring_init(tx); + retval = tsnep_tx_ring_create(tx); if (retval) return retval; - dma = tx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER; - iowrite32(DMA_ADDR_LOW(dma), tx->addr + TSNEP_TX_DESC_ADDR_LOW); - iowrite32(DMA_ADDR_HIGH(dma), tx->addr + TSNEP_TX_DESC_ADDR_HIGH); - tx->owner_counter = 1; - tx->increment_owner_counter = TSNEP_RING_SIZE - 1; + tsnep_tx_init(tx); return 0; } static void tsnep_tx_close(struct tsnep_tx *tx) { - u32 val; - - readx_poll_timeout(ioread32, tx->addr + TSNEP_CONTROL, val, - ((val & TSNEP_CONTROL_TX_ENABLE) == 0), 10000, - 1000000); - tsnep_tx_ring_cleanup(tx); } @@ -774,9 +881,12 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx) for (i = 0; i < TSNEP_RING_SIZE; i++) { entry = &rx->entry[i]; - if (entry->page) + if (!rx->xsk_pool && entry->page) page_pool_put_full_page(rx->page_pool, entry->page, false); + if (rx->xsk_pool && entry->xdp) + xsk_buff_free(entry->xdp); + /* xdp is union with page */ entry->page = NULL; } @@ -795,7 +905,7 @@ static void tsnep_rx_ring_cleanup(struct tsnep_rx *rx) } } -static int tsnep_rx_ring_init(struct tsnep_rx *rx) +static int tsnep_rx_ring_create(struct tsnep_rx *rx) { struct device *dmadev = rx->adapter->dmadev; struct tsnep_rx_entry *entry; @@ -839,7 +949,7 @@ static int tsnep_rx_ring_init(struct tsnep_rx *rx) for (i = 0; i < TSNEP_RING_SIZE; i++) { entry = &rx->entry[i]; - next_entry = &rx->entry[(i + 1) % TSNEP_RING_SIZE]; + next_entry = &rx->entry[(i + 1) & TSNEP_RING_MASK]; entry->desc->next = __cpu_to_le64(next_entry->desc_dma); } @@ -850,6 +960,37 @@ failed: return retval; } +static void tsnep_rx_init(struct tsnep_rx *rx) +{ + dma_addr_t dma; + + dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER; + iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW); + iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH); + rx->write = 0; + rx->read = 0; + rx->owner_counter = 1; + rx->increment_owner_counter = TSNEP_RING_SIZE - 1; +} + +static void 
tsnep_rx_enable(struct tsnep_rx *rx) +{ + /* descriptor properties shall be valid before hardware is notified */ + dma_wmb(); + + iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL); +} + +static void tsnep_rx_disable(struct tsnep_rx *rx) +{ + u32 val; + + iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL); + readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val, + ((val & TSNEP_CONTROL_RX_ENABLE) == 0), 10000, + 1000000); +} + static int tsnep_rx_desc_available(struct tsnep_rx *rx) { if (rx->read <= rx->write) @@ -858,6 +999,40 @@ static int tsnep_rx_desc_available(struct tsnep_rx *rx) return rx->read - rx->write - 1; } +static void tsnep_rx_free_page_buffer(struct tsnep_rx *rx) +{ + struct page **page; + + /* last entry of page_buffer is always zero, because ring cannot be + * filled completely + */ + page = rx->page_buffer; + while (*page) { + page_pool_put_full_page(rx->page_pool, *page, false); + *page = NULL; + page++; + } +} + +static int tsnep_rx_alloc_page_buffer(struct tsnep_rx *rx) +{ + int i; + + /* alloc for all ring entries except the last one, because ring cannot + * be filled completely + */ + for (i = 0; i < TSNEP_RING_SIZE - 1; i++) { + rx->page_buffer[i] = page_pool_dev_alloc_pages(rx->page_pool); + if (!rx->page_buffer[i]) { + tsnep_rx_free_page_buffer(rx); + + return -ENOMEM; + } + } + + return 0; +} + static void tsnep_rx_set_page(struct tsnep_rx *rx, struct tsnep_rx_entry *entry, struct page *page) { @@ -893,7 +1068,7 @@ static void tsnep_rx_activate(struct tsnep_rx *rx, int index) { struct tsnep_rx_entry *entry = &rx->entry[index]; - /* TSNEP_MAX_RX_BUF_SIZE is a multiple of 4 */ + /* TSNEP_MAX_RX_BUF_SIZE and TSNEP_XSK_RX_BUF_SIZE are multiple of 4 */ entry->properties = entry->len & TSNEP_DESC_LENGTH_MASK; entry->properties |= TSNEP_DESC_INTERRUPT_FLAG; if (index == rx->increment_owner_counter) { @@ -916,19 +1091,15 @@ static void tsnep_rx_activate(struct tsnep_rx *rx, int index) entry->desc->properties = __cpu_to_le32(entry->properties); } -static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse) +static int tsnep_rx_alloc(struct tsnep_rx *rx, int count, bool reuse) { - int index; bool alloc_failed = false; - bool enable = false; - int i; - int retval; + int i, index; for (i = 0; i < count && !alloc_failed; i++) { - index = (rx->write + i) % TSNEP_RING_SIZE; + index = (rx->write + i) & TSNEP_RING_MASK; - retval = tsnep_rx_alloc_buffer(rx, index); - if (unlikely(retval)) { + if (unlikely(tsnep_rx_alloc_buffer(rx, index))) { rx->alloc_failed++; alloc_failed = true; @@ -940,24 +1111,95 @@ static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse) } tsnep_rx_activate(rx, index); - - enable = true; } - if (enable) { - rx->write = (rx->write + i) % TSNEP_RING_SIZE; + if (i) + rx->write = (rx->write + i) & TSNEP_RING_MASK; - /* descriptor properties shall be valid before hardware is - * notified - */ - dma_wmb(); + return i; +} + +static int tsnep_rx_refill(struct tsnep_rx *rx, int count, bool reuse) +{ + int desc_refilled; + + desc_refilled = tsnep_rx_alloc(rx, count, reuse); + if (desc_refilled) + tsnep_rx_enable(rx); + + return desc_refilled; +} + +static void tsnep_rx_set_xdp(struct tsnep_rx *rx, struct tsnep_rx_entry *entry, + struct xdp_buff *xdp) +{ + entry->xdp = xdp; + entry->len = TSNEP_XSK_RX_BUF_SIZE; + entry->dma = xsk_buff_xdp_get_dma(entry->xdp); + entry->desc->rx = __cpu_to_le64(entry->dma); +} - iowrite32(TSNEP_CONTROL_RX_ENABLE, rx->addr + TSNEP_CONTROL); +static void tsnep_rx_reuse_buffer_zc(struct 
tsnep_rx *rx, int index) +{ + struct tsnep_rx_entry *entry = &rx->entry[index]; + struct tsnep_rx_entry *read = &rx->entry[rx->read]; + + tsnep_rx_set_xdp(rx, entry, read->xdp); + read->xdp = NULL; +} + +static int tsnep_rx_alloc_zc(struct tsnep_rx *rx, int count, bool reuse) +{ + u32 allocated; + int i; + + allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, count); + for (i = 0; i < allocated; i++) { + int index = (rx->write + i) & TSNEP_RING_MASK; + struct tsnep_rx_entry *entry = &rx->entry[index]; + + tsnep_rx_set_xdp(rx, entry, rx->xdp_batch[i]); + tsnep_rx_activate(rx, index); + } + if (i == 0) { + rx->alloc_failed++; + + if (reuse) { + tsnep_rx_reuse_buffer_zc(rx, rx->write); + tsnep_rx_activate(rx, rx->write); + } } + if (i) + rx->write = (rx->write + i) & TSNEP_RING_MASK; + return i; } +static void tsnep_rx_free_zc(struct tsnep_rx *rx) +{ + int i; + + for (i = 0; i < TSNEP_RING_SIZE; i++) { + struct tsnep_rx_entry *entry = &rx->entry[i]; + + if (entry->xdp) + xsk_buff_free(entry->xdp); + entry->xdp = NULL; + } +} + +static int tsnep_rx_refill_zc(struct tsnep_rx *rx, int count, bool reuse) +{ + int desc_refilled; + + desc_refilled = tsnep_rx_alloc_zc(rx, count, reuse); + if (desc_refilled) + tsnep_rx_enable(rx); + + return desc_refilled; +} + static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog, struct xdp_buff *xdp, int *status, struct netdev_queue *tx_nq, struct tsnep_tx *tx) @@ -969,11 +1211,6 @@ static bool tsnep_xdp_run_prog(struct tsnep_rx *rx, struct bpf_prog *prog, length = xdp->data_end - xdp->data_hard_start - XDP_PACKET_HEADROOM; act = bpf_prog_run_xdp(prog, xdp); - - /* Due xdp_adjust_tail: DMA sync for_device cover max len CPU touch */ - sync = xdp->data_end - xdp->data_hard_start - XDP_PACKET_HEADROOM; - sync = max(sync, length); - switch (act) { case XDP_PASS: return false; @@ -995,12 +1232,56 @@ out_failure: trace_xdp_exception(rx->adapter->netdev, prog, act); fallthrough; case XDP_DROP: + /* Due xdp_adjust_tail: DMA sync for_device cover max len CPU + * touch + */ + sync = xdp->data_end - xdp->data_hard_start - + XDP_PACKET_HEADROOM; + sync = max(sync, length); page_pool_put_page(rx->page_pool, virt_to_head_page(xdp->data), sync, true); return true; } } +static bool tsnep_xdp_run_prog_zc(struct tsnep_rx *rx, struct bpf_prog *prog, + struct xdp_buff *xdp, int *status, + struct netdev_queue *tx_nq, + struct tsnep_tx *tx) +{ + u32 act; + + act = bpf_prog_run_xdp(prog, xdp); + + /* XDP_REDIRECT is the main action for zero-copy */ + if (likely(act == XDP_REDIRECT)) { + if (xdp_do_redirect(rx->adapter->netdev, xdp, prog) < 0) + goto out_failure; + *status |= TSNEP_XDP_REDIRECT; + return true; + } + + switch (act) { + case XDP_PASS: + return false; + case XDP_TX: + if (!tsnep_xdp_xmit_back(rx->adapter, xdp, tx_nq, tx)) + goto out_failure; + *status |= TSNEP_XDP_TX; + return true; + default: + bpf_warn_invalid_xdp_action(rx->adapter->netdev, prog, act); + fallthrough; + case XDP_ABORTED: +out_failure: + trace_xdp_exception(rx->adapter->netdev, prog, act); + fallthrough; + case XDP_DROP: + xsk_buff_free(xdp); + return true; + } +} + static void tsnep_finalize_xdp(struct tsnep_adapter *adapter, int status, struct netdev_queue *tx_nq, struct tsnep_tx *tx) { @@ -1045,6 +1326,28 @@ static struct sk_buff *tsnep_build_skb(struct tsnep_rx *rx, struct page *page, return skb; } +static void tsnep_rx_page(struct tsnep_rx *rx, struct napi_struct *napi, + struct page *page, int length) +{ + struct sk_buff *skb; + + skb = tsnep_build_skb(rx, page, length); 
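
All of the "% TSNEP_RING_SIZE" to "& TSNEP_RING_MASK" conversions above depend on the ring size being a power of two, and tsnep_rx_desc_available() keeps one slot unused so that a full ring can be distinguished from an empty one. The standalone model below (illustrative names only, not driver code) shows both properties.

#include <assert.h>

#define RING_SIZE 256u			/* must be a power of two */
#define RING_MASK (RING_SIZE - 1)

/* number of free descriptors, with one slot always kept unused */
static unsigned int ring_free(unsigned int read, unsigned int write)
{
	return (read - write - 1) & RING_MASK;
}

int main(void)
{
	unsigned int i;

	/* masking is equivalent to modulo for power-of-two sizes */
	for (i = 0; i < 4 * RING_SIZE; i++)
		assert((i & RING_MASK) == (i % RING_SIZE));

	assert(ring_free(0, 0) == RING_SIZE - 1);	/* empty ring */
	assert(ring_free(5, 4) == 0);			/* full ring */
	return 0;
}

The mask form also documents the power-of-two assumption once, at the definition of TSNEP_RING_MASK, instead of at every index calculation.
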
+ if (skb) { + page_pool_release_page(rx->page_pool, page); + + rx->packets++; + rx->bytes += length; + if (skb->pkt_type == PACKET_MULTICAST) + rx->multicast++; + + napi_gro_receive(napi, skb); + } else { + page_pool_recycle_direct(rx->page_pool, page); + + rx->dropped++; + } +} + static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi, int budget) { @@ -1054,7 +1357,6 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi, struct netdev_queue *tx_nq; struct bpf_prog *prog; struct xdp_buff xdp; - struct sk_buff *skb; struct tsnep_tx *tx; int desc_available; int xdp_status = 0; @@ -1090,7 +1392,7 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi, * empty RX ring, thus buffer cannot be used for * RX processing */ - rx->read = (rx->read + 1) % TSNEP_RING_SIZE; + rx->read = (rx->read + 1) & TSNEP_RING_MASK; desc_available++; rx->dropped++; @@ -1117,7 +1419,7 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi, */ length -= TSNEP_RX_INLINE_METADATA_SIZE; - rx->read = (rx->read + 1) % TSNEP_RING_SIZE; + rx->read = (rx->read + 1) & TSNEP_RING_MASK; desc_available++; if (prog) { @@ -1139,31 +1441,135 @@ static int tsnep_rx_poll(struct tsnep_rx *rx, struct napi_struct *napi, } } - skb = tsnep_build_skb(rx, entry->page, length); - if (skb) { - page_pool_release_page(rx->page_pool, entry->page); + tsnep_rx_page(rx, napi, entry->page, length); + entry->page = NULL; + } + + if (xdp_status) + tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); + + if (desc_available) + tsnep_rx_refill(rx, desc_available, false); - rx->packets++; - rx->bytes += length; - if (skb->pkt_type == PACKET_MULTICAST) - rx->multicast++; + return done; +} - napi_gro_receive(napi, skb); - } else { - page_pool_recycle_direct(rx->page_pool, entry->page); +static int tsnep_rx_poll_zc(struct tsnep_rx *rx, struct napi_struct *napi, + int budget) +{ + struct tsnep_rx_entry *entry; + struct netdev_queue *tx_nq; + struct bpf_prog *prog; + struct tsnep_tx *tx; + int desc_available; + int xdp_status = 0; + struct page *page; + int done = 0; + int length; + + desc_available = tsnep_rx_desc_available(rx); + prog = READ_ONCE(rx->adapter->xdp_prog); + if (prog) { + tx_nq = netdev_get_tx_queue(rx->adapter->netdev, + rx->tx_queue_index); + tx = &rx->adapter->tx[rx->tx_queue_index]; + } + + while (likely(done < budget) && (rx->read != rx->write)) { + entry = &rx->entry[rx->read]; + if ((__le32_to_cpu(entry->desc_wb->properties) & + TSNEP_DESC_OWNER_COUNTER_MASK) != + (entry->properties & TSNEP_DESC_OWNER_COUNTER_MASK)) + break; + done++; + + if (desc_available >= TSNEP_RING_RX_REFILL) { + bool reuse = desc_available >= TSNEP_RING_RX_REUSE; + + desc_available -= tsnep_rx_refill_zc(rx, desc_available, + reuse); + if (!entry->xdp) { + /* buffer has been reused for refill to prevent + * empty RX ring, thus buffer cannot be used for + * RX processing + */ + rx->read = (rx->read + 1) & TSNEP_RING_MASK; + desc_available++; + + rx->dropped++; + + continue; + } + } + + /* descriptor properties shall be read first, because valid data + * is signaled there + */ + dma_rmb(); + + prefetch(entry->xdp->data); + length = __le32_to_cpu(entry->desc_wb->properties) & + TSNEP_DESC_LENGTH_MASK; + xsk_buff_set_size(entry->xdp, length); + xsk_buff_dma_sync_for_cpu(entry->xdp, rx->xsk_pool); + + /* RX metadata with timestamps is in front of actual data, + * subtract metadata size to get length of actual data and + * consider metadata size as offset of actual data during RX + * processing 
+ */ + length -= TSNEP_RX_INLINE_METADATA_SIZE; + + rx->read = (rx->read + 1) & TSNEP_RING_MASK; + desc_available++; + + if (prog) { + bool consume; + + entry->xdp->data += TSNEP_RX_INLINE_METADATA_SIZE; + entry->xdp->data_meta += TSNEP_RX_INLINE_METADATA_SIZE; + + consume = tsnep_xdp_run_prog_zc(rx, prog, entry->xdp, + &xdp_status, tx_nq, tx); + if (consume) { + rx->packets++; + rx->bytes += length; + + entry->xdp = NULL; + continue; + } + } + + page = page_pool_dev_alloc_pages(rx->page_pool); + if (page) { + memcpy(page_address(page) + TSNEP_RX_OFFSET, + entry->xdp->data - TSNEP_RX_INLINE_METADATA_SIZE, + length + TSNEP_RX_INLINE_METADATA_SIZE); + tsnep_rx_page(rx, napi, page, length); + } else { rx->dropped++; } - entry->page = NULL; + xsk_buff_free(entry->xdp); + entry->xdp = NULL; } if (xdp_status) tsnep_finalize_xdp(rx->adapter, xdp_status, tx_nq, tx); if (desc_available) - tsnep_rx_refill(rx, desc_available, false); + desc_available -= tsnep_rx_refill_zc(rx, desc_available, false); - return done; + if (xsk_uses_need_wakeup(rx->xsk_pool)) { + if (desc_available) + xsk_set_rx_need_wakeup(rx->xsk_pool); + else + xsk_clear_rx_need_wakeup(rx->xsk_pool); + + return done; + } + + return desc_available ? budget : done; } static bool tsnep_rx_pending(struct tsnep_rx *rx) @@ -1181,44 +1587,125 @@ static bool tsnep_rx_pending(struct tsnep_rx *rx) return false; } -static int tsnep_rx_open(struct tsnep_adapter *adapter, void __iomem *addr, - int queue_index, struct tsnep_rx *rx) +static int tsnep_rx_open(struct tsnep_rx *rx) { - dma_addr_t dma; + int desc_available; int retval; - memset(rx, 0, sizeof(*rx)); - rx->adapter = adapter; - rx->addr = addr; - rx->queue_index = queue_index; - - retval = tsnep_rx_ring_init(rx); + retval = tsnep_rx_ring_create(rx); if (retval) return retval; - dma = rx->entry[0].desc_dma | TSNEP_RESET_OWNER_COUNTER; - iowrite32(DMA_ADDR_LOW(dma), rx->addr + TSNEP_RX_DESC_ADDR_LOW); - iowrite32(DMA_ADDR_HIGH(dma), rx->addr + TSNEP_RX_DESC_ADDR_HIGH); - rx->owner_counter = 1; - rx->increment_owner_counter = TSNEP_RING_SIZE - 1; + tsnep_rx_init(rx); - tsnep_rx_refill(rx, tsnep_rx_desc_available(rx), false); + desc_available = tsnep_rx_desc_available(rx); + if (rx->xsk_pool) + retval = tsnep_rx_alloc_zc(rx, desc_available, false); + else + retval = tsnep_rx_alloc(rx, desc_available, false); + if (retval != desc_available) { + retval = -ENOMEM; + + goto alloc_failed; + } + + /* prealloc pages to prevent allocation failures when XSK pool is + * disabled at runtime + */ + if (rx->xsk_pool) { + retval = tsnep_rx_alloc_page_buffer(rx); + if (retval) + goto alloc_failed; + } return 0; + +alloc_failed: + tsnep_rx_ring_cleanup(rx); + return retval; } static void tsnep_rx_close(struct tsnep_rx *rx) { - u32 val; - - iowrite32(TSNEP_CONTROL_RX_DISABLE, rx->addr + TSNEP_CONTROL); - readx_poll_timeout(ioread32, rx->addr + TSNEP_CONTROL, val, - ((val & TSNEP_CONTROL_RX_ENABLE) == 0), 10000, - 1000000); + if (rx->xsk_pool) + tsnep_rx_free_page_buffer(rx); tsnep_rx_ring_cleanup(rx); } +static void tsnep_rx_reopen(struct tsnep_rx *rx) +{ + struct page **page = rx->page_buffer; + int i; + + tsnep_rx_init(rx); + + for (i = 0; i < TSNEP_RING_SIZE; i++) { + struct tsnep_rx_entry *entry = &rx->entry[i]; + + /* defined initial values for properties are required for + * correct owner counter checking + */ + entry->desc->properties = 0; + entry->desc_wb->properties = 0; + + /* prevent allocation failures by reusing kept pages */ + if (*page) { + tsnep_rx_set_page(rx, entry, *page); + 
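
tsnep_rx_poll_zc() above only leaves the XSK need_wakeup flag set when the fill queue ran dry, which signals user space that the kernel will not make further RX progress on its own. The matching user-space check might look like the sketch below, assuming libxdp's xsk.h helpers (xsk_ring_prod__needs_wakeup() and xsk_socket__fd()); the empty recvfrom() is the conventional RX wakeup syscall and ends up in the driver's ndo_xsk_wakeup() callback added further down.

#include <sys/socket.h>
#include <xdp/xsk.h>		/* libxdp; older setups ship this as <bpf/xsk.h> */

/* after topping up the fill ring, kick the kernel only if it asked for it */
static void xsk_kick_rx(struct xsk_socket *xsk, struct xsk_ring_prod *fill)
{
	if (xsk_ring_prod__needs_wakeup(fill))
		recvfrom(xsk_socket__fd(xsk), NULL, 0, MSG_DONTWAIT,
			 NULL, NULL);
}
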
tsnep_rx_activate(rx, rx->write); + rx->write++; + + *page = NULL; + page++; + } + } +} + +static void tsnep_rx_reopen_xsk(struct tsnep_rx *rx) +{ + struct page **page = rx->page_buffer; + u32 allocated; + int i; + + tsnep_rx_init(rx); + + /* alloc all ring entries except the last one, because ring cannot be + * filled completely, as many buffers as possible is enough as wakeup is + * done if new buffers are available + */ + allocated = xsk_buff_alloc_batch(rx->xsk_pool, rx->xdp_batch, + TSNEP_RING_SIZE - 1); + + for (i = 0; i < TSNEP_RING_SIZE; i++) { + struct tsnep_rx_entry *entry = &rx->entry[i]; + + /* keep pages to prevent allocation failures when xsk is + * disabled + */ + if (entry->page) { + *page = entry->page; + entry->page = NULL; + + page++; + } + + /* defined initial values for properties are required for + * correct owner counter checking + */ + entry->desc->properties = 0; + entry->desc_wb->properties = 0; + + if (allocated) { + tsnep_rx_set_xdp(rx, entry, + rx->xdp_batch[allocated - 1]); + tsnep_rx_activate(rx, rx->write); + rx->write++; + + allocated--; + } + } +} + static bool tsnep_pending(struct tsnep_queue *queue) { if (queue->tx && tsnep_tx_pending(queue->tx)) @@ -1241,7 +1728,9 @@ static int tsnep_poll(struct napi_struct *napi, int budget) complete = tsnep_tx_poll(queue->tx, budget); if (queue->rx) { - done = tsnep_rx_poll(queue->rx, napi, budget); + done = queue->rx->xsk_pool ? + tsnep_rx_poll_zc(queue->rx, napi, budget) : + tsnep_rx_poll(queue->rx, napi, budget); if (done >= budget) complete = false; } @@ -1322,8 +1811,12 @@ static void tsnep_queue_close(struct tsnep_queue *queue, bool first) tsnep_free_irq(queue, first); - if (rx && xdp_rxq_info_is_reg(&rx->xdp_rxq)) - xdp_rxq_info_unreg(&rx->xdp_rxq); + if (rx) { + if (xdp_rxq_info_is_reg(&rx->xdp_rxq)) + xdp_rxq_info_unreg(&rx->xdp_rxq); + if (xdp_rxq_info_is_reg(&rx->xdp_rxq_zc)) + xdp_rxq_info_unreg(&rx->xdp_rxq_zc); + } netif_napi_del(&queue->napi); } @@ -1335,8 +1828,6 @@ static int tsnep_queue_open(struct tsnep_adapter *adapter, struct tsnep_tx *tx = queue->tx; int retval; - queue->adapter = adapter; - netif_napi_add(adapter->netdev, &queue->napi, tsnep_poll); if (rx) { @@ -1348,6 +1839,10 @@ static int tsnep_queue_open(struct tsnep_adapter *adapter, else rx->tx_queue_index = 0; + /* prepare both memory models to eliminate possible registration + * errors when memory model is switched between page pool and + * XSK pool during runtime + */ retval = xdp_rxq_info_reg(&rx->xdp_rxq, adapter->netdev, rx->queue_index, queue->napi.napi_id); if (retval) @@ -1357,6 +1852,17 @@ static int tsnep_queue_open(struct tsnep_adapter *adapter, rx->page_pool); if (retval) goto failed; + retval = xdp_rxq_info_reg(&rx->xdp_rxq_zc, adapter->netdev, + rx->queue_index, queue->napi.napi_id); + if (retval) + goto failed; + retval = xdp_rxq_info_reg_mem_model(&rx->xdp_rxq_zc, + MEM_TYPE_XSK_BUFF_POOL, + NULL); + if (retval) + goto failed; + if (rx->xsk_pool) + xsk_pool_set_rxq_info(rx->xsk_pool, &rx->xdp_rxq_zc); } retval = tsnep_request_irq(queue, first); @@ -1374,30 +1880,48 @@ failed: return retval; } +static void tsnep_queue_enable(struct tsnep_queue *queue) +{ + napi_enable(&queue->napi); + tsnep_enable_irq(queue->adapter, queue->irq_mask); + + if (queue->tx) + tsnep_tx_enable(queue->tx); + + if (queue->rx) + tsnep_rx_enable(queue->rx); +} + +static void tsnep_queue_disable(struct tsnep_queue *queue) +{ + if (queue->tx) + tsnep_tx_disable(queue->tx, &queue->napi); + + napi_disable(&queue->napi); + 
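
The zero-copy receive path above treats XDP_REDIRECT as the common case, which in practice means the attached XDP program redirects frames into an XSKMAP keyed by RX queue index. A minimal program of that shape is sketched below (map name, size and section name are illustrative); frames on queues without a bound AF_XDP socket simply fall through to the regular stack.

#include <linux/bpf.h>
#include <bpf/bpf_helpers.h>

struct {
	__uint(type, BPF_MAP_TYPE_XSKMAP);
	__uint(max_entries, 64);
	__type(key, __u32);
	__type(value, __u32);
} xsks_map SEC(".maps");

SEC("xdp")
int xsk_redirect_prog(struct xdp_md *ctx)
{
	__u32 queue = ctx->rx_queue_index;

	/* redirect to the AF_XDP socket bound to this queue, if any;
	 * otherwise let the frame continue into the normal stack
	 */
	if (bpf_map_lookup_elem(&xsks_map, &queue))
		return bpf_redirect_map(&xsks_map, queue, XDP_PASS);

	return XDP_PASS;
}

char _license[] SEC("license") = "GPL";
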
tsnep_disable_irq(queue->adapter, queue->irq_mask); + + /* disable RX after NAPI polling has been disabled, because RX can be + * enabled during NAPI polling + */ + if (queue->rx) + tsnep_rx_disable(queue->rx); +} + static int tsnep_netdev_open(struct net_device *netdev) { struct tsnep_adapter *adapter = netdev_priv(netdev); - int tx_queue_index = 0; - int rx_queue_index = 0; - void __iomem *addr; int i, retval; for (i = 0; i < adapter->num_queues; i++) { if (adapter->queue[i].tx) { - addr = adapter->addr + TSNEP_QUEUE(tx_queue_index); - retval = tsnep_tx_open(adapter, addr, tx_queue_index, - adapter->queue[i].tx); + retval = tsnep_tx_open(adapter->queue[i].tx); if (retval) goto failed; - tx_queue_index++; } if (adapter->queue[i].rx) { - addr = adapter->addr + TSNEP_QUEUE(rx_queue_index); - retval = tsnep_rx_open(adapter, addr, rx_queue_index, - adapter->queue[i].rx); + retval = tsnep_rx_open(adapter->queue[i].rx); if (retval) goto failed; - rx_queue_index++; } retval = tsnep_queue_open(adapter, &adapter->queue[i], i == 0); @@ -1419,11 +1943,8 @@ static int tsnep_netdev_open(struct net_device *netdev) if (retval) goto phy_failed; - for (i = 0; i < adapter->num_queues; i++) { - napi_enable(&adapter->queue[i].napi); - - tsnep_enable_irq(adapter, adapter->queue[i].irq_mask); - } + for (i = 0; i < adapter->num_queues; i++) + tsnep_queue_enable(&adapter->queue[i]); return 0; @@ -1450,9 +1971,7 @@ static int tsnep_netdev_close(struct net_device *netdev) tsnep_phy_close(adapter); for (i = 0; i < adapter->num_queues; i++) { - tsnep_disable_irq(adapter, adapter->queue[i].irq_mask); - - napi_disable(&adapter->queue[i].napi); + tsnep_queue_disable(&adapter->queue[i]); tsnep_queue_close(&adapter->queue[i], i == 0); @@ -1465,6 +1984,69 @@ static int tsnep_netdev_close(struct net_device *netdev) return 0; } +int tsnep_enable_xsk(struct tsnep_queue *queue, struct xsk_buff_pool *pool) +{ + bool running = netif_running(queue->adapter->netdev); + u32 frame_size; + + frame_size = xsk_pool_get_rx_frame_size(pool); + if (frame_size < TSNEP_XSK_RX_BUF_SIZE) + return -EOPNOTSUPP; + + queue->rx->page_buffer = kcalloc(TSNEP_RING_SIZE, + sizeof(*queue->rx->page_buffer), + GFP_KERNEL); + if (!queue->rx->page_buffer) + return -ENOMEM; + queue->rx->xdp_batch = kcalloc(TSNEP_RING_SIZE, + sizeof(*queue->rx->xdp_batch), + GFP_KERNEL); + if (!queue->rx->xdp_batch) { + kfree(queue->rx->page_buffer); + queue->rx->page_buffer = NULL; + + return -ENOMEM; + } + + xsk_pool_set_rxq_info(pool, &queue->rx->xdp_rxq_zc); + + if (running) + tsnep_queue_disable(queue); + + queue->tx->xsk_pool = pool; + queue->rx->xsk_pool = pool; + + if (running) { + tsnep_rx_reopen_xsk(queue->rx); + tsnep_queue_enable(queue); + } + + return 0; +} + +void tsnep_disable_xsk(struct tsnep_queue *queue) +{ + bool running = netif_running(queue->adapter->netdev); + + if (running) + tsnep_queue_disable(queue); + + tsnep_rx_free_zc(queue->rx); + + queue->rx->xsk_pool = NULL; + queue->tx->xsk_pool = NULL; + + if (running) { + tsnep_rx_reopen(queue->rx); + tsnep_queue_enable(queue); + } + + kfree(queue->rx->xdp_batch); + queue->rx->xdp_batch = NULL; + kfree(queue->rx->page_buffer); + queue->rx->page_buffer = NULL; +} + static netdev_tx_t tsnep_netdev_xmit_frame(struct sk_buff *skb, struct net_device *netdev) { @@ -1614,6 +2196,9 @@ static int tsnep_netdev_bpf(struct net_device *dev, struct netdev_bpf *bpf) switch (bpf->command) { case XDP_SETUP_PROG: return tsnep_xdp_setup_prog(adapter, bpf->prog, bpf->extack); + case XDP_SETUP_XSK_POOL: + return 
tsnep_xdp_setup_pool(adapter, bpf->xsk.pool, + bpf->xsk.queue_id); default: return -EOPNOTSUPP; } @@ -1668,6 +2253,24 @@ static int tsnep_netdev_xdp_xmit(struct net_device *dev, int n, return nxmit; } +static int tsnep_netdev_xsk_wakeup(struct net_device *dev, u32 queue_id, + u32 flags) +{ + struct tsnep_adapter *adapter = netdev_priv(dev); + struct tsnep_queue *queue; + + if (queue_id >= adapter->num_rx_queues || + queue_id >= adapter->num_tx_queues) + return -EINVAL; + + queue = &adapter->queue[queue_id]; + + if (!napi_if_scheduled_mark_missed(&queue->napi)) + napi_schedule(&queue->napi); + + return 0; +} + static const struct net_device_ops tsnep_netdev_ops = { .ndo_open = tsnep_netdev_open, .ndo_stop = tsnep_netdev_close, @@ -1681,6 +2284,7 @@ static const struct net_device_ops tsnep_netdev_ops = { .ndo_setup_tc = tsnep_tc_setup, .ndo_bpf = tsnep_netdev_bpf, .ndo_xdp_xmit = tsnep_netdev_xdp_xmit, + .ndo_xsk_wakeup = tsnep_netdev_xsk_wakeup, }; static int tsnep_mac_init(struct tsnep_adapter *adapter) @@ -1796,9 +2400,16 @@ static int tsnep_queue_init(struct tsnep_adapter *adapter, int queue_count) adapter->num_tx_queues = 1; adapter->num_rx_queues = 1; adapter->num_queues = 1; + adapter->queue[0].adapter = adapter; adapter->queue[0].irq = retval; adapter->queue[0].tx = &adapter->tx[0]; + adapter->queue[0].tx->adapter = adapter; + adapter->queue[0].tx->addr = adapter->addr + TSNEP_QUEUE(0); + adapter->queue[0].tx->queue_index = 0; adapter->queue[0].rx = &adapter->rx[0]; + adapter->queue[0].rx->adapter = adapter; + adapter->queue[0].rx->addr = adapter->addr + TSNEP_QUEUE(0); + adapter->queue[0].rx->queue_index = 0; adapter->queue[0].irq_mask = irq_mask; adapter->queue[0].irq_delay_addr = adapter->addr + ECM_INT_DELAY; retval = tsnep_set_irq_coalesce(&adapter->queue[0], @@ -1820,9 +2431,16 @@ static int tsnep_queue_init(struct tsnep_adapter *adapter, int queue_count) adapter->num_tx_queues++; adapter->num_rx_queues++; adapter->num_queues++; + adapter->queue[i].adapter = adapter; adapter->queue[i].irq = retval; adapter->queue[i].tx = &adapter->tx[i]; + adapter->queue[i].tx->adapter = adapter; + adapter->queue[i].tx->addr = adapter->addr + TSNEP_QUEUE(i); + adapter->queue[i].tx->queue_index = i; adapter->queue[i].rx = &adapter->rx[i]; + adapter->queue[i].rx->adapter = adapter; + adapter->queue[i].rx->addr = adapter->addr + TSNEP_QUEUE(i); + adapter->queue[i].rx->queue_index = i; adapter->queue[i].irq_mask = irq_mask << (ECM_INT_TXRX_SHIFT * i); adapter->queue[i].irq_delay_addr = @@ -1927,7 +2545,8 @@ static int tsnep_probe(struct platform_device *pdev) netdev->xdp_features = NETDEV_XDP_ACT_BASIC | NETDEV_XDP_ACT_REDIRECT | NETDEV_XDP_ACT_NDO_XMIT | - NETDEV_XDP_ACT_NDO_XMIT_SG; + NETDEV_XDP_ACT_NDO_XMIT_SG | + NETDEV_XDP_ACT_XSK_ZEROCOPY; /* carrier off reporting is important to ethtool even BEFORE open */ netif_carrier_off(netdev); diff --git a/drivers/net/ethernet/engleder/tsnep_xdp.c b/drivers/net/ethernet/engleder/tsnep_xdp.c index 4d14cb1fd772..c0513848c547 100644 --- a/drivers/net/ethernet/engleder/tsnep_xdp.c +++ b/drivers/net/ethernet/engleder/tsnep_xdp.c @@ -17,3 +17,69 @@ int tsnep_xdp_setup_prog(struct tsnep_adapter *adapter, struct bpf_prog *prog, return 0; } + +static int tsnep_xdp_enable_pool(struct tsnep_adapter *adapter, + struct xsk_buff_pool *pool, u16 queue_id) +{ + struct tsnep_queue *queue; + int retval; + + if (queue_id >= adapter->num_rx_queues || + queue_id >= adapter->num_tx_queues) + return -EINVAL; + + queue = &adapter->queue[queue_id]; + if (queue->rx->queue_index 
!= queue_id || + queue->tx->queue_index != queue_id) { + netdev_err(adapter->netdev, + "XSK support only for TX/RX queue pairs\n"); + + return -EOPNOTSUPP; + } + + retval = xsk_pool_dma_map(pool, adapter->dmadev, + DMA_ATTR_SKIP_CPU_SYNC); + if (retval) { + netdev_err(adapter->netdev, "failed to map XSK pool\n"); + + return retval; + } + + retval = tsnep_enable_xsk(queue, pool); + if (retval) { + xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC); + + return retval; + } + + return 0; +} + +static int tsnep_xdp_disable_pool(struct tsnep_adapter *adapter, u16 queue_id) +{ + struct xsk_buff_pool *pool; + struct tsnep_queue *queue; + + if (queue_id >= adapter->num_rx_queues || + queue_id >= adapter->num_tx_queues) + return -EINVAL; + + pool = xsk_get_pool_from_qid(adapter->netdev, queue_id); + if (!pool) + return -EINVAL; + + queue = &adapter->queue[queue_id]; + + tsnep_disable_xsk(queue); + + xsk_pool_dma_unmap(pool, DMA_ATTR_SKIP_CPU_SYNC); + + return 0; +} + +int tsnep_xdp_setup_pool(struct tsnep_adapter *adapter, + struct xsk_buff_pool *pool, u16 queue_id) +{ + return pool ? tsnep_xdp_enable_pool(adapter, pool, queue_id) : + tsnep_xdp_disable_pool(adapter, queue_id); +} diff --git a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c index 9318a2554056..1fa676308c5e 100644 --- a/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c +++ b/drivers/net/ethernet/freescale/dpaa/dpaa_eth.c @@ -1482,13 +1482,8 @@ static int dpaa_enable_tx_csum(struct dpaa_priv *priv, parse_result = (struct fman_prs_result *)parse_results; /* If we're dealing with VLAN, get the real Ethernet type */ - if (ethertype == ETH_P_8021Q) { - /* We can't always assume the MAC header is set correctly - * by the stack, so reset to beginning of skb->data - */ - skb_reset_mac_header(skb); - ethertype = ntohs(vlan_eth_hdr(skb)->h_vlan_encapsulated_proto); - } + if (ethertype == ETH_P_8021Q) + ethertype = ntohs(skb_vlan_eth_hdr(skb)->h_vlan_encapsulated_proto); /* Fill in the relevant L3 parse result fields * and read the L4 protocol type diff --git a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c index 5caea154362f..7356ad965487 100644 --- a/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c +++ b/drivers/net/ethernet/hisilicon/hns3/hns3_enet.c @@ -1532,7 +1532,7 @@ static int hns3_handle_vtags(struct hns3_enet_ring *tx_ring, if (unlikely(rc < 0)) return rc; - vhdr = (struct vlan_ethhdr *)skb->data; + vhdr = skb_vlan_eth_hdr(skb); vhdr->h_vlan_TCI |= cpu_to_be16((skb->priority << VLAN_PRIO_SHIFT) & VLAN_PRIO_MASK); diff --git a/drivers/net/ethernet/intel/i40e/i40e_txrx.c b/drivers/net/ethernet/intel/i40e/i40e_txrx.c index c8c2cbaa0ede..8b8bf4880faa 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_txrx.c +++ b/drivers/net/ethernet/intel/i40e/i40e_txrx.c @@ -3063,7 +3063,7 @@ static inline int i40e_tx_prepare_vlan_flags(struct sk_buff *skb, rc = skb_cow_head(skb, 0); if (rc < 0) return rc; - vhdr = (struct vlan_ethhdr *)skb->data; + vhdr = skb_vlan_eth_hdr(skb); vhdr->h_vlan_TCI = htons(tx_flags >> I40E_TX_FLAGS_VLAN_SHIFT); } else { diff --git a/drivers/net/ethernet/intel/ice/ice_common.c b/drivers/net/ethernet/intel/ice/ice_common.c index c2fda4fa4188..0157f6e98d3e 100644 --- a/drivers/net/ethernet/intel/ice/ice_common.c +++ b/drivers/net/ethernet/intel/ice/ice_common.c @@ -1619,7 +1619,6 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq, { struct ice_aq_desc desc_cpy; bool is_cmd_for_retry; - u8 *buf_cpy = NULL; u8 idx = 0; 
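
tsnep_xdp_enable_pool() above is what eventually runs when user space binds an AF_XDP socket in zero-copy mode to one of the driver's TX/RX queue pairs. A condensed sketch of that user-space side is shown below, assuming libxdp's xsk_umem__create() and xsk_socket__create(); the frame count, interface name handling and error paths are placeholders.

#include <stdlib.h>
#include <unistd.h>
#include <linux/if_xdp.h>
#include <xdp/xsk.h>		/* libxdp; older setups ship this as <bpf/xsk.h> */

#define NUM_FRAMES	4096
#define FRAME_SIZE	XSK_UMEM__DEFAULT_FRAME_SIZE

static struct xsk_socket *setup_zc_socket(const char *ifname, int queue_id)
{
	/* rings are static only to keep the sketch short; real code keeps
	 * them together with the socket for the lifetime of the session
	 */
	static struct xsk_ring_prod fill, tx;
	static struct xsk_ring_cons comp, rx;
	struct xsk_socket_config cfg = {
		.rx_size = XSK_RING_CONS__DEFAULT_NUM_DESCS,
		.tx_size = XSK_RING_PROD__DEFAULT_NUM_DESCS,
		/* ask for zero-copy; the bind fails if the driver lacks it */
		.bind_flags = XDP_USE_NEED_WAKEUP | XDP_ZEROCOPY,
	};
	struct xsk_socket *xsk = NULL;
	struct xsk_umem *umem;
	void *buf;

	if (posix_memalign(&buf, getpagesize(), NUM_FRAMES * FRAME_SIZE))
		return NULL;

	if (xsk_umem__create(&umem, buf, NUM_FRAMES * FRAME_SIZE,
			     &fill, &comp, NULL))
		return NULL;

	/* this bind is what reaches ndo_bpf(XDP_SETUP_XSK_POOL) in the driver */
	if (xsk_socket__create(&xsk, ifname, queue_id, umem, &rx, &tx, &cfg))
		return NULL;

	return xsk;
}

The fill ring still has to be populated with UMEM frames before RX traffic can be delivered; the wakeup sketch earlier shows the kick that goes with that refill.
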
u16 opcode; int status; @@ -1629,11 +1628,8 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq, memset(&desc_cpy, 0, sizeof(desc_cpy)); if (is_cmd_for_retry) { - if (buf) { - buf_cpy = kzalloc(buf_size, GFP_KERNEL); - if (!buf_cpy) - return -ENOMEM; - } + /* All retryable cmds are direct, without buf. */ + WARN_ON(buf); memcpy(&desc_cpy, desc, sizeof(desc_cpy)); } @@ -1645,17 +1641,12 @@ ice_sq_send_cmd_retry(struct ice_hw *hw, struct ice_ctl_q_info *cq, hw->adminq.sq_last_status != ICE_AQ_RC_EBUSY) break; - if (buf_cpy) - memcpy(buf, buf_cpy, buf_size); - memcpy(desc, &desc_cpy, sizeof(desc_cpy)); - mdelay(ICE_SQ_SEND_DELAY_TIME_MS); + msleep(ICE_SQ_SEND_DELAY_TIME_MS); } while (++idx < ICE_SQ_SEND_MAX_EXECUTE); - kfree(buf_cpy); - return status; } @@ -1992,19 +1983,19 @@ ice_acquire_res_exit: */ void ice_release_res(struct ice_hw *hw, enum ice_aq_res_ids res) { - u32 total_delay = 0; + unsigned long timeout; int status; - status = ice_aq_release_res(hw, res, 0, NULL); - /* there are some rare cases when trying to release the resource * results in an admin queue timeout, so handle them correctly */ - while ((status == -EIO) && (total_delay < hw->adminq.sq_cmd_timeout)) { - mdelay(1); + timeout = jiffies + 10 * ICE_CTL_Q_SQ_CMD_TIMEOUT; + do { status = ice_aq_release_res(hw, res, 0, NULL); - total_delay++; - } + if (status != -EIO) + break; + usleep_range(1000, 2000); + } while (time_before(jiffies, timeout)); } /** diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.c b/drivers/net/ethernet/intel/ice/ice_controlq.c index 6bcfee295991..d2faf1baad2f 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.c +++ b/drivers/net/ethernet/intel/ice/ice_controlq.c @@ -637,9 +637,6 @@ static int ice_init_ctrlq(struct ice_hw *hw, enum ice_ctl_q q_type) return -EIO; } - /* setup SQ command write back timeout */ - cq->sq_cmd_timeout = ICE_CTL_Q_SQ_CMD_TIMEOUT; - /* allocate the ATQ */ ret_code = ice_init_sq(hw, cq); if (ret_code) @@ -967,7 +964,7 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, struct ice_aq_desc *desc_on_ring; bool cmd_completed = false; struct ice_sq_cd *details; - u32 total_delay = 0; + unsigned long timeout; int status = 0; u16 retval = 0; u32 val = 0; @@ -1060,13 +1057,14 @@ ice_sq_send_cmd(struct ice_hw *hw, struct ice_ctl_q_info *cq, cq->sq.next_to_use = 0; wr32(hw, cq->sq.tail, cq->sq.next_to_use); + timeout = jiffies + ICE_CTL_Q_SQ_CMD_TIMEOUT; do { if (ice_sq_done(hw, cq)) break; - udelay(ICE_CTL_Q_SQ_CMD_USEC); - total_delay++; - } while (total_delay < cq->sq_cmd_timeout); + usleep_range(ICE_CTL_Q_SQ_CMD_USEC, + ICE_CTL_Q_SQ_CMD_USEC * 3 / 2); + } while (time_before(jiffies, timeout)); /* if ready, copy the desc back to temp */ if (ice_sq_done(hw, cq)) { diff --git a/drivers/net/ethernet/intel/ice/ice_controlq.h b/drivers/net/ethernet/intel/ice/ice_controlq.h index c07e9cc9fc6e..950b7f4a7a05 100644 --- a/drivers/net/ethernet/intel/ice/ice_controlq.h +++ b/drivers/net/ethernet/intel/ice/ice_controlq.h @@ -34,7 +34,7 @@ enum ice_ctl_q { }; /* Control Queue timeout settings - max delay 1s */ -#define ICE_CTL_Q_SQ_CMD_TIMEOUT 10000 /* Count 10000 times */ +#define ICE_CTL_Q_SQ_CMD_TIMEOUT HZ /* Wait max 1s */ #define ICE_CTL_Q_SQ_CMD_USEC 100 /* Check every 100usec */ #define ICE_CTL_Q_ADMIN_INIT_TIMEOUT 10 /* Count 10 times */ #define ICE_CTL_Q_ADMIN_INIT_MSEC 100 /* Check every 100msec */ @@ -87,7 +87,6 @@ struct ice_ctl_q_info { enum ice_ctl_q qtype; struct ice_ctl_q_ring rq; /* receive queue */ struct ice_ctl_q_ring sq; /* send queue 
*/ - u32 sq_cmd_timeout; /* send queue cmd write back timeout */ u16 num_rq_entries; /* receive queue depth */ u16 num_sq_entries; /* send queue depth */ u16 rq_buf_size; /* receive queue buffer size */ diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.c b/drivers/net/ethernet/intel/ice/ice_gnss.c index 8dec748bb53a..2ea8a2b11bcd 100644 --- a/drivers/net/ethernet/intel/ice/ice_gnss.c +++ b/drivers/net/ethernet/intel/ice/ice_gnss.c @@ -117,6 +117,7 @@ static void ice_gnss_read(struct kthread_work *work) { struct gnss_serial *gnss = container_of(work, struct gnss_serial, read_work.work); + unsigned long delay = ICE_GNSS_POLL_DATA_DELAY_TIME; unsigned int i, bytes_read, data_len, count; struct ice_aqc_link_topo_addr link_topo; struct ice_pf *pf; @@ -136,11 +137,6 @@ static void ice_gnss_read(struct kthread_work *work) return; hw = &pf->hw; - buf = (char *)get_zeroed_page(GFP_KERNEL); - if (!buf) { - err = -ENOMEM; - goto exit; - } memset(&link_topo, 0, sizeof(struct ice_aqc_link_topo_addr)); link_topo.topo_params.index = ICE_E810T_GNSS_I2C_BUS; @@ -151,25 +147,24 @@ static void ice_gnss_read(struct kthread_work *work) i2c_params = ICE_GNSS_UBX_DATA_LEN_WIDTH | ICE_AQC_I2C_USE_REPEATED_START; - /* Read data length in a loop, when it's not 0 the data is ready */ - for (i = 0; i < ICE_MAX_UBX_READ_TRIES; i++) { - err = ice_aq_read_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR, - cpu_to_le16(ICE_GNSS_UBX_DATA_LEN_H), - i2c_params, (u8 *)&data_len_b, NULL); - if (err) - goto exit_buf; + err = ice_aq_read_i2c(hw, link_topo, ICE_GNSS_UBX_I2C_BUS_ADDR, + cpu_to_le16(ICE_GNSS_UBX_DATA_LEN_H), + i2c_params, (u8 *)&data_len_b, NULL); + if (err) + goto requeue; - data_len = be16_to_cpu(data_len_b); - if (data_len != 0 && data_len != U16_MAX) - break; + data_len = be16_to_cpu(data_len_b); + if (data_len == 0 || data_len == U16_MAX) + goto requeue; - mdelay(10); - } + /* The u-blox has data_len bytes for us to read */ data_len = min_t(typeof(data_len), data_len, PAGE_SIZE); - if (!data_len) { + + buf = (char *)get_zeroed_page(GFP_KERNEL); + if (!buf) { err = -ENOMEM; - goto exit_buf; + goto requeue; } /* Read received data */ @@ -183,7 +178,7 @@ static void ice_gnss_read(struct kthread_work *work) cpu_to_le16(ICE_GNSS_UBX_EMPTY_DATA), bytes_read, &buf[i], NULL); if (err) - goto exit_buf; + goto free_buf; } count = gnss_insert_raw(pf->gnss_dev, buf, i); @@ -191,10 +186,11 @@ static void ice_gnss_read(struct kthread_work *work) dev_warn(ice_pf_to_dev(pf), "gnss_insert_raw ret=%d size=%d\n", count, i); -exit_buf: + delay = ICE_GNSS_TIMER_DELAY_TIME; +free_buf: free_page((unsigned long)buf); - kthread_queue_delayed_work(gnss->kworker, &gnss->read_work, - ICE_GNSS_TIMER_DELAY_TIME); +requeue: + kthread_queue_delayed_work(gnss->kworker, &gnss->read_work, delay); exit: if (err) dev_dbg(ice_pf_to_dev(pf), "GNSS failed to read err=%d\n", err); diff --git a/drivers/net/ethernet/intel/ice/ice_gnss.h b/drivers/net/ethernet/intel/ice/ice_gnss.h index 4d49e5b0b4b8..b8bb8b63d081 100644 --- a/drivers/net/ethernet/intel/ice/ice_gnss.h +++ b/drivers/net/ethernet/intel/ice/ice_gnss.h @@ -5,6 +5,7 @@ #define _ICE_GNSS_H_ #define ICE_E810T_GNSS_I2C_BUS 0x2 +#define ICE_GNSS_POLL_DATA_DELAY_TIME (HZ / 50) /* poll every 20 ms */ #define ICE_GNSS_TIMER_DELAY_TIME (HZ / 10) /* 0.1 second per message */ #define ICE_GNSS_TTY_WRITE_BUF 250 #define ICE_MAX_I2C_DATA_SIZE FIELD_MAX(ICE_AQC_I2C_DATA_SIZE_M) @@ -20,8 +21,6 @@ * passed as I2C addr parameter. 
*/ #define ICE_GNSS_UBX_WRITE_BYTES (ICE_MAX_I2C_WRITE_BYTES + 1) -#define ICE_MAX_UBX_READ_TRIES 255 -#define ICE_MAX_UBX_ACK_READ_TRIES 4095 struct gnss_write_buf { struct list_head queue; diff --git a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c index f2604fc05991..e961ef4bbf4d 100644 --- a/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c +++ b/drivers/net/ethernet/intel/ixgbe/ixgbe_main.c @@ -8798,7 +8798,7 @@ netdev_tx_t ixgbe_xmit_frame_ring(struct sk_buff *skb, if (skb_cow_head(skb, 0)) goto out_drop; - vhdr = (struct vlan_ethhdr *)skb->data; + vhdr = skb_vlan_eth_hdr(skb); vhdr->h_vlan_TCI = htons(tx_flags >> IXGBE_TX_FLAGS_VLAN_SHIFT); } else { diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.c b/drivers/net/ethernet/mediatek/mtk_ppe.c index dd9581334b05..9129821f3ab8 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.c +++ b/drivers/net/ethernet/mediatek/mtk_ppe.c @@ -601,6 +601,7 @@ __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, struct mtk_eth *eth = ppe->eth; u16 timestamp = mtk_eth_timestamp(eth); struct mtk_foe_entry *hwe; + u32 val; if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) { entry->ib1 &= ~MTK_FOE_IB1_BIND_TIMESTAMP_V2; @@ -617,8 +618,13 @@ __mtk_foe_entry_commit(struct mtk_ppe *ppe, struct mtk_foe_entry *entry, wmb(); hwe->ib1 = entry->ib1; - if (ppe->accounting) - *mtk_foe_entry_ib2(eth, hwe) |= MTK_FOE_IB2_MIB_CNT; + if (ppe->accounting) { + if (MTK_HAS_CAPS(eth->soc->caps, MTK_NETSYS_V2)) + val = MTK_FOE_IB2_MIB_CNT_V2; + else + val = MTK_FOE_IB2_MIB_CNT; + *mtk_foe_entry_ib2(eth, hwe) |= val; + } dma_wmb(); diff --git a/drivers/net/ethernet/mediatek/mtk_ppe.h b/drivers/net/ethernet/mediatek/mtk_ppe.h index e1aab2e8e262..e51de31a52ec 100644 --- a/drivers/net/ethernet/mediatek/mtk_ppe.h +++ b/drivers/net/ethernet/mediatek/mtk_ppe.h @@ -55,9 +55,10 @@ enum { #define MTK_FOE_IB2_PSE_QOS BIT(4) #define MTK_FOE_IB2_DEST_PORT GENMASK(7, 5) #define MTK_FOE_IB2_MULTICAST BIT(8) +#define MTK_FOE_IB2_MIB_CNT BIT(10) #define MTK_FOE_IB2_WDMA_QID2 GENMASK(13, 12) -#define MTK_FOE_IB2_MIB_CNT BIT(15) +#define MTK_FOE_IB2_MIB_CNT_V2 BIT(15) #define MTK_FOE_IB2_WDMA_DEVIDX BIT(16) #define MTK_FOE_IB2_WDMA_WINFO BIT(17) diff --git a/drivers/net/ethernet/microsoft/mana/mana_en.c b/drivers/net/ethernet/microsoft/mana/mana_en.c index cabecbfa1102..06d6292e09b3 100644 --- a/drivers/net/ethernet/microsoft/mana/mana_en.c +++ b/drivers/net/ethernet/microsoft/mana/mana_en.c @@ -553,6 +553,14 @@ static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu) va = netdev_alloc_frag(mpc->rxbpre_alloc_size); if (!va) goto error; + + page = virt_to_head_page(va); + /* Check if the frag falls back to single page */ + if (compound_order(page) < + get_order(mpc->rxbpre_alloc_size)) { + put_page(page); + goto error; + } } else { page = dev_alloc_page(); if (!page) @@ -563,7 +571,6 @@ static int mana_pre_alloc_rxbufs(struct mana_port_context *mpc, int new_mtu) da = dma_map_single(dev, va + mpc->rxbpre_headroom, mpc->rxbpre_datasize, DMA_FROM_DEVICE); - if (dma_mapping_error(dev, da)) { put_page(virt_to_head_page(va)); goto error; @@ -1505,6 +1512,13 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev, if (!va) return NULL; + + page = virt_to_head_page(va); + /* Check if the frag falls back to single page */ + if (compound_order(page) < get_order(rxq->alloc_size)) { + put_page(page); + return NULL; + } } else { page = dev_alloc_page(); if (!page) @@ -1515,7 +1529,6 @@ static void 
*mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev, *da = dma_map_single(dev, va + rxq->headroom, rxq->datasize, DMA_FROM_DEVICE); - if (dma_mapping_error(dev, *da)) { put_page(virt_to_head_page(va)); return NULL; @@ -1525,14 +1538,13 @@ static void *mana_get_rxfrag(struct mana_rxq *rxq, struct device *dev, } /* Allocate frag for rx buffer, and save the old buf */ -static void mana_refill_rxoob(struct device *dev, struct mana_rxq *rxq, - struct mana_recv_buf_oob *rxoob, void **old_buf) +static void mana_refill_rx_oob(struct device *dev, struct mana_rxq *rxq, + struct mana_recv_buf_oob *rxoob, void **old_buf) { dma_addr_t da; void *va; va = mana_get_rxfrag(rxq, dev, &da, true); - if (!va) return; @@ -1597,7 +1609,7 @@ static void mana_process_rx_cqe(struct mana_rxq *rxq, struct mana_cq *cq, rxbuf_oob = &rxq->rx_oobs[curr]; WARN_ON_ONCE(rxbuf_oob->wqe_inf.wqe_size_in_bu != 1); - mana_refill_rxoob(dev, rxq, rxbuf_oob, &old_buf); + mana_refill_rx_oob(dev, rxq, rxbuf_oob, &old_buf); /* Unsuccessful refill will have old_buf == NULL. * In this case, mana_rx_skb() will drop the packet. diff --git a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c index 59d0dd862fd1..1d1e183d3a8b 100644 --- a/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c +++ b/drivers/net/ethernet/qlogic/netxen/netxen_nic_main.c @@ -1854,7 +1854,7 @@ netxen_tso_check(struct net_device *netdev, if (protocol == cpu_to_be16(ETH_P_8021Q)) { - vh = (struct vlan_ethhdr *)skb->data; + vh = skb_vlan_eth_hdr(skb); protocol = vh->h_vlan_encapsulated_proto; flags = FLAGS_VLAN_TAGGED; diff --git a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c index 92930a055cbc..41894d154013 100644 --- a/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c +++ b/drivers/net/ethernet/qlogic/qlcnic/qlcnic_io.c @@ -318,7 +318,7 @@ static void qlcnic_send_filter(struct qlcnic_adapter *adapter, if (adapter->flags & QLCNIC_VLAN_FILTERING) { if (protocol == ETH_P_8021Q) { - vh = (struct vlan_ethhdr *)skb->data; + vh = skb_vlan_eth_hdr(skb); vlan_id = ntohs(vh->h_vlan_TCI); } else if (skb_vlan_tag_present(skb)) { vlan_id = skb_vlan_tag_get(skb); @@ -468,7 +468,7 @@ static int qlcnic_tx_pkt(struct qlcnic_adapter *adapter, u32 producer = tx_ring->producer; if (protocol == ETH_P_8021Q) { - vh = (struct vlan_ethhdr *)skb->data; + vh = skb_vlan_eth_hdr(skb); flags = QLCNIC_FLAGS_VLAN_TAGGED; vlan_tci = ntohs(vh->h_vlan_TCI); protocol = ntohs(vh->h_vlan_encapsulated_proto); diff --git a/drivers/net/ethernet/sfc/tx_tso.c b/drivers/net/ethernet/sfc/tx_tso.c index 898e5c61d908..d381d8164f07 100644 --- a/drivers/net/ethernet/sfc/tx_tso.c +++ b/drivers/net/ethernet/sfc/tx_tso.c @@ -147,7 +147,7 @@ static __be16 efx_tso_check_protocol(struct sk_buff *skb) EFX_WARN_ON_ONCE_PARANOID(((struct ethhdr *)skb->data)->h_proto != protocol); if (protocol == htons(ETH_P_8021Q)) { - struct vlan_ethhdr *veh = (struct vlan_ethhdr *)skb->data; + struct vlan_ethhdr *veh = skb_vlan_eth_hdr(skb); protocol = veh->h_vlan_encapsulated_proto; } diff --git a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c index f116e4ae293b..0fca81507a77 100644 --- a/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c +++ b/drivers/net/ethernet/stmicro/stmmac/stmmac_main.c @@ -4569,13 +4569,10 @@ dma_map_err: static void stmmac_rx_vlan(struct net_device *dev, struct sk_buff *skb) { - struct vlan_ethhdr *veth; - __be16 vlan_proto; + struct vlan_ethhdr 
*veth = skb_vlan_eth_hdr(skb); + __be16 vlan_proto = veth->h_vlan_proto; u16 vlanid; - veth = (struct vlan_ethhdr *)skb->data; - vlan_proto = veth->h_vlan_proto; - if ((vlan_proto == htons(ETH_P_8021Q) && dev->features & NETIF_F_HW_VLAN_CTAG_RX) || (vlan_proto == htons(ETH_P_8021AD) && diff --git a/drivers/net/phy/Kconfig b/drivers/net/phy/Kconfig index bcfba077b80e..2f3ddc446cbb 100644 --- a/drivers/net/phy/Kconfig +++ b/drivers/net/phy/Kconfig @@ -244,6 +244,11 @@ config MICREL_PHY help Supports the KSZ9021, VSC8201, KS8001 PHYs. +config MICROCHIP_T1S_PHY + tristate "Microchip 10BASE-T1S Ethernet PHY" + help + Currently supports the LAN8670, LAN8671, LAN8672 + config MICROCHIP_PHY tristate "Microchip PHYs" help diff --git a/drivers/net/phy/Makefile b/drivers/net/phy/Makefile index ae11bf20b46e..f289ab16a1da 100644 --- a/drivers/net/phy/Makefile +++ b/drivers/net/phy/Makefile @@ -74,6 +74,7 @@ obj-$(CONFIG_MICREL_KS8995MA) += spi_ks8995.o obj-$(CONFIG_MICREL_PHY) += micrel.o obj-$(CONFIG_MICROCHIP_PHY) += microchip.o obj-$(CONFIG_MICROCHIP_T1_PHY) += microchip_t1.o +obj-$(CONFIG_MICROCHIP_T1S_PHY) += microchip_t1s.o obj-$(CONFIG_MICROSEMI_PHY) += mscc/ obj-$(CONFIG_MOTORCOMM_PHY) += motorcomm.o obj-$(CONFIG_NATIONAL_PHY) += national.o diff --git a/drivers/net/phy/dp83867.c b/drivers/net/phy/dp83867.c index 5821f04c69dc..d75f526a20a4 100644 --- a/drivers/net/phy/dp83867.c +++ b/drivers/net/phy/dp83867.c @@ -26,6 +26,8 @@ #define MII_DP83867_MICR 0x12 #define MII_DP83867_ISR 0x13 #define DP83867_CFG2 0x14 +#define DP83867_LEDCR1 0x18 +#define DP83867_LEDCR2 0x19 #define DP83867_CFG3 0x1e #define DP83867_CTRL 0x1f @@ -150,6 +152,12 @@ /* FLD_THR_CFG */ #define DP83867_FLD_THR_CFG_ENERGY_LOST_THR_MASK 0x7 +#define DP83867_LED_COUNT 4 + +/* LED_DRV bits */ +#define DP83867_LED_DRV_EN(x) BIT((x) * 4) +#define DP83867_LED_DRV_VAL(x) BIT((x) * 4 + 1) + enum { DP83867_PORT_MIRROING_KEEP, DP83867_PORT_MIRROING_EN, @@ -468,8 +476,7 @@ static int dp83867_set_tunable(struct phy_device *phydev, static int dp83867_config_port_mirroring(struct phy_device *phydev) { - struct dp83867_private *dp83867 = - (struct dp83867_private *)phydev->priv; + struct dp83867_private *dp83867 = phydev->priv; if (dp83867->port_mirroring == DP83867_PORT_MIRROING_EN) phy_set_bits_mmd(phydev, DP83867_DEVADDR, DP83867_CFG4, @@ -970,6 +977,27 @@ static int dp83867_loopback(struct phy_device *phydev, bool enable) enable ? 
BMCR_LOOPBACK : 0); } +static int +dp83867_led_brightness_set(struct phy_device *phydev, + u8 index, enum led_brightness brightness) +{ + u32 val; + + if (index >= DP83867_LED_COUNT) + return -EINVAL; + + /* DRV_EN==1: output is DRV_VAL */ + val = DP83867_LED_DRV_EN(index); + + if (brightness) + val |= DP83867_LED_DRV_VAL(index); + + return phy_modify(phydev, DP83867_LEDCR2, + DP83867_LED_DRV_VAL(index) | + DP83867_LED_DRV_EN(index), + val); +} + static struct phy_driver dp83867_driver[] = { { .phy_id = DP83867_PHY_ID, @@ -997,6 +1025,8 @@ static struct phy_driver dp83867_driver[] = { .link_change_notify = dp83867_link_change_notify, .set_loopback = dp83867_loopback, + + .led_brightness_set = dp83867_led_brightness_set, }, }; module_phy_driver(dp83867_driver); diff --git a/drivers/net/phy/microchip_t1s.c b/drivers/net/phy/microchip_t1s.c new file mode 100644 index 000000000000..094967b3c111 --- /dev/null +++ b/drivers/net/phy/microchip_t1s.c @@ -0,0 +1,138 @@ +// SPDX-License-Identifier: GPL-2.0+ +/* + * Driver for Microchip 10BASE-T1S LAN867X PHY + * + * Support: Microchip Phys: + * lan8670, lan8671, lan8672 + */ + +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/phy.h> + +#define PHY_ID_LAN867X 0x0007C160 + +#define LAN867X_REG_IRQ_1_CTL 0x001C +#define LAN867X_REG_IRQ_2_CTL 0x001D + +/* The arrays below are pulled from the following table from AN1699 + * Access MMD Address Value Mask + * RMW 0x1F 0x00D0 0x0002 0x0E03 + * RMW 0x1F 0x00D1 0x0000 0x0300 + * RMW 0x1F 0x0084 0x3380 0xFFC0 + * RMW 0x1F 0x0085 0x0006 0x000F + * RMW 0x1F 0x008A 0xC000 0xF800 + * RMW 0x1F 0x0087 0x801C 0x801C + * RMW 0x1F 0x0088 0x033F 0x1FFF + * W 0x1F 0x008B 0x0404 ------ + * RMW 0x1F 0x0080 0x0600 0x0600 + * RMW 0x1F 0x00F1 0x2400 0x7F00 + * RMW 0x1F 0x0096 0x2000 0x2000 + * W 0x1F 0x0099 0x7F80 ------ + */ + +static const int lan867x_fixup_registers[12] = { + 0x00D0, 0x00D1, 0x0084, 0x0085, + 0x008A, 0x0087, 0x0088, 0x008B, + 0x0080, 0x00F1, 0x0096, 0x0099, +}; + +static const int lan867x_fixup_values[12] = { + 0x0002, 0x0000, 0x3380, 0x0006, + 0xC000, 0x801C, 0x033F, 0x0404, + 0x0600, 0x2400, 0x2000, 0x7F80, +}; + +static const int lan867x_fixup_masks[12] = { + 0x0E03, 0x0300, 0xFFC0, 0x000F, + 0xF800, 0x801C, 0x1FFF, 0xFFFF, + 0x0600, 0x7F00, 0x2000, 0xFFFF, +}; + +static int lan867x_config_init(struct phy_device *phydev) +{ + /* HW quirk: Microchip states in the application note (AN1699) for the phy + * that a set of read-modify-write (rmw) operations has to be performed + * on a set of seemingly magic registers. + * The result of these operations is just described as 'optimal performance' + * Microchip gives no explanation as to what these mmd regs do, + * in fact they are marked as reserved in the datasheet. + * It is unclear if phy_modify_mmd would be safe to use or if a write + * really has to happen to each register. + * In order to exactly conform to what is stated in the AN phy_write_mmd is + * used, which might then write the same value back as read + modified. 
+ */ + + int reg_value; + int err; + int reg; + + /* Read-Modified Write Pseudocode (from AN1699) + * current_val = read_register(mmd, addr) // Read current register value + * new_val = current_val AND (NOT mask) // Clear bit fields to be written + * new_val = new_val OR value // Set bits + * write_register(mmd, addr, new_val) // Write back updated register value + */ + for (int i = 0; i < ARRAY_SIZE(lan867x_fixup_registers); i++) { + reg = lan867x_fixup_registers[i]; + reg_value = phy_read_mmd(phydev, MDIO_MMD_VEND2, reg); + reg_value &= ~lan867x_fixup_masks[i]; + reg_value |= lan867x_fixup_values[i]; + err = phy_write_mmd(phydev, MDIO_MMD_VEND2, reg, reg_value); + if (err != 0) + return err; + } + + /* None of the interrupts in the lan867x phy seem relevant. + * Other phys inspect the link status and call phy_trigger_machine + * in the interrupt handler. + * This phy does not support link status, and thus has no interrupt + * for it either. + * So we'll just disable all interrupts on the chip. + */ + err = phy_write_mmd(phydev, MDIO_MMD_VEND2, LAN867X_REG_IRQ_1_CTL, 0xFFFF); + if (err != 0) + return err; + return phy_write_mmd(phydev, MDIO_MMD_VEND2, LAN867X_REG_IRQ_2_CTL, 0xFFFF); +} + +static int lan867x_read_status(struct phy_device *phydev) +{ + /* The phy has some limitations, namely: + * - always reports link up + * - only supports 10MBit half duplex + * - does not support auto negotiate + */ + phydev->link = 1; + phydev->duplex = DUPLEX_HALF; + phydev->speed = SPEED_10; + phydev->autoneg = AUTONEG_DISABLE; + + return 0; +} + +static struct phy_driver lan867x_driver[] = { + { + PHY_ID_MATCH_MODEL(PHY_ID_LAN867X), + .name = "LAN867X", + .features = PHY_BASIC_T1S_P2MP_FEATURES, + .config_init = lan867x_config_init, + .read_status = lan867x_read_status, + .get_plca_cfg = genphy_c45_plca_get_cfg, + .set_plca_cfg = genphy_c45_plca_set_cfg, + .get_plca_status = genphy_c45_plca_get_status, + } +}; + +module_phy_driver(lan867x_driver); + +static struct mdio_device_id __maybe_unused tbl[] = { + { PHY_ID_MATCH_MODEL(PHY_ID_LAN867X) }, + { } +}; + +MODULE_DEVICE_TABLE(mdio, tbl); + +MODULE_DESCRIPTION("Microchip 10BASE-T1S lan867x Phy driver"); +MODULE_AUTHOR("Ramón Nordin Rodriguez"); +MODULE_LICENSE("GPL"); diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index d373446ab5ac..17d0d0555a79 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -3028,6 +3028,7 @@ static int of_phy_led(struct phy_device *phydev, struct led_init_data init_data = {}; struct led_classdev *cdev; struct phy_led *phyled; + u32 index; int err; phyled = devm_kzalloc(dev, sizeof(*phyled), GFP_KERNEL); @@ -3037,10 +3038,13 @@ static int of_phy_led(struct phy_device *phydev, cdev = &phyled->led_cdev; phyled->phydev = phydev; - err = of_property_read_u8(led, "reg", &phyled->index); + err = of_property_read_u32(led, "reg", &index); if (err) return err; + if (index > U8_MAX) + return -EINVAL; + phyled->index = index; if (phydev->drv->led_brightness_set) cdev->brightness_set_blocking = phy_led_set_brightness; if (phydev->drv->led_blink_set) diff --git a/drivers/net/veth.c b/drivers/net/veth.c index 4b3c6647edc6..dce9f9d63e04 100644 --- a/drivers/net/veth.c +++ b/drivers/net/veth.c @@ -26,6 +26,7 @@ #include <linux/ptr_ring.h> #include <linux/bpf_trace.h> #include <linux/net_tstamp.h> +#include <net/page_pool.h> #define DRV_NAME "veth" #define DRV_VERSION "1.0" @@ -65,6 +66,7 @@ struct veth_rq { bool rx_notify_masked; struct ptr_ring xdp_ring; struct xdp_rxq_info xdp_rxq; + 
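The fixup loop above spells out every read-modify-write with phy_read_mmd()/phy_write_mmd(). For comparison only, the sketch below applies the same AN1699 table with phy_modify_mmd(); the driver deliberately does not do this because the application note leaves open whether an unconditional write is required, and the helper name lan867x_apply_an1699_fixup() is made up for illustration.

static int lan867x_apply_an1699_fixup(struct phy_device *phydev)
{
	int err;

	for (int i = 0; i < ARRAY_SIZE(lan867x_fixup_registers); i++) {
		/* phy_modify_mmd() clears the mask bits and sets the value
		 * bits in a single locked read-modify-write cycle.
		 */
		err = phy_modify_mmd(phydev, MDIO_MMD_VEND2,
				     lan867x_fixup_registers[i],
				     lan867x_fixup_masks[i],
				     lan867x_fixup_values[i]);
		if (err)
			return err;
	}

	return 0;
}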
struct page_pool *page_pool; }; struct veth_priv { @@ -155,6 +157,8 @@ static void veth_get_strings(struct net_device *dev, u32 stringset, u8 *buf) for (j = 0; j < VETH_TQ_STATS_LEN; j++) ethtool_sprintf(&p, "tx_queue_%u_%.18s", i, veth_tq_stats_desc[j].desc); + + page_pool_ethtool_stats_get_strings(p); break; } } @@ -165,7 +169,8 @@ static int veth_get_sset_count(struct net_device *dev, int sset) case ETH_SS_STATS: return ARRAY_SIZE(ethtool_stats_keys) + VETH_RQ_STATS_LEN * dev->real_num_rx_queues + - VETH_TQ_STATS_LEN * dev->real_num_tx_queues; + VETH_TQ_STATS_LEN * dev->real_num_tx_queues + + page_pool_ethtool_stats_get_count(); default: return -EOPNOTSUPP; } @@ -176,7 +181,8 @@ static void veth_get_ethtool_stats(struct net_device *dev, { struct veth_priv *rcv_priv, *priv = netdev_priv(dev); struct net_device *peer = rtnl_dereference(priv->peer); - int i, j, idx; + struct page_pool_stats pp_stats = {}; + int i, j, idx, pp_idx; data[0] = peer ? peer->ifindex : 0; idx = 1; @@ -195,9 +201,10 @@ static void veth_get_ethtool_stats(struct net_device *dev, } while (u64_stats_fetch_retry(&rq_stats->syncp, start)); idx += VETH_RQ_STATS_LEN; } + pp_idx = idx; if (!peer) - return; + goto page_pool_stats; rcv_priv = netdev_priv(peer); for (i = 0; i < peer->real_num_rx_queues; i++) { @@ -214,7 +221,16 @@ static void veth_get_ethtool_stats(struct net_device *dev, data[tx_idx + j] += *(u64 *)(base + offset); } } while (u64_stats_fetch_retry(&rq_stats->syncp, start)); + pp_idx = tx_idx + VETH_TQ_STATS_LEN; } + +page_pool_stats: + for (i = 0; i < dev->real_num_rx_queues; i++) { + if (!priv->rq[i].page_pool) + continue; + page_pool_get_stats(priv->rq[i].page_pool, &pp_stats); + } + page_pool_ethtool_stats_get(&data[pp_idx], &pp_stats); } static void veth_get_channels(struct net_device *dev, @@ -727,17 +743,20 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq, goto drop; /* Allocate skb head */ - page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); + page = page_pool_dev_alloc_pages(rq->page_pool); if (!page) goto drop; nskb = build_skb(page_address(page), PAGE_SIZE); if (!nskb) { - put_page(page); + page_pool_put_full_page(rq->page_pool, page, true); goto drop; } skb_reserve(nskb, VETH_XDP_HEADROOM); + skb_copy_header(nskb, skb); + skb_mark_for_recycle(nskb); + size = min_t(u32, skb->len, max_head_size); if (skb_copy_bits(skb, 0, nskb->data, size)) { consume_skb(nskb); @@ -745,7 +764,6 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq, } skb_put(nskb, size); - skb_copy_header(nskb, skb); head_off = skb_headroom(nskb) - skb_headroom(skb); skb_headers_offset_update(nskb, head_off); @@ -754,7 +772,7 @@ static int veth_convert_skb_to_xdp_buff(struct veth_rq *rq, len = skb->len - off; for (i = 0; i < MAX_SKB_FRAGS && off < skb->len; i++) { - page = alloc_page(GFP_ATOMIC | __GFP_NOWARN); + page = page_pool_dev_alloc_pages(rq->page_pool); if (!page) { consume_skb(nskb); goto drop; @@ -1002,12 +1020,38 @@ static int veth_poll(struct napi_struct *napi, int budget) return done; } +static int veth_create_page_pool(struct veth_rq *rq) +{ + struct page_pool_params pp_params = { + .order = 0, + .pool_size = VETH_RING_SIZE, + .nid = NUMA_NO_NODE, + .dev = &rq->dev->dev, + }; + + rq->page_pool = page_pool_create(&pp_params); + if (IS_ERR(rq->page_pool)) { + int err = PTR_ERR(rq->page_pool); + + rq->page_pool = NULL; + return err; + } + + return 0; +} + static int __veth_napi_enable_range(struct net_device *dev, int start, int end) { struct veth_priv *priv = netdev_priv(dev); int err, i; for (i = start; 
i < end; i++) { + err = veth_create_page_pool(&priv->rq[i]); + if (err) + goto err_page_pool; + } + + for (i = start; i < end; i++) { struct veth_rq *rq = &priv->rq[i]; err = ptr_ring_init(&rq->xdp_ring, VETH_RING_SIZE, GFP_KERNEL); @@ -1027,6 +1071,11 @@ static int __veth_napi_enable_range(struct net_device *dev, int start, int end) err_xdp_ring: for (i--; i >= start; i--) ptr_ring_cleanup(&priv->rq[i].xdp_ring, veth_ptr_free); +err_page_pool: + for (i = start; i < end; i++) { + page_pool_destroy(priv->rq[i].page_pool); + priv->rq[i].page_pool = NULL; + } return err; } @@ -1056,6 +1105,11 @@ static void veth_napi_del_range(struct net_device *dev, int start, int end) rq->rx_notify_masked = false; ptr_ring_cleanup(&rq->xdp_ring, veth_ptr_free); } + + for (i = start; i < end; i++) { + page_pool_destroy(priv->rq[i].page_pool); + priv->rq[i].page_pool = NULL; + } } static void veth_napi_del(struct net_device *dev) diff --git a/drivers/nfc/nfcsim.c b/drivers/nfc/nfcsim.c index 85bf8d586c70..44eeb17ae48d 100644 --- a/drivers/nfc/nfcsim.c +++ b/drivers/nfc/nfcsim.c @@ -367,11 +367,6 @@ static void nfcsim_debugfs_init_dev(struct nfcsim *dev) } dev_dir = debugfs_create_dir(devname, nfcsim_debugfs_root); - if (!dev_dir) { - NFCSIM_ERR(dev, "Could not create debugfs entries for nfc%d\n", - idx); - return; - } debugfs_create_u8("dropframe", 0664, dev_dir, &dev->dropframe); } diff --git a/drivers/staging/gdm724x/gdm_lte.c b/drivers/staging/gdm724x/gdm_lte.c index 671ee8843c88..5703a9ddb6d0 100644 --- a/drivers/staging/gdm724x/gdm_lte.c +++ b/drivers/staging/gdm724x/gdm_lte.c @@ -349,7 +349,7 @@ static s32 gdm_lte_tx_nic_type(struct net_device *dev, struct sk_buff *skb) /* Get ethernet protocol */ eth = (struct ethhdr *)skb->data; if (ntohs(eth->h_proto) == ETH_P_8021Q) { - vlan_eth = (struct vlan_ethhdr *)skb->data; + vlan_eth = skb_vlan_eth_hdr(skb); mac_proto = ntohs(vlan_eth->h_vlan_encapsulated_proto); network_data = skb->data + VLAN_ETH_HLEN; nic_type |= NIC_TYPE_F_VLAN; @@ -435,7 +435,7 @@ static netdev_tx_t gdm_lte_tx(struct sk_buff *skb, struct net_device *dev) * driver based on the NIC mac */ if (nic_type & NIC_TYPE_F_VLAN) { - struct vlan_ethhdr *vlan_eth = (struct vlan_ethhdr *)skb->data; + struct vlan_ethhdr *vlan_eth = skb_vlan_eth_hdr(skb); nic->vlan_id = ntohs(vlan_eth->h_vlan_TCI) & VLAN_VID_MASK; data_buf = skb->data + (VLAN_ETH_HLEN - ETH_HLEN); diff --git a/drivers/tty/serdev/core.c b/drivers/tty/serdev/core.c index 678014253b7b..e7d663901c07 100644 --- a/drivers/tty/serdev/core.c +++ b/drivers/tty/serdev/core.c @@ -366,7 +366,7 @@ int serdev_device_set_parity(struct serdev_device *serdev, struct serdev_controller *ctrl = serdev->ctrl; if (!ctrl || !ctrl->ops->set_parity) - return -ENOTSUPP; + return -EOPNOTSUPP; return ctrl->ops->set_parity(ctrl, parity); } @@ -388,7 +388,7 @@ int serdev_device_get_tiocm(struct serdev_device *serdev) struct serdev_controller *ctrl = serdev->ctrl; if (!ctrl || !ctrl->ops->get_tiocm) - return -ENOTSUPP; + return -EOPNOTSUPP; return ctrl->ops->get_tiocm(ctrl); } @@ -399,12 +399,23 @@ int serdev_device_set_tiocm(struct serdev_device *serdev, int set, int clear) struct serdev_controller *ctrl = serdev->ctrl; if (!ctrl || !ctrl->ops->set_tiocm) - return -ENOTSUPP; + return -EOPNOTSUPP; return ctrl->ops->set_tiocm(ctrl, set, clear); } EXPORT_SYMBOL_GPL(serdev_device_set_tiocm); +int serdev_device_break_ctl(struct serdev_device *serdev, int break_state) +{ + struct serdev_controller *ctrl = serdev->ctrl; + + if (!ctrl || !ctrl->ops->break_ctl) + return 
-EOPNOTSUPP; + + return ctrl->ops->break_ctl(ctrl, break_state); +} +EXPORT_SYMBOL_GPL(serdev_device_break_ctl); + static int serdev_drv_probe(struct device *dev) { const struct serdev_device_driver *sdrv = to_serdev_device_driver(dev->driver); diff --git a/drivers/tty/serdev/serdev-ttyport.c b/drivers/tty/serdev/serdev-ttyport.c index d367803e2044..8033ef19669c 100644 --- a/drivers/tty/serdev/serdev-ttyport.c +++ b/drivers/tty/serdev/serdev-ttyport.c @@ -231,7 +231,7 @@ static int ttyport_get_tiocm(struct serdev_controller *ctrl) struct tty_struct *tty = serport->tty; if (!tty->ops->tiocmget) - return -ENOTSUPP; + return -EOPNOTSUPP; return tty->ops->tiocmget(tty); } @@ -242,11 +242,22 @@ static int ttyport_set_tiocm(struct serdev_controller *ctrl, unsigned int set, u struct tty_struct *tty = serport->tty; if (!tty->ops->tiocmset) - return -ENOTSUPP; + return -EOPNOTSUPP; return tty->ops->tiocmset(tty, set, clear); } +static int ttyport_break_ctl(struct serdev_controller *ctrl, unsigned int break_state) +{ + struct serport *serport = serdev_controller_get_drvdata(ctrl); + struct tty_struct *tty = serport->tty; + + if (!tty->ops->break_ctl) + return -EOPNOTSUPP; + + return tty->ops->break_ctl(tty, break_state); +} + static const struct serdev_controller_ops ctrl_ops = { .write_buf = ttyport_write_buf, .write_flush = ttyport_write_flush, @@ -259,6 +270,7 @@ static const struct serdev_controller_ops ctrl_ops = { .wait_until_sent = ttyport_wait_until_sent, .get_tiocm = ttyport_get_tiocm, .set_tiocm = ttyport_set_tiocm, + .break_ctl = ttyport_break_ctl, }; struct device *serdev_tty_port_register(struct tty_port *port, diff --git a/include/linux/if_vlan.h b/include/linux/if_vlan.h index 6864b89ef868..0f40f379d75c 100644 --- a/include/linux/if_vlan.h +++ b/include/linux/if_vlan.h @@ -62,6 +62,14 @@ static inline struct vlan_ethhdr *vlan_eth_hdr(const struct sk_buff *skb) return (struct vlan_ethhdr *)skb_mac_header(skb); } +/* Prefer this version in TX path, instead of + * skb_reset_mac_header() + vlan_eth_hdr() + */ +static inline struct vlan_ethhdr *skb_vlan_eth_hdr(const struct sk_buff *skb) +{ + return (struct vlan_ethhdr *)skb->data; +} + #define VLAN_PRIO_MASK 0xe000 /* Priority Code Point */ #define VLAN_PRIO_SHIFT 13 #define VLAN_CFI_MASK 0x1000 /* Canonical Format Indicator / Drop Eligible Indicator */ @@ -351,7 +359,8 @@ static inline int __vlan_insert_inner_tag(struct sk_buff *skb, /* Move the mac header sans proto to the beginning of the new header. */ if (likely(mac_len > ETH_TLEN)) memmove(skb->data, skb->data + VLAN_HLEN, mac_len - ETH_TLEN); - skb->mac_header -= VLAN_HLEN; + if (skb_mac_header_was_set(skb)) + skb->mac_header -= VLAN_HLEN; veth = (struct vlan_ethhdr *)(skb->data + mac_len - ETH_HLEN); @@ -528,7 +537,7 @@ static inline void __vlan_hwaccel_put_tag(struct sk_buff *skb, */ static inline int __vlan_get_tag(const struct sk_buff *skb, u16 *vlan_tci) { - struct vlan_ethhdr *veth = (struct vlan_ethhdr *)skb->data; + struct vlan_ethhdr *veth = skb_vlan_eth_hdr(skb); if (!eth_type_vlan(veth->h_vlan_proto)) return -EINVAL; @@ -677,6 +686,27 @@ static inline void vlan_set_encap_proto(struct sk_buff *skb, } /** + * vlan_remove_tag - remove outer VLAN tag from payload + * @skb: skbuff to remove tag from + * @vlan_tci: buffer to store value + * + * Expects the skb to contain a VLAN tag in the payload, and to have skb->data + * pointing at the MAC header. + * + * Returns a new pointer to skb->data, or NULL on failure to pull. 
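 *
 * The caller must ensure the full VLAN Ethernet header (VLAN_ETH_HLEN bytes)
 * is available in the linear data area, e.g. via pskb_may_pull(), before
 * calling this helper, since the header is read and moved in place.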
+ */ +static inline void *vlan_remove_tag(struct sk_buff *skb, u16 *vlan_tci) +{ + struct vlan_hdr *vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); + + *vlan_tci = ntohs(vhdr->h_vlan_TCI); + + memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); + vlan_set_encap_proto(skb, vhdr); + return __skb_pull(skb, VLAN_HLEN); +} + +/** * skb_vlan_tagged - check if skb is vlan tagged. * @skb: skbuff to query * @@ -712,7 +742,7 @@ static inline bool skb_vlan_tagged_multi(struct sk_buff *skb) if (unlikely(!pskb_may_pull(skb, VLAN_ETH_HLEN))) return false; - veh = (struct vlan_ethhdr *)skb->data; + veh = skb_vlan_eth_hdr(skb); protocol = veh->h_vlan_encapsulated_proto; } diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index a6a3e9457d6c..08fbd4622ccf 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -3194,7 +3194,10 @@ struct softnet_data { #ifdef CONFIG_RPS struct softnet_data *rps_ipi_list; #endif + bool in_net_rx_action; + bool in_napi_threaded_poll; + #ifdef CONFIG_NET_FLOW_LIMIT struct sd_flow_limit __rcu *flow_limit; #endif diff --git a/include/linux/netfilter/nfnetlink.h b/include/linux/netfilter/nfnetlink.h index 241e005f290a..e9a9ab34a7cc 100644 --- a/include/linux/netfilter/nfnetlink.h +++ b/include/linux/netfilter/nfnetlink.h @@ -45,7 +45,6 @@ struct nfnetlink_subsystem { int (*commit)(struct net *net, struct sk_buff *skb); int (*abort)(struct net *net, struct sk_buff *skb, enum nfnl_abort_action action); - void (*cleanup)(struct net *net); bool (*valid_genid)(struct net *net, u32 genid); }; diff --git a/include/linux/serdev.h b/include/linux/serdev.h index 5f6bfe4f6d95..f5f97fa25e8a 100644 --- a/include/linux/serdev.h +++ b/include/linux/serdev.h @@ -93,6 +93,7 @@ struct serdev_controller_ops { void (*wait_until_sent)(struct serdev_controller *, long); int (*get_tiocm)(struct serdev_controller *); int (*set_tiocm)(struct serdev_controller *, unsigned int, unsigned int); + int (*break_ctl)(struct serdev_controller *ctrl, unsigned int break_state); }; /** @@ -203,6 +204,7 @@ int serdev_device_write_buf(struct serdev_device *, const unsigned char *, size_ void serdev_device_wait_until_sent(struct serdev_device *, long); int serdev_device_get_tiocm(struct serdev_device *); int serdev_device_set_tiocm(struct serdev_device *, int, int); +int serdev_device_break_ctl(struct serdev_device *serdev, int break_state); void serdev_device_write_wakeup(struct serdev_device *); int serdev_device_write(struct serdev_device *, const unsigned char *, size_t, long); void serdev_device_write_flush(struct serdev_device *); @@ -250,11 +252,15 @@ static inline int serdev_device_write_buf(struct serdev_device *serdev, static inline void serdev_device_wait_until_sent(struct serdev_device *sdev, long timeout) {} static inline int serdev_device_get_tiocm(struct serdev_device *serdev) { - return -ENOTSUPP; + return -EOPNOTSUPP; } static inline int serdev_device_set_tiocm(struct serdev_device *serdev, int set, int clear) { - return -ENOTSUPP; + return -EOPNOTSUPP; +} +static inline int serdev_device_break_ctl(struct serdev_device *serdev, int break_state) +{ + return -EOPNOTSUPP; } static inline int serdev_device_write(struct serdev_device *sdev, const unsigned char *buf, size_t count, unsigned long timeout) diff --git a/include/net/bluetooth/bluetooth.h b/include/net/bluetooth/bluetooth.h index bcc5a4cd2c17..1b4230cd42a3 100644 --- a/include/net/bluetooth/bluetooth.h +++ b/include/net/bluetooth/bluetooth.h @@ -1,6 +1,7 @@ /* BlueZ - Bluetooth protocol stack for Linux 
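To illustrate the serdev break hook wired up above, a client driver could toggle break around a device sleep state roughly as below. The function and the sleep handshake are illustrative only; the -1/0 values follow the usual tty break_ctl() convention (-1 asserts break, 0 clears it).

/* Illustrative only: assert break to let the attached device enter its
 * low-power state, clear it to wake the device up again.
 */
static int example_serdev_set_sleep(struct serdev_device *serdev, bool sleep)
{
	return serdev_device_break_ctl(serdev, sleep ? -1 : 0);
}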
Copyright (C) 2000-2001 Qualcomm Incorporated + Copyright 2023 NXP Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> @@ -171,23 +172,39 @@ struct bt_iso_io_qos { __u8 rtn; }; -struct bt_iso_qos { - union { - __u8 cig; - __u8 big; - }; - union { - __u8 cis; - __u8 bis; - }; - union { - __u8 sca; - __u8 sync_interval; - }; +struct bt_iso_ucast_qos { + __u8 cig; + __u8 cis; + __u8 sca; + __u8 packing; + __u8 framing; + struct bt_iso_io_qos in; + struct bt_iso_io_qos out; +}; + +struct bt_iso_bcast_qos { + __u8 big; + __u8 bis; + __u8 sync_interval; __u8 packing; __u8 framing; struct bt_iso_io_qos in; struct bt_iso_io_qos out; + __u8 encryption; + __u8 bcode[16]; + __u8 options; + __u16 skip; + __u16 sync_timeout; + __u8 sync_cte_type; + __u8 mse; + __u16 timeout; +}; + +struct bt_iso_qos { + union { + struct bt_iso_ucast_qos ucast; + struct bt_iso_bcast_qos bcast; + }; }; #define BT_ISO_PHY_1M 0x01 diff --git a/include/net/bluetooth/coredump.h b/include/net/bluetooth/coredump.h new file mode 100644 index 000000000000..72f51b587a04 --- /dev/null +++ b/include/net/bluetooth/coredump.h @@ -0,0 +1,116 @@ +/* SPDX-License-Identifier: GPL-2.0-only */ +/* + * Copyright (C) 2022 Google Corporation + */ + +#ifndef __COREDUMP_H +#define __COREDUMP_H + +#define DEVCOREDUMP_TIMEOUT msecs_to_jiffies(10000) /* 10 sec */ + +typedef void (*coredump_t)(struct hci_dev *hdev); +typedef void (*dmp_hdr_t)(struct hci_dev *hdev, struct sk_buff *skb); +typedef void (*notify_change_t)(struct hci_dev *hdev, int state); + +/* struct hci_devcoredump - Devcoredump state + * + * @supported: Indicates if FW dump collection is supported by driver + * @state: Current state of dump collection + * @timeout: Indicates a timeout for collecting the devcoredump + * + * @alloc_size: Total size of the dump + * @head: Start of the dump + * @tail: Pointer to current end of dump + * @end: head + alloc_size for easy comparisons + * + * @dump_q: Dump queue for state machine to process + * @dump_rx: Devcoredump state machine work + * @dump_timeout: Devcoredump timeout work + * + * @coredump: Called from the driver's .coredump() function. 
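 *             Expected to trigger dump collection on the controller,
 *             typically when the device's coredump sysfs entry is written.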
+ * @dmp_hdr: Create a dump header to identify controller/fw/driver info + * @notify_change: Notify driver when devcoredump state has changed + */ +struct hci_devcoredump { + bool supported; + + enum devcoredump_state { + HCI_DEVCOREDUMP_IDLE, + HCI_DEVCOREDUMP_ACTIVE, + HCI_DEVCOREDUMP_DONE, + HCI_DEVCOREDUMP_ABORT, + HCI_DEVCOREDUMP_TIMEOUT, + } state; + + unsigned long timeout; + + size_t alloc_size; + char *head; + char *tail; + char *end; + + struct sk_buff_head dump_q; + struct work_struct dump_rx; + struct delayed_work dump_timeout; + + coredump_t coredump; + dmp_hdr_t dmp_hdr; + notify_change_t notify_change; +}; + +#ifdef CONFIG_DEV_COREDUMP + +void hci_devcd_reset(struct hci_dev *hdev); +void hci_devcd_rx(struct work_struct *work); +void hci_devcd_timeout(struct work_struct *work); + +int hci_devcd_register(struct hci_dev *hdev, coredump_t coredump, + dmp_hdr_t dmp_hdr, notify_change_t notify_change); +int hci_devcd_init(struct hci_dev *hdev, u32 dump_size); +int hci_devcd_append(struct hci_dev *hdev, struct sk_buff *skb); +int hci_devcd_append_pattern(struct hci_dev *hdev, u8 pattern, u32 len); +int hci_devcd_complete(struct hci_dev *hdev); +int hci_devcd_abort(struct hci_dev *hdev); + +#else + +static inline void hci_devcd_reset(struct hci_dev *hdev) {} +static inline void hci_devcd_rx(struct work_struct *work) {} +static inline void hci_devcd_timeout(struct work_struct *work) {} + +static inline int hci_devcd_register(struct hci_dev *hdev, coredump_t coredump, + dmp_hdr_t dmp_hdr, + notify_change_t notify_change) +{ + return -EOPNOTSUPP; +} + +static inline int hci_devcd_init(struct hci_dev *hdev, u32 dump_size) +{ + return -EOPNOTSUPP; +} + +static inline int hci_devcd_append(struct hci_dev *hdev, struct sk_buff *skb) +{ + return -EOPNOTSUPP; +} + +static inline int hci_devcd_append_pattern(struct hci_dev *hdev, + u8 pattern, u32 len) +{ + return -EOPNOTSUPP; +} + +static inline int hci_devcd_complete(struct hci_dev *hdev) +{ + return -EOPNOTSUPP; +} + +static inline int hci_devcd_abort(struct hci_dev *hdev) +{ + return -EOPNOTSUPP; +} + +#endif /* CONFIG_DEV_COREDUMP */ + +#endif /* __COREDUMP_H */ diff --git a/include/net/bluetooth/hci.h b/include/net/bluetooth/hci.h index 400f8a7d0c3f..07df96c47ef4 100644 --- a/include/net/bluetooth/hci.h +++ b/include/net/bluetooth/hci.h @@ -294,6 +294,21 @@ enum { * during the hdev->setup vendor callback. */ HCI_QUIRK_BROKEN_MWS_TRANSPORT_CONFIG, + + /* When this quirk is set, max_page for local extended features + * is set to 1, even if controller reports higher number. Some + * controllers (e.g. RTL8723CS) report more pages, but they + * don't actually support features declared there. + */ + HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2, + + /* + * When this quirk is set, the HCI_OP_LE_SET_RPA_TIMEOUT command is + * skipped during initialization. This is required for the Actions + * Semiconductor ATS2851 based controllers, which erroneously claims + * to support it. + */ + HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, }; /* HCI device flags */ diff --git a/include/net/bluetooth/hci_core.h b/include/net/bluetooth/hci_core.h index d5311ceb21c6..a6c8aee2f256 100644 --- a/include/net/bluetooth/hci_core.h +++ b/include/net/bluetooth/hci_core.h @@ -1,6 +1,7 @@ /* BlueZ - Bluetooth protocol stack for Linux Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. 
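As a usage sketch for the devcoredump API declared in coredump.h above (not an existing driver; all example_* names are hypothetical), a vendor driver registers its callbacks once and then feeds dump data from its event path:

/* Hypothetical driver glue for the hci devcoredump API; error handling
 * trimmed for brevity.
 */
static void example_coredump(struct hci_dev *hdev)
{
	/* Kick off the vendor-specific dump procedure on the controller;
	 * the actual command is driver specific and omitted here.
	 */
}

static void example_dmp_hdr(struct hci_dev *hdev, struct sk_buff *skb)
{
	char buf[64];
	int len;

	len = scnprintf(buf, sizeof(buf), "Controller Name: example\n");
	skb_put_data(skb, buf, len);
}

static int example_probe_devcd(struct hci_dev *hdev)
{
	return hci_devcd_register(hdev, example_coredump, example_dmp_hdr, NULL);
}

/* Later, from the driver's vendor event handler: */
static void example_feed_dump(struct hci_dev *hdev, struct sk_buff *frag,
			      u32 total_size, bool first, bool last)
{
	if (first)
		hci_devcd_init(hdev, total_size);

	hci_devcd_append(hdev, frag);

	if (last)
		hci_devcd_complete(hdev);
}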
+ Copyright 2023 NXP Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> @@ -32,6 +33,7 @@ #include <net/bluetooth/hci.h> #include <net/bluetooth/hci_sync.h> #include <net/bluetooth/hci_sock.h> +#include <net/bluetooth/coredump.h> /* HCI priority */ #define HCI_PRIO_MAX 7 @@ -590,6 +592,10 @@ struct hci_dev { const char *fw_info; struct dentry *debugfs; +#ifdef CONFIG_DEV_COREDUMP + struct hci_devcoredump dump; +#endif + struct device dev; struct rfkill *rfkill; @@ -764,7 +770,10 @@ struct hci_conn { void *iso_data; struct amp_mgr *amp_mgr; - struct hci_conn *link; + struct list_head link_list; + struct hci_conn *parent; + struct hci_link *link; + struct bt_codec codec; void (*connect_cfm_cb) (struct hci_conn *conn, u8 status); @@ -774,6 +783,11 @@ struct hci_conn { void (*cleanup)(struct hci_conn *conn); }; +struct hci_link { + struct list_head list; + struct hci_conn *conn; +}; + struct hci_chan { struct list_head list; __u16 handle; @@ -979,7 +993,7 @@ static inline bool hci_conn_sc_enabled(struct hci_conn *conn) static inline void hci_conn_hash_add(struct hci_dev *hdev, struct hci_conn *c) { struct hci_conn_hash *h = &hdev->conn_hash; - list_add_rcu(&c->list, &h->list); + list_add_tail_rcu(&c->list, &h->list); switch (c->type) { case ACL_LINK: h->acl_num++; @@ -1091,7 +1105,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_bis(struct hci_dev *hdev, if (bacmp(&c->dst, ba) || c->type != ISO_LINK) continue; - if (c->iso_qos.big == big && c->iso_qos.bis == bis) { + if (c->iso_qos.bcast.big == big && c->iso_qos.bcast.bis == bis) { rcu_read_unlock(); return c; } @@ -1166,7 +1180,9 @@ static inline struct hci_conn *hci_conn_hash_lookup_le(struct hci_dev *hdev, static inline struct hci_conn *hci_conn_hash_lookup_cis(struct hci_dev *hdev, bdaddr_t *ba, - __u8 ba_type) + __u8 ba_type, + __u8 cig, + __u8 id) { struct hci_conn_hash *h = &hdev->conn_hash; struct hci_conn *c; @@ -1177,6 +1193,14 @@ static inline struct hci_conn *hci_conn_hash_lookup_cis(struct hci_dev *hdev, if (c->type != ISO_LINK) continue; + /* Match CIG ID if set */ + if (cig != BT_ISO_QOS_CIG_UNSET && cig != c->iso_qos.ucast.cig) + continue; + + /* Match CIS ID if set */ + if (id != BT_ISO_QOS_CIS_UNSET && id != c->iso_qos.ucast.cis) + continue; + if (ba_type == c->dst_type && !bacmp(&c->dst, ba)) { rcu_read_unlock(); return c; @@ -1200,7 +1224,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_cig(struct hci_dev *hdev, if (c->type != ISO_LINK) continue; - if (handle == c->iso_qos.cig) { + if (handle == c->iso_qos.ucast.cig) { rcu_read_unlock(); return c; } @@ -1223,7 +1247,7 @@ static inline struct hci_conn *hci_conn_hash_lookup_big(struct hci_dev *hdev, if (bacmp(&c->dst, BDADDR_ANY) || c->type != ISO_LINK) continue; - if (handle == c->iso_qos.big) { + if (handle == c->iso_qos.bcast.big) { rcu_read_unlock(); return c; } @@ -1332,7 +1356,7 @@ struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, struct bt_iso_qos *qos, __u8 data_len, __u8 *data); int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, - __u8 sid); + __u8 sid, struct bt_iso_qos *qos); int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos, __u16 sync_handle, __u8 num_bis, __u8 bis[]); int hci_conn_check_link_mode(struct hci_conn *conn); @@ -1377,12 +1401,14 @@ static inline void hci_conn_put(struct hci_conn *conn) put_device(&conn->dev); } -static inline void hci_conn_hold(struct hci_conn *conn) +static inline struct hci_conn *hci_conn_hold(struct hci_conn *conn) { 
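	/* hci_conn_hold() now returns the connection itself so the hold can
	 * be taken inside an assignment, e.g. link->conn = hci_conn_hold(conn)
	 * in hci_conn_link().
	 */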
BT_DBG("hcon %p orig refcnt %d", conn, atomic_read(&conn->refcnt)); atomic_inc(&conn->refcnt); cancel_delayed_work(&conn->disc_work); + + return conn; } static inline void hci_conn_drop(struct hci_conn *conn) @@ -1497,6 +1523,15 @@ static inline void hci_set_aosp_capable(struct hci_dev *hdev) #endif } +static inline void hci_devcd_setup(struct hci_dev *hdev) +{ +#ifdef CONFIG_DEV_COREDUMP + INIT_WORK(&hdev->dump.dump_rx, hci_devcd_rx); + INIT_DELAYED_WORK(&hdev->dump.dump_timeout, hci_devcd_timeout); + skb_queue_head_init(&hdev->dump.dump_q); +#endif +} + int hci_dev_open(__u16 dev); int hci_dev_close(__u16 dev); int hci_dev_do_close(struct hci_dev *hdev); @@ -1668,9 +1703,13 @@ void hci_conn_del_sysfs(struct hci_conn *conn); #define scan_1m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_1M) || \ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_1M)) +#define le_2m_capable(dev) (((dev)->le_features[1] & HCI_LE_PHY_2M)) + #define scan_2m(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_2M) || \ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_2M)) +#define le_coded_capable(dev) (((dev)->le_features[1] & HCI_LE_PHY_CODED)) + #define scan_coded(dev) (((dev)->le_tx_def_phys & HCI_LE_SET_PHY_CODED) || \ ((dev)->le_rx_def_phys & HCI_LE_SET_PHY_CODED)) diff --git a/include/net/bluetooth/hci_sync.h b/include/net/bluetooth/hci_sync.h index 17f5a4c32f36..2495be4d8b82 100644 --- a/include/net/bluetooth/hci_sync.h +++ b/include/net/bluetooth/hci_sync.h @@ -41,6 +41,8 @@ void hci_cmd_sync_clear(struct hci_dev *hdev); void hci_cmd_sync_cancel(struct hci_dev *hdev, int err); void __hci_cmd_sync_cancel(struct hci_dev *hdev, int err); +int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy); int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, void *data, hci_cmd_sync_work_destroy_t destroy); @@ -122,6 +124,8 @@ int hci_abort_conn_sync(struct hci_dev *hdev, struct hci_conn *conn, u8 reason); int hci_le_create_conn_sync(struct hci_dev *hdev, struct hci_conn *conn); +int hci_le_create_cis_sync(struct hci_dev *hdev, struct hci_conn *conn); + int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle); int hci_le_terminate_big_sync(struct hci_dev *hdev, u8 handle, u8 reason); diff --git a/include/net/bluetooth/l2cap.h b/include/net/bluetooth/l2cap.h index 2f766e3437ce..cf393e72d6ed 100644 --- a/include/net/bluetooth/l2cap.h +++ b/include/net/bluetooth/l2cap.h @@ -694,7 +694,7 @@ struct l2cap_conn { struct sk_buff_head pending_rx; struct work_struct pending_rx_work; - struct work_struct id_addr_update_work; + struct delayed_work id_addr_timer; __u8 disc_reason; diff --git a/include/net/bluetooth/mgmt.h b/include/net/bluetooth/mgmt.h index e18a927669c0..a5801649f619 100644 --- a/include/net/bluetooth/mgmt.h +++ b/include/net/bluetooth/mgmt.h @@ -91,26 +91,26 @@ struct mgmt_rp_read_index_list { #define MGMT_MAX_NAME_LENGTH (HCI_MAX_NAME_LENGTH + 1) #define MGMT_MAX_SHORT_NAME_LENGTH (HCI_MAX_SHORT_NAME_LENGTH + 1) -#define MGMT_SETTING_POWERED 0x00000001 -#define MGMT_SETTING_CONNECTABLE 0x00000002 -#define MGMT_SETTING_FAST_CONNECTABLE 0x00000004 -#define MGMT_SETTING_DISCOVERABLE 0x00000008 -#define MGMT_SETTING_BONDABLE 0x00000010 -#define MGMT_SETTING_LINK_SECURITY 0x00000020 -#define MGMT_SETTING_SSP 0x00000040 -#define MGMT_SETTING_BREDR 0x00000080 -#define MGMT_SETTING_HS 0x00000100 -#define MGMT_SETTING_LE 0x00000200 -#define MGMT_SETTING_ADVERTISING 0x00000400 -#define MGMT_SETTING_SECURE_CONN 0x00000800 -#define 
MGMT_SETTING_DEBUG_KEYS 0x00001000 -#define MGMT_SETTING_PRIVACY 0x00002000 -#define MGMT_SETTING_CONFIGURATION 0x00004000 -#define MGMT_SETTING_STATIC_ADDRESS 0x00008000 -#define MGMT_SETTING_PHY_CONFIGURATION 0x00010000 -#define MGMT_SETTING_WIDEBAND_SPEECH 0x00020000 -#define MGMT_SETTING_CIS_CENTRAL 0x00040000 -#define MGMT_SETTING_CIS_PERIPHERAL 0x00080000 +#define MGMT_SETTING_POWERED BIT(0) +#define MGMT_SETTING_CONNECTABLE BIT(1) +#define MGMT_SETTING_FAST_CONNECTABLE BIT(2) +#define MGMT_SETTING_DISCOVERABLE BIT(3) +#define MGMT_SETTING_BONDABLE BIT(4) +#define MGMT_SETTING_LINK_SECURITY BIT(5) +#define MGMT_SETTING_SSP BIT(6) +#define MGMT_SETTING_BREDR BIT(7) +#define MGMT_SETTING_HS BIT(8) +#define MGMT_SETTING_LE BIT(9) +#define MGMT_SETTING_ADVERTISING BIT(10) +#define MGMT_SETTING_SECURE_CONN BIT(11) +#define MGMT_SETTING_DEBUG_KEYS BIT(12) +#define MGMT_SETTING_PRIVACY BIT(13) +#define MGMT_SETTING_CONFIGURATION BIT(14) +#define MGMT_SETTING_STATIC_ADDRESS BIT(15) +#define MGMT_SETTING_PHY_CONFIGURATION BIT(16) +#define MGMT_SETTING_WIDEBAND_SPEECH BIT(17) +#define MGMT_SETTING_CIS_CENTRAL BIT(18) +#define MGMT_SETTING_CIS_PERIPHERAL BIT(19) #define MGMT_OP_READ_INFO 0x0004 #define MGMT_READ_INFO_SIZE 0 @@ -635,21 +635,21 @@ struct mgmt_rp_get_phy_configuration { } __packed; #define MGMT_GET_PHY_CONFIGURATION_SIZE 0 -#define MGMT_PHY_BR_1M_1SLOT 0x00000001 -#define MGMT_PHY_BR_1M_3SLOT 0x00000002 -#define MGMT_PHY_BR_1M_5SLOT 0x00000004 -#define MGMT_PHY_EDR_2M_1SLOT 0x00000008 -#define MGMT_PHY_EDR_2M_3SLOT 0x00000010 -#define MGMT_PHY_EDR_2M_5SLOT 0x00000020 -#define MGMT_PHY_EDR_3M_1SLOT 0x00000040 -#define MGMT_PHY_EDR_3M_3SLOT 0x00000080 -#define MGMT_PHY_EDR_3M_5SLOT 0x00000100 -#define MGMT_PHY_LE_1M_TX 0x00000200 -#define MGMT_PHY_LE_1M_RX 0x00000400 -#define MGMT_PHY_LE_2M_TX 0x00000800 -#define MGMT_PHY_LE_2M_RX 0x00001000 -#define MGMT_PHY_LE_CODED_TX 0x00002000 -#define MGMT_PHY_LE_CODED_RX 0x00004000 +#define MGMT_PHY_BR_1M_1SLOT BIT(0) +#define MGMT_PHY_BR_1M_3SLOT BIT(1) +#define MGMT_PHY_BR_1M_5SLOT BIT(2) +#define MGMT_PHY_EDR_2M_1SLOT BIT(3) +#define MGMT_PHY_EDR_2M_3SLOT BIT(4) +#define MGMT_PHY_EDR_2M_5SLOT BIT(5) +#define MGMT_PHY_EDR_3M_1SLOT BIT(6) +#define MGMT_PHY_EDR_3M_3SLOT BIT(7) +#define MGMT_PHY_EDR_3M_5SLOT BIT(8) +#define MGMT_PHY_LE_1M_TX BIT(9) +#define MGMT_PHY_LE_1M_RX BIT(10) +#define MGMT_PHY_LE_2M_TX BIT(11) +#define MGMT_PHY_LE_2M_RX BIT(12) +#define MGMT_PHY_LE_CODED_TX BIT(13) +#define MGMT_PHY_LE_CODED_RX BIT(14) #define MGMT_PHY_BREDR_MASK (MGMT_PHY_BR_1M_1SLOT | MGMT_PHY_BR_1M_3SLOT | \ MGMT_PHY_BR_1M_5SLOT | MGMT_PHY_EDR_2M_1SLOT | \ @@ -974,11 +974,11 @@ struct mgmt_ev_auth_failed { __u8 status; } __packed; -#define MGMT_DEV_FOUND_CONFIRM_NAME 0x01 -#define MGMT_DEV_FOUND_LEGACY_PAIRING 0x02 -#define MGMT_DEV_FOUND_NOT_CONNECTABLE 0x04 -#define MGMT_DEV_FOUND_INITIATED_CONN 0x08 -#define MGMT_DEV_FOUND_NAME_REQUEST_FAILED 0x10 +#define MGMT_DEV_FOUND_CONFIRM_NAME BIT(0) +#define MGMT_DEV_FOUND_LEGACY_PAIRING BIT(1) +#define MGMT_DEV_FOUND_NOT_CONNECTABLE BIT(2) +#define MGMT_DEV_FOUND_INITIATED_CONN BIT(3) +#define MGMT_DEV_FOUND_NAME_REQUEST_FAILED BIT(4) #define MGMT_EV_DEVICE_FOUND 0x0012 struct mgmt_ev_device_found { diff --git a/include/net/ip_vs.h b/include/net/ip_vs.h index 6d71a5ff52df..ff406ef4fd4a 100644 --- a/include/net/ip_vs.h +++ b/include/net/ip_vs.h @@ -265,26 +265,6 @@ static inline const char *ip_vs_dbg_addr(int af, char *buf, size_t buf_len, pr_err(msg, ##__VA_ARGS__); \ } while (0) -#ifdef 
CONFIG_IP_VS_DEBUG -#define EnterFunction(level) \ - do { \ - if (level <= ip_vs_get_debug_level()) \ - printk(KERN_DEBUG \ - pr_fmt("Enter: %s, %s line %i\n"), \ - __func__, __FILE__, __LINE__); \ - } while (0) -#define LeaveFunction(level) \ - do { \ - if (level <= ip_vs_get_debug_level()) \ - printk(KERN_DEBUG \ - pr_fmt("Leave: %s, %s line %i\n"), \ - __func__, __FILE__, __LINE__); \ - } while (0) -#else -#define EnterFunction(level) do {} while (0) -#define LeaveFunction(level) do {} while (0) -#endif - /* The port number of FTP service (in network order). */ #define FTPPORT cpu_to_be16(21) #define FTPDATA cpu_to_be16(20) @@ -604,7 +584,7 @@ struct ip_vs_conn { spinlock_t lock; /* lock for state transition */ volatile __u16 state; /* state info */ volatile __u16 old_state; /* old state, to be used for - * state transition triggerd + * state transition triggered * synchronization */ __u32 fwmark; /* Fire wall mark from skb */ @@ -630,8 +610,10 @@ struct ip_vs_conn { */ struct ip_vs_app *app; /* bound ip_vs_app object */ void *app_data; /* Application private data */ - struct ip_vs_seq in_seq; /* incoming seq. struct */ - struct ip_vs_seq out_seq; /* outgoing seq. struct */ + struct_group(sync_conn_opt, + struct ip_vs_seq in_seq; /* incoming seq. struct */ + struct ip_vs_seq out_seq; /* outgoing seq. struct */ + ); const struct ip_vs_pe *pe; char *pe_data; @@ -653,7 +635,7 @@ struct ip_vs_service_user_kern { u16 protocol; union nf_inet_addr addr; /* virtual ip address */ __be16 port; - u32 fwmark; /* firwall mark of service */ + u32 fwmark; /* firewall mark of service */ /* virtual service options */ char *sched_name; @@ -1054,7 +1036,7 @@ struct netns_ipvs { struct ipvs_sync_daemon_cfg bcfg; /* Backup Configuration */ /* net name space ptr */ struct net *net; /* Needed by timer routines */ - /* Number of heterogeneous destinations, needed becaus heterogeneous + /* Number of heterogeneous destinations, needed because heterogeneous * are not supported when synchronization is enabled. 
*/ unsigned int mixed_address_family_dests; diff --git a/include/net/netfilter/nf_tables.h b/include/net/netfilter/nf_tables.h index 1b8e305bb54a..3ed21d2d5659 100644 --- a/include/net/netfilter/nf_tables.h +++ b/include/net/netfilter/nf_tables.h @@ -1046,6 +1046,18 @@ struct nft_rule_dp { __attribute__((aligned(__alignof__(struct nft_expr)))); }; +struct nft_rule_dp_last { + struct nft_rule_dp end; /* end of nft_rule_blob marker */ + struct rcu_head h; /* call_rcu head */ + struct nft_rule_blob *blob; /* ptr to free via call_rcu */ + const struct nft_chain *chain; /* for nftables tracing */ +}; + +static inline const struct nft_rule_dp *nft_rule_next(const struct nft_rule_dp *rule) +{ + return (void *)rule + sizeof(*rule) + rule->dlen; +} + struct nft_rule_blob { unsigned long size; unsigned char data[] @@ -1197,6 +1209,7 @@ unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv); * @genmask: generation mask * @afinfo: address family info * @name: name of the table + * @validate_state: internal, set when transaction adds jumps */ struct nft_table { struct list_head list; @@ -1215,6 +1228,7 @@ struct nft_table { char *name; u16 udlen; u8 *udata; + u8 validate_state; }; static inline bool nft_table_has_owner(const struct nft_table *table) @@ -1394,11 +1408,7 @@ void nft_unregister_flowtable_type(struct nf_flowtable_type *type); * @type: event type (enum nft_trace_types) * @skbid: hash of skb to be used as trace id * @packet_dumped: packet headers sent in a previous traceinfo message - * @pkt: pktinfo currently processed * @basechain: base chain currently processed - * @chain: chain currently processed - * @rule: rule that was evaluated - * @verdict: verdict given by rule */ struct nft_traceinfo { bool trace; @@ -1406,18 +1416,16 @@ struct nft_traceinfo { bool packet_dumped; enum nft_trace_types type:8; u32 skbid; - const struct nft_pktinfo *pkt; const struct nft_base_chain *basechain; - const struct nft_chain *chain; - const struct nft_rule_dp *rule; - const struct nft_verdict *verdict; }; void nft_trace_init(struct nft_traceinfo *info, const struct nft_pktinfo *pkt, - const struct nft_verdict *verdict, const struct nft_chain *basechain); -void nft_trace_notify(struct nft_traceinfo *info); +void nft_trace_notify(const struct nft_pktinfo *pkt, + const struct nft_verdict *verdict, + const struct nft_rule_dp *rule, + struct nft_traceinfo *info); #define MODULE_ALIAS_NFT_CHAIN(family, name) \ MODULE_ALIAS("nft-chain-" __stringify(family) "-" name) @@ -1601,6 +1609,8 @@ struct nft_trans_chain { struct nft_stats __percpu *stats; u8 policy; u32 chain_id; + struct nft_base_chain *basechain; + struct list_head hook_list; }; #define nft_trans_chain_update(trans) \ @@ -1613,6 +1623,10 @@ struct nft_trans_chain { (((struct nft_trans_chain *)trans->data)->policy) #define nft_trans_chain_id(trans) \ (((struct nft_trans_chain *)trans->data)->chain_id) +#define nft_trans_basechain(trans) \ + (((struct nft_trans_chain *)trans->data)->basechain) +#define nft_trans_chain_hooks(trans) \ + (((struct nft_trans_chain *)trans->data)->hook_list) struct nft_trans_table { bool update; @@ -1688,7 +1702,6 @@ struct nftables_pernet { struct mutex commit_mutex; u64 table_handle; unsigned int base_seq; - u8 validate_state; }; extern unsigned int nf_tables_net_id; diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 125f4628687c..d3fdf82282af 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -439,7 +439,7 @@ void batadv_interface_rx(struct 
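For context, the nft_rule_next() helper added above is meant for walking a rule blob up to its terminating marker. A minimal sketch of such a walk is shown below; the loop shape is assumed from the helper and the is_last end marker (see struct nft_rule_dp_last above), and the evaluation details are omitted.

static void example_walk_rules(const struct nft_rule_dp *rule)
{
	/* The blob ends with a rule that has is_last set. */
	for (; !rule->is_last; rule = nft_rule_next(rule)) {
		/* evaluate the expressions packed in rule->data here */
	}
}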
net_device *soft_iface, if (!pskb_may_pull(skb, VLAN_ETH_HLEN)) goto dropped; - vhdr = (struct vlan_ethhdr *)skb->data; + vhdr = skb_vlan_eth_hdr(skb); /* drop batman-in-batman packets to prevent loops */ if (vhdr->h_vlan_encapsulated_proto != htons(ETH_P_BATMAN)) diff --git a/net/bluetooth/Makefile b/net/bluetooth/Makefile index 0e7b7db42750..141ac1fda0bf 100644 --- a/net/bluetooth/Makefile +++ b/net/bluetooth/Makefile @@ -17,6 +17,8 @@ bluetooth-y := af_bluetooth.o hci_core.o hci_conn.o hci_event.o mgmt.o \ ecdh_helper.o hci_request.o mgmt_util.o mgmt_config.o hci_codec.o \ eir.o hci_sync.o +bluetooth-$(CONFIG_DEV_COREDUMP) += coredump.o + bluetooth-$(CONFIG_BT_BREDR) += sco.o bluetooth-$(CONFIG_BT_LE) += iso.o bluetooth-$(CONFIG_BT_HS) += a2mp.o amp.o diff --git a/net/bluetooth/coredump.c b/net/bluetooth/coredump.c new file mode 100644 index 000000000000..d2d2624ec708 --- /dev/null +++ b/net/bluetooth/coredump.c @@ -0,0 +1,536 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2023 Google Corporation + */ + +#include <linux/devcoredump.h> + +#include <asm/unaligned.h> +#include <net/bluetooth/bluetooth.h> +#include <net/bluetooth/hci_core.h> + +enum hci_devcoredump_pkt_type { + HCI_DEVCOREDUMP_PKT_INIT, + HCI_DEVCOREDUMP_PKT_SKB, + HCI_DEVCOREDUMP_PKT_PATTERN, + HCI_DEVCOREDUMP_PKT_COMPLETE, + HCI_DEVCOREDUMP_PKT_ABORT, +}; + +struct hci_devcoredump_skb_cb { + u16 pkt_type; +}; + +struct hci_devcoredump_skb_pattern { + u8 pattern; + u32 len; +} __packed; + +#define hci_dmp_cb(skb) ((struct hci_devcoredump_skb_cb *)((skb)->cb)) + +#define DBG_UNEXPECTED_STATE() \ + bt_dev_dbg(hdev, \ + "Unexpected packet (%d) for state (%d). ", \ + hci_dmp_cb(skb)->pkt_type, hdev->dump.state) + +#define MAX_DEVCOREDUMP_HDR_SIZE 512 /* bytes */ + +static int hci_devcd_update_hdr_state(char *buf, size_t size, int state) +{ + int len = 0; + + if (!buf) + return 0; + + len = scnprintf(buf, size, "Bluetooth devcoredump\nState: %d\n", state); + + return len + 1; /* scnprintf adds \0 at the end upon state rewrite */ +} + +/* Call with hci_dev_lock only. */ +static int hci_devcd_update_state(struct hci_dev *hdev, int state) +{ + bt_dev_dbg(hdev, "Updating devcoredump state from %d to %d.", + hdev->dump.state, state); + + hdev->dump.state = state; + + return hci_devcd_update_hdr_state(hdev->dump.head, + hdev->dump.alloc_size, state); +} + +static int hci_devcd_mkheader(struct hci_dev *hdev, struct sk_buff *skb) +{ + char dump_start[] = "--- Start dump ---\n"; + char hdr[80]; + int hdr_len; + + hdr_len = hci_devcd_update_hdr_state(hdr, sizeof(hdr), + HCI_DEVCOREDUMP_IDLE); + skb_put_data(skb, hdr, hdr_len); + + if (hdev->dump.dmp_hdr) + hdev->dump.dmp_hdr(hdev, skb); + + skb_put_data(skb, dump_start, strlen(dump_start)); + + return skb->len; +} + +/* Do not call with hci_dev_lock since this calls driver code. */ +static void hci_devcd_notify(struct hci_dev *hdev, int state) +{ + if (hdev->dump.notify_change) + hdev->dump.notify_change(hdev, state); +} + +/* Call with hci_dev_lock only. */ +void hci_devcd_reset(struct hci_dev *hdev) +{ + hdev->dump.head = NULL; + hdev->dump.tail = NULL; + hdev->dump.alloc_size = 0; + + hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_IDLE); + + cancel_delayed_work(&hdev->dump.dump_timeout); + skb_queue_purge(&hdev->dump.dump_q); +} + +/* Call with hci_dev_lock only. */ +static void hci_devcd_free(struct hci_dev *hdev) +{ + if (hdev->dump.head) + vfree(hdev->dump.head); + + hci_devcd_reset(hdev); +} + +/* Call with hci_dev_lock only. 
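 * The buffer is allocated with vmalloc(), so this must only be called from
 * process context (the devcoredump work handler in practice).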
*/ +static int hci_devcd_alloc(struct hci_dev *hdev, u32 size) +{ + hdev->dump.head = vmalloc(size); + if (!hdev->dump.head) + return -ENOMEM; + + hdev->dump.alloc_size = size; + hdev->dump.tail = hdev->dump.head; + hdev->dump.end = hdev->dump.head + size; + + hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_IDLE); + + return 0; +} + +/* Call with hci_dev_lock only. */ +static bool hci_devcd_copy(struct hci_dev *hdev, char *buf, u32 size) +{ + if (hdev->dump.tail + size > hdev->dump.end) + return false; + + memcpy(hdev->dump.tail, buf, size); + hdev->dump.tail += size; + + return true; +} + +/* Call with hci_dev_lock only. */ +static bool hci_devcd_memset(struct hci_dev *hdev, u8 pattern, u32 len) +{ + if (hdev->dump.tail + len > hdev->dump.end) + return false; + + memset(hdev->dump.tail, pattern, len); + hdev->dump.tail += len; + + return true; +} + +/* Call with hci_dev_lock only. */ +static int hci_devcd_prepare(struct hci_dev *hdev, u32 dump_size) +{ + struct sk_buff *skb; + int dump_hdr_size; + int err = 0; + + skb = alloc_skb(MAX_DEVCOREDUMP_HDR_SIZE, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + dump_hdr_size = hci_devcd_mkheader(hdev, skb); + + if (hci_devcd_alloc(hdev, dump_hdr_size + dump_size)) { + err = -ENOMEM; + goto hdr_free; + } + + /* Insert the device header */ + if (!hci_devcd_copy(hdev, skb->data, skb->len)) { + bt_dev_err(hdev, "Failed to insert header"); + hci_devcd_free(hdev); + + err = -ENOMEM; + goto hdr_free; + } + +hdr_free: + kfree_skb(skb); + + return err; +} + +static void hci_devcd_handle_pkt_init(struct hci_dev *hdev, struct sk_buff *skb) +{ + u32 dump_size; + + if (hdev->dump.state != HCI_DEVCOREDUMP_IDLE) { + DBG_UNEXPECTED_STATE(); + return; + } + + if (skb->len != sizeof(dump_size)) { + bt_dev_dbg(hdev, "Invalid dump init pkt"); + return; + } + + dump_size = get_unaligned_le32(skb_pull_data(skb, 4)); + if (!dump_size) { + bt_dev_err(hdev, "Zero size dump init pkt"); + return; + } + + if (hci_devcd_prepare(hdev, dump_size)) { + bt_dev_err(hdev, "Failed to prepare for dump"); + return; + } + + hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_ACTIVE); + queue_delayed_work(hdev->workqueue, &hdev->dump.dump_timeout, + hdev->dump.timeout); +} + +static void hci_devcd_handle_pkt_skb(struct hci_dev *hdev, struct sk_buff *skb) +{ + if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) { + DBG_UNEXPECTED_STATE(); + return; + } + + if (!hci_devcd_copy(hdev, skb->data, skb->len)) + bt_dev_dbg(hdev, "Failed to insert skb"); +} + +static void hci_devcd_handle_pkt_pattern(struct hci_dev *hdev, + struct sk_buff *skb) +{ + struct hci_devcoredump_skb_pattern *pattern; + + if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) { + DBG_UNEXPECTED_STATE(); + return; + } + + if (skb->len != sizeof(*pattern)) { + bt_dev_dbg(hdev, "Invalid pattern skb"); + return; + } + + pattern = skb_pull_data(skb, sizeof(*pattern)); + + if (!hci_devcd_memset(hdev, pattern->pattern, pattern->len)) + bt_dev_dbg(hdev, "Failed to set pattern"); +} + +static void hci_devcd_handle_pkt_complete(struct hci_dev *hdev, + struct sk_buff *skb) +{ + u32 dump_size; + + if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) { + DBG_UNEXPECTED_STATE(); + return; + } + + hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_DONE); + dump_size = hdev->dump.tail - hdev->dump.head; + + bt_dev_dbg(hdev, "complete with size %u (expect %zu)", dump_size, + hdev->dump.alloc_size); + + dev_coredumpv(&hdev->dev, hdev->dump.head, dump_size, GFP_KERNEL); +} + +static void hci_devcd_handle_pkt_abort(struct hci_dev *hdev, + struct sk_buff *skb) +{ + u32 
dump_size; + + if (hdev->dump.state != HCI_DEVCOREDUMP_ACTIVE) { + DBG_UNEXPECTED_STATE(); + return; + } + + hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_ABORT); + dump_size = hdev->dump.tail - hdev->dump.head; + + bt_dev_dbg(hdev, "aborted with size %u (expect %zu)", dump_size, + hdev->dump.alloc_size); + + /* Emit a devcoredump with the available data */ + dev_coredumpv(&hdev->dev, hdev->dump.head, dump_size, GFP_KERNEL); +} + +/* Bluetooth devcoredump state machine. + * + * Devcoredump states: + * + * HCI_DEVCOREDUMP_IDLE: The default state. + * + * HCI_DEVCOREDUMP_ACTIVE: A devcoredump will be in this state once it has + * been initialized using hci_devcd_init(). Once active, the driver + * can append data using hci_devcd_append() or insert a pattern + * using hci_devcd_append_pattern(). + * + * HCI_DEVCOREDUMP_DONE: Once the dump collection is complete, the driver + * can signal the completion using hci_devcd_complete(). A + * devcoredump is generated indicating the completion event and + * then the state machine is reset to the default state. + * + * HCI_DEVCOREDUMP_ABORT: The driver can cancel ongoing dump collection in + * case of any error using hci_devcd_abort(). A devcoredump is + * still generated with the available data indicating the abort + * event and then the state machine is reset to the default state. + * + * HCI_DEVCOREDUMP_TIMEOUT: A timeout timer for HCI_DEVCOREDUMP_TIMEOUT sec + * is started during devcoredump initialization. Once the timeout + * occurs, the driver is notified, a devcoredump is generated with + * the available data indicating the timeout event and then the + * state machine is reset to the default state. + * + * The driver must register using hci_devcd_register() before using the hci + * devcoredump APIs. + */ +void hci_devcd_rx(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, dump.dump_rx); + struct sk_buff *skb; + int start_state; + + while ((skb = skb_dequeue(&hdev->dump.dump_q))) { + /* Return if timeout occurs. The timeout handler function + * hci_devcd_timeout() will report the available dump data. + */ + if (hdev->dump.state == HCI_DEVCOREDUMP_TIMEOUT) { + kfree_skb(skb); + return; + } + + hci_dev_lock(hdev); + start_state = hdev->dump.state; + + switch (hci_dmp_cb(skb)->pkt_type) { + case HCI_DEVCOREDUMP_PKT_INIT: + hci_devcd_handle_pkt_init(hdev, skb); + break; + + case HCI_DEVCOREDUMP_PKT_SKB: + hci_devcd_handle_pkt_skb(hdev, skb); + break; + + case HCI_DEVCOREDUMP_PKT_PATTERN: + hci_devcd_handle_pkt_pattern(hdev, skb); + break; + + case HCI_DEVCOREDUMP_PKT_COMPLETE: + hci_devcd_handle_pkt_complete(hdev, skb); + break; + + case HCI_DEVCOREDUMP_PKT_ABORT: + hci_devcd_handle_pkt_abort(hdev, skb); + break; + + default: + bt_dev_dbg(hdev, "Unknown packet (%d) for state (%d). 
", + hci_dmp_cb(skb)->pkt_type, hdev->dump.state); + break; + } + + hci_dev_unlock(hdev); + kfree_skb(skb); + + /* Notify the driver about any state changes before resetting + * the state machine + */ + if (start_state != hdev->dump.state) + hci_devcd_notify(hdev, hdev->dump.state); + + /* Reset the state machine if the devcoredump is complete */ + hci_dev_lock(hdev); + if (hdev->dump.state == HCI_DEVCOREDUMP_DONE || + hdev->dump.state == HCI_DEVCOREDUMP_ABORT) + hci_devcd_reset(hdev); + hci_dev_unlock(hdev); + } +} +EXPORT_SYMBOL(hci_devcd_rx); + +void hci_devcd_timeout(struct work_struct *work) +{ + struct hci_dev *hdev = container_of(work, struct hci_dev, + dump.dump_timeout.work); + u32 dump_size; + + hci_devcd_notify(hdev, HCI_DEVCOREDUMP_TIMEOUT); + + hci_dev_lock(hdev); + + cancel_work(&hdev->dump.dump_rx); + + hci_devcd_update_state(hdev, HCI_DEVCOREDUMP_TIMEOUT); + + dump_size = hdev->dump.tail - hdev->dump.head; + bt_dev_dbg(hdev, "timeout with size %u (expect %zu)", dump_size, + hdev->dump.alloc_size); + + /* Emit a devcoredump with the available data */ + dev_coredumpv(&hdev->dev, hdev->dump.head, dump_size, GFP_KERNEL); + + hci_devcd_reset(hdev); + + hci_dev_unlock(hdev); +} +EXPORT_SYMBOL(hci_devcd_timeout); + +int hci_devcd_register(struct hci_dev *hdev, coredump_t coredump, + dmp_hdr_t dmp_hdr, notify_change_t notify_change) +{ + /* Driver must implement coredump() and dmp_hdr() functions for + * bluetooth devcoredump. The coredump() should trigger a coredump + * event on the controller when the device's coredump sysfs entry is + * written to. The dmp_hdr() should create a dump header to identify + * the controller/fw/driver info. + */ + if (!coredump || !dmp_hdr) + return -EINVAL; + + hci_dev_lock(hdev); + hdev->dump.coredump = coredump; + hdev->dump.dmp_hdr = dmp_hdr; + hdev->dump.notify_change = notify_change; + hdev->dump.supported = true; + hdev->dump.timeout = DEVCOREDUMP_TIMEOUT; + hci_dev_unlock(hdev); + + return 0; +} +EXPORT_SYMBOL(hci_devcd_register); + +static inline bool hci_devcd_enabled(struct hci_dev *hdev) +{ + return hdev->dump.supported; +} + +int hci_devcd_init(struct hci_dev *hdev, u32 dump_size) +{ + struct sk_buff *skb; + + if (!hci_devcd_enabled(hdev)) + return -EOPNOTSUPP; + + skb = alloc_skb(sizeof(dump_size), GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_INIT; + put_unaligned_le32(dump_size, skb_put(skb, 4)); + + skb_queue_tail(&hdev->dump.dump_q, skb); + queue_work(hdev->workqueue, &hdev->dump.dump_rx); + + return 0; +} +EXPORT_SYMBOL(hci_devcd_init); + +int hci_devcd_append(struct hci_dev *hdev, struct sk_buff *skb) +{ + if (!skb) + return -ENOMEM; + + if (!hci_devcd_enabled(hdev)) { + kfree_skb(skb); + return -EOPNOTSUPP; + } + + hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_SKB; + + skb_queue_tail(&hdev->dump.dump_q, skb); + queue_work(hdev->workqueue, &hdev->dump.dump_rx); + + return 0; +} +EXPORT_SYMBOL(hci_devcd_append); + +int hci_devcd_append_pattern(struct hci_dev *hdev, u8 pattern, u32 len) +{ + struct hci_devcoredump_skb_pattern p; + struct sk_buff *skb; + + if (!hci_devcd_enabled(hdev)) + return -EOPNOTSUPP; + + skb = alloc_skb(sizeof(p), GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + p.pattern = pattern; + p.len = len; + + hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_PATTERN; + skb_put_data(skb, &p, sizeof(p)); + + skb_queue_tail(&hdev->dump.dump_q, skb); + queue_work(hdev->workqueue, &hdev->dump.dump_rx); + + return 0; +} +EXPORT_SYMBOL(hci_devcd_append_pattern); + +int 
hci_devcd_complete(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + if (!hci_devcd_enabled(hdev)) + return -EOPNOTSUPP; + + skb = alloc_skb(0, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_COMPLETE; + + skb_queue_tail(&hdev->dump.dump_q, skb); + queue_work(hdev->workqueue, &hdev->dump.dump_rx); + + return 0; +} +EXPORT_SYMBOL(hci_devcd_complete); + +int hci_devcd_abort(struct hci_dev *hdev) +{ + struct sk_buff *skb; + + if (!hci_devcd_enabled(hdev)) + return -EOPNOTSUPP; + + skb = alloc_skb(0, GFP_ATOMIC); + if (!skb) + return -ENOMEM; + + hci_dmp_cb(skb)->pkt_type = HCI_DEVCOREDUMP_PKT_ABORT; + + skb_queue_tail(&hdev->dump.dump_q, skb); + queue_work(hdev->workqueue, &hdev->dump.dump_rx); + + return 0; +} +EXPORT_SYMBOL(hci_devcd_abort); diff --git a/net/bluetooth/hci_conn.c b/net/bluetooth/hci_conn.c index 8455ba141ee6..640b951bf40a 100644 --- a/net/bluetooth/hci_conn.c +++ b/net/bluetooth/hci_conn.c @@ -1,6 +1,7 @@ /* BlueZ - Bluetooth protocol stack for Linux Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. + Copyright 2023 NXP Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> @@ -329,8 +330,11 @@ static void hci_add_sco(struct hci_conn *conn, __u16 handle) static bool find_next_esco_param(struct hci_conn *conn, const struct sco_param *esco_param, int size) { + if (!conn->parent) + return false; + for (; conn->attempt <= size; conn->attempt++) { - if (lmp_esco_2m_capable(conn->link) || + if (lmp_esco_2m_capable(conn->parent) || (esco_param[conn->attempt - 1].pkt_type & ESCO_2EV3)) break; BT_DBG("hcon %p skipped attempt %d, eSCO 2M not supported", @@ -460,7 +464,7 @@ static int hci_enhanced_setup_sync(struct hci_dev *hdev, void *data) break; case BT_CODEC_CVSD: - if (lmp_esco_capable(conn->link)) { + if (conn->parent && lmp_esco_capable(conn->parent)) { if (!find_next_esco_param(conn, esco_param_cvsd, ARRAY_SIZE(esco_param_cvsd))) return -EINVAL; @@ -530,7 +534,7 @@ static bool hci_setup_sync_conn(struct hci_conn *conn, __u16 handle) param = &esco_param_msbc[conn->attempt - 1]; break; case SCO_AIRMODE_CVSD: - if (lmp_esco_capable(conn->link)) { + if (conn->parent && lmp_esco_capable(conn->parent)) { if (!find_next_esco_param(conn, esco_param_cvsd, ARRAY_SIZE(esco_param_cvsd))) return false; @@ -636,21 +640,22 @@ void hci_le_start_enc(struct hci_conn *conn, __le16 ediv, __le64 rand, /* Device _must_ be locked */ void hci_sco_setup(struct hci_conn *conn, __u8 status) { - struct hci_conn *sco = conn->link; + struct hci_link *link; - if (!sco) + link = list_first_entry_or_null(&conn->link_list, struct hci_link, list); + if (!link || !link->conn) return; BT_DBG("hcon %p", conn); if (!status) { if (lmp_esco_capable(conn->hdev)) - hci_setup_sync(sco, conn->handle); + hci_setup_sync(link->conn, conn->handle); else - hci_add_sco(sco, conn->handle); + hci_add_sco(link->conn, conn->handle); } else { - hci_connect_cfm(sco, status); - hci_conn_del(sco); + hci_connect_cfm(link->conn, status); + hci_conn_del(link->conn); } } @@ -795,8 +800,8 @@ static void bis_list(struct hci_conn *conn, void *data) if (bacmp(&conn->dst, BDADDR_ANY)) return; - if (d->big != conn->iso_qos.big || d->bis == BT_ISO_QOS_BIS_UNSET || - d->bis != conn->iso_qos.bis) + if (d->big != conn->iso_qos.bcast.big || d->bis == BT_ISO_QOS_BIS_UNSET || + d->bis != conn->iso_qos.bcast.bis) return; d->count++; @@ -916,10 +921,10 @@ static void bis_cleanup(struct hci_conn *conn) if (!test_and_clear_bit(HCI_CONN_PER_ADV, &conn->flags)) return; - 
hci_le_terminate_big(hdev, conn->iso_qos.big, - conn->iso_qos.bis); + hci_le_terminate_big(hdev, conn->iso_qos.bcast.big, + conn->iso_qos.bcast.bis); } else { - hci_le_big_terminate(hdev, conn->iso_qos.big, + hci_le_big_terminate(hdev, conn->iso_qos.bcast.big, conn->sync_handle); } } @@ -959,7 +964,7 @@ static void cis_cleanup(struct hci_conn *conn) struct iso_list_data d; memset(&d, 0, sizeof(d)); - d.cig = conn->iso_qos.cig; + d.cig = conn->iso_qos.ucast.cig; /* Check if ISO connection is a CIS and remove CIG if there are * no other connections using it. @@ -968,7 +973,7 @@ static void cis_cleanup(struct hci_conn *conn) if (d.count) return; - hci_le_remove_cig(hdev, conn->iso_qos.cig); + hci_le_remove_cig(hdev, conn->iso_qos.ucast.cig); } struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, @@ -1041,6 +1046,7 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, skb_queue_head_init(&conn->data_q); INIT_LIST_HEAD(&conn->chan_list); + INIT_LIST_HEAD(&conn->link_list); INIT_DELAYED_WORK(&conn->disc_work, hci_conn_timeout); INIT_DELAYED_WORK(&conn->auto_accept_work, hci_conn_auto_accept); @@ -1068,15 +1074,39 @@ struct hci_conn *hci_conn_add(struct hci_dev *hdev, int type, bdaddr_t *dst, return conn; } -static bool hci_conn_unlink(struct hci_conn *conn) +static void hci_conn_unlink(struct hci_conn *conn) { + struct hci_dev *hdev = conn->hdev; + + bt_dev_dbg(hdev, "hcon %p", conn); + + if (!conn->parent) { + struct hci_link *link, *t; + + list_for_each_entry_safe(link, t, &conn->link_list, list) + hci_conn_unlink(link->conn); + + return; + } + if (!conn->link) - return false; + return; + + hci_conn_put(conn->parent); + conn->parent = NULL; + + list_del_rcu(&conn->link->list); + synchronize_rcu(); - conn->link->link = NULL; + kfree(conn->link); conn->link = NULL; - return true; + /* Due to race, SCO connection might be not established + * yet at this point. Delete it now, otherwise it is + * possible for it to be stuck and can't be deleted. + */ + if (conn->handle == HCI_CONN_HANDLE_UNSET) + hci_conn_del(conn); } int hci_conn_del(struct hci_conn *conn) @@ -1090,18 +1120,7 @@ int hci_conn_del(struct hci_conn *conn) cancel_delayed_work_sync(&conn->idle_work); if (conn->type == ACL_LINK) { - struct hci_conn *link = conn->link; - - if (link) { - hci_conn_unlink(conn); - /* Due to race, SCO connection might be not established - * yet at this point. Delete it now, otherwise it is - * possible for it to be stuck and can't be deleted. 
- */ - if (link->handle == HCI_CONN_HANDLE_UNSET) - hci_conn_del(link); - } - + hci_conn_unlink(conn); /* Unacked frames */ hdev->acl_cnt += conn->sent; } else if (conn->type == LE_LINK) { @@ -1112,7 +1131,7 @@ int hci_conn_del(struct hci_conn *conn) else hdev->acl_cnt += conn->sent; } else { - struct hci_conn *acl = conn->link; + struct hci_conn *acl = conn->parent; if (acl) { hci_conn_unlink(conn); @@ -1411,7 +1430,7 @@ static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos) struct iso_list_data data; /* Allocate a BIG if not set */ - if (qos->big == BT_ISO_QOS_BIG_UNSET) { + if (qos->bcast.big == BT_ISO_QOS_BIG_UNSET) { for (data.big = 0x00; data.big < 0xef; data.big++) { data.count = 0; data.bis = 0xff; @@ -1426,7 +1445,7 @@ static int qos_set_big(struct hci_dev *hdev, struct bt_iso_qos *qos) return -EADDRNOTAVAIL; /* Update BIG */ - qos->big = data.big; + qos->bcast.big = data.big; } return 0; @@ -1437,7 +1456,7 @@ static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos) struct iso_list_data data; /* Allocate BIS if not set */ - if (qos->bis == BT_ISO_QOS_BIS_UNSET) { + if (qos->bcast.bis == BT_ISO_QOS_BIS_UNSET) { /* Find an unused adv set to advertise BIS, skip instance 0x00 * since it is reserved as general purpose set. */ @@ -1455,7 +1474,7 @@ static int qos_set_bis(struct hci_dev *hdev, struct bt_iso_qos *qos) return -EADDRNOTAVAIL; /* Update BIS */ - qos->bis = data.bis; + qos->bcast.bis = data.bis; } return 0; @@ -1484,8 +1503,8 @@ static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst, if (err) return ERR_PTR(err); - data.big = qos->big; - data.bis = qos->bis; + data.big = qos->bcast.big; + data.bis = qos->bcast.bis; data.count = 0; /* Check if there is already a matching BIG/BIS */ @@ -1493,7 +1512,7 @@ static struct hci_conn *hci_add_bis(struct hci_dev *hdev, bdaddr_t *dst, if (data.count) return ERR_PTR(-EADDRINUSE); - conn = hci_conn_hash_lookup_bis(hdev, dst, qos->big, qos->bis); + conn = hci_conn_hash_lookup_bis(hdev, dst, qos->bcast.big, qos->bcast.bis); if (conn) return ERR_PTR(-EADDRINUSE); @@ -1599,11 +1618,40 @@ struct hci_conn *hci_connect_acl(struct hci_dev *hdev, bdaddr_t *dst, return acl; } +static struct hci_link *hci_conn_link(struct hci_conn *parent, + struct hci_conn *conn) +{ + struct hci_dev *hdev = parent->hdev; + struct hci_link *link; + + bt_dev_dbg(hdev, "parent %p hcon %p", parent, conn); + + if (conn->link) + return conn->link; + + if (conn->parent) + return NULL; + + link = kzalloc(sizeof(*link), GFP_KERNEL); + if (!link) + return NULL; + + link->conn = hci_conn_hold(conn); + conn->link = link; + conn->parent = hci_conn_get(parent); + + /* Use list_add_tail_rcu append to the list */ + list_add_tail_rcu(&link->list, &parent->link_list); + + return link; +} + struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, __u16 setting, struct bt_codec *codec) { struct hci_conn *acl; struct hci_conn *sco; + struct hci_link *link; acl = hci_connect_acl(hdev, dst, BT_SECURITY_LOW, HCI_AT_NO_BONDING, CONN_REASON_SCO_CONNECT); @@ -1619,10 +1667,12 @@ struct hci_conn *hci_connect_sco(struct hci_dev *hdev, int type, bdaddr_t *dst, } } - acl->link = sco; - sco->link = acl; - - hci_conn_hold(sco); + link = hci_conn_link(acl, sco); + if (!link) { + hci_conn_drop(acl); + hci_conn_drop(sco); + return NULL; + } sco->setting = setting; sco->codec = *codec; @@ -1648,13 +1698,13 @@ static void cis_add(struct iso_list_data *d, struct bt_iso_qos *qos) { struct hci_cis_params *cis = 
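For readers following the s/qos->x/qos->ucast.x/ and s/qos->x/qos->bcast.x/ conversions, the shape of the reworked QoS structure, as it can be inferred from the accesses in this series, is roughly the sketch below. Field widths and the inner struct names are assumptions; the authoritative definition lives in the Bluetooth headers.

#include <linux/types.h>

/* Inferred layout -- not copied from the header. */
struct bt_iso_io_qos {
        __u32 interval;         /* SDU interval, microseconds */
        __u16 latency;          /* max transport latency, ms */
        __u16 sdu;              /* max SDU size */
        __u8  phy;
        __u8  rtn;              /* retransmission number */
};

struct bt_iso_ucast_qos {
        __u8  cig;
        __u8  cis;
        __u8  sca;
        __u8  packing;
        __u8  framing;
        struct bt_iso_io_qos in;
        struct bt_iso_io_qos out;
};

struct bt_iso_bcast_qos {
        __u8  big;
        __u8  bis;
        __u8  sync_interval;
        __u8  packing;
        __u8  framing;
        struct bt_iso_io_qos in;
        struct bt_iso_io_qos out;
        __u8  encryption;
        __u8  bcode[16];
        __u8  options;
        __u16 skip;
        __u16 sync_timeout;
        __u8  sync_cte_type;
        __u8  mse;
        __u16 timeout;
};

struct bt_iso_qos {
        union {
                struct bt_iso_ucast_qos ucast;
                struct bt_iso_bcast_qos bcast;
        };
};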
&d->pdu.cis[d->pdu.cp.num_cis]; - cis->cis_id = qos->cis; - cis->c_sdu = cpu_to_le16(qos->out.sdu); - cis->p_sdu = cpu_to_le16(qos->in.sdu); - cis->c_phy = qos->out.phy ? qos->out.phy : qos->in.phy; - cis->p_phy = qos->in.phy ? qos->in.phy : qos->out.phy; - cis->c_rtn = qos->out.rtn; - cis->p_rtn = qos->in.rtn; + cis->cis_id = qos->ucast.cis; + cis->c_sdu = cpu_to_le16(qos->ucast.out.sdu); + cis->p_sdu = cpu_to_le16(qos->ucast.in.sdu); + cis->c_phy = qos->ucast.out.phy ? qos->ucast.out.phy : qos->ucast.in.phy; + cis->p_phy = qos->ucast.in.phy ? qos->ucast.in.phy : qos->ucast.out.phy; + cis->c_rtn = qos->ucast.out.rtn; + cis->p_rtn = qos->ucast.in.rtn; d->pdu.cp.num_cis++; } @@ -1667,8 +1717,8 @@ static void cis_list(struct hci_conn *conn, void *data) if (!bacmp(&conn->dst, BDADDR_ANY)) return; - if (d->cig != conn->iso_qos.cig || d->cis == BT_ISO_QOS_CIS_UNSET || - d->cis != conn->iso_qos.cis) + if (d->cig != conn->iso_qos.ucast.cig || d->cis == BT_ISO_QOS_CIS_UNSET || + d->cis != conn->iso_qos.ucast.cis) return; d->count++; @@ -1687,18 +1737,18 @@ static int hci_le_create_big(struct hci_conn *conn, struct bt_iso_qos *qos) memset(&cp, 0, sizeof(cp)); - cp.handle = qos->big; - cp.adv_handle = qos->bis; + cp.handle = qos->bcast.big; + cp.adv_handle = qos->bcast.bis; cp.num_bis = 0x01; - hci_cpu_to_le24(qos->out.interval, cp.bis.sdu_interval); - cp.bis.sdu = cpu_to_le16(qos->out.sdu); - cp.bis.latency = cpu_to_le16(qos->out.latency); - cp.bis.rtn = qos->out.rtn; - cp.bis.phy = qos->out.phy; - cp.bis.packing = qos->packing; - cp.bis.framing = qos->framing; - cp.bis.encryption = 0x00; - memset(&cp.bis.bcode, 0, sizeof(cp.bis.bcode)); + hci_cpu_to_le24(qos->bcast.out.interval, cp.bis.sdu_interval); + cp.bis.sdu = cpu_to_le16(qos->bcast.out.sdu); + cp.bis.latency = cpu_to_le16(qos->bcast.out.latency); + cp.bis.rtn = qos->bcast.out.rtn; + cp.bis.phy = qos->bcast.out.phy; + cp.bis.packing = qos->bcast.packing; + cp.bis.framing = qos->bcast.framing; + cp.bis.encryption = qos->bcast.encryption; + memcpy(cp.bis.bcode, qos->bcast.bcode, sizeof(cp.bis.bcode)); return hci_send_cmd(hdev, HCI_OP_LE_CREATE_BIG, sizeof(cp), &cp); } @@ -1711,7 +1761,7 @@ static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos) memset(&data, 0, sizeof(data)); /* Allocate a CIG if not set */ - if (qos->cig == BT_ISO_QOS_CIG_UNSET) { + if (qos->ucast.cig == BT_ISO_QOS_CIG_UNSET) { for (data.cig = 0x00; data.cig < 0xff; data.cig++) { data.count = 0; data.cis = 0xff; @@ -1731,22 +1781,22 @@ static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos) return false; /* Update CIG */ - qos->cig = data.cig; + qos->ucast.cig = data.cig; } - data.pdu.cp.cig_id = qos->cig; - hci_cpu_to_le24(qos->out.interval, data.pdu.cp.c_interval); - hci_cpu_to_le24(qos->in.interval, data.pdu.cp.p_interval); - data.pdu.cp.sca = qos->sca; - data.pdu.cp.packing = qos->packing; - data.pdu.cp.framing = qos->framing; - data.pdu.cp.c_latency = cpu_to_le16(qos->out.latency); - data.pdu.cp.p_latency = cpu_to_le16(qos->in.latency); + data.pdu.cp.cig_id = qos->ucast.cig; + hci_cpu_to_le24(qos->ucast.out.interval, data.pdu.cp.c_interval); + hci_cpu_to_le24(qos->ucast.in.interval, data.pdu.cp.p_interval); + data.pdu.cp.sca = qos->ucast.sca; + data.pdu.cp.packing = qos->ucast.packing; + data.pdu.cp.framing = qos->ucast.framing; + data.pdu.cp.c_latency = cpu_to_le16(qos->ucast.out.latency); + data.pdu.cp.p_latency = cpu_to_le16(qos->ucast.in.latency); - if (qos->cis != BT_ISO_QOS_CIS_UNSET) { + if (qos->ucast.cis != 
BT_ISO_QOS_CIS_UNSET) { data.count = 0; - data.cig = qos->cig; - data.cis = qos->cis; + data.cig = qos->ucast.cig; + data.cis = qos->ucast.cis; hci_conn_hash_list_state(hdev, cis_list, ISO_LINK, BT_BOUND, &data); @@ -1757,7 +1807,7 @@ static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos) } /* Reprogram all CIS(s) with the same CIG */ - for (data.cig = qos->cig, data.cis = 0x00; data.cis < 0x11; + for (data.cig = qos->ucast.cig, data.cis = 0x00; data.cis < 0x11; data.cis++) { data.count = 0; @@ -1767,14 +1817,14 @@ static bool hci_le_set_cig_params(struct hci_conn *conn, struct bt_iso_qos *qos) continue; /* Allocate a CIS if not set */ - if (qos->cis == BT_ISO_QOS_CIS_UNSET) { + if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET) { /* Update CIS */ - qos->cis = data.cis; + qos->ucast.cis = data.cis; cis_add(&data, qos); } } - if (qos->cis == BT_ISO_QOS_CIS_UNSET || !data.pdu.cp.num_cis) + if (qos->ucast.cis == BT_ISO_QOS_CIS_UNSET || !data.pdu.cp.num_cis) return false; if (hci_send_cmd(hdev, HCI_OP_LE_SET_CIG_PARAMS, @@ -1791,7 +1841,8 @@ struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst, { struct hci_conn *cis; - cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type); + cis = hci_conn_hash_lookup_cis(hdev, dst, dst_type, qos->ucast.cig, + qos->ucast.cis); if (!cis) { cis = hci_conn_add(hdev, ISO_LINK, dst, HCI_ROLE_MASTER); if (!cis) @@ -1809,32 +1860,32 @@ struct hci_conn *hci_bind_cis(struct hci_dev *hdev, bdaddr_t *dst, return cis; /* Update LINK PHYs according to QoS preference */ - cis->le_tx_phy = qos->out.phy; - cis->le_rx_phy = qos->in.phy; + cis->le_tx_phy = qos->ucast.out.phy; + cis->le_rx_phy = qos->ucast.in.phy; /* If output interval is not set use the input interval as it cannot be * 0x000000. */ - if (!qos->out.interval) - qos->out.interval = qos->in.interval; + if (!qos->ucast.out.interval) + qos->ucast.out.interval = qos->ucast.in.interval; /* If input interval is not set use the output interval as it cannot be * 0x000000. */ - if (!qos->in.interval) - qos->in.interval = qos->out.interval; + if (!qos->ucast.in.interval) + qos->ucast.in.interval = qos->ucast.out.interval; /* If output latency is not set use the input latency as it cannot be * 0x0000. */ - if (!qos->out.latency) - qos->out.latency = qos->in.latency; + if (!qos->ucast.out.latency) + qos->ucast.out.latency = qos->ucast.in.latency; /* If input latency is not set use the output latency as it cannot be * 0x0000. 
*/ - if (!qos->in.latency) - qos->in.latency = qos->out.latency; + if (!qos->ucast.in.latency) + qos->ucast.in.latency = qos->ucast.out.latency; if (!hci_le_set_cig_params(cis, qos)) { hci_conn_drop(cis); @@ -1854,7 +1905,7 @@ bool hci_iso_setup_path(struct hci_conn *conn) memset(&cmd, 0, sizeof(cmd)); - if (conn->iso_qos.out.sdu) { + if (conn->iso_qos.ucast.out.sdu) { cmd.handle = cpu_to_le16(conn->handle); cmd.direction = 0x00; /* Input (Host to Controller) */ cmd.path = 0x00; /* HCI path if enabled */ @@ -1865,7 +1916,7 @@ bool hci_iso_setup_path(struct hci_conn *conn) return false; } - if (conn->iso_qos.in.sdu) { + if (conn->iso_qos.ucast.in.sdu) { cmd.handle = cpu_to_le16(conn->handle); cmd.direction = 0x01; /* Output (Controller to Host) */ cmd.path = 0x00; /* HCI path if enabled */ @@ -1881,76 +1932,39 @@ bool hci_iso_setup_path(struct hci_conn *conn) static int hci_create_cis_sync(struct hci_dev *hdev, void *data) { - struct { - struct hci_cp_le_create_cis cp; - struct hci_cis cis[0x1f]; - } cmd; - struct hci_conn *conn = data; - u8 cig; - - memset(&cmd, 0, sizeof(cmd)); - cmd.cis[0].acl_handle = cpu_to_le16(conn->link->handle); - cmd.cis[0].cis_handle = cpu_to_le16(conn->handle); - cmd.cp.num_cis++; - cig = conn->iso_qos.cig; - - hci_dev_lock(hdev); - - rcu_read_lock(); - - list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { - struct hci_cis *cis = &cmd.cis[cmd.cp.num_cis]; - - if (conn == data || conn->type != ISO_LINK || - conn->state == BT_CONNECTED || conn->iso_qos.cig != cig) - continue; - - /* Check if all CIS(s) belonging to a CIG are ready */ - if (!conn->link || conn->link->state != BT_CONNECTED || - conn->state != BT_CONNECT) { - cmd.cp.num_cis = 0; - break; - } - - /* Group all CIS with state BT_CONNECT since the spec don't - * allow to send them individually: - * - * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E - * page 2566: - * - * If the Host issues this command before all the - * HCI_LE_CIS_Established events from the previous use of the - * command have been generated, the Controller shall return the - * error code Command Disallowed (0x0C). - */ - cis->acl_handle = cpu_to_le16(conn->link->handle); - cis->cis_handle = cpu_to_le16(conn->handle); - cmd.cp.num_cis++; - } - - rcu_read_unlock(); - - hci_dev_unlock(hdev); - - if (!cmd.cp.num_cis) - return 0; - - return hci_send_cmd(hdev, HCI_OP_LE_CREATE_CIS, sizeof(cmd.cp) + - sizeof(cmd.cis[0]) * cmd.cp.num_cis, &cmd); + return hci_le_create_cis_sync(hdev, data); } int hci_le_create_cis(struct hci_conn *conn) { struct hci_conn *cis; + struct hci_link *link, *t; struct hci_dev *hdev = conn->hdev; int err; + bt_dev_dbg(hdev, "hcon %p", conn); + switch (conn->type) { case LE_LINK: - if (!conn->link || conn->state != BT_CONNECTED) + if (conn->state != BT_CONNECTED || list_empty(&conn->link_list)) return -EINVAL; - cis = conn->link; - break; + + cis = NULL; + + /* hci_conn_link uses list_add_tail_rcu so the list is in + * the same order as the connections are requested. + */ + list_for_each_entry_safe(link, t, &conn->link_list, list) { + if (link->conn->state == BT_BOUND) { + err = hci_le_create_cis(link->conn); + if (err) + return err; + + cis = link->conn; + } + } + + return cis ? 
0 : -EINVAL; case ISO_LINK: cis = conn; break; @@ -2002,8 +2016,8 @@ static void hci_bind_bis(struct hci_conn *conn, struct bt_iso_qos *qos) { /* Update LINK PHYs according to QoS preference */ - conn->le_tx_phy = qos->out.phy; - conn->le_tx_phy = qos->out.phy; + conn->le_tx_phy = qos->bcast.out.phy; + conn->le_tx_phy = qos->bcast.out.phy; conn->iso_qos = *qos; conn->state = BT_BOUND; } @@ -2016,16 +2030,16 @@ static int create_big_sync(struct hci_dev *hdev, void *data) u32 flags = 0; int err; - if (qos->out.phy == 0x02) + if (qos->bcast.out.phy == 0x02) flags |= MGMT_ADV_FLAG_SEC_2M; /* Align intervals */ - interval = qos->out.interval / 1250; + interval = qos->bcast.out.interval / 1250; - if (qos->bis) - sync_interval = qos->sync_interval * 1600; + if (qos->bcast.bis) + sync_interval = qos->bcast.sync_interval * 1600; - err = hci_start_per_adv_sync(hdev, qos->bis, conn->le_per_adv_data_len, + err = hci_start_per_adv_sync(hdev, qos->bcast.bis, conn->le_per_adv_data_len, conn->le_per_adv_data, flags, interval, interval, sync_interval); if (err) @@ -2062,7 +2076,7 @@ static int create_pa_sync(struct hci_dev *hdev, void *data) } int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, - __u8 sid) + __u8 sid, struct bt_iso_qos *qos) { struct hci_cp_le_pa_create_sync *cp; @@ -2075,9 +2089,13 @@ int hci_pa_create_sync(struct hci_dev *hdev, bdaddr_t *dst, __u8 dst_type, return -ENOMEM; } + cp->options = qos->bcast.options; cp->sid = sid; cp->addr_type = dst_type; bacpy(&cp->addr, dst); + cp->skip = cpu_to_le16(qos->bcast.skip); + cp->sync_timeout = cpu_to_le16(qos->bcast.sync_timeout); + cp->sync_cte_type = qos->bcast.sync_cte_type; /* Queue start pa_create_sync and scan */ return hci_cmd_sync_queue(hdev, create_pa_sync, cp, create_pa_complete); @@ -2100,8 +2118,12 @@ int hci_le_big_create_sync(struct hci_dev *hdev, struct bt_iso_qos *qos, return err; memset(&pdu, 0, sizeof(pdu)); - pdu.cp.handle = qos->big; + pdu.cp.handle = qos->bcast.big; pdu.cp.sync_handle = cpu_to_le16(sync_handle); + pdu.cp.encryption = qos->bcast.encryption; + memcpy(pdu.cp.bcode, qos->bcast.bcode, sizeof(pdu.cp.bcode)); + pdu.cp.mse = qos->bcast.mse; + pdu.cp.timeout = cpu_to_le16(qos->bcast.timeout); pdu.cp.num_bis = num_bis; memcpy(pdu.bis, bis, num_bis); @@ -2151,7 +2173,7 @@ struct hci_conn *hci_connect_bis(struct hci_dev *hdev, bdaddr_t *dst, return ERR_PTR(err); } - hci_iso_qos_setup(hdev, conn, &qos->out, + hci_iso_qos_setup(hdev, conn, &qos->bcast.out, conn->le_tx_phy ? conn->le_tx_phy : hdev->le_tx_def_phys); @@ -2163,6 +2185,7 @@ struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst, { struct hci_conn *le; struct hci_conn *cis; + struct hci_link *link; if (hci_dev_test_flag(hdev, HCI_ADVERTISING)) le = hci_connect_le(hdev, dst, dst_type, false, @@ -2177,9 +2200,9 @@ struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst, if (IS_ERR(le)) return le; - hci_iso_qos_setup(hdev, le, &qos->out, + hci_iso_qos_setup(hdev, le, &qos->ucast.out, le->le_tx_phy ? le->le_tx_phy : hdev->le_tx_def_phys); - hci_iso_qos_setup(hdev, le, &qos->in, + hci_iso_qos_setup(hdev, le, &qos->ucast.in, le->le_rx_phy ? 
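One unit detail in create_big_sync() above that is easy to gloss over: the QoS SDU interval is carried in microseconds, while the periodic-advertising interval handed to hci_start_per_adv_sync() is expressed in 1.25 ms units, hence the division by 1250. With concrete, purely illustrative numbers:

#include <linux/types.h>

static u16 example_big_adv_interval(void)
{
        u32 sdu_interval_us = 10000;    /* e.g. qos->bcast.out.interval, 10 ms */

        return sdu_interval_us / 1250;  /* 8 units x 1.25 ms = 10 ms */
}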
le->le_rx_phy : hdev->le_rx_def_phys); cis = hci_bind_cis(hdev, dst, dst_type, qos); @@ -2188,16 +2211,18 @@ struct hci_conn *hci_connect_cis(struct hci_dev *hdev, bdaddr_t *dst, return cis; } - le->link = cis; - cis->link = le; - - hci_conn_hold(cis); + link = hci_conn_link(le, cis); + if (!link) { + hci_conn_drop(le); + hci_conn_drop(cis); + return NULL; + } /* If LE is already connected and CIS handle is already set proceed to * Create CIS immediately. */ if (le->state == BT_CONNECTED && cis->handle != HCI_CONN_HANDLE_UNSET) - hci_le_create_cis(le); + hci_le_create_cis(cis); return cis; } diff --git a/net/bluetooth/hci_core.c b/net/bluetooth/hci_core.c index 334e308451f5..a856b1051d35 100644 --- a/net/bluetooth/hci_core.c +++ b/net/bluetooth/hci_core.c @@ -2544,6 +2544,7 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv) INIT_DELAYED_WORK(&hdev->cmd_timer, hci_cmd_timeout); INIT_DELAYED_WORK(&hdev->ncmd_timer, hci_ncmd_timeout); + hci_devcd_setup(hdev); hci_request_setup(hdev); hci_init_sysfs(hdev); @@ -2802,6 +2803,9 @@ int hci_suspend_dev(struct hci_dev *hdev) if (mgmt_powering_down(hdev)) return 0; + /* Cancel potentially blocking sync operation before suspend */ + __hci_cmd_sync_cancel(hdev, -EHOSTDOWN); + hci_req_sync_lock(hdev); ret = hci_suspend_sync(hdev); hci_req_sync_unlock(hdev); diff --git a/net/bluetooth/hci_debugfs.c b/net/bluetooth/hci_debugfs.c index b7f682922a16..ec0df2f9188e 100644 --- a/net/bluetooth/hci_debugfs.c +++ b/net/bluetooth/hci_debugfs.c @@ -189,7 +189,7 @@ static int uuids_show(struct seq_file *f, void *p) } hci_dev_unlock(hdev); - return 0; + return 0; } DEFINE_SHOW_ATTRIBUTE(uuids); diff --git a/net/bluetooth/hci_event.c b/net/bluetooth/hci_event.c index e87c928c9e17..d00ef6e3fc45 100644 --- a/net/bluetooth/hci_event.c +++ b/net/bluetooth/hci_event.c @@ -1,6 +1,7 @@ /* BlueZ - Bluetooth protocol stack for Linux Copyright (c) 2000-2001, 2010, Code Aurora Forum. All rights reserved. 
+ Copyright 2023 NXP Written 2000,2001 by Maxim Krasnyansky <maxk@qualcomm.com> @@ -886,8 +887,13 @@ static u8 hci_cc_read_local_ext_features(struct hci_dev *hdev, void *data, if (rp->status) return rp->status; - if (hdev->max_page < rp->max_page) - hdev->max_page = rp->max_page; + if (hdev->max_page < rp->max_page) { + if (test_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2, + &hdev->quirks)) + bt_dev_warn(hdev, "broken local ext features page 2"); + else + hdev->max_page = rp->max_page; + } if (rp->page < HCI_MAX_PAGES) memcpy(hdev->features[rp->page], rp->features, 8); @@ -2339,7 +2345,8 @@ static void hci_cs_create_conn(struct hci_dev *hdev, __u8 status) static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status) { struct hci_cp_add_sco *cp; - struct hci_conn *acl, *sco; + struct hci_conn *acl; + struct hci_link *link; __u16 handle; bt_dev_dbg(hdev, "status 0x%2.2x", status); @@ -2359,12 +2366,13 @@ static void hci_cs_add_sco(struct hci_dev *hdev, __u8 status) acl = hci_conn_hash_lookup_handle(hdev, handle); if (acl) { - sco = acl->link; - if (sco) { - sco->state = BT_CLOSED; + link = list_first_entry_or_null(&acl->link_list, + struct hci_link, list); + if (link && link->conn) { + link->conn->state = BT_CLOSED; - hci_connect_cfm(sco, status); - hci_conn_del(sco); + hci_connect_cfm(link->conn, status); + hci_conn_del(link->conn); } } @@ -2631,74 +2639,61 @@ static void hci_cs_read_remote_ext_features(struct hci_dev *hdev, __u8 status) hci_dev_unlock(hdev); } -static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) +static void hci_setup_sync_conn_status(struct hci_dev *hdev, __u16 handle, + __u8 status) { - struct hci_cp_setup_sync_conn *cp; - struct hci_conn *acl, *sco; - __u16 handle; + struct hci_conn *acl; + struct hci_link *link; - bt_dev_dbg(hdev, "status 0x%2.2x", status); - - if (!status) - return; - - cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN); - if (!cp) - return; - - handle = __le16_to_cpu(cp->handle); - - bt_dev_dbg(hdev, "handle 0x%4.4x", handle); + bt_dev_dbg(hdev, "handle 0x%4.4x status 0x%2.2x", handle, status); hci_dev_lock(hdev); acl = hci_conn_hash_lookup_handle(hdev, handle); if (acl) { - sco = acl->link; - if (sco) { - sco->state = BT_CLOSED; + link = list_first_entry_or_null(&acl->link_list, + struct hci_link, list); + if (link && link->conn) { + link->conn->state = BT_CLOSED; - hci_connect_cfm(sco, status); - hci_conn_del(sco); + hci_connect_cfm(link->conn, status); + hci_conn_del(link->conn); } } hci_dev_unlock(hdev); } -static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status) +static void hci_cs_setup_sync_conn(struct hci_dev *hdev, __u8 status) { - struct hci_cp_enhanced_setup_sync_conn *cp; - struct hci_conn *acl, *sco; - __u16 handle; + struct hci_cp_setup_sync_conn *cp; bt_dev_dbg(hdev, "status 0x%2.2x", status); if (!status) return; - cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN); + cp = hci_sent_cmd_data(hdev, HCI_OP_SETUP_SYNC_CONN); if (!cp) return; - handle = __le16_to_cpu(cp->handle); + hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status); +} - bt_dev_dbg(hdev, "handle 0x%4.4x", handle); +static void hci_cs_enhanced_setup_sync_conn(struct hci_dev *hdev, __u8 status) +{ + struct hci_cp_enhanced_setup_sync_conn *cp; - hci_dev_lock(hdev); + bt_dev_dbg(hdev, "status 0x%2.2x", status); - acl = hci_conn_hash_lookup_handle(hdev, handle); - if (acl) { - sco = acl->link; - if (sco) { - sco->state = BT_CLOSED; + if (!status) + return; - hci_connect_cfm(sco, status); - 
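HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2 is the opt-in knob for the workaround above (and HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, added later in this series, works the same way). A driver that knows its controller misreports extended-features page 2 would set the bit from its setup path, roughly as in this hypothetical sketch:

#include <net/bluetooth/hci_core.h>

/* Hypothetical vendor setup hook: flag controller-specific breakage so
 * the core ignores the advertised extended features page 2.
 */
static int example_vendor_setup(struct hci_dev *hdev)
{
        set_bit(HCI_QUIRK_BROKEN_LOCAL_EXT_FEATURES_PAGE_2, &hdev->quirks);

        return 0;
}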
hci_conn_del(sco); - } - } + cp = hci_sent_cmd_data(hdev, HCI_OP_ENHANCED_SETUP_SYNC_CONN); + if (!cp) + return; - hci_dev_unlock(hdev); + hci_setup_sync_conn_status(hdev, __le16_to_cpu(cp->handle), status); } static void hci_cs_sniff_mode(struct hci_dev *hdev, __u8 status) @@ -3828,19 +3823,20 @@ static u8 hci_cc_le_set_cig_params(struct hci_dev *hdev, void *data, rcu_read_lock(); list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { - if (conn->type != ISO_LINK || conn->iso_qos.cig != rp->cig_id || + if (conn->type != ISO_LINK || + conn->iso_qos.ucast.cig != rp->cig_id || conn->state == BT_CONNECTED) continue; conn->handle = __le16_to_cpu(rp->handle[i++]); - bt_dev_dbg(hdev, "%p handle 0x%4.4x link %p", conn, - conn->handle, conn->link); + bt_dev_dbg(hdev, "%p handle 0x%4.4x parent %p", conn, + conn->handle, conn->parent); /* Create CIS if LE is already connected */ - if (conn->link && conn->link->state == BT_CONNECTED) { + if (conn->parent && conn->parent->state == BT_CONNECTED) { rcu_read_unlock(); - hci_le_create_cis(conn->link); + hci_le_create_cis(conn); rcu_read_lock(); } @@ -3885,7 +3881,7 @@ static u8 hci_cc_le_setup_iso_path(struct hci_dev *hdev, void *data, /* Input (Host to Controller) */ case 0x00: /* Only confirm connection if output only */ - if (conn->iso_qos.out.sdu && !conn->iso_qos.in.sdu) + if (conn->iso_qos.ucast.out.sdu && !conn->iso_qos.ucast.in.sdu) hci_connect_cfm(conn, rp->status); break; /* Output (Controller to Host) */ @@ -5025,7 +5021,7 @@ static void hci_sync_conn_complete_evt(struct hci_dev *hdev, void *data, if (conn->out) { conn->pkt_type = (hdev->esco_type & SCO_ESCO_MASK) | (hdev->esco_type & EDR_ESCO_MASK); - if (hci_setup_sync(conn, conn->link->handle)) + if (hci_setup_sync(conn, conn->parent->handle)) goto unlock; } fallthrough; @@ -6813,15 +6809,15 @@ static void hci_le_cis_estabilished_evt(struct hci_dev *hdev, void *data, memset(&interval, 0, sizeof(interval)); memcpy(&interval, ev->c_latency, sizeof(ev->c_latency)); - conn->iso_qos.in.interval = le32_to_cpu(interval); + conn->iso_qos.ucast.in.interval = le32_to_cpu(interval); memcpy(&interval, ev->p_latency, sizeof(ev->p_latency)); - conn->iso_qos.out.interval = le32_to_cpu(interval); - conn->iso_qos.in.latency = le16_to_cpu(ev->interval); - conn->iso_qos.out.latency = le16_to_cpu(ev->interval); - conn->iso_qos.in.sdu = le16_to_cpu(ev->c_mtu); - conn->iso_qos.out.sdu = le16_to_cpu(ev->p_mtu); - conn->iso_qos.in.phy = ev->c_phy; - conn->iso_qos.out.phy = ev->p_phy; + conn->iso_qos.ucast.out.interval = le32_to_cpu(interval); + conn->iso_qos.ucast.in.latency = le16_to_cpu(ev->interval); + conn->iso_qos.ucast.out.latency = le16_to_cpu(ev->interval); + conn->iso_qos.ucast.in.sdu = le16_to_cpu(ev->c_mtu); + conn->iso_qos.ucast.out.sdu = le16_to_cpu(ev->p_mtu); + conn->iso_qos.ucast.in.phy = ev->c_phy; + conn->iso_qos.ucast.out.phy = ev->p_phy; } if (!ev->status) { @@ -6895,8 +6891,8 @@ static void hci_le_cis_req_evt(struct hci_dev *hdev, void *data, cis->handle = cis_handle; } - cis->iso_qos.cig = ev->cig_id; - cis->iso_qos.cis = ev->cis_id; + cis->iso_qos.ucast.cig = ev->cig_id; + cis->iso_qos.ucast.cis = ev->cis_id; if (!(flags & HCI_PROTO_DEFER)) { hci_le_accept_cis(hdev, ev->cis_handle); @@ -6983,13 +6979,13 @@ static void hci_le_big_sync_established_evt(struct hci_dev *hdev, void *data, bis->handle = handle; } - bis->iso_qos.big = ev->handle; + bis->iso_qos.bcast.big = ev->handle; memset(&interval, 0, sizeof(interval)); memcpy(&interval, ev->latency, sizeof(ev->latency)); - 
bis->iso_qos.in.interval = le32_to_cpu(interval); + bis->iso_qos.bcast.in.interval = le32_to_cpu(interval); /* Convert ISO Interval (1.25 ms slots) to latency (ms) */ - bis->iso_qos.in.latency = le16_to_cpu(ev->interval) * 125 / 100; - bis->iso_qos.in.sdu = le16_to_cpu(ev->max_pdu); + bis->iso_qos.bcast.in.latency = le16_to_cpu(ev->interval) * 125 / 100; + bis->iso_qos.bcast.in.sdu = le16_to_cpu(ev->max_pdu); hci_iso_setup_path(bis); } diff --git a/net/bluetooth/hci_sock.c b/net/bluetooth/hci_sock.c index 06581223238c..1d249d839819 100644 --- a/net/bluetooth/hci_sock.c +++ b/net/bluetooth/hci_sock.c @@ -987,6 +987,34 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, BT_DBG("cmd %x arg %lx", cmd, arg); + /* Make sure the cmd is valid before doing anything */ + switch (cmd) { + case HCIGETDEVLIST: + case HCIGETDEVINFO: + case HCIGETCONNLIST: + case HCIDEVUP: + case HCIDEVDOWN: + case HCIDEVRESET: + case HCIDEVRESTAT: + case HCISETSCAN: + case HCISETAUTH: + case HCISETENCRYPT: + case HCISETPTYPE: + case HCISETLINKPOL: + case HCISETLINKMODE: + case HCISETACLMTU: + case HCISETSCOMTU: + case HCIINQUIRY: + case HCISETRAW: + case HCIGETCONNINFO: + case HCIGETAUTHINFO: + case HCIBLOCKADDR: + case HCIUNBLOCKADDR: + break; + default: + return -ENOIOCTLCMD; + } + lock_sock(sk); if (hci_pi(sk)->channel != HCI_CHANNEL_RAW) { @@ -1003,7 +1031,14 @@ static int hci_sock_ioctl(struct socket *sock, unsigned int cmd, if (hci_sock_gen_cookie(sk)) { struct sk_buff *skb; - if (capable(CAP_NET_ADMIN)) + /* Perform careful checks before setting the HCI_SOCK_TRUSTED + * flag. Make sure that not only the current task but also + * the socket opener has the required capability, since + * privileged programs can be tricked into making ioctl calls + * on HCI sockets, and the socket should not be marked as + * trusted simply because the ioctl caller is privileged. + */ + if (sk_capable(sk, CAP_NET_ADMIN)) hci_sock_set_flag(sk, HCI_SOCK_TRUSTED); /* Send event to monitor */ diff --git a/net/bluetooth/hci_sync.c b/net/bluetooth/hci_sync.c index 632be1267288..647a8ce54062 100644 --- a/net/bluetooth/hci_sync.c +++ b/net/bluetooth/hci_sync.c @@ -684,8 +684,12 @@ void hci_cmd_sync_cancel(struct hci_dev *hdev, int err) } EXPORT_SYMBOL(hci_cmd_sync_cancel); -int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, - void *data, hci_cmd_sync_work_destroy_t destroy) +/* Submit HCI command to be run in as cmd_sync_work: + * + * - hdev must _not_ be unregistered + */ +int hci_cmd_sync_submit(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) { struct hci_cmd_sync_work_entry *entry; @@ -708,6 +712,23 @@ int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, return 0; } +EXPORT_SYMBOL(hci_cmd_sync_submit); + +/* Queue HCI command: + * + * - hdev must be running + */ +int hci_cmd_sync_queue(struct hci_dev *hdev, hci_cmd_sync_work_func_t func, + void *data, hci_cmd_sync_work_destroy_t destroy) +{ + /* Only queue command if hdev is running which means it had been opened + * and is either on init phase or is already up. + */ + if (!test_bit(HCI_RUNNING, &hdev->flags)) + return -ENETDOWN; + + return hci_cmd_sync_submit(hdev, func, data, destroy); +} EXPORT_SYMBOL(hci_cmd_sync_queue); int hci_update_eir_sync(struct hci_dev *hdev) @@ -2403,7 +2424,7 @@ static int hci_pause_addr_resolution(struct hci_dev *hdev) /* Return if address resolution is disabled and RPA is not used. 
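The split between hci_cmd_sync_submit() and hci_cmd_sync_queue() above is purely about device state: queue now rejects work with -ENETDOWN unless HCI_RUNNING is set, while submit only requires that hdev has not been unregistered. A hedged sketch of how a caller picks between them, mirroring the mgmt.c set_powered() change later in this series (the callbacks are placeholders):

#include <net/bluetooth/hci_core.h>

static int example_sync_func(struct hci_dev *hdev, void *data)
{
        return 0;       /* the actual __hci_cmd_sync_*() work would go here */
}

static void example_done(struct hci_dev *hdev, void *data, int err)
{
}

static int example_schedule(struct hci_dev *hdev, void *data, bool power_on)
{
        /* Powering on may be requested before HCI_RUNNING is set, so it
         * must bypass the running check; everything else should fail
         * fast when the device is not up.
         */
        if (power_on)
                return hci_cmd_sync_submit(hdev, example_sync_func, data,
                                           example_done);

        return hci_cmd_sync_queue(hdev, example_sync_func, data, example_done);
}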
*/ if (!err && scan_use_rpa(hdev)) - return err; + return 0; hci_resume_advertising_sync(hdev); return err; @@ -4093,7 +4114,8 @@ static int hci_le_set_rpa_timeout_sync(struct hci_dev *hdev) { __le16 timeout = cpu_to_le16(hdev->rpa_timeout); - if (!(hdev->commands[35] & 0x04)) + if (!(hdev->commands[35] & 0x04) || + test_bit(HCI_QUIRK_BROKEN_SET_RPA_TIMEOUT, &hdev->quirks)) return 0; return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_RPA_TIMEOUT, @@ -4414,18 +4436,38 @@ static int hci_le_set_write_def_data_len_sync(struct hci_dev *hdev) sizeof(cp), &cp, HCI_CMD_TIMEOUT); } -/* Set Default PHY parameters if command is supported */ +/* Set Default PHY parameters if command is supported, enables all supported + * PHYs according to the LE Features bits. + */ static int hci_le_set_default_phy_sync(struct hci_dev *hdev) { struct hci_cp_le_set_default_phy cp; - if (!(hdev->commands[35] & 0x20)) + if (!(hdev->commands[35] & 0x20)) { + /* If the command is not supported it means only 1M PHY is + * supported. + */ + hdev->le_tx_def_phys = HCI_LE_SET_PHY_1M; + hdev->le_rx_def_phys = HCI_LE_SET_PHY_1M; return 0; + } memset(&cp, 0, sizeof(cp)); cp.all_phys = 0x00; - cp.tx_phys = hdev->le_tx_def_phys; - cp.rx_phys = hdev->le_rx_def_phys; + cp.tx_phys = HCI_LE_SET_PHY_1M; + cp.rx_phys = HCI_LE_SET_PHY_1M; + + /* Enables 2M PHY if supported */ + if (le_2m_capable(hdev)) { + cp.tx_phys |= HCI_LE_SET_PHY_2M; + cp.rx_phys |= HCI_LE_SET_PHY_2M; + } + + /* Enables Coded PHY if supported */ + if (le_coded_capable(hdev)) { + cp.tx_phys |= HCI_LE_SET_PHY_CODED; + cp.rx_phys |= HCI_LE_SET_PHY_CODED; + } return __hci_cmd_sync_status(hdev, HCI_OP_LE_SET_DEFAULT_PHY, sizeof(cp), &cp, HCI_CMD_TIMEOUT); @@ -4533,6 +4575,9 @@ static const struct { "HCI Set Event Filter command not supported."), HCI_QUIRK_BROKEN(ENHANCED_SETUP_SYNC_CONN, "HCI Enhanced Setup Synchronous Connection command is " + "advertised, but not supported."), + HCI_QUIRK_BROKEN(SET_RPA_TIMEOUT, + "HCI LE Set Random Private Address Timeout command is " "advertised, but not supported.") }; @@ -4727,6 +4772,8 @@ int hci_dev_open_sync(struct hci_dev *hdev) goto done; } + hci_devcd_reset(hdev); + set_bit(HCI_RUNNING, &hdev->flags); hci_sock_dev_event(hdev, HCI_DEV_OPEN); @@ -5108,10 +5155,12 @@ static int hci_disconnect_sync(struct hci_dev *hdev, struct hci_conn *conn, cp.handle = cpu_to_le16(conn->handle); cp.reason = reason; - /* Wait for HCI_EV_DISCONN_COMPLETE not HCI_EV_CMD_STATUS when not - * suspending. + /* Wait for HCI_EV_DISCONN_COMPLETE, not HCI_EV_CMD_STATUS, when the + * reason is anything but HCI_ERROR_REMOTE_POWER_OFF. This reason is + * used when suspending or powering off, where we don't want to wait + * for the peer's response. 
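With the rework above, the default PHY command follows the controller's LE feature bits instead of hdev->le_*_def_phys. As a worked example (values only; constants as used above), a controller that supports LE 2M but not LE Coded ends up being sent:

#include <net/bluetooth/hci_core.h>

static void example_fill_default_phy(struct hci_cp_le_set_default_phy *cp)
{
        /* Controller with 2M support, no Coded support. */
        cp->all_phys = 0x00;
        cp->tx_phys  = HCI_LE_SET_PHY_1M | HCI_LE_SET_PHY_2M;
        cp->rx_phys  = HCI_LE_SET_PHY_1M | HCI_LE_SET_PHY_2M;
}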
*/ - if (!hdev->suspended) + if (reason != HCI_ERROR_REMOTE_POWER_OFF) return __hci_cmd_sync_status_sk(hdev, HCI_OP_DISCONNECT, sizeof(cp), &cp, HCI_EV_DISCONN_COMPLETE, @@ -5853,7 +5902,6 @@ static int hci_le_ext_directed_advertising_sync(struct hci_dev *hdev, memset(&cp, 0, sizeof(cp)); cp.evt_properties = cpu_to_le16(LE_LEGACY_ADV_DIRECT_IND); - cp.own_addr_type = own_addr_type; cp.channel_map = hdev->le_adv_channel_map; cp.tx_power = HCI_TX_POWER_INVALID; cp.primary_phy = HCI_ADV_PHY_1M; @@ -6114,6 +6162,71 @@ done: return err; } +int hci_le_create_cis_sync(struct hci_dev *hdev, struct hci_conn *conn) +{ + struct { + struct hci_cp_le_create_cis cp; + struct hci_cis cis[0x1f]; + } cmd; + u8 cig; + struct hci_conn *hcon = conn; + + memset(&cmd, 0, sizeof(cmd)); + cmd.cis[0].acl_handle = cpu_to_le16(conn->parent->handle); + cmd.cis[0].cis_handle = cpu_to_le16(conn->handle); + cmd.cp.num_cis++; + cig = conn->iso_qos.ucast.cig; + + hci_dev_lock(hdev); + + rcu_read_lock(); + + list_for_each_entry_rcu(conn, &hdev->conn_hash.list, list) { + struct hci_cis *cis = &cmd.cis[cmd.cp.num_cis]; + + if (conn == hcon || conn->type != ISO_LINK || + conn->state == BT_CONNECTED || + conn->iso_qos.ucast.cig != cig) + continue; + + /* Check if all CIS(s) belonging to a CIG are ready */ + if (!conn->parent || conn->parent->state != BT_CONNECTED || + conn->state != BT_CONNECT) { + cmd.cp.num_cis = 0; + break; + } + + /* Group all CIS with state BT_CONNECT since the spec don't + * allow to send them individually: + * + * BLUETOOTH CORE SPECIFICATION Version 5.3 | Vol 4, Part E + * page 2566: + * + * If the Host issues this command before all the + * HCI_LE_CIS_Established events from the previous use of the + * command have been generated, the Controller shall return the + * error code Command Disallowed (0x0C). 
+ */ + cis->acl_handle = cpu_to_le16(conn->parent->handle); + cis->cis_handle = cpu_to_le16(conn->handle); + cmd.cp.num_cis++; + } + + rcu_read_unlock(); + + hci_dev_unlock(hdev); + + if (!cmd.cp.num_cis) + return 0; + + /* Wait for HCI_LE_CIS_Established */ + return __hci_cmd_sync_status_sk(hdev, HCI_OP_LE_CREATE_CIS, + sizeof(cmd.cp) + sizeof(cmd.cis[0]) * + cmd.cp.num_cis, &cmd, + HCI_EVT_LE_CIS_ESTABLISHED, + conn->conn_timeout, NULL); +} + int hci_le_remove_cig_sync(struct hci_dev *hdev, u8 handle) { struct hci_cp_le_remove_cig cp; diff --git a/net/bluetooth/iso.c b/net/bluetooth/iso.c index 8d136a730163..34d55a85d8f6 100644 --- a/net/bluetooth/iso.c +++ b/net/bluetooth/iso.c @@ -3,6 +3,7 @@ * BlueZ - Bluetooth protocol stack for Linux * * Copyright (C) 2022 Intel Corporation + * Copyright 2023 NXP */ #include <linux/module.h> @@ -59,11 +60,17 @@ struct iso_pinfo { __u16 sync_handle; __u32 flags; struct bt_iso_qos qos; + bool qos_user_set; __u8 base_len; __u8 base[BASE_MAX_LENGTH]; struct iso_conn *conn; }; +static struct bt_iso_qos default_qos; + +static bool check_ucast_qos(struct bt_iso_qos *qos); +static bool check_bcast_qos(struct bt_iso_qos *qos); + /* ---- ISO timers ---- */ #define ISO_CONN_TIMEOUT (HZ * 40) #define ISO_DISCONN_TIMEOUT (HZ * 2) @@ -264,8 +271,15 @@ static int iso_connect_bis(struct sock *sk) goto unlock; } + /* Fail if user set invalid QoS */ + if (iso_pi(sk)->qos_user_set && !check_bcast_qos(&iso_pi(sk)->qos)) { + iso_pi(sk)->qos = default_qos; + err = -EINVAL; + goto unlock; + } + /* Fail if out PHYs are marked as disabled */ - if (!iso_pi(sk)->qos.out.phy) { + if (!iso_pi(sk)->qos.bcast.out.phy) { err = -EINVAL; goto unlock; } @@ -336,8 +350,15 @@ static int iso_connect_cis(struct sock *sk) goto unlock; } + /* Fail if user set invalid QoS */ + if (iso_pi(sk)->qos_user_set && !check_ucast_qos(&iso_pi(sk)->qos)) { + iso_pi(sk)->qos = default_qos; + err = -EINVAL; + goto unlock; + } + /* Fail if either PHYs are marked as disabled */ - if (!iso_pi(sk)->qos.in.phy && !iso_pi(sk)->qos.out.phy) { + if (!iso_pi(sk)->qos.ucast.in.phy && !iso_pi(sk)->qos.ucast.out.phy) { err = -EINVAL; goto unlock; } @@ -417,7 +438,7 @@ static int iso_send_frame(struct sock *sk, struct sk_buff *skb) BT_DBG("sk %p len %d", sk, skb->len); - if (skb->len > qos->out.sdu) + if (skb->len > qos->ucast.out.sdu) return -EMSGSIZE; len = skb->len; @@ -680,13 +701,23 @@ static struct proto iso_proto = { } static struct bt_iso_qos default_qos = { - .cig = BT_ISO_QOS_CIG_UNSET, - .cis = BT_ISO_QOS_CIS_UNSET, - .sca = 0x00, - .packing = 0x00, - .framing = 0x00, - .in = DEFAULT_IO_QOS, - .out = DEFAULT_IO_QOS, + .bcast = { + .big = BT_ISO_QOS_BIG_UNSET, + .bis = BT_ISO_QOS_BIS_UNSET, + .sync_interval = 0x00, + .packing = 0x00, + .framing = 0x00, + .in = DEFAULT_IO_QOS, + .out = DEFAULT_IO_QOS, + .encryption = 0x00, + .bcode = {0x00}, + .options = 0x00, + .skip = 0x0000, + .sync_timeout = 0x4000, + .sync_cte_type = 0x00, + .mse = 0x00, + .timeout = 0x4000, + }, }; static struct sock *iso_sock_alloc(struct net *net, struct socket *sock, @@ -893,9 +924,15 @@ static int iso_listen_bis(struct sock *sk) if (!hdev) return -EHOSTUNREACH; + /* Fail if user set invalid QoS */ + if (iso_pi(sk)->qos_user_set && !check_bcast_qos(&iso_pi(sk)->qos)) { + iso_pi(sk)->qos = default_qos; + return -EINVAL; + } + err = hci_pa_create_sync(hdev, &iso_pi(sk)->dst, le_addr_type(iso_pi(sk)->dst_type), - iso_pi(sk)->bc_sid); + iso_pi(sk)->bc_sid, &iso_pi(sk)->qos); hci_dev_put(hdev); @@ -1154,21 +1191,62 @@ static bool 
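The qos_user_set flag above means the kernel only enforces check_ucast_qos()/check_bcast_qos() when the application actually supplied a QoS through the BT_ISO_QOS socket option (handled in the next hunk). A hypothetical user-space sketch for the unicast case; values are illustrative, and the availability of struct bt_iso_qos, SOL_BLUETOOTH and BT_ISO_QOS in the user-space headers is assumed:

#include <errno.h>
#include <stdio.h>
#include <string.h>
#include <sys/socket.h>
/* Assumed to come from the Bluetooth user-space headers: SOL_BLUETOOTH,
 * BT_ISO_QOS and struct bt_iso_qos with the ucast/bcast members used below.
 */

static int example_set_ucast_qos(int sk)
{
        struct bt_iso_qos qos;

        memset(&qos, 0, sizeof(qos));
        qos.ucast.cig = 0xff;           /* kernel's BT_ISO_QOS_CIG_UNSET: auto-allocate */
        qos.ucast.cis = 0xff;           /* kernel's BT_ISO_QOS_CIS_UNSET */
        qos.ucast.out.interval = 10000; /* 10 ms SDU interval, in microseconds */
        qos.ucast.out.latency = 10;     /* ms */
        qos.ucast.out.sdu = 40;
        qos.ucast.out.phy = 0x02;       /* LE 2M */
        qos.ucast.out.rtn = 2;

        /* Passing just sizeof(qos.ucast) is also accepted now that the
         * kernel copies min(optlen, sizeof(qos)) and validates that case
         * with check_ucast_qos().
         */
        if (setsockopt(sk, SOL_BLUETOOTH, BT_ISO_QOS, &qos, sizeof(qos)) < 0) {
                perror("BT_ISO_QOS");
                return -errno;
        }

        return 0;
}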
check_io_qos(struct bt_iso_io_qos *qos) return true; } -static bool check_qos(struct bt_iso_qos *qos) +static bool check_ucast_qos(struct bt_iso_qos *qos) { - if (qos->sca > 0x07) + if (qos->ucast.sca > 0x07) return false; - if (qos->packing > 0x01) + if (qos->ucast.packing > 0x01) return false; - if (qos->framing > 0x01) + if (qos->ucast.framing > 0x01) return false; - if (!check_io_qos(&qos->in)) + if (!check_io_qos(&qos->ucast.in)) return false; - if (!check_io_qos(&qos->out)) + if (!check_io_qos(&qos->ucast.out)) + return false; + + return true; +} + +static bool check_bcast_qos(struct bt_iso_qos *qos) +{ + if (qos->bcast.sync_interval > 0x07) + return false; + + if (qos->bcast.packing > 0x01) + return false; + + if (qos->bcast.framing > 0x01) + return false; + + if (!check_io_qos(&qos->bcast.in)) + return false; + + if (!check_io_qos(&qos->bcast.out)) + return false; + + if (qos->bcast.encryption > 0x01) + return false; + + if (qos->bcast.options > 0x07) + return false; + + if (qos->bcast.skip > 0x01f3) + return false; + + if (qos->bcast.sync_timeout < 0x000a || qos->bcast.sync_timeout > 0x4000) + return false; + + if (qos->bcast.sync_cte_type > 0x1f) + return false; + + if (qos->bcast.mse > 0x1f) + return false; + + if (qos->bcast.timeout < 0x000a || qos->bcast.timeout > 0x4000) return false; return true; @@ -1179,7 +1257,7 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname, { struct sock *sk = sock->sk; int len, err = 0; - struct bt_iso_qos qos; + struct bt_iso_qos qos = default_qos; u32 opt; BT_DBG("sk %p", sk); @@ -1212,24 +1290,19 @@ static int iso_sock_setsockopt(struct socket *sock, int level, int optname, } len = min_t(unsigned int, sizeof(qos), optlen); - if (len != sizeof(qos)) { - err = -EINVAL; - break; - } - - memset(&qos, 0, sizeof(qos)); if (copy_from_sockptr(&qos, optval, len)) { err = -EFAULT; break; } - if (!check_qos(&qos)) { + if (len == sizeof(qos.ucast) && !check_ucast_qos(&qos)) { err = -EINVAL; break; } iso_pi(sk)->qos = qos; + iso_pi(sk)->qos_user_set = true; break; @@ -1419,7 +1492,7 @@ static bool iso_match_big(struct sock *sk, void *data) { struct hci_evt_le_big_sync_estabilished *ev = data; - return ev->handle == iso_pi(sk)->qos.big; + return ev->handle == iso_pi(sk)->qos.bcast.big; } static void iso_conn_ready(struct iso_conn *conn) @@ -1584,8 +1657,12 @@ static void iso_connect_cfm(struct hci_conn *hcon, __u8 status) /* Check if LE link has failed */ if (status) { - if (hcon->link) - iso_conn_del(hcon->link, bt_to_errno(status)); + struct hci_link *link, *t; + + list_for_each_entry_safe(link, t, &hcon->link_list, + list) + iso_conn_del(link->conn, bt_to_errno(status)); + return; } diff --git a/net/bluetooth/l2cap_core.c b/net/bluetooth/l2cap_core.c index 55a7226233f9..376b523c7b26 100644 --- a/net/bluetooth/l2cap_core.c +++ b/net/bluetooth/l2cap_core.c @@ -745,7 +745,7 @@ EXPORT_SYMBOL_GPL(l2cap_chan_list); static void l2cap_conn_update_id_addr(struct work_struct *work) { struct l2cap_conn *conn = container_of(work, struct l2cap_conn, - id_addr_update_work); + id_addr_timer.work); struct hci_conn *hcon = conn->hcon; struct l2cap_chan *chan; @@ -1907,8 +1907,7 @@ static void l2cap_conn_del(struct hci_conn *hcon, int err) if (work_pending(&conn->pending_rx_work)) cancel_work_sync(&conn->pending_rx_work); - if (work_pending(&conn->id_addr_update_work)) - cancel_work_sync(&conn->id_addr_update_work); + cancel_delayed_work_sync(&conn->id_addr_timer); l2cap_unregister_all_users(conn); @@ -4694,7 +4693,6 @@ static inline int 
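For completeness, the broadcast ranges enforced by check_bcast_qos() above, collected into a single user-space pre-flight helper (purely illustrative; the per-direction I/O limits from check_io_qos() are omitted and the kernel check remains authoritative):

#include <stdbool.h>

static bool example_bcast_qos_in_range(const struct bt_iso_qos *qos)
{
        return qos->bcast.sync_interval <= 0x07 &&
               qos->bcast.packing <= 0x01 &&
               qos->bcast.framing <= 0x01 &&
               qos->bcast.encryption <= 0x01 &&
               qos->bcast.options <= 0x07 &&
               qos->bcast.skip <= 0x01f3 &&
               qos->bcast.sync_timeout >= 0x000a &&
               qos->bcast.sync_timeout <= 0x4000 &&
               qos->bcast.sync_cte_type <= 0x1f &&
               qos->bcast.mse <= 0x1f &&
               qos->bcast.timeout >= 0x000a &&
               qos->bcast.timeout <= 0x4000;
}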
l2cap_disconnect_rsp(struct l2cap_conn *conn, chan = l2cap_get_chan_by_scid(conn, scid); if (!chan) { - mutex_unlock(&conn->chan_lock); return 0; } @@ -7874,7 +7872,7 @@ static struct l2cap_conn *l2cap_conn_add(struct hci_conn *hcon) skb_queue_head_init(&conn->pending_rx); INIT_WORK(&conn->pending_rx_work, process_pending_rx); - INIT_WORK(&conn->id_addr_update_work, l2cap_conn_update_id_addr); + INIT_DELAYED_WORK(&conn->id_addr_timer, l2cap_conn_update_id_addr); conn->disc_reason = HCI_ERROR_REMOTE_USER_TERM; diff --git a/net/bluetooth/mgmt.c b/net/bluetooth/mgmt.c index 249dc6777fb4..f7b2d0971f24 100644 --- a/net/bluetooth/mgmt.c +++ b/net/bluetooth/mgmt.c @@ -1399,8 +1399,16 @@ static int set_powered(struct sock *sk, struct hci_dev *hdev, void *data, goto failed; } - err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd, - mgmt_set_powered_complete); + /* Cancel potentially blocking sync operation before power off */ + if (cp->val == 0x00) { + __hci_cmd_sync_cancel(hdev, -EHOSTDOWN); + err = hci_cmd_sync_queue(hdev, set_powered_sync, cmd, + mgmt_set_powered_complete); + } else { + /* Use hci_cmd_sync_submit since hdev might not be running */ + err = hci_cmd_sync_submit(hdev, set_powered_sync, cmd, + mgmt_set_powered_complete); + } if (err < 0) mgmt_pending_remove(cmd); @@ -8393,10 +8401,10 @@ static u32 get_supported_adv_flags(struct hci_dev *hdev) flags |= MGMT_ADV_FLAG_HW_OFFLOAD; flags |= MGMT_ADV_FLAG_CAN_SET_TX_POWER; - if (hdev->le_features[1] & HCI_LE_PHY_2M) + if (le_2m_capable(hdev)) flags |= MGMT_ADV_FLAG_SEC_2M; - if (hdev->le_features[1] & HCI_LE_PHY_CODED) + if (le_coded_capable(hdev)) flags |= MGMT_ADV_FLAG_SEC_CODED; } diff --git a/net/bluetooth/msft.c b/net/bluetooth/msft.c index bee6a4c656be..bf5cee48916c 100644 --- a/net/bluetooth/msft.c +++ b/net/bluetooth/msft.c @@ -743,17 +743,12 @@ __u64 msft_get_features(struct hci_dev *hdev) } static void msft_le_set_advertisement_filter_enable_cb(struct hci_dev *hdev, - u8 status, u16 opcode, - struct sk_buff *skb) + void *user_data, + u8 status) { - struct msft_cp_le_set_advertisement_filter_enable *cp; - struct msft_rp_le_set_advertisement_filter_enable *rp; + struct msft_cp_le_set_advertisement_filter_enable *cp = user_data; struct msft_data *msft = hdev->msft_data; - rp = (struct msft_rp_le_set_advertisement_filter_enable *)skb->data; - if (skb->len < sizeof(*rp)) - return; - /* Error 0x0C would be returned if the filter enabled status is * already set to whatever we were trying to set. 
* Although the default state should be disabled, some controller set @@ -766,7 +761,6 @@ static void msft_le_set_advertisement_filter_enable_cb(struct hci_dev *hdev, hci_dev_lock(hdev); - cp = hci_sent_cmd_data(hdev, hdev->msft_opcode); msft->filter_enabled = cp->enable; if (status == 0x0C) @@ -804,31 +798,23 @@ int msft_remove_monitor(struct hci_dev *hdev, struct adv_monitor *monitor) return msft_remove_monitor_sync(hdev, monitor); } -void msft_req_add_set_filter_enable(struct hci_request *req, bool enable) -{ - struct hci_dev *hdev = req->hdev; - struct msft_cp_le_set_advertisement_filter_enable cp; - - cp.sub_opcode = MSFT_OP_LE_SET_ADVERTISEMENT_FILTER_ENABLE; - cp.enable = enable; - - hci_req_add(req, hdev->msft_opcode, sizeof(cp), &cp); -} - int msft_set_filter_enable(struct hci_dev *hdev, bool enable) { - struct hci_request req; + struct msft_cp_le_set_advertisement_filter_enable cp; struct msft_data *msft = hdev->msft_data; int err; if (!msft) return -EOPNOTSUPP; - hci_req_init(&req, hdev); - msft_req_add_set_filter_enable(&req, enable); - err = hci_req_run_skb(&req, msft_le_set_advertisement_filter_enable_cb); + cp.sub_opcode = MSFT_OP_LE_SET_ADVERTISEMENT_FILTER_ENABLE; + cp.enable = enable; + err = __hci_cmd_sync_status(hdev, hdev->msft_opcode, sizeof(cp), &cp, + HCI_CMD_TIMEOUT); + + msft_le_set_advertisement_filter_enable_cb(hdev, &cp, err); - return err; + return 0; } bool msft_curve_validity(struct hci_dev *hdev) diff --git a/net/bluetooth/smp.c b/net/bluetooth/smp.c index 70663229b3cc..f1a9fc0012f0 100644 --- a/net/bluetooth/smp.c +++ b/net/bluetooth/smp.c @@ -58,6 +58,8 @@ #define SMP_TIMEOUT msecs_to_jiffies(30000) +#define ID_ADDR_TIMEOUT msecs_to_jiffies(200) + #define AUTH_REQ_MASK(dev) (hci_dev_test_flag(dev, HCI_SC_ENABLED) ? \ 0x3f : 0x07) #define KEY_DIST_MASK 0x07 @@ -1067,7 +1069,12 @@ static void smp_notify_keys(struct l2cap_conn *conn) if (hcon->type == LE_LINK) { bacpy(&hcon->dst, &smp->remote_irk->bdaddr); hcon->dst_type = smp->remote_irk->addr_type; - queue_work(hdev->workqueue, &conn->id_addr_update_work); + /* Use a short delay to make sure the new address is + * propagated _before_ the channels. + */ + queue_delayed_work(hdev->workqueue, + &conn->id_addr_timer, + ID_ADDR_TIMEOUT); } } diff --git a/net/core/dev.c b/net/core/dev.c index 1551aabac343..735096d42c1d 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -4603,10 +4603,10 @@ static void napi_schedule_rps(struct softnet_data *sd) sd->rps_ipi_next = mysd->rps_ipi_list; mysd->rps_ipi_list = sd; - /* If not called from net_rx_action() + /* If not called from net_rx_action() or napi_threaded_poll() * we have to raise NET_RX_SOFTIRQ. 
*/ - if (!mysd->in_net_rx_action) + if (!mysd->in_net_rx_action && !mysd->in_napi_threaded_poll) __raise_softirq_irqoff(NET_RX_SOFTIRQ); return; } @@ -6598,9 +6598,31 @@ static int napi_thread_wait(struct napi_struct *napi) return -1; } +static void skb_defer_free_flush(struct softnet_data *sd) +{ + struct sk_buff *skb, *next; + + /* Paired with WRITE_ONCE() in skb_attempt_defer_free() */ + if (!READ_ONCE(sd->defer_list)) + return; + + spin_lock(&sd->defer_lock); + skb = sd->defer_list; + sd->defer_list = NULL; + sd->defer_count = 0; + spin_unlock(&sd->defer_lock); + + while (skb != NULL) { + next = skb->next; + napi_consume_skb(skb, 1); + skb = next; + } +} + static int napi_threaded_poll(void *data) { struct napi_struct *napi = data; + struct softnet_data *sd; void *have; while (!napi_thread_wait(napi)) { @@ -6608,11 +6630,21 @@ static int napi_threaded_poll(void *data) bool repoll = false; local_bh_disable(); + sd = this_cpu_ptr(&softnet_data); + sd->in_napi_threaded_poll = true; have = netpoll_poll_lock(napi); __napi_poll(napi, &repoll); netpoll_poll_unlock(have); + sd->in_napi_threaded_poll = false; + barrier(); + + if (sd_has_rps_ipi_waiting(sd)) { + local_irq_disable(); + net_rps_action_and_irq_enable(sd); + } + skb_defer_free_flush(sd); local_bh_enable(); if (!repoll) @@ -6624,27 +6656,6 @@ static int napi_threaded_poll(void *data) return 0; } -static void skb_defer_free_flush(struct softnet_data *sd) -{ - struct sk_buff *skb, *next; - - /* Paired with WRITE_ONCE() in skb_attempt_defer_free() */ - if (!READ_ONCE(sd->defer_list)) - return; - - spin_lock_irq(&sd->defer_lock); - skb = sd->defer_list; - sd->defer_list = NULL; - sd->defer_count = 0; - spin_unlock_irq(&sd->defer_lock); - - while (skb != NULL) { - next = skb->next; - napi_consume_skb(skb, 1); - skb = next; - } -} - static __latent_entropy void net_rx_action(struct softirq_action *h) { struct softnet_data *sd = this_cpu_ptr(&softnet_data); diff --git a/net/core/skbuff.c b/net/core/skbuff.c index 0d998806b377..c7c141f6fc14 100644 --- a/net/core/skbuff.c +++ b/net/core/skbuff.c @@ -5996,7 +5996,6 @@ EXPORT_SYMBOL(skb_ensure_writable); */ int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) { - struct vlan_hdr *vhdr; int offset = skb->data - skb_mac_header(skb); int err; @@ -6012,13 +6011,8 @@ int __skb_vlan_pop(struct sk_buff *skb, u16 *vlan_tci) skb_postpull_rcsum(skb, skb->data + (2 * ETH_ALEN), VLAN_HLEN); - vhdr = (struct vlan_hdr *)(skb->data + ETH_HLEN); - *vlan_tci = ntohs(vhdr->h_vlan_TCI); - - memmove(skb->data + VLAN_HLEN, skb->data, 2 * ETH_ALEN); - __skb_pull(skb, VLAN_HLEN); + vlan_remove_tag(skb, vlan_tci); - vlan_set_encap_proto(skb, vhdr); skb->mac_header += VLAN_HLEN; if (skb_network_offset(skb) < ETH_HLEN) @@ -6870,7 +6864,6 @@ void skb_attempt_defer_free(struct sk_buff *skb) { int cpu = skb->alloc_cpu; struct softnet_data *sd; - unsigned long flags; unsigned int defer_max; bool kick; @@ -6881,12 +6874,15 @@ nodefer: __kfree_skb(skb); return; } + DEBUG_NET_WARN_ON_ONCE(skb_dst(skb)); + DEBUG_NET_WARN_ON_ONCE(skb->destructor); + sd = &per_cpu(softnet_data, cpu); defer_max = READ_ONCE(sysctl_skb_defer_max); if (READ_ONCE(sd->defer_count) >= defer_max) goto nodefer; - spin_lock_irqsave(&sd->defer_lock, flags); + spin_lock_bh(&sd->defer_lock); /* Send an IPI every time queue reaches half capacity. 
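The napi_threaded_poll() changes above only matter when a device has opted into threaded NAPI; the RPS IPIs and the deferred-skb flush are now handled from the per-NAPI kthread as well. For reference, threaded polling is enabled per device, either through /sys/class/net/<dev>/threaded or from a driver, as in this hypothetical sketch:

#include <linux/netdevice.h>

/* Hypothetical driver hook: switch this netdev's NAPI instances to
 * kthread-based polling so the path patched above is exercised.
 */
static int example_enable_threaded_napi(struct net_device *dev)
{
        return dev_set_threaded(dev, true);
}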
*/ kick = sd->defer_count == (defer_max >> 1); /* Paired with the READ_ONCE() few lines above */ @@ -6895,7 +6891,7 @@ nodefer: __kfree_skb(skb); skb->next = sd->defer_list; /* Paired with READ_ONCE() in skb_defer_free_flush() */ WRITE_ONCE(sd->defer_list, skb); - spin_unlock_irqrestore(&sd->defer_lock, flags); + spin_unlock_bh(&sd->defer_lock); /* Make sure to trigger NET_RX_SOFTIRQ on the remote CPU * if we are unlucky enough (this seems very unlikely). diff --git a/net/dsa/tag.h b/net/dsa/tag.h index 7cfbca824f1c..32d12f4a9d73 100644 --- a/net/dsa/tag.h +++ b/net/dsa/tag.h @@ -229,7 +229,7 @@ static inline void *dsa_etype_header_pos_rx(struct sk_buff *skb) return skb->data - 2; } -/* On TX, skb->data points to skb_mac_header(skb), which means that EtherType +/* On TX, skb->data points to the MAC header, which means that EtherType * header taggers start exactly where the EtherType is (the EtherType is * treated as part of the DSA header). */ diff --git a/net/dsa/tag_8021q.c b/net/dsa/tag_8021q.c index 5ee9ef00954e..cbdfc392f7e0 100644 --- a/net/dsa/tag_8021q.c +++ b/net/dsa/tag_8021q.c @@ -461,8 +461,8 @@ EXPORT_SYMBOL_GPL(dsa_tag_8021q_unregister); struct sk_buff *dsa_8021q_xmit(struct sk_buff *skb, struct net_device *netdev, u16 tpid, u16 tci) { - /* skb->data points at skb_mac_header, which - * is fine for vlan_insert_tag. + /* skb->data points at the MAC header, which is fine + * for vlan_insert_tag(). */ return vlan_insert_tag(skb, htons(tpid), tci); } diff --git a/net/dsa/tag_ksz.c b/net/dsa/tag_ksz.c index 0eb1c7784c3d..ea100bd25939 100644 --- a/net/dsa/tag_ksz.c +++ b/net/dsa/tag_ksz.c @@ -120,18 +120,18 @@ static struct sk_buff *ksz_common_rcv(struct sk_buff *skb, static struct sk_buff *ksz8795_xmit(struct sk_buff *skb, struct net_device *dev) { struct dsa_port *dp = dsa_slave_to_port(dev); + struct ethhdr *hdr; u8 *tag; - u8 *addr; if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb)) return NULL; /* Tag encoding */ tag = skb_put(skb, KSZ_INGRESS_TAG_LEN); - addr = skb_mac_header(skb); + hdr = skb_eth_hdr(skb); *tag = 1 << dp->index; - if (is_link_local_ether_addr(addr)) + if (is_link_local_ether_addr(hdr->h_dest)) *tag |= KSZ8795_TAIL_TAG_OVERRIDE; return skb; @@ -273,8 +273,8 @@ static struct sk_buff *ksz9477_xmit(struct sk_buff *skb, u16 queue_mapping = skb_get_queue_mapping(skb); u8 prio = netdev_txq_to_tc(dev, queue_mapping); struct dsa_port *dp = dsa_slave_to_port(dev); + struct ethhdr *hdr; __be16 *tag; - u8 *addr; u16 val; if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb)) @@ -284,13 +284,13 @@ static struct sk_buff *ksz9477_xmit(struct sk_buff *skb, ksz_xmit_timestamp(dp, skb); tag = skb_put(skb, KSZ9477_INGRESS_TAG_LEN); - addr = skb_mac_header(skb); + hdr = skb_eth_hdr(skb); val = BIT(dp->index); val |= FIELD_PREP(KSZ9477_TAIL_TAG_PRIO, prio); - if (is_link_local_ether_addr(addr)) + if (is_link_local_ether_addr(hdr->h_dest)) val |= KSZ9477_TAIL_TAG_OVERRIDE; *tag = cpu_to_be16(val); @@ -337,7 +337,7 @@ static struct sk_buff *ksz9893_xmit(struct sk_buff *skb, u16 queue_mapping = skb_get_queue_mapping(skb); u8 prio = netdev_txq_to_tc(dev, queue_mapping); struct dsa_port *dp = dsa_slave_to_port(dev); - u8 *addr; + struct ethhdr *hdr; u8 *tag; if (skb->ip_summed == CHECKSUM_PARTIAL && skb_checksum_help(skb)) @@ -347,13 +347,13 @@ static struct sk_buff *ksz9893_xmit(struct sk_buff *skb, ksz_xmit_timestamp(dp, skb); tag = skb_put(skb, KSZ_INGRESS_TAG_LEN); - addr = skb_mac_header(skb); + hdr = skb_eth_hdr(skb); *tag = BIT(dp->index); *tag |= 
FIELD_PREP(KSZ9893_TAIL_TAG_PRIO, prio); - if (is_link_local_ether_addr(addr)) + if (is_link_local_ether_addr(hdr->h_dest)) *tag |= KSZ9893_TAIL_TAG_OVERRIDE; return ksz_defer_xmit(dp, skb); diff --git a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c index 28ebecafdd24..20bf7074d5a6 100644 --- a/net/dsa/tag_ocelot.c +++ b/net/dsa/tag_ocelot.c @@ -26,11 +26,11 @@ static void ocelot_xmit_get_vlan_info(struct sk_buff *skb, struct dsa_port *dp, return; } - hdr = (struct vlan_ethhdr *)skb_mac_header(skb); + hdr = skb_vlan_eth_hdr(skb); br_vlan_get_proto(br, &proto); if (ntohs(hdr->h_vlan_proto) == proto) { - __skb_vlan_pop(skb, &tci); + vlan_remove_tag(skb, &tci); *vlan_tci = tci; } else { rcu_read_lock(); diff --git a/net/dsa/tag_sja1105.c b/net/dsa/tag_sja1105.c index 1c2ceba4771b..a5f3b73da417 100644 --- a/net/dsa/tag_sja1105.c +++ b/net/dsa/tag_sja1105.c @@ -256,7 +256,7 @@ static struct sk_buff *sja1105_pvid_tag_control_pkt(struct dsa_port *dp, return NULL; } - hdr = (struct vlan_ethhdr *)skb_mac_header(skb); + hdr = skb_vlan_eth_hdr(skb); /* If skb is already VLAN-tagged, leave that VLAN ID in place */ if (hdr->h_vlan_proto == xmit_tpid) @@ -516,7 +516,7 @@ static bool sja1110_skb_has_inband_control_extension(const struct sk_buff *skb) static void sja1105_vlan_rcv(struct sk_buff *skb, int *source_port, int *switch_id, int *vbid, u16 *vid) { - struct vlan_ethhdr *hdr = (struct vlan_ethhdr *)skb_mac_header(skb); + struct vlan_ethhdr *hdr = vlan_eth_hdr(skb); u16 vlan_tci; if (skb_vlan_tag_present(skb)) diff --git a/net/ethtool/coalesce.c b/net/ethtool/coalesce.c index 443e7e642c96..01a59ce211c8 100644 --- a/net/ethtool/coalesce.c +++ b/net/ethtool/coalesce.c @@ -254,13 +254,14 @@ ethnl_set_coalesce_validate(struct ethnl_req_info *req_info, } static int -ethnl_set_coalesce(struct ethnl_req_info *req_info, struct genl_info *info) +__ethnl_set_coalesce(struct ethnl_req_info *req_info, struct genl_info *info, + bool *dual_change) { struct kernel_ethtool_coalesce kernel_coalesce = {}; struct net_device *dev = req_info->dev; struct ethtool_coalesce coalesce = {}; + bool mod_mode = false, mod = false; struct nlattr **tb = info->attrs; - bool mod = false; int ret; ret = dev->ethtool_ops->get_coalesce(dev, &coalesce, &kernel_coalesce, @@ -268,6 +269,7 @@ ethnl_set_coalesce(struct ethnl_req_info *req_info, struct genl_info *info) if (ret < 0) return ret; + /* Update values */ ethnl_update_u32(&coalesce.rx_coalesce_usecs, tb[ETHTOOL_A_COALESCE_RX_USECS], &mod); ethnl_update_u32(&coalesce.rx_max_coalesced_frames, @@ -286,10 +288,6 @@ ethnl_set_coalesce(struct ethnl_req_info *req_info, struct genl_info *info) tb[ETHTOOL_A_COALESCE_TX_MAX_FRAMES_IRQ], &mod); ethnl_update_u32(&coalesce.stats_block_coalesce_usecs, tb[ETHTOOL_A_COALESCE_STATS_BLOCK_USECS], &mod); - ethnl_update_bool32(&coalesce.use_adaptive_rx_coalesce, - tb[ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX], &mod); - ethnl_update_bool32(&coalesce.use_adaptive_tx_coalesce, - tb[ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX], &mod); ethnl_update_u32(&coalesce.pkt_rate_low, tb[ETHTOOL_A_COALESCE_PKT_RATE_LOW], &mod); ethnl_update_u32(&coalesce.rx_coalesce_usecs_low, @@ -312,17 +310,25 @@ ethnl_set_coalesce(struct ethnl_req_info *req_info, struct genl_info *info) tb[ETHTOOL_A_COALESCE_TX_MAX_FRAMES_HIGH], &mod); ethnl_update_u32(&coalesce.rate_sample_interval, tb[ETHTOOL_A_COALESCE_RATE_SAMPLE_INTERVAL], &mod); - ethnl_update_u8(&kernel_coalesce.use_cqe_mode_tx, - tb[ETHTOOL_A_COALESCE_USE_CQE_MODE_TX], &mod); - ethnl_update_u8(&kernel_coalesce.use_cqe_mode_rx, - 
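The tagger conversions above replace hand-rolled skb_mac_header() casts with the generic accessors. The same pattern in an imaginary tagger helper (the function is hypothetical; skb_vlan_eth_hdr() and vlan_remove_tag() are the helpers used above):

#include <linux/if_vlan.h>

static void example_strip_outer_vlan(struct sk_buff *skb, u16 *tci)
{
        struct vlan_ethhdr *hdr = skb_vlan_eth_hdr(skb);

        /* Pop the outer tag in place and report its TCI, as
         * ocelot_xmit_get_vlan_info() does above.
         */
        if (hdr->h_vlan_proto == htons(ETH_P_8021Q))
                vlan_remove_tag(skb, tci);
}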
tb[ETHTOOL_A_COALESCE_USE_CQE_MODE_RX], &mod); ethnl_update_u32(&kernel_coalesce.tx_aggr_max_bytes, tb[ETHTOOL_A_COALESCE_TX_AGGR_MAX_BYTES], &mod); ethnl_update_u32(&kernel_coalesce.tx_aggr_max_frames, tb[ETHTOOL_A_COALESCE_TX_AGGR_MAX_FRAMES], &mod); ethnl_update_u32(&kernel_coalesce.tx_aggr_time_usecs, tb[ETHTOOL_A_COALESCE_TX_AGGR_TIME_USECS], &mod); - if (!mod) + + /* Update operation modes */ + ethnl_update_bool32(&coalesce.use_adaptive_rx_coalesce, + tb[ETHTOOL_A_COALESCE_USE_ADAPTIVE_RX], &mod_mode); + ethnl_update_bool32(&coalesce.use_adaptive_tx_coalesce, + tb[ETHTOOL_A_COALESCE_USE_ADAPTIVE_TX], &mod_mode); + ethnl_update_u8(&kernel_coalesce.use_cqe_mode_tx, + tb[ETHTOOL_A_COALESCE_USE_CQE_MODE_TX], &mod_mode); + ethnl_update_u8(&kernel_coalesce.use_cqe_mode_rx, + tb[ETHTOOL_A_COALESCE_USE_CQE_MODE_RX], &mod_mode); + + *dual_change = mod && mod_mode; + if (!mod && !mod_mode) return 0; ret = dev->ethtool_ops->set_coalesce(dev, &coalesce, &kernel_coalesce, @@ -330,6 +336,32 @@ ethnl_set_coalesce(struct ethnl_req_info *req_info, struct genl_info *info) return ret < 0 ? ret : 1; } +static int +ethnl_set_coalesce(struct ethnl_req_info *req_info, struct genl_info *info) +{ + bool dual_change; + int err, ret; + + /* SET_COALESCE may change operation mode and parameters in one call. + * Changing operation mode may cause the driver to reset the parameter + * values, and therefore ignore user input (driver does not know which + * parameters come from user and which are echoed back from ->get). + * To not complicate the drivers if user tries to change both the mode + * and parameters at once - call the driver twice. + */ + err = __ethnl_set_coalesce(req_info, info, &dual_change); + if (err < 0) + return err; + ret = err; + + if (ret && dual_change) { + err = __ethnl_set_coalesce(req_info, info, &dual_change); + if (err < 0) + return err; + } + return ret; +} + const struct ethnl_request_ops ethnl_coalesce_request_ops = { .request_cmd = ETHTOOL_MSG_COALESCE_GET, .reply_cmd = ETHTOOL_MSG_COALESCE_GET_REPLY, diff --git a/net/netfilter/ipvs/ip_vs_conn.c b/net/netfilter/ipvs/ip_vs_conn.c index 13534e02346c..928e64653837 100644 --- a/net/netfilter/ipvs/ip_vs_conn.c +++ b/net/netfilter/ipvs/ip_vs_conn.c @@ -1481,6 +1481,7 @@ void __net_exit ip_vs_conn_net_cleanup(struct netns_ipvs *ipvs) int __init ip_vs_conn_init(void) { + size_t tab_array_size; int idx; /* Compute size and mask */ @@ -1494,8 +1495,9 @@ int __init ip_vs_conn_init(void) /* * Allocate the connection hash table and initialize its list heads */ - ip_vs_conn_tab = vmalloc(array_size(ip_vs_conn_tab_size, - sizeof(*ip_vs_conn_tab))); + tab_array_size = array_size(ip_vs_conn_tab_size, + sizeof(*ip_vs_conn_tab)); + ip_vs_conn_tab = vmalloc(tab_array_size); if (!ip_vs_conn_tab) return -ENOMEM; @@ -1508,10 +1510,8 @@ int __init ip_vs_conn_init(void) return -ENOMEM; } - pr_info("Connection hash table configured " - "(size=%d, memory=%ldKbytes)\n", - ip_vs_conn_tab_size, - (long)(ip_vs_conn_tab_size*sizeof(*ip_vs_conn_tab))/1024); + pr_info("Connection hash table configured (size=%d, memory=%zdKbytes)\n", + ip_vs_conn_tab_size, tab_array_size / 1024); IP_VS_DBG(0, "Each connection entry needs %zd bytes at least\n", sizeof(struct ip_vs_conn)); diff --git a/net/netfilter/ipvs/ip_vs_core.c b/net/netfilter/ipvs/ip_vs_core.c index 2fcc26507d69..cb83ca506c5c 100644 --- a/net/netfilter/ipvs/ip_vs_core.c +++ b/net/netfilter/ipvs/ip_vs_core.c @@ -1140,7 +1140,6 @@ struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc, __be16 vport; unsigned 
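The ipvs hunk above is a small overflow-hygiene cleanup: the element-count multiplication is done once through array_size(), which saturates instead of wrapping, and the resulting byte count is reused for the log message. The pattern in isolation (names are illustrative):

#include <linux/overflow.h>
#include <linux/vmalloc.h>

static void *example_alloc_table(size_t nr_entries, size_t entry_size)
{
        /* array_size() returns SIZE_MAX on multiplication overflow, so
         * vmalloc() fails cleanly instead of allocating a short buffer.
         */
        return vmalloc(array_size(nr_entries, entry_size));
}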
int flags; - EnterFunction(12); vaddr = &svc->addr; vport = svc->port; daddr = &iph->saddr; @@ -1208,7 +1207,6 @@ struct ip_vs_conn *ip_vs_new_conn_out(struct ip_vs_service *svc, IP_VS_DBG_ADDR(cp->af, &cp->vaddr), ntohs(cp->vport), IP_VS_DBG_ADDR(cp->af, &cp->daddr), ntohs(cp->dport), cp->flags, refcount_read(&cp->refcnt)); - LeaveFunction(12); return cp; } @@ -1316,13 +1314,11 @@ after_nat: ip_vs_update_conntrack(skb, cp, 0); ip_vs_conn_put(cp); - LeaveFunction(11); return NF_ACCEPT; drop: ip_vs_conn_put(cp); kfree_skb(skb); - LeaveFunction(11); return NF_STOLEN; } @@ -1341,8 +1337,6 @@ ip_vs_out_hook(void *priv, struct sk_buff *skb, const struct nf_hook_state *stat int af = state->pf; struct sock *sk; - EnterFunction(11); - /* Already marked as IPVS request or reply? */ if (skb->ipvs_property) return NF_ACCEPT; @@ -2365,7 +2359,6 @@ static void __net_exit __ip_vs_dev_cleanup_batch(struct list_head *net_list) struct netns_ipvs *ipvs; struct net *net; - EnterFunction(2); list_for_each_entry(net, net_list, exit_list) { ipvs = net_ipvs(net); ip_vs_unregister_hooks(ipvs, AF_INET); @@ -2374,7 +2367,6 @@ static void __net_exit __ip_vs_dev_cleanup_batch(struct list_head *net_list) smp_wmb(); ip_vs_sync_net_cleanup(ipvs); } - LeaveFunction(2); } static struct pernet_operations ipvs_core_ops = { diff --git a/net/netfilter/ipvs/ip_vs_ctl.c b/net/netfilter/ipvs/ip_vs_ctl.c index 2a5ed71c82c3..62606fb44d02 100644 --- a/net/netfilter/ipvs/ip_vs_ctl.c +++ b/net/netfilter/ipvs/ip_vs_ctl.c @@ -1061,8 +1061,6 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) unsigned int atype; int ret; - EnterFunction(2); - #ifdef CONFIG_IP_VS_IPV6 if (udest->af == AF_INET6) { atype = ipv6_addr_type(&udest->addr.in6); @@ -1111,7 +1109,6 @@ ip_vs_new_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) spin_lock_init(&dest->dst_lock); __ip_vs_update_dest(svc, dest, udest, 1); - LeaveFunction(2); return 0; err_stats: @@ -1134,8 +1131,6 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) __be16 dport = udest->port; int ret; - EnterFunction(2); - if (udest->weight < 0) { pr_err("%s(): server weight less than zero\n", __func__); return -ERANGE; @@ -1183,7 +1178,7 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) ret = ip_vs_start_estimator(svc->ipvs, &dest->stats); if (ret < 0) - goto err; + return ret; __ip_vs_update_dest(svc, dest, udest, 1); } else { /* @@ -1192,9 +1187,6 @@ ip_vs_add_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) ret = ip_vs_new_dest(svc, udest); } -err: - LeaveFunction(2); - return ret; } @@ -1209,8 +1201,6 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) union nf_inet_addr daddr; __be16 dport = udest->port; - EnterFunction(2); - if (udest->weight < 0) { pr_err("%s(): server weight less than zero\n", __func__); return -ERANGE; @@ -1242,7 +1232,6 @@ ip_vs_edit_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) } __ip_vs_update_dest(svc, dest, udest, 0); - LeaveFunction(2); return 0; } @@ -1317,8 +1306,6 @@ ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) struct ip_vs_dest *dest; __be16 dport = udest->port; - EnterFunction(2); - /* We use function that requires RCU lock */ rcu_read_lock(); dest = ip_vs_lookup_dest(svc, udest->af, &udest->addr, dport); @@ -1339,8 +1326,6 @@ ip_vs_del_dest(struct ip_vs_service *svc, struct ip_vs_dest_user_kern *udest) */ __ip_vs_del_dest(svc->ipvs, dest, false); - 
LeaveFunction(2); - return 0; } @@ -1746,7 +1731,6 @@ void ip_vs_service_nets_cleanup(struct list_head *net_list) struct netns_ipvs *ipvs; struct net *net; - EnterFunction(2); /* Check for "full" addressed entries */ mutex_lock(&__ip_vs_mutex); list_for_each_entry(net, net_list, exit_list) { @@ -1754,7 +1738,6 @@ void ip_vs_service_nets_cleanup(struct list_head *net_list) ip_vs_flush(ipvs, true); } mutex_unlock(&__ip_vs_mutex); - LeaveFunction(2); } /* Put all references for device (dst_cache) */ @@ -1792,7 +1775,6 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event, if (event != NETDEV_DOWN || !ipvs) return NOTIFY_DONE; IP_VS_DBG(3, "%s() dev=%s\n", __func__, dev->name); - EnterFunction(2); mutex_lock(&__ip_vs_mutex); for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { hlist_for_each_entry(svc, &ip_vs_svc_table[idx], s_list) { @@ -1821,7 +1803,6 @@ static int ip_vs_dst_event(struct notifier_block *this, unsigned long event, } spin_unlock_bh(&ipvs->dest_trash_lock); mutex_unlock(&__ip_vs_mutex); - LeaveFunction(2); return NOTIFY_DONE; } @@ -4537,8 +4518,6 @@ int __init ip_vs_control_init(void) int idx; int ret; - EnterFunction(2); - /* Initialize svc_table, ip_vs_svc_fwm_table */ for (idx = 0; idx < IP_VS_SVC_TAB_SIZE; idx++) { INIT_HLIST_HEAD(&ip_vs_svc_table[idx]); @@ -4551,15 +4530,12 @@ int __init ip_vs_control_init(void) if (ret < 0) return ret; - LeaveFunction(2); return 0; } void ip_vs_control_cleanup(void) { - EnterFunction(2); unregister_netdevice_notifier(&ip_vs_dst_notifier); /* relying on common rcu_barrier() in ip_vs_cleanup() */ - LeaveFunction(2); } diff --git a/net/netfilter/ipvs/ip_vs_sync.c b/net/netfilter/ipvs/ip_vs_sync.c index 4963fec815da..264f2f87a437 100644 --- a/net/netfilter/ipvs/ip_vs_sync.c +++ b/net/netfilter/ipvs/ip_vs_sync.c @@ -603,7 +603,7 @@ static void ip_vs_sync_conn_v0(struct netns_ipvs *ipvs, struct ip_vs_conn *cp, if (cp->flags & IP_VS_CONN_F_SEQ_MASK) { struct ip_vs_sync_conn_options *opt = (struct ip_vs_sync_conn_options *)&s[1]; - memcpy(opt, &cp->in_seq, sizeof(*opt)); + memcpy(opt, &cp->sync_conn_opt, sizeof(*opt)); } m->nr_conns++; @@ -1582,13 +1582,11 @@ ip_vs_send_async(struct socket *sock, const char *buffer, const size_t length) struct kvec iov; int len; - EnterFunction(7); iov.iov_base = (void *)buffer; iov.iov_len = length; len = kernel_sendmsg(sock, &msg, &iov, 1, (size_t)(length)); - LeaveFunction(7); return len; } @@ -1614,15 +1612,12 @@ ip_vs_receive(struct socket *sock, char *buffer, const size_t buflen) struct kvec iov = {buffer, buflen}; int len; - EnterFunction(7); - /* Receive a packet */ iov_iter_kvec(&msg.msg_iter, ITER_DEST, &iov, 1, buflen); len = sock_recvmsg(sock, &msg, MSG_DONTWAIT); if (len < 0) return len; - LeaveFunction(7); return len; } diff --git a/net/netfilter/ipvs/ip_vs_xmit.c b/net/netfilter/ipvs/ip_vs_xmit.c index 99c349c0d968..feb1d7fcb09f 100644 --- a/net/netfilter/ipvs/ip_vs_xmit.c +++ b/net/netfilter/ipvs/ip_vs_xmit.c @@ -706,8 +706,6 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, { struct iphdr *iph = ip_hdr(skb); - EnterFunction(10); - if (__ip_vs_get_out_rt(cp->ipvs, cp->af, skb, NULL, iph->daddr, IP_VS_RT_MODE_NON_LOCAL, NULL, ipvsh) < 0) goto tx_error; @@ -719,12 +717,10 @@ ip_vs_bypass_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0); - LeaveFunction(10); return NF_STOLEN; tx_error: kfree_skb(skb); - LeaveFunction(10); return NF_STOLEN; } @@ -735,8 +731,6 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn 
*cp, { struct ipv6hdr *iph = ipv6_hdr(skb); - EnterFunction(10); - if (__ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, NULL, &iph->daddr, NULL, ipvsh, 0, IP_VS_RT_MODE_NON_LOCAL) < 0) @@ -747,12 +741,10 @@ ip_vs_bypass_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0); - LeaveFunction(10); return NF_STOLEN; tx_error: kfree_skb(skb); - LeaveFunction(10); return NF_STOLEN; } #endif @@ -768,8 +760,6 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, struct rtable *rt; /* Route to the other host */ int local, rc, was_input; - EnterFunction(10); - /* check if it is a connection of no-client-port */ if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT)) { __be16 _pt, *p; @@ -839,12 +829,10 @@ ip_vs_nat_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local); - LeaveFunction(10); return rc; tx_error: kfree_skb(skb); - LeaveFunction(10); return NF_STOLEN; } @@ -856,8 +844,6 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, struct rt6_info *rt; /* Route to the other host */ int local, rc; - EnterFunction(10); - /* check if it is a connection of no-client-port */ if (unlikely(cp->flags & IP_VS_CONN_F_NO_CPORT && !ipvsh->fragoffs)) { __be16 _pt, *p; @@ -927,11 +913,9 @@ ip_vs_nat_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local); - LeaveFunction(10); return rc; tx_error: - LeaveFunction(10); kfree_skb(skb); return NF_STOLEN; } @@ -1149,8 +1133,6 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, int tun_type, gso_type; int tun_flags; - EnterFunction(10); - local = __ip_vs_get_out_rt(ipvs, cp->af, skb, cp->dest, cp->daddr.ip, IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL | @@ -1199,7 +1181,7 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, &next_protocol, NULL, &dsfield, &ttl, dfp); if (IS_ERR(skb)) - goto tx_error; + return NF_STOLEN; gso_type = __tun_gso_type_mask(AF_INET, cp->af); if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) { @@ -1267,14 +1249,10 @@ ip_vs_tunnel_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, else if (ret == NF_DROP) kfree_skb(skb); - LeaveFunction(10); - return NF_STOLEN; tx_error: - if (!IS_ERR(skb)) - kfree_skb(skb); - LeaveFunction(10); + kfree_skb(skb); return NF_STOLEN; } @@ -1298,8 +1276,6 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, int tun_type, gso_type; int tun_flags; - EnterFunction(10); - local = __ip_vs_get_out_rt_v6(ipvs, cp->af, skb, cp->dest, &cp->daddr.in6, &saddr, ipvsh, 1, @@ -1347,7 +1323,7 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, &next_protocol, &payload_len, &dsfield, &ttl, NULL); if (IS_ERR(skb)) - goto tx_error; + return NF_STOLEN; gso_type = __tun_gso_type_mask(AF_INET6, cp->af); if (tun_type == IP_VS_CONN_F_TUNNEL_TYPE_GUE) { @@ -1414,14 +1390,10 @@ ip_vs_tunnel_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, else if (ret == NF_DROP) kfree_skb(skb); - LeaveFunction(10); - return NF_STOLEN; tx_error: - if (!IS_ERR(skb)) - kfree_skb(skb); - LeaveFunction(10); + kfree_skb(skb); return NF_STOLEN; } #endif @@ -1437,8 +1409,6 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, { int local; - EnterFunction(10); - local = __ip_vs_get_out_rt(cp->ipvs, cp->af, skb, cp->dest, cp->daddr.ip, IP_VS_RT_MODE_LOCAL | IP_VS_RT_MODE_NON_LOCAL | @@ -1455,12 +1425,10 @@ ip_vs_dr_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, ip_vs_send_or_cont(NFPROTO_IPV4, skb, cp, 0); - LeaveFunction(10); return NF_STOLEN; 
tx_error: kfree_skb(skb); - LeaveFunction(10); return NF_STOLEN; } @@ -1471,8 +1439,6 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, { int local; - EnterFunction(10); - local = __ip_vs_get_out_rt_v6(cp->ipvs, cp->af, skb, cp->dest, &cp->daddr.in6, NULL, ipvsh, 0, @@ -1489,12 +1455,10 @@ ip_vs_dr_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, ip_vs_send_or_cont(NFPROTO_IPV6, skb, cp, 0); - LeaveFunction(10); return NF_STOLEN; tx_error: kfree_skb(skb); - LeaveFunction(10); return NF_STOLEN; } #endif @@ -1514,8 +1478,6 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, int local; int rt_mode, was_input; - EnterFunction(10); - /* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be forwarded directly here, because there is no need to translate address/port back */ @@ -1526,7 +1488,7 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, rc = NF_ACCEPT; /* do not touch skb anymore */ atomic_inc(&cp->in_pkts); - goto out; + return rc; } /* @@ -1582,14 +1544,11 @@ ip_vs_icmp_xmit(struct sk_buff *skb, struct ip_vs_conn *cp, /* Another hack: avoid icmp_send in ip_fragment */ skb->ignore_df = 1; - rc = ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local); - goto out; + return ip_vs_nat_send_or_cont(NFPROTO_IPV4, skb, cp, local); tx_error: kfree_skb(skb); rc = NF_STOLEN; - out: - LeaveFunction(10); return rc; } @@ -1604,8 +1563,6 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, int local; int rt_mode; - EnterFunction(10); - /* The ICMP packet for VS/TUN, VS/DR and LOCALNODE will be forwarded directly here, because there is no need to translate address/port back */ @@ -1616,7 +1573,7 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, rc = NF_ACCEPT; /* do not touch skb anymore */ atomic_inc(&cp->in_pkts); - goto out; + return rc; } /* @@ -1671,14 +1628,11 @@ ip_vs_icmp_xmit_v6(struct sk_buff *skb, struct ip_vs_conn *cp, /* Another hack: avoid icmp_send in ip_fragment */ skb->ignore_df = 1; - rc = ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local); - goto out; + return ip_vs_nat_send_or_cont(NFPROTO_IPV6, skb, cp, local); tx_error: kfree_skb(skb); rc = NF_STOLEN; -out: - LeaveFunction(10); return rc; } #endif diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index e48ab8dfb541..09542951656c 100644 --- a/net/netfilter/nf_tables_api.c +++ b/net/netfilter/nf_tables_api.c @@ -102,11 +102,9 @@ static const u8 nft2audit_op[NFT_MSG_MAX] = { // enum nf_tables_msg_types [NFT_MSG_DELFLOWTABLE] = AUDIT_NFT_OP_FLOWTABLE_UNREGISTER, }; -static void nft_validate_state_update(struct net *net, u8 new_validate_state) +static void nft_validate_state_update(struct nft_table *table, u8 new_validate_state) { - struct nftables_pernet *nft_net = nft_pernet(net); - - switch (nft_net->validate_state) { + switch (table->validate_state) { case NFT_VALIDATE_SKIP: WARN_ON_ONCE(new_validate_state == NFT_VALIDATE_DO); break; @@ -117,7 +115,7 @@ static void nft_validate_state_update(struct net *net, u8 new_validate_state) return; } - nft_net->validate_state = new_validate_state; + table->validate_state = new_validate_state; } static void nf_tables_trans_destroy_work(struct work_struct *w); static DECLARE_WORK(trans_destroy_work, nf_tables_trans_destroy_work); @@ -821,12 +819,20 @@ static int nf_tables_fill_table_info(struct sk_buff *skb, struct net *net, goto nla_put_failure; if (nla_put_string(skb, NFTA_TABLE_NAME, table->name) || - nla_put_be32(skb, NFTA_TABLE_FLAGS, - htonl(table->flags & NFT_TABLE_F_MASK)) || nla_put_be32(skb, 
NFTA_TABLE_USE, htonl(table->use)) || nla_put_be64(skb, NFTA_TABLE_HANDLE, cpu_to_be64(table->handle), NFTA_TABLE_PAD)) goto nla_put_failure; + + if (event == NFT_MSG_DELTABLE) { + nlmsg_end(skb, nlh); + return 0; + } + + if (nla_put_be32(skb, NFTA_TABLE_FLAGS, + htonl(table->flags & NFT_TABLE_F_MASK))) + goto nla_put_failure; + if (nft_table_has_owner(table) && nla_put_be32(skb, NFTA_TABLE_OWNER, htonl(table->nlpid))) goto nla_put_failure; @@ -1224,6 +1230,7 @@ static int nf_tables_newtable(struct sk_buff *skb, const struct nfnl_info *info, if (table == NULL) goto err_kzalloc; + table->validate_state = NFT_VALIDATE_SKIP; table->name = nla_strdup(attr, GFP_KERNEL_ACCOUNT); if (table->name == NULL) goto err_strdup; @@ -1575,7 +1582,8 @@ nla_put_failure: } static int nft_dump_basechain_hook(struct sk_buff *skb, int family, - const struct nft_base_chain *basechain) + const struct nft_base_chain *basechain, + const struct list_head *hook_list) { const struct nf_hook_ops *ops = &basechain->ops; struct nft_hook *hook, *first = NULL; @@ -1592,7 +1600,11 @@ static int nft_dump_basechain_hook(struct sk_buff *skb, int family, if (nft_base_chain_netdev(family, ops->hooknum)) { nest_devs = nla_nest_start_noflag(skb, NFTA_HOOK_DEVS); - list_for_each_entry(hook, &basechain->hook_list, list) { + + if (!hook_list) + hook_list = &basechain->hook_list; + + list_for_each_entry(hook, hook_list, list) { if (!first) first = hook; @@ -1617,7 +1629,8 @@ nla_put_failure: static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net, u32 portid, u32 seq, int event, u32 flags, int family, const struct nft_table *table, - const struct nft_chain *chain) + const struct nft_chain *chain, + const struct list_head *hook_list) { struct nlmsghdr *nlh; @@ -1627,19 +1640,22 @@ static int nf_tables_fill_chain_info(struct sk_buff *skb, struct net *net, if (!nlh) goto nla_put_failure; - if (nla_put_string(skb, NFTA_CHAIN_TABLE, table->name)) - goto nla_put_failure; - if (nla_put_be64(skb, NFTA_CHAIN_HANDLE, cpu_to_be64(chain->handle), + if (nla_put_string(skb, NFTA_CHAIN_TABLE, table->name) || + nla_put_string(skb, NFTA_CHAIN_NAME, chain->name) || + nla_put_be64(skb, NFTA_CHAIN_HANDLE, cpu_to_be64(chain->handle), NFTA_CHAIN_PAD)) goto nla_put_failure; - if (nla_put_string(skb, NFTA_CHAIN_NAME, chain->name)) - goto nla_put_failure; + + if (event == NFT_MSG_DELCHAIN && !hook_list) { + nlmsg_end(skb, nlh); + return 0; + } if (nft_is_base_chain(chain)) { const struct nft_base_chain *basechain = nft_base_chain(chain); struct nft_stats __percpu *stats; - if (nft_dump_basechain_hook(skb, family, basechain)) + if (nft_dump_basechain_hook(skb, family, basechain, hook_list)) goto nla_put_failure; if (nla_put_be32(skb, NFTA_CHAIN_POLICY, @@ -1674,7 +1690,8 @@ nla_put_failure: return -1; } -static void nf_tables_chain_notify(const struct nft_ctx *ctx, int event) +static void nf_tables_chain_notify(const struct nft_ctx *ctx, int event, + const struct list_head *hook_list) { struct nftables_pernet *nft_net; struct sk_buff *skb; @@ -1694,7 +1711,7 @@ static void nf_tables_chain_notify(const struct nft_ctx *ctx, int event) err = nf_tables_fill_chain_info(skb, ctx->net, ctx->portid, ctx->seq, event, flags, ctx->family, ctx->table, - ctx->chain); + ctx->chain, hook_list); if (err < 0) { kfree_skb(skb); goto err; @@ -1740,7 +1757,7 @@ static int nf_tables_dump_chains(struct sk_buff *skb, NFT_MSG_NEWCHAIN, NLM_F_MULTI, table->family, table, - chain) < 0) + chain, NULL) < 0) goto done; nl_dump_check_consistent(cb, nlmsg_hdr(skb)); @@ 
-1794,7 +1811,7 @@ static int nf_tables_getchain(struct sk_buff *skb, const struct nfnl_info *info, err = nf_tables_fill_chain_info(skb2, net, NETLINK_CB(skb).portid, info->nlh->nlmsg_seq, NFT_MSG_NEWCHAIN, - 0, family, table, chain); + 0, family, table, chain, NULL); if (err < 0) goto err_fill_chain_info; @@ -1955,7 +1972,8 @@ static struct nft_hook *nft_hook_list_find(struct list_head *hook_list, static int nf_tables_parse_netdev_hooks(struct net *net, const struct nlattr *attr, - struct list_head *hook_list) + struct list_head *hook_list, + struct netlink_ext_ack *extack) { struct nft_hook *hook, *next; const struct nlattr *tmp; @@ -1969,10 +1987,12 @@ static int nf_tables_parse_netdev_hooks(struct net *net, hook = nft_netdev_hook_alloc(net, tmp); if (IS_ERR(hook)) { + NL_SET_BAD_ATTR(extack, tmp); err = PTR_ERR(hook); goto err_hook; } if (nft_hook_list_find(hook_list, hook)) { + NL_SET_BAD_ATTR(extack, tmp); kfree(hook); err = -EEXIST; goto err_hook; @@ -2003,38 +2023,41 @@ struct nft_chain_hook { struct list_head list; }; -static int nft_chain_parse_netdev(struct net *net, - struct nlattr *tb[], - struct list_head *hook_list) +static int nft_chain_parse_netdev(struct net *net, struct nlattr *tb[], + struct list_head *hook_list, + struct netlink_ext_ack *extack, u32 flags) { struct nft_hook *hook; int err; if (tb[NFTA_HOOK_DEV]) { hook = nft_netdev_hook_alloc(net, tb[NFTA_HOOK_DEV]); - if (IS_ERR(hook)) + if (IS_ERR(hook)) { + NL_SET_BAD_ATTR(extack, tb[NFTA_HOOK_DEV]); return PTR_ERR(hook); + } list_add_tail(&hook->list, hook_list); } else if (tb[NFTA_HOOK_DEVS]) { err = nf_tables_parse_netdev_hooks(net, tb[NFTA_HOOK_DEVS], - hook_list); + hook_list, extack); if (err < 0) return err; - if (list_empty(hook_list)) - return -EINVAL; - } else { - return -EINVAL; } + if (flags & NFT_CHAIN_HW_OFFLOAD && + list_empty(hook_list)) + return -EINVAL; + return 0; } static int nft_chain_parse_hook(struct net *net, + struct nft_base_chain *basechain, const struct nlattr * const nla[], struct nft_chain_hook *hook, u8 family, - struct netlink_ext_ack *extack, bool autoload) + u32 flags, struct netlink_ext_ack *extack) { struct nftables_pernet *nft_net = nft_pernet(net); struct nlattr *ha[NFTA_HOOK_MAX + 1]; @@ -2050,31 +2073,46 @@ static int nft_chain_parse_hook(struct net *net, if (err < 0) return err; - if (ha[NFTA_HOOK_HOOKNUM] == NULL || - ha[NFTA_HOOK_PRIORITY] == NULL) - return -EINVAL; + if (!basechain) { + if (!ha[NFTA_HOOK_HOOKNUM] || + !ha[NFTA_HOOK_PRIORITY]) + return -EINVAL; - hook->num = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM])); - hook->priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY])); + hook->num = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM])); + hook->priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY])); - type = __nft_chain_type_get(family, NFT_CHAIN_T_DEFAULT); - if (!type) - return -EOPNOTSUPP; + type = __nft_chain_type_get(family, NFT_CHAIN_T_DEFAULT); + if (!type) + return -EOPNOTSUPP; - if (nla[NFTA_CHAIN_TYPE]) { - type = nf_tables_chain_type_lookup(net, nla[NFTA_CHAIN_TYPE], - family, autoload); - if (IS_ERR(type)) { - NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_TYPE]); - return PTR_ERR(type); + if (nla[NFTA_CHAIN_TYPE]) { + type = nf_tables_chain_type_lookup(net, nla[NFTA_CHAIN_TYPE], + family, true); + if (IS_ERR(type)) { + NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_TYPE]); + return PTR_ERR(type); + } } - } - if (hook->num >= NFT_MAX_HOOKS || !(type->hook_mask & (1 << hook->num))) - return -EOPNOTSUPP; + if (hook->num >= NFT_MAX_HOOKS || !(type->hook_mask & (1 << hook->num))) 
+ return -EOPNOTSUPP; - if (type->type == NFT_CHAIN_T_NAT && - hook->priority <= NF_IP_PRI_CONNTRACK) - return -EOPNOTSUPP; + if (type->type == NFT_CHAIN_T_NAT && + hook->priority <= NF_IP_PRI_CONNTRACK) + return -EOPNOTSUPP; + } else { + if (ha[NFTA_HOOK_HOOKNUM]) { + hook->num = ntohl(nla_get_be32(ha[NFTA_HOOK_HOOKNUM])); + if (hook->num != basechain->ops.hooknum) + return -EOPNOTSUPP; + } + if (ha[NFTA_HOOK_PRIORITY]) { + hook->priority = ntohl(nla_get_be32(ha[NFTA_HOOK_PRIORITY])); + if (hook->priority != basechain->ops.priority) + return -EOPNOTSUPP; + } + + type = basechain->type; + } if (!try_module_get(type->owner)) { if (nla[NFTA_CHAIN_TYPE]) @@ -2086,7 +2124,7 @@ static int nft_chain_parse_hook(struct net *net, INIT_LIST_HEAD(&hook->list); if (nft_base_chain_netdev(family, hook->num)) { - err = nft_chain_parse_netdev(net, ha, &hook->list); + err = nft_chain_parse_netdev(net, ha, &hook->list, extack, flags); if (err < 0) { module_put(type->owner); return err; @@ -2110,38 +2148,34 @@ static void nft_chain_release_hook(struct nft_chain_hook *hook) module_put(hook->type->owner); } -struct nft_rules_old { - struct rcu_head h; - struct nft_rule_blob *blob; -}; - -static void nft_last_rule(struct nft_rule_blob *blob, const void *ptr) +static void nft_last_rule(const struct nft_chain *chain, const void *ptr) { - struct nft_rule_dp *prule; + struct nft_rule_dp_last *lrule; + + BUILD_BUG_ON(offsetof(struct nft_rule_dp_last, end) != 0); - prule = (struct nft_rule_dp *)ptr; - prule->is_last = 1; + lrule = (struct nft_rule_dp_last *)ptr; + lrule->end.is_last = 1; + lrule->chain = chain; /* blob size does not include the trailer rule */ } -static struct nft_rule_blob *nf_tables_chain_alloc_rules(unsigned int size) +static struct nft_rule_blob *nf_tables_chain_alloc_rules(const struct nft_chain *chain, + unsigned int size) { struct nft_rule_blob *blob; - /* size must include room for the last rule */ - if (size < offsetof(struct nft_rule_dp, data)) - return NULL; - - size += sizeof(struct nft_rule_blob) + sizeof(struct nft_rules_old); if (size > INT_MAX) return NULL; + size += sizeof(struct nft_rule_blob) + sizeof(struct nft_rule_dp_last); + blob = kvmalloc(size, GFP_KERNEL_ACCOUNT); if (!blob) return NULL; blob->size = 0; - nft_last_rule(blob, blob->data); + nft_last_rule(chain, blob->data); return blob; } @@ -2172,12 +2206,8 @@ static int nft_basechain_init(struct nft_base_chain *basechain, u8 family, list_splice_init(&hook->list, &basechain->hook_list); list_for_each_entry(h, &basechain->hook_list, list) nft_basechain_hook_init(&h->ops, family, hook, chain); - - basechain->ops.hooknum = hook->num; - basechain->ops.priority = hook->priority; - } else { - nft_basechain_hook_init(&basechain->ops, family, hook, chain); } + nft_basechain_hook_init(&basechain->ops, family, hook, chain); chain->flags |= NFT_CHAIN_BASE | flags; basechain->policy = NF_ACCEPT; @@ -2220,7 +2250,6 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, struct nft_rule_blob *blob; struct nft_trans *trans; struct nft_chain *chain; - unsigned int data_size; int err; if (table->use == UINT_MAX) @@ -2228,13 +2257,13 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, if (nla[NFTA_CHAIN_HOOK]) { struct nft_stats __percpu *stats = NULL; - struct nft_chain_hook hook; + struct nft_chain_hook hook = {}; if (flags & NFT_CHAIN_BINDING) return -EOPNOTSUPP; - err = nft_chain_parse_hook(net, nla, &hook, family, extack, - true); + err = nft_chain_parse_hook(net, NULL, nla, &hook, family, 
flags, + extack); if (err < 0) return err; @@ -2308,8 +2337,7 @@ static int nf_tables_addchain(struct nft_ctx *ctx, u8 family, u8 genmask, chain->udlen = nla_len(nla[NFTA_CHAIN_USERDATA]); } - data_size = offsetof(struct nft_rule_dp, data); /* last rule */ - blob = nf_tables_chain_alloc_rules(data_size); + blob = nf_tables_chain_alloc_rules(chain, 0); if (!blob) { err = -ENOMEM; goto err_destroy_chain; @@ -2349,65 +2377,57 @@ err_destroy_chain: return err; } -static bool nft_hook_list_equal(struct list_head *hook_list1, - struct list_head *hook_list2) -{ - struct nft_hook *hook; - int n = 0, m = 0; - - n = 0; - list_for_each_entry(hook, hook_list2, list) { - if (!nft_hook_list_find(hook_list1, hook)) - return false; - - n++; - } - list_for_each_entry(hook, hook_list1, list) - m++; - - return n == m; -} - static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, u32 flags, const struct nlattr *attr, struct netlink_ext_ack *extack) { const struct nlattr * const *nla = ctx->nla; + struct nft_base_chain *basechain = NULL; struct nft_table *table = ctx->table; struct nft_chain *chain = ctx->chain; - struct nft_base_chain *basechain; + struct nft_chain_hook hook = {}; struct nft_stats *stats = NULL; - struct nft_chain_hook hook; + struct nft_hook *h, *next; struct nf_hook_ops *ops; struct nft_trans *trans; + bool unregister = false; int err; if (chain->flags ^ flags) return -EOPNOTSUPP; + INIT_LIST_HEAD(&hook.list); + if (nla[NFTA_CHAIN_HOOK]) { if (!nft_is_base_chain(chain)) { NL_SET_BAD_ATTR(extack, attr); return -EEXIST; } - err = nft_chain_parse_hook(ctx->net, nla, &hook, ctx->family, - extack, false); + + basechain = nft_base_chain(chain); + err = nft_chain_parse_hook(ctx->net, basechain, nla, &hook, + ctx->family, flags, extack); if (err < 0) return err; - basechain = nft_base_chain(chain); if (basechain->type != hook.type) { nft_chain_release_hook(&hook); NL_SET_BAD_ATTR(extack, attr); return -EEXIST; } - if (nft_base_chain_netdev(ctx->family, hook.num)) { - if (!nft_hook_list_equal(&basechain->hook_list, - &hook.list)) { - nft_chain_release_hook(&hook); - NL_SET_BAD_ATTR(extack, attr); - return -EEXIST; + if (nft_base_chain_netdev(ctx->family, basechain->ops.hooknum)) { + list_for_each_entry_safe(h, next, &hook.list, list) { + h->ops.pf = basechain->ops.pf; + h->ops.hooknum = basechain->ops.hooknum; + h->ops.priority = basechain->ops.priority; + h->ops.priv = basechain->ops.priv; + h->ops.hook = basechain->ops.hook; + + if (nft_hook_list_find(&basechain->hook_list, h)) { + list_del(&h->list); + kfree(h); + } } } else { ops = &basechain->ops; @@ -2418,7 +2438,6 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, return -EEXIST; } } - nft_chain_release_hook(&hook); } if (nla[NFTA_CHAIN_HANDLE] && @@ -2429,24 +2448,43 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, nla[NFTA_CHAIN_NAME], genmask); if (!IS_ERR(chain2)) { NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_NAME]); - return -EEXIST; + err = -EEXIST; + goto err_hooks; } } if (nla[NFTA_CHAIN_COUNTERS]) { - if (!nft_is_base_chain(chain)) - return -EOPNOTSUPP; + if (!nft_is_base_chain(chain)) { + err = -EOPNOTSUPP; + goto err_hooks; + } stats = nft_stats_alloc(nla[NFTA_CHAIN_COUNTERS]); - if (IS_ERR(stats)) - return PTR_ERR(stats); + if (IS_ERR(stats)) { + err = PTR_ERR(stats); + goto err_hooks; + } } + if (!(table->flags & NFT_TABLE_F_DORMANT) && + nft_is_base_chain(chain) && + !list_empty(&hook.list)) { + basechain = nft_base_chain(chain); + ops = &basechain->ops; + + if 
(nft_base_chain_netdev(table->family, basechain->ops.hooknum)) { + err = nft_netdev_register_hooks(ctx->net, &hook.list); + if (err < 0) + goto err_hooks; + } + } + + unregister = true; err = -ENOMEM; trans = nft_trans_alloc(ctx, NFT_MSG_NEWCHAIN, sizeof(struct nft_trans_chain)); if (trans == NULL) - goto err; + goto err_trans; nft_trans_chain_stats(trans) = stats; nft_trans_chain_update(trans) = true; @@ -2465,7 +2503,7 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, err = -ENOMEM; name = nla_strdup(nla[NFTA_CHAIN_NAME], GFP_KERNEL_ACCOUNT); if (!name) - goto err; + goto err_trans; err = -EEXIST; list_for_each_entry(tmp, &nft_net->commit_list, list) { @@ -2476,18 +2514,35 @@ static int nf_tables_updchain(struct nft_ctx *ctx, u8 genmask, u8 policy, strcmp(name, nft_trans_chain_name(tmp)) == 0) { NL_SET_BAD_ATTR(extack, nla[NFTA_CHAIN_NAME]); kfree(name); - goto err; + goto err_trans; } } nft_trans_chain_name(trans) = name; } + + nft_trans_basechain(trans) = basechain; + INIT_LIST_HEAD(&nft_trans_chain_hooks(trans)); + list_splice(&hook.list, &nft_trans_chain_hooks(trans)); + nft_trans_commit_list_add_tail(ctx->net, trans); return 0; -err: + +err_trans: free_percpu(stats); kfree(trans); +err_hooks: + if (nla[NFTA_CHAIN_HOOK]) { + list_for_each_entry_safe(h, next, &hook.list, list) { + if (unregister) + nf_unregister_net_hook(ctx->net, &h->ops); + list_del(&h->list); + kfree_rcu(h, rcu); + } + module_put(hook.type->owner); + } + return err; } @@ -2611,6 +2666,59 @@ static int nf_tables_newchain(struct sk_buff *skb, const struct nfnl_info *info, return nf_tables_addchain(&ctx, family, genmask, policy, flags, extack); } +static int nft_delchain_hook(struct nft_ctx *ctx, struct nft_chain *chain, + struct netlink_ext_ack *extack) +{ + const struct nlattr * const *nla = ctx->nla; + struct nft_chain_hook chain_hook = {}; + struct nft_base_chain *basechain; + struct nft_hook *this, *hook; + LIST_HEAD(chain_del_list); + struct nft_trans *trans; + int err; + + if (!nft_is_base_chain(chain)) + return -EOPNOTSUPP; + + basechain = nft_base_chain(chain); + err = nft_chain_parse_hook(ctx->net, basechain, nla, &chain_hook, + ctx->family, chain->flags, extack); + if (err < 0) + return err; + + list_for_each_entry(this, &chain_hook.list, list) { + hook = nft_hook_list_find(&basechain->hook_list, this); + if (!hook) { + err = -ENOENT; + goto err_chain_del_hook; + } + list_move(&hook->list, &chain_del_list); + } + + trans = nft_trans_alloc(ctx, NFT_MSG_DELCHAIN, + sizeof(struct nft_trans_chain)); + if (!trans) { + err = -ENOMEM; + goto err_chain_del_hook; + } + + nft_trans_basechain(trans) = basechain; + nft_trans_chain_update(trans) = true; + INIT_LIST_HEAD(&nft_trans_chain_hooks(trans)); + list_splice(&chain_del_list, &nft_trans_chain_hooks(trans)); + nft_chain_release_hook(&chain_hook); + + nft_trans_commit_list_add_tail(ctx->net, trans); + + return 0; + +err_chain_del_hook: + list_splice(&chain_del_list, &basechain->hook_list); + nft_chain_release_hook(&chain_hook); + + return err; +} + static int nf_tables_delchain(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const nla[]) { @@ -2651,12 +2759,19 @@ static int nf_tables_delchain(struct sk_buff *skb, const struct nfnl_info *info, return PTR_ERR(chain); } + nft_ctx_init(&ctx, net, skb, info->nlh, family, table, chain, nla); + + if (nla[NFTA_CHAIN_HOOK]) { + if (chain->flags & NFT_CHAIN_HW_OFFLOAD) + return -EOPNOTSUPP; + + return nft_delchain_hook(&ctx, chain, extack); + } + if (info->nlh->nlmsg_flags & 
NLM_F_NONREC && chain->use > 0) return -EBUSY; - nft_ctx_init(&ctx, net, skb, info->nlh, family, table, chain, nla); - use = chain->use; list_for_each_entry(rule, &chain->rules, list) { if (!nft_is_active_next(net, rule)) @@ -3666,7 +3781,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info, } if (expr_info[i].ops->validate) - nft_validate_state_update(net, NFT_VALIDATE_NEED); + nft_validate_state_update(table, NFT_VALIDATE_NEED); expr_info[i].ops = NULL; expr = nft_expr_next(expr); @@ -3716,7 +3831,7 @@ static int nf_tables_newrule(struct sk_buff *skb, const struct nfnl_info *info, if (flow) nft_trans_flow_rule(trans) = flow; - if (nft_net->validate_state == NFT_VALIDATE_DO) + if (table->validate_state == NFT_VALIDATE_DO) return nft_table_validate(net, table); return 0; @@ -4151,6 +4266,12 @@ static int nf_tables_fill_set(struct sk_buff *skb, const struct nft_ctx *ctx, if (nla_put_be64(skb, NFTA_SET_HANDLE, cpu_to_be64(set->handle), NFTA_SET_PAD)) goto nla_put_failure; + + if (event == NFT_MSG_DELSET) { + nlmsg_end(skb, nlh); + return 0; + } + if (set->flags != 0) if (nla_put_be32(skb, NFTA_SET_FLAGS, htonl(set->flags))) goto nla_put_failure; @@ -6318,7 +6439,7 @@ static int nft_add_set_elem(struct nft_ctx *ctx, struct nft_set *set, if (desc.type == NFT_DATA_VERDICT && (elem.data.val.verdict.code == NFT_GOTO || elem.data.val.verdict.code == NFT_JUMP)) - nft_validate_state_update(ctx->net, + nft_validate_state_update(ctx->table, NFT_VALIDATE_NEED); } @@ -6443,7 +6564,6 @@ static int nf_tables_newsetelem(struct sk_buff *skb, const struct nfnl_info *info, const struct nlattr * const nla[]) { - struct nftables_pernet *nft_net = nft_pernet(info->net); struct netlink_ext_ack *extack = info->extack; u8 genmask = nft_genmask_next(info->net); u8 family = info->nfmsg->nfgen_family; @@ -6482,7 +6602,7 @@ static int nf_tables_newsetelem(struct sk_buff *skb, } } - if (nft_net->validate_state == NFT_VALIDATE_DO) + if (table->validate_state == NFT_VALIDATE_DO) return nft_table_validate(net, table); return 0; @@ -7156,13 +7276,20 @@ static int nf_tables_fill_obj_info(struct sk_buff *skb, struct net *net, if (nla_put_string(skb, NFTA_OBJ_TABLE, table->name) || nla_put_string(skb, NFTA_OBJ_NAME, obj->key.name) || - nla_put_be32(skb, NFTA_OBJ_TYPE, htonl(obj->ops->type->type)) || - nla_put_be32(skb, NFTA_OBJ_USE, htonl(obj->use)) || - nft_object_dump(skb, NFTA_OBJ_DATA, obj, reset) || nla_put_be64(skb, NFTA_OBJ_HANDLE, cpu_to_be64(obj->handle), NFTA_OBJ_PAD)) goto nla_put_failure; + if (event == NFT_MSG_DELOBJ) { + nlmsg_end(skb, nlh); + return 0; + } + + if (nla_put_be32(skb, NFTA_OBJ_TYPE, htonl(obj->ops->type->type)) || + nla_put_be32(skb, NFTA_OBJ_USE, htonl(obj->use)) || + nft_object_dump(skb, NFTA_OBJ_DATA, obj, reset)) + goto nla_put_failure; + if (obj->udata && nla_put(skb, NFTA_OBJ_USERDATA, obj->udlen, obj->udata)) goto nla_put_failure; @@ -7568,7 +7695,8 @@ static const struct nla_policy nft_flowtable_hook_policy[NFTA_FLOWTABLE_HOOK_MAX static int nft_flowtable_parse_hook(const struct nft_ctx *ctx, const struct nlattr *attr, struct nft_flowtable_hook *flowtable_hook, - struct nft_flowtable *flowtable, bool add) + struct nft_flowtable *flowtable, + struct netlink_ext_ack *extack, bool add) { struct nlattr *tb[NFTA_FLOWTABLE_HOOK_MAX + 1]; struct nft_hook *hook; @@ -7615,7 +7743,8 @@ static int nft_flowtable_parse_hook(const struct nft_ctx *ctx, if (tb[NFTA_FLOWTABLE_HOOK_DEVS]) { err = nf_tables_parse_netdev_hooks(ctx->net, tb[NFTA_FLOWTABLE_HOOK_DEVS], - 
&flowtable_hook->list); + &flowtable_hook->list, + extack); if (err < 0) return err; } @@ -7747,7 +7876,7 @@ err_unregister_net_hooks: return err; } -static void nft_flowtable_hooks_destroy(struct list_head *hook_list) +static void nft_hooks_destroy(struct list_head *hook_list) { struct nft_hook *hook, *next; @@ -7758,7 +7887,8 @@ static void nft_flowtable_hooks_destroy(struct list_head *hook_list) } static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh, - struct nft_flowtable *flowtable) + struct nft_flowtable *flowtable, + struct netlink_ext_ack *extack) { const struct nlattr * const *nla = ctx->nla; struct nft_flowtable_hook flowtable_hook; @@ -7769,7 +7899,7 @@ static int nft_flowtable_update(struct nft_ctx *ctx, const struct nlmsghdr *nlh, int err; err = nft_flowtable_parse_hook(ctx, nla[NFTA_FLOWTABLE_HOOK], - &flowtable_hook, flowtable, false); + &flowtable_hook, flowtable, extack, false); if (err < 0) return err; @@ -7874,7 +8004,7 @@ static int nf_tables_newflowtable(struct sk_buff *skb, nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla); - return nft_flowtable_update(&ctx, info->nlh, flowtable); + return nft_flowtable_update(&ctx, info->nlh, flowtable, extack); } nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla); @@ -7915,7 +8045,7 @@ static int nf_tables_newflowtable(struct sk_buff *skb, goto err3; err = nft_flowtable_parse_hook(&ctx, nla[NFTA_FLOWTABLE_HOOK], - &flowtable_hook, flowtable, true); + &flowtable_hook, flowtable, extack, true); if (err < 0) goto err4; @@ -7927,7 +8057,7 @@ static int nf_tables_newflowtable(struct sk_buff *skb, &flowtable->hook_list, flowtable); if (err < 0) { - nft_flowtable_hooks_destroy(&flowtable->hook_list); + nft_hooks_destroy(&flowtable->hook_list); goto err4; } @@ -7967,7 +8097,8 @@ static void nft_flowtable_hook_release(struct nft_flowtable_hook *flowtable_hook } static int nft_delflowtable_hook(struct nft_ctx *ctx, - struct nft_flowtable *flowtable) + struct nft_flowtable *flowtable, + struct netlink_ext_ack *extack) { const struct nlattr * const *nla = ctx->nla; struct nft_flowtable_hook flowtable_hook; @@ -7977,7 +8108,7 @@ static int nft_delflowtable_hook(struct nft_ctx *ctx, int err; err = nft_flowtable_parse_hook(ctx, nla[NFTA_FLOWTABLE_HOOK], - &flowtable_hook, flowtable, false); + &flowtable_hook, flowtable, extack, false); if (err < 0) return err; @@ -8059,7 +8190,7 @@ static int nf_tables_delflowtable(struct sk_buff *skb, nft_ctx_init(&ctx, net, skb, info->nlh, family, table, NULL, nla); if (nla[NFTA_FLOWTABLE_HOOK]) - return nft_delflowtable_hook(&ctx, flowtable); + return nft_delflowtable_hook(&ctx, flowtable, extack); if (flowtable->use > 0) { NL_SET_BAD_ATTR(extack, attr); @@ -8087,9 +8218,16 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net, if (nla_put_string(skb, NFTA_FLOWTABLE_TABLE, flowtable->table->name) || nla_put_string(skb, NFTA_FLOWTABLE_NAME, flowtable->name) || - nla_put_be32(skb, NFTA_FLOWTABLE_USE, htonl(flowtable->use)) || nla_put_be64(skb, NFTA_FLOWTABLE_HANDLE, cpu_to_be64(flowtable->handle), - NFTA_FLOWTABLE_PAD) || + NFTA_FLOWTABLE_PAD)) + goto nla_put_failure; + + if (event == NFT_MSG_DELFLOWTABLE && !hook_list) { + nlmsg_end(skb, nlh); + return 0; + } + + if (nla_put_be32(skb, NFTA_FLOWTABLE_USE, htonl(flowtable->use)) || nla_put_be32(skb, NFTA_FLOWTABLE_FLAGS, htonl(flowtable->data.flags))) goto nla_put_failure; @@ -8104,6 +8242,9 @@ static int nf_tables_fill_flowtable_info(struct sk_buff *skb, struct net *net, if 
(!nest_devs) goto nla_put_failure; + if (!hook_list) + hook_list = &flowtable->hook_list; + list_for_each_entry_rcu(hook, hook_list, list) { if (nla_put_string(skb, NFTA_DEVICE_NAME, hook->ops.dev->name)) goto nla_put_failure; @@ -8160,8 +8301,7 @@ static int nf_tables_dump_flowtable(struct sk_buff *skb, NFT_MSG_NEWFLOWTABLE, NLM_F_MULTI | NLM_F_APPEND, table->family, - flowtable, - &flowtable->hook_list) < 0) + flowtable, NULL) < 0) goto done; nl_dump_check_consistent(cb, nlmsg_hdr(skb)); @@ -8256,7 +8396,7 @@ static int nf_tables_getflowtable(struct sk_buff *skb, err = nf_tables_fill_flowtable_info(skb2, net, NETLINK_CB(skb).portid, info->nlh->nlmsg_seq, NFT_MSG_NEWFLOWTABLE, 0, family, - flowtable, &flowtable->hook_list); + flowtable, NULL); if (err < 0) goto err_fill_flowtable_info; @@ -8269,8 +8409,7 @@ err_fill_flowtable_info: static void nf_tables_flowtable_notify(struct nft_ctx *ctx, struct nft_flowtable *flowtable, - struct list_head *hook_list, - int event) + struct list_head *hook_list, int event) { struct nftables_pernet *nft_net = nft_pernet(ctx->net); struct sk_buff *skb; @@ -8634,17 +8773,20 @@ static int nf_tables_validate(struct net *net) struct nftables_pernet *nft_net = nft_pernet(net); struct nft_table *table; - switch (nft_net->validate_state) { - case NFT_VALIDATE_SKIP: - break; - case NFT_VALIDATE_NEED: - nft_validate_state_update(net, NFT_VALIDATE_DO); - fallthrough; - case NFT_VALIDATE_DO: - list_for_each_entry(table, &nft_net->tables, list) { + list_for_each_entry(table, &nft_net->tables, list) { + switch (table->validate_state) { + case NFT_VALIDATE_SKIP: + continue; + case NFT_VALIDATE_NEED: + nft_validate_state_update(table, NFT_VALIDATE_DO); + fallthrough; + case NFT_VALIDATE_DO: if (nft_table_validate(net, table) < 0) return -EAGAIN; + + nft_validate_state_update(table, NFT_VALIDATE_SKIP); } + break; } @@ -8729,7 +8871,10 @@ static void nft_commit_release(struct nft_trans *trans) break; case NFT_MSG_DELCHAIN: case NFT_MSG_DESTROYCHAIN: - nf_tables_chain_destroy(&trans->ctx); + if (nft_trans_chain_update(trans)) + nft_hooks_destroy(&nft_trans_chain_hooks(trans)); + else + nf_tables_chain_destroy(&trans->ctx); break; case NFT_MSG_DELRULE: case NFT_MSG_DESTROYRULE: @@ -8752,7 +8897,7 @@ static void nft_commit_release(struct nft_trans *trans) case NFT_MSG_DELFLOWTABLE: case NFT_MSG_DESTROYFLOWTABLE: if (nft_trans_flowtable_update(trans)) - nft_flowtable_hooks_destroy(&nft_trans_flowtable_hooks(trans)); + nft_hooks_destroy(&nft_trans_flowtable_hooks(trans)); else nf_tables_flowtable_destroy(nft_trans_flowtable(trans)); break; @@ -8817,9 +8962,8 @@ static int nf_tables_commit_chain_prepare(struct net *net, struct nft_chain *cha return -ENOMEM; } } - data_size += offsetof(struct nft_rule_dp, data); /* last rule */ - chain->blob_next = nf_tables_chain_alloc_rules(data_size); + chain->blob_next = nf_tables_chain_alloc_rules(chain, data_size); if (!chain->blob_next) return -ENOMEM; @@ -8864,12 +9008,11 @@ static int nf_tables_commit_chain_prepare(struct net *net, struct nft_chain *cha chain->blob_next->size += (unsigned long)(data - (void *)prule); } - prule = (struct nft_rule_dp *)data; - data += offsetof(struct nft_rule_dp, data); if (WARN_ON_ONCE(data > data_boundary)) return -ENOMEM; - nft_last_rule(chain->blob_next, prule); + prule = (struct nft_rule_dp *)data; + nft_last_rule(chain, prule); return 0; } @@ -8890,22 +9033,22 @@ static void nf_tables_commit_chain_prepare_cancel(struct net *net) } } -static void __nf_tables_commit_chain_free_rules_old(struct rcu_head 
*h) +static void __nf_tables_commit_chain_free_rules(struct rcu_head *h) { - struct nft_rules_old *o = container_of(h, struct nft_rules_old, h); + struct nft_rule_dp_last *l = container_of(h, struct nft_rule_dp_last, h); - kvfree(o->blob); + kvfree(l->blob); } static void nf_tables_commit_chain_free_rules_old(struct nft_rule_blob *blob) { - struct nft_rules_old *old; + struct nft_rule_dp_last *last; - /* rcu_head is after end marker */ - old = (void *)blob + sizeof(*blob) + blob->size; - old->blob = blob; + /* last rule trailer is after end marker */ + last = (void *)blob + sizeof(*blob) + blob->size; + last->blob = blob; - call_rcu(&old->h, __nf_tables_commit_chain_free_rules_old); + call_rcu(&last->h, __nf_tables_commit_chain_free_rules); } static void nf_tables_commit_chain(struct net *net, struct nft_chain *chain) @@ -9209,22 +9352,34 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) case NFT_MSG_NEWCHAIN: if (nft_trans_chain_update(trans)) { nft_chain_commit_update(trans); - nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN); + nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN, + &nft_trans_chain_hooks(trans)); + list_splice(&nft_trans_chain_hooks(trans), + &nft_trans_basechain(trans)->hook_list); /* trans destroyed after rcu grace period */ } else { nft_chain_commit_drop_policy(trans); nft_clear(net, trans->ctx.chain); - nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN); + nf_tables_chain_notify(&trans->ctx, NFT_MSG_NEWCHAIN, NULL); nft_trans_destroy(trans); } break; case NFT_MSG_DELCHAIN: case NFT_MSG_DESTROYCHAIN: - nft_chain_del(trans->ctx.chain); - nf_tables_chain_notify(&trans->ctx, trans->msg_type); - nf_tables_unregister_hook(trans->ctx.net, - trans->ctx.table, - trans->ctx.chain); + if (nft_trans_chain_update(trans)) { + nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN, + &nft_trans_chain_hooks(trans)); + nft_netdev_unregister_hooks(net, + &nft_trans_chain_hooks(trans), + true); + } else { + nft_chain_del(trans->ctx.chain); + nf_tables_chain_notify(&trans->ctx, NFT_MSG_DELCHAIN, + NULL); + nf_tables_unregister_hook(trans->ctx.net, + trans->ctx.table, + trans->ctx.chain); + } break; case NFT_MSG_NEWRULE: nft_clear(trans->ctx.net, nft_trans_rule(trans)); @@ -9330,7 +9485,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) nft_clear(net, nft_trans_flowtable(trans)); nf_tables_flowtable_notify(&trans->ctx, nft_trans_flowtable(trans), - &nft_trans_flowtable(trans)->hook_list, + NULL, NFT_MSG_NEWFLOWTABLE); } nft_trans_destroy(trans); @@ -9348,7 +9503,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) list_del_rcu(&nft_trans_flowtable(trans)->list); nf_tables_flowtable_notify(&trans->ctx, nft_trans_flowtable(trans), - &nft_trans_flowtable(trans)->hook_list, + NULL, trans->msg_type); nft_unregister_flowtable_net_hooks(net, &nft_trans_flowtable(trans)->hook_list); @@ -9388,7 +9543,10 @@ static void nf_tables_abort_release(struct nft_trans *trans) nf_tables_table_destroy(&trans->ctx); break; case NFT_MSG_NEWCHAIN: - nf_tables_chain_destroy(&trans->ctx); + if (nft_trans_chain_update(trans)) + nft_hooks_destroy(&nft_trans_chain_hooks(trans)); + else + nf_tables_chain_destroy(&trans->ctx); break; case NFT_MSG_NEWRULE: nf_tables_rule_destroy(&trans->ctx, nft_trans_rule(trans)); @@ -9405,7 +9563,7 @@ static void nf_tables_abort_release(struct nft_trans *trans) break; case NFT_MSG_NEWFLOWTABLE: if (nft_trans_flowtable_update(trans)) - nft_flowtable_hooks_destroy(&nft_trans_flowtable_hooks(trans)); + 
nft_hooks_destroy(&nft_trans_flowtable_hooks(trans)); else nf_tables_flowtable_destroy(nft_trans_flowtable(trans)); break; @@ -9451,6 +9609,9 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) break; case NFT_MSG_NEWCHAIN: if (nft_trans_chain_update(trans)) { + nft_netdev_unregister_hooks(net, + &nft_trans_chain_hooks(trans), + true); free_percpu(nft_trans_chain_stats(trans)); kfree(nft_trans_chain_name(trans)); nft_trans_destroy(trans); @@ -9468,8 +9629,13 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) break; case NFT_MSG_DELCHAIN: case NFT_MSG_DESTROYCHAIN: - trans->ctx.table->use++; - nft_clear(trans->ctx.net, trans->ctx.chain); + if (nft_trans_chain_update(trans)) { + list_splice(&nft_trans_chain_hooks(trans), + &nft_trans_basechain(trans)->hook_list); + } else { + trans->ctx.table->use++; + nft_clear(trans->ctx.net, trans->ctx.chain); + } nft_trans_destroy(trans); break; case NFT_MSG_NEWRULE: @@ -9586,11 +9752,6 @@ static int __nf_tables_abort(struct net *net, enum nfnl_abort_action action) return 0; } -static void nf_tables_cleanup(struct net *net) -{ - nft_validate_state_update(net, NFT_VALIDATE_SKIP); -} - static int nf_tables_abort(struct net *net, struct sk_buff *skb, enum nfnl_abort_action action) { @@ -9624,7 +9785,6 @@ static const struct nfnetlink_subsystem nf_tables_subsys = { .cb = nf_tables_cb, .commit = nf_tables_commit, .abort = nf_tables_abort, - .cleanup = nf_tables_cleanup, .valid_genid = nf_tables_valid_genid, .owner = THIS_MODULE, }; @@ -10367,7 +10527,6 @@ static int __net_init nf_tables_init_net(struct net *net) INIT_LIST_HEAD(&nft_net->notify_list); mutex_init(&nft_net->commit_mutex); nft_net->base_seq = 1; - nft_net->validate_state = NFT_VALIDATE_SKIP; return 0; } diff --git a/net/netfilter/nf_tables_core.c b/net/netfilter/nf_tables_core.c index 6ecd0ba2e546..4d0ce12221f6 100644 --- a/net/netfilter/nf_tables_core.c +++ b/net/netfilter/nf_tables_core.c @@ -41,39 +41,37 @@ static inline bool nf_skip_indirect_calls(void) { return false; } static inline void nf_skip_indirect_calls_enable(void) { } #endif -static noinline void __nft_trace_packet(struct nft_traceinfo *info, - const struct nft_chain *chain, +static noinline void __nft_trace_packet(const struct nft_pktinfo *pkt, + const struct nft_verdict *verdict, + const struct nft_rule_dp *rule, + struct nft_traceinfo *info, enum nft_trace_types type) { if (!info->trace || !info->nf_trace) return; - info->chain = chain; info->type = type; - nft_trace_notify(info); + nft_trace_notify(pkt, verdict, rule, info); } static inline void nft_trace_packet(const struct nft_pktinfo *pkt, + struct nft_verdict *verdict, struct nft_traceinfo *info, - const struct nft_chain *chain, const struct nft_rule_dp *rule, enum nft_trace_types type) { if (static_branch_unlikely(&nft_trace_enabled)) { info->nf_trace = pkt->skb->nf_trace; - info->rule = rule; - __nft_trace_packet(info, chain, type); + __nft_trace_packet(pkt, verdict, rule, info, type); } } static inline void nft_trace_copy_nftrace(const struct nft_pktinfo *pkt, struct nft_traceinfo *info) { - if (static_branch_unlikely(&nft_trace_enabled)) { - if (info->trace) - info->nf_trace = pkt->skb->nf_trace; - } + if (static_branch_unlikely(&nft_trace_enabled)) + info->nf_trace = pkt->skb->nf_trace; } static void nft_bitwise_fast_eval(const struct nft_expr *expr, @@ -110,8 +108,9 @@ static void nft_cmp16_fast_eval(const struct nft_expr *expr, regs->verdict.code = NFT_BREAK; } -static noinline void __nft_trace_verdict(struct 
nft_traceinfo *info, - const struct nft_chain *chain, +static noinline void __nft_trace_verdict(const struct nft_pktinfo *pkt, + struct nft_traceinfo *info, + const struct nft_rule_dp *rule, const struct nft_regs *regs) { enum nft_trace_types type; @@ -129,22 +128,20 @@ static noinline void __nft_trace_verdict(struct nft_traceinfo *info, type = NFT_TRACETYPE_RULE; if (info->trace) - info->nf_trace = info->pkt->skb->nf_trace; + info->nf_trace = pkt->skb->nf_trace; break; } - __nft_trace_packet(info, chain, type); + __nft_trace_packet(pkt, &regs->verdict, rule, info, type); } -static inline void nft_trace_verdict(struct nft_traceinfo *info, - const struct nft_chain *chain, +static inline void nft_trace_verdict(const struct nft_pktinfo *pkt, + struct nft_traceinfo *info, const struct nft_rule_dp *rule, const struct nft_regs *regs) { - if (static_branch_unlikely(&nft_trace_enabled)) { - info->rule = rule; - __nft_trace_verdict(info, chain, regs); - } + if (static_branch_unlikely(&nft_trace_enabled)) + __nft_trace_verdict(pkt, info, rule, regs); } static bool nft_payload_fast_eval(const struct nft_expr *expr, @@ -203,9 +200,7 @@ static noinline void nft_update_chain_stats(const struct nft_chain *chain, } struct nft_jumpstack { - const struct nft_chain *chain; const struct nft_rule_dp *rule; - const struct nft_rule_dp *last_rule; }; static void expr_call_ops_eval(const struct nft_expr *expr, @@ -248,7 +243,6 @@ indirect_call: #define nft_rule_expr_first(rule) (struct nft_expr *)&rule->data[0] #define nft_rule_expr_next(expr) ((void *)expr) + expr->ops->size #define nft_rule_expr_last(rule) (struct nft_expr *)&rule->data[rule->dlen] -#define nft_rule_next(rule) (void *)rule + sizeof(*rule) + rule->dlen #define nft_rule_dp_for_each_expr(expr, last, rule) \ for ((expr) = nft_rule_expr_first(rule), (last) = nft_rule_expr_last(rule); \ @@ -259,9 +253,9 @@ unsigned int nft_do_chain(struct nft_pktinfo *pkt, void *priv) { const struct nft_chain *chain = priv, *basechain = chain; - const struct nft_rule_dp *rule, *last_rule; const struct net *net = nft_net(pkt); const struct nft_expr *expr, *last; + const struct nft_rule_dp *rule; struct nft_regs regs = {}; unsigned int stackptr = 0; struct nft_jumpstack jumpstack[NFT_JUMP_STACK_SIZE]; @@ -271,7 +265,7 @@ nft_do_chain(struct nft_pktinfo *pkt, void *priv) info.trace = false; if (static_branch_unlikely(&nft_trace_enabled)) - nft_trace_init(&info, pkt, &regs.verdict, basechain); + nft_trace_init(&info, pkt, basechain); do_chain: if (genbit) blob = rcu_dereference(chain->blob_gen_1); @@ -279,10 +273,9 @@ do_chain: blob = rcu_dereference(chain->blob_gen_0); rule = (struct nft_rule_dp *)blob->data; - last_rule = (void *)blob->data + blob->size; next_rule: regs.verdict.code = NFT_CONTINUE; - for (; rule < last_rule; rule = nft_rule_next(rule)) { + for (; !rule->is_last ; rule = nft_rule_next(rule)) { nft_rule_dp_for_each_expr(expr, last, rule) { if (expr->ops == &nft_cmp_fast_ops) nft_cmp_fast_eval(expr, &regs); @@ -304,14 +297,14 @@ next_rule: nft_trace_copy_nftrace(pkt, &info); continue; case NFT_CONTINUE: - nft_trace_packet(pkt, &info, chain, rule, + nft_trace_packet(pkt, &regs.verdict, &info, rule, NFT_TRACETYPE_RULE); continue; } break; } - nft_trace_verdict(&info, chain, rule, &regs); + nft_trace_verdict(pkt, &info, rule, &regs); switch (regs.verdict.code & NF_VERDICT_MASK) { case NF_ACCEPT: @@ -325,9 +318,7 @@ next_rule: case NFT_JUMP: if (WARN_ON_ONCE(stackptr >= NFT_JUMP_STACK_SIZE)) return NF_DROP; - jumpstack[stackptr].chain = chain; jumpstack[stackptr].rule = 
nft_rule_next(rule); - jumpstack[stackptr].last_rule = last_rule; stackptr++; fallthrough; case NFT_GOTO: @@ -342,13 +333,11 @@ next_rule: if (stackptr > 0) { stackptr--; - chain = jumpstack[stackptr].chain; rule = jumpstack[stackptr].rule; - last_rule = jumpstack[stackptr].last_rule; goto next_rule; } - nft_trace_packet(pkt, &info, basechain, NULL, NFT_TRACETYPE_POLICY); + nft_trace_packet(pkt, &regs.verdict, &info, NULL, NFT_TRACETYPE_POLICY); if (static_branch_unlikely(&nft_counters_enabled)) nft_update_chain_stats(basechain, pkt); diff --git a/net/netfilter/nf_tables_trace.c b/net/netfilter/nf_tables_trace.c index 1163ba9c1401..6d41c0bd3d78 100644 --- a/net/netfilter/nf_tables_trace.c +++ b/net/netfilter/nf_tables_trace.c @@ -124,9 +124,11 @@ static int nf_trace_fill_pkt_info(struct sk_buff *nlskb, } static int nf_trace_fill_rule_info(struct sk_buff *nlskb, + const struct nft_verdict *verdict, + const struct nft_rule_dp *rule, const struct nft_traceinfo *info) { - if (!info->rule || info->rule->is_last) + if (!rule || rule->is_last) return 0; /* a continue verdict with ->type == RETURN means that this is @@ -135,15 +137,16 @@ static int nf_trace_fill_rule_info(struct sk_buff *nlskb, * Since no rule matched, the ->rule pointer is invalid. */ if (info->type == NFT_TRACETYPE_RETURN && - info->verdict->code == NFT_CONTINUE) + verdict->code == NFT_CONTINUE) return 0; return nla_put_be64(nlskb, NFTA_TRACE_RULE_HANDLE, - cpu_to_be64(info->rule->handle), + cpu_to_be64(rule->handle), NFTA_TRACE_PAD); } -static bool nft_trace_have_verdict_chain(struct nft_traceinfo *info) +static bool nft_trace_have_verdict_chain(const struct nft_verdict *verdict, + struct nft_traceinfo *info) { switch (info->type) { case NFT_TRACETYPE_RETURN: @@ -153,7 +156,7 @@ static bool nft_trace_have_verdict_chain(struct nft_traceinfo *info) return false; } - switch (info->verdict->code) { + switch (verdict->code) { case NFT_JUMP: case NFT_GOTO: break; @@ -164,9 +167,31 @@ static bool nft_trace_have_verdict_chain(struct nft_traceinfo *info) return true; } -void nft_trace_notify(struct nft_traceinfo *info) +static const struct nft_chain *nft_trace_get_chain(const struct nft_rule_dp *rule, + const struct nft_traceinfo *info) { - const struct nft_pktinfo *pkt = info->pkt; + const struct nft_rule_dp_last *last; + + if (!rule) + return &info->basechain->chain; + + while (!rule->is_last) + rule = nft_rule_next(rule); + + last = (const struct nft_rule_dp_last *)rule; + + if (WARN_ON_ONCE(!last->chain)) + return &info->basechain->chain; + + return last->chain; +} + +void nft_trace_notify(const struct nft_pktinfo *pkt, + const struct nft_verdict *verdict, + const struct nft_rule_dp *rule, + struct nft_traceinfo *info) +{ + const struct nft_chain *chain; struct nlmsghdr *nlh; struct sk_buff *skb; unsigned int size; @@ -176,9 +201,11 @@ void nft_trace_notify(struct nft_traceinfo *info) if (!nfnetlink_has_listeners(nft_net(pkt), NFNLGRP_NFTRACE)) return; + chain = nft_trace_get_chain(rule, info); + size = nlmsg_total_size(sizeof(struct nfgenmsg)) + - nla_total_size(strlen(info->chain->table->name)) + - nla_total_size(strlen(info->chain->name)) + + nla_total_size(strlen(chain->table->name)) + + nla_total_size(strlen(chain->name)) + nla_total_size_64bit(sizeof(__be64)) + /* rule handle */ nla_total_size(sizeof(__be32)) + /* trace type */ nla_total_size(0) + /* VERDICT, nested */ @@ -195,8 +222,8 @@ void nft_trace_notify(struct nft_traceinfo *info) nla_total_size(sizeof(u32)) + /* nfproto */ nla_total_size(sizeof(u32)); /* policy */ - if 
(nft_trace_have_verdict_chain(info)) - size += nla_total_size(strlen(info->verdict->chain->name)); /* jump target */ + if (nft_trace_have_verdict_chain(verdict, info)) + size += nla_total_size(strlen(verdict->chain->name)); /* jump target */ skb = nlmsg_new(size, GFP_ATOMIC); if (!skb) @@ -217,13 +244,13 @@ void nft_trace_notify(struct nft_traceinfo *info) if (nla_put_u32(skb, NFTA_TRACE_ID, info->skbid)) goto nla_put_failure; - if (nla_put_string(skb, NFTA_TRACE_CHAIN, info->chain->name)) + if (nla_put_string(skb, NFTA_TRACE_CHAIN, chain->name)) goto nla_put_failure; - if (nla_put_string(skb, NFTA_TRACE_TABLE, info->chain->table->name)) + if (nla_put_string(skb, NFTA_TRACE_TABLE, chain->table->name)) goto nla_put_failure; - if (nf_trace_fill_rule_info(skb, info)) + if (nf_trace_fill_rule_info(skb, verdict, rule, info)) goto nla_put_failure; switch (info->type) { @@ -232,11 +259,11 @@ void nft_trace_notify(struct nft_traceinfo *info) break; case NFT_TRACETYPE_RETURN: case NFT_TRACETYPE_RULE: - if (nft_verdict_dump(skb, NFTA_TRACE_VERDICT, info->verdict)) + if (nft_verdict_dump(skb, NFTA_TRACE_VERDICT, verdict)) goto nla_put_failure; /* pkt->skb undefined iff NF_STOLEN, disable dump */ - if (info->verdict->code == NF_STOLEN) + if (verdict->code == NF_STOLEN) info->packet_dumped = true; else mark = pkt->skb->mark; @@ -273,7 +300,6 @@ void nft_trace_notify(struct nft_traceinfo *info) } void nft_trace_init(struct nft_traceinfo *info, const struct nft_pktinfo *pkt, - const struct nft_verdict *verdict, const struct nft_chain *chain) { static siphash_key_t trace_key __read_mostly; @@ -283,8 +309,6 @@ void nft_trace_init(struct nft_traceinfo *info, const struct nft_pktinfo *pkt, info->trace = true; info->nf_trace = pkt->skb->nf_trace; info->packet_dumped = false; - info->pkt = pkt; - info->verdict = verdict; net_get_random_once(&trace_key, sizeof(trace_key)); diff --git a/net/netfilter/nfnetlink.c b/net/netfilter/nfnetlink.c index 81c7737c803a..ae7146475d17 100644 --- a/net/netfilter/nfnetlink.c +++ b/net/netfilter/nfnetlink.c @@ -590,8 +590,6 @@ done: goto replay_abort; } } - if (ss->cleanup) - ss->cleanup(net); nfnl_err_deliver(&err_list, oskb); kfree_skb(skb); diff --git a/net/rxrpc/protocol.h b/net/rxrpc/protocol.h index 6760cb99c6d6..e8ee4af43ca8 100644 --- a/net/rxrpc/protocol.h +++ b/net/rxrpc/protocol.h @@ -126,7 +126,7 @@ struct rxrpc_ackpacket { uint8_t nAcks; /* number of ACKs */ #define RXRPC_MAXACKS 255 - uint8_t acks[0]; /* list of ACK/NAKs */ + uint8_t acks[]; /* list of ACK/NAKs */ #define RXRPC_ACK_TYPE_NACK 0 #define RXRPC_ACK_TYPE_ACK 1 diff --git a/net/sched/act_pedit.c b/net/sched/act_pedit.c index 4559a1507ea5..fb93d4c1faca 100644 --- a/net/sched/act_pedit.c +++ b/net/sched/act_pedit.c @@ -30,12 +30,13 @@ static const struct nla_policy pedit_policy[TCA_PEDIT_MAX + 1] = { }; static const struct nla_policy pedit_key_ex_policy[TCA_PEDIT_KEY_EX_MAX + 1] = { - [TCA_PEDIT_KEY_EX_HTYPE] = { .type = NLA_U16 }, - [TCA_PEDIT_KEY_EX_CMD] = { .type = NLA_U16 }, + [TCA_PEDIT_KEY_EX_HTYPE] = + NLA_POLICY_MAX(NLA_U16, TCA_PEDIT_HDR_TYPE_MAX), + [TCA_PEDIT_KEY_EX_CMD] = NLA_POLICY_MAX(NLA_U16, TCA_PEDIT_CMD_MAX), }; static struct tcf_pedit_key_ex *tcf_pedit_keys_ex_parse(struct nlattr *nla, - u8 n) + u8 n, struct netlink_ext_ack *extack) { struct tcf_pedit_key_ex *keys_ex; struct tcf_pedit_key_ex *k; @@ -56,12 +57,14 @@ static struct tcf_pedit_key_ex *tcf_pedit_keys_ex_parse(struct nlattr *nla, struct nlattr *tb[TCA_PEDIT_KEY_EX_MAX + 1]; if (!n) { + NL_SET_ERR_MSG_MOD(extack, "Can't parse 
more extended keys than requested"); err = -EINVAL; goto err_out; } n--; if (nla_type(ka) != TCA_PEDIT_KEY_EX) { + NL_SET_ERR_MSG_ATTR(extack, ka, "Unknown attribute, expected extended key"); err = -EINVAL; goto err_out; } @@ -72,25 +75,26 @@ static struct tcf_pedit_key_ex *tcf_pedit_keys_ex_parse(struct nlattr *nla, if (err) goto err_out; - if (!tb[TCA_PEDIT_KEY_EX_HTYPE] || - !tb[TCA_PEDIT_KEY_EX_CMD]) { + if (NL_REQ_ATTR_CHECK(extack, nla, tb, TCA_PEDIT_KEY_EX_HTYPE)) { + NL_SET_ERR_MSG(extack, "Missing required attribute"); err = -EINVAL; goto err_out; } - k->htype = nla_get_u16(tb[TCA_PEDIT_KEY_EX_HTYPE]); - k->cmd = nla_get_u16(tb[TCA_PEDIT_KEY_EX_CMD]); - - if (k->htype > TCA_PEDIT_HDR_TYPE_MAX || - k->cmd > TCA_PEDIT_CMD_MAX) { + if (NL_REQ_ATTR_CHECK(extack, nla, tb, TCA_PEDIT_KEY_EX_CMD)) { + NL_SET_ERR_MSG(extack, "Missing required attribute"); err = -EINVAL; goto err_out; } + k->htype = nla_get_u16(tb[TCA_PEDIT_KEY_EX_HTYPE]); + k->cmd = nla_get_u16(tb[TCA_PEDIT_KEY_EX_CMD]); + k++; } if (n) { + NL_SET_ERR_MSG_MOD(extack, "Not enough extended keys to parse"); err = -EINVAL; goto err_out; } @@ -222,7 +226,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, } nparms->tcfp_keys_ex = - tcf_pedit_keys_ex_parse(tb[TCA_PEDIT_KEYS_EX], parm->nkeys); + tcf_pedit_keys_ex_parse(tb[TCA_PEDIT_KEYS_EX], parm->nkeys, extack); if (IS_ERR(nparms->tcfp_keys_ex)) { ret = PTR_ERR(nparms->tcfp_keys_ex); goto out_free; @@ -247,8 +251,16 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, memcpy(nparms->tcfp_keys, parm->keys, ksize); for (i = 0; i < nparms->tcfp_nkeys; ++i) { + u32 offmask = nparms->tcfp_keys[i].offmask; u32 cur = nparms->tcfp_keys[i].off; + /* The AT option can be added to static offsets in the datapath */ + if (!offmask && cur % 4) { + NL_SET_ERR_MSG_MOD(extack, "Offsets must be on 32bit boundaries"); + ret = -EINVAL; + goto put_chain; + } + /* sanitize the shift value for any later use */ nparms->tcfp_keys[i].shift = min_t(size_t, BITS_PER_TYPE(int) - 1, @@ -257,7 +269,7 @@ static int tcf_pedit_init(struct net *net, struct nlattr *nla, /* The AT option can read a single byte, we can bound the actual * value with uchar max. 
*/ - cur += (0xff & nparms->tcfp_keys[i].offmask) >> nparms->tcfp_keys[i].shift; + cur += (0xff & offmask) >> nparms->tcfp_keys[i].shift; /* Each key touches 4 bytes starting from the computed offset */ nparms->tcfp_off_max_hint = @@ -313,37 +325,28 @@ static bool offset_valid(struct sk_buff *skb, int offset) return true; } -static int pedit_skb_hdr_offset(struct sk_buff *skb, - enum pedit_header_type htype, int *hoffset) +static void pedit_skb_hdr_offset(struct sk_buff *skb, + enum pedit_header_type htype, int *hoffset) { - int ret = -EINVAL; - + /* 'htype' is validated in the netlink parsing */ switch (htype) { case TCA_PEDIT_KEY_EX_HDR_TYPE_ETH: - if (skb_mac_header_was_set(skb)) { + if (skb_mac_header_was_set(skb)) *hoffset = skb_mac_offset(skb); - ret = 0; - } break; case TCA_PEDIT_KEY_EX_HDR_TYPE_NETWORK: case TCA_PEDIT_KEY_EX_HDR_TYPE_IP4: case TCA_PEDIT_KEY_EX_HDR_TYPE_IP6: *hoffset = skb_network_offset(skb); - ret = 0; break; case TCA_PEDIT_KEY_EX_HDR_TYPE_TCP: case TCA_PEDIT_KEY_EX_HDR_TYPE_UDP: - if (skb_transport_header_was_set(skb)) { + if (skb_transport_header_was_set(skb)) *hoffset = skb_transport_offset(skb); - ret = 0; - } break; default: - ret = -EINVAL; break; } - - return ret; } TC_INDIRECT_SCOPE int tcf_pedit_act(struct sk_buff *skb, @@ -376,10 +379,9 @@ TC_INDIRECT_SCOPE int tcf_pedit_act(struct sk_buff *skb, for (i = parms->tcfp_nkeys; i > 0; i--, tkey++) { int offset = tkey->off; + int hoffset = 0; u32 *ptr, hdata; - int hoffset; u32 val; - int rc; if (tkey_ex) { htype = tkey_ex->htype; @@ -388,36 +390,30 @@ TC_INDIRECT_SCOPE int tcf_pedit_act(struct sk_buff *skb, tkey_ex++; } - rc = pedit_skb_hdr_offset(skb, htype, &hoffset); - if (rc) { - pr_info("tc action pedit bad header type specified (0x%x)\n", - htype); - goto bad; - } + pedit_skb_hdr_offset(skb, htype, &hoffset); if (tkey->offmask) { u8 *d, _d; if (!offset_valid(skb, hoffset + tkey->at)) { - pr_info("tc action pedit 'at' offset %d out of bounds\n", - hoffset + tkey->at); + pr_info_ratelimited("tc action pedit 'at' offset %d out of bounds\n", + hoffset + tkey->at); goto bad; } d = skb_header_pointer(skb, hoffset + tkey->at, sizeof(_d), &_d); if (!d) goto bad; - offset += (*d & tkey->offmask) >> tkey->shift; - } - if (offset % 4) { - pr_info("tc action pedit offset must be on 32 bit boundaries\n"); - goto bad; + offset += (*d & tkey->offmask) >> tkey->shift; + if (offset % 4) { + pr_info_ratelimited("tc action pedit offset must be on 32 bit boundaries\n"); + goto bad; + } } if (!offset_valid(skb, hoffset + offset)) { - pr_info("tc action pedit offset %d out of bounds\n", - hoffset + offset); + pr_info_ratelimited("tc action pedit offset %d out of bounds\n", hoffset + offset); goto bad; } @@ -434,8 +430,7 @@ TC_INDIRECT_SCOPE int tcf_pedit_act(struct sk_buff *skb, val = (*ptr + tkey->val) & ~tkey->mask; break; default: - pr_info("tc action pedit bad command (%d)\n", - cmd); + pr_info_ratelimited("tc action pedit bad command (%d)\n", cmd); goto bad; } diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index a9aadc4e6858..37e41f972f69 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -502,7 +502,7 @@ static void dev_watchdog(struct timer_list *t) if (netif_device_present(dev) && netif_running(dev) && netif_carrier_ok(dev)) { - int some_queue_timedout = 0; + unsigned int timedout_ms = 0; unsigned int i; unsigned long trans_start; @@ -514,16 +514,16 @@ static void dev_watchdog(struct timer_list *t) if (netif_xmit_stopped(txq) && time_after(jiffies, (trans_start + 
dev->watchdog_timeo))) { - some_queue_timedout = 1; + timedout_ms = jiffies_to_msecs(jiffies - trans_start); atomic_long_inc(&txq->trans_timeout); break; } } - if (unlikely(some_queue_timedout)) { + if (unlikely(timedout_ms)) { trace_net_dev_xmit_timeout(dev, i); - WARN_ONCE(1, KERN_INFO "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out\n", - dev->name, netdev_drivername(dev), i); + WARN_ONCE(1, "NETDEV WATCHDOG: %s (%s): transmit queue %u timed out %u ms\n", + dev->name, netdev_drivername(dev), i, timedout_ms); netif_freeze_queues(dev); dev->netdev_ops->ndo_tx_timeout(dev, i); netif_unfreeze_queues(dev); |
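
The act_pedit.c hunk above replaces the open-coded range checks on TCA_PEDIT_KEY_EX_HTYPE and TCA_PEDIT_KEY_EX_CMD with NLA_POLICY_MAX() entries in the netlink policy and reports parse failures through extack. The sketch below shows that general pattern in isolation; it is not part of the patch, and the EXAMPLE_* names and example_parse() helper are invented purely for illustration.

#include <net/netlink.h>

enum {
    EXAMPLE_ATTR_UNSPEC,
    EXAMPLE_ATTR_CMD,      /* u16, bounded by the policy below */
    __EXAMPLE_ATTR_MAX,
};
#define EXAMPLE_ATTR_MAX (__EXAMPLE_ATTR_MAX - 1)
#define EXAMPLE_CMD_MAX 3

static const struct nla_policy example_policy[EXAMPLE_ATTR_MAX + 1] = {
    /* values above EXAMPLE_CMD_MAX are rejected before the handler runs */
    [EXAMPLE_ATTR_CMD] = NLA_POLICY_MAX(NLA_U16, EXAMPLE_CMD_MAX),
};

static int example_parse(struct nlattr *nla, u16 *cmd,
                         struct netlink_ext_ack *extack)
{
    struct nlattr *tb[EXAMPLE_ATTR_MAX + 1];
    int err;

    err = nla_parse_nested(tb, EXAMPLE_ATTR_MAX, nla, example_policy,
                           extack);
    if (err)
        return err;

    /* NL_REQ_ATTR_CHECK() points extack at the missing attribute */
    if (NL_REQ_ATTR_CHECK(extack, nla, tb, EXAMPLE_ATTR_CMD))
        return -EINVAL;

    *cmd = nla_get_u16(tb[EXAMPLE_ATTR_CMD]);
    return 0;
}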
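
The sch_generic.c hunk changes dev_watchdog() from a boolean "some queue timed out" flag to reporting how long the stalled queue has been idle, using jiffies_to_msecs(jiffies - trans_start). A minimal sketch of that conversion follows; tx_stall_ms() is a hypothetical helper name used only for this illustration, not something added by the patch.

#include <linux/jiffies.h>

/* Return how long ago (in ms) the last transmit started, or 0 if the
 * watchdog deadline has not passed yet.
 */
static unsigned int tx_stall_ms(unsigned long trans_start,
                                unsigned long watchdog_timeo)
{
    /* time_after() copes with jiffies wrap-around */
    if (!time_after(jiffies, trans_start + watchdog_timeo))
        return 0;

    return jiffies_to_msecs(jiffies - trans_start);
}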