637 files changed, 16505 insertions, 4862 deletions
diff --git a/Documentation/admin-guide/cgroup-v2.rst b/Documentation/admin-guide/cgroup-v2.rst index 6be43781ec7f..baa07b30845e 100644 --- a/Documentation/admin-guide/cgroup-v2.rst +++ b/Documentation/admin-guide/cgroup-v2.rst @@ -1324,15 +1324,26 @@ PAGE_SIZE multiple when read back. pgmajfault Number of major page faults incurred - workingset_refault - Number of refaults of previously evicted pages + workingset_refault_anon + Number of refaults of previously evicted anonymous pages. - workingset_activate - Number of refaulted pages that were immediately activated + workingset_refault_file + Number of refaults of previously evicted file pages. - workingset_restore - Number of restored pages which have been detected as an active - workingset before they got reclaimed. + workingset_activate_anon + Number of refaulted anonymous pages that were immediately + activated. + + workingset_activate_file + Number of refaulted file pages that were immediately activated. + + workingset_restore_anon + Number of restored anonymous pages which have been detected as + an active workingset before they got reclaimed. + + workingset_restore_file + Number of restored file pages which have been detected as an + active workingset before they got reclaimed. workingset_nodereclaim Number of times a shadow node has been reclaimed diff --git a/Documentation/admin-guide/device-mapper/dm-crypt.rst b/Documentation/admin-guide/device-mapper/dm-crypt.rst index 8f4a3f889d43..bc28a9527ee5 100644 --- a/Documentation/admin-guide/device-mapper/dm-crypt.rst +++ b/Documentation/admin-guide/device-mapper/dm-crypt.rst @@ -67,7 +67,7 @@ Parameters:: the value passed in <key_size>. <key_type> - Either 'logon' or 'user' kernel key type. + Either 'logon', 'user' or 'encrypted' kernel key type. <key_description> The kernel keyring key description crypt target should look for @@ -121,6 +121,14 @@ submit_from_crypt_cpus thread because it benefits CFQ to have writes submitted using the same context. +no_read_workqueue + Bypass dm-crypt internal workqueue and process read requests synchronously. + +no_write_workqueue + Bypass dm-crypt internal workqueue and process write requests synchronously. + This option is automatically enabled for host-managed zoned block devices + (e.g. host-managed SMR hard-disks). + integrity:<bytes>:<type> The device requires additional <bytes> metadata per-sector stored in per-bio integrity structure. This metadata must by provided diff --git a/Documentation/admin-guide/pm/cpuidle.rst b/Documentation/admin-guide/pm/cpuidle.rst index a96a423e3779..6ebe163f9dfe 100644 --- a/Documentation/admin-guide/pm/cpuidle.rst +++ b/Documentation/admin-guide/pm/cpuidle.rst @@ -690,7 +690,7 @@ which of the two parameters is added to the kernel command line. In the instruction of the CPUs (which, as a rule, suspends the execution of the program and causes the hardware to attempt to enter the shallowest available idle state) for this purpose, and if ``idle=poll`` is used, idle CPUs will execute a -more or less ``lightweight'' sequence of instructions in a tight loop. [Note +more or less "lightweight" sequence of instructions in a tight loop. [Note that using ``idle=poll`` is somewhat drastic in many cases, as preventing idle CPUs from saving almost any energy at all may not be the only effect of it. 
For example, on Intel hardware it effectively prevents CPUs from using diff --git a/Documentation/bpf/ringbuf.rst b/Documentation/bpf/ringbuf.rst index 75f943f0009d..6a615cd62bda 100644 --- a/Documentation/bpf/ringbuf.rst +++ b/Documentation/bpf/ringbuf.rst @@ -182,9 +182,6 @@ in the order of reservations, but only after all previous records where already committed. It is thus possible for slow producers to temporarily hold off submitted records, that were reserved later. -Reservation/commit/consumer protocol is verified by litmus tests in -Documentation/litmus_tests/bpf-rb/_. - One interesting implementation bit, that significantly simplifies (and thus speeds up as well) implementation of both producers and consumers is how data area is mapped twice contiguously back-to-back in the virtual memory. This @@ -200,7 +197,7 @@ a self-pacing notifications of new data being availability. being available after commit only if consumer has already caught up right up to the record being committed. If not, consumer still has to catch up and thus will see new data anyways without needing an extra poll notification. -Benchmarks (see tools/testing/selftests/bpf/benchs/bench_ringbuf.c_) show that +Benchmarks (see tools/testing/selftests/bpf/benchs/bench_ringbufs.c) show that this allows to achieve a very high throughput without having to resort to tricks like "notify only every Nth sample", which are necessary with perf buffer. For extreme cases, when BPF program wants more manual control of diff --git a/Documentation/devicetree/bindings/arm/bcm/raspberrypi,bcm2835-firmware.yaml b/Documentation/devicetree/bindings/arm/bcm/raspberrypi,bcm2835-firmware.yaml index 17e4f20c8d39..66ac37c21fa6 100644 --- a/Documentation/devicetree/bindings/arm/bcm/raspberrypi,bcm2835-firmware.yaml +++ b/Documentation/devicetree/bindings/arm/bcm/raspberrypi,bcm2835-firmware.yaml @@ -23,7 +23,7 @@ properties: compatible: items: - const: raspberrypi,bcm2835-firmware - - const: simple-bus + - const: simple-mfd mboxes: $ref: '/schemas/types.yaml#/definitions/phandle' @@ -48,6 +48,22 @@ properties: - compatible - "#clock-cells" + reset: + type: object + + properties: + compatible: + const: raspberrypi,firmware-reset + + "#reset-cells": + const: 1 + description: > + The argument is the ID of the firmware reset line to affect. + + required: + - compatible + - "#reset-cells" + additionalProperties: false required: @@ -57,12 +73,17 @@ required: examples: - | firmware { - compatible = "raspberrypi,bcm2835-firmware", "simple-bus"; + compatible = "raspberrypi,bcm2835-firmware", "simple-mfd"; mboxes = <&mailbox>; firmware_clocks: clocks { compatible = "raspberrypi,firmware-clocks"; #clock-cells = <1>; }; + + reset: reset { + compatible = "raspberrypi,firmware-reset"; + #reset-cells = <1>; + }; }; ... 
diff --git a/Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml b/Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml index 85ef69ffebed..1465c9ebaf93 100644 --- a/Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml +++ b/Documentation/devicetree/bindings/crypto/ti,sa2ul.yaml @@ -67,7 +67,7 @@ examples: main_crypto: crypto@4e00000 { compatible = "ti,j721-sa2ul"; - reg = <0x0 0x4e00000 0x0 0x1200>; + reg = <0x4e00000 0x1200>; power-domains = <&k3_pds 264 TI_SCI_PD_EXCLUSIVE>; dmas = <&main_udmap 0xc000>, <&main_udmap 0x4000>, <&main_udmap 0x4001>; diff --git a/Documentation/devicetree/bindings/display/xlnx/xlnx,zynqmp-dpsub.yaml b/Documentation/devicetree/bindings/display/xlnx/xlnx,zynqmp-dpsub.yaml index 52a939cade3b..7b9d468c3e52 100644 --- a/Documentation/devicetree/bindings/display/xlnx/xlnx,zynqmp-dpsub.yaml +++ b/Documentation/devicetree/bindings/display/xlnx/xlnx,zynqmp-dpsub.yaml @@ -145,10 +145,10 @@ examples: display@fd4a0000 { compatible = "xlnx,zynqmp-dpsub-1.7"; - reg = <0x0 0xfd4a0000 0x0 0x1000>, - <0x0 0xfd4aa000 0x0 0x1000>, - <0x0 0xfd4ab000 0x0 0x1000>, - <0x0 0xfd4ac000 0x0 0x1000>; + reg = <0xfd4a0000 0x1000>, + <0xfd4aa000 0x1000>, + <0xfd4ab000 0x1000>, + <0xfd4ac000 0x1000>; reg-names = "dp", "blend", "av_buf", "aud"; interrupts = <0 119 4>; interrupt-parent = <&gic>; diff --git a/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dpdma.yaml b/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dpdma.yaml index 5de510f8c88c..2a595b18ff6c 100644 --- a/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dpdma.yaml +++ b/Documentation/devicetree/bindings/dma/xilinx/xlnx,zynqmp-dpdma.yaml @@ -57,7 +57,7 @@ examples: dma: dma-controller@fd4c0000 { compatible = "xlnx,zynqmp-dpdma"; - reg = <0x0 0xfd4c0000 0x0 0x1000>; + reg = <0xfd4c0000 0x1000>; interrupts = <GIC_SPI 122 IRQ_TYPE_LEVEL_HIGH>; interrupt-parent = <&gic>; clocks = <&dpdma_clk>; diff --git a/Documentation/devicetree/bindings/gpio/sgpio-aspeed.txt b/Documentation/devicetree/bindings/gpio/sgpio-aspeed.txt index d4d83916c09d..be329ea4794f 100644 --- a/Documentation/devicetree/bindings/gpio/sgpio-aspeed.txt +++ b/Documentation/devicetree/bindings/gpio/sgpio-aspeed.txt @@ -20,8 +20,9 @@ Required properties: - gpio-controller : Marks the device node as a GPIO controller - interrupts : Interrupt specifier, see interrupt-controller/interrupts.txt - interrupt-controller : Mark the GPIO controller as an interrupt-controller -- ngpios : number of GPIO lines, see gpio.txt - (should be multiple of 8, up to 80 pins) +- ngpios : number of *hardware* GPIO lines, see gpio.txt. This will expose + 2 software GPIOs per hardware GPIO: one for hardware input, one for hardware + output. Up to 80 pins, must be a multiple of 8. 
- clocks : A phandle to the APB clock for SGPM clock division - bus-frequency : SGPM CLK frequency diff --git a/Documentation/devicetree/bindings/leds/cznic,turris-omnia-leds.yaml b/Documentation/devicetree/bindings/leds/cznic,turris-omnia-leds.yaml index 24ad1446445e..fe7fa25877fd 100644 --- a/Documentation/devicetree/bindings/leds/cznic,turris-omnia-leds.yaml +++ b/Documentation/devicetree/bindings/leds/cznic,turris-omnia-leds.yaml @@ -30,7 +30,7 @@ properties: const: 0 patternProperties: - "^multi-led[0-9a-f]$": + "^multi-led@[0-9a-b]$": type: object allOf: - $ref: leds-class-multicolor.yaml# diff --git a/Documentation/devicetree/bindings/media/i2c/imx274.txt b/Documentation/devicetree/bindings/media/i2c/imx274.txt deleted file mode 100644 index 0727079d2410..000000000000 --- a/Documentation/devicetree/bindings/media/i2c/imx274.txt +++ /dev/null @@ -1,38 +0,0 @@ -* Sony 1/2.5-Inch 8.51Mp CMOS Digital Image Sensor - -The Sony imx274 is a 1/2.5-inch CMOS active pixel digital image sensor with -an active array size of 3864H x 2202V. It is programmable through I2C -interface. The I2C address is fixed to 0x1a as per sensor data sheet. -Image data is sent through MIPI CSI-2, which is configured as 4 lanes -at 1440 Mbps. - - -Required Properties: -- compatible: value should be "sony,imx274" for imx274 sensor -- reg: I2C bus address of the device - -Optional Properties: -- reset-gpios: Sensor reset GPIO -- clocks: Reference to the input clock. -- clock-names: Should be "inck". -- VANA-supply: Sensor 2.8v analog supply. -- VDIG-supply: Sensor 1.8v digital core supply. -- VDDL-supply: Sensor digital IO 1.2v supply. - -The imx274 device node should contain one 'port' child node with -an 'endpoint' subnode. For further reading on port node refer to -Documentation/devicetree/bindings/media/video-interfaces.txt. - -Example: - sensor@1a { - compatible = "sony,imx274"; - reg = <0x1a>; - #address-cells = <1>; - #size-cells = <0>; - reset-gpios = <&gpio_sensor 0 0>; - port { - sensor_out: endpoint { - remote-endpoint = <&csiss_in>; - }; - }; - }; diff --git a/Documentation/devicetree/bindings/media/i2c/sony,imx274.yaml b/Documentation/devicetree/bindings/media/i2c/sony,imx274.yaml new file mode 100644 index 000000000000..f697e1a20beb --- /dev/null +++ b/Documentation/devicetree/bindings/media/i2c/sony,imx274.yaml @@ -0,0 +1,76 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/media/i2c/sony,imx274.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Sony 1/2.5-Inch 8.51MP CMOS Digital Image Sensor + +maintainers: + - Leon Luo <leonl@leopardimaging.com> + +description: | + The Sony IMX274 is a 1/2.5-inch CMOS active pixel digital image sensor with an + active array size of 3864H x 2202V. It is programmable through I2C interface. + Image data is sent through MIPI CSI-2, which is configured as 4 lanes at 1440 + Mbps. + +properties: + compatible: + const: sony,imx274 + + reg: + const: 0x1a + + reset-gpios: + maxItems: 1 + + clocks: + maxItems: 1 + + clock-names: + const: inck + + vana-supply: + description: Sensor 2.8 V analog supply. + maxItems: 1 + + vdig-supply: + description: Sensor 1.8 V digital core supply. + maxItems: 1 + + vddl-supply: + description: Sensor digital IO 1.2 V supply. + maxItems: 1 + + port: + type: object + description: Output video port. See ../video-interfaces.txt. 
+ +required: + - compatible + - reg + - port + +additionalProperties: false + +examples: + - | + i2c0 { + #address-cells = <1>; + #size-cells = <0>; + + imx274: camera-sensor@1a { + compatible = "sony,imx274"; + reg = <0x1a>; + reset-gpios = <&gpio_sensor 0 0>; + + port { + sensor_out: endpoint { + remote-endpoint = <&csiss_in>; + }; + }; + }; + }; + +... diff --git a/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.txt b/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.txt index ed47e5cd067e..7c70f2ad9942 100644 --- a/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.txt +++ b/Documentation/devicetree/bindings/phy/fsl,imx8mq-usb-phy.txt @@ -1,7 +1,7 @@ * Freescale i.MX8MQ USB3 PHY binding Required properties: -- compatible: Should be "fsl,imx8mq-usb-phy" +- compatible: Should be "fsl,imx8mq-usb-phy" or "fsl,imx8mp-usb-phy" - #phys-cells: must be 0 (see phy-bindings.txt in this directory) - reg: The base address and length of the registers - clocks: phandles to the clocks for each clock listed in clock-names diff --git a/Documentation/devicetree/bindings/phy/intel,lgm-emmc-phy.yaml b/Documentation/devicetree/bindings/phy/intel,lgm-emmc-phy.yaml index 77bb5309918e..edd9d70a672a 100644 --- a/Documentation/devicetree/bindings/phy/intel,lgm-emmc-phy.yaml +++ b/Documentation/devicetree/bindings/phy/intel,lgm-emmc-phy.yaml @@ -23,7 +23,9 @@ description: |+ properties: compatible: - const: intel,lgm-emmc-phy + oneOf: + - const: intel,lgm-emmc-phy + - const: intel,keembay-emmc-phy "#phy-cells": const: 0 @@ -34,6 +36,10 @@ properties: clocks: maxItems: 1 + clock-names: + items: + - const: emmcclk + required: - "#phy-cells" - compatible @@ -57,4 +63,13 @@ examples: #phy-cells = <0>; }; }; + + - | + phy@20290000 { + compatible = "intel,keembay-emmc-phy"; + reg = <0x20290000 0x54>; + clocks = <&emmc>; + clock-names = "emmcclk"; + #phy-cells = <0>; + }; ... 
diff --git a/Documentation/devicetree/bindings/phy/intel,lgm-usb-phy.yaml b/Documentation/devicetree/bindings/phy/intel,lgm-usb-phy.yaml new file mode 100644 index 000000000000..ce62c0b94daf --- /dev/null +++ b/Documentation/devicetree/bindings/phy/intel,lgm-usb-phy.yaml @@ -0,0 +1,58 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/phy/intel,lgm-usb-phy.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Intel LGM USB PHY Device Tree Bindings + +maintainers: + - Vadivel Murugan Ramuthevar <vadivel.muruganx.ramuthevar@linux.intel.com> + +properties: + compatible: + const: intel,lgm-usb-phy + + reg: + maxItems: 1 + + clocks: + maxItems: 1 + + resets: + items: + - description: USB PHY and Host controller reset + - description: APB BUS reset + - description: General Hardware reset + + reset-names: + items: + - const: phy + - const: apb + - const: phy31 + + "#phy-cells": + const: 0 + +required: + - compatible + - clocks + - reg + - resets + - reset-names + - "#phy-cells" + +additionalProperties: false + +examples: + - | + usb-phy@e7e00000 { + compatible = "intel,lgm-usb-phy"; + reg = <0xe7e00000 0x10000>; + clocks = <&cgu0 153>; + resets = <&rcu 0x70 0x24>, + <&rcu 0x70 0x26>, + <&rcu 0x70 0x28>; + reset-names = "phy", "apb", "phy31"; + #phy-cells = <0>; + }; diff --git a/Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml b/Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml index 4071438be2ba..e266ade53d87 100644 --- a/Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml +++ b/Documentation/devicetree/bindings/phy/phy-cadence-torrent.yaml @@ -4,11 +4,13 @@ $id: "http://devicetree.org/schemas/phy/phy-cadence-torrent.yaml#" $schema: "http://devicetree.org/meta-schemas/core.yaml#" -title: Cadence Torrent SD0801 PHY binding for DisplayPort +title: Cadence Torrent SD0801 PHY binding description: This binding describes the Cadence SD0801 PHY (also known as Torrent PHY) - hardware included with the Cadence MHDP DisplayPort controller. + hardware included with the Cadence MHDP DisplayPort controller. Torrent + PHY also supports multilink multiprotocol combinations including protocols + such as PCIe, USB, SGMII, QSGMII etc. maintainers: - Swapnil Jakhade <sjakhade@cadence.com> @@ -49,13 +51,21 @@ properties: - const: dptx_phy resets: - maxItems: 1 - description: - Torrent PHY reset. - See Documentation/devicetree/bindings/reset/reset.txt + minItems: 1 + maxItems: 2 + items: + - description: Torrent PHY reset. + - description: Torrent APB reset. This is optional. + + reset-names: + minItems: 1 + maxItems: 2 + items: + - const: torrent_reset + - const: torrent_apb patternProperties: - '^phy@[0-7]+$': + '^phy@[0-3]$': type: object description: Each group of PHY lanes with a single master lane should be represented as a sub-node. @@ -63,6 +73,8 @@ patternProperties: reg: description: The master lane number. This is the lowest numbered lane in the lane group. + minimum: 0 + maximum: 3 resets: minItems: 1 @@ -78,15 +90,25 @@ patternProperties: Specifies the type of PHY for which the group of PHY lanes is used. Refer include/dt-bindings/phy/phy.h. Constants from the header should be used. $ref: /schemas/types.yaml#/definitions/uint32 - enum: [1, 2, 3, 4, 5, 6] + minimum: 1 + maximum: 9 cdns,num-lanes: description: - Number of DisplayPort lanes. + Number of lanes. 
$ref: /schemas/types.yaml#/definitions/uint32 - enum: [1, 2, 4] + enum: [1, 2, 3, 4] default: 4 + cdns,ssc-mode: + description: + Specifies the Spread Spectrum Clocking mode used. It can be NO_SSC, + EXTERNAL_SSC or INTERNAL_SSC. + Refer include/dt-bindings/phy/phy-cadence-torrent.h for the constants to be used. + $ref: /schemas/types.yaml#/definitions/uint32 + enum: [0, 1, 2] + default: 0 + cdns,max-bit-rate: description: Maximum DisplayPort link bit rate to use, in Mbps @@ -99,6 +121,7 @@ patternProperties: - resets - "#phy-cells" - cdns,phy-type + - cdns,num-lanes additionalProperties: false @@ -111,6 +134,7 @@ required: - reg - reg-names - resets + - reset-names additionalProperties: false @@ -128,18 +152,56 @@ examples: <0xf0 0xfb030a00 0x0 0x00000040>; reg-names = "torrent_phy", "dptx_phy"; resets = <&phyrst 0>; + reset-names = "torrent_reset"; clocks = <&ref_clk>; clock-names = "refclk"; #address-cells = <1>; #size-cells = <0>; phy@0 { - reg = <0>; - resets = <&phyrst 1>, <&phyrst 2>, - <&phyrst 3>, <&phyrst 4>; - #phy-cells = <0>; - cdns,phy-type = <PHY_TYPE_DP>; - cdns,num-lanes = <4>; - cdns,max-bit-rate = <8100>; + reg = <0>; + resets = <&phyrst 1>, <&phyrst 2>, + <&phyrst 3>, <&phyrst 4>; + #phy-cells = <0>; + cdns,phy-type = <PHY_TYPE_DP>; + cdns,num-lanes = <4>; + cdns,max-bit-rate = <8100>; + }; + }; + }; + - | + #include <dt-bindings/phy/phy.h> + #include <dt-bindings/phy/phy-cadence-torrent.h> + + bus { + #address-cells = <2>; + #size-cells = <2>; + + torrent-phy@f0fb500000 { + compatible = "cdns,torrent-phy"; + reg = <0xf0 0xfb500000 0x0 0x00100000>; + reg-names = "torrent_phy"; + resets = <&phyrst 0>, <&phyrst 1>; + reset-names = "torrent_reset", "torrent_apb"; + clocks = <&ref_clk>; + clock-names = "refclk"; + #address-cells = <1>; + #size-cells = <0>; + phy@0 { + reg = <0>; + resets = <&phyrst 2>, <&phyrst 3>; + #phy-cells = <0>; + cdns,phy-type = <PHY_TYPE_PCIE>; + cdns,num-lanes = <2>; + cdns,ssc-mode = <TORRENT_SERDES_NO_SSC>; + }; + + phy@2 { + reg = <2>; + resets = <&phyrst 4>; + #phy-cells = <0>; + cdns,phy-type = <PHY_TYPE_SGMII>; + cdns,num-lanes = <1>; + cdns,ssc-mode = <TORRENT_SERDES_NO_SSC>; }; }; }; diff --git a/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml b/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml index ef8ae9f73092..33974ad10afe 100644 --- a/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml +++ b/Documentation/devicetree/bindings/phy/qcom,qmp-usb3-dp-phy.yaml @@ -13,17 +13,21 @@ maintainers: properties: compatible: enum: + - qcom,sc7180-qmp-usb3-dp-phy - qcom,sc7180-qmp-usb3-phy + - qcom,sdm845-qmp-usb3-dp-phy - qcom,sdm845-qmp-usb3-phy reg: items: - - description: Address and length of PHY's common serdes block. + - description: Address and length of PHY's USB serdes block. - description: Address and length of the DP_COM control block. + - description: Address and length of PHY's DP serdes block. reg-names: items: - - const: reg-base + - const: usb - const: dp_com + - const: dp "#clock-cells": enum: [ 1, 2 ] @@ -74,16 +78,74 @@ properties: #Required nodes: patternProperties: - "^phy@[0-9a-f]+$": + "^usb3-phy@[0-9a-f]+$": type: object description: - Each device node of QMP phy is required to have as many child nodes as - the number of lanes the PHY has. + The USB3 PHY. + + properties: + reg: + items: + - description: Address and length of TX. + - description: Address and length of RX. + - description: Address and length of PCS. + - description: Address and length of TX2. 
+ - description: Address and length of RX2. + - description: Address and length of pcs_misc. + + clocks: + items: + - description: pipe clock + + clock-names: + items: + - const: pipe0 + + clock-output-names: + items: + - const: usb3_phy_pipe_clk_src + + '#clock-cells': + const: 0 + + '#phy-cells': + const: 0 + + required: + - reg + - clocks + - clock-names + - '#clock-cells' + - '#phy-cells' + + "^dp-phy@[0-9a-f]+$": + type: object + description: + The DP PHY. + + properties: + reg: + items: + - description: Address and length of TX. + - description: Address and length of RX. + - description: Address and length of PCS. + - description: Address and length of TX2. + - description: Address and length of RX2. + + '#clock-cells': + const: 1 + + '#phy-cells': + const: 0 + + required: + - reg + - '#clock-cells' + - '#phy-cells' required: - compatible - reg - - reg-names - "#clock-cells" - "#address-cells" - "#size-cells" @@ -101,14 +163,15 @@ examples: - | #include <dt-bindings/clock/qcom,gcc-sdm845.h> usb_1_qmpphy: phy-wrapper@88e9000 { - compatible = "qcom,sdm845-qmp-usb3-phy"; + compatible = "qcom,sdm845-qmp-usb3-dp-phy"; reg = <0x088e9000 0x18c>, - <0x088e8000 0x10>; - reg-names = "reg-base", "dp_com"; + <0x088e8000 0x10>, + <0x088ea000 0x40>; + reg-names = "usb", "dp_com", "dp"; #clock-cells = <1>; #address-cells = <1>; #size-cells = <1>; - ranges = <0x0 0x088e9000 0x1000>; + ranges = <0x0 0x088e9000 0x2000>; clocks = <&gcc GCC_USB3_PRIM_PHY_AUX_CLK>, <&gcc GCC_USB_PHY_CFG_AHB2PHY_CLK>, @@ -123,7 +186,7 @@ examples: vdda-phy-supply = <&vdda_usb2_ss_1p2>; vdda-pll-supply = <&vdda_usb2_ss_core>; - phy@200 { + usb3-phy@200 { reg = <0x200 0x128>, <0x400 0x200>, <0xc00 0x218>, @@ -136,4 +199,14 @@ examples: clock-names = "pipe0"; clock-output-names = "usb3_phy_pipe_clk_src"; }; + + dp-phy@88ea200 { + reg = <0xa200 0x200>, + <0xa400 0x200>, + <0xaa00 0x200>, + <0xa600 0x200>, + <0xa800 0x200>; + #clock-cells = <1>; + #phy-cells = <0>; + }; }; diff --git a/Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml b/Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml new file mode 100644 index 000000000000..bab2ff4d9dc9 --- /dev/null +++ b/Documentation/devicetree/bindings/phy/socionext,uniphier-ahci-phy.yaml @@ -0,0 +1,76 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/phy/socionext,uniphier-ahci-phy.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Socionext UniPhier AHCI PHY + +description: | + This describes the deivcetree bindings for PHY interfaces built into + AHCI controller implemented on Socionext UniPhier SoCs. 
+ +maintainers: + - Kunihiko Hayashi <hayashi.kunihiko@socionext.com> + +properties: + compatible: + enum: + - socionext,uniphier-pxs2-ahci-phy + - socionext,uniphier-pxs3-ahci-phy + + reg: + description: PHY register region (offset and length) + + "#phy-cells": + const: 0 + + clocks: + maxItems: 2 + + clock-names: + oneOf: + - items: # for PXs2 + - const: link + - items: # for others + - const: link + - const: phy + + resets: + maxItems: 2 + + reset-names: + items: + - const: link + - const: phy + +required: + - compatible + - reg + - "#phy-cells" + - clocks + - clock-names + - resets + - reset-names + +additionalProperties: false + +examples: + - | + ahci-glue@65700000 { + compatible = "socionext,uniphier-pxs3-ahci-glue", + "simple-mfd"; + #address-cells = <1>; + #size-cells = <1>; + ranges = <0 0x65700000 0x100>; + + ahci_phy: phy@10 { + compatible = "socionext,uniphier-pxs3-ahci-phy"; + reg = <0x10 0x10>; + #phy-cells = <0>; + clock-names = "link", "phy"; + clocks = <&sys_clk 28>, <&sys_clk 30>; + reset-names = "link", "phy"; + resets = <&sys_rst 28>, <&sys_rst 30>; + }; + }; diff --git a/Documentation/devicetree/bindings/phy/ti,omap-usb2.yaml b/Documentation/devicetree/bindings/phy/ti,omap-usb2.yaml new file mode 100644 index 000000000000..15207ca9548f --- /dev/null +++ b/Documentation/devicetree/bindings/phy/ti,omap-usb2.yaml @@ -0,0 +1,74 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/phy/ti,omap-usb2.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: OMAP USB2 PHY + +maintainers: + - Kishon Vijay Abraham I <kishon@ti.com> + - Roger Quadros <rogerq@ti.com> + +properties: + compatible: + oneOf: + - items: + - enum: + - ti,dra7x-usb2 + - ti,dra7x-usb2-phy2 + - ti,am654-usb2 + - enum: + - ti,omap-usb2 + - items: + - const: ti,am437x-usb2 + - items: + - const: ti,omap-usb2 + + reg: + maxItems: 1 + + "#phy-cells": + const: 0 + + clocks: + minItems: 1 + items: + - description: wakeup clock + - description: reference clock + + clock-names: + minItems: 1 + items: + - const: wkupclk + - const: refclk + + syscon-phy-power: + $ref: /schemas/types.yaml#definitions/phandle-array + description: + phandle/offset pair. Phandle to the system control module and + register offset to power on/off the PHY. + + ctrl-module: + $ref: /schemas/types.yaml#definitions/phandle + description: + (deprecated) phandle of the control module used by PHY driver + to power on the PHY. Use syscon-phy-power instead. + +required: + - compatible + - reg + - "#phy-cells" + - clocks + - clock-names + +examples: + - | + usb0_phy: phy@4100000 { + compatible = "ti,am654-usb2", "ti,omap-usb2"; + reg = <0x4100000 0x54>; + syscon-phy-power = <&scm_conf 0x4000>; + clocks = <&k3_clks 151 0>, <&k3_clks 151 1>; + clock-names = "wkupclk", "refclk"; + #phy-cells = <0>; + }; diff --git a/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml b/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml index 5ffc95c62909..c33e9bc79521 100644 --- a/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml +++ b/Documentation/devicetree/bindings/phy/ti,phy-j721e-wiz.yaml @@ -45,9 +45,15 @@ properties: ranges: true assigned-clocks: + minItems: 1 maxItems: 2 assigned-clock-parents: + minItems: 1 + maxItems: 2 + + assigned-clock-rates: + minItems: 1 maxItems: 2 typec-dir-gpios: @@ -119,9 +125,10 @@ patternProperties: logic. 
properties: clocks: + minItems: 2 maxItems: 4 - description: Phandle to four clock nodes representing the inputs to - refclk_dig + description: Phandle to two (Torrent) or four (Sierra) clock nodes representing + the inputs to refclk_dig "#clock-cells": const: 0 @@ -203,7 +210,7 @@ examples: }; refclk-dig { - clocks = <&k3_clks 292 11>, <&k3_clks 292 0>, + clocks = <&k3_clks 292 11>, <&k3_clks 292 0>, <&dummy_cmn_refclk>, <&dummy_cmn_refclk1>; #clock-cells = <0>; assigned-clocks = <&wiz0_refclk_dig>; diff --git a/Documentation/devicetree/bindings/phy/ti-phy.txt b/Documentation/devicetree/bindings/phy/ti-phy.txt index 8f93c3b694a7..60c9d0ac75e6 100644 --- a/Documentation/devicetree/bindings/phy/ti-phy.txt +++ b/Documentation/devicetree/bindings/phy/ti-phy.txt @@ -27,43 +27,6 @@ omap_control_usb: omap-control-usb@4a002300 { reg-names = "otghs_control"; }; -OMAP USB2 PHY - -Required properties: - - compatible: Should be "ti,omap-usb2" - Should be "ti,dra7x-usb2" for the 1st instance of USB2 PHY on - DRA7x - Should be "ti,dra7x-usb2-phy2" for the 2nd instance of USB2 PHY - in DRA7x - Should be "ti,am654-usb2" for the USB2 PHYs on AM654. - - reg : Address and length of the register set for the device. - - #phy-cells: determine the number of cells that should be given in the - phandle while referencing this phy. - - clocks: a list of phandles and clock-specifier pairs, one for each entry in - clock-names. - - clock-names: should include: - * "wkupclk" - wakeup clock. - * "refclk" - reference clock (optional). - -Deprecated properties: - - ctrl-module : phandle of the control module used by PHY driver to power on - the PHY. - -Recommended properies: -- syscon-phy-power : phandle/offset pair. Phandle to the system control - module and the register offset to power on/off the PHY. - -This is usually a subnode of ocp2scp to which it is connected. - -usb2phy@4a0ad080 { - compatible = "ti,omap-usb2"; - reg = <0x4a0ad080 0x58>; - ctrl-module = <&omap_control_usb>; - #phy-cells = <0>; - clocks = <&usb_phy_cm_clk32k>, <&usb_otg_ss_refclk960m>; - clock-names = "wkupclk", "refclk"; -}; - TI PIPE3 PHY Required properties: diff --git a/Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml b/Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml index 5b04a7dfa018..c0058332b967 100644 --- a/Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml +++ b/Documentation/devicetree/bindings/usb/amlogic,meson-g12a-usb-ctrl.yaml @@ -25,13 +25,14 @@ description: | The Amlogic A1 embeds a DWC3 USB IP Core configured for USB2 in host-only mode. - The Amlogic GXL & GXM SoCs doesn't embed an USB3 PHY. + The Amlogic GXL, GXM & AXG SoCs doesn't embed an USB3 PHY. 
properties: compatible: enum: - amlogic,meson-gxl-usb-ctrl - amlogic,meson-gxm-usb-ctrl + - amlogic,meson-axg-usb-ctrl - amlogic,meson-g12a-usb-ctrl - amlogic,meson-a1-usb-ctrl @@ -155,6 +156,25 @@ allOf: properties: compatible: enum: + - amlogic,meson-axg-usb-ctrl + + then: + properties: + phy-names: + items: + - const: usb2-phy1 # USB2 PHY1 if USBOTG_B port is used + clocks: + minItems: 2 + clock-names: + items: + - const: usb_ctrl + - const: ddr + required: + - clock-names + - if: + properties: + compatible: + enum: - amlogic,meson-a1-usb-ctrl then: diff --git a/Documentation/devicetree/bindings/usb/atmel-usb.txt b/Documentation/devicetree/bindings/usb/atmel-usb.txt index 423b99a8fd97..a4002624ba14 100644 --- a/Documentation/devicetree/bindings/usb/atmel-usb.txt +++ b/Documentation/devicetree/bindings/usb/atmel-usb.txt @@ -82,6 +82,7 @@ Required properties: "atmel,at91sam9rl-udc" "atmel,at91sam9g45-udc" "atmel,sama5d3-udc" + "microchip,sam9x60-udc" - reg: Address and length of the register set for the device - interrupts: Should contain usba interrupt - clocks: Should reference the peripheral and host clocks diff --git a/Documentation/devicetree/bindings/usb/cdns,usb3.yaml b/Documentation/devicetree/bindings/usb/cdns,usb3.yaml new file mode 100644 index 000000000000..ac20b98e9910 --- /dev/null +++ b/Documentation/devicetree/bindings/usb/cdns,usb3.yaml @@ -0,0 +1,96 @@ +# SPDX-License-Identifier: GPL-2.0-only OR BSD-2-Clause +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/usb/cdns,usb3.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Cadence USBSS-DRD controller bindings + +maintainers: + - Pawel Laszczak <pawell@cadence.com> + +properties: + compatible: + const: cdns,usb3 + + reg: + items: + - description: OTG controller registers + - description: XHCI Host controller registers + - description: DEVICE controller registers + + reg-names: + items: + - const: otg + - const: xhci + - const: dev + + interrupts: + items: + - description: OTG/DRD controller interrupt + - description: XHCI host controller interrupt + - description: Device controller interrupt + + interrupt-names: + items: + - const: host + - const: peripheral + - const: otg + + dr_mode: + enum: [host, otg, peripheral] + + maximum-speed: + enum: [super-speed, high-speed, full-speed] + + phys: + minItems: 1 + maxItems: 2 + + phy-names: + minItems: 1 + maxItems: 2 + items: + anyOf: + - const: cdns3,usb2-phy + - const: cdns3,usb3-phy + + cdns,on-chip-buff-size: + description: + size of memory intended as internal memory for endpoints + buffers expressed in KB + $ref: /schemas/types.yaml#/definitions/uint32 + + cdns,phyrst-a-enable: + description: Enable resetting of PHY if Rx fail is detected + type: boolean + +required: + - compatible + - reg + - reg-names + - interrupts + +additionalProperties: false + +examples: + - | + #include <dt-bindings/interrupt-controller/arm-gic.h> + bus { + #address-cells = <2>; + #size-cells = <2>; + + usb@6000000 { + compatible = "cdns,usb3"; + reg = <0x00 0x6000000 0x00 0x10000>, + <0x00 0x6010000 0x00 0x10000>, + <0x00 0x6020000 0x00 0x10000>; + reg-names = "otg", "xhci", "dev"; + interrupts = <GIC_SPI 96 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 102 IRQ_TYPE_LEVEL_HIGH>, + <GIC_SPI 120 IRQ_TYPE_LEVEL_HIGH>; + interrupt-names = "host", "peripheral", "otg"; + maximum-speed = "super-speed"; + dr_mode = "otg"; + }; + }; diff --git a/Documentation/devicetree/bindings/usb/cdns-usb3.txt b/Documentation/devicetree/bindings/usb/cdns-usb3.txt deleted file mode 100644 index 
b7dc606d37b5..000000000000 --- a/Documentation/devicetree/bindings/usb/cdns-usb3.txt +++ /dev/null @@ -1,45 +0,0 @@ -Binding for the Cadence USBSS-DRD controller - -Required properties: - - reg: Physical base address and size of the controller's register areas. - Controller has 3 different regions: - - HOST registers area - - DEVICE registers area - - OTG/DRD registers area - - reg-names - register memory area names: - "xhci" - for HOST registers space - "dev" - for DEVICE registers space - "otg" - for OTG/DRD registers space - - compatible: Should contain: "cdns,usb3" - - interrupts: Interrupts used by cdns3 controller: - "host" - interrupt used by XHCI driver. - "peripheral" - interrupt used by device driver - "otg" - interrupt used by DRD/OTG part of driver - -Optional properties: - - maximum-speed : valid arguments are "super-speed", "high-speed" and - "full-speed"; refer to usb/generic.txt - - dr_mode: Should be one of "host", "peripheral" or "otg". - - phys: reference to the USB PHY - - phy-names: from the *Generic PHY* bindings; - Supported names are: - - cdns3,usb2-phy - - cdns3,usb3-phy - - - cdns,on-chip-buff-size : size of memory intended as internal memory for endpoints - buffers expressed in KB - -Example: - usb@f3000000 { - compatible = "cdns,usb3"; - interrupts = <GIC_USB_IRQ 7 IRQ_TYPE_LEVEL_HIGH>, - <GIC_USB_IRQ 7 IRQ_TYPE_LEVEL_HIGH>, - <GIC_USB_IRQ 8 IRQ_TYPE_LEVEL_HIGH>; - interrupt-names = "host", "peripheral", "otg"; - reg = <0xf3000000 0x10000>, /* memory area for HOST registers */ - <0xf3010000 0x10000>, /* memory area for DEVICE registers */ - <0xf3020000 0x10000>; /* memory area for OTG/DRD registers */ - reg-names = "xhci", "dev", "otg"; - phys = <&usb2_phy>, <&usb3_phy>; - phy-names = "cdns3,usb2-phy", "cnds3,usb3-phy"; - }; diff --git a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt index 51376cbe5f3d..a5c5db6a0b2d 100644 --- a/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt +++ b/Documentation/devicetree/bindings/usb/ci-hdrc-usb2.txt @@ -100,6 +100,15 @@ i.mx specific properties It's recommended to specify the over current polarity. - power-active-high: power signal polarity is active high - external-vbus-divider: enables off-chip resistor divider for Vbus +- samsung,picophy-pre-emp-curr-control: HS Transmitter Pre-Emphasis Current + Control. This signal controls the amount of current sourced to the + USB_OTG*_DP and USB_OTG*_DN pins after a J-to-K or K-to-J transition. + The range is from 0x0 to 0x3, the default value is 0x1. + Details can refer to TXPREEMPAMPTUNE0 bits of USBNC_n_PHY_CFG1. +- samsung,picophy-dc-vol-level-adjust: HS DC Voltage Level Adjustment. + Adjust the high-speed transmitter DC level voltage. + The range is from 0x0 to 0xf, the default value is 0x3. + Details can refer to TXVREFTUNE0 bits of USBNC_n_PHY_CFG1. Example: diff --git a/Documentation/devicetree/bindings/usb/dwc2.yaml b/Documentation/devicetree/bindings/usb/dwc2.yaml index ffa157a0fce7..e5ee51b7b470 100644 --- a/Documentation/devicetree/bindings/usb/dwc2.yaml +++ b/Documentation/devicetree/bindings/usb/dwc2.yaml @@ -39,6 +39,7 @@ properties: - amlogic,meson-g12a-usb - const: snps,dwc2 - const: amcc,dwc-otg + - const: apm,apm82181-dwc-otg - const: snps,dwc2 - const: st,stm32f4x9-fsotg - const: st,stm32f4x9-hsotg @@ -102,6 +103,10 @@ properties: dr_mode: enum: [host, peripheral, otg] + usb-role-switch: + $ref: /schemas/types.yaml#/definitions/flag + description: Support role switch. 
+ g-rx-fifo-size: $ref: /schemas/types.yaml#/definitions/uint32 description: size of rx fifo size in gadget mode. diff --git a/Documentation/devicetree/bindings/usb/dwc3.txt b/Documentation/devicetree/bindings/usb/dwc3.txt index d03edf9d3935..1aae2b6160c1 100644 --- a/Documentation/devicetree/bindings/usb/dwc3.txt +++ b/Documentation/devicetree/bindings/usb/dwc3.txt @@ -78,6 +78,9 @@ Optional properties: park mode are disabled. - snps,dis_metastability_quirk: when set, disable metastability workaround. CAUTION: use only if you are absolutely sure of it. + - snps,dis-split-quirk: when set, change the way URBs are handled by the + driver. Needed to avoid -EPROTO errors with usbhid + on some devices (Hikey 970). - snps,is-utmi-l1-suspend: true when DWC3 asserts output signal utmi_l1_suspend_n, false when asserts utmi_sleep_n - snps,hird-threshold: HIRD threshold diff --git a/Documentation/devicetree/bindings/usb/intel,keembay-dwc3.yaml b/Documentation/devicetree/bindings/usb/intel,keembay-dwc3.yaml new file mode 100644 index 000000000000..dd32c10ce6c7 --- /dev/null +++ b/Documentation/devicetree/bindings/usb/intel,keembay-dwc3.yaml @@ -0,0 +1,77 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/usb/intel,keembay-dwc3.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: Intel Keem Bay DWC3 USB controller + +maintainers: + - Wan Ahmad Zainie <wan.ahmad.zainie.wan.mohamad@intel.com> + +properties: + compatible: + const: intel,keembay-dwc3 + + clocks: + maxItems: 4 + + clock-names: + items: + - const: async_master + - const: ref + - const: alt_ref + - const: suspend + + ranges: true + + '#address-cells': + enum: [ 1, 2 ] + + '#size-cells': + enum: [ 1, 2 ] + +# Required child node: + +patternProperties: + "^dwc3@[0-9a-f]+$": + type: object + description: + A child node must exist to represent the core DWC3 IP block. + The content of the node is defined in dwc3.txt. + +required: + - compatible + - clocks + - clock-names + - ranges + +additionalProperties: false + +examples: + - | + #include <dt-bindings/interrupt-controller/arm-gic.h> + #include <dt-bindings/interrupt-controller/irq.h> + #define KEEM_BAY_A53_AUX_USB + #define KEEM_BAY_A53_AUX_USB_REF + #define KEEM_BAY_A53_AUX_USB_ALT_REF + #define KEEM_BAY_A53_AUX_USB_SUSPEND + + usb { + compatible = "intel,keembay-dwc3"; + clocks = <&scmi_clk KEEM_BAY_A53_AUX_USB>, + <&scmi_clk KEEM_BAY_A53_AUX_USB_REF>, + <&scmi_clk KEEM_BAY_A53_AUX_USB_ALT_REF>, + <&scmi_clk KEEM_BAY_A53_AUX_USB_SUSPEND>; + clock-names = "async_master", "ref", "alt_ref", "suspend"; + ranges; + #address-cells = <1>; + #size-cells = <1>; + + dwc3@34000000 { + compatible = "snps,dwc3"; + reg = <0x34000000 0x10000>; + interrupts = <GIC_SPI 91 IRQ_TYPE_LEVEL_HIGH>; + dr_mode = "peripheral"; + }; + }; diff --git a/Documentation/devicetree/bindings/usb/mediatek,mt6360-tcpc.yaml b/Documentation/devicetree/bindings/usb/mediatek,mt6360-tcpc.yaml new file mode 100644 index 000000000000..1e8e1c22180e --- /dev/null +++ b/Documentation/devicetree/bindings/usb/mediatek,mt6360-tcpc.yaml @@ -0,0 +1,95 @@ +# SPDX-License-Identifier: (GPL-2.0-only OR BSD-2-Clause) +%YAML 1.2 +--- +$id: "http://devicetree.org/schemas/usb/mediatek,mt6360-tcpc.yaml#" +$schema: "http://devicetree.org/meta-schemas/core.yaml#" + +title: Mediatek MT6360 Type-C Port Switch and Power Delivery controller DT bindings + +maintainers: + - ChiYuan Huang <cy_huang@richtek.com> + +description: | + Mediatek MT6360 is a multi-functional device. 
It integrates charger, ADC, flash, RGB indicators, + regulators (BUCKs/LDOs), and TypeC Port Switch with Power Delivery controller. + This document only describes MT6360 Type-C Port Switch and Power Delivery controller. + +properties: + compatible: + enum: + - mediatek,mt6360-tcpc + + interrupts: + maxItems: 1 + + interrupt-names: + items: + - const: PD_IRQB + + connector: + type: object + $ref: ../connector/usb-connector.yaml# + description: + Properties for usb c connector. + +additionalProperties: false + +required: + - compatible + - interrupts + - interrupt-names + +examples: + - | + #include <dt-bindings/interrupt-controller/irq.h> + #include <dt-bindings/usb/pd.h> + i2c0 { + #address-cells = <1>; + #size-cells = <0>; + + mt6360@34 { + compatible = "mediatek,mt6360"; + reg = <0x34>; + tcpc { + compatible = "mediatek,mt6360-tcpc"; + interrupts-extended = <&gpio26 3 IRQ_TYPE_LEVEL_LOW>; + interrupt-names = "PD_IRQB"; + + connector { + compatible = "usb-c-connector"; + label = "USB-C"; + data-role = "dual"; + power-role = "dual"; + try-power-role = "sink"; + source-pdos = <PDO_FIXED(5000, 1000, PDO_FIXED_DUAL_ROLE | PDO_FIXED_DATA_SWAP)>; + sink-pdos = <PDO_FIXED(5000, 2000, PDO_FIXED_DUAL_ROLE | PDO_FIXED_DATA_SWAP)>; + op-sink-microwatt = <10000000>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + + port@0 { + reg = <0>; + endpoint { + remote-endpoint = <&usb_hs>; + }; + }; + port@1 { + reg = <1>; + endpoint { + remote-endpoint = <&usb_ss>; + }; + }; + port@2 { + reg = <2>; + endpoint { + remote-endpoint = <&dp_aux>; + }; + }; + }; + }; + }; + }; + }; +... diff --git a/Documentation/devicetree/bindings/usb/renesas,usb-xhci.yaml b/Documentation/devicetree/bindings/usb/renesas,usb-xhci.yaml index add9f7b66da0..0f078bd0a3e5 100644 --- a/Documentation/devicetree/bindings/usb/renesas,usb-xhci.yaml +++ b/Documentation/devicetree/bindings/usb/renesas,usb-xhci.yaml @@ -30,6 +30,7 @@ properties: - renesas,xhci-r8a774a1 # RZ/G2M - renesas,xhci-r8a774b1 # RZ/G2N - renesas,xhci-r8a774c0 # RZ/G2E + - renesas,xhci-r8a774e1 # RZ/G2H - renesas,xhci-r8a7795 # R-Car H3 - renesas,xhci-r8a7796 # R-Car M3-W - renesas,xhci-r8a77961 # R-Car M3-W+ diff --git a/Documentation/devicetree/bindings/usb/renesas,usb3-peri.yaml b/Documentation/devicetree/bindings/usb/renesas,usb3-peri.yaml index e3cdeab1199f..929a3f413b44 100644 --- a/Documentation/devicetree/bindings/usb/renesas,usb3-peri.yaml +++ b/Documentation/devicetree/bindings/usb/renesas,usb3-peri.yaml @@ -16,6 +16,7 @@ properties: - renesas,r8a774a1-usb3-peri # RZ/G2M - renesas,r8a774b1-usb3-peri # RZ/G2N - renesas,r8a774c0-usb3-peri # RZ/G2E + - renesas,r8a774e1-usb3-peri # RZ/G2H - renesas,r8a7795-usb3-peri # R-Car H3 - renesas,r8a7796-usb3-peri # R-Car M3-W - renesas,r8a77961-usb3-peri # R-Car M3-W+ @@ -52,11 +53,24 @@ properties: $ref: /schemas/types.yaml#/definitions/phandle description: phandle of a companion. - port: + ports: description: | any connector to the data bus of this controller should be modelled using the OF graph bindings specified, if the "usb-role-switch" property is used. + type: object + properties: + port@0: + type: object + description: High Speed (HS) data bus. + + port@1: + type: object + description: Super Speed (SS) data bus. 
+ + required: + - port@0 + - port@1 required: - compatible @@ -79,9 +93,20 @@ examples: companion = <&xhci0>; usb-role-switch; - port { - usb3_role_switch: endpoint { - remote-endpoint = <&hd3ss3220_ep>; - }; + ports { + #address-cells = <1>; + #size-cells = <0>; + port@0 { + reg = <0>; + usb3_hs_ep: endpoint { + remote-endpoint = <&hs_ep>; + }; + }; + port@1 { + reg = <1>; + usb3_role_switch: endpoint { + remote-endpoint = <&hd3ss3220_out_ep>; + }; + }; }; }; diff --git a/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml b/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml index af4826fb6824..737c1f47b7de 100644 --- a/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml +++ b/Documentation/devicetree/bindings/usb/renesas,usbhs.yaml @@ -39,6 +39,7 @@ properties: - renesas,usbhs-r8a774a1 # RZ/G2M - renesas,usbhs-r8a774b1 # RZ/G2N - renesas,usbhs-r8a774c0 # RZ/G2E + - renesas,usbhs-r8a774e1 # RZ/G2H - renesas,usbhs-r8a7795 # R-Car H3 - renesas,usbhs-r8a7796 # R-Car M3-W - renesas,usbhs-r8a77961 # R-Car M3-W+ diff --git a/Documentation/devicetree/bindings/usb/ti,hd3ss3220.txt b/Documentation/devicetree/bindings/usb/ti,hd3ss3220.txt deleted file mode 100644 index 2bd21b22ce95..000000000000 --- a/Documentation/devicetree/bindings/usb/ti,hd3ss3220.txt +++ /dev/null @@ -1,38 +0,0 @@ -TI HD3SS3220 TypeC DRP Port Controller. - -Required properties: - - compatible: Must be "ti,hd3ss3220". - - reg: I2C slave address, must be 0x47 or 0x67 based on ADDR pin. - - interrupts: An interrupt specifier. - -Required sub-node: - - connector: The "usb-c-connector" attached to the hd3ss3220 chip. The - bindings of the connector node are specified in: - - Documentation/devicetree/bindings/connector/usb-connector.yaml - -Example: -hd3ss3220@47 { - compatible = "ti,hd3ss3220"; - reg = <0x47>; - interrupt-parent = <&gpio6>; - interrupts = <3 IRQ_TYPE_LEVEL_LOW>; - - connector { - compatible = "usb-c-connector"; - label = "USB-C"; - data-role = "dual"; - - ports { - #address-cells = <1>; - #size-cells = <0>; - - port@1 { - reg = <1>; - hd3ss3220_ep: endpoint { - remote-endpoint = <&usb3_role_switch>; - }; - }; - }; - }; -}; diff --git a/Documentation/devicetree/bindings/usb/ti,hd3ss3220.yaml b/Documentation/devicetree/bindings/usb/ti,hd3ss3220.yaml new file mode 100644 index 000000000000..5fe9e6211ba2 --- /dev/null +++ b/Documentation/devicetree/bindings/usb/ti,hd3ss3220.yaml @@ -0,0 +1,82 @@ +# SPDX-License-Identifier: (GPL-2.0 OR BSD-2-Clause) +%YAML 1.2 +--- +$id: http://devicetree.org/schemas/usb/ti,hd3ss3220.yaml# +$schema: http://devicetree.org/meta-schemas/core.yaml# + +title: TI HD3SS3220 TypeC DRP Port Controller + +maintainers: + - Biju Das <biju.das.jz@bp.renesas.com> + +description: |- + HD3SS3220 is a USB SuperSpeed (SS) 2:1 mux with DRP port controller. The device provides Channel + Configuration (CC) logic and 5V VCONN sourcing for ecosystems implementing USB Type-C. The + HD3SS3220 can be configured as a Downstream Facing Port (DFP), Upstream Facing Port (UFP) or a + Dual Role Port (DRP) making it ideal for any application. + +properties: + compatible: + const: ti,hd3ss3220 + + reg: + maxItems: 1 + + interrupts: + maxItems: 1 + + ports: + description: OF graph bindings (specified in bindings/graph.txt) that model + SS data bus to the SS capable connector. + type: object + properties: + port@0: + type: object + description: Super Speed (SS) MUX inputs connected to SS capable connector. 
+ $ref: /connector/usb-connector.yaml#/properties/ports/properties/port@1 + + port@1: + type: object + description: Output of 2:1 MUX connected to Super Speed (SS) data bus. + + required: + - port@0 + - port@1 + +required: + - compatible + - reg + - interrupts + +additionalProperties: false + +examples: + - | + i2c0 { + #address-cells = <1>; + #size-cells = <0>; + + hd3ss3220@47 { + compatible = "ti,hd3ss3220"; + reg = <0x47>; + interrupt-parent = <&gpio6>; + interrupts = <3>; + + ports { + #address-cells = <1>; + #size-cells = <0>; + port@0 { + reg = <0>; + hd3ss3220_in_ep: endpoint { + remote-endpoint = <&ss_ep>; + }; + }; + port@1 { + reg = <1>; + hd3ss3220_out_ep: endpoint { + remote-endpoint = <&usb3_role_switch>; + }; + }; + }; + }; + }; diff --git a/Documentation/kbuild/llvm.rst b/Documentation/kbuild/llvm.rst index 334df758dce3..dae90c21aed3 100644 --- a/Documentation/kbuild/llvm.rst +++ b/Documentation/kbuild/llvm.rst @@ -39,10 +39,10 @@ which can help simplify cross compiling. :: ARCH=arm64 CROSS_COMPILE=aarch64-linux-gnu- make CC=clang ``CROSS_COMPILE`` is not used to prefix the Clang compiler binary, instead -``CROSS_COMPILE`` is used to set a command line flag: ``--target <triple>``. For +``CROSS_COMPILE`` is used to set a command line flag: ``--target=<triple>``. For example: :: - clang --target aarch64-linux-gnu foo.c + clang --target=aarch64-linux-gnu foo.c LLVM Utilities -------------- diff --git a/Documentation/networking/ethtool-netlink.rst b/Documentation/networking/ethtool-netlink.rst index d53bcb31645a..b5a79881551f 100644 --- a/Documentation/networking/ethtool-netlink.rst +++ b/Documentation/networking/ethtool-netlink.rst @@ -206,6 +206,7 @@ Userspace to kernel: ``ETHTOOL_MSG_TSINFO_GET`` get timestamping info ``ETHTOOL_MSG_CABLE_TEST_ACT`` action start cable test ``ETHTOOL_MSG_CABLE_TEST_TDR_ACT`` action start raw TDR cable test + ``ETHTOOL_MSG_TUNNEL_INFO_GET`` get tunnel offload info ===================================== ================================ Kernel to userspace: @@ -239,6 +240,7 @@ Kernel to userspace: ``ETHTOOL_MSG_TSINFO_GET_REPLY`` timestamping info ``ETHTOOL_MSG_CABLE_TEST_NTF`` Cable test results ``ETHTOOL_MSG_CABLE_TEST_TDR_NTF`` Cable test TDR results + ``ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY`` tunnel offload info ===================================== ================================= ``GET`` requests are sent by userspace applications to retrieve device @@ -1363,4 +1365,5 @@ are netlink only. ``ETHTOOL_SFECPARAM`` n/a n/a ''ETHTOOL_MSG_CABLE_TEST_ACT'' n/a ''ETHTOOL_MSG_CABLE_TEST_TDR_ACT'' + n/a ``ETHTOOL_MSG_TUNNEL_INFO_GET`` =================================== ===================================== diff --git a/Documentation/userspace-api/media/v4l/buffer.rst b/Documentation/userspace-api/media/v4l/buffer.rst index 57e752aaf414..2044ed13cd9d 100644 --- a/Documentation/userspace-api/media/v4l/buffer.rst +++ b/Documentation/userspace-api/media/v4l/buffer.rst @@ -701,23 +701,6 @@ Memory Consistency Flags :stub-columns: 0 :widths: 3 1 4 - * .. _`V4L2-FLAG-MEMORY-NON-CONSISTENT`: - - - ``V4L2_FLAG_MEMORY_NON_CONSISTENT`` - - 0x00000001 - - A buffer is allocated either in consistent (it will be automatically - coherent between the CPU and the bus) or non-consistent memory. The - latter can provide performance gains, for instance the CPU cache - sync/flush operations can be avoided if the buffer is accessed by the - corresponding device only and the CPU does not read/write to/from that - buffer. 
However, this requires extra care from the driver -- it must - guarantee memory consistency by issuing a cache flush/sync when - consistency is needed. If this flag is set V4L2 will attempt to - allocate the buffer in non-consistent memory. The flag takes effect - only if the buffer is used for :ref:`memory mapping <mmap>` I/O and the - queue reports the :ref:`V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS - <V4L2-BUF-CAP-SUPPORTS-MMAP-CACHE-HINTS>` capability. - .. c:type:: v4l2_memory enum v4l2_memory diff --git a/Documentation/userspace-api/media/v4l/vidioc-create-bufs.rst b/Documentation/userspace-api/media/v4l/vidioc-create-bufs.rst index f2a702870fad..12cf6b44f414 100644 --- a/Documentation/userspace-api/media/v4l/vidioc-create-bufs.rst +++ b/Documentation/userspace-api/media/v4l/vidioc-create-bufs.rst @@ -120,13 +120,9 @@ than the number requested. If you want to just query the capabilities without making any other changes, then set ``count`` to 0, ``memory`` to ``V4L2_MEMORY_MMAP`` and ``format.type`` to the buffer type. - * - __u32 - - ``flags`` - - Specifies additional buffer management attributes. - See :ref:`memory-flags`. * - __u32 - - ``reserved``\ [6] + - ``reserved``\ [7] - A place holder for future extensions. Drivers and applications must set the array to zero. diff --git a/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst b/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst index 75d894d9c36c..0e3e2fde65e8 100644 --- a/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst +++ b/Documentation/userspace-api/media/v4l/vidioc-reqbufs.rst @@ -112,17 +112,10 @@ aborting or finishing any DMA in progress, an implicit ``V4L2_MEMORY_MMAP`` and ``type`` set to the buffer type. This will free any previously allocated buffers, so this is typically something that will be done at the start of the application. - * - union { - - (anonymous) - * - __u32 - - ``flags`` - - Specifies additional buffer management attributes. - See :ref:`memory-flags`. * - __u32 - ``reserved``\ [1] - - Kept for backwards compatibility. Use ``flags`` instead. - * - } - - + - A place holder for future extensions. Drivers and applications + must set the array to zero. .. tabularcolumns:: |p{6.1cm}|p{2.2cm}|p{8.7cm}| @@ -169,7 +162,6 @@ aborting or finishing any DMA in progress, an implicit - This capability is set by the driver to indicate that the queue supports cache and memory management hints. However, it's only valid when the queue is used for :ref:`memory mapping <mmap>` streaming I/O. See - :ref:`V4L2_FLAG_MEMORY_NON_CONSISTENT <V4L2-FLAG-MEMORY-NON-CONSISTENT>`, :ref:`V4L2_BUF_FLAG_NO_CACHE_INVALIDATE <V4L2-BUF-FLAG-NO-CACHE-INVALIDATE>` and :ref:`V4L2_BUF_FLAG_NO_CACHE_CLEAN <V4L2-BUF-FLAG-NO-CACHE-CLEAN>`. diff --git a/Documentation/virt/kvm/api.rst b/Documentation/virt/kvm/api.rst index d2b733dc7892..51191b56e61c 100644 --- a/Documentation/virt/kvm/api.rst +++ b/Documentation/virt/kvm/api.rst @@ -6173,3 +6173,23 @@ specific interfaces must be consistent, i.e. if one says the feature is supported, than the other should as well and vice versa. For arm64 see Documentation/virt/kvm/devices/vcpu.rst "KVM_ARM_VCPU_PVTIME_CTRL". For x86 see Documentation/virt/kvm/msr.rst "MSR_KVM_STEAL_TIME". + +8.25 KVM_CAP_S390_DIAG318 +------------------------- + +:Architectures: s390 + +This capability enables a guest to set information about its control program +(i.e. guest kernel type and version). 
The information is helpful during +system/firmware service events, providing additional data about the guest +environments running on the machine. + +The information is associated with the DIAGNOSE 0x318 instruction, which sets +an 8-byte value consisting of a one-byte Control Program Name Code (CPNC) and +a 7-byte Control Program Version Code (CPVC). The CPNC determines what +environment the control program is running in (e.g. Linux, z/VM...), and the +CPVC is used for information specific to OS (e.g. Linux version, Linux +distribution...) + +If this capability is available, then the CPNC and CPVC can be synchronized +between KVM and userspace via the sync regs mechanism (KVM_SYNC_DIAG318). diff --git a/MAINTAINERS b/MAINTAINERS index d746519253c3..14f826faa512 100644 --- a/MAINTAINERS +++ b/MAINTAINERS @@ -3475,6 +3475,14 @@ F: drivers/bus/brcmstb_gisb.c F: drivers/pci/controller/pcie-brcmstb.c N: brcmstb +BROADCOM BDC DRIVER +M: Al Cooper <alcooperx@gmail.com> +L: linux-usb@vger.kernel.org +L: bcm-kernel-feedback-list@broadcom.com +S: Maintained +F: Documentation/devicetree/bindings/usb/brcm,bdc.txt +F: drivers/usb/gadget/udc/bdc/ + BROADCOM BMIPS CPUFREQ DRIVER M: Markus Mayer <mmayer@broadcom.com> M: bcm-kernel-feedback-list@broadcom.com @@ -3848,6 +3856,16 @@ S: Orphan F: Documentation/devicetree/bindings/mtd/cadence-nand-controller.txt F: drivers/mtd/nand/raw/cadence-nand-controller.c +CADENCE USB3 DRD IP DRIVER +M: Peter Chen <peter.chen@nxp.com> +M: Pawel Laszczak <pawell@cadence.com> +M: Roger Quadros <rogerq@ti.com> +L: linux-usb@vger.kernel.org +S: Maintained +T: git git://git.kernel.org/pub/scm/linux/kernel/git/peter.chen/usb.git +F: Documentation/devicetree/bindings/usb/cdns-usb3.txt +F: drivers/usb/cdns3/ + CADET FM/AM RADIO RECEIVER DRIVER M: Hans Verkuil <hverkuil@xs4all.nl> L: linux-media@vger.kernel.org @@ -4408,12 +4426,6 @@ T: git git://git.infradead.org/users/hch/configfs.git F: fs/configfs/ F: include/linux/configfs.h -CONNECTOR -M: Evgeniy Polyakov <zbr@ioremap.net> -L: netdev@vger.kernel.org -S: Maintained -F: drivers/connector/ - CONSOLE SUBSYSTEM M: Greg Kroah-Hartman <gregkh@linuxfoundation.org> S: Supported @@ -8329,8 +8341,9 @@ S: Supported F: drivers/pci/hotplug/rpaphp* IBM Power SRIOV Virtual NIC Device Driver -M: Thomas Falcon <tlfalcon@linux.ibm.com> -M: John Allen <jallen@linux.ibm.com> +M: Dany Madden <drt@linux.ibm.com> +M: Lijun Pan <ljp@linux.ibm.com> +M: Sukadev Bhattiprolu <sukadev@linux.ibm.com> L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/ibm/ibmvnic.* @@ -8344,7 +8357,7 @@ F: arch/powerpc/platforms/powernv/copy-paste.h F: arch/powerpc/platforms/powernv/vas* IBM Power Virtual Ethernet Device Driver -M: Thomas Falcon <tlfalcon@linux.ibm.com> +M: Cristobal Forno <cforno12@linux.ibm.com> L: netdev@vger.kernel.org S: Supported F: drivers/net/ethernet/ibm/ibmveth.* @@ -11042,6 +11055,7 @@ F: drivers/char/hw_random/mtk-rng.c MEDIATEK SWITCH DRIVER M: Sean Wang <sean.wang@mediatek.com> +M: Landen Chao <Landen.Chao@mediatek.com> L: netdev@vger.kernel.org S: Maintained F: drivers/net/dsa/mt7530.* @@ -11049,7 +11063,7 @@ F: net/dsa/tag_mtk.c MEDIATEK USB3 DRD IP DRIVER M: Chunfeng Yun <chunfeng.yun@mediatek.com> -L: linux-usb@vger.kernel.org (moderated for non-subscribers) +L: linux-usb@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) L: linux-mediatek@lists.infradead.org (moderated for non-subscribers) S: Maintained @@ -12055,6 +12069,7 @@ Q: http://patchwork.ozlabs.org/project/netdev/list/ T: git 
git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net.git T: git git://git.kernel.org/pub/scm/linux/kernel/git/netdev/net-next.git F: Documentation/devicetree/bindings/net/ +F: drivers/connector/ F: drivers/net/ F: include/linux/etherdevice.h F: include/linux/fcdevice.h @@ -13185,6 +13200,7 @@ F: drivers/firmware/pcdp.* PCI DRIVER FOR AARDVARK (Marvell Armada 3700) M: Thomas Petazzoni <thomas.petazzoni@bootlin.com> +M: Pali Rohár <pali@kernel.org> L: linux-pci@vger.kernel.org L: linux-arm-kernel@lists.infradead.org (moderated for non-subscribers) S: Maintained @@ -16157,7 +16173,7 @@ M: Leon Luo <leonl@leopardimaging.com> L: linux-media@vger.kernel.org S: Maintained T: git git://linuxtv.org/media_tree.git -F: Documentation/devicetree/bindings/media/i2c/imx274.txt +F: Documentation/devicetree/bindings/media/i2c/sony,imx274.yaml F: drivers/media/i2c/imx274.c SONY IMX290 SENSOR DRIVER @@ -2,7 +2,7 @@ VERSION = 5 PATCHLEVEL = 9 SUBLEVEL = 0 -EXTRAVERSION = -rc6 +EXTRAVERSION = -rc8 NAME = Kleptomaniac Octopus # *DOCUMENTATION* diff --git a/arch/arm/boot/dts/at91-sam9x60ek.dts b/arch/arm/boot/dts/at91-sam9x60ek.dts index ca15ff8fea18..eae28b82c7fd 100644 --- a/arch/arm/boot/dts/at91-sam9x60ek.dts +++ b/arch/arm/boot/dts/at91-sam9x60ek.dts @@ -563,6 +563,12 @@ atmel,pins = <AT91_PIOD 18 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>; }; }; + + usb0 { + pinctrl_usba_vbus: usba_vbus { + atmel,pins = <AT91_PIOB 16 AT91_PERIPH_GPIO AT91_PINCTRL_NONE>; + }; + }; }; /* pinctrl */ &pmc { @@ -666,6 +672,13 @@ }; }; +&usb0 { + atmel,vbus-gpio = <&pioB 16 GPIO_ACTIVE_HIGH>; + pinctrl-names = "default"; + pinctrl-0 = <&pinctrl_usba_vbus>; + status = "okay"; +}; + &usb1 { num-ports = <3>; atmel,vbus-gpio = <0 diff --git a/arch/arm/boot/dts/at91-sama5d2_icp.dts b/arch/arm/boot/dts/at91-sama5d2_icp.dts index 8d19925fc09e..6783cf16ff81 100644 --- a/arch/arm/boot/dts/at91-sama5d2_icp.dts +++ b/arch/arm/boot/dts/at91-sama5d2_icp.dts @@ -116,7 +116,6 @@ switch0: ksz8563@0 { compatible = "microchip,ksz8563"; reg = <0>; - phy-mode = "mii"; reset-gpios = <&pioA PIN_PD4 GPIO_ACTIVE_LOW>; spi-max-frequency = <500000>; @@ -140,6 +139,7 @@ reg = <2>; label = "cpu"; ethernet = <&macb0>; + phy-mode = "mii"; fixed-link { speed = <100>; full-duplex; diff --git a/arch/arm/boot/dts/bcm2711-rpi-4-b.dts b/arch/arm/boot/dts/bcm2711-rpi-4-b.dts index 222d7825e1ab..e94244a215af 100644 --- a/arch/arm/boot/dts/bcm2711-rpi-4-b.dts +++ b/arch/arm/boot/dts/bcm2711-rpi-4-b.dts @@ -4,6 +4,8 @@ #include "bcm2835-rpi.dtsi" #include "bcm283x-rpi-usb-peripheral.dtsi" +#include <dt-bindings/reset/raspberrypi,firmware-reset.h> + / { compatible = "raspberrypi,4-model-b", "brcm,bcm2711"; model = "Raspberry Pi 4 Model B"; @@ -88,6 +90,11 @@ ""; status = "okay"; }; + + reset: reset { + compatible = "raspberrypi,firmware-reset"; + #reset-cells = <1>; + }; }; &gpio { @@ -207,6 +214,21 @@ }; }; +&pcie0 { + pci@1,0 { + #address-cells = <3>; + #size-cells = <2>; + ranges; + + reg = <0 0 0 0 0>; + + usb@1,0 { + reg = <0x10000 0 0 0 0>; + resets = <&reset RASPBERRYPI_FIRMWARE_RESET_ID_USB>; + }; + }; +}; + /* uart0 communicates with the BT module */ &uart0 { pinctrl-names = "default"; diff --git a/arch/arm/boot/dts/bcm2835-rpi.dtsi b/arch/arm/boot/dts/bcm2835-rpi.dtsi index f7ae5a4530b8..d94357b21f7e 100644 --- a/arch/arm/boot/dts/bcm2835-rpi.dtsi +++ b/arch/arm/boot/dts/bcm2835-rpi.dtsi @@ -13,7 +13,7 @@ soc { firmware: firmware { - compatible = "raspberrypi,bcm2835-firmware", "simple-bus"; + compatible = "raspberrypi,bcm2835-firmware", "simple-mfd"; 
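The device tree changes above expose a "raspberrypi,firmware-reset" provider under the firmware node and point the USB controller's resets property at RASPBERRYPI_FIRMWARE_RESET_ID_USB. A hedged sketch of how a consumer driver typically claims and cycles such a firmware-backed reset line through the generic reset-controller API; the probe function and device here are hypothetical, not part of this series.

#include <linux/err.h>
#include <linux/platform_device.h>
#include <linux/reset.h>

/* Hypothetical consumer: look up the reset line named by the node's
 * "resets" property, then pulse it before initialising the device. */
static int example_probe(struct platform_device *pdev)
{
	struct reset_control *rst;
	int ret;

	rst = devm_reset_control_get_exclusive(&pdev->dev, NULL);
	if (IS_ERR(rst))
		return PTR_ERR(rst);

	ret = reset_control_assert(rst);
	if (ret)
		return ret;

	return reset_control_deassert(rst);
}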
#address-cells = <1>; #size-cells = <1>; diff --git a/arch/arm/boot/dts/sam9x60.dtsi b/arch/arm/boot/dts/sam9x60.dtsi index d10843da4a85..42f76212d472 100644 --- a/arch/arm/boot/dts/sam9x60.dtsi +++ b/arch/arm/boot/dts/sam9x60.dtsi @@ -69,6 +69,20 @@ #size-cells = <1>; ranges; + usb0: gadget@500000 { + #address-cells = <1>; + #size-cells = <0>; + compatible = "microchip,sam9x60-udc"; + reg = <0x00500000 0x100000 + 0xf803c000 0x400>; + interrupts = <23 IRQ_TYPE_LEVEL_HIGH 2>; + clocks = <&pmc PMC_TYPE_PERIPHERAL 23>, <&pmc PMC_TYPE_CORE PMC_UTMI>; + clock-names = "pclk", "hclk"; + assigned-clocks = <&pmc PMC_TYPE_CORE PMC_UTMI>; + assigned-clock-rates = <480000000>; + status = "disabled"; + }; + usb1: ohci@600000 { compatible = "atmel,at91rm9200-ohci", "usb-ohci"; reg = <0x00600000 0x100000>; diff --git a/arch/arm/mach-imx/cpuidle-imx6q.c b/arch/arm/mach-imx/cpuidle-imx6q.c index 24dd5bbe60e4..094337dc1bc7 100644 --- a/arch/arm/mach-imx/cpuidle-imx6q.c +++ b/arch/arm/mach-imx/cpuidle-imx6q.c @@ -24,7 +24,9 @@ static int imx6q_enter_wait(struct cpuidle_device *dev, imx6_set_lpm(WAIT_UNCLOCKED); raw_spin_unlock(&cpuidle_lock); + rcu_idle_enter(); cpu_do_idle(); + rcu_idle_exit(); raw_spin_lock(&cpuidle_lock); if (num_idle_cpus-- == num_online_cpus()) @@ -44,7 +46,7 @@ static struct cpuidle_driver imx6q_cpuidle_driver = { { .exit_latency = 50, .target_residency = 75, - .flags = CPUIDLE_FLAG_TIMER_STOP, + .flags = CPUIDLE_FLAG_TIMER_STOP | CPUIDLE_FLAG_RCU_IDLE, .enter = imx6q_enter_wait, .name = "WAIT", .desc = "Clock off", diff --git a/arch/arm64/include/asm/kvm_emulate.h b/arch/arm64/include/asm/kvm_emulate.h index 49a55be2b9a2..1cc5f5f72d0b 100644 --- a/arch/arm64/include/asm/kvm_emulate.h +++ b/arch/arm64/include/asm/kvm_emulate.h @@ -298,15 +298,15 @@ static __always_inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu) return (kvm_vcpu_get_esr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT; } -static __always_inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu) +static __always_inline bool kvm_vcpu_abt_iss1tw(const struct kvm_vcpu *vcpu) { return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_S1PTW); } +/* Always check for S1PTW *before* using this. 
*/ static __always_inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu) { - return !!(kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR) || - kvm_vcpu_dabt_iss1tw(vcpu); /* AF/DBM update */ + return kvm_vcpu_get_esr(vcpu) & ESR_ELx_WNR; } static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu) @@ -335,6 +335,11 @@ static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu) return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW; } +static inline bool kvm_vcpu_trap_is_exec_fault(const struct kvm_vcpu *vcpu) +{ + return kvm_vcpu_trap_is_iabt(vcpu) && !kvm_vcpu_abt_iss1tw(vcpu); +} + static __always_inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu) { return kvm_vcpu_get_esr(vcpu) & ESR_ELx_FSC; @@ -372,6 +377,9 @@ static __always_inline int kvm_vcpu_sys_get_rt(struct kvm_vcpu *vcpu) static inline bool kvm_is_write_fault(struct kvm_vcpu *vcpu) { + if (kvm_vcpu_abt_iss1tw(vcpu)) + return true; + if (kvm_vcpu_trap_is_iabt(vcpu)) return false; diff --git a/arch/arm64/kernel/acpi.c b/arch/arm64/kernel/acpi.c index a85174d05473..cada0b816c8a 100644 --- a/arch/arm64/kernel/acpi.c +++ b/arch/arm64/kernel/acpi.c @@ -298,8 +298,21 @@ void __iomem *acpi_os_ioremap(acpi_physical_address phys, acpi_size size) case EFI_BOOT_SERVICES_DATA: case EFI_CONVENTIONAL_MEMORY: case EFI_PERSISTENT_MEMORY: - pr_warn(FW_BUG "requested region covers kernel memory @ %pa\n", &phys); - return NULL; + if (memblock_is_map_memory(phys) || + !memblock_is_region_memory(phys, size)) { + pr_warn(FW_BUG "requested region covers kernel memory @ %pa\n", &phys); + return NULL; + } + /* + * Mapping kernel memory is permitted if the region in + * question is covered by a single memblock with the + * NOMAP attribute set: this enables the use of ACPI + * table overrides passed via initramfs, which are + * reserved in memory using arch_reserve_mem_area() + * below. As this particular use case only requires + * read access, fall through to the R/O mapping case. + */ + fallthrough; case EFI_RUNTIME_SERVICES_CODE: /* @@ -388,3 +401,8 @@ int apei_claim_sea(struct pt_regs *regs) return err; } + +void arch_reserve_mem_area(acpi_physical_address addr, size_t size) +{ + memblock_mark_nomap(addr, size); +} diff --git a/arch/arm64/kvm/hyp/include/hyp/switch.h b/arch/arm64/kvm/hyp/include/hyp/switch.h index 5b6b8fa00f0a..0261308bf944 100644 --- a/arch/arm64/kvm/hyp/include/hyp/switch.h +++ b/arch/arm64/kvm/hyp/include/hyp/switch.h @@ -449,7 +449,7 @@ static inline bool fixup_guest_exit(struct kvm_vcpu *vcpu, u64 *exit_code) kvm_vcpu_trap_get_fault_type(vcpu) == FSC_FAULT && kvm_vcpu_dabt_isvalid(vcpu) && !kvm_vcpu_abt_issea(vcpu) && - !kvm_vcpu_dabt_iss1tw(vcpu); + !kvm_vcpu_abt_iss1tw(vcpu); if (valid) { int ret = __vgic_v2_perform_cpuif_access(vcpu); diff --git a/arch/arm64/kvm/hyp/nvhe/tlb.c b/arch/arm64/kvm/hyp/nvhe/tlb.c index 69eae608d670..b15d65a42042 100644 --- a/arch/arm64/kvm/hyp/nvhe/tlb.c +++ b/arch/arm64/kvm/hyp/nvhe/tlb.c @@ -31,7 +31,14 @@ static void __tlb_switch_to_guest(struct kvm_s2_mmu *mmu, isb(); } + /* + * __load_guest_stage2() includes an ISB only when the AT + * workaround is applied. Take care of the opposite condition, + * ensuring that we always have an ISB, but not two ISBs back + * to back. 
+ */ __load_guest_stage2(mmu); + asm(ALTERNATIVE("isb", "nop", ARM64_WORKAROUND_SPECULATIVE_AT)); } static void __tlb_switch_to_host(struct tlb_inv_context *cxt) diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c index 9a636b8064f1..3d26b47a1343 100644 --- a/arch/arm64/kvm/mmu.c +++ b/arch/arm64/kvm/mmu.c @@ -1849,7 +1849,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa, struct kvm_s2_mmu *mmu = vcpu->arch.hw_mmu; write_fault = kvm_is_write_fault(vcpu); - exec_fault = kvm_vcpu_trap_is_iabt(vcpu); + exec_fault = kvm_vcpu_trap_is_exec_fault(vcpu); VM_BUG_ON(write_fault && exec_fault); if (fault_status == FSC_PERM && !write_fault && !exec_fault) { @@ -2131,7 +2131,7 @@ int kvm_handle_guest_abort(struct kvm_vcpu *vcpu) goto out; } - if (kvm_vcpu_dabt_iss1tw(vcpu)) { + if (kvm_vcpu_abt_iss1tw(vcpu)) { kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu)); ret = 1; goto out_unlock; diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c index 0b3fb4c7af29..8e7b8c6c576e 100644 --- a/arch/ia64/mm/init.c +++ b/arch/ia64/mm/init.c @@ -538,7 +538,7 @@ virtual_memmap_init(u64 start, u64 end, void *arg) if (map_start < map_end) memmap_init_zone((unsigned long)(map_end - map_start), args->nid, args->zone, page_to_pfn(map_start), - MEMMAP_EARLY, NULL); + MEMINIT_EARLY, NULL); return 0; } @@ -547,8 +547,8 @@ memmap_init (unsigned long size, int nid, unsigned long zone, unsigned long start_pfn) { if (!vmem_map) { - memmap_init_zone(size, nid, zone, start_pfn, MEMMAP_EARLY, - NULL); + memmap_init_zone(size, nid, zone, start_pfn, + MEMINIT_EARLY, NULL); } else { struct page *start; struct memmap_init_callback_data args; diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c index 82627c264964..01427bde2397 100644 --- a/arch/mips/bcm47xx/setup.c +++ b/arch/mips/bcm47xx/setup.c @@ -148,7 +148,7 @@ void __init plat_mem_setup(void) { struct cpuinfo_mips *c = &current_cpu_data; - if ((c->cputype == CPU_74K) || (c->cputype == CPU_1074K)) { + if (c->cputype == CPU_74K) { pr_info("Using bcma bus\n"); #ifdef CONFIG_BCM47XX_BCMA bcm47xx_bus_type = BCM47XX_BUS_TYPE_BCMA; diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h index 75a7a382da09..3288cef4b168 100644 --- a/arch/mips/include/asm/cpu-type.h +++ b/arch/mips/include/asm/cpu-type.h @@ -47,6 +47,7 @@ static inline int __pure __get_cpu_type(const int cpu_type) case CPU_34K: case CPU_1004K: case CPU_74K: + case CPU_1074K: case CPU_M14KC: case CPU_M14KEC: case CPU_INTERAPTIV: diff --git a/arch/mips/loongson2ef/Platform b/arch/mips/loongson2ef/Platform index 4ab55f1123a0..ae023b9a1c51 100644 --- a/arch/mips/loongson2ef/Platform +++ b/arch/mips/loongson2ef/Platform @@ -44,6 +44,10 @@ ifdef CONFIG_CPU_LOONGSON2F_WORKAROUNDS endif endif +# Some -march= flags enable MMI instructions, and GCC complains about that # support being enabled alongside -msoft-float. Thus explicitly disable MMI.
+cflags-y += $(call cc-option,-mno-loongson-mmi) + # # Loongson Machines' Support # diff --git a/arch/mips/loongson64/cop2-ex.c b/arch/mips/loongson64/cop2-ex.c index f130f62129b8..00055d4b6042 100644 --- a/arch/mips/loongson64/cop2-ex.c +++ b/arch/mips/loongson64/cop2-ex.c @@ -95,10 +95,8 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action, if (res) goto fault; - set_fpr64(current->thread.fpu.fpr, - insn.loongson3_lswc2_format.rt, value); - set_fpr64(current->thread.fpu.fpr, - insn.loongson3_lswc2_format.rq, value_next); + set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rt], 0, value); + set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rq], 0, value_next); compute_return_epc(regs); own_fpu(1); } @@ -130,15 +128,13 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action, goto sigbus; lose_fpu(1); - value_next = get_fpr64(current->thread.fpu.fpr, - insn.loongson3_lswc2_format.rq); + value_next = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rq], 0); StoreDW(addr + 8, value_next, res); if (res) goto fault; - value = get_fpr64(current->thread.fpu.fpr, - insn.loongson3_lswc2_format.rt); + value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lswc2_format.rt], 0); StoreDW(addr, value, res); if (res) @@ -204,8 +200,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action, if (res) goto fault; - set_fpr64(current->thread.fpu.fpr, - insn.loongson3_lsdc2_format.rt, value); + set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0, value); compute_return_epc(regs); own_fpu(1); @@ -221,8 +216,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action, if (res) goto fault; - set_fpr64(current->thread.fpu.fpr, - insn.loongson3_lsdc2_format.rt, value); + set_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0, value); compute_return_epc(regs); own_fpu(1); break; @@ -286,8 +280,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action, goto sigbus; lose_fpu(1); - value = get_fpr64(current->thread.fpu.fpr, - insn.loongson3_lsdc2_format.rt); + value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0); StoreW(addr, value, res); if (res) @@ -305,8 +298,7 @@ static int loongson_cu2_call(struct notifier_block *nfb, unsigned long action, goto sigbus; lose_fpu(1); - value = get_fpr64(current->thread.fpu.fpr, - insn.loongson3_lsdc2_format.rt); + value = get_fpr64(&current->thread.fpu.fpr[insn.loongson3_lsdc2_format.rt], 0); StoreDW(addr, value, res); if (res) diff --git a/arch/riscv/include/asm/stackprotector.h b/arch/riscv/include/asm/stackprotector.h index d95f7b2a7f37..5962f8891f06 100644 --- a/arch/riscv/include/asm/stackprotector.h +++ b/arch/riscv/include/asm/stackprotector.h @@ -5,7 +5,6 @@ #include <linux/random.h> #include <linux/version.h> -#include <asm/timex.h> extern unsigned long __stack_chk_guard; @@ -18,12 +17,9 @@ extern unsigned long __stack_chk_guard; static __always_inline void boot_init_stack_canary(void) { unsigned long canary; - unsigned long tsc; /* Try to get a semi random initial value.
*/ get_random_bytes(&canary, sizeof(canary)); - tsc = get_cycles(); - canary += tsc + (tsc << BITS_PER_LONG/2); canary ^= LINUX_VERSION_CODE; canary &= CANARY_MASK; diff --git a/arch/riscv/include/asm/timex.h b/arch/riscv/include/asm/timex.h index 7f659dda0032..ab104905d4db 100644 --- a/arch/riscv/include/asm/timex.h +++ b/arch/riscv/include/asm/timex.h @@ -33,6 +33,19 @@ static inline u32 get_cycles_hi(void) #define get_cycles_hi get_cycles_hi #endif /* CONFIG_64BIT */ +/* + * Much like MIPS, we may not have a viable counter to use at an early point + * in the boot process. Unfortunately we don't have a fallback, so instead + * we just return 0. + */ +static inline unsigned long random_get_entropy(void) +{ + if (unlikely(clint_time_val == NULL)) + return 0; + return get_cycles(); +} +#define random_get_entropy() random_get_entropy() + #else /* CONFIG_RISCV_M_MODE */ static inline cycles_t get_cycles(void) diff --git a/arch/s390/include/asm/pgtable.h b/arch/s390/include/asm/pgtable.h index 7eb01a5459cd..b55561cc8786 100644 --- a/arch/s390/include/asm/pgtable.h +++ b/arch/s390/include/asm/pgtable.h @@ -1260,26 +1260,44 @@ static inline pgd_t *pgd_offset_raw(pgd_t *pgd, unsigned long address) #define pgd_offset(mm, address) pgd_offset_raw(READ_ONCE((mm)->pgd), address) -static inline p4d_t *p4d_offset(pgd_t *pgd, unsigned long address) +static inline p4d_t *p4d_offset_lockless(pgd_t *pgdp, pgd_t pgd, unsigned long address) { - if ((pgd_val(*pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1) - return (p4d_t *) pgd_deref(*pgd) + p4d_index(address); - return (p4d_t *) pgd; + if ((pgd_val(pgd) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R1) + return (p4d_t *) pgd_deref(pgd) + p4d_index(address); + return (p4d_t *) pgdp; } +#define p4d_offset_lockless p4d_offset_lockless -static inline pud_t *pud_offset(p4d_t *p4d, unsigned long address) +static inline p4d_t *p4d_offset(pgd_t *pgdp, unsigned long address) { - if ((p4d_val(*p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2) - return (pud_t *) p4d_deref(*p4d) + pud_index(address); - return (pud_t *) p4d; + return p4d_offset_lockless(pgdp, *pgdp, address); +} + +static inline pud_t *pud_offset_lockless(p4d_t *p4dp, p4d_t p4d, unsigned long address) +{ + if ((p4d_val(p4d) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R2) + return (pud_t *) p4d_deref(p4d) + pud_index(address); + return (pud_t *) p4dp; +} +#define pud_offset_lockless pud_offset_lockless + +static inline pud_t *pud_offset(p4d_t *p4dp, unsigned long address) +{ + return pud_offset_lockless(p4dp, *p4dp, address); } #define pud_offset pud_offset -static inline pmd_t *pmd_offset(pud_t *pud, unsigned long address) +static inline pmd_t *pmd_offset_lockless(pud_t *pudp, pud_t pud, unsigned long address) +{ + if ((pud_val(pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3) + return (pmd_t *) pud_deref(pud) + pmd_index(address); + return (pmd_t *) pudp; +} +#define pmd_offset_lockless pmd_offset_lockless + +static inline pmd_t *pmd_offset(pud_t *pudp, unsigned long address) { - if ((pud_val(*pud) & _REGION_ENTRY_TYPE_MASK) >= _REGION_ENTRY_TYPE_R3) - return (pmd_t *) pud_deref(*pud) + pmd_index(address); - return (pmd_t *) pud; + return pmd_offset_lockless(pudp, *pudp, address); } #define pmd_offset pmd_offset diff --git a/arch/x86/entry/common.c b/arch/x86/entry/common.c index 2f84c7ca74ea..870efeec8bda 100644 --- a/arch/x86/entry/common.c +++ b/arch/x86/entry/common.c @@ -299,7 +299,7 @@ __visible noinstr void xen_pv_evtchn_do_upcall(struct pt_regs *regs) old_regs = 
set_irq_regs(regs); instrumentation_begin(); - run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, NULL, regs); + run_on_irqstack_cond(__xen_pv_evtchn_do_upcall, regs); instrumentation_begin(); set_irq_regs(old_regs); diff --git a/arch/x86/entry/entry_64.S b/arch/x86/entry/entry_64.S index 70dea9337816..d977079a7d02 100644 --- a/arch/x86/entry/entry_64.S +++ b/arch/x86/entry/entry_64.S @@ -682,6 +682,8 @@ SYM_CODE_END(.Lbad_gs) * rdx: Function argument (can be NULL if none) */ SYM_FUNC_START(asm_call_on_stack) +SYM_INNER_LABEL(asm_call_sysvec_on_stack, SYM_L_GLOBAL) +SYM_INNER_LABEL(asm_call_irq_on_stack, SYM_L_GLOBAL) /* * Save the frame pointer unconditionally. This allows the ORC * unwinder to handle the stack switch. diff --git a/arch/x86/include/asm/idtentry.h b/arch/x86/include/asm/idtentry.h index a43366191212..a0638640f1ed 100644 --- a/arch/x86/include/asm/idtentry.h +++ b/arch/x86/include/asm/idtentry.h @@ -242,7 +242,7 @@ __visible noinstr void func(struct pt_regs *regs) \ instrumentation_begin(); \ irq_enter_rcu(); \ kvm_set_cpu_l1tf_flush_l1d(); \ - run_on_irqstack_cond(__##func, regs, regs); \ + run_sysvec_on_irqstack_cond(__##func, regs); \ irq_exit_rcu(); \ instrumentation_end(); \ irqentry_exit(regs, state); \ diff --git a/arch/x86/include/asm/irq_stack.h b/arch/x86/include/asm/irq_stack.h index 4ae66f097101..775816965c6a 100644 --- a/arch/x86/include/asm/irq_stack.h +++ b/arch/x86/include/asm/irq_stack.h @@ -12,20 +12,50 @@ static __always_inline bool irqstack_active(void) return __this_cpu_read(irq_count) != -1; } -void asm_call_on_stack(void *sp, void *func, void *arg); +void asm_call_on_stack(void *sp, void (*func)(void), void *arg); +void asm_call_sysvec_on_stack(void *sp, void (*func)(struct pt_regs *regs), + struct pt_regs *regs); +void asm_call_irq_on_stack(void *sp, void (*func)(struct irq_desc *desc), + struct irq_desc *desc); -static __always_inline void __run_on_irqstack(void *func, void *arg) +static __always_inline void __run_on_irqstack(void (*func)(void)) { void *tos = __this_cpu_read(hardirq_stack_ptr); __this_cpu_add(irq_count, 1); - asm_call_on_stack(tos - 8, func, arg); + asm_call_on_stack(tos - 8, func, NULL); + __this_cpu_sub(irq_count, 1); +} + +static __always_inline void +__run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs), + struct pt_regs *regs) +{ + void *tos = __this_cpu_read(hardirq_stack_ptr); + + __this_cpu_add(irq_count, 1); + asm_call_sysvec_on_stack(tos - 8, func, regs); + __this_cpu_sub(irq_count, 1); +} + +static __always_inline void +__run_irq_on_irqstack(void (*func)(struct irq_desc *desc), + struct irq_desc *desc) +{ + void *tos = __this_cpu_read(hardirq_stack_ptr); + + __this_cpu_add(irq_count, 1); + asm_call_irq_on_stack(tos - 8, func, desc); __this_cpu_sub(irq_count, 1); } #else /* CONFIG_X86_64 */ static inline bool irqstack_active(void) { return false; } -static inline void __run_on_irqstack(void *func, void *arg) { } +static inline void __run_on_irqstack(void (*func)(void)) { } +static inline void __run_sysvec_on_irqstack(void (*func)(struct pt_regs *regs), + struct pt_regs *regs) { } +static inline void __run_irq_on_irqstack(void (*func)(struct irq_desc *desc), + struct irq_desc *desc) { } #endif /* !CONFIG_X86_64 */ static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs) @@ -37,17 +67,40 @@ static __always_inline bool irq_needs_irq_stack(struct pt_regs *regs) return !user_mode(regs) && !irqstack_active(); } -static __always_inline void run_on_irqstack_cond(void *func, void *arg, + +static __always_inline void 
run_on_irqstack_cond(void (*func)(void), struct pt_regs *regs) { - void (*__func)(void *arg) = func; + lockdep_assert_irqs_disabled(); + + if (irq_needs_irq_stack(regs)) + __run_on_irqstack(func); + else + func(); +} + +static __always_inline void +run_sysvec_on_irqstack_cond(void (*func)(struct pt_regs *regs), + struct pt_regs *regs) +{ + lockdep_assert_irqs_disabled(); + if (irq_needs_irq_stack(regs)) + __run_sysvec_on_irqstack(func, regs); + else + func(regs); +} + +static __always_inline void +run_irq_on_irqstack_cond(void (*func)(struct irq_desc *desc), struct irq_desc *desc, + struct pt_regs *regs) +{ lockdep_assert_irqs_disabled(); if (irq_needs_irq_stack(regs)) - __run_on_irqstack(__func, arg); + __run_irq_on_irqstack(func, desc); else - __func(arg); + func(desc); } #endif diff --git a/arch/x86/kernel/apic/io_apic.c b/arch/x86/kernel/apic/io_apic.c index 779a89e31c4c..21f9c7f11779 100644 --- a/arch/x86/kernel/apic/io_apic.c +++ b/arch/x86/kernel/apic/io_apic.c @@ -2243,6 +2243,7 @@ static inline void __init check_timer(void) legacy_pic->init(0); legacy_pic->make_irq(0); apic_write(APIC_LVT0, APIC_DM_EXTINT); + legacy_pic->unmask(0); unlock_ExtINT_logic(); diff --git a/arch/x86/kernel/irq.c b/arch/x86/kernel/irq.c index 181060247e3c..c5dd50369e2f 100644 --- a/arch/x86/kernel/irq.c +++ b/arch/x86/kernel/irq.c @@ -227,7 +227,7 @@ static __always_inline void handle_irq(struct irq_desc *desc, struct pt_regs *regs) { if (IS_ENABLED(CONFIG_X86_64)) - run_on_irqstack_cond(desc->handle_irq, desc, regs); + run_irq_on_irqstack_cond(desc->handle_irq, desc, regs); else __handle_irq(desc, regs); } diff --git a/arch/x86/kernel/irq_64.c b/arch/x86/kernel/irq_64.c index 1b4fe93a86c5..440eed558558 100644 --- a/arch/x86/kernel/irq_64.c +++ b/arch/x86/kernel/irq_64.c @@ -74,5 +74,5 @@ int irq_init_percpu_irqstack(unsigned int cpu) void do_softirq_own_stack(void) { - run_on_irqstack_cond(__do_softirq, NULL, NULL); + run_on_irqstack_cond(__do_softirq, NULL); } diff --git a/arch/x86/kernel/kvm.c b/arch/x86/kernel/kvm.c index 1b51b727b140..9663ba31347c 100644 --- a/arch/x86/kernel/kvm.c +++ b/arch/x86/kernel/kvm.c @@ -652,6 +652,7 @@ static void __init kvm_guest_init(void) } if (pv_tlb_flush_supported()) { + pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others; pv_ops.mmu.tlb_remove_table = tlb_remove_table; pr_info("KVM setup pv remote TLB flush\n"); } @@ -764,14 +765,6 @@ static __init int activate_jump_labels(void) } arch_initcall(activate_jump_labels); -static void kvm_free_pv_cpu_mask(void) -{ - unsigned int cpu; - - for_each_possible_cpu(cpu) - free_cpumask_var(per_cpu(__pv_cpu_mask, cpu)); -} - static __init int kvm_alloc_cpumask(void) { int cpu; @@ -790,20 +783,11 @@ static __init int kvm_alloc_cpumask(void) if (alloc) for_each_possible_cpu(cpu) { - if (!zalloc_cpumask_var_node( - per_cpu_ptr(&__pv_cpu_mask, cpu), - GFP_KERNEL, cpu_to_node(cpu))) { - goto zalloc_cpumask_fail; - } + zalloc_cpumask_var_node(per_cpu_ptr(&__pv_cpu_mask, cpu), + GFP_KERNEL, cpu_to_node(cpu)); } - apic->send_IPI_mask_allbutself = kvm_send_ipi_mask_allbutself; - pv_ops.mmu.flush_tlb_others = kvm_flush_tlb_others; return 0; - -zalloc_cpumask_fail: - kvm_free_pv_cpu_mask(); - return -ENOMEM; } arch_initcall(kvm_alloc_cpumask); diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c index c44f3e9140d5..91ea74ae71b8 100644 --- a/arch/x86/kvm/svm/svm.c +++ b/arch/x86/kvm/svm/svm.c @@ -2183,6 +2183,12 @@ static int iret_interception(struct vcpu_svm *svm) return 1; } +static int invd_interception(struct vcpu_svm *svm) +{ + 
/* Treat an INVD instruction as a NOP and just skip it. */ + return kvm_skip_emulated_instruction(&svm->vcpu); +} + static int invlpg_interception(struct vcpu_svm *svm) { if (!static_cpu_has(X86_FEATURE_DECODEASSISTS)) @@ -2774,7 +2780,7 @@ static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = { [SVM_EXIT_RDPMC] = rdpmc_interception, [SVM_EXIT_CPUID] = cpuid_interception, [SVM_EXIT_IRET] = iret_interception, - [SVM_EXIT_INVD] = emulate_on_interception, + [SVM_EXIT_INVD] = invd_interception, [SVM_EXIT_PAUSE] = pause_interception, [SVM_EXIT_HLT] = halt_interception, [SVM_EXIT_INVLPG] = invlpg_interception, diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c index 8646a797b7a8..96979c09ebd1 100644 --- a/arch/x86/kvm/vmx/vmx.c +++ b/arch/x86/kvm/vmx/vmx.c @@ -129,6 +129,9 @@ static bool __read_mostly enable_preemption_timer = 1; module_param_named(preemption_timer, enable_preemption_timer, bool, S_IRUGO); #endif +extern bool __read_mostly allow_smaller_maxphyaddr; +module_param(allow_smaller_maxphyaddr, bool, S_IRUGO); + #define KVM_VM_CR0_ALWAYS_OFF (X86_CR0_NW | X86_CR0_CD) #define KVM_VM_CR0_ALWAYS_ON_UNRESTRICTED_GUEST X86_CR0_NE #define KVM_VM_CR0_ALWAYS_ON \ @@ -791,6 +794,18 @@ void update_exception_bitmap(struct kvm_vcpu *vcpu) */ if (is_guest_mode(vcpu)) eb |= get_vmcs12(vcpu)->exception_bitmap; + else { + /* + * If EPT is enabled, #PF is only trapped if MAXPHYADDR is mismatched + * between guest and host. In that case we only care about present + * faults. For vmcs02, however, PFEC_MASK and PFEC_MATCH are set in + * prepare_vmcs02_rare. + */ + bool selective_pf_trap = enable_ept && (eb & (1u << PF_VECTOR)); + int mask = selective_pf_trap ? PFERR_PRESENT_MASK : 0; + vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, mask); + vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, mask); + } vmcs_write32(EXCEPTION_BITMAP, eb); } @@ -4352,16 +4367,6 @@ static void init_vmcs(struct vcpu_vmx *vmx) vmx->pt_desc.guest.output_mask = 0x7F; vmcs_write64(GUEST_IA32_RTIT_CTL, 0); } - - /* - * If EPT is enabled, #PF is only trapped if MAXPHYADDR is mismatched - * between guest and host. In that case we only care about present - * faults. - */ - if (enable_ept) { - vmcs_write32(PAGE_FAULT_ERROR_CODE_MASK, PFERR_PRESENT_MASK); - vmcs_write32(PAGE_FAULT_ERROR_CODE_MATCH, PFERR_PRESENT_MASK); - } } static void vmx_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event) @@ -4803,6 +4808,7 @@ static int handle_exception_nmi(struct kvm_vcpu *vcpu) * EPT will cause page fault only if we need to * detect illegal GPAs. */ + WARN_ON_ONCE(!allow_smaller_maxphyaddr); kvm_fixup_and_inject_pf_error(vcpu, cr2, error_code); return 1; } else @@ -5331,7 +5337,7 @@ static int handle_ept_violation(struct kvm_vcpu *vcpu) * would also use advanced VM-exit information for EPT violations to * reconstruct the page fault error code. 
*/ - if (unlikely(kvm_mmu_is_illegal_gpa(vcpu, gpa))) + if (unlikely(allow_smaller_maxphyaddr && kvm_mmu_is_illegal_gpa(vcpu, gpa))) return kvm_emulate_instruction(vcpu, 0); return kvm_mmu_page_fault(vcpu, gpa, error_code, NULL, 0); @@ -8305,11 +8311,12 @@ static int __init vmx_init(void) vmx_check_vmcs12_offsets(); /* - * Intel processors don't have problems with - * GUEST_MAXPHYADDR < HOST_MAXPHYADDR so enable - * it for VMX by default + * Shadow paging doesn't have a (further) performance penalty + * from GUEST_MAXPHYADDR < HOST_MAXPHYADDR so enable it + * by default */ - allow_smaller_maxphyaddr = true; + if (!enable_ept) + allow_smaller_maxphyaddr = true; return 0; } diff --git a/arch/x86/kvm/vmx/vmx.h b/arch/x86/kvm/vmx/vmx.h index a2f82127c170..a0e47720f60c 100644 --- a/arch/x86/kvm/vmx/vmx.h +++ b/arch/x86/kvm/vmx/vmx.h @@ -552,7 +552,10 @@ static inline bool vmx_has_waitpkg(struct vcpu_vmx *vmx) static inline bool vmx_need_pf_intercept(struct kvm_vcpu *vcpu) { - return !enable_ept || cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits; + if (!enable_ept) + return true; + + return allow_smaller_maxphyaddr && cpuid_maxphyaddr(vcpu) < boot_cpu_data.x86_phys_bits; } void dump_vmcs(void); diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c index 1994602a0851..ce856e0ece84 100644 --- a/arch/x86/kvm/x86.c +++ b/arch/x86/kvm/x86.c @@ -188,7 +188,7 @@ static struct kvm_shared_msrs __percpu *shared_msrs; u64 __read_mostly host_efer; EXPORT_SYMBOL_GPL(host_efer); -bool __read_mostly allow_smaller_maxphyaddr; +bool __read_mostly allow_smaller_maxphyaddr = 0; EXPORT_SYMBOL_GPL(allow_smaller_maxphyaddr); static u64 __read_mostly host_xss; @@ -976,6 +976,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) unsigned long old_cr4 = kvm_read_cr4(vcpu); unsigned long pdptr_bits = X86_CR4_PGE | X86_CR4_PSE | X86_CR4_PAE | X86_CR4_SMEP; + unsigned long mmu_role_bits = pdptr_bits | X86_CR4_SMAP | X86_CR4_PKE; if (kvm_valid_cr4(vcpu, cr4)) return 1; @@ -1003,7 +1004,7 @@ int kvm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4) if (kvm_x86_ops.set_cr4(vcpu, cr4)) return 1; - if (((cr4 ^ old_cr4) & pdptr_bits) || + if (((cr4 ^ old_cr4) & mmu_role_bits) || (!(cr4 & X86_CR4_PCIDE) && (old_cr4 & X86_CR4_PCIDE))) kvm_mmu_reset_context(vcpu); @@ -3221,9 +3222,22 @@ int kvm_get_msr_common(struct kvm_vcpu *vcpu, struct msr_data *msr_info) case MSR_IA32_POWER_CTL: msr_info->data = vcpu->arch.msr_ia32_power_ctl; break; - case MSR_IA32_TSC: - msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + vcpu->arch.tsc_offset; + case MSR_IA32_TSC: { + /* + * Intel SDM states that MSR_IA32_TSC read adds the TSC offset + * even when not intercepted. AMD manual doesn't explicitly + * state this but appears to behave the same. + * + * On userspace reads and writes, however, we unconditionally + * operate L1's TSC value to ensure backwards-compatible + * behavior for migration. + */ + u64 tsc_offset = msr_info->host_initiated ? vcpu->arch.l1_tsc_offset : + vcpu->arch.tsc_offset; + + msr_info->data = kvm_scale_tsc(vcpu, rdtsc()) + tsc_offset; break; + } case MSR_MTRRcap: case 0x200 ... 
0x2ff: return kvm_mtrr_get_msr(vcpu, msr_info->index, &msr_info->data); diff --git a/arch/x86/lib/usercopy_64.c b/arch/x86/lib/usercopy_64.c index b0dfac3d3df7..1847e993ac63 100644 --- a/arch/x86/lib/usercopy_64.c +++ b/arch/x86/lib/usercopy_64.c @@ -120,7 +120,7 @@ long __copy_user_flushcache(void *dst, const void __user *src, unsigned size) */ if (size < 8) { if (!IS_ALIGNED(dest, 4) || size != 4) - clean_cache_range(dst, 1); + clean_cache_range(dst, size); } else { if (!IS_ALIGNED(dest, 8)) { dest = ALIGN(dest, boot_cpu_data.x86_clflush_size); diff --git a/block/blk-mq.c b/block/blk-mq.c index b3d2785eefe9..cdced4aca2e8 100644 --- a/block/blk-mq.c +++ b/block/blk-mq.c @@ -1412,6 +1412,11 @@ out: hctx->dispatched[queued_to_index(queued)]++; + /* If we didn't flush the entire list, we could have told the driver + * there was more coming, but that turned out to be a lie. + */ + if ((!list_empty(list) || errors) && q->mq_ops->commit_rqs && queued) + q->mq_ops->commit_rqs(hctx); /* * Any items that need requeuing? Stuff them into hctx->dispatch, * that is where we will continue on next queue run. @@ -1425,14 +1430,6 @@ out: blk_mq_release_budgets(q, nr_budgets); - /* - * If we didn't flush the entire list, we could have told - * the driver there was more coming, but that turned out to - * be a lie. - */ - if (q->mq_ops->commit_rqs && queued) - q->mq_ops->commit_rqs(hctx); - spin_lock(&hctx->lock); list_splice_tail_init(list, &hctx->dispatch); spin_unlock(&hctx->lock); @@ -2079,6 +2076,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, struct list_head *list) { int queued = 0; + int errors = 0; while (!list_empty(list)) { blk_status_t ret; @@ -2095,6 +2093,7 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, break; } blk_mq_end_request(rq, ret); + errors++; } else queued++; } @@ -2104,7 +2103,8 @@ void blk_mq_try_issue_list_directly(struct blk_mq_hw_ctx *hctx, * the driver there was more coming, but that turned out to * be a lie. */ - if (!list_empty(list) && hctx->queue->mq_ops->commit_rqs && queued) + if ((!list_empty(list) || errors) && + hctx->queue->mq_ops->commit_rqs && queued) hctx->queue->mq_ops->commit_rqs(hctx); } diff --git a/block/blk-settings.c b/block/blk-settings.c index 76a7e03bcd6c..34b721a2743a 100644 --- a/block/blk-settings.c +++ b/block/blk-settings.c @@ -801,6 +801,52 @@ bool blk_queue_can_use_dma_map_merging(struct request_queue *q, } EXPORT_SYMBOL_GPL(blk_queue_can_use_dma_map_merging); +/** + * blk_queue_set_zoned - configure a disk queue zoned model. + * @disk: the gendisk of the queue to configure + * @model: the zoned model to set + * + * Set the zoned model of the request queue of @disk according to @model. + * When @model is BLK_ZONED_HM (host managed), this should be called only + * if zoned block device support is enabled (CONFIG_BLK_DEV_ZONED option). + * If @model specifies BLK_ZONED_HA (host aware), the effective model used + * depends on CONFIG_BLK_DEV_ZONED settings and on the existence of partitions + * on the disk. + */ +void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model) +{ + switch (model) { + case BLK_ZONED_HM: + /* + * Host managed devices are supported only if + * CONFIG_BLK_DEV_ZONED is enabled. 
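As a hedged usage sketch of the blk_queue_set_zoned() helper documented just above (the caller shown here is hypothetical; as the kernel-doc notes, host-managed support still depends on CONFIG_BLK_DEV_ZONED):

#include <linux/blkdev.h>

/* Hypothetical driver setup path: declare the disk host-managed zoned
 * before it is added; for host-aware media BLK_ZONED_HA could be passed
 * instead and the helper decides the effective model. */
static void example_declare_zoned(struct gendisk *disk)
{
	blk_queue_set_zoned(disk, BLK_ZONED_HM);
}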
+ */ + WARN_ON_ONCE(!IS_ENABLED(CONFIG_BLK_DEV_ZONED)); + break; + case BLK_ZONED_HA: + /* + * Host aware devices can be treated either as regular block + * devices (similar to drive managed devices) or as zoned block + * devices to take advantage of the zone command set, similarly + * to host managed devices. We try the latter if there are no + * partitions and zoned block device support is enabled, else + * we do nothing special as far as the block layer is concerned. + */ + if (!IS_ENABLED(CONFIG_BLK_DEV_ZONED) || + disk_has_partitions(disk)) + model = BLK_ZONED_NONE; + break; + case BLK_ZONED_NONE: + default: + if (WARN_ON_ONCE(model != BLK_ZONED_NONE)) + model = BLK_ZONED_NONE; + break; + } + + disk->queue->limits.zoned = model; +} +EXPORT_SYMBOL_GPL(blk_queue_set_zoned); + static int __init blk_settings_init(void) { blk_max_low_pfn = max_low_pfn - 1; diff --git a/drivers/acpi/bus.c b/drivers/acpi/bus.c index 54002670cb7a..53cc1e0cef21 100644 --- a/drivers/acpi/bus.c +++ b/drivers/acpi/bus.c @@ -551,6 +551,7 @@ struct device *acpi_get_first_physical_node(struct acpi_device *adev) mutex_unlock(physical_node_lock); return phys_dev; } +EXPORT_SYMBOL_GPL(acpi_get_first_physical_node); static struct acpi_device *acpi_primary_dev_companion(struct acpi_device *adev, const struct device *dev) diff --git a/drivers/acpi/processor_idle.c b/drivers/acpi/processor_idle.c index 7ecb90e90afd..f66236cff69b 100644 --- a/drivers/acpi/processor_idle.c +++ b/drivers/acpi/processor_idle.c @@ -176,6 +176,7 @@ static void lapic_timer_propagate_broadcast(struct acpi_processor *pr) { } static bool lapic_timer_needs_broadcast(struct acpi_processor *pr, struct acpi_processor_cx *cx) { + return false; } #endif diff --git a/drivers/atm/eni.c b/drivers/atm/eni.c index 39be444534d0..316a9947541f 100644 --- a/drivers/atm/eni.c +++ b/drivers/atm/eni.c @@ -2224,7 +2224,7 @@ static int eni_init_one(struct pci_dev *pci_dev, rc = dma_set_mask_and_coherent(&pci_dev->dev, DMA_BIT_MASK(32)); if (rc < 0) - goto out; + goto err_disable; rc = -ENOMEM; eni_dev = kmalloc(sizeof(struct eni_dev), GFP_KERNEL); diff --git a/drivers/base/node.c b/drivers/base/node.c index 508b80f6329b..50af16e68d98 100644 --- a/drivers/base/node.c +++ b/drivers/base/node.c @@ -761,14 +761,36 @@ static int __ref get_nid_for_pfn(unsigned long pfn) return pfn_to_nid(pfn); } +static int do_register_memory_block_under_node(int nid, + struct memory_block *mem_blk) +{ + int ret; + + /* + * If this memory block spans multiple nodes, we only indicate + * the last processed node. 
+ */ + mem_blk->nid = nid; + + ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj, + &mem_blk->dev.kobj, + kobject_name(&mem_blk->dev.kobj)); + if (ret) + return ret; + + return sysfs_create_link_nowarn(&mem_blk->dev.kobj, + &node_devices[nid]->dev.kobj, + kobject_name(&node_devices[nid]->dev.kobj)); +} + /* register memory section under specified node if it spans that node */ -static int register_mem_sect_under_node(struct memory_block *mem_blk, - void *arg) +static int register_mem_block_under_node_early(struct memory_block *mem_blk, + void *arg) { unsigned long memory_block_pfns = memory_block_size_bytes() / PAGE_SIZE; unsigned long start_pfn = section_nr_to_pfn(mem_blk->start_section_nr); unsigned long end_pfn = start_pfn + memory_block_pfns - 1; - int ret, nid = *(int *)arg; + int nid = *(int *)arg; unsigned long pfn; for (pfn = start_pfn; pfn <= end_pfn; pfn++) { @@ -785,39 +807,34 @@ static int register_mem_sect_under_node(struct memory_block *mem_blk, } /* - * We need to check if page belongs to nid only for the boot - * case, during hotplug we know that all pages in the memory - * block belong to the same node. - */ - if (system_state == SYSTEM_BOOTING) { - page_nid = get_nid_for_pfn(pfn); - if (page_nid < 0) - continue; - if (page_nid != nid) - continue; - } - - /* - * If this memory block spans multiple nodes, we only indicate - * the last processed node. + * We need to check if page belongs to nid only at the boot + * case because node's ranges can be interleaved. */ - mem_blk->nid = nid; - - ret = sysfs_create_link_nowarn(&node_devices[nid]->dev.kobj, - &mem_blk->dev.kobj, - kobject_name(&mem_blk->dev.kobj)); - if (ret) - return ret; + page_nid = get_nid_for_pfn(pfn); + if (page_nid < 0) + continue; + if (page_nid != nid) + continue; - return sysfs_create_link_nowarn(&mem_blk->dev.kobj, - &node_devices[nid]->dev.kobj, - kobject_name(&node_devices[nid]->dev.kobj)); + return do_register_memory_block_under_node(nid, mem_blk); } /* mem section does not span the specified node */ return 0; } /* + * During hotplug we know that all pages in the memory block belong to the same + * node. + */ +static int register_mem_block_under_node_hotplug(struct memory_block *mem_blk, + void *arg) +{ + int nid = *(int *)arg; + + return do_register_memory_block_under_node(nid, mem_blk); +} + +/* * Unregister a memory block device under the node it spans. Memory blocks * with multiple nodes cannot be offlined and therefore also never be removed. 
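The drivers/base/node.c rework above splits memory-block registration into an early variant, which validates the node of every pfn because boot-time ranges can be interleaved, and a hotplug variant, which trusts that the whole block belongs to one node. A minimal caller-side sketch, assuming the reworked link_mem_sections() signature used in this series, which takes an explicit enum meminit_context; the wrapper itself is hypothetical:

#include <linux/mmzone.h>
#include <linux/node.h>

/* Hypothetical wrapper: pick the registration context explicitly when
 * linking a pfn range to a node. */
static int example_link_range(int nid, unsigned long start_pfn,
			      unsigned long nr_pages, bool hotplug)
{
	return link_mem_sections(nid, start_pfn, start_pfn + nr_pages,
				 hotplug ? MEMINIT_HOTPLUG : MEMINIT_EARLY);
}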
*/ @@ -832,11 +849,19 @@ void unregister_memory_block_under_nodes(struct memory_block *mem_blk) kobject_name(&node_devices[mem_blk->nid]->dev.kobj)); } -int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn) +int link_mem_sections(int nid, unsigned long start_pfn, unsigned long end_pfn, + enum meminit_context context) { + walk_memory_blocks_func_t func; + + if (context == MEMINIT_HOTPLUG) + func = register_mem_block_under_node_hotplug; + else + func = register_mem_block_under_node_early; + return walk_memory_blocks(PFN_PHYS(start_pfn), PFN_PHYS(end_pfn - start_pfn), (void *)&nid, - register_mem_sect_under_node); + func); } #ifdef CONFIG_HUGETLBFS diff --git a/drivers/base/regmap/internal.h b/drivers/base/regmap/internal.h index 3d80c4b43f72..7be2fcfeea52 100644 --- a/drivers/base/regmap/internal.h +++ b/drivers/base/regmap/internal.h @@ -217,7 +217,7 @@ struct regmap_field { #ifdef CONFIG_DEBUG_FS extern void regmap_debugfs_initcall(void); -extern void regmap_debugfs_init(struct regmap *map, const char *name); +extern void regmap_debugfs_init(struct regmap *map); extern void regmap_debugfs_exit(struct regmap *map); static inline void regmap_debugfs_disable(struct regmap *map) @@ -227,7 +227,7 @@ static inline void regmap_debugfs_disable(struct regmap *map) #else static inline void regmap_debugfs_initcall(void) { } -static inline void regmap_debugfs_init(struct regmap *map, const char *name) { } +static inline void regmap_debugfs_init(struct regmap *map) { } static inline void regmap_debugfs_exit(struct regmap *map) { } static inline void regmap_debugfs_disable(struct regmap *map) { } #endif @@ -259,7 +259,7 @@ bool regcache_set_val(struct regmap *map, void *base, unsigned int idx, int regcache_lookup_reg(struct regmap *map, unsigned int reg); int _regmap_raw_write(struct regmap *map, unsigned int reg, - const void *val, size_t val_len); + const void *val, size_t val_len, bool noinc); void regmap_async_complete_cb(struct regmap_async *async, int ret); diff --git a/drivers/base/regmap/regcache.c b/drivers/base/regmap/regcache.c index a93cafd7be4f..7f4b3b62492c 100644 --- a/drivers/base/regmap/regcache.c +++ b/drivers/base/regmap/regcache.c @@ -717,7 +717,7 @@ static int regcache_sync_block_raw_flush(struct regmap *map, const void **data, map->cache_bypass = true; - ret = _regmap_raw_write(map, base, *data, count * val_bytes); + ret = _regmap_raw_write(map, base, *data, count * val_bytes, false); if (ret) dev_err(map->dev, "Unable to sync registers %#x-%#x. %d\n", base, cur - map->reg_stride, ret); diff --git a/drivers/base/regmap/regmap-debugfs.c b/drivers/base/regmap/regmap-debugfs.c index f58baff2be0a..b6d63ef16b44 100644 --- a/drivers/base/regmap/regmap-debugfs.c +++ b/drivers/base/regmap/regmap-debugfs.c @@ -17,7 +17,6 @@ struct regmap_debugfs_node { struct regmap *map; - const char *name; struct list_head link; }; @@ -544,11 +543,12 @@ static const struct file_operations regmap_cache_bypass_fops = { .write = regmap_cache_bypass_write_file, }; -void regmap_debugfs_init(struct regmap *map, const char *name) +void regmap_debugfs_init(struct regmap *map) { struct rb_node *next; struct regmap_range_node *range_node; const char *devname = "dummy"; + const char *name = map->name; /* * Userspace can initiate reads from the hardware over debugfs. 
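The regmap hunks above begin threading a noinc flag through the internal raw write path so that a non-incrementing access selects the register page for a single register rather than for the full buffer length. A hedged example of the public entry point this serves, regmap_noinc_write(), streaming a buffer into a FIFO-style register; the register address is made up and the map's writeable_noinc_reg callback is assumed to accept it:

#include <linux/regmap.h>

/* Hypothetical: push a buffer into a non-incrementing FIFO register. */
static int example_fifo_fill(struct regmap *map, const u8 *buf, size_t len)
{
	return regmap_noinc_write(map, 0x40 /* hypothetical FIFO register */,
				  buf, len);
}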
@@ -569,7 +569,6 @@ void regmap_debugfs_init(struct regmap *map, const char *name) if (!node) return; node->map = map; - node->name = name; mutex_lock(&regmap_debugfs_early_lock); list_add(&node->link, &regmap_debugfs_early_list); mutex_unlock(&regmap_debugfs_early_lock); @@ -679,7 +678,7 @@ void regmap_debugfs_initcall(void) mutex_lock(&regmap_debugfs_early_lock); list_for_each_entry_safe(node, tmp, &regmap_debugfs_early_list, link) { - regmap_debugfs_init(node->map, node->name); + regmap_debugfs_init(node->map); list_del(&node->link); kfree(node); } diff --git a/drivers/base/regmap/regmap.c b/drivers/base/regmap/regmap.c index e93700af7e6e..b71f9ecddff5 100644 --- a/drivers/base/regmap/regmap.c +++ b/drivers/base/regmap/regmap.c @@ -581,14 +581,34 @@ static void regmap_range_exit(struct regmap *map) kfree(map->selector_work_buf); } +static int regmap_set_name(struct regmap *map, const struct regmap_config *config) +{ + if (config->name) { + const char *name = kstrdup_const(config->name, GFP_KERNEL); + + if (!name) + return -ENOMEM; + + kfree_const(map->name); + map->name = name; + } + + return 0; +} + int regmap_attach_dev(struct device *dev, struct regmap *map, const struct regmap_config *config) { struct regmap **m; + int ret; map->dev = dev; - regmap_debugfs_init(map, config->name); + ret = regmap_set_name(map, config); + if (ret) + return ret; + + regmap_debugfs_init(map); /* Add a devres resource for dev_get_regmap() */ m = devres_alloc(dev_get_regmap_release, sizeof(*m), GFP_KERNEL); @@ -687,13 +707,9 @@ struct regmap *__regmap_init(struct device *dev, goto err; } - if (config->name) { - map->name = kstrdup_const(config->name, GFP_KERNEL); - if (!map->name) { - ret = -ENOMEM; - goto err_map; - } - } + ret = regmap_set_name(map, config); + if (ret) + goto err_map; if (config->disable_locking) { map->lock = map->unlock = regmap_lock_unlock_none; @@ -1137,7 +1153,7 @@ skip_format_initialization: if (ret != 0) goto err_regcache; } else { - regmap_debugfs_init(map, config->name); + regmap_debugfs_init(map); } return map; @@ -1297,6 +1313,8 @@ EXPORT_SYMBOL_GPL(regmap_field_free); */ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config) { + int ret; + regcache_exit(map); regmap_debugfs_exit(map); @@ -1309,7 +1327,11 @@ int regmap_reinit_cache(struct regmap *map, const struct regmap_config *config) map->readable_noinc_reg = config->readable_noinc_reg; map->cache_type = config->cache_type; - regmap_debugfs_init(map, config->name); + ret = regmap_set_name(map, config); + if (ret) + return ret; + + regmap_debugfs_init(map); map->cache_bypass = false; map->cache_only = false; @@ -1464,7 +1486,7 @@ static void regmap_set_work_buf_flag_mask(struct regmap *map, int max_bytes, } static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg, - const void *val, size_t val_len) + const void *val, size_t val_len, bool noinc) { struct regmap_range_node *range; unsigned long flags; @@ -1523,7 +1545,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg, win_residue, val_len / map->format.val_bytes); ret = _regmap_raw_write_impl(map, reg, val, win_residue * - map->format.val_bytes); + map->format.val_bytes, noinc); if (ret != 0) return ret; @@ -1537,7 +1559,7 @@ static int _regmap_raw_write_impl(struct regmap *map, unsigned int reg, win_residue = range->window_len - win_offset; } - ret = _regmap_select_page(map, &reg, range, val_num); + ret = _regmap_select_page(map, &reg, range, noinc ?
1 : val_num); if (ret != 0) return ret; } @@ -1745,7 +1767,8 @@ static int _regmap_bus_raw_write(void *context, unsigned int reg, map->work_buf + map->format.reg_bytes + map->format.pad_bytes, - map->format.val_bytes); + map->format.val_bytes, + false); } static inline void *_regmap_map_get_context(struct regmap *map) @@ -1839,7 +1862,7 @@ int regmap_write_async(struct regmap *map, unsigned int reg, unsigned int val) EXPORT_SYMBOL_GPL(regmap_write_async); int _regmap_raw_write(struct regmap *map, unsigned int reg, - const void *val, size_t val_len) + const void *val, size_t val_len, bool noinc) { size_t val_bytes = map->format.val_bytes; size_t val_count = val_len / val_bytes; @@ -1860,7 +1883,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg, /* Write as many bytes as possible with chunk_size */ for (i = 0; i < chunk_count; i++) { - ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes); + ret = _regmap_raw_write_impl(map, reg, val, chunk_bytes, noinc); if (ret) return ret; @@ -1871,7 +1894,7 @@ int _regmap_raw_write(struct regmap *map, unsigned int reg, /* Write remaining bytes */ if (val_len) - ret = _regmap_raw_write_impl(map, reg, val, val_len); + ret = _regmap_raw_write_impl(map, reg, val, val_len, noinc); return ret; } @@ -1904,7 +1927,7 @@ int regmap_raw_write(struct regmap *map, unsigned int reg, map->lock(map->lock_arg); - ret = _regmap_raw_write(map, reg, val, val_len); + ret = _regmap_raw_write(map, reg, val, val_len, false); map->unlock(map->lock_arg); @@ -1962,7 +1985,7 @@ int regmap_noinc_write(struct regmap *map, unsigned int reg, write_len = map->max_raw_write; else write_len = val_len; - ret = _regmap_raw_write(map, reg, val, write_len); + ret = _regmap_raw_write(map, reg, val, write_len, true); if (ret) goto out_unlock; val = ((u8 *)val) + write_len; @@ -2439,7 +2462,7 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg, map->async = true; - ret = _regmap_raw_write(map, reg, val, val_len); + ret = _regmap_raw_write(map, reg, val, val_len, false); map->async = false; @@ -2450,7 +2473,7 @@ int regmap_raw_write_async(struct regmap *map, unsigned int reg, EXPORT_SYMBOL_GPL(regmap_raw_write_async); static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, - unsigned int val_len) + unsigned int val_len, bool noinc) { struct regmap_range_node *range; int ret; @@ -2463,7 +2486,7 @@ static int _regmap_raw_read(struct regmap *map, unsigned int reg, void *val, range = _regmap_range_lookup(map, reg); if (range) { ret = _regmap_select_page(map, &reg, range, - val_len / map->format.val_bytes); + noinc ?
1 : val_len / map->format.val_bytes); if (ret != 0) return ret; } @@ -2501,7 +2524,7 @@ static int _regmap_bus_read(void *context, unsigned int reg, if (!map->format.parse_val) return -EINVAL; - ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes); + ret = _regmap_raw_read(map, reg, work_val, map->format.val_bytes, false); if (ret == 0) *val = map->format.parse_val(work_val); @@ -2617,7 +2640,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, /* Read bytes that fit into whole chunks */ for (i = 0; i < chunk_count; i++) { - ret = _regmap_raw_read(map, reg, val, chunk_bytes); + ret = _regmap_raw_read(map, reg, val, chunk_bytes, false); if (ret != 0) goto out; @@ -2628,7 +2651,7 @@ int regmap_raw_read(struct regmap *map, unsigned int reg, void *val, /* Read remaining bytes */ if (val_len) { - ret = _regmap_raw_read(map, reg, val, val_len); + ret = _regmap_raw_read(map, reg, val, val_len, false); if (ret != 0) goto out; } @@ -2703,7 +2726,7 @@ int regmap_noinc_read(struct regmap *map, unsigned int reg, read_len = map->max_raw_read; else read_len = val_len; - ret = _regmap_raw_read(map, reg, val, read_len); + ret = _regmap_raw_read(map, reg, val, read_len, true); if (ret) goto out_unlock; val = ((u8 *)val) + read_len; diff --git a/drivers/bluetooth/ath3k.c b/drivers/bluetooth/ath3k.c index 4ce270513695..759d7828931d 100644 --- a/drivers/bluetooth/ath3k.c +++ b/drivers/bluetooth/ath3k.c @@ -212,19 +212,16 @@ static int ath3k_load_firmware(struct usb_device *udev, BT_DBG("udev %p", udev); - pipe = usb_sndctrlpipe(udev, 0); - send_buf = kmalloc(BULK_SIZE, GFP_KERNEL); if (!send_buf) { BT_ERR("Can't allocate memory chunk for firmware"); return -ENOMEM; } - memcpy(send_buf, firmware->data, FW_HDR_SIZE); - err = usb_control_msg(udev, pipe, USB_REQ_DFU_DNLOAD, USB_TYPE_VENDOR, - 0, 0, send_buf, FW_HDR_SIZE, - USB_CTRL_SET_TIMEOUT); - if (err < 0) { + err = usb_control_msg_send(udev, 0, USB_REQ_DFU_DNLOAD, USB_TYPE_VENDOR, + 0, 0, firmware->data, FW_HDR_SIZE, + USB_CTRL_SET_TIMEOUT, GFP_KERNEL); + if (err) { BT_ERR("Can't change to loading configuration err"); goto error; } @@ -259,44 +256,19 @@ error: static int ath3k_get_state(struct usb_device *udev, unsigned char *state) { - int ret, pipe = 0; - char *buf; - - buf = kmalloc(sizeof(*buf), GFP_KERNEL); - if (!buf) - return -ENOMEM; - - pipe = usb_rcvctrlpipe(udev, 0); - ret = usb_control_msg(udev, pipe, ATH3K_GETSTATE, - USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, - buf, sizeof(*buf), USB_CTRL_SET_TIMEOUT); - - *state = *buf; - kfree(buf); - - return ret; + return usb_control_msg_recv(udev, 0, ATH3K_GETSTATE, + USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, + state, 1, USB_CTRL_SET_TIMEOUT, + GFP_KERNEL); } static int ath3k_get_version(struct usb_device *udev, struct ath3k_version *version) { - int ret, pipe = 0; - struct ath3k_version *buf; - const int size = sizeof(*buf); - - buf = kmalloc(size, GFP_KERNEL); - if (!buf) - return -ENOMEM; - - pipe = usb_rcvctrlpipe(udev, 0); - ret = usb_control_msg(udev, pipe, ATH3K_GETVERSION, - USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, - buf, size, USB_CTRL_SET_TIMEOUT); - - memcpy(version, buf, size); - kfree(buf); - - return ret; + return usb_control_msg_recv(udev, 0, ATH3K_GETVERSION, + USB_TYPE_VENDOR | USB_DIR_IN, 0, 0, + version, sizeof(*version), USB_CTRL_SET_TIMEOUT, + GFP_KERNEL); } static int ath3k_load_fwfile(struct usb_device *udev, @@ -316,13 +288,11 @@ static int ath3k_load_fwfile(struct usb_device *udev, } size = min_t(uint, count, FW_HDR_SIZE); - memcpy(send_buf, firmware->data, size); - 
pipe = usb_sndctrlpipe(udev, 0); - ret = usb_control_msg(udev, pipe, ATH3K_DNLOAD, - USB_TYPE_VENDOR, 0, 0, send_buf, - size, USB_CTRL_SET_TIMEOUT); - if (ret < 0) { + ret = usb_control_msg_send(udev, 0, ATH3K_DNLOAD, USB_TYPE_VENDOR, 0, 0, + firmware->data, size, USB_CTRL_SET_TIMEOUT, + GFP_KERNEL); + if (ret) { BT_ERR("Can't change to loading configuration err"); kfree(send_buf); return ret; @@ -355,23 +325,19 @@ static int ath3k_load_fwfile(struct usb_device *udev, return 0; } -static int ath3k_switch_pid(struct usb_device *udev) +static void ath3k_switch_pid(struct usb_device *udev) { - int pipe = 0; - - pipe = usb_sndctrlpipe(udev, 0); - return usb_control_msg(udev, pipe, USB_REG_SWITCH_VID_PID, - USB_TYPE_VENDOR, 0, 0, - NULL, 0, USB_CTRL_SET_TIMEOUT); + usb_control_msg_send(udev, 0, USB_REG_SWITCH_VID_PID, USB_TYPE_VENDOR, + 0, 0, NULL, 0, USB_CTRL_SET_TIMEOUT, GFP_KERNEL); } static int ath3k_set_normal_mode(struct usb_device *udev) { unsigned char fw_state; - int pipe = 0, ret; + int ret; ret = ath3k_get_state(udev, &fw_state); - if (ret < 0) { + if (ret) { BT_ERR("Can't get state to change to normal mode err"); return ret; } @@ -381,10 +347,9 @@ static int ath3k_set_normal_mode(struct usb_device *udev) return 0; } - pipe = usb_sndctrlpipe(udev, 0); - return usb_control_msg(udev, pipe, ATH3K_SET_NORMAL_MODE, - USB_TYPE_VENDOR, 0, 0, - NULL, 0, USB_CTRL_SET_TIMEOUT); + return usb_control_msg_send(udev, 0, ATH3K_SET_NORMAL_MODE, + USB_TYPE_VENDOR, 0, 0, NULL, 0, + USB_CTRL_SET_TIMEOUT, GFP_KERNEL); } static int ath3k_load_patch(struct usb_device *udev) @@ -397,7 +362,7 @@ static int ath3k_load_patch(struct usb_device *udev) int ret; ret = ath3k_get_state(udev, &fw_state); - if (ret < 0) { + if (ret) { BT_ERR("Can't get state to change to load ram patch err"); return ret; } @@ -408,7 +373,7 @@ static int ath3k_load_patch(struct usb_device *udev) } ret = ath3k_get_version(udev, &fw_version); - if (ret < 0) { + if (ret) { BT_ERR("Can't get version to change to load ram patch err"); return ret; } @@ -449,13 +414,13 @@ static int ath3k_load_syscfg(struct usb_device *udev) int clk_value, ret; ret = ath3k_get_state(udev, &fw_state); - if (ret < 0) { + if (ret) { BT_ERR("Can't get state to change to load configuration err"); return -EBUSY; } ret = ath3k_get_version(udev, &fw_version); - if (ret < 0) { + if (ret) { BT_ERR("Can't get version to change to load ram patch err"); return ret; } @@ -529,7 +494,7 @@ static int ath3k_probe(struct usb_interface *intf, return ret; } ret = ath3k_set_normal_mode(udev); - if (ret < 0) { + if (ret) { BT_ERR("Set normal mode failed"); return ret; } diff --git a/drivers/clk/samsung/clk-exynos4.c b/drivers/clk/samsung/clk-exynos4.c index 51564fc23c63..f4086287bb71 100644 --- a/drivers/clk/samsung/clk-exynos4.c +++ b/drivers/clk/samsung/clk-exynos4.c @@ -927,7 +927,7 @@ static const struct samsung_gate_clock exynos4210_gate_clks[] __initconst = { GATE(CLK_PCIE, "pcie", "aclk133", GATE_IP_FSYS, 14, 0, 0), GATE(CLK_SMMU_PCIE, "smmu_pcie", "aclk133", GATE_IP_FSYS, 18, 0, 0), GATE(CLK_MODEMIF, "modemif", "aclk100", GATE_IP_PERIL, 28, 0, 0), - GATE(CLK_CHIPID, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, 0, 0), + GATE(CLK_CHIPID, "chipid", "aclk100", E4210_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0), GATE(CLK_SYSREG, "sysreg", "aclk100", E4210_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0), GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk100", E4210_GATE_IP_PERIR, 11, 0, @@ -969,7 +969,7 @@ static const struct samsung_gate_clock exynos4x12_gate_clks[] __initconst = { 0), GATE(CLK_TSADC, 
"tsadc", "aclk133", E4X12_GATE_BUS_FSYS1, 16, 0, 0), GATE(CLK_MIPI_HSI, "mipi_hsi", "aclk133", GATE_IP_FSYS, 10, 0, 0), - GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, 0, 0), + GATE(CLK_CHIPID, "chipid", "aclk100", E4X12_GATE_IP_PERIR, 0, CLK_IGNORE_UNUSED, 0), GATE(CLK_SYSREG, "sysreg", "aclk100", E4X12_GATE_IP_PERIR, 1, CLK_IGNORE_UNUSED, 0), GATE(CLK_HDMI_CEC, "hdmi_cec", "aclk100", E4X12_GATE_IP_PERIR, 11, 0, diff --git a/drivers/clk/samsung/clk-exynos5420.c b/drivers/clk/samsung/clk-exynos5420.c index fea33399a632..bd620876544d 100644 --- a/drivers/clk/samsung/clk-exynos5420.c +++ b/drivers/clk/samsung/clk-exynos5420.c @@ -1655,6 +1655,11 @@ static void __init exynos5x_clk_init(struct device_node *np, * main G3D clock enablement status. */ clk_prepare_enable(__clk_lookup("mout_sw_aclk_g3d")); + /* + * Keep top BPLL mux enabled permanently to ensure that DRAM operates + * properly. + */ + clk_prepare_enable(__clk_lookup("mout_bpll")); samsung_clk_of_add_provider(np, ctx); } diff --git a/drivers/clk/socfpga/clk-s10.c b/drivers/clk/socfpga/clk-s10.c index c1dfc9b34e4e..661a8e9bfb9b 100644 --- a/drivers/clk/socfpga/clk-s10.c +++ b/drivers/clk/socfpga/clk-s10.c @@ -209,7 +209,7 @@ static const struct stratix10_perip_cnt_clock s10_main_perip_cnt_clks[] = { { STRATIX10_EMAC_B_FREE_CLK, "emacb_free_clk", NULL, emacb_free_mux, ARRAY_SIZE(emacb_free_mux), 0, 0, 2, 0xB0, 1}, { STRATIX10_EMAC_PTP_FREE_CLK, "emac_ptp_free_clk", NULL, emac_ptp_free_mux, - ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 4, 0xB0, 2}, + ARRAY_SIZE(emac_ptp_free_mux), 0, 0, 2, 0xB0, 2}, { STRATIX10_GPIO_DB_FREE_CLK, "gpio_db_free_clk", NULL, gpio_db_free_mux, ARRAY_SIZE(gpio_db_free_mux), 0, 0, 0, 0xB0, 3}, { STRATIX10_SDMMC_FREE_CLK, "sdmmc_free_clk", NULL, sdmmc_free_mux, diff --git a/drivers/clk/tegra/clk-pll.c b/drivers/clk/tegra/clk-pll.c index f180c055d33f..c5cc0a2dac6f 100644 --- a/drivers/clk/tegra/clk-pll.c +++ b/drivers/clk/tegra/clk-pll.c @@ -1611,9 +1611,6 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw) unsigned long flags = 0; unsigned long input_rate; - if (clk_pll_is_enabled(hw)) - return 0; - input_rate = clk_hw_get_rate(clk_hw_get_parent(hw)); if (_get_table_rate(hw, &sel, pll->params->fixed_rate, input_rate)) @@ -1673,7 +1670,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw) pll_writel(val, PLLE_SS_CTRL, pll); udelay(1); - /* Enable hw control of xusb brick pll */ + /* Enable HW control of XUSB brick PLL */ val = pll_readl_misc(pll); val &= ~PLLE_MISC_IDDQ_SW_CTRL; pll_writel_misc(val, pll); @@ -1696,7 +1693,7 @@ static int clk_plle_tegra114_enable(struct clk_hw *hw) val |= XUSBIO_PLL_CFG0_SEQ_ENABLE; pll_writel(val, XUSBIO_PLL_CFG0, pll); - /* Enable hw control of SATA pll */ + /* Enable HW control of SATA PLL */ val = pll_readl(SATA_PLL_CFG0, pll); val &= ~SATA_PLL_CFG0_PADPLL_RESET_SWCTL; val |= SATA_PLL_CFG0_PADPLL_USE_LOCKDET; diff --git a/drivers/clk/tegra/clk-tegra210-emc.c b/drivers/clk/tegra/clk-tegra210-emc.c index 352a2c3fc374..51fd0ec2a2d0 100644 --- a/drivers/clk/tegra/clk-tegra210-emc.c +++ b/drivers/clk/tegra/clk-tegra210-emc.c @@ -12,6 +12,8 @@ #include <linux/io.h> #include <linux/slab.h> +#include "clk.h" + #define CLK_SOURCE_EMC 0x19c #define CLK_SOURCE_EMC_2X_CLK_SRC GENMASK(31, 29) #define CLK_SOURCE_EMC_MC_EMC_SAME_FREQ BIT(16) diff --git a/drivers/clocksource/h8300_timer8.c b/drivers/clocksource/h8300_timer8.c index 1d740a8c42ab..47114c2a7cb5 100644 --- a/drivers/clocksource/h8300_timer8.c +++ b/drivers/clocksource/h8300_timer8.c @@ -169,7 +169,7 @@ 
static int __init h8300_8timer_init(struct device_node *node) return PTR_ERR(clk); } - ret = ENXIO; + ret = -ENXIO; base = of_iomap(node, 0); if (!base) { pr_err("failed to map registers for clockevent\n"); diff --git a/drivers/clocksource/timer-clint.c b/drivers/clocksource/timer-clint.c index d17367dee02c..6cfe2ab73eb0 100644 --- a/drivers/clocksource/timer-clint.c +++ b/drivers/clocksource/timer-clint.c @@ -38,6 +38,7 @@ static unsigned int clint_timer_irq; #ifdef CONFIG_RISCV_M_MODE u64 __iomem *clint_time_val; +EXPORT_SYMBOL(clint_time_val); #endif static void clint_send_ipi(const struct cpumask *target) diff --git a/drivers/clocksource/timer-gx6605s.c b/drivers/clocksource/timer-gx6605s.c index 80d0939d040b..8d386adbe800 100644 --- a/drivers/clocksource/timer-gx6605s.c +++ b/drivers/clocksource/timer-gx6605s.c @@ -28,6 +28,7 @@ static irqreturn_t gx6605s_timer_interrupt(int irq, void *dev) void __iomem *base = timer_of_base(to_timer_of(ce)); writel_relaxed(GX6605S_STATUS_CLR, base + TIMER_STATUS); + writel_relaxed(0, base + TIMER_INI); ce->event_handler(ce); diff --git a/drivers/clocksource/timer-ti-dm-systimer.c b/drivers/clocksource/timer-ti-dm-systimer.c index f6fd1c1cc527..33b3e8aa2cc5 100644 --- a/drivers/clocksource/timer-ti-dm-systimer.c +++ b/drivers/clocksource/timer-ti-dm-systimer.c @@ -69,12 +69,33 @@ static bool dmtimer_systimer_revision1(struct dmtimer_systimer *t) return !(tidr >> 16); } +static void dmtimer_systimer_enable(struct dmtimer_systimer *t) +{ + u32 val; + + if (dmtimer_systimer_revision1(t)) + val = DMTIMER_TYPE1_ENABLE; + else + val = DMTIMER_TYPE2_ENABLE; + + writel_relaxed(val, t->base + t->sysc); +} + +static void dmtimer_systimer_disable(struct dmtimer_systimer *t) +{ + if (!dmtimer_systimer_revision1(t)) + return; + + writel_relaxed(DMTIMER_TYPE1_DISABLE, t->base + t->sysc); +} + static int __init dmtimer_systimer_type1_reset(struct dmtimer_systimer *t) { void __iomem *syss = t->base + OMAP_TIMER_V1_SYS_STAT_OFFSET; int ret; u32 l; + dmtimer_systimer_enable(t); writel_relaxed(BIT(1) | BIT(2), t->base + t->ifctrl); ret = readl_poll_timeout_atomic(syss, l, l & BIT(0), 100, DMTIMER_RESET_WAIT); @@ -88,6 +109,7 @@ static int __init dmtimer_systimer_type2_reset(struct dmtimer_systimer *t) void __iomem *sysc = t->base + t->sysc; u32 l; + dmtimer_systimer_enable(t); l = readl_relaxed(sysc); l |= BIT(0); writel_relaxed(l, sysc); @@ -336,26 +358,6 @@ static int __init dmtimer_systimer_init_clock(struct dmtimer_systimer *t, return 0; } -static void dmtimer_systimer_enable(struct dmtimer_systimer *t) -{ - u32 val; - - if (dmtimer_systimer_revision1(t)) - val = DMTIMER_TYPE1_ENABLE; - else - val = DMTIMER_TYPE2_ENABLE; - - writel_relaxed(val, t->base + t->sysc); -} - -static void dmtimer_systimer_disable(struct dmtimer_systimer *t) -{ - if (!dmtimer_systimer_revision1(t)) - return; - - writel_relaxed(DMTIMER_TYPE1_DISABLE, t->base + t->sysc); -} - static int __init dmtimer_systimer_setup(struct device_node *np, struct dmtimer_systimer *t) { @@ -409,8 +411,8 @@ static int __init dmtimer_systimer_setup(struct device_node *np, t->wakeup = regbase + _OMAP_TIMER_WAKEUP_EN_OFFSET; t->ifctrl = regbase + _OMAP_TIMER_IF_CTRL_OFFSET; - dmtimer_systimer_enable(t); dmtimer_systimer_reset(t); + dmtimer_systimer_enable(t); pr_debug("dmtimer rev %08x sysc %08x\n", readl_relaxed(t->base), readl_relaxed(t->base + t->sysc)); diff --git a/drivers/cpufreq/intel_pstate.c b/drivers/cpufreq/intel_pstate.c index a827b000ef51..9a515c460a00 100644 --- a/drivers/cpufreq/intel_pstate.c +++ 
b/drivers/cpufreq/intel_pstate.c @@ -2781,6 +2781,7 @@ static int intel_pstate_update_status(const char *buf, size_t size) cpufreq_unregister_driver(intel_pstate_driver); intel_pstate_driver_cleanup(); + return 0; } if (size == 6 && !strncmp(buf, "active", size)) { diff --git a/drivers/cpuidle/cpuidle-psci.c b/drivers/cpuidle/cpuidle-psci.c index 74463841805f..d928b37718bd 100644 --- a/drivers/cpuidle/cpuidle-psci.c +++ b/drivers/cpuidle/cpuidle-psci.c @@ -66,7 +66,7 @@ static int psci_enter_domain_idle_state(struct cpuidle_device *dev, return -1; /* Do runtime PM to manage a hierarchical CPU toplogy. */ - pm_runtime_put_sync_suspend(pd_dev); + RCU_NONIDLE(pm_runtime_put_sync_suspend(pd_dev)); state = psci_get_domain_state(); if (!state) @@ -74,7 +74,7 @@ static int psci_enter_domain_idle_state(struct cpuidle_device *dev, ret = psci_cpu_suspend_enter(state) ? -1 : idx; - pm_runtime_get_sync(pd_dev); + RCU_NONIDLE(pm_runtime_get_sync(pd_dev)); cpu_pm_exit(); diff --git a/drivers/cpuidle/cpuidle.c b/drivers/cpuidle/cpuidle.c index 6c7e5621cf9a..29e84687f3c3 100644 --- a/drivers/cpuidle/cpuidle.c +++ b/drivers/cpuidle/cpuidle.c @@ -142,11 +142,6 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv, time_start = ns_to_ktime(local_clock()); - /* - * trace_suspend_resume() called by tick_freeze() for the last CPU - * executing it contains RCU usage regarded as invalid in the idle - * context, so tell RCU about that. - */ tick_freeze(); /* * The state used here cannot be a "coupled" one, because the "coupled" @@ -159,11 +154,6 @@ static void enter_s2idle_proper(struct cpuidle_driver *drv, target_state->enter_s2idle(dev, drv, index); if (WARN_ON_ONCE(!irqs_disabled())) local_irq_disable(); - /* - * timekeeping_resume() that will be called by tick_unfreeze() for the - * first CPU executing it calls functions containing RCU read-side - * critical sections, so tell RCU about that. - */ if (!(target_state->flags & CPUIDLE_FLAG_RCU_IDLE)) rcu_idle_exit(); tick_unfreeze(); diff --git a/drivers/devfreq/devfreq.c b/drivers/devfreq/devfreq.c index 561d91b2d3bf..071b59fe84d2 100644 --- a/drivers/devfreq/devfreq.c +++ b/drivers/devfreq/devfreq.c @@ -1766,20 +1766,23 @@ static int devfreq_summary_show(struct seq_file *s, void *data) struct devfreq *p_devfreq = NULL; unsigned long cur_freq, min_freq, max_freq; unsigned int polling_ms; + unsigned int timer; - seq_printf(s, "%-30s %-30s %-15s %10s %12s %12s %12s\n", + seq_printf(s, "%-30s %-30s %-15s %-10s %10s %12s %12s %12s\n", "dev", "parent_dev", "governor", + "timer", "polling_ms", "cur_freq_Hz", "min_freq_Hz", "max_freq_Hz"); - seq_printf(s, "%30s %30s %15s %10s %12s %12s %12s\n", + seq_printf(s, "%30s %30s %15s %10s %10s %12s %12s %12s\n", "------------------------------", "------------------------------", "---------------", "----------", + "----------", "------------", "------------", "------------"); @@ -1803,13 +1806,15 @@ static int devfreq_summary_show(struct seq_file *s, void *data) cur_freq = devfreq->previous_freq; get_freq_range(devfreq, &min_freq, &max_freq); polling_ms = devfreq->profile->polling_ms; + timer = devfreq->profile->timer; mutex_unlock(&devfreq->lock); seq_printf(s, - "%-30s %-30s %-15s %10d %12ld %12ld %12ld\n", + "%-30s %-30s %-15s %-10s %10d %12ld %12ld %12ld\n", dev_name(&devfreq->dev), p_devfreq ? dev_name(&p_devfreq->dev) : "null", devfreq->governor_name, + polling_ms ? 
timer_name[timer] : "null", polling_ms, cur_freq, min_freq, diff --git a/drivers/devfreq/tegra30-devfreq.c b/drivers/devfreq/tegra30-devfreq.c index e94a27804c20..dedd39de7367 100644 --- a/drivers/devfreq/tegra30-devfreq.c +++ b/drivers/devfreq/tegra30-devfreq.c @@ -836,7 +836,8 @@ static int tegra_devfreq_probe(struct platform_device *pdev) rate = clk_round_rate(tegra->emc_clock, ULONG_MAX); if (rate < 0) { dev_err(&pdev->dev, "Failed to round clock rate: %ld\n", rate); - return rate; + err = rate; + goto disable_clk; } tegra->max_freq = rate / KHZ; @@ -897,6 +898,7 @@ remove_opps: dev_pm_opp_remove_all_dynamic(&pdev->dev); reset_control_reset(tegra->reset); +disable_clk: clk_disable_unprepare(tegra->clock); return err; diff --git a/drivers/dma-buf/dma-buf.c b/drivers/dma-buf/dma-buf.c index 58564d82a3a2..844967f98866 100644 --- a/drivers/dma-buf/dma-buf.c +++ b/drivers/dma-buf/dma-buf.c @@ -59,6 +59,8 @@ static void dma_buf_release(struct dentry *dentry) struct dma_buf *dmabuf; dmabuf = dentry->d_fsdata; + if (unlikely(!dmabuf)) + return; BUG_ON(dmabuf->vmapping_counter); diff --git a/drivers/dma/dmatest.c b/drivers/dma/dmatest.c index 45d4d92e91db..a819611b8892 100644 --- a/drivers/dma/dmatest.c +++ b/drivers/dma/dmatest.c @@ -129,6 +129,7 @@ struct dmatest_params { * @nr_channels: number of channels under test * @lock: access protection to the fields of this structure * @did_init: module has been initialized completely + * @last_error: test has faced configuration issues */ static struct dmatest_info { /* Test parameters */ @@ -137,6 +138,7 @@ static struct dmatest_info { /* Internal state */ struct list_head channels; unsigned int nr_channels; + int last_error; struct mutex lock; bool did_init; } test_info = { @@ -1184,10 +1186,22 @@ static int dmatest_run_set(const char *val, const struct kernel_param *kp) return ret; } else if (dmatest_run) { if (!is_threaded_test_pending(info)) { - pr_info("No channels configured, continue with any\n"); - if (!is_threaded_test_run(info)) - stop_threaded_test(info); - add_threaded_test(info); + /* + * We have nothing to run. This can be due to: + */ + ret = info->last_error; + if (ret) { + /* 1) Misconfiguration */ + pr_err("Channel misconfigured, can't continue\n"); + mutex_unlock(&info->lock); + return ret; + } else { + /* 2) We rely on defaults */ + pr_info("No channels configured, continue with any\n"); + if (!is_threaded_test_run(info)) + stop_threaded_test(info); + add_threaded_test(info); + } } start_threaded_tests(info); } else { @@ -1204,7 +1218,7 @@ static int dmatest_chan_set(const char *val, const struct kernel_param *kp) struct dmatest_info *info = &test_info; struct dmatest_chan *dtc; char chan_reset_val[20]; - int ret = 0; + int ret; mutex_lock(&info->lock); ret = param_set_copystring(val, kp); @@ -1259,12 +1273,14 @@ static int dmatest_chan_set(const char *val, const struct kernel_param *kp) goto add_chan_err; } + info->last_error = ret; mutex_unlock(&info->lock); return ret; add_chan_err: param_set_copystring(chan_reset_val, kp); + info->last_error = ret; mutex_unlock(&info->lock); return ret; diff --git a/drivers/firmware/Kconfig b/drivers/firmware/Kconfig index fbd785dd0513..4843e94713a4 100644 --- a/drivers/firmware/Kconfig +++ b/drivers/firmware/Kconfig @@ -178,9 +178,8 @@ config ISCSI_IBFT Otherwise, say N. 
config RASPBERRYPI_FIRMWARE - bool "Raspberry Pi Firmware Driver" + tristate "Raspberry Pi Firmware Driver" depends on BCM2835_MBOX - default USB_PCI help This option enables support for communicating with the firmware on the Raspberry Pi. diff --git a/drivers/firmware/raspberrypi.c b/drivers/firmware/raspberrypi.c index 8f2fb4c562da..2371d08bdd17 100644 --- a/drivers/firmware/raspberrypi.c +++ b/drivers/firmware/raspberrypi.c @@ -12,8 +12,6 @@ #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/slab.h> -#include <linux/pci.h> -#include <linux/delay.h> #include <soc/bcm2835/raspberrypi-firmware.h> #define MBOX_MSG(chan, data28) (((data28) & ~0xf) | ((chan) & 0xf)) @@ -21,8 +19,6 @@ #define MBOX_DATA28(msg) ((msg) & ~0xf) #define MBOX_CHAN_PROPERTY 8 -#define VL805_PCI_CONFIG_VERSION_OFFSET 0x50 - static struct platform_device *rpi_hwmon; static struct platform_device *rpi_clk; @@ -301,63 +297,6 @@ struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node) } EXPORT_SYMBOL_GPL(rpi_firmware_get); -/* - * The Raspberry Pi 4 gets its USB functionality from VL805, a PCIe chip that - * implements xHCI. After a PCI reset, VL805's firmware may either be loaded - * directly from an EEPROM or, if not present, by the SoC's co-processor, - * VideoCore. RPi4's VideoCore OS contains both the non public firmware load - * logic and the VL805 firmware blob. This function triggers the aforementioned - * process. - */ -int rpi_firmware_init_vl805(struct pci_dev *pdev) -{ - struct device_node *fw_np; - struct rpi_firmware *fw; - u32 dev_addr, version; - int ret; - - fw_np = of_find_compatible_node(NULL, NULL, - "raspberrypi,bcm2835-firmware"); - if (!fw_np) - return 0; - - fw = rpi_firmware_get(fw_np); - of_node_put(fw_np); - if (!fw) - return -ENODEV; - - /* - * Make sure we don't trigger a firmware load unnecessarily. - * - * If something went wrong with PCI, this whole exercise would be - * futile as VideoCore expects from us a configured PCI bus. Just take - * the faulty version (likely ~0) and let xHCI's registration fail - * further down the line. - */ - pci_read_config_dword(pdev, VL805_PCI_CONFIG_VERSION_OFFSET, &version); - if (version) - goto exit; - - dev_addr = pdev->bus->number << 20 | PCI_SLOT(pdev->devfn) << 15 | - PCI_FUNC(pdev->devfn) << 12; - - ret = rpi_firmware_property(fw, RPI_FIRMWARE_NOTIFY_XHCI_RESET, - &dev_addr, sizeof(dev_addr)); - if (ret) - return ret; - - /* Wait for vl805 to startup */ - usleep_range(200, 1000); - - pci_read_config_dword(pdev, VL805_PCI_CONFIG_VERSION_OFFSET, - &version); -exit: - pci_info(pdev, "VL805 firmware version %08x\n", version); - - return 0; -} -EXPORT_SYMBOL_GPL(rpi_firmware_init_vl805); - static const struct of_device_id rpi_firmware_of_match[] = { { .compatible = "raspberrypi,bcm2835-firmware", }, {}, diff --git a/drivers/gpio/gpio-amd-fch.c b/drivers/gpio/gpio-amd-fch.c index 4e44ba4d7423..2a21354ed6a0 100644 --- a/drivers/gpio/gpio-amd-fch.c +++ b/drivers/gpio/gpio-amd-fch.c @@ -92,7 +92,7 @@ static int amd_fch_gpio_get_direction(struct gpio_chip *gc, unsigned int gpio) ret = (readl_relaxed(ptr) & AMD_FCH_GPIO_FLAG_DIRECTION); spin_unlock_irqrestore(&priv->lock, flags); - return ret ? GPIO_LINE_DIRECTION_IN : GPIO_LINE_DIRECTION_OUT; + return ret ? 
GPIO_LINE_DIRECTION_OUT : GPIO_LINE_DIRECTION_IN; } static void amd_fch_gpio_set(struct gpio_chip *gc, diff --git a/drivers/gpio/gpio-aspeed-sgpio.c b/drivers/gpio/gpio-aspeed-sgpio.c index 3aa45934d60c..64e54f8c30d2 100644 --- a/drivers/gpio/gpio-aspeed-sgpio.c +++ b/drivers/gpio/gpio-aspeed-sgpio.c @@ -17,7 +17,17 @@ #include <linux/spinlock.h> #include <linux/string.h> -#define MAX_NR_SGPIO 80 +/* + * MAX_NR_HW_GPIO represents the number of actual hardware-supported GPIOs (ie, + * slots within the clocked serial GPIO data). Since each HW GPIO is both an + * input and an output, we provide MAX_NR_HW_GPIO * 2 lines on our gpiochip + * device. + * + * We use SGPIO_OUTPUT_OFFSET to define the split between the inputs and + * outputs; the inputs start at line 0, the outputs start at OUTPUT_OFFSET. + */ +#define MAX_NR_HW_SGPIO 80 +#define SGPIO_OUTPUT_OFFSET MAX_NR_HW_SGPIO #define ASPEED_SGPIO_CTRL 0x54 @@ -30,8 +40,8 @@ struct aspeed_sgpio { struct clk *pclk; spinlock_t lock; void __iomem *base; - uint32_t dir_in[3]; int irq; + int n_sgpio; }; struct aspeed_sgpio_bank { @@ -111,31 +121,69 @@ static void __iomem *bank_reg(struct aspeed_sgpio *gpio, } } -#define GPIO_BANK(x) ((x) >> 5) -#define GPIO_OFFSET(x) ((x) & 0x1f) +#define GPIO_BANK(x) ((x % SGPIO_OUTPUT_OFFSET) >> 5) +#define GPIO_OFFSET(x) ((x % SGPIO_OUTPUT_OFFSET) & 0x1f) #define GPIO_BIT(x) BIT(GPIO_OFFSET(x)) static const struct aspeed_sgpio_bank *to_bank(unsigned int offset) { - unsigned int bank = GPIO_BANK(offset); + unsigned int bank; + + bank = GPIO_BANK(offset); WARN_ON(bank >= ARRAY_SIZE(aspeed_sgpio_banks)); return &aspeed_sgpio_banks[bank]; } +static int aspeed_sgpio_init_valid_mask(struct gpio_chip *gc, + unsigned long *valid_mask, unsigned int ngpios) +{ + struct aspeed_sgpio *sgpio = gpiochip_get_data(gc); + int n = sgpio->n_sgpio; + int c = SGPIO_OUTPUT_OFFSET - n; + + WARN_ON(ngpios < MAX_NR_HW_SGPIO * 2); + + /* input GPIOs in the lower range */ + bitmap_set(valid_mask, 0, n); + bitmap_clear(valid_mask, n, c); + + /* output GPIOS above SGPIO_OUTPUT_OFFSET */ + bitmap_set(valid_mask, SGPIO_OUTPUT_OFFSET, n); + bitmap_clear(valid_mask, SGPIO_OUTPUT_OFFSET + n, c); + + return 0; +} + +static void aspeed_sgpio_irq_init_valid_mask(struct gpio_chip *gc, + unsigned long *valid_mask, unsigned int ngpios) +{ + struct aspeed_sgpio *sgpio = gpiochip_get_data(gc); + int n = sgpio->n_sgpio; + + WARN_ON(ngpios < MAX_NR_HW_SGPIO * 2); + + /* input GPIOs in the lower range */ + bitmap_set(valid_mask, 0, n); + bitmap_clear(valid_mask, n, ngpios - n); +} + +static bool aspeed_sgpio_is_input(unsigned int offset) +{ + return offset < SGPIO_OUTPUT_OFFSET; +} + static int aspeed_sgpio_get(struct gpio_chip *gc, unsigned int offset) { struct aspeed_sgpio *gpio = gpiochip_get_data(gc); const struct aspeed_sgpio_bank *bank = to_bank(offset); unsigned long flags; enum aspeed_sgpio_reg reg; - bool is_input; int rc = 0; spin_lock_irqsave(&gpio->lock, flags); - is_input = gpio->dir_in[GPIO_BANK(offset)] & GPIO_BIT(offset); - reg = is_input ? reg_val : reg_rdata; + reg = aspeed_sgpio_is_input(offset) ? 
reg_val : reg_rdata; rc = !!(ioread32(bank_reg(gpio, bank, reg)) & GPIO_BIT(offset)); spin_unlock_irqrestore(&gpio->lock, flags); @@ -143,22 +191,31 @@ static int aspeed_sgpio_get(struct gpio_chip *gc, unsigned int offset) return rc; } -static void sgpio_set_value(struct gpio_chip *gc, unsigned int offset, int val) +static int sgpio_set_value(struct gpio_chip *gc, unsigned int offset, int val) { struct aspeed_sgpio *gpio = gpiochip_get_data(gc); const struct aspeed_sgpio_bank *bank = to_bank(offset); - void __iomem *addr; + void __iomem *addr_r, *addr_w; u32 reg = 0; - addr = bank_reg(gpio, bank, reg_val); - reg = ioread32(addr); + if (aspeed_sgpio_is_input(offset)) + return -EINVAL; + + /* Since this is an output, read the cached value from rdata, then + * update val. */ + addr_r = bank_reg(gpio, bank, reg_rdata); + addr_w = bank_reg(gpio, bank, reg_val); + + reg = ioread32(addr_r); if (val) reg |= GPIO_BIT(offset); else reg &= ~GPIO_BIT(offset); - iowrite32(reg, addr); + iowrite32(reg, addr_w); + + return 0; } static void aspeed_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val) @@ -175,43 +232,28 @@ static void aspeed_sgpio_set(struct gpio_chip *gc, unsigned int offset, int val) static int aspeed_sgpio_dir_in(struct gpio_chip *gc, unsigned int offset) { - struct aspeed_sgpio *gpio = gpiochip_get_data(gc); - unsigned long flags; - - spin_lock_irqsave(&gpio->lock, flags); - gpio->dir_in[GPIO_BANK(offset)] |= GPIO_BIT(offset); - spin_unlock_irqrestore(&gpio->lock, flags); - - return 0; + return aspeed_sgpio_is_input(offset) ? 0 : -EINVAL; } static int aspeed_sgpio_dir_out(struct gpio_chip *gc, unsigned int offset, int val) { struct aspeed_sgpio *gpio = gpiochip_get_data(gc); unsigned long flags; + int rc; - spin_lock_irqsave(&gpio->lock, flags); - - gpio->dir_in[GPIO_BANK(offset)] &= ~GPIO_BIT(offset); - sgpio_set_value(gc, offset, val); + /* No special action is required for setting the direction; we'll + * error-out in sgpio_set_value if this isn't an output GPIO */ + spin_lock_irqsave(&gpio->lock, flags); + rc = sgpio_set_value(gc, offset, val); spin_unlock_irqrestore(&gpio->lock, flags); - return 0; + return rc; } static int aspeed_sgpio_get_direction(struct gpio_chip *gc, unsigned int offset) { - int dir_status; - struct aspeed_sgpio *gpio = gpiochip_get_data(gc); - unsigned long flags; - - spin_lock_irqsave(&gpio->lock, flags); - dir_status = gpio->dir_in[GPIO_BANK(offset)] & GPIO_BIT(offset); - spin_unlock_irqrestore(&gpio->lock, flags); - - return dir_status; - + return !!aspeed_sgpio_is_input(offset); } static void irqd_to_aspeed_sgpio_data(struct irq_data *d, @@ -402,6 +444,7 @@ static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio, irq = &gpio->chip.irq; irq->chip = &aspeed_sgpio_irqchip; + irq->init_valid_mask = aspeed_sgpio_irq_init_valid_mask; irq->handler = handle_bad_irq; irq->default_type = IRQ_TYPE_NONE; irq->parent_handler = aspeed_sgpio_irq_handler; @@ -409,17 +452,15 @@ static int aspeed_sgpio_setup_irqs(struct aspeed_sgpio *gpio, irq->parents = &gpio->irq; irq->num_parents = 1; - /* set IRQ settings and Enable Interrupt */ + /* Apply default IRQ settings */ for (i = 0; i < ARRAY_SIZE(aspeed_sgpio_banks); i++) { bank = &aspeed_sgpio_banks[i]; /* set falling or level-low irq */ iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type0)); /* trigger type is edge */ iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type1)); - /* dual edge trigger mode. 
*/ - iowrite32(0xffffffff, bank_reg(gpio, bank, reg_irq_type2)); - /* enable irq */ - iowrite32(0xffffffff, bank_reg(gpio, bank, reg_irq_enable)); + /* single edge trigger */ + iowrite32(0x00000000, bank_reg(gpio, bank, reg_irq_type2)); } return 0; @@ -452,11 +493,12 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev) if (rc < 0) { dev_err(&pdev->dev, "Could not read ngpios property\n"); return -EINVAL; - } else if (nr_gpios > MAX_NR_SGPIO) { + } else if (nr_gpios > MAX_NR_HW_SGPIO) { dev_err(&pdev->dev, "Number of GPIOs exceeds the maximum of %d: %d\n", - MAX_NR_SGPIO, nr_gpios); + MAX_NR_HW_SGPIO, nr_gpios); return -EINVAL; } + gpio->n_sgpio = nr_gpios; rc = of_property_read_u32(pdev->dev.of_node, "bus-frequency", &sgpio_freq); if (rc < 0) { @@ -497,7 +539,8 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev) spin_lock_init(&gpio->lock); gpio->chip.parent = &pdev->dev; - gpio->chip.ngpio = nr_gpios; + gpio->chip.ngpio = MAX_NR_HW_SGPIO * 2; + gpio->chip.init_valid_mask = aspeed_sgpio_init_valid_mask; gpio->chip.direction_input = aspeed_sgpio_dir_in; gpio->chip.direction_output = aspeed_sgpio_dir_out; gpio->chip.get_direction = aspeed_sgpio_get_direction; @@ -509,9 +552,6 @@ static int __init aspeed_sgpio_probe(struct platform_device *pdev) gpio->chip.label = dev_name(&pdev->dev); gpio->chip.base = -1; - /* set all SGPIO pins as input (1). */ - memset(gpio->dir_in, 0xff, sizeof(gpio->dir_in)); - aspeed_sgpio_setup_irqs(gpio, pdev); rc = devm_gpiochip_add_data(&pdev->dev, &gpio->chip, gpio); diff --git a/drivers/gpio/gpio-aspeed.c b/drivers/gpio/gpio-aspeed.c index bf08b4561f36..e44d5de2a120 100644 --- a/drivers/gpio/gpio-aspeed.c +++ b/drivers/gpio/gpio-aspeed.c @@ -1114,8 +1114,8 @@ static const struct aspeed_gpio_config ast2500_config = static const struct aspeed_bank_props ast2600_bank_props[] = { /* input output */ - {5, 0xffffffff, 0x0000ffff}, /* U/V/W/X */ - {6, 0xffff0000, 0x0fff0000}, /* Y/Z */ + {5, 0xffffffff, 0xffffff00}, /* U/V/W/X */ + {6, 0x0000ffff, 0x0000ffff}, /* Y/Z */ { }, }; diff --git a/drivers/gpio/gpio-mockup.c b/drivers/gpio/gpio-mockup.c index bc345185db26..1652897fdf90 100644 --- a/drivers/gpio/gpio-mockup.c +++ b/drivers/gpio/gpio-mockup.c @@ -552,6 +552,7 @@ static int __init gpio_mockup_init(void) err = platform_driver_register(&gpio_mockup_driver); if (err) { gpio_mockup_err("error registering platform driver\n"); + debugfs_remove_recursive(gpio_mockup_dbg_dir); return err; } @@ -582,6 +583,7 @@ static int __init gpio_mockup_init(void) gpio_mockup_err("error registering device"); platform_driver_unregister(&gpio_mockup_driver); gpio_mockup_unregister_pdevs(); + debugfs_remove_recursive(gpio_mockup_dbg_dir); return PTR_ERR(pdev); } diff --git a/drivers/gpio/gpio-omap.c b/drivers/gpio/gpio-omap.c index 7fbe0c9e1fc1..0ea640fb636c 100644 --- a/drivers/gpio/gpio-omap.c +++ b/drivers/gpio/gpio-omap.c @@ -1516,7 +1516,7 @@ static int __maybe_unused omap_gpio_runtime_resume(struct device *dev) return 0; } -static int omap_gpio_suspend(struct device *dev) +static int __maybe_unused omap_gpio_suspend(struct device *dev) { struct gpio_bank *bank = dev_get_drvdata(dev); @@ -1528,7 +1528,7 @@ static int omap_gpio_suspend(struct device *dev) return omap_gpio_runtime_suspend(dev); } -static int omap_gpio_resume(struct device *dev) +static int __maybe_unused omap_gpio_resume(struct device *dev) { struct gpio_bank *bank = dev_get_drvdata(dev); diff --git a/drivers/gpio/gpio-pca953x.c b/drivers/gpio/gpio-pca953x.c index 
bd2e96c34f82..fb61f2fc6ed7 100644 --- a/drivers/gpio/gpio-pca953x.c +++ b/drivers/gpio/gpio-pca953x.c @@ -818,6 +818,8 @@ static irqreturn_t pca953x_irq_handler(int irq, void *devid) int level; bool ret; + bitmap_zero(pending, MAX_LINE); + mutex_lock(&chip->i2c_lock); ret = pca953x_irq_pending(chip, pending); mutex_unlock(&chip->i2c_lock); @@ -940,6 +942,7 @@ out: static int device_pca957x_init(struct pca953x_chip *chip, u32 invert) { DECLARE_BITMAP(val, MAX_LINE); + unsigned int i; int ret; ret = device_pca95xx_init(chip, invert); @@ -947,7 +950,9 @@ static int device_pca957x_init(struct pca953x_chip *chip, u32 invert) goto out; /* To enable register 6, 7 to control pull up and pull down */ - memset(val, 0x02, NBANK(chip)); + for (i = 0; i < NBANK(chip); i++) + bitmap_set_value8(val, 0x02, i * BANK_SZ); + ret = pca953x_write_regs(chip, PCA957X_BKEN, val); if (ret) goto out; diff --git a/drivers/gpio/gpio-siox.c b/drivers/gpio/gpio-siox.c index 26e1fe092304..f8c5e9fc4bac 100644 --- a/drivers/gpio/gpio-siox.c +++ b/drivers/gpio/gpio-siox.c @@ -245,6 +245,7 @@ static int gpio_siox_probe(struct siox_device *sdevice) girq->chip = &ddata->ichip; girq->default_type = IRQ_TYPE_NONE; girq->handler = handle_level_irq; + girq->threaded = true; ret = devm_gpiochip_add_data(dev, &ddata->gchip, NULL); if (ret) diff --git a/drivers/gpio/gpio-sprd.c b/drivers/gpio/gpio-sprd.c index d7314d39ab65..36ea8a3bd451 100644 --- a/drivers/gpio/gpio-sprd.c +++ b/drivers/gpio/gpio-sprd.c @@ -149,17 +149,20 @@ static int sprd_gpio_irq_set_type(struct irq_data *data, sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0); sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 0); sprd_gpio_update(chip, offset, SPRD_GPIO_IEV, 1); + sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1); irq_set_handler_locked(data, handle_edge_irq); break; case IRQ_TYPE_EDGE_FALLING: sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0); sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 0); sprd_gpio_update(chip, offset, SPRD_GPIO_IEV, 0); + sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1); irq_set_handler_locked(data, handle_edge_irq); break; case IRQ_TYPE_EDGE_BOTH: sprd_gpio_update(chip, offset, SPRD_GPIO_IS, 0); sprd_gpio_update(chip, offset, SPRD_GPIO_IBE, 1); + sprd_gpio_update(chip, offset, SPRD_GPIO_IC, 1); irq_set_handler_locked(data, handle_edge_irq); break; case IRQ_TYPE_LEVEL_HIGH: diff --git a/drivers/gpio/gpio-tc3589x.c b/drivers/gpio/gpio-tc3589x.c index 58b0da9eb76f..ea3f68a28fea 100644 --- a/drivers/gpio/gpio-tc3589x.c +++ b/drivers/gpio/gpio-tc3589x.c @@ -212,7 +212,7 @@ static void tc3589x_gpio_irq_sync_unlock(struct irq_data *d) continue; tc3589x_gpio->oldregs[i][j] = new; - tc3589x_reg_write(tc3589x, regmap[i] + j * 8, new); + tc3589x_reg_write(tc3589x, regmap[i] + j, new); } } diff --git a/drivers/gpio/gpiolib-cdev.c b/drivers/gpio/gpiolib-cdev.c index e6c9b78adfc2..76c36b05aef6 100644 --- a/drivers/gpio/gpiolib-cdev.c +++ b/drivers/gpio/gpiolib-cdev.c @@ -423,6 +423,21 @@ static __poll_t lineevent_poll(struct file *file, return events; } +static ssize_t lineevent_get_size(void) +{ +#ifdef __x86_64__ + /* i386 has no padding after 'id' */ + if (in_ia32_syscall()) { + struct compat_gpioeevent_data { + compat_u64 timestamp; + u32 id; + }; + + return sizeof(struct compat_gpioeevent_data); + } +#endif + return sizeof(struct gpioevent_data); +} static ssize_t lineevent_read(struct file *file, char __user *buf, @@ -432,9 +447,20 @@ static ssize_t lineevent_read(struct file *file, struct lineevent_state *le = file->private_data; struct gpioevent_data ge; 
ssize_t bytes_read = 0; + ssize_t ge_size; int ret; - if (count < sizeof(ge)) + /* + * When compatible system call is being used the struct gpioevent_data, + * in case of at least ia32, has different size due to the alignment + * differences. Because we have first member 64 bits followed by one of + * 32 bits there is no gap between them. The only difference is the + * padding at the end of the data structure. Hence, we calculate the + * actual sizeof() and pass this as an argument to copy_to_user() to + * drop unneeded bytes from the output. + */ + ge_size = lineevent_get_size(); + if (count < ge_size) return -EINVAL; do { @@ -470,10 +496,10 @@ static ssize_t lineevent_read(struct file *file, break; } - if (copy_to_user(buf + bytes_read, &ge, sizeof(ge))) + if (copy_to_user(buf + bytes_read, &ge, ge_size)) return -EFAULT; - bytes_read += sizeof(ge); - } while (count >= bytes_read + sizeof(ge)); + bytes_read += ge_size; + } while (count >= bytes_read + ge_size); return bytes_read; } diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c index eb7cfe87042e..d0b8d0d341af 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c @@ -80,8 +80,6 @@ MODULE_FIRMWARE("amdgpu/renoir_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/navi10_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/navi14_gpu_info.bin"); MODULE_FIRMWARE("amdgpu/navi12_gpu_info.bin"); -MODULE_FIRMWARE("amdgpu/sienna_cichlid_gpu_info.bin"); -MODULE_FIRMWARE("amdgpu/navy_flounder_gpu_info.bin"); #define AMDGPU_RESUME_MS 2000 @@ -1600,6 +1598,8 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) case CHIP_CARRIZO: case CHIP_STONEY: case CHIP_VEGA20: + case CHIP_SIENNA_CICHLID: + case CHIP_NAVY_FLOUNDER: default: return 0; case CHIP_VEGA10: @@ -1631,12 +1631,6 @@ static int amdgpu_device_parse_gpu_info_fw(struct amdgpu_device *adev) case CHIP_NAVI12: chip_name = "navi12"; break; - case CHIP_SIENNA_CICHLID: - chip_name = "sienna_cichlid"; - break; - case CHIP_NAVY_FLOUNDER: - chip_name = "navy_flounder"; - break; } snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_gpu_info.bin", chip_name); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c index d76172965199..44c1f6e00635 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_display.c @@ -297,7 +297,7 @@ int amdgpu_display_crtc_set_config(struct drm_mode_set *set, take the current one */ if (active && !adev->have_disp_power_ref) { adev->have_disp_power_ref = true; - goto out; + return ret; } /* if we have no active crtcs, then drop the power ref we got before */ diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c index 26127c7d2f32..321032d3a51a 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c @@ -1044,8 +1044,16 @@ static const struct pci_device_id pciidlist[] = { {0x1002, 0x1636, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_RENOIR|AMD_IS_APU}, /* Navi12 */ - {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT}, - {0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12|AMD_EXP_HW_SUPPORT}, + {0x1002, 0x7360, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12}, + {0x1002, 0x7362, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_NAVI12}, + + /* Sienna_Cichlid */ + {0x1002, 0x73A0, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, + {0x1002, 0x73A2, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, + {0x1002, 
0x73A3, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, + {0x1002, 0x73AB, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, + {0x1002, 0x73AE, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, + {0x1002, 0x73BF, PCI_ANY_ID, PCI_ANY_ID, 0, 0, CHIP_SIENNA_CICHLID}, {0, 0, 0} }; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index e11c5d69843d..978bae731398 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -1076,6 +1076,7 @@ static int amdgpu_ttm_tt_pin_userptr(struct ttm_tt *ttm) release_sg: kfree(ttm->sg); + ttm->sg = NULL; return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c index 037a187aa42f..f73ce9721233 100644 --- a/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c +++ b/drivers/gpu/drm/amd/amdgpu/gfx_v10_0.c @@ -3595,6 +3595,9 @@ static void gfx_v10_0_check_gfxoff_flag(struct amdgpu_device *adev) if (!gfx_v10_0_navi10_gfxoff_should_enable(adev)) adev->pm.pp_feature &= ~PP_GFXOFF_MASK; break; + case CHIP_NAVY_FLOUNDER: + adev->pm.pp_feature &= ~PP_GFXOFF_MASK; + break; default: break; } diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c index 63e5547cfb16..3a805eaf6f11 100644 --- a/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c +++ b/drivers/gpu/drm/amd/amdgpu/vcn_v3_0.c @@ -746,18 +746,18 @@ static void vcn_v3_0_disable_clock_gating(struct amdgpu_device *adev, int inst) | UVD_SUVD_CGC_GATE__IME_HEVC_MASK | UVD_SUVD_CGC_GATE__EFC_MASK | UVD_SUVD_CGC_GATE__SAOE_MASK - | 0x08000000 + | UVD_SUVD_CGC_GATE__SRE_AV1_MASK | UVD_SUVD_CGC_GATE__FBC_PCLK_MASK | UVD_SUVD_CGC_GATE__FBC_CCLK_MASK - | 0x40000000 + | UVD_SUVD_CGC_GATE__SCM_AV1_MASK | UVD_SUVD_CGC_GATE__SMPA_MASK); WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE, data); data = RREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2); data |= (UVD_SUVD_CGC_GATE2__MPBE0_MASK | UVD_SUVD_CGC_GATE2__MPBE1_MASK - | 0x00000004 - | 0x00000008 + | UVD_SUVD_CGC_GATE2__SIT_AV1_MASK + | UVD_SUVD_CGC_GATE2__SDB_AV1_MASK | UVD_SUVD_CGC_GATE2__MPC1_MASK); WREG32_SOC15(VCN, inst, mmUVD_SUVD_CGC_GATE2, data); @@ -776,8 +776,8 @@ static void vcn_v3_0_disable_clock_gating(struct amdgpu_device *adev, int inst) | UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK | UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK | UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK - | 0x00008000 - | 0x00010000 + | UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK + | UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK | UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK | UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK | UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK); @@ -892,8 +892,8 @@ static void vcn_v3_0_enable_clock_gating(struct amdgpu_device *adev, int inst) | UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK | UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK | UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK - | 0x00008000 - | 0x00010000 + | UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK + | UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK | UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK | UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK | UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK); diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c index 694c5bc93665..c2cd184f0bbd 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_hdcp.c @@ -604,7 +604,7 @@ struct hdcp_workqueue *hdcp_create_workqueue(struct amdgpu_device *adev, struct int i = 0; hdcp_work = kcalloc(max_caps, sizeof(*hdcp_work), GFP_KERNEL); - if (hdcp_work == NULL) + if (ZERO_OR_NULL_PTR(hdcp_work)) return NULL; 
hdcp_work->srm = kcalloc(PSP_HDCP_SRM_FIRST_GEN_MAX_SIZE, sizeof(*hdcp_work->srm), GFP_KERNEL); diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c index 543afa34d87a..21a3073c8929 100644 --- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c +++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn21/rn_clk_mgr.c @@ -783,7 +783,6 @@ void rn_clk_mgr_construct( } else { struct clk_log_info log_info = {0}; - clk_mgr->smu_ver = rn_vbios_smu_get_smu_version(clk_mgr); clk_mgr->periodic_retraining_disabled = rn_vbios_smu_is_periodic_retraining_disabled(clk_mgr); /* SMU Version 55.51.0 and up no longer have an issue diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile index 025637a83c3b..bd2a068f9863 100644 --- a/drivers/gpu/drm/amd/display/dc/dcn30/Makefile +++ b/drivers/gpu/drm/amd/display/dc/dcn30/Makefile @@ -31,9 +31,21 @@ DCN30 = dcn30_init.o dcn30_hubbub.o dcn30_hubp.o dcn30_dpp.o dcn30_optc.o \ dcn30_dio_link_encoder.o dcn30_resource.o -CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -msse -mpreferred-stack-boundary=4 - +ifdef CONFIG_X86 CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -msse +CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -msse +endif + +ifdef CONFIG_PPC64 +CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mhard-float -maltivec +CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mhard-float -maltivec +endif + +ifdef CONFIG_ARM64 +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o := -mgeneral-regs-only +CFLAGS_REMOVE_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o := -mgeneral-regs-only +endif + ifdef CONFIG_CC_IS_GCC ifeq ($(call cc-ifversion, -lt, 0701, y), y) IS_OLD_GCC = 1 @@ -45,8 +57,10 @@ ifdef IS_OLD_GCC # GCC < 7.1 cannot compile code using `double` and -mpreferred-stack-boundary=3 # (8B stack alignment). 
CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o += -mpreferred-stack-boundary=4 +CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o += -mpreferred-stack-boundary=4 else CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_resource.o += -msse2 +CFLAGS_$(AMDDALPATH)/dc/dcn30/dcn30_optc.o += -msse2 endif AMD_DAL_DCN30 = $(addprefix $(AMDDALPATH)/dc/dcn30/,$(DCN30)) diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_default.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_default.h index 1116779252e6..e245e912535e 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_default.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_default.h @@ -2727,6 +2727,7 @@ #define mmDB_STENCIL_WRITE_BASE_DEFAULT 0x00000000 #define mmDB_RESERVED_REG_1_DEFAULT 0x00000000 #define mmDB_RESERVED_REG_3_DEFAULT 0x00000000 +#define mmDB_VRS_OVERRIDE_CNTL_DEFAULT 0x00000000 #define mmDB_Z_READ_BASE_HI_DEFAULT 0x00000000 #define mmDB_STENCIL_READ_BASE_HI_DEFAULT 0x00000000 #define mmDB_Z_WRITE_BASE_HI_DEFAULT 0x00000000 @@ -3062,6 +3063,7 @@ #define mmPA_SU_OVER_RASTERIZATION_CNTL_DEFAULT 0x00000000 #define mmPA_STEREO_CNTL_DEFAULT 0x00000000 #define mmPA_STATE_STEREO_X_DEFAULT 0x00000000 +#define mmPA_CL_VRS_CNTL_DEFAULT 0x00000000 #define mmPA_SU_POINT_SIZE_DEFAULT 0x00000000 #define mmPA_SU_POINT_MINMAX_DEFAULT 0x00000000 #define mmPA_SU_LINE_CNTL_DEFAULT 0x00000000 diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h index 05d1b0a5f6d2..644a9fa71bb2 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_offset.h @@ -5379,6 +5379,8 @@ #define mmDB_RESERVED_REG_1_BASE_IDX 1 #define mmDB_RESERVED_REG_3 0x0017 #define mmDB_RESERVED_REG_3_BASE_IDX 1 +#define mmDB_VRS_OVERRIDE_CNTL 0x0019 +#define mmDB_VRS_OVERRIDE_CNTL_BASE_IDX 1 #define mmDB_Z_READ_BASE_HI 0x001a #define mmDB_Z_READ_BASE_HI_BASE_IDX 1 #define mmDB_STENCIL_READ_BASE_HI 0x001b @@ -6049,6 +6051,8 @@ #define mmPA_STEREO_CNTL_BASE_IDX 1 #define mmPA_STATE_STEREO_X 0x0211 #define mmPA_STATE_STEREO_X_BASE_IDX 1 +#define mmPA_CL_VRS_CNTL 0x0212 +#define mmPA_CL_VRS_CNTL_BASE_IDX 1 #define mmPA_SU_POINT_SIZE 0x0280 #define mmPA_SU_POINT_SIZE_BASE_IDX 1 #define mmPA_SU_POINT_MINMAX 0x0281 diff --git a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h index aac57f714cf1..2e449fcff893 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/gc/gc_10_3_0_sh_mask.h @@ -9777,6 +9777,7 @@ #define DB_EXCEPTION_CONTROL__AUTO_FLUSH_HTILE__SHIFT 0x3 #define DB_EXCEPTION_CONTROL__AUTO_FLUSH_QUAD__SHIFT 0x4 #define DB_EXCEPTION_CONTROL__FORCE_SUMMARIZE__SHIFT 0x8 +#define DB_EXCEPTION_CONTROL__FORCE_VRS_RATE_FINE__SHIFT 0x10 #define DB_EXCEPTION_CONTROL__DTAG_WATERMARK__SHIFT 0x18 #define DB_EXCEPTION_CONTROL__EARLY_Z_PANIC_DISABLE_MASK 0x00000001L #define DB_EXCEPTION_CONTROL__LATE_Z_PANIC_DISABLE_MASK 0x00000002L @@ -9784,6 +9785,7 @@ #define DB_EXCEPTION_CONTROL__AUTO_FLUSH_HTILE_MASK 0x00000008L #define DB_EXCEPTION_CONTROL__AUTO_FLUSH_QUAD_MASK 0x00000010L #define DB_EXCEPTION_CONTROL__FORCE_SUMMARIZE_MASK 0x00000F00L +#define DB_EXCEPTION_CONTROL__FORCE_VRS_RATE_FINE_MASK 0x00FF0000L #define DB_EXCEPTION_CONTROL__DTAG_WATERMARK_MASK 0x7F000000L //DB_DFSM_CONFIG #define DB_DFSM_CONFIG__BYPASS_DFSM__SHIFT 0x0 @@ -10076,6 +10078,7 @@ #define 
CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CM__SHIFT 0x18 #define CB_HW_CONTROL_3__DISABLE_NACK_COLOR_RD_WR_OPT__SHIFT 0x19 #define CB_HW_CONTROL_3__DISABLE_BLENDER_CLOCK_GATING__SHIFT 0x1a +#define CB_HW_CONTROL_3__DISABLE_DCC_VRS_OPT__SHIFT 0x1c #define CB_HW_CONTROL_3__DISABLE_FMASK_NOFETCH_OPT__SHIFT 0x1e #define CB_HW_CONTROL_3__DISABLE_FMASK_NOFETCH_OPT_BC__SHIFT 0x1f #define CB_HW_CONTROL_3__DISABLE_SLOW_MODE_EMPTY_HALF_QUAD_KILL_MASK 0x00000001L @@ -10103,12 +10106,15 @@ #define CB_HW_CONTROL_3__DISABLE_NACK_PROCESSING_CM_MASK 0x01000000L #define CB_HW_CONTROL_3__DISABLE_NACK_COLOR_RD_WR_OPT_MASK 0x02000000L #define CB_HW_CONTROL_3__DISABLE_BLENDER_CLOCK_GATING_MASK 0x04000000L +#define CB_HW_CONTROL_3__DISABLE_DCC_VRS_OPT_MASK 0x10000000L #define CB_HW_CONTROL_3__DISABLE_FMASK_NOFETCH_OPT_MASK 0x40000000L #define CB_HW_CONTROL_3__DISABLE_FMASK_NOFETCH_OPT_BC_MASK 0x80000000L //CB_HW_CONTROL #define CB_HW_CONTROL__ALLOW_MRT_WITH_DUAL_SOURCE__SHIFT 0x0 +#define CB_HW_CONTROL__DISABLE_VRS_FILLRATE_OPTIMIZATION__SHIFT 0x1 #define CB_HW_CONTROL__DISABLE_FILLRATE_OPT_FIX_WITH_CFC__SHIFT 0x3 #define CB_HW_CONTROL__DISABLE_POST_DCC_WITH_CFC_FIX__SHIFT 0x4 +#define CB_HW_CONTROL__DISABLE_COMPRESS_1FRAG_WHEN_VRS_RATE_HINT_EN__SHIFT 0x5 #define CB_HW_CONTROL__RMI_CREDITS__SHIFT 0x6 #define CB_HW_CONTROL__CHICKEN_BITS__SHIFT 0xc #define CB_HW_CONTROL__DISABLE_FMASK_MULTI_MGCG_DOMAINS__SHIFT 0xf @@ -10129,8 +10135,10 @@ #define CB_HW_CONTROL__DISABLE_CC_IB_SERIALIZER_STATE_OPT__SHIFT 0x1e #define CB_HW_CONTROL__DISABLE_PIXEL_IN_QUAD_FIX_FOR_LINEAR_SURFACE__SHIFT 0x1f #define CB_HW_CONTROL__ALLOW_MRT_WITH_DUAL_SOURCE_MASK 0x00000001L +#define CB_HW_CONTROL__DISABLE_VRS_FILLRATE_OPTIMIZATION_MASK 0x00000002L #define CB_HW_CONTROL__DISABLE_FILLRATE_OPT_FIX_WITH_CFC_MASK 0x00000008L #define CB_HW_CONTROL__DISABLE_POST_DCC_WITH_CFC_FIX_MASK 0x00000010L +#define CB_HW_CONTROL__DISABLE_COMPRESS_1FRAG_WHEN_VRS_RATE_HINT_EN_MASK 0x00000020L #define CB_HW_CONTROL__RMI_CREDITS_MASK 0x00000FC0L #define CB_HW_CONTROL__CHICKEN_BITS_MASK 0x00007000L #define CB_HW_CONTROL__DISABLE_FMASK_MULTI_MGCG_DOMAINS_MASK 0x00008000L @@ -19881,6 +19889,7 @@ #define DB_RENDER_OVERRIDE2__PRESERVE_SRESULTS__SHIFT 0x16 #define DB_RENDER_OVERRIDE2__DISABLE_FAST_PASS__SHIFT 0x17 #define DB_RENDER_OVERRIDE2__ALLOW_PARTIAL_RES_HIER_KILL__SHIFT 0x19 +#define DB_RENDER_OVERRIDE2__FORCE_VRS_RATE_FINE__SHIFT 0x1a #define DB_RENDER_OVERRIDE2__CENTROID_COMPUTATION_MODE__SHIFT 0x1b #define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_CONTROL_MASK 0x00000003L #define DB_RENDER_OVERRIDE2__PARTIAL_SQUAD_LAUNCH_COUNTDOWN_MASK 0x0000001CL @@ -19898,6 +19907,7 @@ #define DB_RENDER_OVERRIDE2__PRESERVE_SRESULTS_MASK 0x00400000L #define DB_RENDER_OVERRIDE2__DISABLE_FAST_PASS_MASK 0x00800000L #define DB_RENDER_OVERRIDE2__ALLOW_PARTIAL_RES_HIER_KILL_MASK 0x02000000L +#define DB_RENDER_OVERRIDE2__FORCE_VRS_RATE_FINE_MASK 0x04000000L #define DB_RENDER_OVERRIDE2__CENTROID_COMPUTATION_MODE_MASK 0x18000000L //DB_HTILE_DATA_BASE #define DB_HTILE_DATA_BASE__BASE_256B__SHIFT 0x0 @@ -20021,6 +20031,13 @@ //DB_RESERVED_REG_3 #define DB_RESERVED_REG_3__FIELD_1__SHIFT 0x0 #define DB_RESERVED_REG_3__FIELD_1_MASK 0x003FFFFFL +//DB_VRS_OVERRIDE_CNTL +#define DB_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_COMBINER_MODE__SHIFT 0x0 +#define DB_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_X__SHIFT 0x4 +#define DB_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_Y__SHIFT 0x6 +#define DB_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_COMBINER_MODE_MASK 0x00000007L +#define 
DB_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_X_MASK 0x00000030L +#define DB_VRS_OVERRIDE_CNTL__VRS_OVERRIDE_RATE_Y_MASK 0x000000C0L //DB_Z_READ_BASE_HI #define DB_Z_READ_BASE_HI__BASE_HI__SHIFT 0x0 #define DB_Z_READ_BASE_HI__BASE_HI_MASK 0x000000FFL @@ -22598,6 +22615,7 @@ #define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_SIDE_BUS_ENA__SHIFT 0x18 #define PA_CL_VS_OUT_CNTL__USE_VTX_GS_CUT_FLAG__SHIFT 0x19 #define PA_CL_VS_OUT_CNTL__USE_VTX_LINE_WIDTH__SHIFT 0x1b +#define PA_CL_VS_OUT_CNTL__USE_VTX_VRS_RATE__SHIFT 0x1c #define PA_CL_VS_OUT_CNTL__BYPASS_VTX_RATE_COMBINER__SHIFT 0x1d #define PA_CL_VS_OUT_CNTL__BYPASS_PRIM_RATE_COMBINER__SHIFT 0x1e #define PA_CL_VS_OUT_CNTL__CLIP_DIST_ENA_0_MASK 0x00000001L @@ -22627,6 +22645,7 @@ #define PA_CL_VS_OUT_CNTL__VS_OUT_MISC_SIDE_BUS_ENA_MASK 0x01000000L #define PA_CL_VS_OUT_CNTL__USE_VTX_GS_CUT_FLAG_MASK 0x02000000L #define PA_CL_VS_OUT_CNTL__USE_VTX_LINE_WIDTH_MASK 0x08000000L +#define PA_CL_VS_OUT_CNTL__USE_VTX_VRS_RATE_MASK 0x10000000L #define PA_CL_VS_OUT_CNTL__BYPASS_VTX_RATE_COMBINER_MASK 0x20000000L #define PA_CL_VS_OUT_CNTL__BYPASS_PRIM_RATE_COMBINER_MASK 0x40000000L //PA_CL_NANINF_CNTL @@ -22740,6 +22759,19 @@ //PA_STATE_STEREO_X #define PA_STATE_STEREO_X__STEREO_X_OFFSET__SHIFT 0x0 #define PA_STATE_STEREO_X__STEREO_X_OFFSET_MASK 0xFFFFFFFFL +//PA_CL_VRS_CNTL +#define PA_CL_VRS_CNTL__VERTEX_RATE_COMBINER_MODE__SHIFT 0x0 +#define PA_CL_VRS_CNTL__PRIMITIVE_RATE_COMBINER_MODE__SHIFT 0x3 +#define PA_CL_VRS_CNTL__HTILE_RATE_COMBINER_MODE__SHIFT 0x6 +#define PA_CL_VRS_CNTL__SAMPLE_ITER_COMBINER_MODE__SHIFT 0x9 +#define PA_CL_VRS_CNTL__EXPOSE_VRS_PIXELS_MASK__SHIFT 0xd +#define PA_CL_VRS_CNTL__CMASK_RATE_HINT_FORCE_ZERO__SHIFT 0xe +#define PA_CL_VRS_CNTL__VERTEX_RATE_COMBINER_MODE_MASK 0x00000007L +#define PA_CL_VRS_CNTL__PRIMITIVE_RATE_COMBINER_MODE_MASK 0x00000038L +#define PA_CL_VRS_CNTL__HTILE_RATE_COMBINER_MODE_MASK 0x000001C0L +#define PA_CL_VRS_CNTL__SAMPLE_ITER_COMBINER_MODE_MASK 0x00000E00L +#define PA_CL_VRS_CNTL__EXPOSE_VRS_PIXELS_MASK_MASK 0x00002000L +#define PA_CL_VRS_CNTL__CMASK_RATE_HINT_FORCE_ZERO_MASK 0x00004000L //PA_SU_POINT_SIZE #define PA_SU_POINT_SIZE__HEIGHT__SHIFT 0x0 #define PA_SU_POINT_SIZE__WIDTH__SHIFT 0x10 @@ -23088,6 +23120,7 @@ #define DB_HTILE_SURFACE__DST_OUTSIDE_ZERO_TO_ONE__SHIFT 0x10 #define DB_HTILE_SURFACE__RESERVED_FIELD_6__SHIFT 0x11 #define DB_HTILE_SURFACE__PIPE_ALIGNED__SHIFT 0x12 +#define DB_HTILE_SURFACE__VRS_HTILE_ENCODING__SHIFT 0x13 #define DB_HTILE_SURFACE__RESERVED_FIELD_1_MASK 0x00000001L #define DB_HTILE_SURFACE__FULL_CACHE_MASK 0x00000002L #define DB_HTILE_SURFACE__RESERVED_FIELD_2_MASK 0x00000004L @@ -23097,6 +23130,7 @@ #define DB_HTILE_SURFACE__DST_OUTSIDE_ZERO_TO_ONE_MASK 0x00010000L #define DB_HTILE_SURFACE__RESERVED_FIELD_6_MASK 0x00020000L #define DB_HTILE_SURFACE__PIPE_ALIGNED_MASK 0x00040000L +#define DB_HTILE_SURFACE__VRS_HTILE_ENCODING_MASK 0x00180000L //DB_SRESULTS_COMPARE_STATE0 #define DB_SRESULTS_COMPARE_STATE0__COMPAREFUNC0__SHIFT 0x0 #define DB_SRESULTS_COMPARE_STATE0__COMPAREVALUE0__SHIFT 0x4 @@ -24954,6 +24988,7 @@ #define CB_COLOR0_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT 0x1a #define CB_COLOR0_ATTRIB3__RESOURCE_LEVEL__SHIFT 0x1b #define CB_COLOR0_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e +#define CB_COLOR0_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT 0x1f #define CB_COLOR0_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL #define CB_COLOR0_ATTRIB3__META_LINEAR_MASK 0x00002000L #define CB_COLOR0_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L @@ -24962,6 +24997,7 @@ #define CB_COLOR0_ATTRIB3__CMASK_PIPE_ALIGNED_MASK 0x04000000L 
#define CB_COLOR0_ATTRIB3__RESOURCE_LEVEL_MASK 0x38000000L #define CB_COLOR0_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L +#define CB_COLOR0_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK 0x80000000L //CB_COLOR1_ATTRIB3 #define CB_COLOR1_ATTRIB3__MIP0_DEPTH__SHIFT 0x0 #define CB_COLOR1_ATTRIB3__META_LINEAR__SHIFT 0xd @@ -24971,6 +25007,7 @@ #define CB_COLOR1_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT 0x1a #define CB_COLOR1_ATTRIB3__RESOURCE_LEVEL__SHIFT 0x1b #define CB_COLOR1_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e +#define CB_COLOR1_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT 0x1f #define CB_COLOR1_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL #define CB_COLOR1_ATTRIB3__META_LINEAR_MASK 0x00002000L #define CB_COLOR1_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L @@ -24979,6 +25016,7 @@ #define CB_COLOR1_ATTRIB3__CMASK_PIPE_ALIGNED_MASK 0x04000000L #define CB_COLOR1_ATTRIB3__RESOURCE_LEVEL_MASK 0x38000000L #define CB_COLOR1_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L +#define CB_COLOR1_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK 0x80000000L //CB_COLOR2_ATTRIB3 #define CB_COLOR2_ATTRIB3__MIP0_DEPTH__SHIFT 0x0 #define CB_COLOR2_ATTRIB3__META_LINEAR__SHIFT 0xd @@ -24988,6 +25026,7 @@ #define CB_COLOR2_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT 0x1a #define CB_COLOR2_ATTRIB3__RESOURCE_LEVEL__SHIFT 0x1b #define CB_COLOR2_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e +#define CB_COLOR2_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT 0x1f #define CB_COLOR2_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL #define CB_COLOR2_ATTRIB3__META_LINEAR_MASK 0x00002000L #define CB_COLOR2_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L @@ -24996,6 +25035,7 @@ #define CB_COLOR2_ATTRIB3__CMASK_PIPE_ALIGNED_MASK 0x04000000L #define CB_COLOR2_ATTRIB3__RESOURCE_LEVEL_MASK 0x38000000L #define CB_COLOR2_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L +#define CB_COLOR2_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK 0x80000000L //CB_COLOR3_ATTRIB3 #define CB_COLOR3_ATTRIB3__MIP0_DEPTH__SHIFT 0x0 #define CB_COLOR3_ATTRIB3__META_LINEAR__SHIFT 0xd @@ -25005,6 +25045,7 @@ #define CB_COLOR3_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT 0x1a #define CB_COLOR3_ATTRIB3__RESOURCE_LEVEL__SHIFT 0x1b #define CB_COLOR3_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e +#define CB_COLOR3_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT 0x1f #define CB_COLOR3_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL #define CB_COLOR3_ATTRIB3__META_LINEAR_MASK 0x00002000L #define CB_COLOR3_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L @@ -25013,6 +25054,7 @@ #define CB_COLOR3_ATTRIB3__CMASK_PIPE_ALIGNED_MASK 0x04000000L #define CB_COLOR3_ATTRIB3__RESOURCE_LEVEL_MASK 0x38000000L #define CB_COLOR3_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L +#define CB_COLOR3_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK 0x80000000L //CB_COLOR4_ATTRIB3 #define CB_COLOR4_ATTRIB3__MIP0_DEPTH__SHIFT 0x0 #define CB_COLOR4_ATTRIB3__META_LINEAR__SHIFT 0xd @@ -25022,6 +25064,7 @@ #define CB_COLOR4_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT 0x1a #define CB_COLOR4_ATTRIB3__RESOURCE_LEVEL__SHIFT 0x1b #define CB_COLOR4_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e +#define CB_COLOR4_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT 0x1f #define CB_COLOR4_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL #define CB_COLOR4_ATTRIB3__META_LINEAR_MASK 0x00002000L #define CB_COLOR4_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L @@ -25030,6 +25073,7 @@ #define CB_COLOR4_ATTRIB3__CMASK_PIPE_ALIGNED_MASK 0x04000000L #define CB_COLOR4_ATTRIB3__RESOURCE_LEVEL_MASK 0x38000000L #define CB_COLOR4_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L +#define CB_COLOR4_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK 0x80000000L //CB_COLOR5_ATTRIB3 #define CB_COLOR5_ATTRIB3__MIP0_DEPTH__SHIFT 0x0 #define 
CB_COLOR5_ATTRIB3__META_LINEAR__SHIFT 0xd @@ -25039,6 +25083,7 @@ #define CB_COLOR5_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT 0x1a #define CB_COLOR5_ATTRIB3__RESOURCE_LEVEL__SHIFT 0x1b #define CB_COLOR5_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e +#define CB_COLOR5_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT 0x1f #define CB_COLOR5_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL #define CB_COLOR5_ATTRIB3__META_LINEAR_MASK 0x00002000L #define CB_COLOR5_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L @@ -25047,6 +25092,7 @@ #define CB_COLOR5_ATTRIB3__CMASK_PIPE_ALIGNED_MASK 0x04000000L #define CB_COLOR5_ATTRIB3__RESOURCE_LEVEL_MASK 0x38000000L #define CB_COLOR5_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L +#define CB_COLOR5_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK 0x80000000L //CB_COLOR6_ATTRIB3 #define CB_COLOR6_ATTRIB3__MIP0_DEPTH__SHIFT 0x0 #define CB_COLOR6_ATTRIB3__META_LINEAR__SHIFT 0xd @@ -25056,6 +25102,7 @@ #define CB_COLOR6_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT 0x1a #define CB_COLOR6_ATTRIB3__RESOURCE_LEVEL__SHIFT 0x1b #define CB_COLOR6_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e +#define CB_COLOR6_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT 0x1f #define CB_COLOR6_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL #define CB_COLOR6_ATTRIB3__META_LINEAR_MASK 0x00002000L #define CB_COLOR6_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L @@ -25064,6 +25111,7 @@ #define CB_COLOR6_ATTRIB3__CMASK_PIPE_ALIGNED_MASK 0x04000000L #define CB_COLOR6_ATTRIB3__RESOURCE_LEVEL_MASK 0x38000000L #define CB_COLOR6_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L +#define CB_COLOR6_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK 0x80000000L //CB_COLOR7_ATTRIB3 #define CB_COLOR7_ATTRIB3__MIP0_DEPTH__SHIFT 0x0 #define CB_COLOR7_ATTRIB3__META_LINEAR__SHIFT 0xd @@ -25073,6 +25121,7 @@ #define CB_COLOR7_ATTRIB3__CMASK_PIPE_ALIGNED__SHIFT 0x1a #define CB_COLOR7_ATTRIB3__RESOURCE_LEVEL__SHIFT 0x1b #define CB_COLOR7_ATTRIB3__DCC_PIPE_ALIGNED__SHIFT 0x1e +#define CB_COLOR7_ATTRIB3__VRS_RATE_HINT_ENABLE__SHIFT 0x1f #define CB_COLOR7_ATTRIB3__MIP0_DEPTH_MASK 0x00001FFFL #define CB_COLOR7_ATTRIB3__META_LINEAR_MASK 0x00002000L #define CB_COLOR7_ATTRIB3__COLOR_SW_MODE_MASK 0x0007C000L @@ -25081,6 +25130,7 @@ #define CB_COLOR7_ATTRIB3__CMASK_PIPE_ALIGNED_MASK 0x04000000L #define CB_COLOR7_ATTRIB3__RESOURCE_LEVEL_MASK 0x38000000L #define CB_COLOR7_ATTRIB3__DCC_PIPE_ALIGNED_MASK 0x40000000L +#define CB_COLOR7_ATTRIB3__VRS_RATE_HINT_ENABLE_MASK 0x80000000L // addressBlock: gc_gfxudec diff --git a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_3_0_0_sh_mask.h b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_3_0_0_sh_mask.h index c0efd90808f2..58cf7adb9d54 100644 --- a/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_3_0_0_sh_mask.h +++ b/drivers/gpu/drm/amd/include/asic_reg/vcn/vcn_3_0_0_sh_mask.h @@ -2393,6 +2393,7 @@ #define VCN_FEATURES__HAS_MJPEG2_IDCT_DEC__SHIFT 0x7 #define VCN_FEATURES__HAS_SCLR_DEC__SHIFT 0x8 #define VCN_FEATURES__HAS_VP9_DEC__SHIFT 0x9 +#define VCN_FEATURES__HAS_AV1_DEC__SHIFT 0xa #define VCN_FEATURES__HAS_EFC_ENC__SHIFT 0xb #define VCN_FEATURES__HAS_EFC_HDR2SDR_ENC__SHIFT 0xc #define VCN_FEATURES__HAS_DUAL_MJPEG_DEC__SHIFT 0xd @@ -2407,6 +2408,7 @@ #define VCN_FEATURES__HAS_MJPEG2_IDCT_DEC_MASK 0x00000080L #define VCN_FEATURES__HAS_SCLR_DEC_MASK 0x00000100L #define VCN_FEATURES__HAS_VP9_DEC_MASK 0x00000200L +#define VCN_FEATURES__HAS_AV1_DEC_MASK 0x00000400L #define VCN_FEATURES__HAS_EFC_ENC_MASK 0x00000800L #define VCN_FEATURES__HAS_EFC_HDR2SDR_ENC_MASK 0x00001000L #define VCN_FEATURES__HAS_DUAL_MJPEG_DEC_MASK 0x00002000L @@ -2809,8 +2811,10 @@ #define UVD_SUVD_CGC_GATE__IME_HEVC__SHIFT 0x18 #define 
UVD_SUVD_CGC_GATE__EFC__SHIFT 0x19 #define UVD_SUVD_CGC_GATE__SAOE__SHIFT 0x1a +#define UVD_SUVD_CGC_GATE__SRE_AV1__SHIFT 0x1b #define UVD_SUVD_CGC_GATE__FBC_PCLK__SHIFT 0x1c #define UVD_SUVD_CGC_GATE__FBC_CCLK__SHIFT 0x1d +#define UVD_SUVD_CGC_GATE__SCM_AV1__SHIFT 0x1e #define UVD_SUVD_CGC_GATE__SMPA__SHIFT 0x1f #define UVD_SUVD_CGC_GATE__SRE_MASK 0x00000001L #define UVD_SUVD_CGC_GATE__SIT_MASK 0x00000002L @@ -2839,8 +2843,10 @@ #define UVD_SUVD_CGC_GATE__IME_HEVC_MASK 0x01000000L #define UVD_SUVD_CGC_GATE__EFC_MASK 0x02000000L #define UVD_SUVD_CGC_GATE__SAOE_MASK 0x04000000L +#define UVD_SUVD_CGC_GATE__SRE_AV1_MASK 0x08000000L #define UVD_SUVD_CGC_GATE__FBC_PCLK_MASK 0x10000000L #define UVD_SUVD_CGC_GATE__FBC_CCLK_MASK 0x20000000L +#define UVD_SUVD_CGC_GATE__SCM_AV1_MASK 0x40000000L #define UVD_SUVD_CGC_GATE__SMPA_MASK 0x80000000L //UVD_SUVD_CGC_STATUS #define UVD_SUVD_CGC_STATUS__SRE_VCLK__SHIFT 0x0 @@ -2873,6 +2879,8 @@ #define UVD_SUVD_CGC_STATUS__IME_HEVC_DCLK__SHIFT 0x1b #define UVD_SUVD_CGC_STATUS__EFC_DCLK__SHIFT 0x1c #define UVD_SUVD_CGC_STATUS__SAOE_DCLK__SHIFT 0x1d +#define UVD_SUVD_CGC_STATUS__SRE_AV1_VCLK__SHIFT 0x1e +#define UVD_SUVD_CGC_STATUS__SCM_AV1_DCLK__SHIFT 0x1f #define UVD_SUVD_CGC_STATUS__SRE_VCLK_MASK 0x00000001L #define UVD_SUVD_CGC_STATUS__SRE_DCLK_MASK 0x00000002L #define UVD_SUVD_CGC_STATUS__SIT_DCLK_MASK 0x00000004L @@ -2903,6 +2911,8 @@ #define UVD_SUVD_CGC_STATUS__IME_HEVC_DCLK_MASK 0x08000000L #define UVD_SUVD_CGC_STATUS__EFC_DCLK_MASK 0x10000000L #define UVD_SUVD_CGC_STATUS__SAOE_DCLK_MASK 0x20000000L +#define UVD_SUVD_CGC_STATUS__SRE_AV1_VCLK_MASK 0x40000000L +#define UVD_SUVD_CGC_STATUS__SCM_AV1_DCLK_MASK 0x80000000L //UVD_SUVD_CGC_CTRL #define UVD_SUVD_CGC_CTRL__SRE_MODE__SHIFT 0x0 #define UVD_SUVD_CGC_CTRL__SIT_MODE__SHIFT 0x1 @@ -2919,6 +2929,8 @@ #define UVD_SUVD_CGC_CTRL__SMPA_MODE__SHIFT 0xc #define UVD_SUVD_CGC_CTRL__MPBE0_MODE__SHIFT 0xd #define UVD_SUVD_CGC_CTRL__MPBE1_MODE__SHIFT 0xe +#define UVD_SUVD_CGC_CTRL__SIT_AV1_MODE__SHIFT 0xf +#define UVD_SUVD_CGC_CTRL__SDB_AV1_MODE__SHIFT 0x10 #define UVD_SUVD_CGC_CTRL__MPC1_MODE__SHIFT 0x11 #define UVD_SUVD_CGC_CTRL__FBC_PCLK__SHIFT 0x1c #define UVD_SUVD_CGC_CTRL__FBC_CCLK__SHIFT 0x1d @@ -2937,6 +2949,8 @@ #define UVD_SUVD_CGC_CTRL__SMPA_MODE_MASK 0x00001000L #define UVD_SUVD_CGC_CTRL__MPBE0_MODE_MASK 0x00002000L #define UVD_SUVD_CGC_CTRL__MPBE1_MODE_MASK 0x00004000L +#define UVD_SUVD_CGC_CTRL__SIT_AV1_MODE_MASK 0x00008000L +#define UVD_SUVD_CGC_CTRL__SDB_AV1_MODE_MASK 0x00010000L #define UVD_SUVD_CGC_CTRL__MPC1_MODE_MASK 0x00020000L #define UVD_SUVD_CGC_CTRL__FBC_PCLK_MASK 0x10000000L #define UVD_SUVD_CGC_CTRL__FBC_CCLK_MASK 0x20000000L @@ -3658,6 +3672,8 @@ #define UVD_SUVD_CGC_STATUS2__SMPA_VCLK__SHIFT 0x0 #define UVD_SUVD_CGC_STATUS2__SMPA_DCLK__SHIFT 0x1 #define UVD_SUVD_CGC_STATUS2__MPBE1_DCLK__SHIFT 0x3 +#define UVD_SUVD_CGC_STATUS2__SIT_AV1_DCLK__SHIFT 0x4 +#define UVD_SUVD_CGC_STATUS2__SDB_AV1_DCLK__SHIFT 0x5 #define UVD_SUVD_CGC_STATUS2__MPC1_DCLK__SHIFT 0x6 #define UVD_SUVD_CGC_STATUS2__MPC1_SCLK__SHIFT 0x7 #define UVD_SUVD_CGC_STATUS2__MPC1_VCLK__SHIFT 0x8 @@ -3666,6 +3682,8 @@ #define UVD_SUVD_CGC_STATUS2__SMPA_VCLK_MASK 0x00000001L #define UVD_SUVD_CGC_STATUS2__SMPA_DCLK_MASK 0x00000002L #define UVD_SUVD_CGC_STATUS2__MPBE1_DCLK_MASK 0x00000008L +#define UVD_SUVD_CGC_STATUS2__SIT_AV1_DCLK_MASK 0x00000010L +#define UVD_SUVD_CGC_STATUS2__SDB_AV1_DCLK_MASK 0x00000020L #define UVD_SUVD_CGC_STATUS2__MPC1_DCLK_MASK 0x00000040L #define UVD_SUVD_CGC_STATUS2__MPC1_SCLK_MASK 0x00000080L #define 
UVD_SUVD_CGC_STATUS2__MPC1_VCLK_MASK 0x00000100L @@ -3674,25 +3692,41 @@ //UVD_SUVD_CGC_GATE2 #define UVD_SUVD_CGC_GATE2__MPBE0__SHIFT 0x0 #define UVD_SUVD_CGC_GATE2__MPBE1__SHIFT 0x1 +#define UVD_SUVD_CGC_GATE2__SIT_AV1__SHIFT 0x2 +#define UVD_SUVD_CGC_GATE2__SDB_AV1__SHIFT 0x3 #define UVD_SUVD_CGC_GATE2__MPC1__SHIFT 0x4 #define UVD_SUVD_CGC_GATE2__MPBE0_MASK 0x00000001L #define UVD_SUVD_CGC_GATE2__MPBE1_MASK 0x00000002L +#define UVD_SUVD_CGC_GATE2__SIT_AV1_MASK 0x00000004L +#define UVD_SUVD_CGC_GATE2__SDB_AV1_MASK 0x00000008L #define UVD_SUVD_CGC_GATE2__MPC1_MASK 0x00000010L //UVD_SUVD_INT_STATUS2 #define UVD_SUVD_INT_STATUS2__SMPA_FUNC_INT__SHIFT 0x0 #define UVD_SUVD_INT_STATUS2__SMPA_ERR_INT__SHIFT 0x5 +#define UVD_SUVD_INT_STATUS2__SDB_AV1_FUNC_INT__SHIFT 0x6 +#define UVD_SUVD_INT_STATUS2__SDB_AV1_ERR_INT__SHIFT 0xb #define UVD_SUVD_INT_STATUS2__SMPA_FUNC_INT_MASK 0x0000001FL #define UVD_SUVD_INT_STATUS2__SMPA_ERR_INT_MASK 0x00000020L +#define UVD_SUVD_INT_STATUS2__SDB_AV1_FUNC_INT_MASK 0x000007C0L +#define UVD_SUVD_INT_STATUS2__SDB_AV1_ERR_INT_MASK 0x00000800L //UVD_SUVD_INT_EN2 #define UVD_SUVD_INT_EN2__SMPA_FUNC_INT_EN__SHIFT 0x0 #define UVD_SUVD_INT_EN2__SMPA_ERR_INT_EN__SHIFT 0x5 +#define UVD_SUVD_INT_EN2__SDB_AV1_FUNC_INT_EN__SHIFT 0x6 +#define UVD_SUVD_INT_EN2__SDB_AV1_ERR_INT_EN__SHIFT 0xb #define UVD_SUVD_INT_EN2__SMPA_FUNC_INT_EN_MASK 0x0000001FL #define UVD_SUVD_INT_EN2__SMPA_ERR_INT_EN_MASK 0x00000020L +#define UVD_SUVD_INT_EN2__SDB_AV1_FUNC_INT_EN_MASK 0x000007C0L +#define UVD_SUVD_INT_EN2__SDB_AV1_ERR_INT_EN_MASK 0x00000800L //UVD_SUVD_INT_ACK2 #define UVD_SUVD_INT_ACK2__SMPA_FUNC_INT_ACK__SHIFT 0x0 #define UVD_SUVD_INT_ACK2__SMPA_ERR_INT_ACK__SHIFT 0x5 +#define UVD_SUVD_INT_ACK2__SDB_AV1_FUNC_INT_ACK__SHIFT 0x6 +#define UVD_SUVD_INT_ACK2__SDB_AV1_ERR_INT_ACK__SHIFT 0xb #define UVD_SUVD_INT_ACK2__SMPA_FUNC_INT_ACK_MASK 0x0000001FL #define UVD_SUVD_INT_ACK2__SMPA_ERR_INT_ACK_MASK 0x00000020L +#define UVD_SUVD_INT_ACK2__SDB_AV1_FUNC_INT_ACK_MASK 0x000007C0L +#define UVD_SUVD_INT_ACK2__SDB_AV1_ERR_INT_ACK_MASK 0x00000800L // addressBlock: uvd0_ecpudec diff --git a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c index 63f945f9f331..8dc5abb6931e 100644 --- a/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c +++ b/drivers/gpu/drm/amd/powerplay/amdgpu_smu.c @@ -479,17 +479,6 @@ static int smu_late_init(void *handle) return ret; } - /* - * Set initialized values (get from vbios) to dpm tables context such as - * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each - * type of clks. - */ - ret = smu_set_default_dpm_table(smu); - if (ret) { - dev_err(adev->dev, "Failed to setup default dpm clock tables!\n"); - return ret; - } - ret = smu_populate_umd_state_clk(smu); if (ret) { dev_err(adev->dev, "Failed to populate UMD state clocks!\n"); @@ -984,6 +973,17 @@ static int smu_smc_hw_setup(struct smu_context *smu) return ret; } + /* + * Set initialized values (get from vbios) to dpm tables context such as + * gfxclk, memclk, dcefclk, and etc. And enable the DPM feature for each + * type of clks. 
+ */ + ret = smu_set_default_dpm_table(smu); + if (ret) { + dev_err(adev->dev, "Failed to setup default dpm clock tables!\n"); + return ret; + } + ret = smu_notify_display_change(smu); if (ret) return ret; diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c index 9ee8cf8267c8..43f7adff6cb7 100644 --- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c +++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu10_hwmgr.c @@ -563,6 +563,8 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, struct smu10_hwmgr *data = hwmgr->backend; uint32_t min_sclk = hwmgr->display_config->min_core_set_clock; uint32_t min_mclk = hwmgr->display_config->min_mem_set_clock/100; + uint32_t index_fclk = data->clock_vol_info.vdd_dep_on_fclk->count - 1; + uint32_t index_socclk = data->clock_vol_info.vdd_dep_on_socclk->count - 1; if (hwmgr->smu_version < 0x1E3700) { pr_info("smu firmware version too old, can not set dpm level\n"); @@ -676,13 +678,13 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinFclkByFreq, hwmgr->display_config->num_display > 3 ? - SMU10_UMD_PSTATE_PEAK_FCLK : + data->clock_vol_info.vdd_dep_on_fclk->entries[0].clk : min_mclk, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinSocclkByFreq, - SMU10_UMD_PSTATE_MIN_SOCCLK, + data->clock_vol_info.vdd_dep_on_socclk->entries[0].clk, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetHardMinVcn, @@ -695,11 +697,11 @@ static int smu10_dpm_force_dpm_level(struct pp_hwmgr *hwmgr, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxFclkByFreq, - SMU10_UMD_PSTATE_PEAK_FCLK, + data->clock_vol_info.vdd_dep_on_fclk->entries[index_fclk].clk, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxSocclkByFreq, - SMU10_UMD_PSTATE_PEAK_SOCCLK, + data->clock_vol_info.vdd_dep_on_socclk->entries[index_socclk].clk, NULL); smum_send_msg_to_smc_with_parameter(hwmgr, PPSMC_MSG_SetSoftMaxVcn, diff --git a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c index dbb676c482fd..15263cf210d5 100644 --- a/drivers/gpu/drm/amd/powerplay/renoir_ppt.c +++ b/drivers/gpu/drm/amd/powerplay/renoir_ppt.c @@ -232,14 +232,16 @@ static int renoir_get_profiling_clk_mask(struct smu_context *smu, *sclk_mask = 0; } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) { if (mclk_mask) - *mclk_mask = 0; + /* mclk levels are in reverse order */ + *mclk_mask = NUM_MEMCLK_DPM_LEVELS - 1; } else if (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) { if(sclk_mask) /* The sclk as gfxclk and has three level about max/min/current */ *sclk_mask = 3 - 1; if(mclk_mask) - *mclk_mask = NUM_MEMCLK_DPM_LEVELS - 1; + /* mclk levels are in reverse order */ + *mclk_mask = 0; if(soc_mask) *soc_mask = NUM_SOCCLK_DPM_LEVELS - 1; @@ -333,7 +335,7 @@ static int renoir_get_dpm_ultimate_freq(struct smu_context *smu, case SMU_UCLK: case SMU_FCLK: case SMU_MCLK: - ret = renoir_get_dpm_clk_limited(smu, clk_type, 0, min); + ret = renoir_get_dpm_clk_limited(smu, clk_type, NUM_MEMCLK_DPM_LEVELS - 1, min); if (ret) goto failed; break; diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c index 8fa9b31a2484..f6d7e33c7099 100644 --- a/drivers/gpu/drm/i915/gvt/vgpu.c +++ b/drivers/gpu/drm/i915/gvt/vgpu.c @@ -368,6 +368,7 @@ void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu) static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, struct 
intel_vgpu_creation_params *param) { + struct drm_i915_private *dev_priv = gvt->gt->i915; struct intel_vgpu *vgpu; int ret; @@ -436,7 +437,10 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt, if (ret) goto out_clean_sched_policy; - ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D); + if (IS_BROADWELL(dev_priv)) + ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_B); + else + ret = intel_gvt_hypervisor_set_edid(vgpu, PORT_D); if (ret) goto out_clean_sched_policy; diff --git a/drivers/gpu/drm/i915/selftests/mock_gem_device.c b/drivers/gpu/drm/i915/selftests/mock_gem_device.c index f127e633f7ca..397c313a8b69 100644 --- a/drivers/gpu/drm/i915/selftests/mock_gem_device.c +++ b/drivers/gpu/drm/i915/selftests/mock_gem_device.c @@ -118,11 +118,11 @@ static struct dev_pm_domain pm_domain = { struct drm_i915_private *mock_gem_device(void) { - struct drm_i915_private *i915; - struct pci_dev *pdev; #if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU) - struct dev_iommu iommu; + static struct dev_iommu fake_iommu = { .priv = (void *)-1 }; #endif + struct drm_i915_private *i915; + struct pci_dev *pdev; int err; pdev = kzalloc(sizeof(*pdev), GFP_KERNEL); @@ -141,10 +141,8 @@ struct drm_i915_private *mock_gem_device(void) dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64)); #if IS_ENABLED(CONFIG_IOMMU_API) && defined(CONFIG_INTEL_IOMMU) - /* HACK HACK HACK to disable iommu for the fake device; force identity mapping */ - memset(&iommu, 0, sizeof(iommu)); - iommu.priv = (void *)-1; - pdev->dev.iommu = &iommu; + /* HACK to disable iommu for the fake device; force identity mapping */ + pdev->dev.iommu = &fake_iommu; #endif pci_set_drvdata(pdev, i915); diff --git a/drivers/gpu/drm/sun4i/sun8i_csc.h b/drivers/gpu/drm/sun4i/sun8i_csc.h index f42441b1b14d..a55a38ad849c 100644 --- a/drivers/gpu/drm/sun4i/sun8i_csc.h +++ b/drivers/gpu/drm/sun4i/sun8i_csc.h @@ -12,7 +12,7 @@ struct sun8i_mixer; /* VI channel CSC units offsets */ #define CCSC00_OFFSET 0xAA050 -#define CCSC01_OFFSET 0xFA000 +#define CCSC01_OFFSET 0xFA050 #define CCSC10_OFFSET 0xA0000 #define CCSC11_OFFSET 0xF0000 diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c index cc4fb916318f..c3304028e3dc 100644 --- a/drivers/gpu/drm/sun4i/sun8i_mixer.c +++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c @@ -307,7 +307,7 @@ static struct regmap_config sun8i_mixer_regmap_config = { .reg_bits = 32, .val_bits = 32, .reg_stride = 4, - .max_register = 0xbfffc, /* guessed */ + .max_register = 0xffffc, /* guessed */ }; static int sun8i_mixer_of_get_id(struct device_node *node) diff --git a/drivers/gpu/drm/vc4/vc4_hdmi.c b/drivers/gpu/drm/vc4/vc4_hdmi.c index 15a11cd4de25..6339c6f0f571 100644 --- a/drivers/gpu/drm/vc4/vc4_hdmi.c +++ b/drivers/gpu/drm/vc4/vc4_hdmi.c @@ -1117,6 +1117,7 @@ static int vc4_hdmi_audio_init(struct vc4_hdmi *hdmi) card->num_links = 1; card->name = "vc4-hdmi"; card->dev = dev; + card->owner = THIS_MODULE; /* * Be careful, snd_soc_register_card() calls dev_set_drvdata() and diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c index 4a76fc7114ad..f8bdd4ea294a 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c @@ -55,7 +55,7 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man, id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL); if (id < 0) - return (id != -ENOMEM ? 
0 : id); + return id; spin_lock(&gman->lock); diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c index b7c816ba7166..c8b9335bccd8 100644 --- a/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c +++ b/drivers/gpu/drm/vmwgfx/vmwgfx_thp.c @@ -95,7 +95,7 @@ found_unlock: mem->start = node->start; } - return 0; + return ret; } diff --git a/drivers/i2c/busses/i2c-cpm.c b/drivers/i2c/busses/i2c-cpm.c index 1213e1932ccb..24d584a1c9a7 100644 --- a/drivers/i2c/busses/i2c-cpm.c +++ b/drivers/i2c/busses/i2c-cpm.c @@ -65,6 +65,9 @@ struct i2c_ram { char res1[4]; /* Reserved */ ushort rpbase; /* Relocation pointer */ char res2[2]; /* Reserved */ + /* The following elements are only for CPM2 */ + char res3[4]; /* Reserved */ + uint sdmatmp; /* Internal */ }; #define I2COM_START 0x80 diff --git a/drivers/i2c/busses/i2c-i801.c b/drivers/i2c/busses/i2c-i801.c index ebb4c0b03057..bffca729e1c7 100644 --- a/drivers/i2c/busses/i2c-i801.c +++ b/drivers/i2c/busses/i2c-i801.c @@ -1917,6 +1917,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id) pci_set_drvdata(dev, priv); + dev_pm_set_driver_flags(&dev->dev, DPM_FLAG_NO_DIRECT_COMPLETE); pm_runtime_set_autosuspend_delay(&dev->dev, 1000); pm_runtime_use_autosuspend(&dev->dev); pm_runtime_put_autosuspend(&dev->dev); diff --git a/drivers/i2c/busses/i2c-npcm7xx.c b/drivers/i2c/busses/i2c-npcm7xx.c index dfcf04e1967f..2ad166355ec9 100644 --- a/drivers/i2c/busses/i2c-npcm7xx.c +++ b/drivers/i2c/busses/i2c-npcm7xx.c @@ -2163,6 +2163,15 @@ static int npcm_i2c_master_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, if (bus->cmd_err == -EAGAIN) ret = i2c_recover_bus(adap); + /* + * After any type of error, check if LAST bit is still set, + * due to a HW issue. + * It cannot be cleared without resetting the module. 
+ */ + if (bus->cmd_err && + (NPCM_I2CRXF_CTL_LAST_PEC & ioread8(bus->reg + NPCM_I2CRXF_CTL))) + npcm_i2c_reset(bus); + #if IS_ENABLED(CONFIG_I2C_SLAVE) /* reenable slave if it was enabled */ if (bus->slave) diff --git a/drivers/iio/adc/ad7124.c b/drivers/iio/adc/ad7124.c index 8dce06e9e69c..766c73333604 100644 --- a/drivers/iio/adc/ad7124.c +++ b/drivers/iio/adc/ad7124.c @@ -177,12 +177,12 @@ static const struct iio_chan_spec ad7124_channel_template = { static struct ad7124_chip_info ad7124_chip_info_tbl[] = { [ID_AD7124_4] = { - .name = "ad7127-4", + .name = "ad7124-4", .chip_id = CHIPID_AD7124_4, .num_inputs = 8, }, [ID_AD7124_8] = { - .name = "ad7127-8", + .name = "ad7124-8", .chip_id = CHIPID_AD7124_8, .num_inputs = 16, }, diff --git a/drivers/iio/adc/qcom-spmi-adc5.c b/drivers/iio/adc/qcom-spmi-adc5.c index b4b73c9920b4..c10aa28be70a 100644 --- a/drivers/iio/adc/qcom-spmi-adc5.c +++ b/drivers/iio/adc/qcom-spmi-adc5.c @@ -982,7 +982,7 @@ static int adc5_probe(struct platform_device *pdev) static struct platform_driver adc5_driver = { .driver = { - .name = "qcom-spmi-adc5.c", + .name = "qcom-spmi-adc5", .of_match_table = adc5_match_table, }, .probe = adc5_probe, diff --git a/drivers/infiniband/core/device.c b/drivers/infiniband/core/device.c index c36b4d2b61e0..23ee65a9185f 100644 --- a/drivers/infiniband/core/device.c +++ b/drivers/infiniband/core/device.c @@ -1285,6 +1285,8 @@ static void disable_device(struct ib_device *device) remove_client_context(device, cid); } + ib_cq_pool_destroy(device); + /* Pairs with refcount_set in enable_device */ ib_device_put(device); wait_for_completion(&device->unreg_completion); @@ -1328,6 +1330,8 @@ static int enable_device_and_get(struct ib_device *device) goto out; } + ib_cq_pool_init(device); + down_read(&clients_rwsem); xa_for_each_marked (&clients, index, client, CLIENT_REGISTERED) { ret = add_client_context(device, client); @@ -1400,7 +1404,6 @@ int ib_register_device(struct ib_device *device, const char *name) goto dev_cleanup; } - ib_cq_pool_init(device); ret = enable_device_and_get(device); dev_set_uevent_suppress(&device->dev, false); /* Mark for userspace that device is ready */ @@ -1455,7 +1458,6 @@ static void __ib_unregister_device(struct ib_device *ib_dev) goto out; disable_device(ib_dev); - ib_cq_pool_destroy(ib_dev); /* Expedite removing unregistered pointers from the hash table */ free_netdevs(ib_dev); diff --git a/drivers/input/mouse/trackpoint.c b/drivers/input/mouse/trackpoint.c index 854d5e758724..ef2fa0905208 100644 --- a/drivers/input/mouse/trackpoint.c +++ b/drivers/input/mouse/trackpoint.c @@ -282,6 +282,8 @@ static int trackpoint_start_protocol(struct psmouse *psmouse, case TP_VARIANT_ALPS: case TP_VARIANT_ELAN: case TP_VARIANT_NXP: + case TP_VARIANT_JYT_SYNAPTICS: + case TP_VARIANT_SYNAPTICS: if (variant_id) *variant_id = param[0]; if (firmware_id) diff --git a/drivers/input/serio/i8042-x86ia64io.h b/drivers/input/serio/i8042-x86ia64io.h index 37fb9aa88f9c..a4c9b9652560 100644 --- a/drivers/input/serio/i8042-x86ia64io.h +++ b/drivers/input/serio/i8042-x86ia64io.h @@ -721,6 +721,13 @@ static const struct dmi_system_id __initconst i8042_dmi_nopnp_table[] = { DMI_MATCH(DMI_BOARD_VENDOR, "MICRO-STAR INTERNATIONAL CO., LTD"), }, }, + { + /* Acer Aspire 5 A515 */ + .matches = { + DMI_MATCH(DMI_BOARD_NAME, "Grumpy_PK"), + DMI_MATCH(DMI_BOARD_VENDOR, "PK"), + }, + }, { } }; diff --git a/drivers/iommu/amd/init.c b/drivers/iommu/amd/init.c index 445a08d23fed..1ba6b4cc56e8 100644 --- a/drivers/iommu/amd/init.c +++ 
b/drivers/iommu/amd/init.c @@ -1104,25 +1104,6 @@ static int __init add_early_maps(void) } /* - * Reads the device exclusion range from ACPI and initializes the IOMMU with - * it - */ -static void __init set_device_exclusion_range(u16 devid, struct ivmd_header *m) -{ - if (!(m->flags & IVMD_FLAG_EXCL_RANGE)) - return; - - /* - * Treat per-device exclusion ranges as r/w unity-mapped regions - * since some buggy BIOSes might lead to the overwritten exclusion - * range (exclusion_start and exclusion_length members). This - * happens when there are multiple exclusion ranges (IVMD entries) - * defined in ACPI table. - */ - m->flags = (IVMD_FLAG_IW | IVMD_FLAG_IR | IVMD_FLAG_UNITY_MAP); -} - -/* * Takes a pointer to an AMD IOMMU entry in the ACPI table and * initializes the hardware and our data structures with it. */ @@ -2073,30 +2054,6 @@ static void __init free_unity_maps(void) } } -/* called when we find an exclusion range definition in ACPI */ -static int __init init_exclusion_range(struct ivmd_header *m) -{ - int i; - - switch (m->type) { - case ACPI_IVMD_TYPE: - set_device_exclusion_range(m->devid, m); - break; - case ACPI_IVMD_TYPE_ALL: - for (i = 0; i <= amd_iommu_last_bdf; ++i) - set_device_exclusion_range(i, m); - break; - case ACPI_IVMD_TYPE_RANGE: - for (i = m->devid; i <= m->aux; ++i) - set_device_exclusion_range(i, m); - break; - default: - break; - } - - return 0; -} - /* called for unity map ACPI definition */ static int __init init_unity_map_range(struct ivmd_header *m) { @@ -2107,9 +2064,6 @@ static int __init init_unity_map_range(struct ivmd_header *m) if (e == NULL) return -ENOMEM; - if (m->flags & IVMD_FLAG_EXCL_RANGE) - init_exclusion_range(m); - switch (m->type) { default: kfree(e); @@ -2133,6 +2087,16 @@ static int __init init_unity_map_range(struct ivmd_header *m) e->address_end = e->address_start + PAGE_ALIGN(m->range_length); e->prot = m->flags >> 1; + /* + * Treat per-device exclusion ranges as r/w unity-mapped regions + * since some buggy BIOSes might lead to the overwritten exclusion + * range (exclusion_start and exclusion_length members). This + * happens when there are multiple exclusion ranges (IVMD entries) + * defined in ACPI table. 
+ */ + if (m->flags & IVMD_FLAG_EXCL_RANGE) + e->prot = (IVMD_FLAG_IW | IVMD_FLAG_IR) >> 1; + DUMP_printk("%s devid_start: %02x:%02x.%x devid_end: %02x:%02x.%x" " range_start: %016llx range_end: %016llx flags: %x\n", s, PCI_BUS_NUM(e->devid_start), PCI_SLOT(e->devid_start), diff --git a/drivers/iommu/exynos-iommu.c b/drivers/iommu/exynos-iommu.c index bad3c0ce10cb..de324b4eedfe 100644 --- a/drivers/iommu/exynos-iommu.c +++ b/drivers/iommu/exynos-iommu.c @@ -1295,13 +1295,17 @@ static int exynos_iommu_of_xlate(struct device *dev, return -ENODEV; data = platform_get_drvdata(sysmmu); - if (!data) + if (!data) { + put_device(&sysmmu->dev); return -ENODEV; + } if (!owner) { owner = kzalloc(sizeof(*owner), GFP_KERNEL); - if (!owner) + if (!owner) { + put_device(&sysmmu->dev); return -ENOMEM; + } INIT_LIST_HEAD(&owner->controllers); mutex_init(&owner->rpm_lock); diff --git a/drivers/iommu/intel/iommu.c b/drivers/iommu/intel/iommu.c index 87b17bac04c2..2239c211178b 100644 --- a/drivers/iommu/intel/iommu.c +++ b/drivers/iommu/intel/iommu.c @@ -2664,7 +2664,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, } /* Setup the PASID entry for requests without PASID: */ - spin_lock(&iommu->lock); + spin_lock_irqsave(&iommu->lock, flags); if (hw_pass_through && domain_type_is_si(domain)) ret = intel_pasid_setup_pass_through(iommu, domain, dev, PASID_RID2PASID); @@ -2674,7 +2674,7 @@ static struct dmar_domain *dmar_insert_one_dev_info(struct intel_iommu *iommu, else ret = intel_pasid_setup_second_level(iommu, domain, dev, PASID_RID2PASID); - spin_unlock(&iommu->lock); + spin_unlock_irqrestore(&iommu->lock, flags); if (ret) { dev_err(dev, "Setup RID2PASID failed\n"); dmar_remove_one_dev_info(dev); diff --git a/drivers/md/dm.c b/drivers/md/dm.c index 4a40df8af7d3..6ed05ca65a0f 100644 --- a/drivers/md/dm.c +++ b/drivers/md/dm.c @@ -1724,23 +1724,6 @@ out: return ret; } -static void dm_queue_split(struct mapped_device *md, struct dm_target *ti, struct bio **bio) -{ - unsigned len, sector_count; - - sector_count = bio_sectors(*bio); - len = min_t(sector_t, max_io_len((*bio)->bi_iter.bi_sector, ti), sector_count); - - if (sector_count > len) { - struct bio *split = bio_split(*bio, len, GFP_NOIO, &md->queue->bio_split); - - bio_chain(split, *bio); - trace_block_split(md->queue, split, (*bio)->bi_iter.bi_sector); - submit_bio_noacct(*bio); - *bio = split; - } -} - static blk_qc_t dm_process_bio(struct mapped_device *md, struct dm_table *map, struct bio *bio) { @@ -1761,21 +1744,21 @@ static blk_qc_t dm_process_bio(struct mapped_device *md, } /* - * If in ->queue_bio we need to use blk_queue_split(), otherwise + * If in ->submit_bio we need to use blk_queue_split(), otherwise * queue_limits for abnormal requests (e.g. discard, writesame, etc) * won't be imposed. + * If called from dm_wq_work() for deferred bio processing, bio + * was already handled by following code with previous ->submit_bio. 
*/ if (current->bio_list) { if (is_abnormal_io(bio)) blk_queue_split(&bio); - else - dm_queue_split(md, ti, &bio); + /* regular IO is split by __split_and_process_bio */ } if (dm_get_md_type(md) == DM_TYPE_NVME_BIO_BASED) return __process_bio(md, map, bio, ti); - else - return __split_and_process_bio(md, map, bio); + return __split_and_process_bio(md, map, bio); } static blk_qc_t dm_submit_bio(struct bio *bio) diff --git a/drivers/media/cec/core/cec-adap.c b/drivers/media/cec/core/cec-adap.c index 4efe8014445e..926d65db6d3e 100644 --- a/drivers/media/cec/core/cec-adap.c +++ b/drivers/media/cec/core/cec-adap.c @@ -1199,7 +1199,7 @@ void cec_received_msg_ts(struct cec_adapter *adap, /* Cancel the pending timeout work */ if (!cancel_delayed_work(&data->work)) { mutex_unlock(&adap->lock); - flush_scheduled_work(); + cancel_delayed_work_sync(&data->work); mutex_lock(&adap->lock); } /* diff --git a/drivers/media/common/videobuf2/videobuf2-core.c b/drivers/media/common/videobuf2/videobuf2-core.c index f544d3393e9d..4eab6d81cce1 100644 --- a/drivers/media/common/videobuf2/videobuf2-core.c +++ b/drivers/media/common/videobuf2/videobuf2-core.c @@ -721,39 +721,14 @@ int vb2_verify_memory_type(struct vb2_queue *q, } EXPORT_SYMBOL(vb2_verify_memory_type); -static void set_queue_consistency(struct vb2_queue *q, bool consistent_mem) -{ - q->dma_attrs &= ~DMA_ATTR_NON_CONSISTENT; - - if (!vb2_queue_allows_cache_hints(q)) - return; - if (!consistent_mem) - q->dma_attrs |= DMA_ATTR_NON_CONSISTENT; -} - -static bool verify_consistency_attr(struct vb2_queue *q, bool consistent_mem) -{ - bool queue_is_consistent = !(q->dma_attrs & DMA_ATTR_NON_CONSISTENT); - - if (consistent_mem != queue_is_consistent) { - dprintk(q, 1, "memory consistency model mismatch\n"); - return false; - } - return true; -} - int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, - unsigned int flags, unsigned int *count) + unsigned int *count) { unsigned int num_buffers, allocated_buffers, num_planes = 0; unsigned plane_sizes[VB2_MAX_PLANES] = { }; - bool consistent_mem = true; unsigned int i; int ret; - if (flags & V4L2_FLAG_MEMORY_NON_CONSISTENT) - consistent_mem = false; - if (q->streaming) { dprintk(q, 1, "streaming active\n"); return -EBUSY; @@ -765,8 +740,7 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, } if (*count == 0 || q->num_buffers != 0 || - (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory) || - !verify_consistency_attr(q, consistent_mem)) { + (q->memory != VB2_MEMORY_UNKNOWN && q->memory != memory)) { /* * We already have buffers allocated, so first check if they * are not in use and can be freed. @@ -803,7 +777,6 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, num_buffers = min_t(unsigned int, num_buffers, VB2_MAX_FRAME); memset(q->alloc_devs, 0, sizeof(q->alloc_devs)); q->memory = memory; - set_queue_consistency(q, consistent_mem); /* * Ask the driver how many buffers and planes per buffer it requires. 
@@ -888,18 +861,14 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, EXPORT_SYMBOL_GPL(vb2_core_reqbufs); int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, - unsigned int flags, unsigned int *count, + unsigned int *count, unsigned int requested_planes, const unsigned int requested_sizes[]) { unsigned int num_planes = 0, num_buffers, allocated_buffers; unsigned plane_sizes[VB2_MAX_PLANES] = { }; - bool consistent_mem = true; int ret; - if (flags & V4L2_FLAG_MEMORY_NON_CONSISTENT) - consistent_mem = false; - if (q->num_buffers == VB2_MAX_FRAME) { dprintk(q, 1, "maximum number of buffers already allocated\n"); return -ENOBUFS; @@ -912,15 +881,12 @@ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, } memset(q->alloc_devs, 0, sizeof(q->alloc_devs)); q->memory = memory; - set_queue_consistency(q, consistent_mem); q->waiting_for_buffers = !q->is_output; } else { if (q->memory != memory) { dprintk(q, 1, "memory model mismatch\n"); return -EINVAL; } - if (!verify_consistency_attr(q, consistent_mem)) - return -EINVAL; } num_buffers = min(*count, VB2_MAX_FRAME - q->num_buffers); @@ -2581,7 +2547,7 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read) fileio->memory = VB2_MEMORY_MMAP; fileio->type = q->type; q->fileio = fileio; - ret = vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count); + ret = vb2_core_reqbufs(q, fileio->memory, &fileio->count); if (ret) goto err_kfree; @@ -2638,7 +2604,7 @@ static int __vb2_init_fileio(struct vb2_queue *q, int read) err_reqbufs: fileio->count = 0; - vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count); + vb2_core_reqbufs(q, fileio->memory, &fileio->count); err_kfree: q->fileio = NULL; @@ -2658,7 +2624,7 @@ static int __vb2_cleanup_fileio(struct vb2_queue *q) vb2_core_streamoff(q, q->type); q->fileio = NULL; fileio->count = 0; - vb2_core_reqbufs(q, fileio->memory, 0, &fileio->count); + vb2_core_reqbufs(q, fileio->memory, &fileio->count); kfree(fileio); dprintk(q, 3, "file io emulator closed\n"); } diff --git a/drivers/media/common/videobuf2/videobuf2-dma-contig.c b/drivers/media/common/videobuf2/videobuf2-dma-contig.c index ec3446cc45b8..7b1b86ec942d 100644 --- a/drivers/media/common/videobuf2/videobuf2-dma-contig.c +++ b/drivers/media/common/videobuf2/videobuf2-dma-contig.c @@ -42,11 +42,6 @@ struct vb2_dc_buf { struct dma_buf_attachment *db_attach; }; -static inline bool vb2_dc_buffer_consistent(unsigned long attr) -{ - return !(attr & DMA_ATTR_NON_CONSISTENT); -} - /*********************************************/ /* scatterlist table functions */ /*********************************************/ @@ -341,13 +336,6 @@ static int vb2_dc_dmabuf_ops_begin_cpu_access(struct dma_buf *dbuf, enum dma_data_direction direction) { - struct vb2_dc_buf *buf = dbuf->priv; - struct sg_table *sgt = buf->dma_sgt; - - if (vb2_dc_buffer_consistent(buf->attrs)) - return 0; - - dma_sync_sg_for_cpu(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir); return 0; } @@ -355,13 +343,6 @@ static int vb2_dc_dmabuf_ops_end_cpu_access(struct dma_buf *dbuf, enum dma_data_direction direction) { - struct vb2_dc_buf *buf = dbuf->priv; - struct sg_table *sgt = buf->dma_sgt; - - if (vb2_dc_buffer_consistent(buf->attrs)) - return 0; - - dma_sync_sg_for_device(buf->dev, sgt->sgl, sgt->nents, buf->dma_dir); return 0; } diff --git a/drivers/media/common/videobuf2/videobuf2-dma-sg.c b/drivers/media/common/videobuf2/videobuf2-dma-sg.c index 0a40e00f0d7e..a86fce5d8ea8 100644 --- a/drivers/media/common/videobuf2/videobuf2-dma-sg.c +++ 
b/drivers/media/common/videobuf2/videobuf2-dma-sg.c @@ -123,8 +123,7 @@ static void *vb2_dma_sg_alloc(struct device *dev, unsigned long dma_attrs, /* * NOTE: dma-sg allocates memory using the page allocator directly, so * there is no memory consistency guarantee, hence dma-sg ignores DMA - * attributes passed from the upper layer. That means that - * V4L2_FLAG_MEMORY_NON_CONSISTENT has no effect on dma-sg buffers. + * attributes passed from the upper layer. */ buf->pages = kvmalloc_array(buf->num_pages, sizeof(struct page *), GFP_KERNEL | __GFP_ZERO); diff --git a/drivers/media/common/videobuf2/videobuf2-v4l2.c b/drivers/media/common/videobuf2/videobuf2-v4l2.c index 30caad27281e..cfe197df970d 100644 --- a/drivers/media/common/videobuf2/videobuf2-v4l2.c +++ b/drivers/media/common/videobuf2/videobuf2-v4l2.c @@ -722,22 +722,12 @@ static void fill_buf_caps(struct vb2_queue *q, u32 *caps) #endif } -static void clear_consistency_attr(struct vb2_queue *q, - int memory, - unsigned int *flags) -{ - if (!q->allow_cache_hints || memory != V4L2_MEMORY_MMAP) - *flags &= ~V4L2_FLAG_MEMORY_NON_CONSISTENT; -} - int vb2_reqbufs(struct vb2_queue *q, struct v4l2_requestbuffers *req) { int ret = vb2_verify_memory_type(q, req->memory, req->type); fill_buf_caps(q, &req->capabilities); - clear_consistency_attr(q, req->memory, &req->flags); - return ret ? ret : vb2_core_reqbufs(q, req->memory, - req->flags, &req->count); + return ret ? ret : vb2_core_reqbufs(q, req->memory, &req->count); } EXPORT_SYMBOL_GPL(vb2_reqbufs); @@ -769,7 +759,6 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create) unsigned i; fill_buf_caps(q, &create->capabilities); - clear_consistency_attr(q, create->memory, &create->flags); create->index = q->num_buffers; if (create->count == 0) return ret != -EBUSY ? ret : 0; @@ -813,7 +802,6 @@ int vb2_create_bufs(struct vb2_queue *q, struct v4l2_create_buffers *create) if (requested_sizes[i] == 0) return -EINVAL; return ret ? ret : vb2_core_create_bufs(q, create->memory, - create->flags, &create->count, requested_planes, requested_sizes); @@ -998,12 +986,11 @@ int vb2_ioctl_reqbufs(struct file *file, void *priv, int res = vb2_verify_memory_type(vdev->queue, p->memory, p->type); fill_buf_caps(vdev->queue, &p->capabilities); - clear_consistency_attr(vdev->queue, p->memory, &p->flags); if (res) return res; if (vb2_queue_is_busy(vdev, file)) return -EBUSY; - res = vb2_core_reqbufs(vdev->queue, p->memory, p->flags, &p->count); + res = vb2_core_reqbufs(vdev->queue, p->memory, &p->count); /* If count == 0, then the owner has released all buffers and he is no longer owner of the queue. Otherwise we have a new owner. */ if (res == 0) @@ -1021,7 +1008,6 @@ int vb2_ioctl_create_bufs(struct file *file, void *priv, p->index = vdev->queue->num_buffers; fill_buf_caps(vdev->queue, &p->capabilities); - clear_consistency_attr(vdev->queue, p->memory, &p->flags); /* * If count == 0, then just check if memory and type are valid. * Any -EBUSY result from vb2_verify_memory_type can be mapped to 0. 
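A minimal sketch (hypothetical call site, not part of this series) of how the vb2_core_reqbufs() signature change above propagates to callers once the V4L2_FLAG_MEMORY_NON_CONSISTENT flags argument is removed; the dvb_vb2.c hunk below makes exactly this adjustment:

/* Hypothetical driver helper, kernel context assumed; shows the
 * three-argument vb2_core_reqbufs() after the flags parameter was
 * dropped by this series.
 */
#include <media/videobuf2-core.h>

static int example_reqbufs(struct vb2_queue *q, unsigned int *count)
{
	/* before this series: vb2_core_reqbufs(q, VB2_MEMORY_MMAP, 0, count); */
	return vb2_core_reqbufs(q, VB2_MEMORY_MMAP, count);
}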
diff --git a/drivers/media/dvb-core/dvb_vb2.c b/drivers/media/dvb-core/dvb_vb2.c index 959d110407a4..6974f1731529 100644 --- a/drivers/media/dvb-core/dvb_vb2.c +++ b/drivers/media/dvb-core/dvb_vb2.c @@ -342,7 +342,7 @@ int dvb_vb2_reqbufs(struct dvb_vb2_ctx *ctx, struct dmx_requestbuffers *req) ctx->buf_siz = req->size; ctx->buf_cnt = req->count; - ret = vb2_core_reqbufs(&ctx->vb_q, VB2_MEMORY_MMAP, 0, &req->count); + ret = vb2_core_reqbufs(&ctx->vb_q, VB2_MEMORY_MMAP, &req->count); if (ret) { ctx->state = DVB_VB2_STATE_NONE; dprintk(1, "[%s] count=%d size=%d errno=%d\n", ctx->name, diff --git a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c index 593bcf6c3735..a99e82ec9ab6 100644 --- a/drivers/media/v4l2-core/v4l2-compat-ioctl32.c +++ b/drivers/media/v4l2-core/v4l2-compat-ioctl32.c @@ -246,9 +246,6 @@ struct v4l2_format32 { * @memory: buffer memory type * @format: frame format, for which buffers are requested * @capabilities: capabilities of this buffer type. - * @flags: additional buffer management attributes (ignored unless the - * queue has V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS capability and - * configured for MMAP streaming I/O). * @reserved: future extensions */ struct v4l2_create_buffers32 { @@ -257,8 +254,7 @@ struct v4l2_create_buffers32 { __u32 memory; /* enum v4l2_memory */ struct v4l2_format32 format; __u32 capabilities; - __u32 flags; - __u32 reserved[6]; + __u32 reserved[7]; }; static int __bufsize_v4l2_format(struct v4l2_format32 __user *p32, u32 *size) @@ -359,8 +355,7 @@ static int get_v4l2_create32(struct v4l2_create_buffers __user *p64, { if (!access_ok(p32, sizeof(*p32)) || copy_in_user(p64, p32, - offsetof(struct v4l2_create_buffers32, format)) || - assign_in_user(&p64->flags, &p32->flags)) + offsetof(struct v4l2_create_buffers32, format))) return -EFAULT; return __get_v4l2_format32(&p64->format, &p32->format, aux_buf, aux_space); @@ -422,7 +417,6 @@ static int put_v4l2_create32(struct v4l2_create_buffers __user *p64, copy_in_user(p32, p64, offsetof(struct v4l2_create_buffers32, format)) || assign_in_user(&p32->capabilities, &p64->capabilities) || - assign_in_user(&p32->flags, &p64->flags) || copy_in_user(p32->reserved, p64->reserved, sizeof(p64->reserved))) return -EFAULT; return __put_v4l2_format32(&p64->format, &p32->format); diff --git a/drivers/media/v4l2-core/v4l2-ioctl.c b/drivers/media/v4l2-core/v4l2-ioctl.c index f74b42280892..eeff398fbdcc 100644 --- a/drivers/media/v4l2-core/v4l2-ioctl.c +++ b/drivers/media/v4l2-core/v4l2-ioctl.c @@ -2042,6 +2042,9 @@ static int v4l_reqbufs(const struct v4l2_ioctl_ops *ops, if (ret) return ret; + + CLEAR_AFTER_FIELD(p, capabilities); + return ops->vidioc_reqbufs(file, fh, p); } @@ -2081,7 +2084,7 @@ static int v4l_create_bufs(const struct v4l2_ioctl_ops *ops, if (ret) return ret; - CLEAR_AFTER_FIELD(create, flags); + CLEAR_AFTER_FIELD(create, capabilities); v4l_sanitize_format(&create->format); diff --git a/drivers/memstick/core/memstick.c b/drivers/memstick/core/memstick.c index 693ee73eb291..ef03d6fafc5c 100644 --- a/drivers/memstick/core/memstick.c +++ b/drivers/memstick/core/memstick.c @@ -441,6 +441,9 @@ static void memstick_check(struct work_struct *work) } else if (host->card->stop) host->card->stop(host->card); + if (host->removing) + goto out_power_off; + card = memstick_alloc_card(host); if (!card) { @@ -545,6 +548,7 @@ EXPORT_SYMBOL(memstick_add_host); */ void memstick_remove_host(struct memstick_host *host) { + host->removing = 1; flush_workqueue(workqueue); 
mutex_lock(&host->lock); if (host->card) diff --git a/drivers/mmc/host/mmc_spi.c b/drivers/mmc/host/mmc_spi.c index 5055a7eb134a..18a850f37ddc 100644 --- a/drivers/mmc/host/mmc_spi.c +++ b/drivers/mmc/host/mmc_spi.c @@ -1320,7 +1320,7 @@ static void mmc_spi_dma_free(struct mmc_spi_host *host) DMA_BIDIRECTIONAL); } #else -static inline mmc_spi_dma_alloc(struct mmc_spi_host *host) { return 0; } +static inline int mmc_spi_dma_alloc(struct mmc_spi_host *host) { return 0; } static inline void mmc_spi_dma_free(struct mmc_spi_host *host) {} #endif diff --git a/drivers/mmc/host/sdhci-pci-core.c b/drivers/mmc/host/sdhci-pci-core.c index af413805bbf1..914f5184295f 100644 --- a/drivers/mmc/host/sdhci-pci-core.c +++ b/drivers/mmc/host/sdhci-pci-core.c @@ -794,7 +794,8 @@ static int byt_emmc_probe_slot(struct sdhci_pci_slot *slot) static bool glk_broken_cqhci(struct sdhci_pci_slot *slot) { return slot->chip->pdev->device == PCI_DEVICE_ID_INTEL_GLK_EMMC && - dmi_match(DMI_BIOS_VENDOR, "LENOVO"); + (dmi_match(DMI_BIOS_VENDOR, "LENOVO") || + dmi_match(DMI_SYS_VENDOR, "IRBIS")); } static int glk_emmc_probe_slot(struct sdhci_pci_slot *slot) diff --git a/drivers/net/dsa/microchip/ksz8795.c b/drivers/net/dsa/microchip/ksz8795.c index 8f1d15ea15d9..f5779e152377 100644 --- a/drivers/net/dsa/microchip/ksz8795.c +++ b/drivers/net/dsa/microchip/ksz8795.c @@ -932,11 +932,19 @@ static void ksz8795_port_setup(struct ksz_device *dev, int port, bool cpu_port) ksz_port_cfg(dev, port, P_PRIO_CTRL, PORT_802_1P_ENABLE, true); if (cpu_port) { + if (!p->interface && dev->compat_interface) { + dev_warn(dev->dev, + "Using legacy switch \"phy-mode\" property, because it is missing on port %d node. " + "Please update your device tree.\n", + port); + p->interface = dev->compat_interface; + } + /* Configure MII interface for proper network communication. 
*/ ksz_read8(dev, REG_PORT_5_CTRL_6, &data8); data8 &= ~PORT_INTERFACE_TYPE; data8 &= ~PORT_GMII_1GPS_MODE; - switch (dev->interface) { + switch (p->interface) { case PHY_INTERFACE_MODE_MII: p->phydev.speed = SPEED_100; break; @@ -952,11 +960,11 @@ static void ksz8795_port_setup(struct ksz_device *dev, int port, bool cpu_port) default: data8 &= ~PORT_RGMII_ID_IN_ENABLE; data8 &= ~PORT_RGMII_ID_OUT_ENABLE; - if (dev->interface == PHY_INTERFACE_MODE_RGMII_ID || - dev->interface == PHY_INTERFACE_MODE_RGMII_RXID) + if (p->interface == PHY_INTERFACE_MODE_RGMII_ID || + p->interface == PHY_INTERFACE_MODE_RGMII_RXID) data8 |= PORT_RGMII_ID_IN_ENABLE; - if (dev->interface == PHY_INTERFACE_MODE_RGMII_ID || - dev->interface == PHY_INTERFACE_MODE_RGMII_TXID) + if (p->interface == PHY_INTERFACE_MODE_RGMII_ID || + p->interface == PHY_INTERFACE_MODE_RGMII_TXID) data8 |= PORT_RGMII_ID_OUT_ENABLE; data8 |= PORT_GMII_1GPS_MODE; data8 |= PORT_INTERFACE_RGMII; @@ -1252,7 +1260,7 @@ static int ksz8795_switch_init(struct ksz_device *dev) } /* set the real number of ports */ - dev->ds->num_ports = dev->port_cnt; + dev->ds->num_ports = dev->port_cnt + 1; return 0; } diff --git a/drivers/net/dsa/microchip/ksz9477.c b/drivers/net/dsa/microchip/ksz9477.c index 3cb22d149813..2f5506ac7d19 100644 --- a/drivers/net/dsa/microchip/ksz9477.c +++ b/drivers/net/dsa/microchip/ksz9477.c @@ -1208,7 +1208,7 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port) /* configure MAC to 1G & RGMII mode */ ksz_pread8(dev, port, REG_PORT_XMII_CTRL_1, &data8); - switch (dev->interface) { + switch (p->interface) { case PHY_INTERFACE_MODE_MII: ksz9477_set_xmii(dev, 0, &data8); ksz9477_set_gbit(dev, false, &data8); @@ -1229,11 +1229,11 @@ static void ksz9477_port_setup(struct ksz_device *dev, int port, bool cpu_port) ksz9477_set_gbit(dev, true, &data8); data8 &= ~PORT_RGMII_ID_IG_ENABLE; data8 &= ~PORT_RGMII_ID_EG_ENABLE; - if (dev->interface == PHY_INTERFACE_MODE_RGMII_ID || - dev->interface == PHY_INTERFACE_MODE_RGMII_RXID) + if (p->interface == PHY_INTERFACE_MODE_RGMII_ID || + p->interface == PHY_INTERFACE_MODE_RGMII_RXID) data8 |= PORT_RGMII_ID_IG_ENABLE; - if (dev->interface == PHY_INTERFACE_MODE_RGMII_ID || - dev->interface == PHY_INTERFACE_MODE_RGMII_TXID) + if (p->interface == PHY_INTERFACE_MODE_RGMII_ID || + p->interface == PHY_INTERFACE_MODE_RGMII_TXID) data8 |= PORT_RGMII_ID_EG_ENABLE; p->phydev.speed = SPEED_1000; break; @@ -1269,23 +1269,32 @@ static void ksz9477_config_cpu_port(struct dsa_switch *ds) dev->cpu_port = i; dev->host_mask = (1 << dev->cpu_port); dev->port_mask |= dev->host_mask; + p = &dev->ports[i]; /* Read from XMII register to determine host port * interface. If set specifically in device tree * note the difference to help debugging. */ interface = ksz9477_get_interface(dev, i); - if (!dev->interface) - dev->interface = interface; - if (interface && interface != dev->interface) + if (!p->interface) { + if (dev->compat_interface) { + dev_warn(dev->dev, + "Using legacy switch \"phy-mode\" property, because it is missing on port %d node. 
" + "Please update your device tree.\n", + i); + p->interface = dev->compat_interface; + } else { + p->interface = interface; + } + } + if (interface && interface != p->interface) dev_info(dev->dev, "use %s instead of %s\n", - phy_modes(dev->interface), + phy_modes(p->interface), phy_modes(interface)); /* enable cpu port */ ksz9477_port_setup(dev, i, true); - p = &dev->ports[dev->cpu_port]; p->vid_member = dev->port_mask; p->on = 1; } diff --git a/drivers/net/dsa/microchip/ksz_common.c b/drivers/net/dsa/microchip/ksz_common.c index 8d53b12d40a8..8e755b50c9c1 100644 --- a/drivers/net/dsa/microchip/ksz_common.c +++ b/drivers/net/dsa/microchip/ksz_common.c @@ -388,6 +388,8 @@ int ksz_switch_register(struct ksz_device *dev, const struct ksz_dev_ops *ops) { phy_interface_t interface; + struct device_node *port; + unsigned int port_num; int ret; if (dev->pdata) @@ -421,10 +423,19 @@ int ksz_switch_register(struct ksz_device *dev, /* Host port interface will be self detected, or specifically set in * device tree. */ + for (port_num = 0; port_num < dev->port_cnt; ++port_num) + dev->ports[port_num].interface = PHY_INTERFACE_MODE_NA; if (dev->dev->of_node) { ret = of_get_phy_mode(dev->dev->of_node, &interface); if (ret == 0) - dev->interface = interface; + dev->compat_interface = interface; + for_each_available_child_of_node(dev->dev->of_node, port) { + if (of_property_read_u32(port, "reg", &port_num)) + continue; + if (port_num >= dev->port_cnt) + return -EINVAL; + of_get_phy_mode(port, &dev->ports[port_num].interface); + } dev->synclko_125 = of_property_read_bool(dev->dev->of_node, "microchip,synclko-125"); } diff --git a/drivers/net/dsa/microchip/ksz_common.h b/drivers/net/dsa/microchip/ksz_common.h index 206838160f49..cf866e48ff66 100644 --- a/drivers/net/dsa/microchip/ksz_common.h +++ b/drivers/net/dsa/microchip/ksz_common.h @@ -39,6 +39,7 @@ struct ksz_port { u32 freeze:1; /* MIB counter freeze is enabled */ struct ksz_port_mib mib; + phy_interface_t interface; }; struct ksz_device { @@ -72,7 +73,7 @@ struct ksz_device { int mib_cnt; int mib_port_cnt; int last_port; /* ports after that not used */ - phy_interface_t interface; + phy_interface_t compat_interface; u32 regs_size; bool phy_errata_9477; bool synclko_125; diff --git a/drivers/net/dsa/ocelot/felix.c b/drivers/net/dsa/ocelot/felix.c index 04bfa6e465ff..01427cd08448 100644 --- a/drivers/net/dsa/ocelot/felix.c +++ b/drivers/net/dsa/ocelot/felix.c @@ -585,7 +585,10 @@ static int felix_setup(struct dsa_switch *ds) if (err) return err; - ocelot_init(ocelot); + err = ocelot_init(ocelot); + if (err) + return err; + if (ocelot->ptp) { err = ocelot_init_timestamp(ocelot, &ocelot_ptp_clock_info); if (err) { @@ -640,10 +643,13 @@ static void felix_teardown(struct dsa_switch *ds) { struct ocelot *ocelot = ds->priv; struct felix *felix = ocelot_to_felix(ocelot); + int port; if (felix->info->mdio_bus_free) felix->info->mdio_bus_free(ocelot); + for (port = 0; port < ocelot->num_phys_ports; port++) + ocelot_deinit_port(ocelot, port); ocelot_deinit_timestamp(ocelot); /* stop workqueue thread */ ocelot_deinit(ocelot); diff --git a/drivers/net/dsa/ocelot/felix_vsc9959.c b/drivers/net/dsa/ocelot/felix_vsc9959.c index 9b720c8ddfc3..6855c94256f8 100644 --- a/drivers/net/dsa/ocelot/felix_vsc9959.c +++ b/drivers/net/dsa/ocelot/felix_vsc9959.c @@ -645,17 +645,17 @@ static struct vcap_field vsc9959_vcap_is2_keys[] = { [VCAP_IS2_HK_DIP_EQ_SIP] = {118, 1}, /* IP4_TCP_UDP (TYPE=100) */ [VCAP_IS2_HK_TCP] = {119, 1}, - [VCAP_IS2_HK_L4_SPORT] = {120, 16}, - 
[VCAP_IS2_HK_L4_DPORT] = {136, 16}, + [VCAP_IS2_HK_L4_DPORT] = {120, 16}, + [VCAP_IS2_HK_L4_SPORT] = {136, 16}, [VCAP_IS2_HK_L4_RNG] = {152, 8}, [VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = {160, 1}, [VCAP_IS2_HK_L4_SEQUENCE_EQ0] = {161, 1}, - [VCAP_IS2_HK_L4_URG] = {162, 1}, - [VCAP_IS2_HK_L4_ACK] = {163, 1}, - [VCAP_IS2_HK_L4_PSH] = {164, 1}, - [VCAP_IS2_HK_L4_RST] = {165, 1}, - [VCAP_IS2_HK_L4_SYN] = {166, 1}, - [VCAP_IS2_HK_L4_FIN] = {167, 1}, + [VCAP_IS2_HK_L4_FIN] = {162, 1}, + [VCAP_IS2_HK_L4_SYN] = {163, 1}, + [VCAP_IS2_HK_L4_RST] = {164, 1}, + [VCAP_IS2_HK_L4_PSH] = {165, 1}, + [VCAP_IS2_HK_L4_ACK] = {166, 1}, + [VCAP_IS2_HK_L4_URG] = {167, 1}, [VCAP_IS2_HK_L4_1588_DOM] = {168, 8}, [VCAP_IS2_HK_L4_1588_VER] = {176, 4}, /* IP4_OTHER (TYPE=101) */ diff --git a/drivers/net/dsa/ocelot/seville_vsc9953.c b/drivers/net/dsa/ocelot/seville_vsc9953.c index 625b1891d955..29df0797ecf5 100644 --- a/drivers/net/dsa/ocelot/seville_vsc9953.c +++ b/drivers/net/dsa/ocelot/seville_vsc9953.c @@ -659,17 +659,17 @@ static struct vcap_field vsc9953_vcap_is2_keys[] = { [VCAP_IS2_HK_DIP_EQ_SIP] = {122, 1}, /* IP4_TCP_UDP (TYPE=100) */ [VCAP_IS2_HK_TCP] = {123, 1}, - [VCAP_IS2_HK_L4_SPORT] = {124, 16}, - [VCAP_IS2_HK_L4_DPORT] = {140, 16}, + [VCAP_IS2_HK_L4_DPORT] = {124, 16}, + [VCAP_IS2_HK_L4_SPORT] = {140, 16}, [VCAP_IS2_HK_L4_RNG] = {156, 8}, [VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = {164, 1}, [VCAP_IS2_HK_L4_SEQUENCE_EQ0] = {165, 1}, - [VCAP_IS2_HK_L4_URG] = {166, 1}, - [VCAP_IS2_HK_L4_ACK] = {167, 1}, - [VCAP_IS2_HK_L4_PSH] = {168, 1}, - [VCAP_IS2_HK_L4_RST] = {169, 1}, - [VCAP_IS2_HK_L4_SYN] = {170, 1}, - [VCAP_IS2_HK_L4_FIN] = {171, 1}, + [VCAP_IS2_HK_L4_FIN] = {166, 1}, + [VCAP_IS2_HK_L4_SYN] = {167, 1}, + [VCAP_IS2_HK_L4_RST] = {168, 1}, + [VCAP_IS2_HK_L4_PSH] = {169, 1}, + [VCAP_IS2_HK_L4_ACK] = {170, 1}, + [VCAP_IS2_HK_L4_URG] = {171, 1}, /* IP4_OTHER (TYPE=101) */ [VCAP_IS2_HK_IP4_L3_PROTO] = {123, 8}, [VCAP_IS2_HK_L3_PAYLOAD] = {131, 56}, @@ -1008,7 +1008,7 @@ static const struct felix_info seville_info_vsc9953 = { .vcap_is2_keys = vsc9953_vcap_is2_keys, .vcap_is2_actions = vsc9953_vcap_is2_actions, .vcap = vsc9953_vcap_props, - .shared_queue_sz = 128 * 1024, + .shared_queue_sz = 2048 * 1024, .num_mact_rows = 2048, .num_ports = 10, .mdio_bus_alloc = vsc9953_mdio_bus_alloc, diff --git a/drivers/net/dsa/rtl8366.c b/drivers/net/dsa/rtl8366.c index 8f40fbf70a82..a8c5a934c3d3 100644 --- a/drivers/net/dsa/rtl8366.c +++ b/drivers/net/dsa/rtl8366.c @@ -452,13 +452,19 @@ int rtl8366_vlan_del(struct dsa_switch *ds, int port, return ret; if (vid == vlanmc.vid) { - /* clear VLAN member configurations */ - vlanmc.vid = 0; - vlanmc.priority = 0; - vlanmc.member = 0; - vlanmc.untag = 0; - vlanmc.fid = 0; - + /* Remove this port from the VLAN */ + vlanmc.member &= ~BIT(port); + vlanmc.untag &= ~BIT(port); + /* + * If no ports are members of this VLAN + * anymore then clear the whole member + * config so it can be reused. 
+ */ + if (!vlanmc.member && vlanmc.untag) { + vlanmc.vid = 0; + vlanmc.priority = 0; + vlanmc.fid = 0; + } ret = smi->ops->set_vlan_mc(smi, i, &vlanmc); if (ret) { dev_err(smi->dev, diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.c b/drivers/net/ethernet/broadcom/bnxt/bnxt.c index b167066af450..7b7e8b7883c8 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.c @@ -3782,6 +3782,7 @@ static int bnxt_hwrm_func_qstat_ext(struct bnxt *bp, return -EOPNOTSUPP; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_FUNC_QSTATS_EXT, -1, -1); + req.fid = cpu_to_le16(0xffff); req.flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; mutex_lock(&bp->hwrm_cmd_lock); rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); @@ -3852,7 +3853,7 @@ static void bnxt_init_stats(struct bnxt *bp) tx_masks = stats->hw_masks; tx_count = sizeof(struct tx_port_stats_ext) / 8; - flags = FUNC_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; + flags = PORT_QSTATS_EXT_REQ_FLAGS_COUNTER_MASK; rc = bnxt_hwrm_port_qstats_ext(bp, flags); if (rc) { mask = (1ULL << 40) - 1; @@ -4305,7 +4306,7 @@ static int bnxt_hwrm_do_send_msg(struct bnxt *bp, void *msg, u32 msg_len, u32 bar_offset = BNXT_GRCPF_REG_CHIMP_COMM; u16 dst = BNXT_HWRM_CHNL_CHIMP; - if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) + if (BNXT_NO_FW_ACCESS(bp)) return -EBUSY; if (msg_len > BNXT_HWRM_MAX_REQ_LEN) { @@ -5723,7 +5724,7 @@ static int hwrm_ring_free_send_msg(struct bnxt *bp, struct hwrm_ring_free_output *resp = bp->hwrm_cmd_resp_addr; u16 error_code; - if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) + if (BNXT_NO_FW_ACCESS(bp)) return 0; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_RING_FREE, cmpl_ring_id, -1); @@ -7817,7 +7818,7 @@ static int bnxt_set_tpa(struct bnxt *bp, bool set_tpa) if (set_tpa) tpa_flags = bp->flags & BNXT_FLAG_TPA; - else if (test_bit(BNXT_STATE_FW_FATAL_COND, &bp->state)) + else if (BNXT_NO_FW_ACCESS(bp)) return 0; for (i = 0; i < bp->nr_vnics; i++) { rc = bnxt_hwrm_vnic_set_tpa(bp, i, tpa_flags); @@ -9311,18 +9312,16 @@ static ssize_t bnxt_show_temp(struct device *dev, struct hwrm_temp_monitor_query_output *resp; struct bnxt *bp = dev_get_drvdata(dev); u32 len = 0; + int rc; resp = bp->hwrm_cmd_resp_addr; bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1); mutex_lock(&bp->hwrm_cmd_lock); - if (!_hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT)) + rc = _hwrm_send_message(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (!rc) len = sprintf(buf, "%u\n", resp->temp * 1000); /* display millidegree */ mutex_unlock(&bp->hwrm_cmd_lock); - - if (len) - return len; - - return sprintf(buf, "unknown\n"); + return rc ?: len; } static SENSOR_DEVICE_ATTR(temp1_input, 0444, bnxt_show_temp, NULL, 0); @@ -9342,7 +9341,16 @@ static void bnxt_hwmon_close(struct bnxt *bp) static void bnxt_hwmon_open(struct bnxt *bp) { + struct hwrm_temp_monitor_query_input req = {0}; struct pci_dev *pdev = bp->pdev; + int rc; + + bnxt_hwrm_cmd_hdr_init(bp, &req, HWRM_TEMP_MONITOR_QUERY, -1, -1); + rc = hwrm_send_message_silent(bp, &req, sizeof(req), HWRM_CMD_TIMEOUT); + if (rc == -EACCES || rc == -EOPNOTSUPP) { + bnxt_hwmon_close(bp); + return; + } if (bp->hwmon_dev) return; @@ -11779,6 +11787,10 @@ static void bnxt_remove_one(struct pci_dev *pdev) if (BNXT_PF(bp)) bnxt_sriov_disable(bp); + clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); + bnxt_cancel_sp_work(bp); + bp->sp_event = 0; + bnxt_dl_fw_reporters_destroy(bp, true); if (BNXT_PF(bp)) devlink_port_type_clear(&bp->dl_port); @@ -11786,9 +11798,6 @@ static 
void bnxt_remove_one(struct pci_dev *pdev) unregister_netdev(dev); bnxt_dl_unregister(bp); bnxt_shutdown_tc(bp); - clear_bit(BNXT_STATE_IN_FW_RESET, &bp->state); - bnxt_cancel_sp_work(bp); - bp->sp_event = 0; bnxt_clear_int_mode(bp); bnxt_hwrm_func_drv_unrgtr(bp); @@ -12089,7 +12098,7 @@ static int bnxt_init_mac_addr(struct bnxt *bp) static void bnxt_vpd_read_info(struct bnxt *bp) { struct pci_dev *pdev = bp->pdev; - int i, len, pos, ro_size; + int i, len, pos, ro_size, size; ssize_t vpd_size; u8 *vpd_data; @@ -12124,7 +12133,8 @@ static void bnxt_vpd_read_info(struct bnxt *bp) if (len + pos > vpd_size) goto read_sn; - strlcpy(bp->board_partno, &vpd_data[pos], min(len, BNXT_VPD_FLD_LEN)); + size = min(len, BNXT_VPD_FLD_LEN - 1); + memcpy(bp->board_partno, &vpd_data[pos], size); read_sn: pos = pci_vpd_find_info_keyword(vpd_data, i, ro_size, @@ -12137,7 +12147,8 @@ read_sn: if (len + pos > vpd_size) goto exit; - strlcpy(bp->board_serialno, &vpd_data[pos], min(len, BNXT_VPD_FLD_LEN)); + size = min(len, BNXT_VPD_FLD_LEN - 1); + memcpy(bp->board_serialno, &vpd_data[pos], size); exit: kfree(vpd_data); } diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt.h b/drivers/net/ethernet/broadcom/bnxt/bnxt.h index 5a13eb66beda..0ef89dabfd61 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt.h +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt.h @@ -1737,6 +1737,10 @@ struct bnxt { #define BNXT_STATE_FW_FATAL_COND 6 #define BNXT_STATE_DRV_REGISTERED 7 +#define BNXT_NO_FW_ACCESS(bp) \ + (test_bit(BNXT_STATE_FW_FATAL_COND, &(bp)->state) || \ + pci_channel_offline((bp)->pdev)) + struct bnxt_irq *irq_tbl; int total_irqs; u8 mac_addr[ETH_ALEN]; diff --git a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c index d0928334bdc8..fecdfd875af1 100644 --- a/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c +++ b/drivers/net/ethernet/broadcom/bnxt/bnxt_ethtool.c @@ -1322,6 +1322,9 @@ static int bnxt_get_regs_len(struct net_device *dev) struct bnxt *bp = netdev_priv(dev); int reg_len; + if (!BNXT_PF(bp)) + return -EOPNOTSUPP; + reg_len = BNXT_PXP_REG_LEN; if (bp->fw_cap & BNXT_FW_CAP_PCIE_STATS_SUPPORTED) @@ -1788,9 +1791,12 @@ static int bnxt_set_pauseparam(struct net_device *dev, if (!BNXT_PHY_CFG_ABLE(bp)) return -EOPNOTSUPP; + mutex_lock(&bp->link_lock); if (epause->autoneg) { - if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) - return -EINVAL; + if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { + rc = -EINVAL; + goto pause_exit; + } link_info->autoneg |= BNXT_AUTONEG_FLOW_CTRL; if (bp->hwrm_spec_code >= 0x10201) @@ -1811,11 +1817,11 @@ static int bnxt_set_pauseparam(struct net_device *dev, if (epause->tx_pause) link_info->req_flow_ctrl |= BNXT_LINK_PAUSE_TX; - if (netif_running(dev)) { - mutex_lock(&bp->link_lock); + if (netif_running(dev)) rc = bnxt_hwrm_set_pause(bp); - mutex_unlock(&bp->link_lock); - } + +pause_exit: + mutex_unlock(&bp->link_lock); return rc; } @@ -2552,8 +2558,7 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) struct bnxt *bp = netdev_priv(dev); struct ethtool_eee *eee = &bp->eee; struct bnxt_link_info *link_info = &bp->link_info; - u32 advertising = - _bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); + u32 advertising; int rc = 0; if (!BNXT_PHY_CFG_ABLE(bp)) @@ -2562,19 +2567,23 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) if (!(bp->flags & BNXT_FLAG_EEE_CAP)) return -EOPNOTSUPP; + mutex_lock(&bp->link_lock); + advertising = 
_bnxt_fw_to_ethtool_adv_spds(link_info->advertising, 0); if (!edata->eee_enabled) goto eee_ok; if (!(link_info->autoneg & BNXT_AUTONEG_SPEED)) { netdev_warn(dev, "EEE requires autoneg\n"); - return -EINVAL; + rc = -EINVAL; + goto eee_exit; } if (edata->tx_lpi_enabled) { if (bp->lpi_tmr_hi && (edata->tx_lpi_timer > bp->lpi_tmr_hi || edata->tx_lpi_timer < bp->lpi_tmr_lo)) { netdev_warn(dev, "Valid LPI timer range is %d and %d microsecs\n", bp->lpi_tmr_lo, bp->lpi_tmr_hi); - return -EINVAL; + rc = -EINVAL; + goto eee_exit; } else if (!bp->lpi_tmr_hi) { edata->tx_lpi_timer = eee->tx_lpi_timer; } @@ -2584,7 +2593,8 @@ static int bnxt_set_eee(struct net_device *dev, struct ethtool_eee *edata) } else if (edata->advertised & ~advertising) { netdev_warn(dev, "EEE advertised %x must be a subset of autoneg advertised speeds %x\n", edata->advertised, advertising); - return -EINVAL; + rc = -EINVAL; + goto eee_exit; } eee->advertised = edata->advertised; @@ -2596,6 +2606,8 @@ eee_ok: if (netif_running(dev)) rc = bnxt_hwrm_set_link_setting(bp, false, true); +eee_exit: + mutex_unlock(&bp->link_lock); return rc; } diff --git a/drivers/net/ethernet/cadence/macb_main.c b/drivers/net/ethernet/cadence/macb_main.c index 6761f404b8aa..9179f7b0b900 100644 --- a/drivers/net/ethernet/cadence/macb_main.c +++ b/drivers/net/ethernet/cadence/macb_main.c @@ -647,8 +647,7 @@ static void macb_mac_link_up(struct phylink_config *config, ctrl |= GEM_BIT(GBE); } - /* We do not support MLO_PAUSE_RX yet */ - if (tx_pause) + if (rx_pause) ctrl |= MACB_BIT(PAE); macb_set_tx_clk(bp->tx_clk, speed, ndev); diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c index 650db92cb11c..481498585ead 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_filter.c @@ -1911,13 +1911,16 @@ out: static int configure_filter_tcb(struct adapter *adap, unsigned int tid, struct filter_entry *f) { - if (f->fs.hitcnts) + if (f->fs.hitcnts) { set_tcb_field(adap, f, tid, TCB_TIMESTAMP_W, - TCB_TIMESTAMP_V(TCB_TIMESTAMP_M) | + TCB_TIMESTAMP_V(TCB_TIMESTAMP_M), + TCB_TIMESTAMP_V(0ULL), + 1); + set_tcb_field(adap, f, tid, TCB_RTT_TS_RECENT_AGE_W, TCB_RTT_TS_RECENT_AGE_V(TCB_RTT_TS_RECENT_AGE_M), - TCB_TIMESTAMP_V(0ULL) | TCB_RTT_TS_RECENT_AGE_V(0ULL), 1); + } if (f->fs.newdmac) set_tcb_tflag(adap, f, tid, TF_CCTRL_ECE_S, 1, diff --git a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c index b1a073eea60b..a020e8490681 100644 --- a/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c +++ b/drivers/net/ethernet/chelsio/cxgb4/cxgb4_mps.c @@ -229,7 +229,7 @@ void cxgb4_free_mps_ref_entries(struct adapter *adap) { struct mps_entries_ref *mps_entry, *tmp; - if (!list_empty(&adap->mps_ref)) + if (list_empty(&adap->mps_ref)) return; spin_lock(&adap->mps_ref_lock); diff --git a/drivers/net/ethernet/dec/tulip/de2104x.c b/drivers/net/ethernet/dec/tulip/de2104x.c index cb116b530f5e..2610efe4f873 100644 --- a/drivers/net/ethernet/dec/tulip/de2104x.c +++ b/drivers/net/ethernet/dec/tulip/de2104x.c @@ -85,7 +85,7 @@ MODULE_PARM_DESC (rx_copybreak, "de2104x Breakpoint at which Rx packets are copi #define DSL CONFIG_DE2104X_DSL #endif -#define DE_RX_RING_SIZE 64 +#define DE_RX_RING_SIZE 128 #define DE_TX_RING_SIZE 64 #define DE_RING_BYTES \ ((sizeof(struct de_desc) * DE_RX_RING_SIZE) + \ diff --git a/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h index 
3ea51dd9374b..a24b20f76938 100644 --- a/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h +++ b/drivers/net/ethernet/freescale/dpaa2/dpmac-cmd.h @@ -66,8 +66,8 @@ struct dpmac_cmd_get_counter { }; struct dpmac_rsp_get_counter { - u64 pad; - u64 counter; + __le64 pad; + __le64 counter; }; #endif /* _FSL_DPMAC_CMD_H */ diff --git a/drivers/net/ethernet/freescale/enetc/enetc_pf.c b/drivers/net/ethernet/freescale/enetc/enetc_pf.c index 26d5981b798f..177334f0adb1 100644 --- a/drivers/net/ethernet/freescale/enetc/enetc_pf.c +++ b/drivers/net/ethernet/freescale/enetc/enetc_pf.c @@ -1053,7 +1053,6 @@ static int enetc_pf_probe(struct pci_dev *pdev, err_reg_netdev: enetc_teardown_serdes(priv); - enetc_mdio_remove(pf); enetc_free_msix(priv); err_alloc_msix: enetc_free_si_resources(priv); @@ -1061,6 +1060,7 @@ err_alloc_si_res: si->ndev = NULL; free_netdev(ndev); err_alloc_netdev: + enetc_mdio_remove(pf); enetc_of_put_phy(pf); err_map_pf_space: enetc_pci_remove(pdev); diff --git a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c index ed3829ae4ef1..a769273b36f7 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_dsaf_misc.c @@ -334,7 +334,7 @@ static void hns_dsaf_xge_srst_by_port_acpi(struct dsaf_device *dsaf_dev, * bit6-11 for ppe0-5 * bit12-17 for roce0-5 * bit18-19 for com/dfx - * @enable: false - request reset , true - drop reset + * @dereset: false - request reset , true - drop reset */ static void hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset) @@ -357,7 +357,7 @@ hns_dsaf_srst_chns(struct dsaf_device *dsaf_dev, u32 msk, bool dereset) * bit6-11 for ppe0-5 * bit12-17 for roce0-5 * bit18-19 for com/dfx - * @enable: false - request reset , true - drop reset + * @dereset: false - request reset , true - drop reset */ static void hns_dsaf_srst_chns_acpi(struct dsaf_device *dsaf_dev, u32 msk, bool dereset) diff --git a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c index 4eb50296f653..14e60c9e491d 100644 --- a/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c +++ b/drivers/net/ethernet/hisilicon/hns/hns_ethtool.c @@ -463,8 +463,8 @@ static int __lb_clean_rings(struct hns_nic_priv *priv, /** * nic_run_loopback_test - run loopback test - * @nic_dev: net device - * @loopback_type: loopback type + * @ndev: net device + * @loop_mode: loopback mode */ static int __lb_run_test(struct net_device *ndev, enum hnae_loop loop_mode) @@ -572,7 +572,7 @@ static int __lb_down(struct net_device *ndev, enum hnae_loop loop) /** * hns_nic_self_test - self test - * @dev: net device + * @ndev: net device * @eth_test: test cmd * @data: test result */ @@ -633,7 +633,7 @@ static void hns_nic_self_test(struct net_device *ndev, /** * hns_nic_get_drvinfo - get net driver info - * @dev: net device + * @net_dev: net device * @drvinfo: driver info */ static void hns_nic_get_drvinfo(struct net_device *net_dev, @@ -658,7 +658,7 @@ static void hns_nic_get_drvinfo(struct net_device *net_dev, /** * hns_get_ringparam - get ring parameter - * @dev: net device + * @net_dev: net device * @param: ethtool parameter */ static void hns_get_ringparam(struct net_device *net_dev, @@ -683,7 +683,7 @@ static void hns_get_ringparam(struct net_device *net_dev, /** * hns_get_pauseparam - get pause parameter - * @dev: net device + * @net_dev: net device * @param: pause parameter */ static void hns_get_pauseparam(struct net_device *net_dev, @@ -701,7 +701,7 @@ static 
void hns_get_pauseparam(struct net_device *net_dev, /** * hns_set_pauseparam - set pause parameter - * @dev: net device + * @net_dev: net device * @param: pause parameter * * Return 0 on success, negative on failure @@ -725,7 +725,7 @@ static int hns_set_pauseparam(struct net_device *net_dev, /** * hns_get_coalesce - get coalesce info. - * @dev: net device + * @net_dev: net device * @ec: coalesce info. * * Return 0 on success, negative on failure. @@ -769,7 +769,7 @@ static int hns_get_coalesce(struct net_device *net_dev, /** * hns_set_coalesce - set coalesce info. - * @dev: net device + * @net_dev: net device * @ec: coalesce info. * * Return 0 on success, negative on failure. @@ -808,7 +808,7 @@ static int hns_set_coalesce(struct net_device *net_dev, /** * hns_get_channels - get channel info. - * @dev: net device + * @net_dev: net device * @ch: channel info. */ static void @@ -825,7 +825,7 @@ hns_get_channels(struct net_device *net_dev, struct ethtool_channels *ch) /** * get_ethtool_stats - get detail statistics. - * @dev: net device + * @netdev: net device * @stats: statistics info. * @data: statistics data. */ @@ -883,8 +883,8 @@ static void hns_get_ethtool_stats(struct net_device *netdev, /** * get_strings: Return a set of strings that describe the requested objects - * @dev: net device - * @stats: string set ID. + * @netdev: net device + * @stringset: string set ID. * @data: objects data. */ static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data) @@ -972,7 +972,7 @@ static void hns_get_strings(struct net_device *netdev, u32 stringset, u8 *data) /** * nic_get_sset_count - get string set count witch returned by nic_get_strings. - * @dev: net device + * @netdev: net device * @stringset: string set index, 0: self test string; 1: statistics string. * * Return string set count. @@ -1006,7 +1006,7 @@ static int hns_get_sset_count(struct net_device *netdev, int stringset) /** * hns_phy_led_set - set phy LED status. - * @dev: net device + * @netdev: net device * @value: LED state. * * Return 0 on success, negative on failure. @@ -1028,7 +1028,7 @@ static int hns_phy_led_set(struct net_device *netdev, int value) /** * nic_set_phys_id - set phy identify LED. - * @dev: net device + * @netdev: net device * @state: LED state. * * Return 0 on success, negative on failure. @@ -1104,9 +1104,9 @@ hns_set_phys_id(struct net_device *netdev, enum ethtool_phys_id_state state) /** * hns_get_regs - get net device register - * @dev: net device + * @net_dev: net device * @cmd: ethtool cmd - * @date: register data + * @data: register data */ static void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd, void *data) @@ -1126,7 +1126,7 @@ static void hns_get_regs(struct net_device *net_dev, struct ethtool_regs *cmd, /** * nic_get_regs_len - get total register len. - * @dev: net device + * @net_dev: net device * * Return total register len. 
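The hns_ethtool.c hunks above only rename kernel-doc parameters so that each @tag matches the real argument name, which is what scripts/kernel-doc and W=1 builds check. A small illustration of the expected form; example_get_ringparam and the example_priv fields are invented for this sketch:

/**
 * example_get_ringparam - report ring sizes to ethtool
 * @net_dev: net device being queried
 * @param: ring parameters returned to user space
 *
 * Each @tag must use the parameter's actual name; "@dev" against a
 * parameter called net_dev is exactly the mismatch these hunks remove.
 */
static void example_get_ringparam(struct net_device *net_dev,
				  struct ethtool_ringparam *param)
{
	struct example_priv *priv = netdev_priv(net_dev);

	param->rx_pending = priv->rx_ring_size;
	param->tx_pending = priv->tx_ring_size;
}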
*/ @@ -1151,7 +1151,7 @@ static int hns_get_regs_len(struct net_device *net_dev) /** * hns_nic_nway_reset - nway reset - * @dev: net device + * @netdev: net device * * Return 0 on success, negative on failure */ diff --git a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c index 6bb65ade1d77..c340d9acba80 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_ethtool.c @@ -1654,6 +1654,7 @@ static void hinic_diag_test(struct net_device *netdev, } netif_carrier_off(netdev); + netif_tx_disable(netdev); err = do_lp_test(nic_dev, eth_test->flags, LP_DEFAULT_TIME, &test_index); @@ -1662,9 +1663,12 @@ static void hinic_diag_test(struct net_device *netdev, data[test_index] = 1; } + netif_tx_wake_all_queues(netdev); + err = hinic_port_link_state(nic_dev, &link_state); if (!err && link_state == HINIC_LINK_STATE_UP) netif_carrier_on(netdev); + } static int hinic_set_phys_id(struct net_device *netdev, diff --git a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c index c6ce5966284c..2ebae6cb5db5 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_hw_mgmt.c @@ -47,8 +47,12 @@ #define MGMT_MSG_TIMEOUT 5000 +#define SET_FUNC_PORT_MBOX_TIMEOUT 30000 + #define SET_FUNC_PORT_MGMT_TIMEOUT 25000 +#define UPDATE_FW_MGMT_TIMEOUT 20000 + #define mgmt_to_pfhwdev(pf_mgmt) \ container_of(pf_mgmt, struct hinic_pfhwdev, pf_to_mgmt) @@ -361,16 +365,22 @@ int hinic_msg_to_mgmt(struct hinic_pf_to_mgmt *pf_to_mgmt, return -EINVAL; } - if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE) - timeout = SET_FUNC_PORT_MGMT_TIMEOUT; + if (HINIC_IS_VF(hwif)) { + if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE) + timeout = SET_FUNC_PORT_MBOX_TIMEOUT; - if (HINIC_IS_VF(hwif)) return hinic_mbox_to_pf(pf_to_mgmt->hwdev, mod, cmd, buf_in, - in_size, buf_out, out_size, 0); - else + in_size, buf_out, out_size, timeout); + } else { + if (cmd == HINIC_PORT_CMD_SET_FUNC_STATE) + timeout = SET_FUNC_PORT_MGMT_TIMEOUT; + else if (cmd == HINIC_PORT_CMD_UPDATE_FW) + timeout = UPDATE_FW_MGMT_TIMEOUT; + return msg_to_mgmt_sync(pf_to_mgmt, mod, cmd, buf_in, in_size, buf_out, out_size, MGMT_DIRECT_SEND, MSG_NOT_RESP, timeout); + } } static void recv_mgmt_msg_work_handler(struct work_struct *work) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_main.c b/drivers/net/ethernet/huawei/hinic/hinic_main.c index 501056fd32ee..28581bd8ce07 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_main.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_main.c @@ -174,6 +174,24 @@ err_init_txq: return err; } +static void enable_txqs_napi(struct hinic_dev *nic_dev) +{ + int num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev); + int i; + + for (i = 0; i < num_txqs; i++) + napi_enable(&nic_dev->txqs[i].napi); +} + +static void disable_txqs_napi(struct hinic_dev *nic_dev) +{ + int num_txqs = hinic_hwdev_num_qps(nic_dev->hwdev); + int i; + + for (i = 0; i < num_txqs; i++) + napi_disable(&nic_dev->txqs[i].napi); +} + /** * free_txqs - Free the Logical Tx Queues of specific NIC device * @nic_dev: the specific NIC device @@ -400,6 +418,8 @@ int hinic_open(struct net_device *netdev) goto err_create_txqs; } + enable_txqs_napi(nic_dev); + err = create_rxqs(nic_dev); if (err) { netif_err(nic_dev, drv, netdev, @@ -484,6 +504,7 @@ err_port_state: } err_create_rxqs: + disable_txqs_napi(nic_dev); free_txqs(nic_dev); err_create_txqs: @@ -497,6 +518,9 @@ int hinic_close(struct net_device 
*netdev) struct hinic_dev *nic_dev = netdev_priv(netdev); unsigned int flags; + /* Disable txq napi firstly to aviod rewaking txq in free_tx_poll */ + disable_txqs_napi(nic_dev); + down(&nic_dev->mgmt_lock); flags = nic_dev->flags; diff --git a/drivers/net/ethernet/huawei/hinic/hinic_rx.c b/drivers/net/ethernet/huawei/hinic/hinic_rx.c index 5bee951fe9d4..d0072f5e7efc 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_rx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_rx.c @@ -543,18 +543,25 @@ static int rx_request_irq(struct hinic_rxq *rxq) if (err) { netif_err(nic_dev, drv, rxq->netdev, "Failed to set RX interrupt coalescing attribute\n"); - rx_del_napi(rxq); - return err; + goto err_req_irq; } err = request_irq(rq->irq, rx_irq, 0, rxq->irq_name, rxq); - if (err) { - rx_del_napi(rxq); - return err; - } + if (err) + goto err_req_irq; cpumask_set_cpu(qp->q_id % num_online_cpus(), &rq->affinity_mask); - return irq_set_affinity_hint(rq->irq, &rq->affinity_mask); + err = irq_set_affinity_hint(rq->irq, &rq->affinity_mask); + if (err) + goto err_irq_affinity; + + return 0; + +err_irq_affinity: + free_irq(rq->irq, rxq); +err_req_irq: + rx_del_napi(rxq); + return err; } static void rx_free_irq(struct hinic_rxq *rxq) diff --git a/drivers/net/ethernet/huawei/hinic/hinic_tx.c b/drivers/net/ethernet/huawei/hinic/hinic_tx.c index a97498ee6914..c1f81e9144a1 100644 --- a/drivers/net/ethernet/huawei/hinic/hinic_tx.c +++ b/drivers/net/ethernet/huawei/hinic/hinic_tx.c @@ -717,8 +717,8 @@ static int free_tx_poll(struct napi_struct *napi, int budget) netdev_txq = netdev_get_tx_queue(txq->netdev, qp->q_id); __netif_tx_lock(netdev_txq, smp_processor_id()); - - netif_wake_subqueue(nic_dev->netdev, qp->q_id); + if (!netif_testing(nic_dev->netdev)) + netif_wake_subqueue(nic_dev->netdev, qp->q_id); __netif_tx_unlock(netdev_txq); @@ -745,18 +745,6 @@ static int free_tx_poll(struct napi_struct *napi, int budget) return budget; } -static void tx_napi_add(struct hinic_txq *txq, int weight) -{ - netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, weight); - napi_enable(&txq->napi); -} - -static void tx_napi_del(struct hinic_txq *txq) -{ - napi_disable(&txq->napi); - netif_napi_del(&txq->napi); -} - static irqreturn_t tx_irq(int irq, void *data) { struct hinic_txq *txq = data; @@ -790,7 +778,7 @@ static int tx_request_irq(struct hinic_txq *txq) qp = container_of(sq, struct hinic_qp, sq); - tx_napi_add(txq, nic_dev->tx_weight); + netif_napi_add(txq->netdev, &txq->napi, free_tx_poll, nic_dev->tx_weight); hinic_hwdev_msix_set(nic_dev->hwdev, sq->msix_entry, TX_IRQ_NO_PENDING, TX_IRQ_NO_COALESC, @@ -807,14 +795,14 @@ static int tx_request_irq(struct hinic_txq *txq) if (err) { netif_err(nic_dev, drv, txq->netdev, "Failed to set TX interrupt coalescing attribute\n"); - tx_napi_del(txq); + netif_napi_del(&txq->napi); return err; } err = request_irq(sq->irq, tx_irq, 0, txq->irq_name, txq); if (err) { dev_err(&pdev->dev, "Failed to request Tx irq\n"); - tx_napi_del(txq); + netif_napi_del(&txq->napi); return err; } @@ -826,7 +814,7 @@ static void tx_free_irq(struct hinic_txq *txq) struct hinic_sq *sq = txq->sq; free_irq(sq->irq, txq); - tx_napi_del(txq); + netif_napi_del(&txq->napi); } /** diff --git a/drivers/net/ethernet/ibm/ibmvnic.c b/drivers/net/ethernet/ibm/ibmvnic.c index d3a774331afc..1b702a43a5d0 100644 --- a/drivers/net/ethernet/ibm/ibmvnic.c +++ b/drivers/net/ethernet/ibm/ibmvnic.c @@ -2032,16 +2032,18 @@ static int do_reset(struct ibmvnic_adapter *adapter, } else { rc = reset_tx_pools(adapter); - if (rc) + if (rc) 
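The hinic_rx.c hunk above turns two ad-hoc cleanups into a layered goto unwind, so a failing irq_set_affinity_hint() also frees the IRQ that was just requested. A generic sketch of that unwind order; the example_* helpers and the example_rxq layout are placeholders:

static int example_rx_request_irq(struct example_rxq *rxq)
{
	int err;

	example_rx_add_napi(rxq);			/* step 1 */

	err = request_irq(rxq->irq, example_rx_irq, 0, rxq->irq_name, rxq);
	if (err)
		goto err_req_irq;			/* undo step 1 only */

	err = irq_set_affinity_hint(rxq->irq, &rxq->affinity_mask);
	if (err)
		goto err_irq_affinity;			/* undo steps 2, then 1 */

	return 0;

err_irq_affinity:
	free_irq(rxq->irq, rxq);
err_req_irq:
	example_rx_del_napi(rxq);
	return err;
}

The labels run in reverse order of acquisition, so each new setup step only needs one extra label and one extra cleanup line.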
{ netdev_dbg(adapter->netdev, "reset tx pools failed (%d)\n", rc); goto out; + } rc = reset_rx_pools(adapter); - if (rc) + if (rc) { netdev_dbg(adapter->netdev, "reset rx pools failed (%d)\n", rc); goto out; + } } ibmvnic_disable_irqs(adapter); } diff --git a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c index 8e133d6545bd..47bfb2e95e2d 100644 --- a/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c +++ b/drivers/net/ethernet/intel/i40e/i40e_virtchnl_pf.c @@ -1115,7 +1115,7 @@ static int i40e_quiesce_vf_pci(struct i40e_vf *vf) static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) { struct i40e_mac_filter *f; - int num_vlans = 0, bkt; + u16 num_vlans = 0, bkt; hash_for_each(vsi->mac_filter_hash, bkt, f, hlist) { if (f->vlan >= 0 && f->vlan <= I40E_MAX_VLANID) @@ -1134,8 +1134,8 @@ static int i40e_getnum_vf_vsi_vlan_filters(struct i40e_vsi *vsi) * * Called to get number of VLANs and VLAN list present in mac_filter_hash. **/ -static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, int *num_vlans, - s16 **vlan_list) +static void i40e_get_vlan_list_sync(struct i40e_vsi *vsi, u16 *num_vlans, + s16 **vlan_list) { struct i40e_mac_filter *f; int i = 0; @@ -1169,11 +1169,11 @@ err: **/ static i40e_status i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable, - bool unicast_enable, s16 *vl, int num_vlans) + bool unicast_enable, s16 *vl, u16 num_vlans) { + i40e_status aq_ret, aq_tmp = 0; struct i40e_pf *pf = vf->pf; struct i40e_hw *hw = &pf->hw; - i40e_status aq_ret; int i; /* No VLAN to set promisc on, set on VSI */ @@ -1222,6 +1222,9 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable, vf->vf_id, i40e_stat_str(&pf->hw, aq_ret), i40e_aq_str(&pf->hw, aq_err)); + + if (!aq_tmp) + aq_tmp = aq_ret; } aq_ret = i40e_aq_set_vsi_uc_promisc_on_vlan(hw, seid, @@ -1235,8 +1238,15 @@ i40e_set_vsi_promisc(struct i40e_vf *vf, u16 seid, bool multi_enable, vf->vf_id, i40e_stat_str(&pf->hw, aq_ret), i40e_aq_str(&pf->hw, aq_err)); + + if (!aq_tmp) + aq_tmp = aq_ret; } } + + if (aq_tmp) + aq_ret = aq_tmp; + return aq_ret; } @@ -1258,7 +1268,7 @@ static i40e_status i40e_config_vf_promiscuous_mode(struct i40e_vf *vf, i40e_status aq_ret = I40E_SUCCESS; struct i40e_pf *pf = vf->pf; struct i40e_vsi *vsi; - int num_vlans; + u16 num_vlans; s16 *vl; vsi = i40e_find_vsi_from_id(pf, vsi_id); diff --git a/drivers/net/ethernet/intel/igc/igc.h b/drivers/net/ethernet/intel/igc/igc.h index 3070dfdb7eb4..2d566f3c827b 100644 --- a/drivers/net/ethernet/intel/igc/igc.h +++ b/drivers/net/ethernet/intel/igc/igc.h @@ -299,18 +299,14 @@ extern char igc_driver_name[]; #define IGC_RX_HDR_LEN IGC_RXBUFFER_256 /* Transmit and receive latency (for PTP timestamps) */ -/* FIXME: These values were estimated using the ones that i225 has as - * basis, they seem to provide good numbers with ptp4l/phc2sys, but we - * need to confirm them. 
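In the i40e_set_vsi_promisc() hunk above, aq_tmp latches the first admin-queue failure while the function still attempts the remaining calls, and that first error is what gets returned. The same idea reduced to its core, with hypothetical helpers:

static int example_apply_both(struct example_dev *ed)
{
	int err, first_err = 0;

	err = example_set_multicast_promisc(ed);
	if (err && !first_err)
		first_err = err;		/* remember the first failure */

	err = example_set_unicast_promisc(ed);
	if (err && !first_err)
		first_err = err;

	return first_err;			/* 0 only if both calls succeeded */
}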
- */ -#define IGC_I225_TX_LATENCY_10 9542 -#define IGC_I225_TX_LATENCY_100 1024 -#define IGC_I225_TX_LATENCY_1000 178 -#define IGC_I225_TX_LATENCY_2500 64 -#define IGC_I225_RX_LATENCY_10 20662 -#define IGC_I225_RX_LATENCY_100 2213 -#define IGC_I225_RX_LATENCY_1000 448 -#define IGC_I225_RX_LATENCY_2500 160 +#define IGC_I225_TX_LATENCY_10 240 +#define IGC_I225_TX_LATENCY_100 58 +#define IGC_I225_TX_LATENCY_1000 80 +#define IGC_I225_TX_LATENCY_2500 1325 +#define IGC_I225_RX_LATENCY_10 6450 +#define IGC_I225_RX_LATENCY_100 185 +#define IGC_I225_RX_LATENCY_1000 300 +#define IGC_I225_RX_LATENCY_2500 1485 /* RX and TX descriptor control thresholds. * PTHRESH - MAC will consider prefetch if it has fewer than this number of diff --git a/drivers/net/ethernet/intel/igc/igc_ptp.c b/drivers/net/ethernet/intel/igc/igc_ptp.c index 36c999250fcc..6a9b5102aa55 100644 --- a/drivers/net/ethernet/intel/igc/igc_ptp.c +++ b/drivers/net/ethernet/intel/igc/igc_ptp.c @@ -364,6 +364,7 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter) struct sk_buff *skb = adapter->ptp_tx_skb; struct skb_shared_hwtstamps shhwtstamps; struct igc_hw *hw = &adapter->hw; + int adjust = 0; u64 regval; if (WARN_ON_ONCE(!skb)) @@ -373,6 +374,24 @@ static void igc_ptp_tx_hwtstamp(struct igc_adapter *adapter) regval |= (u64)rd32(IGC_TXSTMPH) << 32; igc_ptp_systim_to_hwtstamp(adapter, &shhwtstamps, regval); + switch (adapter->link_speed) { + case SPEED_10: + adjust = IGC_I225_TX_LATENCY_10; + break; + case SPEED_100: + adjust = IGC_I225_TX_LATENCY_100; + break; + case SPEED_1000: + adjust = IGC_I225_TX_LATENCY_1000; + break; + case SPEED_2500: + adjust = IGC_I225_TX_LATENCY_2500; + break; + } + + shhwtstamps.hwtstamp = + ktime_add_ns(shhwtstamps.hwtstamp, adjust); + /* Clear the lock early before calling skb_tstamp_tx so that * applications are not woken up before the lock bit is clear. 
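igc_ptp_tx_hwtstamp() above starts correcting the raw hardware timestamp by a per-link-speed latency before handing it to the stack. A sketch of that adjustment; the EXAMPLE_TX_LATENCY_* constants are placeholders, not the i225 values:

#define EXAMPLE_TX_LATENCY_10		9000	/* ns, placeholder */
#define EXAMPLE_TX_LATENCY_100		1000	/* ns, placeholder */
#define EXAMPLE_TX_LATENCY_1000		200	/* ns, placeholder */

static void example_report_tx_tstamp(struct sk_buff *skb, ktime_t hw_tstamp,
				     int link_speed)
{
	struct skb_shared_hwtstamps shhwtstamps = {};
	int adjust;

	switch (link_speed) {
	case SPEED_10:
		adjust = EXAMPLE_TX_LATENCY_10;
		break;
	case SPEED_100:
		adjust = EXAMPLE_TX_LATENCY_100;
		break;
	case SPEED_1000:
		adjust = EXAMPLE_TX_LATENCY_1000;
		break;
	default:
		adjust = 0;
		break;
	}

	/* push the timestamp from "captured in the MAC" towards "on the wire" */
	shhwtstamps.hwtstamp = ktime_add_ns(hw_tstamp, adjust);
	skb_tstamp_tx(skb, &shhwtstamps);
}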
We use * a copy of the skb pointer to ensure other threads can't change it diff --git a/drivers/net/ethernet/lantiq_xrx200.c b/drivers/net/ethernet/lantiq_xrx200.c index 1645e4e7ebdb..635ff3a5dcfb 100644 --- a/drivers/net/ethernet/lantiq_xrx200.c +++ b/drivers/net/ethernet/lantiq_xrx200.c @@ -230,8 +230,8 @@ static int xrx200_poll_rx(struct napi_struct *napi, int budget) } if (rx < budget) { - napi_complete(&ch->napi); - ltq_dma_enable_irq(&ch->dma); + if (napi_complete_done(&ch->napi, rx)) + ltq_dma_enable_irq(&ch->dma); } return rx; @@ -268,9 +268,12 @@ static int xrx200_tx_housekeeping(struct napi_struct *napi, int budget) net_dev->stats.tx_bytes += bytes; netdev_completed_queue(ch->priv->net_dev, pkts, bytes); + if (netif_queue_stopped(net_dev)) + netif_wake_queue(net_dev); + if (pkts < budget) { - napi_complete(&ch->napi); - ltq_dma_enable_irq(&ch->dma); + if (napi_complete_done(&ch->napi, pkts)) + ltq_dma_enable_irq(&ch->dma); } return pkts; @@ -342,10 +345,12 @@ static irqreturn_t xrx200_dma_irq(int irq, void *ptr) { struct xrx200_chan *ch = ptr; - ltq_dma_disable_irq(&ch->dma); - ltq_dma_ack_irq(&ch->dma); + if (napi_schedule_prep(&ch->napi)) { + __napi_schedule(&ch->napi); + ltq_dma_disable_irq(&ch->dma); + } - napi_schedule(&ch->napi); + ltq_dma_ack_irq(&ch->dma); return IRQ_HANDLED; } @@ -499,7 +504,7 @@ static int xrx200_probe(struct platform_device *pdev) /* setup NAPI */ netif_napi_add(net_dev, &priv->chan_rx.napi, xrx200_poll_rx, 32); - netif_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32); + netif_tx_napi_add(net_dev, &priv->chan_tx.napi, xrx200_tx_housekeeping, 32); platform_set_drvdata(pdev, priv); diff --git a/drivers/net/ethernet/marvell/mvneta.c b/drivers/net/ethernet/marvell/mvneta.c index dfcb1767acbb..c4345e3d616f 100644 --- a/drivers/net/ethernet/marvell/mvneta.c +++ b/drivers/net/ethernet/marvell/mvneta.c @@ -2029,11 +2029,11 @@ mvneta_xdp_put_buff(struct mvneta_port *pp, struct mvneta_rx_queue *rxq, struct skb_shared_info *sinfo = xdp_get_shared_info_from_buff(xdp); int i; - page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data), - sync_len, napi); for (i = 0; i < sinfo->nr_frags; i++) page_pool_put_full_page(rxq->page_pool, skb_frag_page(&sinfo->frags[i]), napi); + page_pool_put_page(rxq->page_pool, virt_to_head_page(xdp->data), + sync_len, napi); } static int @@ -2383,8 +2383,12 @@ static int mvneta_rx_swbm(struct napi_struct *napi, mvneta_swbm_rx_frame(pp, rx_desc, rxq, &xdp_buf, &size, page, &ps); } else { - if (unlikely(!xdp_buf.data_hard_start)) + if (unlikely(!xdp_buf.data_hard_start)) { + rx_desc->buf_phys_addr = 0; + page_pool_put_full_page(rxq->page_pool, page, + true); continue; + } mvneta_swbm_add_rx_fragment(pp, rx_desc, rxq, &xdp_buf, &size, page); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en.h b/drivers/net/ethernet/mellanox/mlx5/core/en.h index 0cc2080fd847..90d5caabd6af 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en.h @@ -600,7 +600,7 @@ struct mlx5e_rq { struct dim dim; /* Dynamic Interrupt Moderation */ /* XDP */ - struct bpf_prog *xdp_prog; + struct bpf_prog __rcu *xdp_prog; struct mlx5e_xdpsq *xdpsq; DECLARE_BITMAP(flags, 8); struct page_pool *page_pool; @@ -1005,7 +1005,6 @@ int mlx5e_update_nic_rx(struct mlx5e_priv *priv); void mlx5e_update_carrier(struct mlx5e_priv *priv); int mlx5e_close(struct net_device *netdev); int mlx5e_open(struct net_device *netdev); -void mlx5e_update_ndo_stats(struct mlx5e_priv *priv); void 
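The lantiq_xrx200 hunks above move to the canonical NAPI interrupt handshake: the hard IRQ masks the source only when napi_schedule_prep() says this CPU now owns the poll, and the poll re-enables the interrupt only when napi_complete_done() confirms NAPI really went idle. A compressed sketch of that handshake; example_dma_* stand in for the device-specific mask/ack/unmask helpers:

static irqreturn_t example_dma_irq(int irq, void *ptr)
{
	struct example_chan *ch = ptr;

	if (napi_schedule_prep(&ch->napi)) {
		example_dma_disable_irq(ch);	/* mask only if we scheduled the poll */
		__napi_schedule(&ch->napi);
	}
	example_dma_ack_irq(ch);
	return IRQ_HANDLED;
}

static int example_poll(struct napi_struct *napi, int budget)
{
	struct example_chan *ch = container_of(napi, struct example_chan, napi);
	int done = example_process_rx(ch, budget);

	if (done < budget && napi_complete_done(napi, done))
		example_dma_enable_irq(ch);	/* unmask only once NAPI is idle */

	return done;
}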
mlx5e_queue_update_stats(struct mlx5e_priv *priv); int mlx5e_bits_invert(unsigned long a, int size); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c index 8fe8b4d6ad1c..254c84739046 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/monitor_stats.c @@ -51,7 +51,7 @@ static void mlx5e_monitor_counters_work(struct work_struct *work) monitor_counters_work); mutex_lock(&priv->state_lock); - mlx5e_update_ndo_stats(priv); + mlx5e_stats_update_ndo_stats(priv); mutex_unlock(&priv->state_lock); mlx5e_monitor_counter_arm(priv); } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c index 5de1cb9f5330..96608dbb9314 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/port.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/port.c @@ -490,11 +490,8 @@ bool mlx5e_fec_in_caps(struct mlx5_core_dev *dev, int fec_policy) int err; int i; - if (!MLX5_CAP_GEN(dev, pcam_reg)) - return -EOPNOTSUPP; - - if (!MLX5_CAP_PCAM_REG(dev, pplm)) - return -EOPNOTSUPP; + if (!MLX5_CAP_GEN(dev, pcam_reg) || !MLX5_CAP_PCAM_REG(dev, pplm)) + return false; MLX5_SET(pplm_reg, in, local_port, 1); err = mlx5_core_access_reg(dev, in, sz, out, sz, MLX5_REG_PPLM, 0, 0); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c index c6bc9224c3b1..bc5f72ec3623 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.c @@ -699,6 +699,7 @@ mlx5_tc_ct_entry_add_rule(struct mlx5_tc_ct_priv *ct_priv, err_rule: mlx5e_mod_hdr_detach(ct_priv->esw->dev, &esw->offloads.mod_hdr, zone_rule->mh); + mapping_remove(ct_priv->labels_mapping, attr->ct_attr.ct_labels_id); err_mod_hdr: kfree(spec); return err; @@ -958,12 +959,22 @@ mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv, return 0; } +void mlx5_tc_ct_match_del(struct mlx5e_priv *priv, struct mlx5_ct_attr *ct_attr) +{ + struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv); + + if (!ct_priv || !ct_attr->ct_labels_id) + return; + + mapping_remove(ct_priv->labels_mapping, ct_attr->ct_labels_id); +} + int -mlx5_tc_ct_parse_match(struct mlx5e_priv *priv, - struct mlx5_flow_spec *spec, - struct flow_cls_offload *f, - struct mlx5_ct_attr *ct_attr, - struct netlink_ext_ack *extack) +mlx5_tc_ct_match_add(struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec, + struct flow_cls_offload *f, + struct mlx5_ct_attr *ct_attr, + struct netlink_ext_ack *extack) { struct mlx5_tc_ct_priv *ct_priv = mlx5_tc_ct_get_ct_priv(priv); struct flow_rule *rule = flow_cls_offload_flow_rule(f); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h index 3baef917a677..708c216325d3 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/tc_ct.h @@ -87,12 +87,15 @@ mlx5_tc_ct_init(struct mlx5_rep_uplink_priv *uplink_priv); void mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv); +void +mlx5_tc_ct_match_del(struct mlx5e_priv *priv, struct mlx5_ct_attr *ct_attr); + int -mlx5_tc_ct_parse_match(struct mlx5e_priv *priv, - struct mlx5_flow_spec *spec, - struct flow_cls_offload *f, - struct mlx5_ct_attr *ct_attr, - struct netlink_ext_ack *extack); +mlx5_tc_ct_match_add(struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec, + struct flow_cls_offload *f, + struct 
mlx5_ct_attr *ct_attr, + struct netlink_ext_ack *extack); int mlx5_tc_ct_add_no_trk_match(struct mlx5e_priv *priv, struct mlx5_flow_spec *spec); @@ -130,12 +133,15 @@ mlx5_tc_ct_clean(struct mlx5_rep_uplink_priv *uplink_priv) { } +static inline void +mlx5_tc_ct_match_del(struct mlx5e_priv *priv, struct mlx5_ct_attr *ct_attr) {} + static inline int -mlx5_tc_ct_parse_match(struct mlx5e_priv *priv, - struct mlx5_flow_spec *spec, - struct flow_cls_offload *f, - struct mlx5_ct_attr *ct_attr, - struct netlink_ext_ack *extack) +mlx5_tc_ct_match_add(struct mlx5e_priv *priv, + struct mlx5_flow_spec *spec, + struct flow_cls_offload *f, + struct mlx5_ct_attr *ct_attr, + struct netlink_ext_ack *extack) { struct flow_rule *rule = flow_cls_offload_flow_rule(f); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h index 9334c9c3e208..24336c60123a 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/txrx.h @@ -20,6 +20,11 @@ enum mlx5e_icosq_wqe_type { }; /* General */ +static inline bool mlx5e_skb_is_multicast(struct sk_buff *skb) +{ + return skb->pkt_type == PACKET_MULTICAST || skb->pkt_type == PACKET_BROADCAST; +} + void mlx5e_trigger_irq(struct mlx5e_icosq *sq); void mlx5e_completion_event(struct mlx5_core_cq *mcq, struct mlx5_eqe *eqe); void mlx5e_cq_error_event(struct mlx5_core_cq *mcq, enum mlx5_event event); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c index 0e6946fc121f..b28df21981a1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xdp.c @@ -122,7 +122,7 @@ mlx5e_xmit_xdp_buff(struct mlx5e_xdpsq *sq, struct mlx5e_rq *rq, bool mlx5e_xdp_handle(struct mlx5e_rq *rq, struct mlx5e_dma_info *di, u32 *len, struct xdp_buff *xdp) { - struct bpf_prog *prog = READ_ONCE(rq->xdp_prog); + struct bpf_prog *prog = rcu_dereference(rq->xdp_prog); u32 act; int err; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c index a33a1f762c70..40db27bf790b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/rx.c @@ -31,7 +31,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, { struct xdp_buff *xdp = wi->umr.dma_info[page_idx].xsk; u32 cqe_bcnt32 = cqe_bcnt; - bool consumed; /* Check packet size. Note LRO doesn't use linear SKB */ if (unlikely(cqe_bcnt > rq->hw_mtu)) { @@ -51,10 +50,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, xsk_buff_dma_sync_for_cpu(xdp); prefetch(xdp->data); - rcu_read_lock(); - consumed = mlx5e_xdp_handle(rq, NULL, &cqe_bcnt32, xdp); - rcu_read_unlock(); - /* Possible flows: * - XDP_REDIRECT to XSKMAP: * The page is owned by the userspace from now. @@ -70,7 +65,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, * allocated first from the Reuse Ring, so it has enough space. 
*/ - if (likely(consumed)) { + if (likely(mlx5e_xdp_handle(rq, NULL, &cqe_bcnt32, xdp))) { if (likely(__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags))) __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */ return NULL; /* page/packet was consumed by XDP */ @@ -88,7 +83,6 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq, u32 cqe_bcnt) { struct xdp_buff *xdp = wi->di->xsk; - bool consumed; /* wi->offset is not used in this function, because xdp->data and the * DMA address point directly to the necessary place. Furthermore, the @@ -107,11 +101,7 @@ struct sk_buff *mlx5e_xsk_skb_from_cqe_linear(struct mlx5e_rq *rq, return NULL; } - rcu_read_lock(); - consumed = mlx5e_xdp_handle(rq, NULL, &cqe_bcnt, xdp); - rcu_read_unlock(); - - if (likely(consumed)) + if (likely(mlx5e_xdp_handle(rq, NULL, &cqe_bcnt, xdp))) return NULL; /* page/packet was consumed by XDP */ /* XDP_PASS: copy the data from the UMEM to a new SKB. The frame reuse diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c index dd9df519d383..55e65a438de7 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en/xsk/setup.c @@ -106,8 +106,7 @@ err_free_cparam: void mlx5e_close_xsk(struct mlx5e_channel *c) { clear_bit(MLX5E_CHANNEL_STATE_XSK, c->state); - napi_synchronize(&c->napi); - synchronize_rcu(); /* Sync with the XSK wakeup. */ + synchronize_rcu(); /* Sync with the XSK wakeup and with NAPI. */ mlx5e_close_rq(&c->xskrq); mlx5e_close_cq(&c->xskrq.cq); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c index acf6d80a6bb7..6bbfcf18107d 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/ktls_rx.c @@ -234,7 +234,7 @@ mlx5e_get_ktls_rx_priv_ctx(struct tls_context *tls_ctx) /* Re-sync */ /* Runs in work context */ -static struct mlx5_wqe_ctrl_seg * +static int resync_post_get_progress_params(struct mlx5e_icosq *sq, struct mlx5e_ktls_offload_context_rx *priv_rx) { @@ -258,15 +258,19 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE); if (unlikely(dma_mapping_error(pdev, buf->dma_addr))) { err = -ENOMEM; - goto err_out; + goto err_free; } buf->priv_rx = priv_rx; BUILD_BUG_ON(MLX5E_KTLS_GET_PROGRESS_WQEBBS != 1); + + spin_lock(&sq->channel->async_icosq_lock); + if (unlikely(!mlx5e_wqc_has_room_for(&sq->wq, sq->cc, sq->pc, 1))) { + spin_unlock(&sq->channel->async_icosq_lock); err = -ENOSPC; - goto err_out; + goto err_dma_unmap; } pi = mlx5e_icosq_get_next_pi(sq, 1); @@ -294,12 +298,18 @@ resync_post_get_progress_params(struct mlx5e_icosq *sq, }; icosq_fill_wi(sq, pi, &wi); sq->pc++; + mlx5e_notify_hw(&sq->wq, sq->pc, sq->uar_map, cseg); + spin_unlock(&sq->channel->async_icosq_lock); - return cseg; + return 0; +err_dma_unmap: + dma_unmap_single(pdev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE); +err_free: + kfree(buf); err_out: priv_rx->stats->tls_resync_req_skip++; - return ERR_PTR(err); + return err; } /* Function is called with elevated refcount. 
@@ -309,10 +319,8 @@ static void resync_handle_work(struct work_struct *work) { struct mlx5e_ktls_offload_context_rx *priv_rx; struct mlx5e_ktls_rx_resync_ctx *resync; - struct mlx5_wqe_ctrl_seg *cseg; struct mlx5e_channel *c; struct mlx5e_icosq *sq; - struct mlx5_wq_cyc *wq; resync = container_of(work, struct mlx5e_ktls_rx_resync_ctx, work); priv_rx = container_of(resync, struct mlx5e_ktls_offload_context_rx, resync); @@ -324,18 +332,9 @@ static void resync_handle_work(struct work_struct *work) c = resync->priv->channels.c[priv_rx->rxq]; sq = &c->async_icosq; - wq = &sq->wq; - - spin_lock(&c->async_icosq_lock); - cseg = resync_post_get_progress_params(sq, priv_rx); - if (IS_ERR(cseg)) { + if (resync_post_get_progress_params(sq, priv_rx)) refcount_dec(&resync->refcnt); - goto unlock; - } - mlx5e_notify_hw(wq, sq->pc, sq->uar_map, cseg); -unlock: - spin_unlock(&c->async_icosq_lock); } static void resync_init(struct mlx5e_ktls_rx_resync_ctx *resync, @@ -386,16 +385,17 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi, struct mlx5e_ktls_offload_context_rx *priv_rx; struct mlx5e_ktls_rx_resync_ctx *resync; u8 tracker_state, auth_state, *ctx; + struct device *dev; u32 hw_seq; priv_rx = buf->priv_rx; resync = &priv_rx->resync; - + dev = resync->priv->mdev->device; if (unlikely(test_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags))) goto out; - dma_sync_single_for_cpu(resync->priv->mdev->device, buf->dma_addr, - PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE); + dma_sync_single_for_cpu(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, + DMA_FROM_DEVICE); ctx = buf->progress.ctx; tracker_state = MLX5_GET(tls_progress_params, ctx, record_tracker_state); @@ -411,6 +411,7 @@ void mlx5e_ktls_handle_get_psv_completion(struct mlx5e_icosq_wqe_info *wi, priv_rx->stats->tls_resync_req_end++; out: refcount_dec(&resync->refcnt); + dma_unmap_single(dev, buf->dma_addr, PROGRESS_PARAMS_PADDED_SIZE, DMA_FROM_DEVICE); kfree(buf); } @@ -659,7 +660,7 @@ void mlx5e_ktls_del_rx(struct net_device *netdev, struct tls_context *tls_ctx) priv_rx = mlx5e_get_ktls_rx_priv_ctx(tls_ctx); set_bit(MLX5E_PRIV_RX_FLAG_DELETING, priv_rx->flags); mlx5e_set_ktls_rx_priv_ctx(tls_ctx, NULL); - napi_synchronize(&priv->channels.c[priv_rx->rxq]->napi); + synchronize_rcu(); /* Sync with NAPI */ if (!cancel_work_sync(&priv_rx->rule.work)) /* completion is needed, as the priv_rx in the add flow * is maintained on the wqe info (wi), not on the socket. 
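The ktls_rx.c hunks above make resync_post_get_progress_params() post and ring the doorbell while still holding async_icosq_lock, and they pair the DMA mapping with an unmap on every path: the new error labels and the completion handler both release it. A reduced sketch of that map/post/unmap life cycle; example_post_work(), example_consume() and the buffer layout are invented for the illustration:

struct example_buf {
	dma_addr_t dma_addr;
	u8 data[64];
};

static int example_post_query(struct device *dev, struct example_buf *buf)
{
	int err;

	buf->dma_addr = dma_map_single(dev, &buf->data, sizeof(buf->data),
				       DMA_FROM_DEVICE);
	if (dma_mapping_error(dev, buf->dma_addr))
		return -ENOMEM;

	err = example_post_work(buf);		/* device will DMA into buf->data */
	if (err)
		goto err_unmap;

	return 0;				/* unmapped later, in the completion */

err_unmap:
	dma_unmap_single(dev, buf->dma_addr, sizeof(buf->data), DMA_FROM_DEVICE);
	return err;
}

static void example_complete_query(struct device *dev, struct example_buf *buf)
{
	dma_sync_single_for_cpu(dev, buf->dma_addr, sizeof(buf->data),
				DMA_FROM_DEVICE);
	example_consume(buf);			/* read the device-written data */
	dma_unmap_single(dev, buf->dma_addr, sizeof(buf->data), DMA_FROM_DEVICE);
	kfree(buf);
}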
diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c index 01468ec27446..b949b9a7538b 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_accel/tls_stats.c @@ -35,7 +35,6 @@ #include <net/sock.h> #include "en.h" -#include "accel/tls.h" #include "fpga/sdk.h" #include "en_accel/tls.h" @@ -51,9 +50,14 @@ static const struct counter_desc mlx5e_tls_sw_stats_desc[] = { #define NUM_TLS_SW_COUNTERS ARRAY_SIZE(mlx5e_tls_sw_stats_desc) +static bool is_tls_atomic_stats(struct mlx5e_priv *priv) +{ + return priv->tls && !mlx5_accel_is_ktls_device(priv->mdev); +} + int mlx5e_tls_get_count(struct mlx5e_priv *priv) { - if (!priv->tls) + if (!is_tls_atomic_stats(priv)) return 0; return NUM_TLS_SW_COUNTERS; @@ -63,7 +67,7 @@ int mlx5e_tls_get_strings(struct mlx5e_priv *priv, uint8_t *data) { unsigned int i, idx = 0; - if (!priv->tls) + if (!is_tls_atomic_stats(priv)) return 0; for (i = 0; i < NUM_TLS_SW_COUNTERS; i++) @@ -77,7 +81,7 @@ int mlx5e_tls_get_stats(struct mlx5e_priv *priv, u64 *data) { int i, idx = 0; - if (!priv->tls) + if (!is_tls_atomic_stats(priv)) return 0; for (i = 0; i < NUM_TLS_SW_COUNTERS; i++) diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c index aebcf73f8546..b3cda7b6e5e1 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_main.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_main.c @@ -158,16 +158,6 @@ static void mlx5e_update_carrier_work(struct work_struct *work) mutex_unlock(&priv->state_lock); } -void mlx5e_update_ndo_stats(struct mlx5e_priv *priv) -{ - int i; - - for (i = mlx5e_nic_stats_grps_num(priv) - 1; i >= 0; i--) - if (mlx5e_nic_stats_grps[i]->update_stats_mask & - MLX5E_NDO_UPDATE_STATS) - mlx5e_nic_stats_grps[i]->update_stats(priv); -} - static void mlx5e_update_stats_work(struct work_struct *work) { struct mlx5e_priv *priv = container_of(work, struct mlx5e_priv, @@ -399,7 +389,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, if (params->xdp_prog) bpf_prog_inc(params->xdp_prog); - rq->xdp_prog = params->xdp_prog; + RCU_INIT_POINTER(rq->xdp_prog, params->xdp_prog); rq_xdp_ix = rq->ix; if (xsk) @@ -408,7 +398,7 @@ static int mlx5e_alloc_rq(struct mlx5e_channel *c, if (err < 0) goto err_rq_wq_destroy; - rq->buff.map_dir = rq->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; + rq->buff.map_dir = params->xdp_prog ? DMA_BIDIRECTIONAL : DMA_FROM_DEVICE; rq->buff.headroom = mlx5e_get_rq_headroom(mdev, params, xsk); pool_size = 1 << params->log_rq_mtu_frames; @@ -564,8 +554,8 @@ err_free: } err_rq_wq_destroy: - if (rq->xdp_prog) - bpf_prog_put(rq->xdp_prog); + if (params->xdp_prog) + bpf_prog_put(params->xdp_prog); xdp_rxq_info_unreg(&rq->xdp_rxq); page_pool_destroy(rq->page_pool); mlx5_wq_destroy(&rq->wq_ctrl); @@ -575,10 +565,16 @@ err_rq_wq_destroy: static void mlx5e_free_rq(struct mlx5e_rq *rq) { + struct mlx5e_channel *c = rq->channel; + struct bpf_prog *old_prog = NULL; int i; - if (rq->xdp_prog) - bpf_prog_put(rq->xdp_prog); + /* drop_rq has neither channel nor xdp_prog. 
*/ + if (c) + old_prog = rcu_dereference_protected(rq->xdp_prog, + lockdep_is_held(&c->priv->state_lock)); + if (old_prog) + bpf_prog_put(old_prog); switch (rq->wq_type) { case MLX5_WQ_TYPE_LINKED_LIST_STRIDING_RQ: @@ -867,7 +863,7 @@ void mlx5e_activate_rq(struct mlx5e_rq *rq) void mlx5e_deactivate_rq(struct mlx5e_rq *rq) { clear_bit(MLX5E_RQ_STATE_ENABLED, &rq->state); - napi_synchronize(&rq->channel->napi); /* prevent mlx5e_post_rx_wqes */ + synchronize_rcu(); /* Sync with NAPI to prevent mlx5e_post_rx_wqes. */ } void mlx5e_close_rq(struct mlx5e_rq *rq) @@ -1312,12 +1308,10 @@ void mlx5e_tx_disable_queue(struct netdev_queue *txq) static void mlx5e_deactivate_txqsq(struct mlx5e_txqsq *sq) { - struct mlx5e_channel *c = sq->channel; struct mlx5_wq_cyc *wq = &sq->wq; clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); - /* prevent netif_tx_wake_queue */ - napi_synchronize(&c->napi); + synchronize_rcu(); /* Sync with NAPI to prevent netif_tx_wake_queue. */ mlx5e_tx_disable_queue(sq->txq); @@ -1392,10 +1386,8 @@ void mlx5e_activate_icosq(struct mlx5e_icosq *icosq) void mlx5e_deactivate_icosq(struct mlx5e_icosq *icosq) { - struct mlx5e_channel *c = icosq->channel; - clear_bit(MLX5E_SQ_STATE_ENABLED, &icosq->state); - napi_synchronize(&c->napi); + synchronize_rcu(); /* Sync with NAPI. */ } void mlx5e_close_icosq(struct mlx5e_icosq *sq) @@ -1474,7 +1466,7 @@ void mlx5e_close_xdpsq(struct mlx5e_xdpsq *sq) struct mlx5e_channel *c = sq->channel; clear_bit(MLX5E_SQ_STATE_ENABLED, &sq->state); - napi_synchronize(&c->napi); + synchronize_rcu(); /* Sync with NAPI. */ mlx5e_destroy_sq(c->mdev, sq->sqn); mlx5e_free_xdpsq_descs(sq); @@ -3567,6 +3559,7 @@ void mlx5e_fold_sw_stats64(struct mlx5e_priv *priv, struct rtnl_link_stats64 *s) s->rx_packets += rq_stats->packets + xskrq_stats->packets; s->rx_bytes += rq_stats->bytes + xskrq_stats->bytes; + s->multicast += rq_stats->mcast_packets + xskrq_stats->mcast_packets; for (j = 0; j < priv->max_opened_tc; j++) { struct mlx5e_sq_stats *sq_stats = &channel_stats->sq[j]; @@ -3582,7 +3575,6 @@ void mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) { struct mlx5e_priv *priv = netdev_priv(dev); - struct mlx5e_vport_stats *vstats = &priv->stats.vport; struct mlx5e_pport_stats *pstats = &priv->stats.pport; /* In switchdev mode, monitor counters doesn't monitor @@ -3617,12 +3609,6 @@ mlx5e_get_stats(struct net_device *dev, struct rtnl_link_stats64 *stats) stats->rx_errors = stats->rx_length_errors + stats->rx_crc_errors + stats->rx_frame_errors; stats->tx_errors = stats->tx_aborted_errors + stats->tx_carrier_errors; - - /* vport multicast also counts packets that are dropped due to steering - * or rx out of buffer - */ - stats->multicast = - VPORT_COUNTER_GET(vstats, received_eth_multicast.packets); } static void mlx5e_set_rx_mode(struct net_device *dev) @@ -4330,6 +4316,16 @@ static int mlx5e_xdp_allowed(struct mlx5e_priv *priv, struct bpf_prog *prog) return 0; } +static void mlx5e_rq_replace_xdp_prog(struct mlx5e_rq *rq, struct bpf_prog *prog) +{ + struct bpf_prog *old_prog; + + old_prog = rcu_replace_pointer(rq->xdp_prog, prog, + lockdep_is_held(&rq->channel->priv->state_lock)); + if (old_prog) + bpf_prog_put(old_prog); +} + static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) { struct mlx5e_priv *priv = netdev_priv(netdev); @@ -4388,29 +4384,10 @@ static int mlx5e_xdp_set(struct net_device *netdev, struct bpf_prog *prog) */ for (i = 0; i < priv->channels.num; i++) { struct mlx5e_channel *c = priv->channels.c[i]; - bool 
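Several mlx5 hunks above convert rq->xdp_prog into an __rcu pointer: the datapath reads it with rcu_dereference() from inside the NAPI poll (which now holds rcu_read_lock() for its whole run), the control path swaps it with rcu_replace_pointer() under the state lock, and teardown waits with synchronize_rcu() instead of napi_synchronize(). A condensed sketch of that scheme around a hypothetical example_rq:

struct example_rq {
	struct bpf_prog __rcu *xdp_prog;
	/* ... */
};

/* datapath: called from the NAPI poll, under rcu_read_lock() */
static bool example_xdp_consumed(struct example_rq *rq, struct xdp_buff *xdp)
{
	struct bpf_prog *prog = rcu_dereference(rq->xdp_prog);

	if (!prog)
		return false;

	return bpf_prog_run_xdp(prog, xdp) != XDP_PASS;
}

/* control path: called with the driver's state_lock mutex held */
static void example_rq_replace_xdp_prog(struct example_rq *rq,
					struct bpf_prog *prog,
					struct mutex *state_lock)
{
	struct bpf_prog *old;

	old = rcu_replace_pointer(rq->xdp_prog, prog,
				  lockdep_is_held(state_lock));
	if (old)
		bpf_prog_put(old);	/* drop the reference the RQ held */
}

Taking rcu_read_lock() once per poll, rather than per packet, is what lets the per-packet rcu_read_lock()/unlock() pairs disappear from the RX fast path.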
xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state); - - clear_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state); - if (xsk_open) - clear_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state); - napi_synchronize(&c->napi); - /* prevent mlx5e_poll_rx_cq from accessing rq->xdp_prog */ - - old_prog = xchg(&c->rq.xdp_prog, prog); - if (old_prog) - bpf_prog_put(old_prog); - - if (xsk_open) { - old_prog = xchg(&c->xskrq.xdp_prog, prog); - if (old_prog) - bpf_prog_put(old_prog); - } - set_bit(MLX5E_RQ_STATE_ENABLED, &c->rq.state); - if (xsk_open) - set_bit(MLX5E_RQ_STATE_ENABLED, &c->xskrq.state); - /* napi_schedule in case we have missed anything */ - napi_schedule(&c->napi); + mlx5e_rq_replace_xdp_prog(&c->rq, prog); + if (test_bit(MLX5E_CHANNEL_STATE_XSK, c->state)) + mlx5e_rq_replace_xdp_prog(&c->xskrq, prog); } unlock: @@ -5200,7 +5177,7 @@ static const struct mlx5e_profile mlx5e_nic_profile = { .enable = mlx5e_nic_enable, .disable = mlx5e_nic_disable, .update_rx = mlx5e_update_nic_rx, - .update_stats = mlx5e_update_ndo_stats, + .update_stats = mlx5e_stats_update_ndo_stats, .update_carrier = mlx5e_update_carrier, .rx_handlers = &mlx5e_rx_handlers_nic, .max_tc = MLX5E_MAX_NUM_TC, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c index e13e5d1b3eae..e979bff64c49 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rep.c @@ -1171,7 +1171,7 @@ static const struct mlx5e_profile mlx5e_rep_profile = { .cleanup_tx = mlx5e_cleanup_rep_tx, .enable = mlx5e_rep_enable, .update_rx = mlx5e_update_rep_rx, - .update_stats = mlx5e_update_ndo_stats, + .update_stats = mlx5e_stats_update_ndo_stats, .rx_handlers = &mlx5e_rx_handlers_rep, .max_tc = 1, .rq_groups = MLX5E_NUM_RQ_GROUPS(REGULAR), @@ -1189,7 +1189,7 @@ static const struct mlx5e_profile mlx5e_uplink_rep_profile = { .enable = mlx5e_uplink_rep_enable, .disable = mlx5e_uplink_rep_disable, .update_rx = mlx5e_update_rep_rx, - .update_stats = mlx5e_update_ndo_stats, + .update_stats = mlx5e_stats_update_ndo_stats, .update_carrier = mlx5e_update_carrier, .rx_handlers = &mlx5e_rx_handlers_rep, .max_tc = MLX5E_MAX_NUM_TC, diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c index 65828af120b7..64c8ac5eabf6 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_rx.c @@ -53,6 +53,7 @@ #include "en/xsk/rx.h" #include "en/health.h" #include "en/params.h" +#include "en/txrx.h" static struct sk_buff * mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, @@ -1080,6 +1081,9 @@ static inline void mlx5e_build_rx_skb(struct mlx5_cqe64 *cqe, mlx5e_enable_ecn(rq, skb); skb->protocol = eth_type_trans(skb, netdev); + + if (unlikely(mlx5e_skb_is_multicast(skb))) + stats->mcast_packets++; } static inline void mlx5e_complete_rx_cqe(struct mlx5e_rq *rq, @@ -1132,7 +1136,6 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, struct xdp_buff xdp; struct sk_buff *skb; void *va, *data; - bool consumed; u32 frag_size; va = page_address(di->page) + wi->offset; @@ -1144,11 +1147,8 @@ mlx5e_skb_from_cqe_linear(struct mlx5e_rq *rq, struct mlx5_cqe64 *cqe, prefetchw(va); /* xdp_frame data area */ prefetch(data); - rcu_read_lock(); mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt, &xdp); - consumed = mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp); - rcu_read_unlock(); - if (consumed) + if (mlx5e_xdp_handle(rq, di, &cqe_bcnt, &xdp)) 
return NULL; /* page/packet was consumed by XDP */ rx_headroom = xdp.data - xdp.data_hard_start; @@ -1438,7 +1438,6 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, struct sk_buff *skb; void *va, *data; u32 frag_size; - bool consumed; /* Check packet size. Note LRO doesn't use linear SKB */ if (unlikely(cqe_bcnt > rq->hw_mtu)) { @@ -1455,11 +1454,8 @@ mlx5e_skb_from_cqe_mpwrq_linear(struct mlx5e_rq *rq, struct mlx5e_mpw_info *wi, prefetchw(va); /* xdp_frame data area */ prefetch(data); - rcu_read_lock(); mlx5e_fill_xdp_buff(rq, va, rx_headroom, cqe_bcnt32, &xdp); - consumed = mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp); - rcu_read_unlock(); - if (consumed) { + if (mlx5e_xdp_handle(rq, di, &cqe_bcnt32, &xdp)) { if (__test_and_clear_bit(MLX5E_RQ_FLAG_XDP_XMIT, rq->flags)) __set_bit(page_idx, wi->xdp_xmit_bitmap); /* non-atomic */ return NULL; /* page/packet was consumed by XDP */ diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c index e3b2f59408e6..f6383bc2bc3f 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.c @@ -54,6 +54,18 @@ unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv) return total; } +void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv) +{ + mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps; + const unsigned int num_stats_grps = stats_grps_num(priv); + int i; + + for (i = num_stats_grps - 1; i >= 0; i--) + if (stats_grps[i]->update_stats && + stats_grps[i]->update_stats_mask & MLX5E_NDO_UPDATE_STATS) + stats_grps[i]->update_stats(priv); +} + void mlx5e_stats_update(struct mlx5e_priv *priv) { mlx5e_stats_grp_t *stats_grps = priv->profile->stats_grps; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h index 2e1cca1923b9..562263d62141 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_stats.h @@ -103,6 +103,7 @@ unsigned int mlx5e_stats_total_num(struct mlx5e_priv *priv); void mlx5e_stats_update(struct mlx5e_priv *priv); void mlx5e_stats_fill(struct mlx5e_priv *priv, u64 *data, int idx); void mlx5e_stats_fill_strings(struct mlx5e_priv *priv, u8 *data); +void mlx5e_stats_update_ndo_stats(struct mlx5e_priv *priv); /* Concrete NIC Stats */ @@ -119,6 +120,7 @@ struct mlx5e_sw_stats { u64 tx_nop; u64 rx_lro_packets; u64 rx_lro_bytes; + u64 rx_mcast_packets; u64 rx_ecn_mark; u64 rx_removed_vlan_packets; u64 rx_csum_unnecessary; @@ -298,6 +300,7 @@ struct mlx5e_rq_stats { u64 csum_none; u64 lro_packets; u64 lro_bytes; + u64 mcast_packets; u64 ecn_mark; u64 removed_vlan_packets; u64 xdp_drop; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c index fd53d101d8fd..1c93f92d9210 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_tc.c @@ -1290,11 +1290,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, mlx5e_put_flow_tunnel_id(flow); - if (flow_flag_test(flow, NOT_READY)) { + if (flow_flag_test(flow, NOT_READY)) remove_unready_flow(flow); - kvfree(attr->parse_attr); - return; - } if (mlx5e_is_offloaded_flow(flow)) { if (flow_flag_test(flow, SLOW)) @@ -1315,6 +1312,8 @@ static void mlx5e_tc_del_fdb_flow(struct mlx5e_priv *priv, } kvfree(attr->parse_attr); + mlx5_tc_ct_match_del(priv, &flow->esw_attr->ct_attr); + if (attr->action & 
MLX5_FLOW_CONTEXT_ACTION_MOD_HDR) mlx5e_detach_mod_hdr(priv, flow); @@ -2625,6 +2624,22 @@ static struct mlx5_fields fields[] = { OFFLOAD(UDP_DPORT, 16, U16_MAX, udp.dest, 0, udp_dport), }; +static unsigned long mask_to_le(unsigned long mask, int size) +{ + __be32 mask_be32; + __be16 mask_be16; + + if (size == 32) { + mask_be32 = (__force __be32)(mask); + mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32)); + } else if (size == 16) { + mask_be32 = (__force __be32)(mask); + mask_be16 = *(__be16 *)&mask_be32; + mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16)); + } + + return mask; +} static int offload_pedit_fields(struct mlx5e_priv *priv, int namespace, struct pedit_headers_action *hdrs, @@ -2638,9 +2653,7 @@ static int offload_pedit_fields(struct mlx5e_priv *priv, u32 *s_masks_p, *a_masks_p, s_mask, a_mask; struct mlx5e_tc_mod_hdr_acts *mod_acts; struct mlx5_fields *f; - unsigned long mask; - __be32 mask_be32; - __be16 mask_be16; + unsigned long mask, field_mask; int err; u8 cmd; @@ -2706,14 +2719,7 @@ static int offload_pedit_fields(struct mlx5e_priv *priv, if (skip) continue; - if (f->field_bsize == 32) { - mask_be32 = (__force __be32)(mask); - mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(mask_be32)); - } else if (f->field_bsize == 16) { - mask_be32 = (__force __be32)(mask); - mask_be16 = *(__be16 *)&mask_be32; - mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(mask_be16)); - } + mask = mask_to_le(mask, f->field_bsize); first = find_first_bit(&mask, f->field_bsize); next_z = find_next_zero_bit(&mask, f->field_bsize, first); @@ -2744,9 +2750,10 @@ static int offload_pedit_fields(struct mlx5e_priv *priv, if (cmd == MLX5_ACTION_TYPE_SET) { int start; + field_mask = mask_to_le(f->field_mask, f->field_bsize); + /* if field is bit sized it can start not from first bit */ - start = find_first_bit((unsigned long *)&f->field_mask, - f->field_bsize); + start = find_first_bit(&field_mask, f->field_bsize); MLX5_SET(set_action_in, action, offset, first - start); /* length is num of bits to be written, zero means length of 32 */ @@ -4402,8 +4409,8 @@ __mlx5e_add_fdb_flow(struct mlx5e_priv *priv, goto err_free; /* actions validation depends on parsing the ct matches first */ - err = mlx5_tc_ct_parse_match(priv, &parse_attr->spec, f, - &flow->esw_attr->ct_attr, extack); + err = mlx5_tc_ct_match_add(priv, &parse_attr->spec, f, + &flow->esw_attr->ct_attr, extack); if (err) goto err_free; diff --git a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c index de10b06bade5..d5868670f8a5 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/en_txrx.c @@ -121,13 +121,17 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) struct mlx5e_xdpsq *xsksq = &c->xsksq; struct mlx5e_rq *xskrq = &c->xskrq; struct mlx5e_rq *rq = &c->rq; - bool xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state); bool aff_change = false; bool busy_xsk = false; bool busy = false; int work_done = 0; + bool xsk_open; int i; + rcu_read_lock(); + + xsk_open = test_bit(MLX5E_CHANNEL_STATE_XSK, c->state); + ch_stats->poll++; for (i = 0; i < c->num_tc; i++) @@ -167,8 +171,10 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) busy |= busy_xsk; if (busy) { - if (likely(mlx5e_channel_no_affinity_change(c))) - return budget; + if (likely(mlx5e_channel_no_affinity_change(c))) { + work_done = budget; + goto out; + } ch_stats->aff_change++; aff_change = true; if (budget && work_done == 
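The new mask_to_le() helper above folds the duplicated 32-bit/16-bit byte-order conversion of a pedit mask into one place, so the same conversion can also be applied to f->field_mask before find_first_bit(). A simplified stand-alone sketch of the idea (it assumes a 16-bit mask sits in the low bits, which is slightly simpler than the driver's exact pointer cast):

/* Convert a mask supplied in network (big-endian) byte order into the
 * little-endian layout that bit-search helpers such as find_first_bit()
 * expect when they scan the wire-format buffer.
 */
static unsigned long example_mask_to_le(unsigned long mask, int size)
{
	if (size == 32) {
		__be32 be = (__force __be32)mask;

		mask = (__force unsigned long)cpu_to_le32(be32_to_cpu(be));
	} else if (size == 16) {
		__be16 be = (__force __be16)(u16)mask;

		mask = (__force unsigned long)cpu_to_le16(be16_to_cpu(be));
	}

	return mask;
}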
budget) @@ -176,7 +182,7 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) } if (unlikely(!napi_complete_done(napi, work_done))) - return work_done; + goto out; ch_stats->arm++; @@ -203,6 +209,9 @@ int mlx5e_napi_poll(struct napi_struct *napi, int budget) ch_stats->force_irq++; } +out: + rcu_read_unlock(); + return work_done; } diff --git a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c index d2516922d867..1bcf2609dca8 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/eswitch_offloads.c @@ -1219,35 +1219,37 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw) } esw->fdb_table.offloads.send_to_vport_grp = g; - /* create peer esw miss group */ - memset(flow_group_in, 0, inlen); + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) { + /* create peer esw miss group */ + memset(flow_group_in, 0, inlen); - esw_set_flow_group_source_port(esw, flow_group_in); + esw_set_flow_group_source_port(esw, flow_group_in); - if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) { - match_criteria = MLX5_ADDR_OF(create_flow_group_in, - flow_group_in, - match_criteria); + if (!mlx5_eswitch_vport_match_metadata_enabled(esw)) { + match_criteria = MLX5_ADDR_OF(create_flow_group_in, + flow_group_in, + match_criteria); - MLX5_SET_TO_ONES(fte_match_param, match_criteria, - misc_parameters.source_eswitch_owner_vhca_id); + MLX5_SET_TO_ONES(fte_match_param, match_criteria, + misc_parameters.source_eswitch_owner_vhca_id); - MLX5_SET(create_flow_group_in, flow_group_in, - source_eswitch_owner_vhca_id_valid, 1); - } + MLX5_SET(create_flow_group_in, flow_group_in, + source_eswitch_owner_vhca_id_valid, 1); + } - MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix); - MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, - ix + esw->total_vports - 1); - ix += esw->total_vports; + MLX5_SET(create_flow_group_in, flow_group_in, start_flow_index, ix); + MLX5_SET(create_flow_group_in, flow_group_in, end_flow_index, + ix + esw->total_vports - 1); + ix += esw->total_vports; - g = mlx5_create_flow_group(fdb, flow_group_in); - if (IS_ERR(g)) { - err = PTR_ERR(g); - esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err); - goto peer_miss_err; + g = mlx5_create_flow_group(fdb, flow_group_in); + if (IS_ERR(g)) { + err = PTR_ERR(g); + esw_warn(dev, "Failed to create peer miss flow group err(%d)\n", err); + goto peer_miss_err; + } + esw->fdb_table.offloads.peer_miss_grp = g; } - esw->fdb_table.offloads.peer_miss_grp = g; /* create miss group */ memset(flow_group_in, 0, inlen); @@ -1281,7 +1283,8 @@ static int esw_create_offloads_fdb_tables(struct mlx5_eswitch *esw) miss_rule_err: mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); miss_err: - mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp); + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) + mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp); peer_miss_err: mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); send_vport_err: @@ -1305,7 +1308,8 @@ static void esw_destroy_offloads_fdb_tables(struct mlx5_eswitch *esw) mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_multi); mlx5_del_flow_rules(esw->fdb_table.offloads.miss_rule_uni); mlx5_destroy_flow_group(esw->fdb_table.offloads.send_to_vport_grp); - mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp); + if (MLX5_CAP_ESW(esw->dev, merged_eswitch)) + 
mlx5_destroy_flow_group(esw->fdb_table.offloads.peer_miss_grp); mlx5_destroy_flow_group(esw->fdb_table.offloads.miss_grp); mlx5_esw_chains_destroy(esw); diff --git a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c index 9ccec5f8b92a..75fa44eee434 100644 --- a/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c +++ b/drivers/net/ethernet/mellanox/mlx5/core/fs_core.c @@ -654,7 +654,7 @@ static struct fs_fte *alloc_fte(struct mlx5_flow_table *ft, fte->action = *flow_act; fte->flow_context = spec->flow_context; - tree_init_node(&fte->node, NULL, del_sw_fte); + tree_init_node(&fte->node, del_hw_fte, del_sw_fte); return fte; } @@ -1792,7 +1792,6 @@ skip_search: up_write_ref_node(&g->node, false); rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte); up_write_ref_node(&fte->node, false); - tree_put_node(&fte->node, false); return rule; } rule = ERR_PTR(-ENOENT); @@ -1891,7 +1890,6 @@ search_again_locked: up_write_ref_node(&g->node, false); rule = add_rule_fg(g, spec, flow_act, dest, dest_num, fte); up_write_ref_node(&fte->node, false); - tree_put_node(&fte->node, false); tree_put_node(&g->node, false); return rule; @@ -2001,7 +1999,9 @@ void mlx5_del_flow_rules(struct mlx5_flow_handle *handle) up_write_ref_node(&fte->node, false); } else { del_hw_fte(&fte->node); - up_write(&fte->node.lock); + /* Avoid double call to del_hw_fte */ + fte->node.del_hw_func = NULL; + up_write_ref_node(&fte->node, false); tree_put_node(&fte->node, false); } kfree(handle); diff --git a/drivers/net/ethernet/mscc/ocelot.c b/drivers/net/ethernet/mscc/ocelot.c index 5abb7d2b0a9e..8518e1d60da4 100644 --- a/drivers/net/ethernet/mscc/ocelot.c +++ b/drivers/net/ethernet/mscc/ocelot.c @@ -421,10 +421,15 @@ int ocelot_port_add_txtstamp_skb(struct ocelot_port *ocelot_port, if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP && ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) { + spin_lock(&ocelot_port->ts_id_lock); + shinfo->tx_flags |= SKBTX_IN_PROGRESS; /* Store timestamp ID in cb[0] of sk_buff */ - skb->cb[0] = ocelot_port->ts_id % 4; + skb->cb[0] = ocelot_port->ts_id; + ocelot_port->ts_id = (ocelot_port->ts_id + 1) % 4; skb_queue_tail(&ocelot_port->tx_skbs, skb); + + spin_unlock(&ocelot_port->ts_id_lock); return 0; } return -ENODATA; @@ -1300,6 +1305,7 @@ void ocelot_init_port(struct ocelot *ocelot, int port) struct ocelot_port *ocelot_port = ocelot->ports[port]; skb_queue_head_init(&ocelot_port->tx_skbs); + spin_lock_init(&ocelot_port->ts_id_lock); /* Basic L2 initialization */ @@ -1544,18 +1550,18 @@ EXPORT_SYMBOL(ocelot_init); void ocelot_deinit(struct ocelot *ocelot) { - struct ocelot_port *port; - int i; - cancel_delayed_work(&ocelot->stats_work); destroy_workqueue(ocelot->stats_queue); mutex_destroy(&ocelot->stats_lock); - - for (i = 0; i < ocelot->num_phys_ports; i++) { - port = ocelot->ports[i]; - skb_queue_purge(&port->tx_skbs); - } } EXPORT_SYMBOL(ocelot_deinit); +void ocelot_deinit_port(struct ocelot *ocelot, int port) +{ + struct ocelot_port *ocelot_port = ocelot->ports[port]; + + skb_queue_purge(&ocelot_port->tx_skbs); +} +EXPORT_SYMBOL(ocelot_deinit_port); + MODULE_LICENSE("Dual MIT/GPL"); diff --git a/drivers/net/ethernet/mscc/ocelot_net.c b/drivers/net/ethernet/mscc/ocelot_net.c index 0668d23cdbfa..8490e42e9e2d 100644 --- a/drivers/net/ethernet/mscc/ocelot_net.c +++ b/drivers/net/ethernet/mscc/ocelot_net.c @@ -330,6 +330,7 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev) u8 grp = 0; /* Send everything on CPU group 0 */ 
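The ocelot change above turns the two-step PTP timestamp ID into an atomically allocated resource: the ID is assigned, advanced and queued under a new per-port ts_id_lock, and the value stored in skb->cb[0] is what the xmit path later writes into the injection header. A boiled-down version of that allocation step; example_port stands in for struct ocelot_port:

struct example_port {
	spinlock_t ts_id_lock;
	u8 ts_id;
	struct sk_buff_head tx_skbs;
};

static int example_add_txtstamp_skb(struct example_port *port, struct sk_buff *skb)
{
	struct skb_shared_info *shinfo = skb_shinfo(skb);

	if (!(shinfo->tx_flags & SKBTX_HW_TSTAMP))
		return -ENODATA;

	spin_lock(&port->ts_id_lock);

	shinfo->tx_flags |= SKBTX_IN_PROGRESS;
	skb->cb[0] = port->ts_id;		/* ID travels with this skb */
	port->ts_id = (port->ts_id + 1) % 4;	/* next free two-step ID */
	skb_queue_tail(&port->tx_skbs, skb);

	spin_unlock(&port->ts_id_lock);

	return 0;
}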
unsigned int i, count, last; int port = priv->chip_port; + bool do_tstamp; val = ocelot_read(ocelot, QS_INJ_STATUS); if (!(val & QS_INJ_STATUS_FIFO_RDY(BIT(grp))) || @@ -344,10 +345,12 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev) info.vid = skb_vlan_tag_get(skb); /* Check if timestamping is needed */ + do_tstamp = (ocelot_port_add_txtstamp_skb(ocelot_port, skb) == 0); + if (ocelot->ptp && shinfo->tx_flags & SKBTX_HW_TSTAMP) { info.rew_op = ocelot_port->ptp_cmd; if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) - info.rew_op |= (ocelot_port->ts_id % 4) << 3; + info.rew_op |= skb->cb[0] << 3; } ocelot_gen_ifh(ifh, &info); @@ -380,12 +383,9 @@ static int ocelot_port_xmit(struct sk_buff *skb, struct net_device *dev) dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; - if (!ocelot_port_add_txtstamp_skb(ocelot_port, skb)) { - ocelot_port->ts_id++; - return NETDEV_TX_OK; - } + if (!do_tstamp) + dev_kfree_skb_any(skb); - dev_kfree_skb_any(skb); return NETDEV_TX_OK; } diff --git a/drivers/net/ethernet/mscc/ocelot_vsc7514.c b/drivers/net/ethernet/mscc/ocelot_vsc7514.c index 65408bc994c4..dfb1535f26f2 100644 --- a/drivers/net/ethernet/mscc/ocelot_vsc7514.c +++ b/drivers/net/ethernet/mscc/ocelot_vsc7514.c @@ -806,17 +806,17 @@ static const struct vcap_field vsc7514_vcap_is2_keys[] = { [VCAP_IS2_HK_DIP_EQ_SIP] = {123, 1}, /* IP4_TCP_UDP (TYPE=100) */ [VCAP_IS2_HK_TCP] = {124, 1}, - [VCAP_IS2_HK_L4_SPORT] = {125, 16}, - [VCAP_IS2_HK_L4_DPORT] = {141, 16}, + [VCAP_IS2_HK_L4_DPORT] = {125, 16}, + [VCAP_IS2_HK_L4_SPORT] = {141, 16}, [VCAP_IS2_HK_L4_RNG] = {157, 8}, [VCAP_IS2_HK_L4_SPORT_EQ_DPORT] = {165, 1}, [VCAP_IS2_HK_L4_SEQUENCE_EQ0] = {166, 1}, - [VCAP_IS2_HK_L4_URG] = {167, 1}, - [VCAP_IS2_HK_L4_ACK] = {168, 1}, - [VCAP_IS2_HK_L4_PSH] = {169, 1}, - [VCAP_IS2_HK_L4_RST] = {170, 1}, - [VCAP_IS2_HK_L4_SYN] = {171, 1}, - [VCAP_IS2_HK_L4_FIN] = {172, 1}, + [VCAP_IS2_HK_L4_FIN] = {167, 1}, + [VCAP_IS2_HK_L4_SYN] = {168, 1}, + [VCAP_IS2_HK_L4_RST] = {169, 1}, + [VCAP_IS2_HK_L4_PSH] = {170, 1}, + [VCAP_IS2_HK_L4_ACK] = {171, 1}, + [VCAP_IS2_HK_L4_URG] = {172, 1}, [VCAP_IS2_HK_L4_1588_DOM] = {173, 8}, [VCAP_IS2_HK_L4_1588_VER] = {181, 4}, /* IP4_OTHER (TYPE=101) */ @@ -896,11 +896,137 @@ static struct ptp_clock_info ocelot_ptp_clock_info = { .enable = ocelot_ptp_enable, }; +static void mscc_ocelot_release_ports(struct ocelot *ocelot) +{ + int port; + + for (port = 0; port < ocelot->num_phys_ports; port++) { + struct ocelot_port_private *priv; + struct ocelot_port *ocelot_port; + + ocelot_port = ocelot->ports[port]; + if (!ocelot_port) + continue; + + ocelot_deinit_port(ocelot, port); + + priv = container_of(ocelot_port, struct ocelot_port_private, + port); + + unregister_netdev(priv->dev); + free_netdev(priv->dev); + } +} + +static int mscc_ocelot_init_ports(struct platform_device *pdev, + struct device_node *ports) +{ + struct ocelot *ocelot = platform_get_drvdata(pdev); + struct device_node *portnp; + int err; + + ocelot->ports = devm_kcalloc(ocelot->dev, ocelot->num_phys_ports, + sizeof(struct ocelot_port *), GFP_KERNEL); + if (!ocelot->ports) + return -ENOMEM; + + /* No NPI port */ + ocelot_configure_cpu(ocelot, -1, OCELOT_TAG_PREFIX_NONE, + OCELOT_TAG_PREFIX_NONE); + + for_each_available_child_of_node(ports, portnp) { + struct ocelot_port_private *priv; + struct ocelot_port *ocelot_port; + struct device_node *phy_node; + phy_interface_t phy_mode; + struct phy_device *phy; + struct regmap *target; + struct resource *res; + struct phy *serdes; + char res_name[8]; + 
u32 port; + + if (of_property_read_u32(portnp, "reg", &port)) + continue; + + snprintf(res_name, sizeof(res_name), "port%d", port); + + res = platform_get_resource_byname(pdev, IORESOURCE_MEM, + res_name); + target = ocelot_regmap_init(ocelot, res); + if (IS_ERR(target)) + continue; + + phy_node = of_parse_phandle(portnp, "phy-handle", 0); + if (!phy_node) + continue; + + phy = of_phy_find_device(phy_node); + of_node_put(phy_node); + if (!phy) + continue; + + err = ocelot_probe_port(ocelot, port, target, phy); + if (err) { + of_node_put(portnp); + return err; + } + + ocelot_port = ocelot->ports[port]; + priv = container_of(ocelot_port, struct ocelot_port_private, + port); + + of_get_phy_mode(portnp, &phy_mode); + + ocelot_port->phy_mode = phy_mode; + + switch (ocelot_port->phy_mode) { + case PHY_INTERFACE_MODE_NA: + continue; + case PHY_INTERFACE_MODE_SGMII: + break; + case PHY_INTERFACE_MODE_QSGMII: + /* Ensure clock signals and speed is set on all + * QSGMII links + */ + ocelot_port_writel(ocelot_port, + DEV_CLOCK_CFG_LINK_SPEED + (OCELOT_SPEED_1000), + DEV_CLOCK_CFG); + break; + default: + dev_err(ocelot->dev, + "invalid phy mode for port%d, (Q)SGMII only\n", + port); + of_node_put(portnp); + return -EINVAL; + } + + serdes = devm_of_phy_get(ocelot->dev, portnp, NULL); + if (IS_ERR(serdes)) { + err = PTR_ERR(serdes); + if (err == -EPROBE_DEFER) + dev_dbg(ocelot->dev, "deferring probe\n"); + else + dev_err(ocelot->dev, + "missing SerDes phys for port%d\n", + port); + + of_node_put(portnp); + return err; + } + + priv->serdes = serdes; + } + + return 0; +} + static int mscc_ocelot_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node; - struct device_node *ports, *portnp; int err, irq_xtr, irq_ptp_rdy; + struct device_node *ports; struct ocelot *ocelot; struct regmap *hsio; unsigned int i; @@ -985,20 +1111,24 @@ static int mscc_ocelot_probe(struct platform_device *pdev) ports = of_get_child_by_name(np, "ethernet-ports"); if (!ports) { - dev_err(&pdev->dev, "no ethernet-ports child node found\n"); + dev_err(ocelot->dev, "no ethernet-ports child node found\n"); return -ENODEV; } ocelot->num_phys_ports = of_get_child_count(ports); - ocelot->ports = devm_kcalloc(&pdev->dev, ocelot->num_phys_ports, - sizeof(struct ocelot_port *), GFP_KERNEL); - ocelot->vcap_is2_keys = vsc7514_vcap_is2_keys; ocelot->vcap_is2_actions = vsc7514_vcap_is2_actions; ocelot->vcap = vsc7514_vcap_props; - ocelot_init(ocelot); + err = ocelot_init(ocelot); + if (err) + goto out_put_ports; + + err = mscc_ocelot_init_ports(pdev, ports); + if (err) + goto out_put_ports; + if (ocelot->ptp) { err = ocelot_init_timestamp(ocelot, &ocelot_ptp_clock_info); if (err) { @@ -1008,96 +1138,6 @@ static int mscc_ocelot_probe(struct platform_device *pdev) } } - /* No NPI port */ - ocelot_configure_cpu(ocelot, -1, OCELOT_TAG_PREFIX_NONE, - OCELOT_TAG_PREFIX_NONE); - - for_each_available_child_of_node(ports, portnp) { - struct ocelot_port_private *priv; - struct ocelot_port *ocelot_port; - struct device_node *phy_node; - phy_interface_t phy_mode; - struct phy_device *phy; - struct regmap *target; - struct resource *res; - struct phy *serdes; - char res_name[8]; - u32 port; - - if (of_property_read_u32(portnp, "reg", &port)) - continue; - - snprintf(res_name, sizeof(res_name), "port%d", port); - - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - res_name); - target = ocelot_regmap_init(ocelot, res); - if (IS_ERR(target)) - continue; - - phy_node = of_parse_phandle(portnp, "phy-handle", 0); - if (!phy_node) 
- continue; - - phy = of_phy_find_device(phy_node); - of_node_put(phy_node); - if (!phy) - continue; - - err = ocelot_probe_port(ocelot, port, target, phy); - if (err) { - of_node_put(portnp); - goto out_put_ports; - } - - ocelot_port = ocelot->ports[port]; - priv = container_of(ocelot_port, struct ocelot_port_private, - port); - - of_get_phy_mode(portnp, &phy_mode); - - ocelot_port->phy_mode = phy_mode; - - switch (ocelot_port->phy_mode) { - case PHY_INTERFACE_MODE_NA: - continue; - case PHY_INTERFACE_MODE_SGMII: - break; - case PHY_INTERFACE_MODE_QSGMII: - /* Ensure clock signals and speed is set on all - * QSGMII links - */ - ocelot_port_writel(ocelot_port, - DEV_CLOCK_CFG_LINK_SPEED - (OCELOT_SPEED_1000), - DEV_CLOCK_CFG); - break; - default: - dev_err(ocelot->dev, - "invalid phy mode for port%d, (Q)SGMII only\n", - port); - of_node_put(portnp); - err = -EINVAL; - goto out_put_ports; - } - - serdes = devm_of_phy_get(ocelot->dev, portnp, NULL); - if (IS_ERR(serdes)) { - err = PTR_ERR(serdes); - if (err == -EPROBE_DEFER) - dev_dbg(ocelot->dev, "deferring probe\n"); - else - dev_err(ocelot->dev, - "missing SerDes phys for port%d\n", - port); - - of_node_put(portnp); - goto out_put_ports; - } - - priv->serdes = serdes; - } - register_netdevice_notifier(&ocelot_netdevice_nb); register_switchdev_notifier(&ocelot_switchdev_nb); register_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb); @@ -1114,6 +1154,7 @@ static int mscc_ocelot_remove(struct platform_device *pdev) struct ocelot *ocelot = platform_get_drvdata(pdev); ocelot_deinit_timestamp(ocelot); + mscc_ocelot_release_ports(ocelot); ocelot_deinit(ocelot); unregister_switchdev_blocking_notifier(&ocelot_switchdev_blocking_nb); unregister_switchdev_notifier(&ocelot_switchdev_nb); diff --git a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c index 6eb9fb9a1814..9c9ae33d84ce 100644 --- a/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c +++ b/drivers/net/ethernet/netronome/nfp/nfp_net_ethtool.c @@ -829,8 +829,8 @@ nfp_port_get_fecparam(struct net_device *netdev, struct nfp_eth_table_port *eth_port; struct nfp_port *port; - param->active_fec = ETHTOOL_FEC_NONE_BIT; - param->fec = ETHTOOL_FEC_NONE_BIT; + param->active_fec = ETHTOOL_FEC_NONE; + param->fec = ETHTOOL_FEC_NONE; port = nfp_port_from_netdev(netdev); eth_port = nfp_port_get_eth_port(port); diff --git a/drivers/net/ethernet/qlogic/qed/qed_dev.c b/drivers/net/ethernet/qlogic/qed/qed_dev.c index b8f076e4e6b8..3db181f3617a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_dev.c +++ b/drivers/net/ethernet/qlogic/qed/qed_dev.c @@ -4253,7 +4253,8 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | BIT(QED_MF_LLH_PROTO_CLSS) | BIT(QED_MF_LL2_NON_UNICAST) | - BIT(QED_MF_INTER_PF_SWITCH); + BIT(QED_MF_INTER_PF_SWITCH) | + BIT(QED_MF_DISABLE_ARFS); break; case NVM_CFG1_GLOB_MF_MODE_DEFAULT: cdev->mf_bits = BIT(QED_MF_LLH_MAC_CLSS) | @@ -4266,6 +4267,14 @@ static int qed_hw_get_nvm_info(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt) DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n", cdev->mf_bits); + + /* In CMT the PF is unknown when the GFS block processes the + * packet. Therefore cannot use searcher as it has a per PF + * database, and thus ARFS must be disabled. 
+ * + */ + if (QED_IS_CMT(cdev)) + cdev->mf_bits |= BIT(QED_MF_DISABLE_ARFS); } DP_INFO(p_hwfn, "Multi function mode is 0x%lx\n", diff --git a/drivers/net/ethernet/qlogic/qed/qed_l2.c b/drivers/net/ethernet/qlogic/qed/qed_l2.c index 4c6ac8862744..07824bf9d68d 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_l2.c +++ b/drivers/net/ethernet/qlogic/qed/qed_l2.c @@ -1980,6 +1980,9 @@ void qed_arfs_mode_configure(struct qed_hwfn *p_hwfn, struct qed_ptt *p_ptt, struct qed_arfs_config_params *p_cfg_params) { + if (test_bit(QED_MF_DISABLE_ARFS, &p_hwfn->cdev->mf_bits)) + return; + if (p_cfg_params->mode != QED_FILTER_CONFIG_MODE_DISABLE) { qed_gft_config(p_hwfn, p_ptt, p_hwfn->rel_pf_id, p_cfg_params->tcp, diff --git a/drivers/net/ethernet/qlogic/qed/qed_main.c b/drivers/net/ethernet/qlogic/qed/qed_main.c index f39f629242a1..50e5eb22e60a 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_main.c +++ b/drivers/net/ethernet/qlogic/qed/qed_main.c @@ -444,6 +444,8 @@ int qed_fill_dev_info(struct qed_dev *cdev, dev_info->fw_eng = FW_ENGINEERING_VERSION; dev_info->b_inter_pf_switch = test_bit(QED_MF_INTER_PF_SWITCH, &cdev->mf_bits); + if (!test_bit(QED_MF_DISABLE_ARFS, &cdev->mf_bits)) + dev_info->b_arfs_capable = true; dev_info->tx_switching = true; if (hw_info->b_wol_support == QED_WOL_SUPPORT_PME) diff --git a/drivers/net/ethernet/qlogic/qed/qed_sriov.c b/drivers/net/ethernet/qlogic/qed/qed_sriov.c index f1f75b6d0421..b8dc5c4591ef 100644 --- a/drivers/net/ethernet/qlogic/qed/qed_sriov.c +++ b/drivers/net/ethernet/qlogic/qed/qed_sriov.c @@ -71,6 +71,7 @@ static int qed_sp_vf_start(struct qed_hwfn *p_hwfn, struct qed_vf_info *p_vf) p_ramrod->personality = PERSONALITY_ETH; break; case QED_PCI_ETH_ROCE: + case QED_PCI_ETH_IWARP: p_ramrod->personality = PERSONALITY_RDMA_AND_ETH; break; default: diff --git a/drivers/net/ethernet/qlogic/qede/qede_filter.c b/drivers/net/ethernet/qlogic/qede/qede_filter.c index f961f65d9372..c59b72c90293 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_filter.c +++ b/drivers/net/ethernet/qlogic/qede/qede_filter.c @@ -311,6 +311,9 @@ int qede_alloc_arfs(struct qede_dev *edev) { int i; + if (!edev->dev_info.common.b_arfs_capable) + return -EINVAL; + edev->arfs = vzalloc(sizeof(*edev->arfs)); if (!edev->arfs) return -ENOMEM; diff --git a/drivers/net/ethernet/qlogic/qede/qede_main.c b/drivers/net/ethernet/qlogic/qede/qede_main.c index 140a392a81bb..9e1f41ba766c 100644 --- a/drivers/net/ethernet/qlogic/qede/qede_main.c +++ b/drivers/net/ethernet/qlogic/qede/qede_main.c @@ -804,7 +804,7 @@ static void qede_init_ndev(struct qede_dev *edev) NETIF_F_IP_CSUM | NETIF_F_IPV6_CSUM | NETIF_F_TSO | NETIF_F_TSO6 | NETIF_F_HW_TC; - if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) + if (edev->dev_info.common.b_arfs_capable) hw_features |= NETIF_F_NTUPLE; if (edev->dev_info.common.vxlan_enable || @@ -2274,7 +2274,7 @@ static void qede_unload(struct qede_dev *edev, enum qede_unload_mode mode, qede_vlan_mark_nonconfigured(edev); edev->ops->fastpath_stop(edev->cdev); - if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) { + if (edev->dev_info.common.b_arfs_capable) { qede_poll_for_freeing_arfs_filters(edev); qede_free_arfs(edev); } @@ -2341,10 +2341,9 @@ static int qede_load(struct qede_dev *edev, enum qede_load_mode mode, if (rc) goto err2; - if (!IS_VF(edev) && edev->dev_info.common.num_hwfns == 1) { - rc = qede_alloc_arfs(edev); - if (rc) - DP_NOTICE(edev, "aRFS memory allocation failed\n"); + if (qede_alloc_arfs(edev)) { + edev->ndev->features &= ~NETIF_F_NTUPLE; + 
edev->dev_info.common.b_arfs_capable = false; } qede_napi_add_enable(edev); diff --git a/drivers/net/ethernet/sfc/ef100.c b/drivers/net/ethernet/sfc/ef100.c index c54b7f8243f3..ffdb36715a49 100644 --- a/drivers/net/ethernet/sfc/ef100.c +++ b/drivers/net/ethernet/sfc/ef100.c @@ -490,6 +490,7 @@ static int ef100_pci_probe(struct pci_dev *pci_dev, if (fcw.offset > pci_resource_len(efx->pci_dev, fcw.bar) - ESE_GZ_FCW_LEN) { netif_err(efx, probe, efx->net_dev, "Func control window overruns BAR\n"); + rc = -EIO; goto fail; } diff --git a/drivers/net/ethernet/ti/cpsw_new.c b/drivers/net/ethernet/ti/cpsw_new.c index 8ed78577cded..15672d0a4de6 100644 --- a/drivers/net/ethernet/ti/cpsw_new.c +++ b/drivers/net/ethernet/ti/cpsw_new.c @@ -17,6 +17,7 @@ #include <linux/phy.h> #include <linux/phy/phy.h> #include <linux/delay.h> +#include <linux/pinctrl/consumer.h> #include <linux/pm_runtime.h> #include <linux/gpio/consumer.h> #include <linux/of.h> @@ -2070,9 +2071,61 @@ static int cpsw_remove(struct platform_device *pdev) return 0; } +static int __maybe_unused cpsw_suspend(struct device *dev) +{ + struct cpsw_common *cpsw = dev_get_drvdata(dev); + int i; + + rtnl_lock(); + + for (i = 0; i < cpsw->data.slaves; i++) { + struct net_device *ndev = cpsw->slaves[i].ndev; + + if (!(ndev && netif_running(ndev))) + continue; + + cpsw_ndo_stop(ndev); + } + + rtnl_unlock(); + + /* Select sleep pin state */ + pinctrl_pm_select_sleep_state(dev); + + return 0; +} + +static int __maybe_unused cpsw_resume(struct device *dev) +{ + struct cpsw_common *cpsw = dev_get_drvdata(dev); + int i; + + /* Select default pin state */ + pinctrl_pm_select_default_state(dev); + + /* shut up ASSERT_RTNL() warning in netif_set_real_num_tx/rx_queues */ + rtnl_lock(); + + for (i = 0; i < cpsw->data.slaves; i++) { + struct net_device *ndev = cpsw->slaves[i].ndev; + + if (!(ndev && netif_running(ndev))) + continue; + + cpsw_ndo_open(ndev); + } + + rtnl_unlock(); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(cpsw_pm_ops, cpsw_suspend, cpsw_resume); + static struct platform_driver cpsw_driver = { .driver = { .name = "cpsw-switch", + .pm = &cpsw_pm_ops, .of_match_table = cpsw_of_mtable, }, .probe = cpsw_probe, diff --git a/drivers/net/geneve.c b/drivers/net/geneve.c index c71f994fbc73..974a244f45ba 100644 --- a/drivers/net/geneve.c +++ b/drivers/net/geneve.c @@ -777,7 +777,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, struct net_device *dev, struct geneve_sock *gs4, struct flowi4 *fl4, - const struct ip_tunnel_info *info) + const struct ip_tunnel_info *info, + __be16 dport, __be16 sport) { bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct geneve_dev *geneve = netdev_priv(dev); @@ -793,6 +794,8 @@ static struct rtable *geneve_get_v4_rt(struct sk_buff *skb, fl4->flowi4_proto = IPPROTO_UDP; fl4->daddr = info->key.u.ipv4.dst; fl4->saddr = info->key.u.ipv4.src; + fl4->fl4_dport = dport; + fl4->fl4_sport = sport; tos = info->key.tos; if ((tos == 1) && !geneve->cfg.collect_md) { @@ -827,7 +830,8 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, struct net_device *dev, struct geneve_sock *gs6, struct flowi6 *fl6, - const struct ip_tunnel_info *info) + const struct ip_tunnel_info *info, + __be16 dport, __be16 sport) { bool use_cache = ip_tunnel_dst_cache_usable(skb, info); struct geneve_dev *geneve = netdev_priv(dev); @@ -843,6 +847,9 @@ static struct dst_entry *geneve_get_v6_dst(struct sk_buff *skb, fl6->flowi6_proto = IPPROTO_UDP; fl6->daddr = info->key.u.ipv6.dst; fl6->saddr = info->key.u.ipv6.src; + 
fl6->fl6_dport = dport; + fl6->fl6_sport = sport; + prio = info->key.tos; if ((prio == 1) && !geneve->cfg.collect_md) { prio = ip_tunnel_get_dsfield(ip_hdr(skb), skb); @@ -889,7 +896,9 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err; - rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info); + sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); + rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info, + geneve->cfg.info.key.tp_dst, sport); if (IS_ERR(rt)) return PTR_ERR(rt); @@ -919,7 +928,6 @@ static int geneve_xmit_skb(struct sk_buff *skb, struct net_device *dev, return -EMSGSIZE; } - sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); if (geneve->cfg.collect_md) { tos = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; @@ -974,7 +982,9 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, __be16 sport; int err; - dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info); + sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); + dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info, + geneve->cfg.info.key.tp_dst, sport); if (IS_ERR(dst)) return PTR_ERR(dst); @@ -1003,7 +1013,6 @@ static int geneve6_xmit_skb(struct sk_buff *skb, struct net_device *dev, return -EMSGSIZE; } - sport = udp_flow_src_port(geneve->net, skb, 1, USHRT_MAX, true); if (geneve->cfg.collect_md) { prio = ip_tunnel_ecn_encap(key->tos, ip_hdr(skb), skb); ttl = key->ttl; @@ -1085,13 +1094,18 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) { struct ip_tunnel_info *info = skb_tunnel_info(skb); struct geneve_dev *geneve = netdev_priv(dev); + __be16 sport; if (ip_tunnel_info_af(info) == AF_INET) { struct rtable *rt; struct flowi4 fl4; + struct geneve_sock *gs4 = rcu_dereference(geneve->sock4); + sport = udp_flow_src_port(geneve->net, skb, + 1, USHRT_MAX, true); - rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info); + rt = geneve_get_v4_rt(skb, dev, gs4, &fl4, info, + geneve->cfg.info.key.tp_dst, sport); if (IS_ERR(rt)) return PTR_ERR(rt); @@ -1101,9 +1115,13 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) } else if (ip_tunnel_info_af(info) == AF_INET6) { struct dst_entry *dst; struct flowi6 fl6; + struct geneve_sock *gs6 = rcu_dereference(geneve->sock6); + sport = udp_flow_src_port(geneve->net, skb, + 1, USHRT_MAX, true); - dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info); + dst = geneve_get_v6_dst(skb, dev, gs6, &fl6, info, + geneve->cfg.info.key.tp_dst, sport); if (IS_ERR(dst)) return PTR_ERR(dst); @@ -1114,8 +1132,7 @@ static int geneve_fill_metadata_dst(struct net_device *dev, struct sk_buff *skb) return -EINVAL; } - info->key.tp_src = udp_flow_src_port(geneve->net, skb, - 1, USHRT_MAX, true); + info->key.tp_src = sport; info->key.tp_dst = geneve->cfg.info.key.tp_dst; return 0; } diff --git a/drivers/net/hyperv/hyperv_net.h b/drivers/net/hyperv/hyperv_net.h index 2181d4538ab7..a0f338cf1424 100644 --- a/drivers/net/hyperv/hyperv_net.h +++ b/drivers/net/hyperv/hyperv_net.h @@ -847,6 +847,10 @@ struct nvsp_message { #define NETVSC_XDP_HDRM 256 +#define NETVSC_XFER_HEADER_SIZE(rng_cnt) \ + (offsetof(struct vmtransfer_page_packet_header, ranges) + \ + (rng_cnt) * sizeof(struct vmtransfer_page_range)) + struct multi_send_data { struct sk_buff *skb; /* skb containing the pkt */ struct hv_netvsc_packet *pkt; /* netvsc pkt pending */ @@ -974,6 +978,9 @@ struct net_device_context { /* Serial number of the VF to team with */ u32 vf_serial; + /* Is the current data 
path through the VF NIC? */ + bool data_path_is_vf; + /* Used to temporarily save the config info across hibernation */ struct netvsc_device_info *saved_netvsc_dev_info; }; diff --git a/drivers/net/hyperv/netvsc.c b/drivers/net/hyperv/netvsc.c index 41f5cf0bb997..5a57d1985bae 100644 --- a/drivers/net/hyperv/netvsc.c +++ b/drivers/net/hyperv/netvsc.c @@ -388,6 +388,15 @@ static int netvsc_init_buf(struct hv_device *device, net_device->recv_section_size = resp->sections[0].sub_alloc_size; net_device->recv_section_cnt = resp->sections[0].num_sub_allocs; + /* Ensure buffer will not overflow */ + if (net_device->recv_section_size < NETVSC_MTU_MIN || (u64)net_device->recv_section_size * + (u64)net_device->recv_section_cnt > (u64)buf_size) { + netdev_err(ndev, "invalid recv_section_size %u\n", + net_device->recv_section_size); + ret = -EINVAL; + goto cleanup; + } + /* Setup receive completion ring. * Add 1 to the recv_section_cnt because at least one entry in a * ring buffer has to be empty. @@ -460,6 +469,12 @@ static int netvsc_init_buf(struct hv_device *device, /* Parse the response */ net_device->send_section_size = init_packet->msg. v1_msg.send_send_buf_complete.section_size; + if (net_device->send_section_size < NETVSC_MTU_MIN) { + netdev_err(ndev, "invalid send_section_size %u\n", + net_device->send_section_size); + ret = -EINVAL; + goto cleanup; + } /* Section count is simply the size divided by the section size. */ net_device->send_section_cnt = buf_size / net_device->send_section_size; @@ -731,12 +746,49 @@ static void netvsc_send_completion(struct net_device *ndev, int budget) { const struct nvsp_message *nvsp_packet = hv_pkt_data(desc); + u32 msglen = hv_pkt_datalen(desc); + + /* Ensure packet is big enough to read header fields */ + if (msglen < sizeof(struct nvsp_message_header)) { + netdev_err(ndev, "nvsp_message length too small: %u\n", msglen); + return; + } switch (nvsp_packet->hdr.msg_type) { case NVSP_MSG_TYPE_INIT_COMPLETE: + if (msglen < sizeof(struct nvsp_message_header) + + sizeof(struct nvsp_message_init_complete)) { + netdev_err(ndev, "nvsp_msg length too small: %u\n", + msglen); + return; + } + fallthrough; + case NVSP_MSG1_TYPE_SEND_RECV_BUF_COMPLETE: + if (msglen < sizeof(struct nvsp_message_header) + + sizeof(struct nvsp_1_message_send_receive_buffer_complete)) { + netdev_err(ndev, "nvsp_msg1 length too small: %u\n", + msglen); + return; + } + fallthrough; + case NVSP_MSG1_TYPE_SEND_SEND_BUF_COMPLETE: + if (msglen < sizeof(struct nvsp_message_header) + + sizeof(struct nvsp_1_message_send_send_buffer_complete)) { + netdev_err(ndev, "nvsp_msg1 length too small: %u\n", + msglen); + return; + } + fallthrough; + case NVSP_MSG5_TYPE_SUBCHANNEL: + if (msglen < sizeof(struct nvsp_message_header) + + sizeof(struct nvsp_5_subchannel_complete)) { + netdev_err(ndev, "nvsp_msg5 length too small: %u\n", + msglen); + return; + } /* Copy the response back */ memcpy(&net_device->channel_init_pkt, nvsp_packet, sizeof(struct nvsp_message)); @@ -1117,19 +1169,28 @@ static void enq_receive_complete(struct net_device *ndev, static int netvsc_receive(struct net_device *ndev, struct netvsc_device *net_device, struct netvsc_channel *nvchan, - const struct vmpacket_descriptor *desc, - const struct nvsp_message *nvsp) + const struct vmpacket_descriptor *desc) { struct net_device_context *net_device_ctx = netdev_priv(ndev); struct vmbus_channel *channel = nvchan->channel; const struct vmtransfer_page_packet_header *vmxferpage_packet = container_of(desc, const struct 
vmtransfer_page_packet_header, d); + const struct nvsp_message *nvsp = hv_pkt_data(desc); + u32 msglen = hv_pkt_datalen(desc); u16 q_idx = channel->offermsg.offer.sub_channel_index; char *recv_buf = net_device->recv_buf; u32 status = NVSP_STAT_SUCCESS; int i; int count = 0; + /* Ensure packet is big enough to read header fields */ + if (msglen < sizeof(struct nvsp_message_header)) { + netif_err(net_device_ctx, rx_err, ndev, + "invalid nvsp header, length too small: %u\n", + msglen); + return 0; + } + /* Make sure this is a valid nvsp packet */ if (unlikely(nvsp->hdr.msg_type != NVSP_MSG1_TYPE_SEND_RNDIS_PKT)) { netif_err(net_device_ctx, rx_err, ndev, @@ -1138,6 +1199,14 @@ static int netvsc_receive(struct net_device *ndev, return 0; } + /* Validate xfer page pkt header */ + if ((desc->offset8 << 3) < sizeof(struct vmtransfer_page_packet_header)) { + netif_err(net_device_ctx, rx_err, ndev, + "Invalid xfer page pkt, offset too small: %u\n", + desc->offset8 << 3); + return 0; + } + if (unlikely(vmxferpage_packet->xfer_pageset_id != NETVSC_RECEIVE_BUFFER_ID)) { netif_err(net_device_ctx, rx_err, ndev, "Invalid xfer page set id - expecting %x got %x\n", @@ -1148,6 +1217,14 @@ static int netvsc_receive(struct net_device *ndev, count = vmxferpage_packet->range_cnt; + /* Check count for a valid value */ + if (NETVSC_XFER_HEADER_SIZE(count) > desc->offset8 << 3) { + netif_err(net_device_ctx, rx_err, ndev, + "Range count is not valid: %d\n", + count); + return 0; + } + /* Each range represents 1 RNDIS pkt that contains 1 ethernet frame */ for (i = 0; i < count; i++) { u32 offset = vmxferpage_packet->ranges[i].byte_offset; @@ -1155,7 +1232,8 @@ static int netvsc_receive(struct net_device *ndev, void *data; int ret; - if (unlikely(offset + buflen > net_device->recv_buf_size)) { + if (unlikely(offset > net_device->recv_buf_size || + buflen > net_device->recv_buf_size - offset)) { nvchan->rsc.cnt = 0; status = NVSP_STAT_FAIL; netif_err(net_device_ctx, rx_err, ndev, @@ -1194,6 +1272,13 @@ static void netvsc_send_table(struct net_device *ndev, u32 count, offset, *tab; int i; + /* Ensure packet is big enough to read send_table fields */ + if (msglen < sizeof(struct nvsp_message_header) + + sizeof(struct nvsp_5_send_indirect_table)) { + netdev_err(ndev, "nvsp_v5_msg length too small: %u\n", msglen); + return; + } + count = nvmsg->msg.v5_msg.send_table.count; offset = nvmsg->msg.v5_msg.send_table.offset; @@ -1225,10 +1310,18 @@ static void netvsc_send_table(struct net_device *ndev, } static void netvsc_send_vf(struct net_device *ndev, - const struct nvsp_message *nvmsg) + const struct nvsp_message *nvmsg, + u32 msglen) { struct net_device_context *net_device_ctx = netdev_priv(ndev); + /* Ensure packet is big enough to read its fields */ + if (msglen < sizeof(struct nvsp_message_header) + + sizeof(struct nvsp_4_send_vf_association)) { + netdev_err(ndev, "nvsp_v4_msg length too small: %u\n", msglen); + return; + } + net_device_ctx->vf_alloc = nvmsg->msg.v4_msg.vf_assoc.allocated; net_device_ctx->vf_serial = nvmsg->msg.v4_msg.vf_assoc.serial; netdev_info(ndev, "VF slot %u %s\n", @@ -1238,16 +1331,24 @@ static void netvsc_send_vf(struct net_device *ndev, static void netvsc_receive_inband(struct net_device *ndev, struct netvsc_device *nvscdev, - const struct nvsp_message *nvmsg, - u32 msglen) + const struct vmpacket_descriptor *desc) { + const struct nvsp_message *nvmsg = hv_pkt_data(desc); + u32 msglen = hv_pkt_datalen(desc); + + /* Ensure packet is big enough to read header fields */ + if (msglen < sizeof(struct 
nvsp_message_header)) { + netdev_err(ndev, "inband nvsp_message length too small: %u\n", msglen); + return; + } + switch (nvmsg->hdr.msg_type) { case NVSP_MSG5_TYPE_SEND_INDIRECTION_TABLE: netvsc_send_table(ndev, nvscdev, nvmsg, msglen); break; case NVSP_MSG4_TYPE_SEND_VF_ASSOCIATION: - netvsc_send_vf(ndev, nvmsg); + netvsc_send_vf(ndev, nvmsg, msglen); break; } } @@ -1261,23 +1362,20 @@ static int netvsc_process_raw_pkt(struct hv_device *device, { struct vmbus_channel *channel = nvchan->channel; const struct nvsp_message *nvmsg = hv_pkt_data(desc); - u32 msglen = hv_pkt_datalen(desc); trace_nvsp_recv(ndev, channel, nvmsg); switch (desc->type) { case VM_PKT_COMP: - netvsc_send_completion(ndev, net_device, channel, - desc, budget); + netvsc_send_completion(ndev, net_device, channel, desc, budget); break; case VM_PKT_DATA_USING_XFER_PAGES: - return netvsc_receive(ndev, net_device, nvchan, - desc, nvmsg); + return netvsc_receive(ndev, net_device, nvchan, desc); break; case VM_PKT_DATA_INBAND: - netvsc_receive_inband(ndev, net_device, nvmsg, msglen); + netvsc_receive_inband(ndev, net_device, desc); break; default: diff --git a/drivers/net/hyperv/netvsc_drv.c b/drivers/net/hyperv/netvsc_drv.c index 64b0a74c1523..9869e390875e 100644 --- a/drivers/net/hyperv/netvsc_drv.c +++ b/drivers/net/hyperv/netvsc_drv.c @@ -748,6 +748,13 @@ void netvsc_linkstatus_callback(struct net_device *net, struct netvsc_reconfig *event; unsigned long flags; + /* Ensure the packet is big enough to access its fields */ + if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(struct rndis_indicate_status)) { + netdev_err(net, "invalid rndis_indicate_status packet, len: %u\n", + resp->msg_len); + return; + } + /* Update the physical link speed when changing to another vSwitch */ if (indicate->status == RNDIS_STATUS_LINK_SPEED_CHANGE) { u32 speed; @@ -2366,7 +2373,16 @@ static int netvsc_register_vf(struct net_device *vf_netdev) return NOTIFY_OK; } -/* VF up/down change detected, schedule to change data path */ +/* Change the data path when VF UP/DOWN/CHANGE are detected. + * + * Typically a UP or DOWN event is followed by a CHANGE event, so + * net_device_ctx->data_path_is_vf is used to cache the current data path + * to avoid the duplicate call of netvsc_switch_datapath() and the duplicate + * message. + * + * During hibernation, if a VF NIC driver (e.g. mlx5) preserves the network + * interface, there is only the CHANGE event and no UP or DOWN event. + */ static int netvsc_vf_changed(struct net_device *vf_netdev) { struct net_device_context *net_device_ctx; @@ -2383,6 +2399,10 @@ static int netvsc_vf_changed(struct net_device *vf_netdev) if (!netvsc_dev) return NOTIFY_DONE; + if (net_device_ctx->data_path_is_vf == vf_is_up) + return NOTIFY_OK; + net_device_ctx->data_path_is_vf = vf_is_up; + netvsc_switch_datapath(ndev, vf_is_up); netdev_info(ndev, "Data path switched %s VF: %s\n", vf_is_up ? 
"to" : "from", vf_netdev->name); @@ -2587,8 +2607,8 @@ static int netvsc_remove(struct hv_device *dev) static int netvsc_suspend(struct hv_device *dev) { struct net_device_context *ndev_ctx; - struct net_device *vf_netdev, *net; struct netvsc_device *nvdev; + struct net_device *net; int ret; net = hv_get_drvdata(dev); @@ -2604,10 +2624,6 @@ static int netvsc_suspend(struct hv_device *dev) goto out; } - vf_netdev = rtnl_dereference(ndev_ctx->vf_netdev); - if (vf_netdev) - netvsc_unregister_vf(vf_netdev); - /* Save the current config info */ ndev_ctx->saved_netvsc_dev_info = netvsc_devinfo_get(nvdev); @@ -2628,6 +2644,12 @@ static int netvsc_resume(struct hv_device *dev) rtnl_lock(); net_device_ctx = netdev_priv(net); + + /* Reset the data path to the netvsc NIC before re-opening the vmbus + * channel. Later netvsc_netdev_event() will switch the data path to + * the VF upon the UP or CHANGE event. + */ + net_device_ctx->data_path_is_vf = false; device_info = net_device_ctx->saved_netvsc_dev_info; ret = netvsc_attach(net, device_info); @@ -2695,6 +2717,7 @@ static int netvsc_netdev_event(struct notifier_block *this, return netvsc_unregister_vf(event_dev); case NETDEV_UP: case NETDEV_DOWN: + case NETDEV_CHANGE: return netvsc_vf_changed(event_dev); default: return NOTIFY_DONE; diff --git a/drivers/net/hyperv/rndis_filter.c b/drivers/net/hyperv/rndis_filter.c index b81ceba38218..12ad471ac5e1 100644 --- a/drivers/net/hyperv/rndis_filter.c +++ b/drivers/net/hyperv/rndis_filter.c @@ -275,6 +275,16 @@ static void rndis_filter_receive_response(struct net_device *ndev, return; } + /* Ensure the packet is big enough to read req_id. Req_id is the 1st + * field in any request/response message, so the payload should have at + * least sizeof(u32) bytes + */ + if (resp->msg_len - RNDIS_HEADER_SIZE < sizeof(u32)) { + netdev_err(ndev, "rndis msg_len too small: %u\n", + resp->msg_len); + return; + } + spin_lock_irqsave(&dev->request_lock, flags); list_for_each_entry(request, &dev->req_list, list_ent) { /* @@ -331,8 +341,9 @@ static void rndis_filter_receive_response(struct net_device *ndev, * Get the Per-Packet-Info with the specified type * return NULL if not found. 
*/ -static inline void *rndis_get_ppi(struct rndis_packet *rpkt, - u32 type, u8 internal) +static inline void *rndis_get_ppi(struct net_device *ndev, + struct rndis_packet *rpkt, + u32 rpkt_len, u32 type, u8 internal) { struct rndis_per_packet_info *ppi; int len; @@ -340,11 +351,36 @@ static inline void *rndis_get_ppi(struct rndis_packet *rpkt, if (rpkt->per_pkt_info_offset == 0) return NULL; + /* Validate info_offset and info_len */ + if (rpkt->per_pkt_info_offset < sizeof(struct rndis_packet) || + rpkt->per_pkt_info_offset > rpkt_len) { + netdev_err(ndev, "Invalid per_pkt_info_offset: %u\n", + rpkt->per_pkt_info_offset); + return NULL; + } + + if (rpkt->per_pkt_info_len > rpkt_len - rpkt->per_pkt_info_offset) { + netdev_err(ndev, "Invalid per_pkt_info_len: %u\n", + rpkt->per_pkt_info_len); + return NULL; + } + ppi = (struct rndis_per_packet_info *)((ulong)rpkt + rpkt->per_pkt_info_offset); len = rpkt->per_pkt_info_len; while (len > 0) { + /* Validate ppi_offset and ppi_size */ + if (ppi->size > len) { + netdev_err(ndev, "Invalid ppi size: %u\n", ppi->size); + continue; + } + + if (ppi->ppi_offset >= ppi->size) { + netdev_err(ndev, "Invalid ppi_offset: %u\n", ppi->ppi_offset); + continue; + } + if (ppi->type == type && ppi->internal == internal) return (void *)((ulong)ppi + ppi->ppi_offset); len -= ppi->size; @@ -388,14 +424,29 @@ static int rndis_filter_receive_data(struct net_device *ndev, const struct ndis_pkt_8021q_info *vlan; const struct rndis_pktinfo_id *pktinfo_id; const u32 *hash_info; - u32 data_offset; + u32 data_offset, rpkt_len; void *data; bool rsc_more = false; int ret; + /* Ensure data_buflen is big enough to read header fields */ + if (data_buflen < RNDIS_HEADER_SIZE + sizeof(struct rndis_packet)) { + netdev_err(ndev, "invalid rndis pkt, data_buflen too small: %u\n", + data_buflen); + return NVSP_STAT_FAIL; + } + + /* Validate rndis_pkt offset */ + if (rndis_pkt->data_offset >= data_buflen - RNDIS_HEADER_SIZE) { + netdev_err(ndev, "invalid rndis packet offset: %u\n", + rndis_pkt->data_offset); + return NVSP_STAT_FAIL; + } + /* Remove the rndis header and pass it back up the stack */ data_offset = RNDIS_HEADER_SIZE + rndis_pkt->data_offset; + rpkt_len = data_buflen - RNDIS_HEADER_SIZE; data_buflen -= data_offset; /* @@ -410,13 +461,13 @@ static int rndis_filter_receive_data(struct net_device *ndev, return NVSP_STAT_FAIL; } - vlan = rndis_get_ppi(rndis_pkt, IEEE_8021Q_INFO, 0); + vlan = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, IEEE_8021Q_INFO, 0); - csum_info = rndis_get_ppi(rndis_pkt, TCPIP_CHKSUM_PKTINFO, 0); + csum_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, TCPIP_CHKSUM_PKTINFO, 0); - hash_info = rndis_get_ppi(rndis_pkt, NBL_HASH_VALUE, 0); + hash_info = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, NBL_HASH_VALUE, 0); - pktinfo_id = rndis_get_ppi(rndis_pkt, RNDIS_PKTINFO_ID, 1); + pktinfo_id = rndis_get_ppi(ndev, rndis_pkt, rpkt_len, RNDIS_PKTINFO_ID, 1); data = (void *)msg + data_offset; @@ -474,6 +525,14 @@ int rndis_filter_receive(struct net_device *ndev, if (netif_msg_rx_status(net_device_ctx)) dump_rndis_message(ndev, rndis_msg); + /* Validate incoming rndis_message packet */ + if (buflen < RNDIS_HEADER_SIZE || rndis_msg->msg_len < RNDIS_HEADER_SIZE || + buflen < rndis_msg->msg_len) { + netdev_err(ndev, "Invalid rndis_msg (buflen: %u, msg_len: %u)\n", + buflen, rndis_msg->msg_len); + return NVSP_STAT_FAIL; + } + switch (rndis_msg->ndis_msg_type) { case RNDIS_MSG_PACKET: return rndis_filter_receive_data(ndev, net_dev, nvchan, diff --git 
a/drivers/net/ieee802154/adf7242.c b/drivers/net/ieee802154/adf7242.c index c11f32f644db..7db9cbd0f5de 100644 --- a/drivers/net/ieee802154/adf7242.c +++ b/drivers/net/ieee802154/adf7242.c @@ -882,7 +882,9 @@ static int adf7242_rx(struct adf7242_local *lp) int ret; u8 lqi, len_u8, *data; - adf7242_read_reg(lp, 0, &len_u8); + ret = adf7242_read_reg(lp, 0, &len_u8); + if (ret) + return ret; len = len_u8; diff --git a/drivers/net/ieee802154/ca8210.c b/drivers/net/ieee802154/ca8210.c index e04c3b60cae7..4eb64709d44c 100644 --- a/drivers/net/ieee802154/ca8210.c +++ b/drivers/net/ieee802154/ca8210.c @@ -2925,6 +2925,7 @@ static int ca8210_dev_com_init(struct ca8210_priv *priv) ); if (!priv->irq_workqueue) { dev_crit(&priv->spi->dev, "alloc of irq_workqueue failed!\n"); + destroy_workqueue(priv->mlme_workqueue); return -ENOMEM; } diff --git a/drivers/net/ipa/ipa_table.c b/drivers/net/ipa/ipa_table.c index 2098ca2f2c90..b3790aa952a1 100644 --- a/drivers/net/ipa/ipa_table.c +++ b/drivers/net/ipa/ipa_table.c @@ -521,7 +521,7 @@ static void ipa_filter_tuple_zero(struct ipa_endpoint *endpoint) val = ioread32(endpoint->ipa->reg_virt + offset); /* Zero all filter-related fields, preserving the rest */ - u32_replace_bits(val, 0, IPA_REG_ENDP_FILTER_HASH_MSK_ALL); + u32p_replace_bits(&val, 0, IPA_REG_ENDP_FILTER_HASH_MSK_ALL); iowrite32(val, endpoint->ipa->reg_virt + offset); } @@ -573,7 +573,7 @@ static void ipa_route_tuple_zero(struct ipa *ipa, u32 route_id) val = ioread32(ipa->reg_virt + offset); /* Zero all route-related fields, preserving the rest */ - u32_replace_bits(val, 0, IPA_REG_ENDP_ROUTER_HASH_MSK_ALL); + u32p_replace_bits(&val, 0, IPA_REG_ENDP_ROUTER_HASH_MSK_ALL); iowrite32(val, ipa->reg_virt + offset); } diff --git a/drivers/net/phy/phy.c b/drivers/net/phy/phy.c index 735a806045ac..8947d58f2a25 100644 --- a/drivers/net/phy/phy.c +++ b/drivers/net/phy/phy.c @@ -996,7 +996,7 @@ void phy_stop(struct phy_device *phydev) { struct net_device *dev = phydev->attached_dev; - if (!phy_is_started(phydev)) { + if (!phy_is_started(phydev) && phydev->state != PHY_DOWN) { WARN(1, "called from state %s\n", phy_state_to_str(phydev->state)); return; diff --git a/drivers/net/phy/phy_device.c b/drivers/net/phy/phy_device.c index 8adfbad0a1e8..5dab6be6fc38 100644 --- a/drivers/net/phy/phy_device.c +++ b/drivers/net/phy/phy_device.c @@ -1143,10 +1143,6 @@ int phy_init_hw(struct phy_device *phydev) if (ret < 0) return ret; - ret = phy_disable_interrupts(phydev); - if (ret) - return ret; - if (phydev->drv->config_init) ret = phydev->drv->config_init(phydev); @@ -1423,6 +1419,10 @@ int phy_attach_direct(struct net_device *dev, struct phy_device *phydev, if (err) goto error; + err = phy_disable_interrupts(phydev); + if (err) + return err; + phy_resume(phydev); phy_led_triggers_register(phydev); @@ -1682,7 +1682,8 @@ void phy_detach(struct phy_device *phydev) phy_led_triggers_unregister(phydev); - module_put(phydev->mdio.dev.driver->owner); + if (phydev->mdio.dev.driver) + module_put(phydev->mdio.dev.driver->owner); /* If the device had no specific driver before (i.e. 
- it * was using the generic driver), we unbind the device diff --git a/drivers/net/usb/pegasus.c b/drivers/net/usb/pegasus.c index e92cb51a2c77..26b4e48bf91f 100644 --- a/drivers/net/usb/pegasus.c +++ b/drivers/net/usb/pegasus.c @@ -124,62 +124,31 @@ static void async_ctrl_callback(struct urb *urb) static int get_registers(pegasus_t *pegasus, __u16 indx, __u16 size, void *data) { - u8 *buf; - int ret; - - buf = kmalloc(size, GFP_NOIO); - if (!buf) - return -ENOMEM; - - ret = usb_control_msg(pegasus->usb, usb_rcvctrlpipe(pegasus->usb, 0), - PEGASUS_REQ_GET_REGS, PEGASUS_REQT_READ, 0, - indx, buf, size, 1000); - if (ret < 0) - netif_dbg(pegasus, drv, pegasus->net, - "%s returned %d\n", __func__, ret); - else if (ret <= size) - memcpy(data, buf, ret); - kfree(buf); - return ret; + return usb_control_msg_recv(pegasus->usb, 0, PEGASUS_REQ_GET_REGS, + PEGASUS_REQT_READ, 0, indx, data, size, + 1000, GFP_NOIO); } static int set_registers(pegasus_t *pegasus, __u16 indx, __u16 size, const void *data) { - u8 *buf; - int ret; - - buf = kmemdup(data, size, GFP_NOIO); - if (!buf) - return -ENOMEM; - - ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0), - PEGASUS_REQ_SET_REGS, PEGASUS_REQT_WRITE, 0, - indx, buf, size, 100); - if (ret < 0) - netif_dbg(pegasus, drv, pegasus->net, - "%s returned %d\n", __func__, ret); - kfree(buf); - return ret; + return usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REGS, + PEGASUS_REQT_WRITE, 0, indx, data, size, + 1000, GFP_NOIO); } +/* + * There is only one way to write to a single ADM8511 register and this is via + * specific control request. 'data' is ignored by the device, but it is here to + * not break the API. + */ static int set_register(pegasus_t *pegasus, __u16 indx, __u8 data) { - u8 *buf; - int ret; - - buf = kmemdup(&data, 1, GFP_NOIO); - if (!buf) - return -ENOMEM; + void *buf = &data; - ret = usb_control_msg(pegasus->usb, usb_sndctrlpipe(pegasus->usb, 0), - PEGASUS_REQ_SET_REG, PEGASUS_REQT_WRITE, data, - indx, buf, 1, 1000); - if (ret < 0) - netif_dbg(pegasus, drv, pegasus->net, - "%s returned %d\n", __func__, ret); - kfree(buf); - return ret; + return usb_control_msg_send(pegasus->usb, 0, PEGASUS_REQ_SET_REG, + PEGASUS_REQT_WRITE, data, indx, buf, 1, + 1000, GFP_NOIO); } static int update_eth_regs_async(pegasus_t *pegasus) diff --git a/drivers/net/usb/rndis_host.c b/drivers/net/usb/rndis_host.c index bd9c07888ebb..6fa7a009a24a 100644 --- a/drivers/net/usb/rndis_host.c +++ b/drivers/net/usb/rndis_host.c @@ -201,7 +201,7 @@ int rndis_command(struct usbnet *dev, struct rndis_msg_hdr *buf, int buflen) dev_dbg(&info->control->dev, "rndis response error, code %d\n", retval); } - msleep(20); + msleep(40); } dev_dbg(&info->control->dev, "rndis response timeout\n"); return -ETIMEDOUT; diff --git a/drivers/net/usb/rtl8150.c b/drivers/net/usb/rtl8150.c index 733f120c852b..b3a0b188b1a1 100644 --- a/drivers/net/usb/rtl8150.c +++ b/drivers/net/usb/rtl8150.c @@ -152,36 +152,16 @@ static const char driver_name [] = "rtl8150"; */ static int get_registers(rtl8150_t * dev, u16 indx, u16 size, void *data) { - void *buf; - int ret; - - buf = kmalloc(size, GFP_NOIO); - if (!buf) - return -ENOMEM; - - ret = usb_control_msg(dev->udev, usb_rcvctrlpipe(dev->udev, 0), - RTL8150_REQ_GET_REGS, RTL8150_REQT_READ, - indx, 0, buf, size, 500); - if (ret > 0 && ret <= size) - memcpy(data, buf, ret); - kfree(buf); - return ret; + return usb_control_msg_recv(dev->udev, 0, RTL8150_REQ_GET_REGS, + RTL8150_REQT_READ, indx, 0, data, size, + 1000, GFP_NOIO); } static 
int set_registers(rtl8150_t * dev, u16 indx, u16 size, const void *data) { - void *buf; - int ret; - - buf = kmemdup(data, size, GFP_NOIO); - if (!buf) - return -ENOMEM; - - ret = usb_control_msg(dev->udev, usb_sndctrlpipe(dev->udev, 0), - RTL8150_REQ_SET_REGS, RTL8150_REQT_WRITE, - indx, 0, buf, size, 500); - kfree(buf); - return ret; + return usb_control_msg_send(dev->udev, 0, RTL8150_REQ_SET_REGS, + RTL8150_REQT_WRITE, indx, 0, data, size, + 1000, GFP_NOIO); } static void async_set_reg_cb(struct urb *urb) diff --git a/drivers/net/wan/hdlc_cisco.c b/drivers/net/wan/hdlc_cisco.c index 444130655d8e..cb5898f7d68c 100644 --- a/drivers/net/wan/hdlc_cisco.c +++ b/drivers/net/wan/hdlc_cisco.c @@ -118,6 +118,7 @@ static void cisco_keepalive_send(struct net_device *dev, u32 type, skb_put(skb, sizeof(struct cisco_packet)); skb->priority = TC_PRIO_CONTROL; skb->dev = dev; + skb->protocol = htons(ETH_P_HDLC); skb_reset_network_header(skb); dev_queue_xmit(skb); diff --git a/drivers/net/wan/hdlc_fr.c b/drivers/net/wan/hdlc_fr.c index 9acad651ea1f..d6cfd51613ed 100644 --- a/drivers/net/wan/hdlc_fr.c +++ b/drivers/net/wan/hdlc_fr.c @@ -433,6 +433,8 @@ static netdev_tx_t pvc_xmit(struct sk_buff *skb, struct net_device *dev) if (pvc->state.fecn) /* TX Congestion counter */ dev->stats.tx_compressed++; skb->dev = pvc->frad; + skb->protocol = htons(ETH_P_HDLC); + skb_reset_network_header(skb); dev_queue_xmit(skb); return NETDEV_TX_OK; } @@ -555,6 +557,7 @@ static void fr_lmi_send(struct net_device *dev, int fullrep) skb_put(skb, i); skb->priority = TC_PRIO_CONTROL; skb->dev = dev; + skb->protocol = htons(ETH_P_HDLC); skb_reset_network_header(skb); dev_queue_xmit(skb); @@ -1041,7 +1044,7 @@ static void pvc_setup(struct net_device *dev) { dev->type = ARPHRD_DLCI; dev->flags = IFF_POINTOPOINT; - dev->hard_header_len = 10; + dev->hard_header_len = 0; dev->addr_len = 2; netif_keep_dst(dev); } @@ -1093,6 +1096,7 @@ static int fr_add_pvc(struct net_device *frad, unsigned int dlci, int type) dev->mtu = HDLC_MAX_MTU; dev->min_mtu = 68; dev->max_mtu = HDLC_MAX_MTU; + dev->needed_headroom = 10; dev->priv_flags |= IFF_NO_QUEUE; dev->ml_priv = pvc; diff --git a/drivers/net/wan/hdlc_ppp.c b/drivers/net/wan/hdlc_ppp.c index 48ced3912576..64f855651336 100644 --- a/drivers/net/wan/hdlc_ppp.c +++ b/drivers/net/wan/hdlc_ppp.c @@ -251,6 +251,7 @@ static void ppp_tx_cp(struct net_device *dev, u16 pid, u8 code, skb->priority = TC_PRIO_CONTROL; skb->dev = dev; + skb->protocol = htons(ETH_P_HDLC); skb_reset_network_header(skb); skb_queue_tail(&tx_queue, skb); } @@ -383,11 +384,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, } for (opt = data; len; len -= opt[1], opt += opt[1]) { - if (len < 2 || len < opt[1]) { - dev->stats.rx_errors++; - kfree(out); - return; /* bad packet, drop silently */ - } + if (len < 2 || opt[1] < 2 || len < opt[1]) + goto err_out; if (pid == PID_LCP) switch (opt[0]) { @@ -395,6 +393,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, continue; /* MRU always OK and > 1500 bytes? 
*/ case LCP_OPTION_ACCM: /* async control character map */ + if (opt[1] < sizeof(valid_accm)) + goto err_out; if (!memcmp(opt, valid_accm, sizeof(valid_accm))) continue; @@ -406,6 +406,8 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, } break; case LCP_OPTION_MAGIC: + if (len < 6) + goto err_out; if (opt[1] != 6 || (!opt[2] && !opt[3] && !opt[4] && !opt[5])) break; /* reject invalid magic number */ @@ -424,6 +426,11 @@ static void ppp_cp_parse_cr(struct net_device *dev, u16 pid, u8 id, ppp_cp_event(dev, pid, RCR_GOOD, CP_CONF_ACK, id, req_len, data); kfree(out); + return; + +err_out: + dev->stats.rx_errors++; + kfree(out); } static int ppp_rx(struct sk_buff *skb) diff --git a/drivers/net/wan/lapbether.c b/drivers/net/wan/lapbether.c index 732a6c1851f5..b6be2454b8bd 100644 --- a/drivers/net/wan/lapbether.c +++ b/drivers/net/wan/lapbether.c @@ -198,8 +198,6 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb) struct net_device *dev; int size = skb->len; - skb->protocol = htons(ETH_P_X25); - ptr = skb_push(skb, 2); *ptr++ = size % 256; @@ -210,6 +208,8 @@ static void lapbeth_data_transmit(struct net_device *ndev, struct sk_buff *skb) skb->dev = dev = lapbeth->ethdev; + skb->protocol = htons(ETH_P_DEC); + skb_reset_network_header(skb); dev_hard_header(skb, dev, ETH_P_DEC, bcast_addr, NULL, 0); diff --git a/drivers/net/wireguard/noise.c b/drivers/net/wireguard/noise.c index 3dd3b76790d0..c0cfd9b36c0b 100644 --- a/drivers/net/wireguard/noise.c +++ b/drivers/net/wireguard/noise.c @@ -87,15 +87,12 @@ static void handshake_zero(struct noise_handshake *handshake) void wg_noise_handshake_clear(struct noise_handshake *handshake) { + down_write(&handshake->lock); wg_index_hashtable_remove( handshake->entry.peer->device->index_hashtable, &handshake->entry); - down_write(&handshake->lock); handshake_zero(handshake); up_write(&handshake->lock); - wg_index_hashtable_remove( - handshake->entry.peer->device->index_hashtable, - &handshake->entry); } static struct noise_keypair *keypair_create(struct wg_peer *peer) diff --git a/drivers/net/wireguard/peerlookup.c b/drivers/net/wireguard/peerlookup.c index e4deb331476b..f2783aa7a88f 100644 --- a/drivers/net/wireguard/peerlookup.c +++ b/drivers/net/wireguard/peerlookup.c @@ -167,9 +167,13 @@ bool wg_index_hashtable_replace(struct index_hashtable *table, struct index_hashtable_entry *old, struct index_hashtable_entry *new) { - if (unlikely(hlist_unhashed(&old->index_hash))) - return false; + bool ret; + spin_lock_bh(&table->lock); + ret = !hlist_unhashed(&old->index_hash); + if (unlikely(!ret)) + goto out; + new->index = old->index; hlist_replace_rcu(&old->index_hash, &new->index_hash); @@ -180,8 +184,9 @@ bool wg_index_hashtable_replace(struct index_hashtable *table, * simply gets dropped, which isn't terrible. 
*/ INIT_HLIST_NODE(&old->index_hash); +out: spin_unlock_bh(&table->lock); - return true; + return ret; } void wg_index_hashtable_remove(struct index_hashtable *table, diff --git a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c index e8712ad3ac45..3c07d1bbe1c6 100644 --- a/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c +++ b/drivers/net/wireless/broadcom/brcm80211/brcmfmac/sdio.c @@ -664,9 +664,15 @@ static void pkt_align(struct sk_buff *p, int len, int align) /* To check if there's window offered */ static bool data_ok(struct brcmf_sdio *bus) { - /* Reserve TXCTL_CREDITS credits for txctl */ - return (bus->tx_max - bus->tx_seq) > TXCTL_CREDITS && - ((bus->tx_max - bus->tx_seq) & 0x80) == 0; + u8 tx_rsv = 0; + + /* Reserve TXCTL_CREDITS credits for txctl when it is ready to send */ + if (bus->ctrl_frame_stat) + tx_rsv = TXCTL_CREDITS; + + return (bus->tx_max - bus->tx_seq - tx_rsv) != 0 && + ((bus->tx_max - bus->tx_seq - tx_rsv) & 0x80) == 0; + } /* To check if there's window offered */ diff --git a/drivers/net/wireless/marvell/mwifiex/fw.h b/drivers/net/wireless/marvell/mwifiex/fw.h index 8047e307892e..d9f8bdbc817b 100644 --- a/drivers/net/wireless/marvell/mwifiex/fw.h +++ b/drivers/net/wireless/marvell/mwifiex/fw.h @@ -954,7 +954,7 @@ struct mwifiex_tkip_param { struct mwifiex_aes_param { u8 pn[WPA_PN_SIZE]; __le16 key_len; - u8 key[WLAN_KEY_LEN_CCMP]; + u8 key[WLAN_KEY_LEN_CCMP_256]; } __packed; struct mwifiex_wapi_param { diff --git a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c index 962d8bfe6f10..119ccacd1fcc 100644 --- a/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c +++ b/drivers/net/wireless/marvell/mwifiex/sta_cmdresp.c @@ -619,7 +619,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv, key_v2 = &resp->params.key_material_v2; len = le16_to_cpu(key_v2->key_param_set.key_params.aes.key_len); - if (len > WLAN_KEY_LEN_CCMP) + if (len > sizeof(key_v2->key_param_set.key_params.aes.key)) return -EINVAL; if (le16_to_cpu(key_v2->action) == HostCmd_ACT_GEN_SET) { @@ -635,7 +635,7 @@ static int mwifiex_ret_802_11_key_material_v2(struct mwifiex_private *priv, return 0; memset(priv->aes_key_v2.key_param_set.key_params.aes.key, 0, - WLAN_KEY_LEN_CCMP); + sizeof(key_v2->key_param_set.key_params.aes.key)); priv->aes_key_v2.key_param_set.key_params.aes.key_len = cpu_to_le16(len); memcpy(priv->aes_key_v2.key_param_set.key_params.aes.key, diff --git a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c index d0cbb283982f..bd316dbd9041 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c +++ b/drivers/net/wireless/mediatek/mt76/mt7615/mcu.c @@ -2128,7 +2128,8 @@ static int mt7615_load_n9(struct mt7615_dev *dev, const char *name) sizeof(dev->mt76.hw->wiphy->fw_version), "%.10s-%.15s", hdr->fw_ver, hdr->build_date); - if (!strncmp(hdr->fw_ver, "2.0", sizeof(hdr->fw_ver))) { + if (!is_mt7615(&dev->mt76) && + !strncmp(hdr->fw_ver, "2.0", sizeof(hdr->fw_ver))) { dev->fw_ver = MT7615_FIRMWARE_V2; dev->mcu_ops = &sta_update_ops; } else { diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/init.c b/drivers/net/wireless/mediatek/mt76/mt7915/init.c index e90d0087e377..8d6ceb3b67b4 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/init.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/init.c @@ -699,8 +699,12 @@ void mt7915_unregister_device(struct mt7915_dev *dev) 
spin_lock_bh(&dev->token_lock); idr_for_each_entry(&dev->token, txwi, id) { mt7915_txp_skb_unmap(&dev->mt76, txwi); - if (txwi->skb) - dev_kfree_skb_any(txwi->skb); + if (txwi->skb) { + struct ieee80211_hw *hw; + + hw = mt76_tx_status_get_hw(&dev->mt76, txwi->skb); + ieee80211_free_txskb(hw, txwi->skb); + } mt76_put_txwi(&dev->mt76, txwi); } spin_unlock_bh(&dev->token_lock); diff --git a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c index 6825afca1efb..036207f828f3 100644 --- a/drivers/net/wireless/mediatek/mt76/mt7915/mac.c +++ b/drivers/net/wireless/mediatek/mt76/mt7915/mac.c @@ -841,7 +841,7 @@ mt7915_tx_complete_status(struct mt76_dev *mdev, struct sk_buff *skb, if (sta || !(info->flags & IEEE80211_TX_CTL_NO_ACK)) mt7915_tx_status(sta, hw, info, NULL); - dev_kfree_skb(skb); + ieee80211_free_txskb(hw, skb); } void mt7915_txp_skb_unmap(struct mt76_dev *dev, diff --git a/drivers/net/wireless/ti/wlcore/cmd.h b/drivers/net/wireless/ti/wlcore/cmd.h index 9acd8a41ea61..f2609d5b6bf7 100644 --- a/drivers/net/wireless/ti/wlcore/cmd.h +++ b/drivers/net/wireless/ti/wlcore/cmd.h @@ -458,7 +458,6 @@ enum wl1271_cmd_key_type { KEY_TKIP = 2, KEY_AES = 3, KEY_GEM = 4, - KEY_IGTK = 5, }; struct wl1271_cmd_set_keys { diff --git a/drivers/net/wireless/ti/wlcore/main.c b/drivers/net/wireless/ti/wlcore/main.c index 821ad1acd505..d2bbd5108f7e 100644 --- a/drivers/net/wireless/ti/wlcore/main.c +++ b/drivers/net/wireless/ti/wlcore/main.c @@ -3559,9 +3559,6 @@ int wlcore_set_key(struct wl1271 *wl, enum set_key_cmd cmd, case WL1271_CIPHER_SUITE_GEM: key_type = KEY_GEM; break; - case WLAN_CIPHER_SUITE_AES_CMAC: - key_type = KEY_IGTK; - break; default: wl1271_error("Unknown key algo 0x%x", key_conf->cipher); @@ -6231,7 +6228,6 @@ static int wl1271_init_ieee80211(struct wl1271 *wl) WLAN_CIPHER_SUITE_TKIP, WLAN_CIPHER_SUITE_CCMP, WL1271_CIPHER_SUITE_GEM, - WLAN_CIPHER_SUITE_AES_CMAC, }; /* The tx descriptor buffer */ diff --git a/drivers/nvme/host/Kconfig b/drivers/nvme/host/Kconfig index 3ed9786b88d8..a44d49d63968 100644 --- a/drivers/nvme/host/Kconfig +++ b/drivers/nvme/host/Kconfig @@ -73,6 +73,7 @@ config NVME_TCP depends on INET depends on BLK_DEV_NVME select NVME_FABRICS + select CRYPTO select CRYPTO_CRC32C help This provides support for the NVMe over Fabrics protocol using diff --git a/drivers/nvme/host/core.c b/drivers/nvme/host/core.c index f3a61a24d45f..8f9d61e0729f 100644 --- a/drivers/nvme/host/core.c +++ b/drivers/nvme/host/core.c @@ -3041,7 +3041,7 @@ static int nvme_get_effects_log(struct nvme_ctrl *ctrl, u8 csi, if (!cel) return -ENOMEM; - ret = nvme_get_log(ctrl, NVME_NSID_ALL, NVME_LOG_CMD_EFFECTS, 0, csi, + ret = nvme_get_log(ctrl, 0x00, NVME_LOG_CMD_EFFECTS, 0, csi, &cel->log, sizeof(cel->log), 0); if (ret) { kfree(cel); @@ -3236,8 +3236,11 @@ int nvme_init_identify(struct nvme_ctrl *ctrl) if (ret < 0) return ret; - if (!ctrl->identified) - nvme_hwmon_init(ctrl); + if (!ctrl->identified) { + ret = nvme_hwmon_init(ctrl); + if (ret < 0) + return ret; + } ctrl->identified = true; @@ -3261,10 +3264,24 @@ static int nvme_dev_open(struct inode *inode, struct file *file) return -EWOULDBLOCK; } + nvme_get_ctrl(ctrl); + if (!try_module_get(ctrl->ops->module)) + return -EINVAL; + file->private_data = ctrl; return 0; } +static int nvme_dev_release(struct inode *inode, struct file *file) +{ + struct nvme_ctrl *ctrl = + container_of(inode->i_cdev, struct nvme_ctrl, cdev); + + module_put(ctrl->ops->module); + nvme_put_ctrl(ctrl); + return 0; +} + static int 
nvme_dev_user_cmd(struct nvme_ctrl *ctrl, void __user *argp) { struct nvme_ns *ns; @@ -3327,6 +3344,7 @@ static long nvme_dev_ioctl(struct file *file, unsigned int cmd, static const struct file_operations nvme_dev_fops = { .owner = THIS_MODULE, .open = nvme_dev_open, + .release = nvme_dev_release, .unlocked_ioctl = nvme_dev_ioctl, .compat_ioctl = compat_ptr_ioctl, }; diff --git a/drivers/nvme/host/fc.c b/drivers/nvme/host/fc.c index e8ef42b9d50c..e2e09e25c056 100644 --- a/drivers/nvme/host/fc.c +++ b/drivers/nvme/host/fc.c @@ -3671,12 +3671,14 @@ nvme_fc_create_ctrl(struct device *dev, struct nvmf_ctrl_options *opts) spin_lock_irqsave(&nvme_fc_lock, flags); list_for_each_entry(lport, &nvme_fc_lport_list, port_list) { if (lport->localport.node_name != laddr.nn || - lport->localport.port_name != laddr.pn) + lport->localport.port_name != laddr.pn || + lport->localport.port_state != FC_OBJSTATE_ONLINE) continue; list_for_each_entry(rport, &lport->endp_list, endp_list) { if (rport->remoteport.node_name != raddr.nn || - rport->remoteport.port_name != raddr.pn) + rport->remoteport.port_name != raddr.pn || + rport->remoteport.port_state != FC_OBJSTATE_ONLINE) continue; /* if fail to get reference fall through. Will error */ diff --git a/drivers/nvme/host/hwmon.c b/drivers/nvme/host/hwmon.c index 412a6c97c0d8..552dbc04567b 100644 --- a/drivers/nvme/host/hwmon.c +++ b/drivers/nvme/host/hwmon.c @@ -59,12 +59,8 @@ static int nvme_set_temp_thresh(struct nvme_ctrl *ctrl, int sensor, bool under, static int nvme_hwmon_get_smart_log(struct nvme_hwmon_data *data) { - int ret; - - ret = nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0, + return nvme_get_log(data->ctrl, NVME_NSID_ALL, NVME_LOG_SMART, 0, NVME_CSI_NVM, &data->log, sizeof(data->log), 0); - - return ret <= 0 ? 
ret : -EIO; } static int nvme_hwmon_read(struct device *dev, enum hwmon_sensor_types type, @@ -225,7 +221,7 @@ static const struct hwmon_chip_info nvme_hwmon_chip_info = { .info = nvme_hwmon_info, }; -void nvme_hwmon_init(struct nvme_ctrl *ctrl) +int nvme_hwmon_init(struct nvme_ctrl *ctrl) { struct device *dev = ctrl->dev; struct nvme_hwmon_data *data; @@ -234,7 +230,7 @@ void nvme_hwmon_init(struct nvme_ctrl *ctrl) data = devm_kzalloc(dev, sizeof(*data), GFP_KERNEL); if (!data) - return; + return 0; data->ctrl = ctrl; mutex_init(&data->read_lock); @@ -244,7 +240,7 @@ void nvme_hwmon_init(struct nvme_ctrl *ctrl) dev_warn(ctrl->device, "Failed to read smart log (error %d)\n", err); devm_kfree(dev, data); - return; + return err; } hwmon = devm_hwmon_device_register_with_info(dev, "nvme", data, @@ -254,4 +250,6 @@ void nvme_hwmon_init(struct nvme_ctrl *ctrl) dev_warn(dev, "Failed to instantiate hwmon device\n"); devm_kfree(dev, data); } + + return 0; } diff --git a/drivers/nvme/host/nvme.h b/drivers/nvme/host/nvme.h index 9fd45ff656da..2aaedfa43ed8 100644 --- a/drivers/nvme/host/nvme.h +++ b/drivers/nvme/host/nvme.h @@ -827,9 +827,12 @@ static inline struct nvme_ns *nvme_get_ns_from_dev(struct device *dev) } #ifdef CONFIG_NVME_HWMON -void nvme_hwmon_init(struct nvme_ctrl *ctrl); +int nvme_hwmon_init(struct nvme_ctrl *ctrl); #else -static inline void nvme_hwmon_init(struct nvme_ctrl *ctrl) { } +static inline int nvme_hwmon_init(struct nvme_ctrl *ctrl) +{ + return 0; +} #endif u32 nvme_command_effects(struct nvme_ctrl *ctrl, struct nvme_ns *ns, diff --git a/drivers/nvme/host/pci.c b/drivers/nvme/host/pci.c index 899d2f4d7ab6..8984796db0c8 100644 --- a/drivers/nvme/host/pci.c +++ b/drivers/nvme/host/pci.c @@ -940,13 +940,6 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) struct nvme_completion *cqe = &nvmeq->cqes[idx]; struct request *req; - if (unlikely(cqe->command_id >= nvmeq->q_depth)) { - dev_warn(nvmeq->dev->ctrl.device, - "invalid id %d completed on queue %d\n", - cqe->command_id, le16_to_cpu(cqe->sq_id)); - return; - } - /* * AEN requests are special as they don't time out and can * survive any kind of queue freeze and often don't respond to @@ -960,6 +953,13 @@ static inline void nvme_handle_cqe(struct nvme_queue *nvmeq, u16 idx) } req = blk_mq_tag_to_rq(nvme_queue_tagset(nvmeq), cqe->command_id); + if (unlikely(!req)) { + dev_warn(nvmeq->dev->ctrl.device, + "invalid id %d completed on queue %d\n", + cqe->command_id, le16_to_cpu(cqe->sq_id)); + return; + } + trace_nvme_sq(req, cqe->sq_head, nvmeq->sq_tail); if (!nvme_try_complete_req(req, cqe->status, cqe->result)) nvme_pci_complete_rq(req); @@ -3153,7 +3153,8 @@ static const struct pci_device_id nvme_id_table[] = { { PCI_VDEVICE(INTEL, 0xf1a5), /* Intel 600P/P3100 */ .driver_data = NVME_QUIRK_NO_DEEPEST_PS | NVME_QUIRK_MEDIUM_PRIO_SQ | - NVME_QUIRK_NO_TEMP_THRESH_CHANGE }, + NVME_QUIRK_NO_TEMP_THRESH_CHANGE | + NVME_QUIRK_DISABLE_WRITE_ZEROES, }, { PCI_VDEVICE(INTEL, 0xf1a6), /* Intel 760p/Pro 7600p */ .driver_data = NVME_QUIRK_IGNORE_DEV_SUBNQN, }, { PCI_VDEVICE(INTEL, 0x5845), /* Qemu emulated controller */ diff --git a/drivers/nvme/target/passthru.c b/drivers/nvme/target/passthru.c index 8bd7f656e240..dacfa7435d0b 100644 --- a/drivers/nvme/target/passthru.c +++ b/drivers/nvme/target/passthru.c @@ -517,6 +517,7 @@ int nvmet_passthru_ctrl_enable(struct nvmet_subsys *subsys) subsys->ver = NVME_VS(1, 2, 1); } + __module_get(subsys->passthru_ctrl->ops->module); mutex_unlock(&subsys->lock); return 0; @@ -531,6 
+532,7 @@ static void __nvmet_passthru_ctrl_disable(struct nvmet_subsys *subsys) { if (subsys->passthru_ctrl) { xa_erase(&passthru_subsystems, subsys->passthru_ctrl->cntlid); + module_put(subsys->passthru_ctrl->ops->module); nvme_put_ctrl(subsys->passthru_ctrl); } subsys->passthru_ctrl = NULL; diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c index 85fa7d54f11f..bac63d04297f 100644 --- a/drivers/pci/controller/pcie-brcmstb.c +++ b/drivers/pci/controller/pcie-brcmstb.c @@ -28,8 +28,6 @@ #include <linux/string.h> #include <linux/types.h> -#include <soc/bcm2835/raspberrypi-firmware.h> - #include "../pci.h" /* BRCM_PCIE_CAP_REGS - Offset for the mandatory capability config regs */ @@ -931,24 +929,9 @@ static int brcm_pcie_probe(struct platform_device *pdev) { struct device_node *np = pdev->dev.of_node, *msi_np; struct pci_host_bridge *bridge; - struct device_node *fw_np; struct brcm_pcie *pcie; int ret; - /* - * We have to wait for Raspberry Pi's firmware interface to be up as a - * PCI fixup, rpi_firmware_init_vl805(), depends on it. This driver's - * probe can race with the firmware interface's (see - * drivers/firmware/raspberrypi.c) and potentially break the PCI fixup. - */ - fw_np = of_find_compatible_node(NULL, NULL, - "raspberrypi,bcm2835-firmware"); - if (fw_np && !rpi_firmware_get(fw_np)) { - of_node_put(fw_np); - return -EPROBE_DEFER; - } - of_node_put(fw_np); - bridge = devm_pci_alloc_host_bridge(&pdev->dev, sizeof(*pcie)); if (!bridge) return -ENOMEM; diff --git a/drivers/pci/controller/pcie-rockchip-host.c b/drivers/pci/controller/pcie-rockchip-host.c index 0bb2fb3e8a0b..9705059523a6 100644 --- a/drivers/pci/controller/pcie-rockchip-host.c +++ b/drivers/pci/controller/pcie-rockchip-host.c @@ -71,16 +71,13 @@ static void rockchip_pcie_update_txcredit_mui(struct rockchip_pcie *rockchip) static int rockchip_pcie_valid_device(struct rockchip_pcie *rockchip, struct pci_bus *bus, int dev) { - /* access only one slot on each root port */ - if (pci_is_root_bus(bus) && dev > 0) - return 0; - /* - * do not read more than one device on the bus directly attached + * Access only one slot on each root port. + * Do not read more than one device on the bus directly attached * to RC's downstream side. */ - if (pci_is_root_bus(bus->parent) && dev > 0) - return 0; + if (pci_is_root_bus(bus) || pci_is_root_bus(bus->parent)) + return dev == 0; return 1; } diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c index 2a589b6d6ed8..01f23e30bd8f 100644 --- a/drivers/pci/quirks.c +++ b/drivers/pci/quirks.c @@ -3673,63 +3673,6 @@ static void quirk_apple_poweroff_thunderbolt(struct pci_dev *dev) DECLARE_PCI_FIXUP_SUSPEND_LATE(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C, quirk_apple_poweroff_thunderbolt); - -/* - * Apple: Wait for the Thunderbolt controller to reestablish PCI tunnels - * - * During suspend the Thunderbolt controller is reset and all PCI - * tunnels are lost. The NHI driver will try to reestablish all tunnels - * during resume. We have to manually wait for the NHI since there is - * no parent child relationship between the NHI and the tunneled - * bridges. - */ -static void quirk_apple_wait_for_thunderbolt(struct pci_dev *dev) -{ - struct pci_dev *sibling = NULL; - struct pci_dev *nhi = NULL; - - if (!x86_apple_machine) - return; - if (pci_pcie_type(dev) != PCI_EXP_TYPE_DOWNSTREAM) - return; - - /* - * Find the NHI and confirm that we are a bridge on the Thunderbolt - * host controller and not on a Thunderbolt endpoint. 
- */ - sibling = pci_get_slot(dev->bus, 0x0); - if (sibling == dev) - goto out; /* we are the downstream bridge to the NHI */ - if (!sibling || !sibling->subordinate) - goto out; - nhi = pci_get_slot(sibling->subordinate, 0x0); - if (!nhi) - goto out; - if (nhi->vendor != PCI_VENDOR_ID_INTEL - || (nhi->device != PCI_DEVICE_ID_INTEL_LIGHT_RIDGE && - nhi->device != PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C && - nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI && - nhi->device != PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI) - || nhi->class != PCI_CLASS_SYSTEM_OTHER << 8) - goto out; - pci_info(dev, "quirk: waiting for Thunderbolt to reestablish PCI tunnels...\n"); - device_pm_wait_for_dev(&dev->dev, &nhi->dev); -out: - pci_dev_put(nhi); - pci_dev_put(sibling); -} -DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_LIGHT_RIDGE, - quirk_apple_wait_for_thunderbolt); -DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C, - quirk_apple_wait_for_thunderbolt); -DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE, - quirk_apple_wait_for_thunderbolt); -DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_INTEL, - PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE, - quirk_apple_wait_for_thunderbolt); #endif /* diff --git a/drivers/phy/Kconfig b/drivers/phy/Kconfig index de9362c25c07..01b53f86004c 100644 --- a/drivers/phy/Kconfig +++ b/drivers/phy/Kconfig @@ -49,6 +49,17 @@ config PHY_XGENE help This option enables support for APM X-Gene SoC multi-purpose PHY. +config USB_LGM_PHY + tristate "INTEL Lightning Mountain USB PHY Driver" + depends on USB_SUPPORT + select USB_PHY + select REGULATOR + select REGULATOR_FIXED_VOLTAGE + help + Enable this to support Intel DWC3 PHY USB phy. This driver provides + interface to interact with USB GEN-II and USB 3.x PHY that is part + of the Intel network SOC. 
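[Aside] Several of the Broadcom PHY hunks that follow replace open-coded register polling loops with the readl_poll_timeout_atomic() helper from <linux/iopoll.h>. As a rough illustration of that helper only — the register, bit, and device names here are hypothetical and not taken from these drivers — a minimal sketch looks like:

	#include <linux/iopoll.h>

	u32 val;
	int ret;

	/*
	 * Poll the (hypothetical) PLL_STATUS register until PLL_LOCK is set,
	 * sampling every 10 us and giving up after 2000 us.  On timeout the
	 * helper returns -ETIMEDOUT and val holds the last value read.
	 */
	ret = readl_poll_timeout_atomic(base + PLL_STATUS, val,
					val & PLL_LOCK, 10, 2000);
	if (ret)
		dev_err(dev, "PLL failed to lock\n");

In the conversions below, the old per-iteration udelay() and retry-count pairs map onto the helper's delay_us and timeout_us arguments, so the effective wait stays roughly what the hand-rolled loops provided.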
+ source "drivers/phy/allwinner/Kconfig" source "drivers/phy/amlogic/Kconfig" source "drivers/phy/broadcom/Kconfig" diff --git a/drivers/phy/Makefile b/drivers/phy/Makefile index c27408e4daae..6eb2916773c5 100644 --- a/drivers/phy/Makefile +++ b/drivers/phy/Makefile @@ -8,6 +8,7 @@ obj-$(CONFIG_GENERIC_PHY_MIPI_DPHY) += phy-core-mipi-dphy.o obj-$(CONFIG_PHY_LPC18XX_USB_OTG) += phy-lpc18xx-usb-otg.o obj-$(CONFIG_PHY_XGENE) += phy-xgene.o obj-$(CONFIG_PHY_PISTACHIO_USB) += phy-pistachio-usb.o +obj-$(CONFIG_USB_LGM_PHY) += phy-lgm-usb.o obj-y += allwinner/ \ amlogic/ \ broadcom/ \ diff --git a/drivers/phy/broadcom/phy-bcm-ns-usb3.c b/drivers/phy/broadcom/phy-bcm-ns-usb3.c index 14f45bc35cc5..47b029fbebbd 100644 --- a/drivers/phy/broadcom/phy-bcm-ns-usb3.c +++ b/drivers/phy/broadcom/phy-bcm-ns-usb3.c @@ -13,6 +13,7 @@ #include <linux/bcma/bcma.h> #include <linux/delay.h> #include <linux/err.h> +#include <linux/iopoll.h> #include <linux/mdio.h> #include <linux/module.h> #include <linux/of_address.h> @@ -258,29 +259,24 @@ static struct mdio_driver bcm_ns_usb3_mdio_driver = { **************************************************/ static int bcm_ns_usb3_wait_reg(struct bcm_ns_usb3 *usb3, void __iomem *addr, - u32 mask, u32 value, unsigned long timeout) + u32 mask, u32 value, int usec) { - unsigned long deadline = jiffies + timeout; u32 val; + int ret; - do { - val = readl(addr); - if ((val & mask) == value) - return 0; - cpu_relax(); - udelay(10); - } while (!time_after_eq(jiffies, deadline)); + ret = readl_poll_timeout_atomic(addr, val, ((val & mask) == value), + 10, usec); + if (ret) + dev_err(usb3->dev, "Timeout waiting for register %p\n", addr); - dev_err(usb3->dev, "Timeout waiting for register %p\n", addr); - - return -EBUSY; + return ret; } static inline int bcm_ns_usb3_mii_mng_wait_idle(struct bcm_ns_usb3 *usb3) { return bcm_ns_usb3_wait_reg(usb3, usb3->ccb_mii + BCMA_CCB_MII_MNG_CTL, 0x0100, 0x0000, - usecs_to_jiffies(BCM_NS_USB3_MII_MNG_TIMEOUT_US)); + BCM_NS_USB3_MII_MNG_TIMEOUT_US); } static int bcm_ns_usb3_platform_phy_write(struct bcm_ns_usb3 *usb3, u16 reg, diff --git a/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c b/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c index 527625912b78..9630ac127366 100644 --- a/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c +++ b/drivers/phy/broadcom/phy-bcm-ns2-usbdrd.c @@ -18,6 +18,7 @@ #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/irq.h> #include <linux/mfd/syscon.h> #include <linux/module.h> @@ -87,17 +88,11 @@ static const unsigned int usb_extcon_cable[] = { static inline int pll_lock_stat(u32 usb_reg, int reg_mask, struct ns2_phy_driver *driver) { - int retry = PLL_LOCK_RETRY; u32 val; - do { - udelay(1); - val = readl(driver->icfgdrd_regs + usb_reg); - if (val & reg_mask) - return 0; - } while (--retry > 0); - - return -EBUSY; + return readl_poll_timeout_atomic(driver->icfgdrd_regs + usb_reg, + val, (val & reg_mask), 1, + PLL_LOCK_RETRY); } static int ns2_drd_phy_init(struct phy *phy) diff --git a/drivers/phy/broadcom/phy-bcm-sr-usb.c b/drivers/phy/broadcom/phy-bcm-sr-usb.c index 77c025a0720c..c3e99ad17487 100644 --- a/drivers/phy/broadcom/phy-bcm-sr-usb.c +++ b/drivers/phy/broadcom/phy-bcm-sr-usb.c @@ -5,6 +5,7 @@ #include <linux/delay.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/module.h> #include <linux/of.h> #include <linux/phy/phy.h> @@ -109,19 +110,15 @@ static inline void bcm_usb_reg32_setbits(void __iomem *addr, uint32_t set) static int 
bcm_usb_pll_lock_check(void __iomem *addr, u32 bit) { - int retry; - u32 rd_data; + u32 data; + int ret; - retry = PLL_LOCK_RETRY_COUNT; - do { - rd_data = readl(addr); - if (rd_data & bit) - return 0; - udelay(1); - } while (--retry > 0); + ret = readl_poll_timeout_atomic(addr, data, (data & bit), 1, + PLL_LOCK_RETRY_COUNT); + if (ret) + pr_err("%s: FAIL\n", __func__); - pr_err("%s: FAIL\n", __func__); - return -ETIMEDOUT; + return ret; } static int bcm_usb_ss_phy_init(struct bcm_usb_phy_cfg *phy_cfg) diff --git a/drivers/phy/cadence/phy-cadence-salvo.c b/drivers/phy/cadence/phy-cadence-salvo.c index 016514e4aa54..88e239adc3b8 100644 --- a/drivers/phy/cadence/phy-cadence-salvo.c +++ b/drivers/phy/cadence/phy-cadence-salvo.c @@ -97,7 +97,7 @@ struct cdns_reg_pairs { struct cdns_salvo_data { u8 reg_offset_shift; - struct cdns_reg_pairs *init_sequence_val; + const struct cdns_reg_pairs *init_sequence_val; u8 init_sequence_length; }; @@ -126,7 +126,7 @@ static void cdns_salvo_write(struct cdns_salvo_phy *salvo_phy, * Below bringup sequence pair are from Cadence PHY's User Guide * and NXP platform tuning results. */ -static struct cdns_reg_pairs cdns_nxp_sequence_pair[] = { +static const struct cdns_reg_pairs cdns_nxp_sequence_pair[] = { {0x0830, PHY_PMA_CMN_CTRL1}, {0x0010, TB_ADDR_CMN_DIAG_HSCLK_SEL}, {0x00f0, TB_ADDR_CMN_PLL0_VCOCAL_INIT_TMR}, @@ -217,7 +217,7 @@ static int cdns_salvo_phy_init(struct phy *phy) return ret; for (i = 0; i < data->init_sequence_length; i++) { - struct cdns_reg_pairs *reg_pair = data->init_sequence_val + i; + const struct cdns_reg_pairs *reg_pair = data->init_sequence_val + i; cdns_salvo_write(salvo_phy, reg_pair->off, reg_pair->val); } @@ -251,7 +251,7 @@ static int cdns_salvo_phy_power_off(struct phy *phy) return 0; } -static struct phy_ops cdns_salvo_phy_ops = { +static const struct phy_ops cdns_salvo_phy_ops = { .init = cdns_salvo_phy_init, .power_on = cdns_salvo_phy_power_on, .power_off = cdns_salvo_phy_power_off, diff --git a/drivers/phy/cadence/phy-cadence-sierra.c b/drivers/phy/cadence/phy-cadence-sierra.c index faed652b73f7..453ef26fa1c7 100644 --- a/drivers/phy/cadence/phy-cadence-sierra.c +++ b/drivers/phy/cadence/phy-cadence-sierra.c @@ -172,10 +172,10 @@ struct cdns_sierra_data { u32 pcie_ln_regs; u32 usb_cmn_regs; u32 usb_ln_regs; - struct cdns_reg_pairs *pcie_cmn_vals; - struct cdns_reg_pairs *pcie_ln_vals; - struct cdns_reg_pairs *usb_cmn_vals; - struct cdns_reg_pairs *usb_ln_vals; + const struct cdns_reg_pairs *pcie_cmn_vals; + const struct cdns_reg_pairs *pcie_ln_vals; + const struct cdns_reg_pairs *usb_cmn_vals; + const struct cdns_reg_pairs *usb_ln_vals; }; struct cdns_regmap_cdb_context { @@ -233,7 +233,7 @@ static int cdns_regmap_read(void *context, unsigned int reg, unsigned int *val) .reg_read = cdns_regmap_read, \ } -static struct regmap_config cdns_sierra_lane_cdb_config[] = { +static const struct regmap_config cdns_sierra_lane_cdb_config[] = { SIERRA_LANE_CDB_REGMAP_CONF("0"), SIERRA_LANE_CDB_REGMAP_CONF("1"), SIERRA_LANE_CDB_REGMAP_CONF("2"), @@ -252,7 +252,7 @@ static struct regmap_config cdns_sierra_lane_cdb_config[] = { SIERRA_LANE_CDB_REGMAP_CONF("15"), }; -static struct regmap_config cdns_sierra_common_cdb_config = { +static const struct regmap_config cdns_sierra_common_cdb_config = { .name = "sierra_common_cdb", .reg_stride = 1, .fast_io = true, @@ -260,7 +260,7 @@ static struct regmap_config cdns_sierra_common_cdb_config = { .reg_read = cdns_regmap_read, }; -static struct regmap_config cdns_sierra_phy_config_ctrl_config = { 
+static const struct regmap_config cdns_sierra_phy_config_ctrl_config = { .name = "sierra_phy_config_ctrl", .reg_stride = 1, .fast_io = true, @@ -274,7 +274,7 @@ static int cdns_sierra_phy_init(struct phy *gphy) struct cdns_sierra_phy *phy = dev_get_drvdata(gphy->dev.parent); struct regmap *regmap; int i, j; - struct cdns_reg_pairs *cmn_vals, *ln_vals; + const struct cdns_reg_pairs *cmn_vals, *ln_vals; u32 num_cmn_regs, num_ln_regs; /* Initialise the PHY registers, unless auto configured */ @@ -654,7 +654,7 @@ static int cdns_sierra_phy_remove(struct platform_device *pdev) } /* refclk100MHz_32b_PCIe_cmn_pll_ext_ssc */ -static struct cdns_reg_pairs cdns_pcie_cmn_regs_ext_ssc[] = { +static const struct cdns_reg_pairs cdns_pcie_cmn_regs_ext_ssc[] = { {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG}, {0x2106, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG}, {0x8A06, SIERRA_CMN_PLLLC_BWCAL_MODE1_PREG}, @@ -663,7 +663,7 @@ static struct cdns_reg_pairs cdns_pcie_cmn_regs_ext_ssc[] = { }; /* refclk100MHz_32b_PCIe_ln_ext_ssc */ -static struct cdns_reg_pairs cdns_pcie_ln_regs_ext_ssc[] = { +static const struct cdns_reg_pairs cdns_pcie_ln_regs_ext_ssc[] = { {0x813E, SIERRA_CLKPATHCTRL_TMR_PREG}, {0x8047, SIERRA_RX_CREQ_FLTR_A_MODE3_PREG}, {0x808F, SIERRA_RX_CREQ_FLTR_A_MODE2_PREG}, @@ -674,7 +674,7 @@ static struct cdns_reg_pairs cdns_pcie_ln_regs_ext_ssc[] = { }; /* refclk100MHz_20b_USB_cmn_pll_ext_ssc */ -static struct cdns_reg_pairs cdns_usb_cmn_regs_ext_ssc[] = { +static const struct cdns_reg_pairs cdns_usb_cmn_regs_ext_ssc[] = { {0x2085, SIERRA_CMN_PLLLC_LF_COEFF_MODE1_PREG}, {0x2085, SIERRA_CMN_PLLLC_LF_COEFF_MODE0_PREG}, {0x0000, SIERRA_CMN_PLLLC_BWCAL_MODE0_PREG}, @@ -682,7 +682,7 @@ static struct cdns_reg_pairs cdns_usb_cmn_regs_ext_ssc[] = { }; /* refclk100MHz_20b_USB_ln_ext_ssc */ -static struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = { +static const struct cdns_reg_pairs cdns_usb_ln_regs_ext_ssc[] = { {0xFE0A, SIERRA_DET_STANDEC_A_PREG}, {0x000F, SIERRA_DET_STANDEC_B_PREG}, {0x55A5, SIERRA_DET_STANDEC_C_PREG}, diff --git a/drivers/phy/cadence/phy-cadence-torrent.c b/drivers/phy/cadence/phy-cadence-torrent.c index 7116127358ee..f310e15d94cb 100644 --- a/drivers/phy/cadence/phy-cadence-torrent.c +++ b/drivers/phy/cadence/phy-cadence-torrent.c @@ -25,11 +25,14 @@ #define REF_CLK_19_2MHz 19200000 #define REF_CLK_25MHz 25000000 -#define DEFAULT_NUM_LANES 4 #define MAX_NUM_LANES 4 #define DEFAULT_MAX_BIT_RATE 8100 /* in Mbps */ +#define NUM_SSC_MODE 3 +#define NUM_PHY_TYPE 6 + #define POLL_TIMEOUT_US 5000 +#define PLL_LOCK_TIMEOUT 100000 #define TORRENT_COMMON_CDB_OFFSET 0x0 @@ -79,6 +82,8 @@ #define CMN_PLLSM0_PLLLOCK_TMR 0x002CU #define CMN_PLLSM1_PLLPRE_TMR 0x0032U #define CMN_PLLSM1_PLLLOCK_TMR 0x0034U +#define CMN_CDIAG_CDB_PWRI_OVRD 0x0041U +#define CMN_CDIAG_XCVRC_PWRI_OVRD 0x0047U #define CMN_BGCAL_INIT_TMR 0x0064U #define CMN_BGCAL_ITER_TMR 0x0065U #define CMN_IBCAL_INIT_TMR 0x0074U @@ -99,6 +104,14 @@ #define CMN_PLL0_LOCK_REFCNT_START 0x009CU #define CMN_PLL0_LOCK_PLLCNT_START 0x009EU #define CMN_PLL0_LOCK_PLLCNT_THR 0x009FU +#define CMN_PLL0_INTDIV_M1 0x00A0U +#define CMN_PLL0_FRACDIVH_M1 0x00A2U +#define CMN_PLL0_HIGH_THR_M1 0x00A3U +#define CMN_PLL0_DSM_DIAG_M1 0x00A4U +#define CMN_PLL0_SS_CTRL1_M1 0x00A8U +#define CMN_PLL0_SS_CTRL2_M1 0x00A9U +#define CMN_PLL0_SS_CTRL3_M1 0x00AAU +#define CMN_PLL0_SS_CTRL4_M1 0x00ABU #define CMN_PLL1_VCOCAL_TCTRL 0x00C2U #define CMN_PLL1_VCOCAL_INIT_TMR 0x00C4U #define CMN_PLL1_VCOCAL_ITER_TMR 0x00C5U @@ -116,8 +129,10 @@ #define 
CMN_PLL1_LOCK_REFCNT_START 0x00DCU #define CMN_PLL1_LOCK_PLLCNT_START 0x00DEU #define CMN_PLL1_LOCK_PLLCNT_THR 0x00DFU +#define CMN_TXPUCAL_TUNE 0x0103U #define CMN_TXPUCAL_INIT_TMR 0x0104U #define CMN_TXPUCAL_ITER_TMR 0x0105U +#define CMN_TXPDCAL_TUNE 0x010BU #define CMN_TXPDCAL_INIT_TMR 0x010CU #define CMN_TXPDCAL_ITER_TMR 0x010DU #define CMN_RXCAL_INIT_TMR 0x0114U @@ -131,24 +146,31 @@ #define CMN_PDIAG_PLL0_CP_PADJ_M0 0x01A4U #define CMN_PDIAG_PLL0_CP_IADJ_M0 0x01A5U #define CMN_PDIAG_PLL0_FILT_PADJ_M0 0x01A6U +#define CMN_PDIAG_PLL0_CTRL_M1 0x01B0U +#define CMN_PDIAG_PLL0_CLK_SEL_M1 0x01B1U #define CMN_PDIAG_PLL0_CP_PADJ_M1 0x01B4U #define CMN_PDIAG_PLL0_CP_IADJ_M1 0x01B5U +#define CMN_PDIAG_PLL0_FILT_PADJ_M1 0x01B6U #define CMN_PDIAG_PLL1_CTRL_M0 0x01C0U #define CMN_PDIAG_PLL1_CLK_SEL_M0 0x01C1U #define CMN_PDIAG_PLL1_CP_PADJ_M0 0x01C4U #define CMN_PDIAG_PLL1_CP_IADJ_M0 0x01C5U #define CMN_PDIAG_PLL1_FILT_PADJ_M0 0x01C6U +#define CMN_DIAG_BIAS_OVRD1 0x01E1U /* PMA TX Lane registers */ #define TX_TXCC_CTRL 0x0040U #define TX_TXCC_CPOST_MULT_00 0x004CU +#define TX_TXCC_CPOST_MULT_01 0x004DU #define TX_TXCC_MGNFS_MULT_000 0x0050U #define DRV_DIAG_TX_DRV 0x00C6U #define XCVR_DIAG_PLLDRC_CTRL 0x00E5U #define XCVR_DIAG_HSCLK_SEL 0x00E6U #define XCVR_DIAG_HSCLK_DIV 0x00E7U #define XCVR_DIAG_BIDI_CTRL 0x00EAU +#define XCVR_DIAG_PSC_OVRD 0x00EBU #define TX_PSC_A0 0x0100U +#define TX_PSC_A1 0x0101U #define TX_PSC_A2 0x0102U #define TX_PSC_A3 0x0103U #define TX_RCVDET_ST_TMR 0x0123U @@ -157,23 +179,49 @@ /* PMA RX Lane registers */ #define RX_PSC_A0 0x0000U +#define RX_PSC_A1 0x0001U #define RX_PSC_A2 0x0002U #define RX_PSC_A3 0x0003U #define RX_PSC_CAL 0x0006U +#define RX_CDRLF_CNFG 0x0080U +#define RX_CDRLF_CNFG3 0x0082U +#define RX_SIGDET_HL_FILT_TMR 0x0090U #define RX_REE_GCSM1_CTRL 0x0108U +#define RX_REE_GCSM1_EQENM_PH1 0x0109U +#define RX_REE_GCSM1_EQENM_PH2 0x010AU #define RX_REE_GCSM2_CTRL 0x0110U #define RX_REE_PERGCSM_CTRL 0x0118U +#define RX_REE_ATTEN_THR 0x0149U +#define RX_REE_TAP1_CLIP 0x0171U +#define RX_REE_TAP2TON_CLIP 0x0172U +#define RX_REE_SMGM_CTRL1 0x0177U +#define RX_REE_SMGM_CTRL2 0x0178U +#define RX_DIAG_DFE_CTRL 0x01E0U +#define RX_DIAG_DFE_AMP_TUNE_2 0x01E2U +#define RX_DIAG_DFE_AMP_TUNE_3 0x01E3U +#define RX_DIAG_NQST_CTRL 0x01E5U +#define RX_DIAG_SIGDET_TUNE 0x01E8U +#define RX_DIAG_PI_RATE 0x01F4U +#define RX_DIAG_PI_CAP 0x01F5U +#define RX_DIAG_ACYA 0x01FFU /* PHY PCS common registers */ #define PHY_PLL_CFG 0x000EU +#define PHY_PIPE_USB3_GEN2_PRE_CFG0 0x0020U +#define PHY_PIPE_USB3_GEN2_POST_CFG0 0x0022U +#define PHY_PIPE_USB3_GEN2_POST_CFG1 0x0023U /* PHY PMA common registers */ +#define PHY_PMA_CMN_CTRL1 0x0000U #define PHY_PMA_CMN_CTRL2 0x0001U #define PHY_PMA_PLL_RAW_CTRL 0x0003U static const struct reg_field phy_pll_cfg = REG_FIELD(PHY_PLL_CFG, 0, 1); +static const struct reg_field phy_pma_cmn_ctrl_1 = + REG_FIELD(PHY_PMA_CMN_CTRL1, 0, 0); + static const struct reg_field phy_pma_cmn_ctrl_2 = REG_FIELD(PHY_PMA_CMN_CTRL2, 0, 7); @@ -183,14 +231,28 @@ static const struct reg_field phy_pma_pll_raw_ctrl = static const struct reg_field phy_reset_ctrl = REG_FIELD(PHY_RESET, 8, 8); -static const struct of_device_id cdns_torrent_phy_of_match[]; +enum cdns_torrent_phy_type { + TYPE_NONE, + TYPE_DP, + TYPE_PCIE, + TYPE_SGMII, + TYPE_QSGMII, + TYPE_USB, +}; + +enum cdns_torrent_ssc_mode { + NO_SSC, + EXTERNAL_SSC, + INTERNAL_SSC +}; struct cdns_torrent_inst { struct phy *phy; u32 mlane; - u32 phy_type; + enum cdns_torrent_phy_type phy_type; u32 num_lanes; struct 
reset_control *lnk_rst; + enum cdns_torrent_ssc_mode ssc_mode; }; struct cdns_torrent_phy { @@ -198,11 +260,13 @@ struct cdns_torrent_phy { void __iomem *sd_base; /* SD0801 registers base */ u32 max_bit_rate; /* Maximum link bit rate to use (in Mbps) */ struct reset_control *phy_rst; + struct reset_control *apb_rst; struct device *dev; struct clk *clk; unsigned long ref_clk_rate; struct cdns_torrent_inst phys[MAX_NUM_LANES]; int nsubnodes; + const struct cdns_torrent_data *init_data; struct regmap *regmap; struct regmap *regmap_common_cdb; struct regmap *regmap_phy_pcs_common_cdb; @@ -211,6 +275,7 @@ struct cdns_torrent_phy { struct regmap *regmap_rx_lane_cdb[MAX_NUM_LANES]; struct regmap *regmap_dptx_phy_reg; struct regmap_field *phy_pll_cfg; + struct regmap_field *phy_pma_cmn_ctrl_1; struct regmap_field *phy_pma_cmn_ctrl_2; struct regmap_field *phy_pma_pll_raw_ctrl; struct regmap_field *phy_reset_ctrl; @@ -223,8 +288,8 @@ enum phy_powerstate { POWERSTATE_A3 = 3, }; +static int cdns_torrent_phy_init(struct phy *phy); static int cdns_torrent_dp_init(struct phy *phy); -static int cdns_torrent_dp_exit(struct phy *phy); static int cdns_torrent_dp_run(struct cdns_torrent_phy *cdns_phy, u32 num_lanes); static @@ -254,17 +319,38 @@ static int cdns_torrent_phy_on(struct phy *phy); static int cdns_torrent_phy_off(struct phy *phy); static const struct phy_ops cdns_torrent_phy_ops = { - .init = cdns_torrent_dp_init, - .exit = cdns_torrent_dp_exit, + .init = cdns_torrent_phy_init, .configure = cdns_torrent_dp_configure, .power_on = cdns_torrent_phy_on, .power_off = cdns_torrent_phy_off, .owner = THIS_MODULE, }; +struct cdns_reg_pairs { + u32 val; + u32 off; +}; + +struct cdns_torrent_vals { + struct cdns_reg_pairs *reg_pairs; + u32 num_regs; +}; + struct cdns_torrent_data { - u8 block_offset_shift; - u8 reg_offset_shift; + u8 block_offset_shift; + u8 reg_offset_shift; + struct cdns_torrent_vals *link_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] + [NUM_SSC_MODE]; + struct cdns_torrent_vals *xcvr_diag_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] + [NUM_SSC_MODE]; + struct cdns_torrent_vals *pcs_cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] + [NUM_SSC_MODE]; + struct cdns_torrent_vals *cmn_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] + [NUM_SSC_MODE]; + struct cdns_torrent_vals *tx_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] + [NUM_SSC_MODE]; + struct cdns_torrent_vals *rx_ln_vals[NUM_PHY_TYPE][NUM_PHY_TYPE] + [NUM_SSC_MODE]; }; struct cdns_regmap_cdb_context { @@ -331,21 +417,21 @@ static int cdns_regmap_dptx_read(void *context, unsigned int reg, .reg_read = cdns_regmap_read, \ } -static struct regmap_config cdns_torrent_tx_lane_cdb_config[] = { +static const struct regmap_config cdns_torrent_tx_lane_cdb_config[] = { TORRENT_TX_LANE_CDB_REGMAP_CONF("0"), TORRENT_TX_LANE_CDB_REGMAP_CONF("1"), TORRENT_TX_LANE_CDB_REGMAP_CONF("2"), TORRENT_TX_LANE_CDB_REGMAP_CONF("3"), }; -static struct regmap_config cdns_torrent_rx_lane_cdb_config[] = { +static const struct regmap_config cdns_torrent_rx_lane_cdb_config[] = { TORRENT_RX_LANE_CDB_REGMAP_CONF("0"), TORRENT_RX_LANE_CDB_REGMAP_CONF("1"), TORRENT_RX_LANE_CDB_REGMAP_CONF("2"), TORRENT_RX_LANE_CDB_REGMAP_CONF("3"), }; -static struct regmap_config cdns_torrent_common_cdb_config = { +static const struct regmap_config cdns_torrent_common_cdb_config = { .name = "torrent_common_cdb", .reg_stride = 1, .fast_io = true, @@ -353,7 +439,7 @@ static struct regmap_config cdns_torrent_common_cdb_config = { .reg_read = cdns_regmap_read, }; -static struct regmap_config cdns_torrent_phy_pcs_cmn_cdb_config = { +static const 
struct regmap_config cdns_torrent_phy_pcs_cmn_cdb_config = { .name = "torrent_phy_pcs_cmn_cdb", .reg_stride = 1, .fast_io = true, @@ -361,7 +447,7 @@ static struct regmap_config cdns_torrent_phy_pcs_cmn_cdb_config = { .reg_read = cdns_regmap_read, }; -static struct regmap_config cdns_torrent_phy_pma_cmn_cdb_config = { +static const struct regmap_config cdns_torrent_phy_pma_cmn_cdb_config = { .name = "torrent_phy_pma_cmn_cdb", .reg_stride = 1, .fast_io = true, @@ -369,7 +455,7 @@ static struct regmap_config cdns_torrent_phy_pma_cmn_cdb_config = { .reg_read = cdns_regmap_read, }; -static struct regmap_config cdns_torrent_dptx_phy_config = { +static const struct regmap_config cdns_torrent_dptx_phy_config = { .name = "torrent_dptx_phy", .reg_stride = 1, .fast_io = true, @@ -848,19 +934,6 @@ static int cdns_torrent_dp_init(struct phy *phy) struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent); struct regmap *regmap = cdns_phy->regmap_dptx_phy_reg; - ret = clk_prepare_enable(cdns_phy->clk); - if (ret) { - dev_err(cdns_phy->dev, "Failed to prepare ref clock\n"); - return ret; - } - - cdns_phy->ref_clk_rate = clk_get_rate(cdns_phy->clk); - if (!(cdns_phy->ref_clk_rate)) { - dev_err(cdns_phy->dev, "Failed to get ref clock rate\n"); - clk_disable_unprepare(cdns_phy->clk); - return -EINVAL; - } - switch (cdns_phy->ref_clk_rate) { case REF_CLK_19_2MHz: case REF_CLK_25MHz: @@ -920,14 +993,6 @@ static int cdns_torrent_dp_init(struct phy *phy) return ret; } -static int cdns_torrent_dp_exit(struct phy *phy) -{ - struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent); - - clk_disable_unprepare(cdns_phy->clk); - return 0; -} - static int cdns_torrent_dp_wait_pma_cmn_ready(struct cdns_torrent_phy *cdns_phy) { @@ -1543,15 +1608,34 @@ static int cdns_torrent_phy_on(struct phy *phy) { struct cdns_torrent_inst *inst = phy_get_drvdata(phy); struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent); + u32 read_val; int ret; - /* Take the PHY out of reset */ - ret = reset_control_deassert(cdns_phy->phy_rst); - if (ret) + if (cdns_phy->nsubnodes == 1) { + /* Take the PHY lane group out of reset */ + reset_control_deassert(inst->lnk_rst); + + /* Take the PHY out of reset */ + ret = reset_control_deassert(cdns_phy->phy_rst); + if (ret) + return ret; + } + + /* + * Wait for cmn_ready assertion + * PHY_PMA_CMN_CTRL1[0] == 1 + */ + ret = regmap_field_read_poll_timeout(cdns_phy->phy_pma_cmn_ctrl_1, + read_val, read_val, 1000, + PLL_LOCK_TIMEOUT); + if (ret) { + dev_err(cdns_phy->dev, "Timeout waiting for CMN ready\n"); return ret; + } + + mdelay(10); - /* Take the PHY lane group out of reset */ - return reset_control_deassert(inst->lnk_rst); + return 0; } static int cdns_torrent_phy_off(struct phy *phy) @@ -1560,6 +1644,9 @@ static int cdns_torrent_phy_off(struct phy *phy) struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent); int ret; + if (cdns_phy->nsubnodes != 1) + return 0; + ret = reset_control_assert(cdns_phy->phy_rst); if (ret) return ret; @@ -1585,7 +1672,24 @@ static struct regmap *cdns_regmap_init(struct device *dev, void __iomem *base, return devm_regmap_init(dev, NULL, ctx, config); } -static int cdns_regfield_init(struct cdns_torrent_phy *cdns_phy) +static int cdns_torrent_dp_regfield_init(struct cdns_torrent_phy *cdns_phy) +{ + struct device *dev = cdns_phy->dev; + struct regmap_field *field; + struct regmap *regmap; + + regmap = cdns_phy->regmap_dptx_phy_reg; + field = devm_regmap_field_alloc(dev, regmap, phy_reset_ctrl); + if (IS_ERR(field)) { + 
dev_err(dev, "PHY_RESET reg field init failed\n"); + return PTR_ERR(field); + } + cdns_phy->phy_reset_ctrl = field; + + return 0; +} + +static int cdns_torrent_regfield_init(struct cdns_torrent_phy *cdns_phy) { struct device *dev = cdns_phy->dev; struct regmap_field *field; @@ -1600,6 +1704,14 @@ static int cdns_regfield_init(struct cdns_torrent_phy *cdns_phy) cdns_phy->phy_pll_cfg = field; regmap = cdns_phy->regmap_phy_pma_common_cdb; + field = devm_regmap_field_alloc(dev, regmap, phy_pma_cmn_ctrl_1); + if (IS_ERR(field)) { + dev_err(dev, "PHY_PMA_CMN_CTRL1 reg field init failed\n"); + return PTR_ERR(field); + } + cdns_phy->phy_pma_cmn_ctrl_1 = field; + + regmap = cdns_phy->regmap_phy_pma_common_cdb; field = devm_regmap_field_alloc(dev, regmap, phy_pma_cmn_ctrl_2); if (IS_ERR(field)) { dev_err(dev, "PHY_PMA_CMN_CTRL2 reg field init failed\n"); @@ -1615,28 +1727,44 @@ static int cdns_regfield_init(struct cdns_torrent_phy *cdns_phy) } cdns_phy->phy_pma_pll_raw_ctrl = field; - regmap = cdns_phy->regmap_dptx_phy_reg; - field = devm_regmap_field_alloc(dev, regmap, phy_reset_ctrl); - if (IS_ERR(field)) { - dev_err(dev, "PHY_RESET reg field init failed\n"); - return PTR_ERR(field); + return 0; +} + +static int cdns_torrent_dp_regmap_init(struct cdns_torrent_phy *cdns_phy) +{ + void __iomem *base = cdns_phy->base; + struct device *dev = cdns_phy->dev; + struct regmap *regmap; + u8 reg_offset_shift; + u32 block_offset; + + reg_offset_shift = cdns_phy->init_data->reg_offset_shift; + + block_offset = TORRENT_DPTX_PHY_OFFSET; + regmap = cdns_regmap_init(dev, base, block_offset, + reg_offset_shift, + &cdns_torrent_dptx_phy_config); + if (IS_ERR(regmap)) { + dev_err(dev, "Failed to init DPTX PHY regmap\n"); + return PTR_ERR(regmap); } - cdns_phy->phy_reset_ctrl = field; + cdns_phy->regmap_dptx_phy_reg = regmap; return 0; } -static int cdns_regmap_init_torrent_dp(struct cdns_torrent_phy *cdns_phy, - void __iomem *sd_base, - void __iomem *base, - u8 block_offset_shift, - u8 reg_offset_shift) +static int cdns_torrent_regmap_init(struct cdns_torrent_phy *cdns_phy) { + void __iomem *sd_base = cdns_phy->sd_base; + u8 block_offset_shift, reg_offset_shift; struct device *dev = cdns_phy->dev; struct regmap *regmap; u32 block_offset; int i; + block_offset_shift = cdns_phy->init_data->block_offset_shift; + reg_offset_shift = cdns_phy->init_data->reg_offset_shift; + for (i = 0; i < MAX_NUM_LANES; i++) { block_offset = TORRENT_TX_LANE_CDB_OFFSET(i, block_offset_shift, reg_offset_shift); @@ -1691,43 +1819,282 @@ static int cdns_regmap_init_torrent_dp(struct cdns_torrent_phy *cdns_phy, } cdns_phy->regmap_phy_pma_common_cdb = regmap; - block_offset = TORRENT_DPTX_PHY_OFFSET; - regmap = cdns_regmap_init(dev, base, block_offset, - reg_offset_shift, - &cdns_torrent_dptx_phy_config); - if (IS_ERR(regmap)) { - dev_err(dev, "Failed to init DPTX PHY regmap\n"); - return PTR_ERR(regmap); + return 0; +} + +static int cdns_torrent_phy_init(struct phy *phy) +{ + struct cdns_torrent_phy *cdns_phy = dev_get_drvdata(phy->dev.parent); + const struct cdns_torrent_data *init_data = cdns_phy->init_data; + struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals; + struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals; + struct cdns_torrent_inst *inst = phy_get_drvdata(phy); + enum cdns_torrent_phy_type phy_type = inst->phy_type; + enum cdns_torrent_ssc_mode ssc = inst->ssc_mode; + struct cdns_torrent_vals *pcs_cmn_vals; + struct cdns_reg_pairs *reg_pairs; + struct regmap *regmap; + u32 num_regs; + int i, j; + + if 
(cdns_phy->nsubnodes > 1) + return 0; + + if (phy_type == TYPE_DP) + return cdns_torrent_dp_init(phy); + + /** + * Spread spectrum generation is not required or supported + * for SGMII/QSGMII + */ + if (phy_type == TYPE_SGMII || phy_type == TYPE_QSGMII) + ssc = NO_SSC; + + /* PHY configuration specific registers for single link */ + link_cmn_vals = init_data->link_cmn_vals[phy_type][TYPE_NONE][ssc]; + if (link_cmn_vals) { + reg_pairs = link_cmn_vals->reg_pairs; + num_regs = link_cmn_vals->num_regs; + regmap = cdns_phy->regmap_common_cdb; + + /** + * First array value in link_cmn_vals must be of + * PHY_PLL_CFG register + */ + regmap_field_write(cdns_phy->phy_pll_cfg, reg_pairs[0].val); + + for (i = 1; i < num_regs; i++) + regmap_write(regmap, reg_pairs[i].off, + reg_pairs[i].val); + } + + xcvr_diag_vals = init_data->xcvr_diag_vals[phy_type][TYPE_NONE][ssc]; + if (xcvr_diag_vals) { + reg_pairs = xcvr_diag_vals->reg_pairs; + num_regs = xcvr_diag_vals->num_regs; + for (i = 0; i < inst->num_lanes; i++) { + regmap = cdns_phy->regmap_tx_lane_cdb[i + inst->mlane]; + for (j = 0; j < num_regs; j++) + regmap_write(regmap, reg_pairs[j].off, + reg_pairs[j].val); + } + } + + /* PHY PCS common registers configurations */ + pcs_cmn_vals = init_data->pcs_cmn_vals[phy_type][TYPE_NONE][ssc]; + if (pcs_cmn_vals) { + reg_pairs = pcs_cmn_vals->reg_pairs; + num_regs = pcs_cmn_vals->num_regs; + regmap = cdns_phy->regmap_phy_pcs_common_cdb; + for (i = 0; i < num_regs; i++) + regmap_write(regmap, reg_pairs[i].off, + reg_pairs[i].val); + } + + /* PMA common registers configurations */ + cmn_vals = init_data->cmn_vals[phy_type][TYPE_NONE][ssc]; + if (cmn_vals) { + reg_pairs = cmn_vals->reg_pairs; + num_regs = cmn_vals->num_regs; + regmap = cdns_phy->regmap_common_cdb; + for (i = 0; i < num_regs; i++) + regmap_write(regmap, reg_pairs[i].off, + reg_pairs[i].val); + } + + /* PMA TX lane registers configurations */ + tx_ln_vals = init_data->tx_ln_vals[phy_type][TYPE_NONE][ssc]; + if (tx_ln_vals) { + reg_pairs = tx_ln_vals->reg_pairs; + num_regs = tx_ln_vals->num_regs; + for (i = 0; i < inst->num_lanes; i++) { + regmap = cdns_phy->regmap_tx_lane_cdb[i + inst->mlane]; + for (j = 0; j < num_regs; j++) + regmap_write(regmap, reg_pairs[j].off, + reg_pairs[j].val); + } + } + + /* PMA RX lane registers configurations */ + rx_ln_vals = init_data->rx_ln_vals[phy_type][TYPE_NONE][ssc]; + if (rx_ln_vals) { + reg_pairs = rx_ln_vals->reg_pairs; + num_regs = rx_ln_vals->num_regs; + for (i = 0; i < inst->num_lanes; i++) { + regmap = cdns_phy->regmap_rx_lane_cdb[i + inst->mlane]; + for (j = 0; j < num_regs; j++) + regmap_write(regmap, reg_pairs[j].off, + reg_pairs[j].val); + } } - cdns_phy->regmap_dptx_phy_reg = regmap; + + return 0; +} + +static +int cdns_torrent_phy_configure_multilink(struct cdns_torrent_phy *cdns_phy) +{ + const struct cdns_torrent_data *init_data = cdns_phy->init_data; + struct cdns_torrent_vals *cmn_vals, *tx_ln_vals, *rx_ln_vals; + struct cdns_torrent_vals *link_cmn_vals, *xcvr_diag_vals; + enum cdns_torrent_phy_type phy_t1, phy_t2, tmp_phy_type; + struct cdns_torrent_vals *pcs_cmn_vals; + int i, j, node, mlane, num_lanes, ret; + struct cdns_reg_pairs *reg_pairs; + enum cdns_torrent_ssc_mode ssc; + struct regmap *regmap; + u32 num_regs; + + /* Maximum 2 links (subnodes) are supported */ + if (cdns_phy->nsubnodes != 2) + return -EINVAL; + + phy_t1 = cdns_phy->phys[0].phy_type; + phy_t2 = cdns_phy->phys[1].phy_type; + + /** + * First configure the PHY for first link with phy_t1. 
Get the array + * values as [phy_t1][phy_t2][ssc]. + */ + for (node = 0; node < cdns_phy->nsubnodes; node++) { + if (node == 1) { + /** + * If first link with phy_t1 is configured, then + * configure the PHY for second link with phy_t2. + * Get the array values as [phy_t2][phy_t1][ssc]. + */ + tmp_phy_type = phy_t1; + phy_t1 = phy_t2; + phy_t2 = tmp_phy_type; + } + + mlane = cdns_phy->phys[node].mlane; + ssc = cdns_phy->phys[node].ssc_mode; + num_lanes = cdns_phy->phys[node].num_lanes; + + /** + * PHY configuration specific registers: + * link_cmn_vals depend on combination of PHY types being + * configured and are common for both PHY types, so array + * values should be same for [phy_t1][phy_t2][ssc] and + * [phy_t2][phy_t1][ssc]. + * xcvr_diag_vals also depend on combination of PHY types + * being configured, but these can be different for particular + * PHY type and are per lane. + */ + link_cmn_vals = init_data->link_cmn_vals[phy_t1][phy_t2][ssc]; + if (link_cmn_vals) { + reg_pairs = link_cmn_vals->reg_pairs; + num_regs = link_cmn_vals->num_regs; + regmap = cdns_phy->regmap_common_cdb; + + /** + * First array value in link_cmn_vals must be of + * PHY_PLL_CFG register + */ + regmap_field_write(cdns_phy->phy_pll_cfg, + reg_pairs[0].val); + + for (i = 1; i < num_regs; i++) + regmap_write(regmap, reg_pairs[i].off, + reg_pairs[i].val); + } + + xcvr_diag_vals = init_data->xcvr_diag_vals[phy_t1][phy_t2][ssc]; + if (xcvr_diag_vals) { + reg_pairs = xcvr_diag_vals->reg_pairs; + num_regs = xcvr_diag_vals->num_regs; + for (i = 0; i < num_lanes; i++) { + regmap = cdns_phy->regmap_tx_lane_cdb[i + mlane]; + for (j = 0; j < num_regs; j++) + regmap_write(regmap, reg_pairs[j].off, + reg_pairs[j].val); + } + } + + /* PHY PCS common registers configurations */ + pcs_cmn_vals = init_data->pcs_cmn_vals[phy_t1][phy_t2][ssc]; + if (pcs_cmn_vals) { + reg_pairs = pcs_cmn_vals->reg_pairs; + num_regs = pcs_cmn_vals->num_regs; + regmap = cdns_phy->regmap_phy_pcs_common_cdb; + for (i = 0; i < num_regs; i++) + regmap_write(regmap, reg_pairs[i].off, + reg_pairs[i].val); + } + + /* PMA common registers configurations */ + cmn_vals = init_data->cmn_vals[phy_t1][phy_t2][ssc]; + if (cmn_vals) { + reg_pairs = cmn_vals->reg_pairs; + num_regs = cmn_vals->num_regs; + regmap = cdns_phy->regmap_common_cdb; + for (i = 0; i < num_regs; i++) + regmap_write(regmap, reg_pairs[i].off, + reg_pairs[i].val); + } + + /* PMA TX lane registers configurations */ + tx_ln_vals = init_data->tx_ln_vals[phy_t1][phy_t2][ssc]; + if (tx_ln_vals) { + reg_pairs = tx_ln_vals->reg_pairs; + num_regs = tx_ln_vals->num_regs; + for (i = 0; i < num_lanes; i++) { + regmap = cdns_phy->regmap_tx_lane_cdb[i + mlane]; + for (j = 0; j < num_regs; j++) + regmap_write(regmap, reg_pairs[j].off, + reg_pairs[j].val); + } + } + + /* PMA RX lane registers configurations */ + rx_ln_vals = init_data->rx_ln_vals[phy_t1][phy_t2][ssc]; + if (rx_ln_vals) { + reg_pairs = rx_ln_vals->reg_pairs; + num_regs = rx_ln_vals->num_regs; + for (i = 0; i < num_lanes; i++) { + regmap = cdns_phy->regmap_rx_lane_cdb[i + mlane]; + for (j = 0; j < num_regs; j++) + regmap_write(regmap, reg_pairs[j].off, + reg_pairs[j].val); + } + } + + reset_control_deassert(cdns_phy->phys[node].lnk_rst); + } + + /* Take the PHY out of reset */ + ret = reset_control_deassert(cdns_phy->phy_rst); + if (ret) + return ret; return 0; } static int cdns_torrent_phy_probe(struct platform_device *pdev) { - struct resource *regs; struct cdns_torrent_phy *cdns_phy; struct device *dev = &pdev->dev; struct phy_provider 
*phy_provider; - const struct of_device_id *match; - struct cdns_torrent_data *data; + const struct cdns_torrent_data *data; struct device_node *child; int ret, subnodes, node = 0, i; + u32 total_num_lanes = 0; + u8 init_dp_regmap = 0; + u32 phy_type; /* Get init data for this PHY */ - match = of_match_device(cdns_torrent_phy_of_match, dev); - if (!match) + data = of_device_get_match_data(dev); + if (!data) return -EINVAL; - data = (struct cdns_torrent_data *)match->data; - cdns_phy = devm_kzalloc(dev, sizeof(*cdns_phy), GFP_KERNEL); if (!cdns_phy) return -ENOMEM; dev_set_drvdata(dev, cdns_phy); cdns_phy->dev = dev; + cdns_phy->init_data = data; cdns_phy->phy_rst = devm_reset_control_get_exclusive_by_index(dev, 0); if (IS_ERR(cdns_phy->phy_rst)) { @@ -1736,14 +2103,20 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev) return PTR_ERR(cdns_phy->phy_rst); } + cdns_phy->apb_rst = devm_reset_control_get_optional(dev, "torrent_apb"); + if (IS_ERR(cdns_phy->apb_rst)) { + dev_err(dev, "%s: failed to get apb reset\n", + dev->of_node->full_name); + return PTR_ERR(cdns_phy->apb_rst); + } + cdns_phy->clk = devm_clk_get(dev, "refclk"); if (IS_ERR(cdns_phy->clk)) { dev_err(dev, "phy ref clock not found\n"); return PTR_ERR(cdns_phy->clk); } - regs = platform_get_resource(pdev, IORESOURCE_MEM, 0); - cdns_phy->sd_base = devm_ioremap_resource(&pdev->dev, regs); + cdns_phy->sd_base = devm_platform_ioremap_resource(pdev, 0); if (IS_ERR(cdns_phy->sd_base)) return PTR_ERR(cdns_phy->sd_base); @@ -1751,14 +2124,39 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev) if (subnodes == 0) { dev_err(dev, "No available link subnodes found\n"); return -EINVAL; - } else if (subnodes != 1) { - dev_err(dev, "Driver supports only one link subnode.\n"); + } + + ret = cdns_torrent_regmap_init(cdns_phy); + if (ret) + return ret; + + ret = cdns_torrent_regfield_init(cdns_phy); + if (ret) + return ret; + + ret = clk_prepare_enable(cdns_phy->clk); + if (ret) { + dev_err(cdns_phy->dev, "Failed to prepare ref clock\n"); + return ret; + } + + cdns_phy->ref_clk_rate = clk_get_rate(cdns_phy->clk); + if (!(cdns_phy->ref_clk_rate)) { + dev_err(cdns_phy->dev, "Failed to get ref clock rate\n"); + clk_disable_unprepare(cdns_phy->clk); return -EINVAL; } + /* Enable APB */ + reset_control_deassert(cdns_phy->apb_rst); + for_each_available_child_of_node(dev->of_node, child) { struct phy *gphy; + /* PHY subnode name must be 'phy'. 
*/ + if (!(of_node_name_eq(child, "phy"))) + continue; + cdns_phy->phys[node].lnk_rst = of_reset_control_array_get_exclusive(child); if (IS_ERR(cdns_phy->phys[node].lnk_rst)) { @@ -1776,27 +2174,57 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev) goto put_child; } - if (cdns_phy->phys[node].mlane != 0) { - dev_err(dev, - "%s: Driver supports only lane-0 as master lane.\n", + if (of_property_read_u32(child, "cdns,phy-type", &phy_type)) { + dev_err(dev, "%s: No \"cdns,phy-type\"-property.\n", child->full_name); ret = -EINVAL; goto put_child; } - if (of_property_read_u32(child, "cdns,phy-type", - &cdns_phy->phys[node].phy_type)) { - dev_err(dev, "%s: No \"cdns,phy-type\"-property.\n", + switch (phy_type) { + case PHY_TYPE_PCIE: + cdns_phy->phys[node].phy_type = TYPE_PCIE; + break; + case PHY_TYPE_DP: + cdns_phy->phys[node].phy_type = TYPE_DP; + break; + case PHY_TYPE_SGMII: + cdns_phy->phys[node].phy_type = TYPE_SGMII; + break; + case PHY_TYPE_QSGMII: + cdns_phy->phys[node].phy_type = TYPE_QSGMII; + break; + case PHY_TYPE_USB3: + cdns_phy->phys[node].phy_type = TYPE_USB; + break; + default: + dev_err(dev, "Unsupported protocol\n"); + ret = -EINVAL; + goto put_child; + } + + if (of_property_read_u32(child, "cdns,num-lanes", + &cdns_phy->phys[node].num_lanes)) { + dev_err(dev, "%s: No \"cdns,num-lanes\"-property.\n", child->full_name); ret = -EINVAL; goto put_child; } - cdns_phy->phys[node].num_lanes = DEFAULT_NUM_LANES; - of_property_read_u32(child, "cdns,num-lanes", - &cdns_phy->phys[node].num_lanes); + total_num_lanes += cdns_phy->phys[node].num_lanes; + + /* Get SSC mode */ + cdns_phy->phys[node].ssc_mode = NO_SSC; + of_property_read_u32(child, "cdns,ssc-mode", + &cdns_phy->phys[node].ssc_mode); + + gphy = devm_phy_create(dev, child, &cdns_torrent_phy_ops); + if (IS_ERR(gphy)) { + ret = PTR_ERR(gphy); + goto put_child; + } - if (cdns_phy->phys[node].phy_type == PHY_TYPE_DP) { + if (cdns_phy->phys[node].phy_type == TYPE_DP) { switch (cdns_phy->phys[node].num_lanes) { case 1: case 2: @@ -1833,30 +2261,34 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev) } /* DPTX registers */ - regs = platform_get_resource(pdev, IORESOURCE_MEM, 1); - cdns_phy->base = devm_ioremap_resource(&pdev->dev, - regs); + cdns_phy->base = devm_platform_ioremap_resource(pdev, 1); if (IS_ERR(cdns_phy->base)) { ret = PTR_ERR(cdns_phy->base); goto put_child; } - gphy = devm_phy_create(dev, child, - &cdns_torrent_phy_ops); - if (IS_ERR(gphy)) { - ret = PTR_ERR(gphy); - goto put_child; + if (!init_dp_regmap) { + ret = cdns_torrent_dp_regmap_init(cdns_phy); + if (ret) + goto put_child; + + ret = cdns_torrent_dp_regfield_init(cdns_phy); + if (ret) + goto put_child; + + init_dp_regmap++; } dev_info(dev, "%d lanes, max bit rate %d.%03d Gbps\n", cdns_phy->phys[node].num_lanes, cdns_phy->max_bit_rate / 1000, cdns_phy->max_bit_rate % 1000); - } else { - dev_err(dev, "Driver supports only PHY_TYPE_DP\n"); - ret = -ENOTSUPP; - goto put_child; + + gphy->attrs.bus_width = cdns_phy->phys[node].num_lanes; + gphy->attrs.max_link_rate = cdns_phy->max_bit_rate; + gphy->attrs.mode = PHY_MODE_DP; } + cdns_phy->phys[node].phy = gphy; phy_set_drvdata(gphy, &cdns_phy->phys[node]); @@ -1864,16 +2296,16 @@ static int cdns_torrent_phy_probe(struct platform_device *pdev) } cdns_phy->nsubnodes = node; - ret = cdns_regmap_init_torrent_dp(cdns_phy, cdns_phy->sd_base, - cdns_phy->base, - data->block_offset_shift, - data->reg_offset_shift); - if (ret) + if (total_num_lanes > MAX_NUM_LANES) { + dev_err(dev, "Invalid lane 
configuration\n"); goto put_lnk_rst; + } - ret = cdns_regfield_init(cdns_phy); - if (ret) - goto put_lnk_rst; + if (cdns_phy->nsubnodes > 1) { + ret = cdns_torrent_phy_configure_multilink(cdns_phy); + if (ret) + goto put_lnk_rst; + } phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); if (IS_ERR(phy_provider)) { @@ -1889,6 +2321,8 @@ put_lnk_rst: for (i = 0; i < node; i++) reset_control_put(cdns_phy->phys[i].lnk_rst); of_node_put(child); + reset_control_assert(cdns_phy->apb_rst); + clk_disable_unprepare(cdns_phy->clk); return ret; } @@ -1898,22 +2332,1505 @@ static int cdns_torrent_phy_remove(struct platform_device *pdev) int i; reset_control_assert(cdns_phy->phy_rst); + reset_control_assert(cdns_phy->apb_rst); for (i = 0; i < cdns_phy->nsubnodes; i++) { reset_control_assert(cdns_phy->phys[i].lnk_rst); reset_control_put(cdns_phy->phys[i].lnk_rst); } + clk_disable_unprepare(cdns_phy->clk); + return 0; } +/* USB and SGMII/QSGMII link configuration */ +static struct cdns_reg_pairs usb_sgmii_link_cmn_regs[] = { + {0x0002, PHY_PLL_CFG}, + {0x8600, CMN_PDIAG_PLL0_CLK_SEL_M0}, + {0x0601, CMN_PDIAG_PLL1_CLK_SEL_M0} +}; + +static struct cdns_reg_pairs usb_sgmii_xcvr_diag_ln_regs[] = { + {0x0000, XCVR_DIAG_HSCLK_SEL}, + {0x0001, XCVR_DIAG_HSCLK_DIV}, + {0x0041, XCVR_DIAG_PLLDRC_CTRL} +}; + +static struct cdns_reg_pairs sgmii_usb_xcvr_diag_ln_regs[] = { + {0x0011, XCVR_DIAG_HSCLK_SEL}, + {0x0003, XCVR_DIAG_HSCLK_DIV}, + {0x009B, XCVR_DIAG_PLLDRC_CTRL} +}; + +static struct cdns_torrent_vals usb_sgmii_link_cmn_vals = { + .reg_pairs = usb_sgmii_link_cmn_regs, + .num_regs = ARRAY_SIZE(usb_sgmii_link_cmn_regs), +}; + +static struct cdns_torrent_vals usb_sgmii_xcvr_diag_ln_vals = { + .reg_pairs = usb_sgmii_xcvr_diag_ln_regs, + .num_regs = ARRAY_SIZE(usb_sgmii_xcvr_diag_ln_regs), +}; + +static struct cdns_torrent_vals sgmii_usb_xcvr_diag_ln_vals = { + .reg_pairs = sgmii_usb_xcvr_diag_ln_regs, + .num_regs = ARRAY_SIZE(sgmii_usb_xcvr_diag_ln_regs), +}; + +/* PCIe and USB Unique SSC link configuration */ +static struct cdns_reg_pairs pcie_usb_link_cmn_regs[] = { + {0x0003, PHY_PLL_CFG}, + {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0}, + {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1}, + {0x8600, CMN_PDIAG_PLL1_CLK_SEL_M0} +}; + +static struct cdns_reg_pairs pcie_usb_xcvr_diag_ln_regs[] = { + {0x0000, XCVR_DIAG_HSCLK_SEL}, + {0x0001, XCVR_DIAG_HSCLK_DIV}, + {0x0012, XCVR_DIAG_PLLDRC_CTRL} +}; + +static struct cdns_reg_pairs usb_pcie_xcvr_diag_ln_regs[] = { + {0x0011, XCVR_DIAG_HSCLK_SEL}, + {0x0001, XCVR_DIAG_HSCLK_DIV}, + {0x00C9, XCVR_DIAG_PLLDRC_CTRL} +}; + +static struct cdns_torrent_vals pcie_usb_link_cmn_vals = { + .reg_pairs = pcie_usb_link_cmn_regs, + .num_regs = ARRAY_SIZE(pcie_usb_link_cmn_regs), +}; + +static struct cdns_torrent_vals pcie_usb_xcvr_diag_ln_vals = { + .reg_pairs = pcie_usb_xcvr_diag_ln_regs, + .num_regs = ARRAY_SIZE(pcie_usb_xcvr_diag_ln_regs), +}; + +static struct cdns_torrent_vals usb_pcie_xcvr_diag_ln_vals = { + .reg_pairs = usb_pcie_xcvr_diag_ln_regs, + .num_regs = ARRAY_SIZE(usb_pcie_xcvr_diag_ln_regs), +}; + +/* USB 100 MHz Ref clk, internal SSC */ +static struct cdns_reg_pairs usb_100_int_ssc_cmn_regs[] = { + {0x0004, CMN_PLL0_DSM_DIAG_M0}, + {0x0004, CMN_PLL0_DSM_DIAG_M1}, + {0x0004, CMN_PLL1_DSM_DIAG_M0}, + {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0}, + {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M1}, + {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0}, + {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0}, + {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M1}, + {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0}, + {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0}, + 
{0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M1}, + {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0}, + {0x0064, CMN_PLL0_INTDIV_M0}, + {0x0050, CMN_PLL0_INTDIV_M1}, + {0x0064, CMN_PLL1_INTDIV_M0}, + {0x0002, CMN_PLL0_FRACDIVH_M0}, + {0x0002, CMN_PLL0_FRACDIVH_M1}, + {0x0002, CMN_PLL1_FRACDIVH_M0}, + {0x0044, CMN_PLL0_HIGH_THR_M0}, + {0x0036, CMN_PLL0_HIGH_THR_M1}, + {0x0044, CMN_PLL1_HIGH_THR_M0}, + {0x0002, CMN_PDIAG_PLL0_CTRL_M0}, + {0x0002, CMN_PDIAG_PLL0_CTRL_M1}, + {0x0002, CMN_PDIAG_PLL1_CTRL_M0}, + {0x0001, CMN_PLL0_SS_CTRL1_M0}, + {0x0001, CMN_PLL0_SS_CTRL1_M1}, + {0x0001, CMN_PLL1_SS_CTRL1_M0}, + {0x011B, CMN_PLL0_SS_CTRL2_M0}, + {0x011B, CMN_PLL0_SS_CTRL2_M1}, + {0x011B, CMN_PLL1_SS_CTRL2_M0}, + {0x006E, CMN_PLL0_SS_CTRL3_M0}, + {0x0058, CMN_PLL0_SS_CTRL3_M1}, + {0x006E, CMN_PLL1_SS_CTRL3_M0}, + {0x000E, CMN_PLL0_SS_CTRL4_M0}, + {0x0012, CMN_PLL0_SS_CTRL4_M1}, + {0x000E, CMN_PLL1_SS_CTRL4_M0}, + {0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START}, + {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START}, + {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START}, + {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START}, + {0x0003, CMN_PLL0_VCOCAL_TCTRL}, + {0x0003, CMN_PLL1_VCOCAL_TCTRL}, + {0x00C7, CMN_PLL0_LOCK_REFCNT_START}, + {0x00C7, CMN_PLL1_LOCK_REFCNT_START}, + {0x00C7, CMN_PLL0_LOCK_PLLCNT_START}, + {0x00C7, CMN_PLL1_LOCK_PLLCNT_START}, + {0x0005, CMN_PLL0_LOCK_PLLCNT_THR}, + {0x0005, CMN_PLL1_LOCK_PLLCNT_THR}, + {0x8200, CMN_CDIAG_CDB_PWRI_OVRD}, + {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD} +}; + +static struct cdns_torrent_vals usb_100_int_ssc_cmn_vals = { + .reg_pairs = usb_100_int_ssc_cmn_regs, + .num_regs = ARRAY_SIZE(usb_100_int_ssc_cmn_regs), +}; + +/* Single USB link configuration */ +static struct cdns_reg_pairs sl_usb_link_cmn_regs[] = { + {0x0000, PHY_PLL_CFG}, + {0x8600, CMN_PDIAG_PLL0_CLK_SEL_M0} +}; + +static struct cdns_reg_pairs sl_usb_xcvr_diag_ln_regs[] = { + {0x0000, XCVR_DIAG_HSCLK_SEL}, + {0x0001, XCVR_DIAG_HSCLK_DIV}, + {0x0041, XCVR_DIAG_PLLDRC_CTRL} +}; + +static struct cdns_torrent_vals sl_usb_link_cmn_vals = { + .reg_pairs = sl_usb_link_cmn_regs, + .num_regs = ARRAY_SIZE(sl_usb_link_cmn_regs), +}; + +static struct cdns_torrent_vals sl_usb_xcvr_diag_ln_vals = { + .reg_pairs = sl_usb_xcvr_diag_ln_regs, + .num_regs = ARRAY_SIZE(sl_usb_xcvr_diag_ln_regs), +}; + +/* USB PHY PCS common configuration */ +static struct cdns_reg_pairs usb_phy_pcs_cmn_regs[] = { + {0x0A0A, PHY_PIPE_USB3_GEN2_PRE_CFG0}, + {0x1000, PHY_PIPE_USB3_GEN2_POST_CFG0}, + {0x0010, PHY_PIPE_USB3_GEN2_POST_CFG1} +}; + +static struct cdns_torrent_vals usb_phy_pcs_cmn_vals = { + .reg_pairs = usb_phy_pcs_cmn_regs, + .num_regs = ARRAY_SIZE(usb_phy_pcs_cmn_regs), +}; + +/* USB 100 MHz Ref clk, no SSC */ +static struct cdns_reg_pairs usb_100_no_ssc_cmn_regs[] = { + {0x0003, CMN_PLL0_VCOCAL_TCTRL}, + {0x0003, CMN_PLL1_VCOCAL_TCTRL}, + {0x8200, CMN_CDIAG_CDB_PWRI_OVRD}, + {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD} +}; + +static struct cdns_reg_pairs usb_100_no_ssc_tx_ln_regs[] = { + {0x02FF, TX_PSC_A0}, + {0x06AF, TX_PSC_A1}, + {0x06AE, TX_PSC_A2}, + {0x06AE, TX_PSC_A3}, + {0x2A82, TX_TXCC_CTRL}, + {0x0014, TX_TXCC_CPOST_MULT_01}, + {0x0003, XCVR_DIAG_PSC_OVRD} +}; + +static struct cdns_reg_pairs usb_100_no_ssc_rx_ln_regs[] = { + {0x0D1D, RX_PSC_A0}, + {0x0D1D, RX_PSC_A1}, + {0x0D00, RX_PSC_A2}, + {0x0500, RX_PSC_A3}, + {0x0013, RX_SIGDET_HL_FILT_TMR}, + {0x0000, RX_REE_GCSM1_CTRL}, + {0x0C02, RX_REE_ATTEN_THR}, + {0x0330, RX_REE_SMGM_CTRL1}, + {0x0300, RX_REE_SMGM_CTRL2}, + {0x0019, RX_REE_TAP1_CLIP}, + {0x0019, RX_REE_TAP2TON_CLIP}, + {0x1004, RX_DIAG_SIGDET_TUNE}, + {0x00F9, 
RX_DIAG_NQST_CTRL}, + {0x0C01, RX_DIAG_DFE_AMP_TUNE_2}, + {0x0002, RX_DIAG_DFE_AMP_TUNE_3}, + {0x0000, RX_DIAG_PI_CAP}, + {0x0031, RX_DIAG_PI_RATE}, + {0x0001, RX_DIAG_ACYA}, + {0x018C, RX_CDRLF_CNFG}, + {0x0003, RX_CDRLF_CNFG3} +}; + +static struct cdns_torrent_vals usb_100_no_ssc_cmn_vals = { + .reg_pairs = usb_100_no_ssc_cmn_regs, + .num_regs = ARRAY_SIZE(usb_100_no_ssc_cmn_regs), +}; + +static struct cdns_torrent_vals usb_100_no_ssc_tx_ln_vals = { + .reg_pairs = usb_100_no_ssc_tx_ln_regs, + .num_regs = ARRAY_SIZE(usb_100_no_ssc_tx_ln_regs), +}; + +static struct cdns_torrent_vals usb_100_no_ssc_rx_ln_vals = { + .reg_pairs = usb_100_no_ssc_rx_ln_regs, + .num_regs = ARRAY_SIZE(usb_100_no_ssc_rx_ln_regs), +}; + +/* Single link USB, 100 MHz Ref clk, internal SSC */ +static struct cdns_reg_pairs sl_usb_100_int_ssc_cmn_regs[] = { + {0x0004, CMN_PLL0_DSM_DIAG_M0}, + {0x0004, CMN_PLL1_DSM_DIAG_M0}, + {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0}, + {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0}, + {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0}, + {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0}, + {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0}, + {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0}, + {0x0064, CMN_PLL0_INTDIV_M0}, + {0x0064, CMN_PLL1_INTDIV_M0}, + {0x0002, CMN_PLL0_FRACDIVH_M0}, + {0x0002, CMN_PLL1_FRACDIVH_M0}, + {0x0044, CMN_PLL0_HIGH_THR_M0}, + {0x0044, CMN_PLL1_HIGH_THR_M0}, + {0x0002, CMN_PDIAG_PLL0_CTRL_M0}, + {0x0002, CMN_PDIAG_PLL1_CTRL_M0}, + {0x0001, CMN_PLL0_SS_CTRL1_M0}, + {0x0001, CMN_PLL1_SS_CTRL1_M0}, + {0x011B, CMN_PLL0_SS_CTRL2_M0}, + {0x011B, CMN_PLL1_SS_CTRL2_M0}, + {0x006E, CMN_PLL0_SS_CTRL3_M0}, + {0x006E, CMN_PLL1_SS_CTRL3_M0}, + {0x000E, CMN_PLL0_SS_CTRL4_M0}, + {0x000E, CMN_PLL1_SS_CTRL4_M0}, + {0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START}, + {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START}, + {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START}, + {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START}, + {0x0003, CMN_PLL0_VCOCAL_TCTRL}, + {0x0003, CMN_PLL1_VCOCAL_TCTRL}, + {0x00C7, CMN_PLL0_LOCK_REFCNT_START}, + {0x00C7, CMN_PLL1_LOCK_REFCNT_START}, + {0x00C7, CMN_PLL0_LOCK_PLLCNT_START}, + {0x00C7, CMN_PLL1_LOCK_PLLCNT_START}, + {0x0005, CMN_PLL0_LOCK_PLLCNT_THR}, + {0x0005, CMN_PLL1_LOCK_PLLCNT_THR}, + {0x8200, CMN_CDIAG_CDB_PWRI_OVRD}, + {0x8200, CMN_CDIAG_XCVRC_PWRI_OVRD} +}; + +static struct cdns_torrent_vals sl_usb_100_int_ssc_cmn_vals = { + .reg_pairs = sl_usb_100_int_ssc_cmn_regs, + .num_regs = ARRAY_SIZE(sl_usb_100_int_ssc_cmn_regs), +}; + +/* PCIe and SGMII/QSGMII Unique SSC link configuration */ +static struct cdns_reg_pairs pcie_sgmii_link_cmn_regs[] = { + {0x0003, PHY_PLL_CFG}, + {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0}, + {0x0400, CMN_PDIAG_PLL0_CLK_SEL_M1}, + {0x0601, CMN_PDIAG_PLL1_CLK_SEL_M0} +}; + +static struct cdns_reg_pairs pcie_sgmii_xcvr_diag_ln_regs[] = { + {0x0000, XCVR_DIAG_HSCLK_SEL}, + {0x0001, XCVR_DIAG_HSCLK_DIV}, + {0x0012, XCVR_DIAG_PLLDRC_CTRL} +}; + +static struct cdns_reg_pairs sgmii_pcie_xcvr_diag_ln_regs[] = { + {0x0011, XCVR_DIAG_HSCLK_SEL}, + {0x0003, XCVR_DIAG_HSCLK_DIV}, + {0x009B, XCVR_DIAG_PLLDRC_CTRL} +}; + +static struct cdns_torrent_vals pcie_sgmii_link_cmn_vals = { + .reg_pairs = pcie_sgmii_link_cmn_regs, + .num_regs = ARRAY_SIZE(pcie_sgmii_link_cmn_regs), +}; + +static struct cdns_torrent_vals pcie_sgmii_xcvr_diag_ln_vals = { + .reg_pairs = pcie_sgmii_xcvr_diag_ln_regs, + .num_regs = ARRAY_SIZE(pcie_sgmii_xcvr_diag_ln_regs), +}; + +static struct cdns_torrent_vals sgmii_pcie_xcvr_diag_ln_vals = { + .reg_pairs = sgmii_pcie_xcvr_diag_ln_regs, + .num_regs = ARRAY_SIZE(sgmii_pcie_xcvr_diag_ln_regs), +}; + +/* SGMII 100 MHz Ref 
clk, no SSC */ +static struct cdns_reg_pairs sgmii_100_no_ssc_cmn_regs[] = { + {0x0003, CMN_PLL0_VCOCAL_TCTRL}, + {0x0003, CMN_PLL1_VCOCAL_TCTRL}, + {0x3700, CMN_DIAG_BIAS_OVRD1}, + {0x0008, CMN_TXPUCAL_TUNE}, + {0x0008, CMN_TXPDCAL_TUNE} +}; + +static struct cdns_reg_pairs sgmii_100_no_ssc_tx_ln_regs[] = { + {0x00F3, TX_PSC_A0}, + {0x04A2, TX_PSC_A2}, + {0x04A2, TX_PSC_A3}, + {0x0000, TX_TXCC_CPOST_MULT_00}, + {0x00B3, DRV_DIAG_TX_DRV} +}; + +static struct cdns_reg_pairs sgmii_100_no_ssc_rx_ln_regs[] = { + {0x091D, RX_PSC_A0}, + {0x0900, RX_PSC_A2}, + {0x0100, RX_PSC_A3}, + {0x03C7, RX_REE_GCSM1_EQENM_PH1}, + {0x01C7, RX_REE_GCSM1_EQENM_PH2}, + {0x0000, RX_DIAG_DFE_CTRL}, + {0x0019, RX_REE_TAP1_CLIP}, + {0x0019, RX_REE_TAP2TON_CLIP}, + {0x0098, RX_DIAG_NQST_CTRL}, + {0x0C01, RX_DIAG_DFE_AMP_TUNE_2}, + {0x0000, RX_DIAG_DFE_AMP_TUNE_3}, + {0x0000, RX_DIAG_PI_CAP}, + {0x0010, RX_DIAG_PI_RATE}, + {0x0001, RX_DIAG_ACYA}, + {0x018C, RX_CDRLF_CNFG}, +}; + +static struct cdns_torrent_vals sgmii_100_no_ssc_cmn_vals = { + .reg_pairs = sgmii_100_no_ssc_cmn_regs, + .num_regs = ARRAY_SIZE(sgmii_100_no_ssc_cmn_regs), +}; + +static struct cdns_torrent_vals sgmii_100_no_ssc_tx_ln_vals = { + .reg_pairs = sgmii_100_no_ssc_tx_ln_regs, + .num_regs = ARRAY_SIZE(sgmii_100_no_ssc_tx_ln_regs), +}; + +static struct cdns_torrent_vals sgmii_100_no_ssc_rx_ln_vals = { + .reg_pairs = sgmii_100_no_ssc_rx_ln_regs, + .num_regs = ARRAY_SIZE(sgmii_100_no_ssc_rx_ln_regs), +}; + +/* SGMII 100 MHz Ref clk, internal SSC */ +static struct cdns_reg_pairs sgmii_100_int_ssc_cmn_regs[] = { + {0x0004, CMN_PLL0_DSM_DIAG_M0}, + {0x0004, CMN_PLL0_DSM_DIAG_M1}, + {0x0004, CMN_PLL1_DSM_DIAG_M0}, + {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0}, + {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M1}, + {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0}, + {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0}, + {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M1}, + {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0}, + {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0}, + {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M1}, + {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0}, + {0x0064, CMN_PLL0_INTDIV_M0}, + {0x0050, CMN_PLL0_INTDIV_M1}, + {0x0064, CMN_PLL1_INTDIV_M0}, + {0x0002, CMN_PLL0_FRACDIVH_M0}, + {0x0002, CMN_PLL0_FRACDIVH_M1}, + {0x0002, CMN_PLL1_FRACDIVH_M0}, + {0x0044, CMN_PLL0_HIGH_THR_M0}, + {0x0036, CMN_PLL0_HIGH_THR_M1}, + {0x0044, CMN_PLL1_HIGH_THR_M0}, + {0x0002, CMN_PDIAG_PLL0_CTRL_M0}, + {0x0002, CMN_PDIAG_PLL0_CTRL_M1}, + {0x0002, CMN_PDIAG_PLL1_CTRL_M0}, + {0x0001, CMN_PLL0_SS_CTRL1_M0}, + {0x0001, CMN_PLL0_SS_CTRL1_M1}, + {0x0001, CMN_PLL1_SS_CTRL1_M0}, + {0x011B, CMN_PLL0_SS_CTRL2_M0}, + {0x011B, CMN_PLL0_SS_CTRL2_M1}, + {0x011B, CMN_PLL1_SS_CTRL2_M0}, + {0x006E, CMN_PLL0_SS_CTRL3_M0}, + {0x0058, CMN_PLL0_SS_CTRL3_M1}, + {0x006E, CMN_PLL1_SS_CTRL3_M0}, + {0x000E, CMN_PLL0_SS_CTRL4_M0}, + {0x0012, CMN_PLL0_SS_CTRL4_M1}, + {0x000E, CMN_PLL1_SS_CTRL4_M0}, + {0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START}, + {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START}, + {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START}, + {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START}, + {0x0003, CMN_PLL0_VCOCAL_TCTRL}, + {0x0003, CMN_PLL1_VCOCAL_TCTRL}, + {0x00C7, CMN_PLL0_LOCK_REFCNT_START}, + {0x00C7, CMN_PLL1_LOCK_REFCNT_START}, + {0x00C7, CMN_PLL0_LOCK_PLLCNT_START}, + {0x00C7, CMN_PLL1_LOCK_PLLCNT_START}, + {0x0005, CMN_PLL0_LOCK_PLLCNT_THR}, + {0x0005, CMN_PLL1_LOCK_PLLCNT_THR}, + {0x3700, CMN_DIAG_BIAS_OVRD1}, + {0x0008, CMN_TXPUCAL_TUNE}, + {0x0008, CMN_TXPDCAL_TUNE} +}; + +static struct cdns_torrent_vals sgmii_100_int_ssc_cmn_vals = { + .reg_pairs = sgmii_100_int_ssc_cmn_regs, + .num_regs = 
ARRAY_SIZE(sgmii_100_int_ssc_cmn_regs), +}; + +/* QSGMII 100 MHz Ref clk, no SSC */ +static struct cdns_reg_pairs qsgmii_100_no_ssc_cmn_regs[] = { + {0x0003, CMN_PLL0_VCOCAL_TCTRL}, + {0x0003, CMN_PLL1_VCOCAL_TCTRL} +}; + +static struct cdns_reg_pairs qsgmii_100_no_ssc_tx_ln_regs[] = { + {0x00F3, TX_PSC_A0}, + {0x04A2, TX_PSC_A2}, + {0x04A2, TX_PSC_A3}, + {0x0000, TX_TXCC_CPOST_MULT_00}, + {0x0003, DRV_DIAG_TX_DRV} +}; + +static struct cdns_reg_pairs qsgmii_100_no_ssc_rx_ln_regs[] = { + {0x091D, RX_PSC_A0}, + {0x0900, RX_PSC_A2}, + {0x0100, RX_PSC_A3}, + {0x03C7, RX_REE_GCSM1_EQENM_PH1}, + {0x01C7, RX_REE_GCSM1_EQENM_PH2}, + {0x0000, RX_DIAG_DFE_CTRL}, + {0x0019, RX_REE_TAP1_CLIP}, + {0x0019, RX_REE_TAP2TON_CLIP}, + {0x0098, RX_DIAG_NQST_CTRL}, + {0x0C01, RX_DIAG_DFE_AMP_TUNE_2}, + {0x0000, RX_DIAG_DFE_AMP_TUNE_3}, + {0x0000, RX_DIAG_PI_CAP}, + {0x0010, RX_DIAG_PI_RATE}, + {0x0001, RX_DIAG_ACYA}, + {0x018C, RX_CDRLF_CNFG}, +}; + +static struct cdns_torrent_vals qsgmii_100_no_ssc_cmn_vals = { + .reg_pairs = qsgmii_100_no_ssc_cmn_regs, + .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_cmn_regs), +}; + +static struct cdns_torrent_vals qsgmii_100_no_ssc_tx_ln_vals = { + .reg_pairs = qsgmii_100_no_ssc_tx_ln_regs, + .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_tx_ln_regs), +}; + +static struct cdns_torrent_vals qsgmii_100_no_ssc_rx_ln_vals = { + .reg_pairs = qsgmii_100_no_ssc_rx_ln_regs, + .num_regs = ARRAY_SIZE(qsgmii_100_no_ssc_rx_ln_regs), +}; + +/* QSGMII 100 MHz Ref clk, internal SSC */ +static struct cdns_reg_pairs qsgmii_100_int_ssc_cmn_regs[] = { + {0x0004, CMN_PLL0_DSM_DIAG_M0}, + {0x0004, CMN_PLL0_DSM_DIAG_M1}, + {0x0004, CMN_PLL1_DSM_DIAG_M0}, + {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0}, + {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M1}, + {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0}, + {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0}, + {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M1}, + {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0}, + {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0}, + {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M1}, + {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0}, + {0x0064, CMN_PLL0_INTDIV_M0}, + {0x0050, CMN_PLL0_INTDIV_M1}, + {0x0064, CMN_PLL1_INTDIV_M0}, + {0x0002, CMN_PLL0_FRACDIVH_M0}, + {0x0002, CMN_PLL0_FRACDIVH_M1}, + {0x0002, CMN_PLL1_FRACDIVH_M0}, + {0x0044, CMN_PLL0_HIGH_THR_M0}, + {0x0036, CMN_PLL0_HIGH_THR_M1}, + {0x0044, CMN_PLL1_HIGH_THR_M0}, + {0x0002, CMN_PDIAG_PLL0_CTRL_M0}, + {0x0002, CMN_PDIAG_PLL0_CTRL_M1}, + {0x0002, CMN_PDIAG_PLL1_CTRL_M0}, + {0x0001, CMN_PLL0_SS_CTRL1_M0}, + {0x0001, CMN_PLL0_SS_CTRL1_M1}, + {0x0001, CMN_PLL1_SS_CTRL1_M0}, + {0x011B, CMN_PLL0_SS_CTRL2_M0}, + {0x011B, CMN_PLL0_SS_CTRL2_M1}, + {0x011B, CMN_PLL1_SS_CTRL2_M0}, + {0x006E, CMN_PLL0_SS_CTRL3_M0}, + {0x0058, CMN_PLL0_SS_CTRL3_M1}, + {0x006E, CMN_PLL1_SS_CTRL3_M0}, + {0x000E, CMN_PLL0_SS_CTRL4_M0}, + {0x0012, CMN_PLL0_SS_CTRL4_M1}, + {0x000E, CMN_PLL1_SS_CTRL4_M0}, + {0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START}, + {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START}, + {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START}, + {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START}, + {0x0003, CMN_PLL0_VCOCAL_TCTRL}, + {0x0003, CMN_PLL1_VCOCAL_TCTRL}, + {0x00C7, CMN_PLL0_LOCK_REFCNT_START}, + {0x00C7, CMN_PLL1_LOCK_REFCNT_START}, + {0x00C7, CMN_PLL0_LOCK_PLLCNT_START}, + {0x00C7, CMN_PLL1_LOCK_PLLCNT_START}, + {0x0005, CMN_PLL0_LOCK_PLLCNT_THR}, + {0x0005, CMN_PLL1_LOCK_PLLCNT_THR} +}; + +static struct cdns_torrent_vals qsgmii_100_int_ssc_cmn_vals = { + .reg_pairs = qsgmii_100_int_ssc_cmn_regs, + .num_regs = ARRAY_SIZE(qsgmii_100_int_ssc_cmn_regs), +}; + +/* Single SGMII/QSGMII link configuration */ +static struct 
cdns_reg_pairs sl_sgmii_link_cmn_regs[] = { + {0x0000, PHY_PLL_CFG}, + {0x0601, CMN_PDIAG_PLL0_CLK_SEL_M0} +}; + +static struct cdns_reg_pairs sl_sgmii_xcvr_diag_ln_regs[] = { + {0x0000, XCVR_DIAG_HSCLK_SEL}, + {0x0003, XCVR_DIAG_HSCLK_DIV}, + {0x0013, XCVR_DIAG_PLLDRC_CTRL} +}; + +static struct cdns_torrent_vals sl_sgmii_link_cmn_vals = { + .reg_pairs = sl_sgmii_link_cmn_regs, + .num_regs = ARRAY_SIZE(sl_sgmii_link_cmn_regs), +}; + +static struct cdns_torrent_vals sl_sgmii_xcvr_diag_ln_vals = { + .reg_pairs = sl_sgmii_xcvr_diag_ln_regs, + .num_regs = ARRAY_SIZE(sl_sgmii_xcvr_diag_ln_regs), +}; + +/* Multi link PCIe, 100 MHz Ref clk, internal SSC */ +static struct cdns_reg_pairs pcie_100_int_ssc_cmn_regs[] = { + {0x0004, CMN_PLL0_DSM_DIAG_M0}, + {0x0004, CMN_PLL0_DSM_DIAG_M1}, + {0x0004, CMN_PLL1_DSM_DIAG_M0}, + {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0}, + {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M1}, + {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0}, + {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0}, + {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M1}, + {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0}, + {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0}, + {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M1}, + {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0}, + {0x0064, CMN_PLL0_INTDIV_M0}, + {0x0050, CMN_PLL0_INTDIV_M1}, + {0x0064, CMN_PLL1_INTDIV_M0}, + {0x0002, CMN_PLL0_FRACDIVH_M0}, + {0x0002, CMN_PLL0_FRACDIVH_M1}, + {0x0002, CMN_PLL1_FRACDIVH_M0}, + {0x0044, CMN_PLL0_HIGH_THR_M0}, + {0x0036, CMN_PLL0_HIGH_THR_M1}, + {0x0044, CMN_PLL1_HIGH_THR_M0}, + {0x0002, CMN_PDIAG_PLL0_CTRL_M0}, + {0x0002, CMN_PDIAG_PLL0_CTRL_M1}, + {0x0002, CMN_PDIAG_PLL1_CTRL_M0}, + {0x0001, CMN_PLL0_SS_CTRL1_M0}, + {0x0001, CMN_PLL0_SS_CTRL1_M1}, + {0x0001, CMN_PLL1_SS_CTRL1_M0}, + {0x011B, CMN_PLL0_SS_CTRL2_M0}, + {0x011B, CMN_PLL0_SS_CTRL2_M1}, + {0x011B, CMN_PLL1_SS_CTRL2_M0}, + {0x006E, CMN_PLL0_SS_CTRL3_M0}, + {0x0058, CMN_PLL0_SS_CTRL3_M1}, + {0x006E, CMN_PLL1_SS_CTRL3_M0}, + {0x000E, CMN_PLL0_SS_CTRL4_M0}, + {0x0012, CMN_PLL0_SS_CTRL4_M1}, + {0x000E, CMN_PLL1_SS_CTRL4_M0}, + {0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START}, + {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START}, + {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START}, + {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START}, + {0x0003, CMN_PLL0_VCOCAL_TCTRL}, + {0x0003, CMN_PLL1_VCOCAL_TCTRL}, + {0x00C7, CMN_PLL0_LOCK_REFCNT_START}, + {0x00C7, CMN_PLL1_LOCK_REFCNT_START}, + {0x00C7, CMN_PLL0_LOCK_PLLCNT_START}, + {0x00C7, CMN_PLL1_LOCK_PLLCNT_START}, + {0x0005, CMN_PLL0_LOCK_PLLCNT_THR}, + {0x0005, CMN_PLL1_LOCK_PLLCNT_THR} +}; + +static struct cdns_torrent_vals pcie_100_int_ssc_cmn_vals = { + .reg_pairs = pcie_100_int_ssc_cmn_regs, + .num_regs = ARRAY_SIZE(pcie_100_int_ssc_cmn_regs), +}; + +/* Single link PCIe, 100 MHz Ref clk, internal SSC */ +static struct cdns_reg_pairs sl_pcie_100_int_ssc_cmn_regs[] = { + {0x0004, CMN_PLL0_DSM_DIAG_M0}, + {0x0004, CMN_PLL0_DSM_DIAG_M1}, + {0x0004, CMN_PLL1_DSM_DIAG_M0}, + {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M0}, + {0x0509, CMN_PDIAG_PLL0_CP_PADJ_M1}, + {0x0509, CMN_PDIAG_PLL1_CP_PADJ_M0}, + {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M0}, + {0x0F00, CMN_PDIAG_PLL0_CP_IADJ_M1}, + {0x0F00, CMN_PDIAG_PLL1_CP_IADJ_M0}, + {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M0}, + {0x0F08, CMN_PDIAG_PLL0_FILT_PADJ_M1}, + {0x0F08, CMN_PDIAG_PLL1_FILT_PADJ_M0}, + {0x0064, CMN_PLL0_INTDIV_M0}, + {0x0050, CMN_PLL0_INTDIV_M1}, + {0x0050, CMN_PLL1_INTDIV_M0}, + {0x0002, CMN_PLL0_FRACDIVH_M0}, + {0x0002, CMN_PLL0_FRACDIVH_M1}, + {0x0002, CMN_PLL1_FRACDIVH_M0}, + {0x0044, CMN_PLL0_HIGH_THR_M0}, + {0x0036, CMN_PLL0_HIGH_THR_M1}, + {0x0036, CMN_PLL1_HIGH_THR_M0}, + {0x0002, CMN_PDIAG_PLL0_CTRL_M0}, + 
{0x0002, CMN_PDIAG_PLL0_CTRL_M1}, + {0x0002, CMN_PDIAG_PLL1_CTRL_M0}, + {0x0001, CMN_PLL0_SS_CTRL1_M0}, + {0x0001, CMN_PLL0_SS_CTRL1_M1}, + {0x0001, CMN_PLL1_SS_CTRL1_M0}, + {0x011B, CMN_PLL0_SS_CTRL2_M0}, + {0x011B, CMN_PLL0_SS_CTRL2_M1}, + {0x011B, CMN_PLL1_SS_CTRL2_M0}, + {0x006E, CMN_PLL0_SS_CTRL3_M0}, + {0x0058, CMN_PLL0_SS_CTRL3_M1}, + {0x0058, CMN_PLL1_SS_CTRL3_M0}, + {0x000E, CMN_PLL0_SS_CTRL4_M0}, + {0x0012, CMN_PLL0_SS_CTRL4_M1}, + {0x0012, CMN_PLL1_SS_CTRL4_M0}, + {0x0C5E, CMN_PLL0_VCOCAL_REFTIM_START}, + {0x0C5E, CMN_PLL1_VCOCAL_REFTIM_START}, + {0x0C56, CMN_PLL0_VCOCAL_PLLCNT_START}, + {0x0C56, CMN_PLL1_VCOCAL_PLLCNT_START}, + {0x0003, CMN_PLL0_VCOCAL_TCTRL}, + {0x0003, CMN_PLL1_VCOCAL_TCTRL}, + {0x00C7, CMN_PLL0_LOCK_REFCNT_START}, + {0x00C7, CMN_PLL1_LOCK_REFCNT_START}, + {0x00C7, CMN_PLL0_LOCK_PLLCNT_START}, + {0x00C7, CMN_PLL1_LOCK_PLLCNT_START}, + {0x0005, CMN_PLL0_LOCK_PLLCNT_THR}, + {0x0005, CMN_PLL1_LOCK_PLLCNT_THR} +}; + +static struct cdns_torrent_vals sl_pcie_100_int_ssc_cmn_vals = { + .reg_pairs = sl_pcie_100_int_ssc_cmn_regs, + .num_regs = ARRAY_SIZE(sl_pcie_100_int_ssc_cmn_regs), +}; + +/* PCIe, 100 MHz Ref clk, no SSC & external SSC */ +static struct cdns_reg_pairs pcie_100_ext_no_ssc_cmn_regs[] = { + {0x0003, CMN_PLL0_VCOCAL_TCTRL}, + {0x0003, CMN_PLL1_VCOCAL_TCTRL} +}; + +static struct cdns_reg_pairs pcie_100_ext_no_ssc_rx_ln_regs[] = { + {0x0019, RX_REE_TAP1_CLIP}, + {0x0019, RX_REE_TAP2TON_CLIP}, + {0x0001, RX_DIAG_ACYA} +}; + +static struct cdns_torrent_vals pcie_100_no_ssc_cmn_vals = { + .reg_pairs = pcie_100_ext_no_ssc_cmn_regs, + .num_regs = ARRAY_SIZE(pcie_100_ext_no_ssc_cmn_regs), +}; + +static struct cdns_torrent_vals pcie_100_no_ssc_rx_ln_vals = { + .reg_pairs = pcie_100_ext_no_ssc_rx_ln_regs, + .num_regs = ARRAY_SIZE(pcie_100_ext_no_ssc_rx_ln_regs), +}; + static const struct cdns_torrent_data cdns_map_torrent = { .block_offset_shift = 0x2, .reg_offset_shift = 0x2, + .link_cmn_vals = { + [TYPE_PCIE] = { + [TYPE_NONE] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + [TYPE_SGMII] = { + [NO_SSC] = &pcie_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &pcie_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &pcie_usb_link_cmn_vals, + [EXTERNAL_SSC] = &pcie_usb_link_cmn_vals, + [INTERNAL_SSC] = &pcie_usb_link_cmn_vals, + }, + }, + [TYPE_SGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_sgmii_link_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &pcie_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &usb_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + }, + }, + [TYPE_QSGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_sgmii_link_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &pcie_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &usb_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + }, + }, + [TYPE_USB] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_usb_link_cmn_vals, + [EXTERNAL_SSC] = &sl_usb_link_cmn_vals, + [INTERNAL_SSC] = &sl_usb_link_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &pcie_usb_link_cmn_vals, + [EXTERNAL_SSC] = 
&pcie_usb_link_cmn_vals, + [INTERNAL_SSC] = &pcie_usb_link_cmn_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &usb_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &usb_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + }, + }, + }, + .xcvr_diag_vals = { + [TYPE_PCIE] = { + [TYPE_NONE] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + [TYPE_SGMII] = { + [NO_SSC] = &pcie_sgmii_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &pcie_sgmii_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &pcie_usb_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals, + }, + }, + [TYPE_SGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_sgmii_xcvr_diag_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &sgmii_pcie_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &sgmii_usb_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals, + }, + }, + [TYPE_QSGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_sgmii_xcvr_diag_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &sgmii_pcie_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &sgmii_usb_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals, + }, + }, + [TYPE_USB] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_usb_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &sl_usb_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &sl_usb_xcvr_diag_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &usb_pcie_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &usb_pcie_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &usb_pcie_xcvr_diag_ln_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &usb_sgmii_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &usb_sgmii_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals, + }, + }, + }, + .pcs_cmn_vals = { + [TYPE_USB] = { + [TYPE_NONE] = { + [NO_SSC] = &usb_phy_pcs_cmn_vals, + [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &usb_phy_pcs_cmn_vals, + [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &usb_phy_pcs_cmn_vals, + [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &usb_phy_pcs_cmn_vals, + [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + }, + }, + }, + .cmn_vals = { + [TYPE_PCIE] = { + [TYPE_NONE] = { + [NO_SSC] = &pcie_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sl_pcie_100_int_ssc_cmn_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &pcie_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = 
&pcie_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &pcie_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals, + }, + }, + [TYPE_SGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sgmii_100_no_ssc_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &sgmii_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sgmii_100_int_ssc_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &sgmii_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals, + }, + }, + [TYPE_QSGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &qsgmii_100_int_ssc_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals, + }, + }, + [TYPE_USB] = { + [TYPE_NONE] = { + [NO_SSC] = &usb_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &usb_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &usb_100_int_ssc_cmn_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &usb_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &usb_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals, + }, + }, + }, + .tx_ln_vals = { + [TYPE_PCIE] = { + [TYPE_NONE] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + [TYPE_SGMII] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + [TYPE_QSGMII] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + [TYPE_USB] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + }, + [TYPE_SGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + }, + }, + [TYPE_QSGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + }, + }, + [TYPE_USB] = { + [TYPE_NONE] = { + [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = 
&usb_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + }, + }, + }, + .rx_ln_vals = { + [TYPE_PCIE] = { + [TYPE_NONE] = { + [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + }, + }, + [TYPE_SGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + }, + }, + [TYPE_QSGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + }, + }, + [TYPE_USB] = { + [TYPE_NONE] = { + [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + }, + }, + }, }; static const struct cdns_torrent_data ti_j721e_map_torrent = { .block_offset_shift = 0x0, .reg_offset_shift = 0x1, + .link_cmn_vals = { + [TYPE_PCIE] = { + [TYPE_NONE] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + [TYPE_SGMII] = { + [NO_SSC] = &pcie_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &pcie_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &pcie_usb_link_cmn_vals, + [EXTERNAL_SSC] = &pcie_usb_link_cmn_vals, + [INTERNAL_SSC] = &pcie_usb_link_cmn_vals, + }, + }, + [TYPE_SGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_sgmii_link_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &pcie_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &usb_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + }, + }, + [TYPE_QSGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_sgmii_link_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = 
&pcie_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &pcie_sgmii_link_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &usb_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + }, + }, + [TYPE_USB] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_usb_link_cmn_vals, + [EXTERNAL_SSC] = &sl_usb_link_cmn_vals, + [INTERNAL_SSC] = &sl_usb_link_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &pcie_usb_link_cmn_vals, + [EXTERNAL_SSC] = &pcie_usb_link_cmn_vals, + [INTERNAL_SSC] = &pcie_usb_link_cmn_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &usb_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &usb_sgmii_link_cmn_vals, + [EXTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + [INTERNAL_SSC] = &usb_sgmii_link_cmn_vals, + }, + }, + }, + .xcvr_diag_vals = { + [TYPE_PCIE] = { + [TYPE_NONE] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + [TYPE_SGMII] = { + [NO_SSC] = &pcie_sgmii_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &pcie_sgmii_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &pcie_sgmii_xcvr_diag_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &pcie_usb_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &pcie_usb_xcvr_diag_ln_vals, + }, + }, + [TYPE_SGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_sgmii_xcvr_diag_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &sgmii_pcie_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &sgmii_usb_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals, + }, + }, + [TYPE_QSGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_sgmii_xcvr_diag_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &sgmii_pcie_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &sgmii_pcie_xcvr_diag_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &sgmii_usb_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &sgmii_usb_xcvr_diag_ln_vals, + }, + }, + [TYPE_USB] = { + [TYPE_NONE] = { + [NO_SSC] = &sl_usb_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &sl_usb_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &sl_usb_xcvr_diag_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &usb_pcie_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &usb_pcie_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &usb_pcie_xcvr_diag_ln_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &usb_sgmii_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &usb_sgmii_xcvr_diag_ln_vals, + [EXTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals, + [INTERNAL_SSC] = &usb_sgmii_xcvr_diag_ln_vals, + }, + }, + }, + .pcs_cmn_vals = { + [TYPE_USB] = { + [TYPE_NONE] = { + [NO_SSC] = &usb_phy_pcs_cmn_vals, + [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &usb_phy_pcs_cmn_vals, + [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &usb_phy_pcs_cmn_vals, + [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = 
&usb_phy_pcs_cmn_vals, + [EXTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + [INTERNAL_SSC] = &usb_phy_pcs_cmn_vals, + }, + }, + }, + .cmn_vals = { + [TYPE_PCIE] = { + [TYPE_NONE] = { + [NO_SSC] = &pcie_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sl_pcie_100_int_ssc_cmn_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &pcie_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &pcie_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &pcie_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &pcie_100_int_ssc_cmn_vals, + }, + }, + [TYPE_SGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sgmii_100_no_ssc_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &sgmii_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sgmii_100_int_ssc_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &sgmii_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sgmii_100_no_ssc_cmn_vals, + }, + }, + [TYPE_QSGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &qsgmii_100_int_ssc_cmn_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &qsgmii_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_cmn_vals, + }, + }, + [TYPE_USB] = { + [TYPE_NONE] = { + [NO_SSC] = &usb_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &usb_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &usb_100_int_ssc_cmn_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &usb_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &usb_100_no_ssc_cmn_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_cmn_vals, + [INTERNAL_SSC] = &sl_usb_100_int_ssc_cmn_vals, + }, + }, + }, + .tx_ln_vals = { + [TYPE_PCIE] = { + [TYPE_NONE] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + [TYPE_SGMII] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + [TYPE_QSGMII] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + [TYPE_USB] = { + [NO_SSC] = NULL, + [EXTERNAL_SSC] = NULL, + [INTERNAL_SSC] = NULL, + }, + }, + [TYPE_SGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &sgmii_100_no_ssc_tx_ln_vals, + }, + }, + [TYPE_QSGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_tx_ln_vals, + }, + }, + [TYPE_USB] = { + [TYPE_NONE] = { + 
[NO_SSC] = &usb_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &usb_100_no_ssc_tx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_tx_ln_vals, + }, + }, + }, + .rx_ln_vals = { + [TYPE_PCIE] = { + [TYPE_NONE] = { + [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &pcie_100_no_ssc_rx_ln_vals, + }, + }, + [TYPE_SGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &sgmii_100_no_ssc_rx_ln_vals, + }, + }, + [TYPE_QSGMII] = { + [TYPE_NONE] = { + [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + }, + [TYPE_USB] = { + [NO_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &qsgmii_100_no_ssc_rx_ln_vals, + }, + }, + [TYPE_USB] = { + [TYPE_NONE] = { + [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + }, + [TYPE_PCIE] = { + [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + }, + [TYPE_SGMII] = { + [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + }, + [TYPE_QSGMII] = { + [NO_SSC] = &usb_100_no_ssc_rx_ln_vals, + [EXTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + [INTERNAL_SSC] = &usb_100_no_ssc_rx_ln_vals, + }, + }, + }, }; static const struct of_device_id cdns_torrent_phy_of_match[] = { diff --git a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c index 0c4833da7be0..62d6d6849ad6 100644 --- a/drivers/phy/freescale/phy-fsl-imx8mq-usb.c +++ b/drivers/phy/freescale/phy-fsl-imx8mq-usb.c @@ -1,15 +1,20 @@ // SPDX-License-Identifier: GPL-2.0+ /* Copyright (c) 2017 NXP. 
*/ +#include <linux/bitfield.h> #include <linux/clk.h> +#include <linux/delay.h> #include <linux/io.h> #include <linux/module.h> +#include <linux/of_platform.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/regulator/consumer.h> #define PHY_CTRL0 0x0 #define PHY_CTRL0_REF_SSP_EN BIT(2) +#define PHY_CTRL0_FSEL_MASK GENMASK(10, 5) +#define PHY_CTRL0_FSEL_24M 0x2a #define PHY_CTRL1 0x4 #define PHY_CTRL1_RESET BIT(0) @@ -20,6 +25,11 @@ #define PHY_CTRL2 0x8 #define PHY_CTRL2_TXENABLEN0 BIT(8) +#define PHY_CTRL2_OTG_DISABLE BIT(9) + +#define PHY_CTRL6 0x18 +#define PHY_CTRL6_ALT_CLK_EN BIT(1) +#define PHY_CTRL6_ALT_CLK_SEL BIT(0) struct imx8mq_usb_phy { struct phy *phy; @@ -54,6 +64,44 @@ static int imx8mq_usb_phy_init(struct phy *phy) return 0; } +static int imx8mp_usb_phy_init(struct phy *phy) +{ + struct imx8mq_usb_phy *imx_phy = phy_get_drvdata(phy); + u32 value; + + /* USB3.0 PHY signal fsel for 24M ref */ + value = readl(imx_phy->base + PHY_CTRL0); + value &= ~PHY_CTRL0_FSEL_MASK; + value |= FIELD_PREP(PHY_CTRL0_FSEL_MASK, PHY_CTRL0_FSEL_24M); + writel(value, imx_phy->base + PHY_CTRL0); + + /* Disable alt_clk_en and use internal MPLL clocks */ + value = readl(imx_phy->base + PHY_CTRL6); + value &= ~(PHY_CTRL6_ALT_CLK_SEL | PHY_CTRL6_ALT_CLK_EN); + writel(value, imx_phy->base + PHY_CTRL6); + + value = readl(imx_phy->base + PHY_CTRL1); + value &= ~(PHY_CTRL1_VDATSRCENB0 | PHY_CTRL1_VDATDETENB0); + value |= PHY_CTRL1_RESET | PHY_CTRL1_ATERESET; + writel(value, imx_phy->base + PHY_CTRL1); + + value = readl(imx_phy->base + PHY_CTRL0); + value |= PHY_CTRL0_REF_SSP_EN; + writel(value, imx_phy->base + PHY_CTRL0); + + value = readl(imx_phy->base + PHY_CTRL2); + value |= PHY_CTRL2_TXENABLEN0 | PHY_CTRL2_OTG_DISABLE; + writel(value, imx_phy->base + PHY_CTRL2); + + udelay(10); + + value = readl(imx_phy->base + PHY_CTRL1); + value &= ~(PHY_CTRL1_RESET | PHY_CTRL1_ATERESET); + writel(value, imx_phy->base + PHY_CTRL1); + + return 0; +} + static int imx8mq_phy_power_on(struct phy *phy) { struct imx8mq_usb_phy *imx_phy = phy_get_drvdata(phy); @@ -76,19 +124,36 @@ static int imx8mq_phy_power_off(struct phy *phy) return 0; } -static struct phy_ops imx8mq_usb_phy_ops = { +static const struct phy_ops imx8mq_usb_phy_ops = { .init = imx8mq_usb_phy_init, .power_on = imx8mq_phy_power_on, .power_off = imx8mq_phy_power_off, .owner = THIS_MODULE, }; +static struct phy_ops imx8mp_usb_phy_ops = { + .init = imx8mp_usb_phy_init, + .power_on = imx8mq_phy_power_on, + .power_off = imx8mq_phy_power_off, + .owner = THIS_MODULE, +}; + +static const struct of_device_id imx8mq_usb_phy_of_match[] = { + {.compatible = "fsl,imx8mq-usb-phy", + .data = &imx8mq_usb_phy_ops,}, + {.compatible = "fsl,imx8mp-usb-phy", + .data = &imx8mp_usb_phy_ops,}, + { } +}; +MODULE_DEVICE_TABLE(of, imx8mq_usb_phy_of_match); + static int imx8mq_usb_phy_probe(struct platform_device *pdev) { struct phy_provider *phy_provider; struct device *dev = &pdev->dev; struct imx8mq_usb_phy *imx_phy; struct resource *res; + const struct phy_ops *phy_ops; imx_phy = devm_kzalloc(dev, sizeof(*imx_phy), GFP_KERNEL); if (!imx_phy) @@ -105,7 +170,11 @@ static int imx8mq_usb_phy_probe(struct platform_device *pdev) if (IS_ERR(imx_phy->base)) return PTR_ERR(imx_phy->base); - imx_phy->phy = devm_phy_create(dev, NULL, &imx8mq_usb_phy_ops); + phy_ops = of_device_get_match_data(dev); + if (!phy_ops) + return -EINVAL; + + imx_phy->phy = devm_phy_create(dev, NULL, phy_ops); if (IS_ERR(imx_phy->phy)) return PTR_ERR(imx_phy->phy); @@ -120,12 +189,6 
@@ static int imx8mq_usb_phy_probe(struct platform_device *pdev) return PTR_ERR_OR_ZERO(phy_provider); } -static const struct of_device_id imx8mq_usb_phy_of_match[] = { - {.compatible = "fsl,imx8mq-usb-phy",}, - { }, -}; -MODULE_DEVICE_TABLE(of, imx8mq_usb_phy_of_match); - static struct platform_driver imx8mq_usb_phy_driver = { .probe = imx8mq_usb_phy_probe, .driver = { diff --git a/drivers/phy/hisilicon/phy-hi3660-usb3.c b/drivers/phy/hisilicon/phy-hi3660-usb3.c index cc0af2c044d0..84adce9b4277 100644 --- a/drivers/phy/hisilicon/phy-hi3660-usb3.c +++ b/drivers/phy/hisilicon/phy-hi3660-usb3.c @@ -161,7 +161,7 @@ out: return ret; } -static struct phy_ops hi3660_phy_ops = { +static const struct phy_ops hi3660_phy_ops = { .init = hi3660_phy_init, .exit = hi3660_phy_exit, .owner = THIS_MODULE, diff --git a/drivers/phy/intel/Kconfig b/drivers/phy/intel/Kconfig index 7b47682a4e0e..58ec695c92ec 100644 --- a/drivers/phy/intel/Kconfig +++ b/drivers/phy/intel/Kconfig @@ -1,9 +1,21 @@ # SPDX-License-Identifier: GPL-2.0 # -# Phy drivers for Intel Lightning Mountain(LGM) platform +# Phy drivers for Intel platforms # -config PHY_INTEL_COMBO - bool "Intel ComboPHY driver" +config PHY_INTEL_KEEMBAY_EMMC + tristate "Intel Keem Bay EMMC PHY driver" + depends on (OF && ARM64) || COMPILE_TEST + depends on HAS_IOMEM + select GENERIC_PHY + select REGMAP_MMIO + help + Choose this option if you have an Intel Keem Bay SoC. + + To compile this driver as a module, choose M here: the module + will be called phy-keembay-emmc.ko. + +config PHY_INTEL_LGM_COMBO + bool "Intel Lightning Mountain ComboPHY driver" depends on X86 || COMPILE_TEST depends on OF && HAS_IOMEM select MFD_SYSCON @@ -16,8 +28,8 @@ config PHY_INTEL_COMBO chipsets which provides PHYs for various controllers, EMAC, SATA and PCIe. 
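(Editor's note, not part of the patch: the i.MX8MQ/i.MX8MP USB PHY hunk above stops hard-coding the phy_ops in probe() and instead selects the per-SoC ops through the OF match data. A minimal sketch of that pattern follows; the "vendor,socA-usb-phy"/"vendor,socB-usb-phy" compatibles and the socA/socB ops are placeholders, not names from the patch.)

#include <linux/err.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/phy/phy.h>
#include <linux/platform_device.h>

/* Placeholder per-SoC ops; a real driver fills in .init, .power_on, ... */
static const struct phy_ops socA_phy_ops = { .owner = THIS_MODULE };
static const struct phy_ops socB_phy_ops = { .owner = THIS_MODULE };

/* The DT compatible string alone picks the right ops via .data. */
static const struct of_device_id example_usb_phy_of_match[] = {
	{ .compatible = "vendor,socA-usb-phy", .data = &socA_phy_ops },
	{ .compatible = "vendor,socB-usb-phy", .data = &socB_phy_ops },
	{ }
};
MODULE_DEVICE_TABLE(of, example_usb_phy_of_match);

static int example_usb_phy_probe(struct platform_device *pdev)
{
	const struct phy_ops *phy_ops = of_device_get_match_data(&pdev->dev);
	struct phy *phy;

	if (!phy_ops)		/* unknown compatible: refuse to bind */
		return -EINVAL;

	/* One probe() now serves every supported SoC unchanged. */
	phy = devm_phy_create(&pdev->dev, NULL, phy_ops);
	return PTR_ERR_OR_ZERO(phy);
}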
-config PHY_INTEL_EMMC - tristate "Intel EMMC PHY driver" +config PHY_INTEL_LGM_EMMC + tristate "Intel Lightning Mountain EMMC PHY driver" depends on X86 || COMPILE_TEST select GENERIC_PHY help diff --git a/drivers/phy/intel/Makefile b/drivers/phy/intel/Makefile index 233d530dadde..a5e0af5ccd75 100644 --- a/drivers/phy/intel/Makefile +++ b/drivers/phy/intel/Makefile @@ -1,3 +1,4 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_PHY_INTEL_COMBO) += phy-intel-combo.o -obj-$(CONFIG_PHY_INTEL_EMMC) += phy-intel-emmc.o +obj-$(CONFIG_PHY_INTEL_KEEMBAY_EMMC) += phy-intel-keembay-emmc.o +obj-$(CONFIG_PHY_INTEL_LGM_COMBO) += phy-intel-lgm-combo.o +obj-$(CONFIG_PHY_INTEL_LGM_EMMC) += phy-intel-lgm-emmc.o diff --git a/drivers/phy/intel/phy-intel-keembay-emmc.c b/drivers/phy/intel/phy-intel-keembay-emmc.c new file mode 100644 index 000000000000..eb7c635ed89a --- /dev/null +++ b/drivers/phy/intel/phy-intel-keembay-emmc.c @@ -0,0 +1,307 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Intel Keem Bay eMMC PHY driver + * Copyright (C) 2020 Intel Corporation + */ + +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/delay.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> + +/* eMMC/SD/SDIO core/phy configuration registers */ +#define PHY_CFG_0 0x24 +#define SEL_DLY_TXCLK_MASK BIT(29) +#define OTAP_DLY_ENA_MASK BIT(27) +#define OTAP_DLY_SEL_MASK GENMASK(26, 23) +#define DLL_EN_MASK BIT(10) +#define PWR_DOWN_MASK BIT(0) + +#define PHY_CFG_2 0x2c +#define SEL_FREQ_MASK GENMASK(12, 10) + +#define PHY_STAT 0x40 +#define CAL_DONE_MASK BIT(6) +#define IS_CALDONE(x) ((x) & CAL_DONE_MASK) +#define DLL_RDY_MASK BIT(5) +#define IS_DLLRDY(x) ((x) & DLL_RDY_MASK) + +/* From ACS_eMMC51_16nFFC_RO1100_Userguide_v1p0.pdf p17 */ +#define FREQSEL_200M_170M 0x0 +#define FREQSEL_170M_140M 0x1 +#define FREQSEL_140M_110M 0x2 +#define FREQSEL_110M_80M 0x3 +#define FREQSEL_80M_50M 0x4 + +struct keembay_emmc_phy { + struct regmap *syscfg; + struct clk *emmcclk; +}; + +static const struct regmap_config keembay_regmap_config = { + .reg_bits = 32, + .val_bits = 32, + .reg_stride = 4, +}; + +static int keembay_emmc_phy_power(struct phy *phy, bool on_off) +{ + struct keembay_emmc_phy *priv = phy_get_drvdata(phy); + unsigned int caldone; + unsigned int dllrdy; + unsigned int freqsel; + unsigned int mhz; + int ret; + + /* + * Keep phyctrl_pdb and phyctrl_endll low to allow + * initialization of CALIO state M/C DFFs + */ + ret = regmap_update_bits(priv->syscfg, PHY_CFG_0, PWR_DOWN_MASK, + FIELD_PREP(PWR_DOWN_MASK, 0)); + if (ret) { + dev_err(&phy->dev, "CALIO power down bar failed: %d\n", ret); + return ret; + } + + ret = regmap_update_bits(priv->syscfg, PHY_CFG_0, DLL_EN_MASK, + FIELD_PREP(DLL_EN_MASK, 0)); + if (ret) { + dev_err(&phy->dev, "turn off the dll failed: %d\n", ret); + return ret; + } + + /* Already finish power off above */ + if (!on_off) + return 0; + + mhz = DIV_ROUND_CLOSEST(clk_get_rate(priv->emmcclk), 1000000); + if (mhz <= 200 && mhz >= 170) + freqsel = FREQSEL_200M_170M; + else if (mhz <= 170 && mhz >= 140) + freqsel = FREQSEL_170M_140M; + else if (mhz <= 140 && mhz >= 110) + freqsel = FREQSEL_140M_110M; + else if (mhz <= 110 && mhz >= 80) + freqsel = FREQSEL_110M_80M; + else if (mhz <= 80 && mhz >= 50) + freqsel = FREQSEL_80M_50M; + else + freqsel = 0x0; + + if (mhz < 50 || mhz > 200) + dev_warn(&phy->dev, "Unsupported rate: %d MHz\n", mhz); 
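(Editor's note, not part of the patch: the band selection just above maps the emmcclk rate to one of the FREQSEL_* codes that keembay_emmc_phy_power() later programs into the SEL_FREQ field of PHY_CFG_2. A self-contained sketch of that mapping is shown below, assuming the FREQSEL_* and SEL_FREQ_MASK definitions from the driver above; for example, a 198 MHz clock rounds to 198 MHz and selects FREQSEL_200M_170M.)

#include <linux/bitfield.h>
#include <linux/kernel.h>

/* Same ranges as the driver; out-of-range rates fall back to 0x0. */
static u32 example_sel_freq_field(unsigned long emmcclk_rate)
{
	unsigned int mhz = DIV_ROUND_CLOSEST(emmcclk_rate, 1000000);
	unsigned int freqsel;

	if (mhz <= 200 && mhz >= 170)
		freqsel = FREQSEL_200M_170M;
	else if (mhz <= 170 && mhz >= 140)
		freqsel = FREQSEL_170M_140M;
	else if (mhz <= 140 && mhz >= 110)
		freqsel = FREQSEL_140M_110M;
	else if (mhz <= 110 && mhz >= 80)
		freqsel = FREQSEL_110M_80M;
	else if (mhz <= 80 && mhz >= 50)
		freqsel = FREQSEL_80M_50M;
	else
		freqsel = 0x0;

	/*
	 * Shift the code into bits 12:10; this is the value the driver hands
	 * to regmap_update_bits(priv->syscfg, PHY_CFG_2, SEL_FREQ_MASK, ...).
	 */
	return FIELD_PREP(SEL_FREQ_MASK, freqsel);
}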
+ + /* + * According to the user manual, calpad calibration + * cycle takes more than 2us without the minimal recommended + * value, so we may need a little margin here + */ + udelay(5); + + ret = regmap_update_bits(priv->syscfg, PHY_CFG_0, PWR_DOWN_MASK, + FIELD_PREP(PWR_DOWN_MASK, 1)); + if (ret) { + dev_err(&phy->dev, "CALIO power down bar failed: %d\n", ret); + return ret; + } + + /* + * According to the user manual, it asks driver to wait 5us for + * calpad busy trimming. However it is documented that this value is + * PVT(A.K.A. process, voltage and temperature) relevant, so some + * failure cases are found which indicates we should be more tolerant + * to calpad busy trimming. + */ + ret = regmap_read_poll_timeout(priv->syscfg, PHY_STAT, + caldone, IS_CALDONE(caldone), + 0, 50); + if (ret) { + dev_err(&phy->dev, "caldone failed, ret=%d\n", ret); + return ret; + } + + /* Set the frequency of the DLL operation */ + ret = regmap_update_bits(priv->syscfg, PHY_CFG_2, SEL_FREQ_MASK, + FIELD_PREP(SEL_FREQ_MASK, freqsel)); + if (ret) { + dev_err(&phy->dev, "set the frequency of dll failed:%d\n", ret); + return ret; + } + + /* Turn on the DLL */ + ret = regmap_update_bits(priv->syscfg, PHY_CFG_0, DLL_EN_MASK, + FIELD_PREP(DLL_EN_MASK, 1)); + if (ret) { + dev_err(&phy->dev, "turn on the dll failed: %d\n", ret); + return ret; + } + + /* + * We turned on the DLL even though the rate was 0 because we the + * clock might be turned on later. ...but we can't wait for the DLL + * to lock when the rate is 0 because it will never lock with no + * input clock. + * + * Technically we should be checking the lock later when the clock + * is turned on, but for now we won't. + */ + if (mhz == 0) + return 0; + + /* + * After enabling analog DLL circuits docs say that we need 10.2 us if + * our source clock is at 50 MHz and that lock time scales linearly + * with clock speed. If we are powering on the PHY and the card clock + * is super slow (like 100kHz) this could take as long as 5.1 ms as + * per the math: 10.2 us * (50000000 Hz / 100000 Hz) => 5.1 ms + * hopefully we won't be running at 100 kHz, but we should still make + * sure we wait long enough. + * + * NOTE: There appear to be corner cases where the DLL seems to take + * extra long to lock for reasons that aren't understood. In some + * extreme cases we've seen it take up to over 10ms (!). We'll be + * generous and give it 50ms. + */ + ret = regmap_read_poll_timeout(priv->syscfg, PHY_STAT, + dllrdy, IS_DLLRDY(dllrdy), + 0, 50 * USEC_PER_MSEC); + if (ret) + dev_err(&phy->dev, "dllrdy failed, ret=%d\n", ret); + + return ret; +} + +static int keembay_emmc_phy_init(struct phy *phy) +{ + struct keembay_emmc_phy *priv = phy_get_drvdata(phy); + + /* + * We purposely get the clock here and not in probe to avoid the + * circular dependency problem. We expect: + * - PHY driver to probe + * - SDHCI driver to start probe + * - SDHCI driver to register it's clock + * - SDHCI driver to get the PHY + * - SDHCI driver to init the PHY + * + * The clock is optional, so upon any error just return it like + * any other error to user. 
+ */ + priv->emmcclk = clk_get_optional(&phy->dev, "emmcclk"); + + return PTR_ERR_OR_ZERO(priv->emmcclk); +} + +static int keembay_emmc_phy_exit(struct phy *phy) +{ + struct keembay_emmc_phy *priv = phy_get_drvdata(phy); + + clk_put(priv->emmcclk); + + return 0; +}; + +static int keembay_emmc_phy_power_on(struct phy *phy) +{ + struct keembay_emmc_phy *priv = phy_get_drvdata(phy); + int ret; + + /* Delay chain based txclk: enable */ + ret = regmap_update_bits(priv->syscfg, PHY_CFG_0, SEL_DLY_TXCLK_MASK, + FIELD_PREP(SEL_DLY_TXCLK_MASK, 1)); + if (ret) { + dev_err(&phy->dev, "ERROR: delay chain txclk set: %d\n", ret); + return ret; + } + + /* Output tap delay: enable */ + ret = regmap_update_bits(priv->syscfg, PHY_CFG_0, OTAP_DLY_ENA_MASK, + FIELD_PREP(OTAP_DLY_ENA_MASK, 1)); + if (ret) { + dev_err(&phy->dev, "ERROR: output tap delay set: %d\n", ret); + return ret; + } + + /* Output tap delay */ + ret = regmap_update_bits(priv->syscfg, PHY_CFG_0, OTAP_DLY_SEL_MASK, + FIELD_PREP(OTAP_DLY_SEL_MASK, 2)); + if (ret) { + dev_err(&phy->dev, "ERROR: output tap delay select: %d\n", ret); + return ret; + } + + /* Power up eMMC phy analog blocks */ + return keembay_emmc_phy_power(phy, true); +} + +static int keembay_emmc_phy_power_off(struct phy *phy) +{ + /* Power down eMMC phy analog blocks */ + return keembay_emmc_phy_power(phy, false); +} + +static const struct phy_ops ops = { + .init = keembay_emmc_phy_init, + .exit = keembay_emmc_phy_exit, + .power_on = keembay_emmc_phy_power_on, + .power_off = keembay_emmc_phy_power_off, + .owner = THIS_MODULE, +}; + +static int keembay_emmc_phy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct device_node *np = dev->of_node; + struct keembay_emmc_phy *priv; + struct phy *generic_phy; + struct phy_provider *phy_provider; + void __iomem *base; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(base)) + return PTR_ERR(base); + + priv->syscfg = devm_regmap_init_mmio(dev, base, &keembay_regmap_config); + if (IS_ERR(priv->syscfg)) + return PTR_ERR(priv->syscfg); + + generic_phy = devm_phy_create(dev, np, &ops); + if (IS_ERR(generic_phy)) + return dev_err_probe(dev, PTR_ERR(generic_phy), + "failed to create PHY\n"); + + phy_set_drvdata(generic_phy, priv); + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + + return PTR_ERR_OR_ZERO(phy_provider); +} + +static const struct of_device_id keembay_emmc_phy_dt_ids[] = { + { .compatible = "intel,keembay-emmc-phy" }, + {} +}; +MODULE_DEVICE_TABLE(of, keembay_emmc_phy_dt_ids); + +static struct platform_driver keembay_emmc_phy_driver = { + .probe = keembay_emmc_phy_probe, + .driver = { + .name = "keembay-emmc-phy", + .of_match_table = keembay_emmc_phy_dt_ids, + }, +}; +module_platform_driver(keembay_emmc_phy_driver); + +MODULE_AUTHOR("Wan Ahmad Zainie <wan.ahmad.zainie.wan.mohamad@intel.com>"); +MODULE_DESCRIPTION("Intel Keem Bay eMMC PHY driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/intel/phy-intel-combo.c b/drivers/phy/intel/phy-intel-lgm-combo.c index 360b1eb2ebd6..360b1eb2ebd6 100644 --- a/drivers/phy/intel/phy-intel-combo.c +++ b/drivers/phy/intel/phy-intel-lgm-combo.c diff --git a/drivers/phy/intel/phy-intel-emmc.c b/drivers/phy/intel/phy-intel-lgm-emmc.c index 703aeb122541..703aeb122541 100644 --- a/drivers/phy/intel/phy-intel-emmc.c +++ b/drivers/phy/intel/phy-intel-lgm-emmc.c diff --git a/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c 
b/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c index be09b1530ae6..a7d126192cf1 100644 --- a/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c +++ b/drivers/phy/lantiq/phy-lantiq-rcu-usb2.c @@ -141,7 +141,7 @@ static int ltq_rcu_usb2_phy_power_off(struct phy *phy) return 0; } -static struct phy_ops ltq_rcu_usb2_phy_ops = { +static const struct phy_ops ltq_rcu_usb2_phy_ops = { .init = ltq_rcu_usb2_phy_init, .power_on = ltq_rcu_usb2_phy_power_on, .power_off = ltq_rcu_usb2_phy_power_off, diff --git a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c index 2ff9a48d833e..22c5698123cf 100644 --- a/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c +++ b/drivers/phy/lantiq/phy-lantiq-vrx200-pcie.c @@ -349,7 +349,7 @@ static int ltq_vrx200_pcie_phy_power_off(struct phy *phy) return 0; } -static struct phy_ops ltq_vrx200_pcie_phy_ops = { +static const struct phy_ops ltq_vrx200_pcie_phy_ops = { .init = ltq_vrx200_pcie_phy_init, .exit = ltq_vrx200_pcie_phy_exit, .power_on = ltq_vrx200_pcie_phy_power_on, diff --git a/drivers/phy/marvell/phy-pxa-28nm-hsic.c b/drivers/phy/marvell/phy-pxa-28nm-hsic.c index ae8370af59c0..31b43d2ee39a 100644 --- a/drivers/phy/marvell/phy-pxa-28nm-hsic.c +++ b/drivers/phy/marvell/phy-pxa-28nm-hsic.c @@ -12,6 +12,7 @@ #include <linux/slab.h> #include <linux/of.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/module.h> @@ -44,15 +45,12 @@ struct mv_hsic_phy { struct clk *clk; }; -static bool wait_for_reg(void __iomem *reg, u32 mask, unsigned long timeout) +static int wait_for_reg(void __iomem *reg, u32 mask, u32 ms) { - timeout += jiffies; - while (time_is_after_eq_jiffies(timeout)) { - if ((readl(reg) & mask) == mask) - return true; - msleep(1); - } - return false; + u32 val; + + return readl_poll_timeout(reg, val, ((val & mask) == mask), + 1000, 1000 * ms); } static int mv_hsic_phy_init(struct phy *phy) @@ -60,6 +58,7 @@ static int mv_hsic_phy_init(struct phy *phy) struct mv_hsic_phy *mv_phy = phy_get_drvdata(phy); struct platform_device *pdev = mv_phy->pdev; void __iomem *base = mv_phy->base; + int ret; clk_prepare_enable(mv_phy->clk); @@ -75,14 +74,14 @@ static int mv_hsic_phy_init(struct phy *phy) base + PHY_28NM_HSIC_PLL_CTRL2); /* Make sure PHY PLL is locked */ - if (!wait_for_reg(base + PHY_28NM_HSIC_PLL_CTRL2, - PHY_28NM_HSIC_H2S_PLL_LOCK, HZ / 10)) { + ret = wait_for_reg(base + PHY_28NM_HSIC_PLL_CTRL2, + PHY_28NM_HSIC_H2S_PLL_LOCK, 100); + if (ret) { dev_err(&pdev->dev, "HSIC PHY PLL not locked after 100mS."); clk_disable_unprepare(mv_phy->clk); - return -ETIMEDOUT; } - return 0; + return ret; } static int mv_hsic_phy_power_on(struct phy *phy) @@ -91,6 +90,7 @@ static int mv_hsic_phy_power_on(struct phy *phy) struct platform_device *pdev = mv_phy->pdev; void __iomem *base = mv_phy->base; u32 reg; + int ret; reg = readl(base + PHY_28NM_HSIC_CTRL); /* Avoid SE0 state when resume for some device will take it as reset */ @@ -108,20 +108,20 @@ static int mv_hsic_phy_power_on(struct phy *phy) */ /* Make sure PHY Calibration is ready */ - if (!wait_for_reg(base + PHY_28NM_HSIC_IMPCAL_CAL, - PHY_28NM_HSIC_H2S_IMPCAL_DONE, HZ / 10)) { + ret = wait_for_reg(base + PHY_28NM_HSIC_IMPCAL_CAL, + PHY_28NM_HSIC_H2S_IMPCAL_DONE, 100); + if (ret) { dev_warn(&pdev->dev, "HSIC PHY READY not set after 100mS."); - return -ETIMEDOUT; + return ret; } /* Waiting for HSIC connect int*/ - if (!wait_for_reg(base + PHY_28NM_HSIC_INT, - PHY_28NM_HSIC_CONNECT_INT, HZ / 5)) { + ret = wait_for_reg(base + 
PHY_28NM_HSIC_INT, + PHY_28NM_HSIC_CONNECT_INT, 200); + if (ret) dev_warn(&pdev->dev, "HSIC wait for connect interrupt timeout."); - return -ETIMEDOUT; - } - return 0; + return ret; } static int mv_hsic_phy_power_off(struct phy *phy) diff --git a/drivers/phy/marvell/phy-pxa-28nm-usb2.c b/drivers/phy/marvell/phy-pxa-28nm-usb2.c index 9fd881787fa6..a175ae915f02 100644 --- a/drivers/phy/marvell/phy-pxa-28nm-usb2.c +++ b/drivers/phy/marvell/phy-pxa-28nm-usb2.c @@ -13,6 +13,7 @@ #include <linux/of.h> #include <linux/of_device.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/err.h> #include <linux/clk.h> #include <linux/module.h> @@ -138,15 +139,12 @@ struct mv_usb2_phy { struct clk *clk; }; -static bool wait_for_reg(void __iomem *reg, u32 mask, unsigned long timeout) +static int wait_for_reg(void __iomem *reg, u32 mask, u32 ms) { - timeout += jiffies; - while (time_is_after_eq_jiffies(timeout)) { - if ((readl(reg) & mask) == mask) - return true; - msleep(1); - } - return false; + u32 val; + + return readl_poll_timeout(reg, val, ((val & mask) == mask), + 1000, 1000 * ms); } static int mv_usb2_phy_28nm_init(struct phy *phy) @@ -208,24 +206,23 @@ static int mv_usb2_phy_28nm_init(struct phy *phy) */ /* Make sure PHY Calibration is ready */ - if (!wait_for_reg(base + PHY_28NM_CAL_REG, - PHY_28NM_PLL_PLLCAL_DONE | PHY_28NM_PLL_IMPCAL_DONE, - HZ / 10)) { + ret = wait_for_reg(base + PHY_28NM_CAL_REG, + PHY_28NM_PLL_PLLCAL_DONE | PHY_28NM_PLL_IMPCAL_DONE, + 100); + if (ret) { dev_warn(&pdev->dev, "USB PHY PLL calibrate not done after 100mS."); - ret = -ETIMEDOUT; goto err_clk; } - if (!wait_for_reg(base + PHY_28NM_RX_REG1, - PHY_28NM_RX_SQCAL_DONE, HZ / 10)) { + ret = wait_for_reg(base + PHY_28NM_RX_REG1, + PHY_28NM_RX_SQCAL_DONE, 100); + if (ret) { dev_warn(&pdev->dev, "USB PHY RX SQ calibrate not done after 100mS."); - ret = -ETIMEDOUT; goto err_clk; } /* Make sure PHY PLL is ready */ - if (!wait_for_reg(base + PHY_28NM_PLL_REG0, - PHY_28NM_PLL_READY, HZ / 10)) { + ret = wait_for_reg(base + PHY_28NM_PLL_REG0, PHY_28NM_PLL_READY, 100); + if (ret) { dev_warn(&pdev->dev, "PLL_READY not set after 100mS."); - ret = -ETIMEDOUT; goto err_clk; } diff --git a/drivers/phy/phy-lgm-usb.c b/drivers/phy/phy-lgm-usb.c new file mode 100644 index 000000000000..309c8f0e0724 --- /dev/null +++ b/drivers/phy/phy-lgm-usb.c @@ -0,0 +1,284 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Intel LGM USB PHY driver + * + * Copyright (C) 2020 Intel Corporation. 
+ */ + +#include <linux/bitfield.h> +#include <linux/delay.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regulator/consumer.h> +#include <linux/reset.h> +#include <linux/usb/phy.h> +#include <linux/workqueue.h> + +#define CTRL1_OFFSET 0x14 +#define SRAM_EXT_LD_DONE BIT(25) +#define SRAM_INIT_DONE BIT(26) + +#define TCPC_OFFSET 0x1014 +#define TCPC_MUX_CTL GENMASK(1, 0) +#define MUX_NC 0 +#define MUX_USB 1 +#define MUX_DP 2 +#define MUX_USBDP 3 +#define TCPC_FLIPPED BIT(2) +#define TCPC_LOW_POWER_EN BIT(3) +#define TCPC_VALID BIT(4) +#define TCPC_CONN \ + (TCPC_VALID | FIELD_PREP(TCPC_MUX_CTL, MUX_USB)) +#define TCPC_DISCONN \ + (TCPC_VALID | FIELD_PREP(TCPC_MUX_CTL, MUX_NC) | TCPC_LOW_POWER_EN) + +static const char *const PHY_RESETS[] = { "phy31", "phy", }; +static const char *const CTL_RESETS[] = { "apb", "ctrl", }; + +struct tca_apb { + struct reset_control *resets[ARRAY_SIZE(PHY_RESETS)]; + struct regulator *vbus; + struct work_struct wk; + struct usb_phy phy; + + bool regulator_enabled; + bool phy_initialized; + bool connected; +}; + +static int get_flipped(struct tca_apb *ta, bool *flipped) +{ + union extcon_property_value property; + int ret; + + ret = extcon_get_property(ta->phy.edev, EXTCON_USB_HOST, + EXTCON_PROP_USB_TYPEC_POLARITY, &property); + if (ret) { + dev_err(ta->phy.dev, "no polarity property from extcon\n"); + return ret; + } + + *flipped = property.intval; + + return 0; +} + +static int phy_init(struct usb_phy *phy) +{ + struct tca_apb *ta = container_of(phy, struct tca_apb, phy); + void __iomem *ctrl1 = phy->io_priv + CTRL1_OFFSET; + int val, ret, i; + + if (ta->phy_initialized) + return 0; + + for (i = 0; i < ARRAY_SIZE(PHY_RESETS); i++) + reset_control_deassert(ta->resets[i]); + + ret = readl_poll_timeout(ctrl1, val, val & SRAM_INIT_DONE, 10, 10 * 1000); + if (ret) { + dev_err(ta->phy.dev, "SRAM init failed, 0x%x\n", val); + return ret; + } + + writel(readl(ctrl1) | SRAM_EXT_LD_DONE, ctrl1); + + ta->phy_initialized = true; + if (!ta->phy.edev) { + writel(TCPC_CONN, ta->phy.io_priv + TCPC_OFFSET); + return phy->set_vbus(phy, true); + } + + schedule_work(&ta->wk); + + return ret; +} + +static void phy_shutdown(struct usb_phy *phy) +{ + struct tca_apb *ta = container_of(phy, struct tca_apb, phy); + int i; + + if (!ta->phy_initialized) + return; + + ta->phy_initialized = false; + flush_work(&ta->wk); + ta->phy.set_vbus(&ta->phy, false); + + ta->connected = false; + writel(TCPC_DISCONN, ta->phy.io_priv + TCPC_OFFSET); + + for (i = 0; i < ARRAY_SIZE(PHY_RESETS); i++) + reset_control_assert(ta->resets[i]); +} + +static int phy_set_vbus(struct usb_phy *phy, int on) +{ + struct tca_apb *ta = container_of(phy, struct tca_apb, phy); + int ret; + + if (!!on == ta->regulator_enabled) + return 0; + + if (on) + ret = regulator_enable(ta->vbus); + else + ret = regulator_disable(ta->vbus); + + if (!ret) + ta->regulator_enabled = on; + + dev_dbg(ta->phy.dev, "set vbus: %d\n", on); + return ret; +} + +static void tca_work(struct work_struct *work) +{ + struct tca_apb *ta = container_of(work, struct tca_apb, wk); + bool connected; + bool flipped = false; + u32 val; + int ret; + + ret = get_flipped(ta, &flipped); + if (ret) + return; + + connected = extcon_get_state(ta->phy.edev, EXTCON_USB_HOST); + if (connected == ta->connected) + return; + + ta->connected = connected; + if (connected) { + val = TCPC_CONN; + if (flipped) + val |= TCPC_FLIPPED; + dev_dbg(ta->phy.dev, "connected%s\n", flipped ? 
" flipped" : ""); + } else { + val = TCPC_DISCONN; + dev_dbg(ta->phy.dev, "disconnected\n"); + } + + writel(val, ta->phy.io_priv + TCPC_OFFSET); + + ret = ta->phy.set_vbus(&ta->phy, connected); + if (ret) + dev_err(ta->phy.dev, "failed to set VBUS\n"); +} + +static int id_notifier(struct notifier_block *nb, unsigned long event, void *ptr) +{ + struct tca_apb *ta = container_of(nb, struct tca_apb, phy.id_nb); + + if (ta->phy_initialized) + schedule_work(&ta->wk); + + return NOTIFY_DONE; +} + +static int vbus_notifier(struct notifier_block *nb, unsigned long evnt, void *ptr) +{ + return NOTIFY_DONE; +} + +static int phy_probe(struct platform_device *pdev) +{ + struct reset_control *resets[ARRAY_SIZE(CTL_RESETS)]; + struct device *dev = &pdev->dev; + struct usb_phy *phy; + struct tca_apb *ta; + int i; + + ta = devm_kzalloc(dev, sizeof(*ta), GFP_KERNEL); + if (!ta) + return -ENOMEM; + + platform_set_drvdata(pdev, ta); + INIT_WORK(&ta->wk, tca_work); + + phy = &ta->phy; + phy->dev = dev; + phy->label = dev_name(dev); + phy->type = USB_PHY_TYPE_USB3; + phy->init = phy_init; + phy->shutdown = phy_shutdown; + phy->set_vbus = phy_set_vbus; + phy->id_nb.notifier_call = id_notifier; + phy->vbus_nb.notifier_call = vbus_notifier; + + phy->io_priv = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(phy->io_priv)) + return PTR_ERR(phy->io_priv); + + ta->vbus = devm_regulator_get(dev, "vbus"); + if (IS_ERR(ta->vbus)) + return PTR_ERR(ta->vbus); + + for (i = 0; i < ARRAY_SIZE(CTL_RESETS); i++) { + resets[i] = devm_reset_control_get_exclusive(dev, CTL_RESETS[i]); + if (IS_ERR(resets[i])) { + dev_err(dev, "%s reset not found\n", CTL_RESETS[i]); + return PTR_ERR(resets[i]); + } + } + + for (i = 0; i < ARRAY_SIZE(PHY_RESETS); i++) { + ta->resets[i] = devm_reset_control_get_exclusive(dev, PHY_RESETS[i]); + if (IS_ERR(ta->resets[i])) { + dev_err(dev, "%s reset not found\n", PHY_RESETS[i]); + return PTR_ERR(ta->resets[i]); + } + } + + for (i = 0; i < ARRAY_SIZE(CTL_RESETS); i++) + reset_control_assert(resets[i]); + + for (i = 0; i < ARRAY_SIZE(PHY_RESETS); i++) + reset_control_assert(ta->resets[i]); + /* + * Out-of-band reset of the controller after PHY reset will cause + * controller malfunctioning, so we should use in-band controller + * reset only and leave the controller de-asserted here. 
+	 */
+	for (i = 0; i < ARRAY_SIZE(CTL_RESETS); i++)
+		reset_control_deassert(resets[i]);
+
+	/* Need to wait at least 20us after de-asserting the controller */
+	usleep_range(20, 100);
+
+	return usb_add_phy_dev(phy);
+}
+
+static int phy_remove(struct platform_device *pdev)
+{
+	struct tca_apb *ta = platform_get_drvdata(pdev);
+
+	usb_remove_phy(&ta->phy);
+
+	return 0;
+}
+
+static const struct of_device_id intel_usb_phy_dt_ids[] = {
+	{ .compatible = "intel,lgm-usb-phy" },
+	{ }
+};
+MODULE_DEVICE_TABLE(of, intel_usb_phy_dt_ids);
+
+static struct platform_driver lgm_phy_driver = {
+	.driver = {
+		.name = "lgm-usb-phy",
+		.of_match_table = intel_usb_phy_dt_ids,
+	},
+	.probe = phy_probe,
+	.remove = phy_remove,
+};
+
+module_platform_driver(lgm_phy_driver);
+
+MODULE_DESCRIPTION("Intel LGM USB PHY driver");
+MODULE_AUTHOR("Li Yin <yin1.li@intel.com>");
+MODULE_AUTHOR("Vadivel Murugan R <vadivel.muruganx.ramuthevar@linux.intel.com>");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
index febe0aef68d4..ce91ae7f8dbd 100644
--- a/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
+++ b/drivers/phy/qualcomm/phy-qcom-apq8064-sata.c
@@ -4,6 +4,7 @@
  */
 #include <linux/io.h>
+#include <linux/iopoll.h>
 #include <linux/kernel.h>
 #include <linux/module.h>
 #include <linux/of.h>
@@ -72,18 +73,12 @@ struct qcom_apq8064_sata_phy {
 };
 /* Helper function to do poll and timeout */
-static int read_poll_timeout(void __iomem *addr, u32 mask)
+static int poll_timeout(void __iomem *addr, u32 mask)
 {
-	unsigned long timeout = jiffies + msecs_to_jiffies(TIMEOUT_MS);
+	u32 val;
-	do {
-		if (readl_relaxed(addr) & mask)
-			return 0;
-
-		usleep_range(DELAY_INTERVAL_US, DELAY_INTERVAL_US + 50);
-	} while (!time_after(jiffies, timeout));
-
-	return (readl_relaxed(addr) & mask) ?
0 : -ETIMEDOUT; + return readl_relaxed_poll_timeout(addr, val, (val & mask), + DELAY_INTERVAL_US, TIMEOUT_MS * 1000); } static int qcom_apq8064_sata_phy_init(struct phy *generic_phy) @@ -137,21 +132,21 @@ static int qcom_apq8064_sata_phy_init(struct phy *generic_phy) writel_relaxed(0x05, base + UNIPHY_PLL_LKDET_CFG2); /* PLL Lock wait */ - ret = read_poll_timeout(base + UNIPHY_PLL_STATUS, UNIPHY_PLL_LOCK); + ret = poll_timeout(base + UNIPHY_PLL_STATUS, UNIPHY_PLL_LOCK); if (ret) { dev_err(phy->dev, "poll timeout UNIPHY_PLL_STATUS\n"); return ret; } /* TX Calibration */ - ret = read_poll_timeout(base + SATA_PHY_TX_IMCAL_STAT, SATA_PHY_TX_CAL); + ret = poll_timeout(base + SATA_PHY_TX_IMCAL_STAT, SATA_PHY_TX_CAL); if (ret) { dev_err(phy->dev, "poll timeout SATA_PHY_TX_IMCAL_STAT\n"); return ret; } /* RX Calibration */ - ret = read_poll_timeout(base + SATA_PHY_RX_IMCAL_STAT, SATA_PHY_RX_CAL); + ret = poll_timeout(base + SATA_PHY_RX_IMCAL_STAT, SATA_PHY_RX_CAL); if (ret) { dev_err(phy->dev, "poll timeout SATA_PHY_RX_IMCAL_STAT\n"); return ret; diff --git a/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c b/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c index b8ef331e1545..fc7f9df80a7b 100644 --- a/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c +++ b/drivers/phy/qualcomm/phy-qcom-ipq4019-usb.c @@ -48,7 +48,7 @@ static int ipq4019_ss_phy_power_on(struct phy *_phy) return 0; } -static struct phy_ops ipq4019_usb_ss_phy_ops = { +static const struct phy_ops ipq4019_usb_ss_phy_ops = { .power_on = ipq4019_ss_phy_power_on, .power_off = ipq4019_ss_phy_power_off, }; @@ -80,7 +80,7 @@ static int ipq4019_hs_phy_power_on(struct phy *_phy) return 0; } -static struct phy_ops ipq4019_usb_hs_phy_ops = { +static const struct phy_ops ipq4019_usb_hs_phy_ops = { .power_on = ipq4019_hs_phy_power_on, .power_off = ipq4019_hs_phy_power_off, }; diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.c b/drivers/phy/qualcomm/phy-qcom-qmp.c index 6e6f992a9524..5d33ad4d06f2 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.c +++ b/drivers/phy/qualcomm/phy-qcom-qmp.c @@ -946,6 +946,88 @@ static const struct qmp_phy_init_tbl qmp_v3_usb3_tx_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x06), }; +static const struct qmp_phy_init_tbl qmp_v3_dp_serdes_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SVS_MODE_CLK_SEL, 0x01), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_EN_SEL, 0x37), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYS_CLK_CTRL, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_ENABLE1, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_SYSCLK_BUF_ENABLE, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CLK_SELECT, 0x30), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CMN_CONFIG, 0x02), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN0_MODE0, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_INTEGLOOP_GAIN1_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_MAP, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP3_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_BG_TIMER, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORECLK_DIV_MODE0, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_VCO_TUNE_CTRL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN, 0x3f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CORE_CLK_EN, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_IVCO, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_CCTRL_MODE0, 0x36), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_PLL_RCTRL_MODE0, 0x16), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_CP_CTRL_MODE0, 0x06), +}; + +static const struct qmp_phy_init_tbl qmp_v3_dp_serdes_tbl_rbr[] = { + 
QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x0c), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x69), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x6f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x08), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x00), +}; + +static const struct qmp_phy_init_tbl qmp_v3_dp_serdes_tbl_hbr[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x04), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x69), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x0e), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x00), +}; + +static const struct qmp_phy_init_tbl qmp_v3_dp_serdes_tbl_hbr2[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x8c), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x1f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x1c), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x00), +}; + +static const struct qmp_phy_init_tbl qmp_v3_dp_serdes_tbl_hbr3[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_COM_HSCLK_SEL, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DEC_START_MODE0, 0x69), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START2_MODE0, 0x80), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_DIV_FRAC_START3_MODE0, 0x07), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP1_MODE0, 0x2f), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP2_MODE0, 0x2a), + QMP_PHY_INIT_CFG(QSERDES_V3_COM_LOCK_CMP_EN, 0x08), +}; + +static const struct qmp_phy_init_tbl qmp_v3_dp_tx_tbl[] = { + QMP_PHY_INIT_CFG(QSERDES_V3_TX_TRANSCEIVER_BIAS_EN, 0x1a), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_VMODE_CTRL1, 0x40), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_PRE_STALL_LDO_BOOST_EN, 0x30), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_INTERFACE_SELECT, 0x3d), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_CLKBUF_ENABLE, 0x0f), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RESET_TSYNC_EN, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_TRAN_DRVR_EMP_EN, 0x03), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_PARRATE_REC_DETECT_IDLE_EN, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_TX_INTERFACE_MODE, 0x00), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_TX_BAND, 0x4), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_TX_POL_INV, 0x0a), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_TX_DRV_LVL, 0x38), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_TX_EMP_POST1_LVL, 0x20), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX, 0x06), + QMP_PHY_INIT_CFG(QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX, 0x07), +}; + static const struct qmp_phy_init_tbl qmp_v3_usb3_rx_tbl[] = { QMP_PHY_INIT_CFG(QSERDES_V3_RX_UCDR_FASTLOCK_FO_GAIN, 0x0b), QMP_PHY_INIT_CFG(QSERDES_V3_RX_RX_EQU_ADAPTOR_CNTRL2, 0x0f), @@ -1761,6 +1843,16 @@ struct qmp_phy_cfg { const struct qmp_phy_init_tbl *pcs_misc_tbl; int pcs_misc_tbl_num; + /* Init sequence for DP PHY block link rates */ + const struct qmp_phy_init_tbl *serdes_tbl_rbr; + int serdes_tbl_rbr_num; + const struct qmp_phy_init_tbl *serdes_tbl_hbr; + int serdes_tbl_hbr_num; + const struct qmp_phy_init_tbl *serdes_tbl_hbr2; + int serdes_tbl_hbr2_num; + const struct qmp_phy_init_tbl *serdes_tbl_hbr3; + int serdes_tbl_hbr3_num; + /* clock ids to be requested */ const char * const *clk_list; int num_clks; @@ -1797,10 +1889,17 @@ 
struct qmp_phy_cfg { bool no_pcs_sw_reset; }; +struct qmp_phy_combo_cfg { + const struct qmp_phy_cfg *usb_cfg; + const struct qmp_phy_cfg *dp_cfg; +}; + /** * struct qmp_phy - per-lane phy descriptor * * @phy: generic phy + * @cfg: phy specific configuration + * @serdes: iomapped memory space for phy's serdes (i.e. PLL) * @tx: iomapped memory space for lane's tx * @rx: iomapped memory space for lane's rx * @pcs: iomapped memory space for lane's pcs @@ -1811,9 +1910,12 @@ struct qmp_phy_cfg { * @index: lane index * @qmp: QMP phy to which this lane belongs * @lane_rst: lane's reset controller + * @mode: current PHY mode */ struct qmp_phy { struct phy *phy; + const struct qmp_phy_cfg *cfg; + void __iomem *serdes; void __iomem *tx; void __iomem *rx; void __iomem *pcs; @@ -1824,43 +1926,45 @@ struct qmp_phy { unsigned int index; struct qcom_qmp *qmp; struct reset_control *lane_rst; + enum phy_mode mode; + unsigned int dp_aux_cfg; + struct phy_configure_opts_dp dp_opts; + struct qmp_phy_dp_clks *dp_clks; +}; + +struct qmp_phy_dp_clks { + struct qmp_phy *qphy; + struct clk_hw dp_link_hw; + struct clk_hw dp_pixel_hw; }; /** * struct qcom_qmp - structure holding QMP phy block attributes * * @dev: device - * @serdes: iomapped memory space for phy's serdes * @dp_com: iomapped memory space for phy's dp_com control block * * @clks: array of clocks required by phy * @resets: array of resets required by phy * @vregs: regulator supplies bulk data * - * @cfg: phy specific configuration * @phys: array of per-lane phy descriptors * @phy_mutex: mutex lock for PHY common block initialization * @init_count: phy common block initialization count - * @phy_initialized: indicate if PHY has been initialized - * @mode: current PHY mode * @ufs_reset: optional UFS PHY reset handle */ struct qcom_qmp { struct device *dev; - void __iomem *serdes; void __iomem *dp_com; struct clk_bulk_data *clks; struct reset_control **resets; struct regulator_bulk_data *vregs; - const struct qmp_phy_cfg *cfg; struct qmp_phy **phys; struct mutex phy_mutex; int init_count; - bool phy_initialized; - enum phy_mode mode; struct reset_control *ufs_reset; }; @@ -2203,6 +2307,41 @@ static const struct qmp_phy_cfg sc7180_usb3phy_cfg = { .is_dual_lane_phy = true, }; +static const struct qmp_phy_cfg sc7180_dpphy_cfg = { + .type = PHY_TYPE_DP, + .nlanes = 1, + + .serdes_tbl = qmp_v3_dp_serdes_tbl, + .serdes_tbl_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl), + .tx_tbl = qmp_v3_dp_tx_tbl, + .tx_tbl_num = ARRAY_SIZE(qmp_v3_dp_tx_tbl), + + .serdes_tbl_rbr = qmp_v3_dp_serdes_tbl_rbr, + .serdes_tbl_rbr_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_rbr), + .serdes_tbl_hbr = qmp_v3_dp_serdes_tbl_hbr, + .serdes_tbl_hbr_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr), + .serdes_tbl_hbr2 = qmp_v3_dp_serdes_tbl_hbr2, + .serdes_tbl_hbr2_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr2), + .serdes_tbl_hbr3 = qmp_v3_dp_serdes_tbl_hbr3, + .serdes_tbl_hbr3_num = ARRAY_SIZE(qmp_v3_dp_serdes_tbl_hbr3), + + .clk_list = qmp_v3_phy_clk_l, + .num_clks = ARRAY_SIZE(qmp_v3_phy_clk_l), + .reset_list = sc7180_usb3phy_reset_l, + .num_resets = ARRAY_SIZE(sc7180_usb3phy_reset_l), + .vreg_list = qmp_phy_vreg_l, + .num_vregs = ARRAY_SIZE(qmp_phy_vreg_l), + .regs = qmp_v3_usb3phy_regs_layout, + + .has_phy_dp_com_ctrl = true, + .is_dual_lane_phy = true, +}; + +static const struct qmp_phy_combo_cfg sc7180_usb3dpphy_cfg = { + .usb_cfg = &sc7180_usb3phy_cfg, + .dp_cfg = &sc7180_dpphy_cfg, +}; + static const struct qmp_phy_cfg qmp_v3_usb3_uniphy_cfg = { .type = PHY_TYPE_USB3, .nlanes = 1, @@ -2479,11 +2618,300 
@@ static void qcom_qmp_phy_configure(void __iomem *base, qcom_qmp_phy_configure_lane(base, regs, tbl, num, 0xff); } +static int qcom_qmp_phy_serdes_init(struct qmp_phy *qphy) +{ + struct qcom_qmp *qmp = qphy->qmp; + const struct qmp_phy_cfg *cfg = qphy->cfg; + void __iomem *serdes = qphy->serdes; + const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts; + const struct qmp_phy_init_tbl *serdes_tbl = cfg->serdes_tbl; + int serdes_tbl_num = cfg->serdes_tbl_num; + int ret; + + qcom_qmp_phy_configure(serdes, cfg->regs, serdes_tbl, serdes_tbl_num); + + if (cfg->type == PHY_TYPE_DP) { + switch (dp_opts->link_rate) { + case 1620: + qcom_qmp_phy_configure(serdes, cfg->regs, + cfg->serdes_tbl_rbr, + cfg->serdes_tbl_rbr_num); + break; + case 2700: + qcom_qmp_phy_configure(serdes, cfg->regs, + cfg->serdes_tbl_hbr, + cfg->serdes_tbl_hbr_num); + break; + case 5400: + qcom_qmp_phy_configure(serdes, cfg->regs, + cfg->serdes_tbl_hbr2, + cfg->serdes_tbl_hbr2_num); + break; + case 8100: + qcom_qmp_phy_configure(serdes, cfg->regs, + cfg->serdes_tbl_hbr3, + cfg->serdes_tbl_hbr3_num); + break; + default: + /* Other link rates aren't supported */ + return -EINVAL; + } + } + + + if (cfg->has_phy_com_ctrl) { + void __iomem *status; + unsigned int mask, val; + + qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET], SW_RESET); + qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL], + SERDES_START | PCS_START); + + status = serdes + cfg->regs[QPHY_COM_PCS_READY_STATUS]; + mask = cfg->mask_com_pcs_ready; + + ret = readl_poll_timeout(status, val, (val & mask), 10, + PHY_INIT_COMPLETE_TIMEOUT); + if (ret) { + dev_err(qmp->dev, + "phy common block init timed-out\n"); + return ret; + } + } + + return 0; +} + +static void qcom_qmp_phy_dp_aux_init(struct qmp_phy *qphy) +{ + writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN | + DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN, + qphy->pcs + QSERDES_V3_DP_PHY_PD_CTL); + + /* Turn on BIAS current for PHY/PLL */ + writel(QSERDES_V3_COM_BIAS_EN | QSERDES_V3_COM_BIAS_EN_MUX | + QSERDES_V3_COM_CLKBUF_L_EN | QSERDES_V3_COM_EN_SYSCLK_TX_SEL, + qphy->serdes + QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN); + + writel(DP_PHY_PD_CTL_PSR_PWRDN, qphy->pcs + QSERDES_V3_DP_PHY_PD_CTL); + + writel(DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN | + DP_PHY_PD_CTL_LANE_0_1_PWRDN | + DP_PHY_PD_CTL_LANE_2_3_PWRDN | DP_PHY_PD_CTL_PLL_PWRDN | + DP_PHY_PD_CTL_DP_CLAMP_EN, + qphy->pcs + QSERDES_V3_DP_PHY_PD_CTL); + + writel(QSERDES_V3_COM_BIAS_EN | + QSERDES_V3_COM_BIAS_EN_MUX | QSERDES_V3_COM_CLKBUF_R_EN | + QSERDES_V3_COM_CLKBUF_L_EN | QSERDES_V3_COM_EN_SYSCLK_TX_SEL | + QSERDES_V3_COM_CLKBUF_RX_DRIVE_L, + qphy->serdes + QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN); + + writel(0x00, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG0); + writel(0x13, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG1); + writel(0x24, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG2); + writel(0x00, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG3); + writel(0x0a, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG4); + writel(0x26, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG5); + writel(0x0a, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG6); + writel(0x03, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG7); + writel(0xbb, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG8); + writel(0x03, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG9); + qphy->dp_aux_cfg = 0; + + writel(PHY_AUX_STOP_ERR_MASK | PHY_AUX_DEC_ERR_MASK | + PHY_AUX_SYNC_ERR_MASK | PHY_AUX_ALIGN_ERR_MASK | + PHY_AUX_REQ_ERR_MASK, + qphy->pcs + QSERDES_V3_DP_PHY_AUX_INTERRUPT_MASK); +} + +static const u8 qmp_dp_v3_pre_emphasis_hbr_rbr[4][4] = { + { 0x00, 0x0c, 0x14, 
0x19 }, + { 0x00, 0x0b, 0x12, 0xff }, + { 0x00, 0x0b, 0xff, 0xff }, + { 0x04, 0xff, 0xff, 0xff } +}; + +static const u8 qmp_dp_v3_voltage_swing_hbr_rbr[4][4] = { + { 0x08, 0x0f, 0x16, 0x1f }, + { 0x11, 0x1e, 0x1f, 0xff }, + { 0x19, 0x1f, 0xff, 0xff }, + { 0x1f, 0xff, 0xff, 0xff } +}; + +static void qcom_qmp_phy_configure_dp_tx(struct qmp_phy *qphy) +{ + const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts; + unsigned int v_level = 0, p_level = 0; + u32 bias_en, drvr_en; + u8 voltage_swing_cfg, pre_emphasis_cfg; + int i; + + for (i = 0; i < dp_opts->lanes; i++) { + v_level = max(v_level, dp_opts->voltage[i]); + p_level = max(p_level, dp_opts->pre[i]); + } + + if (dp_opts->lanes == 1) { + bias_en = 0x3e; + drvr_en = 0x13; + } else { + bias_en = 0x3f; + drvr_en = 0x10; + } + + voltage_swing_cfg = qmp_dp_v3_voltage_swing_hbr_rbr[v_level][p_level]; + pre_emphasis_cfg = qmp_dp_v3_pre_emphasis_hbr_rbr[v_level][p_level]; + + /* TODO: Move check to config check */ + if (voltage_swing_cfg == 0xFF && pre_emphasis_cfg == 0xFF) + return; + + /* Enable MUX to use Cursor values from these registers */ + voltage_swing_cfg |= DP_PHY_TXn_TX_DRV_LVL_MUX_EN; + pre_emphasis_cfg |= DP_PHY_TXn_TX_EMP_POST1_LVL_MUX_EN; + + writel(voltage_swing_cfg, qphy->tx + QSERDES_V3_TX_TX_DRV_LVL); + writel(pre_emphasis_cfg, qphy->tx + QSERDES_V3_TX_TX_EMP_POST1_LVL); + writel(voltage_swing_cfg, qphy->tx2 + QSERDES_V3_TX_TX_DRV_LVL); + writel(pre_emphasis_cfg, qphy->tx2 + QSERDES_V3_TX_TX_EMP_POST1_LVL); + + writel(drvr_en, qphy->tx + QSERDES_V3_TX_HIGHZ_DRVR_EN); + writel(bias_en, qphy->tx + QSERDES_V3_TX_TRANSCEIVER_BIAS_EN); + writel(drvr_en, qphy->tx2 + QSERDES_V3_TX_HIGHZ_DRVR_EN); + writel(bias_en, qphy->tx2 + QSERDES_V3_TX_TRANSCEIVER_BIAS_EN); +} + +static int qcom_qmp_dp_phy_configure(struct phy *phy, union phy_configure_opts *opts) +{ + const struct phy_configure_opts_dp *dp_opts = &opts->dp; + struct qmp_phy *qphy = phy_get_drvdata(phy); + + memcpy(&qphy->dp_opts, dp_opts, sizeof(*dp_opts)); + if (qphy->dp_opts.set_voltages) { + qcom_qmp_phy_configure_dp_tx(qphy); + qphy->dp_opts.set_voltages = 0; + } + + return 0; +} + +static int qcom_qmp_phy_configure_dp_phy(struct qmp_phy *qphy) +{ + const struct qmp_phy_dp_clks *dp_clks = qphy->dp_clks; + const struct phy_configure_opts_dp *dp_opts = &qphy->dp_opts; + u32 val, phy_vco_div, status; + unsigned long pixel_freq; + + val = DP_PHY_PD_CTL_PWRDN | DP_PHY_PD_CTL_AUX_PWRDN | + DP_PHY_PD_CTL_PLL_PWRDN | DP_PHY_PD_CTL_DP_CLAMP_EN; + + /* + * TODO: Assume orientation is CC1 for now and two lanes, need to + * use type-c connector to understand orientation and lanes. + * + * Otherwise val changes to be like below if this code understood + * the orientation of the type-c cable. 
+ * + * if (lane_cnt == 4 || orientation == ORIENTATION_CC2) + * val |= DP_PHY_PD_CTL_LANE_0_1_PWRDN; + * if (lane_cnt == 4 || orientation == ORIENTATION_CC1) + * val |= DP_PHY_PD_CTL_LANE_2_3_PWRDN; + * if (orientation == ORIENTATION_CC2) + * writel(0x4c, qphy->pcs + QSERDES_V3_DP_PHY_MODE); + */ + val |= DP_PHY_PD_CTL_LANE_2_3_PWRDN; + writel(val, qphy->pcs + QSERDES_V3_DP_PHY_PD_CTL); + + writel(0x5c, qphy->pcs + QSERDES_V3_DP_PHY_MODE); + writel(0x05, qphy->pcs + QSERDES_V3_DP_PHY_TX0_TX1_LANE_CTL); + writel(0x05, qphy->pcs + QSERDES_V3_DP_PHY_TX2_TX3_LANE_CTL); + + switch (dp_opts->link_rate) { + case 1620: + phy_vco_div = 0x1; + pixel_freq = 1620000000UL / 2; + break; + case 2700: + phy_vco_div = 0x1; + pixel_freq = 2700000000UL / 2; + break; + case 5400: + phy_vco_div = 0x2; + pixel_freq = 5400000000UL / 4; + break; + case 8100: + phy_vco_div = 0x0; + pixel_freq = 8100000000UL / 6; + break; + default: + /* Other link rates aren't supported */ + return -EINVAL; + } + writel(phy_vco_div, qphy->pcs + QSERDES_V3_DP_PHY_VCO_DIV); + + clk_set_rate(dp_clks->dp_link_hw.clk, dp_opts->link_rate * 100000); + clk_set_rate(dp_clks->dp_pixel_hw.clk, pixel_freq); + + writel(0x04, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG2); + writel(0x01, qphy->pcs + QSERDES_V3_DP_PHY_CFG); + writel(0x05, qphy->pcs + QSERDES_V3_DP_PHY_CFG); + writel(0x01, qphy->pcs + QSERDES_V3_DP_PHY_CFG); + writel(0x09, qphy->pcs + QSERDES_V3_DP_PHY_CFG); + + writel(0x20, qphy->serdes + QSERDES_V3_COM_RESETSM_CNTRL); + + if (readl_poll_timeout(qphy->serdes + QSERDES_V3_COM_C_READY_STATUS, + status, + ((status & BIT(0)) > 0), + 500, + 10000)) + return -ETIMEDOUT; + + writel(0x19, qphy->pcs + QSERDES_V3_DP_PHY_CFG); + + if (readl_poll_timeout(qphy->pcs + QSERDES_V3_DP_PHY_STATUS, + status, + ((status & BIT(1)) > 0), + 500, + 10000)) + return -ETIMEDOUT; + + writel(0x18, qphy->pcs + QSERDES_V3_DP_PHY_CFG); + udelay(2000); + writel(0x19, qphy->pcs + QSERDES_V3_DP_PHY_CFG); + + return readl_poll_timeout(qphy->pcs + QSERDES_V3_DP_PHY_STATUS, + status, + ((status & BIT(1)) > 0), + 500, + 10000); +} + +/* + * We need to calibrate the aux setting here as many times + * as the caller tries + */ +static int qcom_qmp_dp_phy_calibrate(struct phy *phy) +{ + struct qmp_phy *qphy = phy_get_drvdata(phy); + const u8 cfg1_settings[] = { 0x13, 0x23, 0x1d }; + u8 val; + + qphy->dp_aux_cfg++; + qphy->dp_aux_cfg %= ARRAY_SIZE(cfg1_settings); + val = cfg1_settings[qphy->dp_aux_cfg]; + + writel(val, qphy->pcs + QSERDES_V3_DP_PHY_AUX_CFG1); + + return 0; +} + static int qcom_qmp_phy_com_init(struct qmp_phy *qphy) { struct qcom_qmp *qmp = qphy->qmp; - const struct qmp_phy_cfg *cfg = qmp->cfg; - void __iomem *serdes = qmp->serdes; + const struct qmp_phy_cfg *cfg = qphy->cfg; + void __iomem *serdes = qphy->serdes; void __iomem *pcs = qphy->pcs; void __iomem *dp_com = qmp->dp_com; int ret, i; @@ -2514,7 +2942,7 @@ static int qcom_qmp_phy_com_init(struct qmp_phy *qphy) ret = reset_control_deassert(qmp->resets[i]); if (ret) { dev_err(qmp->dev, "%s reset deassert failed\n", - qmp->cfg->reset_list[i]); + qphy->cfg->reset_list[i]); goto err_rst; } } @@ -2533,6 +2961,9 @@ static int qcom_qmp_phy_com_init(struct qmp_phy *qphy) SW_DPPHY_RESET_MUX | SW_DPPHY_RESET | SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET); + /* Default type-c orientation, i.e CC1 */ + qphy_setbits(dp_com, QPHY_V3_DP_COM_TYPEC_CTRL, 0x02); + qphy_setbits(dp_com, QPHY_V3_DP_COM_PHY_MODE_CTRL, USB3_MODE | DP_MODE); @@ -2540,6 +2971,9 @@ static int qcom_qmp_phy_com_init(struct qmp_phy *qphy) 
qphy_clrbits(dp_com, QPHY_V3_DP_COM_RESET_OVRD_CTRL, SW_DPPHY_RESET_MUX | SW_DPPHY_RESET | SW_USB3PHY_RESET_MUX | SW_USB3PHY_RESET); + + qphy_clrbits(dp_com, QPHY_V3_DP_COM_SWI_CTRL, 0x03); + qphy_clrbits(dp_com, QPHY_V3_DP_COM_SW_RESET, SW_RESET); } if (cfg->has_phy_com_ctrl) { @@ -2555,36 +2989,10 @@ static int qcom_qmp_phy_com_init(struct qmp_phy *qphy) cfg->pwrdn_ctrl); } - /* Serdes configuration */ - qcom_qmp_phy_configure(serdes, cfg->regs, cfg->serdes_tbl, - cfg->serdes_tbl_num); - - if (cfg->has_phy_com_ctrl) { - void __iomem *status; - unsigned int mask, val; - - qphy_clrbits(serdes, cfg->regs[QPHY_COM_SW_RESET], SW_RESET); - qphy_setbits(serdes, cfg->regs[QPHY_COM_START_CONTROL], - SERDES_START | PCS_START); - - status = serdes + cfg->regs[QPHY_COM_PCS_READY_STATUS]; - mask = cfg->mask_com_pcs_ready; - - ret = readl_poll_timeout(status, val, (val & mask), 10, - PHY_INIT_COMPLETE_TIMEOUT); - if (ret) { - dev_err(qmp->dev, - "phy common block init timed-out\n"); - goto err_com_init; - } - } - mutex_unlock(&qmp->phy_mutex); return 0; -err_com_init: - clk_bulk_disable_unprepare(cfg->num_clks, qmp->clks); err_rst: while (++i < cfg->num_resets) reset_control_assert(qmp->resets[i]); @@ -2596,10 +3004,11 @@ err_reg_enable: return ret; } -static int qcom_qmp_phy_com_exit(struct qcom_qmp *qmp) +static int qcom_qmp_phy_com_exit(struct qmp_phy *qphy) { - const struct qmp_phy_cfg *cfg = qmp->cfg; - void __iomem *serdes = qmp->serdes; + struct qcom_qmp *qmp = qphy->qmp; + const struct qmp_phy_cfg *cfg = qphy->cfg; + void __iomem *serdes = qphy->serdes; int i = cfg->num_resets; mutex_lock(&qmp->phy_mutex); @@ -2630,20 +3039,12 @@ static int qcom_qmp_phy_com_exit(struct qcom_qmp *qmp) return 0; } -static int qcom_qmp_phy_enable(struct phy *phy) +static int qcom_qmp_phy_init(struct phy *phy) { struct qmp_phy *qphy = phy_get_drvdata(phy); struct qcom_qmp *qmp = qphy->qmp; - const struct qmp_phy_cfg *cfg = qmp->cfg; - void __iomem *tx = qphy->tx; - void __iomem *rx = qphy->rx; - void __iomem *pcs = qphy->pcs; - void __iomem *pcs_misc = qphy->pcs_misc; - void __iomem *dp_com = qmp->dp_com; - void __iomem *status; - unsigned int mask, val, ready; + const struct qmp_phy_cfg *cfg = qphy->cfg; int ret; - dev_vdbg(qmp->dev, "Initializing QMP phy\n"); if (cfg->no_pcs_sw_reset) { @@ -2670,13 +3071,34 @@ static int qcom_qmp_phy_enable(struct phy *phy) ret = reset_control_assert(qmp->ufs_reset); if (ret) - goto err_lane_rst; + return ret; } ret = qcom_qmp_phy_com_init(qphy); if (ret) return ret; + if (cfg->type == PHY_TYPE_DP) + qcom_qmp_phy_dp_aux_init(qphy); + + return 0; +} + +static int qcom_qmp_phy_power_on(struct phy *phy) +{ + struct qmp_phy *qphy = phy_get_drvdata(phy); + struct qcom_qmp *qmp = qphy->qmp; + const struct qmp_phy_cfg *cfg = qphy->cfg; + void __iomem *tx = qphy->tx; + void __iomem *rx = qphy->rx; + void __iomem *pcs = qphy->pcs; + void __iomem *pcs_misc = qphy->pcs_misc; + void __iomem *status; + unsigned int mask, val, ready; + int ret; + + qcom_qmp_phy_serdes_init(qphy); + if (cfg->has_lane_rst) { ret = reset_control_deassert(qphy->lane_rst); if (ret) { @@ -2700,13 +3122,23 @@ static int qcom_qmp_phy_enable(struct phy *phy) qcom_qmp_phy_configure_lane(qphy->tx2, cfg->regs, cfg->tx_tbl, cfg->tx_tbl_num, 2); + /* Configure special DP tx tunings */ + if (cfg->type == PHY_TYPE_DP) + qcom_qmp_phy_configure_dp_tx(qphy); + qcom_qmp_phy_configure_lane(rx, cfg->regs, cfg->rx_tbl, cfg->rx_tbl_num, 1); + if (cfg->is_dual_lane_phy) qcom_qmp_phy_configure_lane(qphy->rx2, cfg->regs, cfg->rx_tbl, 
cfg->rx_tbl_num, 2); - qcom_qmp_phy_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num); + /* Configure link rate, swing, etc. */ + if (cfg->type == PHY_TYPE_DP) + qcom_qmp_phy_configure_dp_phy(qphy); + else + qcom_qmp_phy_configure(pcs, cfg->regs, cfg->pcs_tbl, cfg->pcs_tbl_num); + ret = reset_control_deassert(qmp->ufs_reset); if (ret) goto err_lane_rst; @@ -2724,102 +3156,129 @@ static int qcom_qmp_phy_enable(struct phy *phy) if (cfg->has_pwrdn_delay) usleep_range(cfg->pwrdn_delay_min, cfg->pwrdn_delay_max); - /* Pull PHY out of reset state */ - if (!cfg->no_pcs_sw_reset) - qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); - - if (cfg->has_phy_dp_com_ctrl) - qphy_clrbits(dp_com, QPHY_V3_DP_COM_SW_RESET, SW_RESET); - - /* start SerDes and Phy-Coding-Sublayer */ - qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl); - - if (cfg->type == PHY_TYPE_UFS) { - status = pcs + cfg->regs[QPHY_PCS_READY_STATUS]; - mask = PCS_READY; - ready = PCS_READY; - } else { - status = pcs + cfg->regs[QPHY_PCS_STATUS]; - mask = PHYSTATUS; - ready = 0; - } + if (cfg->type != PHY_TYPE_DP) { + /* Pull PHY out of reset state */ + if (!cfg->no_pcs_sw_reset) + qphy_clrbits(pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); + /* start SerDes and Phy-Coding-Sublayer */ + qphy_setbits(pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl); + + if (cfg->type == PHY_TYPE_UFS) { + status = pcs + cfg->regs[QPHY_PCS_READY_STATUS]; + mask = PCS_READY; + ready = PCS_READY; + } else { + status = pcs + cfg->regs[QPHY_PCS_STATUS]; + mask = PHYSTATUS; + ready = 0; + } - ret = readl_poll_timeout(status, val, (val & mask) == ready, 10, - PHY_INIT_COMPLETE_TIMEOUT); - if (ret) { - dev_err(qmp->dev, "phy initialization timed-out\n"); - goto err_pcs_ready; + ret = readl_poll_timeout(status, val, (val & mask) == ready, 10, + PHY_INIT_COMPLETE_TIMEOUT); + if (ret) { + dev_err(qmp->dev, "phy initialization timed-out\n"); + goto err_pcs_ready; + } } - qmp->phy_initialized = true; return 0; err_pcs_ready: - reset_control_assert(qmp->ufs_reset); clk_disable_unprepare(qphy->pipe_clk); err_clk_enable: if (cfg->has_lane_rst) reset_control_assert(qphy->lane_rst); err_lane_rst: - qcom_qmp_phy_com_exit(qmp); - return ret; } -static int qcom_qmp_phy_disable(struct phy *phy) +static int qcom_qmp_phy_power_off(struct phy *phy) { struct qmp_phy *qphy = phy_get_drvdata(phy); - struct qcom_qmp *qmp = qphy->qmp; - const struct qmp_phy_cfg *cfg = qmp->cfg; + const struct qmp_phy_cfg *cfg = qphy->cfg; clk_disable_unprepare(qphy->pipe_clk); - /* PHY reset */ - if (!cfg->no_pcs_sw_reset) - qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); + if (cfg->type == PHY_TYPE_DP) { + /* Assert DP PHY power down */ + writel(DP_PHY_PD_CTL_PSR_PWRDN, qphy->pcs + QSERDES_V3_DP_PHY_PD_CTL); + } else { + /* PHY reset */ + if (!cfg->no_pcs_sw_reset) + qphy_setbits(qphy->pcs, cfg->regs[QPHY_SW_RESET], SW_RESET); - /* stop SerDes and Phy-Coding-Sublayer */ - qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl); + /* stop SerDes and Phy-Coding-Sublayer */ + qphy_clrbits(qphy->pcs, cfg->regs[QPHY_START_CTRL], cfg->start_ctrl); - /* Put PHY into POWER DOWN state: active low */ - if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) { - qphy_clrbits(qphy->pcs, cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], - cfg->pwrdn_ctrl); - } else { - qphy_clrbits(qphy->pcs, QPHY_POWER_DOWN_CONTROL, - cfg->pwrdn_ctrl); + /* Put PHY into POWER DOWN state: active low */ + if (cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL]) { + qphy_clrbits(qphy->pcs, 
cfg->regs[QPHY_PCS_POWER_DOWN_CONTROL], + cfg->pwrdn_ctrl); + } else { + qphy_clrbits(qphy->pcs, QPHY_POWER_DOWN_CONTROL, + cfg->pwrdn_ctrl); + } } + return 0; +} + +static int qcom_qmp_phy_exit(struct phy *phy) +{ + struct qmp_phy *qphy = phy_get_drvdata(phy); + const struct qmp_phy_cfg *cfg = qphy->cfg; + if (cfg->has_lane_rst) reset_control_assert(qphy->lane_rst); - qcom_qmp_phy_com_exit(qmp); - - qmp->phy_initialized = false; + qcom_qmp_phy_com_exit(qphy); return 0; } +static int qcom_qmp_phy_enable(struct phy *phy) +{ + int ret; + + ret = qcom_qmp_phy_init(phy); + if (ret) + return ret; + + ret = qcom_qmp_phy_power_on(phy); + if (ret) + qcom_qmp_phy_exit(phy); + + return ret; +} + +static int qcom_qmp_phy_disable(struct phy *phy) +{ + int ret; + + ret = qcom_qmp_phy_power_off(phy); + if (ret) + return ret; + return qcom_qmp_phy_exit(phy); +} + static int qcom_qmp_phy_set_mode(struct phy *phy, enum phy_mode mode, int submode) { struct qmp_phy *qphy = phy_get_drvdata(phy); - struct qcom_qmp *qmp = qphy->qmp; - qmp->mode = mode; + qphy->mode = mode; return 0; } static void qcom_qmp_phy_enable_autonomous_mode(struct qmp_phy *qphy) { - struct qcom_qmp *qmp = qphy->qmp; - const struct qmp_phy_cfg *cfg = qmp->cfg; + const struct qmp_phy_cfg *cfg = qphy->cfg; void __iomem *pcs = qphy->pcs; void __iomem *pcs_misc = qphy->pcs_misc; u32 intr_mask; - if (qmp->mode == PHY_MODE_USB_HOST_SS || - qmp->mode == PHY_MODE_USB_DEVICE_SS) + if (qphy->mode == PHY_MODE_USB_HOST_SS || + qphy->mode == PHY_MODE_USB_DEVICE_SS) intr_mask = ARCVR_DTCT_EN | ALFPS_DTCT_EN; else intr_mask = ARCVR_DTCT_EN | ARCVR_DTCT_EVENT_SEL; @@ -2842,8 +3301,7 @@ static void qcom_qmp_phy_enable_autonomous_mode(struct qmp_phy *qphy) static void qcom_qmp_phy_disable_autonomous_mode(struct qmp_phy *qphy) { - struct qcom_qmp *qmp = qphy->qmp; - const struct qmp_phy_cfg *cfg = qmp->cfg; + const struct qmp_phy_cfg *cfg = qphy->cfg; void __iomem *pcs = qphy->pcs; void __iomem *pcs_misc = qphy->pcs_misc; @@ -2863,15 +3321,15 @@ static int __maybe_unused qcom_qmp_phy_runtime_suspend(struct device *dev) { struct qcom_qmp *qmp = dev_get_drvdata(dev); struct qmp_phy *qphy = qmp->phys[0]; - const struct qmp_phy_cfg *cfg = qmp->cfg; + const struct qmp_phy_cfg *cfg = qphy->cfg; - dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qmp->mode); + dev_vdbg(dev, "Suspending QMP phy, mode:%d\n", qphy->mode); - /* Supported only for USB3 PHY */ + /* Supported only for USB3 PHY and luckily USB3 is the first phy */ if (cfg->type != PHY_TYPE_USB3) return 0; - if (!qmp->phy_initialized) { + if (!qmp->init_count) { dev_vdbg(dev, "PHY not initialized, bailing out\n"); return 0; } @@ -2888,16 +3346,16 @@ static int __maybe_unused qcom_qmp_phy_runtime_resume(struct device *dev) { struct qcom_qmp *qmp = dev_get_drvdata(dev); struct qmp_phy *qphy = qmp->phys[0]; - const struct qmp_phy_cfg *cfg = qmp->cfg; + const struct qmp_phy_cfg *cfg = qphy->cfg; int ret = 0; - dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qmp->mode); + dev_vdbg(dev, "Resuming QMP phy, mode:%d\n", qphy->mode); - /* Supported only for USB3 PHY */ + /* Supported only for USB3 PHY and luckily USB3 is the first phy */ if (cfg->type != PHY_TYPE_USB3) return 0; - if (!qmp->phy_initialized) { + if (!qmp->init_count) { dev_vdbg(dev, "PHY not initialized, bailing out\n"); return 0; } @@ -2920,10 +3378,10 @@ static int __maybe_unused qcom_qmp_phy_runtime_resume(struct device *dev) return 0; } -static int qcom_qmp_phy_vreg_init(struct device *dev) +static int qcom_qmp_phy_vreg_init(struct device *dev, const 
struct qmp_phy_cfg *cfg) { struct qcom_qmp *qmp = dev_get_drvdata(dev); - int num = qmp->cfg->num_vregs; + int num = cfg->num_vregs; int i; qmp->vregs = devm_kcalloc(dev, num, sizeof(*qmp->vregs), GFP_KERNEL); @@ -2931,24 +3389,24 @@ static int qcom_qmp_phy_vreg_init(struct device *dev) return -ENOMEM; for (i = 0; i < num; i++) - qmp->vregs[i].supply = qmp->cfg->vreg_list[i]; + qmp->vregs[i].supply = cfg->vreg_list[i]; return devm_regulator_bulk_get(dev, num, qmp->vregs); } -static int qcom_qmp_phy_reset_init(struct device *dev) +static int qcom_qmp_phy_reset_init(struct device *dev, const struct qmp_phy_cfg *cfg) { struct qcom_qmp *qmp = dev_get_drvdata(dev); int i; - qmp->resets = devm_kcalloc(dev, qmp->cfg->num_resets, + qmp->resets = devm_kcalloc(dev, cfg->num_resets, sizeof(*qmp->resets), GFP_KERNEL); if (!qmp->resets) return -ENOMEM; - for (i = 0; i < qmp->cfg->num_resets; i++) { + for (i = 0; i < cfg->num_resets; i++) { struct reset_control *rst; - const char *name = qmp->cfg->reset_list[i]; + const char *name = cfg->reset_list[i]; rst = devm_reset_control_get(dev, name); if (IS_ERR(rst)) { @@ -2961,10 +3419,10 @@ static int qcom_qmp_phy_reset_init(struct device *dev) return 0; } -static int qcom_qmp_phy_clk_init(struct device *dev) +static int qcom_qmp_phy_clk_init(struct device *dev, const struct qmp_phy_cfg *cfg) { struct qcom_qmp *qmp = dev_get_drvdata(dev); - int num = qmp->cfg->num_clks; + int num = cfg->num_clks; int i; qmp->clks = devm_kcalloc(dev, num, sizeof(*qmp->clks), GFP_KERNEL); @@ -2972,12 +3430,12 @@ static int qcom_qmp_phy_clk_init(struct device *dev) return -ENOMEM; for (i = 0; i < num; i++) - qmp->clks[i].id = qmp->cfg->clk_list[i]; + qmp->clks[i].id = cfg->clk_list[i]; return devm_clk_bulk_get(dev, num, qmp->clks); } -static void phy_pipe_clk_release_provider(void *res) +static void phy_clk_release_provider(void *res) { of_clk_del_provider(res); } @@ -3006,12 +3464,6 @@ static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np) struct clk_init_data init = { }; int ret; - if ((qmp->cfg->type != PHY_TYPE_USB3) && - (qmp->cfg->type != PHY_TYPE_PCIE)) { - /* not all phys register pipe clocks, so return success */ - return 0; - } - ret = of_property_read_string(np, "clock-output-names", &init.name); if (ret) { dev_err(qmp->dev, "%pOFn: No clock-output-names\n", np); @@ -3040,9 +3492,202 @@ static int phy_pipe_clk_register(struct qcom_qmp *qmp, struct device_node *np) * Roll a devm action because the clock provider is the child node, but * the child node is not actually a device. 
*/ - ret = devm_add_action(qmp->dev, phy_pipe_clk_release_provider, np); + ret = devm_add_action(qmp->dev, phy_clk_release_provider, np); + if (ret) + phy_clk_release_provider(np); + + return ret; +} + +/* + * Display Port PLL driver block diagram for branch clocks + * + * +------------------------------+ + * | DP_VCO_CLK | + * | | + * | +-------------------+ | + * | | (DP PLL/VCO) | | + * | +---------+---------+ | + * | v | + * | +----------+-----------+ | + * | | hsclk_divsel_clk_src | | + * | +----------+-----------+ | + * +------------------------------+ + * | + * +---------<---------v------------>----------+ + * | | + * +--------v----------------+ | + * | dp_phy_pll_link_clk | | + * | link_clk | | + * +--------+----------------+ | + * | | + * | | + * v v + * Input to DISPCC block | + * for link clk, crypto clk | + * and interface clock | + * | + * | + * +--------<------------+-----------------+---<---+ + * | | | + * +----v---------+ +--------v-----+ +--------v------+ + * | vco_divided | | vco_divided | | vco_divided | + * | _clk_src | | _clk_src | | _clk_src | + * | | | | | | + * |divsel_six | | divsel_two | | divsel_four | + * +-------+------+ +-----+--------+ +--------+------+ + * | | | + * v---->----------v-------------<------v + * | + * +----------+-----------------+ + * | dp_phy_pll_vco_div_clk | + * +---------+------------------+ + * | + * v + * Input to DISPCC block + * for DP pixel clock + * + */ +static int qcom_qmp_dp_pixel_clk_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + switch (req->rate) { + case 1620000000UL / 2: + case 2700000000UL / 2: + /* 5.4 and 8.1 GHz are same link rate as 2.7GHz, i.e. div 4 and div 6 */ + return 0; + default: + return -EINVAL; + } +} + +static unsigned long +qcom_qmp_dp_pixel_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) +{ + const struct qmp_phy_dp_clks *dp_clks; + const struct qmp_phy *qphy; + const struct phy_configure_opts_dp *dp_opts; + + dp_clks = container_of(hw, struct qmp_phy_dp_clks, dp_pixel_hw); + qphy = dp_clks->qphy; + dp_opts = &qphy->dp_opts; + + switch (dp_opts->link_rate) { + case 1620: + return 1620000000UL / 2; + case 2700: + return 2700000000UL / 2; + case 5400: + return 5400000000UL / 4; + case 8100: + return 8100000000UL / 6; + default: + return 0; + } +} + +static const struct clk_ops qcom_qmp_dp_pixel_clk_ops = { + .determine_rate = qcom_qmp_dp_pixel_clk_determine_rate, + .recalc_rate = qcom_qmp_dp_pixel_clk_recalc_rate, +}; + +static int qcom_qmp_dp_link_clk_determine_rate(struct clk_hw *hw, + struct clk_rate_request *req) +{ + switch (req->rate) { + case 162000000: + case 270000000: + case 540000000: + case 810000000: + return 0; + default: + return -EINVAL; + } +} + +static unsigned long +qcom_qmp_dp_link_clk_recalc_rate(struct clk_hw *hw, unsigned long parent_rate) +{ + const struct qmp_phy_dp_clks *dp_clks; + const struct qmp_phy *qphy; + const struct phy_configure_opts_dp *dp_opts; + + dp_clks = container_of(hw, struct qmp_phy_dp_clks, dp_link_hw); + qphy = dp_clks->qphy; + dp_opts = &qphy->dp_opts; + + switch (dp_opts->link_rate) { + case 1620: + case 2700: + case 5400: + case 8100: + return dp_opts->link_rate * 100000; + default: + return 0; + } +} + +static const struct clk_ops qcom_qmp_dp_link_clk_ops = { + .determine_rate = qcom_qmp_dp_link_clk_determine_rate, + .recalc_rate = qcom_qmp_dp_link_clk_recalc_rate, +}; + +static struct clk_hw * +qcom_qmp_dp_clks_hw_get(struct of_phandle_args *clkspec, void *data) +{ + struct qmp_phy_dp_clks *dp_clks = data; + unsigned int 
idx = clkspec->args[0]; + + if (idx >= 2) { + pr_err("%s: invalid index %u\n", __func__, idx); + return ERR_PTR(-EINVAL); + } + + if (idx == 0) + return &dp_clks->dp_link_hw; + + return &dp_clks->dp_pixel_hw; +} + +static int phy_dp_clks_register(struct qcom_qmp *qmp, struct qmp_phy *qphy, + struct device_node *np) +{ + struct clk_init_data init = { }; + struct qmp_phy_dp_clks *dp_clks; + int ret; + + dp_clks = devm_kzalloc(qmp->dev, sizeof(*dp_clks), GFP_KERNEL); + if (!dp_clks) + return -ENOMEM; + + dp_clks->qphy = qphy; + qphy->dp_clks = dp_clks; + + init.ops = &qcom_qmp_dp_link_clk_ops; + init.name = "qmp_dp_phy_pll_link_clk"; + dp_clks->dp_link_hw.init = &init; + ret = devm_clk_hw_register(qmp->dev, &dp_clks->dp_link_hw); + if (ret) + return ret; + + init.ops = &qcom_qmp_dp_pixel_clk_ops; + init.name = "qmp_dp_phy_pll_vco_div_clk"; + dp_clks->dp_pixel_hw.init = &init; + ret = devm_clk_hw_register(qmp->dev, &dp_clks->dp_pixel_hw); + if (ret) + return ret; + + ret = of_clk_add_hw_provider(np, qcom_qmp_dp_clks_hw_get, dp_clks); + if (ret) + return ret; + + /* + * Roll a devm action because the clock provider is the child node, but + * the child node is not actually a device. + */ + ret = devm_add_action(qmp->dev, phy_clk_release_provider, np); if (ret) - phy_pipe_clk_release_provider(np); + phy_clk_release_provider(np); return ret; } @@ -3054,6 +3699,17 @@ static const struct phy_ops qcom_qmp_phy_gen_ops = { .owner = THIS_MODULE, }; +static const struct phy_ops qcom_qmp_phy_dp_ops = { + .init = qcom_qmp_phy_init, + .configure = qcom_qmp_dp_phy_configure, + .power_on = qcom_qmp_phy_power_on, + .calibrate = qcom_qmp_dp_phy_calibrate, + .power_off = qcom_qmp_phy_power_off, + .exit = qcom_qmp_phy_exit, + .set_mode = qcom_qmp_phy_set_mode, + .owner = THIS_MODULE, +}; + static const struct phy_ops qcom_qmp_pcie_ufs_ops = { .power_on = qcom_qmp_phy_enable, .power_off = qcom_qmp_phy_disable, @@ -3062,12 +3718,13 @@ static const struct phy_ops qcom_qmp_pcie_ufs_ops = { }; static -int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id) +int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id, + void __iomem *serdes, const struct qmp_phy_cfg *cfg) { struct qcom_qmp *qmp = dev_get_drvdata(dev); struct phy *generic_phy; struct qmp_phy *qphy; - const struct phy_ops *ops = &qcom_qmp_phy_gen_ops; + const struct phy_ops *ops; char prop_name[MAX_PROP_NAME]; int ret; @@ -3075,6 +3732,8 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id) if (!qphy) return -ENOMEM; + qphy->cfg = cfg; + qphy->serdes = serdes; /* * Get memory resources for each phy lane: * Resources are indexed as: tx -> 0; rx -> 1; pcs -> 2. @@ -3099,7 +3758,7 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id) * back to old legacy behavior of assuming they can be reached at an * offset from the first lane. 
*/ - if (qmp->cfg->is_dual_lane_phy) { + if (cfg->is_dual_lane_phy) { qphy->tx2 = of_iomap(np, 3); qphy->rx2 = of_iomap(np, 4); if (!qphy->tx2 || !qphy->rx2) { @@ -3132,8 +3791,8 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id) snprintf(prop_name, sizeof(prop_name), "pipe%d", id); qphy->pipe_clk = of_clk_get_by_name(np, prop_name); if (IS_ERR(qphy->pipe_clk)) { - if (qmp->cfg->type == PHY_TYPE_PCIE || - qmp->cfg->type == PHY_TYPE_USB3) { + if (cfg->type == PHY_TYPE_PCIE || + cfg->type == PHY_TYPE_USB3) { ret = PTR_ERR(qphy->pipe_clk); if (ret != -EPROBE_DEFER) dev_err(dev, @@ -3145,7 +3804,7 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id) } /* Get lane reset, if any */ - if (qmp->cfg->has_lane_rst) { + if (cfg->has_lane_rst) { snprintf(prop_name, sizeof(prop_name), "lane%d", id); qphy->lane_rst = of_reset_control_get(np, prop_name); if (IS_ERR(qphy->lane_rst)) { @@ -3154,8 +3813,12 @@ int qcom_qmp_phy_create(struct device *dev, struct device_node *np, int id) } } - if (qmp->cfg->type == PHY_TYPE_UFS || qmp->cfg->type == PHY_TYPE_PCIE) + if (cfg->type == PHY_TYPE_UFS || cfg->type == PHY_TYPE_PCIE) ops = &qcom_qmp_pcie_ufs_ops; + else if (cfg->type == PHY_TYPE_DP) + ops = &qcom_qmp_phy_dp_ops; + else + ops = &qcom_qmp_phy_gen_ops; generic_phy = devm_phy_create(dev, np, ops); if (IS_ERR(generic_phy)) { @@ -3199,6 +3862,9 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = { .compatible = "qcom,sc7180-qmp-usb3-phy", .data = &sc7180_usb3phy_cfg, }, { + .compatible = "qcom,sc7180-qmp-usb3-dp-phy", + /* It's a combo phy */ + }, { .compatible = "qcom,sdm845-qhp-pcie-phy", .data = &sdm845_qhp_pciephy_cfg, }, { @@ -3239,6 +3905,14 @@ static const struct of_device_id qcom_qmp_phy_of_match_table[] = { }; MODULE_DEVICE_TABLE(of, qcom_qmp_phy_of_match_table); +static const struct of_device_id qcom_qmp_combo_phy_of_match_table[] = { + { + .compatible = "qcom,sc7180-qmp-usb3-dp-phy", + .data = &sc7180_usb3dpphy_cfg, + }, + { } +}; + static const struct dev_pm_ops qcom_qmp_phy_pm_ops = { SET_RUNTIME_PM_OPS(qcom_qmp_phy_runtime_suspend, qcom_qmp_phy_runtime_resume, NULL) @@ -3248,11 +3922,16 @@ static int qcom_qmp_phy_probe(struct platform_device *pdev) { struct qcom_qmp *qmp; struct device *dev = &pdev->dev; - struct resource *res; struct device_node *child; struct phy_provider *phy_provider; - void __iomem *base; - int num, id; + void __iomem *serdes; + void __iomem *usb_serdes; + void __iomem *dp_serdes; + const struct qmp_phy_combo_cfg *combo_cfg = NULL; + const struct qmp_phy_cfg *cfg = NULL; + const struct qmp_phy_cfg *usb_cfg = NULL; + const struct qmp_phy_cfg *dp_cfg = NULL; + int num, id, expected_phys; int ret; qmp = devm_kzalloc(dev, sizeof(*qmp), GFP_KERNEL); @@ -3263,40 +3942,57 @@ static int qcom_qmp_phy_probe(struct platform_device *pdev) dev_set_drvdata(dev, qmp); /* Get the specific init parameters of QMP phy */ - qmp->cfg = of_device_get_match_data(dev); - if (!qmp->cfg) - return -EINVAL; + cfg = of_device_get_match_data(dev); + if (!cfg) { + const struct of_device_id *match; - res = platform_get_resource(pdev, IORESOURCE_MEM, 0); - base = devm_ioremap_resource(dev, res); - if (IS_ERR(base)) - return PTR_ERR(base); + match = of_match_device(qcom_qmp_combo_phy_of_match_table, dev); + if (!match) + return -EINVAL; + + combo_cfg = match->data; + if (!combo_cfg) + return -EINVAL; + + usb_cfg = combo_cfg->usb_cfg; + cfg = usb_cfg; /* Setup clks and regulators */ + } /* per PHY serdes; usually located at base address */ 
- qmp->serdes = base; + usb_serdes = serdes = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(serdes)) + return PTR_ERR(serdes); /* per PHY dp_com; if PHY has dp_com control block */ - if (qmp->cfg->has_phy_dp_com_ctrl) { - res = platform_get_resource_byname(pdev, IORESOURCE_MEM, - "dp_com"); - base = devm_ioremap_resource(dev, res); - if (IS_ERR(base)) - return PTR_ERR(base); - - qmp->dp_com = base; + if (combo_cfg || cfg->has_phy_dp_com_ctrl) { + qmp->dp_com = devm_platform_ioremap_resource(pdev, 1); + if (IS_ERR(qmp->dp_com)) + return PTR_ERR(qmp->dp_com); + } + + if (combo_cfg) { + /* Only two serdes for combo PHY */ + dp_serdes = devm_platform_ioremap_resource(pdev, 2); + if (IS_ERR(dp_serdes)) + return PTR_ERR(dp_serdes); + + dp_cfg = combo_cfg->dp_cfg; + expected_phys = 2; + } else { + expected_phys = cfg->nlanes; } mutex_init(&qmp->phy_mutex); - ret = qcom_qmp_phy_clk_init(dev); + ret = qcom_qmp_phy_clk_init(dev, cfg); if (ret) return ret; - ret = qcom_qmp_phy_reset_init(dev); + ret = qcom_qmp_phy_reset_init(dev, cfg); if (ret) return ret; - ret = qcom_qmp_phy_vreg_init(dev); + ret = qcom_qmp_phy_vreg_init(dev, cfg); if (ret) { if (ret != -EPROBE_DEFER) dev_err(dev, "failed to get regulator supplies: %d\n", @@ -3306,14 +4002,13 @@ static int qcom_qmp_phy_probe(struct platform_device *pdev) num = of_get_available_child_count(dev->of_node); /* do we have a rogue child node ? */ - if (num > qmp->cfg->nlanes) + if (num > expected_phys) return -EINVAL; qmp->phys = devm_kcalloc(dev, num, sizeof(*qmp->phys), GFP_KERNEL); if (!qmp->phys) return -ENOMEM; - id = 0; pm_runtime_set_active(dev); pm_runtime_enable(dev); /* @@ -3322,9 +4017,18 @@ static int qcom_qmp_phy_probe(struct platform_device *pdev) */ pm_runtime_forbid(dev); + id = 0; for_each_available_child_of_node(dev->of_node, child) { + if (of_node_name_eq(child, "dp-phy")) { + cfg = dp_cfg; + serdes = dp_serdes; + } else if (of_node_name_eq(child, "usb3-phy")) { + cfg = usb_cfg; + serdes = usb_serdes; + } + /* Create per-lane phy */ - ret = qcom_qmp_phy_create(dev, child, id); + ret = qcom_qmp_phy_create(dev, child, id, serdes, cfg); if (ret) { dev_err(dev, "failed to create lane%d phy, %d\n", id, ret); @@ -3335,11 +4039,20 @@ static int qcom_qmp_phy_probe(struct platform_device *pdev) * Register the pipe clock provided by phy. * See function description to see details of this pipe clock. 
*/ - ret = phy_pipe_clk_register(qmp, child); - if (ret) { - dev_err(qmp->dev, - "failed to register pipe clock source\n"); - goto err_node_put; + if (cfg->type == PHY_TYPE_USB3 || cfg->type == PHY_TYPE_PCIE) { + ret = phy_pipe_clk_register(qmp, child); + if (ret) { + dev_err(qmp->dev, + "failed to register pipe clock source\n"); + goto err_node_put; + } + } else if (cfg->type == PHY_TYPE_DP) { + ret = phy_dp_clks_register(qmp, qmp->phys[id], child); + if (ret) { + dev_err(qmp->dev, + "failed to register DP clock source\n"); + goto err_node_put; + } } id++; } diff --git a/drivers/phy/qualcomm/phy-qcom-qmp.h b/drivers/phy/qualcomm/phy-qcom-qmp.h index 904b80ab9009..b7c530088a6c 100644 --- a/drivers/phy/qualcomm/phy-qcom-qmp.h +++ b/drivers/phy/qualcomm/phy-qcom-qmp.h @@ -137,6 +137,9 @@ #define QPHY_V3_DP_COM_RESET_OVRD_CTRL 0x1c /* Only for QMP V3 PHY - QSERDES COM registers */ +#define QSERDES_V3_COM_ATB_SEL1 0x000 +#define QSERDES_V3_COM_ATB_SEL2 0x004 +#define QSERDES_V3_COM_FREQ_UPDATE 0x008 #define QSERDES_V3_COM_BG_TIMER 0x00c #define QSERDES_V3_COM_SSC_EN_CENTER 0x010 #define QSERDES_V3_COM_SSC_ADJ_PER1 0x014 @@ -146,6 +149,13 @@ #define QSERDES_V3_COM_SSC_STEP_SIZE1 0x024 #define QSERDES_V3_COM_SSC_STEP_SIZE2 0x028 #define QSERDES_V3_COM_BIAS_EN_CLKBUFLR_EN 0x034 +# define QSERDES_V3_COM_BIAS_EN 0x0001 +# define QSERDES_V3_COM_BIAS_EN_MUX 0x0002 +# define QSERDES_V3_COM_CLKBUF_R_EN 0x0004 +# define QSERDES_V3_COM_CLKBUF_L_EN 0x0008 +# define QSERDES_V3_COM_EN_SYSCLK_TX_SEL 0x0010 +# define QSERDES_V3_COM_CLKBUF_RX_DRIVE_L 0x0020 +# define QSERDES_V3_COM_CLKBUF_RX_DRIVE_R 0x0040 #define QSERDES_V3_COM_CLK_ENABLE1 0x038 #define QSERDES_V3_COM_SYS_CLK_CTRL 0x03c #define QSERDES_V3_COM_SYSCLK_BUF_ENABLE 0x040 @@ -207,12 +217,36 @@ #define QSERDES_V3_COM_CMN_MODE 0x184 /* Only for QMP V3 PHY - TX registers */ +#define QSERDES_V3_TX_BIST_MODE_LANENO 0x000 +#define QSERDES_V3_TX_CLKBUF_ENABLE 0x008 +#define QSERDES_V3_TX_TX_EMP_POST1_LVL 0x00c +# define DP_PHY_TXn_TX_EMP_POST1_LVL_MASK 0x001f +# define DP_PHY_TXn_TX_EMP_POST1_LVL_MUX_EN 0x0020 + +#define QSERDES_V3_TX_TX_DRV_LVL 0x01c +# define DP_PHY_TXn_TX_DRV_LVL_MASK 0x001f +# define DP_PHY_TXn_TX_DRV_LVL_MUX_EN 0x0020 + +#define QSERDES_V3_TX_RESET_TSYNC_EN 0x024 +#define QSERDES_V3_TX_PRE_STALL_LDO_BOOST_EN 0x028 + +#define QSERDES_V3_TX_TX_BAND 0x02c +#define QSERDES_V3_TX_SLEW_CNTL 0x030 +#define QSERDES_V3_TX_INTERFACE_SELECT 0x034 +#define QSERDES_V3_TX_RES_CODE_LANE_TX 0x03c +#define QSERDES_V3_TX_RES_CODE_LANE_RX 0x040 #define QSERDES_V3_TX_RES_CODE_LANE_OFFSET_TX 0x044 #define QSERDES_V3_TX_RES_CODE_LANE_OFFSET_RX 0x048 #define QSERDES_V3_TX_DEBUG_BUS_SEL 0x058 +#define QSERDES_V3_TX_TRANSCEIVER_BIAS_EN 0x05c #define QSERDES_V3_TX_HIGHZ_DRVR_EN 0x060 +#define QSERDES_V3_TX_TX_POL_INV 0x064 +#define QSERDES_V3_TX_PARRATE_REC_DETECT_IDLE_EN 0x068 #define QSERDES_V3_TX_LANE_MODE_1 0x08c #define QSERDES_V3_TX_RCV_DETECT_LVL_2 0x0a4 +#define QSERDES_V3_TX_TRAN_DRVR_EMP_EN 0x0c0 +#define QSERDES_V3_TX_TX_INTERFACE_MODE 0x0c4 +#define QSERDES_V3_TX_VMODE_CTRL1 0x0f0 /* Only for QMP V3 PHY - RX registers */ #define QSERDES_V3_RX_UCDR_FO_GAIN 0x008 @@ -315,6 +349,52 @@ #define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG4 0x5c #define QPHY_V3_PCS_MISC_OSC_DTCT_MODE2_CONFIG5 0x60 +/* Only for QMP V3 PHY - DP PHY registers */ +#define QSERDES_V3_DP_PHY_REVISION_ID0 0x000 +#define QSERDES_V3_DP_PHY_REVISION_ID1 0x004 +#define QSERDES_V3_DP_PHY_REVISION_ID2 0x008 +#define QSERDES_V3_DP_PHY_REVISION_ID3 0x00c +#define QSERDES_V3_DP_PHY_CFG 0x010 
+#define QSERDES_V3_DP_PHY_PD_CTL 0x018 +# define DP_PHY_PD_CTL_PWRDN 0x001 +# define DP_PHY_PD_CTL_PSR_PWRDN 0x002 +# define DP_PHY_PD_CTL_AUX_PWRDN 0x004 +# define DP_PHY_PD_CTL_LANE_0_1_PWRDN 0x008 +# define DP_PHY_PD_CTL_LANE_2_3_PWRDN 0x010 +# define DP_PHY_PD_CTL_PLL_PWRDN 0x020 +# define DP_PHY_PD_CTL_DP_CLAMP_EN 0x040 +#define QSERDES_V3_DP_PHY_MODE 0x01c +#define QSERDES_V3_DP_PHY_AUX_CFG0 0x020 +#define QSERDES_V3_DP_PHY_AUX_CFG1 0x024 +#define QSERDES_V3_DP_PHY_AUX_CFG2 0x028 +#define QSERDES_V3_DP_PHY_AUX_CFG3 0x02c +#define QSERDES_V3_DP_PHY_AUX_CFG4 0x030 +#define QSERDES_V3_DP_PHY_AUX_CFG5 0x034 +#define QSERDES_V3_DP_PHY_AUX_CFG6 0x038 +#define QSERDES_V3_DP_PHY_AUX_CFG7 0x03c +#define QSERDES_V3_DP_PHY_AUX_CFG8 0x040 +#define QSERDES_V3_DP_PHY_AUX_CFG9 0x044 + +#define QSERDES_V3_DP_PHY_AUX_INTERRUPT_MASK 0x048 +# define PHY_AUX_STOP_ERR_MASK 0x01 +# define PHY_AUX_DEC_ERR_MASK 0x02 +# define PHY_AUX_SYNC_ERR_MASK 0x04 +# define PHY_AUX_ALIGN_ERR_MASK 0x08 +# define PHY_AUX_REQ_ERR_MASK 0x10 + +#define QSERDES_V3_DP_PHY_AUX_INTERRUPT_CLEAR 0x04c +#define QSERDES_V3_DP_PHY_AUX_BIST_CFG 0x050 + +#define QSERDES_V3_DP_PHY_VCO_DIV 0x064 +#define QSERDES_V3_DP_PHY_TX0_TX1_LANE_CTL 0x06c +#define QSERDES_V3_DP_PHY_TX2_TX3_LANE_CTL 0x088 + +#define QSERDES_V3_DP_PHY_SPARE0 0x0ac +#define DP_PHY_SPARE0_MASK 0x0f +#define DP_PHY_SPARE0_ORIENTATION_INFO_SHIFT 0x04(0x0004) + +#define QSERDES_V3_DP_PHY_STATUS 0x0c0 + /* Only for QMP V4 PHY - QSERDES COM registers */ #define QSERDES_V4_COM_SSC_EN_CENTER 0x010 #define QSERDES_V4_COM_SSC_PER1 0x01c diff --git a/drivers/phy/ralink/phy-ralink-usb.c b/drivers/phy/ralink/phy-ralink-usb.c index ba3c197fc5b0..95dfa9fd284d 100644 --- a/drivers/phy/ralink/phy-ralink-usb.c +++ b/drivers/phy/ralink/phy-ralink-usb.c @@ -142,7 +142,7 @@ static int ralink_usb_phy_power_off(struct phy *_phy) return 0; } -static struct phy_ops ralink_usb_phy_ops = { +static const struct phy_ops ralink_usb_phy_ops = { .power_on = ralink_usb_phy_power_on, .power_off = ralink_usb_phy_power_off, .owner = THIS_MODULE, diff --git a/drivers/phy/rockchip/Kconfig b/drivers/phy/rockchip/Kconfig index 0824b9dd5683..c2f22f90736c 100644 --- a/drivers/phy/rockchip/Kconfig +++ b/drivers/phy/rockchip/Kconfig @@ -9,6 +9,18 @@ config PHY_ROCKCHIP_DP help Enable this to support the Rockchip Display Port PHY. +config PHY_ROCKCHIP_DPHY_RX0 + tristate "Rockchip MIPI Synopsys DPHY RX0 driver" + depends on ARCH_ROCKCHIP || COMPILE_TEST + select GENERIC_PHY_MIPI_DPHY + select GENERIC_PHY + help + Enable this to support the Rockchip MIPI Synopsys DPHY RX0 + associated to the Rockchip ISP module present in RK3399 SoCs. + + To compile this driver as a module, choose M here: the module + will be called phy-rockchip-dphy-rx0. 
+ config PHY_ROCKCHIP_EMMC tristate "Rockchip EMMC PHY Driver" depends on ARCH_ROCKCHIP && OF diff --git a/drivers/phy/rockchip/Makefile b/drivers/phy/rockchip/Makefile index 9f59a81e4e0d..c3cfc7f0af5c 100644 --- a/drivers/phy/rockchip/Makefile +++ b/drivers/phy/rockchip/Makefile @@ -1,5 +1,6 @@ # SPDX-License-Identifier: GPL-2.0 obj-$(CONFIG_PHY_ROCKCHIP_DP) += phy-rockchip-dp.o +obj-$(CONFIG_PHY_ROCKCHIP_DPHY_RX0) += phy-rockchip-dphy-rx0.o obj-$(CONFIG_PHY_ROCKCHIP_EMMC) += phy-rockchip-emmc.o obj-$(CONFIG_PHY_ROCKCHIP_INNO_DSIDPHY) += phy-rockchip-inno-dsidphy.o obj-$(CONFIG_PHY_ROCKCHIP_INNO_HDMI) += phy-rockchip-inno-hdmi.o diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c b/drivers/phy/rockchip/phy-rockchip-dphy-rx0.c index 7c4df6d48c43..4df9476ef2a9 100644 --- a/drivers/staging/media/phy-rockchip-dphy-rx0/phy-rockchip-dphy-rx0.c +++ b/drivers/phy/rockchip/phy-rockchip-dphy-rx0.c @@ -16,6 +16,7 @@ */ #include <linux/clk.h> +#include <linux/delay.h> #include <linux/io.h> #include <linux/mfd/syscon.h> #include <linux/module.h> diff --git a/drivers/phy/samsung/phy-exynos5-usbdrd.c b/drivers/phy/samsung/phy-exynos5-usbdrd.c index 0d818b77a0d8..cfa9b8b7e5ac 100644 --- a/drivers/phy/samsung/phy-exynos5-usbdrd.c +++ b/drivers/phy/samsung/phy-exynos5-usbdrd.c @@ -16,6 +16,7 @@ #include <linux/of.h> #include <linux/of_address.h> #include <linux/of_device.h> +#include <linux/iopoll.h> #include <linux/phy/phy.h> #include <linux/platform_device.h> #include <linux/mutex.h> @@ -556,41 +557,25 @@ static int exynos5_usbdrd_phy_power_off(struct phy *phy) static int crport_handshake(struct exynos5_usbdrd_phy *phy_drd, u32 val, u32 cmd) { - u32 usec = 100; unsigned int result; + int err; writel(val | cmd, phy_drd->reg_phy + EXYNOS5_DRD_PHYREG0); - do { - result = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYREG1); - if (result & PHYREG1_CR_ACK) - break; - - udelay(1); - } while (usec-- > 0); - - if (!usec) { - dev_err(phy_drd->dev, - "CRPORT handshake timeout1 (0x%08x)\n", val); - return -ETIME; + err = readl_poll_timeout(phy_drd->reg_phy + EXYNOS5_DRD_PHYREG1, + result, (result & PHYREG1_CR_ACK), 1, 100); + if (err == -ETIMEDOUT) { + dev_err(phy_drd->dev, "CRPORT handshake timeout1 (0x%08x)\n", val); + return err; } - usec = 100; - writel(val, phy_drd->reg_phy + EXYNOS5_DRD_PHYREG0); - do { - result = readl(phy_drd->reg_phy + EXYNOS5_DRD_PHYREG1); - if (!(result & PHYREG1_CR_ACK)) - break; - - udelay(1); - } while (usec-- > 0); - - if (!usec) { - dev_err(phy_drd->dev, - "CRPORT handshake timeout2 (0x%08x)\n", val); - return -ETIME; + err = readl_poll_timeout(phy_drd->reg_phy + EXYNOS5_DRD_PHYREG1, + result, !(result & PHYREG1_CR_ACK), 1, 100); + if (err == -ETIMEDOUT) { + dev_err(phy_drd->dev, "CRPORT handshake timeout2 (0x%08x)\n", val); + return err; } return 0; diff --git a/drivers/phy/samsung/phy-samsung-ufs.c b/drivers/phy/samsung/phy-samsung-ufs.c index 9832599a0283..dd9ab1519d83 100644 --- a/drivers/phy/samsung/phy-samsung-ufs.c +++ b/drivers/phy/samsung/phy-samsung-ufs.c @@ -268,7 +268,7 @@ static int samsung_ufs_phy_exit(struct phy *phy) return 0; } -static struct phy_ops samsung_ufs_phy_ops = { +static const struct phy_ops samsung_ufs_phy_ops = { .init = samsung_ufs_phy_init, .exit = samsung_ufs_phy_exit, .power_on = samsung_ufs_phy_power_on, diff --git a/drivers/phy/socionext/Kconfig b/drivers/phy/socionext/Kconfig index 8c9d7c37536a..a3970e0f89da 100644 --- a/drivers/phy/socionext/Kconfig +++ b/drivers/phy/socionext/Kconfig @@ -34,3 +34,13 @@ config 
PHY_UNIPHIER_PCIE help Enable this to support PHY implemented in PCIe controller on UniPhier SoCs. This driver supports LD20 and PXs3 SoCs. + +config PHY_UNIPHIER_AHCI + tristate "UniPhier AHCI PHY driver" + depends on ARCH_UNIPHIER || COMPILE_TEST + depends on OF && HAS_IOMEM + default SATA_AHCI_PLATFORM + select GENERIC_PHY + help + Enable this to support PHY implemented in AHCI controller + on UniPhier SoCs. This driver supports PXs2 and PXs3 SoCs. diff --git a/drivers/phy/socionext/Makefile b/drivers/phy/socionext/Makefile index 7dc9095b5bb7..e67c2da6675c 100644 --- a/drivers/phy/socionext/Makefile +++ b/drivers/phy/socionext/Makefile @@ -6,3 +6,4 @@ obj-$(CONFIG_PHY_UNIPHIER_USB2) += phy-uniphier-usb2.o obj-$(CONFIG_PHY_UNIPHIER_USB3) += phy-uniphier-usb3hs.o phy-uniphier-usb3ss.o obj-$(CONFIG_PHY_UNIPHIER_PCIE) += phy-uniphier-pcie.o +obj-$(CONFIG_PHY_UNIPHIER_AHCI) += phy-uniphier-ahci.o diff --git a/drivers/phy/socionext/phy-uniphier-ahci.c b/drivers/phy/socionext/phy-uniphier-ahci.c new file mode 100644 index 000000000000..7427c40bf4ae --- /dev/null +++ b/drivers/phy/socionext/phy-uniphier-ahci.c @@ -0,0 +1,321 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * phy-uniphier-ahci.c - PHY driver for UniPhier AHCI controller + * Copyright 2016-2020, Socionext Inc. + * Author: Kunihiko Hayashi <hayashi.kunihiko@socionext.com> + */ + +#include <linux/bitfield.h> +#include <linux/bitops.h> +#include <linux/clk.h> +#include <linux/iopoll.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> +#include <linux/phy/phy.h> +#include <linux/platform_device.h> +#include <linux/reset.h> + +struct uniphier_ahciphy_priv { + struct device *dev; + void __iomem *base; + struct clk *clk, *clk_parent; + struct reset_control *rst, *rst_parent; + const struct uniphier_ahciphy_soc_data *data; +}; + +struct uniphier_ahciphy_soc_data { + int (*init)(struct uniphier_ahciphy_priv *priv); + int (*power_on)(struct uniphier_ahciphy_priv *priv); + int (*power_off)(struct uniphier_ahciphy_priv *priv); + bool is_ready_high; + bool is_phy_clk; +}; + +/* for PXs2/PXs3 */ +#define CKCTRL 0x0 +#define CKCTRL_P0_READY BIT(15) +#define CKCTRL_P0_RESET BIT(10) +#define CKCTRL_REF_SSP_EN BIT(9) +#define TXCTRL0 0x4 +#define TXCTRL0_AMP_G3_MASK GENMASK(22, 16) +#define TXCTRL0_AMP_G2_MASK GENMASK(14, 8) +#define TXCTRL0_AMP_G1_MASK GENMASK(6, 0) +#define TXCTRL1 0x8 +#define TXCTRL1_DEEMPH_G3_MASK GENMASK(21, 16) +#define TXCTRL1_DEEMPH_G2_MASK GENMASK(13, 8) +#define TXCTRL1_DEEMPH_G1_MASK GENMASK(5, 0) +#define RXCTRL 0xc +#define RXCTRL_LOS_LVL_MASK GENMASK(20, 16) +#define RXCTRL_LOS_BIAS_MASK GENMASK(10, 8) +#define RXCTRL_RX_EQ_MASK GENMASK(2, 0) + +static void uniphier_ahciphy_pxs2_enable(struct uniphier_ahciphy_priv *priv, + bool enable) +{ + u32 val; + + val = readl(priv->base + CKCTRL); + + if (enable) { + val |= CKCTRL_REF_SSP_EN; + writel(val, priv->base + CKCTRL); + val &= ~CKCTRL_P0_RESET; + writel(val, priv->base + CKCTRL); + } else { + val |= CKCTRL_P0_RESET; + writel(val, priv->base + CKCTRL); + val &= ~CKCTRL_REF_SSP_EN; + writel(val, priv->base + CKCTRL); + } +} + +static int uniphier_ahciphy_pxs2_power_on(struct uniphier_ahciphy_priv *priv) +{ + int ret; + u32 val; + + uniphier_ahciphy_pxs2_enable(priv, true); + + /* wait until PLL is ready */ + if (priv->data->is_ready_high) + ret = readl_poll_timeout(priv->base + CKCTRL, val, + (val & CKCTRL_P0_READY), 200, 400); + else + ret = readl_poll_timeout(priv->base + CKCTRL, val, + !(val & CKCTRL_P0_READY), 200, 400); + if (ret) { + 
dev_err(priv->dev, "Failed to check whether PHY PLL is ready\n"); + uniphier_ahciphy_pxs2_enable(priv, false); + } + + return ret; +} + +static int uniphier_ahciphy_pxs2_power_off(struct uniphier_ahciphy_priv *priv) +{ + uniphier_ahciphy_pxs2_enable(priv, false); + + return 0; +} + +static int uniphier_ahciphy_pxs3_init(struct uniphier_ahciphy_priv *priv) +{ + int i; + u32 val; + + /* setup port parameter */ + val = readl(priv->base + TXCTRL0); + val &= ~TXCTRL0_AMP_G3_MASK; + val |= FIELD_PREP(TXCTRL0_AMP_G3_MASK, 0x73); + val &= ~TXCTRL0_AMP_G2_MASK; + val |= FIELD_PREP(TXCTRL0_AMP_G2_MASK, 0x46); + val &= ~TXCTRL0_AMP_G1_MASK; + val |= FIELD_PREP(TXCTRL0_AMP_G1_MASK, 0x42); + writel(val, priv->base + TXCTRL0); + + val = readl(priv->base + TXCTRL1); + val &= ~TXCTRL1_DEEMPH_G3_MASK; + val |= FIELD_PREP(TXCTRL1_DEEMPH_G3_MASK, 0x23); + val &= ~TXCTRL1_DEEMPH_G2_MASK; + val |= FIELD_PREP(TXCTRL1_DEEMPH_G2_MASK, 0x05); + val &= ~TXCTRL1_DEEMPH_G1_MASK; + val |= FIELD_PREP(TXCTRL1_DEEMPH_G1_MASK, 0x05); + + val = readl(priv->base + RXCTRL); + val &= ~RXCTRL_LOS_LVL_MASK; + val |= FIELD_PREP(RXCTRL_LOS_LVL_MASK, 0x9); + val &= ~RXCTRL_LOS_BIAS_MASK; + val |= FIELD_PREP(RXCTRL_LOS_BIAS_MASK, 0x2); + val &= ~RXCTRL_RX_EQ_MASK; + val |= FIELD_PREP(RXCTRL_RX_EQ_MASK, 0x1); + + /* dummy read 25 times to make a wait time for the phy to stabilize */ + for (i = 0; i < 25; i++) + readl(priv->base + CKCTRL); + + return 0; +} + +static int uniphier_ahciphy_init(struct phy *phy) +{ + struct uniphier_ahciphy_priv *priv = phy_get_drvdata(phy); + int ret; + + ret = clk_prepare_enable(priv->clk_parent); + if (ret) + return ret; + + ret = reset_control_deassert(priv->rst_parent); + if (ret) + goto out_clk_disable; + + if (priv->data->init) { + ret = priv->data->init(priv); + if (ret) + goto out_rst_assert; + } + + return 0; + +out_rst_assert: + reset_control_assert(priv->rst_parent); +out_clk_disable: + clk_disable_unprepare(priv->clk_parent); + + return ret; +} + +static int uniphier_ahciphy_exit(struct phy *phy) +{ + struct uniphier_ahciphy_priv *priv = phy_get_drvdata(phy); + + reset_control_assert(priv->rst_parent); + clk_disable_unprepare(priv->clk_parent); + + return 0; +} + +static int uniphier_ahciphy_power_on(struct phy *phy) +{ + struct uniphier_ahciphy_priv *priv = phy_get_drvdata(phy); + int ret = 0; + + ret = clk_prepare_enable(priv->clk); + if (ret) + return ret; + + ret = reset_control_deassert(priv->rst); + if (ret) + goto out_clk_disable; + + if (priv->data->power_on) { + ret = priv->data->power_on(priv); + if (ret) + goto out_reset_assert; + } + + return 0; + +out_reset_assert: + reset_control_assert(priv->rst); +out_clk_disable: + clk_disable_unprepare(priv->clk); + + return ret; +} + +static int uniphier_ahciphy_power_off(struct phy *phy) +{ + struct uniphier_ahciphy_priv *priv = phy_get_drvdata(phy); + int ret = 0; + + if (priv->data->power_off) + ret = priv->data->power_off(priv); + + reset_control_assert(priv->rst); + clk_disable_unprepare(priv->clk); + + return ret; +} + +static const struct phy_ops uniphier_ahciphy_ops = { + .init = uniphier_ahciphy_init, + .exit = uniphier_ahciphy_exit, + .power_on = uniphier_ahciphy_power_on, + .power_off = uniphier_ahciphy_power_off, + .owner = THIS_MODULE, +}; + +static int uniphier_ahciphy_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct uniphier_ahciphy_priv *priv; + struct phy *phy; + struct phy_provider *phy_provider; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + 
priv->dev = dev; + priv->data = of_device_get_match_data(dev); + if (WARN_ON(!priv->data)) + return -EINVAL; + + priv->base = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(priv->base)) + return PTR_ERR(priv->base); + + priv->clk_parent = devm_clk_get(dev, "link"); + if (IS_ERR(priv->clk_parent)) + return PTR_ERR(priv->clk_parent); + + if (priv->data->is_phy_clk) { + priv->clk = devm_clk_get(dev, "phy"); + if (IS_ERR(priv->clk)) + return PTR_ERR(priv->clk); + } + + priv->rst_parent = devm_reset_control_get_shared(dev, "link"); + if (IS_ERR(priv->rst_parent)) + return PTR_ERR(priv->rst_parent); + + priv->rst = devm_reset_control_get_shared(dev, "phy"); + if (IS_ERR(priv->rst)) + return PTR_ERR(priv->rst); + + phy = devm_phy_create(dev, dev->of_node, &uniphier_ahciphy_ops); + if (IS_ERR(phy)) { + dev_err(dev, "failed to create phy\n"); + return PTR_ERR(phy); + } + + phy_set_drvdata(phy, priv); + phy_provider = devm_of_phy_provider_register(dev, of_phy_simple_xlate); + if (IS_ERR(phy_provider)) + return PTR_ERR(phy_provider); + + return 0; +} + +static const struct uniphier_ahciphy_soc_data uniphier_pxs2_data = { + .power_on = uniphier_ahciphy_pxs2_power_on, + .power_off = uniphier_ahciphy_pxs2_power_off, + .is_ready_high = false, + .is_phy_clk = false, +}; + +static const struct uniphier_ahciphy_soc_data uniphier_pxs3_data = { + .init = uniphier_ahciphy_pxs3_init, + .power_on = uniphier_ahciphy_pxs2_power_on, + .power_off = uniphier_ahciphy_pxs2_power_off, + .is_ready_high = true, + .is_phy_clk = true, +}; + +static const struct of_device_id uniphier_ahciphy_match[] = { + { + .compatible = "socionext,uniphier-pxs2-ahci-phy", + .data = &uniphier_pxs2_data, + }, + { + .compatible = "socionext,uniphier-pxs3-ahci-phy", + .data = &uniphier_pxs3_data, + }, + { /* Sentinel */ }, +}; +MODULE_DEVICE_TABLE(of, uniphier_ahciphy_match); + +static struct platform_driver uniphier_ahciphy_driver = { + .probe = uniphier_ahciphy_probe, + .driver = { + .name = "uniphier-ahci-phy", + .of_match_table = uniphier_ahciphy_match, + }, +}; +module_platform_driver(uniphier_ahciphy_driver); + +MODULE_AUTHOR("Kunihiko Hayashi <hayashi.kunihiko@socionext.com>"); +MODULE_DESCRIPTION("UniPhier PHY driver for AHCI controller"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/phy/ti/phy-am654-serdes.c b/drivers/phy/ti/phy-am654-serdes.c index a174b3c3f010..2ff56ce77b30 100644 --- a/drivers/phy/ti/phy-am654-serdes.c +++ b/drivers/phy/ti/phy-am654-serdes.c @@ -19,15 +19,38 @@ #include <linux/pm_runtime.h> #include <linux/regmap.h> +#define CMU_R004 0x4 +#define CMU_R060 0x60 #define CMU_R07C 0x7c - +#define CMU_R088 0x88 +#define CMU_R0D0 0xd0 +#define CMU_R0E8 0xe8 + +#define LANE_R048 0x248 +#define LANE_R058 0x258 +#define LANE_R06c 0x26c +#define LANE_R070 0x270 +#define LANE_R070 0x270 +#define LANE_R19C 0x39c + +#define COMLANE_R004 0xa04 #define COMLANE_R138 0xb38 -#define VERSION 0x70 +#define VERSION_VAL 0x70 #define COMLANE_R190 0xb90 - #define COMLANE_R194 0xb94 +#define COMRXEQ_R004 0x1404 +#define COMRXEQ_R008 0x1408 +#define COMRXEQ_R00C 0x140c +#define COMRXEQ_R014 0x1414 +#define COMRXEQ_R018 0x1418 +#define COMRXEQ_R01C 0x141c +#define COMRXEQ_R04C 0x144c +#define COMRXEQ_R088 0x1488 +#define COMRXEQ_R094 0x1494 +#define COMRXEQ_R098 0x1498 + #define SERDES_CTRL 0x1fd0 #define WIZ_LANEXCTL_STS 0x1fe0 @@ -80,27 +103,136 @@ static const struct regmap_config serdes_am654_regmap_config = { .max_register = 0x1ffc, }; -static const struct reg_field cmu_master_cdn_o = REG_FIELD(CMU_R07C, 24, 24); -static 
const struct reg_field config_version = REG_FIELD(COMLANE_R138, 16, 23); -static const struct reg_field l1_master_cdn_o = REG_FIELD(COMLANE_R190, 9, 9); -static const struct reg_field cmu_ok_i_0 = REG_FIELD(COMLANE_R194, 19, 19); -static const struct reg_field por_en = REG_FIELD(SERDES_CTRL, 29, 29); -static const struct reg_field tx0_enable = REG_FIELD(WIZ_LANEXCTL_STS, 29, 31); -static const struct reg_field rx0_enable = REG_FIELD(WIZ_LANEXCTL_STS, 13, 15); -static const struct reg_field pll_enable = REG_FIELD(WIZ_PLL_CTRL, 29, 31); -static const struct reg_field pll_ok = REG_FIELD(WIZ_PLL_CTRL, 28, 28); +enum serdes_am654_fields { + /* CMU PLL Control */ + CMU_PLL_CTRL, + + LANE_PLL_CTRL_RXEQ_RXIDLE, + + /* CMU VCO bias current and VREG setting */ + AHB_PMA_CM_VCO_VBIAS_VREG, + AHB_PMA_CM_VCO_BIAS_VREG, + + AHB_PMA_CM_SR, + AHB_SSC_GEN_Z_O_20_13, + + /* AHB PMA Lane Configuration */ + AHB_PMA_LN_AGC_THSEL_VREGH, + + /* AGC and Signal detect threshold for Gen3 */ + AHB_PMA_LN_GEN3_AGC_SD_THSEL, + + AHB_PMA_LN_RX_SELR_GEN3, + AHB_PMA_LN_TX_DRV, + + /* CMU Master Reset */ + CMU_MASTER_CDN, + + /* P2S ring buffer initial startup pointer difference */ + P2S_RBUF_PTR_DIFF, + + CONFIG_VERSION, + + /* Lane 1 Master Reset */ + L1_MASTER_CDN, + + /* CMU OK Status */ + CMU_OK_I_0, + + /* Mid-speed initial calibration control */ + COMRXEQ_MS_INIT_CTRL_7_0, + + /* High-speed initial calibration control */ + COMRXEQ_HS_INIT_CAL_7_0, + + /* Mid-speed recalibration control */ + COMRXEQ_MS_RECAL_CTRL_7_0, + + /* High-speed recalibration control */ + COMRXEQ_HS_RECAL_CTRL_7_0, + + /* ATT configuration */ + COMRXEQ_CSR_ATT_CONFIG, + + /* Edge based boost adaptation window length */ + COMRXEQ_CSR_EBSTADAPT_WIN_LEN, + + /* COMRXEQ control 3 & 4 */ + COMRXEQ_CTRL_3_4, + + /* COMRXEQ control 14, 15 and 16*/ + COMRXEQ_CTRL_14_15_16, + + /* Threshold for errors in pattern data */ + COMRXEQ_CSR_DLEV_ERR_THRESH, + + /* COMRXEQ control 25 */ + COMRXEQ_CTRL_25, + + /* Mid-speed rate change calibration control */ + CSR_RXEQ_RATE_CHANGE_CAL_RUN_RATE2_O, + + /* High-speed rate change calibration control */ + COMRXEQ_HS_RCHANGE_CTRL_7_0, + + /* Serdes reset */ + POR_EN, + + /* Tx Enable Value */ + TX0_ENABLE, + + /* Rx Enable Value */ + RX0_ENABLE, + + /* PLL Enable Value */ + PLL_ENABLE, + + /* PLL ready for use */ + PLL_OK, + + /* sentinel */ + MAX_FIELDS + +}; + +static const struct reg_field serdes_am654_reg_fields[] = { + [CMU_PLL_CTRL] = REG_FIELD(CMU_R004, 8, 15), + [AHB_PMA_CM_VCO_VBIAS_VREG] = REG_FIELD(CMU_R060, 8, 15), + [CMU_MASTER_CDN] = REG_FIELD(CMU_R07C, 24, 31), + [AHB_PMA_CM_VCO_BIAS_VREG] = REG_FIELD(CMU_R088, 24, 31), + [AHB_PMA_CM_SR] = REG_FIELD(CMU_R0D0, 24, 31), + [AHB_SSC_GEN_Z_O_20_13] = REG_FIELD(CMU_R0E8, 8, 15), + [LANE_PLL_CTRL_RXEQ_RXIDLE] = REG_FIELD(LANE_R048, 8, 15), + [AHB_PMA_LN_AGC_THSEL_VREGH] = REG_FIELD(LANE_R058, 16, 23), + [AHB_PMA_LN_GEN3_AGC_SD_THSEL] = REG_FIELD(LANE_R06c, 0, 7), + [AHB_PMA_LN_RX_SELR_GEN3] = REG_FIELD(LANE_R070, 16, 23), + [AHB_PMA_LN_TX_DRV] = REG_FIELD(LANE_R19C, 16, 23), + [P2S_RBUF_PTR_DIFF] = REG_FIELD(COMLANE_R004, 0, 7), + [CONFIG_VERSION] = REG_FIELD(COMLANE_R138, 16, 23), + [L1_MASTER_CDN] = REG_FIELD(COMLANE_R190, 8, 15), + [CMU_OK_I_0] = REG_FIELD(COMLANE_R194, 19, 19), + [COMRXEQ_MS_INIT_CTRL_7_0] = REG_FIELD(COMRXEQ_R004, 24, 31), + [COMRXEQ_HS_INIT_CAL_7_0] = REG_FIELD(COMRXEQ_R008, 0, 7), + [COMRXEQ_MS_RECAL_CTRL_7_0] = REG_FIELD(COMRXEQ_R00C, 8, 15), + [COMRXEQ_HS_RECAL_CTRL_7_0] = REG_FIELD(COMRXEQ_R00C, 16, 23), + [COMRXEQ_CSR_ATT_CONFIG] 
= REG_FIELD(COMRXEQ_R014, 16, 23), + [COMRXEQ_CSR_EBSTADAPT_WIN_LEN] = REG_FIELD(COMRXEQ_R018, 16, 23), + [COMRXEQ_CTRL_3_4] = REG_FIELD(COMRXEQ_R01C, 8, 15), + [COMRXEQ_CTRL_14_15_16] = REG_FIELD(COMRXEQ_R04C, 0, 7), + [COMRXEQ_CSR_DLEV_ERR_THRESH] = REG_FIELD(COMRXEQ_R088, 16, 23), + [COMRXEQ_CTRL_25] = REG_FIELD(COMRXEQ_R094, 24, 31), + [CSR_RXEQ_RATE_CHANGE_CAL_RUN_RATE2_O] = REG_FIELD(COMRXEQ_R098, 8, 15), + [COMRXEQ_HS_RCHANGE_CTRL_7_0] = REG_FIELD(COMRXEQ_R098, 16, 23), + [POR_EN] = REG_FIELD(SERDES_CTRL, 29, 29), + [TX0_ENABLE] = REG_FIELD(WIZ_LANEXCTL_STS, 29, 31), + [RX0_ENABLE] = REG_FIELD(WIZ_LANEXCTL_STS, 13, 15), + [PLL_ENABLE] = REG_FIELD(WIZ_PLL_CTRL, 29, 31), + [PLL_OK] = REG_FIELD(WIZ_PLL_CTRL, 28, 28), +}; struct serdes_am654 { struct regmap *regmap; - struct regmap_field *cmu_master_cdn_o; - struct regmap_field *config_version; - struct regmap_field *l1_master_cdn_o; - struct regmap_field *cmu_ok_i_0; - struct regmap_field *por_en; - struct regmap_field *tx0_enable; - struct regmap_field *rx0_enable; - struct regmap_field *pll_enable; - struct regmap_field *pll_ok; + struct regmap_field *fields[MAX_FIELDS]; struct device *dev; struct mux_control *control; @@ -116,12 +248,12 @@ static int serdes_am654_enable_pll(struct serdes_am654 *phy) int ret; u32 val; - ret = regmap_field_write(phy->pll_enable, PLL_ENABLE_STATE); + ret = regmap_field_write(phy->fields[PLL_ENABLE], PLL_ENABLE_STATE); if (ret) return ret; - return regmap_field_read_poll_timeout(phy->pll_ok, val, val, 1000, - PLL_LOCK_TIME); + return regmap_field_read_poll_timeout(phy->fields[PLL_OK], val, val, + 1000, PLL_LOCK_TIME); } static void serdes_am654_disable_pll(struct serdes_am654 *phy) @@ -129,41 +261,39 @@ static void serdes_am654_disable_pll(struct serdes_am654 *phy) struct device *dev = phy->dev; int ret; - ret = regmap_field_write(phy->pll_enable, PLL_DISABLE_STATE); + ret = regmap_field_write(phy->fields[PLL_ENABLE], PLL_DISABLE_STATE); if (ret) dev_err(dev, "Failed to disable PLL\n"); } static int serdes_am654_enable_txrx(struct serdes_am654 *phy) { - int ret; + int ret = 0; /* Enable TX */ - ret = regmap_field_write(phy->tx0_enable, TX0_ENABLE_STATE); - if (ret) - return ret; + ret |= regmap_field_write(phy->fields[TX0_ENABLE], TX0_ENABLE_STATE); /* Enable RX */ - ret = regmap_field_write(phy->rx0_enable, RX0_ENABLE_STATE); + ret |= regmap_field_write(phy->fields[RX0_ENABLE], RX0_ENABLE_STATE); + if (ret) - return ret; + return -EIO; return 0; } static int serdes_am654_disable_txrx(struct serdes_am654 *phy) { - int ret; + int ret = 0; /* Disable TX */ - ret = regmap_field_write(phy->tx0_enable, TX0_DISABLE_STATE); - if (ret) - return ret; + ret |= regmap_field_write(phy->fields[TX0_ENABLE], TX0_DISABLE_STATE); /* Disable RX */ - ret = regmap_field_write(phy->rx0_enable, RX0_DISABLE_STATE); + ret |= regmap_field_write(phy->fields[RX0_ENABLE], RX0_DISABLE_STATE); + if (ret) - return ret; + return -EIO; return 0; } @@ -187,8 +317,8 @@ static int serdes_am654_power_on(struct phy *x) return ret; } - return regmap_field_read_poll_timeout(phy->cmu_ok_i_0, val, val, - SLEEP_TIME, PLL_LOCK_TIME); + return regmap_field_read_poll_timeout(phy->fields[CMU_OK_I_0], val, + val, SLEEP_TIME, PLL_LOCK_TIME); } static int serdes_am654_power_off(struct phy *x) @@ -286,19 +416,37 @@ static int serdes_am654_usb3_init(struct serdes_am654 *phy) static int serdes_am654_pcie_init(struct serdes_am654 *phy) { - int ret; + int ret = 0; - ret = regmap_field_write(phy->config_version, VERSION); - if (ret) - return ret; + ret |= 
regmap_field_write(phy->fields[CMU_PLL_CTRL], 0x2); + ret |= regmap_field_write(phy->fields[AHB_PMA_CM_VCO_VBIAS_VREG], 0x98); + ret |= regmap_field_write(phy->fields[AHB_PMA_CM_VCO_BIAS_VREG], 0x98); + ret |= regmap_field_write(phy->fields[AHB_PMA_CM_SR], 0x45); + ret |= regmap_field_write(phy->fields[AHB_SSC_GEN_Z_O_20_13], 0xe); + ret |= regmap_field_write(phy->fields[LANE_PLL_CTRL_RXEQ_RXIDLE], 0x5); + ret |= regmap_field_write(phy->fields[AHB_PMA_LN_AGC_THSEL_VREGH], 0x83); + ret |= regmap_field_write(phy->fields[AHB_PMA_LN_GEN3_AGC_SD_THSEL], 0x83); + ret |= regmap_field_write(phy->fields[AHB_PMA_LN_RX_SELR_GEN3], 0x81); + ret |= regmap_field_write(phy->fields[AHB_PMA_LN_TX_DRV], 0x3b); + ret |= regmap_field_write(phy->fields[P2S_RBUF_PTR_DIFF], 0x3); + ret |= regmap_field_write(phy->fields[CONFIG_VERSION], VERSION_VAL); + ret |= regmap_field_write(phy->fields[COMRXEQ_MS_INIT_CTRL_7_0], 0xf); + ret |= regmap_field_write(phy->fields[COMRXEQ_HS_INIT_CAL_7_0], 0x4f); + ret |= regmap_field_write(phy->fields[COMRXEQ_MS_RECAL_CTRL_7_0], 0xf); + ret |= regmap_field_write(phy->fields[COMRXEQ_HS_RECAL_CTRL_7_0], 0x4f); + ret |= regmap_field_write(phy->fields[COMRXEQ_CSR_ATT_CONFIG], 0x7); + ret |= regmap_field_write(phy->fields[COMRXEQ_CSR_EBSTADAPT_WIN_LEN], 0x7f); + ret |= regmap_field_write(phy->fields[COMRXEQ_CTRL_3_4], 0xf); + ret |= regmap_field_write(phy->fields[COMRXEQ_CTRL_14_15_16], 0x9a); + ret |= regmap_field_write(phy->fields[COMRXEQ_CSR_DLEV_ERR_THRESH], 0x32); + ret |= regmap_field_write(phy->fields[COMRXEQ_CTRL_25], 0x80); + ret |= regmap_field_write(phy->fields[CSR_RXEQ_RATE_CHANGE_CAL_RUN_RATE2_O], 0xf); + ret |= regmap_field_write(phy->fields[COMRXEQ_HS_RCHANGE_CTRL_7_0], 0x4f); + ret |= regmap_field_write(phy->fields[CMU_MASTER_CDN], 0x1); + ret |= regmap_field_write(phy->fields[L1_MASTER_CDN], 0x2); - ret = regmap_field_write(phy->cmu_master_cdn_o, 0x1); if (ret) - return ret; - - ret = regmap_field_write(phy->l1_master_cdn_o, 0x1); - if (ret) - return ret; + return -EIO; return 0; } @@ -320,20 +468,19 @@ static int serdes_am654_init(struct phy *x) static int serdes_am654_reset(struct phy *x) { struct serdes_am654 *phy = phy_get_drvdata(x); - int ret; + int ret = 0; serdes_am654_disable_pll(phy); serdes_am654_disable_txrx(phy); - ret = regmap_field_write(phy->por_en, 0x1); - if (ret) - return ret; + ret |= regmap_field_write(phy->fields[POR_EN], 0x1); mdelay(1); - ret = regmap_field_write(phy->por_en, 0x0); + ret |= regmap_field_write(phy->fields[POR_EN], 0x0); + if (ret) - return ret; + return -EIO; return 0; } @@ -587,66 +734,16 @@ static int serdes_am654_regfield_init(struct serdes_am654 *am654_phy) { struct regmap *regmap = am654_phy->regmap; struct device *dev = am654_phy->dev; + int i; - am654_phy->cmu_master_cdn_o = devm_regmap_field_alloc(dev, regmap, - cmu_master_cdn_o); - if (IS_ERR(am654_phy->cmu_master_cdn_o)) { - dev_err(dev, "CMU_MASTER_CDN_O reg field init failed\n"); - return PTR_ERR(am654_phy->cmu_master_cdn_o); - } - - am654_phy->config_version = devm_regmap_field_alloc(dev, regmap, - config_version); - if (IS_ERR(am654_phy->config_version)) { - dev_err(dev, "CONFIG_VERSION reg field init failed\n"); - return PTR_ERR(am654_phy->config_version); - } - - am654_phy->l1_master_cdn_o = devm_regmap_field_alloc(dev, regmap, - l1_master_cdn_o); - if (IS_ERR(am654_phy->l1_master_cdn_o)) { - dev_err(dev, "L1_MASTER_CDN_O reg field init failed\n"); - return PTR_ERR(am654_phy->l1_master_cdn_o); - } - - am654_phy->cmu_ok_i_0 = devm_regmap_field_alloc(dev, regmap, - 
cmu_ok_i_0); - if (IS_ERR(am654_phy->cmu_ok_i_0)) { - dev_err(dev, "CMU_OK_I_0 reg field init failed\n"); - return PTR_ERR(am654_phy->cmu_ok_i_0); - } - - am654_phy->por_en = devm_regmap_field_alloc(dev, regmap, por_en); - if (IS_ERR(am654_phy->por_en)) { - dev_err(dev, "POR_EN reg field init failed\n"); - return PTR_ERR(am654_phy->por_en); - } - - am654_phy->tx0_enable = devm_regmap_field_alloc(dev, regmap, - tx0_enable); - if (IS_ERR(am654_phy->tx0_enable)) { - dev_err(dev, "TX0_ENABLE reg field init failed\n"); - return PTR_ERR(am654_phy->tx0_enable); - } - - am654_phy->rx0_enable = devm_regmap_field_alloc(dev, regmap, - rx0_enable); - if (IS_ERR(am654_phy->rx0_enable)) { - dev_err(dev, "RX0_ENABLE reg field init failed\n"); - return PTR_ERR(am654_phy->rx0_enable); - } - - am654_phy->pll_enable = devm_regmap_field_alloc(dev, regmap, - pll_enable); - if (IS_ERR(am654_phy->pll_enable)) { - dev_err(dev, "PLL_ENABLE reg field init failed\n"); - return PTR_ERR(am654_phy->pll_enable); - } - - am654_phy->pll_ok = devm_regmap_field_alloc(dev, regmap, pll_ok); - if (IS_ERR(am654_phy->pll_ok)) { - dev_err(dev, "PLL_OK reg field init failed\n"); - return PTR_ERR(am654_phy->pll_ok); + for (i = 0; i < MAX_FIELDS; i++) { + am654_phy->fields[i] = devm_regmap_field_alloc(dev, + regmap, + serdes_am654_reg_fields[i]); + if (IS_ERR(am654_phy->fields[i])) { + dev_err(dev, "Unable to allocate regmap field %d\n", i); + return PTR_ERR(am654_phy->fields[i]); + } } return 0; @@ -725,8 +822,10 @@ static int serdes_am654_probe(struct platform_device *pdev) pm_runtime_enable(dev); phy = devm_phy_create(dev, NULL, &ops); - if (IS_ERR(phy)) - return PTR_ERR(phy); + if (IS_ERR(phy)) { + ret = PTR_ERR(phy); + goto clk_err; + } phy_set_drvdata(phy, am654_phy); phy_provider = devm_of_phy_provider_register(dev, serdes_am654_xlate); diff --git a/drivers/phy/ti/phy-gmii-sel.c b/drivers/phy/ti/phy-gmii-sel.c index 7edd5c3bc536..5fd2e8a08bfc 100644 --- a/drivers/phy/ti/phy-gmii-sel.c +++ b/drivers/phy/ti/phy-gmii-sel.c @@ -11,6 +11,7 @@ #include <linux/module.h> #include <linux/mfd/syscon.h> #include <linux/of.h> +#include <linux/of_address.h> #include <linux/of_net.h> #include <linux/phy.h> #include <linux/phy/phy.h> @@ -22,7 +23,7 @@ #define AM33XX_GMII_SEL_MODE_RGMII 2 enum { - PHY_GMII_SEL_PORT_MODE, + PHY_GMII_SEL_PORT_MODE = 0, PHY_GMII_SEL_RGMII_ID_MODE, PHY_GMII_SEL_RMII_IO_CLK_EN, PHY_GMII_SEL_LAST, @@ -41,6 +42,7 @@ struct phy_gmii_sel_soc_data { u32 num_ports; u32 features; const struct reg_field (*regfields)[PHY_GMII_SEL_LAST]; + bool use_of_data; }; struct phy_gmii_sel_priv { @@ -49,6 +51,8 @@ struct phy_gmii_sel_priv { struct regmap *regmap; struct phy_provider *phy_provider; struct phy_gmii_sel_phy_priv *if_phys; + u32 num_ports; + u32 reg_offset; }; static int phy_gmii_sel_mode(struct phy *phy, enum phy_mode mode, int submode) @@ -147,13 +151,9 @@ static const struct reg_field phy_gmii_sel_fields_dra7[][PHY_GMII_SEL_LAST] = { { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x554, 0, 1), - [PHY_GMII_SEL_RGMII_ID_MODE] = REG_FIELD((~0), 0, 0), - [PHY_GMII_SEL_RMII_IO_CLK_EN] = REG_FIELD((~0), 0, 0), }, { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x554, 4, 5), - [PHY_GMII_SEL_RGMII_ID_MODE] = REG_FIELD((~0), 0, 0), - [PHY_GMII_SEL_RMII_IO_CLK_EN] = REG_FIELD((~0), 0, 0), }, }; @@ -172,16 +172,19 @@ struct phy_gmii_sel_soc_data phy_gmii_sel_soc_dm814 = { static const struct reg_field phy_gmii_sel_fields_am654[][PHY_GMII_SEL_LAST] = { - { - [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x4040, 0, 1), - [PHY_GMII_SEL_RGMII_ID_MODE] = 
REG_FIELD((~0), 0, 0), - [PHY_GMII_SEL_RMII_IO_CLK_EN] = REG_FIELD((~0), 0, 0), - }, + { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x0, 0, 2), }, + { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x4, 0, 2), }, + { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x8, 0, 2), }, + { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0xC, 0, 2), }, + { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x10, 0, 2), }, + { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x14, 0, 2), }, + { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x18, 0, 2), }, + { [PHY_GMII_SEL_PORT_MODE] = REG_FIELD(0x1C, 0, 2), }, }; static const struct phy_gmii_sel_soc_data phy_gmii_sel_soc_am654 = { - .num_ports = 1, + .use_of_data = true, .regfields = phy_gmii_sel_fields_am654, }; @@ -228,7 +231,7 @@ static struct phy *phy_gmii_sel_of_xlate(struct device *dev, if (priv->soc_data->features & BIT(PHY_GMII_SEL_RMII_IO_CLK_EN) && args->args_count < 2) return ERR_PTR(-EINVAL); - if (phy_id > priv->soc_data->num_ports) + if (phy_id > priv->num_ports) return ERR_PTR(-EINVAL); if (phy_id != priv->if_phys[phy_id - 1].id) return ERR_PTR(-EINVAL); @@ -242,68 +245,97 @@ static struct phy *phy_gmii_sel_of_xlate(struct device *dev, return priv->if_phys[phy_id].if_phy; } -static int phy_gmii_sel_init_ports(struct phy_gmii_sel_priv *priv) +static int phy_gmii_init_phy(struct phy_gmii_sel_priv *priv, int port, + struct phy_gmii_sel_phy_priv *if_phy) { const struct phy_gmii_sel_soc_data *soc_data = priv->soc_data; struct device *dev = priv->dev; + const struct reg_field *fields; + struct regmap_field *regfield; + struct reg_field field; + int ret; + + if_phy->id = port; + if_phy->priv = priv; + + fields = soc_data->regfields[port - 1]; + field = *fields++; + field.reg += priv->reg_offset; + dev_dbg(dev, "%s field %x %d %d\n", __func__, + field.reg, field.msb, field.lsb); + + regfield = devm_regmap_field_alloc(dev, priv->regmap, field); + if (IS_ERR(regfield)) + return PTR_ERR(regfield); + if_phy->fields[PHY_GMII_SEL_PORT_MODE] = regfield; + + field = *fields++; + field.reg += priv->reg_offset; + if (soc_data->features & BIT(PHY_GMII_SEL_RGMII_ID_MODE)) { + regfield = devm_regmap_field_alloc(dev, + priv->regmap, + field); + if (IS_ERR(regfield)) + return PTR_ERR(regfield); + if_phy->fields[PHY_GMII_SEL_RGMII_ID_MODE] = regfield; + dev_dbg(dev, "%s field %x %d %d\n", __func__, + field.reg, field.msb, field.lsb); + } + + field = *fields; + field.reg += priv->reg_offset; + if (soc_data->features & BIT(PHY_GMII_SEL_RMII_IO_CLK_EN)) { + regfield = devm_regmap_field_alloc(dev, + priv->regmap, + field); + if (IS_ERR(regfield)) + return PTR_ERR(regfield); + if_phy->fields[PHY_GMII_SEL_RMII_IO_CLK_EN] = regfield; + dev_dbg(dev, "%s field %x %d %d\n", __func__, + field.reg, field.msb, field.lsb); + } + + if_phy->if_phy = devm_phy_create(dev, + priv->dev->of_node, + &phy_gmii_sel_ops); + if (IS_ERR(if_phy->if_phy)) { + ret = PTR_ERR(if_phy->if_phy); + dev_err(dev, "Failed to create phy%d %d\n", port, ret); + return ret; + } + phy_set_drvdata(if_phy->if_phy, if_phy); + + return 0; +} + +static int phy_gmii_sel_init_ports(struct phy_gmii_sel_priv *priv) +{ + const struct phy_gmii_sel_soc_data *soc_data = priv->soc_data; struct phy_gmii_sel_phy_priv *if_phys; - int i, num_ports, ret; + struct device *dev = priv->dev; + int i, ret; - num_ports = priv->soc_data->num_ports; + if (soc_data->use_of_data) { + const __be32 *offset; + u64 size; - if_phys = devm_kcalloc(priv->dev, num_ports, + offset = of_get_address(dev->of_node, 0, &size, NULL); + priv->num_ports = size / sizeof(u32); + if (!priv->num_ports) + return -EINVAL; 
+ priv->reg_offset = __be32_to_cpu(*offset); + } + + if_phys = devm_kcalloc(dev, priv->num_ports, sizeof(*if_phys), GFP_KERNEL); if (!if_phys) return -ENOMEM; - dev_dbg(dev, "%s %d\n", __func__, num_ports); - - for (i = 0; i < num_ports; i++) { - const struct reg_field *field; - struct regmap_field *regfield; + dev_dbg(dev, "%s %d\n", __func__, priv->num_ports); - if_phys[i].id = i + 1; - if_phys[i].priv = priv; - - field = &soc_data->regfields[i][PHY_GMII_SEL_PORT_MODE]; - dev_dbg(dev, "%s field %x %d %d\n", __func__, - field->reg, field->msb, field->lsb); - - regfield = devm_regmap_field_alloc(dev, priv->regmap, *field); - if (IS_ERR(regfield)) - return PTR_ERR(regfield); - if_phys[i].fields[PHY_GMII_SEL_PORT_MODE] = regfield; - - field = &soc_data->regfields[i][PHY_GMII_SEL_RGMII_ID_MODE]; - if (field->reg != (~0)) { - regfield = devm_regmap_field_alloc(dev, - priv->regmap, - *field); - if (IS_ERR(regfield)) - return PTR_ERR(regfield); - if_phys[i].fields[PHY_GMII_SEL_RGMII_ID_MODE] = - regfield; - } - - field = &soc_data->regfields[i][PHY_GMII_SEL_RMII_IO_CLK_EN]; - if (field->reg != (~0)) { - regfield = devm_regmap_field_alloc(dev, - priv->regmap, - *field); - if (IS_ERR(regfield)) - return PTR_ERR(regfield); - if_phys[i].fields[PHY_GMII_SEL_RMII_IO_CLK_EN] = - regfield; - } - - if_phys[i].if_phy = devm_phy_create(dev, - priv->dev->of_node, - &phy_gmii_sel_ops); - if (IS_ERR(if_phys[i].if_phy)) { - ret = PTR_ERR(if_phys[i].if_phy); - dev_err(dev, "Failed to create phy%d %d\n", i, ret); + for (i = 0; i < priv->num_ports; i++) { + ret = phy_gmii_init_phy(priv, i + 1, &if_phys[i]); + if (ret) return ret; - } - phy_set_drvdata(if_phys[i].if_phy, &if_phys[i]); } priv->if_phys = if_phys; @@ -328,6 +360,7 @@ static int phy_gmii_sel_probe(struct platform_device *pdev) priv->dev = &pdev->dev; priv->soc_data = of_id->data; + priv->num_ports = priv->soc_data->num_ports; priv->regmap = syscon_node_to_regmap(node->parent); if (IS_ERR(priv->regmap)) { diff --git a/drivers/phy/ti/phy-j721e-wiz.c b/drivers/phy/ti/phy-j721e-wiz.c index 33c4cf0105a4..c9cfafe89cbf 100644 --- a/drivers/phy/ti/phy-j721e-wiz.c +++ b/drivers/phy/ti/phy-j721e-wiz.c @@ -20,7 +20,6 @@ #include <linux/pm_runtime.h> #include <linux/regmap.h> #include <linux/reset-controller.h> -#include <dt-bindings/phy/phy.h> #define WIZ_SERDES_CTRL 0x404 #define WIZ_SERDES_TOP_CTRL 0x408 diff --git a/drivers/phy/ti/phy-omap-usb2.c b/drivers/phy/ti/phy-omap-usb2.c index 507f79d14adb..4fec90d2624f 100644 --- a/drivers/phy/ti/phy-omap-usb2.c +++ b/drivers/phy/ti/phy-omap-usb2.c @@ -6,23 +6,23 @@ * Author: Kishon Vijay Abraham I <kishon@ti.com> */ -#include <linux/module.h> -#include <linux/platform_device.h> -#include <linux/slab.h> -#include <linux/of.h> -#include <linux/io.h> -#include <linux/phy/omap_usb.h> -#include <linux/usb/phy_companion.h> #include <linux/clk.h> -#include <linux/err.h> -#include <linux/pm_runtime.h> #include <linux/delay.h> +#include <linux/err.h> +#include <linux/io.h> +#include <linux/mfd/syscon.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/of_platform.h> #include <linux/phy/omap_control_phy.h> +#include <linux/phy/omap_usb.h> #include <linux/phy/phy.h> -#include <linux/mfd/syscon.h> +#include <linux/platform_device.h> +#include <linux/pm_runtime.h> #include <linux/regmap.h> -#include <linux/of_platform.h> +#include <linux/slab.h> #include <linux/sys_soc.h> +#include <linux/usb/phy_companion.h> #define USB2PHY_ANA_CONFIG1 0x4c #define USB2PHY_DISCON_BYP_LATCH BIT(31) @@ -89,7 +89,7 @@ static 
inline void omap_usb_writel(void __iomem *addr, unsigned int offset, } /** - * omap_usb2_set_comparator - links the comparator present in the sytem with + * omap_usb2_set_comparator - links the comparator present in the system with * this phy * @comparator - the companion phy(comparator) for this phy * @@ -142,7 +142,7 @@ static int omap_usb_set_host(struct usb_otg *otg, struct usb_bus *host) } static int omap_usb_set_peripheral(struct usb_otg *otg, - struct usb_gadget *gadget) + struct usb_gadget *gadget) { otg->gadget = gadget; if (!gadget) @@ -409,7 +409,7 @@ static int omap_usb2_probe(struct platform_device *pdev) return PTR_ERR(phy->phy_base); phy->syscon_phy_power = syscon_regmap_lookup_by_phandle(node, - "syscon-phy-power"); + "syscon-phy-power"); if (IS_ERR(phy->syscon_phy_power)) { dev_dbg(&pdev->dev, "can't get syscon-phy-power, using control device\n"); @@ -438,7 +438,6 @@ static int omap_usb2_probe(struct platform_device *pdev) } } - phy->wkupclk = devm_clk_get(phy->dev, "wkupclk"); if (IS_ERR(phy->wkupclk)) { if (PTR_ERR(phy->wkupclk) == -EPROBE_DEFER) @@ -452,10 +451,10 @@ static int omap_usb2_probe(struct platform_device *pdev) if (PTR_ERR(phy->wkupclk) != -EPROBE_DEFER) dev_err(&pdev->dev, "unable to get usb_phy_cm_clk32k\n"); return PTR_ERR(phy->wkupclk); - } else { - dev_warn(&pdev->dev, - "found usb_phy_cm_clk32k, please fix DTS\n"); } + + dev_warn(&pdev->dev, + "found usb_phy_cm_clk32k, please fix DTS\n"); } phy->optclk = devm_clk_get(phy->dev, "refclk"); @@ -504,7 +503,6 @@ static int omap_usb2_probe(struct platform_device *pdev) return PTR_ERR(phy_provider); } - usb_add_phy_dev(&phy->phy); return 0; diff --git a/drivers/pinctrl/intel/pinctrl-cherryview.c b/drivers/pinctrl/intel/pinctrl-cherryview.c index 9ef246145bde..06521097513a 100644 --- a/drivers/pinctrl/intel/pinctrl-cherryview.c +++ b/drivers/pinctrl/intel/pinctrl-cherryview.c @@ -58,6 +58,7 @@ #define CHV_PADCTRL1_CFGLOCK BIT(31) #define CHV_PADCTRL1_INVRXTX_SHIFT 4 #define CHV_PADCTRL1_INVRXTX_MASK GENMASK(7, 4) +#define CHV_PADCTRL1_INVRXTX_TXDATA BIT(7) #define CHV_PADCTRL1_INVRXTX_RXDATA BIT(6) #define CHV_PADCTRL1_INVRXTX_TXENABLE BIT(5) #define CHV_PADCTRL1_ODEN BIT(3) @@ -792,11 +793,22 @@ static int chv_pinmux_set_mux(struct pinctrl_dev *pctldev, static void chv_gpio_clear_triggering(struct chv_pinctrl *pctrl, unsigned int offset) { + u32 invrxtx_mask = CHV_PADCTRL1_INVRXTX_MASK; u32 value; + /* + * One some devices the GPIO should output the inverted value from what + * device-drivers / ACPI code expects (inverted external buffer?). The + * BIOS makes this work by setting the CHV_PADCTRL1_INVRXTX_TXDATA flag, + * preserve this flag if the pin is already setup as GPIO. 
+ */ + value = chv_readl(pctrl, offset, CHV_PADCTRL0); + if (value & CHV_PADCTRL0_GPIOEN) + invrxtx_mask &= ~CHV_PADCTRL1_INVRXTX_TXDATA; + value = chv_readl(pctrl, offset, CHV_PADCTRL1); value &= ~CHV_PADCTRL1_INTWAKECFG_MASK; - value &= ~CHV_PADCTRL1_INVRXTX_MASK; + value &= ~invrxtx_mask; chv_writel(pctrl, offset, CHV_PADCTRL1, value); } diff --git a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c index 2f3dfb56c3fa..35bbe5935708 100644 --- a/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c +++ b/drivers/pinctrl/mediatek/pinctrl-mtk-common-v2.c @@ -259,6 +259,10 @@ bool mtk_is_virt_gpio(struct mtk_pinctrl *hw, unsigned int gpio_n) desc = (const struct mtk_pin_desc *)&hw->soc->pins[gpio_n]; + /* if the GPIO is not supported for eint mode */ + if (desc->eint.eint_m == NO_EINT_SUPPORT) + return virt_gpio; + if (desc->funcs && !desc->funcs[desc->eint.eint_m].name) virt_gpio = true; diff --git a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c index a767a05fa3a0..48e2a6c56a83 100644 --- a/drivers/pinctrl/mvebu/pinctrl-armada-xp.c +++ b/drivers/pinctrl/mvebu/pinctrl-armada-xp.c @@ -414,7 +414,7 @@ static struct mvebu_mpp_mode mv98dx3236_mpp_modes[] = { MPP_VAR_FUNCTION(0x1, "i2c0", "sck", V_98DX3236_PLUS)), MPP_MODE(15, MPP_VAR_FUNCTION(0x0, "gpio", NULL, V_98DX3236_PLUS), - MPP_VAR_FUNCTION(0x4, "i2c0", "sda", V_98DX3236_PLUS)), + MPP_VAR_FUNCTION(0x1, "i2c0", "sda", V_98DX3236_PLUS)), MPP_MODE(16, MPP_VAR_FUNCTION(0x0, "gpo", NULL, V_98DX3236_PLUS), MPP_VAR_FUNCTION(0x4, "dev", "oe", V_98DX3236_PLUS)), diff --git a/drivers/pinctrl/qcom/pinctrl-sm8250.c b/drivers/pinctrl/qcom/pinctrl-sm8250.c index a660f1274b66..826df0d637ea 100644 --- a/drivers/pinctrl/qcom/pinctrl-sm8250.c +++ b/drivers/pinctrl/qcom/pinctrl-sm8250.c @@ -1308,7 +1308,7 @@ static const struct msm_pingroup sm8250_groups[] = { [178] = PINGROUP(178, WEST, _, _, _, _, _, _, _, _, _), [179] = PINGROUP(179, WEST, _, _, _, _, _, _, _, _, _), [180] = UFS_RESET(ufs_reset, 0xb8000), - [181] = SDC_PINGROUP(sdc2_clk, 0x7000, 14, 6), + [181] = SDC_PINGROUP(sdc2_clk, 0xb7000, 14, 6), [182] = SDC_PINGROUP(sdc2_cmd, 0xb7000, 11, 3), [183] = SDC_PINGROUP(sdc2_data, 0xb7000, 9, 0), }; diff --git a/drivers/regulator/axp20x-regulator.c b/drivers/regulator/axp20x-regulator.c index 1bacb37e8a99..cd1224182ad7 100644 --- a/drivers/regulator/axp20x-regulator.c +++ b/drivers/regulator/axp20x-regulator.c @@ -42,8 +42,9 @@ #define AXP20X_DCDC2_V_OUT_MASK GENMASK(5, 0) #define AXP20X_DCDC3_V_OUT_MASK GENMASK(7, 0) -#define AXP20X_LDO24_V_OUT_MASK GENMASK(7, 4) +#define AXP20X_LDO2_V_OUT_MASK GENMASK(7, 4) #define AXP20X_LDO3_V_OUT_MASK GENMASK(6, 0) +#define AXP20X_LDO4_V_OUT_MASK GENMASK(3, 0) #define AXP20X_LDO5_V_OUT_MASK GENMASK(7, 4) #define AXP20X_PWR_OUT_EXTEN_MASK BIT_MASK(0) @@ -542,14 +543,14 @@ static const struct regulator_desc axp20x_regulators[] = { AXP20X_PWR_OUT_CTRL, AXP20X_PWR_OUT_DCDC3_MASK), AXP_DESC_FIXED(AXP20X, LDO1, "ldo1", "acin", 1300), AXP_DESC(AXP20X, LDO2, "ldo2", "ldo24in", 1800, 3300, 100, - AXP20X_LDO24_V_OUT, AXP20X_LDO24_V_OUT_MASK, + AXP20X_LDO24_V_OUT, AXP20X_LDO2_V_OUT_MASK, AXP20X_PWR_OUT_CTRL, AXP20X_PWR_OUT_LDO2_MASK), AXP_DESC(AXP20X, LDO3, "ldo3", "ldo3in", 700, 3500, 25, AXP20X_LDO3_V_OUT, AXP20X_LDO3_V_OUT_MASK, AXP20X_PWR_OUT_CTRL, AXP20X_PWR_OUT_LDO3_MASK), AXP_DESC_RANGES(AXP20X, LDO4, "ldo4", "ldo24in", axp20x_ldo4_ranges, AXP20X_LDO4_V_OUT_NUM_VOLTAGES, - AXP20X_LDO24_V_OUT, AXP20X_LDO24_V_OUT_MASK, + 
AXP20X_LDO24_V_OUT, AXP20X_LDO4_V_OUT_MASK, AXP20X_PWR_OUT_CTRL, AXP20X_PWR_OUT_LDO4_MASK), AXP_DESC_IO(AXP20X, LDO5, "ldo5", "ldo5in", 1800, 3300, 100, AXP20X_LDO5_V_OUT, AXP20X_LDO5_V_OUT_MASK, diff --git a/drivers/reset/Kconfig b/drivers/reset/Kconfig index d9efbfd29646..97e848740e13 100644 --- a/drivers/reset/Kconfig +++ b/drivers/reset/Kconfig @@ -140,6 +140,17 @@ config RESET_QCOM_PDC to control reset signals provided by PDC for Modem, Compute, Display, GPU, Debug, AOP, Sensors, Audio, SP and APPS. +config RESET_RASPBERRYPI + tristate "Raspberry Pi 4 Firmware Reset Driver" + depends on RASPBERRYPI_FIRMWARE || (RASPBERRYPI_FIRMWARE=n && COMPILE_TEST) + default USB_XHCI_PCI + help + Raspberry Pi 4's co-processor controls some of the board's HW + initialization process, but it's up to Linux to trigger it when + relevant. This driver provides a reset controller capable of + interfacing with RPi4's co-processor and model these firmware + initialization routines as reset lines. + config RESET_SCMI tristate "Reset driver controlled via ARM SCMI interface" depends on ARM_SCMI_PROTOCOL || COMPILE_TEST diff --git a/drivers/reset/Makefile b/drivers/reset/Makefile index 249ed357c997..16947610cc3b 100644 --- a/drivers/reset/Makefile +++ b/drivers/reset/Makefile @@ -21,6 +21,7 @@ obj-$(CONFIG_RESET_OXNAS) += reset-oxnas.o obj-$(CONFIG_RESET_PISTACHIO) += reset-pistachio.o obj-$(CONFIG_RESET_QCOM_AOSS) += reset-qcom-aoss.o obj-$(CONFIG_RESET_QCOM_PDC) += reset-qcom-pdc.o +obj-$(CONFIG_RESET_RASPBERRYPI) += reset-raspberrypi.o obj-$(CONFIG_RESET_SCMI) += reset-scmi.o obj-$(CONFIG_RESET_SIMPLE) += reset-simple.o obj-$(CONFIG_RESET_STM32MP157) += reset-stm32mp1.o diff --git a/drivers/reset/reset-raspberrypi.c b/drivers/reset/reset-raspberrypi.c new file mode 100644 index 000000000000..02f59c06f69b --- /dev/null +++ b/drivers/reset/reset-raspberrypi.c @@ -0,0 +1,122 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Raspberry Pi 4 firmware reset driver + * + * Copyright (C) 2020 Nicolas Saenz Julienne <nsaenzjulienne@suse.de> + */ +#include <linux/delay.h> +#include <linux/device.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/reset-controller.h> +#include <soc/bcm2835/raspberrypi-firmware.h> +#include <dt-bindings/reset/raspberrypi,firmware-reset.h> + +struct rpi_reset { + struct reset_controller_dev rcdev; + struct rpi_firmware *fw; +}; + +static inline struct rpi_reset *to_rpi(struct reset_controller_dev *rcdev) +{ + return container_of(rcdev, struct rpi_reset, rcdev); +} + +static int rpi_reset_reset(struct reset_controller_dev *rcdev, unsigned long id) +{ + struct rpi_reset *priv = to_rpi(rcdev); + u32 dev_addr; + int ret; + + switch (id) { + case RASPBERRYPI_FIRMWARE_RESET_ID_USB: + /* + * The Raspberry Pi 4 gets its USB functionality from VL805, a + * PCIe chip that implements xHCI. After a PCI reset, VL805's + * firmware may either be loaded directly from an EEPROM or, if + * not present, by the SoC's co-processor, VideoCore. rpi's + * VideoCore OS contains both the non public firmware load + * logic and the VL805 firmware blob. This triggers the + * aforementioned process. + * + * The pci device address is expected is expected by the + * firmware encoded like this: + * + * PCI_BUS << 20 | PCI_SLOT << 15 | PCI_FUNC << 12 + * + * But since rpi's PCIe is hardwired, we know the address in + * advance. 
+ */ + dev_addr = 0x100000; + ret = rpi_firmware_property(priv->fw, RPI_FIRMWARE_NOTIFY_XHCI_RESET, + &dev_addr, sizeof(dev_addr)); + if (ret) + return ret; + + /* Wait for vl805 to startup */ + usleep_range(200, 1000); + break; + + default: + return -EINVAL; + } + + return 0; +} + +static const struct reset_control_ops rpi_reset_ops = { + .reset = rpi_reset_reset, +}; + +static int rpi_reset_probe(struct platform_device *pdev) +{ + struct device *dev = &pdev->dev; + struct rpi_firmware *fw; + struct device_node *np; + struct rpi_reset *priv; + + np = of_get_parent(dev->of_node); + if (!np) { + dev_err(dev, "Missing firmware node\n"); + return -ENOENT; + } + + fw = rpi_firmware_get(np); + of_node_put(np); + if (!fw) + return -EPROBE_DEFER; + + priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL); + if (!priv) + return -ENOMEM; + + dev_set_drvdata(dev, priv); + + priv->fw = fw; + priv->rcdev.owner = THIS_MODULE; + priv->rcdev.nr_resets = RASPBERRYPI_FIRMWARE_RESET_NUM_IDS; + priv->rcdev.ops = &rpi_reset_ops; + priv->rcdev.of_node = dev->of_node; + + return devm_reset_controller_register(dev, &priv->rcdev); +} + +static const struct of_device_id rpi_reset_of_match[] = { + { .compatible = "raspberrypi,firmware-reset" }, + { /* sentinel */ } +}; +MODULE_DEVICE_TABLE(of, rpi_reset_of_match); + +static struct platform_driver rpi_reset_driver = { + .probe = rpi_reset_probe, + .driver = { + .name = "raspberrypi-reset", + .of_match_table = rpi_reset_of_match, + }, +}; +module_platform_driver(rpi_reset_driver); + +MODULE_AUTHOR("Nicolas Saenz Julienne <nsaenzjulienne@suse.de>"); +MODULE_DESCRIPTION("Raspberry Pi 4 firmware reset driver"); +MODULE_LICENSE("GPL"); diff --git a/drivers/s390/block/dasd_fba.c b/drivers/s390/block/dasd_fba.c index cbb770824226..1a44e321b54e 100644 --- a/drivers/s390/block/dasd_fba.c +++ b/drivers/s390/block/dasd_fba.c @@ -40,6 +40,7 @@ MODULE_LICENSE("GPL"); static struct dasd_discipline dasd_fba_discipline; +static void *dasd_fba_zero_page; struct dasd_fba_private { struct dasd_fba_characteristics rdc_data; @@ -270,7 +271,7 @@ static void ccw_write_zero(struct ccw1 *ccw, int count) ccw->cmd_code = DASD_FBA_CCW_WRITE; ccw->flags |= CCW_FLAG_SLI; ccw->count = count; - ccw->cda = (__u32) (addr_t) page_to_phys(ZERO_PAGE(0)); + ccw->cda = (__u32) (addr_t) dasd_fba_zero_page; } /* @@ -830,6 +831,11 @@ dasd_fba_init(void) int ret; ASCEBC(dasd_fba_discipline.ebcname, 4); + + dasd_fba_zero_page = (void *)get_zeroed_page(GFP_KERNEL | GFP_DMA); + if (!dasd_fba_zero_page) + return -ENOMEM; + ret = ccw_driver_register(&dasd_fba_driver); if (!ret) wait_for_device_probe(); @@ -841,6 +847,7 @@ static void __exit dasd_fba_cleanup(void) { ccw_driver_unregister(&dasd_fba_driver); + free_page((unsigned long)dasd_fba_zero_page); } module_init(dasd_fba_init); diff --git a/drivers/s390/crypto/zcrypt_api.c b/drivers/s390/crypto/zcrypt_api.c index 4dbbfd88262c..f314936b5462 100644 --- a/drivers/s390/crypto/zcrypt_api.c +++ b/drivers/s390/crypto/zcrypt_api.c @@ -1449,7 +1449,8 @@ static long zcrypt_unlocked_ioctl(struct file *filp, unsigned int cmd, if (!reqcnt) return -ENOMEM; zcrypt_perdev_reqcnt(reqcnt, AP_DEVICES); - if (copy_to_user((int __user *) arg, reqcnt, sizeof(reqcnt))) + if (copy_to_user((int __user *) arg, reqcnt, + sizeof(u32) * AP_DEVICES)) rc = -EFAULT; kfree(reqcnt); return rc; diff --git a/drivers/s390/net/qeth_l2_main.c b/drivers/s390/net/qeth_l2_main.c index 3a94f6cad167..6384f7adba66 100644 --- a/drivers/s390/net/qeth_l2_main.c +++ b/drivers/s390/net/qeth_l2_main.c @@ -284,11 
+284,11 @@ static void qeth_l2_stop_card(struct qeth_card *card) if (card->state == CARD_STATE_SOFTSETUP) { qeth_clear_ipacmd_list(card); - qeth_drain_output_queues(card); card->state = CARD_STATE_DOWN; } qeth_qdio_clear_card(card, 0); + qeth_drain_output_queues(card); qeth_clear_working_pool_list(card); flush_workqueue(card->event_wq); qeth_flush_local_addrs(card); diff --git a/drivers/s390/net/qeth_l3_main.c b/drivers/s390/net/qeth_l3_main.c index 4d461960370d..09ef518ca1ea 100644 --- a/drivers/s390/net/qeth_l3_main.c +++ b/drivers/s390/net/qeth_l3_main.c @@ -1168,11 +1168,11 @@ static void qeth_l3_stop_card(struct qeth_card *card) if (card->state == CARD_STATE_SOFTSETUP) { qeth_l3_clear_ip_htable(card, 1); qeth_clear_ipacmd_list(card); - qeth_drain_output_queues(card); card->state = CARD_STATE_DOWN; } qeth_qdio_clear_card(card, 0); + qeth_drain_output_queues(card); qeth_clear_working_pool_list(card); flush_workqueue(card->event_wq); qeth_flush_local_addrs(card); diff --git a/drivers/scsi/iscsi_tcp.c b/drivers/scsi/iscsi_tcp.c index b5dd1caae5e9..d10efb66cf19 100644 --- a/drivers/scsi/iscsi_tcp.c +++ b/drivers/scsi/iscsi_tcp.c @@ -736,6 +736,7 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn, struct iscsi_tcp_conn *tcp_conn = conn->dd_data; struct iscsi_sw_tcp_conn *tcp_sw_conn = tcp_conn->dd_data; struct sockaddr_in6 addr; + struct socket *sock; int rc; switch(param) { @@ -747,13 +748,17 @@ static int iscsi_sw_tcp_conn_get_param(struct iscsi_cls_conn *cls_conn, spin_unlock_bh(&conn->session->frwd_lock); return -ENOTCONN; } + sock = tcp_sw_conn->sock; + sock_hold(sock->sk); + spin_unlock_bh(&conn->session->frwd_lock); + if (param == ISCSI_PARAM_LOCAL_PORT) - rc = kernel_getsockname(tcp_sw_conn->sock, + rc = kernel_getsockname(sock, (struct sockaddr *)&addr); else - rc = kernel_getpeername(tcp_sw_conn->sock, + rc = kernel_getpeername(sock, (struct sockaddr *)&addr); - spin_unlock_bh(&conn->session->frwd_lock); + sock_put(sock->sk); if (rc < 0) return rc; @@ -775,6 +780,7 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost, struct iscsi_tcp_conn *tcp_conn; struct iscsi_sw_tcp_conn *tcp_sw_conn; struct sockaddr_in6 addr; + struct socket *sock; int rc; switch (param) { @@ -789,16 +795,18 @@ static int iscsi_sw_tcp_host_get_param(struct Scsi_Host *shost, return -ENOTCONN; } tcp_conn = conn->dd_data; - tcp_sw_conn = tcp_conn->dd_data; - if (!tcp_sw_conn->sock) { + sock = tcp_sw_conn->sock; + if (!sock) { spin_unlock_bh(&session->frwd_lock); return -ENOTCONN; } + sock_hold(sock->sk); + spin_unlock_bh(&session->frwd_lock); - rc = kernel_getsockname(tcp_sw_conn->sock, + rc = kernel_getsockname(sock, (struct sockaddr *)&addr); - spin_unlock_bh(&session->frwd_lock); + sock_put(sock->sk); if (rc < 0) return rc; diff --git a/drivers/scsi/lpfc/lpfc_hbadisc.c b/drivers/scsi/lpfc/lpfc_hbadisc.c index d32c7e7ab09d..bb02fd8bc2dd 100644 --- a/drivers/scsi/lpfc/lpfc_hbadisc.c +++ b/drivers/scsi/lpfc/lpfc_hbadisc.c @@ -71,6 +71,7 @@ static void lpfc_disc_timeout_handler(struct lpfc_vport *); static void lpfc_disc_flush_list(struct lpfc_vport *vport); static void lpfc_unregister_fcfi_cmpl(struct lpfc_hba *, LPFC_MBOXQ_t *); static int lpfc_fcf_inuse(struct lpfc_hba *); +static void lpfc_mbx_cmpl_read_sparam(struct lpfc_hba *, LPFC_MBOXQ_t *); void lpfc_terminate_rport_io(struct fc_rport *rport) @@ -1138,11 +1139,13 @@ out: return; } - void lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) { struct lpfc_vport *vport = pmb->vport; + LPFC_MBOXQ_t 
*sparam_mb; + struct lpfc_dmabuf *sparam_mp; + int rc; if (pmb->u.mb.mbxStatus) goto out; @@ -1167,12 +1170,42 @@ lpfc_mbx_cmpl_local_config_link(struct lpfc_hba *phba, LPFC_MBOXQ_t *pmb) } /* Start discovery by sending a FLOGI. port_state is identically - * LPFC_FLOGI while waiting for FLOGI cmpl. Check if sending - * the FLOGI is being deferred till after MBX_READ_SPARAM completes. + * LPFC_FLOGI while waiting for FLOGI cmpl. */ if (vport->port_state != LPFC_FLOGI) { - if (!(phba->hba_flag & HBA_DEFER_FLOGI)) + /* Issue MBX_READ_SPARAM to update CSPs before FLOGI if + * bb-credit recovery is in place. + */ + if (phba->bbcredit_support && phba->cfg_enable_bbcr && + !(phba->link_flag & LS_LOOPBACK_MODE)) { + sparam_mb = mempool_alloc(phba->mbox_mem_pool, + GFP_KERNEL); + if (!sparam_mb) + goto sparam_out; + + rc = lpfc_read_sparam(phba, sparam_mb, 0); + if (rc) { + mempool_free(sparam_mb, phba->mbox_mem_pool); + goto sparam_out; + } + sparam_mb->vport = vport; + sparam_mb->mbox_cmpl = lpfc_mbx_cmpl_read_sparam; + rc = lpfc_sli_issue_mbox(phba, sparam_mb, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + sparam_mp = (struct lpfc_dmabuf *) + sparam_mb->ctx_buf; + lpfc_mbuf_free(phba, sparam_mp->virt, + sparam_mp->phys); + kfree(sparam_mp); + sparam_mb->ctx_buf = NULL; + mempool_free(sparam_mb, phba->mbox_mem_pool); + goto sparam_out; + } + + phba->hba_flag |= HBA_DEFER_FLOGI; + } else { lpfc_initial_flogi(vport); + } } else { if (vport->fc_flag & FC_PT2PT) lpfc_disc_start(vport); @@ -1184,6 +1217,7 @@ out: "0306 CONFIG_LINK mbxStatus error x%x " "HBA state x%x\n", pmb->u.mb.mbxStatus, vport->port_state); +sparam_out: mempool_free(pmb, phba->mbox_mem_pool); lpfc_linkdown(phba); @@ -3239,21 +3273,6 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) lpfc_linkup(phba); sparam_mbox = NULL; - if (!(phba->hba_flag & HBA_FCOE_MODE)) { - cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); - if (!cfglink_mbox) - goto out; - vport->port_state = LPFC_LOCAL_CFG_LINK; - lpfc_config_link(phba, cfglink_mbox); - cfglink_mbox->vport = vport; - cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; - rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT); - if (rc == MBX_NOT_FINISHED) { - mempool_free(cfglink_mbox, phba->mbox_mem_pool); - goto out; - } - } - sparam_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); if (!sparam_mbox) goto out; @@ -3274,7 +3293,20 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) goto out; } - if (phba->hba_flag & HBA_FCOE_MODE) { + if (!(phba->hba_flag & HBA_FCOE_MODE)) { + cfglink_mbox = mempool_alloc(phba->mbox_mem_pool, GFP_KERNEL); + if (!cfglink_mbox) + goto out; + vport->port_state = LPFC_LOCAL_CFG_LINK; + lpfc_config_link(phba, cfglink_mbox); + cfglink_mbox->vport = vport; + cfglink_mbox->mbox_cmpl = lpfc_mbx_cmpl_local_config_link; + rc = lpfc_sli_issue_mbox(phba, cfglink_mbox, MBX_NOWAIT); + if (rc == MBX_NOT_FINISHED) { + mempool_free(cfglink_mbox, phba->mbox_mem_pool); + goto out; + } + } else { vport->port_state = LPFC_VPORT_UNKNOWN; /* * Add the driver's default FCF record at FCF index 0 now. 
This @@ -3331,10 +3363,6 @@ lpfc_mbx_process_link_up(struct lpfc_hba *phba, struct lpfc_mbx_read_top *la) } /* Reset FCF roundrobin bmask for new discovery */ lpfc_sli4_clear_fcf_rr_bmask(phba); - } else { - if (phba->bbcredit_support && phba->cfg_enable_bbcr && - !(phba->link_flag & LS_LOOPBACK_MODE)) - phba->hba_flag |= HBA_DEFER_FLOGI; } /* Prepare for LINK up registrations */ diff --git a/drivers/scsi/sd.c b/drivers/scsi/sd.c index 95018e650f2d..16503e22691e 100644 --- a/drivers/scsi/sd.c +++ b/drivers/scsi/sd.c @@ -2964,26 +2964,32 @@ static void sd_read_block_characteristics(struct scsi_disk *sdkp) if (sdkp->device->type == TYPE_ZBC) { /* Host-managed */ - q->limits.zoned = BLK_ZONED_HM; + blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HM); } else { sdkp->zoned = (buffer[8] >> 4) & 3; - if (sdkp->zoned == 1 && !disk_has_partitions(sdkp->disk)) { + if (sdkp->zoned == 1) { /* Host-aware */ - q->limits.zoned = BLK_ZONED_HA; + blk_queue_set_zoned(sdkp->disk, BLK_ZONED_HA); } else { - /* - * Treat drive-managed devices and host-aware devices - * with partitions as regular block devices. - */ - q->limits.zoned = BLK_ZONED_NONE; - if (sdkp->zoned == 2 && sdkp->first_scan) - sd_printk(KERN_NOTICE, sdkp, - "Drive-managed SMR disk\n"); + /* Regular disk or drive managed disk */ + blk_queue_set_zoned(sdkp->disk, BLK_ZONED_NONE); } } - if (blk_queue_is_zoned(q) && sdkp->first_scan) + + if (!sdkp->first_scan) + goto out; + + if (blk_queue_is_zoned(q)) { sd_printk(KERN_NOTICE, sdkp, "Host-%s zoned block device\n", q->limits.zoned == BLK_ZONED_HM ? "managed" : "aware"); + } else { + if (sdkp->zoned == 1) + sd_printk(KERN_NOTICE, sdkp, + "Host-aware SMR disk used as regular disk\n"); + else if (sdkp->zoned == 2) + sd_printk(KERN_NOTICE, sdkp, + "Drive-managed SMR disk\n"); + } out: kfree(buffer); @@ -3404,10 +3410,6 @@ static int sd_probe(struct device *dev) sdkp->first_scan = 1; sdkp->max_medium_access_timeouts = SD_MAX_MEDIUM_TIMEOUTS; - error = sd_zbc_init_disk(sdkp); - if (error) - goto out_free_index; - sd_revalidate_disk(gd); gd->flags = GENHD_FL_EXT_DEVT; diff --git a/drivers/scsi/sd.h b/drivers/scsi/sd.h index 4933e7daf17d..a3aad608bc38 100644 --- a/drivers/scsi/sd.h +++ b/drivers/scsi/sd.h @@ -215,7 +215,6 @@ static inline int sd_is_zoned(struct scsi_disk *sdkp) #ifdef CONFIG_BLK_DEV_ZONED -int sd_zbc_init_disk(struct scsi_disk *sdkp); void sd_zbc_release_disk(struct scsi_disk *sdkp); int sd_zbc_read_zones(struct scsi_disk *sdkp, unsigned char *buffer); int sd_zbc_revalidate_zones(struct scsi_disk *sdkp); @@ -231,11 +230,6 @@ blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, sector_t *lba, #else /* CONFIG_BLK_DEV_ZONED */ -static inline int sd_zbc_init_disk(struct scsi_disk *sdkp) -{ - return 0; -} - static inline void sd_zbc_release_disk(struct scsi_disk *sdkp) {} static inline int sd_zbc_read_zones(struct scsi_disk *sdkp, @@ -259,7 +253,7 @@ static inline blk_status_t sd_zbc_setup_zone_mgmt_cmnd(struct scsi_cmnd *cmd, static inline unsigned int sd_zbc_complete(struct scsi_cmnd *cmd, unsigned int good_bytes, struct scsi_sense_hdr *sshdr) { - return 0; + return good_bytes; } static inline blk_status_t sd_zbc_prepare_zone_append(struct scsi_cmnd *cmd, diff --git a/drivers/scsi/sd_zbc.c b/drivers/scsi/sd_zbc.c index 0e94ff056bff..cf07b7f93579 100644 --- a/drivers/scsi/sd_zbc.c +++ b/drivers/scsi/sd_zbc.c @@ -651,6 +651,28 @@ static void sd_zbc_print_zones(struct scsi_disk *sdkp) sdkp->zone_blocks); } +static int sd_zbc_init_disk(struct scsi_disk *sdkp) +{ + sdkp->zones_wp_offset = 
NULL; + spin_lock_init(&sdkp->zones_wp_offset_lock); + sdkp->rev_wp_offset = NULL; + mutex_init(&sdkp->rev_mutex); + INIT_WORK(&sdkp->zone_wp_offset_work, sd_zbc_update_wp_offset_workfn); + sdkp->zone_wp_update_buf = kzalloc(SD_BUF_SIZE, GFP_KERNEL); + if (!sdkp->zone_wp_update_buf) + return -ENOMEM; + + return 0; +} + +void sd_zbc_release_disk(struct scsi_disk *sdkp) +{ + kvfree(sdkp->zones_wp_offset); + sdkp->zones_wp_offset = NULL; + kfree(sdkp->zone_wp_update_buf); + sdkp->zone_wp_update_buf = NULL; +} + static void sd_zbc_revalidate_zones_cb(struct gendisk *disk) { struct scsi_disk *sdkp = scsi_disk(disk); @@ -667,7 +689,24 @@ int sd_zbc_revalidate_zones(struct scsi_disk *sdkp) u32 max_append; int ret = 0; - if (!sd_is_zoned(sdkp)) + /* + * For all zoned disks, initialize zone append emulation data if not + * already done. This is necessary also for host-aware disks used as + * regular disks due to the presence of partitions as these partitions + * may be deleted and the disk zoned model changed back from + * BLK_ZONED_NONE to BLK_ZONED_HA. + */ + if (sd_is_zoned(sdkp) && !sdkp->zone_wp_update_buf) { + ret = sd_zbc_init_disk(sdkp); + if (ret) + return ret; + } + + /* + * There is nothing to do for regular disks, including host-aware disks + * that have partitions. + */ + if (!blk_queue_is_zoned(q)) return 0; /* @@ -764,28 +803,3 @@ err: return ret; } - -int sd_zbc_init_disk(struct scsi_disk *sdkp) -{ - if (!sd_is_zoned(sdkp)) - return 0; - - sdkp->zones_wp_offset = NULL; - spin_lock_init(&sdkp->zones_wp_offset_lock); - sdkp->rev_wp_offset = NULL; - mutex_init(&sdkp->rev_mutex); - INIT_WORK(&sdkp->zone_wp_offset_work, sd_zbc_update_wp_offset_workfn); - sdkp->zone_wp_update_buf = kzalloc(SD_BUF_SIZE, GFP_KERNEL); - if (!sdkp->zone_wp_update_buf) - return -ENOMEM; - - return 0; -} - -void sd_zbc_release_disk(struct scsi_disk *sdkp) -{ - kvfree(sdkp->zones_wp_offset); - sdkp->zones_wp_offset = NULL; - kfree(sdkp->zone_wp_update_buf); - sdkp->zone_wp_update_buf = NULL; -} diff --git a/drivers/spi/spi-bcm-qspi.c b/drivers/spi/spi-bcm-qspi.c index 681d09085175..9cfa15ec8b08 100644 --- a/drivers/spi/spi-bcm-qspi.c +++ b/drivers/spi/spi-bcm-qspi.c @@ -1295,7 +1295,7 @@ static const struct of_device_id bcm_qspi_of_match[] = { }, { .compatible = "brcm,spi-bcm-qspi", - .data = &bcm_qspi_rev_data, + .data = &bcm_qspi_no_rev_data, }, { .compatible = "brcm,spi-bcm7216-qspi", diff --git a/drivers/spi/spi-bcm2835.c b/drivers/spi/spi-bcm2835.c index c45d76c848c8..41986ac0fbfb 100644 --- a/drivers/spi/spi-bcm2835.c +++ b/drivers/spi/spi-bcm2835.c @@ -75,7 +75,7 @@ #define DRV_NAME "spi-bcm2835" /* define polling limits */ -unsigned int polling_limit_us = 30; +static unsigned int polling_limit_us = 30; module_param(polling_limit_us, uint, 0664); MODULE_PARM_DESC(polling_limit_us, "time in us to run a transfer in polling mode\n"); diff --git a/drivers/spi/spi-fsl-dspi.c b/drivers/spi/spi-fsl-dspi.c index 91c6affe139c..127323a4b27c 100644 --- a/drivers/spi/spi-fsl-dspi.c +++ b/drivers/spi/spi-fsl-dspi.c @@ -174,17 +174,17 @@ static const struct fsl_dspi_devtype_data devtype_data[] = { .fifo_size = 16, }, [LS2080A] = { - .trans_mode = DSPI_DMA_MODE, + .trans_mode = DSPI_XSPI_MODE, .max_clock_factor = 8, .fifo_size = 4, }, [LS2085A] = { - .trans_mode = DSPI_DMA_MODE, + .trans_mode = DSPI_XSPI_MODE, .max_clock_factor = 8, .fifo_size = 4, }, [LX2160A] = { - .trans_mode = DSPI_DMA_MODE, + .trans_mode = DSPI_XSPI_MODE, .max_clock_factor = 8, .fifo_size = 4, }, @@ -1273,11 +1273,14 @@ static int dspi_probe(struct 
platform_device *pdev) void __iomem *base; bool big_endian; - ctlr = spi_alloc_master(&pdev->dev, sizeof(struct fsl_dspi)); + dspi = devm_kzalloc(&pdev->dev, sizeof(*dspi), GFP_KERNEL); + if (!dspi) + return -ENOMEM; + + ctlr = spi_alloc_master(&pdev->dev, 0); if (!ctlr) return -ENOMEM; - dspi = spi_controller_get_devdata(ctlr); dspi->pdev = pdev; dspi->ctlr = ctlr; @@ -1414,7 +1417,7 @@ poll_mode: if (dspi->devtype_data->trans_mode != DSPI_DMA_MODE) ctlr->ptp_sts_supported = true; - platform_set_drvdata(pdev, ctlr); + platform_set_drvdata(pdev, dspi); ret = spi_register_controller(ctlr); if (ret != 0) { @@ -1437,8 +1440,7 @@ out_ctlr_put: static int dspi_remove(struct platform_device *pdev) { - struct spi_controller *ctlr = platform_get_drvdata(pdev); - struct fsl_dspi *dspi = spi_controller_get_devdata(ctlr); + struct fsl_dspi *dspi = platform_get_drvdata(pdev); /* Disconnect from the SPI framework */ spi_unregister_controller(dspi->ctlr); diff --git a/drivers/spi/spi-fsl-espi.c b/drivers/spi/spi-fsl-espi.c index e60581283a24..6d148ab70b93 100644 --- a/drivers/spi/spi-fsl-espi.c +++ b/drivers/spi/spi-fsl-espi.c @@ -564,13 +564,14 @@ static void fsl_espi_cpu_irq(struct fsl_espi *espi, u32 events) static irqreturn_t fsl_espi_irq(s32 irq, void *context_data) { struct fsl_espi *espi = context_data; - u32 events; + u32 events, mask; spin_lock(&espi->lock); /* Get interrupt events(tx/rx) */ events = fsl_espi_read_reg(espi, ESPI_SPIE); - if (!events) { + mask = fsl_espi_read_reg(espi, ESPI_SPIM); + if (!(events & mask)) { spin_unlock(&espi->lock); return IRQ_NONE; } diff --git a/drivers/staging/media/Kconfig b/drivers/staging/media/Kconfig index 71d077762698..63d3bc61b529 100644 --- a/drivers/staging/media/Kconfig +++ b/drivers/staging/media/Kconfig @@ -42,8 +42,6 @@ source "drivers/staging/media/tegra-video/Kconfig" source "drivers/staging/media/ipu3/Kconfig" -source "drivers/staging/media/phy-rockchip-dphy-rx0/Kconfig" - source "drivers/staging/media/rkisp1/Kconfig" if MEDIA_ANALOG_TV_SUPPORT diff --git a/drivers/staging/media/Makefile b/drivers/staging/media/Makefile index 17ececa1e095..fee9c9b2aaaf 100644 --- a/drivers/staging/media/Makefile +++ b/drivers/staging/media/Makefile @@ -10,6 +10,5 @@ obj-$(CONFIG_VIDEO_TEGRA) += tegra-video/ obj-$(CONFIG_TEGRA_VDE) += tegra-vde/ obj-$(CONFIG_VIDEO_HANTRO) += hantro/ obj-$(CONFIG_VIDEO_IPU3_IMGU) += ipu3/ -obj-$(CONFIG_PHY_ROCKCHIP_DPHY_RX0) += phy-rockchip-dphy-rx0/ obj-$(CONFIG_VIDEO_ROCKCHIP_ISP1) += rkisp1/ obj-$(CONFIG_VIDEO_USBVISION) += usbvision/ diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/Kconfig b/drivers/staging/media/phy-rockchip-dphy-rx0/Kconfig deleted file mode 100644 index fb74df829371..000000000000 --- a/drivers/staging/media/phy-rockchip-dphy-rx0/Kconfig +++ /dev/null @@ -1,13 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0-only - -config PHY_ROCKCHIP_DPHY_RX0 - tristate "Rockchip MIPI Synopsys DPHY RX0 driver" - depends on ARCH_ROCKCHIP || COMPILE_TEST - select GENERIC_PHY_MIPI_DPHY - select GENERIC_PHY - help - Enable this to support the Rockchip MIPI Synopsys DPHY RX0 - associated to the Rockchip ISP module present in RK3399 SoCs. - - To compile this driver as a module, choose M here: the module - will be called phy-rockchip-dphy-rx0. 
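The spi-fsl-dspi probe/remove hunks above move the driver state out of the SPI controller's devdata: the fsl_dspi structure is now devm-allocated against the platform device and stored as platform drvdata, so remove() no longer has to reach it through the controller object. A minimal sketch of that lifetime pattern, with hypothetical names (my_dev, my_probe, my_remove) standing in for the real driver:

#include <linux/platform_device.h>
#include <linux/spi/spi.h>

struct my_dev {
	struct spi_controller *ctlr;
};

static int my_probe(struct platform_device *pdev)
{
	struct my_dev *priv;
	int ret;

	/* Driver state is devm memory of the platform device ... */
	priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
	if (!priv)
		return -ENOMEM;

	/* ... so the controller carries no private payload (size 0) */
	priv->ctlr = spi_alloc_master(&pdev->dev, 0);
	if (!priv->ctlr)
		return -ENOMEM;

	platform_set_drvdata(pdev, priv);

	ret = spi_register_controller(priv->ctlr);
	if (ret)
		spi_controller_put(priv->ctlr);
	return ret;
}

static int my_remove(struct platform_device *pdev)
{
	struct my_dev *priv = platform_get_drvdata(pdev);

	/* priv stays valid here even after the controller is released */
	spi_unregister_controller(priv->ctlr);
	return 0;
}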
diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/Makefile b/drivers/staging/media/phy-rockchip-dphy-rx0/Makefile deleted file mode 100644 index 507e5d0593ab..000000000000 --- a/drivers/staging/media/phy-rockchip-dphy-rx0/Makefile +++ /dev/null @@ -1,2 +0,0 @@ -# SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_PHY_ROCKCHIP_DPHY_RX0) += phy-rockchip-dphy-rx0.o diff --git a/drivers/staging/media/phy-rockchip-dphy-rx0/TODO b/drivers/staging/media/phy-rockchip-dphy-rx0/TODO deleted file mode 100644 index ab612e5b27dc..000000000000 --- a/drivers/staging/media/phy-rockchip-dphy-rx0/TODO +++ /dev/null @@ -1,6 +0,0 @@ -The main reason for keeping this in staging is because the only driver -that uses this is rkisp1, which is also in staging. It should be moved together -with rkisp1. - -Please CC patches to Linux Media <linux-media@vger.kernel.org> and -Helen Koike <helen.koike@collabora.com>. diff --git a/drivers/target/target_core_transport.c b/drivers/target/target_core_transport.c index 590eac2df909..ff26ab0a5f60 100644 --- a/drivers/target/target_core_transport.c +++ b/drivers/target/target_core_transport.c @@ -1840,7 +1840,8 @@ int target_submit_tmr(struct se_cmd *se_cmd, struct se_session *se_sess, * out unpacked_lun for the original se_cmd. */ if (tm_type == TMR_ABORT_TASK && (flags & TARGET_SCF_LOOKUP_LUN_FROM_TAG)) { - if (!target_lookup_lun_from_tag(se_sess, tag, &unpacked_lun)) + if (!target_lookup_lun_from_tag(se_sess, tag, + &se_cmd->orig_fe_lun)) goto failure; } diff --git a/drivers/thunderbolt/Kconfig b/drivers/thunderbolt/Kconfig index 354e61c0f2e5..7fc058f81d00 100644 --- a/drivers/thunderbolt/Kconfig +++ b/drivers/thunderbolt/Kconfig @@ -16,7 +16,19 @@ menuconfig USB4 To compile this driver a module, choose M here. The module will be called thunderbolt. +if USB4 + +config USB4_DEBUGFS_WRITE + bool "Enable write by debugfs to configuration spaces (DANGEROUS)" + help + Enables writing to device configuration registers through + debugfs interface. + + Only enable this if you know what you are doing! Never enable + this for production systems or distro kernels. 
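The USB4_DEBUGFS_WRITE help text above describes a build-time gate: the debugfs register files added later in this diff (drivers/thunderbolt/debugfs.c) always allow reading, and only gain a write handler and a read-write file mode when the option is enabled. A condensed sketch of that gate; add_regs_file() is an illustrative wrapper, not part of the driver:

#include <linux/debugfs.h>

#if IS_ENABLED(CONFIG_USB4_DEBUGFS_WRITE)
#define DEBUGFS_MODE	0600	/* root may write device registers */
#else
#define DEBUGFS_MODE	0400	/* dump only, no write handler compiled in */
#endif

/* Each router/port then gets its "regs" file created with that mode */
static void add_regs_file(struct dentry *parent, void *data,
			  const struct file_operations *fops)
{
	debugfs_create_file("regs", DEBUGFS_MODE, parent, data, fops);
}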
+ config USB4_KUNIT_TEST bool "KUnit tests" depends on KUNIT=y - depends on USB4=y + +endif # USB4 diff --git a/drivers/thunderbolt/Makefile b/drivers/thunderbolt/Makefile index 4ab5bfad7bfd..571537371072 100644 --- a/drivers/thunderbolt/Makefile +++ b/drivers/thunderbolt/Makefile @@ -4,4 +4,6 @@ thunderbolt-objs := nhi.o nhi_ops.o ctl.o tb.o switch.o cap.o path.o tunnel.o ee thunderbolt-objs += domain.o dma_port.o icm.o property.o xdomain.o lc.o tmu.o usb4.o thunderbolt-objs += nvm.o retimer.o quirks.o -obj-${CONFIG_USB4_KUNIT_TEST} += test.o +thunderbolt-${CONFIG_ACPI} += acpi.o +thunderbolt-$(CONFIG_DEBUG_FS) += debugfs.o +thunderbolt-${CONFIG_USB4_KUNIT_TEST} += test.o diff --git a/drivers/thunderbolt/acpi.c b/drivers/thunderbolt/acpi.c new file mode 100644 index 000000000000..a5f988a9f948 --- /dev/null +++ b/drivers/thunderbolt/acpi.c @@ -0,0 +1,117 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * ACPI support + * + * Copyright (C) 2020, Intel Corporation + * Author: Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <linux/acpi.h> + +#include "tb.h" + +static acpi_status tb_acpi_add_link(acpi_handle handle, u32 level, void *data, + void **return_value) +{ + struct fwnode_reference_args args; + struct fwnode_handle *fwnode; + struct tb_nhi *nhi = data; + struct acpi_device *adev; + struct pci_dev *pdev; + struct device *dev; + int ret; + + if (acpi_bus_get_device(handle, &adev)) + return AE_OK; + + fwnode = acpi_fwnode_handle(adev); + ret = fwnode_property_get_reference_args(fwnode, "usb4-host-interface", + NULL, 0, 0, &args); + if (ret) + return AE_OK; + + /* It needs to reference this NHI */ + if (nhi->pdev->dev.fwnode != args.fwnode) + goto out_put; + + /* + * Try to find physical device walking upwards to the hierarcy. + * We need to do this because the xHCI driver might not yet be + * bound so the USB3 SuperSpeed ports are not yet created. + */ + dev = acpi_get_first_physical_node(adev); + while (!dev) { + adev = adev->parent; + if (!adev) + break; + dev = acpi_get_first_physical_node(adev); + } + + if (!dev) + goto out_put; + + /* + * Check that the device is PCIe. This is because USB3 + * SuperSpeed ports have this property and they are not power + * managed with the xHCI and the SuperSpeed hub so we create the + * link from xHCI instead. + */ + while (!dev_is_pci(dev)) + dev = dev->parent; + + if (!dev) + goto out_put; + + /* + * Check that this actually matches the type of device we + * expect. It should either be xHCI or PCIe root/downstream + * port. + */ + pdev = to_pci_dev(dev); + if (pdev->class == PCI_CLASS_SERIAL_USB_XHCI || + (pci_is_pcie(pdev) && + (pci_pcie_type(pdev) == PCI_EXP_TYPE_ROOT_PORT || + pci_pcie_type(pdev) == PCI_EXP_TYPE_DOWNSTREAM))) { + const struct device_link *link; + + link = device_link_add(&pdev->dev, &nhi->pdev->dev, + DL_FLAG_AUTOREMOVE_SUPPLIER | + DL_FLAG_PM_RUNTIME); + if (link) { + dev_dbg(&nhi->pdev->dev, "created link from %s\n", + dev_name(&pdev->dev)); + } else { + dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n", + dev_name(&pdev->dev)); + } + } + +out_put: + fwnode_handle_put(args.fwnode); + return AE_OK; +} + +/** + * tb_acpi_add_links() - Add device links based on ACPI description + * @nhi: Pointer to NHI + * + * Goes over ACPI namespace finding tunneled ports that reference to + * @nhi ACPI node. For each reference a device link is added. The link + * is automatically removed by the driver core. 
+ */ +void tb_acpi_add_links(struct tb_nhi *nhi) +{ + acpi_status status; + + if (!has_acpi_companion(&nhi->pdev->dev)) + return; + + /* + * Find all devices that have usb4-host-controller interface + * property that references to this NHI. + */ + status = acpi_walk_namespace(ACPI_TYPE_DEVICE, ACPI_ROOT_OBJECT, 32, + tb_acpi_add_link, NULL, nhi, NULL); + if (ACPI_FAILURE(status)) + dev_warn(&nhi->pdev->dev, "failed to enumerate tunneled ports\n"); +} diff --git a/drivers/thunderbolt/cap.c b/drivers/thunderbolt/cap.c index 19db6cdc5b70..6f571e912cf2 100644 --- a/drivers/thunderbolt/cap.c +++ b/drivers/thunderbolt/cap.c @@ -15,14 +15,6 @@ #define VSE_CAP_OFFSET_MAX 0xffff #define TMU_ACCESS_EN BIT(20) -struct tb_cap_any { - union { - struct tb_cap_basic basic; - struct tb_cap_extended_short extended_short; - struct tb_cap_extended_long extended_long; - }; -} __packed; - static int tb_port_enable_tmu(struct tb_port *port, bool enable) { struct tb_switch *sw = port->sw; @@ -67,23 +59,50 @@ static void tb_port_dummy_read(struct tb_port *port) } } +/** + * tb_port_next_cap() - Return next capability in the linked list + * @port: Port to find the capability for + * @offset: Previous capability offset (%0 for start) + * + * Returns dword offset of the next capability in port config space + * capability list and returns it. Passing %0 returns the first entry in + * the capability list. If no next capability is found returns %0. In case + * of failure returns negative errno. + */ +int tb_port_next_cap(struct tb_port *port, unsigned int offset) +{ + struct tb_cap_any header; + int ret; + + if (!offset) + return port->config.first_cap_offset; + + ret = tb_port_read(port, &header, TB_CFG_PORT, offset, 1); + if (ret) + return ret; + + return header.basic.next; +} + static int __tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap) { - u32 offset = 1; + int offset = 0; do { struct tb_cap_any header; int ret; + offset = tb_port_next_cap(port, offset); + if (offset < 0) + return offset; + ret = tb_port_read(port, &header, TB_CFG_PORT, offset, 1); if (ret) return ret; if (header.basic.cap == cap) return offset; - - offset = header.basic.next; - } while (offset); + } while (offset > 0); return -ENOENT; } @@ -114,6 +133,50 @@ int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap) } /** + * tb_switch_next_cap() - Return next capability in the linked list + * @sw: Switch to find the capability for + * @offset: Previous capability offset (%0 for start) + * + * Finds dword offset of the next capability in router config space + * capability list and returns it. Passing %0 returns the first entry in + * the capability list. If no next capability is found returns %0. In case + * of failure returns negative errno. + */ +int tb_switch_next_cap(struct tb_switch *sw, unsigned int offset) +{ + struct tb_cap_any header; + int ret; + + if (!offset) + return sw->config.first_cap_offset; + + ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 2); + if (ret) + return ret; + + switch (header.basic.cap) { + case TB_SWITCH_CAP_TMU: + ret = header.basic.next; + break; + + case TB_SWITCH_CAP_VSE: + if (!header.extended_short.length) + ret = header.extended_long.next; + else + ret = header.extended_short.next; + break; + + default: + tb_sw_dbg(sw, "unknown capability %#x at %#x\n", + header.basic.cap, offset); + ret = -EINVAL; + break; + } + + return ret >= VSE_CAP_OFFSET_MAX ? 
0 : ret; +} + +/** * tb_switch_find_cap() - Find switch capability * @sw Switch to find the capability for * @cap: Capability to look @@ -124,21 +187,23 @@ int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap) */ int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap) { - int offset = sw->config.first_cap_offset; + int offset = 0; - while (offset > 0 && offset < CAP_OFFSET_MAX) { + do { struct tb_cap_any header; int ret; + offset = tb_switch_next_cap(sw, offset); + if (offset < 0) + return offset; + ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 1); if (ret) return ret; if (header.basic.cap == cap) return offset; - - offset = header.basic.next; - } + } while (offset); return -ENOENT; } @@ -155,37 +220,24 @@ int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap) */ int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec) { - struct tb_cap_any header; - int offset; + int offset = 0; - offset = tb_switch_find_cap(sw, TB_SWITCH_CAP_VSE); - if (offset < 0) - return offset; - - while (offset > 0 && offset < VSE_CAP_OFFSET_MAX) { + do { + struct tb_cap_any header; int ret; - ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 2); + offset = tb_switch_next_cap(sw, offset); + if (offset < 0) + return offset; + + ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, offset, 1); if (ret) return ret; - /* - * Extended vendor specific capabilities come in two - * flavors: short and long. The latter is used when - * offset is over 0xff. - */ - if (offset >= CAP_OFFSET_MAX) { - if (header.extended_long.vsec_id == vsec) - return offset; - offset = header.extended_long.next; - } else { - if (header.extended_short.vsec_id == vsec) - return offset; - if (!header.extended_short.length) - return -ENOENT; - offset = header.extended_short.next; - } - } + if (header.extended_short.cap == TB_SWITCH_CAP_VSE && + header.extended_short.vsec_id == vsec) + return offset; + } while (offset); return -ENOENT; } diff --git a/drivers/thunderbolt/ctl.c b/drivers/thunderbolt/ctl.c index 394a23ce6ca4..9894b8f63064 100644 --- a/drivers/thunderbolt/ctl.c +++ b/drivers/thunderbolt/ctl.c @@ -219,6 +219,7 @@ static int check_config_address(struct tb_cfg_address addr, static struct tb_cfg_result decode_error(const struct ctl_pkg *response) { struct cfg_error_pkg *pkg = response->buffer; + struct tb_ctl *ctl = response->ctl; struct tb_cfg_result res = { 0 }; res.response_route = tb_cfg_get_route(&pkg->header); res.response_port = 0; @@ -227,9 +228,13 @@ static struct tb_cfg_result decode_error(const struct ctl_pkg *response) if (res.err) return res; - WARN(pkg->zero1, "pkg->zero1 is %#x\n", pkg->zero1); - WARN(pkg->zero2, "pkg->zero1 is %#x\n", pkg->zero1); - WARN(pkg->zero3, "pkg->zero1 is %#x\n", pkg->zero1); + if (pkg->zero1) + tb_ctl_warn(ctl, "pkg->zero1 is %#x\n", pkg->zero1); + if (pkg->zero2) + tb_ctl_warn(ctl, "pkg->zero2 is %#x\n", pkg->zero2); + if (pkg->zero3) + tb_ctl_warn(ctl, "pkg->zero3 is %#x\n", pkg->zero3); + res.err = 1; res.tb_error = pkg->error; res.response_port = pkg->port; @@ -266,9 +271,8 @@ static void tb_cfg_print_error(struct tb_ctl *ctl, * Invalid cfg_space/offset/length combination in * cfg_read/cfg_write. 
*/ - tb_ctl_WARN(ctl, - "CFG_ERROR(%llx:%x): Invalid config space or offset\n", - res->response_route, res->response_port); + tb_ctl_dbg(ctl, "%llx:%x: invalid config space or offset\n", + res->response_route, res->response_port); return; case TB_CFG_ERROR_NO_SUCH_PORT: /* @@ -283,6 +287,10 @@ static void tb_cfg_print_error(struct tb_ctl *ctl, tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Route contains a loop\n", res->response_route, res->response_port); return; + case TB_CFG_ERROR_LOCK: + tb_ctl_warn(ctl, "%llx:%x: downstream port is locked\n", + res->response_route, res->response_port); + return; default: /* 5,6,7,9 and 11 are also valid error codes */ tb_ctl_WARN(ctl, "CFG_ERROR(%llx:%x): Unknown error\n", @@ -951,6 +959,9 @@ static int tb_cfg_get_error(struct tb_ctl *ctl, enum tb_cfg_space space, return -ENODEV; tb_cfg_print_error(ctl, res); + + if (res->tb_error == TB_CFG_ERROR_LOCK) + return -EACCES; return -EIO; } diff --git a/drivers/thunderbolt/debugfs.c b/drivers/thunderbolt/debugfs.c new file mode 100644 index 000000000000..3680b2784ea1 --- /dev/null +++ b/drivers/thunderbolt/debugfs.c @@ -0,0 +1,701 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Debugfs interface + * + * Copyright (C) 2020, Intel Corporation + * Authors: Gil Fine <gil.fine@intel.com> + * Mika Westerberg <mika.westerberg@linux.intel.com> + */ + +#include <linux/debugfs.h> +#include <linux/pm_runtime.h> + +#include "tb.h" + +#define PORT_CAP_PCIE_LEN 1 +#define PORT_CAP_POWER_LEN 2 +#define PORT_CAP_LANE_LEN 3 +#define PORT_CAP_USB3_LEN 5 +#define PORT_CAP_DP_LEN 8 +#define PORT_CAP_TMU_LEN 8 +#define PORT_CAP_BASIC_LEN 9 +#define PORT_CAP_USB4_LEN 20 + +#define SWITCH_CAP_TMU_LEN 26 +#define SWITCH_CAP_BASIC_LEN 27 + +#define PATH_LEN 2 + +#define COUNTER_SET_LEN 3 + +#define DEBUGFS_ATTR(__space, __write) \ +static int __space ## _open(struct inode *inode, struct file *file) \ +{ \ + return single_open(file, __space ## _show, inode->i_private); \ +} \ + \ +static const struct file_operations __space ## _fops = { \ + .owner = THIS_MODULE, \ + .open = __space ## _open, \ + .release = single_release, \ + .read = seq_read, \ + .write = __write, \ + .llseek = seq_lseek, \ +} + +#define DEBUGFS_ATTR_RO(__space) \ + DEBUGFS_ATTR(__space, NULL) + +#define DEBUGFS_ATTR_RW(__space) \ + DEBUGFS_ATTR(__space, __space ## _write) + +static struct dentry *tb_debugfs_root; + +static void *validate_and_copy_from_user(const void __user *user_buf, + size_t *count) +{ + size_t nbytes; + void *buf; + + if (!*count) + return ERR_PTR(-EINVAL); + + if (!access_ok(user_buf, *count)) + return ERR_PTR(-EFAULT); + + buf = (void *)get_zeroed_page(GFP_KERNEL); + if (!buf) + return ERR_PTR(-ENOMEM); + + nbytes = min_t(size_t, *count, PAGE_SIZE); + if (copy_from_user(buf, user_buf, nbytes)) { + free_page((unsigned long)buf); + return ERR_PTR(-EFAULT); + } + + *count = nbytes; + return buf; +} + +static bool parse_line(char **line, u32 *offs, u32 *val, int short_fmt_len, + int long_fmt_len) +{ + char *token; + u32 v[5]; + int ret; + + token = strsep(line, "\n"); + if (!token) + return false; + + /* + * For Adapter/Router configuration space: + * Short format is: offset value\n + * v[0] v[1] + * Long format as produced from the read side: + * offset relative_offset cap_id vs_cap_id value\n + * v[0] v[1] v[2] v[3] v[4] + * + * For Counter configuration space: + * Short format is: offset\n + * v[0] + * Long format as produced from the read side: + * offset relative_offset counter_id value\n + * v[0] v[1] v[2] v[3] + */ + ret = sscanf(token, "%i %i %i 
%i %i", &v[0], &v[1], &v[2], &v[3], &v[4]); + /* In case of Counters, clear counter, "val" content is NA */ + if (ret == short_fmt_len) { + *offs = v[0]; + *val = v[short_fmt_len - 1]; + return true; + } else if (ret == long_fmt_len) { + *offs = v[0]; + *val = v[long_fmt_len - 1]; + return true; + } + + return false; +} + +#if IS_ENABLED(CONFIG_USB4_DEBUGFS_WRITE) +static ssize_t regs_write(struct tb_switch *sw, struct tb_port *port, + const char __user *user_buf, size_t count, + loff_t *ppos) +{ + struct tb *tb = sw->tb; + char *line, *buf; + u32 val, offset; + int ret = 0; + + buf = validate_and_copy_from_user(user_buf, &count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + pm_runtime_get_sync(&sw->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out; + } + + /* User did hardware changes behind the driver's back */ + add_taint(TAINT_USER, LOCKDEP_STILL_OK); + + line = buf; + while (parse_line(&line, &offset, &val, 2, 5)) { + if (port) + ret = tb_port_write(port, &val, TB_CFG_PORT, offset, 1); + else + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, offset, 1); + if (ret) + break; + } + + mutex_unlock(&tb->lock); + +out: + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); + free_page((unsigned long)buf); + + return ret < 0 ? ret : count; +} + +static ssize_t port_regs_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct tb_port *port = s->private; + + return regs_write(port->sw, port, user_buf, count, ppos); +} + +static ssize_t switch_regs_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct tb_switch *sw = s->private; + + return regs_write(sw, NULL, user_buf, count, ppos); +} +#define DEBUGFS_MODE 0600 +#else +#define port_regs_write NULL +#define switch_regs_write NULL +#define DEBUGFS_MODE 0400 +#endif + +static int port_clear_all_counters(struct tb_port *port) +{ + u32 *buf; + int ret; + + buf = kcalloc(COUNTER_SET_LEN * port->config.max_counters, sizeof(u32), + GFP_KERNEL); + if (!buf) + return -ENOMEM; + + ret = tb_port_write(port, buf, TB_CFG_COUNTERS, 0, + COUNTER_SET_LEN * port->config.max_counters); + kfree(buf); + + return ret; +} + +static ssize_t counters_write(struct file *file, const char __user *user_buf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct tb_port *port = s->private; + struct tb_switch *sw = port->sw; + struct tb *tb = port->sw->tb; + char *buf; + int ret; + + buf = validate_and_copy_from_user(user_buf, &count); + if (IS_ERR(buf)) + return PTR_ERR(buf); + + pm_runtime_get_sync(&sw->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out; + } + + /* If written delimiter only, clear all counters in one shot */ + if (buf[0] == '\n') { + ret = port_clear_all_counters(port); + } else { + char *line = buf; + u32 val, offset; + + ret = -EINVAL; + while (parse_line(&line, &offset, &val, 1, 4)) { + ret = tb_port_write(port, &val, TB_CFG_COUNTERS, + offset, 1); + if (ret) + break; + } + } + + mutex_unlock(&tb->lock); + +out: + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); + free_page((unsigned long)buf); + + return ret < 0 ? 
ret : count; +} + +static void cap_show(struct seq_file *s, struct tb_switch *sw, + struct tb_port *port, unsigned int cap, u8 cap_id, + u8 vsec_id, int length) +{ + int ret, offset = 0; + + while (length > 0) { + int i, dwords = min(length, TB_MAX_CONFIG_RW_LENGTH); + u32 data[TB_MAX_CONFIG_RW_LENGTH]; + + if (port) + ret = tb_port_read(port, data, TB_CFG_PORT, cap + offset, + dwords); + else + ret = tb_sw_read(sw, data, TB_CFG_SWITCH, cap + offset, dwords); + if (ret) { + seq_printf(s, "0x%04x <not accessible>\n", + cap + offset); + if (dwords > 1) + seq_printf(s, "0x%04x ...\n", cap + offset + 1); + return; + } + + for (i = 0; i < dwords; i++) { + seq_printf(s, "0x%04x %4d 0x%02x 0x%02x 0x%08x\n", + cap + offset + i, offset + i, + cap_id, vsec_id, data[i]); + } + + length -= dwords; + offset += dwords; + } +} + +static void port_cap_show(struct tb_port *port, struct seq_file *s, + unsigned int cap) +{ + struct tb_cap_any header; + u8 vsec_id = 0; + size_t length; + int ret; + + ret = tb_port_read(port, &header, TB_CFG_PORT, cap, 1); + if (ret) { + seq_printf(s, "0x%04x <capability read failed>\n", cap); + return; + } + + switch (header.basic.cap) { + case TB_PORT_CAP_PHY: + length = PORT_CAP_LANE_LEN; + break; + + case TB_PORT_CAP_TIME1: + length = PORT_CAP_TMU_LEN; + break; + + case TB_PORT_CAP_POWER: + length = PORT_CAP_POWER_LEN; + break; + + case TB_PORT_CAP_ADAP: + if (tb_port_is_pcie_down(port) || tb_port_is_pcie_up(port)) { + length = PORT_CAP_PCIE_LEN; + } else if (tb_port_is_dpin(port) || tb_port_is_dpout(port)) { + length = PORT_CAP_DP_LEN; + } else if (tb_port_is_usb3_down(port) || + tb_port_is_usb3_up(port)) { + length = PORT_CAP_USB3_LEN; + } else { + seq_printf(s, "0x%04x <unsupported capability 0x%02x>\n", + cap, header.basic.cap); + return; + } + break; + + case TB_PORT_CAP_VSE: + if (!header.extended_short.length) { + ret = tb_port_read(port, (u32 *)&header + 1, TB_CFG_PORT, + cap + 1, 1); + if (ret) { + seq_printf(s, "0x%04x <capability read failed>\n", + cap + 1); + return; + } + length = header.extended_long.length; + vsec_id = header.extended_short.vsec_id; + } else { + length = header.extended_short.length; + vsec_id = header.extended_short.vsec_id; + /* + * Ice Lake and Tiger Lake do not implement the + * full length of the capability, only first 32 + * dwords so hard-code it here. 
+ */ + if (!vsec_id && + (tb_switch_is_ice_lake(port->sw) || + tb_switch_is_tiger_lake(port->sw))) + length = 32; + } + break; + + case TB_PORT_CAP_USB4: + length = PORT_CAP_USB4_LEN; + break; + + default: + seq_printf(s, "0x%04x <unsupported capability 0x%02x>\n", + cap, header.basic.cap); + return; + } + + cap_show(s, NULL, port, cap, header.basic.cap, vsec_id, length); +} + +static void port_caps_show(struct tb_port *port, struct seq_file *s) +{ + int cap; + + cap = tb_port_next_cap(port, 0); + while (cap > 0) { + port_cap_show(port, s, cap); + cap = tb_port_next_cap(port, cap); + } +} + +static int port_basic_regs_show(struct tb_port *port, struct seq_file *s) +{ + u32 data[PORT_CAP_BASIC_LEN]; + int ret, i; + + ret = tb_port_read(port, data, TB_CFG_PORT, 0, ARRAY_SIZE(data)); + if (ret) + return ret; + + for (i = 0; i < ARRAY_SIZE(data); i++) + seq_printf(s, "0x%04x %4d 0x00 0x00 0x%08x\n", i, i, data[i]); + + return 0; +} + +static int port_regs_show(struct seq_file *s, void *not_used) +{ + struct tb_port *port = s->private; + struct tb_switch *sw = port->sw; + struct tb *tb = sw->tb; + int ret; + + pm_runtime_get_sync(&sw->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out_rpm_put; + } + + seq_puts(s, "# offset relative_offset cap_id vs_cap_id value\n"); + + ret = port_basic_regs_show(port, s); + if (ret) + goto out_unlock; + + port_caps_show(port, s); + +out_unlock: + mutex_unlock(&tb->lock); +out_rpm_put: + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); + + return ret; +} +DEBUGFS_ATTR_RW(port_regs); + +static void switch_cap_show(struct tb_switch *sw, struct seq_file *s, + unsigned int cap) +{ + struct tb_cap_any header; + int ret, length; + u8 vsec_id = 0; + + ret = tb_sw_read(sw, &header, TB_CFG_SWITCH, cap, 1); + if (ret) { + seq_printf(s, "0x%04x <capability read failed>\n", cap); + return; + } + + if (header.basic.cap == TB_SWITCH_CAP_VSE) { + if (!header.extended_short.length) { + ret = tb_sw_read(sw, (u32 *)&header + 1, TB_CFG_SWITCH, + cap + 1, 1); + if (ret) { + seq_printf(s, "0x%04x <capability read failed>\n", + cap + 1); + return; + } + length = header.extended_long.length; + } else { + length = header.extended_short.length; + } + vsec_id = header.extended_short.vsec_id; + } else { + if (header.basic.cap == TB_SWITCH_CAP_TMU) { + length = SWITCH_CAP_TMU_LEN; + } else { + seq_printf(s, "0x%04x <unknown capability 0x%02x>\n", + cap, header.basic.cap); + return; + } + } + + cap_show(s, sw, NULL, cap, header.basic.cap, vsec_id, length); +} + +static void switch_caps_show(struct tb_switch *sw, struct seq_file *s) +{ + int cap; + + cap = tb_switch_next_cap(sw, 0); + while (cap > 0) { + switch_cap_show(sw, s, cap); + cap = tb_switch_next_cap(sw, cap); + } +} + +static int switch_basic_regs_show(struct tb_switch *sw, struct seq_file *s) +{ + u32 data[SWITCH_CAP_BASIC_LEN]; + size_t dwords; + int ret, i; + + /* Only USB4 has the additional registers */ + if (tb_switch_is_usb4(sw)) + dwords = ARRAY_SIZE(data); + else + dwords = 7; + + ret = tb_sw_read(sw, data, TB_CFG_SWITCH, 0, dwords); + if (ret) + return ret; + + for (i = 0; i < dwords; i++) + seq_printf(s, "0x%04x %4d 0x00 0x00 0x%08x\n", i, i, data[i]); + + return 0; +} + +static int switch_regs_show(struct seq_file *s, void *not_used) +{ + struct tb_switch *sw = s->private; + struct tb *tb = sw->tb; + int ret; + + pm_runtime_get_sync(&sw->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out_rpm_put; + } + + seq_puts(s, "# 
offset relative_offset cap_id vs_cap_id value\n"); + + ret = switch_basic_regs_show(sw, s); + if (ret) + goto out_unlock; + + switch_caps_show(sw, s); + +out_unlock: + mutex_unlock(&tb->lock); +out_rpm_put: + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); + + return ret; +} +DEBUGFS_ATTR_RW(switch_regs); + +static int path_show_one(struct tb_port *port, struct seq_file *s, int hopid) +{ + u32 data[PATH_LEN]; + int ret, i; + + ret = tb_port_read(port, data, TB_CFG_HOPS, hopid * PATH_LEN, + ARRAY_SIZE(data)); + if (ret) { + seq_printf(s, "0x%04x <not accessible>\n", hopid * PATH_LEN); + return ret; + } + + for (i = 0; i < ARRAY_SIZE(data); i++) { + seq_printf(s, "0x%04x %4d 0x%02x 0x%08x\n", + hopid * PATH_LEN + i, i, hopid, data[i]); + } + + return 0; +} + +static int path_show(struct seq_file *s, void *not_used) +{ + struct tb_port *port = s->private; + struct tb_switch *sw = port->sw; + struct tb *tb = sw->tb; + int start, i, ret = 0; + + pm_runtime_get_sync(&sw->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out_rpm_put; + } + + seq_puts(s, "# offset relative_offset in_hop_id value\n"); + + /* NHI and lane adapters have entry for path 0 */ + if (tb_port_is_null(port) || tb_port_is_nhi(port)) { + ret = path_show_one(port, s, 0); + if (ret) + goto out_unlock; + } + + start = tb_port_is_nhi(port) ? 1 : TB_PATH_MIN_HOPID; + + for (i = start; i <= port->config.max_in_hop_id; i++) { + ret = path_show_one(port, s, i); + if (ret) + break; + } + +out_unlock: + mutex_unlock(&tb->lock); +out_rpm_put: + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); + + return ret; +} +DEBUGFS_ATTR_RO(path); + +static int counter_set_regs_show(struct tb_port *port, struct seq_file *s, + int counter) +{ + u32 data[COUNTER_SET_LEN]; + int ret, i; + + ret = tb_port_read(port, data, TB_CFG_COUNTERS, + counter * COUNTER_SET_LEN, ARRAY_SIZE(data)); + if (ret) { + seq_printf(s, "0x%04x <not accessible>\n", + counter * COUNTER_SET_LEN); + return ret; + } + + for (i = 0; i < ARRAY_SIZE(data); i++) { + seq_printf(s, "0x%04x %4d 0x%02x 0x%08x\n", + counter * COUNTER_SET_LEN + i, i, counter, data[i]); + } + + return 0; +} + +static int counters_show(struct seq_file *s, void *not_used) +{ + struct tb_port *port = s->private; + struct tb_switch *sw = port->sw; + struct tb *tb = sw->tb; + int i, ret = 0; + + pm_runtime_get_sync(&sw->dev); + + if (mutex_lock_interruptible(&tb->lock)) { + ret = -ERESTARTSYS; + goto out; + } + + seq_puts(s, "# offset relative_offset counter_id value\n"); + + for (i = 0; i < port->config.max_counters; i++) { + ret = counter_set_regs_show(port, s, i); + if (ret) + break; + } + + mutex_unlock(&tb->lock); + +out: + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); + + return ret; +} +DEBUGFS_ATTR_RW(counters); + +/** + * tb_switch_debugfs_init() - Add debugfs entries for router + * @sw: Pointer to the router + * + * Adds debugfs directories and files for given router. 
+ */ +void tb_switch_debugfs_init(struct tb_switch *sw) +{ + struct dentry *debugfs_dir; + struct tb_port *port; + + debugfs_dir = debugfs_create_dir(dev_name(&sw->dev), tb_debugfs_root); + sw->debugfs_dir = debugfs_dir; + debugfs_create_file("regs", DEBUGFS_MODE, debugfs_dir, sw, + &switch_regs_fops); + + tb_switch_for_each_port(sw, port) { + struct dentry *debugfs_dir; + char dir_name[10]; + + if (port->disabled) + continue; + if (port->config.type == TB_TYPE_INACTIVE) + continue; + + snprintf(dir_name, sizeof(dir_name), "port%d", port->port); + debugfs_dir = debugfs_create_dir(dir_name, sw->debugfs_dir); + debugfs_create_file("regs", DEBUGFS_MODE, debugfs_dir, + port, &port_regs_fops); + debugfs_create_file("path", 0400, debugfs_dir, port, + &path_fops); + if (port->config.counters_support) + debugfs_create_file("counters", 0600, debugfs_dir, port, + &counters_fops); + } +} + +/** + * tb_switch_debugfs_remove() - Remove all router debugfs entries + * @sw: Pointer to the router + * + * Removes all previously added debugfs entries under this router. + */ +void tb_switch_debugfs_remove(struct tb_switch *sw) +{ + debugfs_remove_recursive(sw->debugfs_dir); +} + +void tb_debugfs_init(void) +{ + tb_debugfs_root = debugfs_create_dir("thunderbolt", NULL); +} + +void tb_debugfs_exit(void) +{ + debugfs_remove_recursive(tb_debugfs_root); +} diff --git a/drivers/thunderbolt/domain.c b/drivers/thunderbolt/domain.c index bba4cbfa9759..f0de94f7acbf 100644 --- a/drivers/thunderbolt/domain.c +++ b/drivers/thunderbolt/domain.c @@ -275,7 +275,7 @@ static struct attribute *domain_attrs[] = { static umode_t domain_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct tb *tb = container_of(dev, struct tb, dev); if (attr == &dev_attr_boot_acl.attr) { @@ -455,6 +455,8 @@ int tb_domain_add(struct tb *tb) /* This starts event processing */ mutex_unlock(&tb->lock); + device_init_wakeup(&tb->dev, true); + pm_runtime_no_callbacks(&tb->dev); pm_runtime_set_active(&tb->dev); pm_runtime_enable(&tb->dev); @@ -544,6 +546,33 @@ int tb_domain_suspend(struct tb *tb) return tb->cm_ops->suspend ? 
tb->cm_ops->suspend(tb) : 0; } +int tb_domain_freeze_noirq(struct tb *tb) +{ + int ret = 0; + + mutex_lock(&tb->lock); + if (tb->cm_ops->freeze_noirq) + ret = tb->cm_ops->freeze_noirq(tb); + if (!ret) + tb_ctl_stop(tb->ctl); + mutex_unlock(&tb->lock); + + return ret; +} + +int tb_domain_thaw_noirq(struct tb *tb) +{ + int ret = 0; + + mutex_lock(&tb->lock); + tb_ctl_start(tb->ctl); + if (tb->cm_ops->thaw_noirq) + ret = tb->cm_ops->thaw_noirq(tb); + mutex_unlock(&tb->lock); + + return ret; +} + void tb_domain_complete(struct tb *tb) { if (tb->cm_ops->complete) @@ -798,12 +827,23 @@ int tb_domain_init(void) { int ret; + tb_test_init(); + + tb_debugfs_init(); ret = tb_xdomain_init(); if (ret) - return ret; + goto err_debugfs; ret = bus_register(&tb_bus_type); if (ret) - tb_xdomain_exit(); + goto err_xdomain; + + return 0; + +err_xdomain: + tb_xdomain_exit(); +err_debugfs: + tb_debugfs_exit(); + tb_test_exit(); return ret; } @@ -814,4 +854,6 @@ void tb_domain_exit(void) ida_destroy(&tb_domain_ida); tb_nvm_exit(); tb_xdomain_exit(); + tb_debugfs_exit(); + tb_test_exit(); } diff --git a/drivers/thunderbolt/icm.c b/drivers/thunderbolt/icm.c index ffcc8c3459e5..b51fc3f62b1f 100644 --- a/drivers/thunderbolt/icm.c +++ b/drivers/thunderbolt/icm.c @@ -1635,11 +1635,14 @@ static void icm_icl_rtd3_veto(struct tb *tb, const struct icm_pkg_header *hdr) static bool icm_tgl_is_supported(struct tb *tb) { + u32 val; + /* * If the firmware is not running use software CM. This platform * should fully support both. */ - return icm_firmware_running(tb->nhi); + val = ioread32(tb->nhi->iobase + REG_FW_STS); + return !!(val & REG_FW_STS_NVM_AUTH_DONE); } static void icm_handle_notification(struct work_struct *work) diff --git a/drivers/thunderbolt/lc.c b/drivers/thunderbolt/lc.c index 19be627d090f..41e6c738f6c8 100644 --- a/drivers/thunderbolt/lc.c +++ b/drivers/thunderbolt/lc.c @@ -45,7 +45,7 @@ static int find_port_lc_cap(struct tb_port *port) return sw->cap_lc + start + phys * size; } -static int tb_lc_configure_lane(struct tb_port *port, bool configure) +static int tb_lc_set_port_configured(struct tb_port *port, bool configured) { bool upstream = tb_is_upstream_port(port); struct tb_switch *sw = port->sw; @@ -69,7 +69,7 @@ static int tb_lc_configure_lane(struct tb_port *port, bool configure) else lane = TB_LC_SX_CTRL_L2C; - if (configure) { + if (configured) { ctrl |= lane; if (upstream) ctrl |= TB_LC_SX_CTRL_UPSTREAM; @@ -83,55 +83,146 @@ static int tb_lc_configure_lane(struct tb_port *port, bool configure) } /** - * tb_lc_configure_link() - Let LC know about configured link - * @sw: Switch that is being added + * tb_lc_configure_port() - Let LC know about configured port + * @port: Port that is set as configured * - * Informs LC of both parent switch and @sw that there is established - * link between the two. + * Sets the port configured for power management purposes. */ -int tb_lc_configure_link(struct tb_switch *sw) +int tb_lc_configure_port(struct tb_port *port) { - struct tb_port *up, *down; - int ret; + return tb_lc_set_port_configured(port, true); +} + +/** + * tb_lc_unconfigure_port() - Let LC know about unconfigured port + * @port: Port that is set as configured + * + * Sets the port unconfigured for power management purposes. 
+ */ +void tb_lc_unconfigure_port(struct tb_port *port) +{ + tb_lc_set_port_configured(port, false); +} - if (!tb_route(sw) || tb_switch_is_icm(sw)) +static int tb_lc_set_xdomain_configured(struct tb_port *port, bool configure) +{ + struct tb_switch *sw = port->sw; + u32 ctrl, lane; + int cap, ret; + + if (sw->generation < 2) return 0; - up = tb_upstream_port(sw); - down = tb_port_at(tb_route(sw), tb_to_switch(sw->dev.parent)); + cap = find_port_lc_cap(port); + if (cap < 0) + return cap; - /* Configure parent link toward this switch */ - ret = tb_lc_configure_lane(down, true); + ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1); if (ret) return ret; - /* Configure upstream link from this switch to the parent */ - ret = tb_lc_configure_lane(up, true); + /* Resolve correct lane */ + if (port->port % 2) + lane = TB_LC_SX_CTRL_L1D; + else + lane = TB_LC_SX_CTRL_L2D; + + if (configure) + ctrl |= lane; + else + ctrl &= ~lane; + + return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, cap + TB_LC_SX_CTRL, 1); +} + +/** + * tb_lc_configure_xdomain() - Inform LC that the link is XDomain + * @port: Switch downstream port connected to another host + * + * Sets the lane configured for XDomain accordingly so that the LC knows + * about this. Returns %0 in success and negative errno in failure. + */ +int tb_lc_configure_xdomain(struct tb_port *port) +{ + return tb_lc_set_xdomain_configured(port, true); +} + +/** + * tb_lc_unconfigure_xdomain() - Unconfigure XDomain from port + * @port: Switch downstream port that was connected to another host + * + * Unsets the lane XDomain configuration. + */ +void tb_lc_unconfigure_xdomain(struct tb_port *port) +{ + tb_lc_set_xdomain_configured(port, false); +} + +static int tb_lc_set_wake_one(struct tb_switch *sw, unsigned int offset, + unsigned int flags) +{ + u32 ctrl; + int ret; + + /* + * Enable wake on PCIe and USB4 (wake coming from another + * router). + */ + ret = tb_sw_read(sw, &ctrl, TB_CFG_SWITCH, + offset + TB_LC_SX_CTRL, 1); if (ret) - tb_lc_configure_lane(down, false); + return ret; + + ctrl &= ~(TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD | TB_LC_SX_CTRL_WOP | + TB_LC_SX_CTRL_WOU4); + + if (flags & TB_WAKE_ON_CONNECT) + ctrl |= TB_LC_SX_CTRL_WOC | TB_LC_SX_CTRL_WOD; + if (flags & TB_WAKE_ON_USB4) + ctrl |= TB_LC_SX_CTRL_WOU4; + if (flags & TB_WAKE_ON_PCIE) + ctrl |= TB_LC_SX_CTRL_WOP; - return ret; + return tb_sw_write(sw, &ctrl, TB_CFG_SWITCH, offset + TB_LC_SX_CTRL, 1); } /** - * tb_lc_unconfigure_link() - Let LC know about unconfigured link - * @sw: Switch to unconfigure + * tb_lc_set_wake() - Enable/disable wake + * @sw: Switch whose wakes to configure + * @flags: Wakeup flags (%0 to disable) * - * Informs LC of both parent switch and @sw that the link between the - * two does not exist anymore. + * For each LC sets wake bits accordingly. 
*/ -void tb_lc_unconfigure_link(struct tb_switch *sw) +int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags) { - struct tb_port *up, *down; + int start, size, nlc, ret, i; + u32 desc; - if (sw->is_unplugged || !tb_route(sw) || tb_switch_is_icm(sw)) - return; + if (sw->generation < 2) + return 0; - up = tb_upstream_port(sw); - down = tb_port_at(tb_route(sw), tb_to_switch(sw->dev.parent)); + if (!tb_route(sw)) + return 0; - tb_lc_configure_lane(up, false); - tb_lc_configure_lane(down, false); + ret = read_lc_desc(sw, &desc); + if (ret) + return ret; + + /* Figure out number of link controllers */ + nlc = desc & TB_LC_DESC_NLC_MASK; + start = (desc & TB_LC_DESC_SIZE_MASK) >> TB_LC_DESC_SIZE_SHIFT; + size = (desc & TB_LC_DESC_PORT_SIZE_MASK) >> TB_LC_DESC_PORT_SIZE_SHIFT; + + /* For each link controller set sleep bit */ + for (i = 0; i < nlc; i++) { + unsigned int offset = sw->cap_lc + start + i * size; + + ret = tb_lc_set_wake_one(sw, offset, flags); + if (ret) + return ret; + } + + return 0; } /** diff --git a/drivers/thunderbolt/nhi.c b/drivers/thunderbolt/nhi.c index 5f7489fa1327..3f79baa54829 100644 --- a/drivers/thunderbolt/nhi.c +++ b/drivers/thunderbolt/nhi.c @@ -17,6 +17,7 @@ #include <linux/module.h> #include <linux/delay.h> #include <linux/property.h> +#include <linux/platform_data/x86/apple.h> #include "nhi.h" #include "nhi_regs.h" @@ -863,6 +864,22 @@ static int nhi_suspend_noirq(struct device *dev) return __nhi_suspend_noirq(dev, device_may_wakeup(dev)); } +static int nhi_freeze_noirq(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct tb *tb = pci_get_drvdata(pdev); + + return tb_domain_freeze_noirq(tb); +} + +static int nhi_thaw_noirq(struct device *dev) +{ + struct pci_dev *pdev = to_pci_dev(dev); + struct tb *tb = pci_get_drvdata(pdev); + + return tb_domain_thaw_noirq(tb); +} + static bool nhi_wake_supported(struct pci_dev *pdev) { u8 val; @@ -1069,6 +1086,69 @@ static bool nhi_imr_valid(struct pci_dev *pdev) return true; } +/* + * During suspend the Thunderbolt controller is reset and all PCIe + * tunnels are lost. The NHI driver will try to reestablish all tunnels + * during resume. This adds device links between the tunneled PCIe + * downstream ports and the NHI so that the device core will make sure + * NHI is resumed first before the rest. + */ +static void tb_apple_add_links(struct tb_nhi *nhi) +{ + struct pci_dev *upstream, *pdev; + + if (!x86_apple_machine) + return; + + switch (nhi->pdev->device) { + case PCI_DEVICE_ID_INTEL_LIGHT_RIDGE: + case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C: + case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_NHI: + case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_NHI: + break; + default: + return; + } + + upstream = pci_upstream_bridge(nhi->pdev); + while (upstream) { + if (!pci_is_pcie(upstream)) + return; + if (pci_pcie_type(upstream) == PCI_EXP_TYPE_UPSTREAM) + break; + upstream = pci_upstream_bridge(upstream); + } + + if (!upstream) + return; + + /* + * For each hotplug downstream port, create add device link + * back to NHI so that PCIe tunnels can be re-established after + * sleep. 
+ */ + for_each_pci_bridge(pdev, upstream->subordinate) { + const struct device_link *link; + + if (!pci_is_pcie(pdev)) + continue; + if (pci_pcie_type(pdev) != PCI_EXP_TYPE_DOWNSTREAM || + !pdev->is_hotplug_bridge) + continue; + + link = device_link_add(&pdev->dev, &nhi->pdev->dev, + DL_FLAG_AUTOREMOVE_SUPPLIER | + DL_FLAG_PM_RUNTIME); + if (link) { + dev_dbg(&nhi->pdev->dev, "created link from %s\n", + dev_name(&pdev->dev)); + } else { + dev_warn(&nhi->pdev->dev, "device link creation from %s failed\n", + dev_name(&pdev->dev)); + } + } +} + static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id) { struct tb_nhi *nhi; @@ -1134,6 +1214,9 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id) return res; } + tb_apple_add_links(nhi); + tb_acpi_add_links(nhi); + tb = icm_probe(nhi); if (!tb) tb = tb_probe(nhi); @@ -1157,6 +1240,8 @@ static int nhi_probe(struct pci_dev *pdev, const struct pci_device_id *id) } pci_set_drvdata(pdev, tb); + device_wakeup_enable(&pdev->dev); + pm_runtime_allow(&pdev->dev); pm_runtime_set_autosuspend_delay(&pdev->dev, TB_AUTOSUSPEND_DELAY); pm_runtime_use_autosuspend(&pdev->dev); @@ -1186,14 +1271,13 @@ static void nhi_remove(struct pci_dev *pdev) static const struct dev_pm_ops nhi_pm_ops = { .suspend_noirq = nhi_suspend_noirq, .resume_noirq = nhi_resume_noirq, - .freeze_noirq = nhi_suspend_noirq, /* + .freeze_noirq = nhi_freeze_noirq, /* * we just disable hotplug, the * pci-tunnels stay alive. */ - .thaw_noirq = nhi_resume_noirq, + .thaw_noirq = nhi_thaw_noirq, .restore_noirq = nhi_resume_noirq, .suspend = nhi_suspend, - .freeze = nhi_suspend, .poweroff_noirq = nhi_poweroff_noirq, .poweroff = nhi_suspend, .complete = nhi_complete, diff --git a/drivers/thunderbolt/nhi_ops.c b/drivers/thunderbolt/nhi_ops.c index 6795851aac95..96da07e88c52 100644 --- a/drivers/thunderbolt/nhi_ops.c +++ b/drivers/thunderbolt/nhi_ops.c @@ -59,7 +59,7 @@ static int icl_nhi_force_power(struct tb_nhi *nhi, bool power) pci_write_config_dword(nhi->pdev, VS_CAP_22, vs_cap); if (power) { - unsigned int retries = 10; + unsigned int retries = 350; u32 val; /* Wait until the firmware tells it is up and running */ @@ -67,7 +67,7 @@ static int icl_nhi_force_power(struct tb_nhi *nhi, bool power) pci_read_config_dword(nhi->pdev, VS_CAP_9, &val); if (val & VS_CAP_9_FW_READY) return 0; - msleep(250); + usleep_range(3000, 3100); } while (--retries); return -ETIMEDOUT; @@ -97,7 +97,7 @@ static int icl_nhi_lc_mailbox_cmd_complete(struct tb_nhi *nhi, int timeout) pci_read_config_dword(nhi->pdev, VS_CAP_18, &data); if (data & VS_CAP_18_DONE) goto clear; - msleep(100); + usleep_range(1000, 1100); } while (time_before(jiffies, end)); return -ETIMEDOUT; @@ -121,31 +121,38 @@ static void icl_nhi_set_ltr(struct tb_nhi *nhi) static int icl_nhi_suspend(struct tb_nhi *nhi) { + struct tb *tb = pci_get_drvdata(nhi->pdev); int ret; if (icl_nhi_is_device_connected(nhi)) return 0; - /* - * If there is no device connected we need to perform both: a - * handshake through LC mailbox and force power down before - * entering D3. - */ - icl_nhi_lc_mailbox_cmd(nhi, ICL_LC_PREPARE_FOR_RESET); - ret = icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT); - if (ret) - return ret; + if (tb_switch_is_icm(tb->root_switch)) { + /* + * If there is no device connected we need to perform + * both: a handshake through LC mailbox and force power + * down before entering D3. 
+ */ + icl_nhi_lc_mailbox_cmd(nhi, ICL_LC_PREPARE_FOR_RESET); + ret = icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT); + if (ret) + return ret; + } return icl_nhi_force_power(nhi, false); } static int icl_nhi_suspend_noirq(struct tb_nhi *nhi, bool wakeup) { + struct tb *tb = pci_get_drvdata(nhi->pdev); enum icl_lc_mailbox_cmd cmd; if (!pm_suspend_via_firmware()) return icl_nhi_suspend(nhi); + if (!tb_switch_is_icm(tb->root_switch)) + return 0; + cmd = wakeup ? ICL_LC_GO2SX : ICL_LC_GO2SX_NO_WAKE; icl_nhi_lc_mailbox_cmd(nhi, cmd); return icl_nhi_lc_mailbox_cmd_complete(nhi, ICL_LC_MAILBOX_TIMEOUT); diff --git a/drivers/thunderbolt/quirks.c b/drivers/thunderbolt/quirks.c index 7eac3e0f90a2..57e2978a3c21 100644 --- a/drivers/thunderbolt/quirks.c +++ b/drivers/thunderbolt/quirks.c @@ -27,7 +27,7 @@ static const struct tb_quirk tb_quirks[] = { * tb_check_quirks() - Check for quirks to apply * @sw: Thunderbolt switch * - * Apply any quirks for the Thunderbolt controller + * Apply any quirks for the Thunderbolt controller. */ void tb_check_quirks(struct tb_switch *sw) { diff --git a/drivers/thunderbolt/switch.c b/drivers/thunderbolt/switch.c index a921de9ce7cb..c73bbfe69ba1 100644 --- a/drivers/thunderbolt/switch.c +++ b/drivers/thunderbolt/switch.c @@ -601,6 +601,13 @@ int tb_port_add_nfc_credits(struct tb_port *port, int credits) if (credits == 0 || port->sw->is_unplugged) return 0; + /* + * USB4 restricts programming NFC buffers to lane adapters only + * so skip other ports. + */ + if (tb_switch_is_usb4(port->sw) && !tb_port_is_null(port)) + return 0; + nfc_credits = port->config.nfc_credits & ADP_CS_4_NFC_BUFFERS_MASK; nfc_credits += credits; @@ -666,6 +673,50 @@ int tb_port_unlock(struct tb_port *port) return 0; } +static int __tb_port_enable(struct tb_port *port, bool enable) +{ + int ret; + u32 phy; + + if (!tb_port_is_null(port)) + return -EINVAL; + + ret = tb_port_read(port, &phy, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); + if (ret) + return ret; + + if (enable) + phy &= ~LANE_ADP_CS_1_LD; + else + phy |= LANE_ADP_CS_1_LD; + + return tb_port_write(port, &phy, TB_CFG_PORT, + port->cap_phy + LANE_ADP_CS_1, 1); +} + +/** + * tb_port_enable() - Enable lane adapter + * @port: Port to enable (can be %NULL) + * + * This is used for lane 0 and 1 adapters to enable it. + */ +int tb_port_enable(struct tb_port *port) +{ + return __tb_port_enable(port, true); +} + +/** + * tb_port_disable() - Disable lane adapter + * @port: Port to disable (can be %NULL) + * + * This is used for lane 0 and 1 adapters to disable it. + */ +int tb_port_disable(struct tb_port *port) +{ + return __tb_port_enable(port, false); +} + /** * tb_init_port() - initialize a port * @@ -739,7 +790,7 @@ static int tb_port_alloc_hopid(struct tb_port *port, bool in, int min_hopid, * NHI can use HopIDs 1-max for other adapters HopIDs 0-7 are * reserved. */ - if (port->config.type != TB_TYPE_NHI && min_hopid < TB_PATH_MIN_HOPID) + if (!tb_port_is_nhi(port) && min_hopid < TB_PATH_MIN_HOPID) min_hopid = TB_PATH_MIN_HOPID; if (max_hopid < 0 || max_hopid > port_max_hopid) @@ -1227,23 +1278,24 @@ static void tb_dump_switch(const struct tb *tb, const struct tb_switch *sw) /** * reset_switch() - reconfigure route, enable and send TB_CFG_PKG_RESET + * @sw: Switch to reset * * Return: Returns 0 on success or an error code on failure. 
*/ -int tb_switch_reset(struct tb *tb, u64 route) +int tb_switch_reset(struct tb_switch *sw) { struct tb_cfg_result res; - struct tb_regs_switch_header header = { - header.route_hi = route >> 32, - header.route_lo = route, - header.enabled = true, - }; - tb_dbg(tb, "resetting switch at %llx\n", route); - res.err = tb_cfg_write(tb->ctl, ((u32 *) &header) + 2, route, - 0, 2, 2, 2); + + if (sw->generation > 1) + return 0; + + tb_sw_dbg(sw, "resetting switch\n"); + + res.err = tb_sw_write(sw, ((u32 *) &sw->config) + 2, + TB_CFG_SWITCH, 2, 2); if (res.err) return res.err; - res = tb_cfg_reset(tb->ctl, route, TB_CFG_DEFAULT_TIMEOUT); + res = tb_cfg_reset(sw->tb->ctl, tb_route(sw), TB_CFG_DEFAULT_TIMEOUT); if (res.err > 0) return -EIO; return res.err; @@ -1261,7 +1313,7 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active) u32 data; int res; - if (tb_switch_is_icm(sw)) + if (tb_switch_is_icm(sw) || tb_switch_is_usb4(sw)) return 0; sw->config.plug_events_delay = 0xff; @@ -1269,10 +1321,6 @@ static int tb_plug_events_active(struct tb_switch *sw, bool active) if (res) return res; - /* Plug events are always enabled in USB4 */ - if (tb_switch_is_usb4(sw)) - return 0; - res = tb_sw_read(sw, &data, TB_CFG_SWITCH, sw->cap_plug_events + 1, 1); if (res) return res; @@ -1649,7 +1697,7 @@ static struct attribute *switch_attrs[] = { static umode_t switch_attr_is_visible(struct kobject *kobj, struct attribute *attr, int n) { - struct device *dev = container_of(kobj, struct device, kobj); + struct device *dev = kobj_to_dev(kobj); struct tb_switch *sw = tb_to_switch(dev); if (attr == &dev_attr_device.attr) { @@ -1988,7 +2036,7 @@ int tb_switch_configure(struct tb_switch *sw) route = tb_route(sw); tb_dbg(tb, "%s Switch at %#llx (depth: %d, up port: %d)\n", - sw->config.enabled ? "restoring " : "initializing", route, + sw->config.enabled ? "restoring" : "initializing", route, tb_route_length(route), sw->config.upstream_port_number); sw->config.enabled = 1; @@ -2008,10 +2056,6 @@ int tb_switch_configure(struct tb_switch *sw) return ret; ret = usb4_switch_setup(sw); - if (ret) - return ret; - - ret = usb4_switch_configure_link(sw); } else { if (sw->config.vendor_id != PCI_VENDOR_ID_INTEL) tb_sw_warn(sw, "unknown switch vendor id %#x\n", @@ -2025,10 +2069,6 @@ int tb_switch_configure(struct tb_switch *sw) /* Enumerate the switch */ ret = tb_sw_write(sw, (u32 *)&sw->config + 1, TB_CFG_SWITCH, ROUTER_CS_1, 3); - if (ret) - return ret; - - ret = tb_lc_configure_link(sw); } if (ret) return ret; @@ -2312,6 +2352,69 @@ void tb_switch_lane_bonding_disable(struct tb_switch *sw) } /** + * tb_switch_configure_link() - Set link configured + * @sw: Switch whose link is configured + * + * Sets the link upstream from @sw configured (from both ends) so that + * it will not be disconnected when the domain exits sleep. Can be + * called for any switch. + * + * It is recommended that this is called after lane bonding is enabled. + * + * Returns %0 on success and negative errno in case of error. 
+ */ +int tb_switch_configure_link(struct tb_switch *sw) +{ + struct tb_port *up, *down; + int ret; + + if (!tb_route(sw) || tb_switch_is_icm(sw)) + return 0; + + up = tb_upstream_port(sw); + if (tb_switch_is_usb4(up->sw)) + ret = usb4_port_configure(up); + else + ret = tb_lc_configure_port(up); + if (ret) + return ret; + + down = up->remote; + if (tb_switch_is_usb4(down->sw)) + return usb4_port_configure(down); + return tb_lc_configure_port(down); +} + +/** + * tb_switch_unconfigure_link() - Unconfigure link + * @sw: Switch whose link is unconfigured + * + * Sets the link unconfigured so the @sw will be disconnected if the + * domain exists sleep. + */ +void tb_switch_unconfigure_link(struct tb_switch *sw) +{ + struct tb_port *up, *down; + + if (sw->is_unplugged) + return; + if (!tb_route(sw) || tb_switch_is_icm(sw)) + return; + + up = tb_upstream_port(sw); + if (tb_switch_is_usb4(up->sw)) + usb4_port_unconfigure(up); + else + tb_lc_unconfigure_port(up); + + down = up->remote; + if (tb_switch_is_usb4(down->sw)) + usb4_port_unconfigure(down); + else + tb_lc_unconfigure_port(down); +} + +/** * tb_switch_add() - Add a switch to the domain * @sw: Switch to add * @@ -2399,6 +2502,13 @@ int tb_switch_add(struct tb_switch *sw) return ret; } + /* + * Thunderbolt routers do not generate wakeups themselves but + * they forward wakeups from tunneled protocols, so enable it + * here. + */ + device_init_wakeup(&sw->dev, true); + pm_runtime_set_active(&sw->dev); if (sw->rpm) { pm_runtime_set_autosuspend_delay(&sw->dev, TB_AUTOSUSPEND_DELAY); @@ -2408,6 +2518,7 @@ int tb_switch_add(struct tb_switch *sw) pm_request_autosuspend(&sw->dev); } + tb_switch_debugfs_init(sw); return 0; } @@ -2423,6 +2534,8 @@ void tb_switch_remove(struct tb_switch *sw) { struct tb_port *port; + tb_switch_debugfs_remove(sw); + if (sw->rpm) { pm_runtime_get_sync(&sw->dev); pm_runtime_disable(&sw->dev); @@ -2445,11 +2558,6 @@ void tb_switch_remove(struct tb_switch *sw) if (!sw->is_unplugged) tb_plug_events_active(sw, false); - if (tb_switch_is_usb4(sw)) - usb4_switch_unconfigure_link(sw); - else - tb_lc_unconfigure_link(sw); - tb_switch_nvm_remove(sw); if (tb_route(sw)) @@ -2481,6 +2589,18 @@ void tb_sw_set_unplugged(struct tb_switch *sw) } } +static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags) +{ + if (flags) + tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags); + else + tb_sw_dbg(sw, "disabling wakeup\n"); + + if (tb_switch_is_usb4(sw)) + return usb4_switch_set_wake(sw, flags); + return tb_lc_set_wake(sw, flags); +} + int tb_switch_resume(struct tb_switch *sw) { struct tb_port *port; @@ -2526,6 +2646,13 @@ int tb_switch_resume(struct tb_switch *sw) if (err) return err; + /* Disable wakes */ + tb_switch_set_wake(sw, 0); + + err = tb_switch_tmu_init(sw); + if (err) + return err; + /* check for surviving downstream switches */ tb_switch_for_each_port(sw, port) { if (!tb_port_has_remote(port) && !port->xdomain) @@ -2555,20 +2682,43 @@ int tb_switch_resume(struct tb_switch *sw) return 0; } -void tb_switch_suspend(struct tb_switch *sw) +/** + * tb_switch_suspend() - Put a switch to sleep + * @sw: Switch to suspend + * @runtime: Is this runtime suspend or system sleep + * + * Suspends router and all its children. Enables wakes according to + * value of @runtime and then sets sleep bit for the router. If @sw is + * host router the domain is ready to go to sleep once this function + * returns. 
+ */ +void tb_switch_suspend(struct tb_switch *sw, bool runtime) { + unsigned int flags = 0; struct tb_port *port; int err; + tb_sw_dbg(sw, "suspending switch\n"); + err = tb_plug_events_active(sw, false); if (err) return; tb_switch_for_each_port(sw, port) { if (tb_port_has_remote(port)) - tb_switch_suspend(port->remote->sw); + tb_switch_suspend(port->remote->sw, runtime); } + if (runtime) { + /* Trigger wake when something is plugged in/out */ + flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT; + flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE; + } else if (device_may_wakeup(&sw->dev)) { + flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE; + } + + tb_switch_set_wake(sw, flags); + if (tb_switch_is_usb4(sw)) usb4_switch_set_sleep(sw); else diff --git a/drivers/thunderbolt/tb.c b/drivers/thunderbolt/tb.c index f507815040eb..214fbc92c1b7 100644 --- a/drivers/thunderbolt/tb.c +++ b/drivers/thunderbolt/tb.c @@ -9,6 +9,7 @@ #include <linux/slab.h> #include <linux/errno.h> #include <linux/delay.h> +#include <linux/pm_runtime.h> #include "tb.h" #include "tb_regs.h" @@ -22,13 +23,21 @@ * events and exit if this is not set (it needs to * acquire the lock one more time). Used to drain wq * after cfg has been paused. + * @remove_work: Work used to remove any unplugged routers after + * runtime resume */ struct tb_cm { struct list_head tunnel_list; struct list_head dp_resources; bool hotplug_active; + struct delayed_work remove_work; }; +static inline struct tb *tcm_to_tb(struct tb_cm *tcm) +{ + return ((void *)tcm - sizeof(struct tb)); +} + struct tb_hotplug_event { struct work_struct work; struct tb *tb; @@ -140,6 +149,29 @@ static void tb_discover_tunnels(struct tb_switch *sw) } } +static int tb_port_configure_xdomain(struct tb_port *port) +{ + /* + * XDomain paths currently only support single lane so we must + * disable the other lane according to USB4 spec. + */ + tb_port_disable(port->dual_link_port); + + if (tb_switch_is_usb4(port->sw)) + return usb4_port_configure_xdomain(port); + return tb_lc_configure_xdomain(port); +} + +static void tb_port_unconfigure_xdomain(struct tb_port *port) +{ + if (tb_switch_is_usb4(port->sw)) + usb4_port_unconfigure_xdomain(port); + else + tb_lc_unconfigure_xdomain(port); + + tb_port_enable(port->dual_link_port); +} + static void tb_scan_xdomain(struct tb_port *port) { struct tb_switch *sw = port->sw; @@ -158,6 +190,7 @@ static void tb_scan_xdomain(struct tb_port *port) NULL); if (xd) { tb_port_at(route, sw)->xdomain = xd; + tb_port_configure_xdomain(port); tb_xdomain_add(xd); } } @@ -502,8 +535,13 @@ static void tb_scan_switch(struct tb_switch *sw) { struct tb_port *port; + pm_runtime_get_sync(&sw->dev); + tb_switch_for_each_port(sw, port) tb_scan_port(port); + + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); } /** @@ -566,6 +604,7 @@ static void tb_scan_port(struct tb_port *port) */ if (port->xdomain) { tb_xdomain_remove(port->xdomain); + tb_port_unconfigure_xdomain(port); port->xdomain = NULL; } @@ -577,6 +616,12 @@ static void tb_scan_port(struct tb_port *port) if (!tcm->hotplug_active) dev_set_uevent_suppress(&sw->dev, true); + /* + * At the moment Thunderbolt 2 and beyond (devices with LC) we + * can support runtime PM. 
+ */ + sw->rpm = sw->generation > 1; + if (tb_switch_add(sw)) { tb_switch_put(sw); return; @@ -592,8 +637,9 @@ static void tb_scan_port(struct tb_port *port) } /* Enable lane bonding if supported */ - if (tb_switch_lane_bonding_enable(sw)) - tb_sw_warn(sw, "failed to enable lane bonding\n"); + tb_switch_lane_bonding_enable(sw); + /* Set the link configured */ + tb_switch_configure_link(sw); if (tb_enable_tmu(sw)) tb_sw_warn(sw, "failed to enable TMU\n"); @@ -636,6 +682,11 @@ static void tb_deactivate_and_free_tunnel(struct tb_tunnel *tunnel) * deallocated properly. */ tb_switch_dealloc_dp_resource(src_port->sw, src_port); + /* Now we can allow the domain to runtime suspend again */ + pm_runtime_mark_last_busy(&dst_port->sw->dev); + pm_runtime_put_autosuspend(&dst_port->sw->dev); + pm_runtime_mark_last_busy(&src_port->sw->dev); + pm_runtime_put_autosuspend(&src_port->sw->dev); fallthrough; case TB_TUNNEL_USB3: @@ -682,6 +733,7 @@ static void tb_free_unplugged_children(struct tb_switch *sw) if (port->remote->sw->is_unplugged) { tb_retimer_remove_all(port); tb_remove_dp_resources(port->remote->sw); + tb_switch_unconfigure_link(port->remote->sw); tb_switch_lane_bonding_disable(port->remote->sw); tb_switch_remove(port->remote->sw); port->remote = NULL; @@ -821,9 +873,20 @@ static void tb_tunnel_dp(struct tb *tb) return; } + /* + * DP stream needs the domain to be active so runtime resume + * both ends of the tunnel. + * + * This should bring the routers in the middle active as well + * and keeps the domain from runtime suspending while the DP + * tunnel is active. + */ + pm_runtime_get_sync(&in->sw->dev); + pm_runtime_get_sync(&out->sw->dev); + if (tb_switch_alloc_dp_resource(in->sw, in)) { tb_port_dbg(in, "no resource available for DP IN, not tunneling\n"); - return; + goto err_rpm_put; } /* Make all unused USB3 bandwidth available for the new DP tunnel */ @@ -862,6 +925,11 @@ err_reclaim: tb_reclaim_usb3_bandwidth(tb, in, out); err_dealloc_dp: tb_switch_dealloc_dp_resource(in->sw, in); +err_rpm_put: + pm_runtime_mark_last_busy(&out->sw->dev); + pm_runtime_put_autosuspend(&out->sw->dev); + pm_runtime_mark_last_busy(&in->sw->dev); + pm_runtime_put_autosuspend(&in->sw->dev); } static void tb_dp_resource_unavailable(struct tb *tb, struct tb_port *port) @@ -911,6 +979,29 @@ static void tb_dp_resource_available(struct tb *tb, struct tb_port *port) tb_tunnel_dp(tb); } +static void tb_disconnect_and_release_dp(struct tb *tb) +{ + struct tb_cm *tcm = tb_priv(tb); + struct tb_tunnel *tunnel, *n; + + /* + * Tear down all DP tunnels and release their resources. They + * will be re-established after resume based on plug events. 
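A recurring pattern in the hunks above is the runtime-PM bracket: tb_scan_switch() and tb_tunnel_dp() take a synchronous reference before touching a router (which resumes it and its ancestors), and drop it through the autosuspend helpers once the scan is done or the DP tunnel is torn down, so the domain can idle again. A hedged sketch of that pairing with the standard pm_runtime API; the function and device here are illustrative, not part of the patch:

    #include <linux/pm_runtime.h>

    /* Illustrative only: bracket hardware access with runtime-PM references */
    static int example_touch_router(struct device *dev)
    {
        int ret;

        ret = pm_runtime_get_sync(dev);     /* resumes dev and its ancestors */
        if (ret < 0) {
            pm_runtime_put_noidle(dev);
            return ret;
        }

        /* ... poke the hardware while it is guaranteed to be powered ... */

        pm_runtime_mark_last_busy(dev);
        pm_runtime_put_autosuspend(dev);    /* may idle again after the delay */
        return 0;
    }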
+ */ + list_for_each_entry_safe_reverse(tunnel, n, &tcm->tunnel_list, list) { + if (tb_tunnel_is_dp(tunnel)) + tb_deactivate_and_free_tunnel(tunnel); + } + + while (!list_empty(&tcm->dp_resources)) { + struct tb_port *port; + + port = list_first_entry(&tcm->dp_resources, + struct tb_port, list); + list_del_init(&port->list); + } +} + static int tb_tunnel_pci(struct tb *tb, struct tb_switch *sw) { struct tb_port *up, *down, *port; @@ -1022,6 +1113,10 @@ static void tb_handle_hotplug(struct work_struct *work) struct tb_cm *tcm = tb_priv(tb); struct tb_switch *sw; struct tb_port *port; + + /* Bring the domain back from sleep if it was suspended */ + pm_runtime_get_sync(&tb->dev); + mutex_lock(&tb->lock); if (!tcm->hotplug_active) goto out; /* during init, suspend or shutdown */ @@ -1045,6 +1140,9 @@ static void tb_handle_hotplug(struct work_struct *work) ev->route, ev->port, ev->unplug); goto put_sw; } + + pm_runtime_get_sync(&sw->dev); + if (ev->unplug) { tb_retimer_remove_all(port); @@ -1054,6 +1152,7 @@ static void tb_handle_hotplug(struct work_struct *work) tb_free_invalid_tunnels(tb); tb_remove_dp_resources(port->remote->sw); tb_switch_tmu_disable(port->remote->sw); + tb_switch_unconfigure_link(port->remote->sw); tb_switch_lane_bonding_disable(port->remote->sw); tb_switch_remove(port->remote->sw); port->remote = NULL; @@ -1077,6 +1176,7 @@ static void tb_handle_hotplug(struct work_struct *work) port->xdomain = NULL; __tb_disconnect_xdomain_paths(tb, xd); tb_xdomain_put(xd); + tb_port_unconfigure_xdomain(port); } else if (tb_port_is_dpout(port) || tb_port_is_dpin(port)) { tb_dp_resource_unavailable(tb, port); } else { @@ -1096,10 +1196,17 @@ static void tb_handle_hotplug(struct work_struct *work) } } + pm_runtime_mark_last_busy(&sw->dev); + pm_runtime_put_autosuspend(&sw->dev); + put_sw: tb_switch_put(sw); out: mutex_unlock(&tb->lock); + + pm_runtime_mark_last_busy(&tb->dev); + pm_runtime_put_autosuspend(&tb->dev); + kfree(ev); } @@ -1135,6 +1242,7 @@ static void tb_stop(struct tb *tb) struct tb_tunnel *tunnel; struct tb_tunnel *n; + cancel_delayed_work(&tcm->remove_work); /* tunnels are only present after everything has been initialized */ list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) { /* @@ -1186,6 +1294,8 @@ static int tb_start(struct tb *tb) * root switch. 
*/ tb->root_switch->no_nvm_upgrade = true; + /* All USB4 routers support runtime PM */ + tb->root_switch->rpm = tb_switch_is_usb4(tb->root_switch); ret = tb_switch_configure(tb->root_switch); if (ret) { @@ -1227,7 +1337,8 @@ static int tb_suspend_noirq(struct tb *tb) struct tb_cm *tcm = tb_priv(tb); tb_dbg(tb, "suspending...\n"); - tb_switch_suspend(tb->root_switch); + tb_disconnect_and_release_dp(tb); + tb_switch_suspend(tb->root_switch, false); tcm->hotplug_active = false; /* signal tb_handle_hotplug to quit */ tb_dbg(tb, "suspend finished\n"); @@ -1238,17 +1349,25 @@ static void tb_restore_children(struct tb_switch *sw) { struct tb_port *port; + /* No need to restore if the router is already unplugged */ + if (sw->is_unplugged) + return; + if (tb_enable_tmu(sw)) tb_sw_warn(sw, "failed to restore TMU configuration\n"); tb_switch_for_each_port(sw, port) { - if (!tb_port_has_remote(port)) + if (!tb_port_has_remote(port) && !port->xdomain) continue; - if (tb_switch_lane_bonding_enable(port->remote->sw)) - dev_warn(&sw->dev, "failed to restore lane bonding\n"); + if (port->remote) { + tb_switch_lane_bonding_enable(port->remote->sw); + tb_switch_configure_link(port->remote->sw); - tb_restore_children(port->remote->sw); + tb_restore_children(port->remote->sw); + } else if (port->xdomain) { + tb_port_configure_xdomain(port); + } } } @@ -1260,7 +1379,7 @@ static int tb_resume_noirq(struct tb *tb) tb_dbg(tb, "resuming...\n"); /* remove any pci devices the firmware might have setup */ - tb_switch_reset(tb, 0); + tb_switch_reset(tb->root_switch); tb_switch_resume(tb->root_switch); tb_free_invalid_tunnels(tb); @@ -1294,6 +1413,7 @@ static int tb_free_unplugged_xdomains(struct tb_switch *sw) if (port->xdomain && port->xdomain->is_unplugged) { tb_retimer_remove_all(port); tb_xdomain_remove(port->xdomain); + tb_port_unconfigure_xdomain(port); port->xdomain = NULL; ret++; } else if (port->remote) { @@ -1304,6 +1424,22 @@ static int tb_free_unplugged_xdomains(struct tb_switch *sw) return ret; } +static int tb_freeze_noirq(struct tb *tb) +{ + struct tb_cm *tcm = tb_priv(tb); + + tcm->hotplug_active = false; + return 0; +} + +static int tb_thaw_noirq(struct tb *tb) +{ + struct tb_cm *tcm = tb_priv(tb); + + tcm->hotplug_active = true; + return 0; +} + static void tb_complete(struct tb *tb) { /* @@ -1317,12 +1453,64 @@ static void tb_complete(struct tb *tb) mutex_unlock(&tb->lock); } +static int tb_runtime_suspend(struct tb *tb) +{ + struct tb_cm *tcm = tb_priv(tb); + + mutex_lock(&tb->lock); + tb_switch_suspend(tb->root_switch, true); + tcm->hotplug_active = false; + mutex_unlock(&tb->lock); + + return 0; +} + +static void tb_remove_work(struct work_struct *work) +{ + struct tb_cm *tcm = container_of(work, struct tb_cm, remove_work.work); + struct tb *tb = tcm_to_tb(tcm); + + mutex_lock(&tb->lock); + if (tb->root_switch) { + tb_free_unplugged_children(tb->root_switch); + tb_free_unplugged_xdomains(tb->root_switch); + } + mutex_unlock(&tb->lock); +} + +static int tb_runtime_resume(struct tb *tb) +{ + struct tb_cm *tcm = tb_priv(tb); + struct tb_tunnel *tunnel, *n; + + mutex_lock(&tb->lock); + tb_switch_resume(tb->root_switch); + tb_free_invalid_tunnels(tb); + tb_restore_children(tb->root_switch); + list_for_each_entry_safe(tunnel, n, &tcm->tunnel_list, list) + tb_tunnel_restart(tunnel); + tcm->hotplug_active = true; + mutex_unlock(&tb->lock); + + /* + * Schedule cleanup of any unplugged devices. 
Run this in a + * separate thread to avoid possible deadlock if the device + * removal runtime resumes the unplugged device. + */ + queue_delayed_work(tb->wq, &tcm->remove_work, msecs_to_jiffies(50)); + return 0; +} + static const struct tb_cm_ops tb_cm_ops = { .start = tb_start, .stop = tb_stop, .suspend_noirq = tb_suspend_noirq, .resume_noirq = tb_resume_noirq, + .freeze_noirq = tb_freeze_noirq, + .thaw_noirq = tb_thaw_noirq, .complete = tb_complete, + .runtime_suspend = tb_runtime_suspend, + .runtime_resume = tb_runtime_resume, .handle_event = tb_handle_event, .approve_switch = tb_tunnel_pci, .approve_xdomain_paths = tb_approve_xdomain_paths, @@ -1344,6 +1532,7 @@ struct tb *tb_probe(struct tb_nhi *nhi) tcm = tb_priv(tb); INIT_LIST_HEAD(&tcm->tunnel_list); INIT_LIST_HEAD(&tcm->dp_resources); + INIT_DELAYED_WORK(&tcm->remove_work, tb_remove_work); return tb; } diff --git a/drivers/thunderbolt/tb.h b/drivers/thunderbolt/tb.h index 3c620a9203c5..a9995e21b916 100644 --- a/drivers/thunderbolt/tb.h +++ b/drivers/thunderbolt/tb.h @@ -125,6 +125,7 @@ struct tb_switch_tmu { * @rpm: The switch supports runtime PM * @authorized: Whether the switch is authorized by user or policy * @security_level: Switch supported security level + * @debugfs_dir: Pointer to the debugfs structure * @key: Contains the key used to challenge the device or %NULL if not * supported. Size of the key is %TB_SWITCH_KEY_SIZE. * @connection_id: Connection ID used with ICM messaging @@ -166,6 +167,7 @@ struct tb_switch { bool rpm; unsigned int authorized; enum tb_security_level security_level; + struct dentry *debugfs_dir; u8 *key; u8 connection_id; u8 connection_key; @@ -333,6 +335,13 @@ struct tb_path { */ #define TB_PATH_MAX_HOPS (7 * 2) +/* Possible wake types */ +#define TB_WAKE_ON_CONNECT BIT(0) +#define TB_WAKE_ON_DISCONNECT BIT(1) +#define TB_WAKE_ON_USB4 BIT(2) +#define TB_WAKE_ON_USB3 BIT(3) +#define TB_WAKE_ON_PCIE BIT(4) + /** * struct tb_cm_ops - Connection manager specific operations vector * @driver_ready: Called right after control channel is started. 
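tb_runtime_resume() cannot free unplugged routers inline, because removing a device may itself runtime-resume it and deadlock against the resume already in progress, so the cleanup is deferred to @remove_work on the domain workqueue and cancelled again in tb_stop(). A minimal sketch of that deferred-cleanup shape with the workqueue API; the structure and names are invented, the 50 ms delay matches the value used above:

    #include <linux/kernel.h>
    #include <linux/workqueue.h>
    #include <linux/jiffies.h>

    struct example_domain {
        struct workqueue_struct *wq;
        struct delayed_work remove_work;
    };

    static void example_remove_work(struct work_struct *work)
    {
        struct example_domain *d =
            container_of(work, struct example_domain, remove_work.work);

        /* Safe place to drop unplugged children: not on the resume path */
        (void)d;
    }

    static void example_start(struct example_domain *d)
    {
        INIT_DELAYED_WORK(&d->remove_work, example_remove_work);
    }

    static void example_runtime_resume(struct example_domain *d)
    {
        /* Defer cleanup so device removal cannot deadlock this resume */
        queue_delayed_work(d->wq, &d->remove_work, msecs_to_jiffies(50));
    }

    static void example_stop(struct example_domain *d)
    {
        cancel_delayed_work(&d->remove_work);
    }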
Used by @@ -342,6 +351,8 @@ struct tb_path { * @suspend_noirq: Connection manager specific suspend_noirq * @resume_noirq: Connection manager specific resume_noirq * @suspend: Connection manager specific suspend + * @freeze_noirq: Connection manager specific freeze_noirq + * @thaw_noirq: Connection manager specific thaw_noirq * @complete: Connection manager specific complete * @runtime_suspend: Connection manager specific runtime_suspend * @runtime_resume: Connection manager specific runtime_resume @@ -364,6 +375,8 @@ struct tb_cm_ops { int (*suspend_noirq)(struct tb *tb); int (*resume_noirq)(struct tb *tb); int (*suspend)(struct tb *tb); + int (*freeze_noirq)(struct tb *tb); + int (*thaw_noirq)(struct tb *tb); void (*complete)(struct tb *tb); int (*runtime_suspend)(struct tb *tb); int (*runtime_resume)(struct tb *tb); @@ -457,6 +470,11 @@ static inline bool tb_port_is_null(const struct tb_port *port) return port && port->port && port->config.type == TB_TYPE_PORT; } +static inline bool tb_port_is_nhi(const struct tb_port *port) +{ + return port && port->config.type == TB_TYPE_NHI; +} + static inline bool tb_port_is_pcie_down(const struct tb_port *port) { return port && port->config.type == TB_TYPE_PCIE_DOWN; @@ -593,6 +611,8 @@ void tb_domain_remove(struct tb *tb); int tb_domain_suspend_noirq(struct tb *tb); int tb_domain_resume_noirq(struct tb *tb); int tb_domain_suspend(struct tb *tb); +int tb_domain_freeze_noirq(struct tb *tb); +int tb_domain_thaw_noirq(struct tb *tb); void tb_domain_complete(struct tb *tb); int tb_domain_runtime_suspend(struct tb *tb); int tb_domain_runtime_resume(struct tb *tb); @@ -632,9 +652,9 @@ struct tb_switch *tb_switch_alloc_safe_mode(struct tb *tb, int tb_switch_configure(struct tb_switch *sw); int tb_switch_add(struct tb_switch *sw); void tb_switch_remove(struct tb_switch *sw); -void tb_switch_suspend(struct tb_switch *sw); +void tb_switch_suspend(struct tb_switch *sw, bool runtime); int tb_switch_resume(struct tb_switch *sw); -int tb_switch_reset(struct tb *tb, u64 route); +int tb_switch_reset(struct tb_switch *sw); void tb_sw_set_unplugged(struct tb_switch *sw); struct tb_port *tb_switch_find_port(struct tb_switch *sw, enum tb_port_type type); @@ -685,59 +705,89 @@ static inline struct tb_switch *tb_switch_parent(struct tb_switch *sw) static inline bool tb_switch_is_light_ridge(const struct tb_switch *sw) { - return sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE; + return sw->config.vendor_id == PCI_VENDOR_ID_INTEL && + sw->config.device_id == PCI_DEVICE_ID_INTEL_LIGHT_RIDGE; } static inline bool tb_switch_is_eagle_ridge(const struct tb_switch *sw) { - return sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE; + return sw->config.vendor_id == PCI_VENDOR_ID_INTEL && + sw->config.device_id == PCI_DEVICE_ID_INTEL_EAGLE_RIDGE; } static inline bool tb_switch_is_cactus_ridge(const struct tb_switch *sw) { - switch (sw->config.device_id) { - case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C: - case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C: - return true; - default: - return false; + if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { + switch (sw->config.device_id) { + case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_2C: + case PCI_DEVICE_ID_INTEL_CACTUS_RIDGE_4C: + return true; + } } + return false; } static inline bool tb_switch_is_falcon_ridge(const struct tb_switch *sw) { - switch (sw->config.device_id) { - case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE: - case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE: - return true; - default: - return false; + if (sw->config.vendor_id 
== PCI_VENDOR_ID_INTEL) { + switch (sw->config.device_id) { + case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_2C_BRIDGE: + case PCI_DEVICE_ID_INTEL_FALCON_RIDGE_4C_BRIDGE: + return true; + } } + return false; } static inline bool tb_switch_is_alpine_ridge(const struct tb_switch *sw) { - switch (sw->config.device_id) { - case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE: - case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: - case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: - case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: - return true; - default: - return false; + if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { + switch (sw->config.device_id) { + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_2C_BRIDGE: + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_LP_BRIDGE: + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_4C_BRIDGE: + case PCI_DEVICE_ID_INTEL_ALPINE_RIDGE_C_2C_BRIDGE: + return true; + } } + return false; } static inline bool tb_switch_is_titan_ridge(const struct tb_switch *sw) { - switch (sw->config.device_id) { - case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: - case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: - case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE: - return true; - default: - return false; + if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { + switch (sw->config.device_id) { + case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_2C_BRIDGE: + case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_4C_BRIDGE: + case PCI_DEVICE_ID_INTEL_TITAN_RIDGE_DD_BRIDGE: + return true; + } + } + return false; +} + +static inline bool tb_switch_is_ice_lake(const struct tb_switch *sw) +{ + if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { + switch (sw->config.device_id) { + case PCI_DEVICE_ID_INTEL_ICL_NHI0: + case PCI_DEVICE_ID_INTEL_ICL_NHI1: + return true; + } + } + return false; +} + +static inline bool tb_switch_is_tiger_lake(const struct tb_switch *sw) +{ + if (sw->config.vendor_id == PCI_VENDOR_ID_INTEL) { + switch (sw->config.device_id) { + case PCI_DEVICE_ID_INTEL_TGL_NHI0: + case PCI_DEVICE_ID_INTEL_TGL_NHI1: + return true; + } } + return false; } /** @@ -767,6 +817,8 @@ static inline bool tb_switch_is_icm(const struct tb_switch *sw) int tb_switch_lane_bonding_enable(struct tb_switch *sw); void tb_switch_lane_bonding_disable(struct tb_switch *sw); +int tb_switch_configure_link(struct tb_switch *sw); +void tb_switch_unconfigure_link(struct tb_switch *sw); bool tb_switch_query_dp_resource(struct tb_switch *sw, struct tb_port *in); int tb_switch_alloc_dp_resource(struct tb_switch *sw, struct tb_port *in); @@ -788,6 +840,8 @@ int tb_port_add_nfc_credits(struct tb_port *port, int credits); int tb_port_set_initial_credits(struct tb_port *port, u32 credits); int tb_port_clear_counter(struct tb_port *port, int counter); int tb_port_unlock(struct tb_port *port); +int tb_port_enable(struct tb_port *port); +int tb_port_disable(struct tb_port *port); int tb_port_alloc_in_hopid(struct tb_port *port, int hopid, int max_hopid); void tb_port_release_in_hopid(struct tb_port *port, int hopid); int tb_port_alloc_out_hopid(struct tb_port *port, int hopid, int max_hopid); @@ -811,7 +865,9 @@ int tb_port_get_link_speed(struct tb_port *port); int tb_switch_find_vse_cap(struct tb_switch *sw, enum tb_switch_vse_cap vsec); int tb_switch_find_cap(struct tb_switch *sw, enum tb_switch_cap cap); +int tb_switch_next_cap(struct tb_switch *sw, unsigned int offset); int tb_port_find_cap(struct tb_port *port, enum tb_port_cap cap); +int tb_port_next_cap(struct tb_port *port, unsigned int offset); bool tb_port_is_enabled(struct tb_port *port); bool tb_usb3_port_is_enabled(struct 
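The tb_switch_is_*() helpers above previously matched on the PCI device ID alone; device IDs are only unique within one vendor's ID space, so a non-Intel router could in principle alias one of them, and every predicate now also requires config.vendor_id == PCI_VENDOR_ID_INTEL. A small stand-alone illustration of the vendor-then-device match; the numeric IDs below are placeholders, not real hardware:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define EXAMPLE_VENDOR_ID   0x1234  /* placeholder vendor */
    #define EXAMPLE_DEVICE_ID   0xabcd  /* placeholder device */

    struct pci_ids {
        uint16_t vendor;
        uint16_t device;
    };

    /* Device IDs are only meaningful inside one vendor's namespace */
    static bool is_example_router(const struct pci_ids *id)
    {
        if (id->vendor != EXAMPLE_VENDOR_ID)
            return false;

        switch (id->device) {
        case EXAMPLE_DEVICE_ID:
            return true;
        default:
            return false;
        }
    }

    int main(void)
    {
        struct pci_ids ours  = { EXAMPLE_VENDOR_ID, EXAMPLE_DEVICE_ID };
        struct pci_ids alias = { 0x5678, EXAMPLE_DEVICE_ID }; /* other vendor */

        printf("%d %d\n", is_example_router(&ours), is_example_router(&alias));
        return 0;
    }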
tb_port *port); @@ -844,8 +900,11 @@ int tb_drom_read(struct tb_switch *sw); int tb_drom_read_uid_only(struct tb_switch *sw, u64 *uid); int tb_lc_read_uuid(struct tb_switch *sw, u32 *uuid); -int tb_lc_configure_link(struct tb_switch *sw); -void tb_lc_unconfigure_link(struct tb_switch *sw); +int tb_lc_configure_port(struct tb_port *port); +void tb_lc_unconfigure_port(struct tb_port *port); +int tb_lc_configure_xdomain(struct tb_port *port); +void tb_lc_unconfigure_xdomain(struct tb_port *port); +int tb_lc_set_wake(struct tb_switch *sw, unsigned int flags); int tb_lc_set_sleep(struct tb_switch *sw); bool tb_lc_lane_bonding_possible(struct tb_switch *sw); bool tb_lc_dp_sink_query(struct tb_switch *sw, struct tb_port *in); @@ -900,9 +959,8 @@ int usb4_switch_setup(struct tb_switch *sw); int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid); int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf, size_t size); -int usb4_switch_configure_link(struct tb_switch *sw); -void usb4_switch_unconfigure_link(struct tb_switch *sw); bool usb4_switch_lane_bonding_possible(struct tb_switch *sw); +int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags); int usb4_switch_set_sleep(struct tb_switch *sw); int usb4_switch_nvm_sector_size(struct tb_switch *sw); int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf, @@ -919,6 +977,10 @@ struct tb_port *usb4_switch_map_usb3_down(struct tb_switch *sw, const struct tb_port *port); int usb4_port_unlock(struct tb_port *port); +int usb4_port_configure(struct tb_port *port); +void usb4_port_unconfigure(struct tb_port *port); +int usb4_port_configure_xdomain(struct tb_port *port); +void usb4_port_unconfigure_xdomain(struct tb_port *port); int usb4_port_enumerate_retimers(struct tb_port *port); int usb4_port_retimer_read(struct tb_port *port, u8 index, u8 reg, void *buf, @@ -945,9 +1007,35 @@ int usb4_usb3_port_allocate_bandwidth(struct tb_port *port, int *upstream_bw, int usb4_usb3_port_release_bandwidth(struct tb_port *port, int *upstream_bw, int *downstream_bw); -/* keep link controller awake during update */ +/* Keep link controller awake during update */ #define QUIRK_FORCE_POWER_LINK_CONTROLLER BIT(0) void tb_check_quirks(struct tb_switch *sw); +#ifdef CONFIG_ACPI +void tb_acpi_add_links(struct tb_nhi *nhi); +#else +static inline void tb_acpi_add_links(struct tb_nhi *nhi) { } +#endif + +#ifdef CONFIG_DEBUG_FS +void tb_debugfs_init(void); +void tb_debugfs_exit(void); +void tb_switch_debugfs_init(struct tb_switch *sw); +void tb_switch_debugfs_remove(struct tb_switch *sw); +#else +static inline void tb_debugfs_init(void) { } +static inline void tb_debugfs_exit(void) { } +static inline void tb_switch_debugfs_init(struct tb_switch *sw) { } +static inline void tb_switch_debugfs_remove(struct tb_switch *sw) { } +#endif + +#ifdef CONFIG_USB4_KUNIT_TEST +int tb_test_init(void); +void tb_test_exit(void); +#else +static inline int tb_test_init(void) { return 0; } +static inline void tb_test_exit(void) { } +#endif + #endif diff --git a/drivers/thunderbolt/tb_msgs.h b/drivers/thunderbolt/tb_msgs.h index fc208c567953..0e01dbc63e72 100644 --- a/drivers/thunderbolt/tb_msgs.h +++ b/drivers/thunderbolt/tb_msgs.h @@ -28,6 +28,7 @@ enum tb_cfg_error { TB_CFG_ERROR_LOOP = 8, TB_CFG_ERROR_HEC_ERROR_DETECTED = 12, TB_CFG_ERROR_FLOW_CONTROL_ERROR = 13, + TB_CFG_ERROR_LOCK = 15, }; /* common header */ diff --git a/drivers/thunderbolt/tb_regs.h b/drivers/thunderbolt/tb_regs.h index fd4fc144d17f..e7d9529822fa 100644 --- 
a/drivers/thunderbolt/tb_regs.h +++ b/drivers/thunderbolt/tb_regs.h @@ -39,6 +39,7 @@ enum tb_switch_vse_cap { enum tb_port_cap { TB_PORT_CAP_PHY = 0x01, + TB_PORT_CAP_POWER = 0x02, TB_PORT_CAP_TIME1 = 0x03, TB_PORT_CAP_ADAP = 0x04, TB_PORT_CAP_VSE = 0x05, @@ -93,6 +94,20 @@ struct tb_cap_extended_long { u16 length; } __packed; +/** + * struct tb_cap_any - Structure capable of hold every capability + * @basic: Basic capability + * @extended_short: Vendor specific capability + * @extended_long: Vendor specific extended capability + */ +struct tb_cap_any { + union { + struct tb_cap_basic basic; + struct tb_cap_extended_short extended_short; + struct tb_cap_extended_long extended_long; + }; +} __packed; + /* capabilities */ struct tb_cap_link_controller { @@ -178,6 +193,8 @@ struct tb_regs_switch_header { #define ROUTER_CS_4 0x04 #define ROUTER_CS_5 0x05 #define ROUTER_CS_5_SLP BIT(0) +#define ROUTER_CS_5_WOP BIT(1) +#define ROUTER_CS_5_WOU BIT(2) #define ROUTER_CS_5_C3S BIT(23) #define ROUTER_CS_5_PTO BIT(24) #define ROUTER_CS_5_UTO BIT(25) @@ -186,6 +203,8 @@ struct tb_regs_switch_header { #define ROUTER_CS_6 0x06 #define ROUTER_CS_6_SLPR BIT(0) #define ROUTER_CS_6_TNS BIT(1) +#define ROUTER_CS_6_WOPS BIT(2) +#define ROUTER_CS_6_WOUS BIT(3) #define ROUTER_CS_6_HCI BIT(18) #define ROUTER_CS_6_CR BIT(25) #define ROUTER_CS_7 0x07 @@ -234,7 +253,8 @@ struct tb_regs_port_header { /* DWORD 1 */ u32 first_cap_offset:8; u32 max_counters:11; - u32 __unknown1:5; + u32 counters_support:1; + u32 __unknown1:4; u32 revision:8; /* DWORD 2 */ enum tb_port_type type:24; @@ -279,6 +299,7 @@ struct tb_regs_port_header { #define LANE_ADP_CS_1_TARGET_WIDTH_SHIFT 4 #define LANE_ADP_CS_1_TARGET_WIDTH_SINGLE 0x1 #define LANE_ADP_CS_1_TARGET_WIDTH_DUAL 0x3 +#define LANE_ADP_CS_1_LD BIT(14) #define LANE_ADP_CS_1_LB BIT(15) #define LANE_ADP_CS_1_CURRENT_SPEED_MASK GENMASK(19, 16) #define LANE_ADP_CS_1_CURRENT_SPEED_SHIFT 16 @@ -301,8 +322,13 @@ struct tb_regs_port_header { #define PORT_CS_18 0x12 #define PORT_CS_18_BE BIT(8) #define PORT_CS_18_TCM BIT(9) +#define PORT_CS_18_WOU4S BIT(18) #define PORT_CS_19 0x13 #define PORT_CS_19_PC BIT(3) +#define PORT_CS_19_PID BIT(4) +#define PORT_CS_19_WOC BIT(16) +#define PORT_CS_19_WOD BIT(17) +#define PORT_CS_19_WOU4 BIT(18) /* Display Port adapter registers */ #define ADP_DP_CS_0 0x00 @@ -416,8 +442,14 @@ struct tb_regs_hop { #define TB_LC_PORT_ATTR_BE BIT(12) #define TB_LC_SX_CTRL 0x96 +#define TB_LC_SX_CTRL_WOC BIT(1) +#define TB_LC_SX_CTRL_WOD BIT(2) +#define TB_LC_SX_CTRL_WOU4 BIT(5) +#define TB_LC_SX_CTRL_WOP BIT(6) #define TB_LC_SX_CTRL_L1C BIT(16) +#define TB_LC_SX_CTRL_L1D BIT(17) #define TB_LC_SX_CTRL_L2C BIT(20) +#define TB_LC_SX_CTRL_L2D BIT(21) #define TB_LC_SX_CTRL_UPSTREAM BIT(30) #define TB_LC_SX_CTRL_SLP BIT(31) diff --git a/drivers/thunderbolt/test.c b/drivers/thunderbolt/test.c index a4d78811f7e2..464c2d37b992 100644 --- a/drivers/thunderbolt/test.c +++ b/drivers/thunderbolt/test.c @@ -1623,4 +1623,15 @@ static struct kunit_suite tb_test_suite = { .name = "thunderbolt", .test_cases = tb_test_cases, }; -kunit_test_suite(tb_test_suite); + +static struct kunit_suite *tb_test_suites[] = { &tb_test_suite, NULL }; + +int tb_test_init(void) +{ + return __kunit_test_suites_init(tb_test_suites); +} + +void tb_test_exit(void) +{ + return __kunit_test_suites_exit(tb_test_suites); +} diff --git a/drivers/thunderbolt/usb4.c b/drivers/thunderbolt/usb4.c index 2b8355e6b65f..40f13579a3fe 100644 --- a/drivers/thunderbolt/usb4.c +++ b/drivers/thunderbolt/usb4.c @@ -196,6 
+196,46 @@ static int usb4_switch_op(struct tb_switch *sw, u16 opcode, u8 *status) return 0; } +static void usb4_switch_check_wakes(struct tb_switch *sw) +{ + struct tb_port *port; + bool wakeup = false; + u32 val; + + if (!device_may_wakeup(&sw->dev)) + return; + + if (tb_route(sw)) { + if (tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_6, 1)) + return; + + tb_sw_dbg(sw, "PCIe wake: %s, USB3 wake: %s\n", + (val & ROUTER_CS_6_WOPS) ? "yes" : "no", + (val & ROUTER_CS_6_WOUS) ? "yes" : "no"); + + wakeup = val & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS); + } + + /* Check for any connected downstream ports for USB4 wake */ + tb_switch_for_each_port(sw, port) { + if (!tb_port_has_remote(port)) + continue; + + if (tb_port_read(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_18, 1)) + break; + + tb_port_dbg(port, "USB4 wake: %s\n", + (val & PORT_CS_18_WOU4S) ? "yes" : "no"); + + if (val & PORT_CS_18_WOU4S) + wakeup = true; + } + + if (wakeup) + pm_wakeup_event(&sw->dev, 0); +} + static bool link_is_usb4(struct tb_port *port) { u32 val; @@ -229,6 +269,8 @@ int usb4_switch_setup(struct tb_switch *sw) u32 val = 0; int ret; + usb4_switch_check_wakes(sw); + if (!tb_route(sw)) return 0; @@ -338,87 +380,99 @@ int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf, usb4_switch_drom_read_block, sw); } -static int usb4_set_port_configured(struct tb_port *port, bool configured) +/** + * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding + * @sw: USB4 router + * + * Checks whether conditions are met so that lane bonding can be + * established with the upstream router. Call only for device routers. + */ +bool usb4_switch_lane_bonding_possible(struct tb_switch *sw) { + struct tb_port *up; int ret; u32 val; - ret = tb_port_read(port, &val, TB_CFG_PORT, - port->cap_usb4 + PORT_CS_19, 1); + up = tb_upstream_port(sw); + ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1); if (ret) - return ret; - - if (configured) - val |= PORT_CS_19_PC; - else - val &= ~PORT_CS_19_PC; + return false; - return tb_port_write(port, &val, TB_CFG_PORT, - port->cap_usb4 + PORT_CS_19, 1); + return !!(val & PORT_CS_18_BE); } /** - * usb4_switch_configure_link() - Set upstream USB4 link configured + * usb4_switch_set_wake() - Enabled/disable wake * @sw: USB4 router + * @flags: Wakeup flags (%0 to disable) * - * Sets the upstream USB4 link to be configured for power management - * purposes. + * Enables/disables router to wake up from sleep. */ -int usb4_switch_configure_link(struct tb_switch *sw) +int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags) { - struct tb_port *up; + struct tb_port *port; + u64 route = tb_route(sw); + u32 val; + int ret; - if (!tb_route(sw)) - return 0; + /* + * Enable wakes coming from all USB4 downstream ports (from + * child routers). For device routers do this also for the + * upstream USB4 port. + */ + tb_switch_for_each_port(sw, port) { + if (!route && tb_is_upstream_port(port)) + continue; - up = tb_upstream_port(sw); - return usb4_set_port_configured(up, true); -} + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); + if (ret) + return ret; -/** - * usb4_switch_unconfigure_link() - Un-set upstream USB4 link configuration - * @sw: USB4 router - * - * Reverse of usb4_switch_configure_link(). 
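usb4_switch_check_wakes() runs early in usb4_switch_setup() and inspects the sticky wake-status bits the hardware latched while asleep: ROUTER_CS_6 reports PCIe/USB 3.x wakes on device routers, PORT_CS_18 reports USB4 wakes per port, and if any of them is set, pm_wakeup_event() credits the router as the wake source. A stand-alone model of that decoding, with the bit positions taken from the tb_regs.h hunk above; the helper and its arguments are invented:

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n)              (1U << (n))
    #define ROUTER_CS_6_WOPS    BIT(2)   /* woke because of PCIe */
    #define ROUTER_CS_6_WOUS    BIT(3)   /* woke because of USB 3.x */
    #define PORT_CS_18_WOU4S    BIT(18)  /* USB4 port wake status */

    /* Invented for illustration: decode the sticky wake-status bits */
    static bool router_was_woken(uint32_t router_cs_6,
                                 const uint32_t *port_cs_18, int nports)
    {
        bool wakeup = router_cs_6 & (ROUTER_CS_6_WOPS | ROUTER_CS_6_WOUS);
        int i;

        for (i = 0; i < nports; i++)
            if (port_cs_18[i] & PORT_CS_18_WOU4S)
                wakeup = true;

        return wakeup;
    }

    int main(void)
    {
        uint32_t ports[2] = { 0, PORT_CS_18_WOU4S };

        printf("%d\n", router_was_woken(0, ports, 2)); /* 1: USB4 port wake */
        return 0;
    }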
- */ -void usb4_switch_unconfigure_link(struct tb_switch *sw) -{ - struct tb_port *up; + val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4); - if (sw->is_unplugged || !tb_route(sw)) - return; + if (flags & TB_WAKE_ON_CONNECT) + val |= PORT_CS_19_WOC; + if (flags & TB_WAKE_ON_DISCONNECT) + val |= PORT_CS_19_WOD; + if (flags & TB_WAKE_ON_USB4) + val |= PORT_CS_19_WOU4; - up = tb_upstream_port(sw); - usb4_set_port_configured(up, false); -} + ret = tb_port_write(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); + if (ret) + return ret; + } -/** - * usb4_switch_lane_bonding_possible() - Are conditions met for lane bonding - * @sw: USB4 router - * - * Checks whether conditions are met so that lane bonding can be - * established with the upstream router. Call only for device routers. - */ -bool usb4_switch_lane_bonding_possible(struct tb_switch *sw) -{ - struct tb_port *up; - int ret; - u32 val; + /* + * Enable wakes from PCIe and USB 3.x on this router. Only + * needed for device routers. + */ + if (route) { + ret = tb_sw_read(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); + if (ret) + return ret; - up = tb_upstream_port(sw); - ret = tb_port_read(up, &val, TB_CFG_PORT, up->cap_usb4 + PORT_CS_18, 1); - if (ret) - return false; + val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU); + if (flags & TB_WAKE_ON_USB3) + val |= ROUTER_CS_5_WOU; + if (flags & TB_WAKE_ON_PCIE) + val |= ROUTER_CS_5_WOP; - return !!(val & PORT_CS_18_BE); + ret = tb_sw_write(sw, &val, TB_CFG_SWITCH, ROUTER_CS_5, 1); + if (ret) + return ret; + } + + return 0; } /** * usb4_switch_set_sleep() - Prepare the router to enter sleep * @sw: USB4 router * - * Enables wakes and sets sleep bit for the router. Returns when the - * router sleep ready bit has been asserted. + * Sets sleep bit for the router. Returns when the router sleep ready + * bit has been asserted. */ int usb4_switch_set_sleep(struct tb_switch *sw) { @@ -795,6 +849,95 @@ int usb4_port_unlock(struct tb_port *port) return tb_port_write(port, &val, TB_CFG_PORT, ADP_CS_4, 1); } +static int usb4_port_set_configured(struct tb_port *port, bool configured) +{ + int ret; + u32 val; + + if (!port->cap_usb4) + return -EINVAL; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); + if (ret) + return ret; + + if (configured) + val |= PORT_CS_19_PC; + else + val &= ~PORT_CS_19_PC; + + return tb_port_write(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); +} + +/** + * usb4_port_configure() - Set USB4 port configured + * @port: USB4 router + * + * Sets the USB4 link to be configured for power management purposes. + */ +int usb4_port_configure(struct tb_port *port) +{ + return usb4_port_set_configured(port, true); +} + +/** + * usb4_port_unconfigure() - Set USB4 port unconfigured + * @port: USB4 router + * + * Sets the USB4 link to be unconfigured for power management purposes. 
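usb4_switch_set_wake() translates the generic TB_WAKE_ON_* flags into register bits: every downstream-facing USB4 port (and, on device routers, the upstream port as well) gets the connect/disconnect/USB4 wake bits in PORT_CS_19, while device routers additionally program the PCIe and USB 3.x wake bits in ROUTER_CS_5. The read-modify-write below models that translation stand-alone with the bit values from this series; no hardware access, helper names invented:

    #include <stdint.h>
    #include <stdio.h>

    #define BIT(n)                  (1U << (n))
    #define TB_WAKE_ON_CONNECT      BIT(0)
    #define TB_WAKE_ON_DISCONNECT   BIT(1)
    #define TB_WAKE_ON_USB4         BIT(2)
    #define TB_WAKE_ON_USB3         BIT(3)
    #define TB_WAKE_ON_PCIE         BIT(4)

    #define PORT_CS_19_WOC          BIT(16)
    #define PORT_CS_19_WOD          BIT(17)
    #define PORT_CS_19_WOU4         BIT(18)
    #define ROUTER_CS_5_WOP         BIT(1)
    #define ROUTER_CS_5_WOU         BIT(2)

    /* Per-port wake bits derived from the generic flags */
    static uint32_t port_cs_19_rmw(uint32_t val, unsigned int flags)
    {
        val &= ~(PORT_CS_19_WOC | PORT_CS_19_WOD | PORT_CS_19_WOU4);
        if (flags & TB_WAKE_ON_CONNECT)
            val |= PORT_CS_19_WOC;
        if (flags & TB_WAKE_ON_DISCONNECT)
            val |= PORT_CS_19_WOD;
        if (flags & TB_WAKE_ON_USB4)
            val |= PORT_CS_19_WOU4;
        return val;
    }

    /* Router-level PCIe/USB3 wake bits, only needed for device routers */
    static uint32_t router_cs_5_rmw(uint32_t val, unsigned int flags)
    {
        val &= ~(ROUTER_CS_5_WOP | ROUTER_CS_5_WOU);
        if (flags & TB_WAKE_ON_USB3)
            val |= ROUTER_CS_5_WOU;
        if (flags & TB_WAKE_ON_PCIE)
            val |= ROUTER_CS_5_WOP;
        return val;
    }

    int main(void)
    {
        unsigned int flags = TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;

        printf("PORT_CS_19: %#x, ROUTER_CS_5: %#x\n",
               port_cs_19_rmw(0, flags), router_cs_5_rmw(0, flags));
        return 0;
    }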
+ */ +void usb4_port_unconfigure(struct tb_port *port) +{ + usb4_port_set_configured(port, false); +} + +static int usb4_set_xdomain_configured(struct tb_port *port, bool configured) +{ + int ret; + u32 val; + + if (!port->cap_usb4) + return -EINVAL; + + ret = tb_port_read(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); + if (ret) + return ret; + + if (configured) + val |= PORT_CS_19_PID; + else + val &= ~PORT_CS_19_PID; + + return tb_port_write(port, &val, TB_CFG_PORT, + port->cap_usb4 + PORT_CS_19, 1); +} + +/** + * usb4_port_configure_xdomain() - Configure port for XDomain + * @port: USB4 port connected to another host + * + * Marks the USB4 port as being connected to another host. Returns %0 in + * success and negative errno in failure. + */ +int usb4_port_configure_xdomain(struct tb_port *port) +{ + return usb4_set_xdomain_configured(port, true); +} + +/** + * usb4_port_unconfigure_xdomain() - Unconfigure port for XDomain + * @port: USB4 port that was connected to another host + * + * Clears USB4 port from being marked as XDomain. + */ +void usb4_port_unconfigure_xdomain(struct tb_port *port) +{ + usb4_set_xdomain_configured(port, false); +} + static int usb4_port_wait_for_bit(struct tb_port *port, u32 offset, u32 bit, u32 value, int timeout_msec) { diff --git a/drivers/usb/atm/cxacru.c b/drivers/usb/atm/cxacru.c index ea66f8f385ba..e62a770a5d3b 100644 --- a/drivers/usb/atm/cxacru.c +++ b/drivers/usb/atm/cxacru.c @@ -230,12 +230,12 @@ CXACRU__ATTR_INIT(_name) static ssize_t cxacru_sysfs_showattr_u32(u32 value, char *buf) { - return snprintf(buf, PAGE_SIZE, "%u\n", value); + return sprintf(buf, "%u\n", value); } static ssize_t cxacru_sysfs_showattr_s8(s8 value, char *buf) { - return snprintf(buf, PAGE_SIZE, "%d\n", value); + return sprintf(buf, "%d\n", value); } static ssize_t cxacru_sysfs_showattr_dB(s16 value, char *buf) @@ -255,8 +255,8 @@ static ssize_t cxacru_sysfs_showattr_bool(u32 value, char *buf) static char *str[] = { "no", "yes" }; if (unlikely(value >= ARRAY_SIZE(str))) - return snprintf(buf, PAGE_SIZE, "%u\n", value); - return snprintf(buf, PAGE_SIZE, "%s\n", str[value]); + return sprintf(buf, "%u\n", value); + return sprintf(buf, "%s\n", str[value]); } static ssize_t cxacru_sysfs_showattr_LINK(u32 value, char *buf) @@ -264,8 +264,8 @@ static ssize_t cxacru_sysfs_showattr_LINK(u32 value, char *buf) static char *str[] = { NULL, "not connected", "connected", "lost" }; if (unlikely(value >= ARRAY_SIZE(str) || str[value] == NULL)) - return snprintf(buf, PAGE_SIZE, "%u\n", value); - return snprintf(buf, PAGE_SIZE, "%s\n", str[value]); + return sprintf(buf, "%u\n", value); + return sprintf(buf, "%s\n", str[value]); } static ssize_t cxacru_sysfs_showattr_LINE(u32 value, char *buf) @@ -275,8 +275,8 @@ static ssize_t cxacru_sysfs_showattr_LINE(u32 value, char *buf) "waiting", "initialising" }; if (unlikely(value >= ARRAY_SIZE(str))) - return snprintf(buf, PAGE_SIZE, "%u\n", value); - return snprintf(buf, PAGE_SIZE, "%s\n", str[value]); + return sprintf(buf, "%u\n", value); + return sprintf(buf, "%s\n", str[value]); } static ssize_t cxacru_sysfs_showattr_MODU(u32 value, char *buf) @@ -288,8 +288,8 @@ static ssize_t cxacru_sysfs_showattr_MODU(u32 value, char *buf) "ITU-T G.992.2 (G.LITE)" }; if (unlikely(value >= ARRAY_SIZE(str))) - return snprintf(buf, PAGE_SIZE, "%u\n", value); - return snprintf(buf, PAGE_SIZE, "%s\n", str[value]); + return sprintf(buf, "%u\n", value); + return sprintf(buf, "%s\n", str[value]); } /* @@ -309,8 +309,7 @@ static ssize_t 
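The cxacru hunks swap snprintf(buf, PAGE_SIZE, ...) for plain sprintf() in the sysfs show() callbacks: the sysfs core always hands the callback a buffer of at least PAGE_SIZE bytes and these attributes print a handful of characters at most, so the explicit bound adds nothing. A hedged sketch of the resulting shape (device and attribute are made up); recent kernels also offer sysfs_emit() for the same purpose:

    #include <linux/kernel.h>
    #include <linux/device.h>
    #include <linux/sysfs.h>

    struct example_priv {
        u32 link_speed;
    };

    /* sysfs guarantees @buf is at least PAGE_SIZE bytes */
    static ssize_t link_speed_show(struct device *dev,
                                   struct device_attribute *attr, char *buf)
    {
        struct example_priv *p = dev_get_drvdata(dev);

        return sprintf(buf, "%u\n", p->link_speed);
    }
    static DEVICE_ATTR_RO(link_speed);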
mac_address_show(struct device *dev, if (instance == NULL || instance->usbatm->atm_dev == NULL) return -ENODEV; - return snprintf(buf, PAGE_SIZE, "%pM\n", - instance->usbatm->atm_dev->esi); + return sprintf(buf, "%pM\n", instance->usbatm->atm_dev->esi); } static ssize_t adsl_state_show(struct device *dev, @@ -326,8 +325,8 @@ static ssize_t adsl_state_show(struct device *dev, value = instance->card_info[CXINF_LINE_STARTABLE]; if (unlikely(value >= ARRAY_SIZE(str))) - return snprintf(buf, PAGE_SIZE, "%u\n", value); - return snprintf(buf, PAGE_SIZE, "%s\n", str[value]); + return sprintf(buf, "%u\n", value); + return sprintf(buf, "%s\n", str[value]); } static ssize_t adsl_state_store(struct device *dev, diff --git a/drivers/usb/atm/usbatm.c b/drivers/usb/atm/usbatm.c index 4e12a32ca392..56fe30d247da 100644 --- a/drivers/usb/atm/usbatm.c +++ b/drivers/usb/atm/usbatm.c @@ -511,9 +511,10 @@ static unsigned int usbatm_write_cells(struct usbatm_data *instance, ** receive ** **************/ -static void usbatm_rx_process(unsigned long data) +static void usbatm_rx_process(struct tasklet_struct *t) { - struct usbatm_data *instance = (struct usbatm_data *)data; + struct usbatm_data *instance = from_tasklet(instance, t, + rx_channel.tasklet); struct urb *urb; while ((urb = usbatm_pop_urb(&instance->rx_channel))) { @@ -564,9 +565,10 @@ static void usbatm_rx_process(unsigned long data) ** send ** ***********/ -static void usbatm_tx_process(unsigned long data) +static void usbatm_tx_process(struct tasklet_struct *t) { - struct usbatm_data *instance = (struct usbatm_data *)data; + struct usbatm_data *instance = from_tasklet(instance, t, + tx_channel.tasklet); struct sk_buff *skb = instance->current_skb; struct urb *urb = NULL; const unsigned int buf_size = instance->tx_channel.buf_size; @@ -1069,8 +1071,8 @@ int usbatm_usb_probe(struct usb_interface *intf, const struct usb_device_id *id, usbatm_init_channel(&instance->rx_channel); usbatm_init_channel(&instance->tx_channel); - tasklet_init(&instance->rx_channel.tasklet, usbatm_rx_process, (unsigned long)instance); - tasklet_init(&instance->tx_channel.tasklet, usbatm_tx_process, (unsigned long)instance); + tasklet_setup(&instance->rx_channel.tasklet, usbatm_rx_process); + tasklet_setup(&instance->tx_channel.tasklet, usbatm_tx_process); instance->rx_channel.stride = ATM_CELL_SIZE + driver->rx_padding; instance->tx_channel.stride = ATM_CELL_SIZE + driver->tx_padding; instance->rx_channel.usbatm = instance->tx_channel.usbatm = instance; diff --git a/drivers/usb/c67x00/c67x00-sched.c b/drivers/usb/c67x00/c67x00-sched.c index 60f4711717d2..e65f1a0ae80b 100644 --- a/drivers/usb/c67x00/c67x00-sched.c +++ b/drivers/usb/c67x00/c67x00-sched.c @@ -1123,9 +1123,9 @@ static void c67x00_do_work(struct c67x00_hcd *c67x00) /* -------------------------------------------------------------------------- */ -static void c67x00_sched_tasklet(unsigned long __c67x00) +static void c67x00_sched_tasklet(struct tasklet_struct *t) { - struct c67x00_hcd *c67x00 = (struct c67x00_hcd *)__c67x00; + struct c67x00_hcd *c67x00 = from_tasklet(c67x00, t, tasklet); c67x00_do_work(c67x00); } @@ -1136,8 +1136,7 @@ void c67x00_sched_kick(struct c67x00_hcd *c67x00) int c67x00_sched_start_scheduler(struct c67x00_hcd *c67x00) { - tasklet_init(&c67x00->tasklet, c67x00_sched_tasklet, - (unsigned long)c67x00); + tasklet_setup(&c67x00->tasklet, c67x00_sched_tasklet); return 0; } diff --git a/drivers/usb/cdns3/cdns3-imx.c b/drivers/usb/cdns3/cdns3-imx.c index aba988e71958..54a2d70a9c73 100644 --- 
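usbatm and c67x00 are converted from tasklet_init() with an unsigned long cookie to tasklet_setup(), where the callback receives the tasklet_struct itself and recovers its container through from_tasklet(), a container_of() wrapper. A minimal sketch of the converted pattern; the structure and function names are invented:

    #include <linux/interrupt.h>

    struct example_channel {
        struct tasklet_struct tasklet;
        /* ... queue of URBs to process ... */
    };

    static void example_process(struct tasklet_struct *t)
    {
        /* from_tasklet() is container_of() keyed on the tasklet member */
        struct example_channel *ch = from_tasklet(ch, t, tasklet);

        /* ... drain ch's pending work here ... */
        (void)ch;
    }

    static void example_channel_init(struct example_channel *ch)
    {
        tasklet_setup(&ch->tasklet, example_process);
    }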
a/drivers/usb/cdns3/cdns3-imx.c +++ b/drivers/usb/cdns3/cdns3-imx.c @@ -15,6 +15,8 @@ #include <linux/io.h> #include <linux/of_platform.h> #include <linux/iopoll.h> +#include <linux/pm_runtime.h> +#include "core.h" #define USB3_CORE_CTRL1 0x00 #define USB3_CORE_CTRL2 0x04 @@ -32,7 +34,7 @@ /* Register bits definition */ /* USB3_CORE_CTRL1 */ -#define SW_RESET_MASK (0x3f << 26) +#define SW_RESET_MASK GENMASK(31, 26) #define PWR_SW_RESET BIT(31) #define APB_SW_RESET BIT(30) #define AXI_SW_RESET BIT(29) @@ -53,8 +55,8 @@ #define LPM_CLK_REQ BIT(28) #define DEVU3_WAEKUP_EN BIT(14) #define OTG_WAKEUP_EN BIT(12) -#define DEV_INT_EN (3 << 8) /* DEV INT b9:8 */ -#define HOST_INT1_EN (1 << 0) /* HOST INT b7:0 */ +#define DEV_INT_EN (3 << 8) /* DEV INT b9:8 */ +#define HOST_INT1_EN (1 << 0) /* HOST INT b7:0 */ /* USB3_CORE_STATUS */ #define MDCTRL_CLK_STATUS BIT(15) @@ -66,11 +68,30 @@ #define CLK_VALID_COMPARE_BITS (0xf << 28) #define PHY_REFCLK_REQ (1 << 0) +/* OTG registers definition */ +#define OTGSTS 0x4 +/* OTGSTS */ +#define OTG_NRDY BIT(11) + +/* xHCI registers definition */ +#define XECP_PM_PMCSR 0x8018 +#define XECP_AUX_CTRL_REG1 0x8120 + +/* Register bits definition */ +/* XECP_AUX_CTRL_REG1 */ +#define CFG_RXDET_P3_EN BIT(15) + +/* XECP_PM_PMCSR */ +#define PS_MASK GENMASK(1, 0) +#define PS_D0 0 +#define PS_D1 1 + struct cdns_imx { struct device *dev; void __iomem *noncore; struct clk_bulk_data *clks; int num_clks; + struct platform_device *cdns3_pdev; }; static inline u32 cdns_imx_readl(struct cdns_imx *data, u32 offset) @@ -126,6 +147,20 @@ static int cdns_imx_noncore_init(struct cdns_imx *data) return ret; } +static int cdns_imx_platform_suspend(struct device *dev, + bool suspend, bool wakeup); +static struct cdns3_platform_data cdns_imx_pdata = { + .platform_suspend = cdns_imx_platform_suspend, +}; + +static const struct of_dev_auxdata cdns_imx_auxdata[] = { + { + .compatible = "cdns,usb3", + .platform_data = &cdns_imx_pdata, + }, + {}, +}; + static int cdns_imx_probe(struct platform_device *pdev) { struct device *dev = &pdev->dev; @@ -162,14 +197,18 @@ static int cdns_imx_probe(struct platform_device *pdev) if (ret) goto err; - ret = of_platform_populate(node, NULL, NULL, dev); + ret = of_platform_populate(node, NULL, cdns_imx_auxdata, dev); if (ret) { dev_err(dev, "failed to create children: %d\n", ret); goto err; } - return ret; + device_set_wakeup_capable(dev, true); + pm_runtime_set_active(dev); + pm_runtime_enable(dev); + pm_runtime_forbid(dev); + return ret; err: clk_bulk_disable_unprepare(data->num_clks, data->clks); return ret; @@ -194,6 +233,147 @@ static int cdns_imx_remove(struct platform_device *pdev) return 0; } +#ifdef CONFIG_PM +static void cdns3_set_wakeup(struct cdns_imx *data, bool enable) +{ + u32 value; + + value = cdns_imx_readl(data, USB3_INT_REG); + if (enable) + value |= OTG_WAKEUP_EN | DEVU3_WAEKUP_EN; + else + value &= ~(OTG_WAKEUP_EN | DEVU3_WAEKUP_EN); + + cdns_imx_writel(data, USB3_INT_REG, value); +} + +static int cdns_imx_platform_suspend(struct device *dev, + bool suspend, bool wakeup) +{ + struct cdns3 *cdns = dev_get_drvdata(dev); + struct device *parent = dev->parent; + struct cdns_imx *data = dev_get_drvdata(parent); + void __iomem *otg_regs = (void __iomem *)(cdns->otg_regs); + void __iomem *xhci_regs = cdns->xhci_regs; + u32 value; + int ret = 0; + + if (cdns->role != USB_ROLE_HOST) + return 0; + + if (suspend) { + /* SW request low power when all usb ports allow to it ??? 
*/ + value = readl(xhci_regs + XECP_PM_PMCSR); + value &= ~PS_MASK; + value |= PS_D1; + writel(value, xhci_regs + XECP_PM_PMCSR); + + /* mdctrl_clk_sel */ + value = cdns_imx_readl(data, USB3_CORE_CTRL1); + value |= MDCTRL_CLK_SEL; + cdns_imx_writel(data, USB3_CORE_CTRL1, value); + + /* wait for mdctrl_clk_status */ + value = cdns_imx_readl(data, USB3_CORE_STATUS); + ret = readl_poll_timeout(data->noncore + USB3_CORE_STATUS, value, + (value & MDCTRL_CLK_STATUS) == MDCTRL_CLK_STATUS, + 10, 100000); + if (ret) + dev_warn(parent, "wait mdctrl_clk_status timeout\n"); + + /* wait lpm_clk_req to be 0 */ + value = cdns_imx_readl(data, USB3_INT_REG); + ret = readl_poll_timeout(data->noncore + USB3_INT_REG, value, + (value & LPM_CLK_REQ) != LPM_CLK_REQ, + 10, 100000); + if (ret) + dev_warn(parent, "wait lpm_clk_req timeout\n"); + + /* wait phy_refclk_req to be 0 */ + value = cdns_imx_readl(data, USB3_SSPHY_STATUS); + ret = readl_poll_timeout(data->noncore + USB3_SSPHY_STATUS, value, + (value & PHY_REFCLK_REQ) != PHY_REFCLK_REQ, + 10, 100000); + if (ret) + dev_warn(parent, "wait phy_refclk_req timeout\n"); + + cdns3_set_wakeup(data, wakeup); + } else { + cdns3_set_wakeup(data, false); + + /* SW request D0 */ + value = readl(xhci_regs + XECP_PM_PMCSR); + value &= ~PS_MASK; + value |= PS_D0; + writel(value, xhci_regs + XECP_PM_PMCSR); + + /* clr CFG_RXDET_P3_EN */ + value = readl(xhci_regs + XECP_AUX_CTRL_REG1); + value &= ~CFG_RXDET_P3_EN; + writel(value, xhci_regs + XECP_AUX_CTRL_REG1); + + /* clear mdctrl_clk_sel */ + value = cdns_imx_readl(data, USB3_CORE_CTRL1); + value &= ~MDCTRL_CLK_SEL; + cdns_imx_writel(data, USB3_CORE_CTRL1, value); + + /* wait CLK_125_REQ to be 1 */ + value = cdns_imx_readl(data, USB3_INT_REG); + ret = readl_poll_timeout(data->noncore + USB3_INT_REG, value, + (value & CLK_125_REQ) == CLK_125_REQ, + 10, 100000); + if (ret) + dev_warn(parent, "wait CLK_125_REQ timeout\n"); + + /* wait for mdctrl_clk_status is cleared */ + value = cdns_imx_readl(data, USB3_CORE_STATUS); + ret = readl_poll_timeout(data->noncore + USB3_CORE_STATUS, value, + (value & MDCTRL_CLK_STATUS) != MDCTRL_CLK_STATUS, + 10, 100000); + if (ret) + dev_warn(parent, "wait mdctrl_clk_status cleared timeout\n"); + + /* Wait until OTG_NRDY is 0 */ + value = readl(otg_regs + OTGSTS); + ret = readl_poll_timeout(otg_regs + OTGSTS, value, + (value & OTG_NRDY) != OTG_NRDY, + 10, 100000); + if (ret) + dev_warn(parent, "wait OTG ready timeout\n"); + } + + return ret; + +} + +static int cdns_imx_resume(struct device *dev) +{ + struct cdns_imx *data = dev_get_drvdata(dev); + + return clk_bulk_prepare_enable(data->num_clks, data->clks); +} + +static int cdns_imx_suspend(struct device *dev) +{ + struct cdns_imx *data = dev_get_drvdata(dev); + + clk_bulk_disable_unprepare(data->num_clks, data->clks); + + return 0; +} +#else +static int cdns_imx_platform_suspend(struct device *dev, + bool suspend, bool wakeup) +{ + return 0; +} + +#endif /* CONFIG_PM */ + +static const struct dev_pm_ops cdns_imx_pm_ops = { + SET_RUNTIME_PM_OPS(cdns_imx_suspend, cdns_imx_resume, NULL) +}; + static const struct of_device_id cdns_imx_of_match[] = { { .compatible = "fsl,imx8qm-usb3", }, {}, @@ -206,6 +386,7 @@ static struct platform_driver cdns_imx_driver = { .driver = { .name = "cdns3-imx", .of_match_table = cdns_imx_of_match, + .pm = &cdns_imx_pm_ops, }, }; module_platform_driver(cdns_imx_driver); diff --git a/drivers/usb/cdns3/core.c b/drivers/usb/cdns3/core.c index 5c1586ec7824..a0f73d4711ae 100644 --- a/drivers/usb/cdns3/core.c +++ 
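The i.MX glue's platform_suspend hook asks the xHCI-side XECP_PM_PMCSR register for D1 before handing the core clock over, and restores D0 on the way back; both directions then poll the handshake bits with readl_poll_timeout(). The D-state change itself is a two-bit read-modify-write, modeled stand-alone below with the PS_* values defined earlier in this file; the register contents are illustrative:

    #include <stdint.h>
    #include <stdio.h>

    #define PS_MASK     0x3     /* GENMASK(1, 0) */
    #define PS_D0       0
    #define PS_D1       1

    /* Same shape as the XECP_PM_PMCSR update in the suspend/resume paths */
    static uint32_t pmcsr_set_dstate(uint32_t pmcsr, uint32_t dstate)
    {
        pmcsr &= ~PS_MASK;
        pmcsr |= dstate;
        return pmcsr;
    }

    int main(void)
    {
        uint32_t reg = 0xffffff00;

        reg = pmcsr_set_dstate(reg, PS_D1);   /* request low power */
        printf("%#x\n", reg);
        reg = pmcsr_set_dstate(reg, PS_D0);   /* back to D0 on resume */
        printf("%#x\n", reg);
        return 0;
    }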
b/drivers/usb/cdns3/core.c @@ -280,6 +280,10 @@ int cdns3_hw_role_switch(struct cdns3 *cdns) enum usb_role real_role, current_role; int ret = 0; + /* Depends on role switch class */ + if (cdns->role_sw) + return 0; + pm_runtime_get_sync(cdns->dev); current_role = cdns->role; @@ -371,6 +375,50 @@ pm_put: return ret; } +static int set_phy_power_on(struct cdns3 *cdns) +{ + int ret; + + ret = phy_power_on(cdns->usb2_phy); + if (ret) + return ret; + + ret = phy_power_on(cdns->usb3_phy); + if (ret) + phy_power_off(cdns->usb2_phy); + + return ret; +} + +static void set_phy_power_off(struct cdns3 *cdns) +{ + phy_power_off(cdns->usb3_phy); + phy_power_off(cdns->usb2_phy); +} + +/** + * cdns3_wakeup_irq - interrupt handler for wakeup events + * @irq: irq number for cdns3 core device + * @data: structure of cdns3 + * + * Returns IRQ_HANDLED or IRQ_NONE + */ +static irqreturn_t cdns3_wakeup_irq(int irq, void *data) +{ + struct cdns3 *cdns = data; + + if (cdns->in_lpm) { + disable_irq_nosync(irq); + cdns->wakeup_pending = true; + if ((cdns->role == USB_ROLE_HOST) && cdns->host_dev) + pm_request_resume(&cdns->host_dev->dev); + + return IRQ_HANDLED; + } + + return IRQ_NONE; +} + /** * cdns3_probe - probe for cdns3 core device * @pdev: Pointer to cdns3 core platform device @@ -397,6 +445,7 @@ static int cdns3_probe(struct platform_device *pdev) return -ENOMEM; cdns->dev = dev; + cdns->pdata = dev_get_platdata(dev); platform_set_drvdata(pdev, cdns); @@ -443,8 +492,21 @@ static int cdns3_probe(struct platform_device *pdev) return -ENXIO; } + cdns->phyrst_a_enable = device_property_read_bool(dev, "cdns,phyrst-a-enable"); + cdns->otg_res = *res; + cdns->wakeup_irq = platform_get_irq_byname_optional(pdev, "wakeup"); + if (cdns->wakeup_irq == -EPROBE_DEFER) + return cdns->wakeup_irq; + else if (cdns->wakeup_irq == 0) + return -EINVAL; + + if (cdns->wakeup_irq < 0) { + dev_dbg(dev, "couldn't get wakeup irq\n"); + cdns->wakeup_irq = 0x0; + } + mutex_init(&cdns->mutex); cdns->usb2_phy = devm_phy_optional_get(dev, "cdns3,usb2-phy"); @@ -463,14 +525,10 @@ static int cdns3_probe(struct platform_device *pdev) if (ret) goto err1; - ret = phy_power_on(cdns->usb2_phy); + ret = set_phy_power_on(cdns); if (ret) goto err2; - ret = phy_power_on(cdns->usb3_phy); - if (ret) - goto err3; - sw_desc.set = cdns3_role_set; sw_desc.get = cdns3_role_get; sw_desc.allow_userspace_control = true; @@ -482,20 +540,34 @@ static int cdns3_probe(struct platform_device *pdev) if (IS_ERR(cdns->role_sw)) { ret = PTR_ERR(cdns->role_sw); dev_warn(dev, "Unable to register Role Switch\n"); - goto err4; + goto err3; + } + + if (cdns->wakeup_irq) { + ret = devm_request_irq(cdns->dev, cdns->wakeup_irq, + cdns3_wakeup_irq, + IRQF_SHARED, + dev_name(cdns->dev), cdns); + + if (ret) { + dev_err(cdns->dev, "couldn't register wakeup irq handler\n"); + goto err3; + } } ret = cdns3_drd_init(cdns); if (ret) - goto err5; + goto err4; ret = cdns3_core_init_role(cdns); if (ret) - goto err5; + goto err4; + spin_lock_init(&cdns->lock); device_set_wakeup_capable(dev, true); pm_runtime_set_active(dev); pm_runtime_enable(dev); + pm_runtime_forbid(dev); /* * The controller needs less time between bus and controller suspend, @@ -508,14 +580,11 @@ static int cdns3_probe(struct platform_device *pdev) dev_dbg(dev, "Cadence USB3 core: probe succeed\n"); return 0; -err5: +err4: cdns3_drd_exit(cdns); usb_role_switch_unregister(cdns->role_sw); -err4: - phy_power_off(cdns->usb3_phy); - err3: - phy_power_off(cdns->usb2_phy); + set_phy_power_off(cdns); err2: 
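The optional "wakeup" interrupt wired up in cdns3_probe() only matters while the controller sits in low-power mode: the handler masks its own line with disable_irq_nosync(), records that a wake is pending and asks the PM core to runtime-resume the host device; the resume path re-enables the IRQ once in_lpm is cleared. A hedged sketch of that handler shape, with structure and field names invented:

    #include <linux/interrupt.h>
    #include <linux/pm_runtime.h>

    struct example_ctrl {
        struct device *host_dev;
        bool in_lpm;
        bool wakeup_pending;
    };

    static irqreturn_t example_wakeup_irq(int irq, void *data)
    {
        struct example_ctrl *ctrl = data;

        if (!ctrl->in_lpm)
            return IRQ_NONE;    /* not ours while fully powered */

        /* Mask the line until resume re-enables it, then kick a resume */
        disable_irq_nosync(irq);
        ctrl->wakeup_pending = true;
        pm_request_resume(ctrl->host_dev);

        return IRQ_HANDLED;
    }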
phy_exit(cdns->usb3_phy); err1: @@ -539,59 +608,128 @@ static int cdns3_remove(struct platform_device *pdev) pm_runtime_put_noidle(&pdev->dev); cdns3_exit_roles(cdns); usb_role_switch_unregister(cdns->role_sw); - phy_power_off(cdns->usb2_phy); - phy_power_off(cdns->usb3_phy); + set_phy_power_off(cdns); phy_exit(cdns->usb2_phy); phy_exit(cdns->usb3_phy); return 0; } -#ifdef CONFIG_PM_SLEEP +#ifdef CONFIG_PM -static int cdns3_suspend(struct device *dev) +static int cdns3_set_platform_suspend(struct device *dev, + bool suspend, bool wakeup) +{ + struct cdns3 *cdns = dev_get_drvdata(dev); + int ret = 0; + + if (cdns->pdata && cdns->pdata->platform_suspend) + ret = cdns->pdata->platform_suspend(dev, suspend, wakeup); + + return ret; +} + +static int cdns3_controller_suspend(struct device *dev, pm_message_t msg) { struct cdns3 *cdns = dev_get_drvdata(dev); + bool wakeup; unsigned long flags; - if (cdns->role == USB_ROLE_HOST) + if (cdns->in_lpm) return 0; - if (pm_runtime_status_suspended(dev)) - pm_runtime_resume(dev); + if (PMSG_IS_AUTO(msg)) + wakeup = true; + else + wakeup = device_may_wakeup(dev); - if (cdns->roles[cdns->role]->suspend) { - spin_lock_irqsave(&cdns->gadget_dev->lock, flags); - cdns->roles[cdns->role]->suspend(cdns, false); - spin_unlock_irqrestore(&cdns->gadget_dev->lock, flags); - } + cdns3_set_platform_suspend(cdns->dev, true, wakeup); + set_phy_power_off(cdns); + spin_lock_irqsave(&cdns->lock, flags); + cdns->in_lpm = true; + spin_unlock_irqrestore(&cdns->lock, flags); + dev_dbg(cdns->dev, "%s ends\n", __func__); return 0; } -static int cdns3_resume(struct device *dev) +static int cdns3_controller_resume(struct device *dev, pm_message_t msg) { struct cdns3 *cdns = dev_get_drvdata(dev); + int ret; unsigned long flags; - if (cdns->role == USB_ROLE_HOST) + if (!cdns->in_lpm) return 0; - if (cdns->roles[cdns->role]->resume) { - spin_lock_irqsave(&cdns->gadget_dev->lock, flags); + ret = set_phy_power_on(cdns); + if (ret) + return ret; + + cdns3_set_platform_suspend(cdns->dev, false, false); + + spin_lock_irqsave(&cdns->lock, flags); + if (cdns->roles[cdns->role]->resume && !PMSG_IS_AUTO(msg)) cdns->roles[cdns->role]->resume(cdns, false); - spin_unlock_irqrestore(&cdns->gadget_dev->lock, flags); + + cdns->in_lpm = false; + spin_unlock_irqrestore(&cdns->lock, flags); + if (cdns->wakeup_pending) { + cdns->wakeup_pending = false; + enable_irq(cdns->wakeup_irq); + } + dev_dbg(cdns->dev, "%s ends\n", __func__); + + return ret; +} + +static int cdns3_runtime_suspend(struct device *dev) +{ + return cdns3_controller_suspend(dev, PMSG_AUTO_SUSPEND); +} + +static int cdns3_runtime_resume(struct device *dev) +{ + return cdns3_controller_resume(dev, PMSG_AUTO_RESUME); +} +#ifdef CONFIG_PM_SLEEP + +static int cdns3_suspend(struct device *dev) +{ + struct cdns3 *cdns = dev_get_drvdata(dev); + unsigned long flags; + + if (pm_runtime_status_suspended(dev)) + pm_runtime_resume(dev); + + if (cdns->roles[cdns->role]->suspend) { + spin_lock_irqsave(&cdns->lock, flags); + cdns->roles[cdns->role]->suspend(cdns, false); + spin_unlock_irqrestore(&cdns->lock, flags); } + return cdns3_controller_suspend(dev, PMSG_SUSPEND); +} + +static int cdns3_resume(struct device *dev) +{ + int ret; + + ret = cdns3_controller_resume(dev, PMSG_RESUME); + if (ret) + return ret; + pm_runtime_disable(dev); pm_runtime_set_active(dev); pm_runtime_enable(dev); - return 0; + return ret; } -#endif +#endif /* CONFIG_PM_SLEEP */ +#endif /* CONFIG_PM */ static const struct dev_pm_ops cdns3_pm_ops = { 
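cdns3_controller_suspend() serves both runtime PM and system sleep and tells them apart through the pm_message_t it receives: an autosuspend always arms the hardware for wakeup (otherwise nothing could bring it back), while system sleep follows the user-visible device_may_wakeup() policy. A small sketch of just that decision, with the helper name invented; in the hunks above the runtime path reaches the shared code via PMSG_AUTO_SUSPEND and the sleep path via PMSG_SUSPEND:

    #include <linux/pm.h>
    #include <linux/pm_wakeup.h>

    /* Runtime suspend must always be able to wake; system sleep follows policy */
    static bool example_arm_wakeup(struct device *dev, pm_message_t msg)
    {
        if (PMSG_IS_AUTO(msg))
            return true;

        return device_may_wakeup(dev);
    }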
SET_SYSTEM_SLEEP_PM_OPS(cdns3_suspend, cdns3_resume) + SET_RUNTIME_PM_OPS(cdns3_runtime_suspend, cdns3_runtime_resume, NULL) }; #ifdef CONFIG_OF diff --git a/drivers/usb/cdns3/core.h b/drivers/usb/cdns3/core.h index 1ad1f1fe61e9..8a40d53d5ede 100644 --- a/drivers/usb/cdns3/core.h +++ b/drivers/usb/cdns3/core.h @@ -38,6 +38,12 @@ struct cdns3_role_driver { }; #define CDNS3_XHCI_RESOURCES_NUM 2 + +struct cdns3_platform_data { + int (*platform_suspend)(struct device *dev, + bool suspend, bool wakeup); +}; + /** * struct cdns3 - Representation of Cadence USB3 DRD controller. * @dev: pointer to Cadence device struct @@ -50,6 +56,7 @@ struct cdns3_role_driver { * @otg_regs: pointer to base of otg registers * @otg_irq: irq number for otg controller * @dev_irq: irq number for device controller + * @wakeup_irq: irq number for wakeup event, it is optional * @roles: array of supported roles for this controller * @role: current role * @host_dev: the child host device pointer for cdns3 core @@ -62,6 +69,10 @@ struct cdns3_role_driver { * This field based on firmware setting, kernel configuration * and hardware configuration. * @role_sw: pointer to role switch object. + * @in_lpm: indicate the controller is in low power mode + * @wakeup_pending: wakeup interrupt pending + * @pdata: platform data from glue layer + * @lock: spinlock structure */ struct cdns3 { struct device *dev; @@ -76,9 +87,11 @@ struct cdns3 { #define CDNS3_CONTROLLER_V0 0 #define CDNS3_CONTROLLER_V1 1 u32 version; + bool phyrst_a_enable; int otg_irq; int dev_irq; + int wakeup_irq; struct cdns3_role_driver *roles[USB_ROLE_DEVICE + 1]; enum usb_role role; struct platform_device *host_dev; @@ -89,6 +102,10 @@ struct cdns3 { struct mutex mutex; enum usb_dr_mode dr_mode; struct usb_role_switch *role_sw; + bool in_lpm; + bool wakeup_pending; + struct cdns3_platform_data *pdata; + spinlock_t lock; }; int cdns3_hw_role_switch(struct cdns3 *cdns); diff --git a/drivers/usb/cdns3/drd.c b/drivers/usb/cdns3/drd.c index 6234bcd6158a..38ccd29e4cde 100644 --- a/drivers/usb/cdns3/drd.c +++ b/drivers/usb/cdns3/drd.c @@ -15,6 +15,7 @@ #include <linux/delay.h> #include <linux/iopoll.h> #include <linux/usb/otg.h> +#include <linux/phy/phy.h> #include "gadget.h" #include "drd.h" @@ -42,6 +43,18 @@ int cdns3_set_mode(struct cdns3 *cdns, enum usb_dr_mode mode) reg = readl(&cdns->otg_v1_regs->override); reg |= OVERRIDE_IDPULLUP; writel(reg, &cdns->otg_v1_regs->override); + + /* + * Enable work around feature built into the + * controller to address issue with RX Sensitivity + * est (EL_17) for USB2 PHY. The issue only occures + * for 0x0002450D controller version. 
+ */ + if (cdns->phyrst_a_enable) { + reg = readl(&cdns->otg_v1_regs->phyrst_cfg); + reg |= PHYRST_CFG_PHYRST_A_ENABLE; + writel(reg, &cdns->otg_v1_regs->phyrst_cfg); + } } else { reg = readl(&cdns->otg_v0_regs->ctrl1); reg |= OVERRIDE_IDPULLUP_V0; @@ -145,6 +158,7 @@ int cdns3_drd_host_on(struct cdns3 *cdns) if (ret) dev_err(cdns->dev, "timeout waiting for xhci_ready\n"); + phy_set_mode(cdns->usb3_phy, PHY_MODE_USB_HOST); return ret; } @@ -164,6 +178,7 @@ void cdns3_drd_host_off(struct cdns3 *cdns) readl_poll_timeout_atomic(&cdns->otg_regs->state, val, !(val & OTGSTATE_HOST_STATE_MASK), 1, 2000000); + phy_set_mode(cdns->usb3_phy, PHY_MODE_INVALID); } /** @@ -190,6 +205,7 @@ int cdns3_drd_gadget_on(struct cdns3 *cdns) return ret; } + phy_set_mode(cdns->usb3_phy, PHY_MODE_USB_DEVICE); return 0; } @@ -213,6 +229,7 @@ void cdns3_drd_gadget_off(struct cdns3 *cdns) readl_poll_timeout_atomic(&cdns->otg_regs->state, val, !(val & OTGSTATE_DEV_STATE_MASK), 1, 2000000); + phy_set_mode(cdns->usb3_phy, PHY_MODE_INVALID); } /** @@ -293,6 +310,9 @@ static irqreturn_t cdns3_drd_irq(int irq, void *data) if (cdns->dr_mode != USB_DR_MODE_OTG) return IRQ_NONE; + if (cdns->in_lpm) + return ret; + reg = readl(&cdns->otg_regs->ivect); if (!reg) diff --git a/drivers/usb/cdns3/drd.h b/drivers/usb/cdns3/drd.h index 7e7cf7fa2dd3..f1ccae285a16 100644 --- a/drivers/usb/cdns3/drd.h +++ b/drivers/usb/cdns3/drd.h @@ -31,7 +31,7 @@ struct cdns3_otg_regs { __le32 simulate; __le32 override; __le32 susp_ctrl; - __le32 reserved4; + __le32 phyrst_cfg; __le32 anasts; __le32 adp_ramp_time; __le32 ctrl1; @@ -153,6 +153,9 @@ struct cdns3_otg_common_regs { /* Only for CDNS3_CONTROLLER_V0 version */ #define OVERRIDE_IDPULLUP_V0 BIT(24) +/* PHYRST_CFG - bitmasks */ +#define PHYRST_CFG_PHYRST_A_ENABLE BIT(0) + #define CDNS3_ID_PERIPHERAL 1 #define CDNS3_ID_HOST 0 diff --git a/drivers/usb/cdns3/ep0.c b/drivers/usb/cdns3/ep0.c index d9779abc65b2..4761c852d9c4 100644 --- a/drivers/usb/cdns3/ep0.c +++ b/drivers/usb/cdns3/ep0.c @@ -717,9 +717,17 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep, /* send STATUS stage. Should be called only for SET_CONFIGURATION */ if (priv_dev->ep0_stage == CDNS3_STATUS_STAGE) { + u32 val; + cdns3_select_ep(priv_dev, 0x00); cdns3_set_hw_configuration(priv_dev); cdns3_ep0_complete_setup(priv_dev, 0, 1); + /* wait until configuration set */ + ret = readl_poll_timeout_atomic(&priv_dev->regs->usb_sts, val, + val & USB_STS_CFGSTS_MASK, 1, 100); + if (ret == -ETIMEDOUT) + dev_warn(priv_dev->dev, "timeout for waiting configuration set\n"); + request->actual = 0; priv_dev->status_completion_no_call = true; priv_dev->pending_status_request = request; @@ -731,7 +739,7 @@ static int cdns3_gadget_ep0_queue(struct usb_ep *ep, * ep0_queue is back. 
*/ queue_work(system_freezable_wq, &priv_dev->pending_status_wq); - return 0; + return ret; } if (!list_empty(&priv_ep->pending_req_list)) { diff --git a/drivers/usb/cdns3/gadget.c b/drivers/usb/cdns3/gadget.c index dea649ee173b..6e7b70a2e352 100644 --- a/drivers/usb/cdns3/gadget.c +++ b/drivers/usb/cdns3/gadget.c @@ -261,8 +261,8 @@ int cdns3_allocate_trb_pool(struct cdns3_endpoint *priv_ep) */ link_trb->control = 0; } else { - link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma); - link_trb->control = TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE; + link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma)); + link_trb->control = cpu_to_le32(TRB_CYCLE | TRB_TYPE(TRB_LINK) | TRB_TOGGLE); } return 0; } @@ -462,6 +462,36 @@ static int cdns3_start_all_request(struct cdns3_device *priv_dev, (reg) |= EP_STS_EN_DESCMISEN; \ } } while (0) +static void __cdns3_descmiss_copy_data(struct usb_request *request, + struct usb_request *descmiss_req) +{ + int length = request->actual + descmiss_req->actual; + struct scatterlist *s = request->sg; + + if (!s) { + if (length <= request->length) { + memcpy(&((u8 *)request->buf)[request->actual], + descmiss_req->buf, + descmiss_req->actual); + request->actual = length; + } else { + /* It should never occures */ + request->status = -ENOMEM; + } + } else { + if (length <= sg_dma_len(s)) { + void *p = phys_to_virt(sg_dma_address(s)); + + memcpy(&((u8 *)p)[request->actual], + descmiss_req->buf, + descmiss_req->actual); + request->actual = length; + } else { + request->status = -ENOMEM; + } + } +} + /** * cdns3_wa2_descmiss_copy_data copy data from internal requests to * request queued by class driver. @@ -488,21 +518,9 @@ static void cdns3_wa2_descmiss_copy_data(struct cdns3_endpoint *priv_ep, chunk_end = descmiss_priv_req->flags & REQUEST_INTERNAL_CH; length = request->actual + descmiss_req->actual; - request->status = descmiss_req->status; - - if (length <= request->length) { - memcpy(&((u8 *)request->buf)[request->actual], - descmiss_req->buf, - descmiss_req->actual); - request->actual = length; - } else { - /* It should never occures */ - request->status = -ENOMEM; - } - + __cdns3_descmiss_copy_data(request, descmiss_req); list_del_init(&descmiss_priv_req->list); - kfree(descmiss_req->buf); cdns3_gadget_ep_free_request(&priv_ep->endpoint, descmiss_req); --priv_ep->wa2_counter; @@ -817,6 +835,8 @@ void cdns3_gadget_giveback(struct cdns3_endpoint *priv_ep, request->length); priv_req->flags &= ~(REQUEST_PENDING | REQUEST_UNALIGNED); + /* All TRBs have finished, clear the counter */ + priv_req->finished_trb = 0; trace_cdns3_gadget_giveback(priv_req); if (priv_dev->dev_ver < DEV_VER_V2) { @@ -847,10 +867,10 @@ static void cdns3_wa1_restore_cycle_bit(struct cdns3_endpoint *priv_ep) priv_ep->wa1_trb_index = 0xFFFF; if (priv_ep->wa1_cycle_bit) { priv_ep->wa1_trb->control = - priv_ep->wa1_trb->control | 0x1; + priv_ep->wa1_trb->control | cpu_to_le32(0x1); } else { priv_ep->wa1_trb->control = - priv_ep->wa1_trb->control & ~0x1; + priv_ep->wa1_trb->control & cpu_to_le32(~0x1); } } } @@ -1008,17 +1028,16 @@ static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep, TRB_STREAM_ID(priv_req->request.stream_id) | TRB_ISP; if (!request->num_sgs) { - trb->buffer = TRB_BUFFER(trb_dma); + trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma)); length = request->length; } else { - trb->buffer = TRB_BUFFER(request->sg[sg_idx].dma_address); + trb->buffer = cpu_to_le32(TRB_BUFFER(request->sg[sg_idx].dma_address)); length = request->sg[sg_idx].length; } tdl = 
DIV_ROUND_UP(length, priv_ep->endpoint.maxpacket); - trb->length = TRB_BURST_LEN(16 /*priv_ep->trb_burst_size*/) | - TRB_LEN(length); + trb->length = cpu_to_le32(TRB_BURST_LEN(16) | TRB_LEN(length)); /* * For DEV_VER_V2 controller version we have enabled @@ -1027,11 +1046,11 @@ static int cdns3_ep_run_stream_transfer(struct cdns3_endpoint *priv_ep, */ if (priv_dev->dev_ver >= DEV_VER_V2) { if (priv_dev->gadget.speed == USB_SPEED_SUPER) - trb->length |= TRB_TDL_SS_SIZE(tdl); + trb->length |= cpu_to_le32(TRB_TDL_SS_SIZE(tdl)); } priv_req->flags |= REQUEST_PENDING; - trb->control = control; + trb->control = cpu_to_le32(control); trace_cdns3_prepare_trb(priv_ep, priv_req->trb); @@ -1091,6 +1110,7 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, struct cdns3_device *priv_dev = priv_ep->cdns3_dev; struct cdns3_request *priv_req; struct cdns3_trb *trb; + struct cdns3_trb *link_trb; dma_addr_t trb_dma; u32 togle_pcs = 1; int sg_iter = 0; @@ -1099,11 +1119,13 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, u32 control; int pcs; u16 total_tdl = 0; + struct scatterlist *s = NULL; + bool sg_supported = !!(request->num_mapped_sgs); if (priv_ep->type == USB_ENDPOINT_XFER_ISOC) num_trb = priv_ep->interval; else - num_trb = request->num_sgs ? request->num_sgs : 1; + num_trb = sg_supported ? request->num_mapped_sgs : 1; if (num_trb > priv_ep->free_trbs) { priv_ep->flags |= EP_RING_FULL; @@ -1129,7 +1151,6 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, /* prepare ring */ if ((priv_ep->enqueue + num_trb) >= (priv_ep->num_trbs - 1)) { - struct cdns3_trb *link_trb; int doorbell, dma_index; u32 ch_bit = 0; @@ -1156,13 +1177,16 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, TRBS_PER_SEGMENT > 2) ch_bit = TRB_CHAIN; - link_trb->control = ((priv_ep->pcs) ? TRB_CYCLE : 0) | - TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit; + link_trb->control = cpu_to_le32(((priv_ep->pcs) ? TRB_CYCLE : 0) | + TRB_TYPE(TRB_LINK) | TRB_TOGGLE | ch_bit); } if (priv_dev->dev_ver <= DEV_VER_V2) togle_pcs = cdns3_wa1_update_guard(priv_ep, trb); + if (sg_supported) + s = request->sg; + /* set incorrect Cycle Bit for first trb*/ control = priv_ep->pcs ? 0 : TRB_CYCLE; @@ -1172,13 +1196,13 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, /* fill TRB */ control |= TRB_TYPE(TRB_NORMAL); - trb->buffer = TRB_BUFFER(request->num_sgs == 0 - ? 
trb_dma : request->sg[sg_iter].dma_address); - - if (likely(!request->num_sgs)) + if (sg_supported) { + trb->buffer = cpu_to_le32(TRB_BUFFER(sg_dma_address(s))); + length = sg_dma_len(s); + } else { + trb->buffer = cpu_to_le32(TRB_BUFFER(trb_dma)); length = request->length; - else - length = request->sg[sg_iter].length; + } if (likely(priv_dev->dev_ver >= DEV_VER_V2)) td_size = DIV_ROUND_UP(length, @@ -1187,10 +1211,10 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, total_tdl += DIV_ROUND_UP(length, priv_ep->endpoint.maxpacket); - trb->length = TRB_BURST_LEN(priv_ep->trb_burst_size) | - TRB_LEN(length); + trb->length = cpu_to_le32(TRB_BURST_LEN(priv_ep->trb_burst_size) | + TRB_LEN(length)); if (priv_dev->gadget.speed == USB_SPEED_SUPER) - trb->length |= TRB_TDL_SS_SIZE(td_size); + trb->length |= cpu_to_le32(TRB_TDL_SS_SIZE(td_size)); else control |= TRB_TDL_HS_SIZE(td_size); @@ -1212,9 +1236,18 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, } if (sg_iter) - trb->control = control; + trb->control = cpu_to_le32(control); else - priv_req->trb->control = control; + priv_req->trb->control = cpu_to_le32(control); + + if (sg_supported) { + trb->control |= TRB_ISP; + /* Don't set chain bit for last TRB */ + if (sg_iter < num_trb - 1) + trb->control |= TRB_CHAIN; + + s = sg_next(s); + } control = 0; ++sg_iter; @@ -1226,9 +1259,10 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, trb = priv_req->trb; priv_req->flags |= REQUEST_PENDING; + priv_req->num_of_trb = num_trb; if (sg_iter == 1) - trb->control |= TRB_IOC | TRB_ISP; + trb->control |= cpu_to_le32(TRB_IOC | TRB_ISP); if (priv_dev->dev_ver < DEV_VER_V2 && (priv_ep->flags & EP_TDLCHK_EN)) { @@ -1254,12 +1288,27 @@ static int cdns3_ep_run_transfer(struct cdns3_endpoint *priv_ep, /* give the TD to the consumer*/ if (togle_pcs) - trb->control = trb->control ^ 1; + trb->control = trb->control ^ cpu_to_le32(1); if (priv_dev->dev_ver <= DEV_VER_V2) cdns3_wa1_tray_restore_cycle_bit(priv_dev, priv_ep); - trace_cdns3_prepare_trb(priv_ep, priv_req->trb); + if (num_trb > 1) { + int i = 0; + + while (i < num_trb) { + trace_cdns3_prepare_trb(priv_ep, trb + i); + if (trb + i == link_trb) { + trb = priv_ep->trb_pool; + num_trb = num_trb - i; + i = 0; + } else { + i++; + } + } + } else { + trace_cdns3_prepare_trb(priv_ep, priv_req->trb); + } /* * Memory barrier - Cycle Bit must be set before trb->length and @@ -1310,7 +1359,6 @@ void cdns3_set_hw_configuration(struct cdns3_device *priv_dev) { struct cdns3_endpoint *priv_ep; struct usb_ep *ep; - int val; if (priv_dev->hw_configured_flag) return; @@ -1320,10 +1368,6 @@ void cdns3_set_hw_configuration(struct cdns3_device *priv_dev) cdns3_set_register_bit(&priv_dev->regs->usb_conf, USB_CONF_U1EN | USB_CONF_U2EN); - /* wait until configuration set */ - readl_poll_timeout_atomic(&priv_dev->regs->usb_sts, val, - val & USB_STS_CFGSTS_MASK, 1, 100); - priv_dev->hw_configured_flag = 1; list_for_each_entry(ep, &priv_dev->gadget.ep_list, ep_list) { @@ -1337,7 +1381,7 @@ void cdns3_set_hw_configuration(struct cdns3_device *priv_dev) } /** - * cdns3_request_handled - check whether request has been handled by DMA + * cdns3_trb_handled - check whether trb has been handled by DMA * * @priv_ep: extended endpoint object. * @priv_req: request object for checking @@ -1354,32 +1398,28 @@ void cdns3_set_hw_configuration(struct cdns3_device *priv_dev) * ET = priv_req->end_trb - index of last TRB in transfer ring * CI = current_index - index of processed TRB by DMA. 
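The index bookkeeping above (start/end TRB of a transfer, dequeue pointer, current DMA index) reduces the "does this TRB belong to the request?" question to a wrapped-range check on a circular ring. A standalone sketch of that check, using hypothetical index names rather than the driver's fields:

#include <linux/types.h>

/*
 * Return true if ring index 'deq' lies inside the TD occupying
 * [start, end] on a circular ring, where the TD may wrap past the
 * end of the ring (start > end).
 */
static bool example_index_within_td(unsigned int start, unsigned int end,
				    unsigned int deq)
{
	if (start <= end)			/* TD does not wrap */
		return deq >= start && deq <= end;

	return deq >= start || deq <= end;	/* TD wraps around the ring */
}
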
* - * As first step, function checks if cycle bit for priv_req->start_trb is - * correct. + * As first step, we check if the TRB between the ST and ET. + * Then, we check if cycle bit for index priv_ep->dequeue + * is correct. * * some rules: - * 1. priv_ep->dequeue never exceed current_index. + * 1. priv_ep->dequeue never equals to current_index. * 2 priv_ep->enqueue never exceed priv_ep->dequeue * 3. exception: priv_ep->enqueue == priv_ep->dequeue * and priv_ep->free_trbs is zero. * This case indicate that TR is full. * - * Then We can split recognition into two parts: + * At below two cases, the request have been handled. * Case 1 - priv_ep->dequeue < current_index * SR ... EQ ... DQ ... CI ... ER * SR ... DQ ... CI ... EQ ... ER * - * Request has been handled by DMA if ST and ET is between DQ and CI. - * * Case 2 - priv_ep->dequeue > current_index - * This situation take place when CI go through the LINK TRB at the end of + * This situation takes place when CI go through the LINK TRB at the end of * transfer ring. * SR ... CI ... EQ ... DQ ... ER - * - * Request has been handled by DMA if ET is less then CI or - * ET is greater or equal DQ. */ -static bool cdns3_request_handled(struct cdns3_endpoint *priv_ep, +static bool cdns3_trb_handled(struct cdns3_endpoint *priv_ep, struct cdns3_request *priv_req) { struct cdns3_device *priv_dev = priv_ep->cdns3_dev; @@ -1391,9 +1431,27 @@ static bool cdns3_request_handled(struct cdns3_endpoint *priv_ep, current_index = cdns3_get_dma_pos(priv_dev, priv_ep); doorbell = !!(readl(&priv_dev->regs->ep_cmd) & EP_CMD_DRDY); - trb = &priv_ep->trb_pool[priv_req->start_trb]; + /* current trb doesn't belong to this request */ + if (priv_req->start_trb < priv_req->end_trb) { + if (priv_ep->dequeue > priv_req->end_trb) + goto finish; + + if (priv_ep->dequeue < priv_req->start_trb) + goto finish; + } + + if ((priv_req->start_trb > priv_req->end_trb) && + (priv_ep->dequeue > priv_req->end_trb) && + (priv_ep->dequeue < priv_req->start_trb)) + goto finish; - if ((trb->control & TRB_CYCLE) != priv_ep->ccs) + if ((priv_req->start_trb == priv_req->end_trb) && + (priv_ep->dequeue != priv_req->end_trb)) + goto finish; + + trb = &priv_ep->trb_pool[priv_ep->dequeue]; + + if ((le32_to_cpu(trb->control) & TRB_CYCLE) != priv_ep->ccs) goto finish; if (doorbell == 1 && current_index == priv_ep->dequeue) @@ -1413,12 +1471,8 @@ static bool cdns3_request_handled(struct cdns3_endpoint *priv_ep, !priv_ep->dequeue) goto finish; - if (priv_req->end_trb >= priv_ep->dequeue && - priv_req->end_trb < current_index) - handled = 1; + handled = 1; } else if (priv_ep->dequeue > current_index) { - if (priv_req->end_trb < current_index || - priv_req->end_trb >= priv_ep->dequeue) handled = 1; } @@ -1434,6 +1488,8 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev, struct cdns3_request *priv_req; struct usb_request *request; struct cdns3_trb *trb; + bool request_handled = false; + bool transfer_end = false; while (!list_empty(&priv_ep->pending_req_list)) { request = cdns3_next_request(&priv_ep->pending_req_list); @@ -1442,7 +1498,7 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev, trb = priv_ep->trb_pool + priv_ep->dequeue; /* Request was dequeued and TRB was changed to TRB_LINK. 
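The completion path reworked here retires TRBs one at a time and, with the finished_trb/num_of_trb counters this patch introduces, only gives the request back once every TRB belonging to it has completed. A tiny sketch of that bookkeeping, with hypothetical types mirroring those counters:

#include <linux/types.h>

struct example_request {
	unsigned int finished_trb;	/* TRBs the DMA has already retired */
	unsigned int num_of_trb;	/* TRBs queued for this request */
};

/* Call once per retired TRB; returns true when the whole TD is done. */
static bool example_trb_retired(struct example_request *req)
{
	req->finished_trb++;
	return req->finished_trb >= req->num_of_trb;
}
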
*/ - if (TRB_FIELD_TO_TYPE(trb->control) == TRB_LINK) { + if (TRB_FIELD_TO_TYPE(le32_to_cpu(trb->control)) == TRB_LINK) { trace_cdns3_complete_trb(priv_ep, trb); cdns3_move_deq_to_next_trb(priv_req); } @@ -1453,20 +1509,32 @@ static void cdns3_transfer_completed(struct cdns3_device *priv_dev, */ cdns3_select_ep(priv_dev, priv_ep->endpoint.address); - if (!cdns3_request_handled(priv_ep, priv_req)) - goto prepare_next_td; + while (cdns3_trb_handled(priv_ep, priv_req)) { + priv_req->finished_trb++; + if (priv_req->finished_trb >= priv_req->num_of_trb) + request_handled = true; - trb = priv_ep->trb_pool + priv_ep->dequeue; - trace_cdns3_complete_trb(priv_ep, trb); + trb = priv_ep->trb_pool + priv_ep->dequeue; + trace_cdns3_complete_trb(priv_ep, trb); - if (trb != priv_req->trb) - dev_warn(priv_dev->dev, - "request_trb=0x%p, queue_trb=0x%p\n", - priv_req->trb, trb); + if (!transfer_end) + request->actual += + TRB_LEN(le32_to_cpu(trb->length)); - request->actual = TRB_LEN(le32_to_cpu(trb->length)); - cdns3_move_deq_to_next_trb(priv_req); - cdns3_gadget_giveback(priv_ep, priv_req, 0); + if (priv_req->num_of_trb > 1 && + le32_to_cpu(trb->control) & TRB_SMM) + transfer_end = true; + + cdns3_ep_inc_deq(priv_ep); + } + + if (request_handled) { + cdns3_gadget_giveback(priv_ep, priv_req, 0); + request_handled = false; + transfer_end = false; + } else { + goto prepare_next_td; + } if (priv_ep->type != USB_ENDPOINT_XFER_ISOC && TRBS_PER_SEGMENT == 2) @@ -1574,7 +1642,7 @@ static int cdns3_check_ep_interrupt_proceed(struct cdns3_endpoint *priv_ep) * that host ignore the ERDY packet and driver has to send it * again. */ - if (tdl && (dbusy | !EP_STS_BUFFEMPTY(ep_sts_reg) | + if (tdl && (dbusy || !EP_STS_BUFFEMPTY(ep_sts_reg) || EP_STS_HOSTPP(ep_sts_reg))) { writel(EP_CMD_ERDY | EP_CMD_ERDY_SID(priv_ep->last_stream_id), @@ -1769,9 +1837,13 @@ static void cdns3_check_usb_interrupt_proceed(struct cdns3_device *priv_dev, static irqreturn_t cdns3_device_irq_handler(int irq, void *data) { struct cdns3_device *priv_dev = data; + struct cdns3 *cdns = dev_get_drvdata(priv_dev->dev); irqreturn_t ret = IRQ_NONE; u32 reg; + if (cdns->in_lpm) + return ret; + /* check USB device interrupt */ reg = readl(&priv_dev->regs->usb_ists); if (reg) { @@ -2552,10 +2624,10 @@ found: /* Update ring only if removed request is on pending_req_list list */ if (req_on_hw_ring && link_trb) { - link_trb->buffer = TRB_BUFFER(priv_ep->trb_pool_dma + - ((priv_req->end_trb + 1) * TRB_SIZE)); - link_trb->control = (link_trb->control & TRB_CYCLE) | - TRB_TYPE(TRB_LINK) | TRB_CHAIN; + link_trb->buffer = cpu_to_le32(TRB_BUFFER(priv_ep->trb_pool_dma + + ((priv_req->end_trb + 1) * TRB_SIZE))); + link_trb->control = cpu_to_le32((le32_to_cpu(link_trb->control) & TRB_CYCLE) | + TRB_TYPE(TRB_LINK) | TRB_CHAIN); if (priv_ep->wa1_trb == priv_req->trb) cdns3_wa1_restore_cycle_bit(priv_ep); @@ -2610,7 +2682,7 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep) priv_req = to_cdns3_request(request); trb = priv_req->trb; if (trb) - trb->control = trb->control ^ TRB_CYCLE; + trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE); } writel(EP_CMD_CSTALL | EP_CMD_EPRST, &priv_dev->regs->ep_cmd); @@ -2625,7 +2697,8 @@ int __cdns3_gadget_ep_clear_halt(struct cdns3_endpoint *priv_ep) if (request) { if (trb) - trb->control = trb->control ^ TRB_CYCLE; + trb->control = trb->control ^ cpu_to_le32(TRB_CYCLE); + cdns3_rearm_transfer(priv_ep, 1); } @@ -2735,10 +2808,13 @@ static int cdns3_gadget_pullup(struct usb_gadget *gadget, int is_on) { struct 
cdns3_device *priv_dev = gadget_to_cdns3_device(gadget); - if (is_on) + if (is_on) { writel(USB_CONF_DEVEN, &priv_dev->regs->usb_conf); - else + } else { + writel(~0, &priv_dev->regs->ep_ists); + writel(~0, &priv_dev->regs->usb_ists); writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf); + } return 0; } @@ -2779,6 +2855,8 @@ static void cdns3_gadget_config(struct cdns3_device *priv_dev) /* enable generic interrupt*/ writel(USB_IEN_INIT, ®s->usb_ien); writel(USB_CONF_CLK2OFFDS | USB_CONF_L1DS, ®s->usb_conf); + /* keep Fast Access bit */ + writel(PUSB_PWR_FST_REG_ACCESS, &priv_dev->regs->usb_pwr); cdns3_configure_dmult(priv_dev, NULL); } @@ -2862,6 +2940,7 @@ static int cdns3_gadget_udc_stop(struct usb_gadget *gadget) /* disable interrupt for device */ writel(0, &priv_dev->regs->usb_ien); + writel(0, &priv_dev->regs->usb_pwr); writel(USB_CONF_DEVDS, &priv_dev->regs->usb_conf); return 0; @@ -2984,18 +3063,26 @@ err: return -ENOMEM; } +static void cdns3_gadget_release(struct device *dev) +{ + struct cdns3_device *priv_dev = container_of(dev, + struct cdns3_device, gadget.dev); + + kfree(priv_dev); +} + void cdns3_gadget_exit(struct cdns3 *cdns) { struct cdns3_device *priv_dev; priv_dev = cdns->gadget_dev; - devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev); pm_runtime_mark_last_busy(cdns->dev); pm_runtime_put_autosuspend(cdns->dev); - usb_del_gadget_udc(&priv_dev->gadget); + usb_del_gadget(&priv_dev->gadget); + devm_free_irq(cdns->dev, cdns->dev_irq, priv_dev); cdns3_free_all_eps(priv_dev); @@ -3015,7 +3102,7 @@ void cdns3_gadget_exit(struct cdns3 *cdns) priv_dev->setup_dma); kfree(priv_dev->zlp_buf); - kfree(priv_dev); + usb_put_gadget(&priv_dev->gadget); cdns->gadget_dev = NULL; cdns3_drd_gadget_off(cdns); } @@ -3030,6 +3117,8 @@ static int cdns3_gadget_start(struct cdns3 *cdns) if (!priv_dev) return -ENOMEM; + usb_initialize_gadget(cdns->dev, &priv_dev->gadget, + cdns3_gadget_release); cdns->gadget_dev = priv_dev; priv_dev->sysdev = cdns->dev; priv_dev->dev = cdns->dev; @@ -3070,7 +3159,6 @@ static int cdns3_gadget_start(struct cdns3 *cdns) priv_dev->gadget.speed = USB_SPEED_UNKNOWN; priv_dev->gadget.ops = &cdns3_gadget_ops; priv_dev->gadget.name = "usb-ss-gadget"; - priv_dev->gadget.sg_supported = 1; priv_dev->gadget.quirk_avoids_skb_reserve = 1; priv_dev->gadget.irq = cdns->dev_irq; @@ -3109,6 +3197,8 @@ static int cdns3_gadget_start(struct cdns3 *cdns) readl(&priv_dev->regs->usb_cap2)); priv_dev->dev_ver = GET_DEV_BASE_VERSION(priv_dev->dev_ver); + if (priv_dev->dev_ver >= DEV_VER_V2) + priv_dev->gadget.sg_supported = 1; priv_dev->zlp_buf = kzalloc(CDNS3_EP_ZLP_BUF_SIZE, GFP_KERNEL); if (!priv_dev->zlp_buf) { @@ -3117,10 +3207,9 @@ static int cdns3_gadget_start(struct cdns3 *cdns) } /* add USB gadget device */ - ret = usb_add_gadget_udc(priv_dev->dev, &priv_dev->gadget); + ret = usb_add_gadget(&priv_dev->gadget); if (ret < 0) { - dev_err(priv_dev->dev, - "Failed to register USB device controller\n"); + dev_err(priv_dev->dev, "Failed to add gadget\n"); goto err4; } @@ -3133,6 +3222,7 @@ err3: err2: cdns3_free_all_eps(priv_dev); err1: + usb_put_gadget(&priv_dev->gadget); cdns->gadget_dev = NULL; return ret; } diff --git a/drivers/usb/cdns3/gadget.h b/drivers/usb/cdns3/gadget.h index 52765b098b9e..1ccecd237530 100644 --- a/drivers/usb/cdns3/gadget.h +++ b/drivers/usb/cdns3/gadget.h @@ -966,7 +966,7 @@ struct cdns3_usb_regs { /* * USBSS-DEV DMA interface. 
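The gadget registration changes above move to the refcounted usb_initialize_gadget()/usb_add_gadget()/usb_put_gadget() pattern, where a release callback frees the containing structure only when the last reference drops. A minimal sketch under the assumption of a driver-private structure embedding the gadget (names are illustrative, and a real UDC must also fill in gadget.ops, max_speed, endpoints, etc.):

#include <linux/usb/gadget.h>
#include <linux/slab.h>

struct example_udc {
	struct usb_gadget gadget;
	/* ... controller state ... */
};

static void example_gadget_release(struct device *dev)
{
	struct example_udc *udc = container_of(dev, struct example_udc,
					       gadget.dev);

	kfree(udc);	/* freed only when the last reference is dropped */
}

static int example_udc_register(struct device *parent)
{
	struct example_udc *udc;
	int ret;

	udc = kzalloc(sizeof(*udc), GFP_KERNEL);
	if (!udc)
		return -ENOMEM;

	usb_initialize_gadget(parent, &udc->gadget, example_gadget_release);

	ret = usb_add_gadget(&udc->gadget);
	if (ret)
		usb_put_gadget(&udc->gadget);	/* release callback frees udc */

	return ret;
}
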
*/ -#define TRBS_PER_SEGMENT 40 +#define TRBS_PER_SEGMENT 600 #define ISO_MAX_INTERVAL 10 @@ -1030,6 +1030,11 @@ struct cdns3_trb { * When set to '1', the device will toggle its interpretation of the Cycle bit */ #define TRB_TOGGLE BIT(1) +/* + * The controller will set it if OUTSMM (OUT size mismatch) is detected, + * this bit is for normal TRB + */ +#define TRB_SMM BIT(1) /* * Short Packet (SP). OUT EPs at DMULT=1 only. Indicates if the TRB was @@ -1215,6 +1220,8 @@ struct cdns3_aligned_buf { * this endpoint * @flags: flag specifying special usage of request * @list: used by internally allocated request to add to wa2_descmiss_req_list. + * @finished_trb: number of trb has already finished per request + * @num_of_trb: how many trbs in this request */ struct cdns3_request { struct usb_request request; @@ -1230,6 +1237,8 @@ struct cdns3_request { #define REQUEST_UNALIGNED BIT(4) u32 flags; struct list_head list; + int finished_trb; + int num_of_trb; }; #define to_cdns3_request(r) (container_of(r, struct cdns3_request, request)) diff --git a/drivers/usb/cdns3/host.c b/drivers/usb/cdns3/host.c index 36c63d9ecd37..b3e2cb69762c 100644 --- a/drivers/usb/cdns3/host.c +++ b/drivers/usb/cdns3/host.c @@ -13,11 +13,13 @@ #include "core.h" #include "drd.h" #include "host-export.h" +#include <linux/usb/hcd.h> static int __cdns3_host_init(struct cdns3 *cdns) { struct platform_device *xhci; int ret; + struct usb_hcd *hcd; cdns3_drd_host_on(cdns); @@ -43,6 +45,11 @@ static int __cdns3_host_init(struct cdns3 *cdns) goto err1; } + /* Glue needs to access xHCI region register for Power management */ + hcd = platform_get_drvdata(xhci); + if (hcd) + cdns->xhci_regs = hcd->regs; + return 0; err1: platform_device_put(xhci); diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c index c39e2b615ac6..25c65accf089 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.c +++ b/drivers/usb/chipidea/ci_hdrc_imx.c @@ -165,6 +165,11 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev) if (of_usb_get_phy_mode(np) == USBPHY_INTERFACE_MODE_ULPI) data->ulpi = 1; + of_property_read_u32(np, "samsung,picophy-pre-emp-curr-control", + &data->emp_curr_control); + of_property_read_u32(np, "samsung,picophy-dc-vol-level-adjust", + &data->dc_vol_level_adjust); + return data; } @@ -609,7 +614,12 @@ static int __maybe_unused ci_hdrc_imx_suspend(struct device *dev) } } - return imx_controller_suspend(dev); + ret = imx_controller_suspend(dev); + if (ret) + return ret; + + pinctrl_pm_select_sleep_state(dev); + return ret; } static int __maybe_unused ci_hdrc_imx_resume(struct device *dev) @@ -617,6 +627,7 @@ static int __maybe_unused ci_hdrc_imx_resume(struct device *dev) struct ci_hdrc_imx_data *data = dev_get_drvdata(dev); int ret; + pinctrl_pm_select_default_state(dev); ret = imx_controller_resume(dev); if (!ret && data->supports_runtime_pm) { pm_runtime_disable(dev); diff --git a/drivers/usb/chipidea/ci_hdrc_imx.h b/drivers/usb/chipidea/ci_hdrc_imx.h index 99f846119c00..999c65390b7f 100644 --- a/drivers/usb/chipidea/ci_hdrc_imx.h +++ b/drivers/usb/chipidea/ci_hdrc_imx.h @@ -26,6 +26,8 @@ struct imx_usbmisc_data { unsigned int ext_vbus:1; /* Vbus from exteranl event */ struct usb_phy *usb_phy; enum usb_dr_mode available_role; /* runtime usb dr mode */ + int emp_curr_control; + int dc_vol_level_adjust; }; int imx_usbmisc_init(struct imx_usbmisc_data *data); diff --git a/drivers/usb/chipidea/usbmisc_imx.c b/drivers/usb/chipidea/usbmisc_imx.c index 322e4de6b24a..6d8331e7da99 100644 --- 
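The chipidea glue above reads optional PHY tuning values from the device tree; of_property_read_u32() leaves the output argument untouched when the property is absent, so pre-initialized defaults survive. A small sketch of that pattern (the property names here are placeholders, not the binding's):

#include <linux/of.h>
#include <linux/types.h>

struct example_tuning {
	u32 emp_curr_control;
	u32 dc_vol_level_adjust;
};

static void example_read_tuning(struct device_node *np,
				struct example_tuning *t)
{
	/* Defaults used when the (optional) properties are missing. */
	t->emp_curr_control = 0;
	t->dc_vol_level_adjust = 0;

	/* On failure the output argument is left unmodified. */
	of_property_read_u32(np, "example,pre-emp-curr-control",
			     &t->emp_curr_control);
	of_property_read_u32(np, "example,dc-vol-level-adjust",
			     &t->dc_vol_level_adjust);
}
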
a/drivers/usb/chipidea/usbmisc_imx.c +++ b/drivers/usb/chipidea/usbmisc_imx.c @@ -128,6 +128,12 @@ #define MX7D_USB_OTG_PHY_STATUS_VBUS_VLD BIT(3) #define MX7D_USB_OTG_PHY_STATUS_CHRGDET BIT(29) +#define MX7D_USB_OTG_PHY_CFG1 0x30 +#define TXPREEMPAMPTUNE0_BIT 28 +#define TXPREEMPAMPTUNE0_MASK (3 << 28) +#define TXVREFTUNE0_BIT 20 +#define TXVREFTUNE0_MASK (0xf << 20) + #define MX6_USB_OTG_WAKEUP_BITS (MX6_BM_WAKEUP_ENABLE | MX6_BM_VBUS_WAKEUP | \ MX6_BM_ID_WAKEUP) @@ -649,6 +655,21 @@ static int usbmisc_imx7d_init(struct imx_usbmisc_data *data) writel(reg | MX7D_USB_VBUS_WAKEUP_SOURCE_BVALID | MX7D_USBNC_AUTO_RESUME, usbmisc->base + MX7D_USBNC_USB_CTRL2); + /* PHY tuning for signal quality */ + reg = readl(usbmisc->base + MX7D_USB_OTG_PHY_CFG1); + if (data->emp_curr_control && data->emp_curr_control <= + (TXPREEMPAMPTUNE0_MASK >> TXPREEMPAMPTUNE0_BIT)) { + reg &= ~TXPREEMPAMPTUNE0_MASK; + reg |= (data->emp_curr_control << TXPREEMPAMPTUNE0_BIT); + } + + if (data->dc_vol_level_adjust && data->dc_vol_level_adjust <= + (TXVREFTUNE0_MASK >> TXVREFTUNE0_BIT)) { + reg &= ~TXVREFTUNE0_MASK; + reg |= (data->dc_vol_level_adjust << TXVREFTUNE0_BIT); + } + + writel(reg, usbmisc->base + MX7D_USB_OTG_PHY_CFG1); } spin_unlock_irqrestore(&usbmisc->lock, flags); diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c index 7f6f3ab5b8a6..30ef946a8e1a 100644 --- a/drivers/usb/class/cdc-acm.c +++ b/drivers/usb/class/cdc-acm.c @@ -173,7 +173,7 @@ static int acm_wb_alloc(struct acm *acm) for (;;) { wb = &acm->wb[wbn]; if (!wb->use) { - wb->use = 1; + wb->use = true; wb->len = 0; return wbn; } @@ -191,7 +191,8 @@ static int acm_wb_is_avail(struct acm *acm) n = ACM_NW; spin_lock_irqsave(&acm->write_lock, flags); for (i = 0; i < ACM_NW; i++) - n -= acm->wb[i].use; + if(acm->wb[i].use) + n--; spin_unlock_irqrestore(&acm->write_lock, flags); return n; } @@ -201,7 +202,7 @@ static int acm_wb_is_avail(struct acm *acm) */ static void acm_write_done(struct acm *acm, struct acm_wb *wb) { - wb->use = 0; + wb->use = false; acm->transmitting--; usb_autopm_put_interface_async(acm->control); } @@ -741,7 +742,7 @@ static void acm_port_shutdown(struct tty_port *port) if (!urb) break; wb = urb->context; - wb->use = 0; + wb->use = false; usb_autopm_put_interface_async(acm->control); } @@ -792,7 +793,7 @@ static int acm_tty_write(struct tty_struct *tty, wb = &acm->wb[wbn]; if (!acm->dev) { - wb->use = 0; + wb->use = false; spin_unlock_irqrestore(&acm->write_lock, flags); return -ENODEV; } @@ -804,7 +805,7 @@ static int acm_tty_write(struct tty_struct *tty, stat = usb_autopm_get_interface_async(acm->control); if (stat) { - wb->use = 0; + wb->use = false; spin_unlock_irqrestore(&acm->write_lock, flags); return stat; } @@ -1196,9 +1197,6 @@ static int acm_probe(struct usb_interface *intf, return -EINVAL; } - if (!intf->cur_altsetting) - return -EINVAL; - if (!buflen) { if (intf->cur_altsetting->endpoint && intf->cur_altsetting->endpoint->extralen && @@ -1221,39 +1219,42 @@ static int acm_probe(struct usb_interface *intf, call_intf_num = cmgmd->bDataInterface; if (!union_header) { - if (call_intf_num > 0) { + if (intf->cur_altsetting->desc.bNumEndpoints == 3) { + dev_dbg(&intf->dev, "No union descriptor, assuming single interface\n"); + combined_interfaces = 1; + control_interface = data_interface = intf; + goto look_for_collapsed_interface; + } else if (call_intf_num > 0) { dev_dbg(&intf->dev, "No union descriptor, using call management descriptor\n"); - /* quirks for Droids MuIn LCD */ - if (quirks & 
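The usbmisc change above validates each tuning value against the width of its register field before shifting it into place, leaving the reset value alone when the request does not fit. The same guard written generically, with made-up field macros:

#include <linux/types.h>

#define EXAMPLE_FIELD_SHIFT	20			/* hypothetical field position */
#define EXAMPLE_FIELD_MASK	(0xf << EXAMPLE_FIELD_SHIFT)

/* Insert 'val' into the field if it fits, otherwise leave 'reg' untouched. */
static u32 example_set_field(u32 reg, u32 val)
{
	if (val > (EXAMPLE_FIELD_MASK >> EXAMPLE_FIELD_SHIFT))
		return reg;		/* out of range: keep existing value */

	reg &= ~EXAMPLE_FIELD_MASK;
	reg |= val << EXAMPLE_FIELD_SHIFT;
	return reg;
}
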
NO_DATA_INTERFACE) { - data_interface = usb_ifnum_to_if(usb_dev, 0); - } else { - data_intf_num = call_intf_num; - data_interface = usb_ifnum_to_if(usb_dev, data_intf_num); - } + data_intf_num = call_intf_num; + data_interface = usb_ifnum_to_if(usb_dev, data_intf_num); control_interface = intf; } else { - if (intf->cur_altsetting->desc.bNumEndpoints != 3) { - dev_dbg(&intf->dev,"No union descriptor, giving up\n"); - return -ENODEV; - } else { - dev_warn(&intf->dev,"No union descriptor, testing for castrated device\n"); - combined_interfaces = 1; - control_interface = data_interface = intf; - goto look_for_collapsed_interface; - } + dev_dbg(&intf->dev, "No union descriptor, giving up\n"); + return -ENODEV; } } else { + int class = -1; + data_intf_num = union_header->bSlaveInterface0; control_interface = usb_ifnum_to_if(usb_dev, union_header->bMasterInterface0); data_interface = usb_ifnum_to_if(usb_dev, data_intf_num); + + if (control_interface) + class = control_interface->cur_altsetting->desc.bInterfaceClass; + + if (class != USB_CLASS_COMM && class != USB_CLASS_CDC_DATA) { + dev_dbg(&intf->dev, "Broken union descriptor, assuming single interface\n"); + combined_interfaces = 1; + control_interface = data_interface = intf; + goto look_for_collapsed_interface; + } } if (!control_interface || !data_interface) { dev_dbg(&intf->dev, "no interfaces\n"); return -ENODEV; } - if (!data_interface->cur_altsetting || !control_interface->cur_altsetting) - return -ENODEV; if (data_intf_num != call_intf_num) dev_dbg(&intf->dev, "Separate call control interface. That is not fully supported.\n"); @@ -1280,10 +1281,8 @@ look_for_collapsed_interface: skip_normal_probe: /*workaround for switched interfaces */ - if (data_interface->cur_altsetting->desc.bInterfaceClass - != CDC_DATA_INTERFACE_TYPE) { - if (control_interface->cur_altsetting->desc.bInterfaceClass - == CDC_DATA_INTERFACE_TYPE) { + if (data_interface->cur_altsetting->desc.bInterfaceClass != USB_CLASS_CDC_DATA) { + if (control_interface->cur_altsetting->desc.bInterfaceClass == USB_CLASS_CDC_DATA) { dev_dbg(&intf->dev, "Your device has switched interfaces.\n"); swap(control_interface, data_interface); @@ -1876,11 +1875,6 @@ static const struct usb_device_id acm_ids[] = { /* NOTE: non-Nokia COMM/ACM/0xff is likely MSFT RNDIS... NOT a modem! 
*/ - /* Support for Droids MuIn LCD */ - { USB_DEVICE(0x04d8, 0x000b), - .driver_info = NO_DATA_INTERFACE, - }, - #if IS_ENABLED(CONFIG_INPUT_IMS_PCU) { USB_DEVICE(0x04d8, 0x0082), /* Application mode */ .driver_info = IGNORE_DEVICE, @@ -1906,6 +1900,17 @@ static const struct usb_device_id acm_ids[] = { .driver_info = IGNORE_DEVICE, }, + /* Exclude ETAS ES58x */ + { USB_DEVICE(0x108c, 0x0159), /* ES581.4 */ + .driver_info = IGNORE_DEVICE, + }, + { USB_DEVICE(0x108c, 0x0168), /* ES582.1 */ + .driver_info = IGNORE_DEVICE, + }, + { USB_DEVICE(0x108c, 0x0169), /* ES584.1 */ + .driver_info = IGNORE_DEVICE, + }, + { USB_DEVICE(0x1bc7, 0x0021), /* Telit 3G ACM only composition */ .driver_info = SEND_ZERO_PACKET, }, diff --git a/drivers/usb/class/cdc-acm.h b/drivers/usb/class/cdc-acm.h index cd5e9d8ab237..9dce179d031b 100644 --- a/drivers/usb/class/cdc-acm.h +++ b/drivers/usb/class/cdc-acm.h @@ -64,12 +64,12 @@ #define ACM_NR 16 struct acm_wb { - unsigned char *buf; + u8 *buf; dma_addr_t dmah; - int len; - int use; + unsigned int len; struct urb *urb; struct acm *instance; + bool use; }; struct acm_rb { @@ -131,15 +131,12 @@ struct acm { unsigned long quirks; }; -#define CDC_DATA_INTERFACE_TYPE 0x0a - /* constants describing various quirks and errors */ #define NO_UNION_NORMAL BIT(0) #define SINGLE_RX_URB BIT(1) #define NO_CAP_LINE BIT(2) -#define NO_DATA_INTERFACE BIT(4) -#define IGNORE_DEVICE BIT(5) -#define QUIRK_CONTROL_LINE_STATE BIT(6) -#define CLEAR_HALT_CONDITIONS BIT(7) -#define SEND_ZERO_PACKET BIT(8) -#define DISABLE_ECHO BIT(9) +#define IGNORE_DEVICE BIT(3) +#define QUIRK_CONTROL_LINE_STATE BIT(4) +#define CLEAR_HALT_CONDITIONS BIT(5) +#define SEND_ZERO_PACKET BIT(6) +#define DISABLE_ECHO BIT(7) diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c index 7f5de956a2fc..02d0cfd23bb2 100644 --- a/drivers/usb/class/cdc-wdm.c +++ b/drivers/usb/class/cdc-wdm.c @@ -58,6 +58,9 @@ MODULE_DEVICE_TABLE (usb, wdm_ids); #define WDM_MAX 16 +/* we cannot wait forever at flush() */ +#define WDM_FLUSH_TIMEOUT (30 * HZ) + /* CDC-WMC r1.1 requires wMaxCommand to be "at least 256 decimal (0x100)" */ #define WDM_DEFAULT_BUFSIZE 256 @@ -151,7 +154,7 @@ static void wdm_out_callback(struct urb *urb) kfree(desc->outbuf); desc->outbuf = NULL; clear_bit(WDM_IN_USE, &desc->flags); - wake_up(&desc->wait); + wake_up_all(&desc->wait); } static void wdm_in_callback(struct urb *urb) @@ -393,6 +396,9 @@ static ssize_t wdm_write if (test_bit(WDM_RESETTING, &desc->flags)) r = -EIO; + if (test_bit(WDM_DISCONNECTING, &desc->flags)) + r = -ENODEV; + if (r < 0) { rv = r; goto out_free_mem_pm; @@ -424,6 +430,7 @@ static ssize_t wdm_write if (rv < 0) { desc->outbuf = NULL; clear_bit(WDM_IN_USE, &desc->flags); + wake_up_all(&desc->wait); /* for wdm_wait_for_response() */ dev_err(&desc->intf->dev, "Tx URB error: %d\n", rv); rv = usb_translate_errors(rv); goto out_free_mem_pm; @@ -583,28 +590,58 @@ err: return rv; } -static int wdm_flush(struct file *file, fl_owner_t id) +static int wdm_wait_for_response(struct file *file, long timeout) { struct wdm_device *desc = file->private_data; + long rv; /* Use long here because (int) MAX_SCHEDULE_TIMEOUT < 0. */ + + /* + * Needs both flags. We cannot do with one because resetting it would + * cause a race with write() yet we need to signal a disconnect. + */ + rv = wait_event_interruptible_timeout(desc->wait, + !test_bit(WDM_IN_USE, &desc->flags) || + test_bit(WDM_DISCONNECTING, &desc->flags), + timeout); - wait_event(desc->wait, - /* - * needs both flags. 
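wdm_wait_for_response() above relies on the three-way return convention of wait_event_interruptible_timeout(): zero means the timeout expired, a negative value means a signal arrived, and a positive value means the condition became true. A condensed sketch of interpreting that result, with hypothetical flag names (the wait queue is assumed to be initialized elsewhere):

#include <linux/wait.h>
#include <linux/bitops.h>
#include <linux/errno.h>

struct example_dev {
	wait_queue_head_t wait;
	unsigned long flags;
#define EXAMPLE_BUSY		0
#define EXAMPLE_DISCONNECTED	1
};

static int example_wait_idle(struct example_dev *dev, long timeout)
{
	long rv;

	rv = wait_event_interruptible_timeout(dev->wait,
			!test_bit(EXAMPLE_BUSY, &dev->flags) ||
			test_bit(EXAMPLE_DISCONNECTED, &dev->flags),
			timeout);

	if (test_bit(EXAMPLE_DISCONNECTED, &dev->flags))
		return -ENODEV;		/* device went away while waiting */
	if (!rv)
		return -EIO;		/* timed out */
	if (rv < 0)
		return -EINTR;		/* interrupted by a signal */

	return 0;			/* condition became true */
}
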
We cannot do with one - * because resetting it would cause a race - * with write() yet we need to signal - * a disconnect - */ - !test_bit(WDM_IN_USE, &desc->flags) || - test_bit(WDM_DISCONNECTING, &desc->flags)); - - /* cannot dereference desc->intf if WDM_DISCONNECTING */ + /* + * To report the correct error. This is best effort. + * We are inevitably racing with the hardware. + */ if (test_bit(WDM_DISCONNECTING, &desc->flags)) return -ENODEV; - if (desc->werr < 0) - dev_err(&desc->intf->dev, "Error in flush path: %d\n", - desc->werr); + if (!rv) + return -EIO; + if (rv < 0) + return -EINTR; + + spin_lock_irq(&desc->iuspin); + rv = desc->werr; + desc->werr = 0; + spin_unlock_irq(&desc->iuspin); + + return usb_translate_errors(rv); + +} + +/* + * You need to send a signal when you react to malicious or defective hardware. + * Also, don't abort when fsync() returned -EINVAL, for older kernels which do + * not implement wdm_flush() will return -EINVAL. + */ +static int wdm_fsync(struct file *file, loff_t start, loff_t end, int datasync) +{ + return wdm_wait_for_response(file, MAX_SCHEDULE_TIMEOUT); +} - return usb_translate_errors(desc->werr); +/* + * Same with wdm_fsync(), except it uses finite timeout in order to react to + * malicious or defective hardware which ceased communication after close() was + * implicitly called due to process termination. + */ +static int wdm_flush(struct file *file, fl_owner_t id) +{ + return wdm_wait_for_response(file, WDM_FLUSH_TIMEOUT); } static __poll_t wdm_poll(struct file *file, struct poll_table_struct *wait) @@ -729,6 +766,7 @@ static const struct file_operations wdm_fops = { .owner = THIS_MODULE, .read = wdm_read, .write = wdm_write, + .fsync = wdm_fsync, .open = wdm_open, .flush = wdm_flush, .release = wdm_release, diff --git a/drivers/usb/common/usb-conn-gpio.c b/drivers/usb/common/usb-conn-gpio.c index 7b3a21360d7c..6c4e3a19f42c 100644 --- a/drivers/usb/common/usb-conn-gpio.c +++ b/drivers/usb/common/usb-conn-gpio.c @@ -91,14 +91,14 @@ static void usb_conn_detect_cable(struct work_struct *work) return; } - if (info->last_role == USB_ROLE_HOST) + if (info->last_role == USB_ROLE_HOST && info->vbus) regulator_disable(info->vbus); ret = usb_role_switch_set_role(info->role_sw, role); if (ret) dev_err(info->dev, "failed to set role: %d\n", ret); - if (role == USB_ROLE_HOST) { + if (role == USB_ROLE_HOST && info->vbus) { ret = regulator_enable(info->vbus); if (ret) dev_err(info->dev, "enable vbus regulator failed\n"); @@ -106,8 +106,9 @@ static void usb_conn_detect_cable(struct work_struct *work) info->last_role = role; - dev_dbg(info->dev, "vbus regulator is %s\n", - regulator_is_enabled(info->vbus) ? "enabled" : "disabled"); + if (info->vbus) + dev_dbg(info->dev, "vbus regulator is %s\n", + regulator_is_enabled(info->vbus) ? "enabled" : "disabled"); power_supply_changed(info->charger); } @@ -156,6 +157,7 @@ static int usb_conn_probe(struct platform_device *pdev) struct power_supply_config cfg = { .of_node = dev->of_node, }; + bool need_vbus = true; int ret = 0; info = devm_kzalloc(dev, sizeof(*info), GFP_KERNEL); @@ -185,10 +187,26 @@ static int usb_conn_probe(struct platform_device *pdev) INIT_DELAYED_WORK(&info->dw_det, usb_conn_detect_cable); - info->vbus = devm_regulator_get(dev, "vbus"); + /* + * If the USB connector is a child of a USB port and that port already provides the VBUS + * supply, there's no need for the USB connector to provide it again. 
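Treating a supply as optional, as this comment leads into, usually means mapping devm_regulator_get_optional()'s -ENODEV return to a NULL pointer the driver can test later. A sketch of that pattern (not the driver's exact code):

#include <linux/regulator/consumer.h>
#include <linux/err.h>

/* Returns a regulator, NULL if none is wired up, or an ERR_PTR on failure. */
static struct regulator *example_get_optional_vbus(struct device *dev)
{
	struct regulator *vbus;

	vbus = devm_regulator_get_optional(dev, "vbus");
	if (PTR_ERR(vbus) == -ENODEV)
		vbus = NULL;	/* no supply described: treat as absent */

	return vbus;
}
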
+ */ + if (dev->parent && dev->parent->of_node) { + if (of_find_property(dev->parent->of_node, "vbus-supply", NULL)) + need_vbus = false; + } + + if (!need_vbus) { + info->vbus = devm_regulator_get_optional(dev, "vbus"); + if (PTR_ERR(info->vbus) == -ENODEV) + info->vbus = NULL; + } else { + info->vbus = devm_regulator_get(dev, "vbus"); + } + if (IS_ERR(info->vbus)) { if (PTR_ERR(info->vbus) != -EPROBE_DEFER) - dev_err(dev, "failed to get vbus\n"); + dev_err(dev, "failed to get vbus: %ld\n", PTR_ERR(info->vbus)); return PTR_ERR(info->vbus); } @@ -266,7 +284,7 @@ static int usb_conn_remove(struct platform_device *pdev) cancel_delayed_work_sync(&info->dw_det); - if (info->last_role == USB_ROLE_HOST) + if (info->last_role == USB_ROLE_HOST && info->vbus) regulator_disable(info->vbus); usb_role_switch_put(info->role_sw); diff --git a/drivers/usb/core/Kconfig b/drivers/usb/core/Kconfig index dfacc478a8fc..351ede4b5de2 100644 --- a/drivers/usb/core/Kconfig +++ b/drivers/usb/core/Kconfig @@ -32,6 +32,20 @@ config USB_DEFAULT_PERSIST If you have any questions about this, say Y here, only say N if you know exactly what you are doing. +config USB_FEW_INIT_RETRIES + bool "Limit USB device initialization to only a few retries" + help + When a new USB device is detected, the kernel tries very hard + to initialize and enumerate it, with lots of nested retry loops. + This almost always works, but when it fails it can take a long time. + This option tells the kernel to make only a few retry attempts, + so that the total time required for a failed initialization is + no more than 30 seconds (as required by the USB OTG spec). + + Say N here unless you require new-device enumeration failure to + occur within 30 seconds (as might be needed in an embedded + application). + config USB_DYNAMIC_MINORS bool "Dynamic USB minor allocation" help diff --git a/drivers/usb/core/devices.c b/drivers/usb/core/devices.c index 696b2b692b83..1ef2de6e375a 100644 --- a/drivers/usb/core/devices.c +++ b/drivers/usb/core/devices.c @@ -39,7 +39,6 @@ #include <linux/fs.h> #include <linux/mm.h> #include <linux/gfp.h> -#include <linux/poll.h> #include <linux/usb.h> #include <linux/usbdevice_fs.h> #include <linux/usb/hcd.h> @@ -97,22 +96,6 @@ static const char format_endpt[] = /* E: Ad=xx(s) Atr=xx(ssss) MxPS=dddd Ivl=D?s */ "E: Ad=%02x(%c) Atr=%02x(%-4s) MxPS=%4d Ivl=%d%cs\n"; -/* - * Wait for an connect/disconnect event to happen. We initialize - * the event counter with an odd number, and each event will increment - * the event counter by two, so it will always _stay_ odd. That means - * that it will never be zero, so "event 0" will never match a current - * event, and thus 'poll' will always trigger as readable for the first - * time it gets called. 
- */ -static struct device_connect_event { - atomic_t count; - wait_queue_head_t wait; -} device_event = { - .count = ATOMIC_INIT(1), - .wait = __WAIT_QUEUE_HEAD_INITIALIZER(device_event.wait) -}; - struct class_info { int class; char *class_name; @@ -146,12 +129,6 @@ static const struct class_info clas_info[] = { /*****************************************************************/ -void usbfs_conn_disc_event(void) -{ - atomic_add(2, &device_event.count); - wake_up(&device_event.wait); -} - static const char *class_decode(const int class) { int ix; @@ -623,25 +600,7 @@ static ssize_t usb_device_read(struct file *file, char __user *buf, return total_written; } -/* Kernel lock for "lastev" protection */ -static __poll_t usb_device_poll(struct file *file, - struct poll_table_struct *wait) -{ - unsigned int event_count; - - poll_wait(file, &device_event.wait, wait); - - event_count = atomic_read(&device_event.count); - if (file->f_version != event_count) { - file->f_version = event_count; - return EPOLLIN | EPOLLRDNORM; - } - - return 0; -} - const struct file_operations usbfs_devices_fops = { .llseek = no_seek_end_llseek, .read = usb_device_read, - .poll = usb_device_poll, }; diff --git a/drivers/usb/core/driver.c b/drivers/usb/core/driver.c index 7e73e989645b..98b7449c11f3 100644 --- a/drivers/usb/core/driver.c +++ b/drivers/usb/core/driver.c @@ -269,8 +269,30 @@ static int usb_probe_device(struct device *dev) if (error) return error; + /* Probe the USB device with the driver in hand, but only + * defer to a generic driver in case the current USB + * device driver has an id_table or a match function; i.e., + * when the device driver was explicitly matched against + * a device. + * + * If the device driver does not have either of these, + * then we assume that it can bind to any device and is + * not truly a more specialized/non-generic driver, so a + * return value of -ENODEV should not force the device + * to be handled by the generic USB driver, as there + * can still be another, more specialized, device driver. + * + * This accommodates the usbip driver. + * + * TODO: What if, in the future, there are multiple + * specialized USB device drivers for a particular device? + * In such cases, there is a need to try all matching + * specialised device drivers prior to setting the + * use_generic_driver bit. + */ error = udriver->probe(udev); - if (error == -ENODEV && udriver != &usb_generic_driver) { + if (error == -ENODEV && udriver != &usb_generic_driver && + (udriver->id_table || udriver->match)) { udev->use_generic_driver = 1; return -EPROBE_DEFER; } @@ -831,14 +853,17 @@ static int usb_device_match(struct device *dev, struct device_driver *drv) udev = to_usb_device(dev); udrv = to_usb_device_driver(drv); - if (udrv->id_table && - usb_device_match_id(udev, udrv->id_table) != NULL) { - return 1; - } + if (udrv->id_table) + return usb_device_match_id(udev, udrv->id_table) != NULL; if (udrv->match) return udrv->match(udev); - return 0; + + /* If the device driver under consideration does not have a + * id_table or a match function, then let the driver's probe + * function decide. + */ + return 1; } else if (is_usb_interface(dev)) { struct usb_interface *intf; @@ -905,26 +930,19 @@ static int usb_uevent(struct device *dev, struct kobj_uevent_env *env) return 0; } -static bool is_dev_usb_generic_driver(struct device *dev) -{ - struct usb_device_driver *udd = dev->driver ? 
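The probe and match changes above mean a specialized USB *device* (not interface) driver is only preferred over the generic driver when it declares an id_table or a match callback. A bare-bones sketch of such a driver, restricted to the fields this patch relies on and using made-up IDs (registration would go through usb_register_device_driver()):

#include <linux/usb.h>
#include <linux/module.h>

static const struct usb_device_id example_id_table[] = {
	{ USB_DEVICE(0x1234, 0x5678) },		/* made-up VID/PID */
	{ }
};
MODULE_DEVICE_TABLE(usb, example_id_table);

static int example_probe(struct usb_device *udev)
{
	dev_info(&udev->dev, "example device driver bound\n");
	return 0;
}

static void example_disconnect(struct usb_device *udev)
{
}

static struct usb_device_driver example_device_driver = {
	.name		= "example-usb-device",
	.probe		= example_probe,
	.disconnect	= example_disconnect,
	.id_table	= example_id_table,	/* makes this a non-generic match */
};
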
- to_usb_device_driver(dev->driver) : NULL; - - return udd == &usb_generic_driver; -} - static int __usb_bus_reprobe_drivers(struct device *dev, void *data) { struct usb_device_driver *new_udriver = data; struct usb_device *udev; int ret; - if (!is_dev_usb_generic_driver(dev)) + /* Don't reprobe if current driver isn't usb_generic_driver */ + if (dev->driver != &usb_generic_driver.drvwrap.driver) return 0; udev = to_usb_device(dev); if (usb_device_match_id(udev, new_udriver->id_table) == NULL && - (!new_udriver->match || new_udriver->match(udev) != 0)) + (!new_udriver->match || new_udriver->match(udev) == 0)) return 0; ret = device_reprobe(dev); @@ -973,8 +991,7 @@ int usb_register_device_driver(struct usb_device_driver *new_udriver, bus_for_each_dev(&usb_bus_type, NULL, new_udriver, __usb_bus_reprobe_drivers); } else { - printk(KERN_ERR "%s: error %d registering device " - " driver %s\n", + pr_err("%s: error %d registering device driver %s\n", usbcore_name, retval, new_udriver->name); } @@ -1050,9 +1067,8 @@ out: out_newid: driver_unregister(&new_driver->drvwrap.driver); - printk(KERN_ERR "%s: error %d registering interface " - " driver %s\n", - usbcore_name, retval, new_driver->name); + pr_err("%s: error %d registering interface driver %s\n", + usbcore_name, retval, new_driver->name); goto out; } EXPORT_SYMBOL_GPL(usb_register_driver); diff --git a/drivers/usb/core/generic.c b/drivers/usb/core/generic.c index 2b2f1ab6e36a..22c887f5c497 100644 --- a/drivers/usb/core/generic.c +++ b/drivers/usb/core/generic.c @@ -195,7 +195,7 @@ int usb_choose_configuration(struct usb_device *udev) } EXPORT_SYMBOL_GPL(usb_choose_configuration); -static int __check_usb_generic(struct device_driver *drv, void *data) +static int __check_for_non_generic_match(struct device_driver *drv, void *data) { struct usb_device *udev = data; struct usb_device_driver *udrv; @@ -219,7 +219,7 @@ static bool usb_generic_driver_match(struct usb_device *udev) * If any other driver wants the device, leave the device to this other * driver. 
*/ - if (bus_for_each_drv(&usb_bus_type, NULL, udev, __check_usb_generic)) + if (bus_for_each_drv(&usb_bus_type, NULL, udev, __check_for_non_generic_match)) return false; return true; diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c index a33b849e8beb..2c6b9578a7d3 100644 --- a/drivers/usb/core/hcd.c +++ b/drivers/usb/core/hcd.c @@ -1657,9 +1657,9 @@ static void __usb_hcd_giveback_urb(struct urb *urb) usb_put_urb(urb); } -static void usb_giveback_urb_bh(unsigned long param) +static void usb_giveback_urb_bh(struct tasklet_struct *t) { - struct giveback_urb_bh *bh = (struct giveback_urb_bh *)param; + struct giveback_urb_bh *bh = from_tasklet(bh, t, bh); struct list_head local_list; spin_lock_irq(&bh->lock); @@ -2403,7 +2403,7 @@ static void init_giveback_urb_bh(struct giveback_urb_bh *bh) spin_lock_init(&bh->lock); INIT_LIST_HEAD(&bh->head); - tasklet_init(&bh->bh, usb_giveback_urb_bh, (unsigned long)bh); + tasklet_setup(&bh->bh, usb_giveback_urb_bh); } struct usb_hcd *__usb_create_hcd(const struct hc_driver *driver, diff --git a/drivers/usb/core/hub.c b/drivers/usb/core/hub.c index 5b768b80d1ee..17202b2ee063 100644 --- a/drivers/usb/core/hub.c +++ b/drivers/usb/core/hub.c @@ -2705,11 +2705,20 @@ static unsigned hub_is_wusb(struct usb_hub *hub) } +#ifdef CONFIG_USB_FEW_INIT_RETRIES +#define PORT_RESET_TRIES 2 +#define SET_ADDRESS_TRIES 1 +#define GET_DESCRIPTOR_TRIES 1 +#define GET_MAXPACKET0_TRIES 1 +#define PORT_INIT_TRIES 4 + +#else #define PORT_RESET_TRIES 5 #define SET_ADDRESS_TRIES 2 #define GET_DESCRIPTOR_TRIES 2 -#define SET_CONFIG_TRIES (2 * (use_both_schemes + 1)) -#define USE_NEW_SCHEME(i, scheme) ((i) / 2 == (int)(scheme)) +#define GET_MAXPACKET0_TRIES 3 +#define PORT_INIT_TRIES 4 +#endif /* CONFIG_USB_FEW_INIT_RETRIES */ #define HUB_ROOT_RESET_TIME 60 /* times are in msec */ #define HUB_SHORT_RESET_TIME 10 @@ -2717,23 +2726,31 @@ static unsigned hub_is_wusb(struct usb_hub *hub) #define HUB_LONG_RESET_TIME 200 #define HUB_RESET_TIMEOUT 800 -/* - * "New scheme" enumeration causes an extra state transition to be - * exposed to an xhci host and causes USB3 devices to receive control - * commands in the default state. This has been seen to cause - * enumeration failures, so disable this enumeration scheme for USB3 - * devices. - */ static bool use_new_scheme(struct usb_device *udev, int retry, struct usb_port *port_dev) { int old_scheme_first_port = - port_dev->quirks & USB_PORT_QUIRK_OLD_SCHEME; + (port_dev->quirks & USB_PORT_QUIRK_OLD_SCHEME) || + old_scheme_first; + /* + * "New scheme" enumeration causes an extra state transition to be + * exposed to an xhci host and causes USB3 devices to receive control + * commands in the default state. This has been seen to cause + * enumeration failures, so disable this enumeration scheme for USB3 + * devices. + */ if (udev->speed >= USB_SPEED_SUPER) return false; - return USE_NEW_SCHEME(retry, old_scheme_first_port || old_scheme_first); + /* + * If use_both_schemes is set, use the first scheme (whichever + * it is) for the larger half of the retries, then use the other + * scheme. Otherwise, use the first scheme for all the retries. + */ + if (use_both_schemes && retry >= (PORT_INIT_TRIES + 1) / 2) + return old_scheme_first_port; /* Second half */ + return !old_scheme_first_port; /* First half or all */ } /* Is a USB 3.0 port in the Inactive or Compliance Mode state? 
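The hcd.c hunk above converts the giveback bottom half to the tasklet_setup()/from_tasklet() style, in which the callback receives the tasklet pointer and recovers its containing structure. A minimal sketch of the same conversion (structure and names are illustrative):

#include <linux/interrupt.h>
#include <linux/spinlock.h>
#include <linux/list.h>

struct example_bh {
	spinlock_t lock;
	struct list_head head;
	struct tasklet_struct bh;
};

static void example_bh_func(struct tasklet_struct *t)
{
	/* Recover the containing structure from the tasklet pointer. */
	struct example_bh *ebh = from_tasklet(ebh, t, bh);

	/* ... process ebh->head under ebh->lock ... */
}

static void example_bh_init(struct example_bh *ebh)
{
	spin_lock_init(&ebh->lock);
	INIT_LIST_HEAD(&ebh->head);
	tasklet_setup(&ebh->bh, example_bh_func);
}
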
@@ -4545,6 +4562,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, const char *speed; int devnum = udev->devnum; const char *driver_name; + bool do_new_scheme; /* root hub ports have a slightly longer reset period * (from USB 2.0 spec, section 7.1.7.5) @@ -4657,14 +4675,13 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, * first 8 bytes of the device descriptor to get the ep0 maxpacket * value. */ - for (retries = 0; retries < GET_DESCRIPTOR_TRIES; (++retries, msleep(100))) { - bool did_new_scheme = false; + do_new_scheme = use_new_scheme(udev, retry_counter, port_dev); - if (use_new_scheme(udev, retry_counter, port_dev)) { + for (retries = 0; retries < GET_DESCRIPTOR_TRIES; (++retries, msleep(100))) { + if (do_new_scheme) { struct usb_device_descriptor *buf; int r = 0; - did_new_scheme = true; retval = hub_enable_device(udev); if (retval < 0) { dev_err(&udev->dev, @@ -4684,7 +4701,8 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, * 255 is for WUSB devices, we actually need to use * 512 (WUSB1.0[4.8.1]). */ - for (operations = 0; operations < 3; ++operations) { + for (operations = 0; operations < GET_MAXPACKET0_TRIES; + ++operations) { buf->bMaxPacketSize0 = 0; r = usb_control_msg(udev, usb_rcvaddr0pipe(), USB_REQ_GET_DESCRIPTOR, USB_DIR_IN, @@ -4773,11 +4791,7 @@ hub_port_init(struct usb_hub *hub, struct usb_device *udev, int port1, * - read ep0 maxpacket even for high and low speed, */ msleep(10); - /* use_new_scheme() checks the speed which may have - * changed since the initial look so we cache the result - * in did_new_scheme - */ - if (did_new_scheme) + if (do_new_scheme) break; } @@ -5106,7 +5120,7 @@ static void hub_port_connect(struct usb_hub *hub, int port1, u16 portstatus, unit_load = 100; status = 0; - for (i = 0; i < SET_CONFIG_TRIES; i++) { + for (i = 0; i < PORT_INIT_TRIES; i++) { /* reallocate for each attempt, since references * to the previous one can escape in various ways @@ -5239,7 +5253,7 @@ loop: break; /* When halfway through our retry count, power-cycle the port */ - if (i == (SET_CONFIG_TRIES / 2) - 1) { + if (i == (PORT_INIT_TRIES - 1) / 2) { dev_info(&port_dev->dev, "attempt power cycle\n"); usb_hub_set_port_power(hdev, hub, port1, false); msleep(2 * hub_power_on_good_delay(hub)); @@ -5770,7 +5784,7 @@ static int usb_reset_and_verify_device(struct usb_device *udev) bos = udev->bos; udev->bos = NULL; - for (i = 0; i < SET_CONFIG_TRIES; ++i) { + for (i = 0; i < PORT_INIT_TRIES; ++i) { /* ep0 maxpacket size may change; let the HCD know about it. * Other endpoints will be handled by re-enumeration. 
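The retry policy in these hub changes splits the attempts into halves: with use_both_schemes set, the first half of the tries uses one enumeration scheme and the second half the other, and the port is power-cycled near the midpoint. The scheme selection written as a standalone helper, with hypothetical names and a retry count mirroring PORT_INIT_TRIES:

#include <linux/types.h>

#define EXAMPLE_INIT_TRIES	4

/*
 * Returns true when the "old" scheme should be used for this retry,
 * given which scheme is preferred and whether both schemes should be
 * alternated across the retries.
 */
static bool example_use_old_scheme(int retry, bool old_first, bool use_both)
{
	if (use_both && retry >= (EXAMPLE_INIT_TRIES + 1) / 2)
		return !old_first;	/* second half: the other scheme */

	return old_first;		/* first half, or all retries */
}
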
*/ diff --git a/drivers/usb/core/message.c b/drivers/usb/core/message.c index ae1de9cc4b09..f4107b9e8c38 100644 --- a/drivers/usb/core/message.c +++ b/drivers/usb/core/message.c @@ -163,6 +163,143 @@ int usb_control_msg(struct usb_device *dev, unsigned int pipe, __u8 request, EXPORT_SYMBOL_GPL(usb_control_msg); /** + * usb_control_msg_send - Builds a control "send" message, sends it off and waits for completion + * @dev: pointer to the usb device to send the message to + * @endpoint: endpoint to send the message to + * @request: USB message request value + * @requesttype: USB message request type value + * @value: USB message value + * @index: USB message index value + * @driver_data: pointer to the data to send + * @size: length in bytes of the data to send + * @timeout: time in msecs to wait for the message to complete before timing + * out (if 0 the wait is forever) + * @memflags: the flags for memory allocation for buffers + * + * Context: !in_interrupt () + * + * This function sends a control message to a specified endpoint that is not + * expected to fill in a response (i.e. a "send message") and waits for the + * message to complete, or timeout. + * + * Do not use this function from within an interrupt context. If you need + * an asynchronous message, or need to send a message from within interrupt + * context, use usb_submit_urb(). If a thread in your driver uses this call, + * make sure your disconnect() method can wait for it to complete. Since you + * don't have a handle on the URB used, you can't cancel the request. + * + * The data pointer can be made to a reference on the stack, or anywhere else, + * as it will not be modified at all. This does not have the restriction that + * usb_control_msg() has where the data pointer must be to dynamically allocated + * memory (i.e. memory that can be successfully DMAed to a device). + * + * Return: If successful, 0 is returned, Otherwise, a negative error number. + */ +int usb_control_msg_send(struct usb_device *dev, __u8 endpoint, __u8 request, + __u8 requesttype, __u16 value, __u16 index, + const void *driver_data, __u16 size, int timeout, + gfp_t memflags) +{ + unsigned int pipe = usb_sndctrlpipe(dev, endpoint); + int ret; + u8 *data = NULL; + + if (usb_pipe_type_check(dev, pipe)) + return -EINVAL; + + if (size) { + data = kmemdup(driver_data, size, memflags); + if (!data) + return -ENOMEM; + } + + ret = usb_control_msg(dev, pipe, request, requesttype, value, index, + data, size, timeout); + kfree(data); + + if (ret < 0) + return ret; + if (ret == size) + return 0; + return -EINVAL; +} +EXPORT_SYMBOL_GPL(usb_control_msg_send); + +/** + * usb_control_msg_recv - Builds a control "receive" message, sends it off and waits for completion + * @dev: pointer to the usb device to send the message to + * @endpoint: endpoint to send the message to + * @request: USB message request value + * @requesttype: USB message request type value + * @value: USB message value + * @index: USB message index value + * @driver_data: pointer to the data to be filled in by the message + * @size: length in bytes of the data to be received + * @timeout: time in msecs to wait for the message to complete before timing + * out (if 0 the wait is forever) + * @memflags: the flags for memory allocation for buffers + * + * Context: !in_interrupt () + * + * This function sends a control message to a specified endpoint that is + * expected to fill in a response (i.e. a "receive message") and waits for the + * message to complete, or timeout. 
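usb_control_msg_send() above removes the usual requirement that control-message data live in DMA-able heap memory, since it copies from the caller's buffer internally. A sketch of a call site using a stack variable; the request constants here are placeholders, not a real device protocol:

#include <linux/usb.h>

static int example_send_vendor_value(struct usb_device *udev, u16 value)
{
	__le16 data = cpu_to_le16(value);	/* may live on the stack */

	/* 0x01 is a made-up vendor request, for illustration only. */
	return usb_control_msg_send(udev, 0, 0x01,
				    USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
				    0, 0, &data, sizeof(data),
				    USB_CTRL_SET_TIMEOUT, GFP_KERNEL);
}
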
+ * + * Do not use this function from within an interrupt context. If you need + * an asynchronous message, or need to send a message from within interrupt + * context, use usb_submit_urb(). If a thread in your driver uses this call, + * make sure your disconnect() method can wait for it to complete. Since you + * don't have a handle on the URB used, you can't cancel the request. + * + * The data pointer can be made to a reference on the stack, or anywhere else + * that can be successfully written to. This function does not have the + * restriction that usb_control_msg() has where the data pointer must be to + * dynamically allocated memory (i.e. memory that can be successfully DMAed to a + * device). + * + * The "whole" message must be properly received from the device in order for + * this function to be successful. If a device returns less than the expected + * amount of data, then the function will fail. Do not use this for messages + * where a variable amount of data might be returned. + * + * Return: If successful, 0 is returned, Otherwise, a negative error number. + */ +int usb_control_msg_recv(struct usb_device *dev, __u8 endpoint, __u8 request, + __u8 requesttype, __u16 value, __u16 index, + void *driver_data, __u16 size, int timeout, + gfp_t memflags) +{ + unsigned int pipe = usb_rcvctrlpipe(dev, endpoint); + int ret; + u8 *data; + + if (!size || !driver_data || usb_pipe_type_check(dev, pipe)) + return -EINVAL; + + data = kmalloc(size, memflags); + if (!data) + return -ENOMEM; + + ret = usb_control_msg(dev, pipe, request, requesttype, value, index, + data, size, timeout); + + if (ret < 0) + goto exit; + + if (ret == size) { + memcpy(driver_data, data, size); + ret = 0; + } else { + ret = -EINVAL; + } + +exit: + kfree(data); + return ret; +} +EXPORT_SYMBOL_GPL(usb_control_msg_recv); + +/** * usb_interrupt_msg - Builds an interrupt urb, sends it off and waits for completion * @usb_dev: pointer to the usb device to send the message to * @pipe: endpoint "pipe" to send the message to @@ -948,11 +1085,12 @@ int usb_set_isoch_delay(struct usb_device *dev) if (dev->speed < USB_SPEED_SUPER) return 0; - return usb_control_msg(dev, usb_sndctrlpipe(dev, 0), + return usb_control_msg_send(dev, 0, USB_REQ_SET_ISOCH_DELAY, USB_DIR_OUT | USB_TYPE_STANDARD | USB_RECIP_DEVICE, dev->hub_delay, 0, NULL, 0, - USB_CTRL_SET_TIMEOUT); + USB_CTRL_SET_TIMEOUT, + GFP_NOIO); } /** @@ -1070,13 +1208,13 @@ int usb_clear_halt(struct usb_device *dev, int pipe) * (like some ibmcam model 1 units) seem to expect hosts to make * this request for iso endpoints, which can't halt! 
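usb_control_msg_recv() above succeeds only when the device returns exactly the requested length, so it suits fixed-size replies such as a standard descriptor read into a stack structure. A hedged sketch of one possible call site (a plain GET_DESCRIPTOR of the device descriptor, not code from this patch):

#include <linux/usb.h>
#include <linux/usb/ch9.h>

static int example_read_device_descriptor(struct usb_device *udev)
{
	struct usb_device_descriptor desc;	/* stack storage is fine here */
	int ret;

	ret = usb_control_msg_recv(udev, 0, USB_REQ_GET_DESCRIPTOR,
				   USB_DIR_IN | USB_TYPE_STANDARD | USB_RECIP_DEVICE,
				   USB_DT_DEVICE << 8, 0,
				   &desc, sizeof(desc),
				   USB_CTRL_GET_TIMEOUT, GFP_KERNEL);
	if (ret)
		return ret;	/* short or failed transfers are reported as errors */

	dev_info(&udev->dev, "bcdUSB %04x\n", le16_to_cpu(desc.bcdUSB));
	return 0;
}
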
*/ - result = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), - USB_REQ_CLEAR_FEATURE, USB_RECIP_ENDPOINT, - USB_ENDPOINT_HALT, endp, NULL, 0, - USB_CTRL_SET_TIMEOUT); + result = usb_control_msg_send(dev, 0, + USB_REQ_CLEAR_FEATURE, USB_RECIP_ENDPOINT, + USB_ENDPOINT_HALT, endp, NULL, 0, + USB_CTRL_SET_TIMEOUT, GFP_NOIO); /* don't un-halt or force to DATA0 except on success */ - if (result < 0) + if (result) return result; /* NOTE: seems like Microsoft and Apple don't bother verifying @@ -1438,9 +1576,11 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate) if (dev->quirks & USB_QUIRK_NO_SET_INTF) ret = -EPIPE; else - ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), - USB_REQ_SET_INTERFACE, USB_RECIP_INTERFACE, - alternate, interface, NULL, 0, 5000); + ret = usb_control_msg_send(dev, 0, + USB_REQ_SET_INTERFACE, + USB_RECIP_INTERFACE, alternate, + interface, NULL, 0, 5000, + GFP_NOIO); /* 9.4.10 says devices don't need this and are free to STALL the * request if the interface only has one alternate setting. @@ -1450,7 +1590,7 @@ int usb_set_interface(struct usb_device *dev, int interface, int alternate) "manual set_interface for iface %d, alt %d\n", interface, alternate); manual = 1; - } else if (ret < 0) { + } else if (ret) { /* Re-instate the old alt setting */ usb_hcd_alloc_bandwidth(dev, NULL, alt, iface->cur_altsetting); usb_enable_lpm(dev); @@ -1574,11 +1714,11 @@ int usb_reset_configuration(struct usb_device *dev) mutex_unlock(hcd->bandwidth_mutex); return retval; } - retval = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), - USB_REQ_SET_CONFIGURATION, 0, - config->desc.bConfigurationValue, 0, - NULL, 0, USB_CTRL_SET_TIMEOUT); - if (retval < 0) { + retval = usb_control_msg_send(dev, 0, USB_REQ_SET_CONFIGURATION, 0, + config->desc.bConfigurationValue, 0, + NULL, 0, USB_CTRL_SET_TIMEOUT, + GFP_NOIO); + if (retval) { usb_hcd_alloc_bandwidth(dev, NULL, NULL, NULL); usb_enable_lpm(dev); mutex_unlock(hcd->bandwidth_mutex); @@ -1963,10 +2103,10 @@ free_interfaces: } kfree(new_interfaces); - ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), - USB_REQ_SET_CONFIGURATION, 0, configuration, 0, - NULL, 0, USB_CTRL_SET_TIMEOUT); - if (ret < 0 && cp) { + ret = usb_control_msg_send(dev, 0, USB_REQ_SET_CONFIGURATION, 0, + configuration, 0, NULL, 0, + USB_CTRL_SET_TIMEOUT, GFP_NOIO); + if (ret && cp) { /* * All the old state is gone, so what else can we do? * The device is probably useless now anyway. diff --git a/drivers/usb/core/urb.c b/drivers/usb/core/urb.c index 7bc23469f4e4..357b149b20d3 100644 --- a/drivers/usb/core/urb.c +++ b/drivers/usb/core/urb.c @@ -192,24 +192,39 @@ static const int pipetypes[4] = { }; /** - * usb_urb_ep_type_check - sanity check of endpoint in the given urb - * @urb: urb to be checked + * usb_pipe_type_check - sanity check of a specific pipe for a usb device + * @dev: struct usb_device to be checked + * @pipe: pipe to check * * This performs a light-weight sanity check for the endpoint in the - * given urb. It returns 0 if the urb contains a valid endpoint, otherwise - * a negative error code. + * given usb device. It returns 0 if the pipe is valid for the specific usb + * device, otherwise a negative error code. 
*/ -int usb_urb_ep_type_check(const struct urb *urb) +int usb_pipe_type_check(struct usb_device *dev, unsigned int pipe) { const struct usb_host_endpoint *ep; - ep = usb_pipe_endpoint(urb->dev, urb->pipe); + ep = usb_pipe_endpoint(dev, pipe); if (!ep) return -EINVAL; - if (usb_pipetype(urb->pipe) != pipetypes[usb_endpoint_type(&ep->desc)]) + if (usb_pipetype(pipe) != pipetypes[usb_endpoint_type(&ep->desc)]) return -EINVAL; return 0; } +EXPORT_SYMBOL_GPL(usb_pipe_type_check); + +/** + * usb_urb_ep_type_check - sanity check of endpoint in the given urb + * @urb: urb to be checked + * + * This performs a light-weight sanity check for the endpoint in the + * given urb. It returns 0 if the urb contains a valid endpoint, otherwise + * a negative error code. + */ +int usb_urb_ep_type_check(const struct urb *urb) +{ + return usb_pipe_type_check(urb->dev, urb->pipe); +} EXPORT_SYMBOL_GPL(usb_urb_ep_type_check); /** @@ -474,7 +489,7 @@ int usb_submit_urb(struct urb *urb, gfp_t mem_flags) */ /* Check that the pipe's type matches the endpoint's type */ - if (usb_urb_ep_type_check(urb)) + if (usb_pipe_type_check(urb->dev, urb->pipe)) dev_WARN(&dev->dev, "BOGUS urb xfer, pipe %x != type %x\n", usb_pipetype(urb->pipe), pipetypes[xfertype]); @@ -772,11 +787,12 @@ void usb_block_urb(struct urb *urb) EXPORT_SYMBOL_GPL(usb_block_urb); /** - * usb_kill_anchored_urbs - cancel transfer requests en masse + * usb_kill_anchored_urbs - kill all URBs associated with an anchor * @anchor: anchor the requests are bound to * - * this allows all outstanding URBs to be killed starting - * from the back of the queue + * This kills all outstanding URBs starting from the back of the queue, + * with guarantee that no completer callbacks will take place from the + * anchor after this function returns. * * This routine should not be called by a driver after its disconnect * method has returned. 
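The strengthened guarantee documented above matters for the usual anchor pattern, sketched here with hypothetical driver state: once usb_kill_anchored_urbs() returns, disconnect() can tear everything down knowing no completion handler tied to the anchor is still running or about to resubmit. The anchor is assumed to have been set up with init_usb_anchor() at probe time.

#include <linux/usb.h>

struct example_priv {			/* hypothetical driver-private data */
	struct usb_anchor submitted;
};

static int example_submit(struct example_priv *priv, struct urb *urb)
{
	int ret;

	usb_anchor_urb(urb, &priv->submitted);
	ret = usb_submit_urb(urb, GFP_KERNEL);
	if (ret)
		usb_unanchor_urb(urb);

	return ret;
}

static void example_disconnect(struct example_priv *priv)
{
	/* no completion callback from this anchor runs once this returns */
	usb_kill_anchored_urbs(&priv->submitted);
}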
@@ -784,20 +800,26 @@ EXPORT_SYMBOL_GPL(usb_block_urb); void usb_kill_anchored_urbs(struct usb_anchor *anchor) { struct urb *victim; + int surely_empty; - spin_lock_irq(&anchor->lock); - while (!list_empty(&anchor->urb_list)) { - victim = list_entry(anchor->urb_list.prev, struct urb, - anchor_list); - /* we must make sure the URB isn't freed before we kill it*/ - usb_get_urb(victim); - spin_unlock_irq(&anchor->lock); - /* this will unanchor the URB */ - usb_kill_urb(victim); - usb_put_urb(victim); + do { spin_lock_irq(&anchor->lock); - } - spin_unlock_irq(&anchor->lock); + while (!list_empty(&anchor->urb_list)) { + victim = list_entry(anchor->urb_list.prev, + struct urb, anchor_list); + /* make sure the URB isn't freed before we kill it */ + usb_get_urb(victim); + spin_unlock_irq(&anchor->lock); + /* this will unanchor the URB */ + usb_kill_urb(victim); + usb_put_urb(victim); + spin_lock_irq(&anchor->lock); + } + surely_empty = usb_anchor_check_wakeup(anchor); + + spin_unlock_irq(&anchor->lock); + cpu_relax(); + } while (!surely_empty); } EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs); @@ -816,21 +838,27 @@ EXPORT_SYMBOL_GPL(usb_kill_anchored_urbs); void usb_poison_anchored_urbs(struct usb_anchor *anchor) { struct urb *victim; + int surely_empty; - spin_lock_irq(&anchor->lock); - anchor->poisoned = 1; - while (!list_empty(&anchor->urb_list)) { - victim = list_entry(anchor->urb_list.prev, struct urb, - anchor_list); - /* we must make sure the URB isn't freed before we kill it*/ - usb_get_urb(victim); - spin_unlock_irq(&anchor->lock); - /* this will unanchor the URB */ - usb_poison_urb(victim); - usb_put_urb(victim); + do { spin_lock_irq(&anchor->lock); - } - spin_unlock_irq(&anchor->lock); + anchor->poisoned = 1; + while (!list_empty(&anchor->urb_list)) { + victim = list_entry(anchor->urb_list.prev, + struct urb, anchor_list); + /* make sure the URB isn't freed before we kill it */ + usb_get_urb(victim); + spin_unlock_irq(&anchor->lock); + /* this will unanchor the URB */ + usb_poison_urb(victim); + usb_put_urb(victim); + spin_lock_irq(&anchor->lock); + } + surely_empty = usb_anchor_check_wakeup(anchor); + + spin_unlock_irq(&anchor->lock); + cpu_relax(); + } while (!surely_empty); } EXPORT_SYMBOL_GPL(usb_poison_anchored_urbs); @@ -970,14 +998,20 @@ void usb_scuttle_anchored_urbs(struct usb_anchor *anchor) { struct urb *victim; unsigned long flags; + int surely_empty; + + do { + spin_lock_irqsave(&anchor->lock, flags); + while (!list_empty(&anchor->urb_list)) { + victim = list_entry(anchor->urb_list.prev, + struct urb, anchor_list); + __usb_unanchor_urb(victim, anchor); + } + surely_empty = usb_anchor_check_wakeup(anchor); - spin_lock_irqsave(&anchor->lock, flags); - while (!list_empty(&anchor->urb_list)) { - victim = list_entry(anchor->urb_list.prev, struct urb, - anchor_list); - __usb_unanchor_urb(victim, anchor); - } - spin_unlock_irqrestore(&anchor->lock, flags); + spin_unlock_irqrestore(&anchor->lock, flags); + cpu_relax(); + } while (!surely_empty); } EXPORT_SYMBOL_GPL(usb_scuttle_anchored_urbs); diff --git a/drivers/usb/core/usb.h b/drivers/usb/core/usb.h index 98e7d1ee63dc..c893f54a3420 100644 --- a/drivers/usb/core/usb.h +++ b/drivers/usb/core/usb.h @@ -191,7 +191,6 @@ extern const struct attribute_group *usb_interface_groups[]; extern struct usb_driver usbfs_driver; extern const struct file_operations usbfs_devices_fops; extern const struct file_operations usbdev_file_operations; -extern void usbfs_conn_disc_event(void); extern int usb_devio_init(void); extern void 
usb_devio_cleanup(void); diff --git a/drivers/usb/dwc2/Kconfig b/drivers/usb/dwc2/Kconfig index 16e1aa304edc..c131719367ec 100644 --- a/drivers/usb/dwc2/Kconfig +++ b/drivers/usb/dwc2/Kconfig @@ -5,6 +5,7 @@ config USB_DWC2 depends on HAS_DMA depends on USB || USB_GADGET depends on HAS_IOMEM + select USB_ROLE_SWITCH help Say Y here if your system has a Dual Role Hi-Speed USB controller based on the DesignWare HSOTG IP Core. diff --git a/drivers/usb/dwc2/Makefile b/drivers/usb/dwc2/Makefile index 440320cc20a4..2bcd6945df46 100644 --- a/drivers/usb/dwc2/Makefile +++ b/drivers/usb/dwc2/Makefile @@ -3,7 +3,7 @@ ccflags-$(CONFIG_USB_DWC2_DEBUG) += -DDEBUG ccflags-$(CONFIG_USB_DWC2_VERBOSE) += -DVERBOSE_DEBUG obj-$(CONFIG_USB_DWC2) += dwc2.o -dwc2-y := core.o core_intr.o platform.o +dwc2-y := core.o core_intr.o platform.o drd.o dwc2-y += params.o ifneq ($(filter y,$(CONFIG_USB_DWC2_HOST) $(CONFIG_USB_DWC2_DUAL_ROLE)),) diff --git a/drivers/usb/dwc2/core.h b/drivers/usb/dwc2/core.h index 9deff0400a92..7161344c6522 100644 --- a/drivers/usb/dwc2/core.h +++ b/drivers/usb/dwc2/core.h @@ -860,6 +860,7 @@ struct dwc2_hregs_backup { * - USB_DR_MODE_PERIPHERAL * - USB_DR_MODE_HOST * - USB_DR_MODE_OTG + * @role_sw: usb_role_switch handle * @hcd_enabled: Host mode sub-driver initialization indicator. * @gadget_enabled: Peripheral mode sub-driver initialization indicator. * @ll_hw_enabled: Status of low-level hardware resources. @@ -1054,6 +1055,7 @@ struct dwc2_hsotg { struct dwc2_core_params params; enum usb_otg_state op_state; enum usb_dr_mode dr_mode; + struct usb_role_switch *role_sw; unsigned int hcd_enabled:1; unsigned int gadget_enabled:1; unsigned int ll_hw_enabled:1; @@ -1376,6 +1378,11 @@ static inline int dwc2_is_device_mode(struct dwc2_hsotg *hsotg) return (dwc2_readl(hsotg, GINTSTS) & GINTSTS_CURMODE_HOST) == 0; } +int dwc2_drd_init(struct dwc2_hsotg *hsotg); +void dwc2_drd_suspend(struct dwc2_hsotg *hsotg); +void dwc2_drd_resume(struct dwc2_hsotg *hsotg); +void dwc2_drd_exit(struct dwc2_hsotg *hsotg); + /* * Dump core registers and SPRAM */ @@ -1392,6 +1399,7 @@ int dwc2_hsotg_resume(struct dwc2_hsotg *dwc2); int dwc2_gadget_init(struct dwc2_hsotg *hsotg); void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *dwc2, bool reset); +void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg); void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg); void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2); int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, int testmode); @@ -1417,6 +1425,7 @@ static inline int dwc2_gadget_init(struct dwc2_hsotg *hsotg) { return 0; } static inline void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *dwc2, bool reset) {} +static inline void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg) {} static inline void dwc2_hsotg_core_connect(struct dwc2_hsotg *hsotg) {} static inline void dwc2_hsotg_disconnect(struct dwc2_hsotg *dwc2) {} static inline int dwc2_hsotg_set_test_mode(struct dwc2_hsotg *hsotg, diff --git a/drivers/usb/dwc2/drd.c b/drivers/usb/dwc2/drd.c new file mode 100644 index 000000000000..2d4176f5788e --- /dev/null +++ b/drivers/usb/dwc2/drd.c @@ -0,0 +1,180 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * drd.c - DesignWare USB2 DRD Controller Dual-role support + * + * Copyright (C) 2020 STMicroelectronics + * + * Author(s): Amelie Delaunay <amelie.delaunay@st.com> + */ + +#include <linux/iopoll.h> +#include <linux/platform_device.h> +#include <linux/usb/role.h> +#include "core.h" + +static void dwc2_ovr_init(struct dwc2_hsotg *hsotg) +{ + unsigned 
long flags; + u32 gotgctl; + + spin_lock_irqsave(&hsotg->lock, flags); + + gotgctl = dwc2_readl(hsotg, GOTGCTL); + gotgctl |= GOTGCTL_BVALOEN | GOTGCTL_AVALOEN | GOTGCTL_VBVALOEN; + gotgctl |= GOTGCTL_DBNCE_FLTR_BYPASS; + gotgctl &= ~(GOTGCTL_BVALOVAL | GOTGCTL_AVALOVAL | GOTGCTL_VBVALOVAL); + dwc2_writel(hsotg, gotgctl, GOTGCTL); + + dwc2_force_mode(hsotg, false); + + spin_unlock_irqrestore(&hsotg->lock, flags); +} + +static int dwc2_ovr_avalid(struct dwc2_hsotg *hsotg, bool valid) +{ + u32 gotgctl = dwc2_readl(hsotg, GOTGCTL); + + /* Check if A-Session is already in the right state */ + if ((valid && (gotgctl & GOTGCTL_ASESVLD)) || + (!valid && !(gotgctl & GOTGCTL_ASESVLD))) + return -EALREADY; + + if (valid) + gotgctl |= GOTGCTL_AVALOVAL | GOTGCTL_VBVALOVAL; + else + gotgctl &= ~(GOTGCTL_AVALOVAL | GOTGCTL_VBVALOVAL); + dwc2_writel(hsotg, gotgctl, GOTGCTL); + + return 0; +} + +static int dwc2_ovr_bvalid(struct dwc2_hsotg *hsotg, bool valid) +{ + u32 gotgctl = dwc2_readl(hsotg, GOTGCTL); + + /* Check if B-Session is already in the right state */ + if ((valid && (gotgctl & GOTGCTL_BSESVLD)) || + (!valid && !(gotgctl & GOTGCTL_BSESVLD))) + return -EALREADY; + + if (valid) + gotgctl |= GOTGCTL_BVALOVAL | GOTGCTL_VBVALOVAL; + else + gotgctl &= ~(GOTGCTL_BVALOVAL | GOTGCTL_VBVALOVAL); + dwc2_writel(hsotg, gotgctl, GOTGCTL); + + return 0; +} + +static int dwc2_drd_role_sw_set(struct usb_role_switch *sw, enum usb_role role) +{ + struct dwc2_hsotg *hsotg = usb_role_switch_get_drvdata(sw); + unsigned long flags; + int already = 0; + + /* Skip session not in line with dr_mode */ + if ((role == USB_ROLE_DEVICE && hsotg->dr_mode == USB_DR_MODE_HOST) || + (role == USB_ROLE_HOST && hsotg->dr_mode == USB_DR_MODE_PERIPHERAL)) + return -EINVAL; + +#if IS_ENABLED(CONFIG_USB_DWC2_PERIPHERAL) || \ + IS_ENABLED(CONFIG_USB_DWC2_DUAL_ROLE) + /* Skip session if core is in test mode */ + if (role == USB_ROLE_NONE && hsotg->test_mode) { + dev_dbg(hsotg->dev, "Core is in test mode\n"); + return -EBUSY; + } +#endif + + spin_lock_irqsave(&hsotg->lock, flags); + + if (role == USB_ROLE_HOST) { + already = dwc2_ovr_avalid(hsotg, true); + } else if (role == USB_ROLE_DEVICE) { + already = dwc2_ovr_bvalid(hsotg, true); + /* This clear DCTL.SFTDISCON bit */ + dwc2_hsotg_core_connect(hsotg); + } else { + if (dwc2_is_device_mode(hsotg)) { + if (!dwc2_ovr_bvalid(hsotg, false)) + /* This set DCTL.SFTDISCON bit */ + dwc2_hsotg_core_disconnect(hsotg); + } else { + dwc2_ovr_avalid(hsotg, false); + } + } + + spin_unlock_irqrestore(&hsotg->lock, flags); + + if (!already && hsotg->dr_mode == USB_DR_MODE_OTG) + /* This will raise a Connector ID Status Change Interrupt */ + dwc2_force_mode(hsotg, role == USB_ROLE_HOST); + + dev_dbg(hsotg->dev, "%s-session valid\n", + role == USB_ROLE_NONE ? "No" : + role == USB_ROLE_HOST ? 
"A" : "B"); + + return 0; +} + +int dwc2_drd_init(struct dwc2_hsotg *hsotg) +{ + struct usb_role_switch_desc role_sw_desc = {0}; + struct usb_role_switch *role_sw; + int ret; + + if (!device_property_read_bool(hsotg->dev, "usb-role-switch")) + return 0; + + role_sw_desc.driver_data = hsotg; + role_sw_desc.fwnode = dev_fwnode(hsotg->dev); + role_sw_desc.set = dwc2_drd_role_sw_set; + role_sw_desc.allow_userspace_control = true; + + role_sw = usb_role_switch_register(hsotg->dev, &role_sw_desc); + if (IS_ERR(role_sw)) { + ret = PTR_ERR(role_sw); + dev_err(hsotg->dev, + "failed to register role switch: %d\n", ret); + return ret; + } + + hsotg->role_sw = role_sw; + + /* Enable override and initialize values */ + dwc2_ovr_init(hsotg); + + return 0; +} + +void dwc2_drd_suspend(struct dwc2_hsotg *hsotg) +{ + u32 gintsts, gintmsk; + + if (hsotg->role_sw && !hsotg->params.external_id_pin_ctl) { + gintmsk = dwc2_readl(hsotg, GINTMSK); + gintmsk &= ~GINTSTS_CONIDSTSCHNG; + dwc2_writel(hsotg, gintmsk, GINTMSK); + gintsts = dwc2_readl(hsotg, GINTSTS); + dwc2_writel(hsotg, gintsts | GINTSTS_CONIDSTSCHNG, GINTSTS); + } +} + +void dwc2_drd_resume(struct dwc2_hsotg *hsotg) +{ + u32 gintsts, gintmsk; + + if (hsotg->role_sw && !hsotg->params.external_id_pin_ctl) { + gintsts = dwc2_readl(hsotg, GINTSTS); + dwc2_writel(hsotg, gintsts | GINTSTS_CONIDSTSCHNG, GINTSTS); + gintmsk = dwc2_readl(hsotg, GINTMSK); + gintmsk |= GINTSTS_CONIDSTSCHNG; + dwc2_writel(hsotg, gintmsk, GINTMSK); + } +} + +void dwc2_drd_exit(struct dwc2_hsotg *hsotg) +{ + if (hsotg->role_sw) + usb_role_switch_unregister(hsotg->role_sw); +} diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c index 5b9d23991c99..0a0d11151cfb 100644 --- a/drivers/usb/dwc2/gadget.c +++ b/drivers/usb/dwc2/gadget.c @@ -713,8 +713,11 @@ static u32 dwc2_hsotg_read_frameno(struct dwc2_hsotg *hsotg) */ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep) { + const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc; int is_isoc = hs_ep->isochronous; unsigned int maxsize; + u32 mps = hs_ep->ep.maxpacket; + int dir_in = hs_ep->dir_in; if (is_isoc) maxsize = (hs_ep->dir_in ? DEV_DMA_ISOC_TX_NBYTES_LIMIT : @@ -723,6 +726,11 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep) else maxsize = DEV_DMA_NBYTES_LIMIT * MAX_DMA_DESC_NUM_GENERIC; + /* Interrupt OUT EP with mps not multiple of 4 */ + if (hs_ep->index) + if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4)) + maxsize = mps * MAX_DMA_DESC_NUM_GENERIC; + return maxsize; } @@ -738,11 +746,14 @@ static unsigned int dwc2_gadget_get_chain_limit(struct dwc2_hsotg_ep *hs_ep) * Isochronous - descriptor rx/tx bytes bitfield limit, * Control In/Bulk/Interrupt - multiple of mps. This will allow to not * have concatenations from various descriptors within one packet. + * Interrupt OUT - if mps not multiple of 4 then a single packet corresponds + * to a single descriptor. * * Selects corresponding mask for RX/TX bytes as well. 
*/ static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask) { + const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc; u32 mps = hs_ep->ep.maxpacket; int dir_in = hs_ep->dir_in; u32 desc_size = 0; @@ -766,6 +777,13 @@ static u32 dwc2_gadget_get_desc_params(struct dwc2_hsotg_ep *hs_ep, u32 *mask) desc_size -= desc_size % mps; } + /* Interrupt OUT EP with mps not multiple of 4 */ + if (hs_ep->index) + if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4)) { + desc_size = mps; + *mask = DEV_DMA_NBYTES_MASK; + } + return desc_size; } @@ -1123,13 +1141,7 @@ static void dwc2_hsotg_start_req(struct dwc2_hsotg *hsotg, length += (mps - (length % mps)); } - /* - * If more data to send, adjust DMA for EP0 out data stage. - * ureq->dma stays unchanged, hence increment it by already - * passed passed data count before starting new transaction. - */ - if (!index && hsotg->ep0_state == DWC2_EP0_DATA_OUT && - continuing) + if (continuing) offset = ureq->actual; /* Fill DDMA chain entries */ @@ -2320,22 +2332,36 @@ static void dwc2_hsotg_change_ep_iso_parity(struct dwc2_hsotg *hsotg, */ static unsigned int dwc2_gadget_get_xfersize_ddma(struct dwc2_hsotg_ep *hs_ep) { + const struct usb_endpoint_descriptor *ep_desc = hs_ep->ep.desc; struct dwc2_hsotg *hsotg = hs_ep->parent; unsigned int bytes_rem = 0; + unsigned int bytes_rem_correction = 0; struct dwc2_dma_desc *desc = hs_ep->desc_list; int i; u32 status; + u32 mps = hs_ep->ep.maxpacket; + int dir_in = hs_ep->dir_in; if (!desc) return -EINVAL; + /* Interrupt OUT EP with mps not multiple of 4 */ + if (hs_ep->index) + if (usb_endpoint_xfer_int(ep_desc) && !dir_in && (mps % 4)) + bytes_rem_correction = 4 - (mps % 4); + for (i = 0; i < hs_ep->desc_count; ++i) { status = desc->status; bytes_rem += status & DEV_DMA_NBYTES_MASK; + bytes_rem -= bytes_rem_correction; if (status & DEV_DMA_STS_MASK) dev_err(hsotg->dev, "descriptor %d closed with %x\n", i, status & DEV_DMA_STS_MASK); + + if (status & DEV_DMA_L) + break; + desc++; } @@ -3530,7 +3556,7 @@ void dwc2_hsotg_core_init_disconnected(struct dwc2_hsotg *hsotg, dwc2_readl(hsotg, DOEPCTL0)); } -static void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg) +void dwc2_hsotg_core_disconnect(struct dwc2_hsotg *hsotg) { /* set the soft-disconnect bit */ dwc2_set_bit(hsotg, DCTL, DCTL_SFTDISCON); diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c index 8f9d061c4d5f..267543c3dc38 100644 --- a/drivers/usb/dwc2/params.c +++ b/drivers/usb/dwc2/params.c @@ -185,7 +185,7 @@ static void dwc2_set_stm32mp15_hsotg_params(struct dwc2_hsotg *hsotg) struct dwc2_core_params *p = &hsotg->params; p->otg_cap = DWC2_CAP_PARAM_NO_HNP_SRP_CAPABLE; - p->activate_stm_id_vb_detection = true; + p->activate_stm_id_vb_detection = !device_property_read_bool(hsotg->dev, "usb-role-switch"); p->host_rx_fifo_size = 440; p->host_nperio_tx_fifo_size = 256; p->host_perio_tx_fifo_size = 256; @@ -210,6 +210,7 @@ const struct of_device_id dwc2_of_match_table[] = { { .compatible = "amlogic,meson-g12a-usb", .data = dwc2_set_amlogic_g12a_params }, { .compatible = "amcc,dwc-otg", .data = dwc2_set_amcc_params }, + { .compatible = "apm,apm82181-dwc-otg", .data = dwc2_set_amcc_params }, { .compatible = "st,stm32f4x9-fsotg", .data = dwc2_set_stm32f4x9_fsotg_params }, { .compatible = "st,stm32f4x9-hsotg" }, @@ -860,7 +861,7 @@ int dwc2_get_hwparams(struct dwc2_hsotg *hsotg) int dwc2_init_params(struct dwc2_hsotg *hsotg) { const struct of_device_id *match; - void (*set_params)(void *data); + void 
(*set_params)(struct dwc2_hsotg *data); dwc2_set_default_params(hsotg); dwc2_get_device_properties(hsotg); diff --git a/drivers/usb/dwc2/platform.c b/drivers/usb/dwc2/platform.c index db9fd4bd1a38..e2820676beb1 100644 --- a/drivers/usb/dwc2/platform.c +++ b/drivers/usb/dwc2/platform.c @@ -121,6 +121,13 @@ static int dwc2_get_dr_mode(struct dwc2_hsotg *hsotg) return 0; } +static void __dwc2_disable_regulators(void *data) +{ + struct dwc2_hsotg *hsotg = data; + + regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), hsotg->supplies); +} + static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg) { struct platform_device *pdev = to_platform_device(hsotg->dev); @@ -131,6 +138,11 @@ static int __dwc2_lowlevel_hw_enable(struct dwc2_hsotg *hsotg) if (ret) return ret; + ret = devm_add_action_or_reset(&pdev->dev, + __dwc2_disable_regulators, hsotg); + if (ret) + return ret; + if (hsotg->clk) { ret = clk_prepare_enable(hsotg->clk); if (ret) @@ -186,10 +198,7 @@ static int __dwc2_lowlevel_hw_disable(struct dwc2_hsotg *hsotg) if (hsotg->clk) clk_disable_unprepare(hsotg->clk); - ret = regulator_bulk_disable(ARRAY_SIZE(hsotg->supplies), - hsotg->supplies); - - return ret; + return 0; } /** @@ -314,6 +323,8 @@ static int dwc2_driver_remove(struct platform_device *dev) if (hsotg->gadget_enabled) dwc2_hsotg_remove(hsotg); + dwc2_drd_exit(hsotg); + if (hsotg->params.activate_stm_id_vb_detection) regulator_disable(hsotg->usb33d); @@ -533,10 +544,17 @@ static int dwc2_driver_probe(struct platform_device *dev) dwc2_writel(hsotg, ggpio, GGPIO); } + retval = dwc2_drd_init(hsotg); + if (retval) { + if (retval != -EPROBE_DEFER) + dev_err(hsotg->dev, "failed to initialize dual-role\n"); + goto error_init; + } + if (hsotg->dr_mode != USB_DR_MODE_HOST) { retval = dwc2_gadget_init(hsotg); if (retval) - goto error_init; + goto error_drd; hsotg->gadget_enabled = 1; } @@ -562,7 +580,7 @@ static int dwc2_driver_probe(struct platform_device *dev) if (retval) { if (hsotg->gadget_enabled) dwc2_hsotg_remove(hsotg); - goto error_init; + goto error_drd; } hsotg->hcd_enabled = 1; } @@ -584,12 +602,19 @@ static int dwc2_driver_probe(struct platform_device *dev) if (retval) { hsotg->gadget.udc = NULL; dwc2_hsotg_remove(hsotg); - goto error_init; + goto error_debugfs; } } #endif /* CONFIG_USB_DWC2_PERIPHERAL || CONFIG_USB_DWC2_DUAL_ROLE */ return 0; +error_debugfs: + dwc2_debugfs_exit(hsotg); + if (hsotg->hcd_enabled) + dwc2_hcd_remove(hsotg); +error_drd: + dwc2_drd_exit(hsotg); + error_init: if (hsotg->params.activate_stm_id_vb_detection) regulator_disable(hsotg->usb33d); @@ -608,6 +633,8 @@ static int __maybe_unused dwc2_suspend(struct device *dev) if (is_device_mode) dwc2_hsotg_suspend(dwc2); + dwc2_drd_suspend(dwc2); + if (dwc2->params.activate_stm_id_vb_detection) { unsigned long flags; u32 ggpio, gotgctl; @@ -688,6 +715,8 @@ static int __maybe_unused dwc2_resume(struct device *dev) /* Need to restore FORCEDEVMODE/FORCEHOSTMODE */ dwc2_force_dr_mode(dwc2); + dwc2_drd_resume(dwc2); + if (dwc2_is_device_mode(dwc2)) ret = dwc2_hsotg_resume(dwc2); diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c index 2eb34c8b4065..bdf0925da6b6 100644 --- a/drivers/usb/dwc3/core.c +++ b/drivers/usb/dwc3/core.c @@ -119,9 +119,7 @@ static void __dwc3_set_mode(struct work_struct *work) struct dwc3 *dwc = work_to_dwc(work); unsigned long flags; int ret; - - if (dwc->dr_mode != USB_DR_MODE_OTG) - return; + u32 reg; pm_runtime_get_sync(dwc->dev); @@ -172,6 +170,11 @@ static void __dwc3_set_mode(struct work_struct *work) 
otg_set_vbus(dwc->usb2_phy->otg, true); phy_set_mode(dwc->usb2_generic_phy, PHY_MODE_USB_HOST); phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST); + if (dwc->dis_split_quirk) { + reg = dwc3_readl(dwc->regs, DWC3_GUCTL3); + reg |= DWC3_GUCTL3_SPLITDISABLE; + dwc3_writel(dwc->regs, DWC3_GUCTL3, reg); + } } break; case DWC3_GCTL_PRTCAP_DEVICE: @@ -203,6 +206,9 @@ void dwc3_set_mode(struct dwc3 *dwc, u32 mode) { unsigned long flags; + if (dwc->dr_mode != USB_DR_MODE_OTG) + return; + spin_lock_irqsave(&dwc->lock, flags); dwc->desired_dr_role = mode; spin_unlock_irqrestore(&dwc->lock, flags); @@ -929,13 +935,6 @@ static int dwc3_core_init(struct dwc3 *dwc) */ dwc3_writel(dwc->regs, DWC3_GUID, LINUX_VERSION_CODE); - /* Handle USB2.0-only core configuration */ - if (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) == - DWC3_GHWPARAMS3_SSPHY_IFC_DIS) { - if (dwc->maximum_speed == USB_SPEED_SUPER) - dwc->maximum_speed = USB_SPEED_HIGH; - } - ret = dwc3_phy_setup(dwc); if (ret) goto err0; @@ -1356,6 +1355,9 @@ static void dwc3_get_properties(struct dwc3 *dwc) dwc->dis_metastability_quirk = device_property_read_bool(dev, "snps,dis_metastability_quirk"); + dwc->dis_split_quirk = device_property_read_bool(dev, + "snps,dis-split-quirk"); + dwc->lpm_nyet_threshold = lpm_nyet_threshold; dwc->tx_de_emphasis = tx_de_emphasis; @@ -1381,6 +1383,8 @@ bool dwc3_has_imod(struct dwc3 *dwc) static void dwc3_check_params(struct dwc3 *dwc) { struct device *dev = dwc->dev; + unsigned int hwparam_gen = + DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3); /* Check for proper value of imod_interval */ if (dwc->imod_interval && !dwc3_has_imod(dwc)) { @@ -1404,25 +1408,40 @@ static void dwc3_check_params(struct dwc3 *dwc) case USB_SPEED_LOW: case USB_SPEED_FULL: case USB_SPEED_HIGH: + break; case USB_SPEED_SUPER: + if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS) + dev_warn(dev, "UDC doesn't support Gen 1\n"); + break; case USB_SPEED_SUPER_PLUS: + if ((DWC3_IP_IS(DWC32) && + hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_DIS) || + (!DWC3_IP_IS(DWC32) && + hwparam_gen != DWC3_GHWPARAMS3_SSPHY_IFC_GEN2)) + dev_warn(dev, "UDC doesn't support SSP\n"); break; default: dev_err(dev, "invalid maximum_speed parameter %d\n", dwc->maximum_speed); fallthrough; case USB_SPEED_UNKNOWN: - /* default to superspeed */ - dwc->maximum_speed = USB_SPEED_SUPER; - - /* - * default to superspeed plus if we are capable. 
- */ - if ((DWC3_IP_IS(DWC31) || DWC3_IP_IS(DWC32)) && - (DWC3_GHWPARAMS3_SSPHY_IFC(dwc->hwparams.hwparams3) == - DWC3_GHWPARAMS3_SSPHY_IFC_GEN2)) + switch (hwparam_gen) { + case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2: dwc->maximum_speed = USB_SPEED_SUPER_PLUS; - + break; + case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1: + if (DWC3_IP_IS(DWC32)) + dwc->maximum_speed = USB_SPEED_SUPER_PLUS; + else + dwc->maximum_speed = USB_SPEED_SUPER; + break; + case DWC3_GHWPARAMS3_SSPHY_IFC_DIS: + dwc->maximum_speed = USB_SPEED_HIGH; + break; + default: + dwc->maximum_speed = USB_SPEED_SUPER; + break; + } break; } } @@ -1554,6 +1573,17 @@ static int dwc3_probe(struct platform_device *pdev) err5: dwc3_event_buffers_cleanup(dwc); + + usb_phy_shutdown(dwc->usb2_phy); + usb_phy_shutdown(dwc->usb3_phy); + phy_exit(dwc->usb2_generic_phy); + phy_exit(dwc->usb3_generic_phy); + + usb_phy_set_suspend(dwc->usb2_phy, 1); + usb_phy_set_suspend(dwc->usb3_phy, 1); + phy_power_off(dwc->usb2_generic_phy); + phy_power_off(dwc->usb3_generic_phy); + dwc3_ulpi_exit(dwc); err4: @@ -1589,9 +1619,9 @@ static int dwc3_remove(struct platform_device *pdev) dwc3_core_exit(dwc); dwc3_ulpi_exit(dwc); - pm_runtime_put_sync(&pdev->dev); - pm_runtime_allow(&pdev->dev); pm_runtime_disable(&pdev->dev); + pm_runtime_put_noidle(&pdev->dev); + pm_runtime_set_suspended(&pdev->dev); dwc3_free_event_buffers(dwc); dwc3_free_scratch_buffers(dwc); @@ -1865,10 +1895,26 @@ static int dwc3_resume(struct device *dev) return 0; } + +static void dwc3_complete(struct device *dev) +{ + struct dwc3 *dwc = dev_get_drvdata(dev); + u32 reg; + + if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST && + dwc->dis_split_quirk) { + reg = dwc3_readl(dwc->regs, DWC3_GUCTL3); + reg |= DWC3_GUCTL3_SPLITDISABLE; + dwc3_writel(dwc->regs, DWC3_GUCTL3, reg); + } +} +#else +#define dwc3_complete NULL #endif /* CONFIG_PM_SLEEP */ static const struct dev_pm_ops dwc3_dev_pm_ops = { SET_SYSTEM_SLEEP_PM_OPS(dwc3_suspend, dwc3_resume) + .complete = dwc3_complete, SET_RUNTIME_PM_OPS(dwc3_runtime_suspend, dwc3_runtime_resume, dwc3_runtime_idle) }; diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h index 2f04b3e42bf1..74323b10a64a 100644 --- a/drivers/usb/dwc3/core.h +++ b/drivers/usb/dwc3/core.h @@ -138,6 +138,7 @@ #define DWC3_GEVNTCOUNT(n) (0xc40c + ((n) * 0x10)) #define DWC3_GHWPARAMS8 0xc600 +#define DWC3_GUCTL3 0xc60c #define DWC3_GFLADJ 0xc630 /* Device Registers */ @@ -380,6 +381,9 @@ /* Global User Control Register 2 */ #define DWC3_GUCTL2_RST_ACTBITLATER BIT(14) +/* Global User Control Register 3 */ +#define DWC3_GUCTL3_SPLITDISABLE BIT(14) + /* Device Configuration Register */ #define DWC3_DCFG_DEVADDR(addr) ((addr) << 3) #define DWC3_DCFG_DEVADDR_MASK DWC3_DCFG_DEVADDR(0x7f) @@ -634,7 +638,7 @@ struct dwc3_trb; struct dwc3_event_buffer { void *buf; void *cache; - unsigned length; + unsigned int length; unsigned int lpos; unsigned int count; unsigned int flags; @@ -694,7 +698,7 @@ struct dwc3_ep { struct dwc3 *dwc; u32 saved_state; - unsigned flags; + unsigned int flags; #define DWC3_EP_ENABLED BIT(0) #define DWC3_EP_STALL BIT(1) #define DWC3_EP_WEDGE BIT(2) @@ -706,6 +710,7 @@ struct dwc3_ep { #define DWC3_EP_IGNORE_NEXT_NOSTREAM BIT(8) #define DWC3_EP_FORCE_RESTART_STREAM BIT(9) #define DWC3_EP_FIRST_STREAM_PRIMED BIT(10) +#define DWC3_EP_PENDING_CLEAR_STALL BIT(11) /* This last one is specific to EP0 */ #define DWC3_EP0_DIR_IN BIT(31) @@ -893,9 +898,9 @@ struct dwc3_request { struct scatterlist *sg; struct scatterlist *start_sg; - unsigned num_pending_sgs; + unsigned int 
num_pending_sgs; unsigned int num_queued_sgs; - unsigned remaining; + unsigned int remaining; unsigned int status; #define DWC3_REQUEST_STATUS_QUEUED 0 @@ -908,11 +913,11 @@ struct dwc3_request { struct dwc3_trb *trb; dma_addr_t trb_dma; - unsigned num_trbs; + unsigned int num_trbs; - unsigned needs_extra_trb:1; - unsigned direction:1; - unsigned mapped:1; + unsigned int needs_extra_trb:1; + unsigned int direction:1; + unsigned int mapped:1; }; /* @@ -1010,8 +1015,8 @@ struct dwc3_scratchpad_array { * @has_lpm_erratum: true when core was configured with LPM Erratum. Note that * there's now way for software to detect this in runtime. * @is_utmi_l1_suspend: the core asserts output signal - * 0 - utmi_sleep_n - * 1 - utmi_l1_suspend_n + * 0 - utmi_sleep_n + * 1 - utmi_l1_suspend_n * @is_fpga: true when we are using the FPGA board * @pending_events: true when we have pending IRQs to be handled * @pullups_connected: true when Run/Stop bit is set @@ -1047,13 +1052,14 @@ struct dwc3_scratchpad_array { * instances in park mode. * @tx_de_emphasis_quirk: set if we enable Tx de-emphasis quirk * @tx_de_emphasis: Tx de-emphasis value - * 0 - -6dB de-emphasis - * 1 - -3.5dB de-emphasis - * 2 - No de-emphasis - * 3 - Reserved + * 0 - -6dB de-emphasis + * 1 - -3.5dB de-emphasis + * 2 - No de-emphasis + * 3 - Reserved * @dis_metastability_quirk: set to disable metastability quirk. + * @dis_split_quirk: set to disable split boundary. * @imod_interval: set the interrupt moderation interval in 250ns - * increments or 0 to disable. + * increments or 0 to disable. */ struct dwc3 { struct work_struct drd_work; @@ -1079,7 +1085,7 @@ struct dwc3 { struct dwc3_event_buffer *ev_buf; struct dwc3_ep *eps[DWC3_ENDPOINTS_NUM]; - struct usb_gadget gadget; + struct usb_gadget *gadget; struct usb_gadget_driver *gadget_driver; struct clk_bulk_data *clks; @@ -1245,6 +1251,8 @@ struct dwc3 { unsigned dis_metastability_quirk:1; + unsigned dis_split_quirk:1; + u16 imod_interval; }; @@ -1456,9 +1464,10 @@ void dwc3_gadget_exit(struct dwc3 *dwc); int dwc3_gadget_set_test_mode(struct dwc3 *dwc, int mode); int dwc3_gadget_get_link_state(struct dwc3 *dwc); int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state); -int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd, +int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd, struct dwc3_gadget_ep_cmd_params *params); -int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param); +int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned int cmd, + u32 param); #else static inline int dwc3_gadget_init(struct dwc3 *dwc) { return 0; } @@ -1472,7 +1481,7 @@ static inline int dwc3_gadget_set_link_state(struct dwc3 *dwc, enum dwc3_link_state state) { return 0; } -static inline int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd, +static inline int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd, struct dwc3_gadget_ep_cmd_params *params) { return 0; } static inline int dwc3_send_gadget_generic_command(struct dwc3 *dwc, diff --git a/drivers/usb/dwc3/debug.h b/drivers/usb/dwc3/debug.h index 3d16dac4e5cc..8ab394942360 100644 --- a/drivers/usb/dwc3/debug.h +++ b/drivers/usb/dwc3/debug.h @@ -371,7 +371,9 @@ static inline const char *dwc3_gadget_event_type_string(u8 event) static inline const char *dwc3_decode_event(char *str, size_t size, u32 event, u32 ep0state) { - const union dwc3_event evt = (union dwc3_event) event; + union dwc3_event evt; + + memcpy(&evt, &event, sizeof(event)); if (evt.type.is_devspec) 
return dwc3_gadget_event_string(str, size, &evt.devt); @@ -411,8 +413,8 @@ static inline const char *dwc3_gadget_generic_cmd_status_string(int status) #ifdef CONFIG_DEBUG_FS -extern void dwc3_debugfs_init(struct dwc3 *); -extern void dwc3_debugfs_exit(struct dwc3 *); +extern void dwc3_debugfs_init(struct dwc3 *d); +extern void dwc3_debugfs_exit(struct dwc3 *d); #else static inline void dwc3_debugfs_init(struct dwc3 *d) { } diff --git a/drivers/usb/dwc3/debugfs.c b/drivers/usb/dwc3/debugfs.c index 2c7b6dd79cdf..5da4f6082d93 100644 --- a/drivers/usb/dwc3/debugfs.c +++ b/drivers/usb/dwc3/debugfs.c @@ -397,13 +397,13 @@ static int dwc3_mode_show(struct seq_file *s, void *unused) switch (DWC3_GCTL_PRTCAP(reg)) { case DWC3_GCTL_PRTCAP_HOST: - seq_printf(s, "host\n"); + seq_puts(s, "host\n"); break; case DWC3_GCTL_PRTCAP_DEVICE: - seq_printf(s, "device\n"); + seq_puts(s, "device\n"); break; case DWC3_GCTL_PRTCAP_OTG: - seq_printf(s, "otg\n"); + seq_puts(s, "otg\n"); break; default: seq_printf(s, "UNKNOWN %08x\n", DWC3_GCTL_PRTCAP(reg)); @@ -428,6 +428,9 @@ static ssize_t dwc3_mode_write(struct file *file, if (copy_from_user(&buf, ubuf, min_t(size_t, sizeof(buf) - 1, count))) return -EFAULT; + if (dwc->dr_mode != USB_DR_MODE_OTG) + return count; + if (!strncmp(buf, "host", 4)) mode = DWC3_GCTL_PRTCAP_HOST; @@ -464,22 +467,22 @@ static int dwc3_testmode_show(struct seq_file *s, void *unused) switch (reg) { case 0: - seq_printf(s, "no test\n"); + seq_puts(s, "no test\n"); break; case USB_TEST_J: - seq_printf(s, "test_j\n"); + seq_puts(s, "test_j\n"); break; case USB_TEST_K: - seq_printf(s, "test_k\n"); + seq_puts(s, "test_k\n"); break; case USB_TEST_SE0_NAK: - seq_printf(s, "test_se0_nak\n"); + seq_puts(s, "test_se0_nak\n"); break; case USB_TEST_PACKET: - seq_printf(s, "test_packet\n"); + seq_puts(s, "test_packet\n"); break; case USB_TEST_FORCE_ENABLE: - seq_printf(s, "test_force_enable\n"); + seq_puts(s, "test_force_enable\n"); break; default: seq_printf(s, "UNKNOWN %d\n", reg); @@ -760,27 +763,26 @@ static int dwc3_transfer_type_show(struct seq_file *s, void *unused) unsigned long flags; spin_lock_irqsave(&dwc->lock, flags); - if (!(dep->flags & DWC3_EP_ENABLED) || - !dep->endpoint.desc) { - seq_printf(s, "--\n"); + if (!(dep->flags & DWC3_EP_ENABLED) || !dep->endpoint.desc) { + seq_puts(s, "--\n"); goto out; } switch (usb_endpoint_type(dep->endpoint.desc)) { case USB_ENDPOINT_XFER_CONTROL: - seq_printf(s, "control\n"); + seq_puts(s, "control\n"); break; case USB_ENDPOINT_XFER_ISOC: - seq_printf(s, "isochronous\n"); + seq_puts(s, "isochronous\n"); break; case USB_ENDPOINT_XFER_BULK: - seq_printf(s, "bulk\n"); + seq_puts(s, "bulk\n"); break; case USB_ENDPOINT_XFER_INT: - seq_printf(s, "interrupt\n"); + seq_puts(s, "interrupt\n"); break; default: - seq_printf(s, "--\n"); + seq_puts(s, "--\n"); } out: @@ -798,11 +800,11 @@ static int dwc3_trb_ring_show(struct seq_file *s, void *unused) spin_lock_irqsave(&dwc->lock, flags); if (dep->number <= 1) { - seq_printf(s, "--\n"); + seq_puts(s, "--\n"); goto out; } - seq_printf(s, "buffer_addr,size,type,ioc,isp_imi,csp,chn,lst,hwo\n"); + seq_puts(s, "buffer_addr,size,type,ioc,isp_imi,csp,chn,lst,hwo\n"); for (i = 0; i < DWC3_TRB_NUM; i++) { struct dwc3_trb *trb = &dep->trb_pool[i]; @@ -884,7 +886,7 @@ static void dwc3_debugfs_create_endpoint_files(struct dwc3_ep *dep, const struct file_operations *fops = dwc3_ep_file_map[i].fops; const char *name = dwc3_ep_file_map[i].name; - debugfs_create_file(name, S_IRUGO, parent, dep, fops); + debugfs_create_file(name, 
0444, parent, dep, fops); } } @@ -929,21 +931,18 @@ void dwc3_debugfs_init(struct dwc3 *dwc) root = debugfs_create_dir(dev_name(dwc->dev), usb_debug_root); dwc->root = root; - debugfs_create_regset32("regdump", S_IRUGO, root, dwc->regset); - - debugfs_create_file("lsp_dump", S_IRUGO | S_IWUSR, root, dwc, - &dwc3_lsp_fops); + debugfs_create_regset32("regdump", 0444, root, dwc->regset); + debugfs_create_file("lsp_dump", 0644, root, dwc, &dwc3_lsp_fops); - if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)) { - debugfs_create_file("mode", S_IRUGO | S_IWUSR, root, dwc, + if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE)) + debugfs_create_file("mode", 0644, root, dwc, &dwc3_mode_fops); - } if (IS_ENABLED(CONFIG_USB_DWC3_DUAL_ROLE) || IS_ENABLED(CONFIG_USB_DWC3_GADGET)) { - debugfs_create_file("testmode", S_IRUGO | S_IWUSR, root, dwc, - &dwc3_testmode_fops); - debugfs_create_file("link_state", S_IRUGO | S_IWUSR, root, dwc, + debugfs_create_file("testmode", 0644, root, dwc, + &dwc3_testmode_fops); + debugfs_create_file("link_state", 0644, root, dwc, &dwc3_link_state_fops); dwc3_debugfs_create_endpoint_dirs(dwc, root); } diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c index 1f7f4d88ed9d..417e05381b5d 100644 --- a/drivers/usb/dwc3/dwc3-meson-g12a.c +++ b/drivers/usb/dwc3/dwc3-meson-g12a.c @@ -116,23 +116,24 @@ static struct clk_bulk_data meson_a1_clocks[] = { { .id = "xtal_usb_ctrl" }, }; -static const char *meson_gxm_phy_names[] = { +static const char * const meson_gxm_phy_names[] = { "usb2-phy0", "usb2-phy1", "usb2-phy2", }; -static const char *meson_g12a_phy_names[] = { +static const char * const meson_g12a_phy_names[] = { "usb2-phy0", "usb2-phy1", "usb3-phy0", }; /* * Amlogic A1 has a single physical PHY, in slot 1, but still has the * two U2 PHY controls register blocks like G12A. + * AXG has the similar scheme, thus needs the same tweak. * Handling the first PHY on slot 1 would need a large amount of code * changes, and the current management is generic enough to handle it * correctly when only the "usb2-phy1" phy is specified on-par with the * DT bindings. 
*/ -static const char *meson_a1_phy_names[] = { +static const char * const meson_a1_phy_names[] = { "usb2-phy0", "usb2-phy1" }; @@ -143,7 +144,7 @@ struct dwc3_meson_g12a_drvdata { bool otg_phy_host_port_disable; struct clk_bulk_data *clks; int num_clks; - const char **phy_names; + const char * const *phy_names; int num_phys; int (*setup_regmaps)(struct dwc3_meson_g12a *priv, void __iomem *base); int (*usb2_init_phy)(struct dwc3_meson_g12a *priv, int i, @@ -215,6 +216,19 @@ static struct dwc3_meson_g12a_drvdata gxm_drvdata = { .usb_post_init = dwc3_meson_gxl_usb_post_init, }; +static struct dwc3_meson_g12a_drvdata axg_drvdata = { + .otg_switch_supported = true, + .clks = meson_gxl_clocks, + .num_clks = ARRAY_SIZE(meson_gxl_clocks), + .phy_names = meson_a1_phy_names, + .num_phys = ARRAY_SIZE(meson_a1_phy_names), + .setup_regmaps = dwc3_meson_gxl_setup_regmaps, + .usb2_init_phy = dwc3_meson_gxl_usb2_init_phy, + .set_phy_mode = dwc3_meson_gxl_set_phy_mode, + .usb_init = dwc3_meson_g12a_usb_init, + .usb_post_init = dwc3_meson_gxl_usb_post_init, +}; + static struct dwc3_meson_g12a_drvdata g12a_drvdata = { .otg_switch_supported = true, .clks = meson_g12a_clocks, @@ -520,11 +534,7 @@ static int dwc3_meson_g12a_role_set(struct usb_role_switch *sw, return 0; if (priv->drvdata->otg_phy_host_port_disable) - dev_warn_once(priv->dev, "Manual OTG switch is broken on this "\ - "SoC, when manual switching from "\ - "Host to device, DWC3 controller "\ - "will need to be resetted in order "\ - "to recover usage of the Host port"); + dev_warn_once(priv->dev, "Broken manual OTG switch\n"); return dwc3_meson_g12a_otg_mode_set(priv, mode); } @@ -626,10 +636,7 @@ static int dwc3_meson_gxl_setup_regmaps(struct dwc3_meson_g12a *priv, /* GXL controls the PHY mode in the PHY registers unlike G12A */ priv->usb_glue_regmap = devm_regmap_init_mmio(priv->dev, base, &phy_meson_g12a_usb_glue_regmap_conf); - if (IS_ERR(priv->usb_glue_regmap)) - return PTR_ERR(priv->usb_glue_regmap); - - return 0; + return PTR_ERR_OR_ZERO(priv->usb_glue_regmap); } static int dwc3_meson_g12a_setup_regmaps(struct dwc3_meson_g12a *priv, @@ -906,8 +913,8 @@ static int __maybe_unused dwc3_meson_g12a_resume(struct device *dev) return ret; } - if (priv->vbus && priv->otg_phy_mode == PHY_MODE_USB_HOST) { - ret = regulator_enable(priv->vbus); + if (priv->vbus && priv->otg_phy_mode == PHY_MODE_USB_HOST) { + ret = regulator_enable(priv->vbus); if (ret) return ret; } @@ -931,6 +938,10 @@ static const struct of_device_id dwc3_meson_g12a_match[] = { .data = &gxm_drvdata, }, { + .compatible = "amlogic,meson-axg-usb-ctrl", + .data = &axg_drvdata, + }, + { .compatible = "amlogic,meson-g12a-usb-ctrl", .data = &g12a_drvdata, }, diff --git a/drivers/usb/dwc3/dwc3-of-simple.c b/drivers/usb/dwc3/dwc3-of-simple.c index 7df115012935..e62ecd22b3ed 100644 --- a/drivers/usb/dwc3/dwc3-of-simple.c +++ b/drivers/usb/dwc3/dwc3-of-simple.c @@ -176,6 +176,8 @@ static const struct of_device_id of_dwc3_simple_match[] = { { .compatible = "cavium,octeon-7130-usb-uctl" }, { .compatible = "sprd,sc9860-dwc3" }, { .compatible = "allwinner,sun50i-h6-dwc3" }, + { .compatible = "hisilicon,hi3670-dwc3" }, + { .compatible = "intel,keembay-dwc3" }, { /* Sentinel */ } }; MODULE_DEVICE_TABLE(of, of_dwc3_simple_match); diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c index f5a61f57c74f..242b6210380a 100644 --- a/drivers/usb/dwc3/dwc3-pci.c +++ b/drivers/usb/dwc3/dwc3-pci.c @@ -147,7 +147,8 @@ static int dwc3_pci_quirks(struct dwc3_pci *dwc) if (pdev->vendor == 
PCI_VENDOR_ID_INTEL) { if (pdev->device == PCI_DEVICE_ID_INTEL_BXT || - pdev->device == PCI_DEVICE_ID_INTEL_BXT_M) { + pdev->device == PCI_DEVICE_ID_INTEL_BXT_M || + pdev->device == PCI_DEVICE_ID_INTEL_EHLLP) { guid_parse(PCI_INTEL_BXT_DSM_GUID, &dwc->guid); dwc->has_dsm_for_pm = true; } diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c index e1e78e9824b1..c703d552bbcf 100644 --- a/drivers/usb/dwc3/dwc3-qcom.c +++ b/drivers/usb/dwc3/dwc3-qcom.c @@ -13,6 +13,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/extcon.h> +#include <linux/interconnect.h> #include <linux/of_platform.h> #include <linux/platform_device.h> #include <linux/phy/phy.h> @@ -43,6 +44,14 @@ #define SDM845_QSCRATCH_SIZE 0x400 #define SDM845_DWC3_CORE_SIZE 0xcd00 +/* Interconnect path bandwidths in MBps */ +#define USB_MEMORY_AVG_HS_BW MBps_to_icc(240) +#define USB_MEMORY_PEAK_HS_BW MBps_to_icc(700) +#define USB_MEMORY_AVG_SS_BW MBps_to_icc(1000) +#define USB_MEMORY_PEAK_SS_BW MBps_to_icc(2500) +#define APPS_USB_AVG_BW 0 +#define APPS_USB_PEAK_BW MBps_to_icc(40) + struct dwc3_acpi_pdata { u32 qscratch_base_offset; u32 qscratch_base_size; @@ -76,6 +85,8 @@ struct dwc3_qcom { enum usb_dr_mode mode; bool is_suspended; bool pm_suspended; + struct icc_path *icc_path_ddr; + struct icc_path *icc_path_apps; }; static inline void dwc3_qcom_setbits(void __iomem *base, u32 offset, u32 val) @@ -190,6 +201,96 @@ static int dwc3_qcom_register_extcon(struct dwc3_qcom *qcom) return 0; } +static int dwc3_qcom_interconnect_enable(struct dwc3_qcom *qcom) +{ + int ret; + + ret = icc_enable(qcom->icc_path_ddr); + if (ret) + return ret; + + ret = icc_enable(qcom->icc_path_apps); + if (ret) + icc_disable(qcom->icc_path_ddr); + + return ret; +} + +static int dwc3_qcom_interconnect_disable(struct dwc3_qcom *qcom) +{ + int ret; + + ret = icc_disable(qcom->icc_path_ddr); + if (ret) + return ret; + + ret = icc_disable(qcom->icc_path_apps); + if (ret) + icc_enable(qcom->icc_path_ddr); + + return ret; +} + +/** + * dwc3_qcom_interconnect_init() - Get interconnect path handles + * and set bandwidhth. + * @qcom: Pointer to the concerned usb core. + * + */ +static int dwc3_qcom_interconnect_init(struct dwc3_qcom *qcom) +{ + struct device *dev = qcom->dev; + int ret; + + qcom->icc_path_ddr = of_icc_get(dev, "usb-ddr"); + if (IS_ERR(qcom->icc_path_ddr)) { + dev_err(dev, "failed to get usb-ddr path: %ld\n", + PTR_ERR(qcom->icc_path_ddr)); + return PTR_ERR(qcom->icc_path_ddr); + } + + qcom->icc_path_apps = of_icc_get(dev, "apps-usb"); + if (IS_ERR(qcom->icc_path_apps)) { + dev_err(dev, "failed to get apps-usb path: %ld\n", + PTR_ERR(qcom->icc_path_apps)); + return PTR_ERR(qcom->icc_path_apps); + } + + if (usb_get_maximum_speed(&qcom->dwc3->dev) >= USB_SPEED_SUPER || + usb_get_maximum_speed(&qcom->dwc3->dev) == USB_SPEED_UNKNOWN) + ret = icc_set_bw(qcom->icc_path_ddr, + USB_MEMORY_AVG_SS_BW, USB_MEMORY_PEAK_SS_BW); + else + ret = icc_set_bw(qcom->icc_path_ddr, + USB_MEMORY_AVG_HS_BW, USB_MEMORY_PEAK_HS_BW); + + if (ret) { + dev_err(dev, "failed to set bandwidth for usb-ddr path: %d\n", ret); + return ret; + } + + ret = icc_set_bw(qcom->icc_path_apps, + APPS_USB_AVG_BW, APPS_USB_PEAK_BW); + if (ret) { + dev_err(dev, "failed to set bandwidth for apps-usb path: %d\n", ret); + return ret; + } + + return 0; +} + +/** + * dwc3_qcom_interconnect_exit() - Release interconnect path handles + * @qcom: Pointer to the concerned usb core. + * + * This function is used to release interconnect path handle. 
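For context on the bandwidth figures requested in dwc3_qcom_interconnect_init() above: the interconnect framework expresses bandwidth in kBps, so (as a rough reading, not something stated in the patch) the values work out as follows.

/*
 * MBps_to_icc() scales MB/s into the framework's kBps units:
 *
 *   USB_MEMORY_PEAK_SS_BW = MBps_to_icc(2500) -> 2,500,000 kBps (~2.5 GB/s)
 *   USB_MEMORY_PEAK_HS_BW = MBps_to_icc(700)  ->   700,000 kBps (~0.7 GB/s)
 *   APPS_USB_PEAK_BW      = MBps_to_icc(40)   ->    40,000 kBps
 *
 * The SuperSpeed figures are requested when the controller's maximum speed
 * is SuperSpeed or unknown; otherwise the HighSpeed figures are used.
 */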
+ */ +static void dwc3_qcom_interconnect_exit(struct dwc3_qcom *qcom) +{ + icc_put(qcom->icc_path_ddr); + icc_put(qcom->icc_path_apps); +} + static void dwc3_qcom_disable_interrupts(struct dwc3_qcom *qcom) { if (qcom->hs_phy_irq) { @@ -239,7 +340,7 @@ static void dwc3_qcom_enable_interrupts(struct dwc3_qcom *qcom) static int dwc3_qcom_suspend(struct dwc3_qcom *qcom) { u32 val; - int i; + int i, ret; if (qcom->is_suspended) return 0; @@ -251,6 +352,10 @@ static int dwc3_qcom_suspend(struct dwc3_qcom *qcom) for (i = qcom->num_clocks - 1; i >= 0; i--) clk_disable_unprepare(qcom->clks[i]); + ret = dwc3_qcom_interconnect_disable(qcom); + if (ret) + dev_warn(qcom->dev, "failed to disable interconnect: %d\n", ret); + qcom->is_suspended = true; dwc3_qcom_enable_interrupts(qcom); @@ -276,6 +381,10 @@ static int dwc3_qcom_resume(struct dwc3_qcom *qcom) } } + ret = dwc3_qcom_interconnect_enable(qcom); + if (ret) + dev_warn(qcom->dev, "failed to enable interconnect: %d\n", ret); + /* Clear existing events from PHY related to L2 in/out */ dwc3_qcom_setbits(qcom->qscratch_base, PWR_EVNT_IRQ_STAT_REG, PWR_EVNT_LPM_IN_L2_MASK | PWR_EVNT_LPM_OUT_L2_MASK); @@ -335,7 +444,9 @@ static int dwc3_qcom_setup_irq(struct platform_device *pdev) { struct dwc3_qcom *qcom = platform_get_drvdata(pdev); const struct dwc3_acpi_pdata *pdata = qcom->acpi_pdata; - int irq, ret; + int irq; + int ret; + irq = dwc3_qcom_get_irq(pdev, "hs_phy_irq", pdata ? pdata->hs_phy_irq_index : -1); if (irq > 0) { @@ -454,7 +565,7 @@ static const struct property_entry dwc3_qcom_acpi_properties[] = { static int dwc3_qcom_acpi_register_core(struct platform_device *pdev) { - struct dwc3_qcom *qcom = platform_get_drvdata(pdev); + struct dwc3_qcom *qcom = platform_get_drvdata(pdev); struct device *dev = &pdev->dev; struct resource *res, *child_res = NULL; int irq; @@ -514,7 +625,7 @@ out: static int dwc3_qcom_of_register_core(struct platform_device *pdev) { - struct dwc3_qcom *qcom = platform_get_drvdata(pdev); + struct dwc3_qcom *qcom = platform_get_drvdata(pdev); struct device_node *np = pdev->dev.of_node, *dwc3_np; struct device *dev = &pdev->dev; int ret; @@ -638,6 +749,10 @@ static int dwc3_qcom_probe(struct platform_device *pdev) goto depopulate; } + ret = dwc3_qcom_interconnect_init(qcom); + if (ret) + goto depopulate; + qcom->mode = usb_get_dr_mode(&qcom->dwc3->dev); /* enable vbus override for device mode */ @@ -647,7 +762,7 @@ static int dwc3_qcom_probe(struct platform_device *pdev) /* register extcon to override sw_vbus on Vbus change later */ ret = dwc3_qcom_register_extcon(qcom); if (ret) - goto depopulate; + goto interconnect_exit; device_init_wakeup(&pdev->dev, 1); qcom->is_suspended = false; @@ -657,6 +772,8 @@ static int dwc3_qcom_probe(struct platform_device *pdev) return 0; +interconnect_exit: + dwc3_qcom_interconnect_exit(qcom); depopulate: if (np) of_platform_depopulate(&pdev->dev); @@ -687,6 +804,7 @@ static int dwc3_qcom_remove(struct platform_device *pdev) } qcom->num_clocks = 0; + dwc3_qcom_interconnect_exit(qcom); reset_control_assert(qcom->resets); pm_runtime_allow(dev); diff --git a/drivers/usb/dwc3/ep0.c b/drivers/usb/dwc3/ep0.c index 59f2e8c31bd1..7be3903cb842 100644 --- a/drivers/usb/dwc3/ep0.c +++ b/drivers/usb/dwc3/ep0.c @@ -105,7 +105,7 @@ static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep, * IRQ we were waiting for is long gone. 
*/ if (dep->flags & DWC3_EP_PENDING_REQUEST) { - unsigned direction; + unsigned int direction; direction = !!(dep->flags & DWC3_EP0_DIR_IN); @@ -127,11 +127,11 @@ static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep, * handle it here. */ if (dwc->delayed_status) { - unsigned direction; + unsigned int direction; direction = !dwc->ep0_expect_in; dwc->delayed_status = false; - usb_gadget_set_state(&dwc->gadget, USB_STATE_CONFIGURED); + usb_gadget_set_state(dwc->gadget, USB_STATE_CONFIGURED); if (dwc->ep0state == EP0_STATUS_PHASE) __dwc3_ep0_do_control_status(dwc, dwc->eps[direction]); @@ -172,7 +172,7 @@ static int __dwc3_gadget_ep0_queue(struct dwc3_ep *dep, * XferNotReady(STATUS). */ if (dwc->three_stage_setup) { - unsigned direction; + unsigned int direction; direction = dwc->ep0_expect_in; dwc->ep0state = EP0_DATA_PHASE; @@ -197,7 +197,7 @@ int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request, int ret; spin_lock_irqsave(&dwc->lock, flags); - if (!dep->endpoint.desc) { + if (!dep->endpoint.desc || !dwc->pullups_connected) { dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n", dep->name); ret = -ESHUTDOWN; @@ -325,7 +325,7 @@ static int dwc3_ep0_handle_status(struct dwc3 *dwc, /* * LTM will be set once we know how to set this in HW. */ - usb_status |= dwc->gadget.is_selfpowered; + usb_status |= dwc->gadget->is_selfpowered; if ((dwc->speed == DWC3_DSTS_SUPERSPEED) || (dwc->speed == DWC3_DSTS_SUPERSPEED_PLUS)) { @@ -450,7 +450,7 @@ static int dwc3_ep0_handle_device(struct dwc3 *dwc, wValue = le16_to_cpu(ctrl->wValue); wIndex = le16_to_cpu(ctrl->wIndex); - state = dwc->gadget.state; + state = dwc->gadget->state; switch (wValue) { case USB_DEVICE_REMOTE_WAKEUP: @@ -524,6 +524,11 @@ static int dwc3_ep0_handle_endpoint(struct dwc3 *dwc, ret = __dwc3_gadget_ep_set_halt(dep, set, true); if (ret) return -EINVAL; + + /* ClearFeature(Halt) may need delayed status */ + if (!set && (dep->flags & DWC3_EP_END_TRANSFER_PENDING)) + return USB_GADGET_DELAYED_STATUS; + break; default: return -EINVAL; @@ -559,7 +564,7 @@ static int dwc3_ep0_handle_feature(struct dwc3 *dwc, static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) { - enum usb_device_state state = dwc->gadget.state; + enum usb_device_state state = dwc->gadget->state; u32 addr; u32 reg; @@ -580,9 +585,9 @@ static int dwc3_ep0_set_address(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) dwc3_writel(dwc->regs, DWC3_DCFG, reg); if (addr) - usb_gadget_set_state(&dwc->gadget, USB_STATE_ADDRESS); + usb_gadget_set_state(dwc->gadget, USB_STATE_ADDRESS); else - usb_gadget_set_state(&dwc->gadget, USB_STATE_DEFAULT); + usb_gadget_set_state(dwc->gadget, USB_STATE_DEFAULT); return 0; } @@ -592,14 +597,14 @@ static int dwc3_ep0_delegate_req(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) int ret; spin_unlock(&dwc->lock); - ret = dwc->gadget_driver->setup(&dwc->gadget, ctrl); + ret = dwc->gadget_driver->setup(dwc->gadget, ctrl); spin_lock(&dwc->lock); return ret; } static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) { - enum usb_device_state state = dwc->gadget.state; + enum usb_device_state state = dwc->gadget->state; u32 cfg; int ret; u32 reg; @@ -622,7 +627,7 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) * to change the state on the next usb_ep_queue() */ if (ret == 0) - usb_gadget_set_state(&dwc->gadget, + usb_gadget_set_state(dwc->gadget, USB_STATE_CONFIGURED); /* @@ -641,7 +646,7 @@ static int dwc3_ep0_set_config(struct dwc3 *dwc, struct 
usb_ctrlrequest *ctrl) case USB_STATE_CONFIGURED: ret = dwc3_ep0_delegate_req(dwc, ctrl); if (!cfg && !ret) - usb_gadget_set_state(&dwc->gadget, + usb_gadget_set_state(dwc->gadget, USB_STATE_ADDRESS); break; default: @@ -697,7 +702,7 @@ static void dwc3_ep0_set_sel_cmpl(struct usb_ep *ep, struct usb_request *req) static int dwc3_ep0_set_sel(struct dwc3 *dwc, struct usb_ctrlrequest *ctrl) { struct dwc3_ep *dep; - enum usb_device_state state = dwc->gadget.state; + enum usb_device_state state = dwc->gadget->state; u16 wLength; if (state == USB_STATE_DEFAULT) @@ -741,7 +746,7 @@ static int dwc3_ep0_set_isoch_delay(struct dwc3 *dwc, struct usb_ctrlrequest *ct if (wIndex || wLength) return -EINVAL; - dwc->gadget.isoch_delay = wValue; + dwc->gadget->isoch_delay = wValue; return 0; } @@ -942,12 +947,16 @@ static void dwc3_ep0_xfer_complete(struct dwc3 *dwc, static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, struct dwc3_ep *dep, struct dwc3_request *req) { + unsigned int trb_length = 0; int ret; req->direction = !!dep->number; if (req->request.length == 0) { - dwc3_ep0_prepare_one_trb(dep, dwc->ep0_trb_addr, 0, + if (!req->direction) + trb_length = dep->endpoint.maxpacket; + + dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr, trb_length, DWC3_TRBCTL_CONTROL_DATA, false); ret = dwc3_ep0_start_trans(dep); } else if (!IS_ALIGNED(req->request.length, dep->endpoint.maxpacket) @@ -994,9 +1003,12 @@ static void __dwc3_ep0_do_control_data(struct dwc3 *dwc, req->trb = &dwc->ep0_trb[dep->trb_enqueue - 1]; + if (!req->direction) + trb_length = dep->endpoint.maxpacket; + /* Now prepare one extra TRB to align transfer size */ dwc3_ep0_prepare_one_trb(dep, dwc->bounce_addr, - 0, DWC3_TRBCTL_CONTROL_DATA, + trb_length, DWC3_TRBCTL_CONTROL_DATA, false); ret = dwc3_ep0_start_trans(dep); } else { @@ -1042,6 +1054,17 @@ static void dwc3_ep0_do_control_status(struct dwc3 *dwc, __dwc3_ep0_do_control_status(dwc, dep); } +void dwc3_ep0_send_delayed_status(struct dwc3 *dwc) +{ + unsigned int direction = !dwc->ep0_expect_in; + + if (dwc->ep0state != EP0_STATUS_PHASE) + return; + + dwc->delayed_status = false; + __dwc3_ep0_do_control_status(dwc, dwc->eps[direction]); +} + static void dwc3_ep0_end_control_data(struct dwc3 *dwc, struct dwc3_ep *dep) { struct dwc3_gadget_ep_cmd_params params; @@ -1102,7 +1125,7 @@ static void dwc3_ep0_xfernotready(struct dwc3 *dwc, */ if (!list_empty(&dep->pending_list)) { dwc->delayed_status = false; - usb_gadget_set_state(&dwc->gadget, + usb_gadget_set_state(dwc->gadget, USB_STATE_CONFIGURED); dwc3_ep0_do_control_status(dwc, event); } diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c index c2a0f64f8d1e..78cb4db8a6e4 100644 --- a/drivers/usb/dwc3/gadget.c +++ b/drivers/usb/dwc3/gadget.c @@ -227,7 +227,8 @@ void dwc3_gadget_giveback(struct dwc3_ep *dep, struct dwc3_request *req, * Caller should take care of locking. Issue @cmd with a given @param to @dwc * and wait for its completion. */ -int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned cmd, u32 param) +int dwc3_send_gadget_generic_command(struct dwc3 *dwc, unsigned int cmd, + u32 param) { u32 timeout = 500; int status = 0; @@ -268,7 +269,7 @@ static int __dwc3_gadget_wakeup(struct dwc3 *dwc); * Caller should handle locking. This function will issue @cmd with given * @params to @dep and wait for its completion. 
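Stepping back to the ep0 change above: to make the zero-length OUT handling in __dwc3_ep0_do_control_data() concrete, here is a reading of the code, assuming the usual DWC3 constraint that OUT TRB buffers must be sized in whole max-packet units.

/*
 * Illustrative values: a control endpoint with wMaxPacketSize = 64 and a
 * zero-length OUT data phase.  The controller still needs room for a full
 * packet on an OUT transfer, so instead of a 0-length TRB at ep0_trb_addr
 * the driver now programs:
 *
 *   buffer     = dwc->bounce_addr
 *   trb_length = dep->endpoint.maxpacket = 64
 *
 * The extra alignment TRB for unaligned OUT requests is sized the same way
 * later in that function.
 */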
*/ -int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd, +int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned int cmd, struct dwc3_gadget_ep_cmd_params *params) { const struct usb_endpoint_descriptor *desc = dep->endpoint.desc; @@ -290,7 +291,7 @@ int dwc3_send_gadget_ep_cmd(struct dwc3_ep *dep, unsigned cmd, * * DWC_usb3 3.30a and DWC_usb31 1.90a programming guide section 3.2.2 */ - if (dwc->gadget.speed <= USB_SPEED_HIGH) { + if (dwc->gadget->speed <= USB_SPEED_HIGH) { reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0)); if (unlikely(reg & DWC3_GUSB2PHYCFG_SUSPHY)) { saved_config |= DWC3_GUSB2PHYCFG_SUSPHY; @@ -422,7 +423,7 @@ static int dwc3_send_clear_stall_ep_cmd(struct dwc3_ep *dep) */ if (dep->direction && !DWC3_VER_IS_PRIOR(DWC3, 260A) && - (dwc->gadget.speed >= USB_SPEED_SUPER)) + (dwc->gadget->speed >= USB_SPEED_SUPER)) cmd |= DWC3_DEPCMD_CLEARPENDIN; memset(¶ms, 0, sizeof(params)); @@ -562,8 +563,9 @@ static int dwc3_gadget_set_ep_config(struct dwc3_ep *dep, unsigned int action) | DWC3_DEPCFG_MAX_PACKET_SIZE(usb_endpoint_maxp(desc)); /* Burst size is only needed in SuperSpeed mode */ - if (dwc->gadget.speed >= USB_SPEED_SUPER) { + if (dwc->gadget->speed >= USB_SPEED_SUPER) { u32 burst = dep->endpoint.maxburst; + params.param0 |= DWC3_DEPCFG_BURST_SIZE(burst - 1); } @@ -942,12 +944,13 @@ static u32 dwc3_calc_trbs_left(struct dwc3_ep *dep) } static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, - dma_addr_t dma, unsigned length, unsigned chain, unsigned node, - unsigned stream_id, unsigned short_not_ok, - unsigned no_interrupt, unsigned is_last) + dma_addr_t dma, unsigned int length, unsigned int chain, + unsigned int node, unsigned int stream_id, + unsigned int short_not_ok, unsigned int no_interrupt, + unsigned int is_last, bool must_interrupt) { struct dwc3 *dwc = dep->dwc; - struct usb_gadget *gadget = &dwc->gadget; + struct usb_gadget *gadget = dwc->gadget; enum usb_device_speed speed = gadget->speed; trb->size = DWC3_TRB_SIZE_LENGTH(length); @@ -1031,8 +1034,7 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, trb->ctrl |= DWC3_TRB_CTRL_ISP_IMI; } - if ((!no_interrupt && !chain) || - (dwc3_calc_trbs_left(dep) == 1)) + if ((!no_interrupt && !chain) || must_interrupt) trb->ctrl |= DWC3_TRB_CTRL_IOC; if (chain) @@ -1057,19 +1059,24 @@ static void __dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_trb *trb, * @trb_length: buffer size of the TRB * @chain: should this TRB be chained to the next? * @node: only for isochronous endpoints. First TRB needs different type. 
+ * @use_bounce_buffer: set to use bounce buffer + * @must_interrupt: set to interrupt on TRB completion */ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, struct dwc3_request *req, unsigned int trb_length, - unsigned chain, unsigned node) + unsigned int chain, unsigned int node, bool use_bounce_buffer, + bool must_interrupt) { struct dwc3_trb *trb; dma_addr_t dma; - unsigned stream_id = req->request.stream_id; - unsigned short_not_ok = req->request.short_not_ok; - unsigned no_interrupt = req->request.no_interrupt; - unsigned is_last = req->request.is_last; - - if (req->request.num_sgs > 0) + unsigned int stream_id = req->request.stream_id; + unsigned int short_not_ok = req->request.short_not_ok; + unsigned int no_interrupt = req->request.no_interrupt; + unsigned int is_last = req->request.is_last; + + if (use_bounce_buffer) + dma = dep->dwc->bounce_addr; + else if (req->request.num_sgs > 0) dma = sg_dma_address(req->start_sg); else dma = req->request.dma; @@ -1085,10 +1092,63 @@ static void dwc3_prepare_one_trb(struct dwc3_ep *dep, req->num_trbs++; __dwc3_prepare_one_trb(dep, trb, dma, trb_length, chain, node, - stream_id, short_not_ok, no_interrupt, is_last); + stream_id, short_not_ok, no_interrupt, is_last, + must_interrupt); } -static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, +static bool dwc3_needs_extra_trb(struct dwc3_ep *dep, struct dwc3_request *req) +{ + unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); + unsigned int rem = req->request.length % maxp; + + if ((req->request.length && req->request.zero && !rem && + !usb_endpoint_xfer_isoc(dep->endpoint.desc)) || + (!req->direction && rem)) + return true; + + return false; +} + +/** + * dwc3_prepare_last_sg - prepare TRBs for the last SG entry + * @dep: The endpoint that the request belongs to + * @req: The request to prepare + * @entry_length: The last SG entry size + * @node: Indicates whether this is not the first entry (for isoc only) + * + * Return the number of TRBs prepared. + */ +static int dwc3_prepare_last_sg(struct dwc3_ep *dep, + struct dwc3_request *req, unsigned int entry_length, + unsigned int node) +{ + unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); + unsigned int rem = req->request.length % maxp; + unsigned int num_trbs = 1; + + if (dwc3_needs_extra_trb(dep, req)) + num_trbs++; + + if (dwc3_calc_trbs_left(dep) < num_trbs) + return 0; + + req->needs_extra_trb = num_trbs > 1; + + /* Prepare a normal TRB */ + if (req->direction || req->request.length) + dwc3_prepare_one_trb(dep, req, entry_length, + req->needs_extra_trb, node, false, false); + + /* Prepare extra TRBs for ZLP and MPS OUT transfer alignment */ + if ((!req->direction && !req->request.length) || req->needs_extra_trb) + dwc3_prepare_one_trb(dep, req, + req->direction ? 
0 : maxp - rem, + false, 1, true, false); + + return num_trbs; +} + +static int dwc3_prepare_trbs_sg(struct dwc3_ep *dep, struct dwc3_request *req) { struct scatterlist *sg = req->start_sg; @@ -1097,6 +1157,8 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, unsigned int length = req->request.length; unsigned int remaining = req->request.num_mapped_sgs - req->num_queued_sgs; + unsigned int num_trbs = req->num_trbs; + bool needs_extra_trb = dwc3_needs_extra_trb(dep, req); /* * If we resume preparing the request, then get the remaining length of @@ -1106,10 +1168,10 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, length -= sg_dma_len(s); for_each_sg(sg, s, remaining, i) { - unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); - unsigned int rem = length % maxp; + unsigned int num_trbs_left = dwc3_calc_trbs_left(dep); unsigned int trb_length; - unsigned chain = true; + bool must_interrupt = false; + bool last_sg = false; trb_length = min_t(unsigned int, length, sg_dma_len(s)); @@ -1123,59 +1185,28 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, * mapped sg. */ if ((i == remaining - 1) || !length) - chain = false; - - if (rem && usb_endpoint_dir_out(dep->endpoint.desc) && !chain) { - struct dwc3 *dwc = dep->dwc; - struct dwc3_trb *trb; - - req->needs_extra_trb = true; - - /* prepare normal TRB */ - dwc3_prepare_one_trb(dep, req, trb_length, true, i); - - /* Now prepare one extra TRB to align transfer size */ - trb = &dep->trb_pool[dep->trb_enqueue]; - req->num_trbs++; - __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, - maxp - rem, false, 1, - req->request.stream_id, - req->request.short_not_ok, - req->request.no_interrupt, - req->request.is_last); - } else if (req->request.zero && req->request.length && - !usb_endpoint_xfer_isoc(dep->endpoint.desc) && - !rem && !chain) { - struct dwc3 *dwc = dep->dwc; - struct dwc3_trb *trb; - - req->needs_extra_trb = true; - - /* Prepare normal TRB */ - dwc3_prepare_one_trb(dep, req, trb_length, true, i); - - /* Prepare one extra TRB to handle ZLP */ - trb = &dep->trb_pool[dep->trb_enqueue]; - req->num_trbs++; - __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0, - !req->direction, 1, - req->request.stream_id, - req->request.short_not_ok, - req->request.no_interrupt, - req->request.is_last); - - /* Prepare one more TRB to handle MPS alignment */ - if (!req->direction) { - trb = &dep->trb_pool[dep->trb_enqueue]; - req->num_trbs++; - __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp, - false, 1, req->request.stream_id, - req->request.short_not_ok, - req->request.no_interrupt, - req->request.is_last); - } + last_sg = true; + + if (!num_trbs_left) + break; + + if (last_sg) { + if (!dwc3_prepare_last_sg(dep, req, trb_length, i)) + break; } else { - dwc3_prepare_one_trb(dep, req, trb_length, chain, i); + /* + * Look ahead to check if we have enough TRBs for the + * next SG entry. If not, set interrupt on this TRB to + * resume preparing the next SG entry when more TRBs are + * free. 
+ */ + if (num_trbs_left == 1 || (needs_extra_trb && + num_trbs_left <= 2 && + sg_dma_len(sg_next(s)) >= length)) + must_interrupt = true; + + dwc3_prepare_one_trb(dep, req, trb_length, 1, i, false, + must_interrupt); } /* @@ -1185,7 +1216,7 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, * we have free trbs we can continue queuing from where we * previously stopped */ - if (chain) + if (!last_sg) req->start_sg = sg_next(s); req->num_queued_sgs++; @@ -1200,68 +1231,17 @@ static void dwc3_prepare_one_trb_sg(struct dwc3_ep *dep, break; } - if (!dwc3_calc_trbs_left(dep)) + if (must_interrupt) break; } + + return req->num_trbs - num_trbs; } -static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, +static int dwc3_prepare_trbs_linear(struct dwc3_ep *dep, struct dwc3_request *req) { - unsigned int length = req->request.length; - unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); - unsigned int rem = length % maxp; - - if ((!length || rem) && usb_endpoint_dir_out(dep->endpoint.desc)) { - struct dwc3 *dwc = dep->dwc; - struct dwc3_trb *trb; - - req->needs_extra_trb = true; - - /* prepare normal TRB */ - dwc3_prepare_one_trb(dep, req, length, true, 0); - - /* Now prepare one extra TRB to align transfer size */ - trb = &dep->trb_pool[dep->trb_enqueue]; - req->num_trbs++; - __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp - rem, - false, 1, req->request.stream_id, - req->request.short_not_ok, - req->request.no_interrupt, - req->request.is_last); - } else if (req->request.zero && req->request.length && - !usb_endpoint_xfer_isoc(dep->endpoint.desc) && - (IS_ALIGNED(req->request.length, maxp))) { - struct dwc3 *dwc = dep->dwc; - struct dwc3_trb *trb; - - req->needs_extra_trb = true; - - /* prepare normal TRB */ - dwc3_prepare_one_trb(dep, req, length, true, 0); - - /* Prepare one extra TRB to handle ZLP */ - trb = &dep->trb_pool[dep->trb_enqueue]; - req->num_trbs++; - __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, 0, - !req->direction, 1, req->request.stream_id, - req->request.short_not_ok, - req->request.no_interrupt, - req->request.is_last); - - /* Prepare one more TRB to handle MPS alignment for OUT */ - if (!req->direction) { - trb = &dep->trb_pool[dep->trb_enqueue]; - req->num_trbs++; - __dwc3_prepare_one_trb(dep, trb, dwc->bounce_addr, maxp, - false, 1, req->request.stream_id, - req->request.short_not_ok, - req->request.no_interrupt, - req->request.is_last); - } - } else { - dwc3_prepare_one_trb(dep, req, length, false, 0); - } + return dwc3_prepare_last_sg(dep, req, req->request.length, 0); } /* @@ -1271,10 +1251,13 @@ static void dwc3_prepare_one_trb_linear(struct dwc3_ep *dep, * The function goes through the requests list and sets up TRBs for the * transfers. The function returns once there are no more TRBs available or * it runs out of requests. + * + * Returns the number of TRBs prepared or negative errno. */ -static void dwc3_prepare_trbs(struct dwc3_ep *dep) +static int dwc3_prepare_trbs(struct dwc3_ep *dep) { struct dwc3_request *req, *n; + int ret = 0; BUILD_BUG_ON_NOT_POWER_OF_2(DWC3_TRB_NUM); @@ -1289,11 +1272,14 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep) * break things. */ list_for_each_entry(req, &dep->started_list, list) { - if (req->num_pending_sgs > 0) - dwc3_prepare_one_trb_sg(dep, req); + if (req->num_pending_sgs > 0) { + ret = dwc3_prepare_trbs_sg(dep, req); + if (!ret || req->num_pending_sgs) + return ret; + } if (!dwc3_calc_trbs_left(dep)) - return; + return ret; /* * Don't prepare beyond a transfer. 
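
For reference, the dwc3_needs_extra_trb()/dwc3_prepare_last_sg() refactor above boils the old open-coded special cases down to one rule: a second TRB is queued either to emit a ZLP (non-isochronous, packet-aligned transfer with request.zero set) or to pad an unaligned OUT transfer up to wMaxPacketSize from the bounce buffer. A minimal user-space sketch of that decision and of the pad length (names and the sample values are illustrative, not from the patch):

#include <stdbool.h>
#include <stdio.h>

/* Mirrors the dwc3_needs_extra_trb() condition: an extra TRB is queued
 * either to send a ZLP (packet-aligned transfer with "zero" set, non-isoc)
 * or to pad an OUT transfer whose length is not a multiple of maxp. */
static bool needs_extra_trb(unsigned int length, unsigned int maxp,
                            bool zero, bool is_in, bool is_isoc)
{
        unsigned int rem = length % maxp;

        if (length && zero && !rem && !is_isoc)
                return true;            /* ZLP TRB */
        if (!is_in && rem)
                return true;            /* MPS-alignment TRB from the bounce buffer */
        return false;
}

int main(void)
{
        unsigned int maxp = 512;
        unsigned int len = 700;         /* unaligned OUT transfer */

        if (needs_extra_trb(len, maxp, false, false, false))
                printf("pad TRB of %u bytes from the bounce buffer\n",
                       maxp - len % maxp);      /* 512 - 188 = 324 */
        return 0;
}
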
In DWC_usb32, its transfer @@ -1301,30 +1287,32 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep) * active transfer instead of stopping. */ if (dep->stream_capable && req->request.is_last) - return; + return ret; } list_for_each_entry_safe(req, n, &dep->pending_list, list) { struct dwc3 *dwc = dep->dwc; - int ret; ret = usb_gadget_map_request_by_dev(dwc->sysdev, &req->request, dep->direction); if (ret) - return; + return ret; req->sg = req->request.sg; req->start_sg = req->sg; req->num_queued_sgs = 0; req->num_pending_sgs = req->request.num_mapped_sgs; - if (req->num_pending_sgs > 0) - dwc3_prepare_one_trb_sg(dep, req); - else - dwc3_prepare_one_trb_linear(dep, req); + if (req->num_pending_sgs > 0) { + ret = dwc3_prepare_trbs_sg(dep, req); + if (req->num_pending_sgs) + return ret; + } else { + ret = dwc3_prepare_trbs_linear(dep, req); + } - if (!dwc3_calc_trbs_left(dep)) - return; + if (!ret || !dwc3_calc_trbs_left(dep)) + return ret; /* * Don't prepare beyond a transfer. In DWC_usb32, its transfer @@ -1332,8 +1320,10 @@ static void dwc3_prepare_trbs(struct dwc3_ep *dep) * active transfer instead of stopping. */ if (dep->stream_capable && req->request.is_last) - return; + return ret; } + + return ret; } static void dwc3_gadget_ep_cleanup_cancelled_requests(struct dwc3_ep *dep); @@ -1346,12 +1336,24 @@ static int __dwc3_gadget_kick_transfer(struct dwc3_ep *dep) int ret; u32 cmd; - if (!dwc3_calc_trbs_left(dep)) - return 0; + /* + * Note that it's normal to have no new TRBs prepared (i.e. ret == 0). + * This happens when we need to stop and restart a transfer such as in + * the case of reinitiating a stream or retrying an isoc transfer. + */ + ret = dwc3_prepare_trbs(dep); + if (ret < 0) + return ret; starting = !(dep->flags & DWC3_EP_TRANSFER_STARTED); - dwc3_prepare_trbs(dep); + /* + * If there's no new TRB prepared and we don't need to restart a + * transfer, there's no need to update the transfer. + */ + if (!ret && !starting) + return ret; + req = next_request(&dep->started_list); if (!req) { dep->flags |= DWC3_EP_PENDING_REQUEST; @@ -1539,12 +1541,12 @@ static int __dwc3_gadget_start_isoc(struct dwc3_ep *dep) if (!dwc->dis_start_transfer_quirk && (DWC3_VER_IS_PRIOR(DWC31, 170A) || DWC3_VER_TYPE_IS_WITHIN(DWC31, 170A, EA01, EA06))) { - if (dwc->gadget.speed <= USB_SPEED_HIGH && dep->direction) + if (dwc->gadget->speed <= USB_SPEED_HIGH && dep->direction) return dwc3_gadget_start_isoc_quirk(dep); } if (desc->bInterval <= 14 && - dwc->gadget.speed >= USB_SPEED_HIGH) { + dwc->gadget->speed >= USB_SPEED_HIGH) { u32 frame = __dwc3_gadget_get_frame(dwc); bool rollover = frame < (dep->frame_number & DWC3_FRNUMBER_MASK); @@ -1600,7 +1602,7 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) { struct dwc3 *dwc = dep->dwc; - if (!dep->endpoint.desc) { + if (!dep->endpoint.desc || !dwc->pullups_connected) { dev_err(dwc->dev, "%s: can't queue to disabled endpoint\n", dep->name); return -ESHUTDOWN; @@ -1628,8 +1630,13 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) if (dep->flags & DWC3_EP_WAIT_TRANSFER_COMPLETE) return 0; - /* Start the transfer only after the END_TRANSFER is completed */ - if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) { + /* + * Start the transfer only after the END_TRANSFER is completed + * and endpoint STALL is cleared. 
+ */ + if ((dep->flags & DWC3_EP_END_TRANSFER_PENDING) || + (dep->flags & DWC3_EP_WEDGE) || + (dep->flags & DWC3_EP_STALL)) { dep->flags |= DWC3_EP_DELAY_START; return 0; } @@ -1648,9 +1655,8 @@ static int __dwc3_gadget_ep_queue(struct dwc3_ep *dep, struct dwc3_request *req) return 0; if ((dep->flags & DWC3_EP_PENDING_REQUEST)) { - if (!(dep->flags & DWC3_EP_TRANSFER_STARTED)) { + if (!(dep->flags & DWC3_EP_TRANSFER_STARTED)) return __dwc3_gadget_start_isoc(dep); - } } } @@ -1788,8 +1794,8 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) if (value) { struct dwc3_trb *trb; - unsigned transfer_in_flight; - unsigned started; + unsigned int transfer_in_flight; + unsigned int started; if (dep->number > 1) trb = dwc3_ep_prev_trb(dep, dep->trb_enqueue); @@ -1822,6 +1828,18 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) return 0; } + dwc3_stop_active_transfer(dep, true, true); + + list_for_each_entry_safe(req, tmp, &dep->started_list, list) + dwc3_gadget_move_cancelled_request(req); + + if (dep->flags & DWC3_EP_END_TRANSFER_PENDING) { + dep->flags |= DWC3_EP_PENDING_CLEAR_STALL; + return 0; + } + + dwc3_gadget_ep_cleanup_cancelled_requests(dep); + ret = dwc3_send_clear_stall_ep_cmd(dep); if (ret) { dev_err(dwc->dev, "failed to clear STALL on %s\n", @@ -1831,18 +1849,11 @@ int __dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol) dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); - dwc3_stop_active_transfer(dep, true, true); - - list_for_each_entry_safe(req, tmp, &dep->started_list, list) - dwc3_gadget_move_cancelled_request(req); - - list_for_each_entry_safe(req, tmp, &dep->pending_list, list) - dwc3_gadget_move_cancelled_request(req); + if ((dep->flags & DWC3_EP_DELAY_START) && + !usb_endpoint_xfer_isoc(dep->endpoint.desc)) + __dwc3_gadget_kick_transfer(dep); - if (!(dep->flags & DWC3_EP_END_TRANSFER_PENDING)) { - dep->flags &= ~DWC3_EP_DELAY_START; - dwc3_gadget_ep_cleanup_cancelled_requests(dep); - } + dep->flags &= ~DWC3_EP_DELAY_START; } return ret; @@ -2010,6 +2021,21 @@ static int dwc3_gadget_set_selfpowered(struct usb_gadget *g, return 0; } +static void dwc3_stop_active_transfers(struct dwc3 *dwc) +{ + u32 epnum; + + for (epnum = 2; epnum < dwc->num_eps; epnum++) { + struct dwc3_ep *dep; + + dep = dwc->eps[epnum]; + if (!dep) + continue; + + dwc3_remove_requests(dwc, dep); + } +} + static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend) { u32 reg; @@ -2055,6 +2081,9 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend) return 0; } +static void dwc3_gadget_disable_irq(struct dwc3 *dwc); +static void __dwc3_gadget_stop(struct dwc3 *dwc); + static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) { struct dwc3 *dwc = gadget_to_dwc(g); @@ -2078,7 +2107,46 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on) } } + /* + * Synchronize any pending event handling before executing the controller + * halt routine. + */ + if (!is_on) { + dwc3_gadget_disable_irq(dwc); + synchronize_irq(dwc->irq_gadget); + } + spin_lock_irqsave(&dwc->lock, flags); + + if (!is_on) { + u32 count; + + /* + * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a + * Section 4.1.8 Table 4-7, it states that for a device-initiated + * disconnect, the SW needs to ensure that it sends "a DEPENDXFER + * command for any active transfers" before clearing the RunStop + * bit. 
+ */ + dwc3_stop_active_transfers(dwc); + __dwc3_gadget_stop(dwc); + + /* + * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a + * Section 1.3.4, it mentions that for the DEVCTRLHLT bit, the + * "software needs to acknowledge the events that are generated + * (by writing to GEVNTCOUNTn) while it is waiting for this bit + * to be set to '1'." + */ + count = dwc3_readl(dwc->regs, DWC3_GEVNTCOUNT(0)); + count &= DWC3_GEVNTCOUNT_MASK; + if (count > 0) { + dwc3_writel(dwc->regs, DWC3_GEVNTCOUNT(0), count); + dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) % + dwc->ev_buf->length; + } + } + ret = dwc3_gadget_run_stop(dwc, is_on, false); spin_unlock_irqrestore(&dwc->lock, flags); @@ -2244,7 +2312,7 @@ static int dwc3_gadget_start(struct usb_gadget *g, spin_lock_irqsave(&dwc->lock, flags); if (dwc->gadget_driver) { dev_err(dwc->dev, "%s is already bound to %s\n", - dwc->gadget.name, + dwc->gadget->name, dwc->gadget_driver->driver.name); ret = -EBUSY; goto err1; @@ -2416,7 +2484,7 @@ static int dwc3_gadget_init_control_endpoint(struct dwc3_ep *dep) dep->endpoint.maxburst = 1; dep->endpoint.ops = &dwc3_gadget_ep0_ops; if (!dep->direction) - dwc->gadget.ep0 = &dep->endpoint; + dwc->gadget->ep0 = &dep->endpoint; dep->endpoint.caps.type_control = true; @@ -2459,10 +2527,10 @@ static int dwc3_gadget_init_in_endpoint(struct dwc3_ep *dep) usb_ep_set_maxpacket_limit(&dep->endpoint, size); - dep->endpoint.max_streams = 15; + dep->endpoint.max_streams = 16; dep->endpoint.ops = &dwc3_gadget_ep_ops; list_add_tail(&dep->endpoint.ep_list, - &dwc->gadget.ep_list); + &dwc->gadget->ep_list); dep->endpoint.caps.type_iso = true; dep->endpoint.caps.type_bulk = true; dep->endpoint.caps.type_int = true; @@ -2508,10 +2576,10 @@ static int dwc3_gadget_init_out_endpoint(struct dwc3_ep *dep) size /= 3; usb_ep_set_maxpacket_limit(&dep->endpoint, size); - dep->endpoint.max_streams = 15; + dep->endpoint.max_streams = 16; dep->endpoint.ops = &dwc3_gadget_ep_ops; list_add_tail(&dep->endpoint.ep_list, - &dwc->gadget.ep_list); + &dwc->gadget->ep_list); dep->endpoint.caps.type_iso = true; dep->endpoint.caps.type_bulk = true; dep->endpoint.caps.type_int = true; @@ -2572,7 +2640,7 @@ static int dwc3_gadget_init_endpoints(struct dwc3 *dwc, u8 total) { u8 epnum; - INIT_LIST_HEAD(&dwc->gadget.ep_list); + INIT_LIST_HEAD(&dwc->gadget->ep_list); for (epnum = 0; epnum < total; epnum++) { int ret; @@ -2652,12 +2720,12 @@ static int dwc3_gadget_ep_reclaim_completed_trb(struct dwc3_ep *dep, } /* - * If we're dealing with unaligned size OUT transfer, we will be left - * with one TRB pending in the ring. We need to manually clear HWO bit - * from that TRB. + * We use bounce buffer for requests that needs extra TRB or OUT ZLP. If + * this TRB points to the bounce buffer address, it's a MPS alignment + * TRB. Don't add it to req->remaining calculation. 
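
The device-initiated disconnect path added to dwc3_gadget_pullup() above acknowledges any events still pending by writing the byte count back to GEVNTCOUNT(0) and advancing the driver's software read position modulo the event buffer size. A small stand-alone sketch of that wrap-around bookkeeping; the mask and buffer length here are assumed values for illustration, the real ones come from the driver headers:

#include <stdio.h>

#define EVT_BUF_LEN     4096U           /* assumed event buffer size in bytes */
#define GEVNTCOUNT_MASK 0xfffcU         /* assumed: low bits hold the byte count */

int main(void)
{
        unsigned int lpos = 4000;                       /* current software read position */
        unsigned int count = 0x0120 & GEVNTCOUNT_MASK;  /* pending bytes read from GEVNTCOUNT */

        /* hardware side: writing 'count' back to GEVNTCOUNTn acknowledges it */
        lpos = (lpos + count) % EVT_BUF_LEN;            /* software side: skip the stale events */
        printf("new lpos = %u\n", lpos);                /* (4000 + 288) mod 4096 = 192 */
        return 0;
}
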
*/ - - if (req->needs_extra_trb && !(trb->ctrl & DWC3_TRB_CTRL_CHN)) { + if (trb->bpl == lower_32_bits(dep->dwc->bounce_addr) && + trb->bph == upper_32_bits(dep->dwc->bounce_addr)) { trb->ctrl &= ~DWC3_TRB_CTRL_HWO; return 1; } @@ -2732,26 +2800,17 @@ static int dwc3_gadget_ep_cleanup_completed_request(struct dwc3_ep *dep, ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, status); - if (req->needs_extra_trb) { - unsigned int maxp = usb_endpoint_maxp(dep->endpoint.desc); + req->request.actual = req->request.length - req->remaining; + + if (!dwc3_gadget_ep_request_completed(req)) + goto out; + if (req->needs_extra_trb) { ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, status); - - /* Reclaim MPS padding TRB for ZLP */ - if (!req->direction && req->request.zero && req->request.length && - !usb_endpoint_xfer_isoc(dep->endpoint.desc) && - (IS_ALIGNED(req->request.length, maxp))) - ret = dwc3_gadget_ep_reclaim_trb_linear(dep, req, event, status); - req->needs_extra_trb = false; } - req->request.actual = req->request.length - req->remaining; - - if (!dwc3_gadget_ep_request_completed(req)) - goto out; - dwc3_gadget_giveback(dep, req, status); out: @@ -2896,6 +2955,43 @@ static void dwc3_gadget_endpoint_transfer_not_ready(struct dwc3_ep *dep, (void) __dwc3_gadget_start_isoc(dep); } +static void dwc3_gadget_endpoint_command_complete(struct dwc3_ep *dep, + const struct dwc3_event_depevt *event) +{ + u8 cmd = DEPEVT_PARAMETER_CMD(event->parameters); + + if (cmd != DWC3_DEPCMD_ENDTRANSFER) + return; + + dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING; + dep->flags &= ~DWC3_EP_TRANSFER_STARTED; + dwc3_gadget_ep_cleanup_cancelled_requests(dep); + + if (dep->flags & DWC3_EP_PENDING_CLEAR_STALL) { + struct dwc3 *dwc = dep->dwc; + + dep->flags &= ~DWC3_EP_PENDING_CLEAR_STALL; + if (dwc3_send_clear_stall_ep_cmd(dep)) { + struct usb_ep *ep0 = &dwc->eps[0]->endpoint; + + dev_err(dwc->dev, "failed to clear STALL on %s\n", dep->name); + if (dwc->delayed_status) + __dwc3_gadget_ep0_set_halt(ep0, 1); + return; + } + + dep->flags &= ~(DWC3_EP_STALL | DWC3_EP_WEDGE); + if (dwc->delayed_status) + dwc3_ep0_send_delayed_status(dwc); + } + + if ((dep->flags & DWC3_EP_DELAY_START) && + !usb_endpoint_xfer_isoc(dep->endpoint.desc)) + __dwc3_gadget_kick_transfer(dep); + + dep->flags &= ~DWC3_EP_DELAY_START; +} + static void dwc3_gadget_endpoint_stream_event(struct dwc3_ep *dep, const struct dwc3_event_depevt *event) { @@ -2965,7 +3061,6 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc, { struct dwc3_ep *dep; u8 epnum = event->endpoint_number; - u8 cmd; dep = dwc->eps[epnum]; @@ -2991,18 +3086,7 @@ static void dwc3_endpoint_interrupt(struct dwc3 *dwc, dwc3_gadget_endpoint_transfer_not_ready(dep, event); break; case DWC3_DEPEVT_EPCMDCMPLT: - cmd = DEPEVT_PARAMETER_CMD(event->parameters); - - if (cmd == DWC3_DEPCMD_ENDTRANSFER) { - dep->flags &= ~DWC3_EP_END_TRANSFER_PENDING; - dep->flags &= ~DWC3_EP_TRANSFER_STARTED; - dwc3_gadget_ep_cleanup_cancelled_requests(dep); - if ((dep->flags & DWC3_EP_DELAY_START) && - !usb_endpoint_xfer_isoc(dep->endpoint.desc)) - __dwc3_gadget_kick_transfer(dep); - - dep->flags &= ~DWC3_EP_DELAY_START; - } + dwc3_gadget_endpoint_command_complete(dep, event); break; case DWC3_DEPEVT_XFERCOMPLETE: dwc3_gadget_endpoint_transfer_complete(dep, event); @@ -3019,7 +3103,7 @@ static void dwc3_disconnect_gadget(struct dwc3 *dwc) { if (dwc->gadget_driver && dwc->gadget_driver->disconnect) { spin_unlock(&dwc->lock); - dwc->gadget_driver->disconnect(&dwc->gadget); + 
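
The reclaim change above identifies the padding TRB by address rather than by chaining flags: a TRB whose buffer pointer fields (bpl/bph) equal the bounce buffer DMA address carries only MPS-alignment or ZLP padding and must not count toward req->remaining. A stand-alone sketch of the lower/upper 32-bit comparison (the DMA address below is made up):

#include <stdint.h>
#include <stdbool.h>
#include <stdio.h>

/* Equivalent of comparing trb->bpl/bph against lower/upper_32_bits(bounce_addr). */
static bool trb_points_at(uint32_t bpl, uint32_t bph, uint64_t dma)
{
        return bpl == (uint32_t)dma && bph == (uint32_t)(dma >> 32);
}

int main(void)
{
        uint64_t bounce_addr = 0x0000000123456000ULL;   /* made-up DMA address */
        uint32_t bpl = 0x23456000, bph = 0x1;           /* TRB buffer pointer fields */

        printf("alignment TRB: %s\n",
               trb_points_at(bpl, bph, bounce_addr) ? "yes" : "no");
        return 0;
}
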
dwc->gadget_driver->disconnect(dwc->gadget); spin_lock(&dwc->lock); } } @@ -3028,7 +3112,7 @@ static void dwc3_suspend_gadget(struct dwc3 *dwc) { if (dwc->gadget_driver && dwc->gadget_driver->suspend) { spin_unlock(&dwc->lock); - dwc->gadget_driver->suspend(&dwc->gadget); + dwc->gadget_driver->suspend(dwc->gadget); spin_lock(&dwc->lock); } } @@ -3037,7 +3121,7 @@ static void dwc3_resume_gadget(struct dwc3 *dwc) { if (dwc->gadget_driver && dwc->gadget_driver->resume) { spin_unlock(&dwc->lock); - dwc->gadget_driver->resume(&dwc->gadget); + dwc->gadget_driver->resume(dwc->gadget); spin_lock(&dwc->lock); } } @@ -3047,9 +3131,9 @@ static void dwc3_reset_gadget(struct dwc3 *dwc) if (!dwc->gadget_driver) return; - if (dwc->gadget.speed != USB_SPEED_UNKNOWN) { + if (dwc->gadget->speed != USB_SPEED_UNKNOWN) { spin_unlock(&dwc->lock); - usb_gadget_udc_reset(&dwc->gadget, dwc->gadget_driver); + usb_gadget_udc_reset(dwc->gadget, dwc->gadget_driver); spin_lock(&dwc->lock); } } @@ -3150,9 +3234,9 @@ static void dwc3_gadget_disconnect_interrupt(struct dwc3 *dwc) dwc3_disconnect_gadget(dwc); - dwc->gadget.speed = USB_SPEED_UNKNOWN; + dwc->gadget->speed = USB_SPEED_UNKNOWN; dwc->setup_packet_pending = false; - usb_gadget_set_state(&dwc->gadget, USB_STATE_NOTATTACHED); + usb_gadget_set_state(dwc->gadget, USB_STATE_NOTATTACHED); dwc->connected = false; } @@ -3195,6 +3279,13 @@ static void dwc3_gadget_reset_interrupt(struct dwc3 *dwc) } dwc3_reset_gadget(dwc); + /* + * In the Synopsis DesignWare Cores USB3 Databook Rev. 3.30a + * Section 4.1.2 Table 4-2, it states that during a USB reset, the SW + * needs to ensure that it sends "a DEPENDXFER command for any active + * transfers." + */ + dwc3_stop_active_transfers(dwc); reg = dwc3_readl(dwc->regs, DWC3_DCTL); reg &= ~DWC3_DCTL_TSTCTRL_MASK; @@ -3231,8 +3322,8 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) switch (speed) { case DWC3_DSTS_SUPERSPEED_PLUS: dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); - dwc->gadget.ep0->maxpacket = 512; - dwc->gadget.speed = USB_SPEED_SUPER_PLUS; + dwc->gadget->ep0->maxpacket = 512; + dwc->gadget->speed = USB_SPEED_SUPER_PLUS; break; case DWC3_DSTS_SUPERSPEED: /* @@ -3252,27 +3343,27 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc) dwc3_gadget_reset_interrupt(dwc); dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512); - dwc->gadget.ep0->maxpacket = 512; - dwc->gadget.speed = USB_SPEED_SUPER; + dwc->gadget->ep0->maxpacket = 512; + dwc->gadget->speed = USB_SPEED_SUPER; break; case DWC3_DSTS_HIGHSPEED: dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); - dwc->gadget.ep0->maxpacket = 64; - dwc->gadget.speed = USB_SPEED_HIGH; + dwc->gadget->ep0->maxpacket = 64; + dwc->gadget->speed = USB_SPEED_HIGH; break; case DWC3_DSTS_FULLSPEED: dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64); - dwc->gadget.ep0->maxpacket = 64; - dwc->gadget.speed = USB_SPEED_FULL; + dwc->gadget->ep0->maxpacket = 64; + dwc->gadget->speed = USB_SPEED_FULL; break; case DWC3_DSTS_LOWSPEED: dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(8); - dwc->gadget.ep0->maxpacket = 8; - dwc->gadget.speed = USB_SPEED_LOW; + dwc->gadget->ep0->maxpacket = 8; + dwc->gadget->speed = USB_SPEED_LOW; break; } - dwc->eps[1]->endpoint.maxpacket = dwc->gadget.ep0->maxpacket; + dwc->eps[1]->endpoint.maxpacket = dwc->gadget->ep0->maxpacket; /* Enable USB2 LPM Capability */ @@ -3340,7 +3431,7 @@ static void dwc3_gadget_wakeup_interrupt(struct dwc3 *dwc) if (dwc->gadget_driver && dwc->gadget_driver->resume) { spin_unlock(&dwc->lock); - 
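
For quick reference, the connection-done handler above programs ep0's maxpacket per negotiated speed: 512 for SuperSpeedPlus and SuperSpeed, 64 for high and full speed, 8 for low speed. The sketch below only tabulates those values; the speed handling is simplified to strings for illustration:

#include <stdio.h>
#include <string.h>

/* ep0 maxpacket values as programmed in dwc3_gadget_conndone_interrupt(). */
static unsigned int ep0_maxpacket(const char *speed)
{
        if (!strcmp(speed, "super-plus") || !strcmp(speed, "super"))
                return 512;
        if (!strcmp(speed, "high") || !strcmp(speed, "full"))
                return 64;
        return 8;       /* low speed */
}

int main(void)
{
        const char *speeds[] = { "super-plus", "super", "high", "full", "low" };

        for (int i = 0; i < 5; i++)
                printf("%-10s -> %u\n", speeds[i], ep0_maxpacket(speeds[i]));
        return 0;
}
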
dwc->gadget_driver->resume(&dwc->gadget); + dwc->gadget_driver->resume(dwc->gadget); spin_lock(&dwc->lock); } } @@ -3511,7 +3602,7 @@ static void dwc3_gadget_interrupt(struct dwc3 *dwc, * Ignore suspend event until the gadget enters into * USB_STATE_CONFIGURED state. */ - if (dwc->gadget.state >= USB_STATE_CONFIGURED) + if (dwc->gadget->state >= USB_STATE_CONFIGURED) dwc3_gadget_suspend_interrupt(dwc, event->event_info); } @@ -3686,6 +3777,13 @@ out: return irq; } +static void dwc_gadget_release(struct device *dev) +{ + struct usb_gadget *gadget = container_of(dev, struct usb_gadget, dev); + + kfree(gadget); +} + /** * dwc3_gadget_init - initializes gadget related registers * @dwc: pointer to our controller context structure @@ -3696,6 +3794,7 @@ int dwc3_gadget_init(struct dwc3 *dwc) { int ret; int irq; + struct device *dev; irq = dwc3_gadget_get_irq(dwc); if (irq < 0) { @@ -3728,12 +3827,21 @@ int dwc3_gadget_init(struct dwc3 *dwc) } init_completion(&dwc->ep0_in_setup); + dwc->gadget = kzalloc(sizeof(struct usb_gadget), GFP_KERNEL); + if (!dwc->gadget) { + ret = -ENOMEM; + goto err3; + } - dwc->gadget.ops = &dwc3_gadget_ops; - dwc->gadget.speed = USB_SPEED_UNKNOWN; - dwc->gadget.sg_supported = true; - dwc->gadget.name = "dwc3-gadget"; - dwc->gadget.lpm_capable = true; + + usb_initialize_gadget(dwc->dev, dwc->gadget, dwc_gadget_release); + dev = &dwc->gadget->dev; + dev->platform_data = dwc; + dwc->gadget->ops = &dwc3_gadget_ops; + dwc->gadget->speed = USB_SPEED_UNKNOWN; + dwc->gadget->sg_supported = true; + dwc->gadget->name = "dwc3-gadget"; + dwc->gadget->lpm_capable = true; /* * FIXME We might be setting max_speed to <SUPER, however versions @@ -3756,7 +3864,7 @@ int dwc3_gadget_init(struct dwc3 *dwc) dev_info(dwc->dev, "changing max_speed on rev %08x\n", dwc->revision); - dwc->gadget.max_speed = dwc->maximum_speed; + dwc->gadget->max_speed = dwc->maximum_speed; /* * REVISIT: Here we should clear all pending IRQs to be @@ -3765,21 +3873,22 @@ int dwc3_gadget_init(struct dwc3 *dwc) ret = dwc3_gadget_init_endpoints(dwc, dwc->num_eps); if (ret) - goto err3; + goto err4; - ret = usb_add_gadget_udc(dwc->dev, &dwc->gadget); + ret = usb_add_gadget(dwc->gadget); if (ret) { - dev_err(dwc->dev, "failed to register udc\n"); - goto err4; + dev_err(dwc->dev, "failed to add gadget\n"); + goto err5; } - dwc3_gadget_set_speed(&dwc->gadget, dwc->maximum_speed); + dwc3_gadget_set_speed(dwc->gadget, dwc->maximum_speed); return 0; -err4: +err5: dwc3_gadget_free_endpoints(dwc); - +err4: + usb_put_gadget(dwc->gadget); err3: dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce, dwc->bounce_addr); @@ -3799,7 +3908,7 @@ err0: void dwc3_gadget_exit(struct dwc3 *dwc) { - usb_del_gadget_udc(&dwc->gadget); + usb_del_gadget_udc(dwc->gadget); dwc3_gadget_free_endpoints(dwc); dma_free_coherent(dwc->sysdev, DWC3_BOUNCE_SIZE, dwc->bounce, dwc->bounce_addr); diff --git a/drivers/usb/dwc3/gadget.h b/drivers/usb/dwc3/gadget.h index bd85eb7fa9ef..0cd281949970 100644 --- a/drivers/usb/dwc3/gadget.h +++ b/drivers/usb/dwc3/gadget.h @@ -17,7 +17,7 @@ struct dwc3; #define to_dwc3_ep(ep) (container_of(ep, struct dwc3_ep, endpoint)) -#define gadget_to_dwc(g) (container_of(g, struct dwc3, gadget)) +#define gadget_to_dwc(g) (dev_get_platdata(&g->dev)) /* DEPCFG parameter 1 */ #define DWC3_DEPCFG_INT_NUM(n) (((n) & 0x1f) << 0) @@ -113,6 +113,7 @@ int dwc3_gadget_ep0_set_halt(struct usb_ep *ep, int value); int dwc3_gadget_ep0_queue(struct usb_ep *ep, struct usb_request *request, gfp_t gfp_flags); int 
__dwc3_gadget_ep_set_halt(struct dwc3_ep *dep, int value, int protocol); +void dwc3_ep0_send_delayed_status(struct dwc3 *dwc); /** * dwc3_gadget_ep_get_transfer_index - Gets transfer index from HW diff --git a/drivers/usb/dwc3/trace.h b/drivers/usb/dwc3/trace.h index da1be01637c8..97f4f1125a41 100644 --- a/drivers/usb/dwc3/trace.h +++ b/drivers/usb/dwc3/trace.h @@ -104,8 +104,8 @@ DECLARE_EVENT_CLASS(dwc3_log_request, TP_STRUCT__entry( __string(name, req->dep->name) __field(struct dwc3_request *, req) - __field(unsigned, actual) - __field(unsigned, length) + __field(unsigned int, actual) + __field(unsigned int, length) __field(int, status) __field(int, zero) __field(int, short_not_ok) @@ -246,6 +246,7 @@ DECLARE_EVENT_CLASS(dwc3_log_trb, __entry->dequeue, __entry->bph, __entry->bpl, ({char *s; int pcm = ((__entry->size >> 24) & 3) + 1; + switch (__entry->type) { case USB_ENDPOINT_XFER_INT: case USB_ENDPOINT_XFER_ISOC: @@ -291,12 +292,12 @@ DECLARE_EVENT_CLASS(dwc3_log_ep, TP_ARGS(dep), TP_STRUCT__entry( __string(name, dep->name) - __field(unsigned, maxpacket) - __field(unsigned, maxpacket_limit) - __field(unsigned, max_streams) - __field(unsigned, maxburst) - __field(unsigned, flags) - __field(unsigned, direction) + __field(unsigned int, maxpacket) + __field(unsigned int, maxpacket_limit) + __field(unsigned int, max_streams) + __field(unsigned int, maxburst) + __field(unsigned int, flags) + __field(unsigned int, direction) __field(u8, trb_enqueue) __field(u8, trb_dequeue) ), diff --git a/drivers/usb/dwc3/ulpi.c b/drivers/usb/dwc3/ulpi.c index e6e6176386a4..aa213c9815f6 100644 --- a/drivers/usb/dwc3/ulpi.c +++ b/drivers/usb/dwc3/ulpi.c @@ -19,7 +19,7 @@ static int dwc3_ulpi_busyloop(struct dwc3 *dwc) { - unsigned count = 1000; + unsigned int count = 1000; u32 reg; while (count--) { diff --git a/drivers/usb/early/ehci-dbgp.c b/drivers/usb/early/ehci-dbgp.c index b075dbfad730..45b42d8f6453 100644 --- a/drivers/usb/early/ehci-dbgp.c +++ b/drivers/usb/early/ehci-dbgp.c @@ -15,6 +15,7 @@ #include <linux/console.h> #include <linux/errno.h> #include <linux/init.h> +#include <linux/iopoll.h> #include <linux/pci_regs.h> #include <linux/pci_ids.h> #include <linux/usb/ch9.h> @@ -161,17 +162,11 @@ static inline u32 dbgp_pid_read_update(u32 x, u32 tok) static int dbgp_wait_until_complete(void) { u32 ctrl; - int loop = DBGP_TIMEOUT; - - do { - ctrl = readl(&ehci_debug->control); - /* Stop when the transaction is finished */ - if (ctrl & DBGP_DONE) - break; - udelay(1); - } while (--loop > 0); + int ret; - if (!loop) + ret = readl_poll_timeout_atomic(&ehci_debug->control, ctrl, + (ctrl & DBGP_DONE), 1, DBGP_TIMEOUT); + if (ret) return -DBGP_TIMEOUT; /* diff --git a/drivers/usb/early/xhci-dbc.c b/drivers/usb/early/xhci-dbc.c index c0507767a8e3..be4ecbabdd58 100644 --- a/drivers/usb/early/xhci-dbc.c +++ b/drivers/usb/early/xhci-dbc.c @@ -14,6 +14,7 @@ #include <linux/pci_ids.h> #include <linux/memblock.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <asm/pci-direct.h> #include <asm/fixmap.h> #include <linux/bcd.h> @@ -135,16 +136,9 @@ static int handshake(void __iomem *ptr, u32 mask, u32 done, int wait, int delay) { u32 result; - do { - result = readl(ptr); - result &= mask; - if (result == done) - return 0; - udelay(delay); - wait -= delay; - } while (wait > 0); - - return -ETIMEDOUT; + return readl_poll_timeout_atomic(ptr, result, + ((result & mask) == done), + delay, wait); } static void __init xdbc_bios_handoff(void) diff --git a/drivers/usb/gadget/function/f_acm.c 
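
The ehci-dbgp and early xhci-dbc hunks above replace hand-rolled udelay() polling loops with readl_poll_timeout_atomic(). A user-space stand-in for the semantics — poll until (value & mask) == done or the timeout in microseconds expires, sleeping a fixed delay between reads; the fake register and the names are illustrative only:

#include <errno.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

/* Mimics the behaviour of readl_poll_timeout_atomic() for illustration. */
static int poll_timeout(volatile uint32_t *reg, uint32_t mask, uint32_t done,
                        unsigned int delay_us, unsigned int timeout_us)
{
        while (timeout_us) {
                if ((*reg & mask) == done)
                        return 0;
                usleep(delay_us);
                timeout_us = timeout_us > delay_us ? timeout_us - delay_us : 0;
        }
        return -ETIMEDOUT;
}

int main(void)
{
        volatile uint32_t fake_ctrl = 0x10;     /* pretend the "done" bit is bit 4 */

        printf("ret = %d\n", poll_timeout(&fake_ctrl, 0x10, 0x10, 1, 1000));
        return 0;
}
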
b/drivers/usb/gadget/function/f_acm.c index 200596ea9557..46647bfac2ef 100644 --- a/drivers/usb/gadget/function/f_acm.c +++ b/drivers/usb/gadget/function/f_acm.c @@ -425,9 +425,11 @@ static int acm_set_alt(struct usb_function *f, unsigned intf, unsigned alt) /* we know alt == 0, so this is an activation or a reset */ if (intf == acm->ctrl_id) { - dev_vdbg(&cdev->gadget->dev, - "reset acm control interface %d\n", intf); - usb_ep_disable(acm->notify); + if (acm->notify->enabled) { + dev_vdbg(&cdev->gadget->dev, + "reset acm control interface %d\n", intf); + usb_ep_disable(acm->notify); + } if (!acm->notify->desc) if (config_ep_by_speed(cdev->gadget, f, acm->notify)) diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c index 46af0aa07e2e..85cb15734aa8 100644 --- a/drivers/usb/gadget/function/f_midi.c +++ b/drivers/usb/gadget/function/f_midi.c @@ -698,9 +698,9 @@ drop_out: f_midi_drop_out_substreams(midi); } -static void f_midi_in_tasklet(unsigned long data) +static void f_midi_in_tasklet(struct tasklet_struct *t) { - struct f_midi *midi = (struct f_midi *) data; + struct f_midi *midi = from_tasklet(midi, t, tasklet); f_midi_transmit(midi); } @@ -875,7 +875,7 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f) int status, n, jack = 1, i = 0, endpoint_descriptor_index = 0; midi->gadget = cdev->gadget; - tasklet_init(&midi->tasklet, f_midi_in_tasklet, (unsigned long) midi); + tasklet_setup(&midi->tasklet, f_midi_in_tasklet); status = f_midi_register_card(midi); if (status < 0) goto fail_register; diff --git a/drivers/usb/gadget/function/f_ncm.c b/drivers/usb/gadget/function/f_ncm.c index b4206b0dede5..019bea8e09cc 100644 --- a/drivers/usb/gadget/function/f_ncm.c +++ b/drivers/usb/gadget/function/f_ncm.c @@ -85,8 +85,10 @@ static inline struct f_ncm *func_to_ncm(struct usb_function *f) /* peak (theoretical) bulk transfer rate in bits-per-second */ static inline unsigned ncm_bitrate(struct usb_gadget *g) { - if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER) - return 13 * 1024 * 8 * 1000 * 8; + if (gadget_is_superspeed(g) && g->speed >= USB_SPEED_SUPER_PLUS) + return 4250000000U; + else if (gadget_is_superspeed(g) && g->speed == USB_SPEED_SUPER) + return 3750000000U; else if (gadget_is_dualspeed(g) && g->speed == USB_SPEED_HIGH) return 13 * 512 * 8 * 1000 * 8; else @@ -376,7 +378,7 @@ static struct usb_ss_ep_comp_descriptor ss_ncm_bulk_comp_desc = { .bDescriptorType = USB_DT_SS_ENDPOINT_COMP, /* the following 2 values can be tweaked if necessary */ - /* .bMaxBurst = 0, */ + .bMaxBurst = 15, /* .bmAttributes = 0, */ }; @@ -1189,7 +1191,6 @@ static int ncm_unwrap_ntb(struct gether *port, const struct ndp_parser_opts *opts = ncm->parser_opts; unsigned crc_len = ncm->is_crc ? 
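
The f_midi conversion above (and the fsl_qe_udc one later in this diff) moves from tasklet_init() with an unsigned long cookie to tasklet_setup(): the callback now receives the tasklet_struct pointer and recovers the enclosing object with from_tasklet(), which is essentially container_of(). A user-space illustration using a local from_member() macro as a stand-in:

#include <stddef.h>
#include <stdio.h>

struct tasklet { int dummy; };

struct midi_dev {
        int port;
        struct tasklet tasklet;         /* embedded, like midi->tasklet */
};

/* Stand-in for from_tasklet()/container_of(). */
#define from_member(ptr, type, member) \
        ((type *)((char *)(ptr) - offsetof(type, member)))

static void midi_in_callback(struct tasklet *t)
{
        struct midi_dev *midi = from_member(t, struct midi_dev, tasklet);

        printf("callback for port %d\n", midi->port);
}

int main(void)
{
        struct midi_dev midi = { .port = 3 };

        midi_in_callback(&midi.tasklet);        /* what tasklet_setup() arranges */
        return 0;
}
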
sizeof(uint32_t) : 0; int dgram_counter; - bool ndp_after_header; /* dwSignature */ if (get_unaligned_le32(tmp) != opts->nth_sign) { @@ -1216,7 +1217,6 @@ static int ncm_unwrap_ntb(struct gether *port, } ndp_index = get_ncm(&tmp, opts->ndp_index); - ndp_after_header = false; /* Run through all the NDP's in the NTB */ do { @@ -1232,8 +1232,6 @@ static int ncm_unwrap_ntb(struct gether *port, ndp_index); goto err; } - if (ndp_index == opts->nth_size) - ndp_after_header = true; /* * walk through NDP @@ -1312,37 +1310,13 @@ static int ncm_unwrap_ntb(struct gether *port, index2 = get_ncm(&tmp, opts->dgram_item_len); dg_len2 = get_ncm(&tmp, opts->dgram_item_len); - if (index2 == 0 || dg_len2 == 0) - break; - /* wDatagramIndex[1] */ - if (ndp_after_header) { - if (index2 < opts->nth_size + opts->ndp_size) { - INFO(port->func.config->cdev, - "Bad index: %#X\n", index2); - goto err; - } - } else { - if (index2 < opts->nth_size + opts->dpe_size) { - INFO(port->func.config->cdev, - "Bad index: %#X\n", index2); - goto err; - } - } if (index2 > block_len - opts->dpe_size) { INFO(port->func.config->cdev, "Bad index: %#X\n", index2); goto err; } - /* wDatagramLength[1] */ - if ((dg_len2 < 14 + crc_len) || - (dg_len2 > frame_max)) { - INFO(port->func.config->cdev, - "Bad dgram length: %#X\n", dg_len); - goto err; - } - /* * Copy the data into a new skb. * This ensures the truesize is correct @@ -1359,6 +1333,8 @@ static int ncm_unwrap_ntb(struct gether *port, ndp_len -= 2 * (opts->dgram_item_len * 2); dgram_counter++; + if (index2 == 0 || dg_len2 == 0) + break; } while (ndp_len > 2 * (opts->dgram_item_len * 2)); } while (ndp_index); @@ -1560,7 +1536,7 @@ static int ncm_bind(struct usb_configuration *c, struct usb_function *f) fs_ncm_notify_desc.bEndpointAddress; status = usb_assign_descriptors(f, ncm_fs_function, ncm_hs_function, - ncm_ss_function, NULL); + ncm_ss_function, ncm_ss_function); if (status) goto fail; diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c index 68697f596066..64a4112068fc 100644 --- a/drivers/usb/gadget/function/f_printer.c +++ b/drivers/usb/gadget/function/f_printer.c @@ -31,6 +31,7 @@ #include <linux/types.h> #include <linux/ctype.h> #include <linux/cdev.h> +#include <linux/kref.h> #include <asm/byteorder.h> #include <linux/io.h> @@ -64,7 +65,7 @@ struct printer_dev { struct usb_gadget *gadget; s8 interface; struct usb_ep *in_ep, *out_ep; - + struct kref kref; struct list_head rx_reqs; /* List of free RX structs */ struct list_head rx_reqs_active; /* List of Active RX xfers */ struct list_head rx_buffers; /* List of completed xfers */ @@ -218,6 +219,13 @@ static inline struct usb_endpoint_descriptor *ep_desc(struct usb_gadget *gadget, /*-------------------------------------------------------------------------*/ +static void printer_dev_free(struct kref *kref) +{ + struct printer_dev *dev = container_of(kref, struct printer_dev, kref); + + kfree(dev); +} + static struct usb_request * printer_req_alloc(struct usb_ep *ep, unsigned len, gfp_t gfp_flags) { @@ -353,6 +361,7 @@ printer_open(struct inode *inode, struct file *fd) spin_unlock_irqrestore(&dev->lock, flags); + kref_get(&dev->kref); DBG(dev, "printer_open returned %x\n", ret); return ret; } @@ -370,6 +379,7 @@ printer_close(struct inode *inode, struct file *fd) dev->printer_status &= ~PRINTER_SELECTED; spin_unlock_irqrestore(&dev->lock, flags); + kref_put(&dev->kref, printer_dev_free); DBG(dev, "printer_close\n"); return 0; @@ -1386,7 +1396,8 @@ static void gprinter_free(struct 
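
On the ncm_bitrate() change above: the old SuperSpeed branch evaluated to 13 * 1024 * 8 * 1000 * 8 = 851,968,000 bps, roughly 852 Mbps, which badly undersells a 5 Gbps link, so the patch switches to explicit constants and adds a SuperSpeedPlus case. Quick arithmetic check:

#include <stdio.h>

int main(void)
{
        unsigned long long old_ss = 13ULL * 1024 * 8 * 1000 * 8;       /* 851,968,000 bps */
        unsigned long long new_ss = 3750000000ULL;                     /* ~3.75 Gbps */
        unsigned long long new_ssp = 4250000000ULL;                    /* ~4.25 Gbps */

        printf("old SS estimate: %llu bps\n", old_ss);
        printf("new SS:  %llu bps\nnew SS+: %llu bps\n", new_ss, new_ssp);
        return 0;
}
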
usb_function *f) struct f_printer_opts *opts; opts = container_of(f->fi, struct f_printer_opts, func_inst); - kfree(dev); + + kref_put(&dev->kref, printer_dev_free); mutex_lock(&opts->lock); --opts->refcnt; mutex_unlock(&opts->lock); @@ -1455,6 +1466,7 @@ static struct usb_function *gprinter_alloc(struct usb_function_instance *fi) return ERR_PTR(-ENOMEM); } + kref_init(&dev->kref); ++opts->refcnt; dev->minor = opts->minor; dev->pnp_string = opts->pnp_string; diff --git a/drivers/usb/gadget/function/f_tcm.c b/drivers/usb/gadget/function/f_tcm.c index 184165e27908..410fa89eae8f 100644 --- a/drivers/usb/gadget/function/f_tcm.c +++ b/drivers/usb/gadget/function/f_tcm.c @@ -392,12 +392,12 @@ static void bot_set_alt(struct f_uas *fu) fu->flags = USBG_IS_BOT; - config_ep_by_speed(gadget, f, fu->ep_in); + config_ep_by_speed_and_alt(gadget, f, fu->ep_in, USB_G_ALT_INT_BBB); ret = usb_ep_enable(fu->ep_in); if (ret) goto err_b_in; - config_ep_by_speed(gadget, f, fu->ep_out); + config_ep_by_speed_and_alt(gadget, f, fu->ep_out, USB_G_ALT_INT_BBB); ret = usb_ep_enable(fu->ep_out); if (ret) goto err_b_out; @@ -852,21 +852,21 @@ static void uasp_set_alt(struct f_uas *fu) if (gadget->speed >= USB_SPEED_SUPER) fu->flags |= USBG_USE_STREAMS; - config_ep_by_speed(gadget, f, fu->ep_in); + config_ep_by_speed_and_alt(gadget, f, fu->ep_in, USB_G_ALT_INT_UAS); ret = usb_ep_enable(fu->ep_in); if (ret) goto err_b_in; - config_ep_by_speed(gadget, f, fu->ep_out); + config_ep_by_speed_and_alt(gadget, f, fu->ep_out, USB_G_ALT_INT_UAS); ret = usb_ep_enable(fu->ep_out); if (ret) goto err_b_out; - config_ep_by_speed(gadget, f, fu->ep_cmd); + config_ep_by_speed_and_alt(gadget, f, fu->ep_cmd, USB_G_ALT_INT_UAS); ret = usb_ep_enable(fu->ep_cmd); if (ret) goto err_cmd; - config_ep_by_speed(gadget, f, fu->ep_status); + config_ep_by_speed_and_alt(gadget, f, fu->ep_status, USB_G_ALT_INT_UAS); ret = usb_ep_enable(fu->ep_status); if (ret) goto err_status; diff --git a/drivers/usb/gadget/function/f_uvc.c b/drivers/usb/gadget/function/f_uvc.c index 0b9712616455..44b4352a2676 100644 --- a/drivers/usb/gadget/function/f_uvc.c +++ b/drivers/usb/gadget/function/f_uvc.c @@ -740,20 +740,20 @@ uvc_function_bind(struct usb_configuration *c, struct usb_function *f) /* Initialise video. */ ret = uvcg_video_init(&uvc->video, uvc); if (ret < 0) - goto error; + goto v4l2_error; /* Register a V4L2 device. 
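
The f_printer hunks above add reference counting so the printer_dev outlives whichever owner goes away last: the function instance (gprinter_free()) and any still-open character device file (printer_close()). A user-space mimic of that kref pattern; the refcount field and helpers below stand in for kref_init()/kref_get()/kref_put():

#include <stdio.h>
#include <stdlib.h>

struct printer_dev {
        unsigned int refcount;          /* kref_init() starts this at 1 */
};

static void printer_dev_free(struct printer_dev *dev)
{
        printf("freeing printer_dev\n");
        free(dev);
}

static void dev_get(struct printer_dev *dev) { dev->refcount++; }       /* kref_get() */
static void dev_put(struct printer_dev *dev)                            /* kref_put() */
{
        if (--dev->refcount == 0)
                printer_dev_free(dev);
}

int main(void)
{
        struct printer_dev *dev = calloc(1, sizeof(*dev));

        if (!dev)
                return 1;
        dev->refcount = 1;      /* gprinter_alloc(): kref_init() */
        dev_get(dev);           /* printer_open() */
        dev_put(dev);           /* gprinter_free(): function torn down first */
        dev_put(dev);           /* printer_close(): last reference frees the struct */
        return 0;
}
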
*/ ret = uvc_register_video(uvc); if (ret < 0) { uvcg_err(f, "failed to register video device\n"); - goto error; + goto v4l2_error; } return 0; -error: +v4l2_error: v4l2_device_unregister(&uvc->v4l2_dev); - +error: if (uvc->control_req) usb_ep_free_request(cdev->gadget->ep0, uvc->control_req); kfree(uvc->control_buf); diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c index c3cc6bd14e61..31ea76adcc0d 100644 --- a/drivers/usb/gadget/function/u_ether.c +++ b/drivers/usb/gadget/function/u_ether.c @@ -93,7 +93,7 @@ struct eth_dev { static inline int qlen(struct usb_gadget *gadget, unsigned qmult) { if (gadget_is_dualspeed(gadget) && (gadget->speed == USB_SPEED_HIGH || - gadget->speed == USB_SPEED_SUPER)) + gadget->speed >= USB_SPEED_SUPER)) return qmult * DEFAULT_QLEN; else return DEFAULT_QLEN; diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c index 127ecc2b4317..2caccbb6e014 100644 --- a/drivers/usb/gadget/function/u_serial.c +++ b/drivers/usb/gadget/function/u_serial.c @@ -1391,6 +1391,7 @@ void gserial_disconnect(struct gserial *gser) if (port->port.tty) tty_hangup(port->port.tty); } + port->suspended = false; spin_unlock_irqrestore(&port->port_lock, flags); /* disable endpoints, aborting down any active I/O */ diff --git a/drivers/usb/gadget/udc/aspeed-vhub/core.c b/drivers/usb/gadget/udc/aspeed-vhub/core.c index cdf96911e4b1..be7bb64e3594 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/core.c +++ b/drivers/usb/gadget/udc/aspeed-vhub/core.c @@ -135,13 +135,9 @@ static irqreturn_t ast_vhub_irq(int irq, void *data) /* Handle device interrupts */ if (istat & vhub->port_irq_mask) { - unsigned long bitmap = istat; - int offset = VHUB_IRQ_DEV1_BIT; - int size = VHUB_IRQ_DEV1_BIT + vhub->max_ports; - - for_each_set_bit_from(offset, &bitmap, size) { - i = offset - VHUB_IRQ_DEV1_BIT; - ast_vhub_dev_irq(&vhub->ports[i].dev); + for (i = 0; i < vhub->max_ports; i++) { + if (istat & VHUB_DEV_IRQ(i)) + ast_vhub_dev_irq(&vhub->ports[i].dev); } } diff --git a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h index 2e5a1ef14a75..87a5dea12d3c 100644 --- a/drivers/usb/gadget/udc/aspeed-vhub/vhub.h +++ b/drivers/usb/gadget/udc/aspeed-vhub/vhub.h @@ -67,6 +67,9 @@ #define VHUB_IRQ_HUB_EP0_SETUP (1 << 0) #define VHUB_IRQ_ACK_ALL 0x1ff +/* Downstream device IRQ mask. 
*/ +#define VHUB_DEV_IRQ(n) (VHUB_IRQ_DEVICE1 << (n)) + /* SW reset reg */ #define VHUB_SW_RESET_EP_POOL (1 << 9) #define VHUB_SW_RESET_DMA_CONTROLLER (1 << 8) diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.c b/drivers/usb/gadget/udc/atmel_usba_udc.c index a6426dd1cfef..2b893bceea45 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.c +++ b/drivers/usb/gadget/udc/atmel_usba_udc.c @@ -1056,16 +1056,19 @@ found_ep: switch (usb_endpoint_type(desc)) { case USB_ENDPOINT_XFER_CONTROL: + ep->nr_banks = 1; break; case USB_ENDPOINT_XFER_ISOC: ep->fifo_size = 1024; - ep->nr_banks = 2; + if (ep->udc->ep_prealloc) + ep->nr_banks = 2; break; case USB_ENDPOINT_XFER_BULK: ep->fifo_size = 512; - ep->nr_banks = 1; + if (ep->udc->ep_prealloc) + ep->nr_banks = 1; break; case USB_ENDPOINT_XFER_INT: @@ -1075,7 +1078,8 @@ found_ep: else ep->fifo_size = roundup_pow_of_two(le16_to_cpu(desc->wMaxPacketSize)); - ep->nr_banks = 1; + if (ep->udc->ep_prealloc) + ep->nr_banks = 1; break; } @@ -1091,8 +1095,6 @@ found_ep: USBA_BF(EPT_SIZE, fls(ep->fifo_size - 1) - 3); ep->ept_cfg |= USBA_BF(BK_NUMBER, ep->nr_banks); - - ep->udc->configured_ep++; } return _ep; @@ -1786,7 +1788,7 @@ static irqreturn_t usba_udc_irq(int irq, void *devid) if (status & USBA_END_OF_RESET) { struct usba_ep *ep0, *ep; - int i, n; + int i; usba_writel(udc, INT_CLR, USBA_END_OF_RESET|USBA_END_OF_RESUME @@ -1834,13 +1836,14 @@ static irqreturn_t usba_udc_irq(int irq, void *devid) "ODD: EP0 configuration is invalid!\n"); /* Preallocate other endpoints */ - n = fifo_mode ? udc->num_ep : udc->configured_ep; - for (i = 1; i < n; i++) { + for (i = 1; i < udc->num_ep; i++) { ep = &udc->usba_ep[i]; - usba_ep_writel(ep, CFG, ep->ept_cfg); - if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) - dev_err(&udc->pdev->dev, - "ODD: EP%d configuration is invalid!\n", i); + if (ep->ep.claimed) { + usba_ep_writel(ep, CFG, ep->ept_cfg); + if (!(usba_ep_readl(ep, CFG) & USBA_EPT_MAPPED)) + dev_err(&udc->pdev->dev, + "ODD: EP%d configuration is invalid!\n", i); + } } } @@ -2025,9 +2028,6 @@ static int atmel_usba_stop(struct usb_gadget *gadget) if (udc->vbus_pin) disable_irq(gpiod_to_irq(udc->vbus_pin)); - if (fifo_mode == 0) - udc->configured_ep = 1; - udc->suspended = false; usba_stop(udc); @@ -2090,33 +2090,51 @@ static const struct usba_udc_config udc_at91sam9rl_cfg = { .errata = &at91sam9rl_errata, .config = ep_config_sam9, .num_ep = ARRAY_SIZE(ep_config_sam9), + .ep_prealloc = true, }; static const struct usba_udc_config udc_at91sam9g45_cfg = { .errata = &at91sam9g45_errata, .config = ep_config_sam9, .num_ep = ARRAY_SIZE(ep_config_sam9), + .ep_prealloc = true, }; static const struct usba_udc_config udc_sama5d3_cfg = { .config = ep_config_sama5, .num_ep = ARRAY_SIZE(ep_config_sama5), + .ep_prealloc = true, +}; + +static const struct usba_udc_config udc_sam9x60_cfg = { + .num_ep = ARRAY_SIZE(ep_config_sam9), + .config = ep_config_sam9, + .ep_prealloc = false, }; static const struct of_device_id atmel_udc_dt_ids[] = { { .compatible = "atmel,at91sam9rl-udc", .data = &udc_at91sam9rl_cfg }, { .compatible = "atmel,at91sam9g45-udc", .data = &udc_at91sam9g45_cfg }, { .compatible = "atmel,sama5d3-udc", .data = &udc_sama5d3_cfg }, + { .compatible = "microchip,sam9x60-udc", .data = &udc_sam9x60_cfg }, { /* sentinel */ } }; MODULE_DEVICE_TABLE(of, atmel_udc_dt_ids); +static const struct of_device_id atmel_pmc_dt_ids[] = { + { .compatible = "atmel,at91sam9g45-pmc" }, + { .compatible = "atmel,at91sam9rl-pmc" }, + { .compatible = "atmel,at91sam9x5-pmc" }, + { /* sentinel */ } 
+}; + static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev, struct usba_udc *udc) { struct device_node *np = pdev->dev.of_node; const struct of_device_id *match; + struct device_node *pp; int i, ret; struct usba_ep *eps, *ep; const struct usba_udc_config *udc_config; @@ -2126,14 +2144,19 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev, return ERR_PTR(-EINVAL); udc_config = match->data; + udc->ep_prealloc = udc_config->ep_prealloc; udc->errata = udc_config->errata; - udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9g45-pmc"); - if (IS_ERR(udc->pmc)) - udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9rl-pmc"); - if (IS_ERR(udc->pmc)) - udc->pmc = syscon_regmap_lookup_by_compatible("atmel,at91sam9x5-pmc"); - if (udc->errata && IS_ERR(udc->pmc)) - return ERR_CAST(udc->pmc); + if (udc->errata) { + pp = of_find_matching_node_and_match(NULL, atmel_pmc_dt_ids, + NULL); + if (!pp) + return ERR_PTR(-ENODEV); + + udc->pmc = syscon_node_to_regmap(pp); + of_node_put(pp); + if (IS_ERR(udc->pmc)) + return ERR_CAST(udc->pmc); + } udc->num_ep = 0; @@ -2142,7 +2165,6 @@ static struct usba_ep * atmel_udc_of_init(struct platform_device *pdev, if (fifo_mode == 0) { udc->num_ep = udc_config->num_ep; - udc->configured_ep = 1; } else { udc->num_ep = usba_config_fifo_table(udc); } diff --git a/drivers/usb/gadget/udc/atmel_usba_udc.h b/drivers/usb/gadget/udc/atmel_usba_udc.h index 48e332439ed5..620472f218bc 100644 --- a/drivers/usb/gadget/udc/atmel_usba_udc.h +++ b/drivers/usb/gadget/udc/atmel_usba_udc.h @@ -317,6 +317,7 @@ struct usba_udc_config { const struct usba_udc_errata *errata; const struct usba_ep_config *config; const int num_ep; + const bool ep_prealloc; }; struct usba_udc { @@ -336,7 +337,6 @@ struct usba_udc { int irq; struct gpio_desc *vbus_pin; int num_ep; - int configured_ep; struct usba_fifo_cfg *fifo_cfg; struct clk *pclk; struct clk *hclk; @@ -344,6 +344,7 @@ struct usba_udc { bool bias_pulse_needed; bool clocked; bool suspended; + bool ep_prealloc; u16 devstatus; diff --git a/drivers/usb/gadget/udc/bcm63xx_udc.c b/drivers/usb/gadget/udc/bcm63xx_udc.c index feaec00a3c16..9cd4a70ccdd6 100644 --- a/drivers/usb/gadget/udc/bcm63xx_udc.c +++ b/drivers/usb/gadget/udc/bcm63xx_udc.c @@ -26,6 +26,7 @@ #include <linux/seq_file.h> #include <linux/slab.h> #include <linux/timer.h> +#include <linux/usb.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> #include <linux/workqueue.h> diff --git a/drivers/usb/gadget/udc/bdc/bdc_core.c b/drivers/usb/gadget/udc/bdc/bdc_core.c index 5ff36525044e..0bef6b3f049b 100644 --- a/drivers/usb/gadget/udc/bdc/bdc_core.c +++ b/drivers/usb/gadget/udc/bdc/bdc_core.c @@ -484,7 +484,7 @@ static void bdc_phy_exit(struct bdc *bdc) static int bdc_probe(struct platform_device *pdev) { struct bdc *bdc; - int ret = -ENOMEM; + int ret; int irq; u32 temp; struct device *dev = &pdev->dev; @@ -510,10 +510,9 @@ static int bdc_probe(struct platform_device *pdev) bdc->clk = clk; bdc->regs = devm_platform_ioremap_resource(pdev, 0); - if (IS_ERR(bdc->regs)) { - dev_err(dev, "ioremap error\n"); - return -ENOMEM; - } + if (IS_ERR(bdc->regs)) + return PTR_ERR(bdc->regs); + irq = platform_get_irq(pdev, 0); if (irq < 0) return irq; diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c index 4f82bcd31fd3..debf54205d22 100644 --- a/drivers/usb/gadget/udc/core.c +++ b/drivers/usb/gadget/udc/core.c @@ -715,6 +715,9 @@ int usb_gadget_disconnect(struct usb_gadget *gadget) goto out; } + if (!gadget->connected) + 
goto out; + if (gadget->deactivated) { /* * If gadget is deactivated we only save new state. @@ -1164,21 +1167,18 @@ static int check_pending_gadget_drivers(struct usb_udc *udc) } /** - * usb_add_gadget_udc_release - adds a new gadget to the udc class driver list + * usb_initialize_gadget - initialize a gadget and its embedded struct device * @parent: the parent device to this udc. Usually the controller driver's * device. - * @gadget: the gadget to be added to the list. + * @gadget: the gadget to be initialized. * @release: a gadget release function. * * Returns zero on success, negative errno otherwise. * Calls the gadget release function in the latter case. */ -int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, +void usb_initialize_gadget(struct device *parent, struct usb_gadget *gadget, void (*release)(struct device *dev)) { - struct usb_udc *udc; - int ret = -ENOMEM; - dev_set_name(&gadget->dev, "gadget"); INIT_WORK(&gadget->work, usb_gadget_state_work); gadget->dev.parent = parent; @@ -1189,17 +1189,32 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, gadget->dev.release = usb_udc_nop_release; device_initialize(&gadget->dev); +} +EXPORT_SYMBOL_GPL(usb_initialize_gadget); + +/** + * usb_add_gadget - adds a new gadget to the udc class driver list + * @gadget: the gadget to be added to the list. + * + * Returns zero on success, negative errno otherwise. + * Does not do a final usb_put_gadget() if an error occurs. + */ +int usb_add_gadget(struct usb_gadget *gadget) +{ + struct usb_udc *udc; + int ret = -ENOMEM; udc = kzalloc(sizeof(*udc), GFP_KERNEL); if (!udc) - goto err_put_gadget; + goto error; device_initialize(&udc->dev); udc->dev.release = usb_udc_release; udc->dev.class = udc_class; udc->dev.groups = usb_udc_attr_groups; - udc->dev.parent = parent; - ret = dev_set_name(&udc->dev, "%s", kobject_name(&parent->kobj)); + udc->dev.parent = gadget->dev.parent; + ret = dev_set_name(&udc->dev, "%s", + kobject_name(&gadget->dev.parent->kobj)); if (ret) goto err_put_udc; @@ -1242,8 +1257,30 @@ int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, err_put_udc: put_device(&udc->dev); - err_put_gadget: - put_device(&gadget->dev); + error: + return ret; +} +EXPORT_SYMBOL_GPL(usb_add_gadget); + +/** + * usb_add_gadget_udc_release - adds a new gadget to the udc class driver list + * @parent: the parent device to this udc. Usually the controller driver's + * device. + * @gadget: the gadget to be added to the list. + * @release: a gadget release function. + * + * Returns zero on success, negative errno otherwise. + * Calls the gadget release function in the latter case. + */ +int usb_add_gadget_udc_release(struct device *parent, struct usb_gadget *gadget, + void (*release)(struct device *dev)) +{ + int ret; + + usb_initialize_gadget(parent, gadget, release); + ret = usb_add_gadget(gadget); + if (ret) + usb_put_gadget(gadget); return ret; } EXPORT_SYMBOL_GPL(usb_add_gadget_udc_release); @@ -1311,13 +1348,14 @@ static void usb_gadget_remove_driver(struct usb_udc *udc) } /** - * usb_del_gadget_udc - deletes @udc from udc_list + * usb_del_gadget - deletes @udc from udc_list * @gadget: the gadget to be removed. * - * This, will call usb_gadget_unregister_driver() if + * This will call usb_gadget_unregister_driver() if * the @udc is still busy. + * It will not do a final usb_put_gadget(). 
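
To see how the split registration API above is meant to be used by a UDC driver (this mirrors the net2272/net2280/dwc3 conversions elsewhere in this diff): call usb_initialize_gadget() once, then usb_add_gadget(), which no longer drops the reference on failure, and on teardown usb_del_gadget() followed by a final usb_put_gadget(). A sketch with placeholder foo_* names and an "added" flag, not taken from the patch:

#include <linux/platform_device.h>
#include <linux/slab.h>
#include <linux/usb/gadget.h>

struct foo_udc {
        struct usb_gadget gadget;
        bool added;
};

static void foo_release(struct device *dev)
{
        struct usb_gadget *gadget = container_of(dev, struct usb_gadget, dev);

        kfree(container_of(gadget, struct foo_udc, gadget));
}

static int foo_probe(struct platform_device *pdev)
{
        struct foo_udc *foo = kzalloc(sizeof(*foo), GFP_KERNEL);
        int ret;

        if (!foo)
                return -ENOMEM;

        /* bind gadget->dev to the parent and install the release hook */
        usb_initialize_gadget(&pdev->dev, &foo->gadget, foo_release);

        ret = usb_add_gadget(&foo->gadget);
        if (ret) {
                usb_put_gadget(&foo->gadget);   /* add no longer puts on error */
                return ret;
        }
        foo->added = true;

        platform_set_drvdata(pdev, foo);
        return 0;
}

static int foo_remove(struct platform_device *pdev)
{
        struct foo_udc *foo = platform_get_drvdata(pdev);

        if (foo->added)
                usb_del_gadget(&foo->gadget);
        usb_put_gadget(&foo->gadget);           /* final put ends in foo_release() */
        return 0;
}
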
*/ -void usb_del_gadget_udc(struct usb_gadget *gadget) +void usb_del_gadget(struct usb_gadget *gadget) { struct usb_udc *udc = gadget->udc; @@ -1340,8 +1378,20 @@ void usb_del_gadget_udc(struct usb_gadget *gadget) kobject_uevent(&udc->dev.kobj, KOBJ_REMOVE); flush_work(&gadget->work); device_unregister(&udc->dev); - device_unregister(&gadget->dev); - memset(&gadget->dev, 0x00, sizeof(gadget->dev)); + device_del(&gadget->dev); +} +EXPORT_SYMBOL_GPL(usb_del_gadget); + +/** + * usb_del_gadget_udc - deletes @udc from udc_list + * @gadget: the gadget to be removed. + * + * Calls usb_del_gadget() and does a final usb_put_gadget(). + */ +void usb_del_gadget_udc(struct usb_gadget *gadget) +{ + usb_del_gadget(gadget); + usb_put_gadget(gadget); } EXPORT_SYMBOL_GPL(usb_del_gadget_udc); diff --git a/drivers/usb/gadget/udc/fsl_qe_udc.c b/drivers/usb/gadget/udc/fsl_qe_udc.c index 2707be628298..fa66449b3907 100644 --- a/drivers/usb/gadget/udc/fsl_qe_udc.c +++ b/drivers/usb/gadget/udc/fsl_qe_udc.c @@ -923,9 +923,9 @@ static int qe_ep_rxframe_handle(struct qe_ep *ep) return 0; } -static void ep_rx_tasklet(unsigned long data) +static void ep_rx_tasklet(struct tasklet_struct *t) { - struct qe_udc *udc = (struct qe_udc *)data; + struct qe_udc *udc = from_tasklet(udc, t, rx_tasklet); struct qe_ep *ep; struct qe_frame *pframe; struct qe_bd __iomem *bd; @@ -2553,8 +2553,7 @@ static int qe_udc_probe(struct platform_device *ofdev) DMA_TO_DEVICE); } - tasklet_init(&udc->rx_tasklet, ep_rx_tasklet, - (unsigned long)udc); + tasklet_setup(&udc->rx_tasklet, ep_rx_tasklet); /* request irq and disable DR */ udc->usb_irq = irq_of_parse_and_map(np, 0); if (!udc->usb_irq) { diff --git a/drivers/usb/gadget/udc/fsl_udc_core.c b/drivers/usb/gadget/udc/fsl_udc_core.c index a6f7b2594c09..de528e3b0662 100644 --- a/drivers/usb/gadget/udc/fsl_udc_core.c +++ b/drivers/usb/gadget/udc/fsl_udc_core.c @@ -2061,7 +2061,7 @@ static int fsl_proc_read(struct seq_file *m, void *v) "Sleep Enable: %d SOF Received Enable: %d " "Reset Enable: %d\n" "System Error Enable: %d " - "Port Change Dectected Enable: %d\n" + "Port Change Detected Enable: %d\n" "USB Error Intr Enable: %d USB Intr Enable: %d\n\n", (tmp_reg & USB_INTR_DEVICE_SUSPEND) ? 1 : 0, (tmp_reg & USB_INTR_SOF_EN) ? 1 : 0, @@ -2439,11 +2439,12 @@ static int fsl_udc_probe(struct platform_device *pdev) /* DEN is bidirectional ep number, max_ep doubles the number */ udc_controller->max_ep = (dccparams & DCCPARAMS_DEN_MASK) * 2; - udc_controller->irq = platform_get_irq(pdev, 0); - if (udc_controller->irq <= 0) { - ret = udc_controller->irq ? : -ENODEV; + ret = platform_get_irq(pdev, 0); + if (ret <= 0) { + ret = ret ? 
: -ENODEV; goto err_iounmap; } + udc_controller->irq = ret; ret = request_irq(udc_controller->irq, fsl_udc_irq, IRQF_SHARED, driver_name, udc_controller); diff --git a/drivers/usb/gadget/udc/lpc32xx_udc.c b/drivers/usb/gadget/udc/lpc32xx_udc.c index e8a4637a9a17..3f1c62adce4b 100644 --- a/drivers/usb/gadget/udc/lpc32xx_udc.c +++ b/drivers/usb/gadget/udc/lpc32xx_udc.c @@ -495,7 +495,7 @@ static void proc_ep_show(struct seq_file *s, struct lpc32xx_ep *ep) } } -static int proc_udc_show(struct seq_file *s, void *unused) +static int udc_show(struct seq_file *s, void *unused) { struct lpc32xx_udc *udc = s->private; struct lpc32xx_ep *ep; @@ -524,22 +524,11 @@ static int proc_udc_show(struct seq_file *s, void *unused) return 0; } -static int proc_udc_open(struct inode *inode, struct file *file) -{ - return single_open(file, proc_udc_show, PDE_DATA(inode)); -} - -static const struct file_operations proc_ops = { - .owner = THIS_MODULE, - .open = proc_udc_open, - .read = seq_read, - .llseek = seq_lseek, - .release = single_release, -}; +DEFINE_SHOW_ATTRIBUTE(udc); static void create_debug_file(struct lpc32xx_udc *udc) { - udc->pde = debugfs_create_file(debug_filename, 0, NULL, udc, &proc_ops); + udc->pde = debugfs_create_file(debug_filename, 0, NULL, udc, &udc_fops); } static void remove_debug_file(struct lpc32xx_udc *udc) diff --git a/drivers/usb/gadget/udc/net2272.c b/drivers/usb/gadget/udc/net2272.c index 44d1ea2307bb..23a735641c3d 100644 --- a/drivers/usb/gadget/udc/net2272.c +++ b/drivers/usb/gadget/udc/net2272.c @@ -9,7 +9,6 @@ #include <linux/delay.h> #include <linux/device.h> #include <linux/errno.h> -#include <linux/gpio.h> #include <linux/init.h> #include <linux/interrupt.h> #include <linux/io.h> @@ -2196,7 +2195,8 @@ static int net2272_present(struct net2272 *dev) static void net2272_gadget_release(struct device *_dev) { - struct net2272 *dev = dev_get_drvdata(_dev); + struct net2272 *dev = container_of(_dev, struct net2272, gadget.dev); + kfree(dev); } @@ -2205,7 +2205,8 @@ net2272_gadget_release(struct device *_dev) static void net2272_remove(struct net2272 *dev) { - usb_del_gadget_udc(&dev->gadget); + if (dev->added) + usb_del_gadget(&dev->gadget); free_irq(dev->irq, dev); iounmap(dev->base_addr); device_remove_file(dev->dev, &dev_attr_registers); @@ -2235,6 +2236,7 @@ static struct net2272 *net2272_probe_init(struct device *dev, unsigned int irq) /* the "gadget" abstracts/virtualizes the controller */ ret->gadget.name = driver_name; + usb_initialize_gadget(dev, &ret->gadget, net2272_gadget_release); return ret; } @@ -2273,10 +2275,10 @@ net2272_probe_fin(struct net2272 *dev, unsigned int irqflags) if (ret) goto err_irq; - ret = usb_add_gadget_udc_release(dev->dev, &dev->gadget, - net2272_gadget_release); + ret = usb_add_gadget(&dev->gadget); if (ret) goto err_add_udc; + dev->added = 1; return 0; @@ -2451,7 +2453,7 @@ net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (pci_enable_device(pdev) < 0) { ret = -ENODEV; - goto err_free; + goto err_put; } pci_set_master(pdev); @@ -2474,8 +2476,8 @@ net2272_pci_probe(struct pci_dev *pdev, const struct pci_device_id *id) err_pci: pci_disable_device(pdev); - err_free: - kfree(dev); + err_put: + usb_put_gadget(&dev->gadget); return ret; } @@ -2536,7 +2538,7 @@ net2272_pci_remove(struct pci_dev *pdev) pci_disable_device(pdev); - kfree(dev); + usb_put_gadget(&dev->gadget); } /* Table of matching PCI IDs */ @@ -2649,7 +2651,7 @@ net2272_plat_probe(struct platform_device *pdev) err_req: release_mem_region(base, len); err: - 
kfree(dev); + usb_put_gadget(&dev->gadget); return ret; } @@ -2664,7 +2666,7 @@ net2272_plat_remove(struct platform_device *pdev) release_mem_region(pdev->resource[0].start, resource_size(&pdev->resource[0])); - kfree(dev); + usb_put_gadget(&dev->gadget); return 0; } diff --git a/drivers/usb/gadget/udc/net2272.h b/drivers/usb/gadget/udc/net2272.h index 87d0ab9ffeeb..c669308111c2 100644 --- a/drivers/usb/gadget/udc/net2272.h +++ b/drivers/usb/gadget/udc/net2272.h @@ -441,6 +441,7 @@ struct net2272 { unsigned protocol_stall:1, softconnect:1, wakeup:1, + added:1, dma_eot_polarity:1, dma_dack_polarity:1, dma_dreq_polarity:1, diff --git a/drivers/usb/gadget/udc/net2280.c b/drivers/usb/gadget/udc/net2280.c index 7530bd9a08c4..fc9f99fe7f37 100644 --- a/drivers/usb/gadget/udc/net2280.c +++ b/drivers/usb/gadget/udc/net2280.c @@ -52,6 +52,7 @@ #include <linux/usb/gadget.h> #include <linux/prefetch.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <asm/byteorder.h> #include <asm/irq.h> @@ -360,18 +361,16 @@ print_err: static int handshake(u32 __iomem *ptr, u32 mask, u32 done, int usec) { u32 result; + int ret; - do { - result = readl(ptr); - if (result == ~(u32)0) /* "device unplugged" */ - return -ENODEV; - result &= mask; - if (result == done) - return 0; - udelay(1); - usec--; - } while (usec > 0); - return -ETIMEDOUT; + ret = readl_poll_timeout_atomic(ptr, result, + ((result & mask) == done || + result == U32_MAX), + 1, usec); + if (result == U32_MAX) /* device unplugged */ + return -ENODEV; + + return ret; } static const struct usb_ep_ops net2280_ep_ops; @@ -3561,7 +3560,7 @@ static irqreturn_t net2280_irq(int irq, void *_dev) static void gadget_release(struct device *_dev) { - struct net2280 *dev = dev_get_drvdata(_dev); + struct net2280 *dev = container_of(_dev, struct net2280, gadget.dev); kfree(dev); } @@ -3572,7 +3571,8 @@ static void net2280_remove(struct pci_dev *pdev) { struct net2280 *dev = pci_get_drvdata(pdev); - usb_del_gadget_udc(&dev->gadget); + if (dev->added) + usb_del_gadget(&dev->gadget); BUG_ON(dev->driver); @@ -3603,6 +3603,7 @@ static void net2280_remove(struct pci_dev *pdev) device_remove_file(&pdev->dev, &dev_attr_registers); ep_info(dev, "unbind\n"); + usb_put_gadget(&dev->gadget); } /* wrap this driver around the specified device, but @@ -3624,6 +3625,7 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id) } pci_set_drvdata(pdev, dev); + usb_initialize_gadget(&pdev->dev, &dev->gadget, gadget_release); spin_lock_init(&dev->lock); dev->quirks = id->driver_data; dev->pdev = pdev; @@ -3774,10 +3776,10 @@ static int net2280_probe(struct pci_dev *pdev, const struct pci_device_id *id) if (retval) goto done; - retval = usb_add_gadget_udc_release(&pdev->dev, &dev->gadget, - gadget_release); + retval = usb_add_gadget(&dev->gadget); if (retval) goto done; + dev->added = 1; return 0; done: diff --git a/drivers/usb/gadget/udc/net2280.h b/drivers/usb/gadget/udc/net2280.h index 85d3ca1698ba..7da3dc1e9729 100644 --- a/drivers/usb/gadget/udc/net2280.h +++ b/drivers/usb/gadget/udc/net2280.h @@ -156,6 +156,7 @@ struct net2280 { softconnect : 1, got_irq : 1, region:1, + added:1, u1_enable:1, u2_enable:1, ltm_enable:1, diff --git a/drivers/usb/gadget/udc/pch_udc.c b/drivers/usb/gadget/udc/pch_udc.c index 8afc31d94b0e..a3c1fc924268 100644 --- a/drivers/usb/gadget/udc/pch_udc.c +++ b/drivers/usb/gadget/udc/pch_udc.c @@ -12,12 +12,9 @@ #include <linux/interrupt.h> #include <linux/usb/ch9.h> #include <linux/usb/gadget.h> -#include <linux/gpio.h> +#include 
<linux/gpio/consumer.h> #include <linux/irq.h> -/* GPIO port for VBUS detecting */ -static int vbus_gpio_port = -1; /* GPIO port number (-1:Not used) */ - #define PCH_VBUS_PERIOD 3000 /* VBUS polling period (msec) */ #define PCH_VBUS_INTERVAL 10 /* VBUS polling interval (msec) */ @@ -301,13 +298,13 @@ struct pch_udc_ep { /** * struct pch_vbus_gpio_data - Structure holding GPIO informaton * for detecting VBUS - * @port: gpio port number + * @port: gpio descriptor for the VBUS GPIO * @intr: gpio interrupt number * @irq_work_fall: Structure for WorkQueue * @irq_work_rise: Structure for WorkQueue */ struct pch_vbus_gpio_data { - int port; + struct gpio_desc *port; int intr; struct work_struct irq_work_fall; struct work_struct irq_work_rise; @@ -1254,7 +1251,7 @@ static int pch_vbus_gpio_get_value(struct pch_udc_dev *dev) int vbus = 0; if (dev->vbus_gpio.port) - vbus = gpio_get_value(dev->vbus_gpio.port) ? 1 : 0; + vbus = gpiod_get_value(dev->vbus_gpio.port) ? 1 : 0; else vbus = -1; @@ -1356,42 +1353,30 @@ static irqreturn_t pch_vbus_gpio_irq(int irq, void *data) /** * pch_vbus_gpio_init() - This API initializes GPIO port detecting VBUS. * @dev: Reference to the driver structure - * @vbus_gpio_port: Number of GPIO port to detect gpio * * Return codes: * 0: Success * -EINVAL: GPIO port is invalid or can't be initialized. */ -static int pch_vbus_gpio_init(struct pch_udc_dev *dev, int vbus_gpio_port) +static int pch_vbus_gpio_init(struct pch_udc_dev *dev) { int err; int irq_num = 0; + struct gpio_desc *gpiod; - dev->vbus_gpio.port = 0; + dev->vbus_gpio.port = NULL; dev->vbus_gpio.intr = 0; - if (vbus_gpio_port <= -1) - return -EINVAL; - - err = gpio_is_valid(vbus_gpio_port); - if (!err) { - pr_err("%s: gpio port %d is invalid\n", - __func__, vbus_gpio_port); - return -EINVAL; - } - - err = gpio_request(vbus_gpio_port, "pch_vbus"); - if (err) { - pr_err("%s: can't request gpio port %d, err: %d\n", - __func__, vbus_gpio_port, err); - return -EINVAL; - } + /* Retrieve the GPIO line from the USB gadget device */ + gpiod = devm_gpiod_get(dev->gadget.dev.parent, NULL, GPIOD_IN); + if (IS_ERR(gpiod)) + return PTR_ERR(gpiod); + gpiod_set_consumer_name(gpiod, "pch_vbus"); - dev->vbus_gpio.port = vbus_gpio_port; - gpio_direction_input(vbus_gpio_port); + dev->vbus_gpio.port = gpiod; INIT_WORK(&dev->vbus_gpio.irq_work_fall, pch_vbus_gpio_work_fall); - irq_num = gpio_to_irq(vbus_gpio_port); + irq_num = gpiod_to_irq(gpiod); if (irq_num > 0) { irq_set_irq_type(irq_num, IRQ_TYPE_EDGE_BOTH); err = request_irq(irq_num, pch_vbus_gpio_irq, 0, @@ -1417,9 +1402,6 @@ static void pch_vbus_gpio_free(struct pch_udc_dev *dev) { if (dev->vbus_gpio.intr) free_irq(dev->vbus_gpio.intr, dev); - - if (dev->vbus_gpio.port) - gpio_free(dev->vbus_gpio.port); } /** @@ -2894,7 +2876,7 @@ static int pch_udc_pcd_init(struct pch_udc_dev *dev) { pch_udc_init(dev); pch_udc_pcd_reinit(dev); - pch_vbus_gpio_init(dev, vbus_gpio_port); + pch_vbus_gpio_init(dev); return 0; } @@ -3096,6 +3078,13 @@ static int pch_udc_probe(struct pci_dev *pdev, dev->base_addr = pcim_iomap_table(pdev)[bar]; + /* + * FIXME: add a GPIO descriptor table to pdev.dev using + * gpiod_add_descriptor_table() from <linux/gpio/machine.h> based on + * the PCI subsystem ID. The system-dependent GPIO is necessary for + * VBUS operation. 
+ */ + /* initialize the hardware */ if (pch_udc_pcd_init(dev)) return -ENODEV; diff --git a/drivers/usb/gadget/udc/s3c2410_udc.c b/drivers/usb/gadget/udc/s3c2410_udc.c index bc2e8eb737c3..e875a0b967c0 100644 --- a/drivers/usb/gadget/udc/s3c2410_udc.c +++ b/drivers/usb/gadget/udc/s3c2410_udc.c @@ -1270,7 +1270,6 @@ static int s3c2410_udc_queue(struct usb_ep *_ep, struct usb_request *_req, static int s3c2410_udc_dequeue(struct usb_ep *_ep, struct usb_request *_req) { struct s3c2410_ep *ep = to_s3c2410_ep(_ep); - struct s3c2410_udc *udc; int retval = -EINVAL; unsigned long flags; struct s3c2410_request *req = NULL; @@ -1283,8 +1282,6 @@ static int s3c2410_udc_dequeue(struct usb_ep *_ep, struct usb_request *_req) if (!_ep || !_req) return retval; - udc = to_s3c2410_udc(ep->gadget); - local_irq_save(flags); list_for_each_entry(req, &ep->queue, queue) { diff --git a/drivers/usb/gadget/udc/tegra-xudc.c b/drivers/usb/gadget/udc/tegra-xudc.c index d6ff68c06911..580bef8eb4cb 100644 --- a/drivers/usb/gadget/udc/tegra-xudc.c +++ b/drivers/usb/gadget/udc/tegra-xudc.c @@ -705,11 +705,11 @@ static void tegra_xudc_device_mode_on(struct tegra_xudc *xudc) err = phy_power_on(xudc->curr_utmi_phy); if (err < 0) - dev_err(xudc->dev, "utmi power on failed %d\n", err); + dev_err(xudc->dev, "UTMI power on failed: %d\n", err); err = phy_power_on(xudc->curr_usb3_phy); if (err < 0) - dev_err(xudc->dev, "usb3 phy power on failed %d\n", err); + dev_err(xudc->dev, "USB3 PHY power on failed: %d\n", err); dev_dbg(xudc->dev, "device mode on\n"); @@ -759,11 +759,11 @@ static void tegra_xudc_device_mode_off(struct tegra_xudc *xudc) err = phy_power_off(xudc->curr_utmi_phy); if (err < 0) - dev_err(xudc->dev, "utmi_phy power off failed %d\n", err); + dev_err(xudc->dev, "UTMI PHY power off failed: %d\n", err); err = phy_power_off(xudc->curr_usb3_phy); if (err < 0) - dev_err(xudc->dev, "usb3_phy power off failed %d\n", err); + dev_err(xudc->dev, "USB3 PHY power off failed: %d\n", err); pm_runtime_put(xudc->dev); } @@ -1539,7 +1539,7 @@ static int __tegra_xudc_ep_set_halt(struct tegra_xudc_ep *ep, bool halt) return -EINVAL; if (usb_endpoint_xfer_isoc(ep->desc)) { - dev_err(xudc->dev, "can't halt isoc EP\n"); + dev_err(xudc->dev, "can't halt isochronous EP\n"); return -ENOTSUPP; } @@ -1788,7 +1788,7 @@ static int __tegra_xudc_ep_enable(struct tegra_xudc_ep *ep, if (usb_endpoint_xfer_isoc(desc)) { if (xudc->nr_isoch_eps > XUDC_MAX_ISOCH_EPS) { - dev_err(xudc->dev, "too many isoch endpoints\n"); + dev_err(xudc->dev, "too many isochronous endpoints\n"); return -EBUSY; } xudc->nr_isoch_eps++; @@ -3509,7 +3509,7 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc) if (IS_ERR(xudc->utmi_phy[i])) { err = PTR_ERR(xudc->utmi_phy[i]); if (err != -EPROBE_DEFER) - dev_err(xudc->dev, "failed to get usb2-%d phy: %d\n", + dev_err(xudc->dev, "failed to get usb2-%d PHY: %d\n", i, err); goto clean_up; @@ -3539,12 +3539,12 @@ static int tegra_xudc_phy_get(struct tegra_xudc *xudc) if (IS_ERR(xudc->usb3_phy[i])) { err = PTR_ERR(xudc->usb3_phy[i]); if (err != -EPROBE_DEFER) - dev_err(xudc->dev, "failed to get usb3-%d phy: %d\n", + dev_err(xudc->dev, "failed to get usb3-%d PHY: %d\n", usb3, err); goto clean_up; } else if (xudc->usb3_phy[i]) - dev_dbg(xudc->dev, "usb3_phy-%d registered", usb3); + dev_dbg(xudc->dev, "usb3-%d PHY registered", usb3); } return err; @@ -3577,13 +3577,13 @@ static int tegra_xudc_phy_init(struct tegra_xudc *xudc) for (i = 0; i < xudc->soc->num_phys; i++) { err = phy_init(xudc->utmi_phy[i]); if (err < 0) { - 
dev_err(xudc->dev, "utmi phy init failed: %d\n", err); + dev_err(xudc->dev, "UTMI PHY #%u initialization failed: %d\n", i, err); goto exit_phy; } err = phy_init(xudc->usb3_phy[i]); if (err < 0) { - dev_err(xudc->dev, "usb3 phy init failed: %d\n", err); + dev_err(xudc->dev, "USB3 PHY #%u initialization failed: %d\n", i, err); goto exit_phy; } } @@ -3692,34 +3692,33 @@ static int tegra_xudc_powerdomain_init(struct tegra_xudc *xudc) struct device *dev = xudc->dev; int err; - xudc->genpd_dev_device = dev_pm_domain_attach_by_name(dev, - "dev"); + xudc->genpd_dev_device = dev_pm_domain_attach_by_name(dev, "dev"); if (IS_ERR(xudc->genpd_dev_device)) { err = PTR_ERR(xudc->genpd_dev_device); - dev_err(dev, "failed to get dev pm-domain: %d\n", err); + dev_err(dev, "failed to get device power domain: %d\n", err); return err; } xudc->genpd_dev_ss = dev_pm_domain_attach_by_name(dev, "ss"); if (IS_ERR(xudc->genpd_dev_ss)) { err = PTR_ERR(xudc->genpd_dev_ss); - dev_err(dev, "failed to get superspeed pm-domain: %d\n", err); + dev_err(dev, "failed to get SuperSpeed power domain: %d\n", err); return err; } xudc->genpd_dl_device = device_link_add(dev, xudc->genpd_dev_device, - DL_FLAG_PM_RUNTIME | - DL_FLAG_STATELESS); + DL_FLAG_PM_RUNTIME | + DL_FLAG_STATELESS); if (!xudc->genpd_dl_device) { - dev_err(dev, "adding usb device device link failed!\n"); + dev_err(dev, "failed to add USB device link\n"); return -ENODEV; } xudc->genpd_dl_ss = device_link_add(dev, xudc->genpd_dev_ss, - DL_FLAG_PM_RUNTIME | - DL_FLAG_STATELESS); + DL_FLAG_PM_RUNTIME | + DL_FLAG_STATELESS); if (!xudc->genpd_dl_ss) { - dev_err(dev, "adding superspeed device link failed!\n"); + dev_err(dev, "failed to add SuperSpeed device link\n"); return -ENODEV; } @@ -3733,7 +3732,7 @@ static int tegra_xudc_probe(struct platform_device *pdev) unsigned int i; int err; - xudc = devm_kzalloc(&pdev->dev, sizeof(*xudc), GFP_ATOMIC); + xudc = devm_kzalloc(&pdev->dev, sizeof(*xudc), GFP_KERNEL); if (!xudc) return -ENOMEM; @@ -3772,18 +3771,19 @@ static int tegra_xudc_probe(struct platform_device *pdev) return err; } - xudc->clks = devm_kcalloc(&pdev->dev, xudc->soc->num_clks, - sizeof(*xudc->clks), GFP_KERNEL); + xudc->clks = devm_kcalloc(&pdev->dev, xudc->soc->num_clks, sizeof(*xudc->clks), + GFP_KERNEL); if (!xudc->clks) return -ENOMEM; for (i = 0; i < xudc->soc->num_clks; i++) xudc->clks[i].id = xudc->soc->clock_names[i]; - err = devm_clk_bulk_get(&pdev->dev, xudc->soc->num_clks, - xudc->clks); + err = devm_clk_bulk_get(&pdev->dev, xudc->soc->num_clks, xudc->clks); if (err) { - dev_err(xudc->dev, "failed to request clks %d\n", err); + if (err != -EPROBE_DEFER) + dev_err(xudc->dev, "failed to request clocks: %d\n", err); + return err; } @@ -3798,7 +3798,9 @@ static int tegra_xudc_probe(struct platform_device *pdev) err = devm_regulator_bulk_get(&pdev->dev, xudc->soc->num_supplies, xudc->supplies); if (err) { - dev_err(xudc->dev, "failed to request regulators %d\n", err); + if (err != -EPROBE_DEFER) + dev_err(xudc->dev, "failed to request regulators: %d\n", err); + return err; } @@ -3808,7 +3810,7 @@ static int tegra_xudc_probe(struct platform_device *pdev) err = regulator_bulk_enable(xudc->soc->num_supplies, xudc->supplies); if (err) { - dev_err(xudc->dev, "failed to enable regulators %d\n", err); + dev_err(xudc->dev, "failed to enable regulators: %d\n", err); goto put_padctl; } diff --git a/drivers/usb/host/bcma-hcd.c b/drivers/usb/host/bcma-hcd.c index b1b777f33521..337b425dd4b0 100644 --- a/drivers/usb/host/bcma-hcd.c +++ 
b/drivers/usb/host/bcma-hcd.c @@ -498,15 +498,4 @@ static struct bcma_driver bcma_hcd_driver = { .suspend = bcma_hcd_suspend, .resume = bcma_hcd_resume, }; - -static int __init bcma_hcd_init(void) -{ - return bcma_driver_register(&bcma_hcd_driver); -} -module_init(bcma_hcd_init); - -static void __exit bcma_hcd_exit(void) -{ - bcma_driver_unregister(&bcma_hcd_driver); -} -module_exit(bcma_hcd_exit); +module_bcma_driver(bcma_hcd_driver); diff --git a/drivers/usb/host/ehci-npcm7xx.c b/drivers/usb/host/ehci-npcm7xx.c index adaf8fb4b459..6b5a7a873e01 100644 --- a/drivers/usb/host/ehci-npcm7xx.c +++ b/drivers/usb/host/ehci-npcm7xx.c @@ -37,8 +37,7 @@ static const char hcd_name[] = "npcm7xx-ehci"; static struct hc_driver __read_mostly ehci_npcm7xx_hc_driver; -#ifdef CONFIG_PM_SLEEP -static int ehci_npcm7xx_drv_suspend(struct device *dev) +static int __maybe_unused ehci_npcm7xx_drv_suspend(struct device *dev) { struct usb_hcd *hcd = dev_get_drvdata(dev); bool do_wakeup = device_may_wakeup(dev); @@ -46,14 +45,13 @@ static int ehci_npcm7xx_drv_suspend(struct device *dev) return ehci_suspend(hcd, do_wakeup); } -static int ehci_npcm7xx_drv_resume(struct device *dev) +static int __maybe_unused ehci_npcm7xx_drv_resume(struct device *dev) { struct usb_hcd *hcd = dev_get_drvdata(dev); ehci_resume(hcd, false); return 0; } -#endif /* CONFIG_PM_SLEEP */ static SIMPLE_DEV_PM_OPS(ehci_npcm7xx_pm_ops, ehci_npcm7xx_drv_suspend, ehci_npcm7xx_drv_resume); @@ -183,7 +181,7 @@ static struct platform_driver npcm7xx_ehci_hcd_driver = { .driver = { .name = "npcm7xx-ehci", .bus = &platform_bus_type, - .pm = &ehci_npcm7xx_pm_ops, + .pm = pm_ptr(&ehci_npcm7xx_pm_ops), .of_match_table = npcm7xx_ehci_id_table, } }; diff --git a/drivers/usb/host/ehci-platform.c b/drivers/usb/host/ehci-platform.c index 006c4f6188a5..a48dd3fac153 100644 --- a/drivers/usb/host/ehci-platform.c +++ b/drivers/usb/host/ehci-platform.c @@ -42,6 +42,9 @@ #define EHCI_MAX_CLKS 4 #define hcd_to_ehci_priv(h) ((struct ehci_platform_priv *)hcd_to_ehci(h)->priv) +#define BCM_USB_FIFO_THRESHOLD 0x00800040 +#define bcm_iproc_insnreg01 hostpc[0] + struct ehci_platform_priv { struct clk *clks[EHCI_MAX_CLKS]; struct reset_control *rsts; @@ -75,6 +78,11 @@ static int ehci_platform_reset(struct usb_hcd *hcd) if (pdata->no_io_watchdog) ehci->need_io_watchdog = 0; + + if (of_device_is_compatible(pdev->dev.of_node, "brcm,xgs-iproc-ehci")) + ehci_writel(ehci, BCM_USB_FIFO_THRESHOLD, + &ehci->regs->bcm_iproc_insnreg01); + return 0; } @@ -410,8 +418,7 @@ static int ehci_platform_remove(struct platform_device *dev) return 0; } -#ifdef CONFIG_PM_SLEEP -static int ehci_platform_suspend(struct device *dev) +static int __maybe_unused ehci_platform_suspend(struct device *dev) { struct usb_hcd *hcd = dev_get_drvdata(dev); struct usb_ehci_pdata *pdata = dev_get_platdata(dev); @@ -433,7 +440,7 @@ static int ehci_platform_suspend(struct device *dev) return ret; } -static int ehci_platform_resume(struct device *dev) +static int __maybe_unused ehci_platform_resume(struct device *dev) { struct usb_hcd *hcd = dev_get_drvdata(dev); struct usb_ehci_pdata *pdata = dev_get_platdata(dev); @@ -464,7 +471,6 @@ static int ehci_platform_resume(struct device *dev) return 0; } -#endif /* CONFIG_PM_SLEEP */ static const struct of_device_id vt8500_ehci_ids[] = { { .compatible = "via,vt8500-ehci", }, @@ -499,7 +505,7 @@ static struct platform_driver ehci_platform_driver = { .shutdown = usb_hcd_platform_shutdown, .driver = { .name = "ehci-platform", - .pm = &ehci_platform_pm_ops, + .pm = 
pm_ptr(&ehci_platform_pm_ops), .of_match_table = vt8500_ehci_ids, .acpi_match_table = ACPI_PTR(ehci_acpi_match), } diff --git a/drivers/usb/host/ehci-sched.c b/drivers/usb/host/ehci-sched.c index 847979f265b1..6dfb242f9a4b 100644 --- a/drivers/usb/host/ehci-sched.c +++ b/drivers/usb/host/ehci-sched.c @@ -307,26 +307,6 @@ static int __maybe_unused same_tt(struct usb_device *dev1, #ifdef CONFIG_USB_EHCI_TT_NEWSCHED -/* Which uframe does the low/fullspeed transfer start in? - * - * The parameter is the mask of ssplits in "H-frame" terms - * and this returns the transfer start uframe in "B-frame" terms, - * which allows both to match, e.g. a ssplit in "H-frame" uframe 0 - * will cause a transfer in "B-frame" uframe 0. "B-frames" lag - * "H-frames" by 1 uframe. See the EHCI spec sec 4.5 and figure 4.7. - */ -static inline unsigned char tt_start_uframe(struct ehci_hcd *ehci, __hc32 mask) -{ - unsigned char smask = hc32_to_cpu(ehci, mask) & QH_SMASK; - - if (!smask) { - ehci_err(ehci, "invalid empty smask!\n"); - /* uframe 7 can't have bw so this will indicate failure */ - return 7; - } - return ffs(smask) - 1; -} - static const unsigned char max_tt_usecs[] = { 125, 125, 125, 125, 125, 125, 30, 0 }; diff --git a/drivers/usb/host/ehci-spear.c b/drivers/usb/host/ehci-spear.c index add796c78561..3694e450a11a 100644 --- a/drivers/usb/host/ehci-spear.c +++ b/drivers/usb/host/ehci-spear.c @@ -34,8 +34,7 @@ struct spear_ehci { static struct hc_driver __read_mostly ehci_spear_hc_driver; -#ifdef CONFIG_PM_SLEEP -static int ehci_spear_drv_suspend(struct device *dev) +static int __maybe_unused ehci_spear_drv_suspend(struct device *dev) { struct usb_hcd *hcd = dev_get_drvdata(dev); bool do_wakeup = device_may_wakeup(dev); @@ -43,14 +42,13 @@ static int ehci_spear_drv_suspend(struct device *dev) return ehci_suspend(hcd, do_wakeup); } -static int ehci_spear_drv_resume(struct device *dev) +static int __maybe_unused ehci_spear_drv_resume(struct device *dev) { struct usb_hcd *hcd = dev_get_drvdata(dev); ehci_resume(hcd, false); return 0; } -#endif /* CONFIG_PM_SLEEP */ static SIMPLE_DEV_PM_OPS(ehci_spear_pm_ops, ehci_spear_drv_suspend, ehci_spear_drv_resume); @@ -155,7 +153,7 @@ static struct platform_driver spear_ehci_hcd_driver = { .driver = { .name = "spear-ehci", .bus = &platform_bus_type, - .pm = &ehci_spear_pm_ops, + .pm = pm_ptr(&ehci_spear_pm_ops), .of_match_table = spear_ehci_id_table, } }; diff --git a/drivers/usb/host/fotg210-hcd.c b/drivers/usb/host/fotg210-hcd.c index 194df8282471..1d94fcfac2c2 100644 --- a/drivers/usb/host/fotg210-hcd.c +++ b/drivers/usb/host/fotg210-hcd.c @@ -32,6 +32,7 @@ #include <linux/uaccess.h> #include <linux/platform_device.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/clk.h> #include <asm/byteorder.h> @@ -883,18 +884,15 @@ static int handshake(struct fotg210_hcd *fotg210, void __iomem *ptr, u32 mask, u32 done, int usec) { u32 result; + int ret; - do { - result = fotg210_readl(fotg210, ptr); - if (result == ~(u32)0) /* card removed */ - return -ENODEV; - result &= mask; - if (result == done) - return 0; - udelay(1); - usec--; - } while (usec > 0); - return -ETIMEDOUT; + ret = readl_poll_timeout_atomic(ptr, result, + ((result & mask) == done || + result == U32_MAX), 1, usec); + if (result == U32_MAX) /* card removed */ + return -ENODEV; + + return ret; } /* Force HC to halt state from unknown (EHCI spec section 2.3). 
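A recurring conversion in this series (net2280, fotg210, oxu210hp, isp1760, pci-quirks, xhci-rcar, phy-mv-usb, phy-ulpi-viewport) replaces open-coded udelay() polling loops with the <linux/iopoll.h> helpers. The following is a minimal, self-contained sketch of the resulting handshake pattern, not text from any of the patches themselves; the function name example_handshake() is illustrative only, and the all-ones check assumes a register that reads back U32_MAX once the device is unplugged, as the converted drivers above treat it:

	#include <linux/io.h>
	#include <linux/iopoll.h>

	/* Poll @ptr every 1 us until (value & @mask) == @done or @usec us elapse. */
	static int example_handshake(void __iomem *ptr, u32 mask, u32 done, int usec)
	{
		u32 result;
		int ret;

		ret = readl_poll_timeout_atomic(ptr, result,
						(result & mask) == done ||
						result == U32_MAX,
						1, usec);
		if (result == U32_MAX)		/* all-ones read: device unplugged */
			return -ENODEV;

		return ret;			/* 0 on success, -ETIMEDOUT on timeout */
	}

Besides being shorter, the helper centralizes the delay and timeout bookkeeping that each of the removed loops re-implemented by hand with udelay() and a decrementing counter.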
diff --git a/drivers/usb/host/ohci-hcd.c b/drivers/usb/host/ohci-hcd.c index dd37e77dae00..73e13e7c2b46 100644 --- a/drivers/usb/host/ohci-hcd.c +++ b/drivers/usb/host/ohci-hcd.c @@ -102,7 +102,7 @@ static void io_watchdog_func(struct timer_list *t); /* Some boards misreport power switching/overcurrent */ -static bool distrust_firmware = true; +static bool distrust_firmware; module_param (distrust_firmware, bool, 0); MODULE_PARM_DESC (distrust_firmware, "true to distrust firmware power/overcurrent setup"); @@ -673,20 +673,24 @@ retry: /* handle root hub init quirks ... */ val = roothub_a (ohci); - val &= ~(RH_A_PSM | RH_A_OCPM); + /* Configure for per-port over-current protection by default */ + val &= ~RH_A_NOCP; + val |= RH_A_OCPM; if (ohci->flags & OHCI_QUIRK_SUPERIO) { - /* NSC 87560 and maybe others */ + /* NSC 87560 and maybe others. + * Ganged power switching, no over-current protection. + */ val |= RH_A_NOCP; - val &= ~(RH_A_POTPGT | RH_A_NPS); - ohci_writel (ohci, val, &ohci->regs->roothub.a); + val &= ~(RH_A_POTPGT | RH_A_NPS | RH_A_PSM | RH_A_OCPM); } else if ((ohci->flags & OHCI_QUIRK_AMD756) || (ohci->flags & OHCI_QUIRK_HUB_POWER)) { /* hub power always on; required for AMD-756 and some - * Mac platforms. ganged overcurrent reporting, if any. + * Mac platforms. */ val |= RH_A_NPS; - ohci_writel (ohci, val, &ohci->regs->roothub.a); } + ohci_writel(ohci, val, &ohci->regs->roothub.a); + ohci_writel (ohci, RH_HS_LPSC, &ohci->regs->roothub.status); ohci_writel (ohci, (val & RH_A_NPS) ? 0 : RH_B_PPCM, &ohci->regs->roothub.b); diff --git a/drivers/usb/host/oxu210hp-hcd.c b/drivers/usb/host/oxu210hp-hcd.c index cfa7dd2cc7d3..27dbbe1b28b1 100644 --- a/drivers/usb/host/oxu210hp-hcd.c +++ b/drivers/usb/host/oxu210hp-hcd.c @@ -24,6 +24,7 @@ #include <linux/moduleparam.h> #include <linux/dma-mapping.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <asm/irq.h> #include <asm/unaligned.h> @@ -748,18 +749,16 @@ static int handshake(struct oxu_hcd *oxu, void __iomem *ptr, u32 mask, u32 done, int usec) { u32 result; + int ret; - do { - result = readl(ptr); - if (result == ~(u32)0) /* card removed */ - return -ENODEV; - result &= mask; - if (result == done) - return 0; - udelay(1); - usec--; - } while (usec > 0); - return -ETIMEDOUT; + ret = readl_poll_timeout_atomic(ptr, result, + ((result & mask) == done || + result == U32_MAX), + 1, usec); + if (result == U32_MAX) /* card removed */ + return -ENODEV; + + return ret; } /* Force HC to halt state from unknown (EHCI spec section 2.3) */ diff --git a/drivers/usb/host/pci-quirks.c b/drivers/usb/host/pci-quirks.c index 8c1bbac6d136..ef08d68b9714 100644 --- a/drivers/usb/host/pci-quirks.c +++ b/drivers/usb/host/pci-quirks.c @@ -16,8 +16,8 @@ #include <linux/export.h> #include <linux/acpi.h> #include <linux/dmi.h> - -#include <soc/bcm2835/raspberrypi-firmware.h> +#include <linux/of.h> +#include <linux/iopoll.h> #include "pci-quirks.h" #include "xhci-ext-caps.h" @@ -1013,15 +1013,9 @@ static int handshake(void __iomem *ptr, u32 mask, u32 done, { u32 result; - do { - result = readl(ptr); - result &= mask; - if (result == done) - return 0; - udelay(delay_usec); - wait_usec -= delay_usec; - } while (wait_usec > 0); - return -ETIMEDOUT; + return readl_poll_timeout_atomic(ptr, result, + ((result & mask) == done), + delay_usec, wait_usec); } /* @@ -1247,7 +1241,8 @@ iounmap: static void quirk_usb_early_handoff(struct pci_dev *pdev) { - int ret; + struct device_node *parent; + bool is_rpi; /* Skip Netlogic mips SoC's internal PCI USB controller. 
* This device does not need/support EHCI/OHCI handoff @@ -1255,14 +1250,16 @@ static void quirk_usb_early_handoff(struct pci_dev *pdev) if (pdev->vendor == 0x184e) /* vendor Netlogic */ return; + /* + * Bypass the Raspberry Pi 4 controller xHCI controller, things are + * taken care of by the board's co-processor. + */ if (pdev->vendor == PCI_VENDOR_ID_VIA && pdev->device == 0x3483) { - ret = rpi_firmware_init_vl805(pdev); - if (ret) { - /* Firmware might be outdated, or something failed */ - dev_warn(&pdev->dev, - "Failed to load VL805's firmware: %d. Will continue to attempt to work, but bad things might happen. You should fix this...\n", - ret); - } + parent = of_get_parent(pdev->bus->dev.of_node); + is_rpi = of_device_is_compatible(parent, "brcm,bcm2711-pcie"); + of_node_put(parent); + if (is_rpi) + return; } if (pdev->class != PCI_CLASS_SERIAL_USB_UHCI && diff --git a/drivers/usb/host/xhci-dbgtty.c b/drivers/usb/host/xhci-dbgtty.c index b8918f73a432..ae4e4ab638b5 100644 --- a/drivers/usb/host/xhci-dbgtty.c +++ b/drivers/usb/host/xhci-dbgtty.c @@ -288,14 +288,14 @@ static const struct tty_operations dbc_tty_ops = { .unthrottle = dbc_tty_unthrottle, }; -static void dbc_rx_push(unsigned long _port) +static void dbc_rx_push(struct tasklet_struct *t) { struct dbc_request *req; struct tty_struct *tty; unsigned long flags; bool do_push = false; bool disconnect = false; - struct dbc_port *port = (void *)_port; + struct dbc_port *port = from_tasklet(port, t, push); struct list_head *queue = &port->read_queue; spin_lock_irqsave(&port->port_lock, flags); @@ -382,7 +382,7 @@ xhci_dbc_tty_init_port(struct xhci_dbc *dbc, struct dbc_port *port) { tty_port_init(&port->port); spin_lock_init(&port->port_lock); - tasklet_init(&port->push, dbc_rx_push, (unsigned long)port); + tasklet_setup(&port->push, dbc_rx_push); INIT_LIST_HEAD(&port->read_pool); INIT_LIST_HEAD(&port->read_queue); INIT_LIST_HEAD(&port->write_pool); diff --git a/drivers/usb/host/xhci-debugfs.c b/drivers/usb/host/xhci-debugfs.c index c88bffd68742..2c0fda57869e 100644 --- a/drivers/usb/host/xhci-debugfs.c +++ b/drivers/usb/host/xhci-debugfs.c @@ -451,9 +451,11 @@ void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci, if (!epriv) return; + epriv->show_ring = dev->eps[ep_index].ring; + snprintf(epriv->name, sizeof(epriv->name), "ep%02d", ep_index); epriv->root = xhci_debugfs_create_ring_dir(xhci, - &dev->eps[ep_index].ring, + &epriv->show_ring, epriv->name, spriv->root); spriv->eps[ep_index] = epriv; @@ -475,6 +477,111 @@ void xhci_debugfs_remove_endpoint(struct xhci_hcd *xhci, kfree(epriv); } +static int xhci_stream_id_show(struct seq_file *s, void *unused) +{ + struct xhci_ep_priv *epriv = s->private; + + if (!epriv->stream_info) + return -EPERM; + + seq_printf(s, "Show stream ID %d trb ring, supported [1 - %d]\n", + epriv->stream_id, epriv->stream_info->num_streams - 1); + + return 0; +} + +static int xhci_stream_id_open(struct inode *inode, struct file *file) +{ + return single_open(file, xhci_stream_id_show, inode->i_private); +} + +static ssize_t xhci_stream_id_write(struct file *file, const char __user *ubuf, + size_t count, loff_t *ppos) +{ + struct seq_file *s = file->private_data; + struct xhci_ep_priv *epriv = s->private; + int ret; + u16 stream_id; /* MaxPStreams + 1 <= 16 */ + + if (!epriv->stream_info) + return -EPERM; + + /* Decimal number */ + ret = kstrtou16_from_user(ubuf, count, 10, &stream_id); + if (ret) + return ret; + + if (stream_id == 0 || stream_id >= epriv->stream_info->num_streams) + return -EINVAL; + + 
epriv->stream_id = stream_id; + epriv->show_ring = epriv->stream_info->stream_rings[stream_id]; + + return count; +} + +static const struct file_operations stream_id_fops = { + .open = xhci_stream_id_open, + .write = xhci_stream_id_write, + .read = seq_read, + .llseek = seq_lseek, + .release = single_release, +}; + +static int xhci_stream_context_array_show(struct seq_file *s, void *unused) +{ + struct xhci_ep_priv *epriv = s->private; + struct xhci_stream_ctx *stream_ctx; + dma_addr_t dma; + int id; + + if (!epriv->stream_info) + return -EPERM; + + seq_printf(s, "Allocated %d streams and %d stream context array entries\n", + epriv->stream_info->num_streams, + epriv->stream_info->num_stream_ctxs); + + for (id = 0; id < epriv->stream_info->num_stream_ctxs; id++) { + stream_ctx = epriv->stream_info->stream_ctx_array + id; + dma = epriv->stream_info->ctx_array_dma + id * 16; + if (id < epriv->stream_info->num_streams) + seq_printf(s, "%pad stream id %d deq %016llx\n", &dma, + id, le64_to_cpu(stream_ctx->stream_ring)); + else + seq_printf(s, "%pad stream context entry not used deq %016llx\n", + &dma, le64_to_cpu(stream_ctx->stream_ring)); + } + + return 0; +} +DEFINE_SHOW_ATTRIBUTE(xhci_stream_context_array); + +void xhci_debugfs_create_stream_files(struct xhci_hcd *xhci, + struct xhci_virt_device *dev, + int ep_index) +{ + struct xhci_slot_priv *spriv = dev->debugfs_private; + struct xhci_ep_priv *epriv; + + if (!spriv || !spriv->eps[ep_index] || + !dev->eps[ep_index].stream_info) + return; + + epriv = spriv->eps[ep_index]; + epriv->stream_info = dev->eps[ep_index].stream_info; + + /* Show trb ring of stream ID 1 by default */ + epriv->stream_id = 1; + epriv->show_ring = epriv->stream_info->stream_rings[1]; + debugfs_create_file("stream_id", 0644, + epriv->root, epriv, + &stream_id_fops); + debugfs_create_file("stream_context_array", 0444, + epriv->root, epriv, + &xhci_stream_context_array_fops); +} + void xhci_debugfs_create_slot(struct xhci_hcd *xhci, int slot_id) { struct xhci_slot_priv *priv; diff --git a/drivers/usb/host/xhci-debugfs.h b/drivers/usb/host/xhci-debugfs.h index 56db635fcd6e..7c074b4be819 100644 --- a/drivers/usb/host/xhci-debugfs.h +++ b/drivers/usb/host/xhci-debugfs.h @@ -91,6 +91,9 @@ struct xhci_file_map { struct xhci_ep_priv { char name[DEBUGFS_NAMELEN]; struct dentry *root; + struct xhci_stream_info *stream_info; + struct xhci_ring *show_ring; + unsigned int stream_id; }; struct xhci_slot_priv { @@ -113,6 +116,9 @@ void xhci_debugfs_create_endpoint(struct xhci_hcd *xhci, void xhci_debugfs_remove_endpoint(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, int ep_index); +void xhci_debugfs_create_stream_files(struct xhci_hcd *xhci, + struct xhci_virt_device *virt_dev, + int ep_index); #else static inline void xhci_debugfs_init(struct xhci_hcd *xhci) { } static inline void xhci_debugfs_exit(struct xhci_hcd *xhci) { } @@ -128,6 +134,10 @@ static inline void xhci_debugfs_remove_endpoint(struct xhci_hcd *xhci, struct xhci_virt_device *virt_dev, int ep_index) { } +static inline void +xhci_debugfs_create_stream_files(struct xhci_hcd *xhci, + struct xhci_virt_device *virt_dev, + int ep_index) { } #endif /* CONFIG_DEBUG_FS */ #endif /* __LINUX_XHCI_DEBUGFS_H */ diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c index 4311d4c9b68d..8f321f39ab96 100644 --- a/drivers/usb/host/xhci-mtk.c +++ b/drivers/usb/host/xhci-mtk.c @@ -77,7 +77,7 @@ static int xhci_mtk_host_enable(struct xhci_hcd_mtk *mtk) { struct mu3c_ippc_regs __iomem *ippc = mtk->ippc_regs; u32 
value, check_val; - int u3_ports_disabed = 0; + int u3_ports_disabled = 0; int ret; int i; @@ -92,7 +92,7 @@ static int xhci_mtk_host_enable(struct xhci_hcd_mtk *mtk) /* power on and enable u3 ports except skipped ones */ for (i = 0; i < mtk->num_u3_ports; i++) { if ((0x1 << i) & mtk->u3p_dis_msk) { - u3_ports_disabed++; + u3_ports_disabled++; continue; } @@ -117,7 +117,7 @@ static int xhci_mtk_host_enable(struct xhci_hcd_mtk *mtk) check_val = STS1_SYSPLL_STABLE | STS1_REF_RST | STS1_SYS125_RST | STS1_XHCI_RST; - if (mtk->num_u3_ports > u3_ports_disabed) + if (mtk->num_u3_ports > u3_ports_disabled) check_val |= STS1_U3_MAC_RST; ret = readl_poll_timeout(&ippc->ip_pw_sts1, value, diff --git a/drivers/usb/host/xhci-pci.c b/drivers/usb/host/xhci-pci.c index 3feaafebfe58..c26c06e5c88c 100644 --- a/drivers/usb/host/xhci-pci.c +++ b/drivers/usb/host/xhci-pci.c @@ -12,6 +12,7 @@ #include <linux/slab.h> #include <linux/module.h> #include <linux/acpi.h> +#include <linux/reset.h> #include "xhci.h" #include "xhci-trace.h" @@ -346,6 +347,7 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) struct xhci_hcd *xhci; struct usb_hcd *hcd; struct xhci_driver_data *driver_data; + struct reset_control *reset; driver_data = (struct xhci_driver_data *)id->driver_data; if (driver_data && driver_data->quirks & XHCI_RENESAS_FW_QUIRK) { @@ -354,6 +356,11 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) return retval; } + reset = devm_reset_control_get_optional_exclusive(&dev->dev, NULL); + if (IS_ERR(reset)) + return PTR_ERR(reset); + reset_control_reset(reset); + /* Prevent runtime suspending between USB-2 and USB-3 initialization */ pm_runtime_get_noresume(&dev->dev); @@ -371,6 +378,7 @@ static int xhci_pci_probe(struct pci_dev *dev, const struct pci_device_id *id) /* USB 2.0 roothub is stored in the PCI device now. */ hcd = dev_get_drvdata(&dev->dev); xhci = hcd_to_xhci(hcd); + xhci->reset = reset; xhci->shared_hcd = usb_create_shared_hcd(&xhci_pci_hc_driver, &dev->dev, pci_name(dev), hcd); if (!xhci->shared_hcd) { @@ -522,6 +530,8 @@ static int xhci_pci_resume(struct usb_hcd *hcd, bool hibernated) struct pci_dev *pdev = to_pci_dev(hcd->self.controller); int retval = 0; + reset_control_reset(xhci->reset); + /* The BIOS on systems with the Intel Panther Point chipset may or may * not support xHCI natively. 
That means that during system resume, it * may switch the ports back to EHCI so that users can use their diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c index 3057cfc76d6a..aa2d35f98200 100644 --- a/drivers/usb/host/xhci-plat.c +++ b/drivers/usb/host/xhci-plat.c @@ -54,6 +54,16 @@ static int xhci_priv_init_quirk(struct usb_hcd *hcd) return priv->init_quirk(hcd); } +static int xhci_priv_suspend_quirk(struct usb_hcd *hcd) +{ + struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); + + if (!priv->suspend_quirk) + return 0; + + return priv->suspend_quirk(hcd); +} + static int xhci_priv_resume_quirk(struct usb_hcd *hcd) { struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); @@ -173,6 +183,8 @@ static int xhci_plat_probe(struct platform_device *pdev) struct usb_hcd *hcd; int ret; int irq; + struct xhci_plat_priv *priv = NULL; + if (usb_disabled()) return -ENODEV; @@ -264,16 +276,18 @@ static int xhci_plat_probe(struct platform_device *pdev) if (ret) goto disable_reg_clk; - priv_match = of_device_get_match_data(&pdev->dev); - if (priv_match) { - struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); + if (pdev->dev.of_node) + priv_match = of_device_get_match_data(&pdev->dev); + else + priv_match = dev_get_platdata(&pdev->dev); + if (priv_match) { + priv = hcd_to_xhci_priv(hcd); /* Just copy data for now */ - if (priv_match) - *priv = *priv_match; + *priv = *priv_match; } - device_wakeup_enable(hcd->self.controller); + device_set_wakeup_capable(&pdev->dev, true); xhci->main_hcd = hcd; xhci->shared_hcd = __usb_create_hcd(driver, sysdev, &pdev->dev, @@ -316,6 +330,9 @@ static int xhci_plat_probe(struct platform_device *pdev) hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node); xhci->shared_hcd->tpl_support = hcd->tpl_support; + if (priv && (priv->quirks & XHCI_SKIP_PHY_INIT)) + hcd->skip_phy_initialization = 1; + ret = usb_add_hcd(hcd, irq, IRQF_SHARED); if (ret) goto disable_usb_phy; @@ -397,14 +414,14 @@ static int __maybe_unused xhci_plat_suspend(struct device *dev) { struct usb_hcd *hcd = dev_get_drvdata(dev); struct xhci_hcd *xhci = hcd_to_xhci(hcd); + int ret; + ret = xhci_priv_suspend_quirk(hcd); + if (ret) + return ret; /* * xhci_suspend() needs `do_wakeup` to know whether host is allowed - * to do wakeup during suspend. Since xhci_plat_suspend is currently - * only designed for system suspend, device_may_wakeup() is enough - * to dertermine whether host is allowed to do wakeup. Need to - * reconsider this when xhci_plat_suspend enlarges its scope, e.g., - * also applies to runtime suspend. + * to do wakeup during suspend. 
*/ return xhci_suspend(xhci, device_may_wakeup(dev)); } @@ -434,6 +451,11 @@ static int __maybe_unused xhci_plat_runtime_suspend(struct device *dev) { struct usb_hcd *hcd = dev_get_drvdata(dev); struct xhci_hcd *xhci = hcd_to_xhci(hcd); + int ret; + + ret = xhci_priv_suspend_quirk(hcd); + if (ret) + return ret; return xhci_suspend(xhci, true); } diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h index b49f6447bd3a..1fb149d1fbce 100644 --- a/drivers/usb/host/xhci-plat.h +++ b/drivers/usb/host/xhci-plat.h @@ -15,6 +15,7 @@ struct xhci_plat_priv { unsigned long long quirks; void (*plat_start)(struct usb_hcd *); int (*init_quirk)(struct usb_hcd *); + int (*suspend_quirk)(struct usb_hcd *); int (*resume_quirk)(struct usb_hcd *); }; diff --git a/drivers/usb/host/xhci-rcar.c b/drivers/usb/host/xhci-rcar.c index c1025d321a41..1bc4fe7b8c75 100644 --- a/drivers/usb/host/xhci-rcar.c +++ b/drivers/usb/host/xhci-rcar.c @@ -6,6 +6,7 @@ */ #include <linux/firmware.h> +#include <linux/iopoll.h> #include <linux/module.h> #include <linux/platform_device.h> #include <linux/of.h> @@ -127,8 +128,7 @@ static int xhci_rcar_download_firmware(struct usb_hcd *hcd) void __iomem *regs = hcd->regs; struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd); const struct firmware *fw; - int retval, index, j, time; - int timeout = 10000; + int retval, index, j; u32 data, val, temp; u32 quirks = 0; const struct soc_device_attribute *attr; @@ -166,32 +166,19 @@ static int xhci_rcar_download_firmware(struct usb_hcd *hcd) temp |= RCAR_USB3_DL_CTRL_FW_SET_DATA0; writel(temp, regs + RCAR_USB3_DL_CTRL); - for (time = 0; time < timeout; time++) { - val = readl(regs + RCAR_USB3_DL_CTRL); - if ((val & RCAR_USB3_DL_CTRL_FW_SET_DATA0) == 0) - break; - udelay(1); - } - if (time == timeout) { - retval = -ETIMEDOUT; + retval = readl_poll_timeout_atomic(regs + RCAR_USB3_DL_CTRL, + val, !(val & RCAR_USB3_DL_CTRL_FW_SET_DATA0), + 1, 10000); + if (retval < 0) break; - } } temp = readl(regs + RCAR_USB3_DL_CTRL); temp &= ~RCAR_USB3_DL_CTRL_ENABLE; writel(temp, regs + RCAR_USB3_DL_CTRL); - for (time = 0; time < timeout; time++) { - val = readl(regs + RCAR_USB3_DL_CTRL); - if (val & RCAR_USB3_DL_CTRL_FW_SUCCESS) { - retval = 0; - break; - } - udelay(1); - } - if (time == timeout) - retval = -ETIMEDOUT; + retval = readl_poll_timeout_atomic((regs + RCAR_USB3_DL_CTRL), + val, val & RCAR_USB3_DL_CTRL_FW_SUCCESS, 1, 10000); release_firmware(fw); @@ -200,18 +187,12 @@ static int xhci_rcar_download_firmware(struct usb_hcd *hcd) static bool xhci_rcar_wait_for_pll_active(struct usb_hcd *hcd) { - int timeout = 1000; + int retval; u32 val, mask = RCAR_USB3_AXH_STA_PLL_ACTIVE_MASK; - while (timeout > 0) { - val = readl(hcd->regs + RCAR_USB3_AXH_STA); - if ((val & mask) == mask) - return true; - udelay(1); - timeout--; - } - - return false; + retval = readl_poll_timeout_atomic(hcd->regs + RCAR_USB3_AXH_STA, + val, (val & mask) == mask, 1, 1000); + return !retval; } /* This function needs to initialize a "phy" of usb before */ diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c index a741a38a4c69..167dae117f73 100644 --- a/drivers/usb/host/xhci-ring.c +++ b/drivers/usb/host/xhci-ring.c @@ -3736,6 +3736,24 @@ static int xhci_get_isoc_frame_id(struct xhci_hcd *xhci, return start_frame; } +/* Check if we should generate event interrupt for a TD in an isoc URB */ +static bool trb_block_event_intr(struct xhci_hcd *xhci, int num_tds, int i) +{ + if (xhci->hci_version < 0x100) + return false; + /* always generate an event 
interrupt for the last TD */ + if (i == num_tds - 1) + return false; + /* + * If AVOID_BEI is set the host handles full event rings poorly, + * generate an event at least every 8th TD to clear the event ring + */ + if (i && xhci->quirks & XHCI_AVOID_BEI) + return !!(i % 8); + + return true; +} + /* This is for isoc transfer */ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, struct urb *urb, int slot_id, unsigned int ep_index) @@ -3843,10 +3861,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags, more_trbs_coming = false; td->last_trb = ep_ring->enqueue; field |= TRB_IOC; - /* set BEI, except for the last TD */ - if (xhci->hci_version >= 0x100 && - !(xhci->quirks & XHCI_AVOID_BEI) && - i < num_tds - 1) + if (trb_block_event_intr(xhci, num_tds, i)) field |= TRB_BEI; } /* Calculate TRB length */ diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c index 190923d8b246..934be1686352 100644 --- a/drivers/usb/host/xhci-tegra.c +++ b/drivers/usb/host/xhci-tegra.c @@ -1866,7 +1866,6 @@ static const struct tegra_xusb_phy_type tegra124_phy_types[] = { static const unsigned int tegra124_xusb_context_ipfs[] = { IPFS_XUSB_HOST_MSI_BAR_SZ_0, - IPFS_XUSB_HOST_MSI_BAR_SZ_0, IPFS_XUSB_HOST_MSI_AXI_BAR_ST_0, IPFS_XUSB_HOST_MSI_FPCI_BAR_ST_0, IPFS_XUSB_HOST_MSI_VEC0_0, diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c index f4cedcaee14b..482fe8c5e3b4 100644 --- a/drivers/usb/host/xhci.c +++ b/drivers/usb/host/xhci.c @@ -982,12 +982,15 @@ int xhci_suspend(struct xhci_hcd *xhci, bool do_wakeup) xhci->shared_hcd->state != HC_STATE_SUSPENDED) return -EINVAL; - xhci_dbc_suspend(xhci); - /* Clear root port wake on bits if wakeup not allowed. */ if (!do_wakeup) xhci_disable_port_wake_on_bits(xhci); + if (!HCD_HW_ACCESSIBLE(hcd)) + return 0; + + xhci_dbc_suspend(xhci); + /* Don't poll the roothubs on bus suspend. 
*/ xhci_dbg(xhci, "%s: stopping port polling.\n", __func__); clear_bit(HCD_FLAG_POLL_RH, &hcd->flags); @@ -1915,8 +1918,6 @@ static int xhci_add_endpoint(struct usb_hcd *hcd, struct usb_device *udev, ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index); trace_xhci_add_endpoint(ep_ctx); - xhci_debugfs_create_endpoint(xhci, virt_dev, ep_index); - xhci_dbg(xhci, "add ep 0x%x, slot id %d, new drop flags = %#x, new add flags = %#x\n", (unsigned int) ep->desc.bEndpointAddress, udev->slot_id, @@ -2949,6 +2950,7 @@ static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev) xhci_check_bw_drop_ep_streams(xhci, virt_dev, i); virt_dev->eps[i].ring = virt_dev->eps[i].new_ring; virt_dev->eps[i].new_ring = NULL; + xhci_debugfs_create_endpoint(xhci, virt_dev, i); } command_cleanup: kfree(command->completion); @@ -3531,6 +3533,7 @@ static int xhci_alloc_streams(struct usb_hcd *hcd, struct usb_device *udev, xhci_dbg(xhci, "Slot %u ep ctx %u now has streams.\n", udev->slot_id, ep_index); vdev->eps[ep_index].ep_state |= EP_HAS_STREAMS; + xhci_debugfs_create_stream_files(xhci, vdev, ep_index); } xhci_free_command(xhci, config_cmd); spin_unlock_irqrestore(&xhci->lock, flags); diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h index ea1754f185a2..8be88379c0fb 100644 --- a/drivers/usb/host/xhci.h +++ b/drivers/usb/host/xhci.h @@ -1770,6 +1770,8 @@ struct xhci_hcd { /* optional clocks */ struct clk *clk; struct clk *reg_clk; + /* optional reset controller */ + struct reset_control *reset; /* data structures */ struct xhci_device_context_array *dcbaa; struct xhci_ring *cmd_ring; @@ -1874,6 +1876,7 @@ struct xhci_hcd { #define XHCI_RESET_PLL_ON_DISCONNECT BIT_ULL(34) #define XHCI_SNPS_BROKEN_SUSPEND BIT_ULL(35) #define XHCI_RENESAS_FW_QUIRK BIT_ULL(36) +#define XHCI_SKIP_PHY_INIT BIT_ULL(37) unsigned int num_active_eps; unsigned int limit_active_eps; diff --git a/drivers/usb/image/microtek.c b/drivers/usb/image/microtek.c index 360416680e82..59b02a539963 100644 --- a/drivers/usb/image/microtek.c +++ b/drivers/usb/image/microtek.c @@ -389,7 +389,7 @@ void mts_int_submit_urb (struct urb* transfer, res = usb_submit_urb( transfer, GFP_ATOMIC ); if ( unlikely(res) ) { MTS_INT_ERROR( "could not submit URB! Error was %d\n",(int)res ); - context->srb->result = DID_ERROR << 16; + set_host_byte(context->srb, DID_ERROR); mts_transfer_cleanup(transfer); } } @@ -438,7 +438,7 @@ static void mts_data_done( struct urb* transfer ) scsi_set_resid(context->srb, context->data_length - transfer->actual_length); } else if ( unlikely(status) ) { - context->srb->result = (status == -ENOENT ? DID_ABORT : DID_ERROR)<<16; + set_host_byte(context->srb, (status == -ENOENT ? DID_ABORT : DID_ERROR)); } mts_get_status(transfer); @@ -455,12 +455,12 @@ static void mts_command_done( struct urb *transfer ) if (status == -ENOENT) { /* We are being killed */ MTS_DEBUG_GOT_HERE(); - context->srb->result = DID_ABORT<<16; + set_host_byte(context->srb, DID_ABORT); } else { /* A genuine error has occurred */ MTS_DEBUG_GOT_HERE(); - context->srb->result = DID_ERROR<<16; + set_host_byte(context->srb, DID_ERROR); } mts_transfer_cleanup(transfer); @@ -495,7 +495,7 @@ static void mts_do_sg (struct urb* transfer) scsi_sg_count(context->srb)); if (unlikely(status)) { - context->srb->result = (status == -ENOENT ? DID_ABORT : DID_ERROR)<<16; + set_host_byte(context->srb, (status == -ENOENT ? 
DID_ABORT : DID_ERROR)); mts_transfer_cleanup(transfer); } @@ -578,7 +578,7 @@ mts_scsi_queuecommand_lck(struct scsi_cmnd *srb, mts_scsi_cmnd_callback callback MTS_DEBUG("this device doesn't exist\n"); - srb->result = DID_BAD_TARGET << 16; + set_host_byte(srb, DID_BAD_TARGET); if(likely(callback != NULL)) callback(srb); @@ -605,7 +605,7 @@ mts_scsi_queuecommand_lck(struct scsi_cmnd *srb, mts_scsi_cmnd_callback callback if(unlikely(res)){ MTS_ERROR("error %d submitting URB\n",(int)res); - srb->result = DID_ERROR << 16; + set_host_byte(srb, DID_ERROR); if(likely(callback != NULL)) callback(srb); diff --git a/drivers/usb/isp1760/isp1760-hcd.c b/drivers/usb/isp1760/isp1760-hcd.c index dd74ab7a2f9c..33ae656c4b68 100644 --- a/drivers/usb/isp1760/isp1760-hcd.c +++ b/drivers/usb/isp1760/isp1760-hcd.c @@ -22,6 +22,7 @@ #include <linux/debugfs.h> #include <linux/uaccess.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/mm.h> #include <linux/timer.h> #include <asm/unaligned.h> @@ -380,18 +381,15 @@ static int handshake(struct usb_hcd *hcd, u32 reg, u32 mask, u32 done, int usec) { u32 result; + int ret; + + ret = readl_poll_timeout_atomic(hcd->regs + reg, result, + ((result & mask) == done || + result == U32_MAX), 1, usec); + if (result == U32_MAX) + return -ENODEV; - do { - result = reg_read32(hcd->regs, reg); - if (result == ~0) - return -ENODEV; - result &= mask; - if (result == done) - return 0; - udelay(1); - usec--; - } while (usec > 0); - return -ETIMEDOUT; + return ret; } /* reset a non-running (STS_HALT == 1) controller */ diff --git a/drivers/usb/misc/adutux.c b/drivers/usb/misc/adutux.c index a7eefe11f31a..45a387979935 100644 --- a/drivers/usb/misc/adutux.c +++ b/drivers/usb/misc/adutux.c @@ -209,6 +209,7 @@ static void adu_interrupt_out_callback(struct urb *urb) if (status != 0) { if ((status != -ENOENT) && + (status != -ESHUTDOWN) && (status != -ECONNRESET)) { dev_dbg(&dev->udev->dev, "%s :nonzero status received: %d\n", __func__, diff --git a/drivers/usb/misc/appledisplay.c b/drivers/usb/misc/appledisplay.c index 36fed1a09666..c8098e9b432e 100644 --- a/drivers/usb/misc/appledisplay.c +++ b/drivers/usb/misc/appledisplay.c @@ -342,20 +342,8 @@ static struct usb_driver appledisplay_driver = { .disconnect = appledisplay_disconnect, .id_table = appledisplay_table, }; - -static int __init appledisplay_init(void) -{ - return usb_register(&appledisplay_driver); -} - -static void __exit appledisplay_exit(void) -{ - usb_deregister(&appledisplay_driver); -} +module_usb_driver(appledisplay_driver); MODULE_AUTHOR("Michael Hanselmann"); MODULE_DESCRIPTION("Apple Cinema Display driver"); MODULE_LICENSE("GPL"); - -module_init(appledisplay_init); -module_exit(appledisplay_exit); diff --git a/drivers/usb/misc/legousbtower.c b/drivers/usb/misc/legousbtower.c index f922544056de..ba655b4af4fc 100644 --- a/drivers/usb/misc/legousbtower.c +++ b/drivers/usb/misc/legousbtower.c @@ -308,15 +308,9 @@ static int tower_open(struct inode *inode, struct file *file) int subminor; int retval = 0; struct usb_interface *interface; - struct tower_reset_reply *reset_reply; + struct tower_reset_reply reset_reply; int result; - reset_reply = kmalloc(sizeof(*reset_reply), GFP_KERNEL); - if (!reset_reply) { - retval = -ENOMEM; - goto exit; - } - nonseekable_open(inode, file); subminor = iminor(inode); @@ -347,15 +341,12 @@ static int tower_open(struct inode *inode, struct file *file) } /* reset the tower */ - result = usb_control_msg(dev->udev, - usb_rcvctrlpipe(dev->udev, 0), - LEGO_USB_TOWER_REQUEST_RESET, - 
USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE, - 0, - 0, - reset_reply, - sizeof(*reset_reply), - 1000); + result = usb_control_msg_recv(dev->udev, 0, + LEGO_USB_TOWER_REQUEST_RESET, + USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE, + 0, 0, + &reset_reply, sizeof(reset_reply), 1000, + GFP_KERNEL); if (result < 0) { dev_err(&dev->udev->dev, "LEGO USB Tower reset control request failed\n"); @@ -394,7 +385,6 @@ unlock_exit: mutex_unlock(&dev->lock); exit: - kfree(reset_reply); return retval; } @@ -753,7 +743,7 @@ static int tower_probe(struct usb_interface *interface, const struct usb_device_ struct device *idev = &interface->dev; struct usb_device *udev = interface_to_usbdev(interface); struct lego_usb_tower *dev; - struct tower_get_version_reply *get_version_reply = NULL; + struct tower_get_version_reply get_version_reply; int retval = -ENOMEM; int result; @@ -798,34 +788,25 @@ static int tower_probe(struct usb_interface *interface, const struct usb_device_ dev->interrupt_in_interval = interrupt_in_interval ? interrupt_in_interval : dev->interrupt_in_endpoint->bInterval; dev->interrupt_out_interval = interrupt_out_interval ? interrupt_out_interval : dev->interrupt_out_endpoint->bInterval; - get_version_reply = kmalloc(sizeof(*get_version_reply), GFP_KERNEL); - if (!get_version_reply) { - retval = -ENOMEM; - goto error; - } - /* get the firmware version and log it */ - result = usb_control_msg(udev, - usb_rcvctrlpipe(udev, 0), - LEGO_USB_TOWER_REQUEST_GET_VERSION, - USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE, - 0, - 0, - get_version_reply, - sizeof(*get_version_reply), - 1000); - if (result != sizeof(*get_version_reply)) { - if (result >= 0) - result = -EIO; + result = usb_control_msg_recv(udev, 0, + LEGO_USB_TOWER_REQUEST_GET_VERSION, + USB_TYPE_VENDOR | USB_DIR_IN | USB_RECIP_DEVICE, + 0, + 0, + &get_version_reply, + sizeof(get_version_reply), + 1000, GFP_KERNEL); + if (!result) { dev_err(idev, "get version request failed: %d\n", result); retval = result; goto error; } dev_info(&interface->dev, "LEGO USB Tower firmware version is %d.%d build %d\n", - get_version_reply->major, - get_version_reply->minor, - le16_to_cpu(get_version_reply->build_no)); + get_version_reply.major, + get_version_reply.minor, + le16_to_cpu(get_version_reply.build_no)); /* we can register the device now, as it is ready */ usb_set_intfdata(interface, dev); @@ -844,11 +825,9 @@ static int tower_probe(struct usb_interface *interface, const struct usb_device_ USB_MAJOR, dev->minor); exit: - kfree(get_version_reply); return retval; error: - kfree(get_version_reply); tower_delete(dev); return retval; } diff --git a/drivers/usb/misc/usb3503.c b/drivers/usb/misc/usb3503.c index 116bd789e568..48099c6bf04c 100644 --- a/drivers/usb/misc/usb3503.c +++ b/drivers/usb/misc/usb3503.c @@ -322,8 +322,7 @@ static int usb3503_platform_remove(struct platform_device *pdev) return 0; } -#ifdef CONFIG_PM_SLEEP -static int usb3503_suspend(struct usb3503 *hub) +static int __maybe_unused usb3503_suspend(struct usb3503 *hub) { usb3503_switch_mode(hub, USB3503_MODE_STANDBY); clk_disable_unprepare(hub->clk); @@ -331,7 +330,7 @@ static int usb3503_suspend(struct usb3503 *hub) return 0; } -static int usb3503_resume(struct usb3503 *hub) +static int __maybe_unused usb3503_resume(struct usb3503 *hub) { clk_prepare_enable(hub->clk); usb3503_switch_mode(hub, hub->mode); @@ -339,30 +338,29 @@ static int usb3503_resume(struct usb3503 *hub) return 0; } -static int usb3503_i2c_suspend(struct device *dev) +static int __maybe_unused 
usb3503_i2c_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); return usb3503_suspend(i2c_get_clientdata(client)); } -static int usb3503_i2c_resume(struct device *dev) +static int __maybe_unused usb3503_i2c_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); return usb3503_resume(i2c_get_clientdata(client)); } -static int usb3503_platform_suspend(struct device *dev) +static int __maybe_unused usb3503_platform_suspend(struct device *dev) { return usb3503_suspend(dev_get_drvdata(dev)); } -static int usb3503_platform_resume(struct device *dev) +static int __maybe_unused usb3503_platform_resume(struct device *dev) { return usb3503_resume(dev_get_drvdata(dev)); } -#endif static SIMPLE_DEV_PM_OPS(usb3503_i2c_pm_ops, usb3503_i2c_suspend, usb3503_i2c_resume); @@ -388,7 +386,7 @@ MODULE_DEVICE_TABLE(of, usb3503_of_match); static struct i2c_driver usb3503_i2c_driver = { .driver = { .name = USB3503_I2C_NAME, - .pm = &usb3503_i2c_pm_ops, + .pm = pm_ptr(&usb3503_i2c_pm_ops), .of_match_table = of_match_ptr(usb3503_of_match), }, .probe = usb3503_i2c_probe, @@ -400,7 +398,7 @@ static struct platform_driver usb3503_platform_driver = { .driver = { .name = USB3503_I2C_NAME, .of_match_table = of_match_ptr(usb3503_of_match), - .pm = &usb3503_platform_pm_ops, + .pm = pm_ptr(&usb3503_platform_pm_ops), }, .probe = usb3503_platform_probe, .remove = usb3503_platform_remove, diff --git a/drivers/usb/misc/usb4604.c b/drivers/usb/misc/usb4604.c index 1b4de651e697..2142af9bbdec 100644 --- a/drivers/usb/misc/usb4604.c +++ b/drivers/usb/misc/usb4604.c @@ -112,8 +112,7 @@ static int usb4604_i2c_probe(struct i2c_client *i2c, return usb4604_probe(hub); } -#ifdef CONFIG_PM_SLEEP -static int usb4604_i2c_suspend(struct device *dev) +static int __maybe_unused usb4604_i2c_suspend(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct usb4604 *hub = i2c_get_clientdata(client); @@ -123,7 +122,7 @@ static int usb4604_i2c_suspend(struct device *dev) return 0; } -static int usb4604_i2c_resume(struct device *dev) +static int __maybe_unused usb4604_i2c_resume(struct device *dev) { struct i2c_client *client = to_i2c_client(dev); struct usb4604 *hub = i2c_get_clientdata(client); @@ -132,7 +131,6 @@ static int usb4604_i2c_resume(struct device *dev) return 0; } -#endif static SIMPLE_DEV_PM_OPS(usb4604_i2c_pm_ops, usb4604_i2c_suspend, usb4604_i2c_resume); @@ -154,7 +152,7 @@ MODULE_DEVICE_TABLE(of, usb4604_of_match); static struct i2c_driver usb4604_i2c_driver = { .driver = { .name = "usb4604", - .pm = &usb4604_i2c_pm_ops, + .pm = pm_ptr(&usb4604_i2c_pm_ops), .of_match_table = of_match_ptr(usb4604_of_match), }, .probe = usb4604_i2c_probe, diff --git a/drivers/usb/misc/usblcd.c b/drivers/usb/misc/usblcd.c index 61e9e987fe4a..bb546f624a45 100644 --- a/drivers/usb/misc/usblcd.c +++ b/drivers/usb/misc/usblcd.c @@ -187,7 +187,6 @@ static long lcd_ioctl(struct file *file, unsigned int cmd, unsigned long arg) break; default: return -ENOTTY; - break; } return 0; diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c index b2e09883c7e2..e3165d79b5f6 100644 --- a/drivers/usb/misc/yurex.c +++ b/drivers/usb/misc/yurex.c @@ -96,15 +96,13 @@ static void yurex_delete(struct kref *kref) if (dev->cntl_urb) { usb_kill_urb(dev->cntl_urb); kfree(dev->cntl_req); - if (dev->cntl_buffer) - usb_free_coherent(dev->udev, YUREX_BUF_SIZE, + usb_free_coherent(dev->udev, YUREX_BUF_SIZE, dev->cntl_buffer, dev->cntl_urb->transfer_dma); usb_free_urb(dev->cntl_urb); } if (dev->urb) { 
usb_kill_urb(dev->urb); - if (dev->int_buffer) - usb_free_coherent(dev->udev, YUREX_BUF_SIZE, + usb_free_coherent(dev->udev, YUREX_BUF_SIZE, dev->int_buffer, dev->urb->transfer_dma); usb_free_urb(dev->urb); } diff --git a/drivers/usb/mtu3/mtu3.h b/drivers/usb/mtu3/mtu3.h index 71f4f02c05c6..aef0a0bba25a 100644 --- a/drivers/usb/mtu3/mtu3.h +++ b/drivers/usb/mtu3/mtu3.h @@ -370,12 +370,6 @@ static inline struct mtu3 *gadget_to_mtu3(struct usb_gadget *g) return container_of(g, struct mtu3, g); } -static inline int is_first_entry(const struct list_head *list, - const struct list_head *head) -{ - return list_is_last(head, list); -} - static inline struct mtu3_request *to_mtu3_request(struct usb_request *req) { return req ? container_of(req, struct mtu3_request, request) : NULL; diff --git a/drivers/usb/phy/phy-ab8500-usb.c b/drivers/usb/phy/phy-ab8500-usb.c index aa4a3140394b..4c52ba96f17e 100644 --- a/drivers/usb/phy/phy-ab8500-usb.c +++ b/drivers/usb/phy/phy-ab8500-usb.c @@ -518,7 +518,7 @@ static int ab8500_usb_link_status_update(struct ab8500_usb *ab, * 3. Enable AB regulators * 4. Enable USB phy * 5. Reset the musb controller - * 6. Switch the ULPI GPIO pins to fucntion mode + * 6. Switch the ULPI GPIO pins to function mode * 7. Enable the musb Peripheral5 clock * 8. Restore MUSB context */ diff --git a/drivers/usb/phy/phy-mv-usb.c b/drivers/usb/phy/phy-mv-usb.c index ce767ecc0636..576d925af77c 100644 --- a/drivers/usb/phy/phy-mv-usb.c +++ b/drivers/usb/phy/phy-mv-usb.c @@ -8,6 +8,7 @@ #include <linux/module.h> #include <linux/kernel.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/uaccess.h> #include <linux/device.h> #include <linux/proc_fs.h> @@ -135,8 +136,8 @@ static int mv_otg_set_timer(struct mv_otg *mvotg, unsigned int id, static int mv_otg_reset(struct mv_otg *mvotg) { - unsigned int loops; u32 tmp; + int ret; /* Stop the controller */ tmp = readl(&mvotg->op_regs->usbcmd); @@ -146,15 +147,12 @@ static int mv_otg_reset(struct mv_otg *mvotg) /* Reset the controller to get default values */ writel(USBCMD_CTRL_RESET, &mvotg->op_regs->usbcmd); - loops = 500; - while (readl(&mvotg->op_regs->usbcmd) & USBCMD_CTRL_RESET) { - if (loops == 0) { - dev_err(&mvotg->pdev->dev, - "Wait for RESET completed TIMEOUT\n"); - return -ETIMEDOUT; - } - loops--; - udelay(20); + ret = readl_poll_timeout_atomic(&mvotg->op_regs->usbcmd, tmp, + (tmp & USBCMD_CTRL_RESET), 10, 10000); + if (ret < 0) { + dev_err(&mvotg->pdev->dev, + "Wait for RESET completed TIMEOUT\n"); + return ret; } writel(0x0, &mvotg->op_regs->usbintr); diff --git a/drivers/usb/phy/phy-ulpi-viewport.c b/drivers/usb/phy/phy-ulpi-viewport.c index 7a14e0e3b635..0f61e328eaef 100644 --- a/drivers/usb/phy/phy-ulpi-viewport.c +++ b/drivers/usb/phy/phy-ulpi-viewport.c @@ -7,6 +7,7 @@ #include <linux/kernel.h> #include <linux/usb.h> #include <linux/io.h> +#include <linux/iopoll.h> #include <linux/usb/otg.h> #include <linux/usb/ulpi.h> @@ -20,16 +21,9 @@ static int ulpi_viewport_wait(void __iomem *view, u32 mask) { - unsigned long usec = 2000; + u32 val; - while (usec--) { - if (!(readl(view) & mask)) - return 0; - - udelay(1); - } - - return -ETIMEDOUT; + return readl_poll_timeout_atomic(view, val, !(val & mask), 1, 2000); } static int ulpi_viewport_read(struct usb_phy *otg, u32 reg) diff --git a/drivers/usb/serial/mos7720.c b/drivers/usb/serial/mos7720.c index 2ec4eeacebc7..5eed1078fac8 100644 --- a/drivers/usb/serial/mos7720.c +++ b/drivers/usb/serial/mos7720.c @@ -282,11 +282,12 @@ static void destroy_urbtracker(struct kref 
*kref) * port callback had to be deferred because the disconnect mutex could not be * obtained at the time. */ -static void send_deferred_urbs(unsigned long _mos_parport) +static void send_deferred_urbs(struct tasklet_struct *t) { int ret_val; unsigned long flags; - struct mos7715_parport *mos_parport = (void *)_mos_parport; + struct mos7715_parport *mos_parport = from_tasklet(mos_parport, t, + urb_tasklet); struct urbtracker *urbtrack, *tmp; struct list_head *cursor, *next; struct device *dev; @@ -716,8 +717,7 @@ static int mos7715_parport_init(struct usb_serial *serial) INIT_LIST_HEAD(&mos_parport->deferred_urbs); usb_set_serial_data(serial, mos_parport); /* hijack private pointer */ mos_parport->serial = serial; - tasklet_init(&mos_parport->urb_tasklet, send_deferred_urbs, - (unsigned long) mos_parport); + tasklet_setup(&mos_parport->urb_tasklet, send_deferred_urbs); init_completion(&mos_parport->syncmsg_compl); /* cycle parallel port reset bit */ diff --git a/drivers/usb/storage/isd200.c b/drivers/usb/storage/isd200.c index 89f5e33a6e6d..3c76336e43bb 100644 --- a/drivers/usb/storage/isd200.c +++ b/drivers/usb/storage/isd200.c @@ -1383,7 +1383,7 @@ static int isd200_scsi_to_ata(struct scsi_cmnd *srb, struct us_data *us, ATA_CMD_MEDIA_LOCK : ATA_CMD_MEDIA_UNLOCK; isd200_srb_set_bufflen(srb, 0); } else { - usb_stor_dbg(us, " Not removeable media, just report okay\n"); + usb_stor_dbg(us, " Not removable media, just report okay\n"); srb->result = SAM_STAT_GOOD; sendToTransport = 0; } diff --git a/drivers/usb/storage/scsiglue.c b/drivers/usb/storage/scsiglue.c index e5a971b83e3f..560efd1479ba 100644 --- a/drivers/usb/storage/scsiglue.c +++ b/drivers/usb/storage/scsiglue.c @@ -92,7 +92,7 @@ static int slave_alloc (struct scsi_device *sdev) static int slave_configure(struct scsi_device *sdev) { struct us_data *us = host_to_us(sdev->host); - struct device *dev = us->pusb_dev->bus->sysdev; + struct device *dev = sdev->host->dma_dev; /* * Many devices have trouble transferring more than 32KB at a time, diff --git a/drivers/usb/storage/uas.c b/drivers/usb/storage/uas.c index 8183504e3abb..c8a577309e8f 100644 --- a/drivers/usb/storage/uas.c +++ b/drivers/usb/storage/uas.c @@ -279,17 +279,17 @@ static bool uas_evaluate_response_iu(struct response_iu *riu, struct scsi_cmnd * switch (response_code) { case RC_INCORRECT_LUN: - cmnd->result = DID_BAD_TARGET << 16; + set_host_byte(cmnd, DID_BAD_TARGET); break; case RC_TMF_SUCCEEDED: - cmnd->result = DID_OK << 16; + set_host_byte(cmnd, DID_OK); break; case RC_TMF_NOT_SUPPORTED: - cmnd->result = DID_TARGET_FAILURE << 16; + set_host_byte(cmnd, DID_TARGET_FAILURE); break; default: uas_log_cmd_state(cmnd, "response iu", response_code); - cmnd->result = DID_ERROR << 16; + set_host_byte(cmnd, DID_ERROR); break; } @@ -660,7 +660,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd, spin_lock_irqsave(&devinfo->lock, flags); if (devinfo->resetting) { - cmnd->result = DID_ERROR << 16; + set_host_byte(cmnd, DID_ERROR); cmnd->scsi_done(cmnd); goto zombie; } @@ -704,7 +704,7 @@ static int uas_queuecommand_lck(struct scsi_cmnd *cmnd, * of queueing, no matter how fatal the error */ if (err == -ENODEV) { - cmnd->result = DID_ERROR << 16; + set_host_byte(cmnd, DID_ERROR); cmnd->scsi_done(cmnd); goto zombie; } @@ -837,17 +837,24 @@ static int uas_slave_alloc(struct scsi_device *sdev) */ blk_queue_update_dma_alignment(sdev->request_queue, (512 - 1)); - if (devinfo->flags & US_FL_MAX_SECTORS_64) - blk_queue_max_hw_sectors(sdev->request_queue, 64); - else if 
(devinfo->flags & US_FL_MAX_SECTORS_240) - blk_queue_max_hw_sectors(sdev->request_queue, 240); - return 0; } static int uas_slave_configure(struct scsi_device *sdev) { struct uas_dev_info *devinfo = sdev->hostdata; + struct device *dev = sdev->host->dma_dev; + + if (devinfo->flags & US_FL_MAX_SECTORS_64) + blk_queue_max_hw_sectors(sdev->request_queue, 64); + else if (devinfo->flags & US_FL_MAX_SECTORS_240) + blk_queue_max_hw_sectors(sdev->request_queue, 240); + else if (devinfo->udev->speed >= USB_SPEED_SUPER) + blk_queue_max_hw_sectors(sdev->request_queue, 2048); + + blk_queue_max_hw_sectors(sdev->request_queue, + min_t(size_t, queue_max_hw_sectors(sdev->request_queue), + dma_max_mapping_size(dev) >> SECTOR_SHIFT)); if (devinfo->flags & US_FL_NO_REPORT_OPCODES) sdev->no_report_opcodes = 1; @@ -1033,7 +1040,7 @@ static int uas_probe(struct usb_interface *intf, const struct usb_device_id *id) shost->can_queue = devinfo->qdepth - 2; usb_set_intfdata(intf, shost); - result = scsi_add_host(shost, &intf->dev); + result = scsi_add_host_with_dma(shost, &intf->dev, udev->bus->sysdev); if (result) goto free_streams; diff --git a/drivers/usb/storage/usb.c b/drivers/usb/storage/usb.c index 94a64729dc27..c2ef367cf257 100644 --- a/drivers/usb/storage/usb.c +++ b/drivers/usb/storage/usb.c @@ -1049,8 +1049,9 @@ int usb_stor_probe2(struct us_data *us) goto BadDevice; usb_autopm_get_interface_no_resume(us->pusb_intf); snprintf(us->scsi_name, sizeof(us->scsi_name), "usb-storage %s", - dev_name(&us->pusb_intf->dev)); - result = scsi_add_host(us_to_host(us), dev); + dev_name(dev)); + result = scsi_add_host_with_dma(us_to_host(us), dev, + us->pusb_dev->bus->sysdev); if (result) { dev_warn(dev, "Unable to add the scsi host\n"); diff --git a/drivers/usb/typec/Kconfig b/drivers/usb/typec/Kconfig index 559dd06117e7..eee8536ae600 100644 --- a/drivers/usb/typec/Kconfig +++ b/drivers/usb/typec/Kconfig @@ -73,6 +73,18 @@ config TYPEC_TPS6598X If you choose to build this driver as a dynamically linked module, the module will be called tps6598x.ko. +config TYPEC_STUSB160X + tristate "STMicroelectronics STUSB160x Type-C controller driver" + depends on I2C + depends on REGMAP_I2C + depends on USB_ROLE_SWITCH || !USB_ROLE_SWITCH + help + Say Y or M here if your system has STMicroelectronics STUSB160x + Type-C port controller. + + If you choose to build this driver as a dynamically linked module, the + module will be called stusb160x.ko. 
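The uas and usb-storage hunks above register the SCSI host with scsi_add_host_with_dma(), passing the USB controller's sysdev as the DMA device, and then cap the request queue by what that device can actually map. A minimal sketch of the clamping idiom (not part of the patch; the example_ name is made up, the block/DMA helpers are the real APIs):

#include <linux/blkdev.h>
#include <linux/dma-mapping.h>
#include <linux/kernel.h>
#include <scsi/scsi_device.h>

static void example_cap_transfer_size(struct scsi_device *sdev,
				      struct device *dma_dev)
{
	/* dma_max_mapping_size() reports bytes; the queue limit is in 512-byte sectors */
	unsigned int cur = queue_max_hw_sectors(sdev->request_queue);

	blk_queue_max_hw_sectors(sdev->request_queue,
				 min_t(size_t, cur,
				       dma_max_mapping_size(dma_dev) >> SECTOR_SHIFT));
}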
+ source "drivers/usb/typec/mux/Kconfig" source "drivers/usb/typec/altmodes/Kconfig" diff --git a/drivers/usb/typec/Makefile b/drivers/usb/typec/Makefile index 7753a5c3cd46..671bc2d3cd6a 100644 --- a/drivers/usb/typec/Makefile +++ b/drivers/usb/typec/Makefile @@ -6,4 +6,5 @@ obj-$(CONFIG_TYPEC_TCPM) += tcpm/ obj-$(CONFIG_TYPEC_UCSI) += ucsi/ obj-$(CONFIG_TYPEC_HD3SS3220) += hd3ss3220.o obj-$(CONFIG_TYPEC_TPS6598X) += tps6598x.o +obj-$(CONFIG_TYPEC_STUSB160X) += stusb160x.o obj-$(CONFIG_TYPEC) += mux/ diff --git a/drivers/usb/typec/altmodes/displayport.c b/drivers/usb/typec/altmodes/displayport.c index 7b20073d7fc0..e62e5e3da01e 100644 --- a/drivers/usb/typec/altmodes/displayport.c +++ b/drivers/usb/typec/altmodes/displayport.c @@ -190,7 +190,7 @@ static void dp_altmode_work(struct work_struct *work) switch (dp->state) { case DP_STATE_ENTER: ret = typec_altmode_enter(dp->alt, NULL); - if (ret) + if (ret && ret != -EBUSY) dev_err(&dp->alt->dev, "failed to enter mode\n"); break; case DP_STATE_UPDATE: diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c index 02655694f200..35eec707cb51 100644 --- a/drivers/usb/typec/class.c +++ b/drivers/usb/typec/class.c @@ -1449,6 +1449,21 @@ void typec_set_pwr_opmode(struct typec_port *port, EXPORT_SYMBOL_GPL(typec_set_pwr_opmode); /** + * typec_find_pwr_opmode - Get the typec power operation mode capability + * @name: power operation mode string + * + * This routine is used to find the typec_pwr_opmode by its string @name. + * + * Returns typec_pwr_opmode if success, otherwise negative error code. + */ +int typec_find_pwr_opmode(const char *name) +{ + return match_string(typec_pwr_opmodes, + ARRAY_SIZE(typec_pwr_opmodes), name); +} +EXPORT_SYMBOL_GPL(typec_find_pwr_opmode); + +/** * typec_find_orientation - Convert orientation string to enum typec_orientation * @name: Orientation string * diff --git a/drivers/usb/typec/hd3ss3220.c b/drivers/usb/typec/hd3ss3220.c index 323dfa8160ab..f633ec15b1a1 100644 --- a/drivers/usb/typec/hd3ss3220.c +++ b/drivers/usb/typec/hd3ss3220.c @@ -155,7 +155,7 @@ static int hd3ss3220_probe(struct i2c_client *client, { struct typec_capability typec_cap = { }; struct hd3ss3220 *hd3ss3220; - struct fwnode_handle *connector; + struct fwnode_handle *connector, *ep; int ret; unsigned int data; @@ -173,11 +173,21 @@ static int hd3ss3220_probe(struct i2c_client *client, hd3ss3220_set_source_pref(hd3ss3220, HD3SS3220_REG_GEN_CTRL_SRC_PREF_DRP_DEFAULT); + /* For backward compatibility check the connector child node first */ connector = device_get_named_child_node(hd3ss3220->dev, "connector"); - if (!connector) - return -ENODEV; + if (connector) { + hd3ss3220->role_sw = fwnode_usb_role_switch_get(connector); + } else { + ep = fwnode_graph_get_next_endpoint(dev_fwnode(hd3ss3220->dev), NULL); + if (!ep) + return -ENODEV; + connector = fwnode_graph_get_remote_port_parent(ep); + fwnode_handle_put(ep); + if (!connector) + return -ENODEV; + hd3ss3220->role_sw = usb_role_switch_get(hd3ss3220->dev); + } - hd3ss3220->role_sw = fwnode_usb_role_switch_get(connector); if (IS_ERR(hd3ss3220->role_sw)) { ret = PTR_ERR(hd3ss3220->role_sw); goto err_put_fwnode; diff --git a/drivers/usb/typec/mux/Kconfig b/drivers/usb/typec/mux/Kconfig index a4dbd11f8ee2..edead555835e 100644 --- a/drivers/usb/typec/mux/Kconfig +++ b/drivers/usb/typec/mux/Kconfig @@ -11,6 +11,7 @@ config TYPEC_MUX_PI3USB30532 config TYPEC_MUX_INTEL_PMC tristate "Intel PMC mux control" + depends on ACPI depends on INTEL_SCU_IPC select USB_ROLE_SWITCH help diff --git 
a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c index 676b525c2a66..d7f63b74c6b1 100644 --- a/drivers/usb/typec/mux/intel_pmc_mux.c +++ b/drivers/usb/typec/mux/intel_pmc_mux.c @@ -80,10 +80,48 @@ enum { #define PMC_USB_DP_HPD_LVL BIT(4) #define PMC_USB_DP_HPD_IRQ BIT(5) +/* + * Input Output Manager (IOM) PORT STATUS + */ +#define IOM_PORT_STATUS_OFFSET 0x560 + +#define IOM_PORT_STATUS_ACTIVITY_TYPE_MASK GENMASK(9, 6) +#define IOM_PORT_STATUS_ACTIVITY_TYPE_SHIFT 6 +#define IOM_PORT_STATUS_ACTIVITY_TYPE_USB 0x03 +/* activity type: Safe Mode */ +#define IOM_PORT_STATUS_ACTIVITY_TYPE_SAFE_MODE 0x04 +/* activity type: Display Port */ +#define IOM_PORT_STATUS_ACTIVITY_TYPE_DP 0x05 +/* activity type: Display Port Multi Function Device */ +#define IOM_PORT_STATUS_ACTIVITY_TYPE_DP_MFD 0x06 +/* activity type: Thunderbolt */ +#define IOM_PORT_STATUS_ACTIVITY_TYPE_TBT 0x07 +#define IOM_PORT_STATUS_ACTIVITY_TYPE_ALT_MODE_USB 0x0c +#define IOM_PORT_STATUS_ACTIVITY_TYPE_ALT_MODE_TBT_USB 0x0d +/* Upstream Facing Port Information */ +#define IOM_PORT_STATUS_UFP BIT(10) +/* Display Port Hot Plug Detect status */ +#define IOM_PORT_STATUS_DHPD_HPD_STATUS_MASK GENMASK(13, 12) +#define IOM_PORT_STATUS_DHPD_HPD_STATUS_SHIFT 12 +#define IOM_PORT_STATUS_DHPD_HPD_STATUS_ASSERT 0x01 +#define IOM_PORT_STATUS_DHPD_HPD_SOURCE_TBT BIT(14) +#define IOM_PORT_STATUS_CONNECTED BIT(31) + +#define IOM_PORT_ACTIVITY_IS(_status_, _type_) \ + ((((_status_) & IOM_PORT_STATUS_ACTIVITY_TYPE_MASK) >> \ + IOM_PORT_STATUS_ACTIVITY_TYPE_SHIFT) == \ + (IOM_PORT_STATUS_ACTIVITY_TYPE_##_type_)) + +#define IOM_PORT_HPD_ASSERTED(_status_) \ + ((((_status_) & IOM_PORT_STATUS_DHPD_HPD_STATUS_MASK) >> \ + IOM_PORT_STATUS_DHPD_HPD_STATUS_SHIFT) & \ + IOM_PORT_STATUS_DHPD_HPD_STATUS_ASSERT) + struct pmc_usb; struct pmc_usb_port { int num; + u32 iom_status; struct pmc_usb *pmc; struct typec_mux *typec_mux; struct typec_switch *typec_sw; @@ -104,8 +142,21 @@ struct pmc_usb { struct device *dev; struct intel_scu_ipc_dev *ipc; struct pmc_usb_port *port; + struct acpi_device *iom_adev; + void __iomem *iom_base; }; +static void update_port_status(struct pmc_usb_port *port) +{ + u8 port_num; + + /* SoC expects the USB Type-C port numbers to start with 0 */ + port_num = port->usb3_port - 1; + + port->iom_status = readl(port->pmc->iom_base + IOM_PORT_STATUS_OFFSET + + port_num * sizeof(u32)); +} + static int sbu_orientation(struct pmc_usb_port *port) { if (port->sbu_orientation) @@ -148,18 +199,17 @@ static int pmc_usb_command(struct pmc_usb_port *port, u8 *msg, u32 len) } static int -pmc_usb_mux_dp_hpd(struct pmc_usb_port *port, struct typec_mux_state *state) +pmc_usb_mux_dp_hpd(struct pmc_usb_port *port, struct typec_displayport_data *dp) { - struct typec_displayport_data *data = state->data; u8 msg[2] = { }; msg[0] = PMC_USB_DP_HPD; msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT; - if (data->status & DP_STATUS_IRQ_HPD) + if (dp->status & DP_STATUS_IRQ_HPD) msg[1] = PMC_USB_DP_HPD_IRQ; - if (data->status & DP_STATUS_HPD_STATE) + if (dp->status & DP_STATUS_HPD_STATE) msg[1] |= PMC_USB_DP_HPD_LVL; return pmc_usb_command(port, msg, sizeof(msg)); @@ -172,8 +222,15 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state) struct altmode_req req = { }; int ret; - if (data->status & DP_STATUS_IRQ_HPD) - return pmc_usb_mux_dp_hpd(port, state); + if (IOM_PORT_ACTIVITY_IS(port->iom_status, DP) || + IOM_PORT_ACTIVITY_IS(port->iom_status, DP_MFD)) { + if (IOM_PORT_HPD_ASSERTED(port->iom_status) && + 
(!(data->status & DP_STATUS_IRQ_HPD) && + data->status & DP_STATUS_HPD_STATE)) + return 0; + + return pmc_usb_mux_dp_hpd(port, state->data); + } req.usage = PMC_USB_ALT_MODE; req.usage |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT; @@ -189,8 +246,8 @@ pmc_usb_mux_dp(struct pmc_usb_port *port, struct typec_mux_state *state) if (ret) return ret; - if (data->status & DP_STATUS_HPD_STATE) - return pmc_usb_mux_dp_hpd(port, state); + if (data->status & (DP_STATUS_IRQ_HPD | DP_STATUS_HPD_STATE)) + return pmc_usb_mux_dp_hpd(port, state->data); return 0; } @@ -202,6 +259,10 @@ pmc_usb_mux_tbt(struct pmc_usb_port *port, struct typec_mux_state *state) u8 cable_speed = TBT_CABLE_SPEED(data->cable_mode); struct altmode_req req = { }; + if (IOM_PORT_ACTIVITY_IS(port->iom_status, TBT) || + IOM_PORT_ACTIVITY_IS(port->iom_status, ALT_MODE_TBT_USB)) + return 0; + req.usage = PMC_USB_ALT_MODE; req.usage |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT; req.mode_type = PMC_USB_MODE_TYPE_TBT << PMC_USB_MODE_TYPE_SHIFT; @@ -233,6 +294,10 @@ pmc_usb_mux_usb4(struct pmc_usb_port *port, struct typec_mux_state *state) struct altmode_req req = { }; u8 cable_speed; + if (IOM_PORT_ACTIVITY_IS(port->iom_status, TBT) || + IOM_PORT_ACTIVITY_IS(port->iom_status, ALT_MODE_TBT_USB)) + return 0; + req.usage = PMC_USB_ALT_MODE; req.usage |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT; req.mode_type = PMC_USB_MODE_TYPE_TBT << PMC_USB_MODE_TYPE_SHIFT; @@ -267,34 +332,61 @@ static int pmc_usb_mux_safe_state(struct pmc_usb_port *port) { u8 msg; + if (IOM_PORT_ACTIVITY_IS(port->iom_status, SAFE_MODE)) + return 0; + msg = PMC_USB_SAFE_MODE; msg |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT; return pmc_usb_command(port, &msg, sizeof(msg)); } -static int pmc_usb_connect(struct pmc_usb_port *port) +static int pmc_usb_disconnect(struct pmc_usb_port *port) { + struct typec_displayport_data data = { }; u8 msg[2]; - msg[0] = PMC_USB_CONNECT; + if (!(port->iom_status & IOM_PORT_STATUS_CONNECTED)) + return 0; + + /* Clear DisplayPort HPD if it's still asserted. */ + if (IOM_PORT_HPD_ASSERTED(port->iom_status)) + pmc_usb_mux_dp_hpd(port, &data); + + msg[0] = PMC_USB_DISCONNECT; msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT; msg[1] = port->usb2_port << PMC_USB_MSG_USB2_PORT_SHIFT; - msg[1] |= hsl_orientation(port) << PMC_USB_MSG_ORI_HSL_SHIFT; - msg[1] |= sbu_orientation(port) << PMC_USB_MSG_ORI_AUX_SHIFT; return pmc_usb_command(port, msg, sizeof(msg)); } -static int pmc_usb_disconnect(struct pmc_usb_port *port) +static int pmc_usb_connect(struct pmc_usb_port *port, enum usb_role role) { + u8 ufp = role == USB_ROLE_DEVICE ? 
1 : 0; u8 msg[2]; + int ret; - msg[0] = PMC_USB_DISCONNECT; + if (port->orientation == TYPEC_ORIENTATION_NONE) + return -EINVAL; + + if (port->iom_status & IOM_PORT_STATUS_CONNECTED) { + if (port->role == role || port->role == USB_ROLE_NONE) + return 0; + + /* Role swap */ + ret = pmc_usb_disconnect(port); + if (ret) + return ret; + } + + msg[0] = PMC_USB_CONNECT; msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT; msg[1] = port->usb2_port << PMC_USB_MSG_USB2_PORT_SHIFT; + msg[1] |= ufp << PMC_USB_MSG_UFP_SHIFT; + msg[1] |= hsl_orientation(port) << PMC_USB_MSG_ORI_HSL_SHIFT; + msg[1] |= sbu_orientation(port) << PMC_USB_MSG_ORI_AUX_SHIFT; return pmc_usb_command(port, msg, sizeof(msg)); } @@ -304,13 +396,15 @@ pmc_usb_mux_set(struct typec_mux *mux, struct typec_mux_state *state) { struct pmc_usb_port *port = typec_mux_get_drvdata(mux); + update_port_status(port); + if (port->orientation == TYPEC_ORIENTATION_NONE || port->role == USB_ROLE_NONE) return 0; if (state->mode == TYPEC_STATE_SAFE) return pmc_usb_mux_safe_state(port); if (state->mode == TYPEC_STATE_USB) - return pmc_usb_connect(port); + return pmc_usb_connect(port, port->role); if (state->alt) { switch (state->alt->svid) { @@ -325,7 +419,7 @@ pmc_usb_mux_set(struct typec_mux *mux, struct typec_mux_state *state) /* REVISIT: Try with usb3_port set to 0? */ break; case TYPEC_MODE_USB3: - return pmc_usb_connect(port); + return pmc_usb_connect(port, port->role); case TYPEC_MODE_USB4: return pmc_usb_mux_usb4(port, state); } @@ -339,38 +433,28 @@ static int pmc_usb_set_orientation(struct typec_switch *sw, { struct pmc_usb_port *port = typec_switch_get_drvdata(sw); - if (port->orientation == orientation) - return 0; + update_port_status(port); port->orientation = orientation; - if (port->role) { - if (orientation == TYPEC_ORIENTATION_NONE) - return pmc_usb_disconnect(port); - else - return pmc_usb_connect(port); - } - return 0; } static int pmc_usb_set_role(struct usb_role_switch *sw, enum usb_role role) { struct pmc_usb_port *port = usb_role_switch_get_drvdata(sw); + int ret; - if (port->role == role) - return 0; + update_port_status(port); - port->role = role; + if (role == USB_ROLE_NONE) + ret = pmc_usb_disconnect(port); + else + ret = pmc_usb_connect(port, role); - if (port->orientation) { - if (role == USB_ROLE_NONE) - return pmc_usb_disconnect(port); - else - return pmc_usb_connect(port); - } + port->role = role; - return 0; + return ret; } static int pmc_usb_register_port(struct pmc_usb *pmc, int index, @@ -444,6 +528,45 @@ err_unregister_switch: return ret; } +static int is_memory(struct acpi_resource *res, void *data) +{ + struct resource r; + + return !acpi_dev_resource_memory(res, &r); +} + +static int pmc_usb_probe_iom(struct pmc_usb *pmc) +{ + struct list_head resource_list; + struct resource_entry *rentry; + struct acpi_device *adev; + int ret; + + adev = acpi_dev_get_first_match_dev("INTC1072", NULL, -1); + if (!adev) + return -ENODEV; + + INIT_LIST_HEAD(&resource_list); + ret = acpi_dev_get_resources(adev, &resource_list, is_memory, NULL); + if (ret < 0) + return ret; + + rentry = list_first_entry_or_null(&resource_list, struct resource_entry, node); + if (rentry) + pmc->iom_base = devm_ioremap_resource(pmc->dev, rentry->res); + + acpi_dev_free_resource_list(&resource_list); + + if (!pmc->iom_base) { + put_device(&adev->dev); + return -ENOMEM; + } + + pmc->iom_adev = adev; + + return 0; +} + static int pmc_usb_probe(struct platform_device *pdev) { struct fwnode_handle *fwnode = NULL; @@ -458,6 +581,12 @@ static int 
pmc_usb_probe(struct platform_device *pdev) device_for_each_child_node(&pdev->dev, fwnode) pmc->num_ports++; + /* The IOM microcontroller has a limitation of max 4 ports. */ + if (pmc->num_ports > 4) { + dev_err(&pdev->dev, "driver limited to 4 ports\n"); + return -ERANGE; + } + pmc->port = devm_kcalloc(&pdev->dev, pmc->num_ports, sizeof(struct pmc_usb_port), GFP_KERNEL); if (!pmc->port) @@ -469,6 +598,10 @@ static int pmc_usb_probe(struct platform_device *pdev) pmc->dev = &pdev->dev; + ret = pmc_usb_probe_iom(pmc); + if (ret) + return ret; + /* * For every physical USB connector (USB2 and USB3 combo) there is a * child ACPI device node under the PMC mux ACPI device object. @@ -494,6 +627,8 @@ err_remove_ports: usb_role_switch_unregister(pmc->port[i].usb_sw); } + put_device(&pmc->iom_adev->dev); + return ret; } @@ -508,6 +643,8 @@ static int pmc_usb_remove(struct platform_device *pdev) usb_role_switch_unregister(pmc->port[i].usb_sw); } + put_device(&pmc->iom_adev->dev); + return 0; } diff --git a/drivers/usb/typec/stusb160x.c b/drivers/usb/typec/stusb160x.c new file mode 100644 index 000000000000..ce0bd7b3ad88 --- /dev/null +++ b/drivers/usb/typec/stusb160x.c @@ -0,0 +1,875 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * STMicroelectronics STUSB160x Type-C controller family driver + * + * Copyright (C) 2020, STMicroelectronics + * Author(s): Amelie Delaunay <amelie.delaunay@st.com> + */ + +#include <linux/bitfield.h> +#include <linux/i2c.h> +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/regmap.h> +#include <linux/regulator/consumer.h> +#include <linux/usb/role.h> +#include <linux/usb/typec.h> + +#define STUSB160X_ALERT_STATUS 0x0B /* RC */ +#define STUSB160X_ALERT_STATUS_MASK_CTRL 0x0C /* RW */ +#define STUSB160X_CC_CONNECTION_STATUS_TRANS 0x0D /* RC */ +#define STUSB160X_CC_CONNECTION_STATUS 0x0E /* RO */ +#define STUSB160X_MONITORING_STATUS_TRANS 0x0F /* RC */ +#define STUSB160X_MONITORING_STATUS 0x10 /* RO */ +#define STUSB160X_CC_OPERATION_STATUS 0x11 /* RO */ +#define STUSB160X_HW_FAULT_STATUS_TRANS 0x12 /* RC */ +#define STUSB160X_HW_FAULT_STATUS 0x13 /* RO */ +#define STUSB160X_CC_CAPABILITY_CTRL 0x18 /* RW */ +#define STUSB160X_CC_VCONN_SWITCH_CTRL 0x1E /* RW */ +#define STUSB160X_VCONN_MONITORING_CTRL 0x20 /* RW */ +#define STUSB160X_VBUS_MONITORING_RANGE_CTRL 0x22 /* RW */ +#define STUSB160X_RESET_CTRL 0x23 /* RW */ +#define STUSB160X_VBUS_DISCHARGE_TIME_CTRL 0x25 /* RW */ +#define STUSB160X_VBUS_DISCHARGE_STATUS 0x26 /* RO */ +#define STUSB160X_VBUS_ENABLE_STATUS 0x27 /* RO */ +#define STUSB160X_CC_POWER_MODE_CTRL 0x28 /* RW */ +#define STUSB160X_VBUS_MONITORING_CTRL 0x2E /* RW */ +#define STUSB1600_REG_MAX 0x2F /* RO - Reserved */ + +/* STUSB160X_ALERT_STATUS/STUSB160X_ALERT_STATUS_MASK_CTRL bitfields */ +#define STUSB160X_HW_FAULT BIT(4) +#define STUSB160X_MONITORING BIT(5) +#define STUSB160X_CC_CONNECTION BIT(6) +#define STUSB160X_ALL_ALERTS GENMASK(6, 4) + +/* STUSB160X_CC_CONNECTION_STATUS_TRANS bitfields */ +#define STUSB160X_CC_ATTACH_TRANS BIT(0) + +/* STUSB160X_CC_CONNECTION_STATUS bitfields */ +#define STUSB160X_CC_ATTACH BIT(0) +#define STUSB160X_CC_VCONN_SUPPLY BIT(1) +#define STUSB160X_CC_DATA_ROLE(s) (!!((s) & BIT(2))) +#define STUSB160X_CC_POWER_ROLE(s) (!!((s) & BIT(3))) +#define STUSB160X_CC_ATTACHED_MODE GENMASK(7, 5) + +/* STUSB160X_MONITORING_STATUS_TRANS bitfields */ +#define STUSB160X_VCONN_PRESENCE_TRANS BIT(0) +#define STUSB160X_VBUS_PRESENCE_TRANS BIT(1) +#define STUSB160X_VBUS_VSAFE0V_TRANS BIT(2) 
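The STUSB160x register bits being declared here use BIT()/GENMASK() masks that the driver later consumes through <linux/bitfield.h>. A tiny reminder of that idiom (illustrative only; EXAMPLE_CURRENT_ADVERTISED is a made-up field, not a real STUSB160x register):

#include <linux/bitfield.h>
#include <linux/bits.h>

#define EXAMPLE_CURRENT_ADVERTISED	GENMASK(7, 6)

static inline unsigned int example_get_current(unsigned int reg)
{
	/* FIELD_GET() derives the shift from the mask, so no *_SHIFT constant is needed */
	return FIELD_GET(EXAMPLE_CURRENT_ADVERTISED, reg);
}

static inline unsigned int example_set_current(unsigned int reg, unsigned int mode)
{
	reg &= ~EXAMPLE_CURRENT_ADVERTISED;
	return reg | FIELD_PREP(EXAMPLE_CURRENT_ADVERTISED, mode);
}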
+#define STUSB160X_VBUS_VALID_TRANS BIT(3) + +/* STUSB160X_MONITORING_STATUS bitfields */ +#define STUSB160X_VCONN_PRESENCE BIT(0) +#define STUSB160X_VBUS_PRESENCE BIT(1) +#define STUSB160X_VBUS_VSAFE0V BIT(2) +#define STUSB160X_VBUS_VALID BIT(3) + +/* STUSB160X_CC_OPERATION_STATUS bitfields */ +#define STUSB160X_TYPEC_FSM_STATE GENMASK(4, 0) +#define STUSB160X_SINK_POWER_STATE GENMASK(6, 5) +#define STUSB160X_CC_ATTACHED BIT(7) + +/* STUSB160X_HW_FAULT_STATUS_TRANS bitfields */ +#define STUSB160X_VCONN_SW_OVP_FAULT_TRANS BIT(0) +#define STUSB160X_VCONN_SW_OCP_FAULT_TRANS BIT(1) +#define STUSB160X_VCONN_SW_RVP_FAULT_TRANS BIT(2) +#define STUSB160X_VPU_VALID_TRANS BIT(4) +#define STUSB160X_VPU_OVP_FAULT_TRANS BIT(5) +#define STUSB160X_THERMAL_FAULT BIT(7) + +/* STUSB160X_HW_FAULT_STATUS bitfields */ +#define STUSB160X_VCONN_SW_OVP_FAULT_CC2 BIT(0) +#define STUSB160X_VCONN_SW_OVP_FAULT_CC1 BIT(1) +#define STUSB160X_VCONN_SW_OCP_FAULT_CC2 BIT(2) +#define STUSB160X_VCONN_SW_OCP_FAULT_CC1 BIT(3) +#define STUSB160X_VCONN_SW_RVP_FAULT_CC2 BIT(4) +#define STUSB160X_VCONN_SW_RVP_FAULT_CC1 BIT(5) +#define STUSB160X_VPU_VALID BIT(6) +#define STUSB160X_VPU_OVP_FAULT BIT(7) + +/* STUSB160X_CC_CAPABILITY_CTRL bitfields */ +#define STUSB160X_CC_VCONN_SUPPLY_EN BIT(0) +#define STUSB160X_CC_VCONN_DISCHARGE_EN BIT(4) +#define STUSB160X_CC_CURRENT_ADVERTISED GENMASK(7, 6) + +/* STUSB160X_VCONN_SWITCH_CTRL bitfields */ +#define STUSB160X_CC_VCONN_SWITCH_ILIM GENMASK(3, 0) + +/* STUSB160X_VCONN_MONITORING_CTRL bitfields */ +#define STUSB160X_VCONN_UVLO_THRESHOLD BIT(6) +#define STUSB160X_VCONN_MONITORING_EN BIT(7) + +/* STUSB160X_VBUS_MONITORING_RANGE_CTRL bitfields */ +#define STUSB160X_SHIFT_LOW_VBUS_LIMIT GENMASK(3, 0) +#define STUSB160X_SHIFT_HIGH_VBUS_LIMIT GENMASK(7, 4) + +/* STUSB160X_RESET_CTRL bitfields */ +#define STUSB160X_SW_RESET_EN BIT(0) + +/* STUSB160X_VBUS_DISCHARGE_TIME_CTRL bitfields */ +#define STUSBXX02_VBUS_DISCHARGE_TIME_TO_PDO GENMASK(3, 0) +#define STUSB160X_VBUS_DISCHARGE_TIME_TO_0V GENMASK(7, 4) + +/* STUSB160X_VBUS_DISCHARGE_STATUS bitfields */ +#define STUSB160X_VBUS_DISCHARGE_EN BIT(7) + +/* STUSB160X_VBUS_ENABLE_STATUS bitfields */ +#define STUSB160X_VBUS_SOURCE_EN BIT(0) +#define STUSB160X_VBUS_SINK_EN BIT(1) + +/* STUSB160X_CC_POWER_MODE_CTRL bitfields */ +#define STUSB160X_CC_POWER_MODE GENMASK(2, 0) + +/* STUSB160X_VBUS_MONITORING_CTRL bitfields */ +#define STUSB160X_VDD_UVLO_DISABLE BIT(0) +#define STUSB160X_VBUS_VSAFE0V_THRESHOLD GENMASK(2, 1) +#define STUSB160X_VBUS_RANGE_DISABLE BIT(4) +#define STUSB160X_VDD_OVLO_DISABLE BIT(6) + +enum stusb160x_pwr_mode { + SOURCE_WITH_ACCESSORY, + SINK_WITH_ACCESSORY, + SINK_WITHOUT_ACCESSORY, + DUAL_WITH_ACCESSORY, + DUAL_WITH_ACCESSORY_AND_TRY_SRC, + DUAL_WITH_ACCESSORY_AND_TRY_SNK, +}; + +enum stusb160x_attached_mode { + NO_DEVICE_ATTACHED, + SINK_ATTACHED, + SOURCE_ATTACHED, + DEBUG_ACCESSORY_ATTACHED, + AUDIO_ACCESSORY_ATTACHED, +}; + +struct stusb160x { + struct device *dev; + struct regmap *regmap; + struct regulator *vdd_supply; + struct regulator *vsys_supply; + struct regulator *vconn_supply; + struct regulator *main_supply; + + struct typec_port *port; + struct typec_capability capability; + struct typec_partner *partner; + + enum typec_port_type port_type; + enum typec_pwr_opmode pwr_opmode; + bool vbus_on; + + struct usb_role_switch *role_sw; +}; + +static bool stusb160x_reg_writeable(struct device *dev, unsigned int reg) +{ + switch (reg) { + case STUSB160X_ALERT_STATUS_MASK_CTRL: + case STUSB160X_CC_CAPABILITY_CTRL: + 
case STUSB160X_CC_VCONN_SWITCH_CTRL: + case STUSB160X_VCONN_MONITORING_CTRL: + case STUSB160X_VBUS_MONITORING_RANGE_CTRL: + case STUSB160X_RESET_CTRL: + case STUSB160X_VBUS_DISCHARGE_TIME_CTRL: + case STUSB160X_CC_POWER_MODE_CTRL: + case STUSB160X_VBUS_MONITORING_CTRL: + return true; + default: + return false; + } +} + +static bool stusb160x_reg_readable(struct device *dev, unsigned int reg) +{ + if (reg <= 0x0A || + (reg >= 0x14 && reg <= 0x17) || + (reg >= 0x19 && reg <= 0x1D) || + (reg >= 0x29 && reg <= 0x2D) || + (reg == 0x1F || reg == 0x21 || reg == 0x24 || reg == 0x2F)) + return false; + else + return true; +} + +static bool stusb160x_reg_volatile(struct device *dev, unsigned int reg) +{ + switch (reg) { + case STUSB160X_ALERT_STATUS: + case STUSB160X_CC_CONNECTION_STATUS_TRANS: + case STUSB160X_CC_CONNECTION_STATUS: + case STUSB160X_MONITORING_STATUS_TRANS: + case STUSB160X_MONITORING_STATUS: + case STUSB160X_CC_OPERATION_STATUS: + case STUSB160X_HW_FAULT_STATUS_TRANS: + case STUSB160X_HW_FAULT_STATUS: + case STUSB160X_VBUS_DISCHARGE_STATUS: + case STUSB160X_VBUS_ENABLE_STATUS: + return true; + default: + return false; + } +} + +static bool stusb160x_reg_precious(struct device *dev, unsigned int reg) +{ + switch (reg) { + case STUSB160X_ALERT_STATUS: + case STUSB160X_CC_CONNECTION_STATUS_TRANS: + case STUSB160X_MONITORING_STATUS_TRANS: + case STUSB160X_HW_FAULT_STATUS_TRANS: + return true; + default: + return false; + } +} + +static const struct regmap_config stusb1600_regmap_config = { + .reg_bits = 8, + .reg_stride = 1, + .val_bits = 8, + .max_register = STUSB1600_REG_MAX, + .writeable_reg = stusb160x_reg_writeable, + .readable_reg = stusb160x_reg_readable, + .volatile_reg = stusb160x_reg_volatile, + .precious_reg = stusb160x_reg_precious, + .cache_type = REGCACHE_RBTREE, +}; + +static bool stusb160x_get_vconn(struct stusb160x *chip) +{ + u32 val; + int ret; + + ret = regmap_read(chip->regmap, STUSB160X_CC_CAPABILITY_CTRL, &val); + if (ret) { + dev_err(chip->dev, "Unable to get Vconn status: %d\n", ret); + return false; + } + + return !!FIELD_GET(STUSB160X_CC_VCONN_SUPPLY_EN, val); +} + +static int stusb160x_set_vconn(struct stusb160x *chip, bool on) +{ + int ret; + + /* Manage VCONN input supply */ + if (chip->vconn_supply) { + if (on) { + ret = regulator_enable(chip->vconn_supply); + if (ret) { + dev_err(chip->dev, + "failed to enable vconn supply: %d\n", + ret); + return ret; + } + } else { + regulator_disable(chip->vconn_supply); + } + } + + /* Manage VCONN monitoring and power path */ + ret = regmap_update_bits(chip->regmap, STUSB160X_VCONN_MONITORING_CTRL, + STUSB160X_VCONN_MONITORING_EN, + on ? 
STUSB160X_VCONN_MONITORING_EN : 0); + if (ret) + goto vconn_reg_disable; + + return 0; + +vconn_reg_disable: + if (chip->vconn_supply && on) + regulator_disable(chip->vconn_supply); + + return ret; +} + +static enum typec_pwr_opmode stusb160x_get_pwr_opmode(struct stusb160x *chip) +{ + u32 val; + int ret; + + ret = regmap_read(chip->regmap, STUSB160X_CC_CAPABILITY_CTRL, &val); + if (ret) { + dev_err(chip->dev, "Unable to get pwr opmode: %d\n", ret); + return TYPEC_PWR_MODE_USB; + } + + return FIELD_GET(STUSB160X_CC_CURRENT_ADVERTISED, val); +} + +static enum typec_accessory stusb160x_get_accessory(u32 status) +{ + enum stusb160x_attached_mode mode; + + mode = FIELD_GET(STUSB160X_CC_ATTACHED_MODE, status); + + switch (mode) { + case DEBUG_ACCESSORY_ATTACHED: + return TYPEC_ACCESSORY_DEBUG; + case AUDIO_ACCESSORY_ATTACHED: + return TYPEC_ACCESSORY_AUDIO; + default: + return TYPEC_ACCESSORY_NONE; + } +} + +static enum typec_role stusb160x_get_vconn_role(u32 status) +{ + if (FIELD_GET(STUSB160X_CC_VCONN_SUPPLY, status)) + return TYPEC_SOURCE; + + return TYPEC_SINK; +} + +static void stusb160x_set_data_role(struct stusb160x *chip, + enum typec_data_role data_role, + bool attached) +{ + enum usb_role usb_role = USB_ROLE_NONE; + + if (attached) { + if (data_role == TYPEC_HOST) + usb_role = USB_ROLE_HOST; + else + usb_role = USB_ROLE_DEVICE; + } + + usb_role_switch_set_role(chip->role_sw, usb_role); + typec_set_data_role(chip->port, data_role); +} + +static int stusb160x_attach(struct stusb160x *chip, u32 status) +{ + struct typec_partner_desc desc; + int ret; + + if ((STUSB160X_CC_POWER_ROLE(status) == TYPEC_SOURCE) && + chip->vdd_supply) { + ret = regulator_enable(chip->vdd_supply); + if (ret) { + dev_err(chip->dev, + "Failed to enable Vbus supply: %d\n", ret); + return ret; + } + chip->vbus_on = true; + } + + desc.usb_pd = false; + desc.accessory = stusb160x_get_accessory(status); + desc.identity = NULL; + + chip->partner = typec_register_partner(chip->port, &desc); + if (IS_ERR(chip->partner)) { + ret = PTR_ERR(chip->partner); + goto vbus_disable; + } + + typec_set_pwr_role(chip->port, STUSB160X_CC_POWER_ROLE(status)); + typec_set_pwr_opmode(chip->port, stusb160x_get_pwr_opmode(chip)); + typec_set_vconn_role(chip->port, stusb160x_get_vconn_role(status)); + stusb160x_set_data_role(chip, STUSB160X_CC_DATA_ROLE(status), true); + + return 0; + +vbus_disable: + if (chip->vbus_on) { + regulator_disable(chip->vdd_supply); + chip->vbus_on = false; + } + + return ret; +} + +static void stusb160x_detach(struct stusb160x *chip, u32 status) +{ + typec_unregister_partner(chip->partner); + chip->partner = NULL; + + typec_set_pwr_role(chip->port, STUSB160X_CC_POWER_ROLE(status)); + typec_set_pwr_opmode(chip->port, TYPEC_PWR_MODE_USB); + typec_set_vconn_role(chip->port, stusb160x_get_vconn_role(status)); + stusb160x_set_data_role(chip, STUSB160X_CC_DATA_ROLE(status), false); + + if (chip->vbus_on) { + regulator_disable(chip->vdd_supply); + chip->vbus_on = false; + } +} + +static irqreturn_t stusb160x_irq_handler(int irq, void *data) +{ + struct stusb160x *chip = data; + u32 pending, trans, status; + int ret; + + ret = regmap_read(chip->regmap, STUSB160X_ALERT_STATUS, &pending); + if (ret) + goto err; + + if (pending & STUSB160X_CC_CONNECTION) { + ret = regmap_read(chip->regmap, + STUSB160X_CC_CONNECTION_STATUS_TRANS, &trans); + if (ret) + goto err; + ret = regmap_read(chip->regmap, + STUSB160X_CC_CONNECTION_STATUS, &status); + if (ret) + goto err; + + if (trans & STUSB160X_CC_ATTACH_TRANS) { + if (status & 
STUSB160X_CC_ATTACH) { + ret = stusb160x_attach(chip, status); + if (ret) + goto err; + } else { + stusb160x_detach(chip, status); + } + } + } +err: + return IRQ_HANDLED; +} + +static int stusb160x_irq_init(struct stusb160x *chip, int irq) +{ + u32 status; + int ret; + + ret = regmap_read(chip->regmap, + STUSB160X_CC_CONNECTION_STATUS, &status); + if (ret) + return ret; + + if (status & STUSB160X_CC_ATTACH) { + ret = stusb160x_attach(chip, status); + if (ret) + dev_err(chip->dev, "attach failed: %d\n", ret); + } + + ret = devm_request_threaded_irq(chip->dev, irq, NULL, + stusb160x_irq_handler, IRQF_ONESHOT, + dev_name(chip->dev), chip); + if (ret) + goto partner_unregister; + + /* Unmask CC_CONNECTION events */ + ret = regmap_write_bits(chip->regmap, STUSB160X_ALERT_STATUS_MASK_CTRL, + STUSB160X_CC_CONNECTION, 0); + if (ret) + goto partner_unregister; + + return 0; + +partner_unregister: + if (chip->partner) { + typec_unregister_partner(chip->partner); + chip->partner = NULL; + } + + return ret; +} + +static int stusb160x_chip_init(struct stusb160x *chip) +{ + u32 val; + int ret; + + /* Change the default Type-C power mode */ + if (chip->port_type == TYPEC_PORT_SRC) + ret = regmap_update_bits(chip->regmap, + STUSB160X_CC_POWER_MODE_CTRL, + STUSB160X_CC_POWER_MODE, + SOURCE_WITH_ACCESSORY); + else if (chip->port_type == TYPEC_PORT_SNK) + ret = regmap_update_bits(chip->regmap, + STUSB160X_CC_POWER_MODE_CTRL, + STUSB160X_CC_POWER_MODE, + SINK_WITH_ACCESSORY); + else /* (chip->port_type == TYPEC_PORT_DRP) */ + ret = regmap_update_bits(chip->regmap, + STUSB160X_CC_POWER_MODE_CTRL, + STUSB160X_CC_POWER_MODE, + DUAL_WITH_ACCESSORY); + if (ret) + return ret; + + if (chip->port_type == TYPEC_PORT_SNK) + goto skip_src; + + /* Change the default Type-C Source power operation mode capability */ + ret = regmap_update_bits(chip->regmap, STUSB160X_CC_CAPABILITY_CTRL, + STUSB160X_CC_CURRENT_ADVERTISED, + FIELD_PREP(STUSB160X_CC_CURRENT_ADVERTISED, + chip->pwr_opmode)); + if (ret) + return ret; + + /* Manage Type-C Source Vconn supply */ + if (stusb160x_get_vconn(chip)) { + ret = stusb160x_set_vconn(chip, true); + if (ret) + return ret; + } + +skip_src: + /* Mask all events interrupts - to be unmasked with interrupt support */ + ret = regmap_update_bits(chip->regmap, STUSB160X_ALERT_STATUS_MASK_CTRL, + STUSB160X_ALL_ALERTS, STUSB160X_ALL_ALERTS); + if (ret) + return ret; + + /* Read status at least once to clear any stale interrupts */ + regmap_read(chip->regmap, STUSB160X_ALERT_STATUS, &val); + regmap_read(chip->regmap, STUSB160X_CC_CONNECTION_STATUS_TRANS, &val); + regmap_read(chip->regmap, STUSB160X_MONITORING_STATUS_TRANS, &val); + regmap_read(chip->regmap, STUSB160X_HW_FAULT_STATUS_TRANS, &val); + + return 0; +} + +static int stusb160x_get_fw_caps(struct stusb160x *chip, + struct fwnode_handle *fwnode) +{ + const char *cap_str; + int ret; + + chip->capability.fwnode = fwnode; + + /* + * Supported port type can be configured through device tree + * else it is read from chip registers in stusb160x_get_caps. 
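A sketch of the pattern used right here in stusb160x_get_fw_caps(): string properties on the connector fwnode are translated into typec enums, using the typec_find_pwr_opmode() helper this series adds to class.c (illustrative only; the example_ name is invented and error handling is trimmed):

#include <linux/property.h>
#include <linux/usb/typec.h>

static int example_parse_connector(struct fwnode_handle *fwnode,
				   struct typec_capability *cap)
{
	const char *s;
	int v;

	/* an absent property means "keep the chip default", so ignore read errors */
	if (!fwnode_property_read_string(fwnode, "power-role", &s)) {
		v = typec_find_port_power_role(s);
		if (v < 0)
			return v;
		cap->type = v;
	}

	if (!fwnode_property_read_string(fwnode, "power-opmode", &s)) {
		v = typec_find_pwr_opmode(s);
		if (v < 0)
			return v;
	}

	return 0;
}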
+ */ + ret = fwnode_property_read_string(fwnode, "power-role", &cap_str); + if (!ret) { + chip->port_type = typec_find_port_power_role(cap_str); + if (chip->port_type < 0) { + ret = chip->port_type; + return ret; + } + } + chip->capability.type = chip->port_type; + + /* Skip DRP/Source capabilities in case of Sink only */ + if (chip->port_type == TYPEC_PORT_SNK) + return 0; + + if (chip->port_type == TYPEC_PORT_DRP) + chip->capability.prefer_role = TYPEC_SINK; + + /* + * Supported power operation mode can be configured through device tree + * else it is read from chip registers in stusb160x_get_caps. + */ + ret = fwnode_property_read_string(fwnode, "power-opmode", &cap_str); + if (!ret) { + chip->pwr_opmode = typec_find_pwr_opmode(cap_str); + /* Power delivery not yet supported */ + if (chip->pwr_opmode < 0 || + chip->pwr_opmode == TYPEC_PWR_MODE_PD) { + ret = chip->pwr_opmode < 0 ? chip->pwr_opmode : -EINVAL; + dev_err(chip->dev, "bad power operation mode: %d\n", + chip->pwr_opmode); + return ret; + } + } + + return 0; +} + +static int stusb160x_get_caps(struct stusb160x *chip) +{ + enum typec_port_type *type = &chip->capability.type; + enum typec_port_data *data = &chip->capability.data; + enum typec_accessory *accessory = chip->capability.accessory; + u32 val; + int ret; + + chip->capability.revision = USB_TYPEC_REV_1_2; + + ret = regmap_read(chip->regmap, STUSB160X_CC_POWER_MODE_CTRL, &val); + if (ret) + return ret; + + switch (FIELD_GET(STUSB160X_CC_POWER_MODE, val)) { + case SOURCE_WITH_ACCESSORY: + *type = TYPEC_PORT_SRC; + *data = TYPEC_PORT_DFP; + *accessory++ = TYPEC_ACCESSORY_AUDIO; + *accessory++ = TYPEC_ACCESSORY_DEBUG; + break; + case SINK_WITH_ACCESSORY: + *type = TYPEC_PORT_SNK; + *data = TYPEC_PORT_UFP; + *accessory++ = TYPEC_ACCESSORY_AUDIO; + *accessory++ = TYPEC_ACCESSORY_DEBUG; + break; + case SINK_WITHOUT_ACCESSORY: + *type = TYPEC_PORT_SNK; + *data = TYPEC_PORT_UFP; + break; + case DUAL_WITH_ACCESSORY: + case DUAL_WITH_ACCESSORY_AND_TRY_SRC: + case DUAL_WITH_ACCESSORY_AND_TRY_SNK: + *type = TYPEC_PORT_DRP; + *data = TYPEC_PORT_DRD; + *accessory++ = TYPEC_ACCESSORY_AUDIO; + *accessory++ = TYPEC_ACCESSORY_DEBUG; + break; + default: + return -EINVAL; + } + + chip->port_type = *type; + chip->pwr_opmode = stusb160x_get_pwr_opmode(chip); + + return 0; +} + +static const struct of_device_id stusb160x_of_match[] = { + { .compatible = "st,stusb1600", .data = &stusb1600_regmap_config}, + {}, +}; + +static int stusb160x_probe(struct i2c_client *client) +{ + struct stusb160x *chip; + const struct of_device_id *match; + struct regmap_config *regmap_config; + struct fwnode_handle *fwnode; + int ret; + + chip = devm_kzalloc(&client->dev, sizeof(struct stusb160x), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + i2c_set_clientdata(client, chip); + + match = i2c_of_match_device(stusb160x_of_match, client); + regmap_config = (struct regmap_config *)match->data; + chip->regmap = devm_regmap_init_i2c(client, regmap_config); + if (IS_ERR(chip->regmap)) { + ret = PTR_ERR(chip->regmap); + dev_err(&client->dev, + "Failed to allocate register map:%d\n", ret); + return ret; + } + + chip->dev = &client->dev; + + chip->vsys_supply = devm_regulator_get_optional(chip->dev, "vsys"); + if (IS_ERR(chip->vsys_supply)) { + ret = PTR_ERR(chip->vsys_supply); + if (ret != -ENODEV) + return ret; + chip->vsys_supply = NULL; + } + + chip->vdd_supply = devm_regulator_get_optional(chip->dev, "vdd"); + if (IS_ERR(chip->vdd_supply)) { + ret = PTR_ERR(chip->vdd_supply); + if (ret != -ENODEV) + return ret; + 
chip->vdd_supply = NULL; + } + + chip->vconn_supply = devm_regulator_get_optional(chip->dev, "vconn"); + if (IS_ERR(chip->vconn_supply)) { + ret = PTR_ERR(chip->vconn_supply); + if (ret != -ENODEV) + return ret; + chip->vconn_supply = NULL; + } + + fwnode = device_get_named_child_node(chip->dev, "connector"); + if (IS_ERR(fwnode)) + return PTR_ERR(fwnode); + + /* + * When both VDD and VSYS power supplies are present, the low power + * supply VSYS is selected when VSYS voltage is above 3.1 V. + * Otherwise VDD is selected. + */ + if (chip->vdd_supply && + (!chip->vsys_supply || + (regulator_get_voltage(chip->vsys_supply) <= 3100000))) + chip->main_supply = chip->vdd_supply; + else + chip->main_supply = chip->vsys_supply; + + if (chip->main_supply) { + ret = regulator_enable(chip->main_supply); + if (ret) { + dev_err(chip->dev, + "Failed to enable main supply: %d\n", ret); + goto fwnode_put; + } + } + + /* Get configuration from chip */ + ret = stusb160x_get_caps(chip); + if (ret) { + dev_err(chip->dev, "Failed to get port caps: %d\n", ret); + goto main_reg_disable; + } + + /* Get optional re-configuration from device tree */ + ret = stusb160x_get_fw_caps(chip, fwnode); + if (ret) { + dev_err(chip->dev, "Failed to get connector caps: %d\n", ret); + goto main_reg_disable; + } + + ret = stusb160x_chip_init(chip); + if (ret) { + dev_err(chip->dev, "Failed to init port: %d\n", ret); + goto main_reg_disable; + } + + chip->port = typec_register_port(chip->dev, &chip->capability); + if (!chip->port) { + ret = -ENODEV; + goto all_reg_disable; + } + + /* + * Default power operation mode initialization: will be updated upon + * attach/detach interrupt + */ + typec_set_pwr_opmode(chip->port, chip->pwr_opmode); + + if (client->irq) { + ret = stusb160x_irq_init(chip, client->irq); + if (ret) + goto port_unregister; + + chip->role_sw = fwnode_usb_role_switch_get(fwnode); + if (IS_ERR(chip->role_sw)) { + ret = PTR_ERR(chip->role_sw); + if (ret != -EPROBE_DEFER) + dev_err(chip->dev, + "Failed to get usb role switch: %d\n", + ret); + goto port_unregister; + } + } else { + /* + * If Source or Dual power role, need to enable VDD supply + * providing Vbus if present. In case of interrupt support, + * VDD supply will be dynamically managed upon attach/detach + * interrupt. 
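The vsys/vdd/vconn lookups above all follow the same optional-regulator pattern; a condensed sketch (not from the patch) of how -ENODEV is treated as "supply not described" while every other error, including -EPROBE_DEFER, is propagated:

#include <linux/err.h>
#include <linux/regulator/consumer.h>

static int example_get_optional_supply(struct device *dev, const char *id,
				       struct regulator **out)
{
	struct regulator *reg = devm_regulator_get_optional(dev, id);

	if (IS_ERR(reg)) {
		if (PTR_ERR(reg) != -ENODEV)
			return PTR_ERR(reg);
		reg = NULL;		/* supply simply not wired up */
	}

	*out = reg;
	return 0;
}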
+ */ + if (chip->port_type != TYPEC_PORT_SNK && chip->vdd_supply) { + ret = regulator_enable(chip->vdd_supply); + if (ret) { + dev_err(chip->dev, + "Failed to enable VDD supply: %d\n", + ret); + goto port_unregister; + } + chip->vbus_on = true; + } + } + + fwnode_handle_put(fwnode); + + return 0; + +port_unregister: + typec_unregister_port(chip->port); +all_reg_disable: + if (stusb160x_get_vconn(chip)) + stusb160x_set_vconn(chip, false); +main_reg_disable: + if (chip->main_supply) + regulator_disable(chip->main_supply); +fwnode_put: + fwnode_handle_put(fwnode); + + return ret; +} + +static int stusb160x_remove(struct i2c_client *client) +{ + struct stusb160x *chip = i2c_get_clientdata(client); + + if (chip->partner) { + typec_unregister_partner(chip->partner); + chip->partner = NULL; + } + + if (chip->vbus_on) + regulator_disable(chip->vdd_supply); + + if (chip->role_sw) + usb_role_switch_put(chip->role_sw); + + typec_unregister_port(chip->port); + + if (stusb160x_get_vconn(chip)) + stusb160x_set_vconn(chip, false); + + if (chip->main_supply) + regulator_disable(chip->main_supply); + + return 0; +} + +static int __maybe_unused stusb160x_suspend(struct device *dev) +{ + struct stusb160x *chip = dev_get_drvdata(dev); + + /* Mask interrupts */ + return regmap_update_bits(chip->regmap, + STUSB160X_ALERT_STATUS_MASK_CTRL, + STUSB160X_ALL_ALERTS, STUSB160X_ALL_ALERTS); +} + +static int __maybe_unused stusb160x_resume(struct device *dev) +{ + struct stusb160x *chip = dev_get_drvdata(dev); + u32 status; + int ret; + + ret = regcache_sync(chip->regmap); + if (ret) + return ret; + + /* Check if attach/detach occurred during low power */ + ret = regmap_read(chip->regmap, + STUSB160X_CC_CONNECTION_STATUS, &status); + if (ret) + return ret; + + if (chip->partner && !(status & STUSB160X_CC_ATTACH)) + stusb160x_detach(chip, status); + + if (!chip->partner && (status & STUSB160X_CC_ATTACH)) { + ret = stusb160x_attach(chip, status); + if (ret) + dev_err(chip->dev, "attach failed: %d\n", ret); + } + + /* Unmask interrupts */ + return regmap_write_bits(chip->regmap, STUSB160X_ALERT_STATUS_MASK_CTRL, + STUSB160X_CC_CONNECTION, 0); +} + +static SIMPLE_DEV_PM_OPS(stusb160x_pm_ops, stusb160x_suspend, stusb160x_resume); + +static struct i2c_driver stusb160x_driver = { + .driver = { + .name = "stusb160x", + .pm = &stusb160x_pm_ops, + .of_match_table = stusb160x_of_match, + }, + .probe_new = stusb160x_probe, + .remove = stusb160x_remove, +}; +module_i2c_driver(stusb160x_driver); + +MODULE_AUTHOR("Amelie Delaunay <amelie.delaunay@st.com>"); +MODULE_DESCRIPTION("STMicroelectronics STUSB160x Type-C controller driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/typec/tcpm/Kconfig b/drivers/usb/typec/tcpm/Kconfig index fa3f39336246..557f392fe24d 100644 --- a/drivers/usb/typec/tcpm/Kconfig +++ b/drivers/usb/typec/tcpm/Kconfig @@ -27,6 +27,20 @@ config TYPEC_RT1711H Type-C Port Controller Manager to provide USB PD and USB Type-C functionalities. +config TYPEC_MT6360 + tristate "Mediatek MT6360 Type-C driver" + depends on MFD_MT6360 + help + Mediatek MT6360 is a multi-functional IC that includes + USB Type-C. It works with Type-C Port Controller Manager + to provide USB PD and USB Type-C functionalities. + +config TYPEC_TCPCI_MAXIM + tristate "Maxim TCPCI based Type-C chip driver" + help + MAXIM TCPCI based Type-C/PD chip driver. Works with + with Type-C Port Controller Manager. 
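stusb160x_resume() above shows a common regmap-backed resume shape: interrupts stay masked across suspend, regcache_sync() replays cached writes on resume, and a fresh status read reconciles any attach/detach that happened while asleep. A reduced sketch of that flow (illustrative only; register and bit are passed in rather than assumed):

#include <linux/regmap.h>
#include <linux/types.h>

static int example_resume(struct regmap *map, unsigned int status_reg,
			  unsigned int attach_bit, bool *attached)
{
	unsigned int status;
	int ret;

	/* replay register writes cached while the device was powered down */
	ret = regcache_sync(map);
	if (ret)
		return ret;

	/* the interrupt was masked, so re-read the state it would have signalled */
	ret = regmap_read(map, status_reg, &status);
	if (ret)
		return ret;

	*attached = status & attach_bit;
	return 0;
}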
+ endif # TYPEC_TCPCI config TYPEC_FUSB302 diff --git a/drivers/usb/typec/tcpm/Makefile b/drivers/usb/typec/tcpm/Makefile index a5ff6c8eb892..7d499f3569fd 100644 --- a/drivers/usb/typec/tcpm/Makefile +++ b/drivers/usb/typec/tcpm/Makefile @@ -1,7 +1,9 @@ # SPDX-License-Identifier: GPL-2.0 -obj-$(CONFIG_TYPEC_TCPM) += tcpm.o -obj-$(CONFIG_TYPEC_FUSB302) += fusb302.o -obj-$(CONFIG_TYPEC_WCOVE) += typec_wcove.o -typec_wcove-y := wcove.o -obj-$(CONFIG_TYPEC_TCPCI) += tcpci.o -obj-$(CONFIG_TYPEC_RT1711H) += tcpci_rt1711h.o +obj-$(CONFIG_TYPEC_TCPM) += tcpm.o +obj-$(CONFIG_TYPEC_FUSB302) += fusb302.o +obj-$(CONFIG_TYPEC_WCOVE) += typec_wcove.o +typec_wcove-y := wcove.o +obj-$(CONFIG_TYPEC_TCPCI) += tcpci.o +obj-$(CONFIG_TYPEC_RT1711H) += tcpci_rt1711h.o +obj-$(CONFIG_TYPEC_MT6360) += tcpci_mt6360.o +obj-$(CONFIG_TYPEC_TCPCI_MAXIM) += tcpci_maxim.o diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c index bd80e03b2b6f..f9f0af64da5f 100644 --- a/drivers/usb/typec/tcpm/tcpci.c +++ b/drivers/usb/typec/tcpm/tcpci.c @@ -38,6 +38,12 @@ struct tcpci_chip { struct tcpci_data data; }; +struct tcpm_port *tcpci_get_tcpm_port(struct tcpci *tcpci) +{ + return tcpci->port; +} +EXPORT_SYMBOL_GPL(tcpci_get_tcpm_port); + static inline struct tcpci *tcpc_to_tcpci(struct tcpc_dev *tcpc) { return container_of(tcpc, struct tcpci, tcpc); @@ -191,12 +197,47 @@ static int tcpci_set_polarity(struct tcpc_dev *tcpc, struct tcpci *tcpci = tcpc_to_tcpci(tcpc); unsigned int reg; int ret; + enum typec_cc_status cc1, cc2; - /* Keep the disconnect cc line open */ + /* Obtain Rp setting from role control */ ret = regmap_read(tcpci->regmap, TCPC_ROLE_CTRL, ®); if (ret < 0) return ret; + ret = tcpci_get_cc(tcpc, &cc1, &cc2); + if (ret < 0) + return ret; + + /* + * When port has drp toggling enabled, ROLE_CONTROL would only have the initial + * terminations for the toggling and does not indicate the final cc + * terminations when ConnectionResult is 0 i.e. drp toggling stops and + * the connection is resolbed. Infer port role from TCPC_CC_STATUS based on the + * terminations seen. The port role is then used to set the cc terminations. + */ + if (reg & TCPC_ROLE_CTRL_DRP) { + /* Disable DRP for the OPEN setting to take effect */ + reg = reg & ~TCPC_ROLE_CTRL_DRP; + + if (polarity == TYPEC_POLARITY_CC2) { + reg &= ~(TCPC_ROLE_CTRL_CC2_MASK << TCPC_ROLE_CTRL_CC2_SHIFT); + /* Local port is source */ + if (cc2 == TYPEC_CC_RD) + /* Role control would have the Rp setting when DRP was enabled */ + reg |= TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC2_SHIFT; + else + reg |= TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC2_SHIFT; + } else { + reg &= ~(TCPC_ROLE_CTRL_CC1_MASK << TCPC_ROLE_CTRL_CC1_SHIFT); + /* Local port is source */ + if (cc1 == TYPEC_CC_RD) + /* Role control would have the Rp setting when DRP was enabled */ + reg |= TCPC_ROLE_CTRL_CC_RP << TCPC_ROLE_CTRL_CC1_SHIFT; + else + reg |= TCPC_ROLE_CTRL_CC_RD << TCPC_ROLE_CTRL_CC1_SHIFT; + } + } + if (polarity == TYPEC_POLARITY_CC2) reg |= TCPC_ROLE_CTRL_CC_OPEN << TCPC_ROLE_CTRL_CC1_SHIFT; else @@ -227,6 +268,22 @@ static int tcpci_set_vconn(struct tcpc_dev *tcpc, bool enable) enable ? TCPC_POWER_CTRL_VCONN_ENABLE : 0); } +static int tcpci_enable_frs(struct tcpc_dev *dev, bool enable) +{ + struct tcpci *tcpci = tcpc_to_tcpci(dev); + int ret; + + /* To prevent disconnect during FRS, set disconnect threshold to 3.5V */ + ret = tcpci_write16(tcpci, TCPC_VBUS_SINK_DISCONNECT_THRESH, enable ? 
0 : 0x8c); + if (ret < 0) + return ret; + + ret = regmap_update_bits(tcpci->regmap, TCPC_POWER_CTRL, TCPC_FAST_ROLE_SWAP_EN, enable ? + TCPC_FAST_ROLE_SWAP_EN : 0); + + return ret; +} + static int tcpci_set_bist_data(struct tcpc_dev *tcpc, bool enable) { struct tcpci *tcpci = tcpc_to_tcpci(tcpc); @@ -287,6 +344,13 @@ static int tcpci_set_vbus(struct tcpc_dev *tcpc, bool source, bool sink) struct tcpci *tcpci = tcpc_to_tcpci(tcpc); int ret; + if (tcpci->data->set_vbus) { + ret = tcpci->data->set_vbus(tcpci, tcpci->data, source, sink); + /* Bypass when ret > 0 */ + if (ret != 0) + return ret < 0 ? ret : 0; + } + /* Disable both source and sink first before enabling anything */ if (!source) { @@ -330,23 +394,47 @@ static int tcpci_pd_transmit(struct tcpc_dev *tcpc, int ret; cnt = msg ? pd_header_cnt(header) * 4 : 0; - ret = regmap_write(tcpci->regmap, TCPC_TX_BYTE_CNT, cnt + 2); - if (ret < 0) - return ret; + /** + * TCPCI spec forbids direct access of TCPC_TX_DATA. + * But, since some of the chipsets offer this capability, + * it's fair to support both. + */ + if (tcpci->data->TX_BUF_BYTE_x_hidden) { + u8 buf[TCPC_TRANSMIT_BUFFER_MAX_LEN] = {0,}; + u8 pos = 0; - ret = tcpci_write16(tcpci, TCPC_TX_HDR, header); - if (ret < 0) - return ret; + /* Payload + header + TCPC_TX_BYTE_CNT */ + buf[pos++] = cnt + 2; + + if (msg) + memcpy(&buf[pos], &msg->header, sizeof(msg->header)); - if (cnt > 0) { - ret = regmap_raw_write(tcpci->regmap, TCPC_TX_DATA, - &msg->payload, cnt); + pos += sizeof(header); + + if (cnt > 0) + memcpy(&buf[pos], msg->payload, cnt); + + pos += cnt; + ret = regmap_raw_write(tcpci->regmap, TCPC_TX_BYTE_CNT, buf, pos); + if (ret < 0) + return ret; + } else { + ret = regmap_write(tcpci->regmap, TCPC_TX_BYTE_CNT, cnt + 2); if (ret < 0) return ret; + + ret = tcpci_write16(tcpci, TCPC_TX_HDR, header); + if (ret < 0) + return ret; + + if (cnt > 0) { + ret = regmap_raw_write(tcpci->regmap, TCPC_TX_DATA, &msg->payload, cnt); + if (ret < 0) + return ret; + } } - reg = (PD_RETRY_COUNT << TCPC_TRANSMIT_RETRY_SHIFT) | - (type << TCPC_TRANSMIT_TYPE_SHIFT); + reg = (PD_RETRY_COUNT << TCPC_TRANSMIT_RETRY_SHIFT) | (type << TCPC_TRANSMIT_TYPE_SHIFT); ret = regmap_write(tcpci->regmap, TCPC_TRANSMIT, reg); if (ret < 0) return ret; @@ -539,6 +627,7 @@ struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data) tcpci->tcpc.set_roles = tcpci_set_roles; tcpci->tcpc.pd_transmit = tcpci_pd_transmit; tcpci->tcpc.set_bist_data = tcpci_set_bist_data; + tcpci->tcpc.enable_frs = tcpci_enable_frs; err = tcpci_parse_config(tcpci); if (err < 0) diff --git a/drivers/usb/typec/tcpm/tcpci.h b/drivers/usb/typec/tcpm/tcpci.h index 11c36d086c86..5ef07a56d67a 100644 --- a/drivers/usb/typec/tcpm/tcpci.h +++ b/drivers/usb/typec/tcpm/tcpci.h @@ -16,6 +16,8 @@ #define TCPC_PD_INT_REV 0xa #define TCPC_ALERT 0x10 +#define TCPC_ALERT_EXTND BIT(14) +#define TCPC_ALERT_EXTENDED_STATUS BIT(13) #define TCPC_ALERT_VBUS_DISCNCT BIT(11) #define TCPC_ALERT_RX_BUF_OVF BIT(10) #define TCPC_ALERT_FAULT BIT(9) @@ -32,6 +34,13 @@ #define TCPC_ALERT_MASK 0x12 #define TCPC_POWER_STATUS_MASK 0x14 #define TCPC_FAULT_STATUS_MASK 0x15 + +#define TCPC_EXTENDED_STATUS_MASK 0x16 +#define TCPC_EXTENDED_STATUS_MASK_VSAFE0V BIT(0) + +#define TCPC_ALERT_EXTENDED_MASK 0x17 +#define TCPC_SINK_FAST_ROLE_SWAP BIT(0) + #define TCPC_CONFIG_STD_OUTPUT 0x18 #define TCPC_TCPC_CTRL 0x19 @@ -58,6 +67,7 @@ #define TCPC_POWER_CTRL 0x1c #define TCPC_POWER_CTRL_VCONN_ENABLE BIT(0) +#define TCPC_FAST_ROLE_SWAP_EN BIT(7) #define TCPC_CC_STATUS 0x1d 
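The TX_BUF_BYTE_x_hidden branch of tcpci_pd_transmit() above packs the byte count, PD header and payload into one buffer and issues a single raw write starting at TCPC_TX_BYTE_CNT, since such chips only expose the TX buffer through I2C_WRITE_BYTE_COUNT. A self-contained sketch of that packing (not the driver code; the register offset is passed in rather than hard-coded):

#include <linux/errno.h>
#include <linux/regmap.h>
#include <linux/string.h>
#include <linux/types.h>

static int example_hidden_tx(struct regmap *map, unsigned int tx_byte_cnt_reg,
			     __le16 header, const void *payload, size_t cnt)
{
	u8 buf[31] = { };	/* TCPC_TRANSMIT_BUFFER_MAX_LEN in the patch */
	size_t pos = 0;

	if (1 + sizeof(header) + cnt > sizeof(buf))
		return -EINVAL;

	buf[pos++] = cnt + 2;				/* TX_BYTE_CNT: header + payload */
	memcpy(&buf[pos], &header, sizeof(header));	/* header is already little-endian */
	pos += sizeof(header);
	memcpy(&buf[pos], payload, cnt);
	pos += cnt;

	/* count, header and data land in consecutive registers in one transfer */
	return regmap_raw_write(map, tx_byte_cnt_reg, buf, pos);
}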
#define TCPC_CC_STATUS_TOGGLING BIT(5) @@ -69,11 +79,14 @@ #define TCPC_POWER_STATUS 0x1e #define TCPC_POWER_STATUS_UNINIT BIT(6) +#define TCPC_POWER_STATUS_SOURCING_VBUS BIT(4) #define TCPC_POWER_STATUS_VBUS_DET BIT(3) #define TCPC_POWER_STATUS_VBUS_PRES BIT(2) #define TCPC_FAULT_STATUS 0x1f +#define TCPC_ALERT_EXTENDED 0x21 + #define TCPC_COMMAND 0x23 #define TCPC_CMD_WAKE_I2C 0x11 #define TCPC_CMD_DISABLE_VBUS_DETECT 0x22 @@ -104,6 +117,7 @@ #define TCPC_RX_BYTE_CNT 0x30 #define TCPC_RX_BUF_FRAME_TYPE 0x31 +#define TCPC_RX_BUF_FRAME_TYPE_SOP 0 #define TCPC_RX_HDR 0x32 #define TCPC_RX_DATA 0x34 /* through 0x4f */ @@ -123,18 +137,29 @@ #define TCPC_VBUS_VOLTAGE_ALARM_HI_CFG 0x76 #define TCPC_VBUS_VOLTAGE_ALARM_LO_CFG 0x78 +/* I2C_WRITE_BYTE_COUNT + 1 when TX_BUF_BYTE_x is only accessible I2C_WRITE_BYTE_COUNT */ +#define TCPC_TRANSMIT_BUFFER_MAX_LEN 31 + +/* + * @TX_BUF_BYTE_x_hidden + * optional; Set when TX_BUF_BYTE_x can only be accessed through I2C_WRITE_BYTE_COUNT. + */ struct tcpci; struct tcpci_data { struct regmap *regmap; + unsigned char TX_BUF_BYTE_x_hidden:1; int (*init)(struct tcpci *tcpci, struct tcpci_data *data); int (*set_vconn)(struct tcpci *tcpci, struct tcpci_data *data, bool enable); int (*start_drp_toggling)(struct tcpci *tcpci, struct tcpci_data *data, enum typec_cc_status cc); + int (*set_vbus)(struct tcpci *tcpci, struct tcpci_data *data, bool source, bool sink); }; struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data); void tcpci_unregister_port(struct tcpci *tcpci); irqreturn_t tcpci_irq(struct tcpci *tcpci); +struct tcpm_port; +struct tcpm_port *tcpci_get_tcpm_port(struct tcpci *tcpci); #endif /* __LINUX_USB_TCPCI_H */ diff --git a/drivers/usb/typec/tcpm/tcpci_maxim.c b/drivers/usb/typec/tcpm/tcpci_maxim.c new file mode 100644 index 000000000000..723d7dd38f75 --- /dev/null +++ b/drivers/usb/typec/tcpm/tcpci_maxim.c @@ -0,0 +1,503 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (C) 2020, Google LLC + * + * MAXIM TCPCI based TCPC driver + */ + +#include <linux/gpio.h> +#include <linux/gpio/consumer.h> +#include <linux/interrupt.h> +#include <linux/i2c.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of_gpio.h> +#include <linux/regmap.h> +#include <linux/usb/pd.h> +#include <linux/usb/tcpm.h> +#include <linux/usb/typec.h> + +#include "tcpci.h" + +#define PD_ACTIVITY_TIMEOUT_MS 10000 + +#define TCPC_VENDOR_ALERT 0x80 + +#define TCPC_RECEIVE_BUFFER_COUNT_OFFSET 0 +#define TCPC_RECEIVE_BUFFER_FRAME_TYPE_OFFSET 1 +#define TCPC_RECEIVE_BUFFER_RX_BYTE_BUF_OFFSET 2 + +/* + * LongMessage not supported, hence 32 bytes for buf to be read from RECEIVE_BUFFER. + * DEVICE_CAPABILITIES_2.LongMessage = 0, the value in READABLE_BYTE_COUNT reg shall be + * less than or equal to 31. Since, RECEIVE_BUFFER len = 31 + 1(READABLE_BYTE_COUNT). 
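struct tcpci_data now carries a set_vbus() vendor hook, and the generic tcpci_set_vbus() above bypasses its own SRC/SNK VBUS commands when the hook returns a positive value. A minimal sketch of how a vendor driver plugs in (example_ name is invented; max_tcpci_set_vbus() further down is the real user):

#include "tcpci.h"

static int example_set_vbus(struct tcpci *tcpci, struct tcpci_data *data,
			    bool source, bool sink)
{
	/* drive the external VBUS source/sink switch here ... */

	/* a positive return tells generic tcpci_set_vbus() the hook handled it */
	return 1;
}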
+ */ +#define TCPC_RECEIVE_BUFFER_LEN 32 + +#define MAX_BUCK_BOOST_SID 0x69 +#define MAX_BUCK_BOOST_OP 0xb9 +#define MAX_BUCK_BOOST_OFF 0 +#define MAX_BUCK_BOOST_SOURCE 0xa +#define MAX_BUCK_BOOST_SINK 0x5 + +struct max_tcpci_chip { + struct tcpci_data data; + struct tcpci *tcpci; + struct device *dev; + struct i2c_client *client; + struct tcpm_port *port; +}; + +static const struct regmap_range max_tcpci_tcpci_range[] = { + regmap_reg_range(0x00, 0x95) +}; + +const struct regmap_access_table max_tcpci_tcpci_write_table = { + .yes_ranges = max_tcpci_tcpci_range, + .n_yes_ranges = ARRAY_SIZE(max_tcpci_tcpci_range), +}; + +static const struct regmap_config max_tcpci_regmap_config = { + .reg_bits = 8, + .val_bits = 8, + .max_register = 0x95, + .wr_table = &max_tcpci_tcpci_write_table, +}; + +static struct max_tcpci_chip *tdata_to_max_tcpci(struct tcpci_data *tdata) +{ + return container_of(tdata, struct max_tcpci_chip, data); +} + +static int max_tcpci_read16(struct max_tcpci_chip *chip, unsigned int reg, u16 *val) +{ + return regmap_raw_read(chip->data.regmap, reg, val, sizeof(u16)); +} + +static int max_tcpci_write16(struct max_tcpci_chip *chip, unsigned int reg, u16 val) +{ + return regmap_raw_write(chip->data.regmap, reg, &val, sizeof(u16)); +} + +static int max_tcpci_read8(struct max_tcpci_chip *chip, unsigned int reg, u8 *val) +{ + return regmap_raw_read(chip->data.regmap, reg, val, sizeof(u8)); +} + +static int max_tcpci_write8(struct max_tcpci_chip *chip, unsigned int reg, u8 val) +{ + return regmap_raw_write(chip->data.regmap, reg, &val, sizeof(u8)); +} + +static void max_tcpci_init_regs(struct max_tcpci_chip *chip) +{ + u16 alert_mask = 0; + int ret; + + ret = max_tcpci_write16(chip, TCPC_ALERT, 0xffff); + if (ret < 0) { + dev_err(chip->dev, "Error writing to TCPC_ALERT ret:%d\n", ret); + return; + } + + ret = max_tcpci_write16(chip, TCPC_VENDOR_ALERT, 0xffff); + if (ret < 0) { + dev_err(chip->dev, "Error writing to TCPC_VENDOR_ALERT ret:%d\n", ret); + return; + } + + ret = max_tcpci_write8(chip, TCPC_ALERT_EXTENDED, 0xff); + if (ret < 0) { + dev_err(chip->dev, "Unable to clear TCPC_ALERT_EXTENDED ret:%d\n", ret); + return; + } + + alert_mask = TCPC_ALERT_TX_SUCCESS | TCPC_ALERT_TX_DISCARDED | TCPC_ALERT_TX_FAILED | + TCPC_ALERT_RX_HARD_RST | TCPC_ALERT_RX_STATUS | TCPC_ALERT_CC_STATUS | + TCPC_ALERT_VBUS_DISCNCT | TCPC_ALERT_RX_BUF_OVF | TCPC_ALERT_POWER_STATUS | + /* Enable Extended alert for detecting Fast Role Swap Signal */ + TCPC_ALERT_EXTND; + + ret = max_tcpci_write16(chip, TCPC_ALERT_MASK, alert_mask); + if (ret < 0) { + dev_err(chip->dev, + "Error enabling TCPC_ALERT: TCPC_ALERT_MASK write failed ret:%d\n", ret); + return; + } + + /* Enable vbus voltage monitoring and voltage alerts */ + ret = max_tcpci_write8(chip, TCPC_POWER_CTRL, 0); + if (ret < 0) { + dev_err(chip->dev, "Error writing to TCPC_POWER_CTRL ret:%d\n", ret); + return; + } + + ret = max_tcpci_write8(chip, TCPC_ALERT_EXTENDED_MASK, TCPC_SINK_FAST_ROLE_SWAP); + if (ret < 0) + return; +} + +static void process_rx(struct max_tcpci_chip *chip, u16 status) +{ + struct pd_message msg; + u8 count, frame_type, rx_buf[TCPC_RECEIVE_BUFFER_LEN]; + int ret, payload_index; + u8 *rx_buf_ptr; + + /* + * READABLE_BYTE_COUNT: Indicates the number of bytes in the RX_BUF_BYTE_x registers + * plus one (for the RX_BUF_FRAME_TYPE) Table 4-36. + * Read the count and frame type. 
+ */ + ret = regmap_raw_read(chip->data.regmap, TCPC_RX_BYTE_CNT, rx_buf, 2); + if (ret < 0) { + dev_err(chip->dev, "TCPC_RX_BYTE_CNT read failed ret:%d", ret); + return; + } + + count = rx_buf[TCPC_RECEIVE_BUFFER_COUNT_OFFSET]; + frame_type = rx_buf[TCPC_RECEIVE_BUFFER_FRAME_TYPE_OFFSET]; + + if (count == 0 || frame_type != TCPC_RX_BUF_FRAME_TYPE_SOP) { + max_tcpci_write16(chip, TCPC_ALERT, TCPC_ALERT_RX_STATUS); + dev_err(chip->dev, "%s", count == 0 ? "error: count is 0" : + "error frame_type is not SOP"); + return; + } + + if (count > sizeof(struct pd_message) || count + 1 > TCPC_RECEIVE_BUFFER_LEN) { + dev_err(chip->dev, "Invalid TCPC_RX_BYTE_CNT %d", count); + return; + } + + /* + * Read count + 1 as RX_BUF_BYTE_x is hidden and can only be read through + * TCPC_RX_BYTE_CNT + */ + count += 1; + ret = regmap_raw_read(chip->data.regmap, TCPC_RX_BYTE_CNT, rx_buf, count); + if (ret < 0) { + dev_err(chip->dev, "Error: TCPC_RX_BYTE_CNT read failed: %d", ret); + return; + } + + rx_buf_ptr = rx_buf + TCPC_RECEIVE_BUFFER_RX_BYTE_BUF_OFFSET; + msg.header = cpu_to_le16(*(u16 *)rx_buf_ptr); + rx_buf_ptr = rx_buf_ptr + sizeof(msg.header); + for (payload_index = 0; payload_index < pd_header_cnt_le(msg.header); payload_index++, + rx_buf_ptr += sizeof(msg.payload[0])) + msg.payload[payload_index] = cpu_to_le32(*(u32 *)rx_buf_ptr); + + /* + * Read complete, clear RX status alert bit. + * Clear overflow as well if set. + */ + ret = max_tcpci_write16(chip, TCPC_ALERT, status & TCPC_ALERT_RX_BUF_OVF ? + TCPC_ALERT_RX_STATUS | TCPC_ALERT_RX_BUF_OVF : + TCPC_ALERT_RX_STATUS); + if (ret < 0) + return; + + tcpm_pd_receive(chip->port, &msg); +} + +static int max_tcpci_set_vbus(struct tcpci *tcpci, struct tcpci_data *tdata, bool source, bool sink) +{ + struct max_tcpci_chip *chip = tdata_to_max_tcpci(tdata); + u8 buffer_source[2] = {MAX_BUCK_BOOST_OP, MAX_BUCK_BOOST_SOURCE}; + u8 buffer_sink[2] = {MAX_BUCK_BOOST_OP, MAX_BUCK_BOOST_SINK}; + u8 buffer_none[2] = {MAX_BUCK_BOOST_OP, MAX_BUCK_BOOST_OFF}; + struct i2c_client *i2c = chip->client; + int ret; + + struct i2c_msg msgs[] = { + { + .addr = MAX_BUCK_BOOST_SID, + .flags = i2c->flags & I2C_M_TEN, + .len = 2, + .buf = source ? buffer_source : sink ? buffer_sink : buffer_none, + }, + }; + + if (source && sink) { + dev_err(chip->dev, "Both source and sink set\n"); + return -EINVAL; + } + + ret = i2c_transfer(i2c->adapter, msgs, 1); + + return ret < 0 ? ret : 1; +} + +static void process_power_status(struct max_tcpci_chip *chip) +{ + u8 pwr_status; + int ret; + + ret = max_tcpci_read8(chip, TCPC_POWER_STATUS, &pwr_status); + if (ret < 0) + return; + + if (pwr_status == 0xff) { + max_tcpci_init_regs(chip); + } else if (pwr_status & TCPC_POWER_STATUS_SOURCING_VBUS) { + tcpm_sourcing_vbus(chip->port); + /* + * Always re-enable boost here. + * In the normal case, when, say, a headset is attached, TCPM would + * have instructed the TCPC to enable boost, so the call is a + * no-op. + * But in the Fast Role Swap case, Boost turns on autonomously without + * AP intervention, and needs the AP to enable source mode explicitly + * for the AP to regain control.
+ */ + max_tcpci_set_vbus(chip->tcpci, &chip->data, true, false); + } else { + tcpm_vbus_change(chip->port); + } +} + +static void process_tx(struct max_tcpci_chip *chip, u16 status) +{ + if (status & TCPC_ALERT_TX_SUCCESS) + tcpm_pd_transmit_complete(chip->port, TCPC_TX_SUCCESS); + else if (status & TCPC_ALERT_TX_DISCARDED) + tcpm_pd_transmit_complete(chip->port, TCPC_TX_DISCARDED); + else if (status & TCPC_ALERT_TX_FAILED) + tcpm_pd_transmit_complete(chip->port, TCPC_TX_FAILED); + + /* Reinit regs as Hard reset sets them to default value */ + if ((status & TCPC_ALERT_TX_SUCCESS) && (status & TCPC_ALERT_TX_FAILED)) + max_tcpci_init_regs(chip); +} + +static irqreturn_t _max_tcpci_irq(struct max_tcpci_chip *chip, u16 status) +{ + u16 mask; + int ret; + u8 reg_status; + + /* + * Clear alert status for everything except RX_STATUS, which shouldn't + * be cleared until we have successfully retrieved message. + */ + if (status & ~TCPC_ALERT_RX_STATUS) { + mask = status & TCPC_ALERT_RX_BUF_OVF ? + status & ~(TCPC_ALERT_RX_STATUS | TCPC_ALERT_RX_BUF_OVF) : + status & ~TCPC_ALERT_RX_STATUS; + ret = max_tcpci_write16(chip, TCPC_ALERT, mask); + if (ret < 0) { + dev_err(chip->dev, "ALERT clear failed\n"); + return ret; + } + } + + if (status & TCPC_ALERT_RX_BUF_OVF && !(status & TCPC_ALERT_RX_STATUS)) { + ret = max_tcpci_write16(chip, TCPC_ALERT, (TCPC_ALERT_RX_STATUS | + TCPC_ALERT_RX_BUF_OVF)); + if (ret < 0) { + dev_err(chip->dev, "ALERT clear failed\n"); + return ret; + } + } + + if (status & TCPC_ALERT_EXTND) { + ret = max_tcpci_read8(chip, TCPC_ALERT_EXTENDED, ®_status); + if (ret < 0) + return ret; + + ret = max_tcpci_write8(chip, TCPC_ALERT_EXTENDED, reg_status); + if (ret < 0) + return ret; + + if (reg_status & TCPC_SINK_FAST_ROLE_SWAP) { + dev_info(chip->dev, "FRS Signal"); + tcpm_sink_frs(chip->port); + } + } + + if (status & TCPC_ALERT_RX_STATUS) + process_rx(chip, status); + + if (status & TCPC_ALERT_VBUS_DISCNCT) + tcpm_vbus_change(chip->port); + + if (status & TCPC_ALERT_CC_STATUS) + tcpm_cc_change(chip->port); + + if (status & TCPC_ALERT_POWER_STATUS) + process_power_status(chip); + + if (status & TCPC_ALERT_RX_HARD_RST) { + tcpm_pd_hard_reset(chip->port); + max_tcpci_init_regs(chip); + } + + if (status & TCPC_ALERT_TX_SUCCESS || status & TCPC_ALERT_TX_DISCARDED || status & + TCPC_ALERT_TX_FAILED) + process_tx(chip, status); + + return IRQ_HANDLED; +} + +static irqreturn_t max_tcpci_irq(int irq, void *dev_id) +{ + struct max_tcpci_chip *chip = dev_id; + u16 status; + irqreturn_t irq_return; + int ret; + + if (!chip->port) + return IRQ_HANDLED; + + ret = max_tcpci_read16(chip, TCPC_ALERT, &status); + if (ret < 0) { + dev_err(chip->dev, "ALERT read failed\n"); + return ret; + } + while (status) { + irq_return = _max_tcpci_irq(chip, status); + /* Do not return if the ALERT is already set. 
*/ + ret = max_tcpci_read16(chip, TCPC_ALERT, &status); + if (ret < 0) + break; + } + + return irq_return; +} + +static irqreturn_t max_tcpci_isr(int irq, void *dev_id) +{ + struct max_tcpci_chip *chip = dev_id; + + pm_wakeup_event(chip->dev, PD_ACTIVITY_TIMEOUT_MS); + + if (!chip->port) + return IRQ_HANDLED; + + return IRQ_WAKE_THREAD; +} + +static int max_tcpci_init_alert(struct max_tcpci_chip *chip, struct i2c_client *client) +{ + int ret; + + ret = devm_request_threaded_irq(chip->dev, client->irq, max_tcpci_isr, max_tcpci_irq, + (IRQF_TRIGGER_LOW | IRQF_ONESHOT), dev_name(chip->dev), + chip); + + if (ret < 0) + return ret; + + enable_irq_wake(client->irq); + return 0; +} + +static int max_tcpci_start_toggling(struct tcpci *tcpci, struct tcpci_data *tdata, + enum typec_cc_status cc) +{ + struct max_tcpci_chip *chip = tdata_to_max_tcpci(tdata); + + max_tcpci_init_regs(chip); + + return 0; +} + +static int tcpci_init(struct tcpci *tcpci, struct tcpci_data *data) +{ + /* + * Generic TCPCI overwrites the regs once this driver initializes + * them. Prevent this by returning -1. + */ + return -1; +} + +static int max_tcpci_probe(struct i2c_client *client, const struct i2c_device_id *i2c_id) +{ + int ret; + struct max_tcpci_chip *chip; + u8 power_status; + + chip = devm_kzalloc(&client->dev, sizeof(*chip), GFP_KERNEL); + if (!chip) + return -ENOMEM; + + chip->client = client; + chip->data.regmap = devm_regmap_init_i2c(client, &max_tcpci_regmap_config); + if (IS_ERR(chip->data.regmap)) { + dev_err(&client->dev, "Regmap init failed\n"); + return PTR_ERR(chip->data.regmap); + } + + chip->dev = &client->dev; + i2c_set_clientdata(client, chip); + + ret = max_tcpci_read8(chip, TCPC_POWER_STATUS, &power_status); + if (ret < 0) + return ret; + + /* Chip level tcpci callbacks */ + chip->data.set_vbus = max_tcpci_set_vbus; + chip->data.start_drp_toggling = max_tcpci_start_toggling; + chip->data.TX_BUF_BYTE_x_hidden = true; + chip->data.init = tcpci_init; + + max_tcpci_init_regs(chip); + chip->tcpci = tcpci_register_port(chip->dev, &chip->data); + if (IS_ERR_OR_NULL(chip->tcpci)) { + dev_err(&client->dev, "TCPCI port registration failed"); + ret = PTR_ERR(chip->tcpci); + return PTR_ERR(chip->tcpci); + } + chip->port = tcpci_get_tcpm_port(chip->tcpci); + ret = max_tcpci_init_alert(chip, client); + if (ret < 0) + goto unreg_port; + + device_init_wakeup(chip->dev, true); + return 0; + +unreg_port: + tcpci_unregister_port(chip->tcpci); + + return ret; +} + +static int max_tcpci_remove(struct i2c_client *client) +{ + struct max_tcpci_chip *chip = i2c_get_clientdata(client); + + if (!IS_ERR_OR_NULL(chip->tcpci)) + tcpci_unregister_port(chip->tcpci); + + return 0; +} + +static const struct i2c_device_id max_tcpci_id[] = { + { "maxtcpc", 0 }, + { } +}; +MODULE_DEVICE_TABLE(i2c, max_tcpci_id); + +#ifdef CONFIG_OF +static const struct of_device_id max_tcpci_of_match[] = { + { .compatible = "maxim,tcpc", }, + {}, +}; +MODULE_DEVICE_TABLE(of, max_tcpci_of_match); +#endif + +static struct i2c_driver max_tcpci_i2c_driver = { + .driver = { + .name = "maxtcpc", + .of_match_table = of_match_ptr(max_tcpci_of_match), + }, + .probe = max_tcpci_probe, + .remove = max_tcpci_remove, + .id_table = max_tcpci_id, +}; +module_i2c_driver(max_tcpci_i2c_driver); + +MODULE_AUTHOR("Badhri Jagan Sridharan <badhri@google.com>"); +MODULE_DESCRIPTION("Maxim TCPCI based USB Type-C Port Controller Interface Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/typec/tcpm/tcpci_mt6360.c b/drivers/usb/typec/tcpm/tcpci_mt6360.c new file 
mode 100644 index 000000000000..f1bd9e09bc87 --- /dev/null +++ b/drivers/usb/typec/tcpm/tcpci_mt6360.c @@ -0,0 +1,212 @@ +// SPDX-License-Identifier: GPL-2.0-only +/* + * Copyright (C) 2020 MediaTek Inc. + * + * Author: ChiYuan Huang <cy_huang@richtek.com> + */ + +#include <linux/interrupt.h> +#include <linux/kernel.h> +#include <linux/module.h> +#include <linux/of.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/usb/tcpm.h> + +#include "tcpci.h" + +#define MT6360_REG_VCONNCTRL1 0x8C +#define MT6360_REG_MODECTRL2 0x8F +#define MT6360_REG_SWRESET 0xA0 +#define MT6360_REG_DEBCTRL1 0xA1 +#define MT6360_REG_DRPCTRL1 0xA2 +#define MT6360_REG_DRPCTRL2 0xA3 +#define MT6360_REG_I2CTORST 0xBF +#define MT6360_REG_RXCTRL2 0xCF +#define MT6360_REG_CTDCTRL2 0xEC + +/* MT6360_REG_VCONNCTRL1 */ +#define MT6360_VCONNCL_ENABLE BIT(0) +/* MT6360_REG_RXCTRL2 */ +#define MT6360_OPEN40M_ENABLE BIT(7) +/* MT6360_REG_CTDCTRL2 */ +#define MT6360_RPONESHOT_ENABLE BIT(6) + +struct mt6360_tcpc_info { + struct tcpci_data tdata; + struct tcpci *tcpci; + struct device *dev; + int irq; +}; + +static inline int mt6360_tcpc_read16(struct regmap *regmap, + unsigned int reg, u16 *val) +{ + return regmap_raw_read(regmap, reg, val, sizeof(u16)); +} + +static inline int mt6360_tcpc_write16(struct regmap *regmap, + unsigned int reg, u16 val) +{ + return regmap_raw_write(regmap, reg, &val, sizeof(u16)); +} + +static int mt6360_tcpc_init(struct tcpci *tcpci, struct tcpci_data *tdata) +{ + struct regmap *regmap = tdata->regmap; + int ret; + + ret = regmap_write(regmap, MT6360_REG_SWRESET, 0x01); + if (ret) + return ret; + + /* after the reset command, wait 1~2ms for the IC to act */ + usleep_range(1000, 2000); + + /* mask all alerts */ + ret = mt6360_tcpc_write16(regmap, TCPC_ALERT_MASK, 0); + if (ret) + return ret; + + /* enable I2C timeout reset, and set the timeout to 200ms */ + ret = regmap_write(regmap, MT6360_REG_I2CTORST, 0x8F); + if (ret) + return ret; + + /* config CC Detect Debounce : 26.7*val us */ + ret = regmap_write(regmap, MT6360_REG_DEBCTRL1, 0x10); + if (ret) + return ret; + + /* DRP Toggle Cycle : 51.2 + 6.4*val ms */ + ret = regmap_write(regmap, MT6360_REG_DRPCTRL1, 4); + if (ret) + return ret; + + /* DRP Duty Ctrl : dcSRC: /1024 */ + ret = mt6360_tcpc_write16(regmap, MT6360_REG_DRPCTRL2, 330); + if (ret) + return ret; + + /* Enable VCONN Current Limit function */ + ret = regmap_update_bits(regmap, MT6360_REG_VCONNCTRL1, MT6360_VCONNCL_ENABLE, + MT6360_VCONNCL_ENABLE); + if (ret) + return ret; + + /* Enable cc open 40ms when the pmic sends the vsysuv signal */ + ret = regmap_update_bits(regmap, MT6360_REG_RXCTRL2, MT6360_OPEN40M_ENABLE, + MT6360_OPEN40M_ENABLE); + if (ret) + return ret; + + /* Enable Rpdet oneshot detection */ + ret = regmap_update_bits(regmap, MT6360_REG_CTDCTRL2, MT6360_RPONESHOT_ENABLE, + MT6360_RPONESHOT_ENABLE); + if (ret) + return ret; + + /* Set shipping mode off, AUTOIDLE on */ + return regmap_write(regmap, MT6360_REG_MODECTRL2, 0x7A); +} + +static irqreturn_t mt6360_irq(int irq, void *dev_id) +{ + struct mt6360_tcpc_info *mti = dev_id; + + return tcpci_irq(mti->tcpci); +} + +static int mt6360_tcpc_probe(struct platform_device *pdev) +{ + struct mt6360_tcpc_info *mti; + int ret; + + mti = devm_kzalloc(&pdev->dev, sizeof(*mti), GFP_KERNEL); + if (!mti) + return -ENOMEM; + + mti->dev = &pdev->dev; + + mti->tdata.regmap = dev_get_regmap(pdev->dev.parent, NULL); + if (!mti->tdata.regmap) { + dev_err(&pdev->dev, "Failed to get parent regmap\n"); + return
-ENODEV; + } + + mti->irq = platform_get_irq_byname(pdev, "PD_IRQB"); + if (mti->irq < 0) + return mti->irq; + + mti->tdata.init = mt6360_tcpc_init; + mti->tcpci = tcpci_register_port(&pdev->dev, &mti->tdata); + if (IS_ERR(mti->tcpci)) { + dev_err(&pdev->dev, "Failed to register tcpci port\n"); + return PTR_ERR(mti->tcpci); + } + + ret = devm_request_threaded_irq(mti->dev, mti->irq, NULL, mt6360_irq, IRQF_ONESHOT, + dev_name(&pdev->dev), mti); + if (ret) { + dev_err(mti->dev, "Failed to register irq\n"); + tcpci_unregister_port(mti->tcpci); + return ret; + } + + device_init_wakeup(&pdev->dev, true); + platform_set_drvdata(pdev, mti); + + return 0; +} + +static int mt6360_tcpc_remove(struct platform_device *pdev) +{ + struct mt6360_tcpc_info *mti = platform_get_drvdata(pdev); + + disable_irq(mti->irq); + tcpci_unregister_port(mti->tcpci); + return 0; +} + +static int __maybe_unused mt6360_tcpc_suspend(struct device *dev) +{ + struct mt6360_tcpc_info *mti = dev_get_drvdata(dev); + + if (device_may_wakeup(dev)) + enable_irq_wake(mti->irq); + + return 0; +} + +static int __maybe_unused mt6360_tcpc_resume(struct device *dev) +{ + struct mt6360_tcpc_info *mti = dev_get_drvdata(dev); + + if (device_may_wakeup(dev)) + disable_irq_wake(mti->irq); + + return 0; +} + +static SIMPLE_DEV_PM_OPS(mt6360_tcpc_pm_ops, mt6360_tcpc_suspend, mt6360_tcpc_resume); + +static const struct of_device_id __maybe_unused mt6360_tcpc_of_id[] = { + { .compatible = "mediatek,mt6360-tcpc", }, + {}, +}; +MODULE_DEVICE_TABLE(of, mt6360_tcpc_of_id); + +static struct platform_driver mt6360_tcpc_driver = { + .driver = { + .name = "mt6360-tcpc", + .pm = &mt6360_tcpc_pm_ops, + .of_match_table = mt6360_tcpc_of_id, + }, + .probe = mt6360_tcpc_probe, + .remove = mt6360_tcpc_remove, +}; +module_platform_driver(mt6360_tcpc_driver); + +MODULE_AUTHOR("ChiYuan Huang <cy_huang@richtek.com>"); +MODULE_DESCRIPTION("MT6360 USB Type-C Port Controller Interface Driver"); +MODULE_LICENSE("GPL v2"); diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c index a48e3f90d196..55535c4f66bf 100644 --- a/drivers/usb/typec/tcpm/tcpm.c +++ b/drivers/usb/typec/tcpm/tcpm.c @@ -8,8 +8,10 @@ #include <linux/completion.h> #include <linux/debugfs.h> #include <linux/device.h> +#include <linux/hrtimer.h> #include <linux/jiffies.h> #include <linux/kernel.h> +#include <linux/kthread.h> #include <linux/module.h> #include <linux/mutex.h> #include <linux/power_supply.h> @@ -28,7 +30,8 @@ #include <linux/usb/role.h> #include <linux/usb/tcpm.h> #include <linux/usb/typec_altmode.h> -#include <linux/workqueue.h> + +#include <uapi/linux/sched/types.h> #define FOREACH_STATE(S) \ S(INVALID_STATE), \ @@ -103,6 +106,13 @@ S(VCONN_SWAP_TURN_ON_VCONN), \ S(VCONN_SWAP_TURN_OFF_VCONN), \ \ + S(FR_SWAP_SEND), \ + S(FR_SWAP_SEND_TIMEOUT), \ + S(FR_SWAP_SNK_SRC_TRANSITION_TO_OFF), \ + S(FR_SWAP_SNK_SRC_NEW_SINK_READY), \ + S(FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED), \ + S(FR_SWAP_CANCEL), \ + \ S(SNK_TRY), \ S(SNK_TRY_WAIT), \ S(SNK_TRY_WAIT_DEBOUNCE), \ @@ -124,6 +134,9 @@ S(GET_PPS_STATUS_SEND), \ S(GET_PPS_STATUS_SEND_TIMEOUT), \ \ + S(GET_SINK_CAP), \ + S(GET_SINK_CAP_TIMEOUT), \ + \ S(ERROR_RECOVERY), \ S(PORT_RESET), \ S(PORT_RESET_WAIT_OFF) @@ -167,11 +180,25 @@ enum adev_actions { ADEV_ATTENTION, }; +/* + * Initial current capability of the new source when vSafe5V is applied during PD3.0 Fast Role Swap. 
+ * Based on "Table 6-14 Fixed Supply PDO - Sink" of "USB Power Delivery Specification Revision 3.0, + * Version 1.2" + */ +enum frs_typec_current { + FRS_NOT_SUPPORTED, + FRS_DEFAULT_POWER, + FRS_5V_1P5A, + FRS_5V_3A, +}; + /* Events from low level driver */ #define TCPM_CC_EVENT BIT(0) #define TCPM_VBUS_EVENT BIT(1) #define TCPM_RESET_EVENT BIT(2) +#define TCPM_FRS_EVENT BIT(3) +#define TCPM_SOURCING_VBUS BIT(4) #define LOG_BUFFER_ENTRIES 1024 #define LOG_BUFFER_ENTRY_SIZE 128 @@ -181,6 +208,8 @@ enum adev_actions { #define SVID_DISCOVERY_MAX 16 #define ALTMODE_DISCOVERY_MAX (SVID_DISCOVERY_MAX * MODE_DISCOVERY_MAX) +#define GET_SINK_CAP_RETRY_MS 100 + struct pd_mode_data { int svid_index; /* current SVID index */ int nsvids; @@ -203,7 +232,7 @@ struct tcpm_port { struct device *dev; struct mutex lock; /* tcpm state machine lock */ - struct workqueue_struct *wq; + struct kthread_worker *wq; struct typec_capability typec_caps; struct typec_port *typec_port; @@ -247,15 +276,19 @@ struct tcpm_port { enum tcpm_state prev_state; enum tcpm_state state; enum tcpm_state delayed_state; - unsigned long delayed_runtime; + ktime_t delayed_runtime; unsigned long delay_ms; spinlock_t pd_event_lock; u32 pd_events; - struct work_struct event_work; - struct delayed_work state_machine; - struct delayed_work vdm_state_machine; + struct kthread_work event_work; + struct hrtimer state_machine_timer; + struct kthread_work state_machine; + struct hrtimer vdm_state_machine_timer; + struct kthread_work vdm_state_machine; + struct hrtimer enable_frs_timer; + struct kthread_work enable_frs; bool state_machine_running; struct completion tx_complete; @@ -330,6 +363,12 @@ struct tcpm_port { /* port belongs to a self powered device */ bool self_powered; + /* FRS */ + enum frs_typec_current frs_current; + + /* Sink caps have been queried */ + bool sink_cap_done; + #ifdef CONFIG_DEBUG_FS struct dentry *dentry; struct mutex logbuffer_lock; /* log buffer access lock */ @@ -340,7 +379,7 @@ struct tcpm_port { }; struct pd_rx_event { - struct work_struct work; + struct kthread_work work; struct tcpm_port *port; struct pd_message msg; }; @@ -914,6 +953,37 @@ static int tcpm_pd_send_sink_caps(struct tcpm_port *port) return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg); } +static void mod_tcpm_delayed_work(struct tcpm_port *port, unsigned int delay_ms) +{ + if (delay_ms) { + hrtimer_start(&port->state_machine_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL); + } else { + hrtimer_cancel(&port->state_machine_timer); + kthread_queue_work(port->wq, &port->state_machine); + } +} + +static void mod_vdm_delayed_work(struct tcpm_port *port, unsigned int delay_ms) +{ + if (delay_ms) { + hrtimer_start(&port->vdm_state_machine_timer, ms_to_ktime(delay_ms), + HRTIMER_MODE_REL); + } else { + hrtimer_cancel(&port->vdm_state_machine_timer); + kthread_queue_work(port->wq, &port->vdm_state_machine); + } +} + +static void mod_enable_frs_delayed_work(struct tcpm_port *port, unsigned int delay_ms) +{ + if (delay_ms) { + hrtimer_start(&port->enable_frs_timer, ms_to_ktime(delay_ms), HRTIMER_MODE_REL); + } else { + hrtimer_cancel(&port->enable_frs_timer); + kthread_queue_work(port->wq, &port->enable_frs); + } +} + static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state, unsigned int delay_ms) { @@ -922,9 +992,8 @@ static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state, tcpm_states[port->state], tcpm_states[state], delay_ms); port->delayed_state = state; - mod_delayed_work(port->wq, &port->state_machine, - 
msecs_to_jiffies(delay_ms)); - port->delayed_runtime = jiffies + msecs_to_jiffies(delay_ms); + mod_tcpm_delayed_work(port, delay_ms); + port->delayed_runtime = ktime_add(ktime_get(), ms_to_ktime(delay_ms)); port->delay_ms = delay_ms; } else { tcpm_log(port, "state change %s -> %s", @@ -939,7 +1008,7 @@ static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state, * machine. */ if (!port->state_machine_running) - mod_delayed_work(port->wq, &port->state_machine, 0); + mod_tcpm_delayed_work(port, 0); } } @@ -960,7 +1029,7 @@ static void tcpm_queue_message(struct tcpm_port *port, enum pd_msg_request message) { port->queued_message = message; - mod_delayed_work(port->wq, &port->state_machine, 0); + mod_tcpm_delayed_work(port, 0); } /* @@ -981,7 +1050,7 @@ static void tcpm_queue_vdm(struct tcpm_port *port, const u32 header, port->vdm_retries = 0; port->vdm_state = VDM_STATE_READY; - mod_delayed_work(port->wq, &port->vdm_state_machine, 0); + mod_vdm_delayed_work(port, 0); } static void tcpm_queue_vdm_unlocked(struct tcpm_port *port, const u32 header, @@ -1244,8 +1313,7 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port, port->vdm_state = VDM_STATE_WAIT_RSP_BUSY; port->vdo_retry = (p[0] & ~VDO_CMDT_MASK) | CMDT_INIT; - mod_delayed_work(port->wq, &port->vdm_state_machine, - msecs_to_jiffies(PD_T_VDM_BUSY)); + mod_vdm_delayed_work(port, PD_T_VDM_BUSY); return; } port->vdm_state = VDM_STATE_DONE; @@ -1390,8 +1458,7 @@ static void vdm_run_state_machine(struct tcpm_port *port) port->vdm_retries = 0; port->vdm_state = VDM_STATE_BUSY; timeout = vdm_ready_timeout(port->vdo_data[0]); - mod_delayed_work(port->wq, &port->vdm_state_machine, - timeout); + mod_vdm_delayed_work(port, timeout); } break; case VDM_STATE_WAIT_RSP_BUSY: @@ -1420,10 +1487,9 @@ static void vdm_run_state_machine(struct tcpm_port *port) } } -static void vdm_state_machine_work(struct work_struct *work) +static void vdm_state_machine_work(struct kthread_work *work) { - struct tcpm_port *port = container_of(work, struct tcpm_port, - vdm_state_machine.work); + struct tcpm_port *port = container_of(work, struct tcpm_port, vdm_state_machine); enum vdm_states prev_state; mutex_lock(&port->lock); @@ -1591,6 +1657,7 @@ static int tcpm_altmode_vdm(struct typec_altmode *altmode, struct tcpm_port *port = typec_altmode_get_drvdata(altmode); tcpm_queue_vdm_unlocked(port, header, data, count - 1); + return 0; } @@ -1646,6 +1713,9 @@ static void tcpm_pd_data_request(struct tcpm_port *port, unsigned int cnt = pd_header_cnt_le(msg->header); unsigned int rev = pd_header_rev_le(msg->header); unsigned int i; + enum frs_typec_current frs_current; + bool frs_enable; + int ret; switch (type) { case PD_DATA_SOURCE_CAP: @@ -1715,7 +1785,21 @@ static void tcpm_pd_data_request(struct tcpm_port *port, /* We don't do anything with this at the moment... */ for (i = 0; i < cnt; i++) port->sink_caps[i] = le32_to_cpu(msg->payload[i]); + + frs_current = (port->sink_caps[0] & PDO_FIXED_FRS_CURR_MASK) >> + PDO_FIXED_FRS_CURR_SHIFT; + frs_enable = frs_current && (frs_current <= port->frs_current); + tcpm_log(port, + "Port partner FRS capable partner_frs_current:%u port_frs_current:%u enable:%c", + frs_current, port->frs_current, frs_enable ? 'y' : 'n'); + if (frs_enable) { + ret = port->tcpc->enable_frs(port->tcpc, true); + tcpm_log(port, "Enable FRS %s, ret:%d\n", ret ? 
"fail" : "success", ret); + } + port->nr_sink_caps = cnt; + port->sink_cap_done = true; + tcpm_set_state(port, SNK_READY, 0); break; case PD_DATA_VENDOR_DEF: tcpm_handle_vdm_request(port, msg->payload, cnt); @@ -1810,6 +1894,9 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port, case VCONN_SWAP_WAIT_FOR_VCONN: tcpm_set_state(port, VCONN_SWAP_TURN_OFF_VCONN, 0); break; + case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF: + tcpm_set_state(port, FR_SWAP_SNK_SRC_NEW_SINK_READY, 0); + break; default: break; } @@ -1849,6 +1936,13 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port, -EAGAIN : -EOPNOTSUPP); tcpm_set_state(port, VCONN_SWAP_CANCEL, 0); break; + case FR_SWAP_SEND: + tcpm_set_state(port, FR_SWAP_CANCEL, 0); + break; + case GET_SINK_CAP: + port->sink_cap_done = true; + tcpm_set_state(port, ready_state(port), 0); + break; default: break; } @@ -1883,6 +1977,9 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port, case VCONN_SWAP_SEND: tcpm_set_state(port, VCONN_SWAP_START, 0); break; + case FR_SWAP_SEND: + tcpm_set_state(port, FR_SWAP_SNK_SRC_TRANSITION_TO_OFF, 0); + break; default: break; } @@ -2005,7 +2102,7 @@ static void tcpm_pd_ext_msg_request(struct tcpm_port *port, } } -static void tcpm_pd_rx_handler(struct work_struct *work) +static void tcpm_pd_rx_handler(struct kthread_work *work) { struct pd_rx_event *event = container_of(work, struct pd_rx_event, work); @@ -2067,10 +2164,10 @@ void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg) if (!event) return; - INIT_WORK(&event->work, tcpm_pd_rx_handler); + kthread_init_work(&event->work, tcpm_pd_rx_handler); event->port = port; memcpy(&event->msg, msg, sizeof(*msg)); - queue_work(port->wq, &event->work); + kthread_queue_work(port->wq, &event->work); } EXPORT_SYMBOL_GPL(tcpm_pd_receive); @@ -2123,9 +2220,9 @@ static bool tcpm_send_queued_message(struct tcpm_port *port) } while (port->queued_message != PD_MSG_NONE); if (port->delayed_state != INVALID_STATE) { - if (time_is_after_jiffies(port->delayed_runtime)) { - mod_delayed_work(port->wq, &port->state_machine, - port->delayed_runtime - jiffies); + if (ktime_after(port->delayed_runtime, ktime_get())) { + mod_tcpm_delayed_work(port, ktime_to_ms(ktime_sub(port->delayed_runtime, + ktime_get()))); return true; } port->delayed_state = INVALID_STATE; @@ -2783,6 +2880,10 @@ static void tcpm_reset_port(struct tcpm_port *port) port->try_src_count = 0; port->try_snk_count = 0; port->usb_type = POWER_SUPPLY_USB_TYPE_C; + port->nr_sink_caps = 0; + port->sink_cap_done = false; + if (port->tcpc->enable_frs) + port->tcpc->enable_frs(port->tcpc, false); power_supply_changed(port->psy); } @@ -3258,10 +3359,9 @@ static void run_state_machine(struct tcpm_port *port) case SNK_DISCOVERY_DEBOUNCE_DONE: if (!tcpm_port_is_disconnected(port) && tcpm_port_is_sink(port) && - time_is_after_jiffies(port->delayed_runtime)) { + ktime_after(port->delayed_runtime, ktime_get())) { tcpm_set_state(port, SNK_DISCOVERY, - jiffies_to_msecs(port->delayed_runtime - - jiffies)); + ktime_to_ms(ktime_sub(port->delayed_runtime, ktime_get()))); break; } tcpm_set_state(port, unattached_state(port), 0); @@ -3334,10 +3434,9 @@ static void run_state_machine(struct tcpm_port *port) tcpm_swap_complete(port, 0); tcpm_typec_connect(port); tcpm_check_send_discover(port); + mod_enable_frs_delayed_work(port, 0); tcpm_pps_complete(port, port->pps_status); - power_supply_changed(port->psy); - break; /* Accessory states */ @@ -3361,9 +3460,13 @@ static void run_state_machine(struct tcpm_port *port) 
tcpm_set_state(port, HARD_RESET_START, 0); break; case HARD_RESET_START: + port->sink_cap_done = false; + if (port->tcpc->enable_frs) + port->tcpc->enable_frs(port->tcpc, false); port->hard_reset_count++; port->tcpc->set_pd_rx(port->tcpc, false); tcpm_unregister_altmodes(port); + port->nr_sink_caps = 0; port->send_discover = true; if (port->pwr_role == TYPEC_SOURCE) tcpm_set_state(port, SRC_HARD_RESET_VBUS_OFF, @@ -3495,6 +3598,35 @@ static void run_state_machine(struct tcpm_port *port) tcpm_set_state(port, ready_state(port), 0); break; + case FR_SWAP_SEND: + if (tcpm_pd_send_control(port, PD_CTRL_FR_SWAP)) { + tcpm_set_state(port, ERROR_RECOVERY, 0); + break; + } + tcpm_set_state_cond(port, FR_SWAP_SEND_TIMEOUT, PD_T_SENDER_RESPONSE); + break; + case FR_SWAP_SEND_TIMEOUT: + tcpm_set_state(port, ERROR_RECOVERY, 0); + break; + case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF: + tcpm_set_state(port, ERROR_RECOVERY, PD_T_PS_SOURCE_OFF); + break; + case FR_SWAP_SNK_SRC_NEW_SINK_READY: + if (port->vbus_source) + tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0); + else + tcpm_set_state(port, ERROR_RECOVERY, PD_T_RECEIVER_RESPONSE); + break; + case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED: + tcpm_set_pwr_role(port, TYPEC_SOURCE); + if (tcpm_pd_send_control(port, PD_CTRL_PS_RDY)) { + tcpm_set_state(port, ERROR_RECOVERY, 0); + break; + } + tcpm_set_cc(port, tcpm_rp_cc(port)); + tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START); + break; + /* PR_Swap states */ case PR_SWAP_ACCEPT: tcpm_pd_send_control(port, PD_CTRL_ACCEPT); @@ -3573,7 +3705,7 @@ static void run_state_machine(struct tcpm_port *port) */ tcpm_set_pwr_role(port, TYPEC_SOURCE); tcpm_pd_send_control(port, PD_CTRL_PS_RDY); - tcpm_set_state(port, SRC_STARTUP, 0); + tcpm_set_state(port, SRC_STARTUP, PD_T_SWAP_SRC_START); break; case VCONN_SWAP_ACCEPT: @@ -3618,6 +3750,12 @@ static void run_state_machine(struct tcpm_port *port) else tcpm_set_state(port, SNK_READY, 0); break; + case FR_SWAP_CANCEL: + if (port->pwr_role == TYPEC_SOURCE) + tcpm_set_state(port, SRC_READY, 0); + else + tcpm_set_state(port, SNK_READY, 0); + break; case BIST_RX: switch (BDO_MODE_MASK(port->bist_request)) { @@ -3652,6 +3790,14 @@ static void run_state_machine(struct tcpm_port *port) case GET_PPS_STATUS_SEND_TIMEOUT: tcpm_set_state(port, ready_state(port), 0); break; + case GET_SINK_CAP: + tcpm_pd_send_control(port, PD_CTRL_GET_SINK_CAP); + tcpm_set_state(port, GET_SINK_CAP_TIMEOUT, PD_T_SENDER_RESPONSE); + break; + case GET_SINK_CAP_TIMEOUT: + port->sink_cap_done = true; + tcpm_set_state(port, ready_state(port), 0); + break; case ERROR_RECOVERY: tcpm_swap_complete(port, -EPROTO); tcpm_pps_complete(port, -EPROTO); @@ -3674,10 +3820,9 @@ static void run_state_machine(struct tcpm_port *port) } } -static void tcpm_state_machine_work(struct work_struct *work) +static void tcpm_state_machine_work(struct kthread_work *work) { - struct tcpm_port *port = container_of(work, struct tcpm_port, - state_machine.work); + struct tcpm_port *port = container_of(work, struct tcpm_port, state_machine); enum tcpm_state prev_state; mutex_lock(&port->lock); @@ -3868,6 +4013,13 @@ static void _tcpm_cc_change(struct tcpm_port *port, enum typec_cc_status cc1, * Ignore it. 
*/ break; + case FR_SWAP_SEND: + case FR_SWAP_SEND_TIMEOUT: + case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF: + case FR_SWAP_SNK_SRC_NEW_SINK_READY: + case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED: + /* Do nothing, CC change expected */ + break; case PORT_RESET: case PORT_RESET_WAIT_OFF: @@ -3938,6 +4090,9 @@ static void _tcpm_pd_vbus_on(struct tcpm_port *port) case SRC_TRY_DEBOUNCE: /* Do nothing, waiting for sink detection */ break; + case FR_SWAP_SNK_SRC_NEW_SINK_READY: + tcpm_set_state(port, FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED, 0); + break; case PORT_RESET: case PORT_RESET_WAIT_OFF: @@ -4017,6 +4172,14 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port) */ break; + case FR_SWAP_SEND: + case FR_SWAP_SEND_TIMEOUT: + case FR_SWAP_SNK_SRC_TRANSITION_TO_OFF: + case FR_SWAP_SNK_SRC_NEW_SINK_READY: + case FR_SWAP_SNK_SRC_SOURCE_VBUS_APPLIED: + /* Do nothing, vbus drop expected */ + break; + default: if (port->pwr_role == TYPEC_SINK && port->attached) @@ -4041,7 +4204,7 @@ static void _tcpm_pd_hard_reset(struct tcpm_port *port) 0); } -static void tcpm_pd_event_handler(struct work_struct *work) +static void tcpm_pd_event_handler(struct kthread_work *work) { struct tcpm_port *port = container_of(work, struct tcpm_port, event_work); @@ -4071,6 +4234,25 @@ static void tcpm_pd_event_handler(struct work_struct *work) if (port->tcpc->get_cc(port->tcpc, &cc1, &cc2) == 0) _tcpm_cc_change(port, cc1, cc2); } + if (events & TCPM_FRS_EVENT) { + if (port->state == SNK_READY) + tcpm_set_state(port, FR_SWAP_SEND, 0); + else + tcpm_log(port, "Discarding FRS_SIGNAL! Not in sink ready"); + } + if (events & TCPM_SOURCING_VBUS) { + tcpm_log(port, "sourcing vbus"); + /* + * In fast role swap case TCPC autonomously sources vbus. Set vbus_source + * true as TCPM wouldn't have called tcpm_set_vbus. + * + * When vbus is sourced on the command on TCPM i.e. TCPM called + * tcpm_set_vbus to source vbus, vbus_source would already be true. 
+ */ + port->vbus_source = true; + _tcpm_pd_vbus_on(port); + } + spin_lock(&port->pd_event_lock); } spin_unlock(&port->pd_event_lock); @@ -4082,7 +4264,7 @@ void tcpm_cc_change(struct tcpm_port *port) spin_lock(&port->pd_event_lock); port->pd_events |= TCPM_CC_EVENT; spin_unlock(&port->pd_event_lock); - queue_work(port->wq, &port->event_work); + kthread_queue_work(port->wq, &port->event_work); } EXPORT_SYMBOL_GPL(tcpm_cc_change); @@ -4091,7 +4273,7 @@ void tcpm_vbus_change(struct tcpm_port *port) spin_lock(&port->pd_event_lock); port->pd_events |= TCPM_VBUS_EVENT; spin_unlock(&port->pd_event_lock); - queue_work(port->wq, &port->event_work); + kthread_queue_work(port->wq, &port->event_work); } EXPORT_SYMBOL_GPL(tcpm_vbus_change); @@ -4100,10 +4282,54 @@ void tcpm_pd_hard_reset(struct tcpm_port *port) spin_lock(&port->pd_event_lock); port->pd_events = TCPM_RESET_EVENT; spin_unlock(&port->pd_event_lock); - queue_work(port->wq, &port->event_work); + kthread_queue_work(port->wq, &port->event_work); } EXPORT_SYMBOL_GPL(tcpm_pd_hard_reset); +void tcpm_sink_frs(struct tcpm_port *port) +{ + spin_lock(&port->pd_event_lock); + port->pd_events = TCPM_FRS_EVENT; + spin_unlock(&port->pd_event_lock); + kthread_queue_work(port->wq, &port->event_work); +} +EXPORT_SYMBOL_GPL(tcpm_sink_frs); + +void tcpm_sourcing_vbus(struct tcpm_port *port) +{ + spin_lock(&port->pd_event_lock); + port->pd_events = TCPM_SOURCING_VBUS; + spin_unlock(&port->pd_event_lock); + kthread_queue_work(port->wq, &port->event_work); +} +EXPORT_SYMBOL_GPL(tcpm_sourcing_vbus); + +static void tcpm_enable_frs_work(struct kthread_work *work) +{ + struct tcpm_port *port = container_of(work, struct tcpm_port, enable_frs); + + mutex_lock(&port->lock); + /* Not FRS capable */ + if (!port->connected || port->port_type != TYPEC_PORT_DRP || + port->pwr_opmode != TYPEC_PWR_MODE_PD || + !port->tcpc->enable_frs || + /* Sink caps queried */ + port->sink_cap_done || port->negotiated_rev < PD_REV30) + goto unlock; + + /* Send when the state machine is idle */ + if (port->state != SNK_READY || port->vdm_state != VDM_STATE_DONE || port->send_discover) + goto resched; + + tcpm_set_state(port, GET_SINK_CAP, 0); + port->sink_cap_done = true; + +resched: + mod_enable_frs_delayed_work(port, GET_SINK_CAP_RETRY_MS); +unlock: + mutex_unlock(&port->lock); +} + static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data) { struct tcpm_port *port = typec_get_drvdata(p); @@ -4511,7 +4737,7 @@ static int tcpm_fw_get_caps(struct tcpm_port *port, { const char *cap_str; int ret; - u32 mw; + u32 mw, frs_current; if (!fwnode) return -EINVAL; @@ -4580,6 +4806,13 @@ sink: port->self_powered = fwnode_property_read_bool(fwnode, "self-powered"); + /* FRS can only be supported byb DRP ports */ + if (port->port_type == TYPEC_PORT_DRP) { + ret = fwnode_property_read_u32(fwnode, "frs-typec-current", &frs_current); + if (ret >= 0 && frs_current <= FRS_5V_3A) + port->frs_current = frs_current; + } + return 0; } @@ -4808,6 +5041,30 @@ static int devm_tcpm_psy_register(struct tcpm_port *port) return PTR_ERR_OR_ZERO(port->psy); } +static enum hrtimer_restart state_machine_timer_handler(struct hrtimer *timer) +{ + struct tcpm_port *port = container_of(timer, struct tcpm_port, state_machine_timer); + + kthread_queue_work(port->wq, &port->state_machine); + return HRTIMER_NORESTART; +} + +static enum hrtimer_restart vdm_state_machine_timer_handler(struct hrtimer *timer) +{ + struct tcpm_port *port = container_of(timer, struct tcpm_port, vdm_state_machine_timer); + + 
kthread_queue_work(port->wq, &port->vdm_state_machine); + return HRTIMER_NORESTART; +} + +static enum hrtimer_restart enable_frs_timer_handler(struct hrtimer *timer) +{ + struct tcpm_port *port = container_of(timer, struct tcpm_port, enable_frs_timer); + + kthread_queue_work(port->wq, &port->enable_frs); + return HRTIMER_NORESTART; +} + struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc) { struct tcpm_port *port; @@ -4829,12 +5086,21 @@ struct tcpm_port *tcpm_register_port(struct device *dev, struct tcpc_dev *tcpc) mutex_init(&port->lock); mutex_init(&port->swap_lock); - port->wq = create_singlethread_workqueue(dev_name(dev)); - if (!port->wq) - return ERR_PTR(-ENOMEM); - INIT_DELAYED_WORK(&port->state_machine, tcpm_state_machine_work); - INIT_DELAYED_WORK(&port->vdm_state_machine, vdm_state_machine_work); - INIT_WORK(&port->event_work, tcpm_pd_event_handler); + port->wq = kthread_create_worker(0, dev_name(dev)); + if (IS_ERR(port->wq)) + return ERR_CAST(port->wq); + sched_set_fifo(port->wq->task); + + kthread_init_work(&port->state_machine, tcpm_state_machine_work); + kthread_init_work(&port->vdm_state_machine, vdm_state_machine_work); + kthread_init_work(&port->event_work, tcpm_pd_event_handler); + kthread_init_work(&port->enable_frs, tcpm_enable_frs_work); + hrtimer_init(&port->state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + port->state_machine_timer.function = state_machine_timer_handler; + hrtimer_init(&port->vdm_state_machine_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + port->vdm_state_machine_timer.function = vdm_state_machine_timer_handler; + hrtimer_init(&port->enable_frs_timer, CLOCK_MONOTONIC, HRTIMER_MODE_REL); + port->enable_frs_timer.function = enable_frs_timer_handler; spin_lock_init(&port->pd_event_lock); @@ -4886,7 +5152,7 @@ out_role_sw_put: usb_role_switch_put(port->role_sw); out_destroy_wq: tcpm_debugfs_exit(port); - destroy_workqueue(port->wq); + kthread_destroy_worker(port->wq); return ERR_PTR(err); } EXPORT_SYMBOL_GPL(tcpm_register_port); @@ -4901,7 +5167,7 @@ void tcpm_unregister_port(struct tcpm_port *port) typec_unregister_port(port->typec_port); usb_role_switch_put(port->role_sw); tcpm_debugfs_exit(port); - destroy_workqueue(port->wq); + kthread_destroy_worker(port->wq); } EXPORT_SYMBOL_GPL(tcpm_unregister_port); diff --git a/drivers/usb/usbip/stub_dev.c b/drivers/usb/usbip/stub_dev.c index 9d7d642022d1..2305d425e6c9 100644 --- a/drivers/usb/usbip/stub_dev.c +++ b/drivers/usb/usbip/stub_dev.c @@ -461,11 +461,6 @@ static void stub_disconnect(struct usb_device *udev) return; } -static bool usbip_match(struct usb_device *udev) -{ - return true; -} - #ifdef CONFIG_PM /* These functions need usb_port_suspend and usb_port_resume, @@ -491,7 +486,6 @@ struct usb_device_driver stub_driver = { .name = "usbip-host", .probe = stub_probe, .disconnect = stub_disconnect, - .match = usbip_match, #ifdef CONFIG_PM .suspend = stub_suspend, .resume = stub_resume, diff --git a/drivers/usb/usbip/usbip_common.c b/drivers/usb/usbip/usbip_common.c index e4b96674c405..4ce6c6a45eb1 100644 --- a/drivers/usb/usbip/usbip_common.c +++ b/drivers/usb/usbip/usbip_common.c @@ -755,13 +755,7 @@ EXPORT_SYMBOL_GPL(usbip_recv_xbuff); static int __init usbip_core_init(void) { - int ret; - - ret = usbip_init_eh(); - if (ret) - return ret; - - return 0; + return usbip_init_eh(); } static void __exit usbip_core_exit(void) diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c index 1b598db5d8b9..66cde5e5f796 100644 --- a/drivers/usb/usbip/vhci_hcd.c 
+++ b/drivers/usb/usbip/vhci_hcd.c @@ -797,8 +797,14 @@ no_need_xmit: usb_hcd_unlink_urb_from_ep(hcd, urb); no_need_unlink: spin_unlock_irqrestore(&vhci->lock, flags); - if (!ret) + if (!ret) { + /* usb_hcd_giveback_urb() should be called with + * irqs disabled + */ + local_irq_disable(); usb_hcd_giveback_urb(hcd, urb, urb->status); + local_irq_enable(); + } return ret; } diff --git a/drivers/vhost/iotlb.c b/drivers/vhost/iotlb.c index 34aec4ba331e..0fd3f87e913c 100644 --- a/drivers/vhost/iotlb.c +++ b/drivers/vhost/iotlb.c @@ -149,7 +149,7 @@ EXPORT_SYMBOL_GPL(vhost_iotlb_free); * vhost_iotlb_itree_first - return the first overlapped range * @iotlb: the IOTLB * @start: start of IOVA range - * @end: end of IOVA range + * @last: last byte in IOVA range */ struct vhost_iotlb_map * vhost_iotlb_itree_first(struct vhost_iotlb *iotlb, u64 start, u64 last) @@ -162,7 +162,7 @@ EXPORT_SYMBOL_GPL(vhost_iotlb_itree_first); * vhost_iotlb_itree_next - return the next overlapped range * @map: the starting map node * @start: start of IOVA range - * @end: end of IOVA range + * @last: last byte IOVA range */ struct vhost_iotlb_map * vhost_iotlb_itree_next(struct vhost_iotlb_map *map, u64 start, u64 last) diff --git a/drivers/vhost/vdpa.c b/drivers/vhost/vdpa.c index 3fab94f88894..796fe979f997 100644 --- a/drivers/vhost/vdpa.c +++ b/drivers/vhost/vdpa.c @@ -353,8 +353,6 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, struct vdpa_callback cb; struct vhost_virtqueue *vq; struct vhost_vring_state s; - u64 __user *featurep = argp; - u64 features; u32 idx; long r; @@ -381,18 +379,6 @@ static long vhost_vdpa_vring_ioctl(struct vhost_vdpa *v, unsigned int cmd, vq->last_avail_idx = vq_state.avail_index; break; - case VHOST_GET_BACKEND_FEATURES: - features = VHOST_VDPA_BACKEND_FEATURES; - if (copy_to_user(featurep, &features, sizeof(features))) - return -EFAULT; - return 0; - case VHOST_SET_BACKEND_FEATURES: - if (copy_from_user(&features, featurep, sizeof(features))) - return -EFAULT; - if (features & ~VHOST_VDPA_BACKEND_FEATURES) - return -EOPNOTSUPP; - vhost_set_backend_features(&v->vdev, features); - return 0; } r = vhost_vring_ioctl(&v->vdev, cmd, argp); @@ -440,8 +426,20 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep, struct vhost_vdpa *v = filep->private_data; struct vhost_dev *d = &v->vdev; void __user *argp = (void __user *)arg; + u64 __user *featurep = argp; + u64 features; long r; + if (cmd == VHOST_SET_BACKEND_FEATURES) { + r = copy_from_user(&features, featurep, sizeof(features)); + if (r) + return r; + if (features & ~VHOST_VDPA_BACKEND_FEATURES) + return -EOPNOTSUPP; + vhost_set_backend_features(&v->vdev, features); + return 0; + } + mutex_lock(&d->mutex); switch (cmd) { @@ -476,6 +474,10 @@ static long vhost_vdpa_unlocked_ioctl(struct file *filep, case VHOST_VDPA_SET_CONFIG_CALL: r = vhost_vdpa_set_config_call(v, argp); break; + case VHOST_GET_BACKEND_FEATURES: + features = VHOST_VDPA_BACKEND_FEATURES; + r = copy_to_user(featurep, &features, sizeof(features)); + break; default: r = vhost_dev_ioctl(&v->vdev, cmd, argp); if (r == -ENOIOCTLCMD) diff --git a/drivers/xen/events/events_base.c b/drivers/xen/events/events_base.c index 90b8f56fbadb..6f02c18fa65c 100644 --- a/drivers/xen/events/events_base.c +++ b/drivers/xen/events/events_base.c @@ -92,6 +92,8 @@ static bool (*pirq_needs_eoi)(unsigned irq); /* Xen will never allocate port zero for any purpose. 
*/ #define VALID_EVTCHN(chn) ((chn) != 0) +static struct irq_info *legacy_info_ptrs[NR_IRQS_LEGACY]; + static struct irq_chip xen_dynamic_chip; static struct irq_chip xen_percpu_chip; static struct irq_chip xen_pirq_chip; @@ -156,7 +158,18 @@ int get_evtchn_to_irq(evtchn_port_t evtchn) /* Get info for IRQ */ struct irq_info *info_for_irq(unsigned irq) { - return irq_get_chip_data(irq); + if (irq < nr_legacy_irqs()) + return legacy_info_ptrs[irq]; + else + return irq_get_chip_data(irq); +} + +static void set_info_for_irq(unsigned int irq, struct irq_info *info) +{ + if (irq < nr_legacy_irqs()) + legacy_info_ptrs[irq] = info; + else + irq_set_chip_data(irq, info); } /* Constructors for packed IRQ information. */ @@ -377,7 +390,7 @@ static void xen_irq_init(unsigned irq) info->type = IRQT_UNBOUND; info->refcnt = -1; - irq_set_chip_data(irq, info); + set_info_for_irq(irq, info); list_add_tail(&info->list, &xen_irq_list_head); } @@ -426,14 +439,14 @@ static int __must_check xen_allocate_irq_gsi(unsigned gsi) static void xen_free_irq(unsigned irq) { - struct irq_info *info = irq_get_chip_data(irq); + struct irq_info *info = info_for_irq(irq); if (WARN_ON(!info)) return; list_del(&info->list); - irq_set_chip_data(irq, NULL); + set_info_for_irq(irq, NULL); WARN_ON(info->refcnt > 0); @@ -603,7 +616,7 @@ EXPORT_SYMBOL_GPL(xen_irq_from_gsi); static void __unbind_from_irq(unsigned int irq) { evtchn_port_t evtchn = evtchn_from_irq(irq); - struct irq_info *info = irq_get_chip_data(irq); + struct irq_info *info = info_for_irq(irq); if (info->refcnt > 0) { info->refcnt--; @@ -1108,7 +1121,7 @@ int bind_ipi_to_irqhandler(enum ipi_vector ipi, void unbind_from_irqhandler(unsigned int irq, void *dev_id) { - struct irq_info *info = irq_get_chip_data(irq); + struct irq_info *info = info_for_irq(irq); if (WARN_ON(!info)) return; @@ -1142,7 +1155,7 @@ int evtchn_make_refcounted(evtchn_port_t evtchn) if (irq == -1) return -ENOENT; - info = irq_get_chip_data(irq); + info = info_for_irq(irq); if (!info) return -ENOENT; @@ -1170,7 +1183,7 @@ int evtchn_get(evtchn_port_t evtchn) if (irq == -1) goto done; - info = irq_get_chip_data(irq); + info = info_for_irq(irq); if (!info) goto done; diff --git a/fs/autofs/waitq.c b/fs/autofs/waitq.c index 74c886f7c51c..5ced859dac53 100644 --- a/fs/autofs/waitq.c +++ b/fs/autofs/waitq.c @@ -53,7 +53,7 @@ static int autofs_write(struct autofs_sb_info *sbi, mutex_lock(&sbi->pipe_mutex); while (bytes) { - wr = kernel_write(file, data, bytes, &file->f_pos); + wr = __kernel_write(file, data, bytes, NULL); if (wr <= 0) break; data += wr; diff --git a/fs/btrfs/dev-replace.c b/fs/btrfs/dev-replace.c index db93909b25e0..e4a1c6afe35d 100644 --- a/fs/btrfs/dev-replace.c +++ b/fs/btrfs/dev-replace.c @@ -599,6 +599,37 @@ static void btrfs_rm_dev_replace_unblocked(struct btrfs_fs_info *fs_info) wake_up(&fs_info->dev_replace.replace_wait); } +/* + * When finishing the device replace, before swapping the source device with the + * target device we must update the chunk allocation state in the target device, + * as it is empty because replace works by directly copying the chunks and not + * through the normal chunk allocation path. 
+ */ +static int btrfs_set_target_alloc_state(struct btrfs_device *srcdev, + struct btrfs_device *tgtdev) +{ + struct extent_state *cached_state = NULL; + u64 start = 0; + u64 found_start; + u64 found_end; + int ret = 0; + + lockdep_assert_held(&srcdev->fs_info->chunk_mutex); + + while (!find_first_extent_bit(&srcdev->alloc_state, start, + &found_start, &found_end, + CHUNK_ALLOCATED, &cached_state)) { + ret = set_extent_bits(&tgtdev->alloc_state, found_start, + found_end, CHUNK_ALLOCATED); + if (ret) + break; + start = found_end + 1; + } + + free_extent_state(cached_state); + return ret; +} + static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, int scrub_ret) { @@ -673,8 +704,14 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, dev_replace->time_stopped = ktime_get_real_seconds(); dev_replace->item_needs_writeback = 1; - /* replace old device with new one in mapping tree */ + /* + * Update allocation state in the new device and replace the old device + * with the new one in the mapping tree. + */ if (!scrub_ret) { + scrub_ret = btrfs_set_target_alloc_state(src_device, tgt_device); + if (scrub_ret) + goto error; btrfs_dev_replace_update_device_in_mapping_tree(fs_info, src_device, tgt_device); @@ -685,6 +722,7 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, btrfs_dev_name(src_device), src_device->devid, rcu_str_deref(tgt_device->name), scrub_ret); +error: up_write(&dev_replace->rwsem); mutex_unlock(&fs_info->chunk_mutex); mutex_unlock(&fs_info->fs_devices->device_list_mutex); @@ -745,7 +783,9 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, /* replace the sysfs entry */ btrfs_sysfs_remove_devices_dir(fs_info->fs_devices, src_device); btrfs_sysfs_update_devid(tgt_device); - btrfs_rm_dev_replace_free_srcdev(src_device); + if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &src_device->dev_state)) + btrfs_scratch_superblocks(fs_info, src_device->bdev, + src_device->name->str); /* write back the superblocks */ trans = btrfs_start_transaction(root, 0); @@ -754,6 +794,8 @@ static int btrfs_dev_replace_finishing(struct btrfs_fs_info *fs_info, mutex_unlock(&dev_replace->lock_finishing_cancel_unmount); + btrfs_rm_dev_replace_free_srcdev(src_device); + return 0; } diff --git a/fs/btrfs/disk-io.c b/fs/btrfs/disk-io.c index abf86b202b43..9f72b092bc22 100644 --- a/fs/btrfs/disk-io.c +++ b/fs/btrfs/disk-io.c @@ -636,16 +636,15 @@ static int btree_readpage_end_io_hook(struct btrfs_io_bio *io_bio, csum_tree_block(eb, result); if (memcmp_extent_buffer(eb, result, 0, csum_size)) { - u32 val; - u32 found = 0; - - memcpy(&found, result, csum_size); + u8 val[BTRFS_CSUM_SIZE] = { 0 }; read_extent_buffer(eb, &val, 0, csum_size); btrfs_warn_rl(fs_info, - "%s checksum verify failed on %llu wanted %x found %x level %d", + "%s checksum verify failed on %llu wanted " CSUM_FMT " found " CSUM_FMT " level %d", fs_info->sb->s_id, eb->start, - val, found, btrfs_header_level(eb)); + CSUM_FMT_VALUE(csum_size, val), + CSUM_FMT_VALUE(csum_size, result), + btrfs_header_level(eb)); ret = -EUCLEAN; goto err; } diff --git a/fs/btrfs/sysfs.c b/fs/btrfs/sysfs.c index c8df2edafd85..5be30066563c 100644 --- a/fs/btrfs/sysfs.c +++ b/fs/btrfs/sysfs.c @@ -1170,10 +1170,12 @@ int btrfs_sysfs_remove_devices_dir(struct btrfs_fs_devices *fs_devices, disk_kobj->name); } - kobject_del(&one_device->devid_kobj); - kobject_put(&one_device->devid_kobj); + if (one_device->devid_kobj.state_initialized) { + kobject_del(&one_device->devid_kobj); + 
kobject_put(&one_device->devid_kobj); - wait_for_completion(&one_device->kobj_unregister); + wait_for_completion(&one_device->kobj_unregister); + } return 0; } @@ -1186,10 +1188,12 @@ int btrfs_sysfs_remove_devices_dir(struct btrfs_fs_devices *fs_devices, sysfs_remove_link(fs_devices->devices_kobj, disk_kobj->name); } - kobject_del(&one_device->devid_kobj); - kobject_put(&one_device->devid_kobj); + if (one_device->devid_kobj.state_initialized) { + kobject_del(&one_device->devid_kobj); + kobject_put(&one_device->devid_kobj); - wait_for_completion(&one_device->kobj_unregister); + wait_for_completion(&one_device->kobj_unregister); + } } return 0; diff --git a/fs/btrfs/volumes.c b/fs/btrfs/volumes.c index 117b43367629..1997a7d67f22 100644 --- a/fs/btrfs/volumes.c +++ b/fs/btrfs/volumes.c @@ -1999,9 +1999,9 @@ static u64 btrfs_num_devices(struct btrfs_fs_info *fs_info) return num_devices; } -static void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, - struct block_device *bdev, - const char *device_path) +void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, + struct block_device *bdev, + const char *device_path) { struct btrfs_super_block *disk_super; int copy_num; @@ -2224,11 +2224,7 @@ void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev) struct btrfs_fs_info *fs_info = srcdev->fs_info; struct btrfs_fs_devices *fs_devices = srcdev->fs_devices; - if (test_bit(BTRFS_DEV_STATE_WRITEABLE, &srcdev->dev_state)) { - /* zero out the old super if it is writable */ - btrfs_scratch_superblocks(fs_info, srcdev->bdev, - srcdev->name->str); - } + mutex_lock(&uuid_mutex); btrfs_close_bdev(srcdev); synchronize_rcu(); @@ -2258,6 +2254,7 @@ void btrfs_rm_dev_replace_free_srcdev(struct btrfs_device *srcdev) close_fs_devices(fs_devices); free_fs_devices(fs_devices); } + mutex_unlock(&uuid_mutex); } void btrfs_destroy_dev_replace_tgtdev(struct btrfs_device *tgtdev) diff --git a/fs/btrfs/volumes.h b/fs/btrfs/volumes.h index 5eea93916fbf..302c9234f7d0 100644 --- a/fs/btrfs/volumes.h +++ b/fs/btrfs/volumes.h @@ -573,6 +573,9 @@ void btrfs_set_fs_info_ptr(struct btrfs_fs_info *fs_info); void btrfs_reset_fs_info_ptr(struct btrfs_fs_info *fs_info); bool btrfs_check_rw_degradable(struct btrfs_fs_info *fs_info, struct btrfs_device *failing_dev); +void btrfs_scratch_superblocks(struct btrfs_fs_info *fs_info, + struct block_device *bdev, + const char *device_path); int btrfs_bg_type_to_factor(u64 flags); const char *btrfs_bg_type_to_raid_name(u64 flags); diff --git a/fs/eventpoll.c b/fs/eventpoll.c index 8107e06d7f6f..4df61129566d 100644 --- a/fs/eventpoll.c +++ b/fs/eventpoll.c @@ -218,8 +218,7 @@ struct eventpoll { struct file *file; /* used to optimize loop detection check */ - struct list_head visited_list_link; - int visited; + u64 gen; #ifdef CONFIG_NET_RX_BUSY_POLL /* used to track busy poll napi_id */ @@ -274,6 +273,8 @@ static long max_user_watches __read_mostly; */ static DEFINE_MUTEX(epmutex); +static u64 loop_check_gen = 0; + /* Used to check for epoll file descriptor inclusion loops */ static struct nested_calls poll_loop_ncalls; @@ -283,9 +284,6 @@ static struct kmem_cache *epi_cache __read_mostly; /* Slab cache used to allocate "struct eppoll_entry" */ static struct kmem_cache *pwq_cache __read_mostly; -/* Visited nodes during ep_loop_check(), so we can unset them when we finish */ -static LIST_HEAD(visited_list); - /* * List of files with newly added links, where we may need to limit the number * of emanating paths. Protected by the epmutex. 
@@ -1450,7 +1448,7 @@ static int reverse_path_check(void) static int ep_create_wakeup_source(struct epitem *epi) { - const char *name; + struct name_snapshot n; struct wakeup_source *ws; if (!epi->ep->ws) { @@ -1459,8 +1457,9 @@ static int ep_create_wakeup_source(struct epitem *epi) return -ENOMEM; } - name = epi->ffd.file->f_path.dentry->d_name.name; - ws = wakeup_source_register(NULL, name); + take_dentry_name_snapshot(&n, epi->ffd.file->f_path.dentry); + ws = wakeup_source_register(NULL, n.name.name); + release_dentry_name_snapshot(&n); if (!ws) return -ENOMEM; @@ -1522,6 +1521,22 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, RCU_INIT_POINTER(epi->ws, NULL); } + /* Add the current item to the list of active epoll hook for this file */ + spin_lock(&tfile->f_lock); + list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links); + spin_unlock(&tfile->f_lock); + + /* + * Add the current item to the RB tree. All RB tree operations are + * protected by "mtx", and ep_insert() is called with "mtx" held. + */ + ep_rbtree_insert(ep, epi); + + /* now check if we've created too many backpaths */ + error = -EINVAL; + if (full_check && reverse_path_check()) + goto error_remove_epi; + /* Initialize the poll table using the queue callback */ epq.epi = epi; init_poll_funcptr(&epq.pt, ep_ptable_queue_proc); @@ -1544,22 +1559,6 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, if (epi->nwait < 0) goto error_unregister; - /* Add the current item to the list of active epoll hook for this file */ - spin_lock(&tfile->f_lock); - list_add_tail_rcu(&epi->fllink, &tfile->f_ep_links); - spin_unlock(&tfile->f_lock); - - /* - * Add the current item to the RB tree. All RB tree operations are - * protected by "mtx", and ep_insert() is called with "mtx" held. - */ - ep_rbtree_insert(ep, epi); - - /* now check if we've created too many backpaths */ - error = -EINVAL; - if (full_check && reverse_path_check()) - goto error_remove_epi; - /* We have to drop the new item inside our item list to keep track of it */ write_lock_irq(&ep->lock); @@ -1588,6 +1587,8 @@ static int ep_insert(struct eventpoll *ep, const struct epoll_event *event, return 0; +error_unregister: + ep_unregister_pollwait(ep, epi); error_remove_epi: spin_lock(&tfile->f_lock); list_del_rcu(&epi->fllink); @@ -1595,9 +1596,6 @@ error_remove_epi: rb_erase_cached(&epi->rbn, &ep->rbr); -error_unregister: - ep_unregister_pollwait(ep, epi); - /* * We need to do this because an event could have been arrived on some * allocated wait queue. 
Note that we don't care about the ep->ovflist @@ -1972,13 +1970,12 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) struct epitem *epi; mutex_lock_nested(&ep->mtx, call_nests + 1); - ep->visited = 1; - list_add(&ep->visited_list_link, &visited_list); + ep->gen = loop_check_gen; for (rbp = rb_first_cached(&ep->rbr); rbp; rbp = rb_next(rbp)) { epi = rb_entry(rbp, struct epitem, rbn); if (unlikely(is_file_epoll(epi->ffd.file))) { ep_tovisit = epi->ffd.file->private_data; - if (ep_tovisit->visited) + if (ep_tovisit->gen == loop_check_gen) continue; error = ep_call_nested(&poll_loop_ncalls, ep_loop_check_proc, epi->ffd.file, @@ -2019,18 +2016,8 @@ static int ep_loop_check_proc(void *priv, void *cookie, int call_nests) */ static int ep_loop_check(struct eventpoll *ep, struct file *file) { - int ret; - struct eventpoll *ep_cur, *ep_next; - - ret = ep_call_nested(&poll_loop_ncalls, + return ep_call_nested(&poll_loop_ncalls, ep_loop_check_proc, file, ep, current); - /* clear visited list */ - list_for_each_entry_safe(ep_cur, ep_next, &visited_list, - visited_list_link) { - ep_cur->visited = 0; - list_del(&ep_cur->visited_list_link); - } - return ret; } static void clear_tfile_check_list(void) @@ -2195,11 +2182,13 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds, goto error_tgt_fput; if (op == EPOLL_CTL_ADD) { if (!list_empty(&f.file->f_ep_links) || + ep->gen == loop_check_gen || is_file_epoll(tf.file)) { mutex_unlock(&ep->mtx); error = epoll_mutex_lock(&epmutex, 0, nonblock); if (error) goto error_tgt_fput; + loop_check_gen++; full_check = 1; if (is_file_epoll(tf.file)) { error = -ELOOP; @@ -2263,6 +2252,7 @@ int do_epoll_ctl(int epfd, int op, int fd, struct epoll_event *epds, error_tgt_fput: if (full_check) { clear_tfile_check_list(); + loop_check_gen++; mutex_unlock(&epmutex); } diff --git a/fs/fuse/file.c b/fs/fuse/file.c index 6611ef3269a8..43c165e796da 100644 --- a/fs/fuse/file.c +++ b/fs/fuse/file.c @@ -3091,11 +3091,10 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter) ssize_t ret = 0; struct file *file = iocb->ki_filp; struct fuse_file *ff = file->private_data; - bool async_dio = ff->fc->async_dio; loff_t pos = 0; struct inode *inode; loff_t i_size; - size_t count = iov_iter_count(iter); + size_t count = iov_iter_count(iter), shortened = 0; loff_t offset = iocb->ki_pos; struct fuse_io_priv *io; @@ -3103,17 +3102,9 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter) inode = file->f_mapping->host; i_size = i_size_read(inode); - if ((iov_iter_rw(iter) == READ) && (offset > i_size)) + if ((iov_iter_rw(iter) == READ) && (offset >= i_size)) return 0; - /* optimization for short read */ - if (async_dio && iov_iter_rw(iter) != WRITE && offset + count > i_size) { - if (offset >= i_size) - return 0; - iov_iter_truncate(iter, fuse_round_up(ff->fc, i_size - offset)); - count = iov_iter_count(iter); - } - io = kmalloc(sizeof(struct fuse_io_priv), GFP_KERNEL); if (!io) return -ENOMEM; @@ -3129,15 +3120,22 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter) * By default, we want to optimize all I/Os with async request * submission to the client filesystem if supported. 
*/ - io->async = async_dio; + io->async = ff->fc->async_dio; io->iocb = iocb; io->blocking = is_sync_kiocb(iocb); + /* optimization for short read */ + if (io->async && !io->write && offset + count > i_size) { + iov_iter_truncate(iter, fuse_round_up(ff->fc, i_size - offset)); + shortened = count - iov_iter_count(iter); + count -= shortened; + } + /* * We cannot asynchronously extend the size of a file. * In such case the aio will behave exactly like sync io. */ - if ((offset + count > i_size) && iov_iter_rw(iter) == WRITE) + if ((offset + count > i_size) && io->write) io->blocking = true; if (io->async && io->blocking) { @@ -3155,6 +3153,7 @@ fuse_direct_IO(struct kiocb *iocb, struct iov_iter *iter) } else { ret = __fuse_direct_read(io, iter, &pos); } + iov_iter_reexpand(iter, iov_iter_count(iter) + shortened); if (io->async) { bool blocking = io->blocking; diff --git a/fs/io_uring.c b/fs/io_uring.c index 3790c7fe9fee..aae0ef2ec34d 100644 --- a/fs/io_uring.c +++ b/fs/io_uring.c @@ -1753,6 +1753,9 @@ static int io_req_task_work_add(struct io_kiocb *req, struct callback_head *cb, struct io_ring_ctx *ctx = req->ctx; int ret, notify; + if (tsk->flags & PF_EXITING) + return -ESRCH; + /* * SQPOLL kernel thread doesn't need notification, just a wakeup. For * all other cases, use TWA_SIGNAL unconditionally to ensure we're @@ -1787,8 +1790,10 @@ static void __io_req_task_cancel(struct io_kiocb *req, int error) static void io_req_task_cancel(struct callback_head *cb) { struct io_kiocb *req = container_of(cb, struct io_kiocb, task_work); + struct io_ring_ctx *ctx = req->ctx; __io_req_task_cancel(req, -ECANCELED); + percpu_ref_put(&ctx->refs); } static void __io_req_task_submit(struct io_kiocb *req) @@ -2010,6 +2015,12 @@ static inline unsigned int io_put_rw_kbuf(struct io_kiocb *req) static inline bool io_run_task_work(void) { + /* + * Not safe to run on exiting task, and the task_work handling will + * not add work to such a task. 
+ */ + if (unlikely(current->flags & PF_EXITING)) + return false; if (current->task_works) { __set_current_state(TASK_RUNNING); task_work_run(); @@ -2283,13 +2294,17 @@ static bool io_resubmit_prep(struct io_kiocb *req, int error) goto end_req; } - ret = io_import_iovec(rw, req, &iovec, &iter, false); - if (ret < 0) - goto end_req; - ret = io_setup_async_rw(req, iovec, inline_vecs, &iter, false); - if (!ret) + if (!req->io) { + ret = io_import_iovec(rw, req, &iovec, &iter, false); + if (ret < 0) + goto end_req; + ret = io_setup_async_rw(req, iovec, inline_vecs, &iter, false); + if (!ret) + return true; + kfree(iovec); + } else { return true; - kfree(iovec); + } end_req: req_set_fail_links(req); io_req_complete(req, ret); @@ -3034,6 +3049,7 @@ static int io_async_buf_func(struct wait_queue_entry *wait, unsigned mode, if (!wake_page_match(wpq, key)) return 0; + req->rw.kiocb.ki_flags &= ~IOCB_WAITQ; list_del_init(&wait->entry); init_task_work(&req->task_work, io_req_task_submit); @@ -3091,6 +3107,7 @@ static bool io_rw_should_retry(struct io_kiocb *req) wait->wait.flags = 0; INIT_LIST_HEAD(&wait->wait.entry); kiocb->ki_flags |= IOCB_WAITQ; + kiocb->ki_flags &= ~IOCB_NOWAIT; kiocb->ki_waitq = wait; io_get_req_task(req); @@ -3115,6 +3132,7 @@ static int io_read(struct io_kiocb *req, bool force_nonblock, struct iov_iter __iter, *iter = &__iter; ssize_t io_size, ret, ret2; size_t iov_count; + bool no_async; if (req->io) iter = &req->io->rw.iter; @@ -3132,7 +3150,8 @@ static int io_read(struct io_kiocb *req, bool force_nonblock, kiocb->ki_flags &= ~IOCB_NOWAIT; /* If the file doesn't support async, just async punt */ - if (force_nonblock && !io_file_supports_async(req->file, READ)) + no_async = force_nonblock && !io_file_supports_async(req->file, READ); + if (no_async) goto copy_iov; ret = rw_verify_area(READ, req->file, io_kiocb_ppos(kiocb), iov_count); @@ -3155,10 +3174,8 @@ static int io_read(struct io_kiocb *req, bool force_nonblock, goto done; /* some cases will consume bytes even on error returns */ iov_iter_revert(iter, iov_count - iov_iter_count(iter)); - ret = io_setup_async_rw(req, iovec, inline_vecs, iter, false); - if (ret) - goto out_free; - return -EAGAIN; + ret = 0; + goto copy_iov; } else if (ret < 0) { /* make sure -ERESTARTSYS -> -EINTR is done */ goto done; @@ -3176,6 +3193,8 @@ copy_iov: ret = ret2; goto out_free; } + if (no_async) + return -EAGAIN; /* it's copied and will be cleaned with ->io */ iovec = NULL; /* now use our persistent iterator, if we aren't already */ @@ -3508,8 +3527,6 @@ static int __io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe const char __user *fname; int ret; - if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) - return -EINVAL; if (unlikely(sqe->ioprio || sqe->buf_index)) return -EINVAL; if (unlikely(req->flags & REQ_F_FIXED_FILE)) @@ -3536,6 +3553,8 @@ static int io_openat_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { u64 flags, mode; + if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) + return -EINVAL; if (req->flags & REQ_F_NEED_CLEANUP) return 0; mode = READ_ONCE(sqe->len); @@ -3550,6 +3569,8 @@ static int io_openat2_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) size_t len; int ret; + if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL|IORING_SETUP_SQPOLL))) + return -EINVAL; if (req->flags & REQ_F_NEED_CLEANUP) return 0; how = u64_to_user_ptr(READ_ONCE(sqe->addr2)); @@ -3767,7 +3788,7 @@ static int io_epoll_ctl_prep(struct io_kiocb *req, #if 
defined(CONFIG_EPOLL) if (sqe->ioprio || sqe->buf_index) return -EINVAL; - if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL))) return -EINVAL; req->epoll.epfd = READ_ONCE(sqe->fd); @@ -3882,7 +3903,7 @@ static int io_fadvise(struct io_kiocb *req, bool force_nonblock) static int io_statx_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { - if (unlikely(req->ctx->flags & IORING_SETUP_IOPOLL)) + if (unlikely(req->ctx->flags & (IORING_SETUP_IOPOLL | IORING_SETUP_SQPOLL))) return -EINVAL; if (sqe->ioprio || sqe->buf_index) return -EINVAL; @@ -4724,6 +4745,8 @@ static int io_poll_double_wake(struct wait_queue_entry *wait, unsigned mode, if (mask && !(mask & poll->events)) return 0; + list_del_init(&wait->entry); + if (poll && poll->head) { bool done; @@ -5399,6 +5422,8 @@ static int io_async_cancel(struct io_kiocb *req) static int io_files_update_prep(struct io_kiocb *req, const struct io_uring_sqe *sqe) { + if (unlikely(req->ctx->flags & IORING_SETUP_SQPOLL)) + return -EINVAL; if (unlikely(req->flags & (REQ_F_FIXED_FILE | REQ_F_BUFFER_SELECT))) return -EINVAL; if (sqe->ioprio || sqe->rw_flags) @@ -5449,6 +5474,8 @@ static int io_req_defer_prep(struct io_kiocb *req, if (unlikely(ret)) return ret; + io_prep_async_work(req); + switch (req->opcode) { case IORING_OP_NOP: break; @@ -5646,6 +5673,11 @@ static void __io_clean_op(struct io_kiocb *req) io_put_file(req, req->splice.file_in, (req->splice.flags & SPLICE_F_FD_IN_FIXED)); break; + case IORING_OP_OPENAT: + case IORING_OP_OPENAT2: + if (req->open.filename) + putname(req->open.filename); + break; } req->flags &= ~REQ_F_NEED_CLEANUP; } @@ -6323,9 +6355,6 @@ static void io_submit_state_start(struct io_submit_state *state, struct io_ring_ctx *ctx, unsigned int max_ios) { blk_start_plug(&state->plug); -#ifdef CONFIG_BLOCK - state->plug.nowait = true; -#endif state->comp.nr = 0; INIT_LIST_HEAD(&state->comp.list); state->comp.ctx = ctx; @@ -8180,6 +8209,8 @@ static void io_uring_cancel_files(struct io_ring_ctx *ctx, /* cancel this request, or head link requests */ io_attempt_cancel(ctx, cancel_req); io_put_req(cancel_req); + /* cancellations _may_ trigger task work */ + io_run_task_work(); schedule(); finish_wait(&ctx->inflight_wait, &wait); } @@ -8385,11 +8416,19 @@ static int io_uring_show_cred(int id, void *p, void *data) static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m) { + bool has_lock; int i; - mutex_lock(&ctx->uring_lock); + /* + * Avoid ABBA deadlock between the seq lock and the io_uring mutex, + * since fdinfo case grabs it in the opposite direction of normal use + * cases. If we fail to get the lock, we just don't iterate any + * structures that could be going away outside the io_uring mutex. 
+ */ + has_lock = mutex_trylock(&ctx->uring_lock); + seq_printf(m, "UserFiles:\t%u\n", ctx->nr_user_files); - for (i = 0; i < ctx->nr_user_files; i++) { + for (i = 0; has_lock && i < ctx->nr_user_files; i++) { struct fixed_file_table *table; struct file *f; @@ -8401,13 +8440,13 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m) seq_printf(m, "%5u: <none>\n", i); } seq_printf(m, "UserBufs:\t%u\n", ctx->nr_user_bufs); - for (i = 0; i < ctx->nr_user_bufs; i++) { + for (i = 0; has_lock && i < ctx->nr_user_bufs; i++) { struct io_mapped_ubuf *buf = &ctx->user_bufs[i]; seq_printf(m, "%5u: 0x%llx/%u\n", i, buf->ubuf, (unsigned int) buf->len); } - if (!idr_is_empty(&ctx->personality_idr)) { + if (has_lock && !idr_is_empty(&ctx->personality_idr)) { seq_printf(m, "Personalities:\n"); idr_for_each(&ctx->personality_idr, io_uring_show_cred, m); } @@ -8422,7 +8461,8 @@ static void __io_uring_show_fdinfo(struct io_ring_ctx *ctx, struct seq_file *m) req->task->task_works != NULL); } spin_unlock_irq(&ctx->completion_lock); - mutex_unlock(&ctx->uring_lock); + if (has_lock) + mutex_unlock(&ctx->uring_lock); } static void io_uring_show_fdinfo(struct seq_file *m, struct file *f) diff --git a/fs/nfs/dir.c b/fs/nfs/dir.c index e732580fe47b..cb52db9a0cfb 100644 --- a/fs/nfs/dir.c +++ b/fs/nfs/dir.c @@ -579,6 +579,9 @@ int nfs_readdir_page_filler(nfs_readdir_descriptor_t *desc, struct nfs_entry *en xdr_set_scratch_buffer(&stream, page_address(scratch), PAGE_SIZE); do { + if (entry->label) + entry->label->len = NFS4_MAXLABELLEN; + status = xdr_decode(desc, entry, &stream); if (status != 0) { if (status == -EAGAIN) diff --git a/fs/nfs/flexfilelayout/flexfilelayout.c b/fs/nfs/flexfilelayout/flexfilelayout.c index ff8965d1a4d4..a163533446fa 100644 --- a/fs/nfs/flexfilelayout/flexfilelayout.c +++ b/fs/nfs/flexfilelayout/flexfilelayout.c @@ -715,7 +715,7 @@ nfs4_ff_layout_stat_io_end_write(struct rpc_task *task, } static void -ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, int idx) +ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, u32 idx) { struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx); @@ -724,7 +724,7 @@ ff_layout_mark_ds_unreachable(struct pnfs_layout_segment *lseg, int idx) } static void -ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, int idx) +ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, u32 idx) { struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx); @@ -734,14 +734,14 @@ ff_layout_mark_ds_reachable(struct pnfs_layout_segment *lseg, int idx) static struct nfs4_pnfs_ds * ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg, - int start_idx, int *best_idx, + u32 start_idx, u32 *best_idx, bool check_device) { struct nfs4_ff_layout_segment *fls = FF_LAYOUT_LSEG(lseg); struct nfs4_ff_layout_mirror *mirror; struct nfs4_pnfs_ds *ds; bool fail_return = false; - int idx; + u32 idx; /* mirrors are initially sorted by efficiency */ for (idx = start_idx; idx < fls->mirror_array_cnt; idx++) { @@ -766,21 +766,21 @@ ff_layout_choose_ds_for_read(struct pnfs_layout_segment *lseg, static struct nfs4_pnfs_ds * ff_layout_choose_any_ds_for_read(struct pnfs_layout_segment *lseg, - int start_idx, int *best_idx) + u32 start_idx, u32 *best_idx) { return ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, false); } static struct nfs4_pnfs_ds * ff_layout_choose_valid_ds_for_read(struct pnfs_layout_segment *lseg, - int start_idx, int *best_idx) + u32 start_idx, u32 *best_idx) { return 
ff_layout_choose_ds_for_read(lseg, start_idx, best_idx, true); } static struct nfs4_pnfs_ds * ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg, - int start_idx, int *best_idx) + u32 start_idx, u32 *best_idx) { struct nfs4_pnfs_ds *ds; @@ -791,7 +791,8 @@ ff_layout_choose_best_ds_for_read(struct pnfs_layout_segment *lseg, } static struct nfs4_pnfs_ds * -ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio, int *best_idx) +ff_layout_get_ds_for_read(struct nfs_pageio_descriptor *pgio, + u32 *best_idx) { struct pnfs_layout_segment *lseg = pgio->pg_lseg; struct nfs4_pnfs_ds *ds; @@ -837,7 +838,7 @@ ff_layout_pg_init_read(struct nfs_pageio_descriptor *pgio, struct nfs_pgio_mirror *pgm; struct nfs4_ff_layout_mirror *mirror; struct nfs4_pnfs_ds *ds; - int ds_idx; + u32 ds_idx, i; retry: ff_layout_pg_check_layout(pgio, req); @@ -863,14 +864,14 @@ retry: goto retry; } - mirror = FF_LAYOUT_COMP(pgio->pg_lseg, ds_idx); + for (i = 0; i < pgio->pg_mirror_count; i++) { + mirror = FF_LAYOUT_COMP(pgio->pg_lseg, i); + pgm = &pgio->pg_mirrors[i]; + pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize; + } pgio->pg_mirror_idx = ds_idx; - /* read always uses only one mirror - idx 0 for pgio layer */ - pgm = &pgio->pg_mirrors[0]; - pgm->pg_bsize = mirror->mirror_ds->ds_versions[0].rsize; - if (NFS_SERVER(pgio->pg_inode)->flags & (NFS_MOUNT_SOFT|NFS_MOUNT_SOFTERR)) pgio->pg_maxretrans = io_maxretrans; @@ -894,7 +895,7 @@ ff_layout_pg_init_write(struct nfs_pageio_descriptor *pgio, struct nfs4_ff_layout_mirror *mirror; struct nfs_pgio_mirror *pgm; struct nfs4_pnfs_ds *ds; - int i; + u32 i; retry: ff_layout_pg_check_layout(pgio, req); @@ -1038,7 +1039,7 @@ static void ff_layout_reset_write(struct nfs_pgio_header *hdr, bool retry_pnfs) static void ff_layout_resend_pnfs_read(struct nfs_pgio_header *hdr) { u32 idx = hdr->pgio_mirror_idx + 1; - int new_idx = 0; + u32 new_idx = 0; if (ff_layout_choose_any_ds_for_read(hdr->lseg, idx + 1, &new_idx)) ff_layout_send_layouterror(hdr->lseg); @@ -1075,7 +1076,7 @@ static int ff_layout_async_handle_error_v4(struct rpc_task *task, struct nfs4_state *state, struct nfs_client *clp, struct pnfs_layout_segment *lseg, - int idx) + u32 idx) { struct pnfs_layout_hdr *lo = lseg->pls_layout; struct inode *inode = lo->plh_inode; @@ -1149,7 +1150,7 @@ reset: /* Retry all errors through either pNFS or MDS except for -EJUKEBOX */ static int ff_layout_async_handle_error_v3(struct rpc_task *task, struct pnfs_layout_segment *lseg, - int idx) + u32 idx) { struct nfs4_deviceid_node *devid = FF_LAYOUT_DEVID_NODE(lseg, idx); @@ -1184,7 +1185,7 @@ static int ff_layout_async_handle_error(struct rpc_task *task, struct nfs4_state *state, struct nfs_client *clp, struct pnfs_layout_segment *lseg, - int idx) + u32 idx) { int vers = clp->cl_nfs_mod->rpc_vers->number; @@ -1211,7 +1212,7 @@ static int ff_layout_async_handle_error(struct rpc_task *task, } static void ff_layout_io_track_ds_error(struct pnfs_layout_segment *lseg, - int idx, u64 offset, u64 length, + u32 idx, u64 offset, u64 length, u32 *op_status, int opnum, int error) { struct nfs4_ff_layout_mirror *mirror; @@ -1809,7 +1810,7 @@ ff_layout_write_pagelist(struct nfs_pgio_header *hdr, int sync) loff_t offset = hdr->args.offset; int vers; struct nfs_fh *fh; - int idx = hdr->pgio_mirror_idx; + u32 idx = hdr->pgio_mirror_idx; mirror = FF_LAYOUT_COMP(lseg, idx); ds = nfs4_ff_layout_prepare_ds(lseg, mirror, true); diff --git a/fs/nfs/nfs42proc.c b/fs/nfs/nfs42proc.c index 142225f0af59..2b2211d1234e 100644 --- 
a/fs/nfs/nfs42proc.c +++ b/fs/nfs/nfs42proc.c @@ -356,7 +356,15 @@ static ssize_t _nfs42_proc_copy(struct file *src, truncate_pagecache_range(dst_inode, pos_dst, pos_dst + res->write_res.count); - + spin_lock(&dst_inode->i_lock); + NFS_I(dst_inode)->cache_validity |= (NFS_INO_REVAL_PAGECACHE | + NFS_INO_REVAL_FORCED | NFS_INO_INVALID_SIZE | + NFS_INO_INVALID_ATTR | NFS_INO_INVALID_DATA); + spin_unlock(&dst_inode->i_lock); + spin_lock(&src_inode->i_lock); + NFS_I(src_inode)->cache_validity |= (NFS_INO_REVAL_PAGECACHE | + NFS_INO_REVAL_FORCED | NFS_INO_INVALID_ATIME); + spin_unlock(&src_inode->i_lock); status = res->write_res.count; out: if (args->sync) diff --git a/fs/pipe.c b/fs/pipe.c index 60dbee457143..117db82b10af 100644 --- a/fs/pipe.c +++ b/fs/pipe.c @@ -106,25 +106,6 @@ void pipe_double_lock(struct pipe_inode_info *pipe1, } } -/* Drop the inode semaphore and wait for a pipe event, atomically */ -void pipe_wait(struct pipe_inode_info *pipe) -{ - DEFINE_WAIT(rdwait); - DEFINE_WAIT(wrwait); - - /* - * Pipes are system-local resources, so sleeping on them - * is considered a noninteractive wait: - */ - prepare_to_wait(&pipe->rd_wait, &rdwait, TASK_INTERRUPTIBLE); - prepare_to_wait(&pipe->wr_wait, &wrwait, TASK_INTERRUPTIBLE); - pipe_unlock(pipe); - schedule(); - finish_wait(&pipe->rd_wait, &rdwait); - finish_wait(&pipe->wr_wait, &wrwait); - pipe_lock(pipe); -} - static void anon_pipe_buf_release(struct pipe_inode_info *pipe, struct pipe_buffer *buf) { @@ -1035,12 +1016,52 @@ SYSCALL_DEFINE1(pipe, int __user *, fildes) return do_pipe2(fildes, 0); } +/* + * This is the stupid "wait for pipe to be readable or writable" + * model. + * + * See pipe_read/write() for the proper kind of exclusive wait, + * but that requires that we wake up any other readers/writers + * if we then do not end up reading everything (ie the whole + * "wake_next_reader/writer" logic in pipe_read/write()). + */ +void pipe_wait_readable(struct pipe_inode_info *pipe) +{ + pipe_unlock(pipe); + wait_event_interruptible(pipe->rd_wait, pipe_readable(pipe)); + pipe_lock(pipe); +} + +void pipe_wait_writable(struct pipe_inode_info *pipe) +{ + pipe_unlock(pipe); + wait_event_interruptible(pipe->wr_wait, pipe_writable(pipe)); + pipe_lock(pipe); +} + +/* + * This depends on both the wait (here) and the wakeup (wake_up_partner) + * holding the pipe lock, so "*cnt" is stable and we know a wakeup cannot + * race with the count check and waitqueue prep. + * + * Normally in order to avoid races, you'd do the prepare_to_wait() first, + * then check the condition you're waiting for, and only then sleep. But + * because of the pipe lock, we can check the condition before being on + * the wait queue. + * + * We use the 'rd_wait' waitqueue for pipe partner waiting. 
+ */ static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt) { + DEFINE_WAIT(rdwait); int cur = *cnt; while (cur == *cnt) { - pipe_wait(pipe); + prepare_to_wait(&pipe->rd_wait, &rdwait, TASK_INTERRUPTIBLE); + pipe_unlock(pipe); + schedule(); + finish_wait(&pipe->rd_wait, &rdwait); + pipe_lock(pipe); if (signal_pending(current)) break; } @@ -1050,7 +1071,6 @@ static int wait_for_partner(struct pipe_inode_info *pipe, unsigned int *cnt) static void wake_up_partner(struct pipe_inode_info *pipe) { wake_up_interruptible_all(&pipe->rd_wait); - wake_up_interruptible_all(&pipe->wr_wait); } static int fifo_open(struct inode *inode, struct file *filp) diff --git a/fs/read_write.c b/fs/read_write.c index 5db58b8c78d0..d3428189f36b 100644 --- a/fs/read_write.c +++ b/fs/read_write.c @@ -538,6 +538,14 @@ ssize_t __kernel_write(struct file *file, const void *buf, size_t count, loff_t inc_syscw(current); return ret; } +/* + * This "EXPORT_SYMBOL_GPL()" is more of a "EXPORT_SYMBOL_DONTUSE()", + * but autofs is one of the few internal kernel users that actually + * wants this _and_ can be built as a module. So we need to export + * this symbol for autofs, even though it really isn't appropriate + * for any other kernel modules. + */ +EXPORT_SYMBOL_GPL(__kernel_write); ssize_t kernel_write(struct file *file, const void *buf, size_t count, loff_t *pos) diff --git a/fs/splice.c b/fs/splice.c index d7c8a7c4db07..c3d00dfc7344 100644 --- a/fs/splice.c +++ b/fs/splice.c @@ -563,7 +563,7 @@ static int splice_from_pipe_next(struct pipe_inode_info *pipe, struct splice_des sd->need_wakeup = false; } - pipe_wait(pipe); + pipe_wait_readable(pipe); } return 1; @@ -1077,7 +1077,7 @@ static int wait_for_space(struct pipe_inode_info *pipe, unsigned flags) return -EAGAIN; if (signal_pending(current)) return -ERESTARTSYS; - pipe_wait(pipe); + pipe_wait_writable(pipe); } } @@ -1454,7 +1454,7 @@ static int ipipe_prep(struct pipe_inode_info *pipe, unsigned int flags) ret = -EAGAIN; break; } - pipe_wait(pipe); + pipe_wait_readable(pipe); } pipe_unlock(pipe); @@ -1493,7 +1493,7 @@ static int opipe_prep(struct pipe_inode_info *pipe, unsigned int flags) ret = -ERESTARTSYS; break; } - pipe_wait(pipe); + pipe_wait_writable(pipe); } pipe_unlock(pipe); diff --git a/fs/vboxsf/super.c b/fs/vboxsf/super.c index 8fe03b4a0d2b..25aade344192 100644 --- a/fs/vboxsf/super.c +++ b/fs/vboxsf/super.c @@ -384,7 +384,7 @@ fail_nomem: static int vboxsf_parse_monolithic(struct fs_context *fc, void *data) { - char *options = data; + unsigned char *options = data; if (options && options[0] == VBSF_MOUNT_SIGNATURE_BYTE_0 && options[1] == VBSF_MOUNT_SIGNATURE_BYTE_1 && diff --git a/include/dt-bindings/phy/phy-cadence-torrent.h b/include/dt-bindings/phy/phy-cadence-torrent.h new file mode 100644 index 000000000000..e387b6a95741 --- /dev/null +++ b/include/dt-bindings/phy/phy-cadence-torrent.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * This header provides constants for Cadence Torrent SERDES. 
+ */ + +#ifndef _DT_BINDINGS_TORRENT_SERDES_H +#define _DT_BINDINGS_TORRENT_SERDES_H + +#define TORRENT_SERDES_NO_SSC 0 +#define TORRENT_SERDES_EXTERNAL_SSC 1 +#define TORRENT_SERDES_INTERNAL_SSC 2 + +#endif /* _DT_BINDINGS_TORRENT_SERDES_H */ diff --git a/include/dt-bindings/phy/phy.h b/include/dt-bindings/phy/phy.h index 36e8c241cf48..887a31b250a8 100644 --- a/include/dt-bindings/phy/phy.h +++ b/include/dt-bindings/phy/phy.h @@ -19,5 +19,6 @@ #define PHY_TYPE_DP 6 #define PHY_TYPE_XPCS 7 #define PHY_TYPE_SGMII 8 +#define PHY_TYPE_QSGMII 9 #endif /* _DT_BINDINGS_PHY */ diff --git a/include/dt-bindings/reset/raspberrypi,firmware-reset.h b/include/dt-bindings/reset/raspberrypi,firmware-reset.h new file mode 100644 index 000000000000..1a4f4c792723 --- /dev/null +++ b/include/dt-bindings/reset/raspberrypi,firmware-reset.h @@ -0,0 +1,13 @@ +/* SPDX-License-Identifier: GPL-2.0 */ +/* + * Copyright (c) 2020 Nicolas Saenz Julienne + * Author: Nicolas Saenz Julienne <nsaenzjulienne@suse.com> + */ + +#ifndef _DT_BINDINGS_RASPBERRYPI_FIRMWARE_RESET_H +#define _DT_BINDINGS_RASPBERRYPI_FIRMWARE_RESET_H + +#define RASPBERRYPI_FIRMWARE_RESET_ID_USB 0 +#define RASPBERRYPI_FIRMWARE_RESET_NUM_IDS 1 + +#endif diff --git a/include/linux/acpi.h b/include/linux/acpi.h index 1e4cdc6c7ae2..64ae25c59d55 100644 --- a/include/linux/acpi.h +++ b/include/linux/acpi.h @@ -958,7 +958,7 @@ void acpi_os_set_prepare_extended_sleep(int (*func)(u8 sleep_state, acpi_status acpi_os_prepare_extended_sleep(u8 sleep_state, u32 val_a, u32 val_b); -#ifdef CONFIG_X86 +#ifndef CONFIG_IA64 void arch_reserve_mem_area(acpi_physical_address addr, size_t size); #else static inline void arch_reserve_mem_area(acpi_physical_address addr, diff --git a/include/linux/blk_types.h b/include/linux/blk_types.h index 4ecf4fed171f..b3fc5d3dd8ea 100644 --- a/include/linux/blk_types.h +++ b/include/linux/blk_types.h @@ -497,13 +497,12 @@ static inline int op_stat_group(unsigned int op) typedef unsigned int blk_qc_t; #define BLK_QC_T_NONE -1U -#define BLK_QC_T_EAGAIN -2U #define BLK_QC_T_SHIFT 16 #define BLK_QC_T_INTERNAL (1U << 31) static inline bool blk_qc_t_valid(blk_qc_t cookie) { - return cookie != BLK_QC_T_NONE && cookie != BLK_QC_T_EAGAIN; + return cookie != BLK_QC_T_NONE; } static inline unsigned int blk_qc_t_to_queue_num(blk_qc_t cookie) diff --git a/include/linux/blkdev.h b/include/linux/blkdev.h index bb5636cc17b9..868e11face00 100644 --- a/include/linux/blkdev.h +++ b/include/linux/blkdev.h @@ -352,6 +352,8 @@ struct queue_limits { typedef int (*report_zones_cb)(struct blk_zone *zone, unsigned int idx, void *data); +void blk_queue_set_zoned(struct gendisk *disk, enum blk_zoned_model model); + #ifdef CONFIG_BLK_DEV_ZONED #define BLK_ALL_ZONES ((unsigned int)-1) diff --git a/include/linux/dax.h b/include/linux/dax.h index 497031392e0a..43b39ab9de1a 100644 --- a/include/linux/dax.h +++ b/include/linux/dax.h @@ -58,6 +58,8 @@ static inline void set_dax_synchronous(struct dax_device *dax_dev) { __set_dax_synchronous(dax_dev); } +bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev, + int blocksize, sector_t start, sector_t len); /* * Check if given mapping is supported by the file / underlying device. 
*/ @@ -104,6 +106,12 @@ static inline bool dax_synchronous(struct dax_device *dax_dev) static inline void set_dax_synchronous(struct dax_device *dax_dev) { } +static inline bool dax_supported(struct dax_device *dax_dev, + struct block_device *bdev, int blocksize, sector_t start, + sector_t len) +{ + return false; +} static inline bool daxdev_mapping_supported(struct vm_area_struct *vma, struct dax_device *dax_dev) { @@ -130,8 +138,6 @@ static inline bool generic_fsdax_supported(struct dax_device *dax_dev, return __generic_fsdax_supported(dax_dev, bdev, blocksize, start, sectors); } -bool dax_supported(struct dax_device *dax_dev, struct block_device *bdev, - int blocksize, sector_t start, sector_t len); static inline void fs_put_dax(struct dax_device *dax_dev) { @@ -159,13 +165,6 @@ static inline bool generic_fsdax_supported(struct dax_device *dax_dev, return false; } -static inline bool dax_supported(struct dax_device *dax_dev, - struct block_device *bdev, int blocksize, sector_t start, - sector_t len) -{ - return false; -} - static inline void fs_put_dax(struct dax_device *dax_dev) { } diff --git a/include/linux/fs_parser.h b/include/linux/fs_parser.h index 2eab6d5f6736..aab0ffc6bac6 100644 --- a/include/linux/fs_parser.h +++ b/include/linux/fs_parser.h @@ -120,7 +120,7 @@ static inline bool fs_validate_description(const char *name, #define fsparam_u32oct(NAME, OPT) \ __fsparam(fs_param_is_u32, NAME, OPT, 0, (void *)8) #define fsparam_u32hex(NAME, OPT) \ - __fsparam(fs_param_is_u32_hex, NAME, OPT, 0, (void *16)) + __fsparam(fs_param_is_u32_hex, NAME, OPT, 0, (void *)16) #define fsparam_s32(NAME, OPT) __fsparam(fs_param_is_s32, NAME, OPT, 0, NULL) #define fsparam_u64(NAME, OPT) __fsparam(fs_param_is_u64, NAME, OPT, 0, NULL) #define fsparam_enum(NAME, OPT, array) __fsparam(fs_param_is_enum, NAME, OPT, 0, array) diff --git a/include/linux/iopoll.h b/include/linux/iopoll.h index bc89ac625f26..2c8860e406bd 100644 --- a/include/linux/iopoll.h +++ b/include/linux/iopoll.h @@ -60,8 +60,7 @@ /** * read_poll_timeout_atomic - Periodically poll an address until a condition is * met or a timeout occurs - * @op: accessor function (takes @addr as its only argument) - * @addr: Address to poll + * @op: accessor function (takes @args as its arguments) * @val: Variable to read the value into * @cond: Break condition (usually involving @val) * @delay_us: Time to udelay between reads in us (0 tight-loops). Should @@ -69,6 +68,7 @@ * Documentation/timers/timers-howto.rst). * @timeout_us: Timeout in us, 0 means never timeout * @delay_before_read: if it is true, delay @delay_us before read. + * @args: arguments for @op poll * * Returns 0 on success and -ETIMEDOUT upon a timeout. In either * case, the last read value at @args is stored in @val. 
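The iopoll.h hunk above corrects the read_poll_timeout_atomic() kernel-doc: the accessor @op is invoked with the trailing @args rather than a single @addr, and the last value it read is left in @val. A hedged usage sketch — the device, register offset and BUSY bit below are made-up illustrations, only the macro itself is assumed from <linux/iopoll.h>:

#include <linux/iopoll.h>
#include <linux/io.h>
#include <linux/bits.h>

#define MYDEV_STATUS	0x04		/* hypothetical status register */
#define MYDEV_BUSY	BIT(0)		/* hypothetical busy flag */

static int mydev_wait_idle(void __iomem *regs)
{
	u32 val;

	/*
	 * Re-read readl(regs + MYDEV_STATUS) every 10us and give up after
	 * 1ms; returns 0 once the condition holds, -ETIMEDOUT otherwise,
	 * with the last value read left in val.
	 */
	return read_poll_timeout_atomic(readl, val, !(val & MYDEV_BUSY),
					10, 1000, false, regs + MYDEV_STATUS);
}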
diff --git a/include/linux/kprobes.h b/include/linux/kprobes.h index 9be1bff4f586..8aab327b5539 100644 --- a/include/linux/kprobes.h +++ b/include/linux/kprobes.h @@ -373,6 +373,8 @@ void unregister_kretprobes(struct kretprobe **rps, int num); void kprobe_flush_task(struct task_struct *tk); void recycle_rp_inst(struct kretprobe_instance *ri, struct hlist_head *head); +void kprobe_free_init_mem(void); + int disable_kprobe(struct kprobe *kp); int enable_kprobe(struct kprobe *kp); @@ -435,6 +437,9 @@ static inline void unregister_kretprobes(struct kretprobe **rps, int num) static inline void kprobe_flush_task(struct task_struct *tk) { } +static inline void kprobe_free_init_mem(void) +{ +} static inline int disable_kprobe(struct kprobe *kp) { return -ENOSYS; diff --git a/include/linux/memstick.h b/include/linux/memstick.h index da4c65f9435f..ebf73d4ee969 100644 --- a/include/linux/memstick.h +++ b/include/linux/memstick.h @@ -281,6 +281,7 @@ struct memstick_host { struct memstick_dev *card; unsigned int retries; + bool removing; /* Notify the host that some requests are pending. */ void (*request)(struct memstick_host *host); diff --git a/include/linux/mm.h b/include/linux/mm.h index b2f370f0b420..16b799a0522c 100644 --- a/include/linux/mm.h +++ b/include/linux/mm.h @@ -1646,7 +1646,7 @@ struct mmu_notifier_range; void free_pgd_range(struct mmu_gather *tlb, unsigned long addr, unsigned long end, unsigned long floor, unsigned long ceiling); int copy_page_range(struct mm_struct *dst, struct mm_struct *src, - struct vm_area_struct *vma); + struct vm_area_struct *vma, struct vm_area_struct *new); int follow_pte_pmd(struct mm_struct *mm, unsigned long address, struct mmu_notifier_range *range, pte_t **ptepp, pmd_t **pmdpp, spinlock_t **ptlp); @@ -2416,7 +2416,7 @@ extern int __meminit __early_pfn_to_nid(unsigned long pfn, extern void set_dma_reserve(unsigned long new_dma_reserve); extern void memmap_init_zone(unsigned long, int, unsigned long, unsigned long, - enum memmap_context, struct vmem_altmap *); + enum meminit_context, struct vmem_altmap *); extern void setup_per_zone_wmarks(void); extern int __meminit init_per_zone_wmark_min(void); extern void mem_init(void); diff --git a/include/linux/mm_types.h b/include/linux/mm_types.h index 496c3ff97cce..ed028af3cb19 100644 --- a/include/linux/mm_types.h +++ b/include/linux/mm_types.h @@ -436,6 +436,16 @@ struct mm_struct { */ atomic_t mm_count; + /** + * @has_pinned: Whether this mm has pinned any pages. This can + * be either replaced in the future by @pinned_vm when it + * becomes stable, or grow into a counter on its own. We're + * aggresive on this bit now - even if the pinned pages were + * unpinned later on, we'll still keep this bit set for the + * lifecycle of this mm just for simplicity. + */ + atomic_t has_pinned; + #ifdef CONFIG_MMU atomic_long_t pgtables_bytes; /* PTE page table pages */ #endif diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h index 8379432f4f2f..0f7a4ff4b059 100644 --- a/include/linux/mmzone.h +++ b/include/linux/mmzone.h @@ -824,10 +824,15 @@ bool zone_watermark_ok(struct zone *z, unsigned int order, unsigned int alloc_flags); bool zone_watermark_ok_safe(struct zone *z, unsigned int order, unsigned long mark, int highest_zoneidx); -enum memmap_context { - MEMMAP_EARLY, - MEMMAP_HOTPLUG, +/* + * Memory initialization context, use to differentiate memory added by + * the platform statically or via memory hotplug interface. 
+ */ +enum meminit_context { + MEMINIT_EARLY, + MEMINIT_HOTPLUG, }; + extern void init_currently_empty_zone(struct zone *zone, unsigned long start_pfn, unsigned long size); diff --git a/include/linux/netdev_features.h b/include/linux/netdev_features.h index 2cc3cf80b49a..0b17c4322b09 100644 --- a/include/linux/netdev_features.h +++ b/include/linux/netdev_features.h @@ -193,7 +193,7 @@ static inline int find_next_netdev_feature(u64 feature, unsigned long start) #define NETIF_F_GSO_MASK (__NETIF_F_BIT(NETIF_F_GSO_LAST + 1) - \ __NETIF_F_BIT(NETIF_F_GSO_SHIFT)) -/* List of IP checksum features. Note that NETIF_F_ HW_CSUM should not be +/* List of IP checksum features. Note that NETIF_F_HW_CSUM should not be * set in features when NETIF_F_IP_CSUM or NETIF_F_IPV6_CSUM are set-- * this would be contradictory */ diff --git a/include/linux/netdevice.h b/include/linux/netdevice.h index b0e303f6603f..7bd4fcdd0738 100644 --- a/include/linux/netdevice.h +++ b/include/linux/netdevice.h @@ -1784,6 +1784,7 @@ enum netdev_priv_flags { * the watchdog (see dev_watchdog()) * @watchdog_timer: List of timers * + * @proto_down_reason: reason a netdev interface is held down * @pcpu_refcnt: Number of references to this device * @todo_list: Delayed register/unregister * @link_watch_list: XXX: need comments on this one @@ -1848,6 +1849,7 @@ enum netdev_priv_flags { * @udp_tunnel_nic_info: static structure describing the UDP tunnel * offload capabilities of the device * @udp_tunnel_nic: UDP tunnel offload state + * @xdp_state: stores info on attached XDP BPF programs * * FIXME: cleanup struct net_device such that network protocol info * moves out. diff --git a/include/linux/nfs_xdr.h b/include/linux/nfs_xdr.h index 9408f3252c8e..69cb46f7b8d2 100644 --- a/include/linux/nfs_xdr.h +++ b/include/linux/nfs_xdr.h @@ -1611,8 +1611,8 @@ struct nfs_pgio_header { __u64 mds_offset; /* Filelayout dense stripe */ struct nfs_page_array page_array; struct nfs_client *ds_clp; /* pNFS data server */ - int ds_commit_idx; /* ds index if ds_clp is set */ - int pgio_mirror_idx;/* mirror index in pgio layer */ + u32 ds_commit_idx; /* ds index if ds_clp is set */ + u32 pgio_mirror_idx;/* mirror index in pgio layer */ }; struct nfs_mds_commit_info { diff --git a/include/linux/node.h b/include/linux/node.h index 4866f32a02d8..014ba3ab2efd 100644 --- a/include/linux/node.h +++ b/include/linux/node.h @@ -99,11 +99,13 @@ extern struct node *node_devices[]; typedef void (*node_registration_func_t)(struct node *); #if defined(CONFIG_MEMORY_HOTPLUG_SPARSE) && defined(CONFIG_NUMA) -extern int link_mem_sections(int nid, unsigned long start_pfn, - unsigned long end_pfn); +int link_mem_sections(int nid, unsigned long start_pfn, + unsigned long end_pfn, + enum meminit_context context); #else static inline int link_mem_sections(int nid, unsigned long start_pfn, - unsigned long end_pfn) + unsigned long end_pfn, + enum meminit_context context) { return 0; } @@ -128,7 +130,8 @@ static inline int register_one_node(int nid) if (error) return error; /* link memory sections under this node */ - error = link_mem_sections(nid, start_pfn, end_pfn); + error = link_mem_sections(nid, start_pfn, end_pfn, + MEMINIT_EARLY); } return error; diff --git a/include/linux/pgtable.h b/include/linux/pgtable.h index e8cbc2e795d5..90654cb63e9e 100644 --- a/include/linux/pgtable.h +++ b/include/linux/pgtable.h @@ -1427,6 +1427,16 @@ typedef unsigned int pgtbl_mod_mask; #define mm_pmd_folded(mm) __is_defined(__PAGETABLE_PMD_FOLDED) #endif +#ifndef p4d_offset_lockless +#define 
p4d_offset_lockless(pgdp, pgd, address) p4d_offset(&(pgd), address) +#endif +#ifndef pud_offset_lockless +#define pud_offset_lockless(p4dp, p4d, address) pud_offset(&(p4d), address) +#endif +#ifndef pmd_offset_lockless +#define pmd_offset_lockless(pudp, pud, address) pmd_offset(&(pud), address) +#endif + /* * p?d_leaf() - true if this entry is a final mapping to a physical address. * This differs from p?d_huge() by the fact that they are always available (if diff --git a/include/linux/phy/phy.h b/include/linux/phy/phy.h index bcee8eba62b3..e435bdb0bab3 100644 --- a/include/linux/phy/phy.h +++ b/include/linux/phy/phy.h @@ -115,10 +115,12 @@ struct phy_ops { /** * struct phy_attrs - represents phy attributes * @bus_width: Data path width implemented by PHY + * @max_link_rate: Maximum link rate supported by PHY (in Mbps) * @mode: PHY mode */ struct phy_attrs { u32 bus_width; + u32 max_link_rate; enum phy_mode mode; }; diff --git a/include/linux/pipe_fs_i.h b/include/linux/pipe_fs_i.h index 50afd0d0084c..5d2705f1d01c 100644 --- a/include/linux/pipe_fs_i.h +++ b/include/linux/pipe_fs_i.h @@ -240,8 +240,9 @@ extern unsigned int pipe_max_size; extern unsigned long pipe_user_pages_hard; extern unsigned long pipe_user_pages_soft; -/* Drop the inode semaphore and wait for a pipe event, atomically */ -void pipe_wait(struct pipe_inode_info *pipe); +/* Wait for a pipe to be readable/writable while dropping the pipe lock */ +void pipe_wait_readable(struct pipe_inode_info *); +void pipe_wait_writable(struct pipe_inode_info *); struct pipe_inode_info *alloc_pipe_info(void); void free_pipe_info(struct pipe_inode_info *); diff --git a/include/linux/qed/qed_if.h b/include/linux/qed/qed_if.h index cd6a5c7e56eb..cdd73afc4c46 100644 --- a/include/linux/qed/qed_if.h +++ b/include/linux/qed/qed_if.h @@ -623,6 +623,7 @@ struct qed_dev_info { #define QED_MFW_VERSION_3_OFFSET 24 u32 flash_size; + bool b_arfs_capable; bool b_inter_pf_switch; bool tx_switching; bool rdma_supported; diff --git a/include/linux/skbuff.h b/include/linux/skbuff.h index ed9bea924dc3..04a18e01b362 100644 --- a/include/linux/skbuff.h +++ b/include/linux/skbuff.h @@ -3223,8 +3223,9 @@ static inline int skb_padto(struct sk_buff *skb, unsigned int len) * is untouched. Otherwise it is extended. Returns zero on * success. The skb is freed on error if @free_on_error is true. */ -static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len, - bool free_on_error) +static inline int __must_check __skb_put_padto(struct sk_buff *skb, + unsigned int len, + bool free_on_error) { unsigned int size = skb->len; @@ -3247,7 +3248,7 @@ static inline int __skb_put_padto(struct sk_buff *skb, unsigned int len, * is untouched. Otherwise it is extended. Returns zero on * success. The skb is freed on error. 
*/ -static inline int skb_put_padto(struct sk_buff *skb, unsigned int len) +static inline int __must_check skb_put_padto(struct sk_buff *skb, unsigned int len) { return __skb_put_padto(skb, len, true); } diff --git a/include/linux/usb.h b/include/linux/usb.h index 20c555db4621..7d72c4e0713c 100644 --- a/include/linux/usb.h +++ b/include/linux/usb.h @@ -1764,6 +1764,7 @@ static inline int usb_urb_dir_out(struct urb *urb) return (urb->transfer_flags & URB_DIR_MASK) == URB_DIR_OUT; } +int usb_pipe_type_check(struct usb_device *dev, unsigned int pipe); int usb_urb_ep_type_check(const struct urb *urb); void *usb_alloc_coherent(struct usb_device *dev, size_t size, @@ -1801,6 +1802,14 @@ extern int usb_bulk_msg(struct usb_device *usb_dev, unsigned int pipe, int timeout); /* wrappers around usb_control_msg() for the most common standard requests */ +int usb_control_msg_send(struct usb_device *dev, __u8 endpoint, __u8 request, + __u8 requesttype, __u16 value, __u16 index, + const void *data, __u16 size, int timeout, + gfp_t memflags); +int usb_control_msg_recv(struct usb_device *dev, __u8 endpoint, __u8 request, + __u8 requesttype, __u16 value, __u16 index, + void *data, __u16 size, int timeout, + gfp_t memflags); extern int usb_get_descriptor(struct usb_device *dev, unsigned char desctype, unsigned char descindex, void *buf, int size); extern int usb_get_status(struct usb_device *dev, diff --git a/include/linux/usb/gadget.h b/include/linux/usb/gadget.h index 52ce1f6b8f83..e7351d64f11f 100644 --- a/include/linux/usb/gadget.h +++ b/include/linux/usb/gadget.h @@ -436,6 +436,7 @@ struct usb_gadget { }; #define work_to_gadget(w) (container_of((w), struct usb_gadget, work)) +/* Interface to the device model */ static inline void set_gadget_data(struct usb_gadget *gadget, void *data) { dev_set_drvdata(&gadget->dev, data); } static inline void *get_gadget_data(struct usb_gadget *gadget) @@ -444,6 +445,26 @@ static inline struct usb_gadget *dev_to_usb_gadget(struct device *dev) { return container_of(dev, struct usb_gadget, dev); } +static inline struct usb_gadget *usb_get_gadget(struct usb_gadget *gadget) +{ + get_device(&gadget->dev); + return gadget; +} +static inline void usb_put_gadget(struct usb_gadget *gadget) +{ + put_device(&gadget->dev); +} +extern void usb_initialize_gadget(struct device *parent, + struct usb_gadget *gadget, void (*release)(struct device *dev)); +extern int usb_add_gadget(struct usb_gadget *gadget); +extern void usb_del_gadget(struct usb_gadget *gadget); + +/* Legacy device-model interface */ +extern int usb_add_gadget_udc_release(struct device *parent, + struct usb_gadget *gadget, void (*release)(struct device *dev)); +extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget); +extern void usb_del_gadget_udc(struct usb_gadget *gadget); +extern char *usb_get_gadget_udc_name(void); /* iterates the non-control endpoints; 'tmp' is a struct usb_ep pointer */ #define gadget_for_each_ep(tmp, gadget) \ @@ -735,12 +756,6 @@ int usb_gadget_probe_driver(struct usb_gadget_driver *driver); */ int usb_gadget_unregister_driver(struct usb_gadget_driver *driver); -extern int usb_add_gadget_udc_release(struct device *parent, - struct usb_gadget *gadget, void (*release)(struct device *dev)); -extern int usb_add_gadget_udc(struct device *parent, struct usb_gadget *gadget); -extern void usb_del_gadget_udc(struct usb_gadget *gadget); -extern char *usb_get_gadget_udc_name(void); - /*-------------------------------------------------------------------------*/ /* utility to simplify 
dealing with string descriptors */ diff --git a/include/linux/usb/pd.h b/include/linux/usb/pd.h index b6c233e79bd4..3a805e2ecbc9 100644 --- a/include/linux/usb/pd.h +++ b/include/linux/usb/pd.h @@ -219,14 +219,16 @@ enum pd_pdo_type { #define PDO_CURR_MASK 0x3ff #define PDO_PWR_MASK 0x3ff -#define PDO_FIXED_DUAL_ROLE BIT(29) /* Power role swap supported */ -#define PDO_FIXED_SUSPEND BIT(28) /* USB Suspend supported (Source) */ -#define PDO_FIXED_HIGHER_CAP BIT(28) /* Requires more than vSafe5V (Sink) */ -#define PDO_FIXED_EXTPOWER BIT(27) /* Externally powered */ -#define PDO_FIXED_USB_COMM BIT(26) /* USB communications capable */ -#define PDO_FIXED_DATA_SWAP BIT(25) /* Data role swap supported */ -#define PDO_FIXED_VOLT_SHIFT 10 /* 50mV units */ -#define PDO_FIXED_CURR_SHIFT 0 /* 10mA units */ +#define PDO_FIXED_DUAL_ROLE BIT(29) /* Power role swap supported */ +#define PDO_FIXED_SUSPEND BIT(28) /* USB Suspend supported (Source) */ +#define PDO_FIXED_HIGHER_CAP BIT(28) /* Requires more than vSafe5V (Sink) */ +#define PDO_FIXED_EXTPOWER BIT(27) /* Externally powered */ +#define PDO_FIXED_USB_COMM BIT(26) /* USB communications capable */ +#define PDO_FIXED_DATA_SWAP BIT(25) /* Data role swap supported */ +#define PDO_FIXED_FRS_CURR_MASK (BIT(24) | BIT(23)) /* FR_Swap Current (Sink) */ +#define PDO_FIXED_FRS_CURR_SHIFT 23 +#define PDO_FIXED_VOLT_SHIFT 10 /* 50mV units */ +#define PDO_FIXED_CURR_SHIFT 0 /* 10mA units */ #define PDO_FIXED_VOLT(mv) ((((mv) / 50) & PDO_VOLT_MASK) << PDO_FIXED_VOLT_SHIFT) #define PDO_FIXED_CURR(ma) ((((ma) / 10) & PDO_CURR_MASK) << PDO_FIXED_CURR_SHIFT) @@ -454,6 +456,7 @@ static inline unsigned int rdo_max_power(u32 rdo) #define PD_T_DB_DETECT 10000 /* 10 - 15 seconds */ #define PD_T_SEND_SOURCE_CAP 150 /* 100 - 200 ms */ #define PD_T_SENDER_RESPONSE 60 /* 24 - 30 ms, relaxed */ +#define PD_T_RECEIVER_RESPONSE 15 /* 15ms max */ #define PD_T_SOURCE_ACTIVITY 45 #define PD_T_SINK_ACTIVITY 135 #define PD_T_SINK_WAIT_CAP 240 @@ -471,8 +474,10 @@ static inline unsigned int rdo_max_power(u32 rdo) #define PD_T_VCONN_SOURCE_ON 100 #define PD_T_SINK_REQUEST 100 /* 100 ms minimum */ #define PD_T_ERROR_RECOVERY 100 /* minimum 25 is insufficient */ -#define PD_T_SRCSWAPSTDBY 625 /* Maximum of 650ms */ -#define PD_T_NEWSRC 250 /* Maximum of 275ms */ +#define PD_T_SRCSWAPSTDBY 625 /* Maximum of 650ms */ +#define PD_T_NEWSRC 250 /* Maximum of 275ms */ +#define PD_T_SWAP_SRC_START 20 /* Minimum of 20ms */ +#define PD_T_BIST_CONT_MODE 50 /* 30 - 60 ms */ #define PD_T_DRP_TRY 100 /* 75 - 150 ms */ #define PD_T_DRP_TRYWAIT 600 /* 400 - 800 ms */ @@ -483,5 +488,4 @@ static inline unsigned int rdo_max_power(u32 rdo) #define PD_N_CAPS_COUNT (PD_T_NO_RESPONSE / PD_T_SEND_SOURCE_CAP) #define PD_N_HARD_RESET_COUNT 2 -#define PD_T_BIST_CONT_MODE 50 /* 30 - 60 ms */ #endif /* __LINUX_USB_PD_H */ diff --git a/include/linux/usb/tcpm.h b/include/linux/usb/tcpm.h index 89f58760cf48..09762d26fa0c 100644 --- a/include/linux/usb/tcpm.h +++ b/include/linux/usb/tcpm.h @@ -78,8 +78,11 @@ enum tcpm_transmit_type { * automatically if a connection is established. * @try_role: Optional; called to set a preferred role * @pd_transmit:Called to transmit PD message - * @mux: Pointer to multiplexer data * @set_bist_data: Turn on/off bist data mode for compliance testing + * @enable_frs: + * Optional; Called to enable/disable PD 3.0 fast role swap. + * Enabling frs is accessory dependent as not all PD3.0 + * accessories support fast role swap. 
*/ struct tcpc_dev { struct fwnode_handle *fwnode; @@ -105,6 +108,7 @@ struct tcpc_dev { int (*pd_transmit)(struct tcpc_dev *dev, enum tcpm_transmit_type type, const struct pd_message *msg); int (*set_bist_data)(struct tcpc_dev *dev, bool on); + int (*enable_frs)(struct tcpc_dev *dev, bool enable); }; struct tcpm_port; @@ -114,6 +118,8 @@ void tcpm_unregister_port(struct tcpm_port *port); void tcpm_vbus_change(struct tcpm_port *port); void tcpm_cc_change(struct tcpm_port *port); +void tcpm_sink_frs(struct tcpm_port *port); +void tcpm_sourcing_vbus(struct tcpm_port *port); void tcpm_pd_receive(struct tcpm_port *port, const struct pd_message *msg); void tcpm_pd_transmit_complete(struct tcpm_port *port, diff --git a/include/linux/usb/typec.h b/include/linux/usb/typec.h index 9cb1bec94b71..6be558045942 100644 --- a/include/linux/usb/typec.h +++ b/include/linux/usb/typec.h @@ -268,6 +268,7 @@ int typec_set_mode(struct typec_port *port, int mode); void *typec_get_drvdata(struct typec_port *port); +int typec_find_pwr_opmode(const char *name); int typec_find_orientation(const char *name); int typec_find_port_power_role(const char *name); int typec_find_power_role(const char *name); diff --git a/include/linux/vmstat.h b/include/linux/vmstat.h index 91220ace31da..7557c1070fd7 100644 --- a/include/linux/vmstat.h +++ b/include/linux/vmstat.h @@ -312,6 +312,11 @@ static inline void __mod_zone_page_state(struct zone *zone, static inline void __mod_node_page_state(struct pglist_data *pgdat, enum node_stat_item item, int delta) { + if (vmstat_item_in_bytes(item)) { + VM_WARN_ON_ONCE(delta & (PAGE_SIZE - 1)); + delta >>= PAGE_SHIFT; + } + node_page_state_add(delta, pgdat, item); } diff --git a/include/media/videobuf2-core.h b/include/media/videobuf2-core.h index 52ef92049073..bbb3f26fbde9 100644 --- a/include/media/videobuf2-core.h +++ b/include/media/videobuf2-core.h @@ -744,8 +744,6 @@ void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb); * vb2_core_reqbufs() - Initiate streaming. * @q: pointer to &struct vb2_queue with videobuf2 queue. * @memory: memory type, as defined by &enum vb2_memory. - * @flags: auxiliary queue/buffer management flags. Currently, the only - * used flag is %V4L2_FLAG_MEMORY_NON_CONSISTENT. * @count: requested buffer count. * * Videobuf2 core helper to implement VIDIOC_REQBUF() operation. It is called @@ -770,13 +768,12 @@ void vb2_core_querybuf(struct vb2_queue *q, unsigned int index, void *pb); * Return: returns zero on success; an error code otherwise. */ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, - unsigned int flags, unsigned int *count); + unsigned int *count); /** * vb2_core_create_bufs() - Allocate buffers and any required auxiliary structs * @q: pointer to &struct vb2_queue with videobuf2 queue. * @memory: memory type, as defined by &enum vb2_memory. - * @flags: auxiliary queue/buffer management flags. * @count: requested buffer count. * @requested_planes: number of planes requested. * @requested_sizes: array with the size of the planes. @@ -794,7 +791,7 @@ int vb2_core_reqbufs(struct vb2_queue *q, enum vb2_memory memory, * Return: returns zero on success; an error code otherwise. 
*/ int vb2_core_create_bufs(struct vb2_queue *q, enum vb2_memory memory, - unsigned int flags, unsigned int *count, + unsigned int *count, unsigned int requested_planes, const unsigned int requested_sizes[]); diff --git a/include/net/flow.h b/include/net/flow.h index 929d3ca614d0..b2531df3f65f 100644 --- a/include/net/flow.h +++ b/include/net/flow.h @@ -116,6 +116,7 @@ static inline void flowi4_init_output(struct flowi4 *fl4, int oif, fl4->saddr = saddr; fl4->fl4_dport = dport; fl4->fl4_sport = sport; + fl4->flowi4_multipath_hash = 0; } /* Reset some input parameters after previous lookup */ diff --git a/include/net/netlink.h b/include/net/netlink.h index c0411f14fb53..8e0eb2c9c528 100644 --- a/include/net/netlink.h +++ b/include/net/netlink.h @@ -726,7 +726,6 @@ static inline int __nlmsg_parse(const struct nlmsghdr *nlh, int hdrlen, * @hdrlen: length of family specific header * @tb: destination array with maxtype+1 elements * @maxtype: maximum attribute type to be expected - * @validate: validation strictness * @extack: extended ACK report struct * * See nla_parse() @@ -824,7 +823,6 @@ static inline int nla_validate_deprecated(const struct nlattr *head, int len, * @len: length of attribute stream * @maxtype: maximum attribute type to be expected * @policy: validation policy - * @validate: validation strictness * @extack: extended ACK report struct * * Validates all attributes in the specified attribute stream against the diff --git a/include/net/netns/nftables.h b/include/net/netns/nftables.h index a1a8d45adb42..6c0806bd8d1e 100644 --- a/include/net/netns/nftables.h +++ b/include/net/netns/nftables.h @@ -8,6 +8,7 @@ struct netns_nftables { struct list_head tables; struct list_head commit_list; struct list_head module_list; + struct list_head notify_list; struct mutex commit_mutex; unsigned int base_seq; u8 gencursor; diff --git a/include/net/sctp/structs.h b/include/net/sctp/structs.h index b33f1aefad09..0bdff38eb4bb 100644 --- a/include/net/sctp/structs.h +++ b/include/net/sctp/structs.h @@ -226,12 +226,14 @@ struct sctp_sock { data_ready_signalled:1; atomic_t pd_mode; + + /* Fields after this point will be skipped on copies, like on accept + * and peeloff operations + */ + /* Receive to here while partial delivery is in effect. 
*/ struct sk_buff_head pd_lobby; - /* These must be the last fields, as they will skipped on copies, - * like on accept and peeloff operations - */ struct list_head auto_asconf_list; int do_auto_asconf; }; diff --git a/include/net/vxlan.h b/include/net/vxlan.h index 3a41627cbdfe..08537aa14f7c 100644 --- a/include/net/vxlan.h +++ b/include/net/vxlan.h @@ -121,6 +121,9 @@ struct vxlanhdr_gbp { #define VXLAN_GBP_POLICY_APPLIED (BIT(3) << 16) #define VXLAN_GBP_ID_MASK (0xFFFF) +#define VXLAN_GBP_MASK (VXLAN_GBP_DONT_LEARN | VXLAN_GBP_POLICY_APPLIED | \ + VXLAN_GBP_ID_MASK) + /* * VXLAN Generic Protocol Extension (VXLAN_F_GPE): * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+ diff --git a/include/soc/bcm2835/raspberrypi-firmware.h b/include/soc/bcm2835/raspberrypi-firmware.h index 3025aca3c358..cc9cdbc66403 100644 --- a/include/soc/bcm2835/raspberrypi-firmware.h +++ b/include/soc/bcm2835/raspberrypi-firmware.h @@ -10,7 +10,6 @@ #include <linux/of_device.h> struct rpi_firmware; -struct pci_dev; enum rpi_firmware_property_status { RPI_FIRMWARE_STATUS_REQUEST = 0, @@ -142,7 +141,6 @@ int rpi_firmware_property(struct rpi_firmware *fw, int rpi_firmware_property_list(struct rpi_firmware *fw, void *data, size_t tag_size); struct rpi_firmware *rpi_firmware_get(struct device_node *firmware_node); -int rpi_firmware_init_vl805(struct pci_dev *pdev); #else static inline int rpi_firmware_property(struct rpi_firmware *fw, u32 tag, void *data, size_t len) @@ -160,11 +158,6 @@ static inline struct rpi_firmware *rpi_firmware_get(struct device_node *firmware { return NULL; } - -static inline int rpi_firmware_init_vl805(struct pci_dev *pdev) -{ - return 0; -} #endif #endif /* __SOC_RASPBERRY_FIRMWARE_H__ */ diff --git a/include/soc/mscc/ocelot.h b/include/soc/mscc/ocelot.h index da369b12005f..0ac4e7fba086 100644 --- a/include/soc/mscc/ocelot.h +++ b/include/soc/mscc/ocelot.h @@ -566,6 +566,7 @@ struct ocelot_port { u8 ptp_cmd; struct sk_buff_head tx_skbs; u8 ts_id; + spinlock_t ts_id_lock; phy_interface_t phy_mode; @@ -677,6 +678,7 @@ void ocelot_configure_cpu(struct ocelot *ocelot, int npi, int ocelot_init(struct ocelot *ocelot); void ocelot_deinit(struct ocelot *ocelot); void ocelot_init_port(struct ocelot *ocelot, int port); +void ocelot_deinit_port(struct ocelot *ocelot, int port); /* DSA callbacks */ void ocelot_port_enable(struct ocelot *ocelot, int port, diff --git a/include/uapi/linux/ethtool_netlink.h b/include/uapi/linux/ethtool_netlink.h index 5dcd24cb33ea..72ba36be9655 100644 --- a/include/uapi/linux/ethtool_netlink.h +++ b/include/uapi/linux/ethtool_netlink.h @@ -79,6 +79,7 @@ enum { ETHTOOL_MSG_TSINFO_GET_REPLY, ETHTOOL_MSG_CABLE_TEST_NTF, ETHTOOL_MSG_CABLE_TEST_TDR_NTF, + ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY, /* add new constants above here */ __ETHTOOL_MSG_KERNEL_CNT, diff --git a/include/uapi/linux/videodev2.h b/include/uapi/linux/videodev2.h index c7b70ff53bc1..235db7754606 100644 --- a/include/uapi/linux/videodev2.h +++ b/include/uapi/linux/videodev2.h @@ -191,8 +191,6 @@ enum v4l2_memory { V4L2_MEMORY_DMABUF = 4, }; -#define V4L2_FLAG_MEMORY_NON_CONSISTENT (1 << 0) - /* see also http://vektor.theorem.ca/graphics/ycbcr/ */ enum v4l2_colorspace { /* @@ -949,10 +947,7 @@ struct v4l2_requestbuffers { __u32 type; /* enum v4l2_buf_type */ __u32 memory; /* enum v4l2_memory */ __u32 capabilities; - union { - __u32 flags; - __u32 reserved[1]; - }; + __u32 reserved[1]; }; /* capabilities for struct v4l2_requestbuffers and v4l2_create_buffers */ @@ -2456,9 +2451,6 @@ struct 
v4l2_dbg_chip_info { * @memory: enum v4l2_memory; buffer memory type * @format: frame format, for which buffers are requested * @capabilities: capabilities of this buffer type. - * @flags: additional buffer management attributes (ignored unless the - * queue has V4L2_BUF_CAP_SUPPORTS_MMAP_CACHE_HINTS capability - * and configured for MMAP streaming I/O). * @reserved: future extensions */ struct v4l2_create_buffers { @@ -2467,8 +2459,7 @@ struct v4l2_create_buffers { __u32 memory; struct v4l2_format format; __u32 capabilities; - __u32 flags; - __u32 reserved[6]; + __u32 reserved[7]; }; /* diff --git a/init/main.c b/init/main.c index ae78fb68d231..e880b4ecb314 100644 --- a/init/main.c +++ b/init/main.c @@ -33,6 +33,7 @@ #include <linux/nmi.h> #include <linux/percpu.h> #include <linux/kmod.h> +#include <linux/kprobes.h> #include <linux/vmalloc.h> #include <linux/kernel_stat.h> #include <linux/start_kernel.h> @@ -303,7 +304,7 @@ static void * __init get_boot_config_from_initrd(u32 *_size, u32 *_csum) #ifdef CONFIG_BOOT_CONFIG -char xbc_namebuf[XBC_KEYLEN_MAX] __initdata; +static char xbc_namebuf[XBC_KEYLEN_MAX] __initdata; #define rest(dst, end) ((end) > (dst) ? (end) - (dst) : 0) @@ -1402,6 +1403,7 @@ static int __ref kernel_init(void *unused) kernel_init_freeable(); /* need to finish all async __init code before freeing the memory */ async_synchronize_full(); + kprobe_free_init_mem(); ftrace_free_init_mem(); free_initmem(); mark_readonly(); diff --git a/kernel/bpf/hashtab.c b/kernel/bpf/hashtab.c index 78dfff6a501b..7df28a45c66b 100644 --- a/kernel/bpf/hashtab.c +++ b/kernel/bpf/hashtab.c @@ -1622,7 +1622,6 @@ struct bpf_iter_seq_hash_map_info { struct bpf_map *map; struct bpf_htab *htab; void *percpu_value_buf; // non-zero means percpu hash - unsigned long flags; u32 bucket_id; u32 skip_elems; }; @@ -1632,7 +1631,6 @@ bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info, struct htab_elem *prev_elem) { const struct bpf_htab *htab = info->htab; - unsigned long flags = info->flags; u32 skip_elems = info->skip_elems; u32 bucket_id = info->bucket_id; struct hlist_nulls_head *head; @@ -1656,19 +1654,18 @@ bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info, /* not found, unlock and go to the next bucket */ b = &htab->buckets[bucket_id++]; - htab_unlock_bucket(htab, b, flags); + rcu_read_unlock(); skip_elems = 0; } for (i = bucket_id; i < htab->n_buckets; i++) { b = &htab->buckets[i]; - flags = htab_lock_bucket(htab, b); + rcu_read_lock(); count = 0; head = &b->head; hlist_nulls_for_each_entry_rcu(elem, n, head, hash_node) { if (count >= skip_elems) { - info->flags = flags; info->bucket_id = i; info->skip_elems = count; return elem; @@ -1676,7 +1673,7 @@ bpf_hash_map_seq_find_next(struct bpf_iter_seq_hash_map_info *info, count++; } - htab_unlock_bucket(htab, b, flags); + rcu_read_unlock(); skip_elems = 0; } @@ -1754,14 +1751,10 @@ static int bpf_hash_map_seq_show(struct seq_file *seq, void *v) static void bpf_hash_map_seq_stop(struct seq_file *seq, void *v) { - struct bpf_iter_seq_hash_map_info *info = seq->private; - if (!v) (void)__bpf_hash_map_seq_show(seq, NULL); else - htab_unlock_bucket(info->htab, - &info->htab->buckets[info->bucket_id], - info->flags); + rcu_read_unlock(); } static int bpf_iter_init_hash_map(void *priv_data, diff --git a/kernel/bpf/inode.c b/kernel/bpf/inode.c index fb878ba3f22f..18f4969552ac 100644 --- a/kernel/bpf/inode.c +++ b/kernel/bpf/inode.c @@ -226,10 +226,12 @@ static void *map_seq_next(struct seq_file *m, void *v, loff_t *pos) else 
prev_key = key; + rcu_read_lock(); if (map->ops->map_get_next_key(map, prev_key, key)) { map_iter(m)->done = true; - return NULL; + key = NULL; } + rcu_read_unlock(); return key; } diff --git a/kernel/fork.c b/kernel/fork.c index 49677d668de4..da8d360fb032 100644 --- a/kernel/fork.c +++ b/kernel/fork.c @@ -589,7 +589,7 @@ static __latent_entropy int dup_mmap(struct mm_struct *mm, mm->map_count++; if (!(tmp->vm_flags & VM_WIPEONFORK)) - retval = copy_page_range(mm, oldmm, mpnt); + retval = copy_page_range(mm, oldmm, mpnt, tmp); if (tmp->vm_ops && tmp->vm_ops->open) tmp->vm_ops->open(tmp); @@ -1011,6 +1011,7 @@ static struct mm_struct *mm_init(struct mm_struct *mm, struct task_struct *p, mm_pgtables_bytes_init(mm); mm->map_count = 0; mm->locked_vm = 0; + atomic_set(&mm->has_pinned, 0); atomic64_set(&mm->pinned_vm, 0); memset(&mm->rss_stat, 0, sizeof(mm->rss_stat)); spin_lock_init(&mm->page_table_lock); diff --git a/kernel/kprobes.c b/kernel/kprobes.c index 049da84e1952..e995541d277d 100644 --- a/kernel/kprobes.c +++ b/kernel/kprobes.c @@ -2162,9 +2162,10 @@ static void kill_kprobe(struct kprobe *p) /* * The module is going away. We should disarm the kprobe which - * is using ftrace. + * is using ftrace, because ftrace framework is still available at + * MODULE_STATE_GOING notification. */ - if (kprobe_ftrace(p)) + if (kprobe_ftrace(p) && !kprobe_disabled(p) && !kprobes_all_disarmed) disarm_kprobe_ftrace(p); } @@ -2459,6 +2460,28 @@ static struct notifier_block kprobe_module_nb = { extern unsigned long __start_kprobe_blacklist[]; extern unsigned long __stop_kprobe_blacklist[]; +void kprobe_free_init_mem(void) +{ + void *start = (void *)(&__init_begin); + void *end = (void *)(&__init_end); + struct hlist_head *head; + struct kprobe *p; + int i; + + mutex_lock(&kprobe_mutex); + + /* Kill all kprobes on initmem */ + for (i = 0; i < KPROBE_TABLE_SIZE; i++) { + head = &kprobe_table[i]; + hlist_for_each_entry(p, head, hlist) { + if (start <= (void *)p->addr && (void *)p->addr < end) + kill_kprobe(p); + } + } + + mutex_unlock(&kprobe_mutex); +} + static int __init init_kprobes(void) { int i, err = 0; diff --git a/kernel/rcu/tasks.h b/kernel/rcu/tasks.h index 835e2df8590a..05d3e1375e4c 100644 --- a/kernel/rcu/tasks.h +++ b/kernel/rcu/tasks.h @@ -590,7 +590,7 @@ void exit_tasks_rcu_finish(void) __releases(&tasks_rcu_exit_srcu) } #else /* #ifdef CONFIG_TASKS_RCU */ -static void show_rcu_tasks_classic_gp_kthread(void) { } +static inline void show_rcu_tasks_classic_gp_kthread(void) { } void exit_tasks_rcu_start(void) { } void exit_tasks_rcu_finish(void) { exit_tasks_rcu_finish_trace(current); } #endif /* #else #ifdef CONFIG_TASKS_RCU */ diff --git a/kernel/rcu/tree.c b/kernel/rcu/tree.c index 8ce77d9ac716..f78ee759af9c 100644 --- a/kernel/rcu/tree.c +++ b/kernel/rcu/tree.c @@ -673,6 +673,7 @@ void rcu_idle_enter(void) lockdep_assert_irqs_disabled(); rcu_eqs_enter(false); } +EXPORT_SYMBOL_GPL(rcu_idle_enter); #ifdef CONFIG_NO_HZ_FULL /** @@ -886,6 +887,7 @@ void rcu_idle_exit(void) rcu_eqs_exit(false); local_irq_restore(flags); } +EXPORT_SYMBOL_GPL(rcu_idle_exit); #ifdef CONFIG_NO_HZ_FULL /** diff --git a/kernel/trace/ftrace.c b/kernel/trace/ftrace.c index e9fa580f3083..541453927c82 100644 --- a/kernel/trace/ftrace.c +++ b/kernel/trace/ftrace.c @@ -2782,6 +2782,7 @@ static void ftrace_remove_trampoline_from_kallsyms(struct ftrace_ops *ops) { lockdep_assert_held(&ftrace_lock); list_del_rcu(&ops->list); + synchronize_rcu(); } /* @@ -2862,6 +2863,8 @@ int ftrace_startup(struct ftrace_ops *ops, int command) 
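The synchronize_rcu() added after list_del_rcu() in ftrace_remove_trampoline_from_kallsyms() above follows the usual RCU removal discipline: the updater unlinks the object and then waits a grace period before it may be freed or reused. A minimal sketch of that generic pattern, using a hypothetical struct foo rather than the ftrace structures (updater-side locking omitted):

    #include <linux/list.h>
    #include <linux/rcupdate.h>
    #include <linux/slab.h>

    struct foo {
            struct list_head list;
    };

    static void foo_remove(struct foo *f)
    {
            list_del_rcu(&f->list); /* unlink; existing RCU readers may still see f */
            synchronize_rcu();      /* wait for all such readers to finish */
            kfree(f);               /* only now is it safe to free or reuse f */
    }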
__unregister_ftrace_function(ops); ftrace_start_up--; ops->flags &= ~FTRACE_OPS_FL_ENABLED; + if (ops->flags & FTRACE_OPS_FL_DYNAMIC) + ftrace_trampoline_free(ops); return ret; } @@ -6990,16 +6993,14 @@ static void ftrace_ops_assist_func(unsigned long ip, unsigned long parent_ip, { int bit; - if ((op->flags & FTRACE_OPS_FL_RCU) && !rcu_is_watching()) - return; - bit = trace_test_and_set_recursion(TRACE_LIST_START, TRACE_LIST_MAX); if (bit < 0) return; preempt_disable_notrace(); - op->func(ip, parent_ip, op, regs); + if (!(op->flags & FTRACE_OPS_FL_RCU) || rcu_is_watching()) + op->func(ip, parent_ip, op, regs); preempt_enable_notrace(); trace_clear_recursion(bit); diff --git a/kernel/trace/trace.c b/kernel/trace/trace.c index f40d850ebabc..d3e5de717df2 100644 --- a/kernel/trace/trace.c +++ b/kernel/trace/trace.c @@ -3546,13 +3546,15 @@ struct trace_entry *trace_find_next_entry(struct trace_iterator *iter, if (iter->ent && iter->ent != iter->temp) { if ((!iter->temp || iter->temp_size < iter->ent_size) && !WARN_ON_ONCE(iter->temp == static_temp_buf)) { - kfree(iter->temp); - iter->temp = kmalloc(iter->ent_size, GFP_KERNEL); - if (!iter->temp) + void *temp; + temp = kmalloc(iter->ent_size, GFP_KERNEL); + if (!temp) return NULL; + kfree(iter->temp); + iter->temp = temp; + iter->temp_size = iter->ent_size; } memcpy(iter->temp, iter->ent, iter->ent_size); - iter->temp_size = iter->ent_size; iter->ent = iter->temp; } entry = __find_next_entry(iter, ent_cpu, NULL, ent_ts); @@ -3782,14 +3784,14 @@ unsigned long trace_total_entries(struct trace_array *tr) static void print_lat_help_header(struct seq_file *m) { - seq_puts(m, "# _------=> CPU# \n" - "# / _-----=> irqs-off \n" - "# | / _----=> need-resched \n" - "# || / _---=> hardirq/softirq \n" - "# ||| / _--=> preempt-depth \n" - "# |||| / delay \n" - "# cmd pid ||||| time | caller \n" - "# \\ / ||||| \\ | / \n"); + seq_puts(m, "# _------=> CPU# \n" + "# / _-----=> irqs-off \n" + "# | / _----=> need-resched \n" + "# || / _---=> hardirq/softirq \n" + "# ||| / _--=> preempt-depth \n" + "# |||| / delay \n" + "# cmd pid ||||| time | caller \n" + "# \\ / ||||| \\ | / \n"); } static void print_event_info(struct array_buffer *buf, struct seq_file *m) @@ -3810,26 +3812,26 @@ static void print_func_help_header(struct array_buffer *buf, struct seq_file *m, print_event_info(buf, m); - seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? "TGID " : ""); - seq_printf(m, "# | | %s | | |\n", tgid ? " | " : ""); + seq_printf(m, "# TASK-PID %s CPU# TIMESTAMP FUNCTION\n", tgid ? " TGID " : ""); + seq_printf(m, "# | | %s | | |\n", tgid ? " | " : ""); } static void print_func_help_header_irq(struct array_buffer *buf, struct seq_file *m, unsigned int flags) { bool tgid = flags & TRACE_ITER_RECORD_TGID; - const char *space = " "; - int prec = tgid ? 10 : 2; + const char *space = " "; + int prec = tgid ? 
12 : 2; print_event_info(buf, m); - seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space); - seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space); - seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space); - seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space); - seq_printf(m, "# %.*s||| / delay\n", prec, space); - seq_printf(m, "# TASK-PID %.*sCPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID "); - seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | "); + seq_printf(m, "# %.*s _-----=> irqs-off\n", prec, space); + seq_printf(m, "# %.*s / _----=> need-resched\n", prec, space); + seq_printf(m, "# %.*s| / _---=> hardirq/softirq\n", prec, space); + seq_printf(m, "# %.*s|| / _--=> preempt-depth\n", prec, space); + seq_printf(m, "# %.*s||| / delay\n", prec, space); + seq_printf(m, "# TASK-PID %.*s CPU# |||| TIMESTAMP FUNCTION\n", prec, " TGID "); + seq_printf(m, "# | | %.*s | |||| | |\n", prec, " | "); } void diff --git a/kernel/trace/trace_events_hist.c b/kernel/trace/trace_events_hist.c index 0b933546142e..1b2ef6490229 100644 --- a/kernel/trace/trace_events_hist.c +++ b/kernel/trace/trace_events_hist.c @@ -3865,7 +3865,6 @@ static int parse_var_defs(struct hist_trigger_data *hist_data) s = kstrdup(field_str, GFP_KERNEL); if (!s) { - kfree(hist_data->attrs->var_defs.name[n_vars]); ret = -ENOMEM; goto free; } diff --git a/kernel/trace/trace_output.c b/kernel/trace/trace_output.c index 4d1893564912..000e9dc224c6 100644 --- a/kernel/trace/trace_output.c +++ b/kernel/trace/trace_output.c @@ -497,7 +497,7 @@ lat_print_generic(struct trace_seq *s, struct trace_entry *entry, int cpu) trace_find_cmdline(entry->pid, comm); - trace_seq_printf(s, "%8.8s-%-5d %3d", + trace_seq_printf(s, "%8.8s-%-7d %3d", comm, entry->pid, cpu); return trace_print_lat_fmt(s, entry); @@ -588,15 +588,15 @@ int trace_print_context(struct trace_iterator *iter) trace_find_cmdline(entry->pid, comm); - trace_seq_printf(s, "%16s-%-5d ", comm, entry->pid); + trace_seq_printf(s, "%16s-%-7d ", comm, entry->pid); if (tr->trace_flags & TRACE_ITER_RECORD_TGID) { unsigned int tgid = trace_find_tgid(entry->pid); if (!tgid) - trace_seq_printf(s, "(-----) "); + trace_seq_printf(s, "(-------) "); else - trace_seq_printf(s, "(%5d) ", tgid); + trace_seq_printf(s, "(%7d) ", tgid); } trace_seq_printf(s, "[%03d] ", iter->cpu); @@ -636,7 +636,7 @@ int trace_print_lat_context(struct trace_iterator *iter) trace_find_cmdline(entry->pid, comm); trace_seq_printf( - s, "%16s %5d %3d %d %08x %08lx ", + s, "%16s %7d %3d %d %08x %08lx ", comm, entry->pid, iter->cpu, entry->flags, entry->preempt_count, iter->idx); } else { @@ -917,7 +917,7 @@ static enum print_line_t trace_ctxwake_print(struct trace_iterator *iter, S = task_index_to_char(field->prev_state); trace_find_cmdline(field->next_pid, comm); trace_seq_printf(&iter->seq, - " %5d:%3d:%c %s [%03d] %5d:%3d:%c %s\n", + " %7d:%3d:%c %s [%03d] %7d:%3d:%c %s\n", field->prev_pid, field->prev_prio, S, delim, diff --git a/lib/bootconfig.c b/lib/bootconfig.c index 2c905a91d4eb..649ed44f199c 100644 --- a/lib/bootconfig.c +++ b/lib/bootconfig.c @@ -31,6 +31,8 @@ static size_t xbc_data_size __initdata; static struct xbc_node *last_parent __initdata; static const char *xbc_err_msg __initdata; static int xbc_err_pos __initdata; +static int open_brace[XBC_DEPTH_MAX] __initdata; +static int brace_index __initdata; static int __init xbc_parse_error(const char *msg, const char *p) { @@ -431,27 +433,27 @@ static char *skip_spaces_until_newline(char *p) return p; } -static int __init 
__xbc_open_brace(void) +static int __init __xbc_open_brace(char *p) { - /* Mark the last key as open brace */ - last_parent->next = XBC_NODE_MAX; + /* Push the last key as open brace */ + open_brace[brace_index++] = xbc_node_index(last_parent); + if (brace_index >= XBC_DEPTH_MAX) + return xbc_parse_error("Exceed max depth of braces", p); return 0; } static int __init __xbc_close_brace(char *p) { - struct xbc_node *node; - - if (!last_parent || last_parent->next != XBC_NODE_MAX) + brace_index--; + if (!last_parent || brace_index < 0 || + (open_brace[brace_index] != xbc_node_index(last_parent))) return xbc_parse_error("Unexpected closing brace", p); - node = last_parent; - node->next = 0; - do { - node = xbc_node_get_parent(node); - } while (node && node->next != XBC_NODE_MAX); - last_parent = node; + if (brace_index == 0) + last_parent = NULL; + else + last_parent = &xbc_nodes[open_brace[brace_index - 1]]; return 0; } @@ -492,8 +494,8 @@ static int __init __xbc_parse_value(char **__v, char **__n) break; } if (strchr(",;\n#}", c)) { - v = strim(v); *p++ = '\0'; + v = strim(v); break; } } @@ -661,7 +663,7 @@ static int __init xbc_open_brace(char **k, char *n) return ret; *k = n; - return __xbc_open_brace(); + return __xbc_open_brace(n - 1); } static int __init xbc_close_brace(char **k, char *n) @@ -681,6 +683,13 @@ static int __init xbc_verify_tree(void) int i, depth, len, wlen; struct xbc_node *n, *m; + /* Brace closing */ + if (brace_index) { + n = &xbc_nodes[open_brace[brace_index]]; + return xbc_parse_error("Brace is not closed", + xbc_node_get_data(n)); + } + /* Empty tree */ if (xbc_node_num == 0) { xbc_parse_error("Empty config", xbc_data); @@ -745,6 +754,7 @@ void __init xbc_destroy_all(void) xbc_node_num = 0; memblock_free(__pa(xbc_nodes), sizeof(struct xbc_node) * XBC_NODE_MAX); xbc_nodes = NULL; + brace_index = 0; } /** diff --git a/lib/memregion.c b/lib/memregion.c index 77c85b5251da..be5cfa5a3b57 100644 --- a/lib/memregion.c +++ b/lib/memregion.c @@ -2,6 +2,7 @@ /* identifiers for device / performance-differentiated memory regions */ #include <linux/idr.h> #include <linux/types.h> +#include <linux/memregion.h> static DEFINE_IDA(memregion_ids); diff --git a/lib/random32.c b/lib/random32.c index 932345323af0..dfb9981ab798 100644 --- a/lib/random32.c +++ b/lib/random32.c @@ -49,7 +49,7 @@ static inline void prandom_state_selftest(void) } #endif -DEFINE_PER_CPU(struct rnd_state, net_rand_state); +DEFINE_PER_CPU(struct rnd_state, net_rand_state) __latent_entropy; /** * prandom_u32_state - seeded pseudo-random number generator. diff --git a/lib/string.c b/lib/string.c index 6012c385fb31..4288e0158d47 100644 --- a/lib/string.c +++ b/lib/string.c @@ -272,6 +272,30 @@ ssize_t strscpy_pad(char *dest, const char *src, size_t count) } EXPORT_SYMBOL(strscpy_pad); +/** + * stpcpy - copy a string from src to dest returning a pointer to the new end + * of dest, including src's %NUL-terminator. May overrun dest. + * @dest: pointer to end of string being copied into. Must be large enough + * to receive copy. + * @src: pointer to the beginning of string being copied from. Must not overlap + * dest. + * + * stpcpy differs from strcpy in a key way: the return value is a pointer + * to the new %NUL-terminating character in @dest. (For strcpy, the return + * value is a pointer to the start of @dest). This interface is considered + * unsafe as it doesn't perform bounds checking of the inputs. As such it's + * not recommended for usage. 
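Because stpcpy() returns a pointer to the new NUL terminator rather than to the start of @dest, chained copies avoid re-scanning what was already written. A minimal usage sketch (hypothetical buffer, assumed large enough for both strings):

    char buf[16];
    char *p;

    p = stpcpy(buf, "foo");  /* p points at buf[3], the NUL just written */
    p = stpcpy(p, "bar");    /* buf now holds "foobar"; "foo" is not re-scanned */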
Instead, its definition is provided in case + * the compiler lowers other libcalls to stpcpy. + */ +char *stpcpy(char *__restrict__ dest, const char *__restrict__ src); +char *stpcpy(char *__restrict__ dest, const char *__restrict__ src) +{ + while ((*dest++ = *src++) != '\0') + /* nothing */; + return --dest; +} +EXPORT_SYMBOL(stpcpy); + #ifndef __HAVE_ARCH_STRCAT /** * strcat - Append one %NUL-terminated string to another diff --git a/lib/test_rhashtable.c b/lib/test_rhashtable.c index c5a6fef7b45d..76c607ee6db5 100644 --- a/lib/test_rhashtable.c +++ b/lib/test_rhashtable.c @@ -434,7 +434,7 @@ static int __init test_rhltable(unsigned int entries) } else { if (WARN(err != -ENOENT, "removed non-existent element, error %d not %d", err, -ENOENT)) - continue; + continue; } } diff --git a/mm/filemap.c b/mm/filemap.c index 5202e38ab79e..99c49eeae71b 100644 --- a/mm/filemap.c +++ b/mm/filemap.c @@ -2365,7 +2365,11 @@ readpage: } if (!PageUptodate(page)) { - error = lock_page_killable(page); + if (iocb->ki_flags & IOCB_WAITQ) + error = lock_page_async(page, iocb->ki_waitq); + else + error = lock_page_killable(page); + if (unlikely(error)) goto readpage_error; if (!PageUptodate(page)) { @@ -1255,6 +1255,9 @@ static __always_inline long __get_user_pages_locked(struct mm_struct *mm, BUG_ON(*locked != 1); } + if (flags & FOLL_PIN) + atomic_set(&mm->has_pinned, 1); + /* * FOLL_PIN and FOLL_GET are mutually exclusive. Traditional behavior * is to set FOLL_GET if the caller wants pages[] filled in (but has @@ -2485,13 +2488,13 @@ static int gup_huge_pgd(pgd_t orig, pgd_t *pgdp, unsigned long addr, return 1; } -static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, +static int gup_pmd_range(pud_t *pudp, pud_t pud, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { unsigned long next; pmd_t *pmdp; - pmdp = pmd_offset(&pud, addr); + pmdp = pmd_offset_lockless(pudp, pud, addr); do { pmd_t pmd = READ_ONCE(*pmdp); @@ -2528,13 +2531,13 @@ static int gup_pmd_range(pud_t pud, unsigned long addr, unsigned long end, return 1; } -static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end, +static int gup_pud_range(p4d_t *p4dp, p4d_t p4d, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { unsigned long next; pud_t *pudp; - pudp = pud_offset(&p4d, addr); + pudp = pud_offset_lockless(p4dp, p4d, addr); do { pud_t pud = READ_ONCE(*pudp); @@ -2549,20 +2552,20 @@ static int gup_pud_range(p4d_t p4d, unsigned long addr, unsigned long end, if (!gup_huge_pd(__hugepd(pud_val(pud)), addr, PUD_SHIFT, next, flags, pages, nr)) return 0; - } else if (!gup_pmd_range(pud, addr, next, flags, pages, nr)) + } else if (!gup_pmd_range(pudp, pud, addr, next, flags, pages, nr)) return 0; } while (pudp++, addr = next, addr != end); return 1; } -static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end, +static int gup_p4d_range(pgd_t *pgdp, pgd_t pgd, unsigned long addr, unsigned long end, unsigned int flags, struct page **pages, int *nr) { unsigned long next; p4d_t *p4dp; - p4dp = p4d_offset(&pgd, addr); + p4dp = p4d_offset_lockless(pgdp, pgd, addr); do { p4d_t p4d = READ_ONCE(*p4dp); @@ -2574,7 +2577,7 @@ static int gup_p4d_range(pgd_t pgd, unsigned long addr, unsigned long end, if (!gup_huge_pd(__hugepd(p4d_val(p4d)), addr, P4D_SHIFT, next, flags, pages, nr)) return 0; - } else if (!gup_pud_range(p4d, addr, next, flags, pages, nr)) + } else if (!gup_pud_range(p4dp, p4d, addr, next, flags, pages, nr)) return 0; 
} while (p4dp++, addr = next, addr != end); @@ -2602,7 +2605,7 @@ static void gup_pgd_range(unsigned long addr, unsigned long end, if (!gup_huge_pd(__hugepd(pgd_val(pgd)), addr, PGDIR_SHIFT, next, flags, pages, nr)) return; - } else if (!gup_p4d_range(pgd, addr, next, flags, pages, nr)) + } else if (!gup_p4d_range(pgdp, pgd, addr, next, flags, pages, nr)) return; } while (pgdp++, addr = next, addr != end); } @@ -2660,6 +2663,9 @@ static int internal_get_user_pages_fast(unsigned long start, int nr_pages, FOLL_FAST_ONLY))) return -EINVAL; + if (gup_flags & FOLL_PIN) + atomic_set(¤t->mm->has_pinned, 1); + if (!(gup_flags & FOLL_FAST_ONLY)) might_lock_read(¤t->mm->mmap_lock); diff --git a/mm/huge_memory.c b/mm/huge_memory.c index faadc449cca5..da397779a6d4 100644 --- a/mm/huge_memory.c +++ b/mm/huge_memory.c @@ -1074,6 +1074,24 @@ int copy_huge_pmd(struct mm_struct *dst_mm, struct mm_struct *src_mm, src_page = pmd_page(pmd); VM_BUG_ON_PAGE(!PageHead(src_page), src_page); + + /* + * If this page is a potentially pinned page, split and retry the fault + * with smaller page size. Normally this should not happen because the + * userspace should use MADV_DONTFORK upon pinned regions. This is a + * best effort that the pinned pages won't be replaced by another + * random page during the coming copy-on-write. + */ + if (unlikely(is_cow_mapping(vma->vm_flags) && + atomic_read(&src_mm->has_pinned) && + page_maybe_dma_pinned(src_page))) { + pte_free(dst_mm, pgtable); + spin_unlock(src_ptl); + spin_unlock(dst_ptl); + __split_huge_pmd(vma, src_pmd, addr, false, NULL); + return -EAGAIN; + } + get_page(src_page); page_dup_rmap(src_page, true); add_mm_counter(dst_mm, MM_ANONPAGES, HPAGE_PMD_NR); @@ -1177,6 +1195,16 @@ int copy_huge_pud(struct mm_struct *dst_mm, struct mm_struct *src_mm, /* No huge zero pud yet */ } + /* Please refer to comments in copy_huge_pmd() */ + if (unlikely(is_cow_mapping(vma->vm_flags) && + atomic_read(&src_mm->has_pinned) && + page_maybe_dma_pinned(pud_page(pud)))) { + spin_unlock(src_ptl); + spin_unlock(dst_ptl); + __split_huge_pud(vma, src_pud, addr); + return -EAGAIN; + } + pudp_set_wrprotect(src_mm, addr, src_pud); pud = pud_mkold(pud_wrprotect(pud)); set_pud_at(dst_mm, addr, dst_pud, pud); diff --git a/mm/madvise.c b/mm/madvise.c index d4aa5f776543..0e0d61003fc6 100644 --- a/mm/madvise.c +++ b/mm/madvise.c @@ -381,9 +381,9 @@ huge_unlock: return 0; } +regular_page: if (pmd_trans_unstable(pmd)) return 0; -regular_page: #endif tlb_change_page_size(tlb, PAGE_SIZE); orig_pte = pte = pte_offset_map_lock(vma->vm_mm, pmd, addr, &ptl); diff --git a/mm/memcontrol.c b/mm/memcontrol.c index cfa6cbad21d5..6877c765b8d0 100644 --- a/mm/memcontrol.c +++ b/mm/memcontrol.c @@ -1538,9 +1538,9 @@ static char *memory_stat_format(struct mem_cgroup *memcg) memcg_page_state(memcg, WORKINGSET_ACTIVATE_ANON)); seq_buf_printf(&s, "workingset_activate_file %lu\n", memcg_page_state(memcg, WORKINGSET_ACTIVATE_FILE)); - seq_buf_printf(&s, "workingset_restore %lu\n", + seq_buf_printf(&s, "workingset_restore_anon %lu\n", memcg_page_state(memcg, WORKINGSET_RESTORE_ANON)); - seq_buf_printf(&s, "workingset_restore %lu\n", + seq_buf_printf(&s, "workingset_restore_file %lu\n", memcg_page_state(memcg, WORKINGSET_RESTORE_FILE)); seq_buf_printf(&s, "workingset_nodereclaim %lu\n", memcg_page_state(memcg, WORKINGSET_NODERECLAIM)); diff --git a/mm/memory.c b/mm/memory.c index 469af373ae76..fcfc4ca36eba 100644 --- a/mm/memory.c +++ b/mm/memory.c @@ -695,84 +695,218 @@ out: * covered by this vma. 
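The copy_huge_pmd()/copy_huge_pud() comments above note that userspace is expected to mark DMA-pinned buffers with MADV_DONTFORK so a child never inherits them and the split-and-retry path stays a rare fallback. A minimal userspace-side sketch of that convention (hypothetical helper and buffer size, error handling trimmed):

    #include <stddef.h>
    #include <sys/mman.h>

    static void *alloc_dma_buffer(size_t len)
    {
            void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE,
                             MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

            if (buf == MAP_FAILED)
                    return NULL;

            /* keep the region out of any child created by fork(), before pinning it for DMA */
            madvise(buf, len, MADV_DONTFORK);
            return buf;
    }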
*/ -static inline unsigned long -copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, +static unsigned long +copy_nonpresent_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma, unsigned long addr, int *rss) { unsigned long vm_flags = vma->vm_flags; pte_t pte = *src_pte; struct page *page; + swp_entry_t entry = pte_to_swp_entry(pte); + + if (likely(!non_swap_entry(entry))) { + if (swap_duplicate(entry) < 0) + return entry.val; + + /* make sure dst_mm is on swapoff's mmlist. */ + if (unlikely(list_empty(&dst_mm->mmlist))) { + spin_lock(&mmlist_lock); + if (list_empty(&dst_mm->mmlist)) + list_add(&dst_mm->mmlist, + &src_mm->mmlist); + spin_unlock(&mmlist_lock); + } + rss[MM_SWAPENTS]++; + } else if (is_migration_entry(entry)) { + page = migration_entry_to_page(entry); - /* pte contains position in swap or file, so copy. */ - if (unlikely(!pte_present(pte))) { - swp_entry_t entry = pte_to_swp_entry(pte); - - if (likely(!non_swap_entry(entry))) { - if (swap_duplicate(entry) < 0) - return entry.val; - - /* make sure dst_mm is on swapoff's mmlist. */ - if (unlikely(list_empty(&dst_mm->mmlist))) { - spin_lock(&mmlist_lock); - if (list_empty(&dst_mm->mmlist)) - list_add(&dst_mm->mmlist, - &src_mm->mmlist); - spin_unlock(&mmlist_lock); - } - rss[MM_SWAPENTS]++; - } else if (is_migration_entry(entry)) { - page = migration_entry_to_page(entry); - - rss[mm_counter(page)]++; - - if (is_write_migration_entry(entry) && - is_cow_mapping(vm_flags)) { - /* - * COW mappings require pages in both - * parent and child to be set to read. - */ - make_migration_entry_read(&entry); - pte = swp_entry_to_pte(entry); - if (pte_swp_soft_dirty(*src_pte)) - pte = pte_swp_mksoft_dirty(pte); - if (pte_swp_uffd_wp(*src_pte)) - pte = pte_swp_mkuffd_wp(pte); - set_pte_at(src_mm, addr, src_pte, pte); - } - } else if (is_device_private_entry(entry)) { - page = device_private_entry_to_page(entry); + rss[mm_counter(page)]++; + if (is_write_migration_entry(entry) && + is_cow_mapping(vm_flags)) { /* - * Update rss count even for unaddressable pages, as - * they should treated just like normal pages in this - * respect. - * - * We will likely want to have some new rss counters - * for unaddressable pages, at some point. But for now - * keep things as they are. + * COW mappings require pages in both + * parent and child to be set to read. */ - get_page(page); - rss[mm_counter(page)]++; - page_dup_rmap(page, false); + make_migration_entry_read(&entry); + pte = swp_entry_to_pte(entry); + if (pte_swp_soft_dirty(*src_pte)) + pte = pte_swp_mksoft_dirty(pte); + if (pte_swp_uffd_wp(*src_pte)) + pte = pte_swp_mkuffd_wp(pte); + set_pte_at(src_mm, addr, src_pte, pte); + } + } else if (is_device_private_entry(entry)) { + page = device_private_entry_to_page(entry); - /* - * We do not preserve soft-dirty information, because so - * far, checkpoint/restore is the only feature that - * requires that. And checkpoint/restore does not work - * when a device driver is involved (you cannot easily - * save and restore device driver state). - */ - if (is_write_device_private_entry(entry) && - is_cow_mapping(vm_flags)) { - make_device_private_entry_read(&entry); - pte = swp_entry_to_pte(entry); - if (pte_swp_uffd_wp(*src_pte)) - pte = pte_swp_mkuffd_wp(pte); - set_pte_at(src_mm, addr, src_pte, pte); - } + /* + * Update rss count even for unaddressable pages, as + * they should treated just like normal pages in this + * respect. 
+ * + * We will likely want to have some new rss counters + * for unaddressable pages, at some point. But for now + * keep things as they are. + */ + get_page(page); + rss[mm_counter(page)]++; + page_dup_rmap(page, false); + + /* + * We do not preserve soft-dirty information, because so + * far, checkpoint/restore is the only feature that + * requires that. And checkpoint/restore does not work + * when a device driver is involved (you cannot easily + * save and restore device driver state). + */ + if (is_write_device_private_entry(entry) && + is_cow_mapping(vm_flags)) { + make_device_private_entry_read(&entry); + pte = swp_entry_to_pte(entry); + if (pte_swp_uffd_wp(*src_pte)) + pte = pte_swp_mkuffd_wp(pte); + set_pte_at(src_mm, addr, src_pte, pte); } - goto out_set_pte; + } + set_pte_at(dst_mm, addr, dst_pte, pte); + return 0; +} + +/* + * Copy a present and normal page if necessary. + * + * NOTE! The usual case is that this doesn't need to do + * anything, and can just return a positive value. That + * will let the caller know that it can just increase + * the page refcount and re-use the pte the traditional + * way. + * + * But _if_ we need to copy it because it needs to be + * pinned in the parent (and the child should get its own + * copy rather than just a reference to the same page), + * we'll do that here and return zero to let the caller + * know we're done. + * + * And if we need a pre-allocated page but don't yet have + * one, return a negative error to let the preallocation + * code know so that it can do so outside the page table + * lock. + */ +static inline int +copy_present_page(struct mm_struct *dst_mm, struct mm_struct *src_mm, + pte_t *dst_pte, pte_t *src_pte, + struct vm_area_struct *vma, struct vm_area_struct *new, + unsigned long addr, int *rss, struct page **prealloc, + pte_t pte, struct page *page) +{ + struct page *new_page; + + if (!is_cow_mapping(vma->vm_flags)) + return 1; + + /* + * The trick starts. + * + * What we want to do is to check whether this page may + * have been pinned by the parent process. If so, + * instead of wrprotect the pte on both sides, we copy + * the page immediately so that we'll always guarantee + * the pinned page won't be randomly replaced in the + * future. + * + * To achieve this, we do the following: + * + * 1. Write-protect the pte if it's writable. This is + * to protect concurrent write fast-gup with + * FOLL_PIN, so that we'll fail the fast-gup with + * the write bit removed. + * + * 2. Check page_maybe_dma_pinned() to see whether this + * page may have been pinned. + * + * The order of these steps is important to serialize + * against the fast-gup code (gup_pte_range()) on the + * pte check and try_grab_compound_head(), so that + * we'll make sure either we'll capture that fast-gup + * so we'll copy the pinned page here, or we'll fail + * that fast-gup. + * + * NOTE! Even if we don't end up copying the page, + * we won't undo this wrprotect(), because the normal + * reference copy will need it anyway. + */ + if (pte_write(pte)) + ptep_set_wrprotect(src_mm, addr, src_pte); + + /* + * These are the "normally we can just copy by reference" + * checks. + */ + if (likely(!atomic_read(&src_mm->has_pinned))) + return 1; + if (likely(!page_maybe_dma_pinned(page))) + return 1; + + /* + * Uhhuh. It looks like the page might be a pinned page, + * and we actually need to copy it. Now we can set the + * source pte back to being writable. 
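The page_maybe_dma_pinned() test described above only fires for pages pinned through the FOLL_PIN interfaces (pin_user_pages() and friends), which are also what set mm->has_pinned. A minimal sketch of the in-kernel caller side, as a hypothetical driver helper rather than code from this patch:

    /* Hypothetical driver helper: pin one user page for long-lived DMA */
    static int pin_one_page(unsigned long user_addr, struct page **page)
    {
            long pinned;

            /* pin_user_pages() implies FOLL_PIN and marks the mm as having pins */
            pinned = pin_user_pages(user_addr, 1,
                                    FOLL_WRITE | FOLL_LONGTERM, page, NULL);
            if (pinned != 1)
                    return pinned < 0 ? pinned : -EFAULT;

            /* ... program the device to DMA into *page ... */

            unpin_user_pages(page, 1);
            return 0;
    }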
+ */ + if (pte_write(pte)) + set_pte_at(src_mm, addr, src_pte, pte); + + new_page = *prealloc; + if (!new_page) + return -EAGAIN; + + /* + * We have a prealloc page, all good! Take it + * over and copy the page & arm it. + */ + *prealloc = NULL; + copy_user_highpage(new_page, page, addr, vma); + __SetPageUptodate(new_page); + page_add_new_anon_rmap(new_page, new, addr, false); + lru_cache_add_inactive_or_unevictable(new_page, new); + rss[mm_counter(new_page)]++; + + /* All done, just insert the new page copy in the child */ + pte = mk_pte(new_page, new->vm_page_prot); + pte = maybe_mkwrite(pte_mkdirty(pte), new); + set_pte_at(dst_mm, addr, dst_pte, pte); + return 0; +} + +/* + * Copy one pte. Returns 0 if succeeded, or -EAGAIN if one preallocated page + * is required to copy this pte. + */ +static inline int +copy_present_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, + pte_t *dst_pte, pte_t *src_pte, struct vm_area_struct *vma, + struct vm_area_struct *new, + unsigned long addr, int *rss, struct page **prealloc) +{ + unsigned long vm_flags = vma->vm_flags; + pte_t pte = *src_pte; + struct page *page; + + page = vm_normal_page(vma, addr, pte); + if (page) { + int retval; + + retval = copy_present_page(dst_mm, src_mm, + dst_pte, src_pte, + vma, new, + addr, rss, prealloc, + pte, page); + if (retval <= 0) + return retval; + + get_page(page); + page_dup_rmap(page, false); + rss[mm_counter(page)]++; } /* @@ -800,35 +934,51 @@ copy_one_pte(struct mm_struct *dst_mm, struct mm_struct *src_mm, if (!(vm_flags & VM_UFFD_WP)) pte = pte_clear_uffd_wp(pte); - page = vm_normal_page(vma, addr, pte); - if (page) { - get_page(page); - page_dup_rmap(page, false); - rss[mm_counter(page)]++; - } - -out_set_pte: set_pte_at(dst_mm, addr, dst_pte, pte); return 0; } +static inline struct page * +page_copy_prealloc(struct mm_struct *src_mm, struct vm_area_struct *vma, + unsigned long addr) +{ + struct page *new_page; + + new_page = alloc_page_vma(GFP_HIGHUSER_MOVABLE, vma, addr); + if (!new_page) + return NULL; + + if (mem_cgroup_charge(new_page, src_mm, GFP_KERNEL)) { + put_page(new_page); + return NULL; + } + cgroup_throttle_swaprate(new_page, GFP_KERNEL); + + return new_page; +} + static int copy_pte_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, pmd_t *dst_pmd, pmd_t *src_pmd, struct vm_area_struct *vma, + struct vm_area_struct *new, unsigned long addr, unsigned long end) { pte_t *orig_src_pte, *orig_dst_pte; pte_t *src_pte, *dst_pte; spinlock_t *src_ptl, *dst_ptl; - int progress = 0; + int progress, ret = 0; int rss[NR_MM_COUNTERS]; swp_entry_t entry = (swp_entry_t){0}; + struct page *prealloc = NULL; again: + progress = 0; init_rss_vec(rss); dst_pte = pte_alloc_map_lock(dst_mm, dst_pmd, addr, &dst_ptl); - if (!dst_pte) - return -ENOMEM; + if (!dst_pte) { + ret = -ENOMEM; + goto out; + } src_pte = pte_offset_map(src_pmd, addr); src_ptl = pte_lockptr(src_mm, src_pmd); spin_lock_nested(src_ptl, SINGLE_DEPTH_NESTING); @@ -851,10 +1001,34 @@ again: progress++; continue; } - entry.val = copy_one_pte(dst_mm, src_mm, dst_pte, src_pte, + if (unlikely(!pte_present(*src_pte))) { + entry.val = copy_nonpresent_pte(dst_mm, src_mm, + dst_pte, src_pte, vma, addr, rss); - if (entry.val) + if (entry.val) + break; + progress += 8; + continue; + } + /* copy_present_pte() will clear `*prealloc' if consumed */ + ret = copy_present_pte(dst_mm, src_mm, dst_pte, src_pte, + vma, new, addr, rss, &prealloc); + /* + * If we need a pre-allocated page for this pte, drop the + * locks, allocate, and try again. 
+ */ + if (unlikely(ret == -EAGAIN)) break; + if (unlikely(prealloc)) { + /* + * pre-alloc page cannot be reused by next time so as + * to strictly follow mempolicy (e.g., alloc_page_vma() + * will allocate page according to address). This + * could only happen if one pinned pte changed. + */ + put_page(prealloc); + prealloc = NULL; + } progress += 8; } while (dst_pte++, src_pte++, addr += PAGE_SIZE, addr != end); @@ -866,17 +1040,30 @@ again: cond_resched(); if (entry.val) { - if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) + if (add_swap_count_continuation(entry, GFP_KERNEL) < 0) { + ret = -ENOMEM; + goto out; + } + entry.val = 0; + } else if (ret) { + WARN_ON_ONCE(ret != -EAGAIN); + prealloc = page_copy_prealloc(src_mm, vma, addr); + if (!prealloc) return -ENOMEM; - progress = 0; + /* We've captured and resolved the error. Reset, try again. */ + ret = 0; } if (addr != end) goto again; - return 0; +out: + if (unlikely(prealloc)) + put_page(prealloc); + return ret; } static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, pud_t *dst_pud, pud_t *src_pud, struct vm_area_struct *vma, + struct vm_area_struct *new, unsigned long addr, unsigned long end) { pmd_t *src_pmd, *dst_pmd; @@ -903,7 +1090,7 @@ static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src if (pmd_none_or_clear_bad(src_pmd)) continue; if (copy_pte_range(dst_mm, src_mm, dst_pmd, src_pmd, - vma, addr, next)) + vma, new, addr, next)) return -ENOMEM; } while (dst_pmd++, src_pmd++, addr = next, addr != end); return 0; @@ -911,6 +1098,7 @@ static inline int copy_pmd_range(struct mm_struct *dst_mm, struct mm_struct *src static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, p4d_t *dst_p4d, p4d_t *src_p4d, struct vm_area_struct *vma, + struct vm_area_struct *new, unsigned long addr, unsigned long end) { pud_t *src_pud, *dst_pud; @@ -937,7 +1125,7 @@ static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src if (pud_none_or_clear_bad(src_pud)) continue; if (copy_pmd_range(dst_mm, src_mm, dst_pud, src_pud, - vma, addr, next)) + vma, new, addr, next)) return -ENOMEM; } while (dst_pud++, src_pud++, addr = next, addr != end); return 0; @@ -945,6 +1133,7 @@ static inline int copy_pud_range(struct mm_struct *dst_mm, struct mm_struct *src static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, pgd_t *dst_pgd, pgd_t *src_pgd, struct vm_area_struct *vma, + struct vm_area_struct *new, unsigned long addr, unsigned long end) { p4d_t *src_p4d, *dst_p4d; @@ -959,14 +1148,14 @@ static inline int copy_p4d_range(struct mm_struct *dst_mm, struct mm_struct *src if (p4d_none_or_clear_bad(src_p4d)) continue; if (copy_pud_range(dst_mm, src_mm, dst_p4d, src_p4d, - vma, addr, next)) + vma, new, addr, next)) return -ENOMEM; } while (dst_p4d++, src_p4d++, addr = next, addr != end); return 0; } int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, - struct vm_area_struct *vma) + struct vm_area_struct *vma, struct vm_area_struct *new) { pgd_t *src_pgd, *dst_pgd; unsigned long next; @@ -1021,7 +1210,7 @@ int copy_page_range(struct mm_struct *dst_mm, struct mm_struct *src_mm, if (pgd_none_or_clear_bad(src_pgd)) continue; if (unlikely(copy_p4d_range(dst_mm, src_mm, dst_pgd, src_pgd, - vma, addr, next))) { + vma, new, addr, next))) { ret = -ENOMEM; break; } @@ -2955,8 +3144,8 @@ static vm_fault_t do_wp_page(struct vm_fault *vmf) * page count reference, and the page is locked, * it's dark out, and we're 
wearing sunglasses. Hit it. */ - wp_page_reuse(vmf); unlock_page(page); + wp_page_reuse(vmf); return VM_FAULT_WRITE; } else if (unlikely((vma->vm_flags & (VM_WRITE|VM_SHARED)) == (VM_WRITE|VM_SHARED))) { diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c index b11a269e2356..ce3e73e3a5c1 100644 --- a/mm/memory_hotplug.c +++ b/mm/memory_hotplug.c @@ -729,7 +729,7 @@ void __ref move_pfn_range_to_zone(struct zone *zone, unsigned long start_pfn, * are reserved so nobody should be touching them so we should be safe */ memmap_init_zone(nr_pages, nid, zone_idx(zone), start_pfn, - MEMMAP_HOTPLUG, altmap); + MEMINIT_HOTPLUG, altmap); set_zone_contiguous(zone); } @@ -1080,7 +1080,8 @@ int __ref add_memory_resource(int nid, struct resource *res) } /* link memory sections under this node.*/ - ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1)); + ret = link_mem_sections(nid, PFN_DOWN(start), PFN_UP(start + size - 1), + MEMINIT_HOTPLUG); BUG_ON(ret); /* create new memmap entry */ diff --git a/mm/migrate.c b/mm/migrate.c index aecb1433cf3c..04a98bb2f568 100644 --- a/mm/migrate.c +++ b/mm/migrate.c @@ -1446,7 +1446,7 @@ retry: * Capture required information that might get lost * during migration. */ - is_thp = PageTransHuge(page); + is_thp = PageTransHuge(page) && !PageHuge(page); nr_subpages = thp_nr_pages(page); cond_resched(); @@ -1472,7 +1472,7 @@ retry: * we encounter them after the rest of the list * is processed. */ - if (PageTransHuge(page) && !PageHuge(page)) { + if (is_thp) { lock_page(page); rc = split_huge_page_to_list(page, from); unlock_page(page); @@ -1481,8 +1481,7 @@ retry: nr_thp_split++; goto retry; } - } - if (is_thp) { + nr_thp_failed++; nr_failed += nr_subpages; goto out; diff --git a/mm/page_alloc.c b/mm/page_alloc.c index fab5e97dc9ca..6866533de8e6 100644 --- a/mm/page_alloc.c +++ b/mm/page_alloc.c @@ -3367,9 +3367,16 @@ struct page *rmqueue(struct zone *preferred_zone, struct page *page; if (likely(order == 0)) { - page = rmqueue_pcplist(preferred_zone, zone, gfp_flags, + /* + * MIGRATE_MOVABLE pcplist could have the pages on CMA area and + * we need to skip it when CMA area isn't allowed. + */ + if (!IS_ENABLED(CONFIG_CMA) || alloc_flags & ALLOC_CMA || + migratetype != MIGRATE_MOVABLE) { + page = rmqueue_pcplist(preferred_zone, zone, gfp_flags, migratetype, alloc_flags); - goto out; + goto out; + } } /* @@ -3381,7 +3388,13 @@ struct page *rmqueue(struct zone *preferred_zone, do { page = NULL; - if (alloc_flags & ALLOC_HARDER) { + /* + * order-0 request can reach here when the pcplist is skipped + * due to non-CMA allocation context. HIGHATOMIC area is + * reserved for high-order atomic allocation, so order-0 + * request should skip it. + */ + if (order > 0 && alloc_flags & ALLOC_HARDER) { page = __rmqueue_smallest(zone, order, MIGRATE_HIGHATOMIC); if (page) trace_mm_page_alloc_zone_locked(page, order, migratetype); @@ -5975,7 +5988,7 @@ overlap_memmap_init(unsigned long zone, unsigned long *pfn) * done. Non-atomic initialization, single-pass. */ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, - unsigned long start_pfn, enum memmap_context context, + unsigned long start_pfn, enum meminit_context context, struct vmem_altmap *altmap) { unsigned long pfn, end_pfn = start_pfn + size; @@ -6007,7 +6020,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, * There can be holes in boot-time mem_map[]s handed to this * function. They do not exist on hotplugged memory. 
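The rmqueue() comments above describe when an order-0 request may still take the per-cpu list fast path; restated as a small predicate (a hypothetical helper, not code from the patch):

    /* May this order-0 request use the MIGRATE_MOVABLE pcplist? */
    static bool use_pcplist(unsigned int alloc_flags, int migratetype)
    {
            /* CMA pages may sit on the MOVABLE pcplist; skip it unless CMA is allowed */
            return !IS_ENABLED(CONFIG_CMA) ||
                   (alloc_flags & ALLOC_CMA) ||
                   migratetype != MIGRATE_MOVABLE;
    }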
*/ - if (context == MEMMAP_EARLY) { + if (context == MEMINIT_EARLY) { if (overlap_memmap_init(zone, &pfn)) continue; if (defer_init(nid, pfn, end_pfn)) @@ -6016,7 +6029,7 @@ void __meminit memmap_init_zone(unsigned long size, int nid, unsigned long zone, page = pfn_to_page(pfn); __init_single_page(page, pfn, zone, nid); - if (context == MEMMAP_HOTPLUG) + if (context == MEMINIT_HOTPLUG) __SetPageReserved(page); /* @@ -6099,7 +6112,7 @@ void __ref memmap_init_zone_device(struct zone *zone, * check here not to call set_pageblock_migratetype() against * pfn out of zone. * - * Please note that MEMMAP_HOTPLUG path doesn't clear memmap + * Please note that MEMINIT_HOTPLUG path doesn't clear memmap * because this is done early in section_activate() */ if (!(pfn & (pageblock_nr_pages - 1))) { @@ -6137,7 +6150,7 @@ void __meminit __weak memmap_init(unsigned long size, int nid, if (end_pfn > start_pfn) { size = end_pfn - start_pfn; memmap_init_zone(size, nid, zone, start_pfn, - MEMMAP_EARLY, NULL); + MEMINIT_EARLY, NULL); } } } diff --git a/mm/slab.c b/mm/slab.c index 3160dff6fd76..f658e86ec8ce 100644 --- a/mm/slab.c +++ b/mm/slab.c @@ -1632,6 +1632,10 @@ static void slab_destroy(struct kmem_cache *cachep, struct page *page) kmem_cache_free(cachep->freelist_cache, freelist); } +/* + * Update the size of the caches before calling slabs_destroy as it may + * recursively call kfree. + */ static void slabs_destroy(struct kmem_cache *cachep, struct list_head *list) { struct page *page, *n; @@ -2153,8 +2157,8 @@ static void do_drain(void *arg) spin_lock(&n->list_lock); free_block(cachep, ac->entry, ac->avail, node, &list); spin_unlock(&n->list_lock); - slabs_destroy(cachep, &list); ac->avail = 0; + slabs_destroy(cachep, &list); } static void drain_cpu_caches(struct kmem_cache *cachep) @@ -3402,9 +3406,9 @@ free_done: } #endif spin_unlock(&n->list_lock); - slabs_destroy(cachep, &list); ac->avail -= batchcount; memmove(ac->entry, &(ac->entry[batchcount]), sizeof(void *)*ac->avail); + slabs_destroy(cachep, &list); } /* diff --git a/mm/slub.c b/mm/slub.c index d4177aecedf6..6d3574013b2f 100644 --- a/mm/slub.c +++ b/mm/slub.c @@ -1413,10 +1413,6 @@ slab_flags_t kmem_cache_flags(unsigned int object_size, char *next_block; slab_flags_t block_flags; - /* If slub_debug = 0, it folds into the if conditional. 
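The kmem_cache_flags() change in this hunk makes the command-line slub_debug settings combine with a cache's own flags on every return path. For reference, the boot parameter syntax being parsed here looks like the following (cache names are only examples):

    slub_debug=FZ                      sanity checks (F) and red zoning (Z) for every cache
    slub_debug=FZ,dentry,kmalloc-64    the same options, limited to the named caches
    slub_debug=F;Z,dentry              two option blocks, separated by ';'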
*/ - if (!slub_debug_string) - return flags | slub_debug; - len = strlen(name); next_block = slub_debug_string; /* Go through all blocks of debug options, see if any matches our slab's name */ @@ -1450,7 +1446,7 @@ slab_flags_t kmem_cache_flags(unsigned int object_size, } } - return slub_debug; + return flags | slub_debug; } #else /* !CONFIG_SLUB_DEBUG */ static inline void setup_object_debug(struct kmem_cache *s, diff --git a/mm/swapfile.c b/mm/swapfile.c index 12f59e641b5e..debc94155f74 100644 --- a/mm/swapfile.c +++ b/mm/swapfile.c @@ -1078,7 +1078,7 @@ start_over: goto nextsi; } if (size == SWAPFILE_CLUSTER) { - if (!(si->flags & SWP_FS)) + if (si->flags & SWP_BLKDEV) n_ret = swap_alloc_cluster(si, swp_entries); } else n_ret = scan_swap_map_slots(si, SWAP_HAS_CACHE, diff --git a/net/batman-adv/bridge_loop_avoidance.c b/net/batman-adv/bridge_loop_avoidance.c index 8500f56cbd10..c350ab63cd54 100644 --- a/net/batman-adv/bridge_loop_avoidance.c +++ b/net/batman-adv/bridge_loop_avoidance.c @@ -25,6 +25,7 @@ #include <linux/lockdep.h> #include <linux/netdevice.h> #include <linux/netlink.h> +#include <linux/preempt.h> #include <linux/rculist.h> #include <linux/rcupdate.h> #include <linux/seq_file.h> @@ -83,11 +84,12 @@ static inline u32 batadv_choose_claim(const void *data, u32 size) */ static inline u32 batadv_choose_backbone_gw(const void *data, u32 size) { - const struct batadv_bla_claim *claim = (struct batadv_bla_claim *)data; + const struct batadv_bla_backbone_gw *gw; u32 hash = 0; - hash = jhash(&claim->addr, sizeof(claim->addr), hash); - hash = jhash(&claim->vid, sizeof(claim->vid), hash); + gw = (struct batadv_bla_backbone_gw *)data; + hash = jhash(&gw->orig, sizeof(gw->orig), hash); + hash = jhash(&gw->vid, sizeof(gw->vid), hash); return hash % size; } @@ -1579,13 +1581,16 @@ int batadv_bla_init(struct batadv_priv *bat_priv) } /** - * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup. + * batadv_bla_check_duplist() - Check if a frame is in the broadcast dup. * @bat_priv: the bat priv with all the soft interface information - * @skb: contains the bcast_packet to be checked + * @skb: contains the multicast packet to be checked + * @payload_ptr: pointer to position inside the head buffer of the skb + * marking the start of the data to be CRC'ed + * @orig: originator mac address, NULL if unknown * - * check if it is on our broadcast list. Another gateway might - * have sent the same packet because it is connected to the same backbone, - * so we have to remove this duplicate. + * Check if it is on our broadcast list. Another gateway might have sent the + * same packet because it is connected to the same backbone, so we have to + * remove this duplicate. * * This is performed by checking the CRC, which will tell us * with a good chance that it is the same packet. If it is furthermore @@ -1594,19 +1599,17 @@ int batadv_bla_init(struct batadv_priv *bat_priv) * * Return: true if a packet is in the duplicate list, false otherwise. */ -bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, - struct sk_buff *skb) +static bool batadv_bla_check_duplist(struct batadv_priv *bat_priv, + struct sk_buff *skb, u8 *payload_ptr, + const u8 *orig) { - int i, curr; - __be32 crc; - struct batadv_bcast_packet *bcast_packet; struct batadv_bcast_duplist_entry *entry; bool ret = false; - - bcast_packet = (struct batadv_bcast_packet *)skb->data; + int i, curr; + __be32 crc; /* calculate the crc ... 
*/ - crc = batadv_skb_crc32(skb, (u8 *)(bcast_packet + 1)); + crc = batadv_skb_crc32(skb, payload_ptr); spin_lock_bh(&bat_priv->bla.bcast_duplist_lock); @@ -1625,8 +1628,21 @@ bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, if (entry->crc != crc) continue; - if (batadv_compare_eth(entry->orig, bcast_packet->orig)) - continue; + /* are the originators both known and not anonymous? */ + if (orig && !is_zero_ether_addr(orig) && + !is_zero_ether_addr(entry->orig)) { + /* If known, check if the new frame came from + * the same originator: + * We are safe to take identical frames from the + * same orig, if known, as multiplications in + * the mesh are detected via the (orig, seqno) pair. + * So we can be a bit more liberal here and allow + * identical frames from the same orig which the source + * host might have sent multiple times on purpose. + */ + if (batadv_compare_eth(entry->orig, orig)) + continue; + } /* this entry seems to match: same crc, not too old, * and from another gw. therefore return true to forbid it. @@ -1642,7 +1658,14 @@ bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, entry = &bat_priv->bla.bcast_duplist[curr]; entry->crc = crc; entry->entrytime = jiffies; - ether_addr_copy(entry->orig, bcast_packet->orig); + + /* known originator */ + if (orig) + ether_addr_copy(entry->orig, orig); + /* anonymous originator */ + else + eth_zero_addr(entry->orig); + bat_priv->bla.bcast_duplist_curr = curr; out: @@ -1652,6 +1675,48 @@ out: } /** + * batadv_bla_check_ucast_duplist() - Check if a frame is in the broadcast dup. + * @bat_priv: the bat priv with all the soft interface information + * @skb: contains the multicast packet to be checked, decapsulated from a + * unicast_packet + * + * Check if it is on our broadcast list. Another gateway might have sent the + * same packet because it is connected to the same backbone, so we have to + * remove this duplicate. + * + * Return: true if a packet is in the duplicate list, false otherwise. + */ +static bool batadv_bla_check_ucast_duplist(struct batadv_priv *bat_priv, + struct sk_buff *skb) +{ + return batadv_bla_check_duplist(bat_priv, skb, (u8 *)skb->data, NULL); +} + +/** + * batadv_bla_check_bcast_duplist() - Check if a frame is in the broadcast dup. + * @bat_priv: the bat priv with all the soft interface information + * @skb: contains the bcast_packet to be checked + * + * Check if it is on our broadcast list. Another gateway might have sent the + * same packet because it is connected to the same backbone, so we have to + * remove this duplicate. + * + * Return: true if a packet is in the duplicate list, false otherwise. + */ +bool batadv_bla_check_bcast_duplist(struct batadv_priv *bat_priv, + struct sk_buff *skb) +{ + struct batadv_bcast_packet *bcast_packet; + u8 *payload_ptr; + + bcast_packet = (struct batadv_bcast_packet *)skb->data; + payload_ptr = (u8 *)(bcast_packet + 1); + + return batadv_bla_check_duplist(bat_priv, skb, payload_ptr, + bcast_packet->orig); +} + +/** * batadv_bla_is_backbone_gw_orig() - Check if the originator is a gateway for * the VLAN identified by vid. * @bat_priv: the bat priv with all the soft interface information @@ -1812,7 +1877,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb, * @bat_priv: the bat priv with all the soft interface information * @skb: the frame to be checked * @vid: the VLAN ID of the frame - * @is_bcast: the packet came in a broadcast packet type. 
+ * @packet_type: the batman packet type this frame came in * * batadv_bla_rx avoidance checks if: * * we have to race for a claim @@ -1824,7 +1889,7 @@ batadv_bla_loopdetect_check(struct batadv_priv *bat_priv, struct sk_buff *skb, * further process the skb. */ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, - unsigned short vid, bool is_bcast) + unsigned short vid, int packet_type) { struct batadv_bla_backbone_gw *backbone_gw; struct ethhdr *ethhdr; @@ -1846,9 +1911,32 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, goto handled; if (unlikely(atomic_read(&bat_priv->bla.num_requests))) - /* don't allow broadcasts while requests are in flight */ - if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) - goto handled; + /* don't allow multicast packets while requests are in flight */ + if (is_multicast_ether_addr(ethhdr->h_dest)) + /* Both broadcast flooding or multicast-via-unicasts + * delivery might send to multiple backbone gateways + * sharing the same LAN and therefore need to coordinate + * which backbone gateway forwards into the LAN, + * by claiming the payload source address. + * + * Broadcast flooding and multicast-via-unicasts + * delivery use the following two batman packet types. + * Note: explicitly exclude BATADV_UNICAST_4ADDR, + * as the DHCP gateway feature will send explicitly + * to only one BLA gateway, so the claiming process + * should be avoided there. + */ + if (packet_type == BATADV_BCAST || + packet_type == BATADV_UNICAST) + goto handled; + + /* potential duplicates from foreign BLA backbone gateways via + * multicast-in-unicast packets + */ + if (is_multicast_ether_addr(ethhdr->h_dest) && + packet_type == BATADV_UNICAST && + batadv_bla_check_ucast_duplist(bat_priv, skb)) + goto handled; ether_addr_copy(search_claim.addr, ethhdr->h_source); search_claim.vid = vid; @@ -1883,13 +1971,14 @@ bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, goto allow; } - /* if it is a broadcast ... */ - if (is_multicast_ether_addr(ethhdr->h_dest) && is_bcast) { + /* if it is a multicast ... */ + if (is_multicast_ether_addr(ethhdr->h_dest) && + (packet_type == BATADV_BCAST || packet_type == BATADV_UNICAST)) { /* ... drop it. the responsible gateway is in charge. * - * We need to check is_bcast because with the gateway + * We need to check packet type because with the gateway * feature, broadcasts (like DHCP requests) may be sent - * using a unicast packet type. + * using a unicast 4 address packet type. See comment above. 
*/ goto handled; } else { diff --git a/net/batman-adv/bridge_loop_avoidance.h b/net/batman-adv/bridge_loop_avoidance.h index 41edb2c4a327..a81c41b636f9 100644 --- a/net/batman-adv/bridge_loop_avoidance.h +++ b/net/batman-adv/bridge_loop_avoidance.h @@ -35,7 +35,7 @@ static inline bool batadv_bla_is_loopdetect_mac(const uint8_t *mac) #ifdef CONFIG_BATMAN_ADV_BLA bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, - unsigned short vid, bool is_bcast); + unsigned short vid, int packet_type); bool batadv_bla_tx(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid); bool batadv_bla_is_backbone_gw(struct sk_buff *skb, @@ -66,7 +66,7 @@ bool batadv_bla_check_claim(struct batadv_priv *bat_priv, u8 *addr, static inline bool batadv_bla_rx(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid, - bool is_bcast) + int packet_type) { return false; } diff --git a/net/batman-adv/multicast.c b/net/batman-adv/multicast.c index bdc4a1fba1c6..ca24a2e522b7 100644 --- a/net/batman-adv/multicast.c +++ b/net/batman-adv/multicast.c @@ -51,6 +51,7 @@ #include <uapi/linux/batadv_packet.h> #include <uapi/linux/batman_adv.h> +#include "bridge_loop_avoidance.h" #include "hard-interface.h" #include "hash.h" #include "log.h" @@ -1435,6 +1436,35 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb, } /** + * batadv_mcast_forw_send_orig() - send a multicast packet to an originator + * @bat_priv: the bat priv with all the soft interface information + * @skb: the multicast packet to send + * @vid: the vlan identifier + * @orig_node: the originator to send the packet to + * + * Return: NET_XMIT_DROP in case of error or NET_XMIT_SUCCESS otherwise. + */ +int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv, + struct sk_buff *skb, + unsigned short vid, + struct batadv_orig_node *orig_node) +{ + /* Avoid sending multicast-in-unicast packets to other BLA + * gateways - they already got the frame from the LAN side + * we share with them. + * TODO: Refactor to take BLA into account earlier, to avoid + * reducing the mcast_fanout count. 
+ */ + if (batadv_bla_is_backbone_gw_orig(bat_priv, orig_node->orig, vid)) { + dev_kfree_skb(skb); + return NET_XMIT_SUCCESS; + } + + return batadv_send_skb_unicast(bat_priv, skb, BATADV_UNICAST, 0, + orig_node, vid); +} + +/** * batadv_mcast_forw_tt() - forwards a packet to multicast listeners * @bat_priv: the bat priv with all the soft interface information * @skb: the multicast packet to transmit @@ -1471,8 +1501,8 @@ batadv_mcast_forw_tt(struct batadv_priv *bat_priv, struct sk_buff *skb, break; } - batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0, - orig_entry->orig_node, vid); + batadv_mcast_forw_send_orig(bat_priv, newskb, vid, + orig_entry->orig_node); } rcu_read_unlock(); @@ -1513,8 +1543,7 @@ batadv_mcast_forw_want_all_ipv4(struct batadv_priv *bat_priv, break; } - batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0, - orig_node, vid); + batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node); } rcu_read_unlock(); return ret; @@ -1551,8 +1580,7 @@ batadv_mcast_forw_want_all_ipv6(struct batadv_priv *bat_priv, break; } - batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0, - orig_node, vid); + batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node); } rcu_read_unlock(); return ret; @@ -1618,8 +1646,7 @@ batadv_mcast_forw_want_all_rtr4(struct batadv_priv *bat_priv, break; } - batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0, - orig_node, vid); + batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node); } rcu_read_unlock(); return ret; @@ -1656,8 +1683,7 @@ batadv_mcast_forw_want_all_rtr6(struct batadv_priv *bat_priv, break; } - batadv_send_skb_unicast(bat_priv, newskb, BATADV_UNICAST, 0, - orig_node, vid); + batadv_mcast_forw_send_orig(bat_priv, newskb, vid, orig_node); } rcu_read_unlock(); return ret; diff --git a/net/batman-adv/multicast.h b/net/batman-adv/multicast.h index ebf825991ecd..3e114bc5ca3b 100644 --- a/net/batman-adv/multicast.h +++ b/net/batman-adv/multicast.h @@ -46,6 +46,11 @@ enum batadv_forw_mode batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb, struct batadv_orig_node **mcast_single_orig); +int batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv, + struct sk_buff *skb, + unsigned short vid, + struct batadv_orig_node *orig_node); + int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid); @@ -72,6 +77,16 @@ batadv_mcast_forw_mode(struct batadv_priv *bat_priv, struct sk_buff *skb, } static inline int +batadv_mcast_forw_send_orig(struct batadv_priv *bat_priv, + struct sk_buff *skb, + unsigned short vid, + struct batadv_orig_node *orig_node) +{ + kfree_skb(skb); + return NET_XMIT_DROP; +} + +static inline int batadv_mcast_forw_send(struct batadv_priv *bat_priv, struct sk_buff *skb, unsigned short vid) { diff --git a/net/batman-adv/routing.c b/net/batman-adv/routing.c index 27cdf5e4349a..9e5c71e406ff 100644 --- a/net/batman-adv/routing.c +++ b/net/batman-adv/routing.c @@ -826,6 +826,10 @@ static bool batadv_check_unicast_ttvn(struct batadv_priv *bat_priv, vid = batadv_get_vid(skb, hdr_len); ethhdr = (struct ethhdr *)(skb->data + hdr_len); + /* do not reroute multicast frames in a unicast header */ + if (is_multicast_ether_addr(ethhdr->h_dest)) + return true; + /* check if the destination client was served by this node and it is now * roaming. 
In this case, it means that the node has got a ROAM_ADV * message and that it knows the new destination in the mesh to re-route diff --git a/net/batman-adv/soft-interface.c b/net/batman-adv/soft-interface.c index 23833a0ba5e6..cdde943c1b83 100644 --- a/net/batman-adv/soft-interface.c +++ b/net/batman-adv/soft-interface.c @@ -364,9 +364,8 @@ send: goto dropped; ret = batadv_send_skb_via_gw(bat_priv, skb, vid); } else if (mcast_single_orig) { - ret = batadv_send_skb_unicast(bat_priv, skb, - BATADV_UNICAST, 0, - mcast_single_orig, vid); + ret = batadv_mcast_forw_send_orig(bat_priv, skb, vid, + mcast_single_orig); } else if (forw_mode == BATADV_FORW_SOME) { ret = batadv_mcast_forw_send(bat_priv, skb, vid); } else { @@ -425,10 +424,10 @@ void batadv_interface_rx(struct net_device *soft_iface, struct vlan_ethhdr *vhdr; struct ethhdr *ethhdr; unsigned short vid; - bool is_bcast; + int packet_type; batadv_bcast_packet = (struct batadv_bcast_packet *)skb->data; - is_bcast = (batadv_bcast_packet->packet_type == BATADV_BCAST); + packet_type = batadv_bcast_packet->packet_type; skb_pull_rcsum(skb, hdr_size); skb_reset_mac_header(skb); @@ -471,7 +470,7 @@ void batadv_interface_rx(struct net_device *soft_iface, /* Let the bridge loop avoidance check the packet. If will * not handle it, we can safely push it up. */ - if (batadv_bla_rx(bat_priv, skb, vid, is_bcast)) + if (batadv_bla_rx(bat_priv, skb, vid, packet_type)) goto out; if (orig_node) diff --git a/net/bridge/br_vlan.c b/net/bridge/br_vlan.c index f9092c71225f..61c94cefa843 100644 --- a/net/bridge/br_vlan.c +++ b/net/bridge/br_vlan.c @@ -1288,11 +1288,13 @@ void br_vlan_get_stats(const struct net_bridge_vlan *v, } } -static int __br_vlan_get_pvid(const struct net_device *dev, - struct net_bridge_port *p, u16 *p_pvid) +int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid) { struct net_bridge_vlan_group *vg; + struct net_bridge_port *p; + ASSERT_RTNL(); + p = br_port_get_check_rtnl(dev); if (p) vg = nbp_vlan_group(p); else if (netif_is_bridge_master(dev)) @@ -1303,18 +1305,23 @@ static int __br_vlan_get_pvid(const struct net_device *dev, *p_pvid = br_get_pvid(vg); return 0; } - -int br_vlan_get_pvid(const struct net_device *dev, u16 *p_pvid) -{ - ASSERT_RTNL(); - - return __br_vlan_get_pvid(dev, br_port_get_check_rtnl(dev), p_pvid); -} EXPORT_SYMBOL_GPL(br_vlan_get_pvid); int br_vlan_get_pvid_rcu(const struct net_device *dev, u16 *p_pvid) { - return __br_vlan_get_pvid(dev, br_port_get_check_rcu(dev), p_pvid); + struct net_bridge_vlan_group *vg; + struct net_bridge_port *p; + + p = br_port_get_check_rcu(dev); + if (p) + vg = nbp_vlan_group_rcu(p); + else if (netif_is_bridge_master(dev)) + vg = br_vlan_group_rcu(netdev_priv(dev)); + else + return -EINVAL; + + *p_pvid = br_get_pvid(vg); + return 0; } EXPORT_SYMBOL_GPL(br_vlan_get_pvid_rcu); diff --git a/net/core/dev.c b/net/core/dev.c index 4086d335978c..266073e300b5 100644 --- a/net/core/dev.c +++ b/net/core/dev.c @@ -8647,7 +8647,7 @@ int dev_get_port_parent_id(struct net_device *dev, if (!first.id_len) first = *ppid; else if (memcmp(&first, ppid, sizeof(*ppid))) - return -ENODATA; + return -EOPNOTSUPP; } return err; diff --git a/net/core/dst.c b/net/core/dst.c index d6b6ced0d451..0c01bd8d9d81 100644 --- a/net/core/dst.c +++ b/net/core/dst.c @@ -144,7 +144,7 @@ static void dst_destroy_rcu(struct rcu_head *head) /* Operations to mark dst as DEAD and clean up the net device referenced * by dst: - * 1. put the dst under loopback interface and discard all tx/rx packets + * 1. 
put the dst under blackhole interface and discard all tx/rx packets * on this route. * 2. release the net_device * This function should be called when removing routes from the fib tree diff --git a/net/core/fib_rules.c b/net/core/fib_rules.c index 51678a528f85..7bcfb16854cb 100644 --- a/net/core/fib_rules.c +++ b/net/core/fib_rules.c @@ -16,7 +16,7 @@ #include <net/ip_tunnels.h> #include <linux/indirect_call_wrapper.h> -#ifdef CONFIG_IPV6_MULTIPLE_TABLES +#if defined(CONFIG_IPV6) && defined(CONFIG_IPV6_MULTIPLE_TABLES) #ifdef CONFIG_IP_MULTIPLE_TABLES #define INDIRECT_CALL_MT(f, f2, f1, ...) \ INDIRECT_CALL_INET(f, f2, f1, __VA_ARGS__) diff --git a/net/core/filter.c b/net/core/filter.c index 1f647ab986b6..21eaf3b182f2 100644 --- a/net/core/filter.c +++ b/net/core/filter.c @@ -4838,6 +4838,7 @@ static int bpf_ipv4_fib_lookup(struct net *net, struct bpf_fib_lookup *params, fl4.saddr = params->ipv4_src; fl4.fl4_sport = params->sport; fl4.fl4_dport = params->dport; + fl4.flowi4_multipath_hash = 0; if (flags & BPF_FIB_LOOKUP_DIRECT) { u32 tbid = l3mdev_fib_table_rcu(dev) ? : RT_TABLE_MAIN; @@ -7065,8 +7066,6 @@ static int bpf_gen_ld_abs(const struct bpf_insn *orig, bool indirect = BPF_MODE(orig->code) == BPF_IND; struct bpf_insn *insn = insn_buf; - /* We're guaranteed here that CTX is in R6. */ - *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX); if (!indirect) { *insn++ = BPF_MOV64_IMM(BPF_REG_2, orig->imm); } else { @@ -7074,6 +7073,8 @@ static int bpf_gen_ld_abs(const struct bpf_insn *orig, if (orig->imm) *insn++ = BPF_ALU64_IMM(BPF_ADD, BPF_REG_2, orig->imm); } + /* We're guaranteed here that CTX is in R6. */ + *insn++ = BPF_MOV64_REG(BPF_REG_1, BPF_REG_CTX); switch (BPF_SIZE(orig->code)) { case BPF_B: @@ -9522,7 +9523,7 @@ BPF_CALL_1(bpf_skc_to_tcp6_sock, struct sock *, sk) * trigger an explicit type generation here. 
*/ BTF_TYPE_EMIT(struct tcp6_sock); - if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP && + if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP && sk->sk_family == AF_INET6) return (unsigned long)sk; @@ -9540,7 +9541,7 @@ const struct bpf_func_proto bpf_skc_to_tcp6_sock_proto = { BPF_CALL_1(bpf_skc_to_tcp_sock, struct sock *, sk) { - if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP) + if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_TCP) return (unsigned long)sk; return (unsigned long)NULL; @@ -9558,12 +9559,12 @@ const struct bpf_func_proto bpf_skc_to_tcp_sock_proto = { BPF_CALL_1(bpf_skc_to_tcp_timewait_sock, struct sock *, sk) { #ifdef CONFIG_INET - if (sk->sk_prot == &tcp_prot && sk->sk_state == TCP_TIME_WAIT) + if (sk && sk->sk_prot == &tcp_prot && sk->sk_state == TCP_TIME_WAIT) return (unsigned long)sk; #endif #if IS_BUILTIN(CONFIG_IPV6) - if (sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_TIME_WAIT) + if (sk && sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_TIME_WAIT) return (unsigned long)sk; #endif @@ -9582,12 +9583,12 @@ const struct bpf_func_proto bpf_skc_to_tcp_timewait_sock_proto = { BPF_CALL_1(bpf_skc_to_tcp_request_sock, struct sock *, sk) { #ifdef CONFIG_INET - if (sk->sk_prot == &tcp_prot && sk->sk_state == TCP_NEW_SYN_RECV) + if (sk && sk->sk_prot == &tcp_prot && sk->sk_state == TCP_NEW_SYN_RECV) return (unsigned long)sk; #endif #if IS_BUILTIN(CONFIG_IPV6) - if (sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_NEW_SYN_RECV) + if (sk && sk->sk_prot == &tcpv6_prot && sk->sk_state == TCP_NEW_SYN_RECV) return (unsigned long)sk; #endif @@ -9609,7 +9610,7 @@ BPF_CALL_1(bpf_skc_to_udp6_sock, struct sock *, sk) * trigger an explicit type generation here. */ BTF_TYPE_EMIT(struct udp6_sock); - if (sk_fullsock(sk) && sk->sk_protocol == IPPROTO_UDP && + if (sk && sk_fullsock(sk) && sk->sk_protocol == IPPROTO_UDP && sk->sk_type == SOCK_DGRAM && sk->sk_family == AF_INET6) return (unsigned long)sk; diff --git a/net/core/net_namespace.c b/net/core/net_namespace.c index dcd61aca343e..944ab214e5ae 100644 --- a/net/core/net_namespace.c +++ b/net/core/net_namespace.c @@ -251,10 +251,10 @@ int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp) if (refcount_read(&net->count) == 0) return NETNSA_NSID_NOT_ASSIGNED; - spin_lock(&net->nsid_lock); + spin_lock_bh(&net->nsid_lock); id = __peernet2id(net, peer); if (id >= 0) { - spin_unlock(&net->nsid_lock); + spin_unlock_bh(&net->nsid_lock); return id; } @@ -264,12 +264,12 @@ int peernet2id_alloc(struct net *net, struct net *peer, gfp_t gfp) * just been idr_remove()'d from there in cleanup_net(). 
*/ if (!maybe_get_net(peer)) { - spin_unlock(&net->nsid_lock); + spin_unlock_bh(&net->nsid_lock); return NETNSA_NSID_NOT_ASSIGNED; } id = alloc_netid(net, peer, -1); - spin_unlock(&net->nsid_lock); + spin_unlock_bh(&net->nsid_lock); put_net(peer); if (id < 0) @@ -534,20 +534,20 @@ static void unhash_nsid(struct net *net, struct net *last) for_each_net(tmp) { int id; - spin_lock(&tmp->nsid_lock); + spin_lock_bh(&tmp->nsid_lock); id = __peernet2id(tmp, net); if (id >= 0) idr_remove(&tmp->netns_ids, id); - spin_unlock(&tmp->nsid_lock); + spin_unlock_bh(&tmp->nsid_lock); if (id >= 0) rtnl_net_notifyid(tmp, RTM_DELNSID, id, 0, NULL, GFP_KERNEL); if (tmp == last) break; } - spin_lock(&net->nsid_lock); + spin_lock_bh(&net->nsid_lock); idr_destroy(&net->netns_ids); - spin_unlock(&net->nsid_lock); + spin_unlock_bh(&net->nsid_lock); } static LLIST_HEAD(cleanup_list); @@ -760,9 +760,9 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh, return PTR_ERR(peer); } - spin_lock(&net->nsid_lock); + spin_lock_bh(&net->nsid_lock); if (__peernet2id(net, peer) >= 0) { - spin_unlock(&net->nsid_lock); + spin_unlock_bh(&net->nsid_lock); err = -EEXIST; NL_SET_BAD_ATTR(extack, nla); NL_SET_ERR_MSG(extack, @@ -771,7 +771,7 @@ static int rtnl_net_newid(struct sk_buff *skb, struct nlmsghdr *nlh, } err = alloc_netid(net, peer, nsid); - spin_unlock(&net->nsid_lock); + spin_unlock_bh(&net->nsid_lock); if (err >= 0) { rtnl_net_notifyid(net, RTM_NEWNSID, err, NETLINK_CB(skb).portid, nlh, GFP_KERNEL); diff --git a/net/dcb/dcbnl.c b/net/dcb/dcbnl.c index 84dde5a2066e..16014ad19406 100644 --- a/net/dcb/dcbnl.c +++ b/net/dcb/dcbnl.c @@ -1426,6 +1426,7 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh, { const struct dcbnl_rtnl_ops *ops = netdev->dcbnl_ops; struct nlattr *ieee[DCB_ATTR_IEEE_MAX + 1]; + int prio; int err; if (!ops) @@ -1475,6 +1476,13 @@ static int dcbnl_ieee_set(struct net_device *netdev, struct nlmsghdr *nlh, struct dcbnl_buffer *buffer = nla_data(ieee[DCB_ATTR_DCB_BUFFER]); + for (prio = 0; prio < ARRAY_SIZE(buffer->prio2buffer); prio++) { + if (buffer->prio2buffer[prio] >= DCBX_MAX_BUFFERS) { + err = -EINVAL; + goto err; + } + } + err = ops->dcbnl_setbuffer(netdev, buffer); if (err) goto err; diff --git a/net/dsa/slave.c b/net/dsa/slave.c index 9af1a2d0cec4..16e5f98d4882 100644 --- a/net/dsa/slave.c +++ b/net/dsa/slave.c @@ -1799,15 +1799,27 @@ int dsa_slave_create(struct dsa_port *port) dsa_slave_notify(slave_dev, DSA_PORT_REGISTER); - ret = register_netdev(slave_dev); + rtnl_lock(); + + ret = register_netdevice(slave_dev); if (ret) { netdev_err(master, "error %d registering interface %s\n", ret, slave_dev->name); + rtnl_unlock(); goto out_phy; } + ret = netdev_upper_dev_link(master, slave_dev, NULL); + + rtnl_unlock(); + + if (ret) + goto out_unregister; + return 0; +out_unregister: + unregister_netdev(slave_dev); out_phy: rtnl_lock(); phylink_disconnect_phy(p->dp->pl); @@ -1824,16 +1836,18 @@ out_free: void dsa_slave_destroy(struct net_device *slave_dev) { + struct net_device *master = dsa_slave_to_master(slave_dev); struct dsa_port *dp = dsa_slave_to_port(slave_dev); struct dsa_slave_priv *p = netdev_priv(slave_dev); netif_carrier_off(slave_dev); rtnl_lock(); + netdev_upper_dev_unlink(master, slave_dev); + unregister_netdevice(slave_dev); phylink_disconnect_phy(dp->pl); rtnl_unlock(); dsa_slave_notify(slave_dev, DSA_PORT_UNREGISTER); - unregister_netdev(slave_dev); phylink_destroy(dp->pl); gro_cells_destroy(&p->gcells); free_percpu(p->stats64); diff --git 
a/net/dsa/tag_ocelot.c b/net/dsa/tag_ocelot.c index 42f327c06dca..b4fc05cafaa6 100644 --- a/net/dsa/tag_ocelot.c +++ b/net/dsa/tag_ocelot.c @@ -160,11 +160,14 @@ static struct sk_buff *ocelot_xmit(struct sk_buff *skb, packing(injection, &qos_class, 19, 17, OCELOT_TAG_LEN, PACK, 0); if (ocelot->ptp && (skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) { + struct sk_buff *clone = DSA_SKB_CB(skb)->clone; + rew_op = ocelot_port->ptp_cmd; - if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) { - rew_op |= (ocelot_port->ts_id % 4) << 3; - ocelot_port->ts_id++; - } + /* Retrieve timestamp ID populated inside skb->cb[0] of the + * clone by ocelot_port_add_txtstamp_skb + */ + if (ocelot_port->ptp_cmd == IFH_REW_OP_TWO_STEP_PTP) + rew_op |= clone->cb[0] << 3; packing(injection, &rew_op, 125, 117, OCELOT_TAG_LEN, PACK, 0); } diff --git a/net/ethtool/tunnels.c b/net/ethtool/tunnels.c index 84f23289475b..d93bf2da0f34 100644 --- a/net/ethtool/tunnels.c +++ b/net/ethtool/tunnels.c @@ -200,7 +200,7 @@ int ethnl_tunnel_info_doit(struct sk_buff *skb, struct genl_info *info) reply_len = ret + ethnl_reply_header_size(); rskb = ethnl_reply_init(reply_len, req_info.dev, - ETHTOOL_MSG_TUNNEL_INFO_GET, + ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY, ETHTOOL_A_TUNNEL_INFO_HEADER, info, &reply_payload); if (!rskb) { @@ -273,7 +273,7 @@ int ethnl_tunnel_info_dumpit(struct sk_buff *skb, struct netlink_callback *cb) goto cont; ehdr = ethnl_dump_put(skb, cb, - ETHTOOL_MSG_TUNNEL_INFO_GET); + ETHTOOL_MSG_TUNNEL_INFO_GET_REPLY); if (!ehdr) { ret = -EMSGSIZE; goto out; diff --git a/net/hsr/hsr_netlink.c b/net/hsr/hsr_netlink.c index 06c3cd988760..0e4681cf71db 100644 --- a/net/hsr/hsr_netlink.c +++ b/net/hsr/hsr_netlink.c @@ -76,7 +76,7 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev, proto = nla_get_u8(data[IFLA_HSR_PROTOCOL]); if (proto >= HSR_PROTOCOL_MAX) { - NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol\n"); + NL_SET_ERR_MSG_MOD(extack, "Unsupported protocol"); return -EINVAL; } @@ -84,14 +84,14 @@ static int hsr_newlink(struct net *src_net, struct net_device *dev, proto_version = HSR_V0; } else { if (proto == HSR_PROTOCOL_PRP) { - NL_SET_ERR_MSG_MOD(extack, "PRP version unsupported\n"); + NL_SET_ERR_MSG_MOD(extack, "PRP version unsupported"); return -EINVAL; } proto_version = nla_get_u8(data[IFLA_HSR_VERSION]); if (proto_version > HSR_V1) { NL_SET_ERR_MSG_MOD(extack, - "Only HSR version 0/1 supported\n"); + "Only HSR version 0/1 supported"); return -EINVAL; } } diff --git a/net/ipv4/fib_frontend.c b/net/ipv4/fib_frontend.c index 41079490a118..86a23e4a6a50 100644 --- a/net/ipv4/fib_frontend.c +++ b/net/ipv4/fib_frontend.c @@ -362,6 +362,7 @@ static int __fib_validate_source(struct sk_buff *skb, __be32 src, __be32 dst, fl4.flowi4_tun_key.tun_id = 0; fl4.flowi4_flags = 0; fl4.flowi4_uid = sock_net_uid(net, NULL); + fl4.flowi4_multipath_hash = 0; no_addr = idev->ifa_list == NULL; diff --git a/net/ipv4/inet_diag.c b/net/ipv4/inet_diag.c index 4a98dd736270..f1bd95f243b3 100644 --- a/net/ipv4/inet_diag.c +++ b/net/ipv4/inet_diag.c @@ -186,8 +186,8 @@ errout: } EXPORT_SYMBOL_GPL(inet_diag_msg_attrs_fill); -static void inet_diag_parse_attrs(const struct nlmsghdr *nlh, int hdrlen, - struct nlattr **req_nlas) +static int inet_diag_parse_attrs(const struct nlmsghdr *nlh, int hdrlen, + struct nlattr **req_nlas) { struct nlattr *nla; int remaining; @@ -195,9 +195,13 @@ static void inet_diag_parse_attrs(const struct nlmsghdr *nlh, int hdrlen, nlmsg_for_each_attr(nla, nlh, hdrlen, remaining) { int type = nla_type(nla); + 
if (type == INET_DIAG_REQ_PROTOCOL && nla_len(nla) != sizeof(u32)) + return -EINVAL; + if (type < __INET_DIAG_REQ_MAX) req_nlas[type] = nla; } + return 0; } static int inet_diag_get_protocol(const struct inet_diag_req_v2 *req, @@ -574,7 +578,10 @@ static int inet_diag_cmd_exact(int cmd, struct sk_buff *in_skb, int err, protocol; memset(&dump_data, 0, sizeof(dump_data)); - inet_diag_parse_attrs(nlh, hdrlen, dump_data.req_nlas); + err = inet_diag_parse_attrs(nlh, hdrlen, dump_data.req_nlas); + if (err) + return err; + protocol = inet_diag_get_protocol(req, &dump_data); handler = inet_diag_lock_handler(protocol); @@ -1180,8 +1187,11 @@ static int __inet_diag_dump_start(struct netlink_callback *cb, int hdrlen) if (!cb_data) return -ENOMEM; - inet_diag_parse_attrs(nlh, hdrlen, cb_data->req_nlas); - + err = inet_diag_parse_attrs(nlh, hdrlen, cb_data->req_nlas); + if (err) { + kfree(cb_data); + return err; + } nla = cb_data->inet_diag_nla_bc; if (nla) { err = inet_diag_bc_audit(nla, skb); diff --git a/net/ipv4/ip_output.c b/net/ipv4/ip_output.c index 61f802d5350c..e6f2ada9e7d5 100644 --- a/net/ipv4/ip_output.c +++ b/net/ipv4/ip_output.c @@ -74,6 +74,7 @@ #include <net/icmp.h> #include <net/checksum.h> #include <net/inetpeer.h> +#include <net/inet_ecn.h> #include <net/lwtunnel.h> #include <linux/bpf-cgroup.h> #include <linux/igmp.h> @@ -1703,7 +1704,7 @@ void ip_send_unicast_reply(struct sock *sk, struct sk_buff *skb, if (IS_ERR(rt)) return; - inet_sk(sk)->tos = arg->tos; + inet_sk(sk)->tos = arg->tos & ~INET_ECN_MASK; sk->sk_protocol = ip_hdr(skb)->protocol; sk->sk_bound_dev_if = arg->bound_dev_if; diff --git a/net/ipv4/ip_tunnel_core.c b/net/ipv4/ip_tunnel_core.c index 75c6013ff9a4..b2ea1a8c5fd6 100644 --- a/net/ipv4/ip_tunnel_core.c +++ b/net/ipv4/ip_tunnel_core.c @@ -554,6 +554,7 @@ static int ip_tun_parse_opts_vxlan(struct nlattr *attr, attr = tb[LWTUNNEL_IP_OPT_VXLAN_GBP]; md->gbp = nla_get_u32(attr); + md->gbp &= VXLAN_GBP_MASK; info->key.tun_flags |= TUNNEL_VXLAN_OPT; } diff --git a/net/ipv4/route.c b/net/ipv4/route.c index 8ca6bcab7b03..58642b29a499 100644 --- a/net/ipv4/route.c +++ b/net/ipv4/route.c @@ -786,8 +786,10 @@ static void __ip_do_redirect(struct rtable *rt, struct sk_buff *skb, struct flow neigh_event_send(n, NULL); } else { if (fib_lookup(net, fl4, &res, 0) == 0) { - struct fib_nh_common *nhc = FIB_RES_NHC(res); + struct fib_nh_common *nhc; + fib_select_path(net, &res, fl4, skb); + nhc = FIB_RES_NHC(res); update_or_create_fnhe(nhc, fl4->daddr, new_gw, 0, false, jiffies + ip_rt_gc_timeout); @@ -1013,6 +1015,7 @@ out: kfree_skb(skb); static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) { struct dst_entry *dst = &rt->dst; + struct net *net = dev_net(dst->dev); u32 old_mtu = ipv4_mtu(dst); struct fib_result res; bool lock = false; @@ -1033,9 +1036,11 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu) return; rcu_read_lock(); - if (fib_lookup(dev_net(dst->dev), fl4, &res, 0) == 0) { - struct fib_nh_common *nhc = FIB_RES_NHC(res); + if (fib_lookup(net, fl4, &res, 0) == 0) { + struct fib_nh_common *nhc; + fib_select_path(net, &res, fl4, NULL); + nhc = FIB_RES_NHC(res); update_or_create_fnhe(nhc, fl4->daddr, 0, mtu, lock, jiffies + ip_rt_mtu_expires); } @@ -2147,6 +2152,7 @@ static int ip_route_input_slow(struct sk_buff *skb, __be32 daddr, __be32 saddr, fl4.daddr = daddr; fl4.saddr = saddr; fl4.flowi4_uid = sock_net_uid(net, NULL); + fl4.flowi4_multipath_hash = 0; if (fib4_rules_early_flow_dissect(net, skb, &fl4, &_flkeys)) { 
flkeys = &_flkeys; @@ -2667,8 +2673,6 @@ struct rtable *ip_route_output_key_hash_rcu(struct net *net, struct flowi4 *fl4, fib_select_path(net, res, fl4, skb); dev_out = FIB_RES_DEV(*res); - fl4->flowi4_oif = dev_out->ifindex; - make_route: rth = __mkroute_output(res, fl4, orig_oif, dev_out, flags); diff --git a/net/ipv6/Kconfig b/net/ipv6/Kconfig index 76bff79d6fed..747f56e0c636 100644 --- a/net/ipv6/Kconfig +++ b/net/ipv6/Kconfig @@ -303,6 +303,7 @@ config IPV6_SEG6_LWTUNNEL config IPV6_SEG6_HMAC bool "IPv6: Segment Routing HMAC support" depends on IPV6 + select CRYPTO select CRYPTO_HMAC select CRYPTO_SHA1 select CRYPTO_SHA256 diff --git a/net/ipv6/ip6_fib.c b/net/ipv6/ip6_fib.c index 25a90f3f705c..4a664ad4f4d4 100644 --- a/net/ipv6/ip6_fib.c +++ b/net/ipv6/ip6_fib.c @@ -1993,14 +1993,19 @@ static void fib6_del_route(struct fib6_table *table, struct fib6_node *fn, /* Need to own table->tb6_lock */ int fib6_del(struct fib6_info *rt, struct nl_info *info) { - struct fib6_node *fn = rcu_dereference_protected(rt->fib6_node, - lockdep_is_held(&rt->fib6_table->tb6_lock)); - struct fib6_table *table = rt->fib6_table; struct net *net = info->nl_net; struct fib6_info __rcu **rtp; struct fib6_info __rcu **rtp_next; + struct fib6_table *table; + struct fib6_node *fn; + + if (rt == net->ipv6.fib6_null_entry) + return -ENOENT; - if (!fn || rt == net->ipv6.fib6_null_entry) + table = rt->fib6_table; + fn = rcu_dereference_protected(rt->fib6_node, + lockdep_is_held(&table->tb6_lock)); + if (!fn) return -ENOENT; WARN_ON(!(fn->fn_flags & RTN_RTINFO)); diff --git a/net/ipv6/route.c b/net/ipv6/route.c index 5e7e25e2523a..fb075d9545b9 100644 --- a/net/ipv6/route.c +++ b/net/ipv6/route.c @@ -4202,7 +4202,7 @@ static struct fib6_info *rt6_add_route_info(struct net *net, .fc_nlinfo.nl_net = net, }; - cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO, + cfg.fc_table = l3mdev_fib_table(dev) ? : RT6_TABLE_INFO; cfg.fc_dst = *prefix; cfg.fc_gateway = *gwaddr; diff --git a/net/mac80211/airtime.c b/net/mac80211/airtime.c index 314973033d03..26d2f8ba7029 100644 --- a/net/mac80211/airtime.c +++ b/net/mac80211/airtime.c @@ -560,7 +560,9 @@ static int ieee80211_fill_rx_status(struct ieee80211_rx_status *stat, if (rate->idx < 0 || !rate->count) return -1; - if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH) + if (rate->flags & IEEE80211_TX_RC_160_MHZ_WIDTH) + stat->bw = RATE_INFO_BW_160; + else if (rate->flags & IEEE80211_TX_RC_80_MHZ_WIDTH) stat->bw = RATE_INFO_BW_80; else if (rate->flags & IEEE80211_TX_RC_40_MHZ_WIDTH) stat->bw = RATE_INFO_BW_40; @@ -668,20 +670,26 @@ u32 ieee80211_calc_expected_tx_airtime(struct ieee80211_hw *hw, * This will not be very accurate, but much better than simply * assuming un-aggregated tx in all cases. 
*/ - if (duration > 400) /* <= VHT20 MCS2 1S */ + if (duration > 400 * 1024) /* <= VHT20 MCS2 1S */ agg_shift = 1; - else if (duration > 250) /* <= VHT20 MCS3 1S or MCS1 2S */ + else if (duration > 250 * 1024) /* <= VHT20 MCS3 1S or MCS1 2S */ agg_shift = 2; - else if (duration > 150) /* <= VHT20 MCS5 1S or MCS3 2S */ + else if (duration > 150 * 1024) /* <= VHT20 MCS5 1S or MCS2 2S */ agg_shift = 3; - else + else if (duration > 70 * 1024) /* <= VHT20 MCS5 2S */ agg_shift = 4; + else if (stat.encoding != RX_ENC_HE || + duration > 20 * 1024) /* <= HE40 MCS6 2S */ + agg_shift = 5; + else + agg_shift = 6; duration *= len; duration /= AVG_PKT_SIZE; duration /= 1024; + duration += (overhead >> agg_shift); - return duration + (overhead >> agg_shift); + return max_t(u32, duration, 4); } if (!conf) diff --git a/net/mac80211/mlme.c b/net/mac80211/mlme.c index ac870309b911..2e400b0ff696 100644 --- a/net/mac80211/mlme.c +++ b/net/mac80211/mlme.c @@ -4861,6 +4861,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, struct ieee80211_supported_band *sband; struct cfg80211_chan_def chandef; bool is_6ghz = cbss->channel->band == NL80211_BAND_6GHZ; + bool is_5ghz = cbss->channel->band == NL80211_BAND_5GHZ; struct ieee80211_bss *bss = (void *)cbss->priv; int ret; u32 i; @@ -4879,7 +4880,7 @@ static int ieee80211_prep_channel(struct ieee80211_sub_if_data *sdata, ifmgd->flags |= IEEE80211_STA_DISABLE_HE; } - if (!sband->vht_cap.vht_supported && !is_6ghz) { + if (!sband->vht_cap.vht_supported && is_5ghz) { ifmgd->flags |= IEEE80211_STA_DISABLE_VHT; ifmgd->flags |= IEEE80211_STA_DISABLE_HE; } diff --git a/net/mac80211/rx.c b/net/mac80211/rx.c index 836cde516a18..a959ebf56852 100644 --- a/net/mac80211/rx.c +++ b/net/mac80211/rx.c @@ -451,7 +451,8 @@ ieee80211_add_rx_radiotap_header(struct ieee80211_local *local, else if (status->bw == RATE_INFO_BW_5) channel_flags |= IEEE80211_CHAN_QUARTER; - if (status->band == NL80211_BAND_5GHZ) + if (status->band == NL80211_BAND_5GHZ || + status->band == NL80211_BAND_6GHZ) channel_flags |= IEEE80211_CHAN_OFDM | IEEE80211_CHAN_5GHZ; else if (status->encoding != RX_ENC_LEGACY) channel_flags |= IEEE80211_CHAN_DYN | IEEE80211_CHAN_2GHZ; diff --git a/net/mac80211/util.c b/net/mac80211/util.c index c8504ffc71a1..8d3bfc0fe176 100644 --- a/net/mac80211/util.c +++ b/net/mac80211/util.c @@ -3353,9 +3353,10 @@ bool ieee80211_chandef_he_6ghz_oper(struct ieee80211_sub_if_data *sdata, he_chandef.center_freq1 = ieee80211_channel_to_frequency(he_6ghz_oper->ccfs0, NL80211_BAND_6GHZ); - he_chandef.center_freq2 = - ieee80211_channel_to_frequency(he_6ghz_oper->ccfs1, - NL80211_BAND_6GHZ); + if (support_80_80 || support_160) + he_chandef.center_freq2 = + ieee80211_channel_to_frequency(he_6ghz_oper->ccfs1, + NL80211_BAND_6GHZ); } if (!cfg80211_chandef_valid(&he_chandef)) { diff --git a/net/mac80211/vht.c b/net/mac80211/vht.c index 9c6045f9c24d..d1b64d0751f2 100644 --- a/net/mac80211/vht.c +++ b/net/mac80211/vht.c @@ -168,10 +168,7 @@ ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, /* take some capabilities as-is */ cap_info = le32_to_cpu(vht_cap_ie->vht_cap_info); vht_cap->cap = cap_info; - vht_cap->cap &= IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_3895 | - IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_7991 | - IEEE80211_VHT_CAP_MAX_MPDU_LENGTH_11454 | - IEEE80211_VHT_CAP_RXLDPC | + vht_cap->cap &= IEEE80211_VHT_CAP_RXLDPC | IEEE80211_VHT_CAP_VHT_TXOP_PS | IEEE80211_VHT_CAP_HTC_VHT | IEEE80211_VHT_CAP_MAX_A_MPDU_LENGTH_EXPONENT_MASK | @@ -180,6 +177,9 @@ 
ieee80211_vht_cap_ie_to_sta_vht_cap(struct ieee80211_sub_if_data *sdata, IEEE80211_VHT_CAP_RX_ANTENNA_PATTERN | IEEE80211_VHT_CAP_TX_ANTENNA_PATTERN; + vht_cap->cap |= min_t(u32, cap_info & IEEE80211_VHT_CAP_MAX_MPDU_MASK, + own_cap.cap & IEEE80211_VHT_CAP_MAX_MPDU_MASK); + /* and some based on our own capabilities */ switch (own_cap.cap & IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_MASK) { case IEEE80211_VHT_CAP_SUPP_CHAN_WIDTH_160MHZ: diff --git a/net/mac802154/tx.c b/net/mac802154/tx.c index ab52811523e9..c829e4a75325 100644 --- a/net/mac802154/tx.c +++ b/net/mac802154/tx.c @@ -34,11 +34,11 @@ void ieee802154_xmit_worker(struct work_struct *work) if (res) goto err_tx; - ieee802154_xmit_complete(&local->hw, skb, false); - dev->stats.tx_packets++; dev->stats.tx_bytes += skb->len; + ieee802154_xmit_complete(&local->hw, skb, false); + return; err_tx: @@ -78,6 +78,8 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb) /* async is priority, otherwise sync is fallback */ if (local->ops->xmit_async) { + unsigned int len = skb->len; + ret = drv_xmit_async(local, skb); if (ret) { ieee802154_wake_queue(&local->hw); @@ -85,7 +87,7 @@ ieee802154_tx(struct ieee802154_local *local, struct sk_buff *skb) } dev->stats.tx_packets++; - dev->stats.tx_bytes += skb->len; + dev->stats.tx_bytes += len; } else { local->tx_skb = skb; queue_work(local->workqueue, &local->tx_work); diff --git a/net/mptcp/pm_netlink.c b/net/mptcp/pm_netlink.c index c8820c4156e6..770da3627848 100644 --- a/net/mptcp/pm_netlink.c +++ b/net/mptcp/pm_netlink.c @@ -66,6 +66,16 @@ static bool addresses_equal(const struct mptcp_addr_info *a, return a->port == b->port; } +static bool address_zero(const struct mptcp_addr_info *addr) +{ + struct mptcp_addr_info zero; + + memset(&zero, 0, sizeof(zero)); + zero.family = addr->family; + + return addresses_equal(addr, &zero, false); +} + static void local_address(const struct sock_common *skc, struct mptcp_addr_info *addr) { @@ -171,9 +181,9 @@ static void check_work_pending(struct mptcp_sock *msk) static void mptcp_pm_create_subflow_or_signal_addr(struct mptcp_sock *msk) { + struct mptcp_addr_info remote = { 0 }; struct sock *sk = (struct sock *)msk; struct mptcp_pm_addr_entry *local; - struct mptcp_addr_info remote; struct pm_nl_pernet *pernet; pernet = net_generic(sock_net((struct sock *)msk), pm_nl_pernet_id); @@ -323,10 +333,13 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc) * addr */ local_address((struct sock_common *)msk, &msk_local); - local_address((struct sock_common *)msk, &skc_local); + local_address((struct sock_common *)skc, &skc_local); if (addresses_equal(&msk_local, &skc_local, false)) return 0; + if (address_zero(&skc_local)) + return 0; + pernet = net_generic(sock_net((struct sock *)msk), pm_nl_pernet_id); rcu_read_lock(); @@ -341,7 +354,7 @@ int mptcp_pm_nl_get_local_id(struct mptcp_sock *msk, struct sock_common *skc) return ret; /* address not found, add to local list */ - entry = kmalloc(sizeof(*entry), GFP_KERNEL); + entry = kmalloc(sizeof(*entry), GFP_ATOMIC); if (!entry) return -ENOMEM; diff --git a/net/mptcp/subflow.c b/net/mptcp/subflow.c index e8cac2655c82..9ead43f79023 100644 --- a/net/mptcp/subflow.c +++ b/net/mptcp/subflow.c @@ -1063,6 +1063,7 @@ int __mptcp_subflow_connect(struct sock *sk, int ifindex, struct mptcp_sock *msk = mptcp_sk(sk); struct mptcp_subflow_context *subflow; struct sockaddr_storage addr; + int remote_id = remote->id; int local_id = loc->id; struct socket *sf; struct sock *ssk; @@ -1107,10 +1108,11 @@ int 
__mptcp_subflow_connect(struct sock *sk, int ifindex, goto failed; mptcp_crypto_key_sha(subflow->remote_key, &remote_token, NULL); - pr_debug("msk=%p remote_token=%u local_id=%d", msk, remote_token, - local_id); + pr_debug("msk=%p remote_token=%u local_id=%d remote_id=%d", msk, + remote_token, local_id, remote_id); subflow->remote_token = remote_token; subflow->local_id = local_id; + subflow->remote_id = remote_id; subflow->request_join = 1; subflow->request_bkup = 1; mptcp_info2sockaddr(remote, &addr); @@ -1347,6 +1349,7 @@ static void subflow_ulp_clone(const struct request_sock *req, new_ctx->fully_established = 1; new_ctx->backup = subflow_req->backup; new_ctx->local_id = subflow_req->local_id; + new_ctx->remote_id = subflow_req->remote_id; new_ctx->token = subflow_req->token; new_ctx->thmac = subflow_req->thmac; } diff --git a/net/netfilter/nf_conntrack_netlink.c b/net/netfilter/nf_conntrack_netlink.c index 832eabecfbdd..c3a4214dc958 100644 --- a/net/netfilter/nf_conntrack_netlink.c +++ b/net/netfilter/nf_conntrack_netlink.c @@ -851,7 +851,6 @@ static int ctnetlink_done(struct netlink_callback *cb) } struct ctnetlink_filter { - u_int32_t cta_flags; u8 family; u_int32_t orig_flags; @@ -906,10 +905,6 @@ static int ctnetlink_parse_tuple_filter(const struct nlattr * const cda[], struct nf_conntrack_zone *zone, u_int32_t flags); -/* applied on filters */ -#define CTA_FILTER_F_CTA_MARK (1 << 0) -#define CTA_FILTER_F_CTA_MARK_MASK (1 << 1) - static struct ctnetlink_filter * ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family) { @@ -930,14 +925,10 @@ ctnetlink_alloc_filter(const struct nlattr * const cda[], u8 family) #ifdef CONFIG_NF_CONNTRACK_MARK if (cda[CTA_MARK]) { filter->mark.val = ntohl(nla_get_be32(cda[CTA_MARK])); - filter->cta_flags |= CTA_FILTER_FLAG(CTA_MARK); - - if (cda[CTA_MARK_MASK]) { + if (cda[CTA_MARK_MASK]) filter->mark.mask = ntohl(nla_get_be32(cda[CTA_MARK_MASK])); - filter->cta_flags |= CTA_FILTER_FLAG(CTA_MARK_MASK); - } else { + else filter->mark.mask = 0xffffffff; - } } else if (cda[CTA_MARK_MASK]) { err = -EINVAL; goto err_filter; @@ -1117,11 +1108,7 @@ static int ctnetlink_filter_match(struct nf_conn *ct, void *data) } #ifdef CONFIG_NF_CONNTRACK_MARK - if ((filter->cta_flags & CTA_FILTER_FLAG(CTA_MARK_MASK)) && - (ct->mark & filter->mark.mask) != filter->mark.val) - goto ignore_entry; - else if ((filter->cta_flags & CTA_FILTER_FLAG(CTA_MARK)) && - ct->mark != filter->mark.val) + if ((ct->mark & filter->mark.mask) != filter->mark.val) goto ignore_entry; #endif @@ -1404,7 +1391,8 @@ ctnetlink_parse_tuple_filter(const struct nlattr * const cda[], if (err < 0) return err; - + if (l3num != NFPROTO_IPV4 && l3num != NFPROTO_IPV6) + return -EOPNOTSUPP; tuple->src.l3num = l3num; if (flags & CTA_FILTER_FLAG(CTA_IP_DST) || diff --git a/net/netfilter/nf_conntrack_proto.c b/net/netfilter/nf_conntrack_proto.c index 95f79980348c..47e9319d2cf3 100644 --- a/net/netfilter/nf_conntrack_proto.c +++ b/net/netfilter/nf_conntrack_proto.c @@ -565,6 +565,7 @@ static int nf_ct_netns_inet_get(struct net *net) int err; err = nf_ct_netns_do_get(net, NFPROTO_IPV4); +#if IS_ENABLED(CONFIG_IPV6) if (err < 0) goto err1; err = nf_ct_netns_do_get(net, NFPROTO_IPV6); @@ -575,6 +576,7 @@ static int nf_ct_netns_inet_get(struct net *net) err2: nf_ct_netns_put(net, NFPROTO_IPV4); err1: +#endif return err; } diff --git a/net/netfilter/nf_tables_api.c b/net/netfilter/nf_tables_api.c index b7dc1cbf40ea..4603b667973a 100644 --- a/net/netfilter/nf_tables_api.c +++ 
b/net/netfilter/nf_tables_api.c @@ -684,6 +684,18 @@ nla_put_failure: return -1; } +struct nftnl_skb_parms { + bool report; +}; +#define NFT_CB(skb) (*(struct nftnl_skb_parms*)&((skb)->cb)) + +static void nft_notify_enqueue(struct sk_buff *skb, bool report, + struct list_head *notify_list) +{ + NFT_CB(skb).report = report; + list_add_tail(&skb->list, notify_list); +} + static void nf_tables_table_notify(const struct nft_ctx *ctx, int event) { struct sk_buff *skb; @@ -715,8 +727,7 @@ static void nf_tables_table_notify(const struct nft_ctx *ctx, int event) goto err; } - nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES, - ctx->report, GFP_KERNEL); + nft_notify_enqueue(skb, ctx->report, &ctx->net->nft.notify_list); return; err: nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS); @@ -1468,8 +1479,7 @@ static void nf_tables_chain_notify(const struct nft_ctx *ctx, int event) goto err; } - nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES, - ctx->report, GFP_KERNEL); + nft_notify_enqueue(skb, ctx->report, &ctx->net->nft.notify_list); return; err: nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS); @@ -2807,8 +2817,7 @@ static void nf_tables_rule_notify(const struct nft_ctx *ctx, goto err; } - nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES, - ctx->report, GFP_KERNEL); + nft_notify_enqueue(skb, ctx->report, &ctx->net->nft.notify_list); return; err: nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS); @@ -3837,8 +3846,7 @@ static void nf_tables_set_notify(const struct nft_ctx *ctx, goto err; } - nfnetlink_send(skb, ctx->net, portid, NFNLGRP_NFTABLES, ctx->report, - gfp_flags); + nft_notify_enqueue(skb, ctx->report, &ctx->net->nft.notify_list); return; err: nfnetlink_set_err(ctx->net, portid, NFNLGRP_NFTABLES, -ENOBUFS); @@ -4959,8 +4967,7 @@ static void nf_tables_setelem_notify(const struct nft_ctx *ctx, goto err; } - nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, ctx->report, - GFP_KERNEL); + nft_notify_enqueue(skb, ctx->report, &ctx->net->nft.notify_list); return; err: nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, -ENOBUFS); @@ -6275,7 +6282,7 @@ void nft_obj_notify(struct net *net, const struct nft_table *table, goto err; } - nfnetlink_send(skb, net, portid, NFNLGRP_NFTABLES, report, gfp); + nft_notify_enqueue(skb, report, &net->nft.notify_list); return; err: nfnetlink_set_err(net, portid, NFNLGRP_NFTABLES, -ENOBUFS); @@ -7085,8 +7092,7 @@ static void nf_tables_flowtable_notify(struct nft_ctx *ctx, goto err; } - nfnetlink_send(skb, ctx->net, ctx->portid, NFNLGRP_NFTABLES, - ctx->report, GFP_KERNEL); + nft_notify_enqueue(skb, ctx->report, &ctx->net->nft.notify_list); return; err: nfnetlink_set_err(ctx->net, ctx->portid, NFNLGRP_NFTABLES, -ENOBUFS); @@ -7695,6 +7701,41 @@ static void nf_tables_commit_release(struct net *net) mutex_unlock(&net->nft.commit_mutex); } +static void nft_commit_notify(struct net *net, u32 portid) +{ + struct sk_buff *batch_skb = NULL, *nskb, *skb; + unsigned char *data; + int len; + + list_for_each_entry_safe(skb, nskb, &net->nft.notify_list, list) { + if (!batch_skb) { +new_batch: + batch_skb = skb; + len = NLMSG_GOODSIZE - skb->len; + list_del(&skb->list); + continue; + } + len -= skb->len; + if (len > 0 && NFT_CB(skb).report == NFT_CB(batch_skb).report) { + data = skb_put(batch_skb, skb->len); + memcpy(data, skb->data, skb->len); + list_del(&skb->list); + kfree_skb(skb); + continue; + } + nfnetlink_send(batch_skb, net, portid, NFNLGRP_NFTABLES, + NFT_CB(batch_skb).report, 
GFP_KERNEL); + goto new_batch; + } + + if (batch_skb) { + nfnetlink_send(batch_skb, net, portid, NFNLGRP_NFTABLES, + NFT_CB(batch_skb).report, GFP_KERNEL); + } + + WARN_ON_ONCE(!list_empty(&net->nft.notify_list)); +} + static int nf_tables_commit(struct net *net, struct sk_buff *skb) { struct nft_trans *trans, *next; @@ -7897,6 +7938,7 @@ static int nf_tables_commit(struct net *net, struct sk_buff *skb) } } + nft_commit_notify(net, NETLINK_CB(skb).portid); nf_tables_gen_notify(net, skb, NFT_MSG_NEWGEN); nf_tables_commit_release(net); @@ -8721,6 +8763,7 @@ static int __net_init nf_tables_init_net(struct net *net) INIT_LIST_HEAD(&net->nft.tables); INIT_LIST_HEAD(&net->nft.commit_list); INIT_LIST_HEAD(&net->nft.module_list); + INIT_LIST_HEAD(&net->nft.notify_list); mutex_init(&net->nft.commit_mutex); net->nft.base_seq = 1; net->nft.validate_state = NFT_VALIDATE_SKIP; @@ -8737,6 +8780,7 @@ static void __net_exit nf_tables_exit_net(struct net *net) mutex_unlock(&net->nft.commit_mutex); WARN_ON_ONCE(!list_empty(&net->nft.tables)); WARN_ON_ONCE(!list_empty(&net->nft.module_list)); + WARN_ON_ONCE(!list_empty(&net->nft.notify_list)); } static struct pernet_operations nf_tables_net_ops = { diff --git a/net/netfilter/nft_meta.c b/net/netfilter/nft_meta.c index 7bc6537f3ccb..b37bd02448d8 100644 --- a/net/netfilter/nft_meta.c +++ b/net/netfilter/nft_meta.c @@ -147,11 +147,11 @@ nft_meta_get_eval_skugid(enum nft_meta_keys key, switch (key) { case NFT_META_SKUID: - *dest = from_kuid_munged(&init_user_ns, + *dest = from_kuid_munged(sock_net(sk)->user_ns, sock->file->f_cred->fsuid); break; case NFT_META_SKGID: - *dest = from_kgid_munged(&init_user_ns, + *dest = from_kgid_munged(sock_net(sk)->user_ns, sock->file->f_cred->fsgid); break; default: diff --git a/net/qrtr/qrtr.c b/net/qrtr/qrtr.c index 90c558f89d46..957aa9263ba4 100644 --- a/net/qrtr/qrtr.c +++ b/net/qrtr/qrtr.c @@ -332,8 +332,7 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb, { struct qrtr_hdr_v1 *hdr; size_t len = skb->len; - int rc = -ENODEV; - int confirm_rx; + int rc, confirm_rx; confirm_rx = qrtr_tx_wait(node, to->sq_node, to->sq_port, type); if (confirm_rx < 0) { @@ -357,15 +356,17 @@ static int qrtr_node_enqueue(struct qrtr_node *node, struct sk_buff *skb, hdr->size = cpu_to_le32(len); hdr->confirm_rx = !!confirm_rx; - skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr)); - - mutex_lock(&node->ep_lock); - if (node->ep) - rc = node->ep->xmit(node->ep, skb); - else - kfree_skb(skb); - mutex_unlock(&node->ep_lock); + rc = skb_put_padto(skb, ALIGN(len, 4) + sizeof(*hdr)); + if (!rc) { + mutex_lock(&node->ep_lock); + rc = -ENODEV; + if (node->ep) + rc = node->ep->xmit(node->ep, skb); + else + kfree_skb(skb); + mutex_unlock(&node->ep_lock); + } /* Need to ensure that a subsequent message carries the otherwise lost * confirm_rx flag if we dropped this one */ if (rc && confirm_rx) diff --git a/net/sched/act_ife.c b/net/sched/act_ife.c index c1fcd85719d6..5c568757643b 100644 --- a/net/sched/act_ife.c +++ b/net/sched/act_ife.c @@ -436,6 +436,25 @@ static void tcf_ife_cleanup(struct tc_action *a) kfree_rcu(p, rcu); } +static int load_metalist(struct nlattr **tb, bool rtnl_held) +{ + int i; + + for (i = 1; i < max_metacnt; i++) { + if (tb[i]) { + void *val = nla_data(tb[i]); + int len = nla_len(tb[i]); + int rc; + + rc = load_metaops_and_vet(i, val, len, rtnl_held); + if (rc != 0) + return rc; + } + } + + return 0; +} + static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb, bool exists, bool rtnl_held) { @@ 
-449,10 +468,6 @@ static int populate_metalist(struct tcf_ife_info *ife, struct nlattr **tb, val = nla_data(tb[i]); len = nla_len(tb[i]); - rc = load_metaops_and_vet(i, val, len, rtnl_held); - if (rc != 0) - return rc; - rc = add_metainfo(ife, i, val, len, exists); if (rc) return rc; @@ -509,6 +524,21 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, if (!p) return -ENOMEM; + if (tb[TCA_IFE_METALST]) { + err = nla_parse_nested_deprecated(tb2, IFE_META_MAX, + tb[TCA_IFE_METALST], NULL, + NULL); + if (err) { + kfree(p); + return err; + } + err = load_metalist(tb2, rtnl_held); + if (err) { + kfree(p); + return err; + } + } + index = parm->index; err = tcf_idr_check_alloc(tn, &index, a, bind); if (err < 0) { @@ -570,15 +600,9 @@ static int tcf_ife_init(struct net *net, struct nlattr *nla, } if (tb[TCA_IFE_METALST]) { - err = nla_parse_nested_deprecated(tb2, IFE_META_MAX, - tb[TCA_IFE_METALST], NULL, - NULL); - if (err) - goto metadata_parse_err; err = populate_metalist(ife, tb2, exists, rtnl_held); if (err) goto metadata_parse_err; - } else { /* if no passed metadata allow list or passed allow-all * then here we process by adding as many supported metadatum diff --git a/net/sched/act_tunnel_key.c b/net/sched/act_tunnel_key.c index 536c4bc31be6..37f1e10f35e0 100644 --- a/net/sched/act_tunnel_key.c +++ b/net/sched/act_tunnel_key.c @@ -156,6 +156,7 @@ tunnel_key_copy_vxlan_opt(const struct nlattr *nla, void *dst, int dst_len, struct vxlan_metadata *md = dst; md->gbp = nla_get_u32(tb[TCA_TUNNEL_KEY_ENC_OPT_VXLAN_GBP]); + md->gbp &= VXLAN_GBP_MASK; } return sizeof(struct vxlan_metadata); diff --git a/net/sched/cls_flower.c b/net/sched/cls_flower.c index a4f7ef1de7e7..fed18fd2c50b 100644 --- a/net/sched/cls_flower.c +++ b/net/sched/cls_flower.c @@ -1175,8 +1175,10 @@ static int fl_set_vxlan_opt(const struct nlattr *nla, struct fl_flow_key *key, return -EINVAL; } - if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) + if (tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]) { md->gbp = nla_get_u32(tb[TCA_FLOWER_KEY_ENC_OPT_VXLAN_GBP]); + md->gbp &= VXLAN_GBP_MASK; + } return sizeof(*md); } @@ -1221,6 +1223,7 @@ static int fl_set_erspan_opt(const struct nlattr *nla, struct fl_flow_key *key, } if (tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]) { nla = tb[TCA_FLOWER_KEY_ENC_OPT_ERSPAN_INDEX]; + memset(&md->u, 0x00, sizeof(md->u)); md->u.index = nla_get_be32(nla); } } else if (md->version == 2) { diff --git a/net/sched/sch_generic.c b/net/sched/sch_generic.c index 265a61d011df..54c417244642 100644 --- a/net/sched/sch_generic.c +++ b/net/sched/sch_generic.c @@ -1131,24 +1131,10 @@ EXPORT_SYMBOL(dev_activate); static void qdisc_deactivate(struct Qdisc *qdisc) { - bool nolock = qdisc->flags & TCQ_F_NOLOCK; - if (qdisc->flags & TCQ_F_BUILTIN) return; - if (test_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state)) - return; - - if (nolock) - spin_lock_bh(&qdisc->seqlock); - spin_lock_bh(qdisc_lock(qdisc)); set_bit(__QDISC_STATE_DEACTIVATED, &qdisc->state); - - qdisc_reset(qdisc); - - spin_unlock_bh(qdisc_lock(qdisc)); - if (nolock) - spin_unlock_bh(&qdisc->seqlock); } static void dev_deactivate_queue(struct net_device *dev, @@ -1165,6 +1151,30 @@ static void dev_deactivate_queue(struct net_device *dev, } } +static void dev_reset_queue(struct net_device *dev, + struct netdev_queue *dev_queue, + void *_unused) +{ + struct Qdisc *qdisc; + bool nolock; + + qdisc = dev_queue->qdisc_sleeping; + if (!qdisc) + return; + + nolock = qdisc->flags & TCQ_F_NOLOCK; + + if (nolock) + spin_lock_bh(&qdisc->seqlock); + 
spin_lock_bh(qdisc_lock(qdisc)); + + qdisc_reset(qdisc); + + spin_unlock_bh(qdisc_lock(qdisc)); + if (nolock) + spin_unlock_bh(&qdisc->seqlock); +} + static bool some_qdisc_is_busy(struct net_device *dev) { unsigned int i; @@ -1213,12 +1223,20 @@ void dev_deactivate_many(struct list_head *head) dev_watchdog_down(dev); } - /* Wait for outstanding qdisc-less dev_queue_xmit calls. + /* Wait for outstanding qdisc-less dev_queue_xmit calls or + * outstanding qdisc enqueuing calls. * This is avoided if all devices are in dismantle phase : * Caller will call synchronize_net() for us */ synchronize_net(); + list_for_each_entry(dev, head, close_list) { + netdev_for_each_tx_queue(dev, dev_reset_queue, NULL); + + if (dev_ingress_queue(dev)) + dev_reset_queue(dev, dev_ingress_queue(dev), NULL); + } + /* Wait for outstanding qdisc_run calls. */ list_for_each_entry(dev, head, close_list) { while (some_qdisc_is_busy(dev)) { diff --git a/net/sched/sch_taprio.c b/net/sched/sch_taprio.c index fe53c1e38c7d..b0ad7687ee2c 100644 --- a/net/sched/sch_taprio.c +++ b/net/sched/sch_taprio.c @@ -777,9 +777,11 @@ static const struct nla_policy taprio_policy[TCA_TAPRIO_ATTR_MAX + 1] = { [TCA_TAPRIO_ATTR_TXTIME_DELAY] = { .type = NLA_U32 }, }; -static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry, +static int fill_sched_entry(struct taprio_sched *q, struct nlattr **tb, + struct sched_entry *entry, struct netlink_ext_ack *extack) { + int min_duration = length_to_duration(q, ETH_ZLEN); u32 interval = 0; if (tb[TCA_TAPRIO_SCHED_ENTRY_CMD]) @@ -794,7 +796,10 @@ static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry, interval = nla_get_u32( tb[TCA_TAPRIO_SCHED_ENTRY_INTERVAL]); - if (interval == 0) { + /* The interval should allow at least the minimum ethernet + * frame to go out. 
+ */ + if (interval < min_duration) { NL_SET_ERR_MSG(extack, "Invalid interval for schedule entry"); return -EINVAL; } @@ -804,8 +809,9 @@ static int fill_sched_entry(struct nlattr **tb, struct sched_entry *entry, return 0; } -static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry, - int index, struct netlink_ext_ack *extack) +static int parse_sched_entry(struct taprio_sched *q, struct nlattr *n, + struct sched_entry *entry, int index, + struct netlink_ext_ack *extack) { struct nlattr *tb[TCA_TAPRIO_SCHED_ENTRY_MAX + 1] = { }; int err; @@ -819,10 +825,10 @@ static int parse_sched_entry(struct nlattr *n, struct sched_entry *entry, entry->index = index; - return fill_sched_entry(tb, entry, extack); + return fill_sched_entry(q, tb, entry, extack); } -static int parse_sched_list(struct nlattr *list, +static int parse_sched_list(struct taprio_sched *q, struct nlattr *list, struct sched_gate_list *sched, struct netlink_ext_ack *extack) { @@ -847,7 +853,7 @@ static int parse_sched_list(struct nlattr *list, return -ENOMEM; } - err = parse_sched_entry(n, entry, i, extack); + err = parse_sched_entry(q, n, entry, i, extack); if (err < 0) { kfree(entry); return err; @@ -862,7 +868,7 @@ static int parse_sched_list(struct nlattr *list, return i; } -static int parse_taprio_schedule(struct nlattr **tb, +static int parse_taprio_schedule(struct taprio_sched *q, struct nlattr **tb, struct sched_gate_list *new, struct netlink_ext_ack *extack) { @@ -883,8 +889,8 @@ static int parse_taprio_schedule(struct nlattr **tb, new->cycle_time = nla_get_s64(tb[TCA_TAPRIO_ATTR_SCHED_CYCLE_TIME]); if (tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST]) - err = parse_sched_list( - tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], new, extack); + err = parse_sched_list(q, tb[TCA_TAPRIO_ATTR_SCHED_ENTRY_LIST], + new, extack); if (err < 0) return err; @@ -1473,7 +1479,7 @@ static int taprio_change(struct Qdisc *sch, struct nlattr *opt, goto free_sched; } - err = parse_taprio_schedule(tb, new_admin, extack); + err = parse_taprio_schedule(q, tb, new_admin, extack); if (err < 0) goto free_sched; diff --git a/net/sctp/socket.c b/net/sctp/socket.c index 836615f71a7d..53d0a4161df3 100644 --- a/net/sctp/socket.c +++ b/net/sctp/socket.c @@ -9220,13 +9220,10 @@ void sctp_copy_sock(struct sock *newsk, struct sock *sk, static inline void sctp_copy_descendant(struct sock *sk_to, const struct sock *sk_from) { - int ancestor_size = sizeof(struct inet_sock) + - sizeof(struct sctp_sock) - - offsetof(struct sctp_sock, pd_lobby); - - if (sk_from->sk_family == PF_INET6) - ancestor_size += sizeof(struct ipv6_pinfo); + size_t ancestor_size = sizeof(struct inet_sock); + ancestor_size += sk_from->sk_prot->obj_size; + ancestor_size -= offsetof(struct sctp_sock, pd_lobby); __inet_sk_copy_descendant(sk_to, sk_from, ancestor_size); } diff --git a/net/sunrpc/svcsock.c b/net/sunrpc/svcsock.c index d5805fa1d066..c2752e2b9ce3 100644 --- a/net/sunrpc/svcsock.c +++ b/net/sunrpc/svcsock.c @@ -228,7 +228,7 @@ static int svc_one_sock_name(struct svc_sock *svsk, char *buf, int remaining) static void svc_flush_bvec(const struct bio_vec *bvec, size_t size, size_t seek) { struct bvec_iter bi = { - .bi_size = size, + .bi_size = size + seek, }; struct bio_vec bv; diff --git a/net/tipc/group.c b/net/tipc/group.c index 588c2d2b0c69..b1fcd2ad5ecf 100644 --- a/net/tipc/group.c +++ b/net/tipc/group.c @@ -273,8 +273,8 @@ static struct tipc_member *tipc_group_find_node(struct tipc_group *grp, return NULL; } -static void tipc_group_add_to_tree(struct tipc_group *grp, - struct 
tipc_member *m) +static int tipc_group_add_to_tree(struct tipc_group *grp, + struct tipc_member *m) { u64 nkey, key = (u64)m->node << 32 | m->port; struct rb_node **n, *parent = NULL; @@ -291,10 +291,11 @@ static void tipc_group_add_to_tree(struct tipc_group *grp, else if (key > nkey) n = &(*n)->rb_right; else - return; + return -EEXIST; } rb_link_node(&m->tree_node, parent, n); rb_insert_color(&m->tree_node, &grp->members); + return 0; } static struct tipc_member *tipc_group_create_member(struct tipc_group *grp, @@ -302,6 +303,7 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp, u32 instance, int state) { struct tipc_member *m; + int ret; m = kzalloc(sizeof(*m), GFP_ATOMIC); if (!m) @@ -314,8 +316,12 @@ static struct tipc_member *tipc_group_create_member(struct tipc_group *grp, m->port = port; m->instance = instance; m->bc_acked = grp->bc_snd_nxt - 1; + ret = tipc_group_add_to_tree(grp, m); + if (ret < 0) { + kfree(m); + return NULL; + } grp->member_cnt++; - tipc_group_add_to_tree(grp, m); tipc_nlist_add(&grp->dests, m->node); m->state = state; return m; diff --git a/net/tipc/link.c b/net/tipc/link.c index b7362556da95..cef38a910107 100644 --- a/net/tipc/link.c +++ b/net/tipc/link.c @@ -532,7 +532,8 @@ bool tipc_link_create(struct net *net, char *if_name, int bearer_id, * tipc_link_bc_create - create new link to be used for broadcast * @net: pointer to associated network namespace * @mtu: mtu to be used initially if no peers - * @window: send window to be used + * @min_win: minimal send window to be used by link + * @max_win: maximal send window to be used by link * @inputq: queue to put messages ready for delivery * @namedq: queue to put binding table update messages ready for delivery * @link: return value, pointer to put the created link diff --git a/net/tipc/msg.c b/net/tipc/msg.c index 848fae674532..52e93ba4d8e2 100644 --- a/net/tipc/msg.c +++ b/net/tipc/msg.c @@ -150,7 +150,8 @@ int tipc_buf_append(struct sk_buff **headbuf, struct sk_buff **buf) if (fragid == FIRST_FRAGMENT) { if (unlikely(head)) goto err; - if (unlikely(skb_unclone(frag, GFP_ATOMIC))) + frag = skb_unshare(frag, GFP_ATOMIC); + if (unlikely(!frag)) goto err; head = *headbuf = frag; *buf = NULL; diff --git a/net/tipc/socket.c b/net/tipc/socket.c index ebd280e767bd..11b27ddc75ba 100644 --- a/net/tipc/socket.c +++ b/net/tipc/socket.c @@ -2771,10 +2771,7 @@ static int tipc_shutdown(struct socket *sock, int how) trace_tipc_sk_shutdown(sk, NULL, TIPC_DUMP_ALL, " "); __tipc_shutdown(sock, TIPC_CONN_SHUTDOWN); - if (tipc_sk_type_connectionless(sk)) - sk->sk_shutdown = SHUTDOWN_MASK; - else - sk->sk_shutdown = SEND_SHUTDOWN; + sk->sk_shutdown = SHUTDOWN_MASK; if (sk->sk_state == TIPC_DISCONNECTING) { /* Discard any unreceived messages */ diff --git a/net/wireless/Kconfig b/net/wireless/Kconfig index faf74850a1b5..27026f587fa6 100644 --- a/net/wireless/Kconfig +++ b/net/wireless/Kconfig @@ -217,6 +217,7 @@ config LIB80211_CRYPT_WEP config LIB80211_CRYPT_CCMP tristate + select CRYPTO select CRYPTO_AES select CRYPTO_CCM diff --git a/net/wireless/util.c b/net/wireless/util.c index 4a9ff9ef513f..6fa99df52f86 100644 --- a/net/wireless/util.c +++ b/net/wireless/util.c @@ -95,7 +95,7 @@ u32 ieee80211_channel_to_freq_khz(int chan, enum nl80211_band band) /* see 802.11ax D6.1 27.3.23.2 */ if (chan == 2) return MHZ_TO_KHZ(5935); - if (chan <= 253) + if (chan <= 233) return MHZ_TO_KHZ(5950 + chan * 5); break; case NL80211_BAND_60GHZ: diff --git a/net/xdp/xdp_umem.c b/net/xdp/xdp_umem.c index 
e97db37354e4..b010bfde0149 100644 --- a/net/xdp/xdp_umem.c +++ b/net/xdp/xdp_umem.c @@ -303,10 +303,10 @@ static int xdp_umem_account_pages(struct xdp_umem *umem) static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) { + u32 npgs_rem, chunk_size = mr->chunk_size, headroom = mr->headroom; bool unaligned_chunks = mr->flags & XDP_UMEM_UNALIGNED_CHUNK_FLAG; - u32 chunk_size = mr->chunk_size, headroom = mr->headroom; u64 npgs, addr = mr->addr, size = mr->len; - unsigned int chunks, chunks_per_page; + unsigned int chunks, chunks_rem; int err; if (chunk_size < XDP_UMEM_MIN_CHUNK_SIZE || chunk_size > PAGE_SIZE) { @@ -336,19 +336,18 @@ static int xdp_umem_reg(struct xdp_umem *umem, struct xdp_umem_reg *mr) if ((addr + size) < addr) return -EINVAL; - npgs = size >> PAGE_SHIFT; + npgs = div_u64_rem(size, PAGE_SIZE, &npgs_rem); + if (npgs_rem) + npgs++; if (npgs > U32_MAX) return -EINVAL; - chunks = (unsigned int)div_u64(size, chunk_size); + chunks = (unsigned int)div_u64_rem(size, chunk_size, &chunks_rem); if (chunks == 0) return -EINVAL; - if (!unaligned_chunks) { - chunks_per_page = PAGE_SIZE / chunk_size; - if (chunks < chunks_per_page || chunks % chunks_per_page) - return -EINVAL; - } + if (!unaligned_chunks && chunks_rem) + return -EINVAL; if (headroom >= chunk_size - XDP_PACKET_HEADROOM) return -EINVAL; diff --git a/scripts/dtc/Makefile b/scripts/dtc/Makefile index a698ece43fff..4852bf44e913 100644 --- a/scripts/dtc/Makefile +++ b/scripts/dtc/Makefile @@ -9,7 +9,7 @@ dtc-objs := dtc.o flattree.o fstree.o data.o livetree.o treesource.o \ dtc-objs += dtc-lexer.lex.o dtc-parser.tab.o # Source files need to get at the userspace version of libfdt_env.h to compile -HOST_EXTRACFLAGS := -I $(srctree)/$(src)/libfdt +HOST_EXTRACFLAGS += -I $(srctree)/$(src)/libfdt ifeq ($(shell pkg-config --exists yaml-0.1 2>/dev/null && echo yes),) ifneq ($(CHECK_DT_BINDING)$(CHECK_DTBS),) diff --git a/scripts/kallsyms.c b/scripts/kallsyms.c index 0096cd965332..7ecd2ccba531 100644 --- a/scripts/kallsyms.c +++ b/scripts/kallsyms.c @@ -82,6 +82,7 @@ static char *sym_name(const struct sym_entry *s) static bool is_ignored_symbol(const char *name, char type) { + /* Symbol names that exactly match to the following are ignored.*/ static const char * const ignored_symbols[] = { /* * Symbols which vary between passes. Passes 1 and 2 must have @@ -104,6 +105,7 @@ static bool is_ignored_symbol(const char *name, char type) NULL }; + /* Symbol names that begin with the following are ignored.*/ static const char * const ignored_prefixes[] = { "$", /* local symbols for ARM, MIPS, etc. */ ".LASANPC", /* s390 kasan local symbols */ @@ -113,6 +115,7 @@ static bool is_ignored_symbol(const char *name, char type) NULL }; + /* Symbol names that end with the following are ignored.*/ static const char * const ignored_suffixes[] = { "_from_arm", /* arm */ "_from_thumb", /* arm */ @@ -120,9 +123,15 @@ static bool is_ignored_symbol(const char *name, char type) NULL }; + /* Symbol names that contain the following are ignored.*/ + static const char * const ignored_matches[] = { + ".long_branch.", /* ppc stub */ + ".plt_branch.", /* ppc stub */ + NULL + }; + const char * const *p; - /* Exclude symbols which vary between passes. 
*/ for (p = ignored_symbols; *p; p++) if (!strcmp(name, *p)) return true; @@ -138,6 +147,11 @@ static bool is_ignored_symbol(const char *name, char type) return true; } + for (p = ignored_matches; *p; p++) { + if (strstr(name, *p)) + return true; + } + if (type == 'U' || type == 'u') return true; /* exclude debugging symbols */ diff --git a/scripts/spelling.txt b/scripts/spelling.txt index f253681e7e2a..feb2efaaa5e6 100644 --- a/scripts/spelling.txt +++ b/scripts/spelling.txt @@ -589,7 +589,7 @@ explictly||explicitly expresion||expression exprimental||experimental extened||extended -exteneded||extended||extended +exteneded||extended extensability||extensibility extention||extension extenstion||extension diff --git a/sound/pci/asihpi/hpioctl.c b/sound/pci/asihpi/hpioctl.c index 496dcde9715d..9790f5108a16 100644 --- a/sound/pci/asihpi/hpioctl.c +++ b/sound/pci/asihpi/hpioctl.c @@ -343,7 +343,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev, struct hpi_message hm; struct hpi_response hr; struct hpi_adapter adapter; - struct hpi_pci pci; + struct hpi_pci pci = { 0 }; memset(&adapter, 0, sizeof(adapter)); @@ -499,7 +499,7 @@ int asihpi_adapter_probe(struct pci_dev *pci_dev, return 0; err: - for (idx = 0; idx < HPI_MAX_ADAPTER_MEM_SPACES; idx++) { + while (--idx >= 0) { if (pci.ap_mem_base[idx]) { iounmap(pci.ap_mem_base[idx]); pci.ap_mem_base[idx] = NULL; diff --git a/sound/pci/hda/patch_realtek.c b/sound/pci/hda/patch_realtek.c index 85e207173f5d..d4f17b465892 100644 --- a/sound/pci/hda/patch_realtek.c +++ b/sound/pci/hda/patch_realtek.c @@ -2475,7 +2475,6 @@ static const struct snd_pci_quirk alc882_fixup_tbl[] = { SND_PCI_QUIRK(0x1462, 0x1276, "MSI-GL73", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x1293, "MSI-GP65", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0x7350, "MSI-7350", ALC889_FIXUP_CD), - SND_PCI_QUIRK(0x1462, 0x9c37, "MSI X570-A PRO", ALC1220_FIXUP_CLEVO_P950), SND_PCI_QUIRK(0x1462, 0xda57, "MSI Z270-Gaming", ALC1220_FIXUP_GB_DUAL_CODECS), SND_PCI_QUIRK_VENDOR(0x1462, "MSI", ALC882_FIXUP_GPIO3), SND_PCI_QUIRK(0x147b, 0x107a, "Abit AW9D-MAX", ALC882_FIXUP_ABIT_AW9D_MAX), @@ -3428,7 +3427,11 @@ static void alc256_shutup(struct hda_codec *codec) /* 3k pull low control for Headset jack. */ /* NOTE: call this before clearing the pin, otherwise codec stalls */ - alc_update_coef_idx(codec, 0x46, 0, 3 << 12); + /* If disable 3k pulldown control for alc257, the Mic detection will not work correctly + * when booting with headset plugged. 
+	 * So skip setting it for the codec alc257
+	 */
+	if (codec->core.vendor_id != 0x10ec0257)
+		alc_update_coef_idx(codec, 0x46, 0, 3 << 12);
 
 	if (!spec->no_shutup_pins)
 		snd_hda_codec_write(codec, hp_pin, 0,
@@ -6051,6 +6054,7 @@ static void alc_fixup_thinkpad_acpi(struct hda_codec *codec,
 #include "hp_x360_helper.c"
 
 enum {
+	ALC269_FIXUP_GPIO2,
 	ALC269_FIXUP_SONY_VAIO,
 	ALC275_FIXUP_SONY_VAIO_GPIO2,
 	ALC269_FIXUP_DELL_M101Z,
@@ -6232,6 +6236,10 @@ enum {
 };
 
 static const struct hda_fixup alc269_fixups[] = {
+	[ALC269_FIXUP_GPIO2] = {
+		.type = HDA_FIXUP_FUNC,
+		.v.func = alc_fixup_gpio2,
+	},
 	[ALC269_FIXUP_SONY_VAIO] = {
 		.type = HDA_FIXUP_PINCTLS,
 		.v.pins = (const struct hda_pintbl[]) {
@@ -7051,6 +7059,8 @@ static const struct hda_fixup alc269_fixups[] = {
 	[ALC233_FIXUP_LENOVO_MULTI_CODECS] = {
 		.type = HDA_FIXUP_FUNC,
 		.v.func = alc233_alc662_fixup_lenovo_dual_codecs,
+		.chained = true,
+		.chain_id = ALC269_FIXUP_GPIO2
 	},
 	[ALC233_FIXUP_ACER_HEADSET_MIC] = {
 		.type = HDA_FIXUP_VERBS,
diff --git a/sound/usb/6fire/firmware.c b/sound/usb/6fire/firmware.c
index 69137c14d0dc..8981e61f2da4 100644
--- a/sound/usb/6fire/firmware.c
+++ b/sound/usb/6fire/firmware.c
@@ -158,29 +158,17 @@ static int usb6fire_fw_ihex_init(const struct firmware *fw,
 static int usb6fire_fw_ezusb_write(struct usb_device *device,
 		int type, int value, char *data, int len)
 {
-	int ret;
-
-	ret = usb_control_msg(device, usb_sndctrlpipe(device, 0), type,
-			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-			value, 0, data, len, HZ);
-	if (ret < 0)
-		return ret;
-	else if (ret != len)
-		return -EIO;
-	return 0;
+	return usb_control_msg_send(device, 0, type,
+			USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+			value, 0, data, len, HZ, GFP_KERNEL);
 }
 
 static int usb6fire_fw_ezusb_read(struct usb_device *device,
 		int type, int value, char *data, int len)
 {
-	int ret = usb_control_msg(device, usb_rcvctrlpipe(device, 0), type,
-			USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE, value,
-			0, data, len, HZ);
-	if (ret < 0)
-		return ret;
-	else if (ret != len)
-		return -EIO;
-	return 0;
+	return usb_control_msg_recv(device, 0, type,
+			USB_DIR_IN | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+			value, 0, data, len, HZ, GFP_KERNEL);
 }
 
 static int usb6fire_fw_fpga_write(struct usb_device *device,
@@ -230,7 +218,7 @@ static int usb6fire_fw_ezusb_upload(
 	/* upload firmware image */
 	data = 0x01; /* stop ezusb cpu */
 	ret = usb6fire_fw_ezusb_write(device, 0xa0, 0xe600, &data, 1);
-	if (ret < 0) {
+	if (ret) {
 		kfree(rec);
 		release_firmware(fw);
 		dev_err(&intf->dev,
@@ -242,7 +230,7 @@ static int usb6fire_fw_ezusb_upload(
 	while (usb6fire_fw_ihex_next_record(rec)) { /* write firmware */
 		ret = usb6fire_fw_ezusb_write(device, 0xa0, rec->address,
 				rec->data, rec->len);
-		if (ret < 0) {
+		if (ret) {
 			kfree(rec);
 			release_firmware(fw);
 			dev_err(&intf->dev,
@@ -257,7 +245,7 @@ static int usb6fire_fw_ezusb_upload(
 	if (postdata) { /* write data after firmware has been uploaded */
 		ret = usb6fire_fw_ezusb_write(device, 0xa0, postaddr,
 				postdata, postlen);
-		if (ret < 0) {
+		if (ret) {
 			dev_err(&intf->dev,
 				"unable to upload ezusb firmware %s: post urb.\n",
 				fwname);
@@ -267,7 +255,7 @@ static int usb6fire_fw_ezusb_upload(
 	data = 0x00; /* resume ezusb cpu */
 	ret = usb6fire_fw_ezusb_write(device, 0xa0, 0xe600, &data, 1);
-	if (ret < 0) {
+	if (ret) {
 		dev_err(&intf->dev,
 			"unable to upload ezusb firmware %s: end message.\n",
 			fwname);
@@ -302,7 +290,7 @@ static int usb6fire_fw_fpga_upload(
 	end = fw->data + fw->size;
 
 	ret = usb6fire_fw_ezusb_write(device, 8, 0, NULL, 0);
-	if (ret < 0) {
+	if (ret) {
 		kfree(buffer);
 		release_firmware(fw);
 		dev_err(&intf->dev,
@@ -327,7 +315,7 @@ static int usb6fire_fw_fpga_upload(
 	kfree(buffer);
 
 	ret = usb6fire_fw_ezusb_write(device, 9, 0, NULL, 0);
-	if (ret < 0) {
+	if (ret) {
 		dev_err(&intf->dev,
 			"unable to upload fpga firmware: end urb.\n");
 		return ret;
@@ -363,7 +351,7 @@ int usb6fire_fw_init(struct usb_interface *intf)
 	u8 buffer[12];
 
 	ret = usb6fire_fw_ezusb_read(device, 1, 0, buffer, 8);
-	if (ret < 0) {
+	if (ret) {
 		dev_err(&intf->dev,
 			"unable to receive device firmware state.\n");
 		return ret;
diff --git a/sound/usb/helper.c b/sound/usb/helper.c
index 4c12cc5b53fd..cf92d7110773 100644
--- a/sound/usb/helper.c
+++ b/sound/usb/helper.c
@@ -63,20 +63,6 @@ void *snd_usb_find_csint_desc(void *buffer, int buflen, void *after, u8 dsubtype
 	return NULL;
 }
 
-/* check the validity of pipe and EP types */
-int snd_usb_pipe_sanity_check(struct usb_device *dev, unsigned int pipe)
-{
-	static const int pipetypes[4] = {
-		PIPE_CONTROL, PIPE_ISOCHRONOUS, PIPE_BULK, PIPE_INTERRUPT
-	};
-	struct usb_host_endpoint *ep;
-
-	ep = usb_pipe_endpoint(dev, pipe);
-	if (!ep || usb_pipetype(pipe) != pipetypes[usb_endpoint_type(&ep->desc)])
-		return -EINVAL;
-	return 0;
-}
-
 /*
  * Wrapper for usb_control_msg().
  * Allocates a temp buffer to prevent dmaing from/to the stack.
@@ -89,7 +75,7 @@ int snd_usb_ctl_msg(struct usb_device *dev, unsigned int pipe, __u8 request,
 	void *buf = NULL;
 	int timeout;
 
-	if (snd_usb_pipe_sanity_check(dev, pipe))
+	if (usb_pipe_type_check(dev, pipe))
 		return -EINVAL;
 
 	if (size > 0) {
diff --git a/sound/usb/helper.h b/sound/usb/helper.h
index 5e8a18b4e7b9..f5b4c6647e4d 100644
--- a/sound/usb/helper.h
+++ b/sound/usb/helper.h
@@ -7,7 +7,6 @@ unsigned int snd_usb_combine_bytes(unsigned char *bytes, int size);
 void *snd_usb_find_desc(void *descstart, int desclen, void *after, u8 dtype);
 void *snd_usb_find_csint_desc(void *descstart, int desclen, void *after, u8 dsubtype);
 
-int snd_usb_pipe_sanity_check(struct usb_device *dev, unsigned int pipe);
 int snd_usb_ctl_msg(struct usb_device *dev, unsigned int pipe,
 		    __u8 request, __u8 requesttype, __u16 value, __u16 index,
 		    void *data, __u16 size);
diff --git a/sound/usb/hiface/pcm.c b/sound/usb/hiface/pcm.c
index a148caa5f48e..d942179ca095 100644
--- a/sound/usb/hiface/pcm.c
+++ b/sound/usb/hiface/pcm.c
@@ -156,16 +156,14 @@ static int hiface_pcm_set_rate(struct pcm_runtime *rt, unsigned int rate)
 	 * This control message doesn't have any ack from the
 	 * other side
 	 */
-	ret = usb_control_msg(device, usb_sndctrlpipe(device, 0),
-			      HIFACE_SET_RATE_REQUEST,
-			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
-			      rate_value, 0, NULL, 0, 100);
-	if (ret < 0) {
+	ret = usb_control_msg_send(device, 0,
+				   HIFACE_SET_RATE_REQUEST,
+				   USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_OTHER,
+				   rate_value, 0, NULL, 0, 100, GFP_KERNEL);
+	if (ret)
 		dev_err(&device->dev, "Error setting samplerate %d.\n", rate);
-		return ret;
-	}
-	return 0;
+	return ret;
 }
 
 static struct pcm_substream *hiface_pcm_get_substream(struct snd_pcm_substream
diff --git a/sound/usb/line6/driver.c b/sound/usb/line6/driver.c
index 60674ce4879b..a030dd65eb28 100644
--- a/sound/usb/line6/driver.c
+++ b/sound/usb/line6/driver.c
@@ -337,23 +337,18 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
 {
 	struct usb_device *usbdev = line6->usbdev;
 	int ret;
-	unsigned char *len;
+	u8 len;
 	unsigned count;
 
 	if (address > 0xffff || datalen > 0xff)
 		return -EINVAL;
 
-	len = kmalloc(1, GFP_KERNEL);
-	if (!len)
-		return -ENOMEM;
-
 	/* query the serial number: */
-	ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
-			      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
-			      (datalen << 8) | 0x21, address,
-			      NULL, 0, LINE6_TIMEOUT * HZ);
-
-	if (ret < 0) {
+	ret = usb_control_msg_send(usbdev, 0, 0x67,
+				   USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+				   (datalen << 8) | 0x21, address, NULL, 0,
+				   LINE6_TIMEOUT * HZ, GFP_KERNEL);
+	if (ret) {
 		dev_err(line6->ifcdev, "read request failed (error %d)\n", ret);
 		goto exit;
 	}
@@ -362,45 +357,42 @@ int line6_read_data(struct usb_line6 *line6, unsigned address, void *data,
 	for (count = 0; count < LINE6_READ_WRITE_MAX_RETRIES; count++) {
 		mdelay(LINE6_READ_WRITE_STATUS_DELAY);
 
-		ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
-				      USB_TYPE_VENDOR | USB_RECIP_DEVICE |
-				      USB_DIR_IN,
-				      0x0012, 0x0000, len, 1,
-				      LINE6_TIMEOUT * HZ);
-		if (ret < 0) {
+		ret = usb_control_msg_recv(usbdev, 0, 0x67,
+					   USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
+					   0x0012, 0x0000, &len, 1,
+					   LINE6_TIMEOUT * HZ, GFP_KERNEL);
+		if (ret) {
 			dev_err(line6->ifcdev,
 				"receive length failed (error %d)\n", ret);
 			goto exit;
 		}
 
-		if (*len != 0xff)
+		if (len != 0xff)
 			break;
 	}
 
 	ret = -EIO;
-	if (*len == 0xff) {
+	if (len == 0xff) {
 		dev_err(line6->ifcdev, "read failed after %d retries\n",
 			count);
 		goto exit;
-	} else if (*len != datalen) {
+	} else if (len != datalen) {
 		/* should be equal or something went wrong */
 		dev_err(line6->ifcdev,
 			"length mismatch (expected %d, got %d)\n",
-			(int)datalen, (int)*len);
+			(int)datalen, len);
 		goto exit;
 	}
 
 	/* receive the result: */
-	ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
-			      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
-			      0x0013, 0x0000, data, datalen,
-			      LINE6_TIMEOUT * HZ);
-
-	if (ret < 0)
+	ret = usb_control_msg_recv(usbdev, 0, 0x67,
+				   USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
+				   0x0013, 0x0000, data, datalen, LINE6_TIMEOUT * HZ,
+				   GFP_KERNEL);
+	if (ret)
 		dev_err(line6->ifcdev, "read failed (error %d)\n", ret);
 
 exit:
-	kfree(len);
 	return ret;
 }
 EXPORT_SYMBOL_GPL(line6_read_data);
@@ -423,12 +415,11 @@ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
 	if (!status)
 		return -ENOMEM;
 
-	ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
-			      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
-			      0x0022, address, data, datalen,
-			      LINE6_TIMEOUT * HZ);
-
-	if (ret < 0) {
+	ret = usb_control_msg_send(usbdev, 0, 0x67,
+				   USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+				   0x0022, address, data, datalen, LINE6_TIMEOUT * HZ,
+				   GFP_KERNEL);
+	if (ret) {
 		dev_err(line6->ifcdev,
 			"write request failed (error %d)\n", ret);
 		goto exit;
@@ -437,14 +428,11 @@ int line6_write_data(struct usb_line6 *line6, unsigned address, void *data,
 	for (count = 0; count < LINE6_READ_WRITE_MAX_RETRIES; count++) {
 		mdelay(LINE6_READ_WRITE_STATUS_DELAY);
 
-		ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0),
-				      0x67,
-				      USB_TYPE_VENDOR | USB_RECIP_DEVICE |
-				      USB_DIR_IN,
-				      0x0012, 0x0000,
-				      status, 1, LINE6_TIMEOUT * HZ);
-
-		if (ret < 0) {
+		ret = usb_control_msg_recv(usbdev, 0, 0x67,
+					   USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
+					   0x0012, 0x0000, status, 1, LINE6_TIMEOUT * HZ,
+					   GFP_KERNEL);
+		if (ret) {
 			dev_err(line6->ifcdev,
 				"receiving status failed (error %d)\n", ret);
 			goto exit;
diff --git a/sound/usb/line6/podhd.c b/sound/usb/line6/podhd.c
index eef45f7fef0d..28794a35949d 100644
--- a/sound/usb/line6/podhd.c
+++ b/sound/usb/line6/podhd.c
@@ -183,29 +183,25 @@ static const struct attribute_group podhd_dev_attr_group = {
 static int podhd_dev_start(struct usb_line6_podhd *pod)
 {
 	int ret;
-	u8 *init_bytes;
+	u8 init_bytes[8];
 	int i;
 	struct usb_device *usbdev = pod->line6.usbdev;
 
-	init_bytes = kmalloc(8, GFP_KERNEL);
-	if (!init_bytes)
-		return -ENOMEM;
-
-	ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0),
+	ret = usb_control_msg_send(usbdev, 0,
 					0x67, USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
 					0x11, 0,
-					NULL, 0, LINE6_TIMEOUT * HZ);
-	if (ret < 0) {
+					NULL, 0, LINE6_TIMEOUT * HZ, GFP_KERNEL);
+	if (ret) {
 		dev_err(pod->line6.ifcdev, "read request failed (error %d)\n", ret);
 		goto exit;
 	}
 
 	/* NOTE: looks like some kind of ping message */
-	ret = usb_control_msg(usbdev, usb_rcvctrlpipe(usbdev, 0), 0x67,
+	ret = usb_control_msg_recv(usbdev, 0, 0x67,
 					USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_IN,
 					0x11, 0x0,
-					init_bytes, 3, LINE6_TIMEOUT * HZ);
-	if (ret < 0) {
+					init_bytes, 3, LINE6_TIMEOUT * HZ, GFP_KERNEL);
+	if (ret) {
 		dev_err(pod->line6.ifcdev,
 			"receive length failed (error %d)\n", ret);
 		goto exit;
@@ -220,13 +216,12 @@ static int podhd_dev_start(struct usb_line6_podhd *pod)
 		goto exit;
 	}
 
-	ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0),
+	ret = usb_control_msg_send(usbdev, 0,
 					USB_REQ_SET_FEATURE,
 					USB_TYPE_STANDARD | USB_RECIP_DEVICE | USB_DIR_OUT,
 					1, 0,
-					NULL, 0, LINE6_TIMEOUT * HZ);
+					NULL, 0, LINE6_TIMEOUT * HZ, GFP_KERNEL);
 exit:
-	kfree(init_bytes);
 	return ret;
 }
 
diff --git a/sound/usb/line6/toneport.c b/sound/usb/line6/toneport.c
index 94dd5e7ab2e6..4e5693c97aa4 100644
--- a/sound/usb/line6/toneport.c
+++ b/sound/usb/line6/toneport.c
@@ -126,11 +126,12 @@ static int toneport_send_cmd(struct usb_device *usbdev, int cmd1, int cmd2)
 {
 	int ret;
 
-	ret = usb_control_msg(usbdev, usb_sndctrlpipe(usbdev, 0), 0x67,
-			      USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
-			      cmd1, cmd2, NULL, 0, LINE6_TIMEOUT * HZ);
+	ret = usb_control_msg_send(usbdev, 0, 0x67,
+				   USB_TYPE_VENDOR | USB_RECIP_DEVICE | USB_DIR_OUT,
+				   cmd1, cmd2, NULL, 0, LINE6_TIMEOUT * HZ,
+				   GFP_KERNEL);
 
-	if (ret < 0) {
+	if (ret) {
 		dev_err(&usbdev->dev, "send failed (error %d)\n", ret);
 		return ret;
 	}
diff --git a/sound/usb/mixer_maps.c b/sound/usb/mixer_maps.c
index 5b43e9e40e49..c369c81e74c4 100644
--- a/sound/usb/mixer_maps.c
+++ b/sound/usb/mixer_maps.c
@@ -371,7 +371,6 @@ static const struct usbmix_name_map asus_rog_map[] = {
 };
 
 static const struct usbmix_name_map lenovo_p620_rear_map[] = {
-	{ 19, NULL, 2 }, /* FU, Volume */
 	{ 19, NULL, 12 }, /* FU, Input Gain Pad */
 	{}
 };
diff --git a/sound/usb/mixer_scarlett_gen2.c b/sound/usb/mixer_scarlett_gen2.c
index 0ffff7640892..9609c6d9655c 100644
--- a/sound/usb/mixer_scarlett_gen2.c
+++ b/sound/usb/mixer_scarlett_gen2.c
@@ -1978,7 +1978,7 @@ static int scarlett2_mixer_status_create(struct usb_mixer_interface *mixer)
 		return 0;
 	}
 
-	if (snd_usb_pipe_sanity_check(dev, pipe))
+	if (usb_pipe_type_check(dev, pipe))
 		return -EINVAL;
 
 	mixer->urb = usb_alloc_urb(0, GFP_KERNEL);
diff --git a/sound/usb/quirks.c b/sound/usb/quirks.c
index 75bbdc691243..b4fa80ef730d 100644
--- a/sound/usb/quirks.c
+++ b/sound/usb/quirks.c
@@ -856,7 +856,7 @@ static int snd_usb_accessmusic_boot_quirk(struct usb_device *dev)
 	static const u8 seq[] = { 0x4e, 0x73, 0x52, 0x01 };
 	void *buf;
 
-	if (snd_usb_pipe_sanity_check(dev, usb_sndintpipe(dev, 0x05)))
+	if (usb_pipe_type_check(dev, usb_sndintpipe(dev, 0x05)))
 		return -EINVAL;
 	buf = kmemdup(seq, ARRAY_SIZE(seq), GFP_KERNEL);
 	if (!buf)
@@ -885,8 +885,6 @@ static int snd_usb_nativeinstruments_boot_quirk(struct usb_device *dev)
 {
 	int ret;
 
-	if (snd_usb_pipe_sanity_check(dev, usb_sndctrlpipe(dev, 0)))
-		return -EINVAL;
 	ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 0xaf,
 			      USB_TYPE_VENDOR | USB_RECIP_DEVICE,
 			      1, 0, NULL, 0, 1000);
@@ -994,8 +992,6 @@ static int snd_usb_axefx3_boot_quirk(struct usb_device *dev)
 
 	dev_dbg(&dev->dev, "Waiting for Axe-Fx III to boot up...\n");
 
-	if (snd_usb_pipe_sanity_check(dev, usb_sndctrlpipe(dev, 0)))
-		return -EINVAL;
 	/* If the Axe-Fx III has not fully booted, it will timeout when trying
 	 * to enable the audio streaming interface. A more generous timeout is
 	 * used here to detect when the Axe-Fx III has finished booting as the
@@ -1028,7 +1024,7 @@ static int snd_usb_motu_microbookii_communicate(struct usb_device *dev, u8 *buf,
 {
 	int err, actual_length;
 
-	if (snd_usb_pipe_sanity_check(dev, usb_sndintpipe(dev, 0x01)))
+	if (usb_pipe_type_check(dev, usb_sndintpipe(dev, 0x01)))
 		return -EINVAL;
 	err = usb_interrupt_msg(dev, usb_sndintpipe(dev, 0x01), buf, *length,
 				&actual_length, 1000);
@@ -1040,7 +1036,7 @@ static int snd_usb_motu_microbookii_communicate(struct usb_device *dev, u8 *buf,
 
 	memset(buf, 0, buf_size);
 
-	if (snd_usb_pipe_sanity_check(dev, usb_rcvintpipe(dev, 0x82)))
+	if (usb_pipe_type_check(dev, usb_rcvintpipe(dev, 0x82)))
 		return -EINVAL;
 	err = usb_interrupt_msg(dev, usb_rcvintpipe(dev, 0x82), buf, buf_size,
 				&actual_length, 1000);
@@ -1127,8 +1123,6 @@ static int snd_usb_motu_m_series_boot_quirk(struct usb_device *dev)
 {
 	int ret;
 
-	if (snd_usb_pipe_sanity_check(dev, usb_sndctrlpipe(dev, 0)))
-		return -EINVAL;
 	ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0), 1,
 			      USB_TYPE_VENDOR | USB_RECIP_DEVICE,
 			      0x0, 0, NULL, 0, 1000);
@@ -1678,12 +1672,13 @@ void snd_usb_ctl_msg_quirk(struct usb_device *dev, unsigned int pipe,
 	    && (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
 		msleep(20);
 
-	/* Zoom R16/24, Logitech H650e, Jabra 550a, Kingston HyperX needs a tiny
-	 * delay here, otherwise requests like get/set frequency return as
-	 * failed despite actually succeeding.
+	/* Zoom R16/24, Logitech H650e/H570e, Jabra 550a, Kingston HyperX
+	 * needs a tiny delay here, otherwise requests like get/set
+	 * frequency return as failed despite actually succeeding.
 	 */
 	if ((chip->usb_id == USB_ID(0x1686, 0x00dd) ||
 	     chip->usb_id == USB_ID(0x046d, 0x0a46) ||
+	     chip->usb_id == USB_ID(0x046d, 0x0a56) ||
 	     chip->usb_id == USB_ID(0x0b0e, 0x0349) ||
 	     chip->usb_id == USB_ID(0x0951, 0x16ad)) &&
 	    (requesttype & USB_TYPE_MASK) == USB_TYPE_CLASS)
diff --git a/sound/usb/usx2y/us122l.c b/sound/usb/usx2y/us122l.c
index f86f7a61fb36..6e1bfe894dd5 100644
--- a/sound/usb/usx2y/us122l.c
+++ b/sound/usb/usx2y/us122l.c
@@ -82,40 +82,13 @@ static int us144_create_usbmidi(struct snd_card *card)
 			  &US122L(card)->midi_list, &quirk);
 }
 
-/*
- * Wrapper for usb_control_msg().
- * Allocates a temp buffer to prevent dmaing from/to the stack.
- */
-static int us122l_ctl_msg(struct usb_device *dev, unsigned int pipe,
-			  __u8 request, __u8 requesttype,
-			  __u16 value, __u16 index, void *data,
-			  __u16 size, int timeout)
-{
-	int err;
-	void *buf = NULL;
-
-	if (size > 0) {
-		buf = kmemdup(data, size, GFP_KERNEL);
-		if (!buf)
-			return -ENOMEM;
-	}
-	err = usb_control_msg(dev, pipe, request, requesttype,
-			      value, index, buf, size, timeout);
-	if (size > 0) {
-		memcpy(data, buf, size);
-		kfree(buf);
-	}
-	return err;
-}
-
 static void pt_info_set(struct usb_device *dev, u8 v)
 {
 	int ret;
 
-	ret = usb_control_msg(dev, usb_sndctrlpipe(dev, 0),
-			      'I',
-			      USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
-			      v, 0, NULL, 0, 1000);
+	ret = usb_control_msg_send(dev, 0, 'I',
+				   USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
+				   v, 0, NULL, 0, 1000, GFP_NOIO);
 	snd_printdd(KERN_DEBUG "%i\n", ret);
 }
 
@@ -305,10 +278,11 @@ static int us122l_set_sample_rate(struct usb_device *dev, int rate)
 	data[0] = rate;
 	data[1] = rate >> 8;
 	data[2] = rate >> 16;
-	err = us122l_ctl_msg(dev, usb_sndctrlpipe(dev, 0), UAC_SET_CUR,
-			     USB_TYPE_CLASS|USB_RECIP_ENDPOINT|USB_DIR_OUT,
-			     UAC_EP_CS_ATTR_SAMPLE_RATE << 8, ep, data, 3, 1000);
-	if (err < 0)
+	err = usb_control_msg_send(dev, 0, UAC_SET_CUR,
+				   USB_TYPE_CLASS | USB_RECIP_ENDPOINT | USB_DIR_OUT,
+				   UAC_EP_CS_ATTR_SAMPLE_RATE << 8, ep, data, 3,
+				   1000, GFP_NOIO);
+	if (err)
 		snd_printk(KERN_ERR "%d: cannot set freq %d to ep 0x%x\n",
 			   dev->devnum, rate, ep);
 	return err;
diff --git a/tools/bootconfig/test-bootconfig.sh b/tools/bootconfig/test-bootconfig.sh
index 56284b98d8f0..d295e406a756 100755
--- a/tools/bootconfig/test-bootconfig.sh
+++ b/tools/bootconfig/test-bootconfig.sh
@@ -137,6 +137,31 @@ $BOOTCONF $INITRD > $TEMPCONF
 cat $TEMPCONF
 xpass grep \'\"string\"\' $TEMPCONF
 
+echo "Repeat same-key tree"
+cat > $TEMPCONF << EOF
+foo
+bar
+foo { buz }
+EOF
+echo > $INITRD
+
+xpass $BOOTCONF -a $TEMPCONF $INITRD
+$BOOTCONF $INITRD > $OUTFILE
+xpass grep -q "bar" $OUTFILE
+
+
+echo "Remove/keep tailing spaces"
+cat > $TEMPCONF << EOF
+foo = val # comment
+bar = "val2 " # comment
+EOF
+echo > $INITRD
+
+xpass $BOOTCONF -a $TEMPCONF $INITRD
+$BOOTCONF $INITRD > $OUTFILE
+xfail grep -q val[[:space:]] $OUTFILE
+xpass grep -q val2[[:space:]] $OUTFILE
+
 echo "=== expected failure cases ==="
 for i in samples/bad-* ; do
   xfail $BOOTCONF -a $i $INITRD
diff --git a/tools/bpf/Makefile b/tools/bpf/Makefile
index 0a6d09a3e91f..39bb322707b4 100644
--- a/tools/bpf/Makefile
+++ b/tools/bpf/Makefile
@@ -38,7 +38,7 @@ FEATURE_TESTS = libbfd disassembler-four-args
 FEATURE_DISPLAY = libbfd disassembler-four-args
 
 check_feat := 1
-NON_CHECK_FEAT_TARGETS := clean bpftool_clean runqslower_clean
+NON_CHECK_FEAT_TARGETS := clean bpftool_clean runqslower_clean resolve_btfids_clean
 ifdef MAKECMDGOALS
 ifeq ($(filter-out $(NON_CHECK_FEAT_TARGETS),$(MAKECMDGOALS)),)
   check_feat := 0
@@ -89,7 +89,7 @@ $(OUTPUT)bpf_exp.lex.c: $(OUTPUT)bpf_exp.yacc.c
 $(OUTPUT)bpf_exp.yacc.o: $(OUTPUT)bpf_exp.yacc.c
 $(OUTPUT)bpf_exp.lex.o: $(OUTPUT)bpf_exp.lex.c
 
-clean: bpftool_clean runqslower_clean
+clean: bpftool_clean runqslower_clean resolve_btfids_clean
 	$(call QUIET_CLEAN, bpf-progs)
 	$(Q)$(RM) -r -- $(OUTPUT)*.o $(OUTPUT)bpf_jit_disasm $(OUTPUT)bpf_dbg \
 		$(OUTPUT)bpf_asm $(OUTPUT)bpf_exp.yacc.* $(OUTPUT)bpf_exp.lex.*
diff --git a/tools/bpf/resolve_btfids/Makefile b/tools/bpf/resolve_btfids/Makefile
index a88cd4426398..fe8eb537688b 100644
--- a/tools/bpf/resolve_btfids/Makefile
+++ b/tools/bpf/resolve_btfids/Makefile
@@ -80,6 +80,7 @@ libbpf-clean:
 clean: libsubcmd-clean libbpf-clean fixdep-clean
 	$(call msg,CLEAN,$(BINARY))
 	$(Q)$(RM) -f $(BINARY); \
+	$(RM) -rf $(if $(OUTPUT),$(OUTPUT),.)/feature; \
 	find $(if $(OUTPUT),$(OUTPUT),.) -name \*.o -or -name \*.o.cmd -or -name \*.o.d | xargs $(RM)
 
 tags:
diff --git a/tools/io_uring/io_uring-bench.c b/tools/io_uring/io_uring-bench.c
index 0f257139b003..7703f0118385 100644
--- a/tools/io_uring/io_uring-bench.c
+++ b/tools/io_uring/io_uring-bench.c
@@ -130,7 +130,7 @@ static int io_uring_register_files(struct submitter *s)
 					s->nr_files);
 }
 
-static int gettid(void)
+static int lk_gettid(void)
 {
 	return syscall(__NR_gettid);
 }
@@ -281,7 +281,7 @@ static void *submitter_fn(void *data)
 	struct io_sq_ring *ring = &s->sq_ring;
 	int ret, prepped;
 
-	printf("submitter=%d\n", gettid());
+	printf("submitter=%d\n", lk_gettid());
 
 	srand48_r(pthread_self(), &s->rand);
 
diff --git a/tools/lib/bpf/Makefile b/tools/lib/bpf/Makefile
index bf8ed134cb8a..9ae8f4ef0aac 100644
--- a/tools/lib/bpf/Makefile
+++ b/tools/lib/bpf/Makefile
@@ -59,7 +59,7 @@ FEATURE_USER = .libbpf
 FEATURE_TESTS = libelf libelf-mmap zlib bpf reallocarray
 FEATURE_DISPLAY = libelf zlib bpf
 
-INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/arch/$(ARCH)/include/uapi -I$(srctree)/tools/include/uapi
+INCLUDES = -I. -I$(srctree)/tools/include -I$(srctree)/tools/include/uapi
 FEATURE_CHECK_CFLAGS-bpf = $(INCLUDES)
 
 check_feat := 1
@@ -152,6 +152,7 @@ GLOBAL_SYM_COUNT = $(shell readelf -s --wide $(BPF_IN_SHARED) | \
 			   awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}' | \
 			   sort -u | wc -l)
 VERSIONED_SYM_COUNT = $(shell readelf --dyn-syms --wide $(OUTPUT)libbpf.so | \
+			      awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}' | \
 			      grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | sort -u | wc -l)
 
 CMD_TARGETS = $(LIB_TARGET) $(PC_FILE)
@@ -219,6 +220,7 @@ check_abi: $(OUTPUT)libbpf.so
 		     awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}'| \
 		     sort -u > $(OUTPUT)libbpf_global_syms.tmp; \
 		readelf --dyn-syms --wide $(OUTPUT)libbpf.so | \
+		     awk '/GLOBAL/ && /DEFAULT/ && !/UND/ {print $$NF}'| \
 		     grep -Eo '[^ ]+@LIBBPF_' | cut -d@ -f1 | \
 		     sort -u > $(OUTPUT)libbpf_versioned_syms.tmp; \
 		diff -u $(OUTPUT)libbpf_global_syms.tmp \
diff --git a/tools/lib/bpf/libbpf.c b/tools/lib/bpf/libbpf.c
index 0ad0b0491e1f..7253b833576c 100644
--- a/tools/lib/bpf/libbpf.c
+++ b/tools/lib/bpf/libbpf.c
@@ -5203,8 +5203,8 @@ static int bpf_object__collect_map_relos(struct bpf_object *obj,
 	int i, j, nrels, new_sz;
 	const struct btf_var_secinfo *vi = NULL;
 	const struct btf_type *sec, *var, *def;
+	struct bpf_map *map = NULL, *targ_map;
 	const struct btf_member *member;
-	struct bpf_map *map, *targ_map;
 	const char *name, *mname;
 	Elf_Data *symbols;
 	unsigned int moff;
diff --git a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c
index 07ddbfdbcab7..6dfce3fd68bc 100644
--- a/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c
+++ b/tools/testing/selftests/bpf/progs/bpf_iter_bpf_hash_map.c
@@ -47,7 +47,10 @@ int dump_bpf_hash_map(struct bpf_iter__bpf_map_elem *ctx)
 	__u32 seq_num = ctx->meta->seq_num;
 	struct bpf_map *map = ctx->map;
 	struct key_t *key = ctx->key;
+	struct key_t tmp_key;
 	__u64 *val = ctx->value;
+	__u64 tmp_val = 0;
+	int ret;
 
 	if (in_test_mode) {
 		/* test mode is used by selftests to
@@ -61,6 +64,18 @@ int dump_bpf_hash_map(struct bpf_iter__bpf_map_elem *ctx)
 		if (key == (void *)0 || val == (void *)0)
 			return 0;
 
+		/* update the value and then delete the <key, value> pair.
+		 * it should not impact the existing 'val' which is still
+		 * accessible under rcu.
+		 */
+		__builtin_memcpy(&tmp_key, key, sizeof(struct key_t));
+		ret = bpf_map_update_elem(&hashmap1, &tmp_key, &tmp_val, 0);
+		if (ret)
+			return 0;
+		ret = bpf_map_delete_elem(&hashmap1, &tmp_key);
+		if (ret)
+			return 0;
+
 		key_sum_a += key->a;
 		key_sum_b += key->b;
 		key_sum_c += key->c;
diff --git a/tools/testing/selftests/kvm/x86_64/debug_regs.c b/tools/testing/selftests/kvm/x86_64/debug_regs.c
index b8d14f9db5f9..2fc6b3af81a1 100644
--- a/tools/testing/selftests/kvm/x86_64/debug_regs.c
+++ b/tools/testing/selftests/kvm/x86_64/debug_regs.c
@@ -73,7 +73,7 @@ int main(void)
 	int i;
 	/* Instruction lengths starting at ss_start */
 	int ss_size[4] = {
-		3,	/* xor */
+		2,	/* xor */
 		2,	/* cpuid */
 		5,	/* mov */
 		2,	/* rdmsr */
diff --git a/tools/testing/selftests/net/rtnetlink.sh b/tools/testing/selftests/net/rtnetlink.sh
index 7c38a909f8b8..8a2fe6d64bf2 100755
--- a/tools/testing/selftests/net/rtnetlink.sh
+++ b/tools/testing/selftests/net/rtnetlink.sh
@@ -1175,6 +1175,51 @@ kci_test_neigh_get()
 	echo "PASS: neigh get"
 }
 
+kci_test_bridge_parent_id()
+{
+	local ret=0
+	sysfsnet=/sys/bus/netdevsim/devices/netdevsim
+	probed=false
+
+	if [ ! -w /sys/bus/netdevsim/new_device ] ; then
+		modprobe -q netdevsim
+		check_err $?
+		if [ $ret -ne 0 ]; then
+			echo "SKIP: bridge_parent_id can't load netdevsim"
+			return $ksft_skip
+		fi
+		probed=true
+	fi
+
+	echo "10 1" > /sys/bus/netdevsim/new_device
+	while [ ! -d ${sysfsnet}10 ] ; do :; done
+	echo "20 1" > /sys/bus/netdevsim/new_device
+	while [ ! -d ${sysfsnet}20 ] ; do :; done
+	udevadm settle
+	dev10=`ls ${sysfsnet}10/net/`
+	dev20=`ls ${sysfsnet}20/net/`
+
+	ip link add name test-bond0 type bond mode 802.3ad
+	ip link set dev $dev10 master test-bond0
+	ip link set dev $dev20 master test-bond0
+	ip link add name test-br0 type bridge
+	ip link set dev test-bond0 master test-br0
+	check_err $?
+
+	# clean up any leftovers
+	ip link del dev test-br0
+	ip link del dev test-bond0
+	echo 20 > /sys/bus/netdevsim/del_device
+	echo 10 > /sys/bus/netdevsim/del_device
+	$probed && rmmod netdevsim
+
+	if [ $ret -ne 0 ]; then
+		echo "FAIL: bridge_parent_id"
+		return 1
+	fi
+	echo "PASS: bridge_parent_id"
+}
+
 kci_test_rtnl()
 {
 	local ret=0
@@ -1224,6 +1269,8 @@ kci_test_rtnl()
 	check_err $?
 	kci_test_neigh_get
 	check_err $?
+	kci_test_bridge_parent_id
+	check_err $?
 
 	kci_del_dummy
 	return $ret
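
The sound/usb conversions above (6fire, hiface, line6, toneport, us122l) all follow the same pattern: an open-coded usb_control_msg() call with its own bounce buffer and separate "ret < 0" / "ret != len" checks is replaced by the usb_control_msg_send()/usb_control_msg_recv() helpers, which bounce-buffer internally, treat short transfers as errors, and return 0 on success. A minimal sketch of that pattern follows; the request value and function name are invented for illustration and are not part of any driver above.

#include <linux/usb.h>

#define MY_VENDOR_REQ	0x67	/* hypothetical vendor request */

static int my_dev_send_cmd(struct usb_device *dev, u16 value, u16 index)
{
	u8 data = 0x01;	/* may live on the stack; the helper copies it internally */

	/* 0 on success, negative errno on failure (short writes included) */
	return usb_control_msg_send(dev, 0, MY_VENDOR_REQ,
				    USB_DIR_OUT | USB_TYPE_VENDOR | USB_RECIP_DEVICE,
				    value, index, &data, sizeof(data),
				    1000, GFP_KERNEL);
}

Callers therefore check "if (ret)" rather than "if (ret < 0)", which is exactly the change visible in the hunks above.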
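
The asihpi probe fix earlier in this series changes its error path from walking the full HPI_MAX_ADAPTER_MEM_SPACES range to "while (--idx >= 0)", the usual partial-unwind idiom: only the iomappings actually set up before the failure are torn down. A generic sketch of the idiom, with invented names and assuming kernel context (<linux/io.h>, <linux/ioport.h>):

static int map_bars(void __iomem *bar[], struct resource *res, int n)
{
	int idx;

	for (idx = 0; idx < n; idx++) {
		bar[idx] = ioremap(res[idx].start, resource_size(&res[idx]));
		if (!bar[idx])
			goto err;
	}
	return 0;

err:
	/* unmap only the entries that were successfully mapped */
	while (--idx >= 0) {
		iounmap(bar[idx]);
		bar[idx] = NULL;
	}
	return -ENOMEM;
}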