Diffstat (limited to 'drivers/usb')
-rw-r--r--  drivers/usb/c67x00/c67x00-hcd.h | 2
-rw-r--r--  drivers/usb/c67x00/c67x00-sched.c | 12
-rw-r--r--  drivers/usb/cdns3/cdns3-imx.c | 22
-rw-r--r--  drivers/usb/chipidea/Kconfig | 3
-rw-r--r--  drivers/usb/chipidea/ci_hdrc_imx.c | 6
-rw-r--r--  drivers/usb/chipidea/ci_hdrc_tegra.c | 344
-rw-r--r--  drivers/usb/chipidea/core.c | 10
-rw-r--r--  drivers/usb/chipidea/host.c | 104
-rw-r--r--  drivers/usb/class/cdc-acm.c | 4
-rw-r--r--  drivers/usb/class/cdc-wdm.c | 16
-rw-r--r--  drivers/usb/class/usblp.c | 40
-rw-r--r--  drivers/usb/class/usbtmc.c | 85
-rw-r--r--  drivers/usb/common/common.c | 26
-rw-r--r--  drivers/usb/core/hcd.c | 8
-rw-r--r--  drivers/usb/dwc2/gadget.c | 8
-rw-r--r--  drivers/usb/dwc2/hcd.c | 15
-rw-r--r--  drivers/usb/dwc2/hcd_intr.c | 14
-rw-r--r--  drivers/usb/dwc2/params.c | 8
-rw-r--r--  drivers/usb/dwc2/pci.c | 18
-rw-r--r--  drivers/usb/dwc3/Kconfig | 10
-rw-r--r--  drivers/usb/dwc3/Makefile | 1
-rw-r--r--  drivers/usb/dwc3/core.c | 85
-rw-r--r--  drivers/usb/dwc3/core.h | 12
-rw-r--r--  drivers/usb/dwc3/drd.c | 25
-rw-r--r--  drivers/usb/dwc3/dwc3-haps.c | 8
-rw-r--r--  drivers/usb/dwc3/dwc3-imx8mp.c | 363
-rw-r--r--  drivers/usb/dwc3/dwc3-keystone.c | 9
-rw-r--r--  drivers/usb/dwc3/dwc3-meson-g12a.c | 2
-rw-r--r--  drivers/usb/dwc3/dwc3-pci.c | 69
-rw-r--r--  drivers/usb/dwc3/dwc3-qcom.c | 71
-rw-r--r--  drivers/usb/dwc3/gadget.c | 242
-rw-r--r--  drivers/usb/dwc3/host.c | 2
-rw-r--r--  drivers/usb/dwc3/ulpi.c | 38
-rw-r--r--  drivers/usb/gadget/Kconfig | 2
-rw-r--r--  drivers/usb/gadget/composite.c | 114
-rw-r--r--  drivers/usb/gadget/configfs.c | 43
-rw-r--r--  drivers/usb/gadget/function/f_midi.c | 12
-rw-r--r--  drivers/usb/gadget/function/f_printer.c | 6
-rw-r--r--  drivers/usb/gadget/function/f_uac2.c | 69
-rw-r--r--  drivers/usb/gadget/function/u_audio.c | 135
-rw-r--r--  drivers/usb/gadget/function/u_ether.c | 42
-rw-r--r--  drivers/usb/gadget/function/u_ether.h | 12
-rw-r--r--  drivers/usb/gadget/function/u_ether_configfs.h | 15
-rw-r--r--  drivers/usb/gadget/function/u_serial.c | 8
-rw-r--r--  drivers/usb/gadget/legacy/Kconfig | 13
-rw-r--r--  drivers/usb/gadget/legacy/acm_ms.c | 4
-rw-r--r--  drivers/usb/gadget/legacy/ether.c | 4
-rw-r--r--  drivers/usb/gadget/legacy/raw_gadget.c | 3
-rw-r--r--  drivers/usb/gadget/udc/Kconfig | 2
-rw-r--r--  drivers/usb/gadget/udc/Makefile | 1
-rw-r--r--  drivers/usb/gadget/udc/aspeed-vhub/epn.c | 5
-rw-r--r--  drivers/usb/gadget/udc/aspeed-vhub/hub.c | 4
-rw-r--r--  drivers/usb/gadget/udc/bdc/Kconfig | 11
-rw-r--r--  drivers/usb/gadget/udc/bdc/Makefile | 2
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc.h | 134
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_cmd.c | 2
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_cmd.h | 21
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_dbg.c | 2
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_dbg.h | 10
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_ep.c | 16
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_ep.h | 10
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_pci.c | 128
-rw-r--r--  drivers/usb/gadget/udc/bdc/bdc_udc.c | 8
-rw-r--r--  drivers/usb/gadget/udc/core.c | 55
-rw-r--r--  drivers/usb/gadget/udc/dummy_hcd.c | 45
-rw-r--r--  drivers/usb/gadget/udc/fsl_mxc_udc.c | 122
-rw-r--r--  drivers/usb/gadget/udc/snps_udc_core.c | 30
-rw-r--r--  drivers/usb/gadget/udc/udc-xilinx.c | 2
-rw-r--r--  drivers/usb/host/Kconfig | 10
-rw-r--r--  drivers/usb/host/Makefile | 1
-rw-r--r--  drivers/usb/host/ehci-hcd.c | 12
-rw-r--r--  drivers/usb/host/ehci-hub.c | 3
-rw-r--r--  drivers/usb/host/ehci-tegra.c | 604
-rw-r--r--  drivers/usb/host/xhci-ext-caps.c | 3
-rw-r--r--  drivers/usb/host/xhci-mem.c | 21
-rw-r--r--  drivers/usb/host/xhci-mtk-sch.c | 130
-rw-r--r--  drivers/usb/host/xhci-mtk.c | 2
-rw-r--r--  drivers/usb/host/xhci-mtk.h | 15
-rw-r--r--  drivers/usb/host/xhci-mvebu.c | 42
-rw-r--r--  drivers/usb/host/xhci-mvebu.h | 6
-rw-r--r--  drivers/usb/host/xhci-plat.c | 20
-rw-r--r--  drivers/usb/host/xhci-plat.h | 1
-rw-r--r--  drivers/usb/host/xhci-ring.c | 1144
-rw-r--r--  drivers/usb/host/xhci-tegra.c | 7
-rw-r--r--  drivers/usb/host/xhci.c | 126
-rw-r--r--  drivers/usb/host/xhci.h | 41
-rw-r--r--  drivers/usb/misc/yurex.c | 3
-rw-r--r--  drivers/usb/musb/jz4740.c | 18
-rw-r--r--  drivers/usb/musb/musb_core.c | 31
-rw-r--r--  drivers/usb/musb/musb_gadget.c | 2
-rw-r--r--  drivers/usb/musb/musbhsdma.c | 4
-rw-r--r--  drivers/usb/phy/phy-mxs-usb.c | 7
-rw-r--r--  drivers/usb/phy/phy-tegra-usb.c | 103
-rw-r--r--  drivers/usb/renesas_usbhs/fifo.c | 1
-rw-r--r--  drivers/usb/serial/cp210x.c | 2
-rw-r--r--  drivers/usb/serial/iuu_phoenix.c | 20
-rw-r--r--  drivers/usb/serial/option.c | 9
-rw-r--r--  drivers/usb/storage/unusual_uas.h | 7
-rw-r--r--  drivers/usb/typec/altmodes/Kconfig | 2
-rw-r--r--  drivers/usb/typec/class.c | 63
-rw-r--r--  drivers/usb/typec/mux/intel_pmc_mux.c | 11
-rw-r--r--  drivers/usb/typec/tcpm/tcpci.c | 9
-rw-r--r--  drivers/usb/typec/tcpm/tcpci.h | 6
-rw-r--r--  drivers/usb/typec/tcpm/tcpci_maxim.c | 35
-rw-r--r--  drivers/usb/typec/tcpm/tcpm.c | 1088
-rw-r--r--  drivers/usb/typec/ucsi/Kconfig | 1
-rw-r--r--  drivers/usb/typec/ucsi/ucsi.c | 55
-rw-r--r--  drivers/usb/typec/ucsi/ucsi.h | 3
-rw-r--r--  drivers/usb/usbip/stub_main.c | 4
-rw-r--r--  drivers/usb/usbip/usbip_common.h | 29
-rw-r--r--  drivers/usb/usbip/vhci_hcd.c | 2
-rw-r--r--  drivers/usb/usbip/vhci_rx.c | 2
-rw-r--r--  drivers/usb/usbip/vhci_sysfs.c | 1
113 files changed, 4035 insertions(+), 2445 deletions(-)
diff --git a/drivers/usb/c67x00/c67x00-hcd.h b/drivers/usb/c67x00/c67x00-hcd.h
index 6b6b04a3fe0f..6332a6b5dce6 100644
--- a/drivers/usb/c67x00/c67x00-hcd.h
+++ b/drivers/usb/c67x00/c67x00-hcd.h
@@ -76,7 +76,7 @@ struct c67x00_hcd {
u16 next_td_addr;
u16 next_buf_addr;
- struct tasklet_struct tasklet;
+ struct work_struct work;
struct completion endpoint_disable;
diff --git a/drivers/usb/c67x00/c67x00-sched.c b/drivers/usb/c67x00/c67x00-sched.c
index e65f1a0ae80b..c7d3e907be81 100644
--- a/drivers/usb/c67x00/c67x00-sched.c
+++ b/drivers/usb/c67x00/c67x00-sched.c
@@ -1123,24 +1123,26 @@ static void c67x00_do_work(struct c67x00_hcd *c67x00)
/* -------------------------------------------------------------------------- */
-static void c67x00_sched_tasklet(struct tasklet_struct *t)
+static void c67x00_sched_work(struct work_struct *work)
{
- struct c67x00_hcd *c67x00 = from_tasklet(c67x00, t, tasklet);
+ struct c67x00_hcd *c67x00;
+
+ c67x00 = container_of(work, struct c67x00_hcd, work);
c67x00_do_work(c67x00);
}
void c67x00_sched_kick(struct c67x00_hcd *c67x00)
{
- tasklet_hi_schedule(&c67x00->tasklet);
+ queue_work(system_highpri_wq, &c67x00->work);
}
int c67x00_sched_start_scheduler(struct c67x00_hcd *c67x00)
{
- tasklet_setup(&c67x00->tasklet, c67x00_sched_tasklet);
+ INIT_WORK(&c67x00->work, c67x00_sched_work);
return 0;
}
void c67x00_sched_stop_scheduler(struct c67x00_hcd *c67x00)
{
- tasklet_kill(&c67x00->tasklet);
+ cancel_work_sync(&c67x00->work);
}
diff --git a/drivers/usb/cdns3/cdns3-imx.c b/drivers/usb/cdns3/cdns3-imx.c
index d9fb68766a15..8f88eec0b0ea 100644
--- a/drivers/usb/cdns3/cdns3-imx.c
+++ b/drivers/usb/cdns3/cdns3-imx.c
@@ -185,7 +185,11 @@ static int cdns_imx_probe(struct platform_device *pdev)
}
data->num_clks = ARRAY_SIZE(imx_cdns3_core_clks);
- data->clks = (struct clk_bulk_data *)imx_cdns3_core_clks;
+ data->clks = devm_kmemdup(dev, imx_cdns3_core_clks,
+ sizeof(imx_cdns3_core_clks), GFP_KERNEL);
+ if (!data->clks)
+ return -ENOMEM;
+
ret = devm_clk_bulk_get(dev, data->num_clks, data->clks);
if (ret)
return ret;
@@ -214,20 +218,16 @@ err:
return ret;
}
-static int cdns_imx_remove_core(struct device *dev, void *data)
-{
- struct platform_device *pdev = to_platform_device(dev);
-
- platform_device_unregister(pdev);
-
- return 0;
-}
-
static int cdns_imx_remove(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
+ struct cdns_imx *data = dev_get_drvdata(dev);
- device_for_each_child(dev, NULL, cdns_imx_remove_core);
+ pm_runtime_get_sync(dev);
+ of_platform_depopulate(dev);
+ clk_bulk_disable_unprepare(data->num_clks, data->clks);
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
platform_set_drvdata(pdev, NULL);
return 0;
diff --git a/drivers/usb/chipidea/Kconfig b/drivers/usb/chipidea/Kconfig
index 8bafcfc6080d..661818e8fed6 100644
--- a/drivers/usb/chipidea/Kconfig
+++ b/drivers/usb/chipidea/Kconfig
@@ -53,9 +53,8 @@ config USB_CHIPIDEA_GENERIC
default USB_CHIPIDEA
config USB_CHIPIDEA_TEGRA
- tristate "Enable Tegra UDC glue driver" if EMBEDDED
+ tristate "Enable Tegra USB glue driver" if EMBEDDED
depends on OF
- depends on USB_CHIPIDEA_UDC
default USB_CHIPIDEA
endif
diff --git a/drivers/usb/chipidea/ci_hdrc_imx.c b/drivers/usb/chipidea/ci_hdrc_imx.c
index 9e12152ea46b..8b7bc10b6e8b 100644
--- a/drivers/usb/chipidea/ci_hdrc_imx.c
+++ b/drivers/usb/chipidea/ci_hdrc_imx.c
@@ -139,9 +139,13 @@ static struct imx_usbmisc_data *usbmisc_get_init_data(struct device *dev)
misc_pdev = of_find_device_by_node(args.np);
of_node_put(args.np);
- if (!misc_pdev || !platform_get_drvdata(misc_pdev))
+ if (!misc_pdev)
return ERR_PTR(-EPROBE_DEFER);
+ if (!platform_get_drvdata(misc_pdev)) {
+ put_device(&misc_pdev->dev);
+ return ERR_PTR(-EPROBE_DEFER);
+ }
data->dev = &misc_pdev->dev;
/*
diff --git a/drivers/usb/chipidea/ci_hdrc_tegra.c b/drivers/usb/chipidea/ci_hdrc_tegra.c
index 7455df0ede49..90f2a8b786be 100644
--- a/drivers/usb/chipidea/ci_hdrc_tegra.c
+++ b/drivers/usb/chipidea/ci_hdrc_tegra.c
@@ -4,57 +4,278 @@
*/
#include <linux/clk.h>
+#include <linux/io.h>
#include <linux/module.h>
#include <linux/of_device.h>
#include <linux/reset.h>
+#include <linux/usb.h>
#include <linux/usb/chipidea.h>
+#include <linux/usb/hcd.h>
+#include <linux/usb/of.h>
+#include <linux/usb/phy.h>
+
+#include "../host/ehci.h"
#include "ci.h"
-struct tegra_udc {
+struct tegra_usb {
struct ci_hdrc_platform_data data;
struct platform_device *dev;
+ const struct tegra_usb_soc_info *soc;
struct usb_phy *phy;
struct clk *clk;
+
+ bool needs_double_reset;
};
-struct tegra_udc_soc_info {
+struct tegra_usb_soc_info {
unsigned long flags;
+ unsigned int txfifothresh;
+ enum usb_dr_mode dr_mode;
};
-static const struct tegra_udc_soc_info tegra_udc_soc_info = {
- .flags = CI_HDRC_REQUIRES_ALIGNED_DMA,
+static const struct tegra_usb_soc_info tegra20_ehci_soc_info = {
+ .flags = CI_HDRC_REQUIRES_ALIGNED_DMA |
+ CI_HDRC_OVERRIDE_PHY_CONTROL |
+ CI_HDRC_SUPPORTS_RUNTIME_PM,
+ .dr_mode = USB_DR_MODE_HOST,
+ .txfifothresh = 10,
};
-static const struct of_device_id tegra_udc_of_match[] = {
+static const struct tegra_usb_soc_info tegra30_ehci_soc_info = {
+ .flags = CI_HDRC_REQUIRES_ALIGNED_DMA |
+ CI_HDRC_OVERRIDE_PHY_CONTROL |
+ CI_HDRC_SUPPORTS_RUNTIME_PM,
+ .dr_mode = USB_DR_MODE_HOST,
+ .txfifothresh = 16,
+};
+
+static const struct tegra_usb_soc_info tegra20_udc_soc_info = {
+ .flags = CI_HDRC_REQUIRES_ALIGNED_DMA |
+ CI_HDRC_OVERRIDE_PHY_CONTROL |
+ CI_HDRC_SUPPORTS_RUNTIME_PM,
+ .dr_mode = USB_DR_MODE_UNKNOWN,
+ .txfifothresh = 10,
+};
+
+static const struct tegra_usb_soc_info tegra30_udc_soc_info = {
+ .flags = CI_HDRC_REQUIRES_ALIGNED_DMA |
+ CI_HDRC_OVERRIDE_PHY_CONTROL |
+ CI_HDRC_SUPPORTS_RUNTIME_PM,
+ .dr_mode = USB_DR_MODE_UNKNOWN,
+ .txfifothresh = 16,
+};
+
+static const struct of_device_id tegra_usb_of_match[] = {
{
+ .compatible = "nvidia,tegra20-ehci",
+ .data = &tegra20_ehci_soc_info,
+ }, {
+ .compatible = "nvidia,tegra30-ehci",
+ .data = &tegra30_ehci_soc_info,
+ }, {
.compatible = "nvidia,tegra20-udc",
- .data = &tegra_udc_soc_info,
+ .data = &tegra20_udc_soc_info,
}, {
.compatible = "nvidia,tegra30-udc",
- .data = &tegra_udc_soc_info,
+ .data = &tegra30_udc_soc_info,
}, {
.compatible = "nvidia,tegra114-udc",
- .data = &tegra_udc_soc_info,
+ .data = &tegra30_udc_soc_info,
}, {
.compatible = "nvidia,tegra124-udc",
- .data = &tegra_udc_soc_info,
+ .data = &tegra30_udc_soc_info,
}, {
/* sentinel */
}
};
-MODULE_DEVICE_TABLE(of, tegra_udc_of_match);
+MODULE_DEVICE_TABLE(of, tegra_usb_of_match);
+
+static int tegra_usb_reset_controller(struct device *dev)
+{
+ struct reset_control *rst, *rst_utmi;
+ struct device_node *phy_np;
+ int err;
+
+ rst = devm_reset_control_get_shared(dev, "usb");
+ if (IS_ERR(rst)) {
+ dev_err(dev, "can't get ehci reset: %pe\n", rst);
+ return PTR_ERR(rst);
+ }
+
+ phy_np = of_parse_phandle(dev->of_node, "nvidia,phy", 0);
+ if (!phy_np)
+ return -ENOENT;
+
+ /*
+ * The 1st USB controller contains some UTMI pad registers that are
+ * global for all the controllers on the chip. Those registers are
+ * also cleared when reset is asserted to the 1st controller.
+ */
+ rst_utmi = of_reset_control_get_shared(phy_np, "utmi-pads");
+ if (IS_ERR(rst_utmi)) {
+ dev_warn(dev, "can't get utmi-pads reset from the PHY\n");
+ dev_warn(dev, "continuing, but please update your DT\n");
+ } else {
+ /*
+ * PHY driver performs UTMI-pads reset in a case of a
+ * non-legacy DT.
+ */
+ reset_control_put(rst_utmi);
+ }
+
+ of_node_put(phy_np);
+
+ /* reset control is shared, hence initialize it first */
+ err = reset_control_deassert(rst);
+ if (err)
+ return err;
+
+ err = reset_control_assert(rst);
+ if (err)
+ return err;
+
+ udelay(1);
+
+ err = reset_control_deassert(rst);
+ if (err)
+ return err;
+
+ return 0;
+}
+
+static int tegra_usb_notify_event(struct ci_hdrc *ci, unsigned int event)
+{
+ struct tegra_usb *usb = dev_get_drvdata(ci->dev->parent);
+ struct ehci_hcd *ehci;
+
+ switch (event) {
+ case CI_HDRC_CONTROLLER_RESET_EVENT:
+ if (ci->hcd) {
+ ehci = hcd_to_ehci(ci->hcd);
+ ehci->has_tdi_phy_lpm = false;
+ ehci_writel(ehci, usb->soc->txfifothresh << 16,
+ &ehci->regs->txfill_tuning);
+ }
+ break;
+ }
+
+ return 0;
+}
+
+static int tegra_usb_internal_port_reset(struct ehci_hcd *ehci,
+ u32 __iomem *portsc_reg,
+ unsigned long *flags)
+{
+ u32 saved_usbintr, temp;
+ unsigned int i, tries;
+ int retval = 0;
+
+ saved_usbintr = ehci_readl(ehci, &ehci->regs->intr_enable);
+ /* disable USB interrupt */
+ ehci_writel(ehci, 0, &ehci->regs->intr_enable);
+ spin_unlock_irqrestore(&ehci->lock, *flags);
+
+ /*
+ * Here we have to do Port Reset at most twice for
+ * Port Enable bit to be set.
+ */
+ for (i = 0; i < 2; i++) {
+ temp = ehci_readl(ehci, portsc_reg);
+ temp |= PORT_RESET;
+ ehci_writel(ehci, temp, portsc_reg);
+ fsleep(10000);
+ temp &= ~PORT_RESET;
+ ehci_writel(ehci, temp, portsc_reg);
+ fsleep(1000);
+ tries = 100;
+ do {
+ fsleep(1000);
+ /*
+ * Up to this point, Port Enable bit is
+ * expected to be set after 2 ms waiting.
+ * USB1 usually takes extra 45 ms, for safety,
+ * we take 100 ms as timeout.
+ */
+ temp = ehci_readl(ehci, portsc_reg);
+ } while (!(temp & PORT_PE) && tries--);
+ if (temp & PORT_PE)
+ break;
+ }
+ if (i == 2)
+ retval = -ETIMEDOUT;
+
+ /*
+ * Clear Connect Status Change bit if it's set.
+ * We can't clear PORT_PEC. It will also cause PORT_PE to be cleared.
+ */
+ if (temp & PORT_CSC)
+ ehci_writel(ehci, PORT_CSC, portsc_reg);
+
+ /*
+ * Write to clear any interrupt status bits that might be set
+ * during port reset.
+ */
+ temp = ehci_readl(ehci, &ehci->regs->status);
+ ehci_writel(ehci, temp, &ehci->regs->status);
+
+ /* restore original interrupt-enable bits */
+ spin_lock_irqsave(&ehci->lock, *flags);
+ ehci_writel(ehci, saved_usbintr, &ehci->regs->intr_enable);
+
+ return retval;
+}
+
+static int tegra_ehci_hub_control(struct ci_hdrc *ci, u16 typeReq, u16 wValue,
+ u16 wIndex, char *buf, u16 wLength,
+ bool *done, unsigned long *flags)
+{
+ struct tegra_usb *usb = dev_get_drvdata(ci->dev->parent);
+ struct ehci_hcd *ehci = hcd_to_ehci(ci->hcd);
+ u32 __iomem *status_reg;
+ int retval = 0;
+
+ status_reg = &ehci->regs->port_status[(wIndex & 0xff) - 1];
+
+ switch (typeReq) {
+ case SetPortFeature:
+ if (wValue != USB_PORT_FEAT_RESET || !usb->needs_double_reset)
+ break;
+
+ /* for USB1 port we need to issue Port Reset twice internally */
+ retval = tegra_usb_internal_port_reset(ehci, status_reg, flags);
+ *done = true;
+ break;
+ }
+
+ return retval;
+}
-static int tegra_udc_probe(struct platform_device *pdev)
+static void tegra_usb_enter_lpm(struct ci_hdrc *ci, bool enable)
{
- const struct tegra_udc_soc_info *soc;
- struct tegra_udc *udc;
+ /*
+ * Touching any register which belongs to AHB clock domain will
+ * hang CPU if USB controller is put into low power mode because
+ * AHB USB clock is gated on Tegra in the LPM.
+ *
+ * Tegra PHY has a separate register for checking the clock status
+ * and usb_phy_set_suspend() takes care of gating/ungating the clocks
+ * and restoring the PHY state on Tegra. Hence DEVLC/PORTSC registers
+ * shouldn't be touched directly by the CI driver.
+ */
+ usb_phy_set_suspend(ci->usb_phy, enable);
+}
+
+static int tegra_usb_probe(struct platform_device *pdev)
+{
+ const struct tegra_usb_soc_info *soc;
+ struct tegra_usb *usb;
int err;
- udc = devm_kzalloc(&pdev->dev, sizeof(*udc), GFP_KERNEL);
- if (!udc)
+ usb = devm_kzalloc(&pdev->dev, sizeof(*usb), GFP_KERNEL);
+ if (!usb)
return -ENOMEM;
soc = of_device_get_match_data(&pdev->dev);
@@ -63,70 +284,99 @@ static int tegra_udc_probe(struct platform_device *pdev)
return -EINVAL;
}
- udc->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0);
- if (IS_ERR(udc->phy)) {
- err = PTR_ERR(udc->phy);
+ usb->phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0);
+ if (IS_ERR(usb->phy)) {
+ err = PTR_ERR(usb->phy);
dev_err(&pdev->dev, "failed to get PHY: %d\n", err);
return err;
}
- udc->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(udc->clk)) {
- err = PTR_ERR(udc->clk);
+ usb->clk = devm_clk_get(&pdev->dev, NULL);
+ if (IS_ERR(usb->clk)) {
+ err = PTR_ERR(usb->clk);
dev_err(&pdev->dev, "failed to get clock: %d\n", err);
return err;
}
- err = clk_prepare_enable(udc->clk);
+ err = clk_prepare_enable(usb->clk);
if (err < 0) {
dev_err(&pdev->dev, "failed to enable clock: %d\n", err);
return err;
}
- /* setup and register ChipIdea HDRC device */
- udc->data.name = "tegra-udc";
- udc->data.flags = soc->flags;
- udc->data.usb_phy = udc->phy;
- udc->data.capoffset = DEF_CAPOFFSET;
-
- udc->dev = ci_hdrc_add_device(&pdev->dev, pdev->resource,
- pdev->num_resources, &udc->data);
- if (IS_ERR(udc->dev)) {
- err = PTR_ERR(udc->dev);
- dev_err(&pdev->dev, "failed to add HDRC device: %d\n", err);
+ if (device_property_present(&pdev->dev, "nvidia,needs-double-reset"))
+ usb->needs_double_reset = true;
+
+ err = tegra_usb_reset_controller(&pdev->dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to reset controller: %d\n", err);
goto fail_power_off;
}
- platform_set_drvdata(pdev, udc);
+ /*
+ * USB controller registers shouldn't be touched before PHY is
+ * initialized, otherwise CPU will hang because clocks are gated.
+ * PHY driver controls gating of internal USB clocks on Tegra.
+ */
+ err = usb_phy_init(usb->phy);
+ if (err)
+ goto fail_power_off;
+
+ platform_set_drvdata(pdev, usb);
+
+ /* setup and register ChipIdea HDRC device */
+ usb->soc = soc;
+ usb->data.name = "tegra-usb";
+ usb->data.flags = soc->flags;
+ usb->data.usb_phy = usb->phy;
+ usb->data.dr_mode = soc->dr_mode;
+ usb->data.capoffset = DEF_CAPOFFSET;
+ usb->data.enter_lpm = tegra_usb_enter_lpm;
+ usb->data.hub_control = tegra_ehci_hub_control;
+ usb->data.notify_event = tegra_usb_notify_event;
+
+ /* Tegra PHY driver currently doesn't support LPM for ULPI */
+ if (of_usb_get_phy_mode(pdev->dev.of_node) == USBPHY_INTERFACE_MODE_ULPI)
+ usb->data.flags &= ~CI_HDRC_SUPPORTS_RUNTIME_PM;
+
+ usb->dev = ci_hdrc_add_device(&pdev->dev, pdev->resource,
+ pdev->num_resources, &usb->data);
+ if (IS_ERR(usb->dev)) {
+ err = PTR_ERR(usb->dev);
+ dev_err(&pdev->dev, "failed to add HDRC device: %d\n", err);
+ goto phy_shutdown;
+ }
return 0;
+phy_shutdown:
+ usb_phy_shutdown(usb->phy);
fail_power_off:
- clk_disable_unprepare(udc->clk);
+ clk_disable_unprepare(usb->clk);
return err;
}
-static int tegra_udc_remove(struct platform_device *pdev)
+static int tegra_usb_remove(struct platform_device *pdev)
{
- struct tegra_udc *udc = platform_get_drvdata(pdev);
+ struct tegra_usb *usb = platform_get_drvdata(pdev);
- ci_hdrc_remove_device(udc->dev);
- clk_disable_unprepare(udc->clk);
+ ci_hdrc_remove_device(usb->dev);
+ usb_phy_shutdown(usb->phy);
+ clk_disable_unprepare(usb->clk);
return 0;
}
-static struct platform_driver tegra_udc_driver = {
+static struct platform_driver tegra_usb_driver = {
.driver = {
- .name = "tegra-udc",
- .of_match_table = tegra_udc_of_match,
+ .name = "tegra-usb",
+ .of_match_table = tegra_usb_of_match,
},
- .probe = tegra_udc_probe,
- .remove = tegra_udc_remove,
+ .probe = tegra_usb_probe,
+ .remove = tegra_usb_remove,
};
-module_platform_driver(tegra_udc_driver);
+module_platform_driver(tegra_usb_driver);
-MODULE_DESCRIPTION("NVIDIA Tegra USB device mode driver");
+MODULE_DESCRIPTION("NVIDIA Tegra USB driver");
MODULE_AUTHOR("Thierry Reding <treding@nvidia.com>");
-MODULE_ALIAS("platform:tegra-udc");
MODULE_LICENSE("GPL v2");
diff --git a/drivers/usb/chipidea/core.c b/drivers/usb/chipidea/core.c
index aa40e510b806..3f6c21406dbd 100644
--- a/drivers/usb/chipidea/core.c
+++ b/drivers/usb/chipidea/core.c
@@ -195,7 +195,7 @@ static void hw_wait_phy_stable(void)
}
/* The PHY enters/leaves low power mode */
-static void ci_hdrc_enter_lpm(struct ci_hdrc *ci, bool enable)
+static void ci_hdrc_enter_lpm_common(struct ci_hdrc *ci, bool enable)
{
enum ci_hw_regs reg = ci->hw_bank.lpm ? OP_DEVLC : OP_PORTSC;
bool lpm = !!(hw_read(ci, reg, PORTSC_PHCD(ci->hw_bank.lpm)));
@@ -208,6 +208,11 @@ static void ci_hdrc_enter_lpm(struct ci_hdrc *ci, bool enable)
0);
}
+static void ci_hdrc_enter_lpm(struct ci_hdrc *ci, bool enable)
+{
+ return ci->platdata->enter_lpm(ci, enable);
+}
+
static int hw_device_init(struct ci_hdrc *ci, void __iomem *base)
{
u32 reg;
@@ -790,6 +795,9 @@ static int ci_get_platdata(struct device *dev,
platdata->pins_device = p;
}
+ if (!platdata->enter_lpm)
+ platdata->enter_lpm = ci_hdrc_enter_lpm_common;
+
return 0;
}
diff --git a/drivers/usb/chipidea/host.c b/drivers/usb/chipidea/host.c
index 48e4a5ca1835..67247d2ac07a 100644
--- a/drivers/usb/chipidea/host.c
+++ b/drivers/usb/chipidea/host.c
@@ -29,6 +29,12 @@ struct ehci_ci_priv {
bool enabled;
};
+struct ci_hdrc_dma_aligned_buffer {
+ void *kmalloc_ptr;
+ void *old_xfer_buffer;
+ u8 data[0];
+};
+
static int ehci_ci_portpower(struct usb_hcd *hcd, int portnum, bool enable)
{
struct ehci_hcd *ehci = hcd_to_ehci(hcd);
@@ -160,14 +166,15 @@ static int host_start(struct ci_hdrc *ci)
pinctrl_select_state(ci->platdata->pctl,
ci->platdata->pins_host);
+ ci->hcd = hcd;
+
ret = usb_add_hcd(hcd, 0, 0);
if (ret) {
+ ci->hcd = NULL;
goto disable_reg;
} else {
struct usb_otg *otg = &ci->otg;
- ci->hcd = hcd;
-
if (ci_otg_is_fsm_mode(ci)) {
otg->host = &hcd->self;
hcd->self.otg_port = 1;
@@ -237,6 +244,7 @@ static int ci_ehci_hub_control(
u32 temp;
unsigned long flags;
int retval = 0;
+ bool done = false;
struct device *dev = hcd->self.controller;
struct ci_hdrc *ci = dev_get_drvdata(dev);
@@ -244,6 +252,13 @@ static int ci_ehci_hub_control(
spin_lock_irqsave(&ehci->lock, flags);
+ if (ci->platdata->hub_control) {
+ retval = ci->platdata->hub_control(ci, typeReq, wValue, wIndex,
+ buf, wLength, &done, &flags);
+ if (done)
+ goto done;
+ }
+
if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
temp = ehci_readl(ehci, status_reg);
if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
@@ -349,6 +364,86 @@ static int ci_ehci_bus_suspend(struct usb_hcd *hcd)
return 0;
}
+static void ci_hdrc_free_dma_aligned_buffer(struct urb *urb)
+{
+ struct ci_hdrc_dma_aligned_buffer *temp;
+ size_t length;
+
+ if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
+ return;
+
+ temp = container_of(urb->transfer_buffer,
+ struct ci_hdrc_dma_aligned_buffer, data);
+
+ if (usb_urb_dir_in(urb)) {
+ if (usb_pipeisoc(urb->pipe))
+ length = urb->transfer_buffer_length;
+ else
+ length = urb->actual_length;
+
+ memcpy(temp->old_xfer_buffer, temp->data, length);
+ }
+ urb->transfer_buffer = temp->old_xfer_buffer;
+ kfree(temp->kmalloc_ptr);
+
+ urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
+}
+
+static int ci_hdrc_alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
+{
+ struct ci_hdrc_dma_aligned_buffer *temp, *kmalloc_ptr;
+ const unsigned int ci_hdrc_usb_dma_align = 32;
+ size_t kmalloc_size;
+
+ if (urb->num_sgs || urb->sg || urb->transfer_buffer_length == 0 ||
+ !((uintptr_t)urb->transfer_buffer & (ci_hdrc_usb_dma_align - 1)))
+ return 0;
+
+ /* Allocate a buffer with enough padding for alignment */
+ kmalloc_size = urb->transfer_buffer_length +
+ sizeof(struct ci_hdrc_dma_aligned_buffer) +
+ ci_hdrc_usb_dma_align - 1;
+
+ kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
+ if (!kmalloc_ptr)
+ return -ENOMEM;
+
+ /* Position our struct dma_aligned_buffer such that data is aligned */
+ temp = PTR_ALIGN(kmalloc_ptr + 1, ci_hdrc_usb_dma_align) - 1;
+ temp->kmalloc_ptr = kmalloc_ptr;
+ temp->old_xfer_buffer = urb->transfer_buffer;
+ if (usb_urb_dir_out(urb))
+ memcpy(temp->data, urb->transfer_buffer,
+ urb->transfer_buffer_length);
+ urb->transfer_buffer = temp->data;
+
+ urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
+
+ return 0;
+}
+
+static int ci_hdrc_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
+ gfp_t mem_flags)
+{
+ int ret;
+
+ ret = ci_hdrc_alloc_dma_aligned_buffer(urb, mem_flags);
+ if (ret)
+ return ret;
+
+ ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
+ if (ret)
+ ci_hdrc_free_dma_aligned_buffer(urb);
+
+ return ret;
+}
+
+static void ci_hdrc_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
+{
+ usb_hcd_unmap_urb_for_dma(hcd, urb);
+ ci_hdrc_free_dma_aligned_buffer(urb);
+}
+
int ci_hdrc_host_init(struct ci_hdrc *ci)
{
struct ci_role_driver *rdrv;
@@ -366,6 +461,11 @@ int ci_hdrc_host_init(struct ci_hdrc *ci)
rdrv->name = "host";
ci->roles[CI_ROLE_HOST] = rdrv;
+ if (ci->platdata->flags & CI_HDRC_REQUIRES_ALIGNED_DMA) {
+ ci_ehci_hc_driver.map_urb_for_dma = ci_hdrc_map_urb_for_dma;
+ ci_ehci_hc_driver.unmap_urb_for_dma = ci_hdrc_unmap_urb_for_dma;
+ }
+
return 0;
}
diff --git a/drivers/usb/class/cdc-acm.c b/drivers/usb/class/cdc-acm.c
index f52f1bc0559f..781905745812 100644
--- a/drivers/usb/class/cdc-acm.c
+++ b/drivers/usb/class/cdc-acm.c
@@ -1895,6 +1895,10 @@ static const struct usb_device_id acm_ids[] = {
{ USB_DEVICE(0x04d8, 0xfd08),
.driver_info = IGNORE_DEVICE,
},
+
+ { USB_DEVICE(0x04d8, 0xf58b),
+ .driver_info = IGNORE_DEVICE,
+ },
#endif
/*Samsung phone in firmware update mode */
diff --git a/drivers/usb/class/cdc-wdm.c b/drivers/usb/class/cdc-wdm.c
index 02d0cfd23bb2..508b1c3f8b73 100644
--- a/drivers/usb/class/cdc-wdm.c
+++ b/drivers/usb/class/cdc-wdm.c
@@ -465,13 +465,23 @@ static int service_outstanding_interrupt(struct wdm_device *desc)
if (!desc->resp_count || !--desc->resp_count)
goto out;
+ if (test_bit(WDM_DISCONNECTING, &desc->flags)) {
+ rv = -ENODEV;
+ goto out;
+ }
+ if (test_bit(WDM_RESETTING, &desc->flags)) {
+ rv = -EIO;
+ goto out;
+ }
+
set_bit(WDM_RESPONDING, &desc->flags);
spin_unlock_irq(&desc->iuspin);
rv = usb_submit_urb(desc->response, GFP_KERNEL);
spin_lock_irq(&desc->iuspin);
if (rv) {
- dev_err(&desc->intf->dev,
- "usb_submit_urb failed with result %d\n", rv);
+ if (!test_bit(WDM_DISCONNECTING, &desc->flags))
+ dev_err(&desc->intf->dev,
+ "usb_submit_urb failed with result %d\n", rv);
/* make sure the next notification trigger a submit */
clear_bit(WDM_RESPONDING, &desc->flags);
@@ -1027,9 +1037,9 @@ static void wdm_disconnect(struct usb_interface *intf)
wake_up_all(&desc->wait);
mutex_lock(&desc->rlock);
mutex_lock(&desc->wlock);
- kill_urbs(desc);
cancel_work_sync(&desc->rxwork);
cancel_work_sync(&desc->service_outs_intr);
+ kill_urbs(desc);
mutex_unlock(&desc->wlock);
mutex_unlock(&desc->rlock);
diff --git a/drivers/usb/class/usblp.c b/drivers/usb/class/usblp.c
index 67cbd42421be..c9f6e9758288 100644
--- a/drivers/usb/class/usblp.c
+++ b/drivers/usb/class/usblp.c
@@ -274,8 +274,25 @@ static int usblp_ctrl_msg(struct usblp *usblp, int request, int type, int dir, i
#define usblp_reset(usblp)\
usblp_ctrl_msg(usblp, USBLP_REQ_RESET, USB_TYPE_CLASS, USB_DIR_OUT, USB_RECIP_OTHER, 0, NULL, 0)
-#define usblp_hp_channel_change_request(usblp, channel, buffer) \
- usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST, USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE, channel, buffer, 1)
+static int usblp_hp_channel_change_request(struct usblp *usblp, int channel, u8 *new_channel)
+{
+ u8 *buf;
+ int ret;
+
+ buf = kzalloc(1, GFP_KERNEL);
+ if (!buf)
+ return -ENOMEM;
+
+ ret = usblp_ctrl_msg(usblp, USBLP_REQ_HP_CHANNEL_CHANGE_REQUEST,
+ USB_TYPE_VENDOR, USB_DIR_IN, USB_RECIP_INTERFACE,
+ channel, buf, 1);
+ if (ret == 0)
+ *new_channel = buf[0];
+
+ kfree(buf);
+
+ return ret;
+}
/*
* See the description for usblp_select_alts() below for the usage
@@ -1312,14 +1329,17 @@ static int usblp_set_protocol(struct usblp *usblp, int protocol)
if (protocol < USBLP_FIRST_PROTOCOL || protocol > USBLP_LAST_PROTOCOL)
return -EINVAL;
- alts = usblp->protocol[protocol].alt_setting;
- if (alts < 0)
- return -EINVAL;
- r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
- if (r < 0) {
- printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
- alts, usblp->ifnum);
- return r;
+ /* Don't unnecessarily set the interface if there's a single alt. */
+ if (usblp->intf->num_altsetting > 1) {
+ alts = usblp->protocol[protocol].alt_setting;
+ if (alts < 0)
+ return -EINVAL;
+ r = usb_set_interface(usblp->dev, usblp->ifnum, alts);
+ if (r < 0) {
+ printk(KERN_ERR "usblp: can't set desired altsetting %d on interface %d\n",
+ alts, usblp->ifnum);
+ return r;
+ }
}
usblp->bidir = (usblp->protocol[protocol].epread != NULL);
diff --git a/drivers/usb/class/usbtmc.c b/drivers/usb/class/usbtmc.c
index b222b777e6a4..74d5a9c5238a 100644
--- a/drivers/usb/class/usbtmc.c
+++ b/drivers/usb/class/usbtmc.c
@@ -25,7 +25,7 @@
/* Increment API VERSION when changing tmc.h with new flags or ioctls
* or when changing a significant behavior of the driver.
*/
-#define USBTMC_API_VERSION (2)
+#define USBTMC_API_VERSION (3)
#define USBTMC_HEADER_SIZE 12
#define USBTMC_MINOR_BASE 176
@@ -475,33 +475,17 @@ static int usbtmc_ioctl_abort_bulk_out(struct usbtmc_device_data *data)
return usbtmc_ioctl_abort_bulk_out_tag(data, data->bTag_last_write);
}
-static int usbtmc488_ioctl_read_stb(struct usbtmc_file_data *file_data,
- void __user *arg)
+static int usbtmc_get_stb(struct usbtmc_file_data *file_data, __u8 *stb)
{
struct usbtmc_device_data *data = file_data->data;
struct device *dev = &data->intf->dev;
- int srq_asserted = 0;
u8 *buffer;
u8 tag;
- __u8 stb;
int rv;
dev_dbg(dev, "Enter ioctl_read_stb iin_ep_present: %d\n",
data->iin_ep_present);
- spin_lock_irq(&data->dev_lock);
- srq_asserted = atomic_xchg(&file_data->srq_asserted, srq_asserted);
- if (srq_asserted) {
- /* a STB with SRQ is already received */
- stb = file_data->srq_byte;
- spin_unlock_irq(&data->dev_lock);
- rv = put_user(stb, (__u8 __user *)arg);
- dev_dbg(dev, "stb:0x%02x with srq received %d\n",
- (unsigned int)stb, rv);
- return rv;
- }
- spin_unlock_irq(&data->dev_lock);
-
buffer = kmalloc(8, GFP_KERNEL);
if (!buffer)
return -ENOMEM;
@@ -548,13 +532,12 @@ static int usbtmc488_ioctl_read_stb(struct usbtmc_file_data *file_data,
data->iin_bTag, tag);
}
- stb = data->bNotify2;
+ *stb = data->bNotify2;
} else {
- stb = buffer[2];
+ *stb = buffer[2];
}
- rv = put_user(stb, (__u8 __user *)arg);
- dev_dbg(dev, "stb:0x%02x received %d\n", (unsigned int)stb, rv);
+ dev_dbg(dev, "stb:0x%02x received %d\n", (unsigned int)*stb, rv);
exit:
/* bump interrupt bTag */
@@ -567,6 +550,53 @@ static int usbtmc488_ioctl_read_stb(struct usbtmc_file_data *file_data,
return rv;
}
+static int usbtmc488_ioctl_read_stb(struct usbtmc_file_data *file_data,
+ void __user *arg)
+{
+ int srq_asserted = 0;
+ __u8 stb;
+ int rv;
+
+ rv = usbtmc_get_stb(file_data, &stb);
+
+ if (rv > 0) {
+ srq_asserted = atomic_xchg(&file_data->srq_asserted,
+ srq_asserted);
+ if (srq_asserted)
+ stb |= 0x40; /* Set RQS bit */
+
+ rv = put_user(stb, (__u8 __user *)arg);
+ }
+ return rv;
+
+}
+
+static int usbtmc_ioctl_get_srq_stb(struct usbtmc_file_data *file_data,
+ void __user *arg)
+{
+ struct usbtmc_device_data *data = file_data->data;
+ struct device *dev = &data->intf->dev;
+ int srq_asserted = 0;
+ __u8 stb = 0;
+ int rv;
+
+ spin_lock_irq(&data->dev_lock);
+ srq_asserted = atomic_xchg(&file_data->srq_asserted, srq_asserted);
+
+ if (srq_asserted) {
+ stb = file_data->srq_byte;
+ spin_unlock_irq(&data->dev_lock);
+ rv = put_user(stb, (__u8 __user *)arg);
+ } else {
+ spin_unlock_irq(&data->dev_lock);
+ rv = -ENOMSG;
+ }
+
+ dev_dbg(dev, "stb:0x%02x with srq received %d\n", (unsigned int)stb, rv);
+
+ return rv;
+}
+
static int usbtmc488_ioctl_wait_srq(struct usbtmc_file_data *file_data,
__u32 __user *arg)
{
@@ -2145,6 +2175,17 @@ static long usbtmc_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
file_data->auto_abort = !!tmp_byte;
break;
+ case USBTMC_IOCTL_GET_STB:
+ retval = usbtmc_get_stb(file_data, &tmp_byte);
+ if (retval > 0)
+ retval = put_user(tmp_byte, (__u8 __user *)arg);
+ break;
+
+ case USBTMC_IOCTL_GET_SRQ_STB:
+ retval = usbtmc_ioctl_get_srq_stb(file_data,
+ (void __user *)arg);
+ break;
+
case USBTMC_IOCTL_CANCEL_IO:
retval = usbtmc_ioctl_cancel_io(file_data);
break;
diff --git a/drivers/usb/common/common.c b/drivers/usb/common/common.c
index 1433260d99b4..fc21cf2d36f6 100644
--- a/drivers/usb/common/common.c
+++ b/drivers/usb/common/common.c
@@ -69,6 +69,13 @@ static const char *const speed_names[] = {
[USB_SPEED_SUPER_PLUS] = "super-speed-plus",
};
+static const char *const ssp_rate[] = {
+ [USB_SSP_GEN_UNKNOWN] = "UNKNOWN",
+ [USB_SSP_GEN_2x1] = "super-speed-plus-gen2x1",
+ [USB_SSP_GEN_1x2] = "super-speed-plus-gen1x2",
+ [USB_SSP_GEN_2x2] = "super-speed-plus-gen2x2",
+};
+
const char *usb_speed_string(enum usb_device_speed speed)
{
if (speed < 0 || speed >= ARRAY_SIZE(speed_names))
@@ -86,12 +93,29 @@ enum usb_device_speed usb_get_maximum_speed(struct device *dev)
if (ret < 0)
return USB_SPEED_UNKNOWN;
- ret = match_string(speed_names, ARRAY_SIZE(speed_names), maximum_speed);
+ ret = match_string(ssp_rate, ARRAY_SIZE(ssp_rate), maximum_speed);
+ if (ret > 0)
+ return USB_SPEED_SUPER_PLUS;
+ ret = match_string(speed_names, ARRAY_SIZE(speed_names), maximum_speed);
return (ret < 0) ? USB_SPEED_UNKNOWN : ret;
}
EXPORT_SYMBOL_GPL(usb_get_maximum_speed);
+enum usb_ssp_rate usb_get_maximum_ssp_rate(struct device *dev)
+{
+ const char *maximum_speed;
+ int ret;
+
+ ret = device_property_read_string(dev, "maximum-speed", &maximum_speed);
+ if (ret < 0)
+ return USB_SSP_GEN_UNKNOWN;
+
+ ret = match_string(ssp_rate, ARRAY_SIZE(ssp_rate), maximum_speed);
+ return (ret < 0) ? USB_SSP_GEN_UNKNOWN : ret;
+}
+EXPORT_SYMBOL_GPL(usb_get_maximum_ssp_rate);
+
const char *usb_state_string(enum usb_device_state state)
{
static const char *const names[] = {
diff --git a/drivers/usb/core/hcd.c b/drivers/usb/core/hcd.c
index 60886a7464c3..ad5a0f405a75 100644
--- a/drivers/usb/core/hcd.c
+++ b/drivers/usb/core/hcd.c
@@ -1649,14 +1649,12 @@ static void __usb_hcd_giveback_urb(struct urb *urb)
urb->status = status;
/*
* This function can be called in task context inside another remote
- * coverage collection section, but KCOV doesn't support that kind of
+ * coverage collection section, but kcov doesn't support that kind of
* recursion yet. Only collect coverage in softirq context for now.
*/
- if (in_serving_softirq())
- kcov_remote_start_usb((u64)urb->dev->bus->busnum);
+ kcov_remote_start_usb_softirq((u64)urb->dev->bus->busnum);
urb->complete(urb);
- if (in_serving_softirq())
- kcov_remote_stop();
+ kcov_remote_stop_softirq();
usb_anchor_resume_wakeups(anchor);
atomic_dec(&urb->use_count);
diff --git a/drivers/usb/dwc2/gadget.c b/drivers/usb/dwc2/gadget.c
index 0a0d11151cfb..ad4c94366dad 100644
--- a/drivers/usb/dwc2/gadget.c
+++ b/drivers/usb/dwc2/gadget.c
@@ -1543,7 +1543,6 @@ static void dwc2_hsotg_complete_oursetup(struct usb_ep *ep,
static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
u32 windex)
{
- struct dwc2_hsotg_ep *ep;
int dir = (windex & USB_DIR_IN) ? 1 : 0;
int idx = windex & 0x7F;
@@ -1553,12 +1552,7 @@ static struct dwc2_hsotg_ep *ep_from_windex(struct dwc2_hsotg *hsotg,
if (idx > hsotg->num_of_eps)
return NULL;
- ep = index_to_ep(hsotg, idx, dir);
-
- if (idx && ep->dir_in != dir)
- return NULL;
-
- return ep;
+ return index_to_ep(hsotg, idx, dir);
}
/**
diff --git a/drivers/usb/dwc2/hcd.c b/drivers/usb/dwc2/hcd.c
index e9ac215b9663..fc3269f5faf1 100644
--- a/drivers/usb/dwc2/hcd.c
+++ b/drivers/usb/dwc2/hcd.c
@@ -1313,19 +1313,20 @@ static void dwc2_hc_start_transfer(struct dwc2_hsotg *hsotg,
if (num_packets > max_hc_pkt_count) {
num_packets = max_hc_pkt_count;
chan->xfer_len = num_packets * chan->max_packet;
+ } else if (chan->ep_is_in) {
+ /*
+ * Always program an integral # of max packets
+ * for IN transfers.
+ * Note: This assumes that the input buffer is
+ * aligned and sized accordingly.
+ */
+ chan->xfer_len = num_packets * chan->max_packet;
}
} else {
/* Need 1 packet for transfer length of 0 */
num_packets = 1;
}
- if (chan->ep_is_in)
- /*
- * Always program an integral # of max packets for IN
- * transfers
- */
- chan->xfer_len = num_packets * chan->max_packet;
-
if (chan->ep_type == USB_ENDPOINT_XFER_INT ||
chan->ep_type == USB_ENDPOINT_XFER_ISOC)
/*
diff --git a/drivers/usb/dwc2/hcd_intr.c b/drivers/usb/dwc2/hcd_intr.c
index a052d39b4375..d5f4ec1b73b1 100644
--- a/drivers/usb/dwc2/hcd_intr.c
+++ b/drivers/usb/dwc2/hcd_intr.c
@@ -500,7 +500,7 @@ static int dwc2_update_urb_state(struct dwc2_hsotg *hsotg,
&short_read);
if (urb->actual_length + xfer_length > urb->length) {
- dev_warn(hsotg->dev, "%s(): trimming xfer length\n", __func__);
+ dev_dbg(hsotg->dev, "%s(): trimming xfer length\n", __func__);
xfer_length = urb->length - urb->actual_length;
}
@@ -1977,6 +1977,18 @@ error:
qtd->error_count++;
dwc2_update_urb_state_abn(hsotg, chan, chnum, qtd->urb,
qtd, DWC2_HC_XFER_XACT_ERR);
+ /*
+ * We can get here after a completed transaction
+ * (urb->actual_length >= urb->length) which was not reported
+ * as completed. If that is the case, and we do not abort
+ * the transfer, a transfer of size 0 will be enqueued
+ * subsequently. If urb->actual_length is not DMA-aligned,
+ * the buffer will then point to an unaligned address, and
+ * the resulting behavior is undefined. Bail out in that
+ * situation.
+ */
+ if (qtd->urb->actual_length >= qtd->urb->length)
+ qtd->error_count = 3;
dwc2_hcd_save_data_toggle(hsotg, chan, chnum, qtd);
dwc2_halt_channel(hsotg, chan, qtd, DWC2_HC_XFER_XACT_ERR);
}
diff --git a/drivers/usb/dwc2/params.c b/drivers/usb/dwc2/params.c
index 267543c3dc38..92df3d620f7d 100644
--- a/drivers/usb/dwc2/params.c
+++ b/drivers/usb/dwc2/params.c
@@ -177,7 +177,10 @@ static void dwc2_set_stm32mp15_fsotg_params(struct dwc2_hsotg *hsotg)
p->i2c_enable = false;
p->activate_stm_fs_transceiver = true;
p->activate_stm_id_vb_detection = true;
+ p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 << GAHBCFG_HBSTLEN_SHIFT;
p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
+ p->host_support_fs_ls_low_power = true;
+ p->host_ls_low_power_phy_clk = true;
}
static void dwc2_set_stm32mp15_hsotg_params(struct dwc2_hsotg *hsotg)
@@ -189,7 +192,12 @@ static void dwc2_set_stm32mp15_hsotg_params(struct dwc2_hsotg *hsotg)
p->host_rx_fifo_size = 440;
p->host_nperio_tx_fifo_size = 256;
p->host_perio_tx_fifo_size = 256;
+ p->ahbcfg = GAHBCFG_HBSTLEN_INCR16 << GAHBCFG_HBSTLEN_SHIFT;
p->power_down = DWC2_POWER_DOWN_PARAM_NONE;
+ p->lpm = false;
+ p->lpm_clock_gating = false;
+ p->besl = false;
+ p->hird_threshold_en = false;
}
const struct of_device_id dwc2_of_match_table[] = {
diff --git a/drivers/usb/dwc2/pci.c b/drivers/usb/dwc2/pci.c
index 7afc10872f1f..0000151e3ca9 100644
--- a/drivers/usb/dwc2/pci.c
+++ b/drivers/usb/dwc2/pci.c
@@ -63,20 +63,6 @@ struct dwc2_pci_glue {
struct platform_device *phy;
};
-static int dwc2_pci_quirks(struct pci_dev *pdev, struct platform_device *dwc2)
-{
- if (pdev->vendor == PCI_VENDOR_ID_SYNOPSYS &&
- pdev->device == PCI_PRODUCT_ID_HAPS_HSOTG) {
- struct property_entry properties[] = {
- { },
- };
-
- return platform_device_add_properties(dwc2, properties);
- }
-
- return 0;
-}
-
/**
* dwc2_pci_probe() - Provides the cleanup entry points for the DWC_otg PCI
* driver
@@ -143,10 +129,6 @@ static int dwc2_pci_probe(struct pci_dev *pci,
dwc2->dev.parent = dev;
- ret = dwc2_pci_quirks(pci, dwc2);
- if (ret)
- goto err;
-
glue = devm_kzalloc(dev, sizeof(*glue), GFP_KERNEL);
if (!glue) {
ret = -ENOMEM;
diff --git a/drivers/usb/dwc3/Kconfig b/drivers/usb/dwc3/Kconfig
index 7a2304565a73..2133acf8ee69 100644
--- a/drivers/usb/dwc3/Kconfig
+++ b/drivers/usb/dwc3/Kconfig
@@ -139,4 +139,14 @@ config USB_DWC3_QCOM
for peripheral mode support.
Say 'Y' or 'M' if you have one such device.
+config USB_DWC3_IMX8MP
+ tristate "NXP iMX8MP Platform"
+ depends on OF && COMMON_CLK
+ depends on (ARCH_MXC && ARM64) || COMPILE_TEST
+ default USB_DWC3
+ help
+ NXP iMX8M Plus SoC use DesignWare Core IP for USB2/3
+ functionality.
+ Say 'Y' or 'M' if you have one such device.
+
endif
diff --git a/drivers/usb/dwc3/Makefile b/drivers/usb/dwc3/Makefile
index ae86da0dc5bd..2259f8876fb2 100644
--- a/drivers/usb/dwc3/Makefile
+++ b/drivers/usb/dwc3/Makefile
@@ -51,3 +51,4 @@ obj-$(CONFIG_USB_DWC3_MESON_G12A) += dwc3-meson-g12a.o
obj-$(CONFIG_USB_DWC3_OF_SIMPLE) += dwc3-of-simple.o
obj-$(CONFIG_USB_DWC3_ST) += dwc3-st.o
obj-$(CONFIG_USB_DWC3_QCOM) += dwc3-qcom.o
+obj-$(CONFIG_USB_DWC3_IMX8MP) += dwc3-imx8mp.o
diff --git a/drivers/usb/dwc3/core.c b/drivers/usb/dwc3/core.c
index 841daec70b6e..f2448d0a9d39 100644
--- a/drivers/usb/dwc3/core.c
+++ b/drivers/usb/dwc3/core.c
@@ -1126,11 +1126,8 @@ static int dwc3_core_get_phy(struct dwc3 *dwc)
ret = PTR_ERR(dwc->usb2_phy);
if (ret == -ENXIO || ret == -ENODEV) {
dwc->usb2_phy = NULL;
- } else if (ret == -EPROBE_DEFER) {
- return ret;
} else {
- dev_err(dev, "no usb2 phy configured\n");
- return ret;
+ return dev_err_probe(dev, ret, "no usb2 phy configured\n");
}
}
@@ -1138,11 +1135,8 @@ static int dwc3_core_get_phy(struct dwc3 *dwc)
ret = PTR_ERR(dwc->usb3_phy);
if (ret == -ENXIO || ret == -ENODEV) {
dwc->usb3_phy = NULL;
- } else if (ret == -EPROBE_DEFER) {
- return ret;
} else {
- dev_err(dev, "no usb3 phy configured\n");
- return ret;
+ return dev_err_probe(dev, ret, "no usb3 phy configured\n");
}
}
@@ -1151,11 +1145,8 @@ static int dwc3_core_get_phy(struct dwc3 *dwc)
ret = PTR_ERR(dwc->usb2_generic_phy);
if (ret == -ENOSYS || ret == -ENODEV) {
dwc->usb2_generic_phy = NULL;
- } else if (ret == -EPROBE_DEFER) {
- return ret;
} else {
- dev_err(dev, "no usb2 phy configured\n");
- return ret;
+ return dev_err_probe(dev, ret, "no usb2 phy configured\n");
}
}
@@ -1164,11 +1155,8 @@ static int dwc3_core_get_phy(struct dwc3 *dwc)
ret = PTR_ERR(dwc->usb3_generic_phy);
if (ret == -ENOSYS || ret == -ENODEV) {
dwc->usb3_generic_phy = NULL;
- } else if (ret == -EPROBE_DEFER) {
- return ret;
} else {
- dev_err(dev, "no usb3 phy configured\n");
- return ret;
+ return dev_err_probe(dev, ret, "no usb3 phy configured\n");
}
}
@@ -1190,11 +1178,8 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_DEVICE);
ret = dwc3_gadget_init(dwc);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to initialize gadget\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to initialize gadget\n");
break;
case USB_DR_MODE_HOST:
dwc3_set_prtcap(dwc, DWC3_GCTL_PRTCAP_HOST);
@@ -1205,20 +1190,14 @@ static int dwc3_core_init_mode(struct dwc3 *dwc)
phy_set_mode(dwc->usb3_generic_phy, PHY_MODE_USB_HOST);
ret = dwc3_host_init(dwc);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to initialize host\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to initialize host\n");
break;
case USB_DR_MODE_OTG:
INIT_WORK(&dwc->drd_work, __dwc3_set_mode);
ret = dwc3_drd_init(dwc);
- if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to initialize dual-role\n");
- return ret;
- }
+ if (ret)
+ return dev_err_probe(dev, ret, "failed to initialize dual-role\n");
break;
default:
dev_err(dev, "Unsupported mode of operation %d\n", dwc->dr_mode);
@@ -1273,6 +1252,7 @@ static void dwc3_get_properties(struct dwc3 *dwc)
hird_threshold = 12;
dwc->maximum_speed = usb_get_maximum_speed(dev);
+ dwc->max_ssp_rate = usb_get_maximum_ssp_rate(dev);
dwc->dr_mode = usb_get_dr_mode(dev);
dwc->hsphy_mode = of_usb_get_phy_mode(dev->of_node);
@@ -1444,6 +1424,42 @@ static void dwc3_check_params(struct dwc3 *dwc)
}
break;
}
+
+ /*
+ * Currently the controller does not have visibility into the HW
+ * parameter to determine the maximum number of lanes the HW supports.
+ * If the number of lanes is not specified in the device property, then
+ * set the default to support dual-lane for DWC_usb32 and single-lane
+ * for DWC_usb31 for super-speed-plus.
+ */
+ if (dwc->maximum_speed == USB_SPEED_SUPER_PLUS) {
+ switch (dwc->max_ssp_rate) {
+ case USB_SSP_GEN_2x1:
+ if (hwparam_gen == DWC3_GHWPARAMS3_SSPHY_IFC_GEN1)
+ dev_warn(dev, "UDC only supports Gen 1\n");
+ break;
+ case USB_SSP_GEN_1x2:
+ case USB_SSP_GEN_2x2:
+ if (DWC3_IP_IS(DWC31))
+ dev_warn(dev, "UDC only supports single lane\n");
+ break;
+ case USB_SSP_GEN_UNKNOWN:
+ default:
+ switch (hwparam_gen) {
+ case DWC3_GHWPARAMS3_SSPHY_IFC_GEN2:
+ if (DWC3_IP_IS(DWC32))
+ dwc->max_ssp_rate = USB_SSP_GEN_2x2;
+ else
+ dwc->max_ssp_rate = USB_SSP_GEN_2x1;
+ break;
+ case DWC3_GHWPARAMS3_SSPHY_IFC_GEN1:
+ if (DWC3_IP_IS(DWC32))
+ dwc->max_ssp_rate = USB_SSP_GEN_1x2;
+ break;
+ }
+ break;
+ }
+ }
}
static int dwc3_probe(struct platform_device *pdev)
@@ -1490,7 +1506,7 @@ static int dwc3_probe(struct platform_device *pdev)
dwc3_get_properties(dwc);
- dwc->reset = devm_reset_control_array_get(dev, true, true);
+ dwc->reset = devm_reset_control_array_get_optional_shared(dev);
if (IS_ERR(dwc->reset))
return PTR_ERR(dwc->reset);
@@ -1555,8 +1571,7 @@ static int dwc3_probe(struct platform_device *pdev)
ret = dwc3_core_init(dwc);
if (ret) {
- if (ret != -EPROBE_DEFER)
- dev_err(dev, "failed to initialize core: %d\n", ret);
+ dev_err_probe(dev, ret, "failed to initialize core\n");
goto err4;
}
@@ -1758,7 +1773,7 @@ static int dwc3_resume_common(struct dwc3 *dwc, pm_message_t msg)
if (PMSG_IS_AUTO(msg))
break;
- ret = dwc3_core_init(dwc);
+ ret = dwc3_core_init_for_resume(dwc);
if (ret)
return ret;
diff --git a/drivers/usb/dwc3/core.h b/drivers/usb/dwc3/core.h
index 2f95f08ca511..052b20d52651 100644
--- a/drivers/usb/dwc3/core.h
+++ b/drivers/usb/dwc3/core.h
@@ -285,6 +285,7 @@
/* Global USB2 PHY Vendor Control Register */
#define DWC3_GUSB2PHYACC_NEWREGREQ BIT(25)
+#define DWC3_GUSB2PHYACC_DONE BIT(24)
#define DWC3_GUSB2PHYACC_BUSY BIT(23)
#define DWC3_GUSB2PHYACC_WRITE BIT(22)
#define DWC3_GUSB2PHYACC_ADDR(n) (n << 16)
@@ -385,6 +386,8 @@
#define DWC3_GUCTL3_SPLITDISABLE BIT(14)
/* Device Configuration Register */
+#define DWC3_DCFG_NUMLANES(n) (((n) & 0x3) << 30) /* DWC_usb32 only */
+
#define DWC3_DCFG_DEVADDR(addr) ((addr) << 3)
#define DWC3_DCFG_DEVADDR_MASK DWC3_DCFG_DEVADDR(0x7f)
@@ -458,6 +461,8 @@
#define DWC3_DEVTEN_USBRSTEN BIT(1)
#define DWC3_DEVTEN_DISCONNEVTEN BIT(0)
+#define DWC3_DSTS_CONNLANES(n) (((n) >> 30) & 0x3) /* DWC_usb32 only */
+
/* Device Status Register */
#define DWC3_DSTS_DCNRD BIT(29)
@@ -963,6 +968,10 @@ struct dwc3_scratchpad_array {
* @nr_scratch: number of scratch buffers
* @u1u2: only used on revisions <1.83a for workaround
* @maximum_speed: maximum speed requested (mainly for testing purposes)
+ * @max_ssp_rate: SuperSpeed Plus maximum signaling rate and lane count
+ * @gadget_max_speed: maximum gadget speed requested
+ * @gadget_ssp_rate: Gadget driver's maximum supported SuperSpeed Plus signaling
+ * rate and lane count.
* @ip: controller's ID
* @revision: controller's version of an IP
* @version_type: VERSIONTYPE register contents, a sub release of a revision
@@ -1125,6 +1134,9 @@ struct dwc3 {
u32 nr_scratch;
u32 u1u2;
u32 maximum_speed;
+ u32 gadget_max_speed;
+ enum usb_ssp_rate max_ssp_rate;
+ enum usb_ssp_rate gadget_ssp_rate;
u32 ip;
diff --git a/drivers/usb/dwc3/drd.c b/drivers/usb/dwc3/drd.c
index 3e1c1aacf002..e2b68bb770d1 100644
--- a/drivers/usb/dwc3/drd.c
+++ b/drivers/usb/dwc3/drd.c
@@ -441,8 +441,8 @@ static int dwc3_drd_notifier(struct notifier_block *nb,
static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
{
struct device *dev = dwc->dev;
- struct device_node *np_phy, *np_conn;
- struct extcon_dev *edev;
+ struct device_node *np_phy;
+ struct extcon_dev *edev = NULL;
const char *name;
if (device_property_read_bool(dev, "extcon"))
@@ -462,15 +462,22 @@ static struct extcon_dev *dwc3_get_extcon(struct dwc3 *dwc)
return edev;
}
+ /*
+ * Try to get an extcon device from the USB PHY controller's "port"
+ * node. Check if it has the "port" node first, to avoid printing the
+ * error message from underlying code, as it's a valid case: extcon
+ * device (and "port" node) may be missing in case of "usb-role-switch"
+ * or OTG mode.
+ */
np_phy = of_parse_phandle(dev->of_node, "phys", 0);
- np_conn = of_graph_get_remote_node(np_phy, -1, -1);
+ if (of_graph_is_present(np_phy)) {
+ struct device_node *np_conn;
- if (np_conn)
- edev = extcon_find_edev_by_node(np_conn);
- else
- edev = NULL;
-
- of_node_put(np_conn);
+ np_conn = of_graph_get_remote_node(np_phy, -1, -1);
+ if (np_conn)
+ edev = extcon_find_edev_by_node(np_conn);
+ of_node_put(np_conn);
+ }
of_node_put(np_phy);
return edev;
diff --git a/drivers/usb/dwc3/dwc3-haps.c b/drivers/usb/dwc3/dwc3-haps.c
index 55b4a901168e..f6e3817fa7af 100644
--- a/drivers/usb/dwc3/dwc3-haps.c
+++ b/drivers/usb/dwc3/dwc3-haps.c
@@ -33,6 +33,10 @@ static const struct property_entry initial_properties[] = {
{ },
};
+static const struct software_node dwc3_haps_swnode = {
+ .properties = initial_properties,
+};
+
static int dwc3_haps_probe(struct pci_dev *pci,
const struct pci_device_id *id)
{
@@ -77,7 +81,7 @@ static int dwc3_haps_probe(struct pci_dev *pci,
dwc->pci = pci;
dwc->dwc3->dev.parent = dev;
- ret = platform_device_add_properties(dwc->dwc3, initial_properties);
+ ret = device_add_software_node(&dwc->dwc3->dev, &dwc3_haps_swnode);
if (ret)
goto err;
@@ -91,6 +95,7 @@ static int dwc3_haps_probe(struct pci_dev *pci,
return 0;
err:
+ device_remove_software_node(&dwc->dwc3->dev);
platform_device_put(dwc->dwc3);
return ret;
}
@@ -99,6 +104,7 @@ static void dwc3_haps_remove(struct pci_dev *pci)
{
struct dwc3_haps *dwc = pci_get_drvdata(pci);
+ device_remove_software_node(&dwc->dwc3->dev);
platform_device_unregister(dwc->dwc3);
}
diff --git a/drivers/usb/dwc3/dwc3-imx8mp.c b/drivers/usb/dwc3/dwc3-imx8mp.c
new file mode 100644
index 000000000000..75f0042b998b
--- /dev/null
+++ b/drivers/usb/dwc3/dwc3-imx8mp.c
@@ -0,0 +1,363 @@
+// SPDX-License-Identifier: GPL-2.0
+/**
+ * dwc3-imx8mp.c - NXP imx8mp Specific Glue layer
+ *
+ * Copyright (c) 2020 NXP.
+ */
+
+#include <linux/clk.h>
+#include <linux/interrupt.h>
+#include <linux/io.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/of_platform.h>
+#include <linux/platform_device.h>
+#include <linux/pm_runtime.h>
+
+#include "core.h"
+
+/* USB wakeup registers */
+#define USB_WAKEUP_CTRL 0x00
+
+/* Global wakeup interrupt enable, also used to clear interrupt */
+#define USB_WAKEUP_EN BIT(31)
+/* Wakeup from connect or disconnect, only for superspeed */
+#define USB_WAKEUP_SS_CONN BIT(5)
+/* 0 select vbus_valid, 1 select sessvld */
+#define USB_WAKEUP_VBUS_SRC_SESS_VAL BIT(4)
+/* Enable signal for wake up from u3 state */
+#define USB_WAKEUP_U3_EN BIT(3)
+/* Enable signal for wake up from id change */
+#define USB_WAKEUP_ID_EN BIT(2)
+/* Enable signal for wake up from vbus change */
+#define USB_WAKEUP_VBUS_EN BIT(1)
+/* Enable signal for wake up from dp/dm change */
+#define USB_WAKEUP_DPDM_EN BIT(0)
+
+#define USB_WAKEUP_EN_MASK GENMASK(5, 0)
+
+struct dwc3_imx8mp {
+ struct device *dev;
+ struct platform_device *dwc3;
+ void __iomem *glue_base;
+ struct clk *hsio_clk;
+ struct clk *suspend_clk;
+ int irq;
+ bool pm_suspended;
+ bool wakeup_pending;
+};
+
+static void dwc3_imx8mp_wakeup_enable(struct dwc3_imx8mp *dwc3_imx)
+{
+ struct dwc3 *dwc3 = platform_get_drvdata(dwc3_imx->dwc3);
+ u32 val;
+
+ if (!dwc3)
+ return;
+
+ val = readl(dwc3_imx->glue_base + USB_WAKEUP_CTRL);
+
+ if ((dwc3->current_dr_role == DWC3_GCTL_PRTCAP_HOST) && dwc3->xhci)
+ val |= USB_WAKEUP_EN | USB_WAKEUP_SS_CONN |
+ USB_WAKEUP_U3_EN | USB_WAKEUP_DPDM_EN;
+ else if (dwc3->current_dr_role == DWC3_GCTL_PRTCAP_DEVICE)
+ val |= USB_WAKEUP_EN | USB_WAKEUP_VBUS_EN |
+ USB_WAKEUP_VBUS_SRC_SESS_VAL;
+
+ writel(val, dwc3_imx->glue_base + USB_WAKEUP_CTRL);
+}
+
+static void dwc3_imx8mp_wakeup_disable(struct dwc3_imx8mp *dwc3_imx)
+{
+ u32 val;
+
+ val = readl(dwc3_imx->glue_base + USB_WAKEUP_CTRL);
+ val &= ~(USB_WAKEUP_EN | USB_WAKEUP_EN_MASK);
+ writel(val, dwc3_imx->glue_base + USB_WAKEUP_CTRL);
+}
+
+static irqreturn_t dwc3_imx8mp_interrupt(int irq, void *_dwc3_imx)
+{
+ struct dwc3_imx8mp *dwc3_imx = _dwc3_imx;
+ struct dwc3 *dwc = platform_get_drvdata(dwc3_imx->dwc3);
+
+ if (!dwc3_imx->pm_suspended)
+ return IRQ_HANDLED;
+
+ disable_irq_nosync(dwc3_imx->irq);
+ dwc3_imx->wakeup_pending = true;
+
+ if ((dwc->current_dr_role == DWC3_GCTL_PRTCAP_HOST) && dwc->xhci)
+ pm_runtime_resume(&dwc->xhci->dev);
+ else if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_DEVICE)
+ pm_runtime_get(dwc->dev);
+
+ return IRQ_HANDLED;
+}
+
+static int dwc3_imx8mp_probe(struct platform_device *pdev)
+{
+ struct device *dev = &pdev->dev;
+ struct device_node *dwc3_np, *node = dev->of_node;
+ struct dwc3_imx8mp *dwc3_imx;
+ int err, irq;
+
+ if (!node) {
+ dev_err(dev, "device node not found\n");
+ return -EINVAL;
+ }
+
+ dwc3_imx = devm_kzalloc(dev, sizeof(*dwc3_imx), GFP_KERNEL);
+ if (!dwc3_imx)
+ return -ENOMEM;
+
+ platform_set_drvdata(pdev, dwc3_imx);
+
+ dwc3_imx->dev = dev;
+
+ dwc3_imx->glue_base = devm_platform_ioremap_resource(pdev, 0);
+ if (IS_ERR(dwc3_imx->glue_base))
+ return PTR_ERR(dwc3_imx->glue_base);
+
+ dwc3_imx->hsio_clk = devm_clk_get(dev, "hsio");
+ if (IS_ERR(dwc3_imx->hsio_clk)) {
+ err = PTR_ERR(dwc3_imx->hsio_clk);
+ dev_err(dev, "Failed to get hsio clk, err=%d\n", err);
+ return err;
+ }
+
+ err = clk_prepare_enable(dwc3_imx->hsio_clk);
+ if (err) {
+ dev_err(dev, "Failed to enable hsio clk, err=%d\n", err);
+ return err;
+ }
+
+ dwc3_imx->suspend_clk = devm_clk_get(dev, "suspend");
+ if (IS_ERR(dwc3_imx->suspend_clk)) {
+ err = PTR_ERR(dwc3_imx->suspend_clk);
+ dev_err(dev, "Failed to get suspend clk, err=%d\n", err);
+ goto disable_hsio_clk;
+ }
+
+ err = clk_prepare_enable(dwc3_imx->suspend_clk);
+ if (err) {
+ dev_err(dev, "Failed to enable suspend clk, err=%d\n", err);
+ goto disable_hsio_clk;
+ }
+
+ irq = platform_get_irq(pdev, 0);
+ if (irq < 0) {
+ err = irq;
+ goto disable_clks;
+ }
+ dwc3_imx->irq = irq;
+
+ err = devm_request_threaded_irq(dev, irq, NULL, dwc3_imx8mp_interrupt,
+ IRQF_ONESHOT, dev_name(dev), dwc3_imx);
+ if (err) {
+ dev_err(dev, "failed to request IRQ #%d --> %d\n", irq, err);
+ goto disable_clks;
+ }
+
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+ err = pm_runtime_get_sync(dev);
+ if (err < 0)
+ goto disable_rpm;
+
+ dwc3_np = of_get_child_by_name(node, "dwc3");
+ if (!dwc3_np) {
+ dev_err(dev, "failed to find dwc3 core child\n");
+ goto disable_rpm;
+ }
+
+ err = of_platform_populate(node, NULL, NULL, dev);
+ if (err) {
+ dev_err(&pdev->dev, "failed to create dwc3 core\n");
+ goto err_node_put;
+ }
+
+ dwc3_imx->dwc3 = of_find_device_by_node(dwc3_np);
+ if (!dwc3_imx->dwc3) {
+ dev_err(dev, "failed to get dwc3 platform device\n");
+ err = -ENODEV;
+ goto depopulate;
+ }
+ of_node_put(dwc3_np);
+
+ device_set_wakeup_capable(dev, true);
+ pm_runtime_put(dev);
+
+ return 0;
+
+depopulate:
+ of_platform_depopulate(dev);
+err_node_put:
+ of_node_put(dwc3_np);
+disable_rpm:
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+disable_clks:
+ clk_disable_unprepare(dwc3_imx->suspend_clk);
+disable_hsio_clk:
+ clk_disable_unprepare(dwc3_imx->hsio_clk);
+
+ return err;
+}
+
+static int dwc3_imx8mp_remove(struct platform_device *pdev)
+{
+ struct dwc3_imx8mp *dwc3_imx = platform_get_drvdata(pdev);
+ struct device *dev = &pdev->dev;
+
+ pm_runtime_get_sync(dev);
+ of_platform_depopulate(dev);
+
+ clk_disable_unprepare(dwc3_imx->suspend_clk);
+ clk_disable_unprepare(dwc3_imx->hsio_clk);
+
+ pm_runtime_disable(dev);
+ pm_runtime_put_noidle(dev);
+ platform_set_drvdata(pdev, NULL);
+
+ return 0;
+}
+
+static int __maybe_unused dwc3_imx8mp_suspend(struct dwc3_imx8mp *dwc3_imx,
+ pm_message_t msg)
+{
+ if (dwc3_imx->pm_suspended)
+ return 0;
+
+ /* Wakeup enable */
+ if (PMSG_IS_AUTO(msg) || device_may_wakeup(dwc3_imx->dev))
+ dwc3_imx8mp_wakeup_enable(dwc3_imx);
+
+ dwc3_imx->pm_suspended = true;
+
+ return 0;
+}
+
+static int __maybe_unused dwc3_imx8mp_resume(struct dwc3_imx8mp *dwc3_imx,
+ pm_message_t msg)
+{
+ struct dwc3 *dwc = platform_get_drvdata(dwc3_imx->dwc3);
+ int ret = 0;
+
+ if (!dwc3_imx->pm_suspended)
+ return 0;
+
+ /* Wakeup disable */
+ dwc3_imx8mp_wakeup_disable(dwc3_imx);
+ dwc3_imx->pm_suspended = false;
+
+ if (dwc3_imx->wakeup_pending) {
+ dwc3_imx->wakeup_pending = false;
+ if (dwc->current_dr_role == DWC3_GCTL_PRTCAP_DEVICE) {
+ pm_runtime_mark_last_busy(dwc->dev);
+ pm_runtime_put_autosuspend(dwc->dev);
+ } else {
+ /*
+ * Wait for the xHCI controller to switch from the
+ * suspend clock back to the normal clock so it can
+ * detect a connection.
+ */
+ usleep_range(9000, 10000);
+ }
+ enable_irq(dwc3_imx->irq);
+ }
+
+ return ret;
+}
+
+static int __maybe_unused dwc3_imx8mp_pm_suspend(struct device *dev)
+{
+ struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev);
+ int ret;
+
+ ret = dwc3_imx8mp_suspend(dwc3_imx, PMSG_SUSPEND);
+
+ if (device_may_wakeup(dwc3_imx->dev))
+ enable_irq_wake(dwc3_imx->irq);
+ else
+ clk_disable_unprepare(dwc3_imx->suspend_clk);
+
+ clk_disable_unprepare(dwc3_imx->hsio_clk);
+ dev_dbg(dev, "dwc3 imx8mp pm suspend.\n");
+
+ return ret;
+}
+
+static int __maybe_unused dwc3_imx8mp_pm_resume(struct device *dev)
+{
+ struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev);
+ int ret;
+
+ if (device_may_wakeup(dwc3_imx->dev)) {
+ disable_irq_wake(dwc3_imx->irq);
+ } else {
+ ret = clk_prepare_enable(dwc3_imx->suspend_clk);
+ if (ret)
+ return ret;
+ }
+
+ ret = clk_prepare_enable(dwc3_imx->hsio_clk);
+ if (ret)
+ return ret;
+
+ ret = dwc3_imx8mp_resume(dwc3_imx, PMSG_RESUME);
+
+ pm_runtime_disable(dev);
+ pm_runtime_set_active(dev);
+ pm_runtime_enable(dev);
+
+ dev_dbg(dev, "dwc3 imx8mp pm resume.\n");
+
+ return ret;
+}
+
+static int __maybe_unused dwc3_imx8mp_runtime_suspend(struct device *dev)
+{
+ struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "dwc3 imx8mp runtime suspend.\n");
+
+ return dwc3_imx8mp_suspend(dwc3_imx, PMSG_AUTO_SUSPEND);
+}
+
+static int __maybe_unused dwc3_imx8mp_runtime_resume(struct device *dev)
+{
+ struct dwc3_imx8mp *dwc3_imx = dev_get_drvdata(dev);
+
+ dev_dbg(dev, "dwc3 imx8mp runtime resume.\n");
+
+ return dwc3_imx8mp_resume(dwc3_imx, PMSG_AUTO_RESUME);
+}
+
+static const struct dev_pm_ops dwc3_imx8mp_dev_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(dwc3_imx8mp_pm_suspend, dwc3_imx8mp_pm_resume)
+ SET_RUNTIME_PM_OPS(dwc3_imx8mp_runtime_suspend,
+ dwc3_imx8mp_runtime_resume, NULL)
+};
+
+static const struct of_device_id dwc3_imx8mp_of_match[] = {
+ { .compatible = "fsl,imx8mp-dwc3", },
+ {},
+};
+MODULE_DEVICE_TABLE(of, dwc3_imx8mp_of_match);
+
+static struct platform_driver dwc3_imx8mp_driver = {
+ .probe = dwc3_imx8mp_probe,
+ .remove = dwc3_imx8mp_remove,
+ .driver = {
+ .name = "imx8mp-dwc3",
+ .pm = &dwc3_imx8mp_dev_pm_ops,
+ .of_match_table = dwc3_imx8mp_of_match,
+ },
+};
+
+module_platform_driver(dwc3_imx8mp_driver);
+
+MODULE_ALIAS("platform:imx8mp-dwc3");
+MODULE_AUTHOR("jun.li@nxp.com");
+MODULE_LICENSE("GPL v2");
+MODULE_DESCRIPTION("DesignWare USB3 imx8mp Glue Layer");
diff --git a/drivers/usb/dwc3/dwc3-keystone.c b/drivers/usb/dwc3/dwc3-keystone.c
index 9a99253d5ba3..057056c0975e 100644
--- a/drivers/usb/dwc3/dwc3-keystone.c
+++ b/drivers/usb/dwc3/dwc3-keystone.c
@@ -99,13 +99,8 @@ static int kdwc3_probe(struct platform_device *pdev)
/* PSC dependency on AM65 needs SERDES0 to be powered before USB0 */
kdwc->usb3_phy = devm_phy_optional_get(dev, "usb3-phy");
- if (IS_ERR(kdwc->usb3_phy)) {
- error = PTR_ERR(kdwc->usb3_phy);
- if (error != -EPROBE_DEFER)
- dev_err(dev, "couldn't get usb3 phy: %d\n", error);
-
- return error;
- }
+ if (IS_ERR(kdwc->usb3_phy))
+ return dev_err_probe(dev, PTR_ERR(kdwc->usb3_phy), "couldn't get usb3 phy\n");
phy_pm_runtime_get_sync(kdwc->usb3_phy);
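
The hunk above folds the open-coded -EPROBE_DEFER special case into dev_err_probe(), which prints the message for real errors, stays quiet for -EPROBE_DEFER (recording it as the deferral reason instead), and returns the error it was given. A minimal sketch of the same pattern (the "fck" clock name is made up for the example):

#include <linux/clk.h>
#include <linux/device.h>

static int demo_get_clock(struct device *dev, struct clk **clk)
{
	*clk = devm_clk_get(dev, "fck");
	if (IS_ERR(*clk))
		return dev_err_probe(dev, PTR_ERR(*clk),
				     "couldn't get functional clock\n");

	return 0;
}
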
diff --git a/drivers/usb/dwc3/dwc3-meson-g12a.c b/drivers/usb/dwc3/dwc3-meson-g12a.c
index 417e05381b5d..bdf1f98dfad8 100644
--- a/drivers/usb/dwc3/dwc3-meson-g12a.c
+++ b/drivers/usb/dwc3/dwc3-meson-g12a.c
@@ -754,7 +754,7 @@ static int dwc3_meson_g12a_probe(struct platform_device *pdev)
ret = priv->drvdata->setup_regmaps(priv, base);
if (ret)
- return ret;
+ goto err_disable_clks;
if (priv->vbus) {
ret = regulator_enable(priv->vbus);
diff --git a/drivers/usb/dwc3/dwc3-pci.c b/drivers/usb/dwc3/dwc3-pci.c
index bae6a70664c8..3d3918a8d5fb 100644
--- a/drivers/usb/dwc3/dwc3-pci.c
+++ b/drivers/usb/dwc3/dwc3-pci.c
@@ -40,7 +40,9 @@
#define PCI_DEVICE_ID_INTEL_TGPLP 0xa0ee
#define PCI_DEVICE_ID_INTEL_TGPH 0x43ee
#define PCI_DEVICE_ID_INTEL_JSP 0x4dee
+#define PCI_DEVICE_ID_INTEL_ADLP 0x51ee
#define PCI_DEVICE_ID_INTEL_ADLS 0x7ae1
+#define PCI_DEVICE_ID_INTEL_TGL 0x9a15
#define PCI_INTEL_BXT_DSM_GUID "732b85d5-b7a7-4a1b-9ba0-4bbd00ffd511"
#define PCI_INTEL_BXT_FUNC_PMU_PWR 4
@@ -142,6 +144,18 @@ static const struct property_entry dwc3_pci_amd_properties[] = {
{}
};
+static const struct software_node dwc3_pci_intel_swnode = {
+ .properties = dwc3_pci_intel_properties,
+};
+
+static const struct software_node dwc3_pci_intel_mrfld_swnode = {
+ .properties = dwc3_pci_mrfld_properties,
+};
+
+static const struct software_node dwc3_pci_amd_swnode = {
+ .properties = dwc3_pci_amd_properties,
+};
+
static int dwc3_pci_quirks(struct dwc3_pci *dwc)
{
struct pci_dev *pdev = dwc->pci;
@@ -222,7 +236,6 @@ static void dwc3_pci_resume_work(struct work_struct *work)
static int dwc3_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
{
- struct property_entry *p = (struct property_entry *)id->driver_data;
struct dwc3_pci *dwc;
struct resource res[2];
int ret;
@@ -265,7 +278,7 @@ static int dwc3_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
dwc->dwc3->dev.parent = dev;
ACPI_COMPANION_SET(&dwc->dwc3->dev, ACPI_COMPANION(dev));
- ret = platform_device_add_properties(dwc->dwc3, p);
+ ret = device_add_software_node(&dwc->dwc3->dev, (void *)id->driver_data);
if (ret < 0)
goto err;
@@ -288,6 +301,7 @@ static int dwc3_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
return 0;
err:
+ device_remove_software_node(&dwc->dwc3->dev);
platform_device_put(dwc->dwc3);
return ret;
}
@@ -304,75 +318,82 @@ static void dwc3_pci_remove(struct pci_dev *pci)
#endif
device_init_wakeup(&pci->dev, false);
pm_runtime_get(&pci->dev);
+ device_remove_software_node(&dwc->dwc3->dev);
platform_device_unregister(dwc->dwc3);
}
static const struct pci_device_id dwc3_pci_id_table[] = {
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BSW),
- (kernel_ulong_t) &dwc3_pci_intel_properties },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BYT),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_MRFLD),
- (kernel_ulong_t) &dwc3_pci_mrfld_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_mrfld_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLLP),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CMLH),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTLP),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_SPTH),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BXT),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_BXT_M),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_APL),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_KBP),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_GLK),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CNPLP),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CNPH),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_CNPV),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ICLLP),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_EHLLP),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGPLP),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGPH),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_JSP),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLP),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_ADLS),
- (kernel_ulong_t) &dwc3_pci_intel_properties, },
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
+
+ { PCI_VDEVICE(INTEL, PCI_DEVICE_ID_INTEL_TGL),
+ (kernel_ulong_t) &dwc3_pci_intel_swnode, },
{ PCI_VDEVICE(AMD, PCI_DEVICE_ID_AMD_NL_USB),
- (kernel_ulong_t) &dwc3_pci_amd_properties, },
+ (kernel_ulong_t) &dwc3_pci_amd_swnode, },
{ } /* Terminating Entry */
};
MODULE_DEVICE_TABLE(pci, dwc3_pci_id_table);
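
The ID table now carries struct software_node wrappers instead of raw property_entry arrays, so the properties are attached with device_add_software_node() and must be detached again on the error and remove paths shown above. A minimal sketch of that pairing, using a made-up property name:

#include <linux/device.h>
#include <linux/property.h>

static const struct property_entry demo_properties[] = {
	PROPERTY_ENTRY_BOOL("demo,enable-quirk"),	/* illustrative name */
	{}
};

static const struct software_node demo_swnode = {
	.properties = demo_properties,
};

static int demo_attach_properties(struct device *consumer)
{
	return device_add_software_node(consumer, &demo_swnode);
}

static void demo_detach_properties(struct device *consumer)
{
	device_remove_software_node(consumer);
}
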
diff --git a/drivers/usb/dwc3/dwc3-qcom.c b/drivers/usb/dwc3/dwc3-qcom.c
index c703d552bbcf..846a47be6df7 100644
--- a/drivers/usb/dwc3/dwc3-qcom.c
+++ b/drivers/usb/dwc3/dwc3-qcom.c
@@ -60,12 +60,14 @@ struct dwc3_acpi_pdata {
int dp_hs_phy_irq_index;
int dm_hs_phy_irq_index;
int ss_phy_irq_index;
+ bool is_urs;
};
struct dwc3_qcom {
struct device *dev;
void __iomem *qscratch_base;
struct platform_device *dwc3;
+ struct platform_device *urs_usb;
struct clk **clks;
int num_clocks;
struct reset_control *resets;
@@ -429,13 +431,15 @@ static void dwc3_qcom_select_utmi_clk(struct dwc3_qcom *qcom)
static int dwc3_qcom_get_irq(struct platform_device *pdev,
const char *name, int num)
{
+ struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
+ struct platform_device *pdev_irq = qcom->urs_usb ? qcom->urs_usb : pdev;
struct device_node *np = pdev->dev.of_node;
int ret;
if (np)
- ret = platform_get_irq_byname(pdev, name);
+ ret = platform_get_irq_byname(pdev_irq, name);
else
- ret = platform_get_irq(pdev, num);
+ ret = platform_get_irq(pdev_irq, num);
return ret;
}
@@ -563,11 +567,17 @@ static const struct property_entry dwc3_qcom_acpi_properties[] = {
{}
};
+static const struct software_node dwc3_qcom_swnode = {
+ .properties = dwc3_qcom_acpi_properties,
+};
+
static int dwc3_qcom_acpi_register_core(struct platform_device *pdev)
{
struct dwc3_qcom *qcom = platform_get_drvdata(pdev);
struct device *dev = &pdev->dev;
struct resource *res, *child_res = NULL;
+ struct platform_device *pdev_irq = qcom->urs_usb ? qcom->urs_usb :
+ pdev;
int irq;
int ret;
@@ -597,7 +607,7 @@ static int dwc3_qcom_acpi_register_core(struct platform_device *pdev)
child_res[0].end = child_res[0].start +
qcom->acpi_pdata->dwc3_core_base_size;
- irq = platform_get_irq(pdev, 0);
+ irq = platform_get_irq(pdev_irq, 0);
child_res[1].flags = IORESOURCE_IRQ;
child_res[1].start = child_res[1].end = irq;
@@ -607,16 +617,17 @@ static int dwc3_qcom_acpi_register_core(struct platform_device *pdev)
goto out;
}
- ret = platform_device_add_properties(qcom->dwc3,
- dwc3_qcom_acpi_properties);
+ ret = device_add_software_node(&qcom->dwc3->dev, &dwc3_qcom_swnode);
if (ret < 0) {
dev_err(&pdev->dev, "failed to add properties\n");
goto out;
}
ret = platform_device_add(qcom->dwc3);
- if (ret)
+ if (ret) {
dev_err(&pdev->dev, "failed to add device\n");
+ device_remove_software_node(&qcom->dwc3->dev);
+ }
out:
kfree(child_res);
@@ -651,6 +662,33 @@ static int dwc3_qcom_of_register_core(struct platform_device *pdev)
return 0;
}
+static struct platform_device *
+dwc3_qcom_create_urs_usb_platdev(struct device *dev)
+{
+ struct fwnode_handle *fwh;
+ struct acpi_device *adev;
+ char name[8];
+ int ret;
+ int id;
+
+ /* Figure out device id */
+ ret = sscanf(fwnode_get_name(dev->fwnode), "URS%d", &id);
+ if (!ret)
+ return NULL;
+
+ /* Find the child using name */
+ snprintf(name, sizeof(name), "USB%d", id);
+ fwh = fwnode_get_named_child_node(dev->fwnode, name);
+ if (!fwh)
+ return NULL;
+
+ adev = to_acpi_device_node(fwh);
+ if (!adev)
+ return NULL;
+
+ return acpi_create_platform_device(adev, NULL);
+}
+
static int dwc3_qcom_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node;
@@ -715,6 +753,14 @@ static int dwc3_qcom_probe(struct platform_device *pdev)
qcom->acpi_pdata->qscratch_base_offset;
parent_res->end = parent_res->start +
qcom->acpi_pdata->qscratch_base_size;
+
+ if (qcom->acpi_pdata->is_urs) {
+ qcom->urs_usb = dwc3_qcom_create_urs_usb_platdev(dev);
+ if (!qcom->urs_usb) {
+ dev_err(dev, "failed to create URS USB platdev\n");
+ return -ENODEV;
+ }
+ }
}
qcom->qscratch_base = devm_ioremap_resource(dev, parent_res);
@@ -796,6 +842,7 @@ static int dwc3_qcom_remove(struct platform_device *pdev)
struct device *dev = &pdev->dev;
int i;
+ device_remove_software_node(&qcom->dwc3->dev);
of_platform_depopulate(dev);
for (i = qcom->num_clocks - 1; i >= 0; i--) {
@@ -877,8 +924,20 @@ static const struct dwc3_acpi_pdata sdm845_acpi_pdata = {
.ss_phy_irq_index = 2
};
+static const struct dwc3_acpi_pdata sdm845_acpi_urs_pdata = {
+ .qscratch_base_offset = SDM845_QSCRATCH_BASE_OFFSET,
+ .qscratch_base_size = SDM845_QSCRATCH_SIZE,
+ .dwc3_core_base_size = SDM845_DWC3_CORE_SIZE,
+ .hs_phy_irq_index = 1,
+ .dp_hs_phy_irq_index = 4,
+ .dm_hs_phy_irq_index = 3,
+ .ss_phy_irq_index = 2,
+ .is_urs = true,
+};
+
static const struct acpi_device_id dwc3_qcom_acpi_match[] = {
{ "QCOM2430", (unsigned long)&sdm845_acpi_pdata },
+ { "QCOM0304", (unsigned long)&sdm845_acpi_urs_pdata },
{ },
};
MODULE_DEVICE_TABLE(acpi, dwc3_qcom_acpi_match);
diff --git a/drivers/usb/dwc3/gadget.c b/drivers/usb/dwc3/gadget.c
index 78cb4db8a6e4..97d707b4f384 100644
--- a/drivers/usb/dwc3/gadget.c
+++ b/drivers/usb/dwc3/gadget.c
@@ -1763,6 +1763,8 @@ static int dwc3_gadget_ep_dequeue(struct usb_ep *ep,
list_for_each_entry_safe(r, t, &dep->started_list, list)
dwc3_gadget_move_cancelled_request(r);
+ dep->flags &= ~DWC3_EP_WAIT_TRANSFER_COMPLETE;
+
goto out;
}
}
@@ -2036,6 +2038,102 @@ static void dwc3_stop_active_transfers(struct dwc3 *dwc)
}
}
+static void __dwc3_gadget_set_ssp_rate(struct dwc3 *dwc)
+{
+ enum usb_ssp_rate ssp_rate = dwc->gadget_ssp_rate;
+ u32 reg;
+
+ if (ssp_rate == USB_SSP_GEN_UNKNOWN)
+ ssp_rate = dwc->max_ssp_rate;
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+ reg &= ~DWC3_DCFG_SPEED_MASK;
+ reg &= ~DWC3_DCFG_NUMLANES(~0);
+
+ if (ssp_rate == USB_SSP_GEN_1x2)
+ reg |= DWC3_DCFG_SUPERSPEED;
+ else if (dwc->max_ssp_rate != USB_SSP_GEN_1x2)
+ reg |= DWC3_DCFG_SUPERSPEED_PLUS;
+
+ if (ssp_rate != USB_SSP_GEN_2x1 &&
+ dwc->max_ssp_rate != USB_SSP_GEN_2x1)
+ reg |= DWC3_DCFG_NUMLANES(1);
+
+ dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+}
+
+static void __dwc3_gadget_set_speed(struct dwc3 *dwc)
+{
+ enum usb_device_speed speed;
+ u32 reg;
+
+ speed = dwc->gadget_max_speed;
+ if (speed > dwc->maximum_speed)
+ speed = dwc->maximum_speed;
+
+ if (speed == USB_SPEED_SUPER_PLUS &&
+ DWC3_IP_IS(DWC32)) {
+ __dwc3_gadget_set_ssp_rate(dwc);
+ return;
+ }
+
+ reg = dwc3_readl(dwc->regs, DWC3_DCFG);
+ reg &= ~(DWC3_DCFG_SPEED_MASK);
+
+ /*
+ * WORKAROUND: DWC3 revisions < 2.20a have an issue
+ * which would cause a metastability state on the Run/Stop
+ * bit if we try to force the IP to USB2-only mode.
+ *
+ * Because of that, we cannot configure the IP to any
+ * speed other than SuperSpeed.
+ *
+ * Refers to:
+ *
+ * STAR#9000525659: Clock Domain Crossing on DCTL in
+ * USB 2.0 Mode
+ */
+ if (DWC3_VER_IS_PRIOR(DWC3, 220A) &&
+ !dwc->dis_metastability_quirk) {
+ reg |= DWC3_DCFG_SUPERSPEED;
+ } else {
+ switch (speed) {
+ case USB_SPEED_LOW:
+ reg |= DWC3_DCFG_LOWSPEED;
+ break;
+ case USB_SPEED_FULL:
+ reg |= DWC3_DCFG_FULLSPEED;
+ break;
+ case USB_SPEED_HIGH:
+ reg |= DWC3_DCFG_HIGHSPEED;
+ break;
+ case USB_SPEED_SUPER:
+ reg |= DWC3_DCFG_SUPERSPEED;
+ break;
+ case USB_SPEED_SUPER_PLUS:
+ if (DWC3_IP_IS(DWC3))
+ reg |= DWC3_DCFG_SUPERSPEED;
+ else
+ reg |= DWC3_DCFG_SUPERSPEED_PLUS;
+ break;
+ default:
+ dev_err(dwc->dev, "invalid speed (%d)\n", speed);
+
+ if (DWC3_IP_IS(DWC3))
+ reg |= DWC3_DCFG_SUPERSPEED;
+ else
+ reg |= DWC3_DCFG_SUPERSPEED_PLUS;
+ }
+ }
+
+ if (DWC3_IP_IS(DWC32) &&
+ speed > USB_SPEED_UNKNOWN &&
+ speed < USB_SPEED_SUPER_PLUS)
+ reg &= ~DWC3_DCFG_NUMLANES(~0);
+
+ dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+}
+
static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
{
u32 reg;
@@ -2058,6 +2156,7 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
if (dwc->has_hibernation)
reg |= DWC3_DCTL_KEEP_CONNECT;
+ __dwc3_gadget_set_speed(dwc);
dwc->pullups_connected = true;
} else {
reg &= ~DWC3_DCTL_RUN_STOP;
@@ -2083,6 +2182,7 @@ static int dwc3_gadget_run_stop(struct dwc3 *dwc, int is_on, int suspend)
static void dwc3_gadget_disable_irq(struct dwc3 *dwc);
static void __dwc3_gadget_stop(struct dwc3 *dwc);
+static int __dwc3_gadget_start(struct dwc3 *dwc);
static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
{
@@ -2108,6 +2208,17 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
}
/*
+ * Check the return value for successful resume, or error. For a
+ * successful resume, the DWC3 runtime PM resume routine will handle
+ * the run stop sequence, so avoid duplicate operations here.
+ */
+ ret = pm_runtime_get_sync(dwc->dev);
+ if (!ret || ret < 0) {
+ pm_runtime_put(dwc->dev);
+ return 0;
+ }
+
+ /*
* Synchronize any pending event handling before executing the controller
* halt routine.
*/
@@ -2145,10 +2256,14 @@ static int dwc3_gadget_pullup(struct usb_gadget *g, int is_on)
dwc->ev_buf->lpos = (dwc->ev_buf->lpos + count) %
dwc->ev_buf->length;
}
+ dwc->connected = false;
+ } else {
+ __dwc3_gadget_start(dwc);
}
ret = dwc3_gadget_run_stop(dwc, is_on, false);
spin_unlock_irqrestore(&dwc->lock, flags);
+ pm_runtime_put(dwc->dev);
return ret;
}
@@ -2158,8 +2273,7 @@ static void dwc3_gadget_enable_irq(struct dwc3 *dwc)
u32 reg;
/* Enable all but Start and End of Frame IRQs */
- reg = (DWC3_DEVTEN_VNDRDEVTSTRCVEDEN |
- DWC3_DEVTEN_EVNTOVERFLOWEN |
+ reg = (DWC3_DEVTEN_EVNTOVERFLOWEN |
DWC3_DEVTEN_CMDCMPLTEN |
DWC3_DEVTEN_ERRTICERREN |
DWC3_DEVTEN_WKUPEVTEN |
@@ -2297,7 +2411,7 @@ static int dwc3_gadget_start(struct usb_gadget *g,
{
struct dwc3 *dwc = gadget_to_dwc(g);
unsigned long flags;
- int ret = 0;
+ int ret;
int irq;
irq = dwc->irq_gadget;
@@ -2306,33 +2420,14 @@ static int dwc3_gadget_start(struct usb_gadget *g,
if (ret) {
dev_err(dwc->dev, "failed to request irq #%d --> %d\n",
irq, ret);
- goto err0;
+ return ret;
}
spin_lock_irqsave(&dwc->lock, flags);
- if (dwc->gadget_driver) {
- dev_err(dwc->dev, "%s is already bound to %s\n",
- dwc->gadget->name,
- dwc->gadget_driver->driver.name);
- ret = -EBUSY;
- goto err1;
- }
-
dwc->gadget_driver = driver;
-
- if (pm_runtime_active(dwc->dev))
- __dwc3_gadget_start(dwc);
-
spin_unlock_irqrestore(&dwc->lock, flags);
return 0;
-
-err1:
- spin_unlock_irqrestore(&dwc->lock, flags);
- free_irq(irq, dwc);
-
-err0:
- return ret;
}
static void __dwc3_gadget_stop(struct dwc3 *dwc)
@@ -2348,13 +2443,6 @@ static int dwc3_gadget_stop(struct usb_gadget *g)
unsigned long flags;
spin_lock_irqsave(&dwc->lock, flags);
-
- if (pm_runtime_suspended(dwc->dev))
- goto out;
-
- __dwc3_gadget_stop(dwc);
-
-out:
dwc->gadget_driver = NULL;
spin_unlock_irqrestore(&dwc->lock, flags);
@@ -2407,62 +2495,33 @@ static void dwc3_gadget_set_speed(struct usb_gadget *g,
{
struct dwc3 *dwc = gadget_to_dwc(g);
unsigned long flags;
- u32 reg;
spin_lock_irqsave(&dwc->lock, flags);
- reg = dwc3_readl(dwc->regs, DWC3_DCFG);
- reg &= ~(DWC3_DCFG_SPEED_MASK);
-
- /*
- * WORKAROUND: DWC3 revision < 2.20a have an issue
- * which would cause metastability state on Run/Stop
- * bit if we try to force the IP to USB2-only mode.
- *
- * Because of that, we cannot configure the IP to any
- * speed other than the SuperSpeed
- *
- * Refers to:
- *
- * STAR#9000525659: Clock Domain Crossing on DCTL in
- * USB 2.0 Mode
- */
- if (DWC3_VER_IS_PRIOR(DWC3, 220A) &&
- !dwc->dis_metastability_quirk) {
- reg |= DWC3_DCFG_SUPERSPEED;
- } else {
- switch (speed) {
- case USB_SPEED_LOW:
- reg |= DWC3_DCFG_LOWSPEED;
- break;
- case USB_SPEED_FULL:
- reg |= DWC3_DCFG_FULLSPEED;
- break;
- case USB_SPEED_HIGH:
- reg |= DWC3_DCFG_HIGHSPEED;
- break;
- case USB_SPEED_SUPER:
- reg |= DWC3_DCFG_SUPERSPEED;
- break;
- case USB_SPEED_SUPER_PLUS:
- if (DWC3_IP_IS(DWC3))
- reg |= DWC3_DCFG_SUPERSPEED;
- else
- reg |= DWC3_DCFG_SUPERSPEED_PLUS;
- break;
- default:
- dev_err(dwc->dev, "invalid speed (%d)\n", speed);
+ dwc->gadget_max_speed = speed;
+ spin_unlock_irqrestore(&dwc->lock, flags);
+}
- if (DWC3_IP_IS(DWC3))
- reg |= DWC3_DCFG_SUPERSPEED;
- else
- reg |= DWC3_DCFG_SUPERSPEED_PLUS;
- }
- }
- dwc3_writel(dwc->regs, DWC3_DCFG, reg);
+static void dwc3_gadget_set_ssp_rate(struct usb_gadget *g,
+ enum usb_ssp_rate rate)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+ unsigned long flags;
+ spin_lock_irqsave(&dwc->lock, flags);
+ dwc->gadget_ssp_rate = rate;
spin_unlock_irqrestore(&dwc->lock, flags);
}
+static int dwc3_gadget_vbus_draw(struct usb_gadget *g, unsigned int mA)
+{
+ struct dwc3 *dwc = gadget_to_dwc(g);
+
+ if (dwc->usb2_phy)
+ return usb_phy_set_power(dwc->usb2_phy, mA);
+
+ return 0;
+}
+
static const struct usb_gadget_ops dwc3_gadget_ops = {
.get_frame = dwc3_gadget_get_frame,
.wakeup = dwc3_gadget_wakeup,
@@ -2471,7 +2530,9 @@ static const struct usb_gadget_ops dwc3_gadget_ops = {
.udc_start = dwc3_gadget_start,
.udc_stop = dwc3_gadget_stop,
.udc_set_speed = dwc3_gadget_set_speed,
+ .udc_set_ssp_rate = dwc3_gadget_set_ssp_rate,
.get_config_params = dwc3_gadget_config_params,
+ .vbus_draw = dwc3_gadget_vbus_draw,
};
/* -------------------------------------------------------------------------- */
@@ -3304,12 +3365,18 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
struct dwc3_ep *dep;
int ret;
u32 reg;
+ u8 lanes = 1;
u8 speed;
reg = dwc3_readl(dwc->regs, DWC3_DSTS);
speed = reg & DWC3_DSTS_CONNECTSPD;
dwc->speed = speed;
+ if (DWC3_IP_IS(DWC32))
+ lanes = DWC3_DSTS_CONNLANES(reg) + 1;
+
+ dwc->gadget->ssp_rate = USB_SSP_GEN_UNKNOWN;
+
/*
* RAMClkSel is reset to 0 after USB reset, so it must be reprogrammed
* each time on Connect Done.
@@ -3324,6 +3391,11 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
dwc->gadget->ep0->maxpacket = 512;
dwc->gadget->speed = USB_SPEED_SUPER_PLUS;
+
+ if (lanes > 1)
+ dwc->gadget->ssp_rate = USB_SSP_GEN_2x2;
+ else
+ dwc->gadget->ssp_rate = USB_SSP_GEN_2x1;
break;
case DWC3_DSTS_SUPERSPEED:
/*
@@ -3345,6 +3417,11 @@ static void dwc3_gadget_conndone_interrupt(struct dwc3 *dwc)
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(512);
dwc->gadget->ep0->maxpacket = 512;
dwc->gadget->speed = USB_SPEED_SUPER;
+
+ if (lanes > 1) {
+ dwc->gadget->speed = USB_SPEED_SUPER_PLUS;
+ dwc->gadget->ssp_rate = USB_SSP_GEN_1x2;
+ }
break;
case DWC3_DSTS_HIGHSPEED:
dwc3_gadget_ep0_desc.wMaxPacketSize = cpu_to_le16(64);
@@ -3839,6 +3916,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
dev->platform_data = dwc;
dwc->gadget->ops = &dwc3_gadget_ops;
dwc->gadget->speed = USB_SPEED_UNKNOWN;
+ dwc->gadget->ssp_rate = USB_SSP_GEN_UNKNOWN;
dwc->gadget->sg_supported = true;
dwc->gadget->name = "dwc3-gadget";
dwc->gadget->lpm_capable = true;
@@ -3865,6 +3943,7 @@ int dwc3_gadget_init(struct dwc3 *dwc)
dwc->revision);
dwc->gadget->max_speed = dwc->maximum_speed;
+ dwc->gadget->max_ssp_rate = dwc->max_ssp_rate;
/*
* REVISIT: Here we should clear all pending IRQs to be
@@ -3881,7 +3960,10 @@ int dwc3_gadget_init(struct dwc3 *dwc)
goto err5;
}
- dwc3_gadget_set_speed(dwc->gadget, dwc->maximum_speed);
+ if (DWC3_IP_IS(DWC32) && dwc->maximum_speed == USB_SPEED_SUPER_PLUS)
+ dwc3_gadget_set_ssp_rate(dwc->gadget, dwc->max_ssp_rate);
+ else
+ dwc3_gadget_set_speed(dwc->gadget, dwc->maximum_speed);
return 0;
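
The pullup change above relies on the pm_runtime_get_sync() return convention: 0 means the device had to be resumed (so the dwc3 runtime-resume callback has already handled run/stop), 1 means it was already active and the caller does the work itself, and a negative value is an error; in every case the reference must be dropped again. A minimal sketch of that convention with hypothetical demo_* names:

#include <linux/pm_runtime.h>

static int demo_pullup(struct device *dev)
{
	int ret;

	ret = pm_runtime_get_sync(dev);
	if (ret <= 0) {
		/* 0: we triggered a resume and the runtime-resume callback
		 * already handled run/stop; <0: the resume failed. Either
		 * way, drop the reference and stop here. */
		pm_runtime_put(dev);
		return ret < 0 ? ret : 0;
	}

	/* ret == 1: controller was already active, do run/stop ourselves */

	pm_runtime_put(dev);
	return 0;
}
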
diff --git a/drivers/usb/dwc3/host.c b/drivers/usb/dwc3/host.c
index e195176580de..f29a264635aa 100644
--- a/drivers/usb/dwc3/host.c
+++ b/drivers/usb/dwc3/host.c
@@ -108,7 +108,7 @@ int dwc3_host_init(struct dwc3 *dwc)
props[prop_idx++] = PROPERTY_ENTRY_BOOL("quirk-broken-port-ped");
if (prop_idx) {
- ret = platform_device_add_properties(xhci, props);
+ ret = device_create_managed_software_node(&xhci->dev, props, NULL);
if (ret) {
dev_err(dwc->dev, "failed to add properties to xHCI\n");
goto err;
diff --git a/drivers/usb/dwc3/ulpi.c b/drivers/usb/dwc3/ulpi.c
index aa213c9815f6..f23f4c9a557e 100644
--- a/drivers/usb/dwc3/ulpi.c
+++ b/drivers/usb/dwc3/ulpi.c
@@ -7,6 +7,8 @@
* Author: Heikki Krogerus <heikki.krogerus@linux.intel.com>
*/
+#include <linux/delay.h>
+#include <linux/time64.h>
#include <linux/ulpi/regs.h>
#include "core.h"
@@ -17,14 +19,28 @@
DWC3_GUSB2PHYACC_ADDR(ULPI_ACCESS_EXTENDED) | \
DWC3_GUSB2PHYACC_EXTEND_ADDR(a) : DWC3_GUSB2PHYACC_ADDR(a))
-static int dwc3_ulpi_busyloop(struct dwc3 *dwc)
+#define DWC3_ULPI_BASE_DELAY DIV_ROUND_UP(NSEC_PER_SEC, 60000000L)
+
+static int dwc3_ulpi_busyloop(struct dwc3 *dwc, u8 addr, bool read)
{
- unsigned int count = 1000;
+ unsigned long ns = 5L * DWC3_ULPI_BASE_DELAY;
+ unsigned int count = 10000;
u32 reg;
+ if (addr >= ULPI_EXT_VENDOR_SPECIFIC)
+ ns += DWC3_ULPI_BASE_DELAY;
+
+ if (read)
+ ns += DWC3_ULPI_BASE_DELAY;
+
+ reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
+ if (reg & DWC3_GUSB2PHYCFG_SUSPHY)
+ usleep_range(1000, 1200);
+
while (count--) {
+ ndelay(ns);
reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYACC(0));
- if (!(reg & DWC3_GUSB2PHYACC_BUSY))
+ if (reg & DWC3_GUSB2PHYACC_DONE)
return 0;
cpu_relax();
}
@@ -38,16 +54,10 @@ static int dwc3_ulpi_read(struct device *dev, u8 addr)
u32 reg;
int ret;
- reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
- if (reg & DWC3_GUSB2PHYCFG_SUSPHY) {
- reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
- }
-
reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
- ret = dwc3_ulpi_busyloop(dwc);
+ ret = dwc3_ulpi_busyloop(dwc, addr, true);
if (ret)
return ret;
@@ -61,17 +71,11 @@ static int dwc3_ulpi_write(struct device *dev, u8 addr, u8 val)
struct dwc3 *dwc = dev_get_drvdata(dev);
u32 reg;
- reg = dwc3_readl(dwc->regs, DWC3_GUSB2PHYCFG(0));
- if (reg & DWC3_GUSB2PHYCFG_SUSPHY) {
- reg &= ~DWC3_GUSB2PHYCFG_SUSPHY;
- dwc3_writel(dwc->regs, DWC3_GUSB2PHYCFG(0), reg);
- }
-
reg = DWC3_GUSB2PHYACC_NEWREGREQ | DWC3_ULPI_ADDR(addr);
reg |= DWC3_GUSB2PHYACC_WRITE | val;
dwc3_writel(dwc->regs, DWC3_GUSB2PHYACC(0), reg);
- return dwc3_ulpi_busyloop(dwc);
+ return dwc3_ulpi_busyloop(dwc, addr, false);
}
static const struct ulpi_ops dwc3_ulpi_ops = {
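
The poll delay above breaks down as one ULPI clock at 60 MHz, DIV_ROUND_UP(NSEC_PER_SEC, 60000000) = 17 ns, times five clocks budgeted per register access, plus one extra clock each for the extended-address and read phases. Per iteration that works out to:

#include <linux/kernel.h>
#include <linux/time64.h>

#define DEMO_ULPI_CLK_NS  DIV_ROUND_UP(NSEC_PER_SEC, 60000000L)  /* 17 ns */

/*
 * write, normal register:    5 * 17 ns =  85 ns per poll iteration
 * read, normal register:     6 * 17 ns = 102 ns
 * read, extended register:   7 * 17 ns = 119 ns
 */
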
diff --git a/drivers/usb/gadget/Kconfig b/drivers/usb/gadget/Kconfig
index 7e47e6223089..2d152571a7de 100644
--- a/drivers/usb/gadget/Kconfig
+++ b/drivers/usb/gadget/Kconfig
@@ -265,6 +265,7 @@ config USB_CONFIGFS_NCM
depends on NET
select USB_U_ETHER
select USB_F_NCM
+ select CRC32
help
NCM is an advanced protocol for Ethernet encapsulation, allows
grouping of several ethernet frames into one USB transfer and
@@ -314,6 +315,7 @@ config USB_CONFIGFS_EEM
depends on NET
select USB_U_ETHER
select USB_F_EEM
+ select CRC32
help
CDC EEM is a newer USB standard that is somewhat simpler than CDC ECM
and therefore can be supported by more hardware. Technically ECM and
diff --git a/drivers/usb/gadget/composite.c b/drivers/usb/gadget/composite.c
index c6d455f2bb92..72a9797dbbae 100644
--- a/drivers/usb/gadget/composite.c
+++ b/drivers/usb/gadget/composite.c
@@ -13,6 +13,7 @@
#include <linux/module.h>
#include <linux/device.h>
#include <linux/utsname.h>
+#include <linux/bitfield.h>
#include <linux/usb/composite.h>
#include <linux/usb/otg.h>
@@ -392,8 +393,11 @@ int usb_function_deactivate(struct usb_function *function)
spin_lock_irqsave(&cdev->lock, flags);
- if (cdev->deactivations == 0)
+ if (cdev->deactivations == 0) {
+ spin_unlock_irqrestore(&cdev->lock, flags);
status = usb_gadget_deactivate(cdev->gadget);
+ spin_lock_irqsave(&cdev->lock, flags);
+ }
if (status == 0)
cdev->deactivations++;
@@ -424,8 +428,11 @@ int usb_function_activate(struct usb_function *function)
status = -EINVAL;
else {
cdev->deactivations--;
- if (cdev->deactivations == 0)
+ if (cdev->deactivations == 0) {
+ spin_unlock_irqrestore(&cdev->lock, flags);
status = usb_gadget_activate(cdev->gadget);
+ spin_lock_irqsave(&cdev->lock, flags);
+ }
}
spin_unlock_irqrestore(&cdev->lock, flags);
@@ -728,47 +735,77 @@ static int bos_desc(struct usb_composite_dev *cdev)
/* The SuperSpeedPlus USB Device Capability descriptor */
if (gadget_is_superspeed_plus(cdev->gadget)) {
struct usb_ssp_cap_descriptor *ssp_cap;
+ u8 ssac = 1;
+ u8 ssic;
+ int i;
- ssp_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
- bos->bNumDeviceCaps++;
+ if (cdev->gadget->max_ssp_rate == USB_SSP_GEN_2x2)
+ ssac = 3;
/*
- * Report typical values.
+ * Paired RX and TX sublink speed attributes share
+ * the same SSID.
*/
+ ssic = (ssac + 1) / 2 - 1;
+
+ ssp_cap = cdev->req->buf + le16_to_cpu(bos->wTotalLength);
+ bos->bNumDeviceCaps++;
- le16_add_cpu(&bos->wTotalLength, USB_DT_USB_SSP_CAP_SIZE(1));
- ssp_cap->bLength = USB_DT_USB_SSP_CAP_SIZE(1);
+ le16_add_cpu(&bos->wTotalLength, USB_DT_USB_SSP_CAP_SIZE(ssac));
+ ssp_cap->bLength = USB_DT_USB_SSP_CAP_SIZE(ssac);
ssp_cap->bDescriptorType = USB_DT_DEVICE_CAPABILITY;
ssp_cap->bDevCapabilityType = USB_SSP_CAP_TYPE;
ssp_cap->bReserved = 0;
ssp_cap->wReserved = 0;
- /* SSAC = 1 (2 attributes) */
- ssp_cap->bmAttributes = cpu_to_le32(1);
+ ssp_cap->bmAttributes =
+ cpu_to_le32(FIELD_PREP(USB_SSP_SUBLINK_SPEED_ATTRIBS, ssac) |
+ FIELD_PREP(USB_SSP_SUBLINK_SPEED_IDS, ssic));
- /* Min RX/TX Lane Count = 1 */
ssp_cap->wFunctionalitySupport =
- cpu_to_le16((1 << 8) | (1 << 12));
+ cpu_to_le16(FIELD_PREP(USB_SSP_MIN_SUBLINK_SPEED_ATTRIBUTE_ID, 0) |
+ FIELD_PREP(USB_SSP_MIN_RX_LANE_COUNT, 1) |
+ FIELD_PREP(USB_SSP_MIN_TX_LANE_COUNT, 1));
/*
- * bmSublinkSpeedAttr[0]:
- * ST = Symmetric, RX
- * LSE = 3 (Gbps)
- * LP = 1 (SuperSpeedPlus)
- * LSM = 10 (10 Gbps)
+ * Use 1 SSID if the gadget supports up to gen2x1 or not
+ * specified:
+ * - SSID 0 for symmetric RX/TX sublink speed of 10 Gbps.
+ *
+ * Use 1 SSID if the gadget supports up to gen1x2:
+ * - SSID 0 for symmetric RX/TX sublink speed of 5 Gbps.
+ *
+ * Use 2 SSIDs if the gadget supports up to gen2x2:
+ * - SSID 0 for symmetric RX/TX sublink speed of 5 Gbps.
+ * - SSID 1 for symmetric RX/TX sublink speed of 10 Gbps.
*/
- ssp_cap->bmSublinkSpeedAttr[0] =
- cpu_to_le32((3 << 4) | (1 << 14) | (0xa << 16));
- /*
- * bmSublinkSpeedAttr[1] =
- * ST = Symmetric, TX
- * LSE = 3 (Gbps)
- * LP = 1 (SuperSpeedPlus)
- * LSM = 10 (10 Gbps)
- */
- ssp_cap->bmSublinkSpeedAttr[1] =
- cpu_to_le32((3 << 4) | (1 << 14) |
- (0xa << 16) | (1 << 7));
+ for (i = 0; i < ssac + 1; i++) {
+ u8 ssid;
+ u8 mantissa;
+ u8 type;
+
+ ssid = i >> 1;
+
+ if (cdev->gadget->max_ssp_rate == USB_SSP_GEN_2x1 ||
+ cdev->gadget->max_ssp_rate == USB_SSP_GEN_UNKNOWN)
+ mantissa = 10;
+ else
+ mantissa = 5 << ssid;
+
+ if (i % 2)
+ type = USB_SSP_SUBLINK_SPEED_ST_SYM_TX;
+ else
+ type = USB_SSP_SUBLINK_SPEED_ST_SYM_RX;
+
+ ssp_cap->bmSublinkSpeedAttr[i] =
+ cpu_to_le32(FIELD_PREP(USB_SSP_SUBLINK_SPEED_SSID, ssid) |
+ FIELD_PREP(USB_SSP_SUBLINK_SPEED_LSE,
+ USB_SSP_SUBLINK_SPEED_LSE_GBPS) |
+ FIELD_PREP(USB_SSP_SUBLINK_SPEED_ST, type) |
+ FIELD_PREP(USB_SSP_SUBLINK_SPEED_LP,
+ USB_SSP_SUBLINK_SPEED_LP_SSP) |
+ FIELD_PREP(USB_SSP_SUBLINK_SPEED_LSM, mantissa));
+ }
}
return le16_to_cpu(bos->wTotalLength);
@@ -2036,7 +2073,7 @@ done:
return value;
}
-void composite_disconnect(struct usb_gadget *gadget)
+static void __composite_disconnect(struct usb_gadget *gadget)
{
struct usb_composite_dev *cdev = get_gadget_data(gadget);
unsigned long flags;
@@ -2053,6 +2090,23 @@ void composite_disconnect(struct usb_gadget *gadget)
spin_unlock_irqrestore(&cdev->lock, flags);
}
+void composite_disconnect(struct usb_gadget *gadget)
+{
+ usb_gadget_vbus_draw(gadget, 0);
+ __composite_disconnect(gadget);
+}
+
+void composite_reset(struct usb_gadget *gadget)
+{
+ /*
+ * Section 1.4.13 Standard Downstream Port of the USB battery charging
+ * specification v1.2 states that a device connected to an SDP shall
+ * draw at most 100 mA while in a connected but unconfigured state.
+ */
+ usb_gadget_vbus_draw(gadget, 100);
+ __composite_disconnect(gadget);
+}
+
/*-------------------------------------------------------------------------*/
static ssize_t suspended_show(struct device *dev, struct device_attribute *attr,
@@ -2373,7 +2427,7 @@ static const struct usb_gadget_driver composite_driver_template = {
.unbind = composite_unbind,
.setup = composite_setup,
- .reset = composite_disconnect,
+ .reset = composite_reset,
.disconnect = composite_disconnect,
.suspend = composite_suspend,
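
Worked through for a gadget whose max_ssp_rate is USB_SSP_GEN_2x2, the bos_desc() loop above yields ssac = 3 (four sublink speed attributes) and ssic = (3 + 1) / 2 - 1 = 1 (two speed IDs), giving symmetric RX/TX pairs per SSID:

/*
 * i = 0: SSID 0, RX, LSE = Gbps, LP = SuperSpeedPlus, LSM = 5   ->  5 Gbps
 * i = 1: SSID 0, TX, LSE = Gbps, LP = SuperSpeedPlus, LSM = 5   ->  5 Gbps
 * i = 2: SSID 1, RX, LSE = Gbps, LP = SuperSpeedPlus, LSM = 10  -> 10 Gbps
 * i = 3: SSID 1, TX, LSE = Gbps, LP = SuperSpeedPlus, LSM = 10  -> 10 Gbps
 */
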
diff --git a/drivers/usb/gadget/configfs.c b/drivers/usb/gadget/configfs.c
index 56051bb97349..0d56f33d63c2 100644
--- a/drivers/usb/gadget/configfs.c
+++ b/drivers/usb/gadget/configfs.c
@@ -221,9 +221,16 @@ static ssize_t gadget_dev_desc_bcdUSB_store(struct config_item *item,
static ssize_t gadget_dev_desc_UDC_show(struct config_item *item, char *page)
{
- char *udc_name = to_gadget_info(item)->composite.gadget_driver.udc_name;
+ struct gadget_info *gi = to_gadget_info(item);
+ char *udc_name;
+ int ret;
- return sprintf(page, "%s\n", udc_name ?: "");
+ mutex_lock(&gi->lock);
+ udc_name = gi->composite.gadget_driver.udc_name;
+ ret = sprintf(page, "%s\n", udc_name ?: "");
+ mutex_unlock(&gi->lock);
+
+ return ret;
}
static int unregister_gadget(struct gadget_info *gi)
@@ -1248,9 +1255,9 @@ static void purge_configs_funcs(struct gadget_info *gi)
cfg = container_of(c, struct config_usb_cfg, c);
- list_for_each_entry_safe(f, tmp, &c->functions, list) {
+ list_for_each_entry_safe_reverse(f, tmp, &c->functions, list) {
- list_move_tail(&f->list, &cfg->func_list);
+ list_move(&f->list, &cfg->func_list);
if (f->unbind) {
dev_dbg(&gi->cdev.gadget->dev,
"unbind function '%s'/%p\n",
@@ -1481,6 +1488,28 @@ static void configfs_composite_disconnect(struct usb_gadget *gadget)
spin_unlock_irqrestore(&gi->spinlock, flags);
}
+static void configfs_composite_reset(struct usb_gadget *gadget)
+{
+ struct usb_composite_dev *cdev;
+ struct gadget_info *gi;
+ unsigned long flags;
+
+ cdev = get_gadget_data(gadget);
+ if (!cdev)
+ return;
+
+ gi = container_of(cdev, struct gadget_info, cdev);
+ spin_lock_irqsave(&gi->spinlock, flags);
+ cdev = get_gadget_data(gadget);
+ if (!cdev || gi->unbind) {
+ spin_unlock_irqrestore(&gi->spinlock, flags);
+ return;
+ }
+
+ composite_reset(gadget);
+ spin_unlock_irqrestore(&gi->spinlock, flags);
+}
+
static void configfs_composite_suspend(struct usb_gadget *gadget)
{
struct usb_composite_dev *cdev;
@@ -1530,13 +1559,13 @@ static const struct usb_gadget_driver configfs_driver_template = {
.unbind = configfs_composite_unbind,
.setup = configfs_composite_setup,
- .reset = configfs_composite_disconnect,
+ .reset = configfs_composite_reset,
.disconnect = configfs_composite_disconnect,
.suspend = configfs_composite_suspend,
.resume = configfs_composite_resume,
- .max_speed = USB_SPEED_SUPER,
+ .max_speed = USB_SPEED_SUPER_PLUS,
.driver = {
.owner = THIS_MODULE,
.name = "configfs-gadget",
@@ -1576,7 +1605,7 @@ static struct config_group *gadgets_make(
gi->composite.unbind = configfs_do_nothing;
gi->composite.suspend = NULL;
gi->composite.resume = NULL;
- gi->composite.max_speed = USB_SPEED_SUPER;
+ gi->composite.max_speed = USB_SPEED_SUPER_PLUS;
spin_lock_init(&gi->spinlock);
mutex_init(&gi->lock);
diff --git a/drivers/usb/gadget/function/f_midi.c b/drivers/usb/gadget/function/f_midi.c
index 8fff995b8dd5..71a1a26e85c7 100644
--- a/drivers/usb/gadget/function/f_midi.c
+++ b/drivers/usb/gadget/function/f_midi.c
@@ -87,7 +87,7 @@ struct f_midi {
struct snd_rawmidi_substream *out_substream[MAX_PORTS];
unsigned long out_triggered;
- struct tasklet_struct tasklet;
+ struct work_struct work;
unsigned int in_ports;
unsigned int out_ports;
int index;
@@ -698,9 +698,11 @@ drop_out:
f_midi_drop_out_substreams(midi);
}
-static void f_midi_in_tasklet(struct tasklet_struct *t)
+static void f_midi_in_work(struct work_struct *work)
{
- struct f_midi *midi = from_tasklet(midi, t, tasklet);
+ struct f_midi *midi;
+
+ midi = container_of(work, struct f_midi, work);
f_midi_transmit(midi);
}
@@ -737,7 +739,7 @@ static void f_midi_in_trigger(struct snd_rawmidi_substream *substream, int up)
VDBG(midi, "%s() %d\n", __func__, up);
midi->in_ports_array[substream->number].active = up;
if (up)
- tasklet_hi_schedule(&midi->tasklet);
+ queue_work(system_highpri_wq, &midi->work);
}
static int f_midi_out_open(struct snd_rawmidi_substream *substream)
@@ -875,7 +877,7 @@ static int f_midi_bind(struct usb_configuration *c, struct usb_function *f)
int status, n, jack = 1, i = 0, endpoint_descriptor_index = 0;
midi->gadget = cdev->gadget;
- tasklet_setup(&midi->tasklet, f_midi_in_tasklet);
+ INIT_WORK(&midi->work, f_midi_in_work);
status = f_midi_register_card(midi);
if (status < 0)
goto fail_register;
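
The conversion above swaps the tasklet for a work item on the high-priority system workqueue; unlike a tasklet, the work callback runs in process context and may sleep. A minimal sketch of the same wiring with hypothetical demo_* names:

#include <linux/workqueue.h>

struct demo_midi {
	struct work_struct work;
};

static void demo_in_work(struct work_struct *work)
{
	struct demo_midi *midi = container_of(work, struct demo_midi, work);

	/* drain and transmit pending MIDI data; sleeping is allowed here */
	(void)midi;
}

static void demo_setup(struct demo_midi *midi)
{
	INIT_WORK(&midi->work, demo_in_work);
}

static void demo_trigger(struct demo_midi *midi)
{
	queue_work(system_highpri_wq, &midi->work);
}
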
diff --git a/drivers/usb/gadget/function/f_printer.c b/drivers/usb/gadget/function/f_printer.c
index 64a4112068fc..61ce8e68f7a3 100644
--- a/drivers/usb/gadget/function/f_printer.c
+++ b/drivers/usb/gadget/function/f_printer.c
@@ -51,6 +51,8 @@
#define GET_PORT_STATUS 1
#define SOFT_RESET 2
+#define DEFAULT_Q_LEN 10 /* same as legacy g_printer gadget */
+
static int major, minors;
static struct class *usb_gadget_class;
static DEFINE_IDA(printer_ida);
@@ -1162,6 +1164,7 @@ fail_tx_reqs:
printer_req_free(dev->in_ep, req);
}
+ usb_free_all_descriptors(f);
return ret;
}
@@ -1363,6 +1366,9 @@ static struct usb_function_instance *gprinter_alloc_inst(void)
opts->func_inst.free_func_inst = gprinter_free_inst;
ret = &opts->func_inst;
+ /* Make sure q_len is initialized, otherwise the bound device can't support read/write! */
+ opts->q_len = DEFAULT_Q_LEN;
+
mutex_lock(&printer_ida_lock);
if (ida_is_empty(&printer_ida)) {
diff --git a/drivers/usb/gadget/function/f_uac2.c b/drivers/usb/gadget/function/f_uac2.c
index 3633df6d7610..5d960b6603b6 100644
--- a/drivers/usb/gadget/function/f_uac2.c
+++ b/drivers/usb/gadget/function/f_uac2.c
@@ -271,7 +271,7 @@ static struct usb_endpoint_descriptor fs_epout_desc = {
.bEndpointAddress = USB_DIR_OUT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1023),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 1,
};
@@ -280,7 +280,7 @@ static struct usb_endpoint_descriptor hs_epout_desc = {
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1024),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 4,
};
@@ -348,7 +348,7 @@ static struct usb_endpoint_descriptor fs_epin_desc = {
.bEndpointAddress = USB_DIR_IN,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1023),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 1,
};
@@ -357,7 +357,7 @@ static struct usb_endpoint_descriptor hs_epin_desc = {
.bDescriptorType = USB_DT_ENDPOINT,
.bmAttributes = USB_ENDPOINT_XFER_ISOC | USB_ENDPOINT_SYNC_ASYNC,
- .wMaxPacketSize = cpu_to_le16(1024),
+ /* .wMaxPacketSize = DYNAMIC */
.bInterval = 4,
};
@@ -444,12 +444,28 @@ struct cntrl_range_lay3 {
__le32 dRES;
} __packed;
-static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
+static int set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
struct usb_endpoint_descriptor *ep_desc,
- unsigned int factor, bool is_playback)
+ enum usb_device_speed speed, bool is_playback)
{
int chmask, srate, ssize;
- u16 max_packet_size;
+ u16 max_size_bw, max_size_ep;
+ unsigned int factor;
+
+ switch (speed) {
+ case USB_SPEED_FULL:
+ max_size_ep = 1023;
+ factor = 1000;
+ break;
+
+ case USB_SPEED_HIGH:
+ max_size_ep = 1024;
+ factor = 8000;
+ break;
+
+ default:
+ return -EINVAL;
+ }
if (is_playback) {
chmask = uac2_opts->p_chmask;
@@ -461,10 +477,12 @@ static void set_ep_max_packet_size(const struct f_uac2_opts *uac2_opts,
ssize = uac2_opts->c_ssize;
}
- max_packet_size = num_channels(chmask) * ssize *
+ max_size_bw = num_channels(chmask) * ssize *
DIV_ROUND_UP(srate, factor / (1 << (ep_desc->bInterval - 1)));
- ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_packet_size,
- le16_to_cpu(ep_desc->wMaxPacketSize)));
+ ep_desc->wMaxPacketSize = cpu_to_le16(min_t(u16, max_size_bw,
+ max_size_ep));
+
+ return 0;
}
/* Use macro to overcome line length limitation */
@@ -670,10 +688,33 @@ afunc_bind(struct usb_configuration *cfg, struct usb_function *fn)
}
/* Calculate wMaxPacketSize according to audio bandwidth */
- set_ep_max_packet_size(uac2_opts, &fs_epin_desc, 1000, true);
- set_ep_max_packet_size(uac2_opts, &fs_epout_desc, 1000, false);
- set_ep_max_packet_size(uac2_opts, &hs_epin_desc, 8000, true);
- set_ep_max_packet_size(uac2_opts, &hs_epout_desc, 8000, false);
+ ret = set_ep_max_packet_size(uac2_opts, &fs_epin_desc, USB_SPEED_FULL,
+ true);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+
+ ret = set_ep_max_packet_size(uac2_opts, &fs_epout_desc, USB_SPEED_FULL,
+ false);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+
+ ret = set_ep_max_packet_size(uac2_opts, &hs_epin_desc, USB_SPEED_HIGH,
+ true);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
+
+ ret = set_ep_max_packet_size(uac2_opts, &hs_epout_desc, USB_SPEED_HIGH,
+ false);
+ if (ret < 0) {
+ dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
+ return ret;
+ }
if (EPOUT_EN(uac2_opts)) {
agdev->out_ep = usb_ep_autoconfig(gadget, &fs_epout_desc);
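
Worked through for an example configuration of 48 kHz, two channels, 16-bit samples (ssize = 2), the bandwidth computation above gives:

/*
 * high speed, bInterval = 4: 8000 / (1 << 3) = 1000 service intervals/s
 *   max_size_bw = 2 * 2 * DIV_ROUND_UP(48000, 1000) = 192 bytes (cap 1024)
 * full speed, bInterval = 1: 1000 / (1 << 0) = 1000 service intervals/s
 *   max_size_bw = 2 * 2 * DIV_ROUND_UP(48000, 1000) = 192 bytes (cap 1023)
 * wMaxPacketSize = min(max_size_bw, max_size_ep) for each speed.
 */
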
diff --git a/drivers/usb/gadget/function/u_audio.c b/drivers/usb/gadget/function/u_audio.c
index e6d32c536781..265c4d805f81 100644
--- a/drivers/usb/gadget/function/u_audio.c
+++ b/drivers/usb/gadget/function/u_audio.c
@@ -23,11 +23,6 @@
#define PRD_SIZE_MAX PAGE_SIZE
#define MIN_PERIODS 4
-struct uac_req {
- struct uac_rtd_params *pp; /* parent param */
- struct usb_request *req;
-};
-
/* Runtime data params for one stream */
struct uac_rtd_params {
struct snd_uac_chip *uac; /* parent chip */
@@ -41,9 +36,8 @@ struct uac_rtd_params {
void *rbuf;
unsigned int max_psize; /* MaxPacketSize of endpoint */
- struct uac_req *ureq;
- spinlock_t lock;
+ struct usb_request **reqs;
};
struct snd_uac_chip {
@@ -79,17 +73,20 @@ static const struct snd_pcm_hardware uac_pcm_hardware = {
static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
{
unsigned int pending;
- unsigned long flags, flags2;
unsigned int hw_ptr;
int status = req->status;
- struct uac_req *ur = req->context;
struct snd_pcm_substream *substream;
struct snd_pcm_runtime *runtime;
- struct uac_rtd_params *prm = ur->pp;
+ struct uac_rtd_params *prm = req->context;
struct snd_uac_chip *uac = prm->uac;
/* i/f shutting down */
- if (!prm->ep_enabled || req->status == -ESHUTDOWN)
+ if (!prm->ep_enabled) {
+ usb_ep_free_request(ep, req);
+ return;
+ }
+
+ if (req->status == -ESHUTDOWN)
return;
/*
@@ -106,16 +103,14 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
if (!substream)
goto exit;
- snd_pcm_stream_lock_irqsave(substream, flags2);
+ snd_pcm_stream_lock(substream);
runtime = substream->runtime;
if (!runtime || !snd_pcm_running(substream)) {
- snd_pcm_stream_unlock_irqrestore(substream, flags2);
+ snd_pcm_stream_unlock(substream);
goto exit;
}
- spin_lock_irqsave(&prm->lock, flags);
-
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
/*
* For each IN packet, take the quotient of the current data
@@ -142,8 +137,6 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
hw_ptr = prm->hw_ptr;
- spin_unlock_irqrestore(&prm->lock, flags);
-
/* Pack USB load in ALSA ring buffer */
pending = runtime->dma_bytes - hw_ptr;
@@ -167,12 +160,10 @@ static void u_audio_iso_complete(struct usb_ep *ep, struct usb_request *req)
}
}
- spin_lock_irqsave(&prm->lock, flags);
/* update hw_ptr after data is copied to memory */
prm->hw_ptr = (hw_ptr + req->actual) % runtime->dma_bytes;
hw_ptr = prm->hw_ptr;
- spin_unlock_irqrestore(&prm->lock, flags);
- snd_pcm_stream_unlock_irqrestore(substream, flags2);
+ snd_pcm_stream_unlock(substream);
if ((hw_ptr % snd_pcm_lib_period_bytes(substream)) < req->actual)
snd_pcm_period_elapsed(substream);
@@ -188,7 +179,6 @@ static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
struct uac_rtd_params *prm;
struct g_audio *audio_dev;
struct uac_params *params;
- unsigned long flags;
int err = 0;
audio_dev = uac->audio_dev;
@@ -199,8 +189,6 @@ static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
else
prm = &uac->c_prm;
- spin_lock_irqsave(&prm->lock, flags);
-
/* Reset */
prm->hw_ptr = 0;
@@ -217,8 +205,6 @@ static int uac_pcm_trigger(struct snd_pcm_substream *substream, int cmd)
err = -EINVAL;
}
- spin_unlock_irqrestore(&prm->lock, flags);
-
/* Clear buffer after Play stops */
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK && !prm->ss)
memset(prm->rbuf, 0, prm->max_psize * params->req_number);
@@ -239,6 +225,25 @@ static snd_pcm_uframes_t uac_pcm_pointer(struct snd_pcm_substream *substream)
return bytes_to_frames(substream->runtime, prm->hw_ptr);
}
+static u64 uac_ssize_to_fmt(int ssize)
+{
+ u64 ret;
+
+ switch (ssize) {
+ case 3:
+ ret = SNDRV_PCM_FMTBIT_S24_3LE;
+ break;
+ case 4:
+ ret = SNDRV_PCM_FMTBIT_S32_LE;
+ break;
+ default:
+ ret = SNDRV_PCM_FMTBIT_S16_LE;
+ break;
+ }
+
+ return ret;
+}
+
static int uac_pcm_open(struct snd_pcm_substream *substream)
{
struct snd_uac_chip *uac = snd_pcm_substream_chip(substream);
@@ -262,36 +267,14 @@ static int uac_pcm_open(struct snd_pcm_substream *substream)
runtime->hw = uac_pcm_hardware;
if (substream->stream == SNDRV_PCM_STREAM_PLAYBACK) {
- spin_lock_init(&uac->p_prm.lock);
runtime->hw.rate_min = p_srate;
- switch (p_ssize) {
- case 3:
- runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_3LE;
- break;
- case 4:
- runtime->hw.formats = SNDRV_PCM_FMTBIT_S32_LE;
- break;
- default:
- runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
- break;
- }
+ runtime->hw.formats = uac_ssize_to_fmt(p_ssize);
runtime->hw.channels_min = num_channels(p_chmask);
runtime->hw.period_bytes_min = 2 * uac->p_prm.max_psize
/ runtime->hw.periods_min;
} else {
- spin_lock_init(&uac->c_prm.lock);
runtime->hw.rate_min = c_srate;
- switch (c_ssize) {
- case 3:
- runtime->hw.formats = SNDRV_PCM_FMTBIT_S24_3LE;
- break;
- case 4:
- runtime->hw.formats = SNDRV_PCM_FMTBIT_S32_LE;
- break;
- default:
- runtime->hw.formats = SNDRV_PCM_FMTBIT_S16_LE;
- break;
- }
+ runtime->hw.formats = uac_ssize_to_fmt(c_ssize);
runtime->hw.channels_min = num_channels(c_chmask);
runtime->hw.period_bytes_min = 2 * uac->c_prm.max_psize
/ runtime->hw.periods_min;
@@ -335,10 +318,16 @@ static inline void free_ep(struct uac_rtd_params *prm, struct usb_ep *ep)
params = &audio_dev->params;
for (i = 0; i < params->req_number; i++) {
- if (prm->ureq[i].req) {
- usb_ep_dequeue(ep, prm->ureq[i].req);
- usb_ep_free_request(ep, prm->ureq[i].req);
- prm->ureq[i].req = NULL;
+ if (prm->reqs[i]) {
+ if (usb_ep_dequeue(ep, prm->reqs[i]))
+ usb_ep_free_request(ep, prm->reqs[i]);
+ /*
+ * If usb_ep_dequeue() cannot successfully dequeue the
+ * request, the request will be freed by the completion
+ * callback.
+ */
+
+ prm->reqs[i] = NULL;
}
}
@@ -367,22 +356,21 @@ int u_audio_start_capture(struct g_audio *audio_dev)
usb_ep_enable(ep);
for (i = 0; i < params->req_number; i++) {
- if (!prm->ureq[i].req) {
+ if (!prm->reqs[i]) {
req = usb_ep_alloc_request(ep, GFP_ATOMIC);
if (req == NULL)
return -ENOMEM;
- prm->ureq[i].req = req;
- prm->ureq[i].pp = prm;
+ prm->reqs[i] = req;
req->zero = 0;
- req->context = &prm->ureq[i];
+ req->context = prm;
req->length = req_len;
req->complete = u_audio_iso_complete;
req->buf = prm->rbuf + i * ep->maxpacket;
}
- if (usb_ep_queue(ep, prm->ureq[i].req, GFP_ATOMIC))
+ if (usb_ep_queue(ep, prm->reqs[i], GFP_ATOMIC))
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
}
@@ -445,22 +433,21 @@ int u_audio_start_playback(struct g_audio *audio_dev)
usb_ep_enable(ep);
for (i = 0; i < params->req_number; i++) {
- if (!prm->ureq[i].req) {
+ if (!prm->reqs[i]) {
req = usb_ep_alloc_request(ep, GFP_ATOMIC);
if (req == NULL)
return -ENOMEM;
- prm->ureq[i].req = req;
- prm->ureq[i].pp = prm;
+ prm->reqs[i] = req;
req->zero = 0;
- req->context = &prm->ureq[i];
+ req->context = prm;
req->length = req_len;
req->complete = u_audio_iso_complete;
req->buf = prm->rbuf + i * ep->maxpacket;
}
- if (usb_ep_queue(ep, prm->ureq[i].req, GFP_ATOMIC))
+ if (usb_ep_queue(ep, prm->reqs[i], GFP_ATOMIC))
dev_err(dev, "%s:%d Error!\n", __func__, __LINE__);
}
@@ -505,9 +492,10 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
uac->c_prm.uac = uac;
prm->max_psize = g_audio->out_ep_maxpsize;
- prm->ureq = kcalloc(params->req_number, sizeof(struct uac_req),
- GFP_KERNEL);
- if (!prm->ureq) {
+ prm->reqs = kcalloc(params->req_number,
+ sizeof(struct usb_request *),
+ GFP_KERNEL);
+ if (!prm->reqs) {
err = -ENOMEM;
goto fail;
}
@@ -527,9 +515,10 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
uac->p_prm.uac = uac;
prm->max_psize = g_audio->in_ep_maxpsize;
- prm->ureq = kcalloc(params->req_number, sizeof(struct uac_req),
- GFP_KERNEL);
- if (!prm->ureq) {
+ prm->reqs = kcalloc(params->req_number,
+ sizeof(struct usb_request *),
+ GFP_KERNEL);
+ if (!prm->reqs) {
err = -ENOMEM;
goto fail;
}
@@ -582,8 +571,8 @@ int g_audio_setup(struct g_audio *g_audio, const char *pcm_name,
snd_fail:
snd_card_free(card);
fail:
- kfree(uac->p_prm.ureq);
- kfree(uac->c_prm.ureq);
+ kfree(uac->p_prm.reqs);
+ kfree(uac->c_prm.reqs);
kfree(uac->p_prm.rbuf);
kfree(uac->c_prm.rbuf);
kfree(uac);
@@ -605,8 +594,8 @@ void g_audio_cleanup(struct g_audio *g_audio)
if (card)
snd_card_free(card);
- kfree(uac->p_prm.ureq);
- kfree(uac->c_prm.ureq);
+ kfree(uac->p_prm.reqs);
+ kfree(uac->c_prm.reqs);
kfree(uac->p_prm.rbuf);
kfree(uac->c_prm.rbuf);
kfree(uac);
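
The request handling above depends on an ordering between teardown and the completion callback: once ep_enabled is cleared, any request that still completes is freed by the callback itself rather than requeued, so teardown never frees a request that is still in flight. A minimal sketch of the completion side, with hypothetical demo_* names:

#include <linux/usb/gadget.h>

struct demo_stream {
	bool ep_enabled;
};

static void demo_iso_complete(struct usb_ep *ep, struct usb_request *req)
{
	struct demo_stream *s = req->context;

	/* interface is shutting down: release the request instead of
	 * resubmitting it */
	if (!s->ep_enabled) {
		usb_ep_free_request(ep, req);
		return;
	}

	/* ... copy audio data and usb_ep_queue(ep, req, GFP_ATOMIC) ... */
}
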
diff --git a/drivers/usb/gadget/function/u_ether.c b/drivers/usb/gadget/function/u_ether.c
index 31ea76adcc0d..d1d044d9f859 100644
--- a/drivers/usb/gadget/function/u_ether.c
+++ b/drivers/usb/gadget/function/u_ether.c
@@ -45,9 +45,10 @@
#define UETH__VERSION "29-May-2008"
/* Experiments show that both Linux and Windows hosts allow up to 16k
- * frame sizes. Set the max size to 15k+52 to prevent allocating 32k
+ * frame sizes. Set the max MTU size to 15k+52 to prevent allocating 32k
* blocks and still have efficient handling. */
-#define GETHER_MAX_ETH_FRAME_LEN 15412
+#define GETHER_MAX_MTU_SIZE 15412
+#define GETHER_MAX_ETH_FRAME_LEN (GETHER_MAX_MTU_SIZE + ETH_HLEN)
struct eth_dev {
/* lock is held while accessing port_usb
@@ -79,6 +80,7 @@ struct eth_dev {
bool zlp;
bool no_skb_reserve;
+ bool ifname_set;
u8 host_mac[ETH_ALEN];
u8 dev_mac[ETH_ALEN];
};
@@ -786,7 +788,7 @@ struct eth_dev *gether_setup_name(struct usb_gadget *g,
/* MTU range: 14 - 15412 */
net->min_mtu = ETH_HLEN;
- net->max_mtu = GETHER_MAX_ETH_FRAME_LEN;
+ net->max_mtu = GETHER_MAX_MTU_SIZE;
dev->gadget = g;
SET_NETDEV_DEV(net, &g->dev);
@@ -848,7 +850,7 @@ struct net_device *gether_setup_name_default(const char *netname)
/* MTU range: 14 - 15412 */
net->min_mtu = ETH_HLEN;
- net->max_mtu = GETHER_MAX_ETH_FRAME_LEN;
+ net->max_mtu = GETHER_MAX_MTU_SIZE;
return net;
}
@@ -1003,15 +1005,45 @@ EXPORT_SYMBOL_GPL(gether_get_qmult);
int gether_get_ifname(struct net_device *net, char *name, int len)
{
+ struct eth_dev *dev = netdev_priv(net);
int ret;
rtnl_lock();
- ret = scnprintf(name, len, "%s\n", netdev_name(net));
+ ret = scnprintf(name, len, "%s\n",
+ dev->ifname_set ? net->name : netdev_name(net));
rtnl_unlock();
return ret;
}
EXPORT_SYMBOL_GPL(gether_get_ifname);
+int gether_set_ifname(struct net_device *net, const char *name, int len)
+{
+ struct eth_dev *dev = netdev_priv(net);
+ char tmp[IFNAMSIZ];
+ const char *p;
+
+ if (name[len - 1] == '\n')
+ len--;
+
+ if (len >= sizeof(tmp))
+ return -E2BIG;
+
+ strscpy(tmp, name, len + 1);
+ if (!dev_valid_name(tmp))
+ return -EINVAL;
+
+ /* Require exactly one %d, so binding will not fail with EEXIST. */
+ p = strchr(name, '%');
+ if (!p || p[1] != 'd' || strchr(p + 2, '%'))
+ return -EINVAL;
+
+ strncpy(net->name, tmp, sizeof(net->name));
+ dev->ifname_set = true;
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(gether_set_ifname);
+
/*
* gether_cleanup - remove Ethernet-over-USB device
* Context: may sleep
diff --git a/drivers/usb/gadget/function/u_ether.h b/drivers/usb/gadget/function/u_ether.h
index 10dd640684e2..40144546d1b0 100644
--- a/drivers/usb/gadget/function/u_ether.h
+++ b/drivers/usb/gadget/function/u_ether.h
@@ -244,6 +244,18 @@ unsigned gether_get_qmult(struct net_device *net);
*/
int gether_get_ifname(struct net_device *net, char *name, int len);
+/**
+ * gether_set_ifname - set an ethernet-over-usb link interface name
+ * @net: device representing this link
+ * @name: new interface name
+ * @len: length of @name
+ *
+ * This sets the interface name of this ethernet-over-usb link.
+ * A single terminating newline, if any, is ignored.
+ * Returns zero on success, else negative errno.
+ */
+int gether_set_ifname(struct net_device *net, const char *name, int len);
+
void gether_cleanup(struct eth_dev *dev);
/* connect/disconnect is handled by individual functions */
diff --git a/drivers/usb/gadget/function/u_ether_configfs.h b/drivers/usb/gadget/function/u_ether_configfs.h
index bd92b5703013..3dfb460908fa 100644
--- a/drivers/usb/gadget/function/u_ether_configfs.h
+++ b/drivers/usb/gadget/function/u_ether_configfs.h
@@ -148,7 +148,20 @@ out: \
return ret; \
} \
\
- CONFIGFS_ATTR_RO(_f_##_opts_, ifname)
+ static ssize_t _f_##_opts_ifname_store(struct config_item *item, \
+ const char *page, size_t len)\
+ { \
+ struct f_##_f_##_opts *opts = to_f_##_f_##_opts(item); \
+ int ret = -EBUSY; \
+ \
+ mutex_lock(&opts->lock); \
+ if (!opts->refcnt) \
+ ret = gether_set_ifname(opts->net, page, len); \
+ mutex_unlock(&opts->lock); \
+ return ret ?: len; \
+ } \
+ \
+ CONFIGFS_ATTR(_f_##_opts_, ifname)
#define USB_ETHER_CONFIGFS_ITEM_ATTR_U8_RW(_f_, _n_) \
static ssize_t _f_##_opts_##_n_##_show(struct config_item *item,\
diff --git a/drivers/usb/gadget/function/u_serial.c b/drivers/usb/gadget/function/u_serial.c
index 2caccbb6e014..1e59204ec7aa 100644
--- a/drivers/usb/gadget/function/u_serial.c
+++ b/drivers/usb/gadget/function/u_serial.c
@@ -258,9 +258,7 @@ __acquires(&port->port_lock)
list_del(&req->list);
req->zero = kfifo_is_empty(&port->port_write_buf);
- pr_vdebug("ttyGS%d: tx len=%d, 0x%02x 0x%02x 0x%02x ...\n",
- port->port_num, len, *((u8 *)req->buf),
- *((u8 *)req->buf+1), *((u8 *)req->buf+2));
+ pr_vdebug("ttyGS%d: tx len=%d, %3ph ...\n", port->port_num, len, req->buf);
/* Drop lock while we call out of driver; completions
* could be issued while we do so. Disconnection may
@@ -346,7 +344,7 @@ __acquires(&port->port_lock)
}
/*
- * RX tasklet takes data out of the RX queue and hands it up to the TTY
+ * RX work takes data out of the RX queue and hands it up to the TTY
* layer until it refuses to take any more data (or is throttled back).
* Then it issues reads for any further data.
*
@@ -709,7 +707,7 @@ raced_with_open:
/* Iff we're disconnected, there can be no I/O in flight so it's
* ok to free the circular buffer; else just scrub it. And don't
- * let the push tasklet fire again until we're re-opened.
+ * let the push async work fire again until we're re-opened.
*/
if (gser == NULL)
kfifo_free(&port->port_write_buf);
diff --git a/drivers/usb/gadget/legacy/Kconfig b/drivers/usb/gadget/legacy/Kconfig
index f02c38b32a2b..11dd6e8adc8a 100644
--- a/drivers/usb/gadget/legacy/Kconfig
+++ b/drivers/usb/gadget/legacy/Kconfig
@@ -515,10 +515,15 @@ config USB_G_WEBCAM
config USB_RAW_GADGET
tristate "USB Raw Gadget"
help
- USB Raw Gadget is a kernel module that provides a userspace interface
- for the USB Gadget subsystem. Essentially it allows to emulate USB
- devices from userspace. See Documentation/usb/raw-gadget.rst for
- details.
+ USB Raw Gadget is a gadget driver that gives userspace low-level
+ control over the gadget's communication process.
+
+ Like any other gadget driver, Raw Gadget implements USB devices via
+ the USB gadget API. Unlike most gadget drivers, Raw Gadget does not
+ implement any concrete USB functions itself but requires userspace
+ to do that.
+
+ See Documentation/usb/raw-gadget.rst for details.
Say "y" to link the driver statically, or "m" to build a
dynamically linked module called "raw_gadget".
diff --git a/drivers/usb/gadget/legacy/acm_ms.c b/drivers/usb/gadget/legacy/acm_ms.c
index 59be2d8417c9..e8033e5f0c18 100644
--- a/drivers/usb/gadget/legacy/acm_ms.c
+++ b/drivers/usb/gadget/legacy/acm_ms.c
@@ -200,8 +200,10 @@ static int acm_ms_bind(struct usb_composite_dev *cdev)
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
- if (!usb_desc)
+ if (!usb_desc) {
+ status = -ENOMEM;
goto fail_string_ids;
+ }
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
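This hunk, like the matching one in ether.c below, fixes a common error-path bug: jumping to the cleanup label without setting the return variable, so an allocation failure could be reported with whatever value the variable last held (often 0, i.e. success). The general shape of the fix, sketched with hypothetical names:

	ptr = kzalloc(sizeof(*ptr), GFP_KERNEL);
	if (!ptr) {
		status = -ENOMEM;	/* without this, stale "status" may read as success */
		goto fail;
	}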
diff --git a/drivers/usb/gadget/legacy/ether.c b/drivers/usb/gadget/legacy/ether.c
index 30313b233680..99c7fc0d1d59 100644
--- a/drivers/usb/gadget/legacy/ether.c
+++ b/drivers/usb/gadget/legacy/ether.c
@@ -403,8 +403,10 @@ static int eth_bind(struct usb_composite_dev *cdev)
struct usb_descriptor_header *usb_desc;
usb_desc = usb_otg_descriptor_alloc(gadget);
- if (!usb_desc)
+ if (!usb_desc) {
+ status = -ENOMEM;
goto fail1;
+ }
usb_otg_descriptor_init(gadget, usb_desc);
otg_desc[0] = usb_desc;
otg_desc[1] = NULL;
diff --git a/drivers/usb/gadget/legacy/raw_gadget.c b/drivers/usb/gadget/legacy/raw_gadget.c
index 062dfac30399..c5a2c734234a 100644
--- a/drivers/usb/gadget/legacy/raw_gadget.c
+++ b/drivers/usb/gadget/legacy/raw_gadget.c
@@ -3,7 +3,8 @@
* USB Raw Gadget driver.
* See Documentation/usb/raw-gadget.rst for more details.
*
- * Andrey Konovalov <andreyknvl@gmail.com>
+ * Copyright (c) 2020 Google, Inc.
+ * Author: Andrey Konovalov <andreyknvl@gmail.com>
*/
#include <linux/compiler.h>
diff --git a/drivers/usb/gadget/udc/Kconfig b/drivers/usb/gadget/udc/Kconfig
index 1a12aab208b4..8c614bb86c66 100644
--- a/drivers/usb/gadget/udc/Kconfig
+++ b/drivers/usb/gadget/udc/Kconfig
@@ -90,7 +90,7 @@ config USB_BCM63XX_UDC
config USB_FSL_USB2
tristate "Freescale Highspeed USB DR Peripheral Controller"
- depends on FSL_SOC || ARCH_MXC
+ depends on FSL_SOC
help
Some of Freescale PowerPC and i.MX processors have a High Speed
Dual-Role(DR) USB controller, which supports device mode.
diff --git a/drivers/usb/gadget/udc/Makefile b/drivers/usb/gadget/udc/Makefile
index f5a7ce28aecd..a21f2224e7eb 100644
--- a/drivers/usb/gadget/udc/Makefile
+++ b/drivers/usb/gadget/udc/Makefile
@@ -23,7 +23,6 @@ obj-$(CONFIG_USB_ATMEL_USBA) += atmel_usba_udc.o
obj-$(CONFIG_USB_BCM63XX_UDC) += bcm63xx_udc.o
obj-$(CONFIG_USB_FSL_USB2) += fsl_usb2_udc.o
fsl_usb2_udc-y := fsl_udc_core.o
-fsl_usb2_udc-$(CONFIG_ARCH_MXC) += fsl_mxc_udc.o
obj-$(CONFIG_USB_TEGRA_XUDC) += tegra-xudc.o
obj-$(CONFIG_USB_M66592) += m66592-udc.o
obj-$(CONFIG_USB_R8A66597) += r8a66597-udc.o
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/epn.c b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
index 0bd6b20435b8..02d8bfae58fb 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/epn.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/epn.c
@@ -420,7 +420,10 @@ static void ast_vhub_stop_active_req(struct ast_vhub_ep *ep,
u32 state, reg, loops;
/* Stop DMA activity */
- writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+ if (ep->epn.desc_mode)
+ writel(VHUB_EP_DMA_CTRL_RESET, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
+ else
+ writel(0, ep->epn.regs + AST_VHUB_EP_DMA_CTLSTAT);
/* Wait for it to complete */
for (loops = 0; loops < 1000; loops++) {
diff --git a/drivers/usb/gadget/udc/aspeed-vhub/hub.c b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
index 6497185ec4e7..bfd8e77788e2 100644
--- a/drivers/usb/gadget/udc/aspeed-vhub/hub.c
+++ b/drivers/usb/gadget/udc/aspeed-vhub/hub.c
@@ -999,8 +999,10 @@ static int ast_vhub_of_parse_str_desc(struct ast_vhub *vhub,
str_array[offset].s = NULL;
ret = ast_vhub_str_alloc_add(vhub, &lang_str);
- if (ret)
+ if (ret) {
+ of_node_put(child);
break;
+ }
}
return ret;
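The added of_node_put(child) fixes a reference-count leak that is typical of device-tree child iteration: the for_each_*_child_of_node() helpers take a reference on each child they hand out and drop it when moving on to the next one, so any early break must drop the current child's reference by hand. A condensed sketch of the pattern (parent and do_something are placeholders):

	struct device_node *child;
	int ret = 0;

	for_each_available_child_of_node(parent, child) {
		ret = do_something(child);
		if (ret) {
			of_node_put(child);	/* the loop will not advance and release it */
			break;
		}
	}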
diff --git a/drivers/usb/gadget/udc/bdc/Kconfig b/drivers/usb/gadget/udc/bdc/Kconfig
index 3e88c7670b2e..8bedb7f64eba 100644
--- a/drivers/usb/gadget/udc/bdc/Kconfig
+++ b/drivers/usb/gadget/udc/bdc/Kconfig
@@ -11,14 +11,3 @@ config USB_BDC_UDC
Say "y" here to link the driver statically, or "m" to build a dynamically
linked module called "bdc".
-
-if USB_BDC_UDC
-
-comment "Platform Support"
-config USB_BDC_PCI
- tristate "BDC support for PCIe based platforms"
- depends on USB_PCI
- default USB_BDC_UDC
- help
- Enable support for platforms which have BDC connected through PCIe, such as Lego3 FPGA platform.
-endif
diff --git a/drivers/usb/gadget/udc/bdc/Makefile b/drivers/usb/gadget/udc/bdc/Makefile
index 52cb5ea48bbe..1b21c9518efc 100644
--- a/drivers/usb/gadget/udc/bdc/Makefile
+++ b/drivers/usb/gadget/udc/bdc/Makefile
@@ -5,5 +5,3 @@ bdc-y := bdc_core.o bdc_cmd.o bdc_ep.o bdc_udc.o
ifneq ($(CONFIG_USB_GADGET_VERBOSE),)
bdc-y += bdc_dbg.o
endif
-
-obj-$(CONFIG_USB_BDC_PCI) += bdc_pci.o
diff --git a/drivers/usb/gadget/udc/bdc/bdc.h b/drivers/usb/gadget/udc/bdc/bdc.h
index ac75e25c3b6a..8d00b1239f21 100644
--- a/drivers/usb/gadget/udc/bdc/bdc.h
+++ b/drivers/usb/gadget/udc/bdc/bdc.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* bdc.h - header for the BRCM BDC USB3.0 device controller
*
@@ -35,7 +35,7 @@
/*
* Maximum size of ep0 response buffer for ch9 requests,
* the set_sel request uses 6 so far, the max.
-*/
+ */
#define EP0_RESPONSE_BUFF 6
/* Start with SS as default */
#define EP0_MAX_PKT_SIZE 512
@@ -86,23 +86,23 @@
#define BDC_EPSTS5 0x74
#define BDC_EPSTS6 0x78
#define BDC_EPSTS7 0x7c
-#define BDC_SRRBAL(n) (0x200 + (n * 0x10))
-#define BDC_SRRBAH(n) (0x204 + (n * 0x10))
-#define BDC_SRRINT(n) (0x208 + (n * 0x10))
-#define BDC_INTCTLS(n) (0x20c + (n * 0x10))
+#define BDC_SRRBAL(n) (0x200 + ((n) * 0x10))
+#define BDC_SRRBAH(n) (0x204 + ((n) * 0x10))
+#define BDC_SRRINT(n) (0x208 + ((n) * 0x10))
+#define BDC_INTCTLS(n) (0x20c + ((n) * 0x10))
/* Extended capability regs */
#define BDC_FSCNOC 0xcd4
#define BDC_FSCNIC 0xce4
-#define NUM_NCS(p) (p >> 28)
+#define NUM_NCS(p) ((p) >> 28)
/* Register bit fields and Masks */
/* BDC Configuration 0 */
#define BDC_PGS(p) (((p) & (0x7 << 8)) >> 8)
-#define BDC_SPB(p) (p & 0x7)
+#define BDC_SPB(p) ((p) & 0x7)
/* BDC Capability1 */
-#define BDC_P64 (1 << 0)
+#define BDC_P64 BIT(0)
/* BDC Command register */
#define BDC_CMD_FH 0xe
@@ -111,9 +111,9 @@
#define BDC_CMD_BLA 0x3
#define BDC_CMD_EPC 0x2
#define BDC_CMD_DVC 0x1
-#define BDC_CMD_CWS (0x1 << 5)
+#define BDC_CMD_CWS BIT(5)
#define BDC_CMD_CST(p) (((p) & (0xf << 6))>>6)
-#define BDC_CMD_EPN(p) ((p & 0x1f) << 10)
+#define BDC_CMD_EPN(p) (((p) & 0x1f) << 10)
#define BDC_SUB_CMD_ADD (0x1 << 17)
#define BDC_SUB_CMD_FWK (0x4 << 17)
/* Reset sequence number */
@@ -124,7 +124,7 @@
#define BDC_SUB_CMD_EP_STP (0x2 << 17)
#define BDC_SUB_CMD_EP_STL (0x4 << 17)
#define BDC_SUB_CMD_EP_RST (0x1 << 17)
-#define BDC_CMD_SRD (1 << 27)
+#define BDC_CMD_SRD BIT(27)
/* CMD completion status */
#define BDC_CMDS_SUCC 0x1
@@ -141,19 +141,19 @@
#define EPM_SHIFT 4
/* BDC USPSC */
-#define BDC_VBC (1 << 31)
-#define BDC_PRC (1 << 30)
-#define BDC_PCE (1 << 29)
-#define BDC_CFC (1 << 28)
-#define BDC_PCC (1 << 27)
-#define BDC_PSC (1 << 26)
-#define BDC_VBS (1 << 25)
-#define BDC_PRS (1 << 24)
-#define BDC_PCS (1 << 23)
+#define BDC_VBC BIT(31)
+#define BDC_PRC BIT(30)
+#define BDC_PCE BIT(29)
+#define BDC_CFC BIT(28)
+#define BDC_PCC BIT(27)
+#define BDC_PSC BIT(26)
+#define BDC_VBS BIT(25)
+#define BDC_PRS BIT(24)
+#define BDC_PCS BIT(23)
#define BDC_PSP(p) (((p) & (0x7 << 20))>>20)
-#define BDC_SCN (1 << 8)
-#define BDC_SDC (1 << 7)
-#define BDC_SWS (1 << 4)
+#define BDC_SCN BIT(8)
+#define BDC_SDC BIT(7)
+#define BDC_SWS BIT(4)
#define BDC_USPSC_RW (BDC_SCN|BDC_SDC|BDC_SWS|0xf)
#define BDC_PSP(p) (((p) & (0x7 << 20))>>20)
@@ -163,21 +163,21 @@
#define BDC_SPEED_HS 0x3
#define BDC_SPEED_SS 0x4
-#define BDC_PST(p) (p & 0xf)
+#define BDC_PST(p) ((p) & 0xf)
#define BDC_PST_MASK 0xf
/* USPPMS */
-#define BDC_U2E (0x1 << 31)
-#define BDC_U1E (0x1 << 30)
-#define BDC_U2A (0x1 << 29)
-#define BDC_PORT_W1S (0x1 << 17)
+#define BDC_U2E BIT(31)
+#define BDC_U1E BIT(30)
+#define BDC_U2A BIT(29)
+#define BDC_PORT_W1S BIT(17)
#define BDC_U1T(p) ((p) & 0xff)
#define BDC_U2T(p) (((p) & 0xff) << 8)
#define BDC_U1T_MASK 0xff
/* USBPM2 */
/* Hardware LPM Enable */
-#define BDC_HLE (1 << 16)
+#define BDC_HLE BIT(16)
/* BDC Status and Control */
#define BDC_COP_RST (1 << 29)
@@ -186,11 +186,11 @@
#define BDC_COP_MASK (BDC_COP_RST|BDC_COP_RUN|BDC_COP_STP)
-#define BDC_COS (1 << 28)
+#define BDC_COS BIT(28)
#define BDC_CSTS(p) (((p) & (0x7 << 20)) >> 20)
-#define BDC_MASK_MCW (1 << 7)
-#define BDC_GIE (1 << 1)
-#define BDC_GIP (1 << 0)
+#define BDC_MASK_MCW BIT(7)
+#define BDC_GIE BIT(1)
+#define BDC_GIP BIT(0)
#define BDC_HLT 1
#define BDC_NOR 2
@@ -201,19 +201,19 @@
#define BD_CHAIN 0xf
#define BD_TFS_SHIFT 4
-#define BD_SOT (1 << 26)
-#define BD_EOT (1 << 27)
-#define BD_ISP (1 << 29)
-#define BD_IOC (1 << 30)
-#define BD_SBF (1 << 31)
+#define BD_SOT BIT(26)
+#define BD_EOT BIT(27)
+#define BD_ISP BIT(29)
+#define BD_IOC BIT(30)
+#define BD_SBF BIT(31)
#define BD_INTR_TARGET(p) (((p) & 0x1f) << 27)
-#define BDC_SRR_RWS (1 << 4)
-#define BDC_SRR_RST (1 << 3)
-#define BDC_SRR_ISR (1 << 2)
-#define BDC_SRR_IE (1 << 1)
-#define BDC_SRR_IP (1 << 0)
+#define BDC_SRR_RWS BIT(4)
+#define BDC_SRR_RST BIT(3)
+#define BDC_SRR_ISR BIT(2)
+#define BDC_SRR_IE BIT(1)
+#define BDC_SRR_IP BIT(0)
#define BDC_SRR_EPI(p) (((p) & (0xff << 24)) >> 24)
#define BDC_SRR_DPI(p) (((p) & (0xff << 16)) >> 16)
#define BDC_SRR_DPI_MASK 0x00ff0000
@@ -221,14 +221,14 @@
#define MARK_CHAIN_BD (BD_CHAIN|BD_EOT|BD_SOT)
/* Control transfer BD specific fields */
-#define BD_DIR_IN (1 << 25)
+#define BD_DIR_IN BIT(25)
#define BDC_PTC_MASK 0xf0000000
/* status report defines */
#define SR_XSF 0
#define SR_USPC 4
-#define SR_BD_LEN(p) (p & 0xffffff)
+#define SR_BD_LEN(p) ((p) & 0xffffff)
#define XSF_SUCC 0x1
#define XSF_SHORT 0x3
@@ -241,13 +241,13 @@
/* Transfer BD fields */
#define BD_LEN(p) ((p) & 0x1ffff)
-#define BD_LTF (1 << 25)
+#define BD_LTF BIT(25)
#define BD_TYPE_DS 0x1
#define BD_TYPE_SS 0x2
-#define BDC_EP_ENABLED (1 << 0)
-#define BDC_EP_STALL (1 << 1)
-#define BDC_EP_STOP (1 << 2)
+#define BDC_EP_ENABLED BIT(0)
+#define BDC_EP_STALL BIT(1)
+#define BDC_EP_STOP BIT(2)
/* One BD can transfer max 65536 bytes */
#define BD_MAX_BUFF_SIZE (1 << 16)
@@ -266,9 +266,9 @@
/* FUNCTION WAKE DEV NOTIFICATION interval, USB3 spec table 8.13 */
#define BDC_TNOTIFY 2500 /*in ms*/
/* Devstatus bitfields */
-#define REMOTE_WAKEUP_ISSUED (1 << 16)
-#define DEVICE_SUSPENDED (1 << 17)
-#define FUNC_WAKE_ISSUED (1 << 18)
+#define REMOTE_WAKEUP_ISSUED BIT(16)
+#define DEVICE_SUSPENDED BIT(17)
+#define FUNC_WAKE_ISSUED BIT(18)
#define REMOTE_WAKE_ENABLE (1 << USB_DEVICE_REMOTE_WAKEUP)
/* On disconnect, preserve these bits and clear rest */
@@ -466,24 +466,24 @@ static inline void bdc_writel(void __iomem *base, u32 offset, u32 value)
}
/* Buffer descriptor list operations */
-void bdc_notify_xfr(struct bdc *, u32);
-void bdc_softconn(struct bdc *);
-void bdc_softdisconn(struct bdc *);
-int bdc_run(struct bdc *);
-int bdc_stop(struct bdc *);
-int bdc_reset(struct bdc *);
-int bdc_udc_init(struct bdc *);
-void bdc_udc_exit(struct bdc *);
-int bdc_reinit(struct bdc *);
+void bdc_notify_xfr(struct bdc *bdc, u32 epnum);
+void bdc_softconn(struct bdc *bdc);
+void bdc_softdisconn(struct bdc *bdc);
+int bdc_run(struct bdc *bdc);
+int bdc_stop(struct bdc *bdc);
+int bdc_reset(struct bdc *bdc);
+int bdc_udc_init(struct bdc *bdc);
+void bdc_udc_exit(struct bdc *bdc);
+int bdc_reinit(struct bdc *bdc);
/* Status report handlers */
/* Upstream port status change sr */
-void bdc_sr_uspc(struct bdc *, struct bdc_sr *);
+void bdc_sr_uspc(struct bdc *bdc, struct bdc_sr *sreport);
/* transfer sr */
-void bdc_sr_xsf(struct bdc *, struct bdc_sr *);
+void bdc_sr_xsf(struct bdc *bdc, struct bdc_sr *sreport);
/* EP0 XSF handlers */
-void bdc_xsf_ep0_setup_recv(struct bdc *, struct bdc_sr *);
-void bdc_xsf_ep0_data_start(struct bdc *, struct bdc_sr *);
-void bdc_xsf_ep0_status_start(struct bdc *, struct bdc_sr *);
+void bdc_xsf_ep0_setup_recv(struct bdc *bdc, struct bdc_sr *sreport);
+void bdc_xsf_ep0_data_start(struct bdc *bdc, struct bdc_sr *sreport);
+void bdc_xsf_ep0_status_start(struct bdc *bdc, struct bdc_sr *sreport);
#endif /* __LINUX_BDC_H__ */
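Most of this header change is macro hygiene: every macro parameter gains parentheses so that expression arguments keep their intended precedence, and open-coded (1 << n) flags become BIT(n) from <linux/bits.h>, which expands to an unsigned shift and so avoids the undefined behaviour of 1 << 31 on a signed int. A small sketch of the precedence hazard the parentheses close (macro names are illustrative):

	#define SRR_OFF_BAD(n)	(0x200 + (n * 0x10))	/* unparenthesized parameter */
	#define SRR_OFF(n)	(0x200 + ((n) * 0x10))	/* as done in the patch */

	/*
	 * With an expression argument such as i + 1:
	 *   SRR_OFF_BAD(i + 1) -> 0x200 + (i + 1 * 0x10)  == 0x210 + i   (wrong)
	 *   SRR_OFF(i + 1)     -> 0x200 + ((i + 1) * 0x10)               (intended)
	 */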
diff --git a/drivers/usb/gadget/udc/bdc/bdc_cmd.c b/drivers/usb/gadget/udc/bdc/bdc_cmd.c
index 44c2a5eef785..995f79c79f96 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_cmd.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_cmd.c
@@ -163,7 +163,7 @@ int bdc_config_ep(struct bdc *bdc, struct bdc_ep *ep)
usb_endpoint_xfer_isoc(desc)) {
param2 |= si;
if (usb_endpoint_xfer_isoc(desc) && comp_desc)
- mul = comp_desc->bmAttributes;
+ mul = comp_desc->bmAttributes;
}
param2 |= mul << EPM_SHIFT;
diff --git a/drivers/usb/gadget/udc/bdc/bdc_cmd.h b/drivers/usb/gadget/udc/bdc/bdc_cmd.h
index 29cc988a671a..533ad52fa6d0 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_cmd.h
+++ b/drivers/usb/gadget/udc/bdc/bdc_cmd.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* bdc_cmd.h - header for the BDC debug functions
*
@@ -10,15 +10,14 @@
#define __LINUX_BDC_CMD_H__
/* Command operations */
-int bdc_address_device(struct bdc *, u32);
-int bdc_config_ep(struct bdc *, struct bdc_ep *);
-int bdc_dconfig_ep(struct bdc *, struct bdc_ep *);
-int bdc_stop_ep(struct bdc *, int);
-int bdc_ep_set_stall(struct bdc *, int);
-int bdc_ep_clear_stall(struct bdc *, int);
-int bdc_ep_set_halt(struct bdc_ep *, u32 , int);
-int bdc_ep_bla(struct bdc *, struct bdc_ep *, dma_addr_t);
-int bdc_function_wake(struct bdc*, u8);
-int bdc_function_wake_fh(struct bdc*, u8);
+int bdc_address_device(struct bdc *bdc, u32 add);
+int bdc_config_ep(struct bdc *bdc, struct bdc_ep *ep);
+int bdc_dconfig_ep(struct bdc *bdc, struct bdc_ep *ep);
+int bdc_stop_ep(struct bdc *bdc, int epnum);
+int bdc_ep_set_stall(struct bdc *bdc, int epnum);
+int bdc_ep_clear_stall(struct bdc *bdc, int epnum);
+int bdc_ep_bla(struct bdc *bdc, struct bdc_ep *ep, dma_addr_t dma_addr);
+int bdc_function_wake(struct bdc *bdc, u8 intf);
+int bdc_function_wake_fh(struct bdc *bdc, u8 intf);
#endif /* __LINUX_BDC_CMD_H__ */
diff --git a/drivers/usb/gadget/udc/bdc/bdc_dbg.c b/drivers/usb/gadget/udc/bdc/bdc_dbg.c
index 7ba7448ad743..9c03e13308ca 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_dbg.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_dbg.c
@@ -68,7 +68,7 @@ void bdc_dbg_srr(struct bdc *bdc, u32 srr_num)
sr = bdc->srr.sr_bds;
addr = bdc->srr.dma_addr;
- dev_vdbg(bdc->dev, "bdc_dbg_srr sr:%p dqp_index:%d\n",
+ dev_vdbg(bdc->dev, "%s sr:%p dqp_index:%d\n", __func__,
sr, bdc->srr.dqp_index);
for (i = 0; i < NUM_SR_ENTRIES; i++) {
sr = &bdc->srr.sr_bds[i];
diff --git a/drivers/usb/gadget/udc/bdc/bdc_dbg.h b/drivers/usb/gadget/udc/bdc/bdc_dbg.h
index 373d5abffbb8..acd8332f8584 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_dbg.h
+++ b/drivers/usb/gadget/udc/bdc/bdc_dbg.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* bdc_dbg.h - header for the BDC debug functions
*
@@ -12,10 +12,10 @@
#include "bdc.h"
#ifdef CONFIG_USB_GADGET_VERBOSE
-void bdc_dbg_bd_list(struct bdc *, struct bdc_ep*);
-void bdc_dbg_srr(struct bdc *, u32);
-void bdc_dbg_regs(struct bdc *);
-void bdc_dump_epsts(struct bdc *);
+void bdc_dbg_bd_list(struct bdc *bdc, struct bdc_ep *ep);
+void bdc_dbg_srr(struct bdc *bdc, u32 srr_num);
+void bdc_dbg_regs(struct bdc *bdc);
+void bdc_dump_epsts(struct bdc *bdc);
#else
static inline void bdc_dbg_regs(struct bdc *bdc)
{ }
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.c b/drivers/usb/gadget/udc/bdc/bdc_ep.c
index fafdc9fdb4a5..8e2f20b12519 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.c
@@ -68,7 +68,7 @@ static void ep_bd_list_free(struct bdc_ep *ep, u32 num_tabs)
* check if the bd_table struct is allocated ?
* if yes, then check if bd memory has been allocated, then
* free the dma_pool and also the bd_table struct memory
- */
+ */
bd_table = bd_list->bd_table_array[index];
dev_dbg(bdc->dev, "bd_table:%p index:%d\n", bd_table, index);
if (!bd_table) {
@@ -147,7 +147,7 @@ static int ep_bd_list_alloc(struct bdc_ep *ep)
/* Allocate memory for each table */
for (index = 0; index < num_tabs; index++) {
/* Allocate memory for bd_table structure */
- bd_table = kzalloc(sizeof(struct bd_table), GFP_ATOMIC);
+ bd_table = kzalloc(sizeof(*bd_table), GFP_ATOMIC);
if (!bd_table)
goto fail;
@@ -275,7 +275,7 @@ static inline int find_end_bdi(struct bdc_ep *ep, int next_hwd_bdi)
end_bdi = next_hwd_bdi - 1;
if (end_bdi < 0)
end_bdi = ep->bd_list.max_bdi - 1;
- else if ((end_bdi % (ep->bd_list.num_bds_table-1)) == 0)
+ else if ((end_bdi % (ep->bd_list.num_bds_table-1)) == 0)
end_bdi--;
return end_bdi;
@@ -756,7 +756,7 @@ static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
dev_dbg(bdc->dev, "%s ep:%s start:%d end:%d\n",
__func__, ep->name, start_bdi, end_bdi);
- dev_dbg(bdc->dev, "ep_dequeue ep=%p ep->desc=%p\n",
+ dev_dbg(bdc->dev, "%s ep=%p ep->desc=%p\n", __func__,
ep, (void *)ep->usb_ep.desc);
/* if still connected, stop the ep to see where the HW is ? */
if (!(bdc_readl(bdc->regs, BDC_USPC) & BDC_PST_MASK)) {
@@ -795,7 +795,7 @@ static int ep_dequeue(struct bdc_ep *ep, struct bdc_req *req)
start_pending = true;
end_pending = true;
} else if (end_bdi >= curr_hw_dqpi || end_bdi <= eqp_bdi) {
- end_pending = true;
+ end_pending = true;
}
} else {
if (start_bdi >= curr_hw_dqpi) {
@@ -1405,7 +1405,7 @@ static int ep0_set_sel(struct bdc *bdc,
}
/*
- * Queue a 0 byte bd only if wLength is more than the length and and length is
+ * Queue a 0 byte bd only if wLength is more than the length and length is
* a multiple of MaxPacket then queue 0 byte BD
*/
static int ep0_queue_zlp(struct bdc *bdc)
@@ -1858,12 +1858,12 @@ static int bdc_gadget_ep_enable(struct usb_ep *_ep,
int ret;
if (!_ep || !desc || desc->bDescriptorType != USB_DT_ENDPOINT) {
- pr_debug("bdc_gadget_ep_enable invalid parameters\n");
+ pr_debug("%s invalid parameters\n", __func__);
return -EINVAL;
}
if (!desc->wMaxPacketSize) {
- pr_debug("bdc_gadget_ep_enable missing wMaxPacketSize\n");
+ pr_debug("%s missing wMaxPacketSize\n", __func__);
return -EINVAL;
}
diff --git a/drivers/usb/gadget/udc/bdc/bdc_ep.h b/drivers/usb/gadget/udc/bdc/bdc_ep.h
index a37ff8033b4f..4d3affd07a65 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_ep.h
+++ b/drivers/usb/gadget/udc/bdc/bdc_ep.h
@@ -1,4 +1,4 @@
-// SPDX-License-Identifier: GPL-2.0+
+/* SPDX-License-Identifier: GPL-2.0+ */
/*
* bdc_ep.h - header for the BDC debug functions
*
@@ -9,9 +9,9 @@
#ifndef __LINUX_BDC_EP_H__
#define __LINUX_BDC_EP_H__
-int bdc_init_ep(struct bdc *);
-int bdc_ep_disable(struct bdc_ep *);
-int bdc_ep_enable(struct bdc_ep *);
-void bdc_free_ep(struct bdc *);
+int bdc_init_ep(struct bdc *bdc);
+int bdc_ep_disable(struct bdc_ep *ep);
+int bdc_ep_enable(struct bdc_ep *ep);
+void bdc_free_ep(struct bdc *bdc);
#endif /* __LINUX_BDC_EP_H__ */
diff --git a/drivers/usb/gadget/udc/bdc/bdc_pci.c b/drivers/usb/gadget/udc/bdc/bdc_pci.c
deleted file mode 100644
index 6dbc489513cd..000000000000
--- a/drivers/usb/gadget/udc/bdc/bdc_pci.c
+++ /dev/null
@@ -1,128 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * bdc_pci.c - BRCM BDC USB3.0 device controller PCI interface file.
- *
- * Copyright (C) 2014 Broadcom Corporation
- *
- * Author: Ashwini Pahuja
- *
- * Based on drivers under drivers/usb/
- */
-
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/slab.h>
-#include <linux/pci.h>
-#include <linux/pci_ids.h>
-#include <linux/platform_device.h>
-
-#include "bdc.h"
-
-#define BDC_PCI_PID 0x1570
-
-struct bdc_pci {
- struct device *dev;
- struct platform_device *bdc;
-};
-
-static int bdc_setup_msi(struct pci_dev *pci)
-{
- int ret;
-
- ret = pci_enable_msi(pci);
- if (ret) {
- pr_err("failed to allocate MSI entry\n");
- return ret;
- }
-
- return ret;
-}
-
-static int bdc_pci_probe(struct pci_dev *pci, const struct pci_device_id *id)
-{
- struct resource res[2];
- struct platform_device *bdc;
- struct bdc_pci *glue;
- int ret = -ENOMEM;
-
- glue = devm_kzalloc(&pci->dev, sizeof(*glue), GFP_KERNEL);
- if (!glue)
- return -ENOMEM;
-
- glue->dev = &pci->dev;
- ret = pci_enable_device(pci);
- if (ret) {
- dev_err(&pci->dev, "failed to enable pci device\n");
- return -ENODEV;
- }
- pci_set_master(pci);
-
- bdc = platform_device_alloc(BRCM_BDC_NAME, PLATFORM_DEVID_AUTO);
- if (!bdc)
- return -ENOMEM;
-
- memset(res, 0x00, sizeof(struct resource) * ARRAY_SIZE(res));
- bdc_setup_msi(pci);
-
- res[0].start = pci_resource_start(pci, 0);
- res[0].end = pci_resource_end(pci, 0);
- res[0].name = BRCM_BDC_NAME;
- res[0].flags = IORESOURCE_MEM;
-
- res[1].start = pci->irq;
- res[1].name = BRCM_BDC_NAME;
- res[1].flags = IORESOURCE_IRQ;
-
- ret = platform_device_add_resources(bdc, res, ARRAY_SIZE(res));
- if (ret) {
- dev_err(&pci->dev,
- "couldn't add resources to bdc device\n");
- platform_device_put(bdc);
- return ret;
- }
-
- pci_set_drvdata(pci, glue);
-
- dma_set_coherent_mask(&bdc->dev, pci->dev.coherent_dma_mask);
-
- bdc->dev.dma_mask = pci->dev.dma_mask;
- bdc->dev.dma_parms = pci->dev.dma_parms;
- bdc->dev.parent = &pci->dev;
- glue->bdc = bdc;
-
- ret = platform_device_add(bdc);
- if (ret) {
- dev_err(&pci->dev, "failed to register bdc device\n");
- platform_device_put(bdc);
- return ret;
- }
-
- return 0;
-}
-
-static void bdc_pci_remove(struct pci_dev *pci)
-{
- struct bdc_pci *glue = pci_get_drvdata(pci);
-
- platform_device_unregister(glue->bdc);
- pci_disable_msi(pci);
-}
-
-static struct pci_device_id bdc_pci_id_table[] = {
- { PCI_DEVICE(PCI_VENDOR_ID_BROADCOM, BDC_PCI_PID), },
- {} /* Terminating Entry */
-};
-
-MODULE_DEVICE_TABLE(pci, bdc_pci_id_table);
-
-static struct pci_driver bdc_pci_driver = {
- .name = "bdc-pci",
- .id_table = bdc_pci_id_table,
- .probe = bdc_pci_probe,
- .remove = bdc_pci_remove,
-};
-
-MODULE_AUTHOR("Ashwini Pahuja <ashwini.linux@gmail.com>");
-MODULE_LICENSE("GPL");
-MODULE_DESCRIPTION("BRCM BDC USB3 PCI Glue layer");
-module_pci_driver(bdc_pci_driver);
diff --git a/drivers/usb/gadget/udc/bdc/bdc_udc.c b/drivers/usb/gadget/udc/bdc/bdc_udc.c
index 248426a3e88a..5ac0ef88334e 100644
--- a/drivers/usb/gadget/udc/bdc/bdc_udc.c
+++ b/drivers/usb/gadget/udc/bdc/bdc_udc.c
@@ -164,7 +164,7 @@ static void bdc_func_wake_timer(struct work_struct *work)
/*
* Check if host has started transferring on endpoints
* FUNC_WAKE_ISSUED is cleared when transfer has started after resume
- */
+ */
if (bdc->devstatus & FUNC_WAKE_ISSUED) {
dev_dbg(bdc->dev, "FUNC_WAKE_ISSUED FLAG IS STILL SET\n");
/* flag is still set, so again send func wake */
@@ -205,7 +205,7 @@ static void handle_link_state_change(struct bdc *bdc, u32 uspc)
* if not then send function wake again every
* TNotification secs until host initiates
* transfer to BDC, USB3 spec Table 8.13
- */
+ */
schedule_delayed_work(
&bdc->func_wake_notify,
msecs_to_jiffies(BDC_TNOTIFY));
@@ -379,7 +379,7 @@ static int bdc_udc_start(struct usb_gadget *gadget,
* Run the controller from here and when BDC is connected to
* Host then driver will receive a USPC SR with VBUS present
* and then driver will do a softconnect.
- */
+ */
ret = bdc_run(bdc);
if (ret) {
dev_err(bdc->dev, "%s bdc run fail\n", __func__);
@@ -530,7 +530,7 @@ int bdc_udc_init(struct bdc *bdc)
bdc->gadget.name = BRCM_BDC_NAME;
ret = devm_request_irq(bdc->dev, bdc->irq, bdc_udc_interrupt,
- IRQF_SHARED , BRCM_BDC_NAME, bdc);
+ IRQF_SHARED, BRCM_BDC_NAME, bdc);
if (ret) {
dev_err(bdc->dev,
"failed to request irq #%d %d\n",
diff --git a/drivers/usb/gadget/udc/core.c b/drivers/usb/gadget/udc/core.c
index 5b5cfeb6c14a..493ff93f7dda 100644
--- a/drivers/usb/gadget/udc/core.c
+++ b/drivers/usb/gadget/udc/core.c
@@ -29,6 +29,7 @@
* @list: for use by the udc class driver
* @vbus: for udcs who care about vbus status, this value is real vbus status;
* for udcs who do not care about vbus status, this value is always true
+ * @started: the UDC's started state. True if the UDC has started.
*
* This represents the internal data structure which is used by the UDC-class
* to hold information about udc driver and gadget together.
@@ -39,6 +40,7 @@ struct usb_udc {
struct device dev;
struct list_head list;
bool vbus;
+ bool started;
};
static struct class *udc_class;
@@ -659,8 +661,7 @@ EXPORT_SYMBOL_GPL(usb_gadget_vbus_disconnect);
*
* Enables the D+ (or potentially D-) pullup. The host will start
* enumerating this gadget when the pullup is active and a VBUS session
- * is active (the link is powered). This pullup is always enabled unless
- * usb_gadget_disconnect() has been used to disable it.
+ * is active (the link is powered).
*
* Returns zero on success, else negative errno.
*/
@@ -1083,7 +1084,18 @@ EXPORT_SYMBOL_GPL(usb_gadget_udc_reset);
*/
static inline int usb_gadget_udc_start(struct usb_udc *udc)
{
- return udc->gadget->ops->udc_start(udc->gadget, udc->driver);
+ int ret;
+
+ if (udc->started) {
+ dev_err(&udc->dev, "UDC had already started\n");
+ return -EBUSY;
+ }
+
+ ret = udc->gadget->ops->udc_start(udc->gadget, udc->driver);
+ if (!ret)
+ udc->started = true;
+
+ return ret;
}
/**
@@ -1099,7 +1111,13 @@ static inline int usb_gadget_udc_start(struct usb_udc *udc)
*/
static inline void usb_gadget_udc_stop(struct usb_udc *udc)
{
+ if (!udc->started) {
+ dev_err(&udc->dev, "UDC had already stopped\n");
+ return;
+ }
+
udc->gadget->ops->udc_stop(udc->gadget);
+ udc->started = false;
}
/**
@@ -1115,12 +1133,18 @@ static inline void usb_gadget_udc_stop(struct usb_udc *udc)
static inline void usb_gadget_udc_set_speed(struct usb_udc *udc,
enum usb_device_speed speed)
{
- if (udc->gadget->ops->udc_set_speed) {
- enum usb_device_speed s;
+ struct usb_gadget *gadget = udc->gadget;
+ enum usb_device_speed s;
- s = min(speed, udc->gadget->max_speed);
- udc->gadget->ops->udc_set_speed(udc->gadget, s);
- }
+ if (speed == USB_SPEED_UNKNOWN)
+ s = gadget->max_speed;
+ else
+ s = min(speed, gadget->max_speed);
+
+ if (s == USB_SPEED_SUPER_PLUS && gadget->ops->udc_set_ssp_rate)
+ gadget->ops->udc_set_ssp_rate(gadget, gadget->max_ssp_rate);
+ else if (gadget->ops->udc_set_speed)
+ gadget->ops->udc_set_speed(gadget, s);
}
/**
@@ -1223,6 +1247,8 @@ int usb_add_gadget(struct usb_gadget *gadget)
udc->gadget = gadget;
gadget->udc = udc;
+ udc->started = false;
+
mutex_lock(&udc_lock);
list_add_tail(&udc->list, &udc_list);
@@ -1530,10 +1556,13 @@ static ssize_t soft_connect_store(struct device *dev,
struct device_attribute *attr, const char *buf, size_t n)
{
struct usb_udc *udc = container_of(dev, struct usb_udc, dev);
+ ssize_t ret;
+ mutex_lock(&udc_lock);
if (!udc->driver) {
dev_err(dev, "soft-connect without a gadget driver\n");
- return -EOPNOTSUPP;
+ ret = -EOPNOTSUPP;
+ goto out;
}
if (sysfs_streq(buf, "connect")) {
@@ -1544,10 +1573,14 @@ static ssize_t soft_connect_store(struct device *dev,
usb_gadget_udc_stop(udc);
} else {
dev_err(dev, "unsupported command '%s'\n", buf);
- return -EINVAL;
+ ret = -EINVAL;
+ goto out;
}
- return n;
+ ret = n;
+out:
+ mutex_unlock(&udc_lock);
+ return ret;
}
static DEVICE_ATTR_WO(soft_connect);
diff --git a/drivers/usb/gadget/udc/dummy_hcd.c b/drivers/usb/gadget/udc/dummy_hcd.c
index ab5e978b5052..57067763b100 100644
--- a/drivers/usb/gadget/udc/dummy_hcd.c
+++ b/drivers/usb/gadget/udc/dummy_hcd.c
@@ -2118,9 +2118,21 @@ static int dummy_hub_control(
dum_hcd->port_status &= ~USB_PORT_STAT_POWER;
set_link_state(dum_hcd);
break;
- default:
+ case USB_PORT_FEAT_ENABLE:
+ case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_SUSPEND:
+ /* Not allowed for USB-3 */
+ if (hcd->speed == HCD_USB3)
+ goto error;
+ fallthrough;
+ case USB_PORT_FEAT_C_CONNECTION:
+ case USB_PORT_FEAT_C_RESET:
dum_hcd->port_status &= ~(1 << wValue);
set_link_state(dum_hcd);
+ break;
+ default:
+ /* Disallow INDICATOR and C_OVER_CURRENT */
+ goto error;
}
break;
case GetHubDescriptor:
@@ -2258,17 +2270,20 @@ static int dummy_hub_control(
}
fallthrough;
case USB_PORT_FEAT_RESET:
+ if (!(dum_hcd->port_status & USB_PORT_STAT_CONNECTION))
+ break;
/* if it's already enabled, disable */
if (hcd->speed == HCD_USB3) {
- dum_hcd->port_status = 0;
dum_hcd->port_status =
(USB_SS_PORT_STAT_POWER |
USB_PORT_STAT_CONNECTION |
USB_PORT_STAT_RESET);
- } else
+ } else {
dum_hcd->port_status &= ~(USB_PORT_STAT_ENABLE
| USB_PORT_STAT_LOW_SPEED
| USB_PORT_STAT_HIGH_SPEED);
+ dum_hcd->port_status |= USB_PORT_STAT_RESET;
+ }
/*
* We want to reset device status. All but the
* Self powered feature
@@ -2280,19 +2295,19 @@ static int dummy_hub_control(
* interval? Is it still 50msec as for HS?
*/
dum_hcd->re_timeout = jiffies + msecs_to_jiffies(50);
- fallthrough;
- default:
- if (hcd->speed == HCD_USB3) {
- if ((dum_hcd->port_status &
- USB_SS_PORT_STAT_POWER) != 0) {
- dum_hcd->port_status |= (1 << wValue);
- }
- } else
- if ((dum_hcd->port_status &
- USB_PORT_STAT_POWER) != 0) {
- dum_hcd->port_status |= (1 << wValue);
- }
set_link_state(dum_hcd);
+ break;
+ case USB_PORT_FEAT_C_CONNECTION:
+ case USB_PORT_FEAT_C_RESET:
+ case USB_PORT_FEAT_C_ENABLE:
+ case USB_PORT_FEAT_C_SUSPEND:
+ /* Not allowed for USB-3, and ignored for USB-2 */
+ if (hcd->speed == HCD_USB3)
+ goto error;
+ break;
+ default:
+ /* Disallow TEST, INDICATOR, and C_OVER_CURRENT */
+ goto error;
}
break;
case GetPortErrorCount:
diff --git a/drivers/usb/gadget/udc/fsl_mxc_udc.c b/drivers/usb/gadget/udc/fsl_mxc_udc.c
deleted file mode 100644
index 5a321992decc..000000000000
--- a/drivers/usb/gadget/udc/fsl_mxc_udc.c
+++ /dev/null
@@ -1,122 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * Copyright (C) 2009
- * Guennadi Liakhovetski, DENX Software Engineering, <lg@denx.de>
- *
- * Description:
- * Helper routines for i.MX3x SoCs from Freescale, needed by the fsl_usb2_udc.c
- * driver to function correctly on these systems.
- */
-#include <linux/clk.h>
-#include <linux/delay.h>
-#include <linux/err.h>
-#include <linux/fsl_devices.h>
-#include <linux/mod_devicetable.h>
-#include <linux/platform_device.h>
-#include <linux/io.h>
-
-#include "fsl_usb2_udc.h"
-
-static struct clk *mxc_ahb_clk;
-static struct clk *mxc_per_clk;
-static struct clk *mxc_ipg_clk;
-
-/* workaround ENGcm09152 for i.MX35 */
-#define MX35_USBPHYCTRL_OFFSET 0x600
-#define USBPHYCTRL_OTGBASE_OFFSET 0x8
-#define USBPHYCTRL_EVDO (1 << 23)
-
-int fsl_udc_clk_init(struct platform_device *pdev)
-{
- struct fsl_usb2_platform_data *pdata;
- unsigned long freq;
- int ret;
-
- pdata = dev_get_platdata(&pdev->dev);
-
- mxc_ipg_clk = devm_clk_get(&pdev->dev, "ipg");
- if (IS_ERR(mxc_ipg_clk)) {
- dev_err(&pdev->dev, "clk_get(\"ipg\") failed\n");
- return PTR_ERR(mxc_ipg_clk);
- }
-
- mxc_ahb_clk = devm_clk_get(&pdev->dev, "ahb");
- if (IS_ERR(mxc_ahb_clk)) {
- dev_err(&pdev->dev, "clk_get(\"ahb\") failed\n");
- return PTR_ERR(mxc_ahb_clk);
- }
-
- mxc_per_clk = devm_clk_get(&pdev->dev, "per");
- if (IS_ERR(mxc_per_clk)) {
- dev_err(&pdev->dev, "clk_get(\"per\") failed\n");
- return PTR_ERR(mxc_per_clk);
- }
-
- clk_prepare_enable(mxc_ipg_clk);
- clk_prepare_enable(mxc_ahb_clk);
- clk_prepare_enable(mxc_per_clk);
-
- /* make sure USB_CLK is running at 60 MHz +/- 1000 Hz */
- if (!strcmp(pdev->id_entry->name, "imx-udc-mx27")) {
- freq = clk_get_rate(mxc_per_clk);
- if (pdata->phy_mode != FSL_USB2_PHY_ULPI &&
- (freq < 59999000 || freq > 60001000)) {
- dev_err(&pdev->dev, "USB_CLK=%lu, should be 60MHz\n", freq);
- ret = -EINVAL;
- goto eclkrate;
- }
- }
-
- return 0;
-
-eclkrate:
- clk_disable_unprepare(mxc_ipg_clk);
- clk_disable_unprepare(mxc_ahb_clk);
- clk_disable_unprepare(mxc_per_clk);
- mxc_per_clk = NULL;
- return ret;
-}
-
-int fsl_udc_clk_finalize(struct platform_device *pdev)
-{
- struct fsl_usb2_platform_data *pdata = dev_get_platdata(&pdev->dev);
- int ret = 0;
-
- /* workaround ENGcm09152 for i.MX35 */
- if (pdata->workaround & FLS_USB2_WORKAROUND_ENGCM09152) {
- unsigned int v;
- struct resource *res = platform_get_resource
- (pdev, IORESOURCE_MEM, 0);
- void __iomem *phy_regs = ioremap(res->start +
- MX35_USBPHYCTRL_OFFSET, 512);
- if (!phy_regs) {
- dev_err(&pdev->dev, "ioremap for phy address fails\n");
- ret = -EINVAL;
- goto ioremap_err;
- }
-
- v = readl(phy_regs + USBPHYCTRL_OTGBASE_OFFSET);
- writel(v | USBPHYCTRL_EVDO,
- phy_regs + USBPHYCTRL_OTGBASE_OFFSET);
-
- iounmap(phy_regs);
- }
-
-
-ioremap_err:
- /* ULPI transceivers don't need usbpll */
- if (pdata->phy_mode == FSL_USB2_PHY_ULPI) {
- clk_disable_unprepare(mxc_per_clk);
- mxc_per_clk = NULL;
- }
-
- return ret;
-}
-
-void fsl_udc_clk_release(void)
-{
- if (mxc_per_clk)
- clk_disable_unprepare(mxc_per_clk);
- clk_disable_unprepare(mxc_ahb_clk);
- clk_disable_unprepare(mxc_ipg_clk);
-}
diff --git a/drivers/usb/gadget/udc/snps_udc_core.c b/drivers/usb/gadget/udc/snps_udc_core.c
index 6c726d2e1788..d046c09fa566 100644
--- a/drivers/usb/gadget/udc/snps_udc_core.c
+++ b/drivers/usb/gadget/udc/snps_udc_core.c
@@ -36,7 +36,6 @@
#include <asm/unaligned.h>
#include "amd5536udc.h"
-static void udc_tasklet_disconnect(unsigned long);
static void udc_setup_endpoints(struct udc *dev);
static void udc_soft_reset(struct udc *dev);
static struct udc_request *udc_alloc_bna_dummy(struct udc_ep *ep);
@@ -95,9 +94,6 @@ static struct timer_list udc_pollstall_timer;
static int stop_pollstall_timer;
static DECLARE_COMPLETION(on_pollstall_exit);
-/* tasklet for usb disconnect */
-static DECLARE_TASKLET_OLD(disconnect_tasklet, udc_tasklet_disconnect);
-
/* endpoint names used for print */
static const char ep0_string[] = "ep0in";
static const struct {
@@ -1637,6 +1633,8 @@ static void usb_connect(struct udc *dev)
*/
static void usb_disconnect(struct udc *dev)
{
+ u32 tmp;
+
/* Return if already disconnected */
if (!dev->connected)
return;
@@ -1648,23 +1646,6 @@ static void usb_disconnect(struct udc *dev)
/* mask interrupts */
udc_mask_unused_interrupts(dev);
- /* REVISIT there doesn't seem to be a point to having this
- * talk to a tasklet ... do it directly, we already hold
- * the spinlock needed to process the disconnect.
- */
-
- tasklet_schedule(&disconnect_tasklet);
-}
-
-/* Tasklet for disconnect to be outside of interrupt context */
-static void udc_tasklet_disconnect(unsigned long par)
-{
- struct udc *dev = udc;
- u32 tmp;
-
- DBG(dev, "Tasklet disconnect\n");
- spin_lock_irq(&dev->lock);
-
if (dev->driver) {
spin_unlock(&dev->lock);
dev->driver->disconnect(&dev->gadget);
@@ -1673,13 +1654,10 @@ static void udc_tasklet_disconnect(unsigned long par)
/* empty queues */
for (tmp = 0; tmp < UDC_EP_NUM; tmp++)
empty_req_queue(&dev->ep[tmp]);
-
}
/* disable ep0 */
- ep_init(dev->regs,
- &dev->ep[UDC_EP0IN_IX]);
-
+ ep_init(dev->regs, &dev->ep[UDC_EP0IN_IX]);
if (!soft_reset_occured) {
/* init controller by soft reset */
@@ -1695,8 +1673,6 @@ static void udc_tasklet_disconnect(unsigned long par)
tmp = AMD_ADDBITS(tmp, UDC_DEVCFG_SPD_FS, UDC_DEVCFG_SPD);
writel(tmp, &dev->regs->cfg);
}
-
- spin_unlock_irq(&dev->lock);
}
/* Reset the UDC core */
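With the tasklet gone, usb_disconnect() performs the cleanup inline, in its caller's context and under the already-held dev->lock. The lock is still dropped around the gadget driver's ->disconnect() callback, because the bound driver may call back into the UDC (endpoint disable, request dequeue) and try to take the same lock; the pattern reduces to:

	/* dev->lock is held by the caller of usb_disconnect() */
	if (dev->driver) {
		spin_unlock(&dev->lock);
		dev->driver->disconnect(&dev->gadget);	/* may re-enter the UDC */
		spin_lock(&dev->lock);
	}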
diff --git a/drivers/usb/gadget/udc/udc-xilinx.c b/drivers/usb/gadget/udc/udc-xilinx.c
index d5e9d20c097d..77610b5f7db5 100644
--- a/drivers/usb/gadget/udc/udc-xilinx.c
+++ b/drivers/usb/gadget/udc/udc-xilinx.c
@@ -1954,7 +1954,7 @@ static void xudc_nonctrl_ep_handler(struct xusb_udc *udc, u8 epnum,
if (intrstatus & (XUSB_STATUS_EP0_BUFF1_COMP_MASK << epnum))
ep->buffer0ready = 0;
if (intrstatus & (XUSB_STATUS_EP0_BUFF2_COMP_MASK << epnum))
- ep->buffer1ready = 0;
+ ep->buffer1ready = false;
if (list_empty(&ep->queue))
return;
diff --git a/drivers/usb/host/Kconfig b/drivers/usb/host/Kconfig
index 31e59309da1f..b94f2a070c05 100644
--- a/drivers/usb/host/Kconfig
+++ b/drivers/usb/host/Kconfig
@@ -269,10 +269,14 @@ config USB_EHCI_HCD_AT91
config USB_EHCI_TEGRA
tristate "NVIDIA Tegra HCD support"
depends on ARCH_TEGRA
- select USB_EHCI_ROOT_HUB_TT
- select USB_TEGRA_PHY
+ select USB_CHIPIDEA
+ select USB_CHIPIDEA_HOST
+ select USB_CHIPIDEA_TEGRA
help
- This driver enables support for the internal USB Host Controllers
+ This option is deprecated; the driver has been removed. Use
+ USB_CHIPIDEA_TEGRA instead.
+
+ Enable support for the internal USB Host Controllers
found in NVIDIA Tegra SoCs. The controllers are EHCI compliant.
config USB_EHCI_HCD_PPC_OF
diff --git a/drivers/usb/host/Makefile b/drivers/usb/host/Makefile
index c1b08703af10..3e4d298d851f 100644
--- a/drivers/usb/host/Makefile
+++ b/drivers/usb/host/Makefile
@@ -47,7 +47,6 @@ obj-$(CONFIG_USB_EHCI_HCD_SPEAR) += ehci-spear.o
obj-$(CONFIG_USB_EHCI_HCD_STI) += ehci-st.o
obj-$(CONFIG_USB_EHCI_EXYNOS) += ehci-exynos.o
obj-$(CONFIG_USB_EHCI_HCD_AT91) += ehci-atmel.o
-obj-$(CONFIG_USB_EHCI_TEGRA) += ehci-tegra.o
obj-$(CONFIG_USB_EHCI_BRCMSTB) += ehci-brcm.o
obj-$(CONFIG_USB_OXU210HP_HCD) += oxu210hp-hcd.o
diff --git a/drivers/usb/host/ehci-hcd.c b/drivers/usb/host/ehci-hcd.c
index e358ae17d51e..1926b328b6aa 100644
--- a/drivers/usb/host/ehci-hcd.c
+++ b/drivers/usb/host/ehci-hcd.c
@@ -574,6 +574,7 @@ static int ehci_run (struct usb_hcd *hcd)
struct ehci_hcd *ehci = hcd_to_ehci (hcd);
u32 temp;
u32 hcc_params;
+ int rc;
hcd->uses_new_polling = 1;
@@ -629,9 +630,20 @@ static int ehci_run (struct usb_hcd *hcd)
down_write(&ehci_cf_port_reset_rwsem);
ehci->rh_state = EHCI_RH_RUNNING;
ehci_writel(ehci, FLAG_CF, &ehci->regs->configured_flag);
+
+ /* Wait until HC becomes operational */
ehci_readl(ehci, &ehci->regs->command); /* unblock posted writes */
msleep(5);
+ rc = ehci_handshake(ehci, &ehci->regs->status, STS_HALT, 0, 100 * 1000);
+
up_write(&ehci_cf_port_reset_rwsem);
+
+ if (rc) {
+ ehci_err(ehci, "USB %x.%x, controller refused to start: %d\n",
+ ((ehci->sbrn & 0xf0)>>4), (ehci->sbrn & 0x0f), rc);
+ return rc;
+ }
+
ehci->last_periodic_enable = ktime_get_real();
temp = HC_VERSION(ehci, ehci_readl(ehci, &ehci->caps->hc_capbase));
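The added ehci_handshake() call polls a memory-mapped register until the masked bits match the expected value or the timeout (in microseconds) expires; here it waits up to 100 ms for STS_HALT to clear, i.e. for the controller to actually leave the halted state once the configured flag is set. The helper reduces to a poll loop of roughly this shape (a simplified sketch, not the exact implementation):

	static int handshake(struct ehci_hcd *ehci, void __iomem *ptr,
			     u32 mask, u32 done, int usec)
	{
		u32 result;

		do {
			result = ehci_readl(ehci, ptr);
			if ((result & mask) == done)
				return 0;
			udelay(1);
		} while (--usec > 0);

		return -ETIMEDOUT;
	}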
diff --git a/drivers/usb/host/ehci-hub.c b/drivers/usb/host/ehci-hub.c
index 087402aec5cb..9f9ab5ccea88 100644
--- a/drivers/usb/host/ehci-hub.c
+++ b/drivers/usb/host/ehci-hub.c
@@ -345,6 +345,9 @@ static int ehci_bus_suspend (struct usb_hcd *hcd)
unlink_empty_async_suspended(ehci);
+ /* Some Synopsys controllers mistakenly leave IAA turned on */
+ ehci_writel(ehci, STS_IAA, &ehci->regs->status);
+
/* Any IAA cycle that started before the suspend is now invalid */
end_iaa_cycle(ehci);
ehci_handle_start_intr_unlinks(ehci);
diff --git a/drivers/usb/host/ehci-tegra.c b/drivers/usb/host/ehci-tegra.c
deleted file mode 100644
index 869d9c4de5fc..000000000000
--- a/drivers/usb/host/ehci-tegra.c
+++ /dev/null
@@ -1,604 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0+
-/*
- * EHCI-compliant USB host controller driver for NVIDIA Tegra SoCs
- *
- * Copyright (C) 2010 Google, Inc.
- * Copyright (C) 2009 - 2013 NVIDIA Corporation
- */
-
-#include <linux/clk.h>
-#include <linux/dma-mapping.h>
-#include <linux/err.h>
-#include <linux/gpio.h>
-#include <linux/io.h>
-#include <linux/irq.h>
-#include <linux/module.h>
-#include <linux/of.h>
-#include <linux/of_device.h>
-#include <linux/of_gpio.h>
-#include <linux/platform_device.h>
-#include <linux/pm_runtime.h>
-#include <linux/reset.h>
-#include <linux/slab.h>
-#include <linux/usb/ehci_def.h>
-#include <linux/usb/tegra_usb_phy.h>
-#include <linux/usb.h>
-#include <linux/usb/hcd.h>
-#include <linux/usb/otg.h>
-
-#include "ehci.h"
-
-#define PORT_WAKE_BITS (PORT_WKOC_E|PORT_WKDISC_E|PORT_WKCONN_E)
-
-#define TEGRA_USB_DMA_ALIGN 32
-
-#define DRIVER_DESC "Tegra EHCI driver"
-#define DRV_NAME "tegra-ehci"
-
-static struct hc_driver __read_mostly tegra_ehci_hc_driver;
-
-struct tegra_ehci_soc_config {
- bool has_hostpc;
-};
-
-struct tegra_ehci_hcd {
- struct clk *clk;
- struct reset_control *rst;
- int port_resuming;
- bool needs_double_reset;
-};
-
-static int tegra_reset_usb_controller(struct platform_device *pdev)
-{
- struct device_node *phy_np;
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
- struct tegra_ehci_hcd *tegra =
- (struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv;
- struct reset_control *rst;
- int err;
-
- phy_np = of_parse_phandle(pdev->dev.of_node, "nvidia,phy", 0);
- if (!phy_np)
- return -ENOENT;
-
- /*
- * The 1st USB controller contains some UTMI pad registers that are
- * global for all the controllers on the chip. Those registers are
- * also cleared when reset is asserted to the 1st controller.
- */
- rst = of_reset_control_get_shared(phy_np, "utmi-pads");
- if (IS_ERR(rst)) {
- dev_warn(&pdev->dev,
- "can't get utmi-pads reset from the PHY\n");
- dev_warn(&pdev->dev,
- "continuing, but please update your DT\n");
- } else {
- /*
- * PHY driver performs UTMI-pads reset in a case of
- * non-legacy DT.
- */
- reset_control_put(rst);
- }
-
- of_node_put(phy_np);
-
- /* reset control is shared, hence initialize it first */
- err = reset_control_deassert(tegra->rst);
- if (err)
- return err;
-
- err = reset_control_assert(tegra->rst);
- if (err)
- return err;
-
- udelay(1);
-
- err = reset_control_deassert(tegra->rst);
- if (err)
- return err;
-
- return 0;
-}
-
-static int tegra_ehci_internal_port_reset(
- struct ehci_hcd *ehci,
- u32 __iomem *portsc_reg
-)
-{
- u32 temp;
- unsigned long flags;
- int retval = 0;
- int i, tries;
- u32 saved_usbintr;
-
- spin_lock_irqsave(&ehci->lock, flags);
- saved_usbintr = ehci_readl(ehci, &ehci->regs->intr_enable);
- /* disable USB interrupt */
- ehci_writel(ehci, 0, &ehci->regs->intr_enable);
- spin_unlock_irqrestore(&ehci->lock, flags);
-
- /*
- * Here we have to do Port Reset at most twice for
- * Port Enable bit to be set.
- */
- for (i = 0; i < 2; i++) {
- temp = ehci_readl(ehci, portsc_reg);
- temp |= PORT_RESET;
- ehci_writel(ehci, temp, portsc_reg);
- mdelay(10);
- temp &= ~PORT_RESET;
- ehci_writel(ehci, temp, portsc_reg);
- mdelay(1);
- tries = 100;
- do {
- mdelay(1);
- /*
- * Up to this point, Port Enable bit is
- * expected to be set after 2 ms waiting.
- * USB1 usually takes extra 45 ms, for safety,
- * we take 100 ms as timeout.
- */
- temp = ehci_readl(ehci, portsc_reg);
- } while (!(temp & PORT_PE) && tries--);
- if (temp & PORT_PE)
- break;
- }
- if (i == 2)
- retval = -ETIMEDOUT;
-
- /*
- * Clear Connect Status Change bit if it's set.
- * We can't clear PORT_PEC. It will also cause PORT_PE to be cleared.
- */
- if (temp & PORT_CSC)
- ehci_writel(ehci, PORT_CSC, portsc_reg);
-
- /*
- * Write to clear any interrupt status bits that might be set
- * during port reset.
- */
- temp = ehci_readl(ehci, &ehci->regs->status);
- ehci_writel(ehci, temp, &ehci->regs->status);
-
- /* restore original interrupt enable bits */
- ehci_writel(ehci, saved_usbintr, &ehci->regs->intr_enable);
- return retval;
-}
-
-static int tegra_ehci_hub_control(
- struct usb_hcd *hcd,
- u16 typeReq,
- u16 wValue,
- u16 wIndex,
- char *buf,
- u16 wLength
-)
-{
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- struct tegra_ehci_hcd *tegra = (struct tegra_ehci_hcd *)ehci->priv;
- u32 __iomem *status_reg;
- u32 temp;
- unsigned long flags;
- int retval = 0;
-
- status_reg = &ehci->regs->port_status[(wIndex & 0xff) - 1];
-
- spin_lock_irqsave(&ehci->lock, flags);
-
- if (typeReq == GetPortStatus) {
- temp = ehci_readl(ehci, status_reg);
- if (tegra->port_resuming && !(temp & PORT_SUSPEND)) {
- /* Resume completed, re-enable disconnect detection */
- tegra->port_resuming = 0;
- tegra_usb_phy_postresume(hcd->usb_phy);
- }
- }
-
- else if (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_SUSPEND) {
- temp = ehci_readl(ehci, status_reg);
- if ((temp & PORT_PE) == 0 || (temp & PORT_RESET) != 0) {
- retval = -EPIPE;
- goto done;
- }
-
- temp &= ~(PORT_RWC_BITS | PORT_WKCONN_E);
- temp |= PORT_WKDISC_E | PORT_WKOC_E;
- ehci_writel(ehci, temp | PORT_SUSPEND, status_reg);
-
- /*
- * If a transaction is in progress, there may be a delay in
- * suspending the port. Poll until the port is suspended.
- */
- if (ehci_handshake(ehci, status_reg, PORT_SUSPEND,
- PORT_SUSPEND, 5000))
- pr_err("%s: timeout waiting for SUSPEND\n", __func__);
-
- set_bit((wIndex & 0xff) - 1, &ehci->suspended_ports);
- goto done;
- }
-
- /* For USB1 port we need to issue Port Reset twice internally */
- if (tegra->needs_double_reset &&
- (typeReq == SetPortFeature && wValue == USB_PORT_FEAT_RESET)) {
- spin_unlock_irqrestore(&ehci->lock, flags);
- return tegra_ehci_internal_port_reset(ehci, status_reg);
- }
-
- /*
- * Tegra host controller will time the resume operation to clear the bit
- * when the port control state switches to HS or FS Idle. This behavior
- * is different from EHCI where the host controller driver is required
- * to set this bit to a zero after the resume duration is timed in the
- * driver.
- */
- else if (typeReq == ClearPortFeature &&
- wValue == USB_PORT_FEAT_SUSPEND) {
- temp = ehci_readl(ehci, status_reg);
- if ((temp & PORT_RESET) || !(temp & PORT_PE)) {
- retval = -EPIPE;
- goto done;
- }
-
- if (!(temp & PORT_SUSPEND))
- goto done;
-
- /* Disable disconnect detection during port resume */
- tegra_usb_phy_preresume(hcd->usb_phy);
-
- ehci->reset_done[wIndex-1] = jiffies + msecs_to_jiffies(25);
-
- temp &= ~(PORT_RWC_BITS | PORT_WAKE_BITS);
- /* start resume signalling */
- ehci_writel(ehci, temp | PORT_RESUME, status_reg);
- set_bit(wIndex-1, &ehci->resuming_ports);
-
- spin_unlock_irqrestore(&ehci->lock, flags);
- msleep(20);
- spin_lock_irqsave(&ehci->lock, flags);
-
- /* Poll until the controller clears RESUME and SUSPEND */
- if (ehci_handshake(ehci, status_reg, PORT_RESUME, 0, 2000))
- pr_err("%s: timeout waiting for RESUME\n", __func__);
- if (ehci_handshake(ehci, status_reg, PORT_SUSPEND, 0, 2000))
- pr_err("%s: timeout waiting for SUSPEND\n", __func__);
-
- ehci->reset_done[wIndex-1] = 0;
- clear_bit(wIndex-1, &ehci->resuming_ports);
-
- tegra->port_resuming = 1;
- goto done;
- }
-
- spin_unlock_irqrestore(&ehci->lock, flags);
-
- /* Handle the hub control events here */
- return ehci_hub_control(hcd, typeReq, wValue, wIndex, buf, wLength);
-
-done:
- spin_unlock_irqrestore(&ehci->lock, flags);
- return retval;
-}
-
-struct dma_aligned_buffer {
- void *kmalloc_ptr;
- void *old_xfer_buffer;
- u8 data[];
-};
-
-static void free_dma_aligned_buffer(struct urb *urb)
-{
- struct dma_aligned_buffer *temp;
- size_t length;
-
- if (!(urb->transfer_flags & URB_ALIGNED_TEMP_BUFFER))
- return;
-
- temp = container_of(urb->transfer_buffer,
- struct dma_aligned_buffer, data);
-
- if (usb_urb_dir_in(urb)) {
- if (usb_pipeisoc(urb->pipe))
- length = urb->transfer_buffer_length;
- else
- length = urb->actual_length;
-
- memcpy(temp->old_xfer_buffer, temp->data, length);
- }
- urb->transfer_buffer = temp->old_xfer_buffer;
- kfree(temp->kmalloc_ptr);
-
- urb->transfer_flags &= ~URB_ALIGNED_TEMP_BUFFER;
-}
-
-static int alloc_dma_aligned_buffer(struct urb *urb, gfp_t mem_flags)
-{
- struct dma_aligned_buffer *temp, *kmalloc_ptr;
- size_t kmalloc_size;
-
- if (urb->num_sgs || urb->sg ||
- urb->transfer_buffer_length == 0 ||
- !((uintptr_t)urb->transfer_buffer & (TEGRA_USB_DMA_ALIGN - 1)))
- return 0;
-
- /* Allocate a buffer with enough padding for alignment */
- kmalloc_size = urb->transfer_buffer_length +
- sizeof(struct dma_aligned_buffer) + TEGRA_USB_DMA_ALIGN - 1;
-
- kmalloc_ptr = kmalloc(kmalloc_size, mem_flags);
- if (!kmalloc_ptr)
- return -ENOMEM;
-
- /* Position our struct dma_aligned_buffer such that data is aligned */
- temp = PTR_ALIGN(kmalloc_ptr + 1, TEGRA_USB_DMA_ALIGN) - 1;
- temp->kmalloc_ptr = kmalloc_ptr;
- temp->old_xfer_buffer = urb->transfer_buffer;
- if (usb_urb_dir_out(urb))
- memcpy(temp->data, urb->transfer_buffer,
- urb->transfer_buffer_length);
- urb->transfer_buffer = temp->data;
-
- urb->transfer_flags |= URB_ALIGNED_TEMP_BUFFER;
-
- return 0;
-}
-
-static int tegra_ehci_map_urb_for_dma(struct usb_hcd *hcd, struct urb *urb,
- gfp_t mem_flags)
-{
- int ret;
-
- ret = alloc_dma_aligned_buffer(urb, mem_flags);
- if (ret)
- return ret;
-
- ret = usb_hcd_map_urb_for_dma(hcd, urb, mem_flags);
- if (ret)
- free_dma_aligned_buffer(urb);
-
- return ret;
-}
-
-static void tegra_ehci_unmap_urb_for_dma(struct usb_hcd *hcd, struct urb *urb)
-{
- usb_hcd_unmap_urb_for_dma(hcd, urb);
- free_dma_aligned_buffer(urb);
-}
-
-static const struct tegra_ehci_soc_config tegra30_soc_config = {
- .has_hostpc = true,
-};
-
-static const struct tegra_ehci_soc_config tegra20_soc_config = {
- .has_hostpc = false,
-};
-
-static const struct of_device_id tegra_ehci_of_match[] = {
- { .compatible = "nvidia,tegra30-ehci", .data = &tegra30_soc_config },
- { .compatible = "nvidia,tegra20-ehci", .data = &tegra20_soc_config },
- { },
-};
-
-static int tegra_ehci_probe(struct platform_device *pdev)
-{
- const struct of_device_id *match;
- const struct tegra_ehci_soc_config *soc_config;
- struct resource *res;
- struct usb_hcd *hcd;
- struct ehci_hcd *ehci;
- struct tegra_ehci_hcd *tegra;
- int err = 0;
- int irq;
- struct usb_phy *u_phy;
-
- match = of_match_device(tegra_ehci_of_match, &pdev->dev);
- if (!match) {
- dev_err(&pdev->dev, "Error: No device match found\n");
- return -ENODEV;
- }
- soc_config = match->data;
-
- /* Right now device-tree probed devices don't get dma_mask set.
- * Since shared usb code relies on it, set it here for now.
- * Once we have dma capability bindings this can go away.
- */
- err = dma_coerce_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
- if (err)
- return err;
-
- hcd = usb_create_hcd(&tegra_ehci_hc_driver, &pdev->dev,
- dev_name(&pdev->dev));
- if (!hcd) {
- dev_err(&pdev->dev, "Unable to create HCD\n");
- return -ENOMEM;
- }
- platform_set_drvdata(pdev, hcd);
- ehci = hcd_to_ehci(hcd);
- tegra = (struct tegra_ehci_hcd *)ehci->priv;
-
- hcd->has_tt = 1;
-
- tegra->clk = devm_clk_get(&pdev->dev, NULL);
- if (IS_ERR(tegra->clk)) {
- dev_err(&pdev->dev, "Can't get ehci clock\n");
- err = PTR_ERR(tegra->clk);
- goto cleanup_hcd_create;
- }
-
- tegra->rst = devm_reset_control_get_shared(&pdev->dev, "usb");
- if (IS_ERR(tegra->rst)) {
- dev_err(&pdev->dev, "Can't get ehci reset\n");
- err = PTR_ERR(tegra->rst);
- goto cleanup_hcd_create;
- }
-
- err = clk_prepare_enable(tegra->clk);
- if (err)
- goto cleanup_hcd_create;
-
- err = tegra_reset_usb_controller(pdev);
- if (err) {
- dev_err(&pdev->dev, "Failed to reset controller\n");
- goto cleanup_clk_en;
- }
-
- u_phy = devm_usb_get_phy_by_phandle(&pdev->dev, "nvidia,phy", 0);
- if (IS_ERR(u_phy)) {
- err = -EPROBE_DEFER;
- goto cleanup_clk_en;
- }
- hcd->usb_phy = u_phy;
- hcd->skip_phy_initialization = 1;
-
- tegra->needs_double_reset = of_property_read_bool(pdev->dev.of_node,
- "nvidia,needs-double-reset");
-
- res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
- hcd->regs = devm_ioremap_resource(&pdev->dev, res);
- if (IS_ERR(hcd->regs)) {
- err = PTR_ERR(hcd->regs);
- goto cleanup_clk_en;
- }
- hcd->rsrc_start = res->start;
- hcd->rsrc_len = resource_size(res);
-
- ehci->caps = hcd->regs + 0x100;
- ehci->has_hostpc = soc_config->has_hostpc;
-
- err = usb_phy_init(hcd->usb_phy);
- if (err) {
- dev_err(&pdev->dev, "Failed to initialize phy\n");
- goto cleanup_clk_en;
- }
-
- u_phy->otg = devm_kzalloc(&pdev->dev, sizeof(struct usb_otg),
- GFP_KERNEL);
- if (!u_phy->otg) {
- err = -ENOMEM;
- goto cleanup_phy;
- }
- u_phy->otg->host = hcd_to_bus(hcd);
-
- irq = platform_get_irq(pdev, 0);
- if (irq < 0) {
- err = irq;
- goto cleanup_phy;
- }
-
- otg_set_host(u_phy->otg, &hcd->self);
-
- err = usb_add_hcd(hcd, irq, IRQF_SHARED);
- if (err) {
- dev_err(&pdev->dev, "Failed to add USB HCD\n");
- goto cleanup_otg_set_host;
- }
- device_wakeup_enable(hcd->self.controller);
-
- return err;
-
-cleanup_otg_set_host:
- otg_set_host(u_phy->otg, NULL);
-cleanup_phy:
- usb_phy_shutdown(hcd->usb_phy);
-cleanup_clk_en:
- clk_disable_unprepare(tegra->clk);
-cleanup_hcd_create:
- usb_put_hcd(hcd);
- return err;
-}
-
-static int tegra_ehci_remove(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
- struct tegra_ehci_hcd *tegra =
- (struct tegra_ehci_hcd *)hcd_to_ehci(hcd)->priv;
-
- usb_remove_hcd(hcd);
- otg_set_host(hcd->usb_phy->otg, NULL);
- usb_phy_shutdown(hcd->usb_phy);
- clk_disable_unprepare(tegra->clk);
- usb_put_hcd(hcd);
-
- return 0;
-}
-
-static void tegra_ehci_hcd_shutdown(struct platform_device *pdev)
-{
- struct usb_hcd *hcd = platform_get_drvdata(pdev);
-
- if (hcd->driver->shutdown)
- hcd->driver->shutdown(hcd);
-}
-
-static struct platform_driver tegra_ehci_driver = {
- .probe = tegra_ehci_probe,
- .remove = tegra_ehci_remove,
- .shutdown = tegra_ehci_hcd_shutdown,
- .driver = {
- .name = DRV_NAME,
- .of_match_table = tegra_ehci_of_match,
- }
-};
-
-static int tegra_ehci_reset(struct usb_hcd *hcd)
-{
- struct ehci_hcd *ehci = hcd_to_ehci(hcd);
- int retval;
- int txfifothresh;
-
- retval = ehci_setup(hcd);
- if (retval)
- return retval;
-
- /*
- * We should really pull this value out of tegra_ehci_soc_config, but
- * to avoid needing access to it, make use of the fact that Tegra20 is
- * the only one so far that needs a value of 10, and Tegra20 is the
- * only one which doesn't set has_hostpc.
- */
- txfifothresh = ehci->has_hostpc ? 0x10 : 10;
- ehci_writel(ehci, txfifothresh << 16, &ehci->regs->txfill_tuning);
-
- return 0;
-}
-
-static const struct ehci_driver_overrides tegra_overrides __initconst = {
- .extra_priv_size = sizeof(struct tegra_ehci_hcd),
- .reset = tegra_ehci_reset,
-};
-
-static int __init ehci_tegra_init(void)
-{
- if (usb_disabled())
- return -ENODEV;
-
- pr_info(DRV_NAME ": " DRIVER_DESC "\n");
-
- ehci_init_driver(&tegra_ehci_hc_driver, &tegra_overrides);
-
- /*
- * The Tegra HW has some unusual quirks, which require Tegra-specific
- * workarounds. We override certain hc_driver functions here to
- * achieve that. We explicitly do not enhance ehci_driver_overrides to
- * allow this more easily, since this is an unusual case, and we don't
- * want to encourage others to override these functions by making it
- * too easy.
- */
-
- tegra_ehci_hc_driver.map_urb_for_dma = tegra_ehci_map_urb_for_dma;
- tegra_ehci_hc_driver.unmap_urb_for_dma = tegra_ehci_unmap_urb_for_dma;
- tegra_ehci_hc_driver.hub_control = tegra_ehci_hub_control;
-
- return platform_driver_register(&tegra_ehci_driver);
-}
-module_init(ehci_tegra_init);
-
-static void __exit ehci_tegra_cleanup(void)
-{
- platform_driver_unregister(&tegra_ehci_driver);
-}
-module_exit(ehci_tegra_cleanup);
-
-MODULE_DESCRIPTION(DRIVER_DESC);
-MODULE_LICENSE("GPL");
-MODULE_ALIAS("platform:" DRV_NAME);
-MODULE_DEVICE_TABLE(of, tegra_ehci_of_match);
diff --git a/drivers/usb/host/xhci-ext-caps.c b/drivers/usb/host/xhci-ext-caps.c
index 3351d07c431f..7a4c2c4ad50e 100644
--- a/drivers/usb/host/xhci-ext-caps.c
+++ b/drivers/usb/host/xhci-ext-caps.c
@@ -54,7 +54,8 @@ static int xhci_create_intel_xhci_sw_pdev(struct xhci_hcd *xhci, u32 cap_offset)
}
if (pci->device == PCI_DEVICE_ID_INTEL_CHERRYVIEW_XHCI) {
- ret = platform_device_add_properties(pdev, role_switch_props);
+ ret = device_create_managed_software_node(&pdev->dev, role_switch_props,
+ NULL);
if (ret) {
dev_err(dev, "failed to register device properties\n");
platform_device_put(pdev);
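device_create_managed_software_node() attaches a set of device properties to an existing device through a software node whose lifetime is managed along with the device, so nothing has to be torn down explicitly on the error or removal paths. A minimal sketch of the call (the property list shown is illustrative; the real one is defined earlier in xhci-ext-caps.c):

	static const struct property_entry role_switch_props[] = {
		PROPERTY_ENTRY_BOOL("sw_switch_disable"),
		{ }
	};

	/* attach the properties to the newly created child platform device */
	ret = device_create_managed_software_node(&pdev->dev, role_switch_props, NULL);
	if (ret)
		dev_err(dev, "failed to register device properties\n");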
diff --git a/drivers/usb/host/xhci-mem.c b/drivers/usb/host/xhci-mem.c
index 3589b49b6c8b..f2c4ee7c4786 100644
--- a/drivers/usb/host/xhci-mem.c
+++ b/drivers/usb/host/xhci-mem.c
@@ -592,23 +592,6 @@ struct xhci_ring *xhci_dma_to_transfer_ring(
return ep->ring;
}
-struct xhci_ring *xhci_stream_id_to_ring(
- struct xhci_virt_device *dev,
- unsigned int ep_index,
- unsigned int stream_id)
-{
- struct xhci_virt_ep *ep = &dev->eps[ep_index];
-
- if (stream_id == 0)
- return ep->ring;
- if (!ep->stream_info)
- return NULL;
-
- if (stream_id >= ep->stream_info->num_streams)
- return NULL;
- return ep->stream_info->stream_rings[stream_id];
-}
-
/*
* Change an endpoint's internal structure so it supports stream IDs. The
* number of requested streams includes stream 0, which cannot be used by device
@@ -994,6 +977,8 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
if (!dev)
return 0;
+ dev->slot_id = slot_id;
+
/* Allocate the (output) device context that will be used in the HC. */
dev->out_ctx = xhci_alloc_container_ctx(xhci, XHCI_CTX_TYPE_DEVICE, flags);
if (!dev->out_ctx)
@@ -1012,6 +997,8 @@ int xhci_alloc_virt_device(struct xhci_hcd *xhci, int slot_id,
/* Initialize the cancellation list and watchdog timers for each ep */
for (i = 0; i < 31; i++) {
+ dev->eps[i].ep_index = i;
+ dev->eps[i].vdev = dev;
xhci_init_endpoint_timer(xhci, &dev->eps[i]);
INIT_LIST_HEAD(&dev->eps[i].cancelled_td_list);
INIT_LIST_HEAD(&dev->eps[i].bw_endpoint_list);
diff --git a/drivers/usb/host/xhci-mtk-sch.c b/drivers/usb/host/xhci-mtk-sch.c
index 45c54d56ecbd..b45e5bf08997 100644
--- a/drivers/usb/host/xhci-mtk-sch.c
+++ b/drivers/usb/host/xhci-mtk-sch.c
@@ -200,6 +200,8 @@ static struct mu3h_sch_ep_info *create_sch_ep(struct usb_device *udev,
sch_ep->sch_tt = tt;
sch_ep->ep = ep;
+ INIT_LIST_HEAD(&sch_ep->endpoint);
+ INIT_LIST_HEAD(&sch_ep->tt_endpoint);
return sch_ep;
}
@@ -373,6 +375,7 @@ static void update_bus_bw(struct mu3h_sch_bw_info *sch_bw,
sch_ep->bw_budget_table[j];
}
}
+ sch_ep->allocated = used;
}
static int check_sch_tt(struct usb_device *udev,
@@ -541,6 +544,22 @@ static int check_sch_bw(struct usb_device *udev,
return 0;
}
+static void destroy_sch_ep(struct usb_device *udev,
+ struct mu3h_sch_bw_info *sch_bw, struct mu3h_sch_ep_info *sch_ep)
+{
+ /* only release bandwidth for eps that passed the check in check_sch_bw() */
+ if (sch_ep->allocated)
+ update_bus_bw(sch_bw, sch_ep, 0);
+
+ list_del(&sch_ep->endpoint);
+
+ if (sch_ep->sch_tt) {
+ list_del(&sch_ep->tt_endpoint);
+ drop_tt(udev);
+ }
+ kfree(sch_ep);
+}
+
static bool need_bw_sch(struct usb_host_endpoint *ep,
enum usb_device_speed speed, int has_tt)
{
@@ -583,6 +602,8 @@ int xhci_mtk_sch_init(struct xhci_hcd_mtk *mtk)
mtk->sch_array = sch_array;
+ INIT_LIST_HEAD(&mtk->bw_ep_chk_list);
+
return 0;
}
EXPORT_SYMBOL_GPL(xhci_mtk_sch_init);
@@ -601,19 +622,14 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
struct xhci_virt_device *virt_dev;
- struct mu3h_sch_bw_info *sch_bw;
struct mu3h_sch_ep_info *sch_ep;
- struct mu3h_sch_bw_info *sch_array;
unsigned int ep_index;
- int bw_index;
- int ret = 0;
xhci = hcd_to_xhci(hcd);
virt_dev = xhci->devs[udev->slot_id];
ep_index = xhci_get_endpoint_index(&ep->desc);
slot_ctx = xhci_get_slot_ctx(xhci, virt_dev->in_ctx);
ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
- sch_array = mtk->sch_array;
xhci_dbg(xhci, "%s() type:%d, speed:%d, mpkt:%d, dir:%d, ep:%p\n",
__func__, usb_endpoint_type(&ep->desc), udev->speed,
@@ -632,35 +648,13 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
return 0;
}
- bw_index = get_bw_index(xhci, udev, ep);
- sch_bw = &sch_array[bw_index];
-
sch_ep = create_sch_ep(udev, ep, ep_ctx);
if (IS_ERR_OR_NULL(sch_ep))
return -ENOMEM;
setup_sch_info(udev, ep_ctx, sch_ep);
- ret = check_sch_bw(udev, sch_bw, sch_ep);
- if (ret) {
- xhci_err(xhci, "Not enough bandwidth!\n");
- if (is_fs_or_ls(udev->speed))
- drop_tt(udev);
-
- kfree(sch_ep);
- return -ENOSPC;
- }
-
- list_add_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
-
- ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
- | EP_BCSCOUNT(sch_ep->cs_count) | EP_BBM(sch_ep->burst_mode));
- ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
- | EP_BREPEAT(sch_ep->repeat));
-
- xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
- sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode,
- sch_ep->offset, sch_ep->repeat);
+ list_add_tail(&sch_ep->endpoint, &mtk->bw_ep_chk_list);
return 0;
}
@@ -675,7 +669,7 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
struct xhci_virt_device *virt_dev;
struct mu3h_sch_bw_info *sch_array;
struct mu3h_sch_bw_info *sch_bw;
- struct mu3h_sch_ep_info *sch_ep;
+ struct mu3h_sch_ep_info *sch_ep, *tmp;
int bw_index;
xhci = hcd_to_xhci(hcd);
@@ -694,17 +688,79 @@ void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
bw_index = get_bw_index(xhci, udev, ep);
sch_bw = &sch_array[bw_index];
- list_for_each_entry(sch_ep, &sch_bw->bw_ep_list, endpoint) {
+ list_for_each_entry_safe(sch_ep, tmp, &sch_bw->bw_ep_list, endpoint) {
if (sch_ep->ep == ep) {
- update_bus_bw(sch_bw, sch_ep, 0);
- list_del(&sch_ep->endpoint);
- if (is_fs_or_ls(udev->speed)) {
- list_del(&sch_ep->tt_endpoint);
- drop_tt(udev);
- }
- kfree(sch_ep);
+ destroy_sch_ep(udev, sch_bw, sch_ep);
break;
}
}
}
EXPORT_SYMBOL_GPL(xhci_mtk_drop_ep_quirk);
+
+int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct xhci_virt_device *virt_dev = xhci->devs[udev->slot_id];
+ struct mu3h_sch_bw_info *sch_bw;
+ struct mu3h_sch_ep_info *sch_ep, *tmp;
+ int bw_index, ret;
+
+ xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev));
+
+ list_for_each_entry(sch_ep, &mtk->bw_ep_chk_list, endpoint) {
+ bw_index = get_bw_index(xhci, udev, sch_ep->ep);
+ sch_bw = &mtk->sch_array[bw_index];
+
+ ret = check_sch_bw(udev, sch_bw, sch_ep);
+ if (ret) {
+ xhci_err(xhci, "Not enough bandwidth!\n");
+ return -ENOSPC;
+ }
+ }
+
+ list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) {
+ struct xhci_ep_ctx *ep_ctx;
+ struct usb_host_endpoint *ep = sch_ep->ep;
+ unsigned int ep_index = xhci_get_endpoint_index(&ep->desc);
+
+ bw_index = get_bw_index(xhci, udev, ep);
+ sch_bw = &mtk->sch_array[bw_index];
+
+ list_move_tail(&sch_ep->endpoint, &sch_bw->bw_ep_list);
+
+ ep_ctx = xhci_get_ep_ctx(xhci, virt_dev->in_ctx, ep_index);
+ ep_ctx->reserved[0] |= cpu_to_le32(EP_BPKTS(sch_ep->pkts)
+ | EP_BCSCOUNT(sch_ep->cs_count)
+ | EP_BBM(sch_ep->burst_mode));
+ ep_ctx->reserved[1] |= cpu_to_le32(EP_BOFFSET(sch_ep->offset)
+ | EP_BREPEAT(sch_ep->repeat));
+
+ xhci_dbg(xhci, " PKTS:%x, CSCOUNT:%x, BM:%x, OFFSET:%x, REPEAT:%x\n",
+ sch_ep->pkts, sch_ep->cs_count, sch_ep->burst_mode,
+ sch_ep->offset, sch_ep->repeat);
+ }
+
+ return xhci_check_bandwidth(hcd, udev);
+}
+EXPORT_SYMBOL_GPL(xhci_mtk_check_bandwidth);
+
+void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+{
+ struct xhci_hcd_mtk *mtk = hcd_to_mtk(hcd);
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct mu3h_sch_bw_info *sch_bw;
+ struct mu3h_sch_ep_info *sch_ep, *tmp;
+ int bw_index;
+
+ xhci_dbg(xhci, "%s() udev %s\n", __func__, dev_name(&udev->dev));
+
+ list_for_each_entry_safe(sch_ep, tmp, &mtk->bw_ep_chk_list, endpoint) {
+ bw_index = get_bw_index(xhci, udev, sch_ep->ep);
+ sch_bw = &mtk->sch_array[bw_index];
+ destroy_sch_ep(udev, sch_bw, sch_ep);
+ }
+
+ xhci_reset_bandwidth(hcd, udev);
+}
+EXPORT_SYMBOL_GPL(xhci_mtk_reset_bandwidth);
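
With this rework, xhci_mtk_add_ep_quirk() no longer commits bandwidth on its own; new endpoints are parked on bw_ep_chk_list and only validated and programmed when the core invokes check_bandwidth(), with reset_bandwidth() as the undo path. A rough sketch of the resulting call order, assuming the usual usb core sequence:

/*
 * Sketch of the flow (not driver code):
 *
 *   usb_hcd_alloc_bandwidth()
 *     -> xhci_mtk_add_ep_quirk()          create sch_ep, park it on
 *                                         mtk->bw_ep_chk_list
 *     -> hcd->driver->check_bandwidth()   xhci_mtk_check_bandwidth():
 *          - check_sch_bw() for every pending sch_ep, -ENOSPC on failure
 *          - on success, move each sch_ep to sch_bw->bw_ep_list and
 *            program ep_ctx->reserved[0..1], then xhci_check_bandwidth()
 *
 *   on failure, or on usb_hcd_reset_bandwidth():
 *     -> xhci_mtk_reset_bandwidth()       destroy_sch_ep() for everything
 *                                         still on bw_ep_chk_list
 */
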
diff --git a/drivers/usb/host/xhci-mtk.c b/drivers/usb/host/xhci-mtk.c
index 8f321f39ab96..fe010cc61f19 100644
--- a/drivers/usb/host/xhci-mtk.c
+++ b/drivers/usb/host/xhci-mtk.c
@@ -347,6 +347,8 @@ static void usb_wakeup_set(struct xhci_hcd_mtk *mtk, bool enable)
static int xhci_mtk_setup(struct usb_hcd *hcd);
static const struct xhci_driver_overrides xhci_mtk_overrides __initconst = {
.reset = xhci_mtk_setup,
+ .check_bandwidth = xhci_mtk_check_bandwidth,
+ .reset_bandwidth = xhci_mtk_reset_bandwidth,
};
static struct hc_driver __read_mostly xhci_mtk_hc_driver;
diff --git a/drivers/usb/host/xhci-mtk.h b/drivers/usb/host/xhci-mtk.h
index a93cfe817904..cbb09dfea62e 100644
--- a/drivers/usb/host/xhci-mtk.h
+++ b/drivers/usb/host/xhci-mtk.h
@@ -59,6 +59,7 @@ struct mu3h_sch_bw_info {
* @ep_type: endpoint type
* @maxpkt: max packet size of endpoint
* @ep: address of usb_host_endpoint struct
+ * @allocated: the bandwidth is already allocated from bus_bw
* @offset: which uframe of the interval that transfer should be
* scheduled first time within the interval
* @repeat: the time gap between two uframes that transfers are
@@ -86,6 +87,7 @@ struct mu3h_sch_ep_info {
u32 ep_type;
u32 maxpkt;
void *ep;
+ bool allocated;
/*
* mtk xHCI scheduling information put into reserved DWs
* in ep context
@@ -131,6 +133,7 @@ struct xhci_hcd_mtk {
struct device *dev;
struct usb_hcd *hcd;
struct mu3h_sch_bw_info *sch_array;
+ struct list_head bw_ep_chk_list;
struct mu3c_ippc_regs __iomem *ippc_regs;
bool has_ippc;
int num_u2_ports;
@@ -166,6 +169,8 @@ int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep);
void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd, struct usb_device *udev,
struct usb_host_endpoint *ep);
+int xhci_mtk_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
#else
static inline int xhci_mtk_add_ep_quirk(struct usb_hcd *hcd,
@@ -179,6 +184,16 @@ static inline void xhci_mtk_drop_ep_quirk(struct usb_hcd *hcd,
{
}
+static inline int xhci_mtk_check_bandwidth(struct usb_hcd *hcd,
+ struct usb_device *udev)
+{
+ return 0;
+}
+
+static inline void xhci_mtk_reset_bandwidth(struct usb_hcd *hcd,
+ struct usb_device *udev)
+{
+}
#endif
#endif /* _XHCI_MTK_H_ */
diff --git a/drivers/usb/host/xhci-mvebu.c b/drivers/usb/host/xhci-mvebu.c
index 60651a50770f..8ca1a235d164 100644
--- a/drivers/usb/host/xhci-mvebu.c
+++ b/drivers/usb/host/xhci-mvebu.c
@@ -8,6 +8,7 @@
#include <linux/mbus.h>
#include <linux/of.h>
#include <linux/platform_device.h>
+#include <linux/phy/phy.h>
#include <linux/usb.h>
#include <linux/usb/hcd.h>
@@ -74,6 +75,47 @@ int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
return 0;
}
+int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd)
+{
+ struct xhci_hcd *xhci = hcd_to_xhci(hcd);
+ struct device *dev = hcd->self.controller;
+ struct phy *phy;
+ int ret;
+
+ /* Old bindings miss the PHY handle */
+ phy = of_phy_get(dev->of_node, "usb3-phy");
+ if (IS_ERR(phy) && PTR_ERR(phy) == -EPROBE_DEFER)
+ return -EPROBE_DEFER;
+ else if (IS_ERR(phy))
+ goto phy_out;
+
+ ret = phy_init(phy);
+ if (ret)
+ goto phy_put;
+
+ ret = phy_set_mode(phy, PHY_MODE_USB_HOST_SS);
+ if (ret)
+ goto phy_exit;
+
+ ret = phy_power_on(phy);
+ if (ret == -EOPNOTSUPP) {
+ /* Skip initialization of XHCI PHY when it is unsupported by firmware */
+ dev_warn(dev, "PHY unsupported by firmware\n");
+ xhci->quirks |= XHCI_SKIP_PHY_INIT;
+ }
+ if (ret)
+ goto phy_exit;
+
+ phy_power_off(phy);
+phy_exit:
+ phy_exit(phy);
+phy_put:
+ of_phy_put(phy);
+phy_out:
+
+ return 0;
+}
+
int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
{
struct xhci_hcd *xhci = hcd_to_xhci(hcd);
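
xhci_mvebu_a3700_plat_setup() above treats a PHY the firmware refuses to hand over as non-fatal: a missing "usb3-phy" handle is ignored, and -EOPNOTSUPP from phy_power_on() merely sets XHCI_SKIP_PHY_INIT. A minimal sketch of the same pattern, assuming a caller-provided flag instead of the xhci quirk bit:

#include <linux/device.h>
#include <linux/of.h>
#include <linux/phy/phy.h>

/* Sketch: bring up an optional PHY, tolerating firmware-owned PHYs. */
static int example_setup_phy(struct device *dev, bool *skip_phy_init)
{
	struct phy *phy;
	int ret;

	phy = of_phy_get(dev->of_node, "usb3-phy");
	if (IS_ERR(phy))
		return PTR_ERR(phy) == -EPROBE_DEFER ? -EPROBE_DEFER : 0;

	ret = phy_init(phy);
	if (ret)
		goto put;

	ret = phy_power_on(phy);
	if (ret == -EOPNOTSUPP) {
		dev_warn(dev, "PHY is managed by firmware, skipping init\n");
		*skip_phy_init = true;
		ret = 0;
	} else if (!ret) {
		phy_power_off(phy);
	}

	phy_exit(phy);
put:
	of_phy_put(phy);
	return ret;
}
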
diff --git a/drivers/usb/host/xhci-mvebu.h b/drivers/usb/host/xhci-mvebu.h
index 3be021793cc8..01bf3fcb3eca 100644
--- a/drivers/usb/host/xhci-mvebu.h
+++ b/drivers/usb/host/xhci-mvebu.h
@@ -12,6 +12,7 @@ struct usb_hcd;
#if IS_ENABLED(CONFIG_USB_XHCI_MVEBU)
int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd);
+int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd);
int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd);
#else
static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
@@ -19,6 +20,11 @@ static inline int xhci_mvebu_mbus_init_quirk(struct usb_hcd *hcd)
return 0;
}
+static inline int xhci_mvebu_a3700_plat_setup(struct usb_hcd *hcd)
+{
+ return 0;
+}
+
static inline int xhci_mvebu_a3700_init_quirk(struct usb_hcd *hcd)
{
return 0;
diff --git a/drivers/usb/host/xhci-plat.c b/drivers/usb/host/xhci-plat.c
index 4d34f6005381..c1edcc9b13ce 100644
--- a/drivers/usb/host/xhci-plat.c
+++ b/drivers/usb/host/xhci-plat.c
@@ -44,6 +44,16 @@ static void xhci_priv_plat_start(struct usb_hcd *hcd)
priv->plat_start(hcd);
}
+static int xhci_priv_plat_setup(struct usb_hcd *hcd)
+{
+ struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
+
+ if (!priv->plat_setup)
+ return 0;
+
+ return priv->plat_setup(hcd);
+}
+
static int xhci_priv_init_quirk(struct usb_hcd *hcd)
{
struct xhci_plat_priv *priv = hcd_to_xhci_priv(hcd);
@@ -111,6 +121,7 @@ static const struct xhci_plat_priv xhci_plat_marvell_armada = {
};
static const struct xhci_plat_priv xhci_plat_marvell_armada3700 = {
+ .plat_setup = xhci_mvebu_a3700_plat_setup,
.init_quirk = xhci_mvebu_a3700_init_quirk,
};
@@ -330,7 +341,14 @@ static int xhci_plat_probe(struct platform_device *pdev)
hcd->tpl_support = of_usb_host_tpl_support(sysdev->of_node);
xhci->shared_hcd->tpl_support = hcd->tpl_support;
- if (priv && (priv->quirks & XHCI_SKIP_PHY_INIT))
+
+ if (priv) {
+ ret = xhci_priv_plat_setup(hcd);
+ if (ret)
+ goto disable_usb_phy;
+ }
+
+ if ((xhci->quirks & XHCI_SKIP_PHY_INIT) || (priv && (priv->quirks & XHCI_SKIP_PHY_INIT)))
hcd->skip_phy_initialization = 1;
if (priv && (priv->quirks & XHCI_SG_TRB_CACHE_SIZE_QUIRK))
diff --git a/drivers/usb/host/xhci-plat.h b/drivers/usb/host/xhci-plat.h
index 1fb149d1fbce..561d0b7bce09 100644
--- a/drivers/usb/host/xhci-plat.h
+++ b/drivers/usb/host/xhci-plat.h
@@ -13,6 +13,7 @@
struct xhci_plat_priv {
const char *firmware_name;
unsigned long long quirks;
+ int (*plat_setup)(struct usb_hcd *);
void (*plat_start)(struct usb_hcd *);
int (*init_quirk)(struct usb_hcd *);
int (*suspend_quirk)(struct usb_hcd *);
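
The new ->plat_setup() callback is invoked from xhci_plat_probe() once both HCDs exist but before PHY initialization, which is what lets the Armada 3700 entry above set XHCI_SKIP_PHY_INIT early enough. A sketch of how a platform entry would wire it up (names are illustrative):

static int example_plat_setup(struct usb_hcd *hcd)
{
	/* Runs once from xhci_plat_probe(), early enough to set quirks. */
	return 0;
}

static const struct xhci_plat_priv xhci_plat_example = {
	.plat_setup = example_plat_setup,
};
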
diff --git a/drivers/usb/host/xhci-ring.c b/drivers/usb/host/xhci-ring.c
index 5677b81c0915..5e548a1c93ab 100644
--- a/drivers/usb/host/xhci-ring.c
+++ b/drivers/usb/host/xhci-ring.c
@@ -59,6 +59,10 @@
#include "xhci-trace.h"
#include "xhci-mtk.h"
+static int queue_command(struct xhci_hcd *xhci, struct xhci_command *cmd,
+ u32 field1, u32 field2,
+ u32 field3, u32 field4, bool command_must_succeed);
+
/*
* Returns zero if the TRB isn't in this segment, otherwise it returns the DMA
* address of the TRB.
@@ -151,10 +155,11 @@ static void next_trb(struct xhci_hcd *xhci,
/*
* See Cycle bit rules. SW is the consumer for the event ring only.
- * Don't make a ring full of link TRBs. That would be dumb and this would loop.
*/
void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
{
+ unsigned int link_trb_count = 0;
+
/* event ring doesn't have link trbs, check for last trb */
if (ring->type == TYPE_EVENT) {
if (!last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
@@ -170,14 +175,23 @@ void inc_deq(struct xhci_hcd *xhci, struct xhci_ring *ring)
/* All other rings have link trbs */
if (!trb_is_link(ring->dequeue)) {
- ring->dequeue++;
- ring->num_trbs_free++;
+ if (last_trb_on_seg(ring->deq_seg, ring->dequeue)) {
+ xhci_warn(xhci, "Missing link TRB at end of segment\n");
+ } else {
+ ring->dequeue++;
+ ring->num_trbs_free++;
+ }
}
+
while (trb_is_link(ring->dequeue)) {
ring->deq_seg = ring->deq_seg->next;
ring->dequeue = ring->deq_seg->trbs;
- }
+ if (link_trb_count++ > ring->num_segs) {
+ xhci_warn(xhci, "Ring is an endless link TRB loop\n");
+ break;
+ }
+ }
out:
trace_xhci_inc_deq(ring);
@@ -186,7 +200,6 @@ out:
/*
* See Cycle bit rules. SW is the consumer for the event ring only.
- * Don't make a ring full of link TRBs. That would be dumb and this would loop.
*
* If we've just enqueued a TRB that is in the middle of a TD (meaning the
* chain bit is set), then set the chain bit in all the following link TRBs.
@@ -206,11 +219,18 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
{
u32 chain;
union xhci_trb *next;
+ unsigned int link_trb_count = 0;
chain = le32_to_cpu(ring->enqueue->generic.field[3]) & TRB_CHAIN;
/* If this is not event ring, there is one less usable TRB */
if (!trb_is_link(ring->enqueue))
ring->num_trbs_free--;
+
+ if (last_trb_on_seg(ring->enq_seg, ring->enqueue)) {
+ xhci_err(xhci, "Tried to move enqueue past ring segment\n");
+ return;
+ }
+
next = ++(ring->enqueue);
/* Update the dequeue pointer further if that was a link TRB */
@@ -247,6 +267,11 @@ static void inc_enq(struct xhci_hcd *xhci, struct xhci_ring *ring,
ring->enq_seg = ring->enq_seg->next;
ring->enqueue = ring->enq_seg->trbs;
next = ring->enqueue;
+
+ if (link_trb_count++ > ring->num_segs) {
+ xhci_warn(xhci, "%s: Ring link TRB loop\n", __func__);
+ break;
+ }
}
trace_xhci_inc_enq(ring);
@@ -408,9 +433,8 @@ void xhci_ring_ep_doorbell(struct xhci_hcd *xhci,
trace_xhci_ring_ep_doorbell(slot_id, DB_VALUE(ep_index, stream_id));
writel(DB_VALUE(ep_index, stream_id), db_addr);
- /* The CPU has better things to do at this point than wait for a
- * write-posting flush. It'll get there soon enough.
- */
+ /* flush the write */
+ readl(db_addr);
}
/* Ring the doorbell for any rings with pending URBs */
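
The doorbell hunk above adds a readl() right after the writel(): MMIO writes are posted, so reading the same register back forces the write to reach the controller before the CPU continues. A generic sketch of the idiom (register name is illustrative):

#include <linux/io.h>

/* Sketch: make sure a posted MMIO write has reached the device. */
static void example_kick_doorbell(void __iomem *doorbell, u32 val)
{
	writel(val, doorbell);
	readl(doorbell);	/* flush the posted write */
}
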
@@ -446,6 +470,46 @@ void xhci_ring_doorbell_for_active_rings(struct xhci_hcd *xhci,
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
+static struct xhci_virt_ep *xhci_get_virt_ep(struct xhci_hcd *xhci,
+ unsigned int slot_id,
+ unsigned int ep_index)
+{
+ if (slot_id == 0 || slot_id >= MAX_HC_SLOTS) {
+ xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
+ return NULL;
+ }
+ if (ep_index >= EP_CTX_PER_DEV) {
+ xhci_warn(xhci, "Invalid endpoint index %u\n", ep_index);
+ return NULL;
+ }
+ if (!xhci->devs[slot_id]) {
+ xhci_warn(xhci, "No xhci virt device for slot_id %u\n", slot_id);
+ return NULL;
+ }
+
+ return &xhci->devs[slot_id]->eps[ep_index];
+}
+
+static struct xhci_ring *xhci_virt_ep_to_ring(struct xhci_hcd *xhci,
+ struct xhci_virt_ep *ep,
+ unsigned int stream_id)
+{
+ /* common case, no streams */
+ if (!(ep->ep_state & EP_HAS_STREAMS))
+ return ep->ring;
+
+ if (!ep->stream_info)
+ return NULL;
+
+ if (stream_id == 0 || stream_id >= ep->stream_info->num_streams) {
+ xhci_warn(xhci, "Invalid stream_id %u request for slot_id %u ep_index %u\n",
+ stream_id, ep->vdev->slot_id, ep->ep_index);
+ return NULL;
+ }
+
+ return ep->stream_info->stream_rings[stream_id];
+}
+
/* Get the right ring for the given slot_id, ep_index and stream_id.
* If the endpoint supports streams, boundary check the URB's stream ID.
* If the endpoint doesn't support streams, return the singular endpoint ring.
@@ -456,30 +520,11 @@ struct xhci_ring *xhci_triad_to_transfer_ring(struct xhci_hcd *xhci,
{
struct xhci_virt_ep *ep;
- ep = &xhci->devs[slot_id]->eps[ep_index];
- /* Common case: no streams */
- if (!(ep->ep_state & EP_HAS_STREAMS))
- return ep->ring;
-
- if (stream_id == 0) {
- xhci_warn(xhci,
- "WARN: Slot ID %u, ep index %u has streams, "
- "but URB has no stream ID.\n",
- slot_id, ep_index);
+ ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+ if (!ep)
return NULL;
- }
- if (stream_id < ep->stream_info->num_streams)
- return ep->stream_info->stream_rings[stream_id];
-
- xhci_warn(xhci,
- "WARN: Slot ID %u, ep index %u has "
- "stream IDs 1 to %u allocated, "
- "but stream ID %u is requested.\n",
- slot_id, ep_index,
- ep->stream_info->num_streams - 1,
- stream_id);
- return NULL;
+ return xhci_virt_ep_to_ring(xhci, ep, stream_id);
}
@@ -506,73 +551,55 @@ static u64 xhci_get_hw_deq(struct xhci_hcd *xhci, struct xhci_virt_device *vdev,
return le64_to_cpu(ep_ctx->deq);
}
-/*
- * Move the xHC's endpoint ring dequeue pointer past cur_td.
- * Record the new state of the xHC's endpoint ring dequeue segment,
- * dequeue pointer, stream id, and new consumer cycle state in state.
- * Update our internal representation of the ring's dequeue pointer.
- *
- * We do this in three jumps:
- * - First we update our new ring state to be the same as when the xHC stopped.
- * - Then we traverse the ring to find the segment that contains
- * the last TRB in the TD. We toggle the xHC's new cycle state when we pass
- * any link TRBs with the toggle cycle bit set.
- * - Finally we move the dequeue state one TRB further, toggling the cycle bit
- * if we've moved it past a link TRB with the toggle cycle bit set.
- *
- * Some of the uses of xhci_generic_trb are grotty, but if they're done
- * with correct __le32 accesses they should work fine. Only users of this are
- * in here.
- */
-void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
- unsigned int slot_id, unsigned int ep_index,
- unsigned int stream_id, struct xhci_td *cur_td,
- struct xhci_dequeue_state *state)
+static int xhci_move_dequeue_past_td(struct xhci_hcd *xhci,
+ unsigned int slot_id, unsigned int ep_index,
+ unsigned int stream_id, struct xhci_td *td)
{
struct xhci_virt_device *dev = xhci->devs[slot_id];
struct xhci_virt_ep *ep = &dev->eps[ep_index];
struct xhci_ring *ep_ring;
+ struct xhci_command *cmd;
struct xhci_segment *new_seg;
union xhci_trb *new_deq;
+ int new_cycle;
dma_addr_t addr;
u64 hw_dequeue;
bool cycle_found = false;
bool td_last_trb_found = false;
+ u32 trb_sct = 0;
+ int ret;
ep_ring = xhci_triad_to_transfer_ring(xhci, slot_id,
ep_index, stream_id);
if (!ep_ring) {
- xhci_warn(xhci, "WARN can't find new dequeue state "
- "for invalid stream ID %u.\n",
- stream_id);
- return;
+ xhci_warn(xhci, "WARN can't find new dequeue, invalid stream ID %u\n",
+ stream_id);
+ return -ENODEV;
}
/*
* A cancelled TD can complete with a stall if HW cached the trb.
- * In this case driver can't find cur_td, but if the ring is empty we
+ * In this case driver can't find td, but if the ring is empty we
* can move the dequeue pointer to the current enqueue position.
+ * We shouldn't hit this anymore as cached cancelled TRBs are given back
+ * after clearing the cache, but be on the safe side and keep it anyway
*/
- if (!cur_td) {
+ if (!td) {
if (list_empty(&ep_ring->td_list)) {
- state->new_deq_seg = ep_ring->enq_seg;
- state->new_deq_ptr = ep_ring->enqueue;
- state->new_cycle_state = ep_ring->cycle_state;
- goto done;
+ new_seg = ep_ring->enq_seg;
+ new_deq = ep_ring->enqueue;
+ new_cycle = ep_ring->cycle_state;
+ xhci_dbg(xhci, "ep ring empty, Set new dequeue = enqueue");
+ goto deq_found;
} else {
- xhci_warn(xhci, "Can't find new dequeue state, missing cur_td\n");
- return;
+ xhci_warn(xhci, "Can't find new dequeue state, missing td\n");
+ return -EINVAL;
}
}
- /* Dig out the cycle state saved by the xHC during the stop ep cmd */
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Finding endpoint context");
-
hw_dequeue = xhci_get_hw_deq(xhci, dev, ep_index, stream_id);
new_seg = ep_ring->deq_seg;
new_deq = ep_ring->dequeue;
- state->new_cycle_state = hw_dequeue & 0x1;
- state->stream_id = stream_id;
+ new_cycle = hw_dequeue & 0x1;
/*
* We want to find the pointer, segment and cycle state of the new trb
@@ -587,40 +614,71 @@ void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
if (td_last_trb_found)
break;
}
- if (new_deq == cur_td->last_trb)
+ if (new_deq == td->last_trb)
td_last_trb_found = true;
if (cycle_found && trb_is_link(new_deq) &&
link_trb_toggles_cycle(new_deq))
- state->new_cycle_state ^= 0x1;
+ new_cycle ^= 0x1;
next_trb(xhci, ep_ring, &new_seg, &new_deq);
/* Search wrapped around, bail out */
if (new_deq == ep->ring->dequeue) {
xhci_err(xhci, "Error: Failed finding new dequeue state\n");
- state->new_deq_seg = NULL;
- state->new_deq_ptr = NULL;
- return;
+ return -EINVAL;
}
} while (!cycle_found || !td_last_trb_found);
- state->new_deq_seg = new_seg;
- state->new_deq_ptr = new_deq;
+deq_found:
-done:
/* Don't update the ring cycle state for the producer (us). */
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Cycle state = 0x%x", state->new_cycle_state);
+ addr = xhci_trb_virt_to_dma(new_seg, new_deq);
+ if (addr == 0) {
+ xhci_warn(xhci, "Can't find dma of new dequeue ptr\n");
+ xhci_warn(xhci, "deq seg = %p, deq ptr = %p\n", new_seg, new_deq);
+ return -EINVAL;
+ }
+
+ if ((ep->ep_state & SET_DEQ_PENDING)) {
+ xhci_warn(xhci, "Set TR Deq already pending, don't submit for 0x%pad\n",
+ &addr);
+ return -EBUSY;
+ }
+
+ /* This function gets called from contexts where it cannot sleep */
+ cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC);
+ if (!cmd) {
+ xhci_warn(xhci, "Can't alloc Set TR Deq cmd 0x%pad\n", &addr);
+ return -ENOMEM;
+ }
+
+ if (stream_id)
+ trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
+ ret = queue_command(xhci, cmd,
+ lower_32_bits(addr) | trb_sct | new_cycle,
+ upper_32_bits(addr),
+ STREAM_ID_FOR_TRB(stream_id), SLOT_ID_FOR_TRB(slot_id) |
+ EP_ID_FOR_TRB(ep_index) | TRB_TYPE(TRB_SET_DEQ), false);
+ if (ret < 0) {
+ xhci_free_command(xhci, cmd);
+ return ret;
+ }
+ ep->queued_deq_seg = new_seg;
+ ep->queued_deq_ptr = new_deq;
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "New dequeue segment = %p (virtual)",
- state->new_deq_seg);
- addr = xhci_trb_virt_to_dma(state->new_deq_seg, state->new_deq_ptr);
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "New dequeue pointer = 0x%llx (DMA)",
- (unsigned long long) addr);
+ "Set TR Deq ptr 0x%llx, cycle %u\n", addr, new_cycle);
+
+ /* Stop the TD queueing code from ringing the doorbell until
+ * this command completes. The HC won't set the dequeue pointer
+ * if the ring is running, and ringing the doorbell starts the
+ * ring running.
+ */
+ ep->ep_state |= SET_DEQ_PENDING;
+ xhci_ring_cmd_db(xhci);
+ return 0;
}
/* flip_cycle means flip the cycle bit of all but the first and last TRB.
@@ -699,159 +757,334 @@ static void xhci_unmap_td_bounce_buffer(struct xhci_hcd *xhci,
dma_unmap_single(dev, seg->bounce_dma, ring->bounce_buf_len,
DMA_FROM_DEVICE);
/* for in tranfers we need to copy the data from bounce to sg */
- len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
- seg->bounce_len, seg->bounce_offs);
- if (len != seg->bounce_len)
- xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
- len, seg->bounce_len);
+ if (urb->num_sgs) {
+ len = sg_pcopy_from_buffer(urb->sg, urb->num_sgs, seg->bounce_buf,
+ seg->bounce_len, seg->bounce_offs);
+ if (len != seg->bounce_len)
+ xhci_warn(xhci, "WARN Wrong bounce buffer read length: %zu != %d\n",
+ len, seg->bounce_len);
+ } else {
+ memcpy(urb->transfer_buffer + seg->bounce_offs, seg->bounce_buf,
+ seg->bounce_len);
+ }
seg->bounce_len = 0;
seg->bounce_offs = 0;
}
-/*
- * When we get a command completion for a Stop Endpoint Command, we need to
- * unlink any cancelled TDs from the ring. There are two ways to do that:
- *
- * 1. If the HW was in the middle of processing the TD that needs to be
- * cancelled, then we must move the ring's dequeue pointer past the last TRB
- * in the TD with a Set Dequeue Pointer Command.
- * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
- * bit cleared) so that the HW will skip over them.
- */
-static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
- union xhci_trb *trb, struct xhci_event_cmd *event)
+static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
+ struct xhci_ring *ep_ring, int status)
{
- unsigned int ep_index;
- struct xhci_ring *ep_ring;
- struct xhci_virt_ep *ep;
- struct xhci_td *cur_td = NULL;
- struct xhci_td *last_unlinked_td;
- struct xhci_ep_ctx *ep_ctx;
- struct xhci_virt_device *vdev;
- u64 hw_deq;
- struct xhci_dequeue_state deq_state;
+ struct urb *urb = NULL;
- if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
- if (!xhci->devs[slot_id])
- xhci_warn(xhci, "Stop endpoint command "
- "completion for disabled slot %u\n",
- slot_id);
- return;
+ /* Clean up the endpoint's TD list */
+ urb = td->urb;
+
+ /* if a bounce buffer was used to align this td then unmap it */
+ xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);
+
+ /* Do one last check of the actual transfer length.
+ * If the host controller said we transferred more data than the buffer
+ * length, urb->actual_length will be a very big number (since it's
+ * unsigned). Play it safe and say we didn't transfer anything.
+ */
+ if (urb->actual_length > urb->transfer_buffer_length) {
+ xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
+ urb->transfer_buffer_length, urb->actual_length);
+ urb->actual_length = 0;
+ status = 0;
}
+ /* TD might be removed from td_list if we are giving back a cancelled URB */
+ if (!list_empty(&td->td_list))
+ list_del_init(&td->td_list);
+ /* Giving back a cancelled URB, or if a slated TD completed anyway */
+ if (!list_empty(&td->cancelled_td_list))
+ list_del_init(&td->cancelled_td_list);
- memset(&deq_state, 0, sizeof(deq_state));
- ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+ inc_td_cnt(urb);
+ /* Giveback the urb when all the tds are completed */
+ if (last_td_in_urb(td)) {
+ if ((urb->actual_length != urb->transfer_buffer_length &&
+ (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
+ (status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
+ xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
+ urb, urb->actual_length,
+ urb->transfer_buffer_length, status);
- vdev = xhci->devs[slot_id];
- ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
- trace_xhci_handle_cmd_stop_ep(ep_ctx);
+ /* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */
+ if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
+ status = 0;
+ xhci_giveback_urb_in_irq(xhci, td, status);
+ }
- ep = &xhci->devs[slot_id]->eps[ep_index];
- last_unlinked_td = list_last_entry(&ep->cancelled_td_list,
- struct xhci_td, cancelled_td_list);
+ return 0;
+}
- if (list_empty(&ep->cancelled_td_list)) {
- xhci_stop_watchdog_timer_in_irq(xhci, ep);
- ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
- return;
+
+/* Complete the cancelled URBs we unlinked from td_list. */
+static void xhci_giveback_invalidated_tds(struct xhci_virt_ep *ep)
+{
+ struct xhci_ring *ring;
+ struct xhci_td *td, *tmp_td;
+
+ list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
+ cancelled_td_list) {
+
+ /*
+ * Doesn't matter what we pass for status, since the core will
+ * just overwrite it (because the URB has been unlinked).
+ */
+ ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
+
+ if (td->cancel_status == TD_CLEARED)
+ xhci_td_cleanup(ep->xhci, td, ring, 0);
+
+ if (ep->xhci->xhc_state & XHCI_STATE_DYING)
+ return;
+ }
+}
+
+static int xhci_reset_halted_ep(struct xhci_hcd *xhci, unsigned int slot_id,
+ unsigned int ep_index, enum xhci_ep_reset_type reset_type)
+{
+ struct xhci_command *command;
+ int ret = 0;
+
+ command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
+ if (!command) {
+ ret = -ENOMEM;
+ goto done;
}
- /* Fix up the ep ring first, so HW stops executing cancelled TDs.
- * We have the xHCI lock, so nothing can modify this list until we drop
- * it. We're also in the event handler, so we can't get re-interrupted
- * if another Stop Endpoint command completes
+ ret = xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);
+done:
+ if (ret)
+ xhci_err(xhci, "ERROR queuing reset endpoint for slot %d ep_index %d, %d\n",
+ slot_id, ep_index, ret);
+ return ret;
+}
+
+static void xhci_handle_halted_endpoint(struct xhci_hcd *xhci,
+ struct xhci_virt_ep *ep, unsigned int stream_id,
+ struct xhci_td *td,
+ enum xhci_ep_reset_type reset_type)
+{
+ unsigned int slot_id = ep->vdev->slot_id;
+ int err;
+
+ /*
+ * Avoid resetting endpoint if link is inactive. Can cause host hang.
+ * Device will be reset soon to recover the link so don't do anything
*/
- list_for_each_entry(cur_td, &ep->cancelled_td_list, cancelled_td_list) {
+ if (ep->vdev->flags & VDEV_PORT_ERROR)
+ return;
+
+ /* add td to cancelled list and let reset ep handler take care of it */
+ if (reset_type == EP_HARD_RESET) {
+ ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
+ if (td && list_empty(&td->cancelled_td_list)) {
+ list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
+ td->cancel_status = TD_HALTED;
+ }
+ }
+
+ if (ep->ep_state & EP_HALTED) {
+ xhci_dbg(xhci, "Reset ep command already pending\n");
+ return;
+ }
+
+ err = xhci_reset_halted_ep(xhci, slot_id, ep->ep_index, reset_type);
+ if (err)
+ return;
+
+ ep->ep_state |= EP_HALTED;
+
+ xhci_ring_cmd_db(xhci);
+}
+
+/*
+ * Fix up the ep ring first, so HW stops executing cancelled TDs.
+ * We have the xHCI lock, so nothing can modify this list until we drop it.
+ * We're also in the event handler, so we can't get re-interrupted if another
+ * Stop Endpoint command completes.
+ *
+ * only call this when ring is not in a running state
+ */
+
+static int xhci_invalidate_cancelled_tds(struct xhci_virt_ep *ep)
+{
+ struct xhci_hcd *xhci;
+ struct xhci_td *td = NULL;
+ struct xhci_td *tmp_td = NULL;
+ struct xhci_td *cached_td = NULL;
+ struct xhci_ring *ring;
+ u64 hw_deq;
+ unsigned int slot_id = ep->vdev->slot_id;
+ int err;
+
+ xhci = ep->xhci;
+
+ list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list, cancelled_td_list) {
xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
"Removing canceled TD starting at 0x%llx (dma).",
(unsigned long long)xhci_trb_virt_to_dma(
- cur_td->start_seg, cur_td->first_trb));
- ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
- if (!ep_ring) {
- /* This shouldn't happen unless a driver is mucking
- * with the stream ID after submission. This will
- * leave the TD on the hardware ring, and the hardware
- * will try to execute it, and may access a buffer
- * that has already been freed. In the best case, the
- * hardware will execute it, and the event handler will
- * ignore the completion event for that TD, since it was
- * removed from the td_list for that endpoint. In
- * short, don't muck with the stream ID after
- * submission.
- */
- xhci_warn(xhci, "WARN Cancelled URB %p "
- "has invalid stream ID %u.\n",
- cur_td->urb,
- cur_td->urb->stream_id);
- goto remove_finished_td;
+ td->start_seg, td->first_trb));
+ list_del_init(&td->td_list);
+ ring = xhci_urb_to_transfer_ring(xhci, td->urb);
+ if (!ring) {
+ xhci_warn(xhci, "WARN Cancelled URB %p has invalid stream ID %u.\n",
+ td->urb, td->urb->stream_id);
+ continue;
}
/*
- * If we stopped on the TD we need to cancel, then we have to
+ * If ring stopped on the TD we need to cancel, then we have to
* move the xHC endpoint ring dequeue pointer past this TD.
*/
- hw_deq = xhci_get_hw_deq(xhci, vdev, ep_index,
- cur_td->urb->stream_id);
+ hw_deq = xhci_get_hw_deq(xhci, ep->vdev, ep->ep_index,
+ td->urb->stream_id);
hw_deq &= ~0xf;
- if (trb_in_td(xhci, cur_td->start_seg, cur_td->first_trb,
- cur_td->last_trb, hw_deq, false)) {
- xhci_find_new_dequeue_state(xhci, slot_id, ep_index,
- cur_td->urb->stream_id,
- cur_td, &deq_state);
+ if (trb_in_td(xhci, td->start_seg, td->first_trb,
+ td->last_trb, hw_deq, false)) {
+ switch (td->cancel_status) {
+ case TD_CLEARED: /* TD is already no-op */
+ case TD_CLEARING_CACHE: /* set TR deq command already queued */
+ break;
+ case TD_DIRTY: /* TD is cached, clear it */
+ case TD_HALTED:
+ /* FIXME stream case, several stopped rings */
+ cached_td = td;
+ break;
+ }
} else {
- td_to_noop(xhci, ep_ring, cur_td, false);
+ td_to_noop(xhci, ring, td, false);
+ td->cancel_status = TD_CLEARED;
+ }
+ }
+ if (cached_td) {
+ cached_td->cancel_status = TD_CLEARING_CACHE;
+
+ err = xhci_move_dequeue_past_td(xhci, slot_id, ep->ep_index,
+ cached_td->urb->stream_id,
+ cached_td);
+ /* Failed to move past cached td, try just setting it noop */
+ if (err) {
+ td_to_noop(xhci, ring, cached_td, false);
+ cached_td->cancel_status = TD_CLEARED;
}
+ cached_td = NULL;
+ }
+ return 0;
+}
-remove_finished_td:
- /*
- * The event handler won't see a completion for this TD anymore,
- * so remove it from the endpoint ring's TD list. Keep it in
- * the cancelled TD list for URB completion later.
- */
- list_del_init(&cur_td->td_list);
+/*
+ * Returns the TD the endpoint ring halted on.
+ * Only call for non-running rings without streams.
+ */
+static struct xhci_td *find_halted_td(struct xhci_virt_ep *ep)
+{
+ struct xhci_td *td;
+ u64 hw_deq;
+
+ if (!list_empty(&ep->ring->td_list)) { /* Not streams compatible */
+ hw_deq = xhci_get_hw_deq(ep->xhci, ep->vdev, ep->ep_index, 0);
+ hw_deq &= ~0xf;
+ td = list_first_entry(&ep->ring->td_list, struct xhci_td, td_list);
+ if (trb_in_td(ep->xhci, td->start_seg, td->first_trb,
+ td->last_trb, hw_deq, false))
+ return td;
}
+ return NULL;
+}
- xhci_stop_watchdog_timer_in_irq(xhci, ep);
+/*
+ * When we get a command completion for a Stop Endpoint Command, we need to
+ * unlink any cancelled TDs from the ring. There are two ways to do that:
+ *
+ * 1. If the HW was in the middle of processing the TD that needs to be
+ * cancelled, then we must move the ring's dequeue pointer past the last TRB
+ * in the TD with a Set Dequeue Pointer Command.
+ * 2. Otherwise, we turn all the TRBs in the TD into No-op TRBs (with the chain
+ * bit cleared) so that the HW will skip over them.
+ */
+static void xhci_handle_cmd_stop_ep(struct xhci_hcd *xhci, int slot_id,
+ union xhci_trb *trb, u32 comp_code)
+{
+ unsigned int ep_index;
+ struct xhci_virt_ep *ep;
+ struct xhci_ep_ctx *ep_ctx;
+ struct xhci_td *td = NULL;
+ enum xhci_ep_reset_type reset_type;
+ struct xhci_command *command;
- /* If necessary, queue a Set Transfer Ring Dequeue Pointer command */
- if (deq_state.new_deq_ptr && deq_state.new_deq_seg) {
- xhci_queue_new_dequeue_state(xhci, slot_id, ep_index,
- &deq_state);
- xhci_ring_cmd_db(xhci);
- } else {
- /* Otherwise ring the doorbell(s) to restart queued transfers */
- ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
+ if (unlikely(TRB_TO_SUSPEND_PORT(le32_to_cpu(trb->generic.field[3])))) {
+ if (!xhci->devs[slot_id])
+ xhci_warn(xhci, "Stop endpoint command completion for disabled slot %u\n",
+ slot_id);
+ return;
}
+ ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
+ ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+ if (!ep)
+ return;
+
+ ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
+
+ trace_xhci_handle_cmd_stop_ep(ep_ctx);
+
+ if (comp_code == COMP_CONTEXT_STATE_ERROR) {
/*
- * Drop the lock and complete the URBs in the cancelled TD list.
- * New TDs to be cancelled might be added to the end of the list before
- * we can complete all the URBs for the TDs we already unlinked.
- * So stop when we've completed the URB for the last TD we unlinked.
+ * If stop endpoint command raced with a halting endpoint we need to
+ * reset the host side endpoint first.
+ * If the TD we halted on isn't cancelled the TD should be given back
+ * with a proper error code, and the ring dequeue moved past the TD.
+ * In the streams case we can't find hw_deq, or the TD we halted on,
+ * so do a soft reset.
+ *
+ * The proper error code is unknown here; it would be -EPIPE if the
+ * device side of the endpoint halted (a STALL), and -EPROTO if not
+ * (a transaction error). We use -EPROTO: a stalled device will return
+ * a stall error on the next transfer, which then becomes -EPIPE, and
+ * the device-side stall is noted and cleared by the class driver.
*/
- do {
- cur_td = list_first_entry(&ep->cancelled_td_list,
- struct xhci_td, cancelled_td_list);
- list_del_init(&cur_td->cancelled_td_list);
+ switch (GET_EP_CTX_STATE(ep_ctx)) {
+ case EP_STATE_HALTED:
+ xhci_dbg(xhci, "Stop ep completion raced with stall, reset ep\n");
+ if (ep->ep_state & EP_HAS_STREAMS) {
+ reset_type = EP_SOFT_RESET;
+ } else {
+ reset_type = EP_HARD_RESET;
+ td = find_halted_td(ep);
+ if (td)
+ td->status = -EPROTO;
+ }
+ /* reset ep, reset handler cleans up cancelled tds */
+ xhci_handle_halted_endpoint(xhci, ep, 0, td, reset_type);
+ xhci_stop_watchdog_timer_in_irq(xhci, ep);
+ return;
+ case EP_STATE_RUNNING:
+ /* Race, HW handled stop ep cmd before ep was running */
+ command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
+ if (!command)
+ xhci_stop_watchdog_timer_in_irq(xhci, ep);
- /* Clean up the cancelled URB */
- /* Doesn't matter what we pass for status, since the core will
- * just overwrite it (because the URB has been unlinked).
- */
- ep_ring = xhci_urb_to_transfer_ring(xhci, cur_td->urb);
- xhci_unmap_td_bounce_buffer(xhci, ep_ring, cur_td);
- inc_td_cnt(cur_td->urb);
- if (last_td_in_urb(cur_td))
- xhci_giveback_urb_in_irq(xhci, cur_td, 0);
+ mod_timer(&ep->stop_cmd_timer,
+ jiffies + XHCI_STOP_EP_CMD_TIMEOUT * HZ);
+ xhci_queue_stop_endpoint(xhci, command, slot_id, ep_index, 0);
+ xhci_ring_cmd_db(xhci);
- /* Stop processing the cancelled list if the watchdog timer is
- * running.
- */
- if (xhci->xhc_state & XHCI_STATE_DYING)
return;
- } while (cur_td != last_unlinked_td);
+ default:
+ break;
+ }
+ }
+ /* will queue a set TR deq if stopped on a cancelled, uncleared TD */
+ xhci_invalidate_cancelled_tds(ep);
+ xhci_stop_watchdog_timer_in_irq(xhci, ep);
- /* Return to the event handler with xhci->lock re-acquired */
+ /* Otherwise ring the doorbell(s) to restart queued transfers */
+ xhci_giveback_invalidated_tds(ep);
+ ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
static void xhci_kill_ring_urbs(struct xhci_hcd *xhci, struct xhci_ring *ring)
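
The reworked stop-endpoint path above tracks each cancelled TD with a cancel_status instead of giving URBs back inline: most cancelled TDs are turned into no-ops, the single TD the controller may have cached gets a Set TR Dequeue command, and halted TDs are routed through the reset-endpoint handler. A comment-style summary of the transitions as used in the hunks above (a reading of the new code, not kernel documentation):

/*
 * cancel_status transitions used by the code above:
 *
 *   TD_DIRTY             newly cancelled, may still be cached by the xHC
 *   TD_HALTED            endpoint halted on this TD, queued for hard reset
 *     both -> TD_CLEARED          not at the HW dequeue ptr: td_to_noop()
 *     both -> TD_CLEARING_CACHE   at the HW dequeue ptr: Set TR Deq queued
 *   TD_CLEARING_CACHE -> TD_CLEARED   when xhci_handle_cmd_set_deq() runs
 *
 * Only TD_CLEARED TDs are given back by xhci_giveback_invalidated_tds().
 */
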
@@ -1064,17 +1297,18 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
unsigned int ep_index;
unsigned int stream_id;
struct xhci_ring *ep_ring;
- struct xhci_virt_device *dev;
struct xhci_virt_ep *ep;
struct xhci_ep_ctx *ep_ctx;
struct xhci_slot_ctx *slot_ctx;
+ struct xhci_td *td, *tmp_td;
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
stream_id = TRB_TO_STREAM_ID(le32_to_cpu(trb->generic.field[2]));
- dev = xhci->devs[slot_id];
- ep = &dev->eps[ep_index];
+ ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+ if (!ep)
+ return;
- ep_ring = xhci_stream_id_to_ring(dev, ep_index, stream_id);
+ ep_ring = xhci_virt_ep_to_ring(xhci, ep, stream_id);
if (!ep_ring) {
xhci_warn(xhci, "WARN Set TR deq ptr command for freed stream ID %u\n",
stream_id);
@@ -1082,8 +1316,8 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
goto cleanup;
}
- ep_ctx = xhci_get_ep_ctx(xhci, dev->out_ctx, ep_index);
- slot_ctx = xhci_get_slot_ctx(xhci, dev->out_ctx);
+ ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
+ slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
trace_xhci_handle_cmd_set_deq(slot_ctx);
trace_xhci_handle_cmd_set_deq_ep(ep_ctx);
@@ -1136,7 +1370,7 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
/* Update the ring's dequeue segment and dequeue pointer
* to reflect the new position.
*/
- update_ring_for_set_deq_completion(xhci, dev,
+ update_ring_for_set_deq_completion(xhci, ep->vdev,
ep_ring, ep_index);
} else {
xhci_warn(xhci, "Mismatch between completed Set TR Deq Ptr command & xHCI internal state.\n");
@@ -1144,11 +1378,19 @@ static void xhci_handle_cmd_set_deq(struct xhci_hcd *xhci, int slot_id,
ep->queued_deq_seg, ep->queued_deq_ptr);
}
}
-
+ /* HW cached TDs cleared from cache, give them back */
+ list_for_each_entry_safe(td, tmp_td, &ep->cancelled_td_list,
+ cancelled_td_list) {
+ ep_ring = xhci_urb_to_transfer_ring(ep->xhci, td->urb);
+ if (td->cancel_status == TD_CLEARING_CACHE) {
+ td->cancel_status = TD_CLEARED;
+ xhci_td_cleanup(ep->xhci, td, ep_ring, td->status);
+ }
+ }
cleanup:
- dev->eps[ep_index].ep_state &= ~SET_DEQ_PENDING;
- dev->eps[ep_index].queued_deq_seg = NULL;
- dev->eps[ep_index].queued_deq_ptr = NULL;
+ ep->ep_state &= ~SET_DEQ_PENDING;
+ ep->queued_deq_seg = NULL;
+ ep->queued_deq_ptr = NULL;
/* Restart any rings with pending URBs */
ring_doorbell_for_active_rings(xhci, slot_id, ep_index);
}
@@ -1156,13 +1398,16 @@ cleanup:
static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
union xhci_trb *trb, u32 cmd_comp_code)
{
- struct xhci_virt_device *vdev;
+ struct xhci_virt_ep *ep;
struct xhci_ep_ctx *ep_ctx;
unsigned int ep_index;
ep_index = TRB_TO_EP_INDEX(le32_to_cpu(trb->generic.field[3]));
- vdev = xhci->devs[slot_id];
- ep_ctx = xhci_get_ep_ctx(xhci, vdev->out_ctx, ep_index);
+ ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+ if (!ep)
+ return;
+
+ ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
trace_xhci_handle_cmd_reset_ep(ep_ctx);
/* This command will only fail if the endpoint wasn't halted,
@@ -1171,27 +1416,15 @@ static void xhci_handle_cmd_reset_ep(struct xhci_hcd *xhci, int slot_id,
xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
"Ignoring reset ep completion code of %u", cmd_comp_code);
- /* HW with the reset endpoint quirk needs to have a configure endpoint
- * command complete before the endpoint can be used. Queue that here
- * because the HW can't handle two commands being queued in a row.
- */
- if (xhci->quirks & XHCI_RESET_EP_QUIRK) {
- struct xhci_command *command;
+ /* Cleanup cancelled TDs as ep is stopped. May queue a Set TR Deq cmd */
+ xhci_invalidate_cancelled_tds(ep);
- command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
- if (!command)
- return;
+ if (xhci->quirks & XHCI_RESET_EP_QUIRK)
+ xhci_dbg(xhci, "Note: Removed workaround to queue config ep for this hw");
+ /* Clear our internal halted state */
+ ep->ep_state &= ~EP_HALTED;
- xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
- "Queueing configure endpoint command");
- xhci_queue_configure_endpoint(xhci, command,
- xhci->devs[slot_id]->in_ctx->dma, slot_id,
- false);
- xhci_ring_cmd_db(xhci);
- } else {
- /* Clear our internal halted state */
- xhci->devs[slot_id]->eps[ep_index].ep_state &= ~EP_HALTED;
- }
+ xhci_giveback_invalidated_tds(ep);
/* if this was a soft reset, then restart */
if ((le32_to_cpu(trb->generic.field[3])) & TRB_TSP)
@@ -1226,7 +1459,7 @@ static void xhci_handle_cmd_disable_slot(struct xhci_hcd *xhci, int slot_id)
}
static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
- struct xhci_event_cmd *event, u32 cmd_comp_code)
+ u32 cmd_comp_code)
{
struct xhci_virt_device *virt_dev;
struct xhci_input_control_ctx *ctrl_ctx;
@@ -1244,6 +1477,8 @@ static void xhci_handle_cmd_config_ep(struct xhci_hcd *xhci, int slot_id,
* is not waiting on the configure endpoint command.
*/
virt_dev = xhci->devs[slot_id];
+ if (!virt_dev)
+ return;
ctrl_ctx = xhci_get_input_control_ctx(virt_dev->in_ctx);
if (!ctrl_ctx) {
xhci_warn(xhci, "Could not get input context, bad type.\n");
@@ -1288,24 +1523,27 @@ static void xhci_handle_cmd_addr_dev(struct xhci_hcd *xhci, int slot_id)
struct xhci_slot_ctx *slot_ctx;
vdev = xhci->devs[slot_id];
+ if (!vdev)
+ return;
slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
trace_xhci_handle_cmd_addr_dev(slot_ctx);
}
-static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id,
- struct xhci_event_cmd *event)
+static void xhci_handle_cmd_reset_dev(struct xhci_hcd *xhci, int slot_id)
{
struct xhci_virt_device *vdev;
struct xhci_slot_ctx *slot_ctx;
vdev = xhci->devs[slot_id];
+ if (!vdev) {
+ xhci_warn(xhci, "Reset device command completion for disabled slot %u\n",
+ slot_id);
+ return;
+ }
slot_ctx = xhci_get_slot_ctx(xhci, vdev->out_ctx);
trace_xhci_handle_cmd_reset_dev(slot_ctx);
xhci_dbg(xhci, "Completed reset device command.\n");
- if (!xhci->devs[slot_id])
- xhci_warn(xhci, "Reset device command completion "
- "for disabled slot %u\n", slot_id);
}
static void xhci_handle_cmd_nec_get_fw(struct xhci_hcd *xhci,
@@ -1398,7 +1636,7 @@ time_out_completed:
static void handle_cmd_completion(struct xhci_hcd *xhci,
struct xhci_event_cmd *event)
{
- int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
+ unsigned int slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
u64 cmd_dma;
dma_addr_t cmd_dequeue_dma;
u32 cmd_comp_code;
@@ -1406,6 +1644,11 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
struct xhci_command *cmd;
u32 cmd_type;
+ if (slot_id >= MAX_HC_SLOTS) {
+ xhci_warn(xhci, "Invalid slot_id %u\n", slot_id);
+ return;
+ }
+
cmd_dma = le64_to_cpu(event->cmd_trb);
cmd_trb = xhci->cmd_ring->dequeue;
@@ -1466,8 +1709,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
break;
case TRB_CONFIG_EP:
if (!cmd->completion)
- xhci_handle_cmd_config_ep(xhci, slot_id, event,
- cmd_comp_code);
+ xhci_handle_cmd_config_ep(xhci, slot_id, cmd_comp_code);
break;
case TRB_EVAL_CONTEXT:
break;
@@ -1478,7 +1720,8 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
WARN_ON(slot_id != TRB_TO_SLOT_ID(
le32_to_cpu(cmd_trb->generic.field[3])));
if (!cmd->completion)
- xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb, event);
+ xhci_handle_cmd_stop_ep(xhci, slot_id, cmd_trb,
+ cmd_comp_code);
break;
case TRB_SET_DEQ:
WARN_ON(slot_id != TRB_TO_SLOT_ID(
@@ -1501,7 +1744,7 @@ static void handle_cmd_completion(struct xhci_hcd *xhci,
*/
slot_id = TRB_TO_SLOT_ID(
le32_to_cpu(cmd_trb->generic.field[3]));
- xhci_handle_cmd_reset_dev(xhci, slot_id, event);
+ xhci_handle_cmd_reset_dev(xhci, slot_id);
break;
case TRB_NEC_GET_FW:
xhci_handle_cmd_nec_get_fw(xhci, event);
@@ -1528,11 +1771,8 @@ event_handled:
}
static void handle_vendor_event(struct xhci_hcd *xhci,
- union xhci_trb *event)
+ union xhci_trb *event, u32 trb_type)
{
- u32 trb_type;
-
- trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->generic.field[3]));
xhci_dbg(xhci, "Vendor specific event TRB type = %u\n", trb_type);
if (trb_type == TRB_NEC_CMD_COMP && (xhci->quirks & XHCI_NEC_HOST))
handle_cmd_completion(xhci, &event->event_cmd);
@@ -1849,37 +2089,6 @@ static void xhci_clear_hub_tt_buffer(struct xhci_hcd *xhci, struct xhci_td *td,
}
}
-static void xhci_cleanup_halted_endpoint(struct xhci_hcd *xhci,
- unsigned int slot_id, unsigned int ep_index,
- unsigned int stream_id, struct xhci_td *td,
- enum xhci_ep_reset_type reset_type)
-{
- struct xhci_virt_ep *ep = &xhci->devs[slot_id]->eps[ep_index];
- struct xhci_command *command;
-
- /*
- * Avoid resetting endpoint if link is inactive. Can cause host hang.
- * Device will be reset soon to recover the link so don't do anything
- */
- if (xhci->devs[slot_id]->flags & VDEV_PORT_ERROR)
- return;
-
- command = xhci_alloc_command(xhci, false, GFP_ATOMIC);
- if (!command)
- return;
-
- ep->ep_state |= EP_HALTED;
-
- xhci_queue_reset_ep(xhci, command, slot_id, ep_index, reset_type);
-
- if (reset_type == EP_HARD_RESET) {
- ep->ep_state |= EP_HARD_CLEAR_TOGGLE;
- xhci_cleanup_stalled_ring(xhci, slot_id, ep_index, stream_id,
- td);
- }
- xhci_ring_cmd_db(xhci);
-}
-
/* Check if an error has halted the endpoint ring. The class driver will
* cleanup the halt for a non-default control endpoint if we indicate a stall.
* However, a babble and other errors also halt the endpoint ring, and the class
@@ -1920,82 +2129,63 @@ int xhci_is_vendor_info_code(struct xhci_hcd *xhci, unsigned int trb_comp_code)
return 0;
}
-static int xhci_td_cleanup(struct xhci_hcd *xhci, struct xhci_td *td,
- struct xhci_ring *ep_ring, int *status)
-{
- struct urb *urb = NULL;
-
- /* Clean up the endpoint's TD list */
- urb = td->urb;
-
- /* if a bounce buffer was used to align this td then unmap it */
- xhci_unmap_td_bounce_buffer(xhci, ep_ring, td);
-
- /* Do one last check of the actual transfer length.
- * If the host controller said we transferred more data than the buffer
- * length, urb->actual_length will be a very big number (since it's
- * unsigned). Play it safe and say we didn't transfer anything.
- */
- if (urb->actual_length > urb->transfer_buffer_length) {
- xhci_warn(xhci, "URB req %u and actual %u transfer length mismatch\n",
- urb->transfer_buffer_length, urb->actual_length);
- urb->actual_length = 0;
- *status = 0;
- }
- list_del_init(&td->td_list);
- /* Was this TD slated to be cancelled but completed anyway? */
- if (!list_empty(&td->cancelled_td_list))
- list_del_init(&td->cancelled_td_list);
-
- inc_td_cnt(urb);
- /* Giveback the urb when all the tds are completed */
- if (last_td_in_urb(td)) {
- if ((urb->actual_length != urb->transfer_buffer_length &&
- (urb->transfer_flags & URB_SHORT_NOT_OK)) ||
- (*status != 0 && !usb_endpoint_xfer_isoc(&urb->ep->desc)))
- xhci_dbg(xhci, "Giveback URB %p, len = %d, expected = %d, status = %d\n",
- urb, urb->actual_length,
- urb->transfer_buffer_length, *status);
-
- /* set isoc urb status to 0 just as EHCI, UHCI, and OHCI */
- if (usb_pipetype(urb->pipe) == PIPE_ISOCHRONOUS)
- *status = 0;
- xhci_giveback_urb_in_irq(xhci, td, *status);
- }
-
- return 0;
-}
-
static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
- struct xhci_transfer_event *event,
- struct xhci_virt_ep *ep, int *status)
+ struct xhci_transfer_event *event, struct xhci_virt_ep *ep)
{
- struct xhci_virt_device *xdev;
struct xhci_ep_ctx *ep_ctx;
struct xhci_ring *ep_ring;
- unsigned int slot_id;
u32 trb_comp_code;
- int ep_index;
- slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
- xdev = xhci->devs[slot_id];
- ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
- ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+ ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
- if (trb_comp_code == COMP_STOPPED_LENGTH_INVALID ||
- trb_comp_code == COMP_STOPPED ||
- trb_comp_code == COMP_STOPPED_SHORT_PACKET) {
- /* The Endpoint Stop Command completion will take care of any
- * stopped TDs. A stopped TD may be restarted, so don't update
+ switch (trb_comp_code) {
+ case COMP_STOPPED_LENGTH_INVALID:
+ case COMP_STOPPED_SHORT_PACKET:
+ case COMP_STOPPED:
+ /*
+ * The "Stop Endpoint" completion will take care of any
+ * stopped TDs. A stopped TD may be restarted, so don't update
* the ring dequeue pointer or take this TD off any lists yet.
*/
return 0;
- }
- if (trb_comp_code == COMP_STALL_ERROR ||
- xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
- trb_comp_code)) {
+ case COMP_USB_TRANSACTION_ERROR:
+ case COMP_BABBLE_DETECTED_ERROR:
+ case COMP_SPLIT_TRANSACTION_ERROR:
+ /*
+ * If endpoint context state is not halted we might be
+ * racing with a reset endpoint command issued by an unsuccessful
+ * stop endpoint completion (context error). In that case the
+ * td should be on the cancelled list, and EP_HALTED flag set.
+ *
+ * Otherwise it's not halted, because the 0.95 spec states that a
+ * babbling control endpoint should not halt. The 0.96 spec
+ * again says it should. Some HW claims to be 0.95 compliant,
+ * but it halts the control endpoint anyway.
+ */
+ if (GET_EP_CTX_STATE(ep_ctx) != EP_STATE_HALTED) {
+ /*
+ * If EP_HALTED is set and TD is on the cancelled list
+ * the TD and dequeue pointer will be handled by reset
+ * ep command completion
+ */
+ if ((ep->ep_state & EP_HALTED) &&
+ !list_empty(&td->cancelled_td_list)) {
+ xhci_dbg(xhci, "Already resolving halted ep for 0x%llx\n",
+ (unsigned long long)xhci_trb_virt_to_dma(
+ td->start_seg, td->first_trb));
+ return 0;
+ }
+ /* endpoint not halted, don't reset it */
+ break;
+ }
+ /* Almost same procedure as for STALL_ERROR below */
+ xhci_clear_hub_tt_buffer(xhci, td, ep);
+ xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td,
+ EP_HARD_RESET);
+ return 0;
+ case COMP_STALL_ERROR:
/*
* xhci internal endpoint state will go to a "halt" state for
* any stall, including default control pipe protocol stall.
@@ -2006,18 +2196,24 @@ static int finish_td(struct xhci_hcd *xhci, struct xhci_td *td,
* stall later. Hub TT buffer should only be cleared for FS/LS
* devices behind HS hubs for functional stalls.
*/
- if ((ep_index != 0) || (trb_comp_code != COMP_STALL_ERROR))
+ if (ep->ep_index != 0)
xhci_clear_hub_tt_buffer(xhci, td, ep);
- xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
- ep_ring->stream_id, td, EP_HARD_RESET);
- } else {
- /* Update ring dequeue pointer */
- while (ep_ring->dequeue != td->last_trb)
- inc_deq(xhci, ep_ring);
- inc_deq(xhci, ep_ring);
+
+ xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td,
+ EP_HARD_RESET);
+
+ return 0; /* xhci_handle_halted_endpoint marked td cancelled */
+ default:
+ break;
}
- return xhci_td_cleanup(xhci, td, ep_ring, status);
+ /* Update ring dequeue pointer */
+ ep_ring->dequeue = td->last_trb;
+ ep_ring->deq_seg = td->last_trb_seg;
+ ep_ring->num_trbs_free += td->num_trbs - 1;
+ inc_deq(xhci, ep_ring);
+
+ return xhci_td_cleanup(xhci, td, ep_ring, td->status);
}
/* sum trb lengths from ring dequeue up to stop_trb, _excluding_ stop_trb */
@@ -2040,21 +2236,15 @@ static int sum_trb_lengths(struct xhci_hcd *xhci, struct xhci_ring *ring,
*/
static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
union xhci_trb *ep_trb, struct xhci_transfer_event *event,
- struct xhci_virt_ep *ep, int *status)
+ struct xhci_virt_ep *ep)
{
- struct xhci_virt_device *xdev;
- unsigned int slot_id;
- int ep_index;
struct xhci_ep_ctx *ep_ctx;
u32 trb_comp_code;
u32 remaining, requested;
u32 trb_type;
trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(ep_trb->generic.field[3]));
- slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
- xdev = xhci->devs[slot_id];
- ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
- ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+ ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep->ep_index);
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
requested = td->urb->transfer_buffer_length;
remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
@@ -2064,13 +2254,13 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
if (trb_type != TRB_STATUS) {
xhci_warn(xhci, "WARN: Success on ctrl %s TRB without IOC set?\n",
(trb_type == TRB_DATA) ? "data" : "setup");
- *status = -ESHUTDOWN;
+ td->status = -ESHUTDOWN;
break;
}
- *status = 0;
+ td->status = 0;
break;
case COMP_SHORT_PACKET:
- *status = 0;
+ td->status = 0;
break;
case COMP_STOPPED_SHORT_PACKET:
if (trb_type == TRB_DATA || trb_type == TRB_NORMAL)
@@ -2102,7 +2292,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
ep_ctx, trb_comp_code))
break;
xhci_dbg(xhci, "TRB error %u, halted endpoint index = %u\n",
- trb_comp_code, ep_index);
+ trb_comp_code, ep->ep_index);
fallthrough;
case COMP_STALL_ERROR:
/* Did we transfer part of the data (middle) phase? */
@@ -2134,7 +2324,7 @@ static int process_ctrl_td(struct xhci_hcd *xhci, struct xhci_td *td,
td->urb->actual_length = requested;
finish_td:
- return finish_td(xhci, td, event, ep, status);
+ return finish_td(xhci, td, event, ep);
}
/*
@@ -2142,9 +2332,8 @@ finish_td:
*/
static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
union xhci_trb *ep_trb, struct xhci_transfer_event *event,
- struct xhci_virt_ep *ep, int *status)
+ struct xhci_virt_ep *ep)
{
- struct xhci_ring *ep_ring;
struct urb_priv *urb_priv;
int idx;
struct usb_iso_packet_descriptor *frame;
@@ -2153,7 +2342,6 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
u32 remaining, requested, ep_trb_len;
int short_framestatus;
- ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
urb_priv = td->urb->hcpriv;
idx = urb_priv->num_tds_done;
@@ -2214,26 +2402,23 @@ static int process_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
}
if (sum_trbs_for_length)
- frame->actual_length = sum_trb_lengths(xhci, ep_ring, ep_trb) +
+ frame->actual_length = sum_trb_lengths(xhci, ep->ring, ep_trb) +
ep_trb_len - remaining;
else
frame->actual_length = requested;
td->urb->actual_length += frame->actual_length;
- return finish_td(xhci, td, event, ep, status);
+ return finish_td(xhci, td, event, ep);
}
static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
- struct xhci_transfer_event *event,
- struct xhci_virt_ep *ep, int *status)
+ struct xhci_virt_ep *ep, int status)
{
- struct xhci_ring *ep_ring;
struct urb_priv *urb_priv;
struct usb_iso_packet_descriptor *frame;
int idx;
- ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
urb_priv = td->urb->hcpriv;
idx = urb_priv->num_tds_done;
frame = &td->urb->iso_frame_desc[idx];
@@ -2245,11 +2430,12 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
frame->actual_length = 0;
/* Update ring dequeue pointer */
- while (ep_ring->dequeue != td->last_trb)
- inc_deq(xhci, ep_ring);
- inc_deq(xhci, ep_ring);
+ ep->ring->dequeue = td->last_trb;
+ ep->ring->deq_seg = td->last_trb_seg;
+ ep->ring->num_trbs_free += td->num_trbs - 1;
+ inc_deq(xhci, ep->ring);
- return xhci_td_cleanup(xhci, td, ep_ring, status);
+ return xhci_td_cleanup(xhci, td, ep->ring, status);
}
/*
@@ -2257,18 +2443,14 @@ static int skip_isoc_td(struct xhci_hcd *xhci, struct xhci_td *td,
*/
static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
union xhci_trb *ep_trb, struct xhci_transfer_event *event,
- struct xhci_virt_ep *ep, int *status)
+ struct xhci_virt_ep *ep)
{
struct xhci_slot_ctx *slot_ctx;
struct xhci_ring *ep_ring;
u32 trb_comp_code;
u32 remaining, requested, ep_trb_len;
- unsigned int slot_id;
- int ep_index;
- slot_id = TRB_TO_SLOT_ID(le32_to_cpu(event->flags));
- slot_ctx = xhci_get_slot_ctx(xhci, xhci->devs[slot_id]->out_ctx);
- ep_index = TRB_TO_EP_ID(le32_to_cpu(event->flags)) - 1;
+ slot_ctx = xhci_get_slot_ctx(xhci, ep->vdev->out_ctx);
ep_ring = xhci_dma_to_transfer_ring(ep, le64_to_cpu(event->buffer));
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
remaining = EVENT_TRB_LEN(le32_to_cpu(event->transfer_len));
@@ -2285,13 +2467,13 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
td->urb->ep->desc.bEndpointAddress,
requested, remaining);
}
- *status = 0;
+ td->status = 0;
break;
case COMP_SHORT_PACKET:
xhci_dbg(xhci, "ep %#x - asked for %d bytes, %d bytes untransferred\n",
td->urb->ep->desc.bEndpointAddress,
requested, remaining);
- *status = 0;
+ td->status = 0;
break;
case COMP_STOPPED_SHORT_PACKET:
td->urb->actual_length = remaining;
@@ -2305,9 +2487,11 @@ static int process_bulk_intr_td(struct xhci_hcd *xhci, struct xhci_td *td,
if ((ep_ring->err_count++ > MAX_SOFT_RETRY) ||
le32_to_cpu(slot_ctx->tt_info) & TT_SLOT)
break;
- *status = 0;
- xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index,
- ep_ring->stream_id, td, EP_SOFT_RESET);
+
+ td->status = 0;
+
+ xhci_handle_halted_endpoint(xhci, ep, ep_ring->stream_id, td,
+ EP_SOFT_RESET);
return 0;
default:
/* do nothing */
@@ -2326,7 +2510,7 @@ finish_td:
remaining);
td->urb->actual_length = 0;
}
- return finish_td(xhci, td, event, ep, status);
+ return finish_td(xhci, td, event, ep);
}
/*
@@ -2337,7 +2521,6 @@ finish_td:
static int handle_tx_event(struct xhci_hcd *xhci,
struct xhci_transfer_event *event)
{
- struct xhci_virt_device *xdev;
struct xhci_virt_ep *ep;
struct xhci_ring *ep_ring;
unsigned int slot_id;
@@ -2358,16 +2541,14 @@ static int handle_tx_event(struct xhci_hcd *xhci,
trb_comp_code = GET_COMP_CODE(le32_to_cpu(event->transfer_len));
ep_trb_dma = le64_to_cpu(event->buffer);
- xdev = xhci->devs[slot_id];
- if (!xdev) {
- xhci_err(xhci, "ERROR Transfer event pointed to bad slot %u\n",
- slot_id);
+ ep = xhci_get_virt_ep(xhci, slot_id, ep_index);
+ if (!ep) {
+ xhci_err(xhci, "ERROR Invalid Transfer event\n");
goto err_out;
}
- ep = &xdev->eps[ep_index];
ep_ring = xhci_dma_to_transfer_ring(ep, ep_trb_dma);
- ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
+ ep_ctx = xhci_get_ep_ctx(xhci, ep->vdev->out_ctx, ep_index);
if (GET_EP_CTX_STATE(ep_ctx) == EP_STATE_DISABLED) {
xhci_err(xhci,
@@ -2383,8 +2564,8 @@ static int handle_tx_event(struct xhci_hcd *xhci,
case COMP_USB_TRANSACTION_ERROR:
case COMP_INVALID_STREAM_TYPE_ERROR:
case COMP_INVALID_STREAM_ID_ERROR:
- xhci_cleanup_halted_endpoint(xhci, slot_id, ep_index, 0,
- NULL, EP_SOFT_RESET);
+ xhci_handle_halted_endpoint(xhci, ep, 0, NULL,
+ EP_SOFT_RESET);
goto cleanup;
case COMP_RING_UNDERRUN:
case COMP_RING_OVERRUN:
@@ -2440,7 +2621,6 @@ static int handle_tx_event(struct xhci_hcd *xhci,
case COMP_STALL_ERROR:
xhci_dbg(xhci, "Stalled endpoint for slot %u ep %u\n", slot_id,
ep_index);
- ep->ep_state |= EP_HALTED;
status = -EPIPE;
break;
case COMP_SPLIT_TRANSACTION_ERROR:
@@ -2568,11 +2748,10 @@ static int handle_tx_event(struct xhci_hcd *xhci,
if (trb_comp_code == COMP_STALL_ERROR ||
xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
trb_comp_code)) {
- xhci_cleanup_halted_endpoint(xhci, slot_id,
- ep_index,
- ep_ring->stream_id,
- NULL,
- EP_HARD_RESET);
+ xhci_handle_halted_endpoint(xhci, ep,
+ ep_ring->stream_id,
+ NULL,
+ EP_HARD_RESET);
}
goto cleanup;
}
@@ -2631,7 +2810,7 @@ static int handle_tx_event(struct xhci_hcd *xhci,
return -ESHUTDOWN;
}
- skip_isoc_td(xhci, td, event, ep, &status);
+ skip_isoc_td(xhci, td, ep, status);
goto cleanup;
}
if (trb_comp_code == COMP_SHORT_PACKET)
@@ -2659,25 +2838,26 @@ static int handle_tx_event(struct xhci_hcd *xhci,
* endpoint. Otherwise, the endpoint remains stalled
* indefinitely.
*/
+
if (trb_is_noop(ep_trb)) {
if (trb_comp_code == COMP_STALL_ERROR ||
xhci_requires_manual_halt_cleanup(xhci, ep_ctx,
trb_comp_code))
- xhci_cleanup_halted_endpoint(xhci, slot_id,
- ep_index,
- ep_ring->stream_id,
- td, EP_HARD_RESET);
+ xhci_handle_halted_endpoint(xhci, ep,
+ ep_ring->stream_id,
+ td, EP_HARD_RESET);
goto cleanup;
}
+ td->status = status;
+
/* update the urb's actual_length and give back to the core */
if (usb_endpoint_xfer_control(&td->urb->ep->desc))
- process_ctrl_td(xhci, td, ep_trb, event, ep, &status);
+ process_ctrl_td(xhci, td, ep_trb, event, ep);
else if (usb_endpoint_xfer_isoc(&td->urb->ep->desc))
- process_isoc_td(xhci, td, ep_trb, event, ep, &status);
+ process_isoc_td(xhci, td, ep_trb, event, ep);
else
- process_bulk_intr_td(xhci, td, ep_trb, event, ep,
- &status);
+ process_bulk_intr_td(xhci, td, ep_trb, event, ep);
cleanup:
handling_skipped_tds = ep->skip &&
trb_comp_code != COMP_MISSED_SERVICE_ERROR &&
@@ -2722,6 +2902,7 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
{
union xhci_trb *event;
int update_ptrs = 1;
+ u32 trb_type;
int ret;
/* Event ring hasn't been allocated yet. */
@@ -2743,31 +2924,30 @@ static int xhci_handle_event(struct xhci_hcd *xhci)
* speculative reads of the event's flags/data below.
*/
rmb();
+ trb_type = TRB_FIELD_TO_TYPE(le32_to_cpu(event->event_cmd.flags));
/* FIXME: Handle more event types. */
- switch (le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) {
- case TRB_TYPE(TRB_COMPLETION):
+
+ switch (trb_type) {
+ case TRB_COMPLETION:
handle_cmd_completion(xhci, &event->event_cmd);
break;
- case TRB_TYPE(TRB_PORT_STATUS):
+ case TRB_PORT_STATUS:
handle_port_status(xhci, event);
update_ptrs = 0;
break;
- case TRB_TYPE(TRB_TRANSFER):
+ case TRB_TRANSFER:
ret = handle_tx_event(xhci, &event->trans_event);
if (ret >= 0)
update_ptrs = 0;
break;
- case TRB_TYPE(TRB_DEV_NOTE):
+ case TRB_DEV_NOTE:
handle_device_notification(xhci, event);
break;
default:
- if ((le32_to_cpu(event->event_cmd.flags) & TRB_TYPE_BITMASK) >=
- TRB_TYPE(48))
- handle_vendor_event(xhci, event);
+ if (trb_type >= TRB_VENDOR_DEFINED_LOW)
+ handle_vendor_event(xhci, event, trb_type);
else
- xhci_warn(xhci, "ERROR unknown event type %d\n",
- TRB_FIELD_TO_TYPE(
- le32_to_cpu(event->event_cmd.flags)));
+ xhci_warn(xhci, "ERROR unknown event type %d\n", trb_type);
}
/* Any of the above functions may drop and re-acquire the lock, so check
* to make sure a watchdog timer didn't mark the host as non-responsive.
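The rework of xhci_handle_event() above decodes the TRB type once with TRB_FIELD_TO_TYPE() and switches on plain type numbers instead of TRB_TYPE()-encoded constants, which also lets the vendor-event check be a simple range compare against TRB_VENDOR_DEFINED_LOW. A minimal standalone sketch of that field decoding, assuming the usual xHCI layout where the type occupies bits 15:10 of the event TRB's control dword (see xhci.h for the authoritative constants):

	/* Illustration only; mirrors the decode used above, not the driver itself. */
	#include <stdint.h>
	#include <stdio.h>

	#define TRB_TYPE_SHIFT          10
	#define TRB_TYPE_BITMASK        (0x3fu << TRB_TYPE_SHIFT)   /* bits 15:10 */
	#define TRB_FIELD_TO_TYPE(f)    (((f) & TRB_TYPE_BITMASK) >> TRB_TYPE_SHIFT)
	#define TRB_VENDOR_DEFINED_LOW  48                           /* IDs 48-63 */

	int main(void)
	{
		uint32_t flags = 33u << TRB_TYPE_SHIFT;  /* e.g. a command completion event */
		uint32_t trb_type = TRB_FIELD_TO_TYPE(flags);

		if (trb_type >= TRB_VENDOR_DEFINED_LOW)
			printf("vendor-defined event %u\n", trb_type);
		else
			printf("standard event type %u\n", trb_type);
		return 0;
	}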
@@ -2931,6 +3111,8 @@ static void queue_trb(struct xhci_hcd *xhci, struct xhci_ring *ring,
trb->field[0] = cpu_to_le32(field1);
trb->field[1] = cpu_to_le32(field2);
trb->field[2] = cpu_to_le32(field3);
+ /* make sure TRB is fully written before giving it to the controller */
+ wmb();
trb->field[3] = cpu_to_le32(field4);
trace_xhci_queue_trb(ring, trb);
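The new barrier in queue_trb() exists because field[3] carries the cycle bit that hands the TRB over to the controller: the first three dwords must be globally visible before ownership flips. A minimal sketch of the same producer-side ordering, using a generic write barrier as a stand-in for the kernel's wmb() and a hypothetical descriptor layout:

	#include <stdint.h>

	struct desc {
		uint32_t field[4];	/* field[3] bit 0 = cycle/ownership bit */
	};

	/* stand-in for the kernel's wmb(); illustration only */
	#define barrier_wmb() __sync_synchronize()

	static void post_desc(volatile struct desc *d, uint32_t f0, uint32_t f1,
			      uint32_t f2, uint32_t f3_with_cycle)
	{
		d->field[0] = f0;
		d->field[1] = f1;
		d->field[2] = f2;
		/* payload must be visible before the ownership bit is written */
		barrier_wmb();
		d->field[3] = f3_with_cycle;
	}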
@@ -2946,6 +3128,7 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
u32 ep_state, unsigned int num_trbs, gfp_t mem_flags)
{
unsigned int num_trbs_needed;
+ unsigned int link_trb_count = 0;
/* Make sure the endpoint has been added to xHC schedule */
switch (ep_state) {
@@ -3017,7 +3200,19 @@ static int prepare_ring(struct xhci_hcd *xhci, struct xhci_ring *ep_ring,
ep_ring->enq_seg = ep_ring->enq_seg->next;
ep_ring->enqueue = ep_ring->enq_seg->trbs;
+
+ /* prevent infinite loop if all first trbs are link trbs */
+ if (link_trb_count++ > ep_ring->num_segs) {
+ xhci_warn(xhci, "Ring is an endless link TRB loop\n");
+ return -EINVAL;
+ }
}
+
+ if (last_trb_on_seg(ep_ring->enq_seg, ep_ring->enqueue)) {
+ xhci_warn(xhci, "Missing link TRB at end of ring segment\n");
+ return -EINVAL;
+ }
+
return 0;
}
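The prepare_ring() guard above bounds the enqueue-advance loop by the number of segments, so a corrupted ring made entirely of link TRBs can no longer spin forever. A tiny sketch of the same bounded-walk idea on a generic segment list (names are hypothetical, not the xHCI structures):

	/* Sketch: never follow more link hops than there are segments in the ring. */
	struct seg {
		struct seg *next;
		int is_link;
	};

	static int advance_past_links(struct seg **cur, unsigned int num_segs)
	{
		unsigned int hops = 0;

		while ((*cur)->is_link) {
			*cur = (*cur)->next;
			if (++hops > num_segs)
				return -1;	/* endless link loop, bail out */
		}
		return 0;
	}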
@@ -3036,7 +3231,8 @@ static int prepare_transfer(struct xhci_hcd *xhci,
struct xhci_ring *ep_ring;
struct xhci_ep_ctx *ep_ctx = xhci_get_ep_ctx(xhci, xdev->out_ctx, ep_index);
- ep_ring = xhci_stream_id_to_ring(xdev, ep_index, stream_id);
+ ep_ring = xhci_triad_to_transfer_ring(xhci, xdev->slot_id, ep_index,
+ stream_id);
if (!ep_ring) {
xhci_dbg(xhci, "Can't prepare ring for bad stream ID %u\n",
stream_id);
@@ -3275,12 +3471,16 @@ static int xhci_align_td(struct xhci_hcd *xhci, struct urb *urb, u32 enqd_len,
/* create a max max_pkt sized bounce buffer pointed to by last trb */
if (usb_urb_dir_out(urb)) {
- len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
- seg->bounce_buf, new_buff_len, enqd_len);
- if (len != new_buff_len)
- xhci_warn(xhci,
- "WARN Wrong bounce buffer write length: %zu != %d\n",
- len, new_buff_len);
+ if (urb->num_sgs) {
+ len = sg_pcopy_to_buffer(urb->sg, urb->num_sgs,
+ seg->bounce_buf, new_buff_len, enqd_len);
+ if (len != new_buff_len)
+ xhci_warn(xhci, "WARN Wrong bounce buffer write length: %zu != %d\n",
+ len, new_buff_len);
+ } else {
+ memcpy(seg->bounce_buf, urb->transfer_buffer + enqd_len, new_buff_len);
+ }
+
seg->bounce_dma = dma_map_single(dev, seg->bounce_buf,
max_pkt, DMA_TO_DEVICE);
} else {
@@ -3401,7 +3601,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
field |= TRB_IOC;
more_trbs_coming = false;
td->last_trb = ring->enqueue;
-
+ td->last_trb_seg = ring->enq_seg;
if (xhci_urb_suitable_for_idt(urb)) {
memcpy(&send_addr, urb->transfer_buffer,
trb_buff_len);
@@ -3427,7 +3627,7 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
upper_32_bits(send_addr),
length_field,
field);
-
+ td->num_trbs++;
addr += trb_buff_len;
sent_len = trb_buff_len;
@@ -3451,8 +3651,10 @@ int xhci_queue_bulk_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
ep_index, urb->stream_id,
1, urb, 1, mem_flags);
urb_priv->td[1].last_trb = ring->enqueue;
+ urb_priv->td[1].last_trb_seg = ring->enq_seg;
field = TRB_TYPE(TRB_NORMAL) | ring->cycle_state | TRB_IOC;
queue_trb(xhci, ring, 0, 0, 0, TRB_INTR_TARGET(0), field);
+ urb_priv->td[1].num_trbs++;
}
check_trb_math(urb, enqd_len);
@@ -3503,6 +3705,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
urb_priv = urb->hcpriv;
td = &urb_priv->td[0];
+ td->num_trbs = num_trbs;
/*
* Don't give the first TRB to the hardware (by toggling the cycle bit)
@@ -3575,6 +3778,7 @@ int xhci_queue_ctrl_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
/* Save the DMA address of the last TRB in the TD */
td->last_trb = ep_ring->enqueue;
+ td->last_trb_seg = ep_ring->enq_seg;
/* Queue status TRB - see Table 7 and sections 4.11.2.2 and 6.4.1.2.3 */
/* If the device sent data, the status stage is an OUT transfer */
@@ -3819,7 +4023,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
goto cleanup;
}
td = &urb_priv->td[i];
-
+ td->num_trbs = trbs_per_td;
/* use SIA as default, if frame id is used overwrite it */
sia_frame_id = TRB_SIA;
if (!(urb->transfer_flags & URB_ISO_ASAP) &&
@@ -3862,6 +4066,7 @@ static int xhci_queue_isoc_tx(struct xhci_hcd *xhci, gfp_t mem_flags,
} else {
more_trbs_coming = false;
td->last_trb = ep_ring->enqueue;
+ td->last_trb_seg = ep_ring->enq_seg;
field |= TRB_IOC;
if (trb_block_event_intr(xhci, num_tds, i))
field |= TRB_BEI;
@@ -4145,71 +4350,6 @@ int xhci_queue_stop_endpoint(struct xhci_hcd *xhci, struct xhci_command *cmd,
trb_slot_id | trb_ep_index | type | trb_suspend, false);
}
-/* Set Transfer Ring Dequeue Pointer command */
-void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
- unsigned int slot_id, unsigned int ep_index,
- struct xhci_dequeue_state *deq_state)
-{
- dma_addr_t addr;
- u32 trb_slot_id = SLOT_ID_FOR_TRB(slot_id);
- u32 trb_ep_index = EP_ID_FOR_TRB(ep_index);
- u32 trb_stream_id = STREAM_ID_FOR_TRB(deq_state->stream_id);
- u32 trb_sct = 0;
- u32 type = TRB_TYPE(TRB_SET_DEQ);
- struct xhci_virt_ep *ep;
- struct xhci_command *cmd;
- int ret;
-
- xhci_dbg_trace(xhci, trace_xhci_dbg_cancel_urb,
- "Set TR Deq Ptr cmd, new deq seg = %p (0x%llx dma), new deq ptr = %p (0x%llx dma), new cycle = %u",
- deq_state->new_deq_seg,
- (unsigned long long)deq_state->new_deq_seg->dma,
- deq_state->new_deq_ptr,
- (unsigned long long)xhci_trb_virt_to_dma(
- deq_state->new_deq_seg, deq_state->new_deq_ptr),
- deq_state->new_cycle_state);
-
- addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
- deq_state->new_deq_ptr);
- if (addr == 0) {
- xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
- xhci_warn(xhci, "WARN deq seg = %p, deq pt = %p\n",
- deq_state->new_deq_seg, deq_state->new_deq_ptr);
- return;
- }
- ep = &xhci->devs[slot_id]->eps[ep_index];
- if ((ep->ep_state & SET_DEQ_PENDING)) {
- xhci_warn(xhci, "WARN Cannot submit Set TR Deq Ptr\n");
- xhci_warn(xhci, "A Set TR Deq Ptr command is pending.\n");
- return;
- }
-
- /* This function gets called from contexts where it cannot sleep */
- cmd = xhci_alloc_command(xhci, false, GFP_ATOMIC);
- if (!cmd)
- return;
-
- ep->queued_deq_seg = deq_state->new_deq_seg;
- ep->queued_deq_ptr = deq_state->new_deq_ptr;
- if (deq_state->stream_id)
- trb_sct = SCT_FOR_TRB(SCT_PRI_TR);
- ret = queue_command(xhci, cmd,
- lower_32_bits(addr) | trb_sct | deq_state->new_cycle_state,
- upper_32_bits(addr), trb_stream_id,
- trb_slot_id | trb_ep_index | type, false);
- if (ret < 0) {
- xhci_free_command(xhci, cmd);
- return;
- }
-
- /* Stop the TD queueing code from ringing the doorbell until
- * this command completes. The HC won't set the dequeue pointer
- * if the ring is running, and ringing the doorbell starts the
- * ring running.
- */
- ep->ep_state |= SET_DEQ_PENDING;
-}
-
int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
int slot_id, unsigned int ep_index,
enum xhci_ep_reset_type reset_type)
diff --git a/drivers/usb/host/xhci-tegra.c b/drivers/usb/host/xhci-tegra.c
index 934be1686352..50bb91b6a4b8 100644
--- a/drivers/usb/host/xhci-tegra.c
+++ b/drivers/usb/host/xhci-tegra.c
@@ -623,6 +623,13 @@ static void tegra_xusb_mbox_handle(struct tegra_xusb *tegra,
enable);
if (err < 0)
break;
+
+ /*
+ * wait 500us for LFPS detector to be disabled before
+ * sending ACK
+ */
+ if (!enable)
+ usleep_range(500, 1000);
}
if (err < 0) {
diff --git a/drivers/usb/host/xhci.c b/drivers/usb/host/xhci.c
index 91ab81c3fc79..bd27bd670104 100644
--- a/drivers/usb/host/xhci.c
+++ b/drivers/usb/host/xhci.c
@@ -1440,15 +1440,6 @@ static unsigned int xhci_get_endpoint_flag(struct usb_endpoint_descriptor *desc)
return 1 << (xhci_get_endpoint_index(desc) + 1);
}
-/* Find the flag for this endpoint (for use in the control context). Use the
- * endpoint index to create a bitmask. The slot context is bit 0, endpoint 0 is
- * bit 1, etc.
- */
-static unsigned int xhci_get_endpoint_flag_from_index(unsigned int ep_index)
-{
- return 1 << (ep_index + 1);
-}
-
/* Compute the last valid endpoint context index. Basically, this is the
* endpoint index plus one. For slot contexts with more than valid endpoint,
* we find the most significant bit set in the added contexts flags.
@@ -1810,7 +1801,12 @@ static int xhci_urb_dequeue(struct usb_hcd *hcd, struct urb *urb, int status)
for (; i < urb_priv->num_tds; i++) {
td = &urb_priv->td[i];
- list_add_tail(&td->cancelled_td_list, &ep->cancelled_td_list);
+ /* TD can already be on cancelled list if ep halted on it */
+ if (list_empty(&td->cancelled_td_list)) {
+ td->cancel_status = TD_DIRTY;
+ list_add_tail(&td->cancelled_td_list,
+ &ep->cancelled_td_list);
+ }
}
/* Queue a stop endpoint command, but only if this is
@@ -2985,7 +2981,7 @@ static void xhci_check_bw_drop_ep_streams(struct xhci_hcd *xhci,
* else should be touching the xhci->devs[slot_id] structure, so we
* don't need to take the xhci->lock for manipulating that.
*/
-static int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
int i;
int ret = 0;
@@ -3083,7 +3079,7 @@ command_cleanup:
return ret;
}
-static void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
{
struct xhci_hcd *xhci;
struct xhci_virt_device *virt_dev;
@@ -3119,84 +3115,6 @@ static void xhci_setup_input_ctx_for_config_ep(struct xhci_hcd *xhci,
ctrl_ctx->add_flags |= cpu_to_le32(SLOT_FLAG);
}
-static void xhci_setup_input_ctx_for_quirk(struct xhci_hcd *xhci,
- unsigned int slot_id, unsigned int ep_index,
- struct xhci_dequeue_state *deq_state)
-{
- struct xhci_input_control_ctx *ctrl_ctx;
- struct xhci_container_ctx *in_ctx;
- struct xhci_ep_ctx *ep_ctx;
- u32 added_ctxs;
- dma_addr_t addr;
-
- in_ctx = xhci->devs[slot_id]->in_ctx;
- ctrl_ctx = xhci_get_input_control_ctx(in_ctx);
- if (!ctrl_ctx) {
- xhci_warn(xhci, "%s: Could not get input context, bad type.\n",
- __func__);
- return;
- }
-
- xhci_endpoint_copy(xhci, xhci->devs[slot_id]->in_ctx,
- xhci->devs[slot_id]->out_ctx, ep_index);
- ep_ctx = xhci_get_ep_ctx(xhci, in_ctx, ep_index);
- addr = xhci_trb_virt_to_dma(deq_state->new_deq_seg,
- deq_state->new_deq_ptr);
- if (addr == 0) {
- xhci_warn(xhci, "WARN Cannot submit config ep after "
- "reset ep command\n");
- xhci_warn(xhci, "WARN deq seg = %p, deq ptr = %p\n",
- deq_state->new_deq_seg,
- deq_state->new_deq_ptr);
- return;
- }
- ep_ctx->deq = cpu_to_le64(addr | deq_state->new_cycle_state);
-
- added_ctxs = xhci_get_endpoint_flag_from_index(ep_index);
- xhci_setup_input_ctx_for_config_ep(xhci, xhci->devs[slot_id]->in_ctx,
- xhci->devs[slot_id]->out_ctx, ctrl_ctx,
- added_ctxs, added_ctxs);
-}
-
-void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id,
- unsigned int ep_index, unsigned int stream_id,
- struct xhci_td *td)
-{
- struct xhci_dequeue_state deq_state;
-
- xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
- "Cleaning up stalled endpoint ring");
- /* We need to move the HW's dequeue pointer past this TD,
- * or it will attempt to resend it on the next doorbell ring.
- */
- xhci_find_new_dequeue_state(xhci, slot_id, ep_index, stream_id, td,
- &deq_state);
-
- if (!deq_state.new_deq_ptr || !deq_state.new_deq_seg)
- return;
-
- /* HW with the reset endpoint quirk will use the saved dequeue state to
- * issue a configure endpoint command later.
- */
- if (!(xhci->quirks & XHCI_RESET_EP_QUIRK)) {
- xhci_dbg_trace(xhci, trace_xhci_dbg_reset_ep,
- "Queueing new dequeue state");
- xhci_queue_new_dequeue_state(xhci, slot_id,
- ep_index, &deq_state);
- } else {
- /* Better hope no one uses the input context between now and the
- * reset endpoint completion!
- * XXX: No idea how this hardware will react when stream rings
- * are enabled.
- */
- xhci_dbg_trace(xhci, trace_xhci_dbg_quirks,
- "Setting up input context for "
- "configure endpoint command");
- xhci_setup_input_ctx_for_quirk(xhci, slot_id,
- ep_index, &deq_state);
- }
-}
-
static void xhci_endpoint_disable(struct usb_hcd *hcd,
struct usb_host_endpoint *host_ep)
{
@@ -4770,19 +4688,19 @@ static u16 xhci_calculate_u1_timeout(struct xhci_hcd *xhci,
{
unsigned long long timeout_ns;
+ if (xhci->quirks & XHCI_INTEL_HOST)
+ timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
+ else
+ timeout_ns = udev->u1_params.sel;
+
/* Prevent U1 if service interval is shorter than U1 exit latency */
if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
- if (xhci_service_interval_to_ns(desc) <= udev->u1_params.mel) {
+ if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
dev_dbg(&udev->dev, "Disable U1, ESIT shorter than exit latency\n");
return USB3_LPM_DISABLED;
}
}
- if (xhci->quirks & XHCI_INTEL_HOST)
- timeout_ns = xhci_calculate_intel_u1_timeout(udev, desc);
- else
- timeout_ns = udev->u1_params.sel;
-
/* The U1 timeout is encoded in 1us intervals.
* Don't return a timeout of zero, because that's USB3_LPM_DISABLED.
*/
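The reordered U1/U2 code above computes the exit-latency timeout first, compares the endpoint service interval against it, and only then encodes the value: U1 timeouts are programmed in 1 us units and U2 timeouts in 256 us units, both rounded up. A small worked sketch of that encoding, assuming only the rounding described in the comments here:

	#include <stdint.h>

	#define DIV_ROUND_UP_ULL(n, d)  (((n) + (d) - 1) / (d))

	static uint64_t encode_u1(uint64_t timeout_ns)
	{
		return DIV_ROUND_UP_ULL(timeout_ns, 1000);		/* 1 us units */
	}

	static uint64_t encode_u2(uint64_t timeout_ns)
	{
		return DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);	/* 256 us units */
	}

	/* e.g. a 10.5 us SEL (10500 ns) encodes as 11 for U1 and 1 for U2 */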
@@ -4834,19 +4752,19 @@ static u16 xhci_calculate_u2_timeout(struct xhci_hcd *xhci,
{
unsigned long long timeout_ns;
+ if (xhci->quirks & XHCI_INTEL_HOST)
+ timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
+ else
+ timeout_ns = udev->u2_params.sel;
+
/* Prevent U2 if service interval is shorter than U2 exit latency */
if (usb_endpoint_xfer_int(desc) || usb_endpoint_xfer_isoc(desc)) {
- if (xhci_service_interval_to_ns(desc) <= udev->u2_params.mel) {
+ if (xhci_service_interval_to_ns(desc) <= timeout_ns) {
dev_dbg(&udev->dev, "Disable U2, ESIT shorter than exit latency\n");
return USB3_LPM_DISABLED;
}
}
- if (xhci->quirks & XHCI_INTEL_HOST)
- timeout_ns = xhci_calculate_intel_u2_timeout(udev, desc);
- else
- timeout_ns = udev->u2_params.sel;
-
/* The U2 timeout is encoded in 256us intervals */
timeout_ns = DIV_ROUND_UP_ULL(timeout_ns, 256 * 1000);
/* If the necessary timeout value is bigger than what we can set in the
@@ -5510,6 +5428,10 @@ void xhci_init_driver(struct hc_driver *drv,
drv->reset = over->reset;
if (over->start)
drv->start = over->start;
+ if (over->check_bandwidth)
+ drv->check_bandwidth = over->check_bandwidth;
+ if (over->reset_bandwidth)
+ drv->reset_bandwidth = over->reset_bandwidth;
}
}
EXPORT_SYMBOL_GPL(xhci_init_driver);
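With check_bandwidth and reset_bandwidth exported and added to struct xhci_driver_overrides, platform glue can now wrap the core bandwidth hooks. A hedged sketch of how a hypothetical glue driver might wire this up; only the two override fields and the exported xhci_* helpers come from the headers above, the foo_ names are made up:

	static int foo_xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
	{
		/* platform-specific bookkeeping could go here before deferring to the core */
		return xhci_check_bandwidth(hcd, udev);
	}

	static void foo_xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev)
	{
		xhci_reset_bandwidth(hcd, udev);
	}

	static const struct xhci_driver_overrides foo_xhci_overrides = {
		.check_bandwidth = foo_xhci_check_bandwidth,
		.reset_bandwidth = foo_xhci_reset_bandwidth,
	};

	/* at probe time: xhci_init_driver(&foo_xhci_hc_driver, &foo_xhci_overrides); */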
diff --git a/drivers/usb/host/xhci.h b/drivers/usb/host/xhci.h
index 25e57bc9c3cc..d41de5dc0452 100644
--- a/drivers/usb/host/xhci.h
+++ b/drivers/usb/host/xhci.h
@@ -918,6 +918,8 @@ struct xhci_bw_info {
#define SS_BW_RESERVED 10
struct xhci_virt_ep {
+ struct xhci_virt_device *vdev; /* parent */
+ unsigned int ep_index;
struct xhci_ring *ring;
/* Related to endpoints that are configured to use stream IDs only */
struct xhci_stream_info *stream_info;
@@ -993,8 +995,10 @@ struct xhci_interval_bw_table {
unsigned int ss_bw_out;
};
+#define EP_CTX_PER_DEV 31
struct xhci_virt_device {
+ int slot_id;
struct usb_device *udev;
/*
* Commands to the hardware are passed an "input context" that
@@ -1007,7 +1011,7 @@ struct xhci_virt_device {
struct xhci_container_ctx *out_ctx;
/* Used for addressing devices and configuration changes */
struct xhci_container_ctx *in_ctx;
- struct xhci_virt_ep eps[31];
+ struct xhci_virt_ep eps[EP_CTX_PER_DEV];
u8 fake_port;
u8 real_port;
struct xhci_interval_bw_table *bw_table;
@@ -1415,7 +1419,7 @@ union xhci_trb {
/* MFINDEX Wrap Event - microframe counter wrapped */
#define TRB_MFINDEX_WRAP 39
/* TRB IDs 40-47 reserved, 48-63 is vendor-defined */
-
+#define TRB_VENDOR_DEFINED_LOW 48
/* Nec vendor-specific command completion event. */
#define TRB_NEC_CMD_COMP 48
/* Get NEC firmware revision. */
@@ -1535,16 +1539,27 @@ struct xhci_segment {
unsigned int bounce_len;
};
+enum xhci_cancelled_td_status {
+ TD_DIRTY = 0,
+ TD_HALTED,
+ TD_CLEARING_CACHE,
+ TD_CLEARED,
+};
+
struct xhci_td {
struct list_head td_list;
struct list_head cancelled_td_list;
+ int status;
+ enum xhci_cancelled_td_status cancel_status;
struct urb *urb;
struct xhci_segment *start_seg;
union xhci_trb *first_trb;
union xhci_trb *last_trb;
+ struct xhci_segment *last_trb_seg;
struct xhci_segment *bounce_seg;
/* actual_length of the URB has already been set */
bool urb_length_set;
+ unsigned int num_trbs;
};
/* xHCI command default timeout value */
@@ -1556,13 +1571,6 @@ struct xhci_cd {
union xhci_trb *cmd_trb;
};
-struct xhci_dequeue_state {
- struct xhci_segment *new_deq_seg;
- union xhci_trb *new_deq_ptr;
- int new_cycle_state;
- unsigned int stream_id;
-};
-
enum xhci_ring_type {
TYPE_CTRL = 0,
TYPE_ISOC,
@@ -1920,6 +1928,8 @@ struct xhci_driver_overrides {
size_t extra_priv_size;
int (*reset)(struct usb_hcd *hcd);
int (*start)(struct usb_hcd *hcd);
+ int (*check_bandwidth)(struct usb_hcd *, struct usb_device *);
+ void (*reset_bandwidth)(struct usb_hcd *, struct usb_device *);
};
#define XHCI_CFC_DELAY 10
@@ -2046,10 +2056,6 @@ void xhci_free_device_endpoint_resources(struct xhci_hcd *xhci,
struct xhci_ring *xhci_dma_to_transfer_ring(
struct xhci_virt_ep *ep,
u64 address);
-struct xhci_ring *xhci_stream_id_to_ring(
- struct xhci_virt_device *dev,
- unsigned int ep_index,
- unsigned int stream_id);
struct xhci_command *xhci_alloc_command(struct xhci_hcd *xhci,
bool allocate_completion, gfp_t mem_flags);
struct xhci_command *xhci_alloc_command_with_ctx(struct xhci_hcd *xhci,
@@ -2074,6 +2080,8 @@ int xhci_gen_setup(struct usb_hcd *hcd, xhci_get_quirks_t get_quirks);
void xhci_shutdown(struct usb_hcd *hcd);
void xhci_init_driver(struct hc_driver *drv,
const struct xhci_driver_overrides *over);
+int xhci_check_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
+void xhci_reset_bandwidth(struct usb_hcd *hcd, struct usb_device *udev);
int xhci_disable_slot(struct xhci_hcd *xhci, u32 slot_id);
int xhci_ext_cap_init(struct xhci_hcd *xhci);
@@ -2121,13 +2129,6 @@ int xhci_queue_reset_ep(struct xhci_hcd *xhci, struct xhci_command *cmd,
enum xhci_ep_reset_type reset_type);
int xhci_queue_reset_device(struct xhci_hcd *xhci, struct xhci_command *cmd,
u32 slot_id);
-void xhci_find_new_dequeue_state(struct xhci_hcd *xhci,
- unsigned int slot_id, unsigned int ep_index,
- unsigned int stream_id, struct xhci_td *cur_td,
- struct xhci_dequeue_state *state);
-void xhci_queue_new_dequeue_state(struct xhci_hcd *xhci,
- unsigned int slot_id, unsigned int ep_index,
- struct xhci_dequeue_state *deq_state);
void xhci_cleanup_stalled_ring(struct xhci_hcd *xhci, unsigned int slot_id,
unsigned int ep_index, unsigned int stream_id,
struct xhci_td *td);
diff --git a/drivers/usb/misc/yurex.c b/drivers/usb/misc/yurex.c
index 73ebfa6e9715..c640f98d20c5 100644
--- a/drivers/usb/misc/yurex.c
+++ b/drivers/usb/misc/yurex.c
@@ -496,6 +496,9 @@ static ssize_t yurex_write(struct file *file, const char __user *user_buffer,
timeout = schedule_timeout(YUREX_WRITE_TIMEOUT);
finish_wait(&dev->waitq, &wait);
+ /* make sure URB is idle after timeout or (spurious) CMD_ACK */
+ usb_kill_urb(dev->cntl_urb);
+
mutex_unlock(&dev->io_mutex);
if (retval < 0) {
diff --git a/drivers/usb/musb/jz4740.c b/drivers/usb/musb/jz4740.c
index c4fe1f4cd17a..5b7d576bf6ee 100644
--- a/drivers/usb/musb/jz4740.c
+++ b/drivers/usb/musb/jz4740.c
@@ -116,13 +116,13 @@ static int jz4740_musb_init(struct musb *musb)
if (IS_ERR(musb->xceiv)) {
err = PTR_ERR(musb->xceiv);
if (err != -EPROBE_DEFER)
- dev_err(dev, "No transceiver configured: %d", err);
+ dev_err(dev, "No transceiver configured: %d\n", err);
return err;
}
glue->role_sw = usb_role_switch_register(dev, &role_sw_desc);
if (IS_ERR(glue->role_sw)) {
- dev_err(dev, "Failed to register USB role switch");
+ dev_err(dev, "Failed to register USB role switch\n");
return PTR_ERR(glue->role_sw);
}
@@ -205,26 +205,26 @@ static int jz4740_probe(struct platform_device *pdev)
pdata = of_device_get_match_data(dev);
if (!pdata) {
- dev_err(dev, "missing platform data");
+ dev_err(dev, "missing platform data\n");
return -EINVAL;
}
musb = platform_device_alloc("musb-hdrc", PLATFORM_DEVID_AUTO);
if (!musb) {
- dev_err(dev, "failed to allocate musb device");
+ dev_err(dev, "failed to allocate musb device\n");
return -ENOMEM;
}
clk = devm_clk_get(dev, "udc");
if (IS_ERR(clk)) {
- dev_err(dev, "failed to get clock");
+ dev_err(dev, "failed to get clock\n");
ret = PTR_ERR(clk);
goto err_platform_device_put;
}
ret = clk_prepare_enable(clk);
if (ret) {
- dev_err(dev, "failed to enable clock");
+ dev_err(dev, "failed to enable clock\n");
goto err_platform_device_put;
}
@@ -240,19 +240,19 @@ static int jz4740_probe(struct platform_device *pdev)
ret = platform_device_add_resources(musb, pdev->resource,
pdev->num_resources);
if (ret) {
- dev_err(dev, "failed to add resources");
+ dev_err(dev, "failed to add resources\n");
goto err_clk_disable;
}
ret = platform_device_add_data(musb, pdata, sizeof(*pdata));
if (ret) {
- dev_err(dev, "failed to add platform_data");
+ dev_err(dev, "failed to add platform_data\n");
goto err_clk_disable;
}
ret = platform_device_add(musb);
if (ret) {
- dev_err(dev, "failed to register musb device");
+ dev_err(dev, "failed to register musb device\n");
goto err_clk_disable;
}
diff --git a/drivers/usb/musb/musb_core.c b/drivers/usb/musb/musb_core.c
index 849e0b770130..1cd87729ba60 100644
--- a/drivers/usb/musb/musb_core.c
+++ b/drivers/usb/musb/musb_core.c
@@ -2240,32 +2240,35 @@ int musb_queue_resume_work(struct musb *musb,
{
struct musb_pending_work *w;
unsigned long flags;
+ bool is_suspended;
int error;
if (WARN_ON(!callback))
return -EINVAL;
- if (pm_runtime_active(musb->controller))
- return callback(musb, data);
+ spin_lock_irqsave(&musb->list_lock, flags);
+ is_suspended = musb->is_runtime_suspended;
+
+ if (is_suspended) {
+ w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC);
+ if (!w) {
+ error = -ENOMEM;
+ goto out_unlock;
+ }
- w = devm_kzalloc(musb->controller, sizeof(*w), GFP_ATOMIC);
- if (!w)
- return -ENOMEM;
+ w->callback = callback;
+ w->data = data;
- w->callback = callback;
- w->data = data;
- spin_lock_irqsave(&musb->list_lock, flags);
- if (musb->is_runtime_suspended) {
list_add_tail(&w->node, &musb->pending_list);
error = 0;
- } else {
- dev_err(musb->controller, "could not add resume work %p\n",
- callback);
- devm_kfree(musb->controller, w);
- error = -EINPROGRESS;
}
+
+out_unlock:
spin_unlock_irqrestore(&musb->list_lock, flags);
+ if (!is_suspended)
+ error = callback(musb, data);
+
return error;
}
EXPORT_SYMBOL_GPL(musb_queue_resume_work);
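The rework above samples is_runtime_suspended under list_lock, queues the pending work only while suspended, and runs the callback after dropping the lock otherwise, so the callback can neither race a runtime-PM transition nor execute under the spinlock. A condensed sketch of that pattern with generic names (the foo_ structures and helper are hypothetical, not musb symbols):

	static int foo_queue_resume_work(struct foo *foo,
					 int (*cb)(struct foo *, void *), void *data)
	{
		unsigned long flags;
		bool is_suspended;
		int error = 0;

		spin_lock_irqsave(&foo->list_lock, flags);
		is_suspended = foo->is_runtime_suspended;
		if (is_suspended)
			error = foo_enqueue_pending(foo, cb, data);	/* hypothetical helper */
		spin_unlock_irqrestore(&foo->list_lock, flags);

		/* run the callback outside the spinlock once we know we are active */
		if (!is_suspended)
			error = cb(foo, data);

		return error;
	}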
diff --git a/drivers/usb/musb/musb_gadget.c b/drivers/usb/musb/musb_gadget.c
index f62ffaede1ab..ef374d4dd94a 100644
--- a/drivers/usb/musb/musb_gadget.c
+++ b/drivers/usb/musb/musb_gadget.c
@@ -451,7 +451,7 @@ void musb_g_tx(struct musb *musb, u8 epnum)
return;
}
- if (request) {
+ if (req) {
trace_musb_req_tx(req);
diff --git a/drivers/usb/musb/musbhsdma.c b/drivers/usb/musb/musbhsdma.c
index 0aacfc8be5a1..7acd1635850d 100644
--- a/drivers/usb/musb/musbhsdma.c
+++ b/drivers/usb/musb/musbhsdma.c
@@ -321,8 +321,6 @@ irqreturn_t dma_controller_irq(int irq, void *private_data)
musb_channel->channel.status =
MUSB_DMA_STATUS_BUS_ABORT;
} else {
- u8 devctl;
-
addr = musb_read_hsdma_addr(mbase,
bchannel);
channel->actual_len = addr
@@ -336,8 +334,6 @@ irqreturn_t dma_controller_irq(int irq, void *private_data)
< musb_channel->len) ?
"=> reconfig 0" : "=> complete");
- devctl = musb_readb(mbase, MUSB_DEVCTL);
-
channel->status = MUSB_DMA_STATUS_FREE;
/* completed */
diff --git a/drivers/usb/phy/phy-mxs-usb.c b/drivers/usb/phy/phy-mxs-usb.c
index 67b39dc62b37..8a262c5a0408 100644
--- a/drivers/usb/phy/phy-mxs-usb.c
+++ b/drivers/usb/phy/phy-mxs-usb.c
@@ -714,14 +714,9 @@ static int mxs_phy_probe(struct platform_device *pdev)
struct clk *clk;
struct mxs_phy *mxs_phy;
int ret;
- const struct of_device_id *of_id;
struct device_node *np = pdev->dev.of_node;
u32 val;
- of_id = of_match_device(mxs_phy_dt_ids, &pdev->dev);
- if (!of_id)
- return -ENODEV;
-
base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -797,7 +792,7 @@ static int mxs_phy_probe(struct platform_device *pdev)
mxs_phy->phy.charger_detect = mxs_phy_charger_detect;
mxs_phy->clk = clk;
- mxs_phy->data = of_id->data;
+ mxs_phy->data = of_device_get_match_data(&pdev->dev);
platform_set_drvdata(pdev, mxs_phy);
diff --git a/drivers/usb/phy/phy-tegra-usb.c b/drivers/usb/phy/phy-tegra-usb.c
index 03a333797382..a48452a6172b 100644
--- a/drivers/usb/phy/phy-tegra-usb.c
+++ b/drivers/usb/phy/phy-tegra-usb.c
@@ -45,6 +45,7 @@
#define TEGRA_PORTSC1_RWC_BITS (PORT_CSC | PORT_PEC | PORT_OCC)
#define USB_SUSP_CTRL 0x400
+#define USB_WAKE_ON_RESUME_EN BIT(2)
#define USB_WAKE_ON_CNNT_EN_DEV BIT(3)
#define USB_WAKE_ON_DISCON_EN_DEV BIT(4)
#define USB_SUSP_CLR BIT(5)
@@ -56,6 +57,15 @@
#define USB_SUSP_SET BIT(14)
#define USB_WAKEUP_DEBOUNCE_COUNT(x) (((x) & 0x7) << 16)
+#define USB_PHY_VBUS_SENSORS 0x404
+#define B_SESS_VLD_WAKEUP_EN BIT(6)
+#define B_VBUS_VLD_WAKEUP_EN BIT(14)
+#define A_SESS_VLD_WAKEUP_EN BIT(22)
+#define A_VBUS_VLD_WAKEUP_EN BIT(30)
+
+#define USB_PHY_VBUS_WAKEUP_ID 0x408
+#define VBUS_WAKEUP_WAKEUP_EN BIT(30)
+
#define USB1_LEGACY_CTRL 0x410
#define USB1_NO_LEGACY_MODE BIT(0)
#define USB1_VBUS_SENSE_CTL_MASK (3 << 1)
@@ -334,6 +344,11 @@ static int utmip_pad_power_on(struct tegra_usb_phy *phy)
writel_relaxed(val, base + UTMIP_BIAS_CFG0);
}
+ if (phy->pad_wakeup) {
+ phy->pad_wakeup = false;
+ utmip_pad_count--;
+ }
+
spin_unlock(&utmip_pad_lock);
clk_disable_unprepare(phy->pad_clk);
@@ -359,6 +374,17 @@ static int utmip_pad_power_off(struct tegra_usb_phy *phy)
goto ulock;
}
+ /*
+	 * In accordance with the TRM, OTG and Bias pad circuits could be turned off
+ * to save power if wake is enabled, but the VBUS-change detection
+ * method is board-specific and these circuits may need to be enabled
+ * to generate wakeup event, hence we will just keep them both enabled.
+ */
+ if (phy->wakeup_enabled) {
+ phy->pad_wakeup = true;
+ utmip_pad_count++;
+ }
+
if (--utmip_pad_count == 0) {
val = readl_relaxed(base + UTMIP_BIAS_CFG0);
val |= UTMIP_OTGPD | UTMIP_BIASPD;
@@ -503,11 +529,24 @@ static int utmi_phy_power_on(struct tegra_usb_phy *phy)
writel_relaxed(val, base + UTMIP_PLL_CFG1);
}
+ val = readl_relaxed(base + USB_SUSP_CTRL);
+ val &= ~USB_WAKE_ON_RESUME_EN;
+ writel_relaxed(val, base + USB_SUSP_CTRL);
+
if (phy->mode == USB_DR_MODE_PERIPHERAL) {
val = readl_relaxed(base + USB_SUSP_CTRL);
val &= ~(USB_WAKE_ON_CNNT_EN_DEV | USB_WAKE_ON_DISCON_EN_DEV);
writel_relaxed(val, base + USB_SUSP_CTRL);
+ val = readl_relaxed(base + USB_PHY_VBUS_WAKEUP_ID);
+ val &= ~VBUS_WAKEUP_WAKEUP_EN;
+ writel_relaxed(val, base + USB_PHY_VBUS_WAKEUP_ID);
+
+ val = readl_relaxed(base + USB_PHY_VBUS_SENSORS);
+ val &= ~(A_VBUS_VLD_WAKEUP_EN | A_SESS_VLD_WAKEUP_EN);
+ val &= ~(B_VBUS_VLD_WAKEUP_EN | B_SESS_VLD_WAKEUP_EN);
+ writel_relaxed(val, base + USB_PHY_VBUS_SENSORS);
+
val = readl_relaxed(base + UTMIP_BAT_CHRG_CFG0);
val &= ~UTMIP_PD_CHRG;
writel_relaxed(val, base + UTMIP_BAT_CHRG_CFG0);
@@ -605,31 +644,51 @@ static int utmi_phy_power_off(struct tegra_usb_phy *phy)
utmi_phy_clk_disable(phy);
- if (phy->mode == USB_DR_MODE_PERIPHERAL) {
+ /* PHY won't resume if reset is asserted */
+ if (!phy->wakeup_enabled) {
val = readl_relaxed(base + USB_SUSP_CTRL);
- val &= ~USB_WAKEUP_DEBOUNCE_COUNT(~0);
- val |= USB_WAKE_ON_CNNT_EN_DEV | USB_WAKEUP_DEBOUNCE_COUNT(5);
+ val |= UTMIP_RESET;
writel_relaxed(val, base + USB_SUSP_CTRL);
}
- val = readl_relaxed(base + USB_SUSP_CTRL);
- val |= UTMIP_RESET;
- writel_relaxed(val, base + USB_SUSP_CTRL);
-
val = readl_relaxed(base + UTMIP_BAT_CHRG_CFG0);
val |= UTMIP_PD_CHRG;
writel_relaxed(val, base + UTMIP_BAT_CHRG_CFG0);
- val = readl_relaxed(base + UTMIP_XCVR_CFG0);
- val |= UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN |
- UTMIP_FORCE_PDZI_POWERDOWN;
- writel_relaxed(val, base + UTMIP_XCVR_CFG0);
+ if (!phy->wakeup_enabled) {
+ val = readl_relaxed(base + UTMIP_XCVR_CFG0);
+ val |= UTMIP_FORCE_PD_POWERDOWN | UTMIP_FORCE_PD2_POWERDOWN |
+ UTMIP_FORCE_PDZI_POWERDOWN;
+ writel_relaxed(val, base + UTMIP_XCVR_CFG0);
+ }
val = readl_relaxed(base + UTMIP_XCVR_CFG1);
val |= UTMIP_FORCE_PDDISC_POWERDOWN | UTMIP_FORCE_PDCHRP_POWERDOWN |
UTMIP_FORCE_PDDR_POWERDOWN;
writel_relaxed(val, base + UTMIP_XCVR_CFG1);
+ if (phy->wakeup_enabled) {
+ val = readl_relaxed(base + USB_SUSP_CTRL);
+ val &= ~USB_WAKEUP_DEBOUNCE_COUNT(~0);
+ val |= USB_WAKEUP_DEBOUNCE_COUNT(5);
+ val |= USB_WAKE_ON_RESUME_EN;
+ writel_relaxed(val, base + USB_SUSP_CTRL);
+
+ /*
+ * Ask VBUS sensor to generate wake event once cable is
+ * connected.
+ */
+ if (phy->mode == USB_DR_MODE_PERIPHERAL) {
+ val = readl_relaxed(base + USB_PHY_VBUS_WAKEUP_ID);
+ val |= VBUS_WAKEUP_WAKEUP_EN;
+ writel_relaxed(val, base + USB_PHY_VBUS_WAKEUP_ID);
+
+ val = readl_relaxed(base + USB_PHY_VBUS_SENSORS);
+ val |= A_VBUS_VLD_WAKEUP_EN;
+ writel_relaxed(val, base + USB_PHY_VBUS_SENSORS);
+ }
+ }
+
return utmip_pad_power_off(phy);
}
@@ -765,6 +824,15 @@ static int ulpi_phy_power_off(struct tegra_usb_phy *phy)
usleep_range(5000, 6000);
clk_disable_unprepare(phy->clk);
+ /*
+ * Wakeup currently unimplemented for ULPI, thus PHY needs to be
+ * force-resumed.
+ */
+ if (WARN_ON_ONCE(phy->wakeup_enabled)) {
+ ulpi_phy_power_on(phy);
+ return -EOPNOTSUPP;
+ }
+
return 0;
}
@@ -784,6 +852,9 @@ static int tegra_usb_phy_power_on(struct tegra_usb_phy *phy)
phy->powered_on = true;
+ /* Let PHY settle down */
+ usleep_range(2000, 2500);
+
return 0;
}
@@ -824,6 +895,15 @@ static void tegra_usb_phy_shutdown(struct usb_phy *u_phy)
phy->freq = NULL;
}
+static int tegra_usb_phy_set_wakeup(struct usb_phy *u_phy, bool enable)
+{
+ struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
+
+ phy->wakeup_enabled = enable;
+
+ return 0;
+}
+
static int tegra_usb_phy_set_suspend(struct usb_phy *u_phy, int suspend)
{
struct tegra_usb_phy *phy = to_tegra_usb_phy(u_phy);
@@ -1195,6 +1275,7 @@ static int tegra_usb_phy_probe(struct platform_device *pdev)
tegra_phy->u_phy.dev = &pdev->dev;
tegra_phy->u_phy.init = tegra_usb_phy_init;
tegra_phy->u_phy.shutdown = tegra_usb_phy_shutdown;
+ tegra_phy->u_phy.set_wakeup = tegra_usb_phy_set_wakeup;
tegra_phy->u_phy.set_suspend = tegra_usb_phy_set_suspend;
platform_set_drvdata(pdev, tegra_phy);
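The new set_wakeup callback registered above plugs into the generic USB PHY layer, so a controller driver can ask the PHY to stay wake-capable across suspend. A hedged sketch of a possible caller; usb_phy_set_wakeup() is the generic wrapper from include/linux/usb/phy.h, while the foo_ names and helper are hypothetical:

	static int foo_controller_suspend(struct device *dev)
	{
		struct foo_controller *foo = dev_get_drvdata(dev);
		int err;

		if (device_may_wakeup(dev)) {
			err = usb_phy_set_wakeup(foo->u_phy, true);
			if (err)
				return err;
		} else {
			usb_phy_set_wakeup(foo->u_phy, false);
		}

		return foo_controller_stop(foo);	/* hypothetical helper */
	}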
diff --git a/drivers/usb/renesas_usbhs/fifo.c b/drivers/usb/renesas_usbhs/fifo.c
index ac9a81ae8216..e6fa13701808 100644
--- a/drivers/usb/renesas_usbhs/fifo.c
+++ b/drivers/usb/renesas_usbhs/fifo.c
@@ -126,6 +126,7 @@ struct usbhs_pkt *usbhs_pkt_pop(struct usbhs_pipe *pipe, struct usbhs_pkt *pkt)
}
usbhs_pipe_clear_without_sequence(pipe, 0, 0);
+ usbhs_pipe_running(pipe, 0);
__usbhsf_pkt_del(pkt);
}
diff --git a/drivers/usb/serial/cp210x.c b/drivers/usb/serial/cp210x.c
index fbb10dfc56e3..7bec1e730b20 100644
--- a/drivers/usb/serial/cp210x.c
+++ b/drivers/usb/serial/cp210x.c
@@ -61,6 +61,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x08e6, 0x5501) }, /* Gemalto Prox-PU/CU contactless smartcard reader */
{ USB_DEVICE(0x08FD, 0x000A) }, /* Digianswer A/S , ZigBee/802.15.4 MAC Device */
{ USB_DEVICE(0x0908, 0x01FF) }, /* Siemens RUGGEDCOM USB Serial Console */
+ { USB_DEVICE(0x0988, 0x0578) }, /* Teraoka AD2000 */
{ USB_DEVICE(0x0B00, 0x3070) }, /* Ingenico 3070 */
{ USB_DEVICE(0x0BED, 0x1100) }, /* MEI (TM) Cashflow-SC Bill/Voucher Acceptor */
{ USB_DEVICE(0x0BED, 0x1101) }, /* MEI series 2000 Combo Acceptor */
@@ -201,6 +202,7 @@ static const struct usb_device_id id_table[] = {
{ USB_DEVICE(0x1901, 0x0194) }, /* GE Healthcare Remote Alarm Box */
{ USB_DEVICE(0x1901, 0x0195) }, /* GE B850/B650/B450 CP2104 DP UART interface */
{ USB_DEVICE(0x1901, 0x0196) }, /* GE B850 CP2105 DP UART interface */
+ { USB_DEVICE(0x199B, 0xBA30) }, /* LORD WSDA-200-USB */
{ USB_DEVICE(0x19CF, 0x3000) }, /* Parrot NMEA GPS Flight Recorder */
{ USB_DEVICE(0x1ADB, 0x0001) }, /* Schweitzer Engineering C662 Cable */
{ USB_DEVICE(0x1B1C, 0x1C00) }, /* Corsair USB Dongle */
diff --git a/drivers/usb/serial/iuu_phoenix.c b/drivers/usb/serial/iuu_phoenix.c
index f1201d4de297..e8f06b41a503 100644
--- a/drivers/usb/serial/iuu_phoenix.c
+++ b/drivers/usb/serial/iuu_phoenix.c
@@ -532,23 +532,29 @@ static int iuu_uart_flush(struct usb_serial_port *port)
struct device *dev = &port->dev;
int i;
int status;
- u8 rxcmd = IUU_UART_RX;
+ u8 *rxcmd;
struct iuu_private *priv = usb_get_serial_port_data(port);
if (iuu_led(port, 0xF000, 0, 0, 0xFF) < 0)
return -EIO;
+ rxcmd = kmalloc(1, GFP_KERNEL);
+ if (!rxcmd)
+ return -ENOMEM;
+
+ rxcmd[0] = IUU_UART_RX;
+
for (i = 0; i < 2; i++) {
- status = bulk_immediate(port, &rxcmd, 1);
+ status = bulk_immediate(port, rxcmd, 1);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_write error\n", __func__);
- return status;
+ goto out_free;
}
status = read_immediate(port, &priv->len, 1);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_read error\n", __func__);
- return status;
+ goto out_free;
}
if (priv->len > 0) {
@@ -556,12 +562,16 @@ static int iuu_uart_flush(struct usb_serial_port *port)
status = read_immediate(port, priv->buf, priv->len);
if (status != IUU_OPERATION_OK) {
dev_dbg(dev, "%s - uart_flush_read error\n", __func__);
- return status;
+ goto out_free;
}
}
}
dev_dbg(dev, "%s - uart_flush_read OK!\n", __func__);
iuu_led(port, 0, 0xF000, 0, 0xFF);
+
+out_free:
+ kfree(rxcmd);
+
return status;
}
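The iuu_phoenix change above swaps an on-stack byte for a kmalloc()'d buffer because buffers handed to the USB core for transfers must be heap (DMA-able) memory, never stack memory. A minimal sketch of the pattern; bulk_immediate() is the driver-local helper seen in the hunk, the wrapper name is made up:

	/* Sketch: allocate the one-byte command from the heap before submitting it. */
	static int send_one_byte_cmd(struct usb_serial_port *port, u8 cmd)
	{
		u8 *buf;
		int status;

		buf = kmalloc(1, GFP_KERNEL);
		if (!buf)
			return -ENOMEM;

		buf[0] = cmd;
		status = bulk_immediate(port, buf, 1);

		kfree(buf);
		return status;
	}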
diff --git a/drivers/usb/serial/option.c b/drivers/usb/serial/option.c
index 2c21e34235bb..2049e66f34a3 100644
--- a/drivers/usb/serial/option.c
+++ b/drivers/usb/serial/option.c
@@ -425,6 +425,8 @@ static void option_instat_callback(struct urb *urb);
#define CINTERION_PRODUCT_AHXX_2RMNET 0x0084
#define CINTERION_PRODUCT_AHXX_AUDIO 0x0085
#define CINTERION_PRODUCT_CLS8 0x00b0
+#define CINTERION_PRODUCT_MV31_MBIM 0x00b3
+#define CINTERION_PRODUCT_MV31_RMNET 0x00b7
/* Olivetti products */
#define OLIVETTI_VENDOR_ID 0x0b3c
@@ -1117,6 +1119,8 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0xff, 0xff),
.driver_info = RSVD(1) | RSVD(2) | RSVD(3) | RSVD(4) | NUMEP2 },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_EM12, 0xff, 0, 0) },
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0xff, 0x30) }, /* EM160R-GL */
+ { USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, 0x0620, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x30) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0, 0) },
{ USB_DEVICE_AND_INTERFACE_INFO(QUECTEL_VENDOR_ID, QUECTEL_PRODUCT_RM500Q, 0xff, 0xff, 0x10),
@@ -1912,6 +1916,10 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC25_MDMNET) },
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDM) }, /* HC28 enumerates with Siemens or Cinterion VID depending on FW revision */
{ USB_DEVICE(SIEMENS_VENDOR_ID, CINTERION_PRODUCT_HC28_MDMNET) },
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_MBIM, 0xff),
+ .driver_info = RSVD(3)},
+ { USB_DEVICE_INTERFACE_CLASS(CINTERION_VENDOR_ID, CINTERION_PRODUCT_MV31_RMNET, 0xff),
+ .driver_info = RSVD(0)},
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD100),
.driver_info = RSVD(4) },
{ USB_DEVICE(OLIVETTI_VENDOR_ID, OLIVETTI_PRODUCT_OLICARD120),
@@ -2057,6 +2065,7 @@ static const struct usb_device_id option_ids[] = {
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x0105, 0xff), /* Fibocom NL678 series */
.driver_info = RSVD(6) },
{ USB_DEVICE_INTERFACE_CLASS(0x2cb7, 0x01a0, 0xff) }, /* Fibocom NL668-AM/NL652-EU (laptop MBIM) */
+ { USB_DEVICE_INTERFACE_CLASS(0x2df3, 0x9d03, 0xff) }, /* LongSung M5710 */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1404, 0xff) }, /* GosunCn GM500 RNDIS */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1405, 0xff) }, /* GosunCn GM500 MBIM */
{ USB_DEVICE_INTERFACE_CLASS(0x305a, 0x1406, 0xff) }, /* GosunCn GM500 ECM/NCM */
diff --git a/drivers/usb/storage/unusual_uas.h b/drivers/usb/storage/unusual_uas.h
index 870e9cf3d5dc..f9677a5ec31b 100644
--- a/drivers/usb/storage/unusual_uas.h
+++ b/drivers/usb/storage/unusual_uas.h
@@ -91,6 +91,13 @@ UNUSUAL_DEV(0x152d, 0x0578, 0x0000, 0x9999,
US_FL_BROKEN_FUA),
/* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
+UNUSUAL_DEV(0x154b, 0xf00b, 0x0000, 0x9999,
+ "PNY",
+ "Pro Elite SSD",
+ USB_SC_DEVICE, USB_PR_DEVICE, NULL,
+ US_FL_NO_ATA_1X),
+
+/* Reported-by: Thinh Nguyen <thinhn@synopsys.com> */
UNUSUAL_DEV(0x154b, 0xf00d, 0x0000, 0x9999,
"PNY",
"Pro Elite SSD",
diff --git a/drivers/usb/typec/altmodes/Kconfig b/drivers/usb/typec/altmodes/Kconfig
index 187690fd1a5b..60d375e9c3c7 100644
--- a/drivers/usb/typec/altmodes/Kconfig
+++ b/drivers/usb/typec/altmodes/Kconfig
@@ -20,6 +20,6 @@ config TYPEC_NVIDIA_ALTMODE
to enable support for VirtualLink devices with NVIDIA GPUs.
To compile this driver as a module, choose M here: the
- module will be called typec_displayport.
+ module will be called typec_nvidia.
endmenu
diff --git a/drivers/usb/typec/class.c b/drivers/usb/typec/class.c
index ebfd3113a9a8..b4a5d9d4564f 100644
--- a/drivers/usb/typec/class.c
+++ b/drivers/usb/typec/class.c
@@ -27,6 +27,7 @@ struct typec_cable {
enum typec_plug_type type;
struct usb_pd_identity *identity;
unsigned int active:1;
+ u16 pd_revision; /* 0300H = "3.0" */
};
struct typec_partner {
@@ -36,6 +37,7 @@ struct typec_partner {
enum typec_accessory accessory;
struct ida mode_ids;
int num_altmodes;
+ u16 pd_revision; /* 0300H = "3.0" */
};
struct typec_port {
@@ -86,7 +88,7 @@ static const char * const typec_accessory_modes[] = {
/* Product types defined in USB PD Specification R3.0 V2.0 */
static const char * const product_type_ufp[8] = {
- [IDH_PTYPE_UNDEF] = "undefined",
+ [IDH_PTYPE_NOT_UFP] = "not_ufp",
[IDH_PTYPE_HUB] = "hub",
[IDH_PTYPE_PERIPH] = "peripheral",
[IDH_PTYPE_PSD] = "psd",
@@ -94,17 +96,17 @@ static const char * const product_type_ufp[8] = {
};
static const char * const product_type_dfp[8] = {
- [IDH_PTYPE_DFP_UNDEF] = "undefined",
+ [IDH_PTYPE_NOT_DFP] = "not_dfp",
[IDH_PTYPE_DFP_HUB] = "hub",
[IDH_PTYPE_DFP_HOST] = "host",
[IDH_PTYPE_DFP_PB] = "power_brick",
- [IDH_PTYPE_DFP_AMC] = "amc",
};
static const char * const product_type_cable[8] = {
- [IDH_PTYPE_UNDEF] = "undefined",
+ [IDH_PTYPE_NOT_CABLE] = "not_cable",
[IDH_PTYPE_PCABLE] = "passive",
[IDH_PTYPE_ACABLE] = "active",
+ [IDH_PTYPE_VPD] = "vpd",
};
static struct usb_pd_identity *get_pd_identity(struct device *dev)
@@ -264,6 +266,11 @@ type_show(struct device *dev, struct device_attribute *attr, char *buf)
}
static DEVICE_ATTR_RO(type);
+static ssize_t usb_power_delivery_revision_show(struct device *dev,
+ struct device_attribute *attr,
+ char *buf);
+static DEVICE_ATTR_RO(usb_power_delivery_revision);
+
/* ------------------------------------------------------------------------- */
/* Alternate Modes */
@@ -680,6 +687,7 @@ static struct attribute *typec_partner_attrs[] = {
&dev_attr_supports_usb_power_delivery.attr,
&dev_attr_number_of_alternate_modes.attr,
&dev_attr_type.attr,
+ &dev_attr_usb_power_delivery_revision.attr,
NULL
};
@@ -741,6 +749,30 @@ int typec_partner_set_identity(struct typec_partner *partner)
EXPORT_SYMBOL_GPL(typec_partner_set_identity);
/**
+ * typec_partner_set_pd_revision - Set the PD revision supported by the partner
+ * @partner: The partner to be updated.
+ * @pd_revision: USB Power Delivery Specification Revision supported by partner
+ *
+ * This routine is used to report that the PD revision of the port partner has
+ * become available.
+ */
+void typec_partner_set_pd_revision(struct typec_partner *partner, u16 pd_revision)
+{
+ if (partner->pd_revision == pd_revision)
+ return;
+
+ partner->pd_revision = pd_revision;
+ sysfs_notify(&partner->dev.kobj, NULL, "usb_power_delivery_revision");
+ if (pd_revision != 0 && !partner->usb_pd) {
+ partner->usb_pd = 1;
+ sysfs_notify(&partner->dev.kobj, NULL,
+ "supports_usb_power_delivery");
+ }
+ kobject_uevent(&partner->dev.kobj, KOBJ_CHANGE);
+}
+EXPORT_SYMBOL_GPL(typec_partner_set_pd_revision);
+
+/**
* typec_partner_set_num_altmodes - Set the number of available partner altmodes
* @partner: The partner to be updated.
* @num_altmodes: The number of altmodes we want to specify as available.
@@ -766,6 +798,7 @@ int typec_partner_set_num_altmodes(struct typec_partner *partner, int num_altmod
return ret;
sysfs_notify(&partner->dev.kobj, NULL, "number_of_alternate_modes");
+ kobject_uevent(&partner->dev.kobj, KOBJ_CHANGE);
return 0;
}
@@ -814,6 +847,7 @@ struct typec_partner *typec_register_partner(struct typec_port *port,
partner->usb_pd = desc->usb_pd;
partner->accessory = desc->accessory;
partner->num_altmodes = -1;
+ partner->pd_revision = desc->pd_revision;
if (desc->identity) {
/*
@@ -923,6 +957,7 @@ int typec_plug_set_num_altmodes(struct typec_plug *plug, int num_altmodes)
return ret;
sysfs_notify(&plug->dev.kobj, NULL, "number_of_alternate_modes");
+ kobject_uevent(&plug->dev.kobj, KOBJ_CHANGE);
return 0;
}
@@ -1026,6 +1061,7 @@ static DEVICE_ATTR_RO(plug_type);
static struct attribute *typec_cable_attrs[] = {
&dev_attr_type.attr,
&dev_attr_plug_type.attr,
+ &dev_attr_usb_power_delivery_revision.attr,
NULL
};
ATTRIBUTE_GROUPS(typec_cable);
@@ -1128,6 +1164,7 @@ struct typec_cable *typec_register_cable(struct typec_port *port,
cable->type = desc->type;
cable->active = desc->active;
+ cable->pd_revision = desc->pd_revision;
if (desc->identity) {
/*
@@ -1497,11 +1534,23 @@ static ssize_t usb_power_delivery_revision_show(struct device *dev,
struct device_attribute *attr,
char *buf)
{
- struct typec_port *p = to_typec_port(dev);
+ u16 rev = 0;
+
+ if (is_typec_partner(dev)) {
+ struct typec_partner *partner = to_typec_partner(dev);
- return sprintf(buf, "%d\n", (p->cap->pd_revision >> 8) & 0xff);
+ rev = partner->pd_revision;
+ } else if (is_typec_cable(dev)) {
+ struct typec_cable *cable = to_typec_cable(dev);
+
+ rev = cable->pd_revision;
+ } else if (is_typec_port(dev)) {
+ struct typec_port *p = to_typec_port(dev);
+
+ rev = p->cap->pd_revision;
+ }
+ return sysfs_emit(buf, "%d.%d\n", (rev >> 8) & 0xff, (rev >> 4) & 0xf);
}
-static DEVICE_ATTR_RO(usb_power_delivery_revision);
static ssize_t orientation_show(struct device *dev,
struct device_attribute *attr,
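The reworked usb_power_delivery_revision_show() above decodes the BCD-style revision word (0x0300 means "3.0", per the comments added to the partner and cable structs) into a major.minor string via sysfs_emit(). A small standalone sketch of the same decoding:

	#include <stdio.h>
	#include <stdint.h>

	/* 0x0300 -> "3.0", 0x0200 -> "2.0"; major in bits 15:8, minor in bits 7:4 */
	static void format_pd_revision(uint16_t rev, char *buf, size_t len)
	{
		snprintf(buf, len, "%d.%d", (rev >> 8) & 0xff, (rev >> 4) & 0xf);
	}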
diff --git a/drivers/usb/typec/mux/intel_pmc_mux.c b/drivers/usb/typec/mux/intel_pmc_mux.c
index cf37a59ce130..46a25b8db72e 100644
--- a/drivers/usb/typec/mux/intel_pmc_mux.c
+++ b/drivers/usb/typec/mux/intel_pmc_mux.c
@@ -207,10 +207,21 @@ static int
pmc_usb_mux_dp_hpd(struct pmc_usb_port *port, struct typec_displayport_data *dp)
{
u8 msg[2] = { };
+ int ret;
msg[0] = PMC_USB_DP_HPD;
msg[0] |= port->usb3_port << PMC_USB_MSG_USB3_PORT_SHIFT;
+ /* Configure HPD first if HPD,IRQ comes together */
+ if (!IOM_PORT_HPD_ASSERTED(port->iom_status) &&
+ dp->status & DP_STATUS_IRQ_HPD &&
+ dp->status & DP_STATUS_HPD_STATE) {
+ msg[1] = PMC_USB_DP_HPD_LVL;
+ ret = pmc_usb_command(port, msg, sizeof(msg));
+ if (ret)
+ return ret;
+ }
+
if (dp->status & DP_STATUS_IRQ_HPD)
msg[1] = PMC_USB_DP_HPD_IRQ;
diff --git a/drivers/usb/typec/tcpm/tcpci.c b/drivers/usb/typec/tcpm/tcpci.c
index f676abab044b..a27deb0b5f03 100644
--- a/drivers/usb/typec/tcpm/tcpci.c
+++ b/drivers/usb/typec/tcpm/tcpci.c
@@ -255,6 +255,14 @@ static int tcpci_set_polarity(struct tcpc_dev *tcpc,
TCPC_TCPC_CTRL_ORIENTATION : 0);
}
+static void tcpci_set_partner_usb_comm_capable(struct tcpc_dev *tcpc, bool capable)
+{
+ struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
+
+ if (tcpci->data->set_partner_usb_comm_capable)
+ tcpci->data->set_partner_usb_comm_capable(tcpci, tcpci->data, capable);
+}
+
static int tcpci_set_vconn(struct tcpc_dev *tcpc, bool enable)
{
struct tcpci *tcpci = tcpc_to_tcpci(tcpc);
@@ -720,6 +728,7 @@ struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data)
tcpci->tcpc.set_bist_data = tcpci_set_bist_data;
tcpci->tcpc.enable_frs = tcpci_enable_frs;
tcpci->tcpc.frs_sourcing_vbus = tcpci_frs_sourcing_vbus;
+ tcpci->tcpc.set_partner_usb_comm_capable = tcpci_set_partner_usb_comm_capable;
if (tcpci->data->auto_discharge_disconnect) {
tcpci->tcpc.enable_auto_vbus_discharge = tcpci_enable_auto_vbus_discharge;
diff --git a/drivers/usb/typec/tcpm/tcpci.h b/drivers/usb/typec/tcpm/tcpci.h
index c3c7d07d9b4e..57b6e24e0a0c 100644
--- a/drivers/usb/typec/tcpm/tcpci.h
+++ b/drivers/usb/typec/tcpm/tcpci.h
@@ -161,6 +161,10 @@ struct tcpci;
* Optional; Enables TCPC to autonously discharge vbus on disconnect.
* @vbus_vsafe0v:
* optional; Set when TCPC can detect whether vbus is at VSAFE0V.
+ * @set_partner_usb_comm_capable:
+ * Optional; The USB Communications Capable bit indicates if port
+ * partner is capable of communication over the USB data lines
+ * (e.g. D+/- or SS Tx/Rx). Called to notify the status of the bit.
*/
struct tcpci_data {
struct regmap *regmap;
@@ -175,6 +179,8 @@ struct tcpci_data {
enum typec_cc_status cc);
int (*set_vbus)(struct tcpci *tcpci, struct tcpci_data *data, bool source, bool sink);
void (*frs_sourcing_vbus)(struct tcpci *tcpci, struct tcpci_data *data);
+ void (*set_partner_usb_comm_capable)(struct tcpci *tcpci, struct tcpci_data *data,
+ bool capable);
};
struct tcpci *tcpci_register_port(struct device *dev, struct tcpci_data *data);
diff --git a/drivers/usb/typec/tcpm/tcpci_maxim.c b/drivers/usb/typec/tcpm/tcpci_maxim.c
index 319266329b42..041a1c393594 100644
--- a/drivers/usb/typec/tcpm/tcpci_maxim.c
+++ b/drivers/usb/typec/tcpm/tcpci_maxim.c
@@ -5,13 +5,10 @@
* MAXIM TCPCI based TCPC driver
*/
-#include <linux/gpio.h>
-#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/i2c.h>
#include <linux/kernel.h>
#include <linux/module.h>
-#include <linux/of_gpio.h>
#include <linux/regmap.h>
#include <linux/usb/pd.h>
#include <linux/usb/tcpm.h>
@@ -22,6 +19,9 @@
#define PD_ACTIVITY_TIMEOUT_MS 10000
#define TCPC_VENDOR_ALERT 0x80
+#define TCPC_VENDOR_USBSW_CTRL 0x93
+#define TCPC_VENDOR_USBSW_CTRL_ENABLE_USB_DATA 0x9
+#define TCPC_VENDOR_USBSW_CTRL_DISABLE_USB_DATA 0
#define TCPC_RECEIVE_BUFFER_COUNT_OFFSET 0
#define TCPC_RECEIVE_BUFFER_FRAME_TYPE_OFFSET 1
@@ -158,7 +158,7 @@ static void process_rx(struct max_tcpci_chip *chip, u16 status)
*/
ret = regmap_raw_read(chip->data.regmap, TCPC_RX_BYTE_CNT, rx_buf, 2);
if (ret < 0) {
- dev_err(chip->dev, "TCPC_RX_BYTE_CNT read failed ret:%d", ret);
+ dev_err(chip->dev, "TCPC_RX_BYTE_CNT read failed ret:%d\n", ret);
return;
}
@@ -167,13 +167,13 @@ static void process_rx(struct max_tcpci_chip *chip, u16 status)
if (count == 0 || frame_type != TCPC_RX_BUF_FRAME_TYPE_SOP) {
max_tcpci_write16(chip, TCPC_ALERT, TCPC_ALERT_RX_STATUS);
- dev_err(chip->dev, "%s", count == 0 ? "error: count is 0" :
+ dev_err(chip->dev, "%s\n", count == 0 ? "error: count is 0" :
"error frame_type is not SOP");
return;
}
if (count > sizeof(struct pd_message) || count + 1 > TCPC_RECEIVE_BUFFER_LEN) {
- dev_err(chip->dev, "Invalid TCPC_RX_BYTE_CNT %d", count);
+ dev_err(chip->dev, "Invalid TCPC_RX_BYTE_CNT %d\n", count);
return;
}
@@ -184,7 +184,7 @@ static void process_rx(struct max_tcpci_chip *chip, u16 status)
count += 1;
ret = regmap_raw_read(chip->data.regmap, TCPC_RX_BYTE_CNT, rx_buf, count);
if (ret < 0) {
- dev_err(chip->dev, "Error: TCPC_RX_BYTE_CNT read failed: %d", ret);
+ dev_err(chip->dev, "Error: TCPC_RX_BYTE_CNT read failed: %d\n", ret);
return;
}
@@ -277,6 +277,21 @@ static void process_tx(struct max_tcpci_chip *chip, u16 status)
max_tcpci_init_regs(chip);
}
+/* Enable USB switches when partner is USB communications capable */
+static void max_tcpci_set_partner_usb_comm_capable(struct tcpci *tcpci, struct tcpci_data *data,
+ bool capable)
+{
+ struct max_tcpci_chip *chip = tdata_to_max_tcpci(data);
+ int ret;
+
+ ret = max_tcpci_write8(chip, TCPC_VENDOR_USBSW_CTRL, capable ?
+ TCPC_VENDOR_USBSW_CTRL_ENABLE_USB_DATA :
+ TCPC_VENDOR_USBSW_CTRL_DISABLE_USB_DATA);
+
+ if (ret < 0)
+ dev_err(chip->dev, "Failed to enable USB switches");
+}
+
static irqreturn_t _max_tcpci_irq(struct max_tcpci_chip *chip, u16 status)
{
u16 mask;
@@ -317,7 +332,7 @@ static irqreturn_t _max_tcpci_irq(struct max_tcpci_chip *chip, u16 status)
return ret;
if (reg_status & TCPC_SINK_FAST_ROLE_SWAP) {
- dev_info(chip->dev, "FRS Signal");
+ dev_info(chip->dev, "FRS Signal\n");
tcpm_sink_frs(chip->port);
}
}
@@ -456,12 +471,12 @@ static int max_tcpci_probe(struct i2c_client *client, const struct i2c_device_id
chip->data.frs_sourcing_vbus = max_tcpci_frs_sourcing_vbus;
chip->data.auto_discharge_disconnect = true;
chip->data.vbus_vsafe0v = true;
+ chip->data.set_partner_usb_comm_capable = max_tcpci_set_partner_usb_comm_capable;
max_tcpci_init_regs(chip);
chip->tcpci = tcpci_register_port(chip->dev, &chip->data);
if (IS_ERR(chip->tcpci)) {
- dev_err(&client->dev, "TCPCI port registration failed");
- ret = PTR_ERR(chip->tcpci);
+ dev_err(&client->dev, "TCPCI port registration failed\n");
return PTR_ERR(chip->tcpci);
}
chip->port = tcpci_get_tcpm_port(chip->tcpci);
diff --git a/drivers/usb/typec/tcpm/tcpm.c b/drivers/usb/typec/tcpm/tcpm.c
index 22a85b396f69..8558ab006885 100644
--- a/drivers/usb/typec/tcpm/tcpm.c
+++ b/drivers/usb/typec/tcpm/tcpm.c
@@ -76,6 +76,8 @@
S(SNK_HARD_RESET_SINK_ON), \
\
S(SOFT_RESET), \
+ S(SRC_SOFT_RESET_WAIT_SNK_TX), \
+ S(SNK_SOFT_RESET), \
S(SOFT_RESET_SEND), \
\
S(DR_SWAP_ACCEPT), \
@@ -139,7 +141,46 @@
\
S(ERROR_RECOVERY), \
S(PORT_RESET), \
- S(PORT_RESET_WAIT_OFF)
+ S(PORT_RESET_WAIT_OFF), \
+ \
+ S(AMS_START), \
+ S(CHUNK_NOT_SUPP)
+
+#define FOREACH_AMS(S) \
+ S(NONE_AMS), \
+ S(POWER_NEGOTIATION), \
+ S(GOTOMIN), \
+ S(SOFT_RESET_AMS), \
+ S(HARD_RESET), \
+ S(CABLE_RESET), \
+ S(GET_SOURCE_CAPABILITIES), \
+ S(GET_SINK_CAPABILITIES), \
+ S(POWER_ROLE_SWAP), \
+ S(FAST_ROLE_SWAP), \
+ S(DATA_ROLE_SWAP), \
+ S(VCONN_SWAP), \
+ S(SOURCE_ALERT), \
+ S(GETTING_SOURCE_EXTENDED_CAPABILITIES),\
+ S(GETTING_SOURCE_SINK_STATUS), \
+ S(GETTING_BATTERY_CAPABILITIES), \
+ S(GETTING_BATTERY_STATUS), \
+ S(GETTING_MANUFACTURER_INFORMATION), \
+ S(SECURITY), \
+ S(FIRMWARE_UPDATE), \
+ S(DISCOVER_IDENTITY), \
+ S(SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY), \
+ S(DISCOVER_SVIDS), \
+ S(DISCOVER_MODES), \
+ S(DFP_TO_UFP_ENTER_MODE), \
+ S(DFP_TO_UFP_EXIT_MODE), \
+ S(DFP_TO_CABLE_PLUG_ENTER_MODE), \
+ S(DFP_TO_CABLE_PLUG_EXIT_MODE), \
+ S(ATTENTION), \
+ S(BIST), \
+ S(UNSTRUCTURED_VDMS), \
+ S(STRUCTURED_VDMS), \
+ S(COUNTRY_INFO), \
+ S(COUNTRY_CODES)
#define GENERATE_ENUM(e) e
#define GENERATE_STRING(s) #s
@@ -152,6 +193,14 @@ static const char * const tcpm_states[] = {
FOREACH_STATE(GENERATE_STRING)
};
+enum tcpm_ams {
+ FOREACH_AMS(GENERATE_ENUM)
+};
+
+static const char * const tcpm_ams_str[] = {
+ FOREACH_AMS(GENERATE_STRING)
+};
+
enum vdm_states {
VDM_STATE_ERR_BUSY = -3,
VDM_STATE_ERR_SEND = -2,
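The FOREACH_AMS list added above is consumed the same way FOREACH_STATE already is: GENERATE_ENUM expands each entry into an enumerator and GENERATE_STRING expands it into its name, so enum tcpm_ams and tcpm_ams_str[] are generated from a single list and cannot drift out of sync. A minimal standalone sketch of this X-macro idiom, with illustrative names that are not part of the patch:

#include <stdio.h>

#define FOREACH_COLOR(S) \
	S(RED),		\
	S(GREEN),	\
	S(BLUE)

#define GENERATE_ENUM(e)	e	/* each entry becomes an enumerator */
#define GENERATE_STRING(s)	#s	/* each entry becomes its name string */

enum color {
	FOREACH_COLOR(GENERATE_ENUM)
};

static const char * const color_str[] = {
	FOREACH_COLOR(GENERATE_STRING)
};

int main(void)
{
	/* Prints "GREEN": the enumerator indexes the generated string table. */
	printf("%s\n", color_str[GREEN]);
	return 0;
}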
@@ -161,6 +210,7 @@ enum vdm_states {
VDM_STATE_READY = 1,
VDM_STATE_BUSY = 2,
VDM_STATE_WAIT_RSP_BUSY = 3,
+ VDM_STATE_SEND_MESSAGE = 4,
};
enum pd_msg_request {
@@ -302,6 +352,7 @@ struct tcpm_port {
struct hrtimer enable_frs_timer;
struct kthread_work enable_frs;
bool state_machine_running;
+ bool vdm_sm_running;
struct completion tx_complete;
enum tcpm_transmit_status tx_status;
@@ -381,6 +432,12 @@ struct tcpm_port {
/* Sink caps have been queried */
bool sink_cap_done;
+ /* Collision Avoidance and Atomic Message Sequence */
+ enum tcpm_state upcoming_state;
+ enum tcpm_ams ams;
+ enum tcpm_ams next_ams;
+ bool in_ams;
+
#ifdef CONFIG_DEBUG_FS
struct dentry *dentry;
struct mutex logbuffer_lock; /* log buffer access lock */
@@ -396,6 +453,12 @@ struct pd_rx_event {
struct pd_message msg;
};
+static const char * const pd_rev[] = {
+ [PD_REV10] = "rev1",
+ [PD_REV20] = "rev2",
+ [PD_REV30] = "rev3",
+};
+
#define tcpm_cc_is_sink(cc) \
((cc) == TYPEC_CC_RP_DEF || (cc) == TYPEC_CC_RP_1_5 || \
(cc) == TYPEC_CC_RP_3_0)
@@ -440,6 +503,10 @@ struct pd_rx_event {
((port)->typec_caps.data == TYPEC_PORT_DFP ? \
TYPEC_HOST : TYPEC_DEVICE)
+#define tcpm_sink_tx_ok(port) \
+ (tcpm_port_is_sink(port) && \
+ ((port)->cc1 == TYPEC_CC_RP_3_0 || (port)->cc2 == TYPEC_CC_RP_3_0))
+
static enum tcpm_state tcpm_default_state(struct tcpm_port *port)
{
if (port->port_type == TYPEC_PORT_DRP) {
@@ -666,6 +733,67 @@ static void tcpm_debugfs_exit(const struct tcpm_port *port) { }
#endif
+static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc)
+{
+ tcpm_log(port, "cc:=%d", cc);
+ port->cc_req = cc;
+ port->tcpc->set_cc(port->tcpc, cc);
+}
+
+/*
+ * Determine RP value to set based on maximum current supported
+ * by a port if configured as source.
+ * Returns CC value to report to link partner.
+ */
+static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port)
+{
+ const u32 *src_pdo = port->src_pdo;
+ int nr_pdo = port->nr_src_pdo;
+ int i;
+
+ /*
+ * Search for first entry with matching voltage.
+ * It should report the maximum supported current.
+ */
+ for (i = 0; i < nr_pdo; i++) {
+ const u32 pdo = src_pdo[i];
+
+ if (pdo_type(pdo) == PDO_TYPE_FIXED &&
+ pdo_fixed_voltage(pdo) == 5000) {
+ unsigned int curr = pdo_max_current(pdo);
+
+ if (curr >= 3000)
+ return TYPEC_CC_RP_3_0;
+ else if (curr >= 1500)
+ return TYPEC_CC_RP_1_5;
+ return TYPEC_CC_RP_DEF;
+ }
+ }
+
+ return TYPEC_CC_RP_DEF;
+}
+
+static int tcpm_ams_finish(struct tcpm_port *port)
+{
+ int ret = 0;
+
+ tcpm_log(port, "AMS %s finished", tcpm_ams_str[port->ams]);
+
+ if (port->pd_capable && port->pwr_role == TYPEC_SOURCE) {
+ if (port->negotiated_rev >= PD_REV30)
+ tcpm_set_cc(port, SINK_TX_OK);
+ else
+ tcpm_set_cc(port, SINK_TX_NG);
+ } else if (port->pwr_role == TYPEC_SOURCE) {
+ tcpm_set_cc(port, tcpm_rp_cc(port));
+ }
+
+ port->in_ams = false;
+ port->ams = NONE_AMS;
+
+ return ret;
+}
+
static int tcpm_pd_transmit(struct tcpm_port *port,
enum tcpm_transmit_type type,
const struct pd_message *msg)
@@ -693,13 +821,30 @@ static int tcpm_pd_transmit(struct tcpm_port *port,
switch (port->tx_status) {
case TCPC_TX_SUCCESS:
port->message_id = (port->message_id + 1) & PD_HEADER_ID_MASK;
- return 0;
+ /*
+ * USB PD rev 2.0, 8.3.2.2.1:
+ * USB PD rev 3.0, 8.3.2.1.3:
+ * "... Note that every AMS is Interruptible until the first
+ * Message in the sequence has been successfully sent (GoodCRC
+ * Message received)."
+ */
+ if (port->ams != NONE_AMS)
+ port->in_ams = true;
+ break;
case TCPC_TX_DISCARDED:
- return -EAGAIN;
+ ret = -EAGAIN;
+ break;
case TCPC_TX_FAILED:
default:
- return -EIO;
+ ret = -EIO;
+ break;
}
+
+ /* Some AMS don't expect responses. Finish them here. */
+ if (port->ams == ATTENTION || port->ams == SOURCE_ALERT)
+ tcpm_ams_finish(port);
+
+ return ret;
}
void tcpm_pd_transmit_complete(struct tcpm_port *port,
@@ -804,39 +949,6 @@ static int tcpm_set_current_limit(struct tcpm_port *port, u32 max_ma, u32 mv)
return ret;
}
-/*
- * Determine RP value to set based on maximum current supported
- * by a port if configured as source.
- * Returns CC value to report to link partner.
- */
-static enum typec_cc_status tcpm_rp_cc(struct tcpm_port *port)
-{
- const u32 *src_pdo = port->src_pdo;
- int nr_pdo = port->nr_src_pdo;
- int i;
-
- /*
- * Search for first entry with matching voltage.
- * It should report the maximum supported current.
- */
- for (i = 0; i < nr_pdo; i++) {
- const u32 pdo = src_pdo[i];
-
- if (pdo_type(pdo) == PDO_TYPE_FIXED &&
- pdo_fixed_voltage(pdo) == 5000) {
- unsigned int curr = pdo_max_current(pdo);
-
- if (curr >= 3000)
- return TYPEC_CC_RP_3_0;
- else if (curr >= 1500)
- return TYPEC_CC_RP_1_5;
- return TYPEC_CC_RP_DEF;
- }
- }
-
- return TYPEC_CC_RP_DEF;
-}
-
static int tcpm_set_attached_state(struct tcpm_port *port, bool attached)
{
return port->tcpc->set_roles(port->tcpc, attached, port->pwr_role,
@@ -911,13 +1023,47 @@ static int tcpm_set_pwr_role(struct tcpm_port *port, enum typec_role role)
return 0;
}
+/*
+ * Transform the PDO to be compliant to PD rev2.0.
+ * Return 0 if the PDO type is not defined in PD rev2.0.
+ * Otherwise, return the converted PDO.
+ */
+static u32 tcpm_forge_legacy_pdo(struct tcpm_port *port, u32 pdo, enum typec_role role)
+{
+ switch (pdo_type(pdo)) {
+ case PDO_TYPE_FIXED:
+ if (role == TYPEC_SINK)
+ return pdo & ~PDO_FIXED_FRS_CURR_MASK;
+ else
+ return pdo & ~PDO_FIXED_UNCHUNK_EXT;
+ case PDO_TYPE_VAR:
+ case PDO_TYPE_BATT:
+ return pdo;
+ case PDO_TYPE_APDO:
+ default:
+ return 0;
+ }
+}
+
static int tcpm_pd_send_source_caps(struct tcpm_port *port)
{
struct pd_message msg;
- int i;
+ u32 pdo;
+ unsigned int i, nr_pdo = 0;
memset(&msg, 0, sizeof(msg));
- if (!port->nr_src_pdo) {
+
+ for (i = 0; i < port->nr_src_pdo; i++) {
+ if (port->negotiated_rev >= PD_REV30) {
+ msg.payload[nr_pdo++] = cpu_to_le32(port->src_pdo[i]);
+ } else {
+ pdo = tcpm_forge_legacy_pdo(port, port->src_pdo[i], TYPEC_SOURCE);
+ if (pdo)
+ msg.payload[nr_pdo++] = cpu_to_le32(pdo);
+ }
+ }
+
+ if (!nr_pdo) {
/* No source capabilities defined, sink only */
msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
port->pwr_role,
@@ -930,10 +1076,8 @@ static int tcpm_pd_send_source_caps(struct tcpm_port *port)
port->data_role,
port->negotiated_rev,
port->message_id,
- port->nr_src_pdo);
+ nr_pdo);
}
- for (i = 0; i < port->nr_src_pdo; i++)
- msg.payload[i] = cpu_to_le32(port->src_pdo[i]);
return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
}
@@ -941,10 +1085,22 @@ static int tcpm_pd_send_source_caps(struct tcpm_port *port)
static int tcpm_pd_send_sink_caps(struct tcpm_port *port)
{
struct pd_message msg;
- int i;
+ u32 pdo;
+ unsigned int i, nr_pdo = 0;
memset(&msg, 0, sizeof(msg));
- if (!port->nr_snk_pdo) {
+
+ for (i = 0; i < port->nr_snk_pdo; i++) {
+ if (port->negotiated_rev >= PD_REV30) {
+ msg.payload[nr_pdo++] = cpu_to_le32(port->snk_pdo[i]);
+ } else {
+ pdo = tcpm_forge_legacy_pdo(port, port->snk_pdo[i], TYPEC_SINK);
+ if (pdo)
+ msg.payload[nr_pdo++] = cpu_to_le32(pdo);
+ }
+ }
+
+ if (!nr_pdo) {
/* No sink capabilities defined, source only */
msg.header = PD_HEADER_LE(PD_CTRL_REJECT,
port->pwr_role,
@@ -957,10 +1113,8 @@ static int tcpm_pd_send_sink_caps(struct tcpm_port *port)
port->data_role,
port->negotiated_rev,
port->message_id,
- port->nr_snk_pdo);
+ nr_pdo);
}
- for (i = 0; i < port->nr_snk_pdo; i++)
- msg.payload[i] = cpu_to_le32(port->snk_pdo[i]);
return tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
}
@@ -1000,16 +1154,17 @@ static void tcpm_set_state(struct tcpm_port *port, enum tcpm_state state,
unsigned int delay_ms)
{
if (delay_ms) {
- tcpm_log(port, "pending state change %s -> %s @ %u ms",
- tcpm_states[port->state], tcpm_states[state],
- delay_ms);
+ tcpm_log(port, "pending state change %s -> %s @ %u ms [%s %s]",
+ tcpm_states[port->state], tcpm_states[state], delay_ms,
+ pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
port->delayed_state = state;
mod_tcpm_delayed_work(port, delay_ms);
port->delayed_runtime = ktime_add(ktime_get(), ms_to_ktime(delay_ms));
port->delay_ms = delay_ms;
} else {
- tcpm_log(port, "state change %s -> %s",
- tcpm_states[port->state], tcpm_states[state]);
+ tcpm_log(port, "state change %s -> %s [%s %s]",
+ tcpm_states[port->state], tcpm_states[state],
+ pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
port->delayed_state = INVALID_STATE;
port->prev_state = port->state;
port->state = state;
@@ -1031,10 +1186,11 @@ static void tcpm_set_state_cond(struct tcpm_port *port, enum tcpm_state state,
tcpm_set_state(port, state, delay_ms);
else
tcpm_log(port,
- "skipped %sstate change %s -> %s [%u ms], context state %s",
+ "skipped %sstate change %s -> %s [%u ms], context state %s [%s %s]",
delay_ms ? "delayed " : "",
tcpm_states[port->state], tcpm_states[state],
- delay_ms, tcpm_states[port->enter_state]);
+ delay_ms, tcpm_states[port->enter_state],
+ pd_rev[port->negotiated_rev], tcpm_ams_str[port->ams]);
}
static void tcpm_queue_message(struct tcpm_port *port,
@@ -1044,6 +1200,149 @@ static void tcpm_queue_message(struct tcpm_port *port,
mod_tcpm_delayed_work(port, 0);
}
+static bool tcpm_vdm_ams(struct tcpm_port *port)
+{
+ switch (port->ams) {
+ case DISCOVER_IDENTITY:
+ case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY:
+ case DISCOVER_SVIDS:
+ case DISCOVER_MODES:
+ case DFP_TO_UFP_ENTER_MODE:
+ case DFP_TO_UFP_EXIT_MODE:
+ case DFP_TO_CABLE_PLUG_ENTER_MODE:
+ case DFP_TO_CABLE_PLUG_EXIT_MODE:
+ case ATTENTION:
+ case UNSTRUCTURED_VDMS:
+ case STRUCTURED_VDMS:
+ break;
+ default:
+ return false;
+ }
+
+ return true;
+}
+
+static bool tcpm_ams_interruptible(struct tcpm_port *port)
+{
+ switch (port->ams) {
+ /* Interruptible AMS */
+ case NONE_AMS:
+ case SECURITY:
+ case FIRMWARE_UPDATE:
+ case DISCOVER_IDENTITY:
+ case SOURCE_STARTUP_CABLE_PLUG_DISCOVER_IDENTITY:
+ case DISCOVER_SVIDS:
+ case DISCOVER_MODES:
+ case DFP_TO_UFP_ENTER_MODE:
+ case DFP_TO_UFP_EXIT_MODE:
+ case DFP_TO_CABLE_PLUG_ENTER_MODE:
+ case DFP_TO_CABLE_PLUG_EXIT_MODE:
+ case UNSTRUCTURED_VDMS:
+ case STRUCTURED_VDMS:
+ case COUNTRY_INFO:
+ case COUNTRY_CODES:
+ break;
+ /* Non-Interruptible AMS */
+ default:
+ if (port->in_ams)
+ return false;
+ break;
+ }
+
+ return true;
+}
+
+static int tcpm_ams_start(struct tcpm_port *port, enum tcpm_ams ams)
+{
+ int ret = 0;
+
+ tcpm_log(port, "AMS %s start", tcpm_ams_str[ams]);
+
+ if (!tcpm_ams_interruptible(port) &&
+ !(ams == HARD_RESET || ams == SOFT_RESET_AMS)) {
+ port->upcoming_state = INVALID_STATE;
+ tcpm_log(port, "AMS %s not interruptible, aborting",
+ tcpm_ams_str[port->ams]);
+ return -EAGAIN;
+ }
+
+ if (port->pwr_role == TYPEC_SOURCE) {
+ enum typec_cc_status cc_req = port->cc_req;
+
+ port->ams = ams;
+
+ if (ams == HARD_RESET) {
+ tcpm_set_cc(port, tcpm_rp_cc(port));
+ tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
+ tcpm_set_state(port, HARD_RESET_START, 0);
+ return ret;
+ } else if (ams == SOFT_RESET_AMS) {
+ if (!port->explicit_contract)
+ tcpm_set_cc(port, tcpm_rp_cc(port));
+ tcpm_set_state(port, SOFT_RESET_SEND, 0);
+ return ret;
+ } else if (tcpm_vdm_ams(port)) {
+ /* tSinkTx is enforced in vdm_run_state_machine */
+ if (port->negotiated_rev >= PD_REV30)
+ tcpm_set_cc(port, SINK_TX_NG);
+ return ret;
+ }
+
+ if (port->negotiated_rev >= PD_REV30)
+ tcpm_set_cc(port, SINK_TX_NG);
+
+ switch (port->state) {
+ case SRC_READY:
+ case SRC_STARTUP:
+ case SRC_SOFT_RESET_WAIT_SNK_TX:
+ case SOFT_RESET:
+ case SOFT_RESET_SEND:
+ if (port->negotiated_rev >= PD_REV30)
+ tcpm_set_state(port, AMS_START,
+ cc_req == SINK_TX_OK ?
+ PD_T_SINK_TX : 0);
+ else
+ tcpm_set_state(port, AMS_START, 0);
+ break;
+ default:
+ if (port->negotiated_rev >= PD_REV30)
+ tcpm_set_state(port, SRC_READY,
+ cc_req == SINK_TX_OK ?
+ PD_T_SINK_TX : 0);
+ else
+ tcpm_set_state(port, SRC_READY, 0);
+ break;
+ }
+ } else {
+ if (port->negotiated_rev >= PD_REV30 &&
+ !tcpm_sink_tx_ok(port) &&
+ ams != SOFT_RESET_AMS &&
+ ams != HARD_RESET) {
+ port->upcoming_state = INVALID_STATE;
+ tcpm_log(port, "Sink TX No Go");
+ return -EAGAIN;
+ }
+
+ port->ams = ams;
+
+ if (ams == HARD_RESET) {
+ tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
+ tcpm_set_state(port, HARD_RESET_START, 0);
+ return ret;
+ } else if (tcpm_vdm_ams(port)) {
+ return ret;
+ }
+
+ if (port->state == SNK_READY ||
+ port->state == SNK_SOFT_RESET)
+ tcpm_set_state(port, AMS_START, 0);
+ else
+ tcpm_set_state(port, SNK_READY, 0);
+ }
+
+ return ret;
+}
+
/*
* VDM/VDO handling functions
*/
@@ -1198,6 +1497,9 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
case CMDT_INIT:
switch (cmd) {
case CMD_DISCOVER_IDENT:
+ if (PD_VDO_VID(p[0]) != USB_SID_PD)
+ break;
+
/* 6.4.4.3.1: Only respond as UFP (device) */
if (port->data_role == TYPEC_DEVICE &&
port->nr_snk_vdo) {
@@ -1236,6 +1538,8 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
if (IS_ERR_OR_NULL(port->partner))
break;
+ tcpm_ams_finish(port);
+
switch (cmd) {
case CMD_DISCOVER_IDENT:
/* 6.4.4.3.1 */
@@ -1265,6 +1569,7 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
rlen = 1;
} else {
tcpm_register_partner_altmodes(port);
+ port->vdm_sm_running = false;
}
break;
case CMD_ENTER_MODE:
@@ -1281,21 +1586,39 @@ static int tcpm_pd_svdm(struct tcpm_port *port, struct typec_altmode *adev,
return 0;
}
break;
+ case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
+ break;
default:
+ /* Unrecognized SVDM */
+ response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
+ rlen = 1;
break;
}
break;
case CMDT_RSP_NAK:
+ tcpm_ams_finish(port);
switch (cmd) {
+ case CMD_DISCOVER_IDENT:
+ case CMD_DISCOVER_SVID:
+ case CMD_DISCOVER_MODES:
+ case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
+ break;
case CMD_ENTER_MODE:
/* Back to USB Operation */
*adev_action = ADEV_NOTIFY_USB_AND_QUEUE_VDM;
return 0;
default:
+ /* Unrecognized SVDM */
+ response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
+ rlen = 1;
break;
}
+ port->vdm_sm_running = false;
break;
default:
+ response[0] = p[0] | VDO_CMDT(CMDT_RSP_NAK);
+ rlen = 1;
+ port->vdm_sm_running = false;
break;
}
@@ -1331,8 +1654,12 @@ static void tcpm_handle_vdm_request(struct tcpm_port *port,
port->vdm_state = VDM_STATE_DONE;
}
- if (PD_VDO_SVDM(p[0]))
+ if (PD_VDO_SVDM(p[0])) {
rlen = tcpm_pd_svdm(port, adev, p, cnt, response, &adev_action);
+ } else {
+ if (port->negotiated_rev >= PD_REV30)
+ tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
+ }
/*
* We are done with any state stored in the port struct now, except
@@ -1435,7 +1762,8 @@ static unsigned int vdm_ready_timeout(u32 vdm_hdr)
static void vdm_run_state_machine(struct tcpm_port *port)
{
struct pd_message msg;
- int i, res;
+ int i, res = 0;
+ u32 vdo_hdr = port->vdo_data[0];
switch (port->vdm_state) {
case VDM_STATE_READY:
@@ -1452,26 +1780,49 @@ static void vdm_run_state_machine(struct tcpm_port *port)
if (port->state != SRC_READY && port->state != SNK_READY)
break;
- /* Prepare and send VDM */
- memset(&msg, 0, sizeof(msg));
- msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
- port->pwr_role,
- port->data_role,
- port->negotiated_rev,
- port->message_id, port->vdo_count);
- for (i = 0; i < port->vdo_count; i++)
- msg.payload[i] = cpu_to_le32(port->vdo_data[i]);
- res = tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
- if (res < 0) {
- port->vdm_state = VDM_STATE_ERR_SEND;
- } else {
- unsigned long timeout;
+ /* TODO: AMS operation for Unstructured VDM */
+ if (PD_VDO_SVDM(vdo_hdr) && PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) {
+ switch (PD_VDO_CMD(vdo_hdr)) {
+ case CMD_DISCOVER_IDENT:
+ res = tcpm_ams_start(port, DISCOVER_IDENTITY);
+ if (res == 0)
+ port->send_discover = false;
+ break;
+ case CMD_DISCOVER_SVID:
+ res = tcpm_ams_start(port, DISCOVER_SVIDS);
+ break;
+ case CMD_DISCOVER_MODES:
+ res = tcpm_ams_start(port, DISCOVER_MODES);
+ break;
+ case CMD_ENTER_MODE:
+ res = tcpm_ams_start(port, DFP_TO_UFP_ENTER_MODE);
+ break;
+ case CMD_EXIT_MODE:
+ res = tcpm_ams_start(port, DFP_TO_UFP_EXIT_MODE);
+ break;
+ case CMD_ATTENTION:
+ res = tcpm_ams_start(port, ATTENTION);
+ break;
+ case VDO_CMD_VENDOR(0) ... VDO_CMD_VENDOR(15):
+ res = tcpm_ams_start(port, STRUCTURED_VDMS);
+ break;
+ default:
+ res = -EOPNOTSUPP;
+ break;
+ }
- port->vdm_retries = 0;
- port->vdm_state = VDM_STATE_BUSY;
- timeout = vdm_ready_timeout(port->vdo_data[0]);
- mod_vdm_delayed_work(port, timeout);
+ if (res < 0) {
+ port->vdm_sm_running = false;
+ return;
+ }
}
+
+ port->vdm_state = VDM_STATE_SEND_MESSAGE;
+ mod_vdm_delayed_work(port, (port->negotiated_rev >= PD_REV30 &&
+ port->pwr_role == TYPEC_SOURCE &&
+ PD_VDO_SVDM(vdo_hdr) &&
+ PD_VDO_CMDT(vdo_hdr) == CMDT_INIT) ?
+ PD_T_SINK_TX : 0);
break;
case VDM_STATE_WAIT_RSP_BUSY:
port->vdo_data[0] = port->vdo_retry;
@@ -1480,6 +1831,8 @@ static void vdm_run_state_machine(struct tcpm_port *port)
break;
case VDM_STATE_BUSY:
port->vdm_state = VDM_STATE_ERR_TMOUT;
+ if (port->ams != NONE_AMS)
+ tcpm_ams_finish(port);
break;
case VDM_STATE_ERR_SEND:
/*
@@ -1492,6 +1845,29 @@ static void vdm_run_state_machine(struct tcpm_port *port)
tcpm_log(port, "VDM Tx error, retry");
port->vdm_retries++;
port->vdm_state = VDM_STATE_READY;
+ tcpm_ams_finish(port);
+ }
+ break;
+ case VDM_STATE_SEND_MESSAGE:
+ /* Prepare and send VDM */
+ memset(&msg, 0, sizeof(msg));
+ msg.header = PD_HEADER_LE(PD_DATA_VENDOR_DEF,
+ port->pwr_role,
+ port->data_role,
+ port->negotiated_rev,
+ port->message_id, port->vdo_count);
+ for (i = 0; i < port->vdo_count; i++)
+ msg.payload[i] = cpu_to_le32(port->vdo_data[i]);
+ res = tcpm_pd_transmit(port, TCPC_TX_SOP, &msg);
+ if (res < 0) {
+ port->vdm_state = VDM_STATE_ERR_SEND;
+ } else {
+ unsigned long timeout;
+
+ port->vdm_retries = 0;
+ port->vdm_state = VDM_STATE_BUSY;
+ timeout = vdm_ready_timeout(vdo_hdr);
+ mod_vdm_delayed_work(port, timeout);
}
break;
default:
@@ -1514,7 +1890,11 @@ static void vdm_state_machine_work(struct kthread_work *work)
prev_state = port->vdm_state;
vdm_run_state_machine(port);
} while (port->vdm_state != prev_state &&
- port->vdm_state != VDM_STATE_BUSY);
+ port->vdm_state != VDM_STATE_BUSY &&
+ port->vdm_state != VDM_STATE_SEND_MESSAGE);
+
+ if (port->vdm_state == VDM_STATE_ERR_TMOUT)
+ port->vdm_sm_running = false;
mutex_unlock(&port->lock);
}
@@ -1736,6 +2116,71 @@ static int tcpm_set_auto_vbus_discharge_threshold(struct tcpm_port *port,
return ret;
}
+static void tcpm_pd_handle_state(struct tcpm_port *port,
+ enum tcpm_state state,
+ enum tcpm_ams ams,
+ unsigned int delay_ms)
+{
+ switch (port->state) {
+ case SRC_READY:
+ case SNK_READY:
+ port->ams = ams;
+ tcpm_set_state(port, state, delay_ms);
+ break;
+ /* 8.3.3.4.1.1 and 6.8.1 power transitioning */
+ case SNK_TRANSITION_SINK:
+ case SNK_TRANSITION_SINK_VBUS:
+ case SRC_TRANSITION_SUPPLY:
+ tcpm_set_state(port, HARD_RESET_SEND, 0);
+ break;
+ default:
+ if (!tcpm_ams_interruptible(port)) {
+ tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
+ SRC_SOFT_RESET_WAIT_SNK_TX :
+ SNK_SOFT_RESET,
+ 0);
+ } else {
+ /* process the Message 6.8.1 */
+ port->upcoming_state = state;
+ port->next_ams = ams;
+ tcpm_set_state(port, ready_state(port), delay_ms);
+ }
+ break;
+ }
+}
+
+static void tcpm_pd_handle_msg(struct tcpm_port *port,
+ enum pd_msg_request message,
+ enum tcpm_ams ams)
+{
+ switch (port->state) {
+ case SRC_READY:
+ case SNK_READY:
+ port->ams = ams;
+ tcpm_queue_message(port, message);
+ break;
+ /* PD 3.0 Spec 8.3.3.4.1.1 and 6.8.1 */
+ case SNK_TRANSITION_SINK:
+ case SNK_TRANSITION_SINK_VBUS:
+ case SRC_TRANSITION_SUPPLY:
+ tcpm_set_state(port, HARD_RESET_SEND, 0);
+ break;
+ default:
+ if (!tcpm_ams_interruptible(port)) {
+ tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
+ SRC_SOFT_RESET_WAIT_SNK_TX :
+ SNK_SOFT_RESET,
+ 0);
+ } else {
+ port->next_ams = ams;
+ tcpm_set_state(port, ready_state(port), 0);
+ /* 6.8.1 process the Message */
+ tcpm_queue_message(port, message);
+ }
+ break;
+ }
+}
+
static void tcpm_pd_data_request(struct tcpm_port *port,
const struct pd_message *msg)
{
@@ -1749,9 +2194,6 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
switch (type) {
case PD_DATA_SOURCE_CAP:
- if (port->pwr_role != TYPEC_SINK)
- break;
-
for (i = 0; i < cnt; i++)
port->source_caps[i] = le32_to_cpu(msg->payload[i]);
@@ -1767,12 +2209,26 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
* to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
* support Rev 1.0 so just do nothing in that scenario.
*/
- if (rev == PD_REV10)
+ if (rev == PD_REV10) {
+ if (port->ams == GET_SOURCE_CAPABILITIES)
+ tcpm_ams_finish(port);
break;
+ }
if (rev < PD_MAX_REV)
port->negotiated_rev = rev;
+ if (port->pwr_role == TYPEC_SOURCE) {
+ if (port->ams == GET_SOURCE_CAPABILITIES)
+ tcpm_pd_handle_state(port, SRC_READY, NONE_AMS, 0);
+ /* Unexpected Source Capabilities */
+ else
+ tcpm_pd_handle_msg(port,
+ port->negotiated_rev < PD_REV30 ?
+ PD_MSG_CTRL_REJECT :
+ PD_MSG_CTRL_NOT_SUPP,
+ NONE_AMS);
+ } else if (port->state == SNK_WAIT_CAPABILITIES) {
/*
* This message may be received even if VBUS is not
* present. This is quite unexpected; see USB PD
@@ -1786,30 +2242,55 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
* but be prepared to keep waiting for VBUS after it was
* handled.
*/
- tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
+ port->ams = POWER_NEGOTIATION;
+ port->in_ams = true;
+ tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
+ } else {
+ if (port->ams == GET_SOURCE_CAPABILITIES)
+ tcpm_ams_finish(port);
+ tcpm_pd_handle_state(port, SNK_NEGOTIATE_CAPABILITIES,
+ POWER_NEGOTIATION, 0);
+ }
break;
case PD_DATA_REQUEST:
- if (port->pwr_role != TYPEC_SOURCE ||
- cnt != 1) {
- tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
- break;
- }
-
/*
* Adjust revision in subsequent message headers, as required,
* to comply with 6.2.1.1.5 of the USB PD 3.0 spec. We don't
* support Rev 1.0 so just reject in that scenario.
*/
if (rev == PD_REV10) {
- tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
+ tcpm_pd_handle_msg(port,
+ port->negotiated_rev < PD_REV30 ?
+ PD_MSG_CTRL_REJECT :
+ PD_MSG_CTRL_NOT_SUPP,
+ NONE_AMS);
break;
}
if (rev < PD_MAX_REV)
port->negotiated_rev = rev;
+ if (port->pwr_role != TYPEC_SOURCE || cnt != 1) {
+ tcpm_pd_handle_msg(port,
+ port->negotiated_rev < PD_REV30 ?
+ PD_MSG_CTRL_REJECT :
+ PD_MSG_CTRL_NOT_SUPP,
+ NONE_AMS);
+ break;
+ }
+
port->sink_request = le32_to_cpu(msg->payload[0]);
- tcpm_set_state(port, SRC_NEGOTIATE_CAPABILITIES, 0);
+
+ if (port->vdm_sm_running && port->explicit_contract) {
+ tcpm_pd_handle_msg(port, PD_MSG_CTRL_WAIT, port->ams);
+ break;
+ }
+
+ if (port->state == SRC_SEND_CAPABILITIES)
+ tcpm_set_state(port, SRC_NEGOTIATE_CAPABILITIES, 0);
+ else
+ tcpm_pd_handle_state(port, SRC_NEGOTIATE_CAPABILITIES,
+ POWER_NEGOTIATION, 0);
break;
case PD_DATA_SINK_CAP:
/* We don't do anything with this at the moment... */
@@ -1830,16 +2311,22 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
port->nr_sink_caps = cnt;
port->sink_cap_done = true;
- tcpm_set_state(port, SNK_READY, 0);
+ if (port->ams == GET_SINK_CAPABILITIES)
+ tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
+ /* Unexpected Sink Capabilities */
+ else
+ tcpm_pd_handle_msg(port,
+ port->negotiated_rev < PD_REV30 ?
+ PD_MSG_CTRL_REJECT :
+ PD_MSG_CTRL_NOT_SUPP,
+ NONE_AMS);
break;
case PD_DATA_VENDOR_DEF:
tcpm_handle_vdm_request(port, msg->payload, cnt);
break;
case PD_DATA_BIST:
- if (port->state == SRC_READY || port->state == SNK_READY) {
- port->bist_request = le32_to_cpu(msg->payload[0]);
- tcpm_set_state(port, BIST_RX, 0);
- }
+ port->bist_request = le32_to_cpu(msg->payload[0]);
+ tcpm_pd_handle_state(port, BIST_RX, BIST, 0);
break;
case PD_DATA_ALERT:
tcpm_handle_alert(port, msg->payload, cnt);
@@ -1847,10 +2334,17 @@ static void tcpm_pd_data_request(struct tcpm_port *port,
case PD_DATA_BATT_STATUS:
case PD_DATA_GET_COUNTRY_INFO:
/* Currently unsupported */
- tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
+ tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ?
+ PD_MSG_CTRL_REJECT :
+ PD_MSG_CTRL_NOT_SUPP,
+ NONE_AMS);
break;
default:
- tcpm_log(port, "Unhandled data message type %#x", type);
+ tcpm_pd_handle_msg(port, port->negotiated_rev < PD_REV30 ?
+ PD_MSG_CTRL_REJECT :
+ PD_MSG_CTRL_NOT_SUPP,
+ NONE_AMS);
+ tcpm_log(port, "Unrecognized data message type %#x", type);
break;
}
}
@@ -1875,26 +2369,10 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
case PD_CTRL_PING:
break;
case PD_CTRL_GET_SOURCE_CAP:
- switch (port->state) {
- case SRC_READY:
- case SNK_READY:
- tcpm_queue_message(port, PD_MSG_DATA_SOURCE_CAP);
- break;
- default:
- tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
- break;
- }
+ tcpm_pd_handle_msg(port, PD_MSG_DATA_SOURCE_CAP, GET_SOURCE_CAPABILITIES);
break;
case PD_CTRL_GET_SINK_CAP:
- switch (port->state) {
- case SRC_READY:
- case SNK_READY:
- tcpm_queue_message(port, PD_MSG_DATA_SINK_CAP);
- break;
- default:
- tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
- break;
- }
+ tcpm_pd_handle_msg(port, PD_MSG_DATA_SINK_CAP, GET_SINK_CAPABILITIES);
break;
case PD_CTRL_GOTO_MIN:
break;
@@ -1910,6 +2388,10 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
TYPEC_PWR_MODE_PD,
port->pps_data.active,
port->supply_voltage);
+ /* Set VDM running flag ASAP */
+ if (port->data_role == TYPEC_HOST &&
+ port->send_discover)
+ port->vdm_sm_running = true;
tcpm_set_state(port, SNK_READY, 0);
} else {
/*
@@ -1933,6 +2415,11 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
tcpm_set_state(port, FR_SWAP_SNK_SRC_NEW_SINK_READY, 0);
break;
default:
+ tcpm_pd_handle_state(port,
+ port->pwr_role == TYPEC_SOURCE ?
+ SRC_SOFT_RESET_WAIT_SNK_TX :
+ SNK_SOFT_RESET,
+ NONE_AMS, 0);
break;
}
break;
@@ -1942,10 +2429,14 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
switch (port->state) {
case SNK_NEGOTIATE_CAPABILITIES:
/* USB PD specification, Figure 8-43 */
- if (port->explicit_contract)
+ if (port->explicit_contract) {
next_state = SNK_READY;
- else
+ if (port->data_role == TYPEC_HOST &&
+ port->send_discover)
+ port->vdm_sm_running = true;
+ } else {
next_state = SNK_WAIT_CAPABILITIES;
+ }
tcpm_set_state(port, next_state, 0);
break;
case SNK_NEGOTIATE_PPS_CAPABILITIES:
@@ -1954,6 +2445,11 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
port->pps_data.op_curr = port->current_limit;
port->pps_status = (type == PD_CTRL_WAIT ?
-EAGAIN : -EOPNOTSUPP);
+
+ if (port->data_role == TYPEC_HOST &&
+ port->send_discover)
+ port->vdm_sm_running = true;
+
tcpm_set_state(port, SNK_READY, 0);
break;
case DR_SWAP_SEND:
@@ -1979,6 +2475,11 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
tcpm_set_state(port, ready_state(port), 0);
break;
default:
+ tcpm_pd_handle_state(port,
+ port->pwr_role == TYPEC_SOURCE ?
+ SRC_SOFT_RESET_WAIT_SNK_TX :
+ SNK_SOFT_RESET,
+ NONE_AMS, 0);
break;
}
break;
@@ -1995,15 +2496,20 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
tcpm_set_state(port, SNK_TRANSITION_SINK, 0);
break;
case SOFT_RESET_SEND:
- port->message_id = 0;
- port->rx_msgid = -1;
- if (port->pwr_role == TYPEC_SOURCE)
- next_state = SRC_SEND_CAPABILITIES;
- else
- next_state = SNK_WAIT_CAPABILITIES;
- tcpm_set_state(port, next_state, 0);
+ if (port->ams == SOFT_RESET_AMS)
+ tcpm_ams_finish(port);
+ if (port->pwr_role == TYPEC_SOURCE) {
+ port->upcoming_state = SRC_SEND_CAPABILITIES;
+ tcpm_ams_start(port, POWER_NEGOTIATION);
+ } else {
+ tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
+ }
break;
case DR_SWAP_SEND:
+ if (port->data_role == TYPEC_DEVICE &&
+ port->send_discover)
+ port->vdm_sm_running = true;
+
tcpm_set_state(port, DR_SWAP_CHANGE_DR, 0);
break;
case PR_SWAP_SEND:
@@ -2016,57 +2522,62 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
tcpm_set_state(port, FR_SWAP_SNK_SRC_TRANSITION_TO_OFF, 0);
break;
default:
+ tcpm_pd_handle_state(port,
+ port->pwr_role == TYPEC_SOURCE ?
+ SRC_SOFT_RESET_WAIT_SNK_TX :
+ SNK_SOFT_RESET,
+ NONE_AMS, 0);
break;
}
break;
case PD_CTRL_SOFT_RESET:
+ port->ams = SOFT_RESET_AMS;
tcpm_set_state(port, SOFT_RESET, 0);
break;
case PD_CTRL_DR_SWAP:
- if (port->typec_caps.data != TYPEC_PORT_DRD) {
- tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
- break;
- }
/*
* XXX
* 6.3.9: If an alternate mode is active, a request to swap
* alternate modes shall trigger a port reset.
*/
- switch (port->state) {
- case SRC_READY:
- case SNK_READY:
- tcpm_set_state(port, DR_SWAP_ACCEPT, 0);
- break;
- default:
- tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
- break;
+ if (port->typec_caps.data != TYPEC_PORT_DRD) {
+ tcpm_pd_handle_msg(port,
+ port->negotiated_rev < PD_REV30 ?
+ PD_MSG_CTRL_REJECT :
+ PD_MSG_CTRL_NOT_SUPP,
+ NONE_AMS);
+ } else {
+ if (port->vdm_sm_running) {
+ tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
+ break;
+ }
+
+ tcpm_pd_handle_state(port, DR_SWAP_ACCEPT, DATA_ROLE_SWAP, 0);
}
break;
case PD_CTRL_PR_SWAP:
if (port->port_type != TYPEC_PORT_DRP) {
- tcpm_queue_message(port, PD_MSG_CTRL_REJECT);
- break;
- }
- switch (port->state) {
- case SRC_READY:
- case SNK_READY:
- tcpm_set_state(port, PR_SWAP_ACCEPT, 0);
- break;
- default:
- tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
- break;
+ tcpm_pd_handle_msg(port,
+ port->negotiated_rev < PD_REV30 ?
+ PD_MSG_CTRL_REJECT :
+ PD_MSG_CTRL_NOT_SUPP,
+ NONE_AMS);
+ } else {
+ if (port->vdm_sm_running) {
+ tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
+ break;
+ }
+
+ tcpm_pd_handle_state(port, PR_SWAP_ACCEPT, POWER_ROLE_SWAP, 0);
}
break;
case PD_CTRL_VCONN_SWAP:
- switch (port->state) {
- case SRC_READY:
- case SNK_READY:
- tcpm_set_state(port, VCONN_SWAP_ACCEPT, 0);
- break;
- default:
+ if (port->vdm_sm_running) {
tcpm_queue_message(port, PD_MSG_CTRL_WAIT);
break;
}
+
+ tcpm_pd_handle_state(port, VCONN_SWAP_ACCEPT, VCONN_SWAP, 0);
break;
case PD_CTRL_GET_SOURCE_CAP_EXT:
case PD_CTRL_GET_STATUS:
@@ -2074,10 +2585,19 @@ static void tcpm_pd_ctrl_request(struct tcpm_port *port,
case PD_CTRL_GET_PPS_STATUS:
case PD_CTRL_GET_COUNTRY_CODES:
/* Currently not supported */
- tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
+ tcpm_pd_handle_msg(port,
+ port->negotiated_rev < PD_REV30 ?
+ PD_MSG_CTRL_REJECT :
+ PD_MSG_CTRL_NOT_SUPP,
+ NONE_AMS);
break;
default:
- tcpm_log(port, "Unhandled ctrl message type %#x", type);
+ tcpm_pd_handle_msg(port,
+ port->negotiated_rev < PD_REV30 ?
+ PD_MSG_CTRL_REJECT :
+ PD_MSG_CTRL_NOT_SUPP,
+ NONE_AMS);
+ tcpm_log(port, "Unrecognized ctrl message type %#x", type);
break;
}
}
@@ -2089,11 +2609,13 @@ static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
unsigned int data_size = pd_ext_header_data_size_le(msg->ext_msg.header);
if (!(msg->ext_msg.header & PD_EXT_HDR_CHUNKED)) {
+ tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
tcpm_log(port, "Unchunked extended messages unsupported");
return;
}
if (data_size > PD_EXT_MAX_CHUNK_DATA) {
+ tcpm_pd_handle_state(port, CHUNK_NOT_SUPP, NONE_AMS, PD_T_CHUNK_NOT_SUPP);
tcpm_log(port, "Chunk handling not yet supported");
return;
}
@@ -2106,16 +2628,18 @@ static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
*/
if (msg->ext_msg.data[USB_PD_EXT_SDB_EVENT_FLAGS] &
USB_PD_EXT_SDB_PPS_EVENTS)
- tcpm_set_state(port, GET_PPS_STATUS_SEND, 0);
+ tcpm_pd_handle_state(port, GET_PPS_STATUS_SEND,
+ GETTING_SOURCE_SINK_STATUS, 0);
+
else
- tcpm_set_state(port, ready_state(port), 0);
+ tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
break;
case PD_EXT_PPS_STATUS:
/*
* For now the PPS status message is used to clear events
* and nothing more.
*/
- tcpm_set_state(port, ready_state(port), 0);
+ tcpm_pd_handle_state(port, ready_state(port), NONE_AMS, 0);
break;
case PD_EXT_SOURCE_CAP_EXT:
case PD_EXT_GET_BATT_CAP:
@@ -2129,10 +2653,11 @@ static void tcpm_pd_ext_msg_request(struct tcpm_port *port,
case PD_EXT_FW_UPDATE_RESPONSE:
case PD_EXT_COUNTRY_INFO:
case PD_EXT_COUNTRY_CODES:
- tcpm_queue_message(port, PD_MSG_CTRL_NOT_SUPP);
+ tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
break;
default:
- tcpm_log(port, "Unhandled extended message type %#x", type);
+ tcpm_pd_handle_msg(port, PD_MSG_CTRL_NOT_SUPP, NONE_AMS);
+ tcpm_log(port, "Unrecognized extended message type %#x", type);
break;
}
}
@@ -2245,7 +2770,12 @@ static bool tcpm_send_queued_message(struct tcpm_port *port)
tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP);
break;
case PD_MSG_DATA_SINK_CAP:
- tcpm_pd_send_sink_caps(port);
+ ret = tcpm_pd_send_sink_caps(port);
+ if (ret < 0) {
+ tcpm_log(port, "Unable to send snk caps, ret=%d", ret);
+ tcpm_set_state(port, SNK_SOFT_RESET, 0);
+ }
+ tcpm_ams_finish(port);
break;
case PD_MSG_DATA_SOURCE_CAP:
ret = tcpm_pd_send_source_caps(port);
@@ -2255,8 +2785,11 @@ static bool tcpm_send_queued_message(struct tcpm_port *port)
ret);
tcpm_set_state(port, SOFT_RESET_SEND, 0);
} else if (port->pwr_role == TYPEC_SOURCE) {
+ tcpm_ams_finish(port);
tcpm_set_state(port, HARD_RESET_SEND,
PD_T_SENDER_RESPONSE);
+ } else {
+ tcpm_ams_finish(port);
}
break;
default:
@@ -2776,13 +3309,6 @@ static bool tcpm_start_toggling(struct tcpm_port *port, enum typec_cc_status cc)
return ret == 0;
}
-static void tcpm_set_cc(struct tcpm_port *port, enum typec_cc_status cc)
-{
- tcpm_log(port, "cc:=%d", cc);
- port->cc_req = cc;
- port->tcpc->set_cc(port->tcpc, cc);
-}
-
static int tcpm_init_vbus(struct tcpm_port *port)
{
int ret;
@@ -2904,6 +3430,14 @@ static void tcpm_unregister_altmodes(struct tcpm_port *port)
memset(modep, 0, sizeof(*modep));
}
+static void tcpm_set_partner_usb_comm_capable(struct tcpm_port *port, bool capable)
+{
+ tcpm_log(port, "Setting usb_comm capable %s", capable ? "true" : "false");
+
+ if (port->tcpc->set_partner_usb_comm_capable)
+ port->tcpc->set_partner_usb_comm_capable(port->tcpc, capable);
+}
+
static void tcpm_reset_port(struct tcpm_port *port)
{
int ret;
@@ -2912,11 +3446,15 @@ static void tcpm_reset_port(struct tcpm_port *port)
ret = port->tcpc->enable_auto_vbus_discharge(port->tcpc, false);
tcpm_log_force(port, "Disable vbus discharge ret:%d", ret);
}
+ port->in_ams = false;
+ port->ams = NONE_AMS;
+ port->vdm_sm_running = false;
tcpm_unregister_altmodes(port);
tcpm_typec_disconnect(port);
port->attached = false;
port->pd_capable = false;
port->pps_data.supported = false;
+ tcpm_set_partner_usb_comm_capable(port, false);
/*
* First Rx ID should be 0; set this to a sentinel of -1 so that
@@ -3090,6 +3628,7 @@ static void run_state_machine(struct tcpm_port *port)
int ret;
enum typec_pwr_opmode opmode;
unsigned int msecs;
+ enum tcpm_state upcoming_state;
port->enter_state = port->state;
switch (port->state) {
@@ -3190,7 +3729,12 @@ static void run_state_machine(struct tcpm_port *port)
port->message_id = 0;
port->rx_msgid = -1;
port->explicit_contract = false;
- tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
+ /* SNK -> SRC POWER/FAST_ROLE_SWAP finished */
+ if (port->ams == POWER_ROLE_SWAP ||
+ port->ams == FAST_ROLE_SWAP)
+ tcpm_ams_finish(port);
+ port->upcoming_state = SRC_SEND_CAPABILITIES;
+ tcpm_ams_start(port, POWER_NEGOTIATION);
break;
case SRC_SEND_CAPABILITIES:
port->caps_count++;
@@ -3251,6 +3795,8 @@ static void run_state_machine(struct tcpm_port *port)
}
} else {
tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
+ tcpm_set_partner_usb_comm_capable(port,
+ !!(port->sink_request & RDO_USB_COMM));
tcpm_set_state(port, SRC_TRANSITION_SUPPLY,
PD_T_SRC_TRANSITION);
}
@@ -3272,6 +3818,24 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_swap_complete(port, 0);
tcpm_typec_connect(port);
+ if (port->ams != NONE_AMS)
+ tcpm_ams_finish(port);
+ if (port->next_ams != NONE_AMS) {
+ port->ams = port->next_ams;
+ port->next_ams = NONE_AMS;
+ }
+
+ /*
+ * If previous AMS is interrupted, switch to the upcoming
+ * state.
+ */
+ if (port->upcoming_state != INVALID_STATE) {
+ upcoming_state = port->upcoming_state;
+ port->upcoming_state = INVALID_STATE;
+ tcpm_set_state(port, upcoming_state, 0);
+ break;
+ }
+
tcpm_check_send_discover(port);
/*
* 6.3.5
@@ -3389,6 +3953,12 @@ static void run_state_machine(struct tcpm_port *port)
port->message_id = 0;
port->rx_msgid = -1;
port->explicit_contract = false;
+
+ if (port->ams == POWER_ROLE_SWAP ||
+ port->ams == FAST_ROLE_SWAP)
+ /* SRC -> SNK POWER/FAST_ROLE_SWAP finished */
+ tcpm_ams_finish(port);
+
tcpm_set_state(port, SNK_DISCOVERY, 0);
break;
case SNK_DISCOVERY:
@@ -3437,7 +4007,7 @@ static void run_state_machine(struct tcpm_port *port)
*/
if (port->vbus_never_low) {
port->vbus_never_low = false;
- tcpm_set_state(port, SOFT_RESET_SEND,
+ tcpm_set_state(port, SNK_SOFT_RESET,
PD_T_SINK_WAIT_CAP);
} else {
tcpm_set_state(port, hard_reset_state(port),
@@ -3446,6 +4016,8 @@ static void run_state_machine(struct tcpm_port *port)
break;
case SNK_NEGOTIATE_CAPABILITIES:
port->pd_capable = true;
+ tcpm_set_partner_usb_comm_capable(port,
+ !!(port->source_caps[0] & PDO_FIXED_USB_COMM));
port->hard_reset_count = 0;
ret = tcpm_pd_send_request(port);
if (ret < 0) {
@@ -3490,9 +4062,28 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_swap_complete(port, 0);
tcpm_typec_connect(port);
- tcpm_check_send_discover(port);
mod_enable_frs_delayed_work(port, 0);
tcpm_pps_complete(port, port->pps_status);
+
+ if (port->ams != NONE_AMS)
+ tcpm_ams_finish(port);
+ if (port->next_ams != NONE_AMS) {
+ port->ams = port->next_ams;
+ port->next_ams = NONE_AMS;
+ }
+
+ /*
+ * If previous AMS is interrupted, switch to the upcoming
+ * state.
+ */
+ if (port->upcoming_state != INVALID_STATE) {
+ upcoming_state = port->upcoming_state;
+ port->upcoming_state = INVALID_STATE;
+ tcpm_set_state(port, upcoming_state, 0);
+ break;
+ }
+
+ tcpm_check_send_discover(port);
power_supply_changed(port->psy);
break;
@@ -3513,8 +4104,14 @@ static void run_state_machine(struct tcpm_port *port)
/* Hard_Reset states */
case HARD_RESET_SEND:
- tcpm_pd_transmit(port, TCPC_TX_HARD_RESET, NULL);
- tcpm_set_state(port, HARD_RESET_START, 0);
+ if (port->ams != NONE_AMS)
+ tcpm_ams_finish(port);
+ /*
+ * State machine will be directed to HARD_RESET_START,
+ * thus set upcoming_state to INVALID_STATE.
+ */
+ port->upcoming_state = INVALID_STATE;
+ tcpm_ams_start(port, HARD_RESET);
break;
case HARD_RESET_START:
port->sink_cap_done = false;
@@ -3558,6 +4155,8 @@ static void run_state_machine(struct tcpm_port *port)
case SRC_HARD_RESET_VBUS_ON:
tcpm_set_vconn(port, true);
tcpm_set_vbus(port, true);
+ if (port->ams == HARD_RESET)
+ tcpm_ams_finish(port);
port->tcpc->set_pd_rx(port->tcpc, true);
tcpm_set_attached_state(port, true);
tcpm_set_state(port, SRC_UNATTACHED, PD_T_PS_SOURCE_ON);
@@ -3579,6 +4178,8 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_set_state(port, SNK_HARD_RESET_SINK_ON, PD_T_SAFE_0V);
break;
case SNK_HARD_RESET_WAIT_VBUS:
+ if (port->ams == HARD_RESET)
+ tcpm_ams_finish(port);
/* Assume we're disconnected if VBUS doesn't come back. */
tcpm_set_state(port, SNK_UNATTACHED,
PD_T_SRC_RECOVER_MAX + PD_T_SRC_TURN_ON);
@@ -3606,6 +4207,8 @@ static void run_state_machine(struct tcpm_port *port)
5000);
tcpm_set_charge(port, true);
}
+ if (port->ams == HARD_RESET)
+ tcpm_ams_finish(port);
tcpm_set_attached_state(port, true);
tcpm_set_auto_vbus_discharge_threshold(port, TYPEC_PWR_MODE_USB, false, VSAFE5V);
tcpm_set_state(port, SNK_STARTUP, 0);
@@ -3616,10 +4219,20 @@ static void run_state_machine(struct tcpm_port *port)
port->message_id = 0;
port->rx_msgid = -1;
tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
- if (port->pwr_role == TYPEC_SOURCE)
- tcpm_set_state(port, SRC_SEND_CAPABILITIES, 0);
- else
+ tcpm_ams_finish(port);
+ if (port->pwr_role == TYPEC_SOURCE) {
+ port->upcoming_state = SRC_SEND_CAPABILITIES;
+ tcpm_ams_start(port, POWER_NEGOTIATION);
+ } else {
tcpm_set_state(port, SNK_WAIT_CAPABILITIES, 0);
+ }
+ break;
+ case SRC_SOFT_RESET_WAIT_SNK_TX:
+ case SNK_SOFT_RESET:
+ if (port->ams != NONE_AMS)
+ tcpm_ams_finish(port);
+ port->upcoming_state = SOFT_RESET_SEND;
+ tcpm_ams_start(port, SOFT_RESET_AMS);
break;
case SOFT_RESET_SEND:
port->message_id = 0;
@@ -3639,10 +4252,14 @@ static void run_state_machine(struct tcpm_port *port)
break;
case DR_SWAP_ACCEPT:
tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
+ /* Set VDM state machine running flag ASAP */
+ if (port->data_role == TYPEC_DEVICE && port->send_discover)
+ port->vdm_sm_running = true;
tcpm_set_state_cond(port, DR_SWAP_CHANGE_DR, 0);
break;
case DR_SWAP_SEND_TIMEOUT:
tcpm_swap_complete(port, -ETIMEDOUT);
+ tcpm_ams_finish(port);
tcpm_set_state(port, ready_state(port), 0);
break;
case DR_SWAP_CHANGE_DR:
@@ -3655,6 +4272,7 @@ static void run_state_machine(struct tcpm_port *port)
TYPEC_HOST);
port->send_discover = true;
}
+ tcpm_ams_finish(port);
tcpm_set_state(port, ready_state(port), 0);
break;
@@ -3782,6 +4400,7 @@ static void run_state_machine(struct tcpm_port *port)
case VCONN_SWAP_ACCEPT:
tcpm_pd_send_control(port, PD_CTRL_ACCEPT);
+ tcpm_ams_finish(port);
tcpm_set_state(port, VCONN_SWAP_START, 0);
break;
case VCONN_SWAP_SEND:
@@ -3791,6 +4410,8 @@ static void run_state_machine(struct tcpm_port *port)
break;
case VCONN_SWAP_SEND_TIMEOUT:
tcpm_swap_complete(port, -ETIMEDOUT);
+ if (port->data_role == TYPEC_HOST && port->send_discover)
+ port->vdm_sm_running = true;
tcpm_set_state(port, ready_state(port), 0);
break;
case VCONN_SWAP_START:
@@ -3806,10 +4427,14 @@ static void run_state_machine(struct tcpm_port *port)
case VCONN_SWAP_TURN_ON_VCONN:
tcpm_set_vconn(port, true);
tcpm_pd_send_control(port, PD_CTRL_PS_RDY);
+ if (port->data_role == TYPEC_HOST && port->send_discover)
+ port->vdm_sm_running = true;
tcpm_set_state(port, ready_state(port), 0);
break;
case VCONN_SWAP_TURN_OFF_VCONN:
tcpm_set_vconn(port, false);
+ if (port->data_role == TYPEC_HOST && port->send_discover)
+ port->vdm_sm_running = true;
tcpm_set_state(port, ready_state(port), 0);
break;
@@ -3817,6 +4442,8 @@ static void run_state_machine(struct tcpm_port *port)
case PR_SWAP_CANCEL:
case VCONN_SWAP_CANCEL:
tcpm_swap_complete(port, port->swap_status);
+ if (port->data_role == TYPEC_HOST && port->send_discover)
+ port->vdm_sm_running = true;
if (port->pwr_role == TYPEC_SOURCE)
tcpm_set_state(port, SRC_READY, 0);
else
@@ -3886,6 +4513,25 @@ static void run_state_machine(struct tcpm_port *port)
tcpm_default_state(port),
port->vbus_present ? PD_T_PS_SOURCE_OFF : 0);
break;
+
+ /* AMS intermediate state */
+ case AMS_START:
+ if (port->upcoming_state == INVALID_STATE) {
+ tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ?
+ SRC_READY : SNK_READY, 0);
+ break;
+ }
+
+ upcoming_state = port->upcoming_state;
+ port->upcoming_state = INVALID_STATE;
+ tcpm_set_state(port, upcoming_state, 0);
+ break;
+
+ /* Chunk state */
+ case CHUNK_NOT_SUPP:
+ tcpm_pd_send_control(port, PD_CTRL_NOT_SUPP);
+ tcpm_set_state(port, port->pwr_role == TYPEC_SOURCE ? SRC_READY : SNK_READY, 0);
+ break;
default:
WARN(1, "Unexpected port state %d\n", port->state);
break;
@@ -4127,6 +4773,9 @@ static void _tcpm_pd_vbus_on(struct tcpm_port *port)
switch (port->state) {
case SNK_TRANSITION_SINK_VBUS:
port->explicit_contract = true;
+ /* Set the VDM flag ASAP */
+ if (port->data_role == TYPEC_HOST && port->send_discover)
+ port->vdm_sm_running = true;
tcpm_set_state(port, SNK_READY, 0);
break;
case SNK_DISCOVERY:
@@ -4262,6 +4911,17 @@ static void _tcpm_pd_vbus_off(struct tcpm_port *port)
/* Do nothing, waiting for sink detection */
break;
+ case SRC_STARTUP:
+ case SRC_SEND_CAPABILITIES:
+ case SRC_SEND_CAPABILITIES_TIMEOUT:
+ case SRC_NEGOTIATE_CAPABILITIES:
+ case SRC_TRANSITION_SUPPLY:
+ case SRC_READY:
+ case SRC_WAIT_NEW_CAPABILITIES:
+ /* Force to unattached state to re-initiate connection */
+ tcpm_set_state(port, SRC_UNATTACHED, 0);
+ break;
+
case PORT_RESET:
/*
* State set back to default mode once the timer completes.
@@ -4313,6 +4973,10 @@ static void _tcpm_pd_hard_reset(struct tcpm_port *port)
if (port->bist_request == BDO_MODE_TESTDATA && port->tcpc->set_bist_data)
port->tcpc->set_bist_data(port->tcpc, false);
+ if (port->ams != NONE_AMS)
+ port->ams = NONE_AMS;
+ if (port->hard_reset_count < PD_N_HARD_RESET_COUNT)
+ port->ams = HARD_RESET;
/*
* If we keep receiving hard reset requests, executing the hard reset
* must have failed. Revert to error recovery if that happens.
@@ -4363,10 +5027,16 @@ static void tcpm_pd_event_handler(struct kthread_work *work)
_tcpm_cc_change(port, cc1, cc2);
}
if (events & TCPM_FRS_EVENT) {
- if (port->state == SNK_READY)
- tcpm_set_state(port, FR_SWAP_SEND, 0);
- else
+ if (port->state == SNK_READY) {
+ int ret;
+
+ port->upcoming_state = FR_SWAP_SEND;
+ ret = tcpm_ams_start(port, FAST_ROLE_SWAP);
+ if (ret == -EAGAIN)
+ port->upcoming_state = INVALID_STATE;
+ } else {
tcpm_log(port, "Discarding FRS_SIGNAL! Not in sink ready");
+ }
}
if (events & TCPM_SOURCING_VBUS) {
tcpm_log(port, "sourcing vbus");
@@ -4435,6 +5105,7 @@ EXPORT_SYMBOL_GPL(tcpm_sourcing_vbus);
static void tcpm_enable_frs_work(struct kthread_work *work)
{
struct tcpm_port *port = container_of(work, struct tcpm_port, enable_frs);
+ int ret;
mutex_lock(&port->lock);
/* Not FRS capable */
@@ -4449,9 +5120,14 @@ static void tcpm_enable_frs_work(struct kthread_work *work)
if (port->state != SNK_READY || port->vdm_state != VDM_STATE_DONE || port->send_discover)
goto resched;
- tcpm_set_state(port, GET_SINK_CAP, 0);
- port->sink_cap_done = true;
-
+ port->upcoming_state = GET_SINK_CAP;
+ ret = tcpm_ams_start(port, GET_SINK_CAPABILITIES);
+ if (ret == -EAGAIN) {
+ port->upcoming_state = INVALID_STATE;
+ } else {
+ port->sink_cap_done = true;
+ goto unlock;
+ }
resched:
mod_enable_frs_delayed_work(port, GET_SINK_CAP_RETRY_MS);
unlock:
@@ -4501,7 +5177,12 @@ static int tcpm_dr_set(struct typec_port *p, enum typec_data_role data)
port->non_pd_role_swap = true;
tcpm_set_state(port, PORT_RESET, 0);
} else {
- tcpm_set_state(port, DR_SWAP_SEND, 0);
+ port->upcoming_state = DR_SWAP_SEND;
+ ret = tcpm_ams_start(port, DATA_ROLE_SWAP);
+ if (ret == -EAGAIN) {
+ port->upcoming_state = INVALID_STATE;
+ goto port_unlock;
+ }
}
port->swap_status = 0;
@@ -4547,10 +5228,16 @@ static int tcpm_pr_set(struct typec_port *p, enum typec_role role)
goto port_unlock;
}
+ port->upcoming_state = PR_SWAP_SEND;
+ ret = tcpm_ams_start(port, POWER_ROLE_SWAP);
+ if (ret == -EAGAIN) {
+ port->upcoming_state = INVALID_STATE;
+ goto port_unlock;
+ }
+
port->swap_status = 0;
port->swap_pending = true;
reinit_completion(&port->swap_complete);
- tcpm_set_state(port, PR_SWAP_SEND, 0);
mutex_unlock(&port->lock);
if (!wait_for_completion_timeout(&port->swap_complete,
@@ -4586,10 +5273,16 @@ static int tcpm_vconn_set(struct typec_port *p, enum typec_role role)
goto port_unlock;
}
+ port->upcoming_state = VCONN_SWAP_SEND;
+ ret = tcpm_ams_start(port, VCONN_SWAP);
+ if (ret == -EAGAIN) {
+ port->upcoming_state = INVALID_STATE;
+ goto port_unlock;
+ }
+
port->swap_status = 0;
port->swap_pending = true;
reinit_completion(&port->swap_complete);
- tcpm_set_state(port, VCONN_SWAP_SEND, 0);
mutex_unlock(&port->lock);
if (!wait_for_completion_timeout(&port->swap_complete,
@@ -4654,6 +5347,13 @@ static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
goto port_unlock;
}
+ port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
+ ret = tcpm_ams_start(port, POWER_NEGOTIATION);
+ if (ret == -EAGAIN) {
+ port->upcoming_state = INVALID_STATE;
+ goto port_unlock;
+ }
+
/* Round down operating current to align with PPS valid steps */
op_curr = op_curr - (op_curr % RDO_PROG_CURR_MA_STEP);
@@ -4661,7 +5361,6 @@ static int tcpm_pps_set_op_curr(struct tcpm_port *port, u16 op_curr)
port->pps_data.op_curr = op_curr;
port->pps_status = 0;
port->pps_pending = true;
- tcpm_set_state(port, SNK_NEGOTIATE_PPS_CAPABILITIES, 0);
mutex_unlock(&port->lock);
if (!wait_for_completion_timeout(&port->pps_complete,
@@ -4710,6 +5409,13 @@ static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt)
goto port_unlock;
}
+ port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
+ ret = tcpm_ams_start(port, POWER_NEGOTIATION);
+ if (ret == -EAGAIN) {
+ port->upcoming_state = INVALID_STATE;
+ goto port_unlock;
+ }
+
/* Round down output voltage to align with PPS valid steps */
out_volt = out_volt - (out_volt % RDO_PROG_VOLT_MV_STEP);
@@ -4717,7 +5423,6 @@ static int tcpm_pps_set_out_volt(struct tcpm_port *port, u16 out_volt)
port->pps_data.out_volt = out_volt;
port->pps_status = 0;
port->pps_pending = true;
- tcpm_set_state(port, SNK_NEGOTIATE_PPS_CAPABILITIES, 0);
mutex_unlock(&port->lock);
if (!wait_for_completion_timeout(&port->pps_complete,
@@ -4757,6 +5462,16 @@ static int tcpm_pps_activate(struct tcpm_port *port, bool activate)
goto port_unlock;
}
+ if (activate)
+ port->upcoming_state = SNK_NEGOTIATE_PPS_CAPABILITIES;
+ else
+ port->upcoming_state = SNK_NEGOTIATE_CAPABILITIES;
+ ret = tcpm_ams_start(port, POWER_NEGOTIATION);
+ if (ret == -EAGAIN) {
+ port->upcoming_state = INVALID_STATE;
+ goto port_unlock;
+ }
+
reinit_completion(&port->pps_complete);
port->pps_status = 0;
port->pps_pending = true;
@@ -4765,9 +5480,6 @@ static int tcpm_pps_activate(struct tcpm_port *port, bool activate)
if (activate) {
port->pps_data.out_volt = port->supply_voltage;
port->pps_data.op_curr = port->current_limit;
- tcpm_set_state(port, SNK_NEGOTIATE_PPS_CAPABILITIES, 0);
- } else {
- tcpm_set_state(port, SNK_NEGOTIATE_CAPABILITIES, 0);
}
mutex_unlock(&port->lock);
diff --git a/drivers/usb/typec/ucsi/Kconfig b/drivers/usb/typec/ucsi/Kconfig
index 2192d7c4fec7..5e9b37b3f25e 100644
--- a/drivers/usb/typec/ucsi/Kconfig
+++ b/drivers/usb/typec/ucsi/Kconfig
@@ -3,6 +3,7 @@
config TYPEC_UCSI
tristate "USB Type-C Connector System Software Interface driver"
depends on !CPU_BIG_ENDIAN
+ depends on USB_ROLE_SWITCH || !USB_ROLE_SWITCH
help
USB Type-C Connector System Software Interface (UCSI) is a
specification for an interface that allows the operating system to
diff --git a/drivers/usb/typec/ucsi/ucsi.c b/drivers/usb/typec/ucsi/ucsi.c
index f02958927cbd..ca3f4194ad90 100644
--- a/drivers/usb/typec/ucsi/ucsi.c
+++ b/drivers/usb/typec/ucsi/ucsi.c
@@ -588,6 +588,7 @@ static void ucsi_unregister_partner(struct ucsi_connector *con)
static void ucsi_partner_change(struct ucsi_connector *con)
{
+ enum usb_role u_role = USB_ROLE_NONE;
int ret;
if (!con->partner)
@@ -595,11 +596,14 @@ static void ucsi_partner_change(struct ucsi_connector *con)
switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_UFP:
- case UCSI_CONSTAT_PARTNER_TYPE_CABLE:
case UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP:
+ u_role = USB_ROLE_HOST;
+ fallthrough;
+ case UCSI_CONSTAT_PARTNER_TYPE_CABLE:
typec_set_data_role(con->port, TYPEC_HOST);
break;
case UCSI_CONSTAT_PARTNER_TYPE_DFP:
+ u_role = USB_ROLE_DEVICE;
typec_set_data_role(con->port, TYPEC_DEVICE);
break;
default:
@@ -610,6 +614,15 @@ static void ucsi_partner_change(struct ucsi_connector *con)
if (!completion_done(&con->complete))
complete(&con->complete);
+ /* Only notify USB controller if partner supports USB data */
+ if (!(UCSI_CONSTAT_PARTNER_FLAGS(con->status.flags) & UCSI_CONSTAT_PARTNER_FLAG_USB))
+ u_role = USB_ROLE_NONE;
+
+ ret = usb_role_switch_set_role(con->usb_role_sw, u_role);
+ if (ret)
+ dev_err(con->ucsi->dev, "con:%d: failed to set usb role:%d\n",
+ con->num, u_role);
+
/* Can't rely on Partner Flags field. Always checking the alt modes. */
ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_SOP);
if (ret)
@@ -628,6 +641,7 @@ static void ucsi_handle_connector_change(struct work_struct *work)
struct ucsi_connector_status pre_ack_status;
struct ucsi_connector_status post_ack_status;
enum typec_role role;
+ enum usb_role u_role = USB_ROLE_NONE;
u16 inferred_changes;
u16 changed_flags;
u64 command;
@@ -753,11 +767,14 @@ static void ucsi_handle_connector_change(struct work_struct *work)
switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_UFP:
- case UCSI_CONSTAT_PARTNER_TYPE_CABLE:
case UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP:
+ u_role = USB_ROLE_HOST;
+ fallthrough;
+ case UCSI_CONSTAT_PARTNER_TYPE_CABLE:
typec_set_data_role(con->port, TYPEC_HOST);
break;
case UCSI_CONSTAT_PARTNER_TYPE_DFP:
+ u_role = USB_ROLE_DEVICE;
typec_set_data_role(con->port, TYPEC_DEVICE);
break;
default:
@@ -770,6 +787,16 @@ static void ucsi_handle_connector_change(struct work_struct *work)
ucsi_unregister_partner(con);
ucsi_port_psy_changed(con);
+
+ /* Only notify USB controller if partner supports USB data */
+ if (!(UCSI_CONSTAT_PARTNER_FLAGS(con->status.flags) &
+ UCSI_CONSTAT_PARTNER_FLAG_USB))
+ u_role = USB_ROLE_NONE;
+
+ ret = usb_role_switch_set_role(con->usb_role_sw, u_role);
+ if (ret)
+ dev_err(ucsi->dev, "con:%d: failed to set usb role:%d\n",
+ con->num, u_role);
}
if (con->status.change & UCSI_CONSTAT_PARTNER_CHANGE)
@@ -988,6 +1015,7 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
struct ucsi_connector *con = &ucsi->connector[index];
struct typec_capability *cap = &con->typec_cap;
enum typec_accessory *accessory = cap->accessory;
+ enum usb_role u_role = USB_ROLE_NONE;
u64 command;
int ret;
@@ -1066,11 +1094,14 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
switch (UCSI_CONSTAT_PARTNER_TYPE(con->status.flags)) {
case UCSI_CONSTAT_PARTNER_TYPE_UFP:
- case UCSI_CONSTAT_PARTNER_TYPE_CABLE:
case UCSI_CONSTAT_PARTNER_TYPE_CABLE_AND_UFP:
+ u_role = USB_ROLE_HOST;
+ fallthrough;
+ case UCSI_CONSTAT_PARTNER_TYPE_CABLE:
typec_set_data_role(con->port, TYPEC_HOST);
break;
case UCSI_CONSTAT_PARTNER_TYPE_DFP:
+ u_role = USB_ROLE_DEVICE;
typec_set_data_role(con->port, TYPEC_DEVICE);
break;
default:
@@ -1086,6 +1117,24 @@ static int ucsi_register_port(struct ucsi *ucsi, int index)
ucsi_port_psy_changed(con);
}
+ con->usb_role_sw = fwnode_usb_role_switch_get(cap->fwnode);
+ if (IS_ERR(con->usb_role_sw)) {
+ dev_err(ucsi->dev, "con%d: failed to get usb role switch\n",
+ con->num);
+ con->usb_role_sw = NULL;
+ }
+
+ /* Only notify USB controller if partner supports USB data */
+ if (!(UCSI_CONSTAT_PARTNER_FLAGS(con->status.flags) & UCSI_CONSTAT_PARTNER_FLAG_USB))
+ u_role = USB_ROLE_NONE;
+
+ ret = usb_role_switch_set_role(con->usb_role_sw, u_role);
+ if (ret) {
+ dev_err(ucsi->dev, "con:%d: failed to set usb role:%d\n",
+ con->num, u_role);
+ ret = 0;
+ }
+
if (con->partner) {
ret = ucsi_register_altmodes(con, UCSI_RECIPIENT_SOP);
if (ret) {
diff --git a/drivers/usb/typec/ucsi/ucsi.h b/drivers/usb/typec/ucsi/ucsi.h
index dd9ba60ab4a3..3920e20a9e9e 100644
--- a/drivers/usb/typec/ucsi/ucsi.h
+++ b/drivers/usb/typec/ucsi/ucsi.h
@@ -8,6 +8,7 @@
#include <linux/power_supply.h>
#include <linux/types.h>
#include <linux/usb/typec.h>
+#include <linux/usb/role.h>
/* -------------------------------------------------------------------------- */
@@ -331,6 +332,8 @@ struct ucsi_connector {
u32 rdo;
u32 src_pdos[UCSI_MAX_PDOS];
int num_pdos;
+
+ struct usb_role_switch *usb_role_sw;
};
int ucsi_send_command(struct ucsi *ucsi, u64 command,
diff --git a/drivers/usb/usbip/stub_main.c b/drivers/usb/usbip/stub_main.c
index c1c0bbc9f8b1..77a5b3f8736a 100644
--- a/drivers/usb/usbip/stub_main.c
+++ b/drivers/usb/usbip/stub_main.c
@@ -23,7 +23,7 @@ struct kmem_cache *stub_priv_cache;
*/
#define MAX_BUSID 16
static struct bus_id_priv busid_table[MAX_BUSID];
-static spinlock_t busid_table_lock;
+static DEFINE_SPINLOCK(busid_table_lock);
static void init_busid_table(void)
{
@@ -35,8 +35,6 @@ static void init_busid_table(void)
*/
memset(busid_table, 0, sizeof(busid_table));
- spin_lock_init(&busid_table_lock);
-
for (i = 0; i < MAX_BUSID; i++)
spin_lock_init(&busid_table[i].busid_lock);
}
diff --git a/drivers/usb/usbip/usbip_common.h b/drivers/usb/usbip/usbip_common.h
index 8be857a4fa13..d60ce17d3dd2 100644
--- a/drivers/usb/usbip/usbip_common.h
+++ b/drivers/usb/usbip/usbip_common.h
@@ -277,6 +277,10 @@ struct usbip_device {
void (*reset)(struct usbip_device *);
void (*unusable)(struct usbip_device *);
} eh_ops;
+
+#ifdef CONFIG_KCOV
+ u64 kcov_handle;
+#endif
};
#define kthread_get_run(threadfn, data, namefmt, ...) \
@@ -337,4 +341,29 @@ static inline int interface_to_devnum(struct usb_interface *interface)
return udev->devnum;
}
+#ifdef CONFIG_KCOV
+
+static inline void usbip_kcov_handle_init(struct usbip_device *ud)
+{
+ ud->kcov_handle = kcov_common_handle();
+}
+
+static inline void usbip_kcov_remote_start(struct usbip_device *ud)
+{
+ kcov_remote_start_common(ud->kcov_handle);
+}
+
+static inline void usbip_kcov_remote_stop(void)
+{
+ kcov_remote_stop();
+}
+
+#else /* CONFIG_KCOV */
+
+static inline void usbip_kcov_handle_init(struct usbip_device *ud) { }
+static inline void usbip_kcov_remote_start(struct usbip_device *ud) { }
+static inline void usbip_kcov_remote_stop(void) { }
+
+#endif /* CONFIG_KCOV */
+
#endif /* __USBIP_COMMON_H */
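The CONFIG_KCOV block above wraps the kernel's remote coverage interface: the KCOV handle of the task that attaches a device is recorded once, and the receive kthread then brackets each unit of work with it, so coverage gathered in the kthread is attributed to the userspace task that triggered it (the vhci_sysfs.c and vhci_rx.c hunks below wire this up). A rough sketch of the same pattern for a generic worker thread; my_dev, my_dev_bind() and my_dev_worker() are made-up names, while kcov_common_handle(), kcov_remote_start_common() and kcov_remote_stop() are the existing <linux/kcov.h> helpers:

#include <linux/kcov.h>
#include <linux/kthread.h>
#include <linux/types.h>

/* Hypothetical per-connection state, one KCOV handle per attached device. */
struct my_dev {
	u64 kcov_handle;
};

/* Runs in the context of the task handing the device to the worker. */
static void my_dev_bind(struct my_dev *dev)
{
	dev->kcov_handle = kcov_common_handle();	/* handle of current task */
}

/*
 * Worker kthread: coverage collected inside the start/stop window is
 * credited to the task that called my_dev_bind(), not to the kthread.
 */
static int my_dev_worker(void *data)
{
	struct my_dev *dev = data;

	while (!kthread_should_stop()) {
		kcov_remote_start_common(dev->kcov_handle);
		/* ... process one unit of work for this device ... */
		kcov_remote_stop();
	}
	return 0;
}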
diff --git a/drivers/usb/usbip/vhci_hcd.c b/drivers/usb/usbip/vhci_hcd.c
index 66cde5e5f796..3209b5ddd30c 100644
--- a/drivers/usb/usbip/vhci_hcd.c
+++ b/drivers/usb/usbip/vhci_hcd.c
@@ -396,6 +396,8 @@ static int vhci_hub_control(struct usb_hcd *hcd, u16 typeReq, u16 wValue,
default:
usbip_dbg_vhci_rh(" ClearPortFeature: default %x\n",
wValue);
+ if (wValue >= 32)
+ goto error;
vhci_hcd->port_status[rhport] &= ~(1 << wValue);
break;
}
diff --git a/drivers/usb/usbip/vhci_rx.c b/drivers/usb/usbip/vhci_rx.c
index 266024cbb64f..7f2d1c241559 100644
--- a/drivers/usb/usbip/vhci_rx.c
+++ b/drivers/usb/usbip/vhci_rx.c
@@ -261,7 +261,9 @@ int vhci_rx_loop(void *data)
if (usbip_event_happened(ud))
break;
+ usbip_kcov_remote_start(ud);
vhci_rx_pdu(ud);
+ usbip_kcov_remote_stop();
}
return 0;
diff --git a/drivers/usb/usbip/vhci_sysfs.c b/drivers/usb/usbip/vhci_sysfs.c
index be37aec250c2..96e5371dc335 100644
--- a/drivers/usb/usbip/vhci_sysfs.c
+++ b/drivers/usb/usbip/vhci_sysfs.c
@@ -383,6 +383,7 @@ static ssize_t attach_store(struct device *dev, struct device_attribute *attr,
vdev->ud.sockfd = sockfd;
vdev->ud.tcp_socket = socket;
vdev->ud.status = VDEV_ST_NOTASSIGNED;
+ usbip_kcov_handle_init(&vdev->ud);
spin_unlock(&vdev->ud.lock);
spin_unlock_irqrestore(&vhci->lock, flags);