Diffstat (limited to 'drivers/pci')
-rw-r--r--  drivers/pci/Kconfig                                 |  62
-rw-r--r--  drivers/pci/controller/Kconfig                      |  12
-rw-r--r--  drivers/pci/controller/Makefile                     |   1
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-ep.c    |   1
-rw-r--r--  drivers/pci/controller/cadence/pcie-cadence-host.c  |   8
-rw-r--r--  drivers/pci/controller/pci-aardvark.c               | 108
-rw-r--r--  drivers/pci/controller/pci-v3-semi.c                |   1
-rw-r--r--  drivers/pci/controller/pcie-brcmstb.c               | 444
-rw-r--r--  drivers/pci/controller/pcie-hisi-error.c            | 327
-rw-r--r--  drivers/pci/ecam.c                                  |  10
-rw-r--r--  drivers/pci/hotplug/pciehp_ctrl.c                   |   4
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c                    |  15
-rw-r--r--  drivers/pci/hotplug/rpadlpar_core.c                 |   8
-rw-r--r--  drivers/pci/hotplug/shpchp_ctrl.c                   |   1
-rw-r--r--  drivers/pci/p2pdma.c                                |  10
-rw-r--r--  drivers/pci/pci-acpi.c                              |   6
-rw-r--r--  drivers/pci/pci-bridge-emul.c                       |   4
-rw-r--r--  drivers/pci/pci-driver.c                            |  26
-rw-r--r--  drivers/pci/pci-pf-stub.c                           |  14
-rw-r--r--  drivers/pci/pci-sysfs.c                             |   7
-rw-r--r--  drivers/pci/pci.c                                   |  54
-rw-r--r--  drivers/pci/pci.h                                   |   9
-rw-r--r--  drivers/pci/pcie/aspm.c                             | 294
-rw-r--r--  drivers/pci/pcie/bw_notification.c                  |   3
-rw-r--r--  drivers/pci/pcie/dpc.c                              |   7
-rw-r--r--  drivers/pci/probe.c                                 |   3
-rw-r--r--  drivers/pci/quirks.c                                |  78
27 files changed, 1125 insertions, 392 deletions
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index 4bef5c2bae9f..d323b25ae27e 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -187,6 +187,68 @@ config PCI_HYPERV
The PCI device frontend driver allows the kernel to import arbitrary
PCI devices from a PCI backend to support PCI driver domains.
+choice
+ prompt "PCI Express hierarchy optimization setting"
+ default PCIE_BUS_DEFAULT
+ depends on PCI && EXPERT
+ help
+ MPS (Max Payload Size) and MRRS (Max Read Request Size) are PCIe
+ device parameters that affect performance and the ability to
+ support hotplug and peer-to-peer DMA.
+
+ The following choices set the MPS and MRRS optimization strategy
+ at compile-time. The choices are the same as those offered for
+ the kernel command-line parameter 'pci', i.e.,
+ 'pci=pcie_bus_tune_off', 'pci=pcie_bus_safe',
+ 'pci=pcie_bus_perf', and 'pci=pcie_bus_peer2peer'.
+
+ This is a compile-time setting and can be overridden by the above
+ command-line parameters. If unsure, choose PCIE_BUS_DEFAULT.
+
+config PCIE_BUS_TUNE_OFF
+ bool "Tune Off"
+ depends on PCI
+ help
+ Use the BIOS defaults; don't touch MPS at all. This is the same
+ as booting with 'pci=pcie_bus_tune_off'.
+
+config PCIE_BUS_DEFAULT
+ bool "Default"
+ depends on PCI
+ help
+ Default choice; ensure that the MPS matches the upstream bridge.
+
+config PCIE_BUS_SAFE
+ bool "Safe"
+ depends on PCI
+ help
+ Use largest MPS that boot-time devices support. If you have a
+ closed system with no possibility of adding new devices, this
+ will use the largest MPS that's supported by all devices. This
+ is the same as booting with 'pci=pcie_bus_safe'.
+
+config PCIE_BUS_PERFORMANCE
+ bool "Performance"
+ depends on PCI
+ help
+ Use MPS and MRRS for best performance. Ensure that a given
+ device's MPS is no larger than its parent MPS, which allows us to
+ keep all switches/bridges to the max MPS supported by their
+ parent. This is the same as booting with 'pci=pcie_bus_perf'.
+
+config PCIE_BUS_PEER2PEER
+ bool "Peer2peer"
+ depends on PCI
+ help
+ Set MPS = 128 for all devices. MPS configuration effected by the
+ other options could cause the MPS on one root port to be
+ different from the MPS on another, which may cause
+ hot-added devices or peer-to-peer DMA to fail. Set MPS to the
+ smallest possible value (128B) system-wide to avoid these issues.
+ This is the same as booting with 'pci=pcie_bus_peer2peer'.
+
+endchoice
+
source "drivers/pci/hotplug/Kconfig"
source "drivers/pci/controller/Kconfig"
source "drivers/pci/endpoint/Kconfig"
diff --git a/drivers/pci/controller/Kconfig b/drivers/pci/controller/Kconfig
index f18c3725ef80..6ebe69dd1229 100644
--- a/drivers/pci/controller/Kconfig
+++ b/drivers/pci/controller/Kconfig
@@ -12,7 +12,7 @@ config PCI_MVEBU
select PCI_BRIDGE_EMUL
config PCI_AARDVARK
- bool "Aardvark PCIe controller"
+ tristate "Aardvark PCIe controller"
depends on (ARCH_MVEBU && ARM64) || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
@@ -270,9 +270,10 @@ config VMD
config PCIE_BRCMSTB
tristate "Broadcom Brcmstb PCIe host controller"
- depends on ARCH_BCM2835 || COMPILE_TEST
+ depends on ARCH_BRCMSTB || ARCH_BCM2835 || COMPILE_TEST
depends on OF
depends on PCI_MSI_IRQ_DOMAIN
+ default ARCH_BRCMSTB
help
Say Y here to enable PCIe host controller support for
Broadcom STB based SoCs, like the Raspberry Pi 4.
@@ -294,6 +295,13 @@ config PCI_LOONGSON
Say Y here if you want to enable PCI controller support on
Loongson systems.
+config PCIE_HISI_ERR
+ depends on ACPI_APEI_GHES && (ARM64 || COMPILE_TEST)
+ bool "HiSilicon HIP PCIe controller error handling driver"
+ help
+ Say Y here if you want error handling support for errors
+ reported by the PCIe controllers on HiSilicon HIP SoCs.
+
source "drivers/pci/controller/dwc/Kconfig"
source "drivers/pci/controller/mobiveil/Kconfig"
source "drivers/pci/controller/cadence/Kconfig"
diff --git a/drivers/pci/controller/Makefile b/drivers/pci/controller/Makefile
index bcdbf49ab1e4..04c6edc285c5 100644
--- a/drivers/pci/controller/Makefile
+++ b/drivers/pci/controller/Makefile
@@ -31,6 +31,7 @@ obj-$(CONFIG_PCIE_TANGO_SMP8759) += pcie-tango.o
obj-$(CONFIG_VMD) += vmd.o
obj-$(CONFIG_PCIE_BRCMSTB) += pcie-brcmstb.o
obj-$(CONFIG_PCI_LOONGSON) += pci-loongson.o
+obj-$(CONFIG_PCIE_HISI_ERR) += pcie-hisi-error.o
# pcie-hisi.o quirks are needed even without CONFIG_PCIE_DW
obj-y += dwc/
obj-y += mobiveil/
diff --git a/drivers/pci/controller/cadence/pcie-cadence-ep.c b/drivers/pci/controller/cadence/pcie-cadence-ep.c
index 254a3e1eff50..84cc58dc8512 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-ep.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-ep.c
@@ -328,7 +328,6 @@ static int cdns_pcie_ep_send_legacy_irq(struct cdns_pcie_ep *ep, u8 fn, u8 intx)
cdns_pcie_ep_assert_intx(ep, fn, intx, true);
/*
* The mdelay() value was taken from dra7xx_pcie_raise_legacy_irq()
- * from drivers/pci/dwc/pci-dra7xx.c
*/
mdelay(1);
cdns_pcie_ep_assert_intx(ep, fn, intx, false);
diff --git a/drivers/pci/controller/cadence/pcie-cadence-host.c b/drivers/pci/controller/cadence/pcie-cadence-host.c
index 4550e0d469ca..811c1cb2e8de 100644
--- a/drivers/pci/controller/cadence/pcie-cadence-host.c
+++ b/drivers/pci/controller/cadence/pcie-cadence-host.c
@@ -337,7 +337,7 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
struct resource_entry *entry;
u64 cpu_addr = cfg_res->start;
u32 addr0, addr1, desc1;
- int r, err, busnr = 0;
+ int r, busnr = 0;
entry = resource_list_first_type(&bridge->windows, IORESOURCE_BUS);
if (entry)
@@ -383,11 +383,7 @@ static int cdns_pcie_host_init_address_translation(struct cdns_pcie_rc *rc)
r++;
}
- err = cdns_pcie_host_map_dma_ranges(rc);
- if (err)
- return err;
-
- return 0;
+ return cdns_pcie_host_map_dma_ranges(rc);
}
static int cdns_pcie_host_init(struct device *dev,
diff --git a/drivers/pci/controller/pci-aardvark.c b/drivers/pci/controller/pci-aardvark.c
index 1559f79e63b6..0be485a25327 100644
--- a/drivers/pci/controller/pci-aardvark.c
+++ b/drivers/pci/controller/pci-aardvark.c
@@ -9,11 +9,12 @@
*/
#include <linux/delay.h>
-#include <linux/gpio.h>
+#include <linux/gpio/consumer.h>
#include <linux/interrupt.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/kernel.h>
+#include <linux/module.h>
#include <linux/pci.h>
#include <linux/init.h>
#include <linux/phy/phy.h>
@@ -251,6 +252,25 @@ static void advk_pcie_wait_for_retrain(struct advk_pcie *pcie)
}
}
+static void advk_pcie_issue_perst(struct advk_pcie *pcie)
+{
+ u32 reg;
+
+ if (!pcie->reset_gpio)
+ return;
+
+ /* PERST does not work for some cards when link training is enabled */
+ reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
+ reg &= ~LINK_TRAINING_EN;
+ advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
+
+ /* 10ms delay is needed for some cards */
+ dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n");
+ gpiod_set_value_cansleep(pcie->reset_gpio, 1);
+ usleep_range(10000, 11000);
+ gpiod_set_value_cansleep(pcie->reset_gpio, 0);
+}
+
static int advk_pcie_train_at_gen(struct advk_pcie *pcie, int gen)
{
int ret, neg_gen;
@@ -299,6 +319,21 @@ static void advk_pcie_train_link(struct advk_pcie *pcie)
int neg_gen = -1, gen;
/*
+ * Reset PCIe card via PERST# signal. Some cards are not detected
+ * during link training when they are in some non-initial state.
+ */
+ advk_pcie_issue_perst(pcie);
+
+ /*
+ * The PERST# signal could have been asserted by the pinctrl subsystem
+ * before the probe() callback was called, or issued explicitly by the
+ * reset GPIO helper advk_pcie_issue_perst(), putting the endpoint into
+ * fundamental reset. As required by the PCI Express spec, wait at
+ * least 100ms after such a reset before starting link training.
+ */
+ msleep(PCI_PM_D3COLD_WAIT);
+
+ /*
* Try link training at link gen specified by device tree property
* 'max-link-speed'. If this fails, iteratively train at lower gen.
*/
@@ -330,31 +365,10 @@ err:
dev_err(dev, "link never came up\n");
}
-static void advk_pcie_issue_perst(struct advk_pcie *pcie)
-{
- u32 reg;
-
- if (!pcie->reset_gpio)
- return;
-
- /* PERST does not work for some cards when link training is enabled */
- reg = advk_readl(pcie, PCIE_CORE_CTRL0_REG);
- reg &= ~LINK_TRAINING_EN;
- advk_writel(pcie, reg, PCIE_CORE_CTRL0_REG);
-
- /* 10ms delay is needed for some cards */
- dev_info(&pcie->pdev->dev, "issuing PERST via reset GPIO for 10ms\n");
- gpiod_set_value_cansleep(pcie->reset_gpio, 1);
- usleep_range(10000, 11000);
- gpiod_set_value_cansleep(pcie->reset_gpio, 0);
-}
-
static void advk_pcie_setup_hw(struct advk_pcie *pcie)
{
u32 reg;
- advk_pcie_issue_perst(pcie);
-
/* Enable TX */
reg = advk_readl(pcie, PCIE_CORE_REF_CLK_REG);
reg |= PCIE_CORE_REF_CLK_TX_ENABLE;
@@ -431,15 +445,6 @@ static void advk_pcie_setup_hw(struct advk_pcie *pcie)
reg |= PIO_CTRL_ADDR_WIN_DISABLE;
advk_writel(pcie, reg, PIO_CTRL);
- /*
- * PERST# signal could have been asserted by pinctrl subsystem before
- * probe() callback has been called or issued explicitly by reset gpio
- * function advk_pcie_issue_perst(), making the endpoint going into
- * fundamental reset. As required by PCI Express spec a delay for at
- * least 100ms after such a reset before link training is needed.
- */
- msleep(PCI_PM_D3COLD_WAIT);
-
advk_pcie_train_link(pcie);
/*
@@ -607,7 +612,7 @@ static struct pci_bridge_emul_ops advk_pci_bridge_emul_ops = {
* Initialize the configuration space of the PCI-to-PCI bridge
* associated with the given PCIe interface.
*/
-static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
+static int advk_sw_pci_bridge_init(struct advk_pcie *pcie)
{
struct pci_bridge_emul *bridge = &pcie->bridge;
@@ -633,8 +638,7 @@ static void advk_sw_pci_bridge_init(struct advk_pcie *pcie)
bridge->data = pcie;
bridge->ops = &advk_pci_bridge_emul_ops;
- pci_bridge_emul_init(bridge, 0);
-
+ return pci_bridge_emul_init(bridge, 0);
}
static bool advk_pcie_valid_device(struct advk_pcie *pcie, struct pci_bus *bus,
@@ -1077,7 +1081,9 @@ static int advk_pcie_enable_phy(struct advk_pcie *pcie)
}
ret = phy_power_on(pcie->phy);
- if (ret) {
+ if (ret == -EOPNOTSUPP) {
+ dev_warn(&pcie->pdev->dev, "PHY unsupported by firmware\n");
+ } else if (ret) {
phy_exit(pcie->phy);
return ret;
}
@@ -1122,6 +1128,7 @@ static int advk_pcie_probe(struct platform_device *pdev)
pcie = pci_host_bridge_priv(bridge);
pcie->pdev = pdev;
+ platform_set_drvdata(pdev, pcie);
pcie->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pcie->base))
@@ -1167,7 +1174,11 @@ static int advk_pcie_probe(struct platform_device *pdev)
advk_pcie_setup_hw(pcie);
- advk_sw_pci_bridge_init(pcie);
+ ret = advk_sw_pci_bridge_init(pcie);
+ if (ret) {
+ dev_err(dev, "Failed to register emulated root PCI bridge\n");
+ return ret;
+ }
ret = advk_pcie_init_irq_domain(pcie);
if (ret) {
@@ -1195,18 +1206,37 @@ static int advk_pcie_probe(struct platform_device *pdev)
return 0;
}
+static int advk_pcie_remove(struct platform_device *pdev)
+{
+ struct advk_pcie *pcie = platform_get_drvdata(pdev);
+ struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
+
+ pci_lock_rescan_remove();
+ pci_stop_root_bus(bridge->bus);
+ pci_remove_root_bus(bridge->bus);
+ pci_unlock_rescan_remove();
+
+ advk_pcie_remove_msi_irq_domain(pcie);
+ advk_pcie_remove_irq_domain(pcie);
+
+ return 0;
+}
+
static const struct of_device_id advk_pcie_of_match_table[] = {
{ .compatible = "marvell,armada-3700-pcie", },
{},
};
+MODULE_DEVICE_TABLE(of, advk_pcie_of_match_table);
static struct platform_driver advk_pcie_driver = {
.driver = {
.name = "advk-pcie",
.of_match_table = advk_pcie_of_match_table,
- /* Driver unloading/unbinding currently not supported */
- .suppress_bind_attrs = true,
},
.probe = advk_pcie_probe,
+ .remove = advk_pcie_remove,
};
-builtin_platform_driver(advk_pcie_driver);
+module_platform_driver(advk_pcie_driver);
+
+MODULE_DESCRIPTION("Aardvark PCIe controller");
+MODULE_LICENSE("GPL v2");
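
For context on the pci-aardvark reordering above: PERST# is now toggled from
the link-training path rather than from advk_pcie_setup_hw(), and the mandatory
post-reset delay sits directly before training. A rough sketch of the resulting
bring-up order (helper names as in the diff; illustrative only, not a copy of
advk_pcie_train_link()):

  static void example_advk_link_bringup(struct advk_pcie *pcie)
  {
  	/* Toggle PERST# via the reset GPIO (held low ~10ms) */
  	advk_pcie_issue_perst(pcie);

  	/* PCIe spec: wait at least 100ms after a fundamental reset */
  	msleep(PCI_PM_D3COLD_WAIT);

  	/*
  	 * ...then train, starting at the 'max-link-speed' gen and
  	 * stepping down on failure...
  	 */
  }
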
diff --git a/drivers/pci/controller/pci-v3-semi.c b/drivers/pci/controller/pci-v3-semi.c
index 1f54334f09f7..154a5398633c 100644
--- a/drivers/pci/controller/pci-v3-semi.c
+++ b/drivers/pci/controller/pci-v3-semi.c
@@ -658,7 +658,6 @@ static int v3_get_dma_range_config(struct v3_pci *v3,
default:
dev_err(v3->dev, "illegal dma memory chunk size\n");
return -EINVAL;
- break;
}
val |= V3_PCI_MAP_M_REG_EN | V3_PCI_MAP_M_ENABLE;
*pci_map = val;
diff --git a/drivers/pci/controller/pcie-brcmstb.c b/drivers/pci/controller/pcie-brcmstb.c
index 85fa7d54f11f..afa7ba1d9a38 100644
--- a/drivers/pci/controller/pcie-brcmstb.c
+++ b/drivers/pci/controller/pcie-brcmstb.c
@@ -23,6 +23,7 @@
#include <linux/of_platform.h>
#include <linux/pci.h>
#include <linux/printk.h>
+#include <linux/reset.h>
#include <linux/sizes.h>
#include <linux/slab.h>
#include <linux/string.h>
@@ -54,8 +55,11 @@
#define PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK 0x1000
#define PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK 0x2000
#define PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK 0x300000
-#define PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_128 0x0
+
#define PCIE_MISC_MISC_CTRL_SCB0_SIZE_MASK 0xf8000000
+#define PCIE_MISC_MISC_CTRL_SCB1_SIZE_MASK 0x07c00000
+#define PCIE_MISC_MISC_CTRL_SCB2_SIZE_MASK 0x0000001f
+#define SCB_SIZE_MASK(x) PCIE_MISC_MISC_CTRL_SCB ## x ## _SIZE_MASK
#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_LO 0x400c
#define PCIE_MEM_WIN0_LO(win) \
@@ -79,10 +83,12 @@
#define PCIE_MISC_MSI_BAR_CONFIG_HI 0x4048
#define PCIE_MISC_MSI_DATA_CONFIG 0x404c
-#define PCIE_MISC_MSI_DATA_CONFIG_VAL 0xffe06540
+#define PCIE_MISC_MSI_DATA_CONFIG_VAL_32 0xffe06540
+#define PCIE_MISC_MSI_DATA_CONFIG_VAL_8 0xfff86540
#define PCIE_MISC_PCIE_CTRL 0x4064
#define PCIE_MISC_PCIE_CTRL_PCIE_L23_REQUEST_MASK 0x1
+#define PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK 0x4
#define PCIE_MISC_PCIE_STATUS 0x4068
#define PCIE_MISC_PCIE_STATUS_PCIE_PORT_MASK 0x80
@@ -90,6 +96,9 @@
#define PCIE_MISC_PCIE_STATUS_PCIE_PHYLINKUP_MASK 0x10
#define PCIE_MISC_PCIE_STATUS_PCIE_LINK_IN_L23_MASK 0x40
+#define PCIE_MISC_REVISION 0x406c
+#define BRCM_PCIE_HW_REV_33 0x0303
+
#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT 0x4070
#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_LIMIT_MASK 0xfff00000
#define PCIE_MISC_CPU_2_PCIE_MEM_WIN0_BASE_LIMIT_BASE_MASK 0xfff0
@@ -110,10 +119,14 @@
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_CLKREQ_DEBUG_ENABLE_MASK 0x2
#define PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK 0x08000000
-#define PCIE_MSI_INTR2_STATUS 0x4500
-#define PCIE_MSI_INTR2_CLR 0x4508
-#define PCIE_MSI_INTR2_MASK_SET 0x4510
-#define PCIE_MSI_INTR2_MASK_CLR 0x4514
+
+#define PCIE_INTR2_CPU_BASE 0x4300
+#define PCIE_MSI_INTR2_BASE 0x4500
+/* Offsets from PCIE_INTR2_CPU_BASE and PCIE_MSI_INTR2_BASE */
+#define MSI_INT_STATUS 0x0
+#define MSI_INT_CLR 0x8
+#define MSI_INT_MASK_SET 0x10
+#define MSI_INT_MASK_CLR 0x14
#define PCIE_EXT_CFG_DATA 0x8000
@@ -122,13 +135,19 @@
#define PCIE_EXT_SLOT_SHIFT 15
#define PCIE_EXT_FUNC_SHIFT 12
-#define PCIE_RGR1_SW_INIT_1 0x9210
#define PCIE_RGR1_SW_INIT_1_PERST_MASK 0x1
-#define PCIE_RGR1_SW_INIT_1_INIT_MASK 0x2
+#define PCIE_RGR1_SW_INIT_1_PERST_SHIFT 0x0
+
+#define RGR1_SW_INIT_1_INIT_GENERIC_MASK 0x2
+#define RGR1_SW_INIT_1_INIT_GENERIC_SHIFT 0x1
+#define RGR1_SW_INIT_1_INIT_7278_MASK 0x1
+#define RGR1_SW_INIT_1_INIT_7278_SHIFT 0x0
/* PCIe parameters */
#define BRCM_NUM_PCIE_OUT_WINS 0x4
#define BRCM_INT_PCI_MSI_NR 32
+#define BRCM_INT_PCI_MSI_LEGACY_NR 8
+#define BRCM_INT_PCI_MSI_SHIFT 0
/* MSI target adresses */
#define BRCM_MSI_TARGET_ADDR_LT_4GB 0x0fffffffcULL
@@ -153,6 +172,85 @@
#define SSC_STATUS_OFFSET 0x1
#define SSC_STATUS_SSC_MASK 0x400
#define SSC_STATUS_PLL_LOCK_MASK 0x800
+#define PCIE_BRCM_MAX_MEMC 3
+
+#define IDX_ADDR(pcie) (pcie->reg_offsets[EXT_CFG_INDEX])
+#define DATA_ADDR(pcie) (pcie->reg_offsets[EXT_CFG_DATA])
+#define PCIE_RGR1_SW_INIT_1(pcie) (pcie->reg_offsets[RGR1_SW_INIT_1])
+
+/* Rescal registers */
+#define PCIE_DVT_PMU_PCIE_PHY_CTRL 0xc700
+#define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS 0x3
+#define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_MASK 0x4
+#define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_SHIFT 0x2
+#define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_MASK 0x2
+#define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_SHIFT 0x1
+#define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_MASK 0x1
+#define PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_SHIFT 0x0
+
+/* Forward declarations */
+struct brcm_pcie;
+static inline void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val);
+static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val);
+static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val);
+static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val);
+
+enum {
+ RGR1_SW_INIT_1,
+ EXT_CFG_INDEX,
+ EXT_CFG_DATA,
+};
+
+enum {
+ RGR1_SW_INIT_1_INIT_MASK,
+ RGR1_SW_INIT_1_INIT_SHIFT,
+};
+
+enum pcie_type {
+ GENERIC,
+ BCM7278,
+ BCM2711,
+};
+
+struct pcie_cfg_data {
+ const int *offsets;
+ const enum pcie_type type;
+ void (*perst_set)(struct brcm_pcie *pcie, u32 val);
+ void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
+};
+
+static const int pcie_offsets[] = {
+ [RGR1_SW_INIT_1] = 0x9210,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x9004,
+};
+
+static const struct pcie_cfg_data generic_cfg = {
+ .offsets = pcie_offsets,
+ .type = GENERIC,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
+
+static const int pcie_offset_bcm7278[] = {
+ [RGR1_SW_INIT_1] = 0xc010,
+ [EXT_CFG_INDEX] = 0x9000,
+ [EXT_CFG_DATA] = 0x9004,
+};
+
+static const struct pcie_cfg_data bcm7278_cfg = {
+ .offsets = pcie_offset_bcm7278,
+ .type = BCM7278,
+ .perst_set = brcm_pcie_perst_set_7278,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_7278,
+};
+
+static const struct pcie_cfg_data bcm2711_cfg = {
+ .offsets = pcie_offsets,
+ .type = BCM2711,
+ .perst_set = brcm_pcie_perst_set_generic,
+ .bridge_sw_init_set = brcm_pcie_bridge_sw_init_set_generic,
+};
struct brcm_msi {
struct device *dev;
@@ -165,6 +263,12 @@ struct brcm_msi {
int irq;
/* used indicates which MSI interrupts have been alloc'd */
unsigned long used;
+ bool legacy;
+ /* Some chips have MSIs in bits [31..24] of a shared register. */
+ int legacy_shift;
+ int nr; /* No. of MSI available, depends on chip */
+ /* This is the base pointer for interrupt status/set/clr regs */
+ void __iomem *intr_base;
};
/* Internal PCIe Host Controller Information.*/
@@ -177,6 +281,14 @@ struct brcm_pcie {
int gen;
u64 msi_target_addr;
struct brcm_msi *msi;
+ const int *reg_offsets;
+ enum pcie_type type;
+ struct reset_control *rescal;
+ int num_memc;
+ u64 memc_size[PCIE_BRCM_MAX_MEMC];
+ u32 hw_rev;
+ void (*perst_set)(struct brcm_pcie *pcie, u32 val);
+ void (*bridge_sw_init_set)(struct brcm_pcie *pcie, u32 val);
};
/*
@@ -367,8 +479,10 @@ static void brcm_pcie_msi_isr(struct irq_desc *desc)
msi = irq_desc_get_handler_data(desc);
dev = msi->dev;
- status = readl(msi->base + PCIE_MSI_INTR2_STATUS);
- for_each_set_bit(bit, &status, BRCM_INT_PCI_MSI_NR) {
+ status = readl(msi->intr_base + MSI_INT_STATUS);
+ status >>= msi->legacy_shift;
+
+ for_each_set_bit(bit, &status, msi->nr) {
virq = irq_find_mapping(msi->inner_domain, bit);
if (virq)
generic_handle_irq(virq);
@@ -385,7 +499,7 @@ static void brcm_msi_compose_msi_msg(struct irq_data *data, struct msi_msg *msg)
msg->address_lo = lower_32_bits(msi->target_addr);
msg->address_hi = upper_32_bits(msi->target_addr);
- msg->data = (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL) | data->hwirq;
+ msg->data = (0xffff & PCIE_MISC_MSI_DATA_CONFIG_VAL_32) | data->hwirq;
}
static int brcm_msi_set_affinity(struct irq_data *irq_data,
@@ -397,8 +511,9 @@ static int brcm_msi_set_affinity(struct irq_data *irq_data,
static void brcm_msi_ack_irq(struct irq_data *data)
{
struct brcm_msi *msi = irq_data_get_irq_chip_data(data);
+ const int shift_amt = data->hwirq + msi->legacy_shift;
- writel(1 << data->hwirq, msi->base + PCIE_MSI_INTR2_CLR);
+ writel(1 << shift_amt, msi->intr_base + MSI_INT_CLR);
}
@@ -414,7 +529,7 @@ static int brcm_msi_alloc(struct brcm_msi *msi)
int hwirq;
mutex_lock(&msi->lock);
- hwirq = bitmap_find_free_region(&msi->used, BRCM_INT_PCI_MSI_NR, 0);
+ hwirq = bitmap_find_free_region(&msi->used, msi->nr, 0);
mutex_unlock(&msi->lock);
return hwirq;
@@ -463,8 +578,7 @@ static int brcm_allocate_domains(struct brcm_msi *msi)
struct fwnode_handle *fwnode = of_node_to_fwnode(msi->np);
struct device *dev = msi->dev;
- msi->inner_domain = irq_domain_add_linear(NULL, BRCM_INT_PCI_MSI_NR,
- &msi_domain_ops, msi);
+ msi->inner_domain = irq_domain_add_linear(NULL, msi->nr, &msi_domain_ops, msi);
if (!msi->inner_domain) {
dev_err(dev, "failed to create IRQ domain\n");
return -ENOMEM;
@@ -501,7 +615,10 @@ static void brcm_msi_remove(struct brcm_pcie *pcie)
static void brcm_msi_set_regs(struct brcm_msi *msi)
{
- writel(0xffffffff, msi->base + PCIE_MSI_INTR2_MASK_CLR);
+ u32 val = __GENMASK(31, msi->legacy_shift);
+
+ writel(val, msi->intr_base + MSI_INT_MASK_CLR);
+ writel(val, msi->intr_base + MSI_INT_CLR);
/*
* The 0 bit of PCIE_MISC_MSI_BAR_CONFIG_LO is repurposed to MSI
@@ -512,8 +629,8 @@ static void brcm_msi_set_regs(struct brcm_msi *msi)
writel(upper_32_bits(msi->target_addr),
msi->base + PCIE_MISC_MSI_BAR_CONFIG_HI);
- writel(PCIE_MISC_MSI_DATA_CONFIG_VAL,
- msi->base + PCIE_MISC_MSI_DATA_CONFIG);
+ val = msi->legacy ? PCIE_MISC_MSI_DATA_CONFIG_VAL_8 : PCIE_MISC_MSI_DATA_CONFIG_VAL_32;
+ writel(val, msi->base + PCIE_MISC_MSI_DATA_CONFIG);
}
static int brcm_pcie_enable_msi(struct brcm_pcie *pcie)
@@ -538,6 +655,17 @@ static int brcm_pcie_enable_msi(struct brcm_pcie *pcie)
msi->np = pcie->np;
msi->target_addr = pcie->msi_target_addr;
msi->irq = irq;
+ msi->legacy = pcie->hw_rev < BRCM_PCIE_HW_REV_33;
+
+ if (msi->legacy) {
+ msi->intr_base = msi->base + PCIE_INTR2_CPU_BASE;
+ msi->nr = BRCM_INT_PCI_MSI_LEGACY_NR;
+ msi->legacy_shift = 24;
+ } else {
+ msi->intr_base = msi->base + PCIE_MSI_INTR2_BASE;
+ msi->nr = BRCM_INT_PCI_MSI_NR;
+ msi->legacy_shift = 0;
+ }
ret = brcm_allocate_domains(msi);
if (ret)
@@ -601,22 +729,43 @@ static struct pci_ops brcm_pcie_ops = {
.write = pci_generic_config_write,
};
-static inline void brcm_pcie_bridge_sw_init_set(struct brcm_pcie *pcie, u32 val)
+static inline void brcm_pcie_bridge_sw_init_set_generic(struct brcm_pcie *pcie, u32 val)
+{
+ u32 tmp, mask = RGR1_SW_INIT_1_INIT_GENERIC_MASK;
+ u32 shift = RGR1_SW_INIT_1_INIT_GENERIC_SHIFT;
+
+ tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+ tmp = (tmp & ~mask) | ((val << shift) & mask);
+ writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+}
+
+static inline void brcm_pcie_bridge_sw_init_set_7278(struct brcm_pcie *pcie, u32 val)
+{
+ u32 tmp, mask = RGR1_SW_INIT_1_INIT_7278_MASK;
+ u32 shift = RGR1_SW_INIT_1_INIT_7278_SHIFT;
+
+ tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+ tmp = (tmp & ~mask) | ((val << shift) & mask);
+ writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
+}
+
+static inline void brcm_pcie_perst_set_7278(struct brcm_pcie *pcie, u32 val)
{
u32 tmp;
- tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1);
- u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_INIT_MASK);
- writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1);
+ /* Perst bit has moved and assert value is 0 */
+ tmp = readl(pcie->base + PCIE_MISC_PCIE_CTRL);
+ u32p_replace_bits(&tmp, !val, PCIE_MISC_PCIE_CTRL_PCIE_PERSTB_MASK);
+ writel(tmp, pcie->base + PCIE_MISC_PCIE_CTRL);
}
-static inline void brcm_pcie_perst_set(struct brcm_pcie *pcie, u32 val)
+static inline void brcm_pcie_perst_set_generic(struct brcm_pcie *pcie, u32 val)
{
u32 tmp;
- tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1);
+ tmp = readl(pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
u32p_replace_bits(&tmp, val, PCIE_RGR1_SW_INIT_1_PERST_MASK);
- writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1);
+ writel(tmp, pcie->base + PCIE_RGR1_SW_INIT_1(pcie));
}
static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
@@ -624,22 +773,44 @@ static inline int brcm_pcie_get_rc_bar2_size_and_offset(struct brcm_pcie *pcie,
u64 *rc_bar2_offset)
{
struct pci_host_bridge *bridge = pci_host_bridge_from_priv(pcie);
- struct device *dev = pcie->dev;
struct resource_entry *entry;
+ struct device *dev = pcie->dev;
+ u64 lowest_pcie_addr = ~(u64)0;
+ int ret, i = 0;
+ u64 size = 0;
- entry = resource_list_first_type(&bridge->dma_ranges, IORESOURCE_MEM);
- if (!entry)
- return -ENODEV;
+ resource_list_for_each_entry(entry, &bridge->dma_ranges) {
+ u64 pcie_beg = entry->res->start - entry->offset;
+ size += entry->res->end - entry->res->start + 1;
+ if (pcie_beg < lowest_pcie_addr)
+ lowest_pcie_addr = pcie_beg;
+ }
- /*
- * The controller expects the inbound window offset to be calculated as
- * the difference between PCIe's address space and CPU's. The offset
- * provided by the firmware is calculated the opposite way, so we
- * negate it.
- */
- *rc_bar2_offset = -entry->offset;
- *rc_bar2_size = 1ULL << fls64(entry->res->end - entry->res->start);
+ if (lowest_pcie_addr == ~(u64)0) {
+ dev_err(dev, "DT node has no dma-ranges\n");
+ return -EINVAL;
+ }
+
+ ret = of_property_read_variable_u64_array(pcie->np, "brcm,scb-sizes", pcie->memc_size, 1,
+ PCIE_BRCM_MAX_MEMC);
+
+ if (ret <= 0) {
+ /* Make an educated guess */
+ pcie->num_memc = 1;
+ pcie->memc_size[0] = 1ULL << fls64(size - 1);
+ } else {
+ pcie->num_memc = ret;
+ }
+
+ /* Each memc is viewed through a "port" that is a power of 2 */
+ for (i = 0, size = 0; i < pcie->num_memc; i++)
+ size += pcie->memc_size[i];
+
+ /* System memory starts at this address in PCIe-space */
+ *rc_bar2_offset = lowest_pcie_addr;
+ /* The sum of all memc views must also be a power of 2 */
+ *rc_bar2_size = 1ULL << fls64(size - 1);
/*
* We validate the inbound memory view even though we should trust
@@ -691,22 +862,19 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
void __iomem *base = pcie->base;
struct device *dev = pcie->dev;
struct resource_entry *entry;
- unsigned int scb_size_val;
bool ssc_good = false;
struct resource *res;
int num_out_wins = 0;
u16 nlw, cls, lnksta;
- int i, ret;
- u32 tmp, aspm_support;
+ int i, ret, memc;
+ u32 tmp, burst, aspm_support;
/* Reset the bridge */
- brcm_pcie_bridge_sw_init_set(pcie, 1);
- brcm_pcie_perst_set(pcie, 1);
-
+ pcie->bridge_sw_init_set(pcie, 1);
usleep_range(100, 200);
/* Take the bridge out of reset */
- brcm_pcie_bridge_sw_init_set(pcie, 0);
+ pcie->bridge_sw_init_set(pcie, 0);
tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
tmp &= ~PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK;
@@ -714,11 +882,22 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
/* Wait for SerDes to be stable */
usleep_range(100, 200);
+ /*
+ * SCB_MAX_BURST_SIZE is a two bit field. For GENERIC chips it
+ * is encoded as 0=128, 1=256, 2=512, 3=Rsvd, for BCM7278 it
+ * is encoded as 0=Rsvd, 1=128, 2=256, 3=512.
+ */
+ if (pcie->type == BCM2711)
+ burst = 0x0; /* 128B */
+ else if (pcie->type == BCM7278)
+ burst = 0x3; /* 512 bytes */
+ else
+ burst = 0x2; /* 512 bytes */
+
/* Set SCB_MAX_BURST_SIZE, CFG_READ_UR_MODE, SCB_ACCESS_EN */
u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_SCB_ACCESS_EN_MASK);
u32p_replace_bits(&tmp, 1, PCIE_MISC_MISC_CTRL_CFG_READ_UR_MODE_MASK);
- u32p_replace_bits(&tmp, PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_128,
- PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK);
+ u32p_replace_bits(&tmp, burst, PCIE_MISC_MISC_CTRL_MAX_BURST_SIZE_MASK);
writel(tmp, base + PCIE_MISC_MISC_CTRL);
ret = brcm_pcie_get_rc_bar2_size_and_offset(pcie, &rc_bar2_size,
@@ -733,11 +912,17 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
writel(upper_32_bits(rc_bar2_offset),
base + PCIE_MISC_RC_BAR2_CONFIG_HI);
- scb_size_val = rc_bar2_size ?
- ilog2(rc_bar2_size) - 15 : 0xf; /* 0xf is 1GB */
tmp = readl(base + PCIE_MISC_MISC_CTRL);
- u32p_replace_bits(&tmp, scb_size_val,
- PCIE_MISC_MISC_CTRL_SCB0_SIZE_MASK);
+ for (memc = 0; memc < pcie->num_memc; memc++) {
+ u32 scb_size_val = ilog2(pcie->memc_size[memc]) - 15;
+
+ if (memc == 0)
+ u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(0));
+ else if (memc == 1)
+ u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(1));
+ else if (memc == 2)
+ u32p_replace_bits(&tmp, scb_size_val, SCB_SIZE_MASK(2));
+ }
writel(tmp, base + PCIE_MISC_MISC_CTRL);
/*
@@ -762,17 +947,11 @@ static int brcm_pcie_setup(struct brcm_pcie *pcie)
tmp &= ~PCIE_MISC_RC_BAR3_CONFIG_LO_SIZE_MASK;
writel(tmp, base + PCIE_MISC_RC_BAR3_CONFIG_LO);
- /* Mask all interrupts since we are not handling any yet */
- writel(0xffffffff, pcie->base + PCIE_MSI_INTR2_MASK_SET);
-
- /* clear any interrupts we find on boot */
- writel(0xffffffff, pcie->base + PCIE_MSI_INTR2_CLR);
-
if (pcie->gen)
brcm_pcie_set_gen(pcie, pcie->gen);
/* Unassert the fundamental reset */
- brcm_pcie_perst_set(pcie, 0);
+ pcie->perst_set(pcie, 0);
/*
* Give the RC/EP time to wake up, before trying to configure RC.
@@ -884,6 +1063,52 @@ static void brcm_pcie_enter_l23(struct brcm_pcie *pcie)
dev_err(pcie->dev, "failed to enter low-power link state\n");
}
+static int brcm_phy_cntl(struct brcm_pcie *pcie, const int start)
+{
+ static const u32 shifts[PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS] = {
+ PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_SHIFT,
+ PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_SHIFT,
+ PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_SHIFT,};
+ static const u32 masks[PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS] = {
+ PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_PWRDN_MASK,
+ PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_RESET_MASK,
+ PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_DIG_RESET_MASK,};
+ const int beg = start ? 0 : PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS - 1;
+ const int end = start ? PCIE_DVT_PMU_PCIE_PHY_CTRL_DAST_NFLDS : -1;
+ u32 tmp, combined_mask = 0;
+ u32 val;
+ void __iomem *base = pcie->base;
+ int i, ret;
+
+ for (i = beg; i != end; start ? i++ : i--) {
+ val = start ? BIT_MASK(shifts[i]) : 0;
+ tmp = readl(base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
+ tmp = (tmp & ~masks[i]) | (val & masks[i]);
+ writel(tmp, base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
+ usleep_range(50, 200);
+ combined_mask |= masks[i];
+ }
+
+ tmp = readl(base + PCIE_DVT_PMU_PCIE_PHY_CTRL);
+ val = start ? combined_mask : 0;
+
+ ret = (tmp & combined_mask) == val ? 0 : -EIO;
+ if (ret)
+ dev_err(pcie->dev, "failed to %s phy\n", (start ? "start" : "stop"));
+
+ return ret;
+}
+
+static inline int brcm_phy_start(struct brcm_pcie *pcie)
+{
+ return pcie->rescal ? brcm_phy_cntl(pcie, 1) : 0;
+}
+
+static inline int brcm_phy_stop(struct brcm_pcie *pcie)
+{
+ return pcie->rescal ? brcm_phy_cntl(pcie, 0) : 0;
+}
+
static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
{
void __iomem *base = pcie->base;
@@ -892,7 +1117,7 @@ static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
if (brcm_pcie_link_up(pcie))
brcm_pcie_enter_l23(pcie);
/* Assert fundamental reset */
- brcm_pcie_perst_set(pcie, 1);
+ pcie->perst_set(pcie, 1);
/* Deassert request for L23 in case it was asserted */
tmp = readl(base + PCIE_MISC_PCIE_CTRL);
@@ -905,13 +1130,66 @@ static void brcm_pcie_turn_off(struct brcm_pcie *pcie)
writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
/* Shutdown PCIe bridge */
- brcm_pcie_bridge_sw_init_set(pcie, 1);
+ pcie->bridge_sw_init_set(pcie, 1);
+}
+
+static int brcm_pcie_suspend(struct device *dev)
+{
+ struct brcm_pcie *pcie = dev_get_drvdata(dev);
+ int ret;
+
+ brcm_pcie_turn_off(pcie);
+ ret = brcm_phy_stop(pcie);
+ clk_disable_unprepare(pcie->clk);
+
+ return ret;
+}
+
+static int brcm_pcie_resume(struct device *dev)
+{
+ struct brcm_pcie *pcie = dev_get_drvdata(dev);
+ void __iomem *base;
+ u32 tmp;
+ int ret;
+
+ base = pcie->base;
+ clk_prepare_enable(pcie->clk);
+
+ ret = brcm_phy_start(pcie);
+ if (ret)
+ goto err;
+
+ /* Take bridge out of reset so we can access the SERDES reg */
+ pcie->bridge_sw_init_set(pcie, 0);
+
+ /* SERDES_IDDQ = 0 */
+ tmp = readl(base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+ u32p_replace_bits(&tmp, 0, PCIE_MISC_HARD_PCIE_HARD_DEBUG_SERDES_IDDQ_MASK);
+ writel(tmp, base + PCIE_MISC_HARD_PCIE_HARD_DEBUG);
+
+ /* wait for serdes to be stable */
+ udelay(100);
+
+ ret = brcm_pcie_setup(pcie);
+ if (ret)
+ goto err;
+
+ if (pcie->msi)
+ brcm_msi_set_regs(pcie->msi);
+
+ return 0;
+
+err:
+ clk_disable_unprepare(pcie->clk);
+ return ret;
}
static void __brcm_pcie_remove(struct brcm_pcie *pcie)
{
brcm_msi_remove(pcie);
brcm_pcie_turn_off(pcie);
+ brcm_phy_stop(pcie);
+ reset_control_assert(pcie->rescal);
clk_disable_unprepare(pcie->clk);
}
@@ -927,11 +1205,21 @@ static int brcm_pcie_remove(struct platform_device *pdev)
return 0;
}
+static const struct of_device_id brcm_pcie_match[] = {
+ { .compatible = "brcm,bcm2711-pcie", .data = &bcm2711_cfg },
+ { .compatible = "brcm,bcm7211-pcie", .data = &generic_cfg },
+ { .compatible = "brcm,bcm7278-pcie", .data = &bcm7278_cfg },
+ { .compatible = "brcm,bcm7216-pcie", .data = &bcm7278_cfg },
+ { .compatible = "brcm,bcm7445-pcie", .data = &generic_cfg },
+ {},
+};
+
static int brcm_pcie_probe(struct platform_device *pdev)
{
struct device_node *np = pdev->dev.of_node, *msi_np;
struct pci_host_bridge *bridge;
struct device_node *fw_np;
+ const struct pcie_cfg_data *data;
struct brcm_pcie *pcie;
int ret;
@@ -953,9 +1241,19 @@ static int brcm_pcie_probe(struct platform_device *pdev)
if (!bridge)
return -ENOMEM;
+ data = of_device_get_match_data(&pdev->dev);
+ if (!data) {
+ pr_err("failed to look up compatible string\n");
+ return -EINVAL;
+ }
+
pcie = pci_host_bridge_priv(bridge);
pcie->dev = &pdev->dev;
pcie->np = np;
+ pcie->reg_offsets = data->offsets;
+ pcie->type = data->type;
+ pcie->perst_set = data->perst_set;
+ pcie->bridge_sw_init_set = data->bridge_sw_init_set;
pcie->base = devm_platform_ioremap_resource(pdev, 0);
if (IS_ERR(pcie->base))
@@ -975,11 +1273,29 @@ static int brcm_pcie_probe(struct platform_device *pdev)
dev_err(&pdev->dev, "could not enable clock\n");
return ret;
}
+ pcie->rescal = devm_reset_control_get_optional_shared(&pdev->dev, "rescal");
+ if (IS_ERR(pcie->rescal)) {
+ clk_disable_unprepare(pcie->clk);
+ return PTR_ERR(pcie->rescal);
+ }
+
+ ret = reset_control_deassert(pcie->rescal);
+ if (ret)
+ dev_err(&pdev->dev, "failed to deassert 'rescal'\n");
+
+ ret = brcm_phy_start(pcie);
+ if (ret) {
+ reset_control_assert(pcie->rescal);
+ clk_disable_unprepare(pcie->clk);
+ return ret;
+ }
ret = brcm_pcie_setup(pcie);
if (ret)
goto fail;
+ pcie->hw_rev = readl(pcie->base + PCIE_MISC_REVISION);
+
msi_np = of_parse_phandle(pcie->np, "msi-parent", 0);
if (pci_msi_enabled() && msi_np == pcie->np) {
ret = brcm_pcie_enable_msi(pcie);
@@ -1000,18 +1316,20 @@ fail:
return ret;
}
-static const struct of_device_id brcm_pcie_match[] = {
- { .compatible = "brcm,bcm2711-pcie" },
- {},
-};
MODULE_DEVICE_TABLE(of, brcm_pcie_match);
+static const struct dev_pm_ops brcm_pcie_pm_ops = {
+ .suspend = brcm_pcie_suspend,
+ .resume = brcm_pcie_resume,
+};
+
static struct platform_driver brcm_pcie_driver = {
.probe = brcm_pcie_probe,
.remove = brcm_pcie_remove,
.driver = {
.name = "brcm-pcie",
.of_match_table = brcm_pcie_match,
+ .pm = &brcm_pcie_pm_ops,
},
};
module_platform_driver(brcm_pcie_driver);
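
A short worked example of the pcie-brcmstb inbound-window sizing introduced
above, assuming a single 1 GB dma-range and no "brcm,scb-sizes" property
(values are illustrative; the formulas are those in
brcm_pcie_get_rc_bar2_size_and_offset() and brcm_pcie_setup()):

  u64 size = SZ_1G;                      /* sum of all dma-range windows       */
  u64 memc0 = 1ULL << fls64(size - 1);   /* "educated guess": rounds up to 1 GB */
  u32 scb_size_val = ilog2(memc0) - 15;  /* 30 - 15 = 0xf, i.e. the old
                                          * "0xf is 1GB" encoding, written into
                                          * SCB0_SIZE via SCB_SIZE_MASK(0)     */
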
diff --git a/drivers/pci/controller/pcie-hisi-error.c b/drivers/pci/controller/pcie-hisi-error.c
new file mode 100644
index 000000000000..7959c9c8d2bc
--- /dev/null
+++ b/drivers/pci/controller/pcie-hisi-error.c
@@ -0,0 +1,327 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Driver for handling the PCIe controller errors on
+ * HiSilicon HIP SoCs.
+ *
+ * Copyright (c) 2020 HiSilicon Limited.
+ */
+
+#include <linux/acpi.h>
+#include <acpi/ghes.h>
+#include <linux/bitops.h>
+#include <linux/delay.h>
+#include <linux/pci.h>
+#include <linux/platform_device.h>
+#include <linux/kfifo.h>
+#include <linux/spinlock.h>
+
+/* HISI PCIe controller error definitions */
+#define HISI_PCIE_ERR_MISC_REGS 33
+
+#define HISI_PCIE_LOCAL_VALID_VERSION BIT(0)
+#define HISI_PCIE_LOCAL_VALID_SOC_ID BIT(1)
+#define HISI_PCIE_LOCAL_VALID_SOCKET_ID BIT(2)
+#define HISI_PCIE_LOCAL_VALID_NIMBUS_ID BIT(3)
+#define HISI_PCIE_LOCAL_VALID_SUB_MODULE_ID BIT(4)
+#define HISI_PCIE_LOCAL_VALID_CORE_ID BIT(5)
+#define HISI_PCIE_LOCAL_VALID_PORT_ID BIT(6)
+#define HISI_PCIE_LOCAL_VALID_ERR_TYPE BIT(7)
+#define HISI_PCIE_LOCAL_VALID_ERR_SEVERITY BIT(8)
+#define HISI_PCIE_LOCAL_VALID_ERR_MISC 9
+
+static guid_t hisi_pcie_sec_guid =
+ GUID_INIT(0xB2889FC9, 0xE7D7, 0x4F9D,
+ 0xA8, 0x67, 0xAF, 0x42, 0xE9, 0x8B, 0xE7, 0x72);
+
+/*
+ * Firmware reports the socket port ID where the error occurred. These
+ * macros convert that to the core ID and core port ID required by the
+ * ACPI reset method.
+ */
+#define HISI_PCIE_PORT_ID(core, v) (((v) >> 1) + ((core) << 3))
+#define HISI_PCIE_CORE_ID(v) ((v) >> 3)
+#define HISI_PCIE_CORE_PORT_ID(v) (((v) & 7) << 1)
+
+struct hisi_pcie_error_data {
+ u64 val_bits;
+ u8 version;
+ u8 soc_id;
+ u8 socket_id;
+ u8 nimbus_id;
+ u8 sub_module_id;
+ u8 core_id;
+ u8 port_id;
+ u8 err_severity;
+ u16 err_type;
+ u8 reserv[2];
+ u32 err_misc[HISI_PCIE_ERR_MISC_REGS];
+};
+
+struct hisi_pcie_error_private {
+ struct notifier_block nb;
+ struct device *dev;
+};
+
+enum hisi_pcie_submodule_id {
+ HISI_PCIE_SUB_MODULE_ID_AP,
+ HISI_PCIE_SUB_MODULE_ID_TL,
+ HISI_PCIE_SUB_MODULE_ID_MAC,
+ HISI_PCIE_SUB_MODULE_ID_DL,
+ HISI_PCIE_SUB_MODULE_ID_SDI,
+};
+
+static const char * const hisi_pcie_sub_module[] = {
+ [HISI_PCIE_SUB_MODULE_ID_AP] = "AP Layer",
+ [HISI_PCIE_SUB_MODULE_ID_TL] = "TL Layer",
+ [HISI_PCIE_SUB_MODULE_ID_MAC] = "MAC Layer",
+ [HISI_PCIE_SUB_MODULE_ID_DL] = "DL Layer",
+ [HISI_PCIE_SUB_MODULE_ID_SDI] = "SDI Layer",
+};
+
+enum hisi_pcie_err_severity {
+ HISI_PCIE_ERR_SEV_RECOVERABLE,
+ HISI_PCIE_ERR_SEV_FATAL,
+ HISI_PCIE_ERR_SEV_CORRECTED,
+ HISI_PCIE_ERR_SEV_NONE,
+};
+
+static const char * const hisi_pcie_error_sev[] = {
+ [HISI_PCIE_ERR_SEV_RECOVERABLE] = "recoverable",
+ [HISI_PCIE_ERR_SEV_FATAL] = "fatal",
+ [HISI_PCIE_ERR_SEV_CORRECTED] = "corrected",
+ [HISI_PCIE_ERR_SEV_NONE] = "none",
+};
+
+static const char *hisi_pcie_get_string(const char * const *array,
+ size_t n, u32 id)
+{
+ u32 index;
+
+ for (index = 0; index < n; index++) {
+ if (index == id && array[index])
+ return array[index];
+ }
+
+ return "unknown";
+}
+
+static int hisi_pcie_port_reset(struct platform_device *pdev,
+ u32 chip_id, u32 port_id)
+{
+ struct device *dev = &pdev->dev;
+ acpi_handle handle = ACPI_HANDLE(dev);
+ union acpi_object arg[3];
+ struct acpi_object_list arg_list;
+ acpi_status s;
+ unsigned long long data = 0;
+
+ arg[0].type = ACPI_TYPE_INTEGER;
+ arg[0].integer.value = chip_id;
+ arg[1].type = ACPI_TYPE_INTEGER;
+ arg[1].integer.value = HISI_PCIE_CORE_ID(port_id);
+ arg[2].type = ACPI_TYPE_INTEGER;
+ arg[2].integer.value = HISI_PCIE_CORE_PORT_ID(port_id);
+
+ arg_list.count = 3;
+ arg_list.pointer = arg;
+
+ s = acpi_evaluate_integer(handle, "RST", &arg_list, &data);
+ if (ACPI_FAILURE(s)) {
+ dev_err(dev, "No RST method\n");
+ return -EIO;
+ }
+
+ if (data) {
+ dev_err(dev, "Failed to Reset\n");
+ return -EIO;
+ }
+
+ return 0;
+}
+
+static int hisi_pcie_port_do_recovery(struct platform_device *dev,
+ u32 chip_id, u32 port_id)
+{
+ acpi_status s;
+ struct device *device = &dev->dev;
+ acpi_handle root_handle = ACPI_HANDLE(device);
+ struct acpi_pci_root *pci_root;
+ struct pci_bus *root_bus;
+ struct pci_dev *pdev;
+ u32 domain, busnr, devfn;
+
+ s = acpi_get_parent(root_handle, &root_handle);
+ if (ACPI_FAILURE(s))
+ return -ENODEV;
+ pci_root = acpi_pci_find_root(root_handle);
+ if (!pci_root)
+ return -ENODEV;
+ root_bus = pci_root->bus;
+ domain = pci_root->segment;
+
+ busnr = root_bus->number;
+ devfn = PCI_DEVFN(port_id, 0);
+ pdev = pci_get_domain_bus_and_slot(domain, busnr, devfn);
+ if (!pdev) {
+ dev_info(device, "Fail to get root port %04x:%02x:%02x.%d device\n",
+ domain, busnr, PCI_SLOT(devfn), PCI_FUNC(devfn));
+ return -ENODEV;
+ }
+
+ pci_stop_and_remove_bus_device_locked(pdev);
+ pci_dev_put(pdev);
+
+ if (hisi_pcie_port_reset(dev, chip_id, port_id))
+ return -EIO;
+
+ /*
+ * Per the PCI spec v5.0 sec 6.6.1, subordinate devices must be ready
+ * no more than 1s after a hot reset. The wait can be shorter if
+ * Readiness Notifications mechanisms are used, but wait the full 1s
+ * here to cover all cases.
+ */
+ ssleep(1UL);
+
+ /* add root port and downstream devices */
+ pci_lock_rescan_remove();
+ pci_rescan_bus(root_bus);
+ pci_unlock_rescan_remove();
+
+ return 0;
+}
+
+static void hisi_pcie_handle_error(struct platform_device *pdev,
+ const struct hisi_pcie_error_data *edata)
+{
+ struct device *dev = &pdev->dev;
+ int idx, rc;
+ const unsigned long valid_bits[] = {BITMAP_FROM_U64(edata->val_bits)};
+
+ if (edata->val_bits == 0) {
+ dev_warn(dev, "%s: no valid error information\n", __func__);
+ return;
+ }
+
+ dev_info(dev, "\nHISI : HIP : PCIe controller error\n");
+ if (edata->val_bits & HISI_PCIE_LOCAL_VALID_SOC_ID)
+ dev_info(dev, "Table version = %d\n", edata->version);
+ if (edata->val_bits & HISI_PCIE_LOCAL_VALID_SOCKET_ID)
+ dev_info(dev, "Socket ID = %d\n", edata->socket_id);
+ if (edata->val_bits & HISI_PCIE_LOCAL_VALID_NIMBUS_ID)
+ dev_info(dev, "Nimbus ID = %d\n", edata->nimbus_id);
+ if (edata->val_bits & HISI_PCIE_LOCAL_VALID_SUB_MODULE_ID)
+ dev_info(dev, "Sub Module = %s\n",
+ hisi_pcie_get_string(hisi_pcie_sub_module,
+ ARRAY_SIZE(hisi_pcie_sub_module),
+ edata->sub_module_id));
+ if (edata->val_bits & HISI_PCIE_LOCAL_VALID_CORE_ID)
+ dev_info(dev, "Core ID = core%d\n", edata->core_id);
+ if (edata->val_bits & HISI_PCIE_LOCAL_VALID_PORT_ID)
+ dev_info(dev, "Port ID = port%d\n", edata->port_id);
+ if (edata->val_bits & HISI_PCIE_LOCAL_VALID_ERR_SEVERITY)
+ dev_info(dev, "Error severity = %s\n",
+ hisi_pcie_get_string(hisi_pcie_error_sev,
+ ARRAY_SIZE(hisi_pcie_error_sev),
+ edata->err_severity));
+ if (edata->val_bits & HISI_PCIE_LOCAL_VALID_ERR_TYPE)
+ dev_info(dev, "Error type = 0x%x\n", edata->err_type);
+
+ dev_info(dev, "Reg Dump:\n");
+ idx = HISI_PCIE_LOCAL_VALID_ERR_MISC;
+ for_each_set_bit_from(idx, valid_bits,
+ HISI_PCIE_LOCAL_VALID_ERR_MISC + HISI_PCIE_ERR_MISC_REGS)
+ dev_info(dev, "ERR_MISC_%d = 0x%x\n", idx - HISI_PCIE_LOCAL_VALID_ERR_MISC,
+ edata->err_misc[idx - HISI_PCIE_LOCAL_VALID_ERR_MISC]);
+
+ if (edata->err_severity != HISI_PCIE_ERR_SEV_RECOVERABLE)
+ return;
+
+ /*
+ * Recover from the PCIe controller error by trying to reset the
+ * PCI port.
+ */
+ rc = hisi_pcie_port_do_recovery(pdev, edata->socket_id,
+ HISI_PCIE_PORT_ID(edata->core_id, edata->port_id));
+ if (rc)
+ dev_info(dev, "fail to do hisi pcie port reset\n");
+}
+
+static int hisi_pcie_notify_error(struct notifier_block *nb,
+ unsigned long event, void *data)
+{
+ struct acpi_hest_generic_data *gdata = data;
+ const struct hisi_pcie_error_data *error_data = acpi_hest_get_payload(gdata);
+ struct hisi_pcie_error_private *priv;
+ struct device *dev;
+ struct platform_device *pdev;
+ guid_t err_sec_guid;
+ u8 socket;
+
+ import_guid(&err_sec_guid, gdata->section_type);
+ if (!guid_equal(&err_sec_guid, &hisi_pcie_sec_guid))
+ return NOTIFY_DONE;
+
+ priv = container_of(nb, struct hisi_pcie_error_private, nb);
+ dev = priv->dev;
+
+ if (device_property_read_u8(dev, "socket", &socket))
+ return NOTIFY_DONE;
+
+ if (error_data->socket_id != socket)
+ return NOTIFY_DONE;
+
+ pdev = container_of(dev, struct platform_device, dev);
+ hisi_pcie_handle_error(pdev, error_data);
+
+ return NOTIFY_OK;
+}
+
+static int hisi_pcie_error_handler_probe(struct platform_device *pdev)
+{
+ struct hisi_pcie_error_private *priv;
+ int ret;
+
+ priv = devm_kzalloc(&pdev->dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ priv->nb.notifier_call = hisi_pcie_notify_error;
+ priv->dev = &pdev->dev;
+ ret = ghes_register_vendor_record_notifier(&priv->nb);
+ if (ret) {
+ dev_err(&pdev->dev,
+ "Failed to register hisi pcie controller error handler with apei\n");
+ return ret;
+ }
+
+ platform_set_drvdata(pdev, priv);
+
+ return 0;
+}
+
+static int hisi_pcie_error_handler_remove(struct platform_device *pdev)
+{
+ struct hisi_pcie_error_private *priv = platform_get_drvdata(pdev);
+
+ ghes_unregister_vendor_record_notifier(&priv->nb);
+
+ return 0;
+}
+
+static const struct acpi_device_id hisi_pcie_acpi_match[] = {
+ { "HISI0361", 0 },
+ { }
+};
+
+static struct platform_driver hisi_pcie_error_handler_driver = {
+ .driver = {
+ .name = "hisi-pcie-error-handler",
+ .acpi_match_table = hisi_pcie_acpi_match,
+ },
+ .probe = hisi_pcie_error_handler_probe,
+ .remove = hisi_pcie_error_handler_remove,
+};
+module_platform_driver(hisi_pcie_error_handler_driver);
+
+MODULE_DESCRIPTION("HiSilicon HIP PCIe controller error handling driver");
+MODULE_LICENSE("GPL v2");
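
A quick arithmetic check of the port-ID macros in the new pcie-hisi-error.c,
using illustrative numbers: the error handler combines core_id and port_id
into a single socket-level value, and the reset path splits it back apart:

  /*
   * Example with core_id = 1, port_id = 6:
   *   HISI_PCIE_PORT_ID(1, 6)     = (6 >> 1) + (1 << 3) = 11
   *   HISI_PCIE_CORE_ID(11)       = 11 >> 3             = 1
   *   HISI_PCIE_CORE_PORT_ID(11)  = (11 & 7) << 1       = 6
   * so the combined value round-trips back to the original core/port pair.
   */
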
diff --git a/drivers/pci/ecam.c b/drivers/pci/ecam.c
index 8f065a42fc1a..b54d32a31669 100644
--- a/drivers/pci/ecam.c
+++ b/drivers/pci/ecam.c
@@ -168,4 +168,14 @@ const struct pci_ecam_ops pci_32b_ops = {
.write = pci_generic_config_write32,
}
};
+
+/* ECAM ops for 32-bit read only (non-compliant) */
+const struct pci_ecam_ops pci_32b_read_ops = {
+ .bus_shift = 20,
+ .pci_ops = {
+ .map_bus = pci_ecam_map_bus,
+ .read = pci_generic_config_read32,
+ .write = pci_generic_config_write,
+ }
+};
#endif
diff --git a/drivers/pci/hotplug/pciehp_ctrl.c b/drivers/pci/hotplug/pciehp_ctrl.c
index 6503d15effbb..2f5f4bb42dcc 100644
--- a/drivers/pci/hotplug/pciehp_ctrl.c
+++ b/drivers/pci/hotplug/pciehp_ctrl.c
@@ -73,10 +73,8 @@ static int board_added(struct controller *ctrl)
/* Check link training status */
retval = pciehp_check_link_status(ctrl);
- if (retval) {
- ctrl_err(ctrl, "Failed to check link status\n");
+ if (retval)
goto err_exit;
- }
/* Check for a power fault */
if (ctrl->power_fault_detected || pciehp_query_power_fault(ctrl)) {
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 53433b37e181..fb3840e222ad 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -283,8 +283,6 @@ static void pcie_wait_for_presence(struct pci_dev *pdev)
msleep(10);
timeout -= 10;
} while (timeout > 0);
-
- pci_info(pdev, "Timeout waiting for Presence Detect\n");
}
int pciehp_check_link_status(struct controller *ctrl)
@@ -293,8 +291,10 @@ int pciehp_check_link_status(struct controller *ctrl)
bool found;
u16 lnk_status;
- if (!pcie_wait_for_link(pdev, true))
+ if (!pcie_wait_for_link(pdev, true)) {
+ ctrl_info(ctrl, "Slot(%s): No link\n", slot_name(ctrl));
return -1;
+ }
if (ctrl->inband_presence_disabled)
pcie_wait_for_presence(pdev);
@@ -311,15 +311,18 @@ int pciehp_check_link_status(struct controller *ctrl)
ctrl_dbg(ctrl, "%s: lnk_status = %x\n", __func__, lnk_status);
if ((lnk_status & PCI_EXP_LNKSTA_LT) ||
!(lnk_status & PCI_EXP_LNKSTA_NLW)) {
- ctrl_err(ctrl, "link training error: status %#06x\n",
- lnk_status);
+ ctrl_info(ctrl, "Slot(%s): Cannot train link: status %#06x\n",
+ slot_name(ctrl), lnk_status);
return -1;
}
pcie_update_link_speed(ctrl->pcie->port->subordinate, lnk_status);
- if (!found)
+ if (!found) {
+ ctrl_info(ctrl, "Slot(%s): No device found\n",
+ slot_name(ctrl));
return -1;
+ }
return 0;
}
diff --git a/drivers/pci/hotplug/rpadlpar_core.c b/drivers/pci/hotplug/rpadlpar_core.c
index f979b7098acf..0a3c80ba66be 100644
--- a/drivers/pci/hotplug/rpadlpar_core.c
+++ b/drivers/pci/hotplug/rpadlpar_core.c
@@ -40,13 +40,13 @@ static DEFINE_MUTEX(rpadlpar_mutex);
static struct device_node *find_vio_slot_node(char *drc_name)
{
struct device_node *parent = of_find_node_by_name(NULL, "vdevice");
- struct device_node *dn = NULL;
+ struct device_node *dn;
int rc;
if (!parent)
return NULL;
- while ((dn = of_get_next_child(parent, dn))) {
+ for_each_child_of_node(parent, dn) {
rc = rpaphp_check_drc_props(dn, drc_name, NULL);
if (rc == 0)
break;
@@ -60,10 +60,10 @@ static struct device_node *find_vio_slot_node(char *drc_name)
static struct device_node *find_php_slot_pci_node(char *drc_name,
char *drc_type)
{
- struct device_node *np = NULL;
+ struct device_node *np;
int rc;
- while ((np = of_find_node_by_name(np, "pci"))) {
+ for_each_node_by_name(np, "pci") {
rc = rpaphp_check_drc_props(np, drc_name, drc_type);
if (rc == 0)
break;
diff --git a/drivers/pci/hotplug/shpchp_ctrl.c b/drivers/pci/hotplug/shpchp_ctrl.c
index afdc52d1cae7..aedd9dfd2a16 100644
--- a/drivers/pci/hotplug/shpchp_ctrl.c
+++ b/drivers/pci/hotplug/shpchp_ctrl.c
@@ -299,7 +299,6 @@ static int board_added(struct slot *p_slot)
if (p_slot->status == 0xFF) {
/* power fault occurred, but it was benign */
ctrl_dbg(ctrl, "%s: Power fault\n", __func__);
- rc = POWER_FAILURE;
p_slot->status = 0;
goto err_exit;
}
diff --git a/drivers/pci/p2pdma.c b/drivers/pci/p2pdma.c
index 64ebed129dbf..85fc9936fa9e 100644
--- a/drivers/pci/p2pdma.c
+++ b/drivers/pci/p2pdma.c
@@ -53,7 +53,7 @@ static ssize_t size_show(struct device *dev, struct device_attribute *attr,
if (pdev->p2pdma->pool)
size = gen_pool_size(pdev->p2pdma->pool);
- return snprintf(buf, PAGE_SIZE, "%zd\n", size);
+ return scnprintf(buf, PAGE_SIZE, "%zd\n", size);
}
static DEVICE_ATTR_RO(size);
@@ -66,7 +66,7 @@ static ssize_t available_show(struct device *dev, struct device_attribute *attr,
if (pdev->p2pdma->pool)
avail = gen_pool_avail(pdev->p2pdma->pool);
- return snprintf(buf, PAGE_SIZE, "%zd\n", avail);
+ return scnprintf(buf, PAGE_SIZE, "%zd\n", avail);
}
static DEVICE_ATTR_RO(available);
@@ -75,8 +75,8 @@ static ssize_t published_show(struct device *dev, struct device_attribute *attr,
{
struct pci_dev *pdev = to_pci_dev(dev);
- return snprintf(buf, PAGE_SIZE, "%d\n",
- pdev->p2pdma->p2pmem_published);
+ return scnprintf(buf, PAGE_SIZE, "%d\n",
+ pdev->p2pdma->p2pmem_published);
}
static DEVICE_ATTR_RO(published);
@@ -761,7 +761,7 @@ struct scatterlist *pci_p2pmem_alloc_sgl(struct pci_dev *pdev,
struct scatterlist *sg;
void *addr;
- sg = kzalloc(sizeof(*sg), GFP_KERNEL);
+ sg = kmalloc(sizeof(*sg), GFP_KERNEL);
if (!sg)
return NULL;
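
On the p2pdma snprintf() -> scnprintf() conversions above: the two differ only
in their return value, which is exactly what a sysfs show() method hands back
to user space. A small illustration (buffer size and string are arbitrary):

  char buf[10];
  snprintf(buf, sizeof(buf), "%s", "0123456789ABC");   /* returns 13: would-be length */
  scnprintf(buf, sizeof(buf), "%s", "0123456789ABC");  /* returns 9: bytes actually
                                                        * stored (excluding the NUL),
                                                        * the value show() should return */
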
diff --git a/drivers/pci/pci-acpi.c b/drivers/pci/pci-acpi.c
index d5869a03f748..154db9a47511 100644
--- a/drivers/pci/pci-acpi.c
+++ b/drivers/pci/pci-acpi.c
@@ -1167,7 +1167,7 @@ static struct acpi_device *acpi_pci_find_companion(struct device *dev)
* @pdev: the PCI device whose delay is to be updated
* @handle: ACPI handle of this device
*
- * Update the d3_delay and d3cold_delay of a PCI device from the ACPI _DSM
+ * Update the d3hot_delay and d3cold_delay of a PCI device from the ACPI _DSM
* control method of either the device itself or the PCI host bridge.
*
* Function 8, "Reset Delay," applies to the entire hierarchy below a PCI
@@ -1206,8 +1206,8 @@ static void pci_acpi_optimize_delay(struct pci_dev *pdev,
}
if (elements[3].type == ACPI_TYPE_INTEGER) {
value = (int)elements[3].integer.value / 1000;
- if (value < PCI_PM_D3_WAIT)
- pdev->d3_delay = value;
+ if (value < PCI_PM_D3HOT_WAIT)
+ pdev->d3hot_delay = value;
}
}
ACPI_FREE(obj);
diff --git a/drivers/pci/pci-bridge-emul.c b/drivers/pci/pci-bridge-emul.c
index ccf26d12ec61..139869d50eb2 100644
--- a/drivers/pci/pci-bridge-emul.c
+++ b/drivers/pci/pci-bridge-emul.c
@@ -294,6 +294,7 @@ int pci_bridge_emul_init(struct pci_bridge_emul *bridge,
return 0;
}
+EXPORT_SYMBOL_GPL(pci_bridge_emul_init);
/*
* Cleanup a pci_bridge_emul structure that was previously initialized
@@ -305,6 +306,7 @@ void pci_bridge_emul_cleanup(struct pci_bridge_emul *bridge)
kfree(bridge->pcie_cap_regs_behavior);
kfree(bridge->pci_regs_behavior);
}
+EXPORT_SYMBOL_GPL(pci_bridge_emul_cleanup);
/*
* Should be called by the PCI controller driver when reading the PCI
@@ -366,6 +368,7 @@ int pci_bridge_emul_conf_read(struct pci_bridge_emul *bridge, int where,
return PCIBIOS_SUCCESSFUL;
}
+EXPORT_SYMBOL_GPL(pci_bridge_emul_conf_read);
/*
* Should be called by the PCI controller driver when writing the PCI
@@ -430,3 +433,4 @@ int pci_bridge_emul_conf_write(struct pci_bridge_emul *bridge, int where,
return PCIBIOS_SUCCESSFUL;
}
+EXPORT_SYMBOL_GPL(pci_bridge_emul_conf_write);
diff --git a/drivers/pci/pci-driver.c b/drivers/pci/pci-driver.c
index 449466f71040..40be221e69cf 100644
--- a/drivers/pci/pci-driver.c
+++ b/drivers/pci/pci-driver.c
@@ -969,12 +969,6 @@ static int pci_pm_resume(struct device *dev)
#ifdef CONFIG_HIBERNATE_CALLBACKS
-/*
- * pcibios_pm_ops - provide arch-specific hooks when a PCI device is doing
- * a hibernate transition
- */
-struct dev_pm_ops __weak pcibios_pm_ops;
-
static int pci_pm_freeze(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
@@ -1033,9 +1027,6 @@ static int pci_pm_freeze_noirq(struct device *dev)
pci_pm_set_unknown_state(pci_dev);
- if (pcibios_pm_ops.freeze_noirq)
- return pcibios_pm_ops.freeze_noirq(dev);
-
return 0;
}
@@ -1043,13 +1034,6 @@ static int pci_pm_thaw_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int error;
-
- if (pcibios_pm_ops.thaw_noirq) {
- error = pcibios_pm_ops.thaw_noirq(dev);
- if (error)
- return error;
- }
/*
* The pm->thaw_noirq() callback assumes the device has been
@@ -1174,9 +1158,6 @@ static int pci_pm_poweroff_noirq(struct device *dev)
pci_fixup_device(pci_fixup_suspend_late, pci_dev);
- if (pcibios_pm_ops.poweroff_noirq)
- return pcibios_pm_ops.poweroff_noirq(dev);
-
return 0;
}
@@ -1184,13 +1165,6 @@ static int pci_pm_restore_noirq(struct device *dev)
{
struct pci_dev *pci_dev = to_pci_dev(dev);
const struct dev_pm_ops *pm = dev->driver ? dev->driver->pm : NULL;
- int error;
-
- if (pcibios_pm_ops.restore_noirq) {
- error = pcibios_pm_ops.restore_noirq(dev);
- if (error)
- return error;
- }
pci_pm_default_resume_early(pci_dev);
pci_fixup_device(pci_fixup_resume_early, pci_dev);
diff --git a/drivers/pci/pci-pf-stub.c b/drivers/pci/pci-pf-stub.c
index a0b2bd6c918a..45855a5e9fca 100644
--- a/drivers/pci/pci-pf-stub.c
+++ b/drivers/pci/pci-pf-stub.c
@@ -37,18 +37,6 @@ static struct pci_driver pf_stub_driver = {
.probe = pci_pf_stub_probe,
.sriov_configure = pci_sriov_configure_simple,
};
-
-static int __init pci_pf_stub_init(void)
-{
- return pci_register_driver(&pf_stub_driver);
-}
-
-static void __exit pci_pf_stub_exit(void)
-{
- pci_unregister_driver(&pf_stub_driver);
-}
-
-module_init(pci_pf_stub_init);
-module_exit(pci_pf_stub_exit);
+module_pci_driver(pf_stub_driver);
MODULE_LICENSE("GPL");
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 6d78df981d41..d15c881e2e7e 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -574,7 +574,7 @@ static ssize_t driver_override_show(struct device *dev,
ssize_t len;
device_lock(dev);
- len = snprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
+ len = scnprintf(buf, PAGE_SIZE, "%s\n", pdev->driver_override);
device_unlock(dev);
return len;
}
@@ -708,6 +708,7 @@ static ssize_t pci_read_config(struct file *filp, struct kobject *kobj,
data[off - init_off + 3] = (val >> 24) & 0xff;
off += 4;
size -= 4;
+ cond_resched();
}
if (size >= 2) {
@@ -1196,10 +1197,10 @@ static int pci_create_resource_files(struct pci_dev *pdev)
}
return 0;
}
-#else /* !HAVE_PCI_MMAP */
+#else /* !(defined(HAVE_PCI_MMAP) || defined(ARCH_GENERIC_PCI_MMAP_RESOURCE)) */
int __weak pci_create_resource_files(struct pci_dev *dev) { return 0; }
void __weak pci_remove_resource_files(struct pci_dev *dev) { return; }
-#endif /* HAVE_PCI_MMAP */
+#endif
/**
* pci_write_rom - used to enable access to the PCI ROM display
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index a458c46d7e39..0e63e0e77708 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -15,7 +15,6 @@
#include <linux/init.h>
#include <linux/msi.h>
#include <linux/of.h>
-#include <linux/of_pci.h>
#include <linux/pci.h>
#include <linux/pm.h>
#include <linux/slab.h>
@@ -30,8 +29,6 @@
#include <linux/pm_runtime.h>
#include <linux/pci_hotplug.h>
#include <linux/vmalloc.h>
-#include <linux/pci-ats.h>
-#include <asm/setup.h>
#include <asm/dma.h>
#include <linux/aer.h>
#include "pci.h"
@@ -49,7 +46,7 @@ EXPORT_SYMBOL(isa_dma_bridge_buggy);
int pci_pci_problems;
EXPORT_SYMBOL(pci_pci_problems);
-unsigned int pci_pm_d3_delay;
+unsigned int pci_pm_d3hot_delay;
static void pci_pme_list_scan(struct work_struct *work);
@@ -66,10 +63,10 @@ struct pci_pme_device {
static void pci_dev_d3_sleep(struct pci_dev *dev)
{
- unsigned int delay = dev->d3_delay;
+ unsigned int delay = dev->d3hot_delay;
- if (delay < pci_pm_d3_delay)
- delay = pci_pm_d3_delay;
+ if (delay < pci_pm_d3hot_delay)
+ delay = pci_pm_d3hot_delay;
if (delay)
msleep(delay);
@@ -101,7 +98,19 @@ unsigned long pci_hotplug_mmio_pref_size = DEFAULT_HOTPLUG_MMIO_PREF_SIZE;
#define DEFAULT_HOTPLUG_BUS_SIZE 1
unsigned long pci_hotplug_bus_size = DEFAULT_HOTPLUG_BUS_SIZE;
+
+/* PCIe MPS/MRRS strategy; can be overridden by kernel command-line param */
+#ifdef CONFIG_PCIE_BUS_TUNE_OFF
+enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_TUNE_OFF;
+#elif defined CONFIG_PCIE_BUS_SAFE
+enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_SAFE;
+#elif defined CONFIG_PCIE_BUS_PERFORMANCE
+enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PERFORMANCE;
+#elif defined CONFIG_PCIE_BUS_PEER2PEER
+enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_PEER2PEER;
+#else
enum pcie_bus_config_types pcie_bus_config = PCIE_BUS_DEFAULT;
+#endif
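The Kconfig choice only seeds this initial value; the corresponding 'pci=' command-line options still override it at boot, as the comment above notes. A simplified, hypothetical sketch of that override (the real parsing lives in pci_setup() in this file; the helper name here is illustrative only):

static void pcie_bus_config_override(const char *str)
{
	if (!strcmp(str, "pcie_bus_tune_off"))
		pcie_bus_config = PCIE_BUS_TUNE_OFF;
	else if (!strcmp(str, "pcie_bus_safe"))
		pcie_bus_config = PCIE_BUS_SAFE;
	else if (!strcmp(str, "pcie_bus_perf"))
		pcie_bus_config = PCIE_BUS_PERFORMANCE;
	else if (!strcmp(str, "pcie_bus_peer2peer"))
		pcie_bus_config = PCIE_BUS_PEER2PEER;
}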
/*
* The default CLS is used if arch didn't set CLS explicitly and not
@@ -876,6 +885,10 @@ static void pci_std_enable_acs(struct pci_dev *dev)
/* Upstream Forwarding */
ctrl |= (cap & PCI_ACS_UF);
+ /* Enable Translation Blocking for external devices */
+ if (dev->external_facing || dev->untrusted)
+ ctrl |= (cap & PCI_ACS_TB);
+
pci_write_config_word(dev, pos + PCI_ACS_CTRL, ctrl);
}
@@ -1065,7 +1078,7 @@ static int pci_raw_set_power_state(struct pci_dev *dev, pci_power_t state)
if (state == PCI_D3hot || dev->current_state == PCI_D3hot)
pci_dev_d3_sleep(dev);
else if (state == PCI_D2 || dev->current_state == PCI_D2)
- msleep(PCI_PM_D2_DELAY);
+ udelay(PCI_PM_D2_DELAY);
pci_read_config_word(dev, dev->pm_cap + PCI_PM_CTRL, &pmcsr);
dev->current_state = (pmcsr & PCI_PM_CTRL_STATE_MASK);
@@ -3013,7 +3026,7 @@ void pci_pm_init(struct pci_dev *dev)
}
dev->pm_cap = pm;
- dev->d3_delay = PCI_PM_D3_WAIT;
+ dev->d3hot_delay = PCI_PM_D3HOT_WAIT;
dev->d3cold_delay = PCI_PM_D3COLD_WAIT;
dev->bridge_d3 = pci_bridge_d3_possible(dev);
dev->d3cold_allowed = true;
@@ -3038,7 +3051,7 @@ void pci_pm_init(struct pci_dev *dev)
(pmc & PCI_PM_CAP_PME_D0) ? " D0" : "",
(pmc & PCI_PM_CAP_PME_D1) ? " D1" : "",
(pmc & PCI_PM_CAP_PME_D2) ? " D2" : "",
- (pmc & PCI_PM_CAP_PME_D3) ? " D3hot" : "",
+ (pmc & PCI_PM_CAP_PME_D3hot) ? " D3hot" : "",
(pmc & PCI_PM_CAP_PME_D3cold) ? " D3cold" : "");
dev->pme_support = pmc >> PCI_PM_CAP_PME_SHIFT;
dev->pme_poll = true;
@@ -4621,7 +4634,7 @@ static int pci_af_flr(struct pci_dev *dev, int probe)
*
* NOTE: This causes the caller to sleep for twice the device power transition
* cooldown period, which for the D0->D3hot and D3hot->D0 transitions is 10 ms
- * by default (i.e. unless the @dev's d3_delay field has a different value).
+ * by default (i.e. unless the @dev's d3hot_delay field has a different value).
* Moreover, only devices in D0 can be reset by this function.
*/
static int pci_pm_reset(struct pci_dev *dev, int probe)
@@ -4701,9 +4714,7 @@ static bool pcie_wait_for_link_delay(struct pci_dev *pdev, bool active,
}
if (active && ret)
msleep(delay);
- else if (ret != active)
- pci_info(pdev, "Data Link Layer Link Active not %s in 1000 msec\n",
- active ? "set" : "cleared");
+
return ret == active;
}
@@ -4828,6 +4839,7 @@ void pci_bridge_wait_for_secondary_bus(struct pci_dev *dev)
delay);
if (!pcie_wait_for_link_delay(dev, true, delay)) {
/* Did not train, no need to wait any further */
+ pci_info(dev, "Data Link Layer Link Active not set in 1000 msec\n");
return;
}
}
@@ -4920,16 +4932,10 @@ static int pci_reset_hotplug_slot(struct hotplug_slot *hotplug, int probe)
static int pci_dev_reset_slot_function(struct pci_dev *dev, int probe)
{
- struct pci_dev *pdev;
-
- if (dev->subordinate || !dev->slot ||
+ if (dev->multifunction || dev->subordinate || !dev->slot ||
dev->dev_flags & PCI_DEV_FLAGS_NO_BUS_RESET)
return -ENOTTY;
- list_for_each_entry(pdev, &dev->bus->devices, bus_list)
- if (pdev != dev && pdev->slot == dev->slot)
- return -ENOTTY;
-
return pci_reset_hotplug_slot(dev->slot->hotplug, probe);
}
@@ -6005,7 +6011,7 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
if (flags & PCI_VGA_STATE_CHANGE_DECODES) {
pci_read_config_word(dev, PCI_COMMAND, &cmd);
- if (decode == true)
+ if (decode)
cmd |= command_bits;
else
cmd &= ~command_bits;
@@ -6021,7 +6027,7 @@ int pci_set_vga_state(struct pci_dev *dev, bool decode,
if (bridge) {
pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
&cmd);
- if (decode == true)
+ if (decode)
cmd |= PCI_BRIDGE_CTL_VGA;
else
cmd &= ~PCI_BRIDGE_CTL_VGA;
@@ -6350,7 +6356,7 @@ static ssize_t resource_alignment_show(struct bus_type *bus, char *buf)
spin_lock(&resource_alignment_lock);
if (resource_alignment_param)
- count = snprintf(buf, PAGE_SIZE, "%s", resource_alignment_param);
+ count = scnprintf(buf, PAGE_SIZE, "%s", resource_alignment_param);
spin_unlock(&resource_alignment_lock);
/*
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index fa12f7cbc1a0..f86cae9aa1f4 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -43,10 +43,9 @@ int pci_probe_reset_function(struct pci_dev *dev);
int pci_bridge_secondary_bus_reset(struct pci_dev *dev);
int pci_bus_error_reset(struct pci_dev *dev);
-#define PCI_PM_D2_DELAY 200
-#define PCI_PM_D3_WAIT 10
-#define PCI_PM_D3COLD_WAIT 100
-#define PCI_PM_BUS_WAIT 50
+#define PCI_PM_D2_DELAY 200 /* usec; see PCIe r4.0, sec 5.9.1 */
+#define PCI_PM_D3HOT_WAIT 10 /* msec */
+#define PCI_PM_D3COLD_WAIT 100 /* msec */
/**
* struct pci_platform_pm_ops - Firmware PM callbacks
@@ -178,7 +177,7 @@ extern struct mutex pci_slot_mutex;
extern raw_spinlock_t pci_lock;
-extern unsigned int pci_pm_d3_delay;
+extern unsigned int pci_pm_d3hot_delay;
#ifdef CONFIG_PCI_MSI
void pci_no_msi(void);
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 253c30cc1967..ac0557a305af 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -74,14 +74,6 @@ struct pcie_link_state {
* has one slot under it, so at most there are 8 functions.
*/
struct aspm_latency acceptable[8];
-
- /* L1 PM Substate info */
- struct {
- u32 up_cap_ptr; /* L1SS cap ptr in upstream dev */
- u32 dw_cap_ptr; /* L1SS cap ptr in downstream dev */
- u32 ctl1; /* value to be programmed in ctl1 */
- u32 ctl2; /* value to be programmed in ctl2 */
- } l1ss;
};
static int aspm_disabled, aspm_force;
@@ -308,8 +300,10 @@ static void pcie_aspm_configure_common_clock(struct pcie_link_state *link)
}
/* Convert L0s latency encoding to ns */
-static u32 calc_l0s_latency(u32 encoding)
+static u32 calc_l0s_latency(u32 lnkcap)
{
+ u32 encoding = (lnkcap & PCI_EXP_LNKCAP_L0SEL) >> 12;
+
if (encoding == 0x7)
return (5 * 1000); /* > 4us */
return (64 << encoding);
@@ -324,8 +318,10 @@ static u32 calc_l0s_acceptable(u32 encoding)
}
/* Convert L1 latency encoding to ns */
-static u32 calc_l1_latency(u32 encoding)
+static u32 calc_l1_latency(u32 lnkcap)
{
+ u32 encoding = (lnkcap & PCI_EXP_LNKCAP_L1EL) >> 15;
+
if (encoding == 0x7)
return (65 * 1000); /* > 64us */
return (1000 << encoding);
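A brief worked example of the two decoders above (encodings are illustrative):

/*
 * LNKCAP L0s Exit Latency encoding 0b010 -> 64 << 2   =  256 ns
 * LNKCAP L1  Exit Latency encoding 0b010 -> 1000 << 2 = 4000 ns
 * The all-ones encoding (0x7) means "greater than 4 us / 64 us" and is
 * treated as 5000 ns / 65000 ns respectively.
 */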
@@ -380,58 +376,6 @@ static void encode_l12_threshold(u32 threshold_us, u32 *scale, u32 *value)
}
}
-struct aspm_register_info {
- u32 support:2;
- u32 enabled:2;
- u32 latency_encoding_l0s;
- u32 latency_encoding_l1;
-
- /* L1 substates */
- u32 l1ss_cap_ptr;
- u32 l1ss_cap;
- u32 l1ss_ctl1;
- u32 l1ss_ctl2;
-};
-
-static void pcie_get_aspm_reg(struct pci_dev *pdev,
- struct aspm_register_info *info)
-{
- u16 reg16;
- u32 reg32;
-
- pcie_capability_read_dword(pdev, PCI_EXP_LNKCAP, &reg32);
- info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
- info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
- info->latency_encoding_l1 = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
- pcie_capability_read_word(pdev, PCI_EXP_LNKCTL, &reg16);
- info->enabled = reg16 & PCI_EXP_LNKCTL_ASPMC;
-
- /* Read L1 PM substate capabilities */
- info->l1ss_cap = info->l1ss_ctl1 = info->l1ss_ctl2 = 0;
- info->l1ss_cap_ptr = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_L1SS);
- if (!info->l1ss_cap_ptr)
- return;
- pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CAP,
- &info->l1ss_cap);
- if (!(info->l1ss_cap & PCI_L1SS_CAP_L1_PM_SS)) {
- info->l1ss_cap = 0;
- return;
- }
-
- /*
- * If we don't have LTR for the entire path from the Root Complex
- * to this device, we can't use ASPM L1.2 because it relies on the
- * LTR_L1.2_THRESHOLD. See PCIe r4.0, secs 5.5.4, 6.18.
- */
- if (!pdev->ltr_path)
- info->l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;
-
- pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL1,
- &info->l1ss_ctl1);
- pci_read_config_dword(pdev, info->l1ss_cap_ptr + PCI_L1SS_CTL2,
- &info->l1ss_ctl2);
-}
-
static void pcie_aspm_check_latency(struct pci_dev *endpoint)
{
u32 latency, l1_switch_latency = 0;
@@ -493,39 +437,49 @@ static struct pci_dev *pci_function_0(struct pci_bus *linkbus)
return NULL;
}
+static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
+ u32 clear, u32 set)
+{
+ u32 val;
+
+ pci_read_config_dword(pdev, pos, &val);
+ val &= ~clear;
+ val |= set;
+ pci_write_config_dword(pdev, pos, val);
+}
+
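pci_clear_and_set_dword() is a plain read-modify-write of one config dword; a minimal usage sketch mirroring the calls made later in this file (device and mask are whatever the caller needs):

/* Clear the L1.2 enable bits without disturbing the rest of CTL1 */
pci_clear_and_set_dword(pdev, pdev->l1ss + PCI_L1SS_CTL1,
			PCI_L1SS_CTL1_L1_2_MASK, 0);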
/* Calculate L1.2 PM substate timing parameters */
static void aspm_calc_l1ss_info(struct pcie_link_state *link,
- struct aspm_register_info *upreg,
- struct aspm_register_info *dwreg)
+ u32 parent_l1ss_cap, u32 child_l1ss_cap)
{
+ struct pci_dev *child = link->downstream, *parent = link->pdev;
u32 val1, val2, scale1, scale2;
u32 t_common_mode, t_power_on, l1_2_threshold, scale, value;
-
- link->l1ss.up_cap_ptr = upreg->l1ss_cap_ptr;
- link->l1ss.dw_cap_ptr = dwreg->l1ss_cap_ptr;
- link->l1ss.ctl1 = link->l1ss.ctl2 = 0;
+ u32 ctl1 = 0, ctl2 = 0;
+ u32 pctl1, pctl2, cctl1, cctl2;
+ u32 pl1_2_enables, cl1_2_enables;
if (!(link->aspm_support & ASPM_STATE_L1_2_MASK))
return;
/* Choose the greater of the two Port Common_Mode_Restore_Times */
- val1 = (upreg->l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
- val2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
+ val1 = (parent_l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
+ val2 = (child_l1ss_cap & PCI_L1SS_CAP_CM_RESTORE_TIME) >> 8;
t_common_mode = max(val1, val2);
/* Choose the greater of the two Port T_POWER_ON times */
- val1 = (upreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
- scale1 = (upreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
- val2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
- scale2 = (dwreg->l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
-
- if (calc_l1ss_pwron(link->pdev, scale1, val1) >
- calc_l1ss_pwron(link->downstream, scale2, val2)) {
- link->l1ss.ctl2 |= scale1 | (val1 << 3);
- t_power_on = calc_l1ss_pwron(link->pdev, scale1, val1);
+ val1 = (parent_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
+ scale1 = (parent_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
+ val2 = (child_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_VALUE) >> 19;
+ scale2 = (child_l1ss_cap & PCI_L1SS_CAP_P_PWR_ON_SCALE) >> 16;
+
+ if (calc_l1ss_pwron(parent, scale1, val1) >
+ calc_l1ss_pwron(child, scale2, val2)) {
+ ctl2 |= scale1 | (val1 << 3);
+ t_power_on = calc_l1ss_pwron(parent, scale1, val1);
} else {
- link->l1ss.ctl2 |= scale2 | (val2 << 3);
- t_power_on = calc_l1ss_pwron(link->downstream, scale2, val2);
+ ctl2 |= scale2 | (val2 << 3);
+ t_power_on = calc_l1ss_pwron(child, scale2, val2);
}
/*
@@ -540,14 +494,60 @@ static void aspm_calc_l1ss_info(struct pcie_link_state *link,
*/
l1_2_threshold = 2 + 4 + t_common_mode + t_power_on;
encode_l12_threshold(l1_2_threshold, &scale, &value);
- link->l1ss.ctl1 |= t_common_mode << 8 | scale << 29 | value << 16;
+ ctl1 |= t_common_mode << 8 | scale << 29 | value << 16;
+
+ pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1, &pctl1);
+ pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, &pctl2);
+ pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1, &cctl1);
+ pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL2, &cctl2);
+
+ if (ctl1 == pctl1 && ctl1 == cctl1 &&
+ ctl2 == pctl2 && ctl2 == cctl2)
+ return;
+
+ /* Disable L1.2 while updating. See PCIe r5.0, sec 5.5.4, 7.8.3.3 */
+ pl1_2_enables = pctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+ cl1_2_enables = cctl1 & PCI_L1SS_CTL1_L1_2_MASK;
+
+ if (pl1_2_enables || cl1_2_enables) {
+ pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1_2_MASK, 0);
+ pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_L1_2_MASK, 0);
+ }
+
+ /* Program T_POWER_ON times in both ports */
+ pci_write_config_dword(parent, parent->l1ss + PCI_L1SS_CTL2, ctl2);
+ pci_write_config_dword(child, child->l1ss + PCI_L1SS_CTL2, ctl2);
+
+ /* Program Common_Mode_Restore_Time in upstream device */
+ pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_CM_RESTORE_TIME, ctl1);
+
+ /* Program LTR_L1.2_THRESHOLD time in both ports */
+ pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
+ pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
+ PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
+ PCI_L1SS_CTL1_LTR_L12_TH_SCALE, ctl1);
+
+ if (pl1_2_enables || cl1_2_enables) {
+ pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1, 0,
+ pl1_2_enables);
+ pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1, 0,
+ cl1_2_enables);
+ }
}
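To make the CTL1/CTL2 assembly above concrete, a worked example with purely illustrative timings:

/*
 * Suppose t_common_mode = 10 us and t_power_on = 18 us.  Then
 * l1_2_threshold = 2 + 4 + 10 + 18 = 34 us, which encode_l12_threshold()
 * converts into a (scale, value) pair.  ctl1 then carries
 * Common_Mode_Restore_Time in bits 15:8, the LTR_L1.2_THRESHOLD value in
 * bits 25:16 and its scale in bits 31:29, matching the shifts
 * (t_common_mode << 8 | scale << 29 | value << 16) used above, while
 * ctl2 carries the chosen T_POWER_ON scale/value.
 */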
static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
{
struct pci_dev *child = link->downstream, *parent = link->pdev;
+ u32 parent_lnkcap, child_lnkcap;
+ u16 parent_lnkctl, child_lnkctl;
+ u32 parent_l1ss_cap, child_l1ss_cap;
+ u32 parent_l1ss_ctl1 = 0, child_l1ss_ctl1 = 0;
struct pci_bus *linkbus = parent->subordinate;
- struct aspm_register_info upreg, dwreg;
if (blacklist) {
/* Set enabled/disable so that we will disable ASPM later */
@@ -556,26 +556,28 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
return;
}
- /* Get upstream/downstream components' register state */
- pcie_get_aspm_reg(parent, &upreg);
- pcie_get_aspm_reg(child, &dwreg);
-
/*
* If ASPM not supported, don't mess with the clocks and link,
* bail out now.
*/
- if (!(upreg.support & dwreg.support))
+ pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap);
+ pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap);
+ if (!(parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPMS))
return;
/* Configure common clock before checking latencies */
pcie_aspm_configure_common_clock(link);
/*
- * Re-read upstream/downstream components' register state
- * after clock configuration
+ * Re-read upstream/downstream components' register state after
+ * clock configuration. L0s & L1 exit latencies in the otherwise
+ * read-only Link Capabilities may change depending on common clock
+ * configuration (PCIe r5.0, sec 7.5.3.6).
*/
- pcie_get_aspm_reg(parent, &upreg);
- pcie_get_aspm_reg(child, &dwreg);
+ pcie_capability_read_dword(parent, PCI_EXP_LNKCAP, &parent_lnkcap);
+ pcie_capability_read_dword(child, PCI_EXP_LNKCAP, &child_lnkcap);
+ pcie_capability_read_word(parent, PCI_EXP_LNKCTL, &parent_lnkctl);
+ pcie_capability_read_word(child, PCI_EXP_LNKCTL, &child_lnkctl);
/*
* Setup L0s state
@@ -584,44 +586,71 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
* given link unless components on both sides of the link each
* support L0s.
*/
- if (dwreg.support & upreg.support & PCIE_LINK_STATE_L0S)
+ if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L0S)
link->aspm_support |= ASPM_STATE_L0S;
- if (dwreg.enabled & PCIE_LINK_STATE_L0S)
+
+ if (child_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
link->aspm_enabled |= ASPM_STATE_L0S_UP;
- if (upreg.enabled & PCIE_LINK_STATE_L0S)
+ if (parent_lnkctl & PCI_EXP_LNKCTL_ASPM_L0S)
link->aspm_enabled |= ASPM_STATE_L0S_DW;
- link->latency_up.l0s = calc_l0s_latency(upreg.latency_encoding_l0s);
- link->latency_dw.l0s = calc_l0s_latency(dwreg.latency_encoding_l0s);
+ link->latency_up.l0s = calc_l0s_latency(parent_lnkcap);
+ link->latency_dw.l0s = calc_l0s_latency(child_lnkcap);
/* Setup L1 state */
- if (upreg.support & dwreg.support & PCIE_LINK_STATE_L1)
+ if (parent_lnkcap & child_lnkcap & PCI_EXP_LNKCAP_ASPM_L1)
link->aspm_support |= ASPM_STATE_L1;
- if (upreg.enabled & dwreg.enabled & PCIE_LINK_STATE_L1)
+
+ if (parent_lnkctl & child_lnkctl & PCI_EXP_LNKCTL_ASPM_L1)
link->aspm_enabled |= ASPM_STATE_L1;
- link->latency_up.l1 = calc_l1_latency(upreg.latency_encoding_l1);
- link->latency_dw.l1 = calc_l1_latency(dwreg.latency_encoding_l1);
+ link->latency_up.l1 = calc_l1_latency(parent_lnkcap);
+ link->latency_dw.l1 = calc_l1_latency(child_lnkcap);
/* Setup L1 substate */
- if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
+ pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CAP,
+ &parent_l1ss_cap);
+ pci_read_config_dword(child, child->l1ss + PCI_L1SS_CAP,
+ &child_l1ss_cap);
+
+ if (!(parent_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
+ parent_l1ss_cap = 0;
+ if (!(child_l1ss_cap & PCI_L1SS_CAP_L1_PM_SS))
+ child_l1ss_cap = 0;
+
+ /*
+ * If we don't have LTR for the entire path from the Root Complex
+ * to this device, we can't use ASPM L1.2 because it relies on the
+ * LTR_L1.2_THRESHOLD. See PCIe r4.0, secs 5.5.4, 6.18.
+ */
+ if (!child->ltr_path)
+ child_l1ss_cap &= ~PCI_L1SS_CAP_ASPM_L1_2;
+
+ if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_1)
link->aspm_support |= ASPM_STATE_L1_1;
- if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
+ if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_ASPM_L1_2)
link->aspm_support |= ASPM_STATE_L1_2;
- if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
+ if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_1)
link->aspm_support |= ASPM_STATE_L1_1_PCIPM;
- if (upreg.l1ss_cap & dwreg.l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
+ if (parent_l1ss_cap & child_l1ss_cap & PCI_L1SS_CAP_PCIPM_L1_2)
link->aspm_support |= ASPM_STATE_L1_2_PCIPM;
- if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
+ if (parent_l1ss_cap)
+ pci_read_config_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
+ &parent_l1ss_ctl1);
+ if (child_l1ss_cap)
+ pci_read_config_dword(child, child->l1ss + PCI_L1SS_CTL1,
+ &child_l1ss_ctl1);
+
+ if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_1)
link->aspm_enabled |= ASPM_STATE_L1_1;
- if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
+ if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_ASPM_L1_2)
link->aspm_enabled |= ASPM_STATE_L1_2;
- if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
+ if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_1)
link->aspm_enabled |= ASPM_STATE_L1_1_PCIPM;
- if (upreg.l1ss_ctl1 & dwreg.l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
+ if (parent_l1ss_ctl1 & child_l1ss_ctl1 & PCI_L1SS_CTL1_PCIPM_L1_2)
link->aspm_enabled |= ASPM_STATE_L1_2_PCIPM;
if (link->aspm_support & ASPM_STATE_L1SS)
- aspm_calc_l1ss_info(link, &upreg, &dwreg);
+ aspm_calc_l1ss_info(link, parent_l1ss_cap, child_l1ss_cap);
/* Save default state */
link->aspm_default = link->aspm_enabled;
@@ -651,24 +680,11 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
}
}
-static void pci_clear_and_set_dword(struct pci_dev *pdev, int pos,
- u32 clear, u32 set)
-{
- u32 val;
-
- pci_read_config_dword(pdev, pos, &val);
- val &= ~clear;
- val |= set;
- pci_write_config_dword(pdev, pos, val);
-}
-
/* Configure the ASPM L1 substates */
static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
{
u32 val, enable_req;
struct pci_dev *child = link->downstream, *parent = link->pdev;
- u32 up_cap_ptr = link->l1ss.up_cap_ptr;
- u32 dw_cap_ptr = link->l1ss.dw_cap_ptr;
enable_req = (link->aspm_enabled ^ state) & state;
@@ -686,9 +702,9 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
*/
/* Disable all L1 substates */
- pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
+ pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
PCI_L1SS_CTL1_L1SS_MASK, 0);
- pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
+ pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
PCI_L1SS_CTL1_L1SS_MASK, 0);
/*
* If needed, disable L1, and it gets enabled later
@@ -701,30 +717,6 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
PCI_EXP_LNKCTL_ASPM_L1, 0);
}
- if (enable_req & ASPM_STATE_L1_2_MASK) {
-
- /* Program T_POWER_ON times in both ports */
- pci_write_config_dword(parent, up_cap_ptr + PCI_L1SS_CTL2,
- link->l1ss.ctl2);
- pci_write_config_dword(child, dw_cap_ptr + PCI_L1SS_CTL2,
- link->l1ss.ctl2);
-
- /* Program Common_Mode_Restore_Time in upstream device */
- pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_CM_RESTORE_TIME,
- link->l1ss.ctl1);
-
- /* Program LTR_L1.2_THRESHOLD time in both ports */
- pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
- link->l1ss.ctl1);
- pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
- PCI_L1SS_CTL1_LTR_L12_TH_VALUE |
- PCI_L1SS_CTL1_LTR_L12_TH_SCALE,
- link->l1ss.ctl1);
- }
-
val = 0;
if (state & ASPM_STATE_L1_1)
val |= PCI_L1SS_CTL1_ASPM_L1_1;
@@ -736,9 +728,9 @@ static void pcie_config_aspm_l1ss(struct pcie_link_state *link, u32 state)
val |= PCI_L1SS_CTL1_PCIPM_L1_2;
/* Enable what we need to enable */
- pci_clear_and_set_dword(parent, up_cap_ptr + PCI_L1SS_CTL1,
+ pci_clear_and_set_dword(parent, parent->l1ss + PCI_L1SS_CTL1,
PCI_L1SS_CTL1_L1SS_MASK, val);
- pci_clear_and_set_dword(child, dw_cap_ptr + PCI_L1SS_CTL1,
+ pci_clear_and_set_dword(child, child->l1ss + PCI_L1SS_CTL1,
PCI_L1SS_CTL1_L1SS_MASK, val);
}
diff --git a/drivers/pci/pcie/bw_notification.c b/drivers/pci/pcie/bw_notification.c
index 77e685771487..565d23cccb8b 100644
--- a/drivers/pci/pcie/bw_notification.c
+++ b/drivers/pci/pcie/bw_notification.c
@@ -14,6 +14,8 @@
* and warns when links become degraded in operation.
*/
+#define dev_fmt(fmt) "bw_notification: " fmt
+
#include "../pci.h"
#include "portdrv.h"
@@ -97,6 +99,7 @@ static int pcie_bandwidth_notification_probe(struct pcie_device *srv)
return ret;
pcie_enable_link_bandwidth_notification(srv->port);
+ pci_info(srv->port, "enabled with IRQ %d\n", srv->irq);
return 0;
}
diff --git a/drivers/pci/pcie/dpc.c b/drivers/pci/pcie/dpc.c
index daa9a4153776..e05aba86a317 100644
--- a/drivers/pci/pcie/dpc.c
+++ b/drivers/pci/pcie/dpc.c
@@ -103,7 +103,8 @@ pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
* Wait until the Link is inactive, then clear DPC Trigger Status
* to allow the Port to leave DPC.
*/
- pcie_wait_for_link(pdev, false);
+ if (!pcie_wait_for_link(pdev, false))
+ pci_info(pdev, "Data Link Layer Link Active not cleared in 1000 msec\n");
if (pdev->dpc_rp_extensions && dpc_wait_rp_inactive(pdev))
return PCI_ERS_RESULT_DISCONNECT;
@@ -111,8 +112,10 @@ pci_ers_result_t dpc_reset_link(struct pci_dev *pdev)
pci_write_config_word(pdev, cap + PCI_EXP_DPC_STATUS,
PCI_EXP_DPC_STATUS_TRIGGER);
- if (!pcie_wait_for_link(pdev, true))
+ if (!pcie_wait_for_link(pdev, true)) {
+ pci_info(pdev, "Data Link Layer Link Active not set in 1000 msec\n");
return PCI_ERS_RESULT_DISCONNECT;
+ }
return PCI_ERS_RESULT_RECOVERED;
}
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index d8bf3fe8dacd..4289030b0fff 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -2118,6 +2118,9 @@ static void pci_configure_ltr(struct pci_dev *dev)
if (!pci_is_pcie(dev))
return;
+ /* Read L1 PM substate capabilities */
+ dev->l1ss = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_L1SS);
+
pcie_capability_read_dword(dev, PCI_EXP_DEVCAP2, &cap);
if (!(cap & PCI_EXP_DEVCAP2_LTR))
return;
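With this change the L1 PM Substates capability offset is discovered once at enumeration and cached in dev->l1ss, which is what the aspm.c rework above relies on. A minimal access pattern (sketch; pci_find_ext_capability() returns 0 when the capability is absent, so the cached offset doubles as a presence check):

u32 cap;

if (dev->l1ss)
	pci_read_config_dword(dev, dev->l1ss + PCI_L1SS_CAP, &cap);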
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index bdf9b52567e0..eefed9d26945 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1846,7 +1846,7 @@ DECLARE_PCI_FIXUP_EARLY(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_PXHV, quirk_pci
*/
static void quirk_intel_pcie_pm(struct pci_dev *dev)
{
- pci_pm_d3_delay = 120;
+ pci_pm_d3hot_delay = 120;
dev->no_d1d2 = 1;
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x25e2, quirk_intel_pcie_pm);
@@ -1873,12 +1873,12 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x260b, quirk_intel_pcie_pm);
static void quirk_d3hot_delay(struct pci_dev *dev, unsigned int delay)
{
- if (dev->d3_delay >= delay)
+ if (dev->d3hot_delay >= delay)
return;
- dev->d3_delay = delay;
+ dev->d3hot_delay = delay;
pci_info(dev, "extending delay after power-on from D3hot to %d msec\n",
- dev->d3_delay);
+ dev->d3hot_delay);
}
static void quirk_radeon_pm(struct pci_dev *dev)
@@ -3387,36 +3387,36 @@ DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0152, disable_igfx_irq);
* PCI devices which are on Intel chips can skip the 10ms delay
* before entering D3 mode.
*/
-static void quirk_remove_d3_delay(struct pci_dev *dev)
-{
- dev->d3_delay = 0;
-}
-/* C600 Series devices do not need 10ms d3_delay */
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0412, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c0c, quirk_remove_d3_delay);
-/* Lynxpoint-H PCH devices do not need 10ms d3_delay */
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3_delay);
-/* Intel Cherrytrail devices do not need 10ms d3_delay */
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2280, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b0, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b8, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22d8, quirk_remove_d3_delay);
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22dc, quirk_remove_d3_delay);
+static void quirk_remove_d3hot_delay(struct pci_dev *dev)
+{
+ dev->d3hot_delay = 0;
+}
+/* C600 Series devices do not need 10ms d3hot_delay */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0412, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c00, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x0c0c, quirk_remove_d3hot_delay);
+/* Lynxpoint-H PCH devices do not need 10ms d3hot_delay */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c02, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c18, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c1c, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c20, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c22, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c26, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c2d, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c31, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3a, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c3d, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x8c4e, quirk_remove_d3hot_delay);
+/* Intel Cherrytrail devices do not need 10ms d3hot_delay */
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2280, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x2298, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x229c, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b0, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b5, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b7, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22b8, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22d8, quirk_remove_d3hot_delay);
+DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_INTEL, 0x22dc, quirk_remove_d3hot_delay);
/*
* Some devices may pass our check in pci_intx_mask_supported() if
@@ -4949,6 +4949,13 @@ static void pci_quirk_enable_intel_rp_mpc_acs(struct pci_dev *dev)
}
}
+/*
+ * Currently this quirk does the equivalent of
+ * PCI_ACS_SV | PCI_ACS_RR | PCI_ACS_CR | PCI_ACS_UF
+ *
+ * TODO: This quirk also needs to do the equivalent of PCI_ACS_TB,
+ * if dev->external_facing || dev->untrusted
+ */
static int pci_quirk_enable_intel_pch_acs(struct pci_dev *dev)
{
if (!pci_quirk_intel_pch_acs_match(dev))
@@ -4988,6 +4995,9 @@ static int pci_quirk_enable_intel_spt_pch_acs(struct pci_dev *dev)
ctrl |= (cap & PCI_ACS_CR);
ctrl |= (cap & PCI_ACS_UF);
+ if (dev->external_facing || dev->untrusted)
+ ctrl |= (cap & PCI_ACS_TB);
+
pci_write_config_dword(dev, pos + INTEL_SPT_ACS_CTRL, ctrl);
pci_info(dev, "Intel SPT PCH root port ACS workaround enabled\n");