author     Stephen Rothwell <sfr@canb.auug.org.au>   2009-09-04 10:08:04 +1000
committer  Stephen Rothwell <sfr@canb.auug.org.au>   2009-09-04 10:08:08 +1000
commit     7c52ca1bcf6de1f87533ac5f6f419245f7011f46 (patch)
tree       513e1abc699dc7bc8d5c904bdc4da0370db47552 /drivers
parent     42295913f01b82e576b46c63ffb1f38f814e5e62 (diff)
parent     7222f5a6f537eb4639d9dfe0fc7ceeb7c4278c62 (diff)
Merge commit 'pci/linux-next'
Conflicts:
	arch/powerpc/kernel/pci_64.c
Diffstat (limited to 'drivers')
-rw-r--r--  drivers/acpi/pci_root.c                 |   17
-rw-r--r--  drivers/gpu/Makefile                    |    2
-rw-r--r--  drivers/gpu/vga/Kconfig                 |   10
-rw-r--r--  drivers/gpu/vga/Makefile                |    1
-rw-r--r--  drivers/gpu/vga/vgaarb.c                | 1205
-rw-r--r--  drivers/pci/Makefile                    |    3
-rw-r--r--  drivers/pci/hotplug/acpiphp_glue.c      |   28
-rw-r--r--  drivers/pci/hotplug/pci_hotplug_core.c  |    3
-rw-r--r--  drivers/pci/hotplug/pciehp_hpc.c        |   10
-rw-r--r--  drivers/pci/legacy.c                    |   34
-rw-r--r--  drivers/pci/msi.c                       |  283
-rw-r--r--  drivers/pci/pci-sysfs.c                 |   37
-rw-r--r--  drivers/pci/pci.c                       |   60
-rw-r--r--  drivers/pci/pci.h                       |    1
-rw-r--r--  drivers/pci/pcie/aspm.c                 |  495
-rw-r--r--  drivers/pci/probe.c                     |   12
-rw-r--r--  drivers/pci/quirks.c                    |    4
-rw-r--r--  drivers/pci/search.c                    |   31
-rw-r--r--  drivers/pci/setup-res.c                 |    1
-rw-r--r--  drivers/pcmcia/yenta_socket.c           |   16
-rw-r--r--  drivers/video/Kconfig                   |    2
21 files changed, 1793 insertions(+), 462 deletions(-)
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index 55b5b90c2a44..31b961c2f22f 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -61,20 +61,6 @@ static struct acpi_driver acpi_pci_root_driver = {
},
};
-struct acpi_pci_root {
- struct list_head node;
- struct acpi_device *device;
- struct pci_bus *bus;
- u16 segment;
- u8 bus_nr;
-
- u32 osc_support_set; /* _OSC state of support bits */
- u32 osc_control_set; /* _OSC state of control bits */
- u32 osc_control_qry; /* the latest _OSC query result */
-
- u32 osc_queried:1; /* has _OSC control been queried? */
-};
-
static LIST_HEAD(acpi_pci_roots);
static struct acpi_pci_driver *sub_driver;
@@ -317,7 +303,7 @@ static acpi_status acpi_pci_osc_support(struct acpi_pci_root *root, u32 flags)
return status;
}
-static struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle)
+struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle)
{
struct acpi_pci_root *root;
@@ -327,6 +313,7 @@ static struct acpi_pci_root *acpi_pci_find_root(acpi_handle handle)
}
return NULL;
}
+EXPORT_SYMBOL_GPL(acpi_pci_find_root);
struct acpi_handle_node {
struct list_head node;
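[Editorial note: with acpi_pci_find_root() now exported, other kernel code can map an ACPI handle to its PCI root bus; the acpiphp change further down in this merge does exactly that. A minimal sketch of such a caller, assuming struct acpi_pci_root is visible from a shared header; the helper name is illustrative, not part of this patch.]

#include <linux/acpi.h>
#include <linux/pci.h>

/* Sketch only: resolve an ACPI handle to the PCI bus below its host bridge.
 * my_handle_to_root_bus() is a made-up name, not part of this patch. */
static struct pci_bus *my_handle_to_root_bus(acpi_handle handle)
{
	struct acpi_pci_root *root = acpi_pci_find_root(handle);

	/* NULL if the handle does not correspond to a PCI host bridge */
	return root ? root->bus : NULL;
}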
diff --git a/drivers/gpu/Makefile b/drivers/gpu/Makefile
index de566cf0414c..30879df3daea 100644
--- a/drivers/gpu/Makefile
+++ b/drivers/gpu/Makefile
@@ -1 +1 @@
-obj-y += drm/
+obj-y += drm/ vga/
diff --git a/drivers/gpu/vga/Kconfig b/drivers/gpu/vga/Kconfig
new file mode 100644
index 000000000000..790e675b13eb
--- /dev/null
+++ b/drivers/gpu/vga/Kconfig
@@ -0,0 +1,10 @@
+config VGA_ARB
+ bool "VGA Arbitration" if EMBEDDED
+ default y
+ depends on PCI
+ help
+ Some "legacy" VGA devices implemented on PCI typically have the same
+ hard-decoded addresses as they did on ISA. When multiple PCI devices
+ are accessed at the same time they need some kind of coordination. Please
+ see Documentation/vgaarbiter.txt for more details. Select this to
+ enable the VGA arbiter.
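[Editorial note: the arbiter introduced in drivers/gpu/vga/vgaarb.c below exposes vga_get()/vga_put() (plus the vga_get_uninterruptible() wrapper from <linux/vgaarb.h>) to drivers that need to touch legacy VGA ranges. A hedged sketch of how a GPU or framebuffer driver might bracket such accesses; the function name is hypothetical.]

#include <linux/pci.h>
#include <linux/vgaarb.h>

/* Sketch only: serialize legacy VGA register accesses through the arbiter.
 * my_driver_touch_vga_regs() is illustrative, not part of this patch. */
static void my_driver_touch_vga_regs(struct pci_dev *pdev)
{
	/* Block until this device owns legacy IO and MEM decoding */
	vga_get_uninterruptible(pdev, VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);

	/* ... safely poke 0x3B0-0x3DF and 0xA0000-0xBFFFF here ... */

	vga_put(pdev, VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM);
}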
diff --git a/drivers/gpu/vga/Makefile b/drivers/gpu/vga/Makefile
new file mode 100644
index 000000000000..7cc8c1ed645b
--- /dev/null
+++ b/drivers/gpu/vga/Makefile
@@ -0,0 +1 @@
+obj-$(CONFIG_VGA_ARB) += vgaarb.o
diff --git a/drivers/gpu/vga/vgaarb.c b/drivers/gpu/vga/vgaarb.c
new file mode 100644
index 000000000000..1ac0c93603c9
--- /dev/null
+++ b/drivers/gpu/vga/vgaarb.c
@@ -0,0 +1,1205 @@
+/*
+ * vgaarb.c
+ *
+ * (C) Copyright 2005 Benjamin Herrenschmidt <benh@kernel.crashing.org>
+ * (C) Copyright 2007 Paulo R. Zanoni <przanoni@gmail.com>
+ * (C) Copyright 2007, 2009 Tiago Vignatti <vignatti@freedesktop.org>
+ *
+ * Implements the VGA arbitration. For details refer to
+ * Documentation/vgaarbiter.txt
+ */
+
+#include <linux/module.h>
+#include <linux/kernel.h>
+#include <linux/pci.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/list.h>
+#include <linux/sched.h>
+#include <linux/wait.h>
+#include <linux/spinlock.h>
+#include <linux/poll.h>
+#include <linux/miscdevice.h>
+
+#include <linux/uaccess.h>
+
+#include <linux/vgaarb.h>
+
+static void vga_arbiter_notify_clients(void);
+/*
+ * We keep a list of all vga devices in the system to speed
+ * up the various operations of the arbiter
+ */
+struct vga_device {
+ struct list_head list;
+ struct pci_dev *pdev;
+ unsigned int decodes; /* what it decodes */
+ unsigned int owns; /* what it owns */
+ unsigned int locks; /* what it locks */
+ unsigned int io_lock_cnt; /* legacy IO lock count */
+ unsigned int mem_lock_cnt; /* legacy MEM lock count */
+ unsigned int io_norm_cnt; /* normal IO count */
+ unsigned int mem_norm_cnt; /* normal MEM count */
+
+ /* allow IRQ enable/disable hook */
+ void *cookie;
+ void (*irq_set_state)(void *cookie, bool enable);
+ unsigned int (*set_vga_decode)(void *cookie, bool decode);
+};
+
+static LIST_HEAD(vga_list);
+static int vga_count, vga_decode_count;
+static bool vga_arbiter_used;
+static DEFINE_SPINLOCK(vga_lock);
+static DECLARE_WAIT_QUEUE_HEAD(vga_wait_queue);
+
+
+static const char *vga_iostate_to_str(unsigned int iostate)
+{
+ /* Ignore VGA_RSRC_IO and VGA_RSRC_MEM */
+ iostate &= VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
+ switch (iostate) {
+ case VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM:
+ return "io+mem";
+ case VGA_RSRC_LEGACY_IO:
+ return "io";
+ case VGA_RSRC_LEGACY_MEM:
+ return "mem";
+ }
+ return "none";
+}
+
+static int vga_str_to_iostate(char *buf, int str_size, int *io_state)
+{
+ /* we could in theory hand out locks on IO and mem
+ * separately to userspace but it can cause deadlocks */
+ if (strncmp(buf, "none", 4) == 0) {
+ *io_state = VGA_RSRC_NONE;
+ return 1;
+ }
+
+ /* XXX We're not checking the str_size! */
+ if (strncmp(buf, "io+mem", 6) == 0)
+ goto both;
+ else if (strncmp(buf, "io", 2) == 0)
+ goto both;
+ else if (strncmp(buf, "mem", 3) == 0)
+ goto both;
+ return 0;
+both:
+ *io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
+ return 1;
+}
+
+#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
+/* this is only used as a cookie - it should not be dereferenced */
+static struct pci_dev *vga_default;
+#endif
+
+static void vga_arb_device_card_gone(struct pci_dev *pdev);
+
+/* Find somebody in our list */
+static struct vga_device *vgadev_find(struct pci_dev *pdev)
+{
+ struct vga_device *vgadev;
+
+ list_for_each_entry(vgadev, &vga_list, list)
+ if (pdev == vgadev->pdev)
+ return vgadev;
+ return NULL;
+}
+
+/* Returns the default VGA device (vgacon's babe) */
+#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
+struct pci_dev *vga_default_device(void)
+{
+ return vga_default;
+}
+#endif
+
+static inline void vga_irq_set_state(struct vga_device *vgadev, bool state)
+{
+ if (vgadev->irq_set_state)
+ vgadev->irq_set_state(vgadev->cookie, state);
+}
+
+
+/* If we don't ever use VGA arb we should avoid
+ turning off anything anywhere due to old X servers getting
+ confused about the boot device not being VGA */
+static void vga_check_first_use(void)
+{
+ /* we should inform all GPUs in the system that
+ * VGA arb has occurred and to try and disable resources
+ * if they can */
+ if (!vga_arbiter_used) {
+ vga_arbiter_used = true;
+ vga_arbiter_notify_clients();
+ }
+}
+
+static struct vga_device *__vga_tryget(struct vga_device *vgadev,
+ unsigned int rsrc)
+{
+ unsigned int wants, legacy_wants, match;
+ struct vga_device *conflict;
+ unsigned int pci_bits;
+ /* Account for "normal" resources to lock. If we decode the legacy
+ * counterpart, we need to request it as well
+ */
+ if ((rsrc & VGA_RSRC_NORMAL_IO) &&
+ (vgadev->decodes & VGA_RSRC_LEGACY_IO))
+ rsrc |= VGA_RSRC_LEGACY_IO;
+ if ((rsrc & VGA_RSRC_NORMAL_MEM) &&
+ (vgadev->decodes & VGA_RSRC_LEGACY_MEM))
+ rsrc |= VGA_RSRC_LEGACY_MEM;
+
+ pr_devel("%s: %d\n", __func__, rsrc);
+ pr_devel("%s: owns: %d\n", __func__, vgadev->owns);
+
+ /* Check what resources we need to acquire */
+ wants = rsrc & ~vgadev->owns;
+
+ /* We already own everything, just mark locked & bye bye */
+ if (wants == 0)
+ goto lock_them;
+
+ /* We don't need to request a legacy resource, we just enable
+ * appropriate decoding and go
+ */
+ legacy_wants = wants & VGA_RSRC_LEGACY_MASK;
+ if (legacy_wants == 0)
+ goto enable_them;
+
+ /* Ok, we don't, let's find out how we need to kick off */
+ list_for_each_entry(conflict, &vga_list, list) {
+ unsigned int lwants = legacy_wants;
+ unsigned int change_bridge = 0;
+
+ /* Don't conflict with myself */
+ if (vgadev == conflict)
+ continue;
+
+ /* Check if the architecture allows a conflict between those
+ * 2 devices or if they are on separate domains
+ */
+ if (!vga_conflicts(vgadev->pdev, conflict->pdev))
+ continue;
+
+ /* We have a possible conflict. Before we go further, we must
+ * check if we sit on the same bus as the conflicting device.
+ * if we don't, then we must tie both IO and MEM resources
+ * together since there is only a single bit controlling
+ * VGA forwarding on P2P bridges
+ */
+ if (vgadev->pdev->bus != conflict->pdev->bus) {
+ change_bridge = 1;
+ lwants = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
+ }
+
+ /* Check if the guy has a lock on the resource. If he does,
+ * return the conflicting entry
+ */
+ if (conflict->locks & lwants)
+ return conflict;
+
+ /* Ok, now check if he owns the resource we want. We don't need
+ * to check "decodes" since it should be impossible to own
+ * legacy resources you don't decode unless I have a bug
+ * in this code...
+ */
+ WARN_ON(conflict->owns & ~conflict->decodes);
+ match = lwants & conflict->owns;
+ if (!match)
+ continue;
+
+ /* looks like he doesn't have a lock, we can steal
+ * them from him
+ */
+ vga_irq_set_state(conflict, false);
+
+ pci_bits = 0;
+ if (lwants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
+ pci_bits |= PCI_COMMAND_MEMORY;
+ if (lwants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
+ pci_bits |= PCI_COMMAND_IO;
+
+ pci_set_vga_state(conflict->pdev, false, pci_bits,
+ change_bridge);
+ conflict->owns &= ~lwants;
+ /* If he also owned non-legacy, that is no longer the case */
+ if (lwants & VGA_RSRC_LEGACY_MEM)
+ conflict->owns &= ~VGA_RSRC_NORMAL_MEM;
+ if (lwants & VGA_RSRC_LEGACY_IO)
+ conflict->owns &= ~VGA_RSRC_NORMAL_IO;
+ }
+
+enable_them:
+ /* ok dude, we got it, everybody conflicting has been disabled, let's
+ * enable us. Make sure we don't mark a bit in "owns" that we don't
+ * also have in "decodes". We can lock resources we don't decode but
+ * not own them.
+ */
+ pci_bits = 0;
+ if (wants & (VGA_RSRC_LEGACY_MEM|VGA_RSRC_NORMAL_MEM))
+ pci_bits |= PCI_COMMAND_MEMORY;
+ if (wants & (VGA_RSRC_LEGACY_IO|VGA_RSRC_NORMAL_IO))
+ pci_bits |= PCI_COMMAND_IO;
+ pci_set_vga_state(vgadev->pdev, true, pci_bits, !!(wants & VGA_RSRC_LEGACY_MASK));
+
+ vga_irq_set_state(vgadev, true);
+ vgadev->owns |= (wants & vgadev->decodes);
+lock_them:
+ vgadev->locks |= (rsrc & VGA_RSRC_LEGACY_MASK);
+ if (rsrc & VGA_RSRC_LEGACY_IO)
+ vgadev->io_lock_cnt++;
+ if (rsrc & VGA_RSRC_LEGACY_MEM)
+ vgadev->mem_lock_cnt++;
+ if (rsrc & VGA_RSRC_NORMAL_IO)
+ vgadev->io_norm_cnt++;
+ if (rsrc & VGA_RSRC_NORMAL_MEM)
+ vgadev->mem_norm_cnt++;
+
+ return NULL;
+}
+
+static void __vga_put(struct vga_device *vgadev, unsigned int rsrc)
+{
+ unsigned int old_locks = vgadev->locks;
+
+ pr_devel("%s\n", __func__);
+
+ /* Update our counters, and account for equivalent legacy resources
+ * if we decode them
+ */
+ if ((rsrc & VGA_RSRC_NORMAL_IO) && vgadev->io_norm_cnt > 0) {
+ vgadev->io_norm_cnt--;
+ if (vgadev->decodes & VGA_RSRC_LEGACY_IO)
+ rsrc |= VGA_RSRC_LEGACY_IO;
+ }
+ if ((rsrc & VGA_RSRC_NORMAL_MEM) && vgadev->mem_norm_cnt > 0) {
+ vgadev->mem_norm_cnt--;
+ if (vgadev->decodes & VGA_RSRC_LEGACY_MEM)
+ rsrc |= VGA_RSRC_LEGACY_MEM;
+ }
+ if ((rsrc & VGA_RSRC_LEGACY_IO) && vgadev->io_lock_cnt > 0)
+ vgadev->io_lock_cnt--;
+ if ((rsrc & VGA_RSRC_LEGACY_MEM) && vgadev->mem_lock_cnt > 0)
+ vgadev->mem_lock_cnt--;
+
+ /* Just clear lock bits, we do lazy operations so we don't really
+ * have to bother about anything else at this point
+ */
+ if (vgadev->io_lock_cnt == 0)
+ vgadev->locks &= ~VGA_RSRC_LEGACY_IO;
+ if (vgadev->mem_lock_cnt == 0)
+ vgadev->locks &= ~VGA_RSRC_LEGACY_MEM;
+
+ /* Kick the wait queue in case somebody was waiting if we actually
+ * released something
+ */
+ if (old_locks != vgadev->locks)
+ wake_up_all(&vga_wait_queue);
+}
+
+int vga_get(struct pci_dev *pdev, unsigned int rsrc, int interruptible)
+{
+ struct vga_device *vgadev, *conflict;
+ unsigned long flags;
+ wait_queue_t wait;
+ int rc = 0;
+
+ vga_check_first_use();
+ /* The caller should check for this, but let's be sure... */
+ if (pdev == NULL)
+ pdev = vga_default_device();
+ if (pdev == NULL)
+ return 0;
+
+ for (;;) {
+ spin_lock_irqsave(&vga_lock, flags);
+ vgadev = vgadev_find(pdev);
+ if (vgadev == NULL) {
+ spin_unlock_irqrestore(&vga_lock, flags);
+ rc = -ENODEV;
+ break;
+ }
+ conflict = __vga_tryget(vgadev, rsrc);
+ spin_unlock_irqrestore(&vga_lock, flags);
+ if (conflict == NULL)
+ break;
+
+
+ /* We have a conflict, we wait until somebody kicks the
+ * work queue. Currently we have one work queue that we
+ * kick each time some resources are released, but it would
+ * be fairly easy to have a per device one so that we only
+ * need to attach to the conflicting device
+ */
+ init_waitqueue_entry(&wait, current);
+ add_wait_queue(&vga_wait_queue, &wait);
+ set_current_state(interruptible ?
+ TASK_INTERRUPTIBLE :
+ TASK_UNINTERRUPTIBLE);
+ if (signal_pending(current)) {
+ rc = -EINTR;
+ break;
+ }
+ schedule();
+ remove_wait_queue(&vga_wait_queue, &wait);
+ set_current_state(TASK_RUNNING);
+ }
+ return rc;
+}
+EXPORT_SYMBOL(vga_get);
+
+int vga_tryget(struct pci_dev *pdev, unsigned int rsrc)
+{
+ struct vga_device *vgadev;
+ unsigned long flags;
+ int rc = 0;
+
+ vga_check_first_use();
+
+ /* The caller should check for this, but let's be sure... */
+ if (pdev == NULL)
+ pdev = vga_default_device();
+ if (pdev == NULL)
+ return 0;
+ spin_lock_irqsave(&vga_lock, flags);
+ vgadev = vgadev_find(pdev);
+ if (vgadev == NULL) {
+ rc = -ENODEV;
+ goto bail;
+ }
+ if (__vga_tryget(vgadev, rsrc))
+ rc = -EBUSY;
+bail:
+ spin_unlock_irqrestore(&vga_lock, flags);
+ return rc;
+}
+EXPORT_SYMBOL(vga_tryget);
+
+void vga_put(struct pci_dev *pdev, unsigned int rsrc)
+{
+ struct vga_device *vgadev;
+ unsigned long flags;
+
+ /* The caller should check for this, but let's be sure... */
+ if (pdev == NULL)
+ pdev = vga_default_device();
+ if (pdev == NULL)
+ return;
+ spin_lock_irqsave(&vga_lock, flags);
+ vgadev = vgadev_find(pdev);
+ if (vgadev == NULL)
+ goto bail;
+ __vga_put(vgadev, rsrc);
+bail:
+ spin_unlock_irqrestore(&vga_lock, flags);
+}
+EXPORT_SYMBOL(vga_put);
+
+/*
+ * Currently, we assume that the "initial" setup of the system is
+ * not sane, that is, we come up with conflicting devices and let
+ * the arbiter's clients decide whether devices should decode the
+ * legacy ranges or not.
+ */
+static bool vga_arbiter_add_pci_device(struct pci_dev *pdev)
+{
+ struct vga_device *vgadev;
+ unsigned long flags;
+ struct pci_bus *bus;
+ struct pci_dev *bridge;
+ u16 cmd;
+
+ /* Only deal with VGA class devices */
+ if ((pdev->class >> 8) != PCI_CLASS_DISPLAY_VGA)
+ return false;
+
+ /* Allocate structure */
+ vgadev = kmalloc(sizeof(struct vga_device), GFP_KERNEL);
+ if (vgadev == NULL) {
+ pr_err("vgaarb: failed to allocate pci device\n");
+ /* What to do on allocation failure? For now, let's
+ * just do nothing, I'm not sure there is anything saner
+ * to be done
+ */
+ return false;
+ }
+
+ memset(vgadev, 0, sizeof(*vgadev));
+
+ /* Take lock & check for duplicates */
+ spin_lock_irqsave(&vga_lock, flags);
+ if (vgadev_find(pdev) != NULL) {
+ BUG_ON(1);
+ goto fail;
+ }
+ vgadev->pdev = pdev;
+
+ /* By default, assume we decode everything */
+ vgadev->decodes = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM |
+ VGA_RSRC_NORMAL_IO | VGA_RSRC_NORMAL_MEM;
+
+ /* by default mark it as decoding */
+ vga_decode_count++;
+ /* Mark that we "own" resources based on our enables, we will
+ * clear that below if the bridge isn't forwarding
+ */
+ pci_read_config_word(pdev, PCI_COMMAND, &cmd);
+ if (cmd & PCI_COMMAND_IO)
+ vgadev->owns |= VGA_RSRC_LEGACY_IO;
+ if (cmd & PCI_COMMAND_MEMORY)
+ vgadev->owns |= VGA_RSRC_LEGACY_MEM;
+
+ /* Check if VGA cycles can get down to us */
+ bus = pdev->bus;
+ while (bus) {
+ bridge = bus->self;
+ if (bridge) {
+ u16 l;
+ pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
+ &l);
+ if (!(l & PCI_BRIDGE_CTL_VGA)) {
+ vgadev->owns = 0;
+ break;
+ }
+ }
+ bus = bus->parent;
+ }
+
+ /* Deal with VGA default device. Use first enabled one
+ * by default if arch doesn't have its own hook
+ */
+#ifndef __ARCH_HAS_VGA_DEFAULT_DEVICE
+ if (vga_default == NULL &&
+ ((vgadev->owns & VGA_RSRC_LEGACY_MASK) == VGA_RSRC_LEGACY_MASK))
+ vga_default = pci_dev_get(pdev);
+#endif
+
+ /* Add to the list */
+ list_add(&vgadev->list, &vga_list);
+ vga_count++;
+ pr_info("vgaarb: device added: PCI:%s,decodes=%s,owns=%s,locks=%s\n",
+ pci_name(pdev),
+ vga_iostate_to_str(vgadev->decodes),
+ vga_iostate_to_str(vgadev->owns),
+ vga_iostate_to_str(vgadev->locks));
+
+ spin_unlock_irqrestore(&vga_lock, flags);
+ return true;
+fail:
+ spin_unlock_irqrestore(&vga_lock, flags);
+ kfree(vgadev);
+ return false;
+}
+
+static bool vga_arbiter_del_pci_device(struct pci_dev *pdev)
+{
+ struct vga_device *vgadev;
+ unsigned long flags;
+ bool ret = true;
+
+ spin_lock_irqsave(&vga_lock, flags);
+ vgadev = vgadev_find(pdev);
+ if (vgadev == NULL) {
+ ret = false;
+ goto bail;
+ }
+
+ if (vga_default == pdev) {
+ pci_dev_put(vga_default);
+ vga_default = NULL;
+ }
+
+ if (vgadev->decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
+ vga_decode_count--;
+
+ /* Remove entry from list */
+ list_del(&vgadev->list);
+ vga_count--;
+ /* Notify userland driver that the device is gone so it discards
+ * its copies of the pci_dev pointer
+ */
+ vga_arb_device_card_gone(pdev);
+
+ /* Wake up all possible waiters */
+ wake_up_all(&vga_wait_queue);
+bail:
+ spin_unlock_irqrestore(&vga_lock, flags);
+ kfree(vgadev);
+ return ret;
+}
+
+/* this is called with the lock */
+static inline void vga_update_device_decodes(struct vga_device *vgadev,
+ int new_decodes)
+{
+ int old_decodes;
+ struct vga_device *new_vgadev, *conflict;
+
+ old_decodes = vgadev->decodes;
+ vgadev->decodes = new_decodes;
+
+ pr_info("vgaarb: device changed decodes: PCI:%s,olddecodes=%s,decodes=%s:owns=%s\n",
+ pci_name(vgadev->pdev),
+ vga_iostate_to_str(old_decodes),
+ vga_iostate_to_str(vgadev->decodes),
+ vga_iostate_to_str(vgadev->owns));
+
+
+ /* if we own the decodes we should move them along to
+ another card */
+ if ((vgadev->owns & old_decodes) && (vga_count > 1)) {
+ /* set us to own nothing */
+ vgadev->owns &= ~old_decodes;
+ list_for_each_entry(new_vgadev, &vga_list, list) {
+ if ((new_vgadev != vgadev) &&
+ (new_vgadev->decodes & VGA_RSRC_LEGACY_MASK)) {
+ pr_info("vgaarb: transferring owner from PCI:%s to PCI:%s\n", pci_name(vgadev->pdev), pci_name(new_vgadev->pdev));
+ conflict = __vga_tryget(new_vgadev, VGA_RSRC_LEGACY_MASK);
+ if (!conflict)
+ __vga_put(new_vgadev, VGA_RSRC_LEGACY_MASK);
+ break;
+ }
+ }
+ }
+
+ /* change decodes counter */
+ if (old_decodes != new_decodes) {
+ if (new_decodes & (VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM))
+ vga_decode_count++;
+ else
+ vga_decode_count--;
+ }
+}
+
+void __vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes, bool userspace)
+{
+ struct vga_device *vgadev;
+ unsigned long flags;
+
+ decodes &= VGA_RSRC_LEGACY_MASK;
+
+ spin_lock_irqsave(&vga_lock, flags);
+ vgadev = vgadev_find(pdev);
+ if (vgadev == NULL)
+ goto bail;
+
+ /* don't let userspace futz with kernel driver decodes */
+ if (userspace && vgadev->set_vga_decode)
+ goto bail;
+
+ /* update the device decodes + counter */
+ vga_update_device_decodes(vgadev, decodes);
+
+ /* XXX if somebody is going from "doesn't decode" to "decodes" state
+ * here, additional care must be taken as we may have pending
+ * ownership of a non-legacy region ...
+ */
+bail:
+ spin_unlock_irqrestore(&vga_lock, flags);
+}
+
+void vga_set_legacy_decoding(struct pci_dev *pdev, unsigned int decodes)
+{
+ __vga_set_legacy_decoding(pdev, decodes, false);
+}
+EXPORT_SYMBOL(vga_set_legacy_decoding);
+
+/* call with NULL to unregister */
+int vga_client_register(struct pci_dev *pdev, void *cookie,
+ void (*irq_set_state)(void *cookie, bool state),
+ unsigned int (*set_vga_decode)(void *cookie, bool decode))
+{
+ int ret = -1;
+ struct vga_device *vgadev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&vga_lock, flags);
+ vgadev = vgadev_find(pdev);
+ if (!vgadev)
+ goto bail;
+
+ vgadev->irq_set_state = irq_set_state;
+ vgadev->set_vga_decode = set_vga_decode;
+ vgadev->cookie = cookie;
+ ret = 0;
+
+bail:
+ spin_unlock_irqrestore(&vga_lock, flags);
+ return ret;
+
+}
+EXPORT_SYMBOL(vga_client_register);
+
+/*
+ * Char driver implementation
+ *
+ * Semantics is:
+ *
+ * open : open user instance of the arbiter. By default, it's
+ * attached to the default VGA device of the system.
+ *
+ * close : close user instance, release locks
+ *
+ * read : return a string indicating the status of the target.
+ * an IO state string is of the form {io,mem,io+mem,none},
+ * mc and ic are respectively mem and io lock counts (for
+ * debugging/diagnostic only). "decodes" indicate what the
+ * card currently decodes, "owns" indicates what is currently
+ * enabled on it, and "locks" indicates what is locked by this
+ * card. If the card is unplugged, we get "invalid" then for
+ * card_ID and an -ENODEV error is returned for any command
+ * until a new card is targeted
+ *
+ * "<card_ID>,decodes=<io_state>,owns=<io_state>,locks=<io_state> (ic,mc)"
+ *
+ * write : write a command to the arbiter. List of commands is:
+ *
+ * target <card_ID> : switch target to card <card_ID> (see below)
+ * lock <io_state> : acquires locks on target ("none" is invalid io_state)
+ * trylock <io_state> : non-blocking acquire locks on target
+ * unlock <io_state> : release locks on target
+ * unlock all : release all locks on target held by this user
+ * decodes <io_state> : set the legacy decoding attributes for the card
+ *
+ * poll : event if something changes on any card (not just the target)
+ *
+ * card_ID is of the form "PCI:domain:bus:dev.fn". It can be set to "default"
+ * to go back to the system default card (TODO: not implemented yet).
+ * Currently, only PCI is supported as a prefix, but the userland API may
+ * support other bus types in the future, even if the current kernel
+ * implementation doesn't.
+ *
+ * Note about locks:
+ *
+ * The driver keeps track of which user has what locks on which card. It
+ * supports stacking, like the kernel one. This complicates the implementation
+ * a bit, but makes the arbiter more tolerant of userspace problems and able
+ * to clean up properly in all cases when a process dies.
+ * Currently, a max of 16 cards simultaneously can have locks issued from
+ * userspace for a given user (file descriptor instance) of the arbiter.
+ *
+ * If a device is hot-plugged or removed, a hook inside the module is notified
+ * of devices being added to or removed from the system, and they are
+ * automatically added to or removed from the arbiter.
+ */
+
+#define MAX_USER_CARDS 16
+#define PCI_INVALID_CARD ((struct pci_dev *)-1UL)
+
+/*
+ * Each user has an array of these, tracking which cards have locks
+ */
+struct vga_arb_user_card {
+ struct pci_dev *pdev;
+ unsigned int mem_cnt;
+ unsigned int io_cnt;
+};
+
+struct vga_arb_private {
+ struct list_head list;
+ struct pci_dev *target;
+ struct vga_arb_user_card cards[MAX_USER_CARDS];
+ spinlock_t lock;
+};
+
+static LIST_HEAD(vga_user_list);
+static DEFINE_SPINLOCK(vga_user_lock);
+
+
+/*
+ * This function gets a string in the format: "PCI:domain:bus:dev.fn" and
+ * returns the respective values. If the string is not in this format,
+ * it returns 0.
+ */
+static int vga_pci_str_to_vars(char *buf, int count, unsigned int *domain,
+ unsigned int *bus, unsigned int *devfn)
+{
+ int n;
+ unsigned int slot, func;
+
+
+ n = sscanf(buf, "PCI:%x:%x:%x.%x", domain, bus, &slot, &func);
+ if (n != 4)
+ return 0;
+
+ *devfn = PCI_DEVFN(slot, func);
+
+ return 1;
+}
+
+static ssize_t vga_arb_read(struct file *file, char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ struct vga_arb_private *priv = file->private_data;
+ struct vga_device *vgadev;
+ struct pci_dev *pdev;
+ unsigned long flags;
+ size_t len;
+ int rc;
+ char *lbuf;
+
+ lbuf = kmalloc(1024, GFP_KERNEL);
+ if (lbuf == NULL)
+ return -ENOMEM;
+
+ /* Shields against vga_arb_device_card_gone (pci_dev going
+ * away), and allows access to vga list
+ */
+ spin_lock_irqsave(&vga_lock, flags);
+
+ /* If we are targeting the default, use it */
+ pdev = priv->target;
+ if (pdev == NULL || pdev == PCI_INVALID_CARD) {
+ spin_unlock_irqrestore(&vga_lock, flags);
+ len = sprintf(lbuf, "invalid");
+ goto done;
+ }
+
+ /* Find card vgadev structure */
+ vgadev = vgadev_find(pdev);
+ if (vgadev == NULL) {
+ /* Wow, it's not in the list, that shouldn't happen,
+ * let's fix us up and return invalid card
+ */
+ if (pdev == priv->target)
+ vga_arb_device_card_gone(pdev);
+ spin_unlock_irqrestore(&vga_lock, flags);
+ len = sprintf(lbuf, "invalid");
+ goto done;
+ }
+
+ /* Fill the buffer with info */
+ len = snprintf(lbuf, 1024,
+ "count:%d,PCI:%s,decodes=%s,owns=%s,locks=%s(%d:%d)\n",
+ vga_decode_count, pci_name(pdev),
+ vga_iostate_to_str(vgadev->decodes),
+ vga_iostate_to_str(vgadev->owns),
+ vga_iostate_to_str(vgadev->locks),
+ vgadev->io_lock_cnt, vgadev->mem_lock_cnt);
+
+ spin_unlock_irqrestore(&vga_lock, flags);
+done:
+
+ /* Copy that to user */
+ if (len > count)
+ len = count;
+ rc = copy_to_user(buf, lbuf, len);
+ kfree(lbuf);
+ if (rc)
+ return -EFAULT;
+ return len;
+}
+
+/*
+ * TODO: To avoid parsing inside the kernel and to improve speed, we may
+ * consider using an ioctl here
+ */
+static ssize_t vga_arb_write(struct file *file, const char __user * buf,
+ size_t count, loff_t *ppos)
+{
+ struct vga_arb_private *priv = file->private_data;
+ struct vga_arb_user_card *uc = NULL;
+ struct pci_dev *pdev;
+
+ unsigned int io_state;
+
+ char *kbuf, *curr_pos;
+ size_t remaining = count;
+
+ int ret_val;
+ int i;
+
+
+ kbuf = kmalloc(count + 1, GFP_KERNEL);
+ if (!kbuf)
+ return -ENOMEM;
+
+ if (copy_from_user(kbuf, buf, count)) {
+ kfree(kbuf);
+ return -EFAULT;
+ }
+ curr_pos = kbuf;
+ kbuf[count] = '\0'; /* Just to make sure... */
+
+ if (strncmp(curr_pos, "lock ", 5) == 0) {
+ curr_pos += 5;
+ remaining -= 5;
+
+ pr_devel("client 0x%p called 'lock'\n", priv);
+
+ if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
+ ret_val = -EPROTO;
+ goto done;
+ }
+ if (io_state == VGA_RSRC_NONE) {
+ ret_val = -EPROTO;
+ goto done;
+ }
+
+ pdev = priv->target;
+ if (priv->target == NULL) {
+ ret_val = -ENODEV;
+ goto done;
+ }
+
+ vga_get_uninterruptible(pdev, io_state);
+
+ /* Update the client's locks lists... */
+ for (i = 0; i < MAX_USER_CARDS; i++) {
+ if (priv->cards[i].pdev == pdev) {
+ if (io_state & VGA_RSRC_LEGACY_IO)
+ priv->cards[i].io_cnt++;
+ if (io_state & VGA_RSRC_LEGACY_MEM)
+ priv->cards[i].mem_cnt++;
+ break;
+ }
+ }
+
+ ret_val = count;
+ goto done;
+ } else if (strncmp(curr_pos, "unlock ", 7) == 0) {
+ curr_pos += 7;
+ remaining -= 7;
+
+ pr_devel("client 0x%p called 'unlock'\n", priv);
+
+ if (strncmp(curr_pos, "all", 3) == 0)
+ io_state = VGA_RSRC_LEGACY_IO | VGA_RSRC_LEGACY_MEM;
+ else {
+ if (!vga_str_to_iostate
+ (curr_pos, remaining, &io_state)) {
+ ret_val = -EPROTO;
+ goto done;
+ }
+ /* TODO: Add this?
+ if (io_state == VGA_RSRC_NONE) {
+ ret_val = -EPROTO;
+ goto done;
+ }
+ */
+ }
+
+ pdev = priv->target;
+ if (priv->target == NULL) {
+ ret_val = -ENODEV;
+ goto done;
+ }
+ for (i = 0; i < MAX_USER_CARDS; i++) {
+ if (priv->cards[i].pdev == pdev)
+ uc = &priv->cards[i];
+ }
+
+ if (!uc)
+ return -EINVAL;
+
+ if (io_state & VGA_RSRC_LEGACY_IO && uc->io_cnt == 0)
+ return -EINVAL;
+
+ if (io_state & VGA_RSRC_LEGACY_MEM && uc->mem_cnt == 0)
+ return -EINVAL;
+
+ vga_put(pdev, io_state);
+
+ if (io_state & VGA_RSRC_LEGACY_IO)
+ uc->io_cnt--;
+ if (io_state & VGA_RSRC_LEGACY_MEM)
+ uc->mem_cnt--;
+
+ ret_val = count;
+ goto done;
+ } else if (strncmp(curr_pos, "trylock ", 8) == 0) {
+ curr_pos += 8;
+ remaining -= 8;
+
+ pr_devel("client 0x%p called 'trylock'\n", priv);
+
+ if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
+ ret_val = -EPROTO;
+ goto done;
+ }
+ /* TODO: Add this?
+ if (io_state == VGA_RSRC_NONE) {
+ ret_val = -EPROTO;
+ goto done;
+ }
+ */
+
+ pdev = priv->target;
+ if (priv->target == NULL) {
+ ret_val = -ENODEV;
+ goto done;
+ }
+
+ if (vga_tryget(pdev, io_state)) {
+ /* Update the client's locks lists... */
+ for (i = 0; i < MAX_USER_CARDS; i++) {
+ if (priv->cards[i].pdev == pdev) {
+ if (io_state & VGA_RSRC_LEGACY_IO)
+ priv->cards[i].io_cnt++;
+ if (io_state & VGA_RSRC_LEGACY_MEM)
+ priv->cards[i].mem_cnt++;
+ break;
+ }
+ }
+ ret_val = count;
+ goto done;
+ } else {
+ ret_val = -EBUSY;
+ goto done;
+ }
+
+ } else if (strncmp(curr_pos, "target ", 7) == 0) {
+ unsigned int domain, bus, devfn;
+ struct vga_device *vgadev;
+
+ curr_pos += 7;
+ remaining -= 7;
+ pr_devel("client 0x%p called 'target'\n", priv);
+ /* if target is default */
+ if (!strncmp(buf, "default", 7))
+ pdev = pci_dev_get(vga_default_device());
+ else {
+ if (!vga_pci_str_to_vars(curr_pos, remaining,
+ &domain, &bus, &devfn)) {
+ ret_val = -EPROTO;
+ goto done;
+ }
+
+ pdev = pci_get_bus_and_slot(bus, devfn);
+ if (!pdev) {
+ pr_info("vgaarb: invalid PCI address!\n");
+ ret_val = -ENODEV;
+ goto done;
+ }
+ }
+
+ vgadev = vgadev_find(pdev);
+ if (vgadev == NULL) {
+ pr_info("vgaarb: this pci device is not a vga device\n");
+ pci_dev_put(pdev);
+ ret_val = -ENODEV;
+ goto done;
+ }
+
+ priv->target = pdev;
+ for (i = 0; i < MAX_USER_CARDS; i++) {
+ if (priv->cards[i].pdev == pdev)
+ break;
+ if (priv->cards[i].pdev == NULL) {
+ priv->cards[i].pdev = pdev;
+ priv->cards[i].io_cnt = 0;
+ priv->cards[i].mem_cnt = 0;
+ break;
+ }
+ }
+ if (i == MAX_USER_CARDS) {
+ pr_err("vgaarb: maximum user cards number reached!\n");
+ pci_dev_put(pdev);
+ /* XXX: which value to return? */
+ ret_val = -ENOMEM;
+ goto done;
+ }
+
+ ret_val = count;
+ pci_dev_put(pdev);
+ goto done;
+
+
+ } else if (strncmp(curr_pos, "decodes ", 8) == 0) {
+ curr_pos += 8;
+ remaining -= 8;
+ pr_devel("vgaarb: client 0x%p called 'decodes'\n", priv);
+
+ if (!vga_str_to_iostate(curr_pos, remaining, &io_state)) {
+ ret_val = -EPROTO;
+ goto done;
+ }
+ pdev = priv->target;
+ if (priv->target == NULL) {
+ ret_val = -ENODEV;
+ goto done;
+ }
+
+ __vga_set_legacy_decoding(pdev, io_state, true);
+ ret_val = count;
+ goto done;
+ }
+ /* If we got here, the message written is not part of the protocol! */
+ kfree(kbuf);
+ return -EPROTO;
+
+done:
+ kfree(kbuf);
+ return ret_val;
+}
+
+static unsigned int vga_arb_fpoll(struct file *file, poll_table * wait)
+{
+ struct vga_arb_private *priv = file->private_data;
+
+ pr_devel("%s\n", __func__);
+
+ if (priv == NULL)
+ return -ENODEV;
+ poll_wait(file, &vga_wait_queue, wait);
+ return POLLIN;
+}
+
+static int vga_arb_open(struct inode *inode, struct file *file)
+{
+ struct vga_arb_private *priv;
+ unsigned long flags;
+
+ pr_devel("%s\n", __func__);
+
+ priv = kmalloc(sizeof(struct vga_arb_private), GFP_KERNEL);
+ if (priv == NULL)
+ return -ENOMEM;
+ memset(priv, 0, sizeof(*priv));
+ spin_lock_init(&priv->lock);
+ file->private_data = priv;
+
+ spin_lock_irqsave(&vga_user_lock, flags);
+ list_add(&priv->list, &vga_user_list);
+ spin_unlock_irqrestore(&vga_user_lock, flags);
+
+ /* Set the client's list of locks */
+ priv->target = vga_default_device(); /* Maybe this is still null! */
+ priv->cards[0].pdev = priv->target;
+ priv->cards[0].io_cnt = 0;
+ priv->cards[0].mem_cnt = 0;
+
+
+ return 0;
+}
+
+static int vga_arb_release(struct inode *inode, struct file *file)
+{
+ struct vga_arb_private *priv = file->private_data;
+ struct vga_arb_user_card *uc;
+ unsigned long flags;
+ int i;
+
+ pr_devel("%s\n", __func__);
+
+ if (priv == NULL)
+ return -ENODEV;
+
+ spin_lock_irqsave(&vga_user_lock, flags);
+ list_del(&priv->list);
+ for (i = 0; i < MAX_USER_CARDS; i++) {
+ uc = &priv->cards[i];
+ if (uc->pdev == NULL)
+ continue;
+ pr_devel("uc->io_cnt == %d, uc->mem_cnt == %d\n",
+ uc->io_cnt, uc->mem_cnt);
+ while (uc->io_cnt--)
+ vga_put(uc->pdev, VGA_RSRC_LEGACY_IO);
+ while (uc->mem_cnt--)
+ vga_put(uc->pdev, VGA_RSRC_LEGACY_MEM);
+ }
+ spin_unlock_irqrestore(&vga_user_lock, flags);
+
+ kfree(priv);
+
+ return 0;
+}
+
+static void vga_arb_device_card_gone(struct pci_dev *pdev)
+{
+}
+
+/*
+ * callback any registered clients to let them know we have a
+ * change in VGA cards
+ */
+static void vga_arbiter_notify_clients(void)
+{
+ struct vga_device *vgadev;
+ unsigned long flags;
+ uint32_t new_decodes;
+ bool new_state;
+
+ if (!vga_arbiter_used)
+ return;
+
+ spin_lock_irqsave(&vga_lock, flags);
+ list_for_each_entry(vgadev, &vga_list, list) {
+ if (vga_count > 1)
+ new_state = false;
+ else
+ new_state = true;
+ if (vgadev->set_vga_decode) {
+ new_decodes = vgadev->set_vga_decode(vgadev->cookie, new_state);
+ vga_update_device_decodes(vgadev, new_decodes);
+ }
+ }
+ spin_unlock_irqrestore(&vga_lock, flags);
+}
+
+static int pci_notify(struct notifier_block *nb, unsigned long action,
+ void *data)
+{
+ struct device *dev = data;
+ struct pci_dev *pdev = to_pci_dev(dev);
+ bool notify = false;
+
+ pr_devel("%s\n", __func__);
+
+ /* For now we're only interested in devices added and removed. I didn't
+ * test this thing here, so someone needs to double check for the
+ * cases of hotpluggable vga cards. */
+ if (action == BUS_NOTIFY_ADD_DEVICE)
+ notify = vga_arbiter_add_pci_device(pdev);
+ else if (action == BUS_NOTIFY_DEL_DEVICE)
+ notify = vga_arbiter_del_pci_device(pdev);
+
+ if (notify)
+ vga_arbiter_notify_clients();
+ return 0;
+}
+
+static struct notifier_block pci_notifier = {
+ .notifier_call = pci_notify,
+};
+
+static const struct file_operations vga_arb_device_fops = {
+ .read = vga_arb_read,
+ .write = vga_arb_write,
+ .poll = vga_arb_fpoll,
+ .open = vga_arb_open,
+ .release = vga_arb_release,
+};
+
+static struct miscdevice vga_arb_device = {
+ MISC_DYNAMIC_MINOR, "vga_arbiter", &vga_arb_device_fops
+};
+
+static int __init vga_arb_device_init(void)
+{
+ int rc;
+ struct pci_dev *pdev;
+
+ rc = misc_register(&vga_arb_device);
+ if (rc < 0)
+ pr_err("vgaarb: error %d registering device\n", rc);
+
+ bus_register_notifier(&pci_bus_type, &pci_notifier);
+
+ /* We add all PCI devices of the VGA class to the arbiter by
+ * default */
+ pdev = NULL;
+ while ((pdev =
+ pci_get_subsys(PCI_ANY_ID, PCI_ANY_ID, PCI_ANY_ID,
+ PCI_ANY_ID, pdev)) != NULL)
+ vga_arbiter_add_pci_device(pdev);
+
+ pr_info("vgaarb: loaded\n");
+ return rc;
+}
+subsys_initcall(vga_arb_device_init);
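[Editorial note: the comment block above vga_arb_write() documents the userspace protocol. For illustration, a minimal client of the character device might look like the sketch below; the /dev/vga_arbiter path assumes the usual udev name for the "vga_arbiter" misc device, and error handling is trimmed.]

/* Sketch only: lock the legacy VGA resources of the current target,
 * print the arbiter status line, then release the lock. */
#include <fcntl.h>
#include <stdio.h>
#include <string.h>
#include <unistd.h>

int main(void)
{
	char buf[128];
	ssize_t len;
	int fd = open("/dev/vga_arbiter", O_RDWR);

	if (fd < 0)
		return 1;
	/* Blocks until the target (the default VGA device) owns io+mem */
	write(fd, "lock io+mem", strlen("lock io+mem"));
	len = read(fd, buf, sizeof(buf) - 1);
	if (len > 0) {
		buf[len] = '\0';
		/* e.g. "count:1,PCI:0000:01:00.0,decodes=io+mem,owns=io+mem,locks=io+mem(1:1)" */
		printf("%s", buf);
	}
	write(fd, "unlock io+mem", strlen("unlock io+mem"));
	close(fd);
	return 0;
}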
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index 1ebd6b4c743b..4a7f11d8f432 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -8,6 +8,9 @@ obj-y += access.o bus.o probe.o remove.o pci.o quirks.o \
obj-$(CONFIG_PROC_FS) += proc.o
obj-$(CONFIG_SYSFS) += slot.o
+obj-$(CONFIG_PCI_LEGACY) += legacy.o
+CFLAGS_legacy.o += -Wno-deprecated-declarations
+
# Build PCI Express stuff if needed
obj-$(CONFIG_PCIEPORTBUS) += pcie/
diff --git a/drivers/pci/hotplug/acpiphp_glue.c b/drivers/pci/hotplug/acpiphp_glue.c
index 0cb0f830a993..2e5f25969e11 100644
--- a/drivers/pci/hotplug/acpiphp_glue.c
+++ b/drivers/pci/hotplug/acpiphp_glue.c
@@ -62,6 +62,22 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus);
static void acpiphp_set_hpp_values(acpi_handle handle, struct pci_bus *bus);
static void handle_hotplug_event_func(acpi_handle handle, u32 type, void *context);
+static struct pci_bus *pci_bus_from_handle(acpi_handle handle)
+{
+ struct pci_bus *pbus;
+ struct acpi_pci_root *root;
+
+ root = acpi_pci_find_root(handle);
+ if (root)
+ pbus = root->bus;
+ else {
+ struct pci_dev *pdev = acpi_get_pci_dev(handle);
+ pbus = pdev->subordinate;
+ pci_dev_put(pdev);
+ }
+ return pbus;
+}
+
/* callback routine to check for the existence of a pci dock device */
static acpi_status
is_pci_dock_device(acpi_handle handle, u32 lvl, void *context, void **rv)
@@ -1387,16 +1403,7 @@ static void acpiphp_sanitize_bus(struct pci_bus *bus)
/* Program resources in newly inserted bridge */
static int acpiphp_configure_bridge (acpi_handle handle)
{
- struct pci_dev *dev;
- struct pci_bus *bus;
-
- dev = acpi_get_pci_dev(handle);
- if (!dev) {
- err("cannot get PCI domain and bus number for bridge\n");
- return -EINVAL;
- }
-
- bus = dev->bus;
+ struct pci_bus *bus = pci_bus_from_handle(handle);
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
@@ -1404,7 +1411,6 @@ static int acpiphp_configure_bridge (acpi_handle handle)
acpiphp_set_hpp_values(handle, bus);
pci_enable_bridges(bus);
acpiphp_configure_ioapics(handle);
- pci_dev_put(dev);
return 0;
}
diff --git a/drivers/pci/hotplug/pci_hotplug_core.c b/drivers/pci/hotplug/pci_hotplug_core.c
index 5c5043f239cf..0325d989bb46 100644
--- a/drivers/pci/hotplug/pci_hotplug_core.c
+++ b/drivers/pci/hotplug/pci_hotplug_core.c
@@ -86,7 +86,8 @@ static char *pci_bus_speed_strings[] = {
"66 MHz PCIX 533", /* 0x11 */
"100 MHz PCIX 533", /* 0x12 */
"133 MHz PCIX 533", /* 0x13 */
- "25 GBps PCI-E", /* 0x14 */
+ "2.5 GT/s PCI-E", /* 0x14 */
+ "5.0 GT/s PCI-E", /* 0x15 */
};
#ifdef CONFIG_HOTPLUG_PCI_CPCI
diff --git a/drivers/pci/hotplug/pciehp_hpc.c b/drivers/pci/hotplug/pciehp_hpc.c
index 52813257e5bf..271f917b6f2c 100644
--- a/drivers/pci/hotplug/pciehp_hpc.c
+++ b/drivers/pci/hotplug/pciehp_hpc.c
@@ -693,7 +693,10 @@ static int hpc_get_max_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
switch (lnk_cap & 0x000F) {
case 1:
- lnk_speed = PCIE_2PT5GB;
+ lnk_speed = PCIE_2_5GB;
+ break;
+ case 2:
+ lnk_speed = PCIE_5_0GB;
break;
default:
lnk_speed = PCIE_LNK_SPEED_UNKNOWN;
@@ -772,7 +775,10 @@ static int hpc_get_cur_lnk_speed(struct slot *slot, enum pci_bus_speed *value)
switch (lnk_status & PCI_EXP_LNKSTA_CLS) {
case 1:
- lnk_speed = PCIE_2PT5GB;
+ lnk_speed = PCIE_2_5GB;
+ break;
+ case 2:
+ lnk_speed = PCIE_5_0GB;
break;
default:
lnk_speed = PCIE_LNK_SPEED_UNKNOWN;
diff --git a/drivers/pci/legacy.c b/drivers/pci/legacy.c
new file mode 100644
index 000000000000..871f65c15936
--- /dev/null
+++ b/drivers/pci/legacy.c
@@ -0,0 +1,34 @@
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/module.h>
+#include <linux/interrupt.h>
+#include "pci.h"
+
+/**
+ * pci_find_device - begin or continue searching for a PCI device by vendor/device id
+ * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
+ * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
+ * @from: Previous PCI device found in search, or %NULL for new search.
+ *
+ * Iterates through the list of known PCI devices. If a PCI device is found
+ * with a matching @vendor and @device, a pointer to its device structure is
+ * returned. Otherwise, %NULL is returned.
+ * A new search is initiated by passing %NULL as the @from argument.
+ * Otherwise if @from is not %NULL, searches continue from next device
+ * on the global list.
+ *
+ * NOTE: Do not use this function any more; use pci_get_device() instead, as
+ * the PCI device returned by this function can disappear at any moment in
+ * time.
+ */
+struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device,
+ struct pci_dev *from)
+{
+ struct pci_dev *pdev;
+
+ pci_dev_get(from);
+ pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from);
+ pci_dev_put(pdev);
+ return pdev;
+}
+EXPORT_SYMBOL(pci_find_device);
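[Editorial note: as the kernel-doc above says, pci_find_device() is kept only for legacy users; new code should hold a reference via pci_get_device(). A sketch of the reference-counted equivalent of a scan loop; the function name is illustrative.]

#include <linux/pci.h>

/* Sketch only: iterate over devices of one vendor while holding a
 * reference on the device currently being inspected. */
static void my_scan_vendor(unsigned int vendor)
{
	struct pci_dev *pdev = NULL;

	/* pci_get_device() drops the reference on 'from' and returns the
	 * next match with a reference held, so the loop does not leak. */
	while ((pdev = pci_get_device(vendor, PCI_ANY_ID, pdev)) != NULL) {
		/* ... use pdev safely here ... */
	}
}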
diff --git a/drivers/pci/msi.c b/drivers/pci/msi.c
index d986afb7032b..f9cf3173b23d 100644
--- a/drivers/pci/msi.c
+++ b/drivers/pci/msi.c
@@ -16,9 +16,8 @@
#include <linux/proc_fs.h>
#include <linux/msi.h>
#include <linux/smp.h>
-
-#include <asm/errno.h>
-#include <asm/io.h>
+#include <linux/errno.h>
+#include <linux/io.h>
#include "pci.h"
#include "msi.h"
@@ -272,7 +271,30 @@ void write_msi_msg(unsigned int irq, struct msi_msg *msg)
write_msi_msg_desc(desc, msg);
}
-static int msi_free_irqs(struct pci_dev* dev);
+static void free_msi_irqs(struct pci_dev *dev)
+{
+ struct msi_desc *entry, *tmp;
+
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ int i, nvec;
+ if (!entry->irq)
+ continue;
+ nvec = 1 << entry->msi_attrib.multiple;
+ for (i = 0; i < nvec; i++)
+ BUG_ON(irq_has_action(entry->irq + i));
+ }
+
+ arch_teardown_msi_irqs(dev);
+
+ list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
+ if (entry->msi_attrib.is_msix) {
+ if (list_is_last(&entry->list, &dev->msi_list))
+ iounmap(entry->mask_base);
+ }
+ list_del(&entry->list);
+ kfree(entry);
+ }
+}
static struct msi_desc *alloc_msi_entry(struct pci_dev *dev)
{
@@ -324,7 +346,7 @@ static void __pci_restore_msix_state(struct pci_dev *dev)
if (!dev->msix_enabled)
return;
BUG_ON(list_empty(&dev->msi_list));
- entry = list_entry(dev->msi_list.next, struct msi_desc, list);
+ entry = list_first_entry(&dev->msi_list, struct msi_desc, list);
pos = entry->msi_attrib.pos;
pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
@@ -367,7 +389,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
u16 control;
unsigned mask;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSI);
msi_set_enable(dev, pos, 0); /* Disable MSI during set up */
pci_read_config_word(dev, msi_control_reg(pos), &control);
@@ -376,12 +398,12 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
if (!entry)
return -ENOMEM;
- entry->msi_attrib.is_msix = 0;
- entry->msi_attrib.is_64 = is_64bit_address(control);
- entry->msi_attrib.entry_nr = 0;
- entry->msi_attrib.maskbit = is_mask_bit_support(control);
- entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
- entry->msi_attrib.pos = pos;
+ entry->msi_attrib.is_msix = 0;
+ entry->msi_attrib.is_64 = is_64bit_address(control);
+ entry->msi_attrib.entry_nr = 0;
+ entry->msi_attrib.maskbit = is_mask_bit_support(control);
+ entry->msi_attrib.default_irq = dev->irq; /* Save IOAPIC IRQ */
+ entry->msi_attrib.pos = pos;
entry->mask_pos = msi_mask_reg(pos, entry->msi_attrib.is_64);
/* All MSIs are unmasked by default, Mask them all */
@@ -396,7 +418,7 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSI);
if (ret) {
msi_mask_irq(entry, mask, ~mask);
- msi_free_irqs(dev);
+ free_msi_irqs(dev);
return ret;
}
@@ -409,44 +431,27 @@ static int msi_capability_init(struct pci_dev *dev, int nvec)
return 0;
}
-/**
- * msix_capability_init - configure device's MSI-X capability
- * @dev: pointer to the pci_dev data structure of MSI-X device function
- * @entries: pointer to an array of struct msix_entry entries
- * @nvec: number of @entries
- *
- * Setup the MSI-X capability structure of device function with a
- * single MSI-X irq. A return of zero indicates the successful setup of
- * requested MSI-X entries with allocated irqs or non-zero for otherwise.
- **/
-static int msix_capability_init(struct pci_dev *dev,
- struct msix_entry *entries, int nvec)
+static void __iomem *msix_map_region(struct pci_dev *dev, unsigned pos,
+ unsigned nr_entries)
{
- struct msi_desc *entry;
- int pos, i, j, nr_entries, ret;
unsigned long phys_addr;
u32 table_offset;
- u16 control;
u8 bir;
- void __iomem *base;
- pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
- pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
-
- /* Ensure MSI-X is disabled while it is set up */
- control &= ~PCI_MSIX_FLAGS_ENABLE;
- pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
-
- /* Request & Map MSI-X table region */
- nr_entries = multi_msix_capable(control);
-
- pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
+ pci_read_config_dword(dev, msix_table_offset_reg(pos), &table_offset);
bir = (u8)(table_offset & PCI_MSIX_FLAGS_BIRMASK);
table_offset &= ~PCI_MSIX_FLAGS_BIRMASK;
- phys_addr = pci_resource_start (dev, bir) + table_offset;
- base = ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
- if (base == NULL)
- return -ENOMEM;
+ phys_addr = pci_resource_start(dev, bir) + table_offset;
+
+ return ioremap_nocache(phys_addr, nr_entries * PCI_MSIX_ENTRY_SIZE);
+}
+
+static int msix_setup_entries(struct pci_dev *dev, unsigned pos,
+ void __iomem *base, struct msix_entry *entries,
+ int nvec)
+{
+ struct msi_desc *entry;
+ int i;
for (i = 0; i < nvec; i++) {
entry = alloc_msi_entry(dev);
@@ -454,41 +459,78 @@ static int msix_capability_init(struct pci_dev *dev,
if (!i)
iounmap(base);
else
- msi_free_irqs(dev);
+ free_msi_irqs(dev);
/* No enough memory. Don't try again */
return -ENOMEM;
}
- j = entries[i].entry;
- entry->msi_attrib.is_msix = 1;
- entry->msi_attrib.is_64 = 1;
- entry->msi_attrib.entry_nr = j;
- entry->msi_attrib.default_irq = dev->irq;
- entry->msi_attrib.pos = pos;
- entry->mask_base = base;
+ entry->msi_attrib.is_msix = 1;
+ entry->msi_attrib.is_64 = 1;
+ entry->msi_attrib.entry_nr = entries[i].entry;
+ entry->msi_attrib.default_irq = dev->irq;
+ entry->msi_attrib.pos = pos;
+ entry->mask_base = base;
list_add_tail(&entry->list, &dev->msi_list);
}
- ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
- if (ret < 0) {
- /* If we had some success report the number of irqs
- * we succeeded in setting up. */
- int avail = 0;
- list_for_each_entry(entry, &dev->msi_list, list) {
- if (entry->irq != 0) {
- avail++;
- }
- }
+ return 0;
+}
- if (avail != 0)
- ret = avail;
+static void msix_program_entries(struct pci_dev *dev,
+ struct msix_entry *entries)
+{
+ struct msi_desc *entry;
+ int i = 0;
+
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ int offset = entries[i].entry * PCI_MSIX_ENTRY_SIZE +
+ PCI_MSIX_ENTRY_VECTOR_CTRL;
+
+ entries[i].vector = entry->irq;
+ set_irq_msi(entry->irq, entry);
+ entry->masked = readl(entry->mask_base + offset);
+ msix_mask_irq(entry, 1);
+ i++;
}
+}
- if (ret) {
- msi_free_irqs(dev);
+/**
+ * msix_capability_init - configure device's MSI-X capability
+ * @dev: pointer to the pci_dev data structure of MSI-X device function
+ * @entries: pointer to an array of struct msix_entry entries
+ * @nvec: number of @entries
+ *
+ * Setup the MSI-X capability structure of device function with a
+ * single MSI-X irq. A return of zero indicates the successful setup of
+ * requested MSI-X entries with allocated irqs or non-zero otherwise.
+ **/
+static int msix_capability_init(struct pci_dev *dev,
+ struct msix_entry *entries, int nvec)
+{
+ int pos, ret;
+ u16 control;
+ void __iomem *base;
+
+ pos = pci_find_capability(dev, PCI_CAP_ID_MSIX);
+ pci_read_config_word(dev, pos + PCI_MSIX_FLAGS, &control);
+
+ /* Ensure MSI-X is disabled while it is set up */
+ control &= ~PCI_MSIX_FLAGS_ENABLE;
+ pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
+
+ /* Request & Map MSI-X table region */
+ base = msix_map_region(dev, pos, multi_msix_capable(control));
+ if (!base)
+ return -ENOMEM;
+
+ ret = msix_setup_entries(dev, pos, base, entries, nvec);
+ if (ret)
return ret;
- }
+
+ ret = arch_setup_msi_irqs(dev, nvec, PCI_CAP_ID_MSIX);
+ if (ret)
+ goto error;
/*
* Some devices require MSI-X to be enabled before we can touch the
@@ -498,16 +540,7 @@ static int msix_capability_init(struct pci_dev *dev,
control |= PCI_MSIX_FLAGS_MASKALL | PCI_MSIX_FLAGS_ENABLE;
pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
- i = 0;
- list_for_each_entry(entry, &dev->msi_list, list) {
- entries[i].vector = entry->irq;
- set_irq_msi(entry->irq, entry);
- j = entries[i].entry;
- entry->masked = readl(base + j * PCI_MSIX_ENTRY_SIZE +
- PCI_MSIX_ENTRY_VECTOR_CTRL);
- msix_mask_irq(entry, 1);
- i++;
- }
+ msix_program_entries(dev, entries);
/* Set MSI-X enabled bits and unmask the function */
pci_intx_for_msi(dev, 0);
@@ -517,6 +550,27 @@ static int msix_capability_init(struct pci_dev *dev,
pci_write_config_word(dev, pos + PCI_MSIX_FLAGS, control);
return 0;
+
+error:
+ if (ret < 0) {
+ /*
+ * If we had some success, report the number of irqs
+ * we succeeded in setting up.
+ */
+ struct msi_desc *entry;
+ int avail = 0;
+
+ list_for_each_entry(entry, &dev->msi_list, list) {
+ if (entry->irq != 0)
+ avail++;
+ }
+ if (avail != 0)
+ ret = avail;
+ }
+
+ free_msi_irqs(dev);
+
+ return ret;
}
/**
@@ -529,7 +583,7 @@ static int msix_capability_init(struct pci_dev *dev,
* to determine if MSI/-X are supported for the device. If MSI/-X is
* supported return 0, else return an error code.
**/
-static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type)
+static int pci_msi_check_device(struct pci_dev *dev, int nvec, int type)
{
struct pci_bus *bus;
int ret;
@@ -546,8 +600,9 @@ static int pci_msi_check_device(struct pci_dev* dev, int nvec, int type)
if (nvec < 1)
return -ERANGE;
- /* Any bridge which does NOT route MSI transactions from it's
- * secondary bus to it's primary bus must set NO_MSI flag on
+ /*
+ * Any bridge which does NOT route MSI transactions from its
+ * secondary bus to its primary bus must set NO_MSI flag on
* the secondary pci_bus.
* We expect only arch-specific PCI host bus controller driver
* or quirks for specific PCI bridges to be setting NO_MSI.
@@ -638,50 +693,16 @@ void pci_msi_shutdown(struct pci_dev *dev)
dev->irq = desc->msi_attrib.default_irq;
}
-void pci_disable_msi(struct pci_dev* dev)
+void pci_disable_msi(struct pci_dev *dev)
{
- struct msi_desc *entry;
-
if (!pci_msi_enable || !dev || !dev->msi_enabled)
return;
pci_msi_shutdown(dev);
-
- entry = list_entry(dev->msi_list.next, struct msi_desc, list);
- if (entry->msi_attrib.is_msix)
- return;
-
- msi_free_irqs(dev);
+ free_msi_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msi);
-static int msi_free_irqs(struct pci_dev* dev)
-{
- struct msi_desc *entry, *tmp;
-
- list_for_each_entry(entry, &dev->msi_list, list) {
- int i, nvec;
- if (!entry->irq)
- continue;
- nvec = 1 << entry->msi_attrib.multiple;
- for (i = 0; i < nvec; i++)
- BUG_ON(irq_has_action(entry->irq + i));
- }
-
- arch_teardown_msi_irqs(dev);
-
- list_for_each_entry_safe(entry, tmp, &dev->msi_list, list) {
- if (entry->msi_attrib.is_msix) {
- if (list_is_last(&entry->list, &dev->msi_list))
- iounmap(entry->mask_base);
- }
- list_del(&entry->list);
- kfree(entry);
- }
-
- return 0;
-}
-
/**
* pci_msix_table_size - return the number of device's MSI-X table entries
* @dev: pointer to the pci_dev data structure of MSI-X device function
@@ -714,13 +735,13 @@ int pci_msix_table_size(struct pci_dev *dev)
* of irqs or MSI-X vectors available. Driver should use the returned value to
* re-send its request.
**/
-int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
+int pci_enable_msix(struct pci_dev *dev, struct msix_entry *entries, int nvec)
{
int status, nr_entries;
int i, j;
if (!entries)
- return -EINVAL;
+ return -EINVAL;
status = pci_msi_check_device(dev, nvec, PCI_CAP_ID_MSIX);
if (status)
@@ -742,7 +763,7 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
WARN_ON(!!dev->msix_enabled);
/* Check whether driver already requested for MSI irq */
- if (dev->msi_enabled) {
+ if (dev->msi_enabled) {
dev_info(&dev->dev, "can't enable MSI-X "
"(MSI IRQ already assigned)\n");
return -EINVAL;
@@ -752,12 +773,7 @@ int pci_enable_msix(struct pci_dev* dev, struct msix_entry *entries, int nvec)
}
EXPORT_SYMBOL(pci_enable_msix);
-static void msix_free_all_irqs(struct pci_dev *dev)
-{
- msi_free_irqs(dev);
-}
-
-void pci_msix_shutdown(struct pci_dev* dev)
+void pci_msix_shutdown(struct pci_dev *dev)
{
struct msi_desc *entry;
@@ -774,14 +790,14 @@ void pci_msix_shutdown(struct pci_dev* dev)
pci_intx_for_msi(dev, 1);
dev->msix_enabled = 0;
}
-void pci_disable_msix(struct pci_dev* dev)
+
+void pci_disable_msix(struct pci_dev *dev)
{
if (!pci_msi_enable || !dev || !dev->msix_enabled)
return;
pci_msix_shutdown(dev);
-
- msix_free_all_irqs(dev);
+ free_msi_irqs(dev);
}
EXPORT_SYMBOL(pci_disable_msix);
@@ -794,16 +810,13 @@ EXPORT_SYMBOL(pci_disable_msix);
* allocated for this device function, are reclaimed to unused state,
* which may be used later on.
**/
-void msi_remove_pci_irq_vectors(struct pci_dev* dev)
+void msi_remove_pci_irq_vectors(struct pci_dev *dev)
{
if (!pci_msi_enable || !dev)
- return;
-
- if (dev->msi_enabled)
- msi_free_irqs(dev);
+ return;
- if (dev->msix_enabled)
- msix_free_all_irqs(dev);
+ if (dev->msi_enabled || dev->msix_enabled)
+ free_msi_irqs(dev);
}
void pci_no_msi(void)
diff --git a/drivers/pci/pci-sysfs.c b/drivers/pci/pci-sysfs.c
index 85ebd02a64a7..0f6382f090ee 100644
--- a/drivers/pci/pci-sysfs.c
+++ b/drivers/pci/pci-sysfs.c
@@ -916,6 +916,24 @@ int __attribute__ ((weak)) pcibios_add_platform_entries(struct pci_dev *dev)
return 0;
}
+static ssize_t reset_store(struct device *dev,
+ struct device_attribute *attr, const char *buf,
+ size_t count)
+{
+ struct pci_dev *pdev = to_pci_dev(dev);
+ unsigned long val;
+ ssize_t result = strict_strtoul(buf, 0, &val);
+
+ if (result < 0)
+ return result;
+
+ if (val != 1)
+ return -EINVAL;
+ return pci_reset_function(pdev);
+}
+
+static struct device_attribute reset_attr = __ATTR(reset, 0200, NULL, reset_store);
+
static int pci_create_capabilities_sysfs(struct pci_dev *dev)
{
int retval;
@@ -943,7 +961,22 @@ static int pci_create_capabilities_sysfs(struct pci_dev *dev)
/* Active State Power Management */
pcie_aspm_create_sysfs_dev_files(dev);
+ if (!pci_probe_reset_function(dev)) {
+ retval = device_create_file(&dev->dev, &reset_attr);
+ if (retval)
+ goto error;
+ dev->reset_fn = 1;
+ }
return 0;
+
+error:
+ pcie_aspm_remove_sysfs_dev_files(dev);
+ if (dev->vpd && dev->vpd->attr) {
+ sysfs_remove_bin_file(&dev->dev.kobj, dev->vpd->attr);
+ kfree(dev->vpd->attr);
+ }
+
+ return retval;
}
int __must_check pci_create_sysfs_dev_files (struct pci_dev *pdev)
@@ -1037,6 +1070,10 @@ static void pci_remove_capabilities_sysfs(struct pci_dev *dev)
}
pcie_aspm_remove_sysfs_dev_files(dev);
+ if (dev->reset_fn) {
+ device_remove_file(&dev->dev, &reset_attr);
+ dev->reset_fn = 0;
+ }
}
/**
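[Editorial note: the new write-only "reset" attribute (mode 0200) appears only for devices where pci_probe_reset_function() succeeds; writing "1" invokes pci_reset_function(). A hedged userspace sketch follows, with an illustrative device address.]

/* Sketch only: request a function-level reset through sysfs.
 * The PCI address below is an example, not taken from this patch. */
#include <fcntl.h>
#include <unistd.h>

int main(void)
{
	int fd = open("/sys/bus/pci/devices/0000:01:00.0/reset", O_WRONLY);

	if (fd < 0)
		return 1;
	/* Per reset_store(), any value other than "1" yields -EINVAL */
	write(fd, "1", 1);
	close(fd);
	return 0;
}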
diff --git a/drivers/pci/pci.c b/drivers/pci/pci.c
index 7b70312181d7..bd993351db45 100644
--- a/drivers/pci/pci.c
+++ b/drivers/pci/pci.c
@@ -2262,6 +2262,22 @@ int __pci_reset_function(struct pci_dev *dev)
EXPORT_SYMBOL_GPL(__pci_reset_function);
/**
+ * pci_probe_reset_function - check whether the device can be safely reset
+ * @dev: PCI device to reset
+ *
+ * Some devices allow an individual function to be reset without affecting
+ * other functions in the same device. The PCI device must be responsive
+ * to PCI config space in order to use this function.
+ *
+ * Returns 0 if the device function can be reset or negative if the
+ * device doesn't support resetting a single function.
+ */
+int pci_probe_reset_function(struct pci_dev *dev)
+{
+ return pci_dev_reset(dev, 1);
+}
+
+/**
* pci_reset_function - quiesce and reset a PCI device function
* @dev: PCI device to reset
*
@@ -2504,6 +2520,50 @@ int pci_resource_bar(struct pci_dev *dev, int resno, enum pci_bar_type *type)
return 0;
}
+/**
+ * pci_set_vga_state - set VGA decode state on device and parents if requested
+ * @dev: the PCI device
+ * @decode: true = enable decoding, false = disable decoding
+ * @command_bits: PCI_COMMAND_IO and/or PCI_COMMAND_MEMORY
+ * @change_bridge: traverse ancestors and change bridges
+ */
+int pci_set_vga_state(struct pci_dev *dev, bool decode,
+ unsigned int command_bits, bool change_bridge)
+{
+ struct pci_bus *bus;
+ struct pci_dev *bridge;
+ u16 cmd;
+
+ WARN_ON(command_bits & ~(PCI_COMMAND_IO|PCI_COMMAND_MEMORY));
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ if (decode == true)
+ cmd |= command_bits;
+ else
+ cmd &= ~command_bits;
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+
+ if (change_bridge == false)
+ return 0;
+
+ bus = dev->bus;
+ while (bus) {
+ bridge = bus->self;
+ if (bridge) {
+ pci_read_config_word(bridge, PCI_BRIDGE_CONTROL,
+ &cmd);
+ if (decode == true)
+ cmd |= PCI_BRIDGE_CTL_VGA;
+ else
+ cmd &= ~PCI_BRIDGE_CTL_VGA;
+ pci_write_config_word(bridge, PCI_BRIDGE_CONTROL,
+ cmd);
+ }
+ bus = bus->parent;
+ }
+ return 0;
+}
+
#define RESOURCE_ALIGNMENT_PARAM_SIZE COMMAND_LINE_SIZE
static char resource_alignment_param[RESOURCE_ALIGNMENT_PARAM_SIZE] = {0};
spinlock_t resource_alignment_lock = SPIN_LOCK_UNLOCKED;
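[Editorial note: pci_set_vga_state() above is the primitive the VGA arbiter uses to flip a device's IO/MEM enables and, when change_bridge is set, the VGA Enable bit in every upstream bridge. A hedged sketch of a direct caller; the wrapper name is illustrative.]

#include <linux/pci.h>

/* Sketch only: route legacy VGA cycles to 'pdev', mirroring what
 * __vga_tryget() in vgaarb.c does for a winning device. */
static int my_route_vga_to(struct pci_dev *pdev)
{
	return pci_set_vga_state(pdev, true,
				 PCI_COMMAND_IO | PCI_COMMAND_MEMORY,
				 true /* also update bridge VGA forwarding */);
}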
diff --git a/drivers/pci/pci.h b/drivers/pci/pci.h
index 5ff4d25bf0e9..73d9d92715a0 100644
--- a/drivers/pci/pci.h
+++ b/drivers/pci/pci.h
@@ -16,6 +16,7 @@ extern void pci_cleanup_rom(struct pci_dev *dev);
extern int pci_mmap_fits(struct pci_dev *pdev, int resno,
struct vm_area_struct *vma);
#endif
+int pci_probe_reset_function(struct pci_dev *dev);
/**
* struct pci_platform_pm_ops - Firmware PM callbacks
diff --git a/drivers/pci/pcie/aspm.c b/drivers/pci/pcie/aspm.c
index 3d27c97e0486..f289ca9bf18d 100644
--- a/drivers/pci/pcie/aspm.c
+++ b/drivers/pci/pcie/aspm.c
@@ -26,6 +26,13 @@
#endif
#define MODULE_PARAM_PREFIX "pcie_aspm."
+/* Note: these are not register definitions */
+#define ASPM_STATE_L0S_UP (1) /* Upstream direction L0s state */
+#define ASPM_STATE_L0S_DW (2) /* Downstream direction L0s state */
+#define ASPM_STATE_L1 (4) /* L1 state */
+#define ASPM_STATE_L0S (ASPM_STATE_L0S_UP | ASPM_STATE_L0S_DW)
+#define ASPM_STATE_ALL (ASPM_STATE_L0S | ASPM_STATE_L1)
+
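For reference (worked values, not text from the patch): with the definitions above, ASPM_STATE_L0S is 0x3 (L0s in both directions) and ASPM_STATE_ALL is 0x7; a link capable of upstream-direction L0s and L1 but not downstream-direction L0s would carry 0x5 in its aspm_capable mask.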
struct aspm_latency {
u32 l0s; /* L0s latency (nsec) */
u32 l1; /* L1 latency (nsec) */
@@ -40,17 +47,20 @@ struct pcie_link_state {
struct list_head link; /* node in parent's children list */
/* ASPM state */
- u32 aspm_support:2; /* Supported ASPM state */
- u32 aspm_enabled:2; /* Enabled ASPM state */
- u32 aspm_default:2; /* Default ASPM state by BIOS */
+ u32 aspm_support:3; /* Supported ASPM state */
+ u32 aspm_enabled:3; /* Enabled ASPM state */
+ u32 aspm_capable:3; /* Capable ASPM state with latency */
+ u32 aspm_default:3; /* Default ASPM state by BIOS */
+ u32 aspm_disable:3; /* Disabled ASPM state */
/* Clock PM state */
u32 clkpm_capable:1; /* Clock PM capable? */
u32 clkpm_enabled:1; /* Current Clock PM state */
u32 clkpm_default:1; /* Default Clock PM state by BIOS */
- /* Latencies */
- struct aspm_latency latency; /* Exit latency */
+ /* Exit latencies */
+ struct aspm_latency latency_up; /* Upstream direction exit latency */
+ struct aspm_latency latency_dw; /* Downstream direction exit latency */
/*
* Endpoint acceptable latencies. A pcie downstream port only
* has one slot under it, so at most there are 8 functions.
@@ -82,7 +92,7 @@ static int policy_to_aspm_state(struct pcie_link_state *link)
return 0;
case POLICY_POWERSAVE:
/* Enable ASPM L0s/L1 */
- return PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
+ return ASPM_STATE_ALL;
case POLICY_DEFAULT:
return link->aspm_default;
}
@@ -164,18 +174,6 @@ static void pcie_clkpm_cap_init(struct pcie_link_state *link, int blacklist)
link->clkpm_capable = (blacklist) ? 0 : capable;
}
-static bool pcie_aspm_downstream_has_switch(struct pcie_link_state *link)
-{
- struct pci_dev *child;
- struct pci_bus *linkbus = link->pdev->subordinate;
-
- list_for_each_entry(child, &linkbus->devices, bus_list) {
- if (child->pcie_type == PCI_EXP_TYPE_UPSTREAM)
- return true;
- }
- return false;
-}
-
/*
* pcie_aspm_configure_common_clock: check if the 2 ends of a link
* could use common clock. If they are, configure them to use the
@@ -288,71 +286,133 @@ static u32 calc_l1_acceptable(u32 encoding)
return (1000 << encoding);
}
-static void pcie_aspm_get_cap_device(struct pci_dev *pdev, u32 *state,
- u32 *l0s, u32 *l1, u32 *enabled)
+struct aspm_register_info {
+ u32 support:2;
+ u32 enabled:2;
+ u32 latency_encoding_l0s;
+ u32 latency_encoding_l1;
+};
+
+static void pcie_get_aspm_reg(struct pci_dev *pdev,
+ struct aspm_register_info *info)
{
int pos;
u16 reg16;
- u32 reg32, encoding;
+ u32 reg32;
- *l0s = *l1 = *enabled = 0;
pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
pci_read_config_dword(pdev, pos + PCI_EXP_LNKCAP, &reg32);
- *state = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
- if (*state != PCIE_LINK_STATE_L0S &&
- *state != (PCIE_LINK_STATE_L1 | PCIE_LINK_STATE_L0S))
- *state = 0;
- if (*state == 0)
+ info->support = (reg32 & PCI_EXP_LNKCAP_ASPMS) >> 10;
+ /* 00b and 10b are defined as "Reserved". */
+ if (info->support == PCIE_LINK_STATE_L1)
+ info->support = 0;
+ info->latency_encoding_l0s = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
+ info->latency_encoding_l1 = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
+ pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
+ info->enabled = reg16 & PCI_EXP_LNKCTL_ASPMC;
+}
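For reference, the shifts in pcie_get_aspm_reg() follow the Link Capabilities register layout in the PCIe Base specification: ASPM Support sits in bits 11:10, L0s Exit Latency in bits 14:12 and L1 Exit Latency in bits 17:15, while the ASPM Control field read back from Link Control occupies bits 1:0.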
+
+static void pcie_aspm_check_latency(struct pci_dev *endpoint)
+{
+ u32 latency, l1_switch_latency = 0;
+ struct aspm_latency *acceptable;
+ struct pcie_link_state *link;
+
+ /* A device not in D0 doesn't need a latency check */
+ if ((endpoint->current_state != PCI_D0) &&
+ (endpoint->current_state != PCI_UNKNOWN))
return;
- encoding = (reg32 & PCI_EXP_LNKCAP_L0SEL) >> 12;
- *l0s = calc_l0s_latency(encoding);
- if (*state & PCIE_LINK_STATE_L1) {
- encoding = (reg32 & PCI_EXP_LNKCAP_L1EL) >> 15;
- *l1 = calc_l1_latency(encoding);
+ link = endpoint->bus->self->link_state;
+ acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)];
+
+ while (link) {
+ /* Check upstream direction L0s latency */
+ if ((link->aspm_capable & ASPM_STATE_L0S_UP) &&
+ (link->latency_up.l0s > acceptable->l0s))
+ link->aspm_capable &= ~ASPM_STATE_L0S_UP;
+
+ /* Check downstream direction L0s latency */
+ if ((link->aspm_capable & ASPM_STATE_L0S_DW) &&
+ (link->latency_dw.l0s > acceptable->l0s))
+ link->aspm_capable &= ~ASPM_STATE_L0S_DW;
+ /*
+ * Check L1 latency.
+ * Every switch on the path to the root complex needs 1
+ * more microsecond for L1. The spec doesn't mention L0s.
+ */
+ latency = max_t(u32, link->latency_up.l1, link->latency_dw.l1);
+ if ((link->aspm_capable & ASPM_STATE_L1) &&
+ (latency + l1_switch_latency > acceptable->l1))
+ link->aspm_capable &= ~ASPM_STATE_L1;
+ l1_switch_latency += 1000;
+
+ link = link->parent;
}
- pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
- *enabled = reg16 & (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
}
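As a worked example with assumed numbers: suppose an endpoint advertises an acceptable L1 exit latency of 64 usec (64000 ns) and sits two links below the root port. If the nearer link's worse-direction L1 exit latency is 8 usec, the first pass checks 8000 + 0 <= 64000 and L1 stays capable; l1_switch_latency then becomes 1000 ns, so if the root-port link reports 32 usec the second pass checks 32000 + 1000 <= 64000, which also passes. Had the endpoint only accepted 32 usec, that 33000 ns total would have cleared ASPM_STATE_L1 on the upper link.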
static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
{
- u32 support, l0s, l1, enabled;
struct pci_dev *child, *parent = link->pdev;
struct pci_bus *linkbus = parent->subordinate;
+ struct aspm_register_info upreg, dwreg;
if (blacklist) {
- /* Set support state to 0, so we will disable ASPM later */
- link->aspm_support = 0;
- link->aspm_default = 0;
- link->aspm_enabled = PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1;
+ /* Set the enabled/disable masks so that we will disable ASPM later */
+ link->aspm_enabled = ASPM_STATE_ALL;
+ link->aspm_disable = ASPM_STATE_ALL;
return;
}
/* Configure common clock before checking latencies */
pcie_aspm_configure_common_clock(link);
- /* upstream component states */
- pcie_aspm_get_cap_device(parent, &support, &l0s, &l1, &enabled);
- link->aspm_support = support;
- link->latency.l0s = l0s;
- link->latency.l1 = l1;
- link->aspm_enabled = enabled;
-
- /* downstream component states, all functions have the same setting */
+ /* Get upstream/downstream components' register state */
+ pcie_get_aspm_reg(parent, &upreg);
child = list_entry(linkbus->devices.next, struct pci_dev, bus_list);
- pcie_aspm_get_cap_device(child, &support, &l0s, &l1, &enabled);
- link->aspm_support &= support;
- link->latency.l0s = max_t(u32, link->latency.l0s, l0s);
- link->latency.l1 = max_t(u32, link->latency.l1, l1);
+ pcie_get_aspm_reg(child, &dwreg);
- if (!link->aspm_support)
- return;
-
- link->aspm_enabled &= link->aspm_support;
+ /*
+ * Setup L0s state
+ *
+ * Note that we must not enable L0s in either direction on a
+ * given link unless components on both sides of the link each
+ * support L0s.
+ */
+ if (dwreg.support & upreg.support & PCIE_LINK_STATE_L0S)
+ link->aspm_support |= ASPM_STATE_L0S;
+ if (dwreg.enabled & PCIE_LINK_STATE_L0S)
+ link->aspm_enabled |= ASPM_STATE_L0S_UP;
+ if (upreg.enabled & PCIE_LINK_STATE_L0S)
+ link->aspm_enabled |= ASPM_STATE_L0S_DW;
+ link->latency_up.l0s = calc_l0s_latency(upreg.latency_encoding_l0s);
+ link->latency_dw.l0s = calc_l0s_latency(dwreg.latency_encoding_l0s);
+
+ /* Setup L1 state */
+ if (upreg.support & dwreg.support & PCIE_LINK_STATE_L1)
+ link->aspm_support |= ASPM_STATE_L1;
+ if (upreg.enabled & dwreg.enabled & PCIE_LINK_STATE_L1)
+ link->aspm_enabled |= ASPM_STATE_L1;
+ link->latency_up.l1 = calc_l1_latency(upreg.latency_encoding_l1);
+ link->latency_dw.l1 = calc_l1_latency(dwreg.latency_encoding_l1);
+
+ /* Save default state */
link->aspm_default = link->aspm_enabled;
- /* ENDPOINT states*/
+ /* Setup initial capable state. Will be updated later */
+ link->aspm_capable = link->aspm_support;
+ /*
+ * If the downstream component has a PCI bridge function, don't
+ * enable ASPM for now.
+ */
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE) {
+ link->aspm_disable = ASPM_STATE_ALL;
+ break;
+ }
+ }
+
+ /* Get and check endpoint acceptable latencies */
list_for_each_entry(child, &linkbus->devices, bus_list) {
int pos;
u32 reg32, encoding;
@@ -365,109 +425,46 @@ static void pcie_aspm_cap_init(struct pcie_link_state *link, int blacklist)
pos = pci_find_capability(child, PCI_CAP_ID_EXP);
pci_read_config_dword(child, pos + PCI_EXP_DEVCAP, &reg32);
+ /* Calculate endpoint L0s acceptable latency */
encoding = (reg32 & PCI_EXP_DEVCAP_L0S) >> 6;
acceptable->l0s = calc_l0s_acceptable(encoding);
- if (link->aspm_support & PCIE_LINK_STATE_L1) {
- encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
- acceptable->l1 = calc_l1_acceptable(encoding);
- }
- }
-}
-
-/**
- * __pcie_aspm_check_state_one - check latency for endpoint device.
- * @endpoint: pointer to the struct pci_dev of endpoint device
- *
- * TBD: The latency from the endpoint to root complex vary per switch's
- * upstream link state above the device. Here we just do a simple check
- * which assumes all links above the device can be in L1 state, that
- * is we just consider the worst case. If switch's upstream link can't
- * be put into L0S/L1, then our check is too strictly.
- */
-static u32 __pcie_aspm_check_state_one(struct pci_dev *endpoint, u32 state)
-{
- u32 l1_switch_latency = 0;
- struct aspm_latency *acceptable;
- struct pcie_link_state *link;
-
- link = endpoint->bus->self->link_state;
- state &= link->aspm_support;
- acceptable = &link->acceptable[PCI_FUNC(endpoint->devfn)];
+ /* Calculate endpoint L1 acceptable latency */
+ encoding = (reg32 & PCI_EXP_DEVCAP_L1) >> 9;
+ acceptable->l1 = calc_l1_acceptable(encoding);
- while (link && state) {
- if ((state & PCIE_LINK_STATE_L0S) &&
- (link->latency.l0s > acceptable->l0s))
- state &= ~PCIE_LINK_STATE_L0S;
- if ((state & PCIE_LINK_STATE_L1) &&
- (link->latency.l1 + l1_switch_latency > acceptable->l1))
- state &= ~PCIE_LINK_STATE_L1;
- link = link->parent;
- /*
- * Every switch on the path to root complex need 1
- * more microsecond for L1. Spec doesn't mention L0s.
- */
- l1_switch_latency += 1000;
- }
- return state;
-}
-
-static u32 pcie_aspm_check_state(struct pcie_link_state *link, u32 state)
-{
- pci_power_t power_state;
- struct pci_dev *child;
- struct pci_bus *linkbus = link->pdev->subordinate;
-
- /* If no child, ignore the link */
- if (list_empty(&linkbus->devices))
- return state;
-
- list_for_each_entry(child, &linkbus->devices, bus_list) {
- /*
- * If downstream component of a link is pci bridge, we
- * disable ASPM for now for the link
- */
- if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
- return 0;
-
- if ((child->pcie_type != PCI_EXP_TYPE_ENDPOINT &&
- child->pcie_type != PCI_EXP_TYPE_LEG_END))
- continue;
- /* Device not in D0 doesn't need check latency */
- power_state = child->current_state;
- if (power_state == PCI_D1 || power_state == PCI_D2 ||
- power_state == PCI_D3hot || power_state == PCI_D3cold)
- continue;
- state = __pcie_aspm_check_state_one(child, state);
+ pcie_aspm_check_latency(child);
}
- return state;
}
-static void __pcie_aspm_config_one_dev(struct pci_dev *pdev, unsigned int state)
+static void pcie_config_aspm_dev(struct pci_dev *pdev, u32 val)
{
u16 reg16;
int pos = pci_find_capability(pdev, PCI_CAP_ID_EXP);
pci_read_config_word(pdev, pos + PCI_EXP_LNKCTL, &reg16);
reg16 &= ~0x3;
- reg16 |= state;
+ reg16 |= val;
pci_write_config_word(pdev, pos + PCI_EXP_LNKCTL, reg16);
}
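Note (spec detail, not text from the patch): the '~0x3' mask clears the two-bit ASPM Control field in bits 1:0 of Link Control before the new value is or-ed in, so only the ASPM bits of the register are changed.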
-static void __pcie_aspm_config_link(struct pcie_link_state *link, u32 state)
+static void pcie_config_aspm_link(struct pcie_link_state *link, u32 state)
{
+ u32 upstream = 0, dwstream = 0;
struct pci_dev *child, *parent = link->pdev;
struct pci_bus *linkbus = parent->subordinate;
- /* If no child, disable the link */
- if (list_empty(&linkbus->devices))
- state = 0;
- /*
- * If the downstream component has pci bridge function, don't
- * do ASPM now.
- */
- list_for_each_entry(child, &linkbus->devices, bus_list) {
- if (child->pcie_type == PCI_EXP_TYPE_PCI_BRIDGE)
- return;
+ /* Nothing to do if the link is already in the requested state */
+ state &= (link->aspm_capable & ~link->aspm_disable);
+ if (link->aspm_enabled == state)
+ return;
+ /* Convert ASPM state to upstream/downstream ASPM register state */
+ if (state & ASPM_STATE_L0S_UP)
+ dwstream |= PCIE_LINK_STATE_L0S;
+ if (state & ASPM_STATE_L0S_DW)
+ upstream |= PCIE_LINK_STATE_L0S;
+ if (state & ASPM_STATE_L1) {
+ upstream |= PCIE_LINK_STATE_L1;
+ dwstream |= PCIE_LINK_STATE_L1;
}
/*
* Spec 2.0 suggests all functions should be configured the
@@ -475,67 +472,24 @@ static void __pcie_aspm_config_link(struct pcie_link_state *link, u32 state)
* upstream component first and then downstream, and vice
* versa for disabling ASPM L1. Spec doesn't mention L0S.
*/
- if (state & PCIE_LINK_STATE_L1)
- __pcie_aspm_config_one_dev(parent, state);
-
+ if (state & ASPM_STATE_L1)
+ pcie_config_aspm_dev(parent, upstream);
list_for_each_entry(child, &linkbus->devices, bus_list)
- __pcie_aspm_config_one_dev(child, state);
-
- if (!(state & PCIE_LINK_STATE_L1))
- __pcie_aspm_config_one_dev(parent, state);
+ pcie_config_aspm_dev(child, dwstream);
+ if (!(state & ASPM_STATE_L1))
+ pcie_config_aspm_dev(parent, upstream);
link->aspm_enabled = state;
}
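A hedged reading of the flow above: the requested state is first clipped to aspm_capable & ~aspm_disable, so callers can pass the raw policy value; and pcie_config_aspm_link(link, 0), as used during link teardown further below, writes zeros to both ends and switches ASPM off for the link regardless of policy.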
-/* Check the whole hierarchy, and configure each link in the hierarchy */
-static void __pcie_aspm_configure_link_state(struct pcie_link_state *link,
- u32 state)
+static void pcie_config_aspm_path(struct pcie_link_state *link)
{
- struct pcie_link_state *leaf, *root = link->root;
-
- state &= (PCIE_LINK_STATE_L0S | PCIE_LINK_STATE_L1);
-
- /* Check all links who have specific root port link */
- list_for_each_entry(leaf, &link_list, sibling) {
- if (!list_empty(&leaf->children) || (leaf->root != root))
- continue;
- state = pcie_aspm_check_state(leaf, state);
- }
- /* Check root port link too in case it hasn't children */
- state = pcie_aspm_check_state(root, state);
- if (link->aspm_enabled == state)
- return;
- /*
- * We must change the hierarchy. See comments in
- * __pcie_aspm_config_link for the order
- **/
- if (state & PCIE_LINK_STATE_L1) {
- list_for_each_entry(leaf, &link_list, sibling) {
- if (leaf->root == root)
- __pcie_aspm_config_link(leaf, state);
- }
- } else {
- list_for_each_entry_reverse(leaf, &link_list, sibling) {
- if (leaf->root == root)
- __pcie_aspm_config_link(leaf, state);
- }
+ while (link) {
+ pcie_config_aspm_link(link, policy_to_aspm_state(link));
+ link = link->parent;
}
}
-/*
- * pcie_aspm_configure_link_state: enable/disable PCI express link state
- * @pdev: the root port or switch downstream port
- */
-static void pcie_aspm_configure_link_state(struct pcie_link_state *link,
- u32 state)
-{
- down_read(&pci_bus_sem);
- mutex_lock(&aspm_lock);
- __pcie_aspm_configure_link_state(link, state);
- mutex_unlock(&aspm_lock);
- up_read(&pci_bus_sem);
-}
-
static void free_link_state(struct pcie_link_state *link)
{
link->pdev->link_state = NULL;
@@ -570,10 +524,9 @@ static int pcie_aspm_sanity_check(struct pci_dev *pdev)
return 0;
}
-static struct pcie_link_state *pcie_aspm_setup_link_state(struct pci_dev *pdev)
+static struct pcie_link_state *alloc_pcie_link_state(struct pci_dev *pdev)
{
struct pcie_link_state *link;
- int blacklist = !!pcie_aspm_sanity_check(pdev);
link = kzalloc(sizeof(*link), GFP_KERNEL);
if (!link)
@@ -599,15 +552,7 @@ static struct pcie_link_state *pcie_aspm_setup_link_state(struct pci_dev *pdev)
link->root = link->parent->root;
list_add(&link->sibling, &link_list);
-
pdev->link_state = link;
-
- /* Check ASPM capability */
- pcie_aspm_cap_init(link, blacklist);
-
- /* Check Clock PM capability */
- pcie_clkpm_cap_init(link, blacklist);
-
return link;
}
@@ -618,8 +563,8 @@ static struct pcie_link_state *pcie_aspm_setup_link_state(struct pci_dev *pdev)
*/
void pcie_aspm_init_link_state(struct pci_dev *pdev)
{
- u32 state;
struct pcie_link_state *link;
+ int blacklist = !!pcie_aspm_sanity_check(pdev);
if (aspm_disabled || !pdev->is_pcie || pdev->link_state)
return;
@@ -637,47 +582,64 @@ void pcie_aspm_init_link_state(struct pci_dev *pdev)
goto out;
mutex_lock(&aspm_lock);
- link = pcie_aspm_setup_link_state(pdev);
+ link = alloc_pcie_link_state(pdev);
if (!link)
goto unlock;
/*
- * Setup initial ASPM state
- *
- * If link has switch, delay the link config. The leaf link
- * initialization will config the whole hierarchy. But we must
- * make sure BIOS doesn't set unsupported link state.
+ * Setup initial ASPM state. Note that we need to configure
+ * upstream links as well, because their capable state can be
+ * updated by pcie_aspm_cap_init().
*/
- if (pcie_aspm_downstream_has_switch(link)) {
- state = pcie_aspm_check_state(link, link->aspm_default);
- __pcie_aspm_config_link(link, state);
- } else {
- state = policy_to_aspm_state(link);
- __pcie_aspm_configure_link_state(link, state);
- }
+ pcie_aspm_cap_init(link, blacklist);
+ pcie_config_aspm_path(link);
/* Setup initial Clock PM state */
- state = (link->clkpm_capable) ? policy_to_clkpm_state(link) : 0;
- pcie_set_clkpm(link, state);
+ pcie_clkpm_cap_init(link, blacklist);
+ pcie_set_clkpm(link, policy_to_clkpm_state(link));
unlock:
mutex_unlock(&aspm_lock);
out:
up_read(&pci_bus_sem);
}
+/* Recheck latencies and update aspm_capable for links under the root */
+static void pcie_update_aspm_capable(struct pcie_link_state *root)
+{
+ struct pcie_link_state *link;
+ BUG_ON(root->parent);
+ list_for_each_entry(link, &link_list, sibling) {
+ if (link->root != root)
+ continue;
+ link->aspm_capable = link->aspm_support;
+ }
+ list_for_each_entry(link, &link_list, sibling) {
+ struct pci_dev *child;
+ struct pci_bus *linkbus = link->pdev->subordinate;
+ if (link->root != root)
+ continue;
+ list_for_each_entry(child, &linkbus->devices, bus_list) {
+ if ((child->pcie_type != PCI_EXP_TYPE_ENDPOINT) &&
+ (child->pcie_type != PCI_EXP_TYPE_LEG_END))
+ continue;
+ pcie_aspm_check_latency(child);
+ }
+ }
+}
+
/* @pdev: the endpoint device */
void pcie_aspm_exit_link_state(struct pci_dev *pdev)
{
struct pci_dev *parent = pdev->bus->self;
- struct pcie_link_state *link_state = parent->link_state;
+ struct pcie_link_state *link, *root, *parent_link;
- if (aspm_disabled || !pdev->is_pcie || !parent || !link_state)
+ if (aspm_disabled || !pdev->is_pcie || !parent || !parent->link_state)
return;
- if (parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
- parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
+ if ((parent->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
+ (parent->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))
return;
+
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
-
/*
* All PCIe functions are in one slot, remove one function will remove
* the whole slot, so just wait until we are the last function left.
@@ -685,13 +647,20 @@ void pcie_aspm_exit_link_state(struct pci_dev *pdev)
if (!list_is_last(&pdev->bus_list, &parent->subordinate->devices))
goto out;
+ link = parent->link_state;
+ root = link->root;
+ parent_link = link->parent;
+
/* All functions are removed, so just disable ASPM for the link */
- __pcie_aspm_config_one_dev(parent, 0);
- list_del(&link_state->sibling);
- list_del(&link_state->link);
+ pcie_config_aspm_link(link, 0);
+ list_del(&link->sibling);
+ list_del(&link->link);
/* Clock PM is for endpoint device */
+ free_link_state(link);
- free_link_state(link_state);
+ /* Recheck latencies and configure upstream links */
+ pcie_update_aspm_capable(root);
+ pcie_config_aspm_path(parent_link);
out:
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
@@ -700,18 +669,23 @@ out:
/* @pdev: the root port or switch downstream port */
void pcie_aspm_pm_state_change(struct pci_dev *pdev)
{
- struct pcie_link_state *link_state = pdev->link_state;
+ struct pcie_link_state *link = pdev->link_state;
- if (aspm_disabled || !pdev->is_pcie || !pdev->link_state)
+ if (aspm_disabled || !pdev->is_pcie || !link)
return;
- if (pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT &&
- pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM)
+ if ((pdev->pcie_type != PCI_EXP_TYPE_ROOT_PORT) &&
+ (pdev->pcie_type != PCI_EXP_TYPE_DOWNSTREAM))
return;
/*
- * devices changed PM state, we should recheck if latency meets all
- * functions' requirement
+ * A device changed its PM state; recheck whether the latency
+ * still meets all functions' requirements.
*/
- pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled);
+ down_read(&pci_bus_sem);
+ mutex_lock(&aspm_lock);
+ pcie_update_aspm_capable(link->root);
+ pcie_config_aspm_path(link);
+ mutex_unlock(&aspm_lock);
+ up_read(&pci_bus_sem);
}
/*
@@ -721,7 +695,7 @@ void pcie_aspm_pm_state_change(struct pci_dev *pdev)
void pci_disable_link_state(struct pci_dev *pdev, int state)
{
struct pci_dev *parent = pdev->bus->self;
- struct pcie_link_state *link_state;
+ struct pcie_link_state *link;
if (aspm_disabled || !pdev->is_pcie)
return;
@@ -733,12 +707,16 @@ void pci_disable_link_state(struct pci_dev *pdev, int state)
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
- link_state = parent->link_state;
- link_state->aspm_support &= ~state;
- __pcie_aspm_configure_link_state(link_state, link_state->aspm_enabled);
+ link = parent->link_state;
+ if (state & PCIE_LINK_STATE_L0S)
+ link->aspm_disable |= ASPM_STATE_L0S;
+ if (state & PCIE_LINK_STATE_L1)
+ link->aspm_disable |= ASPM_STATE_L1;
+ pcie_config_aspm_link(link, policy_to_aspm_state(link));
+
if (state & PCIE_LINK_STATE_CLKPM) {
- link_state->clkpm_capable = 0;
- pcie_set_clkpm(link_state, 0);
+ link->clkpm_capable = 0;
+ pcie_set_clkpm(link, 0);
}
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
@@ -748,7 +726,7 @@ EXPORT_SYMBOL(pci_disable_link_state);
static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
{
int i;
- struct pcie_link_state *link_state;
+ struct pcie_link_state *link;
for (i = 0; i < ARRAY_SIZE(policy_str); i++)
if (!strncmp(val, policy_str[i], strlen(policy_str[i])))
@@ -761,10 +739,9 @@ static int pcie_aspm_set_policy(const char *val, struct kernel_param *kp)
down_read(&pci_bus_sem);
mutex_lock(&aspm_lock);
aspm_policy = i;
- list_for_each_entry(link_state, &link_list, sibling) {
- __pcie_aspm_configure_link_state(link_state,
- policy_to_aspm_state(link_state));
- pcie_set_clkpm(link_state, policy_to_clkpm_state(link_state));
+ list_for_each_entry(link, &link_list, sibling) {
+ pcie_config_aspm_link(link, policy_to_aspm_state(link));
+ pcie_set_clkpm(link, policy_to_clkpm_state(link));
}
mutex_unlock(&aspm_lock);
up_read(&pci_bus_sem);
@@ -802,18 +779,28 @@ static ssize_t link_state_store(struct device *dev,
size_t n)
{
struct pci_dev *pdev = to_pci_dev(dev);
- int state;
+ struct pcie_link_state *link, *root = pdev->link_state->root;
+ u32 val = buf[0] - '0', state = 0;
- if (n < 1)
+ if (n < 1 || val > 3)
return -EINVAL;
- state = buf[0]-'0';
- if (state >= 0 && state <= 3) {
- /* setup link aspm state */
- pcie_aspm_configure_link_state(pdev->link_state, state);
- return n;
- }
- return -EINVAL;
+ /* Convert requested state to ASPM state */
+ if (val & PCIE_LINK_STATE_L0S)
+ state |= ASPM_STATE_L0S;
+ if (val & PCIE_LINK_STATE_L1)
+ state |= ASPM_STATE_L1;
+
+ down_read(&pci_bus_sem);
+ mutex_lock(&aspm_lock);
+ list_for_each_entry(link, &link_list, sibling) {
+ if (link->root != root)
+ continue;
+ pcie_config_aspm_link(link, state);
+ }
+ mutex_unlock(&aspm_lock);
+ up_read(&pci_bus_sem);
+ return n;
}
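Usage note (paths are typical, not quoted from the patch): the link_state attribute is normally exposed under the device's power group, e.g. /sys/bus/pci/devices/0000:00:1c.0/power/link_state, and accepts '0'-'3', where bit 0 requests L0s and bit 1 requests L1; the chosen value is applied to every link sharing the same root port as the device.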
static ssize_t clk_ctl_show(struct device *dev,
diff --git a/drivers/pci/probe.c b/drivers/pci/probe.c
index 40e75f6a5056..ab52840f4753 100644
--- a/drivers/pci/probe.c
+++ b/drivers/pci/probe.c
@@ -235,7 +235,10 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
res->start = l64;
res->end = l64 + sz64;
dev_printk(KERN_DEBUG, &dev->dev,
- "reg %x 64bit mmio: %pR\n", pos, res);
+ "reg %x %s: %pR\n", pos,
+ (res->flags & IORESOURCE_PREFETCH) ?
+ "64bit mmio pref" : "64bit mmio",
+ res);
}
res->flags |= IORESOURCE_MEM_64;
@@ -249,7 +252,9 @@ int __pci_read_base(struct pci_dev *dev, enum pci_bar_type type,
res->end = l + sz;
dev_printk(KERN_DEBUG, &dev->dev, "reg %x %s: %pR\n", pos,
- (res->flags & IORESOURCE_IO) ? "io port" : "32bit mmio",
+ (res->flags & IORESOURCE_IO) ? "io port" :
+ ((res->flags & IORESOURCE_PREFETCH) ?
+ "32bit mmio pref" : "32bit mmio"),
res);
}
@@ -1061,8 +1066,7 @@ int pci_scan_slot(struct pci_bus *bus, int devfn)
if (dev && !dev->is_added) /* new device? */
nr++;
- if ((dev && dev->multifunction) ||
- (!dev && pcibios_scan_all_fns(bus, devfn))) {
+ if (dev && dev->multifunction) {
for (fn = 1; fn < 8; fn++) {
dev = pci_scan_single_device(bus, devfn + fn);
if (dev) {
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 06b965623962..10731373d00e 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -1201,6 +1201,7 @@ static void __init asus_hides_smbus_hostbridge(struct pci_dev *dev)
switch(dev->subsystem_device) {
case 0x00b8: /* Compaq Evo D510 CMT */
case 0x00b9: /* Compaq Evo D510 SFF */
+ case 0x00ba: /* Compaq Evo D510 USDT */
/* Motherboard doesn't have Host bridge
* subvendor/subdevice IDs and on-board VGA
* controller is disabled if an AGP card is
@@ -2382,8 +2383,10 @@ static void __devinit nv_msi_ht_cap_quirk_leaf(struct pci_dev *dev)
}
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_NVIDIA, PCI_ANY_ID, nv_msi_ht_cap_quirk_leaf);
DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
+DECLARE_PCI_FIXUP_RESUME_EARLY(PCI_VENDOR_ID_AL, PCI_ANY_ID, nv_msi_ht_cap_quirk_all);
static void __devinit quirk_msi_intx_disable_bug(struct pci_dev *dev)
{
@@ -2492,6 +2495,7 @@ DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e6, quirk_i82576_sriov);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e7, quirk_i82576_sriov);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x10e8, quirk_i82576_sriov);
DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150a, quirk_i82576_sriov);
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, 0x150d, quirk_i82576_sriov);
#endif /* CONFIG_PCI_IOV */
diff --git a/drivers/pci/search.c b/drivers/pci/search.c
index e8cb5051c311..ec415352d9ba 100644
--- a/drivers/pci/search.c
+++ b/drivers/pci/search.c
@@ -113,37 +113,6 @@ pci_find_next_bus(const struct pci_bus *from)
return b;
}
-#ifdef CONFIG_PCI_LEGACY
-/**
- * pci_find_device - begin or continue searching for a PCI device by vendor/device id
- * @vendor: PCI vendor id to match, or %PCI_ANY_ID to match all vendor ids
- * @device: PCI device id to match, or %PCI_ANY_ID to match all device ids
- * @from: Previous PCI device found in search, or %NULL for new search.
- *
- * Iterates through the list of known PCI devices. If a PCI device is found
- * with a matching @vendor and @device, a pointer to its device structure is
- * returned. Otherwise, %NULL is returned.
- * A new search is initiated by passing %NULL as the @from argument.
- * Otherwise if @from is not %NULL, searches continue from next device
- * on the global list.
- *
- * NOTE: Do not use this function any more; use pci_get_device() instead, as
- * the PCI device returned by this function can disappear at any moment in
- * time.
- */
-struct pci_dev *pci_find_device(unsigned int vendor, unsigned int device,
- struct pci_dev *from)
-{
- struct pci_dev *pdev;
-
- pci_dev_get(from);
- pdev = pci_get_subsys(vendor, device, PCI_ANY_ID, PCI_ANY_ID, from);
- pci_dev_put(pdev);
- return pdev;
-}
-EXPORT_SYMBOL(pci_find_device);
-#endif /* CONFIG_PCI_LEGACY */
-
/**
* pci_get_slot - locate PCI device for a given PCI slot
* @bus: PCI bus on which desired PCI device resides
diff --git a/drivers/pci/setup-res.c b/drivers/pci/setup-res.c
index 88cdd1a937d6..706f82d8111f 100644
--- a/drivers/pci/setup-res.c
+++ b/drivers/pci/setup-res.c
@@ -119,6 +119,7 @@ int pci_claim_resource(struct pci_dev *dev, int resource)
return err;
}
+EXPORT_SYMBOL(pci_claim_resource);
#ifdef CONFIG_PCI_QUIRKS
void pci_disable_bridge_window(struct pci_dev *dev)
diff --git a/drivers/pcmcia/yenta_socket.c b/drivers/pcmcia/yenta_socket.c
index 3ecd7c99d8eb..737fe5d87c40 100644
--- a/drivers/pcmcia/yenta_socket.c
+++ b/drivers/pcmcia/yenta_socket.c
@@ -622,11 +622,12 @@ static int yenta_search_res(struct yenta_socket *socket, struct resource *res,
static int yenta_allocate_res(struct yenta_socket *socket, int nr, unsigned type, int addr_start, int addr_end)
{
- struct resource *root, *res;
+ struct pci_dev *dev = socket->dev;
+ struct resource *res;
struct pci_bus_region region;
unsigned mask;
- res = socket->dev->resource + PCI_BRIDGE_RESOURCES + nr;
+ res = dev->resource + PCI_BRIDGE_RESOURCES + nr;
/* Already allocated? */
if (res->parent)
return 0;
@@ -636,17 +637,16 @@ static int yenta_allocate_res(struct yenta_socket *socket, int nr, unsigned type
if (type & IORESOURCE_IO)
mask = ~3;
- res->name = socket->dev->subordinate->name;
+ res->name = dev->subordinate->name;
res->flags = type;
region.start = config_readl(socket, addr_start) & mask;
region.end = config_readl(socket, addr_end) | ~mask;
if (region.start && region.end > region.start && !override_bios) {
- pcibios_bus_to_resource(socket->dev, res, &region);
- root = pci_find_parent_resource(socket->dev, res);
- if (root && (request_resource(root, res) == 0))
+ pcibios_bus_to_resource(dev, res, &region);
+ if (pci_claim_resource(dev, PCI_BRIDGE_RESOURCES + nr) == 0)
return 0;
- dev_printk(KERN_INFO, &socket->dev->dev,
+ dev_printk(KERN_INFO, &dev->dev,
"Preassigned resource %d busy or not available, "
"reconfiguring...\n",
nr);
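Design note on the yenta change: pci_claim_resource(), exported just above in setup-res.c, looks up the parent resource and requests it in one call, which is why the open-coded pci_find_parent_resource()/request_resource() pair and the local 'root' pointer are no longer needed; the export lets the modular yenta_socket driver use it.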
@@ -672,7 +672,7 @@ static int yenta_allocate_res(struct yenta_socket *socket, int nr, unsigned type
return 1;
}
- dev_printk(KERN_INFO, &socket->dev->dev,
+ dev_printk(KERN_INFO, &dev->dev,
"no resource of type %x available, trying to continue...\n",
type);
res->start = res->end = res->flags = 0;
diff --git a/drivers/video/Kconfig b/drivers/video/Kconfig
index 49f2c4bdf904..11af4cb8924e 100644
--- a/drivers/video/Kconfig
+++ b/drivers/video/Kconfig
@@ -7,6 +7,8 @@ menu "Graphics support"
source "drivers/char/agp/Kconfig"
+source "drivers/gpu/vga/Kconfig"
+
source "drivers/gpu/drm/Kconfig"
config VGASTATE