author		Stephen Rothwell <sfr@canb.auug.org.au>	2014-12-12 13:39:46 +1100
committer	Stephen Rothwell <sfr@canb.auug.org.au>	2014-12-12 13:39:46 +1100
commit		125b77f1db3f32cacfccd3221b1aa327c705a889 (patch)
tree		09f72e169cac8138e599adb4267bc81958c41c6a /drivers
parent		45933ee346427c351af5932640344a698411e03e (diff)
parent		ee46ec3279c3c516be5dad746ef24febb2ddc658 (diff)
Merge remote-tracking branch 'tip/auto-latest'
Conflicts:
	drivers/irqchip/Makefile
Diffstat (limited to 'drivers')
29 files changed, 3157 insertions, 1060 deletions
diff --git a/drivers/acpi/Kconfig b/drivers/acpi/Kconfig
index 8951cefb0a96..e6c3ddd92665 100644
--- a/drivers/acpi/Kconfig
+++ b/drivers/acpi/Kconfig
@@ -315,6 +315,12 @@ config ACPI_HOTPLUG_MEMORY
 	  To compile this driver as a module, choose M here:
 	  the module will be called acpi_memhotplug.
 
+config ACPI_HOTPLUG_IOAPIC
+	bool
+	depends on PCI
+	depends on X86_IO_APIC
+	default y
+
 config ACPI_SBS
 	tristate "Smart Battery System"
 	depends on X86
diff --git a/drivers/acpi/Makefile b/drivers/acpi/Makefile
index f74317cc1ca9..e59494c90402 100644
--- a/drivers/acpi/Makefile
+++ b/drivers/acpi/Makefile
@@ -70,6 +70,7 @@ obj-$(CONFIG_ACPI_PROCESSOR)	+= processor.o
 obj-y				+= container.o
 obj-$(CONFIG_ACPI_THERMAL)	+= thermal.o
 obj-y				+= acpi_memhotplug.o
+obj-$(CONFIG_ACPI_HOTPLUG_IOAPIC) += ioapic.o
 obj-$(CONFIG_ACPI_BATTERY)	+= battery.o
 obj-$(CONFIG_ACPI_SBS)		+= sbshc.o
 obj-$(CONFIG_ACPI_SBS)		+= sbs.o
diff --git a/drivers/acpi/internal.h b/drivers/acpi/internal.h
index 163e82f536fa..caca2805536d 100644
--- a/drivers/acpi/internal.h
+++ b/drivers/acpi/internal.h
@@ -35,6 +35,13 @@ void acpi_int340x_thermal_init(void);
 int acpi_sysfs_init(void);
 void acpi_container_init(void);
 void acpi_memory_hotplug_init(void);
+#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+int acpi_ioapic_add(struct acpi_pci_root *root);
+int acpi_ioapic_remove(struct acpi_pci_root *root);
+#else
+static inline int acpi_ioapic_add(struct acpi_pci_root *root) { return 0; }
+static inline int acpi_ioapic_remove(struct acpi_pci_root *root) { return 0; }
+#endif
 #ifdef CONFIG_ACPI_DOCK
 void register_dock_dependent_device(struct acpi_device *adev,
 				    acpi_handle dshandle);
diff --git a/drivers/acpi/ioapic.c b/drivers/acpi/ioapic.c
new file mode 100644
index 000000000000..9a631a61e270
--- /dev/null
+++ b/drivers/acpi/ioapic.c
@@ -0,0 +1,235 @@
+/*
+ * IOAPIC/IOxAPIC/IOSAPIC driver
+ *
+ * Copyright (C) 2009 Fujitsu Limited.
+ * (c) Copyright 2009 Hewlett-Packard Development Company, L.P.
+ *
+ * Copyright (C) 2014 Intel Corporation
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * Based on original drivers/pci/ioapic.c
+ *	Yinghai Lu <yinghai@kernel.org>
+ *	Jiang Liu <jiang.liu@intel.com>
+ */
+
+/*
+ * This driver manages I/O APICs added by hotplug after boot.
+ * We try to claim all I/O APIC devices, but those present at boot were
+ * registered when we parsed the ACPI MADT.
+ */
+
+#define pr_fmt(fmt) "ACPI : IOAPIC: " fmt
+
+#include <linux/slab.h>
+#include <linux/acpi.h>
+#include <linux/pci.h>
+#include <acpi/acpi.h>
+
+struct acpi_pci_ioapic {
+	acpi_handle	root_handle;
+	acpi_handle	handle;
+	u32		gsi_base;
+	struct resource	res;
+	struct pci_dev	*pdev;
+	struct list_head list;
+};
+
+static LIST_HEAD(ioapic_list);
+static DEFINE_MUTEX(ioapic_list_lock);
+
+static acpi_status setup_res(struct acpi_resource *acpi_res, void *data)
+{
+	struct resource *res = data;
+
+	memset(res, 0, sizeof(*res));
+	if (acpi_dev_resource_memory(acpi_res, res)) {
+		res->flags &= IORESOURCE_MEM;
+		if (res->flags)
+			return AE_OK;
+	} else if (acpi_dev_resource_address_space(acpi_res, res)) {
+		struct acpi_resource_address64 addr;
+
+		res->flags &= IORESOURCE_MEM;
+		if (res->flags &&
+		    ACPI_SUCCESS(acpi_resource_to_address64(acpi_res, &addr)) &&
+		    addr.info.mem.caching != ACPI_PREFETCHABLE_MEMORY) {
+			res->start += addr.translation_offset;
+			res->end += addr.translation_offset;
+			return AE_OK;
+		}
+	}
+	res->flags = 0;
+
+	return AE_OK;
+}
+
+static bool acpi_is_ioapic(acpi_handle handle, char **type)
+{
+	acpi_status status;
+	struct acpi_device_info *info;
+	char *hid = NULL;
+	bool match = false;
+
+	if (!acpi_has_method(handle, "_GSB"))
+		return false;
+
+	status = acpi_get_object_info(handle, &info);
+	if (ACPI_SUCCESS(status)) {
+		if (info->valid & ACPI_VALID_HID)
+			hid = info->hardware_id.string;
+		if (hid) {
+			if (strcmp(hid, "ACPI0009") == 0) {
+				*type = "IOxAPIC";
+				match = true;
+			} else if (strcmp(hid, "ACPI000A") == 0) {
+				*type = "IOAPIC";
+				match = true;
+			}
+		}
+		kfree(info);
+	}
+
+	return match;
+}
+
+static acpi_status handle_ioapic_add(acpi_handle handle, u32 lvl,
+				     void *context, void **rv)
+{
+	acpi_status status;
+	unsigned long long gsi_base;
+	struct acpi_pci_ioapic *ioapic;
+	struct pci_dev *dev = NULL;
+	struct resource *res = NULL;
+	char *type = NULL;
+
+	if (!acpi_is_ioapic(handle, &type))
+		return AE_OK;
+
+	mutex_lock(&ioapic_list_lock);
+	list_for_each_entry(ioapic, &ioapic_list, list)
+		if (ioapic->handle == handle) {
+			mutex_unlock(&ioapic_list_lock);
+			return AE_OK;
+		}
+
+	status = acpi_evaluate_integer(handle, "_GSB", NULL, &gsi_base);
+	if (ACPI_FAILURE(status)) {
+		acpi_handle_warn(handle, "failed to evaluate _GSB method\n");
+		goto exit;
+	}
+
+	ioapic = kzalloc(sizeof(*ioapic), GFP_KERNEL);
+	if (!ioapic) {
+		pr_err("cannot allocate memory for new IOAPIC\n");
+		goto exit;
+	} else {
+		ioapic->root_handle = (acpi_handle)context;
+		ioapic->handle = handle;
+		ioapic->gsi_base = (u32)gsi_base;
+		ioapic->res.flags = IORESOURCE_UNSET;
+	}
+
+	if (acpi_ioapic_registered(handle, (u32)gsi_base))
+		goto done;
+
+	dev = acpi_get_pci_dev(handle);
+	if (dev && pci_resource_len(dev, 0)) {
+		if (pci_enable_device(dev) < 0)
+			goto exit_put;
+		pci_set_master(dev);
+		if (pci_request_region(dev, 0, type))
+			goto exit_disable;
+		res = &dev->resource[0];
+		ioapic->pdev = dev;
+	} else {
+		pci_dev_put(dev);
+		dev = NULL;
+
+		res = &ioapic->res;
+		acpi_walk_resources(handle, METHOD_NAME__CRS, setup_res, res);
+		if (res->flags == IORESOURCE_UNSET) {
+			acpi_handle_warn(handle, "failed to get resource\n");
+			goto exit_free;
+		} else if (request_resource(&iomem_resource, res)) {
+			acpi_handle_warn(handle, "failed to insert resource\n");
+			goto exit_free;
+		}
+	}
+
+	if (acpi_register_ioapic(handle, res->start, (u32)gsi_base)) {
+		acpi_handle_warn(handle, "failed to register IOAPIC\n");
+		goto exit_release;
+	}
+done:
+	list_add(&ioapic->list, &ioapic_list);
+	mutex_unlock(&ioapic_list_lock);
+
+	if (dev)
+		dev_info(&dev->dev, "%s at %pR, GSI %u\n",
+			 type, res, (u32)gsi_base);
+	else
+		acpi_handle_info(handle, "%s at %pR, GSI %u\n",
+				 type, res, (u32)gsi_base);
+
+	return AE_OK;
+
+exit_release:
+	if (dev)
+		pci_release_region(dev, 0);
+	else
+		release_resource(res);
+exit_disable:
+	if (dev)
+		pci_disable_device(dev);
+exit_put:
+	if (dev)
+		pci_dev_put(dev);
+exit_free:
+	kfree(ioapic);
+exit:
+	mutex_unlock(&ioapic_list_lock);
+	*(acpi_status *)rv = AE_ERROR;
+	return AE_OK;
+}
+
+int acpi_ioapic_add(struct acpi_pci_root *root)
+{
+	acpi_status status, retval = AE_OK;
+
+	status = acpi_walk_namespace(ACPI_TYPE_DEVICE, root->device->handle,
+				     UINT_MAX, handle_ioapic_add, NULL,
+				     root->device->handle, (void **)&retval);
+
+	return ACPI_SUCCESS(status) && ACPI_SUCCESS(retval) ? 0 : -ENODEV;
+}
+
+int acpi_ioapic_remove(struct acpi_pci_root *root)
+{
+	int retval = 0;
+	struct acpi_pci_ioapic *ioapic, *tmp;
+
+	mutex_lock(&ioapic_list_lock);
+	list_for_each_entry_safe(ioapic, tmp, &ioapic_list, list) {
+		if (root->device->handle != ioapic->root_handle)
+			continue;
+
+		if (acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base))
+			retval = -EBUSY;
+
+		if (ioapic->pdev) {
+			pci_release_region(ioapic->pdev, 0);
+			pci_disable_device(ioapic->pdev);
+			pci_dev_put(ioapic->pdev);
+		} else if (ioapic->res.flags != IORESOURCE_UNSET) {
+			release_resource(&ioapic->res);
+		}
+		list_del(&ioapic->list);
+		kfree(ioapic);
+	}
+	mutex_unlock(&ioapic_list_lock);
+
+	return retval;
+}
diff --git a/drivers/acpi/pci_irq.c b/drivers/acpi/pci_irq.c
index 7cc4e33179f9..5277a0ee5704 100644
--- a/drivers/acpi/pci_irq.c
+++ b/drivers/acpi/pci_irq.c
@@ -413,6 +413,9 @@ int acpi_pci_irq_enable(struct pci_dev *dev)
 		return 0;
 	}
 
+	if (dev->irq_managed && dev->irq > 0)
+		return 0;
+
 	entry = acpi_pci_irq_lookup(dev, pin);
 	if (!entry) {
 		/*
@@ -456,6 +459,7 @@
 		return rc;
 	}
 	dev->irq = rc;
+	dev->irq_managed = 1;
 
 	if (link)
 		snprintf(link_desc, sizeof(link_desc), " -> Link[%s]", link);
@@ -478,7 +482,7 @@ void acpi_pci_irq_disable(struct pci_dev *dev)
 	u8 pin;
 
 	pin = dev->pin;
-	if (!pin)
+	if (!pin || !dev->irq_managed || dev->irq <= 0)
 		return;
 
 	/* Keep IOAPIC pin configuration when suspending */
@@ -506,6 +510,9 @@
 	 */
 	dev_dbg(&dev->dev, "PCI INT %c disabled\n", pin_name(pin));
-	if (gsi >= 0 && dev->irq > 0)
+	if (gsi >= 0) {
 		acpi_unregister_gsi(gsi);
+		dev->irq = 0;
+		dev->irq_managed = 0;
+	}
 }
diff --git a/drivers/acpi/pci_root.c b/drivers/acpi/pci_root.c
index c6bcb8c719d8..16db50b03648 100644
--- a/drivers/acpi/pci_root.c
+++ b/drivers/acpi/pci_root.c
@@ -621,6 +621,7 @@ static int acpi_pci_root_add(struct acpi_device *device,
 	if (hotadd) {
 		pcibios_resource_survey_bus(root->bus);
 		pci_assign_unassigned_root_bus_resources(root->bus);
+		acpi_ioapic_add(root);
 	}
 
 	pci_lock_rescan_remove();
@@ -644,6 +645,8 @@ static void acpi_pci_root_remove(struct acpi_device *device)
 
 	pci_stop_root_bus(root->bus);
 
+	WARN_ON(acpi_ioapic_remove(root));
+
 	device_set_run_wake(root->bus->bridge, false);
 	pci_acpi_remove_bus_pm_notifier(device);
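[Context sketch, not part of this commit: the two pci_root.c hunks above are the only entry points into the new ioapic.c driver. A minimal illustration of that call flow, with a hypothetical wrapper function around the two helpers this commit adds:]

#include <linux/acpi.h>
#include "internal.h"	/* acpi_ioapic_add(), acpi_ioapic_remove() */

/* Illustrative only: mirrors what acpi_pci_root_add()/_remove() now do. */
static void sketch_root_hotplug(struct acpi_pci_root *root, bool hot_added)
{
	if (hot_added) {
		/* Claim IOAPIC/IOxAPIC devices below this host bridge;
		 * boot-time IOAPICs were already registered from the MADT. */
		acpi_ioapic_add(root);
	} else {
		/* Unregistering may fail with -EBUSY if IRQs are in use,
		 * hence the WARN_ON() in the removal path above. */
		WARN_ON(acpi_ioapic_remove(root));
	}
}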
diff --git a/drivers/acpi/processor_core.c b/drivers/acpi/processor_core.c
index ef58f46c8442..f124cbb491d9 100644
--- a/drivers/acpi/processor_core.c
+++ b/drivers/acpi/processor_core.c
@@ -4,6 +4,10 @@
  *
  *	Alex Chiang <achiang@hp.com>
  *	- Unified x86/ia64 implementations
+ *
+ * I/O APIC hotplug support
+ *	Yinghai Lu <yinghai@kernel.org>
+ *	Jiang Liu <jiang.liu@intel.com>
  */
 #include <linux/export.h>
 #include <linux/acpi.h>
@@ -12,6 +16,21 @@
 #define _COMPONENT		ACPI_PROCESSOR_COMPONENT
 ACPI_MODULE_NAME("processor_core");
 
+static struct acpi_table_madt *get_madt_table(void)
+{
+	static struct acpi_table_madt *madt;
+	static int read_madt;
+
+	if (!read_madt) {
+		if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
+					(struct acpi_table_header **)&madt)))
+			madt = NULL;
+		read_madt++;
+	}
+
+	return madt;
+}
+
 static int map_lapic_id(struct acpi_subtable_header *entry,
 			u32 acpi_id, int *apic_id)
 {
@@ -67,17 +86,10 @@ static int map_lsapic_id(struct acpi_subtable_header *entry,
 static int map_madt_entry(int type, u32 acpi_id)
 {
 	unsigned long madt_end, entry;
-	static struct acpi_table_madt *madt;
-	static int read_madt;
 	int apic_id = -1;
+	struct acpi_table_madt *madt;
 
-	if (!read_madt) {
-		if (ACPI_FAILURE(acpi_get_table(ACPI_SIG_MADT, 0,
-					(struct acpi_table_header **)&madt)))
-			madt = NULL;
-		read_madt++;
-	}
-
+	madt = get_madt_table();
 	if (!madt)
 		return apic_id;
 
@@ -125,13 +137,12 @@ static int map_mat_entry(acpi_handle handle, int type, u32 acpi_id)
 	}
 
 	header = (struct acpi_subtable_header *)obj->buffer.pointer;
-	if (header->type == ACPI_MADT_TYPE_LOCAL_APIC) {
+	if (header->type == ACPI_MADT_TYPE_LOCAL_APIC)
 		map_lapic_id(header, acpi_id, &apic_id);
-	} else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC) {
+	else if (header->type == ACPI_MADT_TYPE_LOCAL_SAPIC)
 		map_lsapic_id(header, type, acpi_id, &apic_id);
-	} else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC) {
+	else if (header->type == ACPI_MADT_TYPE_LOCAL_X2APIC)
 		map_x2apic_id(header, type, acpi_id, &apic_id);
-	}
 
 exit:
 	kfree(buffer.pointer);
@@ -164,7 +175,7 @@ int acpi_map_cpuid(int apic_id, u32 acpi_id)
 	 * For example,
 	 *
 	 * Scope (_PR)
-	 * {
+	 * {
 	 *     Processor (CPU0, 0x00, 0x00000410, 0x06) {}
 	 *     Processor (CPU1, 0x01, 0x00000410, 0x06) {}
 	 *     Processor (CPU2, 0x02, 0x00000410, 0x06) {}
@@ -204,3 +215,96 @@ int acpi_get_cpuid(acpi_handle handle, int type, u32 acpi_id)
 	return acpi_map_cpuid(apic_id, acpi_id);
 }
 EXPORT_SYMBOL_GPL(acpi_get_cpuid);
+
+#ifdef CONFIG_ACPI_HOTPLUG_IOAPIC
+static int get_ioapic_id(struct acpi_subtable_header *entry, u32 gsi_base,
+			 u64 *phys_addr, int *ioapic_id)
+{
+	struct acpi_madt_io_apic *ioapic = (struct acpi_madt_io_apic *)entry;
+
+	if (ioapic->global_irq_base != gsi_base)
+		return 0;
+
+	*phys_addr = ioapic->address;
+	*ioapic_id = ioapic->id;
+	return 1;
+}
+
+static int parse_madt_ioapic_entry(u32 gsi_base, u64 *phys_addr)
+{
+	struct acpi_subtable_header *hdr;
+	unsigned long madt_end, entry;
+	struct acpi_table_madt *madt;
+	int apic_id = -1;
+
+	madt = get_madt_table();
+	if (!madt)
+		return apic_id;
+
+	entry = (unsigned long)madt;
+	madt_end = entry + madt->header.length;
+
+	/* Parse all entries looking for a match. */
+	entry += sizeof(struct acpi_table_madt);
+	while (entry + sizeof(struct acpi_subtable_header) < madt_end) {
+		hdr = (struct acpi_subtable_header *)entry;
+		if (hdr->type == ACPI_MADT_TYPE_IO_APIC &&
+		    get_ioapic_id(hdr, gsi_base, phys_addr, &apic_id))
+			break;
+		else
+			entry += hdr->length;
+	}
+
+	return apic_id;
+}
+
+static int parse_mat_ioapic_entry(acpi_handle handle, u32 gsi_base,
+				  u64 *phys_addr)
+{
+	struct acpi_buffer buffer = { ACPI_ALLOCATE_BUFFER, NULL };
+	struct acpi_subtable_header *header;
+	union acpi_object *obj;
+	int apic_id = -1;
+
+	if (ACPI_FAILURE(acpi_evaluate_object(handle, "_MAT", NULL, &buffer)))
+		goto exit;
+
+	if (!buffer.length || !buffer.pointer)
+		goto exit;
+
+	obj = buffer.pointer;
+	if (obj->type != ACPI_TYPE_BUFFER ||
+	    obj->buffer.length < sizeof(struct acpi_subtable_header))
+		goto exit;
+
+	header = (struct acpi_subtable_header *)obj->buffer.pointer;
+	if (header->type == ACPI_MADT_TYPE_IO_APIC)
+		get_ioapic_id(header, gsi_base, phys_addr, &apic_id);
+
+exit:
+	kfree(buffer.pointer);
+	return apic_id;
+}
+
+/**
+ * acpi_get_ioapic_id - Get IOAPIC ID and physical address matching @gsi_base
+ * @handle:	ACPI object for IOAPIC device
+ * @gsi_base:	GSI base to match with
+ * @phys_addr:	Pointer to store physical address of matching IOAPIC record
+ *
+ * Walk resources returned by the ACPI _MAT method, then the ACPI MADT table,
+ * to search for an ACPI IOAPIC record matching @gsi_base.
+ * Return IOAPIC id and store physical address in @phys_addr if found a match,
+ * otherwise return <0.
+ */
+int acpi_get_ioapic_id(acpi_handle handle, u32 gsi_base, u64 *phys_addr)
+{
+	int apic_id;
+
+	apic_id = parse_mat_ioapic_entry(handle, gsi_base, phys_addr);
+	if (apic_id == -1)
+		apic_id = parse_madt_ioapic_entry(gsi_base, phys_addr);
+
+	return apic_id;
+}
+#endif /* CONFIG_ACPI_HOTPLUG_IOAPIC */
diff --git a/drivers/acpi/resource.c b/drivers/acpi/resource.c
index 2ba8f02ced36..782a0d15c25f 100644
--- a/drivers/acpi/resource.c
+++ b/drivers/acpi/resource.c
@@ -200,7 +200,7 @@ bool acpi_dev_resource_address_space(struct acpi_resource *ares,
 
 	status = acpi_resource_to_address64(ares, &addr);
 	if (ACPI_FAILURE(status))
-		return true;
+		return false;
 
 	res->start = addr.minimum;
 	res->end = addr.maximum;
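[Usage sketch, not part of this commit: acpi_get_ioapic_id(), added above, consults the device's _MAT method first and falls back to the static MADT. A hypothetical caller illustrating the contract:]

#include <linux/acpi.h>

/* Illustrative only: resolve an IOAPIC's ID and MMIO address by GSI base. */
static int sketch_lookup_ioapic(acpi_handle handle, u32 gsi_base)
{
	u64 phys_addr;
	int id = acpi_get_ioapic_id(handle, gsi_base, &phys_addr);

	if (id < 0)
		return id;	/* neither _MAT nor MADT matched this GSI base */
	pr_info("IOAPIC %d at %#llx\n", id, phys_addr);
	return 0;
}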
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index b205f76d7129..35a38db4a5c1 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -33,6 +33,7 @@
 #include <linux/export.h>
 #include <linux/irq.h>
 #include <linux/msi.h>
+#include <linux/irqdomain.h>
 #include <asm/irq_remapping.h>
 #include <asm/io_apic.h>
 #include <asm/apic.h>
@@ -3857,6 +3858,21 @@ union irte {
 	} fields;
 };
 
+struct irq_2_irte {
+	u16 devid; /* Device ID for IRTE table */
+	u16 index; /* Index into IRTE table */
+};
+
+struct amd_ir_data {
+	struct irq_2_irte irq_2_irte;
+	union irte irte_entry;
+	union {
+		struct msi_msg msi_entry;
+	};
+};
+
+static struct irq_chip amd_ir_chip;
+
 #define DTE_IRQ_PHYS_ADDR_MASK	(((1ULL << 45)-1) << 6)
 #define DTE_IRQ_REMAP_INTCTL	(2ULL << 60)
 #define DTE_IRQ_TABLE_LEN	(8ULL << 1)
@@ -3950,7 +3966,7 @@ out_unlock:
 	return table;
 }
 
-static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count)
+static int alloc_irq_index(u16 devid, int count)
 {
 	struct irq_remap_table *table;
 	unsigned long flags;
@@ -3972,18 +3988,10 @@ static int alloc_irq_index(struct irq_cfg *cfg, u16 devid, int count)
 			c = 0;
 
 		if (c == count) {
-			struct irq_2_irte *irte_info;
-
 			for (; c != 0; --c)
 				table->table[index - c + 1] = IRTE_ALLOCATED;
 
 			index -= count - 1;
-
-			cfg->remapped	 = 1;
-			irte_info	 = &cfg->irq_2_irte;
-			irte_info->devid = devid;
-			irte_info->index = index;
-
 			goto out;
 		}
 	}
@@ -3996,22 +4004,6 @@ out:
 	return index;
 }
 
-static int get_irte(u16 devid, int index, union irte *irte)
-{
-	struct irq_remap_table *table;
-	unsigned long flags;
-
-	table = get_irq_table(devid, false);
-	if (!table)
-		return -ENOMEM;
-
-	spin_lock_irqsave(&table->lock, flags);
-	irte->val = table->table[index];
-	spin_unlock_irqrestore(&table->lock, flags);
-
-	return 0;
-}
-
 static int modify_irte(u16 devid, int index, union irte irte)
 {
 	struct irq_remap_table *table;
@@ -4058,244 +4050,317 @@ static void free_irte(u16 devid, int index)
 	iommu_completion_wait(iommu);
 }
 
-static int setup_ioapic_entry(int irq, struct IO_APIC_route_entry *entry,
-			      unsigned int destination, int vector,
-			      struct io_apic_irq_attr *attr)
+static int get_devid(struct irq_alloc_info *info)
 {
-	struct irq_remap_table *table;
-	struct irq_2_irte *irte_info;
-	struct irq_cfg *cfg;
-	union irte irte;
-	int ioapic_id;
-	int index;
-	int devid;
-	int ret;
-
-	cfg = irq_get_chip_data(irq);
-	if (!cfg)
-		return -EINVAL;
-
-	irte_info = &cfg->irq_2_irte;
-	ioapic_id = mpc_ioapic_id(attr->ioapic);
-	devid = get_ioapic_devid(ioapic_id);
-
-	if (devid < 0)
-		return devid;
-
-	table = get_irq_table(devid, true);
-	if (table == NULL)
-		return -ENOMEM;
-
-	index = attr->ioapic_pin;
+	int devid = -1;
 
-	/* Setup IRQ remapping info */
-	cfg->remapped = 1;
-	irte_info->devid = devid;
-	irte_info->index = index;
+	switch (info->type) {
+	case X86_IRQ_ALLOC_TYPE_IOAPIC:
+		devid = get_ioapic_devid(info->ioapic_id);
+		break;
+	case X86_IRQ_ALLOC_TYPE_HPET:
+		devid = get_hpet_devid(info->hpet_id);
+		break;
+	case X86_IRQ_ALLOC_TYPE_MSI:
+	case X86_IRQ_ALLOC_TYPE_MSIX:
+		devid = get_device_id(&info->msi_dev->dev);
+		break;
+	default:
+		BUG_ON(1);
+		break;
+	}
 
-	/* Setup IRTE for IOMMU */
-	irte.val = 0;
-	irte.fields.vector      = vector;
-	irte.fields.int_type    = apic->irq_delivery_mode;
-	irte.fields.destination = destination;
-	irte.fields.dm          = apic->irq_dest_mode;
-	irte.fields.valid       = 1;
-
-	ret = modify_irte(devid, index, irte);
-	if (ret)
-		return ret;
+	return devid;
+}
 
-	/* Setup IOAPIC entry */
-	memset(entry, 0, sizeof(*entry));
+static struct irq_domain *get_ir_irq_domain(struct irq_alloc_info *info)
+{
+	int devid;
+	struct amd_iommu *iommu;
 
-	entry->vector   = index;
-	entry->mask     = 0;
-	entry->trigger  = attr->trigger;
-	entry->polarity = attr->polarity;
+	if (!info)
+		return NULL;
 
-	/*
-	 * Mask level triggered irqs.
-	 */
-	if (attr->trigger)
-		entry->mask = 1;
+	devid = get_devid(info);
+	if (devid >= 0) {
+		iommu = amd_iommu_rlookup_table[devid];
+		if (iommu)
+			return iommu->ir_domain;
+	}
 
-	return 0;
+	return NULL;
 }
 
-static int set_affinity(struct irq_data *data, const struct cpumask *mask,
-			bool force)
+static struct irq_domain *get_irq_domain(struct irq_alloc_info *info)
 {
-	struct irq_2_irte *irte_info;
-	unsigned int dest, irq;
-	struct irq_cfg *cfg;
-	union irte irte;
-	int err;
-
-	if (!config_enabled(CONFIG_SMP))
-		return -1;
-
-	cfg = data->chip_data;
-	irq = data->irq;
-	irte_info = &cfg->irq_2_irte;
+	int devid;
+	struct amd_iommu *iommu;
 
-	if (!cpumask_intersects(mask, cpu_online_mask))
-		return -EINVAL;
+	if (!info)
+		return NULL;
 
-	if (get_irte(irte_info->devid, irte_info->index, &irte))
-		return -EBUSY;
+	switch (info->type) {
+	case X86_IRQ_ALLOC_TYPE_MSI:
+	case X86_IRQ_ALLOC_TYPE_MSIX:
+		devid = get_device_id(&info->msi_dev->dev);
+		if (devid >= 0) {
+			iommu = amd_iommu_rlookup_table[devid];
+			if (iommu)
+				return iommu->msi_domain;
+		}
+		break;
+	default:
+		break;
+	}
 
-	if (assign_irq_vector(irq, cfg, mask))
-		return -EBUSY;
+	return NULL;
+}
 
-	err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
-	if (err) {
-		if (assign_irq_vector(irq, cfg, data->affinity))
-			pr_err("AMD-Vi: Failed to recover vector for irq %d\n", irq);
-		return err;
-	}
+struct irq_remap_ops amd_iommu_irq_ops = {
+	.supported		= amd_iommu_supported,
+	.prepare		= amd_iommu_prepare,
+	.enable			= amd_iommu_enable,
+	.disable		= amd_iommu_disable,
+	.reenable		= amd_iommu_reenable,
+	.enable_faulting	= amd_iommu_enable_faulting,
+	.get_ir_irq_domain	= get_ir_irq_domain,
+	.get_irq_domain		= get_irq_domain,
+};
 
-	irte.fields.vector      = cfg->vector;
-	irte.fields.destination = dest;
+static void irq_remapping_prepare_irte(struct amd_ir_data *data,
+				       struct irq_cfg *irq_cfg,
+				       struct irq_alloc_info *info,
+				       int devid, int index, int sub_handle)
+{
+	union irte *irte = &data->irte_entry;
+	struct irq_2_irte *irte_info = &data->irq_2_irte;
+	struct msi_msg *msg = &data->msi_entry;
+	struct IO_APIC_route_entry *entry;
 
-	modify_irte(irte_info->devid, irte_info->index, irte);
+	data->irq_2_irte.devid = devid;
+	data->irq_2_irte.index = index + sub_handle;
 
-	if (cfg->move_in_progress)
-		send_cleanup_vector(cfg);
+	/* Setup IRTE for IOMMU */
+	irte->val = 0;
+	irte->fields.vector      = irq_cfg->vector;
+	irte->fields.int_type    = apic->irq_delivery_mode;
+	irte->fields.destination = irq_cfg->dest_apicid;
+	irte->fields.dm          = apic->irq_dest_mode;
+	irte->fields.valid       = 1;
+
+	switch (info->type) {
+	case X86_IRQ_ALLOC_TYPE_IOAPIC:
+		/* Setup IOAPIC entry */
+		entry = info->ioapic_entry;
+		info->ioapic_entry = NULL;
+		memset(entry, 0, sizeof(*entry));
+		entry->vector   = index;
+		entry->mask     = 0;
+		entry->trigger  = info->ioapic_trigger;
+		entry->polarity = info->ioapic_polarity;
+		/* Mask level triggered irqs. */
+		if (info->ioapic_trigger)
+			entry->mask = 1;
+		break;
 
-	cpumask_copy(data->affinity, mask);
+	case X86_IRQ_ALLOC_TYPE_HPET:
+	case X86_IRQ_ALLOC_TYPE_MSI:
+	case X86_IRQ_ALLOC_TYPE_MSIX:
+		msg->address_hi = MSI_ADDR_BASE_HI;
+		msg->address_lo = MSI_ADDR_BASE_LO;
+		msg->data = irte_info->index;
+		break;
 
-	return 0;
+	default:
+		BUG_ON(1);
+		break;
+	}
 }
 
-static int free_irq(int irq)
+static int irq_remapping_alloc(struct irq_domain *domain, unsigned int virq,
+			       unsigned int nr_irqs, void *arg)
 {
-	struct irq_2_irte *irte_info;
+	struct irq_alloc_info *info = arg;
+	struct amd_ir_data *data;
+	struct irq_data *irq_data;
 	struct irq_cfg *cfg;
+	int i, ret, devid;
+	int index = -1;
 
-	cfg = irq_get_chip_data(irq);
-	if (!cfg)
+	if (!info)
+		return -EINVAL;
+	if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI &&
+	    info->type != X86_IRQ_ALLOC_TYPE_MSIX)
 		return -EINVAL;
 
-	irte_info = &cfg->irq_2_irte;
-
-	free_irte(irte_info->devid, irte_info->index);
+	/*
+	 * With IRQ remapping enabled, don't need contigious CPU vectors
+	 * to support multiple MSI interrupts.
	 */
+	if (info->type == X86_IRQ_ALLOC_TYPE_MSI)
+		info->flags &= ~X86_IRQ_ALLOC_CONTIGOUS_VECTORS;
 
-	return 0;
-}
+	devid = get_devid(info);
+	if (devid < 0)
+		return -EINVAL;
 
-static void compose_msi_msg(struct pci_dev *pdev,
-			    unsigned int irq, unsigned int dest,
-			    struct msi_msg *msg, u8 hpet_id)
-{
-	struct irq_2_irte *irte_info;
-	struct irq_cfg *cfg;
-	union irte irte;
+	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
+	if (ret < 0)
+		return ret;
 
-	cfg = irq_get_chip_data(irq);
-	if (!cfg)
-		return;
+	ret = -ENOMEM;
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		goto out_free_parent;
 
-	irte_info = &cfg->irq_2_irte;
+	if (info->type == X86_IRQ_ALLOC_TYPE_IOAPIC) {
+		if (get_irq_table(devid, true))
+			index = info->ioapic_pin;
+		else
+			ret = -ENOMEM;
+	} else {
+		index = alloc_irq_index(devid, nr_irqs);
+	}
+	if (index < 0) {
+		pr_warn("Failed to allocate IRTE\n");
+		kfree(data);
+		goto out_free_parent;
+	}
 
-	irte.val = 0;
-	irte.fields.vector      = cfg->vector;
-	irte.fields.int_type    = apic->irq_delivery_mode;
-	irte.fields.destination = dest;
-	irte.fields.dm          = apic->irq_dest_mode;
-	irte.fields.valid       = 1;
+	for (i = 0; i < nr_irqs; i++) {
+		irq_data = irq_domain_get_irq_data(domain, virq + i);
+		cfg = irqd_cfg(irq_data);
+		if (!irq_data || !cfg) {
+			ret = -EINVAL;
+			goto out_free_data;
+		}
 
-	modify_irte(irte_info->devid, irte_info->index, irte);
+		if (i > 0) {
+			data = kzalloc(sizeof(*data), GFP_KERNEL);
+			if (!data)
+				goto out_free_data;
+		}
+		irq_data->hwirq = (devid << 16) + i;
+		irq_data->chip_data = data;
+		irq_data->chip = &amd_ir_chip;
+		irq_remapping_prepare_irte(data, cfg, info, devid, index, i);
+		irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
+	}
+	return 0;
 
-	msg->address_hi = MSI_ADDR_BASE_HI;
-	msg->address_lo = MSI_ADDR_BASE_LO;
-	msg->data = irte_info->index;
+out_free_data:
+	for (i--; i >= 0; i--) {
+		irq_data = irq_domain_get_irq_data(domain, virq + i);
+		if (irq_data)
+			kfree(irq_data->chip_data);
+	}
+	for (i = 0; i < nr_irqs; i++)
+		free_irte(devid, index + i);
+out_free_parent:
+	irq_domain_free_irqs_common(domain, virq, nr_irqs);
+	return ret;
 }
 
-static int msi_alloc_irq(struct pci_dev *pdev, int irq, int nvec)
+static void irq_remapping_free(struct irq_domain *domain, unsigned int virq,
+			       unsigned int nr_irqs)
 {
-	struct irq_cfg *cfg;
-	int index;
-	u16 devid;
-
-	if (!pdev)
-		return -EINVAL;
+	struct irq_data *irq_data;
+	struct amd_ir_data *data;
+	struct irq_2_irte *irte_info;
+	int i;
 
-	cfg = irq_get_chip_data(irq);
-	if (!cfg)
-		return -EINVAL;
+	for (i = 0; i < nr_irqs; i++) {
+		irq_data = irq_domain_get_irq_data(domain, virq + i);
+		if (irq_data && irq_data->chip_data) {
+			data = irq_data->chip_data;
+			irte_info = &data->irq_2_irte;
+			free_irte(irte_info->devid, irte_info->index);
+			kfree(data);
+		}
+	}
+	irq_domain_free_irqs_common(domain, virq, nr_irqs);
+}
 
-	devid = get_device_id(&pdev->dev);
-	index = alloc_irq_index(cfg, devid, nvec);
+static void irq_remapping_activate(struct irq_domain *domain,
+				   struct irq_data *irq_data)
+{
+	struct amd_ir_data *data = irq_data->chip_data;
+	struct irq_2_irte *irte_info = &data->irq_2_irte;
 
-	return index < 0 ? MAX_IRQS_PER_TABLE : index;
+	modify_irte(irte_info->devid, irte_info->index, data->irte_entry);
 }
 
-static int msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
-			 int index, int offset)
+static void irq_remapping_deactivate(struct irq_domain *domain,
+				     struct irq_data *irq_data)
 {
-	struct irq_2_irte *irte_info;
-	struct irq_cfg *cfg;
-	u16 devid;
+	struct amd_ir_data *data = irq_data->chip_data;
+	struct irq_2_irte *irte_info = &data->irq_2_irte;
+	union irte entry;
 
-	if (!pdev)
-		return -EINVAL;
+	entry.val = 0;
+	modify_irte(irte_info->devid, irte_info->index, data->irte_entry);
+}
 
-	cfg = irq_get_chip_data(irq);
-	if (!cfg)
-		return -EINVAL;
+static struct irq_domain_ops amd_ir_domain_ops = {
+	.alloc = irq_remapping_alloc,
+	.free = irq_remapping_free,
+	.activate = irq_remapping_activate,
+	.deactivate = irq_remapping_deactivate,
+};
 
-	if (index >= MAX_IRQS_PER_TABLE)
-		return 0;
+static int amd_ir_set_affinity(struct irq_data *data,
+			       const struct cpumask *mask, bool force)
+{
+	struct amd_ir_data *ir_data = data->chip_data;
+	struct irq_2_irte *irte_info = &ir_data->irq_2_irte;
+	struct irq_cfg *cfg = irqd_cfg(data);
+	struct irq_data *parent = data->parent_data;
+	int ret;
 
-	devid = get_device_id(&pdev->dev);
-	irte_info = &cfg->irq_2_irte;
+	ret = parent->chip->irq_set_affinity(parent, mask, force);
+	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
+		return ret;
 
-	cfg->remapped = 1;
-	irte_info->devid = devid;
-	irte_info->index = index + offset;
+	/*
+	 * Atomically updates the IRTE with the new destination, vector
+	 * and flushes the interrupt entry cache.
	 */
+	ir_data->irte_entry.fields.vector = cfg->vector;
+	ir_data->irte_entry.fields.destination = cfg->dest_apicid;
+	modify_irte(irte_info->devid, irte_info->index, ir_data->irte_entry);
 
-	return 0;
+	/*
+	 * After this point, all the interrupts will start arriving
+	 * at the new destination. So, time to cleanup the previous
+	 * vector allocation.
	 */
+	send_cleanup_vector(cfg);
+
+	return IRQ_SET_MASK_OK_DONE;
 }
 
-static int alloc_hpet_msi(unsigned int irq, unsigned int id)
+static void ir_compose_msi_msg(struct irq_data *irq_data, struct msi_msg *msg)
 {
-	struct irq_2_irte *irte_info;
-	struct irq_cfg *cfg;
-	int index, devid;
+	struct amd_ir_data *ir_data = irq_data->chip_data;
 
-	cfg = irq_get_chip_data(irq);
-	if (!cfg)
-		return -EINVAL;
+	*msg = ir_data->msi_entry;
+}
 
-	irte_info = &cfg->irq_2_irte;
-	devid = get_hpet_devid(id);
-	if (devid < 0)
-		return devid;
+static struct irq_chip amd_ir_chip = {
+	.irq_ack = ir_ack_apic_edge,
+	.irq_set_affinity = amd_ir_set_affinity,
+	.irq_compose_msi_msg = ir_compose_msi_msg,
+};
 
-	index = alloc_irq_index(cfg, devid, 1);
-	if (index < 0)
-		return index;
+int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
+{
+	iommu->ir_domain = irq_domain_add_tree(NULL, &amd_ir_domain_ops, iommu);
+	if (!iommu->ir_domain)
+		return -ENOMEM;
 
-	cfg->remapped = 1;
-	irte_info->devid = devid;
-	irte_info->index = index;
+	iommu->ir_domain->parent = arch_get_ir_parent_domain();
+	iommu->msi_domain = arch_create_msi_irq_domain(iommu->ir_domain);
 
 	return 0;
 }
-
-struct irq_remap_ops amd_iommu_irq_ops = {
-	.supported		= amd_iommu_supported,
-	.prepare		= amd_iommu_prepare,
-	.enable			= amd_iommu_enable,
-	.disable		= amd_iommu_disable,
-	.reenable		= amd_iommu_reenable,
-	.enable_faulting	= amd_iommu_enable_faulting,
-	.setup_ioapic_entry	= setup_ioapic_entry,
-	.set_affinity		= set_affinity,
-	.free_irq		= free_irq,
-	.compose_msi_msg	= compose_msi_msg,
-	.msi_alloc_irq		= msi_alloc_irq,
-	.msi_setup_irq		= msi_setup_irq,
-	.alloc_hpet_msi		= alloc_hpet_msi,
-};
 #endif
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index b0522f15730f..de3390a7d345 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -1124,6 +1124,10 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
 	if (ret)
 		return ret;
 
+	ret = amd_iommu_create_irq_domain(iommu);
+	if (ret)
+		return ret;
+
 	/*
 	 * Make sure IOMMU is not considered to translate itself. The IVRS
 	 * table tells us so, but this is a lie!
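[Pattern sketch, not part of this commit: irq_remapping_alloc() above is a hierarchical irqdomain .alloc callback. The hypothetical skeleton below shows the shape of that pattern: reserve the interrupts in the parent (x86 vector) domain first, then attach remapping-specific chip data.]

#include <linux/irqdomain.h>

/* Illustrative only: the two-level allocation used by the new .alloc ops. */
static int sketch_hier_alloc(struct irq_domain *domain, unsigned int virq,
			     unsigned int nr_irqs, void *arg)
{
	int ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);

	if (ret < 0)
		return ret;
	/* ...allocate IRTEs and set irq_data->chip/chip_data per IRQ,
	 * undoing the parent allocation on any failure... */
	return 0;
}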
diff --git a/drivers/iommu/amd_iommu_proto.h b/drivers/iommu/amd_iommu_proto.h
index 95ed6deae47f..612a22192fa0 100644
--- a/drivers/iommu/amd_iommu_proto.h
+++ b/drivers/iommu/amd_iommu_proto.h
@@ -63,6 +63,15 @@ extern u8 amd_iommu_pc_get_max_counters(u16 devid);
 extern int amd_iommu_pc_get_set_reg_val(u16 devid, u8 bank, u8 cntr,
 					u8 fxn, u64 *value, bool is_write);
 
+#ifdef CONFIG_IRQ_REMAP
+extern int amd_iommu_create_irq_domain(struct amd_iommu *iommu);
+#else
+static inline int amd_iommu_create_irq_domain(struct amd_iommu *iommu)
+{
+	return 0;
+}
+#endif
+
 #define PPR_SUCCESS			0x0
 #define PPR_INVALID			0x1
 #define PPR_FAILURE			0xf
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index cec51a8ba844..ef12d74a03fe 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -392,6 +392,7 @@ struct amd_iommu_fault {
 
 struct iommu_domain;
+struct irq_domain;
 
 /*
  * This structure contains generic data for  IOMMU protection domains
@@ -574,6 +575,10 @@ struct amd_iommu {
 	/* The maximum PC banks and counters/bank (PCSup=1) */
 	u8 max_banks;
 	u8 max_counters;
+#ifdef CONFIG_IRQ_REMAP
+	struct irq_domain *ir_domain;
+	struct irq_domain *msi_domain;
+#endif
 };
 
 struct devid_map {
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 9847613085e1..536f2d8ea41a 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -1087,8 +1087,8 @@ static void free_iommu(struct intel_iommu *iommu)
 
 	if (iommu->irq) {
 		free_irq(iommu->irq, iommu);
-		irq_set_handler_data(iommu->irq, NULL);
 		dmar_free_hwirq(iommu->irq);
+		iommu->irq = 0;
 	}
 
 	if (iommu->qi) {
@@ -1642,23 +1642,14 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
 	if (iommu->irq)
 		return 0;
 
-	irq = dmar_alloc_hwirq();
-	if (irq <= 0) {
+	irq = dmar_alloc_hwirq(iommu->seq_id, iommu->node, iommu);
+	if (irq > 0) {
+		iommu->irq = irq;
+	} else {
 		pr_err("IOMMU: no free vectors\n");
 		return -EINVAL;
 	}
 
-	irq_set_handler_data(irq, iommu);
-	iommu->irq = irq;
-
-	ret = arch_setup_dmar_msi(irq);
-	if (ret) {
-		irq_set_handler_data(irq, NULL);
-		iommu->irq = 0;
-		dmar_free_hwirq(irq);
-		return ret;
-	}
-
 	ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
 	if (ret)
 		pr_err("IOMMU: can't request irq\n");
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 27541d440849..ff35b0336d2b 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -8,6 +8,7 @@
 #include <linux/irq.h>
 #include <linux/intel-iommu.h>
 #include <linux/acpi.h>
+#include <linux/irqdomain.h>
 #include <asm/io_apic.h>
 #include <asm/smp.h>
 #include <asm/cpu.h>
@@ -31,6 +32,21 @@ struct hpet_scope {
 	unsigned int devfn;
 };
 
+struct irq_2_iommu {
+	struct intel_iommu *iommu;
+	u16 irte_index;
+	u16 sub_handle;
+	u8  irte_mask;
+};
+
+struct intel_ir_data {
+	struct irq_2_iommu irq_2_iommu;
+	struct irte irte_entry;
+	union {
+		struct msi_msg msi_entry;
+	};
+};
+
 #define IR_X2APIC_MODE(mode) (mode ? (1 << 11) : 0)
 #define IRTE_DEST(dest) ((x2apic_mode) ? dest : dest << 8)
 
@@ -49,43 +65,14 @@ static struct hpet_scope ir_hpet[MAX_HPET_TBS];
  * the dmar_global_lock.
 */
 static DEFINE_RAW_SPINLOCK(irq_2_ir_lock);
+static struct irq_domain_ops intel_ir_domain_ops;
 
 static int __init parse_ioapics_under_ir(void);
 
-static struct irq_2_iommu *irq_2_iommu(unsigned int irq)
-{
-	struct irq_cfg *cfg = irq_get_chip_data(irq);
-	return cfg ? &cfg->irq_2_iommu : NULL;
-}
-
-static int get_irte(int irq, struct irte *entry)
-{
-	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
-	unsigned long flags;
-	int index;
-
-	if (!entry || !irq_iommu)
-		return -1;
-
-	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
-
-	if (unlikely(!irq_iommu->iommu)) {
-		raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-		return -1;
-	}
-
-	index = irq_iommu->irte_index + irq_iommu->sub_handle;
-	*entry = *(irq_iommu->iommu->ir_table->base + index);
-
-	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-	return 0;
-}
-
-static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
+static int alloc_irte(struct intel_iommu *iommu, int irq,
+		      struct irq_2_iommu *irq_iommu, u16 count)
 {
 	struct ir_table *table = iommu->ir_table;
-	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
-	struct irq_cfg *cfg = irq_get_chip_data(irq);
 	unsigned int mask = 0;
 	unsigned long flags;
 	int index;
@@ -112,7 +99,6 @@ static int alloc_irte(struct intel_iommu *iommu, int irq, u16 count)
 	if (index < 0) {
 		pr_warn("IR%d: can't allocate an IRTE\n", iommu->seq_id);
 	} else {
-		cfg->remapped = 1;
 		irq_iommu->iommu = iommu;
 		irq_iommu->irte_index = index;
 		irq_iommu->sub_handle = 0;
@@ -134,47 +120,9 @@ static int qi_flush_iec(struct intel_iommu *iommu, int index, int mask)
 	return qi_submit_sync(&desc, iommu);
 }
 
-static int map_irq_to_irte_handle(int irq, u16 *sub_handle)
+static int modify_irte(struct irq_2_iommu *irq_iommu,
+		       struct irte *irte_modified)
 {
-	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
-	unsigned long flags;
-	int index;
-
-	if (!irq_iommu)
-		return -1;
-
-	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
-	*sub_handle = irq_iommu->sub_handle;
-	index = irq_iommu->irte_index;
-	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-	return index;
-}
-
-static int set_irte_irq(int irq, struct intel_iommu *iommu, u16 index, u16 subhandle)
-{
-	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
-	struct irq_cfg *cfg = irq_get_chip_data(irq);
-	unsigned long flags;
-
-	if (!irq_iommu)
-		return -1;
-
-	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
-
-	cfg->remapped = 1;
-	irq_iommu->iommu = iommu;
-	irq_iommu->irte_index = index;
-	irq_iommu->sub_handle = subhandle;
-	irq_iommu->irte_mask = 0;
-
-	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-
-	return 0;
-}
-
-static int modify_irte(int irq, struct irte *irte_modified)
-{
-	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
 	struct intel_iommu *iommu;
 	unsigned long flags;
 	struct irte *irte;
@@ -241,7 +189,7 @@ static int clear_entries(struct irq_2_iommu *irq_iommu)
 		return 0;
 
 	iommu = irq_iommu->iommu;
-	index = irq_iommu->irte_index + irq_iommu->sub_handle;
+	index = irq_iommu->irte_index;
 
 	start = iommu->ir_table->base + index;
 	end = start + (1 << irq_iommu->irte_mask);
@@ -256,29 +204,6 @@ static int clear_entries(struct irq_2_iommu *irq_iommu)
 	return qi_flush_iec(iommu, index, irq_iommu->irte_mask);
 }
 
-static int free_irte(int irq)
-{
-	struct irq_2_iommu *irq_iommu = irq_2_iommu(irq);
-	unsigned long flags;
-	int rc;
-
-	if (!irq_iommu)
-		return -1;
-
-	raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
-
-	rc = clear_entries(irq_iommu);
-
-	irq_iommu->iommu = NULL;
-	irq_iommu->irte_index = 0;
-	irq_iommu->sub_handle = 0;
-	irq_iommu->irte_mask = 0;
-
-	raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
-
-	return rc;
-}
-
 /*
  * source validation type
  */
@@ -481,13 +406,12 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
 	if (iommu->ir_table)
 		return 0;
 
-	ir_table = kzalloc(sizeof(struct ir_table), GFP_ATOMIC);
+	ir_table = kzalloc(sizeof(struct ir_table), GFP_KERNEL);
 	if (!ir_table)
 		return -ENOMEM;
 
-	pages = alloc_pages_node(iommu->node, GFP_ATOMIC | __GFP_ZERO,
+	pages = alloc_pages_node(iommu->node, GFP_KERNEL | __GFP_ZERO,
 				 INTR_REMAP_PAGE_ORDER);
-
 	if (!pages) {
 		pr_err("IR%d: failed to allocate pages of order %d\n",
 		       iommu->seq_id, INTR_REMAP_PAGE_ORDER);
@@ -501,11 +425,23 @@ static int intel_setup_irq_remapping(struct intel_iommu *iommu)
 		goto out_free_pages;
 	}
 
+	iommu->ir_domain = irq_domain_add_hierarchy(arch_get_ir_parent_domain(),
+						    0, INTR_REMAP_TABLE_ENTRIES,
+						    NULL, &intel_ir_domain_ops,
+						    iommu);
+	if (!iommu->ir_domain) {
+		pr_err("IR%d: failed to allocate irqdomain\n", iommu->seq_id);
+		goto out_free_bitmap;
+	}
+	iommu->ir_msi_domain = arch_create_msi_irq_domain(iommu->ir_domain);
+
 	ir_table->base = page_address(pages);
 	ir_table->bitmap = bitmap;
 	iommu->ir_table = ir_table;
 	return 0;
 
+out_free_bitmap:
+	kfree(bitmap);
 out_free_pages:
 	__free_pages(pages, INTR_REMAP_PAGE_ORDER);
 out_free_table:
@@ -516,6 +452,14 @@ out_free_table:
 static void intel_teardown_irq_remapping(struct intel_iommu *iommu)
 {
 	if (iommu && iommu->ir_table) {
+		if (iommu->ir_msi_domain) {
+			irq_domain_remove(iommu->ir_msi_domain);
+			iommu->ir_msi_domain = NULL;
+		}
+		if (iommu->ir_domain) {
+			irq_domain_remove(iommu->ir_domain);
+			iommu->ir_domain = NULL;
+		}
 		free_pages((unsigned long)iommu->ir_table->base,
 			   INTR_REMAP_PAGE_ORDER);
 		kfree(iommu->ir_table->bitmap);
@@ -595,22 +539,57 @@ static int __init intel_irq_remapping_supported(void)
 	return 1;
 }
 
-static int __init intel_enable_irq_remapping(void)
+static void __init intel_cleanup_irq_remapping(void)
 {
 	struct dmar_drhd_unit *drhd;
 	struct intel_iommu *iommu;
-	bool x2apic_present;
-	int setup = 0;
-	int eim = 0;
 
-	x2apic_present = x2apic_supported();
+	for_each_iommu(iommu, drhd) {
+		if (ecap_ir_support(iommu->ecap)) {
+			iommu_disable_irq_remapping(iommu);
+			intel_teardown_irq_remapping(iommu);
+		}
+	}
+
+	if (x2apic_supported())
+		pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
+}
+
+static int __init intel_prepare_irq_remapping(void)
+{
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
+
+	if (dmar_table_init() < 0)
+		return -1;
 
 	if (parse_ioapics_under_ir() != 1) {
-		printk(KERN_INFO "Not enable interrupt remapping\n");
+		printk(KERN_INFO "Not enabling interrupt remapping\n");
 		goto error;
 	}
 
-	if (x2apic_present) {
+	for_each_iommu(iommu, drhd) {
+		if (!ecap_ir_support(iommu->ecap))
+			continue;
+
+		/* Do the allocations early */
+		if (intel_setup_irq_remapping(iommu))
+			goto error;
+	}
+	return 0;
+error:
+	intel_cleanup_irq_remapping();
+	return -1;
+}
+
+static int __init intel_enable_irq_remapping(void)
+{
+	struct dmar_drhd_unit *drhd;
+	struct intel_iommu *iommu;
+	int setup = 0;
+	int eim = 0;
+
+	if (x2apic_supported()) {
 		pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
 		eim = !dmar_x2apic_optout();
@@ -678,9 +657,6 @@ static int __init intel_enable_irq_remapping(void)
 		if (!ecap_ir_support(iommu->ecap))
 			continue;
 
-		if (intel_setup_irq_remapping(iommu))
-			goto error;
-
 		iommu_set_irq_remapping(iommu, eim);
 		setup = 1;
 	}
@@ -690,27 +666,12 @@ static int __init intel_enable_irq_remapping(void)
 
 	irq_remapping_enabled = 1;
 
-	/*
-	 * VT-d has a different layout for IO-APIC entries when
-	 * interrupt remapping is enabled. So it needs a special routine
-	 * to print IO-APIC entries for debugging purposes too.
	 */
-	x86_io_apic_ops.print_entries = intel_ir_io_apic_print_entries;
-
 	pr_info("Enabled IRQ remapping in %s mode\n", eim ? "x2apic" : "xapic");
 
 	return eim ? IRQ_REMAP_X2APIC_MODE : IRQ_REMAP_XAPIC_MODE;
 
 error:
-	for_each_iommu(iommu, drhd)
-		if (ecap_ir_support(iommu->ecap)) {
-			iommu_disable_irq_remapping(iommu);
-			intel_teardown_irq_remapping(iommu);
-		}
-
-	if (x2apic_present)
-		pr_warn("Failed to enable irq remapping. You are vulnerable to irq-injection attacks.\n");
-
+	intel_cleanup_irq_remapping();
 	return -1;
 }
@@ -941,8 +902,7 @@ error:
 	return -1;
 }
 
-static void prepare_irte(struct irte *irte, int vector,
-			 unsigned int dest)
+static void prepare_irte(struct irte *irte, int vector, unsigned int dest)
 {
 	memset(irte, 0, sizeof(*irte));
 
@@ -962,76 +922,64 @@ static void prepare_irte(struct irte *irte, int vector,
 	irte->redir_hint = 1;
 }
 
-static int intel_setup_ioapic_entry(int irq,
-				    struct IO_APIC_route_entry *route_entry,
-				    unsigned int destination, int vector,
-				    struct io_apic_irq_attr *attr)
+static struct irq_domain *intel_get_ir_irq_domain(struct irq_alloc_info *info)
 {
-	int ioapic_id = mpc_ioapic_id(attr->ioapic);
-	struct intel_iommu *iommu;
-	struct IR_IO_APIC_route_entry *entry;
-	struct irte irte;
-	int index;
-
-	down_read(&dmar_global_lock);
-	iommu = map_ioapic_to_ir(ioapic_id);
-	if (!iommu) {
-		pr_warn("No mapping iommu for ioapic %d\n", ioapic_id);
-		index = -ENODEV;
-	} else {
-		index = alloc_irte(iommu, irq, 1);
-		if (index < 0) {
-			pr_warn("Failed to allocate IRTE for ioapic %d\n",
-				ioapic_id);
-			index = -ENOMEM;
-		}
-	}
-	up_read(&dmar_global_lock);
-	if (index < 0)
-		return index;
-
-	prepare_irte(&irte, vector, destination);
+	struct intel_iommu *iommu = NULL;
 
-	/* Set source-id of interrupt request */
-	set_ioapic_sid(&irte, ioapic_id);
+	if (!info)
+		return NULL;
 
-	modify_irte(irq, &irte);
+	switch (info->type) {
+	case X86_IRQ_ALLOC_TYPE_IOAPIC:
+		iommu = map_ioapic_to_ir(info->ioapic_id);
+		break;
+	case X86_IRQ_ALLOC_TYPE_HPET:
+		iommu = map_hpet_to_ir(info->hpet_id);
+		break;
+	case X86_IRQ_ALLOC_TYPE_MSI:
+	case X86_IRQ_ALLOC_TYPE_MSIX:
+		iommu = map_dev_to_ir(info->msi_dev);
+		break;
+	default:
+		BUG_ON(1);
+		break;
+	}
 
-	apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: "
-		"Set IRTE entry (P:%d FPD:%d Dst_Mode:%d "
-		"Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X "
-		"Avail:%X Vector:%02X Dest:%08X "
-		"SID:%04X SQ:%X SVT:%X)\n",
-		attr->ioapic, irte.present, irte.fpd, irte.dst_mode,
-		irte.redir_hint, irte.trigger_mode, irte.dlvry_mode,
-		irte.avail, irte.vector, irte.dest_id,
-		irte.sid, irte.sq, irte.svt);
+	return iommu ? iommu->ir_domain : NULL;
+}
 
-	entry = (struct IR_IO_APIC_route_entry *)route_entry;
-	memset(entry, 0, sizeof(*entry));
+static struct irq_domain *intel_get_irq_domain(struct irq_alloc_info *info)
+{
+	struct intel_iommu *iommu;
 
-	entry->index2	= (index >> 15) & 0x1;
-	entry->zero	= 0;
-	entry->format	= 1;
-	entry->index	= (index & 0x7fff);
-	/*
-	 * IO-APIC RTE will be configured with virtual vector.
-	 * irq handler will do the explicit EOI to the io-apic.
	 */
-	entry->vector	= attr->ioapic_pin;
-	entry->mask	= 0;			/* enable IRQ */
-	entry->trigger	= attr->trigger;
-	entry->polarity	= attr->polarity;
+	if (!info)
+		return NULL;
 
-	/* Mask level triggered irqs.
-	 * Use IRQ_DELAYED_DISABLE for edge triggered irqs.
	 */
-	if (attr->trigger)
-		entry->mask = 1;
+	switch (info->type) {
+	case X86_IRQ_ALLOC_TYPE_MSI:
+	case X86_IRQ_ALLOC_TYPE_MSIX:
+		iommu = map_dev_to_ir(info->msi_dev);
+		if (iommu)
+			return iommu->ir_msi_domain;
+		break;
+	default:
+		break;
+	}
 
-	return 0;
+	return NULL;
 }
 
+struct irq_remap_ops intel_irq_remap_ops = {
+	.supported		= intel_irq_remapping_supported,
+	.prepare		= intel_prepare_irq_remapping,
+	.enable			= intel_enable_irq_remapping,
+	.disable		= disable_irq_remapping,
+	.reenable		= reenable_irq_remapping,
+	.enable_faulting	= enable_drhd_fault_handling,
+	.get_ir_irq_domain	= intel_get_ir_irq_domain,
+	.get_irq_domain		= intel_get_irq_domain,
+};
+
 /*
  * Migrate the IO-APIC irq in the presence of intr-remapping.
  *
@@ -1047,171 +995,236 @@ static int intel_setup_ioapic_entry(int irq,
  * is used to migrate MSI irq's in the presence of interrupt-remapping.
 */
 static int
-intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
-			  bool force)
+intel_ir_set_affinity(struct irq_data *data, const struct cpumask *mask,
+		      bool force)
 {
-	struct irq_cfg *cfg = data->chip_data;
-	unsigned int dest, irq = data->irq;
-	struct irte irte;
-	int err;
-
-	if (!config_enabled(CONFIG_SMP))
-		return -EINVAL;
-
-	if (!cpumask_intersects(mask, cpu_online_mask))
-		return -EINVAL;
-
-	if (get_irte(irq, &irte))
-		return -EBUSY;
-
-	err = assign_irq_vector(irq, cfg, mask);
-	if (err)
-		return err;
-
-	err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
-	if (err) {
-		if (assign_irq_vector(irq, cfg, data->affinity))
-			pr_err("Failed to recover vector for irq %d\n", irq);
-		return err;
-	}
+	struct intel_ir_data *ir_data = data->chip_data;
+	struct irte *irte = &ir_data->irte_entry;
+	struct irq_cfg *cfg = irqd_cfg(data);
+	struct irq_data *parent = data->parent_data;
+	int ret;
 
-	irte.vector = cfg->vector;
-	irte.dest_id = IRTE_DEST(dest);
+	ret = parent->chip->irq_set_affinity(parent, mask, force);
+	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
+		return ret;
 
 	/*
 	 * Atomically updates the IRTE with the new destination, vector
 	 * and flushes the interrupt entry cache.
	 */
-	modify_irte(irq, &irte);
+	irte->vector = cfg->vector;
+	irte->dest_id = IRTE_DEST(cfg->dest_apicid);
+	modify_irte(&ir_data->irq_2_iommu, irte);
 
 	/*
 	 * After this point, all the interrupts will start arriving
 	 * at the new destination. So, time to cleanup the previous
 	 * vector allocation.
	 */
-	if (cfg->move_in_progress)
-		send_cleanup_vector(cfg);
+	send_cleanup_vector(cfg);
 
-	cpumask_copy(data->affinity, mask);
-	return 0;
+	return IRQ_SET_MASK_OK_DONE;
 }
 
-static void intel_compose_msi_msg(struct pci_dev *pdev,
-				  unsigned int irq, unsigned int dest,
-				  struct msi_msg *msg, u8 hpet_id)
+static void intel_ir_compose_msi_msg(struct irq_data *irq_data,
+				     struct msi_msg *msg)
 {
-	struct irq_cfg *cfg;
-	struct irte irte;
-	u16 sub_handle = 0;
-	int ir_index;
-
-	cfg = irq_get_chip_data(irq);
+	struct intel_ir_data *ir_data = irq_data->chip_data;
 
-	ir_index = map_irq_to_irte_handle(irq, &sub_handle);
-	BUG_ON(ir_index == -1);
-
-	prepare_irte(&irte, cfg->vector, dest);
-
-	/* Set source-id of interrupt request */
-	if (pdev)
-		set_msi_sid(&irte, pdev);
-	else
-		set_hpet_sid(&irte, hpet_id);
+	*msg = ir_data->msi_entry;
+}
 
-	modify_irte(irq, &irte);
+static struct irq_chip intel_ir_chip = {
+	.irq_ack = ir_ack_apic_edge,
+	.irq_set_affinity = intel_ir_set_affinity,
+	.irq_compose_msi_msg = intel_ir_compose_msi_msg,
+};
 
-	msg->address_hi = MSI_ADDR_BASE_HI;
-	msg->data = sub_handle;
-	msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
-			  MSI_ADDR_IR_SHV |
-			  MSI_ADDR_IR_INDEX1(ir_index) |
-			  MSI_ADDR_IR_INDEX2(ir_index);
+static void intel_irq_remapping_prepare_irte(struct intel_ir_data *data,
+					     struct irq_cfg *irq_cfg,
+					     struct irq_alloc_info *info,
+					     int index, int sub_handle)
+{
+	struct IR_IO_APIC_route_entry *entry;
+	struct irte *irte = &data->irte_entry;
+	struct msi_msg *msg = &data->msi_entry;
+
+	prepare_irte(irte, irq_cfg->vector, irq_cfg->dest_apicid);
+	switch (info->type) {
+	case X86_IRQ_ALLOC_TYPE_IOAPIC:
+		/* Set source-id of interrupt request */
+		set_ioapic_sid(irte, info->ioapic_id);
+		apic_printk(APIC_VERBOSE, KERN_DEBUG "IOAPIC[%d]: Set IRTE entry (P:%d FPD:%d Dst_Mode:%d Redir_hint:%d Trig_Mode:%d Dlvry_Mode:%X Avail:%X Vector:%02X Dest:%08X SID:%04X SQ:%X SVT:%X)\n",
+			    info->ioapic_id, irte->present, irte->fpd,
+			    irte->dst_mode, irte->redir_hint,
+			    irte->trigger_mode, irte->dlvry_mode,
+			    irte->avail, irte->vector, irte->dest_id,
+			    irte->sid, irte->sq, irte->svt);
+
+		entry = (struct IR_IO_APIC_route_entry *)info->ioapic_entry;
+		info->ioapic_entry = NULL;
+		memset(entry, 0, sizeof(*entry));
+		entry->index2	= (index >> 15) & 0x1;
+		entry->zero	= 0;
+		entry->format	= 1;
+		entry->index	= (index & 0x7fff);
+		/*
+		 * IO-APIC RTE will be configured with virtual vector.
+		 * irq handler will do the explicit EOI to the io-apic.
		 */
+		entry->vector	= info->ioapic_pin;
+		entry->mask	= 0;			/* enable IRQ */
+		entry->trigger	= info->ioapic_trigger;
+		entry->polarity	= info->ioapic_polarity;
+		if (info->ioapic_trigger)
+			entry->mask = 1; /* Mask level triggered irqs. */
+		break;
+
+	case X86_IRQ_ALLOC_TYPE_HPET:
+	case X86_IRQ_ALLOC_TYPE_MSI:
+	case X86_IRQ_ALLOC_TYPE_MSIX:
+		if (info->type == X86_IRQ_ALLOC_TYPE_HPET)
+			set_hpet_sid(irte, info->hpet_id);
+		else
+			set_msi_sid(irte, info->msi_dev);
+
+		msg->address_hi = MSI_ADDR_BASE_HI;
+		msg->data = sub_handle;
+		msg->address_lo = MSI_ADDR_BASE_LO | MSI_ADDR_IR_EXT_INT |
+				  MSI_ADDR_IR_SHV |
+				  MSI_ADDR_IR_INDEX1(index) |
+				  MSI_ADDR_IR_INDEX2(index);
+		break;
+
+	default:
+		BUG_ON(1);
+		break;
+	}
 }
 
-/*
- * Map the PCI dev to the corresponding remapping hardware unit
- * and allocate 'nvec' consecutive interrupt-remapping table entries
- * in it.
 */
-static int intel_msi_alloc_irq(struct pci_dev *dev, int irq, int nvec)
+static void intel_free_irq_resources(struct irq_domain *domain,
+				     unsigned int virq, unsigned int nr_irqs)
 {
-	struct intel_iommu *iommu;
-	int index;
+	struct irq_data *irq_data;
+	struct intel_ir_data *data;
+	struct irq_2_iommu *irq_iommu;
+	unsigned long flags;
+	int i;
 
-	down_read(&dmar_global_lock);
-	iommu = map_dev_to_ir(dev);
-	if (!iommu) {
-		printk(KERN_ERR
-		       "Unable to map PCI %s to iommu\n", pci_name(dev));
-		index = -ENOENT;
-	} else {
-		index = alloc_irte(iommu, irq, nvec);
-		if (index < 0) {
-			printk(KERN_ERR
-			       "Unable to allocate %d IRTE for PCI %s\n",
-			       nvec, pci_name(dev));
-			index = -ENOSPC;
+	for (i = 0; i < nr_irqs; i++) {
+		irq_data = irq_domain_get_irq_data(domain, virq + i);
+		if (irq_data && irq_data->chip_data) {
+			data = irq_data->chip_data;
+			irq_iommu = &data->irq_2_iommu;
+			raw_spin_lock_irqsave(&irq_2_ir_lock, flags);
+			clear_entries(irq_iommu);
+			raw_spin_unlock_irqrestore(&irq_2_ir_lock, flags);
+			irq_domain_reset_irq_data(irq_data);
+			kfree(data);
 		}
 	}
-	up_read(&dmar_global_lock);
-
-	return index;
 }
 
-static int intel_msi_setup_irq(struct pci_dev *pdev, unsigned int irq,
-			       int index, int sub_handle)
+static int intel_irq_remapping_alloc(struct irq_domain *domain,
+				     unsigned int virq, unsigned int nr_irqs,
+				     void *arg)
 {
-	struct intel_iommu *iommu;
-	int ret = -ENOENT;
+	struct intel_iommu *iommu = domain->host_data;
+	struct irq_alloc_info *info = arg;
+	struct intel_ir_data *data;
+	struct irq_data *irq_data;
+	struct irq_cfg *irq_cfg;
+	int i, ret, index;
+
+	if (!info || !iommu)
+		return -EINVAL;
+	if (nr_irqs > 1 && info->type != X86_IRQ_ALLOC_TYPE_MSI &&
+	    info->type != X86_IRQ_ALLOC_TYPE_MSIX)
+		return -EINVAL;
+
+	/*
+	 * With IRQ remapping enabled, don't need contigious CPU vectors
+	 * to support multiple MSI interrupts.
	 */
+	if (info->type == X86_IRQ_ALLOC_TYPE_MSI)
+		info->flags &= ~X86_IRQ_ALLOC_CONTIGOUS_VECTORS;
+
+	ret = irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, arg);
+	if (ret < 0)
+		return ret;
+
+	ret = -ENOMEM;
+	data = kzalloc(sizeof(*data), GFP_KERNEL);
+	if (!data)
+		goto out_free_parent;
 
 	down_read(&dmar_global_lock);
-	iommu = map_dev_to_ir(pdev);
-	if (iommu) {
-		/*
-		 * setup the mapping between the irq and the IRTE
-		 * base index, the sub_handle pointing to the
-		 * appropriate interrupt remap table entry.
		 */
-		set_irte_irq(irq, iommu, index, sub_handle);
-		ret = 0;
-	}
+	index = alloc_irte(iommu, virq, &data->irq_2_iommu, nr_irqs);
 	up_read(&dmar_global_lock);
+	if (index < 0) {
+		pr_warn("Failed to allocate IRTE\n");
+		kfree(data);
+		goto out_free_parent;
+	}
+
+	for (i = 0; i < nr_irqs; i++) {
+		irq_data = irq_domain_get_irq_data(domain, virq + i);
+		irq_cfg = irqd_cfg(irq_data);
+		if (!irq_data || !irq_cfg) {
+			ret = -EINVAL;
+			goto out_free_data;
+		}
 
+		if (i > 0) {
+			data = kzalloc(sizeof(*data), GFP_KERNEL);
+			if (!data)
+				goto out_free_data;
+		}
+		irq_data->hwirq = (index << 16) + i;
+		irq_data->chip_data = data;
+		irq_data->chip = &intel_ir_chip;
+		intel_irq_remapping_prepare_irte(data, irq_cfg, info, index, i);
+		irq_set_status_flags(virq + i, IRQ_MOVE_PCNTXT);
+	}
+	return 0;
+
+out_free_data:
+	intel_free_irq_resources(domain, virq, i);
+out_free_parent:
+	irq_domain_free_irqs_common(domain, virq, nr_irqs);
 	return ret;
 }
 
-static int intel_alloc_hpet_msi(unsigned int irq, unsigned int id)
+static void intel_irq_remapping_free(struct irq_domain *domain,
+				     unsigned int virq, unsigned int nr_irqs)
 {
-	int ret = -1;
-	struct intel_iommu *iommu;
-	int index;
+	intel_free_irq_resources(domain, virq, nr_irqs);
+	irq_domain_free_irqs_common(domain, virq, nr_irqs);
+}
 
-	down_read(&dmar_global_lock);
-	iommu = map_hpet_to_ir(id);
-	if (iommu) {
-		index = alloc_irte(iommu, irq, 1);
-		if (index >= 0)
-			ret = 0;
-	}
-	up_read(&dmar_global_lock);
+static void intel_irq_remapping_activate(struct irq_domain *domain,
+					 struct irq_data *irq_data)
+{
+	struct intel_ir_data *data = irq_data->chip_data;
 
-	return ret;
+	modify_irte(&data->irq_2_iommu, &data->irte_entry);
 }
 
-struct irq_remap_ops intel_irq_remap_ops = {
-	.supported		= intel_irq_remapping_supported,
-	.prepare		= dmar_table_init,
-	.enable			= intel_enable_irq_remapping,
-	.disable		= disable_irq_remapping,
-	.reenable		= reenable_irq_remapping,
-	.enable_faulting	= enable_drhd_fault_handling,
-	.setup_ioapic_entry	= intel_setup_ioapic_entry,
-	.set_affinity		= intel_ioapic_set_affinity,
-	.free_irq		= free_irte,
-	.compose_msi_msg	= intel_compose_msi_msg,
-	.msi_alloc_irq		= intel_msi_alloc_irq,
-	.msi_setup_irq		= intel_msi_setup_irq,
-	.alloc_hpet_msi		= intel_alloc_hpet_msi,
+static void intel_irq_remapping_deactivate(struct irq_domain *domain,
+					   struct irq_data *irq_data)
+{
+	struct intel_ir_data *data = irq_data->chip_data;
+	struct irte entry;
+
+	memset(&entry, 0, sizeof(entry));
+	modify_irte(&data->irq_2_iommu, &entry);
+}
+
+static struct irq_domain_ops intel_ir_domain_ops = {
+	.alloc = intel_irq_remapping_alloc,
+	.free = intel_irq_remapping_free,
+	.activate = intel_irq_remapping_activate,
+	.deactivate = intel_irq_remapping_deactivate,
 };
 
 /*
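[Pattern sketch, not part of this commit: amd_ir_set_affinity() and intel_ir_set_affinity() above share the same delegation shape. A hypothetical skeleton, assuming the kernel's IRQ_SET_MASK_OK_DONE semantics (return it so outer layers skip further processing):]

#include <linux/irq.h>

/* Illustrative only: delegate to the parent (vector) domain first, then
 * mirror the newly chosen vector/destination into the remapping entry. */
static int sketch_ir_set_affinity(struct irq_data *data,
				  const struct cpumask *mask, bool force)
{
	struct irq_data *parent = data->parent_data;
	int ret = parent->chip->irq_set_affinity(parent, mask, force);

	if (ret < 0 || ret == IRQ_SET_MASK_OK_DONE)
		return ret;
	/* ...copy the new vector/dest_apicid into the IRTE, flush the
	 * interrupt entry cache, then clean up the old vector... */
	return IRQ_SET_MASK_OK_DONE;
}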
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 2c3f5ad01098..66517e7d42f0 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -6,6 +6,7 @@
 #include <linux/msi.h>
 #include <linux/irq.h>
 #include <linux/pci.h>
+#include <linux/irqdomain.h>
 
 #include <asm/hw_irq.h>
 #include <asm/irq_remapping.h>
@@ -25,18 +26,6 @@ int no_x2apic_optout;
 
 static struct irq_remap_ops *remap_ops;
 
-static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec);
-static int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
-				  int index, int sub_handle);
-static int set_remapped_irq_affinity(struct irq_data *data,
-				     const struct cpumask *mask,
-				     bool force);
-
-static bool irq_remapped(struct irq_cfg *cfg)
-{
-	return (cfg->remapped == 1);
-}
-
 static void irq_remapping_disable_io_apic(void)
 {
 	/*
@@ -50,117 +39,9 @@ static void irq_remapping_disable_io_apic(void)
 	disconnect_bsp_APIC(0);
 }
 
-static int do_setup_msi_irqs(struct pci_dev *dev, int nvec)
-{
-	int ret, sub_handle, nvec_pow2, index = 0;
-	unsigned int irq;
-	struct msi_desc *msidesc;
-
-	msidesc = list_entry(dev->msi_list.next, struct msi_desc, list);
-
-	irq = irq_alloc_hwirqs(nvec, dev_to_node(&dev->dev));
-	if (irq == 0)
-		return -ENOSPC;
-
-	nvec_pow2 = __roundup_pow_of_two(nvec);
-	for (sub_handle = 0; sub_handle < nvec; sub_handle++) {
-		if (!sub_handle) {
-			index = msi_alloc_remapped_irq(dev, irq, nvec_pow2);
-			if (index < 0) {
-				ret = index;
-				goto error;
-			}
-		} else {
-			ret = msi_setup_remapped_irq(dev, irq + sub_handle,
-						     index, sub_handle);
-			if (ret < 0)
-				goto error;
-		}
-		ret = setup_msi_irq(dev, msidesc, irq, sub_handle);
-		if (ret < 0)
-			goto error;
-	}
-	return 0;
-
-error:
-	irq_free_hwirqs(irq, nvec);
-
-	/*
-	 * Restore altered MSI descriptor fields and prevent just destroyed
-	 * IRQs from tearing down again in default_teardown_msi_irqs()
	 */
-	msidesc->irq = 0;
-
-	return ret;
-}
-
-static int do_setup_msix_irqs(struct pci_dev *dev, int nvec)
-{
-	int node, ret, sub_handle, index = 0;
-	struct msi_desc *msidesc;
-	unsigned int irq;
-
-	node = dev_to_node(&dev->dev);
-	sub_handle = 0;
-
-	list_for_each_entry(msidesc, &dev->msi_list, list) {
-
-		irq = irq_alloc_hwirq(node);
-		if (irq == 0)
-			return -1;
-
-		if (sub_handle == 0)
-			ret = index = msi_alloc_remapped_irq(dev, irq, nvec);
-		else
-			ret = msi_setup_remapped_irq(dev, irq, index, sub_handle);
-
-		if (ret < 0)
-			goto error;
-
-		ret = setup_msi_irq(dev, msidesc, irq, 0);
-		if (ret < 0)
-			goto error;
-
-		sub_handle += 1;
-		irq += 1;
-	}
-
-	return 0;
-
-error:
-	irq_free_hwirq(irq);
-	return ret;
-}
-
-static int irq_remapping_setup_msi_irqs(struct pci_dev *dev,
-					int nvec, int type)
-{
-	if (type == PCI_CAP_ID_MSI)
-		return do_setup_msi_irqs(dev, nvec);
-	else
-		return do_setup_msix_irqs(dev, nvec);
-}
-
-static void eoi_ioapic_pin_remapped(int apic, int pin, int vector)
-{
-	/*
-	 * Intr-remapping uses pin number as the virtual vector
-	 * in the RTE. Actual vector is programmed in
-	 * intr-remapping table entry. Hence for the io-apic
-	 * EOI we use the pin number.
	 */
-	io_apic_eoi(apic, pin);
-}
-
 static void __init irq_remapping_modify_x86_ops(void)
 {
 	x86_io_apic_ops.disable		= irq_remapping_disable_io_apic;
-	x86_io_apic_ops.set_affinity	= set_remapped_irq_affinity;
-	x86_io_apic_ops.setup_entry	= setup_ioapic_remapped_entry;
-	x86_io_apic_ops.eoi_ioapic_pin	= eoi_ioapic_pin_remapped;
-	x86_msi.setup_msi_irqs		= irq_remapping_setup_msi_irqs;
-	x86_msi.setup_hpet_msi		= setup_hpet_msi_remapped;
-	x86_msi.compose_msi_msg		= compose_remapped_msi_msg;
 }
 
 static __init int setup_nointremap(char *str)
@@ -194,16 +75,6 @@ static __init int setup_irqremap(char *str)
 }
 early_param("intremap", setup_irqremap);
 
-void __init setup_irq_remapping_ops(void)
-{
-	remap_ops = &intel_irq_remap_ops;
-
-#ifdef CONFIG_AMD_IOMMU
-	if (amd_iommu_irq_ops.prepare() == 0)
-		remap_ops = &amd_iommu_irq_ops;
-#endif
-}
-
 void set_irq_remapping_broken(void)
 {
 	irq_remap_broken = 1;
@@ -222,9 +93,14 @@ int irq_remapping_supported(void)
 
 int __init irq_remapping_prepare(void)
 {
-	if (!remap_ops || !remap_ops->prepare)
-		return -ENODEV;
+	remap_ops = &intel_irq_remap_ops;
 
+#ifdef CONFIG_AMD_IOMMU
+	if (amd_iommu_irq_ops.prepare() == 0) {
+		remap_ops = &amd_iommu_irq_ops;
+		return 0;
+	}
+#endif
 	return remap_ops->prepare();
 }
 
@@ -274,117 +150,61 @@ int __init irq_remap_enable_fault_handling(void)
 	return remap_ops->enable_faulting();
 }
 
-int setup_ioapic_remapped_entry(int irq,
-				struct IO_APIC_route_entry *entry,
-				unsigned int destination, int vector,
-				struct io_apic_irq_attr *attr)
-{
-	if (!remap_ops || !remap_ops->setup_ioapic_entry)
-		return -ENODEV;
-
-	return remap_ops->setup_ioapic_entry(irq, entry, destination,
-					     vector, attr);
-}
-
-static int set_remapped_irq_affinity(struct irq_data *data,
-				     const struct cpumask *mask, bool force)
-{
-	if (!config_enabled(CONFIG_SMP) || !remap_ops ||
-	    !remap_ops->set_affinity)
-		return 0;
-
-	return remap_ops->set_affinity(data, mask, force);
-}
-
-void free_remapped_irq(int irq)
-{
-	struct irq_cfg *cfg = irq_get_chip_data(irq);
-
-	if (!remap_ops || !remap_ops->free_irq)
-		return;
-
-	if (irq_remapped(cfg))
-		remap_ops->free_irq(irq);
-}
-
-void compose_remapped_msi_msg(struct pci_dev *pdev,
-			      unsigned int irq, unsigned int dest,
-			      struct msi_msg *msg, u8 hpet_id)
-{
-	struct irq_cfg *cfg = irq_get_chip_data(irq);
-
-	if (!irq_remapped(cfg))
-		native_compose_msi_msg(pdev, irq, dest, msg, hpet_id);
-	else if (remap_ops && remap_ops->compose_msi_msg)
-		remap_ops->compose_msi_msg(pdev, irq, dest, msg, hpet_id);
-}
-
-static int msi_alloc_remapped_irq(struct pci_dev *pdev, int irq, int nvec)
-{
-	if (!remap_ops || !remap_ops->msi_alloc_irq)
-		return -ENODEV;
-
-	return remap_ops->msi_alloc_irq(pdev, irq, nvec);
-}
-
-static int msi_setup_remapped_irq(struct pci_dev *pdev, unsigned int irq,
-				  int index, int sub_handle)
-{
-	if (!remap_ops || !remap_ops->msi_setup_irq)
-		return -ENODEV;
-
-	return remap_ops->msi_setup_irq(pdev, irq, index, sub_handle);
-}
-
-int setup_hpet_msi_remapped(unsigned int irq, unsigned int id)
-{
-	int ret;
-
-	if (!remap_ops || !remap_ops->alloc_hpet_msi)
-		return -ENODEV;
-
-	ret = remap_ops->alloc_hpet_msi(irq, id);
-	if (ret)
-		return -EINVAL;
-
-	return default_setup_hpet_msi(irq, id);
-}
-
 void panic_if_irq_remap(const char *msg)
 {
 	if (irq_remapping_enabled)
 		panic(msg);
 }
 
-static void ir_ack_apic_edge(struct irq_data *data)
+void ir_ack_apic_edge(struct irq_data *data)
 {
 	ack_APIC_irq();
 }
 
-static void ir_ack_apic_level(struct irq_data *data)
+void irq_remapping_print_chip(struct irq_data *data, struct seq_file
*p) { - ack_APIC_irq(); - eoi_ioapic_irq(data->irq, data->chip_data); -} - -static void ir_print_prefix(struct irq_data *data, struct seq_file *p) -{ - seq_printf(p, " IR-%s", data->chip->name); -} - -void irq_remap_modify_chip_defaults(struct irq_chip *chip) -{ - chip->irq_print_chip = ir_print_prefix; - chip->irq_ack = ir_ack_apic_edge; - chip->irq_eoi = ir_ack_apic_level; - chip->irq_set_affinity = x86_io_apic_ops.set_affinity; -} - -bool setup_remapped_irq(int irq, struct irq_cfg *cfg, struct irq_chip *chip) -{ - if (!irq_remapped(cfg)) - return false; - irq_set_status_flags(irq, IRQ_MOVE_PCNTXT); - irq_remap_modify_chip_defaults(chip); - return true; + /* + * Assume interrupt is remapped if the parent irqdomain isn't the + * vector domain, which is true for MSI, HPET and IOAPIC on x86 + * platforms. + */ + if (data->domain && data->domain->parent != arch_get_ir_parent_domain()) + seq_printf(p, " IR-%s", data->chip->name); + else + seq_printf(p, " %s", data->chip->name); +} + +/** + * irq_remapping_get_ir_irq_domain - Get the irqdomain associated with the IOMMU + * device serving request @info + * @info: interrupt allocation information, used to identify the IOMMU device + * + * It's used to get parent irqdomain for HPET and IOAPIC irqdomains. + * Returns pointer to IRQ domain, or NULL on failure. + */ +struct irq_domain * +irq_remapping_get_ir_irq_domain(struct irq_alloc_info *info) +{ + if (!remap_ops || !remap_ops->get_ir_irq_domain) + return NULL; + + return remap_ops->get_ir_irq_domain(info); +} + +/** + * irq_remapping_get_irq_domain - Get the irqdomain serving the request @info + * @info: interrupt allocation information, used to identify the IOMMU device + * + * There will be one PCI MSI/MSIX irqdomain associated with each interrupt + * remapping device, so this interface is used to retrieve the PCI MSI/MSIX + * irqdomain serving request @info. + * Returns pointer to IRQ domain, or NULL on failure. 
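
The two accessors defined here are the only hooks the core now exposes to consumers. A hypothetical sketch of how IOAPIC/HPET/MSI setup code might stack its own irqdomain on top of the remapping domain, condensed from the patterns used elsewhere in this patch (function and header locations assumed, not part of this series):

#include <linux/irqdomain.h>
#include <asm/irq_remapping.h>   /* assumed home of the accessor */

static struct irq_domain *
create_remapped_domain(struct irq_alloc_info *info,
		       const struct irq_domain_ops *ops, void *host_data)
{
	struct irq_domain *parent, *d;

	parent = irq_remapping_get_ir_irq_domain(info);
	if (!parent)
		return NULL;  /* no remapping: caller falls back to the vector domain */

	d = irq_domain_add_tree(NULL, ops, host_data);
	if (d)
		d->parent = parent;  /* allocations now recurse into the IR domain */
	return d;
}
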
+ */ +struct irq_domain * +irq_remapping_get_irq_domain(struct irq_alloc_info *info) +{ + if (!remap_ops || !remap_ops->get_irq_domain) + return NULL; + + return remap_ops->get_irq_domain(info); } diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h index fde250f86e60..4bd791d72e5e 100644 --- a/drivers/iommu/irq_remapping.h +++ b/drivers/iommu/irq_remapping.h @@ -24,12 +24,10 @@ #ifdef CONFIG_IRQ_REMAP -struct IO_APIC_route_entry; -struct io_apic_irq_attr; struct irq_data; -struct cpumask; -struct pci_dev; struct msi_msg; +struct irq_domain; +struct irq_alloc_info; extern int disable_irq_remap; extern int irq_remap_broken; @@ -56,36 +54,18 @@ struct irq_remap_ops { /* Enable fault handling */ int (*enable_faulting)(void); - /* IO-APIC setup routine */ - int (*setup_ioapic_entry)(int irq, struct IO_APIC_route_entry *, - unsigned int, int, - struct io_apic_irq_attr *); + /* Get the irqdomain associated the IOMMU device */ + struct irq_domain *(*get_ir_irq_domain)(struct irq_alloc_info *); - /* Set the CPU affinity of a remapped interrupt */ - int (*set_affinity)(struct irq_data *data, const struct cpumask *mask, - bool force); - - /* Free an IRQ */ - int (*free_irq)(int); - - /* Create MSI msg to use for interrupt remapping */ - void (*compose_msi_msg)(struct pci_dev *, - unsigned int, unsigned int, - struct msi_msg *, u8); - - /* Allocate remapping resources for MSI */ - int (*msi_alloc_irq)(struct pci_dev *, int, int); - - /* Setup the remapped MSI irq */ - int (*msi_setup_irq)(struct pci_dev *, unsigned int, int, int); - - /* Setup interrupt remapping for an HPET MSI */ - int (*alloc_hpet_msi)(unsigned int, unsigned int); + /* Get the MSI irqdomain associated with the IOMMU device */ + struct irq_domain *(*get_irq_domain)(struct irq_alloc_info *); }; extern struct irq_remap_ops intel_irq_remap_ops; extern struct irq_remap_ops amd_iommu_irq_ops; +extern void ir_ack_apic_edge(struct irq_data *data); + #else /* CONFIG_IRQ_REMAP */ #define irq_remapping_enabled 0 diff --git a/drivers/irqchip/Kconfig b/drivers/irqchip/Kconfig index e12cb23d786c..cc79d2a5a8c2 100644 --- a/drivers/irqchip/Kconfig +++ b/drivers/irqchip/Kconfig @@ -5,8 +5,15 @@ config IRQCHIP config ARM_GIC bool select IRQ_DOMAIN + select IRQ_DOMAIN_HIERARCHY select MULTI_IRQ_HANDLER +config ARM_GIC_V2M + bool + depends on ARM_GIC + depends on PCI && PCI_MSI + select PCI_MSI_IRQ_DOMAIN + config GIC_NON_BANKED bool @@ -14,6 +21,11 @@ config ARM_GIC_V3 bool select IRQ_DOMAIN select MULTI_IRQ_HANDLER + select IRQ_DOMAIN_HIERARCHY + +config ARM_GIC_V3_ITS + bool + select PCI_MSI_IRQ_DOMAIN config ARM_NVIC bool diff --git a/drivers/irqchip/Makefile b/drivers/irqchip/Makefile index 4954a314c31e..9516a324be6d 100644 --- a/drivers/irqchip/Makefile +++ b/drivers/irqchip/Makefile @@ -19,7 +19,9 @@ obj-$(CONFIG_ARCH_SUNXI) += irq-sun4i.o obj-$(CONFIG_ARCH_SUNXI) += irq-sunxi-nmi.o obj-$(CONFIG_ARCH_SPEAR3XX) += spear-shirq.o obj-$(CONFIG_ARM_GIC) += irq-gic.o irq-gic-common.o +obj-$(CONFIG_ARM_GIC_V2M) += irq-gic-v2m.o obj-$(CONFIG_ARM_GIC_V3) += irq-gic-v3.o irq-gic-common.o +obj-$(CONFIG_ARM_GIC_V3_ITS) += irq-gic-v3-its.o obj-$(CONFIG_ARM_NVIC) += irq-nvic.o obj-$(CONFIG_ARM_VIC) += irq-vic.o obj-$(CONFIG_ATMEL_AIC_IRQ) += irq-atmel-aic-common.o irq-atmel-aic.o @@ -39,3 +41,4 @@ obj-$(CONFIG_BCM7120_L2_IRQ) += irq-bcm7120-l2.o obj-$(CONFIG_BRCMSTB_L2_IRQ) += irq-brcmstb-l2.o obj-$(CONFIG_KEYSTONE_IRQ) += irq-keystone.o obj-$(CONFIG_MIPS_GIC) += irq-mips-gic.o +obj-$(CONFIG_ARCH_MEDIATEK) += irq-mtk-sysirq.o diff 
--git a/drivers/irqchip/irq-gic-v2m.c b/drivers/irqchip/irq-gic-v2m.c new file mode 100644 index 000000000000..fdf706555d72 --- /dev/null +++ b/drivers/irqchip/irq-gic-v2m.c @@ -0,0 +1,333 @@ +/* + * ARM GIC v2m MSI(-X) support + * Support for Message Signaled Interrupts for systems that + * implement ARM Generic Interrupt Controller: GICv2m. + * + * Copyright (C) 2014 Advanced Micro Devices, Inc. + * Authors: Suravee Suthikulpanit <suravee.suthikulpanit@amd.com> + * Harish Kasiviswanathan <harish.kasiviswanathan@amd.com> + * Brandon Anderson <brandon.anderson@amd.com> + * + * This program is free software; you can redistribute it and/or modify it + * under the terms of the GNU General Public License version 2 as published + * by the Free Software Foundation. + */ + +#define pr_fmt(fmt) "GICv2m: " fmt + +#include <linux/irq.h> +#include <linux/irqdomain.h> +#include <linux/kernel.h> +#include <linux/of_address.h> +#include <linux/of_pci.h> +#include <linux/slab.h> +#include <linux/spinlock.h> + +/* +* MSI_TYPER: +* [31:26] Reserved +* [25:16] lowest SPI assigned to MSI +* [15:10] Reserved +* [9:0] Numer of SPIs assigned to MSI +*/ +#define V2M_MSI_TYPER 0x008 +#define V2M_MSI_TYPER_BASE_SHIFT 16 +#define V2M_MSI_TYPER_BASE_MASK 0x3FF +#define V2M_MSI_TYPER_NUM_MASK 0x3FF +#define V2M_MSI_SETSPI_NS 0x040 +#define V2M_MIN_SPI 32 +#define V2M_MAX_SPI 1019 + +#define V2M_MSI_TYPER_BASE_SPI(x) \ + (((x) >> V2M_MSI_TYPER_BASE_SHIFT) & V2M_MSI_TYPER_BASE_MASK) + +#define V2M_MSI_TYPER_NUM_SPI(x) ((x) & V2M_MSI_TYPER_NUM_MASK) + +struct v2m_data { + spinlock_t msi_cnt_lock; + struct msi_controller mchip; + struct resource res; /* GICv2m resource */ + void __iomem *base; /* GICv2m virt address */ + u32 spi_start; /* The SPI number that MSIs start */ + u32 nr_spis; /* The number of SPIs for MSIs */ + unsigned long *bm; /* MSI vector bitmap */ + struct irq_domain *domain; +}; + +static void gicv2m_mask_msi_irq(struct irq_data *d) +{ + pci_msi_mask_irq(d); + irq_chip_mask_parent(d); +} + +static void gicv2m_unmask_msi_irq(struct irq_data *d) +{ + pci_msi_unmask_irq(d); + irq_chip_unmask_parent(d); +} + +static struct irq_chip gicv2m_msi_irq_chip = { + .name = "MSI", + .irq_mask = gicv2m_mask_msi_irq, + .irq_unmask = gicv2m_unmask_msi_irq, + .irq_eoi = irq_chip_eoi_parent, + .irq_write_msi_msg = pci_msi_domain_write_msg, +}; + +static struct msi_domain_info gicv2m_msi_domain_info = { + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_PCI_MSIX), + .chip = &gicv2m_msi_irq_chip, +}; + +static int gicv2m_set_affinity(struct irq_data *irq_data, + const struct cpumask *mask, bool force) +{ + int ret; + + ret = irq_chip_set_affinity_parent(irq_data, mask, force); + if (ret == IRQ_SET_MASK_OK) + ret = IRQ_SET_MASK_OK_DONE; + + return ret; +} + +static void gicv2m_compose_msi_msg(struct irq_data *data, struct msi_msg *msg) +{ + struct v2m_data *v2m = irq_data_get_irq_chip_data(data); + phys_addr_t addr = v2m->res.start + V2M_MSI_SETSPI_NS; + + msg->address_hi = (u32) (addr >> 32); + msg->address_lo = (u32) (addr); + msg->data = data->hwirq; +} + +static struct irq_chip gicv2m_irq_chip = { + .name = "GICv2m", + .irq_mask = irq_chip_mask_parent, + .irq_unmask = irq_chip_unmask_parent, + .irq_eoi = irq_chip_eoi_parent, + .irq_set_affinity = gicv2m_set_affinity, + .irq_compose_msi_msg = gicv2m_compose_msi_msg, +}; + +static int gicv2m_irq_gic_domain_alloc(struct irq_domain *domain, + unsigned int virq, + irq_hw_number_t hwirq) +{ + struct of_phandle_args args; + struct irq_data *d; + int 
err; + + args.np = domain->parent->of_node; + args.args_count = 3; + args.args[0] = 0; + args.args[1] = hwirq - 32; + args.args[2] = IRQ_TYPE_EDGE_RISING; + + err = irq_domain_alloc_irqs_parent(domain, virq, 1, &args); + if (err) + return err; + + /* Configure the interrupt line to be edge */ + d = irq_domain_get_irq_data(domain->parent, virq); + d->chip->irq_set_type(d, IRQ_TYPE_EDGE_RISING); + return 0; +} + +static void gicv2m_unalloc_msi(struct v2m_data *v2m, unsigned int hwirq) +{ + int pos; + + pos = hwirq - v2m->spi_start; + if (pos < 0 || pos >= v2m->nr_spis) { + pr_err("Failed to teardown msi. Invalid hwirq %d\n", hwirq); + return; + } + + spin_lock(&v2m->msi_cnt_lock); + __clear_bit(pos, v2m->bm); + spin_unlock(&v2m->msi_cnt_lock); +} + +static int gicv2m_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + struct v2m_data *v2m = domain->host_data; + int hwirq, offset, err = 0; + + spin_lock(&v2m->msi_cnt_lock); + offset = find_first_zero_bit(v2m->bm, v2m->nr_spis); + if (offset < v2m->nr_spis) + __set_bit(offset, v2m->bm); + else + err = -ENOSPC; + spin_unlock(&v2m->msi_cnt_lock); + + if (err) + return err; + + hwirq = v2m->spi_start + offset; + + err = gicv2m_irq_gic_domain_alloc(domain, virq, hwirq); + if (err) { + gicv2m_unalloc_msi(v2m, hwirq); + return err; + } + + irq_domain_set_hwirq_and_chip(domain, virq, hwirq, + &gicv2m_irq_chip, v2m); + + return 0; +} + +static void gicv2m_irq_domain_free(struct irq_domain *domain, + unsigned int virq, unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct v2m_data *v2m = irq_data_get_irq_chip_data(d); + + BUG_ON(nr_irqs != 1); + gicv2m_unalloc_msi(v2m, d->hwirq); + irq_domain_free_irqs_parent(domain, virq, nr_irqs); +} + +static const struct irq_domain_ops gicv2m_domain_ops = { + .alloc = gicv2m_irq_domain_alloc, + .free = gicv2m_irq_domain_free, +}; + +static bool is_msi_spi_valid(u32 base, u32 num) +{ + if (base < V2M_MIN_SPI) { + pr_err("Invalid MSI base SPI (base:%u)\n", base); + return false; + } + + if ((num == 0) || (base + num > V2M_MAX_SPI)) { + pr_err("Number of SPIs (%u) exceed maximum (%u)\n", + num, V2M_MAX_SPI - V2M_MIN_SPI + 1); + return false; + } + + return true; +} + +static int __init gicv2m_init_one(struct device_node *node, + struct irq_domain *parent) +{ + int ret; + struct v2m_data *v2m; + + v2m = kzalloc(sizeof(struct v2m_data), GFP_KERNEL); + if (!v2m) { + pr_err("Failed to allocate struct v2m_data.\n"); + return -ENOMEM; + } + + ret = of_address_to_resource(node, 0, &v2m->res); + if (ret) { + pr_err("Failed to allocate v2m resource.\n"); + goto err_free_v2m; + } + + v2m->base = ioremap(v2m->res.start, resource_size(&v2m->res)); + if (!v2m->base) { + pr_err("Failed to map GICv2m resource\n"); + ret = -ENOMEM; + goto err_free_v2m; + } + + if (!of_property_read_u32(node, "arm,msi-base-spi", &v2m->spi_start) && + !of_property_read_u32(node, "arm,msi-num-spis", &v2m->nr_spis)) { + pr_info("Overriding V2M MSI_TYPER (base:%u, num:%u)\n", + v2m->spi_start, v2m->nr_spis); + } else { + u32 typer = readl_relaxed(v2m->base + V2M_MSI_TYPER); + + v2m->spi_start = V2M_MSI_TYPER_BASE_SPI(typer); + v2m->nr_spis = V2M_MSI_TYPER_NUM_SPI(typer); + } + + if (!is_msi_spi_valid(v2m->spi_start, v2m->nr_spis)) { + ret = -EINVAL; + goto err_iounmap; + } + + v2m->bm = kzalloc(sizeof(long) * BITS_TO_LONGS(v2m->nr_spis), + GFP_KERNEL); + if (!v2m->bm) { + ret = -ENOMEM; + goto err_iounmap; + } + + v2m->domain = irq_domain_add_tree(NULL, 
&gicv2m_domain_ops, v2m); + if (!v2m->domain) { + pr_err("Failed to create GICv2m domain\n"); + ret = -ENOMEM; + goto err_free_bm; + } + + v2m->domain->parent = parent; + v2m->mchip.of_node = node; + v2m->mchip.domain = pci_msi_create_irq_domain(node, + &gicv2m_msi_domain_info, + v2m->domain); + if (!v2m->mchip.domain) { + pr_err("Failed to create MSI domain\n"); + ret = -ENOMEM; + goto err_free_domains; + } + + spin_lock_init(&v2m->msi_cnt_lock); + + ret = of_pci_msi_chip_add(&v2m->mchip); + if (ret) { + pr_err("Failed to add msi_chip.\n"); + goto err_free_domains; + } + + pr_info("Node %s: range[%#lx:%#lx], SPI[%d:%d]\n", node->name, + (unsigned long)v2m->res.start, (unsigned long)v2m->res.end, + v2m->spi_start, (v2m->spi_start + v2m->nr_spis)); + + return 0; + +err_free_domains: + if (v2m->mchip.domain) + irq_domain_remove(v2m->mchip.domain); + if (v2m->domain) + irq_domain_remove(v2m->domain); +err_free_bm: + kfree(v2m->bm); +err_iounmap: + iounmap(v2m->base); +err_free_v2m: + kfree(v2m); + return ret; +} + +static struct of_device_id gicv2m_device_id[] = { + { .compatible = "arm,gic-v2m-frame", }, + {}, +}; + +int __init gicv2m_of_init(struct device_node *node, struct irq_domain *parent) +{ + int ret = 0; + struct device_node *child; + + for (child = of_find_matching_node(node, gicv2m_device_id); child; + child = of_find_matching_node(child, gicv2m_device_id)) { + if (!of_find_property(child, "msi-controller", NULL)) + continue; + + ret = gicv2m_init_one(child, parent); + if (ret) { + of_node_put(node); + break; + } + } + + return ret; +} diff --git a/drivers/irqchip/irq-gic-v3-its.c b/drivers/irqchip/irq-gic-v3-its.c new file mode 100644 index 000000000000..e9d16151eed6 --- /dev/null +++ b/drivers/irqchip/irq-gic-v3-its.c @@ -0,0 +1,1402 @@ +/* + * Copyright (C) 2013, 2014 ARM Limited, All Rights Reserved. + * Author: Marc Zyngier <marc.zyngier@arm.com> + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. + * + * You should have received a copy of the GNU General Public License + * along with this program. If not, see <http://www.gnu.org/licenses/>. + */ + +#include <linux/bitmap.h> +#include <linux/cpu.h> +#include <linux/delay.h> +#include <linux/interrupt.h> +#include <linux/log2.h> +#include <linux/mm.h> +#include <linux/msi.h> +#include <linux/of.h> +#include <linux/of_address.h> +#include <linux/of_irq.h> +#include <linux/of_pci.h> +#include <linux/of_platform.h> +#include <linux/percpu.h> +#include <linux/slab.h> + +#include <linux/irqchip/arm-gic-v3.h> + +#include <asm/cacheflush.h> +#include <asm/cputype.h> +#include <asm/exception.h> + +#include "irqchip.h" + +#define ITS_FLAGS_CMDQ_NEEDS_FLUSHING (1 << 0) + +#define RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING (1 << 0) + +/* + * Collection structure - just an ID, and a redistributor address to + * ping. We use one per CPU as a bag of interrupts assigned to this + * CPU. + */ +struct its_collection { + u64 target_address; + u16 col_id; +}; + +/* + * The ITS structure - contains most of the infrastructure, with the + * msi_controller, the command queue, the collections, and the list of + * devices writing to it. 
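
Before the ITS internals begin, a worked example of the message format composed by gicv2m_compose_msi_msg() in the GICv2m driver above: the device writes the SPI number to the frame's MSI_SETSPI_NS register. Stand-alone, with a hypothetical frame address and SPI:

#include <stdint.h>
#include <stdio.h>

#define V2M_MSI_SETSPI_NS 0x040   /* from the driver above */

int main(void)
{
	uint64_t frame = 0x8020000ULL;  /* hypothetical v2m frame base */
	uint32_t spi   = 64;            /* hypothetical SPI backing this MSI */
	uint64_t addr  = frame + V2M_MSI_SETSPI_NS;

	/* Exactly what gicv2m_compose_msi_msg() puts in the msi_msg */
	printf("address_hi=%#x address_lo=%#x data=%u\n",
	       (uint32_t)(addr >> 32), (uint32_t)addr, spi);
	return 0;
}
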
+ */ +struct its_node { + raw_spinlock_t lock; + struct list_head entry; + struct msi_controller msi_chip; + struct irq_domain *domain; + void __iomem *base; + unsigned long phys_base; + struct its_cmd_block *cmd_base; + struct its_cmd_block *cmd_write; + void *tables[GITS_BASER_NR_REGS]; + struct its_collection *collections; + struct list_head its_device_list; + u64 flags; + u32 ite_size; +}; + +#define ITS_ITT_ALIGN SZ_256 + +/* + * The ITS view of a device - belongs to an ITS, a collection, owns an + * interrupt translation table, and a list of interrupts. + */ +struct its_device { + struct list_head entry; + struct its_node *its; + struct its_collection *collection; + void *itt; + unsigned long *lpi_map; + irq_hw_number_t lpi_base; + int nr_lpis; + u32 nr_ites; + u32 device_id; +}; + +static LIST_HEAD(its_nodes); +static DEFINE_SPINLOCK(its_lock); +static struct device_node *gic_root_node; +static struct rdists *gic_rdists; + +#define gic_data_rdist() (raw_cpu_ptr(gic_rdists->rdist)) +#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) + +/* + * ITS command descriptors - parameters to be encoded in a command + * block. + */ +struct its_cmd_desc { + union { + struct { + struct its_device *dev; + u32 event_id; + } its_inv_cmd; + + struct { + struct its_device *dev; + u32 event_id; + } its_int_cmd; + + struct { + struct its_device *dev; + int valid; + } its_mapd_cmd; + + struct { + struct its_collection *col; + int valid; + } its_mapc_cmd; + + struct { + struct its_device *dev; + u32 phys_id; + u32 event_id; + } its_mapvi_cmd; + + struct { + struct its_device *dev; + struct its_collection *col; + u32 id; + } its_movi_cmd; + + struct { + struct its_device *dev; + u32 event_id; + } its_discard_cmd; + + struct { + struct its_collection *col; + } its_invall_cmd; + }; +}; + +/* + * The ITS command block, which is what the ITS actually parses. 
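
For a feel of the numbers behind struct its_device and ITS_ITT_ALIGN above, a stand-alone sketch of the ITT sizing later performed by its_create_device() and its_build_mapd_cmd(), with hypothetical nvecs/ite_size values:

#include <stdio.h>

#define ITS_ITT_ALIGN   256UL   /* SZ_256 */
#define ALIGN_UP(x, a)  (((x) + (a) - 1) & ~((a) - 1))

int main(void)
{
	unsigned long itt = 0x1008;  /* hypothetical kmalloc() result */
	unsigned long nvecs = 4;     /* MSI vectors requested */
	unsigned long ite_size = 8;  /* bytes per entry, from GITS_TYPER */
	unsigned long sz = nvecs * ite_size;

	/* Over-allocate so the table can be aligned by hand, as
	 * its_create_device()/its_build_mapd_cmd() do further down. */
	sz = (sz < ITS_ITT_ALIGN ? ITS_ITT_ALIGN : sz) + ITS_ITT_ALIGN - 1;

	printf("allocate %lu bytes, ITT used at %#lx\n",
	       sz, ALIGN_UP(itt, ITS_ITT_ALIGN));
	return 0;
}
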
+ */ +struct its_cmd_block { + u64 raw_cmd[4]; +}; + +#define ITS_CMD_QUEUE_SZ SZ_64K +#define ITS_CMD_QUEUE_NR_ENTRIES (ITS_CMD_QUEUE_SZ / sizeof(struct its_cmd_block)) + +typedef struct its_collection *(*its_cmd_builder_t)(struct its_cmd_block *, + struct its_cmd_desc *); + +static void its_encode_cmd(struct its_cmd_block *cmd, u8 cmd_nr) +{ + cmd->raw_cmd[0] &= ~0xffUL; + cmd->raw_cmd[0] |= cmd_nr; +} + +static void its_encode_devid(struct its_cmd_block *cmd, u32 devid) +{ + cmd->raw_cmd[0] &= ~(0xffffUL << 32); + cmd->raw_cmd[0] |= ((u64)devid) << 32; +} + +static void its_encode_event_id(struct its_cmd_block *cmd, u32 id) +{ + cmd->raw_cmd[1] &= ~0xffffffffUL; + cmd->raw_cmd[1] |= id; +} + +static void its_encode_phys_id(struct its_cmd_block *cmd, u32 phys_id) +{ + cmd->raw_cmd[1] &= 0xffffffffUL; + cmd->raw_cmd[1] |= ((u64)phys_id) << 32; +} + +static void its_encode_size(struct its_cmd_block *cmd, u8 size) +{ + cmd->raw_cmd[1] &= ~0x1fUL; + cmd->raw_cmd[1] |= size & 0x1f; +} + +static void its_encode_itt(struct its_cmd_block *cmd, u64 itt_addr) +{ + cmd->raw_cmd[2] &= ~0xffffffffffffUL; + cmd->raw_cmd[2] |= itt_addr & 0xffffffffff00UL; +} + +static void its_encode_valid(struct its_cmd_block *cmd, int valid) +{ + cmd->raw_cmd[2] &= ~(1UL << 63); + cmd->raw_cmd[2] |= ((u64)!!valid) << 63; +} + +static void its_encode_target(struct its_cmd_block *cmd, u64 target_addr) +{ + cmd->raw_cmd[2] &= ~(0xffffffffUL << 16); + cmd->raw_cmd[2] |= (target_addr & (0xffffffffUL << 16)); +} + +static void its_encode_collection(struct its_cmd_block *cmd, u16 col) +{ + cmd->raw_cmd[2] &= ~0xffffUL; + cmd->raw_cmd[2] |= col; +} + +static inline void its_fixup_cmd(struct its_cmd_block *cmd) +{ + /* Let's fixup BE commands */ + cmd->raw_cmd[0] = cpu_to_le64(cmd->raw_cmd[0]); + cmd->raw_cmd[1] = cpu_to_le64(cmd->raw_cmd[1]); + cmd->raw_cmd[2] = cpu_to_le64(cmd->raw_cmd[2]); + cmd->raw_cmd[3] = cpu_to_le64(cmd->raw_cmd[3]); +} + +static struct its_collection *its_build_mapd_cmd(struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + unsigned long itt_addr; + u8 size = order_base_2(desc->its_mapd_cmd.dev->nr_ites); + + itt_addr = virt_to_phys(desc->its_mapd_cmd.dev->itt); + itt_addr = ALIGN(itt_addr, ITS_ITT_ALIGN); + + its_encode_cmd(cmd, GITS_CMD_MAPD); + its_encode_devid(cmd, desc->its_mapd_cmd.dev->device_id); + its_encode_size(cmd, size - 1); + its_encode_itt(cmd, itt_addr); + its_encode_valid(cmd, desc->its_mapd_cmd.valid); + + its_fixup_cmd(cmd); + + return desc->its_mapd_cmd.dev->collection; +} + +static struct its_collection *its_build_mapc_cmd(struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_MAPC); + its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); + its_encode_target(cmd, desc->its_mapc_cmd.col->target_address); + its_encode_valid(cmd, desc->its_mapc_cmd.valid); + + its_fixup_cmd(cmd); + + return desc->its_mapc_cmd.col; +} + +static struct its_collection *its_build_mapvi_cmd(struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_MAPVI); + its_encode_devid(cmd, desc->its_mapvi_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_mapvi_cmd.event_id); + its_encode_phys_id(cmd, desc->its_mapvi_cmd.phys_id); + its_encode_collection(cmd, desc->its_mapvi_cmd.dev->collection->col_id); + + its_fixup_cmd(cmd); + + return desc->its_mapvi_cmd.dev->collection; +} + +static struct its_collection *its_build_movi_cmd(struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_MOVI); + 
its_encode_devid(cmd, desc->its_movi_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_movi_cmd.id); + its_encode_collection(cmd, desc->its_movi_cmd.col->col_id); + + its_fixup_cmd(cmd); + + return desc->its_movi_cmd.dev->collection; +} + +static struct its_collection *its_build_discard_cmd(struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_DISCARD); + its_encode_devid(cmd, desc->its_discard_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_discard_cmd.event_id); + + its_fixup_cmd(cmd); + + return desc->its_discard_cmd.dev->collection; +} + +static struct its_collection *its_build_inv_cmd(struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_INV); + its_encode_devid(cmd, desc->its_inv_cmd.dev->device_id); + its_encode_event_id(cmd, desc->its_inv_cmd.event_id); + + its_fixup_cmd(cmd); + + return desc->its_inv_cmd.dev->collection; +} + +static struct its_collection *its_build_invall_cmd(struct its_cmd_block *cmd, + struct its_cmd_desc *desc) +{ + its_encode_cmd(cmd, GITS_CMD_INVALL); + its_encode_collection(cmd, desc->its_mapc_cmd.col->col_id); + + its_fixup_cmd(cmd); + + return NULL; +} + +static u64 its_cmd_ptr_to_offset(struct its_node *its, + struct its_cmd_block *ptr) +{ + return (ptr - its->cmd_base) * sizeof(*ptr); +} + +static int its_queue_full(struct its_node *its) +{ + int widx; + int ridx; + + widx = its->cmd_write - its->cmd_base; + ridx = readl_relaxed(its->base + GITS_CREADR) / sizeof(struct its_cmd_block); + + /* This is incredibly unlikely to happen, unless the ITS locks up. */ + if (((widx + 1) % ITS_CMD_QUEUE_NR_ENTRIES) == ridx) + return 1; + + return 0; +} + +static struct its_cmd_block *its_allocate_entry(struct its_node *its) +{ + struct its_cmd_block *cmd; + u32 count = 1000000; /* 1s! */ + + while (its_queue_full(its)) { + count--; + if (!count) { + pr_err_ratelimited("ITS queue not draining\n"); + return NULL; + } + cpu_relax(); + udelay(1); + } + + cmd = its->cmd_write++; + + /* Handle queue wrapping */ + if (its->cmd_write == (its->cmd_base + ITS_CMD_QUEUE_NR_ENTRIES)) + its->cmd_write = its->cmd_base; + + return cmd; +} + +static struct its_cmd_block *its_post_commands(struct its_node *its) +{ + u64 wr = its_cmd_ptr_to_offset(its, its->cmd_write); + + writel_relaxed(wr, its->base + GITS_CWRITER); + + return its->cmd_write; +} + +static void its_flush_cmd(struct its_node *its, struct its_cmd_block *cmd) +{ + /* + * Make sure the commands written to memory are observable by + * the ITS. + */ + if (its->flags & ITS_FLAGS_CMDQ_NEEDS_FLUSHING) + __flush_dcache_area(cmd, sizeof(*cmd)); + else + dsb(ishst); +} + +static void its_wait_for_range_completion(struct its_node *its, + struct its_cmd_block *from, + struct its_cmd_block *to) +{ + u64 rd_idx, from_idx, to_idx; + u32 count = 1000000; /* 1s! */ + + from_idx = its_cmd_ptr_to_offset(its, from); + to_idx = its_cmd_ptr_to_offset(its, to); + + while (1) { + rd_idx = readl_relaxed(its->base + GITS_CREADR); + if (rd_idx >= to_idx || rd_idx < from_idx) + break; + + count--; + if (!count) { + pr_err_ratelimited("ITS queue timeout\n"); + return; + } + cpu_relax(); + udelay(1); + } +} + +static void its_send_single_command(struct its_node *its, + its_cmd_builder_t builder, + struct its_cmd_desc *desc) +{ + struct its_cmd_block *cmd, *sync_cmd, *next_cmd; + struct its_collection *sync_col; + + raw_spin_lock(&its->lock); + + cmd = its_allocate_entry(its); + if (!cmd) { /* We're soooooo screewed... 
*/ + pr_err_ratelimited("ITS can't allocate, dropping command\n"); + raw_spin_unlock(&its->lock); + return; + } + sync_col = builder(cmd, desc); + its_flush_cmd(its, cmd); + + if (sync_col) { + sync_cmd = its_allocate_entry(its); + if (!sync_cmd) { + pr_err_ratelimited("ITS can't SYNC, skipping\n"); + goto post; + } + its_encode_cmd(sync_cmd, GITS_CMD_SYNC); + its_encode_target(sync_cmd, sync_col->target_address); + its_fixup_cmd(sync_cmd); + its_flush_cmd(its, sync_cmd); + } + +post: + next_cmd = its_post_commands(its); + raw_spin_unlock(&its->lock); + + its_wait_for_range_completion(its, cmd, next_cmd); +} + +static void its_send_inv(struct its_device *dev, u32 event_id) +{ + struct its_cmd_desc desc; + + desc.its_inv_cmd.dev = dev; + desc.its_inv_cmd.event_id = event_id; + + its_send_single_command(dev->its, its_build_inv_cmd, &desc); +} + +static void its_send_mapd(struct its_device *dev, int valid) +{ + struct its_cmd_desc desc; + + desc.its_mapd_cmd.dev = dev; + desc.its_mapd_cmd.valid = !!valid; + + its_send_single_command(dev->its, its_build_mapd_cmd, &desc); +} + +static void its_send_mapc(struct its_node *its, struct its_collection *col, + int valid) +{ + struct its_cmd_desc desc; + + desc.its_mapc_cmd.col = col; + desc.its_mapc_cmd.valid = !!valid; + + its_send_single_command(its, its_build_mapc_cmd, &desc); +} + +static void its_send_mapvi(struct its_device *dev, u32 irq_id, u32 id) +{ + struct its_cmd_desc desc; + + desc.its_mapvi_cmd.dev = dev; + desc.its_mapvi_cmd.phys_id = irq_id; + desc.its_mapvi_cmd.event_id = id; + + its_send_single_command(dev->its, its_build_mapvi_cmd, &desc); +} + +static void its_send_movi(struct its_device *dev, + struct its_collection *col, u32 id) +{ + struct its_cmd_desc desc; + + desc.its_movi_cmd.dev = dev; + desc.its_movi_cmd.col = col; + desc.its_movi_cmd.id = id; + + its_send_single_command(dev->its, its_build_movi_cmd, &desc); +} + +static void its_send_discard(struct its_device *dev, u32 id) +{ + struct its_cmd_desc desc; + + desc.its_discard_cmd.dev = dev; + desc.its_discard_cmd.event_id = id; + + its_send_single_command(dev->its, its_build_discard_cmd, &desc); +} + +static void its_send_invall(struct its_node *its, struct its_collection *col) +{ + struct its_cmd_desc desc; + + desc.its_invall_cmd.col = col; + + its_send_single_command(its, its_build_invall_cmd, &desc); +} + +/* + * irqchip functions - assumes MSI, mostly. + */ + +static inline u32 its_get_event_id(struct irq_data *d) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + return d->hwirq - its_dev->lpi_base; +} + +static void lpi_set_config(struct irq_data *d, bool enable) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + irq_hw_number_t hwirq = d->hwirq; + u32 id = its_get_event_id(d); + u8 *cfg = page_address(gic_rdists->prop_page) + hwirq - 8192; + + if (enable) + *cfg |= LPI_PROP_ENABLED; + else + *cfg &= ~LPI_PROP_ENABLED; + + /* + * Make the above write visible to the redistributors. + * And yes, we're flushing exactly: One. Single. Byte. + * Humpf... 
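
A stand-alone sketch of the single configuration byte that lpi_set_config() above toggles — one byte per LPI, indexed by (hwirq - 8192). The bit values below are assumptions based on the names used in this patch:

#include <stdint.h>
#include <stdio.h>

#define LPI_PROP_ENABLED  (1 << 0)   /* assumed bit layout */
#define LPI_PROP_GROUP1   (1 << 1)

int main(void)
{
	uint8_t prop[32];               /* stand-in for the PROPBASE page */
	uint32_t hwirq = 8195;          /* hypothetical LPI */
	uint8_t *cfg = &prop[hwirq - 8192];

	*cfg = 0xa0 | LPI_PROP_GROUP1;  /* default priority, disabled */
	*cfg |= LPI_PROP_ENABLED;       /* unmask: one byte flipped, then flushed */

	printf("cfg[%u] = %#x\n", hwirq - 8192, (unsigned)*cfg);
	return 0;
}
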
+ */ + if (gic_rdists->flags & RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING) + __flush_dcache_area(cfg, sizeof(*cfg)); + else + dsb(ishst); + its_send_inv(its_dev, id); +} + +static void its_mask_irq(struct irq_data *d) +{ + lpi_set_config(d, false); +} + +static void its_unmask_irq(struct irq_data *d) +{ + lpi_set_config(d, true); +} + +static void its_eoi_irq(struct irq_data *d) +{ + gic_write_eoir(d->hwirq); +} + +static int its_set_affinity(struct irq_data *d, const struct cpumask *mask_val, + bool force) +{ + unsigned int cpu = cpumask_any_and(mask_val, cpu_online_mask); + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_collection *target_col; + u32 id = its_get_event_id(d); + + if (cpu >= nr_cpu_ids) + return -EINVAL; + + target_col = &its_dev->its->collections[cpu]; + its_send_movi(its_dev, target_col, id); + its_dev->collection = target_col; + + return IRQ_SET_MASK_OK_DONE; +} + +static void its_irq_compose_msi_msg(struct irq_data *d, struct msi_msg *msg) +{ + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + struct its_node *its; + u64 addr; + + its = its_dev->its; + addr = its->phys_base + GITS_TRANSLATER; + + msg->address_lo = addr & ((1UL << 32) - 1); + msg->address_hi = addr >> 32; + msg->data = its_get_event_id(d); +} + +static struct irq_chip its_irq_chip = { + .name = "ITS", + .irq_mask = its_mask_irq, + .irq_unmask = its_unmask_irq, + .irq_eoi = its_eoi_irq, + .irq_set_affinity = its_set_affinity, + .irq_compose_msi_msg = its_irq_compose_msi_msg, +}; + +static void its_mask_msi_irq(struct irq_data *d) +{ + pci_msi_mask_irq(d); + irq_chip_mask_parent(d); +} + +static void its_unmask_msi_irq(struct irq_data *d) +{ + pci_msi_unmask_irq(d); + irq_chip_unmask_parent(d); +} + +static struct irq_chip its_msi_irq_chip = { + .name = "ITS-MSI", + .irq_unmask = its_unmask_msi_irq, + .irq_mask = its_mask_msi_irq, + .irq_eoi = irq_chip_eoi_parent, + .irq_write_msi_msg = pci_msi_domain_write_msg, +}; + +/* + * How we allocate LPIs: + * + * The GIC has id_bits bits for interrupt identifiers. From there, we + * must subtract 8192 which are reserved for SGIs/PPIs/SPIs. Then, as + * we allocate LPIs by chunks of 32, we can shift the whole thing by 5 + * bits to the right. + * + * This gives us (((1UL << id_bits) - 8192) >> 5) possible allocations. 
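
A worked example of the chunk arithmetic this comment describes, runnable stand-alone, assuming a hypothetical id_bits of 16:

#include <stdio.h>

#define IRQS_PER_CHUNK_SHIFT 5

static int lpi_to_chunk(int lpi)
{
	return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT;
}

static int chunk_to_lpi(int chunk)
{
	return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192;
}

int main(void)
{
	int id_bits = 16;   /* hypothetical GIC ID space */
	int chunks = lpi_to_chunk(1 << id_bits);

	/* (65536 - 8192) >> 5 == 1792 chunks of 32 LPIs each */
	printf("%d chunks; chunk 0 covers LPIs %d..%d\n",
	       chunks, chunk_to_lpi(0), chunk_to_lpi(1) - 1);
	return 0;
}
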
+ */ +#define IRQS_PER_CHUNK_SHIFT 5 +#define IRQS_PER_CHUNK (1 << IRQS_PER_CHUNK_SHIFT) + +static unsigned long *lpi_bitmap; +static u32 lpi_chunks; +static DEFINE_SPINLOCK(lpi_lock); + +static int its_lpi_to_chunk(int lpi) +{ + return (lpi - 8192) >> IRQS_PER_CHUNK_SHIFT; +} + +static int its_chunk_to_lpi(int chunk) +{ + return (chunk << IRQS_PER_CHUNK_SHIFT) + 8192; +} + +static int its_lpi_init(u32 id_bits) +{ + lpi_chunks = its_lpi_to_chunk(1UL << id_bits); + + lpi_bitmap = kzalloc(BITS_TO_LONGS(lpi_chunks) * sizeof(long), + GFP_KERNEL); + if (!lpi_bitmap) { + lpi_chunks = 0; + return -ENOMEM; + } + + pr_info("ITS: Allocated %d chunks for LPIs\n", (int)lpi_chunks); + return 0; +} + +static unsigned long *its_lpi_alloc_chunks(int nr_irqs, int *base, int *nr_ids) +{ + unsigned long *bitmap = NULL; + int chunk_id; + int nr_chunks; + int i; + + nr_chunks = DIV_ROUND_UP(nr_irqs, IRQS_PER_CHUNK); + + spin_lock(&lpi_lock); + + do { + chunk_id = bitmap_find_next_zero_area(lpi_bitmap, lpi_chunks, + 0, nr_chunks, 0); + if (chunk_id < lpi_chunks) + break; + + nr_chunks--; + } while (nr_chunks > 0); + + if (!nr_chunks) + goto out; + + bitmap = kzalloc(BITS_TO_LONGS(nr_chunks * IRQS_PER_CHUNK) * sizeof (long), + GFP_ATOMIC); + if (!bitmap) + goto out; + + for (i = 0; i < nr_chunks; i++) + set_bit(chunk_id + i, lpi_bitmap); + + *base = its_chunk_to_lpi(chunk_id); + *nr_ids = nr_chunks * IRQS_PER_CHUNK; + +out: + spin_unlock(&lpi_lock); + + return bitmap; +} + +static void its_lpi_free(unsigned long *bitmap, int base, int nr_ids) +{ + int lpi; + + spin_lock(&lpi_lock); + + for (lpi = base; lpi < (base + nr_ids); lpi += IRQS_PER_CHUNK) { + int chunk = its_lpi_to_chunk(lpi); + BUG_ON(chunk > lpi_chunks); + if (test_bit(chunk, lpi_bitmap)) { + clear_bit(chunk, lpi_bitmap); + } else { + pr_err("Bad LPI chunk %d\n", chunk); + } + } + + spin_unlock(&lpi_lock); + + kfree(bitmap); +} + +/* + * We allocate 64kB for PROPBASE. That gives us at most 64K LPIs to + * deal with (one configuration byte per interrupt). PENDBASE has to + * be 64kB aligned (one bit per LPI, plus 8192 bits for SPI/PPI/SGI). + */ +#define LPI_PROPBASE_SZ SZ_64K +#define LPI_PENDBASE_SZ (LPI_PROPBASE_SZ / 8 + SZ_1K) + +/* + * This is how many bits of ID we need, including the useless ones. 
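
The constants above imply concrete sizes; a stand-alone sketch working them out:

#include <stdio.h>

int main(void)
{
	unsigned int propbase_sz = 64 * 1024;               /* LPI_PROPBASE_SZ */
	unsigned int pendbase_sz = propbase_sz / 8 + 1024;  /* LPI_PENDBASE_SZ */
	unsigned int nrbits = 0, n;

	/* LPI_NRBITS == ilog2(64K + 8K) == 16: IDs 0..65535 are covered,
	 * and the 8192 IDs below the first LPI are the "useless" ones. */
	for (n = propbase_sz + 8 * 1024; n > 1; n >>= 1)
		nrbits++;

	printf("LPI_NRBITS=%u, PENDBASE=%u bytes (allocated 64K-aligned)\n",
	       nrbits, pendbase_sz);
	return 0;
}
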
+ */ +#define LPI_NRBITS ilog2(LPI_PROPBASE_SZ + SZ_8K) + +#define LPI_PROP_DEFAULT_PRIO 0xa0 + +static int __init its_alloc_lpi_tables(void) +{ + phys_addr_t paddr; + + gic_rdists->prop_page = alloc_pages(GFP_NOWAIT, + get_order(LPI_PROPBASE_SZ)); + if (!gic_rdists->prop_page) { + pr_err("Failed to allocate PROPBASE\n"); + return -ENOMEM; + } + + paddr = page_to_phys(gic_rdists->prop_page); + pr_info("GIC: using LPI property table @%pa\n", &paddr); + + /* Priority 0xa0, Group-1, disabled */ + memset(page_address(gic_rdists->prop_page), + LPI_PROP_DEFAULT_PRIO | LPI_PROP_GROUP1, + LPI_PROPBASE_SZ); + + /* Make sure the GIC will observe the written configuration */ + __flush_dcache_area(page_address(gic_rdists->prop_page), LPI_PROPBASE_SZ); + + return 0; +} + +static const char *its_base_type_string[] = { + [GITS_BASER_TYPE_DEVICE] = "Devices", + [GITS_BASER_TYPE_VCPU] = "Virtual CPUs", + [GITS_BASER_TYPE_CPU] = "Physical CPUs", + [GITS_BASER_TYPE_COLLECTION] = "Interrupt Collections", + [GITS_BASER_TYPE_RESERVED5] = "Reserved (5)", + [GITS_BASER_TYPE_RESERVED6] = "Reserved (6)", + [GITS_BASER_TYPE_RESERVED7] = "Reserved (7)", +}; + +static void its_free_tables(struct its_node *its) +{ + int i; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + if (its->tables[i]) { + free_page((unsigned long)its->tables[i]); + its->tables[i] = NULL; + } + } +} + +static int its_alloc_tables(struct its_node *its) +{ + int err; + int i; + int psz = PAGE_SIZE; + u64 shr = GITS_BASER_InnerShareable; + + for (i = 0; i < GITS_BASER_NR_REGS; i++) { + u64 val = readq_relaxed(its->base + GITS_BASER + i * 8); + u64 type = GITS_BASER_TYPE(val); + u64 entry_size = GITS_BASER_ENTRY_SIZE(val); + u64 tmp; + void *base; + + if (type == GITS_BASER_TYPE_NONE) + continue; + + /* We're lazy and only allocate a single page for now */ + base = (void *)get_zeroed_page(GFP_KERNEL); + if (!base) { + err = -ENOMEM; + goto out_free; + } + + its->tables[i] = base; + +retry_baser: + val = (virt_to_phys(base) | + (type << GITS_BASER_TYPE_SHIFT) | + ((entry_size - 1) << GITS_BASER_ENTRY_SIZE_SHIFT) | + GITS_BASER_WaWb | + shr | + GITS_BASER_VALID); + + switch (psz) { + case SZ_4K: + val |= GITS_BASER_PAGE_SIZE_4K; + break; + case SZ_16K: + val |= GITS_BASER_PAGE_SIZE_16K; + break; + case SZ_64K: + val |= GITS_BASER_PAGE_SIZE_64K; + break; + } + + val |= (PAGE_SIZE / psz) - 1; + + writeq_relaxed(val, its->base + GITS_BASER + i * 8); + tmp = readq_relaxed(its->base + GITS_BASER + i * 8); + + if ((val ^ tmp) & GITS_BASER_SHAREABILITY_MASK) { + /* + * Shareability didn't stick. Just use + * whatever the read reported, which is likely + * to be the only thing this redistributor + * supports. + */ + shr = tmp & GITS_BASER_SHAREABILITY_MASK; + goto retry_baser; + } + + if ((val ^ tmp) & GITS_BASER_PAGE_SIZE_MASK) { + /* + * Page size didn't stick. Let's try a smaller + * size and retry. If we reach 4K, then + * something is horribly wrong... 
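
The retry loop above relies on a write-then-read-back negotiation: program the attributes you want, read back what the hardware kept, and downgrade accordingly. A stand-alone sketch with a fake register modelling hardware that silently drops unsupported bits (field position assumed):

#include <stdint.h>
#include <stdio.h>

#define SHR_MASK   (3ULL << 10)   /* GITS_BASER shareability, position assumed */
#define SHR_INNER  (1ULL << 10)

static uint64_t fake_baser;

/* Model hardware that silently downgrades unsupported attributes */
static void write_baser(uint64_t v)
{
	fake_baser = v & ~SHR_MASK;   /* this h/w only does non-shareable */
}

int main(void)
{
	uint64_t val = 0x1000 | SHR_INNER;

	write_baser(val);
	if ((val ^ fake_baser) & SHR_MASK) {
		/* keep whatever stuck and retry, as the loop above does */
		printf("shareability didn't stick, retrying with %#llx\n",
		       (unsigned long long)(fake_baser & SHR_MASK));
	}
	return 0;
}
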
+ */ + switch (psz) { + case SZ_16K: + psz = SZ_4K; + goto retry_baser; + case SZ_64K: + psz = SZ_16K; + goto retry_baser; + } + } + + if (val != tmp) { + pr_err("ITS: %s: GITS_BASER%d doesn't stick: %lx %lx\n", + its->msi_chip.of_node->full_name, i, + (unsigned long) val, (unsigned long) tmp); + err = -ENXIO; + goto out_free; + } + + pr_info("ITS: allocated %d %s @%lx (psz %dK, shr %d)\n", + (int)(PAGE_SIZE / entry_size), + its_base_type_string[type], + (unsigned long)virt_to_phys(base), + psz / SZ_1K, (int)shr >> GITS_BASER_SHAREABILITY_SHIFT); + } + + return 0; + +out_free: + its_free_tables(its); + + return err; +} + +static int its_alloc_collections(struct its_node *its) +{ + its->collections = kzalloc(nr_cpu_ids * sizeof(*its->collections), + GFP_KERNEL); + if (!its->collections) + return -ENOMEM; + + return 0; +} + +static void its_cpu_init_lpis(void) +{ + void __iomem *rbase = gic_data_rdist_rd_base(); + struct page *pend_page; + u64 val, tmp; + + /* If we didn't allocate the pending table yet, do it now */ + pend_page = gic_data_rdist()->pend_page; + if (!pend_page) { + phys_addr_t paddr; + /* + * The pending pages have to be at least 64kB aligned, + * hence the 'max(LPI_PENDBASE_SZ, SZ_64K)' below. + */ + pend_page = alloc_pages(GFP_NOWAIT | __GFP_ZERO, + get_order(max(LPI_PENDBASE_SZ, SZ_64K))); + if (!pend_page) { + pr_err("Failed to allocate PENDBASE for CPU%d\n", + smp_processor_id()); + return; + } + + /* Make sure the GIC will observe the zero-ed page */ + __flush_dcache_area(page_address(pend_page), LPI_PENDBASE_SZ); + + paddr = page_to_phys(pend_page); + pr_info("CPU%d: using LPI pending table @%pa\n", + smp_processor_id(), &paddr); + gic_data_rdist()->pend_page = pend_page; + } + + /* Disable LPIs */ + val = readl_relaxed(rbase + GICR_CTLR); + val &= ~GICR_CTLR_ENABLE_LPIS; + writel_relaxed(val, rbase + GICR_CTLR); + + /* + * Make sure any change to the table is observable by the GIC. + */ + dsb(sy); + + /* set PROPBASE */ + val = (page_to_phys(gic_rdists->prop_page) | + GICR_PROPBASER_InnerShareable | + GICR_PROPBASER_WaWb | + ((LPI_NRBITS - 1) & GICR_PROPBASER_IDBITS_MASK)); + + writeq_relaxed(val, rbase + GICR_PROPBASER); + tmp = readq_relaxed(rbase + GICR_PROPBASER); + + if ((tmp ^ val) & GICR_PROPBASER_SHAREABILITY_MASK) { + pr_info_once("GIC: using cache flushing for LPI property table\n"); + gic_rdists->flags |= RDIST_FLAGS_PROPBASE_NEEDS_FLUSHING; + } + + /* set PENDBASE */ + val = (page_to_phys(pend_page) | + GICR_PROPBASER_InnerShareable | + GICR_PROPBASER_WaWb); + + writeq_relaxed(val, rbase + GICR_PENDBASER); + + /* Enable LPIs */ + val = readl_relaxed(rbase + GICR_CTLR); + val |= GICR_CTLR_ENABLE_LPIS; + writel_relaxed(val, rbase + GICR_CTLR); + + /* Make sure the GIC has seen the above */ + dsb(sy); +} + +static void its_cpu_init_collection(void) +{ + struct its_node *its; + int cpu; + + spin_lock(&its_lock); + cpu = smp_processor_id(); + + list_for_each_entry(its, &its_nodes, entry) { + u64 target; + + /* + * We now have to bind each collection to its target + * redistributor. + */ + if (readq_relaxed(its->base + GITS_TYPER) & GITS_TYPER_PTA) { + /* + * This ITS wants the physical address of the + * redistributor. + */ + target = gic_data_rdist()->phys_base; + } else { + /* + * This ITS wants a linear CPU number. 
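
A stand-alone sketch of the target-address decision made just above: GITS_TYPER.PTA selects between a physical redistributor address and a linear CPU number taken from GICR_TYPER. Register values are hypothetical and bit positions assumed (see the GICv3 spec):

#include <stdint.h>
#include <stdio.h>

#define GITS_TYPER_PTA           (1ULL << 19)          /* bit position assumed */
#define GICR_TYPER_CPU_NUMBER(r) (((r) >> 8) & 0xffff)

int main(void)
{
	uint64_t gits_typer = 0;          /* hypothetical: PTA clear */
	uint64_t gicr_typer = 3ULL << 8;  /* hypothetical: CPU number 3 */
	uint64_t target;

	if (gits_typer & GITS_TYPER_PTA)
		target = 0x8100000ULL;    /* redistributor physical base */
	else
		target = GICR_TYPER_CPU_NUMBER(gicr_typer);

	printf("MAPC target_address = %#llx\n", (unsigned long long)target);
	return 0;
}
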
+ */ + target = readq_relaxed(gic_data_rdist_rd_base() + GICR_TYPER); + target = GICR_TYPER_CPU_NUMBER(target); + } + + /* Perform collection mapping */ + its->collections[cpu].target_address = target; + its->collections[cpu].col_id = cpu; + + its_send_mapc(its, &its->collections[cpu], 1); + its_send_invall(its, &its->collections[cpu]); + } + + spin_unlock(&its_lock); +} + +static struct its_device *its_find_device(struct its_node *its, u32 dev_id) +{ + struct its_device *its_dev = NULL, *tmp; + + raw_spin_lock(&its->lock); + + list_for_each_entry(tmp, &its->its_device_list, entry) { + if (tmp->device_id == dev_id) { + its_dev = tmp; + break; + } + } + + raw_spin_unlock(&its->lock); + + return its_dev; +} + +static struct its_device *its_create_device(struct its_node *its, u32 dev_id, + int nvecs) +{ + struct its_device *dev; + unsigned long *lpi_map; + void *itt; + int lpi_base; + int nr_lpis; + int cpu; + int sz; + + dev = kzalloc(sizeof(*dev), GFP_KERNEL); + sz = nvecs * its->ite_size; + sz = max(sz, ITS_ITT_ALIGN) + ITS_ITT_ALIGN - 1; + itt = kmalloc(sz, GFP_KERNEL); + lpi_map = its_lpi_alloc_chunks(nvecs, &lpi_base, &nr_lpis); + + if (!dev || !itt || !lpi_map) { + kfree(dev); + kfree(itt); + kfree(lpi_map); + return NULL; + } + + dev->its = its; + dev->itt = itt; + dev->nr_ites = nvecs; + dev->lpi_map = lpi_map; + dev->lpi_base = lpi_base; + dev->nr_lpis = nr_lpis; + dev->device_id = dev_id; + INIT_LIST_HEAD(&dev->entry); + + raw_spin_lock(&its->lock); + list_add(&dev->entry, &its->its_device_list); + raw_spin_unlock(&its->lock); + + /* Bind the device to the first possible CPU */ + cpu = cpumask_first(cpu_online_mask); + dev->collection = &its->collections[cpu]; + + /* Map device to its ITT */ + its_send_mapd(dev, 1); + + return dev; +} + +static void its_free_device(struct its_device *its_dev) +{ + raw_spin_lock(&its_dev->its->lock); + list_del(&its_dev->entry); + raw_spin_unlock(&its_dev->its->lock); + kfree(its_dev->itt); + kfree(its_dev); +} + +static int its_alloc_device_irq(struct its_device *dev, irq_hw_number_t *hwirq) +{ + int idx; + + idx = find_first_zero_bit(dev->lpi_map, dev->nr_lpis); + if (idx == dev->nr_lpis) + return -ENOSPC; + + *hwirq = dev->lpi_base + idx; + set_bit(idx, dev->lpi_map); + + /* Map the GIC irq ID to the device */ + its_send_mapvi(dev, *hwirq, idx); + + return 0; +} + +static int its_msi_prepare(struct irq_domain *domain, struct device *dev, + int nvec, msi_alloc_info_t *info) +{ + struct pci_dev *pdev; + struct its_node *its; + u32 dev_id; + struct its_device *its_dev; + + if (!dev_is_pci(dev)) + return -EINVAL; + + pdev = to_pci_dev(dev); + dev_id = PCI_DEVID(pdev->bus->number, pdev->devfn); + its = domain->parent->host_data; + + its_dev = its_find_device(its, dev_id); + if (WARN_ON(its_dev)) + return -EINVAL; + + its_dev = its_create_device(its, dev_id, nvec); + if (!its_dev) + return -ENOMEM; + + dev_dbg(&pdev->dev, "ITT %d entries, %d bits\n", nvec, ilog2(nvec)); + + info->scratchpad[0].ptr = its_dev; + info->scratchpad[1].ptr = dev; + return 0; +} + +static struct msi_domain_ops its_pci_msi_ops = { + .msi_prepare = its_msi_prepare, +}; + +static struct msi_domain_info its_pci_msi_domain_info = { + .flags = (MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS | + MSI_FLAG_MULTI_PCI_MSI | MSI_FLAG_PCI_MSIX), + .ops = &its_pci_msi_ops, + .chip = &its_msi_irq_chip, +}; + +static int its_irq_gic_domain_alloc(struct irq_domain *domain, + unsigned int virq, + irq_hw_number_t hwirq) +{ + struct of_phandle_args args; + + args.np = 
domain->parent->of_node; + args.args_count = 3; + args.args[0] = GIC_IRQ_TYPE_LPI; + args.args[1] = hwirq; + args.args[2] = IRQ_TYPE_EDGE_RISING; + + return irq_domain_alloc_irqs_parent(domain, virq, 1, &args); +} + +static int its_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *args) +{ + msi_alloc_info_t *info = args; + struct its_device *its_dev = info->scratchpad[0].ptr; + irq_hw_number_t hwirq; + int err; + int i; + + for (i = 0; i < nr_irqs; i++) { + err = its_alloc_device_irq(its_dev, &hwirq); + if (err) + return err; + + err = its_irq_gic_domain_alloc(domain, virq + i, hwirq); + if (err) + return err; + + irq_domain_set_hwirq_and_chip(domain, virq + i, + hwirq, &its_irq_chip, its_dev); + dev_dbg(info->scratchpad[1].ptr, "ID:%d pID:%d vID:%d\n", + (int)(hwirq - its_dev->lpi_base), (int)hwirq, virq + i); + } + + return 0; +} + +static void its_irq_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + struct irq_data *d = irq_domain_get_irq_data(domain, virq); + struct its_device *its_dev = irq_data_get_irq_chip_data(d); + int i; + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *data = irq_domain_get_irq_data(domain, + virq + i); + int event = its_get_event_id(data); + + /* Stop the delivery of interrupts */ + its_send_discard(its_dev, event); + + /* Mark interrupt index as unused */ + clear_bit(event, its_dev->lpi_map); + + /* Nuke the entry in the domain */ + irq_domain_reset_irq_data(d); + } + + /* If all interrupts have been freed, start mopping the floor */ + if (bitmap_empty(its_dev->lpi_map, its_dev->nr_lpis)) { + its_lpi_free(its_dev->lpi_map, + its_dev->lpi_base, + its_dev->nr_lpis); + + /* Unmap device/itt */ + its_send_mapd(its_dev, 0); + its_free_device(its_dev); + } + + irq_domain_free_irqs_parent(domain, virq, nr_irqs); +} + +static const struct irq_domain_ops its_domain_ops = { + .alloc = its_irq_domain_alloc, + .free = its_irq_domain_free, +}; + +static int its_probe(struct device_node *node, struct irq_domain *parent) +{ + struct resource res; + struct its_node *its; + void __iomem *its_base; + u32 val; + u64 baser, tmp; + int err; + + err = of_address_to_resource(node, 0, &res); + if (err) { + pr_warn("%s: no regs?\n", node->full_name); + return -ENXIO; + } + + its_base = ioremap(res.start, resource_size(&res)); + if (!its_base) { + pr_warn("%s: unable to map registers\n", node->full_name); + return -ENOMEM; + } + + val = readl_relaxed(its_base + GITS_PIDR2) & GIC_PIDR2_ARCH_MASK; + if (val != 0x30 && val != 0x40) { + pr_warn("%s: no ITS detected, giving up\n", node->full_name); + err = -ENODEV; + goto out_unmap; + } + + pr_info("ITS: %s\n", node->full_name); + + its = kzalloc(sizeof(*its), GFP_KERNEL); + if (!its) { + err = -ENOMEM; + goto out_unmap; + } + + raw_spin_lock_init(&its->lock); + INIT_LIST_HEAD(&its->entry); + INIT_LIST_HEAD(&its->its_device_list); + its->base = its_base; + its->phys_base = res.start; + its->msi_chip.of_node = node; + its->ite_size = ((readl_relaxed(its_base + GITS_TYPER) >> 4) & 0xf) + 1; + + its->cmd_base = kzalloc(ITS_CMD_QUEUE_SZ, GFP_KERNEL); + if (!its->cmd_base) { + err = -ENOMEM; + goto out_free_its; + } + its->cmd_write = its->cmd_base; + + err = its_alloc_tables(its); + if (err) + goto out_free_cmd; + + err = its_alloc_collections(its); + if (err) + goto out_free_tables; + + baser = (virt_to_phys(its->cmd_base) | + GITS_CBASER_WaWb | + GITS_CBASER_InnerShareable | + (ITS_CMD_QUEUE_SZ / SZ_4K - 1) | + GITS_CBASER_VALID); + + writeq_relaxed(baser, 
its->base + GITS_CBASER); + tmp = readq_relaxed(its->base + GITS_CBASER); + writeq_relaxed(0, its->base + GITS_CWRITER); + writel_relaxed(1, its->base + GITS_CTLR); + + if ((tmp ^ baser) & GITS_BASER_SHAREABILITY_MASK) { + pr_info("ITS: using cache flushing for cmd queue\n"); + its->flags |= ITS_FLAGS_CMDQ_NEEDS_FLUSHING; + } + + if (of_property_read_bool(its->msi_chip.of_node, "msi-controller")) { + its->domain = irq_domain_add_tree(NULL, &its_domain_ops, its); + if (!its->domain) { + err = -ENOMEM; + goto out_free_tables; + } + + its->domain->parent = parent; + + its->msi_chip.domain = pci_msi_create_irq_domain(node, + &its_pci_msi_domain_info, + its->domain); + if (!its->msi_chip.domain) { + err = -ENOMEM; + goto out_free_domains; + } + + err = of_pci_msi_chip_add(&its->msi_chip); + if (err) + goto out_free_domains; + } + + spin_lock(&its_lock); + list_add(&its->entry, &its_nodes); + spin_unlock(&its_lock); + + return 0; + +out_free_domains: + if (its->msi_chip.domain) + irq_domain_remove(its->msi_chip.domain); + if (its->domain) + irq_domain_remove(its->domain); +out_free_tables: + its_free_tables(its); +out_free_cmd: + kfree(its->cmd_base); +out_free_its: + kfree(its); +out_unmap: + iounmap(its_base); + pr_err("ITS: failed probing %s (%d)\n", node->full_name, err); + return err; +} + +static bool gic_rdists_supports_plpis(void) +{ + return !!(readl_relaxed(gic_data_rdist_rd_base() + GICR_TYPER) & GICR_TYPER_PLPIS); +} + +int its_cpu_init(void) +{ + if (!gic_rdists_supports_plpis()) { + pr_info("CPU%d: LPIs not supported\n", smp_processor_id()); + return -ENXIO; + } + + if (!list_empty(&its_nodes)) { + its_cpu_init_lpis(); + its_cpu_init_collection(); + } + + return 0; +} + +static struct of_device_id its_device_id[] = { + { .compatible = "arm,gic-v3-its", }, + {}, +}; + +int its_init(struct device_node *node, struct rdists *rdists, + struct irq_domain *parent_domain) +{ + struct device_node *np; + + for (np = of_find_matching_node(node, its_device_id); np; + np = of_find_matching_node(np, its_device_id)) { + its_probe(np, parent_domain); + } + + if (list_empty(&its_nodes)) { + pr_warn("ITS: No ITS available, not enabling LPIs\n"); + return -ENXIO; + } + + gic_rdists = rdists; + gic_root_node = node; + + its_alloc_lpi_tables(); + its_lpi_init(rdists->id_bits); + + return 0; +} diff --git a/drivers/irqchip/irq-gic-v3.c b/drivers/irqchip/irq-gic-v3.c index aa17ae805a70..1a146ccee701 100644 --- a/drivers/irqchip/irq-gic-v3.c +++ b/drivers/irqchip/irq-gic-v3.c @@ -34,20 +34,25 @@ #include "irq-gic-common.h" #include "irqchip.h" +struct redist_region { + void __iomem *redist_base; + phys_addr_t phys_base; +}; + struct gic_chip_data { void __iomem *dist_base; - void __iomem **redist_base; - void __iomem * __percpu *rdist; + struct redist_region *redist_regions; + struct rdists rdists; struct irq_domain *domain; u64 redist_stride; - u32 redist_regions; + u32 nr_redist_regions; unsigned int irq_nr; }; static struct gic_chip_data gic_data __read_mostly; -#define gic_data_rdist() (this_cpu_ptr(gic_data.rdist)) -#define gic_data_rdist_rd_base() (*gic_data_rdist()) +#define gic_data_rdist() (this_cpu_ptr(gic_data.rdists.rdist)) +#define gic_data_rdist_rd_base() (gic_data_rdist()->rd_base) #define gic_data_rdist_sgi_base() (gic_data_rdist_rd_base() + SZ_64K) /* Our default, arbitrary priority value. Linux only uses one anyway. 
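
A worked example of the GITS_CBASER value programmed in its_probe() above, stand-alone, with a hypothetical command-queue address; the size field counts 4K pages minus one, and the cacheability/shareability bits are omitted for brevity:

#include <stdint.h>
#include <stdio.h>

#define CBASER_VALID (1ULL << 63)

int main(void)
{
	uint64_t phys = 0x40000000ULL;  /* hypothetical 64K command queue */
	uint64_t size = 64 / 4 - 1;     /* queue size in 4K pages, minus one */
	uint64_t baser = phys | size | CBASER_VALID;

	/* WaWb/InnerShareable attribute bits omitted for brevity */
	printf("GITS_CBASER = %#llx (%llu+1 pages)\n",
	       (unsigned long long)baser, (unsigned long long)size);
	return 0;
}
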
*/ @@ -71,9 +76,6 @@ static inline void __iomem *gic_dist_base(struct irq_data *d) if (d->hwirq <= 1023) /* SPI -> dist_base */ return gic_data.dist_base; - if (d->hwirq >= 8192) - BUG(); /* LPI Detected!!! */ - return NULL; } @@ -271,11 +273,11 @@ static asmlinkage void __exception_irq_entry gic_handle_irq(struct pt_regs *regs do { irqnr = gic_read_iar(); - if (likely(irqnr > 15 && irqnr < 1020)) { + if (likely(irqnr > 15 && irqnr < 1020) || irqnr >= 8192) { int err; err = handle_domain_irq(gic_data.domain, irqnr, regs); if (err) { - WARN_ONCE(true, "Unexpected SPI received!\n"); + WARN_ONCE(true, "Unexpected interrupt received!\n"); gic_write_eoir(irqnr); } continue; @@ -333,8 +335,8 @@ static int gic_populate_rdist(void) MPIDR_AFFINITY_LEVEL(mpidr, 1) << 8 | MPIDR_AFFINITY_LEVEL(mpidr, 0)); - for (i = 0; i < gic_data.redist_regions; i++) { - void __iomem *ptr = gic_data.redist_base[i]; + for (i = 0; i < gic_data.nr_redist_regions; i++) { + void __iomem *ptr = gic_data.redist_regions[i].redist_base; u32 reg; reg = readl_relaxed(ptr + GICR_PIDR2) & GIC_PIDR2_ARCH_MASK; @@ -347,10 +349,13 @@ static int gic_populate_rdist(void) do { typer = readq_relaxed(ptr + GICR_TYPER); if ((typer >> 32) == aff) { + u64 offset = ptr - gic_data.redist_regions[i].redist_base; gic_data_rdist_rd_base() = ptr; - pr_info("CPU%d: found redistributor %llx @%p\n", + gic_data_rdist()->phys_base = gic_data.redist_regions[i].phys_base + offset; + pr_info("CPU%d: found redistributor %llx region %d:%pa\n", smp_processor_id(), - (unsigned long long)mpidr, ptr); + (unsigned long long)mpidr, + i, &gic_data_rdist()->phys_base); return 0; } @@ -385,6 +390,11 @@ static void gic_cpu_sys_reg_init(void) gic_write_grpen1(1); } +static int gic_dist_supports_lpis(void) +{ + return !!(readl_relaxed(gic_data.dist_base + GICD_TYPER) & GICD_TYPER_LPIS); +} + static void gic_cpu_init(void) { void __iomem *rbase; @@ -399,6 +409,10 @@ static void gic_cpu_init(void) gic_cpu_config(rbase, gic_redist_wait_for_rwp); + /* Give LPIs a spin */ + if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis()) + its_cpu_init(); + /* initialise system registers */ gic_cpu_sys_reg_init(); } @@ -585,26 +599,43 @@ static struct irq_chip gic_chip = { .irq_set_affinity = gic_set_affinity, }; +#define GIC_ID_NR (1U << gic_data.rdists.id_bits) + static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw) { /* SGIs are private to the core kernel */ if (hw < 16) return -EPERM; + /* Nothing here */ + if (hw >= gic_data.irq_nr && hw < 8192) + return -EPERM; + /* Off limits */ + if (hw >= GIC_ID_NR) + return -EPERM; + /* PPIs */ if (hw < 32) { irq_set_percpu_devid(irq); - irq_set_chip_and_handler(irq, &gic_chip, - handle_percpu_devid_irq); + irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data, + handle_percpu_devid_irq, NULL, NULL); set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN); } /* SPIs */ if (hw >= 32 && hw < gic_data.irq_nr) { - irq_set_chip_and_handler(irq, &gic_chip, - handle_fasteoi_irq); + irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data, + handle_fasteoi_irq, NULL, NULL); set_irq_flags(irq, IRQF_VALID | IRQF_PROBE); } - irq_set_chip_data(irq, d->host_data); + /* LPIs */ + if (hw >= 8192 && hw < GIC_ID_NR) { + if (!gic_dist_supports_lpis()) + return -EPERM; + irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data, + handle_fasteoi_irq, NULL, NULL); + set_irq_flags(irq, IRQF_VALID); + } + return 0; } @@ -625,6 +656,9 @@ static int gic_irq_domain_xlate(struct irq_domain *d, case 1: /* PPI */ *out_hwirq = 
intspec[1] + 16; break; + case GIC_IRQ_TYPE_LPI: /* LPI */ + *out_hwirq = intspec[1]; + break; default: return -EINVAL; } @@ -633,17 +667,50 @@ static int gic_irq_domain_xlate(struct irq_domain *d, return 0; } +static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs, void *arg) +{ + int i, ret; + irq_hw_number_t hwirq; + unsigned int type = IRQ_TYPE_NONE; + struct of_phandle_args *irq_data = arg; + + ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args, + irq_data->args_count, &hwirq, &type); + if (ret) + return ret; + + for (i = 0; i < nr_irqs; i++) + gic_irq_domain_map(domain, virq + i, hwirq + i); + + return 0; +} + +static void gic_irq_domain_free(struct irq_domain *domain, unsigned int virq, + unsigned int nr_irqs) +{ + int i; + + for (i = 0; i < nr_irqs; i++) { + struct irq_data *d = irq_domain_get_irq_data(domain, virq + i); + irq_set_handler(virq + i, NULL); + irq_domain_reset_irq_data(d); + } +} + static const struct irq_domain_ops gic_irq_domain_ops = { - .map = gic_irq_domain_map, .xlate = gic_irq_domain_xlate, + .alloc = gic_irq_domain_alloc, + .free = gic_irq_domain_free, }; static int __init gic_of_init(struct device_node *node, struct device_node *parent) { void __iomem *dist_base; - void __iomem **redist_base; + struct redist_region *rdist_regs; u64 redist_stride; - u32 redist_regions; + u32 nr_redist_regions; + u32 typer; u32 reg; int gic_irqs; int err; @@ -664,54 +731,63 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare goto out_unmap_dist; } - if (of_property_read_u32(node, "#redistributor-regions", &redist_regions)) - redist_regions = 1; + if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions)) + nr_redist_regions = 1; - redist_base = kzalloc(sizeof(*redist_base) * redist_regions, GFP_KERNEL); - if (!redist_base) { + rdist_regs = kzalloc(sizeof(*rdist_regs) * nr_redist_regions, GFP_KERNEL); + if (!rdist_regs) { err = -ENOMEM; goto out_unmap_dist; } - for (i = 0; i < redist_regions; i++) { - redist_base[i] = of_iomap(node, 1 + i); - if (!redist_base[i]) { + for (i = 0; i < nr_redist_regions; i++) { + struct resource res; + int ret; + + ret = of_address_to_resource(node, 1 + i, &res); + rdist_regs[i].redist_base = of_iomap(node, 1 + i); + if (ret || !rdist_regs[i].redist_base) { pr_err("%s: couldn't map region %d\n", node->full_name, i); err = -ENODEV; goto out_unmap_rdist; } + rdist_regs[i].phys_base = res.start; } if (of_property_read_u64(node, "redistributor-stride", &redist_stride)) redist_stride = 0; gic_data.dist_base = dist_base; - gic_data.redist_base = redist_base; - gic_data.redist_regions = redist_regions; + gic_data.redist_regions = rdist_regs; + gic_data.nr_redist_regions = nr_redist_regions; gic_data.redist_stride = redist_stride; /* * Find out how many interrupts are supported. 
@@ -664,54 +731,63 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
 		goto out_unmap_dist;
 	}
 
-	if (of_property_read_u32(node, "#redistributor-regions", &redist_regions))
-		redist_regions = 1;
+	if (of_property_read_u32(node, "#redistributor-regions", &nr_redist_regions))
+		nr_redist_regions = 1;
 
-	redist_base = kzalloc(sizeof(*redist_base) * redist_regions, GFP_KERNEL);
-	if (!redist_base) {
+	rdist_regs = kzalloc(sizeof(*rdist_regs) * nr_redist_regions, GFP_KERNEL);
+	if (!rdist_regs) {
 		err = -ENOMEM;
 		goto out_unmap_dist;
 	}
 
-	for (i = 0; i < redist_regions; i++) {
-		redist_base[i] = of_iomap(node, 1 + i);
-		if (!redist_base[i]) {
+	for (i = 0; i < nr_redist_regions; i++) {
+		struct resource res;
+		int ret;
+
+		ret = of_address_to_resource(node, 1 + i, &res);
+		rdist_regs[i].redist_base = of_iomap(node, 1 + i);
+		if (ret || !rdist_regs[i].redist_base) {
 			pr_err("%s: couldn't map region %d\n",
 			       node->full_name, i);
 			err = -ENODEV;
 			goto out_unmap_rdist;
 		}
+		rdist_regs[i].phys_base = res.start;
 	}
 
 	if (of_property_read_u64(node, "redistributor-stride", &redist_stride))
 		redist_stride = 0;
 
 	gic_data.dist_base = dist_base;
-	gic_data.redist_base = redist_base;
-	gic_data.redist_regions = redist_regions;
+	gic_data.redist_regions = rdist_regs;
+	gic_data.nr_redist_regions = nr_redist_regions;
 	gic_data.redist_stride = redist_stride;
 
 	/*
	 * Find out how many interrupts are supported.
	 * The GIC only supports up to 1020 interrupt sources (SGI+PPI+SPI)
	 */
-	gic_irqs = readl_relaxed(gic_data.dist_base + GICD_TYPER) & 0x1f;
-	gic_irqs = (gic_irqs + 1) * 32;
+	typer = readl_relaxed(gic_data.dist_base + GICD_TYPER);
+	gic_data.rdists.id_bits = GICD_TYPER_ID_BITS(typer);
+	gic_irqs = GICD_TYPER_IRQS(typer);
 	if (gic_irqs > 1020)
 		gic_irqs = 1020;
 	gic_data.irq_nr = gic_irqs;
 
 	gic_data.domain = irq_domain_add_tree(node, &gic_irq_domain_ops,
 					      &gic_data);
-	gic_data.rdist = alloc_percpu(typeof(*gic_data.rdist));
+	gic_data.rdists.rdist = alloc_percpu(typeof(*gic_data.rdists.rdist));
 
-	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdist)) {
+	if (WARN_ON(!gic_data.domain) || WARN_ON(!gic_data.rdists.rdist)) {
 		err = -ENOMEM;
 		goto out_free;
 	}
 
 	set_handle_irq(gic_handle_irq);
 
+	if (IS_ENABLED(CONFIG_ARM_GIC_V3_ITS) && gic_dist_supports_lpis())
+		its_init(node, &gic_data.rdists, gic_data.domain);
+
 	gic_smp_init();
 	gic_dist_init();
 	gic_cpu_init();
@@ -722,12 +798,12 @@ static int __init gic_of_init(struct device_node *node, struct device_node *pare
 out_free:
 	if (gic_data.domain)
 		irq_domain_remove(gic_data.domain);
-	free_percpu(gic_data.rdist);
+	free_percpu(gic_data.rdists.rdist);
 out_unmap_rdist:
-	for (i = 0; i < redist_regions; i++)
-		if (redist_base[i])
-			iounmap(redist_base[i]);
-	kfree(redist_base);
+	for (i = 0; i < nr_redist_regions; i++)
+		if (rdist_regs[i].redist_base)
+			iounmap(rdist_regs[i].redist_base);
+	kfree(rdist_regs);
 out_unmap_dist:
 	iounmap(dist_base);
 	return err;
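The GICD_TYPER_IRQS()/GICD_TYPER_ID_BITS() helpers used above decode two fields of the distributor's GICD_TYPER register; per the GICv3 architecture, ITLinesNumber sits in bits [4:0] and IDbits in bits [23:19], both stored as value-minus-one. A minimal sketch of that decoding (plain C, field layout as just described):

#include <stdint.h>
#include <stdio.h>

static unsigned int typer_irqs(uint32_t typer)
{
	/* 32 interrupt lines per ITLinesNumber step, field is N-1 */
	return ((typer & 0x1f) + 1) * 32;
}

static unsigned int typer_id_bits(uint32_t typer)
{
	/* number of interrupt identifier bits, field is N-1 */
	return ((typer >> 19) & 0x1f) + 1;
}

int main(void)
{
	uint32_t typer = (15 << 19) | 0x1e;	/* 16 ID bits, 31 line groups */

	/* 992 SGIs+PPIs+SPIs, 65536 possible IDs once LPIs are counted */
	printf("irqs=%u id_space=%u\n", typer_irqs(typer),
	       1u << typer_id_bits(typer));
	return 0;
}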
diff --git a/drivers/irqchip/irq-gic.c b/drivers/irqchip/irq-gic.c
index 7f9be0785c6a..d617ee5a3d8a 100644
--- a/drivers/irqchip/irq-gic.c
+++ b/drivers/irqchip/irq-gic.c
@@ -788,17 +788,16 @@ static int gic_irq_domain_map(struct irq_domain *d, unsigned int irq,
 {
 	if (hw < 32) {
 		irq_set_percpu_devid(irq);
-		irq_set_chip_and_handler(irq, &gic_chip,
-					 handle_percpu_devid_irq);
+		irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
+				    handle_percpu_devid_irq, NULL, NULL);
 		set_irq_flags(irq, IRQF_VALID | IRQF_NOAUTOEN);
 	} else {
-		irq_set_chip_and_handler(irq, &gic_chip,
-					 handle_fasteoi_irq);
+		irq_domain_set_info(d, irq, hw, &gic_chip, d->host_data,
+				    handle_fasteoi_irq, NULL, NULL);
 		set_irq_flags(irq, IRQF_VALID | IRQF_PROBE);
 
 		gic_routable_irq_domain_ops->map(d, irq, hw);
 	}
-	irq_set_chip_data(irq, d->host_data);
 	return 0;
 }
 
@@ -858,6 +857,31 @@ static struct notifier_block gic_cpu_notifier = {
 };
 #endif
 
+static int gic_irq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				unsigned int nr_irqs, void *arg)
+{
+	int i, ret;
+	irq_hw_number_t hwirq;
+	unsigned int type = IRQ_TYPE_NONE;
+	struct of_phandle_args *irq_data = arg;
+
+	ret = gic_irq_domain_xlate(domain, irq_data->np, irq_data->args,
+				   irq_data->args_count, &hwirq, &type);
+	if (ret)
+		return ret;
+
+	for (i = 0; i < nr_irqs; i++)
+		gic_irq_domain_map(domain, virq + i, hwirq + i);
+
+	return 0;
+}
+
+static const struct irq_domain_ops gic_irq_domain_hierarchy_ops = {
+	.xlate = gic_irq_domain_xlate,
+	.alloc = gic_irq_domain_alloc,
+	.free = irq_domain_free_irqs_top,
+};
+
 static const struct irq_domain_ops gic_irq_domain_ops = {
 	.map = gic_irq_domain_map,
 	.unmap = gic_irq_domain_unmap,
@@ -948,18 +972,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
 		gic_cpu_map[i] = 0xff;
 
 	/*
-	 * For primary GICs, skip over SGIs.
-	 * For secondary GICs, skip over PPIs, too.
-	 */
-	if (gic_nr == 0 && (irq_start & 31) > 0) {
-		hwirq_base = 16;
-		if (irq_start != -1)
-			irq_start = (irq_start & ~31) + 16;
-	} else {
-		hwirq_base = 32;
-	}
-
-	/*
 	 * Find out how many interrupts are supported.
 	 * The GIC only supports up to 1020 interrupt sources.
 	 */
@@ -969,10 +981,31 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
 		gic_irqs = 1020;
 	gic->gic_irqs = gic_irqs;
 
-	gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
+	if (node) {		/* DT case */
+		const struct irq_domain_ops *ops = &gic_irq_domain_hierarchy_ops;
+
+		if (!of_property_read_u32(node, "arm,routable-irqs",
+					  &nr_routable_irqs)) {
+			ops = &gic_irq_domain_ops;
+			gic_irqs = nr_routable_irqs;
+		}
+
+		gic->domain = irq_domain_add_linear(node, gic_irqs, ops, gic);
+	} else {		/* Non-DT case */
+		/*
+		 * For primary GICs, skip over SGIs.
+		 * For secondary GICs, skip over PPIs, too.
+		 */
+		if (gic_nr == 0 && (irq_start & 31) > 0) {
+			hwirq_base = 16;
+			if (irq_start != -1)
+				irq_start = (irq_start & ~31) + 16;
+		} else {
+			hwirq_base = 32;
+		}
+
+		gic_irqs -= hwirq_base; /* calculate # of irqs to allocate */
 
-	if (of_property_read_u32(node, "arm,routable-irqs",
-				 &nr_routable_irqs)) {
 		irq_base = irq_alloc_descs(irq_start, 16, gic_irqs,
 					   numa_node_id());
 		if (IS_ERR_VALUE(irq_base)) {
@@ -983,10 +1016,6 @@ void __init gic_init_bases(unsigned int gic_nr, int irq_start,
 
 		gic->domain = irq_domain_add_legacy(node, gic_irqs, irq_base,
 					hwirq_base, &gic_irq_domain_ops, gic);
-	} else {
-		gic->domain = irq_domain_add_linear(node, nr_routable_irqs,
-						    &gic_irq_domain_ops,
-						    gic);
 	}
 
 	if (WARN_ON(!gic->domain))
@@ -1037,6 +1066,10 @@ gic_of_init(struct device_node *node, struct device_node *parent)
 		irq = irq_of_parse_and_map(node, 0);
 		gic_cascade_irq(gic_cnt, irq);
 	}
+
+	if (IS_ENABLED(CONFIG_ARM_GIC_V2M))
+		gicv2m_of_init(node, gic_data[gic_cnt].domain);
+
 	gic_cnt++;
 	return 0;
 }
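The hierarchy ops added above let child interrupt controllers stack on top of the GIC, which is exactly what the MediaTek sysirq driver introduced next does: it keeps only a polarity register for itself and forwards mask/unmask/eoi to the parent chip through the irq_chip_*_parent() helpers. A toy model of that delegation pattern (plain C; the ex_* types are invented stand-ins for struct irq_chip and struct irq_data):

#include <stdio.h>

struct ex_irq_data;

struct ex_irq_chip {
	const char *name;
	void (*irq_mask)(struct ex_irq_data *d);
};

struct ex_irq_data {
	struct ex_irq_chip *chip;
	struct ex_irq_data *parent_data;	/* next domain up the stack */
};

/* Child chips with no mask register of their own simply forward,
 * which is what the kernel's irq_chip_mask_parent() does for real. */
static void ex_mask_parent(struct ex_irq_data *d)
{
	d->parent_data->chip->irq_mask(d->parent_data);
}

static void ex_gic_mask(struct ex_irq_data *d)
{
	printf("GIC: masking at the root chip\n");
}

int main(void)
{
	struct ex_irq_chip gic = { "GIC", ex_gic_mask };
	struct ex_irq_chip sysirq = { "MT_SYSIRQ", ex_mask_parent };
	struct ex_irq_data gic_d = { &gic, NULL };
	struct ex_irq_data sys_d = { &sysirq, &gic_d };

	sys_d.chip->irq_mask(&sys_d);	/* ends up in ex_gic_mask() */
	return 0;
}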
diff --git a/drivers/irqchip/irq-mtk-sysirq.c b/drivers/irqchip/irq-mtk-sysirq.c
new file mode 100644
index 000000000000..7e342df6a62f
--- /dev/null
+++ b/drivers/irqchip/irq-mtk-sysirq.c
@@ -0,0 +1,163 @@
+/*
+ * Copyright (c) 2014 MediaTek Inc.
+ * Author: Joe.C <yingjoe.chen@mediatek.com>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/irq.h>
+#include <linux/irqdomain.h>
+#include <linux/of.h>
+#include <linux/of_irq.h>
+#include <linux/of_address.h>
+#include <linux/io.h>
+#include <linux/slab.h>
+#include <linux/spinlock.h>
+
+#include "irqchip.h"
+
+#define MT6577_SYS_INTPOL_NUM	(224)
+
+struct mtk_sysirq_chip_data {
+	spinlock_t lock;
+	void __iomem *intpol_base;
+};
+
+static int mtk_sysirq_set_type(struct irq_data *data, unsigned int type)
+{
+	irq_hw_number_t hwirq = data->hwirq;
+	struct mtk_sysirq_chip_data *chip_data = data->chip_data;
+	u32 offset, reg_index, value;
+	unsigned long flags;
+	int ret;
+
+	offset = hwirq & 0x1f;
+	reg_index = hwirq >> 5;
+
+	spin_lock_irqsave(&chip_data->lock, flags);
+	value = readl_relaxed(chip_data->intpol_base + reg_index * 4);
+	if (type == IRQ_TYPE_LEVEL_LOW || type == IRQ_TYPE_EDGE_FALLING) {
+		if (type == IRQ_TYPE_LEVEL_LOW)
+			type = IRQ_TYPE_LEVEL_HIGH;
+		else
+			type = IRQ_TYPE_EDGE_RISING;
+		value |= (1 << offset);
+	} else {
+		value &= ~(1 << offset);
+	}
+	writel(value, chip_data->intpol_base + reg_index * 4);
+
+	data = data->parent_data;
+	ret = data->chip->irq_set_type(data, type);
+	spin_unlock_irqrestore(&chip_data->lock, flags);
+	return ret;
+}
+
+static struct irq_chip mtk_sysirq_chip = {
+	.name			= "MT_SYSIRQ",
+	.irq_mask		= irq_chip_mask_parent,
+	.irq_unmask		= irq_chip_unmask_parent,
+	.irq_eoi		= irq_chip_eoi_parent,
+	.irq_set_type		= mtk_sysirq_set_type,
+	.irq_retrigger		= irq_chip_retrigger_hierarchy,
+	.irq_set_affinity	= irq_chip_set_affinity_parent,
+};
+
+static int mtk_sysirq_domain_xlate(struct irq_domain *d,
+				   struct device_node *controller,
+				   const u32 *intspec, unsigned int intsize,
+				   unsigned long *out_hwirq,
+				   unsigned int *out_type)
+{
+	if (intsize != 3)
+		return -EINVAL;
+
+	/* sysirq doesn't support PPI */
+	if (intspec[0])
+		return -EINVAL;
+
+	*out_hwirq = intspec[1];
+	*out_type = intspec[2] & IRQ_TYPE_SENSE_MASK;
+	return 0;
+}
+
+static int mtk_sysirq_domain_alloc(struct irq_domain *domain, unsigned int virq,
+				   unsigned int nr_irqs, void *arg)
+{
+	int i;
+	irq_hw_number_t hwirq;
+	struct of_phandle_args *irq_data = arg;
+	struct of_phandle_args gic_data = *irq_data;
+
+	if (irq_data->args_count != 3)
+		return -EINVAL;
+
+	/* sysirq doesn't support PPI */
+	if (irq_data->args[0])
+		return -EINVAL;
+
+	hwirq = irq_data->args[1];
+	for (i = 0; i < nr_irqs; i++)
+		irq_domain_set_hwirq_and_chip(domain, virq + i, hwirq + i,
+					      &mtk_sysirq_chip,
+					      domain->host_data);
+
+	gic_data.np = domain->parent->of_node;
+	return irq_domain_alloc_irqs_parent(domain, virq, nr_irqs, &gic_data);
+}
+
+static struct irq_domain_ops sysirq_domain_ops = {
+	.xlate = mtk_sysirq_domain_xlate,
+	.alloc = mtk_sysirq_domain_alloc,
+	.free = irq_domain_free_irqs_common,
+};
+
+static int __init mtk_sysirq_of_init(struct device_node *node,
+				     struct device_node *parent)
+{
+	struct irq_domain *domain, *domain_parent;
+	struct mtk_sysirq_chip_data *chip_data;
+	int ret = 0;
+
+	domain_parent = irq_find_host(parent);
+	if (!domain_parent) {
+		pr_err("mtk_sysirq: interrupt-parent not found\n");
+		return -EINVAL;
+	}
+
+	chip_data = kzalloc(sizeof(*chip_data), GFP_KERNEL);
+	if (!chip_data)
+		return -ENOMEM;
+
+	chip_data->intpol_base = of_io_request_and_map(node, 0, "intpol");
+	if (!chip_data->intpol_base) {
+		pr_err("mtk_sysirq: unable to map sysirq register\n");
+		ret = -ENOMEM;
+		goto out_free;
+	}
+
+	domain = irq_domain_add_hierarchy(domain_parent, 0,
+					  MT6577_SYS_INTPOL_NUM, node,
+					  &sysirq_domain_ops, chip_data);
+	if (!domain) {
+		ret = -ENOMEM;
+		goto out_unmap;
+	}
+	spin_lock_init(&chip_data->lock);
+
+	return 0;
+
+out_unmap:
+	iounmap(chip_data->intpol_base);
+out_free:
+	kfree(chip_data);
+	return ret;
+}
+IRQCHIP_DECLARE(mtk_sysirq, "mediatek,mt6577-sysirq", mtk_sysirq_of_init);
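The register arithmetic in mtk_sysirq_set_type() above packs one polarity bit per interrupt into 32-bit INTPOL words: hwirq N lives in word N >> 5 at bit N & 0x1f, and falling/low trigger types are inverted to rising/high before being handed to the parent GIC. The addressing part, as a small standalone check (plain C):

#include <stdio.h>

/* Same math as the driver: 32 polarity bits per 32-bit INTPOL register,
 * so hwirq N maps to word N/32 (byte offset word*4), bit N%32. */
static void intpol_locate(unsigned int hwirq, unsigned int *byte_off,
			  unsigned int *bit)
{
	*bit = hwirq & 0x1f;		/* bit within the register */
	*byte_off = (hwirq >> 5) * 4;	/* register index times 4 bytes */
}

int main(void)
{
	unsigned int off, bit;

	intpol_locate(71, &off, &bit);
	/* hwirq 71 -> INTPOL word 2 (byte offset 8), bit 7 */
	printf("byte offset %u, bit %u\n", off, bit);
	return 0;
}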
diff --git a/drivers/pci/Kconfig b/drivers/pci/Kconfig
index cced84233ac0..7a8f1c5e65af 100644
--- a/drivers/pci/Kconfig
+++ b/drivers/pci/Kconfig
@@ -67,7 +67,7 @@ config XEN_PCIDEV_FRONTEND
 config HT_IRQ
 	bool "Interrupts on hypertransport devices"
 	default y
-	depends on PCI && X86_LOCAL_APIC && X86_IO_APIC
+	depends on PCI && X86_LOCAL_APIC
 	help
 	   This allows native hypertransport devices to use interrupts.
 
@@ -110,13 +110,6 @@ config PCI_PASID
 
 	  If unsure, say N.
 
-config PCI_IOAPIC
-	bool "PCI IO-APIC hotplug support" if X86
-	depends on PCI
-	depends on ACPI
-	depends on X86_IO_APIC
-	default !X86
-
 config PCI_LABEL
 	def_bool y if (DMI || ACPI)
 	select NLS
diff --git a/drivers/pci/Makefile b/drivers/pci/Makefile
index e04fe2d9df3b..73e4af400a5a 100644
--- a/drivers/pci/Makefile
+++ b/drivers/pci/Makefile
@@ -13,8 +13,6 @@ obj-$(CONFIG_PCI_QUIRKS) += quirks.o
 # Build PCI Express stuff if needed
 obj-$(CONFIG_PCIEPORTBUS) += pcie/
 
-obj-$(CONFIG_PCI_IOAPIC) += ioapic.o
-
 # Build the PCI Hotplug drivers if we were asked to
 obj-$(CONFIG_HOTPLUG_PCI) += hotplug/
 ifdef CONFIG_HOTPLUG_PCI
diff --git a/drivers/pci/hotplug/ibmphp_core.c b/drivers/pci/hotplug/ibmphp_core.c
index 3efaf4c38528..96c5c729cdbc 100644
--- a/drivers/pci/hotplug/ibmphp_core.c
+++ b/drivers/pci/hotplug/ibmphp_core.c
@@ -36,6 +36,7 @@
 #include <linux/wait.h>
 #include "../pci.h"
 #include <asm/pci_x86.h>		/* for struct irq_routing_table */
+#include <asm/io_apic.h>
 #include "ibmphp.h"
 
 #define attn_on(sl)  ibmphp_hpc_writeslot (sl, HPC_SLOT_ATTNON)
@@ -155,13 +156,10 @@ int ibmphp_init_devno(struct slot **cur_slot)
 	for (loop = 0; loop < len; loop++) {
 		if ((*cur_slot)->number == rtable->slots[loop].slot &&
 		    (*cur_slot)->bus == rtable->slots[loop].bus) {
-			struct io_apic_irq_attr irq_attr;
-
 			(*cur_slot)->device = PCI_SLOT(rtable->slots[loop].devfn);
 			for (i = 0; i < 4; i++)
 				(*cur_slot)->irq[i] = IO_APIC_get_PCI_irq_vector((int) (*cur_slot)->bus,
-						(int) (*cur_slot)->device, i,
-						&irq_attr);
+						(int) (*cur_slot)->device, i);
 
 			debug("(*cur_slot)->irq[0] = %x\n",
 					(*cur_slot)->irq[0]);
diff --git a/drivers/pci/htirq.c b/drivers/pci/htirq.c
index a94dd2c4183a..7eb4109a3df4 100644
--- a/drivers/pci/htirq.c
+++ b/drivers/pci/htirq.c
@@ -23,20 +23,11 @@
  */
 static DEFINE_SPINLOCK(ht_irq_lock);
 
-struct ht_irq_cfg {
-	struct pci_dev *dev;
-	/* Update callback used to cope with buggy hardware */
-	ht_irq_update_t *update;
-	unsigned pos;
-	unsigned idx;
-	struct ht_irq_msg msg;
-};
-
-
 void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
 {
 	struct ht_irq_cfg *cfg = irq_get_handler_data(irq);
 	unsigned long flags;
+
 	spin_lock_irqsave(&ht_irq_lock, flags);
 	if (cfg->msg.address_lo != msg->address_lo) {
 		pci_write_config_byte(cfg->dev, cfg->pos + 2, cfg->idx);
@@ -55,6 +46,7 @@ void write_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
 void fetch_ht_irq_msg(unsigned int irq, struct ht_irq_msg *msg)
 {
 	struct ht_irq_cfg *cfg = irq_get_handler_data(irq);
+
 	*msg = cfg->msg;
 }
 
@@ -86,7 +78,6 @@ void unmask_ht_irq(struct irq_data *data)
  */
 int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
 {
-	struct ht_irq_cfg *cfg;
 	int max_irq, pos, irq;
 	unsigned long flags;
 	u32 data;
@@ -105,29 +96,9 @@ int __ht_create_irq(struct pci_dev *dev, int idx, ht_irq_update_t *update)
 	if (idx > max_irq)
 		return -EINVAL;
 
-	cfg = kmalloc(sizeof(*cfg), GFP_KERNEL);
-	if (!cfg)
-		return -ENOMEM;
-
-	cfg->dev = dev;
-	cfg->update = update;
-	cfg->pos = pos;
-	cfg->idx = 0x10 + (idx * 2);
-	/* Initialize msg to a value that will never match the first write. */
-	cfg->msg.address_lo = 0xffffffff;
-	cfg->msg.address_hi = 0xffffffff;
-
-	irq = irq_alloc_hwirq(dev_to_node(&dev->dev));
-	if (!irq) {
-		kfree(cfg);
-		return -EBUSY;
-	}
-	irq_set_handler_data(irq, cfg);
-
-	if (arch_setup_ht_irq(irq, dev) < 0) {
-		ht_destroy_irq(irq);
-		return -EBUSY;
-	}
+	irq = arch_setup_ht_irq(idx, pos, dev, update);
+	if (irq > 0)
+		dev_dbg(&dev->dev, "irq %d for HT\n", irq);
 
 	return irq;
 }
@@ -158,13 +129,6 @@ EXPORT_SYMBOL(ht_create_irq);
  */
 void ht_destroy_irq(unsigned int irq)
 {
-	struct ht_irq_cfg *cfg;
-
-	cfg = irq_get_handler_data(irq);
-	irq_set_chip(irq, NULL);
-	irq_set_handler_data(irq, NULL);
-	irq_free_hwirq(irq);
-
-	kfree(cfg);
+	arch_teardown_ht_irq(irq);
 }
 EXPORT_SYMBOL(ht_destroy_irq);
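For reference, the ht_irq_cfg bookkeeping deleted above addressed each interrupt's message registers through an index/data window: the register index (0x10 + 2 * idx for the low address word, per the deleted cfg->idx initialisation) is written to config offset pos + 2 before the data is accessed, as write_ht_irq_msg() still shows. A sketch of the index arithmetic only (plain C; the +1 offset for the high word is an assumption for illustration, not taken from this file):

#include <stdio.h>

/* Message register pairs start at index 0x10, two 32-bit halves per
 * interrupt; the high-half index here is assumed, not from the diff. */
static unsigned int ht_msg_addr_lo_idx(unsigned int idx)
{
	return 0x10 + idx * 2;
}

static unsigned int ht_msg_addr_hi_idx(unsigned int idx)
{
	return ht_msg_addr_lo_idx(idx) + 1;
}

int main(void)
{
	unsigned int idx;

	for (idx = 0; idx < 3; idx++)
		printf("HT irq %u: lo index 0x%02x, hi index 0x%02x\n",
		       idx, ht_msg_addr_lo_idx(idx), ht_msg_addr_hi_idx(idx));
	return 0;
}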
diff --git a/drivers/pci/ioapic.c b/drivers/pci/ioapic.c
deleted file mode 100644
index f6219d36227f..000000000000
--- a/drivers/pci/ioapic.c
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * IOAPIC/IOxAPIC/IOSAPIC driver
- *
- * Copyright (C) 2009 Fujitsu Limited.
- * (c) Copyright 2009 Hewlett-Packard Development Company, L.P.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- */
-
-/*
- * This driver manages PCI I/O APICs added by hotplug after boot.  We try to
- * claim all I/O APIC PCI devices, but those present at boot were registered
- * when we parsed the ACPI MADT, so we'll fail when we try to re-register
- * them.
- */
-
-#include <linux/pci.h>
-#include <linux/module.h>
-#include <linux/acpi.h>
-#include <linux/slab.h>
-
-struct ioapic {
-	acpi_handle handle;
-	u32 gsi_base;
-};
-
-static int ioapic_probe(struct pci_dev *dev, const struct pci_device_id *ent)
-{
-	acpi_handle handle;
-	acpi_status status;
-	unsigned long long gsb;
-	struct ioapic *ioapic;
-	int ret;
-	char *type;
-	struct resource *res;
-
-	handle = ACPI_HANDLE(&dev->dev);
-	if (!handle)
-		return -EINVAL;
-
-	status = acpi_evaluate_integer(handle, "_GSB", NULL, &gsb);
-	if (ACPI_FAILURE(status))
-		return -EINVAL;
-
-	/*
-	 * The previous code in acpiphp evaluated _MAT if _GSB failed, but
-	 * ACPI spec 4.0 sec 6.2.2 requires _GSB for hot-pluggable I/O APICs.
-	 */
-
-	ioapic = kzalloc(sizeof(*ioapic), GFP_KERNEL);
-	if (!ioapic)
-		return -ENOMEM;
-
-	ioapic->handle = handle;
-	ioapic->gsi_base = (u32) gsb;
-
-	if (dev->class == PCI_CLASS_SYSTEM_PIC_IOAPIC)
-		type = "IOAPIC";
-	else
-		type = "IOxAPIC";
-
-	ret = pci_enable_device(dev);
-	if (ret < 0)
-		goto exit_free;
-
-	pci_set_master(dev);
-
-	if (pci_request_region(dev, 0, type))
-		goto exit_disable;
-
-	res = &dev->resource[0];
-	if (acpi_register_ioapic(ioapic->handle, res->start, ioapic->gsi_base))
-		goto exit_release;
-
-	pci_set_drvdata(dev, ioapic);
-	dev_info(&dev->dev, "%s at %pR, GSI %u\n", type, res, ioapic->gsi_base);
-	return 0;
-
-exit_release:
-	pci_release_region(dev, 0);
-exit_disable:
-	pci_disable_device(dev);
-exit_free:
-	kfree(ioapic);
-	return -ENODEV;
-}
-
-static void ioapic_remove(struct pci_dev *dev)
-{
-	struct ioapic *ioapic = pci_get_drvdata(dev);
-
-	acpi_unregister_ioapic(ioapic->handle, ioapic->gsi_base);
-	pci_release_region(dev, 0);
-	pci_disable_device(dev);
-	kfree(ioapic);
-}
-
-
-static const struct pci_device_id ioapic_devices[] = {
-	{ PCI_DEVICE_CLASS(PCI_CLASS_SYSTEM_PIC_IOAPIC, ~0) },
-	{ PCI_DEVICE_CLASS(PCI_CLASS_SYSTEM_PIC_IOXAPIC, ~0) },
-	{ }
-};
-MODULE_DEVICE_TABLE(pci, ioapic_devices);
-
-static struct pci_driver ioapic_driver = {
-	.name		= "ioapic",
-	.id_table	= ioapic_devices,
-	.probe		= ioapic_probe,
-	.remove		= ioapic_remove,
-};
-
-static int __init ioapic_init(void)
-{
-	return pci_register_driver(&ioapic_driver);
-}
-module_init(ioapic_init);
-
-MODULE_LICENSE("GPL");
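The deleted driver bound by PCI class rather than by vendor/device ID: a PCI_DEVICE_CLASS(class, ~0) table entry matches when a function's 24-bit class code equals the given value under the mask. A compact model of that matching (plain C; the class values follow pci_ids.h, base class 0x08 with prog-if 0x10/0x20 for IOAPIC/IOxAPIC):

#include <stdint.h>
#include <stdio.h>

#define EX_CLASS_PIC_IOAPIC	0x080010
#define EX_CLASS_PIC_IOXAPIC	0x080020

/* The table stores a (class, mask) pair; ~0 demands an exact match. */
static int class_matches(uint32_t dev_class, uint32_t class, uint32_t mask)
{
	return (dev_class & mask) == (class & mask);
}

int main(void)
{
	uint32_t hotplugged = 0x080020;	/* an IOxAPIC function */

	printf("IOAPIC entry matches:  %d\n",
	       class_matches(hotplugged, EX_CLASS_PIC_IOAPIC, ~0u));
	printf("IOxAPIC entry matches: %d\n",
	       class_matches(hotplugged, EX_CLASS_PIC_IOXAPIC, ~0u));
	return 0;
}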
diff --git a/drivers/pci/quirks.c b/drivers/pci/quirks.c
index 90acb32c85b1..4e1707aa5b26 100644
--- a/drivers/pci/quirks.c
+++ b/drivers/pci/quirks.c
@@ -765,13 +765,6 @@ static void quirk_amd_ioapic(struct pci_dev *dev)
 	}
 }
 DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_AMD, PCI_DEVICE_ID_AMD_VIPER_7410, quirk_amd_ioapic);
-
-static void quirk_ioapic_rmw(struct pci_dev *dev)
-{
-	if (dev->devfn == 0 && dev->bus->number == 0)
-		sis_apic_bug = 1;
-}
-DECLARE_PCI_FIXUP_FINAL(PCI_VENDOR_ID_SI, PCI_ANY_ID, quirk_ioapic_rmw);
 #endif /* CONFIG_X86_IO_APIC */
 
 /*
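The surviving entry above shows the fixup pattern: DECLARE_PCI_FIXUP_FINAL registers a function that runs at the "final" fixup phase for every matching device, and the removed SiS quirk merely tested for function 0 of device 0 on bus 0 before setting sis_apic_bug. The devfn test it performed, modelled standalone (plain C; EX_PCI_SLOT/EX_PCI_FUNC mirror the kernel's PCI_SLOT/PCI_FUNC macros):

#include <stdint.h>
#include <stdio.h>

/* devfn packs a 5-bit device number and a 3-bit function number. */
#define EX_PCI_SLOT(devfn)	(((devfn) >> 3) & 0x1f)
#define EX_PCI_FUNC(devfn)	((devfn) & 0x07)

static int is_host_bridge_slot(uint8_t bus, uint8_t devfn)
{
	return bus == 0 && devfn == 0;	/* same test as quirk_ioapic_rmw() */
}

int main(void)
{
	uint8_t devfn = (3 << 3) | 1;	/* device 3, function 1 */

	printf("slot=%u func=%u match=%d\n", EX_PCI_SLOT(devfn),
	       EX_PCI_FUNC(devfn), is_host_bridge_slot(0, devfn));
	return 0;
}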