Diffstat (limited to 'drivers/iommu')
-rw-r--r--  drivers/iommu/amd_iommu.c            119
-rw-r--r--  drivers/iommu/amd_iommu_init.c        19
-rw-r--r--  drivers/iommu/amd_iommu_types.h        5
-rw-r--r--  drivers/iommu/dmar.c                 194
-rw-r--r--  drivers/iommu/intel_irq_remapping.c   20
-rw-r--r--  drivers/iommu/iommu.c                  5
-rw-r--r--  drivers/iommu/irq_remapping.c          5
-rw-r--r--  drivers/iommu/irq_remapping.h          2
-rw-r--r--  drivers/iommu/omap-iommu.c            32
-rw-r--r--  drivers/iommu/tegra-gart.c            20
-rw-r--r--  drivers/iommu/tegra-smmu.c             6
11 files changed, 270 insertions, 157 deletions
diff --git a/drivers/iommu/amd_iommu.c b/drivers/iommu/amd_iommu.c
index a5bee8e2dfce..625626391f2d 100644
--- a/drivers/iommu/amd_iommu.c
+++ b/drivers/iommu/amd_iommu.c
@@ -83,6 +83,8 @@ static struct iommu_ops amd_iommu_ops;
static ATOMIC_NOTIFIER_HEAD(ppr_notifier);
int amd_iommu_max_glx_val = -1;
+static struct dma_map_ops amd_iommu_dma_ops;
+
/*
* general struct to manage commands sent to an IOMMU
*/
@@ -402,7 +404,7 @@ static void amd_iommu_stats_init(void)
return;
de_fflush = debugfs_create_bool("fullflush", 0444, stats_dir,
- (u32 *)&amd_iommu_unmap_flush);
+ &amd_iommu_unmap_flush);
amd_iommu_stats_add(&compl_wait);
amd_iommu_stats_add(&cnt_map_single);
@@ -450,12 +452,27 @@ static void dump_command(unsigned long phys_addr)
static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
{
- u32 *event = __evt;
- int type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
- int devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
- int domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
- int flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
- u64 address = (u64)(((u64)event[3]) << 32) | event[2];
+ int type, devid, domid, flags;
+ volatile u32 *event = __evt;
+ int count = 0;
+ u64 address;
+
+retry:
+ type = (event[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK;
+ devid = (event[0] >> EVENT_DEVID_SHIFT) & EVENT_DEVID_MASK;
+ domid = (event[1] >> EVENT_DOMID_SHIFT) & EVENT_DOMID_MASK;
+ flags = (event[1] >> EVENT_FLAGS_SHIFT) & EVENT_FLAGS_MASK;
+ address = (u64)(((u64)event[3]) << 32) | event[2];
+
+ if (type == 0) {
+ /* Did we hit the erratum? */
+ if (++count == LOOP_TIMEOUT) {
+ pr_err("AMD-Vi: No event written to event log\n");
+ return;
+ }
+ udelay(1);
+ goto retry;
+ }
printk(KERN_ERR "AMD-Vi: Event logged [");
@@ -508,6 +525,8 @@ static void iommu_print_event(struct amd_iommu *iommu, void *__evt)
default:
printk(KERN_ERR "UNKNOWN type=0x%02x]\n", type);
}
+
+ memset(__evt, 0, 4 * sizeof(u32));
}
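
The retry loop added above works around a hardware erratum: the event-log interrupt can fire before the entry itself reaches memory, so a zero type field means "not written yet" rather than "empty". Clearing the entry after it is consumed (the new memset) is what makes a stale read detectable on the next pass. The same poll-with-timeout pattern in isolation, as a minimal sketch with a hypothetical helper name:

static bool wait_for_log_entry(volatile u32 *entry, int timeout_us)
{
        int i;

        /* A type field of 0 means the hardware has not written the entry yet. */
        for (i = 0; i < timeout_us; ++i) {
                if ((entry[1] >> EVENT_TYPE_SHIFT) & EVENT_TYPE_MASK)
                        return true;    /* entry is valid now */
                udelay(1);
        }

        return false;                   /* erratum hit: entry never arrived */
}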
static void iommu_poll_events(struct amd_iommu *iommu)
@@ -530,26 +549,12 @@ static void iommu_poll_events(struct amd_iommu *iommu)
spin_unlock_irqrestore(&iommu->lock, flags);
}
-static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
+static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u64 *raw)
{
struct amd_iommu_fault fault;
- volatile u64 *raw;
- int i;
INC_STATS_COUNTER(pri_requests);
- raw = (u64 *)(iommu->ppr_log + head);
-
- /*
- * Hardware bug: Interrupt may arrive before the entry is written to
- * memory. If this happens we need to wait for the entry to arrive.
- */
- for (i = 0; i < LOOP_TIMEOUT; ++i) {
- if (PPR_REQ_TYPE(raw[0]) != 0)
- break;
- udelay(1);
- }
-
if (PPR_REQ_TYPE(raw[0]) != PPR_REQ_FAULT) {
pr_err_ratelimited("AMD-Vi: Unknown PPR request received\n");
return;
@@ -561,12 +566,6 @@ static void iommu_handle_ppr_entry(struct amd_iommu *iommu, u32 head)
fault.tag = PPR_TAG(raw[0]);
fault.flags = PPR_FLAGS(raw[0]);
- /*
- * To detect the hardware bug we need to clear the entry
- * to back to zero.
- */
- raw[0] = raw[1] = 0;
-
atomic_notifier_call_chain(&ppr_notifier, 0, &fault);
}
@@ -578,25 +577,62 @@ static void iommu_poll_ppr_log(struct amd_iommu *iommu)
if (iommu->ppr_log == NULL)
return;
+ /* enable ppr interrupts again */
+ writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
+
spin_lock_irqsave(&iommu->lock, flags);
head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
while (head != tail) {
+ volatile u64 *raw;
+ u64 entry[2];
+ int i;
- /* Handle PPR entry */
- iommu_handle_ppr_entry(iommu, head);
+ raw = (u64 *)(iommu->ppr_log + head);
+
+ /*
+ * Hardware bug: Interrupt may arrive before the entry is
+ * written to memory. If this happens we need to wait for the
+ * entry to arrive.
+ */
+ for (i = 0; i < LOOP_TIMEOUT; ++i) {
+ if (PPR_REQ_TYPE(raw[0]) != 0)
+ break;
+ udelay(1);
+ }
+
+ /* Avoid memcpy function-call overhead */
+ entry[0] = raw[0];
+ entry[1] = raw[1];
+
+ /*
+ * To detect the hardware bug we need to clear the entry
+ * back to zero.
+ */
+ raw[0] = raw[1] = 0UL;
- /* Update and refresh ring-buffer state*/
+ /* Update head pointer of hardware ring-buffer */
head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
+
+ /*
+ * Release iommu->lock because ppr-handling might need to
+ * re-acquire it
+ */
+ spin_unlock_irqrestore(&iommu->lock, flags);
+
+ /* Handle PPR entry */
+ iommu_handle_ppr_entry(iommu, entry);
+
+ spin_lock_irqsave(&iommu->lock, flags);
+
+ /* Refresh ring-buffer information */
+ head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
}
- /* enable ppr interrupts again */
- writel(MMIO_STATUS_PPR_INT_MASK, iommu->mmio_base + MMIO_STATUS_OFFSET);
-
spin_unlock_irqrestore(&iommu->lock, flags);
}
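
The reshuffle above exists because iommu_handle_ppr_entry() runs the ppr_notifier chain, and a notifier callee may itself need iommu->lock; calling it with the lock held could deadlock. Hence the entry is copied to the stack, the slot cleared and the head published while still locked, and only then is the lock dropped around the handler. Condensed shape of the loop (copy_and_clear_ppr_entry() is a hypothetical stand-in for the copy/clear steps):

while (head != tail) {
        copy_and_clear_ppr_entry(iommu, head, entry);   /* under iommu->lock */
        head = (head + PPR_ENTRY_SIZE) % PPR_LOG_SIZE;
        writel(head, iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);

        spin_unlock_irqrestore(&iommu->lock, flags);
        iommu_handle_ppr_entry(iommu, entry);           /* may take iommu->lock */
        spin_lock_irqsave(&iommu->lock, flags);

        /* the ring may have moved while unlocked; re-read both pointers */
        head = readl(iommu->mmio_base + MMIO_PPR_HEAD_OFFSET);
        tail = readl(iommu->mmio_base + MMIO_PPR_TAIL_OFFSET);
}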
@@ -2035,20 +2071,20 @@ out_err:
}
/* FIXME: Move this to PCI code */
-#define PCI_PRI_TLP_OFF (1 << 2)
+#define PCI_PRI_TLP_OFF (1 << 15)
bool pci_pri_tlp_required(struct pci_dev *pdev)
{
- u16 control;
+ u16 status;
int pos;
pos = pci_find_ext_capability(pdev, PCI_EXT_CAP_ID_PRI);
if (!pos)
return false;
- pci_read_config_word(pdev, pos + PCI_PRI_CTRL, &control);
+ pci_read_config_word(pdev, pos + PCI_PRI_STATUS, &status);
- return (control & PCI_PRI_TLP_OFF) ? true : false;
+ return (status & PCI_PRI_TLP_OFF) ? true : false;
}
/*
@@ -2233,6 +2269,13 @@ static int device_change_notifier(struct notifier_block *nb,
list_add_tail(&dma_domain->list, &iommu_pd_list);
spin_unlock_irqrestore(&iommu_pd_list_lock, flags);
+ dev_data = get_dev_data(dev);
+
+ if (!dev_data->passthrough)
+ dev->archdata.dma_ops = &amd_iommu_dma_ops;
+ else
+ dev->archdata.dma_ops = &nommu_dma_ops;
+
break;
case BUS_NOTIFY_DEL_DEVICE:
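
With amd_iommu_dma_ops now a file-scope symbol, the notifier can point newly hot-added devices at the per-device dma_ops (or nommu_dma_ops for passthrough devices) instead of relying on a global default. For context, a sketch of how such a bus notifier is typically wired up (registration details assumed, not shown in this hunk):

static struct notifier_block device_nb = {
        .notifier_call = device_change_notifier,
};

static int __init register_device_notifier(void)
{
        /* invoked for BUS_NOTIFY_ADD_DEVICE / BUS_NOTIFY_DEL_DEVICE events */
        return bus_register_notifier(&pci_bus_type, &device_nb);
}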
diff --git a/drivers/iommu/amd_iommu_init.c b/drivers/iommu/amd_iommu_init.c
index c56790375e0f..a33612f3206f 100644
--- a/drivers/iommu/amd_iommu_init.c
+++ b/drivers/iommu/amd_iommu_init.c
@@ -129,7 +129,7 @@ u16 amd_iommu_last_bdf; /* largest PCI device id we have
to handle */
LIST_HEAD(amd_iommu_unity_map); /* a list of required unity mappings
we find in ACPI */
-bool amd_iommu_unmap_flush; /* if true, flush on every unmap */
+u32 amd_iommu_unmap_flush; /* if true, flush on every unmap */
LIST_HEAD(amd_iommu_list); /* list of all AMD IOMMUs in the
system */
@@ -1029,6 +1029,9 @@ static int __init init_iommu_one(struct amd_iommu *iommu, struct ivhd_header *h)
if (!iommu->dev)
return 1;
+ iommu->root_pdev = pci_get_bus_and_slot(iommu->dev->bus->number,
+ PCI_DEVFN(0, 0));
+
iommu->cap_ptr = h->cap_ptr;
iommu->pci_seg = h->pci_seg;
iommu->mmio_phys = h->mmio_phys;
@@ -1323,20 +1326,16 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
{
int i, j;
u32 ioc_feature_control;
- struct pci_dev *pdev = NULL;
+ struct pci_dev *pdev = iommu->root_pdev;
/* RD890 BIOSes may not have completely reconfigured the iommu */
- if (!is_rd890_iommu(iommu->dev))
+ if (!is_rd890_iommu(iommu->dev) || !pdev)
return;
/*
* First, we need to ensure that the iommu is enabled. This is
* controlled by a register in the northbridge
*/
- pdev = pci_get_bus_and_slot(iommu->dev->bus->number, PCI_DEVFN(0, 0));
-
- if (!pdev)
- return;
/* Select Northbridge indirect register 0x75 and enable writing */
pci_write_config_dword(pdev, 0x60, 0x75 | (1 << 7));
@@ -1346,8 +1345,6 @@ static void iommu_apply_resume_quirks(struct amd_iommu *iommu)
if (!(ioc_feature_control & 0x1))
pci_write_config_dword(pdev, 0x64, ioc_feature_control | 1);
- pci_dev_put(pdev);
-
/* Restore the iommu BAR */
pci_write_config_dword(iommu->dev, iommu->cap_ptr + 4,
iommu->stored_addr_lo);
@@ -1644,6 +1641,8 @@ static int __init amd_iommu_init(void)
amd_iommu_init_api();
+ x86_platform.iommu_shutdown = disable_iommus;
+
if (iommu_pass_through)
goto out;
@@ -1652,8 +1651,6 @@ static int __init amd_iommu_init(void)
else
printk(KERN_INFO "AMD-Vi: Lazy IO/TLB flushing enabled\n");
- x86_platform.iommu_shutdown = disable_iommus;
-
out:
return ret;
diff --git a/drivers/iommu/amd_iommu_types.h b/drivers/iommu/amd_iommu_types.h
index 2452f3b71736..c1b1d489817e 100644
--- a/drivers/iommu/amd_iommu_types.h
+++ b/drivers/iommu/amd_iommu_types.h
@@ -481,6 +481,9 @@ struct amd_iommu {
/* Pointer to PCI device of this IOMMU */
struct pci_dev *dev;
+ /* Cache pdev to root device for resume quirks */
+ struct pci_dev *root_pdev;
+
/* physical address of MMIO space */
u64 mmio_phys;
/* virtual address of MMIO space */
@@ -649,7 +652,7 @@ extern unsigned long *amd_iommu_pd_alloc_bitmap;
* If true, the addresses will be flushed on unmap time, not when
* they are reused
*/
-extern bool amd_iommu_unmap_flush;
+extern u32 amd_iommu_unmap_flush;
/* Smallest number of PASIDs supported by any IOMMU in the system */
extern u32 amd_iommu_max_pasids;
diff --git a/drivers/iommu/dmar.c b/drivers/iommu/dmar.c
index 3a74e4410fc0..86e2f4a62b9a 100644
--- a/drivers/iommu/dmar.c
+++ b/drivers/iommu/dmar.c
@@ -26,6 +26,8 @@
* These routines are used by both DMA-remapping and Interrupt-remapping
*/
+#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt /* has to precede printk.h */
+
#include <linux/pci.h>
#include <linux/dmar.h>
#include <linux/iova.h>
@@ -39,8 +41,6 @@
#include <asm/irq_remapping.h>
#include <asm/iommu_table.h>
-#define PREFIX "DMAR: "
-
/* No locks are needed as DMA remapping hardware unit
* list is constructed at boot time and hotplug of
* these units are not supported by the architecture.
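
Defining pr_fmt before printk.h is pulled in makes every pr_*() call in the file self-prefixing, which is what lets the PREFIX macro and its manual string concatenation be deleted throughout the rest of this patch. A minimal sketch of the effect (module name assumed to be "dmar" for this file):

#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt     /* must precede printk.h */
#include <linux/printk.h>

static void pr_fmt_example(void)
{
        /* expands to printk(KERN_WARNING "dmar: Invalid DMAR haw\n") */
        pr_warn("Invalid DMAR haw\n");
}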
@@ -83,16 +83,12 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
* ignore it
*/
if (!bus) {
- printk(KERN_WARNING
- PREFIX "Device scope bus [%d] not found\n",
- scope->bus);
+ pr_warn("Device scope bus [%d] not found\n", scope->bus);
break;
}
pdev = pci_get_slot(bus, PCI_DEVFN(path->dev, path->fn));
if (!pdev) {
- printk(KERN_WARNING PREFIX
- "Device scope device [%04x:%02x:%02x.%02x] not found\n",
- segment, bus->number, path->dev, path->fn);
+ /* warning will be printed below */
break;
}
path ++;
@@ -100,9 +96,8 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
bus = pdev->subordinate;
}
if (!pdev) {
- printk(KERN_WARNING PREFIX
- "Device scope device [%04x:%02x:%02x.%02x] not found\n",
- segment, scope->bus, path->dev, path->fn);
+ pr_warn("Device scope device [%04x:%02x:%02x.%02x] not found\n",
+ segment, scope->bus, path->dev, path->fn);
*dev = NULL;
return 0;
}
@@ -110,9 +105,8 @@ static int __init dmar_parse_one_dev_scope(struct acpi_dmar_device_scope *scope,
pdev->subordinate) || (scope->entry_type == \
ACPI_DMAR_SCOPE_TYPE_BRIDGE && !pdev->subordinate)) {
pci_dev_put(pdev);
- printk(KERN_WARNING PREFIX
- "Device scope type does not match for %s\n",
- pci_name(pdev));
+ pr_warn("Device scope type does not match for %s\n",
+ pci_name(pdev));
return -EINVAL;
}
*dev = pdev;
@@ -134,8 +128,7 @@ int __init dmar_parse_dev_scope(void *start, void *end, int *cnt,
scope->entry_type == ACPI_DMAR_SCOPE_TYPE_BRIDGE)
(*cnt)++;
else if (scope->entry_type != ACPI_DMAR_SCOPE_TYPE_IOAPIC) {
- printk(KERN_WARNING PREFIX
- "Unsupported device scope\n");
+ pr_warn("Unsupported device scope\n");
}
start += scope->length;
}
@@ -261,25 +254,23 @@ dmar_table_print_dmar_entry(struct acpi_dmar_header *header)
case ACPI_DMAR_TYPE_HARDWARE_UNIT:
drhd = container_of(header, struct acpi_dmar_hardware_unit,
header);
- printk (KERN_INFO PREFIX
- "DRHD base: %#016Lx flags: %#x\n",
+ pr_info("DRHD base: %#016Lx flags: %#x\n",
(unsigned long long)drhd->address, drhd->flags);
break;
case ACPI_DMAR_TYPE_RESERVED_MEMORY:
rmrr = container_of(header, struct acpi_dmar_reserved_memory,
header);
- printk (KERN_INFO PREFIX
- "RMRR base: %#016Lx end: %#016Lx\n",
+ pr_info("RMRR base: %#016Lx end: %#016Lx\n",
(unsigned long long)rmrr->base_address,
(unsigned long long)rmrr->end_address);
break;
case ACPI_DMAR_TYPE_ATSR:
atsr = container_of(header, struct acpi_dmar_atsr, header);
- printk(KERN_INFO PREFIX "ATSR flags: %#x\n", atsr->flags);
+ pr_info("ATSR flags: %#x\n", atsr->flags);
break;
case ACPI_DMAR_HARDWARE_AFFINITY:
rhsa = container_of(header, struct acpi_dmar_rhsa, header);
- printk(KERN_INFO PREFIX "RHSA base: %#016Lx proximity domain: %#x\n",
+ pr_info("RHSA base: %#016Lx proximity domain: %#x\n",
(unsigned long long)rhsa->base_address,
rhsa->proximity_domain);
break;
@@ -299,7 +290,7 @@ static int __init dmar_table_detect(void)
&dmar_tbl_size);
if (ACPI_SUCCESS(status) && !dmar_tbl) {
- printk (KERN_WARNING PREFIX "Unable to map DMAR\n");
+ pr_warn("Unable to map DMAR\n");
status = AE_NOT_FOUND;
}
@@ -333,20 +324,18 @@ parse_dmar_table(void)
return -ENODEV;
if (dmar->width < PAGE_SHIFT - 1) {
- printk(KERN_WARNING PREFIX "Invalid DMAR haw\n");
+ pr_warn("Invalid DMAR haw\n");
return -EINVAL;
}
- printk (KERN_INFO PREFIX "Host address width %d\n",
- dmar->width + 1);
+ pr_info("Host address width %d\n", dmar->width + 1);
entry_header = (struct acpi_dmar_header *)(dmar + 1);
while (((unsigned long)entry_header) <
(((unsigned long)dmar) + dmar_tbl->length)) {
/* Avoid looping forever on bad ACPI tables */
if (entry_header->length == 0) {
- printk(KERN_WARNING PREFIX
- "Invalid 0-length structure\n");
+ pr_warn("Invalid 0-length structure\n");
ret = -EINVAL;
break;
}
@@ -369,8 +358,7 @@ parse_dmar_table(void)
#endif
break;
default:
- printk(KERN_WARNING PREFIX
- "Unknown DMAR structure type %d\n",
+ pr_warn("Unknown DMAR structure type %d\n",
entry_header->type);
ret = 0; /* for forward compatibility */
break;
@@ -469,12 +457,12 @@ int __init dmar_table_init(void)
ret = parse_dmar_table();
if (ret) {
if (ret != -ENODEV)
- printk(KERN_INFO PREFIX "parse DMAR table failure.\n");
+ pr_info("parse DMAR table failure.\n");
return ret;
}
if (list_empty(&dmar_drhd_units)) {
- printk(KERN_INFO PREFIX "No DMAR devices found\n");
+ pr_info("No DMAR devices found\n");
return -ENODEV;
}
@@ -506,8 +494,7 @@ int __init check_zero_address(void)
(((unsigned long)dmar) + dmar_tbl->length)) {
/* Avoid looping forever on bad ACPI tables */
if (entry_header->length == 0) {
- printk(KERN_WARNING PREFIX
- "Invalid 0-length structure\n");
+ pr_warn("Invalid 0-length structure\n");
return 0;
}
@@ -558,8 +545,7 @@ int __init detect_intel_iommu(void)
if (ret && irq_remapping_enabled && cpu_has_x2apic &&
dmar->flags & 0x1)
- printk(KERN_INFO
- "Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
+ pr_info("Queued invalidation will be enabled to support x2apic and Intr-remapping.\n");
if (ret && !no_iommu && !iommu_detected && !dmar_disabled) {
iommu_detected = 1;
@@ -579,14 +565,89 @@ int __init detect_intel_iommu(void)
}
+static void unmap_iommu(struct intel_iommu *iommu)
+{
+ iounmap(iommu->reg);
+ release_mem_region(iommu->reg_phys, iommu->reg_size);
+}
+
+/**
+ * map_iommu: map the iommu's registers
+ * @iommu: the iommu to map
+ * @phys_addr: the physical address of the base register
+ *
+ * Memory map the iommu's registers. Start w/ a single page, and
+ * possibly expand if that turns out to be insufficient.
+ */
+static int map_iommu(struct intel_iommu *iommu, u64 phys_addr)
+{
+ int map_size, err=0;
+
+ iommu->reg_phys = phys_addr;
+ iommu->reg_size = VTD_PAGE_SIZE;
+
+ if (!request_mem_region(iommu->reg_phys, iommu->reg_size, iommu->name)) {
+ pr_err("IOMMU: can't reserve memory\n");
+ err = -EBUSY;
+ goto out;
+ }
+
+ iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
+ if (!iommu->reg) {
+ pr_err("IOMMU: can't map the region\n");
+ err = -ENOMEM;
+ goto release;
+ }
+
+ iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
+ iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
+
+ if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
+ err = -EINVAL;
+ warn_invalid_dmar(phys_addr, " returns all ones");
+ goto unmap;
+ }
+
+ /* the registers might be more than one page */
+ map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
+ cap_max_fault_reg_offset(iommu->cap));
+ map_size = VTD_PAGE_ALIGN(map_size);
+ if (map_size > iommu->reg_size) {
+ iounmap(iommu->reg);
+ release_mem_region(iommu->reg_phys, iommu->reg_size);
+ iommu->reg_size = map_size;
+ if (!request_mem_region(iommu->reg_phys, iommu->reg_size,
+ iommu->name)) {
+ pr_err("IOMMU: can't reserve memory\n");
+ err = -EBUSY;
+ goto out;
+ }
+ iommu->reg = ioremap(iommu->reg_phys, iommu->reg_size);
+ if (!iommu->reg) {
+ pr_err("IOMMU: can't map the region\n");
+ err = -ENOMEM;
+ goto release;
+ }
+ }
+ err = 0;
+ goto out;
+
+unmap:
+ iounmap(iommu->reg);
+release:
+ release_mem_region(iommu->reg_phys, iommu->reg_size);
+out:
+ return err;
+}
+
int alloc_iommu(struct dmar_drhd_unit *drhd)
{
struct intel_iommu *iommu;
- int map_size;
u32 ver;
static int iommu_allocated = 0;
int agaw = 0;
int msagaw = 0;
+ int err;
if (!drhd->reg_base_addr) {
warn_invalid_dmar(0, "");
@@ -600,30 +661,22 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
iommu->seq_id = iommu_allocated++;
sprintf (iommu->name, "dmar%d", iommu->seq_id);
- iommu->reg = ioremap(drhd->reg_base_addr, VTD_PAGE_SIZE);
- if (!iommu->reg) {
- printk(KERN_ERR "IOMMU: can't map the region\n");
+ err = map_iommu(iommu, drhd->reg_base_addr);
+ if (err) {
+ pr_err("IOMMU: failed to map %s\n", iommu->name);
goto error;
}
- iommu->cap = dmar_readq(iommu->reg + DMAR_CAP_REG);
- iommu->ecap = dmar_readq(iommu->reg + DMAR_ECAP_REG);
-
- if (iommu->cap == (uint64_t)-1 && iommu->ecap == (uint64_t)-1) {
- warn_invalid_dmar(drhd->reg_base_addr, " returns all ones");
- goto err_unmap;
- }
+ err = -EINVAL;
agaw = iommu_calculate_agaw(iommu);
if (agaw < 0) {
- printk(KERN_ERR
- "Cannot get a valid agaw for iommu (seq_id = %d)\n",
- iommu->seq_id);
+ pr_err("Cannot get a valid agaw for iommu (seq_id = %d)\n",
+ iommu->seq_id);
goto err_unmap;
}
msagaw = iommu_calculate_max_sagaw(iommu);
if (msagaw < 0) {
- printk(KERN_ERR
- "Cannot get a valid max agaw for iommu (seq_id = %d)\n",
+ pr_err("Cannot get a valid max agaw for iommu (seq_id = %d)\n",
iommu->seq_id);
goto err_unmap;
}
@@ -632,19 +685,6 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
iommu->node = -1;
- /* the registers might be more than one page */
- map_size = max_t(int, ecap_max_iotlb_offset(iommu->ecap),
- cap_max_fault_reg_offset(iommu->cap));
- map_size = VTD_PAGE_ALIGN(map_size);
- if (map_size > VTD_PAGE_SIZE) {
- iounmap(iommu->reg);
- iommu->reg = ioremap(drhd->reg_base_addr, map_size);
- if (!iommu->reg) {
- printk(KERN_ERR "IOMMU: can't map the region\n");
- goto error;
- }
- }
-
ver = readl(iommu->reg + DMAR_VER_REG);
pr_info("IOMMU %d: reg_base_addr %llx ver %d:%d cap %llx ecap %llx\n",
iommu->seq_id,
@@ -659,10 +699,10 @@ int alloc_iommu(struct dmar_drhd_unit *drhd)
return 0;
err_unmap:
- iounmap(iommu->reg);
+ unmap_iommu(iommu);
error:
kfree(iommu);
- return -1;
+ return err;
}
void free_iommu(struct intel_iommu *iommu)
@@ -673,7 +713,8 @@ void free_iommu(struct intel_iommu *iommu)
free_dmar_iommu(iommu);
if (iommu->reg)
- iounmap(iommu->reg);
+ unmap_iommu(iommu);
+
kfree(iommu);
}
@@ -710,7 +751,7 @@ static int qi_check_fault(struct intel_iommu *iommu, int index)
if (fault & DMA_FSTS_IQE) {
head = readl(iommu->reg + DMAR_IQH_REG);
if ((head >> DMAR_IQ_SHIFT) == index) {
- printk(KERN_ERR "VT-d detected invalid descriptor: "
+ pr_err("VT-d detected invalid descriptor: "
"low=%llx, high=%llx\n",
(unsigned long long)qi->desc[index].low,
(unsigned long long)qi->desc[index].high);
@@ -1129,15 +1170,14 @@ static int dmar_fault_do_one(struct intel_iommu *iommu, int type,
reason = dmar_get_fault_reason(fault_reason, &fault_type);
if (fault_type == INTR_REMAP)
- printk(KERN_ERR "INTR-REMAP: Request device [[%02x:%02x.%d] "
+ pr_err("INTR-REMAP: Request device [[%02x:%02x.%d] "
"fault index %llx\n"
"INTR-REMAP:[fault reason %02d] %s\n",
(source_id >> 8), PCI_SLOT(source_id & 0xFF),
PCI_FUNC(source_id & 0xFF), addr >> 48,
fault_reason, reason);
else
- printk(KERN_ERR
- "DMAR:[%s] Request device [%02x:%02x.%d] "
+ pr_err("DMAR:[%s] Request device [%02x:%02x.%d] "
"fault addr %llx \n"
"DMAR:[fault reason %02d] %s\n",
(type ? "DMA Read" : "DMA Write"),
@@ -1157,8 +1197,7 @@ irqreturn_t dmar_fault(int irq, void *dev_id)
raw_spin_lock_irqsave(&iommu->register_lock, flag);
fault_status = readl(iommu->reg + DMAR_FSTS_REG);
if (fault_status)
- printk(KERN_ERR "DRHD: handling fault status reg %x\n",
- fault_status);
+ pr_err("DRHD: handling fault status reg %x\n", fault_status);
/* TBD: ignore advanced fault log currently */
if (!(fault_status & DMA_FSTS_PPF))
@@ -1224,7 +1263,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
irq = create_irq();
if (!irq) {
- printk(KERN_ERR "IOMMU: no free vectors\n");
+ pr_err("IOMMU: no free vectors\n");
return -EINVAL;
}
@@ -1241,7 +1280,7 @@ int dmar_set_interrupt(struct intel_iommu *iommu)
ret = request_irq(irq, dmar_fault, IRQF_NO_THREAD, iommu->name, iommu);
if (ret)
- printk(KERN_ERR "IOMMU: can't request irq\n");
+ pr_err("IOMMU: can't request irq\n");
return ret;
}
@@ -1258,8 +1297,7 @@ int __init enable_drhd_fault_handling(void)
ret = dmar_set_interrupt(iommu);
if (ret) {
- printk(KERN_ERR "DRHD %Lx: failed to enable fault, "
- " interrupt, ret %d\n",
+ pr_err("DRHD %Lx: failed to enable fault interrupt, ret %d\n",
(unsigned long long)drhd->reg_base_addr, ret);
return -1;
}
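
The map_iommu()/unmap_iommu() pair introduced above centralizes the reserve-plus-ioremap sequence, including the second, larger mapping when the capability registers say one page is not enough; every exit path can then release both resources with one call. A condensed, hypothetical caller-side sketch of the intended pairing:

static int probe_one_iommu(struct intel_iommu *iommu, u64 base)
{
        int err;

        err = map_iommu(iommu, base);   /* request_mem_region + ioremap */
        if (err)
                return err;

        /* ... program the unit via iommu->reg ... */

        unmap_iommu(iommu);             /* iounmap + release_mem_region */
        return 0;
}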
diff --git a/drivers/iommu/intel_irq_remapping.c b/drivers/iommu/intel_irq_remapping.c
index 6d347064b8b0..e0b18f3ae9a8 100644
--- a/drivers/iommu/intel_irq_remapping.c
+++ b/drivers/iommu/intel_irq_remapping.c
@@ -902,7 +902,6 @@ static int intel_setup_ioapic_entry(int irq,
return 0;
}
-#ifdef CONFIG_SMP
/*
* Migrate the IO-APIC irq in the presence of intr-remapping.
*
@@ -924,6 +923,10 @@ intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
struct irq_cfg *cfg = data->chip_data;
unsigned int dest, irq = data->irq;
struct irte irte;
+ int err;
+
+ if (!config_enabled(CONFIG_SMP))
+ return -EINVAL;
if (!cpumask_intersects(mask, cpu_online_mask))
return -EINVAL;
@@ -931,10 +934,16 @@ intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
if (get_irte(irq, &irte))
return -EBUSY;
- if (assign_irq_vector(irq, cfg, mask))
- return -EBUSY;
+ err = assign_irq_vector(irq, cfg, mask);
+ if (err)
+ return err;
- dest = apic->cpu_mask_to_apicid_and(cfg->domain, mask);
+ err = apic->cpu_mask_to_apicid_and(cfg->domain, mask, &dest);
+ if (err) {
+ if (assign_irq_vector(irq, cfg, data->affinity))
+ pr_err("Failed to recover vector for irq %d\n", irq);
+ return err;
+ }
irte.vector = cfg->vector;
irte.dest_id = IRTE_DEST(dest);
@@ -956,7 +965,6 @@ intel_ioapic_set_affinity(struct irq_data *data, const struct cpumask *mask,
cpumask_copy(data->affinity, mask);
return 0;
}
-#endif
static void intel_compose_msi_msg(struct pci_dev *pdev,
unsigned int irq, unsigned int dest,
@@ -1058,9 +1066,7 @@ struct irq_remap_ops intel_irq_remap_ops = {
.reenable = reenable_irq_remapping,
.enable_faulting = enable_drhd_fault_handling,
.setup_ioapic_entry = intel_setup_ioapic_entry,
-#ifdef CONFIG_SMP
.set_affinity = intel_ioapic_set_affinity,
-#endif
.free_irq = free_irte,
.compose_msi_msg = intel_compose_msi_msg,
.msi_alloc_irq = intel_msi_alloc_irq,
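
This is part of a series replacing #ifdef CONFIG_SMP blocks with config_enabled() checks (see also irq_remapping.c below): the code is always compiled and type-checked, and on UP kernels the constant-false condition lets the compiler discard it. The technique in isolation:

#include <linux/irq.h>
#include <linux/kconfig.h>

/* config_enabled(CONFIG_SMP) evaluates to a compile-time 0 or 1, so on
 * UP builds the body below is dead code the optimizer removes -- but,
 * unlike an #ifdef'd-out block, it never silently bit-rots. */
static int set_affinity_sketch(struct irq_data *data,
                               const struct cpumask *mask, bool force)
{
        if (!config_enabled(CONFIG_SMP))
                return -EINVAL;

        /* SMP-only affinity handling would go here */
        return 0;
}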
diff --git a/drivers/iommu/iommu.c b/drivers/iommu/iommu.c
index 2198b2dbbcd3..8b9ded88e6f5 100644
--- a/drivers/iommu/iommu.c
+++ b/drivers/iommu/iommu.c
@@ -119,6 +119,7 @@ EXPORT_SYMBOL_GPL(iommu_present);
* iommu_set_fault_handler() - set a fault handler for an iommu domain
* @domain: iommu domain
* @handler: fault handler
+ * @token: user data, will be passed back to the fault handler
*
* This function should be used by IOMMU users which want to be notified
* whenever an IOMMU fault happens.
@@ -127,11 +128,13 @@ EXPORT_SYMBOL_GPL(iommu_present);
* The fault handler itself should return 0 on success, and an appropriate
* error code otherwise.
*/
void iommu_set_fault_handler(struct iommu_domain *domain,
- iommu_fault_handler_t handler)
+ iommu_fault_handler_t handler,
+ void *token)
{
BUG_ON(!domain);
domain->handler = handler;
+ domain->handler_token = token;
}
EXPORT_SYMBOL_GPL(iommu_set_fault_handler);
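
The new @token parameter lets a driver hand its private state to the core and get it back on every fault, avoiding global lookups in the handler. A hedged usage sketch (iommu_fault_handler_t is assumed to gain a matching void *token argument in the same series; my_driver_data is hypothetical):

static int my_fault_handler(struct iommu_domain *domain, struct device *dev,
                            unsigned long iova, int flags, void *token)
{
        struct my_driver_data *priv = token;    /* cookie from registration */

        dev_err(priv->dev, "unhandled iommu fault at iova 0x%lx\n", iova);
        return -ENOSYS;                         /* not handled; let the core report it */
}

static void my_driver_init_domain(struct iommu_domain *domain,
                                  struct my_driver_data *priv)
{
        iommu_set_fault_handler(domain, my_fault_handler, priv);
}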
diff --git a/drivers/iommu/irq_remapping.c b/drivers/iommu/irq_remapping.c
index 40cda8e98d87..1d29b1c66e72 100644
--- a/drivers/iommu/irq_remapping.c
+++ b/drivers/iommu/irq_remapping.c
@@ -111,16 +111,15 @@ int setup_ioapic_remapped_entry(int irq,
vector, attr);
}
-#ifdef CONFIG_SMP
int set_remapped_irq_affinity(struct irq_data *data, const struct cpumask *mask,
bool force)
{
- if (!remap_ops || !remap_ops->set_affinity)
+ if (!config_enabled(CONFIG_SMP) || !remap_ops ||
+ !remap_ops->set_affinity)
return 0;
return remap_ops->set_affinity(data, mask, force);
}
-#endif
void free_remapped_irq(int irq)
{
diff --git a/drivers/iommu/irq_remapping.h b/drivers/iommu/irq_remapping.h
index be9d72950c51..b12974cc1dfe 100644
--- a/drivers/iommu/irq_remapping.h
+++ b/drivers/iommu/irq_remapping.h
@@ -59,11 +59,9 @@ struct irq_remap_ops {
unsigned int, int,
struct io_apic_irq_attr *);
-#ifdef CONFIG_SMP
/* Set the CPU affinity of a remapped interrupt */
int (*set_affinity)(struct irq_data *data, const struct cpumask *mask,
bool force);
-#endif
/* Free an IRQ */
int (*free_irq)(int);
diff --git a/drivers/iommu/omap-iommu.c b/drivers/iommu/omap-iommu.c
index 6899dcd02dfa..e70ee2b59df9 100644
--- a/drivers/iommu/omap-iommu.c
+++ b/drivers/iommu/omap-iommu.c
@@ -41,11 +41,13 @@
* @pgtable: the page table
* @iommu_dev: an omap iommu device attached to this domain. only a single
* iommu device can be attached for now.
+ * @dev: Device using this domain.
* @lock: domain lock, should be taken when attaching/detaching
*/
struct omap_iommu_domain {
u32 *pgtable;
struct omap_iommu *iommu_dev;
+ struct device *dev;
spinlock_t lock;
};
@@ -1081,6 +1083,7 @@ omap_iommu_attach_dev(struct iommu_domain *domain, struct device *dev)
}
omap_domain->iommu_dev = arch_data->iommu_dev = oiommu;
+ omap_domain->dev = dev;
oiommu->domain = domain;
out:
@@ -1088,19 +1091,16 @@ out:
return ret;
}
-static void omap_iommu_detach_dev(struct iommu_domain *domain,
- struct device *dev)
+static void _omap_iommu_detach_dev(struct omap_iommu_domain *omap_domain,
+ struct device *dev)
{
- struct omap_iommu_domain *omap_domain = domain->priv;
- struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
struct omap_iommu *oiommu = dev_to_omap_iommu(dev);
-
- spin_lock(&omap_domain->lock);
+ struct omap_iommu_arch_data *arch_data = dev->archdata.iommu;
/* only a single device is supported per domain for now */
if (omap_domain->iommu_dev != oiommu) {
dev_err(dev, "invalid iommu device\n");
- goto out;
+ return;
}
iopgtable_clear_entry_all(oiommu);
@@ -1108,8 +1108,16 @@ static void omap_iommu_detach_dev(struct iommu_domain *domain,
omap_iommu_detach(oiommu);
omap_domain->iommu_dev = arch_data->iommu_dev = NULL;
+ omap_domain->dev = NULL;
+}
-out:
+static void omap_iommu_detach_dev(struct iommu_domain *domain,
+ struct device *dev)
+{
+ struct omap_iommu_domain *omap_domain = domain->priv;
+
+ spin_lock(&omap_domain->lock);
+ _omap_iommu_detach_dev(omap_domain, dev);
spin_unlock(&omap_domain->lock);
}
@@ -1148,13 +1156,19 @@ out:
return -ENOMEM;
}
-/* assume device was already detached */
static void omap_iommu_domain_destroy(struct iommu_domain *domain)
{
struct omap_iommu_domain *omap_domain = domain->priv;
domain->priv = NULL;
+ /*
+ * An iommu device may still be attached
+ * (currently, only one device can be attached); detach it.
+ */
+ if (omap_domain->iommu_dev)
+ _omap_iommu_detach_dev(omap_domain, omap_domain->dev);
+
kfree(omap_domain->pgtable);
kfree(omap_domain);
}
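
The refactoring splits detach into a public callback that takes the domain lock and an underscore-prefixed variant that leaves locking to the caller; omap_iommu_domain_destroy() calls the latter directly, presumably safe because nothing else can reach the domain at destroy time. The general shape of this locked/unlocked split, with hypothetical names:

static void _foo_detach(struct foo_domain *fd, struct device *dev)
{
        /* real detach work; the caller is responsible for locking */
}

static void foo_detach(struct iommu_domain *domain, struct device *dev)
{
        struct foo_domain *fd = domain->priv;

        spin_lock(&fd->lock);
        _foo_detach(fd, dev);
        spin_unlock(&fd->lock);
}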
diff --git a/drivers/iommu/tegra-gart.c b/drivers/iommu/tegra-gart.c
index 779306ee7b16..0c0a37792218 100644
--- a/drivers/iommu/tegra-gart.c
+++ b/drivers/iommu/tegra-gart.c
@@ -29,15 +29,17 @@
#include <linux/device.h>
#include <linux/io.h>
#include <linux/iommu.h>
+#include <linux/of.h>
#include <asm/cacheflush.h>
/* bitmap of the page sizes currently supported */
#define GART_IOMMU_PGSIZES (SZ_4K)
-#define GART_CONFIG 0x24
-#define GART_ENTRY_ADDR 0x28
-#define GART_ENTRY_DATA 0x2c
+#define GART_REG_BASE 0x24
+#define GART_CONFIG (0x24 - GART_REG_BASE)
+#define GART_ENTRY_ADDR (0x28 - GART_REG_BASE)
+#define GART_ENTRY_DATA (0x2c - GART_REG_BASE)
#define GART_ENTRY_PHYS_ADDR_VALID (1 << 31)
#define GART_PAGE_SHIFT 12
@@ -158,7 +160,7 @@ static int gart_iommu_attach_dev(struct iommu_domain *domain,
struct gart_client *client, *c;
int err = 0;
- gart = dev_get_drvdata(dev->parent);
+ gart = gart_handle;
if (!gart)
return -EINVAL;
domain->priv = gart;
@@ -422,6 +424,14 @@ const struct dev_pm_ops tegra_gart_pm_ops = {
.resume = tegra_gart_resume,
};
+#ifdef CONFIG_OF
+static struct of_device_id tegra_gart_of_match[] __devinitdata = {
+ { .compatible = "nvidia,tegra20-gart", },
+ { },
+};
+MODULE_DEVICE_TABLE(of, tegra_gart_of_match);
+#endif
+
static struct platform_driver tegra_gart_driver = {
.probe = tegra_gart_probe,
.remove = tegra_gart_remove,
@@ -429,6 +439,7 @@ static struct platform_driver tegra_gart_driver = {
.owner = THIS_MODULE,
.name = "tegra-gart",
.pm = &tegra_gart_pm_ops,
+ .of_match_table = of_match_ptr(tegra_gart_of_match),
},
};
@@ -448,4 +459,5 @@ module_exit(tegra_gart_exit);
MODULE_DESCRIPTION("IOMMU API for GART in Tegra20");
MODULE_AUTHOR("Hiroshi DOYU <hdoyu@nvidia.com>");
+MODULE_ALIAS("platform:tegra-gart");
MODULE_LICENSE("GPL v2");
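
of_match_ptr() is what keeps the non-DT build working here: with CONFIG_OF set it passes the table through, without it the reference collapses to NULL, so the table itself can stay inside the #ifdef CONFIG_OF guard above. The macro is roughly:

/* from <linux/of.h>, approximately: */
#ifdef CONFIG_OF
#define of_match_ptr(_ptr)      (_ptr)
#else
#define of_match_ptr(_ptr)      NULL
#endif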
diff --git a/drivers/iommu/tegra-smmu.c b/drivers/iommu/tegra-smmu.c
index eb93c821f592..3f3d09d560ea 100644
--- a/drivers/iommu/tegra-smmu.c
+++ b/drivers/iommu/tegra-smmu.c
@@ -550,13 +550,13 @@ static int alloc_pdir(struct smmu_as *as)
return 0;
as->pte_count = devm_kzalloc(smmu->dev,
- sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_KERNEL);
+ sizeof(as->pte_count[0]) * SMMU_PDIR_COUNT, GFP_ATOMIC);
if (!as->pte_count) {
dev_err(smmu->dev,
"failed to allocate smmu_device PTE cunters\n");
return -ENOMEM;
}
- as->pdir_page = alloc_page(GFP_KERNEL | __GFP_DMA);
+ as->pdir_page = alloc_page(GFP_ATOMIC | __GFP_DMA);
if (!as->pdir_page) {
dev_err(smmu->dev,
"failed to allocate smmu_device page directory\n");
@@ -733,7 +733,7 @@ static int smmu_iommu_attach_dev(struct iommu_domain *domain,
pr_info("Reserve \"page zero\" for AVP vectors using a common dummy\n");
}
- dev_dbg(smmu->dev, "%s is attached\n", dev_name(c->dev));
+ dev_dbg(smmu->dev, "%s is attached\n", dev_name(dev));
return 0;
err_client: