author     Ard Biesheuvel <ardb@kernel.org>    2022-10-20 15:54:33 +0200
committer  Ard Biesheuvel <ardb@kernel.org>    2023-09-11 08:13:17 +0000
commit     cf8e8658100d4eae80ce9b21f7a81cb024dd5057 (patch)
tree       31d3b640bebf97c33d354768fc44dfd532c2df81 /drivers/char
parent     a0334bf78b95532cec54f56b53e8ae1bfe7e1ca1 (diff)
arch: Remove Itanium (IA-64) architecture
The Itanium architecture is obsolete, and an informal survey [0] reveals that any residual use of Itanium hardware in production is mostly HP-UX or OpenVMS based. The use of Linux on Itanium appears to be limited to enthusiasts that occasionally boot a fresh Linux kernel to see whether things are still working as intended, and perhaps to churn out some distro packages that are rarely used in practice.

None of the original companies behind Itanium still produce or support any hardware or software for the architecture, and it is listed as 'Orphaned' in the MAINTAINERS file, as apparently, none of the engineers that contributed on behalf of those companies (nor anyone else, for that matter) have been willing to support or maintain the architecture upstream or even be responsible for applying the odd fix. The Intel firmware team removed all IA-64 support from the Tianocore/EDK2 reference implementation of EFI in 2018. (Itanium is the original architecture for which EFI was developed, and the way Linux supports it deviates significantly from other architectures.) Some distros, such as Debian and Gentoo, still maintain [unofficial] ia64 ports, but many have dropped support years ago.

While the argument is being made [1] that there is a 'for the common good' angle to being able to build and run existing projects such as the Grid Community Toolkit [2] on Itanium for interoperability testing, the fact remains that none of those projects are known to be deployed on Linux/ia64, and very few people actually have access to such a system in the first place. Even if there were ways imaginable in which Linux/ia64 could be put to good use today, what matters is whether anyone is actually doing that, and this does not appear to be the case.

There are no emulators widely available, and so boot testing Itanium is generally infeasible for ordinary contributors. GCC still supports IA-64 but its compile farm [3] no longer has any IA-64 machines. GLIBC would like to get rid of IA-64 [4] too because it would permit some overdue code cleanups. In summary, the benefits to the ecosystem of having IA-64 be part of it are mostly theoretical, whereas the maintenance overhead of keeping it supported is real.

So let's rip off the band aid, and remove the IA-64 arch code entirely. This follows the timeline proposed by the Debian/ia64 maintainer [5], which removes support in a controlled manner, leaving IA-64 in a known good state in the most recent LTS release. Other projects will follow once the kernel support is removed.

[0] https://lore.kernel.org/all/CAMj1kXFCMh_578jniKpUtx_j8ByHnt=s7S+yQ+vGbKt9ud7+kQ@mail.gmail.com/
[1] https://lore.kernel.org/all/0075883c-7c51-00f5-2c2d-5119c1820410@web.de/
[2] https://gridcf.org/gct-docs/latest/index.html
[3] https://cfarm.tetaneutral.net/machines/list/
[4] https://lore.kernel.org/all/87bkiilpc4.fsf@mid.deneb.enyo.de/
[5] https://lore.kernel.org/all/ff58a3e76e5102c94bb5946d99187b358def688a.camel@physik.fu-berlin.de/

Acked-by: Tony Luck <tony.luck@intel.com>
Signed-off-by: Ard Biesheuvel <ardb@kernel.org>
Diffstat (limited to 'drivers/char')
-rw-r--r--  drivers/char/Kconfig              4
-rw-r--r--  drivers/char/Makefile             1
-rw-r--r--  drivers/char/agp/Kconfig         16
-rw-r--r--  drivers/char/agp/Makefile         2
-rw-r--r--  drivers/char/agp/hp-agp.c       550
-rw-r--r--  drivers/char/agp/i460-agp.c     659
-rw-r--r--  drivers/char/hpet.c              30
-rw-r--r--  drivers/char/hw_random/Kconfig    2
-rw-r--r--  drivers/char/mem.c               12
-rw-r--r--  drivers/char/mspec.c            295
10 files changed, 4 insertions, 1567 deletions
diff --git a/drivers/char/Kconfig b/drivers/char/Kconfig
index 625af75833fc..7c8dd0abcfdf 100644
--- a/drivers/char/Kconfig
+++ b/drivers/char/Kconfig
@@ -348,7 +348,7 @@ config DEVPORT
device is similar to /dev/mem, but for I/O ports.
config HPET
- bool "HPET - High Precision Event Timer" if (X86 || IA64)
+ bool "HPET - High Precision Event Timer" if X86
default n
depends on ACPI
help
@@ -377,7 +377,7 @@ config HPET_MMAP_DEFAULT
config HANGCHECK_TIMER
tristate "Hangcheck timer"
- depends on X86 || IA64 || PPC64 || S390
+ depends on X86 || PPC64 || S390
help
The hangcheck-timer module detects when the system has gone
out to lunch past a certain margin. It can reboot the system
diff --git a/drivers/char/Makefile b/drivers/char/Makefile
index c5f532e412f1..e9b360cdc99a 100644
--- a/drivers/char/Makefile
+++ b/drivers/char/Makefile
@@ -8,7 +8,6 @@ obj-$(CONFIG_TTY_PRINTK) += ttyprintk.o
obj-y += misc.o
obj-$(CONFIG_ATARI_DSP56K) += dsp56k.o
obj-$(CONFIG_VIRTIO_CONSOLE) += virtio_console.o
-obj-$(CONFIG_MSPEC) += mspec.o
obj-$(CONFIG_UV_MMTIMER) += uv_mmtimer.o
obj-$(CONFIG_IBM_BSR) += bsr.o
diff --git a/drivers/char/agp/Kconfig b/drivers/char/agp/Kconfig
index 4f501e4842ab..c47eb7bf06d4 100644
--- a/drivers/char/agp/Kconfig
+++ b/drivers/char/agp/Kconfig
@@ -1,7 +1,7 @@
# SPDX-License-Identifier: GPL-2.0
menuconfig AGP
tristate "/dev/agpgart (AGP Support)"
- depends on ALPHA || IA64 || PARISC || PPC || X86
+ depends on ALPHA || PARISC || PPC || X86
depends on PCI
help
AGP (Accelerated Graphics Port) is a bus system mainly used to
@@ -109,20 +109,6 @@ config AGP_VIA
This option gives you AGP support for the GLX component of
X on VIA MVP3/Apollo Pro chipsets.
-config AGP_I460
- tristate "Intel 460GX chipset support"
- depends on AGP && IA64
- help
- This option gives you AGP GART support for the Intel 460GX chipset
- for IA64 processors.
-
-config AGP_HP_ZX1
- tristate "HP ZX1 chipset AGP support"
- depends on AGP && IA64
- help
- This option gives you AGP GART support for the HP ZX1 chipset
- for IA64 processors.
-
config AGP_PARISC
tristate "HP Quicksilver AGP support"
depends on AGP && PARISC && 64BIT && IOMMU_SBA
diff --git a/drivers/char/agp/Makefile b/drivers/char/agp/Makefile
index 90ed8c789e48..25834557e486 100644
--- a/drivers/char/agp/Makefile
+++ b/drivers/char/agp/Makefile
@@ -14,9 +14,7 @@ obj-$(CONFIG_AGP_AMD) += amd-k7-agp.o
obj-$(CONFIG_AGP_AMD64) += amd64-agp.o
obj-$(CONFIG_AGP_ALPHA_CORE) += alpha-agp.o
obj-$(CONFIG_AGP_EFFICEON) += efficeon-agp.o
-obj-$(CONFIG_AGP_HP_ZX1) += hp-agp.o
obj-$(CONFIG_AGP_PARISC) += parisc-agp.o
-obj-$(CONFIG_AGP_I460) += i460-agp.o
obj-$(CONFIG_AGP_INTEL) += intel-agp.o
obj-$(CONFIG_INTEL_GTT) += intel-gtt.o
obj-$(CONFIG_AGP_NVIDIA) += nvidia-agp.o
diff --git a/drivers/char/agp/hp-agp.c b/drivers/char/agp/hp-agp.c
deleted file mode 100644
index 84d9adbb62f6..000000000000
--- a/drivers/char/agp/hp-agp.c
+++ /dev/null
@@ -1,550 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * HP zx1 AGPGART routines.
- *
- * (c) Copyright 2002, 2003 Hewlett-Packard Development Company, L.P.
- * Bjorn Helgaas <bjorn.helgaas@hp.com>
- */
-
-#include <linux/acpi.h>
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/agp_backend.h>
-#include <linux/log2.h>
-#include <linux/slab.h>
-
-#include <asm/acpi-ext.h>
-
-#include "agp.h"
-
-#define HP_ZX1_IOC_OFFSET 0x1000 /* ACPI reports SBA, we want IOC */
-
-/* HP ZX1 IOC registers */
-#define HP_ZX1_IBASE 0x300
-#define HP_ZX1_IMASK 0x308
-#define HP_ZX1_PCOM 0x310
-#define HP_ZX1_TCNFG 0x318
-#define HP_ZX1_PDIR_BASE 0x320
-
-#define HP_ZX1_IOVA_BASE GB(1UL)
-#define HP_ZX1_IOVA_SIZE GB(1UL)
-#define HP_ZX1_GART_SIZE (HP_ZX1_IOVA_SIZE / 2)
-#define HP_ZX1_SBA_IOMMU_COOKIE 0x0000badbadc0ffeeUL
-
-#define HP_ZX1_PDIR_VALID_BIT 0x8000000000000000UL
-#define HP_ZX1_IOVA_TO_PDIR(va) ((va - hp_private.iova_base) >> hp_private.io_tlb_shift)
-
-#define AGP8X_MODE_BIT 3
-#define AGP8X_MODE (1 << AGP8X_MODE_BIT)
-
-/* AGP bridge need not be PCI device, but DRM thinks it is. */
-static struct pci_dev fake_bridge_dev;
-
-static int hp_zx1_gart_found;
-
-static struct aper_size_info_fixed hp_zx1_sizes[] =
-{
- {0, 0, 0}, /* filled in by hp_zx1_fetch_size() */
-};
-
-static struct gatt_mask hp_zx1_masks[] =
-{
- {.mask = HP_ZX1_PDIR_VALID_BIT, .type = 0}
-};
-
-static struct _hp_private {
- volatile u8 __iomem *ioc_regs;
- volatile u8 __iomem *lba_regs;
- int lba_cap_offset;
- u64 *io_pdir; // PDIR for entire IOVA
- u64 *gatt; // PDIR just for GART (subset of above)
- u64 gatt_entries;
- u64 iova_base;
- u64 gart_base;
- u64 gart_size;
- u64 io_pdir_size;
- int io_pdir_owner; // do we own it, or share it with sba_iommu?
- int io_page_size;
- int io_tlb_shift;
- int io_tlb_ps; // IOC ps config
- int io_pages_per_kpage;
-} hp_private;
-
-static int __init hp_zx1_ioc_shared(void)
-{
- struct _hp_private *hp = &hp_private;
-
- printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR shared with sba_iommu\n");
-
- /*
- * IOC already configured by sba_iommu module; just use
- * its setup. We assume:
- * - IOVA space is 1Gb in size
- * - first 512Mb is IOMMU, second 512Mb is GART
- */
- hp->io_tlb_ps = readq(hp->ioc_regs+HP_ZX1_TCNFG);
- switch (hp->io_tlb_ps) {
- case 0: hp->io_tlb_shift = 12; break;
- case 1: hp->io_tlb_shift = 13; break;
- case 2: hp->io_tlb_shift = 14; break;
- case 3: hp->io_tlb_shift = 16; break;
- default:
- printk(KERN_ERR PFX "Invalid IOTLB page size "
- "configuration 0x%x\n", hp->io_tlb_ps);
- hp->gatt = NULL;
- hp->gatt_entries = 0;
- return -ENODEV;
- }
- hp->io_page_size = 1 << hp->io_tlb_shift;
- hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
-
- hp->iova_base = readq(hp->ioc_regs+HP_ZX1_IBASE) & ~0x1;
- hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE;
-
- hp->gart_size = HP_ZX1_GART_SIZE;
- hp->gatt_entries = hp->gart_size / hp->io_page_size;
-
- hp->io_pdir = phys_to_virt(readq(hp->ioc_regs+HP_ZX1_PDIR_BASE));
- hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
-
- if (hp->gatt[0] != HP_ZX1_SBA_IOMMU_COOKIE) {
- /* Normal case when no AGP device in system */
- hp->gatt = NULL;
- hp->gatt_entries = 0;
- printk(KERN_ERR PFX "No reserved IO PDIR entry found; "
- "GART disabled\n");
- return -ENODEV;
- }
-
- return 0;
-}
-
-static int __init
-hp_zx1_ioc_owner (void)
-{
- struct _hp_private *hp = &hp_private;
-
- printk(KERN_INFO PFX "HP ZX1 IOC: IOPDIR dedicated to GART\n");
-
- /*
- * Select an IOV page size no larger than system page size.
- */
- if (PAGE_SIZE >= KB(64)) {
- hp->io_tlb_shift = 16;
- hp->io_tlb_ps = 3;
- } else if (PAGE_SIZE >= KB(16)) {
- hp->io_tlb_shift = 14;
- hp->io_tlb_ps = 2;
- } else if (PAGE_SIZE >= KB(8)) {
- hp->io_tlb_shift = 13;
- hp->io_tlb_ps = 1;
- } else {
- hp->io_tlb_shift = 12;
- hp->io_tlb_ps = 0;
- }
- hp->io_page_size = 1 << hp->io_tlb_shift;
- hp->io_pages_per_kpage = PAGE_SIZE / hp->io_page_size;
-
- hp->iova_base = HP_ZX1_IOVA_BASE;
- hp->gart_size = HP_ZX1_GART_SIZE;
- hp->gart_base = hp->iova_base + HP_ZX1_IOVA_SIZE - hp->gart_size;
-
- hp->gatt_entries = hp->gart_size / hp->io_page_size;
- hp->io_pdir_size = (HP_ZX1_IOVA_SIZE / hp->io_page_size) * sizeof(u64);
-
- return 0;
-}
-
-static int __init
-hp_zx1_ioc_init (u64 hpa)
-{
- struct _hp_private *hp = &hp_private;
-
- hp->ioc_regs = ioremap(hpa, 1024);
- if (!hp->ioc_regs)
- return -ENOMEM;
-
- /*
- * If the IOTLB is currently disabled, we can take it over.
- * Otherwise, we have to share with sba_iommu.
- */
- hp->io_pdir_owner = (readq(hp->ioc_regs+HP_ZX1_IBASE) & 0x1) == 0;
-
- if (hp->io_pdir_owner)
- return hp_zx1_ioc_owner();
-
- return hp_zx1_ioc_shared();
-}
-
-static int
-hp_zx1_lba_find_capability (volatile u8 __iomem *hpa, int cap)
-{
- u16 status;
- u8 pos, id;
- int ttl = 48;
-
- status = readw(hpa+PCI_STATUS);
- if (!(status & PCI_STATUS_CAP_LIST))
- return 0;
- pos = readb(hpa+PCI_CAPABILITY_LIST);
- while (ttl-- && pos >= 0x40) {
- pos &= ~3;
- id = readb(hpa+pos+PCI_CAP_LIST_ID);
- if (id == 0xff)
- break;
- if (id == cap)
- return pos;
- pos = readb(hpa+pos+PCI_CAP_LIST_NEXT);
- }
- return 0;
-}
-
-static int __init
-hp_zx1_lba_init (u64 hpa)
-{
- struct _hp_private *hp = &hp_private;
- int cap;
-
- hp->lba_regs = ioremap(hpa, 256);
- if (!hp->lba_regs)
- return -ENOMEM;
-
- hp->lba_cap_offset = hp_zx1_lba_find_capability(hp->lba_regs, PCI_CAP_ID_AGP);
-
- cap = readl(hp->lba_regs+hp->lba_cap_offset) & 0xff;
- if (cap != PCI_CAP_ID_AGP) {
- printk(KERN_ERR PFX "Invalid capability ID 0x%02x at 0x%x\n",
- cap, hp->lba_cap_offset);
- iounmap(hp->lba_regs);
- return -ENODEV;
- }
-
- return 0;
-}
-
-static int
-hp_zx1_fetch_size(void)
-{
- int size;
-
- size = hp_private.gart_size / MB(1);
- hp_zx1_sizes[0].size = size;
- agp_bridge->current_size = (void *) &hp_zx1_sizes[0];
- return size;
-}
-
-static int
-hp_zx1_configure (void)
-{
- struct _hp_private *hp = &hp_private;
-
- agp_bridge->gart_bus_addr = hp->gart_base;
- agp_bridge->capndx = hp->lba_cap_offset;
- agp_bridge->mode = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
-
- if (hp->io_pdir_owner) {
- writel(virt_to_phys(hp->io_pdir), hp->ioc_regs+HP_ZX1_PDIR_BASE);
- readl(hp->ioc_regs+HP_ZX1_PDIR_BASE);
- writel(hp->io_tlb_ps, hp->ioc_regs+HP_ZX1_TCNFG);
- readl(hp->ioc_regs+HP_ZX1_TCNFG);
- writel((unsigned int)(~(HP_ZX1_IOVA_SIZE-1)), hp->ioc_regs+HP_ZX1_IMASK);
- readl(hp->ioc_regs+HP_ZX1_IMASK);
- writel(hp->iova_base|1, hp->ioc_regs+HP_ZX1_IBASE);
- readl(hp->ioc_regs+HP_ZX1_IBASE);
- writel(hp->iova_base|ilog2(HP_ZX1_IOVA_SIZE), hp->ioc_regs+HP_ZX1_PCOM);
- readl(hp->ioc_regs+HP_ZX1_PCOM);
- }
-
- return 0;
-}
-
-static void
-hp_zx1_cleanup (void)
-{
- struct _hp_private *hp = &hp_private;
-
- if (hp->ioc_regs) {
- if (hp->io_pdir_owner) {
- writeq(0, hp->ioc_regs+HP_ZX1_IBASE);
- readq(hp->ioc_regs+HP_ZX1_IBASE);
- }
- iounmap(hp->ioc_regs);
- }
- if (hp->lba_regs)
- iounmap(hp->lba_regs);
-}
-
-static void
-hp_zx1_tlbflush (struct agp_memory *mem)
-{
- struct _hp_private *hp = &hp_private;
-
- writeq(hp->gart_base | ilog2(hp->gart_size), hp->ioc_regs+HP_ZX1_PCOM);
- readq(hp->ioc_regs+HP_ZX1_PCOM);
-}
-
-static int
-hp_zx1_create_gatt_table (struct agp_bridge_data *bridge)
-{
- struct _hp_private *hp = &hp_private;
- int i;
-
- if (hp->io_pdir_owner) {
- hp->io_pdir = (u64 *) __get_free_pages(GFP_KERNEL,
- get_order(hp->io_pdir_size));
- if (!hp->io_pdir) {
- printk(KERN_ERR PFX "Couldn't allocate contiguous "
- "memory for I/O PDIR\n");
- hp->gatt = NULL;
- hp->gatt_entries = 0;
- return -ENOMEM;
- }
- memset(hp->io_pdir, 0, hp->io_pdir_size);
-
- hp->gatt = &hp->io_pdir[HP_ZX1_IOVA_TO_PDIR(hp->gart_base)];
- }
-
- for (i = 0; i < hp->gatt_entries; i++) {
- hp->gatt[i] = (unsigned long) agp_bridge->scratch_page;
- }
-
- return 0;
-}
-
-static int
-hp_zx1_free_gatt_table (struct agp_bridge_data *bridge)
-{
- struct _hp_private *hp = &hp_private;
-
- if (hp->io_pdir_owner)
- free_pages((unsigned long) hp->io_pdir,
- get_order(hp->io_pdir_size));
- else
- hp->gatt[0] = HP_ZX1_SBA_IOMMU_COOKIE;
- return 0;
-}
-
-static int
-hp_zx1_insert_memory (struct agp_memory *mem, off_t pg_start, int type)
-{
- struct _hp_private *hp = &hp_private;
- int i, k;
- off_t j, io_pg_start;
- int io_pg_count;
-
- if (type != mem->type ||
- agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
- return -EINVAL;
- }
-
- io_pg_start = hp->io_pages_per_kpage * pg_start;
- io_pg_count = hp->io_pages_per_kpage * mem->page_count;
- if ((io_pg_start + io_pg_count) > hp->gatt_entries) {
- return -EINVAL;
- }
-
- j = io_pg_start;
- while (j < (io_pg_start + io_pg_count)) {
- if (hp->gatt[j]) {
- return -EBUSY;
- }
- j++;
- }
-
- if (!mem->is_flushed) {
- global_cache_flush();
- mem->is_flushed = true;
- }
-
- for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
- unsigned long paddr;
-
- paddr = page_to_phys(mem->pages[i]);
- for (k = 0;
- k < hp->io_pages_per_kpage;
- k++, j++, paddr += hp->io_page_size) {
- hp->gatt[j] = HP_ZX1_PDIR_VALID_BIT | paddr;
- }
- }
-
- agp_bridge->driver->tlb_flush(mem);
- return 0;
-}
-
-static int
-hp_zx1_remove_memory (struct agp_memory *mem, off_t pg_start, int type)
-{
- struct _hp_private *hp = &hp_private;
- int i, io_pg_start, io_pg_count;
-
- if (type != mem->type ||
- agp_bridge->driver->agp_type_to_mask_type(agp_bridge, type)) {
- return -EINVAL;
- }
-
- io_pg_start = hp->io_pages_per_kpage * pg_start;
- io_pg_count = hp->io_pages_per_kpage * mem->page_count;
- for (i = io_pg_start; i < io_pg_count + io_pg_start; i++) {
- hp->gatt[i] = agp_bridge->scratch_page;
- }
-
- agp_bridge->driver->tlb_flush(mem);
- return 0;
-}
-
-static unsigned long
-hp_zx1_mask_memory (struct agp_bridge_data *bridge, dma_addr_t addr, int type)
-{
- return HP_ZX1_PDIR_VALID_BIT | addr;
-}
-
-static void
-hp_zx1_enable (struct agp_bridge_data *bridge, u32 mode)
-{
- struct _hp_private *hp = &hp_private;
- u32 command;
-
- command = readl(hp->lba_regs+hp->lba_cap_offset+PCI_AGP_STATUS);
- command = agp_collect_device_status(bridge, mode, command);
- command |= 0x00000100;
-
- writel(command, hp->lba_regs+hp->lba_cap_offset+PCI_AGP_COMMAND);
-
- agp_device_command(command, (mode & AGP8X_MODE) != 0);
-}
-
-const struct agp_bridge_driver hp_zx1_driver = {
- .owner = THIS_MODULE,
- .size_type = FIXED_APER_SIZE,
- .configure = hp_zx1_configure,
- .fetch_size = hp_zx1_fetch_size,
- .cleanup = hp_zx1_cleanup,
- .tlb_flush = hp_zx1_tlbflush,
- .mask_memory = hp_zx1_mask_memory,
- .masks = hp_zx1_masks,
- .agp_enable = hp_zx1_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = hp_zx1_create_gatt_table,
- .free_gatt_table = hp_zx1_free_gatt_table,
- .insert_memory = hp_zx1_insert_memory,
- .remove_memory = hp_zx1_remove_memory,
- .alloc_by_type = agp_generic_alloc_by_type,
- .free_by_type = agp_generic_free_by_type,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
- .agp_type_to_mask_type = agp_generic_type_to_mask_type,
- .cant_use_aperture = true,
-};
-
-static int __init
-hp_zx1_setup (u64 ioc_hpa, u64 lba_hpa)
-{
- struct agp_bridge_data *bridge;
- int error = 0;
-
- error = hp_zx1_ioc_init(ioc_hpa);
- if (error)
- goto fail;
-
- error = hp_zx1_lba_init(lba_hpa);
- if (error)
- goto fail;
-
- bridge = agp_alloc_bridge();
- if (!bridge) {
- error = -ENOMEM;
- goto fail;
- }
- bridge->driver = &hp_zx1_driver;
-
- fake_bridge_dev.vendor = PCI_VENDOR_ID_HP;
- fake_bridge_dev.device = PCI_DEVICE_ID_HP_PCIX_LBA;
- bridge->dev = &fake_bridge_dev;
-
- error = agp_add_bridge(bridge);
- fail:
- if (error)
- hp_zx1_cleanup();
- return error;
-}
-
-static acpi_status __init
-zx1_gart_probe (acpi_handle obj, u32 depth, void *context, void **ret)
-{
- acpi_handle handle, parent;
- acpi_status status;
- struct acpi_device_info *info;
- u64 lba_hpa, sba_hpa, length;
- int match;
-
- status = hp_acpi_csr_space(obj, &lba_hpa, &length);
- if (ACPI_FAILURE(status))
- return AE_OK; /* keep looking for another bridge */
-
- /* Look for an enclosing IOC scope and find its CSR space */
- handle = obj;
- do {
- status = acpi_get_object_info(handle, &info);
- if (ACPI_SUCCESS(status) && (info->valid & ACPI_VALID_HID)) {
- /* TBD check _CID also */
- match = (strcmp(info->hardware_id.string, "HWP0001") == 0);
- kfree(info);
- if (match) {
- status = hp_acpi_csr_space(handle, &sba_hpa, &length);
- if (ACPI_SUCCESS(status))
- break;
- else {
- printk(KERN_ERR PFX "Detected HP ZX1 "
- "AGP LBA but no IOC.\n");
- return AE_OK;
- }
- }
- }
-
- status = acpi_get_parent(handle, &parent);
- handle = parent;
- } while (ACPI_SUCCESS(status));
-
- if (ACPI_FAILURE(status))
- return AE_OK; /* found no enclosing IOC */
-
- if (hp_zx1_setup(sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa))
- return AE_OK;
-
- printk(KERN_INFO PFX "Detected HP ZX1 %s AGP chipset "
- "(ioc=%llx, lba=%llx)\n", (char *)context,
- sba_hpa + HP_ZX1_IOC_OFFSET, lba_hpa);
-
- hp_zx1_gart_found = 1;
- return AE_CTRL_TERMINATE; /* we only support one bridge; quit looking */
-}
-
-static int __init
-agp_hp_init (void)
-{
- if (agp_off)
- return -EINVAL;
-
- acpi_get_devices("HWP0003", zx1_gart_probe, "HWP0003", NULL);
- if (hp_zx1_gart_found)
- return 0;
-
- acpi_get_devices("HWP0007", zx1_gart_probe, "HWP0007", NULL);
- if (hp_zx1_gart_found)
- return 0;
-
- return -ENODEV;
-}
-
-static void __exit
-agp_hp_cleanup (void)
-{
-}
-
-module_init(agp_hp_init);
-module_exit(agp_hp_cleanup);
-
-MODULE_LICENSE("GPL and additional rights");
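
For anyone skimming the removed hp-agp.c above: the driver treats the GART as the upper half of a flat 1 GiB IOVA window, indexes the I/O page directory (PDIR) with HP_ZX1_IOVA_TO_PDIR(), and writes each entry as the page's physical address ORed with the valid bit. Below is a minimal, standalone sketch of that arithmetic using the constants from the deleted file; the io_tlb_shift value and sample physical address are illustrative assumptions, not values taken from the patch.

/* Standalone sketch of the removed HP zx1 PDIR arithmetic (not kernel code). */
#include <stdio.h>
#include <stdint.h>

#define HP_ZX1_IOVA_BASE       (1UL << 30)             /* 1 GiB, as in the driver */
#define HP_ZX1_IOVA_SIZE       (1UL << 30)
#define HP_ZX1_GART_SIZE       (HP_ZX1_IOVA_SIZE / 2)  /* GART is the upper 512 MiB */
#define HP_ZX1_PDIR_VALID_BIT  0x8000000000000000UL

/* Mirrors HP_ZX1_IOVA_TO_PDIR(): PDIR slot for a given I/O virtual address. */
static unsigned long iova_to_pdir(uint64_t iova, uint64_t iova_base, int io_tlb_shift)
{
	return (unsigned long)((iova - iova_base) >> io_tlb_shift);
}

int main(void)
{
	int io_tlb_shift = 12;                        /* 4 KiB I/O pages (io_tlb_ps == 0) */
	uint64_t gart_base = HP_ZX1_IOVA_BASE + HP_ZX1_IOVA_SIZE - HP_ZX1_GART_SIZE;
	uint64_t paddr = 0x12345000UL;                /* made-up physical page address */

	/* PDIR slot of the first aperture page and its entry, exactly as
	 * hp_zx1_insert_memory() would write it: valid bit | physical address. */
	unsigned long slot = iova_to_pdir(gart_base, HP_ZX1_IOVA_BASE, io_tlb_shift);
	uint64_t entry = HP_ZX1_PDIR_VALID_BIT | paddr;

	printf("gart_base=0x%llx first PDIR slot=%lu entry=0x%016llx\n",
	       (unsigned long long)gart_base, slot, (unsigned long long)entry);
	return 0;
}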
diff --git a/drivers/char/agp/i460-agp.c b/drivers/char/agp/i460-agp.c
deleted file mode 100644
index 15b240ea4848..000000000000
--- a/drivers/char/agp/i460-agp.c
+++ /dev/null
@@ -1,659 +0,0 @@
-/*
- * For documentation on the i460 AGP interface, see Chapter 7 (AGP Subsystem) of
- * the "Intel 460GTX Chipset Software Developer's Manual":
- * http://www.intel.com/design/archives/itanium/downloads/248704.htm
- */
-/*
- * 460GX support by Chris Ahna <christopher.j.ahna@intel.com>
- * Clean up & simplification by David Mosberger-Tang <davidm@hpl.hp.com>
- */
-#include <linux/module.h>
-#include <linux/pci.h>
-#include <linux/init.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/agp_backend.h>
-#include <linux/log2.h>
-
-#include "agp.h"
-
-#define INTEL_I460_BAPBASE 0x98
-#define INTEL_I460_GXBCTL 0xa0
-#define INTEL_I460_AGPSIZ 0xa2
-#define INTEL_I460_ATTBASE 0xfe200000
-#define INTEL_I460_GATT_VALID (1UL << 24)
-#define INTEL_I460_GATT_COHERENT (1UL << 25)
-
-/*
- * The i460 can operate with large (4MB) pages, but there is no sane way to support this
- * within the current kernel/DRM environment, so we disable the relevant code for now.
- * See also comments in ia64_alloc_page()...
- */
-#define I460_LARGE_IO_PAGES 0
-
-#if I460_LARGE_IO_PAGES
-# define I460_IO_PAGE_SHIFT i460.io_page_shift
-#else
-# define I460_IO_PAGE_SHIFT 12
-#endif
-
-#define I460_IOPAGES_PER_KPAGE (PAGE_SIZE >> I460_IO_PAGE_SHIFT)
-#define I460_KPAGES_PER_IOPAGE (1 << (I460_IO_PAGE_SHIFT - PAGE_SHIFT))
-#define I460_SRAM_IO_DISABLE (1 << 4)
-#define I460_BAPBASE_ENABLE (1 << 3)
-#define I460_AGPSIZ_MASK 0x7
-#define I460_4M_PS (1 << 1)
-
-/* Control bits for Out-Of-GART coherency and Burst Write Combining */
-#define I460_GXBCTL_OOG (1UL << 0)
-#define I460_GXBCTL_BWC (1UL << 2)
-
-/*
- * gatt_table entries are 32-bits wide on the i460; the generic code ought to declare the
- * gatt_table and gatt_table_real pointers a "void *"...
- */
-#define RD_GATT(index) readl((u32 *) i460.gatt + (index))
-#define WR_GATT(index, val) writel((val), (u32 *) i460.gatt + (index))
-/*
- * The 460 spec says we have to read the last location written to make sure that all
- * writes have taken effect
- */
-#define WR_FLUSH_GATT(index) RD_GATT(index)
-
-static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
- dma_addr_t addr, int type);
-
-static struct {
- void *gatt; /* ioremap'd GATT area */
-
- /* i460 supports multiple GART page sizes, so GART pageshift is dynamic: */
- u8 io_page_shift;
-
- /* BIOS configures chipset to one of 2 possible apbase values: */
- u8 dynamic_apbase;
-
- /* structure for tracking partial use of 4MB GART pages: */
- struct lp_desc {
- unsigned long *alloced_map; /* bitmap of kernel-pages in use */
- int refcount; /* number of kernel pages using the large page */
- u64 paddr; /* physical address of large page */
- struct page *page; /* page pointer */
- } *lp_desc;
-} i460;
-
-static const struct aper_size_info_8 i460_sizes[3] =
-{
- /*
- * The 32GB aperture is only available with a 4M GART page size. Due to the
- * dynamic GART page size, we can't figure out page_order or num_entries until
- * runtime.
- */
- {32768, 0, 0, 4},
- {1024, 0, 0, 2},
- {256, 0, 0, 1}
-};
-
-static struct gatt_mask i460_masks[] =
-{
- {
- .mask = INTEL_I460_GATT_VALID | INTEL_I460_GATT_COHERENT,
- .type = 0
- }
-};
-
-static int i460_fetch_size (void)
-{
- int i;
- u8 temp;
- struct aper_size_info_8 *values;
-
- /* Determine the GART page size */
- pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &temp);
- i460.io_page_shift = (temp & I460_4M_PS) ? 22 : 12;
- pr_debug("i460_fetch_size: io_page_shift=%d\n", i460.io_page_shift);
-
- if (i460.io_page_shift != I460_IO_PAGE_SHIFT) {
- printk(KERN_ERR PFX
- "I/O (GART) page-size %luKB doesn't match expected "
- "size %luKB\n",
- 1UL << (i460.io_page_shift - 10),
- 1UL << (I460_IO_PAGE_SHIFT));
- return 0;
- }
-
- values = A_SIZE_8(agp_bridge->driver->aperture_sizes);
-
- pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp);
-
- /* Exit now if the IO drivers for the GART SRAMS are turned off */
- if (temp & I460_SRAM_IO_DISABLE) {
- printk(KERN_ERR PFX "GART SRAMS disabled on 460GX chipset\n");
- printk(KERN_ERR PFX "AGPGART operation not possible\n");
- return 0;
- }
-
- /* Make sure we don't try to create an 2 ^ 23 entry GATT */
- if ((i460.io_page_shift == 0) && ((temp & I460_AGPSIZ_MASK) == 4)) {
- printk(KERN_ERR PFX "We can't have a 32GB aperture with 4KB GART pages\n");
- return 0;
- }
-
- /* Determine the proper APBASE register */
- if (temp & I460_BAPBASE_ENABLE)
- i460.dynamic_apbase = INTEL_I460_BAPBASE;
- else
- i460.dynamic_apbase = AGP_APBASE;
-
- for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
- /*
- * Dynamically calculate the proper num_entries and page_order values for
- * the define aperture sizes. Take care not to shift off the end of
- * values[i].size.
- */
- values[i].num_entries = (values[i].size << 8) >> (I460_IO_PAGE_SHIFT - 12);
- values[i].page_order = ilog2((sizeof(u32)*values[i].num_entries) >> PAGE_SHIFT);
- }
-
- for (i = 0; i < agp_bridge->driver->num_aperture_sizes; i++) {
- /* Neglect control bits when matching up size_value */
- if ((temp & I460_AGPSIZ_MASK) == values[i].size_value) {
- agp_bridge->previous_size = agp_bridge->current_size = (void *) (values + i);
- agp_bridge->aperture_size_idx = i;
- return values[i].size;
- }
- }
-
- return 0;
-}
-
-/* There isn't anything to do here since 460 has no GART TLB. */
-static void i460_tlb_flush (struct agp_memory *mem)
-{
- return;
-}
-
-/*
- * This utility function is needed to prevent corruption of the control bits
- * which are stored along with the aperture size in 460's AGPSIZ register
- */
-static void i460_write_agpsiz (u8 size_value)
-{
- u8 temp;
-
- pci_read_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ, &temp);
- pci_write_config_byte(agp_bridge->dev, INTEL_I460_AGPSIZ,
- ((temp & ~I460_AGPSIZ_MASK) | size_value));
-}
-
-static void i460_cleanup (void)
-{
- struct aper_size_info_8 *previous_size;
-
- previous_size = A_SIZE_8(agp_bridge->previous_size);
- i460_write_agpsiz(previous_size->size_value);
-
- if (I460_IO_PAGE_SHIFT > PAGE_SHIFT)
- kfree(i460.lp_desc);
-}
-
-static int i460_configure (void)
-{
- union {
- u32 small[2];
- u64 large;
- } temp;
- size_t size;
- u8 scratch;
- struct aper_size_info_8 *current_size;
-
- temp.large = 0;
-
- current_size = A_SIZE_8(agp_bridge->current_size);
- i460_write_agpsiz(current_size->size_value);
-
- /*
- * Do the necessary rigmarole to read all eight bytes of APBASE.
- * This has to be done since the AGP aperture can be above 4GB on
- * 460 based systems.
- */
- pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase, &(temp.small[0]));
- pci_read_config_dword(agp_bridge->dev, i460.dynamic_apbase + 4, &(temp.small[1]));
-
- /* Clear BAR control bits */
- agp_bridge->gart_bus_addr = temp.large & ~((1UL << 3) - 1);
-
- pci_read_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL, &scratch);
- pci_write_config_byte(agp_bridge->dev, INTEL_I460_GXBCTL,
- (scratch & 0x02) | I460_GXBCTL_OOG | I460_GXBCTL_BWC);
-
- /*
- * Initialize partial allocation trackers if a GART page is bigger than a kernel
- * page.
- */
- if (I460_IO_PAGE_SHIFT > PAGE_SHIFT) {
- size = current_size->num_entries * sizeof(i460.lp_desc[0]);
- i460.lp_desc = kzalloc(size, GFP_KERNEL);
- if (!i460.lp_desc)
- return -ENOMEM;
- }
- return 0;
-}
-
-static int i460_create_gatt_table (struct agp_bridge_data *bridge)
-{
- int page_order, num_entries, i;
- void *temp;
-
- /*
- * Load up the fixed address of the GART SRAMS which hold our GATT table.
- */
- temp = agp_bridge->current_size;
- page_order = A_SIZE_8(temp)->page_order;
- num_entries = A_SIZE_8(temp)->num_entries;
-
- i460.gatt = ioremap(INTEL_I460_ATTBASE, PAGE_SIZE << page_order);
- if (!i460.gatt) {
- printk(KERN_ERR PFX "ioremap failed\n");
- return -ENOMEM;
- }
-
- /* These are no good, the should be removed from the agp_bridge strucure... */
- agp_bridge->gatt_table_real = NULL;
- agp_bridge->gatt_table = NULL;
- agp_bridge->gatt_bus_addr = 0;
-
- for (i = 0; i < num_entries; ++i)
- WR_GATT(i, 0);
- WR_FLUSH_GATT(i - 1);
- return 0;
-}
-
-static int i460_free_gatt_table (struct agp_bridge_data *bridge)
-{
- int num_entries, i;
- void *temp;
-
- temp = agp_bridge->current_size;
-
- num_entries = A_SIZE_8(temp)->num_entries;
-
- for (i = 0; i < num_entries; ++i)
- WR_GATT(i, 0);
- WR_FLUSH_GATT(num_entries - 1);
-
- iounmap(i460.gatt);
- return 0;
-}
-
-/*
- * The following functions are called when the I/O (GART) page size is smaller than
- * PAGE_SIZE.
- */
-
-static int i460_insert_memory_small_io_page (struct agp_memory *mem,
- off_t pg_start, int type)
-{
- unsigned long paddr, io_pg_start, io_page_size;
- int i, j, k, num_entries;
- void *temp;
-
- pr_debug("i460_insert_memory_small_io_page(mem=%p, pg_start=%ld, type=%d, paddr0=0x%lx)\n",
- mem, pg_start, type, page_to_phys(mem->pages[0]));
-
- if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
- return -EINVAL;
-
- io_pg_start = I460_IOPAGES_PER_KPAGE * pg_start;
-
- temp = agp_bridge->current_size;
- num_entries = A_SIZE_8(temp)->num_entries;
-
- if ((io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count) > num_entries) {
- printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
- return -EINVAL;
- }
-
- j = io_pg_start;
- while (j < (io_pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count)) {
- if (!PGE_EMPTY(agp_bridge, RD_GATT(j))) {
- pr_debug("i460_insert_memory_small_io_page: GATT[%d]=0x%x is busy\n",
- j, RD_GATT(j));
- return -EBUSY;
- }
- j++;
- }
-
- io_page_size = 1UL << I460_IO_PAGE_SHIFT;
- for (i = 0, j = io_pg_start; i < mem->page_count; i++) {
- paddr = page_to_phys(mem->pages[i]);
- for (k = 0; k < I460_IOPAGES_PER_KPAGE; k++, j++, paddr += io_page_size)
- WR_GATT(j, i460_mask_memory(agp_bridge, paddr, mem->type));
- }
- WR_FLUSH_GATT(j - 1);
- return 0;
-}
-
-static int i460_remove_memory_small_io_page(struct agp_memory *mem,
- off_t pg_start, int type)
-{
- int i;
-
- pr_debug("i460_remove_memory_small_io_page(mem=%p, pg_start=%ld, type=%d)\n",
- mem, pg_start, type);
-
- pg_start = I460_IOPAGES_PER_KPAGE * pg_start;
-
- for (i = pg_start; i < (pg_start + I460_IOPAGES_PER_KPAGE * mem->page_count); i++)
- WR_GATT(i, 0);
- WR_FLUSH_GATT(i - 1);
- return 0;
-}
-
-#if I460_LARGE_IO_PAGES
-
-/*
- * These functions are called when the I/O (GART) page size exceeds PAGE_SIZE.
- *
- * This situation is interesting since AGP memory allocations that are smaller than a
- * single GART page are possible. The i460.lp_desc array tracks partial allocation of the
- * large GART pages to work around this issue.
- *
- * i460.lp_desc[pg_num].refcount tracks the number of kernel pages in use within GART page
- * pg_num. i460.lp_desc[pg_num].paddr is the physical address of the large page and
- * i460.lp_desc[pg_num].alloced_map is a bitmap of kernel pages that are in use (allocated).
- */
-
-static int i460_alloc_large_page (struct lp_desc *lp)
-{
- unsigned long order = I460_IO_PAGE_SHIFT - PAGE_SHIFT;
- size_t map_size;
-
- lp->page = alloc_pages(GFP_KERNEL, order);
- if (!lp->page) {
- printk(KERN_ERR PFX "Couldn't alloc 4M GART page...\n");
- return -ENOMEM;
- }
-
- map_size = ((I460_KPAGES_PER_IOPAGE + BITS_PER_LONG - 1) & -BITS_PER_LONG)/8;
- lp->alloced_map = kzalloc(map_size, GFP_KERNEL);
- if (!lp->alloced_map) {
- __free_pages(lp->page, order);
- printk(KERN_ERR PFX "Out of memory, we're in trouble...\n");
- return -ENOMEM;
- }
-
- lp->paddr = page_to_phys(lp->page);
- lp->refcount = 0;
- atomic_add(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
- return 0;
-}
-
-static void i460_free_large_page (struct lp_desc *lp)
-{
- kfree(lp->alloced_map);
- lp->alloced_map = NULL;
-
- __free_pages(lp->page, I460_IO_PAGE_SHIFT - PAGE_SHIFT);
- atomic_sub(I460_KPAGES_PER_IOPAGE, &agp_bridge->current_memory_agp);
-}
-
-static int i460_insert_memory_large_io_page (struct agp_memory *mem,
- off_t pg_start, int type)
-{
- int i, start_offset, end_offset, idx, pg, num_entries;
- struct lp_desc *start, *end, *lp;
- void *temp;
-
- if (type >= AGP_USER_TYPES || mem->type >= AGP_USER_TYPES)
- return -EINVAL;
-
- temp = agp_bridge->current_size;
- num_entries = A_SIZE_8(temp)->num_entries;
-
- /* Figure out what pg_start means in terms of our large GART pages */
- start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE];
- end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];
- start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
- end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;
-
- if (end > i460.lp_desc + num_entries) {
- printk(KERN_ERR PFX "Looks like we're out of AGP memory\n");
- return -EINVAL;
- }
-
- /* Check if the requested region of the aperture is free */
- for (lp = start; lp <= end; ++lp) {
- if (!lp->alloced_map)
- continue; /* OK, the entire large page is available... */
-
- for (idx = ((lp == start) ? start_offset : 0);
- idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
- idx++)
- {
- if (test_bit(idx, lp->alloced_map))
- return -EBUSY;
- }
- }
-
- for (lp = start, i = 0; lp <= end; ++lp) {
- if (!lp->alloced_map) {
- /* Allocate new GART pages... */
- if (i460_alloc_large_page(lp) < 0)
- return -ENOMEM;
- pg = lp - i460.lp_desc;
- WR_GATT(pg, i460_mask_memory(agp_bridge,
- lp->paddr, 0));
- WR_FLUSH_GATT(pg);
- }
-
- for (idx = ((lp == start) ? start_offset : 0);
- idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
- idx++, i++)
- {
- mem->pages[i] = lp->page;
- __set_bit(idx, lp->alloced_map);
- ++lp->refcount;
- }
- }
- return 0;
-}
-
-static int i460_remove_memory_large_io_page (struct agp_memory *mem,
- off_t pg_start, int type)
-{
- int i, pg, start_offset, end_offset, idx, num_entries;
- struct lp_desc *start, *end, *lp;
- void *temp;
-
- temp = agp_bridge->current_size;
- num_entries = A_SIZE_8(temp)->num_entries;
-
- /* Figure out what pg_start means in terms of our large GART pages */
- start = &i460.lp_desc[pg_start / I460_KPAGES_PER_IOPAGE];
- end = &i460.lp_desc[(pg_start + mem->page_count - 1) / I460_KPAGES_PER_IOPAGE];
- start_offset = pg_start % I460_KPAGES_PER_IOPAGE;
- end_offset = (pg_start + mem->page_count - 1) % I460_KPAGES_PER_IOPAGE;
-
- for (i = 0, lp = start; lp <= end; ++lp) {
- for (idx = ((lp == start) ? start_offset : 0);
- idx < ((lp == end) ? (end_offset + 1) : I460_KPAGES_PER_IOPAGE);
- idx++, i++)
- {
- mem->pages[i] = NULL;
- __clear_bit(idx, lp->alloced_map);
- --lp->refcount;
- }
-
- /* Free GART pages if they are unused */
- if (lp->refcount == 0) {
- pg = lp - i460.lp_desc;
- WR_GATT(pg, 0);
- WR_FLUSH_GATT(pg);
- i460_free_large_page(lp);
- }
- }
- return 0;
-}
-
-/* Wrapper routines to call the approriate {small_io_page,large_io_page} function */
-
-static int i460_insert_memory (struct agp_memory *mem,
- off_t pg_start, int type)
-{
- if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
- return i460_insert_memory_small_io_page(mem, pg_start, type);
- else
- return i460_insert_memory_large_io_page(mem, pg_start, type);
-}
-
-static int i460_remove_memory (struct agp_memory *mem,
- off_t pg_start, int type)
-{
- if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT)
- return i460_remove_memory_small_io_page(mem, pg_start, type);
- else
- return i460_remove_memory_large_io_page(mem, pg_start, type);
-}
-
-/*
- * If the I/O (GART) page size is bigger than the kernel page size, we don't want to
- * allocate memory until we know where it is to be bound in the aperture (a
- * multi-kernel-page alloc might fit inside of an already allocated GART page).
- *
- * Let's just hope nobody counts on the allocated AGP memory being there before bind time
- * (I don't think current drivers do)...
- */
-static struct page *i460_alloc_page (struct agp_bridge_data *bridge)
-{
- void *page;
-
- if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
- page = agp_generic_alloc_page(agp_bridge);
- } else
- /* Returning NULL would cause problems */
- /* AK: really dubious code. */
- page = (void *)~0UL;
- return page;
-}
-
-static void i460_destroy_page (struct page *page, int flags)
-{
- if (I460_IO_PAGE_SHIFT <= PAGE_SHIFT) {
- agp_generic_destroy_page(page, flags);
- }
-}
-
-#endif /* I460_LARGE_IO_PAGES */
-
-static unsigned long i460_mask_memory (struct agp_bridge_data *bridge,
- dma_addr_t addr, int type)
-{
- /* Make sure the returned address is a valid GATT entry */
- return bridge->driver->masks[0].mask
- | (((addr & ~((1 << I460_IO_PAGE_SHIFT) - 1)) & 0xfffff000) >> 12);
-}
-
-const struct agp_bridge_driver intel_i460_driver = {
- .owner = THIS_MODULE,
- .aperture_sizes = i460_sizes,
- .size_type = U8_APER_SIZE,
- .num_aperture_sizes = 3,
- .configure = i460_configure,
- .fetch_size = i460_fetch_size,
- .cleanup = i460_cleanup,
- .tlb_flush = i460_tlb_flush,
- .mask_memory = i460_mask_memory,
- .masks = i460_masks,
- .agp_enable = agp_generic_enable,
- .cache_flush = global_cache_flush,
- .create_gatt_table = i460_create_gatt_table,
- .free_gatt_table = i460_free_gatt_table,
-#if I460_LARGE_IO_PAGES
- .insert_memory = i460_insert_memory,
- .remove_memory = i460_remove_memory,
- .agp_alloc_page = i460_alloc_page,
- .agp_destroy_page = i460_destroy_page,
-#else
- .insert_memory = i460_insert_memory_small_io_page,
- .remove_memory = i460_remove_memory_small_io_page,
- .agp_alloc_page = agp_generic_alloc_page,
- .agp_alloc_pages = agp_generic_alloc_pages,
- .agp_destroy_page = agp_generic_destroy_page,
- .agp_destroy_pages = agp_generic_destroy_pages,
-#endif
- .alloc_by_type = agp_generic_alloc_by_type,
- .free_by_type = agp_generic_free_by_type,
- .agp_type_to_mask_type = agp_generic_type_to_mask_type,
- .cant_use_aperture = true,
-};
-
-static int agp_intel_i460_probe(struct pci_dev *pdev,
- const struct pci_device_id *ent)
-{
- struct agp_bridge_data *bridge;
- u8 cap_ptr;
-
- cap_ptr = pci_find_capability(pdev, PCI_CAP_ID_AGP);
- if (!cap_ptr)
- return -ENODEV;
-
- bridge = agp_alloc_bridge();
- if (!bridge)
- return -ENOMEM;
-
- bridge->driver = &intel_i460_driver;
- bridge->dev = pdev;
- bridge->capndx = cap_ptr;
-
- printk(KERN_INFO PFX "Detected Intel 460GX chipset\n");
-
- pci_set_drvdata(pdev, bridge);
- return agp_add_bridge(bridge);
-}
-
-static void agp_intel_i460_remove(struct pci_dev *pdev)
-{
- struct agp_bridge_data *bridge = pci_get_drvdata(pdev);
-
- agp_remove_bridge(bridge);
- agp_put_bridge(bridge);
-}
-
-static struct pci_device_id agp_intel_i460_pci_table[] = {
- {
- .class = (PCI_CLASS_BRIDGE_HOST << 8),
- .class_mask = ~0,
- .vendor = PCI_VENDOR_ID_INTEL,
- .device = PCI_DEVICE_ID_INTEL_84460GX,
- .subvendor = PCI_ANY_ID,
- .subdevice = PCI_ANY_ID,
- },
- { }
-};
-
-MODULE_DEVICE_TABLE(pci, agp_intel_i460_pci_table);
-
-static struct pci_driver agp_intel_i460_pci_driver = {
- .name = "agpgart-intel-i460",
- .id_table = agp_intel_i460_pci_table,
- .probe = agp_intel_i460_probe,
- .remove = agp_intel_i460_remove,
-};
-
-static int __init agp_intel_i460_init(void)
-{
- if (agp_off)
- return -EINVAL;
- return pci_register_driver(&agp_intel_i460_pci_driver);
-}
-
-static void __exit agp_intel_i460_cleanup(void)
-{
- pci_unregister_driver(&agp_intel_i460_pci_driver);
-}
-
-module_init(agp_intel_i460_init);
-module_exit(agp_intel_i460_cleanup);
-
-MODULE_AUTHOR("Chris Ahna <Christopher.J.Ahna@intel.com>");
-MODULE_LICENSE("GPL and additional rights");
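
A note on the removed i460-agp.c: 460GX GATT entries are only 32 bits wide, so i460_mask_memory() packs a physical address by aligning it down to the GART page, keeping bits 31..12, shifting them into the low bits, and ORing in the VALID and COHERENT control bits. The following is a hedged, standalone restatement of that packing under the small-page (4 KiB) configuration; the sample address is made up.

/* Standalone sketch of the removed i460_mask_memory() entry packing. */
#include <stdio.h>
#include <stdint.h>

#define INTEL_I460_GATT_VALID     (1UL << 24)
#define INTEL_I460_GATT_COHERENT  (1UL << 25)
#define I460_IO_PAGE_SHIFT        12            /* 4 KiB GART pages (the non-4M case) */

static uint32_t i460_pack_gatt_entry(uint64_t addr)
{
	uint64_t mask = INTEL_I460_GATT_VALID | INTEL_I460_GATT_COHERENT;

	/* Align down to the GART page, keep bits 31..12, shift into the low 20 bits. */
	return (uint32_t)(mask |
	        (((addr & ~((1UL << I460_IO_PAGE_SHIFT) - 1)) & 0xfffff000UL) >> 12));
}

int main(void)
{
	uint64_t paddr = 0x0badf00dUL & ~0xfffUL;   /* made-up, page-aligned physical address */

	printf("GATT entry for 0x%llx = 0x%08x\n",
	       (unsigned long long)paddr, (unsigned)i460_pack_gatt_entry(paddr));
	return 0;
}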
diff --git a/drivers/char/hpet.c b/drivers/char/hpet.c
index ee71376f174b..3b2159416e62 100644
--- a/drivers/char/hpet.c
+++ b/drivers/char/hpet.c
@@ -64,25 +64,6 @@
static DEFINE_MUTEX(hpet_mutex); /* replaces BKL */
static u32 hpet_nhpet, hpet_max_freq = HPET_USER_FREQ;
-/* This clocksource driver currently only works on ia64 */
-#ifdef CONFIG_IA64
-static void __iomem *hpet_mctr;
-
-static u64 read_hpet(struct clocksource *cs)
-{
- return (u64)read_counter((void __iomem *)hpet_mctr);
-}
-
-static struct clocksource clocksource_hpet = {
- .name = "hpet",
- .rating = 250,
- .read = read_hpet,
- .mask = CLOCKSOURCE_MASK(64),
- .flags = CLOCK_SOURCE_IS_CONTINUOUS,
-};
-static struct clocksource *hpet_clocksource;
-#endif
-
/* A lock for concurrent access by app and isr hpet activity. */
static DEFINE_SPINLOCK(hpet_lock);
@@ -907,17 +888,6 @@ int hpet_alloc(struct hpet_data *hdp)
hpetp->hp_delta = hpet_calibrate(hpetp);
-/* This clocksource driver currently only works on ia64 */
-#ifdef CONFIG_IA64
- if (!hpet_clocksource) {
- hpet_mctr = (void __iomem *)&hpetp->hp_hpet->hpet_mc;
- clocksource_hpet.archdata.fsys_mmio = hpet_mctr;
- clocksource_register_hz(&clocksource_hpet, hpetp->hp_tick_freq);
- hpetp->hp_clocksource = &clocksource_hpet;
- hpet_clocksource = &clocksource_hpet;
- }
-#endif
-
return 0;
}
diff --git a/drivers/char/hw_random/Kconfig b/drivers/char/hw_random/Kconfig
index 8de74dcfa18c..442c40efb200 100644
--- a/drivers/char/hw_random/Kconfig
+++ b/drivers/char/hw_random/Kconfig
@@ -37,7 +37,7 @@ config HW_RANDOM_TIMERIOMEM
config HW_RANDOM_INTEL
tristate "Intel HW Random Number Generator support"
- depends on (X86 || IA64 || COMPILE_TEST) && PCI
+ depends on (X86 || COMPILE_TEST) && PCI
default HW_RANDOM
help
This driver provides kernel-side support for the Random Number
diff --git a/drivers/char/mem.c b/drivers/char/mem.c
index 1052b0f2d4cf..8d27aa6b5b50 100644
--- a/drivers/char/mem.c
+++ b/drivers/char/mem.c
@@ -31,10 +31,6 @@
#include <linux/uaccess.h>
#include <linux/security.h>
-#ifdef CONFIG_IA64
-# include <linux/efi.h>
-#endif
-
#define DEVMEM_MINOR 1
#define DEVPORT_MINOR 4
@@ -277,13 +273,6 @@ int __weak phys_mem_access_prot_allowed(struct file *file,
#ifdef pgprot_noncached
static int uncached_access(struct file *file, phys_addr_t addr)
{
-#if defined(CONFIG_IA64)
- /*
- * On ia64, we ignore O_DSYNC because we cannot tolerate memory
- * attribute aliases.
- */
- return !(efi_mem_attributes(addr) & EFI_MEMORY_WB);
-#else
/*
* Accessing memory above the top the kernel knows about or through a
* file pointer
@@ -292,7 +281,6 @@ static int uncached_access(struct file *file, phys_addr_t addr)
if (file->f_flags & O_DSYNC)
return 1;
return addr >= __pa(high_memory);
-#endif
}
#endif
diff --git a/drivers/char/mspec.c b/drivers/char/mspec.c
deleted file mode 100644
index b35f651837c8..000000000000
--- a/drivers/char/mspec.c
+++ /dev/null
@@ -1,295 +0,0 @@
-// SPDX-License-Identifier: GPL-2.0-only
-/*
- * Copyright (C) 2001-2006 Silicon Graphics, Inc. All rights
- * reserved.
- */
-
-/*
- * SN Platform Special Memory (mspec) Support
- *
- * This driver exports the SN special memory (mspec) facility to user
- * processes.
- * There are two types of memory made available thru this driver:
- * uncached and cached.
- *
- * Uncached are used for memory write combining feature of the ia64
- * cpu.
- *
- * Cached are used for areas of memory that are used as cached addresses
- * on our partition and used as uncached addresses from other partitions.
- * Due to a design constraint of the SN2 Shub, you can not have processors
- * on the same FSB perform both a cached and uncached reference to the
- * same cache line. These special memory cached regions prevent the
- * kernel from ever dropping in a TLB entry and therefore prevent the
- * processor from ever speculating a cache line from this page.
- */
-
-#include <linux/types.h>
-#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/init.h>
-#include <linux/errno.h>
-#include <linux/miscdevice.h>
-#include <linux/spinlock.h>
-#include <linux/mm.h>
-#include <linux/fs.h>
-#include <linux/vmalloc.h>
-#include <linux/string.h>
-#include <linux/slab.h>
-#include <linux/numa.h>
-#include <linux/refcount.h>
-#include <asm/page.h>
-#include <linux/atomic.h>
-#include <asm/tlbflush.h>
-#include <asm/uncached.h>
-
-
-#define CACHED_ID "Cached,"
-#define UNCACHED_ID "Uncached"
-#define REVISION "4.0"
-#define MSPEC_BASENAME "mspec"
-
-/*
- * Page types allocated by the device.
- */
-enum mspec_page_type {
- MSPEC_CACHED = 2,
- MSPEC_UNCACHED
-};
-
-/*
- * One of these structures is allocated when an mspec region is mmaped. The
- * structure is pointed to by the vma->vm_private_data field in the vma struct.
- * This structure is used to record the addresses of the mspec pages.
- * This structure is shared by all vma's that are split off from the
- * original vma when split_vma()'s are done.
- *
- * The refcnt is incremented atomically because mm->mmap_lock does not
- * protect in fork case where multiple tasks share the vma_data.
- */
-struct vma_data {
- refcount_t refcnt; /* Number of vmas sharing the data. */
- spinlock_t lock; /* Serialize access to this structure. */
- int count; /* Number of pages allocated. */
- enum mspec_page_type type; /* Type of pages allocated. */
- unsigned long vm_start; /* Original (unsplit) base. */
- unsigned long vm_end; /* Original (unsplit) end. */
- unsigned long maddr[]; /* Array of MSPEC addresses. */
-};
-
-/*
- * mspec_open
- *
- * Called when a device mapping is created by a means other than mmap
- * (via fork, munmap, etc.). Increments the reference count on the
- * underlying mspec data so it is not freed prematurely.
- */
-static void
-mspec_open(struct vm_area_struct *vma)
-{
- struct vma_data *vdata;
-
- vdata = vma->vm_private_data;
- refcount_inc(&vdata->refcnt);
-}
-
-/*
- * mspec_close
- *
- * Called when unmapping a device mapping. Frees all mspec pages
- * belonging to all the vma's sharing this vma_data structure.
- */
-static void
-mspec_close(struct vm_area_struct *vma)
-{
- struct vma_data *vdata;
- int index, last_index;
- unsigned long my_page;
-
- vdata = vma->vm_private_data;
-
- if (!refcount_dec_and_test(&vdata->refcnt))
- return;
-
- last_index = (vdata->vm_end - vdata->vm_start) >> PAGE_SHIFT;
- for (index = 0; index < last_index; index++) {
- if (vdata->maddr[index] == 0)
- continue;
- /*
- * Clear the page before sticking it back
- * into the pool.
- */
- my_page = vdata->maddr[index];
- vdata->maddr[index] = 0;
- memset((char *)my_page, 0, PAGE_SIZE);
- uncached_free_page(my_page, 1);
- }
-
- kvfree(vdata);
-}
-
-/*
- * mspec_fault
- *
- * Creates a mspec page and maps it to user space.
- */
-static vm_fault_t
-mspec_fault(struct vm_fault *vmf)
-{
- unsigned long paddr, maddr;
- unsigned long pfn;
- pgoff_t index = vmf->pgoff;
- struct vma_data *vdata = vmf->vma->vm_private_data;
-
- maddr = (volatile unsigned long) vdata->maddr[index];
- if (maddr == 0) {
- maddr = uncached_alloc_page(numa_node_id(), 1);
- if (maddr == 0)
- return VM_FAULT_OOM;
-
- spin_lock(&vdata->lock);
- if (vdata->maddr[index] == 0) {
- vdata->count++;
- vdata->maddr[index] = maddr;
- } else {
- uncached_free_page(maddr, 1);
- maddr = vdata->maddr[index];
- }
- spin_unlock(&vdata->lock);
- }
-
- paddr = maddr & ~__IA64_UNCACHED_OFFSET;
- pfn = paddr >> PAGE_SHIFT;
-
- return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
-}
-
-static const struct vm_operations_struct mspec_vm_ops = {
- .open = mspec_open,
- .close = mspec_close,
- .fault = mspec_fault,
-};
-
-/*
- * mspec_mmap
- *
- * Called when mmapping the device. Initializes the vma with a fault handler
- * and private data structure necessary to allocate, track, and free the
- * underlying pages.
- */
-static int
-mspec_mmap(struct file *file, struct vm_area_struct *vma,
- enum mspec_page_type type)
-{
- struct vma_data *vdata;
- int pages, vdata_size;
-
- if (vma->vm_pgoff != 0)
- return -EINVAL;
-
- if ((vma->vm_flags & VM_SHARED) == 0)
- return -EINVAL;
-
- if ((vma->vm_flags & VM_WRITE) == 0)
- return -EPERM;
-
- pages = vma_pages(vma);
- vdata_size = sizeof(struct vma_data) + pages * sizeof(long);
- vdata = kvzalloc(vdata_size, GFP_KERNEL);
- if (!vdata)
- return -ENOMEM;
-
- vdata->vm_start = vma->vm_start;
- vdata->vm_end = vma->vm_end;
- vdata->type = type;
- spin_lock_init(&vdata->lock);
- refcount_set(&vdata->refcnt, 1);
- vma->vm_private_data = vdata;
-
- vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP);
- if (vdata->type == MSPEC_UNCACHED)
- vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
- vma->vm_ops = &mspec_vm_ops;
-
- return 0;
-}
-
-static int
-cached_mmap(struct file *file, struct vm_area_struct *vma)
-{
- return mspec_mmap(file, vma, MSPEC_CACHED);
-}
-
-static int
-uncached_mmap(struct file *file, struct vm_area_struct *vma)
-{
- return mspec_mmap(file, vma, MSPEC_UNCACHED);
-}
-
-static const struct file_operations cached_fops = {
- .owner = THIS_MODULE,
- .mmap = cached_mmap,
- .llseek = noop_llseek,
-};
-
-static struct miscdevice cached_miscdev = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "mspec_cached",
- .fops = &cached_fops
-};
-
-static const struct file_operations uncached_fops = {
- .owner = THIS_MODULE,
- .mmap = uncached_mmap,
- .llseek = noop_llseek,
-};
-
-static struct miscdevice uncached_miscdev = {
- .minor = MISC_DYNAMIC_MINOR,
- .name = "mspec_uncached",
- .fops = &uncached_fops
-};
-
-/*
- * mspec_init
- *
- * Called at boot time to initialize the mspec facility.
- */
-static int __init
-mspec_init(void)
-{
- int ret;
-
- ret = misc_register(&cached_miscdev);
- if (ret) {
- printk(KERN_ERR "%s: failed to register device %i\n",
- CACHED_ID, ret);
- return ret;
- }
- ret = misc_register(&uncached_miscdev);
- if (ret) {
- printk(KERN_ERR "%s: failed to register device %i\n",
- UNCACHED_ID, ret);
- misc_deregister(&cached_miscdev);
- return ret;
- }
-
- printk(KERN_INFO "%s %s initialized devices: %s %s\n",
- MSPEC_BASENAME, REVISION, CACHED_ID, UNCACHED_ID);
-
- return 0;
-}
-
-static void __exit
-mspec_exit(void)
-{
- misc_deregister(&uncached_miscdev);
- misc_deregister(&cached_miscdev);
-}
-
-module_init(mspec_init);
-module_exit(mspec_exit);
-
-MODULE_AUTHOR("Silicon Graphics, Inc. <linux-altix@sgi.com>");
-MODULE_DESCRIPTION("Driver for SGI SN special memory operations");
-MODULE_LICENSE("GPL");
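
The removed mspec.c fault handler is a textbook allocate-then-recheck pattern: it allocates an uncached page without holding vdata->lock, then takes the lock, and if another thread has already populated the slot it frees its own page and uses the winner's. Here is a user-space sketch of the same idiom; the pthread mutex and the fake_uncached_* helpers are illustrative stand-ins for the kernel primitives (spin_lock, uncached_alloc_page, uncached_free_page), not real APIs.

/* Standalone sketch of the lock pattern used by the removed mspec_fault(). */
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>

static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;
static unsigned long maddr_slot;                /* stands in for vdata->maddr[index] */

static unsigned long fake_uncached_alloc(void)  /* placeholder for uncached_alloc_page() */
{
	return (unsigned long)malloc(4096);
}

static void fake_uncached_free(unsigned long m) /* placeholder for uncached_free_page() */
{
	free((void *)m);
}

/* Returns the address bound to the slot, allocating it on first use. */
static unsigned long fault_slot(void)
{
	unsigned long maddr = maddr_slot;

	if (maddr == 0) {
		maddr = fake_uncached_alloc();  /* allocate outside the lock */
		if (maddr == 0)
			return 0;               /* VM_FAULT_OOM in the original */

		pthread_mutex_lock(&lock);
		if (maddr_slot == 0) {
			maddr_slot = maddr;     /* we won the race */
		} else {
			fake_uncached_free(maddr); /* someone else populated the slot */
			maddr = maddr_slot;
		}
		pthread_mutex_unlock(&lock);
	}
	return maddr;
}

int main(void)
{
	unsigned long a = fault_slot();         /* first use: allocates */
	unsigned long b = fault_slot();         /* second use: reuses the same page */

	printf("slot -> %#lx, reused -> %#lx\n", a, b);
	return 0;
}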