author     Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
committer  Linus Torvalds <torvalds@ppc970.osdl.org>  2005-04-16 15:20:36 -0700
commit     1da177e4c3f41524e886b7f1b8a0c1fc7321cac2 (patch)
tree       0bba044c4ce775e45a88a51686b5d9f90697ea9d /arch/alpha/kernel
Linux-2.6.12-rc2 (tag: v2.6.12-rc2)
Initial git repository build. I'm not bothering with the full history, even though we have it. We can create a separate "historical" git archive of that later if we want to, and in the meantime it's about 3.2GB when imported into git - space that would just make the early git days unnecessarily complicated, when we don't have a lot of good infrastructure for it.

Let it rip!
Diffstat (limited to 'arch/alpha/kernel')
-rw-r--r--  arch/alpha/kernel/Makefile  104
-rw-r--r--  arch/alpha/kernel/alpha_ksyms.c  235
-rw-r--r--  arch/alpha/kernel/asm-offsets.c  43
-rw-r--r--  arch/alpha/kernel/console.c  66
-rw-r--r--  arch/alpha/kernel/core_apecs.c  418
-rw-r--r--  arch/alpha/kernel/core_cia.c  1212
-rw-r--r--  arch/alpha/kernel/core_irongate.c  416
-rw-r--r--  arch/alpha/kernel/core_lca.c  515
-rw-r--r--  arch/alpha/kernel/core_marvel.c  1154
-rw-r--r--  arch/alpha/kernel/core_mcpcia.c  618
-rw-r--r--  arch/alpha/kernel/core_polaris.c  203
-rw-r--r--  arch/alpha/kernel/core_t2.c  622
-rw-r--r--  arch/alpha/kernel/core_titan.c  806
-rw-r--r--  arch/alpha/kernel/core_tsunami.c  459
-rw-r--r--  arch/alpha/kernel/core_wildfire.c  658
-rw-r--r--  arch/alpha/kernel/entry.S  957
-rw-r--r--  arch/alpha/kernel/err_common.c  321
-rw-r--r--  arch/alpha/kernel/err_ev6.c  274
-rw-r--r--  arch/alpha/kernel/err_ev7.c  289
-rw-r--r--  arch/alpha/kernel/err_impl.h  85
-rw-r--r--  arch/alpha/kernel/err_marvel.c  1159
-rw-r--r--  arch/alpha/kernel/err_titan.c  756
-rw-r--r--  arch/alpha/kernel/es1888.c  49
-rw-r--r--  arch/alpha/kernel/gct.c  48
-rw-r--r--  arch/alpha/kernel/head.S  99
-rw-r--r--  arch/alpha/kernel/init_task.c  23
-rw-r--r--  arch/alpha/kernel/io.c  630
-rw-r--r--  arch/alpha/kernel/irq.c  774
-rw-r--r--  arch/alpha/kernel/irq_alpha.c  252
-rw-r--r--  arch/alpha/kernel/irq_i8259.c  183
-rw-r--r--  arch/alpha/kernel/irq_impl.h  42
-rw-r--r--  arch/alpha/kernel/irq_pyxis.c  127
-rw-r--r--  arch/alpha/kernel/irq_srm.c  79
-rw-r--r--  arch/alpha/kernel/machvec_impl.h  150
-rw-r--r--  arch/alpha/kernel/module.c  311
-rw-r--r--  arch/alpha/kernel/ns87312.c  38
-rw-r--r--  arch/alpha/kernel/osf_sys.c  1345
-rw-r--r--  arch/alpha/kernel/pci-noop.c  214
-rw-r--r--  arch/alpha/kernel/pci.c  561
-rw-r--r--  arch/alpha/kernel/pci_impl.h  209
-rw-r--r--  arch/alpha/kernel/pci_iommu.c  971
-rw-r--r--  arch/alpha/kernel/process.c  528
-rw-r--r--  arch/alpha/kernel/proto.h  210
-rw-r--r--  arch/alpha/kernel/ptrace.c  415
-rw-r--r--  arch/alpha/kernel/semaphore.c  224
-rw-r--r--  arch/alpha/kernel/setup.c  1486
-rw-r--r--  arch/alpha/kernel/signal.c  672
-rw-r--r--  arch/alpha/kernel/smc37c669.c  2554
-rw-r--r--  arch/alpha/kernel/smc37c93x.c  277
-rw-r--r--  arch/alpha/kernel/smp.c  1163
-rw-r--r--  arch/alpha/kernel/srm_env.c  335
-rw-r--r--  arch/alpha/kernel/srmcons.c  326
-rw-r--r--  arch/alpha/kernel/sys_alcor.c  326
-rw-r--r--  arch/alpha/kernel/sys_cabriolet.c  448
-rw-r--r--  arch/alpha/kernel/sys_dp264.c  689
-rw-r--r--  arch/alpha/kernel/sys_eb64p.c  256
-rw-r--r--  arch/alpha/kernel/sys_eiger.c  242
-rw-r--r--  arch/alpha/kernel/sys_jensen.c  274
-rw-r--r--  arch/alpha/kernel/sys_marvel.c  499
-rw-r--r--  arch/alpha/kernel/sys_miata.c  289
-rw-r--r--  arch/alpha/kernel/sys_mikasa.c  265
-rw-r--r--  arch/alpha/kernel/sys_nautilus.c  269
-rw-r--r--  arch/alpha/kernel/sys_noritake.c  347
-rw-r--r--  arch/alpha/kernel/sys_rawhide.c  270
-rw-r--r--  arch/alpha/kernel/sys_ruffian.c  240
-rw-r--r--  arch/alpha/kernel/sys_rx164.c  220
-rw-r--r--  arch/alpha/kernel/sys_sable.c  653
-rw-r--r--  arch/alpha/kernel/sys_sio.c  438
-rw-r--r--  arch/alpha/kernel/sys_sx164.c  178
-rw-r--r--  arch/alpha/kernel/sys_takara.c  296
-rw-r--r--  arch/alpha/kernel/sys_titan.c  420
-rw-r--r--  arch/alpha/kernel/sys_wildfire.c  361
-rw-r--r--  arch/alpha/kernel/systbls.S  468
-rw-r--r--  arch/alpha/kernel/time.c  591
-rw-r--r--  arch/alpha/kernel/traps.c  1092
-rw-r--r--  arch/alpha/kernel/vmlinux.lds.S  149
76 files changed, 34945 insertions, 0 deletions
diff --git a/arch/alpha/kernel/Makefile b/arch/alpha/kernel/Makefile
new file mode 100644
index 000000000000..ab6fa54b3860
--- /dev/null
+++ b/arch/alpha/kernel/Makefile
@@ -0,0 +1,104 @@
+#
+# Makefile for the linux kernel.
+#
+
+extra-y := head.o vmlinux.lds
+EXTRA_AFLAGS := $(CFLAGS)
+EXTRA_CFLAGS := -Werror -Wno-sign-compare
+
+obj-y := entry.o traps.o process.o init_task.o osf_sys.o irq.o \
+ irq_alpha.o signal.o setup.o ptrace.o time.o semaphore.o \
+ alpha_ksyms.o systbls.o err_common.o io.o
+
+obj-$(CONFIG_VGA_HOSE) += console.o
+obj-$(CONFIG_SMP) += smp.o
+obj-$(CONFIG_PCI) += pci.o pci_iommu.o
+obj-$(CONFIG_SRM_ENV) += srm_env.o
+obj-$(CONFIG_MODULES) += module.o
+
+ifdef CONFIG_ALPHA_GENERIC
+
+obj-y += core_apecs.o core_cia.o core_irongate.o core_lca.o \
+ core_mcpcia.o core_polaris.o core_t2.o \
+ core_tsunami.o
+
+obj-y += sys_alcor.o sys_cabriolet.o sys_dp264.o sys_eb64p.o sys_eiger.o \
+ sys_jensen.o sys_miata.o sys_mikasa.o sys_nautilus.o \
+ sys_noritake.o sys_rawhide.o sys_ruffian.o sys_rx164.o \
+ sys_sable.o sys_sio.o sys_sx164.o sys_takara.o
+
+ifndef CONFIG_ALPHA_LEGACY_START_ADDRESS
+obj-y += core_marvel.o core_titan.o core_wildfire.o
+obj-y += sys_marvel.o sys_titan.o sys_wildfire.o
+obj-y += err_ev7.o err_titan.o err_marvel.o
+endif
+
+obj-y += irq_pyxis.o irq_i8259.o irq_srm.o
+obj-y += err_ev6.o
+obj-y += es1888.o smc37c669.o smc37c93x.o ns87312.o gct.o
+obj-y += srmcons.o
+
+else
+
+# Misc support
+obj-$(CONFIG_ALPHA_SRM) += srmcons.o
+
+# Core logic support
+obj-$(CONFIG_ALPHA_APECS) += core_apecs.o
+obj-$(CONFIG_ALPHA_CIA) += core_cia.o
+obj-$(CONFIG_ALPHA_IRONGATE) += core_irongate.o
+obj-$(CONFIG_ALPHA_LCA) += core_lca.o
+obj-$(CONFIG_ALPHA_MARVEL) += core_marvel.o gct.o
+obj-$(CONFIG_ALPHA_MCPCIA) += core_mcpcia.o
+obj-$(CONFIG_ALPHA_POLARIS) += core_polaris.o
+obj-$(CONFIG_ALPHA_T2) += core_t2.o
+obj-$(CONFIG_ALPHA_TSUNAMI) += core_tsunami.o
+obj-$(CONFIG_ALPHA_TITAN) += core_titan.o
+obj-$(CONFIG_ALPHA_WILDFIRE) += core_wildfire.o
+
+# Board support
+obj-$(CONFIG_ALPHA_ALCOR) += sys_alcor.o irq_i8259.o irq_srm.o
+obj-$(CONFIG_ALPHA_CABRIOLET) += sys_cabriolet.o irq_i8259.o irq_srm.o \
+ ns87312.o
+obj-$(CONFIG_ALPHA_EB164) += sys_cabriolet.o irq_i8259.o irq_srm.o \
+ ns87312.o
+obj-$(CONFIG_ALPHA_EB66P) += sys_cabriolet.o irq_i8259.o irq_srm.o \
+ ns87312.o
+obj-$(CONFIG_ALPHA_LX164) += sys_cabriolet.o irq_i8259.o irq_srm.o \
+ smc37c93x.o
+obj-$(CONFIG_ALPHA_PC164) += sys_cabriolet.o irq_i8259.o irq_srm.o \
+ smc37c93x.o
+obj-$(CONFIG_ALPHA_DP264) += sys_dp264.o irq_i8259.o es1888.o smc37c669.o
+obj-$(CONFIG_ALPHA_SHARK) += sys_dp264.o irq_i8259.o es1888.o smc37c669.o
+obj-$(CONFIG_ALPHA_TITAN) += sys_titan.o irq_i8259.o smc37c669.o
+obj-$(CONFIG_ALPHA_EB64P) += sys_eb64p.o irq_i8259.o
+obj-$(CONFIG_ALPHA_EB66) += sys_eb64p.o irq_i8259.o
+obj-$(CONFIG_ALPHA_EIGER) += sys_eiger.o irq_i8259.o
+obj-$(CONFIG_ALPHA_JENSEN) += sys_jensen.o pci-noop.o irq_i8259.o
+obj-$(CONFIG_ALPHA_MARVEL) += sys_marvel.o
+obj-$(CONFIG_ALPHA_MIATA) += sys_miata.o irq_pyxis.o irq_i8259.o \
+ es1888.o smc37c669.o
+obj-$(CONFIG_ALPHA_MIKASA) += sys_mikasa.o irq_i8259.o irq_srm.o
+obj-$(CONFIG_ALPHA_NAUTILUS) += sys_nautilus.o irq_i8259.o irq_srm.o
+obj-$(CONFIG_ALPHA_NORITAKE) += sys_noritake.o irq_i8259.o
+obj-$(CONFIG_ALPHA_RAWHIDE) += sys_rawhide.o irq_i8259.o
+obj-$(CONFIG_ALPHA_RUFFIAN) += sys_ruffian.o irq_pyxis.o irq_i8259.o
+obj-$(CONFIG_ALPHA_RX164) += sys_rx164.o irq_i8259.o
+obj-$(CONFIG_ALPHA_SABLE) += sys_sable.o
+obj-$(CONFIG_ALPHA_LYNX) += sys_sable.o
+obj-$(CONFIG_ALPHA_BOOK1) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o
+obj-$(CONFIG_ALPHA_AVANTI) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o
+obj-$(CONFIG_ALPHA_NONAME) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o
+obj-$(CONFIG_ALPHA_P2K) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o
+obj-$(CONFIG_ALPHA_XL) += sys_sio.o irq_i8259.o irq_srm.o ns87312.o
+obj-$(CONFIG_ALPHA_SX164) += sys_sx164.o irq_pyxis.o irq_i8259.o \
+ irq_srm.o smc37c669.o
+obj-$(CONFIG_ALPHA_TAKARA) += sys_takara.o irq_i8259.o ns87312.o
+obj-$(CONFIG_ALPHA_WILDFIRE) += sys_wildfire.o irq_i8259.o
+
+# Error support
+obj-$(CONFIG_ALPHA_MARVEL) += err_ev7.o err_marvel.o
+obj-$(CONFIG_ALPHA_NAUTILUS) += err_ev6.o
+obj-$(CONFIG_ALPHA_TITAN) += err_ev6.o err_titan.o
+
+endif # GENERIC
diff --git a/arch/alpha/kernel/alpha_ksyms.c b/arch/alpha/kernel/alpha_ksyms.c
new file mode 100644
index 000000000000..fc5ef90c4fc9
--- /dev/null
+++ b/arch/alpha/kernel/alpha_ksyms.c
@@ -0,0 +1,235 @@
+/*
+ * linux/arch/alpha/kernel/ksyms.c
+ *
+ * Export the alpha-specific functions that are needed for loadable
+ * modules.
+ */
+
+#include <linux/config.h>
+#include <linux/module.h>
+#include <linux/string.h>
+#include <linux/user.h>
+#include <linux/elfcore.h>
+#include <linux/socket.h>
+#include <linux/syscalls.h>
+#include <linux/in.h>
+#include <linux/in6.h>
+#include <linux/pci.h>
+#include <linux/tty.h>
+#include <linux/mm.h>
+#include <linux/delay.h>
+#include <linux/dma-mapping.h>
+
+#include <asm/io.h>
+#include <asm/console.h>
+#include <asm/hwrpb.h>
+#include <asm/uaccess.h>
+#include <asm/processor.h>
+#include <asm/checksum.h>
+#include <linux/interrupt.h>
+#include <asm/fpu.h>
+#include <asm/irq.h>
+#include <asm/machvec.h>
+#include <asm/pgalloc.h>
+#include <asm/semaphore.h>
+#include <asm/tlbflush.h>
+#include <asm/cacheflush.h>
+#include <asm/vga.h>
+
+#define __KERNEL_SYSCALLS__
+#include <asm/unistd.h>
+
+extern struct hwrpb_struct *hwrpb;
+extern void dump_thread(struct pt_regs *, struct user *);
+extern spinlock_t rtc_lock;
+
+/* these are C runtime functions with special calling conventions: */
+extern void __divl (void);
+extern void __reml (void);
+extern void __divq (void);
+extern void __remq (void);
+extern void __divlu (void);
+extern void __remlu (void);
+extern void __divqu (void);
+extern void __remqu (void);
+
+EXPORT_SYMBOL(alpha_mv);
+EXPORT_SYMBOL(enable_irq);
+EXPORT_SYMBOL(disable_irq);
+EXPORT_SYMBOL(disable_irq_nosync);
+EXPORT_SYMBOL(probe_irq_mask);
+EXPORT_SYMBOL(screen_info);
+EXPORT_SYMBOL(perf_irq);
+EXPORT_SYMBOL(callback_getenv);
+EXPORT_SYMBOL(callback_setenv);
+EXPORT_SYMBOL(callback_save_env);
+#ifdef CONFIG_ALPHA_GENERIC
+EXPORT_SYMBOL(alpha_using_srm);
+#endif /* CONFIG_ALPHA_GENERIC */
+
+/* platform dependent support */
+EXPORT_SYMBOL(strcat);
+EXPORT_SYMBOL(strcmp);
+EXPORT_SYMBOL(strcpy);
+EXPORT_SYMBOL(strlen);
+EXPORT_SYMBOL(strncmp);
+EXPORT_SYMBOL(strncpy);
+EXPORT_SYMBOL(strnlen);
+EXPORT_SYMBOL(strncat);
+EXPORT_SYMBOL(strstr);
+EXPORT_SYMBOL(strpbrk);
+EXPORT_SYMBOL(strchr);
+EXPORT_SYMBOL(strrchr);
+EXPORT_SYMBOL(memcmp);
+EXPORT_SYMBOL(memmove);
+EXPORT_SYMBOL(memscan);
+EXPORT_SYMBOL(__memcpy);
+EXPORT_SYMBOL(__memset);
+EXPORT_SYMBOL(__memsetw);
+EXPORT_SYMBOL(__constant_c_memset);
+EXPORT_SYMBOL(copy_page);
+EXPORT_SYMBOL(clear_page);
+
+EXPORT_SYMBOL(__direct_map_base);
+EXPORT_SYMBOL(__direct_map_size);
+
+#ifdef CONFIG_PCI
+EXPORT_SYMBOL(pci_alloc_consistent);
+EXPORT_SYMBOL(pci_free_consistent);
+EXPORT_SYMBOL(pci_map_single);
+EXPORT_SYMBOL(pci_map_page);
+EXPORT_SYMBOL(pci_unmap_single);
+EXPORT_SYMBOL(pci_unmap_page);
+EXPORT_SYMBOL(pci_map_sg);
+EXPORT_SYMBOL(pci_unmap_sg);
+EXPORT_SYMBOL(pci_dma_supported);
+EXPORT_SYMBOL(pci_dac_dma_supported);
+EXPORT_SYMBOL(pci_dac_page_to_dma);
+EXPORT_SYMBOL(pci_dac_dma_to_page);
+EXPORT_SYMBOL(pci_dac_dma_to_offset);
+EXPORT_SYMBOL(alpha_gendev_to_pci);
+#endif
+EXPORT_SYMBOL(dma_set_mask);
+
+EXPORT_SYMBOL(dump_thread);
+EXPORT_SYMBOL(dump_elf_thread);
+EXPORT_SYMBOL(dump_elf_task);
+EXPORT_SYMBOL(dump_elf_task_fp);
+EXPORT_SYMBOL(hwrpb);
+EXPORT_SYMBOL(start_thread);
+EXPORT_SYMBOL(alpha_read_fp_reg);
+EXPORT_SYMBOL(alpha_read_fp_reg_s);
+EXPORT_SYMBOL(alpha_write_fp_reg);
+EXPORT_SYMBOL(alpha_write_fp_reg_s);
+
+/* In-kernel system calls. */
+EXPORT_SYMBOL(kernel_thread);
+EXPORT_SYMBOL(sys_open);
+EXPORT_SYMBOL(sys_dup);
+EXPORT_SYMBOL(sys_exit);
+EXPORT_SYMBOL(sys_write);
+EXPORT_SYMBOL(sys_read);
+EXPORT_SYMBOL(sys_lseek);
+EXPORT_SYMBOL(execve);
+EXPORT_SYMBOL(sys_setsid);
+EXPORT_SYMBOL(sys_wait4);
+
+/* Networking helper routines. */
+EXPORT_SYMBOL(csum_tcpudp_magic);
+EXPORT_SYMBOL(ip_compute_csum);
+EXPORT_SYMBOL(ip_fast_csum);
+EXPORT_SYMBOL(csum_partial_copy_nocheck);
+EXPORT_SYMBOL(csum_partial_copy_from_user);
+EXPORT_SYMBOL(csum_ipv6_magic);
+
+#ifdef CONFIG_MATHEMU_MODULE
+extern long (*alpha_fp_emul_imprecise)(struct pt_regs *, unsigned long);
+extern long (*alpha_fp_emul) (unsigned long pc);
+EXPORT_SYMBOL(alpha_fp_emul_imprecise);
+EXPORT_SYMBOL(alpha_fp_emul);
+#endif
+
+#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK
+EXPORT_SYMBOL(__min_ipl);
+#endif
+
+/*
+ * The following are specially called from the uaccess assembly stubs.
+ */
+EXPORT_SYMBOL(__copy_user);
+EXPORT_SYMBOL(__do_clear_user);
+EXPORT_SYMBOL(__strncpy_from_user);
+EXPORT_SYMBOL(__strnlen_user);
+
+/* Semaphore helper functions. */
+EXPORT_SYMBOL(__down_failed);
+EXPORT_SYMBOL(__down_failed_interruptible);
+EXPORT_SYMBOL(__up_wakeup);
+EXPORT_SYMBOL(down);
+EXPORT_SYMBOL(down_interruptible);
+EXPORT_SYMBOL(down_trylock);
+EXPORT_SYMBOL(up);
+
+/*
+ * SMP-specific symbols.
+ */
+
+#ifdef CONFIG_SMP
+EXPORT_SYMBOL(synchronize_irq);
+EXPORT_SYMBOL(flush_tlb_mm);
+EXPORT_SYMBOL(flush_tlb_range);
+EXPORT_SYMBOL(flush_tlb_page);
+EXPORT_SYMBOL(smp_imb);
+EXPORT_SYMBOL(cpu_data);
+EXPORT_SYMBOL(smp_num_cpus);
+EXPORT_SYMBOL(smp_call_function);
+EXPORT_SYMBOL(smp_call_function_on_cpu);
+EXPORT_SYMBOL(_atomic_dec_and_lock);
+#ifdef CONFIG_DEBUG_SPINLOCK
+EXPORT_SYMBOL(_raw_spin_unlock);
+EXPORT_SYMBOL(debug_spin_lock);
+EXPORT_SYMBOL(debug_spin_trylock);
+#endif
+#ifdef CONFIG_DEBUG_RWLOCK
+EXPORT_SYMBOL(_raw_write_lock);
+EXPORT_SYMBOL(_raw_read_lock);
+#endif
+EXPORT_SYMBOL(cpu_present_mask);
+#endif /* CONFIG_SMP */
+
+/*
+ * NUMA specific symbols
+ */
+#ifdef CONFIG_DISCONTIGMEM
+EXPORT_SYMBOL(node_data);
+#endif /* CONFIG_DISCONTIGMEM */
+
+EXPORT_SYMBOL(rtc_lock);
+
+/*
+ * The following are special because they're not called
+ * explicitly (the C compiler or assembler generates them in
+ * response to division operations). Fortunately, their
+ * interface isn't gonna change any time soon now, so it's OK
+ * to leave it out of version control.
+ */
+# undef memcpy
+# undef memset
+EXPORT_SYMBOL(__divl);
+EXPORT_SYMBOL(__divlu);
+EXPORT_SYMBOL(__divq);
+EXPORT_SYMBOL(__divqu);
+EXPORT_SYMBOL(__reml);
+EXPORT_SYMBOL(__remlu);
+EXPORT_SYMBOL(__remq);
+EXPORT_SYMBOL(__remqu);
+EXPORT_SYMBOL(memcpy);
+EXPORT_SYMBOL(memset);
+EXPORT_SYMBOL(memchr);
+
+EXPORT_SYMBOL(get_wchan);
+
+#ifdef CONFIG_ALPHA_IRONGATE
+EXPORT_SYMBOL(irongate_ioremap);
+EXPORT_SYMBOL(irongate_iounmap);
+#endif
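
Everything in this file exists to make symbols resolvable by loadable modules: without an EXPORT_SYMBOL entry, insmod cannot link a module's reference to the kernel's copy of the function. As a minimal sketch of the consumer side (a hypothetical demo module, not part of this commit), a module can call the exported checksum helper directly and rely on the module loader to resolve it against this table:

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/init.h>
#include <linux/in.h>
#include <asm/checksum.h>

/* Hypothetical demo module: the call to csum_tcpudp_magic() resolves
   at load time only because alpha_ksyms.c exports that symbol. */
static int __init csum_demo_init(void)
{
	unsigned short sum;

	/* Checksum a made-up TCP pseudo-header: saddr, daddr, len, proto. */
	sum = csum_tcpudp_magic(0x0a000001, 0x0a000002, 40, IPPROTO_TCP, 0);
	printk("csum_demo: pseudo-header checksum = %#x\n", sum);
	return 0;
}

static void __exit csum_demo_exit(void)
{
}

module_init(csum_demo_init);
module_exit(csum_demo_exit);
MODULE_LICENSE("GPL");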
diff --git a/arch/alpha/kernel/asm-offsets.c b/arch/alpha/kernel/asm-offsets.c
new file mode 100644
index 000000000000..8f2e5c718b50
--- /dev/null
+++ b/arch/alpha/kernel/asm-offsets.c
@@ -0,0 +1,43 @@
+/*
+ * Generate definitions needed by assembly language modules.
+ * This code generates raw asm output which is post-processed to extract
+ * and format the required data.
+ */
+
+#include <linux/types.h>
+#include <linux/stddef.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <asm/io.h>
+
+#define DEFINE(sym, val) \
+ asm volatile("\n->" #sym " %0 " #val : : "i" (val))
+
+#define BLANK() asm volatile("\n->" : : )
+
+void foo(void)
+{
+ DEFINE(TI_TASK, offsetof(struct thread_info, task));
+ DEFINE(TI_FLAGS, offsetof(struct thread_info, flags));
+ DEFINE(TI_CPU, offsetof(struct thread_info, cpu));
+ BLANK();
+
+ DEFINE(TASK_BLOCKED, offsetof(struct task_struct, blocked));
+ DEFINE(TASK_UID, offsetof(struct task_struct, uid));
+ DEFINE(TASK_EUID, offsetof(struct task_struct, euid));
+ DEFINE(TASK_GID, offsetof(struct task_struct, gid));
+ DEFINE(TASK_EGID, offsetof(struct task_struct, egid));
+ DEFINE(TASK_REAL_PARENT, offsetof(struct task_struct, real_parent));
+ DEFINE(TASK_TGID, offsetof(struct task_struct, tgid));
+ BLANK();
+
+ DEFINE(SIZEOF_PT_REGS, sizeof(struct pt_regs));
+ DEFINE(PT_PTRACED, PT_PTRACED);
+ DEFINE(CLONE_VM, CLONE_VM);
+ DEFINE(CLONE_UNTRACED, CLONE_UNTRACED);
+ DEFINE(SIGCHLD, SIGCHLD);
+ BLANK();
+
+ DEFINE(HAE_CACHE, offsetof(struct alpha_machine_vector, hae_cache));
+ DEFINE(HAE_REG, offsetof(struct alpha_machine_vector, hae_register));
+}
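
The DEFINE() macro works because the "i" constraint makes the compiler materialize each value as an immediate in its assembly output; compiling this file with -S therefore yields marker lines that the build machinery post-processes into a header of bare constants usable from entry.S and head.S. A sketch of the round trip, with hypothetical offsets (the real values depend on struct layout):

/* The -S output of this file contains marker lines such as
 *
 *	->TI_TASK 0 offsetof(struct thread_info, task)
 *	->TI_FLAGS 8 offsetof(struct thread_info, flags)
 *
 * which a build script extracts into a generated header of constants:
 */
#define TI_TASK		0		/* hypothetical offset */
#define TI_FLAGS	8		/* hypothetical offset */
/* Assembly can then write "ldq $1, TI_FLAGS($8)" without ever seeing
   the C definition of struct thread_info. */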
diff --git a/arch/alpha/kernel/console.c b/arch/alpha/kernel/console.c
new file mode 100644
index 000000000000..cb3e739fbad8
--- /dev/null
+++ b/arch/alpha/kernel/console.c
@@ -0,0 +1,66 @@
+/*
+ * linux/arch/alpha/kernel/console.c
+ *
+ * Architecture-specific support for VGA device on
+ * non-0 I/O hose
+ */
+
+#include <linux/config.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+#include <linux/console.h>
+#include <asm/vga.h>
+#include <asm/machvec.h>
+
+#ifdef CONFIG_VGA_HOSE
+
+/*
+ * Externally-visible vga hose bases
+ */
+unsigned long __vga_hose_io_base = 0; /* base for default hose */
+unsigned long __vga_hose_mem_base = 0; /* base for default hose */
+
+static struct pci_controller * __init
+default_vga_hose_select(struct pci_controller *h1, struct pci_controller *h2)
+{
+ if (h2->index < h1->index)
+ return h2;
+
+ return h1;
+}
+
+void __init
+set_vga_hose(struct pci_controller *hose)
+{
+ if (hose) {
+ __vga_hose_io_base = hose->io_space->start;
+ __vga_hose_mem_base = hose->mem_space->start;
+ }
+}
+
+void __init
+locate_and_init_vga(void *(*sel_func)(void *, void *))
+{
+ struct pci_controller *hose = NULL;
+ struct pci_dev *dev = NULL;
+
+ if (!sel_func) sel_func = (void *)default_vga_hose_select;
+
+ for(dev=NULL; (dev=pci_get_class(PCI_CLASS_DISPLAY_VGA << 8, dev));) {
+ if (!hose) hose = dev->sysdata;
+ else hose = sel_func(hose, dev->sysdata);
+ }
+
+ /* Did we already initialize the correct one? */
+ if (conswitchp == &vga_con &&
+ __vga_hose_io_base == hose->io_space->start &&
+ __vga_hose_mem_base == hose->mem_space->start)
+ return;
+
+ /* Set the VGA hose and init the new console */
+ set_vga_hose(hose);
+ take_over_console(&vga_con, 0, MAX_NR_CONSOLES-1, 1);
+}
+
+#endif
diff --git a/arch/alpha/kernel/core_apecs.c b/arch/alpha/kernel/core_apecs.c
new file mode 100644
index 000000000000..a27ba12ba35e
--- /dev/null
+++ b/arch/alpha/kernel/core_apecs.c
@@ -0,0 +1,418 @@
+/*
+ * linux/arch/alpha/kernel/core_apecs.c
+ *
+ * Rewritten for Apecs from the lca.c from:
+ *
+ * Written by David Mosberger (davidm@cs.arizona.edu) with some code
+ * taken from Dave Rusling's (david.rusling@reo.mts.dec.com) 32-bit
+ * bios code.
+ *
+ * Code common to all APECS core logic chips.
+ */
+
+#define __EXTERN_INLINE inline
+#include <asm/io.h>
+#include <asm/core_apecs.h>
+#undef __EXTERN_INLINE
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+
+#include <asm/ptrace.h>
+#include <asm/smp.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+/*
+ * NOTE: Herein lie back-to-back mb instructions. They are magic.
+ * One plausible explanation is that the i/o controller does not properly
+ * handle the system transaction. Another involves timing. Ho hum.
+ */
+
+/*
+ * BIOS32-style PCI interface:
+ */
+
+#define DEBUG_CONFIG 0
+
+#if DEBUG_CONFIG
+# define DBGC(args) printk args
+#else
+# define DBGC(args)
+#endif
+
+#define vuip volatile unsigned int *
+
+/*
+ * Given a bus, device, and function number, compute resulting
+ * configuration space address and setup the APECS_HAXR2 register
+ * accordingly. It is therefore not safe to have concurrent
+ * invocations to configuration space access routines, but there
+ * really shouldn't be any need for this.
+ *
+ * Type 0:
+ *
+ * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | | | | | | | | | | | | | | | | | | | | | | |F|F|F|R|R|R|R|R|R|0|0|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * 31:11 Device select bit.
+ * 10:8 Function number
+ * 7:2 Register number
+ *
+ * Type 1:
+ *
+ * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * 31:24 reserved
+ * 23:16 bus number (8 bits = 256 possible buses)
+ * 15:11 Device number (5 bits)
+ * 10:8 function number
+ * 7:2 register number
+ *
+ * Notes:
+ * The function number selects which function of a multi-function device
+ * (e.g., SCSI and Ethernet).
+ *
+ * The register selects a DWORD (32 bit) register offset. Hence it
+ * doesn't get shifted by 2 bits as we want to "drop" the bottom two
+ * bits.
+ */
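
Read as bit-packing, the two layouts above reduce to a couple of shifts. A minimal sketch, illustrative only (mk_conf_addr() below folds the same arithmetic into one routine):

/* Helpers mirroring the diagrams above. devfn already packs
   (device << 3) | function, so placing it at bit 8 lands the function
   number in bits 10:8 and the device bits above that, with the
   register number in bits 7:2. */
static unsigned long type0_addr(unsigned int devfn, int where)
{
	return ((unsigned long)devfn << 8) | where;
}

static unsigned long type1_addr(unsigned char bus, unsigned int devfn,
				int where)
{
	/* Bus number in 23:16.  The type-1 indication itself is not
	   encoded here -- the access routines signal it separately by
	   setting the low bit of HAXR2. */
	return ((unsigned long)bus << 16) | ((unsigned long)devfn << 8) | where;
}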
+
+static int
+mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
+ unsigned long *pci_addr, unsigned char *type1)
+{
+ unsigned long addr;
+ u8 bus = pbus->number;
+
+ DBGC(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x,"
+ " pci_addr=0x%p, type1=0x%p)\n",
+ bus, device_fn, where, pci_addr, type1));
+
+ if (bus == 0) {
+ int device = device_fn >> 3;
+
+ /* type 0 configuration cycle: */
+
+ if (device > 20) {
+ DBGC(("mk_conf_addr: device (%d) > 20, returning -1\n",
+ device));
+ return -1;
+ }
+
+ *type1 = 0;
+ addr = (device_fn << 8) | (where);
+ } else {
+ /* type 1 configuration cycle: */
+ *type1 = 1;
+ addr = (bus << 16) | (device_fn << 8) | (where);
+ }
+ *pci_addr = addr;
+ DBGC(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
+ return 0;
+}
+
+static unsigned int
+conf_read(unsigned long addr, unsigned char type1)
+{
+ unsigned long flags;
+ unsigned int stat0, value;
+ unsigned int haxr2 = 0;
+
+ local_irq_save(flags); /* avoid getting hit by machine check */
+
+ DBGC(("conf_read(addr=0x%lx, type1=%d)\n", addr, type1));
+
+ /* Reset status register to avoid losing errors. */
+ stat0 = *(vuip)APECS_IOC_DCSR;
+ *(vuip)APECS_IOC_DCSR = stat0;
+ mb();
+ DBGC(("conf_read: APECS DCSR was 0x%x\n", stat0));
+
+ /* If Type1 access, must set HAE #2. */
+ if (type1) {
+ haxr2 = *(vuip)APECS_IOC_HAXR2;
+ mb();
+ *(vuip)APECS_IOC_HAXR2 = haxr2 | 1;
+ DBGC(("conf_read: TYPE1 access\n"));
+ }
+
+ draina();
+ mcheck_expected(0) = 1;
+ mcheck_taken(0) = 0;
+ mb();
+
+ /* Access configuration space. */
+
+ /* Some SRMs step on these registers during a machine check. */
+ asm volatile("ldl %0,%1; mb; mb" : "=r"(value) : "m"(*(vuip)addr)
+ : "$9", "$10", "$11", "$12", "$13", "$14", "memory");
+
+ if (mcheck_taken(0)) {
+ mcheck_taken(0) = 0;
+ value = 0xffffffffU;
+ mb();
+ }
+ mcheck_expected(0) = 0;
+ mb();
+
+#if 1
+ /*
+ * david.rusling@reo.mts.dec.com. This code is needed for the
+ * EB64+ as it does not generate a machine check (why I don't
+ * know). When we build kernels for one particular platform
+ * then we can make this conditional on the type.
+ */
+ draina();
+
+ /* Now look for any errors. */
+ stat0 = *(vuip)APECS_IOC_DCSR;
+ DBGC(("conf_read: APECS DCSR after read 0x%x\n", stat0));
+
+ /* Is any error bit set? */
+ if (stat0 & 0xffe0U) {
+ /* If not NDEV, print status. */
+ if (!(stat0 & 0x0800)) {
+ printk("apecs.c:conf_read: got stat0=%x\n", stat0);
+ }
+
+ /* Reset error status. */
+ *(vuip)APECS_IOC_DCSR = stat0;
+ mb();
+ wrmces(0x7); /* reset machine check */
+ value = 0xffffffff;
+ }
+#endif
+
+ /* If Type1 access, must reset HAE #2 so normal IO space ops work. */
+ if (type1) {
+ *(vuip)APECS_IOC_HAXR2 = haxr2 & ~1;
+ mb();
+ }
+ local_irq_restore(flags);
+
+ return value;
+}
+
+static void
+conf_write(unsigned long addr, unsigned int value, unsigned char type1)
+{
+ unsigned long flags;
+ unsigned int stat0;
+ unsigned int haxr2 = 0;
+
+ local_irq_save(flags); /* avoid getting hit by machine check */
+
+ /* Reset status register to avoid losing errors. */
+ stat0 = *(vuip)APECS_IOC_DCSR;
+ *(vuip)APECS_IOC_DCSR = stat0;
+ mb();
+
+ /* If Type1 access, must set HAE #2. */
+ if (type1) {
+ haxr2 = *(vuip)APECS_IOC_HAXR2;
+ mb();
+ *(vuip)APECS_IOC_HAXR2 = haxr2 | 1;
+ }
+
+ draina();
+ mcheck_expected(0) = 1;
+ mb();
+
+ /* Access configuration space. */
+ *(vuip)addr = value;
+ mb();
+ mb(); /* magic */
+ mcheck_expected(0) = 0;
+ mb();
+
+#if 1
+ /*
+ * david.rusling@reo.mts.dec.com. This code is needed for the
+ * EB64+ as it does not generate a machine check (why I don't
+ * know). When we build kernels for one particular platform
+ * then we can make this conditional on the type.
+ */
+ draina();
+
+ /* Now look for any errors. */
+ stat0 = *(vuip)APECS_IOC_DCSR;
+
+ /* Is any error bit set? */
+ if (stat0 & 0xffe0U) {
+ /* If not NDEV, print status. */
+ if (!(stat0 & 0x0800)) {
+ printk("apecs.c:conf_write: got stat0=%x\n", stat0);
+ }
+
+ /* Reset error status. */
+ *(vuip)APECS_IOC_DCSR = stat0;
+ mb();
+ wrmces(0x7); /* reset machine check */
+ }
+#endif
+
+ /* If Type1 access, must reset HAE #2 so normal IO space ops work. */
+ if (type1) {
+ *(vuip)APECS_IOC_HAXR2 = haxr2 & ~1;
+ mb();
+ }
+ local_irq_restore(flags);
+}
+
+static int
+apecs_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *value)
+{
+ unsigned long addr, pci_addr;
+ unsigned char type1;
+ long mask;
+ int shift;
+
+ if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ mask = (size - 1) * 8;
+ shift = (where & 3) * 8;
+ addr = (pci_addr << 5) + mask + APECS_CONF;
+ *value = conf_read(addr, type1) >> (shift);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+apecs_write_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 value)
+{
+ unsigned long addr, pci_addr;
+ unsigned char type1;
+ long mask;
+
+ if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ mask = (size - 1) * 8;
+ addr = (pci_addr << 5) + mask + APECS_CONF;
+ conf_write(addr, value << ((where & 3) * 8), type1);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops apecs_pci_ops =
+{
+ .read = apecs_read_config,
+ .write = apecs_write_config,
+};
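
The size handling in the two routines above is the usual Alpha sparse-space encoding: the configuration address is shifted up 5 bits, (size - 1) * 8 (0, 8, or 24) encodes the transfer length in the low address bits, and the wanted bytes are shifted out of the returned longword by 8 * (where & 3). A worked sketch of the same arithmetic, with illustrative values:

/* E.g. a 2-byte read of the Vendor ID (where = 0) of devfn 0x38
   (device 7, function 0) on bus 0: mk_conf_addr() gives
   pci_addr = 0x3800, and the sparse address is then */
static unsigned long sparse_cfg_addr(unsigned long conf_base,
				     unsigned long pci_addr, int size)
{
	long mask = (size - 1) * 8;	/* 0, 8 or 24: length field */

	return (pci_addr << 5) + mask + conf_base;
}
/* sparse_cfg_addr(APECS_CONF, 0x3800, 2) is the longword to read;
   shifting the result right by 8 * (where & 3) bits (0 here)
   isolates the two bytes of interest. */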
+
+void
+apecs_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
+{
+ wmb();
+ *(vip)APECS_IOC_TBIA = 0;
+ mb();
+}
+
+void __init
+apecs_init_arch(void)
+{
+ struct pci_controller *hose;
+
+ /*
+ * Create our single hose.
+ */
+
+ pci_isa_hose = hose = alloc_pci_controller();
+ hose->io_space = &ioport_resource;
+ hose->mem_space = &iomem_resource;
+ hose->index = 0;
+
+ hose->sparse_mem_base = APECS_SPARSE_MEM - IDENT_ADDR;
+ hose->dense_mem_base = APECS_DENSE_MEM - IDENT_ADDR;
+ hose->sparse_io_base = APECS_IO - IDENT_ADDR;
+ hose->dense_io_base = 0;
+
+ /*
+ * Set up the PCI to main memory translation windows.
+ *
+ * Window 1 is direct access 1GB at 1GB
+ * Window 2 is scatter-gather 8MB at 8MB (for isa)
+ */
+ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
+ hose->sg_pci = NULL;
+ __direct_map_base = 0x40000000;
+ __direct_map_size = 0x40000000;
+
+ *(vuip)APECS_IOC_PB1R = __direct_map_base | 0x00080000;
+ *(vuip)APECS_IOC_PM1R = (__direct_map_size - 1) & 0xfff00000U;
+ *(vuip)APECS_IOC_TB1R = 0;
+
+ *(vuip)APECS_IOC_PB2R = hose->sg_isa->dma_base | 0x000c0000;
+ *(vuip)APECS_IOC_PM2R = (hose->sg_isa->size - 1) & 0xfff00000;
+ *(vuip)APECS_IOC_TB2R = virt_to_phys(hose->sg_isa->ptes) >> 1;
+
+ apecs_pci_tbi(hose, 0, -1);
+
+ /*
+ * Finally, clear the HAXR2 register, which gets used
+ * for PCI Config Space accesses. That is the way
+ * we want to use it, and we do not want to depend on
+ * what ARC or SRM might have left behind...
+ */
+ *(vuip)APECS_IOC_HAXR2 = 0;
+ mb();
+}
+
+void
+apecs_pci_clr_err(void)
+{
+ unsigned int jd;
+
+ jd = *(vuip)APECS_IOC_DCSR;
+ if (jd & 0xffe0L) {
+ *(vuip)APECS_IOC_SEAR;
+ *(vuip)APECS_IOC_DCSR = jd | 0xffe1L;
+ mb();
+ *(vuip)APECS_IOC_DCSR;
+ }
+ *(vuip)APECS_IOC_TBIA = (unsigned int)APECS_IOC_TBIA;
+ mb();
+ *(vuip)APECS_IOC_TBIA;
+}
+
+void
+apecs_machine_check(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs * regs)
+{
+ struct el_common *mchk_header;
+ struct el_apecs_procdata *mchk_procdata;
+ struct el_apecs_sysdata_mcheck *mchk_sysdata;
+
+ mchk_header = (struct el_common *)la_ptr;
+
+ mchk_procdata = (struct el_apecs_procdata *)
+ (la_ptr + mchk_header->proc_offset
+ - sizeof(mchk_procdata->paltemp));
+
+ mchk_sysdata = (struct el_apecs_sysdata_mcheck *)
+ (la_ptr + mchk_header->sys_offset);
+
+
+ /* Clear the error before any reporting. */
+ mb();
+ mb(); /* magic */
+ draina();
+ apecs_pci_clr_err();
+ wrmces(0x7); /* reset machine check pending flag */
+ mb();
+
+ process_mcheck_info(vector, la_ptr, regs, "APECS",
+ (mcheck_expected(0)
+ && (mchk_sysdata->epic_dcsr & 0x0c00UL)));
+}
diff --git a/arch/alpha/kernel/core_cia.c b/arch/alpha/kernel/core_cia.c
new file mode 100644
index 000000000000..fd563064363c
--- /dev/null
+++ b/arch/alpha/kernel/core_cia.c
@@ -0,0 +1,1212 @@
+/*
+ * linux/arch/alpha/kernel/core_cia.c
+ *
+ * Written by David A Rusling (david.rusling@reo.mts.dec.com).
+ * December 1995.
+ *
+ * Copyright (C) 1995 David A Rusling
+ * Copyright (C) 1997, 1998 Jay Estabrook
+ * Copyright (C) 1998, 1999, 2000 Richard Henderson
+ *
+ * Code common to all CIA core logic chips.
+ */
+
+#define __EXTERN_INLINE inline
+#include <asm/io.h>
+#include <asm/core_cia.h>
+#undef __EXTERN_INLINE
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+
+#include <asm/ptrace.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+
+/*
+ * NOTE: Herein lie back-to-back mb instructions. They are magic.
+ * One plausible explanation is that the i/o controller does not properly
+ * handle the system transaction. Another involves timing. Ho hum.
+ */
+
+#define DEBUG_CONFIG 0
+#if DEBUG_CONFIG
+# define DBGC(args) printk args
+#else
+# define DBGC(args)
+#endif
+
+#define vip volatile int *
+
+/*
+ * Given a bus, device, and function number, compute resulting
+ * configuration space address. It is therefore not safe to have
+ * concurrent invocations to configuration space access routines, but
+ * there really shouldn't be any need for this.
+ *
+ * Type 0:
+ *
+ * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * 31:11 Device select bit.
+ * 10:8 Function number
+ * 7:2 Register number
+ *
+ * Type 1:
+ *
+ * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * 31:24 reserved
+ * 23:16 bus number (8 bits = 256 possible buses)
+ * 15:11 Device number (5 bits)
+ * 10:8 function number
+ * 7:2 register number
+ *
+ * Notes:
+ * The function number selects which function of a multi-function device
+ * (e.g., SCSI and Ethernet).
+ *
+ * The register selects a DWORD (32 bit) register offset. Hence it
+ * doesn't get shifted by 2 bits as we want to "drop" the bottom two
+ * bits.
+ */
+
+static int
+mk_conf_addr(struct pci_bus *bus_dev, unsigned int device_fn, int where,
+ unsigned long *pci_addr, unsigned char *type1)
+{
+ u8 bus = bus_dev->number;
+
+ *type1 = (bus != 0);
+ *pci_addr = (bus << 16) | (device_fn << 8) | where;
+
+ DBGC(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x,"
+ " returning address 0x%p\n"
+ bus, device_fn, where, *pci_addr));
+
+ return 0;
+}
+
+static unsigned int
+conf_read(unsigned long addr, unsigned char type1)
+{
+ unsigned long flags;
+ int stat0, value;
+ int cia_cfg = 0;
+
+ DBGC(("conf_read(addr=0x%lx, type1=%d) ", addr, type1));
+ local_irq_save(flags);
+
+ /* Reset status register to avoid losing errors. */
+ stat0 = *(vip)CIA_IOC_CIA_ERR;
+ *(vip)CIA_IOC_CIA_ERR = stat0;
+ mb();
+ *(vip)CIA_IOC_CIA_ERR; /* re-read to force write */
+
+ /* If Type1 access, must set CIA CFG. */
+ if (type1) {
+ cia_cfg = *(vip)CIA_IOC_CFG;
+ *(vip)CIA_IOC_CFG = (cia_cfg & ~3) | 1;
+ mb();
+ *(vip)CIA_IOC_CFG;
+ }
+
+ mb();
+ draina();
+ mcheck_expected(0) = 1;
+ mcheck_taken(0) = 0;
+ mb();
+
+ /* Access configuration space. */
+ value = *(vip)addr;
+ mb();
+ mb(); /* magic */
+ if (mcheck_taken(0)) {
+ mcheck_taken(0) = 0;
+ value = 0xffffffff;
+ mb();
+ }
+ mcheck_expected(0) = 0;
+ mb();
+
+ /* If Type1 access, must reset IOC CFG so normal IO space ops work. */
+ if (type1) {
+ *(vip)CIA_IOC_CFG = cia_cfg;
+ mb();
+ *(vip)CIA_IOC_CFG;
+ }
+
+ local_irq_restore(flags);
+ DBGC(("done\n"));
+
+ return value;
+}
+
+static void
+conf_write(unsigned long addr, unsigned int value, unsigned char type1)
+{
+ unsigned long flags;
+ int stat0, cia_cfg = 0;
+
+ DBGC(("conf_write(addr=0x%lx, type1=%d) ", addr, type1));
+ local_irq_save(flags);
+
+ /* Reset status register to avoid losing errors. */
+ stat0 = *(vip)CIA_IOC_CIA_ERR;
+ *(vip)CIA_IOC_CIA_ERR = stat0;
+ mb();
+ *(vip)CIA_IOC_CIA_ERR; /* re-read to force write */
+
+ /* If Type1 access, must set CIA CFG. */
+ if (type1) {
+ cia_cfg = *(vip)CIA_IOC_CFG;
+ *(vip)CIA_IOC_CFG = (cia_cfg & ~3) | 1;
+ mb();
+ *(vip)CIA_IOC_CFG;
+ }
+
+ mb();
+ draina();
+ mcheck_expected(0) = 1;
+ mcheck_taken(0) = 0;
+ mb();
+
+ /* Access configuration space. */
+ *(vip)addr = value;
+ mb();
+ *(vip)addr; /* read back to force the write */
+
+ mcheck_expected(0) = 0;
+ mb();
+
+ /* If Type1 access, must reset IOC CFG so normal IO space ops work. */
+ if (type1) {
+ *(vip)CIA_IOC_CFG = cia_cfg;
+ mb();
+ *(vip)CIA_IOC_CFG;
+ }
+
+ local_irq_restore(flags);
+ DBGC(("done\n"));
+}
+
+static int
+cia_read_config(struct pci_bus *bus, unsigned int devfn, int where, int size,
+ u32 *value)
+{
+ unsigned long addr, pci_addr;
+ long mask;
+ unsigned char type1;
+ int shift;
+
+ if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ mask = (size - 1) * 8;
+ shift = (where & 3) * 8;
+ addr = (pci_addr << 5) + mask + CIA_CONF;
+ *value = conf_read(addr, type1) >> (shift);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+cia_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size,
+ u32 value)
+{
+ unsigned long addr, pci_addr;
+ long mask;
+ unsigned char type1;
+
+ if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ mask = (size - 1) * 8;
+ addr = (pci_addr << 5) + mask + CIA_CONF;
+ conf_write(addr, value << ((where & 3) * 8), type1);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops cia_pci_ops =
+{
+ .read = cia_read_config,
+ .write = cia_write_config,
+};
+
+/*
+ * CIA Pass 1 and PYXIS Pass 1 and 2 have a broken scatter-gather tlb.
+ * It cannot be invalidated. Rather than hard code the pass numbers,
+ * actually try the tbia to see if it works.
+ */
+
+void
+cia_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
+{
+ wmb();
+ *(vip)CIA_IOC_PCI_TBIA = 3; /* Flush all locked and unlocked. */
+ mb();
+ *(vip)CIA_IOC_PCI_TBIA;
+}
+
+/*
+ * On PYXIS, even if the tbia works, we cannot use it. It effectively locks
+ * the chip (as well as direct write to the tag registers) if there is a
+ * SG DMA operation in progress. This is true at least for PYXIS rev. 1,
+ * so always use the method below.
+ */
+/*
+ * This is the method NT and NetBSD use.
+ *
+ * Allocate mappings, and put the chip into DMA loopback mode to read a
+ * garbage page. This works by causing TLB misses, causing old entries to
+ * be purged to make room for the new entries coming in for the garbage page.
+ */
+
+#define CIA_BROKEN_TBIA_BASE 0x30000000
+#define CIA_BROKEN_TBIA_SIZE 1024
+
+/* Always called with interrupts disabled */
+void
+cia_pci_tbi_try2(struct pci_controller *hose,
+ dma_addr_t start, dma_addr_t end)
+{
+ void __iomem *bus_addr;
+ int ctrl;
+
+ /* Put the chip into PCI loopback mode. */
+ mb();
+ ctrl = *(vip)CIA_IOC_CIA_CTRL;
+ *(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN;
+ mb();
+ *(vip)CIA_IOC_CIA_CTRL;
+ mb();
+
+ /* Read from PCI dense memory space at TBI_ADDR, skipping 32k on
+ each read. This forces SG TLB misses. NetBSD claims that the
+ TLB entries are not quite LRU, meaning that we need to read more
+ times than there are actual tags. The 2117x docs claim strict
+ round-robin. Oh well, we've come this far... */
+ /* Even better - as seen on the PYXIS rev 1 the TLB tags 0-3 can
+ be filled by the TLB misses *only once* after being invalidated
+ (by tbia or direct write). Next misses won't update them even
+ though the lock bits are cleared. Tags 4-7 are "quite LRU" though,
+ so use them and read at window 3 base exactly 4 times. Reading
+ more sometimes makes the chip crazy. -ink */
+
+ bus_addr = cia_ioremap(CIA_BROKEN_TBIA_BASE, 32768 * 4);
+
+ cia_readl(bus_addr + 0x00000);
+ cia_readl(bus_addr + 0x08000);
+ cia_readl(bus_addr + 0x10000);
+ cia_readl(bus_addr + 0x18000);
+
+ cia_iounmap(bus_addr);
+
+ /* Restore normal PCI operation. */
+ mb();
+ *(vip)CIA_IOC_CIA_CTRL = ctrl;
+ mb();
+ *(vip)CIA_IOC_CIA_CTRL;
+ mb();
+}
+
+static inline void
+cia_prepare_tbia_workaround(int window)
+{
+ unsigned long *ppte, pte;
+ long i;
+
+ /* Use minimal 1K map. */
+ ppte = __alloc_bootmem(CIA_BROKEN_TBIA_SIZE, 32768, 0);
+ pte = (virt_to_phys(ppte) >> (PAGE_SHIFT - 1)) | 1;
+
+ for (i = 0; i < CIA_BROKEN_TBIA_SIZE / sizeof(unsigned long); ++i)
+ ppte[i] = pte;
+
+ *(vip)CIA_IOC_PCI_Wn_BASE(window) = CIA_BROKEN_TBIA_BASE | 3;
+ *(vip)CIA_IOC_PCI_Wn_MASK(window)
+ = (CIA_BROKEN_TBIA_SIZE*1024 - 1) & 0xfff00000;
+ *(vip)CIA_IOC_PCI_Tn_BASE(window) = virt_to_phys(ppte) >> 2;
+}
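
The PTE built above uses the scatter-gather format this chipset expects: the page frame number sits in the bits above bit 0, and bit 0 is the valid flag, so for a page-aligned physical address the two shifts collapse into one. A sketch restating that line, not new behaviour:

/* pfn = phys >> PAGE_SHIFT lands in bits 1 and up; bit 0 = valid.
   For page-aligned phys this equals (phys >> (PAGE_SHIFT - 1)) | 1,
   which is exactly what cia_prepare_tbia_workaround() computes. */
static unsigned long mk_sg_pte(unsigned long phys)
{
	return ((phys >> PAGE_SHIFT) << 1) | 1;
}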
+
+static void __init
+verify_tb_operation(void)
+{
+ static int page[PAGE_SIZE/4]
+ __attribute__((aligned(PAGE_SIZE)))
+ __initdata = { 0 };
+
+ struct pci_iommu_arena *arena = pci_isa_hose->sg_isa;
+ int ctrl, addr0, tag0, pte0, data0;
+ int temp, use_tbia_try2 = 0;
+ void __iomem *bus_addr;
+
+ /* pyxis -- tbia is broken */
+ if (pci_isa_hose->dense_io_base)
+ use_tbia_try2 = 1;
+
+ /* Put the chip into PCI loopback mode. */
+ mb();
+ ctrl = *(vip)CIA_IOC_CIA_CTRL;
+ *(vip)CIA_IOC_CIA_CTRL = ctrl | CIA_CTRL_PCI_LOOP_EN;
+ mb();
+ *(vip)CIA_IOC_CIA_CTRL;
+ mb();
+
+ /* Write a valid entry directly into the TLB registers. */
+
+ addr0 = arena->dma_base;
+ tag0 = addr0 | 1;
+ pte0 = (virt_to_phys(page) >> (PAGE_SHIFT - 1)) | 1;
+
+ *(vip)CIA_IOC_TB_TAGn(0) = tag0;
+ *(vip)CIA_IOC_TB_TAGn(1) = 0;
+ *(vip)CIA_IOC_TB_TAGn(2) = 0;
+ *(vip)CIA_IOC_TB_TAGn(3) = 0;
+ *(vip)CIA_IOC_TB_TAGn(4) = 0;
+ *(vip)CIA_IOC_TB_TAGn(5) = 0;
+ *(vip)CIA_IOC_TB_TAGn(6) = 0;
+ *(vip)CIA_IOC_TB_TAGn(7) = 0;
+ *(vip)CIA_IOC_TBn_PAGEm(0,0) = pte0;
+ *(vip)CIA_IOC_TBn_PAGEm(0,1) = 0;
+ *(vip)CIA_IOC_TBn_PAGEm(0,2) = 0;
+ *(vip)CIA_IOC_TBn_PAGEm(0,3) = 0;
+ mb();
+
+ /* Get a usable bus address */
+ bus_addr = cia_ioremap(addr0, 8*PAGE_SIZE);
+
+ /* First, verify we can read back what we've written. If
+ this fails, we can't be sure of any of the other testing
+ we're going to do, so bail. */
+ /* ??? Actually, we could do the work with machine checks.
+ By passing this register update test, we pretty much
+ guarantee that cia_pci_tbi_try1 works. If this test
+ fails, cia_pci_tbi_try2 might still work. */
+
+ temp = *(vip)CIA_IOC_TB_TAGn(0);
+ if (temp != tag0) {
+ printk("pci: failed tb register update test "
+ "(tag0 %#x != %#x)\n", temp, tag0);
+ goto failed;
+ }
+ temp = *(vip)CIA_IOC_TB_TAGn(1);
+ if (temp != 0) {
+ printk("pci: failed tb register update test "
+ "(tag1 %#x != 0)\n", temp);
+ goto failed;
+ }
+ temp = *(vip)CIA_IOC_TBn_PAGEm(0,0);
+ if (temp != pte0) {
+ printk("pci: failed tb register update test "
+ "(pte0 %#x != %#x)\n", temp, pte0);
+ goto failed;
+ }
+ printk("pci: passed tb register update test\n");
+
+ /* Second, verify we can actually do I/O through this entry. */
+
+ data0 = 0xdeadbeef;
+ page[0] = data0;
+ mcheck_expected(0) = 1;
+ mcheck_taken(0) = 0;
+ mb();
+ temp = cia_readl(bus_addr);
+ mb();
+ mcheck_expected(0) = 0;
+ mb();
+ if (mcheck_taken(0)) {
+ printk("pci: failed sg loopback i/o read test (mcheck)\n");
+ goto failed;
+ }
+ if (temp != data0) {
+ printk("pci: failed sg loopback i/o read test "
+ "(%#x != %#x)\n", temp, data0);
+ goto failed;
+ }
+ printk("pci: passed sg loopback i/o read test\n");
+
+ /* Third, try to invalidate the TLB. */
+
+ if (! use_tbia_try2) {
+ cia_pci_tbi(arena->hose, 0, -1);
+ temp = *(vip)CIA_IOC_TB_TAGn(0);
+ if (temp & 1) {
+ use_tbia_try2 = 1;
+ printk("pci: failed tbia test; workaround available\n");
+ } else {
+ printk("pci: passed tbia test\n");
+ }
+ }
+
+ /* Fourth, verify the TLB snoops the EV5's caches when
+ doing a tlb fill. */
+
+ data0 = 0x5adda15e;
+ page[0] = data0;
+ arena->ptes[4] = pte0;
+ mcheck_expected(0) = 1;
+ mcheck_taken(0) = 0;
+ mb();
+ temp = cia_readl(bus_addr + 4*PAGE_SIZE);
+ mb();
+ mcheck_expected(0) = 0;
+ mb();
+ if (mcheck_taken(0)) {
+ printk("pci: failed pte write cache snoop test (mcheck)\n");
+ goto failed;
+ }
+ if (temp != data0) {
+ printk("pci: failed pte write cache snoop test "
+ "(%#x != %#x)\n", temp, data0);
+ goto failed;
+ }
+ printk("pci: passed pte write cache snoop test\n");
+
+ /* Fifth, verify that a previously invalid PTE entry gets
+ filled from the page table. */
+
+ data0 = 0xabcdef12;
+ page[0] = data0;
+ arena->ptes[5] = pte0;
+ mcheck_expected(0) = 1;
+ mcheck_taken(0) = 0;
+ mb();
+ temp = cia_readl(bus_addr + 5*PAGE_SIZE);
+ mb();
+ mcheck_expected(0) = 0;
+ mb();
+ if (mcheck_taken(0)) {
+ printk("pci: failed valid tag invalid pte reload test "
+ "(mcheck; workaround available)\n");
+ /* Work around this bug by aligning new allocations
+ on 4 page boundaries. */
+ arena->align_entry = 4;
+ } else if (temp != data0) {
+ printk("pci: failed valid tag invalid pte reload test "
+ "(%#x != %#x)\n", temp, data0);
+ goto failed;
+ } else {
+ printk("pci: passed valid tag invalid pte reload test\n");
+ }
+
+ /* Sixth, verify machine checks are working. Test invalid
+ pte under the same valid tag as we used above. */
+
+ mcheck_expected(0) = 1;
+ mcheck_taken(0) = 0;
+ mb();
+ temp = cia_readl(bus_addr + 6*PAGE_SIZE);
+ mb();
+ mcheck_expected(0) = 0;
+ mb();
+ printk("pci: %s pci machine check test\n",
+ mcheck_taken(0) ? "passed" : "failed");
+
+ /* Clean up after the tests. */
+ arena->ptes[4] = 0;
+ arena->ptes[5] = 0;
+
+ if (use_tbia_try2) {
+ alpha_mv.mv_pci_tbi = cia_pci_tbi_try2;
+
+ /* Tags 0-3 must be disabled if we use this workaround. */
+ wmb();
+ *(vip)CIA_IOC_TB_TAGn(0) = 2;
+ *(vip)CIA_IOC_TB_TAGn(1) = 2;
+ *(vip)CIA_IOC_TB_TAGn(2) = 2;
+ *(vip)CIA_IOC_TB_TAGn(3) = 2;
+
+ printk("pci: tbia workaround enabled\n");
+ }
+ alpha_mv.mv_pci_tbi(arena->hose, 0, -1);
+
+exit:
+ /* unmap the bus addr */
+ cia_iounmap(bus_addr);
+
+ /* Restore normal PCI operation. */
+ mb();
+ *(vip)CIA_IOC_CIA_CTRL = ctrl;
+ mb();
+ *(vip)CIA_IOC_CIA_CTRL;
+ mb();
+ return;
+
+failed:
+ printk("pci: disabling sg translation window\n");
+ *(vip)CIA_IOC_PCI_W0_BASE = 0;
+ *(vip)CIA_IOC_PCI_W1_BASE = 0;
+ pci_isa_hose->sg_isa = NULL;
+ alpha_mv.mv_pci_tbi = NULL;
+ goto exit;
+}
+
+#if defined(ALPHA_RESTORE_SRM_SETUP)
+/* Save CIA configuration data as the console had it set up. */
+struct
+{
+ unsigned int hae_mem;
+ unsigned int hae_io;
+ unsigned int pci_dac_offset;
+ unsigned int err_mask;
+ unsigned int cia_ctrl;
+ unsigned int cia_cnfg;
+ struct {
+ unsigned int w_base;
+ unsigned int w_mask;
+ unsigned int t_base;
+ } window[4];
+} saved_config __attribute((common));
+
+void
+cia_save_srm_settings(int is_pyxis)
+{
+ int i;
+
+ /* Save some important registers. */
+ saved_config.err_mask = *(vip)CIA_IOC_ERR_MASK;
+ saved_config.cia_ctrl = *(vip)CIA_IOC_CIA_CTRL;
+ saved_config.hae_mem = *(vip)CIA_IOC_HAE_MEM;
+ saved_config.hae_io = *(vip)CIA_IOC_HAE_IO;
+ saved_config.pci_dac_offset = *(vip)CIA_IOC_PCI_W_DAC;
+
+ if (is_pyxis)
+ saved_config.cia_cnfg = *(vip)CIA_IOC_CIA_CNFG;
+ else
+ saved_config.cia_cnfg = 0;
+
+ /* Save DMA windows configuration. */
+ for (i = 0; i < 4; i++) {
+ saved_config.window[i].w_base = *(vip)CIA_IOC_PCI_Wn_BASE(i);
+ saved_config.window[i].w_mask = *(vip)CIA_IOC_PCI_Wn_MASK(i);
+ saved_config.window[i].t_base = *(vip)CIA_IOC_PCI_Tn_BASE(i);
+ }
+ mb();
+}
+
+void
+cia_restore_srm_settings(void)
+{
+ int i;
+
+ for (i = 0; i < 4; i++) {
+ *(vip)CIA_IOC_PCI_Wn_BASE(i) = saved_config.window[i].w_base;
+ *(vip)CIA_IOC_PCI_Wn_MASK(i) = saved_config.window[i].w_mask;
+ *(vip)CIA_IOC_PCI_Tn_BASE(i) = saved_config.window[i].t_base;
+ }
+
+ *(vip)CIA_IOC_HAE_MEM = saved_config.hae_mem;
+ *(vip)CIA_IOC_HAE_IO = saved_config.hae_io;
+ *(vip)CIA_IOC_PCI_W_DAC = saved_config.pci_dac_offset;
+ *(vip)CIA_IOC_ERR_MASK = saved_config.err_mask;
+ *(vip)CIA_IOC_CIA_CTRL = saved_config.cia_ctrl;
+
+ if (saved_config.cia_cnfg) /* Must be pyxis. */
+ *(vip)CIA_IOC_CIA_CNFG = saved_config.cia_cnfg;
+
+ mb();
+}
+#else /* ALPHA_RESTORE_SRM_SETUP */
+#define cia_save_srm_settings(p) do {} while (0)
+#define cia_restore_srm_settings() do {} while (0)
+#endif /* ALPHA_RESTORE_SRM_SETUP */
+
+
+static void __init
+do_init_arch(int is_pyxis)
+{
+ struct pci_controller *hose;
+ int temp, cia_rev, tbia_window;
+
+ cia_rev = *(vip)CIA_IOC_CIA_REV & CIA_REV_MASK;
+ printk("pci: cia revision %d%s\n",
+ cia_rev, is_pyxis ? " (pyxis)" : "");
+
+ if (alpha_using_srm)
+ cia_save_srm_settings(is_pyxis);
+
+ /* Set up error reporting. */
+ temp = *(vip)CIA_IOC_ERR_MASK;
+ temp &= ~(CIA_ERR_CPU_PE | CIA_ERR_MEM_NEM | CIA_ERR_PA_PTE_INV
+ | CIA_ERR_RCVD_MAS_ABT | CIA_ERR_RCVD_TAR_ABT);
+ *(vip)CIA_IOC_ERR_MASK = temp;
+
+ /* Clear all currently pending errors. */
+ temp = *(vip)CIA_IOC_CIA_ERR;
+ *(vip)CIA_IOC_CIA_ERR = temp;
+
+ /* Turn on mchecks. */
+ temp = *(vip)CIA_IOC_CIA_CTRL;
+ temp |= CIA_CTRL_FILL_ERR_EN | CIA_CTRL_MCHK_ERR_EN;
+ *(vip)CIA_IOC_CIA_CTRL = temp;
+
+ /* Clear the CFG register, which gets used for PCI config space
+ accesses. That is the way we want to use it, and we do not
+ want to depend on what ARC or SRM might have left behind. */
+ *(vip)CIA_IOC_CFG = 0;
+
+ /* Zero the HAEs. */
+ *(vip)CIA_IOC_HAE_MEM = 0;
+ *(vip)CIA_IOC_HAE_IO = 0;
+
+ /* For PYXIS, we always use BWX bus and i/o accesses. To that end,
+ make sure they're enabled on the controller. At the same time,
+ enable the monster window. */
+ if (is_pyxis) {
+ temp = *(vip)CIA_IOC_CIA_CNFG;
+ temp |= CIA_CNFG_IOA_BWEN | CIA_CNFG_PCI_MWEN;
+ *(vip)CIA_IOC_CIA_CNFG = temp;
+ }
+
+ /* Synchronize with all previous changes. */
+ mb();
+ *(vip)CIA_IOC_CIA_REV;
+
+ /*
+ * Create our single hose.
+ */
+
+ pci_isa_hose = hose = alloc_pci_controller();
+ hose->io_space = &ioport_resource;
+ hose->mem_space = &iomem_resource;
+ hose->index = 0;
+
+ if (! is_pyxis) {
+ struct resource *hae_mem = alloc_resource();
+ hose->mem_space = hae_mem;
+
+ hae_mem->start = 0;
+ hae_mem->end = CIA_MEM_R1_MASK;
+ hae_mem->name = pci_hae0_name;
+ hae_mem->flags = IORESOURCE_MEM;
+
+ if (request_resource(&iomem_resource, hae_mem) < 0)
+ printk(KERN_ERR "Failed to request HAE_MEM\n");
+
+ hose->sparse_mem_base = CIA_SPARSE_MEM - IDENT_ADDR;
+ hose->dense_mem_base = CIA_DENSE_MEM - IDENT_ADDR;
+ hose->sparse_io_base = CIA_IO - IDENT_ADDR;
+ hose->dense_io_base = 0;
+ } else {
+ hose->sparse_mem_base = 0;
+ hose->dense_mem_base = CIA_BW_MEM - IDENT_ADDR;
+ hose->sparse_io_base = 0;
+ hose->dense_io_base = CIA_BW_IO - IDENT_ADDR;
+ }
+
+ /*
+ * Set up the PCI to main memory translation windows.
+ *
+ * Window 0 is S/G 8MB at 8MB (for isa)
+ * Window 1 is S/G 1MB at 768MB (for tbia) (unused for CIA rev 1)
+ * Window 2 is direct access 2GB at 2GB
+ * Window 3 is DAC access 4GB at 8GB (or S/G for tbia if CIA rev 1)
+ *
+ * ??? NetBSD hints that page tables must be aligned to 32K,
+ * possibly due to a hardware bug. This is over-aligned
+ * from the 8K alignment one would expect for an 8MB window.
+ * No description of which revisions are affected.
+ */
+
+ hose->sg_pci = NULL;
+ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 32768);
+
+ __direct_map_base = 0x80000000;
+ __direct_map_size = 0x80000000;
+
+ *(vip)CIA_IOC_PCI_W0_BASE = hose->sg_isa->dma_base | 3;
+ *(vip)CIA_IOC_PCI_W0_MASK = (hose->sg_isa->size - 1) & 0xfff00000;
+ *(vip)CIA_IOC_PCI_T0_BASE = virt_to_phys(hose->sg_isa->ptes) >> 2;
+
+ *(vip)CIA_IOC_PCI_W2_BASE = __direct_map_base | 1;
+ *(vip)CIA_IOC_PCI_W2_MASK = (__direct_map_size - 1) & 0xfff00000;
+ *(vip)CIA_IOC_PCI_T2_BASE = 0 >> 2;
+
+ /* On PYXIS we have the monster window, selected by bit 40, so
+ there is no need for window3 to be enabled.
+
+ On CIA, we don't have true arbitrary addressing -- bits <39:32>
+ are compared against W_DAC. We can, however, directly map 4GB,
+ which is better than before. However, due to assumptions made
+ elsewhere, we should not claim that we support DAC unless that
+ 4GB covers all of physical memory.
+
+ On CIA rev 1, apparently W1 and W2 can't be used for SG.
+ At least, there are reports that it doesn't work for Alcor.
+ In that case, we have no choice but to use W3 for the TBIA
+ workaround, which means we can't use DAC at all. */
+
+ tbia_window = 1;
+ if (is_pyxis) {
+ *(vip)CIA_IOC_PCI_W3_BASE = 0;
+ } else if (cia_rev == 1) {
+ *(vip)CIA_IOC_PCI_W1_BASE = 0;
+ tbia_window = 3;
+ } else if (max_low_pfn > (0x100000000UL >> PAGE_SHIFT)) {
+ *(vip)CIA_IOC_PCI_W3_BASE = 0;
+ } else {
+ *(vip)CIA_IOC_PCI_W3_BASE = 0x00000000 | 1 | 8;
+ *(vip)CIA_IOC_PCI_W3_MASK = 0xfff00000;
+ *(vip)CIA_IOC_PCI_T3_BASE = 0 >> 2;
+
+ alpha_mv.pci_dac_offset = 0x200000000UL;
+ *(vip)CIA_IOC_PCI_W_DAC = alpha_mv.pci_dac_offset >> 32;
+ }
+
+ /* Prepare workaround for apparently broken tbia. */
+ cia_prepare_tbia_workaround(tbia_window);
+}
+
+void __init
+cia_init_arch(void)
+{
+ do_init_arch(0);
+}
+
+void __init
+pyxis_init_arch(void)
+{
+ /* On pyxis machines we can precisely calculate the
+ CPU clock frequency using pyxis real time counter.
+ It's especially useful for SX164 with broken RTC.
+
+ Both CPU and chipset are driven by the single 16.666M
+ or 16.667M crystal oscillator. PYXIS_RT_COUNT clock is
+ 66.66 MHz. -ink */
+
+ unsigned int cc0, cc1;
+ unsigned long pyxis_cc;
+
+ __asm__ __volatile__ ("rpcc %0" : "=r"(cc0));
+ pyxis_cc = *(vulp)PYXIS_RT_COUNT;
+ do { } while(*(vulp)PYXIS_RT_COUNT - pyxis_cc < 4096);
+ __asm__ __volatile__ ("rpcc %0" : "=r"(cc1));
+ cc1 -= cc0;
+ hwrpb->cycle_freq = ((cc1 >> 11) * 100000000UL) / 3;
+ hwrpb_update_checksum(hwrpb);
+
+ do_init_arch(1);
+}
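
The conversion constant in pyxis_init_arch() falls straight out of the stated counter frequency; the comment block below only restates the algebra:

/* 4096 ticks of a 200/3 MHz counter take
 *	t = 4096 * 3 / 200e6 seconds,
 * so the CPU frequency recovered from the rpcc delta cc1 is
 *	cc1 / t = cc1 * 200e6 / (3 * 4096)
 *	        = (cc1 >> 11) * 100000000 / 3,
 * which is exactly the expression stored in hwrpb->cycle_freq.
 */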
+
+void
+cia_kill_arch(int mode)
+{
+ if (alpha_using_srm)
+ cia_restore_srm_settings();
+}
+
+void __init
+cia_init_pci(void)
+{
+ /* Must delay this from init_arch, as we need machine checks. */
+ verify_tb_operation();
+ common_init_pci();
+}
+
+static inline void
+cia_pci_clr_err(void)
+{
+ int jd;
+
+ jd = *(vip)CIA_IOC_CIA_ERR;
+ *(vip)CIA_IOC_CIA_ERR = jd;
+ mb();
+ *(vip)CIA_IOC_CIA_ERR; /* re-read to force write. */
+}
+
+#ifdef CONFIG_VERBOSE_MCHECK
+static void
+cia_decode_pci_error(struct el_CIA_sysdata_mcheck *cia, const char *msg)
+{
+ static const char * const pci_cmd_desc[16] = {
+ "Interrupt Acknowledge", "Special Cycle", "I/O Read",
+ "I/O Write", "Reserved 0x4", "Reserved 0x5", "Memory Read",
+ "Memory Write", "Reserved 0x8", "Reserved 0x9",
+ "Configuration Read", "Configuration Write",
+ "Memory Read Multiple", "Dual Address Cycle",
+ "Memory Read Line", "Memory Write and Invalidate"
+ };
+
+ if (cia->cia_err & (CIA_ERR_COR_ERR
+ | CIA_ERR_UN_COR_ERR
+ | CIA_ERR_MEM_NEM
+ | CIA_ERR_PA_PTE_INV)) {
+ static const char * const window_desc[6] = {
+ "No window active", "Window 0 hit", "Window 1 hit",
+ "Window 2 hit", "Window 3 hit", "Monster window hit"
+ };
+
+ const char *window;
+ const char *cmd;
+ unsigned long addr, tmp;
+ int lock, dac;
+
+ cmd = pci_cmd_desc[cia->pci_err0 & 0x7];
+ lock = (cia->pci_err0 >> 4) & 1;
+ dac = (cia->pci_err0 >> 5) & 1;
+
+ tmp = (cia->pci_err0 >> 8) & 0x1F;
+ tmp = ffs(tmp);
+ window = window_desc[tmp];
+
+ addr = cia->pci_err1;
+ if (dac) {
+ tmp = *(vip)CIA_IOC_PCI_W_DAC & 0xFFUL;
+ addr |= tmp << 32;
+ }
+
+ printk(KERN_CRIT "CIA machine check: %s\n", msg);
+ printk(KERN_CRIT " DMA command: %s\n", cmd);
+ printk(KERN_CRIT " PCI address: %#010lx\n", addr);
+ printk(KERN_CRIT " %s, Lock: %d, DAC: %d\n",
+ window, lock, dac);
+ } else if (cia->cia_err & (CIA_ERR_PERR
+ | CIA_ERR_PCI_ADDR_PE
+ | CIA_ERR_RCVD_MAS_ABT
+ | CIA_ERR_RCVD_TAR_ABT
+ | CIA_ERR_IOA_TIMEOUT)) {
+ static const char * const master_st_desc[16] = {
+ "Idle", "Drive bus", "Address step cycle",
+ "Address cycle", "Data cycle", "Last read data cycle",
+ "Last write data cycle", "Read stop cycle",
+ "Write stop cycle", "Read turnaround cycle",
+ "Write turnaround cycle", "Reserved 0xB",
+ "Reserved 0xC", "Reserved 0xD", "Reserved 0xE",
+ "Unknown state"
+ };
+ static const char * const target_st_desc[16] = {
+ "Idle", "Busy", "Read data cycle", "Write data cycle",
+ "Read stop cycle", "Write stop cycle",
+ "Read turnaround cycle", "Write turnaround cycle",
+ "Read wait cycle", "Write wait cycle",
+ "Reserved 0xA", "Reserved 0xB", "Reserved 0xC",
+ "Reserved 0xD", "Reserved 0xE", "Unknown state"
+ };
+
+ const char *cmd;
+ const char *master, *target;
+ unsigned long addr, tmp;
+ int dac;
+
+ master = master_st_desc[(cia->pci_err0 >> 16) & 0xF];
+ target = target_st_desc[(cia->pci_err0 >> 20) & 0xF];
+ cmd = pci_cmd_desc[(cia->pci_err0 >> 24) & 0xF];
+ dac = (cia->pci_err0 >> 28) & 1;
+
+ addr = cia->pci_err2;
+ if (dac) {
+ tmp = *(volatile int *)CIA_IOC_PCI_W_DAC & 0xFFUL;
+ addr |= tmp << 32;
+ }
+
+ printk(KERN_CRIT "CIA machine check: %s\n", msg);
+ printk(KERN_CRIT " PCI command: %s\n", cmd);
+ printk(KERN_CRIT " Master state: %s, Target state: %s\n",
+ master, target);
+ printk(KERN_CRIT " PCI address: %#010lx, DAC: %d\n",
+ addr, dac);
+ } else {
+ printk(KERN_CRIT "CIA machine check: %s\n", msg);
+ printk(KERN_CRIT " Unknown PCI error\n");
+ printk(KERN_CRIT " PCI_ERR0 = %#08lx", cia->pci_err0);
+ printk(KERN_CRIT " PCI_ERR1 = %#08lx", cia->pci_err1);
+ printk(KERN_CRIT " PCI_ERR2 = %#08lx", cia->pci_err2);
+ }
+}
+
+static void
+cia_decode_mem_error(struct el_CIA_sysdata_mcheck *cia, const char *msg)
+{
+ unsigned long mem_port_addr;
+ unsigned long mem_port_mask;
+ const char *mem_port_cmd;
+ const char *seq_state;
+ const char *set_select;
+ unsigned long tmp;
+
+ /* If this is a DMA command, also decode the PCI bits. */
+ if ((cia->mem_err1 >> 20) & 1)
+ cia_decode_pci_error(cia, msg);
+ else
+ printk(KERN_CRIT "CIA machine check: %s\n", msg);
+
+ mem_port_addr = cia->mem_err0 & 0xfffffff0;
+ mem_port_addr |= (cia->mem_err1 & 0x83UL) << 32;
+
+ mem_port_mask = (cia->mem_err1 >> 12) & 0xF;
+
+ tmp = (cia->mem_err1 >> 8) & 0xF;
+ tmp |= ((cia->mem_err1 >> 20) & 1) << 4;
+ if ((tmp & 0x1E) == 0x06)
+ mem_port_cmd = "WRITE BLOCK or WRITE BLOCK LOCK";
+ else if ((tmp & 0x1C) == 0x08)
+ mem_port_cmd = "READ MISS or READ MISS MODIFY";
+ else if (tmp == 0x1C)
+ mem_port_cmd = "BC VICTIM";
+ else if ((tmp & 0x1E) == 0x0E)
+ mem_port_cmd = "READ MISS MODIFY";
+ else if ((tmp & 0x1C) == 0x18)
+ mem_port_cmd = "DMA READ or DMA READ MODIFY";
+ else if ((tmp & 0x1E) == 0x12)
+ mem_port_cmd = "DMA WRITE";
+ else
+ mem_port_cmd = "Unknown";
+
+ tmp = (cia->mem_err1 >> 16) & 0xF;
+ switch (tmp) {
+ case 0x0:
+ seq_state = "Idle";
+ break;
+ case 0x1:
+ seq_state = "DMA READ or DMA WRITE";
+ break;
+ case 0x2: case 0x3:
+ seq_state = "READ MISS (or READ MISS MODIFY) with victim";
+ break;
+ case 0x4: case 0x5: case 0x6:
+ seq_state = "READ MISS (or READ MISS MODIFY) with no victim";
+ break;
+ case 0x8: case 0x9: case 0xB:
+ seq_state = "Refresh";
+ break;
+ case 0xC:
+ seq_state = "Idle, waiting for DMA pending read";
+ break;
+ case 0xE: case 0xF:
+ seq_state = "Idle, ras precharge";
+ break;
+ default:
+ seq_state = "Unknown";
+ break;
+ }
+
+ tmp = (cia->mem_err1 >> 24) & 0x1F;
+ switch (tmp) {
+ case 0x00: set_select = "Set 0 selected"; break;
+ case 0x01: set_select = "Set 1 selected"; break;
+ case 0x02: set_select = "Set 2 selected"; break;
+ case 0x03: set_select = "Set 3 selected"; break;
+ case 0x04: set_select = "Set 4 selected"; break;
+ case 0x05: set_select = "Set 5 selected"; break;
+ case 0x06: set_select = "Set 6 selected"; break;
+ case 0x07: set_select = "Set 7 selected"; break;
+ case 0x08: set_select = "Set 8 selected"; break;
+ case 0x09: set_select = "Set 9 selected"; break;
+ case 0x0A: set_select = "Set A selected"; break;
+ case 0x0B: set_select = "Set B selected"; break;
+ case 0x0C: set_select = "Set C selected"; break;
+ case 0x0D: set_select = "Set D selected"; break;
+ case 0x0E: set_select = "Set E selected"; break;
+ case 0x0F: set_select = "Set F selected"; break;
+ case 0x10: set_select = "No set selected"; break;
+ case 0x1F: set_select = "Refresh cycle"; break;
+ default: set_select = "Unknown"; break;
+ }
+
+ printk(KERN_CRIT " Memory port command: %s\n", mem_port_cmd);
+ printk(KERN_CRIT " Memory port address: %#010lx, mask: %#lx\n",
+ mem_port_addr, mem_port_mask);
+ printk(KERN_CRIT " Memory sequencer state: %s\n", seq_state);
+ printk(KERN_CRIT " Memory set: %s\n", set_select);
+}
+
+static void
+cia_decode_ecc_error(struct el_CIA_sysdata_mcheck *cia, const char *msg)
+{
+ long syn;
+ long i;
+ const char *fmt;
+
+ cia_decode_mem_error(cia, msg);
+
+ syn = cia->cia_syn & 0xff;
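+	/* A power-of-two syndrome means a single ECC check bit flipped;
+	   any other value is looked up in the data-bit table below. */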
+ if (syn == (syn & -syn)) {
+ fmt = KERN_CRIT " ECC syndrome %#x -- check bit %d\n";
+ i = ffs(syn) - 1;
+ } else {
+ static unsigned char const data_bit[64] = {
+ 0xCE, 0xCB, 0xD3, 0xD5,
+ 0xD6, 0xD9, 0xDA, 0xDC,
+ 0x23, 0x25, 0x26, 0x29,
+ 0x2A, 0x2C, 0x31, 0x34,
+ 0x0E, 0x0B, 0x13, 0x15,
+ 0x16, 0x19, 0x1A, 0x1C,
+ 0xE3, 0xE5, 0xE6, 0xE9,
+ 0xEA, 0xEC, 0xF1, 0xF4,
+ 0x4F, 0x4A, 0x52, 0x54,
+ 0x57, 0x58, 0x5B, 0x5D,
+ 0xA2, 0xA4, 0xA7, 0xA8,
+ 0xAB, 0xAD, 0xB0, 0xB5,
+ 0x8F, 0x8A, 0x92, 0x94,
+ 0x97, 0x98, 0x9B, 0x9D,
+ 0x62, 0x64, 0x67, 0x68,
+ 0x6B, 0x6D, 0x70, 0x75
+ };
+
+ for (i = 0; i < 64; ++i)
+ if (data_bit[i] == syn)
+ break;
+
+ if (i < 64)
+ fmt = KERN_CRIT " ECC syndrome %#x -- data bit %d\n";
+ else
+ fmt = KERN_CRIT " ECC syndrome %#x -- unknown bit\n";
+ }
+
+	printk(fmt, syn, i);
+}
+
+static void
+cia_decode_parity_error(struct el_CIA_sysdata_mcheck *cia)
+{
+ static const char * const cmd_desc[16] = {
+ "NOP", "LOCK", "FETCH", "FETCH_M", "MEMORY BARRIER",
+ "SET DIRTY", "WRITE BLOCK", "WRITE BLOCK LOCK",
+ "READ MISS0", "READ MISS1", "READ MISS MOD0",
+ "READ MISS MOD1", "BCACHE VICTIM", "Spare",
+ "READ MISS MOD STC0", "READ MISS MOD STC1"
+ };
+
+ unsigned long addr;
+ unsigned long mask;
+ const char *cmd;
+ int par;
+
+ addr = cia->cpu_err0 & 0xfffffff0;
+ addr |= (cia->cpu_err1 & 0x83UL) << 32;
+ cmd = cmd_desc[(cia->cpu_err1 >> 8) & 0xF];
+ mask = (cia->cpu_err1 >> 12) & 0xF;
+ par = (cia->cpu_err1 >> 21) & 1;
+
+ printk(KERN_CRIT "CIA machine check: System bus parity error\n");
+ printk(KERN_CRIT " Command: %s, Parity bit: %d\n", cmd, par);
+ printk(KERN_CRIT " Address: %#010lx, Mask: %#lx\n", addr, mask);
+}
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+
+static int
+cia_decode_mchk(unsigned long la_ptr)
+{
+ struct el_common *com;
+ struct el_CIA_sysdata_mcheck *cia;
+
+ com = (void *)la_ptr;
+ cia = (void *)(la_ptr + com->sys_offset);
+
+ if ((cia->cia_err & CIA_ERR_VALID) == 0)
+ return 0;
+
+#ifdef CONFIG_VERBOSE_MCHECK
+ if (!alpha_verbose_mcheck)
+ return 1;
+
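+	/* Only the lowest-numbered error bit set in CIA_ERR is decoded;
+	   errors the chip could not log are reported as "lost" below. */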
+ switch (ffs(cia->cia_err & 0xfff) - 1) {
+ case 0: /* CIA_ERR_COR_ERR */
+ cia_decode_ecc_error(cia, "Corrected ECC error");
+ break;
+ case 1: /* CIA_ERR_UN_COR_ERR */
+ cia_decode_ecc_error(cia, "Uncorrected ECC error");
+ break;
+ case 2: /* CIA_ERR_CPU_PE */
+ cia_decode_parity_error(cia);
+ break;
+ case 3: /* CIA_ERR_MEM_NEM */
+ cia_decode_mem_error(cia, "Access to nonexistent memory");
+ break;
+ case 4: /* CIA_ERR_PCI_SERR */
+ cia_decode_pci_error(cia, "PCI bus system error");
+ break;
+ case 5: /* CIA_ERR_PERR */
+ cia_decode_pci_error(cia, "PCI data parity error");
+ break;
+ case 6: /* CIA_ERR_PCI_ADDR_PE */
+ cia_decode_pci_error(cia, "PCI address parity error");
+ break;
+ case 7: /* CIA_ERR_RCVD_MAS_ABT */
+ cia_decode_pci_error(cia, "PCI master abort");
+ break;
+ case 8: /* CIA_ERR_RCVD_TAR_ABT */
+ cia_decode_pci_error(cia, "PCI target abort");
+ break;
+ case 9: /* CIA_ERR_PA_PTE_INV */
+ cia_decode_pci_error(cia, "PCI invalid PTE");
+ break;
+ case 10: /* CIA_ERR_FROM_WRT_ERR */
+ cia_decode_mem_error(cia, "Write to flash ROM attempted");
+ break;
+ case 11: /* CIA_ERR_IOA_TIMEOUT */
+ cia_decode_pci_error(cia, "I/O timeout");
+ break;
+ }
+
+ if (cia->cia_err & CIA_ERR_LOST_CORR_ERR)
+ printk(KERN_CRIT "CIA lost machine check: "
+ "Correctable ECC error\n");
+ if (cia->cia_err & CIA_ERR_LOST_UN_CORR_ERR)
+ printk(KERN_CRIT "CIA lost machine check: "
+ "Uncorrectable ECC error\n");
+ if (cia->cia_err & CIA_ERR_LOST_CPU_PE)
+ printk(KERN_CRIT "CIA lost machine check: "
+ "System bus parity error\n");
+ if (cia->cia_err & CIA_ERR_LOST_MEM_NEM)
+ printk(KERN_CRIT "CIA lost machine check: "
+ "Access to nonexistent memory\n");
+ if (cia->cia_err & CIA_ERR_LOST_PERR)
+ printk(KERN_CRIT "CIA lost machine check: "
+ "PCI data parity error\n");
+ if (cia->cia_err & CIA_ERR_LOST_PCI_ADDR_PE)
+ printk(KERN_CRIT "CIA lost machine check: "
+ "PCI address parity error\n");
+ if (cia->cia_err & CIA_ERR_LOST_RCVD_MAS_ABT)
+ printk(KERN_CRIT "CIA lost machine check: "
+ "PCI master abort\n");
+ if (cia->cia_err & CIA_ERR_LOST_RCVD_TAR_ABT)
+ printk(KERN_CRIT "CIA lost machine check: "
+ "PCI target abort\n");
+ if (cia->cia_err & CIA_ERR_LOST_PA_PTE_INV)
+ printk(KERN_CRIT "CIA lost machine check: "
+ "PCI invalid PTE\n");
+ if (cia->cia_err & CIA_ERR_LOST_FROM_WRT_ERR)
+ printk(KERN_CRIT "CIA lost machine check: "
+ "Write to flash ROM attempted\n");
+ if (cia->cia_err & CIA_ERR_LOST_IOA_TIMEOUT)
+ printk(KERN_CRIT "CIA lost machine check: "
+ "I/O timeout\n");
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+ return 1;
+}
+
+void
+cia_machine_check(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs * regs)
+{
+ int expected;
+
+ /* Clear the error before any reporting. */
+ mb();
+ mb(); /* magic */
+ draina();
+ cia_pci_clr_err();
+ wrmces(rdmces()); /* reset machine check pending flag. */
+ mb();
+
+ expected = mcheck_expected(0);
+ if (!expected && vector == 0x660)
+ expected = cia_decode_mchk(la_ptr);
+ process_mcheck_info(vector, la_ptr, regs, "CIA", expected);
+}
diff --git a/arch/alpha/kernel/core_irongate.c b/arch/alpha/kernel/core_irongate.c
new file mode 100644
index 000000000000..138d497d1cca
--- /dev/null
+++ b/arch/alpha/kernel/core_irongate.c
@@ -0,0 +1,416 @@
+/*
+ * linux/arch/alpha/kernel/core_irongate.c
+ *
+ * Based on code written by David A. Rusling (david.rusling@reo.mts.dec.com).
+ *
+ * Copyright (C) 1999 Alpha Processor, Inc.,
+ * (David Daniel, Stig Telfer, Soohoon Lee)
+ *
+ * Code common to all IRONGATE core logic chips.
+ */
+
+#define __EXTERN_INLINE inline
+#include <asm/io.h>
+#include <asm/core_irongate.h>
+#undef __EXTERN_INLINE
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/initrd.h>
+#include <linux/bootmem.h>
+
+#include <asm/ptrace.h>
+#include <asm/pci.h>
+#include <asm/cacheflush.h>
+#include <asm/tlbflush.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+/*
+ * BIOS32-style PCI interface:
+ */
+
+#define DEBUG_CONFIG 0
+
+#if DEBUG_CONFIG
+# define DBG_CFG(args) printk args
+#else
+# define DBG_CFG(args)
+#endif
+
+igcsr32 *IronECC;
+
+/*
+ * Given a bus, device, and function number, compute resulting
+ * configuration space address accordingly. It is therefore not safe
+ * to have concurrent invocations of configuration space access
+ * routines, but there really shouldn't be any need for this.
+ *
+ * addr[31:24] reserved
+ * addr[23:16] bus number (8 bits = 256 possible buses)
+ * addr[15:11] Device number (5 bits)
+ * addr[10: 8] function number
+ * addr[ 7: 2] register number
+ *
+ * For IRONGATE:
+ * if (bus = addr[23:16]) == 0
+ * then
+ * type 0 config cycle:
+ * addr_on_pci[31:11] = id selection for device = addr[15:11]
+ * addr_on_pci[10: 2] = addr[10: 2] ???
+ * addr_on_pci[ 1: 0] = 00
+ * else
+ * type 1 config cycle (pass on with no decoding):
+ * addr_on_pci[31:24] = 0
+ * addr_on_pci[23: 2] = addr[23: 2]
+ * addr_on_pci[ 1: 0] = 01
+ * fi
+ *
+ * Notes:
+ * The function number selects which function of a multi-function device
+ * (e.g., SCSI and Ethernet).
+ *
+ * The register selects a DWORD (32 bit) register offset. Hence it
+ * doesn't get shifted by 2 bits as we want to "drop" the bottom two
+ * bits.
+ */
+
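+/*
+ * Worked example (illustrative): bus 0, device 7, function 0,
+ * register 0x10 gives device_fn = (7 << 3) | 0 = 0x38, so
+ * addr = (0 << 16) | (0x38 << 8) | 0x10 = 0x3810, which is then
+ * OR'd with IRONGATE_CONF below.
+ */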
+static int
+mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
+ unsigned long *pci_addr, unsigned char *type1)
+{
+ unsigned long addr;
+ u8 bus = pbus->number;
+
+ DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, "
+ "pci_addr=0x%p, type1=0x%p)\n",
+ bus, device_fn, where, pci_addr, type1));
+
+ *type1 = (bus != 0);
+
+ addr = (bus << 16) | (device_fn << 8) | where;
+ addr |= IRONGATE_CONF;
+
+ *pci_addr = addr;
+ DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
+ return 0;
+}
+
+static int
+irongate_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *value)
+{
+ unsigned long addr;
+ unsigned char type1;
+
+ if (mk_conf_addr(bus, devfn, where, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ *value = __kernel_ldbu(*(vucp)addr);
+ break;
+ case 2:
+ *value = __kernel_ldwu(*(vusp)addr);
+ break;
+ case 4:
+ *value = *(vuip)addr;
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+irongate_write_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 value)
+{
+ unsigned long addr;
+ unsigned char type1;
+
+ if (mk_conf_addr(bus, devfn, where, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ __kernel_stb(value, *(vucp)addr);
+ mb();
+ __kernel_ldbu(*(vucp)addr);
+ break;
+ case 2:
+ __kernel_stw(value, *(vusp)addr);
+ mb();
+ __kernel_ldwu(*(vusp)addr);
+ break;
+ case 4:
+ *(vuip)addr = value;
+ mb();
+ *(vuip)addr;
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops irongate_pci_ops =
+{
+ .read = irongate_read_config,
+ .write = irongate_write_config,
+};
+
+int
+irongate_pci_clr_err(void)
+{
+	unsigned int nmi_ctl = 0;
+ unsigned int IRONGATE_jd;
+
+again:
+ IRONGATE_jd = IRONGATE0->stat_cmd;
+ printk("Iron stat_cmd %x\n", IRONGATE_jd);
+ IRONGATE0->stat_cmd = IRONGATE_jd; /* write again clears error bits */
+ mb();
+ IRONGATE_jd = IRONGATE0->stat_cmd; /* re-read to force write */
+
+ IRONGATE_jd = *IronECC;
+ printk("Iron ECC %x\n", IRONGATE_jd);
+ *IronECC = IRONGATE_jd; /* write again clears error bits */
+ mb();
+ IRONGATE_jd = *IronECC; /* re-read to force write */
+
+ /* Clear ALI NMI */
+ nmi_ctl = inb(0x61);
+ nmi_ctl |= 0x0c;
+ outb(nmi_ctl, 0x61);
+ nmi_ctl &= ~0x0c;
+ outb(nmi_ctl, 0x61);
+
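+	/* If the ECC error bits (mask 0x300) are still set, another
+	   error presumably arrived while we were clearing; try again. */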
+ IRONGATE_jd = *IronECC;
+ if (IRONGATE_jd & 0x300) goto again;
+
+ return 0;
+}
+
+#define IRONGATE_3GB 0xc0000000UL
+
+/* On Albacore (aka UP1500) with 4GB of RAM we have to reserve some
+   memory for PCI. At this point we just reserve memory above 3GB. Most
+ of this memory will be freed after PCI setup is done. */
+static void __init
+albacore_init_arch(void)
+{
+ unsigned long memtop = max_low_pfn << PAGE_SHIFT;
+ unsigned long pci_mem = (memtop + 0x1000000UL) & ~0xffffffUL;
+ struct percpu_struct *cpu;
+ int pal_rev, pal_var;
+
+ cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);
+ pal_rev = cpu->pal_revision & 0xffff;
+ pal_var = (cpu->pal_revision >> 16) & 0xff;
+
+ /* Consoles earlier than A5.6-18 (OSF PALcode v1.62-2) set up
+ the CPU incorrectly (leave speculative stores enabled),
+ which causes memory corruption under certain conditions.
+ Issue a warning for such consoles. */
+ if (alpha_using_srm &&
+ (pal_rev < 0x13e || (pal_rev == 0x13e && pal_var < 2)))
+ printk(KERN_WARNING "WARNING! Upgrade to SRM A5.6-19 "
+ "or later\n");
+
+ if (pci_mem > IRONGATE_3GB)
+ pci_mem = IRONGATE_3GB;
+ IRONGATE0->pci_mem = pci_mem;
+ alpha_mv.min_mem_address = pci_mem;
+ if (memtop > pci_mem) {
+#ifdef CONFIG_BLK_DEV_INITRD
+ extern unsigned long initrd_start, initrd_end;
+ extern void *move_initrd(unsigned long);
+
+ /* Move the initrd out of the way. */
+ if (initrd_end && __pa(initrd_end) > pci_mem) {
+ unsigned long size;
+
+ size = initrd_end - initrd_start;
+ free_bootmem_node(NODE_DATA(0), __pa(initrd_start),
+ PAGE_ALIGN(size));
+ if (!move_initrd(pci_mem))
+ printk("irongate_init_arch: initrd too big "
+ "(%ldK)\ndisabling initrd\n",
+ size / 1024);
+ }
+#endif
+ reserve_bootmem_node(NODE_DATA(0), pci_mem, memtop - pci_mem);
+ printk("irongate_init_arch: temporarily reserving "
+ "region %08lx-%08lx for PCI\n", pci_mem, memtop - 1);
+ }
+}
+
+static void __init
+irongate_setup_agp(void)
+{
+ /* Disable the GART window. AGPGART doesn't work due to yet
+ unresolved memory coherency issues... */
+ IRONGATE0->agpva = IRONGATE0->agpva & ~0xf;
+ alpha_agpgart_size = 0;
+}
+
+void __init
+irongate_init_arch(void)
+{
+ struct pci_controller *hose;
+ int amd761 = (IRONGATE0->dev_vendor >> 16) > 0x7006; /* Albacore? */
+
+ IronECC = amd761 ? &IRONGATE0->bacsr54_eccms761 : &IRONGATE0->dramms;
+
+ irongate_pci_clr_err();
+
+ if (amd761)
+ albacore_init_arch();
+
+ irongate_setup_agp();
+
+ /*
+ * Create our single hose.
+ */
+
+ pci_isa_hose = hose = alloc_pci_controller();
+ hose->io_space = &ioport_resource;
+ hose->mem_space = &iomem_resource;
+ hose->index = 0;
+
+ /* This is for userland consumption. For some reason, the 40-bit
+ PIO bias that we use in the kernel through KSEG didn't work for
+ the page table based user mappings. So make sure we get the
+ 43-bit PIO bias. */
+ hose->sparse_mem_base = 0;
+ hose->sparse_io_base = 0;
+ hose->dense_mem_base
+ = (IRONGATE_MEM & 0xffffffffffUL) | 0x80000000000UL;
+ hose->dense_io_base
+ = (IRONGATE_IO & 0xffffffffffUL) | 0x80000000000UL;
+
+ hose->sg_isa = hose->sg_pci = NULL;
+ __direct_map_base = 0;
+ __direct_map_size = 0xffffffff;
+}
+
+/*
+ * IO map and AGP support
+ */
+#include <linux/vmalloc.h>
+#include <linux/agp_backend.h>
+#include <linux/agpgart.h>
+#include <asm/pgalloc.h>
+
+#define GET_PAGE_DIR_OFF(addr) (addr >> 22)
+#define GET_PAGE_DIR_IDX(addr) (GET_PAGE_DIR_OFF(addr))
+
+#define GET_GATT_OFF(addr) ((addr & 0x003ff000) >> 12)
+#define GET_GATT(addr) (gatt_pages[GET_PAGE_DIR_IDX(addr)])
+
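+/*
+ * For example (illustrative): addr 0x00c05000 selects page directory
+ * entry 0x00c05000 >> 22 = 3 and GATT entry
+ * (0x00c05000 & 0x003ff000) >> 12 = 5.
+ */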
+void __iomem *
+irongate_ioremap(unsigned long addr, unsigned long size)
+{
+ struct vm_struct *area;
+ unsigned long vaddr;
+ unsigned long baddr, last;
+ u32 *mmio_regs, *gatt_pages, *cur_gatt, pte;
+ unsigned long gart_bus_addr;
+
+ if (!alpha_agpgart_size)
+ return (void __iomem *)(addr + IRONGATE_MEM);
+
+ gart_bus_addr = (unsigned long)IRONGATE0->bar0 &
+ PCI_BASE_ADDRESS_MEM_MASK;
+
+ /*
+ * Check for within the AGP aperture...
+ */
+ do {
+ /*
+ * Check the AGP area
+ */
+ if (addr >= gart_bus_addr && addr + size - 1 <
+ gart_bus_addr + alpha_agpgart_size)
+ break;
+
+ /*
+ * Not found - assume legacy ioremap
+ */
+ return (void __iomem *)(addr + IRONGATE_MEM);
+ } while(0);
+
+ mmio_regs = (u32 *)(((unsigned long)IRONGATE0->bar1 &
+ PCI_BASE_ADDRESS_MEM_MASK) + IRONGATE_MEM);
+
+ gatt_pages = (u32 *)(phys_to_virt(mmio_regs[1])); /* FIXME */
+
+ /*
+ * Adjust the limits (mappings must be page aligned)
+ */
+ if (addr & ~PAGE_MASK) {
+ printk("AGP ioremap failed... addr not page aligned (0x%lx)\n",
+ addr);
+ return (void __iomem *)(addr + IRONGATE_MEM);
+ }
+ last = addr + size - 1;
+ size = PAGE_ALIGN(last) - addr;
+
+#if 0
+ printk("irongate_ioremap(0x%lx, 0x%lx)\n", addr, size);
+ printk("irongate_ioremap: gart_bus_addr 0x%lx\n", gart_bus_addr);
+	printk("irongate_ioremap: agpgart_size 0x%lx\n", alpha_agpgart_size);
+ printk("irongate_ioremap: mmio_regs %p\n", mmio_regs);
+ printk("irongate_ioremap: gatt_pages %p\n", gatt_pages);
+
+ for(baddr = addr; baddr <= last; baddr += PAGE_SIZE)
+ {
+ cur_gatt = phys_to_virt(GET_GATT(baddr) & ~1);
+ pte = cur_gatt[GET_GATT_OFF(baddr)] & ~1;
+ printk("irongate_ioremap: cur_gatt %p pte 0x%x\n",
+ cur_gatt, pte);
+ }
+#endif
+
+ /*
+ * Map it
+ */
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area) return NULL;
+
+ for(baddr = addr, vaddr = (unsigned long)area->addr;
+ baddr <= last;
+ baddr += PAGE_SIZE, vaddr += PAGE_SIZE)
+ {
+ cur_gatt = phys_to_virt(GET_GATT(baddr) & ~1);
+ pte = cur_gatt[GET_GATT_OFF(baddr)] & ~1;
+
+ if (__alpha_remap_area_pages(vaddr,
+ pte, PAGE_SIZE, 0)) {
+ printk("AGP ioremap: FAILED to map...\n");
+ vfree(area->addr);
+ return NULL;
+ }
+ }
+
+ flush_tlb_all();
+
+ vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
+#if 0
+ printk("irongate_ioremap(0x%lx, 0x%lx) returning 0x%lx\n",
+ addr, size, vaddr);
+#endif
+ return (void __iomem *)vaddr;
+}
+
+void
+irongate_iounmap(volatile void __iomem *xaddr)
+{
+ unsigned long addr = (unsigned long) xaddr;
+ if (((long)addr >> 41) == -2)
+ return; /* kseg map, nothing to do */
+ if (addr)
+ return vfree((void *)(PAGE_MASK & addr));
+}
diff --git a/arch/alpha/kernel/core_lca.c b/arch/alpha/kernel/core_lca.c
new file mode 100644
index 000000000000..6a5a9145c676
--- /dev/null
+++ b/arch/alpha/kernel/core_lca.c
@@ -0,0 +1,515 @@
+/*
+ * linux/arch/alpha/kernel/core_lca.c
+ *
+ * Written by David Mosberger (davidm@cs.arizona.edu) with some code
+ * taken from Dave Rusling's (david.rusling@reo.mts.dec.com) 32-bit
+ * bios code.
+ *
+ * Code common to all LCA core logic chips.
+ */
+
+#define __EXTERN_INLINE inline
+#include <asm/io.h>
+#include <asm/core_lca.h>
+#undef __EXTERN_INLINE
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/tty.h>
+
+#include <asm/ptrace.h>
+#include <asm/smp.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+
+/*
+ * BIOS32-style PCI interface:
+ */
+
+/*
+ * Machine check reasons. Defined according to PALcode sources
+ * (osf.h and platform.h).
+ */
+#define MCHK_K_TPERR 0x0080
+#define MCHK_K_TCPERR 0x0082
+#define MCHK_K_HERR 0x0084
+#define MCHK_K_ECC_C 0x0086
+#define MCHK_K_ECC_NC 0x0088
+#define MCHK_K_UNKNOWN 0x008A
+#define MCHK_K_CACKSOFT 0x008C
+#define MCHK_K_BUGCHECK 0x008E
+#define MCHK_K_OS_BUGCHECK 0x0090
+#define MCHK_K_DCPERR 0x0092
+#define MCHK_K_ICPERR 0x0094
+
+
+/*
+ * Platform-specific machine-check reasons:
+ */
+#define MCHK_K_SIO_SERR 0x204 /* all platforms so far */
+#define MCHK_K_SIO_IOCHK 0x206 /* all platforms so far */
+#define MCHK_K_DCSR 0x208 /* all but Noname */
+
+
+/*
+ * Given a bus, device, and function number, compute resulting
+ * configuration space address and setup the LCA_IOC_CONF register
+ * accordingly. It is therefore not safe to have concurrent
+ * invocations of configuration space access routines, but there
+ * really shouldn't be any need for this.
+ *
+ * Type 0:
+ *
+ * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | | | | | | | | | | | | | | | | | | | | | | |F|F|F|R|R|R|R|R|R|0|0|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * 31:11 Device select bit.
+ * 10:8 Function number
+ * 7:2 Register number
+ *
+ * Type 1:
+ *
+ * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * 31:24 reserved
+ * 23:16 bus number (8 bits = 256 possible buses)
+ * 15:11 Device number (5 bits)
+ * 10:8 function number
+ * 7:2 register number
+ *
+ * Notes:
+ * The function number selects which function of a multi-function device
+ * (e.g., SCSI and Ethernet).
+ *
+ * The register selects a DWORD (32 bit) register offset. Hence it
+ * doesn't get shifted by 2 bits as we want to "drop" the bottom two
+ * bits.
+ */
+
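+/*
+ * Worked example (illustrative): a type 0 cycle to device 7,
+ * function 0, register 0x10 sets device select bit 11 + 7, giving
+ * addr = (1 << 18) | (0 << 8) | 0x10 = 0x40010; lca_read_config()
+ * then forms the sparse-space address
+ * (0x40010 << 5) + (size - 1) * 8 + LCA_CONF.
+ */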
+static int
+mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
+ unsigned long *pci_addr)
+{
+ unsigned long addr;
+ u8 bus = pbus->number;
+
+ if (bus == 0) {
+ int device = device_fn >> 3;
+ int func = device_fn & 0x7;
+
+ /* Type 0 configuration cycle. */
+
+ if (device > 12) {
+ return -1;
+ }
+
+ *(vulp)LCA_IOC_CONF = 0;
+ addr = (1 << (11 + device)) | (func << 8) | where;
+ } else {
+ /* Type 1 configuration cycle. */
+ *(vulp)LCA_IOC_CONF = 1;
+ addr = (bus << 16) | (device_fn << 8) | where;
+ }
+ *pci_addr = addr;
+ return 0;
+}
+
+static unsigned int
+conf_read(unsigned long addr)
+{
+ unsigned long flags, code, stat0;
+ unsigned int value;
+
+ local_irq_save(flags);
+
+	/* Reset status register to avoid losing errors. */
+ stat0 = *(vulp)LCA_IOC_STAT0;
+ *(vulp)LCA_IOC_STAT0 = stat0;
+ mb();
+
+ /* Access configuration space. */
+ value = *(vuip)addr;
+ draina();
+
+ stat0 = *(vulp)LCA_IOC_STAT0;
+ if (stat0 & LCA_IOC_STAT0_ERR) {
+ code = ((stat0 >> LCA_IOC_STAT0_CODE_SHIFT)
+ & LCA_IOC_STAT0_CODE_MASK);
+ if (code != 1) {
+ printk("lca.c:conf_read: got stat0=%lx\n", stat0);
+ }
+
+ /* Reset error status. */
+ *(vulp)LCA_IOC_STAT0 = stat0;
+ mb();
+
+ /* Reset machine check. */
+ wrmces(0x7);
+
+ value = 0xffffffff;
+ }
+ local_irq_restore(flags);
+ return value;
+}
+
+static void
+conf_write(unsigned long addr, unsigned int value)
+{
+ unsigned long flags, code, stat0;
+
+ local_irq_save(flags); /* avoid getting hit by machine check */
+
+	/* Reset status register to avoid losing errors. */
+ stat0 = *(vulp)LCA_IOC_STAT0;
+ *(vulp)LCA_IOC_STAT0 = stat0;
+ mb();
+
+ /* Access configuration space. */
+ *(vuip)addr = value;
+ draina();
+
+ stat0 = *(vulp)LCA_IOC_STAT0;
+ if (stat0 & LCA_IOC_STAT0_ERR) {
+ code = ((stat0 >> LCA_IOC_STAT0_CODE_SHIFT)
+ & LCA_IOC_STAT0_CODE_MASK);
+ if (code != 1) {
+ printk("lca.c:conf_write: got stat0=%lx\n", stat0);
+ }
+
+ /* Reset error status. */
+ *(vulp)LCA_IOC_STAT0 = stat0;
+ mb();
+
+ /* Reset machine check. */
+ wrmces(0x7);
+ }
+ local_irq_restore(flags);
+}
+
+static int
+lca_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *value)
+{
+ unsigned long addr, pci_addr;
+ long mask;
+ int shift;
+
+ if (mk_conf_addr(bus, devfn, where, &pci_addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
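+	/* Sparse-space access: the (size - 1) * 8 term encodes the
+	   transfer length in the low address bits; the shift extracts
+	   the byte lanes of interest from the returned longword. */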
+ shift = (where & 3) * 8;
+ mask = (size - 1) * 8;
+ addr = (pci_addr << 5) + mask + LCA_CONF;
+ *value = conf_read(addr) >> (shift);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+lca_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size,
+ u32 value)
+{
+ unsigned long addr, pci_addr;
+ long mask;
+
+ if (mk_conf_addr(bus, devfn, where, &pci_addr))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ mask = (size - 1) * 8;
+ addr = (pci_addr << 5) + mask + LCA_CONF;
+ conf_write(addr, value << ((where & 3) * 8));
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops lca_pci_ops =
+{
+ .read = lca_read_config,
+ .write = lca_write_config,
+};
+
+void
+lca_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
+{
+ wmb();
+ *(vulp)LCA_IOC_TBIA = 0;
+ mb();
+}
+
+void __init
+lca_init_arch(void)
+{
+ struct pci_controller *hose;
+
+ /*
+ * Create our single hose.
+ */
+
+ pci_isa_hose = hose = alloc_pci_controller();
+ hose->io_space = &ioport_resource;
+ hose->mem_space = &iomem_resource;
+ hose->index = 0;
+
+ hose->sparse_mem_base = LCA_SPARSE_MEM - IDENT_ADDR;
+ hose->dense_mem_base = LCA_DENSE_MEM - IDENT_ADDR;
+ hose->sparse_io_base = LCA_IO - IDENT_ADDR;
+ hose->dense_io_base = 0;
+
+ /*
+ * Set up the PCI to main memory translation windows.
+ *
+ * Mimic the SRM settings for the direct-map window.
+ * Window 0 is scatter-gather 8MB at 8MB (for isa).
+ * Window 1 is direct access 1GB at 1GB.
+ *
+ * Note that we do not try to save any of the DMA window CSRs
+ * before setting them, since we cannot read those CSRs on LCA.
+ */
+ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
+ hose->sg_pci = NULL;
+ __direct_map_base = 0x40000000;
+ __direct_map_size = 0x40000000;
+
+ *(vulp)LCA_IOC_W_BASE0 = hose->sg_isa->dma_base | (3UL << 32);
+ *(vulp)LCA_IOC_W_MASK0 = (hose->sg_isa->size - 1) & 0xfff00000;
+ *(vulp)LCA_IOC_T_BASE0 = virt_to_phys(hose->sg_isa->ptes);
+
+ *(vulp)LCA_IOC_W_BASE1 = __direct_map_base | (2UL << 32);
+ *(vulp)LCA_IOC_W_MASK1 = (__direct_map_size - 1) & 0xfff00000;
+ *(vulp)LCA_IOC_T_BASE1 = 0;
+
+ *(vulp)LCA_IOC_TB_ENA = 0x80;
+
+ lca_pci_tbi(hose, 0, -1);
+
+ /*
+ * Disable PCI parity for now. The NCR53c810 chip has
+ * troubles meeting the PCI spec which results in
+ * data parity errors.
+ */
+ *(vulp)LCA_IOC_PAR_DIS = 1UL<<5;
+
+ /*
+ * Finally, set up for restoring the correct HAE if using SRM.
+ * Again, since we cannot read many of the CSRs on the LCA,
+ * one of which happens to be the HAE, we save the value that
+ * the SRM will expect...
+ */
+ if (alpha_using_srm)
+ srm_hae = 0x80000000UL;
+}
+
+/*
+ * Constants used during machine-check handling. I suppose these
+ * could be moved into lca.h but I don't see much reason why anybody
+ * else would want to use them.
+ */
+
+#define ESR_EAV (1UL<< 0) /* error address valid */
+#define ESR_CEE (1UL<< 1) /* correctable error */
+#define ESR_UEE (1UL<< 2) /* uncorrectable error */
+#define ESR_WRE (1UL<< 3) /* write-error */
+#define ESR_SOR (1UL<< 4) /* error source */
+#define ESR_CTE (1UL<< 7) /* cache-tag error */
+#define ESR_MSE (1UL<< 9) /* multiple soft errors */
+#define ESR_MHE (1UL<<10) /* multiple hard errors */
+#define ESR_NXM (1UL<<12) /* non-existent memory */
+
+#define IOC_ERR ( 1<<4) /* ioc logs an error */
+#define IOC_CMD_SHIFT 0
+#define IOC_CMD (0xf<<IOC_CMD_SHIFT)
+#define IOC_CODE_SHIFT 8
+#define IOC_CODE (0xf<<IOC_CODE_SHIFT)
+#define IOC_LOST ( 1<<5)
+#define IOC_P_NBR ((__u32) ~((1<<13) - 1))
+
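+/*
+ * Illustrative decode: stat0 = 0x536 has IOC_ERR (bit 4) and IOC_LOST
+ * (bit 5) set, cmd = 6 ("Memory Read") and code = 5 ("page table read
+ * error") -- see ioc_error() below.
+ */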
+static void
+mem_error(unsigned long esr, unsigned long ear)
+{
+ printk(" %s %s error to %s occurred at address %x\n",
+ ((esr & ESR_CEE) ? "Correctable" :
+ (esr & ESR_UEE) ? "Uncorrectable" : "A"),
+ (esr & ESR_WRE) ? "write" : "read",
+ (esr & ESR_SOR) ? "memory" : "b-cache",
+ (unsigned) (ear & 0x1ffffff8));
+ if (esr & ESR_CTE) {
+ printk(" A b-cache tag parity error was detected.\n");
+ }
+ if (esr & ESR_MSE) {
+ printk(" Several other correctable errors occurred.\n");
+ }
+ if (esr & ESR_MHE) {
+ printk(" Several other uncorrectable errors occurred.\n");
+ }
+ if (esr & ESR_NXM) {
+ printk(" Attempted to access non-existent memory.\n");
+ }
+}
+
+static void
+ioc_error(__u32 stat0, __u32 stat1)
+{
+ static const char * const pci_cmd[] = {
+ "Interrupt Acknowledge", "Special", "I/O Read", "I/O Write",
+		"Rsvd 1", "Rsvd 2", "Memory Read", "Memory Write", "Rsvd 3",
+		"Rsvd 4", "Configuration Read", "Configuration Write",
+ "Memory Read Multiple", "Dual Address", "Memory Read Line",
+ "Memory Write and Invalidate"
+ };
+ static const char * const err_name[] = {
+ "exceeded retry limit", "no device", "bad data parity",
+ "target abort", "bad address parity", "page table read error",
+ "invalid page", "data error"
+ };
+ unsigned code = (stat0 & IOC_CODE) >> IOC_CODE_SHIFT;
+ unsigned cmd = (stat0 & IOC_CMD) >> IOC_CMD_SHIFT;
+
+ printk(" %s initiated PCI %s cycle to address %x"
+ " failed due to %s.\n",
+ code > 3 ? "PCI" : "CPU", pci_cmd[cmd], stat1, err_name[code]);
+
+ if (code == 5 || code == 6) {
+ printk(" (Error occurred at PCI memory address %x.)\n",
+ (stat0 & ~IOC_P_NBR));
+ }
+ if (stat0 & IOC_LOST) {
+ printk(" Other PCI errors occurred simultaneously.\n");
+ }
+}
+
+void
+lca_machine_check(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs *regs)
+{
+ const char * reason;
+ union el_lca el;
+
+ el.c = (struct el_common *) la_ptr;
+
+ wrmces(rdmces()); /* reset machine check pending flag */
+
+ printk(KERN_CRIT "LCA machine check: vector=%#lx pc=%#lx code=%#x\n",
+ vector, regs->pc, (unsigned int) el.c->code);
+
+ /*
+ * The first quadword after the common header always seems to
+ * be the machine check reason---don't know why this isn't
+ * part of the common header instead. In the case of a long
+ * logout frame, the upper 32 bits are the machine check
+ * revision level, which we ignore for now.
+ */
+ switch ((unsigned int) el.c->code) {
+ case MCHK_K_TPERR: reason = "tag parity error"; break;
+ case MCHK_K_TCPERR: reason = "tag control parity error"; break;
+ case MCHK_K_HERR: reason = "access to non-existent memory"; break;
+ case MCHK_K_ECC_C: reason = "correctable ECC error"; break;
+ case MCHK_K_ECC_NC: reason = "non-correctable ECC error"; break;
+ case MCHK_K_CACKSOFT: reason = "MCHK_K_CACKSOFT"; break;
+ case MCHK_K_BUGCHECK: reason = "illegal exception in PAL mode"; break;
+ case MCHK_K_OS_BUGCHECK: reason = "callsys in kernel mode"; break;
+ case MCHK_K_DCPERR: reason = "d-cache parity error"; break;
+ case MCHK_K_ICPERR: reason = "i-cache parity error"; break;
+ case MCHK_K_SIO_SERR: reason = "SIO SERR occurred on PCI bus"; break;
+ case MCHK_K_SIO_IOCHK: reason = "SIO IOCHK occurred on ISA bus"; break;
+ case MCHK_K_DCSR: reason = "MCHK_K_DCSR"; break;
+ case MCHK_K_UNKNOWN:
+ default: reason = "unknown"; break;
+ }
+
+ switch (el.c->size) {
+ case sizeof(struct el_lca_mcheck_short):
+ printk(KERN_CRIT
+ " Reason: %s (short frame%s, dc_stat=%#lx):\n",
+ reason, el.c->retry ? ", retryable" : "",
+ el.s->dc_stat);
+ if (el.s->esr & ESR_EAV) {
+ mem_error(el.s->esr, el.s->ear);
+ }
+ if (el.s->ioc_stat0 & IOC_ERR) {
+ ioc_error(el.s->ioc_stat0, el.s->ioc_stat1);
+ }
+ break;
+
+ case sizeof(struct el_lca_mcheck_long):
+ printk(KERN_CRIT " Reason: %s (long frame%s):\n",
+ reason, el.c->retry ? ", retryable" : "");
+ printk(KERN_CRIT
+ " reason: %#lx exc_addr: %#lx dc_stat: %#lx\n",
+ el.l->pt[0], el.l->exc_addr, el.l->dc_stat);
+ printk(KERN_CRIT " car: %#lx\n", el.l->car);
+ if (el.l->esr & ESR_EAV) {
+ mem_error(el.l->esr, el.l->ear);
+ }
+ if (el.l->ioc_stat0 & IOC_ERR) {
+ ioc_error(el.l->ioc_stat0, el.l->ioc_stat1);
+ }
+ break;
+
+ default:
+ printk(KERN_CRIT " Unknown errorlog size %d\n", el.c->size);
+ }
+
+ /* Dump the logout area to give all info. */
+#ifdef CONFIG_VERBOSE_MCHECK
+ if (alpha_verbose_mcheck > 1) {
+ unsigned long * ptr = (unsigned long *) la_ptr;
+ long i;
+ for (i = 0; i < el.c->size / sizeof(long); i += 2) {
+ printk(KERN_CRIT " +%8lx %016lx %016lx\n",
+ i*sizeof(long), ptr[i], ptr[i+1]);
+ }
+ }
+#endif /* CONFIG_VERBOSE_MCHECK */
+}
+
+/*
+ * The following routines are needed to support the SPEED changing
+ * necessary to successfully manage the thermal problem on the AlphaBook1.
+ */
+
+void
+lca_clock_print(void)
+{
+ long pmr_reg;
+
+ pmr_reg = LCA_READ_PMR;
+
+ printk("Status of clock control:\n");
+ printk("\tPrimary clock divisor\t0x%lx\n", LCA_GET_PRIMARY(pmr_reg));
+ printk("\tOverride clock divisor\t0x%lx\n", LCA_GET_OVERRIDE(pmr_reg));
+ printk("\tInterrupt override is %s\n",
+ (pmr_reg & LCA_PMR_INTO) ? "on" : "off");
+ printk("\tDMA override is %s\n",
+ (pmr_reg & LCA_PMR_DMAO) ? "on" : "off");
+}
+
+int
+lca_get_clock(void)
+{
+ long pmr_reg;
+
+ pmr_reg = LCA_READ_PMR;
+	return LCA_GET_PRIMARY(pmr_reg);
+}
+
+void
+lca_clock_fiddle(int divisor)
+{
+ long pmr_reg;
+
+ pmr_reg = LCA_READ_PMR;
+ LCA_SET_PRIMARY_CLOCK(pmr_reg, divisor);
+ /* lca_norm_clock = divisor; */
+ LCA_WRITE_PMR(pmr_reg);
+ mb();
+}
diff --git a/arch/alpha/kernel/core_marvel.c b/arch/alpha/kernel/core_marvel.c
new file mode 100644
index 000000000000..44866cb26a80
--- /dev/null
+++ b/arch/alpha/kernel/core_marvel.c
@@ -0,0 +1,1154 @@
+/*
+ * linux/arch/alpha/kernel/core_marvel.c
+ *
+ * Code common to all Marvel based systems.
+ */
+
+#define __EXTERN_INLINE inline
+#include <asm/io.h>
+#include <asm/core_marvel.h>
+#undef __EXTERN_INLINE
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/mc146818rtc.h>
+#include <linux/rtc.h>
+#include <linux/module.h>
+#include <linux/bootmem.h>
+
+#include <asm/ptrace.h>
+#include <asm/smp.h>
+#include <asm/gct.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+#include <asm/rtc.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+
+/*
+ * Debug helpers
+ */
+#define DEBUG_CONFIG 0
+
+#if DEBUG_CONFIG
+# define DBG_CFG(args) printk args
+#else
+# define DBG_CFG(args)
+#endif
+
+
+/*
+ * Private data
+ */
+static struct io7 *io7_head = NULL;
+
+
+/*
+ * Helper functions
+ */
+static unsigned long __attribute__ ((unused))
+read_ev7_csr(int pe, unsigned long offset)
+{
+ ev7_csr *ev7csr = EV7_CSR_KERN(pe, offset);
+ unsigned long q;
+
+ mb();
+ q = ev7csr->csr;
+ mb();
+
+ return q;
+}
+
+static void __attribute__ ((unused))
+write_ev7_csr(int pe, unsigned long offset, unsigned long q)
+{
+ ev7_csr *ev7csr = EV7_CSR_KERN(pe, offset);
+
+ mb();
+ ev7csr->csr = q;
+ mb();
+}
+
+static char * __init
+mk_resource_name(int pe, int port, char *str)
+{
+ char tmp[80];
+ char *name;
+
+ sprintf(tmp, "PCI %s PE %d PORT %d", str, pe, port);
+ name = alloc_bootmem(strlen(tmp) + 1);
+ strcpy(name, tmp);
+
+ return name;
+}
+
+inline struct io7 *
+marvel_next_io7(struct io7 *prev)
+{
+ return (prev ? prev->next : io7_head);
+}
+
+struct io7 *
+marvel_find_io7(int pe)
+{
+ struct io7 *io7;
+
+ for (io7 = io7_head; io7 && io7->pe != pe; io7 = io7->next)
+ continue;
+
+ return io7;
+}
+
+static struct io7 * __init
+alloc_io7(unsigned int pe)
+{
+ struct io7 *io7;
+ struct io7 *insp;
+ int h;
+
+ if (marvel_find_io7(pe)) {
+ printk(KERN_WARNING "IO7 at PE %d already allocated!\n", pe);
+ return NULL;
+ }
+
+ io7 = alloc_bootmem(sizeof(*io7));
+ io7->pe = pe;
+ spin_lock_init(&io7->irq_lock);
+
+ for (h = 0; h < 4; h++) {
+ io7->ports[h].io7 = io7;
+ io7->ports[h].port = h;
+ io7->ports[h].enabled = 0; /* default to disabled */
+ }
+
+ /*
+ * Insert in pe sorted order.
+ */
+ if (NULL == io7_head) /* empty list */
+ io7_head = io7;
+ else if (io7_head->pe > io7->pe) { /* insert at head */
+ io7->next = io7_head;
+ io7_head = io7;
+ } else { /* insert at position */
+ for (insp = io7_head; insp; insp = insp->next) {
+ if (insp->pe == io7->pe) {
+ printk(KERN_ERR "Too many IO7s at PE %d\n",
+ io7->pe);
+ return NULL;
+ }
+
+ if (NULL == insp->next ||
+ insp->next->pe > io7->pe) { /* insert here */
+ io7->next = insp->next;
+ insp->next = io7;
+ break;
+ }
+ }
+
+ if (NULL == insp) { /* couldn't insert ?!? */
+ printk(KERN_WARNING "Failed to insert IO7 at PE %d "
+ " - adding at head of list\n", io7->pe);
+ io7->next = io7_head;
+ io7_head = io7;
+ }
+ }
+
+ return io7;
+}
+
+void
+io7_clear_errors(struct io7 *io7)
+{
+ io7_port7_csrs *p7csrs;
+ io7_ioport_csrs *csrs;
+ int port;
+
+
+ /*
+ * First the IO ports.
+ */
+ for (port = 0; port < 4; port++) {
+ csrs = IO7_CSRS_KERN(io7->pe, port);
+
+ csrs->POx_ERR_SUM.csr = -1UL;
+ csrs->POx_TLB_ERR.csr = -1UL;
+ csrs->POx_SPL_COMPLT.csr = -1UL;
+ csrs->POx_TRANS_SUM.csr = -1UL;
+ }
+
+ /*
+ * Then the common ones.
+ */
+ p7csrs = IO7_PORT7_CSRS_KERN(io7->pe);
+
+ p7csrs->PO7_ERROR_SUM.csr = -1UL;
+ p7csrs->PO7_UNCRR_SYM.csr = -1UL;
+ p7csrs->PO7_CRRCT_SYM.csr = -1UL;
+}
+
+
+/*
+ * IO7 PCI, PCI/X, AGP configuration.
+ */
+static void __init
+io7_init_hose(struct io7 *io7, int port)
+{
+ static int hose_index = 0;
+
+ struct pci_controller *hose = alloc_pci_controller();
+ struct io7_port *io7_port = &io7->ports[port];
+ io7_ioport_csrs *csrs = IO7_CSRS_KERN(io7->pe, port);
+ int i;
+
+ hose->index = hose_index++; /* arbitrary */
+
+ /*
+ * We don't have an isa or legacy hose, but glibc expects to be
+ * able to use the bus == 0 / dev == 0 form of the iobase syscall
+ * to determine information about the i/o system. Since XFree86
+ * relies on glibc's determination to tell whether or not to use
+ * sparse access, we need to point the pci_isa_hose at a real hose
+ * so at least that determination is correct.
+ */
+ if (hose->index == 0)
+ pci_isa_hose = hose;
+
+ io7_port->csrs = csrs;
+ io7_port->hose = hose;
+ hose->sysdata = io7_port;
+
+ hose->io_space = alloc_resource();
+ hose->mem_space = alloc_resource();
+
+ /*
+ * Base addresses for userland consumption. Since these are going
+ * to be mapped, they are pure physical addresses.
+ */
+ hose->sparse_mem_base = hose->sparse_io_base = 0;
+ hose->dense_mem_base = IO7_MEM_PHYS(io7->pe, port);
+ hose->dense_io_base = IO7_IO_PHYS(io7->pe, port);
+
+ /*
+ * Base addresses and resource ranges for kernel consumption.
+ */
+ hose->config_space_base = (unsigned long)IO7_CONF_KERN(io7->pe, port);
+
+ hose->io_space->start = (unsigned long)IO7_IO_KERN(io7->pe, port);
+ hose->io_space->end = hose->io_space->start + IO7_IO_SPACE - 1;
+ hose->io_space->name = mk_resource_name(io7->pe, port, "IO");
+ hose->io_space->flags = IORESOURCE_IO;
+
+ hose->mem_space->start = (unsigned long)IO7_MEM_KERN(io7->pe, port);
+ hose->mem_space->end = hose->mem_space->start + IO7_MEM_SPACE - 1;
+ hose->mem_space->name = mk_resource_name(io7->pe, port, "MEM");
+ hose->mem_space->flags = IORESOURCE_MEM;
+
+ if (request_resource(&ioport_resource, hose->io_space) < 0)
+ printk(KERN_ERR "Failed to request IO on hose %d\n",
+ hose->index);
+ if (request_resource(&iomem_resource, hose->mem_space) < 0)
+ printk(KERN_ERR "Failed to request MEM on hose %d\n",
+ hose->index);
+
+ /*
+ * Save the existing DMA window settings for later restoration.
+ */
+ for (i = 0; i < 4; i++) {
+ io7_port->saved_wbase[i] = csrs->POx_WBASE[i].csr;
+ io7_port->saved_wmask[i] = csrs->POx_WMASK[i].csr;
+ io7_port->saved_tbase[i] = csrs->POx_TBASE[i].csr;
+ }
+
+ /*
+ * Set up the PCI to main memory translation windows.
+ *
+ * Window 0 is scatter-gather 8MB at 8MB
+ * Window 1 is direct access 1GB at 2GB
+ * Window 2 is scatter-gather (up-to) 1GB at 3GB
+ * Window 3 is disabled
+ */
+
+ /*
+ * TBIA before modifying windows.
+ */
+ marvel_pci_tbi(hose, 0, -1);
+
+ /*
+ * Set up window 0 for scatter-gather 8MB at 8MB.
+ */
+ hose->sg_isa = iommu_arena_new_node(marvel_cpuid_to_nid(io7->pe),
+ hose, 0x00800000, 0x00800000, 0);
+ hose->sg_isa->align_entry = 8; /* cache line boundary */
+ csrs->POx_WBASE[0].csr =
+ hose->sg_isa->dma_base | wbase_m_ena | wbase_m_sg;
+ csrs->POx_WMASK[0].csr = (hose->sg_isa->size - 1) & wbase_m_addr;
+ csrs->POx_TBASE[0].csr = virt_to_phys(hose->sg_isa->ptes);
+
+ /*
+ * Set up window 1 for direct-mapped 1GB at 2GB.
+ */
+ csrs->POx_WBASE[1].csr = __direct_map_base | wbase_m_ena;
+ csrs->POx_WMASK[1].csr = (__direct_map_size - 1) & wbase_m_addr;
+ csrs->POx_TBASE[1].csr = 0;
+
+ /*
+ * Set up window 2 for scatter-gather (up-to) 1GB at 3GB.
+ */
+ hose->sg_pci = iommu_arena_new_node(marvel_cpuid_to_nid(io7->pe),
+ hose, 0xc0000000, 0x40000000, 0);
+ hose->sg_pci->align_entry = 8; /* cache line boundary */
+ csrs->POx_WBASE[2].csr =
+ hose->sg_pci->dma_base | wbase_m_ena | wbase_m_sg;
+ csrs->POx_WMASK[2].csr = (hose->sg_pci->size - 1) & wbase_m_addr;
+ csrs->POx_TBASE[2].csr = virt_to_phys(hose->sg_pci->ptes);
+
+ /*
+ * Disable window 3.
+ */
+ csrs->POx_WBASE[3].csr = 0;
+
+ /*
+ * Make sure that the AGP Monster Window is disabled.
+ */
+ csrs->POx_CTRL.csr &= ~(1UL << 61);
+
+#if 1
+ printk("FIXME: disabling master aborts\n");
+ csrs->POx_MSK_HEI.csr &= ~(3UL << 14);
+#endif
+ /*
+ * TBIA after modifying windows.
+ */
+ marvel_pci_tbi(hose, 0, -1);
+}
+
+static void __init
+marvel_init_io7(struct io7 *io7)
+{
+ int i;
+
+ printk("Initializing IO7 at PID %d\n", io7->pe);
+
+ /*
+ * Get the Port 7 CSR pointer.
+ */
+ io7->csrs = IO7_PORT7_CSRS_KERN(io7->pe);
+
+ /*
+ * Init this IO7's hoses.
+ */
+ for (i = 0; i < IO7_NUM_PORTS; i++) {
+ io7_ioport_csrs *csrs = IO7_CSRS_KERN(io7->pe, i);
+ if (csrs->POx_CACHE_CTL.csr == 8) {
+ io7->ports[i].enabled = 1;
+ io7_init_hose(io7, i);
+ }
+ }
+}
+
+void
+marvel_io7_present(gct6_node *node)
+{
+ int pe;
+
+ if (node->type != GCT_TYPE_HOSE ||
+ node->subtype != GCT_SUBTYPE_IO_PORT_MODULE)
+ return;
+
+ pe = (node->id >> 8) & 0xff;
+ printk("Found an IO7 at PID %d\n", pe);
+
+ alloc_io7(pe);
+}
+
+static void __init
+marvel_init_vga_hose(void)
+{
+#ifdef CONFIG_VGA_HOSE
+ u64 *pu64 = (u64 *)((u64)hwrpb + hwrpb->ctbt_offset);
+
+ if (pu64[7] == 3) { /* TERM_TYPE == graphics */
+ struct pci_controller *hose = NULL;
+ int h = (pu64[30] >> 24) & 0xff; /* TERM_OUT_LOC, hose # */
+ struct io7 *io7;
+ int pid, port;
+
+ /* FIXME - encoding is going to have to change for Marvel
+ * since hose will be able to overflow a byte...
+ * need to fix this decode when the console
+ * changes its encoding
+ */
+ printk("console graphics is on hose %d (console)\n", h);
+
+ /*
+ * The console's hose numbering is:
+ *
+ * hose<n:2>: PID
+ * hose<1:0>: PORT
+ *
+ * We need to find the hose at that pid and port
+ */
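+		/* e.g., h = 14 (0b1110) decodes to PID 3, port 2. */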
+ pid = h >> 2;
+ port = h & 3;
+ if ((io7 = marvel_find_io7(pid)))
+ hose = io7->ports[port].hose;
+
+ if (hose) {
+ printk("Console graphics on hose %d\n", hose->index);
+ pci_vga_hose = hose;
+ }
+ }
+#endif /* CONFIG_VGA_HOSE */
+}
+
+gct6_search_struct gct_wanted_node_list[] = {
+ { GCT_TYPE_HOSE, GCT_SUBTYPE_IO_PORT_MODULE, marvel_io7_present },
+ { 0, 0, NULL }
+};
+
+/*
+ * In case the GCT is not complete, let the user specify PIDs with IO7s
+ * at boot time. Syntax is 'io7=a,b,c,...,n' where a-n are the PIDs (decimal)
+ * where IO7s are connected
+ */
+static int __init
+marvel_specify_io7(char *str)
+{
+ unsigned long pid;
+ struct io7 *io7;
+ char *pchar;
+
+ do {
+ pid = simple_strtoul(str, &pchar, 0);
+ if (pchar != str) {
+ printk("User-specified IO7 at PID %lu\n", pid);
+ io7 = alloc_io7(pid);
+ if (io7) marvel_init_io7(io7);
+ }
+
+ if (pchar == str) pchar++;
+ str = pchar;
+ } while(*str);
+
+ return 0;
+}
+__setup("io7=", marvel_specify_io7);
+
+void __init
+marvel_init_arch(void)
+{
+ struct io7 *io7;
+
+ /* With multiple PCI busses, we play with I/O as physical addrs. */
+ ioport_resource.end = ~0UL;
+
+ /* PCI DMA Direct Mapping is 1GB at 2GB. */
+ __direct_map_base = 0x80000000;
+ __direct_map_size = 0x40000000;
+
+ /* Parse the config tree. */
+ gct6_find_nodes(GCT_NODE_PTR(0), gct_wanted_node_list);
+
+ /* Init the io7s. */
+ for (io7 = NULL; NULL != (io7 = marvel_next_io7(io7)); )
+ marvel_init_io7(io7);
+
+ /* Check for graphic console location (if any). */
+ marvel_init_vga_hose();
+}
+
+void
+marvel_kill_arch(int mode)
+{
+}
+
+
+/*
+ * PCI Configuration Space access functions
+ *
+ * Configuration space addresses have the following format:
+ *
+ * |2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * |3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|R|R|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * n:24 reserved for hose base
+ * 23:16 bus number (8 bits = 256 possible buses)
+ * 15:11 Device number (5 bits)
+ * 10:8 function number
+ * 7:2 register number
+ *
+ * Notes:
+ * IO7 determines whether to use a type 0 or type 1 config cycle
+ * based on the bus number. Therefore the bus number must be set
+ * to 0 for the root bus on any hose.
+ *
+ * The function number selects which function of a multi-function device
+ * (e.g., SCSI and Ethernet).
+ *
+ */
+
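+/*
+ * Worked example (illustrative): bus 1, devfn = PCI_DEVFN(4, 2) = 0x22,
+ * where = 0x40 yields offset (1 << 16) | (0x22 << 8) | 0x40 = 0x12240
+ * from the hose's config_space_base -- see build_conf_addr() below.
+ */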
+static inline unsigned long
+build_conf_addr(struct pci_controller *hose, u8 bus,
+ unsigned int devfn, int where)
+{
+ return (hose->config_space_base | (bus << 16) | (devfn << 8) | where);
+}
+
+static unsigned long
+mk_conf_addr(struct pci_bus *pbus, unsigned int devfn, int where)
+{
+ struct pci_controller *hose = pbus->sysdata;
+ struct io7_port *io7_port;
+ unsigned long addr = 0;
+ u8 bus = pbus->number;
+
+ if (!hose)
+ return addr;
+
+ /* Check for enabled. */
+ io7_port = hose->sysdata;
+ if (!io7_port->enabled)
+ return addr;
+
+ if (!pbus->parent) { /* No parent means peer PCI bus. */
+ /* Don't support idsel > 20 on primary bus. */
+ if (devfn >= PCI_DEVFN(21, 0))
+ return addr;
+ bus = 0;
+ }
+
+ addr = build_conf_addr(hose, bus, devfn, where);
+
+ DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
+ return addr;
+}
+
+static int
+marvel_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *value)
+{
+ unsigned long addr;
+
+ if (0 == (addr = mk_conf_addr(bus, devfn, where)))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch(size) {
+ case 1:
+ *value = __kernel_ldbu(*(vucp)addr);
+ break;
+ case 2:
+ *value = __kernel_ldwu(*(vusp)addr);
+ break;
+ case 4:
+ *value = *(vuip)addr;
+ break;
+ default:
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+marvel_write_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 value)
+{
+ unsigned long addr;
+
+ if (0 == (addr = mk_conf_addr(bus, devfn, where)))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ __kernel_stb(value, *(vucp)addr);
+ mb();
+ __kernel_ldbu(*(vucp)addr);
+ break;
+ case 2:
+ __kernel_stw(value, *(vusp)addr);
+ mb();
+ __kernel_ldwu(*(vusp)addr);
+ break;
+ case 4:
+ *(vuip)addr = value;
+ mb();
+ *(vuip)addr;
+ break;
+ default:
+ return PCIBIOS_FUNC_NOT_SUPPORTED;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops marvel_pci_ops =
+{
+ .read = marvel_read_config,
+ .write = marvel_write_config,
+};
+
+
+/*
+ * Other PCI helper functions.
+ */
+void
+marvel_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
+{
+ io7_ioport_csrs *csrs = ((struct io7_port *)hose->sysdata)->csrs;
+
+ wmb();
+ csrs->POx_SG_TBIA.csr = 0;
+ mb();
+ csrs->POx_SG_TBIA.csr;
+}
+
+
+
+/*
+ * RTC Support
+ */
+struct marvel_rtc_access_info {
+ unsigned long function;
+ unsigned long index;
+ unsigned long data;
+};
+
+static void
+__marvel_access_rtc(void *info)
+{
+ struct marvel_rtc_access_info *rtc_access = info;
+
+ register unsigned long __r0 __asm__("$0");
+ register unsigned long __r16 __asm__("$16") = rtc_access->function;
+ register unsigned long __r17 __asm__("$17") = rtc_access->index;
+ register unsigned long __r18 __asm__("$18") = rtc_access->data;
+
+ __asm__ __volatile__(
+ "call_pal %4 # cserve rtc"
+ : "=r"(__r16), "=r"(__r17), "=r"(__r18), "=r"(__r0)
+ : "i"(PAL_cserve), "0"(__r16), "1"(__r17), "2"(__r18)
+ : "$1", "$22", "$23", "$24", "$25");
+
+ rtc_access->data = __r0;
+}
+
+static u8
+__marvel_rtc_io(u8 b, unsigned long addr, int write)
+{
+ static u8 index = 0;
+
+ struct marvel_rtc_access_info rtc_access;
+ u8 ret = 0;
+
+ switch(addr) {
+ case 0x70: /* RTC_PORT(0) */
+ if (write) index = b;
+ ret = index;
+ break;
+
+ case 0x71: /* RTC_PORT(1) */
+ rtc_access.index = index;
+ rtc_access.data = BCD_TO_BIN(b);
+ rtc_access.function = 0x48 + !write; /* GET/PUT_TOY */
+
+#ifdef CONFIG_SMP
+ if (smp_processor_id() != boot_cpuid)
+ smp_call_function_on_cpu(__marvel_access_rtc,
+ &rtc_access, 1, 1,
+ cpumask_of_cpu(boot_cpuid));
+ else
+ __marvel_access_rtc(&rtc_access);
+#else
+ __marvel_access_rtc(&rtc_access);
+#endif
+ ret = BIN_TO_BCD(rtc_access.data);
+ break;
+
+ default:
+ printk(KERN_WARNING "Illegal RTC port %lx\n", addr);
+ break;
+ }
+
+ return ret;
+}
+
+
+/*
+ * IO map support.
+ */
+
+#define __marvel_is_mem_vga(a) (((a) >= 0xa0000) && ((a) <= 0xc0000))
+
+void __iomem *
+marvel_ioremap(unsigned long addr, unsigned long size)
+{
+ struct pci_controller *hose;
+ unsigned long baddr, last;
+ struct vm_struct *area;
+ unsigned long vaddr;
+ unsigned long *ptes;
+ unsigned long pfn;
+
+ /*
+ * Adjust the addr.
+ */
+#ifdef CONFIG_VGA_HOSE
+ if (pci_vga_hose && __marvel_is_mem_vga(addr)) {
+ addr += pci_vga_hose->mem_space->start;
+ }
+#endif
+
+ /*
+ * Find the hose.
+ */
+ for (hose = hose_head; hose; hose = hose->next) {
+ if ((addr >> 32) == (hose->mem_space->start >> 32))
+ break;
+ }
+ if (!hose)
+ return NULL;
+
+ /*
+ * We have the hose - calculate the bus limits.
+ */
+ baddr = addr - hose->mem_space->start;
+ last = baddr + size - 1;
+
+ /*
+ * Is it direct-mapped?
+ */
+ if ((baddr >= __direct_map_base) &&
+ ((baddr + size - 1) < __direct_map_base + __direct_map_size)) {
+ addr = IDENT_ADDR | (baddr - __direct_map_base);
+ return (void __iomem *) addr;
+ }
+
+ /*
+ * Check the scatter-gather arena.
+ */
+ if (hose->sg_pci &&
+ baddr >= (unsigned long)hose->sg_pci->dma_base &&
+ last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size) {
+
+ /*
+ * Adjust the limits (mappings must be page aligned)
+ */
+ baddr -= hose->sg_pci->dma_base;
+ last -= hose->sg_pci->dma_base;
+ baddr &= PAGE_MASK;
+ size = PAGE_ALIGN(last) - baddr;
+
+ /*
+ * Map it.
+ */
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+
+ ptes = hose->sg_pci->ptes;
+ for (vaddr = (unsigned long)area->addr;
+ baddr <= last;
+ baddr += PAGE_SIZE, vaddr += PAGE_SIZE) {
+ pfn = ptes[baddr >> PAGE_SHIFT];
+ if (!(pfn & 1)) {
+ printk("ioremap failed... pte not valid...\n");
+ vfree(area->addr);
+ return NULL;
+ }
+ pfn >>= 1; /* make it a true pfn */
+
+ if (__alpha_remap_area_pages(vaddr,
+ pfn << PAGE_SHIFT,
+ PAGE_SIZE, 0)) {
+ printk("FAILED to map...\n");
+ vfree(area->addr);
+ return NULL;
+ }
+ }
+
+ flush_tlb_all();
+
+ vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
+
+ return (void __iomem *) vaddr;
+ }
+
+ return NULL;
+}
+
+void
+marvel_iounmap(volatile void __iomem *xaddr)
+{
+ unsigned long addr = (unsigned long) xaddr;
+ if (addr >= VMALLOC_START)
+ vfree((void *)(PAGE_MASK & addr));
+}
+
+int
+marvel_is_mmio(const volatile void __iomem *xaddr)
+{
+ unsigned long addr = (unsigned long) xaddr;
+
+ if (addr >= VMALLOC_START)
+ return 1;
+ else
+ return (addr & 0xFF000000UL) == 0;
+}
+
+#define __marvel_is_port_vga(a) \
+ (((a) >= 0x3b0) && ((a) < 0x3e0) && ((a) != 0x3b3) && ((a) != 0x3d3))
+#define __marvel_is_port_kbd(a) (((a) == 0x60) || ((a) == 0x64))
+#define __marvel_is_port_rtc(a) (((a) == 0x70) || ((a) == 0x71))
+
+void __iomem *marvel_ioportmap (unsigned long addr)
+{
+ if (__marvel_is_port_rtc (addr) || __marvel_is_port_kbd(addr))
+ ;
+#ifdef CONFIG_VGA_HOSE
+ else if (__marvel_is_port_vga (addr) && pci_vga_hose)
+ addr += pci_vga_hose->io_space->start;
+#endif
+ else
+ return NULL;
+ return (void __iomem *)addr;
+}
+
+unsigned int
+marvel_ioread8(void __iomem *xaddr)
+{
+ unsigned long addr = (unsigned long) xaddr;
+ if (__marvel_is_port_kbd(addr))
+ return 0;
+ else if (__marvel_is_port_rtc(addr))
+ return __marvel_rtc_io(0, addr, 0);
+ else
+ return __kernel_ldbu(*(vucp)addr);
+}
+
+void
+marvel_iowrite8(u8 b, void __iomem *xaddr)
+{
+ unsigned long addr = (unsigned long) xaddr;
+ if (__marvel_is_port_kbd(addr))
+ return;
+ else if (__marvel_is_port_rtc(addr))
+ __marvel_rtc_io(b, addr, 1);
+ else
+ __kernel_stb(b, *(vucp)addr);
+}
+
+#ifndef CONFIG_ALPHA_GENERIC
+EXPORT_SYMBOL(marvel_ioremap);
+EXPORT_SYMBOL(marvel_iounmap);
+EXPORT_SYMBOL(marvel_is_mmio);
+EXPORT_SYMBOL(marvel_ioportmap);
+EXPORT_SYMBOL(marvel_ioread8);
+EXPORT_SYMBOL(marvel_iowrite8);
+#endif
+
+/*
+ * NUMA Support
+ */
+/**********
+ * FIXME - for now each cpu is a node by itself
+ * -- no real support for striped mode
+ **********
+ */
+int
+marvel_pa_to_nid(unsigned long pa)
+{
+ int cpuid;
+
+ if ((pa >> 43) & 1) /* I/O */
+ cpuid = (~(pa >> 35) & 0xff);
+ else /* mem */
+ cpuid = ((pa >> 34) & 0x3) | ((pa >> (37 - 2)) & (0x1f << 2));
+
+ return marvel_cpuid_to_nid(cpuid);
+}
+
+int
+marvel_cpuid_to_nid(int cpuid)
+{
+ return cpuid;
+}
+
+unsigned long
+marvel_node_mem_start(int nid)
+{
+ unsigned long pa;
+
+ pa = (nid & 0x3) | ((nid & (0x1f << 2)) << 1);
+ pa <<= 34;
+
+ return pa;
+}
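+
+/*
+ * Sanity check (illustrative): for nid 5, marvel_node_mem_start()
+ * above gives pa = ((5 & 3) | ((5 & 0x7c) << 1)) << 34 = 9UL << 34,
+ * and marvel_pa_to_nid(9UL << 34) recovers cpuid 1 | 4 = 5.
+ */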
+
+unsigned long
+marvel_node_mem_size(int nid)
+{
+ return 16UL * 1024 * 1024 * 1024; /* 16GB */
+}
+
+
+/*
+ * AGP GART Support.
+ */
+#include <linux/agp_backend.h>
+#include <asm/agp_backend.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+struct marvel_agp_aperture {
+ struct pci_iommu_arena *arena;
+ long pg_start;
+ long pg_count;
+};
+
+static int
+marvel_agp_setup(alpha_agp_info *agp)
+{
+ struct marvel_agp_aperture *aper;
+
+ if (!alpha_agpgart_size)
+ return -ENOMEM;
+
+ aper = kmalloc(sizeof(*aper), GFP_KERNEL);
+ if (aper == NULL) return -ENOMEM;
+
+ aper->arena = agp->hose->sg_pci;
+ aper->pg_count = alpha_agpgart_size / PAGE_SIZE;
+ aper->pg_start = iommu_reserve(aper->arena, aper->pg_count,
+ aper->pg_count - 1);
+
+ if (aper->pg_start < 0) {
+ printk(KERN_ERR "Failed to reserve AGP memory\n");
+ kfree(aper);
+ return -ENOMEM;
+ }
+
+ agp->aperture.bus_base =
+ aper->arena->dma_base + aper->pg_start * PAGE_SIZE;
+ agp->aperture.size = aper->pg_count * PAGE_SIZE;
+ agp->aperture.sysdata = aper;
+
+ return 0;
+}
+
+static void
+marvel_agp_cleanup(alpha_agp_info *agp)
+{
+ struct marvel_agp_aperture *aper = agp->aperture.sysdata;
+ int status;
+
+ status = iommu_release(aper->arena, aper->pg_start, aper->pg_count);
+ if (status == -EBUSY) {
+ printk(KERN_WARNING
+ "Attempted to release bound AGP memory - unbinding\n");
+ iommu_unbind(aper->arena, aper->pg_start, aper->pg_count);
+ status = iommu_release(aper->arena, aper->pg_start,
+ aper->pg_count);
+ }
+ if (status < 0)
+ printk(KERN_ERR "Failed to release AGP memory\n");
+
+ kfree(aper);
+ kfree(agp);
+}
+
+static int
+marvel_agp_configure(alpha_agp_info *agp)
+{
+ io7_ioport_csrs *csrs = ((struct io7_port *)agp->hose->sysdata)->csrs;
+ struct io7 *io7 = ((struct io7_port *)agp->hose->sysdata)->io7;
+ unsigned int new_rate = 0;
+ unsigned long agp_pll;
+
+ /*
+ * Check the requested mode against the PLL setting.
+ * The agpgart_be code has not programmed the card yet,
+ * so we can still tweak mode here.
+ */
+ agp_pll = io7->csrs->POx_RST[IO7_AGP_PORT].csr;
+ switch(IO7_PLL_RNGB(agp_pll)) {
+ case 0x4: /* 2x only */
+ /*
+ * The PLL is only programmed for 2x, so adjust the
+ * rate to 2x, if necessary.
+ */
+ if (agp->mode.bits.rate != 2)
+ new_rate = 2;
+ break;
+
+ case 0x6: /* 1x / 4x */
+ /*
+ * The PLL is programmed for 1x or 4x. Don't go faster
+ * than requested, so if the requested rate is 2x, use 1x.
+ */
+ if (agp->mode.bits.rate == 2)
+ new_rate = 1;
+ break;
+
+ default: /* ??????? */
+ /*
+ * Don't know what this PLL setting is, take the requested
+ * rate, but warn the user.
+ */
+ printk("%s: unknown PLL setting RNGB=%lx (PLL6_CTL=%016lx)\n",
+ __FUNCTION__, IO7_PLL_RNGB(agp_pll), agp_pll);
+ break;
+ }
+
+ /*
+ * Set the new rate, if necessary.
+ */
+ if (new_rate) {
+ printk("Requested AGP Rate %dX not compatible "
+ "with PLL setting - using %dX\n",
+ agp->mode.bits.rate,
+ new_rate);
+
+ agp->mode.bits.rate = new_rate;
+ }
+
+ printk("Enabling AGP on hose %d: %dX%s RQ %d\n",
+ agp->hose->index, agp->mode.bits.rate,
+ agp->mode.bits.sba ? " - SBA" : "", agp->mode.bits.rq);
+
+ csrs->AGP_CMD.csr = agp->mode.lw;
+
+ return 0;
+}
+
+static int
+marvel_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
+{
+ struct marvel_agp_aperture *aper = agp->aperture.sysdata;
+ return iommu_bind(aper->arena, aper->pg_start + pg_start,
+ mem->page_count, mem->memory);
+}
+
+static int
+marvel_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
+{
+ struct marvel_agp_aperture *aper = agp->aperture.sysdata;
+ return iommu_unbind(aper->arena, aper->pg_start + pg_start,
+ mem->page_count);
+}
+
+static unsigned long
+marvel_agp_translate(alpha_agp_info *agp, dma_addr_t addr)
+{
+ struct marvel_agp_aperture *aper = agp->aperture.sysdata;
+ unsigned long baddr = addr - aper->arena->dma_base;
+ unsigned long pte;
+
+ if (addr < agp->aperture.bus_base ||
+ addr >= agp->aperture.bus_base + agp->aperture.size) {
+ printk("%s: addr out of range\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ pte = aper->arena->ptes[baddr >> PAGE_SHIFT];
+ if (!(pte & 1)) {
+ printk("%s: pte not valid\n", __FUNCTION__);
+ return -EINVAL;
+ }
+ return (pte >> 1) << PAGE_SHIFT;
+}
+
+struct alpha_agp_ops marvel_agp_ops =
+{
+ .setup = marvel_agp_setup,
+ .cleanup = marvel_agp_cleanup,
+ .configure = marvel_agp_configure,
+ .bind = marvel_agp_bind_memory,
+ .unbind = marvel_agp_unbind_memory,
+ .translate = marvel_agp_translate
+};
+
+alpha_agp_info *
+marvel_agp_info(void)
+{
+ struct pci_controller *hose;
+ io7_ioport_csrs *csrs;
+ alpha_agp_info *agp;
+ struct io7 *io7;
+
+ /*
+ * Find the first IO7 with an AGP card.
+ *
+ * FIXME -- there should be a better way (we want to be able to
+ * specify and what if the agp card is not video???)
+ */
+ hose = NULL;
+ for (io7 = NULL; (io7 = marvel_next_io7(io7)) != NULL; ) {
+ struct pci_controller *h;
+ vuip addr;
+
+ if (!io7->ports[IO7_AGP_PORT].enabled)
+ continue;
+
+ h = io7->ports[IO7_AGP_PORT].hose;
+ addr = (vuip)build_conf_addr(h, 0, PCI_DEVFN(5, 0), 0);
+
+ if (*addr != 0xffffffffu) {
+ hose = h;
+ break;
+ }
+ }
+
+ if (!hose || !hose->sg_pci)
+ return NULL;
+
+ printk("MARVEL - using hose %d as AGP\n", hose->index);
+
+ /*
+ * Get the csrs from the hose.
+ */
+ csrs = ((struct io7_port *)hose->sysdata)->csrs;
+
+ /*
+ * Allocate the info structure.
+ */
+	agp = kmalloc(sizeof(*agp), GFP_KERNEL);
+	if (agp == NULL)
+		return NULL;
+
+ /*
+ * Fill it in.
+ */
+ agp->hose = hose;
+ agp->private = NULL;
+ agp->ops = &marvel_agp_ops;
+
+ /*
+ * Aperture - not configured until ops.setup().
+ */
+ agp->aperture.bus_base = 0;
+ agp->aperture.size = 0;
+ agp->aperture.sysdata = NULL;
+
+ /*
+ * Capabilities.
+ *
+ * NOTE: IO7 reports through AGP_STAT that it can support a read queue
+ * depth of 17 (rq = 0x10). It actually only supports a depth of
+ * 16 (rq = 0xf).
+ */
+ agp->capability.lw = csrs->AGP_STAT.csr;
+ agp->capability.bits.rq = 0xf;
+
+ /*
+ * Mode.
+ */
+ agp->mode.lw = csrs->AGP_CMD.csr;
+
+ return agp;
+}
diff --git a/arch/alpha/kernel/core_mcpcia.c b/arch/alpha/kernel/core_mcpcia.c
new file mode 100644
index 000000000000..28849c894153
--- /dev/null
+++ b/arch/alpha/kernel/core_mcpcia.c
@@ -0,0 +1,618 @@
+/*
+ * linux/arch/alpha/kernel/core_mcpcia.c
+ *
+ * Based on code written by David A Rusling (david.rusling@reo.mts.dec.com).
+ *
+ * Code common to all MCbus-PCI Adaptor core logic chipsets
+ */
+
+#define __EXTERN_INLINE inline
+#include <asm/io.h>
+#include <asm/core_mcpcia.h>
+#undef __EXTERN_INLINE
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/delay.h>
+
+#include <asm/ptrace.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+/*
+ * NOTE: Herein lie back-to-back mb instructions. They are magic.
+ * One plausible explanation is that the i/o controller does not properly
+ * handle the system transaction. Another involves timing. Ho hum.
+ */
+
+/*
+ * BIOS32-style PCI interface:
+ */
+
+#define DEBUG_CFG 0
+
+#if DEBUG_CFG
+# define DBG_CFG(args) printk args
+#else
+# define DBG_CFG(args)
+#endif
+
+#define MCPCIA_MAX_HOSES 4
+
+/*
+ * Given a bus, device, and function number, compute resulting
+ * configuration space address and setup the MCPCIA_HAXR2 register
+ * accordingly. It is therefore not safe to have concurrent
+ * invocations to configuration space access routines, but there
+ * really shouldn't be any need for this.
+ *
+ * Type 0:
+ *
+ * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * 31:11 Device select bits.
+ * 10:8 Function number
+ * 7:2 Register number
+ *
+ * Type 1:
+ *
+ * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * 31:24 reserved
+ * 23:16 bus number (8 bits = 256 possible buses)
+ * 15:11 Device number (5 bits)
+ * 10:8 function number
+ * 7:2 register number
+ *
+ * Notes:
+ * The function number selects which function of a multi-function device
+ * (e.g., SCSI and Ethernet).
+ *
+ * The register selects a DWORD (32 bit) register offset. Hence it
+ * doesn't get shifted by 2 bits as we want to "drop" the bottom two
+ * bits.
+ */
+
+static unsigned int
+conf_read(unsigned long addr, unsigned char type1,
+ struct pci_controller *hose)
+{
+ unsigned long flags;
+ unsigned long mid = MCPCIA_HOSE2MID(hose->index);
+ unsigned int stat0, value, temp, cpu;
+
+ cpu = smp_processor_id();
+
+ local_irq_save(flags);
+
+	DBG_CFG(("conf_read(addr=0x%lx, type1=%d, mid=%ld)\n",
+		 addr, type1, mid));
+
+ /* Reset status register to avoid losing errors. */
+ stat0 = *(vuip)MCPCIA_CAP_ERR(mid);
+ *(vuip)MCPCIA_CAP_ERR(mid) = stat0;
+ mb();
+ temp = *(vuip)MCPCIA_CAP_ERR(mid);
+ DBG_CFG(("conf_read: MCPCIA_CAP_ERR(%d) was 0x%x\n", mid, stat0));
+
+ mb();
+ draina();
+ mcheck_expected(cpu) = 1;
+ mcheck_taken(cpu) = 0;
+ mcheck_extra(cpu) = mid;
+ mb();
+
+ /* Access configuration space. */
+ value = *((vuip)addr);
+ mb();
+ mb(); /* magic */
+
+ if (mcheck_taken(cpu)) {
+ mcheck_taken(cpu) = 0;
+ value = 0xffffffffU;
+ mb();
+ }
+ mcheck_expected(cpu) = 0;
+ mb();
+
+ DBG_CFG(("conf_read(): finished\n"));
+
+ local_irq_restore(flags);
+ return value;
+}
+
+static void
+conf_write(unsigned long addr, unsigned int value, unsigned char type1,
+ struct pci_controller *hose)
+{
+ unsigned long flags;
+ unsigned long mid = MCPCIA_HOSE2MID(hose->index);
+ unsigned int stat0, temp, cpu;
+
+ cpu = smp_processor_id();
+
+ local_irq_save(flags); /* avoid getting hit by machine check */
+
+ /* Reset status register to avoid losing errors. */
+ stat0 = *(vuip)MCPCIA_CAP_ERR(mid);
+ *(vuip)MCPCIA_CAP_ERR(mid) = stat0; mb();
+ temp = *(vuip)MCPCIA_CAP_ERR(mid);
+ DBG_CFG(("conf_write: MCPCIA CAP_ERR(%d) was 0x%x\n", mid, stat0));
+
+ draina();
+ mcheck_expected(cpu) = 1;
+ mcheck_extra(cpu) = mid;
+ mb();
+
+ /* Access configuration space. */
+ *((vuip)addr) = value;
+ mb();
+ mb(); /* magic */
+ temp = *(vuip)MCPCIA_CAP_ERR(mid); /* read to force the write */
+ mcheck_expected(cpu) = 0;
+ mb();
+
+ DBG_CFG(("conf_write(): finished\n"));
+ local_irq_restore(flags);
+}
+
+static int
+mk_conf_addr(struct pci_bus *pbus, unsigned int devfn, int where,
+ struct pci_controller *hose, unsigned long *pci_addr,
+ unsigned char *type1)
+{
+ u8 bus = pbus->number;
+ unsigned long addr;
+
+ DBG_CFG(("mk_conf_addr(bus=%d,devfn=0x%x,hose=%d,where=0x%x,"
+ " pci_addr=0x%p, type1=0x%p)\n",
+ bus, devfn, hose->index, where, pci_addr, type1));
+
+ /* Type 1 configuration cycle for *ALL* busses. */
+ *type1 = 1;
+
+ if (!pbus->parent) /* No parent means peer PCI bus. */
+ bus = 0;
+ addr = (bus << 16) | (devfn << 8) | (where);
+ addr <<= 5; /* swizzle for SPARSE */
+ addr |= hose->config_space_base;
+
+ *pci_addr = addr;
+ DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
+ return 0;
+}
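+
+/*
+ * Worked example (illustrative values): bus 1, device 4, function 0,
+ * register 0x40 gives
+ *
+ *	addr = (1 << 16) | (PCI_DEVFN(4, 0) << 8) | 0x40 = 0x12040
+ *	addr <<= 5;			(swizzle for SPARSE)
+ *	addr |= hose->config_space_base;
+ *
+ * leaving the low five bits of the swizzled address free to encode the
+ * transfer size, as mcpcia_{read,write}_config() do below.
+ */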
+
+static int
+mcpcia_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *value)
+{
+ struct pci_controller *hose = bus->sysdata;
+ unsigned long addr, w;
+ unsigned char type1;
+
+ if (mk_conf_addr(bus, devfn, where, hose, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ addr |= (size - 1) * 8;
+ w = conf_read(addr, type1, hose);
+ switch (size) {
+ case 1:
+ *value = __kernel_extbl(w, where & 3);
+ break;
+ case 2:
+ *value = __kernel_extwl(w, where & 3);
+ break;
+ case 4:
+ *value = w;
+ break;
+ }
+ return PCIBIOS_SUCCESSFUL;
+}
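+
+/*
+ * A note on the (size - 1) * 8 term above: it sets the sparse-space
+ * length field, so 0x00 selects a byte, 0x08 a word and 0x18 a
+ * longword.  For a byte read of register 0x41, say, the longword at
+ * 0x40 comes back and __kernel_extbl(w, 1) extracts byte lane 1.
+ * (Illustrative walk-through, not chip documentation.)
+ */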
+
+static int
+mcpcia_write_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 value)
+{
+ struct pci_controller *hose = bus->sysdata;
+ unsigned long addr;
+ unsigned char type1;
+
+ if (mk_conf_addr(bus, devfn, where, hose, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ addr |= (size - 1) * 8;
+ value = __kernel_insql(value, where & 3);
+ conf_write(addr, value, type1, hose);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops mcpcia_pci_ops =
+{
+ .read = mcpcia_read_config,
+ .write = mcpcia_write_config,
+};
+
+void
+mcpcia_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
+{
+ wmb();
+ *(vuip)MCPCIA_SG_TBIA(MCPCIA_HOSE2MID(hose->index)) = 0;
+ mb();
+}
+
+static int __init
+mcpcia_probe_hose(int h)
+{
+ int cpu = smp_processor_id();
+ int mid = MCPCIA_HOSE2MID(h);
+ unsigned int pci_rev;
+
+ /* Gotta be REAL careful. If hose is absent, we get an mcheck. */
+
+ mb();
+ mb();
+ draina();
+ wrmces(7);
+
+ mcheck_expected(cpu) = 2; /* indicates probing */
+ mcheck_taken(cpu) = 0;
+ mcheck_extra(cpu) = mid;
+ mb();
+
+ /* Access the bus revision word. */
+ pci_rev = *(vuip)MCPCIA_REV(mid);
+
+ mb();
+ mb(); /* magic */
+ if (mcheck_taken(cpu)) {
+ mcheck_taken(cpu) = 0;
+ pci_rev = 0xffffffff;
+ mb();
+ }
+ mcheck_expected(cpu) = 0;
+ mb();
+
+ return (pci_rev >> 16) == PCI_CLASS_BRIDGE_HOST;
+}
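+
+/*
+ * The REV register read above carries the PCI class code in its upper
+ * 16 bits, so a live hose answers PCI_CLASS_BRIDGE_HOST (0x0600).
+ * The 0xffffffff substituted after a machine check can never match,
+ * which is what makes probing absent hoses safe.
+ */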
+
+static void __init
+mcpcia_new_hose(int h)
+{
+ struct pci_controller *hose;
+ struct resource *io, *mem, *hae_mem;
+ int mid = MCPCIA_HOSE2MID(h);
+
+ hose = alloc_pci_controller();
+ if (h == 0)
+ pci_isa_hose = hose;
+ io = alloc_resource();
+ mem = alloc_resource();
+ hae_mem = alloc_resource();
+
+ hose->io_space = io;
+ hose->mem_space = hae_mem;
+ hose->sparse_mem_base = MCPCIA_SPARSE(mid) - IDENT_ADDR;
+ hose->dense_mem_base = MCPCIA_DENSE(mid) - IDENT_ADDR;
+ hose->sparse_io_base = MCPCIA_IO(mid) - IDENT_ADDR;
+ hose->dense_io_base = 0;
+ hose->config_space_base = MCPCIA_CONF(mid);
+ hose->index = h;
+
+ io->start = MCPCIA_IO(mid) - MCPCIA_IO_BIAS;
+ io->end = io->start + 0xffff;
+ io->name = pci_io_names[h];
+ io->flags = IORESOURCE_IO;
+
+ mem->start = MCPCIA_DENSE(mid) - MCPCIA_MEM_BIAS;
+ mem->end = mem->start + 0xffffffff;
+ mem->name = pci_mem_names[h];
+ mem->flags = IORESOURCE_MEM;
+
+ hae_mem->start = mem->start;
+ hae_mem->end = mem->start + MCPCIA_MEM_MASK;
+ hae_mem->name = pci_hae0_name;
+ hae_mem->flags = IORESOURCE_MEM;
+
+ if (request_resource(&ioport_resource, io) < 0)
+ printk(KERN_ERR "Failed to request IO on hose %d\n", h);
+ if (request_resource(&iomem_resource, mem) < 0)
+ printk(KERN_ERR "Failed to request MEM on hose %d\n", h);
+ if (request_resource(mem, hae_mem) < 0)
+ printk(KERN_ERR "Failed to request HAE_MEM on hose %d\n", h);
+}
+
+static void
+mcpcia_pci_clr_err(int mid)
+{
+ *(vuip)MCPCIA_CAP_ERR(mid);
+ *(vuip)MCPCIA_CAP_ERR(mid) = 0xffffffff; /* Clear them all. */
+ mb();
+ *(vuip)MCPCIA_CAP_ERR(mid); /* Re-read for force write. */
+}
+
+static void __init
+mcpcia_startup_hose(struct pci_controller *hose)
+{
+ int mid = MCPCIA_HOSE2MID(hose->index);
+ unsigned int tmp;
+
+ mcpcia_pci_clr_err(mid);
+
+ /*
+ * Set up error reporting.
+ */
+ tmp = *(vuip)MCPCIA_CAP_ERR(mid);
+ tmp |= 0x0006; /* master/target abort */
+ *(vuip)MCPCIA_CAP_ERR(mid) = tmp;
+ mb();
+ tmp = *(vuip)MCPCIA_CAP_ERR(mid);
+
+ /*
+ * Set up the PCI->physical memory translation windows.
+ *
+ * Window 0 is scatter-gather 8MB at 8MB (for isa)
+ * Window 1 is scatter-gather (up to) 1GB at 1GB (for pci)
+ * Window 2 is direct access 2GB at 2GB
+ */
+ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
+ hose->sg_pci = iommu_arena_new(hose, 0x40000000,
+ size_for_memory(0x40000000), 0);
+
+ __direct_map_base = 0x80000000;
+ __direct_map_size = 0x80000000;
+
+ *(vuip)MCPCIA_W0_BASE(mid) = hose->sg_isa->dma_base | 3;
+ *(vuip)MCPCIA_W0_MASK(mid) = (hose->sg_isa->size - 1) & 0xfff00000;
+ *(vuip)MCPCIA_T0_BASE(mid) = virt_to_phys(hose->sg_isa->ptes) >> 8;
+
+ *(vuip)MCPCIA_W1_BASE(mid) = hose->sg_pci->dma_base | 3;
+ *(vuip)MCPCIA_W1_MASK(mid) = (hose->sg_pci->size - 1) & 0xfff00000;
+ *(vuip)MCPCIA_T1_BASE(mid) = virt_to_phys(hose->sg_pci->ptes) >> 8;
+
+ *(vuip)MCPCIA_W2_BASE(mid) = __direct_map_base | 1;
+ *(vuip)MCPCIA_W2_MASK(mid) = (__direct_map_size - 1) & 0xfff00000;
+ *(vuip)MCPCIA_T2_BASE(mid) = 0;
+
+ *(vuip)MCPCIA_W3_BASE(mid) = 0x0;
+
+ mcpcia_pci_tbi(hose, 0, -1);
+
+ *(vuip)MCPCIA_HBASE(mid) = 0x0;
+ mb();
+
+ *(vuip)MCPCIA_HAE_MEM(mid) = 0U;
+ mb();
+ *(vuip)MCPCIA_HAE_MEM(mid); /* read it back. */
+ *(vuip)MCPCIA_HAE_IO(mid) = 0;
+ mb();
+ *(vuip)MCPCIA_HAE_IO(mid); /* read it back. */
+}
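+
+/*
+ * For illustration: a PCI address hits one of the windows programmed
+ * above when its bits <31:20>, after masking with ~WMASK, equal the
+ * window base.  A hypothetical hit test (example only, not built):
+ */
+#if 0
+static int
+window_hit(unsigned long addr, unsigned long wbase, unsigned long wmask)
+{
+ if (!(wbase & 1)) /* window not enabled */
+ return 0;
+ return (addr & 0xfff00000 & ~wmask) == (wbase & 0xfff00000);
+}
+#endif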
+
+void __init
+mcpcia_init_arch(void)
+{
+ /* With multiple PCI busses, we play with I/O as physical addrs. */
+ ioport_resource.end = ~0UL;
+
+ /* Allocate hose 0. That's the one that all the ISA junk hangs
+ off of, from which we'll be registering stuff here in a bit.
+ Other hose detection is done in mcpcia_init_hoses, which is
+ called from init_IRQ. */
+
+ mcpcia_new_hose(0);
+}
+
+/* This is called from init_IRQ, since we cannot take interrupts
+ before then. Which means we cannot do this in init_arch. */
+
+void __init
+mcpcia_init_hoses(void)
+{
+ struct pci_controller *hose;
+ int hose_count;
+ int h;
+
+ /* First, find how many hoses we have. */
+ hose_count = 0;
+ for (h = 0; h < MCPCIA_MAX_HOSES; ++h) {
+ if (mcpcia_probe_hose(h)) {
+ if (h != 0)
+ mcpcia_new_hose(h);
+ hose_count++;
+ }
+ }
+
+ printk("mcpcia_init_hoses: found %d hoses\n", hose_count);
+
+ /* Now do init for each hose. */
+ for (hose = hose_head; hose; hose = hose->next)
+ mcpcia_startup_hose(hose);
+}
+
+static void
+mcpcia_print_uncorrectable(struct el_MCPCIA_uncorrected_frame_mcheck *logout)
+{
+ struct el_common_EV5_uncorrectable_mcheck *frame;
+ int i;
+
+ frame = &logout->procdata;
+
+ /* Print PAL fields */
+ for (i = 0; i < 24; i += 2) {
+ printk(" paltmp[%d-%d] = %16lx %16lx\n",
+ i, i+1, frame->paltemp[i], frame->paltemp[i+1]);
+ }
+ for (i = 0; i < 8; i += 2) {
+ printk(" shadow[%d-%d] = %16lx %16lx\n",
+ i, i+1, frame->shadow[i],
+ frame->shadow[i+1]);
+ }
+ printk(" Addr of excepting instruction = %16lx\n",
+ frame->exc_addr);
+ printk(" Summary of arithmetic traps = %16lx\n",
+ frame->exc_sum);
+ printk(" Exception mask = %16lx\n",
+ frame->exc_mask);
+ printk(" Base address for PALcode = %16lx\n",
+ frame->pal_base);
+ printk(" Interrupt Status Reg = %16lx\n",
+ frame->isr);
+ printk(" CURRENT SETUP OF EV5 IBOX = %16lx\n",
+ frame->icsr);
+ printk(" I-CACHE Reg %s parity error = %16lx\n",
+ (frame->ic_perr_stat & 0x800L) ?
+ "Data" : "Tag",
+ frame->ic_perr_stat);
+ printk(" D-CACHE error Reg = %16lx\n",
+ frame->dc_perr_stat);
+ if (frame->dc_perr_stat & 0x2) {
+ switch (frame->dc_perr_stat & 0x03c) {
+ case 8:
+ printk(" Data error in bank 1\n");
+ break;
+ case 4:
+ printk(" Data error in bank 0\n");
+ break;
+		case 0x20:
+			printk(" Tag error in bank 1\n");
+			break;
+		case 0x10:
+			printk(" Tag error in bank 0\n");
+			break;
+ }
+ }
+ printk(" Effective VA = %16lx\n",
+ frame->va);
+ printk(" Reason for D-stream = %16lx\n",
+ frame->mm_stat);
+ printk(" EV5 SCache address = %16lx\n",
+ frame->sc_addr);
+ printk(" EV5 SCache TAG/Data parity = %16lx\n",
+ frame->sc_stat);
+ printk(" EV5 BC_TAG_ADDR = %16lx\n",
+ frame->bc_tag_addr);
+ printk(" EV5 EI_ADDR: Phys addr of Xfer = %16lx\n",
+ frame->ei_addr);
+ printk(" Fill Syndrome = %16lx\n",
+ frame->fill_syndrome);
+ printk(" EI_STAT reg = %16lx\n",
+ frame->ei_stat);
+ printk(" LD_LOCK = %16lx\n",
+ frame->ld_lock);
+}
+
+static void
+mcpcia_print_system_area(unsigned long la_ptr)
+{
+ struct el_common *frame;
+ struct pci_controller *hose;
+
+ struct IOD_subpacket {
+ unsigned long base;
+ unsigned int whoami;
+ unsigned int rsvd1;
+ unsigned int pci_rev;
+ unsigned int cap_ctrl;
+ unsigned int hae_mem;
+ unsigned int hae_io;
+ unsigned int int_ctl;
+ unsigned int int_reg;
+ unsigned int int_mask0;
+ unsigned int int_mask1;
+ unsigned int mc_err0;
+ unsigned int mc_err1;
+ unsigned int cap_err;
+ unsigned int rsvd2;
+ unsigned int pci_err1;
+ unsigned int mdpa_stat;
+ unsigned int mdpa_syn;
+ unsigned int mdpb_stat;
+ unsigned int mdpb_syn;
+ unsigned int rsvd3;
+ unsigned int rsvd4;
+ unsigned int rsvd5;
+ } *iodpp;
+
+ frame = (struct el_common *)la_ptr;
+ iodpp = (struct IOD_subpacket *) (la_ptr + frame->sys_offset);
+
+ for (hose = hose_head; hose; hose = hose->next, iodpp++) {
+
+ printk("IOD %d Register Subpacket - Bridge Base Address %16lx\n",
+ hose->index, iodpp->base);
+ printk(" WHOAMI = %8x\n", iodpp->whoami);
+ printk(" PCI_REV = %8x\n", iodpp->pci_rev);
+ printk(" CAP_CTRL = %8x\n", iodpp->cap_ctrl);
+ printk(" HAE_MEM = %8x\n", iodpp->hae_mem);
+ printk(" HAE_IO = %8x\n", iodpp->hae_io);
+ printk(" INT_CTL = %8x\n", iodpp->int_ctl);
+ printk(" INT_REG = %8x\n", iodpp->int_reg);
+ printk(" INT_MASK0 = %8x\n", iodpp->int_mask0);
+ printk(" INT_MASK1 = %8x\n", iodpp->int_mask1);
+ printk(" MC_ERR0 = %8x\n", iodpp->mc_err0);
+ printk(" MC_ERR1 = %8x\n", iodpp->mc_err1);
+ printk(" CAP_ERR = %8x\n", iodpp->cap_err);
+ printk(" PCI_ERR1 = %8x\n", iodpp->pci_err1);
+ printk(" MDPA_STAT = %8x\n", iodpp->mdpa_stat);
+ printk(" MDPA_SYN = %8x\n", iodpp->mdpa_syn);
+ printk(" MDPB_STAT = %8x\n", iodpp->mdpb_stat);
+ printk(" MDPB_SYN = %8x\n", iodpp->mdpb_syn);
+ }
+}
+
+void
+mcpcia_machine_check(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs * regs)
+{
+ struct el_common *mchk_header;
+ struct el_MCPCIA_uncorrected_frame_mcheck *mchk_logout;
+ unsigned int cpu = smp_processor_id();
+ int expected;
+
+ mchk_header = (struct el_common *)la_ptr;
+ mchk_logout = (struct el_MCPCIA_uncorrected_frame_mcheck *)la_ptr;
+ expected = mcheck_expected(cpu);
+
+ mb();
+ mb(); /* magic */
+ draina();
+
+ switch (expected) {
+ case 0:
+ {
+ /* FIXME: how do we figure out which hose the
+ error was on? */
+ struct pci_controller *hose;
+ for (hose = hose_head; hose; hose = hose->next)
+ mcpcia_pci_clr_err(MCPCIA_HOSE2MID(hose->index));
+ break;
+ }
+ case 1:
+ mcpcia_pci_clr_err(mcheck_extra(cpu));
+ break;
+ default:
+ /* Otherwise, we're being called from mcpcia_probe_hose
+	   and there's no hose to clear an error from. */
+ break;
+ }
+
+ wrmces(0x7);
+ mb();
+
+ process_mcheck_info(vector, la_ptr, regs, "MCPCIA", expected != 0);
+ if (!expected && vector != 0x620 && vector != 0x630) {
+ mcpcia_print_uncorrectable(mchk_logout);
+ mcpcia_print_system_area(la_ptr);
+ }
+}
diff --git a/arch/alpha/kernel/core_polaris.c b/arch/alpha/kernel/core_polaris.c
new file mode 100644
index 000000000000..277674a500ff
--- /dev/null
+++ b/arch/alpha/kernel/core_polaris.c
@@ -0,0 +1,203 @@
+/*
+ * linux/arch/alpha/kernel/core_polaris.c
+ *
+ * POLARIS chip-specific code
+ */
+
+#define __EXTERN_INLINE inline
+#include <asm/io.h>
+#include <asm/core_polaris.h>
+#undef __EXTERN_INLINE
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+
+#include <asm/ptrace.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+/*
+ * BIOS32-style PCI interface:
+ */
+
+#define DEBUG_CONFIG 0
+
+#if DEBUG_CONFIG
+# define DBG_CFG(args) printk args
+#else
+# define DBG_CFG(args)
+#endif
+
+
+/*
+ * Given a bus, device, and function number, compute resulting
+ * configuration space address. This is fairly straightforward
+ * on POLARIS, since the chip itself generates Type 0 or Type 1
+ * cycles automatically depending on the bus number (Bus 0 is
+ * hardwired to Type 0, all others are Type 1. Peer bridges
+ * are not supported).
+ *
+ * All types:
+ *
+ * 3 3 3 3|3 3 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * 9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * |1|1|1|1|1|0|0|1|1|1|1|1|1|1|1|0|B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|x|x|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * 23:16 bus number (8 bits = 256 possible buses)
+ * 15:11 Device number (5 bits)
+ * 10:8 function number
+ * 7:2 register number
+ *
+ * Notes:
+ * The function number selects which function of a multi-function device
+ * (e.g., scsi and ethernet).
+ *
+ * The register selects a DWORD (32 bit) register offset. Hence it
+ * doesn't get shifted by 2 bits as we want to "drop" the bottom two
+ * bits.
+ */
+
+static int
+mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
+ unsigned long *pci_addr, u8 *type1)
+{
+ u8 bus = pbus->number;
+
+ *type1 = (bus == 0) ? 0 : 1;
+ *pci_addr = (bus << 16) | (device_fn << 8) | (where) |
+ POLARIS_DENSE_CONFIG_BASE;
+
+	DBG_CFG(("mk_conf_addr(bus=%d, device_fn=0x%x, where=0x%x,"
+		 " returning address 0x%lx\n",
+		 bus, device_fn, where, *pci_addr));
+
+ return 0;
+}
+
+static int
+polaris_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *value)
+{
+ unsigned long addr;
+ unsigned char type1;
+
+ if (mk_conf_addr(bus, devfn, where, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ *value = __kernel_ldbu(*(vucp)addr);
+ break;
+ case 2:
+ *value = __kernel_ldwu(*(vusp)addr);
+ break;
+ case 4:
+ *value = *(vuip)addr;
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+
+static int
+polaris_write_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 value)
+{
+ unsigned long addr;
+ unsigned char type1;
+
+ if (mk_conf_addr(bus, devfn, where, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ __kernel_stb(value, *(vucp)addr);
+ mb();
+ __kernel_ldbu(*(vucp)addr);
+ break;
+ case 2:
+ __kernel_stw(value, *(vusp)addr);
+ mb();
+ __kernel_ldwu(*(vusp)addr);
+ break;
+ case 4:
+ *(vuip)addr = value;
+ mb();
+ *(vuip)addr;
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
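+
+/*
+ * The store / mb() / read-back sequence above is the usual idiom for
+ * flushing a posted PCI write: the mb() orders the store, and
+ * re-reading the same location forces it out to the device before the
+ * routine returns.  The same pattern recurs throughout these core_*
+ * files.
+ */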
+
+struct pci_ops polaris_pci_ops =
+{
+ .read = polaris_read_config,
+ .write = polaris_write_config,
+};
+
+void __init
+polaris_init_arch(void)
+{
+ struct pci_controller *hose;
+
+ /* May need to initialize error reporting (see PCICTL0/1), but
+ * for now assume that the firmware has done the right thing
+ * already.
+ */
+#if 0
+ printk("polaris_init_arch(): trusting firmware for setup\n");
+#endif
+
+ /*
+ * Create our single hose.
+ */
+
+ pci_isa_hose = hose = alloc_pci_controller();
+ hose->io_space = &ioport_resource;
+ hose->mem_space = &iomem_resource;
+ hose->index = 0;
+
+ hose->sparse_mem_base = 0;
+ hose->dense_mem_base = POLARIS_DENSE_MEM_BASE - IDENT_ADDR;
+ hose->sparse_io_base = 0;
+ hose->dense_io_base = POLARIS_DENSE_IO_BASE - IDENT_ADDR;
+
+ hose->sg_isa = hose->sg_pci = NULL;
+
+ /* The I/O window is fixed at 2G @ 2G. */
+ __direct_map_base = 0x80000000;
+ __direct_map_size = 0x80000000;
+}
+
+static inline void
+polaris_pci_clr_err(void)
+{
+ *(vusp)POLARIS_W_STATUS;
+ /* Write 1's to settable bits to clear errors */
+ *(vusp)POLARIS_W_STATUS = 0x7800;
+ mb();
+ *(vusp)POLARIS_W_STATUS;
+}
+
+void
+polaris_machine_check(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs * regs)
+{
+ /* Clear the error before any reporting. */
+ mb();
+ mb();
+ draina();
+ polaris_pci_clr_err();
+ wrmces(0x7);
+ mb();
+
+ process_mcheck_info(vector, la_ptr, regs, "POLARIS",
+ mcheck_expected(0));
+}
diff --git a/arch/alpha/kernel/core_t2.c b/arch/alpha/kernel/core_t2.c
new file mode 100644
index 000000000000..ecce09e3626a
--- /dev/null
+++ b/arch/alpha/kernel/core_t2.c
@@ -0,0 +1,622 @@
+/*
+ * linux/arch/alpha/kernel/core_t2.c
+ *
+ * Written by Jay A Estabrook (jestabro@amt.tay1.dec.com).
+ * December 1996.
+ *
+ * based on CIA code by David A Rusling (david.rusling@reo.mts.dec.com)
+ *
+ * Code common to all T2 core logic chips.
+ */
+
+#define __EXTERN_INLINE
+#include <asm/io.h>
+#include <asm/core_t2.h>
+#undef __EXTERN_INLINE
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+
+#include <asm/ptrace.h>
+#include <asm/delay.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+/* For dumping initial DMA window settings. */
+#define DEBUG_PRINT_INITIAL_SETTINGS 0
+
+/* For dumping final DMA window settings. */
+#define DEBUG_PRINT_FINAL_SETTINGS 0
+
+/*
+ * By default, we direct-map starting at 2GB, in order to allow the
+ * maximum size direct-map window (2GB) to match the maximum amount of
+ * memory (2GB) that can be present on SABLEs. But that limits the
+ * floppy to DMA only via the scatter/gather window set up for 8MB
+ * ISA DMA, since the maximum ISA DMA address is 2GB-1.
+ *
+ * For now, this seems a reasonable trade-off: even though most SABLEs
+ * have less than 1GB of memory, floppy usage/performance will not
+ * really be affected by forcing it to go via scatter/gather...
+ */
+#define T2_DIRECTMAP_2G 1
+
+#if T2_DIRECTMAP_2G
+# define T2_DIRECTMAP_START 0x80000000UL
+# define T2_DIRECTMAP_LENGTH 0x80000000UL
+#else
+# define T2_DIRECTMAP_START 0x40000000UL
+# define T2_DIRECTMAP_LENGTH 0x40000000UL
+#endif
+
+/* The ISA scatter/gather window settings. */
+#define T2_ISA_SG_START 0x00800000UL
+#define T2_ISA_SG_LENGTH 0x00800000UL
+
+/*
+ * NOTE: Herein lie back-to-back mb instructions. They are magic.
+ * One plausible explanation is that the i/o controller does not properly
+ * handle the system transaction. Another involves timing. Ho hum.
+ */
+
+/*
+ * BIOS32-style PCI interface:
+ */
+
+#define DEBUG_CONFIG 0
+
+#if DEBUG_CONFIG
+# define DBG(args) printk args
+#else
+# define DBG(args)
+#endif
+
+static volatile unsigned int t2_mcheck_any_expected;
+static volatile unsigned int t2_mcheck_last_taken;
+
+/* Place to save the DMA Window registers as set up by SRM
+ for restoration during shutdown. */
+static struct
+{
+ struct {
+ unsigned long wbase;
+ unsigned long wmask;
+ unsigned long tbase;
+ } window[2];
+ unsigned long hae_1;
+ unsigned long hae_2;
+ unsigned long hae_3;
+ unsigned long hae_4;
+ unsigned long hbase;
+} t2_saved_config __attribute__((common));
+
+/*
+ * Given a bus, device, and function number, compute resulting
+ * configuration space address and setup the T2_HAXR2 register
+ * accordingly. It is therefore not safe to have concurrent
+ * invocations to configuration space access routines, but there
+ * really shouldn't be any need for this.
+ *
+ * Type 0:
+ *
+ * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | |D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|0|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * 31:11 Device select bits.
+ * 10:8 Function number
+ * 7:2 Register number
+ *
+ * Type 1:
+ *
+ * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * 31:24 reserved
+ * 23:16 bus number (8 bits = 256 possible buses)
+ * 15:11 Device number (5 bits)
+ * 10:8 function number
+ * 7:2 register number
+ *
+ * Notes:
+ * The function number selects which function of a multi-function device
+ * (e.g., SCSI and Ethernet).
+ *
+ * The register selects a DWORD (32 bit) register offset. Hence it
+ * doesn't get shifted by 2 bits as we want to "drop" the bottom two
+ * bits.
+ */
+
+static int
+mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
+ unsigned long *pci_addr, unsigned char *type1)
+{
+ unsigned long addr;
+ u8 bus = pbus->number;
+
+	DBG(("mk_conf_addr(bus=%d, dfn=0x%x, where=0x%x,"
+	     " pci_addr=0x%p, type1=0x%p)\n",
+	     bus, device_fn, where, pci_addr, type1));
+
+ if (bus == 0) {
+ int device = device_fn >> 3;
+
+ /* Type 0 configuration cycle. */
+
+ if (device > 8) {
+			DBG(("mk_conf_addr: device (%d) > 8, returning -1\n",
+ device));
+ return -1;
+ }
+
+ *type1 = 0;
+ addr = (0x0800L << device) | ((device_fn & 7) << 8) | (where);
+ } else {
+ /* Type 1 configuration cycle. */
+ *type1 = 1;
+ addr = (bus << 16) | (device_fn << 8) | (where);
+ }
+ *pci_addr = addr;
+ DBG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
+ return 0;
+}
+
+/*
+ * NOTE: both conf_read() and conf_write() may set HAE_3 when needing
+ * to do type1 access. This is protected by the use of spinlock IRQ
+ * primitives in the wrapper functions pci_{read,write}_config_*()
+ * defined in drivers/pci/pci.c.
+ */
+static unsigned int
+conf_read(unsigned long addr, unsigned char type1)
+{
+ unsigned int value, cpu, taken;
+ unsigned long t2_cfg = 0;
+
+ cpu = smp_processor_id();
+
+ DBG(("conf_read(addr=0x%lx, type1=%d)\n", addr, type1));
+
+ /* If Type1 access, must set T2 CFG. */
+ if (type1) {
+ t2_cfg = *(vulp)T2_HAE_3 & ~0xc0000000UL;
+ *(vulp)T2_HAE_3 = 0x40000000UL | t2_cfg;
+ mb();
+ }
+ mb();
+ draina();
+
+ mcheck_expected(cpu) = 1;
+ mcheck_taken(cpu) = 0;
+ t2_mcheck_any_expected |= (1 << cpu);
+ mb();
+
+ /* Access configuration space. */
+ value = *(vuip)addr;
+ mb();
+ mb(); /* magic */
+
+ /* Wait for possible mcheck. Also, this lets other CPUs clear
+ their mchecks as well, as they can reliably tell when
+ another CPU is in the midst of handling a real mcheck via
+ the "taken" function. */
+ udelay(100);
+
+ if ((taken = mcheck_taken(cpu))) {
+ mcheck_taken(cpu) = 0;
+ t2_mcheck_last_taken |= (1 << cpu);
+ value = 0xffffffffU;
+ mb();
+ }
+ mcheck_expected(cpu) = 0;
+ t2_mcheck_any_expected = 0;
+ mb();
+
+ /* If Type1 access, must reset T2 CFG so normal IO space ops work. */
+ if (type1) {
+ *(vulp)T2_HAE_3 = t2_cfg;
+ mb();
+ }
+
+ return value;
+}
+
+static void
+conf_write(unsigned long addr, unsigned int value, unsigned char type1)
+{
+ unsigned int cpu, taken;
+ unsigned long t2_cfg = 0;
+
+ cpu = smp_processor_id();
+
+ /* If Type1 access, must set T2 CFG. */
+ if (type1) {
+ t2_cfg = *(vulp)T2_HAE_3 & ~0xc0000000UL;
+ *(vulp)T2_HAE_3 = t2_cfg | 0x40000000UL;
+ mb();
+ }
+ mb();
+ draina();
+
+ mcheck_expected(cpu) = 1;
+ mcheck_taken(cpu) = 0;
+ t2_mcheck_any_expected |= (1 << cpu);
+ mb();
+
+ /* Access configuration space. */
+ *(vuip)addr = value;
+ mb();
+ mb(); /* magic */
+
+ /* Wait for possible mcheck. Also, this lets other CPUs clear
+ their mchecks as well, as they can reliably tell when
+ this CPU is in the midst of handling a real mcheck via
+ the "taken" function. */
+ udelay(100);
+
+ if ((taken = mcheck_taken(cpu))) {
+ mcheck_taken(cpu) = 0;
+ t2_mcheck_last_taken |= (1 << cpu);
+ mb();
+ }
+ mcheck_expected(cpu) = 0;
+ t2_mcheck_any_expected = 0;
+ mb();
+
+ /* If Type1 access, must reset T2 CFG so normal IO space ops work. */
+ if (type1) {
+ *(vulp)T2_HAE_3 = t2_cfg;
+ mb();
+ }
+}
+
+static int
+t2_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *value)
+{
+ unsigned long addr, pci_addr;
+ unsigned char type1;
+ int shift;
+ long mask;
+
+ if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ mask = (size - 1) * 8;
+ shift = (where & 3) * 8;
+ addr = (pci_addr << 5) + mask + T2_CONF;
+ *value = conf_read(addr, type1) >> (shift);
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+t2_write_config(struct pci_bus *bus, unsigned int devfn, int where, int size,
+ u32 value)
+{
+ unsigned long addr, pci_addr;
+ unsigned char type1;
+ long mask;
+
+ if (mk_conf_addr(bus, devfn, where, &pci_addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ mask = (size - 1) * 8;
+ addr = (pci_addr << 5) + mask + T2_CONF;
+ conf_write(addr, value << ((where & 3) * 8), type1);
+ return PCIBIOS_SUCCESSFUL;
+}
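+
+/*
+ * Worked example (illustrative values): a 2-byte read of register
+ * 0x06 on bus 0, device 3, function 0 gives
+ *
+ *	pci_addr = (0x0800 << 3) | (0 << 8) | 0x06 = 0x4006
+ *	addr     = (0x4006 << 5) + (2 - 1) * 8 + T2_CONF
+ *
+ * so, as on the other sparse-space chipsets, the transfer size is
+ * encoded in the low bits of the swizzled address.
+ */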
+
+struct pci_ops t2_pci_ops =
+{
+ .read = t2_read_config,
+ .write = t2_write_config,
+};
+
+static void __init
+t2_direct_map_window1(unsigned long base, unsigned long length)
+{
+ unsigned long temp;
+
+ __direct_map_base = base;
+ __direct_map_size = length;
+
+ temp = (base & 0xfff00000UL) | ((base + length - 1) >> 20);
+ *(vulp)T2_WBASE1 = temp | 0x80000UL; /* OR in ENABLE bit */
+ temp = (length - 1) & 0xfff00000UL;
+ *(vulp)T2_WMASK1 = temp;
+ *(vulp)T2_TBASE1 = 0;
+
+#if DEBUG_PRINT_FINAL_SETTINGS
+ printk("%s: setting WBASE1=0x%lx WMASK1=0x%lx TBASE1=0x%lx\n",
+ __FUNCTION__,
+ *(vulp)T2_WBASE1,
+ *(vulp)T2_WMASK1,
+ *(vulp)T2_TBASE1);
+#endif
+}
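+
+/*
+ * For the default 2GB map, the value written to T2_WBASE1 above works
+ * out to 0x80000000 | (0xffffffff >> 20) | 0x80000 = 0x80080fff:
+ * window base in bits <31:20>, window end in bits <11:0>, and the
+ * enable bit at bit 19.  (Illustrative arithmetic only.)
+ */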
+
+static void __init
+t2_sg_map_window2(struct pci_controller *hose,
+ unsigned long base,
+ unsigned long length)
+{
+ unsigned long temp;
+
+	/* Only one SG window is available, since the other is used for
+	   the direct map; make it an ISA SG area, mainly for the floppy. */
+ hose->sg_isa = iommu_arena_new(hose, base, length, 0);
+ hose->sg_pci = NULL;
+
+ temp = (base & 0xfff00000UL) | ((base + length - 1) >> 20);
+ *(vulp)T2_WBASE2 = temp | 0xc0000UL; /* OR in ENABLE/SG bits */
+ temp = (length - 1) & 0xfff00000UL;
+ *(vulp)T2_WMASK2 = temp;
+ *(vulp)T2_TBASE2 = virt_to_phys(hose->sg_isa->ptes) >> 1;
+ mb();
+
+ t2_pci_tbi(hose, 0, -1); /* flush TLB all */
+
+#if DEBUG_PRINT_FINAL_SETTINGS
+ printk("%s: setting WBASE2=0x%lx WMASK2=0x%lx TBASE2=0x%lx\n",
+ __FUNCTION__,
+ *(vulp)T2_WBASE2,
+ *(vulp)T2_WMASK2,
+ *(vulp)T2_TBASE2);
+#endif
+}
+
+static void __init
+t2_save_configuration(void)
+{
+#if DEBUG_PRINT_INITIAL_SETTINGS
+ printk("%s: HAE_1 was 0x%lx\n", __FUNCTION__, srm_hae); /* HW is 0 */
+ printk("%s: HAE_2 was 0x%lx\n", __FUNCTION__, *(vulp)T2_HAE_2);
+ printk("%s: HAE_3 was 0x%lx\n", __FUNCTION__, *(vulp)T2_HAE_3);
+ printk("%s: HAE_4 was 0x%lx\n", __FUNCTION__, *(vulp)T2_HAE_4);
+ printk("%s: HBASE was 0x%lx\n", __FUNCTION__, *(vulp)T2_HBASE);
+
+ printk("%s: WBASE1=0x%lx WMASK1=0x%lx TBASE1=0x%lx\n", __FUNCTION__,
+ *(vulp)T2_WBASE1, *(vulp)T2_WMASK1, *(vulp)T2_TBASE1);
+ printk("%s: WBASE2=0x%lx WMASK2=0x%lx TBASE2=0x%lx\n", __FUNCTION__,
+ *(vulp)T2_WBASE2, *(vulp)T2_WMASK2, *(vulp)T2_TBASE2);
+#endif
+
+ /*
+ * Save the DMA Window registers.
+ */
+ t2_saved_config.window[0].wbase = *(vulp)T2_WBASE1;
+ t2_saved_config.window[0].wmask = *(vulp)T2_WMASK1;
+ t2_saved_config.window[0].tbase = *(vulp)T2_TBASE1;
+ t2_saved_config.window[1].wbase = *(vulp)T2_WBASE2;
+ t2_saved_config.window[1].wmask = *(vulp)T2_WMASK2;
+ t2_saved_config.window[1].tbase = *(vulp)T2_TBASE2;
+
+ t2_saved_config.hae_1 = srm_hae; /* HW is already set to 0 */
+ t2_saved_config.hae_2 = *(vulp)T2_HAE_2;
+ t2_saved_config.hae_3 = *(vulp)T2_HAE_3;
+ t2_saved_config.hae_4 = *(vulp)T2_HAE_4;
+ t2_saved_config.hbase = *(vulp)T2_HBASE;
+}
+
+void __init
+t2_init_arch(void)
+{
+ struct pci_controller *hose;
+ unsigned long temp;
+ unsigned int i;
+
+ for (i = 0; i < NR_CPUS; i++) {
+ mcheck_expected(i) = 0;
+ mcheck_taken(i) = 0;
+ }
+ t2_mcheck_any_expected = 0;
+ t2_mcheck_last_taken = 0;
+
+ /* Enable scatter/gather TLB use. */
+ temp = *(vulp)T2_IOCSR;
+ if (!(temp & (0x1UL << 26))) {
+ printk("t2_init_arch: enabling SG TLB, IOCSR was 0x%lx\n",
+ temp);
+ *(vulp)T2_IOCSR = temp | (0x1UL << 26);
+ mb();
+ *(vulp)T2_IOCSR; /* read it back to make sure */
+ }
+
+ t2_save_configuration();
+
+ /*
+ * Create our single hose.
+ */
+ pci_isa_hose = hose = alloc_pci_controller();
+ hose->io_space = &ioport_resource;
+ hose->mem_space = &iomem_resource;
+ hose->index = 0;
+
+ hose->sparse_mem_base = T2_SPARSE_MEM - IDENT_ADDR;
+ hose->dense_mem_base = T2_DENSE_MEM - IDENT_ADDR;
+ hose->sparse_io_base = T2_IO - IDENT_ADDR;
+ hose->dense_io_base = 0;
+
+ /*
+ * Set up the PCI->physical memory translation windows.
+ *
+ * Window 1 is direct mapped.
+ * Window 2 is scatter/gather (for ISA).
+ */
+
+ t2_direct_map_window1(T2_DIRECTMAP_START, T2_DIRECTMAP_LENGTH);
+
+ /* Always make an ISA DMA window. */
+ t2_sg_map_window2(hose, T2_ISA_SG_START, T2_ISA_SG_LENGTH);
+
+ *(vulp)T2_HBASE = 0x0; /* Disable HOLES. */
+
+ /* Zero HAE. */
+ *(vulp)T2_HAE_1 = 0; mb(); /* Sparse MEM HAE */
+ *(vulp)T2_HAE_2 = 0; mb(); /* Sparse I/O HAE */
+ *(vulp)T2_HAE_3 = 0; mb(); /* Config Space HAE */
+
+ /*
+ * We also now zero out HAE_4, the dense memory HAE, so that
+ * we need not account for its "offset" when accessing dense
+ * memory resources which we allocated in our normal way. This
+ * HAE would need to stay untouched were we to keep the SRM
+ * resource settings.
+ *
+ * Thus we can now run standard X servers on SABLE/LYNX. :-)
+ */
+ *(vulp)T2_HAE_4 = 0; mb();
+}
+
+void
+t2_kill_arch(int mode)
+{
+ /*
+ * Restore the DMA Window registers.
+ */
+ *(vulp)T2_WBASE1 = t2_saved_config.window[0].wbase;
+ *(vulp)T2_WMASK1 = t2_saved_config.window[0].wmask;
+ *(vulp)T2_TBASE1 = t2_saved_config.window[0].tbase;
+ *(vulp)T2_WBASE2 = t2_saved_config.window[1].wbase;
+ *(vulp)T2_WMASK2 = t2_saved_config.window[1].wmask;
+ *(vulp)T2_TBASE2 = t2_saved_config.window[1].tbase;
+ mb();
+
+ *(vulp)T2_HAE_1 = srm_hae;
+ *(vulp)T2_HAE_2 = t2_saved_config.hae_2;
+ *(vulp)T2_HAE_3 = t2_saved_config.hae_3;
+ *(vulp)T2_HAE_4 = t2_saved_config.hae_4;
+ *(vulp)T2_HBASE = t2_saved_config.hbase;
+ mb();
+ *(vulp)T2_HBASE; /* READ it back to ensure WRITE occurred. */
+}
+
+void
+t2_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
+{
+ unsigned long t2_iocsr;
+
+ t2_iocsr = *(vulp)T2_IOCSR;
+
+ /* set the TLB Clear bit */
+ *(vulp)T2_IOCSR = t2_iocsr | (0x1UL << 28);
+ mb();
+ *(vulp)T2_IOCSR; /* read it back to make sure */
+
+ /* clear the TLB Clear bit */
+ *(vulp)T2_IOCSR = t2_iocsr & ~(0x1UL << 28);
+ mb();
+ *(vulp)T2_IOCSR; /* read it back to make sure */
+}
+
+#define SIC_SEIC (1UL << 33) /* System Event Clear */
+
+static void
+t2_clear_errors(int cpu)
+{
+ struct sable_cpu_csr *cpu_regs;
+
+ cpu_regs = (struct sable_cpu_csr *)T2_CPUn_BASE(cpu);
+
+ cpu_regs->sic &= ~SIC_SEIC;
+
+ /* Clear CPU errors. */
+ cpu_regs->bcce |= cpu_regs->bcce;
+ cpu_regs->cbe |= cpu_regs->cbe;
+ cpu_regs->bcue |= cpu_regs->bcue;
+ cpu_regs->dter |= cpu_regs->dter;
+
+ *(vulp)T2_CERR1 |= *(vulp)T2_CERR1;
+ *(vulp)T2_PERR1 |= *(vulp)T2_PERR1;
+
+ mb();
+ mb(); /* magic */
+}
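+
+/*
+ * The "reg |= reg" stores above are not no-ops: these error registers
+ * are write-one-to-clear, so reading the latched error bits and
+ * writing them straight back clears exactly the errors that were
+ * pending.
+ */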
+
+/*
+ * SABLE seems to have a "broadcast" style machine check, in that all
+ * CPUs receive it. And, the issuing CPU, in the case of PCI Config
+ * space read/write faults, will also receive a second mcheck, upon
+ * lowering IPL during completion processing in pci_read_config_byte()
+ * et al.
+ *
+ * Hence all the taken/expected/any_expected/last_taken stuff...
+ */
+void
+t2_machine_check(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs * regs)
+{
+ int cpu = smp_processor_id();
+#ifdef CONFIG_VERBOSE_MCHECK
+ struct el_common *mchk_header = (struct el_common *)la_ptr;
+#endif
+
+ /* Clear the error before any reporting. */
+ mb();
+ mb(); /* magic */
+ draina();
+ t2_clear_errors(cpu);
+
+ /* This should not actually be done until the logout frame is
+ examined, but, since we don't do that, go on and do this... */
+ wrmces(0x7);
+ mb();
+
+ /* Now, do testing for the anomalous conditions. */
+ if (!mcheck_expected(cpu) && t2_mcheck_any_expected) {
+ /*
+ * FUNKY: Received mcheck on a CPU and not
+ * expecting it, but another CPU is expecting one.
+ *
+ * Just dismiss it for now on this CPU...
+ */
+#ifdef CONFIG_VERBOSE_MCHECK
+ if (alpha_verbose_mcheck > 1) {
+ printk("t2_machine_check(cpu%d): any_expected 0x%x -"
+ " (assumed) spurious -"
+ " code 0x%x\n", cpu, t2_mcheck_any_expected,
+ (unsigned int)mchk_header->code);
+ }
+#endif
+ return;
+ }
+
+ if (!mcheck_expected(cpu) && !t2_mcheck_any_expected) {
+ if (t2_mcheck_last_taken & (1 << cpu)) {
+#ifdef CONFIG_VERBOSE_MCHECK
+ if (alpha_verbose_mcheck > 1) {
+ printk("t2_machine_check(cpu%d): last_taken 0x%x - "
+ "unexpected mcheck - code 0x%x\n",
+ cpu, t2_mcheck_last_taken,
+ (unsigned int)mchk_header->code);
+ }
+#endif
+ t2_mcheck_last_taken = 0;
+ mb();
+ return;
+ } else {
+ t2_mcheck_last_taken = 0;
+ mb();
+ }
+ }
+
+#ifdef CONFIG_VERBOSE_MCHECK
+ if (alpha_verbose_mcheck > 1) {
+ printk("%s t2_mcheck(cpu%d): last_taken 0x%x - "
+ "any_expected 0x%x - code 0x%x\n",
+ (mcheck_expected(cpu) ? "EX" : "UN"), cpu,
+ t2_mcheck_last_taken, t2_mcheck_any_expected,
+ (unsigned int)mchk_header->code);
+ }
+#endif
+
+ process_mcheck_info(vector, la_ptr, regs, "T2", mcheck_expected(cpu));
+}
diff --git a/arch/alpha/kernel/core_titan.c b/arch/alpha/kernel/core_titan.c
new file mode 100644
index 000000000000..3662fef7db9a
--- /dev/null
+++ b/arch/alpha/kernel/core_titan.c
@@ -0,0 +1,806 @@
+/*
+ * linux/arch/alpha/kernel/core_titan.c
+ *
+ * Code common to all TITAN core logic chips.
+ */
+
+#define __EXTERN_INLINE inline
+#include <asm/io.h>
+#include <asm/core_titan.h>
+#undef __EXTERN_INLINE
+
+#include <linux/module.h>
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/vmalloc.h>
+#include <linux/bootmem.h>
+
+#include <asm/ptrace.h>
+#include <asm/smp.h>
+#include <asm/pgalloc.h>
+#include <asm/tlbflush.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+/* Save Titan configuration data as the console had it set up. */
+
+struct
+{
+ unsigned long wsba[4];
+ unsigned long wsm[4];
+ unsigned long tba[4];
+} saved_config[4] __attribute__((common));
+
+/*
+ * BIOS32-style PCI interface:
+ */
+
+#define DEBUG_CONFIG 0
+
+#if DEBUG_CONFIG
+# define DBG_CFG(args) printk args
+#else
+# define DBG_CFG(args)
+#endif
+
+
+/*
+ * Routines to access TIG registers.
+ */
+static inline volatile unsigned long *
+mk_tig_addr(int offset)
+{
+ return (volatile unsigned long *)(TITAN_TIG_SPACE + (offset << 6));
+}
+
+static inline u8
+titan_read_tig(int offset, u8 value)
+{
+ volatile unsigned long *tig_addr = mk_tig_addr(offset);
+ return (u8)(*tig_addr & 0xff);
+}
+
+static inline void
+titan_write_tig(int offset, u8 value)
+{
+ volatile unsigned long *tig_addr = mk_tig_addr(offset);
+ *tig_addr = (unsigned long)value;
+}
+
+
+/*
+ * Given a bus, device, and function number, compute the resulting
+ * configuration space address.  Unlike chipsets that must first set
+ * up a shared register (HAXR2 and the like), the address alone
+ * selects the target here, so concurrent invocations of the
+ * configuration space access routines are harmless.
+ *
+ * Note that all config space accesses use Type 1 address format.
+ *
+ * Note also that type 1 is determined by non-zero bus number.
+ *
+ * Type 1:
+ *
+ * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * 31:24 reserved
+ * 23:16 bus number (8 bits = 256 possible buses)
+ * 15:11 Device number (5 bits)
+ * 10:8 function number
+ * 7:2 register number
+ *
+ * Notes:
+ * The function number selects which function of a multi-function device
+ * (e.g., SCSI and Ethernet).
+ *
+ * The register selects a DWORD (32 bit) register offset. Hence it
+ * doesn't get shifted by 2 bits as we want to "drop" the bottom two
+ * bits.
+ */
+
+static int
+mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
+ unsigned long *pci_addr, unsigned char *type1)
+{
+ struct pci_controller *hose = pbus->sysdata;
+ unsigned long addr;
+ u8 bus = pbus->number;
+
+ DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, "
+ "pci_addr=0x%p, type1=0x%p)\n",
+ bus, device_fn, where, pci_addr, type1));
+
+ if (!pbus->parent) /* No parent means peer PCI bus. */
+ bus = 0;
+ *type1 = (bus != 0);
+
+ addr = (bus << 16) | (device_fn << 8) | where;
+ addr |= hose->config_space_base;
+
+ *pci_addr = addr;
+ DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
+ return 0;
+}
+
+static int
+titan_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *value)
+{
+ unsigned long addr;
+ unsigned char type1;
+
+ if (mk_conf_addr(bus, devfn, where, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ *value = __kernel_ldbu(*(vucp)addr);
+ break;
+ case 2:
+ *value = __kernel_ldwu(*(vusp)addr);
+ break;
+ case 4:
+ *value = *(vuip)addr;
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+titan_write_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 value)
+{
+ unsigned long addr;
+ unsigned char type1;
+
+ if (mk_conf_addr(bus, devfn, where, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ __kernel_stb(value, *(vucp)addr);
+ mb();
+ __kernel_ldbu(*(vucp)addr);
+ break;
+ case 2:
+ __kernel_stw(value, *(vusp)addr);
+ mb();
+ __kernel_ldwu(*(vusp)addr);
+ break;
+ case 4:
+ *(vuip)addr = value;
+ mb();
+ *(vuip)addr;
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops titan_pci_ops =
+{
+ .read = titan_read_config,
+ .write = titan_write_config,
+};
+
+
+void
+titan_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
+{
+ titan_pachip *pachip =
+ (hose->index & 1) ? TITAN_pachip1 : TITAN_pachip0;
+ titan_pachip_port *port;
+ volatile unsigned long *csr;
+ unsigned long value;
+
+ /* Get the right hose. */
+ port = &pachip->g_port;
+ if (hose->index & 2)
+ port = &pachip->a_port;
+
+ /* We can invalidate up to 8 tlb entries in a go. The flush
+ matches against <31:16> in the pci address.
+ Note that gtlbi* and atlbi* are in the same place in the g_port
+ and a_port, respectively, so the g_port offset can be used
+ even if hose is an a_port */
+ csr = &port->port_specific.g.gtlbia.csr;
+ if (((start ^ end) & 0xffff0000) == 0)
+ csr = &port->port_specific.g.gtlbiv.csr;
+
+ /* For TBIA, it doesn't matter what value we write. For TBI,
+ it's the shifted tag bits. */
+ value = (start & 0xffff0000) >> 12;
+
+ wmb();
+ *csr = value;
+ mb();
+ *csr;
+}
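+
+/*
+ * Example with illustrative addresses: flushing 0x12340000..0x1234ffff
+ * satisfies ((start ^ end) & 0xffff0000) == 0, so the targeted gtlbiv
+ * register is written with (0x12340000 & 0xffff0000) >> 12 = 0x12340;
+ * the flush-all call titan_pci_tbi(hose, 0, -1) used at init time
+ * fails that test and takes the gtlbia path instead.
+ */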
+
+static int
+titan_query_agp(titan_pachip_port *port)
+{
+ union TPAchipPCTL pctl;
+
+	/* Read the current APCTL. */
+	pctl.pctl_q_whole = port->pctl.csr;
+
+	return pctl.pctl_r_bits.apctl_v_agp_present;
+}
+
+static void __init
+titan_init_one_pachip_port(titan_pachip_port *port, int index)
+{
+ struct pci_controller *hose;
+
+ hose = alloc_pci_controller();
+ if (index == 0)
+ pci_isa_hose = hose;
+ hose->io_space = alloc_resource();
+ hose->mem_space = alloc_resource();
+
+ /*
+ * This is for userland consumption. The 40-bit PIO bias that we
+ * use in the kernel through KSEG doesn't work in the page table
+ * based user mappings. (43-bit KSEG sign extends the physical
+ * address from bit 40 to hit the I/O bit - mapped addresses don't).
+ * So make sure we get the 43-bit PIO bias.
+ */
+ hose->sparse_mem_base = 0;
+ hose->sparse_io_base = 0;
+ hose->dense_mem_base
+ = (TITAN_MEM(index) & 0xffffffffffUL) | 0x80000000000UL;
+ hose->dense_io_base
+ = (TITAN_IO(index) & 0xffffffffffUL) | 0x80000000000UL;
+
+ hose->config_space_base = TITAN_CONF(index);
+ hose->index = index;
+
+ hose->io_space->start = TITAN_IO(index) - TITAN_IO_BIAS;
+ hose->io_space->end = hose->io_space->start + TITAN_IO_SPACE - 1;
+ hose->io_space->name = pci_io_names[index];
+ hose->io_space->flags = IORESOURCE_IO;
+
+ hose->mem_space->start = TITAN_MEM(index) - TITAN_MEM_BIAS;
+ hose->mem_space->end = hose->mem_space->start + 0xffffffff;
+ hose->mem_space->name = pci_mem_names[index];
+ hose->mem_space->flags = IORESOURCE_MEM;
+
+ if (request_resource(&ioport_resource, hose->io_space) < 0)
+ printk(KERN_ERR "Failed to request IO on hose %d\n", index);
+ if (request_resource(&iomem_resource, hose->mem_space) < 0)
+ printk(KERN_ERR "Failed to request MEM on hose %d\n", index);
+
+ /*
+ * Save the existing PCI window translations. SRM will
+ * need them when we go to reboot.
+ */
+ saved_config[index].wsba[0] = port->wsba[0].csr;
+ saved_config[index].wsm[0] = port->wsm[0].csr;
+ saved_config[index].tba[0] = port->tba[0].csr;
+
+ saved_config[index].wsba[1] = port->wsba[1].csr;
+ saved_config[index].wsm[1] = port->wsm[1].csr;
+ saved_config[index].tba[1] = port->tba[1].csr;
+
+ saved_config[index].wsba[2] = port->wsba[2].csr;
+ saved_config[index].wsm[2] = port->wsm[2].csr;
+ saved_config[index].tba[2] = port->tba[2].csr;
+
+ saved_config[index].wsba[3] = port->wsba[3].csr;
+ saved_config[index].wsm[3] = port->wsm[3].csr;
+ saved_config[index].tba[3] = port->tba[3].csr;
+
+ /*
+ * Set up the PCI to main memory translation windows.
+ *
+ * Note: Window 3 on Titan is Scatter-Gather ONLY.
+ *
+ * Window 0 is scatter-gather 8MB at 8MB (for isa)
+ * Window 1 is direct access 1GB at 2GB
+ * Window 2 is scatter-gather 1GB at 3GB
+ */
+ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
+ hose->sg_isa->align_entry = 8; /* 64KB for ISA */
+
+ hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x40000000, 0);
+ hose->sg_pci->align_entry = 4; /* Titan caches 4 PTEs at a time */
+
+ port->wsba[0].csr = hose->sg_isa->dma_base | 3;
+ port->wsm[0].csr = (hose->sg_isa->size - 1) & 0xfff00000;
+ port->tba[0].csr = virt_to_phys(hose->sg_isa->ptes);
+
+ port->wsba[1].csr = __direct_map_base | 1;
+ port->wsm[1].csr = (__direct_map_size - 1) & 0xfff00000;
+ port->tba[1].csr = 0;
+
+ port->wsba[2].csr = hose->sg_pci->dma_base | 3;
+ port->wsm[2].csr = (hose->sg_pci->size - 1) & 0xfff00000;
+ port->tba[2].csr = virt_to_phys(hose->sg_pci->ptes);
+
+ port->wsba[3].csr = 0;
+
+ /* Enable the Monster Window to make DAC pci64 possible. */
+ port->pctl.csr |= pctl_m_mwin;
+
+ /*
+ * If it's an AGP port, initialize agplastwr.
+ */
+ if (titan_query_agp(port))
+ port->port_specific.a.agplastwr.csr = __direct_map_base;
+
+ titan_pci_tbi(hose, 0, -1);
+}
+
+static void __init
+titan_init_pachips(titan_pachip *pachip0, titan_pachip *pachip1)
+{
+ int pchip1_present = TITAN_cchip->csc.csr & 1L<<14;
+
+ /* Init the ports in hose order... */
+ titan_init_one_pachip_port(&pachip0->g_port, 0); /* hose 0 */
+ if (pchip1_present)
+ titan_init_one_pachip_port(&pachip1->g_port, 1);/* hose 1 */
+ titan_init_one_pachip_port(&pachip0->a_port, 2); /* hose 2 */
+ if (pchip1_present)
+ titan_init_one_pachip_port(&pachip1->a_port, 3);/* hose 3 */
+}
+
+static void __init
+titan_init_vga_hose(void)
+{
+#ifdef CONFIG_VGA_HOSE
+ u64 *pu64 = (u64 *)((u64)hwrpb + hwrpb->ctbt_offset);
+
+ if (pu64[7] == 3) { /* TERM_TYPE == graphics */
+ struct pci_controller *hose;
+ int h = (pu64[30] >> 24) & 0xff; /* console hose # */
+
+ /*
+ * Our hose numbering matches the console's, so just find
+ * the right one...
+ */
+ for (hose = hose_head; hose; hose = hose->next) {
+ if (hose->index == h) break;
+ }
+
+ if (hose) {
+ printk("Console graphics on hose %d\n", hose->index);
+ pci_vga_hose = hose;
+ }
+ }
+#endif /* CONFIG_VGA_HOSE */
+}
+
+void __init
+titan_init_arch(void)
+{
+#if 0
+ printk("%s: titan_init_arch()\n", __FUNCTION__);
+ printk("%s: CChip registers:\n", __FUNCTION__);
+ printk("%s: CSR_CSC 0x%lx\n", __FUNCTION__, TITAN_cchip->csc.csr);
+ printk("%s: CSR_MTR 0x%lx\n", __FUNCTION__, TITAN_cchip->mtr.csr);
+ printk("%s: CSR_MISC 0x%lx\n", __FUNCTION__, TITAN_cchip->misc.csr);
+ printk("%s: CSR_DIM0 0x%lx\n", __FUNCTION__, TITAN_cchip->dim0.csr);
+ printk("%s: CSR_DIM1 0x%lx\n", __FUNCTION__, TITAN_cchip->dim1.csr);
+ printk("%s: CSR_DIR0 0x%lx\n", __FUNCTION__, TITAN_cchip->dir0.csr);
+ printk("%s: CSR_DIR1 0x%lx\n", __FUNCTION__, TITAN_cchip->dir1.csr);
+ printk("%s: CSR_DRIR 0x%lx\n", __FUNCTION__, TITAN_cchip->drir.csr);
+
+ printk("%s: DChip registers:\n", __FUNCTION__);
+ printk("%s: CSR_DSC 0x%lx\n", __FUNCTION__, TITAN_dchip->dsc.csr);
+ printk("%s: CSR_STR 0x%lx\n", __FUNCTION__, TITAN_dchip->str.csr);
+ printk("%s: CSR_DREV 0x%lx\n", __FUNCTION__, TITAN_dchip->drev.csr);
+#endif
+
+ boot_cpuid = __hard_smp_processor_id();
+
+ /* With multiple PCI busses, we play with I/O as physical addrs. */
+ ioport_resource.end = ~0UL;
+
+ /* PCI DMA Direct Mapping is 1GB at 2GB. */
+ __direct_map_base = 0x80000000;
+ __direct_map_size = 0x40000000;
+
+ /* Init the PA chip(s). */
+ titan_init_pachips(TITAN_pachip0, TITAN_pachip1);
+
+ /* Check for graphic console location (if any). */
+ titan_init_vga_hose();
+}
+
+static void
+titan_kill_one_pachip_port(titan_pachip_port *port, int index)
+{
+ port->wsba[0].csr = saved_config[index].wsba[0];
+ port->wsm[0].csr = saved_config[index].wsm[0];
+ port->tba[0].csr = saved_config[index].tba[0];
+
+ port->wsba[1].csr = saved_config[index].wsba[1];
+ port->wsm[1].csr = saved_config[index].wsm[1];
+ port->tba[1].csr = saved_config[index].tba[1];
+
+ port->wsba[2].csr = saved_config[index].wsba[2];
+ port->wsm[2].csr = saved_config[index].wsm[2];
+ port->tba[2].csr = saved_config[index].tba[2];
+
+ port->wsba[3].csr = saved_config[index].wsba[3];
+ port->wsm[3].csr = saved_config[index].wsm[3];
+ port->tba[3].csr = saved_config[index].tba[3];
+}
+
+static void
+titan_kill_pachips(titan_pachip *pachip0, titan_pachip *pachip1)
+{
+ int pchip1_present = TITAN_cchip->csc.csr & 1L<<14;
+
+ if (pchip1_present) {
+ titan_kill_one_pachip_port(&pachip1->g_port, 1);
+ titan_kill_one_pachip_port(&pachip1->a_port, 3);
+ }
+ titan_kill_one_pachip_port(&pachip0->g_port, 0);
+ titan_kill_one_pachip_port(&pachip0->a_port, 2);
+}
+
+void
+titan_kill_arch(int mode)
+{
+ titan_kill_pachips(TITAN_pachip0, TITAN_pachip1);
+}
+
+
+/*
+ * IO map support.
+ */
+
+void __iomem *
+titan_ioremap(unsigned long addr, unsigned long size)
+{
+ int h = (addr & TITAN_HOSE_MASK) >> TITAN_HOSE_SHIFT;
+ unsigned long baddr = addr & ~TITAN_HOSE_MASK;
+ unsigned long last = baddr + size - 1;
+ struct pci_controller *hose;
+ struct vm_struct *area;
+ unsigned long vaddr;
+ unsigned long *ptes;
+ unsigned long pfn;
+
+ /*
+ * Adjust the addr.
+ */
+#ifdef CONFIG_VGA_HOSE
+ if (pci_vga_hose && __titan_is_mem_vga(addr)) {
+ h = pci_vga_hose->index;
+ addr += pci_vga_hose->mem_space->start;
+ }
+#endif
+
+ /*
+ * Find the hose.
+ */
+ for (hose = hose_head; hose; hose = hose->next)
+ if (hose->index == h)
+ break;
+ if (!hose)
+ return NULL;
+
+ /*
+ * Is it direct-mapped?
+ */
+ if ((baddr >= __direct_map_base) &&
+ ((baddr + size - 1) < __direct_map_base + __direct_map_size)) {
+ vaddr = addr - __direct_map_base + TITAN_MEM_BIAS;
+ return (void __iomem *) vaddr;
+ }
+
+ /*
+ * Check the scatter-gather arena.
+ */
+ if (hose->sg_pci &&
+ baddr >= (unsigned long)hose->sg_pci->dma_base &&
+ last < (unsigned long)hose->sg_pci->dma_base + hose->sg_pci->size){
+
+ /*
+ * Adjust the limits (mappings must be page aligned)
+ */
+ baddr -= hose->sg_pci->dma_base;
+ last -= hose->sg_pci->dma_base;
+ baddr &= PAGE_MASK;
+ size = PAGE_ALIGN(last) - baddr;
+
+ /*
+ * Map it
+ */
+ area = get_vm_area(size, VM_IOREMAP);
+ if (!area)
+ return NULL;
+
+ ptes = hose->sg_pci->ptes;
+ for (vaddr = (unsigned long)area->addr;
+ baddr <= last;
+ baddr += PAGE_SIZE, vaddr += PAGE_SIZE) {
+ pfn = ptes[baddr >> PAGE_SHIFT];
+ if (!(pfn & 1)) {
+ printk("ioremap failed... pte not valid...\n");
+ vfree(area->addr);
+ return NULL;
+ }
+ pfn >>= 1; /* make it a true pfn */
+
+ if (__alpha_remap_area_pages(vaddr,
+ pfn << PAGE_SHIFT,
+ PAGE_SIZE, 0)) {
+ printk("FAILED to map...\n");
+ vfree(area->addr);
+ return NULL;
+ }
+ }
+
+ flush_tlb_all();
+
+ vaddr = (unsigned long)area->addr + (addr & ~PAGE_MASK);
+ return (void __iomem *) vaddr;
+ }
+
+ return NULL;
+}
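+
+/*
+ * A hypothetical caller, for illustration only -- remap a bus address
+ * and read one register through the mapping (example only, not built):
+ */
+#if 0
+static u32
+titan_peek(unsigned long bus_addr)
+{
+ void __iomem *va = titan_ioremap(bus_addr, PAGE_SIZE);
+ u32 val = 0xffffffff;
+
+ if (va) {
+ val = readl(va); /* 32-bit MMIO read */
+ titan_iounmap(va);
+ }
+ return val;
+}
+#endif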
+
+void
+titan_iounmap(volatile void __iomem *xaddr)
+{
+ unsigned long addr = (unsigned long) xaddr;
+ if (addr >= VMALLOC_START)
+ vfree((void *)(PAGE_MASK & addr));
+}
+
+int
+titan_is_mmio(const volatile void __iomem *xaddr)
+{
+ unsigned long addr = (unsigned long) xaddr;
+
+ if (addr >= VMALLOC_START)
+ return 1;
+ else
+ return (addr & 0x100000000UL) == 0;
+}
+
+#ifndef CONFIG_ALPHA_GENERIC
+EXPORT_SYMBOL(titan_ioremap);
+EXPORT_SYMBOL(titan_iounmap);
+EXPORT_SYMBOL(titan_is_mmio);
+#endif
+
+/*
+ * AGP GART Support.
+ */
+#include <linux/agp_backend.h>
+#include <asm/agp_backend.h>
+#include <linux/slab.h>
+#include <linux/delay.h>
+
+struct titan_agp_aperture {
+ struct pci_iommu_arena *arena;
+ long pg_start;
+ long pg_count;
+};
+
+static int
+titan_agp_setup(alpha_agp_info *agp)
+{
+ struct titan_agp_aperture *aper;
+
+ if (!alpha_agpgart_size)
+ return -ENOMEM;
+
+ aper = kmalloc(sizeof(struct titan_agp_aperture), GFP_KERNEL);
+ if (aper == NULL)
+ return -ENOMEM;
+
+ aper->arena = agp->hose->sg_pci;
+ aper->pg_count = alpha_agpgart_size / PAGE_SIZE;
+ aper->pg_start = iommu_reserve(aper->arena, aper->pg_count,
+ aper->pg_count - 1);
+ if (aper->pg_start < 0) {
+ printk(KERN_ERR "Failed to reserve AGP memory\n");
+ kfree(aper);
+ return -ENOMEM;
+ }
+
+ agp->aperture.bus_base =
+ aper->arena->dma_base + aper->pg_start * PAGE_SIZE;
+ agp->aperture.size = aper->pg_count * PAGE_SIZE;
+ agp->aperture.sysdata = aper;
+
+ return 0;
+}
+
+static void
+titan_agp_cleanup(alpha_agp_info *agp)
+{
+ struct titan_agp_aperture *aper = agp->aperture.sysdata;
+ int status;
+
+ status = iommu_release(aper->arena, aper->pg_start, aper->pg_count);
+ if (status == -EBUSY) {
+ printk(KERN_WARNING
+ "Attempted to release bound AGP memory - unbinding\n");
+ iommu_unbind(aper->arena, aper->pg_start, aper->pg_count);
+ status = iommu_release(aper->arena, aper->pg_start,
+ aper->pg_count);
+ }
+ if (status < 0)
+ printk(KERN_ERR "Failed to release AGP memory\n");
+
+ kfree(aper);
+ kfree(agp);
+}
+
+static int
+titan_agp_configure(alpha_agp_info *agp)
+{
+ union TPAchipPCTL pctl;
+ titan_pachip_port *port = agp->private;
+ pctl.pctl_q_whole = port->pctl.csr;
+
+ /* Side-Band Addressing? */
+ pctl.pctl_r_bits.apctl_v_agp_sba_en = agp->mode.bits.sba;
+
+ /* AGP Rate? */
+ pctl.pctl_r_bits.apctl_v_agp_rate = 0; /* 1x */
+ if (agp->mode.bits.rate & 2)
+ pctl.pctl_r_bits.apctl_v_agp_rate = 1; /* 2x */
+#if 0
+ if (agp->mode.bits.rate & 4)
+ pctl.pctl_r_bits.apctl_v_agp_rate = 2; /* 4x */
+#endif
+
+ /* RQ Depth? */
+ pctl.pctl_r_bits.apctl_v_agp_hp_rd = 2;
+ pctl.pctl_r_bits.apctl_v_agp_lp_rd = 7;
+
+ /*
+ * AGP Enable.
+ */
+ pctl.pctl_r_bits.apctl_v_agp_en = agp->mode.bits.enable;
+
+ /* Tell the user. */
+ printk("Enabling AGP: %dX%s\n",
+ 1 << pctl.pctl_r_bits.apctl_v_agp_rate,
+ pctl.pctl_r_bits.apctl_v_agp_sba_en ? " - SBA" : "");
+
+ /* Write it. */
+ port->pctl.csr = pctl.pctl_q_whole;
+
+	/* And wait at least 5000 66MHz cycles, about 76 usec, per the
+	   Titan spec -- 100 usec gives a little headroom. */
+ udelay(100);
+
+ return 0;
+}
+
+static int
+titan_agp_bind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
+{
+ struct titan_agp_aperture *aper = agp->aperture.sysdata;
+ return iommu_bind(aper->arena, aper->pg_start + pg_start,
+ mem->page_count, mem->memory);
+}
+
+static int
+titan_agp_unbind_memory(alpha_agp_info *agp, off_t pg_start, struct agp_memory *mem)
+{
+ struct titan_agp_aperture *aper = agp->aperture.sysdata;
+ return iommu_unbind(aper->arena, aper->pg_start + pg_start,
+ mem->page_count);
+}
+
+static unsigned long
+titan_agp_translate(alpha_agp_info *agp, dma_addr_t addr)
+{
+ struct titan_agp_aperture *aper = agp->aperture.sysdata;
+ unsigned long baddr = addr - aper->arena->dma_base;
+ unsigned long pte;
+
+ if (addr < agp->aperture.bus_base ||
+ addr >= agp->aperture.bus_base + agp->aperture.size) {
+ printk("%s: addr out of range\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
+ pte = aper->arena->ptes[baddr >> PAGE_SHIFT];
+ if (!(pte & 1)) {
+ printk("%s: pte not valid\n", __FUNCTION__);
+ return -EINVAL;
+ }
+
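+	/* Bit 0 of a PTE is the valid bit; the page frame number
+	   occupies the bits above it. */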
+ return (pte >> 1) << PAGE_SHIFT;
+}
+
+struct alpha_agp_ops titan_agp_ops =
+{
+ .setup = titan_agp_setup,
+ .cleanup = titan_agp_cleanup,
+ .configure = titan_agp_configure,
+ .bind = titan_agp_bind_memory,
+ .unbind = titan_agp_unbind_memory,
+ .translate = titan_agp_translate
+};
+
+alpha_agp_info *
+titan_agp_info(void)
+{
+ alpha_agp_info *agp;
+ struct pci_controller *hose;
+ titan_pachip_port *port;
+ int hosenum = -1;
+ union TPAchipPCTL pctl;
+
+ /*
+ * Find the AGP port.
+ */
+ port = &TITAN_pachip0->a_port;
+ if (titan_query_agp(port))
+ hosenum = 2;
+ if (hosenum < 0 &&
+ titan_query_agp(port = &TITAN_pachip1->a_port))
+ hosenum = 3;
+
+ /*
+ * Find the hose the port is on.
+ */
+ for (hose = hose_head; hose; hose = hose->next)
+ if (hose->index == hosenum)
+ break;
+
+ if (!hose || !hose->sg_pci)
+ return NULL;
+
+ /*
+ * Allocate the info structure.
+ */
+	agp = kmalloc(sizeof(*agp), GFP_KERNEL);
+	if (!agp)
+		return NULL;
+
+ /*
+ * Fill it in.
+ */
+ agp->hose = hose;
+ agp->private = port;
+ agp->ops = &titan_agp_ops;
+
+ /*
+ * Aperture - not configured until ops.setup().
+ *
+ * FIXME - should we go ahead and allocate it here?
+ */
+ agp->aperture.bus_base = 0;
+ agp->aperture.size = 0;
+ agp->aperture.sysdata = NULL;
+
+ /*
+ * Capabilities.
+ */
+ agp->capability.lw = 0;
+ agp->capability.bits.rate = 3; /* 2x, 1x */
+ agp->capability.bits.sba = 1;
+ agp->capability.bits.rq = 7; /* 8 - 1 */
+
+ /*
+ * Mode.
+ */
+ pctl.pctl_q_whole = port->pctl.csr;
+ agp->mode.lw = 0;
+ agp->mode.bits.rate = 1 << pctl.pctl_r_bits.apctl_v_agp_rate;
+ agp->mode.bits.sba = pctl.pctl_r_bits.apctl_v_agp_sba_en;
+ agp->mode.bits.rq = 7; /* RQ Depth? */
+ agp->mode.bits.enable = pctl.pctl_r_bits.apctl_v_agp_en;
+
+ return agp;
+}
diff --git a/arch/alpha/kernel/core_tsunami.c b/arch/alpha/kernel/core_tsunami.c
new file mode 100644
index 000000000000..8aa305bd6a2c
--- /dev/null
+++ b/arch/alpha/kernel/core_tsunami.c
@@ -0,0 +1,459 @@
+/*
+ * linux/arch/alpha/kernel/core_tsunami.c
+ *
+ * Based on code written by David A. Rusling (david.rusling@reo.mts.dec.com).
+ *
+ * Code common to all TSUNAMI core logic chips.
+ */
+
+#define __EXTERN_INLINE inline
+#include <asm/io.h>
+#include <asm/core_tsunami.h>
+#undef __EXTERN_INLINE
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+
+#include <asm/ptrace.h>
+#include <asm/smp.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+/* Save Tsunami configuration data as the console had it set up. */
+
+struct
+{
+ unsigned long wsba[4];
+ unsigned long wsm[4];
+ unsigned long tba[4];
+} saved_config[2] __attribute__((common));
+
+/*
+ * NOTE: Herein lie back-to-back mb instructions. They are magic.
+ * One plausible explanation is that the I/O controller does not properly
+ * handle the system transaction. Another involves timing. Ho hum.
+ */
+
+/*
+ * BIOS32-style PCI interface:
+ */
+
+#define DEBUG_CONFIG 0
+
+#if DEBUG_CONFIG
+# define DBG_CFG(args) printk args
+#else
+# define DBG_CFG(args)
+#endif
+
+
+/*
+ * Given a bus, device, and function number, compute the resulting
+ * configuration space address.  Nothing here serializes access to
+ * configuration space, so it is not safe to have concurrent
+ * invocations of the configuration space access routines, but there
+ * really shouldn't be any need for that.
+ *
+ * Note that all config space accesses use Type 1 address format.
+ *
+ * Note also that type 1 is determined by non-zero bus number.
+ *
+ * Type 1:
+ *
+ * 3 3|3 3 2 2|2 2 2 2|2 2 2 2|1 1 1 1|1 1 1 1|1 1
+ * 3 2|1 0 9 8|7 6 5 4|3 2 1 0|9 8 7 6|5 4 3 2|1 0 9 8|7 6 5 4|3 2 1 0
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ * | | | | | | | | | | |B|B|B|B|B|B|B|B|D|D|D|D|D|F|F|F|R|R|R|R|R|R|0|1|
+ * +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
+ *
+ * 31:24 reserved
+ * 23:16 bus number (8 bits = 256 possible buses)
+ * 15:11 device number (5 bits)
+ * 10:8  function number (3 bits)
+ * 7:2   register number (6 bits)
+ *
+ * Notes:
+ * The function number selects which function of a multi-function device
+ * (e.g., SCSI and Ethernet).
+ *
+ * The register field selects a DWORD (32 bit) register offset, so it
+ * is not shifted down by 2 bits; the bottom two bits are simply
+ * "dropped".
+ */
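+/*
+ * For example (illustrative values): bus 1, device 2, function 0,
+ * register 0x10 gives devfn = (2 << 3) | 0 = 0x10, and so
+ * addr = (1 << 16) | (0x10 << 8) | 0x10 = 0x11010, which is then
+ * OR'd with the hose's config_space_base.
+ */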
+
+static int
+mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
+ unsigned long *pci_addr, unsigned char *type1)
+{
+ struct pci_controller *hose = pbus->sysdata;
+ unsigned long addr;
+ u8 bus = pbus->number;
+
+ DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, "
+ "pci_addr=0x%p, type1=0x%p)\n",
+ bus, device_fn, where, pci_addr, type1));
+
+ if (!pbus->parent) /* No parent means peer PCI bus. */
+ bus = 0;
+ *type1 = (bus != 0);
+
+ addr = (bus << 16) | (device_fn << 8) | where;
+ addr |= hose->config_space_base;
+
+ *pci_addr = addr;
+ DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
+ return 0;
+}
+
+static int
+tsunami_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *value)
+{
+ unsigned long addr;
+ unsigned char type1;
+
+ if (mk_conf_addr(bus, devfn, where, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ *value = __kernel_ldbu(*(vucp)addr);
+ break;
+ case 2:
+ *value = __kernel_ldwu(*(vusp)addr);
+ break;
+ case 4:
+ *value = *(vuip)addr;
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+tsunami_write_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 value)
+{
+ unsigned long addr;
+ unsigned char type1;
+
+ if (mk_conf_addr(bus, devfn, where, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ __kernel_stb(value, *(vucp)addr);
+ mb();
+ __kernel_ldbu(*(vucp)addr);
+ break;
+ case 2:
+ __kernel_stw(value, *(vusp)addr);
+ mb();
+ __kernel_ldwu(*(vusp)addr);
+ break;
+ case 4:
+ *(vuip)addr = value;
+ mb();
+ *(vuip)addr;
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops tsunami_pci_ops =
+{
+ .read = tsunami_read_config,
+ .write = tsunami_write_config,
+};
+
+void
+tsunami_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
+{
+ tsunami_pchip *pchip = hose->index ? TSUNAMI_pchip1 : TSUNAMI_pchip0;
+ volatile unsigned long *csr;
+ unsigned long value;
+
+	/* We can invalidate up to 8 tlb entries in one go.  The flush
+	   matches against <31:16> in the pci address. */
+ csr = &pchip->tlbia.csr;
+ if (((start ^ end) & 0xffff0000) == 0)
+ csr = &pchip->tlbiv.csr;
+
+ /* For TBIA, it doesn't matter what value we write. For TBI,
+ it's the shifted tag bits. */
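+	/* E.g. a start address of 0x12345678 yields the tag value
+	   0x12340 here. */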
+ value = (start & 0xffff0000) >> 12;
+
+ *csr = value;
+ mb();
+ *csr;
+}
+
+#ifdef NXM_MACHINE_CHECKS_ON_TSUNAMI
+static long __init
+tsunami_probe_read(volatile unsigned long *vaddr)
+{
+ long dont_care, probe_result;
+ int cpu = smp_processor_id();
+ int s = swpipl(IPL_MCHECK - 1);
+
+ mcheck_taken(cpu) = 0;
+ mcheck_expected(cpu) = 1;
+ mb();
+ dont_care = *vaddr;
+ draina();
+ mcheck_expected(cpu) = 0;
+ probe_result = !mcheck_taken(cpu);
+ mcheck_taken(cpu) = 0;
+ setipl(s);
+
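+	/* Report the (ignored) value we read, mostly for debugging. */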
+ printk("dont_care == 0x%lx\n", dont_care);
+
+ return probe_result;
+}
+
+static long __init
+tsunami_probe_write(volatile unsigned long *vaddr)
+{
+ long true_contents, probe_result = 1;
+
+ TSUNAMI_cchip->misc.csr |= (1L << 28); /* clear NXM... */
+ true_contents = *vaddr;
+ *vaddr = 0;
+ draina();
+ if (TSUNAMI_cchip->misc.csr & (1L << 28)) {
+ int source = (TSUNAMI_cchip->misc.csr >> 29) & 7;
+ TSUNAMI_cchip->misc.csr |= (1L << 28); /* ...and unlock NXS. */
+ probe_result = 0;
+ printk("tsunami_probe_write: unit %d at 0x%016lx\n", source,
+ (unsigned long)vaddr);
+ }
+ if (probe_result)
+ *vaddr = true_contents;
+ return probe_result;
+}
+#else
+#define tsunami_probe_read(ADDR) 1
+#endif /* NXM_MACHINE_CHECKS_ON_TSUNAMI */
+
+#define FN __FUNCTION__
+
+static void __init
+tsunami_init_one_pchip(tsunami_pchip *pchip, int index)
+{
+ struct pci_controller *hose;
+
+ if (tsunami_probe_read(&pchip->pctl.csr) == 0)
+ return;
+
+ hose = alloc_pci_controller();
+ if (index == 0)
+ pci_isa_hose = hose;
+ hose->io_space = alloc_resource();
+ hose->mem_space = alloc_resource();
+
+ /* This is for userland consumption. For some reason, the 40-bit
+ PIO bias that we use in the kernel through KSEG didn't work for
+ the page table based user mappings. So make sure we get the
+ 43-bit PIO bias. */
+ hose->sparse_mem_base = 0;
+ hose->sparse_io_base = 0;
+ hose->dense_mem_base
+ = (TSUNAMI_MEM(index) & 0xffffffffffL) | 0x80000000000L;
+ hose->dense_io_base
+ = (TSUNAMI_IO(index) & 0xffffffffffL) | 0x80000000000L;
+
+ hose->config_space_base = TSUNAMI_CONF(index);
+ hose->index = index;
+
+ hose->io_space->start = TSUNAMI_IO(index) - TSUNAMI_IO_BIAS;
+ hose->io_space->end = hose->io_space->start + TSUNAMI_IO_SPACE - 1;
+ hose->io_space->name = pci_io_names[index];
+ hose->io_space->flags = IORESOURCE_IO;
+
+ hose->mem_space->start = TSUNAMI_MEM(index) - TSUNAMI_MEM_BIAS;
+ hose->mem_space->end = hose->mem_space->start + 0xffffffff;
+ hose->mem_space->name = pci_mem_names[index];
+ hose->mem_space->flags = IORESOURCE_MEM;
+
+ if (request_resource(&ioport_resource, hose->io_space) < 0)
+ printk(KERN_ERR "Failed to request IO on hose %d\n", index);
+ if (request_resource(&iomem_resource, hose->mem_space) < 0)
+ printk(KERN_ERR "Failed to request MEM on hose %d\n", index);
+
+ /*
+ * Save the existing PCI window translations. SRM will
+ * need them when we go to reboot.
+ */
+
+ saved_config[index].wsba[0] = pchip->wsba[0].csr;
+ saved_config[index].wsm[0] = pchip->wsm[0].csr;
+ saved_config[index].tba[0] = pchip->tba[0].csr;
+
+ saved_config[index].wsba[1] = pchip->wsba[1].csr;
+ saved_config[index].wsm[1] = pchip->wsm[1].csr;
+ saved_config[index].tba[1] = pchip->tba[1].csr;
+
+ saved_config[index].wsba[2] = pchip->wsba[2].csr;
+ saved_config[index].wsm[2] = pchip->wsm[2].csr;
+ saved_config[index].tba[2] = pchip->tba[2].csr;
+
+ saved_config[index].wsba[3] = pchip->wsba[3].csr;
+ saved_config[index].wsm[3] = pchip->wsm[3].csr;
+ saved_config[index].tba[3] = pchip->tba[3].csr;
+
+ /*
+ * Set up the PCI to main memory translation windows.
+ *
+ * Note: Window 3 is scatter-gather only
+ *
+ * Window 0 is scatter-gather 8MB at 8MB (for isa)
+ * Window 1 is scatter-gather (up to) 1GB at 1GB
+ * Window 2 is direct access 2GB at 2GB
+ *
+ * NOTE: we need the align_entry settings for Acer devices on ES40,
+ * specifically floppy and IDE when memory is larger than 2GB.
+ */
+ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
+ /* Initially set for 4 PTEs, but will be overridden to 64K for ISA. */
+ hose->sg_isa->align_entry = 4;
+
+ hose->sg_pci = iommu_arena_new(hose, 0x40000000,
+ size_for_memory(0x40000000), 0);
+ hose->sg_pci->align_entry = 4; /* Tsunami caches 4 PTEs at a time */
+
+ __direct_map_base = 0x80000000;
+ __direct_map_size = 0x80000000;
+
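+	/* In each WSBA, bit 0 enables the window and bit 1 selects
+	   scatter-gather, hence "| 3" for SG windows and "| 1" for the
+	   direct-mapped one.  WSM holds (size - 1) masked to 1MB
+	   granularity, so the 8MB ISA window yields 0x00700000. */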
+ pchip->wsba[0].csr = hose->sg_isa->dma_base | 3;
+ pchip->wsm[0].csr = (hose->sg_isa->size - 1) & 0xfff00000;
+ pchip->tba[0].csr = virt_to_phys(hose->sg_isa->ptes);
+
+ pchip->wsba[1].csr = hose->sg_pci->dma_base | 3;
+ pchip->wsm[1].csr = (hose->sg_pci->size - 1) & 0xfff00000;
+ pchip->tba[1].csr = virt_to_phys(hose->sg_pci->ptes);
+
+ pchip->wsba[2].csr = 0x80000000 | 1;
+ pchip->wsm[2].csr = (0x80000000 - 1) & 0xfff00000;
+ pchip->tba[2].csr = 0;
+
+ pchip->wsba[3].csr = 0;
+
+ /* Enable the Monster Window to make DAC pci64 possible. */
+ pchip->pctl.csr |= pctl_m_mwin;
+
+ tsunami_pci_tbi(hose, 0, -1);
+}
+
+void __init
+tsunami_init_arch(void)
+{
+#ifdef NXM_MACHINE_CHECKS_ON_TSUNAMI
+ unsigned long tmp;
+
+ /* Ho hum.. init_arch is called before init_IRQ, but we need to be
+ able to handle machine checks. So install the handler now. */
+ wrent(entInt, 0);
+
+ /* NXMs just don't matter to Tsunami--unless they make it
+ choke completely. */
+ tmp = (unsigned long)(TSUNAMI_cchip - 1);
+ printk("%s: probing bogus address: 0x%016lx\n", FN, bogus_addr);
+ printk("\tprobe %s\n",
+ tsunami_probe_write((unsigned long *)bogus_addr)
+ ? "succeeded" : "failed");
+#endif /* NXM_MACHINE_CHECKS_ON_TSUNAMI */
+
+#if 0
+ printk("%s: CChip registers:\n", FN);
+ printk("%s: CSR_CSC 0x%lx\n", FN, TSUNAMI_cchip->csc.csr);
+ printk("%s: CSR_MTR 0x%lx\n", FN, TSUNAMI_cchip.mtr.csr);
+ printk("%s: CSR_MISC 0x%lx\n", FN, TSUNAMI_cchip->misc.csr);
+ printk("%s: CSR_DIM0 0x%lx\n", FN, TSUNAMI_cchip->dim0.csr);
+ printk("%s: CSR_DIM1 0x%lx\n", FN, TSUNAMI_cchip->dim1.csr);
+ printk("%s: CSR_DIR0 0x%lx\n", FN, TSUNAMI_cchip->dir0.csr);
+ printk("%s: CSR_DIR1 0x%lx\n", FN, TSUNAMI_cchip->dir1.csr);
+ printk("%s: CSR_DRIR 0x%lx\n", FN, TSUNAMI_cchip->drir.csr);
+
+ printk("%s: DChip registers:\n");
+ printk("%s: CSR_DSC 0x%lx\n", FN, TSUNAMI_dchip->dsc.csr);
+ printk("%s: CSR_STR 0x%lx\n", FN, TSUNAMI_dchip->str.csr);
+ printk("%s: CSR_DREV 0x%lx\n", FN, TSUNAMI_dchip->drev.csr);
+#endif
+ /* With multiple PCI busses, we play with I/O as physical addrs. */
+ ioport_resource.end = ~0UL;
+
+ /* Find how many hoses we have, and initialize them. TSUNAMI
+ and TYPHOON can have 2, but might only have 1 (DS10). */
+
+ tsunami_init_one_pchip(TSUNAMI_pchip0, 0);
+ if (TSUNAMI_cchip->csc.csr & 1L<<14)
+ tsunami_init_one_pchip(TSUNAMI_pchip1, 1);
+}
+
+static void
+tsunami_kill_one_pchip(tsunami_pchip *pchip, int index)
+{
+ pchip->wsba[0].csr = saved_config[index].wsba[0];
+ pchip->wsm[0].csr = saved_config[index].wsm[0];
+ pchip->tba[0].csr = saved_config[index].tba[0];
+
+ pchip->wsba[1].csr = saved_config[index].wsba[1];
+ pchip->wsm[1].csr = saved_config[index].wsm[1];
+ pchip->tba[1].csr = saved_config[index].tba[1];
+
+ pchip->wsba[2].csr = saved_config[index].wsba[2];
+ pchip->wsm[2].csr = saved_config[index].wsm[2];
+ pchip->tba[2].csr = saved_config[index].tba[2];
+
+ pchip->wsba[3].csr = saved_config[index].wsba[3];
+ pchip->wsm[3].csr = saved_config[index].wsm[3];
+ pchip->tba[3].csr = saved_config[index].tba[3];
+}
+
+void
+tsunami_kill_arch(int mode)
+{
+ tsunami_kill_one_pchip(TSUNAMI_pchip0, 0);
+ if (TSUNAMI_cchip->csc.csr & 1L<<14)
+ tsunami_kill_one_pchip(TSUNAMI_pchip1, 1);
+}
+
+static inline void
+tsunami_pci_clr_err_1(tsunami_pchip *pchip)
+{
+ pchip->perror.csr;
+ pchip->perror.csr = 0x040;
+ mb();
+ pchip->perror.csr;
+}
+
+static inline void
+tsunami_pci_clr_err(void)
+{
+ tsunami_pci_clr_err_1(TSUNAMI_pchip0);
+
+ /* TSUNAMI and TYPHOON can have 2, but might only have 1 (DS10) */
+ if (TSUNAMI_cchip->csc.csr & 1L<<14)
+ tsunami_pci_clr_err_1(TSUNAMI_pchip1);
+}
+
+void
+tsunami_machine_check(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs * regs)
+{
+ /* Clear error before any reporting. */
+ mb();
+ mb(); /* magic */
+ draina();
+ tsunami_pci_clr_err();
+ wrmces(0x7);
+ mb();
+
+ process_mcheck_info(vector, la_ptr, regs, "TSUNAMI",
+ mcheck_expected(smp_processor_id()));
+}
diff --git a/arch/alpha/kernel/core_wildfire.c b/arch/alpha/kernel/core_wildfire.c
new file mode 100644
index 000000000000..2b767a1bad96
--- /dev/null
+++ b/arch/alpha/kernel/core_wildfire.c
@@ -0,0 +1,658 @@
+/*
+ * linux/arch/alpha/kernel/core_wildfire.c
+ *
+ * Wildfire support.
+ *
+ * Copyright (C) 2000 Andrea Arcangeli <andrea@suse.de> SuSE
+ */
+
+#define __EXTERN_INLINE inline
+#include <asm/io.h>
+#include <asm/core_wildfire.h>
+#undef __EXTERN_INLINE
+
+#include <linux/types.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+
+#include <asm/ptrace.h>
+#include <asm/smp.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+#define DEBUG_CONFIG 0
+#define DEBUG_DUMP_REGS 0
+#define DEBUG_DUMP_CONFIG 1
+
+#if DEBUG_CONFIG
+# define DBG_CFG(args) printk args
+#else
+# define DBG_CFG(args)
+#endif
+
+#if DEBUG_DUMP_REGS
+static void wildfire_dump_pci_regs(int qbbno, int hoseno);
+static void wildfire_dump_pca_regs(int qbbno, int pcano);
+static void wildfire_dump_qsa_regs(int qbbno);
+static void wildfire_dump_qsd_regs(int qbbno);
+static void wildfire_dump_iop_regs(int qbbno);
+static void wildfire_dump_gp_regs(int qbbno);
+#endif
+#if DEBUG_DUMP_CONFIG
+static void wildfire_dump_hardware_config(void);
+#endif
+
+unsigned char wildfire_hard_qbb_map[WILDFIRE_MAX_QBB];
+unsigned char wildfire_soft_qbb_map[WILDFIRE_MAX_QBB];
+#define QBB_MAP_EMPTY 0xff
+
+unsigned long wildfire_hard_qbb_mask;
+unsigned long wildfire_soft_qbb_mask;
+unsigned long wildfire_gp_mask;
+unsigned long wildfire_hs_mask;
+unsigned long wildfire_iop_mask;
+unsigned long wildfire_ior_mask;
+unsigned long wildfire_pca_mask;
+unsigned long wildfire_cpu_mask;
+unsigned long wildfire_mem_mask;
+
+void __init
+wildfire_init_hose(int qbbno, int hoseno)
+{
+ struct pci_controller *hose;
+ wildfire_pci *pci;
+
+ hose = alloc_pci_controller();
+ hose->io_space = alloc_resource();
+ hose->mem_space = alloc_resource();
+
+ /* This is for userland consumption. */
+ hose->sparse_mem_base = 0;
+ hose->sparse_io_base = 0;
+ hose->dense_mem_base = WILDFIRE_MEM(qbbno, hoseno);
+ hose->dense_io_base = WILDFIRE_IO(qbbno, hoseno);
+
+ hose->config_space_base = WILDFIRE_CONF(qbbno, hoseno);
+ hose->index = (qbbno << 3) + hoseno;
+
+ hose->io_space->start = WILDFIRE_IO(qbbno, hoseno) - WILDFIRE_IO_BIAS;
+ hose->io_space->end = hose->io_space->start + WILDFIRE_IO_SPACE - 1;
+ hose->io_space->name = pci_io_names[hoseno];
+ hose->io_space->flags = IORESOURCE_IO;
+
+ hose->mem_space->start = WILDFIRE_MEM(qbbno, hoseno)-WILDFIRE_MEM_BIAS;
+ hose->mem_space->end = hose->mem_space->start + 0xffffffff;
+ hose->mem_space->name = pci_mem_names[hoseno];
+ hose->mem_space->flags = IORESOURCE_MEM;
+
+ if (request_resource(&ioport_resource, hose->io_space) < 0)
+ printk(KERN_ERR "Failed to request IO on qbb %d hose %d\n",
+ qbbno, hoseno);
+ if (request_resource(&iomem_resource, hose->mem_space) < 0)
+ printk(KERN_ERR "Failed to request MEM on qbb %d hose %d\n",
+ qbbno, hoseno);
+
+#if DEBUG_DUMP_REGS
+ wildfire_dump_pci_regs(qbbno, hoseno);
+#endif
+
+ /*
+ * Set up the PCI to main memory translation windows.
+ *
+ * Note: Window 3 is scatter-gather only
+ *
+ * Window 0 is scatter-gather 8MB at 8MB (for isa)
+ * Window 1 is direct access 1GB at 1GB
+ * Window 2 is direct access 1GB at 2GB
+ * Window 3 is scatter-gather 128MB at 3GB
+ * ??? We ought to scale window 3 memory.
+ *
+ */
+ hose->sg_isa = iommu_arena_new(hose, 0x00800000, 0x00800000, 0);
+ hose->sg_pci = iommu_arena_new(hose, 0xc0000000, 0x08000000, 0);
+
+ pci = WILDFIRE_pci(qbbno, hoseno);
+
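+	/* As on Tsunami, wbase bit 0 enables the window and bit 1
+	   selects scatter-gather, hence "| 3" for SG windows and
+	   "| 1" for the direct-mapped ones. */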
+ pci->pci_window[0].wbase.csr = hose->sg_isa->dma_base | 3;
+ pci->pci_window[0].wmask.csr = (hose->sg_isa->size - 1) & 0xfff00000;
+ pci->pci_window[0].tbase.csr = virt_to_phys(hose->sg_isa->ptes);
+
+ pci->pci_window[1].wbase.csr = 0x40000000 | 1;
+	pci->pci_window[1].wmask.csr = (0x40000000 - 1) & 0xfff00000;
+ pci->pci_window[1].tbase.csr = 0;
+
+ pci->pci_window[2].wbase.csr = 0x80000000 | 1;
+	pci->pci_window[2].wmask.csr = (0x40000000 - 1) & 0xfff00000;
+ pci->pci_window[2].tbase.csr = 0x40000000;
+
+ pci->pci_window[3].wbase.csr = hose->sg_pci->dma_base | 3;
+ pci->pci_window[3].wmask.csr = (hose->sg_pci->size - 1) & 0xfff00000;
+ pci->pci_window[3].tbase.csr = virt_to_phys(hose->sg_pci->ptes);
+
+ wildfire_pci_tbi(hose, 0, 0); /* Flush TLB at the end. */
+}
+
+void __init
+wildfire_init_pca(int qbbno, int pcano)
+{
+
+ /* Test for PCA existence first. */
+ if (!WILDFIRE_PCA_EXISTS(qbbno, pcano))
+ return;
+
+#if DEBUG_DUMP_REGS
+ wildfire_dump_pca_regs(qbbno, pcano);
+#endif
+
+ /* Do both hoses of the PCA. */
+ wildfire_init_hose(qbbno, (pcano << 1) + 0);
+ wildfire_init_hose(qbbno, (pcano << 1) + 1);
+}
+
+void __init
+wildfire_init_qbb(int qbbno)
+{
+ int pcano;
+
+ /* Test for QBB existence first. */
+ if (!WILDFIRE_QBB_EXISTS(qbbno))
+ return;
+
+#if DEBUG_DUMP_REGS
+ wildfire_dump_qsa_regs(qbbno);
+ wildfire_dump_qsd_regs(qbbno);
+ wildfire_dump_iop_regs(qbbno);
+ wildfire_dump_gp_regs(qbbno);
+#endif
+
+ /* Init all PCAs here. */
+ for (pcano = 0; pcano < WILDFIRE_PCA_PER_QBB; pcano++) {
+ wildfire_init_pca(qbbno, pcano);
+ }
+}
+
+void __init
+wildfire_hardware_probe(void)
+{
+ unsigned long temp;
+ unsigned int hard_qbb, soft_qbb;
+ wildfire_fast_qsd *fast = WILDFIRE_fast_qsd();
+ wildfire_qsd *qsd;
+ wildfire_qsa *qsa;
+ wildfire_iop *iop;
+ wildfire_gp *gp;
+ wildfire_ne *ne;
+ wildfire_fe *fe;
+ int i;
+
+ temp = fast->qsd_whami.csr;
+#if 0
+ printk(KERN_ERR "fast QSD_WHAMI at base %p is 0x%lx\n", fast, temp);
+#endif
+
+ hard_qbb = (temp >> 8) & 7;
+ soft_qbb = (temp >> 4) & 7;
+
+ /* Init the HW configuration variables. */
+ wildfire_hard_qbb_mask = (1 << hard_qbb);
+ wildfire_soft_qbb_mask = (1 << soft_qbb);
+
+ wildfire_gp_mask = 0;
+ wildfire_hs_mask = 0;
+ wildfire_iop_mask = 0;
+ wildfire_ior_mask = 0;
+ wildfire_pca_mask = 0;
+
+ wildfire_cpu_mask = 0;
+ wildfire_mem_mask = 0;
+
+ memset(wildfire_hard_qbb_map, QBB_MAP_EMPTY, WILDFIRE_MAX_QBB);
+ memset(wildfire_soft_qbb_map, QBB_MAP_EMPTY, WILDFIRE_MAX_QBB);
+
+ /* First, determine which QBBs are present. */
+ qsa = WILDFIRE_qsa(soft_qbb);
+
+ temp = qsa->qsa_qbb_id.csr;
+#if 0
+ printk(KERN_ERR "QSA_QBB_ID at base %p is 0x%lx\n", qsa, temp);
+#endif
+
+ if (temp & 0x40) /* Is there an HS? */
+ wildfire_hs_mask = 1;
+
+ if (temp & 0x20) { /* Is there a GP? */
+ gp = WILDFIRE_gp(soft_qbb);
+ temp = 0;
+ for (i = 0; i < 4; i++) {
+ temp |= gp->gpa_qbb_map[i].csr << (i * 8);
+#if 0
+ printk(KERN_ERR "GPA_QBB_MAP[%d] at base %p is 0x%lx\n",
+ i, gp, temp);
+#endif
+ }
+
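+		/* Each nibble of the assembled map: bit 3 = QBB
+		   present, bits 2:0 = its soft QBB number. */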
+ for (hard_qbb = 0; hard_qbb < WILDFIRE_MAX_QBB; hard_qbb++) {
+ if (temp & 8) { /* Is there a QBB? */
+ soft_qbb = temp & 7;
+ wildfire_hard_qbb_mask |= (1 << hard_qbb);
+ wildfire_soft_qbb_mask |= (1 << soft_qbb);
+ }
+ temp >>= 4;
+ }
+ wildfire_gp_mask = wildfire_soft_qbb_mask;
+ }
+
+ /* Next determine each QBBs resources. */
+ for (soft_qbb = 0; soft_qbb < WILDFIRE_MAX_QBB; soft_qbb++) {
+ if (WILDFIRE_QBB_EXISTS(soft_qbb)) {
+ qsd = WILDFIRE_qsd(soft_qbb);
+ temp = qsd->qsd_whami.csr;
+#if 0
+ printk(KERN_ERR "QSD_WHAMI at base %p is 0x%lx\n", qsd, temp);
+#endif
+ hard_qbb = (temp >> 8) & 7;
+ wildfire_hard_qbb_map[hard_qbb] = soft_qbb;
+ wildfire_soft_qbb_map[soft_qbb] = hard_qbb;
+
+ qsa = WILDFIRE_qsa(soft_qbb);
+ temp = qsa->qsa_qbb_pop[0].csr;
+#if 0
+ printk(KERN_ERR "QSA_QBB_POP_0 at base %p is 0x%lx\n", qsa, temp);
+#endif
+ wildfire_cpu_mask |= ((temp >> 0) & 0xf) << (soft_qbb << 2);
+ wildfire_mem_mask |= ((temp >> 4) & 0xf) << (soft_qbb << 2);
+
+ temp = qsa->qsa_qbb_pop[1].csr;
+#if 0
+ printk(KERN_ERR "QSA_QBB_POP_1 at base %p is 0x%lx\n", qsa, temp);
+#endif
+ wildfire_iop_mask |= (1 << soft_qbb);
+ wildfire_ior_mask |= ((temp >> 4) & 0xf) << (soft_qbb << 2);
+
+ temp = qsa->qsa_qbb_id.csr;
+#if 0
+ printk(KERN_ERR "QSA_QBB_ID at %p is 0x%lx\n", qsa, temp);
+#endif
+ if (temp & 0x20)
+ wildfire_gp_mask |= (1 << soft_qbb);
+
+ /* Probe for PCA existence here. */
+ for (i = 0; i < WILDFIRE_PCA_PER_QBB; i++) {
+ iop = WILDFIRE_iop(soft_qbb);
+ ne = WILDFIRE_ne(soft_qbb, i);
+ fe = WILDFIRE_fe(soft_qbb, i);
+
+ if ((iop->iop_hose[i].init.csr & 1) == 1 &&
+ ((ne->ne_what_am_i.csr & 0xf00000300UL) == 0x100000300UL) &&
+ ((fe->fe_what_am_i.csr & 0xf00000300UL) == 0x100000200UL))
+ {
+ wildfire_pca_mask |= 1 << ((soft_qbb << 2) + i);
+ }
+ }
+
+ }
+ }
+#if DEBUG_DUMP_CONFIG
+ wildfire_dump_hardware_config();
+#endif
+}
+
+void __init
+wildfire_init_arch(void)
+{
+ int qbbno;
+
+ /* With multiple PCI buses, we play with I/O as physical addrs. */
+ ioport_resource.end = ~0UL;
+
+ /* Probe the hardware for info about configuration. */
+ wildfire_hardware_probe();
+
+ /* Now init all the found QBBs. */
+ for (qbbno = 0; qbbno < WILDFIRE_MAX_QBB; qbbno++) {
+ wildfire_init_qbb(qbbno);
+ }
+
+ /* Normal direct PCI DMA mapping. */
+ __direct_map_base = 0x40000000UL;
+ __direct_map_size = 0x80000000UL;
+}
+
+void
+wildfire_machine_check(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs * regs)
+{
+ mb();
+ mb(); /* magic */
+ draina();
+ /* FIXME: clear pci errors */
+ wrmces(0x7);
+ mb();
+
+ process_mcheck_info(vector, la_ptr, regs, "WILDFIRE",
+ mcheck_expected(smp_processor_id()));
+}
+
+void
+wildfire_kill_arch(int mode)
+{
+}
+
+void
+wildfire_pci_tbi(struct pci_controller *hose, dma_addr_t start, dma_addr_t end)
+{
+ int qbbno = hose->index >> 3;
+ int hoseno = hose->index & 7;
+ wildfire_pci *pci = WILDFIRE_pci(qbbno, hoseno);
+
+ mb();
+ pci->pci_flush_tlb.csr; /* reading does the trick */
+}
+
+static int
+mk_conf_addr(struct pci_bus *pbus, unsigned int device_fn, int where,
+ unsigned long *pci_addr, unsigned char *type1)
+{
+ struct pci_controller *hose = pbus->sysdata;
+ unsigned long addr;
+ u8 bus = pbus->number;
+
+ DBG_CFG(("mk_conf_addr(bus=%d ,device_fn=0x%x, where=0x%x, "
+ "pci_addr=0x%p, type1=0x%p)\n",
+ bus, device_fn, where, pci_addr, type1));
+
+ if (!pbus->parent) /* No parent means peer PCI bus. */
+ bus = 0;
+ *type1 = (bus != 0);
+
+ addr = (bus << 16) | (device_fn << 8) | where;
+ addr |= hose->config_space_base;
+
+ *pci_addr = addr;
+ DBG_CFG(("mk_conf_addr: returning pci_addr 0x%lx\n", addr));
+ return 0;
+}
+
+static int
+wildfire_read_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 *value)
+{
+ unsigned long addr;
+ unsigned char type1;
+
+ if (mk_conf_addr(bus, devfn, where, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ *value = __kernel_ldbu(*(vucp)addr);
+ break;
+ case 2:
+ *value = __kernel_ldwu(*(vusp)addr);
+ break;
+ case 4:
+ *value = *(vuip)addr;
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+static int
+wildfire_write_config(struct pci_bus *bus, unsigned int devfn, int where,
+ int size, u32 value)
+{
+ unsigned long addr;
+ unsigned char type1;
+
+ if (mk_conf_addr(bus, devfn, where, &addr, &type1))
+ return PCIBIOS_DEVICE_NOT_FOUND;
+
+ switch (size) {
+ case 1:
+ __kernel_stb(value, *(vucp)addr);
+ mb();
+ __kernel_ldbu(*(vucp)addr);
+ break;
+ case 2:
+ __kernel_stw(value, *(vusp)addr);
+ mb();
+ __kernel_ldwu(*(vusp)addr);
+ break;
+ case 4:
+ *(vuip)addr = value;
+ mb();
+ *(vuip)addr;
+ break;
+ }
+
+ return PCIBIOS_SUCCESSFUL;
+}
+
+struct pci_ops wildfire_pci_ops =
+{
+ .read = wildfire_read_config,
+ .write = wildfire_write_config,
+};
+
+
+/*
+ * NUMA Support
+ */
+int wildfire_pa_to_nid(unsigned long pa)
+{
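+	/* Nodes are 64GB (2^36 bytes) apart, so the node id is simply
+	   pa >> 36. */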
+ return pa >> 36;
+}
+
+int wildfire_cpuid_to_nid(int cpuid)
+{
+ /* assume 4 CPUs per node */
+ return cpuid >> 2;
+}
+
+unsigned long wildfire_node_mem_start(int nid)
+{
+ /* 64GB per node */
+ return (unsigned long)nid * (64UL * 1024 * 1024 * 1024);
+}
+
+unsigned long wildfire_node_mem_size(int nid)
+{
+ /* 64GB per node */
+ return 64UL * 1024 * 1024 * 1024;
+}
+
+#if DEBUG_DUMP_REGS
+
+static void __init
+wildfire_dump_pci_regs(int qbbno, int hoseno)
+{
+ wildfire_pci *pci = WILDFIRE_pci(qbbno, hoseno);
+ int i;
+
+ printk(KERN_ERR "PCI registers for QBB %d hose %d (%p)\n",
+ qbbno, hoseno, pci);
+
+ printk(KERN_ERR " PCI_IO_ADDR_EXT: 0x%16lx\n",
+ pci->pci_io_addr_ext.csr);
+ printk(KERN_ERR " PCI_CTRL: 0x%16lx\n", pci->pci_ctrl.csr);
+ printk(KERN_ERR " PCI_ERR_SUM: 0x%16lx\n", pci->pci_err_sum.csr);
+ printk(KERN_ERR " PCI_ERR_ADDR: 0x%16lx\n", pci->pci_err_addr.csr);
+ printk(KERN_ERR " PCI_STALL_CNT: 0x%16lx\n", pci->pci_stall_cnt.csr);
+ printk(KERN_ERR " PCI_PEND_INT: 0x%16lx\n", pci->pci_pend_int.csr);
+ printk(KERN_ERR " PCI_SENT_INT: 0x%16lx\n", pci->pci_sent_int.csr);
+
+ printk(KERN_ERR " DMA window registers for QBB %d hose %d (%p)\n",
+ qbbno, hoseno, pci);
+ for (i = 0; i < 4; i++) {
+ printk(KERN_ERR " window %d: 0x%16lx 0x%16lx 0x%16lx\n", i,
+ pci->pci_window[i].wbase.csr,
+ pci->pci_window[i].wmask.csr,
+ pci->pci_window[i].tbase.csr);
+ }
+ printk(KERN_ERR "\n");
+}
+
+static void __init
+wildfire_dump_pca_regs(int qbbno, int pcano)
+{
+ wildfire_pca *pca = WILDFIRE_pca(qbbno, pcano);
+ int i;
+
+ printk(KERN_ERR "PCA registers for QBB %d PCA %d (%p)\n",
+ qbbno, pcano, pca);
+
+ printk(KERN_ERR " PCA_WHAT_AM_I: 0x%16lx\n", pca->pca_what_am_i.csr);
+ printk(KERN_ERR " PCA_ERR_SUM: 0x%16lx\n", pca->pca_err_sum.csr);
+ printk(KERN_ERR " PCA_PEND_INT: 0x%16lx\n", pca->pca_pend_int.csr);
+ printk(KERN_ERR " PCA_SENT_INT: 0x%16lx\n", pca->pca_sent_int.csr);
+ printk(KERN_ERR " PCA_STDIO_EL: 0x%16lx\n",
+ pca->pca_stdio_edge_level.csr);
+
+ printk(KERN_ERR " PCA target registers for QBB %d PCA %d (%p)\n",
+ qbbno, pcano, pca);
+ for (i = 0; i < 4; i++) {
+ printk(KERN_ERR " target %d: 0x%16lx 0x%16lx\n", i,
+ pca->pca_int[i].target.csr,
+ pca->pca_int[i].enable.csr);
+ }
+
+ printk(KERN_ERR "\n");
+}
+
+static void __init
+wildfire_dump_qsa_regs(int qbbno)
+{
+ wildfire_qsa *qsa = WILDFIRE_qsa(qbbno);
+ int i;
+
+ printk(KERN_ERR "QSA registers for QBB %d (%p)\n", qbbno, qsa);
+
+ printk(KERN_ERR " QSA_QBB_ID: 0x%16lx\n", qsa->qsa_qbb_id.csr);
+ printk(KERN_ERR " QSA_PORT_ENA: 0x%16lx\n", qsa->qsa_port_ena.csr);
+ printk(KERN_ERR " QSA_REF_INT: 0x%16lx\n", qsa->qsa_ref_int.csr);
+
+ for (i = 0; i < 5; i++)
+ printk(KERN_ERR " QSA_CONFIG_%d: 0x%16lx\n",
+ i, qsa->qsa_config[i].csr);
+
+ for (i = 0; i < 2; i++)
+ printk(KERN_ERR " QSA_QBB_POP_%d: 0x%16lx\n",
+		       i, qsa->qsa_qbb_pop[i].csr);
+
+ printk(KERN_ERR "\n");
+}
+
+static void __init
+wildfire_dump_qsd_regs(int qbbno)
+{
+ wildfire_qsd *qsd = WILDFIRE_qsd(qbbno);
+
+ printk(KERN_ERR "QSD registers for QBB %d (%p)\n", qbbno, qsd);
+
+ printk(KERN_ERR " QSD_WHAMI: 0x%16lx\n", qsd->qsd_whami.csr);
+ printk(KERN_ERR " QSD_REV: 0x%16lx\n", qsd->qsd_rev.csr);
+ printk(KERN_ERR " QSD_PORT_PRESENT: 0x%16lx\n",
+ qsd->qsd_port_present.csr);
+ printk(KERN_ERR " QSD_PORT_ACTUVE: 0x%16lx\n",
+ qsd->qsd_port_active.csr);
+ printk(KERN_ERR " QSD_FAULT_ENA: 0x%16lx\n",
+ qsd->qsd_fault_ena.csr);
+ printk(KERN_ERR " QSD_CPU_INT_ENA: 0x%16lx\n",
+ qsd->qsd_cpu_int_ena.csr);
+ printk(KERN_ERR " QSD_MEM_CONFIG: 0x%16lx\n",
+ qsd->qsd_mem_config.csr);
+ printk(KERN_ERR " QSD_ERR_SUM: 0x%16lx\n",
+ qsd->qsd_err_sum.csr);
+
+ printk(KERN_ERR "\n");
+}
+
+static void __init
+wildfire_dump_iop_regs(int qbbno)
+{
+ wildfire_iop *iop = WILDFIRE_iop(qbbno);
+ int i;
+
+ printk(KERN_ERR "IOP registers for QBB %d (%p)\n", qbbno, iop);
+
+ printk(KERN_ERR " IOA_CONFIG: 0x%16lx\n", iop->ioa_config.csr);
+ printk(KERN_ERR " IOD_CONFIG: 0x%16lx\n", iop->iod_config.csr);
+ printk(KERN_ERR " IOP_SWITCH_CREDITS: 0x%16lx\n",
+ iop->iop_switch_credits.csr);
+ printk(KERN_ERR " IOP_HOSE_CREDITS: 0x%16lx\n",
+ iop->iop_hose_credits.csr);
+
+ for (i = 0; i < 4; i++)
+ printk(KERN_ERR " IOP_HOSE_%d_INIT: 0x%16lx\n",
+ i, iop->iop_hose[i].init.csr);
+ for (i = 0; i < 4; i++)
+ printk(KERN_ERR " IOP_DEV_INT_TARGET_%d: 0x%16lx\n",
+ i, iop->iop_dev_int[i].target.csr);
+
+ printk(KERN_ERR "\n");
+}
+
+static void __init
+wildfire_dump_gp_regs(int qbbno)
+{
+ wildfire_gp *gp = WILDFIRE_gp(qbbno);
+ int i;
+
+ printk(KERN_ERR "GP registers for QBB %d (%p)\n", qbbno, gp);
+ for (i = 0; i < 4; i++)
+ printk(KERN_ERR " GPA_QBB_MAP_%d: 0x%16lx\n",
+ i, gp->gpa_qbb_map[i].csr);
+
+ printk(KERN_ERR " GPA_MEM_POP_MAP: 0x%16lx\n",
+ gp->gpa_mem_pop_map.csr);
+ printk(KERN_ERR " GPA_SCRATCH: 0x%16lx\n", gp->gpa_scratch.csr);
+ printk(KERN_ERR " GPA_DIAG: 0x%16lx\n", gp->gpa_diag.csr);
+ printk(KERN_ERR " GPA_CONFIG_0: 0x%16lx\n", gp->gpa_config_0.csr);
+ printk(KERN_ERR " GPA_INIT_ID: 0x%16lx\n", gp->gpa_init_id.csr);
+ printk(KERN_ERR " GPA_CONFIG_2: 0x%16lx\n", gp->gpa_config_2.csr);
+
+ printk(KERN_ERR "\n");
+}
+#endif /* DUMP_REGS */
+
+#if DEBUG_DUMP_CONFIG
+static void __init
+wildfire_dump_hardware_config(void)
+{
+ int i;
+
+ printk(KERN_ERR "Probed Hardware Configuration\n");
+
+ printk(KERN_ERR " hard_qbb_mask: 0x%16lx\n", wildfire_hard_qbb_mask);
+ printk(KERN_ERR " soft_qbb_mask: 0x%16lx\n", wildfire_soft_qbb_mask);
+
+ printk(KERN_ERR " gp_mask: 0x%16lx\n", wildfire_gp_mask);
+ printk(KERN_ERR " hs_mask: 0x%16lx\n", wildfire_hs_mask);
+ printk(KERN_ERR " iop_mask: 0x%16lx\n", wildfire_iop_mask);
+ printk(KERN_ERR " ior_mask: 0x%16lx\n", wildfire_ior_mask);
+ printk(KERN_ERR " pca_mask: 0x%16lx\n", wildfire_pca_mask);
+
+ printk(KERN_ERR " cpu_mask: 0x%16lx\n", wildfire_cpu_mask);
+ printk(KERN_ERR " mem_mask: 0x%16lx\n", wildfire_mem_mask);
+
+ printk(" hard_qbb_map: ");
+ for (i = 0; i < WILDFIRE_MAX_QBB; i++)
+ if (wildfire_hard_qbb_map[i] == QBB_MAP_EMPTY)
+ printk("--- ");
+ else
+ printk("%3d ", wildfire_hard_qbb_map[i]);
+ printk("\n");
+
+ printk(" soft_qbb_map: ");
+ for (i = 0; i < WILDFIRE_MAX_QBB; i++)
+ if (wildfire_soft_qbb_map[i] == QBB_MAP_EMPTY)
+ printk("--- ");
+ else
+ printk("%3d ", wildfire_soft_qbb_map[i]);
+ printk("\n");
+}
+#endif /* DUMP_CONFIG */
diff --git a/arch/alpha/kernel/entry.S b/arch/alpha/kernel/entry.S
new file mode 100644
index 000000000000..f0927ee53f29
--- /dev/null
+++ b/arch/alpha/kernel/entry.S
@@ -0,0 +1,957 @@
+/*
+ * arch/alpha/kernel/entry.S
+ *
+ * Kernel entry-points.
+ */
+
+#include <linux/config.h>
+#include <asm/asm_offsets.h>
+#include <asm/thread_info.h>
+#include <asm/pal.h>
+#include <asm/errno.h>
+#include <asm/unistd.h>
+
+ .text
+ .set noat
+
+/* Stack offsets. */
+#define SP_OFF 184
+#define SWITCH_STACK_SIZE 320
+
+/*
+ * This defines the normal kernel pt-regs layout.
+ *
+ * regs 9-15 preserved by C code
+ * regs 16-18 saved by PAL-code
+ * regs 29-30 saved and set up by PAL-code
+ * JRP - Save regs 16-18 in a special area of the stack, so that
+ * the palcode-provided values are available to the signal handler.
+ */
+
+#define SAVE_ALL \
+ subq $sp, SP_OFF, $sp; \
+ stq $0, 0($sp); \
+ stq $1, 8($sp); \
+ stq $2, 16($sp); \
+ stq $3, 24($sp); \
+ stq $4, 32($sp); \
+ stq $28, 144($sp); \
+ lda $2, alpha_mv; \
+ stq $5, 40($sp); \
+ stq $6, 48($sp); \
+ stq $7, 56($sp); \
+ stq $8, 64($sp); \
+ stq $19, 72($sp); \
+ stq $20, 80($sp); \
+ stq $21, 88($sp); \
+ ldq $2, HAE_CACHE($2); \
+ stq $22, 96($sp); \
+ stq $23, 104($sp); \
+ stq $24, 112($sp); \
+ stq $25, 120($sp); \
+ stq $26, 128($sp); \
+ stq $27, 136($sp); \
+ stq $2, 152($sp); \
+ stq $16, 160($sp); \
+ stq $17, 168($sp); \
+ stq $18, 176($sp)
+
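+/* RESTORE_ALL also restores the HAE: if the HAE value saved in slot
+   152 differs from the cached value in alpha_mv, it is written back
+   to the HAE register before the remaining registers are reloaded. */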
+#define RESTORE_ALL \
+ lda $19, alpha_mv; \
+ ldq $0, 0($sp); \
+ ldq $1, 8($sp); \
+ ldq $2, 16($sp); \
+ ldq $3, 24($sp); \
+ ldq $21, 152($sp); \
+ ldq $20, HAE_CACHE($19); \
+ ldq $4, 32($sp); \
+ ldq $5, 40($sp); \
+ ldq $6, 48($sp); \
+ ldq $7, 56($sp); \
+ subq $20, $21, $20; \
+ ldq $8, 64($sp); \
+ beq $20, 99f; \
+ ldq $20, HAE_REG($19); \
+ stq $21, HAE_CACHE($19); \
+ stq $21, 0($20); \
+ ldq $0, 0($sp); \
+ ldq $1, 8($sp); \
+99:; \
+ ldq $19, 72($sp); \
+ ldq $20, 80($sp); \
+ ldq $21, 88($sp); \
+ ldq $22, 96($sp); \
+ ldq $23, 104($sp); \
+ ldq $24, 112($sp); \
+ ldq $25, 120($sp); \
+ ldq $26, 128($sp); \
+ ldq $27, 136($sp); \
+ ldq $28, 144($sp); \
+ addq $sp, SP_OFF, $sp
+
+/*
+ * Non-syscall kernel entry points.
+ */
+
+ .align 4
+ .globl entInt
+ .ent entInt
+entInt:
+ SAVE_ALL
+ lda $8, 0x3fff
+ lda $26, ret_from_sys_call
+ bic $sp, $8, $8
+ mov $sp, $19
+ jsr $31, do_entInt
+.end entInt
+
+ .align 4
+ .globl entArith
+ .ent entArith
+entArith:
+ SAVE_ALL
+ lda $8, 0x3fff
+ lda $26, ret_from_sys_call
+ bic $sp, $8, $8
+ mov $sp, $18
+ jsr $31, do_entArith
+.end entArith
+
+ .align 4
+ .globl entMM
+ .ent entMM
+entMM:
+ SAVE_ALL
+/* save $9 - $15 so the inline exception code can manipulate them. */
+ subq $sp, 56, $sp
+ stq $9, 0($sp)
+ stq $10, 8($sp)
+ stq $11, 16($sp)
+ stq $12, 24($sp)
+ stq $13, 32($sp)
+ stq $14, 40($sp)
+ stq $15, 48($sp)
+ addq $sp, 56, $19
+/* handle the fault */
+ lda $8, 0x3fff
+ bic $sp, $8, $8
+ jsr $26, do_page_fault
+/* reload the registers after the exception code played. */
+ ldq $9, 0($sp)
+ ldq $10, 8($sp)
+ ldq $11, 16($sp)
+ ldq $12, 24($sp)
+ ldq $13, 32($sp)
+ ldq $14, 40($sp)
+ ldq $15, 48($sp)
+ addq $sp, 56, $sp
+/* finish up the syscall as normal. */
+ br ret_from_sys_call
+.end entMM
+
+ .align 4
+ .globl entIF
+ .ent entIF
+entIF:
+ SAVE_ALL
+ lda $8, 0x3fff
+ lda $26, ret_from_sys_call
+ bic $sp, $8, $8
+ mov $sp, $17
+ jsr $31, do_entIF
+.end entIF
+
+ .align 4
+ .globl entUna
+ .ent entUna
+entUna:
+ lda $sp, -256($sp)
+ stq $0, 0($sp)
+ ldq $0, 256($sp) /* get PS */
+ stq $1, 8($sp)
+ stq $2, 16($sp)
+ stq $3, 24($sp)
+ and $0, 8, $0 /* user mode? */
+ stq $4, 32($sp)
+ bne $0, entUnaUser /* yup -> do user-level unaligned fault */
+ stq $5, 40($sp)
+ stq $6, 48($sp)
+ stq $7, 56($sp)
+ stq $8, 64($sp)
+ stq $9, 72($sp)
+ stq $10, 80($sp)
+ stq $11, 88($sp)
+ stq $12, 96($sp)
+ stq $13, 104($sp)
+ stq $14, 112($sp)
+ stq $15, 120($sp)
+ /* 16-18 PAL-saved */
+ stq $19, 152($sp)
+ stq $20, 160($sp)
+ stq $21, 168($sp)
+ stq $22, 176($sp)
+ stq $23, 184($sp)
+ stq $24, 192($sp)
+ stq $25, 200($sp)
+ stq $26, 208($sp)
+ stq $27, 216($sp)
+ stq $28, 224($sp)
+ stq $gp, 232($sp)
+ lda $8, 0x3fff
+ stq $31, 248($sp)
+ bic $sp, $8, $8
+ jsr $26, do_entUna
+ ldq $0, 0($sp)
+ ldq $1, 8($sp)
+ ldq $2, 16($sp)
+ ldq $3, 24($sp)
+ ldq $4, 32($sp)
+ ldq $5, 40($sp)
+ ldq $6, 48($sp)
+ ldq $7, 56($sp)
+ ldq $8, 64($sp)
+ ldq $9, 72($sp)
+ ldq $10, 80($sp)
+ ldq $11, 88($sp)
+ ldq $12, 96($sp)
+ ldq $13, 104($sp)
+ ldq $14, 112($sp)
+ ldq $15, 120($sp)
+ /* 16-18 PAL-saved */
+ ldq $19, 152($sp)
+ ldq $20, 160($sp)
+ ldq $21, 168($sp)
+ ldq $22, 176($sp)
+ ldq $23, 184($sp)
+ ldq $24, 192($sp)
+ ldq $25, 200($sp)
+ ldq $26, 208($sp)
+ ldq $27, 216($sp)
+ ldq $28, 224($sp)
+ ldq $gp, 232($sp)
+ lda $sp, 256($sp)
+ call_pal PAL_rti
+.end entUna
+
+ .align 4
+ .ent entUnaUser
+entUnaUser:
+ ldq $0, 0($sp) /* restore original $0 */
+ lda $sp, 256($sp) /* pop entUna's stack frame */
+ SAVE_ALL /* setup normal kernel stack */
+ lda $sp, -56($sp)
+ stq $9, 0($sp)
+ stq $10, 8($sp)
+ stq $11, 16($sp)
+ stq $12, 24($sp)
+ stq $13, 32($sp)
+ stq $14, 40($sp)
+ stq $15, 48($sp)
+ lda $8, 0x3fff
+ addq $sp, 56, $19
+ bic $sp, $8, $8
+ jsr $26, do_entUnaUser
+ ldq $9, 0($sp)
+ ldq $10, 8($sp)
+ ldq $11, 16($sp)
+ ldq $12, 24($sp)
+ ldq $13, 32($sp)
+ ldq $14, 40($sp)
+ ldq $15, 48($sp)
+ lda $sp, 56($sp)
+ br ret_from_sys_call
+.end entUnaUser
+
+ .align 4
+ .globl entDbg
+ .ent entDbg
+entDbg:
+ SAVE_ALL
+ lda $8, 0x3fff
+ lda $26, ret_from_sys_call
+ bic $sp, $8, $8
+ mov $sp, $16
+ jsr $31, do_entDbg
+.end entDbg
+
+/*
+ * The system call entry point is special.  Most importantly, it looks
+ * like a function call to userspace as far as clobbered registers are
+ * concerned.  We do preserve the argument registers (for syscall
+ * restarts) and $26 (for leaf syscall functions).
+ *
+ * So much for theory. We don't take advantage of this yet.
+ *
+ * Note that a0-a2 are not saved by PALcode as with the other entry points.
+ */
+
+ .align 4
+ .globl entSys
+ .globl ret_from_sys_call
+ .ent entSys
+entSys:
+ SAVE_ALL
+ lda $8, 0x3fff
+ bic $sp, $8, $8
+ lda $4, NR_SYSCALLS($31)
+ stq $16, SP_OFF+24($sp)
+ lda $5, sys_call_table
+ lda $27, sys_ni_syscall
+ cmpult $0, $4, $4
+ ldl $3, TI_FLAGS($8)
+ stq $17, SP_OFF+32($sp)
+ s8addq $0, $5, $5
+ stq $18, SP_OFF+40($sp)
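+	/* The low bit of TI_FLAGS is the syscall-trace flag; take the
+	   strace path if it is set. */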
+ blbs $3, strace
+ beq $4, 1f
+ ldq $27, 0($5)
+1: jsr $26, ($27), alpha_ni_syscall
+ ldgp $gp, 0($26)
+ blt $0, $syscall_error /* the call failed */
+ stq $0, 0($sp)
+ stq $31, 72($sp) /* a3=0 => no error */
+
+ .align 4
+ret_from_sys_call:
+ cmovne $26, 0, $19 /* $19 = 0 => non-restartable */
+ ldq $0, SP_OFF($sp)
+ and $0, 8, $0
+ beq $0, restore_all
+ret_from_reschedule:
+ /* Make sure need_resched and sigpending don't change between
+ sampling and the rti. */
+ lda $16, 7
+ call_pal PAL_swpipl
+ ldl $5, TI_FLAGS($8)
+ and $5, _TIF_WORK_MASK, $2
+ bne $5, work_pending
+restore_all:
+ RESTORE_ALL
+ call_pal PAL_rti
+
+ .align 3
+$syscall_error:
+ /*
+ * Some system calls (e.g., ptrace) can return arbitrary
+	 * values which might normally be mistaken for error numbers.
+	 * Those functions must zero $0 (v0) directly in the stack
+	 * frame to indicate that a negative return value wasn't an
+	 * error number.
+ */
+ ldq $19, 0($sp) /* old syscall nr (zero if success) */
+ beq $19, $ret_success
+
+ ldq $20, 72($sp) /* .. and this a3 */
+ subq $31, $0, $0 /* with error in v0 */
+ addq $31, 1, $1 /* set a3 for errno return */
+ stq $0, 0($sp)
+ mov $31, $26 /* tell "ret_from_sys_call" we can restart */
+ stq $1, 72($sp) /* a3 for return */
+ br ret_from_sys_call
+
+$ret_success:
+ stq $0, 0($sp)
+ stq $31, 72($sp) /* a3=0 => no error */
+ br ret_from_sys_call
+.end entSys
+
+/*
+ * Do all cleanup when returning from all interrupts and system calls.
+ *
+ * Arguments:
+ * $5: TI_FLAGS.
+ * $8: current.
+ * $19: The old syscall number, or zero if this is not a return
+ * from a syscall that errored and is possibly restartable.
+ * $20: Error indication.
+ */
+
+ .align 4
+ .ent work_pending
+work_pending:
+ and $5, _TIF_NEED_RESCHED, $2
+ beq $2, $work_notifysig
+
+$work_resched:
+ subq $sp, 16, $sp
+ stq $19, 0($sp) /* save syscall nr */
+ stq $20, 8($sp) /* and error indication (a3) */
+ jsr $26, schedule
+ ldq $19, 0($sp)
+ ldq $20, 8($sp)
+ addq $sp, 16, $sp
+ /* Make sure need_resched and sigpending don't change between
+ sampling and the rti. */
+ lda $16, 7
+ call_pal PAL_swpipl
+ ldl $5, TI_FLAGS($8)
+ and $5, _TIF_WORK_MASK, $2
+ beq $2, restore_all
+ and $5, _TIF_NEED_RESCHED, $2
+ bne $2, $work_resched
+
+$work_notifysig:
+ mov $sp, $17
+ br $1, do_switch_stack
+ mov $5, $21
+ mov $sp, $18
+ mov $31, $16
+ jsr $26, do_notify_resume
+ bsr $1, undo_switch_stack
+ br restore_all
+.end work_pending
+
+/*
+ * PTRACE syscall handler
+ */
+
+ .align 4
+ .ent strace
+strace:
+ /* set up signal stack, call syscall_trace */
+ bsr $1, do_switch_stack
+ jsr $26, syscall_trace
+ bsr $1, undo_switch_stack
+
+ /* get the system call number and the arguments back.. */
+ ldq $0, 0($sp)
+ ldq $16, SP_OFF+24($sp)
+ ldq $17, SP_OFF+32($sp)
+ ldq $18, SP_OFF+40($sp)
+ ldq $19, 72($sp)
+ ldq $20, 80($sp)
+ ldq $21, 88($sp)
+
+ /* get the system call pointer.. */
+ lda $1, NR_SYSCALLS($31)
+ lda $2, sys_call_table
+ lda $27, alpha_ni_syscall
+ cmpult $0, $1, $1
+ s8addq $0, $2, $2
+ beq $1, 1f
+ ldq $27, 0($2)
+1: jsr $26, ($27), sys_gettimeofday
+ ldgp $gp, 0($26)
+
+ /* check return.. */
+ blt $0, $strace_error /* the call failed */
+ stq $31, 72($sp) /* a3=0 => no error */
+$strace_success:
+ stq $0, 0($sp) /* save return value */
+
+ bsr $1, do_switch_stack
+ jsr $26, syscall_trace
+ bsr $1, undo_switch_stack
+ br $31, ret_from_sys_call
+
+ .align 3
+$strace_error:
+ ldq $19, 0($sp) /* old syscall nr (zero if success) */
+ beq $19, $strace_success
+ ldq $20, 72($sp) /* .. and this a3 */
+
+ subq $31, $0, $0 /* with error in v0 */
+ addq $31, 1, $1 /* set a3 for errno return */
+ stq $0, 0($sp)
+ stq $1, 72($sp) /* a3 for return */
+
+ bsr $1, do_switch_stack
+ mov $19, $9 /* save old syscall number */
+ mov $20, $10 /* save old a3 */
+ jsr $26, syscall_trace
+ mov $9, $19
+ mov $10, $20
+ bsr $1, undo_switch_stack
+
+ mov $31, $26 /* tell "ret_from_sys_call" we can restart */
+ br ret_from_sys_call
+.end strace
+
+/*
+ * Save and restore the switch stack -- aka the balance of the user context.
+ */
+
+ .align 4
+ .ent do_switch_stack
+do_switch_stack:
+ lda $sp, -SWITCH_STACK_SIZE($sp)
+ stq $9, 0($sp)
+ stq $10, 8($sp)
+ stq $11, 16($sp)
+ stq $12, 24($sp)
+ stq $13, 32($sp)
+ stq $14, 40($sp)
+ stq $15, 48($sp)
+ stq $26, 56($sp)
+ stt $f0, 64($sp)
+ stt $f1, 72($sp)
+ stt $f2, 80($sp)
+ stt $f3, 88($sp)
+ stt $f4, 96($sp)
+ stt $f5, 104($sp)
+ stt $f6, 112($sp)
+ stt $f7, 120($sp)
+ stt $f8, 128($sp)
+ stt $f9, 136($sp)
+ stt $f10, 144($sp)
+ stt $f11, 152($sp)
+ stt $f12, 160($sp)
+ stt $f13, 168($sp)
+ stt $f14, 176($sp)
+ stt $f15, 184($sp)
+ stt $f16, 192($sp)
+ stt $f17, 200($sp)
+ stt $f18, 208($sp)
+ stt $f19, 216($sp)
+ stt $f20, 224($sp)
+ stt $f21, 232($sp)
+ stt $f22, 240($sp)
+ stt $f23, 248($sp)
+ stt $f24, 256($sp)
+ stt $f25, 264($sp)
+ stt $f26, 272($sp)
+ stt $f27, 280($sp)
+ mf_fpcr $f0 # get fpcr
+ stt $f28, 288($sp)
+ stt $f29, 296($sp)
+ stt $f30, 304($sp)
+ stt $f0, 312($sp) # save fpcr in slot of $f31
+	ldt $f0, 64($sp)	# don't let "do_switch_stack" change fp state.
+ ret $31, ($1), 1
+.end do_switch_stack
+
+ .align 4
+ .ent undo_switch_stack
+undo_switch_stack:
+ ldq $9, 0($sp)
+ ldq $10, 8($sp)
+ ldq $11, 16($sp)
+ ldq $12, 24($sp)
+ ldq $13, 32($sp)
+ ldq $14, 40($sp)
+ ldq $15, 48($sp)
+ ldq $26, 56($sp)
+ ldt $f30, 312($sp) # get saved fpcr
+ ldt $f0, 64($sp)
+ ldt $f1, 72($sp)
+ ldt $f2, 80($sp)
+ ldt $f3, 88($sp)
+ mt_fpcr $f30 # install saved fpcr
+ ldt $f4, 96($sp)
+ ldt $f5, 104($sp)
+ ldt $f6, 112($sp)
+ ldt $f7, 120($sp)
+ ldt $f8, 128($sp)
+ ldt $f9, 136($sp)
+ ldt $f10, 144($sp)
+ ldt $f11, 152($sp)
+ ldt $f12, 160($sp)
+ ldt $f13, 168($sp)
+ ldt $f14, 176($sp)
+ ldt $f15, 184($sp)
+ ldt $f16, 192($sp)
+ ldt $f17, 200($sp)
+ ldt $f18, 208($sp)
+ ldt $f19, 216($sp)
+ ldt $f20, 224($sp)
+ ldt $f21, 232($sp)
+ ldt $f22, 240($sp)
+ ldt $f23, 248($sp)
+ ldt $f24, 256($sp)
+ ldt $f25, 264($sp)
+ ldt $f26, 272($sp)
+ ldt $f27, 280($sp)
+ ldt $f28, 288($sp)
+ ldt $f29, 296($sp)
+ ldt $f30, 304($sp)
+ lda $sp, SWITCH_STACK_SIZE($sp)
+ ret $31, ($1), 1
+.end undo_switch_stack
+
+/*
+ * The meat of the context switch code.
+ */
+
+ .align 4
+ .globl alpha_switch_to
+ .ent alpha_switch_to
+alpha_switch_to:
+ .prologue 0
+ bsr $1, do_switch_stack
+ call_pal PAL_swpctx
+ lda $8, 0x3fff
+ bsr $1, undo_switch_stack
+ bic $sp, $8, $8
+ mov $17, $0
+ ret
+.end alpha_switch_to
+
+/*
+ * New processes begin life here.
+ */
+
+ .globl ret_from_fork
+ .align 4
+ .ent ret_from_fork
+ret_from_fork:
+ lda $26, ret_from_sys_call
+ mov $17, $16
+ jmp $31, schedule_tail
+.end ret_from_fork
+
+/*
+ * kernel_thread(fn, arg, clone_flags)
+ */
+ .align 4
+ .globl kernel_thread
+ .ent kernel_thread
+kernel_thread:
+ /* We can be called from a module. */
+ ldgp $gp, 0($27)
+ .prologue 1
+ subq $sp, SP_OFF+6*8, $sp
+ br $1, 2f /* load start address */
+
+ /* We've now "returned" from a fake system call. */
+ unop
+ blt $0, 1f /* error? */
+ ldi $1, 0x3fff
+ beq $20, 1f /* parent or child? */
+
+ bic $sp, $1, $8 /* in child. */
+ jsr $26, ($27)
+ ldgp $gp, 0($26)
+ mov $0, $16
+ mov $31, $26
+ jmp $31, sys_exit
+
+1: ret /* in parent. */
+
+ .align 4
+2: /* Fake a system call stack frame, as we can't do system calls
+ from kernel space. Note that we store FN and ARG as they
+ need to be set up in the child for the call. Also store $8
+ and $26 for use in the parent. */
+ stq $31, SP_OFF($sp) /* ps */
+ stq $1, SP_OFF+8($sp) /* pc */
+ stq $gp, SP_OFF+16($sp) /* gp */
+ stq $16, 136($sp) /* $27; FN for child */
+ stq $17, SP_OFF+24($sp) /* $16; ARG for child */
+ stq $8, 64($sp) /* $8 */
+ stq $26, 128($sp) /* $26 */
+ /* Avoid the HAE being gratuitously wrong, to avoid restoring it. */
+ ldq $2, alpha_mv+HAE_CACHE
+ stq $2, 152($sp) /* HAE */
+
+ /* Shuffle FLAGS to the front; add CLONE_VM. */
+ ldi $1, CLONE_VM|CLONE_UNTRACED
+ or $18, $1, $16
+ bsr $26, sys_clone
+
+	/* We don't actually care for a3 success widgetry in the kernel.
+	   Nor for positive errno values. */
+ stq $0, 0($sp) /* $0 */
+ br restore_all
+.end kernel_thread
+
+/*
+ * execve(path, argv, envp)
+ */
+ .align 4
+ .globl execve
+ .ent execve
+execve:
+ /* We can be called from a module. */
+ ldgp $gp, 0($27)
+ lda $sp, -(32+SIZEOF_PT_REGS+8)($sp)
+ .frame $sp, 32+SIZEOF_PT_REGS+8, $26, 0
+ stq $26, 0($sp)
+ stq $16, 8($sp)
+ stq $17, 16($sp)
+ stq $18, 24($sp)
+ .prologue 1
+
+ lda $16, 32($sp)
+ lda $17, 0
+ lda $18, SIZEOF_PT_REGS
+ bsr $26, memset !samegp
+
+ /* Avoid the HAE being gratuitously wrong, which would cause us
+ to do the whole turn off interrupts thing and restore it. */
+ ldq $2, alpha_mv+HAE_CACHE
+ stq $2, 152+32($sp)
+
+ ldq $16, 8($sp)
+ ldq $17, 16($sp)
+ ldq $18, 24($sp)
+ lda $19, 32($sp)
+ bsr $26, do_execve !samegp
+
+ ldq $26, 0($sp)
+ bne $0, 1f /* error! */
+
+ /* Move the temporary pt_regs struct from its current location
+ to the top of the kernel stack frame. See copy_thread for
+ details for a normal process. */
+ lda $16, 0x4000 - SIZEOF_PT_REGS($8)
+ lda $17, 32($sp)
+ lda $18, SIZEOF_PT_REGS
+ bsr $26, memmove !samegp
+
+ /* Take that over as our new stack frame and visit userland! */
+ lda $sp, 0x4000 - SIZEOF_PT_REGS($8)
+ br $31, ret_from_sys_call
+
+1: lda $sp, 32+SIZEOF_PT_REGS+8($sp)
+ ret
+.end execve
+
+
+/*
+ * Special system calls. Most of these are special in that they either
+ * have to play switch_stack games or in some way use the pt_regs struct.
+ */
+ .align 4
+ .globl sys_fork
+ .ent sys_fork
+sys_fork:
+ .prologue 0
+ mov $sp, $21
+ bsr $1, do_switch_stack
+ bis $31, SIGCHLD, $16
+ mov $31, $17
+ mov $31, $18
+ mov $31, $19
+ mov $31, $20
+ jsr $26, alpha_clone
+ bsr $1, undo_switch_stack
+ ret
+.end sys_fork
+
+ .align 4
+ .globl sys_clone
+ .ent sys_clone
+sys_clone:
+ .prologue 0
+ mov $sp, $21
+ bsr $1, do_switch_stack
+ /* $16, $17, $18, $19, $20 come from the user. */
+ jsr $26, alpha_clone
+ bsr $1, undo_switch_stack
+ ret
+.end sys_clone
+
+ .align 4
+ .globl sys_vfork
+ .ent sys_vfork
+sys_vfork:
+ .prologue 0
+ mov $sp, $16
+ bsr $1, do_switch_stack
+ jsr $26, alpha_vfork
+ bsr $1, undo_switch_stack
+ ret
+.end sys_vfork
+
+ .align 4
+ .globl sys_sigreturn
+ .ent sys_sigreturn
+sys_sigreturn:
+ .prologue 0
+ mov $sp, $17
+ lda $18, -SWITCH_STACK_SIZE($sp)
+ lda $sp, -SWITCH_STACK_SIZE($sp)
+ jsr $26, do_sigreturn
+ br $1, undo_switch_stack
+ br ret_from_sys_call
+.end sys_sigreturn
+
+ .align 4
+ .globl sys_rt_sigreturn
+ .ent sys_rt_sigreturn
+sys_rt_sigreturn:
+ .prologue 0
+ mov $sp, $17
+ lda $18, -SWITCH_STACK_SIZE($sp)
+ lda $sp, -SWITCH_STACK_SIZE($sp)
+ jsr $26, do_rt_sigreturn
+ br $1, undo_switch_stack
+ br ret_from_sys_call
+.end sys_rt_sigreturn
+
+ .align 4
+ .globl sys_sigsuspend
+ .ent sys_sigsuspend
+sys_sigsuspend:
+ .prologue 0
+ mov $sp, $17
+ br $1, do_switch_stack
+ mov $sp, $18
+ subq $sp, 16, $sp
+ stq $26, 0($sp)
+ jsr $26, do_sigsuspend
+ ldq $26, 0($sp)
+ lda $sp, SWITCH_STACK_SIZE+16($sp)
+ ret
+.end sys_sigsuspend
+
+ .align 4
+ .globl sys_rt_sigsuspend
+ .ent sys_rt_sigsuspend
+sys_rt_sigsuspend:
+ .prologue 0
+ mov $sp, $18
+ br $1, do_switch_stack
+ mov $sp, $19
+ subq $sp, 16, $sp
+ stq $26, 0($sp)
+ jsr $26, do_rt_sigsuspend
+ ldq $26, 0($sp)
+ lda $sp, SWITCH_STACK_SIZE+16($sp)
+ ret
+.end sys_rt_sigsuspend
+
+ .align 4
+ .globl sys_sethae
+ .ent sys_sethae
+sys_sethae:
+ .prologue 0
+ stq $16, 152($sp)
+ ret
+.end sys_sethae
+
+ .align 4
+ .globl osf_getpriority
+ .ent osf_getpriority
+osf_getpriority:
+ lda $sp, -16($sp)
+ stq $26, 0($sp)
+ .prologue 0
+
+ jsr $26, sys_getpriority
+
+ ldq $26, 0($sp)
+ blt $0, 1f
+
+ /* Return value is the unbiased priority, i.e. 20 - prio.
+ This does result in negative return values, so signal
+ no error by writing into the R0 slot. */
+ lda $1, 20
+ stq $31, 16($sp)
+ subl $1, $0, $0
+ unop
+
+1: lda $sp, 16($sp)
+ ret
+.end osf_getpriority
+
+ .align 4
+ .globl sys_getxuid
+ .ent sys_getxuid
+sys_getxuid:
+ .prologue 0
+ ldq $2, TI_TASK($8)
+ ldl $0, TASK_UID($2)
+ ldl $1, TASK_EUID($2)
+ stq $1, 80($sp)
+ ret
+.end sys_getxuid
+
+ .align 4
+ .globl sys_getxgid
+ .ent sys_getxgid
+sys_getxgid:
+ .prologue 0
+ ldq $2, TI_TASK($8)
+ ldl $0, TASK_GID($2)
+ ldl $1, TASK_EGID($2)
+ stq $1, 80($sp)
+ ret
+.end sys_getxgid
+
+ .align 4
+ .globl sys_getxpid
+ .ent sys_getxpid
+sys_getxpid:
+ .prologue 0
+ ldq $2, TI_TASK($8)
+
+ /* See linux/kernel/timer.c sys_getppid for discussion
+ about this loop. */
+ ldq $3, TASK_REAL_PARENT($2)
+1: ldl $1, TASK_TGID($3)
+#ifdef CONFIG_SMP
+ mov $3, $4
+ mb
+ ldq $3, TASK_REAL_PARENT($2)
+ cmpeq $3, $4, $4
+ beq $4, 1b
+#endif
+ stq $1, 80($sp)
+ ldl $0, TASK_TGID($2)
+ ret
+.end sys_getxpid
+
+ .align 4
+ .globl sys_pipe
+ .ent sys_pipe
+sys_pipe:
+ lda $sp, -16($sp)
+ stq $26, 0($sp)
+ .prologue 0
+
+ lda $16, 8($sp)
+ jsr $26, do_pipe
+
+ ldq $26, 0($sp)
+ bne $0, 1f
+
+ /* The return values are in $0 and $20. */
+ ldl $1, 12($sp)
+ ldl $0, 8($sp)
+
+ stq $1, 80+16($sp)
+1: lda $sp, 16($sp)
+ ret
+.end sys_pipe
+
+ .align 4
+ .globl sys_ptrace
+ .ent sys_ptrace
+sys_ptrace:
+ .prologue 0
+ mov $sp, $20
+ jmp $31, do_sys_ptrace
+.end sys_ptrace
+
+ .align 4
+ .globl sys_execve
+ .ent sys_execve
+sys_execve:
+ .prologue 0
+ mov $sp, $19
+ jmp $31, do_sys_execve
+.end sys_execve
+
+ .align 4
+ .globl osf_sigprocmask
+ .ent osf_sigprocmask
+osf_sigprocmask:
+ .prologue 0
+ mov $sp, $18
+ jmp $31, do_osf_sigprocmask
+.end osf_sigprocmask
+
+ .align 4
+ .globl alpha_ni_syscall
+ .ent alpha_ni_syscall
+alpha_ni_syscall:
+ .prologue 0
+	/* Special because it also implements overflow handling via
+	   syscall number 0.  And if you recall, zero is a special
+	   trigger for "not an error".  Store a large non-zero value
+	   there. */
+ lda $0, -ENOSYS
+ unop
+ stq $0, 0($sp)
+ ret
+.end alpha_ni_syscall
diff --git a/arch/alpha/kernel/err_common.c b/arch/alpha/kernel/err_common.c
new file mode 100644
index 000000000000..687580b16b41
--- /dev/null
+++ b/arch/alpha/kernel/err_common.c
@@ -0,0 +1,321 @@
+/*
+ * linux/arch/alpha/kernel/err_common.c
+ *
+ * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation)
+ *
+ * Error handling code supporting Alpha systems
+ */
+
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+
+#include <asm/io.h>
+#include <asm/hwrpb.h>
+#include <asm/smp.h>
+#include <asm/err_common.h>
+
+#include "err_impl.h"
+#include "proto.h"
+
+/*
+ * err_print_prefix -- error handling print routines should prefix
+ * all prints with this
+ */
+char *err_print_prefix = KERN_NOTICE;
+
+
+/*
+ * Generic
+ */
+void
+mchk_dump_mem(void *data, size_t length, char **annotation)
+{
+ unsigned long *ldata = data;
+ size_t i;
+
+ for (i = 0; (i * sizeof(*ldata)) < length; i++) {
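+		/* A NULL entry terminates the annotation list early. */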
+ if (annotation && !annotation[i])
+ annotation = NULL;
+ printk("%s %08x: %016lx %s\n",
+ err_print_prefix,
+ (unsigned)(i * sizeof(*ldata)), ldata[i],
+ annotation ? annotation[i] : "");
+ }
+}
+
+void
+mchk_dump_logout_frame(struct el_common *mchk_header)
+{
+ printk("%s -- Frame Header --\n"
+ " Frame Size: %d (0x%x) bytes\n"
+ " Flags: %s%s\n"
+ " MCHK Code: 0x%x\n"
+ " Frame Rev: %d\n"
+ " Proc Offset: 0x%08x\n"
+ " Sys Offset: 0x%08x\n"
+ " -- Processor Region --\n",
+ err_print_prefix,
+ mchk_header->size, mchk_header->size,
+ mchk_header->retry ? "RETRY " : "",
+ mchk_header->err2 ? "SECOND_ERR " : "",
+ mchk_header->code,
+ mchk_header->frame_rev,
+ mchk_header->proc_offset,
+ mchk_header->sys_offset);
+
+ mchk_dump_mem((void *)
+ ((unsigned long)mchk_header + mchk_header->proc_offset),
+ mchk_header->sys_offset - mchk_header->proc_offset,
+ NULL);
+
+ printk("%s -- System Region --\n", err_print_prefix);
+ mchk_dump_mem((void *)
+ ((unsigned long)mchk_header + mchk_header->sys_offset),
+ mchk_header->size - mchk_header->sys_offset,
+ NULL);
+ printk("%s -- End of Frame --\n", err_print_prefix);
+}
+
+
+/*
+ * Console Data Log
+ */
+/* Data */
+static struct el_subpacket_handler *subpacket_handler_list = NULL;
+static struct el_subpacket_annotation *subpacket_annotation_list = NULL;
+
+static struct el_subpacket *
+el_process_header_subpacket(struct el_subpacket *header)
+{
+ union el_timestamp timestamp;
+ char *name = "UNKNOWN EVENT";
+ int packet_count = 0;
+ int length = 0;
+
+ if (header->class != EL_CLASS__HEADER) {
+ printk("%s** Unexpected header CLASS %d TYPE %d, aborting\n",
+ err_print_prefix,
+ header->class, header->type);
+ return NULL;
+ }
+
+ switch(header->type) {
+ case EL_TYPE__HEADER__SYSTEM_ERROR_FRAME:
+ name = "SYSTEM ERROR";
+ length = header->by_type.sys_err.frame_length;
+ packet_count =
+ header->by_type.sys_err.frame_packet_count;
+ timestamp.as_int = 0;
+ break;
+ case EL_TYPE__HEADER__SYSTEM_EVENT_FRAME:
+ name = "SYSTEM EVENT";
+ length = header->by_type.sys_event.frame_length;
+ packet_count =
+ header->by_type.sys_event.frame_packet_count;
+ timestamp = header->by_type.sys_event.timestamp;
+ break;
+ case EL_TYPE__HEADER__HALT_FRAME:
+ name = "ERROR HALT";
+ length = header->by_type.err_halt.frame_length;
+ packet_count =
+ header->by_type.err_halt.frame_packet_count;
+ timestamp = header->by_type.err_halt.timestamp;
+ break;
+ case EL_TYPE__HEADER__LOGOUT_FRAME:
+ name = "LOGOUT FRAME";
+ length = header->by_type.logout_header.frame_length;
+ packet_count = 1;
+ timestamp.as_int = 0;
+ break;
+ default: /* Unknown */
+ printk("%s** Unknown header - CLASS %d TYPE %d, aborting\n",
+ err_print_prefix,
+ header->class, header->type);
+ return NULL;
+ }
+
+ printk("%s*** %s:\n"
+ " CLASS %d, TYPE %d\n",
+ err_print_prefix,
+ name,
+ header->class, header->type);
+ el_print_timestamp(&timestamp);
+
+ /*
+ * Process the subpackets
+ */
+ el_process_subpackets(header, packet_count);
+
+ /* return the next header */
+ header = (struct el_subpacket *)
+ ((unsigned long)header + header->length + length);
+ return header;
+}
+
+static struct el_subpacket *
+el_process_subpacket_reg(struct el_subpacket *header)
+{
+ struct el_subpacket *next = NULL;
+ struct el_subpacket_handler *h = subpacket_handler_list;
+
+ for (; h && h->class != header->class; h = h->next);
+ if (h) next = h->handler(header);
+
+ return next;
+}
+
+void
+el_print_timestamp(union el_timestamp *timestamp)
+{
+ if (timestamp->as_int)
+		printk("%s  TIMESTAMP: %d/%d/%02d %d:%02d:%02d\n",
+ err_print_prefix,
+ timestamp->b.month, timestamp->b.day,
+ timestamp->b.year, timestamp->b.hour,
+ timestamp->b.minute, timestamp->b.second);
+}
+
+void
+el_process_subpackets(struct el_subpacket *header, int packet_count)
+{
+ struct el_subpacket *subpacket;
+ int i;
+
+ subpacket = (struct el_subpacket *)
+ ((unsigned long)header + header->length);
+
+ for (i = 0; subpacket && i < packet_count; i++) {
+ printk("%sPROCESSING SUBPACKET %d\n", err_print_prefix, i);
+ subpacket = el_process_subpacket(subpacket);
+ }
+}
+
+struct el_subpacket *
+el_process_subpacket(struct el_subpacket *header)
+{
+ struct el_subpacket *next = NULL;
+
+ switch(header->class) {
+ case EL_CLASS__TERMINATION:
+ /* Termination packet, there are no more */
+ break;
+ case EL_CLASS__HEADER:
+ next = el_process_header_subpacket(header);
+ break;
+ default:
+ if (NULL == (next = el_process_subpacket_reg(header))) {
+ printk("%s** Unexpected header CLASS %d TYPE %d"
+ " -- aborting.\n",
+ err_print_prefix,
+ header->class, header->type);
+ }
+ break;
+ }
+
+ return next;
+}
+
+void
+el_annotate_subpacket(struct el_subpacket *header)
+{
+ struct el_subpacket_annotation *a;
+ char **annotation = NULL;
+
+ for (a = subpacket_annotation_list; a; a = a->next) {
+ if (a->class == header->class &&
+ a->type == header->type &&
+ a->revision == header->revision) {
+ /*
+ * We found the annotation
+ */
+ annotation = a->annotation;
+ printk("%s %s\n", err_print_prefix, a->description);
+ break;
+ }
+ }
+
+ mchk_dump_mem(header, header->length, annotation);
+}
+
+static void __init
+cdl_process_console_data_log(int cpu, struct percpu_struct *pcpu)
+{
+ struct el_subpacket *header = (struct el_subpacket *)
+ (IDENT_ADDR | pcpu->console_data_log_pa);
+ int err;
+
+ printk("%s******* CONSOLE DATA LOG FOR CPU %d. *******\n"
+ "*** Error(s) were logged on a previous boot\n",
+ err_print_prefix, cpu);
+
+ for (err = 0; header && (header->class != EL_CLASS__TERMINATION); err++)
+ header = el_process_subpacket(header);
+
+ /* let the console know it's ok to clear the error(s) at restart */
+ pcpu->console_data_log_pa = 0;
+
+ printk("%s*** %d total error(s) logged\n"
+ "**** END OF CONSOLE DATA LOG FOR CPU %d ****\n",
+ err_print_prefix, err, cpu);
+}
+
+void __init
+cdl_check_console_data_log(void)
+{
+ struct percpu_struct *pcpu;
+ unsigned long cpu;
+
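+	/*
+	 * Per-CPU slots live processor_size bytes apart, starting
+	 * processor_offset bytes from the HWRPB base.
+	 */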
+ for (cpu = 0; cpu < hwrpb->nr_processors; cpu++) {
+ pcpu = (struct percpu_struct *)
+ ((unsigned long)hwrpb + hwrpb->processor_offset
+ + cpu * hwrpb->processor_size);
+ if (pcpu->console_data_log_pa)
+ cdl_process_console_data_log(cpu, pcpu);
+ }
+}
+
+int __init
+cdl_register_subpacket_annotation(struct el_subpacket_annotation *new)
+{
+ struct el_subpacket_annotation *a = subpacket_annotation_list;
+
+ if (a == NULL) subpacket_annotation_list = new;
+ else {
+ for (; a->next != NULL; a = a->next) {
+ if ((a->class == new->class && a->type == new->type) ||
+ a == new) {
+ printk("Attempted to re-register "
+ "subpacket annotation\n");
+ return -EINVAL;
+ }
+ }
+ a->next = new;
+ }
+ new->next = NULL;
+
+ return 0;
+}
+
+int __init
+cdl_register_subpacket_handler(struct el_subpacket_handler *new)
+{
+ struct el_subpacket_handler *h = subpacket_handler_list;
+
+ if (h == NULL) subpacket_handler_list = new;
+ else {
+ for (; h->next != NULL; h = h->next) {
+ if (h->class == new->class || h == new) {
+ printk("Attempted to re-register "
+ "subpacket handler\n");
+ return -EINVAL;
+ }
+ }
+ h->next = new;
+ }
+ new->next = NULL;
+
+ return 0;
+}
+
diff --git a/arch/alpha/kernel/err_ev6.c b/arch/alpha/kernel/err_ev6.c
new file mode 100644
index 000000000000..64f59f2fcf5c
--- /dev/null
+++ b/arch/alpha/kernel/err_ev6.c
@@ -0,0 +1,274 @@
+/*
+ * linux/arch/alpha/kernel/err_ev6.c
+ *
+ * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation)
+ *
+ * Error handling code supporting Alpha systems
+ */
+
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+
+#include <asm/io.h>
+#include <asm/hwrpb.h>
+#include <asm/smp.h>
+#include <asm/err_common.h>
+#include <asm/err_ev6.h>
+
+#include "err_impl.h"
+#include "proto.h"
+
+static int
+ev6_parse_ibox(u64 i_stat, int print)
+{
+ int status = MCHK_DISPOSITION_REPORT;
+
+#define EV6__I_STAT__PAR (1UL << 29)
+#define EV6__I_STAT__ERRMASK (EV6__I_STAT__PAR)
+
+ if (!(i_stat & EV6__I_STAT__ERRMASK))
+ return MCHK_DISPOSITION_UNKNOWN_ERROR;
+
+ if (!print)
+ return status;
+
+ if (i_stat & EV6__I_STAT__PAR)
+ printk("%s Icache parity error\n", err_print_prefix);
+
+ return status;
+}
+
+static int
+ev6_parse_mbox(u64 mm_stat, u64 d_stat, u64 c_stat, int print)
+{
+ int status = MCHK_DISPOSITION_REPORT;
+
+#define EV6__MM_STAT__DC_TAG_PERR (1UL << 10)
+#define EV6__MM_STAT__ERRMASK (EV6__MM_STAT__DC_TAG_PERR)
+#define EV6__D_STAT__TPERR_P0 (1UL << 0)
+#define EV6__D_STAT__TPERR_P1 (1UL << 1)
+#define EV6__D_STAT__ECC_ERR_ST (1UL << 2)
+#define EV6__D_STAT__ECC_ERR_LD (1UL << 3)
+#define EV6__D_STAT__SEO (1UL << 4)
+#define EV6__D_STAT__ERRMASK (EV6__D_STAT__TPERR_P0 | \
+ EV6__D_STAT__TPERR_P1 | \
+ EV6__D_STAT__ECC_ERR_ST | \
+ EV6__D_STAT__ECC_ERR_LD | \
+ EV6__D_STAT__SEO)
+
+ if (!(d_stat & EV6__D_STAT__ERRMASK) &&
+ !(mm_stat & EV6__MM_STAT__ERRMASK))
+ return MCHK_DISPOSITION_UNKNOWN_ERROR;
+
+ if (!print)
+ return status;
+
+ if (mm_stat & EV6__MM_STAT__DC_TAG_PERR)
+ printk("%s Dcache tag parity error on probe\n",
+ err_print_prefix);
+ if (d_stat & EV6__D_STAT__TPERR_P0)
+ printk("%s Dcache tag parity error - pipe 0\n",
+ err_print_prefix);
+ if (d_stat & EV6__D_STAT__TPERR_P1)
+ printk("%s Dcache tag parity error - pipe 1\n",
+ err_print_prefix);
+ if (d_stat & EV6__D_STAT__ECC_ERR_ST)
+ printk("%s ECC error occurred on a store\n",
+ err_print_prefix);
+ if (d_stat & EV6__D_STAT__ECC_ERR_LD)
+ printk("%s ECC error occurred on a %s load\n",
+ err_print_prefix,
+ c_stat ? "" : "speculative ");
+ if (d_stat & EV6__D_STAT__SEO)
+ printk("%s Dcache second error\n", err_print_prefix);
+
+ return status;
+}
+
+static int
+ev6_parse_cbox(u64 c_addr, u64 c1_syn, u64 c2_syn,
+ u64 c_stat, u64 c_sts, int print)
+{
+ char *sourcename[] = { "UNKNOWN", "UNKNOWN", "UNKNOWN",
+ "MEMORY", "BCACHE", "DCACHE",
+ "BCACHE PROBE", "BCACHE PROBE" };
+ char *streamname[] = { "D", "I" };
+ char *bitsname[] = { "SINGLE", "DOUBLE" };
+ int status = MCHK_DISPOSITION_REPORT;
+ int source = -1, stream = -1, bits = -1;
+
+#define EV6__C_STAT__BC_PERR (0x01)
+#define EV6__C_STAT__DC_PERR (0x02)
+#define EV6__C_STAT__DSTREAM_MEM_ERR (0x03)
+#define EV6__C_STAT__DSTREAM_BC_ERR (0x04)
+#define EV6__C_STAT__DSTREAM_DC_ERR (0x05)
+#define EV6__C_STAT__PROBE_BC_ERR0 (0x06) /* both 6 and 7 indicate... */
+#define EV6__C_STAT__PROBE_BC_ERR1 (0x07) /* ...probe bc error. */
+#define EV6__C_STAT__ISTREAM_MEM_ERR (0x0B)
+#define EV6__C_STAT__ISTREAM_BC_ERR (0x0C)
+#define EV6__C_STAT__DSTREAM_MEM_DBL (0x13)
+#define EV6__C_STAT__DSTREAM_BC_DBL (0x14)
+#define EV6__C_STAT__ISTREAM_MEM_DBL (0x1B)
+#define EV6__C_STAT__ISTREAM_BC_DBL (0x1C)
+#define EV6__C_STAT__SOURCE_MEMORY (0x03)
+#define EV6__C_STAT__SOURCE_BCACHE (0x04)
+#define EV6__C_STAT__SOURCE__S (0)
+#define EV6__C_STAT__SOURCE__M (0x07)
+#define EV6__C_STAT__ISTREAM__S (3)
+#define EV6__C_STAT__ISTREAM__M (0x01)
+#define EV6__C_STAT__DOUBLE__S (4)
+#define EV6__C_STAT__DOUBLE__M (0x01)
+#define EV6__C_STAT__ERRMASK (0x1F)
+#define EV6__C_STS__SHARED (1 << 0)
+#define EV6__C_STS__DIRTY (1 << 1)
+#define EV6__C_STS__VALID (1 << 2)
+#define EV6__C_STS__PARITY (1 << 3)
+
+ if (!(c_stat & EV6__C_STAT__ERRMASK))
+ return MCHK_DISPOSITION_UNKNOWN_ERROR;
+
+ if (!print)
+ return status;
+
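+	/*
+	 * Decode C_STAT: <2:0> is the source, <3> the I/D stream,
+	 * <4> single- vs double-bit (see the field defines above).
+	 */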
+ source = EXTRACT(c_stat, EV6__C_STAT__SOURCE);
+ stream = EXTRACT(c_stat, EV6__C_STAT__ISTREAM);
+ bits = EXTRACT(c_stat, EV6__C_STAT__DOUBLE);
+
+ if (c_stat & EV6__C_STAT__BC_PERR) {
+ printk("%s Bcache tag parity error\n", err_print_prefix);
+ source = -1;
+ }
+
+ if (c_stat & EV6__C_STAT__DC_PERR) {
+ printk("%s Dcache tag parity error\n", err_print_prefix);
+ source = -1;
+ }
+
+ if (c_stat == EV6__C_STAT__PROBE_BC_ERR0 ||
+ c_stat == EV6__C_STAT__PROBE_BC_ERR1) {
+ printk("%s Bcache single-bit error on a probe hit\n",
+ err_print_prefix);
+ source = -1;
+ }
+
+ if (source != -1)
+ printk("%s %s-STREAM %s-BIT ECC error from %s\n",
+ err_print_prefix,
+ streamname[stream], bitsname[bits], sourcename[source]);
+
+ printk("%s Address: 0x%016lx\n"
+ " Syndrome[upper.lower]: %02lx.%02lx\n",
+ err_print_prefix,
+ c_addr,
+ c2_syn, c1_syn);
+
+ if (source == EV6__C_STAT__SOURCE_MEMORY ||
+ source == EV6__C_STAT__SOURCE_BCACHE)
+ printk("%s Block status: %s%s%s%s\n",
+ err_print_prefix,
+ (c_sts & EV6__C_STS__SHARED) ? "SHARED " : "",
+ (c_sts & EV6__C_STS__DIRTY) ? "DIRTY " : "",
+ (c_sts & EV6__C_STS__VALID) ? "VALID " : "",
+ (c_sts & EV6__C_STS__PARITY) ? "PARITY " : "");
+
+ return status;
+}
+
+void
+ev6_register_error_handlers(void)
+{
+ /* None right now. */
+}
+
+int
+ev6_process_logout_frame(struct el_common *mchk_header, int print)
+{
+ struct el_common_EV6_mcheck *ev6mchk =
+ (struct el_common_EV6_mcheck *)mchk_header;
+ int status = MCHK_DISPOSITION_UNKNOWN_ERROR;
+
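+	/*
+	 * The parsers' dispositions are OR'd together, so a REPORT
+	 * from any one box dominates UNKNOWN_ERROR for the frame.
+	 */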
+ status |= ev6_parse_ibox(ev6mchk->I_STAT, print);
+ status |= ev6_parse_mbox(ev6mchk->MM_STAT, ev6mchk->DC_STAT,
+ ev6mchk->C_STAT, print);
+ status |= ev6_parse_cbox(ev6mchk->C_ADDR, ev6mchk->DC1_SYNDROME,
+ ev6mchk->DC0_SYNDROME, ev6mchk->C_STAT,
+ ev6mchk->C_STS, print);
+
+ if (!print)
+ return status;
+
+ if (status != MCHK_DISPOSITION_DISMISS) {
+ char *saved_err_prefix = err_print_prefix;
+
+ /*
+ * Dump some additional information from the frame
+ */
+ printk("%s EXC_ADDR: 0x%016lx IER_CM: 0x%016lx"
+ " ISUM: 0x%016lx\n"
+ " PAL_BASE: 0x%016lx I_CTL: 0x%016lx"
+ " PCTX: 0x%016lx\n",
+ err_print_prefix,
+ ev6mchk->EXC_ADDR, ev6mchk->IER_CM, ev6mchk->ISUM,
+ ev6mchk->PAL_BASE, ev6mchk->I_CTL, ev6mchk->PCTX);
+
+ if (status == MCHK_DISPOSITION_UNKNOWN_ERROR) {
+ printk("%s UNKNOWN error, frame follows:\n",
+ err_print_prefix);
+ } else {
+			/* error was decoded -- downgrade print level for frame */
+ err_print_prefix = KERN_NOTICE;
+ }
+
+ mchk_dump_logout_frame(mchk_header);
+
+ err_print_prefix = saved_err_prefix;
+ }
+
+ return status;
+}
+
+void
+ev6_machine_check(u64 vector, u64 la_ptr, struct pt_regs *regs)
+{
+ struct el_common *mchk_header = (struct el_common *)la_ptr;
+
+ /*
+ * Sync the processor
+ */
+ mb();
+ draina();
+
+ /*
+ * Parse the logout frame without printing first. If the only error(s)
+	 * found have a disposition of "dismiss", then just dismiss them
+	 * and don't print any message.
+ */
+ if (ev6_process_logout_frame(mchk_header, 0) !=
+ MCHK_DISPOSITION_DISMISS) {
+ char *saved_err_prefix = err_print_prefix;
+ err_print_prefix = KERN_CRIT;
+
+ /*
+ * Either a nondismissable error was detected or no
+ * recognized error was detected in the logout frame
+ * -- report the error in either case
+ */
+ printk("%s*CPU %s Error (Vector 0x%x) reported on CPU %d:\n",
+ err_print_prefix,
+ (vector == SCB_Q_PROCERR)?"Correctable":"Uncorrectable",
+ (unsigned int)vector, (int)smp_processor_id());
+
+ ev6_process_logout_frame(mchk_header, 1);
+ dik_show_regs(regs, NULL);
+
+ err_print_prefix = saved_err_prefix;
+ }
+
+ /*
+ * Release the logout frame
+ */
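+	/*
+	 * Writing 1s to MCES<2:0> is assumed to clear the machine-check
+	 * and correctable-error "in progress" flags so PALcode can
+	 * deliver the next event.
+	 */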
+ wrmces(0x7);
+ mb();
+}
+
diff --git a/arch/alpha/kernel/err_ev7.c b/arch/alpha/kernel/err_ev7.c
new file mode 100644
index 000000000000..bf52ba691957
--- /dev/null
+++ b/arch/alpha/kernel/err_ev7.c
@@ -0,0 +1,289 @@
+/*
+ * linux/arch/alpha/kernel/err_ev7.c
+ *
+ * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation)
+ *
+ * Error handling code supporting Alpha systems
+ */
+
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+
+#include <asm/io.h>
+#include <asm/hwrpb.h>
+#include <asm/smp.h>
+#include <asm/err_common.h>
+#include <asm/err_ev7.h>
+
+#include "err_impl.h"
+#include "proto.h"
+
+struct ev7_lf_subpackets *
+ev7_collect_logout_frame_subpackets(struct el_subpacket *el_ptr,
+ struct ev7_lf_subpackets *lf_subpackets)
+{
+ struct el_subpacket *subpacket;
+ int i;
+
+ /*
+ * A Marvel machine check frame is always packaged in an
+ * el_subpacket of class HEADER, type LOGOUT_FRAME.
+ */
+ if (el_ptr->class != EL_CLASS__HEADER ||
+ el_ptr->type != EL_TYPE__HEADER__LOGOUT_FRAME)
+ return NULL;
+
+ /*
+ * It is a logout frame header. Look at the one subpacket.
+ */
+ el_ptr = (struct el_subpacket *)
+ ((unsigned long)el_ptr + el_ptr->length);
+
+ /*
+ * It has to be class PAL, type LOGOUT_FRAME.
+ */
+ if (el_ptr->class != EL_CLASS__PAL ||
+ el_ptr->type != EL_TYPE__PAL__LOGOUT_FRAME)
+ return NULL;
+
+ lf_subpackets->logout = (struct ev7_pal_logout_subpacket *)
+ el_ptr->by_type.raw.data_start;
+
+ /*
+ * Process the subpackets.
+ */
+ subpacket = (struct el_subpacket *)
+ ((unsigned long)el_ptr + el_ptr->length);
+ for (i = 0;
+ subpacket && i < lf_subpackets->logout->subpacket_count;
+ subpacket = (struct el_subpacket *)
+ ((unsigned long)subpacket + subpacket->length), i++) {
+ /*
+ * All subpackets should be class PAL.
+ */
+ if (subpacket->class != EL_CLASS__PAL) {
+			printk("%s**UNEXPECTED SUBPACKET CLASS %d "
+			       "IN LOGOUT FRAME (packet %d)\n",
+			       err_print_prefix, subpacket->class, i);
+ return NULL;
+ }
+
+ /*
+ * Remember the subpacket.
+ */
+ switch(subpacket->type) {
+ case EL_TYPE__PAL__EV7_PROCESSOR:
+ lf_subpackets->ev7 =
+ (struct ev7_pal_processor_subpacket *)
+ subpacket->by_type.raw.data_start;
+ break;
+
+ case EL_TYPE__PAL__EV7_RBOX:
+ lf_subpackets->rbox = (struct ev7_pal_rbox_subpacket *)
+ subpacket->by_type.raw.data_start;
+ break;
+
+ case EL_TYPE__PAL__EV7_ZBOX:
+ lf_subpackets->zbox = (struct ev7_pal_zbox_subpacket *)
+ subpacket->by_type.raw.data_start;
+ break;
+
+ case EL_TYPE__PAL__EV7_IO:
+ lf_subpackets->io = (struct ev7_pal_io_subpacket *)
+ subpacket->by_type.raw.data_start;
+ break;
+
+ case EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE:
+ case EL_TYPE__PAL__ENV__AIRMOVER_FAN:
+ case EL_TYPE__PAL__ENV__VOLTAGE:
+ case EL_TYPE__PAL__ENV__INTRUSION:
+ case EL_TYPE__PAL__ENV__POWER_SUPPLY:
+ case EL_TYPE__PAL__ENV__LAN:
+ case EL_TYPE__PAL__ENV__HOT_PLUG:
+ lf_subpackets->env[ev7_lf_env_index(subpacket->type)] =
+ (struct ev7_pal_environmental_subpacket *)
+ subpacket->by_type.raw.data_start;
+ break;
+
+ default:
+ /*
+ * Don't know what kind of frame this is.
+ */
+ return NULL;
+ }
+ }
+
+ return lf_subpackets;
+}
+
+void
+ev7_machine_check(u64 vector, u64 la_ptr, struct pt_regs *regs)
+{
+ struct el_subpacket *el_ptr = (struct el_subpacket *)la_ptr;
+ char *saved_err_prefix = err_print_prefix;
+
+ /*
+ * Sync the processor
+ */
+ mb();
+ draina();
+
+ err_print_prefix = KERN_CRIT;
+ printk("%s*CPU %s Error (Vector 0x%x) reported on CPU %d\n",
+ err_print_prefix,
+ (vector == SCB_Q_PROCERR) ? "Correctable" : "Uncorrectable",
+ (unsigned int)vector, (int)smp_processor_id());
+ el_process_subpacket(el_ptr);
+ err_print_prefix = saved_err_prefix;
+
+ /*
+ * Release the logout frame
+ */
+ wrmces(0x7);
+ mb();
+}
+
+static char *el_ev7_processor_subpacket_annotation[] = {
+ "Subpacket Header", "I_STAT", "DC_STAT",
+ "C_ADDR", "C_SYNDROME_1", "C_SYNDROME_0",
+ "C_STAT", "C_STS", "MM_STAT",
+ "EXC_ADDR", "IER_CM", "ISUM",
+ "PAL_BASE", "I_CTL", "PROCESS_CONTEXT",
+ "CBOX_CTL", "CBOX_STP_CTL", "CBOX_ACC_CTL",
+ "CBOX_LCL_SET", "CBOX_GLB_SET", "BBOX_CTL",
+ "BBOX_ERR_STS", "BBOX_ERR_IDX", "CBOX_DDP_ERR_STS",
+ "BBOX_DAT_RMP", NULL
+};
+
+static char *el_ev7_zbox_subpacket_annotation[] = {
+ "Subpacket Header",
+ "ZBOX(0): DRAM_ERR_STATUS_2 / DRAM_ERR_STATUS_1",
+ "ZBOX(0): DRAM_ERROR_CTL / DRAM_ERR_STATUS_3",
+ "ZBOX(0): DIFT_TIMEOUT / DRAM_ERR_ADR",
+ "ZBOX(0): FRC_ERR_ADR / DRAM_MAPPER_CTL",
+ "ZBOX(0): reserved / DIFT_ERR_STATUS",
+ "ZBOX(1): DRAM_ERR_STATUS_2 / DRAM_ERR_STATUS_1",
+ "ZBOX(1): DRAM_ERROR_CTL / DRAM_ERR_STATUS_3",
+ "ZBOX(1): DIFT_TIMEOUT / DRAM_ERR_ADR",
+ "ZBOX(1): FRC_ERR_ADR / DRAM_MAPPER_CTL",
+ "ZBOX(1): reserved / DIFT_ERR_STATUS",
+ "CBOX_CTL", "CBOX_STP_CTL",
+ "ZBOX(0)_ERROR_PA", "ZBOX(1)_ERROR_PA",
+ "ZBOX(0)_ORED_SYNDROME","ZBOX(1)_ORED_SYNDROME",
+ NULL
+};
+
+static char *el_ev7_rbox_subpacket_annotation[] = {
+ "Subpacket Header", "RBOX_CFG", "RBOX_N_CFG",
+ "RBOX_S_CFG", "RBOX_E_CFG", "RBOX_W_CFG",
+ "RBOX_N_ERR", "RBOX_S_ERR", "RBOX_E_ERR",
+ "RBOX_W_ERR", "RBOX_IO_CFG", "RBOX_IO_ERR",
+ "RBOX_L_ERR", "RBOX_WHOAMI", "RBOX_IMASL",
+ "RBOX_INTQ", "RBOX_INT", NULL
+};
+
+static char *el_ev7_io_subpacket_annotation[] = {
+ "Subpacket Header", "IO_ASIC_REV", "IO_SYS_REV",
+ "IO7_UPH", "HPI_CTL", "CRD_CTL",
+ "HEI_CTL", "PO7_ERROR_SUM","PO7_UNCRR_SYM",
+ "PO7_CRRCT_SYM", "PO7_UGBGE_SYM","PO7_ERR_PKT0",
+ "PO7_ERR_PKT1", "reserved", "reserved",
+ "PO0_ERR_SUM", "PO0_TLB_ERR", "PO0_SPL_COMPLT",
+ "PO0_TRANS_SUM", "PO0_FIRST_ERR","PO0_MULT_ERR",
+ "DM CSR PH", "DM CSR PH", "DM CSR PH",
+ "DM CSR PH", "reserved",
+ "PO1_ERR_SUM", "PO1_TLB_ERR", "PO1_SPL_COMPLT",
+ "PO1_TRANS_SUM", "PO1_FIRST_ERR","PO1_MULT_ERR",
+ "DM CSR PH", "DM CSR PH", "DM CSR PH",
+ "DM CSR PH", "reserved",
+ "PO2_ERR_SUM", "PO2_TLB_ERR", "PO2_SPL_COMPLT",
+ "PO2_TRANS_SUM", "PO2_FIRST_ERR","PO2_MULT_ERR",
+ "DM CSR PH", "DM CSR PH", "DM CSR PH",
+ "DM CSR PH", "reserved",
+ "PO3_ERR_SUM", "PO3_TLB_ERR", "PO3_SPL_COMPLT",
+ "PO3_TRANS_SUM", "PO3_FIRST_ERR","PO3_MULT_ERR",
+ "DM CSR PH", "DM CSR PH", "DM CSR PH",
+ "DM CSR PH", "reserved",
+ NULL
+};
+
+static struct el_subpacket_annotation el_ev7_pal_annotations[] = {
+ SUBPACKET_ANNOTATION(EL_CLASS__PAL,
+ EL_TYPE__PAL__EV7_PROCESSOR,
+ 1,
+ "EV7 Processor Subpacket",
+ el_ev7_processor_subpacket_annotation),
+ SUBPACKET_ANNOTATION(EL_CLASS__PAL,
+ EL_TYPE__PAL__EV7_ZBOX,
+ 1,
+ "EV7 ZBOX Subpacket",
+ el_ev7_zbox_subpacket_annotation),
+ SUBPACKET_ANNOTATION(EL_CLASS__PAL,
+ EL_TYPE__PAL__EV7_RBOX,
+ 1,
+ "EV7 RBOX Subpacket",
+ el_ev7_rbox_subpacket_annotation),
+ SUBPACKET_ANNOTATION(EL_CLASS__PAL,
+ EL_TYPE__PAL__EV7_IO,
+ 1,
+ "EV7 IO Subpacket",
+ el_ev7_io_subpacket_annotation)
+};
+
+static struct el_subpacket *
+ev7_process_pal_subpacket(struct el_subpacket *header)
+{
+ struct ev7_pal_subpacket *packet;
+
+ if (header->class != EL_CLASS__PAL) {
+ printk("%s ** Unexpected header CLASS %d TYPE %d, aborting\n",
+ err_print_prefix,
+ header->class, header->type);
+ return NULL;
+ }
+
+ packet = (struct ev7_pal_subpacket *)header->by_type.raw.data_start;
+
+ switch(header->type) {
+ case EL_TYPE__PAL__LOGOUT_FRAME:
+ printk("%s*** MCHK occurred on LPID %ld (RBOX %lx)\n",
+ err_print_prefix,
+ packet->by_type.logout.whami,
+ packet->by_type.logout.rbox_whami);
+ el_print_timestamp(&packet->by_type.logout.timestamp);
+ printk("%s EXC_ADDR: %016lx\n"
+ " HALT_CODE: %lx\n",
+ err_print_prefix,
+ packet->by_type.logout.exc_addr,
+ packet->by_type.logout.halt_code);
+ el_process_subpackets(header,
+ packet->by_type.logout.subpacket_count);
+ break;
+ default:
+ printk("%s ** PAL TYPE %d SUBPACKET\n",
+ err_print_prefix,
+ header->type);
+ el_annotate_subpacket(header);
+ break;
+ }
+
+ return (struct el_subpacket *)((unsigned long)header + header->length);
+}
+
+struct el_subpacket_handler ev7_pal_subpacket_handler =
+ SUBPACKET_HANDLER_INIT(EL_CLASS__PAL, ev7_process_pal_subpacket);
+
+void
+ev7_register_error_handlers(void)
+{
+ int i;
+
+	for (i = 0;
+	     i < sizeof(el_ev7_pal_annotations) /
+			sizeof(el_ev7_pal_annotations[0]);
+	     i++) {
+ cdl_register_subpacket_annotation(&el_ev7_pal_annotations[i]);
+ }
+ cdl_register_subpacket_handler(&ev7_pal_subpacket_handler);
+}
+
diff --git a/arch/alpha/kernel/err_impl.h b/arch/alpha/kernel/err_impl.h
new file mode 100644
index 000000000000..64e9b73809fa
--- /dev/null
+++ b/arch/alpha/kernel/err_impl.h
@@ -0,0 +1,85 @@
+/*
+ * linux/arch/alpha/kernel/err_impl.h
+ *
+ * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation)
+ *
+ * Contains declarations and macros to support Alpha error handling
+ * implementations.
+ */
+
+union el_timestamp;
+struct el_subpacket;
+struct ev7_lf_subpackets;
+
+struct el_subpacket_annotation {
+ struct el_subpacket_annotation *next;
+ u16 class;
+ u16 type;
+ u16 revision;
+ char *description;
+ char **annotation;
+};
+#define SUBPACKET_ANNOTATION(c, t, r, d, a) {NULL, (c), (t), (r), (d), (a)}
+
+struct el_subpacket_handler {
+ struct el_subpacket_handler *next;
+ u16 class;
+ struct el_subpacket *(*handler)(struct el_subpacket *);
+};
+#define SUBPACKET_HANDLER_INIT(c, h) {NULL, (c), (h)}
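+/*
+ * Usage sketch: a platform defines a handler statically, e.g.
+ *
+ *	struct el_subpacket_handler h =
+ *		SUBPACKET_HANDLER_INIT(EL_CLASS__PAL, my_handler);
+ *
+ * (my_handler is hypothetical) and registers it at init time with
+ * cdl_register_subpacket_handler(); el_process_subpacket() then
+ * dispatches on the subpacket class.
+ */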
+
+/*
 * Manipulate a field from a register given its name. Defines
 * for the LSB (__S - shift count) and bitmask (__M) are required.
+ *
+ * EXTRACT(u, f) - extracts the field and places it at bit position 0
+ * GEN_MASK(f) - creates an in-position mask for the field
+ */
+#define EXTRACT(u, f) (((u) >> f##__S) & f##__M)
+#define GEN_MASK(f) ((u64)f##__M << f##__S)
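+/*
+ * Worked example with hypothetical field defines:
+ *	#define FIELD__S (4)
+ *	#define FIELD__M (0xf)
+ * EXTRACT(0x123, FIELD) == 0x2 and GEN_MASK(FIELD) == 0xf0.
+ */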
+
+/*
+ * err_common.c
+ */
+extern char *err_print_prefix;
+
+extern void mchk_dump_mem(void *, size_t, char **);
+extern void mchk_dump_logout_frame(struct el_common *);
+extern void el_print_timestamp(union el_timestamp *);
+extern void el_process_subpackets(struct el_subpacket *, int);
+extern struct el_subpacket *el_process_subpacket(struct el_subpacket *);
+extern void el_annotate_subpacket(struct el_subpacket *);
+extern void cdl_check_console_data_log(void);
+extern int cdl_register_subpacket_annotation(struct el_subpacket_annotation *);
+extern int cdl_register_subpacket_handler(struct el_subpacket_handler *);
+
+/*
+ * err_ev7.c
+ */
+extern struct ev7_lf_subpackets *
+ev7_collect_logout_frame_subpackets(struct el_subpacket *,
+ struct ev7_lf_subpackets *);
+extern void ev7_register_error_handlers(void);
+extern void ev7_machine_check(u64, u64, struct pt_regs *);
+
+/*
+ * err_ev6.c
+ */
+extern void ev6_register_error_handlers(void);
+extern int ev6_process_logout_frame(struct el_common *, int);
+extern void ev6_machine_check(u64, u64, struct pt_regs *);
+
+/*
+ * err_marvel.c
+ */
+extern void marvel_machine_check(u64, u64, struct pt_regs *);
+extern void marvel_register_error_handlers(void);
+
+/*
+ * err_titan.c
+ */
+extern int titan_process_logout_frame(struct el_common *, int);
+extern void titan_machine_check(u64, u64, struct pt_regs *);
+extern void titan_register_error_handlers(void);
+extern int privateer_process_logout_frame(struct el_common *, int);
+extern void privateer_machine_check(u64, u64, struct pt_regs *);
diff --git a/arch/alpha/kernel/err_marvel.c b/arch/alpha/kernel/err_marvel.c
new file mode 100644
index 000000000000..70b38b1d2af3
--- /dev/null
+++ b/arch/alpha/kernel/err_marvel.c
@@ -0,0 +1,1159 @@
+/*
+ * linux/arch/alpha/kernel/err_marvel.c
+ *
+ * Copyright (C) 2001 Jeff Wiedemeier (Compaq Computer Corporation)
+ *
+ */
+
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+
+#include <asm/io.h>
+#include <asm/console.h>
+#include <asm/core_marvel.h>
+#include <asm/hwrpb.h>
+#include <asm/smp.h>
+#include <asm/err_common.h>
+#include <asm/err_ev7.h>
+
+#include "err_impl.h"
+#include "proto.h"
+
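+/*
+ * The "680 frame" is the system event frame delivered through SCB
+ * vector 0x680 (SCB_Q_SYSEVENT); on Marvel it carries only the
+ * environmental subpackets decoded below.
+ */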
+static void
+marvel_print_680_frame(struct ev7_lf_subpackets *lf_subpackets)
+{
+#ifdef CONFIG_VERBOSE_MCHECK
+ struct ev7_pal_environmental_subpacket *env;
+ struct { int type; char *name; } ev_packets[] = {
+ { EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE,
+ "Ambient Temperature" },
+ { EL_TYPE__PAL__ENV__AIRMOVER_FAN,
+ "AirMover / Fan" },
+ { EL_TYPE__PAL__ENV__VOLTAGE,
+ "Voltage" },
+ { EL_TYPE__PAL__ENV__INTRUSION,
+ "Intrusion" },
+ { EL_TYPE__PAL__ENV__POWER_SUPPLY,
+ "Power Supply" },
+ { EL_TYPE__PAL__ENV__LAN,
+ "LAN" },
+ { EL_TYPE__PAL__ENV__HOT_PLUG,
+ "Hot Plug" },
+ { 0, NULL }
+ };
+ int i;
+
+ for (i = 0; ev_packets[i].type != 0; i++) {
+ env = lf_subpackets->env[ev7_lf_env_index(ev_packets[i].type)];
+ if (!env)
+ continue;
+
+ printk("%s**%s event (cabinet %d, drawer %d)\n",
+ err_print_prefix,
+ ev_packets[i].name,
+ env->cabinet,
+ env->drawer);
+ printk("%s Module Type: 0x%x - Unit ID 0x%x - "
+ "Condition 0x%x\n",
+ err_print_prefix,
+ env->module_type,
+ env->unit_id,
+ env->condition);
+ }
+#endif /* CONFIG_VERBOSE_MCHECK */
+}
+
+static int
+marvel_process_680_frame(struct ev7_lf_subpackets *lf_subpackets, int print)
+{
+ int status = MCHK_DISPOSITION_UNKNOWN_ERROR;
+ int i;
+
+ for (i = ev7_lf_env_index(EL_TYPE__PAL__ENV__AMBIENT_TEMPERATURE);
+ i <= ev7_lf_env_index(EL_TYPE__PAL__ENV__HOT_PLUG);
+ i++) {
+ if (lf_subpackets->env[i])
+ status = MCHK_DISPOSITION_REPORT;
+ }
+
+ if (print)
+ marvel_print_680_frame(lf_subpackets);
+
+ return status;
+}
+
+#ifdef CONFIG_VERBOSE_MCHECK
+
+static void
+marvel_print_err_cyc(u64 err_cyc)
+{
+ static char *packet_desc[] = {
+ "No Error",
+ "UNKNOWN",
+ "1 cycle (1 or 2 flit packet)",
+ "2 cycles (3 flit packet)",
+ "9 cycles (18 flit packet)",
+ "10 cycles (19 flit packet)",
+ "UNKNOWN",
+ "UNKNOWN",
+ "UNKNOWN"
+ };
+
+#define IO7__ERR_CYC__ODD_FLT (1UL << 0)
+#define IO7__ERR_CYC__EVN_FLT (1UL << 1)
+#define IO7__ERR_CYC__PACKET__S (6)
+#define IO7__ERR_CYC__PACKET__M (0x7)
+#define IO7__ERR_CYC__LOC (1UL << 5)
+#define IO7__ERR_CYC__CYCLE__S (2)
+#define IO7__ERR_CYC__CYCLE__M (0x7)
+
+ printk("%s Packet In Error: %s\n"
+ "%s Error in %s, cycle %ld%s%s\n",
+ err_print_prefix,
+ packet_desc[EXTRACT(err_cyc, IO7__ERR_CYC__PACKET)],
+ err_print_prefix,
+ (err_cyc & IO7__ERR_CYC__LOC) ? "DATA" : "HEADER",
+ EXTRACT(err_cyc, IO7__ERR_CYC__CYCLE),
+ (err_cyc & IO7__ERR_CYC__ODD_FLT) ? " [ODD Flit]": "",
+ (err_cyc & IO7__ERR_CYC__EVN_FLT) ? " [Even Flit]": "");
+}
+
+static void
+marvel_print_po7_crrct_sym(u64 crrct_sym)
+{
+#define IO7__PO7_CRRCT_SYM__SYN__S (0)
+#define IO7__PO7_CRRCT_SYM__SYN__M (0x7f)
+#define IO7__PO7_CRRCT_SYM__ERR_CYC__S (7) /* ERR_CYC + ODD_FLT + EVN_FLT */
+#define IO7__PO7_CRRCT_SYM__ERR_CYC__M (0x1ff)
+
+ printk("%s Correctable Error Symptoms:\n"
+ "%s Syndrome: 0x%lx\n",
+ err_print_prefix,
+ err_print_prefix, EXTRACT(crrct_sym, IO7__PO7_CRRCT_SYM__SYN));
+ marvel_print_err_cyc(EXTRACT(crrct_sym, IO7__PO7_CRRCT_SYM__ERR_CYC));
+}
+
+static void
+marvel_print_po7_uncrr_sym(u64 uncrr_sym, u64 valid_mask)
+{
+ static char *clk_names[] = { "_h[0]", "_h[1]", "_n[0]", "_n[1]" };
+ static char *clk_decode[] = {
+ "No Error",
+ "One extra rising edge",
+ "Two extra rising edges",
+ "Lost one clock"
+ };
+ static char *port_names[] = { "Port 0", "Port 1",
+ "Port 2", "Port 3",
+ "Unknown Port", "Unknown Port",
+ "Unknown Port", "Port 7" };
+ int scratch, i;
+
+#define IO7__PO7_UNCRR_SYM__SYN__S (0)
+#define IO7__PO7_UNCRR_SYM__SYN__M (0x7f)
+#define IO7__PO7_UNCRR_SYM__ERR_CYC__S (7) /* ERR_CYC + ODD_FLT... */
+#define IO7__PO7_UNCRR_SYM__ERR_CYC__M (0x1ff) /* ... + EVN_FLT */
+#define IO7__PO7_UNCRR_SYM__CLK__S (16)
+#define IO7__PO7_UNCRR_SYM__CLK__M (0xff)
+#define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__REQ (1UL << 24)
+#define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__RIO (1UL << 25)
+#define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__WIO (1UL << 26)
+#define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__BLK (1UL << 27)
+#define IO7__PO7_UNCRR_SYM__CDT_OVF_TO__NBK (1UL << 28)
+#define IO7__PO7_UNCRR_SYM__OVF__READIO (1UL << 29)
+#define IO7__PO7_UNCRR_SYM__OVF__WRITEIO (1UL << 30)
+#define IO7__PO7_UNCRR_SYM__OVF__FWD (1UL << 31)
+#define IO7__PO7_UNCRR_SYM__VICTIM_SP__S (32)
+#define IO7__PO7_UNCRR_SYM__VICTIM_SP__M (0xff)
+#define IO7__PO7_UNCRR_SYM__DETECT_SP__S (40)
+#define IO7__PO7_UNCRR_SYM__DETECT_SP__M (0xff)
+#define IO7__PO7_UNCRR_SYM__STRV_VTR__S (48)
+#define IO7__PO7_UNCRR_SYM__STRV_VTR__M (0x3ff)
+
+#define IO7__STRV_VTR__LSI__INTX__S (0)
+#define IO7__STRV_VTR__LSI__INTX__M (0x3)
+#define IO7__STRV_VTR__LSI__SLOT__S (2)
+#define IO7__STRV_VTR__LSI__SLOT__M (0x7)
+#define IO7__STRV_VTR__LSI__BUS__S (5)
+#define IO7__STRV_VTR__LSI__BUS__M (0x3)
+#define IO7__STRV_VTR__MSI__INTNUM__S (0)
+#define IO7__STRV_VTR__MSI__INTNUM__M (0x1ff)
+#define IO7__STRV_VTR__IS_MSI (1UL << 9)
+
+ printk("%s Uncorrectable Error Symptoms:\n", err_print_prefix);
+ uncrr_sym &= valid_mask;
+
+ if (EXTRACT(valid_mask, IO7__PO7_UNCRR_SYM__SYN))
+ printk("%s Syndrome: 0x%lx\n",
+ err_print_prefix,
+ EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__SYN));
+
+ if (EXTRACT(valid_mask, IO7__PO7_UNCRR_SYM__ERR_CYC))
+ marvel_print_err_cyc(EXTRACT(uncrr_sym,
+ IO7__PO7_UNCRR_SYM__ERR_CYC));
+
+ scratch = EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__CLK);
+ for (i = 0; i < 4; i++, scratch >>= 2) {
+ if (scratch & 0x3)
+ printk("%s Clock %s: %s\n",
+ err_print_prefix,
+ clk_names[i], clk_decode[scratch & 0x3]);
+ }
+
+ if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__REQ)
+ printk("%s REQ Credit Timeout or Overflow\n",
+ err_print_prefix);
+ if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__RIO)
+ printk("%s RIO Credit Timeout or Overflow\n",
+ err_print_prefix);
+ if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__WIO)
+ printk("%s WIO Credit Timeout or Overflow\n",
+ err_print_prefix);
+ if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__BLK)
+ printk("%s BLK Credit Timeout or Overflow\n",
+ err_print_prefix);
+ if (uncrr_sym & IO7__PO7_UNCRR_SYM__CDT_OVF_TO__NBK)
+ printk("%s NBK Credit Timeout or Overflow\n",
+ err_print_prefix);
+
+ if (uncrr_sym & IO7__PO7_UNCRR_SYM__OVF__READIO)
+ printk("%s Read I/O Buffer Overflow\n",
+ err_print_prefix);
+ if (uncrr_sym & IO7__PO7_UNCRR_SYM__OVF__WRITEIO)
+ printk("%s Write I/O Buffer Overflow\n",
+ err_print_prefix);
+ if (uncrr_sym & IO7__PO7_UNCRR_SYM__OVF__FWD)
+ printk("%s FWD Buffer Overflow\n",
+ err_print_prefix);
+
+ if ((scratch = EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__VICTIM_SP))) {
+ int lost = scratch & (1UL << 4);
+ scratch &= ~lost;
+ for (i = 0; i < 8; i++, scratch >>= 1) {
+ if (!(scratch & 1))
+ continue;
+			printk("%s	    Error Response sent to %s\n",
+			       err_print_prefix, port_names[i]);
+ }
+ if (lost)
+ printk("%s Lost Error sent somewhere else\n",
+ err_print_prefix);
+ }
+
+ if ((scratch = EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__DETECT_SP))) {
+ for (i = 0; i < 8; i++, scratch >>= 1) {
+ if (!(scratch & 1))
+ continue;
+			printk("%s	    Error Reported by %s\n",
+			       err_print_prefix, port_names[i]);
+ }
+ }
+
+ if (EXTRACT(valid_mask, IO7__PO7_UNCRR_SYM__STRV_VTR)) {
+ char starvation_message[80];
+
+ scratch = EXTRACT(uncrr_sym, IO7__PO7_UNCRR_SYM__STRV_VTR);
+ if (scratch & IO7__STRV_VTR__IS_MSI)
+ sprintf(starvation_message,
+ "MSI Interrupt 0x%x",
+ EXTRACT(scratch, IO7__STRV_VTR__MSI__INTNUM));
+ else
+			sprintf(starvation_message,
+				"LSI INT%c for Bus:Slot (%d:%d)",
+ 'A' + EXTRACT(scratch,
+ IO7__STRV_VTR__LSI__INTX),
+ EXTRACT(scratch, IO7__STRV_VTR__LSI__BUS),
+ EXTRACT(scratch, IO7__STRV_VTR__LSI__SLOT));
+
+		printk("%s	Starvation Interrupt Triggered By: %s\n",
+ err_print_prefix, starvation_message);
+ }
+}
+
+static void
+marvel_print_po7_ugbge_sym(u64 ugbge_sym)
+{
+ char opcode_str[10];
+
+#define IO7__PO7_UGBGE_SYM__UPH_PKT_OFF__S (6)
+#define IO7__PO7_UGBGE_SYM__UPH_PKT_OFF__M (0xfffffffful)
+#define IO7__PO7_UGBGE_SYM__UPH_OPCODE__S (40)
+#define IO7__PO7_UGBGE_SYM__UPH_OPCODE__M (0xff)
+#define IO7__PO7_UGBGE_SYM__UPH_SRC_PORT__S (48)
+#define IO7__PO7_UGBGE_SYM__UPH_SRC_PORT__M (0xf)
+#define IO7__PO7_UGBGE_SYM__UPH_DEST_PID__S (52)
+#define IO7__PO7_UGBGE_SYM__UPH_DEST_PID__M (0x7ff)
+#define IO7__PO7_UGBGE_SYM__VALID (1UL << 63)
+
+ if (!(ugbge_sym & IO7__PO7_UGBGE_SYM__VALID))
+ return;
+
+ switch(EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE)) {
+ case 0x51:
+ sprintf(opcode_str, "Wr32");
+ break;
+ case 0x50:
+ sprintf(opcode_str, "WrQW");
+ break;
+ case 0x54:
+ sprintf(opcode_str, "WrIPR");
+ break;
+ case 0xD8:
+ sprintf(opcode_str, "Victim");
+ break;
+ case 0xC5:
+ sprintf(opcode_str, "BlkIO");
+ break;
+ default:
+		sprintf(opcode_str, "0x%lx",
+ EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE));
+ break;
+ }
+
+ printk("%s Up Hose Garbage Symptom:\n"
+ "%s Source Port: %ld - Dest PID: %ld - OpCode: %s\n",
+ err_print_prefix,
+ err_print_prefix,
+ EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_SRC_PORT),
+ EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_DEST_PID),
+ opcode_str);
+
+ if (0xC5 != EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_OPCODE))
+ printk("%s Packet Offset 0x%08lx\n",
+ err_print_prefix,
+ EXTRACT(ugbge_sym, IO7__PO7_UGBGE_SYM__UPH_PKT_OFF));
+}
+
+static void
+marvel_print_po7_err_sum(struct ev7_pal_io_subpacket *io)
+{
+ u64 uncrr_sym_valid = 0;
+
+#define IO7__PO7_ERRSUM__CR_SBE (1UL << 32)
+#define IO7__PO7_ERRSUM__CR_SBE2 (1UL << 33)
+#define IO7__PO7_ERRSUM__CR_PIO_WBYTE (1UL << 34)
+#define IO7__PO7_ERRSUM__CR_CSR_NXM (1UL << 35)
+#define IO7__PO7_ERRSUM__CR_RPID_ACV (1UL << 36)
+#define IO7__PO7_ERRSUM__CR_RSP_NXM (1UL << 37)
+#define IO7__PO7_ERRSUM__CR_ERR_RESP (1UL << 38)
+#define IO7__PO7_ERRSUM__CR_CLK_DERR (1UL << 39)
+#define IO7__PO7_ERRSUM__CR_DAT_DBE (1UL << 40)
+#define IO7__PO7_ERRSUM__CR_DAT_GRBG (1UL << 41)
+#define IO7__PO7_ERRSUM__MAF_TO (1UL << 42)
+#define IO7__PO7_ERRSUM__UGBGE (1UL << 43)
+#define IO7__PO7_ERRSUM__UN_MAF_LOST (1UL << 44)
+#define IO7__PO7_ERRSUM__UN_PKT_OVF (1UL << 45)
+#define IO7__PO7_ERRSUM__UN_CDT_OVF (1UL << 46)
+#define IO7__PO7_ERRSUM__UN_DEALLOC (1UL << 47)
+#define IO7__PO7_ERRSUM__BH_CDT_TO (1UL << 51)
+#define IO7__PO7_ERRSUM__BH_CLK_HDR (1UL << 52)
+#define IO7__PO7_ERRSUM__BH_DBE_HDR (1UL << 53)
+#define IO7__PO7_ERRSUM__BH_GBG_HDR (1UL << 54)
+#define IO7__PO7_ERRSUM__BH_BAD_CMD (1UL << 55)
+#define IO7__PO7_ERRSUM__HLT_INT (1UL << 56)
+#define IO7__PO7_ERRSUM__HP_INT (1UL << 57)
+#define IO7__PO7_ERRSUM__CRD_INT (1UL << 58)
+#define IO7__PO7_ERRSUM__STV_INT (1UL << 59)
+#define IO7__PO7_ERRSUM__HRD_INT (1UL << 60)
+#define IO7__PO7_ERRSUM__BH_SUM (1UL << 61)
+#define IO7__PO7_ERRSUM__ERR_LST (1UL << 62)
+#define IO7__PO7_ERRSUM__ERR_VALID (1UL << 63)
+
+#define IO7__PO7_ERRSUM__ERR_MASK (IO7__PO7_ERRSUM__ERR_VALID | \
+ IO7__PO7_ERRSUM__CR_SBE)
+
+ /*
+ * Single bit errors aren't covered by ERR_VALID.
+ */
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_SBE) {
+ printk("%s %sSingle Bit Error(s) detected/corrected\n",
+ err_print_prefix,
+ (io->po7_error_sum & IO7__PO7_ERRSUM__CR_SBE2)
+ ? "Multiple " : "");
+ marvel_print_po7_crrct_sym(io->po7_crrct_sym);
+ }
+
+ /*
+ * Neither are the interrupt status bits
+ */
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__HLT_INT)
+		printk("%s	Halt Interrupt posted\n", err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__HP_INT) {
+		printk("%s	Hot Plug Event Interrupt posted\n",
+ err_print_prefix);
+ uncrr_sym_valid |= GEN_MASK(IO7__PO7_UNCRR_SYM__DETECT_SP);
+ }
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__CRD_INT)
+		printk("%s	Correctable Error Interrupt posted\n",
+ err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__STV_INT) {
+		printk("%s	Starvation Interrupt posted\n", err_print_prefix);
+ uncrr_sym_valid |= GEN_MASK(IO7__PO7_UNCRR_SYM__STRV_VTR);
+ }
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__HRD_INT) {
+		printk("%s	Hard Error Interrupt posted\n", err_print_prefix);
+ uncrr_sym_valid |= GEN_MASK(IO7__PO7_UNCRR_SYM__DETECT_SP);
+ }
+
+ /*
+ * Everything else is valid only with ERR_VALID, so skip to the end
+ * (uncrr_sym check) unless ERR_VALID is set.
+ */
+ if (!(io->po7_error_sum & IO7__PO7_ERRSUM__ERR_VALID))
+ goto check_uncrr_sym;
+
+ /*
+ * Since ERR_VALID is set, VICTIM_SP in uncrr_sym is valid.
+ * For bits [29:0] to also be valid, the following bits must
+ * not be set:
+ * CR_PIO_WBYTE CR_CSR_NXM CR_RSP_NXM
+ * CR_ERR_RESP MAF_TO
+ */
+ uncrr_sym_valid |= GEN_MASK(IO7__PO7_UNCRR_SYM__VICTIM_SP);
+ if (!(io->po7_error_sum & (IO7__PO7_ERRSUM__CR_PIO_WBYTE |
+ IO7__PO7_ERRSUM__CR_CSR_NXM |
+ IO7__PO7_ERRSUM__CR_RSP_NXM |
+ IO7__PO7_ERRSUM__CR_ERR_RESP |
+ IO7__PO7_ERRSUM__MAF_TO)))
+ uncrr_sym_valid |= 0x3ffffffful;
+
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_PIO_WBYTE)
+ printk("%s Write byte into IO7 CSR\n", err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_CSR_NXM)
+ printk("%s PIO to non-existent CSR\n", err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_RPID_ACV)
+ printk("%s Bus Requester PID (Access Violation)\n",
+ err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_RSP_NXM)
+ printk("%s Received NXM response from EV7\n",
+ err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_ERR_RESP)
+ printk("%s Received ERROR RESPONSE\n", err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_CLK_DERR)
+ printk("%s Clock error on data flit\n", err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_DAT_DBE)
+ printk("%s Double Bit Error Data Error Detected\n",
+ err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__CR_DAT_GRBG)
+ printk("%s Garbage Encoding Detected on the data\n",
+ err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__UGBGE) {
+ printk("%s Garbage Encoding sent up hose\n",
+ err_print_prefix);
+ marvel_print_po7_ugbge_sym(io->po7_ugbge_sym);
+ }
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_MAF_LOST)
+ printk("%s Orphan response (unexpected response)\n",
+ err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_PKT_OVF)
+ printk("%s Down hose packet overflow\n", err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_CDT_OVF)
+ printk("%s Down hose credit overflow\n", err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__UN_DEALLOC)
+ printk("%s Unexpected or bad dealloc field\n",
+ err_print_prefix);
+
+ /*
+ * The black hole events.
+ */
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__MAF_TO)
+ printk("%s BLACK HOLE: Timeout for all responses\n",
+ err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_CDT_TO)
+ printk("%s BLACK HOLE: Credit Timeout\n", err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_CLK_HDR)
+ printk("%s BLACK HOLE: Clock check on header\n",
+ err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_DBE_HDR)
+ printk("%s BLACK HOLE: Uncorrectable Error on header\n",
+ err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_GBG_HDR)
+ printk("%s BLACK HOLE: Garbage on header\n",
+ err_print_prefix);
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__BH_BAD_CMD)
+ printk("%s BLACK HOLE: Bad EV7 command\n",
+ err_print_prefix);
+
+ if (io->po7_error_sum & IO7__PO7_ERRSUM__ERR_LST)
+ printk("%s Lost Error\n", err_print_prefix);
+
+ printk("%s Failing Packet:\n"
+ "%s Cycle 1: %016lx\n"
+ "%s Cycle 2: %016lx\n",
+ err_print_prefix,
+ err_print_prefix, io->po7_err_pkt0,
+ err_print_prefix, io->po7_err_pkt1);
+ /*
+ * If there are any valid bits in UNCRR sym for this err,
+ * print UNCRR_SYM as well.
+ */
+check_uncrr_sym:
+ if (uncrr_sym_valid)
+ marvel_print_po7_uncrr_sym(io->po7_uncrr_sym, uncrr_sym_valid);
+}
+
+static void
+marvel_print_pox_tlb_err(u64 tlb_err)
+{
+ static char *tlb_errors[] = {
+ "No Error",
+ "North Port Signaled Error fetching TLB entry",
+ "PTE invalid or UCC or GBG error on this entry",
+ "Address did not hit any DMA window"
+ };
+
+#define IO7__POX_TLBERR__ERR_VALID (1UL << 63)
+#define IO7__POX_TLBERR__ERRCODE__S (0)
+#define IO7__POX_TLBERR__ERRCODE__M (0x3)
+#define IO7__POX_TLBERR__ERR_TLB_PTR__S (3)
+#define IO7__POX_TLBERR__ERR_TLB_PTR__M (0x7)
+#define IO7__POX_TLBERR__FADDR__S (6)
+#define IO7__POX_TLBERR__FADDR__M (0x3fffffffffful)
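+/*
+ * FADDR holds the faulting address shifted right by 6 bits; the
+ * print below shifts it back to a byte address.
+ */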
+
+ if (!(tlb_err & IO7__POX_TLBERR__ERR_VALID))
+ return;
+
+ printk("%s TLB Error on index 0x%lx:\n"
+ "%s - %s\n"
+ "%s - Addr: 0x%016lx\n",
+ err_print_prefix,
+ EXTRACT(tlb_err, IO7__POX_TLBERR__ERR_TLB_PTR),
+ err_print_prefix,
+ tlb_errors[EXTRACT(tlb_err, IO7__POX_TLBERR__ERRCODE)],
+ err_print_prefix,
+ EXTRACT(tlb_err, IO7__POX_TLBERR__FADDR) << 6);
+}
+
+static void
+marvel_print_pox_spl_cmplt(u64 spl_cmplt)
+{
+ char message[80];
+
+#define IO7__POX_SPLCMPLT__MESSAGE__S (0)
+#define IO7__POX_SPLCMPLT__MESSAGE__M (0x0fffffffful)
+#define IO7__POX_SPLCMPLT__SOURCE_BUS__S (40)
+#define IO7__POX_SPLCMPLT__SOURCE_BUS__M (0xfful)
+#define IO7__POX_SPLCMPLT__SOURCE_DEV__S (35)
+#define IO7__POX_SPLCMPLT__SOURCE_DEV__M (0x1ful)
+#define IO7__POX_SPLCMPLT__SOURCE_FUNC__S (32)
+#define IO7__POX_SPLCMPLT__SOURCE_FUNC__M (0x07ul)
+
+#define IO7__POX_SPLCMPLT__MSG_CLASS__S (28)
+#define IO7__POX_SPLCMPLT__MSG_CLASS__M (0xf)
+#define IO7__POX_SPLCMPLT__MSG_INDEX__S (20)
+#define IO7__POX_SPLCMPLT__MSG_INDEX__M (0xff)
+#define IO7__POX_SPLCMPLT__MSG_CLASSINDEX__S (20)
+#define IO7__POX_SPLCMPLT__MSG_CLASSINDEX__M (0xfff)
+#define IO7__POX_SPLCMPLT__REM_LOWER_ADDR__S (12)
+#define IO7__POX_SPLCMPLT__REM_LOWER_ADDR__M (0x7f)
+#define IO7__POX_SPLCMPLT__REM_BYTE_COUNT__S (0)
+#define IO7__POX_SPLCMPLT__REM_BYTE_COUNT__M (0xfff)
+
+ printk("%s Split Completion Error:\n"
+ "%s Source (Bus:Dev:Func): %ld:%ld:%ld\n",
+ err_print_prefix,
+ err_print_prefix,
+ EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_BUS),
+ EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_DEV),
+ EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__SOURCE_FUNC));
+
+ switch(EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__MSG_CLASSINDEX)) {
+ case 0x000:
+ sprintf(message, "Normal completion");
+ break;
+ case 0x100:
+ sprintf(message, "Bridge - Master Abort");
+ break;
+ case 0x101:
+ sprintf(message, "Bridge - Target Abort");
+ break;
+ case 0x102:
+ sprintf(message, "Bridge - Uncorrectable Write Data Error");
+ break;
+ case 0x200:
+ sprintf(message, "Byte Count Out of Range");
+ break;
+ case 0x201:
+ sprintf(message, "Uncorrectable Split Write Data Error");
+ break;
+	default:
+		sprintf(message, "%08lx",
+			EXTRACT(spl_cmplt, IO7__POX_SPLCMPLT__MESSAGE));
+ break;
+ }
+ printk("%s Message: %s\n", err_print_prefix, message);
+}
+
+static void
+marvel_print_pox_trans_sum(u64 trans_sum)
+{
+ char *pcix_cmd[] = { "Interrupt Acknowledge",
+ "Special Cycle",
+ "I/O Read",
+ "I/O Write",
+ "Reserved",
+ "Reserved / Device ID Message",
+ "Memory Read",
+ "Memory Write",
+ "Reserved / Alias to Memory Read Block",
+ "Reserved / Alias to Memory Write Block",
+ "Configuration Read",
+ "Configuration Write",
+ "Memory Read Multiple / Split Completion",
+ "Dual Address Cycle",
+ "Memory Read Line / Memory Read Block",
+ "Memory Write and Invalidate / Memory Write Block"
+ };
+
+#define IO7__POX_TRANSUM__PCI_ADDR__S (0)
+#define IO7__POX_TRANSUM__PCI_ADDR__M (0x3fffffffffffful)
+#define IO7__POX_TRANSUM__DAC (1UL << 50)
+#define IO7__POX_TRANSUM__PCIX_MASTER_SLOT__S (52)
+#define IO7__POX_TRANSUM__PCIX_MASTER_SLOT__M (0xf)
+#define IO7__POX_TRANSUM__PCIX_CMD__S (56)
+#define IO7__POX_TRANSUM__PCIX_CMD__M (0xf)
+#define IO7__POX_TRANSUM__ERR_VALID (1UL << 63)
+
+ if (!(trans_sum & IO7__POX_TRANSUM__ERR_VALID))
+ return;
+
+ printk("%s Transaction Summary:\n"
+ "%s Command: 0x%lx - %s\n"
+ "%s Address: 0x%016lx%s\n"
+ "%s PCI-X Master Slot: 0x%lx\n",
+ err_print_prefix,
+ err_print_prefix,
+ EXTRACT(trans_sum, IO7__POX_TRANSUM__PCIX_CMD),
+ pcix_cmd[EXTRACT(trans_sum, IO7__POX_TRANSUM__PCIX_CMD)],
+ err_print_prefix,
+ EXTRACT(trans_sum, IO7__POX_TRANSUM__PCI_ADDR),
+ (trans_sum & IO7__POX_TRANSUM__DAC) ? " (DAC)" : "",
+ err_print_prefix,
+ EXTRACT(trans_sum, IO7__POX_TRANSUM__PCIX_MASTER_SLOT));
+}
+
+static void
+marvel_print_pox_err(u64 err_sum, struct ev7_pal_io_one_port *port)
+{
+#define IO7__POX_ERRSUM__AGP_REQQ_OVFL (1UL << 4)
+#define IO7__POX_ERRSUM__AGP_SYNC_ERR (1UL << 5)
+#define IO7__POX_ERRSUM__MRETRY_TO (1UL << 6)
+#define IO7__POX_ERRSUM__PCIX_UX_SPL (1UL << 7)
+#define IO7__POX_ERRSUM__PCIX_SPLIT_TO (1UL << 8)
+#define IO7__POX_ERRSUM__PCIX_DISCARD_SPL (1UL << 9)
+#define IO7__POX_ERRSUM__DMA_RD_TO (1UL << 10)
+#define IO7__POX_ERRSUM__CSR_NXM_RD (1UL << 11)
+#define IO7__POX_ERRSUM__CSR_NXM_WR (1UL << 12)
+#define IO7__POX_ERRSUM__DMA_TO (1UL << 13)
+#define IO7__POX_ERRSUM__ALL_MABORTS (1UL << 14)
+#define IO7__POX_ERRSUM__MABORT (1UL << 15)
+#define IO7__POX_ERRSUM__MABORT_MASK (IO7__POX_ERRSUM__ALL_MABORTS|\
+ IO7__POX_ERRSUM__MABORT)
+#define IO7__POX_ERRSUM__PT_TABORT (1UL << 16)
+#define IO7__POX_ERRSUM__PM_TABORT (1UL << 17)
+#define IO7__POX_ERRSUM__TABORT_MASK (IO7__POX_ERRSUM__PT_TABORT | \
+ IO7__POX_ERRSUM__PM_TABORT)
+#define IO7__POX_ERRSUM__SERR (1UL << 18)
+#define IO7__POX_ERRSUM__ADDRERR_STB (1UL << 19)
+#define IO7__POX_ERRSUM__DETECTED_SERR (1UL << 20)
+#define IO7__POX_ERRSUM__PERR (1UL << 21)
+#define IO7__POX_ERRSUM__DATAERR_STB_NIOW (1UL << 22)
+#define IO7__POX_ERRSUM__DETECTED_PERR (1UL << 23)
+#define IO7__POX_ERRSUM__PM_PERR (1UL << 24)
+#define IO7__POX_ERRSUM__PT_SCERROR (1UL << 26)
+#define IO7__POX_ERRSUM__HUNG_BUS (1UL << 28)
+#define IO7__POX_ERRSUM__UPE_ERROR__S (51)
+#define IO7__POX_ERRSUM__UPE_ERROR__M (0xffUL)
+#define IO7__POX_ERRSUM__UPE_ERROR GEN_MASK(IO7__POX_ERRSUM__UPE_ERROR)
+#define IO7__POX_ERRSUM__TLB_ERR (1UL << 59)
+#define IO7__POX_ERRSUM__ERR_VALID (1UL << 63)
+
+#define IO7__POX_ERRSUM__TRANS_SUM__MASK (IO7__POX_ERRSUM__MRETRY_TO | \
+ IO7__POX_ERRSUM__PCIX_UX_SPL | \
+ IO7__POX_ERRSUM__PCIX_SPLIT_TO | \
+ IO7__POX_ERRSUM__DMA_TO | \
+ IO7__POX_ERRSUM__MABORT_MASK | \
+ IO7__POX_ERRSUM__TABORT_MASK | \
+ IO7__POX_ERRSUM__SERR | \
+ IO7__POX_ERRSUM__ADDRERR_STB | \
+ IO7__POX_ERRSUM__PERR | \
+ IO7__POX_ERRSUM__DATAERR_STB_NIOW |\
+ IO7__POX_ERRSUM__DETECTED_PERR | \
+ IO7__POX_ERRSUM__PM_PERR | \
+ IO7__POX_ERRSUM__PT_SCERROR | \
+ IO7__POX_ERRSUM__UPE_ERROR)
+
+ if (!(err_sum & IO7__POX_ERRSUM__ERR_VALID))
+ return;
+
+ /*
+ * First the transaction summary errors
+ */
+ if (err_sum & IO7__POX_ERRSUM__MRETRY_TO)
+ printk("%s IO7 Master Retry Timeout expired\n",
+ err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__PCIX_UX_SPL)
+ printk("%s Unexpected Split Completion\n",
+ err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__PCIX_SPLIT_TO)
+ printk("%s IO7 Split Completion Timeout expired\n",
+ err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__DMA_TO)
+ printk("%s Hung bus during DMA transaction\n",
+ err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__MABORT_MASK)
+ printk("%s Master Abort\n", err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__PT_TABORT)
+ printk("%s IO7 Asserted Target Abort\n", err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__PM_TABORT)
+ printk("%s IO7 Received Target Abort\n", err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__ADDRERR_STB) {
+ printk("%s Address or PCI-X Attribute Parity Error\n",
+ err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__SERR)
+ printk("%s IO7 Asserted SERR\n", err_print_prefix);
+ }
+ if (err_sum & IO7__POX_ERRSUM__PERR) {
+ if (err_sum & IO7__POX_ERRSUM__DATAERR_STB_NIOW)
+ printk("%s IO7 Detected Data Parity Error\n",
+ err_print_prefix);
+ else
+ printk("%s Split Completion Response with "
+ "Parity Error\n", err_print_prefix);
+ }
+ if (err_sum & IO7__POX_ERRSUM__DETECTED_PERR)
+ printk("%s PERR detected\n", err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__PM_PERR)
+ printk("%s PERR while IO7 is master\n", err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__PT_SCERROR) {
+ printk("%s IO7 Received Split Completion Error message\n",
+ err_print_prefix);
+ marvel_print_pox_spl_cmplt(port->pox_spl_cmplt);
+ }
+ if (err_sum & IO7__POX_ERRSUM__UPE_ERROR) {
+ unsigned int upe_error = EXTRACT(err_sum,
+ IO7__POX_ERRSUM__UPE_ERROR);
+ int i;
+		static char *upe_errors[] = {
+			"Parity Error on MSI write data",
+			"MSI read (MSI window is write only)",
+ "TLB - Invalid WR transaction",
+ "TLB - Invalid RD transaction",
+ "DMA - WR error (see north port)",
+ "DMA - RD error (see north port)",
+ "PPR - WR error (see north port)",
+ "PPR - RD error (see north port)"
+ };
+
+ printk("%s UPE Error:\n", err_print_prefix);
+ for (i = 0; i < 8; i++) {
+ if (upe_error & (1 << i))
+ printk("%s %s\n", err_print_prefix,
+ upe_errors[i]);
+ }
+ }
+
+ /*
+ * POx_TRANS_SUM, if appropriate.
+ */
+ if (err_sum & IO7__POX_ERRSUM__TRANS_SUM__MASK)
+ marvel_print_pox_trans_sum(port->pox_trans_sum);
+
+ /*
+ * Then TLB_ERR.
+ */
+ if (err_sum & IO7__POX_ERRSUM__TLB_ERR) {
+ printk("%s TLB ERROR\n", err_print_prefix);
+ marvel_print_pox_tlb_err(port->pox_tlb_err);
+ }
+
+ /*
+ * And the single bit status errors.
+ */
+ if (err_sum & IO7__POX_ERRSUM__AGP_REQQ_OVFL)
+ printk("%s AGP Request Queue Overflow\n", err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__AGP_SYNC_ERR)
+ printk("%s AGP Sync Error\n", err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__PCIX_DISCARD_SPL)
+ printk("%s Discarded split completion\n", err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__DMA_RD_TO)
+ printk("%s DMA Read Timeout\n", err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__CSR_NXM_RD)
+ printk("%s CSR NXM READ\n", err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__CSR_NXM_WR)
+ printk("%s CSR NXM WRITE\n", err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__DETECTED_SERR)
+ printk("%s SERR detected\n", err_print_prefix);
+ if (err_sum & IO7__POX_ERRSUM__HUNG_BUS)
+ printk("%s HUNG BUS detected\n", err_print_prefix);
+}
+
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+static struct ev7_pal_io_subpacket *
+marvel_find_io7_with_error(struct ev7_lf_subpackets *lf_subpackets)
+{
+ struct ev7_pal_io_subpacket *io = lf_subpackets->io;
+ struct io7 *io7;
+ int i;
+
+ /*
+ * Caller must provide the packet to fill
+ */
+ if (!io)
+ return NULL;
+
+ /*
+ * Fill the subpacket with the console's standard fill pattern
+ */
+ memset(io, 0x55, sizeof(*io));
+
+ for (io7 = NULL; NULL != (io7 = marvel_next_io7(io7)); ) {
+ unsigned long err_sum = 0;
+
+ err_sum |= io7->csrs->PO7_ERROR_SUM.csr;
+ for (i = 0; i < IO7_NUM_PORTS; i++) {
+ if (!io7->ports[i].enabled)
+ continue;
+ err_sum |= io7->ports[i].csrs->POx_ERR_SUM.csr;
+ }
+
+ /*
+ * Is there at least one error?
+ */
+ if (err_sum & (1UL << 63))
+ break;
+ }
+
+ /*
+ * Did we find an IO7 with an error?
+ */
+ if (!io7)
+ return NULL;
+
+ /*
+ * We have an IO7 with an error.
+ *
+ * Fill in the IO subpacket.
+ */
+ io->io_asic_rev = io7->csrs->IO_ASIC_REV.csr;
+ io->io_sys_rev = io7->csrs->IO_SYS_REV.csr;
+ io->io7_uph = io7->csrs->IO7_UPH.csr;
+ io->hpi_ctl = io7->csrs->HPI_CTL.csr;
+ io->crd_ctl = io7->csrs->CRD_CTL.csr;
+ io->hei_ctl = io7->csrs->HEI_CTL.csr;
+ io->po7_error_sum = io7->csrs->PO7_ERROR_SUM.csr;
+ io->po7_uncrr_sym = io7->csrs->PO7_UNCRR_SYM.csr;
+ io->po7_crrct_sym = io7->csrs->PO7_CRRCT_SYM.csr;
+ io->po7_ugbge_sym = io7->csrs->PO7_UGBGE_SYM.csr;
+ io->po7_err_pkt0 = io7->csrs->PO7_ERR_PKT[0].csr;
+ io->po7_err_pkt1 = io7->csrs->PO7_ERR_PKT[1].csr;
+
+ for (i = 0; i < IO7_NUM_PORTS; i++) {
+ io7_ioport_csrs *csrs = io7->ports[i].csrs;
+
+ if (!io7->ports[i].enabled)
+ continue;
+
+ io->ports[i].pox_err_sum = csrs->POx_ERR_SUM.csr;
+ io->ports[i].pox_tlb_err = csrs->POx_TLB_ERR.csr;
+ io->ports[i].pox_spl_cmplt = csrs->POx_SPL_COMPLT.csr;
+ io->ports[i].pox_trans_sum = csrs->POx_TRANS_SUM.csr;
+ io->ports[i].pox_first_err = csrs->POx_FIRST_ERR.csr;
+ io->ports[i].pox_mult_err = csrs->POx_MULT_ERR.csr;
+ io->ports[i].pox_dm_source = csrs->POx_DM_SOURCE.csr;
+ io->ports[i].pox_dm_dest = csrs->POx_DM_DEST.csr;
+ io->ports[i].pox_dm_size = csrs->POx_DM_SIZE.csr;
+ io->ports[i].pox_dm_ctrl = csrs->POx_DM_CTRL.csr;
+
+ /*
+ * Ack this port's errors, if any. POx_ERR_SUM must be last.
+ *
+ * Most of the error registers get cleared and unlocked when
+ * the associated bits in POx_ERR_SUM are cleared (by writing
+ * 1). POx_TLB_ERR is an exception and must be explicitly
+ * cleared.
+ */
+ csrs->POx_TLB_ERR.csr = io->ports[i].pox_tlb_err;
+ csrs->POx_ERR_SUM.csr = io->ports[i].pox_err_sum;
+ mb();
+ csrs->POx_ERR_SUM.csr;
+ }
+
+ /*
+ * Ack any port 7 error(s).
+ */
+ io7->csrs->PO7_ERROR_SUM.csr = io->po7_error_sum;
+ mb();
+ io7->csrs->PO7_ERROR_SUM.csr;
+
+ /*
+ * Correct the io7_pid.
+ */
+ lf_subpackets->io_pid = io7->pe;
+
+ return io;
+}
+
+static int
+marvel_process_io_error(struct ev7_lf_subpackets *lf_subpackets, int print)
+{
+ int status = MCHK_DISPOSITION_UNKNOWN_ERROR;
+
+#ifdef CONFIG_VERBOSE_MCHECK
+ struct ev7_pal_io_subpacket *io = lf_subpackets->io;
+ int i;
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+#define MARVEL_IO_ERR_VALID(x) ((x) & (1UL << 63))
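+/* Bit 63 is the ERR_VALID bit of the PO7/POx error-summary CSRs. */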
+
+ if (!lf_subpackets->logout || !lf_subpackets->io)
+ return status;
+
+ /*
+ * The PALcode only builds an IO subpacket if there is a
+ * locally connected IO7. In the cases of
+ * 1) a uniprocessor kernel
+	 * 2) an mp kernel before the local secondary has called in,
+	 * error interrupts are all directed to the primary processor.
+	 * In that case, we may not have an IO subpacket at all and, even
+	 * if we do, it may not be the right one.
+ *
+ * If the RBOX indicates an I/O error interrupt, make sure we have
+ * the correct IO7 information. If we don't have an IO subpacket
+ * or it's the wrong one, try to find the right one.
+ *
+ * RBOX I/O error interrupts are indicated by RBOX_INT<29> and
+ * RBOX_INT<10>.
+ */
+ if ((lf_subpackets->io->po7_error_sum & (1UL << 32)) ||
+ ((lf_subpackets->io->po7_error_sum |
+ lf_subpackets->io->ports[0].pox_err_sum |
+ lf_subpackets->io->ports[1].pox_err_sum |
+ lf_subpackets->io->ports[2].pox_err_sum |
+ lf_subpackets->io->ports[3].pox_err_sum) & (1UL << 63))) {
+ /*
+ * Either we have no IO subpacket or no error is
+		 * indicated in the one we do have. Try to find the
+ * one with the error.
+ */
+ if (!marvel_find_io7_with_error(lf_subpackets))
+ return status;
+ }
+
+ /*
+ * We have an IO7 indicating an error - we're going to report it
+ */
+ status = MCHK_DISPOSITION_REPORT;
+
+#ifdef CONFIG_VERBOSE_MCHECK
+
+ if (!print)
+ return status;
+
+ printk("%s*Error occurred on IO7 at PID %u\n",
+ err_print_prefix, lf_subpackets->io_pid);
+
+ /*
+ * Check port 7 first
+ */
+ if (lf_subpackets->io->po7_error_sum & IO7__PO7_ERRSUM__ERR_MASK) {
+ marvel_print_po7_err_sum(io);
+
+#if 0
+ printk("%s PORT 7 ERROR:\n"
+ "%s PO7_ERROR_SUM: %016lx\n"
+ "%s PO7_UNCRR_SYM: %016lx\n"
+ "%s PO7_CRRCT_SYM: %016lx\n"
+ "%s PO7_UGBGE_SYM: %016lx\n"
+ "%s PO7_ERR_PKT0: %016lx\n"
+ "%s PO7_ERR_PKT1: %016lx\n",
+ err_print_prefix,
+ err_print_prefix, io->po7_error_sum,
+ err_print_prefix, io->po7_uncrr_sym,
+ err_print_prefix, io->po7_crrct_sym,
+ err_print_prefix, io->po7_ugbge_sym,
+ err_print_prefix, io->po7_err_pkt0,
+ err_print_prefix, io->po7_err_pkt1);
+#endif
+ }
+
+ /*
+ * Then loop through the ports
+ */
+ for (i = 0; i < IO7_NUM_PORTS; i++) {
+ if (!MARVEL_IO_ERR_VALID(io->ports[i].pox_err_sum))
+ continue;
+
+ printk("%s PID %u PORT %d POx_ERR_SUM: %016lx\n",
+ err_print_prefix,
+ lf_subpackets->io_pid, i, io->ports[i].pox_err_sum);
+ marvel_print_pox_err(io->ports[i].pox_err_sum, &io->ports[i]);
+
+ printk("%s [ POx_FIRST_ERR: %016lx ]\n",
+ err_print_prefix, io->ports[i].pox_first_err);
+ marvel_print_pox_err(io->ports[i].pox_first_err,
+ &io->ports[i]);
+
+ }
+
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+ return status;
+}
+
+static int
+marvel_process_logout_frame(struct ev7_lf_subpackets *lf_subpackets, int print)
+{
+ int status = MCHK_DISPOSITION_UNKNOWN_ERROR;
+
+ /*
+ * I/O error?
+ */
+#define EV7__RBOX_INT__IO_ERROR__MASK 0x20000400ul
+	if (lf_subpackets->logout &&
+	    (lf_subpackets->logout->rbox_int & EV7__RBOX_INT__IO_ERROR__MASK))
+ status = marvel_process_io_error(lf_subpackets, print);
+
+ /*
+ * Probing behind PCI-X bridges can cause machine checks on
+ * Marvel when the probe is handled by the bridge as a split
+ * completion transaction. The symptom is an ERROR_RESPONSE
+ * to a CONFIG address. Since these errors will happen in
+ * normal operation, dismiss them.
+ *
+ * Dismiss if:
+	 *  C_STAT		= 0x14 		(Error Response)
+ * C_STS<3> = 0 (C_ADDR valid)
+ * C_ADDR<42> = 1 (I/O)
+ * C_ADDR<31:22> = 111110xxb (PCI Config space)
+ */
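+	/*
+	 * In the test below, mask 0x400ff000000 selects C_ADDR<42> and
+	 * C_ADDR<31:24>; value 0x400fe000000 requires <42> = 1 and
+	 * <31:24> = 11111110b.
+	 */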
+ if (lf_subpackets->ev7 &&
+ (lf_subpackets->ev7->c_stat == 0x14) &&
+ !(lf_subpackets->ev7->c_sts & 0x8) &&
+ ((lf_subpackets->ev7->c_addr & 0x400ff000000ul)
+ == 0x400fe000000ul))
+ status = MCHK_DISPOSITION_DISMISS;
+
+ return status;
+}
+
+void
+marvel_machine_check(u64 vector, u64 la_ptr, struct pt_regs *regs)
+{
+ struct el_subpacket *el_ptr = (struct el_subpacket *)la_ptr;
+ int (*process_frame)(struct ev7_lf_subpackets *, int) = NULL;
+ struct ev7_lf_subpackets subpacket_collection = { NULL, };
+ struct ev7_pal_io_subpacket scratch_io_packet = { 0, };
+ struct ev7_lf_subpackets *lf_subpackets = NULL;
+ int disposition = MCHK_DISPOSITION_UNKNOWN_ERROR;
+ char *saved_err_prefix = err_print_prefix;
+ char *error_type = NULL;
+
+ /*
+ * Sync the processor
+ */
+ mb();
+ draina();
+
+ switch(vector) {
+ case SCB_Q_SYSEVENT:
+ process_frame = marvel_process_680_frame;
+ error_type = "System Event";
+ break;
+
+ case SCB_Q_SYSMCHK:
+ process_frame = marvel_process_logout_frame;
+ error_type = "System Uncorrectable Error";
+ break;
+
+ case SCB_Q_SYSERR:
+ process_frame = marvel_process_logout_frame;
+ error_type = "System Correctable Error";
+ break;
+
+ default:
+ /* Don't know it - pass it up. */
+ ev7_machine_check(vector, la_ptr, regs);
+ return;
+ }
+
+ /*
+	 * A system event or error has occurred; handle it here.
+ *
+ * Any errors in the logout frame have already been cleared by the
+ * PALcode, so just parse it.
+ */
+ err_print_prefix = KERN_CRIT;
+
+ /*
+ * Parse the logout frame without printing first. If the only error(s)
+	 * found are classified as "dismissible", then just dismiss them and
+	 * don't print any message.
+ */
+ lf_subpackets =
+ ev7_collect_logout_frame_subpackets(el_ptr,
+ &subpacket_collection);
+ if (process_frame && lf_subpackets && lf_subpackets->logout) {
+ /*
+ * We might not have the correct (or any) I/O subpacket.
+ * [ See marvel_process_io_error() for explanation. ]
+ * If we don't have one, point the io subpacket in
+ * lf_subpackets at scratch_io_packet so that
+ * marvel_find_io7_with_error() will have someplace to
+ * store the info.
+ */
+ if (!lf_subpackets->io)
+ lf_subpackets->io = &scratch_io_packet;
+
+ /*
+ * Default io_pid to the processor reporting the error
+ * [this will get changed in marvel_find_io7_with_error()
+ * if a different one is needed]
+ */
+ lf_subpackets->io_pid = lf_subpackets->logout->whami;
+
+ /*
+ * Evaluate the frames.
+ */
+ disposition = process_frame(lf_subpackets, 0);
+ }
+ switch(disposition) {
+ case MCHK_DISPOSITION_DISMISS:
+ /* Nothing to do. */
+ break;
+
+ case MCHK_DISPOSITION_REPORT:
+ /* Recognized error, report it. */
+ printk("%s*%s (Vector 0x%x) reported on CPU %d\n",
+ err_print_prefix, error_type,
+ (unsigned int)vector, (int)smp_processor_id());
+ el_print_timestamp(&lf_subpackets->logout->timestamp);
+ process_frame(lf_subpackets, 1);
+ break;
+
+ default:
+ /* Unknown - dump the annotated subpackets. */
+ printk("%s*%s (Vector 0x%x) reported on CPU %d\n",
+ err_print_prefix, error_type,
+ (unsigned int)vector, (int)smp_processor_id());
+ el_process_subpacket(el_ptr);
+ break;
+
+ }
+
+ err_print_prefix = saved_err_prefix;
+
+ /* Release the logout frame. */
+ wrmces(0x7);
+ mb();
+}
+
+void
+marvel_register_error_handlers(void)
+{
+ ev7_register_error_handlers();
+}
diff --git a/arch/alpha/kernel/err_titan.c b/arch/alpha/kernel/err_titan.c
new file mode 100644
index 000000000000..7e6720d45f02
--- /dev/null
+++ b/arch/alpha/kernel/err_titan.c
@@ -0,0 +1,756 @@
+/*
+ * linux/arch/alpha/kernel/err_titan.c
+ *
+ * Copyright (C) 2000 Jeff Wiedemeier (Compaq Computer Corporation)
+ *
+ * Error handling code supporting TITAN systems
+ */
+
+#include <linux/init.h>
+#include <linux/pci.h>
+#include <linux/sched.h>
+
+#include <asm/io.h>
+#include <asm/core_titan.h>
+#include <asm/hwrpb.h>
+#include <asm/smp.h>
+#include <asm/err_common.h>
+#include <asm/err_ev6.h>
+
+#include "err_impl.h"
+#include "proto.h"
+
+
+static int
+titan_parse_c_misc(u64 c_misc, int print)
+{
+#ifdef CONFIG_VERBOSE_MCHECK
+ char *src;
+ int nxs = 0;
+#endif
+ int status = MCHK_DISPOSITION_REPORT;
+
+#define TITAN__CCHIP_MISC__NXM (1UL << 28)
+#define TITAN__CCHIP_MISC__NXS__S (29)
+#define TITAN__CCHIP_MISC__NXS__M (0x7)
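+/* The <NAME>__S / <NAME>__M pairs are the shift and mask consumed by
+   EXTRACT() (err_impl.h), which evaluates to ((v >> NAME__S) & NAME__M);
+   e.g. NXS above selects bits <31:29> of c_misc. */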
+
+ if (!(c_misc & TITAN__CCHIP_MISC__NXM))
+ return MCHK_DISPOSITION_UNKNOWN_ERROR;
+
+#ifdef CONFIG_VERBOSE_MCHECK
+ if (!print)
+ return status;
+
+ nxs = EXTRACT(c_misc, TITAN__CCHIP_MISC__NXS);
+ switch(nxs) {
+ case 0: /* CPU 0 */
+ case 1: /* CPU 1 */
+ case 2: /* CPU 2 */
+ case 3: /* CPU 3 */
+ src = "CPU";
+		/* nxs is already the CPU number */
+ break;
+ case 4: /* Pchip 0 */
+ case 5: /* Pchip 1 */
+ src = "Pchip";
+ nxs -= 4;
+ break;
+ default:/* reserved */
+ src = "Unknown, NXS =";
+		/* leave nxs untouched */
+ break;
+ }
+
+ printk("%s Non-existent memory access from: %s %d\n",
+ err_print_prefix, src, nxs);
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+ return status;
+}
+
+static int
+titan_parse_p_serror(int which, u64 serror, int print)
+{
+ int status = MCHK_DISPOSITION_REPORT;
+
+#ifdef CONFIG_VERBOSE_MCHECK
+ char *serror_src[] = {"GPCI", "APCI", "AGP HP", "AGP LP"};
+ char *serror_cmd[] = {"DMA Read", "DMA RMW", "SGTE Read", "Reserved"};
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+#define TITAN__PCHIP_SERROR__LOST_UECC (1UL << 0)
+#define TITAN__PCHIP_SERROR__UECC (1UL << 1)
+#define TITAN__PCHIP_SERROR__CRE (1UL << 2)
+#define TITAN__PCHIP_SERROR__NXIO (1UL << 3)
+#define TITAN__PCHIP_SERROR__LOST_CRE (1UL << 4)
+#define TITAN__PCHIP_SERROR__ECCMASK (TITAN__PCHIP_SERROR__UECC | \
+ TITAN__PCHIP_SERROR__CRE)
+#define TITAN__PCHIP_SERROR__ERRMASK (TITAN__PCHIP_SERROR__LOST_UECC | \
+ TITAN__PCHIP_SERROR__UECC | \
+ TITAN__PCHIP_SERROR__CRE | \
+ TITAN__PCHIP_SERROR__NXIO | \
+ TITAN__PCHIP_SERROR__LOST_CRE)
+#define TITAN__PCHIP_SERROR__SRC__S (52)
+#define TITAN__PCHIP_SERROR__SRC__M (0x3)
+#define TITAN__PCHIP_SERROR__CMD__S (54)
+#define TITAN__PCHIP_SERROR__CMD__M (0x3)
+#define TITAN__PCHIP_SERROR__SYN__S (56)
+#define TITAN__PCHIP_SERROR__SYN__M (0xff)
+#define TITAN__PCHIP_SERROR__ADDR__S (15)
+#define TITAN__PCHIP_SERROR__ADDR__M (0xffffffffUL)
+
+ if (!(serror & TITAN__PCHIP_SERROR__ERRMASK))
+ return MCHK_DISPOSITION_UNKNOWN_ERROR;
+
+#ifdef CONFIG_VERBOSE_MCHECK
+ if (!print)
+ return status;
+
+ printk("%s PChip %d SERROR: %016lx\n",
+ err_print_prefix, which, serror);
+ if (serror & TITAN__PCHIP_SERROR__ECCMASK) {
+ printk("%s %sorrectable ECC Error:\n"
+ " Source: %-6s Command: %-8s Syndrome: 0x%08x\n"
+ " Address: 0x%lx\n",
+ err_print_prefix,
+ (serror & TITAN__PCHIP_SERROR__UECC) ? "Unc" : "C",
+ serror_src[EXTRACT(serror, TITAN__PCHIP_SERROR__SRC)],
+ serror_cmd[EXTRACT(serror, TITAN__PCHIP_SERROR__CMD)],
+ (unsigned)EXTRACT(serror, TITAN__PCHIP_SERROR__SYN),
+ EXTRACT(serror, TITAN__PCHIP_SERROR__ADDR));
+ }
+ if (serror & TITAN__PCHIP_SERROR__NXIO)
+ printk("%s Non Existent I/O Error\n", err_print_prefix);
+ if (serror & TITAN__PCHIP_SERROR__LOST_UECC)
+ printk("%s Lost Uncorrectable ECC Error\n",
+ err_print_prefix);
+ if (serror & TITAN__PCHIP_SERROR__LOST_CRE)
+ printk("%s Lost Correctable ECC Error\n", err_print_prefix);
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+ return status;
+}
+
+static int
+titan_parse_p_perror(int which, int port, u64 perror, int print)
+{
+ int cmd;
+ unsigned long addr;
+ int status = MCHK_DISPOSITION_REPORT;
+
+#ifdef CONFIG_VERBOSE_MCHECK
+ char *perror_cmd[] = { "Interrupt Acknowledge", "Special Cycle",
+ "I/O Read", "I/O Write",
+ "Reserved", "Reserved",
+ "Memory Read", "Memory Write",
+ "Reserved", "Reserved",
+ "Configuration Read", "Configuration Write",
+ "Memory Read Multiple", "Dual Address Cycle",
+ "Memory Read Line","Memory Write and Invalidate"
+ };
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+#define TITAN__PCHIP_PERROR__LOST (1UL << 0)
+#define TITAN__PCHIP_PERROR__SERR (1UL << 1)
+#define TITAN__PCHIP_PERROR__PERR (1UL << 2)
+#define TITAN__PCHIP_PERROR__DCRTO (1UL << 3)
+#define TITAN__PCHIP_PERROR__SGE (1UL << 4)
+#define TITAN__PCHIP_PERROR__APE (1UL << 5)
+#define TITAN__PCHIP_PERROR__TA (1UL << 6)
+#define TITAN__PCHIP_PERROR__DPE (1UL << 7)
+#define TITAN__PCHIP_PERROR__NDS (1UL << 8)
+#define TITAN__PCHIP_PERROR__IPTPR (1UL << 9)
+#define TITAN__PCHIP_PERROR__IPTPW (1UL << 10)
+#define TITAN__PCHIP_PERROR__ERRMASK (TITAN__PCHIP_PERROR__LOST | \
+ TITAN__PCHIP_PERROR__SERR | \
+ TITAN__PCHIP_PERROR__PERR | \
+ TITAN__PCHIP_PERROR__DCRTO | \
+ TITAN__PCHIP_PERROR__SGE | \
+ TITAN__PCHIP_PERROR__APE | \
+ TITAN__PCHIP_PERROR__TA | \
+ TITAN__PCHIP_PERROR__DPE | \
+ TITAN__PCHIP_PERROR__NDS | \
+ TITAN__PCHIP_PERROR__IPTPR | \
+ TITAN__PCHIP_PERROR__IPTPW)
+#define TITAN__PCHIP_PERROR__DAC (1UL << 47)
+#define TITAN__PCHIP_PERROR__MWIN (1UL << 48)
+#define TITAN__PCHIP_PERROR__CMD__S (52)
+#define TITAN__PCHIP_PERROR__CMD__M (0x0f)
+#define TITAN__PCHIP_PERROR__ADDR__S (14)
+#define TITAN__PCHIP_PERROR__ADDR__M (0x1fffffffful)
+
+ if (!(perror & TITAN__PCHIP_PERROR__ERRMASK))
+ return MCHK_DISPOSITION_UNKNOWN_ERROR;
+
+ cmd = EXTRACT(perror, TITAN__PCHIP_PERROR__CMD);
+ addr = EXTRACT(perror, TITAN__PCHIP_PERROR__ADDR) << 2;
+
+ /*
+ * Initializing the BIOS on a video card on a bus without
+ * a south bridge (subtractive decode agent) can result in
+ * master aborts as the BIOS probes the capabilities of the
+ * card. XFree86 does such initialization. If the error
+ * is a master abort (No DevSel as PCI Master) and the command
+ * is an I/O read or write below the address where we start
+ * assigning PCI I/O spaces (SRM uses 0x1000), then mark the
+	 * error as dismissible so starting XFree86 doesn't result
+ * in a series of uncorrectable errors being reported. Also
+ * dismiss master aborts to VGA frame buffer space
+ * (0xA0000 - 0xC0000) and legacy BIOS space (0xC0000 - 0x100000)
+ * for the same reason.
+ *
+ * Also mark the error dismissible if it looks like the right
+ * error but only the Lost bit is set. Since the BIOS initialization
+ * can cause multiple master aborts and the error interrupt can
+ * be handled on a different CPU than the BIOS code is run on,
+ * it is possible for a second master abort to occur between the
+ * time the PALcode reads PERROR and the time it writes PERROR
+ * to acknowledge the error. If this timing happens, a second
+ * error will be signalled after the first, and if no additional
+ * errors occur, will look like a Lost error with no additional
+ * errors on the same transaction as the previous error.
+ */
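+	/* (cmd & 0xE) == 2 matches I/O Read/Write (0x2/0x3) and
+	   (cmd & 0xE) == 6 matches Memory Read/Write (0x6/0x7);
+	   see perror_cmd[] above. */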
+ if (((perror & TITAN__PCHIP_PERROR__NDS) ||
+ ((perror & TITAN__PCHIP_PERROR__ERRMASK) ==
+ TITAN__PCHIP_PERROR__LOST)) &&
+ ((((cmd & 0xE) == 2) && (addr < 0x1000)) ||
+ (((cmd & 0xE) == 6) && (addr >= 0xA0000) && (addr < 0x100000)))) {
+ status = MCHK_DISPOSITION_DISMISS;
+ }
+
+#ifdef CONFIG_VERBOSE_MCHECK
+ if (!print)
+ return status;
+
+ printk("%s PChip %d %cPERROR: %016lx\n",
+ err_print_prefix, which,
+ port ? 'A' : 'G', perror);
+ if (perror & TITAN__PCHIP_PERROR__IPTPW)
+ printk("%s Invalid Peer-to-Peer Write\n", err_print_prefix);
+ if (perror & TITAN__PCHIP_PERROR__IPTPR)
+ printk("%s Invalid Peer-to-Peer Read\n", err_print_prefix);
+ if (perror & TITAN__PCHIP_PERROR__NDS)
+ printk("%s No DEVSEL as PCI Master [Master Abort]\n",
+ err_print_prefix);
+ if (perror & TITAN__PCHIP_PERROR__DPE)
+ printk("%s Data Parity Error\n", err_print_prefix);
+ if (perror & TITAN__PCHIP_PERROR__TA)
+ printk("%s Target Abort\n", err_print_prefix);
+ if (perror & TITAN__PCHIP_PERROR__APE)
+ printk("%s Address Parity Error\n", err_print_prefix);
+ if (perror & TITAN__PCHIP_PERROR__SGE)
+ printk("%s Scatter-Gather Error, Invalid PTE\n",
+ err_print_prefix);
+ if (perror & TITAN__PCHIP_PERROR__DCRTO)
+ printk("%s Delayed-Completion Retry Timeout\n",
+ err_print_prefix);
+ if (perror & TITAN__PCHIP_PERROR__PERR)
+ printk("%s PERR Asserted\n", err_print_prefix);
+ if (perror & TITAN__PCHIP_PERROR__SERR)
+ printk("%s SERR Asserted\n", err_print_prefix);
+ if (perror & TITAN__PCHIP_PERROR__LOST)
+ printk("%s Lost Error\n", err_print_prefix);
+ printk("%s Command: 0x%x - %s\n"
+ " Address: 0x%lx\n",
+ err_print_prefix,
+ cmd, perror_cmd[cmd],
+ addr);
+ if (perror & TITAN__PCHIP_PERROR__DAC)
+ printk("%s Dual Address Cycle\n", err_print_prefix);
+ if (perror & TITAN__PCHIP_PERROR__MWIN)
+ printk("%s Hit in Monster Window\n", err_print_prefix);
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+ return status;
+}
+
+static int
+titan_parse_p_agperror(int which, u64 agperror, int print)
+{
+ int status = MCHK_DISPOSITION_REPORT;
+#ifdef CONFIG_VERBOSE_MCHECK
+ int cmd, len;
+ unsigned long addr;
+
+ char *agperror_cmd[] = { "Read (low-priority)", "Read (high-priority)",
+ "Write (low-priority)",
+ "Write (high-priority)",
+ "Reserved", "Reserved",
+ "Flush", "Fence"
+ };
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+#define TITAN__PCHIP_AGPERROR__LOST (1UL << 0)
+#define TITAN__PCHIP_AGPERROR__LPQFULL (1UL << 1)
+#define TITAN__PCHIP_AGPERROR__HPQFULL (1UL << 2)
+#define TITAN__PCHIP_AGPERROR__RESCMD (1UL << 3)
+#define TITAN__PCHIP_AGPERROR__IPTE (1UL << 4)
+#define TITAN__PCHIP_AGPERROR__PTP (1UL << 5)
+#define TITAN__PCHIP_AGPERROR__NOWINDOW (1UL << 6)
+#define TITAN__PCHIP_AGPERROR__ERRMASK (TITAN__PCHIP_AGPERROR__LOST | \
+ TITAN__PCHIP_AGPERROR__LPQFULL | \
+ TITAN__PCHIP_AGPERROR__HPQFULL | \
+ TITAN__PCHIP_AGPERROR__RESCMD | \
+ TITAN__PCHIP_AGPERROR__IPTE | \
+ TITAN__PCHIP_AGPERROR__PTP | \
+ TITAN__PCHIP_AGPERROR__NOWINDOW)
+#define TITAN__PCHIP_AGPERROR__DAC (1UL << 48)
+#define TITAN__PCHIP_AGPERROR__MWIN (1UL << 49)
+#define TITAN__PCHIP_AGPERROR__FENCE (1UL << 59)
+#define TITAN__PCHIP_AGPERROR__CMD__S (50)
+#define TITAN__PCHIP_AGPERROR__CMD__M (0x07)
+#define TITAN__PCHIP_AGPERROR__ADDR__S (15)
+#define TITAN__PCHIP_AGPERROR__ADDR__M (0xffffffffUL)
+#define TITAN__PCHIP_AGPERROR__LEN__S (53)
+#define TITAN__PCHIP_AGPERROR__LEN__M (0x3f)
+
+ if (!(agperror & TITAN__PCHIP_AGPERROR__ERRMASK))
+ return MCHK_DISPOSITION_UNKNOWN_ERROR;
+
+#ifdef CONFIG_VERBOSE_MCHECK
+ if (!print)
+ return status;
+
+ cmd = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__CMD);
+ addr = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__ADDR) << 3;
+ len = EXTRACT(agperror, TITAN__PCHIP_AGPERROR__LEN);
+
+ printk("%s PChip %d AGPERROR: %016lx\n", err_print_prefix,
+ which, agperror);
+ if (agperror & TITAN__PCHIP_AGPERROR__NOWINDOW)
+ printk("%s No Window\n", err_print_prefix);
+ if (agperror & TITAN__PCHIP_AGPERROR__PTP)
+ printk("%s Peer-to-Peer set\n", err_print_prefix);
+ if (agperror & TITAN__PCHIP_AGPERROR__IPTE)
+ printk("%s Invalid PTE\n", err_print_prefix);
+ if (agperror & TITAN__PCHIP_AGPERROR__RESCMD)
+ printk("%s Reserved Command\n", err_print_prefix);
+ if (agperror & TITAN__PCHIP_AGPERROR__HPQFULL)
+ printk("%s HP Transaction Received while Queue Full\n",
+ err_print_prefix);
+ if (agperror & TITAN__PCHIP_AGPERROR__LPQFULL)
+ printk("%s LP Transaction Received while Queue Full\n",
+ err_print_prefix);
+ if (agperror & TITAN__PCHIP_AGPERROR__LOST)
+ printk("%s Lost Error\n", err_print_prefix);
+ printk("%s Command: 0x%x - %s, %d Quadwords%s\n"
+ " Address: 0x%lx\n",
+ err_print_prefix, cmd, agperror_cmd[cmd], len,
+ (agperror & TITAN__PCHIP_AGPERROR__FENCE) ? ", FENCE" : "",
+ addr);
+ if (agperror & TITAN__PCHIP_AGPERROR__DAC)
+ printk("%s Dual Address Cycle\n", err_print_prefix);
+ if (agperror & TITAN__PCHIP_AGPERROR__MWIN)
+ printk("%s Hit in Monster Window\n", err_print_prefix);
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+ return status;
+}
+
+static int
+titan_parse_p_chip(int which, u64 serror, u64 gperror,
+ u64 aperror, u64 agperror, int print)
+{
+ int status = MCHK_DISPOSITION_UNKNOWN_ERROR;
+ status |= titan_parse_p_serror(which, serror, print);
+ status |= titan_parse_p_perror(which, 0, gperror, print);
+ status |= titan_parse_p_perror(which, 1, aperror, print);
+ status |= titan_parse_p_agperror(which, agperror, print);
+ return status;
+}
+
+int
+titan_process_logout_frame(struct el_common *mchk_header, int print)
+{
+ struct el_TITAN_sysdata_mcheck *tmchk =
+ (struct el_TITAN_sysdata_mcheck *)
+ ((unsigned long)mchk_header + mchk_header->sys_offset);
+ int status = MCHK_DISPOSITION_UNKNOWN_ERROR;
+
+ status |= titan_parse_c_misc(tmchk->c_misc, print);
+ status |= titan_parse_p_chip(0, tmchk->p0_serror, tmchk->p0_gperror,
+ tmchk->p0_aperror, tmchk->p0_agperror,
+ print);
+ status |= titan_parse_p_chip(1, tmchk->p1_serror, tmchk->p1_gperror,
+ tmchk->p1_aperror, tmchk->p1_agperror,
+ print);
+
+ return status;
+}
+
+void
+titan_machine_check(u64 vector, u64 la_ptr, struct pt_regs *regs)
+{
+ struct el_common *mchk_header = (struct el_common *)la_ptr;
+ struct el_TITAN_sysdata_mcheck *tmchk =
+ (struct el_TITAN_sysdata_mcheck *)
+ ((unsigned long)mchk_header + mchk_header->sys_offset);
+ u64 irqmask;
+
+ /*
+ * Mask of Titan interrupt sources which are reported as machine checks
+ *
+ * 63 - CChip Error
+ * 62 - PChip 0 H_Error
+ * 61 - PChip 1 H_Error
+ * 60 - PChip 0 C_Error
+ * 59 - PChip 1 C_Error
+ */
+#define TITAN_MCHECK_INTERRUPT_MASK 0xF800000000000000UL
+
+ /*
+ * Sync the processor
+ */
+ mb();
+ draina();
+
+ /*
+ * Only handle system errors here
+ */
+ if ((vector != SCB_Q_SYSMCHK) && (vector != SCB_Q_SYSERR)) {
+ ev6_machine_check(vector, la_ptr, regs);
+ return;
+ }
+
+ /*
+ * It's a system error, handle it here
+ *
+ * The PALcode has already cleared the error, so just parse it
+ */
+
+ /*
+ * Parse the logout frame without printing first. If the only error(s)
+	 * found are classified as "dismissible", then just dismiss them and
+	 * don't print any message.
+ */
+ if (titan_process_logout_frame(mchk_header, 0) !=
+ MCHK_DISPOSITION_DISMISS) {
+ char *saved_err_prefix = err_print_prefix;
+ err_print_prefix = KERN_CRIT;
+
+ /*
+ * Either a nondismissable error was detected or no
+ * recognized error was detected in the logout frame
+ * -- report the error in either case
+ */
+ printk("%s"
+ "*System %s Error (Vector 0x%x) reported on CPU %d:\n",
+ err_print_prefix,
+ (vector == SCB_Q_SYSERR)?"Correctable":"Uncorrectable",
+ (unsigned int)vector, (int)smp_processor_id());
+
+#ifdef CONFIG_VERBOSE_MCHECK
+ titan_process_logout_frame(mchk_header, alpha_verbose_mcheck);
+ if (alpha_verbose_mcheck)
+ dik_show_regs(regs, NULL);
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+ err_print_prefix = saved_err_prefix;
+
+ /*
+ * Convert any pending interrupts which report as system
+ * machine checks to interrupts
+ */
+ irqmask = tmchk->c_dirx & TITAN_MCHECK_INTERRUPT_MASK;
+ titan_dispatch_irqs(irqmask, regs);
+ }
+
+
+ /*
+ * Release the logout frame
+ */
+ wrmces(0x7);
+ mb();
+}
+
+/*
+ * Subpacket Annotations
+ */
+static char *el_titan_pchip0_extended_annotation[] = {
+ "Subpacket Header", "P0_SCTL", "P0_SERREN",
+ "P0_APCTL", "P0_APERREN", "P0_AGPERREN",
+ "P0_ASPRST", "P0_AWSBA0", "P0_AWSBA1",
+ "P0_AWSBA2", "P0_AWSBA3", "P0_AWSM0",
+ "P0_AWSM1", "P0_AWSM2", "P0_AWSM3",
+ "P0_ATBA0", "P0_ATBA1", "P0_ATBA2",
+ "P0_ATBA3", "P0_GPCTL", "P0_GPERREN",
+ "P0_GSPRST", "P0_GWSBA0", "P0_GWSBA1",
+ "P0_GWSBA2", "P0_GWSBA3", "P0_GWSM0",
+ "P0_GWSM1", "P0_GWSM2", "P0_GWSM3",
+ "P0_GTBA0", "P0_GTBA1", "P0_GTBA2",
+ "P0_GTBA3", NULL
+};
+static char *el_titan_pchip1_extended_annotation[] = {
+ "Subpacket Header", "P1_SCTL", "P1_SERREN",
+ "P1_APCTL", "P1_APERREN", "P1_AGPERREN",
+ "P1_ASPRST", "P1_AWSBA0", "P1_AWSBA1",
+ "P1_AWSBA2", "P1_AWSBA3", "P1_AWSM0",
+ "P1_AWSM1", "P1_AWSM2", "P1_AWSM3",
+ "P1_ATBA0", "P1_ATBA1", "P1_ATBA2",
+ "P1_ATBA3", "P1_GPCTL", "P1_GPERREN",
+ "P1_GSPRST", "P1_GWSBA0", "P1_GWSBA1",
+ "P1_GWSBA2", "P1_GWSBA3", "P1_GWSM0",
+ "P1_GWSM1", "P1_GWSM2", "P1_GWSM3",
+ "P1_GTBA0", "P1_GTBA1", "P1_GTBA2",
+ "P1_GTBA3", NULL
+};
+static char *el_titan_memory_extended_annotation[] = {
+ "Subpacket Header", "AAR0", "AAR1",
+ "AAR2", "AAR3", "P0_SCTL",
+ "P0_GPCTL", "P0_APCTL", "P1_SCTL",
+	"P1_GPCTL", "P1_APCTL", NULL
+};
+
+static struct el_subpacket_annotation el_titan_annotations[] = {
+ SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY,
+ EL_TYPE__REGATTA__TITAN_PCHIP0_EXTENDED,
+ 1,
+ "Titan PChip 0 Extended Frame",
+ el_titan_pchip0_extended_annotation),
+ SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY,
+ EL_TYPE__REGATTA__TITAN_PCHIP1_EXTENDED,
+ 1,
+ "Titan PChip 1 Extended Frame",
+ el_titan_pchip1_extended_annotation),
+ SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY,
+ EL_TYPE__REGATTA__TITAN_MEMORY_EXTENDED,
+ 1,
+ "Titan Memory Extended Frame",
+ el_titan_memory_extended_annotation),
+ SUBPACKET_ANNOTATION(EL_CLASS__REGATTA_FAMILY,
+ EL_TYPE__TERMINATION__TERMINATION,
+ 1,
+ "Termination Subpacket",
+ NULL)
+};
+
+static struct el_subpacket *
+el_process_regatta_subpacket(struct el_subpacket *header)
+{
+ int status;
+
+ if (header->class != EL_CLASS__REGATTA_FAMILY) {
+ printk("%s ** Unexpected header CLASS %d TYPE %d, aborting\n",
+ err_print_prefix,
+ header->class, header->type);
+ return NULL;
+ }
+
+ switch(header->type) {
+ case EL_TYPE__REGATTA__PROCESSOR_ERROR_FRAME:
+ case EL_TYPE__REGATTA__SYSTEM_ERROR_FRAME:
+ case EL_TYPE__REGATTA__ENVIRONMENTAL_FRAME:
+ case EL_TYPE__REGATTA__PROCESSOR_DBL_ERROR_HALT:
+ case EL_TYPE__REGATTA__SYSTEM_DBL_ERROR_HALT:
+ printk("%s ** Occurred on CPU %d:\n",
+ err_print_prefix,
+ (int)header->by_type.regatta_frame.cpuid);
+ status = privateer_process_logout_frame((struct el_common *)
+ header->by_type.regatta_frame.data_start, 1);
+ break;
+ default:
+ printk("%s ** REGATTA TYPE %d SUBPACKET\n",
+ err_print_prefix, header->type);
+ el_annotate_subpacket(header);
+ break;
+ }
+
+
+ return (struct el_subpacket *)((unsigned long)header + header->length);
+}
+
+static struct el_subpacket_handler titan_subpacket_handler =
+ SUBPACKET_HANDLER_INIT(EL_CLASS__REGATTA_FAMILY,
+ el_process_regatta_subpacket);
+
+void
+titan_register_error_handlers(void)
+{
+ size_t i;
+
+ for (i = 0; i < ARRAY_SIZE (el_titan_annotations); i++)
+ cdl_register_subpacket_annotation(&el_titan_annotations[i]);
+
+ cdl_register_subpacket_handler(&titan_subpacket_handler);
+
+ ev6_register_error_handlers();
+}
+
+
+/*
+ * Privateer
+ */
+
+static int
+privateer_process_680_frame(struct el_common *mchk_header, int print)
+{
+ int status = MCHK_DISPOSITION_UNKNOWN_ERROR;
+#ifdef CONFIG_VERBOSE_MCHECK
+ struct el_PRIVATEER_envdata_mcheck *emchk =
+ (struct el_PRIVATEER_envdata_mcheck *)
+ ((unsigned long)mchk_header + mchk_header->sys_offset);
+
+	/* TODO - categorize errors; for now, no error */
+
+ if (!print)
+ return status;
+
+ /* TODO - decode instead of just dumping... */
+ printk("%s Summary Flags: %016lx\n"
+ " CChip DIRx: %016lx\n"
+ " System Management IR: %016lx\n"
+ " CPU IR: %016lx\n"
+ " Power Supply IR: %016lx\n"
+ " LM78 Fault Status: %016lx\n"
+ " System Doors: %016lx\n"
+ " Temperature Warning: %016lx\n"
+ " Fan Control: %016lx\n"
+ " Fatal Power Down Code: %016lx\n",
+ err_print_prefix,
+ emchk->summary,
+ emchk->c_dirx,
+ emchk->smir,
+ emchk->cpuir,
+ emchk->psir,
+ emchk->fault,
+ emchk->sys_doors,
+ emchk->temp_warn,
+ emchk->fan_ctrl,
+ emchk->code);
+#endif /* CONFIG_VERBOSE_MCHECK */
+
+ return status;
+}
+
+int
+privateer_process_logout_frame(struct el_common *mchk_header, int print)
+{
+ struct el_common_EV6_mcheck *ev6mchk =
+ (struct el_common_EV6_mcheck *)mchk_header;
+ int status = MCHK_DISPOSITION_UNKNOWN_ERROR;
+
+ /*
+ * Machine check codes
+ */
+#define PRIVATEER_MCHK__CORR_ECC 0x86 /* 630 */
+#define PRIVATEER_MCHK__DC_TAG_PERR 0x9E /* 630 */
+#define PRIVATEER_MCHK__PAL_BUGCHECK 0x8E /* 670 */
+#define PRIVATEER_MCHK__OS_BUGCHECK 0x90 /* 670 */
+#define PRIVATEER_MCHK__PROC_HRD_ERR 0x98 /* 670 */
+#define PRIVATEER_MCHK__ISTREAM_CMOV_PRX 0xA0 /* 670 */
+#define PRIVATEER_MCHK__ISTREAM_CMOV_FLT 0xA2 /* 670 */
+#define PRIVATEER_MCHK__SYS_HRD_ERR 0x202 /* 660 */
+#define PRIVATEER_MCHK__SYS_CORR_ERR 0x204 /* 620 */
+#define PRIVATEER_MCHK__SYS_ENVIRON 0x206 /* 680 */
+
+ switch(ev6mchk->MCHK_Code) {
+ /*
+ * Vector 630 - Processor, Correctable
+ */
+ case PRIVATEER_MCHK__CORR_ECC:
+ case PRIVATEER_MCHK__DC_TAG_PERR:
+ /*
+ * Fall through to vector 670 for processing...
+ */
+ /*
+ * Vector 670 - Processor, Uncorrectable
+ */
+ case PRIVATEER_MCHK__PAL_BUGCHECK:
+ case PRIVATEER_MCHK__OS_BUGCHECK:
+ case PRIVATEER_MCHK__PROC_HRD_ERR:
+ case PRIVATEER_MCHK__ISTREAM_CMOV_PRX:
+ case PRIVATEER_MCHK__ISTREAM_CMOV_FLT:
+ status |= ev6_process_logout_frame(mchk_header, print);
+ break;
+
+ /*
+ * Vector 620 - System, Correctable
+ */
+ case PRIVATEER_MCHK__SYS_CORR_ERR:
+ /*
+ * Fall through to vector 660 for processing...
+ */
+ /*
+ * Vector 660 - System, Uncorrectable
+ */
+ case PRIVATEER_MCHK__SYS_HRD_ERR:
+ status |= titan_process_logout_frame(mchk_header, print);
+ break;
+
+ /*
+ * Vector 680 - System, Environmental
+ */
+ case PRIVATEER_MCHK__SYS_ENVIRON: /* System, Environmental */
+ status |= privateer_process_680_frame(mchk_header, print);
+ break;
+
+ /*
+ * Unknown
+ */
+ default:
+ status |= MCHK_DISPOSITION_REPORT;
+ if (print) {
+ printk("%s** Unknown Error, frame follows\n",
+ err_print_prefix);
+ mchk_dump_logout_frame(mchk_header);
+ }
+
+ }
+
+ return status;
+}
+
+void
+privateer_machine_check(u64 vector, u64 la_ptr, struct pt_regs *regs)
+{
+ struct el_common *mchk_header = (struct el_common *)la_ptr;
+ struct el_TITAN_sysdata_mcheck *tmchk =
+ (struct el_TITAN_sysdata_mcheck *)
+ (la_ptr + mchk_header->sys_offset);
+ u64 irqmask;
+ char *saved_err_prefix = err_print_prefix;
+
+#define PRIVATEER_680_INTERRUPT_MASK (0xE00UL)
+#define PRIVATEER_HOTPLUG_INTERRUPT_MASK (0xE00UL)
+
+ /*
+ * Sync the processor.
+ */
+ mb();
+ draina();
+
+ /*
+ * Only handle system events here.
+ */
+ if (vector != SCB_Q_SYSEVENT)
+ return titan_machine_check(vector, la_ptr, regs);
+
+ /*
+ * Report the event - System Events should be reported even if no
+ * error is indicated since the event could indicate the return
+ * to normal status.
+ */
+ err_print_prefix = KERN_CRIT;
+ printk("%s*System Event (Vector 0x%x) reported on CPU %d:\n",
+ err_print_prefix,
+ (unsigned int)vector, (int)smp_processor_id());
+ privateer_process_680_frame(mchk_header, 1);
+ err_print_prefix = saved_err_prefix;
+
+ /*
+ * Convert any pending interrupts which report as 680 machine
+ * checks to interrupts.
+ */
+ irqmask = tmchk->c_dirx & PRIVATEER_680_INTERRUPT_MASK;
+
+ /*
+ * Dispatch the interrupt(s).
+ */
+ titan_dispatch_irqs(irqmask, regs);
+
+ /*
+ * Release the logout frame.
+ */
+ wrmces(0x7);
+ mb();
+}
diff --git a/arch/alpha/kernel/es1888.c b/arch/alpha/kernel/es1888.c
new file mode 100644
index 000000000000..d584c85fea7a
--- /dev/null
+++ b/arch/alpha/kernel/es1888.c
@@ -0,0 +1,49 @@
+/*
+ * linux/arch/alpha/kernel/es1888.c
+ *
+ * Init the built-in ES1888 sound chip (SB16 compatible)
+ */
+
+#include <linux/init.h>
+#include <asm/io.h>
+#include "proto.h"
+
+void __init
+es1888_init(void)
+{
+ /* Sequence of IO reads to init the audio controller */
+ inb(0x0229);
+ inb(0x0229);
+ inb(0x0229);
+ inb(0x022b);
+ inb(0x0229);
+ inb(0x022b);
+ inb(0x0229);
+ inb(0x0229);
+ inb(0x022b);
+ inb(0x0229);
+ inb(0x0220); /* This sets the base address to 0x220 */
+
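+	/* The ports below follow the standard SoundBlaster register
+	   layout at base 0x220: 0x226 is DSP reset, 0x22c the DSP
+	   write port (bit 7 reads back as write-busy), and 0x22e the
+	   DSP read-status port (bit 7 = data available). */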
+ /* Sequence to set DMA channels */
+ outb(0x01, 0x0226); /* reset */
+ inb(0x0226); /* pause */
+ outb(0x00, 0x0226); /* release reset */
+ while (!(inb(0x022e) & 0x80)) /* wait for bit 7 to assert*/
+ continue;
+ inb(0x022a); /* pause */
+ outb(0xc6, 0x022c); /* enable extended mode */
+ inb(0x022a); /* pause, also forces the write */
+ while (inb(0x022c) & 0x80) /* wait for bit 7 to deassert */
+ continue;
+ outb(0xb1, 0x022c); /* setup for write to Interrupt CR */
+ while (inb(0x022c) & 0x80) /* wait for bit 7 to deassert */
+ continue;
+ outb(0x14, 0x022c); /* set IRQ 5 */
+ while (inb(0x022c) & 0x80) /* wait for bit 7 to deassert */
+ continue;
+ outb(0xb2, 0x022c); /* setup for write to DMA CR */
+ while (inb(0x022c) & 0x80) /* wait for bit 7 to deassert */
+ continue;
+ outb(0x18, 0x022c); /* set DMA channel 1 */
+ inb(0x022c); /* force the write */
+}
diff --git a/arch/alpha/kernel/gct.c b/arch/alpha/kernel/gct.c
new file mode 100644
index 000000000000..8827687b9f89
--- /dev/null
+++ b/arch/alpha/kernel/gct.c
@@ -0,0 +1,48 @@
+/*
+ * linux/arch/alpha/kernel/gct.c
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/errno.h>
+
+#include <asm/hwrpb.h>
+#include <asm/gct.h>
+
+int
+gct6_find_nodes(gct6_node *node, gct6_search_struct *search)
+{
+ gct6_search_struct *wanted;
+ int status = 0;
+
+ /* First check the magic number. */
+ if (node->magic != GCT_NODE_MAGIC) {
+ printk(KERN_ERR "GCT Node MAGIC incorrect - GCT invalid\n");
+ return -EINVAL;
+ }
+
+ /* Check against the search struct. */
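+	/* The search list is terminated by an entry whose type and
+	   subtype are both zero -- hence the bitwise OR in the loop
+	   condition. */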
+ for (wanted = search;
+ wanted && (wanted->type | wanted->subtype);
+ wanted++) {
+ if (node->type != wanted->type)
+ continue;
+ if (node->subtype != wanted->subtype)
+ continue;
+
+ /* Found it -- call out. */
+ if (wanted->callout)
+ wanted->callout(node);
+ }
+
+ /* Now walk the tree, siblings first. */
+ if (node->next)
+ status |= gct6_find_nodes(GCT_NODE_PTR(node->next), search);
+
+ /* Then the children. */
+ if (node->child)
+ status |= gct6_find_nodes(GCT_NODE_PTR(node->child), search);
+
+ return status;
+}
diff --git a/arch/alpha/kernel/head.S b/arch/alpha/kernel/head.S
new file mode 100644
index 000000000000..4ca2e404708a
--- /dev/null
+++ b/arch/alpha/kernel/head.S
@@ -0,0 +1,99 @@
+/*
+ * arch/alpha/kernel/head.S
+ *
+ * Initial boot code. At this point, the bootloader has already
+ * switched into OSF/1 PAL-code, and loaded us at the correct address
+ * (START_ADDR). So there isn't much left for us to do: just set up
+ * the kernel global pointer and jump to the kernel entry-point.
+ */
+
+#include <linux/config.h>
+#include <asm/system.h>
+#include <asm/asm_offsets.h>
+
+.globl swapper_pg_dir
+.globl _stext
+swapper_pg_dir=SWAPPER_PGD
+
+ .set noreorder
+ .globl __start
+ .ent __start
+_stext:
+__start:
+ .prologue 0
+ br $27,1f
+1: ldgp $29,0($27)
+ /* We need to get current_task_info loaded up... */
+ lda $8,init_thread_union
+ /* ... and find our stack ... */
+ lda $30,0x4000 - SIZEOF_PT_REGS($8)
+ /* ... and then we can start the kernel. */
+ jsr $26,start_kernel
+ call_pal PAL_halt
+ .end __start
+
+#ifdef CONFIG_SMP
+ .align 3
+ .globl __smp_callin
+ .ent __smp_callin
+ /* On entry here from SRM console, the HWPCB of the per-cpu
+ slot for this processor has been loaded. We've arranged
+ for the UNIQUE value for this process to contain the PCBB
+ of the target idle task. */
+__smp_callin:
+ .prologue 1
+ ldgp $29,0($27) # First order of business, load the GP.
+
+ call_pal PAL_rduniq # Grab the target PCBB.
+ mov $0,$16 # Install it.
+ call_pal PAL_swpctx
+
+ lda $8,0x3fff # Find "current".
+ bic $30,$8,$8
+
+ jsr $26,smp_callin
+ call_pal PAL_halt
+ .end __smp_callin
+#endif /* CONFIG_SMP */
+
+ #
+ # The following two functions are needed for supporting SRM PALcode
+ # on the PC164 (at least), since that PALcode manages the interrupt
+ # masking, and we cannot duplicate the effort without causing problems
+ #
+
+ .align 3
+ .globl cserve_ena
+ .ent cserve_ena
+cserve_ena:
+ .prologue 0
+ bis $16,$16,$17
+ lda $16,52($31)
+ call_pal PAL_cserve
+ ret ($26)
+ .end cserve_ena
+
+ .align 3
+ .globl cserve_dis
+ .ent cserve_dis
+cserve_dis:
+ .prologue 0
+ bis $16,$16,$17
+ lda $16,53($31)
+ call_pal PAL_cserve
+ ret ($26)
+ .end cserve_dis
+
+ #
+ # It is handy, on occasion, to make halt actually just loop.
+	# Putting it here means we don't have to recompile the whole
+ # kernel.
+ #
+
+ .align 3
+ .globl halt
+ .ent halt
+halt:
+ .prologue 0
+ call_pal PAL_halt
+ .end halt
diff --git a/arch/alpha/kernel/init_task.c b/arch/alpha/kernel/init_task.c
new file mode 100644
index 000000000000..835d09a7b332
--- /dev/null
+++ b/arch/alpha/kernel/init_task.c
@@ -0,0 +1,23 @@
+#include <linux/mm.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+#include <linux/init_task.h>
+#include <linux/fs.h>
+#include <linux/mqueue.h>
+#include <asm/uaccess.h>
+
+
+static struct fs_struct init_fs = INIT_FS;
+static struct files_struct init_files = INIT_FILES;
+static struct signal_struct init_signals = INIT_SIGNALS(init_signals);
+static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
+struct mm_struct init_mm = INIT_MM(init_mm);
+struct task_struct init_task = INIT_TASK(init_task);
+
+EXPORT_SYMBOL(init_mm);
+EXPORT_SYMBOL(init_task);
+
+union thread_union init_thread_union
+ __attribute__((section(".data.init_thread")))
+ = { INIT_THREAD_INFO(init_task) };
diff --git a/arch/alpha/kernel/io.c b/arch/alpha/kernel/io.c
new file mode 100644
index 000000000000..19c5875ab398
--- /dev/null
+++ b/arch/alpha/kernel/io.c
@@ -0,0 +1,630 @@
+/*
+ * Alpha IO and memory functions.
+ */
+
+#include <linux/kernel.h>
+#include <linux/types.h>
+#include <linux/string.h>
+#include <linux/module.h>
+#include <asm/io.h>
+
+/* Out-of-line versions of the i/o routines that redirect into the
+ platform-specific version. Note that "platform-specific" may mean
+ "generic", which bumps through the machine vector. */
+
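+/* IO_CONCAT(__IO_PREFIX,name) token-pastes the platform prefix onto
+   the routine name -- e.g. it expands to generic_ioread8() when
+   __IO_PREFIX is "generic" (see asm/io.h and machvec_impl.h). */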
+unsigned int
+ioread8(void __iomem *addr)
+{
+ unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread8)(addr);
+ mb();
+ return ret;
+}
+
+unsigned int ioread16(void __iomem *addr)
+{
+ unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread16)(addr);
+ mb();
+ return ret;
+}
+
+unsigned int ioread32(void __iomem *addr)
+{
+ unsigned int ret = IO_CONCAT(__IO_PREFIX,ioread32)(addr);
+ mb();
+ return ret;
+}
+
+void iowrite8(u8 b, void __iomem *addr)
+{
+ IO_CONCAT(__IO_PREFIX,iowrite8)(b, addr);
+ mb();
+}
+
+void iowrite16(u16 b, void __iomem *addr)
+{
+ IO_CONCAT(__IO_PREFIX,iowrite16)(b, addr);
+ mb();
+}
+
+void iowrite32(u32 b, void __iomem *addr)
+{
+ IO_CONCAT(__IO_PREFIX,iowrite32)(b, addr);
+ mb();
+}
+
+EXPORT_SYMBOL(ioread8);
+EXPORT_SYMBOL(ioread16);
+EXPORT_SYMBOL(ioread32);
+EXPORT_SYMBOL(iowrite8);
+EXPORT_SYMBOL(iowrite16);
+EXPORT_SYMBOL(iowrite32);
+
+u8 inb(unsigned long port)
+{
+ return ioread8(ioport_map(port, 1));
+}
+
+u16 inw(unsigned long port)
+{
+ return ioread16(ioport_map(port, 2));
+}
+
+u32 inl(unsigned long port)
+{
+ return ioread32(ioport_map(port, 4));
+}
+
+void outb(u8 b, unsigned long port)
+{
+ iowrite8(b, ioport_map(port, 1));
+}
+
+void outw(u16 b, unsigned long port)
+{
+ iowrite16(b, ioport_map(port, 2));
+}
+
+void outl(u32 b, unsigned long port)
+{
+ iowrite32(b, ioport_map(port, 4));
+}
+
+EXPORT_SYMBOL(inb);
+EXPORT_SYMBOL(inw);
+EXPORT_SYMBOL(inl);
+EXPORT_SYMBOL(outb);
+EXPORT_SYMBOL(outw);
+EXPORT_SYMBOL(outl);
+
+u8 __raw_readb(const volatile void __iomem *addr)
+{
+ return IO_CONCAT(__IO_PREFIX,readb)(addr);
+}
+
+u16 __raw_readw(const volatile void __iomem *addr)
+{
+ return IO_CONCAT(__IO_PREFIX,readw)(addr);
+}
+
+u32 __raw_readl(const volatile void __iomem *addr)
+{
+ return IO_CONCAT(__IO_PREFIX,readl)(addr);
+}
+
+u64 __raw_readq(const volatile void __iomem *addr)
+{
+ return IO_CONCAT(__IO_PREFIX,readq)(addr);
+}
+
+void __raw_writeb(u8 b, volatile void __iomem *addr)
+{
+ IO_CONCAT(__IO_PREFIX,writeb)(b, addr);
+}
+
+void __raw_writew(u16 b, volatile void __iomem *addr)
+{
+ IO_CONCAT(__IO_PREFIX,writew)(b, addr);
+}
+
+void __raw_writel(u32 b, volatile void __iomem *addr)
+{
+ IO_CONCAT(__IO_PREFIX,writel)(b, addr);
+}
+
+void __raw_writeq(u64 b, volatile void __iomem *addr)
+{
+ IO_CONCAT(__IO_PREFIX,writeq)(b, addr);
+}
+
+EXPORT_SYMBOL(__raw_readb);
+EXPORT_SYMBOL(__raw_readw);
+EXPORT_SYMBOL(__raw_readl);
+EXPORT_SYMBOL(__raw_readq);
+EXPORT_SYMBOL(__raw_writeb);
+EXPORT_SYMBOL(__raw_writew);
+EXPORT_SYMBOL(__raw_writel);
+EXPORT_SYMBOL(__raw_writeq);
+
+u8 readb(const volatile void __iomem *addr)
+{
+ u8 ret = __raw_readb(addr);
+ mb();
+ return ret;
+}
+
+u16 readw(const volatile void __iomem *addr)
+{
+ u16 ret = __raw_readw(addr);
+ mb();
+ return ret;
+}
+
+u32 readl(const volatile void __iomem *addr)
+{
+ u32 ret = __raw_readl(addr);
+ mb();
+ return ret;
+}
+
+u64 readq(const volatile void __iomem *addr)
+{
+ u64 ret = __raw_readq(addr);
+ mb();
+ return ret;
+}
+
+void writeb(u8 b, volatile void __iomem *addr)
+{
+ __raw_writeb(b, addr);
+ mb();
+}
+
+void writew(u16 b, volatile void __iomem *addr)
+{
+ __raw_writew(b, addr);
+ mb();
+}
+
+void writel(u32 b, volatile void __iomem *addr)
+{
+ __raw_writel(b, addr);
+ mb();
+}
+
+void writeq(u64 b, volatile void __iomem *addr)
+{
+ __raw_writeq(b, addr);
+ mb();
+}
+
+EXPORT_SYMBOL(readb);
+EXPORT_SYMBOL(readw);
+EXPORT_SYMBOL(readl);
+EXPORT_SYMBOL(readq);
+EXPORT_SYMBOL(writeb);
+EXPORT_SYMBOL(writew);
+EXPORT_SYMBOL(writel);
+EXPORT_SYMBOL(writeq);
+
+
+/*
+ * Read COUNT 8-bit bytes from port PORT into memory starting at DST.
+ */
+void ioread8_rep(void __iomem *port, void *dst, unsigned long count)
+{
+ while ((unsigned long)dst & 0x3) {
+ if (!count)
+ return;
+ count--;
+ *(unsigned char *)dst = ioread8(port);
+ dst += 1;
+ }
+
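+	/* Assemble four byte reads into one aligned 32-bit store; the
+	   low byte goes first, matching Alpha's little-endian order. */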
+ while (count >= 4) {
+ unsigned int w;
+ count -= 4;
+ w = ioread8(port);
+ w |= ioread8(port) << 8;
+ w |= ioread8(port) << 16;
+ w |= ioread8(port) << 24;
+ *(unsigned int *)dst = w;
+ dst += 4;
+ }
+
+ while (count) {
+ --count;
+ *(unsigned char *)dst = ioread8(port);
+ dst += 1;
+ }
+}
+
+void insb(unsigned long port, void *dst, unsigned long count)
+{
+ ioread8_rep(ioport_map(port, 1), dst, count);
+}
+
+EXPORT_SYMBOL(ioread8_rep);
+EXPORT_SYMBOL(insb);
+
+/*
+ * Read COUNT 16-bit words from port PORT into memory starting at
+ * DST. DST must be at least short aligned. This is used by the
+ * IDE driver to read disk sectors. Performance is important, but
+ * the interface seems to be slow: just using the inlined version
+ * of inw() breaks things.
+ */
+void ioread16_rep(void __iomem *port, void *dst, unsigned long count)
+{
+ if (unlikely((unsigned long)dst & 0x3)) {
+ if (!count)
+ return;
+ BUG_ON((unsigned long)dst & 0x1);
+ count--;
+ *(unsigned short *)dst = ioread16(port);
+ dst += 2;
+ }
+
+ while (count >= 2) {
+ unsigned int w;
+ count -= 2;
+ w = ioread16(port);
+ w |= ioread16(port) << 16;
+ *(unsigned int *)dst = w;
+ dst += 4;
+ }
+
+ if (count) {
+ *(unsigned short*)dst = ioread16(port);
+ }
+}
+
+void insw(unsigned long port, void *dst, unsigned long count)
+{
+ ioread16_rep(ioport_map(port, 2), dst, count);
+}
+
+EXPORT_SYMBOL(ioread16_rep);
+EXPORT_SYMBOL(insw);
+
+
+/*
+ * Read COUNT 32-bit words from port PORT into memory starting at
+ * DST. Now works with any alignment in DST. Performance is
+ * important, but the interface seems to be slow: just using the
+ * inlined version of inl() breaks things.
+ */
+void ioread32_rep(void __iomem *port, void *dst, unsigned long count)
+{
+ if (unlikely((unsigned long)dst & 0x3)) {
+ while (count--) {
+ struct S { int x __attribute__((packed)); };
+ ((struct S *)dst)->x = ioread32(port);
+ dst += 4;
+ }
+ } else {
+ /* Buffer 32-bit aligned. */
+ while (count--) {
+ *(unsigned int *)dst = ioread32(port);
+ dst += 4;
+ }
+ }
+}
+
+void insl(unsigned long port, void *dst, unsigned long count)
+{
+ ioread32_rep(ioport_map(port, 4), dst, count);
+}
+
+EXPORT_SYMBOL(ioread32_rep);
+EXPORT_SYMBOL(insl);
+
+
+/*
+ * Like insb but in the opposite direction.
+ * Don't worry as much about doing aligned memory transfers:
+ * doing byte reads the "slow" way isn't nearly as slow as
+ * doing byte writes the slow way (no r-m-w cycle).
+ */
+void iowrite8_rep(void __iomem *port, const void *xsrc, unsigned long count)
+{
+ const unsigned char *src = xsrc;
+ while (count--)
+ iowrite8(*src++, port);
+}
+
+void outsb(unsigned long port, const void *src, unsigned long count)
+{
+ iowrite8_rep(ioport_map(port, 1), src, count);
+}
+
+EXPORT_SYMBOL(iowrite8_rep);
+EXPORT_SYMBOL(outsb);
+
+
+/*
+ * Like insw but in the opposite direction. This is used by the IDE
+ * driver to write disk sectors. Performance is important, but the
+ * interface seems to be slow: just using the inlined version of
+ * outw() breaks things.
+ */
+void iowrite16_rep(void __iomem *port, const void *src, unsigned long count)
+{
+ if (unlikely((unsigned long)src & 0x3)) {
+ if (!count)
+ return;
+ BUG_ON((unsigned long)src & 0x1);
+ iowrite16(*(unsigned short *)src, port);
+ src += 2;
+ --count;
+ }
+
+ while (count >= 2) {
+ unsigned int w;
+ count -= 2;
+ w = *(unsigned int *)src;
+ src += 4;
+ iowrite16(w >> 0, port);
+ iowrite16(w >> 16, port);
+ }
+
+ if (count) {
+ iowrite16(*(unsigned short *)src, port);
+ }
+}
+
+void outsw(unsigned long port, const void *src, unsigned long count)
+{
+ iowrite16_rep(ioport_map(port, 2), src, count);
+}
+
+EXPORT_SYMBOL(iowrite16_rep);
+EXPORT_SYMBOL(outsw);
+
+
+/*
+ * Like insl but in the opposite direction. This is used by the IDE
+ * driver to write disk sectors. Works with any alignment in SRC.
+ * Performance is important, but the interface seems to be slow:
+ * just using the inlined version of outl() breaks things.
+ */
+void iowrite32_rep(void __iomem *port, const void *src, unsigned long count)
+{
+ if (unlikely((unsigned long)src & 0x3)) {
+ while (count--) {
+ struct S { int x __attribute__((packed)); };
+ iowrite32(((struct S *)src)->x, port);
+ src += 4;
+ }
+ } else {
+ /* Buffer 32-bit aligned. */
+ while (count--) {
+ iowrite32(*(unsigned int *)src, port);
+ src += 4;
+ }
+ }
+}
+
+void outsl(unsigned long port, const void *src, unsigned long count)
+{
+ iowrite32_rep(ioport_map(port, 4), src, count);
+}
+
+EXPORT_SYMBOL(iowrite32_rep);
+EXPORT_SYMBOL(outsl);
+
+
+/*
+ * Copy data from IO memory space to "real" memory space.
+ * This needs to be optimized.
+ */
+void memcpy_fromio(void *to, const volatile void __iomem *from, long count)
+{
+ /* Optimize co-aligned transfers. Everything else gets handled
+ a byte at a time. */
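+	/* Each loop below borrows one unit from count up front so that
+	   "while (count >= 0)" can serve as the bound, then restores it
+	   on exit. */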
+
+ if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) {
+ count -= 8;
+ do {
+ *(u64 *)to = __raw_readq(from);
+ count -= 8;
+ to += 8;
+ from += 8;
+ } while (count >= 0);
+ count += 8;
+ }
+
+ if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) {
+ count -= 4;
+ do {
+ *(u32 *)to = __raw_readl(from);
+ count -= 4;
+ to += 4;
+ from += 4;
+ } while (count >= 0);
+ count += 4;
+ }
+
+ if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) {
+ count -= 2;
+ do {
+ *(u16 *)to = __raw_readw(from);
+ count -= 2;
+ to += 2;
+ from += 2;
+ } while (count >= 0);
+ count += 2;
+ }
+
+ while (count > 0) {
+ *(u8 *) to = __raw_readb(from);
+ count--;
+ to++;
+ from++;
+ }
+ mb();
+}
+
+EXPORT_SYMBOL(memcpy_fromio);
+
+
+/*
+ * Copy data from "real" memory space to IO memory space.
+ * This needs to be optimized.
+ */
+void memcpy_toio(volatile void __iomem *to, const void *from, long count)
+{
+ /* Optimize co-aligned transfers. Everything else gets handled
+ a byte at a time. */
+ /* FIXME -- align FROM. */
+
+ if (count >= 8 && ((u64)to & 7) == ((u64)from & 7)) {
+ count -= 8;
+ do {
+ __raw_writeq(*(const u64 *)from, to);
+ count -= 8;
+ to += 8;
+ from += 8;
+ } while (count >= 0);
+ count += 8;
+ }
+
+ if (count >= 4 && ((u64)to & 3) == ((u64)from & 3)) {
+ count -= 4;
+ do {
+ __raw_writel(*(const u32 *)from, to);
+ count -= 4;
+ to += 4;
+ from += 4;
+ } while (count >= 0);
+ count += 4;
+ }
+
+ if (count >= 2 && ((u64)to & 1) == ((u64)from & 1)) {
+ count -= 2;
+ do {
+ __raw_writew(*(const u16 *)from, to);
+ count -= 2;
+ to += 2;
+ from += 2;
+ } while (count >= 0);
+ count += 2;
+ }
+
+ while (count > 0) {
+ __raw_writeb(*(const u8 *) from, to);
+ count--;
+ to++;
+ from++;
+ }
+ mb();
+}
+
+EXPORT_SYMBOL(memcpy_toio);
+
+
+/*
+ * "memset" on IO memory space.
+ */
+void _memset_c_io(volatile void __iomem *to, unsigned long c, long count)
+{
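+	/* C is assumed to be the fill byte already replicated across
+	   all eight bytes by the caller, so it can be stored directly
+	   at any width. */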
+ /* Handle any initial odd byte */
+ if (count > 0 && ((u64)to & 1)) {
+ __raw_writeb(c, to);
+ to++;
+ count--;
+ }
+
+ /* Handle any initial odd halfword */
+ if (count >= 2 && ((u64)to & 2)) {
+ __raw_writew(c, to);
+ to += 2;
+ count -= 2;
+ }
+
+ /* Handle any initial odd word */
+ if (count >= 4 && ((u64)to & 4)) {
+ __raw_writel(c, to);
+ to += 4;
+ count -= 4;
+ }
+
+ /* Handle all full-sized quadwords: we're aligned
+ (or have a small count) */
+ count -= 8;
+ if (count >= 0) {
+ do {
+ __raw_writeq(c, to);
+ to += 8;
+ count -= 8;
+ } while (count >= 0);
+ }
+ count += 8;
+
+ /* The tail is word-aligned if we still have count >= 4 */
+ if (count >= 4) {
+ __raw_writel(c, to);
+ to += 4;
+ count -= 4;
+ }
+
+ /* The tail is half-word aligned if we have count >= 2 */
+ if (count >= 2) {
+ __raw_writew(c, to);
+ to += 2;
+ count -= 2;
+ }
+
+ /* And finally, one last byte.. */
+ if (count) {
+ __raw_writeb(c, to);
+ }
+ mb();
+}
+
+EXPORT_SYMBOL(_memset_c_io);
+
+/* A version of memcpy used by the vga console routines to move data around
+ arbitrarily between screen and main memory. */
+
+void
+scr_memcpyw(u16 *d, const u16 *s, unsigned int count)
+{
+ const u16 __iomem *ios = (const u16 __iomem *) s;
+ u16 __iomem *iod = (u16 __iomem *) d;
+ int s_isio = __is_ioaddr(s);
+ int d_isio = __is_ioaddr(d);
+
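+	/* COUNT is in bytes; the screen-to-screen path below converts
+	   it to a 16-bit word count before copying. */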
+ if (s_isio) {
+ if (d_isio) {
+ /* FIXME: Should handle unaligned ops and
+ operation widening. */
+
+ count /= 2;
+ while (count--) {
+ u16 tmp = __raw_readw(ios++);
+ __raw_writew(tmp, iod++);
+ }
+ }
+ else
+ memcpy_fromio(d, ios, count);
+ } else {
+ if (d_isio)
+ memcpy_toio(iod, s, count);
+ else
+ memcpy(d, s, count);
+ }
+}
+
+EXPORT_SYMBOL(scr_memcpyw);
+
+void __iomem *ioport_map(unsigned long port, unsigned int size)
+{
+ return IO_CONCAT(__IO_PREFIX,ioportmap) (port);
+}
+
+void ioport_unmap(void __iomem *addr)
+{
+}
+
+EXPORT_SYMBOL(ioport_map);
+EXPORT_SYMBOL(ioport_unmap);
diff --git a/arch/alpha/kernel/irq.c b/arch/alpha/kernel/irq.c
new file mode 100644
index 000000000000..b6114f5c0d2b
--- /dev/null
+++ b/arch/alpha/kernel/irq.c
@@ -0,0 +1,774 @@
+/*
+ * linux/arch/alpha/kernel/irq.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ *
+ * This file contains the code used by various IRQ handling routines:
+ * asking for different IRQ's should be done through these routines
+ * instead of just grabbing them. Thus setups with different IRQ numbers
+ * shouldn't result in any weird surprises, and installing new handlers
+ * should be easier.
+ */
+
+#include <linux/config.h>
+#include <linux/kernel.h>
+#include <linux/module.h>
+#include <linux/errno.h>
+#include <linux/kernel_stat.h>
+#include <linux/signal.h>
+#include <linux/sched.h>
+#include <linux/ptrace.h>
+#include <linux/interrupt.h>
+#include <linux/slab.h>
+#include <linux/random.h>
+#include <linux/init.h>
+#include <linux/irq.h>
+#include <linux/proc_fs.h>
+#include <linux/seq_file.h>
+#include <linux/profile.h>
+#include <linux/bitops.h>
+
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+
+/*
+ * Controller mappings for all interrupt sources:
+ */
+irq_desc_t irq_desc[NR_IRQS] __cacheline_aligned = {
+ [0 ... NR_IRQS-1] = {
+ .handler = &no_irq_type,
+ .lock = SPIN_LOCK_UNLOCKED
+ }
+};
+
+static void register_irq_proc(unsigned int irq);
+
+volatile unsigned long irq_err_count;
+
+/*
+ * Special irq handlers.
+ */
+
+irqreturn_t no_action(int cpl, void *dev_id, struct pt_regs *regs)
+{
+ return IRQ_NONE;
+}
+
+/*
+ * Generic no controller code
+ */
+
+static void no_irq_enable_disable(unsigned int irq) { }
+static unsigned int no_irq_startup(unsigned int irq) { return 0; }
+
+static void
+no_irq_ack(unsigned int irq)
+{
+ irq_err_count++;
+ printk(KERN_CRIT "Unexpected IRQ trap at vector %u\n", irq);
+}
+
+struct hw_interrupt_type no_irq_type = {
+ .typename = "none",
+ .startup = no_irq_startup,
+ .shutdown = no_irq_enable_disable,
+ .enable = no_irq_enable_disable,
+ .disable = no_irq_enable_disable,
+ .ack = no_irq_ack,
+ .end = no_irq_enable_disable,
+};
+
+int
+handle_IRQ_event(unsigned int irq, struct pt_regs *regs,
+ struct irqaction *action)
+{
+ int status = 1; /* Force the "do bottom halves" bit */
+ int ret;
+
+ do {
+ if (!(action->flags & SA_INTERRUPT))
+ local_irq_enable();
+ else
+ local_irq_disable();
+
+ ret = action->handler(irq, action->dev_id, regs);
+ if (ret == IRQ_HANDLED)
+ status |= action->flags;
+ action = action->next;
+ } while (action);
+ if (status & SA_SAMPLE_RANDOM)
+ add_interrupt_randomness(irq);
+ local_irq_disable();
+
+ return status;
+}
+
+/*
+ * Generic enable/disable code: this just calls
+ * down into the PIC-specific version for the actual
+ * hardware disable after having gotten the irq
+ * controller lock.
+ */
+inline void
+disable_irq_nosync(unsigned int irq)
+{
+ irq_desc_t *desc = irq_desc + irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ if (!desc->depth++) {
+ desc->status |= IRQ_DISABLED;
+ desc->handler->disable(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+/*
+ * Synchronous version of the above, making sure the IRQ is
+ * no longer running on any other IRQ..
+ */
+void
+disable_irq(unsigned int irq)
+{
+ disable_irq_nosync(irq);
+ synchronize_irq(irq);
+}
+
+void
+enable_irq(unsigned int irq)
+{
+ irq_desc_t *desc = irq_desc + irq;
+ unsigned long flags;
+
+ spin_lock_irqsave(&desc->lock, flags);
+ switch (desc->depth) {
+ case 1: {
+ unsigned int status = desc->status & ~IRQ_DISABLED;
+ desc->status = status;
+ if ((status & (IRQ_PENDING | IRQ_REPLAY)) == IRQ_PENDING) {
+ desc->status = status | IRQ_REPLAY;
+ hw_resend_irq(desc->handler,irq);
+ }
+ desc->handler->enable(irq);
+ /* fall-through */
+ }
+ default:
+ desc->depth--;
+ break;
+ case 0:
+ printk(KERN_ERR "enable_irq() unbalanced from %p\n",
+ __builtin_return_address(0));
+ }
+ spin_unlock_irqrestore(&desc->lock, flags);
+}
+
+int
+setup_irq(unsigned int irq, struct irqaction * new)
+{
+ int shared = 0;
+ struct irqaction *old, **p;
+ unsigned long flags;
+ irq_desc_t *desc = irq_desc + irq;
+
+ if (desc->handler == &no_irq_type)
+ return -ENOSYS;
+
+ /*
+ * Some drivers like serial.c use request_irq() heavily,
+ * so we have to be careful not to interfere with a
+ * running system.
+ */
+ if (new->flags & SA_SAMPLE_RANDOM) {
+ /*
+		 * This function might sleep, so we want to call it first,
+		 * outside of the atomic block.
+		 * Yes, this might clear the entropy pool if the wrong
+		 * driver is attempted to be loaded without actually
+		 * installing a new handler, but is this really a
+		 * problem? Only the sysadmin is able to do this.
+ */
+ rand_initialize_irq(irq);
+ }
+
+ /*
+ * The following block of code has to be executed atomically
+ */
+ spin_lock_irqsave(&desc->lock,flags);
+ p = &desc->action;
+ if ((old = *p) != NULL) {
+ /* Can't share interrupts unless both agree to */
+ if (!(old->flags & new->flags & SA_SHIRQ)) {
+ spin_unlock_irqrestore(&desc->lock,flags);
+ return -EBUSY;
+ }
+
+ /* add new interrupt at end of irq queue */
+ do {
+ p = &old->next;
+ old = *p;
+ } while (old);
+ shared = 1;
+ }
+
+ *p = new;
+
+ if (!shared) {
+ desc->depth = 0;
+ desc->status &=
+ ~(IRQ_DISABLED|IRQ_AUTODETECT|IRQ_WAITING|IRQ_INPROGRESS);
+ desc->handler->startup(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock,flags);
+
+ return 0;
+}
+
+static struct proc_dir_entry * root_irq_dir;
+static struct proc_dir_entry * irq_dir[NR_IRQS];
+
+#ifdef CONFIG_SMP
+static struct proc_dir_entry * smp_affinity_entry[NR_IRQS];
+static char irq_user_affinity[NR_IRQS];
+static cpumask_t irq_affinity[NR_IRQS] = { [0 ... NR_IRQS-1] = CPU_MASK_ALL };
+
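+/* Distribute newly requested IRQs round-robin across the possible
+   CPUs, unless the user has pinned the affinity through
+   /proc/irq/N/smp_affinity (tracked in irq_user_affinity[]). */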
+static void
+select_smp_affinity(int irq)
+{
+ static int last_cpu;
+ int cpu = last_cpu + 1;
+
+ if (! irq_desc[irq].handler->set_affinity || irq_user_affinity[irq])
+ return;
+
+ while (!cpu_possible(cpu))
+ cpu = (cpu < (NR_CPUS-1) ? cpu + 1 : 0);
+ last_cpu = cpu;
+
+ irq_affinity[irq] = cpumask_of_cpu(cpu);
+ irq_desc[irq].handler->set_affinity(irq, cpumask_of_cpu(cpu));
+}
+
+static int
+irq_affinity_read_proc (char *page, char **start, off_t off,
+ int count, int *eof, void *data)
+{
+ int len = cpumask_scnprintf(page, count, irq_affinity[(long)data]);
+ if (count - len < 2)
+ return -EINVAL;
+ len += sprintf(page + len, "\n");
+ return len;
+}
+
+static int
+irq_affinity_write_proc(struct file *file, const char __user *buffer,
+ unsigned long count, void *data)
+{
+ int irq = (long) data, full_count = count, err;
+ cpumask_t new_value;
+
+ if (!irq_desc[irq].handler->set_affinity)
+ return -EIO;
+
+ err = cpumask_parse(buffer, count, new_value);
+
+	/* The special value 0 means release control of the
+	   affinity to the kernel. */
+ cpus_and(new_value, new_value, cpu_online_map);
+ if (cpus_empty(new_value)) {
+ irq_user_affinity[irq] = 0;
+ select_smp_affinity(irq);
+ }
+ /* Do not allow disabling IRQs completely - it's a too easy
+ way to make the system unusable accidentally :-) At least
+ one online CPU still has to be targeted. */
+ else {
+ irq_affinity[irq] = new_value;
+ irq_user_affinity[irq] = 1;
+ irq_desc[irq].handler->set_affinity(irq, new_value);
+ }
+
+ return full_count;
+}
+
+#endif /* CONFIG_SMP */
+
+#define MAX_NAMELEN 10
+
+static void
+register_irq_proc (unsigned int irq)
+{
+ char name [MAX_NAMELEN];
+
+ if (!root_irq_dir || (irq_desc[irq].handler == &no_irq_type) ||
+ irq_dir[irq])
+ return;
+
+ memset(name, 0, MAX_NAMELEN);
+ sprintf(name, "%d", irq);
+
+ /* create /proc/irq/1234 */
+ irq_dir[irq] = proc_mkdir(name, root_irq_dir);
+
+#ifdef CONFIG_SMP
+ if (irq_desc[irq].handler->set_affinity) {
+ struct proc_dir_entry *entry;
+ /* create /proc/irq/1234/smp_affinity */
+ entry = create_proc_entry("smp_affinity", 0600, irq_dir[irq]);
+
+ if (entry) {
+ entry->nlink = 1;
+ entry->data = (void *)(long)irq;
+ entry->read_proc = irq_affinity_read_proc;
+ entry->write_proc = irq_affinity_write_proc;
+ }
+
+ smp_affinity_entry[irq] = entry;
+ }
+#endif
+}
+
+void
+init_irq_proc (void)
+{
+ int i;
+
+ /* create /proc/irq */
+ root_irq_dir = proc_mkdir("irq", NULL);
+
+#ifdef CONFIG_SMP
+ /* create /proc/irq/prof_cpu_mask */
+ create_prof_cpu_mask(root_irq_dir);
+#endif
+
+ /*
+ * Create entries for all existing IRQs.
+ */
+ for (i = 0; i < ACTUAL_NR_IRQS; i++) {
+ if (irq_desc[i].handler == &no_irq_type)
+ continue;
+ register_irq_proc(i);
+ }
+}
+
+int
+request_irq(unsigned int irq, irqreturn_t (*handler)(int, void *, struct pt_regs *),
+ unsigned long irqflags, const char * devname, void *dev_id)
+{
+ int retval;
+ struct irqaction * action;
+
+ if (irq >= ACTUAL_NR_IRQS)
+ return -EINVAL;
+ if (!handler)
+ return -EINVAL;
+
+#if 1
+ /*
+ * Sanity-check: shared interrupts should REALLY pass in
+ * a real dev-ID, otherwise we'll have trouble later trying
+ * to figure out which interrupt is which (messes up the
+ * interrupt freeing logic etc).
+ */
+ if ((irqflags & SA_SHIRQ) && !dev_id) {
+ printk(KERN_ERR
+ "Bad boy: %s (at %p) called us without a dev_id!\n",
+ devname, __builtin_return_address(0));
+ }
+#endif
+
+ action = (struct irqaction *)
+ kmalloc(sizeof(struct irqaction), GFP_KERNEL);
+ if (!action)
+ return -ENOMEM;
+
+ action->handler = handler;
+ action->flags = irqflags;
+ cpus_clear(action->mask);
+ action->name = devname;
+ action->next = NULL;
+ action->dev_id = dev_id;
+
+#ifdef CONFIG_SMP
+ select_smp_affinity(irq);
+#endif
+
+ retval = setup_irq(irq, action);
+ if (retval)
+ kfree(action);
+ return retval;
+}
+
+EXPORT_SYMBOL(request_irq);
+
+void
+free_irq(unsigned int irq, void *dev_id)
+{
+ irq_desc_t *desc;
+ struct irqaction **p;
+ unsigned long flags;
+
+ if (irq >= ACTUAL_NR_IRQS) {
+ printk(KERN_CRIT "Trying to free IRQ%d\n", irq);
+ return;
+ }
+
+ desc = irq_desc + irq;
+ spin_lock_irqsave(&desc->lock,flags);
+ p = &desc->action;
+ for (;;) {
+ struct irqaction * action = *p;
+ if (action) {
+ struct irqaction **pp = p;
+ p = &action->next;
+ if (action->dev_id != dev_id)
+ continue;
+
+ /* Found - now remove it from the list of entries. */
+ *pp = action->next;
+ if (!desc->action) {
+ desc->status |= IRQ_DISABLED;
+ desc->handler->shutdown(irq);
+ }
+ spin_unlock_irqrestore(&desc->lock,flags);
+
+#ifdef CONFIG_SMP
+ /* Wait to make sure it's not being used on
+ another CPU. */
+ while (desc->status & IRQ_INPROGRESS)
+ barrier();
+#endif
+ kfree(action);
+ return;
+ }
+		printk(KERN_ERR "Trying to free already-free IRQ%d\n", irq);
+ spin_unlock_irqrestore(&desc->lock,flags);
+ return;
+ }
+}
+
+EXPORT_SYMBOL(free_irq);
+
+int
+show_interrupts(struct seq_file *p, void *v)
+{
+#ifdef CONFIG_SMP
+ int j;
+#endif
+ int i = *(loff_t *) v;
+ struct irqaction * action;
+ unsigned long flags;
+
+#ifdef CONFIG_SMP
+ if (i == 0) {
+ seq_puts(p, " ");
+ for (i = 0; i < NR_CPUS; i++)
+ if (cpu_online(i))
+ seq_printf(p, "CPU%d ", i);
+ seq_putc(p, '\n');
+ }
+#endif
+
+ if (i < ACTUAL_NR_IRQS) {
+ spin_lock_irqsave(&irq_desc[i].lock, flags);
+ action = irq_desc[i].action;
+ if (!action)
+ goto unlock;
+ seq_printf(p, "%3d: ",i);
+#ifndef CONFIG_SMP
+ seq_printf(p, "%10u ", kstat_irqs(i));
+#else
+ for (j = 0; j < NR_CPUS; j++)
+ if (cpu_online(j))
+ seq_printf(p, "%10u ", kstat_cpu(j).irqs[i]);
+#endif
+ seq_printf(p, " %14s", irq_desc[i].handler->typename);
+ seq_printf(p, " %c%s",
+ (action->flags & SA_INTERRUPT)?'+':' ',
+ action->name);
+
+ for (action=action->next; action; action = action->next) {
+ seq_printf(p, ", %c%s",
+ (action->flags & SA_INTERRUPT)?'+':' ',
+ action->name);
+ }
+
+ seq_putc(p, '\n');
+unlock:
+ spin_unlock_irqrestore(&irq_desc[i].lock, flags);
+ } else if (i == ACTUAL_NR_IRQS) {
+#ifdef CONFIG_SMP
+ seq_puts(p, "IPI: ");
+ for (i = 0; i < NR_CPUS; i++)
+ if (cpu_online(i))
+ seq_printf(p, "%10lu ", cpu_data[i].ipi_count);
+ seq_putc(p, '\n');
+#endif
+ seq_printf(p, "ERR: %10lu\n", irq_err_count);
+ }
+ return 0;
+}
+
+
+/*
+ * handle_irq handles all normal device IRQ's (the special
+ * SMP cross-CPU interrupts have their own specific
+ * handlers).
+ */
+
+#define MAX_ILLEGAL_IRQS 16
+
+void
+handle_irq(int irq, struct pt_regs * regs)
+{
+ /*
+ * We ack quickly, we don't want the irq controller
+ * thinking we're snobs just because some other CPU has
+ * disabled global interrupts (we have already done the
+ * INT_ACK cycles, it's too late to try to pretend to the
+ * controller that we aren't taking the interrupt).
+ *
+ * 0 return value means that this irq is already being
+ * handled by some other CPU. (or is disabled)
+ */
+ int cpu = smp_processor_id();
+ irq_desc_t *desc = irq_desc + irq;
+ struct irqaction * action;
+ unsigned int status;
+ static unsigned int illegal_count=0;
+
+	if ((unsigned) irq >= ACTUAL_NR_IRQS && illegal_count < MAX_ILLEGAL_IRQS) {
+ irq_err_count++;
+ illegal_count++;
+ printk(KERN_CRIT "device_interrupt: invalid interrupt %d\n",
+ irq);
+ return;
+ }
+
+ irq_enter();
+ kstat_cpu(cpu).irqs[irq]++;
+ spin_lock_irq(&desc->lock); /* mask also the higher prio events */
+ desc->handler->ack(irq);
+ /*
+ * REPLAY is when Linux resends an IRQ that was dropped earlier.
+ * WAITING is used by probe to mark irqs that are being tested.
+ */
+ status = desc->status & ~(IRQ_REPLAY | IRQ_WAITING);
+ status |= IRQ_PENDING; /* we _want_ to handle it */
+
+ /*
+ * If the IRQ is disabled for whatever reason, we cannot
+ * use the action we have.
+ */
+ action = NULL;
+ if (!(status & (IRQ_DISABLED | IRQ_INPROGRESS))) {
+ action = desc->action;
+ status &= ~IRQ_PENDING; /* we commit to handling */
+ status |= IRQ_INPROGRESS; /* we are handling it */
+ }
+ desc->status = status;
+
+ /*
+ * If there is no IRQ handler or it was disabled, exit early.
+ * Since we set PENDING, if another processor is handling
+ * a different instance of this same irq, the other processor
+ * will take care of it.
+ */
+ if (!action)
+ goto out;
+
+ /*
+ * Edge triggered interrupts need to remember pending events.
+ * This applies to any hw interrupts that allow a second
+ * instance of the same irq to arrive while we are in handle_irq
+ * or in the handler. But the code here only handles the _second_
+ * instance of the irq, not the third or fourth. So it is mostly
+ * useful for irq hardware that does not mask cleanly in an
+ * SMP environment.
+ */
+ for (;;) {
+ spin_unlock(&desc->lock);
+ handle_IRQ_event(irq, regs, action);
+ spin_lock(&desc->lock);
+
+ if (!(desc->status & IRQ_PENDING)
+ || (desc->status & IRQ_LEVEL))
+ break;
+ desc->status &= ~IRQ_PENDING;
+ }
+ desc->status &= ~IRQ_INPROGRESS;
+out:
+ /*
+ * The ->end() handler has to deal with interrupts which got
+ * disabled while the handler was running.
+ */
+ desc->handler->end(irq);
+ spin_unlock(&desc->lock);
+
+ irq_exit();
+}
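To make the replay loop above concrete, here is a small stand-alone user-space simulation (an analog, not kernel code) of the IRQ_PENDING/IRQ_INPROGRESS handshake; the second "edge" arriving mid-handler is simulated:

```c
#include <stdio.h>

#define IRQ_INPROGRESS 1
#define IRQ_PENDING    2

static unsigned int status;
static int edges_left = 2;	/* the device fires once more mid-handler */

static void run_handler(void)
{
	printf("handling one instance\n");
	if (--edges_left > 0)
		status |= IRQ_PENDING;	/* a new edge arrives while busy */
}

int main(void)
{
	status |= IRQ_PENDING;		/* first edge */
	status &= ~IRQ_PENDING;		/* commit to handling it */
	status |= IRQ_INPROGRESS;
	for (;;) {
		run_handler();
		if (!(status & IRQ_PENDING))
			break;
		status &= ~IRQ_PENDING;	/* replay the edge we would have lost */
	}
	status &= ~IRQ_INPROGRESS;
	return 0;
}
```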
+
+/*
+ * IRQ autodetection code..
+ *
+ * This depends on the fact that any interrupt that
+ * comes in on to an unassigned handler will get stuck
+ * with "IRQ_WAITING" cleared and the interrupt
+ * disabled.
+ */
+unsigned long
+probe_irq_on(void)
+{
+ int i;
+ irq_desc_t *desc;
+ unsigned long delay;
+ unsigned long val;
+
+ /* Something may have generated an irq long ago and we want to
+ flush such a longstanding irq before considering it as spurious. */
+ for (i = NR_IRQS-1; i >= 0; i--) {
+ desc = irq_desc + i;
+
+ spin_lock_irq(&desc->lock);
+ if (!irq_desc[i].action)
+ irq_desc[i].handler->startup(i);
+ spin_unlock_irq(&desc->lock);
+ }
+
+ /* Wait for longstanding interrupts to trigger. */
+ for (delay = jiffies + HZ/50; time_after(delay, jiffies); )
+ barrier(); /* about 20ms delay */
+
+ /* Enable any unassigned irqs.  We must startup again here because
+ if a longstanding irq happened in the previous stage, it may have
+ masked itself. */
+ for (i = NR_IRQS-1; i >= 0; i--) {
+ desc = irq_desc + i;
+
+ spin_lock_irq(&desc->lock);
+ if (!desc->action) {
+ desc->status |= IRQ_AUTODETECT | IRQ_WAITING;
+ if (desc->handler->startup(i))
+ desc->status |= IRQ_PENDING;
+ }
+ spin_unlock_irq(&desc->lock);
+ }
+
+ /*
+ * Wait for spurious interrupts to trigger
+ */
+ for (delay = jiffies + HZ/10; time_after(delay, jiffies); )
+ barrier(); /* about 100ms delay */
+
+ /*
+ * Now filter out any obviously spurious interrupts
+ */
+ val = 0;
+ for (i=0; i<NR_IRQS; i++) {
+ irq_desc_t *desc = irq_desc + i;
+ unsigned int status;
+
+ spin_lock_irq(&desc->lock);
+ status = desc->status;
+
+ if (status & IRQ_AUTODETECT) {
+ /* It triggered already - consider it spurious. */
+ if (!(status & IRQ_WAITING)) {
+ desc->status = status & ~IRQ_AUTODETECT;
+ desc->handler->shutdown(i);
+ } else
+ if (i < 32)
+ val |= 1 << i;
+ }
+ spin_unlock_irq(&desc->lock);
+ }
+
+ return val;
+}
+
+EXPORT_SYMBOL(probe_irq_on);
+
+/*
+ * Return a mask of triggered interrupts (this
+ * can handle only legacy ISA interrupts).
+ */
+unsigned int
+probe_irq_mask(unsigned long val)
+{
+ int i;
+ unsigned int mask;
+
+ mask = 0;
+ for (i = 0; i < NR_IRQS; i++) {
+ irq_desc_t *desc = irq_desc + i;
+ unsigned int status;
+
+ spin_lock_irq(&desc->lock);
+ status = desc->status;
+
+ if (status & IRQ_AUTODETECT) {
+ /* We only react to ISA interrupts */
+ if (!(status & IRQ_WAITING)) {
+ if (i < 16)
+ mask |= 1 << i;
+ }
+
+ desc->status = status & ~IRQ_AUTODETECT;
+ desc->handler->shutdown(i);
+ }
+ spin_unlock_irq(&desc->lock);
+ }
+
+ return mask & val;
+}
+
+/*
+ * Get the result of the IRQ probe.  A negative result means that
+ * several candidates triggered; its absolute value is the
+ * lowest-numbered one of them.
+ */
+
+int
+probe_irq_off(unsigned long val)
+{
+ int i, irq_found, nr_irqs;
+
+ nr_irqs = 0;
+ irq_found = 0;
+ for (i=0; i<NR_IRQS; i++) {
+ irq_desc_t *desc = irq_desc + i;
+ unsigned int status;
+
+ spin_lock_irq(&desc->lock);
+ status = desc->status;
+
+ if (status & IRQ_AUTODETECT) {
+ if (!(status & IRQ_WAITING)) {
+ if (!nr_irqs)
+ irq_found = i;
+ nr_irqs++;
+ }
+ desc->status = status & ~IRQ_AUTODETECT;
+ desc->handler->shutdown(i);
+ }
+ spin_unlock_irq(&desc->lock);
+ }
+
+ if (nr_irqs > 1)
+ irq_found = -irq_found;
+ return irq_found;
+}
+
+EXPORT_SYMBOL(probe_irq_off);
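A sketch of the classic probe sequence a legacy ISA driver builds from probe_irq_on()/probe_irq_off(); the device poke and the settle time are placeholders, not from this patch:

```c
#include <linux/interrupt.h>
#include <linux/delay.h>

static int
mydev_find_irq(void)
{
	unsigned long mask;
	int irq;

	mask = probe_irq_on();		/* arm all unassigned irqs */
	/* ... program the (hypothetical) device to raise its interrupt ... */
	udelay(100);			/* let it fire */
	irq = probe_irq_off(mask);	/* 0: none fired, <0: ambiguous */
	return irq > 0 ? irq : -1;
}
```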
+
+#ifdef CONFIG_SMP
+void synchronize_irq(unsigned int irq)
+{
+ /* is there anything to synchronize with? */
+ if (!irq_desc[irq].action)
+ return;
+
+ while (irq_desc[irq].status & IRQ_INPROGRESS)
+ barrier();
+}
+#endif
diff --git a/arch/alpha/kernel/irq_alpha.c b/arch/alpha/kernel/irq_alpha.c
new file mode 100644
index 000000000000..e6ded33c6e22
--- /dev/null
+++ b/arch/alpha/kernel/irq_alpha.c
@@ -0,0 +1,252 @@
+/*
+ * Alpha specific irq code.
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/irq.h>
+#include <linux/kernel_stat.h>
+
+#include <asm/machvec.h>
+#include <asm/dma.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+
+/* Hack minimum IPL during interrupt processing for broken hardware. */
+#ifdef CONFIG_ALPHA_BROKEN_IRQ_MASK
+int __min_ipl;
+#endif
+
+/*
+ * Performance counter hook. A module can override this to
+ * do something useful.
+ */
+static void
+dummy_perf(unsigned long vector, struct pt_regs *regs)
+{
+ irq_err_count++;
+ printk(KERN_CRIT "Performance counter interrupt!\n");
+}
+
+void (*perf_irq)(unsigned long, struct pt_regs *) = dummy_perf;
+
+/*
+ * The main interrupt entry point.
+ */
+
+asmlinkage void
+do_entInt(unsigned long type, unsigned long vector,
+ unsigned long la_ptr, struct pt_regs *regs)
+{
+ switch (type) {
+ case 0:
+#ifdef CONFIG_SMP
+ handle_ipi(regs);
+ return;
+#else
+ irq_err_count++;
+ printk(KERN_CRIT "Interprocessor interrupt? "
+ "You must be kidding!\n");
+#endif
+ break;
+ case 1:
+#ifdef CONFIG_SMP
+ {
+ long cpu;
+ smp_percpu_timer_interrupt(regs);
+ cpu = smp_processor_id();
+ if (cpu != boot_cpuid) {
+ kstat_cpu(cpu).irqs[RTC_IRQ]++;
+ } else {
+ handle_irq(RTC_IRQ, regs);
+ }
+ }
+#else
+ handle_irq(RTC_IRQ, regs);
+#endif
+ return;
+ case 2:
+ alpha_mv.machine_check(vector, la_ptr, regs);
+ return;
+ case 3:
+ alpha_mv.device_interrupt(vector, regs);
+ return;
+ case 4:
+ perf_irq(la_ptr, regs);
+ return;
+ default:
+ printk(KERN_CRIT "Hardware intr %ld %lx? Huh?\n",
+ type, vector);
+ }
+ printk(KERN_CRIT "PC = %016lx PS=%04lx\n", regs->pc, regs->ps);
+}
+
+void __init
+common_init_isa_dma(void)
+{
+ outb(0, DMA1_RESET_REG);
+ outb(0, DMA2_RESET_REG);
+ outb(0, DMA1_CLR_MASK_REG);
+ outb(0, DMA2_CLR_MASK_REG);
+}
+
+void __init
+init_IRQ(void)
+{
+ /* Just in case the platform init_irq() causes interrupts/mchecks
+ (as is the case with RAWHIDE, at least). */
+ wrent(entInt, 0);
+
+ alpha_mv.init_irq();
+}
+
+/*
+ * machine error checks
+ */
+#define MCHK_K_TPERR 0x0080
+#define MCHK_K_TCPERR 0x0082
+#define MCHK_K_HERR 0x0084
+#define MCHK_K_ECC_C 0x0086
+#define MCHK_K_ECC_NC 0x0088
+#define MCHK_K_OS_BUGCHECK 0x008A
+#define MCHK_K_PAL_BUGCHECK 0x0090
+
+#ifndef CONFIG_SMP
+struct mcheck_info __mcheck_info;
+#endif
+
+void
+process_mcheck_info(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs *regs, const char *machine,
+ int expected)
+{
+ struct el_common *mchk_header;
+ const char *reason;
+
+ /*
+ * See if the machine check is due to a badaddr() and if so,
+ * ignore it.
+ */
+
+#ifdef CONFIG_VERBOSE_MCHECK
+ if (alpha_verbose_mcheck > 1) {
+ printk(KERN_CRIT "%s machine check %s\n", machine,
+ expected ? "expected." : "NOT expected!!!");
+ }
+#endif
+
+ if (expected) {
+ int cpu = smp_processor_id();
+ mcheck_expected(cpu) = 0;
+ mcheck_taken(cpu) = 1;
+ return;
+ }
+
+ mchk_header = (struct el_common *)la_ptr;
+
+ printk(KERN_CRIT "%s machine check: vector=0x%lx pc=0x%lx code=0x%x\n",
+ machine, vector, regs->pc, mchk_header->code);
+
+ switch (mchk_header->code) {
+ /* Machine check reasons. Defined according to PALcode sources. */
+ case 0x80: reason = "tag parity error"; break;
+ case 0x82: reason = "tag control parity error"; break;
+ case 0x84: reason = "generic hard error"; break;
+ case 0x86: reason = "correctable ECC error"; break;
+ case 0x88: reason = "uncorrectable ECC error"; break;
+ case 0x8A: reason = "OS-specific PAL bugcheck"; break;
+ case 0x90: reason = "callsys in kernel mode"; break;
+ case 0x96: reason = "i-cache read retryable error"; break;
+ case 0x98: reason = "processor detected hard error"; break;
+
+ /* System specific (these are for Alcor, at least): */
+ case 0x202: reason = "system detected hard error"; break;
+ case 0x203: reason = "system detected uncorrectable ECC error"; break;
+ case 0x204: reason = "SIO SERR occurred on PCI bus"; break;
+ case 0x205: reason = "parity error detected by core logic"; break;
+ case 0x206: reason = "SIO IOCHK occurred on ISA bus"; break;
+ case 0x207: reason = "non-existent memory error"; break;
+ case 0x208: reason = "MCHK_K_DCSR"; break;
+ case 0x209: reason = "PCI SERR detected"; break;
+ case 0x20b: reason = "PCI data parity error detected"; break;
+ case 0x20d: reason = "PCI address parity error detected"; break;
+ case 0x20f: reason = "PCI master abort error"; break;
+ case 0x211: reason = "PCI target abort error"; break;
+ case 0x213: reason = "scatter/gather PTE invalid error"; break;
+ case 0x215: reason = "flash ROM write error"; break;
+ case 0x217: reason = "IOA timeout detected"; break;
+ case 0x219: reason = "IOCHK#, EISA add-in board parity or other catastrophic error"; break;
+ case 0x21b: reason = "EISA fail-safe timer timeout"; break;
+ case 0x21d: reason = "EISA bus time-out"; break;
+ case 0x21f: reason = "EISA software generated NMI"; break;
+ case 0x221: reason = "unexpected ev5 IRQ[3] interrupt"; break;
+ default: reason = "unknown"; break;
+ }
+
+ printk(KERN_CRIT "machine check type: %s%s\n",
+ reason, mchk_header->retry ? " (retryable)" : "");
+
+ dik_show_regs(regs, NULL);
+
+#ifdef CONFIG_VERBOSE_MCHECK
+ if (alpha_verbose_mcheck > 1) {
+ /* Dump the logout area to give all info. */
+ unsigned long *ptr = (unsigned long *)la_ptr;
+ long i;
+ for (i = 0; i < mchk_header->size / sizeof(long); i += 2) {
+ printk(KERN_CRIT " +%8lx %016lx %016lx\n",
+ i*sizeof(long), ptr[i], ptr[i+1]);
+ }
+ }
+#endif /* CONFIG_VERBOSE_MCHECK */
+}
+
+/*
+ * The special RTC interrupt type. The interrupt itself was
+ * processed by PALcode, and comes in via entInt vector 1.
+ */
+
+static void rtc_enable_disable(unsigned int irq) { }
+static unsigned int rtc_startup(unsigned int irq) { return 0; }
+
+struct irqaction timer_irqaction = {
+ .handler = timer_interrupt,
+ .flags = SA_INTERRUPT,
+ .name = "timer",
+};
+
+static struct hw_interrupt_type rtc_irq_type = {
+ .typename = "RTC",
+ .startup = rtc_startup,
+ .shutdown = rtc_enable_disable,
+ .enable = rtc_enable_disable,
+ .disable = rtc_enable_disable,
+ .ack = rtc_enable_disable,
+ .end = rtc_enable_disable,
+};
+
+void __init
+init_rtc_irq(void)
+{
+ irq_desc[RTC_IRQ].status = IRQ_DISABLED;
+ irq_desc[RTC_IRQ].handler = &rtc_irq_type;
+ setup_irq(RTC_IRQ, &timer_irqaction);
+}
+
+/* Dummy irqactions. */
+struct irqaction isa_cascade_irqaction = {
+ .handler = no_action,
+ .name = "isa-cascade"
+};
+
+struct irqaction timer_cascade_irqaction = {
+ .handler = no_action,
+ .name = "timer-cascade"
+};
+
+struct irqaction halt_switch_irqaction = {
+ .handler = no_action,
+ .name = "halt-switch"
+};
diff --git a/arch/alpha/kernel/irq_i8259.c b/arch/alpha/kernel/irq_i8259.c
new file mode 100644
index 000000000000..b188683b83fd
--- /dev/null
+++ b/arch/alpha/kernel/irq_i8259.c
@@ -0,0 +1,183 @@
+/*
+ * linux/arch/alpha/kernel/irq_i8259.c
+ *
+ * This is the 'legacy' 8259A Programmable Interrupt Controller,
+ * present in the majority of PC/AT boxes.
+ *
+ * Started hacking from linux-2.3.30pre6/arch/i386/kernel/i8259.c.
+ */
+
+#include <linux/config.h>
+#include <linux/init.h>
+#include <linux/cache.h>
+#include <linux/sched.h>
+#include <linux/irq.h>
+#include <linux/interrupt.h>
+
+#include <asm/io.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+
+
+/* Note mask bit is true for DISABLED irqs. */
+static unsigned int cached_irq_mask = 0xffff;
+static DEFINE_SPINLOCK(i8259_irq_lock);
+
+static inline void
+i8259_update_irq_hw(unsigned int irq, unsigned long mask)
+{
+ int port = 0x21;
+ if (irq & 8) {
+ mask >>= 8;
+ port = 0xA1;
+ }
+ outb(mask, port);
+}
+
+inline void
+i8259a_enable_irq(unsigned int irq)
+{
+ spin_lock(&i8259_irq_lock);
+ i8259_update_irq_hw(irq, cached_irq_mask &= ~(1 << irq));
+ spin_unlock(&i8259_irq_lock);
+}
+
+static inline void
+__i8259a_disable_irq(unsigned int irq)
+{
+ i8259_update_irq_hw(irq, cached_irq_mask |= 1 << irq);
+}
+
+void
+i8259a_disable_irq(unsigned int irq)
+{
+ spin_lock(&i8259_irq_lock);
+ __i8259a_disable_irq(irq);
+ spin_unlock(&i8259_irq_lock);
+}
+
+void
+i8259a_mask_and_ack_irq(unsigned int irq)
+{
+ spin_lock(&i8259_irq_lock);
+ __i8259a_disable_irq(irq);
+
+ /* Ack the interrupt making it the lowest priority. */
+ if (irq >= 8) {
+ outb(0xE0 | (irq - 8), 0xa0); /* ack the slave */
+ irq = 2;
+ }
+ outb(0xE0 | irq, 0x20); /* ack the master */
+ spin_unlock(&i8259_irq_lock);
+}
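For reference, a user-space table of the specific-EOI bytes the function above emits: 0xE0|level is the 8259A "rotate on specific EOI" command, and IRQs 8-15 are acked on the slave (port 0xA0) and then as cascade level 2 on the master:

```c
#include <stdio.h>

int main(void)
{
	unsigned int irq;

	for (irq = 0; irq < 16; irq++) {
		if (irq >= 8)	/* slave first, then master cascade level 2 */
			printf("irq %2u: outb(0x%02X, 0xA0); outb(0x%02X, 0x20)\n",
			       irq, 0xE0 | (irq - 8), 0xE0 | 2);
		else
			printf("irq %2u: outb(0x%02X, 0x20)\n", irq, 0xE0 | irq);
	}
	return 0;
}
```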
+
+unsigned int
+i8259a_startup_irq(unsigned int irq)
+{
+ i8259a_enable_irq(irq);
+ return 0; /* never anything pending */
+}
+
+void
+i8259a_end_irq(unsigned int irq)
+{
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ i8259a_enable_irq(irq);
+}
+
+struct hw_interrupt_type i8259a_irq_type = {
+ .typename = "XT-PIC",
+ .startup = i8259a_startup_irq,
+ .shutdown = i8259a_disable_irq,
+ .enable = i8259a_enable_irq,
+ .disable = i8259a_disable_irq,
+ .ack = i8259a_mask_and_ack_irq,
+ .end = i8259a_end_irq,
+};
+
+void __init
+init_i8259a_irqs(void)
+{
+ static struct irqaction cascade = {
+ .handler = no_action,
+ .name = "cascade",
+ };
+
+ long i;
+
+ outb(0xff, 0x21); /* mask all of 8259A-1 */
+ outb(0xff, 0xA1); /* mask all of 8259A-2 */
+
+ for (i = 0; i < 16; i++) {
+ irq_desc[i].status = IRQ_DISABLED;
+ irq_desc[i].handler = &i8259a_irq_type;
+ }
+
+ setup_irq(2, &cascade);
+}
+
+
+#if defined(CONFIG_ALPHA_GENERIC)
+# define IACK_SC alpha_mv.iack_sc
+#elif defined(CONFIG_ALPHA_APECS)
+# define IACK_SC APECS_IACK_SC
+#elif defined(CONFIG_ALPHA_LCA)
+# define IACK_SC LCA_IACK_SC
+#elif defined(CONFIG_ALPHA_CIA)
+# define IACK_SC CIA_IACK_SC
+#elif defined(CONFIG_ALPHA_PYXIS)
+# define IACK_SC PYXIS_IACK_SC
+#elif defined(CONFIG_ALPHA_TITAN)
+# define IACK_SC TITAN_IACK_SC
+#elif defined(CONFIG_ALPHA_TSUNAMI)
+# define IACK_SC TSUNAMI_IACK_SC
+#elif defined(CONFIG_ALPHA_IRONGATE)
+# define IACK_SC IRONGATE_IACK_SC
+#endif
+/* Note that CONFIG_ALPHA_POLARIS is intentionally left out here, since
+ sys_rx164 wants to use isa_no_iack_sc_device_interrupt for some reason. */
+
+#if defined(IACK_SC)
+void
+isa_device_interrupt(unsigned long vector, struct pt_regs *regs)
+{
+ /*
+ * Generate a PCI interrupt acknowledge cycle. The PIC will
+ * respond with the interrupt vector of the highest priority
+ * interrupt that is pending. The PALcode sets up the
+ * interrupt vectors such that irq level L generates vector L.
+ */
+ int j = *(vuip) IACK_SC;
+ j &= 0xff;
+ handle_irq(j, regs);
+}
+#endif
+
+#if defined(CONFIG_ALPHA_GENERIC) || !defined(IACK_SC)
+void
+isa_no_iack_sc_device_interrupt(unsigned long vector, struct pt_regs *regs)
+{
+ unsigned long pic;
+
+ /*
+ * It seems to me that the probability of two or more *device*
+ * interrupts occurring at almost exactly the same time is
+ * pretty low. So why pay the price of checking for
+ * additional interrupts here if the common case can be
+ * handled so much easier?
+ */
+ /*
+ * The first read gives you *all* interrupting lines.
+ * Therefore, read the mask register and AND out those lines
+ * not enabled.  Note that some documentation claims ports 0x21
+ * and 0xA1 are write only.  This is not true.
+ */
+ pic = inb(0x20) | (inb(0xA0) << 8); /* read isr */
+ pic &= 0xFFFB; /* mask out cascade & hibits */
+
+ while (pic) {
+ int j = ffz(~pic);
+ pic &= pic - 1;
+ handle_irq(j, regs);
+ }
+}
+#endif
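The dispatch loop above visits the set bits of pic lowest-first: ffz(~x) is the index of the lowest set bit of x, and x &= x - 1 clears it. A stand-alone user-space rendering of the same idiom:

```c
#include <stdio.h>

static int lowest_set_bit(unsigned long x)
{
	int i = 0;

	while (!(x & 1)) {	/* caller guarantees x != 0 */
		x >>= 1;
		i++;
	}
	return i;
}

int main(void)
{
	unsigned long pic = 0x29;	/* bits 0, 3 and 5 pending */

	while (pic) {
		int j = lowest_set_bit(pic);	/* same as ffz(~pic) */
		pic &= pic - 1;			/* clear that bit */
		printf("dispatch irq %d\n", j);
	}
	return 0;
}
```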
diff --git a/arch/alpha/kernel/irq_impl.h b/arch/alpha/kernel/irq_impl.h
new file mode 100644
index 000000000000..f201d8ffc0d9
--- /dev/null
+++ b/arch/alpha/kernel/irq_impl.h
@@ -0,0 +1,42 @@
+/*
+ * linux/arch/alpha/kernel/irq_impl.h
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ * Copyright (C) 1998, 2000 Richard Henderson
+ *
+ * This file contains declarations and inline functions for interfacing
+ * with the IRQ handling routines in irq.c.
+ */
+
+#include <linux/interrupt.h>
+#include <linux/irq.h>
+#include <linux/profile.h>
+
+
+#define RTC_IRQ 8
+
+extern void isa_device_interrupt(unsigned long, struct pt_regs *);
+extern void isa_no_iack_sc_device_interrupt(unsigned long, struct pt_regs *);
+extern void srm_device_interrupt(unsigned long, struct pt_regs *);
+extern void pyxis_device_interrupt(unsigned long, struct pt_regs *);
+
+extern struct irqaction timer_irqaction;
+extern struct irqaction isa_cascade_irqaction;
+extern struct irqaction timer_cascade_irqaction;
+extern struct irqaction halt_switch_irqaction;
+
+extern void init_srm_irqs(long, unsigned long);
+extern void init_pyxis_irqs(unsigned long);
+extern void init_rtc_irq(void);
+
+extern void common_init_isa_dma(void);
+
+extern void i8259a_enable_irq(unsigned int);
+extern void i8259a_disable_irq(unsigned int);
+extern void i8259a_mask_and_ack_irq(unsigned int);
+extern unsigned int i8259a_startup_irq(unsigned int);
+extern void i8259a_end_irq(unsigned int);
+extern struct hw_interrupt_type i8259a_irq_type;
+extern void init_i8259a_irqs(void);
+
+extern void handle_irq(int irq, struct pt_regs * regs);
diff --git a/arch/alpha/kernel/irq_pyxis.c b/arch/alpha/kernel/irq_pyxis.c
new file mode 100644
index 000000000000..146a20b9e3d5
--- /dev/null
+++ b/arch/alpha/kernel/irq_pyxis.c
@@ -0,0 +1,127 @@
+/*
+ * linux/arch/alpha/kernel/irq_pyxis.c
+ *
+ * Based on code written by David A Rusling (david.rusling@reo.mts.dec.com).
+ *
+ * IRQ Code common to all PYXIS core logic chips.
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/irq.h>
+
+#include <asm/io.h>
+#include <asm/core_cia.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+
+
+/* Note mask bit is true for ENABLED irqs. */
+static unsigned long cached_irq_mask;
+
+static inline void
+pyxis_update_irq_hw(unsigned long mask)
+{
+ *(vulp)PYXIS_INT_MASK = mask;
+ mb();
+ *(vulp)PYXIS_INT_MASK;
+}
+
+static inline void
+pyxis_enable_irq(unsigned int irq)
+{
+ pyxis_update_irq_hw(cached_irq_mask |= 1UL << (irq - 16));
+}
+
+static void
+pyxis_disable_irq(unsigned int irq)
+{
+ pyxis_update_irq_hw(cached_irq_mask &= ~(1UL << (irq - 16)));
+}
+
+static unsigned int
+pyxis_startup_irq(unsigned int irq)
+{
+ pyxis_enable_irq(irq);
+ return 0;
+}
+
+static void
+pyxis_end_irq(unsigned int irq)
+{
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ pyxis_enable_irq(irq);
+}
+
+static void
+pyxis_mask_and_ack_irq(unsigned int irq)
+{
+ unsigned long bit = 1UL << (irq - 16);
+ unsigned long mask = cached_irq_mask &= ~bit;
+
+ /* Disable the interrupt. */
+ *(vulp)PYXIS_INT_MASK = mask;
+ wmb();
+ /* Ack PYXIS PCI interrupt. */
+ *(vulp)PYXIS_INT_REQ = bit;
+ mb();
+ /* Re-read to force both writes. */
+ *(vulp)PYXIS_INT_MASK;
+}
+
+static struct hw_interrupt_type pyxis_irq_type = {
+ .typename = "PYXIS",
+ .startup = pyxis_startup_irq,
+ .shutdown = pyxis_disable_irq,
+ .enable = pyxis_enable_irq,
+ .disable = pyxis_disable_irq,
+ .ack = pyxis_mask_and_ack_irq,
+ .end = pyxis_end_irq,
+};
+
+void
+pyxis_device_interrupt(unsigned long vector, struct pt_regs *regs)
+{
+ unsigned long pld;
+ unsigned int i;
+
+ /* Read the interrupt summary register of PYXIS */
+ pld = *(vulp)PYXIS_INT_REQ;
+ pld &= cached_irq_mask;
+
+ /*
+ * Now for every possible bit set, work through them and call
+ * the appropriate interrupt handler.
+ */
+ while (pld) {
+ i = ffz(~pld);
+ pld &= pld - 1; /* clear least bit set */
+ if (i == 7)
+ isa_device_interrupt(vector, regs);
+ else
+ handle_irq(16+i, regs);
+ }
+}
+
+void __init
+init_pyxis_irqs(unsigned long ignore_mask)
+{
+ long i;
+
+ *(vulp)PYXIS_INT_MASK = 0; /* disable all */
+ *(vulp)PYXIS_INT_REQ = -1; /* flush all */
+ mb();
+
+ /* Send -INTA pulses to clear any pending interrupts ...*/
+ *(vuip) CIA_IACK_SC;
+
+ for (i = 16; i < 48; ++i) {
+ if ((ignore_mask >> i) & 1)
+ continue;
+ irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
+ irq_desc[i].handler = &pyxis_irq_type;
+ }
+
+ setup_irq(16+7, &isa_cascade_irqaction);
+}
diff --git a/arch/alpha/kernel/irq_srm.c b/arch/alpha/kernel/irq_srm.c
new file mode 100644
index 000000000000..0a87e466918c
--- /dev/null
+++ b/arch/alpha/kernel/irq_srm.c
@@ -0,0 +1,79 @@
+/*
+ * Handle interrupts from the SRM, assuming no additional weirdness.
+ */
+
+#include <linux/init.h>
+#include <linux/sched.h>
+#include <linux/irq.h>
+
+#include "proto.h"
+#include "irq_impl.h"
+
+
+/*
+ * Is the PALcode SMP-safe? In other words: can we call cserve_ena/dis
+ * at the same time on multiple CPUs? To be safe I added a spinlock,
+ * but it can be removed trivially if the PALcode is robust against SMP.
+ */
+DEFINE_SPINLOCK(srm_irq_lock);
+
+static inline void
+srm_enable_irq(unsigned int irq)
+{
+ spin_lock(&srm_irq_lock);
+ cserve_ena(irq - 16);
+ spin_unlock(&srm_irq_lock);
+}
+
+static void
+srm_disable_irq(unsigned int irq)
+{
+ spin_lock(&srm_irq_lock);
+ cserve_dis(irq - 16);
+ spin_unlock(&srm_irq_lock);
+}
+
+static unsigned int
+srm_startup_irq(unsigned int irq)
+{
+ srm_enable_irq(irq);
+ return 0;
+}
+
+static void
+srm_end_irq(unsigned int irq)
+{
+ if (!(irq_desc[irq].status & (IRQ_DISABLED|IRQ_INPROGRESS)))
+ srm_enable_irq(irq);
+}
+
+/* Handle interrupts from the SRM, assuming no additional weirdness. */
+static struct hw_interrupt_type srm_irq_type = {
+ .typename = "SRM",
+ .startup = srm_startup_irq,
+ .shutdown = srm_disable_irq,
+ .enable = srm_enable_irq,
+ .disable = srm_disable_irq,
+ .ack = srm_disable_irq,
+ .end = srm_end_irq,
+};
+
+void __init
+init_srm_irqs(long max, unsigned long ignore_mask)
+{
+ long i;
+
+ for (i = 16; i < max; ++i) {
+ if (i < 64 && ((ignore_mask >> i) & 1))
+ continue;
+ irq_desc[i].status = IRQ_DISABLED | IRQ_LEVEL;
+ irq_desc[i].handler = &srm_irq_type;
+ }
+}
+
+void
+srm_device_interrupt(unsigned long vector, struct pt_regs * regs)
+{
+ int irq = (vector - 0x800) >> 4;
+ handle_irq(irq, regs);
+}
diff --git a/arch/alpha/kernel/machvec_impl.h b/arch/alpha/kernel/machvec_impl.h
new file mode 100644
index 000000000000..4959b7a3e1e6
--- /dev/null
+++ b/arch/alpha/kernel/machvec_impl.h
@@ -0,0 +1,150 @@
+/*
+ * linux/arch/alpha/kernel/machvec_impl.h
+ *
+ * Copyright (C) 1997, 1998 Richard Henderson
+ *
+ * This file has goodies to help simplify instantiation of machine vectors.
+ */
+
+#include <linux/config.h>
+#include <asm/pgalloc.h>
+
+/* Whee. These systems don't have an HAE:
+ IRONGATE, MARVEL, POLARIS, TSUNAMI, TITAN, WILDFIRE
+ Fix things up for the GENERIC kernel by defining the HAE address
+ to be that of the cache. Now we can read and write it as we like. ;-) */
+#define IRONGATE_HAE_ADDRESS (&alpha_mv.hae_cache)
+#define MARVEL_HAE_ADDRESS (&alpha_mv.hae_cache)
+#define POLARIS_HAE_ADDRESS (&alpha_mv.hae_cache)
+#define TSUNAMI_HAE_ADDRESS (&alpha_mv.hae_cache)
+#define TITAN_HAE_ADDRESS (&alpha_mv.hae_cache)
+#define WILDFIRE_HAE_ADDRESS (&alpha_mv.hae_cache)
+
+#ifdef CIA_ONE_HAE_WINDOW
+#define CIA_HAE_ADDRESS (&alpha_mv.hae_cache)
+#endif
+#ifdef MCPCIA_ONE_HAE_WINDOW
+#define MCPCIA_HAE_ADDRESS (&alpha_mv.hae_cache)
+#endif
+
+/* Only a few systems don't define IACK_SC, handling all interrupts through
+ the SRM console. But splitting out that one case from IO() below
+ seems like such a pain. Define this to get things to compile. */
+#define JENSEN_IACK_SC 1
+#define T2_IACK_SC 1
+#define WILDFIRE_IACK_SC 1 /* FIXME */
+
+/*
+ * Some helpful macros for filling in the blanks.
+ */
+
+#define CAT1(x,y) x##y
+#define CAT(x,y) CAT1(x,y)
+
+#define DO_DEFAULT_RTC rtc_port: 0x70
+
+#define DO_EV4_MMU \
+ .max_asn = EV4_MAX_ASN, \
+ .mv_switch_mm = ev4_switch_mm, \
+ .mv_activate_mm = ev4_activate_mm, \
+ .mv_flush_tlb_current = ev4_flush_tlb_current, \
+ .mv_flush_tlb_current_page = ev4_flush_tlb_current_page
+
+#define DO_EV5_MMU \
+ .max_asn = EV5_MAX_ASN, \
+ .mv_switch_mm = ev5_switch_mm, \
+ .mv_activate_mm = ev5_activate_mm, \
+ .mv_flush_tlb_current = ev5_flush_tlb_current, \
+ .mv_flush_tlb_current_page = ev5_flush_tlb_current_page
+
+#define DO_EV6_MMU \
+ .max_asn = EV6_MAX_ASN, \
+ .mv_switch_mm = ev5_switch_mm, \
+ .mv_activate_mm = ev5_activate_mm, \
+ .mv_flush_tlb_current = ev5_flush_tlb_current, \
+ .mv_flush_tlb_current_page = ev5_flush_tlb_current_page
+
+#define DO_EV7_MMU \
+ .max_asn = EV6_MAX_ASN, \
+ .mv_switch_mm = ev5_switch_mm, \
+ .mv_activate_mm = ev5_activate_mm, \
+ .mv_flush_tlb_current = ev5_flush_tlb_current, \
+ .mv_flush_tlb_current_page = ev5_flush_tlb_current_page
+
+#define IO_LITE(UP,low) \
+ .hae_register = (unsigned long *) CAT(UP,_HAE_ADDRESS), \
+ .iack_sc = CAT(UP,_IACK_SC), \
+ .mv_ioread8 = CAT(low,_ioread8), \
+ .mv_ioread16 = CAT(low,_ioread16), \
+ .mv_ioread32 = CAT(low,_ioread32), \
+ .mv_iowrite8 = CAT(low,_iowrite8), \
+ .mv_iowrite16 = CAT(low,_iowrite16), \
+ .mv_iowrite32 = CAT(low,_iowrite32), \
+ .mv_readb = CAT(low,_readb), \
+ .mv_readw = CAT(low,_readw), \
+ .mv_readl = CAT(low,_readl), \
+ .mv_readq = CAT(low,_readq), \
+ .mv_writeb = CAT(low,_writeb), \
+ .mv_writew = CAT(low,_writew), \
+ .mv_writel = CAT(low,_writel), \
+ .mv_writeq = CAT(low,_writeq), \
+ .mv_ioportmap = CAT(low,_ioportmap), \
+ .mv_ioremap = CAT(low,_ioremap), \
+ .mv_iounmap = CAT(low,_iounmap), \
+ .mv_is_ioaddr = CAT(low,_is_ioaddr), \
+ .mv_is_mmio = CAT(low,_is_mmio) \
+
+#define IO(UP,low) \
+ IO_LITE(UP,low), \
+ .pci_ops = &CAT(low,_pci_ops), \
+ .mv_pci_tbi = CAT(low,_pci_tbi)
+
+#define DO_APECS_IO IO(APECS,apecs)
+#define DO_CIA_IO IO(CIA,cia)
+#define DO_IRONGATE_IO IO(IRONGATE,irongate)
+#define DO_LCA_IO IO(LCA,lca)
+#define DO_MARVEL_IO IO(MARVEL,marvel)
+#define DO_MCPCIA_IO IO(MCPCIA,mcpcia)
+#define DO_POLARIS_IO IO(POLARIS,polaris)
+#define DO_T2_IO IO(T2,t2)
+#define DO_TSUNAMI_IO IO(TSUNAMI,tsunami)
+#define DO_TITAN_IO IO(TITAN,titan)
+#define DO_WILDFIRE_IO IO(WILDFIRE,wildfire)
+
+#define DO_PYXIS_IO IO_LITE(CIA,cia_bwx), \
+ .pci_ops = &cia_pci_ops, \
+ .mv_pci_tbi = cia_pci_tbi
+
+/*
+ * In a GENERIC kernel, we have lots of these vectors floating about,
+ * all but one of which we want to go away. In a non-GENERIC kernel,
+ * we want only one, ever.
+ *
+ * Accomplish this in the GENERIC kernel by putting all of the vectors
+ * in the .init.data section where they'll go away. We'll copy the
+ * one we want to the real alpha_mv vector in setup_arch.
+ *
+ * Accomplish this in a non-GENERIC kernel by ifdef'ing out all but
+ * one of the vectors, which will not reside in .init.data. We then
+ * alias this one vector to alpha_mv, so no copy is needed.
+ *
+ * Upshot: set __initdata to nothing for non-GENERIC kernels.
+ */
+
+#ifdef CONFIG_ALPHA_GENERIC
+#define __initmv __initdata
+#define ALIAS_MV(x)
+#else
+#define __initmv
+
+/* GCC actually has a syntax for defining aliases, but is under some
+ delusion that you shouldn't be able to declare it extern somewhere
+ else beforehand. Fine. We'll do it ourselves. */
+#if 0
+#define ALIAS_MV(system) \
+ struct alpha_machine_vector alpha_mv __attribute__((alias(#system "_mv")));
+#else
+#define ALIAS_MV(system) \
+ asm(".global alpha_mv\nalpha_mv = " #system "_mv");
+#endif
+#endif /* GENERIC */
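A stand-alone illustration of the assembler alias that ALIAS_MV emits; the vector type is cut down and foo_mv is made up. With GCC on an ELF target, the symbol alpha_mv resolves to foo_mv with no run-time copy:

```c
#include <stdio.h>

struct alpha_machine_vector { const char *name; };

struct alpha_machine_vector foo_mv = { "foo" };

/* Same trick as ALIAS_MV(foo): define alpha_mv at the assembler level. */
asm(".global alpha_mv\nalpha_mv = foo_mv");

extern struct alpha_machine_vector alpha_mv;

int main(void)
{
	printf("%s\n", alpha_mv.name);	/* prints "foo" */
	return 0;
}
```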
diff --git a/arch/alpha/kernel/module.c b/arch/alpha/kernel/module.c
new file mode 100644
index 000000000000..fc271e316a38
--- /dev/null
+++ b/arch/alpha/kernel/module.c
@@ -0,0 +1,311 @@
+/* Kernel module help for Alpha.
+ Copyright (C) 2002 Richard Henderson.
+
+ This program is free software; you can redistribute it and/or modify
+ it under the terms of the GNU General Public License as published by
+ the Free Software Foundation; either version 2 of the License, or
+ (at your option) any later version.
+
+ This program is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ GNU General Public License for more details.
+
+ You should have received a copy of the GNU General Public License
+ along with this program; if not, write to the Free Software
+ Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
+*/
+#include <linux/moduleloader.h>
+#include <linux/elf.h>
+#include <linux/vmalloc.h>
+#include <linux/fs.h>
+#include <linux/string.h>
+#include <linux/kernel.h>
+#include <linux/slab.h>
+
+#if 0
+#define DEBUGP printk
+#else
+#define DEBUGP(fmt...)
+#endif
+
+void *
+module_alloc(unsigned long size)
+{
+ if (size == 0)
+ return NULL;
+ return vmalloc(size);
+}
+
+void
+module_free(struct module *mod, void *module_region)
+{
+ vfree(module_region);
+}
+
+/* Allocate the GOT at the end of the core sections. */
+
+struct got_entry {
+ struct got_entry *next;
+ Elf64_Addr r_offset;
+ int got_offset;
+};
+
+static inline void
+process_reloc_for_got(Elf64_Rela *rela,
+ struct got_entry *chains, Elf64_Xword *poffset)
+{
+ unsigned long r_sym = ELF64_R_SYM (rela->r_info);
+ unsigned long r_type = ELF64_R_TYPE (rela->r_info);
+ Elf64_Addr r_offset = rela->r_offset;
+ struct got_entry *g;
+
+ if (r_type != R_ALPHA_LITERAL)
+ return;
+
+ for (g = chains + r_sym; g ; g = g->next)
+ if (g->r_offset == r_offset) {
+ if (g->got_offset == 0) {
+ g->got_offset = *poffset;
+ *poffset += 8;
+ }
+ goto found_entry;
+ }
+
+ g = kmalloc (sizeof (*g), GFP_KERNEL);
+ g->next = chains[r_sym].next;
+ g->r_offset = r_offset;
+ g->got_offset = *poffset;
+ *poffset += 8;
+ chains[r_sym].next = g;
+
+ found_entry:
+ /* Trick: most of the ELF64_R_TYPE field is unused. There are
+ 42 valid relocation types, and a 32-bit field.  Co-opt the
+ bits above the low 8 to store the GOT offset for this reloc. */
+ rela->r_info |= g->got_offset << 8;
+}
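A user-space demonstration of the r_info packing described in the comment above; the symbol index and GOT offset are arbitrary (R_ALPHA_LITERAL is 4 in the Alpha ELF ABI):

```c
#include <stdio.h>
#include <stdint.h>

#define R_SYM(i)  ((i) >> 32)
#define R_TYPE(i) ((i) & 0xffffffff)

int main(void)
{
	uint64_t r_info = ((uint64_t)7 << 32) | 4; /* sym 7, R_ALPHA_LITERAL */
	uint64_t got_offset = 0x48;

	r_info |= got_offset << 8;	/* co-opt the bits above the low 8 */

	printf("sym=%llu type=%llu got=+0x%llx\n",
	       (unsigned long long)R_SYM(r_info),
	       (unsigned long long)(R_TYPE(r_info) & 0xff),
	       (unsigned long long)(R_TYPE(r_info) >> 8));
	return 0;
}
```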
+
+int
+module_frob_arch_sections(Elf64_Ehdr *hdr, Elf64_Shdr *sechdrs,
+ char *secstrings, struct module *me)
+{
+ struct got_entry *chains;
+ Elf64_Rela *rela;
+ Elf64_Shdr *esechdrs, *symtab, *s, *got;
+ unsigned long nsyms, nrela, i;
+
+ esechdrs = sechdrs + hdr->e_shnum;
+ symtab = got = NULL;
+
+ /* Find out how large the symbol table is. Allocate one got_entry
+ head per symbol. Normally this will be enough, but not always.
+ We'll chain different offsets for the symbol down each head. */
+ for (s = sechdrs; s < esechdrs; ++s)
+ if (s->sh_type == SHT_SYMTAB)
+ symtab = s;
+ else if (!strcmp(".got", secstrings + s->sh_name)) {
+ got = s;
+ me->arch.gotsecindex = s - sechdrs;
+ }
+
+ if (!symtab) {
+ printk(KERN_ERR "module %s: no symbol table\n", me->name);
+ return -ENOEXEC;
+ }
+ if (!got) {
+ printk(KERN_ERR "module %s: no got section\n", me->name);
+ return -ENOEXEC;
+ }
+
+ nsyms = symtab->sh_size / sizeof(Elf64_Sym);
+ chains = kmalloc(nsyms * sizeof(struct got_entry), GFP_KERNEL);
+ if (!chains)
+ return -ENOMEM;
+ memset(chains, 0, nsyms * sizeof(struct got_entry));
+
+ got->sh_size = 0;
+ got->sh_addralign = 8;
+ got->sh_type = SHT_NOBITS;
+
+ /* Examine all LITERAL relocations to find out what GOT entries
+ are required. This sizes the GOT section as well. */
+ for (s = sechdrs; s < esechdrs; ++s)
+ if (s->sh_type == SHT_RELA) {
+ nrela = s->sh_size / sizeof(Elf64_Rela);
+ rela = (void *)hdr + s->sh_offset;
+ for (i = 0; i < nrela; ++i)
+ process_reloc_for_got(rela+i, chains,
+ &got->sh_size);
+ }
+
+ /* Free the memory we allocated. */
+ for (i = 0; i < nsyms; ++i) {
+ struct got_entry *g, *n;
+ for (g = chains[i].next; g ; g = n) {
+ n = g->next;
+ kfree(g);
+ }
+ }
+ kfree(chains);
+
+ return 0;
+}
+
+int
+apply_relocate(Elf64_Shdr *sechdrs, const char *strtab, unsigned int symindex,
+ unsigned int relsec, struct module *me)
+{
+ printk(KERN_ERR "module %s: REL relocation unsupported\n", me->name);
+ return -ENOEXEC;
+}
+
+int
+apply_relocate_add(Elf64_Shdr *sechdrs, const char *strtab,
+ unsigned int symindex, unsigned int relsec,
+ struct module *me)
+{
+ Elf64_Rela *rela = (void *)sechdrs[relsec].sh_addr;
+ unsigned long i, n = sechdrs[relsec].sh_size / sizeof(*rela);
+ Elf64_Sym *symtab, *sym;
+ void *base, *location;
+ unsigned long got, gp;
+
+ DEBUGP("Applying relocate section %u to %u\n", relsec,
+ sechdrs[relsec].sh_info);
+
+ base = (void *)sechdrs[sechdrs[relsec].sh_info].sh_addr;
+ symtab = (Elf64_Sym *)sechdrs[symindex].sh_addr;
+
+ /* The small sections were sorted to the end of the segment.
+ The following should definitely cover them. */
+ gp = (u64)me->module_core + me->core_size - 0x8000;
+ got = sechdrs[me->arch.gotsecindex].sh_addr;
+
+ for (i = 0; i < n; i++) {
+ unsigned long r_sym = ELF64_R_SYM (rela[i].r_info);
+ unsigned long r_type = ELF64_R_TYPE (rela[i].r_info);
+ unsigned long r_got_offset = r_type >> 8;
+ unsigned long value, hi, lo;
+ r_type &= 0xff;
+
+ /* This is where to make the change. */
+ location = base + rela[i].r_offset;
+
+ /* This is the symbol it is referring to. Note that all
+ unresolved symbols have been resolved. */
+ sym = symtab + r_sym;
+ value = sym->st_value + rela[i].r_addend;
+
+ switch (r_type) {
+ case R_ALPHA_NONE:
+ break;
+ case R_ALPHA_REFQUAD:
+ /* BUG() can produce misaligned relocations. */
+ ((u32 *)location)[0] = value;
+ ((u32 *)location)[1] = value >> 32;
+ break;
+ case R_ALPHA_GPREL32:
+ value -= gp;
+ if ((int)value != value)
+ goto reloc_overflow;
+ *(u32 *)location = value;
+ break;
+ case R_ALPHA_LITERAL:
+ hi = got + r_got_offset;
+ lo = hi - gp;
+ if ((short)lo != lo)
+ goto reloc_overflow;
+ *(u16 *)location = lo;
+ *(u64 *)hi = value;
+ break;
+ case R_ALPHA_LITUSE:
+ break;
+ case R_ALPHA_GPDISP:
+ value = gp - (u64)location;
+ lo = (short)value;
+ hi = (int)(value - lo);
+ if (hi + lo != value)
+ goto reloc_overflow;
+ *(u16 *)location = hi >> 16;
+ *(u16 *)(location + rela[i].r_addend) = lo;
+ break;
+ case R_ALPHA_BRSGP:
+ /* BRSGP is only allowed to bind to local symbols.
+ If the section is undef, this means that the
+ value was resolved from somewhere else. */
+ if (sym->st_shndx == SHN_UNDEF)
+ goto reloc_overflow;
+ if ((sym->st_other & STO_ALPHA_STD_GPLOAD) ==
+ STO_ALPHA_STD_GPLOAD)
+ /* Omit the prologue. */
+ value += 8;
+ /* FALLTHRU */
+ case R_ALPHA_BRADDR:
+ value -= (u64)location + 4;
+ if (value & 3)
+ goto reloc_overflow;
+ value = (long)value >> 2;
+ if (value + (1<<21) >= 1<<22)
+ goto reloc_overflow;
+ value &= 0x1fffff;
+ value |= *(u32 *)location & ~0x1fffff;
+ *(u32 *)location = value;
+ break;
+ case R_ALPHA_HINT:
+ break;
+ case R_ALPHA_SREL32:
+ value -= (u64)location;
+ if ((int)value != value)
+ goto reloc_overflow;
+ *(u32 *)location = value;
+ break;
+ case R_ALPHA_SREL64:
+ value -= (u64)location;
+ *(u64 *)location = value;
+ break;
+ case R_ALPHA_GPRELHIGH:
+ value = (long)(value - gp + 0x8000) >> 16;
+ if ((short) value != value)
+ goto reloc_overflow;
+ *(u16 *)location = value;
+ break;
+ case R_ALPHA_GPRELLOW:
+ value -= gp;
+ *(u16 *)location = value;
+ break;
+ case R_ALPHA_GPREL16:
+ value -= gp;
+ if ((short) value != value)
+ goto reloc_overflow;
+ *(u16 *)location = value;
+ break;
+ default:
+ printk(KERN_ERR "module %s: Unknown relocation: %lu\n",
+ me->name, r_type);
+ return -ENOEXEC;
+ reloc_overflow:
+ if (ELF64_ST_TYPE (sym->st_info) == STT_SECTION)
+ printk(KERN_ERR
+ "module %s: Relocation overflow vs section %d\n",
+ me->name, sym->st_shndx);
+ else
+ printk(KERN_ERR
+ "module %s: Relocation overflow vs %s\n",
+ me->name, strtab + sym->st_name);
+ return -ENOEXEC;
+ }
+ }
+
+ return 0;
+}
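A quick user-space check of the hi/lo split used in the R_ALPHA_GPDISP case above: lda applies its 16-bit displacement sign-extended, so the ldah half must absorb the carry, and hi + lo must reassemble the original value. The sample displacement is arbitrary:

```c
#include <stdio.h>
#include <stdint.h>

int main(void)
{
	int64_t value = 0x12345678;		/* displacement to encode */
	int64_t lo = (int16_t)value;		/* sign-extended low half */
	int64_t hi = (int32_t)(value - lo);	/* carry-corrected high half */

	printf("ldah #%ld, lda #%ld, ok=%d\n",
	       (long)(hi >> 16), (long)lo, (int)(hi + lo == value));
	return 0;
}
```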
+
+int
+module_finalize(const Elf_Ehdr *hdr, const Elf_Shdr *sechdrs,
+ struct module *me)
+{
+ return 0;
+}
+
+void
+module_arch_cleanup(struct module *mod)
+{
+}
diff --git a/arch/alpha/kernel/ns87312.c b/arch/alpha/kernel/ns87312.c
new file mode 100644
index 000000000000..342b56d24c20
--- /dev/null
+++ b/arch/alpha/kernel/ns87312.c
@@ -0,0 +1,38 @@
+/*
+ * linux/arch/alpha/kernel/ns87312.c
+ */
+
+#include <linux/init.h>
+#include <asm/io.h>
+#include "proto.h"
+
+
+/*
+ * The SRM console *disables* the IDE interface, this code ensures it's
+ * enabled.
+ *
+ * This code bangs on a control register of the 87312 Super I/O chip
+ * that implements parallel port/serial ports/IDE/FDI. Depending on
+ * the motherboard, the Super I/O chip can be configured through a
+ * pair of registers that are located either at I/O ports 0x26e/0x26f
+ * or 0x398/0x399. Unfortunately, autodetecting which base address is
+ * in use works only once (right after a reset). The Super I/O chip
+ * has the additional quirk that configuration register data must be
+ * written twice (I believe this is a safety feature to prevent
+ * accidental modification---fun, isn't it?).
+ */
+
+void __init
+ns87312_enable_ide(long ide_base)
+{
+ int data;
+ unsigned long flags;
+
+ local_irq_save(flags);
+ outb(0, ide_base); /* set the index register for reg #0 */
+ data = inb(ide_base+1); /* read the current contents */
+ outb(0, ide_base); /* set the index register for reg #0 */
+ outb(data | 0x40, ide_base+1); /* turn on IDE */
+ outb(data | 0x40, ide_base+1); /* turn on IDE, really! */
+ local_irq_restore(flags);
+}
diff --git a/arch/alpha/kernel/osf_sys.c b/arch/alpha/kernel/osf_sys.c
new file mode 100644
index 000000000000..b5d0fd2bb10a
--- /dev/null
+++ b/arch/alpha/kernel/osf_sys.c
@@ -0,0 +1,1345 @@
+/*
+ * linux/arch/alpha/kernel/osf_sys.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ */
+
+/*
+ * This file handles some of the stranger OSF/1 system call interfaces.
+ * Some of the system calls expect a non-C calling standard, others have
+ * special parameter blocks..
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/syscalls.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/utsname.h>
+#include <linux/time.h>
+#include <linux/timex.h>
+#include <linux/major.h>
+#include <linux/stat.h>
+#include <linux/mman.h>
+#include <linux/shm.h>
+#include <linux/poll.h>
+#include <linux/file.h>
+#include <linux/types.h>
+#include <linux/ipc.h>
+#include <linux/namei.h>
+#include <linux/uio.h>
+#include <linux/vfs.h>
+
+#include <asm/fpu.h>
+#include <asm/io.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/sysinfo.h>
+#include <asm/hwrpb.h>
+#include <asm/processor.h>
+
+extern int do_pipe(int *);
+
+/*
+ * Brk needs to return an error. Still support Linux's brk(0) query idiom,
+ * which OSF programs just shouldn't be doing. We're still not quite
+ * identical to OSF as we don't return 0 on success, but doing otherwise
+ * would require changes to libc. Hopefully this is good enough.
+ */
+asmlinkage unsigned long
+osf_brk(unsigned long brk)
+{
+ unsigned long retval = sys_brk(brk);
+ if (brk && brk != retval)
+ retval = -ENOMEM;
+ return retval;
+}
+
+/*
+ * This is pure guess-work..
+ */
+asmlinkage int
+osf_set_program_attributes(unsigned long text_start, unsigned long text_len,
+ unsigned long bss_start, unsigned long bss_len)
+{
+ struct mm_struct *mm;
+
+ lock_kernel();
+ mm = current->mm;
+ mm->end_code = bss_start + bss_len;
+ mm->brk = bss_start + bss_len;
+#if 0
+ printk("set_program_attributes(%lx %lx %lx %lx)\n",
+ text_start, text_len, bss_start, bss_len);
+#endif
+ unlock_kernel();
+ return 0;
+}
+
+/*
+ * OSF/1 directory handling functions...
+ *
+ * The "getdents()" interface is much more sane: the "basep" stuff is
+ * braindamage (it can't really handle filesystems where the directory
+ * offset differences aren't the same as "d_reclen").
+ */
+#define NAME_OFFSET offsetof (struct osf_dirent, d_name)
+#define ROUND_UP(x) (((x)+3) & ~3)
+
+struct osf_dirent {
+ unsigned int d_ino;
+ unsigned short d_reclen;
+ unsigned short d_namlen;
+ char d_name[1];
+};
+
+struct osf_dirent_callback {
+ struct osf_dirent __user *dirent;
+ long __user *basep;
+ unsigned int count;
+ int error;
+};
+
+static int
+osf_filldir(void *__buf, const char *name, int namlen, loff_t offset,
+ ino_t ino, unsigned int d_type)
+{
+ struct osf_dirent __user *dirent;
+ struct osf_dirent_callback *buf = (struct osf_dirent_callback *) __buf;
+ unsigned int reclen = ROUND_UP(NAME_OFFSET + namlen + 1);
+
+ buf->error = -EINVAL; /* only used if we fail */
+ if (reclen > buf->count)
+ return -EINVAL;
+ if (buf->basep) {
+ if (put_user(offset, buf->basep))
+ return -EFAULT;
+ buf->basep = NULL;
+ }
+ dirent = buf->dirent;
+ put_user(ino, &dirent->d_ino);
+ put_user(namlen, &dirent->d_namlen);
+ put_user(reclen, &dirent->d_reclen);
+ if (copy_to_user(dirent->d_name, name, namlen) ||
+ put_user(0, dirent->d_name + namlen))
+ return -EFAULT;
+ dirent = (void __user *)dirent + reclen;
+ buf->dirent = dirent;
+ buf->count -= reclen;
+ return 0;
+}
+
+asmlinkage int
+osf_getdirentries(unsigned int fd, struct osf_dirent __user *dirent,
+ unsigned int count, long __user *basep)
+{
+ int error;
+ struct file *file;
+ struct osf_dirent_callback buf;
+
+ error = -EBADF;
+ file = fget(fd);
+ if (!file)
+ goto out;
+
+ buf.dirent = dirent;
+ buf.basep = basep;
+ buf.count = count;
+ buf.error = 0;
+
+ error = vfs_readdir(file, osf_filldir, &buf);
+ if (error < 0)
+ goto out_putf;
+
+ error = buf.error;
+ if (count != buf.count)
+ error = count - buf.count;
+
+ out_putf:
+ fput(file);
+ out:
+ return error;
+}
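To make the record sizing in osf_filldir above concrete, a small user-space program computes reclen the same way: fixed header, name, terminating NUL, rounded up to 4 bytes. The entry name is arbitrary:

```c
#include <stdio.h>
#include <stddef.h>
#include <string.h>

struct osf_dirent {
	unsigned int d_ino;
	unsigned short d_reclen;
	unsigned short d_namlen;
	char d_name[1];
};

#define NAME_OFFSET offsetof(struct osf_dirent, d_name)
#define ROUND_UP(x) (((x) + 3) & ~3)

int main(void)
{
	const char *name = "vmlinux";
	unsigned int reclen = ROUND_UP(NAME_OFFSET + strlen(name) + 1);

	printf("header %zu + name %zu + NUL -> reclen %u\n",
	       NAME_OFFSET, strlen(name), reclen);
	return 0;
}
```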
+
+#undef ROUND_UP
+#undef NAME_OFFSET
+
+asmlinkage unsigned long
+osf_mmap(unsigned long addr, unsigned long len, unsigned long prot,
+ unsigned long flags, unsigned long fd, unsigned long off)
+{
+ struct file *file = NULL;
+ unsigned long ret = -EBADF;
+
+#if 0
+ if (flags & (_MAP_HASSEMAPHORE | _MAP_INHERIT | _MAP_UNALIGNED))
+ printk("%s: unimplemented OSF mmap flags %04lx\n",
+ current->comm, flags);
+#endif
+ if (!(flags & MAP_ANONYMOUS)) {
+ file = fget(fd);
+ if (!file)
+ goto out;
+ }
+ flags &= ~(MAP_EXECUTABLE | MAP_DENYWRITE);
+ down_write(&current->mm->mmap_sem);
+ ret = do_mmap(file, addr, len, prot, flags, off);
+ up_write(&current->mm->mmap_sem);
+ if (file)
+ fput(file);
+ out:
+ return ret;
+}
+
+
+/*
+ * The OSF/1 statfs structure is much larger, but this should
+ * match the beginning, at least.
+ */
+struct osf_statfs {
+ short f_type;
+ short f_flags;
+ int f_fsize;
+ int f_bsize;
+ int f_blocks;
+ int f_bfree;
+ int f_bavail;
+ int f_files;
+ int f_ffree;
+ __kernel_fsid_t f_fsid;
+};
+
+static int
+linux_to_osf_statfs(struct kstatfs *linux_stat, struct osf_statfs __user *osf_stat,
+ unsigned long bufsiz)
+{
+ struct osf_statfs tmp_stat;
+
+ tmp_stat.f_type = linux_stat->f_type;
+ tmp_stat.f_flags = 0; /* mount flags */
+ tmp_stat.f_fsize = linux_stat->f_frsize;
+ tmp_stat.f_bsize = linux_stat->f_bsize;
+ tmp_stat.f_blocks = linux_stat->f_blocks;
+ tmp_stat.f_bfree = linux_stat->f_bfree;
+ tmp_stat.f_bavail = linux_stat->f_bavail;
+ tmp_stat.f_files = linux_stat->f_files;
+ tmp_stat.f_ffree = linux_stat->f_ffree;
+ tmp_stat.f_fsid = linux_stat->f_fsid;
+ if (bufsiz > sizeof(tmp_stat))
+ bufsiz = sizeof(tmp_stat);
+ return copy_to_user(osf_stat, &tmp_stat, bufsiz) ? -EFAULT : 0;
+}
+
+static int
+do_osf_statfs(struct dentry * dentry, struct osf_statfs __user *buffer,
+ unsigned long bufsiz)
+{
+ struct kstatfs linux_stat;
+ int error = vfs_statfs(dentry->d_inode->i_sb, &linux_stat);
+ if (!error)
+ error = linux_to_osf_statfs(&linux_stat, buffer, bufsiz);
+ return error;
+}
+
+asmlinkage int
+osf_statfs(char __user *path, struct osf_statfs __user *buffer, unsigned long bufsiz)
+{
+ struct nameidata nd;
+ int retval;
+
+ retval = user_path_walk(path, &nd);
+ if (!retval) {
+ retval = do_osf_statfs(nd.dentry, buffer, bufsiz);
+ path_release(&nd);
+ }
+ return retval;
+}
+
+asmlinkage int
+osf_fstatfs(unsigned long fd, struct osf_statfs __user *buffer, unsigned long bufsiz)
+{
+ struct file *file;
+ int retval;
+
+ retval = -EBADF;
+ file = fget(fd);
+ if (file) {
+ retval = do_osf_statfs(file->f_dentry, buffer, bufsiz);
+ fput(file);
+ }
+ return retval;
+}
+
+/*
+ * Uhh.. OSF/1 mount parameters aren't exactly obvious..
+ *
+ * Although to be frank, neither are the native Linux/i386 ones..
+ */
+struct ufs_args {
+ char __user *devname;
+ int flags;
+ uid_t exroot;
+};
+
+struct cdfs_args {
+ char __user *devname;
+ int flags;
+ uid_t exroot;
+
+ /* This has lots more here, which Linux handles with the option block
+ but I'm too lazy to do the translation into ASCII. */
+};
+
+struct procfs_args {
+ char __user *devname;
+ int flags;
+ uid_t exroot;
+};
+
+/*
+ * We can't actually handle ufs yet, so we translate UFS mounts to
+ * ext2fs mounts. I wouldn't mind a UFS filesystem, but the UFS
+ * layout is so braindead it's a major headache doing it.
+ *
+ * Just how long ago was it written? OTOH our UFS driver may be still
+ * unhappy with OSF UFS. [CHECKME]
+ */
+static int
+osf_ufs_mount(char *dirname, struct ufs_args __user *args, int flags)
+{
+ int retval;
+ struct cdfs_args tmp;
+ char *devname;
+
+ retval = -EFAULT;
+ if (copy_from_user(&tmp, args, sizeof(tmp)))
+ goto out;
+ devname = getname(tmp.devname);
+ retval = PTR_ERR(devname);
+ if (IS_ERR(devname))
+ goto out;
+ retval = do_mount(devname, dirname, "ext2", flags, NULL);
+ putname(devname);
+ out:
+ return retval;
+}
+
+static int
+osf_cdfs_mount(char *dirname, struct cdfs_args __user *args, int flags)
+{
+ int retval;
+ struct cdfs_args tmp;
+ char *devname;
+
+ retval = -EFAULT;
+ if (copy_from_user(&tmp, args, sizeof(tmp)))
+ goto out;
+ devname = getname(tmp.devname);
+ retval = PTR_ERR(devname);
+ if (IS_ERR(devname))
+ goto out;
+ retval = do_mount(devname, dirname, "iso9660", flags, NULL);
+ putname(devname);
+ out:
+ return retval;
+}
+
+static int
+osf_procfs_mount(char *dirname, struct procfs_args __user *args, int flags)
+{
+ struct procfs_args tmp;
+
+ if (copy_from_user(&tmp, args, sizeof(tmp)))
+ return -EFAULT;
+
+ return do_mount("", dirname, "proc", flags, NULL);
+}
+
+asmlinkage int
+osf_mount(unsigned long typenr, char __user *path, int flag, void __user *data)
+{
+ int retval = -EINVAL;
+ char *name;
+
+ lock_kernel();
+
+ name = getname(path);
+ retval = PTR_ERR(name);
+ if (IS_ERR(name))
+ goto out;
+ switch (typenr) {
+ case 1:
+ retval = osf_ufs_mount(name, data, flag);
+ break;
+ case 6:
+ retval = osf_cdfs_mount(name, data, flag);
+ break;
+ case 9:
+ retval = osf_procfs_mount(name, data, flag);
+ break;
+ default:
+ printk("osf_mount(%ld, %x)\n", typenr, flag);
+ }
+ putname(name);
+ out:
+ unlock_kernel();
+ return retval;
+}
+
+asmlinkage int
+osf_utsname(char __user *name)
+{
+ int error;
+
+ down_read(&uts_sem);
+ error = -EFAULT;
+ if (copy_to_user(name + 0, system_utsname.sysname, 32))
+ goto out;
+ if (copy_to_user(name + 32, system_utsname.nodename, 32))
+ goto out;
+ if (copy_to_user(name + 64, system_utsname.release, 32))
+ goto out;
+ if (copy_to_user(name + 96, system_utsname.version, 32))
+ goto out;
+ if (copy_to_user(name + 128, system_utsname.machine, 32))
+ goto out;
+
+ error = 0;
+ out:
+ up_read(&uts_sem);
+ return error;
+}
+
+asmlinkage unsigned long
+sys_getpagesize(void)
+{
+ return PAGE_SIZE;
+}
+
+asmlinkage unsigned long
+sys_getdtablesize(void)
+{
+ return NR_OPEN;
+}
+
+/*
+ * For compatibility with OSF/1 only. Use utsname(2) instead.
+ */
+asmlinkage int
+osf_getdomainname(char __user *name, int namelen)
+{
+ unsigned len;
+ int i;
+
+ if (!access_ok(VERIFY_WRITE, name, namelen))
+ return -EFAULT;
+
+ len = namelen;
+ if (namelen > 32)
+ len = 32;
+
+ down_read(&uts_sem);
+ for (i = 0; i < len; ++i) {
+ __put_user(system_utsname.domainname[i], name + i);
+ if (system_utsname.domainname[i] == '\0')
+ break;
+ }
+ up_read(&uts_sem);
+
+ return 0;
+}
+
+asmlinkage long
+osf_shmat(int shmid, void __user *shmaddr, int shmflg)
+{
+ unsigned long raddr;
+ long err;
+
+ err = do_shmat(shmid, shmaddr, shmflg, &raddr);
+
+ /*
+ * This works because all user-level addresses are
+ * non-negative longs!
+ */
+ return err ? err : (long)raddr;
+}
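A user-space illustration of the sign multiplexing osf_shmat relies on: small negative longs are errno values, everything else is an address, so one long carries both (assumes an LP64 target; the address is made up):

```c
#include <stdio.h>
#include <errno.h>

static long attach(int fail)
{
	if (fail)
		return -EINVAL;		/* error path */
	return 0x20000000000L;		/* hypothetical mapping address */
}

int main(void)
{
	long r = attach(0);

	if (r < 0)
		printf("error %ld\n", -r);
	else
		printf("attached at %#lx\n", r);
	return 0;
}
```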
+
+
+/*
+ * The following stuff should move into a header file should it ever
+ * be labeled "officially supported." Right now, there is just enough
+ * support to avoid applications (such as tar) printing error
+ * messages. The attributes are not really implemented.
+ */
+
+/*
+ * Values for Property list entry flag
+ */
+#define PLE_PROPAGATE_ON_COPY 0x1 /* cp(1) will copy entry
+ by default */
+#define PLE_FLAG_MASK 0x1 /* Valid flag values */
+#define PLE_FLAG_ALL -1 /* All flag value */
+
+struct proplistname_args {
+ unsigned int pl_mask;
+ unsigned int pl_numnames;
+ char **pl_names;
+};
+
+union pl_args {
+ struct setargs {
+ char __user *path;
+ long follow;
+ long nbytes;
+ char __user *buf;
+ } set;
+ struct fsetargs {
+ long fd;
+ long nbytes;
+ char __user *buf;
+ } fset;
+ struct getargs {
+ char __user *path;
+ long follow;
+ struct proplistname_args __user *name_args;
+ long nbytes;
+ char __user *buf;
+ int __user *min_buf_size;
+ } get;
+ struct fgetargs {
+ long fd;
+ struct proplistname_args __user *name_args;
+ long nbytes;
+ char __user *buf;
+ int __user *min_buf_size;
+ } fget;
+ struct delargs {
+ char __user *path;
+ long follow;
+ struct proplistname_args __user *name_args;
+ } del;
+ struct fdelargs {
+ long fd;
+ struct proplistname_args __user *name_args;
+ } fdel;
+};
+
+enum pl_code {
+ PL_SET = 1, PL_FSET = 2,
+ PL_GET = 3, PL_FGET = 4,
+ PL_DEL = 5, PL_FDEL = 6
+};
+
+asmlinkage long
+osf_proplist_syscall(enum pl_code code, union pl_args __user *args)
+{
+ long error;
+ int __user *min_buf_size_ptr;
+
+ lock_kernel();
+ switch (code) {
+ case PL_SET:
+ if (get_user(error, &args->set.nbytes))
+ error = -EFAULT;
+ break;
+ case PL_FSET:
+ if (get_user(error, &args->fset.nbytes))
+ error = -EFAULT;
+ break;
+ case PL_GET:
+ error = get_user(min_buf_size_ptr, &args->get.min_buf_size);
+ if (error)
+ break;
+ error = put_user(0, min_buf_size_ptr);
+ break;
+ case PL_FGET:
+ error = get_user(min_buf_size_ptr, &args->fget.min_buf_size);
+ if (error)
+ break;
+ error = put_user(0, min_buf_size_ptr);
+ break;
+ case PL_DEL:
+ case PL_FDEL:
+ error = 0;
+ break;
+ default:
+ error = -EOPNOTSUPP;
+ break;
+ }
+ unlock_kernel();
+ return error;
+}
+
+asmlinkage int
+osf_sigstack(struct sigstack __user *uss, struct sigstack __user *uoss)
+{
+ unsigned long usp = rdusp();
+ unsigned long oss_sp = current->sas_ss_sp + current->sas_ss_size;
+ unsigned long oss_os = on_sig_stack(usp);
+ int error;
+
+ if (uss) {
+ void __user *ss_sp;
+
+ error = -EFAULT;
+ if (get_user(ss_sp, &uss->ss_sp))
+ goto out;
+
+ /* If the current stack was set with sigaltstack, don't
+ swap stacks while we are on it. */
+ error = -EPERM;
+ if (current->sas_ss_sp && on_sig_stack(usp))
+ goto out;
+
+ /* Since we don't know the extent of the stack, and we don't
+ track onstack-ness, but rather calculate it, we must
+ presume a size. Ho hum this interface is lossy. */
+ current->sas_ss_sp = (unsigned long)ss_sp - SIGSTKSZ;
+ current->sas_ss_size = SIGSTKSZ;
+ }
+
+ if (uoss) {
+ error = -EFAULT;
+ if (! access_ok(VERIFY_WRITE, uoss, sizeof(*uoss))
+ || __put_user(oss_sp, &uoss->ss_sp)
+ || __put_user(oss_os, &uoss->ss_onstack))
+ goto out;
+ }
+
+ error = 0;
+ out:
+ return error;
+}
+
+asmlinkage long
+osf_sysinfo(int command, char __user *buf, long count)
+{
+ static char * sysinfo_table[] = {
+ system_utsname.sysname,
+ system_utsname.nodename,
+ system_utsname.release,
+ system_utsname.version,
+ system_utsname.machine,
+ "alpha", /* instruction set architecture */
+ "dummy", /* hardware serial number */
+ "dummy", /* hardware manufacturer */
+ "dummy", /* secure RPC domain */
+ };
+ unsigned long offset;
+ char *res;
+ long len, err = -EINVAL;
+
+ offset = command-1;
+ if (offset >= sizeof(sysinfo_table)/sizeof(char *)) {
+ /* Digital UNIX has a few unpublished interfaces here */
+ printk("sysinfo(%d)", command);
+ goto out;
+ }
+
+ down_read(&uts_sem);
+ res = sysinfo_table[offset];
+ len = strlen(res)+1;
+ if (len > count)
+ len = count;
+ if (copy_to_user(buf, res, len))
+ err = -EFAULT;
+ else
+ err = 0;
+ up_read(&uts_sem);
+ out:
+ return err;
+}
+
+asmlinkage unsigned long
+osf_getsysinfo(unsigned long op, void __user *buffer, unsigned long nbytes,
+ int __user *start, void __user *arg)
+{
+ unsigned long w;
+ struct percpu_struct *cpu;
+
+ switch (op) {
+ case GSI_IEEE_FP_CONTROL:
+ /* Return current software fp control & status bits. */
+ /* Note that DU doesn't verify available space here. */
+
+ w = current_thread_info()->ieee_state & IEEE_SW_MASK;
+ w = swcr_update_status(w, rdfpcr());
+ if (put_user(w, (unsigned long __user *) buffer))
+ return -EFAULT;
+ return 0;
+
+ case GSI_IEEE_STATE_AT_SIGNAL:
+ /*
+ * Not sure anybody will ever use this weird stuff. These
+ * ops can be used (under OSF/1) to set the fpcr that should
+ * be used when a signal handler starts executing.
+ */
+ break;
+
+ case GSI_UACPROC:
+ if (nbytes < sizeof(unsigned int))
+ return -EINVAL;
+ w = (current_thread_info()->flags >> UAC_SHIFT) & UAC_BITMASK;
+ if (put_user(w, (unsigned int __user *)buffer))
+ return -EFAULT;
+ return 1;
+
+ case GSI_PROC_TYPE:
+ if (nbytes < sizeof(unsigned long))
+ return -EINVAL;
+ cpu = (struct percpu_struct*)
+ ((char*)hwrpb + hwrpb->processor_offset);
+ w = cpu->type;
+ if (put_user(w, (unsigned long __user*)buffer))
+ return -EFAULT;
+ return 1;
+
+ case GSI_GET_HWRPB:
+ if (nbytes < sizeof(*hwrpb))
+ return -EINVAL;
+ if (copy_to_user(buffer, hwrpb, nbytes) != 0)
+ return -EFAULT;
+ return 1;
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+asmlinkage unsigned long
+osf_setsysinfo(unsigned long op, void __user *buffer, unsigned long nbytes,
+ int __user *start, void __user *arg)
+{
+ switch (op) {
+ case SSI_IEEE_FP_CONTROL: {
+ unsigned long swcr, fpcr;
+ unsigned int *state;
+
+ /*
+ * Alpha Architecture Handbook 4.7.7.3:
+ * To be fully IEEE compliant, we must track the current IEEE
+ * exception state in software, because spurious bits can be
+ * set in the trap shadow of a software-completed insn.
+ */
+
+ if (get_user(swcr, (unsigned long __user *)buffer))
+ return -EFAULT;
+ state = &current_thread_info()->ieee_state;
+
+ /* Update software trap enable bits. */
+ *state = (*state & ~IEEE_SW_MASK) | (swcr & IEEE_SW_MASK);
+
+ /* Update the real fpcr. */
+ fpcr = rdfpcr() & FPCR_DYN_MASK;
+ fpcr |= ieee_swcr_to_fpcr(swcr);
+ wrfpcr(fpcr);
+
+ return 0;
+ }
+
+ case SSI_IEEE_RAISE_EXCEPTION: {
+ unsigned long exc, swcr, fpcr, fex;
+ unsigned int *state;
+
+ if (get_user(exc, (unsigned long __user *)buffer))
+ return -EFAULT;
+ state = &current_thread_info()->ieee_state;
+ exc &= IEEE_STATUS_MASK;
+
+ /* Update software trap enable bits. */
+ swcr = (*state & IEEE_SW_MASK) | exc;
+ *state |= exc;
+
+ /* Update the real fpcr. */
+ fpcr = rdfpcr();
+ fpcr |= ieee_swcr_to_fpcr(swcr);
+ wrfpcr(fpcr);
+
+ /* If any exceptions set by this call, and are unmasked,
+ send a signal. Old exceptions are not signaled. */
+ fex = (exc >> IEEE_STATUS_TO_EXCSUM_SHIFT) & swcr;
+ if (fex) {
+ siginfo_t info;
+ int si_code = 0;
+
+ if (fex & IEEE_TRAP_ENABLE_DNO) si_code = FPE_FLTUND;
+ if (fex & IEEE_TRAP_ENABLE_INE) si_code = FPE_FLTRES;
+ if (fex & IEEE_TRAP_ENABLE_UNF) si_code = FPE_FLTUND;
+ if (fex & IEEE_TRAP_ENABLE_OVF) si_code = FPE_FLTOVF;
+ if (fex & IEEE_TRAP_ENABLE_DZE) si_code = FPE_FLTDIV;
+ if (fex & IEEE_TRAP_ENABLE_INV) si_code = FPE_FLTINV;
+
+ info.si_signo = SIGFPE;
+ info.si_errno = 0;
+ info.si_code = si_code;
+ info.si_addr = NULL; /* FIXME */
+ send_sig_info(SIGFPE, &info, current);
+ }
+ return 0;
+ }
+
+ case SSI_IEEE_STATE_AT_SIGNAL:
+ case SSI_IEEE_IGNORE_STATE_AT_SIGNAL:
+ /*
+ * Not sure anybody will ever use this weird stuff. These
+ * ops can be used (under OSF/1) to set the fpcr that should
+ * be used when a signal handler starts executing.
+ */
+ break;
+
+ case SSI_NVPAIRS: {
+ unsigned long v, w, i;
+ unsigned int old, new;
+
+ for (i = 0; i < nbytes; ++i) {
+
+ if (get_user(v, 2*i + (unsigned int __user *)buffer))
+ return -EFAULT;
+ if (get_user(w, 2*i + 1 + (unsigned int __user *)buffer))
+ return -EFAULT;
+ switch (v) {
+ case SSIN_UACPROC:
+ again:
+ old = current_thread_info()->flags;
+ new = old & ~(UAC_BITMASK << UAC_SHIFT);
+ new = new | (w & UAC_BITMASK) << UAC_SHIFT;
+ if (cmpxchg(&current_thread_info()->flags,
+ old, new) != old)
+ goto again;
+ break;
+
+ default:
+ return -EOPNOTSUPP;
+ }
+ }
+ return 0;
+ }
+
+ default:
+ break;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+/* Translations needed because OSF's time_t is an int, which
+   affects all sorts of things, like timeval and itimerval.  */
+
+extern struct timezone sys_tz;
+extern int do_adjtimex(struct timex *);
+
+struct timeval32
+{
+ int tv_sec, tv_usec;
+};
+
+struct itimerval32
+{
+ struct timeval32 it_interval;
+ struct timeval32 it_value;
+};
+
+static inline long
+get_tv32(struct timeval *o, struct timeval32 __user *i)
+{
+ return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
+ (__get_user(o->tv_sec, &i->tv_sec) |
+ __get_user(o->tv_usec, &i->tv_usec)));
+}
+
+static inline long
+put_tv32(struct timeval32 __user *o, struct timeval *i)
+{
+ return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
+ (__put_user(i->tv_sec, &o->tv_sec) |
+ __put_user(i->tv_usec, &o->tv_usec)));
+}
+
+static inline long
+get_it32(struct itimerval *o, struct itimerval32 __user *i)
+{
+ return (!access_ok(VERIFY_READ, i, sizeof(*i)) ||
+ (__get_user(o->it_interval.tv_sec, &i->it_interval.tv_sec) |
+ __get_user(o->it_interval.tv_usec, &i->it_interval.tv_usec) |
+ __get_user(o->it_value.tv_sec, &i->it_value.tv_sec) |
+ __get_user(o->it_value.tv_usec, &i->it_value.tv_usec)));
+}
+
+static inline long
+put_it32(struct itimerval32 __user *o, struct itimerval *i)
+{
+ return (!access_ok(VERIFY_WRITE, o, sizeof(*o)) ||
+ (__put_user(i->it_interval.tv_sec, &o->it_interval.tv_sec) |
+ __put_user(i->it_interval.tv_usec, &o->it_interval.tv_usec) |
+ __put_user(i->it_value.tv_sec, &o->it_value.tv_sec) |
+ __put_user(i->it_value.tv_usec, &o->it_value.tv_usec)));
+}
+
+static inline void
+jiffies_to_timeval32(unsigned long jiffies, struct timeval32 *value)
+{
+ value->tv_usec = (jiffies % HZ) * (1000000L / HZ);
+ value->tv_sec = jiffies / HZ;
+}
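+
+/* A worked example of the conversion above (added commentary,
+   assuming HZ == 1024 as is typical for Alpha): jiffies == 1500
+   gives tv_sec = 1500 / 1024 = 1 and tv_usec = (1500 % 1024) *
+   (1000000 / 1024) = 476 * 976 = 464576.  The integer division
+   1000000 / HZ loses a little precision, so the result is only
+   tick-accurate.  */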
+
+asmlinkage int
+osf_gettimeofday(struct timeval32 __user *tv, struct timezone __user *tz)
+{
+ if (tv) {
+ struct timeval ktv;
+ do_gettimeofday(&ktv);
+ if (put_tv32(tv, &ktv))
+ return -EFAULT;
+ }
+ if (tz) {
+ if (copy_to_user(tz, &sys_tz, sizeof(sys_tz)))
+ return -EFAULT;
+ }
+ return 0;
+}
+
+asmlinkage int
+osf_settimeofday(struct timeval32 __user *tv, struct timezone __user *tz)
+{
+ struct timespec kts;
+ struct timezone ktz;
+
+ if (tv) {
+ if (get_tv32((struct timeval *)&kts, tv))
+ return -EFAULT;
+ }
+ if (tz) {
+ if (copy_from_user(&ktz, tz, sizeof(*tz)))
+ return -EFAULT;
+ }
+
+ kts.tv_nsec *= 1000;
+
+ return do_sys_settimeofday(tv ? &kts : NULL, tz ? &ktz : NULL);
+}
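+
+/* Note (added commentary): get_tv32 above writes a timeval -- tv_sec
+   and tv_usec -- into the first two fields of the timespec, which
+   share its layout on Alpha; multiplying tv_nsec by 1000 then turns
+   the microseconds already stored there into the nanoseconds that
+   do_sys_settimeofday expects.  */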
+
+asmlinkage int
+osf_getitimer(int which, struct itimerval32 __user *it)
+{
+ struct itimerval kit;
+ int error;
+
+ error = do_getitimer(which, &kit);
+ if (!error && put_it32(it, &kit))
+ error = -EFAULT;
+
+ return error;
+}
+
+asmlinkage int
+osf_setitimer(int which, struct itimerval32 __user *in, struct itimerval32 __user *out)
+{
+ struct itimerval kin, kout;
+ int error;
+
+ if (in) {
+ if (get_it32(&kin, in))
+ return -EFAULT;
+ } else
+ memset(&kin, 0, sizeof(kin));
+
+ error = do_setitimer(which, &kin, out ? &kout : NULL);
+ if (error || !out)
+ return error;
+
+ if (put_it32(out, &kout))
+ return -EFAULT;
+
+ return 0;
+
+}
+
+asmlinkage int
+osf_utimes(char __user *filename, struct timeval32 __user *tvs)
+{
+ struct timeval ktvs[2];
+
+ if (tvs) {
+ if (get_tv32(&ktvs[0], &tvs[0]) ||
+ get_tv32(&ktvs[1], &tvs[1]))
+ return -EFAULT;
+ }
+
+ return do_utimes(filename, tvs ? ktvs : NULL);
+}
+
+#define MAX_SELECT_SECONDS \
+ ((unsigned long) (MAX_SCHEDULE_TIMEOUT / HZ)-1)
+
+asmlinkage int
+osf_select(int n, fd_set __user *inp, fd_set __user *outp, fd_set __user *exp,
+ struct timeval32 __user *tvp)
+{
+ fd_set_bits fds;
+ char *bits;
+ size_t size;
+ long timeout;
+ int ret = -EINVAL;
+
+ timeout = MAX_SCHEDULE_TIMEOUT;
+ if (tvp) {
+ time_t sec, usec;
+
+ if (!access_ok(VERIFY_READ, tvp, sizeof(*tvp))
+ || __get_user(sec, &tvp->tv_sec)
+ || __get_user(usec, &tvp->tv_usec)) {
+ ret = -EFAULT;
+ goto out_nofds;
+ }
+
+ if (sec < 0 || usec < 0)
+ goto out_nofds;
+
+ if ((unsigned long) sec < MAX_SELECT_SECONDS) {
+ timeout = (usec + 1000000/HZ - 1) / (1000000/HZ);
+ timeout += sec * (unsigned long) HZ;
+ }
+ }
+
+ if (n < 0 || n > current->files->max_fdset)
+ goto out_nofds;
+
+ /*
+ * We need 6 bitmaps (in/out/ex for both incoming and outgoing),
+	 * and since we use fd_set we need to allocate memory in units
+	 * of long words.
+ */
+ ret = -ENOMEM;
+ size = FDS_BYTES(n);
+ bits = kmalloc(6 * size, GFP_KERNEL);
+ if (!bits)
+ goto out_nofds;
+ fds.in = (unsigned long *) bits;
+ fds.out = (unsigned long *) (bits + size);
+ fds.ex = (unsigned long *) (bits + 2*size);
+ fds.res_in = (unsigned long *) (bits + 3*size);
+ fds.res_out = (unsigned long *) (bits + 4*size);
+ fds.res_ex = (unsigned long *) (bits + 5*size);
+
+ if ((ret = get_fd_set(n, inp->fds_bits, fds.in)) ||
+ (ret = get_fd_set(n, outp->fds_bits, fds.out)) ||
+ (ret = get_fd_set(n, exp->fds_bits, fds.ex)))
+ goto out;
+ zero_fd_set(n, fds.res_in);
+ zero_fd_set(n, fds.res_out);
+ zero_fd_set(n, fds.res_ex);
+
+ ret = do_select(n, &fds, &timeout);
+
+ /* OSF does not copy back the remaining time. */
+
+ if (ret < 0)
+ goto out;
+ if (!ret) {
+ ret = -ERESTARTNOHAND;
+ if (signal_pending(current))
+ goto out;
+ ret = 0;
+ }
+
+ if (set_fd_set(n, inp->fds_bits, fds.res_in) ||
+ set_fd_set(n, outp->fds_bits, fds.res_out) ||
+ set_fd_set(n, exp->fds_bits, fds.res_ex))
+ ret = -EFAULT;
+
+ out:
+ kfree(bits);
+ out_nofds:
+ return ret;
+}
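+
+/* Illustrative arithmetic for the timeout conversion above (not in
+   the original source; assumes HZ == 1024): sec = 2, usec = 500000
+   yields timeout = (500000 + 976 - 1) / 976 + 2 * 1024 = 513 + 2048
+   = 2561 ticks.  The round-up keeps sub-tick sleeps from collapsing
+   to zero.  */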
+
+struct rusage32 {
+ struct timeval32 ru_utime; /* user time used */
+ struct timeval32 ru_stime; /* system time used */
+ long ru_maxrss; /* maximum resident set size */
+ long ru_ixrss; /* integral shared memory size */
+ long ru_idrss; /* integral unshared data size */
+ long ru_isrss; /* integral unshared stack size */
+ long ru_minflt; /* page reclaims */
+ long ru_majflt; /* page faults */
+ long ru_nswap; /* swaps */
+ long ru_inblock; /* block input operations */
+ long ru_oublock; /* block output operations */
+ long ru_msgsnd; /* messages sent */
+ long ru_msgrcv; /* messages received */
+ long ru_nsignals; /* signals received */
+ long ru_nvcsw; /* voluntary context switches */
+ long ru_nivcsw; /* involuntary " */
+};
+
+asmlinkage int
+osf_getrusage(int who, struct rusage32 __user *ru)
+{
+ struct rusage32 r;
+
+ if (who != RUSAGE_SELF && who != RUSAGE_CHILDREN)
+ return -EINVAL;
+
+ memset(&r, 0, sizeof(r));
+ switch (who) {
+ case RUSAGE_SELF:
+ jiffies_to_timeval32(current->utime, &r.ru_utime);
+ jiffies_to_timeval32(current->stime, &r.ru_stime);
+ r.ru_minflt = current->min_flt;
+ r.ru_majflt = current->maj_flt;
+ break;
+ case RUSAGE_CHILDREN:
+ jiffies_to_timeval32(current->signal->cutime, &r.ru_utime);
+ jiffies_to_timeval32(current->signal->cstime, &r.ru_stime);
+ r.ru_minflt = current->signal->cmin_flt;
+ r.ru_majflt = current->signal->cmaj_flt;
+ break;
+ }
+
+ return copy_to_user(ru, &r, sizeof(r)) ? -EFAULT : 0;
+}
+
+asmlinkage long
+osf_wait4(pid_t pid, int __user *ustatus, int options,
+ struct rusage32 __user *ur)
+{
+ struct rusage r;
+ long ret, err;
+ mm_segment_t old_fs;
+
+ if (!ur)
+ return sys_wait4(pid, ustatus, options, NULL);
+
+ old_fs = get_fs();
+
+ set_fs (KERNEL_DS);
+ ret = sys_wait4(pid, ustatus, options, (struct rusage __user *) &r);
+ set_fs (old_fs);
+
+ if (!access_ok(VERIFY_WRITE, ur, sizeof(*ur)))
+ return -EFAULT;
+
+ err = 0;
+ err |= __put_user(r.ru_utime.tv_sec, &ur->ru_utime.tv_sec);
+ err |= __put_user(r.ru_utime.tv_usec, &ur->ru_utime.tv_usec);
+ err |= __put_user(r.ru_stime.tv_sec, &ur->ru_stime.tv_sec);
+ err |= __put_user(r.ru_stime.tv_usec, &ur->ru_stime.tv_usec);
+ err |= __put_user(r.ru_maxrss, &ur->ru_maxrss);
+ err |= __put_user(r.ru_ixrss, &ur->ru_ixrss);
+ err |= __put_user(r.ru_idrss, &ur->ru_idrss);
+ err |= __put_user(r.ru_isrss, &ur->ru_isrss);
+ err |= __put_user(r.ru_minflt, &ur->ru_minflt);
+ err |= __put_user(r.ru_majflt, &ur->ru_majflt);
+ err |= __put_user(r.ru_nswap, &ur->ru_nswap);
+ err |= __put_user(r.ru_inblock, &ur->ru_inblock);
+ err |= __put_user(r.ru_oublock, &ur->ru_oublock);
+ err |= __put_user(r.ru_msgsnd, &ur->ru_msgsnd);
+ err |= __put_user(r.ru_msgrcv, &ur->ru_msgrcv);
+ err |= __put_user(r.ru_nsignals, &ur->ru_nsignals);
+ err |= __put_user(r.ru_nvcsw, &ur->ru_nvcsw);
+ err |= __put_user(r.ru_nivcsw, &ur->ru_nivcsw);
+
+ return err ? err : ret;
+}
+
+/*
+ * I don't know what the parameters are: the first one
+ * seems to be a timeval pointer, and I suspect the second
+ * one is the time remaining.. Ho humm.. No documentation.
+ */
+asmlinkage int
+osf_usleep_thread(struct timeval32 __user *sleep, struct timeval32 __user *remain)
+{
+ struct timeval tmp;
+ unsigned long ticks;
+
+ if (get_tv32(&tmp, sleep))
+ goto fault;
+
+ ticks = tmp.tv_usec;
+ ticks = (ticks + (1000000 / HZ) - 1) / (1000000 / HZ);
+ ticks += tmp.tv_sec * HZ;
+
+ current->state = TASK_INTERRUPTIBLE;
+ ticks = schedule_timeout(ticks);
+
+ if (remain) {
+ tmp.tv_sec = ticks / HZ;
+ tmp.tv_usec = ticks % HZ;
+ if (put_tv32(remain, &tmp))
+ goto fault;
+ }
+
+ return 0;
+ fault:
+ return -EFAULT;
+}
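+
+/* Note (added commentary): the remaining time written back above is
+   approximate -- tv_usec receives ticks % HZ, i.e. a tick count
+   rather than microseconds, so callers see the sub-second residue in
+   units of 1/HZ seconds.  */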
+
+
+struct timex32 {
+ unsigned int modes; /* mode selector */
+ long offset; /* time offset (usec) */
+ long freq; /* frequency offset (scaled ppm) */
+ long maxerror; /* maximum error (usec) */
+ long esterror; /* estimated error (usec) */
+ int status; /* clock command/status */
+ long constant; /* pll time constant */
+ long precision; /* clock precision (usec) (read only) */
+ long tolerance; /* clock frequency tolerance (ppm)
+ * (read only)
+ */
+ struct timeval32 time; /* (read only) */
+ long tick; /* (modified) usecs between clock ticks */
+
+ long ppsfreq; /* pps frequency (scaled ppm) (ro) */
+ long jitter; /* pps jitter (us) (ro) */
+ int shift; /* interval duration (s) (shift) (ro) */
+ long stabil; /* pps stability (scaled ppm) (ro) */
+ long jitcnt; /* jitter limit exceeded (ro) */
+ long calcnt; /* calibration intervals (ro) */
+ long errcnt; /* calibration errors (ro) */
+ long stbcnt; /* stability limit exceeded (ro) */
+
+ int :32; int :32; int :32; int :32;
+ int :32; int :32; int :32; int :32;
+ int :32; int :32; int :32; int :32;
+};
+
+asmlinkage int
+sys_old_adjtimex(struct timex32 __user *txc_p)
+{
+ struct timex txc;
+ int ret;
+
+ /* copy relevant bits of struct timex. */
+ if (copy_from_user(&txc, txc_p, offsetof(struct timex32, time)) ||
+ copy_from_user(&txc.tick, &txc_p->tick, sizeof(struct timex32) -
+ offsetof(struct timex32, time)))
+ return -EFAULT;
+
+ ret = do_adjtimex(&txc);
+ if (ret < 0)
+ return ret;
+
+ /* copy back to timex32 */
+ if (copy_to_user(txc_p, &txc, offsetof(struct timex32, time)) ||
+ (copy_to_user(&txc_p->tick, &txc.tick, sizeof(struct timex32) -
+ offsetof(struct timex32, tick))) ||
+ (put_tv32(&txc_p->time, &txc.time)))
+ return -EFAULT;
+
+ return ret;
+}
+
+/* Get an address range which is currently unmapped. Similar to the
+ generic version except that we know how to honor ADDR_LIMIT_32BIT. */
+
+static unsigned long
+arch_get_unmapped_area_1(unsigned long addr, unsigned long len,
+ unsigned long limit)
+{
+ struct vm_area_struct *vma = find_vma(current->mm, addr);
+
+ while (1) {
+ /* At this point: (!vma || addr < vma->vm_end). */
+ if (limit - len < addr)
+ return -ENOMEM;
+ if (!vma || addr + len <= vma->vm_start)
+ return addr;
+ addr = vma->vm_end;
+ vma = vma->vm_next;
+ }
+}
+
+unsigned long
+arch_get_unmapped_area(struct file *filp, unsigned long addr,
+ unsigned long len, unsigned long pgoff,
+ unsigned long flags)
+{
+ unsigned long limit;
+
+ /* "32 bit" actually means 31 bit, since pointers sign extend. */
+ if (current->personality & ADDR_LIMIT_32BIT)
+ limit = 0x80000000;
+ else
+ limit = TASK_SIZE;
+
+ if (len > limit)
+ return -ENOMEM;
+
+ /* First, see if the given suggestion fits.
+
+ The OSF/1 loader (/sbin/loader) relies on us returning an
+	   address larger than requested, if one exists, which is
+ a terribly broken way to program.
+
+ That said, I can see the use in being able to suggest not
+ merely specific addresses, but regions of memory -- perhaps
+ this feature should be incorporated into all ports? */
+
+ if (addr) {
+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(addr), len, limit);
+ if (addr != (unsigned long) -ENOMEM)
+ return addr;
+ }
+
+ /* Next, try allocating at TASK_UNMAPPED_BASE. */
+ addr = arch_get_unmapped_area_1 (PAGE_ALIGN(TASK_UNMAPPED_BASE),
+ len, limit);
+ if (addr != (unsigned long) -ENOMEM)
+ return addr;
+
+ /* Finally, try allocating in low memory. */
+ addr = arch_get_unmapped_area_1 (PAGE_SIZE, len, limit);
+
+ return addr;
+}
+
+#ifdef CONFIG_OSF4_COMPAT
+
+/* Clear top 32 bits of iov_len in the user's buffer for
+ compatibility with old versions of OSF/1 where iov_len
+ was defined as int. */
+static int
+osf_fix_iov_len(const struct iovec __user *iov, unsigned long count)
+{
+ unsigned long i;
+
+ for (i = 0 ; i < count ; i++) {
+ int __user *iov_len_high = (int __user *)&iov[i].iov_len + 1;
+
+ if (put_user(0, iov_len_high))
+ return -EFAULT;
+ }
+ return 0;
+}
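+
+/* Layout note (added commentary): Alpha is little-endian and iov_len
+   is a 64-bit field, so its high 32 bits sit at byte offset 4 within
+   the field.  (int __user *)&iov[i].iov_len + 1 therefore addresses
+   exactly the half that an old OSF/1 "int" iov_len leaves undefined,
+   and zeroing it lets the full 64-bit read in sys_readv/sys_writev
+   see the intended length.  */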
+
+asmlinkage ssize_t
+osf_readv(unsigned long fd, const struct iovec __user * vector, unsigned long count)
+{
+ if (unlikely(personality(current->personality) == PER_OSF4))
+ if (osf_fix_iov_len(vector, count))
+ return -EFAULT;
+ return sys_readv(fd, vector, count);
+}
+
+asmlinkage ssize_t
+osf_writev(unsigned long fd, const struct iovec __user * vector, unsigned long count)
+{
+ if (unlikely(personality(current->personality) == PER_OSF4))
+ if (osf_fix_iov_len(vector, count))
+ return -EFAULT;
+ return sys_writev(fd, vector, count);
+}
+
+#endif
diff --git a/arch/alpha/kernel/pci-noop.c b/arch/alpha/kernel/pci-noop.c
new file mode 100644
index 000000000000..582a3519fb28
--- /dev/null
+++ b/arch/alpha/kernel/pci-noop.c
@@ -0,0 +1,214 @@
+/*
+ * linux/arch/alpha/kernel/pci-noop.c
+ *
+ * Stub PCI interfaces for Jensen-specific kernels.
+ */
+
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/bootmem.h>
+#include <linux/mm.h>
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/dma-mapping.h>
+
+#include "proto.h"
+
+
+/*
+ * The PCI controller list.
+ */
+
+struct pci_controller *hose_head, **hose_tail = &hose_head;
+struct pci_controller *pci_isa_hose;
+
+
+struct pci_controller * __init
+alloc_pci_controller(void)
+{
+ struct pci_controller *hose;
+
+ hose = alloc_bootmem(sizeof(*hose));
+
+ *hose_tail = hose;
+ hose_tail = &hose->next;
+
+ return hose;
+}
+
+struct resource * __init
+alloc_resource(void)
+{
+ struct resource *res;
+
+ res = alloc_bootmem(sizeof(*res));
+
+ return res;
+}
+
+asmlinkage long
+sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn)
+{
+ struct pci_controller *hose;
+
+ /* from hose or from bus.devfn */
+ if (which & IOBASE_FROM_HOSE) {
+ for (hose = hose_head; hose; hose = hose->next)
+ if (hose->index == bus)
+ break;
+ if (!hose)
+ return -ENODEV;
+ } else {
+ /* Special hook for ISA access. */
+ if (bus == 0 && dfn == 0)
+ hose = pci_isa_hose;
+ else
+ return -ENODEV;
+ }
+
+ switch (which & ~IOBASE_FROM_HOSE) {
+ case IOBASE_HOSE:
+ return hose->index;
+ case IOBASE_SPARSE_MEM:
+ return hose->sparse_mem_base;
+ case IOBASE_DENSE_MEM:
+ return hose->dense_mem_base;
+ case IOBASE_SPARSE_IO:
+ return hose->sparse_io_base;
+ case IOBASE_DENSE_IO:
+ return hose->dense_io_base;
+ case IOBASE_ROOT_BUS:
+ return hose->bus->number;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+asmlinkage long
+sys_pciconfig_read(unsigned long bus, unsigned long dfn,
+ unsigned long off, unsigned long len, void *buf)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ else
+ return -ENODEV;
+}
+
+asmlinkage long
+sys_pciconfig_write(unsigned long bus, unsigned long dfn,
+ unsigned long off, unsigned long len, void *buf)
+{
+ if (!capable(CAP_SYS_ADMIN))
+ return -EPERM;
+ else
+ return -ENODEV;
+}
+
+/* Stubs for the routines in pci_iommu.c: */
+
+void *
+pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
+{
+ return NULL;
+}
+
+void
+pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr)
+{
+}
+
+dma_addr_t
+pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size,
+ int direction)
+{
+ return (dma_addr_t) 0;
+}
+
+void
+pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
+ int direction)
+{
+}
+
+int
+pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
+ int direction)
+{
+ return 0;
+}
+
+void
+pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
+ int direction)
+{
+}
+
+int
+pci_dma_supported(struct pci_dev *hwdev, dma_addr_t mask)
+{
+ return 0;
+}
+
+/* Generic DMA mapping functions: */
+
+void *
+dma_alloc_coherent(struct device *dev, size_t size,
+ dma_addr_t *dma_handle, int gfp)
+{
+ void *ret;
+
+ if (!dev || *dev->dma_mask >= 0xffffffffUL)
+ gfp &= ~GFP_DMA;
+ ret = (void *)__get_free_pages(gfp, get_order(size));
+ if (ret) {
+ memset(ret, 0, size);
+ *dma_handle = virt_to_bus(ret);
+ }
+ return ret;
+}
+
+EXPORT_SYMBOL(dma_alloc_coherent);
+
+int
+dma_map_sg(struct device *dev, struct scatterlist *sg, int nents,
+ enum dma_data_direction direction)
+{
+ int i;
+
+ for (i = 0; i < nents; i++ ) {
+ void *va;
+
+ BUG_ON(!sg[i].page);
+ va = page_address(sg[i].page) + sg[i].offset;
+ sg_dma_address(sg + i) = (dma_addr_t)virt_to_bus(va);
+ sg_dma_len(sg + i) = sg[i].length;
+ }
+
+ return nents;
+}
+
+EXPORT_SYMBOL(dma_map_sg);
+
+int
+dma_set_mask(struct device *dev, u64 mask)
+{
+ if (!dev->dma_mask || !dma_supported(dev, mask))
+ return -EIO;
+
+ *dev->dma_mask = mask;
+
+ return 0;
+}
+
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ return NULL;
+}
+
+void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
+{
+}
+
+EXPORT_SYMBOL(pci_iomap);
+EXPORT_SYMBOL(pci_iounmap);
diff --git a/arch/alpha/kernel/pci.c b/arch/alpha/kernel/pci.c
new file mode 100644
index 000000000000..1f36bbd0ed5d
--- /dev/null
+++ b/arch/alpha/kernel/pci.c
@@ -0,0 +1,561 @@
+/*
+ * linux/arch/alpha/kernel/pci.c
+ *
+ * Extruded from code written by
+ * Dave Rusling (david.rusling@reo.mts.dec.com)
+ * David Mosberger (davidm@cs.arizona.edu)
+ */
+
+/* 2.3.x PCI/resources, 1999 Andrea Arcangeli <andrea@suse.de> */
+
+/*
+ * Nov 2000, Ivan Kokshaysky <ink@jurassic.park.msu.ru>
+ * PCI-PCI bridges cleanup
+ */
+#include <linux/config.h>
+#include <linux/string.h>
+#include <linux/pci.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/kernel.h>
+#include <linux/bootmem.h>
+#include <linux/module.h>
+#include <linux/cache.h>
+#include <linux/slab.h>
+#include <asm/machvec.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+
+/*
+ * Some string constants used by the various core logics.
+ */
+
+const char *const pci_io_names[] = {
+ "PCI IO bus 0", "PCI IO bus 1", "PCI IO bus 2", "PCI IO bus 3",
+ "PCI IO bus 4", "PCI IO bus 5", "PCI IO bus 6", "PCI IO bus 7"
+};
+
+const char *const pci_mem_names[] = {
+ "PCI mem bus 0", "PCI mem bus 1", "PCI mem bus 2", "PCI mem bus 3",
+ "PCI mem bus 4", "PCI mem bus 5", "PCI mem bus 6", "PCI mem bus 7"
+};
+
+const char pci_hae0_name[] = "HAE0";
+
+/* Indicate whether we respect the PCI setup left by console. */
+/*
+ * Make this long-lived so that we know when shutting down
+ * whether we probed only or not.
+ */
+int pci_probe_only;
+
+/*
+ * The PCI controller list.
+ */
+
+struct pci_controller *hose_head, **hose_tail = &hose_head;
+struct pci_controller *pci_isa_hose;
+
+/*
+ * Quirks.
+ */
+
+static void __init
+quirk_isa_bridge(struct pci_dev *dev)
+{
+ dev->class = PCI_CLASS_BRIDGE_ISA << 8;
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_INTEL, PCI_DEVICE_ID_INTEL_82378, quirk_isa_bridge);
+
+static void __init
+quirk_cypress(struct pci_dev *dev)
+{
+ /* The Notorious Cy82C693 chip. */
+
+ /* The Cypress IDE controller doesn't support native mode, but it
+ has programmable addresses of IDE command/control registers.
+ This violates PCI specifications, confuses the IDE subsystem and
+ causes resource conflicts between the primary HD_CMD register and
+ the floppy controller. Ugh. Fix that. */
+ if (dev->class >> 8 == PCI_CLASS_STORAGE_IDE) {
+ dev->resource[0].flags = 0;
+ dev->resource[1].flags = 0;
+ }
+
+ /* The Cypress bridge responds on the PCI bus in the address range
+ 0xffff0000-0xffffffff (conventional x86 BIOS ROM). There is no
+ way to turn this off. The bridge also supports several extended
+ BIOS ranges (disabled after power-up), and some consoles do turn
+ them on. So if we use a large direct-map window, or a large SG
+ window, we must avoid the entire 0xfff00000-0xffffffff region. */
+ else if (dev->class >> 8 == PCI_CLASS_BRIDGE_ISA) {
+ if (__direct_map_base + __direct_map_size >= 0xfff00000UL)
+ __direct_map_size = 0xfff00000UL - __direct_map_base;
+ else {
+ struct pci_controller *hose = dev->sysdata;
+ struct pci_iommu_arena *pci = hose->sg_pci;
+ if (pci && pci->dma_base + pci->size >= 0xfff00000UL)
+ pci->size = 0xfff00000UL - pci->dma_base;
+ }
+ }
+}
+DECLARE_PCI_FIXUP_HEADER(PCI_VENDOR_ID_CONTAQ, PCI_DEVICE_ID_CONTAQ_82C693, quirk_cypress);
+
+/* Called for each device after PCI setup is done. */
+static void __init
+pcibios_fixup_final(struct pci_dev *dev)
+{
+ unsigned int class = dev->class >> 8;
+
+ if (class == PCI_CLASS_BRIDGE_ISA || class == PCI_CLASS_BRIDGE_EISA) {
+ dev->dma_mask = MAX_ISA_DMA_ADDRESS - 1;
+ isa_bridge = dev;
+ }
+}
+DECLARE_PCI_FIXUP_FINAL(PCI_ANY_ID, PCI_ANY_ID, pcibios_fixup_final);
+
+/* Just declaring that the power-of-ten prefixes are actually the
+ power-of-two ones doesn't make it true :) */
+#define KB 1024
+#define MB (1024*KB)
+#define GB (1024*MB)
+
+void
+pcibios_align_resource(void *data, struct resource *res,
+ unsigned long size, unsigned long align)
+{
+ struct pci_dev *dev = data;
+ struct pci_controller *hose = dev->sysdata;
+ unsigned long alignto;
+ unsigned long start = res->start;
+
+ if (res->flags & IORESOURCE_IO) {
+ /* Make sure we start at our min on all hoses */
+ if (start - hose->io_space->start < PCIBIOS_MIN_IO)
+ start = PCIBIOS_MIN_IO + hose->io_space->start;
+
+ /*
+ * Put everything into 0x00-0xff region modulo 0x400
+ */
+ if (start & 0x300)
+ start = (start + 0x3ff) & ~0x3ff;
+ }
+ else if (res->flags & IORESOURCE_MEM) {
+ /* Make sure we start at our min on all hoses */
+ if (start - hose->mem_space->start < PCIBIOS_MIN_MEM)
+ start = PCIBIOS_MIN_MEM + hose->mem_space->start;
+
+ /*
+ * The following holds at least for the Low Cost
+ * Alpha implementation of the PCI interface:
+ *
+ * In sparse memory address space, the first
+ * octant (16MB) of every 128MB segment is
+ * aliased to the very first 16 MB of the
+ * address space (i.e., it aliases the ISA
+ * memory address space). Thus, we try to
+ * avoid allocating PCI devices in that range.
+		 * They can be allocated in the 2nd-7th octants only.
+ * Devices that need more than 112MB of
+ * address space must be accessed through
+ * dense memory space only!
+ */
+
+ /* Align to multiple of size of minimum base. */
+ alignto = max(0x1000UL, align);
+ start = ALIGN(start, alignto);
+ if (hose->sparse_mem_base && size <= 7 * 16*MB) {
+ if (((start / (16*MB)) & 0x7) == 0) {
+ start &= ~(128*MB - 1);
+ start += 16*MB;
+ start = ALIGN(start, alignto);
+ }
+ if (start/(128*MB) != (start + size - 1)/(128*MB)) {
+ start &= ~(128*MB - 1);
+ start += (128 + 16)*MB;
+ start = ALIGN(start, alignto);
+ }
+ }
+ }
+
+ res->start = start;
+}
+#undef KB
+#undef MB
+#undef GB
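+
+/* Worked example of the sparse-memory avoidance above (illustrative
+   only): for a region whose candidate start is 0x8000000 (128MB),
+   (start / 16MB) & 7 == 0, so the code rounds down to the 128MB
+   segment base and steps forward 16MB, yielding 0x9000000 -- the
+   second octant of the segment, clear of the ISA alias occupying
+   the first 16MB.  */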
+
+static int __init
+pcibios_init(void)
+{
+ if (alpha_mv.init_pci)
+ alpha_mv.init_pci();
+ return 0;
+}
+
+subsys_initcall(pcibios_init);
+
+char * __init
+pcibios_setup(char *str)
+{
+ return str;
+}
+
+#ifdef ALPHA_RESTORE_SRM_SETUP
+static struct pdev_srm_saved_conf *srm_saved_configs;
+
+void __init
+pdev_save_srm_config(struct pci_dev *dev)
+{
+ struct pdev_srm_saved_conf *tmp;
+ static int printed = 0;
+
+ if (!alpha_using_srm || pci_probe_only)
+ return;
+
+ if (!printed) {
+ printk(KERN_INFO "pci: enabling save/restore of SRM state\n");
+ printed = 1;
+ }
+
+ tmp = kmalloc(sizeof(*tmp), GFP_KERNEL);
+ if (!tmp) {
+ printk(KERN_ERR "%s: kmalloc() failed!\n", __FUNCTION__);
+ return;
+ }
+ tmp->next = srm_saved_configs;
+ tmp->dev = dev;
+
+ pci_save_state(dev);
+
+ srm_saved_configs = tmp;
+}
+
+void
+pci_restore_srm_config(void)
+{
+ struct pdev_srm_saved_conf *tmp;
+
+ /* No need to restore if probed only. */
+ if (pci_probe_only)
+ return;
+
+ /* Restore SRM config. */
+ for (tmp = srm_saved_configs; tmp; tmp = tmp->next) {
+ pci_restore_state(tmp->dev);
+ }
+}
+#endif
+
+void __init
+pcibios_fixup_resource(struct resource *res, struct resource *root)
+{
+ res->start += root->start;
+ res->end += root->start;
+}
+
+void __init
+pcibios_fixup_device_resources(struct pci_dev *dev, struct pci_bus *bus)
+{
+ /* Update device resources. */
+ struct pci_controller *hose = (struct pci_controller *)bus->sysdata;
+ int i;
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ if (!dev->resource[i].start)
+ continue;
+ if (dev->resource[i].flags & IORESOURCE_IO)
+ pcibios_fixup_resource(&dev->resource[i],
+ hose->io_space);
+ else if (dev->resource[i].flags & IORESOURCE_MEM)
+ pcibios_fixup_resource(&dev->resource[i],
+ hose->mem_space);
+ }
+}
+
+void __init
+pcibios_fixup_bus(struct pci_bus *bus)
+{
+ /* Propagate hose info into the subordinate devices. */
+
+ struct pci_controller *hose = bus->sysdata;
+ struct pci_dev *dev = bus->self;
+
+ if (!dev) {
+ /* Root bus. */
+ u32 pci_mem_end;
+ u32 sg_base = hose->sg_pci ? hose->sg_pci->dma_base : ~0;
+ unsigned long end;
+
+ bus->resource[0] = hose->io_space;
+ bus->resource[1] = hose->mem_space;
+
+ /* Adjust hose mem_space limit to prevent PCI allocations
+ in the iommu windows. */
+ pci_mem_end = min((u32)__direct_map_base, sg_base) - 1;
+ end = hose->mem_space->start + pci_mem_end;
+ if (hose->mem_space->end > end)
+ hose->mem_space->end = end;
+ } else if (pci_probe_only &&
+ (dev->class >> 8) == PCI_CLASS_BRIDGE_PCI) {
+ pci_read_bridge_bases(bus);
+ pcibios_fixup_device_resources(dev, bus);
+ }
+
+ list_for_each_entry(dev, &bus->devices, bus_list) {
+ pdev_save_srm_config(dev);
+ if ((dev->class >> 8) != PCI_CLASS_BRIDGE_PCI)
+ pcibios_fixup_device_resources(dev, bus);
+ }
+}
+
+void __init
+pcibios_update_irq(struct pci_dev *dev, int irq)
+{
+ pci_write_config_byte(dev, PCI_INTERRUPT_LINE, irq);
+}
+
+/* Most Alphas have straightforward swizzling needs.  */
+
+u8 __init
+common_swizzle(struct pci_dev *dev, u8 *pinp)
+{
+ u8 pin = *pinp;
+
+ while (dev->bus->parent) {
+ pin = bridge_swizzle(pin, PCI_SLOT(dev->devfn));
+ /* Move up the chain of bridges. */
+ dev = dev->bus->self;
+ }
+ *pinp = pin;
+
+ /* The slot is the slot of the last bridge. */
+ return PCI_SLOT(dev->devfn);
+}
+
+void __devinit
+pcibios_resource_to_bus(struct pci_dev *dev, struct pci_bus_region *region,
+ struct resource *res)
+{
+ struct pci_controller *hose = (struct pci_controller *)dev->sysdata;
+ unsigned long offset = 0;
+
+ if (res->flags & IORESOURCE_IO)
+ offset = hose->io_space->start;
+ else if (res->flags & IORESOURCE_MEM)
+ offset = hose->mem_space->start;
+
+ region->start = res->start - offset;
+ region->end = res->end - offset;
+}
+
+#ifdef CONFIG_HOTPLUG
+EXPORT_SYMBOL(pcibios_resource_to_bus);
+#endif
+
+int
+pcibios_enable_device(struct pci_dev *dev, int mask)
+{
+ u16 cmd, oldcmd;
+ int i;
+
+ pci_read_config_word(dev, PCI_COMMAND, &cmd);
+ oldcmd = cmd;
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ struct resource *res = &dev->resource[i];
+
+ if (res->flags & IORESOURCE_IO)
+ cmd |= PCI_COMMAND_IO;
+ else if (res->flags & IORESOURCE_MEM)
+ cmd |= PCI_COMMAND_MEMORY;
+ }
+
+ if (cmd != oldcmd) {
+ printk(KERN_DEBUG "PCI: Enabling device: (%s), cmd %x\n",
+ pci_name(dev), cmd);
+ /* Enable the appropriate bits in the PCI command register. */
+ pci_write_config_word(dev, PCI_COMMAND, cmd);
+ }
+ return 0;
+}
+
+/*
+ * If we set up a device for bus mastering, we need to check the latency
+ * timer as certain firmware forgets to set it properly, as seen
+ * on SX164 and LX164 with SRM.
+ */
+void
+pcibios_set_master(struct pci_dev *dev)
+{
+ u8 lat;
+ pci_read_config_byte(dev, PCI_LATENCY_TIMER, &lat);
+ if (lat >= 16) return;
+ printk("PCI: Setting latency timer of device %s to 64\n",
+ pci_name(dev));
+ pci_write_config_byte(dev, PCI_LATENCY_TIMER, 64);
+}
+
+static void __init
+pcibios_claim_one_bus(struct pci_bus *b)
+{
+ struct pci_dev *dev;
+ struct pci_bus *child_bus;
+
+ list_for_each_entry(dev, &b->devices, bus_list) {
+ int i;
+
+ for (i = 0; i < PCI_NUM_RESOURCES; i++) {
+ struct resource *r = &dev->resource[i];
+
+ if (r->parent || !r->start || !r->flags)
+ continue;
+ pci_claim_resource(dev, i);
+ }
+ }
+
+ list_for_each_entry(child_bus, &b->children, node)
+ pcibios_claim_one_bus(child_bus);
+}
+
+static void __init
+pcibios_claim_console_setup(void)
+{
+ struct pci_bus *b;
+
+ list_for_each_entry(b, &pci_root_buses, node)
+ pcibios_claim_one_bus(b);
+}
+
+void __init
+common_init_pci(void)
+{
+ struct pci_controller *hose;
+ struct pci_bus *bus;
+ int next_busno;
+ int need_domain_info = 0;
+
+ /* Scan all of the recorded PCI controllers. */
+ for (next_busno = 0, hose = hose_head; hose; hose = hose->next) {
+ bus = pci_scan_bus(next_busno, alpha_mv.pci_ops, hose);
+ hose->bus = bus;
+ hose->need_domain_info = need_domain_info;
+ next_busno = bus->subordinate + 1;
+ /* Don't allow 8-bit bus number overflow inside the hose -
+ reserve some space for bridges. */
+ if (next_busno > 224) {
+ next_busno = 0;
+ need_domain_info = 1;
+ }
+ }
+
+ if (pci_probe_only)
+ pcibios_claim_console_setup();
+
+ pci_assign_unassigned_resources();
+ pci_fixup_irqs(alpha_mv.pci_swizzle, alpha_mv.pci_map_irq);
+}
+
+
+struct pci_controller * __init
+alloc_pci_controller(void)
+{
+ struct pci_controller *hose;
+
+ hose = alloc_bootmem(sizeof(*hose));
+
+ *hose_tail = hose;
+ hose_tail = &hose->next;
+
+ return hose;
+}
+
+struct resource * __init
+alloc_resource(void)
+{
+ struct resource *res;
+
+ res = alloc_bootmem(sizeof(*res));
+
+ return res;
+}
+
+
+/* Provide information on locations of various I/O regions in physical
+ memory. Do this on a per-card basis so that we choose the right hose. */
+
+asmlinkage long
+sys_pciconfig_iobase(long which, unsigned long bus, unsigned long dfn)
+{
+ struct pci_controller *hose;
+ struct pci_dev *dev;
+
+ /* from hose or from bus.devfn */
+ if (which & IOBASE_FROM_HOSE) {
+ for(hose = hose_head; hose; hose = hose->next)
+ if (hose->index == bus) break;
+ if (!hose) return -ENODEV;
+ } else {
+ /* Special hook for ISA access. */
+ if (bus == 0 && dfn == 0) {
+ hose = pci_isa_hose;
+ } else {
+ dev = pci_find_slot(bus, dfn);
+ if (!dev)
+ return -ENODEV;
+ hose = dev->sysdata;
+ }
+ }
+
+ switch (which & ~IOBASE_FROM_HOSE) {
+ case IOBASE_HOSE:
+ return hose->index;
+ case IOBASE_SPARSE_MEM:
+ return hose->sparse_mem_base;
+ case IOBASE_DENSE_MEM:
+ return hose->dense_mem_base;
+ case IOBASE_SPARSE_IO:
+ return hose->sparse_io_base;
+ case IOBASE_DENSE_IO:
+ return hose->dense_io_base;
+ case IOBASE_ROOT_BUS:
+ return hose->bus->number;
+ }
+
+ return -EOPNOTSUPP;
+}
+
+/* Create an __iomem token from a PCI BAR. Copied from lib/iomap.c with
+ no changes, since we don't want the other things in that object file. */
+
+void __iomem *pci_iomap(struct pci_dev *dev, int bar, unsigned long maxlen)
+{
+ unsigned long start = pci_resource_start(dev, bar);
+ unsigned long len = pci_resource_len(dev, bar);
+ unsigned long flags = pci_resource_flags(dev, bar);
+
+ if (!len || !start)
+ return NULL;
+ if (maxlen && len > maxlen)
+ len = maxlen;
+ if (flags & IORESOURCE_IO)
+ return ioport_map(start, len);
+ if (flags & IORESOURCE_MEM) {
+ /* Not checking IORESOURCE_CACHEABLE because alpha does
+ not distinguish between ioremap and ioremap_nocache. */
+ return ioremap(start, len);
+ }
+ return NULL;
+}
+
+/* Destroy that token. Not copied from lib/iomap.c. */
+
+void pci_iounmap(struct pci_dev *dev, void __iomem * addr)
+{
+ if (__is_mmio(addr))
+ iounmap(addr);
+}
+
+EXPORT_SYMBOL(pci_iomap);
+EXPORT_SYMBOL(pci_iounmap);
diff --git a/arch/alpha/kernel/pci_impl.h b/arch/alpha/kernel/pci_impl.h
new file mode 100644
index 000000000000..f8b74995a002
--- /dev/null
+++ b/arch/alpha/kernel/pci_impl.h
@@ -0,0 +1,209 @@
+/*
+ * linux/arch/alpha/kernel/pci_impl.h
+ *
+ * This file contains declarations and inline functions for interfacing
+ * with the PCI initialization routines.
+ */
+
+struct pci_dev;
+struct pci_controller;
+struct pci_iommu_arena;
+
+/*
+ * We can't just blindly use 64K for machines with EISA busses; they
+ * may also have PCI-PCI bridges present, and then we'd configure the
+ * bridge incorrectly.
+ *
+ * Also, we start at 0x8000 or 0x9000, in the hope of getting all devices'
+ * IO space areas allocated *before* 0xC000; this is because certain
+ * BIOSes (Millennium for one) use PCI Config space "mechanism #2"
+ * accesses to probe the bus. If a device's registers appear at 0xC000,
+ * it may see an INx/OUTx at that address during BIOS emulation of the
+ * VGA BIOS, and some cards, notably Adaptec 2940UW, take mortal offense.
+ */
+
+#define EISA_DEFAULT_IO_BASE 0x9000 /* start above 8th slot */
+#define DEFAULT_IO_BASE 0x8000 /* start at 8th slot */
+
+/*
+ * We try to make the DEFAULT_MEM_BASE addresses *always* have more than
+ * a single bit set. This is so that devices like the broken Myrinet card
+ * will always have a PCI memory address that will never match an IDSEL
+ * address in PCI Config space, which can cause problems with early rev cards.
+ */
+
+/*
+ * An XL is AVANTI (APECS) family, *but* it has only 27 bits of ISA address
+ * that get passed through the PCI<->ISA bridge chip. Although this causes
+ * us to set the PCI->Mem window bases lower than normal, we still allocate
+ * PCI bus devices' memory addresses *below* the low DMA mapping window,
+ * and hope they fit below 64MB (to avoid conflicts), and so that they can
+ * be accessed via SPARSE space.
+ *
+ * We accept the risk that a broken Myrinet card will be put into a true XL
+ * and thus can more easily run into the problem described below.
+ */
+#define XL_DEFAULT_MEM_BASE ((16+2)*1024*1024) /* 16M to 64M-1 is avail */
+
+/*
+ * APECS and LCA have only 34 bits for physical addresses, thus limiting PCI
+ * bus memory addresses for SPARSE access to be less than 128MB.
+ */
+#define APECS_AND_LCA_DEFAULT_MEM_BASE ((16+2)*1024*1024)
+
+/*
+ * Because MCPCIA and T2 core logic support more bits for
+ * physical addresses, they should allow an expanded range of SPARSE
+ * memory addresses. However, we do not use them all, in order to
+ * avoid the HAE manipulation that would be needed.
+ */
+#define MCPCIA_DEFAULT_MEM_BASE ((32+2)*1024*1024)
+#define T2_DEFAULT_MEM_BASE ((16+1)*1024*1024)
+
+/*
+ * Because CIA and PYXIS have more bits for physical addresses,
+ * they support an expanded range of SPARSE memory addresses.
+ */
+#define DEFAULT_MEM_BASE ((128+16)*1024*1024)
+
+/* ??? Experimenting with no HAE for CIA. */
+#define CIA_DEFAULT_MEM_BASE ((32+2)*1024*1024)
+
+#define IRONGATE_DEFAULT_MEM_BASE ((256*8-16)*1024*1024)
+
+#define DEFAULT_AGP_APER_SIZE (64*1024*1024)
+
+/*
+ * A small note about bridges and interrupts. The DECchip 21050 (and
+ * later) adheres to the PCI-PCI bridge specification. This says that
+ * the interrupts on the other side of a bridge are swizzled in the
+ * following manner:
+ *
+ * Dev Interrupt Interrupt
+ * Pin on Pin on
+ * Device Connector
+ *
+ * 4 A A
+ * B B
+ * C C
+ * D D
+ *
+ * 5 A B
+ * B C
+ * C D
+ * D A
+ *
+ * 6 A C
+ * B D
+ * C A
+ * D B
+ *
+ * 7 A D
+ * B A
+ * C B
+ * D C
+ *
+ * Where A = pin 1, B = pin 2 and so on and pin=0 = default = A.
+ * Thus, each swizzle is ((pin-1) + (device#-4)) % 4
+ *
+ * The following code swizzles for exactly one bridge. The routine
+ * common_swizzle below handles multiple bridges. But there are a
+ * couple boards that do strange things, so we define this here.
+ */
+
+static inline u8 bridge_swizzle(u8 pin, u8 slot)
+{
+ return (((pin-1) + slot) % 4) + 1;
+}
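+
+/* Example (added commentary): a device in slot 5 reporting pin B
+   gives bridge_swizzle(2, 5) = (((2-1) + 5) % 4) + 1 = 3, i.e. pin C
+   on the connector, matching the Dev 5 / pin B row of the table
+   above.  The slot number is used directly rather than (device# - 4)
+   because the difference vanishes modulo 4.  */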
+
+
+/* The following macro is used to implement the table-based irq mapping
+ function for all single-bus Alphas. */
+
+#define COMMON_TABLE_LOOKUP \
+({ long _ctl_ = -1; \
+ if (slot >= min_idsel && slot <= max_idsel && pin < irqs_per_slot) \
+ _ctl_ = irq_tab[slot - min_idsel][pin]; \
+ _ctl_; })
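+
+/* A minimal sketch of how a board support file might use the macro
+   (hypothetical names and IRQ numbers; the real tables live in the
+   per-platform sys_*.c files):
+
+	static int __init
+	example_map_irq(struct pci_dev *dev, u8 slot, u8 pin)
+	{
+		static char irq_tab[2][5] __initdata = {
+			{ 16, 16, 16, 16, 16 },		-- IdSel 6
+			{ 17, 17, 17, 17, 17 },		-- IdSel 7
+		};
+		const long min_idsel = 6, max_idsel = 7, irqs_per_slot = 5;
+		return COMMON_TABLE_LOOKUP;
+	}
+*/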
+
+
+/* A PCI IOMMU allocation arena. There are typically two of these
+ regions per bus. */
+/* ??? The 8400 has a 32-byte pte entry, and the entire table apparently
+ lives directly on the host bridge (no tlb?). We don't support this
+ machine, but if we ever did, we'd need to parameterize all this quite
+ a bit further. Probably with per-bus operation tables. */
+
+struct pci_iommu_arena
+{
+ spinlock_t lock;
+ struct pci_controller *hose;
+#define IOMMU_INVALID_PTE 0x2 /* 32:63 bits MBZ */
+#define IOMMU_RESERVED_PTE 0xface
+ unsigned long *ptes;
+ dma_addr_t dma_base;
+ unsigned int size;
+ unsigned int next_entry;
+ unsigned int align_entry;
+};
+
+#if defined(CONFIG_ALPHA_SRM) && \
+ (defined(CONFIG_ALPHA_CIA) || defined(CONFIG_ALPHA_LCA))
+# define NEED_SRM_SAVE_RESTORE
+#else
+# undef NEED_SRM_SAVE_RESTORE
+#endif
+
+#if defined(CONFIG_ALPHA_GENERIC) || defined(NEED_SRM_SAVE_RESTORE)
+# define ALPHA_RESTORE_SRM_SETUP
+#else
+# undef ALPHA_RESTORE_SRM_SETUP
+#endif
+
+#ifdef ALPHA_RESTORE_SRM_SETUP
+/* Store PCI device configuration left by SRM here. */
+struct pdev_srm_saved_conf
+{
+ struct pdev_srm_saved_conf *next;
+ struct pci_dev *dev;
+};
+
+extern void pci_restore_srm_config(void);
+#else
+#define pdev_save_srm_config(dev) do {} while (0)
+#define pci_restore_srm_config() do {} while (0)
+#endif
+
+/* The hose list. */
+extern struct pci_controller *hose_head, **hose_tail;
+extern struct pci_controller *pci_isa_hose;
+
+/* Indicate that we trust the console to configure things properly. */
+extern int pci_probe_only;
+
+extern unsigned long alpha_agpgart_size;
+
+extern void common_init_pci(void);
+extern u8 common_swizzle(struct pci_dev *, u8 *);
+extern struct pci_controller *alloc_pci_controller(void);
+extern struct resource *alloc_resource(void);
+
+extern struct pci_iommu_arena *iommu_arena_new_node(int,
+ struct pci_controller *,
+ dma_addr_t, unsigned long,
+ unsigned long);
+extern struct pci_iommu_arena *iommu_arena_new(struct pci_controller *,
+ dma_addr_t, unsigned long,
+ unsigned long);
+extern const char *const pci_io_names[];
+extern const char *const pci_mem_names[];
+extern const char pci_hae0_name[];
+
+extern unsigned long size_for_memory(unsigned long max);
+
+extern int iommu_reserve(struct pci_iommu_arena *, long, long);
+extern int iommu_release(struct pci_iommu_arena *, long, long);
+extern int iommu_bind(struct pci_iommu_arena *, long, long, unsigned long *);
+extern int iommu_unbind(struct pci_iommu_arena *, long, long);
+
+
diff --git a/arch/alpha/kernel/pci_iommu.c b/arch/alpha/kernel/pci_iommu.c
new file mode 100644
index 000000000000..7cb23f12ecbd
--- /dev/null
+++ b/arch/alpha/kernel/pci_iommu.c
@@ -0,0 +1,971 @@
+/*
+ * linux/arch/alpha/kernel/pci_iommu.c
+ */
+
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/pci.h>
+#include <linux/slab.h>
+#include <linux/bootmem.h>
+
+#include <asm/io.h>
+#include <asm/hwrpb.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+
+#define DEBUG_ALLOC 0
+#if DEBUG_ALLOC > 0
+# define DBGA(args...) printk(KERN_DEBUG args)
+#else
+# define DBGA(args...)
+#endif
+#if DEBUG_ALLOC > 1
+# define DBGA2(args...) printk(KERN_DEBUG args)
+#else
+# define DBGA2(args...)
+#endif
+
+#define DEBUG_NODIRECT 0
+#define DEBUG_FORCEDAC 0
+
+#define ISA_DMA_MASK 0x00ffffff
+
+static inline unsigned long
+mk_iommu_pte(unsigned long paddr)
+{
+ return (paddr >> (PAGE_SHIFT-1)) | 1;
+}
+
+static inline long
+calc_npages(long bytes)
+{
+ return (bytes + PAGE_SIZE - 1) >> PAGE_SHIFT;
+}
+
+
+/* Return the minimum of MAX or the first power of two larger
+ than main memory. */
+
+unsigned long
+size_for_memory(unsigned long max)
+{
+ unsigned long mem = max_low_pfn << PAGE_SHIFT;
+ if (mem < max)
+ max = 1UL << ceil_log2(mem);
+ return max;
+}
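+
+/* Illustrative only: with 96MB of main memory and max == 1GB, mem is
+   below max, so this returns 1UL << ceil_log2(96MB) = 128MB -- the
+   first power of two covering memory -- keeping the window no larger
+   than it needs to be.  */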
+
+struct pci_iommu_arena *
+iommu_arena_new_node(int nid, struct pci_controller *hose, dma_addr_t base,
+ unsigned long window_size, unsigned long align)
+{
+ unsigned long mem_size;
+ struct pci_iommu_arena *arena;
+
+ mem_size = window_size / (PAGE_SIZE / sizeof(unsigned long));
+
+ /* Note that the TLB lookup logic uses bitwise concatenation,
+ not addition, so the required arena alignment is based on
+ the size of the window. Retain the align parameter so that
+ particular systems can over-align the arena. */
+ if (align < mem_size)
+ align = mem_size;
+
+
+#ifdef CONFIG_DISCONTIGMEM
+
+ if (!NODE_DATA(nid) ||
+ (NULL == (arena = alloc_bootmem_node(NODE_DATA(nid),
+ sizeof(*arena))))) {
+ printk("%s: couldn't allocate arena from node %d\n"
+ " falling back to system-wide allocation\n",
+ __FUNCTION__, nid);
+ arena = alloc_bootmem(sizeof(*arena));
+ }
+
+ if (!NODE_DATA(nid) ||
+ (NULL == (arena->ptes = __alloc_bootmem_node(NODE_DATA(nid),
+ mem_size,
+ align,
+ 0)))) {
+ printk("%s: couldn't allocate arena ptes from node %d\n"
+ " falling back to system-wide allocation\n",
+ __FUNCTION__, nid);
+ arena->ptes = __alloc_bootmem(mem_size, align, 0);
+ }
+
+#else /* CONFIG_DISCONTIGMEM */
+
+ arena = alloc_bootmem(sizeof(*arena));
+ arena->ptes = __alloc_bootmem(mem_size, align, 0);
+
+#endif /* CONFIG_DISCONTIGMEM */
+
+ spin_lock_init(&arena->lock);
+ arena->hose = hose;
+ arena->dma_base = base;
+ arena->size = window_size;
+ arena->next_entry = 0;
+
+ /* Align allocations to a multiple of a page size. Not needed
+ unless there are chip bugs. */
+ arena->align_entry = 1;
+
+ return arena;
+}
+
+struct pci_iommu_arena *
+iommu_arena_new(struct pci_controller *hose, dma_addr_t base,
+ unsigned long window_size, unsigned long align)
+{
+ return iommu_arena_new_node(0, hose, base, window_size, align);
+}
+
+/* Must be called with the arena lock held */
+static long
+iommu_arena_find_pages(struct pci_iommu_arena *arena, long n, long mask)
+{
+ unsigned long *ptes;
+ long i, p, nent;
+
+ /* Search forward for the first mask-aligned sequence of N free ptes */
+ ptes = arena->ptes;
+ nent = arena->size >> PAGE_SHIFT;
+ p = (arena->next_entry + mask) & ~mask;
+ i = 0;
+ while (i < n && p+i < nent) {
+ if (ptes[p+i])
+ p = (p + i + 1 + mask) & ~mask, i = 0;
+ else
+ i = i + 1;
+ }
+
+ if (i < n) {
+ /* Reached the end. Flush the TLB and restart the
+ search from the beginning. */
+ alpha_mv.mv_pci_tbi(arena->hose, 0, -1);
+
+ p = 0, i = 0;
+ while (i < n && p+i < nent) {
+ if (ptes[p+i])
+ p = (p + i + 1 + mask) & ~mask, i = 0;
+ else
+ i = i + 1;
+ }
+
+ if (i < n)
+ return -1;
+ }
+
+ /* Success. It's the responsibility of the caller to mark them
+ in use before releasing the lock */
+ return p;
+}
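+
+/* Search example (added commentary): with mask == 7 (8-entry
+   alignment, as used for ISA bridges) and next_entry == 5, the scan
+   starts at p == 8.  If ptes[10] is busy while hunting for n == 4
+   free entries, p advances to (10 + 1 + 7) & ~7 == 16 and the count
+   restarts, so any window returned is always mask-aligned.  */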
+
+static long
+iommu_arena_alloc(struct pci_iommu_arena *arena, long n, unsigned int align)
+{
+ unsigned long flags;
+ unsigned long *ptes;
+ long i, p, mask;
+
+ spin_lock_irqsave(&arena->lock, flags);
+
+ /* Search for N empty ptes */
+ ptes = arena->ptes;
+ mask = max(align, arena->align_entry) - 1;
+ p = iommu_arena_find_pages(arena, n, mask);
+ if (p < 0) {
+ spin_unlock_irqrestore(&arena->lock, flags);
+ return -1;
+ }
+
+ /* Success. Mark them all in use, ie not zero and invalid
+ for the iommu tlb that could load them from under us.
+ The chip specific bits will fill this in with something
+ kosher when we return. */
+ for (i = 0; i < n; ++i)
+ ptes[p+i] = IOMMU_INVALID_PTE;
+
+ arena->next_entry = p + n;
+ spin_unlock_irqrestore(&arena->lock, flags);
+
+ return p;
+}
+
+static void
+iommu_arena_free(struct pci_iommu_arena *arena, long ofs, long n)
+{
+ unsigned long *p;
+ long i;
+
+ p = arena->ptes + ofs;
+ for (i = 0; i < n; ++i)
+ p[i] = 0;
+}
+
+/* Map a single buffer of the indicated size for PCI DMA in streaming
+ mode. The 32-bit PCI bus mastering address to use is returned.
+ Once the device is given the dma address, the device owns this memory
+ until either pci_unmap_single or pci_dma_sync_single is performed. */
+
+static dma_addr_t
+pci_map_single_1(struct pci_dev *pdev, void *cpu_addr, size_t size,
+ int dac_allowed)
+{
+ struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
+ dma_addr_t max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
+ struct pci_iommu_arena *arena;
+ long npages, dma_ofs, i;
+ unsigned long paddr;
+ dma_addr_t ret;
+ unsigned int align = 0;
+
+ paddr = __pa(cpu_addr);
+
+#if !DEBUG_NODIRECT
+ /* First check to see if we can use the direct map window. */
+ if (paddr + size + __direct_map_base - 1 <= max_dma
+ && paddr + size <= __direct_map_size) {
+ ret = paddr + __direct_map_base;
+
+ DBGA2("pci_map_single: [%p,%lx] -> direct %lx from %p\n",
+ cpu_addr, size, ret, __builtin_return_address(0));
+
+ return ret;
+ }
+#endif
+
+ /* Next, use DAC if selected earlier. */
+ if (dac_allowed) {
+ ret = paddr + alpha_mv.pci_dac_offset;
+
+ DBGA2("pci_map_single: [%p,%lx] -> DAC %lx from %p\n",
+ cpu_addr, size, ret, __builtin_return_address(0));
+
+ return ret;
+ }
+
+ /* If the machine doesn't define a pci_tbi routine, we have to
+ assume it doesn't support sg mapping, and, since we tried to
+ use direct_map above, it now must be considered an error. */
+ if (! alpha_mv.mv_pci_tbi) {
+ static int been_here = 0; /* Only print the message once. */
+ if (!been_here) {
+ printk(KERN_WARNING "pci_map_single: no HW sg\n");
+ been_here = 1;
+ }
+ return 0;
+ }
+
+ arena = hose->sg_pci;
+ if (!arena || arena->dma_base + arena->size - 1 > max_dma)
+ arena = hose->sg_isa;
+
+ npages = calc_npages((paddr & ~PAGE_MASK) + size);
+
+ /* Force allocation to 64KB boundary for ISA bridges. */
+ if (pdev && pdev == isa_bridge)
+ align = 8;
+ dma_ofs = iommu_arena_alloc(arena, npages, align);
+ if (dma_ofs < 0) {
+ printk(KERN_WARNING "pci_map_single failed: "
+ "could not allocate dma page tables\n");
+ return 0;
+ }
+
+ paddr &= PAGE_MASK;
+ for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
+ arena->ptes[i + dma_ofs] = mk_iommu_pte(paddr);
+
+ ret = arena->dma_base + dma_ofs * PAGE_SIZE;
+ ret += (unsigned long)cpu_addr & ~PAGE_MASK;
+
+ DBGA2("pci_map_single: [%p,%lx] np %ld -> sg %lx from %p\n",
+ cpu_addr, size, npages, ret, __builtin_return_address(0));
+
+ return ret;
+}
+
+dma_addr_t
+pci_map_single(struct pci_dev *pdev, void *cpu_addr, size_t size, int dir)
+{
+ int dac_allowed;
+
+ if (dir == PCI_DMA_NONE)
+ BUG();
+
+ dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
+ return pci_map_single_1(pdev, cpu_addr, size, dac_allowed);
+}
+
+dma_addr_t
+pci_map_page(struct pci_dev *pdev, struct page *page, unsigned long offset,
+ size_t size, int dir)
+{
+ int dac_allowed;
+
+ if (dir == PCI_DMA_NONE)
+ BUG();
+
+ dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
+ return pci_map_single_1(pdev, (char *)page_address(page) + offset,
+ size, dac_allowed);
+}
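+
+/* Minimal usage sketch (hypothetical driver code, not part of this
+   file): map a buffer for device writes, hand the bus address to the
+   hardware, then unmap it before the CPU reads the data:
+
+	dma_addr_t bus = pci_map_single(pdev, buf, len,
+					PCI_DMA_FROMDEVICE);
+	if (!bus)
+		return -ENOMEM;		-- no window, DAC, or SG space
+	(point the device at bus, wait for completion)
+	pci_unmap_single(pdev, bus, len, PCI_DMA_FROMDEVICE);
+*/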
+
+/* Unmap a single streaming mode DMA translation. The DMA_ADDR and
+ SIZE must match what was provided for in a previous pci_map_single
+ call. All other usages are undefined. After this call, reads by
+ the cpu to the buffer are guaranteed to see whatever the device
+ wrote there. */
+
+void
+pci_unmap_single(struct pci_dev *pdev, dma_addr_t dma_addr, size_t size,
+ int direction)
+{
+ unsigned long flags;
+ struct pci_controller *hose = pdev ? pdev->sysdata : pci_isa_hose;
+ struct pci_iommu_arena *arena;
+ long dma_ofs, npages;
+
+ if (direction == PCI_DMA_NONE)
+ BUG();
+
+ if (dma_addr >= __direct_map_base
+ && dma_addr < __direct_map_base + __direct_map_size) {
+ /* Nothing to do. */
+
+ DBGA2("pci_unmap_single: direct [%lx,%lx] from %p\n",
+ dma_addr, size, __builtin_return_address(0));
+
+ return;
+ }
+
+ if (dma_addr > 0xffffffff) {
+ DBGA2("pci64_unmap_single: DAC [%lx,%lx] from %p\n",
+ dma_addr, size, __builtin_return_address(0));
+ return;
+ }
+
+ arena = hose->sg_pci;
+ if (!arena || dma_addr < arena->dma_base)
+ arena = hose->sg_isa;
+
+ dma_ofs = (dma_addr - arena->dma_base) >> PAGE_SHIFT;
+ if (dma_ofs * PAGE_SIZE >= arena->size) {
+ printk(KERN_ERR "Bogus pci_unmap_single: dma_addr %lx "
+ " base %lx size %x\n", dma_addr, arena->dma_base,
+ arena->size);
+ return;
+ BUG();
+ }
+
+ npages = calc_npages((dma_addr & ~PAGE_MASK) + size);
+
+ spin_lock_irqsave(&arena->lock, flags);
+
+ iommu_arena_free(arena, dma_ofs, npages);
+
+ /* If we're freeing ptes above the `next_entry' pointer (they
+ may have snuck back into the TLB since the last wrap flush),
+ we need to flush the TLB before reallocating the latter. */
+ if (dma_ofs >= arena->next_entry)
+ alpha_mv.mv_pci_tbi(hose, dma_addr, dma_addr + size - 1);
+
+ spin_unlock_irqrestore(&arena->lock, flags);
+
+ DBGA2("pci_unmap_single: sg [%lx,%lx] np %ld from %p\n",
+ dma_addr, size, npages, __builtin_return_address(0));
+}
+
+void
+pci_unmap_page(struct pci_dev *pdev, dma_addr_t dma_addr,
+ size_t size, int direction)
+{
+ pci_unmap_single(pdev, dma_addr, size, direction);
+}
+
+/* Allocate and map kernel buffer using consistent mode DMA for PCI
+ device. Returns non-NULL cpu-view pointer to the buffer if
+ successful and sets *DMA_ADDRP to the pci side dma address as well,
+ else DMA_ADDRP is undefined. */
+
+void *
+pci_alloc_consistent(struct pci_dev *pdev, size_t size, dma_addr_t *dma_addrp)
+{
+ void *cpu_addr;
+ long order = get_order(size);
+ int gfp = GFP_ATOMIC;
+
+try_again:
+ cpu_addr = (void *)__get_free_pages(gfp, order);
+ if (! cpu_addr) {
+ printk(KERN_INFO "pci_alloc_consistent: "
+ "get_free_pages failed from %p\n",
+ __builtin_return_address(0));
+ /* ??? Really atomic allocation? Otherwise we could play
+ with vmalloc and sg if we can't find contiguous memory. */
+ return NULL;
+ }
+ memset(cpu_addr, 0, size);
+
+ *dma_addrp = pci_map_single_1(pdev, cpu_addr, size, 0);
+ if (*dma_addrp == 0) {
+ free_pages((unsigned long)cpu_addr, order);
+ if (alpha_mv.mv_pci_tbi || (gfp & GFP_DMA))
+ return NULL;
+ /* The address doesn't fit required mask and we
+ do not have iommu. Try again with GFP_DMA. */
+ gfp |= GFP_DMA;
+ goto try_again;
+ }
+
+ DBGA2("pci_alloc_consistent: %lx -> [%p,%x] from %p\n",
+ size, cpu_addr, *dma_addrp, __builtin_return_address(0));
+
+ return cpu_addr;
+}
+
+/* Free and unmap a consistent DMA buffer. CPU_ADDR and DMA_ADDR must
+ be values that were returned from pci_alloc_consistent. SIZE must
+   be the same as what was passed into pci_alloc_consistent.
+ References to the memory and mappings associated with CPU_ADDR or
+ DMA_ADDR past this call are illegal. */
+
+void
+pci_free_consistent(struct pci_dev *pdev, size_t size, void *cpu_addr,
+ dma_addr_t dma_addr)
+{
+ pci_unmap_single(pdev, dma_addr, size, PCI_DMA_BIDIRECTIONAL);
+ free_pages((unsigned long)cpu_addr, get_order(size));
+
+ DBGA2("pci_free_consistent: [%x,%lx] from %p\n",
+ dma_addr, size, __builtin_return_address(0));
+}
+
+
+/* Classify the elements of the scatterlist. Write dma_address
+ of each element with:
+ 0 : Followers all physically adjacent.
+ 1 : Followers all virtually adjacent.
+ -1 : Not leader, physically adjacent to previous.
+ -2 : Not leader, virtually adjacent to previous.
+ Write dma_length of each leader with the combined lengths of
+   the mergeable followers.  */
+
+#define SG_ENT_VIRT_ADDRESS(SG) (page_address((SG)->page) + (SG)->offset)
+#define SG_ENT_PHYS_ADDRESS(SG) __pa(SG_ENT_VIRT_ADDRESS(SG))
+
+static void
+sg_classify(struct scatterlist *sg, struct scatterlist *end, int virt_ok)
+{
+ unsigned long next_paddr;
+ struct scatterlist *leader;
+ long leader_flag, leader_length;
+
+ leader = sg;
+ leader_flag = 0;
+ leader_length = leader->length;
+ next_paddr = SG_ENT_PHYS_ADDRESS(leader) + leader_length;
+
+ for (++sg; sg < end; ++sg) {
+ unsigned long addr, len;
+ addr = SG_ENT_PHYS_ADDRESS(sg);
+ len = sg->length;
+
+ if (next_paddr == addr) {
+ sg->dma_address = -1;
+ leader_length += len;
+ } else if (((next_paddr | addr) & ~PAGE_MASK) == 0 && virt_ok) {
+ sg->dma_address = -2;
+ leader_flag = 1;
+ leader_length += len;
+ } else {
+ leader->dma_address = leader_flag;
+ leader->dma_length = leader_length;
+ leader = sg;
+ leader_flag = 0;
+ leader_length = len;
+ }
+
+ next_paddr = addr + len;
+ }
+
+ leader->dma_address = leader_flag;
+ leader->dma_length = leader_length;
+}
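+
+/* Classification example (illustrative; Alpha pages are 8KB): for
+   entries at physical 0x10000/len 0x1000, 0x11000/len 0x1000 and
+   0x20000/len 0x1000 with virt_ok set, the second entry is
+   physically adjacent (marked -1), and the third starts on a page
+   boundary just as the merged run ends on one (marked -2), so the
+   leader is written with dma_address == 1 (virtually mergeable
+   followers) and dma_length == 0x3000.  */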
+
+/* Given a scatterlist leader, choose an allocation method and fill
+ in the blanks. */
+
+static int
+sg_fill(struct scatterlist *leader, struct scatterlist *end,
+ struct scatterlist *out, struct pci_iommu_arena *arena,
+ dma_addr_t max_dma, int dac_allowed)
+{
+ unsigned long paddr = SG_ENT_PHYS_ADDRESS(leader);
+ long size = leader->dma_length;
+ struct scatterlist *sg;
+ unsigned long *ptes;
+ long npages, dma_ofs, i;
+
+#if !DEBUG_NODIRECT
+ /* If everything is physically contiguous, and the addresses
+ fall into the direct-map window, use it. */
+ if (leader->dma_address == 0
+ && paddr + size + __direct_map_base - 1 <= max_dma
+ && paddr + size <= __direct_map_size) {
+ out->dma_address = paddr + __direct_map_base;
+ out->dma_length = size;
+
+ DBGA(" sg_fill: [%p,%lx] -> direct %lx\n",
+ __va(paddr), size, out->dma_address);
+
+ return 0;
+ }
+#endif
+
+ /* If physically contiguous and DAC is available, use it. */
+ if (leader->dma_address == 0 && dac_allowed) {
+ out->dma_address = paddr + alpha_mv.pci_dac_offset;
+ out->dma_length = size;
+
+ DBGA(" sg_fill: [%p,%lx] -> DAC %lx\n",
+ __va(paddr), size, out->dma_address);
+
+ return 0;
+ }
+
+ /* Otherwise, we'll use the iommu to make the pages virtually
+ contiguous. */
+
+ paddr &= ~PAGE_MASK;
+ npages = calc_npages(paddr + size);
+ dma_ofs = iommu_arena_alloc(arena, npages, 0);
+ if (dma_ofs < 0) {
+ /* If we attempted a direct map above but failed, die. */
+ if (leader->dma_address == 0)
+ return -1;
+
+ /* Otherwise, break up the remaining virtually contiguous
+ hunks into individual direct maps and retry. */
+ sg_classify(leader, end, 0);
+ return sg_fill(leader, end, out, arena, max_dma, dac_allowed);
+ }
+
+ out->dma_address = arena->dma_base + dma_ofs*PAGE_SIZE + paddr;
+ out->dma_length = size;
+
+ DBGA(" sg_fill: [%p,%lx] -> sg %lx np %ld\n",
+ __va(paddr), size, out->dma_address, npages);
+
+ /* All virtually contiguous. We need to find the length of each
+ physically contiguous subsegment to fill in the ptes. */
+ ptes = &arena->ptes[dma_ofs];
+ sg = leader;
+ do {
+#if DEBUG_ALLOC > 0
+ struct scatterlist *last_sg = sg;
+#endif
+
+ size = sg->length;
+ paddr = SG_ENT_PHYS_ADDRESS(sg);
+
+ while (sg+1 < end && (int) sg[1].dma_address == -1) {
+ size += sg[1].length;
+ sg++;
+ }
+
+ npages = calc_npages((paddr & ~PAGE_MASK) + size);
+
+ paddr &= PAGE_MASK;
+ for (i = 0; i < npages; ++i, paddr += PAGE_SIZE)
+ *ptes++ = mk_iommu_pte(paddr);
+
+#if DEBUG_ALLOC > 0
+ DBGA(" (%ld) [%p,%x] np %ld\n",
+ last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
+ last_sg->length, npages);
+ while (++last_sg <= sg) {
+ DBGA(" (%ld) [%p,%x] cont\n",
+ last_sg - leader, SG_ENT_VIRT_ADDRESS(last_sg),
+ last_sg->length);
+ }
+#endif
+ } while (++sg < end && (int) sg->dma_address < 0);
+
+ return 1;
+}
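+
+/* Note (added commentary): on arena exhaustion the code above
+   reclassifies the remainder with virt_ok == 0, which demotes the
+   "virtually adjacent" (-2) marks to new leaders; the recursive
+   sg_fill call then tries each hunk through the direct-map or DAC
+   paths instead of the iommu.  */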
+
+int
+pci_map_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
+ int direction)
+{
+ struct scatterlist *start, *end, *out;
+ struct pci_controller *hose;
+ struct pci_iommu_arena *arena;
+ dma_addr_t max_dma;
+ int dac_allowed;
+
+ if (direction == PCI_DMA_NONE)
+ BUG();
+
+ dac_allowed = pdev ? pci_dac_dma_supported(pdev, pdev->dma_mask) : 0;
+
+ /* Fast path single entry scatterlists. */
+ if (nents == 1) {
+ sg->dma_length = sg->length;
+ sg->dma_address
+ = pci_map_single_1(pdev, SG_ENT_VIRT_ADDRESS(sg),
+ sg->length, dac_allowed);
+ return sg->dma_address != 0;
+ }
+
+ start = sg;
+ end = sg + nents;
+
+ /* First, prepare information about the entries. */
+ sg_classify(sg, end, alpha_mv.mv_pci_tbi != 0);
+
+ /* Second, figure out where we're going to map things. */
+ if (alpha_mv.mv_pci_tbi) {
+ hose = pdev ? pdev->sysdata : pci_isa_hose;
+ max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
+ arena = hose->sg_pci;
+ if (!arena || arena->dma_base + arena->size - 1 > max_dma)
+ arena = hose->sg_isa;
+ } else {
+ max_dma = -1;
+ arena = NULL;
+ hose = NULL;
+ }
+
+ /* Third, iterate over the scatterlist leaders and allocate
+ dma space as needed. */
+ for (out = sg; sg < end; ++sg) {
+ if ((int) sg->dma_address < 0)
+ continue;
+ if (sg_fill(sg, end, out, arena, max_dma, dac_allowed) < 0)
+ goto error;
+ out++;
+ }
+
+ /* Mark the end of the list for pci_unmap_sg. */
+ if (out < end)
+ out->dma_length = 0;
+
+ if (out - start == 0)
+ printk(KERN_WARNING "pci_map_sg failed: no entries?\n");
+ DBGA("pci_map_sg: %ld entries\n", out - start);
+
+ return out - start;
+
+ error:
+ printk(KERN_WARNING "pci_map_sg failed: "
+ "could not allocate dma page tables\n");
+
+ /* Some allocation failed while mapping the scatterlist
+ entries. Unmap them now. */
+ if (out > start)
+ pci_unmap_sg(pdev, start, out - start, direction);
+ return 0;
+}
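+
+/* A usage sketch, for illustration only: assume a driver-filled
+   scatterlist SG of NENTS entries and a hypothetical
+   mydev_program_hw() helper that feeds one DMA segment to the
+   device.  Note that pci_map_sg may coalesce entries, so the device
+   is programmed with the returned count, while unmapping uses the
+   original NENTS:
+
+	int i, n = pci_map_sg(pdev, sg, nents, PCI_DMA_TODEVICE);
+	if (n == 0)
+		return -ENOMEM;
+	for (i = 0; i < n; ++i)
+		mydev_program_hw(sg_dma_address(&sg[i]),
+				 sg_dma_len(&sg[i]));
+	(start the transfer and wait for it to finish)
+	pci_unmap_sg(pdev, sg, nents, PCI_DMA_TODEVICE);
+*/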
+
+/* Unmap a set of streaming mode DMA translations. Again, cpu read
+ rules concerning calls here are the same as for pci_unmap_single()
+ above. */
+
+void
+pci_unmap_sg(struct pci_dev *pdev, struct scatterlist *sg, int nents,
+ int direction)
+{
+ unsigned long flags;
+ struct pci_controller *hose;
+ struct pci_iommu_arena *arena;
+ struct scatterlist *end;
+ dma_addr_t max_dma;
+ dma_addr_t fbeg, fend;
+
+ if (direction == PCI_DMA_NONE)
+ BUG();
+
+ if (! alpha_mv.mv_pci_tbi)
+ return;
+
+ hose = pdev ? pdev->sysdata : pci_isa_hose;
+ max_dma = pdev ? pdev->dma_mask : ISA_DMA_MASK;
+ arena = hose->sg_pci;
+ if (!arena || arena->dma_base + arena->size - 1 > max_dma)
+ arena = hose->sg_isa;
+
+ fbeg = -1, fend = 0;
+
+ spin_lock_irqsave(&arena->lock, flags);
+
+ for (end = sg + nents; sg < end; ++sg) {
+ dma64_addr_t addr;
+ size_t size;
+ long npages, ofs;
+ dma_addr_t tend;
+
+ addr = sg->dma_address;
+ size = sg->dma_length;
+ if (!size)
+ break;
+
+ if (addr > 0xffffffff) {
+ /* It's a DAC address -- nothing to do. */
+ DBGA(" (%ld) DAC [%lx,%lx]\n",
+ sg - end + nents, addr, size);
+ continue;
+ }
+
+ if (addr >= __direct_map_base
+ && addr < __direct_map_base + __direct_map_size) {
+ /* Nothing to do. */
+ DBGA(" (%ld) direct [%lx,%lx]\n",
+ sg - end + nents, addr, size);
+ continue;
+ }
+
+ DBGA(" (%ld) sg [%lx,%lx]\n",
+ sg - end + nents, addr, size);
+
+ npages = calc_npages((addr & ~PAGE_MASK) + size);
+ ofs = (addr - arena->dma_base) >> PAGE_SHIFT;
+ iommu_arena_free(arena, ofs, npages);
+
+ tend = addr + size - 1;
+ if (fbeg > addr) fbeg = addr;
+ if (fend < tend) fend = tend;
+ }
+
+	/* If we're freeing ptes above the `next_entry' pointer (they
+	   may have snuck back into the TLB since the last wrap flush),
+	   we need to flush the TLB before they can be reallocated. */
+ if ((fend - arena->dma_base) >> PAGE_SHIFT >= arena->next_entry)
+ alpha_mv.mv_pci_tbi(hose, fbeg, fend);
+
+ spin_unlock_irqrestore(&arena->lock, flags);
+
+ DBGA("pci_unmap_sg: %ld entries\n", nents - (end - sg));
+}
+
+
+/* Return whether the given PCI device DMA address mask can be
+ supported properly. */
+
+int
+pci_dma_supported(struct pci_dev *pdev, u64 mask)
+{
+ struct pci_controller *hose;
+ struct pci_iommu_arena *arena;
+
+	/* If there exists a direct map, and the mask fits either
+	   the entire direct mapped space or the total system memory as
+	   shifted by the map base, then the direct map will do. */
+ if (__direct_map_size != 0
+ && (__direct_map_base + __direct_map_size - 1 <= mask ||
+ __direct_map_base + (max_low_pfn << PAGE_SHIFT) - 1 <= mask))
+ return 1;
+
+ /* Check that we have a scatter-gather arena that fits. */
+ hose = pdev ? pdev->sysdata : pci_isa_hose;
+ arena = hose->sg_isa;
+ if (arena && arena->dma_base + arena->size - 1 <= mask)
+ return 1;
+ arena = hose->sg_pci;
+ if (arena && arena->dma_base + arena->size - 1 <= mask)
+ return 1;
+
+ /* As last resort try ZONE_DMA. */
+ if (!__direct_map_base && MAX_DMA_ADDRESS - IDENT_ADDR - 1 <= mask)
+ return 1;
+
+ return 0;
+}
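+
+/* For example: a device advertising an ISA-style 24-bit mask
+   (0x00ffffff) is supported only if the direct map, one of the sg
+   arenas, or ZONE_DMA lies entirely below 16MB, while a full 64-bit
+   mask passes the first test on any machine with a direct map. */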
+
+
+/*
+ * AGP GART extensions to the IOMMU
+ */
+int
+iommu_reserve(struct pci_iommu_arena *arena, long pg_count, long align_mask)
+{
+ unsigned long flags;
+ unsigned long *ptes;
+ long i, p;
+
+ if (!arena) return -EINVAL;
+
+ spin_lock_irqsave(&arena->lock, flags);
+
+ /* Search for N empty ptes. */
+ ptes = arena->ptes;
+ p = iommu_arena_find_pages(arena, pg_count, align_mask);
+ if (p < 0) {
+ spin_unlock_irqrestore(&arena->lock, flags);
+ return -1;
+ }
+
+	/* Success.  Mark them all reserved (ie not zero and invalid)
+	   so the iommu tlb cannot load them from under us.  They will
+	   be filled in with valid bits by _bind(). */
+ for (i = 0; i < pg_count; ++i)
+ ptes[p+i] = IOMMU_RESERVED_PTE;
+
+ arena->next_entry = p + pg_count;
+ spin_unlock_irqrestore(&arena->lock, flags);
+
+ return p;
+}
+
+int
+iommu_release(struct pci_iommu_arena *arena, long pg_start, long pg_count)
+{
+ unsigned long *ptes;
+ long i;
+
+ if (!arena) return -EINVAL;
+
+ ptes = arena->ptes;
+
+ /* Make sure they're all reserved first... */
+ for(i = pg_start; i < pg_start + pg_count; i++)
+ if (ptes[i] != IOMMU_RESERVED_PTE)
+ return -EBUSY;
+
+ iommu_arena_free(arena, pg_start, pg_count);
+ return 0;
+}
+
+int
+iommu_bind(struct pci_iommu_arena *arena, long pg_start, long pg_count,
+ unsigned long *physaddrs)
+{
+ unsigned long flags;
+ unsigned long *ptes;
+ long i, j;
+
+ if (!arena) return -EINVAL;
+
+ spin_lock_irqsave(&arena->lock, flags);
+
+ ptes = arena->ptes;
+
+ for(j = pg_start; j < pg_start + pg_count; j++) {
+ if (ptes[j] != IOMMU_RESERVED_PTE) {
+ spin_unlock_irqrestore(&arena->lock, flags);
+ return -EBUSY;
+ }
+ }
+
+ for(i = 0, j = pg_start; i < pg_count; i++, j++)
+ ptes[j] = mk_iommu_pte(physaddrs[i]);
+
+ spin_unlock_irqrestore(&arena->lock, flags);
+
+ return 0;
+}
+
+int
+iommu_unbind(struct pci_iommu_arena *arena, long pg_start, long pg_count)
+{
+ unsigned long *p;
+ long i;
+
+ if (!arena) return -EINVAL;
+
+ p = arena->ptes + pg_start;
+ for(i = 0; i < pg_count; i++)
+ p[i] = IOMMU_RESERVED_PTE;
+
+ return 0;
+}
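+
+/* The intended calling sequence, as a sketch (arena, pg_count,
+   align_mask and physaddrs[] are caller-supplied; error handling
+   elided):
+
+	long pg = iommu_reserve(arena, pg_count, align_mask);
+	(the ptes now hold IOMMU_RESERVED_PTE)
+	iommu_bind(arena, pg, pg_count, physaddrs);
+	(device DMA proceeds through the arena window)
+	iommu_unbind(arena, pg, pg_count);
+	iommu_release(arena, pg, pg_count);
+*/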
+
+/* True if the machine supports DAC addressing, and DEV can
+ make use of it given MASK. */
+
+int
+pci_dac_dma_supported(struct pci_dev *dev, u64 mask)
+{
+ dma64_addr_t dac_offset = alpha_mv.pci_dac_offset;
+ int ok = 1;
+
+ /* If this is not set, the machine doesn't support DAC at all. */
+ if (dac_offset == 0)
+ ok = 0;
+
+ /* The device has to be able to address our DAC bit. */
+ if ((dac_offset & dev->dma_mask) != dac_offset)
+ ok = 0;
+
+ /* If both conditions above are met, we are fine. */
+ DBGA("pci_dac_dma_supported %s from %p\n",
+ ok ? "yes" : "no", __builtin_return_address(0));
+
+ return ok;
+}
+
+dma64_addr_t
+pci_dac_page_to_dma(struct pci_dev *pdev, struct page *page,
+ unsigned long offset, int direction)
+{
+ return (alpha_mv.pci_dac_offset
+ + __pa(page_address(page))
+ + (dma64_addr_t) offset);
+}
+
+struct page *
+pci_dac_dma_to_page(struct pci_dev *pdev, dma64_addr_t dma_addr)
+{
+ unsigned long paddr = (dma_addr & PAGE_MASK) - alpha_mv.pci_dac_offset;
+ return virt_to_page(__va(paddr));
+}
+
+unsigned long
+pci_dac_dma_to_offset(struct pci_dev *pdev, dma64_addr_t dma_addr)
+{
+ return (dma_addr & ~PAGE_MASK);
+}
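+
+/* Round-trip property of the three helpers above, for illustration
+   (offset < PAGE_SIZE): if
+
+	dma64_addr_t d = pci_dac_page_to_dma(pdev, page, offset, dir);
+
+   then pci_dac_dma_to_page(pdev, d) == page and
+   pci_dac_dma_to_offset(pdev, d) == offset, since a DAC address is
+   just pci_dac_offset plus the physical address. */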
+
+
+/* Helper for generic DMA-mapping functions. */
+
+struct pci_dev *
+alpha_gendev_to_pci(struct device *dev)
+{
+ if (dev && dev->bus == &pci_bus_type)
+ return to_pci_dev(dev);
+
+ /* Assume that non-PCI devices asking for DMA are either ISA or EISA,
+ BUG() otherwise. */
+ BUG_ON(!isa_bridge);
+
+ /* Assume non-busmaster ISA DMA when dma_mask is not set (the ISA
+ bridge is bus master then). */
+ if (!dev || !dev->dma_mask || !*dev->dma_mask)
+ return isa_bridge;
+
+ /* For EISA bus masters, return isa_bridge (it might have smaller
+ dma_mask due to wiring limitations). */
+ if (*dev->dma_mask >= isa_bridge->dma_mask)
+ return isa_bridge;
+
+ /* This assumes ISA bus master with dma_mask 0xffffff. */
+ return NULL;
+}
+
+int
+dma_set_mask(struct device *dev, u64 mask)
+{
+ if (!dev->dma_mask ||
+ !pci_dma_supported(alpha_gendev_to_pci(dev), mask))
+ return -EIO;
+
+ *dev->dma_mask = mask;
+
+ return 0;
+}
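+
+/* Typical caller sketch (illustrative): a driver limited to 32-bit
+   addressing might do
+
+	if (dma_set_mask(dev, 0xfffffffful))
+		goto no_dma;
+
+   and fall back or fail the probe when the platform cannot cover
+   the requested mask. */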
diff --git a/arch/alpha/kernel/process.c b/arch/alpha/kernel/process.c
new file mode 100644
index 000000000000..4933f3ce5833
--- /dev/null
+++ b/arch/alpha/kernel/process.c
@@ -0,0 +1,528 @@
+/*
+ * linux/arch/alpha/kernel/process.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ */
+
+/*
+ * This file handles the architecture-dependent parts of process handling.
+ */
+
+#include <linux/config.h>
+#include <linux/errno.h>
+#include <linux/module.h>
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/utsname.h>
+#include <linux/time.h>
+#include <linux/major.h>
+#include <linux/stat.h>
+#include <linux/mman.h>
+#include <linux/elfcore.h>
+#include <linux/reboot.h>
+#include <linux/tty.h>
+#include <linux/console.h>
+
+#include <asm/reg.h>
+#include <asm/uaccess.h>
+#include <asm/system.h>
+#include <asm/io.h>
+#include <asm/pgtable.h>
+#include <asm/hwrpb.h>
+#include <asm/fpu.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+void default_idle(void)
+{
+ barrier();
+}
+
+void
+cpu_idle(void)
+{
+ while (1) {
+ void (*idle)(void) = default_idle;
+ /* FIXME -- EV6 and LCA45 know how to power down
+ the CPU. */
+
+ while (!need_resched())
+ idle();
+ schedule();
+ }
+}
+
+
+struct halt_info {
+ int mode;
+ char *restart_cmd;
+};
+
+static void
+common_shutdown_1(void *generic_ptr)
+{
+ struct halt_info *how = (struct halt_info *)generic_ptr;
+ struct percpu_struct *cpup;
+ unsigned long *pflags, flags;
+ int cpuid = smp_processor_id();
+
+ /* No point in taking interrupts anymore. */
+ local_irq_disable();
+
+ cpup = (struct percpu_struct *)
+ ((unsigned long)hwrpb + hwrpb->processor_offset
+ + hwrpb->processor_size * cpuid);
+ pflags = &cpup->flags;
+ flags = *pflags;
+
+ /* Clear reason to "default"; clear "bootstrap in progress". */
+ flags &= ~0x00ff0001UL;
+
+#ifdef CONFIG_SMP
+ /* Secondaries halt here. */
+ if (cpuid != boot_cpuid) {
+ flags |= 0x00040000UL; /* "remain halted" */
+ *pflags = flags;
+ clear_bit(cpuid, &cpu_present_mask);
+ halt();
+ }
+#endif
+
+ if (how->mode == LINUX_REBOOT_CMD_RESTART) {
+ if (!how->restart_cmd) {
+ flags |= 0x00020000UL; /* "cold bootstrap" */
+ } else {
+ /* For SRM, we could probably set environment
+ variables to get this to work. We'd have to
+ delay this until after srm_paging_stop unless
+ we ever got srm_fixup working.
+
+ At the moment, SRM will use the last boot device,
+ but the file and flags will be the defaults, when
+ doing a "warm" bootstrap. */
+ flags |= 0x00030000UL; /* "warm bootstrap" */
+ }
+ } else {
+ flags |= 0x00040000UL; /* "remain halted" */
+ }
+ *pflags = flags;
+
+#ifdef CONFIG_SMP
+ /* Wait for the secondaries to halt. */
+ cpu_clear(boot_cpuid, cpu_possible_map);
+ while (cpus_weight(cpu_possible_map))
+ barrier();
+#endif
+
+ /* If booted from SRM, reset some of the original environment. */
+ if (alpha_using_srm) {
+#ifdef CONFIG_DUMMY_CONSOLE
+ /* This has the effect of resetting the VGA video origin. */
+ take_over_console(&dummy_con, 0, MAX_NR_CONSOLES-1, 1);
+#endif
+ pci_restore_srm_config();
+ set_hae(srm_hae);
+ }
+
+ if (alpha_mv.kill_arch)
+ alpha_mv.kill_arch(how->mode);
+
+ if (! alpha_using_srm && how->mode != LINUX_REBOOT_CMD_RESTART) {
+ /* Unfortunately, since MILO doesn't currently understand
+ the hwrpb bits above, we can't reliably halt the
+ processor and keep it halted. So just loop. */
+ return;
+ }
+
+ if (alpha_using_srm)
+ srm_paging_stop();
+
+ halt();
+}
+
+static void
+common_shutdown(int mode, char *restart_cmd)
+{
+ struct halt_info args;
+ args.mode = mode;
+ args.restart_cmd = restart_cmd;
+ on_each_cpu(common_shutdown_1, &args, 1, 0);
+}
+
+void
+machine_restart(char *restart_cmd)
+{
+ common_shutdown(LINUX_REBOOT_CMD_RESTART, restart_cmd);
+}
+
+EXPORT_SYMBOL(machine_restart);
+
+void
+machine_halt(void)
+{
+ common_shutdown(LINUX_REBOOT_CMD_HALT, NULL);
+}
+
+EXPORT_SYMBOL(machine_halt);
+
+void
+machine_power_off(void)
+{
+ common_shutdown(LINUX_REBOOT_CMD_POWER_OFF, NULL);
+}
+
+EXPORT_SYMBOL(machine_power_off);
+
+/* Used by sysrq-p, among others.  I don't believe r9-r15 are ever
+   saved in the contexts in which this is used. */
+
+void
+show_regs(struct pt_regs *regs)
+{
+ dik_show_regs(regs, NULL);
+}
+
+/*
+ * Re-start a thread when doing execve()
+ */
+void
+start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
+{
+ set_fs(USER_DS);
+ regs->pc = pc;
+ regs->ps = 8;
+ wrusp(sp);
+}
+
+/*
+ * Free current thread data structures etc..
+ */
+void
+exit_thread(void)
+{
+}
+
+void
+flush_thread(void)
+{
+	/* Arrange for each exec'ed process to start off with a clean slate
+	   with respect to the FPU, i.e. with all exceptions disabled. */
+ current_thread_info()->ieee_state = 0;
+ wrfpcr(FPCR_DYN_NORMAL | ieee_swcr_to_fpcr(0));
+
+ /* Clean slate for TLS. */
+ current_thread_info()->pcb.unique = 0;
+}
+
+void
+release_thread(struct task_struct *dead_task)
+{
+}
+
+/*
+ * "alpha_clone()".. By the time we get here, the
+ * non-volatile registers have also been saved on the
+ * stack. We do some ugly pointer stuff here.. (see
+ * also copy_thread)
+ *
+ * Notice that "fork()" is implemented in terms of clone,
+ * with parameters (SIGCHLD, 0).
+ */
+int
+alpha_clone(unsigned long clone_flags, unsigned long usp,
+ int __user *parent_tid, int __user *child_tid,
+ unsigned long tls_value, struct pt_regs *regs)
+{
+ if (!usp)
+ usp = rdusp();
+
+ return do_fork(clone_flags, usp, regs, 0, parent_tid, child_tid);
+}
+
+int
+alpha_vfork(struct pt_regs *regs)
+{
+ return do_fork(CLONE_VFORK | CLONE_VM | SIGCHLD, rdusp(),
+ regs, 0, NULL, NULL);
+}
+
+/*
+ * Copy an alpha thread..
+ *
+ * Note the "stack_offset" stuff: when returning to kernel mode, we need
+ * to have some extra stack-space for the kernel stack that still exists
+ * after the "ret_from_fork". When returning to user mode, we only want
+ * the space needed by the syscall stack frame (ie "struct pt_regs").
+ * Use the passed "regs" pointer to determine how much space we need
+ * for a kernel fork().
+ */
+
+int
+copy_thread(int nr, unsigned long clone_flags, unsigned long usp,
+ unsigned long unused,
+ struct task_struct * p, struct pt_regs * regs)
+{
+ extern void ret_from_fork(void);
+
+ struct thread_info *childti = p->thread_info;
+ struct pt_regs * childregs;
+ struct switch_stack * childstack, *stack;
+ unsigned long stack_offset, settls;
+
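+	/* regs->ps bit 3 (the 8 tested below) is set when the parent
+	   entered the kernel from user mode (start_thread above sets
+	   ps = 8); for a kernel-mode fork we instead keep the parent's
+	   offset within the page, preserving the extra kernel stack
+	   described in the comment above. */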
+ stack_offset = PAGE_SIZE - sizeof(struct pt_regs);
+ if (!(regs->ps & 8))
+ stack_offset = (PAGE_SIZE-1) & (unsigned long) regs;
+ childregs = (struct pt_regs *)
+ (stack_offset + PAGE_SIZE + (long) childti);
+
+ *childregs = *regs;
+ settls = regs->r20;
+ childregs->r0 = 0;
+ childregs->r19 = 0;
+ childregs->r20 = 1; /* OSF/1 has some strange fork() semantics. */
+ regs->r20 = 0;
+ stack = ((struct switch_stack *) regs) - 1;
+ childstack = ((struct switch_stack *) childregs) - 1;
+ *childstack = *stack;
+ childstack->r26 = (unsigned long) ret_from_fork;
+ childti->pcb.usp = usp;
+ childti->pcb.ksp = (unsigned long) childstack;
+ childti->pcb.flags = 1; /* set FEN, clear everything else */
+
+ /* Set a new TLS for the child thread? Peek back into the
+ syscall arguments that we saved on syscall entry. Oops,
+ except we'd have clobbered it with the parent/child set
+ of r20. Read the saved copy. */
+ /* Note: if CLONE_SETTLS is not set, then we must inherit the
+ value from the parent, which will have been set by the block
+ copy in dup_task_struct. This is non-intuitive, but is
+ required for proper operation in the case of a threaded
+ application calling fork. */
+ if (clone_flags & CLONE_SETTLS)
+ childti->pcb.unique = settls;
+
+ return 0;
+}
+
+/*
+ * Fill in the user structure for an ECOFF core dump.
+ */
+void
+dump_thread(struct pt_regs * pt, struct user * dump)
+{
+ /* switch stack follows right below pt_regs: */
+ struct switch_stack * sw = ((struct switch_stack *) pt) - 1;
+
+ dump->magic = CMAGIC;
+ dump->start_code = current->mm->start_code;
+ dump->start_data = current->mm->start_data;
+ dump->start_stack = rdusp() & ~(PAGE_SIZE - 1);
+ dump->u_tsize = ((current->mm->end_code - dump->start_code)
+ >> PAGE_SHIFT);
+ dump->u_dsize = ((current->mm->brk + PAGE_SIZE-1 - dump->start_data)
+ >> PAGE_SHIFT);
+ dump->u_ssize = (current->mm->start_stack - dump->start_stack
+ + PAGE_SIZE-1) >> PAGE_SHIFT;
+
+ /*
+ * We store the registers in an order/format that is
+ * compatible with DEC Unix/OSF/1 as this makes life easier
+ * for gdb.
+ */
+ dump->regs[EF_V0] = pt->r0;
+ dump->regs[EF_T0] = pt->r1;
+ dump->regs[EF_T1] = pt->r2;
+ dump->regs[EF_T2] = pt->r3;
+ dump->regs[EF_T3] = pt->r4;
+ dump->regs[EF_T4] = pt->r5;
+ dump->regs[EF_T5] = pt->r6;
+ dump->regs[EF_T6] = pt->r7;
+ dump->regs[EF_T7] = pt->r8;
+ dump->regs[EF_S0] = sw->r9;
+ dump->regs[EF_S1] = sw->r10;
+ dump->regs[EF_S2] = sw->r11;
+ dump->regs[EF_S3] = sw->r12;
+ dump->regs[EF_S4] = sw->r13;
+ dump->regs[EF_S5] = sw->r14;
+ dump->regs[EF_S6] = sw->r15;
+ dump->regs[EF_A3] = pt->r19;
+ dump->regs[EF_A4] = pt->r20;
+ dump->regs[EF_A5] = pt->r21;
+ dump->regs[EF_T8] = pt->r22;
+ dump->regs[EF_T9] = pt->r23;
+ dump->regs[EF_T10] = pt->r24;
+ dump->regs[EF_T11] = pt->r25;
+ dump->regs[EF_RA] = pt->r26;
+ dump->regs[EF_T12] = pt->r27;
+ dump->regs[EF_AT] = pt->r28;
+ dump->regs[EF_SP] = rdusp();
+ dump->regs[EF_PS] = pt->ps;
+ dump->regs[EF_PC] = pt->pc;
+ dump->regs[EF_GP] = pt->gp;
+ dump->regs[EF_A0] = pt->r16;
+ dump->regs[EF_A1] = pt->r17;
+ dump->regs[EF_A2] = pt->r18;
+ memcpy((char *)dump->regs + EF_SIZE, sw->fp, 32 * 8);
+}
+
+/*
+ * Fill in the user structure for a ELF core dump.
+ */
+void
+dump_elf_thread(elf_greg_t *dest, struct pt_regs *pt, struct thread_info *ti)
+{
+ /* switch stack follows right below pt_regs: */
+ struct switch_stack * sw = ((struct switch_stack *) pt) - 1;
+
+ dest[ 0] = pt->r0;
+ dest[ 1] = pt->r1;
+ dest[ 2] = pt->r2;
+ dest[ 3] = pt->r3;
+ dest[ 4] = pt->r4;
+ dest[ 5] = pt->r5;
+ dest[ 6] = pt->r6;
+ dest[ 7] = pt->r7;
+ dest[ 8] = pt->r8;
+ dest[ 9] = sw->r9;
+ dest[10] = sw->r10;
+ dest[11] = sw->r11;
+ dest[12] = sw->r12;
+ dest[13] = sw->r13;
+ dest[14] = sw->r14;
+ dest[15] = sw->r15;
+ dest[16] = pt->r16;
+ dest[17] = pt->r17;
+ dest[18] = pt->r18;
+ dest[19] = pt->r19;
+ dest[20] = pt->r20;
+ dest[21] = pt->r21;
+ dest[22] = pt->r22;
+ dest[23] = pt->r23;
+ dest[24] = pt->r24;
+ dest[25] = pt->r25;
+ dest[26] = pt->r26;
+ dest[27] = pt->r27;
+ dest[28] = pt->r28;
+ dest[29] = pt->gp;
+ dest[30] = rdusp();
+ dest[31] = pt->pc;
+
+ /* Once upon a time this was the PS value. Which is stupid
+ since that is always 8 for usermode. Usurped for the more
+ useful value of the thread's UNIQUE field. */
+ dest[32] = ti->pcb.unique;
+}
+
+int
+dump_elf_task(elf_greg_t *dest, struct task_struct *task)
+{
+ struct thread_info *ti;
+ struct pt_regs *pt;
+
+ ti = task->thread_info;
+ pt = (struct pt_regs *)((unsigned long)ti + 2*PAGE_SIZE) - 1;
+
+ dump_elf_thread(dest, pt, ti);
+
+ return 1;
+}
+
+int
+dump_elf_task_fp(elf_fpreg_t *dest, struct task_struct *task)
+{
+ struct thread_info *ti;
+ struct pt_regs *pt;
+ struct switch_stack *sw;
+
+ ti = task->thread_info;
+ pt = (struct pt_regs *)((unsigned long)ti + 2*PAGE_SIZE) - 1;
+ sw = (struct switch_stack *)pt - 1;
+
+ memcpy(dest, sw->fp, 32 * 8);
+
+ return 1;
+}
+
+/*
+ * sys_execve() executes a new program.
+ */
+asmlinkage int
+do_sys_execve(char __user *ufilename, char __user * __user *argv,
+ char __user * __user *envp, struct pt_regs *regs)
+{
+ int error;
+ char *filename;
+
+ filename = getname(ufilename);
+ error = PTR_ERR(filename);
+ if (IS_ERR(filename))
+ goto out;
+ error = do_execve(filename, argv, envp, regs);
+ putname(filename);
+out:
+ return error;
+}
+
+/*
+ * Return saved PC of a blocked thread. This assumes the frame
+ * pointer is the 6th saved long on the kernel stack and that the
+ * saved return address is the first long in the frame. This all
+ * holds provided the thread blocked through a call to schedule() ($15
+ * is the frame pointer in schedule() and $15 is saved at offset 48 by
+ * entry.S:do_switch_stack).
+ *
+ * Under heavy swap load I've seen this lose in an ugly way. So do
+ * some extra sanity checking on the ranges we expect these pointers
+ * to be in so that we can fail gracefully. This is just for ps after
+ * all. -- r~
+ */
+
+unsigned long
+thread_saved_pc(task_t *t)
+{
+ unsigned long base = (unsigned long)t->thread_info;
+ unsigned long fp, sp = t->thread_info->pcb.ksp;
+
+ if (sp > base && sp+6*8 < base + 16*1024) {
+ fp = ((unsigned long*)sp)[6];
+ if (fp > sp && fp < base + 16*1024)
+ return *(unsigned long *)fp;
+ }
+
+ return 0;
+}
+
+unsigned long
+get_wchan(struct task_struct *p)
+{
+ unsigned long schedule_frame;
+ unsigned long pc;
+ if (!p || p == current || p->state == TASK_RUNNING)
+ return 0;
+ /*
+ * This one depends on the frame size of schedule(). Do a
+ * "disass schedule" in gdb to find the frame size. Also, the
+ * code assumes that sleep_on() follows immediately after
+ * interruptible_sleep_on() and that add_timer() follows
+	 * immediately after interruptible_sleep_on().  Ugly, isn't it?
+ * Maybe adding a wchan field to task_struct would be better,
+ * after all...
+ */
+
+ pc = thread_saved_pc(p);
+ if (in_sched_functions(pc)) {
+ schedule_frame = ((unsigned long *)p->thread_info->pcb.ksp)[6];
+ return ((unsigned long *)schedule_frame)[12];
+ }
+ return pc;
+}
diff --git a/arch/alpha/kernel/proto.h b/arch/alpha/kernel/proto.h
new file mode 100644
index 000000000000..e1560fb15610
--- /dev/null
+++ b/arch/alpha/kernel/proto.h
@@ -0,0 +1,210 @@
+#include <linux/config.h>
+#include <linux/interrupt.h>
+
+
+/* Prototypes of functions used across modules here in this directory. */
+
+#define vucp volatile unsigned char *
+#define vusp volatile unsigned short *
+#define vip volatile int *
+#define vuip volatile unsigned int *
+#define vulp volatile unsigned long *
+
+struct pt_regs;
+struct task_struct;
+struct pci_dev;
+struct pci_controller;
+
+/* core_apecs.c */
+extern struct pci_ops apecs_pci_ops;
+extern void apecs_init_arch(void);
+extern void apecs_pci_clr_err(void);
+extern void apecs_machine_check(u64, u64, struct pt_regs *);
+extern void apecs_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
+
+/* core_cia.c */
+extern struct pci_ops cia_pci_ops;
+extern void cia_init_pci(void);
+extern void cia_init_arch(void);
+extern void pyxis_init_arch(void);
+extern void cia_kill_arch(int);
+extern void cia_machine_check(u64, u64, struct pt_regs *);
+extern void cia_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
+
+/* core_irongate.c */
+extern struct pci_ops irongate_pci_ops;
+extern int irongate_pci_clr_err(void);
+extern void irongate_init_arch(void);
+extern void irongate_machine_check(u64, u64, struct pt_regs *);
+#define irongate_pci_tbi ((void *)0)
+
+/* core_lca.c */
+extern struct pci_ops lca_pci_ops;
+extern void lca_init_arch(void);
+extern void lca_machine_check(u64, u64, struct pt_regs *);
+extern void lca_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
+
+/* core_marvel.c */
+extern struct pci_ops marvel_pci_ops;
+extern void marvel_init_arch(void);
+extern void marvel_kill_arch(int);
+extern void marvel_machine_check(u64, u64, struct pt_regs *);
+extern void marvel_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
+extern int marvel_pa_to_nid(unsigned long);
+extern int marvel_cpuid_to_nid(int);
+extern unsigned long marvel_node_mem_start(int);
+extern unsigned long marvel_node_mem_size(int);
+extern struct _alpha_agp_info *marvel_agp_info(void);
+struct io7 *marvel_find_io7(int pe);
+struct io7 *marvel_next_io7(struct io7 *prev);
+void io7_clear_errors(struct io7 *io7);
+
+/* core_mcpcia.c */
+extern struct pci_ops mcpcia_pci_ops;
+extern void mcpcia_init_arch(void);
+extern void mcpcia_init_hoses(void);
+extern void mcpcia_machine_check(u64, u64, struct pt_regs *);
+extern void mcpcia_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
+
+/* core_polaris.c */
+extern struct pci_ops polaris_pci_ops;
+extern int polaris_read_config_dword(struct pci_dev *, int, u32 *);
+extern int polaris_write_config_dword(struct pci_dev *, int, u32);
+extern void polaris_init_arch(void);
+extern void polaris_machine_check(u64, u64, struct pt_regs *);
+#define polaris_pci_tbi ((void *)0)
+
+/* core_t2.c */
+extern struct pci_ops t2_pci_ops;
+extern void t2_init_arch(void);
+extern void t2_kill_arch(int);
+extern void t2_machine_check(u64, u64, struct pt_regs *);
+extern void t2_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
+
+/* core_titan.c */
+extern struct pci_ops titan_pci_ops;
+extern void titan_init_arch(void);
+extern void titan_kill_arch(int);
+extern void titan_machine_check(u64, u64, struct pt_regs *);
+extern void titan_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
+extern struct _alpha_agp_info *titan_agp_info(void);
+
+/* core_tsunami.c */
+extern struct pci_ops tsunami_pci_ops;
+extern void tsunami_init_arch(void);
+extern void tsunami_kill_arch(int);
+extern void tsunami_machine_check(u64, u64, struct pt_regs *);
+extern void tsunami_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
+
+/* core_wildfire.c */
+extern struct pci_ops wildfire_pci_ops;
+extern void wildfire_init_arch(void);
+extern void wildfire_kill_arch(int);
+extern void wildfire_machine_check(u64, u64, struct pt_regs *);
+extern void wildfire_pci_tbi(struct pci_controller *, dma_addr_t, dma_addr_t);
+extern int wildfire_pa_to_nid(unsigned long);
+extern int wildfire_cpuid_to_nid(int);
+extern unsigned long wildfire_node_mem_start(int);
+extern unsigned long wildfire_node_mem_size(int);
+
+/* setup.c */
+extern unsigned long srm_hae;
+extern int boot_cpuid;
+#ifdef CONFIG_VERBOSE_MCHECK
+extern unsigned long alpha_verbose_mcheck;
+#endif
+
+/* srmcons.c */
+#if defined(CONFIG_ALPHA_GENERIC) || defined(CONFIG_ALPHA_SRM)
+extern void register_srm_console(void);
+extern void unregister_srm_console(void);
+#else
+#define register_srm_console()
+#define unregister_srm_console()
+#endif
+
+/* smp.c */
+extern void setup_smp(void);
+extern void handle_ipi(struct pt_regs *);
+extern void smp_percpu_timer_interrupt(struct pt_regs *);
+
+/* bios32.c */
+/* extern void reset_for_srm(void); */
+
+/* time.c */
+extern irqreturn_t timer_interrupt(int irq, void *dev, struct pt_regs * regs);
+extern void common_init_rtc(void);
+extern unsigned long est_cycle_freq;
+
+/* smc37c93x.c */
+extern void SMC93x_Init(void);
+
+/* smc37c669.c */
+extern void SMC669_Init(int);
+
+/* es1888.c */
+extern void es1888_init(void);
+
+/* ns87312.c */
+extern void ns87312_enable_ide(long ide_base);
+
+/* ../lib/fpreg.c */
+extern void alpha_write_fp_reg (unsigned long reg, unsigned long val);
+extern unsigned long alpha_read_fp_reg (unsigned long reg);
+
+/* head.S */
+extern void wrmces(unsigned long mces);
+extern void cserve_ena(unsigned long);
+extern void cserve_dis(unsigned long);
+extern void __smp_callin(unsigned long);
+
+/* entry.S */
+extern void entArith(void);
+extern void entIF(void);
+extern void entInt(void);
+extern void entMM(void);
+extern void entSys(void);
+extern void entUna(void);
+extern void entDbg(void);
+
+/* ptrace.c */
+extern int ptrace_set_bpt (struct task_struct *child);
+extern int ptrace_cancel_bpt (struct task_struct *child);
+
+/* traps.c */
+extern void dik_show_regs(struct pt_regs *regs, unsigned long *r9_15);
+extern void die_if_kernel(char *, struct pt_regs *, long, unsigned long *);
+
+/* sys_titan.c */
+extern void titan_dispatch_irqs(u64, struct pt_regs *);
+
+/* ../mm/init.c */
+extern void switch_to_system_map(void);
+extern void srm_paging_stop(void);
+
+/* ../mm/remap.c */
+extern int __alpha_remap_area_pages(unsigned long, unsigned long,
+ unsigned long, unsigned long);
+
+/* irq.c */
+
+#ifdef CONFIG_SMP
+#define mcheck_expected(cpu) (cpu_data[cpu].mcheck_expected)
+#define mcheck_taken(cpu) (cpu_data[cpu].mcheck_taken)
+#define mcheck_extra(cpu) (cpu_data[cpu].mcheck_extra)
+#else
+extern struct mcheck_info
+{
+ unsigned char expected __attribute__((aligned(8)));
+ unsigned char taken;
+ unsigned char extra;
+} __mcheck_info;
+
+#define mcheck_expected(cpu) (*((void)(cpu), &__mcheck_info.expected))
+#define mcheck_taken(cpu) (*((void)(cpu), &__mcheck_info.taken))
+#define mcheck_extra(cpu) (*((void)(cpu), &__mcheck_info.extra))
+#endif
+
+extern void process_mcheck_info(unsigned long vector, unsigned long la_ptr,
+ struct pt_regs *regs, const char *machine,
+ int expected);
diff --git a/arch/alpha/kernel/ptrace.c b/arch/alpha/kernel/ptrace.c
new file mode 100644
index 000000000000..d00583161574
--- /dev/null
+++ b/arch/alpha/kernel/ptrace.c
@@ -0,0 +1,415 @@
+/* ptrace.c */
+/* By Ross Biro 1/23/92 */
+/* edited by Linus Torvalds */
+/* mangled further by Bob Manson (manson@santafe.edu) */
+/* more mutilation by David Mosberger (davidm@azstarnet.com) */
+
+#include <linux/kernel.h>
+#include <linux/sched.h>
+#include <linux/mm.h>
+#include <linux/smp.h>
+#include <linux/smp_lock.h>
+#include <linux/errno.h>
+#include <linux/ptrace.h>
+#include <linux/user.h>
+#include <linux/slab.h>
+#include <linux/security.h>
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/fpu.h>
+
+#include "proto.h"
+
+#define DEBUG DBG_MEM
+#undef DEBUG
+
+#ifdef DEBUG
+enum {
+ DBG_MEM = (1<<0),
+ DBG_BPT = (1<<1),
+ DBG_MEM_ALL = (1<<2)
+};
+#define DBG(fac,args) {if ((fac) & DEBUG) printk args;}
+#else
+#define DBG(fac,args)
+#endif
+
+#define BREAKINST 0x00000080 /* call_pal bpt */
+
+/*
+ * This does not yet catch signals sent when the child dies;
+ * that needs to be handled in exit.c or in signal.c.
+ */
+
+/*
+ * Processes always block with the following stack-layout:
+ *
+ * +================================+ <---- task + 2*PAGE_SIZE
+ * | PALcode saved frame (ps, pc, | ^
+ * | gp, a0, a1, a2) | |
+ * +================================+ | struct pt_regs
+ * | | |
+ * | frame generated by SAVE_ALL | |
+ * | | v
+ * +================================+
+ * | | ^
+ * | frame saved by do_switch_stack | | struct switch_stack
+ * | | v
+ * +================================+
+ */
+
+/*
+ * The following table maps a register index into the stack offset at
+ * which the register is saved. Register indices are 0-31 for integer
+ * regs, 32-63 for fp regs, and 64 for the pc. Notice that sp and
+ * zero have no stack-slot and need to be treated specially (see
+ * get_reg/put_reg below).
+ */
+enum {
+ REG_R0 = 0, REG_F0 = 32, REG_FPCR = 63, REG_PC = 64
+};
+
+static int regoff[] = {
+ PT_REG( r0), PT_REG( r1), PT_REG( r2), PT_REG( r3),
+ PT_REG( r4), PT_REG( r5), PT_REG( r6), PT_REG( r7),
+ PT_REG( r8), SW_REG( r9), SW_REG( r10), SW_REG( r11),
+ SW_REG( r12), SW_REG( r13), SW_REG( r14), SW_REG( r15),
+ PT_REG( r16), PT_REG( r17), PT_REG( r18), PT_REG( r19),
+ PT_REG( r20), PT_REG( r21), PT_REG( r22), PT_REG( r23),
+ PT_REG( r24), PT_REG( r25), PT_REG( r26), PT_REG( r27),
+ PT_REG( r28), PT_REG( gp), -1, -1,
+ SW_REG(fp[ 0]), SW_REG(fp[ 1]), SW_REG(fp[ 2]), SW_REG(fp[ 3]),
+ SW_REG(fp[ 4]), SW_REG(fp[ 5]), SW_REG(fp[ 6]), SW_REG(fp[ 7]),
+ SW_REG(fp[ 8]), SW_REG(fp[ 9]), SW_REG(fp[10]), SW_REG(fp[11]),
+ SW_REG(fp[12]), SW_REG(fp[13]), SW_REG(fp[14]), SW_REG(fp[15]),
+ SW_REG(fp[16]), SW_REG(fp[17]), SW_REG(fp[18]), SW_REG(fp[19]),
+ SW_REG(fp[20]), SW_REG(fp[21]), SW_REG(fp[22]), SW_REG(fp[23]),
+ SW_REG(fp[24]), SW_REG(fp[25]), SW_REG(fp[26]), SW_REG(fp[27]),
+ SW_REG(fp[28]), SW_REG(fp[29]), SW_REG(fp[30]), SW_REG(fp[31]),
+ PT_REG( pc)
+};
+
+static unsigned long zero;
+
+/*
+ * Get address of register REGNO in task TASK.
+ */
+static unsigned long *
+get_reg_addr(struct task_struct * task, unsigned long regno)
+{
+ unsigned long *addr;
+
+ if (regno == 30) {
+ addr = &task->thread_info->pcb.usp;
+ } else if (regno == 65) {
+ addr = &task->thread_info->pcb.unique;
+ } else if (regno == 31 || regno > 65) {
+ zero = 0;
+ addr = &zero;
+ } else {
+ addr = (void *)task->thread_info + regoff[regno];
+ }
+ return addr;
+}
+
+/*
+ * Get contents of register REGNO in task TASK.
+ */
+static unsigned long
+get_reg(struct task_struct * task, unsigned long regno)
+{
+ /* Special hack for fpcr -- combine hardware and software bits. */
+ if (regno == 63) {
+ unsigned long fpcr = *get_reg_addr(task, regno);
+ unsigned long swcr
+ = task->thread_info->ieee_state & IEEE_SW_MASK;
+ swcr = swcr_update_status(swcr, fpcr);
+ return fpcr | swcr;
+ }
+ return *get_reg_addr(task, regno);
+}
+
+/*
+ * Write contents of register REGNO in task TASK.
+ */
+static int
+put_reg(struct task_struct *task, unsigned long regno, unsigned long data)
+{
+ if (regno == 63) {
+ task->thread_info->ieee_state
+ = ((task->thread_info->ieee_state & ~IEEE_SW_MASK)
+ | (data & IEEE_SW_MASK));
+ data = (data & FPCR_DYN_MASK) | ieee_swcr_to_fpcr(data);
+ }
+ *get_reg_addr(task, regno) = data;
+ return 0;
+}
+
+static inline int
+read_int(struct task_struct *task, unsigned long addr, int * data)
+{
+ int copied = access_process_vm(task, addr, data, sizeof(int), 0);
+ return (copied == sizeof(int)) ? 0 : -EIO;
+}
+
+static inline int
+write_int(struct task_struct *task, unsigned long addr, int data)
+{
+ int copied = access_process_vm(task, addr, &data, sizeof(int), 1);
+ return (copied == sizeof(int)) ? 0 : -EIO;
+}
+
+/*
+ * Set breakpoint.
+ */
+int
+ptrace_set_bpt(struct task_struct * child)
+{
+ int displ, i, res, reg_b, nsaved = 0;
+ unsigned int insn, op_code;
+ unsigned long pc;
+
+ pc = get_reg(child, REG_PC);
+ res = read_int(child, pc, (int *) &insn);
+ if (res < 0)
+ return res;
+
+ op_code = insn >> 26;
+ if (op_code >= 0x30) {
+ /*
+ * It's a branch: instead of trying to figure out
+ * whether the branch will be taken or not, we'll put
+	 * a breakpoint at both possible locations.  This is simpler,
+ * more reliable, and probably not a whole lot slower
+ * than the alternative approach of emulating the
+ * branch (emulation can be tricky for fp branches).
+ */
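+		/* The 21-bit branch displacement sits in insn bits 20:0,
+		   counted in instructions; (insn << 11) >> 9 sign-extends
+		   it and scales it to a byte offset in one step. */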
+ displ = ((s32)(insn << 11)) >> 9;
+ child->thread_info->bpt_addr[nsaved++] = pc + 4;
+ if (displ) /* guard against unoptimized code */
+ child->thread_info->bpt_addr[nsaved++]
+ = pc + 4 + displ;
+ DBG(DBG_BPT, ("execing branch\n"));
+ } else if (op_code == 0x1a) {
+ reg_b = (insn >> 16) & 0x1f;
+ child->thread_info->bpt_addr[nsaved++] = get_reg(child, reg_b);
+ DBG(DBG_BPT, ("execing jump\n"));
+ } else {
+ child->thread_info->bpt_addr[nsaved++] = pc + 4;
+ DBG(DBG_BPT, ("execing normal insn\n"));
+ }
+
+ /* install breakpoints: */
+ for (i = 0; i < nsaved; ++i) {
+ res = read_int(child, child->thread_info->bpt_addr[i],
+ (int *) &insn);
+ if (res < 0)
+ return res;
+ child->thread_info->bpt_insn[i] = insn;
+ DBG(DBG_BPT, (" -> next_pc=%lx\n",
+ child->thread_info->bpt_addr[i]));
+ res = write_int(child, child->thread_info->bpt_addr[i],
+ BREAKINST);
+ if (res < 0)
+ return res;
+ }
+ child->thread_info->bpt_nsaved = nsaved;
+ return 0;
+}
+
+/*
+ * Ensure no single-step breakpoint is pending. Returns non-zero
+ * value if child was being single-stepped.
+ */
+int
+ptrace_cancel_bpt(struct task_struct * child)
+{
+ int i, nsaved = child->thread_info->bpt_nsaved;
+
+ child->thread_info->bpt_nsaved = 0;
+
+ if (nsaved > 2) {
+ printk("ptrace_cancel_bpt: bogus nsaved: %d!\n", nsaved);
+ nsaved = 2;
+ }
+
+ for (i = 0; i < nsaved; ++i) {
+ write_int(child, child->thread_info->bpt_addr[i],
+ child->thread_info->bpt_insn[i]);
+ }
+ return (nsaved != 0);
+}
+
+/*
+ * Called by kernel/ptrace.c when detaching..
+ *
+ * Make sure the single step bit is not set.
+ */
+void ptrace_disable(struct task_struct *child)
+{
+ ptrace_cancel_bpt(child);
+}
+
+asmlinkage long
+do_sys_ptrace(long request, long pid, long addr, long data,
+ struct pt_regs *regs)
+{
+ struct task_struct *child;
+ unsigned long tmp;
+ size_t copied;
+ long ret;
+
+ lock_kernel();
+ DBG(DBG_MEM, ("request=%ld pid=%ld addr=0x%lx data=0x%lx\n",
+ request, pid, addr, data));
+ ret = -EPERM;
+ if (request == PTRACE_TRACEME) {
+ /* are we already being traced? */
+ if (current->ptrace & PT_PTRACED)
+ goto out_notsk;
+ ret = security_ptrace(current->parent, current);
+ if (ret)
+ goto out_notsk;
+ /* set the ptrace bit in the process ptrace flags. */
+ current->ptrace |= PT_PTRACED;
+ ret = 0;
+ goto out_notsk;
+ }
+ if (pid == 1) /* you may not mess with init */
+ goto out_notsk;
+
+ ret = -ESRCH;
+ read_lock(&tasklist_lock);
+ child = find_task_by_pid(pid);
+ if (child)
+ get_task_struct(child);
+ read_unlock(&tasklist_lock);
+ if (!child)
+ goto out_notsk;
+
+ if (request == PTRACE_ATTACH) {
+ ret = ptrace_attach(child);
+ goto out;
+ }
+
+ ret = ptrace_check_attach(child, request == PTRACE_KILL);
+ if (ret < 0)
+ goto out;
+
+ switch (request) {
+ /* When I and D space are separate, these will need to be fixed. */
+ case PTRACE_PEEKTEXT: /* read word at location addr. */
+ case PTRACE_PEEKDATA:
+ copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 0);
+ ret = -EIO;
+ if (copied != sizeof(tmp))
+ break;
+
+ regs->r0 = 0; /* special return: no errors */
+ ret = tmp;
+ break;
+
+ /* Read register number ADDR. */
+ case PTRACE_PEEKUSR:
+ regs->r0 = 0; /* special return: no errors */
+ ret = get_reg(child, addr);
+ DBG(DBG_MEM, ("peek $%ld->%#lx\n", addr, ret));
+ break;
+
+ /* When I and D space are separate, this will have to be fixed. */
+ case PTRACE_POKETEXT: /* write the word at location addr. */
+ case PTRACE_POKEDATA:
+ tmp = data;
+ copied = access_process_vm(child, addr, &tmp, sizeof(tmp), 1);
+ ret = (copied == sizeof(tmp)) ? 0 : -EIO;
+ break;
+
+ case PTRACE_POKEUSR: /* write the specified register */
+ DBG(DBG_MEM, ("poke $%ld<-%#lx\n", addr, data));
+ ret = put_reg(child, addr, data);
+ break;
+
+ case PTRACE_SYSCALL:
+ /* continue and stop at next (return from) syscall */
+ case PTRACE_CONT: /* restart after signal. */
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ break;
+ if (request == PTRACE_SYSCALL)
+ set_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ else
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ child->exit_code = data;
+ /* make sure single-step breakpoint is gone. */
+ ptrace_cancel_bpt(child);
+ wake_up_process(child);
+ ret = 0;
+ break;
+
+ /*
+	 * Make the child exit.  The best we can do is send it a sigkill;
+	 * perhaps it should be put in the status that it wants to
+ * exit.
+ */
+ case PTRACE_KILL:
+ ret = 0;
+ if (child->exit_state == EXIT_ZOMBIE)
+ break;
+ child->exit_code = SIGKILL;
+ /* make sure single-step breakpoint is gone. */
+ ptrace_cancel_bpt(child);
+ wake_up_process(child);
+ goto out;
+
+ case PTRACE_SINGLESTEP: /* execute single instruction. */
+ ret = -EIO;
+ if ((unsigned long) data > _NSIG)
+ break;
+ /* Mark single stepping. */
+ child->thread_info->bpt_nsaved = -1;
+ clear_tsk_thread_flag(child, TIF_SYSCALL_TRACE);
+ child->exit_code = data;
+ wake_up_process(child);
+ /* give it a chance to run. */
+ ret = 0;
+ goto out;
+
+ case PTRACE_DETACH: /* detach a process that was attached. */
+ ret = ptrace_detach(child, data);
+ goto out;
+
+ default:
+ ret = ptrace_request(child, request, addr, data);
+ goto out;
+ }
+ out:
+ put_task_struct(child);
+ out_notsk:
+ unlock_kernel();
+ return ret;
+}
+
+asmlinkage void
+syscall_trace(void)
+{
+ if (!test_thread_flag(TIF_SYSCALL_TRACE))
+ return;
+ if (!(current->ptrace & PT_PTRACED))
+ return;
+ /* The 0x80 provides a way for the tracing parent to distinguish
+ between a syscall stop and SIGTRAP delivery */
+ ptrace_notify(SIGTRAP | ((current->ptrace & PT_TRACESYSGOOD)
+ ? 0x80 : 0));
+
+ /*
+ * This isn't the same as continuing with a signal, but it will do
+ * for normal use. strace only continues with a signal if the
+ * stopping signal is not SIGTRAP. -brl
+ */
+ if (current->exit_code) {
+ send_sig(current->exit_code, current, 1);
+ current->exit_code = 0;
+ }
+}
diff --git a/arch/alpha/kernel/semaphore.c b/arch/alpha/kernel/semaphore.c
new file mode 100644
index 000000000000..8c8aaa205eae
--- /dev/null
+++ b/arch/alpha/kernel/semaphore.c
@@ -0,0 +1,224 @@
+/*
+ * Alpha semaphore implementation.
+ *
+ * (C) Copyright 1996 Linus Torvalds
+ * (C) Copyright 1999, 2000 Richard Henderson
+ */
+
+#include <linux/errno.h>
+#include <linux/sched.h>
+#include <linux/init.h>
+
+/*
+ * This is basically the PPC semaphore scheme ported to use
+ * the Alpha ll/sc sequences, so see the PPC code for
+ * credits.
+ */
+
+/*
+ * Atomically update sem->count.
+ * This does the equivalent of the following:
+ *
+ * old_count = sem->count;
+ * tmp = MAX(old_count, 0) + incr;
+ * sem->count = tmp;
+ * return old_count;
+ */
+static inline int __sem_update_count(struct semaphore *sem, int incr)
+{
+ long old_count, tmp = 0;
+
+ __asm__ __volatile__(
+ "1: ldl_l %0,%2\n"
+ " cmovgt %0,%0,%1\n"
+ " addl %1,%3,%1\n"
+ " stl_c %1,%2\n"
+ " beq %1,2f\n"
+ " mb\n"
+ ".subsection 2\n"
+ "2: br 1b\n"
+ ".previous"
+ : "=&r" (old_count), "=&r" (tmp), "=m" (sem->count)
+ : "Ir" (incr), "1" (tmp), "m" (sem->count));
+
+ return old_count;
+}
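+
+/* Worked transitions for incr == -1, as a sketch:
+   count  1 -> 0,  returns  1  (semaphore acquired);
+   count  0 -> -1, returns  0  (caller must sleep);
+   count -2 -> -1, returns -2  (still contended).
+   The cmovgt clamps a negative count to 0 before adding, which is
+   what makes any negative count equivalent to 0. */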
+
+/*
+ * Perform the "down" function. Return zero for semaphore acquired,
+ * return negative if the caller was signalled out of the function.
+ *
+ * If called from down, the return is ignored and the wait loop is
+ * not interruptible. This means that a task waiting on a semaphore
+ * using "down()" cannot be killed until someone does an "up()" on
+ * the semaphore.
+ *
+ * If called from down_interruptible, the return value gets checked
+ * upon return. If the return value is negative then the task continues
+ * with the negative value in the return register (it can be tested by
+ * the caller).
+ *
+ * Either form may be used in conjunction with "up()".
+ */
+
+void __sched
+__down_failed(struct semaphore *sem)
+{
+ struct task_struct *tsk = current;
+ DECLARE_WAITQUEUE(wait, tsk);
+
+#ifdef CONFIG_DEBUG_SEMAPHORE
+ printk("%s(%d): down failed(%p)\n",
+ tsk->comm, tsk->pid, sem);
+#endif
+
+ tsk->state = TASK_UNINTERRUPTIBLE;
+ wmb();
+ add_wait_queue_exclusive(&sem->wait, &wait);
+
+ /*
+ * Try to get the semaphore. If the count is > 0, then we've
+ * got the semaphore; we decrement count and exit the loop.
+ * If the count is 0 or negative, we set it to -1, indicating
+ * that we are asleep, and then sleep.
+ */
+ while (__sem_update_count(sem, -1) <= 0) {
+ schedule();
+ set_task_state(tsk, TASK_UNINTERRUPTIBLE);
+ }
+ remove_wait_queue(&sem->wait, &wait);
+ tsk->state = TASK_RUNNING;
+
+ /*
+ * If there are any more sleepers, wake one of them up so
+ * that it can either get the semaphore, or set count to -1
+ * indicating that there are still processes sleeping.
+ */
+ wake_up(&sem->wait);
+
+#ifdef CONFIG_DEBUG_SEMAPHORE
+ printk("%s(%d): down acquired(%p)\n",
+ tsk->comm, tsk->pid, sem);
+#endif
+}
+
+int __sched
+__down_failed_interruptible(struct semaphore *sem)
+{
+ struct task_struct *tsk = current;
+ DECLARE_WAITQUEUE(wait, tsk);
+ long ret = 0;
+
+#ifdef CONFIG_DEBUG_SEMAPHORE
+ printk("%s(%d): down failed(%p)\n",
+ tsk->comm, tsk->pid, sem);
+#endif
+
+ tsk->state = TASK_INTERRUPTIBLE;
+ wmb();
+ add_wait_queue_exclusive(&sem->wait, &wait);
+
+ while (__sem_update_count(sem, -1) <= 0) {
+ if (signal_pending(current)) {
+ /*
+ * A signal is pending - give up trying.
+ * Set sem->count to 0 if it is negative,
+ * since we are no longer sleeping.
+ */
+ __sem_update_count(sem, 0);
+ ret = -EINTR;
+ break;
+ }
+ schedule();
+ set_task_state(tsk, TASK_INTERRUPTIBLE);
+ }
+
+ remove_wait_queue(&sem->wait, &wait);
+ tsk->state = TASK_RUNNING;
+ wake_up(&sem->wait);
+
+#ifdef CONFIG_DEBUG_SEMAPHORE
+ printk("%s(%d): down %s(%p)\n",
+ current->comm, current->pid,
+ (ret < 0 ? "interrupted" : "acquired"), sem);
+#endif
+ return ret;
+}
+
+void
+__up_wakeup(struct semaphore *sem)
+{
+ /*
+ * Note that we incremented count in up() before we came here,
+ * but that was ineffective since the result was <= 0, and
+ * any negative value of count is equivalent to 0.
+ * This ends up setting count to 1, unless count is now > 0
+ * (i.e. because some other cpu has called up() in the meantime),
+ * in which case we just increment count.
+ */
+ __sem_update_count(sem, 1);
+ wake_up(&sem->wait);
+}
+
+void __sched
+down(struct semaphore *sem)
+{
+#ifdef WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+#ifdef CONFIG_DEBUG_SEMAPHORE
+ printk("%s(%d): down(%p) <count=%d> from %p\n",
+ current->comm, current->pid, sem,
+ atomic_read(&sem->count), __builtin_return_address(0));
+#endif
+ __down(sem);
+}
+
+int __sched
+down_interruptible(struct semaphore *sem)
+{
+#ifdef WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+#ifdef CONFIG_DEBUG_SEMAPHORE
+ printk("%s(%d): down(%p) <count=%d> from %p\n",
+ current->comm, current->pid, sem,
+ atomic_read(&sem->count), __builtin_return_address(0));
+#endif
+ return __down_interruptible(sem);
+}
+
+int
+down_trylock(struct semaphore *sem)
+{
+ int ret;
+
+#ifdef WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+
+ ret = __down_trylock(sem);
+
+#ifdef CONFIG_DEBUG_SEMAPHORE
+ printk("%s(%d): down_trylock %s from %p\n",
+ current->comm, current->pid,
+ ret ? "failed" : "acquired",
+ __builtin_return_address(0));
+#endif
+
+ return ret;
+}
+
+void
+up(struct semaphore *sem)
+{
+#ifdef WAITQUEUE_DEBUG
+ CHECK_MAGIC(sem->__magic);
+#endif
+#ifdef CONFIG_DEBUG_SEMAPHORE
+ printk("%s(%d): up(%p) <count=%d> from %p\n",
+ current->comm, current->pid, sem,
+ atomic_read(&sem->count), __builtin_return_address(0));
+#endif
+ __up(sem);
+}
diff --git a/arch/alpha/kernel/setup.c b/arch/alpha/kernel/setup.c
new file mode 100644
index 000000000000..b4e5f8ff2b25
--- /dev/null
+++ b/arch/alpha/kernel/setup.c
@@ -0,0 +1,1486 @@
+/*
+ * linux/arch/alpha/kernel/setup.c
+ *
+ * Copyright (C) 1995 Linus Torvalds
+ */
+
+/* 2.3.x bootmem, 1999 Andrea Arcangeli <andrea@suse.de> */
+
+/*
+ * Bootup setup stuff.
+ */
+
+#include <linux/sched.h>
+#include <linux/kernel.h>
+#include <linux/mm.h>
+#include <linux/stddef.h>
+#include <linux/unistd.h>
+#include <linux/ptrace.h>
+#include <linux/slab.h>
+#include <linux/user.h>
+#include <linux/a.out.h>
+#include <linux/tty.h>
+#include <linux/delay.h>
+#include <linux/config.h> /* CONFIG_ALPHA_LCA etc */
+#include <linux/mc146818rtc.h>
+#include <linux/console.h>
+#include <linux/errno.h>
+#include <linux/init.h>
+#include <linux/string.h>
+#include <linux/ioport.h>
+#include <linux/bootmem.h>
+#include <linux/pci.h>
+#include <linux/seq_file.h>
+#include <linux/root_dev.h>
+#include <linux/initrd.h>
+#include <linux/eisa.h>
+#ifdef CONFIG_MAGIC_SYSRQ
+#include <linux/sysrq.h>
+#include <linux/reboot.h>
+#endif
+#include <linux/notifier.h>
+#include <asm/setup.h>
+#include <asm/io.h>
+
+extern struct notifier_block *panic_notifier_list;
+static int alpha_panic_event(struct notifier_block *, unsigned long, void *);
+static struct notifier_block alpha_panic_block = {
+ alpha_panic_event,
+ NULL,
+ INT_MAX /* try to do it first */
+};
+
+#include <asm/uaccess.h>
+#include <asm/pgtable.h>
+#include <asm/system.h>
+#include <asm/hwrpb.h>
+#include <asm/dma.h>
+#include <asm/io.h>
+#include <asm/mmu_context.h>
+#include <asm/console.h>
+
+#include "proto.h"
+#include "pci_impl.h"
+
+
+struct hwrpb_struct *hwrpb;
+unsigned long srm_hae;
+
+int alpha_l1i_cacheshape;
+int alpha_l1d_cacheshape;
+int alpha_l2_cacheshape;
+int alpha_l3_cacheshape;
+
+#ifdef CONFIG_VERBOSE_MCHECK
+/* 0=minimum, 1=verbose, 2=all */
+/* These can be overridden via the command line, e.g. "verbose_mcheck=2". */
+unsigned long alpha_verbose_mcheck = CONFIG_VERBOSE_MCHECK_ON;
+#endif
+
+/* Which processor we booted from. */
+int boot_cpuid;
+
+/*
+ * Using SRM callbacks for initial console output. This works from
+ * setup_arch() time through the end of time_init(), as those places
+ * are under our (Alpha) control.
+ *
+ * "srmcons" specified in the boot command arguments allows us to
+ * see kernel messages during the period of time before the true
+ * console device is "registered" during console_init().
+ * As of this version (2.5.59), console_init() will call
+ * disable_early_printk() as the last action before initializing
+ * the console drivers. That's the last possible time srmcons can be
+ * unregistered without interfering with console behavior.
+ *
+ * By default, OFF; set it with a boot command arg of "srmcons" or
+ * "console=srm". The meaning of these two args is:
+ * "srmcons" - early callback prints
+ * "console=srm" - full callback based console, including early prints
+ */
+int srmcons_output = 0;
+
+/* Enforce a memory size limit; useful for testing. By default, none. */
+unsigned long mem_size_limit = 0;
+
+/* Set AGP GART window size (0 means disabled). */
+unsigned long alpha_agpgart_size = DEFAULT_AGP_APER_SIZE;
+
+#ifdef CONFIG_ALPHA_GENERIC
+struct alpha_machine_vector alpha_mv;
+int alpha_using_srm;
+#endif
+
+#define N(a) (sizeof(a)/sizeof(a[0]))
+
+static struct alpha_machine_vector *get_sysvec(unsigned long, unsigned long,
+ unsigned long);
+static struct alpha_machine_vector *get_sysvec_byname(const char *);
+static void get_sysnames(unsigned long, unsigned long, unsigned long,
+ char **, char **);
+static void determine_cpu_caches (unsigned int);
+
+static char command_line[COMMAND_LINE_SIZE];
+
+/*
+ * The format of "screen_info" is strange, and due to early
+ * i386-setup code. This is just enough to make the console
+ * code think we're on a VGA color display.
+ */
+
+struct screen_info screen_info = {
+ .orig_x = 0,
+ .orig_y = 25,
+ .orig_video_cols = 80,
+ .orig_video_lines = 25,
+ .orig_video_isVGA = 1,
+ .orig_video_points = 16
+};
+
+/*
+ * The direct map I/O window, if any. This should be the same
+ * for all busses, since it's used by virt_to_bus.
+ */
+
+unsigned long __direct_map_base;
+unsigned long __direct_map_size;
+
+/*
+ * Declare all of the machine vectors.
+ */
+
+/* GCC 2.7.2 (on alpha at least) is lame. It does not support either
+ __attribute__((weak)) or #pragma weak. Bypass it and talk directly
+ to the assembler. */
+
+#define WEAK(X) \
+ extern struct alpha_machine_vector X; \
+ asm(".weak "#X)
+
+WEAK(alcor_mv);
+WEAK(alphabook1_mv);
+WEAK(avanti_mv);
+WEAK(cabriolet_mv);
+WEAK(clipper_mv);
+WEAK(dp264_mv);
+WEAK(eb164_mv);
+WEAK(eb64p_mv);
+WEAK(eb66_mv);
+WEAK(eb66p_mv);
+WEAK(eiger_mv);
+WEAK(jensen_mv);
+WEAK(lx164_mv);
+WEAK(lynx_mv);
+WEAK(marvel_ev7_mv);
+WEAK(miata_mv);
+WEAK(mikasa_mv);
+WEAK(mikasa_primo_mv);
+WEAK(monet_mv);
+WEAK(nautilus_mv);
+WEAK(noname_mv);
+WEAK(noritake_mv);
+WEAK(noritake_primo_mv);
+WEAK(p2k_mv);
+WEAK(pc164_mv);
+WEAK(privateer_mv);
+WEAK(rawhide_mv);
+WEAK(ruffian_mv);
+WEAK(rx164_mv);
+WEAK(sable_mv);
+WEAK(sable_gamma_mv);
+WEAK(shark_mv);
+WEAK(sx164_mv);
+WEAK(takara_mv);
+WEAK(titan_mv);
+WEAK(webbrick_mv);
+WEAK(wildfire_mv);
+WEAK(xl_mv);
+WEAK(xlt_mv);
+
+#undef WEAK
+
+/*
+ * I/O resources inherited from PeeCees. Except for perhaps the
+ * turbochannel alphas, everyone has these on some sort of SuperIO chip.
+ *
+ * ??? If this becomes less standard, move the struct out into the
+ * machine vector.
+ */
+
+static void __init
+reserve_std_resources(void)
+{
+ static struct resource standard_io_resources[] = {
+ { .name = "rtc", .start = -1, .end = -1 },
+ { .name = "dma1", .start = 0x00, .end = 0x1f },
+ { .name = "pic1", .start = 0x20, .end = 0x3f },
+ { .name = "timer", .start = 0x40, .end = 0x5f },
+ { .name = "keyboard", .start = 0x60, .end = 0x6f },
+ { .name = "dma page reg", .start = 0x80, .end = 0x8f },
+ { .name = "pic2", .start = 0xa0, .end = 0xbf },
+ { .name = "dma2", .start = 0xc0, .end = 0xdf },
+ };
+
+ struct resource *io = &ioport_resource;
+ size_t i;
+
+ if (hose_head) {
+ struct pci_controller *hose;
+ for (hose = hose_head; hose; hose = hose->next)
+ if (hose->index == 0) {
+ io = hose->io_space;
+ break;
+ }
+ }
+
+ /* Fix up for the Jensen's queer RTC placement. */
+ standard_io_resources[0].start = RTC_PORT(0);
+ standard_io_resources[0].end = RTC_PORT(0) + 0x10;
+
+ for (i = 0; i < N(standard_io_resources); ++i)
+ request_resource(io, standard_io_resources+i);
+}
+
+#define PFN_UP(x) (((x) + PAGE_SIZE-1) >> PAGE_SHIFT)
+#define PFN_DOWN(x) ((x) >> PAGE_SHIFT)
+#define PFN_PHYS(x) ((x) << PAGE_SHIFT)
+#define PFN_MAX PFN_DOWN(0x80000000)
+#define for_each_mem_cluster(memdesc, cluster, i) \
+ for ((cluster) = (memdesc)->cluster, (i) = 0; \
+ (i) < (memdesc)->numclusters; (i)++, (cluster)++)
+
+static unsigned long __init
+get_mem_size_limit(char *s)
+{
+ unsigned long end = 0;
+ char *from = s;
+
+ end = simple_strtoul(from, &from, 0);
+ if ( *from == 'K' || *from == 'k' ) {
+ end = end << 10;
+ from++;
+ } else if ( *from == 'M' || *from == 'm' ) {
+ end = end << 20;
+ from++;
+ } else if ( *from == 'G' || *from == 'g' ) {
+ end = end << 30;
+ from++;
+ }
+ return end >> PAGE_SHIFT; /* Return the PFN of the limit. */
+}
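+
+/* Example: with the 8KB pages used on alpha (PAGE_SHIFT == 13),
+   "mem=512M" parses to 0x20000000 bytes and this returns PFN
+   0x10000. */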
+
+#ifdef CONFIG_BLK_DEV_INITRD
+void * __init
+move_initrd(unsigned long mem_limit)
+{
+ void *start;
+ unsigned long size;
+
+ size = initrd_end - initrd_start;
+ start = __alloc_bootmem(PAGE_ALIGN(size), PAGE_SIZE, 0);
+ if (!start || __pa(start) + size > mem_limit) {
+ initrd_start = initrd_end = 0;
+ return NULL;
+ }
+ memmove(start, (void *)initrd_start, size);
+ initrd_start = (unsigned long)start;
+ initrd_end = initrd_start + size;
+ printk("initrd moved to %p\n", start);
+ return start;
+}
+#endif
+
+#ifndef CONFIG_DISCONTIGMEM
+static void __init
+setup_memory(void *kernel_end)
+{
+ struct memclust_struct * cluster;
+ struct memdesc_struct * memdesc;
+ unsigned long start_kernel_pfn, end_kernel_pfn;
+ unsigned long bootmap_size, bootmap_pages, bootmap_start;
+ unsigned long start, end;
+ unsigned long i;
+
+ /* Find free clusters, and init and free the bootmem accordingly. */
+ memdesc = (struct memdesc_struct *)
+ (hwrpb->mddt_offset + (unsigned long) hwrpb);
+
+ for_each_mem_cluster(memdesc, cluster, i) {
+ printk("memcluster %lu, usage %01lx, start %8lu, end %8lu\n",
+ i, cluster->usage, cluster->start_pfn,
+ cluster->start_pfn + cluster->numpages);
+
+ /* Bit 0 is console/PALcode reserved. Bit 1 is
+ non-volatile memory -- we might want to mark
+ this for later. */
+ if (cluster->usage & 3)
+ continue;
+
+ end = cluster->start_pfn + cluster->numpages;
+ if (end > max_low_pfn)
+ max_low_pfn = end;
+ }
+
+ /*
+ * Except for the NUMA systems (wildfire, marvel) all of the
+ * Alpha systems we run on support 32GB of memory or less.
+ * Since the NUMA systems introduce large holes in memory addressing,
+ * we can get into a situation where there is not enough contiguous
+ * memory for the memory map.
+ *
+ * Limit memory to the first 32GB to limit the NUMA systems to
+	 * memory on their first node (wildfire) or first two (marvel) to avoid
+ * not being able to produce the memory map. In order to access
+ * all of the memory on the NUMA systems, build with discontiguous
+ * memory support.
+ *
+ * If the user specified a memory limit, let that memory limit stand.
+ */
+ if (!mem_size_limit)
+ mem_size_limit = (32ul * 1024 * 1024 * 1024) >> PAGE_SHIFT;
+
+ if (mem_size_limit && max_low_pfn >= mem_size_limit)
+ {
+ printk("setup: forcing memory size to %ldK (from %ldK).\n",
+ mem_size_limit << (PAGE_SHIFT - 10),
+ max_low_pfn << (PAGE_SHIFT - 10));
+ max_low_pfn = mem_size_limit;
+ }
+
+ /* Find the bounds of kernel memory. */
+ start_kernel_pfn = PFN_DOWN(KERNEL_START_PHYS);
+ end_kernel_pfn = PFN_UP(virt_to_phys(kernel_end));
+ bootmap_start = -1;
+
+ try_again:
+ if (max_low_pfn <= end_kernel_pfn)
+ panic("not enough memory to boot");
+
+ /* We need to know how many physically contiguous pages
+ we'll need for the bootmap. */
+ bootmap_pages = bootmem_bootmap_pages(max_low_pfn);
+
+ /* Now find a good region where to allocate the bootmap. */
+ for_each_mem_cluster(memdesc, cluster, i) {
+ if (cluster->usage & 3)
+ continue;
+
+ start = cluster->start_pfn;
+ end = start + cluster->numpages;
+ if (start >= max_low_pfn)
+ continue;
+ if (end > max_low_pfn)
+ end = max_low_pfn;
+ if (start < start_kernel_pfn) {
+ if (end > end_kernel_pfn
+ && end - end_kernel_pfn >= bootmap_pages) {
+ bootmap_start = end_kernel_pfn;
+ break;
+ } else if (end > start_kernel_pfn)
+ end = start_kernel_pfn;
+ } else if (start < end_kernel_pfn)
+ start = end_kernel_pfn;
+ if (end - start >= bootmap_pages) {
+ bootmap_start = start;
+ break;
+ }
+ }
+
+ if (bootmap_start == ~0UL) {
+ max_low_pfn >>= 1;
+ goto try_again;
+ }
+
+ /* Allocate the bootmap and mark the whole MM as reserved. */
+ bootmap_size = init_bootmem(bootmap_start, max_low_pfn);
+
+ /* Mark the free regions. */
+ for_each_mem_cluster(memdesc, cluster, i) {
+ if (cluster->usage & 3)
+ continue;
+
+ start = cluster->start_pfn;
+ end = cluster->start_pfn + cluster->numpages;
+ if (start >= max_low_pfn)
+ continue;
+ if (end > max_low_pfn)
+ end = max_low_pfn;
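+ /* Carve the kernel image out of this cluster before freeing it. */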
+ if (start < start_kernel_pfn) {
+ if (end > end_kernel_pfn) {
+ free_bootmem(PFN_PHYS(start),
+ (PFN_PHYS(start_kernel_pfn)
+ - PFN_PHYS(start)));
+ printk("freeing pages %ld:%ld\n",
+ start, start_kernel_pfn);
+ start = end_kernel_pfn;
+ } else if (end > start_kernel_pfn)
+ end = start_kernel_pfn;
+ } else if (start < end_kernel_pfn)
+ start = end_kernel_pfn;
+ if (start >= end)
+ continue;
+
+ free_bootmem(PFN_PHYS(start), PFN_PHYS(end) - PFN_PHYS(start));
+ printk("freeing pages %ld:%ld\n", start, end);
+ }
+
+ /* Reserve the bootmap memory. */
+ reserve_bootmem(PFN_PHYS(bootmap_start), bootmap_size);
+ printk("reserving pages %ld:%ld\n", bootmap_start, bootmap_start+PFN_UP(bootmap_size));
+
+#ifdef CONFIG_BLK_DEV_INITRD
+ initrd_start = INITRD_START;
+ if (initrd_start) {
+ initrd_end = initrd_start+INITRD_SIZE;
+ printk("Initial ramdisk at: 0x%p (%lu bytes)\n",
+ (void *) initrd_start, INITRD_SIZE);
+
+ if ((void *)initrd_end > phys_to_virt(PFN_PHYS(max_low_pfn))) {
+ if (!move_initrd(PFN_PHYS(max_low_pfn)))
+ printk("initrd extends beyond end of memory "
+ "(0x%08lx > 0x%p)\ndisabling initrd\n",
+ initrd_end,
+ phys_to_virt(PFN_PHYS(max_low_pfn)));
+ } else {
+ reserve_bootmem(virt_to_phys((void *)initrd_start),
+ INITRD_SIZE);
+ }
+ }
+#endif /* CONFIG_BLK_DEV_INITRD */
+}
+#else
+extern void setup_memory(void *);
+#endif /* !CONFIG_DISCONTIGMEM */
+
+int __init
+page_is_ram(unsigned long pfn)
+{
+ struct memclust_struct * cluster;
+ struct memdesc_struct * memdesc;
+ unsigned long i;
+
+ memdesc = (struct memdesc_struct *)
+ (hwrpb->mddt_offset + (unsigned long) hwrpb);
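+ /* RAM is any cluster not reserved for console/PALcode
+    (bit 0) or marked non-volatile (bit 1). */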
+ for_each_mem_cluster(memdesc, cluster, i) {
+ if (pfn >= cluster->start_pfn &&
+ pfn < cluster->start_pfn + cluster->numpages) {
+ return (cluster->usage & 3) ? 0 : 1;
+ }
+ }
+
+ return 0;
+}
+
+#undef PFN_UP
+#undef PFN_DOWN
+#undef PFN_PHYS
+#undef PFN_MAX
+
+void __init
+setup_arch(char **cmdline_p)
+{
+ extern char _end[];
+
+ struct alpha_machine_vector *vec = NULL;
+ struct percpu_struct *cpu;
+ char *type_name, *var_name, *p;
+ void *kernel_end = _end; /* end of kernel */
+ char *args = command_line;
+
+ hwrpb = (struct hwrpb_struct*) __va(INIT_HWRPB->phys_addr);
+ boot_cpuid = hard_smp_processor_id();
+
+ /*
+ * Pre-process the system type to make sure it will be valid.
+ *
+ * This may restore real CABRIO and EB66+ family names, i.e.
+ * EB64+ and EB66.
+ *
+ * Oh, and "white box" AS800 (aka DIGITAL Server 3000 series)
+ * and AS1200 (DIGITAL Server 5000 series) have the type as
+ * the negative of the real one.
+ */
+ if ((long)hwrpb->sys_type < 0) {
+ hwrpb->sys_type = -((long)hwrpb->sys_type);
+ hwrpb_update_checksum(hwrpb);
+ }
+
+ /* Register a call for panic conditions. */
+ notifier_chain_register(&panic_notifier_list, &alpha_panic_block);
+
+#ifdef CONFIG_ALPHA_GENERIC
+ /* Assume that we've booted from SRM if we haven't booted from MILO.
+ Detect the latter by looking for "MILO" in the system serial number. */
+ alpha_using_srm = strncmp((const char *)hwrpb->ssn, "MILO", 4) != 0;
+#endif
+
+ /* If we are using SRM, we want to allow callbacks
+ as early as possible, so do this NOW, and then
+ they should work immediately thereafter.
+ */
+ kernel_end = callback_init(kernel_end);
+
+ /*
+ * Locate the command line.
+ */
+ /* Hack for Jensen... since we're restricted to 8 or 16 chars for
+ boot flags depending on the boot mode, we need some shorthand.
+ This should do for installation. */
+ if (strcmp(COMMAND_LINE, "INSTALL") == 0) {
+ strlcpy(command_line, "root=/dev/fd0 load_ramdisk=1", sizeof command_line);
+ } else {
+ strlcpy(command_line, COMMAND_LINE, sizeof command_line);
+ }
+ strcpy(saved_command_line, command_line);
+ *cmdline_p = command_line;
+
+ /*
+ * Process command-line arguments.
+ */
+ while ((p = strsep(&args, " \t")) != NULL) {
+ if (!*p) continue;
+ if (strncmp(p, "alpha_mv=", 9) == 0) {
+ vec = get_sysvec_byname(p+9);
+ continue;
+ }
+ if (strncmp(p, "cycle=", 6) == 0) {
+ est_cycle_freq = simple_strtol(p+6, NULL, 0);
+ continue;
+ }
+ if (strncmp(p, "mem=", 4) == 0) {
+ mem_size_limit = get_mem_size_limit(p+4);
+ continue;
+ }
+ if (strncmp(p, "srmcons", 7) == 0) {
+ srmcons_output |= 1;
+ continue;
+ }
+ if (strncmp(p, "console=srm", 11) == 0) {
+ srmcons_output |= 2;
+ continue;
+ }
+ if (strncmp(p, "gartsize=", 9) == 0) {
+ alpha_agpgart_size =
+ get_mem_size_limit(p+9) << PAGE_SHIFT;
+ continue;
+ }
+#ifdef CONFIG_VERBOSE_MCHECK
+ if (strncmp(p, "verbose_mcheck=", 15) == 0) {
+ alpha_verbose_mcheck = simple_strtol(p+15, NULL, 0);
+ continue;
+ }
+#endif
+ }
+
+ /* Replace the command line, now that we've killed it with strsep. */
+ strcpy(command_line, saved_command_line);
+
+ /* If we want SRM console printk echoing early, do it now. */
+ if (alpha_using_srm && srmcons_output) {
+ register_srm_console();
+
+ /*
+ * If "console=srm" was specified, clear the srmcons_output
+ * flag now so that time.c won't unregister_srm_console
+ */
+ if (srmcons_output & 2)
+ srmcons_output = 0;
+ }
+
+#ifdef CONFIG_MAGIC_SYSRQ
+ /* If we're using SRM, make sysrq-b halt back to the prom,
+ not auto-reboot. */
+ if (alpha_using_srm) {
+ struct sysrq_key_op *op = __sysrq_get_key_op('b');
+ op->handler = (void *) machine_halt;
+ }
+#endif
+
+ /*
+ * Identify and reconfigure for the current system.
+ */
+ cpu = (struct percpu_struct*)((char*)hwrpb + hwrpb->processor_offset);
+
+ get_sysnames(hwrpb->sys_type, hwrpb->sys_variation,
+ cpu->type, &type_name, &var_name);
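+ /* A variation name of "0" is a placeholder meaning "none". */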
+ if (*var_name == '0')
+ var_name = "";
+
+ if (!vec) {
+ vec = get_sysvec(hwrpb->sys_type, hwrpb->sys_variation,
+ cpu->type);
+ }
+
+ if (!vec) {
+ panic("Unsupported system type: %s%s%s (%ld %ld)\n",
+ type_name, (*var_name ? " variation " : ""), var_name,
+ hwrpb->sys_type, hwrpb->sys_variation);
+ }
+ if (vec != &alpha_mv) {
+ alpha_mv = *vec;
+ }
+
+ printk("Booting "
+#ifdef CONFIG_ALPHA_GENERIC
+ "GENERIC "
+#endif
+ "on %s%s%s using machine vector %s from %s\n",
+ type_name, (*var_name ? " variation " : ""),
+ var_name, alpha_mv.vector_name,
+ (alpha_using_srm ? "SRM" : "MILO"));
+
+ printk("Major Options: "
+#ifdef CONFIG_SMP
+ "SMP "
+#endif
+#ifdef CONFIG_ALPHA_EV56
+ "EV56 "
+#endif
+#ifdef CONFIG_ALPHA_EV67
+ "EV67 "
+#endif
+#ifdef CONFIG_ALPHA_LEGACY_START_ADDRESS
+ "LEGACY_START "
+#endif
+#ifdef CONFIG_VERBOSE_MCHECK
+ "VERBOSE_MCHECK "
+#endif
+
+#ifdef CONFIG_DISCONTIGMEM
+ "DISCONTIGMEM "
+#ifdef CONFIG_NUMA
+ "NUMA "
+#endif
+#endif
+
+#ifdef CONFIG_DEBUG_SPINLOCK
+ "DEBUG_SPINLOCK "
+#endif
+#ifdef CONFIG_MAGIC_SYSRQ
+ "MAGIC_SYSRQ "
+#endif
+ "\n");
+
+ printk("Command line: %s\n", command_line);
+
+ /*
+ * Sync up the HAE.
+ * Save the SRM's current value for restoration.
+ */
+ srm_hae = *alpha_mv.hae_register;
+ __set_hae(alpha_mv.hae_cache);
+
+ /* Reset and enable correctable error reports. */
+ wrmces(0x7);
+
+ /* Find our memory. */
+ setup_memory(kernel_end);
+
+ /* First guess at cpu cache sizes. Do this before init_arch. */
+ determine_cpu_caches(cpu->type);
+
+ /* Initialize the machine. Usually has to do with setting up
+ DMA windows and the like. */
+ if (alpha_mv.init_arch)
+ alpha_mv.init_arch();
+
+ /* Reserve standard resources. */
+ reserve_std_resources();
+
+ /*
+ * Give us a default console. TGA users will see nothing until
+ * chr_dev_init is called, rather late in the boot sequence.
+ */
+
+#ifdef CONFIG_VT
+#if defined(CONFIG_VGA_CONSOLE)
+ conswitchp = &vga_con;
+#elif defined(CONFIG_DUMMY_CONSOLE)
+ conswitchp = &dummy_con;
+#endif
+#endif
+
+ /* Default root filesystem to sda2. */
+ ROOT_DEV = Root_SDA2;
+
+#ifdef CONFIG_EISA
+ /* FIXME: only set this when we actually have EISA in this box? */
+ EISA_bus = 1;
+#endif
+
+ /*
+ * Check ASN in HWRPB for validity, report if bad.
+ * FIXME: how was this failing? Should we trust it instead,
+ * and copy the value into alpha_mv.max_asn?
+ */
+
+ if (hwrpb->max_asn != MAX_ASN) {
+ printk("Max ASN from HWRPB is bad (0x%lx)\n", hwrpb->max_asn);
+ }
+
+ /*
+ * Identify the flock of penguins.
+ */
+
+#ifdef CONFIG_SMP
+ setup_smp();
+#endif
+ paging_init();
+}
+
+void __init
+disable_early_printk(void)
+{
+ if (alpha_using_srm && srmcons_output) {
+ unregister_srm_console();
+ srmcons_output = 0;
+ }
+}
+
+static char sys_unknown[] = "Unknown";
+static char systype_names[][16] = {
+ "0",
+ "ADU", "Cobra", "Ruby", "Flamingo", "Mannequin", "Jensen",
+ "Pelican", "Morgan", "Sable", "Medulla", "Noname",
+ "Turbolaser", "Avanti", "Mustang", "Alcor", "Tradewind",
+ "Mikasa", "EB64", "EB66", "EB64+", "AlphaBook1",
+ "Rawhide", "K2", "Lynx", "XL", "EB164", "Noritake",
+ "Cortex", "29", "Miata", "XXM", "Takara", "Yukon",
+ "Tsunami", "Wildfire", "CUSCO", "Eiger", "Titan", "Marvel"
+};
+
+static char unofficial_names[][8] = {"100", "Ruffian"};
+
+static char api_names[][16] = {"200", "Nautilus"};
+
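+/* Each *_indices table maps an HWRPB variation member ID to an
+   entry in the corresponding *_names (and *_vecs) table. */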
+static char eb164_names[][8] = {"EB164", "PC164", "LX164", "SX164", "RX164"};
+static int eb164_indices[] = {0,0,0,1,1,1,1,1,2,2,2,2,3,3,3,3,4};
+
+static char alcor_names[][16] = {"Alcor", "Maverick", "Bret"};
+static int alcor_indices[] = {0,0,0,1,1,1,0,0,0,0,0,0,2,2,2,2,2,2};
+
+static char eb64p_names[][16] = {"EB64+", "Cabriolet", "AlphaPCI64"};
+static int eb64p_indices[] = {0,0,1,2};
+
+static char eb66_names[][8] = {"EB66", "EB66+"};
+static int eb66_indices[] = {0,0,1};
+
+static char marvel_names[][16] = {
+ "Marvel/EV7"
+};
+static int marvel_indices[] = { 0 };
+
+static char rawhide_names[][16] = {
+ "Dodge", "Wrangler", "Durango", "Tincup", "DaVinci"
+};
+static int rawhide_indices[] = {0,0,0,1,1,2,2,3,3,4,4};
+
+static char titan_names[][16] = {
+ "DEFAULT", "Privateer", "Falcon", "Granite"
+};
+static int titan_indices[] = {0,1,2,2,3};
+
+static char tsunami_names[][16] = {
+ "0", "DP264", "Warhol", "Windjammer", "Monet", "Clipper",
+ "Goldrush", "Webbrick", "Catamaran", "Brisbane", "Melbourne",
+ "Flying Clipper", "Shark"
+};
+static int tsunami_indices[] = {0,1,2,3,4,5,6,7,8,9,10,11,12};
+
+static struct alpha_machine_vector * __init
+get_sysvec(unsigned long type, unsigned long variation, unsigned long cpu)
+{
+ static struct alpha_machine_vector *systype_vecs[] __initdata =
+ {
+ NULL, /* 0 */
+ NULL, /* ADU */
+ NULL, /* Cobra */
+ NULL, /* Ruby */
+ NULL, /* Flamingo */
+ NULL, /* Mannequin */
+ &jensen_mv,
+ NULL, /* Pelican */
+ NULL, /* Morgan */
+ NULL, /* Sable -- see below. */
+ NULL, /* Medulla */
+ &noname_mv,
+ NULL, /* Turbolaser */
+ &avanti_mv,
+ NULL, /* Mustang */
+ NULL, /* Alcor, Bret, Maverick. HWRPB inaccurate? */
+ NULL, /* Tradewind */
+ NULL, /* Mikasa -- see below. */
+ NULL, /* EB64 */
+ NULL, /* EB66 -- see variation. */
+ NULL, /* EB64+ -- see variation. */
+ &alphabook1_mv,
+ &rawhide_mv,
+ NULL, /* K2 */
+ &lynx_mv, /* Lynx */
+ &xl_mv,
+ NULL, /* EB164 -- see variation. */
+ NULL, /* Noritake -- see below. */
+ NULL, /* Cortex */
+ NULL, /* 29 */
+ &miata_mv,
+ NULL, /* XXM */
+ &takara_mv,
+ NULL, /* Yukon */
+ NULL, /* Tsunami -- see variation. */
+ &wildfire_mv, /* Wildfire */
+ NULL, /* CUSCO */
+ &eiger_mv, /* Eiger */
+ NULL, /* Titan */
+ NULL, /* Marvel */
+ };
+
+ static struct alpha_machine_vector *unofficial_vecs[] __initdata =
+ {
+ NULL, /* 100 */
+ &ruffian_mv,
+ };
+
+ static struct alpha_machine_vector *api_vecs[] __initdata =
+ {
+ NULL, /* 200 */
+ &nautilus_mv,
+ };
+
+ static struct alpha_machine_vector *alcor_vecs[] __initdata =
+ {
+ &alcor_mv, &xlt_mv, &xlt_mv
+ };
+
+ static struct alpha_machine_vector *eb164_vecs[] __initdata =
+ {
+ &eb164_mv, &pc164_mv, &lx164_mv, &sx164_mv, &rx164_mv
+ };
+
+ static struct alpha_machine_vector *eb64p_vecs[] __initdata =
+ {
+ &eb64p_mv,
+ &cabriolet_mv,
+ &cabriolet_mv /* AlphaPCI64 */
+ };
+
+ static struct alpha_machine_vector *eb66_vecs[] __initdata =
+ {
+ &eb66_mv,
+ &eb66p_mv
+ };
+
+ static struct alpha_machine_vector *marvel_vecs[] __initdata =
+ {
+ &marvel_ev7_mv,
+ };
+
+ static struct alpha_machine_vector *titan_vecs[] __initdata =
+ {
+ &titan_mv, /* default */
+ &privateer_mv, /* privateer */
+ &titan_mv, /* falcon */
+ &privateer_mv, /* granite */
+ };
+
+ static struct alpha_machine_vector *tsunami_vecs[] __initdata =
+ {
+ NULL,
+ &dp264_mv, /* dp264 */
+ &dp264_mv, /* warhol */
+ &dp264_mv, /* windjammer */
+ &monet_mv, /* monet */
+ &clipper_mv, /* clipper */
+ &dp264_mv, /* goldrush */
+ &webbrick_mv, /* webbrick */
+ &dp264_mv, /* catamaran */
+ NULL, /* brisbane? */
+ NULL, /* melbourne? */
+ NULL, /* flying clipper? */
+ &shark_mv, /* shark */
+ };
+
+ /* ??? Do we need to distinguish between Rawhides? */
+
+ struct alpha_machine_vector *vec;
+
+ /* Search the system tables first... */
+ vec = NULL;
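+ /* API and unofficial system types live in separate number
+    ranges, offset by ST_API_BIAS and ST_UNOFFICIAL_BIAS. */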
+ if (type < N(systype_vecs)) {
+ vec = systype_vecs[type];
+ } else if ((type > ST_API_BIAS) &&
+ (type - ST_API_BIAS) < N(api_vecs)) {
+ vec = api_vecs[type - ST_API_BIAS];
+ } else if ((type > ST_UNOFFICIAL_BIAS) &&
+ (type - ST_UNOFFICIAL_BIAS) < N(unofficial_vecs)) {
+ vec = unofficial_vecs[type - ST_UNOFFICIAL_BIAS];
+ }
+
+ /* If we've not found one, try for a variation. */
+
+ if (!vec) {
+ /* Member ID is a bit-field. */
+ unsigned long member = (variation >> 10) & 0x3f;
+
+ cpu &= 0xffffffff; /* make it usable */
+
+ switch (type) {
+ case ST_DEC_ALCOR:
+ if (member < N(alcor_indices))
+ vec = alcor_vecs[alcor_indices[member]];
+ break;
+ case ST_DEC_EB164:
+ if (member < N(eb164_indices))
+ vec = eb164_vecs[eb164_indices[member]];
+ /* A PC164 may show up as an EB164 variation with an EV56 CPU,
+    but no true EB164 ever had anything but an EV5. */
+ if (vec == &eb164_mv && cpu == EV56_CPU)
+ vec = &pc164_mv;
+ break;
+ case ST_DEC_EB64P:
+ if (member < N(eb64p_indices))
+ vec = eb64p_vecs[eb64p_indices[member]];
+ break;
+ case ST_DEC_EB66:
+ if (member < N(eb66_indices))
+ vec = eb66_vecs[eb66_indices[member]];
+ break;
+ case ST_DEC_MARVEL:
+ if (member < N(marvel_indices))
+ vec = marvel_vecs[marvel_indices[member]];
+ break;
+ case ST_DEC_TITAN:
+ vec = titan_vecs[0]; /* default */
+ if (member < N(titan_indices))
+ vec = titan_vecs[titan_indices[member]];
+ break;
+ case ST_DEC_TSUNAMI:
+ if (member < N(tsunami_indices))
+ vec = tsunami_vecs[tsunami_indices[member]];
+ break;
+ case ST_DEC_1000:
+ if (cpu == EV5_CPU || cpu == EV56_CPU)
+ vec = &mikasa_primo_mv;
+ else
+ vec = &mikasa_mv;
+ break;
+ case ST_DEC_NORITAKE:
+ if (cpu == EV5_CPU || cpu == EV56_CPU)
+ vec = &noritake_primo_mv;
+ else
+ vec = &noritake_mv;
+ break;
+ case ST_DEC_2100_A500:
+ if (cpu == EV5_CPU || cpu == EV56_CPU)
+ vec = &sable_gamma_mv;
+ else
+ vec = &sable_mv;
+ break;
+ }
+ }
+ return vec;
+}
+
+static struct alpha_machine_vector * __init
+get_sysvec_byname(const char *name)
+{
+ static struct alpha_machine_vector *all_vecs[] __initdata =
+ {
+ &alcor_mv,
+ &alphabook1_mv,
+ &avanti_mv,
+ &cabriolet_mv,
+ &clipper_mv,
+ &dp264_mv,
+ &eb164_mv,
+ &eb64p_mv,
+ &eb66_mv,
+ &eb66p_mv,
+ &eiger_mv,
+ &jensen_mv,
+ &lx164_mv,
+ &lynx_mv,
+ &miata_mv,
+ &mikasa_mv,
+ &mikasa_primo_mv,
+ &monet_mv,
+ &nautilus_mv,
+ &noname_mv,
+ &noritake_mv,
+ &noritake_primo_mv,
+ &p2k_mv,
+ &pc164_mv,
+ &privateer_mv,
+ &rawhide_mv,
+ &ruffian_mv,
+ &rx164_mv,
+ &sable_mv,
+ &sable_gamma_mv,
+ &shark_mv,
+ &sx164_mv,
+ &takara_mv,
+ &webbrick_mv,
+ &wildfire_mv,
+ &xl_mv,
+ &xlt_mv
+ };
+
+ size_t i;
+
+ for (i = 0; i < N(all_vecs); ++i) {
+ struct alpha_machine_vector *mv = all_vecs[i];
+ if (strcasecmp(mv->vector_name, name) == 0)
+ return mv;
+ }
+ return NULL;
+}
+
+static void
+get_sysnames(unsigned long type, unsigned long variation, unsigned long cpu,
+ char **type_name, char **variation_name)
+{
+ unsigned long member;