From d9b2b2a277219d4812311d995054ce4f95067725 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 13 Feb 2008 16:56:49 -0800 Subject: [LIB]: Make PowerPC LMB code generic so sparc64 can use it too. Signed-off-by: David S. Miller --- arch/powerpc/Kconfig | 1 + arch/powerpc/kernel/btext.c | 3 +- arch/powerpc/kernel/crash.c | 3 +- arch/powerpc/kernel/crash_dump.c | 3 +- arch/powerpc/kernel/machine_kexec.c | 3 +- arch/powerpc/kernel/prom.c | 2 +- arch/powerpc/kernel/rtas.c | 2 +- arch/powerpc/kernel/setup-common.c | 2 +- arch/powerpc/kernel/setup_64.c | 2 +- arch/powerpc/kernel/vdso.c | 3 +- arch/powerpc/mm/Makefile | 2 +- arch/powerpc/mm/hash_utils_64.c | 3 +- arch/powerpc/mm/init_32.c | 2 +- arch/powerpc/mm/init_64.c | 2 +- arch/powerpc/mm/lmb.c | 357 -------------------------------- arch/powerpc/mm/mem.c | 2 +- arch/powerpc/mm/numa.c | 3 +- arch/powerpc/mm/ppc_mmu_32.c | 2 +- arch/powerpc/mm/stab.c | 4 +- arch/powerpc/platforms/cell/iommu.c | 2 +- arch/powerpc/platforms/maple/setup.c | 2 +- arch/powerpc/platforms/powermac/setup.c | 2 +- arch/powerpc/platforms/ps3/htab.c | 3 +- arch/powerpc/platforms/ps3/mm.c | 3 +- arch/powerpc/platforms/ps3/os-area.c | 3 +- arch/powerpc/sysdev/dart_iommu.c | 2 +- arch/sparc64/Kconfig | 1 + include/asm-powerpc/abs_addr.h | 3 +- include/asm-powerpc/lmb.h | 82 +------- include/asm-sparc64/lmb.h | 10 + include/linux/lmb.h | 83 ++++++++ lib/Kconfig | 3 + lib/Makefile | 2 + lib/lmb.c | 354 +++++++++++++++++++++++++++++++ 34 files changed, 500 insertions(+), 456 deletions(-) delete mode 100644 arch/powerpc/mm/lmb.c create mode 100644 include/asm-sparc64/lmb.h create mode 100644 include/linux/lmb.h create mode 100644 lib/lmb.c diff --git a/arch/powerpc/Kconfig b/arch/powerpc/Kconfig index 485513c9f1af..bd645f0d1e43 100644 --- a/arch/powerpc/Kconfig +++ b/arch/powerpc/Kconfig @@ -90,6 +90,7 @@ config PPC select HAVE_IDE select HAVE_OPROFILE select HAVE_KPROBES + select HAVE_LMB config EARLY_PRINTK bool diff --git a/arch/powerpc/kernel/btext.c b/arch/powerpc/kernel/btext.c index 80e2eef05b2e..9f9377745490 100644 --- a/arch/powerpc/kernel/btext.c +++ b/arch/powerpc/kernel/btext.c @@ -7,6 +7,7 @@ #include #include #include +#include #include #include @@ -15,7 +16,7 @@ #include #include #include -#include +#include #include #include diff --git a/arch/powerpc/kernel/crash.c b/arch/powerpc/kernel/crash.c index 571132ed12c1..eae401de3f76 100644 --- a/arch/powerpc/kernel/crash.c +++ b/arch/powerpc/kernel/crash.c @@ -24,12 +24,13 @@ #include #include #include +#include #include #include #include #include -#include +#include #include #include #include diff --git a/arch/powerpc/kernel/crash_dump.c b/arch/powerpc/kernel/crash_dump.c index 29ff77c468ac..9ee3c5278db0 100644 --- a/arch/powerpc/kernel/crash_dump.c +++ b/arch/powerpc/kernel/crash_dump.c @@ -13,8 +13,9 @@ #include #include +#include #include -#include +#include #include #include diff --git a/arch/powerpc/kernel/machine_kexec.c b/arch/powerpc/kernel/machine_kexec.c index c0c8e8c3ced9..2d202f274e73 100644 --- a/arch/powerpc/kernel/machine_kexec.c +++ b/arch/powerpc/kernel/machine_kexec.c @@ -12,8 +12,9 @@ #include #include #include +#include #include -#include +#include void machine_crash_shutdown(struct pt_regs *regs) { diff --git a/arch/powerpc/kernel/prom.c b/arch/powerpc/kernel/prom.c index 8b5efbce8d90..c17a5851bdb7 100644 --- a/arch/powerpc/kernel/prom.c +++ b/arch/powerpc/kernel/prom.c @@ -31,10 +31,10 @@ #include #include #include +#include #include #include -#include #include #include #include diff 
--git a/arch/powerpc/kernel/rtas.c b/arch/powerpc/kernel/rtas.c index 52e95c2158c0..e2e78d967f31 100644 --- a/arch/powerpc/kernel/rtas.c +++ b/arch/powerpc/kernel/rtas.c @@ -22,6 +22,7 @@ #include #include #include +#include #include #include @@ -34,7 +35,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/powerpc/kernel/setup-common.c b/arch/powerpc/kernel/setup-common.c index 6adb5a1e98bb..12cc41c16b0d 100644 --- a/arch/powerpc/kernel/setup-common.c +++ b/arch/powerpc/kernel/setup-common.c @@ -34,6 +34,7 @@ #include #include #include +#include #include #include #include @@ -56,7 +57,6 @@ #include #include #include -#include #include #include diff --git a/arch/powerpc/kernel/setup_64.c b/arch/powerpc/kernel/setup_64.c index 3b1529c103ef..2c2d8315193c 100644 --- a/arch/powerpc/kernel/setup_64.c +++ b/arch/powerpc/kernel/setup_64.c @@ -33,6 +33,7 @@ #include #include #include +#include #include #include #include @@ -55,7 +56,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/powerpc/kernel/vdso.c b/arch/powerpc/kernel/vdso.c index 3702df7dc567..d715048370df 100644 --- a/arch/powerpc/kernel/vdso.c +++ b/arch/powerpc/kernel/vdso.c @@ -21,13 +21,14 @@ #include #include #include +#include #include #include #include #include #include -#include +#include #include #include #include diff --git a/arch/powerpc/mm/Makefile b/arch/powerpc/mm/Makefile index 41649a5d3602..1c00e0196f6c 100644 --- a/arch/powerpc/mm/Makefile +++ b/arch/powerpc/mm/Makefile @@ -6,7 +6,7 @@ ifeq ($(CONFIG_PPC64),y) EXTRA_CFLAGS += -mno-minimal-toc endif -obj-y := fault.o mem.o lmb.o \ +obj-y := fault.o mem.o \ init_$(CONFIG_WORD_SIZE).o \ pgtable_$(CONFIG_WORD_SIZE).o \ mmu_context_$(CONFIG_WORD_SIZE).o diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 32f416175db1..73b78486abfc 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -41,7 +42,7 @@ #include #include #include -#include +#include #include #include #include diff --git a/arch/powerpc/mm/init_32.c b/arch/powerpc/mm/init_32.c index 977cb1ee5e72..59a725b8ece9 100644 --- a/arch/powerpc/mm/init_32.c +++ b/arch/powerpc/mm/init_32.c @@ -30,6 +30,7 @@ #include #include #include +#include #include #include @@ -41,7 +42,6 @@ #include #include #include -#include #include #include "mmu_decl.h" diff --git a/arch/powerpc/mm/init_64.c b/arch/powerpc/mm/init_64.c index c0f5cff77035..abeb0eb79313 100644 --- a/arch/powerpc/mm/init_64.c +++ b/arch/powerpc/mm/init_64.c @@ -38,11 +38,11 @@ #include #include #include +#include #include #include #include -#include #include #include #include diff --git a/arch/powerpc/mm/lmb.c b/arch/powerpc/mm/lmb.c deleted file mode 100644 index 4ce23bcf8a57..000000000000 --- a/arch/powerpc/mm/lmb.c +++ /dev/null @@ -1,357 +0,0 @@ -/* - * Procedures for maintaining information about logical memory blocks. - * - * Peter Bergner, IBM Corp. June 2001. - * Copyright (C) 2001 Peter Bergner. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. 
- */ - -#include -#include -#include -#include -#include -#include -#include -#ifdef CONFIG_PPC32 -#include "mmu_decl.h" /* for __max_low_memory */ -#endif - -#undef DEBUG - -#ifdef DEBUG -#include -#define DBG(fmt...) udbg_printf(fmt) -#else -#define DBG(fmt...) -#endif - -#define LMB_ALLOC_ANYWHERE 0 - -struct lmb lmb; - -void lmb_dump_all(void) -{ -#ifdef DEBUG - unsigned long i; - - DBG("lmb_dump_all:\n"); - DBG(" memory.cnt = 0x%lx\n", lmb.memory.cnt); - DBG(" memory.size = 0x%lx\n", lmb.memory.size); - for (i=0; i < lmb.memory.cnt ;i++) { - DBG(" memory.region[0x%x].base = 0x%lx\n", - i, lmb.memory.region[i].base); - DBG(" .size = 0x%lx\n", - lmb.memory.region[i].size); - } - - DBG("\n reserved.cnt = 0x%lx\n", lmb.reserved.cnt); - DBG(" reserved.size = 0x%lx\n", lmb.reserved.size); - for (i=0; i < lmb.reserved.cnt ;i++) { - DBG(" reserved.region[0x%x].base = 0x%lx\n", - i, lmb.reserved.region[i].base); - DBG(" .size = 0x%lx\n", - lmb.reserved.region[i].size); - } -#endif /* DEBUG */ -} - -static unsigned long __init lmb_addrs_overlap(unsigned long base1, - unsigned long size1, unsigned long base2, unsigned long size2) -{ - return ((base1 < (base2+size2)) && (base2 < (base1+size1))); -} - -static long __init lmb_addrs_adjacent(unsigned long base1, unsigned long size1, - unsigned long base2, unsigned long size2) -{ - if (base2 == base1 + size1) - return 1; - else if (base1 == base2 + size2) - return -1; - - return 0; -} - -static long __init lmb_regions_adjacent(struct lmb_region *rgn, - unsigned long r1, unsigned long r2) -{ - unsigned long base1 = rgn->region[r1].base; - unsigned long size1 = rgn->region[r1].size; - unsigned long base2 = rgn->region[r2].base; - unsigned long size2 = rgn->region[r2].size; - - return lmb_addrs_adjacent(base1, size1, base2, size2); -} - -static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r) -{ - unsigned long i; - - for (i = r; i < rgn->cnt - 1; i++) { - rgn->region[i].base = rgn->region[i + 1].base; - rgn->region[i].size = rgn->region[i + 1].size; - } - rgn->cnt--; -} - -/* Assumption: base addr of region 1 < base addr of region 2 */ -static void __init lmb_coalesce_regions(struct lmb_region *rgn, - unsigned long r1, unsigned long r2) -{ - rgn->region[r1].size += rgn->region[r2].size; - lmb_remove_region(rgn, r2); -} - -/* This routine called with relocation disabled. */ -void __init lmb_init(void) -{ - /* Create a dummy zero size LMB which will get coalesced away later. - * This simplifies the lmb_add() code below... - */ - lmb.memory.region[0].base = 0; - lmb.memory.region[0].size = 0; - lmb.memory.cnt = 1; - - /* Ditto. */ - lmb.reserved.region[0].base = 0; - lmb.reserved.region[0].size = 0; - lmb.reserved.cnt = 1; -} - -/* This routine may be called with relocation disabled. */ -void __init lmb_analyze(void) -{ - int i; - - lmb.memory.size = 0; - - for (i = 0; i < lmb.memory.cnt; i++) - lmb.memory.size += lmb.memory.region[i].size; -} - -/* This routine called with relocation disabled. */ -static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base, - unsigned long size) -{ - unsigned long coalesced = 0; - long adjacent, i; - - /* First try and coalesce this LMB with another. 
*/ - for (i=0; i < rgn->cnt; i++) { - unsigned long rgnbase = rgn->region[i].base; - unsigned long rgnsize = rgn->region[i].size; - - if ((rgnbase == base) && (rgnsize == size)) - /* Already have this region, so we're done */ - return 0; - - adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize); - if ( adjacent > 0 ) { - rgn->region[i].base -= size; - rgn->region[i].size += size; - coalesced++; - break; - } - else if ( adjacent < 0 ) { - rgn->region[i].size += size; - coalesced++; - break; - } - } - - if ((i < rgn->cnt-1) && lmb_regions_adjacent(rgn, i, i+1) ) { - lmb_coalesce_regions(rgn, i, i+1); - coalesced++; - } - - if (coalesced) - return coalesced; - if (rgn->cnt >= MAX_LMB_REGIONS) - return -1; - - /* Couldn't coalesce the LMB, so add it to the sorted table. */ - for (i = rgn->cnt-1; i >= 0; i--) { - if (base < rgn->region[i].base) { - rgn->region[i+1].base = rgn->region[i].base; - rgn->region[i+1].size = rgn->region[i].size; - } else { - rgn->region[i+1].base = base; - rgn->region[i+1].size = size; - break; - } - } - rgn->cnt++; - - return 0; -} - -/* This routine may be called with relocation disabled. */ -long __init lmb_add(unsigned long base, unsigned long size) -{ - struct lmb_region *_rgn = &(lmb.memory); - - /* On pSeries LPAR systems, the first LMB is our RMO region. */ - if (base == 0) - lmb.rmo_size = size; - - return lmb_add_region(_rgn, base, size); - -} - -long __init lmb_reserve(unsigned long base, unsigned long size) -{ - struct lmb_region *_rgn = &(lmb.reserved); - - BUG_ON(0 == size); - - return lmb_add_region(_rgn, base, size); -} - -long __init lmb_overlaps_region(struct lmb_region *rgn, unsigned long base, - unsigned long size) -{ - unsigned long i; - - for (i=0; i < rgn->cnt; i++) { - unsigned long rgnbase = rgn->region[i].base; - unsigned long rgnsize = rgn->region[i].size; - if ( lmb_addrs_overlap(base,size,rgnbase,rgnsize) ) { - break; - } - } - - return (i < rgn->cnt) ? i : -1; -} - -unsigned long __init lmb_alloc(unsigned long size, unsigned long align) -{ - return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE); -} - -unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align, - unsigned long max_addr) -{ - unsigned long alloc; - - alloc = __lmb_alloc_base(size, align, max_addr); - - if (alloc == 0) - panic("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n", - size, max_addr); - - return alloc; -} - -unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align, - unsigned long max_addr) -{ - long i, j; - unsigned long base = 0; - - BUG_ON(0 == size); - -#ifdef CONFIG_PPC32 - /* On 32-bit, make sure we allocate lowmem */ - if (max_addr == LMB_ALLOC_ANYWHERE) - max_addr = __max_low_memory; -#endif - for (i = lmb.memory.cnt-1; i >= 0; i--) { - unsigned long lmbbase = lmb.memory.region[i].base; - unsigned long lmbsize = lmb.memory.region[i].size; - - if (max_addr == LMB_ALLOC_ANYWHERE) - base = _ALIGN_DOWN(lmbbase + lmbsize - size, align); - else if (lmbbase < max_addr) { - base = min(lmbbase + lmbsize, max_addr); - base = _ALIGN_DOWN(base - size, align); - } else - continue; - - while ((lmbbase <= base) && - ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0) ) - base = _ALIGN_DOWN(lmb.reserved.region[j].base - size, - align); - - if ((base != 0) && (lmbbase <= base)) - break; - } - - if (i < 0) - return 0; - - lmb_add_region(&lmb.reserved, base, size); - - return base; -} - -/* You must call lmb_analyze() before this. 
*/ -unsigned long __init lmb_phys_mem_size(void) -{ - return lmb.memory.size; -} - -unsigned long __init lmb_end_of_DRAM(void) -{ - int idx = lmb.memory.cnt - 1; - - return (lmb.memory.region[idx].base + lmb.memory.region[idx].size); -} - -/* You must call lmb_analyze() after this. */ -void __init lmb_enforce_memory_limit(unsigned long memory_limit) -{ - unsigned long i, limit; - struct lmb_property *p; - - if (! memory_limit) - return; - - /* Truncate the lmb regions to satisfy the memory limit. */ - limit = memory_limit; - for (i = 0; i < lmb.memory.cnt; i++) { - if (limit > lmb.memory.region[i].size) { - limit -= lmb.memory.region[i].size; - continue; - } - - lmb.memory.region[i].size = limit; - lmb.memory.cnt = i + 1; - break; - } - - if (lmb.memory.region[0].size < lmb.rmo_size) - lmb.rmo_size = lmb.memory.region[0].size; - - /* And truncate any reserves above the limit also. */ - for (i = 0; i < lmb.reserved.cnt; i++) { - p = &lmb.reserved.region[i]; - - if (p->base > memory_limit) - p->size = 0; - else if ((p->base + p->size) > memory_limit) - p->size = memory_limit - p->base; - - if (p->size == 0) { - lmb_remove_region(&lmb.reserved, i); - i--; - } - } -} - -int __init lmb_is_reserved(unsigned long addr) -{ - int i; - - for (i = 0; i < lmb.reserved.cnt; i++) { - unsigned long upper = lmb.reserved.region[i].base + - lmb.reserved.region[i].size - 1; - if ((addr >= lmb.reserved.region[i].base) && (addr <= upper)) - return 1; - } - return 0; -} diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c index be5c506779a7..60c019cdc69f 100644 --- a/arch/powerpc/mm/mem.c +++ b/arch/powerpc/mm/mem.c @@ -31,6 +31,7 @@ #include #include #include +#include #include #include @@ -42,7 +43,6 @@ #include #include #include -#include #include #include diff --git a/arch/powerpc/mm/numa.c b/arch/powerpc/mm/numa.c index a300d254aac6..1efd631211ef 100644 --- a/arch/powerpc/mm/numa.c +++ b/arch/powerpc/mm/numa.c @@ -17,8 +17,9 @@ #include #include #include +#include #include -#include +#include #include #include diff --git a/arch/powerpc/mm/ppc_mmu_32.c b/arch/powerpc/mm/ppc_mmu_32.c index 5c45d474cfcc..72de3c79210a 100644 --- a/arch/powerpc/mm/ppc_mmu_32.c +++ b/arch/powerpc/mm/ppc_mmu_32.c @@ -26,11 +26,11 @@ #include #include #include +#include #include #include #include -#include #include "mmu_decl.h" diff --git a/arch/powerpc/mm/stab.c b/arch/powerpc/mm/stab.c index 50448d5de9d2..efbbd13d93e5 100644 --- a/arch/powerpc/mm/stab.c +++ b/arch/powerpc/mm/stab.c @@ -12,12 +12,14 @@ * 2 of the License, or (at your option) any later version. 
*/ +#include + #include #include #include #include #include -#include +#include #include #include #include diff --git a/arch/powerpc/platforms/cell/iommu.c b/arch/powerpc/platforms/cell/iommu.c index edab631a8dcb..61381586895f 100644 --- a/arch/powerpc/platforms/cell/iommu.c +++ b/arch/powerpc/platforms/cell/iommu.c @@ -28,13 +28,13 @@ #include #include #include +#include #include #include #include #include #include -#include #include #include diff --git a/arch/powerpc/platforms/maple/setup.c b/arch/powerpc/platforms/maple/setup.c index 3ce2d73b4177..dadf33b5c09c 100644 --- a/arch/powerpc/platforms/maple/setup.c +++ b/arch/powerpc/platforms/maple/setup.c @@ -43,6 +43,7 @@ #include #include #include +#include #include #include @@ -57,7 +58,6 @@ #include #include #include -#include #include #include #include diff --git a/arch/powerpc/platforms/powermac/setup.c b/arch/powerpc/platforms/powermac/setup.c index 36ff1b6b7fac..59404baf911f 100644 --- a/arch/powerpc/platforms/powermac/setup.c +++ b/arch/powerpc/platforms/powermac/setup.c @@ -53,6 +53,7 @@ #include #include #include +#include #include #include @@ -74,7 +75,6 @@ #include #include #include -#include #include #include "pmac.h" diff --git a/arch/powerpc/platforms/ps3/htab.c b/arch/powerpc/platforms/ps3/htab.c index 7382f195c4f8..1cf901fa9031 100644 --- a/arch/powerpc/platforms/ps3/htab.c +++ b/arch/powerpc/platforms/ps3/htab.c @@ -19,9 +19,10 @@ */ #include +#include #include -#include +#include #include #include #include diff --git a/arch/powerpc/platforms/ps3/mm.c b/arch/powerpc/platforms/ps3/mm.c index 68900476c842..5b3fb2b321ab 100644 --- a/arch/powerpc/platforms/ps3/mm.c +++ b/arch/powerpc/platforms/ps3/mm.c @@ -21,9 +21,10 @@ #include #include #include +#include #include -#include +#include #include #include diff --git a/arch/powerpc/platforms/ps3/os-area.c b/arch/powerpc/platforms/ps3/os-area.c index b9ea09d9d2fb..c73379ec9141 100644 --- a/arch/powerpc/platforms/ps3/os-area.c +++ b/arch/powerpc/platforms/ps3/os-area.c @@ -24,8 +24,9 @@ #include #include #include +#include -#include +#include #include "platform.h" diff --git a/arch/powerpc/sysdev/dart_iommu.c b/arch/powerpc/sysdev/dart_iommu.c index e0e24b01e3a6..005c2ecf976f 100644 --- a/arch/powerpc/sysdev/dart_iommu.c +++ b/arch/powerpc/sysdev/dart_iommu.c @@ -37,6 +37,7 @@ #include #include #include +#include #include #include #include @@ -44,7 +45,6 @@ #include #include #include -#include #include #include "dart.h" diff --git a/arch/sparc64/Kconfig b/arch/sparc64/Kconfig index 3af378ddb6ae..b18d24d249fa 100644 --- a/arch/sparc64/Kconfig +++ b/arch/sparc64/Kconfig @@ -15,6 +15,7 @@ config SPARC64 bool default y select HAVE_IDE + select HAVE_LMB help SPARC is a family of RISC microprocessors designed and marketed by Sun Microsystems, incorporated. This port covers the newer 64-bit diff --git a/include/asm-powerpc/abs_addr.h b/include/asm-powerpc/abs_addr.h index 4aa220718b19..98324c5a8286 100644 --- a/include/asm-powerpc/abs_addr.h +++ b/include/asm-powerpc/abs_addr.h @@ -12,10 +12,11 @@ * 2 of the License, or (at your option) any later version. */ +#include + #include #include #include -#include #include struct mschunks_map { diff --git a/include/asm-powerpc/lmb.h b/include/asm-powerpc/lmb.h index 5d1dc48a0bb8..028184b6a162 100644 --- a/include/asm-powerpc/lmb.h +++ b/include/asm-powerpc/lmb.h @@ -1,81 +1,15 @@ #ifndef _ASM_POWERPC_LMB_H #define _ASM_POWERPC_LMB_H -#ifdef __KERNEL__ -/* - * Definitions for talking to the Open Firmware PROM on - * Power Macintosh computers. 
- * - * Copyright (C) 2001 Peter Bergner, IBM Corp. - * - * This program is free software; you can redistribute it and/or - * modify it under the terms of the GNU General Public License - * as published by the Free Software Foundation; either version - * 2 of the License, or (at your option) any later version. - */ +#include -#include -#include +#define LMB_DBG(fmt...) udbg_printf(fmt) -#define MAX_LMB_REGIONS 128 +#ifdef CONFIG_PPC32 +extern unsigned long __max_low_memory; +#define LMB_REAL_LIMIT __max_low_memory +#else +#define LMB_REAL_LIMIT 0 +#endif -struct lmb_property { - unsigned long base; - unsigned long size; -}; - -struct lmb_region { - unsigned long cnt; - unsigned long size; - struct lmb_property region[MAX_LMB_REGIONS+1]; -}; - -struct lmb { - unsigned long debug; - unsigned long rmo_size; - struct lmb_region memory; - struct lmb_region reserved; -}; - -extern struct lmb lmb; - -extern void __init lmb_init(void); -extern void __init lmb_analyze(void); -extern long __init lmb_add(unsigned long base, unsigned long size); -extern long __init lmb_reserve(unsigned long base, unsigned long size); -extern unsigned long __init lmb_alloc(unsigned long size, unsigned long align); -extern unsigned long __init lmb_alloc_base(unsigned long size, - unsigned long align, unsigned long max_addr); -extern unsigned long __init __lmb_alloc_base(unsigned long size, - unsigned long align, unsigned long max_addr); -extern unsigned long __init lmb_phys_mem_size(void); -extern unsigned long __init lmb_end_of_DRAM(void); -extern void __init lmb_enforce_memory_limit(unsigned long memory_limit); -extern int __init lmb_is_reserved(unsigned long addr); - -extern void lmb_dump_all(void); - -static inline unsigned long -lmb_size_bytes(struct lmb_region *type, unsigned long region_nr) -{ - return type->region[region_nr].size; -} -static inline unsigned long -lmb_size_pages(struct lmb_region *type, unsigned long region_nr) -{ - return lmb_size_bytes(type, region_nr) >> PAGE_SHIFT; -} -static inline unsigned long -lmb_start_pfn(struct lmb_region *type, unsigned long region_nr) -{ - return type->region[region_nr].base >> PAGE_SHIFT; -} -static inline unsigned long -lmb_end_pfn(struct lmb_region *type, unsigned long region_nr) -{ - return lmb_start_pfn(type, region_nr) + - lmb_size_pages(type, region_nr); -} - -#endif /* __KERNEL__ */ #endif /* _ASM_POWERPC_LMB_H */ diff --git a/include/asm-sparc64/lmb.h b/include/asm-sparc64/lmb.h new file mode 100644 index 000000000000..6a352cbcf520 --- /dev/null +++ b/include/asm-sparc64/lmb.h @@ -0,0 +1,10 @@ +#ifndef _SPARC64_LMB_H +#define _SPARC64_LMB_H + +#include + +#define LMB_DBG(fmt...) prom_printf(fmt) + +#define LMB_REAL_LIMIT 0 + +#endif /* !(_SPARC64_LMB_H) */ diff --git a/include/linux/lmb.h b/include/linux/lmb.h new file mode 100644 index 000000000000..8b93f63407e9 --- /dev/null +++ b/include/linux/lmb.h @@ -0,0 +1,83 @@ +#ifndef _LINUX_LMB_H +#define _LINUX_LMB_H +#ifdef __KERNEL__ + +/* + * Logical memory blocks. + * + * Copyright (C) 2001 Peter Bergner, IBM Corp. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. 
+ */ + +#include +#include + +#define MAX_LMB_REGIONS 128 + +struct lmb_property { + unsigned long base; + unsigned long size; +}; + +struct lmb_region { + unsigned long cnt; + unsigned long size; + struct lmb_property region[MAX_LMB_REGIONS+1]; +}; + +struct lmb { + unsigned long debug; + unsigned long rmo_size; + struct lmb_region memory; + struct lmb_region reserved; +}; + +extern struct lmb lmb; + +extern void __init lmb_init(void); +extern void __init lmb_analyze(void); +extern long __init lmb_add(unsigned long base, unsigned long size); +extern long __init lmb_reserve(unsigned long base, unsigned long size); +extern unsigned long __init lmb_alloc(unsigned long size, unsigned long align); +extern unsigned long __init lmb_alloc_base(unsigned long size, + unsigned long align, unsigned long max_addr); +extern unsigned long __init __lmb_alloc_base(unsigned long size, + unsigned long align, unsigned long max_addr); +extern unsigned long __init lmb_phys_mem_size(void); +extern unsigned long __init lmb_end_of_DRAM(void); +extern void __init lmb_enforce_memory_limit(unsigned long memory_limit); +extern int __init lmb_is_reserved(unsigned long addr); + +extern void lmb_dump_all(void); + +static inline unsigned long +lmb_size_bytes(struct lmb_region *type, unsigned long region_nr) +{ + return type->region[region_nr].size; +} +static inline unsigned long +lmb_size_pages(struct lmb_region *type, unsigned long region_nr) +{ + return lmb_size_bytes(type, region_nr) >> PAGE_SHIFT; +} +static inline unsigned long +lmb_start_pfn(struct lmb_region *type, unsigned long region_nr) +{ + return type->region[region_nr].base >> PAGE_SHIFT; +} +static inline unsigned long +lmb_end_pfn(struct lmb_region *type, unsigned long region_nr) +{ + return lmb_start_pfn(type, region_nr) + + lmb_size_pages(type, region_nr); +} + +#include + +#endif /* __KERNEL__ */ + +#endif /* _LINUX_LMB_H */ diff --git a/lib/Kconfig b/lib/Kconfig index ba3d104994d9..2d53dc092e8b 100644 --- a/lib/Kconfig +++ b/lib/Kconfig @@ -141,4 +141,7 @@ config HAS_DMA config CHECK_SIGNATURE bool +config HAVE_LMB + boolean + endmenu diff --git a/lib/Makefile b/lib/Makefile index 23de261a4c83..61bba16a0a2f 100644 --- a/lib/Makefile +++ b/lib/Makefile @@ -70,6 +70,8 @@ obj-$(CONFIG_FAULT_INJECTION) += fault-inject.o lib-$(CONFIG_GENERIC_BUG) += bug.o +obj-$(CONFIG_HAVE_LMB) += lmb.o + hostprogs-y := gen_crc32table clean-files := crc32table.h diff --git a/lib/lmb.c b/lib/lmb.c new file mode 100644 index 000000000000..98078b4ec20e --- /dev/null +++ b/lib/lmb.c @@ -0,0 +1,354 @@ +/* + * Procedures for maintaining information about logical memory blocks. + * + * Peter Bergner, IBM Corp. June 2001. + * Copyright (C) 2001 Peter Bergner. + * + * This program is free software; you can redistribute it and/or + * modify it under the terms of the GNU General Public License + * as published by the Free Software Foundation; either version + * 2 of the License, or (at your option) any later version. + */ + +#include +#include +#include +#include + +#undef DEBUG + +#ifdef DEBUG +#define DBG(fmt...) LMB_DBG(fmt) +#else +#define DBG(fmt...) 
+#endif + +#define LMB_ALLOC_ANYWHERE 0 + +struct lmb lmb; + +void lmb_dump_all(void) +{ +#ifdef DEBUG + unsigned long i; + + DBG("lmb_dump_all:\n"); + DBG(" memory.cnt = 0x%lx\n", lmb.memory.cnt); + DBG(" memory.size = 0x%lx\n", lmb.memory.size); + for (i=0; i < lmb.memory.cnt ;i++) { + DBG(" memory.region[0x%x].base = 0x%lx\n", + i, lmb.memory.region[i].base); + DBG(" .size = 0x%lx\n", + lmb.memory.region[i].size); + } + + DBG("\n reserved.cnt = 0x%lx\n", lmb.reserved.cnt); + DBG(" reserved.size = 0x%lx\n", lmb.reserved.size); + for (i=0; i < lmb.reserved.cnt ;i++) { + DBG(" reserved.region[0x%x].base = 0x%lx\n", + i, lmb.reserved.region[i].base); + DBG(" .size = 0x%lx\n", + lmb.reserved.region[i].size); + } +#endif /* DEBUG */ +} + +static unsigned long __init lmb_addrs_overlap(unsigned long base1, + unsigned long size1, unsigned long base2, unsigned long size2) +{ + return ((base1 < (base2+size2)) && (base2 < (base1+size1))); +} + +static long __init lmb_addrs_adjacent(unsigned long base1, unsigned long size1, + unsigned long base2, unsigned long size2) +{ + if (base2 == base1 + size1) + return 1; + else if (base1 == base2 + size2) + return -1; + + return 0; +} + +static long __init lmb_regions_adjacent(struct lmb_region *rgn, + unsigned long r1, unsigned long r2) +{ + unsigned long base1 = rgn->region[r1].base; + unsigned long size1 = rgn->region[r1].size; + unsigned long base2 = rgn->region[r2].base; + unsigned long size2 = rgn->region[r2].size; + + return lmb_addrs_adjacent(base1, size1, base2, size2); +} + +static void __init lmb_remove_region(struct lmb_region *rgn, unsigned long r) +{ + unsigned long i; + + for (i = r; i < rgn->cnt - 1; i++) { + rgn->region[i].base = rgn->region[i + 1].base; + rgn->region[i].size = rgn->region[i + 1].size; + } + rgn->cnt--; +} + +/* Assumption: base addr of region 1 < base addr of region 2 */ +static void __init lmb_coalesce_regions(struct lmb_region *rgn, + unsigned long r1, unsigned long r2) +{ + rgn->region[r1].size += rgn->region[r2].size; + lmb_remove_region(rgn, r2); +} + +/* This routine called with relocation disabled. */ +void __init lmb_init(void) +{ + /* Create a dummy zero size LMB which will get coalesced away later. + * This simplifies the lmb_add() code below... + */ + lmb.memory.region[0].base = 0; + lmb.memory.region[0].size = 0; + lmb.memory.cnt = 1; + + /* Ditto. */ + lmb.reserved.region[0].base = 0; + lmb.reserved.region[0].size = 0; + lmb.reserved.cnt = 1; +} + +/* This routine may be called with relocation disabled. */ +void __init lmb_analyze(void) +{ + int i; + + lmb.memory.size = 0; + + for (i = 0; i < lmb.memory.cnt; i++) + lmb.memory.size += lmb.memory.region[i].size; +} + +/* This routine called with relocation disabled. */ +static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base, + unsigned long size) +{ + unsigned long coalesced = 0; + long adjacent, i; + + /* First try and coalesce this LMB with another. 
*/ + for (i=0; i < rgn->cnt; i++) { + unsigned long rgnbase = rgn->region[i].base; + unsigned long rgnsize = rgn->region[i].size; + + if ((rgnbase == base) && (rgnsize == size)) + /* Already have this region, so we're done */ + return 0; + + adjacent = lmb_addrs_adjacent(base,size,rgnbase,rgnsize); + if ( adjacent > 0 ) { + rgn->region[i].base -= size; + rgn->region[i].size += size; + coalesced++; + break; + } + else if ( adjacent < 0 ) { + rgn->region[i].size += size; + coalesced++; + break; + } + } + + if ((i < rgn->cnt-1) && lmb_regions_adjacent(rgn, i, i+1) ) { + lmb_coalesce_regions(rgn, i, i+1); + coalesced++; + } + + if (coalesced) + return coalesced; + if (rgn->cnt >= MAX_LMB_REGIONS) + return -1; + + /* Couldn't coalesce the LMB, so add it to the sorted table. */ + for (i = rgn->cnt-1; i >= 0; i--) { + if (base < rgn->region[i].base) { + rgn->region[i+1].base = rgn->region[i].base; + rgn->region[i+1].size = rgn->region[i].size; + } else { + rgn->region[i+1].base = base; + rgn->region[i+1].size = size; + break; + } + } + rgn->cnt++; + + return 0; +} + +/* This routine may be called with relocation disabled. */ +long __init lmb_add(unsigned long base, unsigned long size) +{ + struct lmb_region *_rgn = &(lmb.memory); + + /* On pSeries LPAR systems, the first LMB is our RMO region. */ + if (base == 0) + lmb.rmo_size = size; + + return lmb_add_region(_rgn, base, size); + +} + +long __init lmb_reserve(unsigned long base, unsigned long size) +{ + struct lmb_region *_rgn = &(lmb.reserved); + + BUG_ON(0 == size); + + return lmb_add_region(_rgn, base, size); +} + +long __init lmb_overlaps_region(struct lmb_region *rgn, unsigned long base, + unsigned long size) +{ + unsigned long i; + + for (i=0; i < rgn->cnt; i++) { + unsigned long rgnbase = rgn->region[i].base; + unsigned long rgnsize = rgn->region[i].size; + if ( lmb_addrs_overlap(base,size,rgnbase,rgnsize) ) { + break; + } + } + + return (i < rgn->cnt) ? 
i : -1; +} + +unsigned long __init lmb_alloc(unsigned long size, unsigned long align) +{ + return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE); +} + +unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align, + unsigned long max_addr) +{ + unsigned long alloc; + + alloc = __lmb_alloc_base(size, align, max_addr); + + if (alloc == 0) + panic("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n", + size, max_addr); + + return alloc; +} + +static unsigned long lmb_align_down(unsigned long addr, unsigned long size) +{ + return addr & ~(size - 1); +} + +unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align, + unsigned long max_addr) +{ + long i, j; + unsigned long base = 0; + + BUG_ON(0 == size); + + /* On some platforms, make sure we allocate lowmem */ + if (max_addr == LMB_ALLOC_ANYWHERE) + max_addr = LMB_REAL_LIMIT; + + for (i = lmb.memory.cnt-1; i >= 0; i--) { + unsigned long lmbbase = lmb.memory.region[i].base; + unsigned long lmbsize = lmb.memory.region[i].size; + + if (max_addr == LMB_ALLOC_ANYWHERE) + base = lmb_align_down(lmbbase + lmbsize - size, align); + else if (lmbbase < max_addr) { + base = min(lmbbase + lmbsize, max_addr); + base = lmb_align_down(base - size, align); + } else + continue; + + while ((lmbbase <= base) && + ((j = lmb_overlaps_region(&lmb.reserved, base, size)) >= 0) ) + base = lmb_align_down(lmb.reserved.region[j].base - size, + align); + + if ((base != 0) && (lmbbase <= base)) + break; + } + + if (i < 0) + return 0; + + lmb_add_region(&lmb.reserved, base, size); + + return base; +} + +/* You must call lmb_analyze() before this. */ +unsigned long __init lmb_phys_mem_size(void) +{ + return lmb.memory.size; +} + +unsigned long __init lmb_end_of_DRAM(void) +{ + int idx = lmb.memory.cnt - 1; + + return (lmb.memory.region[idx].base + lmb.memory.region[idx].size); +} + +/* You must call lmb_analyze() after this. */ +void __init lmb_enforce_memory_limit(unsigned long memory_limit) +{ + unsigned long i, limit; + struct lmb_property *p; + + if (! memory_limit) + return; + + /* Truncate the lmb regions to satisfy the memory limit. */ + limit = memory_limit; + for (i = 0; i < lmb.memory.cnt; i++) { + if (limit > lmb.memory.region[i].size) { + limit -= lmb.memory.region[i].size; + continue; + } + + lmb.memory.region[i].size = limit; + lmb.memory.cnt = i + 1; + break; + } + + if (lmb.memory.region[0].size < lmb.rmo_size) + lmb.rmo_size = lmb.memory.region[0].size; + + /* And truncate any reserves above the limit also. */ + for (i = 0; i < lmb.reserved.cnt; i++) { + p = &lmb.reserved.region[i]; + + if (p->base > memory_limit) + p->size = 0; + else if ((p->base + p->size) > memory_limit) + p->size = memory_limit - p->base; + + if (p->size == 0) { + lmb_remove_region(&lmb.reserved, i); + i--; + } + } +} + +int __init lmb_is_reserved(unsigned long addr) +{ + int i; + + for (i = 0; i < lmb.reserved.cnt; i++) { + unsigned long upper = lmb.reserved.region[i].base + + lmb.reserved.region[i].size - 1; + if ((addr >= lmb.reserved.region[i].base) && (addr <= upper)) + return 1; + } + return 0; +} -- cgit v1.2.3 From eea89e13a9c61d3928223d2f9bf2295e22e0efb6 Mon Sep 17 00:00:00 2001 From: "David S. Miller" Date: Wed, 13 Feb 2008 16:57:09 -0800 Subject: [LMB]: Fix bug in __lmb_alloc_base(). We need to check lmb_add_region() for errors, it can run out of regions etc. 
Also, the size needs to be padded to the given alignment or else the lmb.reserved regions don't get expanded and instead we get tons of holes and eventually run out of regions prematurely. Signed-off-by: David S. Miller --- lib/lmb.c | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/lib/lmb.c b/lib/lmb.c index 98078b4ec20e..6390d63a2a0e 100644 --- a/lib/lmb.c +++ b/lib/lmb.c @@ -245,6 +245,11 @@ static unsigned long lmb_align_down(unsigned long addr, unsigned long size) return addr & ~(size - 1); } +static unsigned long lmb_align_up(unsigned long addr, unsigned long size) +{ + return (addr + (size - 1)) & ~(size - 1); +} + unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align, unsigned long max_addr) { @@ -281,7 +286,8 @@ unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align, if (i < 0) return 0; - lmb_add_region(&lmb.reserved, base, size); + if (lmb_add_region(&lmb.reserved, base, lmb_align_up(size, align)) < 0) + return 0; return base; } -- cgit v1.2.3 From 27e6672bb9912d3e3a41cf88e6142d3ae5e534b3 Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Wed, 13 Feb 2008 16:58:11 -0800 Subject: [LMB]: Fix initial lmb add region with a non-zero base If we add to an empty lmb region with a non-zero base we will not coalesce the number of regions down to one. This causes problems on ppc32 for the memory region as it's assumed to only have one region. We can fix this easily by special-casing the initial add to just replace the dummy region. Signed-off-by: Kumar Gala Signed-off-by: David S. Miller --- lib/lmb.c | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/lib/lmb.c b/lib/lmb.c index 6390d63a2a0e..e34a9e586c42 100644 --- a/lib/lmb.c +++ b/lib/lmb.c @@ -134,6 +134,12 @@ static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base, unsigned long coalesced = 0; long adjacent, i; + if ((rgn->cnt == 1) && (rgn->region[0].size == 0)) { + rgn->region[0].base = base; + rgn->region[0].size = size; + return 0; + } + /* First try and coalesce this LMB with another. */ for (i=0; i < rgn->cnt; i++) { unsigned long rgnbase = rgn->region[i].base; -- cgit v1.2.3 From e5f270954364a4add74e8445b1db925ac534fcfb Mon Sep 17 00:00:00 2001 From: Becky Bruce Date: Wed, 13 Feb 2008 16:58:39 -0800 Subject: [LMB]: Make lmb support large physical addressing Convert the lmb code to use u64 instead of unsigned long for physical addresses and sizes. This is needed to support large amounts of RAM on 32-bit systems that support 36-bit physical addressing. Signed-off-by: Becky Bruce Signed-off-by: David S. 
Miller --- include/linux/lmb.h | 38 +++++++++++----------- lib/lmb.c | 93 ++++++++++++++++++++++++++--------------------------- 2 files changed, 65 insertions(+), 66 deletions(-) diff --git a/include/linux/lmb.h b/include/linux/lmb.h index 8b93f63407e9..632717c6a2ba 100644 --- a/include/linux/lmb.h +++ b/include/linux/lmb.h @@ -19,19 +19,19 @@ #define MAX_LMB_REGIONS 128 struct lmb_property { - unsigned long base; - unsigned long size; + u64 base; + u64 size; }; struct lmb_region { unsigned long cnt; - unsigned long size; + u64 size; struct lmb_property region[MAX_LMB_REGIONS+1]; }; struct lmb { unsigned long debug; - unsigned long rmo_size; + u64 rmo_size; struct lmb_region memory; struct lmb_region reserved; }; @@ -40,36 +40,36 @@ extern struct lmb lmb; extern void __init lmb_init(void); extern void __init lmb_analyze(void); -extern long __init lmb_add(unsigned long base, unsigned long size); -extern long __init lmb_reserve(unsigned long base, unsigned long size); -extern unsigned long __init lmb_alloc(unsigned long size, unsigned long align); -extern unsigned long __init lmb_alloc_base(unsigned long size, - unsigned long align, unsigned long max_addr); -extern unsigned long __init __lmb_alloc_base(unsigned long size, - unsigned long align, unsigned long max_addr); -extern unsigned long __init lmb_phys_mem_size(void); -extern unsigned long __init lmb_end_of_DRAM(void); -extern void __init lmb_enforce_memory_limit(unsigned long memory_limit); -extern int __init lmb_is_reserved(unsigned long addr); +extern long __init lmb_add(u64 base, u64 size); +extern long __init lmb_reserve(u64 base, u64 size); +extern u64 __init lmb_alloc(u64 size, u64 align); +extern u64 __init lmb_alloc_base(u64 size, + u64, u64 max_addr); +extern u64 __init __lmb_alloc_base(u64 size, + u64 align, u64 max_addr); +extern u64 __init lmb_phys_mem_size(void); +extern u64 __init lmb_end_of_DRAM(void); +extern void __init lmb_enforce_memory_limit(u64 memory_limit); +extern int __init lmb_is_reserved(u64 addr); extern void lmb_dump_all(void); -static inline unsigned long +static inline u64 lmb_size_bytes(struct lmb_region *type, unsigned long region_nr) { return type->region[region_nr].size; } -static inline unsigned long +static inline u64 lmb_size_pages(struct lmb_region *type, unsigned long region_nr) { return lmb_size_bytes(type, region_nr) >> PAGE_SHIFT; } -static inline unsigned long +static inline u64 lmb_start_pfn(struct lmb_region *type, unsigned long region_nr) { return type->region[region_nr].base >> PAGE_SHIFT; } -static inline unsigned long +static inline u64 lmb_end_pfn(struct lmb_region *type, unsigned long region_nr) { return lmb_start_pfn(type, region_nr) + diff --git a/lib/lmb.c b/lib/lmb.c index e34a9e586c42..e3c8dcb04b46 100644 --- a/lib/lmb.c +++ b/lib/lmb.c @@ -34,33 +34,34 @@ void lmb_dump_all(void) DBG("lmb_dump_all:\n"); DBG(" memory.cnt = 0x%lx\n", lmb.memory.cnt); - DBG(" memory.size = 0x%lx\n", lmb.memory.size); + DBG(" memory.size = 0x%llx\n", + (unsigned long long)lmb.memory.size); for (i=0; i < lmb.memory.cnt ;i++) { - DBG(" memory.region[0x%x].base = 0x%lx\n", - i, lmb.memory.region[i].base); - DBG(" .size = 0x%lx\n", - lmb.memory.region[i].size); + DBG(" memory.region[0x%x].base = 0x%llx\n", + i, (unsigned long long)lmb.memory.region[i].base); + DBG(" .size = 0x%llx\n", + (unsigned long long)lmb.memory.region[i].size); } DBG("\n reserved.cnt = 0x%lx\n", lmb.reserved.cnt); DBG(" reserved.size = 0x%lx\n", lmb.reserved.size); for (i=0; i < lmb.reserved.cnt ;i++) { - DBG(" 
reserved.region[0x%x].base = 0x%lx\n", - i, lmb.reserved.region[i].base); - DBG(" .size = 0x%lx\n", - lmb.reserved.region[i].size); + DBG(" reserved.region[0x%x].base = 0x%llx\n", + i, (unsigned long long)lmb.reserved.region[i].base); + DBG(" .size = 0x%llx\n", + (unsigned long long)lmb.reserved.region[i].size); } #endif /* DEBUG */ } -static unsigned long __init lmb_addrs_overlap(unsigned long base1, - unsigned long size1, unsigned long base2, unsigned long size2) +static unsigned long __init lmb_addrs_overlap(u64 base1, + u64 size1, u64 base2, u64 size2) { return ((base1 < (base2+size2)) && (base2 < (base1+size1))); } -static long __init lmb_addrs_adjacent(unsigned long base1, unsigned long size1, - unsigned long base2, unsigned long size2) +static long __init lmb_addrs_adjacent(u64 base1, u64 size1, + u64 base2, u64 size2) { if (base2 == base1 + size1) return 1; @@ -73,10 +74,10 @@ static long __init lmb_addrs_adjacent(unsigned long base1, unsigned long size1, static long __init lmb_regions_adjacent(struct lmb_region *rgn, unsigned long r1, unsigned long r2) { - unsigned long base1 = rgn->region[r1].base; - unsigned long size1 = rgn->region[r1].size; - unsigned long base2 = rgn->region[r2].base; - unsigned long size2 = rgn->region[r2].size; + u64 base1 = rgn->region[r1].base; + u64 size1 = rgn->region[r1].size; + u64 base2 = rgn->region[r2].base; + u64 size2 = rgn->region[r2].size; return lmb_addrs_adjacent(base1, size1, base2, size2); } @@ -128,8 +129,7 @@ void __init lmb_analyze(void) } /* This routine called with relocation disabled. */ -static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base, - unsigned long size) +static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size) { unsigned long coalesced = 0; long adjacent, i; @@ -142,8 +142,8 @@ static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base, /* First try and coalesce this LMB with another. */ for (i=0; i < rgn->cnt; i++) { - unsigned long rgnbase = rgn->region[i].base; - unsigned long rgnsize = rgn->region[i].size; + u64 rgnbase = rgn->region[i].base; + u64 rgnsize = rgn->region[i].size; if ((rgnbase == base) && (rgnsize == size)) /* Already have this region, so we're done */ @@ -190,7 +190,7 @@ static long __init lmb_add_region(struct lmb_region *rgn, unsigned long base, } /* This routine may be called with relocation disabled. */ -long __init lmb_add(unsigned long base, unsigned long size) +long __init lmb_add(u64 base, u64 size) { struct lmb_region *_rgn = &(lmb.memory); @@ -202,7 +202,7 @@ long __init lmb_add(unsigned long base, unsigned long size) } -long __init lmb_reserve(unsigned long base, unsigned long size) +long __init lmb_reserve(u64 base, u64 size) { struct lmb_region *_rgn = &(lmb.reserved); @@ -211,14 +211,14 @@ long __init lmb_reserve(unsigned long base, unsigned long size) return lmb_add_region(_rgn, base, size); } -long __init lmb_overlaps_region(struct lmb_region *rgn, unsigned long base, - unsigned long size) +long __init lmb_overlaps_region(struct lmb_region *rgn, u64 base, + u64 size) { unsigned long i; for (i=0; i < rgn->cnt; i++) { - unsigned long rgnbase = rgn->region[i].base; - unsigned long rgnsize = rgn->region[i].size; + u64 rgnbase = rgn->region[i].base; + u64 rgnsize = rgn->region[i].size; if ( lmb_addrs_overlap(base,size,rgnbase,rgnsize) ) { break; } @@ -227,40 +227,38 @@ long __init lmb_overlaps_region(struct lmb_region *rgn, unsigned long base, return (i < rgn->cnt) ? 
i : -1; } -unsigned long __init lmb_alloc(unsigned long size, unsigned long align) +u64 __init lmb_alloc(u64 size, u64 align) { return lmb_alloc_base(size, align, LMB_ALLOC_ANYWHERE); } -unsigned long __init lmb_alloc_base(unsigned long size, unsigned long align, - unsigned long max_addr) +u64 __init lmb_alloc_base(u64 size, u64 align, u64 max_addr) { - unsigned long alloc; + u64 alloc; alloc = __lmb_alloc_base(size, align, max_addr); if (alloc == 0) - panic("ERROR: Failed to allocate 0x%lx bytes below 0x%lx.\n", - size, max_addr); + panic("ERROR: Failed to allocate 0x%llx bytes below 0x%llx.\n", + (unsigned long long) size, (unsigned long long) max_addr); return alloc; } -static unsigned long lmb_align_down(unsigned long addr, unsigned long size) +static u64 lmb_align_down(u64 addr, u64 size) { return addr & ~(size - 1); } -static unsigned long lmb_align_up(unsigned long addr, unsigned long size) +static u64 lmb_align_up(u64 addr, u64 size) { return (addr + (size - 1)) & ~(size - 1); } -unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align, - unsigned long max_addr) +u64 __init __lmb_alloc_base(u64 size, u64 align, u64 max_addr) { long i, j; - unsigned long base = 0; + u64 base = 0; BUG_ON(0 == size); @@ -269,8 +267,8 @@ unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align, max_addr = LMB_REAL_LIMIT; for (i = lmb.memory.cnt-1; i >= 0; i--) { - unsigned long lmbbase = lmb.memory.region[i].base; - unsigned long lmbsize = lmb.memory.region[i].size; + u64 lmbbase = lmb.memory.region[i].base; + u64 lmbsize = lmb.memory.region[i].size; if (max_addr == LMB_ALLOC_ANYWHERE) base = lmb_align_down(lmbbase + lmbsize - size, align); @@ -299,12 +297,12 @@ unsigned long __init __lmb_alloc_base(unsigned long size, unsigned long align, } /* You must call lmb_analyze() before this. */ -unsigned long __init lmb_phys_mem_size(void) +u64 __init lmb_phys_mem_size(void) { return lmb.memory.size; } -unsigned long __init lmb_end_of_DRAM(void) +u64 __init lmb_end_of_DRAM(void) { int idx = lmb.memory.cnt - 1; @@ -312,9 +310,10 @@ unsigned long __init lmb_end_of_DRAM(void) } /* You must call lmb_analyze() after this. */ -void __init lmb_enforce_memory_limit(unsigned long memory_limit) +void __init lmb_enforce_memory_limit(u64 memory_limit) { - unsigned long i, limit; + unsigned long i; + u64 limit; struct lmb_property *p; if (! memory_limit) @@ -352,13 +351,13 @@ void __init lmb_enforce_memory_limit(unsigned long memory_limit) } } -int __init lmb_is_reserved(unsigned long addr) +int __init lmb_is_reserved(u64 addr) { int i; for (i = 0; i < lmb.reserved.cnt; i++) { - unsigned long upper = lmb.reserved.region[i].base + - lmb.reserved.region[i].size - 1; + u64 upper = lmb.reserved.region[i].base + + lmb.reserved.region[i].size - 1; if ((addr >= lmb.reserved.region[i].base) && (addr <= upper)) return 1; } -- cgit v1.2.3 From 74b20dad1c4cc0fd13ceca62fbab808919e1a7ea Mon Sep 17 00:00:00 2001 From: Kumar Gala Date: Tue, 19 Feb 2008 21:28:18 -0800 Subject: [LMB]: Fix lmb_add_region if region should be added at the head We introduced a bug in fixing lmb_add_region to handle an initial region being non-zero. Before that fix it was impossible to insert a region at the head of the list since the first region always started at zero. Now that its possible for the first region to be non-zero we need to check to see if the new region should be added at the head and if so actually add it. Signed-off-by: Kumar Gala Signed-off-by: David S. 
Miller --- lib/lmb.c | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/lib/lmb.c b/lib/lmb.c index e3c8dcb04b46..3c43b95fef4a 100644 --- a/lib/lmb.c +++ b/lib/lmb.c @@ -184,6 +184,11 @@ static long __init lmb_add_region(struct lmb_region *rgn, u64 base, u64 size) break; } } + + if (base < rgn->region[0].base) { + rgn->region[0].base = base; + rgn->region[0].size = size; + } rgn->cnt++; return 0; -- cgit v1.2.3 From f8c8803bda4db47cbbdadb9b27b024e903e1d645 Mon Sep 17 00:00:00 2001 From: Badari Pulavarty Date: Tue, 29 Jan 2008 09:19:24 +1100 Subject: [POWERPC] Add code for removing HPTEs for parts of the linear mapping For memory remove, we need to clean up htab mappings for the section of the memory we are removing. This implements support for removing htab bolted mappings for pSeries logical partitions. Other sub-archs may need to implement similar functionality for hotplug memory remove to work on them. Signed-off-by: Badari Pulavarty Signed-off-by: Paul Mackerras --- arch/powerpc/mm/hash_utils_64.c | 23 +++++++++++++++++++++++ arch/powerpc/platforms/pseries/lpar.c | 15 +++++++++++++++ include/asm-powerpc/machdep.h | 2 ++ include/asm-powerpc/sparsemem.h | 1 + 4 files changed, 41 insertions(+) diff --git a/arch/powerpc/mm/hash_utils_64.c b/arch/powerpc/mm/hash_utils_64.c index 73b78486abfc..4cc53f92377e 100644 --- a/arch/powerpc/mm/hash_utils_64.c +++ b/arch/powerpc/mm/hash_utils_64.c @@ -192,6 +192,24 @@ int htab_bolt_mapping(unsigned long vstart, unsigned long vend, return ret < 0 ? ret : 0; } +static void htab_remove_mapping(unsigned long vstart, unsigned long vend, + int psize, int ssize) +{ + unsigned long vaddr; + unsigned int step, shift; + + shift = mmu_psize_defs[psize].shift; + step = 1 << shift; + + if (!ppc_md.hpte_removebolted) { + printk("Sub-arch doesn't implement hpte_removebolted\n"); + return; + } + + for (vaddr = vstart; vaddr < vend; vaddr += step) + ppc_md.hpte_removebolted(vaddr, psize, ssize); +} + static int __init htab_dt_scan_seg_sizes(unsigned long node, const char *uname, int depth, void *data) @@ -430,6 +448,11 @@ void create_section_mapping(unsigned long start, unsigned long end) _PAGE_ACCESSED | _PAGE_DIRTY | _PAGE_COHERENT | PP_RWXX, mmu_linear_psize, mmu_kernel_ssize)); } + +void remove_section_mapping(unsigned long start, unsigned long end) +{ + htab_remove_mapping(start, end, mmu_linear_psize, mmu_kernel_ssize); +} #endif /* CONFIG_MEMORY_HOTPLUG */ static inline void make_bl(unsigned int *insn_addr, void *func) diff --git a/arch/powerpc/platforms/pseries/lpar.c b/arch/powerpc/platforms/pseries/lpar.c index 9a455d46379d..233d9be25f49 100644 --- a/arch/powerpc/platforms/pseries/lpar.c +++ b/arch/powerpc/platforms/pseries/lpar.c @@ -520,6 +520,20 @@ static void pSeries_lpar_hpte_invalidate(unsigned long slot, unsigned long va, BUG_ON(lpar_rc != H_SUCCESS); } +static void pSeries_lpar_hpte_removebolted(unsigned long ea, + int psize, int ssize) +{ + unsigned long slot, vsid, va; + + vsid = get_kernel_vsid(ea, ssize); + va = hpt_va(ea, vsid, ssize); + + slot = pSeries_lpar_hpte_find(va, psize, ssize); + BUG_ON(slot == -1); + + pSeries_lpar_hpte_invalidate(slot, va, psize, ssize, 0); +} + /* Flag bits for H_BULK_REMOVE */ #define HBR_REQUEST 0x4000000000000000UL #define HBR_RESPONSE 0x8000000000000000UL @@ -597,6 +611,7 @@ void __init hpte_init_lpar(void) ppc_md.hpte_updateboltedpp = pSeries_lpar_hpte_updateboltedpp; ppc_md.hpte_insert = pSeries_lpar_hpte_insert; ppc_md.hpte_remove = pSeries_lpar_hpte_remove; + ppc_md.hpte_removebolted = 
pSeries_lpar_hpte_removebolted; ppc_md.flush_hash_range = pSeries_lpar_flush_hash_range; ppc_md.hpte_clear_all = pSeries_lpar_hptab_clear; } diff --git a/include/asm-powerpc/machdep.h b/include/asm-powerpc/machdep.h index 0872ec228c1e..b95386aed50d 100644 --- a/include/asm-powerpc/machdep.h +++ b/include/asm-powerpc/machdep.h @@ -68,6 +68,8 @@ struct machdep_calls { unsigned long vflags, int psize, int ssize); long (*hpte_remove)(unsigned long hpte_group); + void (*hpte_removebolted)(unsigned long ea, + int psize, int ssize); void (*flush_hash_range)(unsigned long number, int local); /* special for kexec, to be called in real mode, linar mapping is diff --git a/include/asm-powerpc/sparsemem.h b/include/asm-powerpc/sparsemem.h index e8b493d52b4f..c5acf4ccf571 100644 --- a/include/asm-powerpc/sparsemem.h +++ b/include/asm-powerpc/sparsemem.h @@ -15,6 +15,7 @@ #ifdef CONFIG_MEMORY_HOTPLUG extern void create_section_mapping(unsigned long start, unsigned long end); +extern void remove_section_mapping(unsigned long start, unsigned long end); #ifdef CONFIG_NUMA extern int hot_add_scn_to_nid(unsigned long scn_addr); #else -- cgit v1.2.3 From 445857e0fc16ba3f74e778dd5a8707459c84f933 Mon Sep 17 00:00:00 2001 From: Dale Farnsworth Date: Fri, 1 Feb 2008 08:11:41 +1100 Subject: [POWERPC] Remove dead code at KernelAltiVec This code isn't referenced anywhere, so remove it. Signed-off-by: Dale Farnsworth Signed-off-by: Paul Mackerras --- arch/powerpc/kernel/head_32.S | 17 ----------------- arch/ppc/kernel/head.S | 17 ----------------- 2 files changed, 34 deletions(-) diff --git a/arch/powerpc/kernel/head_32.S b/arch/powerpc/kernel/head_32.S index 0f4fac512020..c16d1354b19d 100644 --- a/arch/powerpc/kernel/head_32.S +++ b/arch/powerpc/kernel/head_32.S @@ -762,23 +762,6 @@ load_up_altivec: /* we haven't used ctr or xer or lr */ b fast_exception_return -/* - * AltiVec unavailable trap from kernel - print a message, but let - * the task use AltiVec in the kernel until it returns to user mode. - */ -KernelAltiVec: - lwz r3,_MSR(r1) - oris r3,r3,MSR_VEC@h - stw r3,_MSR(r1) /* enable use of AltiVec after return */ - lis r3,87f@h - ori r3,r3,87f@l - mr r4,r2 /* current */ - lwz r5,_NIP(r1) - bl printk - b ret_from_except -87: .string "AltiVec used in kernel (task=%p, pc=%x) \n" - .align 4,0 - /* * giveup_altivec(tsk) * Disable AltiVec for the task given as the argument, diff --git a/arch/ppc/kernel/head.S b/arch/ppc/kernel/head.S index 1b0ec7202dd5..e7e642b95138 100644 --- a/arch/ppc/kernel/head.S +++ b/arch/ppc/kernel/head.S @@ -700,23 +700,6 @@ load_up_altivec: /* we haven't used ctr or xer or lr */ b fast_exception_return -/* - * AltiVec unavailable trap from kernel - print a message, but let - * the task use AltiVec in the kernel until it returns to user mode. - */ -KernelAltiVec: - lwz r3,_MSR(r1) - oris r3,r3,MSR_VEC@h - stw r3,_MSR(r1) /* enable use of AltiVec after return */ - lis r3,87f@h - ori r3,r3,87f@l - mr r4,r2 /* current */ - lwz r5,_NIP(r1) - bl printk - b ret_from_except -87: .string "AltiVec used in kernel (task=%p, pc=%x) \n" - .align 4,0 - /* * giveup_altivec(tsk) * Disable AltiVec for the task given as the argument, -- cgit v1.2.3 From 4217c7b529af3abea090d875f0ef133ed2d44285 Mon Sep 17 00:00:00 2001 From: Julia Lawall Date: Mon, 11 Feb 2008 07:07:59 +1100 Subject: [POWERPC] Use FIELD_SIZEOF in arch/ppc Robert P.J. Day proposed to use the macro FIELD_SIZEOF in replace of code that matches its definition. 
The modification was made using the following semantic patch (http://www.emn.fr/x-info/coccinelle/) // @haskernel@ @@ #include @depends on haskernel@ type t; identifier f; @@ - (sizeof(((t*)0)->f)) + FIELD_SIZEOF(t, f) @depends on haskernel@ type t; identifier f; @@ - sizeof(((t*)0)->f) + FIELD_SIZEOF(t, f) // Signed-off-by: Julia Lawall Signed-off-by: Paul Mackerras --- arch/ppc/8xx_io/commproc.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/arch/ppc/8xx_io/commproc.c b/arch/ppc/8xx_io/commproc.c index 9d656de0f0f1..752443df5ecf 100644 --- a/arch/ppc/8xx_io/commproc.c +++ b/arch/ppc/8xx_io/commproc.c @@ -43,7 +43,7 @@ ({ \ u32 offset = offsetof(immap_t, member); \ void *addr = ioremap (IMAP_ADDR + offset, \ - sizeof( ((immap_t*)0)->member)); \ + FIELD_SIZEOF(immap_t, member)); \ addr; \ }) -- cgit v1.2.3 From d9d1063d47cbfe45f8b369475a35c3cdd64fb69c Mon Sep 17 00:00:00 2001 From: Johannes Berg Date: Thu, 21 Feb 2008 20:39:01 +1100 Subject: [POWERPC] mpic: Make sparse happy I was running sparse on something else and noticed sparse warnings and especially the bogus code that is fixed by the first hunk of this patch, so I fixed them all while at it. Signed-off-by: Johannes Berg Cc: Benjamin Herrenschmidt Cc: Milton Miller Signed-off-by: Paul Mackerras --- arch/powerpc/sysdev/mpic.c | 32 ++++++++++++++++++-------------- 1 file changed, 18 insertions(+), 14 deletions(-) diff --git a/arch/powerpc/sysdev/mpic.c b/arch/powerpc/sysdev/mpic.c index 6ffdda244bb1..6131fd2b6619 100644 --- a/arch/powerpc/sysdev/mpic.c +++ b/arch/powerpc/sysdev/mpic.c @@ -175,13 +175,16 @@ static inline void _mpic_write(enum mpic_reg_type type, switch(type) { #ifdef CONFIG_PPC_DCR case mpic_access_dcr: - return dcr_write(rb->dhost, reg, value); + dcr_write(rb->dhost, reg, value); + break; #endif case mpic_access_mmio_be: - return out_be32(rb->base + (reg >> 2), value); + out_be32(rb->base + (reg >> 2), value); + break; case mpic_access_mmio_le: default: - return out_le32(rb->base + (reg >> 2), value); + out_le32(rb->base + (reg >> 2), value); + break; } } @@ -1000,7 +1003,7 @@ struct mpic * __init mpic_alloc(struct device_node *node, const char *name) { struct mpic *mpic; - u32 reg; + u32 greg_feature; const char *vers; int i; int intvec_top; @@ -1064,7 +1067,8 @@ struct mpic * __init mpic_alloc(struct device_node *node, /* Look for protected sources */ if (node) { - unsigned int psize, bits, mapsize; + int psize; + unsigned int bits, mapsize; const u32 *psrc = of_get_property(node, "protected-sources", &psize); if (psrc) { @@ -1107,8 +1111,7 @@ struct mpic * __init mpic_alloc(struct device_node *node, * in, try to obtain one */ if (paddr == 0 && !(mpic->flags & MPIC_USES_DCR)) { - const u32 *reg; - reg = of_get_property(node, "reg", NULL); + const u32 *reg = of_get_property(node, "reg", NULL); BUG_ON(reg == NULL); paddr = of_translate_address(node, reg); BUG_ON(paddr == OF_BAD_ADDR); @@ -1137,12 +1140,13 @@ struct mpic * __init mpic_alloc(struct device_node *node, * MPICs, num sources as well. 
On ISU MPICs, sources are counted * as ISUs are added */ - reg = mpic_read(mpic->gregs, MPIC_INFO(GREG_FEATURE_0)); - mpic->num_cpus = ((reg & MPIC_GREG_FEATURE_LAST_CPU_MASK) + greg_feature = mpic_read(mpic->gregs, MPIC_INFO(GREG_FEATURE_0)); + mpic->num_cpus = ((greg_feature & MPIC_GREG_FEATURE_LAST_CPU_MASK) >> MPIC_GREG_FEATURE_LAST_CPU_SHIFT) + 1; if (isu_size == 0) - mpic->num_sources = ((reg & MPIC_GREG_FEATURE_LAST_SRC_MASK) - >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT) + 1; + mpic->num_sources = + ((greg_feature & MPIC_GREG_FEATURE_LAST_SRC_MASK) + >> MPIC_GREG_FEATURE_LAST_SRC_SHIFT) + 1; /* Map the per-CPU registers */ for (i = 0; i < mpic->num_cpus; i++) { @@ -1161,7 +1165,7 @@ struct mpic * __init mpic_alloc(struct device_node *node, mpic->isu_mask = (1 << mpic->isu_shift) - 1; /* Display version */ - switch (reg & MPIC_GREG_FEATURE_VERSION_MASK) { + switch (greg_feature & MPIC_GREG_FEATURE_VERSION_MASK) { case 1: vers = "1.0"; break; @@ -1321,7 +1325,7 @@ void __init mpic_set_serial_int(struct mpic *mpic, int enable) void mpic_irq_set_priority(unsigned int irq, unsigned int pri) { - int is_ipi; + unsigned int is_ipi; struct mpic *mpic = mpic_find(irq, &is_ipi); unsigned int src = mpic_irq_to_hw(irq); unsigned long flags; @@ -1344,7 +1348,7 @@ void mpic_irq_set_priority(unsigned int irq, unsigned int pri) unsigned int mpic_irq_get_priority(unsigned int irq) { - int is_ipi; + unsigned int is_ipi; struct mpic *mpic = mpic_find(irq, &is_ipi); unsigned int src = mpic_irq_to_hw(irq); unsigned long flags; -- cgit v1.2.3 From 50530378161fa8d7837243119ed9140ee65e55d4 Mon Sep 17 00:00:00 2001 From: Anton Vorontsov Date: Fri, 22 Feb 2008 06:45:08 +1100 Subject: [POWERPC] 8xx: Timebase frequency should not depend on bus-frequency m8xx_setup.c says: /* Force all 8xx processors to use divide by 16 processor clock. */ And at the same time it is using bus-frequency for calculating timebase. It is okay for most setups because bus-frequency is equal to clock-frequency. The problem emerges when cpu frequency is > 66MHz, quoting u-boot/cpu/mpc8xx/speed.c: if (gd->cpu_clk <= 66000000) { sccr_reg |= SCCR_EBDF00; /* bus division factor = 1 */ gd->bus_clk = gd->cpu_clk; } else { sccr_reg |= SCCR_EBDF01; /* bus division factor = 2 */ gd->bus_clk = gd->cpu_clk / 2; } So in case of cpu clock > 66MHz, bus_clk = cpu_clk / 2. An then, from Linux, we calculate timebase frequency as tb_freq = bus_clk / 16, that is cpu_clk / 2 / 16, which is wrong. This fixes the system time drifting problem on the EP885C board running at 133MHz. Signed-off-by: Anton Vorontsov Signed-off-by: Paul Mackerras --- arch/powerpc/platforms/8xx/m8xx_setup.c | 7 +------ 1 file changed, 1 insertion(+), 6 deletions(-) diff --git a/arch/powerpc/platforms/8xx/m8xx_setup.c b/arch/powerpc/platforms/8xx/m8xx_setup.c index 184f998d1be2..0d9f75c74f8c 100644 --- a/arch/powerpc/platforms/8xx/m8xx_setup.c +++ b/arch/powerpc/platforms/8xx/m8xx_setup.c @@ -111,17 +111,12 @@ void __init mpc8xx_calibrate_decr(void) /* Processor frequency is MHz. */ - ppc_tb_freq = 50000000; - if (!get_freq("bus-frequency", &ppc_tb_freq)) { - printk(KERN_ERR "WARNING: Estimating decrementer frequency " - "(not found)\n"); - } - ppc_tb_freq /= 16; ppc_proc_freq = 50000000; if (!get_freq("clock-frequency", &ppc_proc_freq)) printk(KERN_ERR "WARNING: Estimating processor frequency " "(not found)\n"); + ppc_tb_freq = ppc_proc_freq / 16; printk("Decrementer Frequency = 0x%lx\n", ppc_tb_freq); /* Perform some more timer/timebase initialization. 
This used -- cgit v1.2.3 From 6a2d322e4b81edc2ab35573f1c52f93d1d16eebb Mon Sep 17 00:00:00 2001 From: Benjamin Herrenschmidt Date: Fri, 22 Feb 2008 14:26:43 +1100 Subject: [POWERPC] Fix thinko in cpu_thread_mask_to_cores() The function cpu_thread_mask_to_cores() which returns a cpumask of one and only one thread enabled for a given core has a bug as it's shifting things in the wrong direction. Note: The implementation is still sub-optimal in the sense that for a given core, the thread set in the result may not be any of the threads set in the input, which can lead to more IPIs then strictly necessary, but it isn't incorrect per-se. I'll improve that later. Signed-off-by: Benjamin Herrenschmidt Signed-off-by: Paul Mackerras --- include/asm-powerpc/cputhreads.h | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/include/asm-powerpc/cputhreads.h b/include/asm-powerpc/cputhreads.h index 8485c28b5f47..fb11b0c459b8 100644 --- a/include/asm-powerpc/cputhreads.h +++ b/include/asm-powerpc/cputhreads.h @@ -35,7 +35,7 @@ static inline cpumask_t cpu_thread_mask_to_cores(cpumask_t threads) res = CPU_MASK_NONE; for (i = 0; i < NR_CPUS; i += threads_per_core) { - cpus_shift_right(tmp, threads_core_mask, i); + cpus_shift_left(tmp, threads_core_mask, i); if (cpus_intersects(threads, tmp)) cpu_set(i, res); } -- cgit v1.2.3 From afea3278f73c14271ee60ca7593ad74b7a946486 Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Wed, 5 Mar 2008 12:11:48 -0600 Subject: pasemi_mac: Move RX/TX section enablement to dma_lib Also stop both rx and tx sections before changing the configuration of the dma device during init. Signed-off-by: Olof Johansson Acked-by: Jeff Garzik --- arch/powerpc/platforms/pasemi/dma_lib.c | 30 ++++++++++++++++++++++++++++++ drivers/net/pasemi_mac.c | 6 ------ include/asm-powerpc/pasemi_dma.h | 7 ++++++- 3 files changed, 36 insertions(+), 7 deletions(-) diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c index c529d8dff395..48cb7c99962d 100644 --- a/arch/powerpc/platforms/pasemi/dma_lib.c +++ b/arch/powerpc/platforms/pasemi/dma_lib.c @@ -17,6 +17,7 @@ * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ +#include #include #include #include @@ -410,6 +411,7 @@ int pasemi_dma_init(void) struct resource res; struct device_node *dn; int i, intf, err = 0; + unsigned long timeout; u32 tmp; if (!machine_is(pasemi)) @@ -478,6 +480,34 @@ int pasemi_dma_init(void) for (i = 0; i < MAX_RXCH; i++) __set_bit(i, rxch_free); + timeout = jiffies + HZ; + pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, 0); + while (pasemi_read_dma_reg(PAS_DMA_COM_RXSTA) & 1) { + if (time_after(jiffies, timeout)) { + pr_warning("Warning: Could not disable RX section\n"); + break; + } + } + + timeout = jiffies + HZ; + pasemi_write_dma_reg(PAS_DMA_COM_TXCMD, 0); + while (pasemi_read_dma_reg(PAS_DMA_COM_TXSTA) & 1) { + if (time_after(jiffies, timeout)) { + pr_warning("Warning: Could not disable TX section\n"); + break; + } + } + + /* setup resource allocations for the different DMA sections */ + tmp = pasemi_read_dma_reg(PAS_DMA_COM_CFG); + pasemi_write_dma_reg(PAS_DMA_COM_CFG, tmp | 0x18000000); + + /* enable tx section */ + pasemi_write_dma_reg(PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN); + + /* enable rx section */ + pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN); + printk(KERN_INFO "PA Semi PWRficient DMA library initialized " "(%d tx, %d rx channels)\n", num_txch, num_rxch); diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c index 
2e39e0285d8f..6ea822addde5 100644 --- a/drivers/net/pasemi_mac.c +++ b/drivers/net/pasemi_mac.c @@ -1043,12 +1043,6 @@ static int pasemi_mac_open(struct net_device *dev) unsigned int flags; int ret; - /* enable rx section */ - write_dma_reg(PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN); - - /* enable tx section */ - write_dma_reg(PAS_DMA_COM_TXCMD, PAS_DMA_COM_TXCMD_EN); - flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) | PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) | PAS_MAC_CFG_TXP_TIFT(8) | PAS_MAC_CFG_TXP_TIFG(12); diff --git a/include/asm-powerpc/pasemi_dma.h b/include/asm-powerpc/pasemi_dma.h index b4526ff3a50d..35577b6dec41 100644 --- a/include/asm-powerpc/pasemi_dma.h +++ b/include/asm-powerpc/pasemi_dma.h @@ -1,5 +1,5 @@ /* - * Copyright (C) 2006 PA Semi, Inc + * Copyright (C) 2006-2008 PA Semi, Inc * * Hardware register layout and descriptor formats for the on-board * DMA engine on PA Semi PWRficient. Used by ethernet, function and security @@ -40,6 +40,11 @@ enum { PAS_DMA_COM_TXSTA = 0x104, /* Transmit Status Register */ PAS_DMA_COM_RXCMD = 0x108, /* Receive Command Register */ PAS_DMA_COM_RXSTA = 0x10c, /* Receive Status Register */ + PAS_DMA_COM_CFG = 0x114, /* Common config reg */ + PAS_DMA_TXF_SFLG0 = 0x140, /* Set flags */ + PAS_DMA_TXF_SFLG1 = 0x144, /* Set flags */ + PAS_DMA_TXF_CFLG0 = 0x148, /* Set flags */ + PAS_DMA_TXF_CFLG1 = 0x14c, /* Set flags */ }; -- cgit v1.2.3 From f37203b5ccaf6d58cb5ca6b1840e40f3b587109c Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Wed, 5 Mar 2008 12:25:45 -0600 Subject: [POWERPC] pasemi: Add flag management functions to dma_lib Add functions to manage the channel syncronization flags to dma_lib Signed-off-by: Olof Johansson Acked-by: Jeff Garzik --- arch/powerpc/platforms/pasemi/dma_lib.c | 74 +++++++++++++++++++++++++++++++++ include/asm-powerpc/pasemi_dma.h | 6 +++ 2 files changed, 80 insertions(+) diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c index 48cb7c99962d..07e4e25ef625 100644 --- a/arch/powerpc/platforms/pasemi/dma_lib.c +++ b/arch/powerpc/platforms/pasemi/dma_lib.c @@ -27,6 +27,7 @@ #define MAX_TXCH 64 #define MAX_RXCH 64 +#define MAX_FLAGS 64 static struct pasdma_status *dma_status; @@ -44,6 +45,7 @@ static struct pci_dev *dma_pdev; static DECLARE_BITMAP(txch_free, MAX_TXCH); static DECLARE_BITMAP(rxch_free, MAX_RXCH); +static DECLARE_BITMAP(flags_free, MAX_FLAGS); /* pasemi_read_iob_reg - read IOB register * @reg: Register to read (offset into PCI CFG space) @@ -374,6 +376,71 @@ void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size, } EXPORT_SYMBOL(pasemi_dma_free_buf); +/* pasemi_dma_alloc_flag - Allocate a flag (event) for channel syncronization + * + * Allocates a flag for use with channel syncronization (event descriptors). + * Returns allocated flag (0-63), < 0 on error. + */ +int pasemi_dma_alloc_flag(void) +{ + int bit; + +retry: + bit = find_next_bit(flags_free, MAX_FLAGS, 0); + if (bit >= MAX_FLAGS) + return -ENOSPC; + if (!test_and_clear_bit(bit, flags_free)) + goto retry; + + return bit; +} +EXPORT_SYMBOL(pasemi_dma_alloc_flag); + + +/* pasemi_dma_free_flag - Deallocates a flag (event) + * @flag: Flag number to deallocate + * + * Frees up a flag so it can be reused for other purposes. 
+ */ +void pasemi_dma_free_flag(int flag) +{ + BUG_ON(test_bit(flag, flags_free)); + BUG_ON(flag >= MAX_FLAGS); + set_bit(flag, flags_free); +} +EXPORT_SYMBOL(pasemi_dma_free_flag); + + +/* pasemi_dma_set_flag - Sets a flag (event) to 1 + * @flag: Flag number to set active + * + * Sets the flag provided to 1. + */ +void pasemi_dma_set_flag(int flag) +{ + BUG_ON(flag >= MAX_FLAGS); + if (flag < 32) + pasemi_write_dma_reg(PAS_DMA_TXF_SFLG0, 1 << flag); + else + pasemi_write_dma_reg(PAS_DMA_TXF_SFLG1, 1 << flag); +} +EXPORT_SYMBOL(pasemi_dma_set_flag); + +/* pasemi_dma_clear_flag - Sets a flag (event) to 0 + * @flag: Flag number to set inactive + * + * Sets the flag provided to 0. + */ +void pasemi_dma_clear_flag(int flag) +{ + BUG_ON(flag >= MAX_FLAGS); + if (flag < 32) + pasemi_write_dma_reg(PAS_DMA_TXF_CFLG0, 1 << flag); + else + pasemi_write_dma_reg(PAS_DMA_TXF_CFLG1, 1 << flag); +} +EXPORT_SYMBOL(pasemi_dma_clear_flag); + static void *map_onedev(struct pci_dev *p, int index) { struct device_node *dn; @@ -508,6 +575,13 @@ int pasemi_dma_init(void) /* enable rx section */ pasemi_write_dma_reg(PAS_DMA_COM_RXCMD, PAS_DMA_COM_RXCMD_EN); + for (i = 0; i < MAX_FLAGS; i++) + __set_bit(i, flags_free); + + /* clear all status flags */ + pasemi_write_dma_reg(PAS_DMA_TXF_CFLG0, 0xffffffff); + pasemi_write_dma_reg(PAS_DMA_TXF_CFLG1, 0xffffffff); + printk(KERN_INFO "PA Semi PWRficient DMA library initialized " "(%d tx, %d rx channels)\n", num_txch, num_rxch); diff --git a/include/asm-powerpc/pasemi_dma.h b/include/asm-powerpc/pasemi_dma.h index 35577b6dec41..ffd413d7dfb0 100644 --- a/include/asm-powerpc/pasemi_dma.h +++ b/include/asm-powerpc/pasemi_dma.h @@ -466,6 +466,12 @@ extern void *pasemi_dma_alloc_buf(struct pasemi_dmachan *chan, int size, extern void pasemi_dma_free_buf(struct pasemi_dmachan *chan, int size, dma_addr_t *handle); +/* Routines to allocate flags (events) for channel syncronization */ +extern int pasemi_dma_alloc_flag(void); +extern void pasemi_dma_free_flag(int flag); +extern void pasemi_dma_set_flag(int flag); +extern void pasemi_dma_clear_flag(int flag); + /* Initialize the library, must be called before any other functions */ extern int pasemi_dma_init(void); -- cgit v1.2.3 From dda56df08a28404004bca313d2a1ba1597acd755 Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Wed, 5 Mar 2008 12:25:59 -0600 Subject: [POWERPC] pasemi: Add function engine management functions to dma_lib Used to allocate functions for crypto/checksum offload. 
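As a rough usage sketch (illustrative only, not taken from a caller in this series): a driver reserves an engine, points a TX channel at it, and releases it on teardown. Error handling is trimmed and the surrounding channel setup is assumed:

    #include <asm/pasemi_dma.h>

    static int example_claim_fun_engine(void)
    {
            int fun = pasemi_dma_alloc_fun();

            if (fun < 0)
                    return fun;     /* -ENOSPC when all engines are in use */

            /*
             * ... configure a TX channel with
             * PAS_DMA_TXCHAN_CFG_TY_FUNC | PAS_DMA_TXCHAN_CFG_TATTR(fun) ...
             */

            pasemi_dma_free_fun(fun);
            return 0;
    }
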
Signed-off-by: Olof Johansson Acked-by: Jeff Garzik --- arch/powerpc/platforms/pasemi/dma_lib.c | 40 +++++++++++++++++++++++++++++++++ include/asm-powerpc/pasemi_dma.h | 4 ++++ 2 files changed, 44 insertions(+) diff --git a/arch/powerpc/platforms/pasemi/dma_lib.c b/arch/powerpc/platforms/pasemi/dma_lib.c index 07e4e25ef625..217af321b0ca 100644 --- a/arch/powerpc/platforms/pasemi/dma_lib.c +++ b/arch/powerpc/platforms/pasemi/dma_lib.c @@ -28,6 +28,7 @@ #define MAX_TXCH 64 #define MAX_RXCH 64 #define MAX_FLAGS 64 +#define MAX_FUN 8 static struct pasdma_status *dma_status; @@ -46,6 +47,7 @@ static struct pci_dev *dma_pdev; static DECLARE_BITMAP(txch_free, MAX_TXCH); static DECLARE_BITMAP(rxch_free, MAX_RXCH); static DECLARE_BITMAP(flags_free, MAX_FLAGS); +static DECLARE_BITMAP(fun_free, MAX_FUN); /* pasemi_read_iob_reg - read IOB register * @reg: Register to read (offset into PCI CFG space) @@ -441,6 +443,41 @@ void pasemi_dma_clear_flag(int flag) } EXPORT_SYMBOL(pasemi_dma_clear_flag); +/* pasemi_dma_alloc_fun - Allocate a function engine + * + * Allocates a function engine to use for crypto/checksum offload + * Returns allocated engine (0-8), < 0 on error. + */ +int pasemi_dma_alloc_fun(void) +{ + int bit; + +retry: + bit = find_next_bit(fun_free, MAX_FLAGS, 0); + if (bit >= MAX_FLAGS) + return -ENOSPC; + if (!test_and_clear_bit(bit, fun_free)) + goto retry; + + return bit; +} +EXPORT_SYMBOL(pasemi_dma_alloc_fun); + + +/* pasemi_dma_free_fun - Deallocates a function engine + * @flag: Engine number to deallocate + * + * Frees up a function engine so it can be used for other purposes. + */ +void pasemi_dma_free_fun(int fun) +{ + BUG_ON(test_bit(fun, fun_free)); + BUG_ON(fun >= MAX_FLAGS); + set_bit(fun, fun_free); +} +EXPORT_SYMBOL(pasemi_dma_free_fun); + + static void *map_onedev(struct pci_dev *p, int index) { struct device_node *dn; @@ -578,6 +615,9 @@ int pasemi_dma_init(void) for (i = 0; i < MAX_FLAGS; i++) __set_bit(i, flags_free); + for (i = 0; i < MAX_FUN; i++) + __set_bit(i, fun_free); + /* clear all status flags */ pasemi_write_dma_reg(PAS_DMA_TXF_CFLG0, 0xffffffff); pasemi_write_dma_reg(PAS_DMA_TXF_CFLG1, 0xffffffff); diff --git a/include/asm-powerpc/pasemi_dma.h b/include/asm-powerpc/pasemi_dma.h index ffd413d7dfb0..facb2bdea45c 100644 --- a/include/asm-powerpc/pasemi_dma.h +++ b/include/asm-powerpc/pasemi_dma.h @@ -472,6 +472,10 @@ extern void pasemi_dma_free_flag(int flag); extern void pasemi_dma_set_flag(int flag); extern void pasemi_dma_clear_flag(int flag); +/* Routines to allocate function engines */ +extern int pasemi_dma_alloc_fun(void); +extern void pasemi_dma_free_fun(int fun); + /* Initialize the library, must be called before any other functions */ extern int pasemi_dma_init(void); -- cgit v1.2.3 From 8d636d8bc5ffcdbf49c72aafcda9ddab7ccb2f41 Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Wed, 5 Mar 2008 16:34:16 -0600 Subject: pasemi_mac: jumbo frame support First cut at jumbo frame support. To support large MTU, one or several separate channels must be allocated to calculate the TCP/UDP checksum separately, since the mac lacks enough buffers to hold a whole packet while it's being calculated. Furthermore, it seems that a single function channel is not quite enough to feed one of the 10Gig links, so allocate two channels for XAUI interfaces. 
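The cut-over between the MAC's inline checksum engine and the separate checksum channels is a plain length check in pasemi_mac_start_tx(); the helper below merely restates it for clarity and is not part of the patch:

    #include <linux/skbuff.h>

    /*
     * Frames of up to 1540 bytes still use the MAC's inline TCP/UDP
     * checksum; larger CHECKSUM_PARTIAL frames are queued through one of
     * the function-engine checksum channels set up by this patch.
     */
    static bool pe_frame_needs_csum_channel(const struct sk_buff *skb)
    {
            return skb->ip_summed == CHECKSUM_PARTIAL && skb->len > 1540;
    }
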
Signed-off-by: Olof Johansson Acked-by: Jeff Garzik --- drivers/net/pasemi_mac.c | 294 ++++++++++++++++++++++++++++++++++----- drivers/net/pasemi_mac.h | 15 +- include/asm-powerpc/pasemi_dma.h | 60 +++++++- 3 files changed, 333 insertions(+), 36 deletions(-) diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c index 6ea822addde5..54904ad29ea7 100644 --- a/drivers/net/pasemi_mac.c +++ b/drivers/net/pasemi_mac.c @@ -59,11 +59,12 @@ /* Must be a power of two */ #define RX_RING_SIZE 2048 #define TX_RING_SIZE 4096 +#define CS_RING_SIZE (TX_RING_SIZE*2) #define LRO_MAX_AGGR 64 #define PE_MIN_MTU 64 -#define PE_MAX_MTU 1500 +#define PE_MAX_MTU 9000 #define PE_DEF_MTU ETH_DATA_LEN #define DEFAULT_MSG_ENABLE \ @@ -81,6 +82,7 @@ #define RX_DESC(rx, num) ((rx)->chan.ring_virt[(num) & (RX_RING_SIZE-1)]) #define RX_DESC_INFO(rx, num) ((rx)->ring_info[(num) & (RX_RING_SIZE-1)]) #define RX_BUFF(rx, num) ((rx)->buffers[(num) & (RX_RING_SIZE-1)]) +#define CS_DESC(cs, num) ((cs)->chan.ring_virt[(num) & (CS_RING_SIZE-1)]) #define RING_USED(ring) (((ring)->next_to_fill - (ring)->next_to_clean) \ & ((ring)->size - 1)) @@ -322,6 +324,103 @@ static int pasemi_mac_unmap_tx_skb(struct pasemi_mac *mac, return (nfrags + 3) & ~1; } +static struct pasemi_mac_csring *pasemi_mac_setup_csring(struct pasemi_mac *mac) +{ + struct pasemi_mac_csring *ring; + u32 val; + unsigned int cfg; + int chno; + + ring = pasemi_dma_alloc_chan(TXCHAN, sizeof(struct pasemi_mac_csring), + offsetof(struct pasemi_mac_csring, chan)); + + if (!ring) { + dev_err(&mac->pdev->dev, "Can't allocate checksum channel\n"); + goto out_chan; + } + + chno = ring->chan.chno; + + ring->size = CS_RING_SIZE; + ring->next_to_fill = 0; + + /* Allocate descriptors */ + if (pasemi_dma_alloc_ring(&ring->chan, CS_RING_SIZE)) + goto out_ring_desc; + + write_dma_reg(PAS_DMA_TXCHAN_BASEL(chno), + PAS_DMA_TXCHAN_BASEL_BRBL(ring->chan.ring_dma)); + val = PAS_DMA_TXCHAN_BASEU_BRBH(ring->chan.ring_dma >> 32); + val |= PAS_DMA_TXCHAN_BASEU_SIZ(CS_RING_SIZE >> 3); + + write_dma_reg(PAS_DMA_TXCHAN_BASEU(chno), val); + + ring->events[0] = pasemi_dma_alloc_flag(); + ring->events[1] = pasemi_dma_alloc_flag(); + if (ring->events[0] < 0 || ring->events[1] < 0) + goto out_flags; + + pasemi_dma_clear_flag(ring->events[0]); + pasemi_dma_clear_flag(ring->events[1]); + + ring->fun = pasemi_dma_alloc_fun(); + if (ring->fun < 0) + goto out_fun; + + cfg = PAS_DMA_TXCHAN_CFG_TY_FUNC | PAS_DMA_TXCHAN_CFG_UP | + PAS_DMA_TXCHAN_CFG_TATTR(ring->fun) | + PAS_DMA_TXCHAN_CFG_LPSQ | PAS_DMA_TXCHAN_CFG_LPDQ; + + if (translation_enabled()) + cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR; + + write_dma_reg(PAS_DMA_TXCHAN_CFG(chno), cfg); + + /* enable channel */ + pasemi_dma_start_chan(&ring->chan, PAS_DMA_TXCHAN_TCMDSTA_SZ | + PAS_DMA_TXCHAN_TCMDSTA_DB | + PAS_DMA_TXCHAN_TCMDSTA_DE | + PAS_DMA_TXCHAN_TCMDSTA_DA); + + return ring; + +out_fun: +out_flags: + if (ring->events[0] >= 0) + pasemi_dma_free_flag(ring->events[0]); + if (ring->events[1] >= 0) + pasemi_dma_free_flag(ring->events[1]); + pasemi_dma_free_ring(&ring->chan); +out_ring_desc: + pasemi_dma_free_chan(&ring->chan); +out_chan: + + return NULL; +} + +static void pasemi_mac_setup_csrings(struct pasemi_mac *mac) +{ + int i; + mac->cs[0] = pasemi_mac_setup_csring(mac); + if (mac->type == MAC_TYPE_XAUI) + mac->cs[1] = pasemi_mac_setup_csring(mac); + else + mac->cs[1] = 0; + + for (i = 0; i < MAX_CS; i++) + if (mac->cs[i]) + mac->num_cs++; +} + +static void pasemi_mac_free_csring(struct pasemi_mac_csring *csring) +{ + 
pasemi_dma_stop_chan(&csring->chan); + pasemi_dma_free_flag(csring->events[0]); + pasemi_dma_free_flag(csring->events[1]); + pasemi_dma_free_ring(&csring->chan); + pasemi_dma_free_chan(&csring->chan); +} + static int pasemi_mac_setup_rx_resources(const struct net_device *dev) { struct pasemi_mac_rxring *ring; @@ -445,7 +544,7 @@ pasemi_mac_setup_tx_resources(const struct net_device *dev) cfg = PAS_DMA_TXCHAN_CFG_TY_IFACE | PAS_DMA_TXCHAN_CFG_TATTR(mac->dma_if) | PAS_DMA_TXCHAN_CFG_UP | - PAS_DMA_TXCHAN_CFG_WT(2); + PAS_DMA_TXCHAN_CFG_WT(4); if (translation_enabled()) cfg |= PAS_DMA_TXCHAN_CFG_TRD | PAS_DMA_TXCHAN_CFG_TRR; @@ -810,13 +909,21 @@ restart: u64 mactx = TX_DESC(txring, i); struct sk_buff *skb; - skb = TX_DESC_INFO(txring, i+1).skb; - nr_frags = TX_DESC_INFO(txring, i).dma; - if ((mactx & XCT_MACTX_E) || (*chan->status & PAS_STATUS_ERROR)) pasemi_mac_tx_error(mac, mactx); + /* Skip over control descriptors */ + if (!(mactx & XCT_MACTX_LLEN_M)) { + TX_DESC(txring, i) = 0; + TX_DESC(txring, i+1) = 0; + buf_count = 2; + continue; + } + + skb = TX_DESC_INFO(txring, i+1).skb; + nr_frags = TX_DESC_INFO(txring, i).dma; + if (unlikely(mactx & XCT_MACTX_O)) /* Not yet transmitted */ break; @@ -1058,6 +1165,12 @@ static int pasemi_mac_open(struct net_device *dev) if (!mac->tx) goto out_tx_ring; + if (dev->mtu > 1500) { + pasemi_mac_setup_csrings(mac); + if (!mac->num_cs) + goto out_tx_ring; + } + /* 0x3ff with 33MHz clock is about 31us */ write_iob_reg(PAS_IOB_DMA_COM_TIMEOUTCFG, PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0x3ff)); @@ -1241,7 +1354,7 @@ static int pasemi_mac_close(struct net_device *dev) { struct pasemi_mac *mac = netdev_priv(dev); unsigned int sta; - int rxch, txch; + int rxch, txch, i; rxch = rx_ring(mac)->chan.chno; txch = tx_ring(mac)->chan.chno; @@ -1286,6 +1399,9 @@ static int pasemi_mac_close(struct net_device *dev) free_irq(mac->tx->chan.irq, mac->tx); free_irq(mac->rx->chan.irq, mac->rx); + for (i = 0; i < mac->num_cs; i++) + pasemi_mac_free_csring(mac->cs[i]); + /* Free resources */ pasemi_mac_free_rx_resources(mac); pasemi_mac_free_tx_resources(mac); @@ -1293,35 +1409,113 @@ static int pasemi_mac_close(struct net_device *dev) return 0; } +static void pasemi_mac_queue_csdesc(const struct sk_buff *skb, + const dma_addr_t *map, + const unsigned int *map_size, + struct pasemi_mac_txring *txring, + struct pasemi_mac_csring *csring) +{ + u64 fund; + dma_addr_t cs_dest; + const int nh_off = skb_network_offset(skb); + const int nh_len = skb_network_header_len(skb); + const int nfrags = skb_shinfo(skb)->nr_frags; + int cs_size, i, fill, hdr, cpyhdr, evt; + dma_addr_t csdma; + + fund = XCT_FUN_ST | XCT_FUN_RR_8BRES | + XCT_FUN_O | XCT_FUN_FUN(csring->fun) | + XCT_FUN_CRM_SIG | XCT_FUN_LLEN(skb->len - nh_off) | + XCT_FUN_SHL(nh_len >> 2) | XCT_FUN_SE; + + switch (ip_hdr(skb)->protocol) { + case IPPROTO_TCP: + fund |= XCT_FUN_SIG_TCP4; + /* TCP checksum is 16 bytes into the header */ + cs_dest = map[0] + skb_transport_offset(skb) + 16; + break; + case IPPROTO_UDP: + fund |= XCT_FUN_SIG_UDP4; + /* UDP checksum is 6 bytes into the header */ + cs_dest = map[0] + skb_transport_offset(skb) + 6; + break; + default: + BUG(); + } + + /* Do the checksum offloaded */ + fill = csring->next_to_fill; + hdr = fill; + + CS_DESC(csring, fill++) = fund; + /* Room for 8BRES. 
Checksum result is really 2 bytes into it */ + csdma = csring->chan.ring_dma + (fill & (CS_RING_SIZE-1)) * 8 + 2; + CS_DESC(csring, fill++) = 0; + + CS_DESC(csring, fill) = XCT_PTR_LEN(map_size[0]-nh_off) | XCT_PTR_ADDR(map[0]+nh_off); + for (i = 1; i <= nfrags; i++) + CS_DESC(csring, fill+i) = XCT_PTR_LEN(map_size[i]) | XCT_PTR_ADDR(map[i]); + + fill += i; + if (fill & 1) + fill++; + + /* Copy the result into the TCP packet */ + cpyhdr = fill; + CS_DESC(csring, fill++) = XCT_FUN_O | XCT_FUN_FUN(csring->fun) | + XCT_FUN_LLEN(2) | XCT_FUN_SE; + CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(cs_dest) | XCT_PTR_T; + CS_DESC(csring, fill++) = XCT_PTR_LEN(2) | XCT_PTR_ADDR(csdma); + fill++; + + evt = !csring->last_event; + csring->last_event = evt; + + /* Event handshaking with MAC TX */ + CS_DESC(csring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O | + CTRL_CMD_ETYPE_SET | CTRL_CMD_REG(csring->events[evt]); + CS_DESC(csring, fill++) = 0; + CS_DESC(csring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O | + CTRL_CMD_ETYPE_WCLR | CTRL_CMD_REG(csring->events[!evt]); + CS_DESC(csring, fill++) = 0; + csring->next_to_fill = fill & (CS_RING_SIZE-1); + + cs_size = fill - hdr; + write_dma_reg(PAS_DMA_TXCHAN_INCR(csring->chan.chno), (cs_size) >> 1); + + /* TX-side event handshaking */ + fill = txring->next_to_fill; + TX_DESC(txring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O | + CTRL_CMD_ETYPE_WSET | CTRL_CMD_REG(csring->events[evt]); + TX_DESC(txring, fill++) = 0; + TX_DESC(txring, fill++) = CTRL_CMD_T | CTRL_CMD_META_EVT | CTRL_CMD_O | + CTRL_CMD_ETYPE_CLR | CTRL_CMD_REG(csring->events[!evt]); + TX_DESC(txring, fill++) = 0; + txring->next_to_fill = fill; + + write_dma_reg(PAS_DMA_TXCHAN_INCR(txring->chan.chno), 2); + + return; +} + static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) { - struct pasemi_mac *mac = netdev_priv(dev); - struct pasemi_mac_txring *txring; - u64 dflags, mactx; + struct pasemi_mac * const mac = netdev_priv(dev); + struct pasemi_mac_txring * const txring = tx_ring(mac); + struct pasemi_mac_csring *csring; + u64 dflags = 0; + u64 mactx; dma_addr_t map[MAX_SKB_FRAGS+1]; unsigned int map_size[MAX_SKB_FRAGS+1]; unsigned long flags; int i, nfrags; int fill; + const int nh_off = skb_network_offset(skb); + const int nh_len = skb_network_header_len(skb); - dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_CRC_PAD; - - if (skb->ip_summed == CHECKSUM_PARTIAL) { - const unsigned char *nh = skb_network_header(skb); + prefetch(&txring->ring_info); - switch (ip_hdr(skb)->protocol) { - case IPPROTO_TCP: - dflags |= XCT_MACTX_CSUM_TCP; - dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2); - dflags |= XCT_MACTX_IPO(nh - skb->data); - break; - case IPPROTO_UDP: - dflags |= XCT_MACTX_CSUM_UDP; - dflags |= XCT_MACTX_IPH(skb_network_header_len(skb) >> 2); - dflags |= XCT_MACTX_IPO(nh - skb->data); - break; - } - } + dflags = XCT_MACTX_O | XCT_MACTX_ST | XCT_MACTX_CRC_PAD; nfrags = skb_shinfo(skb)->nr_frags; @@ -1344,24 +1538,46 @@ static int pasemi_mac_start_tx(struct sk_buff *skb, struct net_device *dev) } } - mactx = dflags | XCT_MACTX_LLEN(skb->len); + if (skb->ip_summed == CHECKSUM_PARTIAL && skb->len <= 1540) { + switch (ip_hdr(skb)->protocol) { + case IPPROTO_TCP: + dflags |= XCT_MACTX_CSUM_TCP; + dflags |= XCT_MACTX_IPH(nh_len >> 2); + dflags |= XCT_MACTX_IPO(nh_off); + break; + case IPPROTO_UDP: + dflags |= XCT_MACTX_CSUM_UDP; + dflags |= XCT_MACTX_IPH(nh_len >> 2); + dflags |= XCT_MACTX_IPO(nh_off); + break; + default: + 
WARN_ON(1); + } + } - txring = tx_ring(mac); + mactx = dflags | XCT_MACTX_LLEN(skb->len); spin_lock_irqsave(&txring->lock, flags); - fill = txring->next_to_fill; - /* Avoid stepping on the same cache line that the DMA controller * is currently about to send, so leave at least 8 words available. * Total free space needed is mactx + fragments + 8 */ - if (RING_AVAIL(txring) < nfrags + 10) { + if (RING_AVAIL(txring) < nfrags + 14) { /* no room -- stop the queue and wait for tx intr */ netif_stop_queue(dev); goto out_err; } + /* Queue up checksum + event descriptors, if needed */ + if (mac->num_cs && skb->ip_summed == CHECKSUM_PARTIAL && skb->len > 1540) { + csring = mac->cs[mac->last_cs]; + mac->last_cs = (mac->last_cs + 1) % mac->num_cs; + + pasemi_mac_queue_csdesc(skb, map, map_size, txring, csring); + } + + fill = txring->next_to_fill; TX_DESC(txring, fill) = mactx; TX_DESC_INFO(txring, fill).dma = nfrags; fill++; @@ -1439,8 +1655,9 @@ static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu) { struct pasemi_mac *mac = netdev_priv(dev); unsigned int reg; - unsigned int rcmdsta; + unsigned int rcmdsta = 0; int running; + int ret = 0; if (new_mtu < PE_MIN_MTU || new_mtu > PE_MAX_MTU) return -EINVAL; @@ -1462,6 +1679,16 @@ static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu) pasemi_mac_pause_rxint(mac); pasemi_mac_clean_rx(rx_ring(mac), RX_RING_SIZE); pasemi_mac_free_rx_buffers(mac); + + } + + /* Setup checksum channels if large MTU and none already allocated */ + if (new_mtu > 1500 && !mac->num_cs) { + pasemi_mac_setup_csrings(mac); + if (!mac->num_cs) { + ret = -ENOMEM; + goto out; + } } /* Change maxf, i.e. what size frames are accepted. @@ -1476,6 +1703,7 @@ static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu) /* MTU + ETH_HLEN + VLAN_HLEN + 2 64B cachelines */ mac->bufsz = new_mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128; +out: if (running) { write_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if), rcmdsta | PAS_DMA_RXINT_RCMDSTA_EN); @@ -1488,7 +1716,7 @@ static int pasemi_mac_change_mtu(struct net_device *dev, int new_mtu) pasemi_mac_intf_enable(mac); } - return 0; + return ret; } static int __devinit diff --git a/drivers/net/pasemi_mac.h b/drivers/net/pasemi_mac.h index 99e7b9329a6f..90c51670a1e7 100644 --- a/drivers/net/pasemi_mac.h +++ b/drivers/net/pasemi_mac.h @@ -27,6 +27,7 @@ #include #define MAX_LRO_DESCRIPTORS 8 +#define MAX_CS 2 struct pasemi_mac_txring { struct pasemi_dmachan chan; /* Must be first */ @@ -51,6 +52,15 @@ struct pasemi_mac_rxring { struct pasemi_mac *mac; /* Needed in intr handler */ }; +struct pasemi_mac_csring { + struct pasemi_dmachan chan; + unsigned int size; + unsigned int next_to_fill; + int events[2]; + int last_event; + int fun; +}; + struct pasemi_mac { struct net_device *netdev; struct pci_dev *pdev; @@ -60,10 +70,12 @@ struct pasemi_mac { struct napi_struct napi; int bufsz; /* RX ring buffer size */ + int last_cs; + int num_cs; + u32 dma_if; u8 type; #define MAC_TYPE_GMAC 1 #define MAC_TYPE_XAUI 2 - u32 dma_if; u8 mac_addr[6]; @@ -74,6 +86,7 @@ struct pasemi_mac { struct pasemi_mac_txring *tx; struct pasemi_mac_rxring *rx; + struct pasemi_mac_csring *cs[MAX_CS]; char tx_irq_name[10]; /* "eth%d tx" */ char rx_irq_name[10]; /* "eth%d rx" */ int link; diff --git a/include/asm-powerpc/pasemi_dma.h b/include/asm-powerpc/pasemi_dma.h index facb2bdea45c..19fd7933e2d9 100644 --- a/include/asm-powerpc/pasemi_dma.h +++ b/include/asm-powerpc/pasemi_dma.h @@ -128,11 +128,16 @@ enum { #define 
PAS_DMA_TXCHAN_TCMDSTA_DA 0x00000100 #define PAS_DMA_TXCHAN_CFG(c) (0x304+(c)*_PAS_DMA_TXCHAN_STRIDE) #define PAS_DMA_TXCHAN_CFG_TY_IFACE 0x00000000 /* Type = interface */ +#define PAS_DMA_TXCHAN_CFG_TY_COPY 0x00000001 /* Type = copy only */ +#define PAS_DMA_TXCHAN_CFG_TY_FUNC 0x00000002 /* Type = function */ +#define PAS_DMA_TXCHAN_CFG_TY_XOR 0x00000003 /* Type = xor only */ #define PAS_DMA_TXCHAN_CFG_TATTR_M 0x0000003c #define PAS_DMA_TXCHAN_CFG_TATTR_S 2 #define PAS_DMA_TXCHAN_CFG_TATTR(x) (((x) << PAS_DMA_TXCHAN_CFG_TATTR_S) & \ PAS_DMA_TXCHAN_CFG_TATTR_M) -#define PAS_DMA_TXCHAN_CFG_WT_M 0x000001c0 +#define PAS_DMA_TXCHAN_CFG_LPDQ 0x00000800 +#define PAS_DMA_TXCHAN_CFG_LPSQ 0x00000400 +#define PAS_DMA_TXCHAN_CFG_WT_M 0x000003c0 #define PAS_DMA_TXCHAN_CFG_WT_S 6 #define PAS_DMA_TXCHAN_CFG_WT(x) (((x) << PAS_DMA_TXCHAN_CFG_WT_S) & \ PAS_DMA_TXCHAN_CFG_WT_M) @@ -399,11 +404,62 @@ enum { XCT_COPY_LLEN_M) #define XCT_COPY_SE 0x0000000000000001ull +/* Function descriptor fields */ +#define XCT_FUN_T 0x8000000000000000ull +#define XCT_FUN_ST 0x4000000000000000ull +#define XCT_FUN_RR_M 0x3000000000000000ull +#define XCT_FUN_RR_NORES 0x0000000000000000ull +#define XCT_FUN_RR_8BRES 0x1000000000000000ull +#define XCT_FUN_RR_24BRES 0x2000000000000000ull +#define XCT_FUN_RR_40BRES 0x3000000000000000ull +#define XCT_FUN_I 0x0800000000000000ull +#define XCT_FUN_O 0x0400000000000000ull +#define XCT_FUN_E 0x0200000000000000ull +#define XCT_FUN_FUN_M 0x01c0000000000000ull +#define XCT_FUN_FUN_S 54 +#define XCT_FUN_FUN(x) ((((long)(x)) << XCT_FUN_FUN_S) & XCT_FUN_FUN_M) +#define XCT_FUN_CRM_M 0x0038000000000000ull +#define XCT_FUN_CRM_NOP 0x0000000000000000ull +#define XCT_FUN_CRM_SIG 0x0008000000000000ull +#define XCT_FUN_LLEN_M 0x0007ffff00000000ull +#define XCT_FUN_LLEN_S 32 +#define XCT_FUN_LLEN(x) ((((long)(x)) << XCT_FUN_LLEN_S) & XCT_FUN_LLEN_M) +#define XCT_FUN_SHL_M 0x00000000f8000000ull +#define XCT_FUN_SHL_S 27 +#define XCT_FUN_SHL(x) ((((long)(x)) << XCT_FUN_SHL_S) & XCT_FUN_SHL_M) +#define XCT_FUN_CHL_M 0x0000000007c00000ull +#define XCT_FUN_HSZ_M 0x00000000003c0000ull +#define XCT_FUN_ALG_M 0x0000000000038000ull +#define XCT_FUN_HP 0x0000000000004000ull +#define XCT_FUN_BCM_M 0x0000000000003800ull +#define XCT_FUN_BCP_M 0x0000000000000600ull +#define XCT_FUN_SIG_M 0x00000000000001f0ull +#define XCT_FUN_SIG_TCP4 0x0000000000000140ull +#define XCT_FUN_SIG_TCP6 0x0000000000000150ull +#define XCT_FUN_SIG_UDP4 0x0000000000000160ull +#define XCT_FUN_SIG_UDP6 0x0000000000000170ull +#define XCT_FUN_A 0x0000000000000008ull +#define XCT_FUN_C 0x0000000000000004ull +#define XCT_FUN_AL2 0x0000000000000002ull +#define XCT_FUN_SE 0x0000000000000001ull + +/* Function descriptor 8byte result fields */ +#define XCT_FUNRES_8B_CS_M 0x0000ffff00000000ull +#define XCT_FUNRES_8B_CS_S 32 +#define XCT_FUNRES_8B_CRC_M 0x00000000ffffffffull +#define XCT_FUNRES_8B_CRC_S 0 + /* Control descriptor fields */ #define CTRL_CMD_T 0x8000000000000000ull #define CTRL_CMD_META_EVT 0x2000000000000000ull #define CTRL_CMD_O 0x0400000000000000ull -#define CTRL_CMD_REG_M 0x000000000000000full +#define CTRL_CMD_ETYPE_M 0x0038000000000000ull +#define CTRL_CMD_ETYPE_EXT 0x0000000000000000ull +#define CTRL_CMD_ETYPE_WSET 0x0020000000000000ull +#define CTRL_CMD_ETYPE_WCLR 0x0028000000000000ull +#define CTRL_CMD_ETYPE_SET 0x0030000000000000ull +#define CTRL_CMD_ETYPE_CLR 0x0038000000000000ull +#define CTRL_CMD_REG_M 0x000000000000007full #define CTRL_CMD_REG_S 0 #define CTRL_CMD_REG(x) ((((long)(x)) << CTRL_CMD_REG_S) & \ 
CTRL_CMD_REG_M) -- cgit v1.2.3 From 251567848f2f446f4356f969329a8188faf1fe90 Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Wed, 20 Feb 2008 20:57:58 -0600 Subject: pasemi_mac: Enable GSO by default Ethtool support will handle the runtime toggling, but we do quite a bit better with it on by default so just leave it on for now. Signed-off-by: Olof Johansson Acked-by: Jeff Garzik --- drivers/net/pasemi_mac.c | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c index 54904ad29ea7..5ee7e4a73ae0 100644 --- a/drivers/net/pasemi_mac.c +++ b/drivers/net/pasemi_mac.c @@ -1750,7 +1750,7 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) netif_napi_add(dev, &mac->napi, pasemi_mac_poll, 64); dev->features = NETIF_F_IP_CSUM | NETIF_F_LLTX | NETIF_F_SG | - NETIF_F_HIGHDMA; + NETIF_F_HIGHDMA | NETIF_F_GSO; mac->lro_mgr.max_aggr = LRO_MAX_AGGR; mac->lro_mgr.max_desc = MAX_LRO_DESCRIPTORS; -- cgit v1.2.3 From e37c772e36a7943b2e0bd8f48312e78474c0df15 Mon Sep 17 00:00:00 2001 From: Olof Johansson Date: Wed, 20 Feb 2008 20:57:59 -0600 Subject: pasemi_mac: basic ethtool support First cut at ethtool support, to be completed over time. Signed-off-by: Olof Johansson Acked-by: Jeff Garzik --- drivers/net/Makefile | 3 +- drivers/net/pasemi_mac.c | 26 ++----- drivers/net/pasemi_mac.h | 20 +++++ drivers/net/pasemi_mac_ethtool.c | 159 +++++++++++++++++++++++++++++++++++++++ 4 files changed, 189 insertions(+), 19 deletions(-) create mode 100644 drivers/net/pasemi_mac_ethtool.c diff --git a/drivers/net/Makefile b/drivers/net/Makefile index 3b1ea321dc05..4b442739e7bf 100644 --- a/drivers/net/Makefile +++ b/drivers/net/Makefile @@ -218,7 +218,8 @@ obj-$(CONFIG_SMC911X) += smc911x.o obj-$(CONFIG_BFIN_MAC) += bfin_mac.o obj-$(CONFIG_DM9000) += dm9000.o obj-$(CONFIG_FEC_8XX) += fec_8xx/ -obj-$(CONFIG_PASEMI_MAC) += pasemi_mac.o +obj-$(CONFIG_PASEMI_MAC) += pasemi_mac_driver.o +pasemi_mac_driver-objs := pasemi_mac.o pasemi_mac_ethtool.o obj-$(CONFIG_MLX4_CORE) += mlx4/ obj-$(CONFIG_ENC28J60) += enc28j60.o diff --git a/drivers/net/pasemi_mac.c b/drivers/net/pasemi_mac.c index 5ee7e4a73ae0..c50f0f4de6d8 100644 --- a/drivers/net/pasemi_mac.c +++ b/drivers/net/pasemi_mac.c @@ -55,12 +55,6 @@ * - Multiqueue RX/TX */ - -/* Must be a power of two */ -#define RX_RING_SIZE 2048 -#define TX_RING_SIZE 4096 -#define CS_RING_SIZE (TX_RING_SIZE*2) - #define LRO_MAX_AGGR 64 #define PE_MIN_MTU 64 @@ -77,17 +71,6 @@ NETIF_MSG_RX_ERR | \ NETIF_MSG_TX_ERR) -#define TX_DESC(tx, num) ((tx)->chan.ring_virt[(num) & (TX_RING_SIZE-1)]) -#define TX_DESC_INFO(tx, num) ((tx)->ring_info[(num) & (TX_RING_SIZE-1)]) -#define RX_DESC(rx, num) ((rx)->chan.ring_virt[(num) & (RX_RING_SIZE-1)]) -#define RX_DESC_INFO(rx, num) ((rx)->ring_info[(num) & (RX_RING_SIZE-1)]) -#define RX_BUFF(rx, num) ((rx)->buffers[(num) & (RX_RING_SIZE-1)]) -#define CS_DESC(cs, num) ((cs)->chan.ring_virt[(num) & (CS_RING_SIZE-1)]) - -#define RING_USED(ring) (((ring)->next_to_fill - (ring)->next_to_clean) \ - & ((ring)->size - 1)) -#define RING_AVAIL(ring) ((ring->size) - RING_USED(ring)) - MODULE_LICENSE("GPL"); MODULE_AUTHOR ("Olof Johansson "); MODULE_DESCRIPTION("PA Semi PWRficient Ethernet driver"); @@ -96,6 +79,8 @@ static int debug = -1; /* -1 == use DEFAULT_MSG_ENABLE as value */ module_param(debug, int, 0); MODULE_PARM_DESC(debug, "PA Semi MAC bitmapped debugging message enable value"); +extern const struct ethtool_ops pasemi_mac_ethtool_ops; + static int translation_enabled(void) { 
#if defined(CONFIG_PPC_PASEMI_IOMMU_DMA_FORCE) @@ -1148,7 +1133,7 @@ static int pasemi_mac_open(struct net_device *dev) { struct pasemi_mac *mac = netdev_priv(dev); unsigned int flags; - int ret; + int i, ret; flags = PAS_MAC_CFG_TXP_FCE | PAS_MAC_CFG_TXP_FPC(3) | PAS_MAC_CFG_TXP_SL(3) | PAS_MAC_CFG_TXP_COB(0xf) | @@ -1171,6 +1156,10 @@ static int pasemi_mac_open(struct net_device *dev) goto out_tx_ring; } + /* Zero out rmon counters */ + for (i = 0; i < 32; i++) + write_mac_reg(mac, PAS_MAC_RMON(i), 0); + /* 0x3ff with 33MHz clock is about 31us */ write_iob_reg(PAS_IOB_DMA_COM_TIMEOUTCFG, PAS_IOB_DMA_COM_TIMEOUTCFG_TCNT(0x3ff)); @@ -1812,6 +1801,7 @@ pasemi_mac_probe(struct pci_dev *pdev, const struct pci_device_id *ent) mac->bufsz = dev->mtu + ETH_HLEN + ETH_FCS_LEN + LOCAL_SKB_ALIGN + 128; dev->change_mtu = pasemi_mac_change_mtu; + dev->ethtool_ops = &pasemi_mac_ethtool_ops; if (err) goto out; diff --git a/drivers/net/pasemi_mac.h b/drivers/net/pasemi_mac.h index 90c51670a1e7..1a115ec60b53 100644 --- a/drivers/net/pasemi_mac.h +++ b/drivers/net/pasemi_mac.h @@ -26,6 +26,12 @@ #include #include +/* Must be a power of two */ +#define RX_RING_SIZE 2048 +#define TX_RING_SIZE 4096 +#define CS_RING_SIZE (TX_RING_SIZE*2) + + #define MAX_LRO_DESCRIPTORS 8 #define MAX_CS 2 @@ -103,6 +109,16 @@ struct pasemi_mac_buffer { dma_addr_t dma; }; +#define TX_DESC(tx, num) ((tx)->chan.ring_virt[(num) & (TX_RING_SIZE-1)]) +#define TX_DESC_INFO(tx, num) ((tx)->ring_info[(num) & (TX_RING_SIZE-1)]) +#define RX_DESC(rx, num) ((rx)->chan.ring_virt[(num) & (RX_RING_SIZE-1)]) +#define RX_DESC_INFO(rx, num) ((rx)->ring_info[(num) & (RX_RING_SIZE-1)]) +#define RX_BUFF(rx, num) ((rx)->buffers[(num) & (RX_RING_SIZE-1)]) +#define CS_DESC(cs, num) ((cs)->chan.ring_virt[(num) & (CS_RING_SIZE-1)]) + +#define RING_USED(ring) (((ring)->next_to_fill - (ring)->next_to_clean) \ + & ((ring)->size - 1)) +#define RING_AVAIL(ring) ((ring->size) - RING_USED(ring)) /* PCI register offsets and formats */ @@ -114,6 +130,7 @@ enum { PAS_MAC_CFG_ADR0 = 0x8c, PAS_MAC_CFG_ADR1 = 0x90, PAS_MAC_CFG_TXP = 0x98, + PAS_MAC_CFG_RMON = 0x100, PAS_MAC_IPC_CHNL = 0x208, }; @@ -185,6 +202,8 @@ enum { #define PAS_MAC_CFG_TXP_TIFG(x) (((x) << PAS_MAC_CFG_TXP_TIFG_S) & \ PAS_MAC_CFG_TXP_TIFG_M) +#define PAS_MAC_RMON(r) (0x100+(r)*4) + #define PAS_MAC_IPC_CHNL_DCHNO_M 0x003f0000 #define PAS_MAC_IPC_CHNL_DCHNO_S 16 #define PAS_MAC_IPC_CHNL_DCHNO(x) (((x) << PAS_MAC_IPC_CHNL_DCHNO_S) & \ @@ -194,4 +213,5 @@ enum { #define PAS_MAC_IPC_CHNL_BCH(x) (((x) << PAS_MAC_IPC_CHNL_BCH_S) & \ PAS_MAC_IPC_CHNL_BCH_M) + #endif /* PASEMI_MAC_H */ diff --git a/drivers/net/pasemi_mac_ethtool.c b/drivers/net/pasemi_mac_ethtool.c new file mode 100644 index 000000000000..5e8df3afea64 --- /dev/null +++ b/drivers/net/pasemi_mac_ethtool.c @@ -0,0 +1,159 @@ +/* + * Copyright (C) 2006-2008 PA Semi, Inc + * + * Ethtool hooks for the PA Semi PWRficient onchip 1G/10G Ethernet MACs + * + * This program is free software; you can redistribute it and/or modify + * it under the terms of the GNU General Public License version 2 as + * published by the Free Software Foundation. + * + * This program is distributed in the hope that it will be useful, + * but WITHOUT ANY WARRANTY; without even the implied warranty of + * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + * GNU General Public License for more details. 
+ * + * You should have received a copy of the GNU General Public License + * along with this program; if not, write to the Free Software + * Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA + */ + + +#include +#include +#include +#include + +#include +#include "pasemi_mac.h" + +static struct { + const char str[ETH_GSTRING_LEN]; +} ethtool_stats_keys[] = { + { "rx-drops" }, + { "rx-bytes" }, + { "rx-packets" }, + { "rx-broadcast-packets" }, + { "rx-multicast-packets" }, + { "rx-crc-errors" }, + { "rx-undersize-errors" }, + { "rx-oversize-errors" }, + { "rx-short-fragment-errors" }, + { "rx-jabber-errors" }, + { "rx-64-byte-packets" }, + { "rx-65-127-byte-packets" }, + { "rx-128-255-byte-packets" }, + { "rx-256-511-byte-packets" }, + { "rx-512-1023-byte-packets" }, + { "rx-1024-1518-byte-packets" }, + { "rx-pause-frames" }, + { "tx-bytes" }, + { "tx-packets" }, + { "tx-broadcast-packets" }, + { "tx-multicast-packets" }, + { "tx-collisions" }, + { "tx-late-collisions" }, + { "tx-excessive-collisions" }, + { "tx-crc-errors" }, + { "tx-undersize-errors" }, + { "tx-oversize-errors" }, + { "tx-64-byte-packets" }, + { "tx-65-127-byte-packets" }, + { "tx-128-255-byte-packets" }, + { "tx-256-511-byte-packets" }, + { "tx-512-1023-byte-packets" }, + { "tx-1024-1518-byte-packets" }, +}; + +static int +pasemi_mac_ethtool_get_settings(struct net_device *netdev, + struct ethtool_cmd *cmd) +{ + struct pasemi_mac *mac = netdev_priv(netdev); + struct phy_device *phydev = mac->phydev; + + return phy_ethtool_gset(phydev, cmd); +} + +static void +pasemi_mac_ethtool_get_drvinfo(struct net_device *netdev, + struct ethtool_drvinfo *drvinfo) +{ + struct pasemi_mac *mac; + mac = netdev_priv(netdev); + + /* clear and fill out info */ + memset(drvinfo, 0, sizeof(struct ethtool_drvinfo)); + strncpy(drvinfo->driver, "pasemi_mac", 12); + strcpy(drvinfo->version, "N/A"); + strcpy(drvinfo->fw_version, "N/A"); + strncpy(drvinfo->bus_info, pci_name(mac->pdev), 32); +} + +static u32 +pasemi_mac_ethtool_get_msglevel(struct net_device *netdev) +{ + struct pasemi_mac *mac = netdev_priv(netdev); + return mac->msg_enable; +} + +static void +pasemi_mac_ethtool_set_msglevel(struct net_device *netdev, + u32 level) +{ + struct pasemi_mac *mac = netdev_priv(netdev); + mac->msg_enable = level; +} + + +static void +pasemi_mac_ethtool_get_ringparam(struct net_device *netdev, + struct ethtool_ringparam *ering) +{ + struct pasemi_mac *mac = netdev->priv; + + ering->tx_max_pending = TX_RING_SIZE/2; + ering->tx_pending = RING_USED(mac->tx)/2; + ering->rx_max_pending = RX_RING_SIZE/4; + ering->rx_pending = RING_USED(mac->rx)/4; +} + +static int pasemi_mac_get_sset_count(struct net_device *netdev, int sset) +{ + switch (sset) { + case ETH_SS_STATS: + return ARRAY_SIZE(ethtool_stats_keys); + default: + return -EOPNOTSUPP; + } +} + +static void pasemi_mac_get_ethtool_stats(struct net_device *netdev, + struct ethtool_stats *stats, u64 *data) +{ + struct pasemi_mac *mac = netdev->priv; + int i; + + data[0] = pasemi_read_dma_reg(PAS_DMA_RXINT_RCMDSTA(mac->dma_if)) + >> PAS_DMA_RXINT_RCMDSTA_DROPS_S; + for (i = 0; i < 32; i++) + data[1+i] = pasemi_read_mac_reg(mac->dma_if, PAS_MAC_RMON(i)); +} + +static void pasemi_mac_get_strings(struct net_device *netdev, u32 stringset, + u8 *data) +{ + memcpy(data, ethtool_stats_keys, sizeof(ethtool_stats_keys)); +} + +const struct ethtool_ops pasemi_mac_ethtool_ops = { + .get_settings = pasemi_mac_ethtool_get_settings, + .get_drvinfo = pasemi_mac_ethtool_get_drvinfo, + .get_msglevel = 
pasemi_mac_ethtool_get_msglevel, + .set_msglevel = pasemi_mac_ethtool_set_msglevel, + .get_link = ethtool_op_get_link, + .get_ringparam = pasemi_mac_ethtool_get_ringparam, + .get_strings = pasemi_mac_get_strings, + .get_sset_count = pasemi_mac_get_sset_count, + .get_ethtool_stats = pasemi_mac_get_ethtool_stats, +}; + -- cgit v1.2.3