Diffstat (limited to 'arch/ia64')
-rw-r--r--  arch/ia64/Kconfig                    12
-rw-r--r--  arch/ia64/include/asm/kmap_types.h   13
-rw-r--r--  arch/ia64/include/asm/meminit.h       2
-rw-r--r--  arch/ia64/include/asm/mmu_context.h  17
-rw-r--r--  arch/ia64/include/asm/sparsemem.h     6
-rw-r--r--  arch/ia64/include/uapi/asm/signal.h  24
-rw-r--r--  arch/ia64/kernel/process.c            4
-rw-r--r--  arch/ia64/kernel/ptrace.c            51
-rw-r--r--  arch/ia64/kernel/time.c              56
-rw-r--r--  arch/ia64/mm/contig.c                58
-rw-r--r--  arch/ia64/mm/discontig.c             44
-rw-r--r--  arch/ia64/mm/init.c                  14
-rw-r--r--  arch/ia64/mm/numa.c                  30
13 files changed, 121 insertions, 210 deletions
diff --git a/arch/ia64/Kconfig b/arch/ia64/Kconfig
index 39b25a5a591b..eed59ec32657 100644
--- a/arch/ia64/Kconfig
+++ b/arch/ia64/Kconfig
@@ -46,6 +46,7 @@ config IA64
select ARCH_THREAD_STACK_ALLOCATOR
select ARCH_CLOCKSOURCE_DATA
select GENERIC_TIME_VSYSCALL
+ select LEGACY_TIMER_TICK
select SWIOTLB
select SYSCTL_ARCH_UNALIGN_NO_WARN
select HAVE_MOD_ARCH_SPECIFIC
@@ -288,6 +289,7 @@ config ARCH_SELECT_MEMORY_MODEL
config ARCH_DISCONTIGMEM_ENABLE
def_bool y
+ depends on BROKEN
help
Say Y to support efficient handling of discontiguous physical memory,
for architectures which are either NUMA (Non-Uniform Memory Access)
@@ -299,12 +301,11 @@ config ARCH_FLATMEM_ENABLE
config ARCH_SPARSEMEM_ENABLE
def_bool y
- depends on ARCH_DISCONTIGMEM_ENABLE
select SPARSEMEM_VMEMMAP_ENABLE
-config ARCH_DISCONTIGMEM_DEFAULT
+config ARCH_SPARSEMEM_DEFAULT
def_bool y
- depends on ARCH_DISCONTIGMEM_ENABLE
+ depends on ARCH_SPARSEMEM_ENABLE
config NUMA
bool "NUMA support"
@@ -329,7 +330,7 @@ config NODES_SHIFT
# VIRTUAL_MEM_MAP has been retained for historical reasons.
config VIRTUAL_MEM_MAP
bool "Virtual mem map"
- depends on !SPARSEMEM
+ depends on !SPARSEMEM && !FLATMEM
default y
help
Say Y to compile the kernel with support for a virtual mem map.
@@ -342,9 +343,6 @@ config HOLES_IN_ZONE
bool
default y if VIRTUAL_MEM_MAP
-config HAVE_ARCH_EARLY_PFN_TO_NID
- def_bool NUMA && SPARSEMEM
-
config HAVE_ARCH_NODEDATA_EXTENSION
def_bool y
depends on NUMA
diff --git a/arch/ia64/include/asm/kmap_types.h b/arch/ia64/include/asm/kmap_types.h
deleted file mode 100644
index 5c268cf7c2bd..000000000000
--- a/arch/ia64/include/asm/kmap_types.h
+++ /dev/null
@@ -1,13 +0,0 @@
-/* SPDX-License-Identifier: GPL-2.0 */
-#ifndef _ASM_IA64_KMAP_TYPES_H
-#define _ASM_IA64_KMAP_TYPES_H
-
-#ifdef CONFIG_DEBUG_HIGHMEM
-#define __WITH_KM_FENCE
-#endif
-
-#include <asm-generic/kmap_types.h>
-
-#undef __WITH_KM_FENCE
-
-#endif /* _ASM_IA64_KMAP_TYPES_H */
diff --git a/arch/ia64/include/asm/meminit.h b/arch/ia64/include/asm/meminit.h
index 092f1c91b36c..e789c0818edb 100644
--- a/arch/ia64/include/asm/meminit.h
+++ b/arch/ia64/include/asm/meminit.h
@@ -59,10 +59,8 @@ extern int reserve_elfcorehdr(u64 *start, u64 *end);
extern int register_active_ranges(u64 start, u64 len, int nid);
#ifdef CONFIG_VIRTUAL_MEM_MAP
-# define LARGE_GAP 0x40000000 /* Use virtual mem map if hole is > than this */
extern unsigned long VMALLOC_END;
extern struct page *vmem_map;
- extern int find_largest_hole(u64 start, u64 end, void *arg);
extern int create_mem_map_page_table(u64 start, u64 end, void *arg);
extern int vmemmap_find_next_valid_pfn(int, int);
#else
diff --git a/arch/ia64/include/asm/mmu_context.h b/arch/ia64/include/asm/mmu_context.h
index 2da0e2eb036b..87a0d5bc11ef 100644
--- a/arch/ia64/include/asm/mmu_context.h
+++ b/arch/ia64/include/asm/mmu_context.h
@@ -49,11 +49,6 @@ DECLARE_PER_CPU(u8, ia64_need_tlb_flush);
extern void mmu_context_init (void);
extern void wrap_mmu_context (struct mm_struct *mm);
-static inline void
-enter_lazy_tlb (struct mm_struct *mm, struct task_struct *tsk)
-{
-}
-
/*
* When the context counter wraps around all TLBs need to be flushed because
* an old context number might have been reused. This is signalled by the
@@ -116,6 +111,7 @@ out:
* Initialize context number to some sane value. MM is guaranteed to be a
* brand-new address-space, so no TLB flushing is needed, ever.
*/
+#define init_new_context init_new_context
static inline int
init_new_context (struct task_struct *p, struct mm_struct *mm)
{
@@ -124,12 +120,6 @@ init_new_context (struct task_struct *p, struct mm_struct *mm)
}
static inline void
-destroy_context (struct mm_struct *mm)
-{
- /* Nothing to do. */
-}
-
-static inline void
reload_context (nv_mm_context_t context)
{
unsigned long rid;
@@ -178,11 +168,10 @@ activate_context (struct mm_struct *mm)
} while (unlikely(context != mm->context));
}
-#define deactivate_mm(tsk,mm) do { } while (0)
-
/*
* Switch from address space PREV to address space NEXT.
*/
+#define activate_mm activate_mm
static inline void
activate_mm (struct mm_struct *prev, struct mm_struct *next)
{
@@ -196,5 +185,7 @@ activate_mm (struct mm_struct *prev, struct mm_struct *next)
#define switch_mm(prev_mm,next_mm,next_task) activate_mm(prev_mm, next_mm)
+#include <asm-generic/mmu_context.h>
+
# endif /* ! __ASSEMBLY__ */
#endif /* _ASM_IA64_MMU_CONTEXT_H */
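
The mmu_context.h change follows the kernel's asm-generic conversion pattern: the no-op hooks (enter_lazy_tlb, destroy_context, deactivate_mm) are deleted, the hooks ia64 really implements are flagged with a self-referential #define, and the final #include <asm-generic/mmu_context.h> fills in defaults for everything left unflagged. A standalone, simplified sketch of how that guard mechanism works (illustrative only, kernel types dropped):

    #include <stdio.h>

    /* "arch" side: provide one hook and flag it with a self-#define */
    static inline int init_new_context(void) { return 42; }
    #define init_new_context init_new_context

    /* "asm-generic" side: supply a default only when the arch did not */
    #ifndef init_new_context
    static inline int init_new_context(void) { return 0; }
    #endif

    #ifndef destroy_context
    static inline void destroy_context(void) { /* generic no-op */ }
    #endif

    int main(void)
    {
        printf("%d\n", init_new_context()); /* 42: the arch version wins */
        destroy_context();                  /* the generic no-op kicks in */
        return 0;
    }

This is why the deleted empty functions need no replacement: the generic header regenerates them.
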
diff --git a/arch/ia64/include/asm/sparsemem.h b/arch/ia64/include/asm/sparsemem.h
index 336d0570e1fa..dd8c166ffd7b 100644
--- a/arch/ia64/include/asm/sparsemem.h
+++ b/arch/ia64/include/asm/sparsemem.h
@@ -18,4 +18,10 @@
#endif
#endif /* CONFIG_SPARSEMEM */
+
+#ifdef CONFIG_MEMORY_HOTPLUG
+int memory_add_physaddr_to_nid(u64 addr);
+#define memory_add_physaddr_to_nid memory_add_physaddr_to_nid
+#endif
+
#endif /* _ASM_IA64_SPARSEMEM_H */
diff --git a/arch/ia64/include/uapi/asm/signal.h b/arch/ia64/include/uapi/asm/signal.h
index aa98ff1b9e22..38166a88e4c9 100644
--- a/arch/ia64/include/uapi/asm/signal.h
+++ b/arch/ia64/include/uapi/asm/signal.h
@@ -53,30 +53,6 @@
#define SIGRTMIN 32
#define SIGRTMAX _NSIG
-/*
- * SA_FLAGS values:
- *
- * SA_ONSTACK indicates that a registered stack_t will be used.
- * SA_RESTART flag to get restarting signals (which were the default long ago)
- * SA_NOCLDSTOP flag to turn off SIGCHLD when children stop.
- * SA_RESETHAND clears the handler when the signal is delivered.
- * SA_NOCLDWAIT flag on SIGCHLD to inhibit zombies.
- * SA_NODEFER prevents the current signal from being masked in the handler.
- *
- * SA_ONESHOT and SA_NOMASK are the historical Linux names for the Single
- * Unix names RESETHAND and NODEFER respectively.
- */
-#define SA_NOCLDSTOP 0x00000001
-#define SA_NOCLDWAIT 0x00000002
-#define SA_SIGINFO 0x00000004
-#define SA_ONSTACK 0x08000000
-#define SA_RESTART 0x10000000
-#define SA_NODEFER 0x40000000
-#define SA_RESETHAND 0x80000000
-
-#define SA_NOMASK SA_NODEFER
-#define SA_ONESHOT SA_RESETHAND
-
#define SA_RESTORER 0x04000000
/*
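
The SA_* block is removed because the flag definitions are consolidated into a shared uapi header (asm-generic's signal-defs.h, by the look of the removal); the ia64 values matched the generic ones, so userspace sees no change, and only the ia64-specific SA_RESTORER stays. A quick standalone check of two of the shared values (assuming a Linux host):

    #include <signal.h>
    #include <stdio.h>

    int main(void)
    {
        /* these now come from the common uapi definitions */
        printf("SA_RESTART=%#x SA_NODEFER=%#x\n",
               (unsigned)SA_RESTART, (unsigned)SA_NODEFER);
        return 0; /* expected: SA_RESTART=0x10000000 SA_NODEFER=0x40000000 */
    }
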
diff --git a/arch/ia64/kernel/process.c b/arch/ia64/kernel/process.c
index 8d4e1cab9190..4ebbfa076a26 100644
--- a/arch/ia64/kernel/process.c
+++ b/arch/ia64/kernel/process.c
@@ -240,7 +240,7 @@ void arch_cpu_idle(void)
if (mark_idle)
(*mark_idle)(1);
- safe_halt();
+ raw_safe_halt();
if (mark_idle)
(*mark_idle)(0);
@@ -488,7 +488,7 @@ do_copy_task_regs (struct task_struct *task, struct unw_frame_info *info, void *
unw_get_ar(info, UNW_AR_SSD, &dst[56]);
}
-void
+static void
do_copy_regs (struct unw_frame_info *info, void *arg)
{
do_copy_task_regs(current, info, arg);
diff --git a/arch/ia64/kernel/ptrace.c b/arch/ia64/kernel/ptrace.c
index 75c070aed81e..c3490ee2daa5 100644
--- a/arch/ia64/kernel/ptrace.c
+++ b/arch/ia64/kernel/ptrace.c
@@ -817,8 +817,8 @@ access_nat_bits (struct task_struct *child, struct pt_regs *pt,
}
static int
-access_uarea (struct task_struct *child, unsigned long addr,
- unsigned long *data, int write_access);
+access_elf_reg(struct task_struct *target, struct unw_frame_info *info,
+ unsigned long addr, unsigned long *data, int write_access);
static long
ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
@@ -847,13 +847,13 @@ ptrace_getregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
return -EIO;
}
- if (access_uarea(child, PT_CR_IPSR, &psr, 0) < 0
- || access_uarea(child, PT_AR_EC, &ec, 0) < 0
- || access_uarea(child, PT_AR_LC, &lc, 0) < 0
- || access_uarea(child, PT_AR_RNAT, &rnat, 0) < 0
- || access_uarea(child, PT_AR_BSP, &bsp, 0) < 0
- || access_uarea(child, PT_CFM, &cfm, 0)
- || access_uarea(child, PT_NAT_BITS, &nat_bits, 0))
+ if (access_elf_reg(child, &info, ELF_CR_IPSR_OFFSET, &psr, 0) < 0 ||
+ access_elf_reg(child, &info, ELF_AR_EC_OFFSET, &ec, 0) < 0 ||
+ access_elf_reg(child, &info, ELF_AR_LC_OFFSET, &lc, 0) < 0 ||
+ access_elf_reg(child, &info, ELF_AR_RNAT_OFFSET, &rnat, 0) < 0 ||
+ access_elf_reg(child, &info, ELF_AR_BSP_OFFSET, &bsp, 0) < 0 ||
+ access_elf_reg(child, &info, ELF_CFM_OFFSET, &cfm, 0) < 0 ||
+ access_elf_reg(child, &info, ELF_NAT_OFFSET, &nat_bits, 0) < 0)
return -EIO;
/* control regs */
@@ -972,7 +972,7 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
struct switch_stack *sw;
struct ia64_fpreg fpval;
struct pt_regs *pt;
- long ret, retval = 0;
+ long retval = 0;
int i;
memset(&fpval, 0, sizeof(fpval));
@@ -1097,17 +1097,16 @@ ptrace_setregs (struct task_struct *child, struct pt_all_user_regs __user *ppr)
retval |= __get_user(nat_bits, &ppr->nat);
- retval |= access_uarea(child, PT_CR_IPSR, &psr, 1);
- retval |= access_uarea(child, PT_AR_RSC, &rsc, 1);
- retval |= access_uarea(child, PT_AR_EC, &ec, 1);
- retval |= access_uarea(child, PT_AR_LC, &lc, 1);
- retval |= access_uarea(child, PT_AR_RNAT, &rnat, 1);
- retval |= access_uarea(child, PT_AR_BSP, &bsp, 1);
- retval |= access_uarea(child, PT_CFM, &cfm, 1);
- retval |= access_uarea(child, PT_NAT_BITS, &nat_bits, 1);
+ retval |= access_elf_reg(child, &info, ELF_CR_IPSR_OFFSET, &psr, 1);
+ retval |= access_elf_reg(child, &info, ELF_AR_RSC_OFFSET, &rsc, 1);
+ retval |= access_elf_reg(child, &info, ELF_AR_EC_OFFSET, &ec, 1);
+ retval |= access_elf_reg(child, &info, ELF_AR_LC_OFFSET, &lc, 1);
+ retval |= access_elf_reg(child, &info, ELF_AR_RNAT_OFFSET, &rnat, 1);
+ retval |= access_elf_reg(child, &info, ELF_AR_BSP_OFFSET, &bsp, 1);
+ retval |= access_elf_reg(child, &info, ELF_CFM_OFFSET, &cfm, 1);
+ retval |= access_elf_reg(child, &info, ELF_NAT_OFFSET, &nat_bits, 1);
- ret = retval ? -EIO : 0;
- return ret;
+ return retval ? -EIO : 0;
}
void
@@ -1150,6 +1149,10 @@ ptrace_disable (struct task_struct *child)
user_disable_single_step(child);
}
+static int
+access_uarea (struct task_struct *child, unsigned long addr,
+ unsigned long *data, int write_access);
+
long
arch_ptrace (struct task_struct *child, long request,
unsigned long addr, unsigned long data)
@@ -1491,7 +1494,7 @@ struct regset_membuf {
int ret;
};
-void do_gpregs_get(struct unw_frame_info *info, void *arg)
+static void do_gpregs_get(struct unw_frame_info *info, void *arg)
{
struct regset_membuf *dst = arg;
struct membuf to = dst->to;
@@ -1524,7 +1527,7 @@ void do_gpregs_get(struct unw_frame_info *info, void *arg)
}
}
-void do_gpregs_set(struct unw_frame_info *info, void *arg)
+static void do_gpregs_set(struct unw_frame_info *info, void *arg)
{
struct regset_getset *dst = arg;
@@ -1569,7 +1572,7 @@ void do_gpregs_set(struct unw_frame_info *info, void *arg)
#define ELF_FP_OFFSET(i) (i * sizeof(elf_fpreg_t))
-void do_fpregs_get(struct unw_frame_info *info, void *arg)
+static void do_fpregs_get(struct unw_frame_info *info, void *arg)
{
struct task_struct *task = info->task;
struct regset_membuf *dst = arg;
@@ -1603,7 +1606,7 @@ void do_fpregs_get(struct unw_frame_info *info, void *arg)
membuf_zero(&to, 96 * sizeof(reg));
}
-void do_fpregs_set(struct unw_frame_info *info, void *arg)
+static void do_fpregs_set(struct unw_frame_info *info, void *arg)
{
struct regset_getset *dst = arg;
elf_fpreg_t fpreg, tmp[30];
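
In ptrace.c, ptrace_getregs() and ptrace_setregs() switch from the legacy access_uarea() offsets (PT_*) to the unwinder-based access_elf_reg() accessor and its ELF_*_OFFSET register numbering; access_uarea() itself becomes static, forward-declared just ahead of its remaining caller, arch_ptrace(). Both accessors share the same read-or-write-by-flag shape, sketched standalone here (hypothetical register file, not the kernel's):

    #include <stdio.h>

    static unsigned long regs[8];       /* stand-in register file */

    static int access_reg(unsigned long addr, unsigned long *data,
                          int write_access)
    {
        if (addr >= 8)
            return -1;                  /* the kernel returns -EIO here */
        if (write_access)
            regs[addr] = *data;
        else
            *data = regs[addr];
        return 0;
    }

    int main(void)
    {
        unsigned long v = 0x42;
        access_reg(3, &v, 1);           /* write slot 3 */
        v = 0;
        access_reg(3, &v, 0);           /* read it back */
        printf("%#lx\n", v);            /* 0x42 */
        return 0;
    }
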
diff --git a/arch/ia64/kernel/time.c b/arch/ia64/kernel/time.c
index 7abc5f37bfaf..ed9fc3d057a6 100644
--- a/arch/ia64/kernel/time.c
+++ b/arch/ia64/kernel/time.c
@@ -138,12 +138,8 @@ void vtime_account_kernel(struct task_struct *tsk)
struct thread_info *ti = task_thread_info(tsk);
__u64 stime = vtime_delta(tsk);
- if ((tsk->flags & PF_VCPU) && !irq_count())
+ if (tsk->flags & PF_VCPU)
ti->gtime += stime;
- else if (hardirq_count())
- ti->hardirq_time += stime;
- else if (in_serving_softirq())
- ti->softirq_time += stime;
else
ti->stime += stime;
}
@@ -156,44 +152,48 @@ void vtime_account_idle(struct task_struct *tsk)
ti->idle_time += vtime_delta(tsk);
}
+void vtime_account_softirq(struct task_struct *tsk)
+{
+ struct thread_info *ti = task_thread_info(tsk);
+
+ ti->softirq_time += vtime_delta(tsk);
+}
+
+void vtime_account_hardirq(struct task_struct *tsk)
+{
+ struct thread_info *ti = task_thread_info(tsk);
+
+ ti->hardirq_time += vtime_delta(tsk);
+}
+
#endif /* CONFIG_VIRT_CPU_ACCOUNTING_NATIVE */
static irqreturn_t
timer_interrupt (int irq, void *dev_id)
{
- unsigned long new_itm;
+ unsigned long cur_itm, new_itm, ticks;
if (cpu_is_offline(smp_processor_id())) {
return IRQ_HANDLED;
}
new_itm = local_cpu_data->itm_next;
+ cur_itm = ia64_get_itc();
- if (!time_after(ia64_get_itc(), new_itm))
+ if (!time_after(cur_itm, new_itm)) {
printk(KERN_ERR "Oops: timer tick before it's due (itc=%lx,itm=%lx)\n",
- ia64_get_itc(), new_itm);
-
- profile_tick(CPU_PROFILING);
-
- while (1) {
- update_process_times(user_mode(get_irq_regs()));
-
- new_itm += local_cpu_data->itm_delta;
-
- if (smp_processor_id() == time_keeper_id)
- xtime_update(1);
-
- local_cpu_data->itm_next = new_itm;
+ cur_itm, new_itm);
+ ticks = 1;
+ } else {
+ ticks = DIV_ROUND_UP(cur_itm - new_itm,
+ local_cpu_data->itm_delta);
+ new_itm += ticks * local_cpu_data->itm_delta;
+ }
- if (time_after(new_itm, ia64_get_itc()))
- break;
+ if (smp_processor_id() != time_keeper_id)
+ ticks = 0;
- /*
- * Allow IPIs to interrupt the timer loop.
- */
- local_irq_enable();
- local_irq_disable();
- }
+ legacy_timer_tick(ticks);
do {
/*
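
The rewritten timer_interrupt() drops the tick-at-a-time loop: it computes how many tick periods the cycle counter (ITC) has advanced past the programmed match value (ITM) and accounts them all in one legacy_timer_tick() call; CPUs other than the timekeeper pass 0 so only per-CPU accounting runs there. The arithmetic, as a standalone sketch with illustrative numbers:

    #include <stdio.h>

    #define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

    int main(void)
    {
        unsigned long itm_delta = 1000; /* cycles per tick */
        unsigned long new_itm   = 5000; /* missed match value */
        unsigned long cur_itm   = 7500; /* current cycle counter */

        /* 2500 cycles overdue -> 3 ticks, next match safely in the future */
        unsigned long ticks = DIV_ROUND_UP(cur_itm - new_itm, itm_delta);
        new_itm += ticks * itm_delta;

        printf("ticks=%lu next_itm=%lu\n", ticks, new_itm); /* 3, 8000 */
        return 0;
    }
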
diff --git a/arch/ia64/mm/contig.c b/arch/ia64/mm/contig.c
index e30e360beef8..bfc4ecd0a2ab 100644
--- a/arch/ia64/mm/contig.c
+++ b/arch/ia64/mm/contig.c
@@ -19,15 +19,12 @@
#include <linux/mm.h>
#include <linux/nmi.h>
#include <linux/swap.h>
+#include <linux/sizes.h>
#include <asm/meminit.h>
#include <asm/sections.h>
#include <asm/mca.h>
-#ifdef CONFIG_VIRTUAL_MEM_MAP
-static unsigned long max_gap;
-#endif
-
/* physical address where the bootmem map is located */
unsigned long bootmap_start;
@@ -166,6 +163,32 @@ find_memory (void)
alloc_per_cpu_data();
}
+static int __init find_largest_hole(u64 start, u64 end, void *arg)
+{
+ u64 *max_gap = arg;
+
+ static u64 last_end = PAGE_OFFSET;
+
+ /* NOTE: this algorithm assumes efi memmap table is ordered */
+
+ if (*max_gap < (start - last_end))
+ *max_gap = start - last_end;
+ last_end = end;
+ return 0;
+}
+
+static void __init verify_gap_absence(void)
+{
+ unsigned long max_gap;
+
+ /* Forbid FLATMEM if hole is > than 1G */
+ efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
+ if (max_gap >= SZ_1G)
+ panic("Cannot use FLATMEM with %ldMB hole\n"
+ "Please switch over to SPARSEMEM\n",
+ (max_gap >> 20));
+}
+
/*
* Set up the page tables.
*/
@@ -177,37 +200,12 @@ paging_init (void)
unsigned long max_zone_pfns[MAX_NR_ZONES];
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-#ifdef CONFIG_ZONE_DMA32
max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
max_zone_pfns[ZONE_DMA32] = max_dma;
-#endif
max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
-#ifdef CONFIG_VIRTUAL_MEM_MAP
- efi_memmap_walk(find_largest_hole, (u64 *)&max_gap);
- if (max_gap < LARGE_GAP) {
- vmem_map = (struct page *) 0;
- } else {
- unsigned long map_size;
-
- /* allocate virtual_mem_map */
+ verify_gap_absence();
- map_size = PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
- sizeof(struct page));
- VMALLOC_END -= map_size;
- vmem_map = (struct page *) VMALLOC_END;
- efi_memmap_walk(create_mem_map_page_table, NULL);
-
- /*
- * alloc_node_mem_map makes an adjustment for mem_map
- * which isn't compatible with vmem_map.
- */
- NODE_DATA(0)->node_mem_map = vmem_map +
- find_min_pfn_with_active_regions();
-
- printk("Virtual mem_map starts at 0x%p\n", mem_map);
- }
-#endif /* !CONFIG_VIRTUAL_MEM_MAP */
free_area_init(max_zone_pfns);
zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
}
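
find_largest_hole() moves here from init.c with a new job: rather than sizing a virtual mem_map, it now only vetoes FLATMEM when the (ordered) EFI memory map contains a hole of SZ_1G or more; note SZ_1G from <linux/sizes.h> equals the removed LARGE_GAP constant, 0x40000000. The scan itself is a single pass tracking the largest gap between consecutive ranges, shown standalone with made-up ranges:

    #include <stdio.h>

    struct range { unsigned long start, end; };

    int main(void)
    {
        /* ordered memory ranges with a 0x4000-byte hole between them */
        struct range map[] = { { 0x0000, 0x1000 }, { 0x5000, 0x6000 } };
        unsigned long last_end = 0, max_gap = 0;

        for (int i = 0; i < 2; i++) {
            if (map[i].start - last_end > max_gap)
                max_gap = map[i].start - last_end;
            last_end = map[i].end;
        }
        printf("largest hole: %#lx\n", max_gap); /* 0x4000 */
        return 0;
    }
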
diff --git a/arch/ia64/mm/discontig.c b/arch/ia64/mm/discontig.c
index dbe829fc5298..c7311131156e 100644
--- a/arch/ia64/mm/discontig.c
+++ b/arch/ia64/mm/discontig.c
@@ -584,6 +584,25 @@ void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
}
}
+static void __init virtual_map_init(void)
+{
+#ifdef CONFIG_VIRTUAL_MEM_MAP
+ int node;
+
+ VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
+ sizeof(struct page));
+ vmem_map = (struct page *) VMALLOC_END;
+ efi_memmap_walk(create_mem_map_page_table, NULL);
+ printk("Virtual mem_map starts at 0x%p\n", vmem_map);
+
+ for_each_online_node(node) {
+ unsigned long pfn_offset = mem_data[node].min_pfn;
+
+ NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
+ }
+#endif
+}
+
/**
* paging_init - setup page tables
*
@@ -593,38 +612,17 @@ void call_pernode_memory(unsigned long start, unsigned long len, void *arg)
void __init paging_init(void)
{
unsigned long max_dma;
- unsigned long pfn_offset = 0;
- unsigned long max_pfn = 0;
- int node;
unsigned long max_zone_pfns[MAX_NR_ZONES];
max_dma = virt_to_phys((void *) MAX_DMA_ADDRESS) >> PAGE_SHIFT;
sparse_init();
-#ifdef CONFIG_VIRTUAL_MEM_MAP
- VMALLOC_END -= PAGE_ALIGN(ALIGN(max_low_pfn, MAX_ORDER_NR_PAGES) *
- sizeof(struct page));
- vmem_map = (struct page *) VMALLOC_END;
- efi_memmap_walk(create_mem_map_page_table, NULL);
- printk("Virtual mem_map starts at 0x%p\n", vmem_map);
-#endif
-
- for_each_online_node(node) {
- pfn_offset = mem_data[node].min_pfn;
-
-#ifdef CONFIG_VIRTUAL_MEM_MAP
- NODE_DATA(node)->node_mem_map = vmem_map + pfn_offset;
-#endif
- if (mem_data[node].max_pfn > max_pfn)
- max_pfn = mem_data[node].max_pfn;
- }
+ virtual_map_init();
memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
-#ifdef CONFIG_ZONE_DMA32
max_zone_pfns[ZONE_DMA32] = max_dma;
-#endif
- max_zone_pfns[ZONE_NORMAL] = max_pfn;
+ max_zone_pfns[ZONE_NORMAL] = max_low_pfn;
free_area_init(max_zone_pfns);
zero_page_memmap_ptr = virt_to_page(ia64_imva(empty_zero_page));
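
On the discontig side, the virtual mem_map setup is factored out into virtual_map_init(), and the hand-rolled per-node max_pfn scan gives way to the global max_low_pfn. The per-node node_mem_map assignment works because, with a flat virtual mem_map, pfn-to-page is plain array indexing; a standalone sketch (simplified, illustrative values):

    #include <stdio.h>

    struct page { unsigned long flags; };

    static struct page fake_map[16];        /* stand-in for vmem_map */
    static struct page *vmem_map = fake_map;

    int main(void)
    {
        unsigned long min_pfn = 4;          /* node 0's first pfn */
        struct page *node_mem_map = vmem_map + min_pfn;

        /* pfn 6 reaches the same struct page through either base */
        printf("%d\n", (vmem_map + 6) == (node_mem_map + 2)); /* 1 */
        return 0;
    }
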
diff --git a/arch/ia64/mm/init.c b/arch/ia64/mm/init.c
index ef12e097f318..9b5acf8fb092 100644
--- a/arch/ia64/mm/init.c
+++ b/arch/ia64/mm/init.c
@@ -574,20 +574,6 @@ ia64_pfn_valid (unsigned long pfn)
}
EXPORT_SYMBOL(ia64_pfn_valid);
-int __init find_largest_hole(u64 start, u64 end, void *arg)
-{
- u64 *max_gap = arg;
-
- static u64 last_end = PAGE_OFFSET;
-
- /* NOTE: this algorithm assumes efi memmap table is ordered */
-
- if (*max_gap < (start - last_end))
- *max_gap = start - last_end;
- last_end = end;
- return 0;
-}
-
#endif /* CONFIG_VIRTUAL_MEM_MAP */
int __init register_active_ranges(u64 start, u64 len, int nid)
diff --git a/arch/ia64/mm/numa.c b/arch/ia64/mm/numa.c
index f34964271101..46b6e5f3a40f 100644
--- a/arch/ia64/mm/numa.c
+++ b/arch/ia64/mm/numa.c
@@ -58,36 +58,6 @@ paddr_to_nid(unsigned long paddr)
EXPORT_SYMBOL(paddr_to_nid);
#if defined(CONFIG_SPARSEMEM) && defined(CONFIG_NUMA)
-/*
- * Because of holes evaluate on section limits.
- * If the section of memory exists, then return the node where the section
- * resides. Otherwise return node 0 as the default. This is used by
- * SPARSEMEM to allocate the SPARSEMEM sectionmap on the NUMA node where
- * the section resides.
- */
-int __meminit __early_pfn_to_nid(unsigned long pfn,
- struct mminit_pfnnid_cache *state)
-{
- int i, section = pfn >> PFN_SECTION_SHIFT, ssec, esec;
-
- if (section >= state->last_start && section < state->last_end)
- return state->last_nid;
-
- for (i = 0; i < num_node_memblks; i++) {
- ssec = node_memblk[i].start_paddr >> PA_SECTION_SHIFT;
- esec = (node_memblk[i].start_paddr + node_memblk[i].size +
- ((1L << PA_SECTION_SHIFT) - 1)) >> PA_SECTION_SHIFT;
- if (section >= ssec && section < esec) {
- state->last_start = ssec;
- state->last_end = esec;
- state->last_nid = node_memblk[i].nid;
- return node_memblk[i].nid;
- }
- }
-
- return -1;
-}
-
void numa_clear_node(int cpu)
{
unmap_cpu_from_node(cpu, NUMA_NO_NODE);
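
With the custom __early_pfn_to_nid() deleted (and HAVE_ARCH_EARLY_PFN_TO_NID dropped from Kconfig earlier in this patch), early pfn-to-node queries fall back to the generic, memblock-based lookup. What the removed helper did, boiled down, was answer "which node's memory block covers this sparsemem section"; a standalone rendition with hypothetical section numbers:

    #include <stdio.h>

    struct memblk { unsigned long start_sec, end_sec; int nid; };

    static int section_to_nid(const struct memblk *blks, int n,
                              unsigned long sec)
    {
        for (int i = 0; i < n; i++)
            if (sec >= blks[i].start_sec && sec < blks[i].end_sec)
                return blks[i].nid;
        return -1;                          /* no node covers this section */
    }

    int main(void)
    {
        struct memblk blks[] = { { 0, 4, 0 }, { 8, 12, 1 } };
        printf("%d %d %d\n",
               section_to_nid(blks, 2, 2),  /* 0 */
               section_to_nid(blks, 2, 9),  /* 1 */
               section_to_nid(blks, 2, 6)); /* -1: hole */
        return 0;
    }
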