Diffstat (limited to 'arch/mips')
-rw-r--r--arch/mips/Kconfig223
-rw-r--r--arch/mips/Makefile22
-rw-r--r--arch/mips/alchemy/Kconfig2
-rw-r--r--arch/mips/alchemy/common/clock.c3
-rw-r--r--arch/mips/alchemy/common/dbdma.c4
-rw-r--r--arch/mips/alchemy/devboards/db1000.c18
-rw-r--r--arch/mips/alchemy/devboards/db1550.c4
-rw-r--r--arch/mips/ath79/Kconfig12
-rw-r--r--arch/mips/ath79/clock.c243
-rw-r--r--arch/mips/ath79/common.c24
-rw-r--r--arch/mips/ath79/early_printk.c6
-rw-r--r--arch/mips/ath79/setup.c63
-rw-r--r--arch/mips/bcm47xx/Makefile2
-rw-r--r--arch/mips/bcm47xx/bcm47xx_private.h3
-rw-r--r--arch/mips/bcm47xx/setup.c2
-rw-r--r--arch/mips/bcm47xx/sprom.c724
-rw-r--r--arch/mips/bmips/Kconfig4
-rw-r--r--arch/mips/bmips/setup.c16
-rw-r--r--arch/mips/boot/compressed/Makefile12
-rw-r--r--arch/mips/boot/compressed/decompress.c17
-rw-r--r--arch/mips/boot/compressed/head.S16
-rw-r--r--arch/mips/boot/dts/brcm/Makefile2
-rw-r--r--arch/mips/boot/dts/brcm/bcm6328.dtsi50
-rw-r--r--arch/mips/boot/dts/brcm/bcm6358.dtsi130
-rw-r--r--arch/mips/boot/dts/brcm/bcm6368.dtsi32
-rw-r--r--arch/mips/boot/dts/brcm/bcm7125.dtsi69
-rw-r--r--arch/mips/boot/dts/brcm/bcm7346.dtsi6
-rw-r--r--arch/mips/boot/dts/brcm/bcm7358.dtsi2
-rw-r--r--arch/mips/boot/dts/brcm/bcm7360.dtsi42
-rw-r--r--arch/mips/boot/dts/brcm/bcm7362.dtsi6
-rw-r--r--arch/mips/boot/dts/brcm/bcm7420.dtsi77
-rw-r--r--arch/mips/boot/dts/brcm/bcm7425.dtsi100
-rw-r--r--arch/mips/boot/dts/brcm/bcm7435.dtsi143
-rw-r--r--arch/mips/boot/dts/brcm/bcm96358nb4ser.dts46
-rw-r--r--arch/mips/boot/dts/brcm/bcm96368mvwg.dts4
-rw-r--r--arch/mips/boot/dts/brcm/bcm97125cbmb.dts24
-rw-r--r--arch/mips/boot/dts/brcm/bcm97360svmb.dts8
-rw-r--r--arch/mips/boot/dts/brcm/bcm97420c.dts28
-rw-r--r--arch/mips/boot/dts/brcm/bcm97425svmb.dts28
-rw-r--r--arch/mips/boot/dts/brcm/bcm97435svmb.dts38
-rw-r--r--arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts94
-rw-r--r--arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts217
-rw-r--r--arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dtsi231
-rw-r--r--arch/mips/boot/dts/cavium-octeon/ubnt_e100.dts59
-rw-r--r--arch/mips/boot/dts/ingenic/jz4740.dtsi16
-rw-r--r--arch/mips/boot/dts/lantiq/easy50712.dts2
-rw-r--r--arch/mips/boot/dts/pic32/pic32mzda-clk.dtsi236
-rw-r--r--arch/mips/boot/dts/pic32/pic32mzda.dtsi63
-rw-r--r--arch/mips/boot/dts/pic32/pic32mzda_sk.dts5
-rw-r--r--arch/mips/boot/dts/qca/Makefile7
-rw-r--r--arch/mips/boot/dts/qca/ar9132.dtsi19
-rw-r--r--arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts100
-rw-r--r--arch/mips/boot/dts/qca/ar9331.dtsi155
-rw-r--r--arch/mips/boot/dts/qca/ar9331_dpt_module.dts78
-rw-r--r--arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts102
-rw-r--r--arch/mips/boot/dts/qca/ar9331_omega.dts78
-rw-r--r--arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts118
-rw-r--r--arch/mips/boot/dts/ralink/mt7620a.dtsi2
-rw-r--r--arch/mips/boot/dts/ralink/rt2880.dtsi2
-rw-r--r--arch/mips/boot/dts/ralink/rt3050.dtsi2
-rw-r--r--arch/mips/boot/dts/ralink/rt3883.dtsi2
-rw-r--r--arch/mips/boot/dts/xilfpga/nexys4ddr.dts2
-rw-r--r--arch/mips/boot/tools/.gitignore1
-rw-r--r--arch/mips/boot/tools/Makefile8
-rw-r--r--arch/mips/boot/tools/relocs.c680
-rw-r--r--arch/mips/boot/tools/relocs.h45
-rw-r--r--arch/mips/boot/tools/relocs_32.c17
-rw-r--r--arch/mips/boot/tools/relocs_64.c30
-rw-r--r--arch/mips/boot/tools/relocs_main.c84
-rw-r--r--arch/mips/cavium-octeon/csrc-octeon.c13
-rw-r--r--arch/mips/cavium-octeon/dma-octeon.c8
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-bootmem.c2
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper-board.c22
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-helper.c43
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c14
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-pko.c2
-rw-r--r--arch/mips/cavium-octeon/executive/cvmx-sysinfo.c72
-rw-r--r--arch/mips/cavium-octeon/executive/octeon-model.c82
-rw-r--r--arch/mips/cavium-octeon/octeon-irq.c693
-rw-r--r--arch/mips/cavium-octeon/octeon-platform.c219
-rw-r--r--arch/mips/cavium-octeon/setup.c104
-rw-r--r--arch/mips/cavium-octeon/smp.c160
-rw-r--r--arch/mips/cobalt/setup.c4
-rw-r--r--arch/mips/configs/ath25_defconfig119
-rw-r--r--arch/mips/configs/bcm47xx_defconfig1
-rw-r--r--arch/mips/configs/bcm63xx_defconfig1
-rw-r--r--arch/mips/configs/bigsur_defconfig1
-rw-r--r--arch/mips/configs/bmips_be_defconfig1
-rw-r--r--arch/mips/configs/cavium_octeon_defconfig17
-rw-r--r--arch/mips/configs/ci20_defconfig14
-rw-r--r--arch/mips/configs/db1xxx_defconfig1
-rw-r--r--arch/mips/configs/decstation_defconfig1
-rw-r--r--arch/mips/configs/ip22_defconfig1
-rw-r--r--arch/mips/configs/ip27_defconfig1
-rw-r--r--arch/mips/configs/jazz_defconfig1
-rw-r--r--arch/mips/configs/lemote2f_defconfig1
-rw-r--r--arch/mips/configs/loongson1b_defconfig (renamed from arch/mips/configs/ls1b_defconfig)35
-rw-r--r--arch/mips/configs/loongson3_defconfig1
-rw-r--r--arch/mips/configs/malta_qemu_32r6_defconfig2
-rw-r--r--arch/mips/configs/maltaaprp_defconfig2
-rw-r--r--arch/mips/configs/maltasmvp_eva_defconfig2
-rw-r--r--arch/mips/configs/maltaup_defconfig2
-rw-r--r--arch/mips/configs/mtx1_defconfig1
-rw-r--r--arch/mips/configs/nlm_xlp_defconfig1
-rw-r--r--arch/mips/configs/nlm_xlr_defconfig1
-rw-r--r--arch/mips/configs/rbtx49xx_defconfig2
-rw-r--r--arch/mips/configs/rm200_defconfig1
-rw-r--r--arch/mips/dec/int-handler.S2
-rw-r--r--arch/mips/dec/setup.c1
-rw-r--r--arch/mips/fw/arc/memory.c2
-rw-r--r--arch/mips/include/asm/Kbuild1
-rw-r--r--arch/mips/include/asm/addrspace.h2
-rw-r--r--arch/mips/include/asm/asmmacro.h282
-rw-r--r--arch/mips/include/asm/atomic.h154
-rw-r--r--arch/mips/include/asm/bitops.h17
-rw-r--r--arch/mips/include/asm/bitrev.h30
-rw-r--r--arch/mips/include/asm/bmips.h1
-rw-r--r--arch/mips/include/asm/bootinfo.h22
-rw-r--r--arch/mips/include/asm/cacheflush.h7
-rw-r--r--arch/mips/include/asm/cacheops.h6
-rw-r--r--arch/mips/include/asm/clkdev.h27
-rw-r--r--arch/mips/include/asm/cpu-features.h147
-rw-r--r--arch/mips/include/asm/cpu-info.h45
-rw-r--r--arch/mips/include/asm/cpu-type.h5
-rw-r--r--arch/mips/include/asm/cpu.h113
-rw-r--r--arch/mips/include/asm/dsemul.h92
-rw-r--r--arch/mips/include/asm/elf.h153
-rw-r--r--arch/mips/include/asm/fpu_emulator.h17
-rw-r--r--arch/mips/include/asm/hazards.h15
-rw-r--r--arch/mips/include/asm/highmem.h4
-rw-r--r--arch/mips/include/asm/io.h10
-rw-r--r--arch/mips/include/asm/irq_regs.h10
-rw-r--r--arch/mips/include/asm/irqflags.h5
-rw-r--r--arch/mips/include/asm/kvm_host.h325
-rw-r--r--arch/mips/include/asm/llsc.h28
-rw-r--r--arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h2
-rw-r--r--arch/mips/include/asm/mach-au1x00/gpio-au1300.h2
-rw-r--r--arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h2
-rw-r--r--arch/mips/include/asm/mach-bmips/cpu-feature-overrides.h14
-rw-r--r--arch/mips/include/asm/mach-bmips/ioremap.h33
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h2
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/irq.h2
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h2
-rw-r--r--arch/mips/include/asm/mach-cavium-octeon/mangle-port.h42
-rw-r--r--arch/mips/include/asm/mach-generic/kernel-entry-init.h2
-rw-r--r--arch/mips/include/asm/mach-ip27/dma-coherence.h2
-rw-r--r--arch/mips/include/asm/mach-ip27/irq.h2
-rw-r--r--arch/mips/include/asm/mach-ip27/kernel-entry-init.h2
-rw-r--r--arch/mips/include/asm/mach-ip32/dma-coherence.h2
-rw-r--r--arch/mips/include/asm/mach-jz4740/gpio.h2
-rw-r--r--arch/mips/include/asm/mach-jz4740/jz4740_nand.h2
-rw-r--r--arch/mips/include/asm/mach-jz4740/platform.h1
-rw-r--r--arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h4
-rw-r--r--arch/mips/include/asm/mach-lantiq/lantiq.h2
-rw-r--r--arch/mips/include/asm/mach-lantiq/lantiq_platform.h2
-rw-r--r--arch/mips/include/asm/mach-lantiq/xway/irq.h2
-rw-r--r--arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h2
-rw-r--r--arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h4
-rw-r--r--arch/mips/include/asm/mach-lantiq/xway/xway_dma.h2
-rw-r--r--arch/mips/include/asm/mach-loongson32/cpufreq.h1
-rw-r--r--arch/mips/include/asm/mach-loongson32/dma.h25
-rw-r--r--arch/mips/include/asm/mach-loongson32/irq.h1
-rw-r--r--arch/mips/include/asm/mach-loongson32/loongson1.h4
-rw-r--r--arch/mips/include/asm/mach-loongson32/nand.h30
-rw-r--r--arch/mips/include/asm/mach-loongson32/platform.h14
-rw-r--r--arch/mips/include/asm/mach-loongson32/regs-clk.h24
-rw-r--r--arch/mips/include/asm/mach-loongson32/regs-mux.h84
-rw-r--r--arch/mips/include/asm/mach-loongson32/regs-pwm.h12
-rw-r--r--arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h18
-rw-r--r--arch/mips/include/asm/mach-loongson64/kernel-entry-init.h18
-rw-r--r--arch/mips/include/asm/mach-loongson64/loongson_hwmon.h2
-rw-r--r--arch/mips/include/asm/mach-malta/kernel-entry-init.h6
-rw-r--r--arch/mips/include/asm/mach-ralink/mt7620.h3
-rw-r--r--arch/mips/include/asm/mach-ralink/mt7621.h2
-rw-r--r--arch/mips/include/asm/mach-ralink/pinmux.h2
-rw-r--r--arch/mips/include/asm/mach-ralink/ralink_regs.h2
-rw-r--r--arch/mips/include/asm/mach-ralink/rt288x.h2
-rw-r--r--arch/mips/include/asm/mach-ralink/rt305x.h2
-rw-r--r--arch/mips/include/asm/mips-cm.h14
-rw-r--r--arch/mips/include/asm/mips-cpc.h3
-rw-r--r--arch/mips/include/asm/mips-r2-to-r6-emul.h2
-rw-r--r--arch/mips/include/asm/mips_mt.h2
-rw-r--r--arch/mips/include/asm/mipsregs.h823
-rw-r--r--arch/mips/include/asm/mmu.h9
-rw-r--r--arch/mips/include/asm/mmu_context.h47
-rw-r--r--arch/mips/include/asm/msa.h36
-rw-r--r--arch/mips/include/asm/octeon/cvmx-bootinfo.h16
-rw-r--r--arch/mips/include/asm/octeon/cvmx-ciu3-defs.h353
-rw-r--r--arch/mips/include/asm/octeon/cvmx-cmd-queue.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx-config.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx-coremask.h89
-rw-r--r--arch/mips/include/asm/octeon/cvmx-fpa-defs.h1
-rw-r--r--arch/mips/include/asm/octeon/cvmx-helper-board.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx-ipd.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx-mio-defs.h410
-rw-r--r--arch/mips/include/asm/octeon/cvmx-mpi-defs.h328
-rw-r--r--arch/mips/include/asm/octeon/cvmx-pow.h2
-rw-r--r--arch/mips/include/asm/octeon/cvmx-sysinfo.h37
-rw-r--r--arch/mips/include/asm/octeon/cvmx.h29
-rw-r--r--arch/mips/include/asm/octeon/octeon-feature.h19
-rw-r--r--arch/mips/include/asm/octeon/octeon-model.h5
-rw-r--r--arch/mips/include/asm/octeon/octeon.h25
-rw-r--r--arch/mips/include/asm/page.h44
-rw-r--r--arch/mips/include/asm/pci.h20
-rw-r--r--arch/mips/include/asm/pci/bridge.h18
-rw-r--r--arch/mips/include/asm/pgalloc.h6
-rw-r--r--arch/mips/include/asm/pgtable-32.h33
-rw-r--r--arch/mips/include/asm/pgtable-64.h18
-rw-r--r--arch/mips/include/asm/pgtable-bits.h213
-rw-r--r--arch/mips/include/asm/pgtable.h165
-rw-r--r--arch/mips/include/asm/processor.h28
-rw-r--r--arch/mips/include/asm/r4kcache.h4
-rw-r--r--arch/mips/include/asm/seccomp.h47
-rw-r--r--arch/mips/include/asm/setup.h1
-rw-r--r--arch/mips/include/asm/sgi/hpc3.h4
-rw-r--r--arch/mips/include/asm/sgiarcs.h4
-rw-r--r--arch/mips/include/asm/sibyte/bcm1480_regs.h4
-rw-r--r--arch/mips/include/asm/signal.h12
-rw-r--r--arch/mips/include/asm/smp-cps.h2
-rw-r--r--arch/mips/include/asm/smp.h4
-rw-r--r--arch/mips/include/asm/sn/ioc3.h2
-rw-r--r--arch/mips/include/asm/sn/sn0/hubio.h2
-rw-r--r--arch/mips/include/asm/spinlock.h19
-rw-r--r--arch/mips/include/asm/switch_to.h2
-rw-r--r--arch/mips/include/asm/syscall.h2
-rw-r--r--arch/mips/include/asm/uaccess.h4
-rw-r--r--arch/mips/include/asm/uasm.h10
-rw-r--r--arch/mips/include/asm/watch.h10
-rw-r--r--arch/mips/include/uapi/asm/auxvec.h2
-rw-r--r--arch/mips/include/uapi/asm/inst.h125
-rw-r--r--arch/mips/include/uapi/asm/siginfo.h22
-rw-r--r--arch/mips/include/uapi/asm/unistd.h18
-rw-r--r--arch/mips/jz4740/board-qi_lb60.c139
-rw-r--r--arch/mips/jz4740/platform.c25
-rw-r--r--arch/mips/jz4740/setup.c10
-rw-r--r--arch/mips/kernel/Makefile6
-rw-r--r--arch/mips/kernel/asm-offsets.c62
-rw-r--r--arch/mips/kernel/binfmt_elfn32.c16
-rw-r--r--arch/mips/kernel/binfmt_elfo32.c32
-rw-r--r--arch/mips/kernel/bmips_5xxx_init.S753
-rw-r--r--arch/mips/kernel/bmips_vec.S50
-rw-r--r--arch/mips/kernel/branch.c30
-rw-r--r--arch/mips/kernel/cevt-r4k.c87
-rw-r--r--arch/mips/kernel/cps-vec.S311
-rw-r--r--arch/mips/kernel/cpu-bugs64.c6
-rw-r--r--arch/mips/kernel/cpu-probe.c384
-rw-r--r--arch/mips/kernel/crash.c16
-rw-r--r--arch/mips/kernel/csrc-r4k.c4
-rw-r--r--arch/mips/kernel/elf.c25
-rw-r--r--arch/mips/kernel/genex.S58
-rw-r--r--arch/mips/kernel/head.S42
-rw-r--r--arch/mips/kernel/idle.c5
-rw-r--r--arch/mips/kernel/irq.c3
-rw-r--r--arch/mips/kernel/mips-cm.c4
-rw-r--r--arch/mips/kernel/mips-r2-to-r6-emul.c161
-rw-r--r--arch/mips/kernel/module-rela.c77
-rw-r--r--arch/mips/kernel/module.c79
-rw-r--r--arch/mips/kernel/perf_event.c12
-rw-r--r--arch/mips/kernel/perf_event_mipsxx.c66
-rw-r--r--arch/mips/kernel/pm-cps.c21
-rw-r--r--arch/mips/kernel/pm.c2
-rw-r--r--arch/mips/kernel/proc.c1
-rw-r--r--arch/mips/kernel/process.c64
-rw-r--r--arch/mips/kernel/ptrace.c43
-rw-r--r--arch/mips/kernel/r4k_fpu.S10
-rw-r--r--arch/mips/kernel/r4k_switch.S1
-rw-r--r--arch/mips/kernel/relocate.c386
-rw-r--r--arch/mips/kernel/scall32-o32.S13
-rw-r--r--arch/mips/kernel/scall64-64.S5
-rw-r--r--arch/mips/kernel/scall64-n32.S18
-rw-r--r--arch/mips/kernel/scall64-o32.S18
-rw-r--r--arch/mips/kernel/segment.c13
-rw-r--r--arch/mips/kernel/setup.c83
-rw-r--r--arch/mips/kernel/signal.c33
-rw-r--r--arch/mips/kernel/signal32.c294
-rw-r--r--arch/mips/kernel/signal_o32.c285
-rw-r--r--arch/mips/kernel/smp-bmips.c89
-rw-r--r--arch/mips/kernel/smp-cps.c110
-rw-r--r--arch/mips/kernel/smp.c46
-rw-r--r--arch/mips/kernel/spram.c1
-rw-r--r--arch/mips/kernel/traps.c105
-rw-r--r--arch/mips/kernel/unaligned.c62
-rw-r--r--arch/mips/kernel/vdso.c13
-rw-r--r--arch/mips/kernel/vmlinux.lds.S21
-rw-r--r--arch/mips/kernel/watch.c79
-rw-r--r--arch/mips/kvm/Kconfig1
-rw-r--r--arch/mips/kvm/Makefile3
-rw-r--r--arch/mips/kvm/commpage.c2
-rw-r--r--arch/mips/kvm/dyntrans.c182
-rw-r--r--arch/mips/kvm/emulate.c606
-rw-r--r--arch/mips/kvm/entry.c701
-rw-r--r--arch/mips/kvm/fpu.S7
-rw-r--r--arch/mips/kvm/interrupt.c12
-rw-r--r--arch/mips/kvm/interrupt.h13
-rw-r--r--arch/mips/kvm/locore.S654
-rw-r--r--arch/mips/kvm/mips.c369
-rw-r--r--arch/mips/kvm/mmu.c375
-rw-r--r--arch/mips/kvm/stats.c21
-rw-r--r--arch/mips/kvm/tlb.c495
-rw-r--r--arch/mips/kvm/trace.h236
-rw-r--r--arch/mips/kvm/trap_emul.c185
-rw-r--r--arch/mips/lantiq/Kconfig12
-rw-r--r--arch/mips/lantiq/Makefile2
-rw-r--r--arch/mips/lantiq/clk.c2
-rw-r--r--arch/mips/lantiq/clk.h2
-rw-r--r--arch/mips/lantiq/early_printk.c2
-rw-r--r--arch/mips/lantiq/falcon/prom.c2
-rw-r--r--arch/mips/lantiq/falcon/reset.c2
-rw-r--r--arch/mips/lantiq/falcon/sysctrl.c2
-rw-r--r--arch/mips/lantiq/irq.c33
-rw-r--r--arch/mips/lantiq/prom.c15
-rw-r--r--arch/mips/lantiq/prom.h2
-rw-r--r--arch/mips/lantiq/xway/clk.c2
-rw-r--r--arch/mips/lantiq/xway/dcdc.c2
-rw-r--r--arch/mips/lantiq/xway/dma.c2
-rw-r--r--arch/mips/lantiq/xway/gptu.c2
-rw-r--r--arch/mips/lantiq/xway/prom.c2
-rw-r--r--arch/mips/lantiq/xway/reset.c4
-rw-r--r--arch/mips/lantiq/xway/sysctrl.c2
-rw-r--r--arch/mips/lantiq/xway/vmmc.c2
-rw-r--r--arch/mips/lantiq/xway/xrx200_phy_fw.c4
-rw-r--r--arch/mips/lasat/picvue_proc.c4
-rw-r--r--arch/mips/lib/ashldi3.c2
-rw-r--r--arch/mips/lib/ashrdi3.c2
-rw-r--r--arch/mips/lib/bswapdi.c2
-rw-r--r--arch/mips/lib/bswapsi.c2
-rw-r--r--arch/mips/lib/cmpdi2.c2
-rw-r--r--arch/mips/lib/dump_tlb.c27
-rw-r--r--arch/mips/lib/lshrdi3.c2
-rw-r--r--arch/mips/lib/memcpy.S2
-rw-r--r--arch/mips/lib/memset.S2
-rw-r--r--arch/mips/lib/r3k_dump_tlb.c9
-rw-r--r--arch/mips/lib/ucmpdi2.c2
-rw-r--r--arch/mips/loongson32/common/platform.c105
-rw-r--r--arch/mips/loongson32/common/reset.c13
-rw-r--r--arch/mips/loongson32/common/time.c27
-rw-r--r--arch/mips/loongson32/ls1b/board.c67
-rw-r--r--arch/mips/loongson64/Platform2
-rw-r--r--arch/mips/loongson64/common/dma-swiotlb.c10
-rw-r--r--arch/mips/loongson64/common/env.c7
-rw-r--r--arch/mips/loongson64/loongson-3/Makefile2
-rw-r--r--arch/mips/loongson64/loongson-3/acpi_init.c150
-rw-r--r--arch/mips/loongson64/loongson-3/hpet.c16
-rw-r--r--arch/mips/loongson64/loongson-3/irq.c10
-rw-r--r--arch/mips/loongson64/loongson-3/numa.c6
-rw-r--r--arch/mips/loongson64/loongson-3/smp.c108
-rw-r--r--arch/mips/math-emu/Makefile4
-rw-r--r--arch/mips/math-emu/cp1emu.c67
-rw-r--r--arch/mips/math-emu/dp_maddf.c35
-rw-r--r--arch/mips/math-emu/dp_msubf.c269
-rw-r--r--arch/mips/math-emu/dp_mul.c4
-rw-r--r--arch/mips/math-emu/dsemul.c335
-rw-r--r--arch/mips/math-emu/ieee754dp.c15
-rw-r--r--arch/mips/math-emu/ieee754dp.h1
-rw-r--r--arch/mips/math-emu/ieee754int.h10
-rw-r--r--arch/mips/math-emu/ieee754sp.c18
-rw-r--r--arch/mips/math-emu/ieee754sp.h17
-rw-r--r--arch/mips/math-emu/sp_add.c6
-rw-r--r--arch/mips/math-emu/sp_maddf.c43
-rw-r--r--arch/mips/math-emu/sp_msubf.c258
-rw-r--r--arch/mips/math-emu/sp_sub.c6
-rw-r--r--arch/mips/mm/c-r4k.c358
-rw-r--r--arch/mips/mm/cache.c41
-rw-r--r--arch/mips/mm/dma-default.c27
-rw-r--r--arch/mips/mm/fault.c2
-rw-r--r--arch/mips/mm/init.c16
-rw-r--r--arch/mips/mm/page.c9
-rw-r--r--arch/mips/mm/sc-debugfs.c4
-rw-r--r--arch/mips/mm/sc-ip22.c2
-rw-r--r--arch/mips/mm/sc-mips.c1
-rw-r--r--arch/mips/mm/sc-rm7k.c2
-rw-r--r--arch/mips/mm/tlb-r3k.c24
-rw-r--r--arch/mips/mm/tlb-r4k.c61
-rw-r--r--arch/mips/mm/tlb-r8k.c2
-rw-r--r--arch/mips/mm/tlbex.c264
-rw-r--r--arch/mips/mm/uasm-micromips.c13
-rw-r--r--arch/mips/mm/uasm-mips.c15
-rw-r--r--arch/mips/mm/uasm.c31
-rw-r--r--arch/mips/mti-malta/malta-dtshim.c4
-rw-r--r--arch/mips/mti-malta/malta-memory.c2
-rw-r--r--arch/mips/mti-malta/malta-setup.c9
-rw-r--r--arch/mips/mti-malta/malta-time.c40
-rw-r--r--arch/mips/mti-sead3/sead3-setup.c13
-rw-r--r--arch/mips/net/bpf_jit.c6
-rw-r--r--arch/mips/netlogic/common/nlm-dma.c4
-rw-r--r--arch/mips/netlogic/common/reset.S11
-rw-r--r--arch/mips/netlogic/common/smpboot.S4
-rw-r--r--arch/mips/netlogic/xlp/nlm_hal.c2
-rw-r--r--arch/mips/netlogic/xlr/setup.c2
-rw-r--r--arch/mips/oprofile/common.c2
-rw-r--r--arch/mips/oprofile/op_impl.h2
-rw-r--r--arch/mips/oprofile/op_model_loongson3.c35
-rw-r--r--arch/mips/oprofile/op_model_mipsxx.c4
-rw-r--r--arch/mips/pci/fixup-lantiq.c2
-rw-r--r--arch/mips/pci/ops-bridge.c4
-rw-r--r--arch/mips/pci/ops-lantiq.c2
-rw-r--r--arch/mips/pci/pci-alchemy.c2
-rw-r--r--arch/mips/pci/pci-ip32.c1
-rw-r--r--arch/mips/pci/pci-lantiq.c2
-rw-r--r--arch/mips/pci/pci-lantiq.h2
-rw-r--r--arch/mips/pci/pci-mt7620.c2
-rw-r--r--arch/mips/pci/pci-rt2880.c2
-rw-r--r--arch/mips/pci/pci.c22
-rw-r--r--arch/mips/pic32/Kconfig2
-rw-r--r--arch/mips/pic32/pic32mzda/init.c7
-rw-r--r--arch/mips/pic32/pic32mzda/time.c13
-rw-r--r--arch/mips/pistachio/init.c77
-rw-r--r--arch/mips/pmcs-msp71xx/msp_setup.c2
-rw-r--r--arch/mips/pnx833x/common/setup.c3
-rw-r--r--arch/mips/ralink/Makefile2
-rw-r--r--arch/mips/ralink/bootrom.c2
-rw-r--r--arch/mips/ralink/cevt-rt3352.c19
-rw-r--r--arch/mips/ralink/clk.c2
-rw-r--r--arch/mips/ralink/common.h2
-rw-r--r--arch/mips/ralink/ill_acc.c2
-rw-r--r--arch/mips/ralink/irq-gic.c2
-rw-r--r--arch/mips/ralink/irq.c2
-rw-r--r--arch/mips/ralink/mt7620.c123
-rw-r--r--arch/mips/ralink/mt7621.c2
-rw-r--r--arch/mips/ralink/of.c2
-rw-r--r--arch/mips/ralink/prom.c2
-rw-r--r--arch/mips/ralink/reset.c4
-rw-r--r--arch/mips/ralink/rt288x.c2
-rw-r--r--arch/mips/ralink/rt305x.c2
-rw-r--r--arch/mips/ralink/rt3883.c2
-rw-r--r--arch/mips/ralink/timer-gic.c2
-rw-r--r--arch/mips/ralink/timer.c4
-rw-r--r--arch/mips/sgi-ip22/ip22-reset.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-hubio.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-memory.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-nmi.c2
-rw-r--r--arch/mips/sgi-ip27/ip27-xtalk.c2
-rw-r--r--arch/mips/sibyte/Kconfig3
-rw-r--r--arch/mips/sni/rm200.c2
-rw-r--r--arch/mips/sni/time.c1
-rw-r--r--arch/mips/txx9/generic/pci.c2
-rw-r--r--arch/mips/txx9/generic/setup.c2
-rw-r--r--arch/mips/txx9/rbtx4939/setup.c2
-rw-r--r--arch/mips/vdso/Makefile44
-rw-r--r--arch/mips/vr41xx/common/cmu.c2
-rw-r--r--arch/mips/vr41xx/common/pmu.c2
-rw-r--r--arch/mips/xilfpga/init.c13
442 files changed, 15284 insertions, 7412 deletions
diff --git a/arch/mips/Kconfig b/arch/mips/Kconfig
index 2018c2b0e078..26388562e300 100644
--- a/arch/mips/Kconfig
+++ b/arch/mips/Kconfig
@@ -15,7 +15,7 @@ config MIPS
select HAVE_ARCH_KGDB
select HAVE_ARCH_SECCOMP_FILTER
select HAVE_ARCH_TRACEHOOK
- select HAVE_BPF_JIT if !CPU_MICROMIPS
+ select HAVE_CBPF_JIT if !CPU_MICROMIPS
select HAVE_FUNCTION_TRACER
select HAVE_DYNAMIC_FTRACE
select HAVE_FTRACE_MCOUNT_RECORD
@@ -48,6 +48,7 @@ config MIPS
select GENERIC_SCHED_CLOCK if !CAVIUM_OCTEON_SOC
select GENERIC_CMOS_UPDATE
select HAVE_MOD_ARCH_SPECIFIC
+ select HAVE_NMI
select VIRT_TO_BUS
select MODULES_USE_ELF_REL if MODULES
select MODULES_USE_ELF_RELA if MODULES && 64BIT
@@ -62,6 +63,8 @@ config MIPS
select HAVE_IRQ_TIME_ACCOUNTING
select GENERIC_TIME_VSYSCALL
select ARCH_CLOCKSOURCE_DATA
+ select HANDLE_DOMAIN_IRQ
+ select HAVE_EXIT_THREAD
menu "Machine selection"
@@ -79,7 +82,7 @@ config MIPS_ALCHEMY
select SYS_HAS_CPU_MIPS32_R1
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_APM_EMULATION
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select SYS_SUPPORTS_ZBOOT
select COMMON_CLK
@@ -98,7 +101,7 @@ config AR7
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_MIPS16
select SYS_SUPPORTS_ZBOOT_UART16550
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select VLYNQ
select HAVE_CLK
help
@@ -122,11 +125,11 @@ config ATH25
config ATH79
bool "Atheros AR71XX/AR724X/AR913X based boards"
select ARCH_HAS_RESET_CONTROLLER
- select ARCH_REQUIRE_GPIOLIB
select BOOT_RAW
select CEVT_R4K
select CSRC_R4K
select DMA_NONCOHERENT
+ select GPIOLIB
select HAVE_CLK
select COMMON_CLK
select CLKDEV_LOOKUP
@@ -137,7 +140,7 @@ config ATH79
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_MIPS16
- select SYS_SUPPORTS_ZBOOT
+ select SYS_SUPPORTS_ZBOOT_UART_PROM
select USE_OF
help
Support for the Atheros AR71XX/AR724X/AR913X SoCs.
@@ -170,7 +173,6 @@ config BMIPS_GENERIC
select USB_EHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
select USB_OHCI_BIG_ENDIAN_DESC if CPU_BIG_ENDIAN
select USB_OHCI_BIG_ENDIAN_MMIO if CPU_BIG_ENDIAN
- select ARCH_WANT_OPTIONAL_GPIOLIB
help
Build a generic DT-based kernel image that boots on select
BCM33xx cable modem chips, BCM63xx DSL chips, and BCM7xxx set-top
@@ -179,7 +181,6 @@ config BMIPS_GENERIC
config BCM47XX
bool "Broadcom BCM47XX based boards"
- select ARCH_WANT_OPTIONAL_GPIOLIB
select BOOT_RAW
select CEVT_R4K
select CSRC_R4K
@@ -196,6 +197,7 @@ config BCM47XX
select GPIOLIB
select LEDS_GPIO_REGISTER
select BCM47XX_NVRAM
+ select BCM47XX_SPROM
help
Support for BCM47XX based boards
@@ -211,7 +213,7 @@ config BCM63XX
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_HAS_EARLY_PRINTK
select SWAP_IO_SPACE
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select HAVE_CLK
select MIPS_L1_CACHE_SHIFT_4
help
@@ -305,7 +307,7 @@ config MACH_INGENIC
select SYS_SUPPORTS_ZBOOT_UART16550
select DMA_NONCOHERENT
select IRQ_MIPS_CPU
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select COMMON_CLK
select GENERIC_IRQ_CHIP
select BUILTIN_DTB
@@ -325,7 +327,7 @@ config LANTIQ
select SYS_SUPPORTS_MIPS16
select SYS_SUPPORTS_MULTITHREADING
select SYS_HAS_EARLY_PRINTK
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select SWAP_IO_SPACE
select BOOT_RAW
select CLKDEV_LOOKUP
@@ -377,14 +379,14 @@ config MACH_LOONGSON64
config MACH_PISTACHIO
bool "IMG Pistachio SoC based boards"
- select ARCH_REQUIRE_GPIOLIB
select BOOT_ELF32
select BOOT_RAW
select CEVT_R4K
select CLKSRC_MIPS_GIC
select COMMON_CLK
select CSRC_R4K
- select DMA_MAYBE_COHERENT
+ select DMA_NONCOHERENT
+ select GPIOLIB
select IRQ_MIPS_CPU
select LIBFDT
select MFD_SYSCON
@@ -397,6 +399,7 @@ config MACH_PISTACHIO
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_MIPS_CPS
select SYS_SUPPORTS_MULTITHREADING
+ select SYS_SUPPORTS_RELOCATABLE
select SYS_SUPPORTS_ZBOOT
select SYS_HAS_EARLY_PRINTK
select USE_GENERIC_EARLY_PRINTK_8250
@@ -406,13 +409,13 @@ config MACH_PISTACHIO
config MACH_XILFPGA
bool "MIPSfpga Xilinx based boards"
- select ARCH_REQUIRE_GPIOLIB
select BOOT_ELF32
select BOOT_RAW
select BUILTIN_DTB
select CEVT_R4K
select COMMON_CLK
select CSRC_R4K
+ select GPIOLIB
select IRQ_MIPS_CPU
select LIBFDT
select MIPS_CPU_SCACHE
@@ -473,6 +476,7 @@ config MIPS_MALTA
select SYS_SUPPORTS_MULTITHREADING
select SYS_SUPPORTS_SMARTMIPS
select SYS_SUPPORTS_ZBOOT
+ select SYS_SUPPORTS_RELOCATABLE
select USE_OF
select ZONE_DMA32 if 64BIT
select BUILTIN_DTB
@@ -507,6 +511,7 @@ config MIPS_SEAD3
select MIPS_MSC
select SYS_HAS_CPU_MIPS32_R1
select SYS_HAS_CPU_MIPS32_R2
+ select SYS_HAS_CPU_MIPS32_R6
select SYS_HAS_CPU_MIPS64_R1
select SYS_HAS_EARLY_PRINTK
select SYS_SUPPORTS_32BIT_KERNEL
@@ -516,6 +521,7 @@ config MIPS_SEAD3
select SYS_SUPPORTS_SMARTMIPS
select SYS_SUPPORTS_MICROMIPS
select SYS_SUPPORTS_MIPS16
+ select SYS_SUPPORTS_RELOCATABLE
select USB_EHCI_BIG_ENDIAN_DESC
select USB_EHCI_BIG_ENDIAN_MMIO
select USE_OF
@@ -536,7 +542,7 @@ config MACH_VR41XX
select CSRC_R4K
select SYS_HAS_CPU_VR41XX
select SYS_SUPPORTS_MIPS16
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
config NXP_STB220
bool "NXP STB220 board"
@@ -856,7 +862,7 @@ config MIKROTIK_RB532
select SYS_SUPPORTS_LITTLE_ENDIAN
select SWAP_IO_SPACE
select BOOT_RAW
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select MIPS_L1_CACHE_SHIFT_4
help
Support the Mikrotik(tm) RouterBoard 532 series,
@@ -875,11 +881,10 @@ config CAVIUM_OCTEON_SOC
select SYS_SUPPORTS_HOTPLUG_CPU if CPU_BIG_ENDIAN
select SYS_HAS_EARLY_PRINTK
select SYS_HAS_CPU_CAVIUM_OCTEON
- select SWAP_IO_SPACE
select HW_HAS_PCI
select ZONE_DMA32
select HOLES_IN_ZONE
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select LIBFDT
select USE_OF
select ARCH_SPARSEMEM_ENABLE
@@ -937,7 +942,7 @@ config NLM_XLP_BOARD
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_64BIT_KERNEL
select ARCH_PHYS_ADDR_T_64BIT
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select SYS_SUPPORTS_BIG_ENDIAN
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_SUPPORTS_HIGHMEM
@@ -1077,7 +1082,7 @@ config MIPS_CLOCK_VSYSCALL
def_bool CSRC_R4K || CLKSRC_MIPS_GIC
config GPIO_TXX9
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
bool
config FW_CFE
@@ -1106,16 +1111,6 @@ config NEED_DMA_MAP_STATE
config SYS_HAS_EARLY_PRINTK
bool
-config HOTPLUG_CPU
- bool "Support for hot-pluggable CPUs"
- depends on SMP && SYS_SUPPORTS_HOTPLUG_CPU
- help
- Say Y here to allow turning CPUs off and on. CPUs can be
- controlled through /sys/devices/system/cpu.
- (Note: power management support will enable this option
- automatically on SMP systems. )
- Say N if you want to disable CPU hotplug.
-
config SYS_SUPPORTS_HOTPLUG_CPU
bool
@@ -1155,6 +1150,13 @@ config ISA_DMA_API
config HOLES_IN_ZONE
bool
+config SYS_SUPPORTS_RELOCATABLE
+ bool
+ help
+ Selected if the platform supports relocating the kernel.
+ The platform must provide plat_get_fdt() if it selects CONFIG_USE_OF
+ to allow access to command line and entropy sources.
+
#
# Endianness selection. Sufficiently obscure so many users don't know what to
# answer,so we try hard to limit the available choices. Also the use of a
@@ -1342,11 +1344,30 @@ config CPU_LOONGSON3
select CPU_SUPPORTS_HUGEPAGES
select WEAK_ORDERING
select WEAK_REORDERING_BEYOND_LLSC
- select ARCH_REQUIRE_GPIOLIB
+ select MIPS_PGD_C0_CONTEXT
+ select GPIOLIB
help
The Loongson 3 processor implements the MIPS64R2 instruction
set with many extensions.
+config LOONGSON3_ENHANCEMENT
+ bool "New Loongson 3 CPU Enhancements"
+ default n
+ select CPU_MIPSR2
+ select CPU_HAS_PREFETCH
+ depends on CPU_LOONGSON3
+ help
+ New Loongson 3 CPU (since Loongson-3A R2, as opposed to Loongson-3A
+ R1, Loongson-3B R1 and Loongson-3B R2) has many enhancements, such as
+ FTLB, L1-VCache, EI/DI/Wait/Prefetch instruction, DSP/DSPv2 ASE, User
+ Local register, Read-Inhibit/Execute-Inhibit, SFB (Store Fill Buffer),
+ Fast TLB refill support, etc.
+
+ This option enable those enhancements which are not probed at run
+ time. If you want a generic kernel to run on all Loongson 3 machines,
+ please say 'N' here. If you want a high-performance kernel to run on
+ new Loongson 3 machines only, please say 'Y' here.
+
config CPU_LOONGSON2E
bool "Loongson 2E"
depends on SYS_HAS_CPU_LOONGSON2E
@@ -1362,7 +1383,7 @@ config CPU_LOONGSON2F
bool "Loongson 2F"
depends on SYS_HAS_CPU_LOONGSON2F
select CPU_LOONGSON2
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
help
The Loongson 2F processor implements the MIPS III instruction set
with many extensions.
@@ -1375,6 +1396,7 @@ config CPU_LOONGSON1B
bool "Loongson 1B"
depends on SYS_HAS_CPU_LOONGSON1B
select CPU_LOONGSON1
+ select LEDS_GPIO_REGISTER
help
The Loongson 1B is a 32-bit SoC, which implements the MIPS32
release 2 instruction set.
@@ -1455,6 +1477,7 @@ config CPU_MIPS64_R2
select CPU_SUPPORTS_HIGHMEM
select CPU_SUPPORTS_HUGEPAGES
select CPU_SUPPORTS_MSA
+ select HAVE_KVM
help
Choose this option to build a kernel for release 2 or later of the
MIPS64 architecture. Many modern embedded systems with a 64-bit
@@ -1472,6 +1495,7 @@ config CPU_MIPS64_R6
select CPU_SUPPORTS_MSA
select GENERIC_CSUM
select MIPS_O32_FP64_SUPPORT if MIPS32_O32
+ select HAVE_KVM
help
Choose this option to build a kernel for release 6 or later of the
MIPS64 architecture. New MIPS processors, starting with the Warrior
@@ -1673,6 +1697,7 @@ config CPU_XLP
select CPU_HAS_PREFETCH
select CPU_MIPSR2
select CPU_SUPPORTS_HUGEPAGES
+ select MIPS_ASID_BITS_VARIABLE
help
Netlogic Microsystems XLP processors.
endchoice
@@ -1798,6 +1823,7 @@ config CPU_BMIPS4380
select MIPS_L1_CACHE_SHIFT_6
select SYS_SUPPORTS_SMP
select SYS_SUPPORTS_HOTPLUG_CPU
+ select CPU_HAS_RIXI
config CPU_BMIPS5000
bool
@@ -1805,10 +1831,12 @@ config CPU_BMIPS5000
select MIPS_L1_CACHE_SHIFT_7
select SYS_SUPPORTS_SMP
select SYS_SUPPORTS_HOTPLUG_CPU
+ select CPU_HAS_RIXI
config SYS_HAS_CPU_LOONGSON3
bool
select CPU_SUPPORTS_CPUFREQ
+ select CPU_HAS_RIXI
config SYS_HAS_CPU_LOONGSON2E
bool
@@ -1961,11 +1989,15 @@ config CPU_MIPSR1
config CPU_MIPSR2
bool
default y if CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_CAVIUM_OCTEON
+ select CPU_HAS_RIXI
select MIPS_SPRAM
config CPU_MIPSR6
bool
default y if CPU_MIPS32_R6 || CPU_MIPS64_R6
+ select CPU_HAS_RIXI
+ select HAVE_ARCH_BITREVERSE
+ select MIPS_ASID_BITS_VARIABLE
select MIPS_SPRAM
config EVA
@@ -1999,7 +2031,7 @@ config MIPS_PGD_C0_CONTEXT
#
config HARDWARE_WATCHPOINTS
bool
- default y if CPU_MIPSR1 || CPU_MIPSR2
+ default y if CPU_MIPSR1 || CPU_MIPSR2 || CPU_MIPSR6
menu "Kernel type"
@@ -2042,6 +2074,16 @@ config KVM_GUEST_TIMER_FREQ
emulation when determining guest CPU Frequency. Instead, the guest's
timer frequency is specified directly.
+config MIPS_VA_BITS_48
+ bool "48 bits virtual memory"
+ depends on 64BIT
+ help
+ Support a maximum at least 48 bits of application virtual memory.
+ Default is 40 bits or less, depending on the CPU.
+ This option result in a small memory overhead for page tables.
+ This option is only supported with 16k and 64k page sizes.
+ If unsure, say N.
+
choice
prompt "Kernel page size"
default PAGE_SIZE_4KB
@@ -2049,6 +2091,7 @@ choice
config PAGE_SIZE_4KB
bool "4kB"
depends on !CPU_LOONGSON2 && !CPU_LOONGSON3
+ depends on !MIPS_VA_BITS_48
help
This option select the standard 4kB Linux page size. On some
R3000-family processors this is the only available page size. Using
@@ -2058,6 +2101,7 @@ config PAGE_SIZE_4KB
config PAGE_SIZE_8KB
bool "8kB"
depends on CPU_R8000 || CPU_CAVIUM_OCTEON
+ depends on !MIPS_VA_BITS_48
help
Using 8kB page size will result in higher performance kernel at
the price of higher memory consumption. This option is available
@@ -2076,6 +2120,7 @@ config PAGE_SIZE_16KB
config PAGE_SIZE_32KB
bool "32kB"
depends on CPU_CAVIUM_OCTEON
+ depends on !MIPS_VA_BITS_48
help
Using 32kB page size will result in higher performance kernel at
the price of higher memory consumption. This option is available
@@ -2280,7 +2325,7 @@ config MIPS_CMP
config MIPS_CPS
bool "MIPS Coherent Processing System support"
- depends on SYS_SUPPORTS_MIPS_CPS && !CPU_MIPSR6
+ depends on SYS_SUPPORTS_MIPS_CPS
select MIPS_CM
select MIPS_CPC
select MIPS_CPS_PM if HOTPLUG_CPU
@@ -2371,6 +2416,9 @@ config CPU_HAS_WB
config XKS01
bool
+config CPU_HAS_RIXI
+ bool
+
#
# Vectored interrupt mode is an R2 feature
#
@@ -2401,6 +2449,21 @@ config CPU_R4000_WORKAROUNDS
config CPU_R4400_WORKAROUNDS
bool
+config MIPS_ASID_SHIFT
+ int
+ default 6 if CPU_R3000 || CPU_TX39XX
+ default 4 if CPU_R8000
+ default 0
+
+config MIPS_ASID_BITS
+ int
+ default 0 if MIPS_ASID_BITS_VARIABLE
+ default 6 if CPU_R3000 || CPU_TX39XX
+ default 8
+
+config MIPS_ASID_BITS_VARIABLE
+ bool
+
#
# - Highmem only makes sense for the 32-bit kernel.
# - The current highmem code will only work properly on physically indexed
@@ -2470,6 +2533,61 @@ config NUMA
config SYS_SUPPORTS_NUMA
bool
+config RELOCATABLE
+ bool "Relocatable kernel"
+ depends on SYS_SUPPORTS_RELOCATABLE && (CPU_MIPS32_R2 || CPU_MIPS64_R2 || CPU_MIPS32_R6 || CPU_MIPS64_R6)
+ help
+ This builds a kernel image that retains relocation information
+ so it can be loaded someplace besides the default 1MB.
+ The relocations make the kernel binary about 15% larger,
+ but are discarded at runtime
+
+config RELOCATION_TABLE_SIZE
+ hex "Relocation table size"
+ depends on RELOCATABLE
+ range 0x0 0x01000000
+ default "0x00100000"
+ ---help---
+ A table of relocation data will be appended to the kernel binary
+ and parsed at boot to fix up the relocated kernel.
+
+ This option allows the amount of space reserved for the table to be
+ adjusted, although the default of 1Mb should be ok in most cases.
+
+ The build will fail and a valid size suggested if this is too small.
+
+ If unsure, leave at the default value.
+
+config RANDOMIZE_BASE
+ bool "Randomize the address of the kernel image"
+ depends on RELOCATABLE
+ ---help---
+ Randomizes the physical and virtual address at which the
+ kernel image is loaded, as a security feature that
+ deters exploit attempts relying on knowledge of the location
+ of kernel internals.
+
+ Entropy is generated using any coprocessor 0 registers available.
+
+ The kernel will be offset by up to RANDOMIZE_BASE_MAX_OFFSET.
+
+ If unsure, say N.
+
+config RANDOMIZE_BASE_MAX_OFFSET
+ hex "Maximum kASLR offset" if EXPERT
+ depends on RANDOMIZE_BASE
+ range 0x0 0x40000000 if EVA || 64BIT
+ range 0x0 0x08000000
+ default "0x01000000"
+ ---help---
+ When kASLR is active, this provides the maximum offset that will
+ be applied to the kernel image. It should be set according to the
+ amount of physical RAM available in the target system minus
+ PHYSICAL_START and must be a power of 2.
+
+ This is limited by the size of KSEG0, 256Mb on 32-bit or 1Gb with
+ EVA or 64-bit. The default is 16Mb.
+
config NODES_SHIFT
int
default "6"
@@ -2477,7 +2595,7 @@ config NODES_SHIFT
config HW_PERF_EVENTS
bool "Enable hardware performance counter support for perf events"
- depends on PERF_EVENTS && OPROFILE=n && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP || CPU_LOONGSON3)
+ depends on PERF_EVENTS && !OPROFILE && (CPU_MIPS32 || CPU_MIPS64 || CPU_R10000 || CPU_SB1 || CPU_CAVIUM_OCTEON || CPU_XLP || CPU_LOONGSON3)
default y
help
Enable hardware performance counter support for perf events. If
@@ -2507,6 +2625,16 @@ config SMP
If you don't know what to do here, say N.
+config HOTPLUG_CPU
+ bool "Support for hot-pluggable CPUs"
+ depends on SMP && SYS_SUPPORTS_HOTPLUG_CPU
+ help
+ Say Y here to allow turning CPUs off and on. CPUs can be
+ controlled through /sys/devices/system/cpu.
+ (Note: power management support will enable this option
+ automatically on SMP systems. )
+ Say N if you want to disable CPU hotplug.
+
config SMP_UP
bool
@@ -2758,10 +2886,10 @@ choice
the documented boot protocol using a device tree.
config MIPS_RAW_APPENDED_DTB
- bool "vmlinux.bin"
+ bool "vmlinux.bin or vmlinuz.bin"
help
With this option, the boot code will look for a device tree binary
- DTB) appended to raw vmlinux.bin (without decompressor).
+ DTB) appended to raw vmlinux.bin or vmlinuz.bin.
(e.g. cat vmlinux.bin <filename>.dtb > vmlinux_w_dtb).
This is meant as a backward compatibility convenience for those
@@ -2773,24 +2901,6 @@ choice
look like a DTB header after a reboot if no actual DTB is appended
to vmlinux.bin. Do not leave this option active in a production kernel
if you don't intend to always append a DTB.
-
- config MIPS_ZBOOT_APPENDED_DTB
- bool "vmlinuz.bin"
- depends on SYS_SUPPORTS_ZBOOT
- help
- With this option, the boot code will look for a device tree binary
- DTB) appended to raw vmlinuz.bin (with decompressor).
- (e.g. cat vmlinuz.bin <filename>.dtb > vmlinuz_w_dtb).
-
- This is meant as a backward compatibility convenience for those
- systems with a bootloader that can't be upgraded to accommodate
- the documented boot protocol using a device tree.
-
- Beware that there is very little in terms of protection against
- this option being confused by leftover garbage in memory that might
- look like a DTB header after a reboot if no actual DTB is appended
- to vmlinuz.bin. Do not leave this option active in a production kernel
- if you don't intend to always append a DTB.
endchoice
choice
@@ -2810,6 +2920,10 @@ choice
config MIPS_CMDLINE_FROM_BOOTLOADER
bool "Bootloader kernel arguments if available"
+
+ config MIPS_CMDLINE_BUILTIN_EXTEND
+ depends on CMDLINE_BOOL
+ bool "Extend builtin kernel arguments with bootloader arguments"
endchoice
endmenu
@@ -2987,6 +3101,7 @@ config MIPS32_N32
config BINFMT_ELF32
bool
default y if MIPS32_O32 || MIPS32_N32
+ select ELFCORE
endmenu
diff --git a/arch/mips/Makefile b/arch/mips/Makefile
index e78d60dbdffd..efd7a9dc93c4 100644
--- a/arch/mips/Makefile
+++ b/arch/mips/Makefile
@@ -12,6 +12,9 @@
# for "archclean" cleaning up for this architecture.
#
+archscripts: scripts_basic
+ $(Q)$(MAKE) $(build)=arch/mips/boot/tools relocs
+
KBUILD_DEFCONFIG := ip22_defconfig
#
@@ -93,6 +96,10 @@ LDFLAGS_vmlinux += -G 0 -static -n -nostdlib
KBUILD_AFLAGS_MODULE += -mlong-calls
KBUILD_CFLAGS_MODULE += -mlong-calls
+ifeq ($(CONFIG_RELOCATABLE),y)
+LDFLAGS_vmlinux += --emit-relocs
+endif
+
#
# pass -msoft-float to GAS if it supports it. However on newer binutils
# (specifically newer than 2.24.51.20140728) we then also need to explicitly
@@ -193,6 +200,8 @@ ifeq ($(CONFIG_CPU_HAS_MSA),y)
toolchain-msa := $(call cc-option-yn,$(mips-cflags) -mhard-float -mfp64 -Wa$(comma)-mmsa)
cflags-$(toolchain-msa) += -DTOOLCHAIN_SUPPORTS_MSA
endif
+toolchain-virt := $(call cc-option-yn,$(mips-cflags) -mvirt)
+cflags-$(toolchain-virt) += -DTOOLCHAIN_SUPPORTS_VIRT
cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_NEVER) += -mcompact-branches=never
cflags-$(CONFIG_MIPS_COMPACT_BRANCHES_OPTIMAL) += -mcompact-branches=optimal
@@ -310,6 +319,10 @@ rom.bin rom.sw: vmlinux
$(bootvars-y) $@
endif
+CMD_RELOCS = arch/mips/boot/tools/relocs
+quiet_cmd_relocs = RELOCS $<
+ cmd_relocs = $(CMD_RELOCS) $<
+
#
# Some machines like the Indy need 32-bit ELF binaries for booting purposes.
# Other need ECOFF, so we build a 32-bit ELF binary for them which we then
@@ -318,6 +331,11 @@ endif
quiet_cmd_32 = OBJCOPY $@
cmd_32 = $(OBJCOPY) -O $(32bit-bfd) $(OBJCOPYFLAGS) $< $@
vmlinux.32: vmlinux
+ifeq ($(CONFIG_RELOCATABLE)$(CONFIG_64BIT),yy)
+# Currently, objcopy fails to handle the relocations in the elf64
+# So the relocs tool must be run here to remove them first
+ $(call cmd,relocs)
+endif
$(call cmd,32)
#
@@ -333,6 +351,9 @@ all: $(all-y)
# boot
$(boot-y): $(vmlinux-32) FORCE
+ifeq ($(CONFIG_RELOCATABLE)$(CONFIG_32BIT),yy)
+ $(call cmd,relocs)
+endif
$(Q)$(MAKE) $(build)=arch/mips/boot VMLINUX=$(vmlinux-32) \
$(bootvars-y) arch/mips/boot/$@
@@ -385,6 +406,7 @@ endif
archclean:
$(Q)$(MAKE) $(clean)=arch/mips/boot
$(Q)$(MAKE) $(clean)=arch/mips/boot/compressed
+ $(Q)$(MAKE) $(clean)=arch/mips/boot/tools
$(Q)$(MAKE) $(clean)=arch/mips/lasat
define archhelp
diff --git a/arch/mips/alchemy/Kconfig b/arch/mips/alchemy/Kconfig
index 7fa24881b708..88b4d6a792c1 100644
--- a/arch/mips/alchemy/Kconfig
+++ b/arch/mips/alchemy/Kconfig
@@ -20,7 +20,7 @@ config MIPS_MTX1
config MIPS_DB1XXX
bool "Alchemy DB1XXX / PB1XXX boards"
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select HW_HAS_PCI
select SYS_SUPPORTS_LITTLE_ENDIAN
select SYS_HAS_EARLY_PRINTK
diff --git a/arch/mips/alchemy/common/clock.c b/arch/mips/alchemy/common/clock.c
index bd34f4093cd9..7ba7ea0a22f8 100644
--- a/arch/mips/alchemy/common/clock.c
+++ b/arch/mips/alchemy/common/clock.c
@@ -1043,8 +1043,7 @@ static int __init alchemy_clk_init(void)
/* Root of the Alchemy clock tree: external 12MHz crystal osc */
c = clk_register_fixed_rate(NULL, ALCHEMY_ROOT_CLK, NULL,
- CLK_IS_ROOT,
- ALCHEMY_ROOTCLK_RATE);
+ 0, ALCHEMY_ROOTCLK_RATE);
ERRCK(c)
/* CPU core clock */
diff --git a/arch/mips/alchemy/common/dbdma.c b/arch/mips/alchemy/common/dbdma.c
index 745695db5ba0..f2f264b5aafe 100644
--- a/arch/mips/alchemy/common/dbdma.c
+++ b/arch/mips/alchemy/common/dbdma.c
@@ -261,7 +261,7 @@ u32 au1xxx_dbdma_chan_alloc(u32 srcid, u32 destid,
au1x_dma_chan_t *cp;
/*
- * We do the intialization on the first channel allocation.
+ * We do the initialization on the first channel allocation.
* We have to wait because of the interrupt handler initialization
* which can't be done successfully during board set up.
*/
@@ -964,7 +964,7 @@ u32 au1xxx_dbdma_put_dscr(u32 chanid, au1x_ddma_desc_t *dscr)
dp->dscr_source1 = dscr->dscr_source1;
dp->dscr_cmd1 = dscr->dscr_cmd1;
nbytes = dscr->dscr_cmd1;
- /* Allow the caller to specifiy if an interrupt is generated */
+ /* Allow the caller to specify if an interrupt is generated */
dp->dscr_cmd0 &= ~DSCR_CMD0_IE;
dp->dscr_cmd0 |= dscr->dscr_cmd0 | DSCR_CMD0_V;
ctp->chan_ptr->ddma_dbell = 0;
diff --git a/arch/mips/alchemy/devboards/db1000.c b/arch/mips/alchemy/devboards/db1000.c
index bdeed9d13c6f..433c4b9a9f0a 100644
--- a/arch/mips/alchemy/devboards/db1000.c
+++ b/arch/mips/alchemy/devboards/db1000.c
@@ -503,15 +503,15 @@ int __init db1000_dev_setup(void)
if (board == BCSR_WHOAMI_DB1500) {
c0 = AU1500_GPIO2_INT;
c1 = AU1500_GPIO5_INT;
- d0 = AU1500_GPIO0_INT;
- d1 = AU1500_GPIO3_INT;
+ d0 = 0; /* GPIO number, NOT irq! */
+ d1 = 3; /* GPIO number, NOT irq! */
s0 = AU1500_GPIO1_INT;
s1 = AU1500_GPIO4_INT;
} else if (board == BCSR_WHOAMI_DB1100) {
c0 = AU1100_GPIO2_INT;
c1 = AU1100_GPIO5_INT;
- d0 = AU1100_GPIO0_INT;
- d1 = AU1100_GPIO3_INT;
+ d0 = 0; /* GPIO number, NOT irq! */
+ d1 = 3; /* GPIO number, NOT irq! */
s0 = AU1100_GPIO1_INT;
s1 = AU1100_GPIO4_INT;
@@ -545,15 +545,15 @@ int __init db1000_dev_setup(void)
} else if (board == BCSR_WHOAMI_DB1000) {
c0 = AU1000_GPIO2_INT;
c1 = AU1000_GPIO5_INT;
- d0 = AU1000_GPIO0_INT;
- d1 = AU1000_GPIO3_INT;
+ d0 = 0; /* GPIO number, NOT irq! */
+ d1 = 3; /* GPIO number, NOT irq! */
s0 = AU1000_GPIO1_INT;
s1 = AU1000_GPIO4_INT;
platform_add_devices(db1000_devs, ARRAY_SIZE(db1000_devs));
} else if ((board == BCSR_WHOAMI_PB1500) ||
(board == BCSR_WHOAMI_PB1500R2)) {
c0 = AU1500_GPIO203_INT;
- d0 = AU1500_GPIO201_INT;
+ d0 = 1; /* GPIO number, NOT irq! */
s0 = AU1500_GPIO202_INT;
twosocks = 0;
flashsize = 64;
@@ -566,7 +566,7 @@ int __init db1000_dev_setup(void)
*/
} else if (board == BCSR_WHOAMI_PB1100) {
c0 = AU1100_GPIO11_INT;
- d0 = AU1100_GPIO9_INT;
+ d0 = 9; /* GPIO number, NOT irq! */
s0 = AU1100_GPIO10_INT;
twosocks = 0;
flashsize = 64;
@@ -583,7 +583,6 @@ int __init db1000_dev_setup(void)
} else
return 0; /* unknown board, no further dev setup to do */
- irq_set_irq_type(d0, IRQ_TYPE_EDGE_BOTH);
irq_set_irq_type(c0, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(s0, IRQ_TYPE_LEVEL_LOW);
@@ -597,7 +596,6 @@ int __init db1000_dev_setup(void)
c0, d0, /*s0*/0, 0, 0);
if (twosocks) {
- irq_set_irq_type(d1, IRQ_TYPE_EDGE_BOTH);
irq_set_irq_type(c1, IRQ_TYPE_LEVEL_LOW);
irq_set_irq_type(s1, IRQ_TYPE_LEVEL_LOW);
diff --git a/arch/mips/alchemy/devboards/db1550.c b/arch/mips/alchemy/devboards/db1550.c
index b518f029f5e7..1c01d6eadb08 100644
--- a/arch/mips/alchemy/devboards/db1550.c
+++ b/arch/mips/alchemy/devboards/db1550.c
@@ -514,7 +514,7 @@ static void __init db1550_devices(void)
AU1000_PCMCIA_MEM_PHYS_ADDR + 0x000400000 - 1,
AU1000_PCMCIA_IO_PHYS_ADDR,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x000010000 - 1,
- AU1550_GPIO3_INT, AU1550_GPIO0_INT,
+ AU1550_GPIO3_INT, 0,
/*AU1550_GPIO21_INT*/0, 0, 0);
db1x_register_pcmcia_socket(
@@ -524,7 +524,7 @@ static void __init db1550_devices(void)
AU1000_PCMCIA_MEM_PHYS_ADDR + 0x004400000 - 1,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x004000000,
AU1000_PCMCIA_IO_PHYS_ADDR + 0x004010000 - 1,
- AU1550_GPIO5_INT, AU1550_GPIO1_INT,
+ AU1550_GPIO5_INT, 1,
/*AU1550_GPIO22_INT*/0, 0, 1);
platform_device_register(&db1550_nand_dev);
diff --git a/arch/mips/ath79/Kconfig b/arch/mips/ath79/Kconfig
index 13c04cf54afa..dfc60209dc63 100644
--- a/arch/mips/ath79/Kconfig
+++ b/arch/mips/ath79/Kconfig
@@ -71,18 +71,6 @@ config ATH79_MACH_UBNT_XM
Say 'Y' here if you want your kernel to support the
Ubiquiti Networks XM (rev 1.0) board.
-choice
- prompt "Build a DTB in the kernel"
- optional
- help
- Select a devicetree that should be built into the kernel.
-
- config DTB_TL_WR1043ND_V1
- bool "TL-WR1043ND Version 1"
- select BUILTIN_DTB
- select SOC_AR913X
-endchoice
-
endmenu
config SOC_AR71XX
diff --git a/arch/mips/ath79/clock.c b/arch/mips/ath79/clock.c
index eb5117ced95a..2e7378467c5c 100644
--- a/arch/mips/ath79/clock.c
+++ b/arch/mips/ath79/clock.c
@@ -18,18 +18,21 @@
#include <linux/clk.h>
#include <linux/clkdev.h>
#include <linux/clk-provider.h>
+#include <linux/of.h>
+#include <linux/of_address.h>
+#include <dt-bindings/clock/ath79-clk.h>
#include <asm/div64.h>
#include <asm/mach-ath79/ath79.h>
#include <asm/mach-ath79/ar71xx_regs.h>
#include "common.h"
+#include "machtypes.h"
#define AR71XX_BASE_FREQ 40000000
-#define AR724X_BASE_FREQ 5000000
-#define AR913X_BASE_FREQ 5000000
+#define AR724X_BASE_FREQ 40000000
-static struct clk *clks[3];
+static struct clk *clks[ATH79_CLK_END];
static struct clk_onecell_data clk_data = {
.clks = clks,
.clk_num = ARRAY_SIZE(clks),
@@ -41,7 +44,7 @@ static struct clk *__init ath79_add_sys_clkdev(
struct clk *clk;
int err;
- clk = clk_register_fixed_rate(NULL, id, NULL, CLK_IS_ROOT, rate);
+ clk = clk_register_fixed_rate(NULL, id, NULL, 0, rate);
if (!clk)
panic("failed to allocate %s clock structure", id);
@@ -79,140 +82,139 @@ static void __init ar71xx_clocks_init(void)
ahb_rate = cpu_rate / div;
ath79_add_sys_clkdev("ref", ref_rate);
- clks[0] = ath79_add_sys_clkdev("cpu", cpu_rate);
- clks[1] = ath79_add_sys_clkdev("ddr", ddr_rate);
- clks[2] = ath79_add_sys_clkdev("ahb", ahb_rate);
+ clks[ATH79_CLK_CPU] = ath79_add_sys_clkdev("cpu", cpu_rate);
+ clks[ATH79_CLK_DDR] = ath79_add_sys_clkdev("ddr", ddr_rate);
+ clks[ATH79_CLK_AHB] = ath79_add_sys_clkdev("ahb", ahb_rate);
clk_add_alias("wdt", NULL, "ahb", NULL);
clk_add_alias("uart", NULL, "ahb", NULL);
}
-static void __init ar724x_clocks_init(void)
+static struct clk * __init ath79_reg_ffclk(const char *name,
+ const char *parent_name, unsigned int mult, unsigned int div)
{
- unsigned long ref_rate;
- unsigned long cpu_rate;
- unsigned long ddr_rate;
- unsigned long ahb_rate;
- u32 pll;
- u32 freq;
- u32 div;
-
- ref_rate = AR724X_BASE_FREQ;
- pll = ath79_pll_rr(AR724X_PLL_REG_CPU_CONFIG);
+ struct clk *clk;
- div = ((pll >> AR724X_PLL_FB_SHIFT) & AR724X_PLL_FB_MASK);
- freq = div * ref_rate;
+ clk = clk_register_fixed_factor(NULL, name, parent_name, 0, mult, div);
+ if (!clk)
+ panic("failed to allocate %s clock structure", name);
- div = ((pll >> AR724X_PLL_REF_DIV_SHIFT) & AR724X_PLL_REF_DIV_MASK);
- freq *= div;
+ return clk;
+}
- cpu_rate = freq;
+static void __init ar724x_clk_init(struct clk *ref_clk, void __iomem *pll_base)
+{
+ u32 pll;
+ u32 mult, div, ddr_div, ahb_div;
- div = ((pll >> AR724X_DDR_DIV_SHIFT) & AR724X_DDR_DIV_MASK) + 1;
- ddr_rate = freq / div;
+ pll = __raw_readl(pll_base + AR724X_PLL_REG_CPU_CONFIG);
- div = (((pll >> AR724X_AHB_DIV_SHIFT) & AR724X_AHB_DIV_MASK) + 1) * 2;
- ahb_rate = cpu_rate / div;
+ mult = ((pll >> AR724X_PLL_FB_SHIFT) & AR724X_PLL_FB_MASK);
+ div = ((pll >> AR724X_PLL_REF_DIV_SHIFT) & AR724X_PLL_REF_DIV_MASK) * 2;
- ath79_add_sys_clkdev("ref", ref_rate);
- clks[0] = ath79_add_sys_clkdev("cpu", cpu_rate);
- clks[1] = ath79_add_sys_clkdev("ddr", ddr_rate);
- clks[2] = ath79_add_sys_clkdev("ahb", ahb_rate);
+ ddr_div = ((pll >> AR724X_DDR_DIV_SHIFT) & AR724X_DDR_DIV_MASK) + 1;
+ ahb_div = (((pll >> AR724X_AHB_DIV_SHIFT) & AR724X_AHB_DIV_MASK) + 1) * 2;
- clk_add_alias("wdt", NULL, "ahb", NULL);
- clk_add_alias("uart", NULL, "ahb", NULL);
+ clks[ATH79_CLK_CPU] = ath79_reg_ffclk("cpu", "ref", mult, div);
+ clks[ATH79_CLK_DDR] = ath79_reg_ffclk("ddr", "ref", mult, div * ddr_div);
+ clks[ATH79_CLK_AHB] = ath79_reg_ffclk("ahb", "ref", mult, div * ahb_div);
}
-static void __init ar913x_clocks_init(void)
+static void __init ar724x_clocks_init(void)
{
- unsigned long ref_rate;
- unsigned long cpu_rate;
- unsigned long ddr_rate;
- unsigned long ahb_rate;
- u32 pll;
- u32 freq;
- u32 div;
+ struct clk *ref_clk;
- ref_rate = AR913X_BASE_FREQ;
- pll = ath79_pll_rr(AR913X_PLL_REG_CPU_CONFIG);
+ ref_clk = ath79_add_sys_clkdev("ref", AR724X_BASE_FREQ);
- div = ((pll >> AR913X_PLL_FB_SHIFT) & AR913X_PLL_FB_MASK);
- freq = div * ref_rate;
+ ar724x_clk_init(ref_clk, ath79_pll_base);
- cpu_rate = freq;
-
- div = ((pll >> AR913X_DDR_DIV_SHIFT) & AR913X_DDR_DIV_MASK) + 1;
- ddr_rate = freq / div;
-
- div = (((pll >> AR913X_AHB_DIV_SHIFT) & AR913X_AHB_DIV_MASK) + 1) * 2;
- ahb_rate = cpu_rate / div;
-
- ath79_add_sys_clkdev("ref", ref_rate);
- clks[0] = ath79_add_sys_clkdev("cpu", cpu_rate);
- clks[1] = ath79_add_sys_clkdev("ddr", ddr_rate);
- clks[2] = ath79_add_sys_clkdev("ahb", ahb_rate);
+ /* just make happy plat_time_init() from arch/mips/ath79/setup.c */
+ clk_register_clkdev(clks[ATH79_CLK_CPU], "cpu", NULL);
+ clk_register_clkdev(clks[ATH79_CLK_DDR], "ddr", NULL);
+ clk_register_clkdev(clks[ATH79_CLK_AHB], "ahb", NULL);
clk_add_alias("wdt", NULL, "ahb", NULL);
clk_add_alias("uart", NULL, "ahb", NULL);
}
-static void __init ar933x_clocks_init(void)
+static void __init ar9330_clk_init(struct clk *ref_clk, void __iomem *pll_base)
{
- unsigned long ref_rate;
- unsigned long cpu_rate;
- unsigned long ddr_rate;
- unsigned long ahb_rate;
u32 clock_ctrl;
- u32 cpu_config;
- u32 freq;
- u32 t;
+ u32 ref_div;
+ u32 ninit_mul;
+ u32 out_div;
- t = ath79_reset_rr(AR933X_RESET_REG_BOOTSTRAP);
- if (t & AR933X_BOOTSTRAP_REF_CLK_40)
- ref_rate = (40 * 1000 * 1000);
- else
- ref_rate = (25 * 1000 * 1000);
+ u32 cpu_div;
+ u32 ddr_div;
+ u32 ahb_div;
- clock_ctrl = ath79_pll_rr(AR933X_PLL_CLOCK_CTRL_REG);
+ clock_ctrl = __raw_readl(pll_base + AR933X_PLL_CLOCK_CTRL_REG);
if (clock_ctrl & AR933X_PLL_CLOCK_CTRL_BYPASS) {
- cpu_rate = ref_rate;
- ahb_rate = ref_rate;
- ddr_rate = ref_rate;
+ ref_div = 1;
+ ninit_mul = 1;
+ out_div = 1;
+
+ cpu_div = 1;
+ ddr_div = 1;
+ ahb_div = 1;
} else {
- cpu_config = ath79_pll_rr(AR933X_PLL_CPU_CONFIG_REG);
+ u32 cpu_config;
+ u32 t;
+
+ cpu_config = __raw_readl(pll_base + AR933X_PLL_CPU_CONFIG_REG);
t = (cpu_config >> AR933X_PLL_CPU_CONFIG_REFDIV_SHIFT) &
AR933X_PLL_CPU_CONFIG_REFDIV_MASK;
- freq = ref_rate / t;
+ ref_div = t;
- t = (cpu_config >> AR933X_PLL_CPU_CONFIG_NINT_SHIFT) &
+ ninit_mul = (cpu_config >> AR933X_PLL_CPU_CONFIG_NINT_SHIFT) &
AR933X_PLL_CPU_CONFIG_NINT_MASK;
- freq *= t;
t = (cpu_config >> AR933X_PLL_CPU_CONFIG_OUTDIV_SHIFT) &
AR933X_PLL_CPU_CONFIG_OUTDIV_MASK;
if (t == 0)
t = 1;
- freq >>= t;
+ out_div = (1 << t);
- t = ((clock_ctrl >> AR933X_PLL_CLOCK_CTRL_CPU_DIV_SHIFT) &
+ cpu_div = ((clock_ctrl >> AR933X_PLL_CLOCK_CTRL_CPU_DIV_SHIFT) &
AR933X_PLL_CLOCK_CTRL_CPU_DIV_MASK) + 1;
- cpu_rate = freq / t;
- t = ((clock_ctrl >> AR933X_PLL_CLOCK_CTRL_DDR_DIV_SHIFT) &
+ ddr_div = ((clock_ctrl >> AR933X_PLL_CLOCK_CTRL_DDR_DIV_SHIFT) &
AR933X_PLL_CLOCK_CTRL_DDR_DIV_MASK) + 1;
- ddr_rate = freq / t;
- t = ((clock_ctrl >> AR933X_PLL_CLOCK_CTRL_AHB_DIV_SHIFT) &
+ ahb_div = ((clock_ctrl >> AR933X_PLL_CLOCK_CTRL_AHB_DIV_SHIFT) &
AR933X_PLL_CLOCK_CTRL_AHB_DIV_MASK) + 1;
- ahb_rate = freq / t;
}
- ath79_add_sys_clkdev("ref", ref_rate);
- clks[0] = ath79_add_sys_clkdev("cpu", cpu_rate);
- clks[1] = ath79_add_sys_clkdev("ddr", ddr_rate);
- clks[2] = ath79_add_sys_clkdev("ahb", ahb_rate);
+ clks[ATH79_CLK_CPU] = ath79_reg_ffclk("cpu", "ref",
+ ninit_mul, ref_div * out_div * cpu_div);
+ clks[ATH79_CLK_DDR] = ath79_reg_ffclk("ddr", "ref",
+ ninit_mul, ref_div * out_div * ddr_div);
+ clks[ATH79_CLK_AHB] = ath79_reg_ffclk("ahb", "ref",
+ ninit_mul, ref_div * out_div * ahb_div);
+}
+
+static void __init ar933x_clocks_init(void)
+{
+ struct clk *ref_clk;
+ unsigned long ref_rate;
+ u32 t;
+
+ t = ath79_reset_rr(AR933X_RESET_REG_BOOTSTRAP);
+ if (t & AR933X_BOOTSTRAP_REF_CLK_40)
+ ref_rate = (40 * 1000 * 1000);
+ else
+ ref_rate = (25 * 1000 * 1000);
+
+ ref_clk = ath79_add_sys_clkdev("ref", ref_rate);
+
+ ar9330_clk_init(ref_clk, ath79_pll_base);
+
+ /* just make happy plat_time_init() from arch/mips/ath79/setup.c */
+ clk_register_clkdev(clks[ATH79_CLK_CPU], "cpu", NULL);
+ clk_register_clkdev(clks[ATH79_CLK_DDR], "ddr", NULL);
+ clk_register_clkdev(clks[ATH79_CLK_AHB], "ahb", NULL);
clk_add_alias("wdt", NULL, "ahb", NULL);
clk_add_alias("uart", NULL, "ref", NULL);
@@ -344,9 +346,9 @@ static void __init ar934x_clocks_init(void)
ahb_rate = cpu_pll / (postdiv + 1);
ath79_add_sys_clkdev("ref", ref_rate);
- clks[0] = ath79_add_sys_clkdev("cpu", cpu_rate);
- clks[1] = ath79_add_sys_clkdev("ddr", ddr_rate);
- clks[2] = ath79_add_sys_clkdev("ahb", ahb_rate);
+ clks[ATH79_CLK_CPU] = ath79_add_sys_clkdev("cpu", cpu_rate);
+ clks[ATH79_CLK_DDR] = ath79_add_sys_clkdev("ddr", ddr_rate);
+ clks[ATH79_CLK_AHB] = ath79_add_sys_clkdev("ahb", ahb_rate);
clk_add_alias("wdt", NULL, "ref", NULL);
clk_add_alias("uart", NULL, "ref", NULL);
@@ -431,9 +433,9 @@ static void __init qca955x_clocks_init(void)
ahb_rate = cpu_pll / (postdiv + 1);
ath79_add_sys_clkdev("ref", ref_rate);
- clks[0] = ath79_add_sys_clkdev("cpu", cpu_rate);
- clks[1] = ath79_add_sys_clkdev("ddr", ddr_rate);
- clks[2] = ath79_add_sys_clkdev("ahb", ahb_rate);
+ clks[ATH79_CLK_CPU] = ath79_add_sys_clkdev("cpu", cpu_rate);
+ clks[ATH79_CLK_DDR] = ath79_add_sys_clkdev("ddr", ddr_rate);
+ clks[ATH79_CLK_AHB] = ath79_add_sys_clkdev("ahb", ahb_rate);
clk_add_alias("wdt", NULL, "ref", NULL);
clk_add_alias("uart", NULL, "ref", NULL);
@@ -443,10 +445,8 @@ void __init ath79_clocks_init(void)
{
if (soc_is_ar71xx())
ar71xx_clocks_init();
- else if (soc_is_ar724x())
+ else if (soc_is_ar724x() || soc_is_ar913x())
ar724x_clocks_init();
- else if (soc_is_ar913x())
- ar913x_clocks_init();
else if (soc_is_ar933x())
ar933x_clocks_init();
else if (soc_is_ar934x())
@@ -455,8 +455,6 @@ void __init ath79_clocks_init(void)
qca955x_clocks_init();
else
BUG();
-
- of_clk_init(NULL);
}
unsigned long __init
@@ -483,8 +481,49 @@ static void __init ath79_clocks_init_dt(struct device_node *np)
CLK_OF_DECLARE(ar7100, "qca,ar7100-pll", ath79_clocks_init_dt);
CLK_OF_DECLARE(ar7240, "qca,ar7240-pll", ath79_clocks_init_dt);
-CLK_OF_DECLARE(ar9130, "qca,ar9130-pll", ath79_clocks_init_dt);
-CLK_OF_DECLARE(ar9330, "qca,ar9330-pll", ath79_clocks_init_dt);
CLK_OF_DECLARE(ar9340, "qca,ar9340-pll", ath79_clocks_init_dt);
CLK_OF_DECLARE(ar9550, "qca,qca9550-pll", ath79_clocks_init_dt);
+
+static void __init ath79_clocks_init_dt_ng(struct device_node *np)
+{
+ struct clk *ref_clk;
+ void __iomem *pll_base;
+ const char *dnfn = of_node_full_name(np);
+
+ ref_clk = of_clk_get(np, 0);
+ if (IS_ERR(ref_clk)) {
+ pr_err("%s: of_clk_get failed\n", dnfn);
+ goto err;
+ }
+
+ pll_base = of_iomap(np, 0);
+ if (!pll_base) {
+ pr_err("%s: can't map pll registers\n", dnfn);
+ goto err_clk;
+ }
+
+ if (of_device_is_compatible(np, "qca,ar9130-pll"))
+ ar724x_clk_init(ref_clk, pll_base);
+ else if (of_device_is_compatible(np, "qca,ar9330-pll"))
+ ar9330_clk_init(ref_clk, pll_base);
+ else {
+ pr_err("%s: could not find any appropriate clk_init()\n", dnfn);
+ goto err_clk;
+ }
+
+ if (of_clk_add_provider(np, of_clk_src_onecell_get, &clk_data)) {
+ pr_err("%s: could not register clk provider\n", dnfn);
+ goto err_clk;
+ }
+
+ return;
+
+err_clk:
+ clk_put(ref_clk);
+
+err:
+ return;
+}
+CLK_OF_DECLARE(ar9130_clk, "qca,ar9130-pll", ath79_clocks_init_dt_ng);
+CLK_OF_DECLARE(ar9330_clk, "qca,ar9330-pll", ath79_clocks_init_dt_ng);
#endif
diff --git a/arch/mips/ath79/common.c b/arch/mips/ath79/common.c
index 3cedd1f95e0f..d071a3a0f876 100644
--- a/arch/mips/ath79/common.c
+++ b/arch/mips/ath79/common.c
@@ -46,12 +46,12 @@ void ath79_ddr_ctrl_init(void)
{
ath79_ddr_base = ioremap_nocache(AR71XX_DDR_CTRL_BASE,
AR71XX_DDR_CTRL_SIZE);
- if (soc_is_ar71xx() || soc_is_ar934x()) {
- ath79_ddr_wb_flush_base = ath79_ddr_base + 0x9c;
- ath79_ddr_pci_win_base = ath79_ddr_base + 0x7c;
- } else {
+ if (soc_is_ar913x() || soc_is_ar724x() || soc_is_ar933x()) {
ath79_ddr_wb_flush_base = ath79_ddr_base + 0x7c;
ath79_ddr_pci_win_base = 0;
+ } else {
+ ath79_ddr_wb_flush_base = ath79_ddr_base + 0x9c;
+ ath79_ddr_pci_win_base = ath79_ddr_base + 0x7c;
}
}
EXPORT_SYMBOL_GPL(ath79_ddr_ctrl_init);
@@ -76,14 +76,14 @@ void ath79_ddr_set_pci_windows(void)
{
BUG_ON(!ath79_ddr_pci_win_base);
- __raw_writel(AR71XX_PCI_WIN0_OFFS, ath79_ddr_pci_win_base + 0);
- __raw_writel(AR71XX_PCI_WIN1_OFFS, ath79_ddr_pci_win_base + 1);
- __raw_writel(AR71XX_PCI_WIN2_OFFS, ath79_ddr_pci_win_base + 2);
- __raw_writel(AR71XX_PCI_WIN3_OFFS, ath79_ddr_pci_win_base + 3);
- __raw_writel(AR71XX_PCI_WIN4_OFFS, ath79_ddr_pci_win_base + 4);
- __raw_writel(AR71XX_PCI_WIN5_OFFS, ath79_ddr_pci_win_base + 5);
- __raw_writel(AR71XX_PCI_WIN6_OFFS, ath79_ddr_pci_win_base + 6);
- __raw_writel(AR71XX_PCI_WIN7_OFFS, ath79_ddr_pci_win_base + 7);
+ __raw_writel(AR71XX_PCI_WIN0_OFFS, ath79_ddr_pci_win_base + 0x0);
+ __raw_writel(AR71XX_PCI_WIN1_OFFS, ath79_ddr_pci_win_base + 0x4);
+ __raw_writel(AR71XX_PCI_WIN2_OFFS, ath79_ddr_pci_win_base + 0x8);
+ __raw_writel(AR71XX_PCI_WIN3_OFFS, ath79_ddr_pci_win_base + 0xc);
+ __raw_writel(AR71XX_PCI_WIN4_OFFS, ath79_ddr_pci_win_base + 0x10);
+ __raw_writel(AR71XX_PCI_WIN5_OFFS, ath79_ddr_pci_win_base + 0x14);
+ __raw_writel(AR71XX_PCI_WIN6_OFFS, ath79_ddr_pci_win_base + 0x18);
+ __raw_writel(AR71XX_PCI_WIN7_OFFS, ath79_ddr_pci_win_base + 0x1c);
}
EXPORT_SYMBOL_GPL(ath79_ddr_set_pci_windows);
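
The offsets fixed above matter because ath79_ddr_pci_win_base is a byte-granular void __iomem * pointer: the eight DDR-to-PCI window registers are 32 bits wide, so they live at 4-byte strides (0x0, 0x4, ... 0x1c) rather than at byte offsets 0 through 7. The same setup written as a loop, as a sketch only:

/* Sketch: program each 32-bit DDR PCI window register, 4 bytes apart. */
static const u32 sketch_pci_win_offs[] = {
	AR71XX_PCI_WIN0_OFFS, AR71XX_PCI_WIN1_OFFS,
	AR71XX_PCI_WIN2_OFFS, AR71XX_PCI_WIN3_OFFS,
	AR71XX_PCI_WIN4_OFFS, AR71XX_PCI_WIN5_OFFS,
	AR71XX_PCI_WIN6_OFFS, AR71XX_PCI_WIN7_OFFS,
};

static void sketch_set_pci_windows(void __iomem *win_base)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(sketch_pci_win_offs); i++)
		__raw_writel(sketch_pci_win_offs[i], win_base + i * 0x4);
}
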
diff --git a/arch/mips/ath79/early_printk.c b/arch/mips/ath79/early_printk.c
index b955fafc58ba..d1adc59af5bf 100644
--- a/arch/mips/ath79/early_printk.c
+++ b/arch/mips/ath79/early_printk.c
@@ -31,13 +31,15 @@ static inline void prom_putchar_wait(void __iomem *reg, u32 mask, u32 val)
} while (1);
}
+#define BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE)
+
static void prom_putchar_ar71xx(unsigned char ch)
{
void __iomem *base = (void __iomem *)(KSEG1ADDR(AR71XX_UART_BASE));
- prom_putchar_wait(base + UART_LSR * 4, UART_LSR_THRE, UART_LSR_THRE);
+ prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY);
__raw_writel(ch, base + UART_TX * 4);
- prom_putchar_wait(base + UART_LSR * 4, UART_LSR_THRE, UART_LSR_THRE);
+ prom_putchar_wait(base + UART_LSR * 4, BOTH_EMPTY, BOTH_EMPTY);
}
static void prom_putchar_ar933x(unsigned char ch)
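
Waiting on UART_LSR_THRE alone only guarantees that the transmit holding register is free; the shift register may still be clocking out the previous character. BOTH_EMPTY (UART_LSR_TEMT | UART_LSR_THRE) makes the early console wait until the transmitter is completely idle before and after each write. The polling helper amounts to the following sketch (names are illustrative, and the real helper may additionally relax the CPU or time out):

/* Spin until every bit in 'mask' reads back as 'val' in the LSR. */
static inline void sketch_wait_lsr(void __iomem *lsr_reg, u32 mask, u32 val)
{
	u32 status;

	do {
		status = __raw_readl(lsr_reg);
	} while ((status & mask) != val);
}
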
diff --git a/arch/mips/ath79/setup.c b/arch/mips/ath79/setup.c
index be451ee4a5ea..f206dafbb0a3 100644
--- a/arch/mips/ath79/setup.c
+++ b/arch/mips/ath79/setup.c
@@ -17,7 +17,7 @@
#include <linux/bootmem.h>
#include <linux/err.h>
#include <linux/clk.h>
-#include <linux/of_platform.h>
+#include <linux/clk-provider.h>
#include <linux/of_fdt.h>
#include <asm/bootinfo.h>
@@ -203,26 +203,57 @@ void __init plat_mem_setup(void)
fdt_start = fw_getenvl("fdt_start");
if (fdt_start)
__dt_setup_arch((void *)KSEG0ADDR(fdt_start));
-#ifdef CONFIG_BUILTIN_DTB
- else
- __dt_setup_arch(__dtb_start);
-#endif
+ else if (fw_passed_dtb)
+ __dt_setup_arch((void *)KSEG0ADDR(fw_passed_dtb));
- ath79_reset_base = ioremap_nocache(AR71XX_RESET_BASE,
- AR71XX_RESET_SIZE);
- ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE,
- AR71XX_PLL_SIZE);
- ath79_detect_sys_type();
- ath79_ddr_ctrl_init();
+ if (mips_machtype != ATH79_MACH_GENERIC_OF) {
+ ath79_reset_base = ioremap_nocache(AR71XX_RESET_BASE,
+ AR71XX_RESET_SIZE);
+ ath79_pll_base = ioremap_nocache(AR71XX_PLL_BASE,
+ AR71XX_PLL_SIZE);
+ ath79_detect_sys_type();
+ ath79_ddr_ctrl_init();
- if (mips_machtype != ATH79_MACH_GENERIC_OF)
detect_memory_region(0, ATH79_MEM_SIZE_MIN, ATH79_MEM_SIZE_MAX);
- _machine_restart = ath79_restart;
+ /* OF machines should use the reset driver */
+ _machine_restart = ath79_restart;
+ }
+
_machine_halt = ath79_halt;
pm_power_off = ath79_halt;
}
+static void __init ath79_of_plat_time_init(void)
+{
+ struct device_node *np;
+ struct clk *clk;
+ unsigned long cpu_clk_rate;
+
+ of_clk_init(NULL);
+
+ np = of_get_cpu_node(0, NULL);
+ if (!np) {
+ pr_err("Failed to get CPU node\n");
+ return;
+ }
+
+ clk = of_clk_get(np, 0);
+ if (IS_ERR(clk)) {
+ pr_err("Failed to get CPU clock: %ld\n", PTR_ERR(clk));
+ return;
+ }
+
+ cpu_clk_rate = clk_get_rate(clk);
+
+ pr_info("CPU clock: %lu.%03lu MHz\n",
+ cpu_clk_rate / 1000000, (cpu_clk_rate / 1000) % 1000);
+
+ mips_hpt_frequency = cpu_clk_rate / 2;
+
+ clk_put(clk);
+}
+
void __init plat_time_init(void)
{
unsigned long cpu_clk_rate;
@@ -230,6 +261,11 @@ void __init plat_time_init(void)
unsigned long ddr_clk_rate;
unsigned long ref_clk_rate;
+ if (IS_ENABLED(CONFIG_OF) && mips_machtype == ATH79_MACH_GENERIC_OF) {
+ ath79_of_plat_time_init();
+ return;
+ }
+
ath79_clocks_init();
cpu_clk_rate = ath79_get_sys_clk_rate("cpu");
@@ -248,7 +284,6 @@ void __init plat_time_init(void)
static int __init ath79_setup(void)
{
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
if (mips_machtype == ATH79_MACH_GENERIC_OF)
return 0;
diff --git a/arch/mips/bcm47xx/Makefile b/arch/mips/bcm47xx/Makefile
index 66bea4ecf449..6d8615074075 100644
--- a/arch/mips/bcm47xx/Makefile
+++ b/arch/mips/bcm47xx/Makefile
@@ -3,5 +3,5 @@
# under Linux.
#
-obj-y += irq.o prom.o serial.o setup.o time.o sprom.o
+obj-y += irq.o prom.o serial.o setup.o time.o
obj-y += board.o buttons.o leds.o workarounds.o
diff --git a/arch/mips/bcm47xx/bcm47xx_private.h b/arch/mips/bcm47xx/bcm47xx_private.h
index 41796befa9df..0367ac7286fe 100644
--- a/arch/mips/bcm47xx/bcm47xx_private.h
+++ b/arch/mips/bcm47xx/bcm47xx_private.h
@@ -10,9 +10,6 @@
/* prom.c */
void __init bcm47xx_prom_highmem_init(void);
-/* sprom.c */
-void bcm47xx_sprom_register_fallbacks(void);
-
/* buttons.c */
int __init bcm47xx_buttons_register(void);
diff --git a/arch/mips/bcm47xx/setup.c b/arch/mips/bcm47xx/setup.c
index c807e32d6d81..6054d49e608e 100644
--- a/arch/mips/bcm47xx/setup.c
+++ b/arch/mips/bcm47xx/setup.c
@@ -28,6 +28,7 @@
#include "bcm47xx_private.h"
+#include <linux/bcm47xx_sprom.h>
#include <linux/export.h>
#include <linux/types.h>
#include <linux/ethtool.h>
@@ -151,7 +152,6 @@ void __init plat_mem_setup(void)
pr_info("Using bcma bus\n");
#ifdef CONFIG_BCM47XX_BCMA
bcm47xx_bus_type = BCM47XX_BUS_TYPE_BCMA;
- bcm47xx_sprom_register_fallbacks();
bcm47xx_register_bcma();
bcm47xx_set_system_type(bcm47xx_bus.bcma.bus.chipinfo.id);
#ifdef CONFIG_HIGHMEM
diff --git a/arch/mips/bcm47xx/sprom.c b/arch/mips/bcm47xx/sprom.c
deleted file mode 100644
index 959c145a0a2c..000000000000
--- a/arch/mips/bcm47xx/sprom.c
+++ /dev/null
@@ -1,724 +0,0 @@
-/*
- * Copyright (C) 2004 Florian Schirmer <jolt@tuxbox.org>
- * Copyright (C) 2006 Felix Fietkau <nbd@openwrt.org>
- * Copyright (C) 2006 Michael Buesch <m@bues.ch>
- * Copyright (C) 2010 Waldemar Brodkorb <wbx@openadk.org>
- * Copyright (C) 2010-2012 Hauke Mehrtens <hauke@hauke-m.de>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- *
- * THIS SOFTWARE IS PROVIDED ``AS IS'' AND ANY EXPRESS OR IMPLIED
- * WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
- * MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
- * NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
- * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
- * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF
- * USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
- * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
- * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
- * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
- *
- * You should have received a copy of the GNU General Public License along
- * with this program; if not, write to the Free Software Foundation, Inc.,
- * 675 Mass Ave, Cambridge, MA 02139, USA.
- */
-
-#include <bcm47xx.h>
-#include <linux/if_ether.h>
-#include <linux/etherdevice.h>
-
-static void create_key(const char *prefix, const char *postfix,
- const char *name, char *buf, int len)
-{
- if (prefix && postfix)
- snprintf(buf, len, "%s%s%s", prefix, name, postfix);
- else if (prefix)
- snprintf(buf, len, "%s%s", prefix, name);
- else if (postfix)
- snprintf(buf, len, "%s%s", name, postfix);
- else
- snprintf(buf, len, "%s", name);
-}
-
-static int get_nvram_var(const char *prefix, const char *postfix,
- const char *name, char *buf, int len, bool fallback)
-{
- char key[40];
- int err;
-
- create_key(prefix, postfix, name, key, sizeof(key));
-
- err = bcm47xx_nvram_getenv(key, buf, len);
- if (fallback && err == -ENOENT && prefix) {
- create_key(NULL, postfix, name, key, sizeof(key));
- err = bcm47xx_nvram_getenv(key, buf, len);
- }
- return err;
-}
-
-#define NVRAM_READ_VAL(type) \
-static void nvram_read_ ## type(const char *prefix, \
- const char *postfix, const char *name, \
- type *val, type allset, bool fallback) \
-{ \
- char buf[100]; \
- int err; \
- type var; \
- \
- err = get_nvram_var(prefix, postfix, name, buf, sizeof(buf), \
- fallback); \
- if (err < 0) \
- return; \
- err = kstrto ## type(strim(buf), 0, &var); \
- if (err) { \
- pr_warn("can not parse nvram name %s%s%s with value %s got %i\n", \
- prefix, name, postfix, buf, err); \
- return; \
- } \
- if (allset && var == allset) \
- return; \
- *val = var; \
-}
-
-NVRAM_READ_VAL(u8)
-NVRAM_READ_VAL(s8)
-NVRAM_READ_VAL(u16)
-NVRAM_READ_VAL(u32)
-
-#undef NVRAM_READ_VAL
-
-static void nvram_read_u32_2(const char *prefix, const char *name,
- u16 *val_lo, u16 *val_hi, bool fallback)
-{
- char buf[100];
- int err;
- u32 val;
-
- err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
- if (err < 0)
- return;
- err = kstrtou32(strim(buf), 0, &val);
- if (err) {
- pr_warn("can not parse nvram name %s%s with value %s got %i\n",
- prefix, name, buf, err);
- return;
- }
- *val_lo = (val & 0x0000FFFFU);
- *val_hi = (val & 0xFFFF0000U) >> 16;
-}
-
-static void nvram_read_leddc(const char *prefix, const char *name,
- u8 *leddc_on_time, u8 *leddc_off_time,
- bool fallback)
-{
- char buf[100];
- int err;
- u32 val;
-
- err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
- if (err < 0)
- return;
- err = kstrtou32(strim(buf), 0, &val);
- if (err) {
- pr_warn("can not parse nvram name %s%s with value %s got %i\n",
- prefix, name, buf, err);
- return;
- }
-
- if (val == 0xffff || val == 0xffffffff)
- return;
-
- *leddc_on_time = val & 0xff;
- *leddc_off_time = (val >> 16) & 0xff;
-}
-
-static void bcm47xx_nvram_parse_macaddr(char *buf, u8 macaddr[6])
-{
- if (strchr(buf, ':'))
- sscanf(buf, "%hhx:%hhx:%hhx:%hhx:%hhx:%hhx", &macaddr[0],
- &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4],
- &macaddr[5]);
- else if (strchr(buf, '-'))
- sscanf(buf, "%hhx-%hhx-%hhx-%hhx-%hhx-%hhx", &macaddr[0],
- &macaddr[1], &macaddr[2], &macaddr[3], &macaddr[4],
- &macaddr[5]);
- else
- pr_warn("Can not parse mac address: %s\n", buf);
-}
-
-static void nvram_read_macaddr(const char *prefix, const char *name,
- u8 val[6], bool fallback)
-{
- char buf[100];
- int err;
-
- err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
- if (err < 0)
- return;
-
- bcm47xx_nvram_parse_macaddr(buf, val);
-}
-
-static void nvram_read_alpha2(const char *prefix, const char *name,
- char val[2], bool fallback)
-{
- char buf[10];
- int err;
-
- err = get_nvram_var(prefix, NULL, name, buf, sizeof(buf), fallback);
- if (err < 0)
- return;
- if (buf[0] == '0')
- return;
- if (strlen(buf) > 2) {
- pr_warn("alpha2 is too long %s\n", buf);
- return;
- }
- memcpy(val, buf, 2);
-}
-
-/* This is one-function-only macro, it uses local "sprom" variable! */
-#define ENTRY(_revmask, _type, _prefix, _name, _val, _allset, _fallback) \
- if (_revmask & BIT(sprom->revision)) \
- nvram_read_ ## _type(_prefix, NULL, _name, &sprom->_val, \
- _allset, _fallback)
-/*
- * Special version of filling function that can be safely called for any SPROM
- * revision. For every NVRAM to SPROM mapping it contains bitmask of revisions
- * for which the mapping is valid.
- * It obviously requires some hexadecimal/bitmasks knowledge, but allows
- * writing cleaner code (easy revisions handling).
- * Note that while SPROM revision 0 was never used, we still keep BIT(0)
- * reserved for it, just to keep numbering sane.
- */
-static void bcm47xx_sprom_fill_auto(struct ssb_sprom *sprom,
- const char *prefix, bool fallback)
-{
- const char *pre = prefix;
- bool fb = fallback;
-
- /* Broadcom extracts it for rev 8+ but it was found on 2 and 4 too */
- ENTRY(0xfffffffe, u16, pre, "devid", dev_id, 0, fallback);
-
- ENTRY(0xfffffffe, u16, pre, "boardrev", board_rev, 0, true);
- ENTRY(0xfffffffe, u32, pre, "boardflags", boardflags, 0, fb);
- ENTRY(0xfffffff0, u32, pre, "boardflags2", boardflags2, 0, fb);
- ENTRY(0xfffff800, u32, pre, "boardflags3", boardflags3, 0, fb);
- ENTRY(0x00000002, u16, pre, "boardflags", boardflags_lo, 0, fb);
- ENTRY(0xfffffffc, u16, pre, "boardtype", board_type, 0, true);
- ENTRY(0xfffffffe, u16, pre, "boardnum", board_num, 0, fb);
- ENTRY(0x00000002, u8, pre, "cc", country_code, 0, fb);
- ENTRY(0xfffffff8, u8, pre, "regrev", regrev, 0, fb);
-
- ENTRY(0xfffffffe, u8, pre, "ledbh0", gpio0, 0xff, fb);
- ENTRY(0xfffffffe, u8, pre, "ledbh1", gpio1, 0xff, fb);
- ENTRY(0xfffffffe, u8, pre, "ledbh2", gpio2, 0xff, fb);
- ENTRY(0xfffffffe, u8, pre, "ledbh3", gpio3, 0xff, fb);
-
- ENTRY(0x0000070e, u16, pre, "pa0b0", pa0b0, 0, fb);
- ENTRY(0x0000070e, u16, pre, "pa0b1", pa0b1, 0, fb);
- ENTRY(0x0000070e, u16, pre, "pa0b2", pa0b2, 0, fb);
- ENTRY(0x0000070e, u8, pre, "pa0itssit", itssi_bg, 0, fb);
- ENTRY(0x0000070e, u8, pre, "pa0maxpwr", maxpwr_bg, 0, fb);
-
- ENTRY(0x0000070c, u8, pre, "opo", opo, 0, fb);
- ENTRY(0xfffffffe, u8, pre, "aa2g", ant_available_bg, 0, fb);
- ENTRY(0xfffffffe, u8, pre, "aa5g", ant_available_a, 0, fb);
- ENTRY(0x000007fe, s8, pre, "ag0", antenna_gain.a0, 0, fb);
- ENTRY(0x000007fe, s8, pre, "ag1", antenna_gain.a1, 0, fb);
- ENTRY(0x000007f0, s8, pre, "ag2", antenna_gain.a2, 0, fb);
- ENTRY(0x000007f0, s8, pre, "ag3", antenna_gain.a3, 0, fb);
-
- ENTRY(0x0000070e, u16, pre, "pa1b0", pa1b0, 0, fb);
- ENTRY(0x0000070e, u16, pre, "pa1b1", pa1b1, 0, fb);
- ENTRY(0x0000070e, u16, pre, "pa1b2", pa1b2, 0, fb);
- ENTRY(0x0000070c, u16, pre, "pa1lob0", pa1lob0, 0, fb);
- ENTRY(0x0000070c, u16, pre, "pa1lob1", pa1lob1, 0, fb);
- ENTRY(0x0000070c, u16, pre, "pa1lob2", pa1lob2, 0, fb);
- ENTRY(0x0000070c, u16, pre, "pa1hib0", pa1hib0, 0, fb);
- ENTRY(0x0000070c, u16, pre, "pa1hib1", pa1hib1, 0, fb);
- ENTRY(0x0000070c, u16, pre, "pa1hib2", pa1hib2, 0, fb);
- ENTRY(0x0000070e, u8, pre, "pa1itssit", itssi_a, 0, fb);
- ENTRY(0x0000070e, u8, pre, "pa1maxpwr", maxpwr_a, 0, fb);
- ENTRY(0x0000070c, u8, pre, "pa1lomaxpwr", maxpwr_al, 0, fb);
- ENTRY(0x0000070c, u8, pre, "pa1himaxpwr", maxpwr_ah, 0, fb);
-
- ENTRY(0x00000708, u8, pre, "bxa2g", bxa2g, 0, fb);
- ENTRY(0x00000708, u8, pre, "rssisav2g", rssisav2g, 0, fb);
- ENTRY(0x00000708, u8, pre, "rssismc2g", rssismc2g, 0, fb);
- ENTRY(0x00000708, u8, pre, "rssismf2g", rssismf2g, 0, fb);
- ENTRY(0x00000708, u8, pre, "bxa5g", bxa5g, 0, fb);
- ENTRY(0x00000708, u8, pre, "rssisav5g", rssisav5g, 0, fb);
- ENTRY(0x00000708, u8, pre, "rssismc5g", rssismc5g, 0, fb);
- ENTRY(0x00000708, u8, pre, "rssismf5g", rssismf5g, 0, fb);
- ENTRY(0x00000708, u8, pre, "tri2g", tri2g, 0, fb);
- ENTRY(0x00000708, u8, pre, "tri5g", tri5g, 0, fb);
- ENTRY(0x00000708, u8, pre, "tri5gl", tri5gl, 0, fb);
- ENTRY(0x00000708, u8, pre, "tri5gh", tri5gh, 0, fb);
- ENTRY(0x00000708, s8, pre, "rxpo2g", rxpo2g, 0, fb);
- ENTRY(0x00000708, s8, pre, "rxpo5g", rxpo5g, 0, fb);
- ENTRY(0xfffffff0, u8, pre, "txchain", txchain, 0xf, fb);
- ENTRY(0xfffffff0, u8, pre, "rxchain", rxchain, 0xf, fb);
- ENTRY(0xfffffff0, u8, pre, "antswitch", antswitch, 0xff, fb);
- ENTRY(0x00000700, u8, pre, "tssipos2g", fem.ghz2.tssipos, 0, fb);
- ENTRY(0x00000700, u8, pre, "extpagain2g", fem.ghz2.extpa_gain, 0, fb);
- ENTRY(0x00000700, u8, pre, "pdetrange2g", fem.ghz2.pdet_range, 0, fb);
- ENTRY(0x00000700, u8, pre, "triso2g", fem.ghz2.tr_iso, 0, fb);
- ENTRY(0x00000700, u8, pre, "antswctl2g", fem.ghz2.antswlut, 0, fb);
- ENTRY(0x00000700, u8, pre, "tssipos5g", fem.ghz5.tssipos, 0, fb);
- ENTRY(0x00000700, u8, pre, "extpagain5g", fem.ghz5.extpa_gain, 0, fb);
- ENTRY(0x00000700, u8, pre, "pdetrange5g", fem.ghz5.pdet_range, 0, fb);
- ENTRY(0x00000700, u8, pre, "triso5g", fem.ghz5.tr_iso, 0, fb);
- ENTRY(0x00000700, u8, pre, "antswctl5g", fem.ghz5.antswlut, 0, fb);
- ENTRY(0x000000f0, u8, pre, "txpid2ga0", txpid2g[0], 0, fb);
- ENTRY(0x000000f0, u8, pre, "txpid2ga1", txpid2g[1], 0, fb);
- ENTRY(0x000000f0, u8, pre, "txpid2ga2", txpid2g[2], 0, fb);
- ENTRY(0x000000f0, u8, pre, "txpid2ga3", txpid2g[3], 0, fb);
- ENTRY(0x000000f0, u8, pre, "txpid5ga0", txpid5g[0], 0, fb);
- ENTRY(0x000000f0, u8, pre, "txpid5ga1", txpid5g[1], 0, fb);
- ENTRY(0x000000f0, u8, pre, "txpid5ga2", txpid5g[2], 0, fb);
- ENTRY(0x000000f0, u8, pre, "txpid5ga3", txpid5g[3], 0, fb);
- ENTRY(0x000000f0, u8, pre, "txpid5gla0", txpid5gl[0], 0, fb);
- ENTRY(0x000000f0, u8, pre, "txpid5gla1", txpid5gl[1], 0, fb);
- ENTRY(0x000000f0, u8, pre, "txpid5gla2", txpid5gl[2], 0, fb);
- ENTRY(0x000000f0, u8, pre, "txpid5gla3", txpid5gl[3], 0, fb);
- ENTRY(0x000000f0, u8, pre, "txpid5gha0", txpid5gh[0], 0, fb);
- ENTRY(0x000000f0, u8, pre, "txpid5gha1", txpid5gh[1], 0, fb);
- ENTRY(0x000000f0, u8, pre, "txpid5gha2", txpid5gh[2], 0, fb);
- ENTRY(0x000000f0, u8, pre, "txpid5gha3", txpid5gh[3], 0, fb);
-
- ENTRY(0xffffff00, u8, pre, "tempthresh", tempthresh, 0, fb);
- ENTRY(0xffffff00, u8, pre, "tempoffset", tempoffset, 0, fb);
- ENTRY(0xffffff00, u16, pre, "rawtempsense", rawtempsense, 0, fb);
- ENTRY(0xffffff00, u8, pre, "measpower", measpower, 0, fb);
- ENTRY(0xffffff00, u8, pre, "tempsense_slope", tempsense_slope, 0, fb);
- ENTRY(0xffffff00, u8, pre, "tempcorrx", tempcorrx, 0, fb);
- ENTRY(0xffffff00, u8, pre, "tempsense_option", tempsense_option, 0, fb);
- ENTRY(0x00000700, u8, pre, "freqoffset_corr", freqoffset_corr, 0, fb);
- ENTRY(0x00000700, u8, pre, "iqcal_swp_dis", iqcal_swp_dis, 0, fb);
- ENTRY(0x00000700, u8, pre, "hw_iqcal_en", hw_iqcal_en, 0, fb);
- ENTRY(0x00000700, u8, pre, "elna2g", elna2g, 0, fb);
- ENTRY(0x00000700, u8, pre, "elna5g", elna5g, 0, fb);
- ENTRY(0xffffff00, u8, pre, "phycal_tempdelta", phycal_tempdelta, 0, fb);
- ENTRY(0xffffff00, u8, pre, "temps_period", temps_period, 0, fb);
- ENTRY(0xffffff00, u8, pre, "temps_hysteresis", temps_hysteresis, 0, fb);
- ENTRY(0xffffff00, u8, pre, "measpower1", measpower1, 0, fb);
- ENTRY(0xffffff00, u8, pre, "measpower2", measpower2, 0, fb);
-
- ENTRY(0x000001f0, u16, pre, "cck2gpo", cck2gpo, 0, fb);
- ENTRY(0x000001f0, u32, pre, "ofdm2gpo", ofdm2gpo, 0, fb);
- ENTRY(0x000001f0, u32, pre, "ofdm5gpo", ofdm5gpo, 0, fb);
- ENTRY(0x000001f0, u32, pre, "ofdm5glpo", ofdm5glpo, 0, fb);
- ENTRY(0x000001f0, u32, pre, "ofdm5ghpo", ofdm5ghpo, 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs2gpo0", mcs2gpo[0], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs2gpo1", mcs2gpo[1], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs2gpo2", mcs2gpo[2], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs2gpo3", mcs2gpo[3], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs2gpo4", mcs2gpo[4], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs2gpo5", mcs2gpo[5], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs2gpo6", mcs2gpo[6], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs2gpo7", mcs2gpo[7], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs5gpo0", mcs5gpo[0], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs5gpo1", mcs5gpo[1], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs5gpo2", mcs5gpo[2], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs5gpo3", mcs5gpo[3], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs5gpo4", mcs5gpo[4], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs5gpo5", mcs5gpo[5], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs5gpo6", mcs5gpo[6], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs5gpo7", mcs5gpo[7], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs5glpo0", mcs5glpo[0], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs5glpo1", mcs5glpo[1], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs5glpo2", mcs5glpo[2], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs5glpo3", mcs5glpo[3], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs5glpo4", mcs5glpo[4], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs5glpo5", mcs5glpo[5], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs5glpo6", mcs5glpo[6], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs5glpo7", mcs5glpo[7], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs5ghpo0", mcs5ghpo[0], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs5ghpo1", mcs5ghpo[1], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs5ghpo2", mcs5ghpo[2], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs5ghpo3", mcs5ghpo[3], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs5ghpo4", mcs5ghpo[4], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs5ghpo5", mcs5ghpo[5], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs5ghpo6", mcs5ghpo[6], 0, fb);
- ENTRY(0x000001f0, u16, pre, "mcs5ghpo7", mcs5ghpo[7], 0, fb);
- ENTRY(0x000001f0, u16, pre, "cddpo", cddpo, 0, fb);
- ENTRY(0x000001f0, u16, pre, "stbcpo", stbcpo, 0, fb);
- ENTRY(0x000001f0, u16, pre, "bw40po", bw40po, 0, fb);
- ENTRY(0x000001f0, u16, pre, "bwduppo", bwduppo, 0, fb);
-
- ENTRY(0xfffffe00, u16, pre, "cckbw202gpo", cckbw202gpo, 0, fb);
- ENTRY(0xfffffe00, u16, pre, "cckbw20ul2gpo", cckbw20ul2gpo, 0, fb);
- ENTRY(0x00000600, u32, pre, "legofdmbw202gpo", legofdmbw202gpo, 0, fb);
- ENTRY(0x00000600, u32, pre, "legofdmbw20ul2gpo", legofdmbw20ul2gpo, 0, fb);
- ENTRY(0x00000600, u32, pre, "legofdmbw205glpo", legofdmbw205glpo, 0, fb);
- ENTRY(0x00000600, u32, pre, "legofdmbw20ul5glpo", legofdmbw20ul5glpo, 0, fb);
- ENTRY(0x00000600, u32, pre, "legofdmbw205gmpo", legofdmbw205gmpo, 0, fb);
- ENTRY(0x00000600, u32, pre, "legofdmbw20ul5gmpo", legofdmbw20ul5gmpo, 0, fb);
- ENTRY(0x00000600, u32, pre, "legofdmbw205ghpo", legofdmbw205ghpo, 0, fb);
- ENTRY(0x00000600, u32, pre, "legofdmbw20ul5ghpo", legofdmbw20ul5ghpo, 0, fb);
- ENTRY(0xfffffe00, u32, pre, "mcsbw202gpo", mcsbw202gpo, 0, fb);
- ENTRY(0x00000600, u32, pre, "mcsbw20ul2gpo", mcsbw20ul2gpo, 0, fb);
- ENTRY(0xfffffe00, u32, pre, "mcsbw402gpo", mcsbw402gpo, 0, fb);
- ENTRY(0xfffffe00, u32, pre, "mcsbw205glpo", mcsbw205glpo, 0, fb);
- ENTRY(0x00000600, u32, pre, "mcsbw20ul5glpo", mcsbw20ul5glpo, 0, fb);
- ENTRY(0xfffffe00, u32, pre, "mcsbw405glpo", mcsbw405glpo, 0, fb);
- ENTRY(0xfffffe00, u32, pre, "mcsbw205gmpo", mcsbw205gmpo, 0, fb);
- ENTRY(0x00000600, u32, pre, "mcsbw20ul5gmpo", mcsbw20ul5gmpo, 0, fb);
- ENTRY(0xfffffe00, u32, pre, "mcsbw405gmpo", mcsbw405gmpo, 0, fb);
- ENTRY(0xfffffe00, u32, pre, "mcsbw205ghpo", mcsbw205ghpo, 0, fb);
- ENTRY(0x00000600, u32, pre, "mcsbw20ul5ghpo", mcsbw20ul5ghpo, 0, fb);
- ENTRY(0xfffffe00, u32, pre, "mcsbw405ghpo", mcsbw405ghpo, 0, fb);
- ENTRY(0x00000600, u16, pre, "mcs32po", mcs32po, 0, fb);
- ENTRY(0x00000600, u16, pre, "legofdm40duppo", legofdm40duppo, 0, fb);
- ENTRY(0x00000700, u8, pre, "pcieingress_war", pcieingress_war, 0, fb);
-
- /* TODO: rev 11 support */
- ENTRY(0x00000700, u8, pre, "rxgainerr2ga0", rxgainerr2ga[0], 0, fb);
- ENTRY(0x00000700, u8, pre, "rxgainerr2ga1", rxgainerr2ga[1], 0, fb);
- ENTRY(0x00000700, u8, pre, "rxgainerr2ga2", rxgainerr2ga[2], 0, fb);
- ENTRY(0x00000700, u8, pre, "rxgainerr5gla0", rxgainerr5gla[0], 0, fb);
- ENTRY(0x00000700, u8, pre, "rxgainerr5gla1", rxgainerr5gla[1], 0, fb);
- ENTRY(0x00000700, u8, pre, "rxgainerr5gla2", rxgainerr5gla[2], 0, fb);
- ENTRY(0x00000700, u8, pre, "rxgainerr5gma0", rxgainerr5gma[0], 0, fb);
- ENTRY(0x00000700, u8, pre, "rxgainerr5gma1", rxgainerr5gma[1], 0, fb);
- ENTRY(0x00000700, u8, pre, "rxgainerr5gma2", rxgainerr5gma[2], 0, fb);
- ENTRY(0x00000700, u8, pre, "rxgainerr5gha0", rxgainerr5gha[0], 0, fb);
- ENTRY(0x00000700, u8, pre, "rxgainerr5gha1", rxgainerr5gha[1], 0, fb);
- ENTRY(0x00000700, u8, pre, "rxgainerr5gha2", rxgainerr5gha[2], 0, fb);
- ENTRY(0x00000700, u8, pre, "rxgainerr5gua0", rxgainerr5gua[0], 0, fb);
- ENTRY(0x00000700, u8, pre, "rxgainerr5gua1", rxgainerr5gua[1], 0, fb);
- ENTRY(0x00000700, u8, pre, "rxgainerr5gua2", rxgainerr5gua[2], 0, fb);
-
- ENTRY(0xfffffe00, u8, pre, "sar2g", sar2g, 0, fb);
- ENTRY(0xfffffe00, u8, pre, "sar5g", sar5g, 0, fb);
-
- /* TODO: rev 11 support */
- ENTRY(0x00000700, u8, pre, "noiselvl2ga0", noiselvl2ga[0], 0, fb);
- ENTRY(0x00000700, u8, pre, "noiselvl2ga1", noiselvl2ga[1], 0, fb);
- ENTRY(0x00000700, u8, pre, "noiselvl2ga2", noiselvl2ga[2], 0, fb);
- ENTRY(0x00000700, u8, pre, "noiselvl5gla0", noiselvl5gla[0], 0, fb);
- ENTRY(0x00000700, u8, pre, "noiselvl5gla1", noiselvl5gla[1], 0, fb);
- ENTRY(0x00000700, u8, pre, "noiselvl5gla2", noiselvl5gla[2], 0, fb);
- ENTRY(0x00000700, u8, pre, "noiselvl5gma0", noiselvl5gma[0], 0, fb);
- ENTRY(0x00000700, u8, pre, "noiselvl5gma1", noiselvl5gma[1], 0, fb);
- ENTRY(0x00000700, u8, pre, "noiselvl5gma2", noiselvl5gma[2], 0, fb);
- ENTRY(0x00000700, u8, pre, "noiselvl5gha0", noiselvl5gha[0], 0, fb);
- ENTRY(0x00000700, u8, pre, "noiselvl5gha1", noiselvl5gha[1], 0, fb);
- ENTRY(0x00000700, u8, pre, "noiselvl5gha2", noiselvl5gha[2], 0, fb);
- ENTRY(0x00000700, u8, pre, "noiselvl5gua0", noiselvl5gua[0], 0, fb);
- ENTRY(0x00000700, u8, pre, "noiselvl5gua1", noiselvl5gua[1], 0, fb);
- ENTRY(0x00000700, u8, pre, "noiselvl5gua2", noiselvl5gua[2], 0, fb);
-}
-#undef ENTRY /* It's specififc, uses local variable, don't use it (again). */
-
-static void bcm47xx_fill_sprom_path_r4589(struct ssb_sprom *sprom,
- const char *prefix, bool fallback)
-{
- char postfix[2];
- int i;
-
- for (i = 0; i < ARRAY_SIZE(sprom->core_pwr_info); i++) {
- struct ssb_sprom_core_pwr_info *pwr_info;
-
- pwr_info = &sprom->core_pwr_info[i];
-
- snprintf(postfix, sizeof(postfix), "%i", i);
- nvram_read_u8(prefix, postfix, "maxp2ga",
- &pwr_info->maxpwr_2g, 0, fallback);
- nvram_read_u8(prefix, postfix, "itt2ga",
- &pwr_info->itssi_2g, 0, fallback);
- nvram_read_u8(prefix, postfix, "itt5ga",
- &pwr_info->itssi_5g, 0, fallback);
- nvram_read_u16(prefix, postfix, "pa2gw0a",
- &pwr_info->pa_2g[0], 0, fallback);
- nvram_read_u16(prefix, postfix, "pa2gw1a",
- &pwr_info->pa_2g[1], 0, fallback);
- nvram_read_u16(prefix, postfix, "pa2gw2a",
- &pwr_info->pa_2g[2], 0, fallback);
- nvram_read_u8(prefix, postfix, "maxp5ga",
- &pwr_info->maxpwr_5g, 0, fallback);
- nvram_read_u8(prefix, postfix, "maxp5gha",
- &pwr_info->maxpwr_5gh, 0, fallback);
- nvram_read_u8(prefix, postfix, "maxp5gla",
- &pwr_info->maxpwr_5gl, 0, fallback);
- nvram_read_u16(prefix, postfix, "pa5gw0a",
- &pwr_info->pa_5g[0], 0, fallback);
- nvram_read_u16(prefix, postfix, "pa5gw1a",
- &pwr_info->pa_5g[1], 0, fallback);
- nvram_read_u16(prefix, postfix, "pa5gw2a",
- &pwr_info->pa_5g[2], 0, fallback);
- nvram_read_u16(prefix, postfix, "pa5glw0a",
- &pwr_info->pa_5gl[0], 0, fallback);
- nvram_read_u16(prefix, postfix, "pa5glw1a",
- &pwr_info->pa_5gl[1], 0, fallback);
- nvram_read_u16(prefix, postfix, "pa5glw2a",
- &pwr_info->pa_5gl[2], 0, fallback);
- nvram_read_u16(prefix, postfix, "pa5ghw0a",
- &pwr_info->pa_5gh[0], 0, fallback);
- nvram_read_u16(prefix, postfix, "pa5ghw1a",
- &pwr_info->pa_5gh[1], 0, fallback);
- nvram_read_u16(prefix, postfix, "pa5ghw2a",
- &pwr_info->pa_5gh[2], 0, fallback);
- }
-}
-
-static void bcm47xx_fill_sprom_path_r45(struct ssb_sprom *sprom,
- const char *prefix, bool fallback)
-{
- char postfix[2];
- int i;
-
- for (i = 0; i < ARRAY_SIZE(sprom->core_pwr_info); i++) {
- struct ssb_sprom_core_pwr_info *pwr_info;
-
- pwr_info = &sprom->core_pwr_info[i];
-
- snprintf(postfix, sizeof(postfix), "%i", i);
- nvram_read_u16(prefix, postfix, "pa2gw3a",
- &pwr_info->pa_2g[3], 0, fallback);
- nvram_read_u16(prefix, postfix, "pa5gw3a",
- &pwr_info->pa_5g[3], 0, fallback);
- nvram_read_u16(prefix, postfix, "pa5glw3a",
- &pwr_info->pa_5gl[3], 0, fallback);
- nvram_read_u16(prefix, postfix, "pa5ghw3a",
- &pwr_info->pa_5gh[3], 0, fallback);
- }
-}
-
-static bool bcm47xx_is_valid_mac(u8 *mac)
-{
- return mac && !(mac[0] == 0x00 && mac[1] == 0x90 && mac[2] == 0x4c);
-}
-
-static int bcm47xx_increase_mac_addr(u8 *mac, u8 num)
-{
- u8 *oui = mac + ETH_ALEN/2 - 1;
- u8 *p = mac + ETH_ALEN - 1;
-
- do {
- (*p) += num;
- if (*p > num)
- break;
- p--;
- num = 1;
- } while (p != oui);
-
- if (p == oui) {
- pr_err("unable to fetch mac address\n");
- return -ENOENT;
- }
- return 0;
-}
-
-static int mac_addr_used = 2;
-
-static void bcm47xx_fill_sprom_ethernet(struct ssb_sprom *sprom,
- const char *prefix, bool fallback)
-{
- bool fb = fallback;
-
- nvram_read_macaddr(prefix, "et0macaddr", sprom->et0mac, fallback);
- nvram_read_u8(prefix, NULL, "et0mdcport", &sprom->et0mdcport, 0,
- fallback);
- nvram_read_u8(prefix, NULL, "et0phyaddr", &sprom->et0phyaddr, 0,
- fallback);
-
- nvram_read_macaddr(prefix, "et1macaddr", sprom->et1mac, fallback);
- nvram_read_u8(prefix, NULL, "et1mdcport", &sprom->et1mdcport, 0,
- fallback);
- nvram_read_u8(prefix, NULL, "et1phyaddr", &sprom->et1phyaddr, 0,
- fallback);
-
- nvram_read_macaddr(prefix, "et2macaddr", sprom->et2mac, fb);
- nvram_read_u8(prefix, NULL, "et2mdcport", &sprom->et2mdcport, 0, fb);
- nvram_read_u8(prefix, NULL, "et2phyaddr", &sprom->et2phyaddr, 0, fb);
-
- nvram_read_macaddr(prefix, "macaddr", sprom->il0mac, fallback);
- nvram_read_macaddr(prefix, "il0macaddr", sprom->il0mac, fallback);
-
- /* The address prefix 00:90:4C is used by Broadcom in their initial
- * configuration. When a mac address with the prefix 00:90:4C is used
- * all devices from the same series are sharing the same mac address.
- * To prevent mac address collisions we replace them with a mac address
- * based on the base address.
- */
- if (!bcm47xx_is_valid_mac(sprom->il0mac)) {
- u8 mac[6];
-
- nvram_read_macaddr(NULL, "et0macaddr", mac, false);
- if (bcm47xx_is_valid_mac(mac)) {
- int err = bcm47xx_increase_mac_addr(mac, mac_addr_used);
-
- if (!err) {
- ether_addr_copy(sprom->il0mac, mac);
- mac_addr_used++;
- }
- }
- }
-}
-
-static void bcm47xx_fill_board_data(struct ssb_sprom *sprom, const char *prefix,
- bool fallback)
-{
- nvram_read_u32_2(prefix, "boardflags", &sprom->boardflags_lo,
- &sprom->boardflags_hi, fallback);
- nvram_read_u32_2(prefix, "boardflags2", &sprom->boardflags2_lo,
- &sprom->boardflags2_hi, fallback);
-}
-
-void bcm47xx_fill_sprom(struct ssb_sprom *sprom, const char *prefix,
- bool fallback)
-{
- bcm47xx_fill_sprom_ethernet(sprom, prefix, fallback);
- bcm47xx_fill_board_data(sprom, prefix, fallback);
-
- nvram_read_u8(prefix, NULL, "sromrev", &sprom->revision, 0, fallback);
-
- /* Entries requiring custom functions */
- nvram_read_alpha2(prefix, "ccode", sprom->alpha2, fallback);
- if (sprom->revision >= 3)
- nvram_read_leddc(prefix, "leddc", &sprom->leddc_on_time,
- &sprom->leddc_off_time, fallback);
-
- switch (sprom->revision) {
- case 4:
- case 5:
- bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback);
- bcm47xx_fill_sprom_path_r45(sprom, prefix, fallback);
- break;
- case 8:
- case 9:
- bcm47xx_fill_sprom_path_r4589(sprom, prefix, fallback);
- break;
- }
-
- bcm47xx_sprom_fill_auto(sprom, prefix, fallback);
-}
-
-#if defined(CONFIG_BCM47XX_SSB)
-static int bcm47xx_get_sprom_ssb(struct ssb_bus *bus, struct ssb_sprom *out)
-{
- char prefix[10];
-
- switch (bus->bustype) {
- case SSB_BUSTYPE_SSB:
- bcm47xx_fill_sprom(out, NULL, false);
- return 0;
- case SSB_BUSTYPE_PCI:
- memset(out, 0, sizeof(struct ssb_sprom));
- snprintf(prefix, sizeof(prefix), "pci/%u/%u/",
- bus->host_pci->bus->number + 1,
- PCI_SLOT(bus->host_pci->devfn));
- bcm47xx_fill_sprom(out, prefix, false);
- return 0;
- default:
- pr_warn("Unable to fill SPROM for given bustype.\n");
- return -EINVAL;
- }
-}
-#endif
-
-#if defined(CONFIG_BCM47XX_BCMA)
-/*
- * Having many NVRAM entries for PCI devices led to repeating prefixes like
- * pci/1/1/ all the time and wasting flash space. So at some point Broadcom
- * decided to introduce prefixes like 0: 1: 2: etc.
- * If we find e.g. devpath0=pci/2/1 or devpath0=pci/2/1/ we should use 0:
- * instead of pci/2/1/.
- */
-static void bcm47xx_sprom_apply_prefix_alias(char *prefix, size_t prefix_size)
-{
- size_t prefix_len = strlen(prefix);
- size_t short_len = prefix_len - 1;
- char nvram_var[10];
- char buf[20];
- int i;
-
- /* Passed prefix has to end with a slash */
- if (prefix_len <= 0 || prefix[prefix_len - 1] != '/')
- return;
-
- for (i = 0; i < 3; i++) {
- if (snprintf(nvram_var, sizeof(nvram_var), "devpath%d", i) <= 0)
- continue;
- if (bcm47xx_nvram_getenv(nvram_var, buf, sizeof(buf)) < 0)
- continue;
- if (!strcmp(buf, prefix) ||
- (short_len && strlen(buf) == short_len && !strncmp(buf, prefix, short_len))) {
- snprintf(prefix, prefix_size, "%d:", i);
- return;
- }
- }
-}
-
-static int bcm47xx_get_sprom_bcma(struct bcma_bus *bus, struct ssb_sprom *out)
-{
- struct bcma_boardinfo *binfo = &bus->boardinfo;
- struct bcma_device *core;
- char buf[10];
- char *prefix;
- bool fallback = false;
-
- switch (bus->hosttype) {
- case BCMA_HOSTTYPE_PCI:
- memset(out, 0, sizeof(struct ssb_sprom));
- /* On BCM47XX all PCI buses share the same domain */
- if (config_enabled(CONFIG_BCM47XX))
- snprintf(buf, sizeof(buf), "pci/%u/%u/",
- bus->host_pci->bus->number + 1,
- PCI_SLOT(bus->host_pci->devfn));
- else
- snprintf(buf, sizeof(buf), "pci/%u/%u/",
- pci_domain_nr(bus->host_pci->bus) + 1,
- bus->host_pci->bus->number);
- bcm47xx_sprom_apply_prefix_alias(buf, sizeof(buf));
- prefix = buf;
- break;
- case BCMA_HOSTTYPE_SOC:
- memset(out, 0, sizeof(struct ssb_sprom));
- core = bcma_find_core(bus, BCMA_CORE_80211);
- if (core) {
- snprintf(buf, sizeof(buf), "sb/%u/",
- core->core_index);
- prefix = buf;
- fallback = true;
- } else {
- prefix = NULL;
- }
- break;
- default:
- pr_warn("Unable to fill SPROM for given bustype.\n");
- return -EINVAL;
- }
-
- nvram_read_u16(prefix, NULL, "boardvendor", &binfo->vendor, 0, true);
- if (!binfo->vendor)
- binfo->vendor = SSB_BOARDVENDOR_BCM;
- nvram_read_u16(prefix, NULL, "boardtype", &binfo->type, 0, true);
-
- bcm47xx_fill_sprom(out, prefix, fallback);
-
- return 0;
-}
-#endif
-
-/*
- * On bcm47xx we need to register SPROM fallback handler very early, so we can't
- * use anything like platform device / driver for this.
- */
-void bcm47xx_sprom_register_fallbacks(void)
-{
-#if defined(CONFIG_BCM47XX_SSB)
- if (ssb_arch_register_fallback_sprom(&bcm47xx_get_sprom_ssb))
- pr_warn("Failed to registered ssb SPROM handler\n");
-#endif
-
-#if defined(CONFIG_BCM47XX_BCMA)
- if (bcma_arch_register_fallback_sprom(&bcm47xx_get_sprom_bcma))
- pr_warn("Failed to registered bcma SPROM handler\n");
-#endif
-}
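
With sprom.c gone from arch/mips/bcm47xx (setup.c now pulls in the shared <linux/bcm47xx_sprom.h> instead), the MAC handling it carried is no longer arch code. One piece worth noting is how it derived additional addresses from the base et0macaddr: a ripple-carry increment over the device half of the address that must never spill into the OUI. A self-contained sketch of that idea, not the exact removed code:

#include <linux/types.h>
#include <linux/errno.h>
#include <linux/etherdevice.h>

/* Add 'num' to the device part of 'mac', carrying leftwards, but refuse
 * to modify the OUI (the first ETH_ALEN/2 octets). Returns 0 on success. */
static int sketch_mac_add(u8 mac[ETH_ALEN], u8 num)
{
	int i;

	for (i = ETH_ALEN - 1; i >= ETH_ALEN / 2; i--) {
		unsigned int sum = mac[i] + num;

		mac[i] = sum & 0xff;
		if (sum <= 0xff)
			return 0;	/* no carry left to propagate */
		num = 1;		/* carry into the next octet */
	}
	return -ENOENT;			/* carry would reach the OUI */
}
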
diff --git a/arch/mips/bmips/Kconfig b/arch/mips/bmips/Kconfig
index e2c4fd682c74..264328d528c7 100644
--- a/arch/mips/bmips/Kconfig
+++ b/arch/mips/bmips/Kconfig
@@ -21,6 +21,10 @@ config DT_BCM93384WVG_VIPER
bool "BCM93384WVG Viper CPU (EXPERIMENTAL)"
select BUILTIN_DTB
+config DT_BCM96358NB4SER
+ bool "BCM96358NB4SER"
+ select BUILTIN_DTB
+
config DT_BCM96368MVWG
bool "BCM96368MVWG"
select BUILTIN_DTB
diff --git a/arch/mips/bmips/setup.c b/arch/mips/bmips/setup.c
index 35535284b39e..6776042679dd 100644
--- a/arch/mips/bmips/setup.c
+++ b/arch/mips/bmips/setup.c
@@ -95,6 +95,15 @@ static void bcm6328_quirks(void)
bcm63xx_fixup_cpu1();
}
+static void bcm6358_quirks(void)
+{
+ /*
+ * BCM6358 needs special handling for its shared TLB, so
+ * disable SMP for now
+ */
+ bmips_smp_enabled = 0;
+}
+
static void bcm6368_quirks(void)
{
bcm63xx_fixup_cpu1();
@@ -104,13 +113,16 @@ static const struct bmips_quirk bmips_quirk_list[] = {
{ "brcm,bcm3384-viper", &bcm3384_viper_quirks },
{ "brcm,bcm33843-viper", &bcm3384_viper_quirks },
{ "brcm,bcm6328", &bcm6328_quirks },
+ { "brcm,bcm6358", &bcm6358_quirks },
{ "brcm,bcm6368", &bcm6368_quirks },
{ "brcm,bcm63168", &bcm6368_quirks },
+ { "brcm,bcm63268", &bcm6368_quirks },
{ },
};
void __init prom_init(void)
{
+ bmips_cpu_setup();
register_bmips_smp_ops();
}
@@ -150,8 +162,8 @@ void __init plat_mem_setup(void)
/* intended to somewhat resemble ARM; see Documentation/arm/Booting */
if (fw_arg0 == 0 && fw_arg1 == 0xffffffff)
dtb = phys_to_virt(fw_arg2);
- else if (fw_arg0 == -2) /* UHI interface */
- dtb = (void *)fw_arg1;
+ else if (fw_passed_dtb) /* UHI interface */
+ dtb = (void *)fw_passed_dtb;
else if (__dtb_start != __dtb_end)
dtb = (void *)__dtb_start;
else
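
fw_passed_dtb centralizes the firmware-DTB detection that the removed lines open-coded: under the MIPS UHI boot protocol the loader sets $a0 (fw_arg0) to -2 and passes the DTB address in $a1 (fw_arg1). Roughly, the helper variable stands in for this check (sketch only; the real value is set up once in common MIPS boot code):

/* Return the DTB address handed over via the UHI register convention,
 * or 0 if the firmware did not use it. */
static unsigned long sketch_uhi_dtb(void)
{
	if (fw_arg0 == -2)
		return fw_arg1;
	return 0;
}
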
diff --git a/arch/mips/boot/compressed/Makefile b/arch/mips/boot/compressed/Makefile
index 4eff1ef02eff..90aca95fe314 100644
--- a/arch/mips/boot/compressed/Makefile
+++ b/arch/mips/boot/compressed/Makefile
@@ -37,12 +37,18 @@ vmlinuzobjs-$(CONFIG_DEBUG_ZBOOT) += $(obj)/dbg.o
vmlinuzobjs-$(CONFIG_SYS_SUPPORTS_ZBOOT_UART16550) += $(obj)/uart-16550.o
vmlinuzobjs-$(CONFIG_SYS_SUPPORTS_ZBOOT_UART_PROM) += $(obj)/uart-prom.o
vmlinuzobjs-$(CONFIG_MIPS_ALCHEMY) += $(obj)/uart-alchemy.o
+vmlinuzobjs-$(CONFIG_ATH79) += $(obj)/uart-ath79.o
endif
-vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o
+extra-y += uart-ath79.c
+$(obj)/uart-ath79.c: $(srctree)/arch/mips/ath79/early_printk.c
+ $(call cmd,shipped)
+
+vmlinuzobjs-$(CONFIG_KERNEL_XZ) += $(obj)/ashldi3.o $(obj)/bswapsi.o
-$(obj)/ashldi3.o: KBUILD_CFLAGS += -I$(srctree)/arch/mips/lib
-$(obj)/ashldi3.c: $(srctree)/arch/mips/lib/ashldi3.c
+extra-y += ashldi3.c bswapsi.c
+$(obj)/ashldi3.o $(obj)/bswapsi.o: KBUILD_CFLAGS += -I$(srctree)/arch/mips/lib
+$(obj)/ashldi3.c $(obj)/bswapsi.c: $(obj)/%.c: $(srctree)/arch/mips/lib/%.c
$(call cmd,shipped)
targets := $(notdir $(vmlinuzobjs-y))
diff --git a/arch/mips/boot/compressed/decompress.c b/arch/mips/boot/compressed/decompress.c
index 080cd53bac36..fdf99e9dd4c3 100644
--- a/arch/mips/boot/compressed/decompress.c
+++ b/arch/mips/boot/compressed/decompress.c
@@ -14,6 +14,7 @@
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/string.h>
+#include <linux/libfdt.h>
#include <asm/addrspace.h>
@@ -36,6 +37,8 @@ extern void puthex(unsigned long long val);
#define puthex(val) do {} while (0)
#endif
+extern char __appended_dtb[];
+
void error(char *x)
{
puts("\n\n");
@@ -114,6 +117,20 @@ void decompress_kernel(unsigned long boot_heap_start)
__decompress((char *)zimage_start, zimage_size, 0, 0,
(void *)VMLINUX_LOAD_ADDRESS_ULL, 0, 0, error);
+ if (IS_ENABLED(CONFIG_MIPS_RAW_APPENDED_DTB) &&
+ fdt_magic((void *)&__appended_dtb) == FDT_MAGIC) {
+ unsigned int image_size, dtb_size;
+
+ dtb_size = fdt_totalsize((void *)&__appended_dtb);
+
+ /* last four bytes are always the image size in little endian */
+ image_size = le32_to_cpup((void *)&__image_end - 4);
+
+ /* copy dtb to where the booted kernel will expect it */
+ memcpy((void *)VMLINUX_LOAD_ADDRESS_ULL + image_size,
+ __appended_dtb, dtb_size);
+ }
+
/* FIXME: should we flush cache here? */
puts("Now, booting the kernel...\n");
}
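
The destination of the memcpy() above comes straight from the image layout: the last four bytes of the uncompressed image hold its size in little-endian form, so the DTB lands immediately after the relocated kernel. A sketch of that address computation (helper name made up, byteorder helpers assumed from <asm/byteorder.h>):

/* Read the little-endian size word stored at the end of the image and
 * return the first byte past the loaded kernel, i.e. the DTB target. */
static void *sketch_appended_dtb_dest(void *load_addr, const void *image_end)
{
	u32 image_size = le32_to_cpup((const __le32 *)image_end - 1);

	return (char *)load_addr + image_size;
}
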
diff --git a/arch/mips/boot/compressed/head.S b/arch/mips/boot/compressed/head.S
index c580e853b9fb..409cb483a9ff 100644
--- a/arch/mips/boot/compressed/head.S
+++ b/arch/mips/boot/compressed/head.S
@@ -25,22 +25,6 @@ start:
move s2, a2
move s3, a3
-#ifdef CONFIG_MIPS_ZBOOT_APPENDED_DTB
- PTR_LA t0, __appended_dtb
-#ifdef CONFIG_CPU_BIG_ENDIAN
- li t1, 0xd00dfeed
-#else
- li t1, 0xedfe0dd0
-#endif
- lw t2, (t0)
- bne t1, t2, not_found
- nop
-
- move s1, t0
- PTR_LI s0, -2
-not_found:
-#endif
-
/* Clear BSS */
PTR_LA a0, _edata
PTR_LA a2, _end
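
The two immediates the deleted assembly compared against are the same FDT magic seen from opposite byte orders: the blob stores 0xd00dfeed big-endian, so a little-endian CPU loading that word natively reads 0xedfe0dd0. The equivalent test, now done in C through fdt_magic() in decompress.c, boils down to this sketch:

#include <linux/types.h>
#include <linux/swab.h>

#define SKETCH_FDT_MAGIC_BE	0xd00dfeedU

/* 'word' is the first 32-bit word of the candidate blob exactly as the
 * CPU loads it, without any byte swapping. */
static bool sketch_word_is_fdt_magic(u32 word)
{
#ifdef CONFIG_CPU_BIG_ENDIAN
	return word == SKETCH_FDT_MAGIC_BE;
#else
	return word == swab32(SKETCH_FDT_MAGIC_BE);	/* 0xedfe0dd0 */
#endif
}
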
diff --git a/arch/mips/boot/dts/brcm/Makefile b/arch/mips/boot/dts/brcm/Makefile
index eabeb603e805..fda9d387cc08 100644
--- a/arch/mips/boot/dts/brcm/Makefile
+++ b/arch/mips/boot/dts/brcm/Makefile
@@ -1,5 +1,6 @@
dtb-$(CONFIG_DT_BCM93384WVG) += bcm93384wvg.dtb
dtb-$(CONFIG_DT_BCM93384WVG_VIPER) += bcm93384wvg_viper.dtb
+dtb-$(CONFIG_DT_BCM96358NB4SER) += bcm96358nb4ser.dtb
dtb-$(CONFIG_DT_BCM96368MVWG) += bcm96368mvwg.dtb
dtb-$(CONFIG_DT_BCM9EJTAGPRB) += bcm9ejtagprb.dtb
dtb-$(CONFIG_DT_BCM97125CBMB) += bcm97125cbmb.dtb
@@ -14,6 +15,7 @@ dtb-$(CONFIG_DT_BCM97435SVMB) += bcm97435svmb.dtb
dtb-$(CONFIG_DT_NONE) += \
bcm93384wvg.dtb \
bcm93384wvg_viper.dtb \
+ bcm96358nb4ser.dtb \
bcm96368mvwg.dtb \
bcm9ejtagprb.dtb \
bcm97125cbmb.dtb \
diff --git a/arch/mips/boot/dts/brcm/bcm6328.dtsi b/arch/mips/boot/dts/brcm/bcm6328.dtsi
index 9d19236f53e7..5633b9d90f55 100644
--- a/arch/mips/boot/dts/brcm/bcm6328.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm6328.dtsi
@@ -23,7 +23,7 @@
};
clocks {
- periph_clk: periph_clk {
+ periph_clk: periph-clk {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <50000000>;
@@ -31,11 +31,11 @@
};
aliases {
- leds0 = &leds0;
- uart0 = &uart0;
+ serial0 = &uart0;
+ serial1 = &uart1;
};
- cpu_intc: cpu_intc {
+ cpu_intc: interrupt-controller {
#address-cells = <0>;
compatible = "mti,cpu-interrupt-controller";
@@ -50,16 +50,16 @@
compatible = "simple-bus";
ranges;
- periph_intc: periph_intc@10000020 {
- compatible = "brcm,bcm3380-l2-intc";
- reg = <0x10000024 0x4 0x1000002c 0x4>,
- <0x10000020 0x4 0x10000028 0x4>;
+ periph_intc: interrupt-controller@10000020 {
+ compatible = "brcm,bcm6345-l1-intc";
+ reg = <0x10000020 0x10>,
+ <0x10000030 0x10>;
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&cpu_intc>;
- interrupts = <2>;
+ interrupts = <2>, <3>;
};
uart0: serial@10000100 {
@@ -71,13 +71,22 @@
status = "disabled";
};
- timer: timer@10000040 {
+ uart1: serial@10000120 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0x10000120 0x18>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <39>;
+ clocks = <&periph_clk>;
+ status = "disabled";
+ };
+
+ timer: syscon@10000040 {
compatible = "syscon";
reg = <0x10000040 0x2c>;
native-endian;
};
- reboot {
+ reboot: syscon-reboot@10000068 {
compatible = "syscon-reboot";
regmap = <&timer>;
offset = <0x28>;
@@ -91,5 +100,24 @@
reg = <0x10000800 0x24>;
status = "disabled";
};
+
+ ehci: usb@10002500 {
+ compatible = "brcm,bcm6328-ehci", "generic-ehci";
+ reg = <0x10002500 0x100>;
+ big-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <42>;
+ status = "disabled";
+ };
+
+ ohci: usb@10002600 {
+ compatible = "brcm,bcm6328-ohci", "generic-ohci";
+ reg = <0x10002600 0x100>;
+ big-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <41>;
+ status = "disabled";
+ };
};
};
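
The renamed reboot node is a consumer of the generic syscon-reboot driver: on restart it performs a single regmap write into the syscon block referenced by 'regmap' (the timer block here) at the given 'offset', using the configured trigger bits. A rough, hedged sketch of that action (function name made up):

#include <linux/regmap.h>

/* Poke the restart trigger into the register at 'offset' inside the
 * referenced syscon block; the value comes from the DT properties. */
static int sketch_syscon_reboot_trigger(struct regmap *map, u32 offset,
					u32 value)
{
	return regmap_write(map, offset, value);
}
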
diff --git a/arch/mips/boot/dts/brcm/bcm6358.dtsi b/arch/mips/boot/dts/brcm/bcm6358.dtsi
new file mode 100644
index 000000000000..f9d8d392162b
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm6358.dtsi
@@ -0,0 +1,130 @@
+/ {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "brcm,bcm6358";
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ mips-hpt-frequency = <150000000>;
+
+ cpu@0 {
+ compatible = "brcm,bmips4350";
+ device_type = "cpu";
+ reg = <0>;
+ };
+
+ cpu@1 {
+ compatible = "brcm,bmips4350";
+ device_type = "cpu";
+ reg = <1>;
+ };
+ };
+
+ clocks {
+ periph_clk: periph-clk {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ clock-frequency = <50000000>;
+ };
+ };
+
+ aliases {
+ serial0 = &uart0;
+ serial1 = &uart1;
+ };
+
+ cpu_intc: interrupt-controller {
+ #address-cells = <0>;
+ compatible = "mti,cpu-interrupt-controller";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ ubus {
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ compatible = "simple-bus";
+ ranges;
+
+ periph_cntl: syscon@fffe0000 {
+ compatible = "syscon";
+ reg = <0xfffe0000 0xc>;
+ native-endian;
+ };
+
+ reboot: syscon-reboot@fffe0008 {
+ compatible = "syscon-reboot";
+ regmap = <&periph_cntl>;
+ offset = <0x8>;
+ mask = <0x1>;
+ };
+
+ periph_intc: interrupt-controller@fffe000c {
+ compatible = "brcm,bcm6345-l1-intc";
+ reg = <0xfffe000c 0x8>,
+ <0xfffe0038 0x8>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&cpu_intc>;
+ interrupts = <2>, <3>;
+ };
+
+ leds0: led-controller@fffe00d0 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "brcm,bcm6358-leds";
+ reg = <0xfffe00d0 0x8>;
+
+ status = "disabled";
+ };
+
+ uart0: serial@fffe0100 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0xfffe0100 0x18>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <2>;
+
+ clocks = <&periph_clk>;
+
+ status = "disabled";
+ };
+
+ uart1: serial@fffe0120 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0xfffe0120 0x18>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <3>;
+
+ clocks = <&periph_clk>;
+
+ status = "disabled";
+ };
+
+ ehci: usb@fffe1300 {
+ compatible = "brcm,bcm6358-ehci", "generic-ehci";
+ reg = <0xfffe1300 0x100>;
+ big-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <10>;
+ status = "disabled";
+ };
+
+ ohci: usb@fffe1400 {
+ compatible = "brcm,bcm6358-ohci", "generic-ohci";
+ reg = <0xfffe1400 0x100>;
+ big-endian;
+ no-big-frame-no;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <5>;
+ status = "disabled";
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/brcm/bcm6368.dtsi b/arch/mips/boot/dts/brcm/bcm6368.dtsi
index 1f6b9b5cddb4..d0e3a70b32e2 100644
--- a/arch/mips/boot/dts/brcm/bcm6368.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm6368.dtsi
@@ -20,11 +20,10 @@
device_type = "cpu";
reg = <1>;
};
-
};
clocks {
- periph_clk: periph_clk {
+ periph_clk: periph-clk {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <50000000>;
@@ -32,11 +31,11 @@
};
aliases {
- leds0 = &leds0;
- uart0 = &uart0;
+ serial0 = &uart0;
+ serial1 = &uart1;
};
- cpu_intc: cpu_intc {
+ cpu_intc: interrupt-controller {
#address-cells = <0>;
compatible = "mti,cpu-interrupt-controller";
@@ -64,16 +63,16 @@
mask = <0x1>;
};
- periph_intc: periph_intc@10000020 {
- compatible = "brcm,bcm3380-l2-intc";
- reg = <0x10000024 0x4 0x1000002c 0x4>,
- <0x10000020 0x4 0x10000028 0x4>;
+ periph_intc: interrupt-controller@10000020 {
+ compatible = "brcm,bcm6345-l1-intc";
+ reg = <0x10000020 0x10>,
+ <0x10000030 0x10>;
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&cpu_intc>;
- interrupts = <2>;
+ interrupts = <2>, <3>;
};
leds0: led-controller@100000d0 {
@@ -93,7 +92,16 @@
status = "disabled";
};
- ehci0: usb@10001500 {
+ uart1: serial@10000120 {
+ compatible = "brcm,bcm6345-uart";
+ reg = <0x10000120 0x18>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <3>;
+ clocks = <&periph_clk>;
+ status = "disabled";
+ };
+
+ ehci: usb@10001500 {
compatible = "brcm,bcm6368-ehci", "generic-ehci";
reg = <0x10001500 0x100>;
big-endian;
@@ -102,7 +110,7 @@
status = "disabled";
};
- ohci0: usb@10001600 {
+ ohci: usb@10001600 {
compatible = "brcm,bcm6368-ohci", "generic-ohci";
reg = <0x10001600 0x100>;
big-endian;
diff --git a/arch/mips/boot/dts/brcm/bcm7125.dtsi b/arch/mips/boot/dts/brcm/bcm7125.dtsi
index 3ae16053a0c9..550e1d9e3ee0 100644
--- a/arch/mips/boot/dts/brcm/bcm7125.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7125.dtsi
@@ -85,14 +85,15 @@
compatible = "brcm,bcm7120-l2-intc";
reg = <0x406780 0x8>;
- brcm,int-map-mask = <0x44>;
+ brcm,int-map-mask = <0x44>, <0xf000000>;
brcm,int-fwd-mask = <0x70000>;
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&periph_intc>;
- interrupts = <18>;
+ interrupts = <18>, <19>;
+ interrupt-names = "upg_main", "upg_bsc";
};
sun_top_ctrl: syscon@404000 {
@@ -118,6 +119,70 @@
status = "disabled";
};
+ uart1: serial@406b40 {
+ compatible = "ns16550a";
+ reg = <0x406b40 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <64>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ uart2: serial@406b80 {
+ compatible = "ns16550a";
+ reg = <0x406b80 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ native-endian;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <65>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ bsca: i2c@406200 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406200 0x58>;
+ interrupts = <24>;
+ interrupt-names = "upg_bsca";
+ status = "disabled";
+ };
+
+ bscb: i2c@406280 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406280 0x58>;
+ interrupts = <25>;
+ interrupt-names = "upg_bscb";
+ status = "disabled";
+ };
+
+ bscc: i2c@406300 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406300 0x58>;
+ interrupts = <26>;
+ interrupt-names = "upg_bscc";
+ status = "disabled";
+ };
+
+ bscd: i2c@406380 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406380 0x58>;
+ interrupts = <27>;
+ interrupt-names = "upg_bscd";
+ status = "disabled";
+ };
+
ehci0: usb@488300 {
compatible = "brcm,bcm7125-ehci", "generic-ehci";
reg = <0x488300 0x100>;
diff --git a/arch/mips/boot/dts/brcm/bcm7346.dtsi b/arch/mips/boot/dts/brcm/bcm7346.dtsi
index be7991917d29..ec959061d52e 100644
--- a/arch/mips/boot/dts/brcm/bcm7346.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7346.dtsi
@@ -24,8 +24,6 @@
aliases {
uart0 = &uart0;
- uart1 = &uart1;
- uart2 = &uart2;
};
cpu_intc: cpu_intc {
@@ -323,8 +321,6 @@
interrupts = <40>;
#address-cells = <1>;
#size-cells = <0>;
- brcm,broken-ncq;
- brcm,broken-phy;
status = "disabled";
sata0: sata-port@0 {
@@ -338,7 +334,7 @@
};
};
- sata_phy: sata-phy@1800000 {
+ sata_phy: sata-phy@180100 {
compatible = "brcm,bcm7425-sata-phy", "brcm,phy-sata3";
reg = <0x180100 0x0eff>;
reg-names = "phy";
diff --git a/arch/mips/boot/dts/brcm/bcm7358.dtsi b/arch/mips/boot/dts/brcm/bcm7358.dtsi
index 060805be619a..ca57fb5eb122 100644
--- a/arch/mips/boot/dts/brcm/bcm7358.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7358.dtsi
@@ -18,8 +18,6 @@
aliases {
uart0 = &uart0;
- uart1 = &uart1;
- uart2 = &uart2;
};
cpu_intc: cpu_intc {
diff --git a/arch/mips/boot/dts/brcm/bcm7360.dtsi b/arch/mips/boot/dts/brcm/bcm7360.dtsi
index bcdb09bfe07b..1c0c3d438c7a 100644
--- a/arch/mips/boot/dts/brcm/bcm7360.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7360.dtsi
@@ -18,8 +18,6 @@
aliases {
uart0 = &uart0;
- uart1 = &uart1;
- uart2 = &uart2;
};
cpu_intc: cpu_intc {
@@ -241,5 +239,45 @@
interrupts = <66>;
status = "disabled";
};
+
+ sata: sata@181000 {
+ compatible = "brcm,bcm7425-ahci", "brcm,sata3-ahci";
+ reg-names = "ahci", "top-ctrl";
+ reg = <0x181000 0xa9c>, <0x180020 0x1c>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <86>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ sata0: sata-port@0 {
+ reg = <0>;
+ phys = <&sata_phy0>;
+ };
+
+ sata1: sata-port@1 {
+ reg = <1>;
+ phys = <&sata_phy1>;
+ };
+ };
+
+ sata_phy: sata-phy@180100 {
+ compatible = "brcm,bcm7425-sata-phy", "brcm,phy-sata3";
+ reg = <0x180100 0x0eff>;
+ reg-names = "phy";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ sata_phy0: sata-phy@0 {
+ reg = <0>;
+ #phy-cells = <0>;
+ };
+
+ sata_phy1: sata-phy@1 {
+ reg = <1>;
+ #phy-cells = <0>;
+ };
+ };
};
};
diff --git a/arch/mips/boot/dts/brcm/bcm7362.dtsi b/arch/mips/boot/dts/brcm/bcm7362.dtsi
index d3b1b762e6c3..6b4713add4b8 100644
--- a/arch/mips/boot/dts/brcm/bcm7362.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7362.dtsi
@@ -24,8 +24,6 @@
aliases {
uart0 = &uart0;
- uart1 = &uart1;
- uart2 = &uart2;
};
cpu_intc: cpu_intc {
@@ -246,8 +244,6 @@
interrupts = <86>;
#address-cells = <1>;
#size-cells = <0>;
- brcm,broken-ncq;
- brcm,broken-phy;
status = "disabled";
sata0: sata-port@0 {
@@ -261,7 +257,7 @@
};
};
- sata_phy: sata-phy@1800000 {
+ sata_phy: sata-phy@180100 {
compatible = "brcm,bcm7425-sata-phy", "brcm,phy-sata3";
reg = <0x180100 0x0eff>;
reg-names = "phy";
diff --git a/arch/mips/boot/dts/brcm/bcm7420.dtsi b/arch/mips/boot/dts/brcm/bcm7420.dtsi
index 3302a1b8a5c9..0586bf662571 100644
--- a/arch/mips/boot/dts/brcm/bcm7420.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7420.dtsi
@@ -86,14 +86,15 @@
compatible = "brcm,bcm7120-l2-intc";
reg = <0x406780 0x8>;
- brcm,int-map-mask = <0x44>;
+ brcm,int-map-mask = <0x44>, <0x1f000000>;
brcm,int-fwd-mask = <0x70000>;
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&periph_intc>;
- interrupts = <18>;
+ interrupts = <18>, <19>;
+ interrupt-names = "upg_main", "upg_bsc";
};
sun_top_ctrl: syscon@404000 {
@@ -118,6 +119,78 @@
status = "disabled";
};
+ uart1: serial@406b40 {
+ compatible = "ns16550a";
+ reg = <0x406b40 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <64>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ uart2: serial@406b80 {
+ compatible = "ns16550a";
+ reg = <0x406b80 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <65>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ bsca: i2c@406200 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406200 0x58>;
+ interrupts = <24>;
+ interrupt-names = "upg_bsca";
+ status = "disabled";
+ };
+
+ bscb: i2c@406280 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406280 0x58>;
+ interrupts = <25>;
+ interrupt-names = "upg_bscb";
+ status = "disabled";
+ };
+
+ bscc: i2c@406300 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406300 0x58>;
+ interrupts = <26>;
+ interrupt-names = "upg_bscc";
+ status = "disabled";
+ };
+
+ bscd: i2c@406380 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406380 0x58>;
+ interrupts = <27>;
+ interrupt-names = "upg_bscd";
+ status = "disabled";
+ };
+
+ bsce: i2c@406800 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406800 0x58>;
+ interrupts = <28>;
+ interrupt-names = "upg_bsce";
+ status = "disabled";
+ };
+
enet0: ethernet@468000 {
phy-mode = "internal";
phy-handle = <&phy1>;
diff --git a/arch/mips/boot/dts/brcm/bcm7425.dtsi b/arch/mips/boot/dts/brcm/bcm7425.dtsi
index 15b27aae15a9..c1c15edaf829 100644
--- a/arch/mips/boot/dts/brcm/bcm7425.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7425.dtsi
@@ -87,14 +87,32 @@
compatible = "brcm,bcm7120-l2-intc";
reg = <0x406780 0x8>;
- brcm,int-map-mask = <0x44>;
+ brcm,int-map-mask = <0x44>, <0x7000000>;
brcm,int-fwd-mask = <0x70000>;
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&periph_intc>;
- interrupts = <55>;
+ interrupts = <55>, <53>;
+ interrupt-names = "upg_main", "upg_bsc";
+ };
+
+ upg_aon_irq0_intc: upg_aon_irq0_intc@409480 {
+ compatible = "brcm,bcm7120-l2-intc";
+ reg = <0x409480 0x8>;
+
+ brcm,int-map-mask = <0x40>, <0x18000000>, <0x100000>;
+ brcm,int-fwd-mask = <0>;
+ brcm,irq-can-wake;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <56>, <54>, <59>;
+ interrupt-names = "upg_main_aon", "upg_bsc_aon",
+ "upg_spi";
};
sun_top_ctrl: syscon@404000 {
@@ -119,6 +137,78 @@
status = "disabled";
};
+ uart1: serial@406b40 {
+ compatible = "ns16550a";
+ reg = <0x406b40 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <62>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ uart2: serial@406b80 {
+ compatible = "ns16550a";
+ reg = <0x406b80 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <63>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ bsca: i2c@409180 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ reg = <0x409180 0x58>;
+ interrupts = <27>;
+ interrupt-names = "upg_bsca";
+ status = "disabled";
+ };
+
+ bscb: i2c@409400 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ reg = <0x409400 0x58>;
+ interrupts = <28>;
+ interrupt-names = "upg_bscb";
+ status = "disabled";
+ };
+
+ bscc: i2c@406200 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406200 0x58>;
+ interrupts = <24>;
+ interrupt-names = "upg_bscc";
+ status = "disabled";
+ };
+
+ bscd: i2c@406280 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406280 0x58>;
+ interrupts = <25>;
+ interrupt-names = "upg_bscd";
+ status = "disabled";
+ };
+
+ bsce: i2c@406300 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406300 0x58>;
+ interrupts = <26>;
+ interrupt-names = "upg_bsce";
+ status = "disabled";
+ };
+
enet0: ethernet@b80000 {
phy-mode = "internal";
phy-handle = <&phy1>;
@@ -227,11 +317,9 @@
reg-names = "ahci", "top-ctrl";
reg = <0x181000 0xa9c>, <0x180020 0x1c>;
interrupt-parent = <&periph_intc>;
- interrupts = <40>;
+ interrupts = <41>;
#address-cells = <1>;
#size-cells = <0>;
- brcm,broken-ncq;
- brcm,broken-phy;
status = "disabled";
sata0: sata-port@0 {
@@ -245,7 +333,7 @@
};
};
- sata_phy: sata-phy@1800000 {
+ sata_phy: sata-phy@180100 {
compatible = "brcm,bcm7425-sata-phy", "brcm,phy-sata3";
reg = <0x180100 0x0eff>;
reg-names = "phy";
diff --git a/arch/mips/boot/dts/brcm/bcm7435.dtsi b/arch/mips/boot/dts/brcm/bcm7435.dtsi
index adb33e355043..a874d3a0e2ee 100644
--- a/arch/mips/boot/dts/brcm/bcm7435.dtsi
+++ b/arch/mips/boot/dts/brcm/bcm7435.dtsi
@@ -7,7 +7,7 @@
#address-cells = <1>;
#size-cells = <0>;
- mips-hpt-frequency = <163125000>;
+ mips-hpt-frequency = <175625000>;
cpu@0 {
compatible = "brcm,bmips5200";
@@ -63,13 +63,14 @@
periph_intc: periph_intc@41b500 {
compatible = "brcm,bcm7038-l1-intc";
- reg = <0x41b500 0x40>, <0x41b600 0x40>;
+ reg = <0x41b500 0x40>, <0x41b600 0x40>,
+ <0x41b700 0x40>, <0x41b800 0x40>;
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&cpu_intc>;
- interrupts = <2>, <3>;
+ interrupts = <2>, <3>, <2>, <3>;
};
sun_l2_intc: sun_l2_intc@403000 {
@@ -82,7 +83,7 @@
};
gisb-arb@400000 {
- compatible = "brcm,bcm7400-gisb-arb";
+ compatible = "brcm,bcm7435-gisb-arb";
reg = <0x400000 0xdc>;
native-endian;
interrupt-parent = <&sun_l2_intc>;
@@ -101,14 +102,32 @@
compatible = "brcm,bcm7120-l2-intc";
reg = <0x406780 0x8>;
- brcm,int-map-mask = <0x44>;
+ brcm,int-map-mask = <0x44>, <0x7000000>;
brcm,int-fwd-mask = <0x70000>;
interrupt-controller;
#interrupt-cells = <1>;
interrupt-parent = <&periph_intc>;
- interrupts = <60>;
+ interrupts = <60>, <58>;
+ interrupt-names = "upg_main", "upg_bsc";
+ };
+
+ upg_aon_irq0_intc: upg_aon_irq0_intc@409480 {
+ compatible = "brcm,bcm7120-l2-intc";
+ reg = <0x409480 0x8>;
+
+ brcm,int-map-mask = <0x40>, <0x18000000>, <0x100000>;
+ brcm,int-fwd-mask = <0>;
+ brcm,irq-can-wake;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ interrupt-parent = <&periph_intc>;
+ interrupts = <61>, <59>, <64>;
+ interrupt-names = "upg_main_aon", "upg_bsc_aon",
+ "upg_spi";
};
sun_top_ctrl: syscon@404000 {
@@ -133,6 +152,78 @@
status = "disabled";
};
+ uart1: serial@406b40 {
+ compatible = "ns16550a";
+ reg = <0x406b40 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <67>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ uart2: serial@406b80 {
+ compatible = "ns16550a";
+ reg = <0x406b80 0x20>;
+ reg-io-width = <0x4>;
+ reg-shift = <0x2>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <68>;
+ clocks = <&uart_clk>;
+ status = "disabled";
+ };
+
+ bsca: i2c@406300 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406300 0x58>;
+ interrupts = <26>;
+ interrupt-names = "upg_bsca";
+ status = "disabled";
+ };
+
+ bscb: i2c@409400 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ reg = <0x409400 0x58>;
+ interrupts = <28>;
+ interrupt-names = "upg_bscb";
+ status = "disabled";
+ };
+
+ bscc: i2c@406200 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406200 0x58>;
+ interrupts = <24>;
+ interrupt-names = "upg_bscc";
+ status = "disabled";
+ };
+
+ bscd: i2c@406280 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_irq0_intc>;
+ reg = <0x406280 0x58>;
+ interrupts = <25>;
+ interrupt-names = "upg_bscd";
+ status = "disabled";
+ };
+
+ bsce: i2c@409180 {
+ clock-frequency = <390000>;
+ compatible = "brcm,brcmstb-i2c";
+ interrupt-parent = <&upg_aon_irq0_intc>;
+ reg = <0x409180 0x58>;
+ interrupts = <27>;
+ interrupt-names = "upg_bsce";
+ status = "disabled";
+ };
+
enet0: ethernet@b80000 {
phy-mode = "internal";
phy-handle = <&phy1>;
@@ -235,5 +326,45 @@
interrupts = <78>;
status = "disabled";
};
+
+ sata: sata@181000 {
+ compatible = "brcm,bcm7425-ahci", "brcm,sata3-ahci";
+ reg-names = "ahci", "top-ctrl";
+ reg = <0x181000 0xa9c>, <0x180020 0x1c>;
+ interrupt-parent = <&periph_intc>;
+ interrupts = <45>;
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ sata0: sata-port@0 {
+ reg = <0>;
+ phys = <&sata_phy0>;
+ };
+
+ sata1: sata-port@1 {
+ reg = <1>;
+ phys = <&sata_phy1>;
+ };
+ };
+
+ sata_phy: sata-phy@180100 {
+ compatible = "brcm,bcm7425-sata-phy", "brcm,phy-sata3";
+ reg = <0x180100 0x0eff>;
+ reg-names = "phy";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ status = "disabled";
+
+ sata_phy0: sata-phy@0 {
+ reg = <0>;
+ #phy-cells = <0>;
+ };
+
+ sata_phy1: sata-phy@1 {
+ reg = <1>;
+ #phy-cells = <0>;
+ };
+ };
};
};
diff --git a/arch/mips/boot/dts/brcm/bcm96358nb4ser.dts b/arch/mips/boot/dts/brcm/bcm96358nb4ser.dts
new file mode 100644
index 000000000000..f412117972e6
--- /dev/null
+++ b/arch/mips/boot/dts/brcm/bcm96358nb4ser.dts
@@ -0,0 +1,46 @@
+/dts-v1/;
+
+/include/ "bcm6358.dtsi"
+
+/ {
+ compatible = "sfr,nb4-ser", "brcm,bcm6358";
+ model = "SFR Neufbox 4 (Sercomm)";
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x00000000 0x02000000>;
+ };
+
+ chosen {
+ stdout-path = &uart0;
+ };
+};
+
+&leds0 {
+ status = "ok";
+
+ led@0 {
+ reg = <0>;
+ active-low;
+ label = "nb4-ser:white:alarm";
+ };
+ led@2 {
+ reg = <2>;
+ active-low;
+ label = "nb4-ser:white:tv";
+ };
+ led@3 {
+ reg = <3>;
+ active-low;
+ label = "nb4-ser:white:tel";
+ };
+ led@4 {
+ reg = <4>;
+ active-low;
+ label = "nb4-ser:white:adsl";
+ };
+};
+
+&uart0 {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm96368mvwg.dts b/arch/mips/boot/dts/brcm/bcm96368mvwg.dts
index 0e890c28fe5c..8c71c6845730 100644
--- a/arch/mips/boot/dts/brcm/bcm96368mvwg.dts
+++ b/arch/mips/boot/dts/brcm/bcm96368mvwg.dts
@@ -22,10 +22,10 @@
};
/* FIXME: need to set up USB_CTRL registers first */
-&ehci0 {
+&ehci {
status = "disabled";
};
-&ohci0 {
+&ohci {
status = "disabled";
};
diff --git a/arch/mips/boot/dts/brcm/bcm97125cbmb.dts b/arch/mips/boot/dts/brcm/bcm97125cbmb.dts
index e046b1109eab..f2449d147c6d 100644
--- a/arch/mips/boot/dts/brcm/bcm97125cbmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97125cbmb.dts
@@ -21,6 +21,30 @@
status = "okay";
};
+&uart1 {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&bsca {
+ status = "okay";
+};
+
+&bscb {
+ status = "okay";
+};
+
+&bscc {
+ status = "okay";
+};
+
+&bscd {
+ status = "okay";
+};
+
/* FIXME: USB is wonky; disable it for now */
&ehci0 {
status = "disabled";
diff --git a/arch/mips/boot/dts/brcm/bcm97360svmb.dts b/arch/mips/boot/dts/brcm/bcm97360svmb.dts
index d48462e091f1..73124be9548a 100644
--- a/arch/mips/boot/dts/brcm/bcm97360svmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97360svmb.dts
@@ -56,3 +56,11 @@
&ohci0 {
status = "okay";
};
+
+&sata {
+ status = "okay";
+};
+
+&sata_phy {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/brcm/bcm97420c.dts b/arch/mips/boot/dts/brcm/bcm97420c.dts
index 67fe1f3a3891..600d57abee05 100644
--- a/arch/mips/boot/dts/brcm/bcm97420c.dts
+++ b/arch/mips/boot/dts/brcm/bcm97420c.dts
@@ -23,6 +23,34 @@
status = "okay";
};
+&uart1 {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&bsca {
+ status = "okay";
+};
+
+&bscb {
+ status = "okay";
+};
+
+&bscc {
+ status = "okay";
+};
+
+&bscd {
+ status = "okay";
+};
+
+&bsce {
+ status = "okay";
+};
+
/* FIXME: MAC driver comes up but cannot attach to PHY */
&enet0 {
status = "disabled";
diff --git a/arch/mips/boot/dts/brcm/bcm97425svmb.dts b/arch/mips/boot/dts/brcm/bcm97425svmb.dts
index 689c68a4f9c8..119c714805cb 100644
--- a/arch/mips/boot/dts/brcm/bcm97425svmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97425svmb.dts
@@ -23,6 +23,34 @@
status = "okay";
};
+&uart1 {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&bsca {
+ status = "okay";
+};
+
+&bscb {
+ status = "okay";
+};
+
+&bscc {
+ status = "okay";
+};
+
+&bscd {
+ status = "okay";
+};
+
+&bsce {
+ status = "okay";
+};
+
&enet0 {
status = "okay";
};
diff --git a/arch/mips/boot/dts/brcm/bcm97435svmb.dts b/arch/mips/boot/dts/brcm/bcm97435svmb.dts
index 1df088183523..43e3ba27f07b 100644
--- a/arch/mips/boot/dts/brcm/bcm97435svmb.dts
+++ b/arch/mips/boot/dts/brcm/bcm97435svmb.dts
@@ -14,7 +14,7 @@
};
chosen {
- bootargs = "console=ttyS0,115200 maxcpus=1";
+ bootargs = "console=ttyS0,115200";
stdout-path = &uart0;
};
};
@@ -23,6 +23,34 @@
status = "okay";
};
+&uart1 {
+ status = "okay";
+};
+
+&uart2 {
+ status = "okay";
+};
+
+&bsca {
+ status = "okay";
+};
+
+&bscb {
+ status = "okay";
+};
+
+&bscc {
+ status = "okay";
+};
+
+&bscd {
+ status = "okay";
+};
+
+&bsce {
+ status = "okay";
+};
+
&enet0 {
status = "okay";
};
@@ -58,3 +86,11 @@
&ohci3 {
status = "okay";
};
+
+&sata {
+ status = "okay";
+};
+
+&sata_phy {
+ status = "okay";
+};
diff --git a/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts b/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts
new file mode 100644
index 000000000000..b134798a0fd7
--- /dev/null
+++ b/arch/mips/boot/dts/cavium-octeon/dlink_dsr-1000n.dts
@@ -0,0 +1,94 @@
+/*
+ * Device tree source for D-Link DSR-1000N.
+ *
+ * Written by: Aaro Koskinen <aaro.koskinen@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/include/ "octeon_3xxx.dtsi"
+#include <dt-bindings/gpio/gpio.h>
+
+/ {
+ model = "dlink,dsr-1000n";
+
+ soc@0 {
+ smi0: mdio@1180000001800 {
+ phy8: ethernet-phy@8 {
+ reg = <8>;
+ compatible = "ethernet-phy-ieee802.3-c22";
+ };
+ };
+
+ pip: pip@11800a0000000 {
+ interface@0 {
+ ethernet@0 {
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ ethernet@1 {
+ fixed-link {
+ speed = <1000>;
+ full-duplex;
+ };
+ };
+ ethernet@2 {
+ phy-handle = <&phy8>;
+ };
+ };
+ };
+
+ twsi0: i2c@1180000001000 {
+ rtc@68 {
+ compatible = "dallas,ds1337";
+ reg = <0x68>;
+ };
+ };
+
+ uart0: serial@1180000000800 {
+ clock-frequency = <500000000>;
+ };
+
+ usbn: usbn@1180068000000 {
+ refclk-frequency = <12000000>;
+ refclk-type = "crystal";
+ };
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ usb1 {
+ label = "usb1";
+ gpios = <&gpio 9 GPIO_ACTIVE_LOW>;
+ };
+
+ usb2 {
+ label = "usb2";
+ gpios = <&gpio 10 GPIO_ACTIVE_LOW>;
+ };
+
+ wps {
+ label = "wps";
+ gpios = <&gpio 11 GPIO_ACTIVE_LOW>;
+ };
+
+ wireless1 {
+ label = "5g";
+ gpios = <&gpio 17 GPIO_ACTIVE_LOW>;
+ };
+
+ wireless2 {
+ label = "2.4g";
+ gpios = <&gpio 18 GPIO_ACTIVE_LOW>;
+ };
+ };
+
+ aliases {
+ pip = &pip;
+ };
+};
diff --git a/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts b/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts
index 9c48e0586ba7..ca6b4467bcd3 100644
--- a/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts
+++ b/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dts
@@ -1,4 +1,3 @@
-/dts-v1/;
/*
* OCTEON 3XXX, 5XXX, 63XX device tree skeleton.
*
@@ -6,56 +5,12 @@
* use. Because of this, it contains a super-set of the available
* devices and properties.
*/
-/ {
- compatible = "cavium,octeon-3860";
- #address-cells = <2>;
- #size-cells = <2>;
- interrupt-parent = <&ciu>;
- soc@0 {
- compatible = "simple-bus";
- #address-cells = <2>;
- #size-cells = <2>;
- ranges; /* Direct mapping */
-
- ciu: interrupt-controller@1070000000000 {
- compatible = "cavium,octeon-3860-ciu";
- interrupt-controller;
- /* Interrupts are specified by two parts:
- * 1) Controller register (0 or 1)
- * 2) Bit within the register (0..63)
- */
- #interrupt-cells = <2>;
- reg = <0x10700 0x00000000 0x0 0x7000>;
- };
-
- gpio: gpio-controller@1070000000800 {
- #gpio-cells = <2>;
- compatible = "cavium,octeon-3860-gpio";
- reg = <0x10700 0x00000800 0x0 0x100>;
- gpio-controller;
- /* Interrupts are specified by two parts:
- * 1) GPIO pin number (0..15)
- * 2) Triggering (1 - edge rising
- * 2 - edge falling
- * 4 - level active high
- * 8 - level active low)
- */
- interrupt-controller;
- #interrupt-cells = <2>;
- /* The GPIO pin connect to 16 consecutive CUI bits */
- interrupts = <0 16>, <0 17>, <0 18>, <0 19>,
- <0 20>, <0 21>, <0 22>, <0 23>,
- <0 24>, <0 25>, <0 26>, <0 27>,
- <0 28>, <0 29>, <0 30>, <0 31>;
- };
+/include/ "octeon_3xxx.dtsi"
+/ {
+ soc@0 {
smi0: mdio@1180000001800 {
- compatible = "cavium,octeon-3860-mdio";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x11800 0x00001800 0x0 0x40>;
-
phy0: ethernet-phy@0 {
compatible = "marvell,88e1118";
marvell,reg-init =
@@ -220,35 +175,16 @@
};
pip: pip@11800a0000000 {
- compatible = "cavium,octeon-3860-pip";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0x11800 0xa0000000 0x0 0x2000>;
-
interface@0 {
- compatible = "cavium,octeon-3860-pip-interface";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <0>; /* interface */
-
ethernet@0 {
- compatible = "cavium,octeon-3860-pip-port";
- reg = <0x0>; /* Port */
- local-mac-address = [ 00 00 00 00 00 00 ];
phy-handle = <&phy2>;
cavium,alt-phy-handle = <&phy100>;
};
ethernet@1 {
- compatible = "cavium,octeon-3860-pip-port";
- reg = <0x1>; /* Port */
- local-mac-address = [ 00 00 00 00 00 00 ];
phy-handle = <&phy3>;
cavium,alt-phy-handle = <&phy101>;
};
ethernet@2 {
- compatible = "cavium,octeon-3860-pip-port";
- reg = <0x2>; /* Port */
- local-mac-address = [ 00 00 00 00 00 00 ];
phy-handle = <&phy4>;
cavium,alt-phy-handle = <&phy102>;
};
@@ -322,11 +258,6 @@
};
interface@1 {
- compatible = "cavium,octeon-3860-pip-interface";
- #address-cells = <1>;
- #size-cells = <0>;
- reg = <1>; /* interface */
-
ethernet@0 {
compatible = "cavium,octeon-3860-pip-port";
reg = <0x0>; /* Port */
@@ -355,13 +286,6 @@
};
twsi0: i2c@1180000001000 {
- #address-cells = <1>;
- #size-cells = <0>;
- compatible = "cavium,octeon-3860-twsi";
- reg = <0x11800 0x00001000 0x0 0x200>;
- interrupts = <0 45>;
- clock-frequency = <100000>;
-
rtc@68 {
compatible = "dallas,ds1337";
reg = <0x68>;
@@ -381,15 +305,6 @@
clock-frequency = <100000>;
};
- uart0: serial@1180000000800 {
- compatible = "cavium,octeon-3860-uart","ns16550";
- reg = <0x11800 0x00000800 0x0 0x400>;
- clock-frequency = <0>;
- current-speed = <115200>;
- reg-shift = <3>;
- interrupts = <0 34>;
- };
-
uart1: serial@1180000000c00 {
compatible = "cavium,octeon-3860-uart","ns16550";
reg = <0x11800 0x00000c00 0x0 0x400>;
@@ -409,98 +324,6 @@
};
bootbus: bootbus@1180000000000 {
- compatible = "cavium,octeon-3860-bootbus";
- reg = <0x11800 0x00000000 0x0 0x200>;
- /* The chip select number and offset */
- #address-cells = <2>;
- /* The size of the chip select region */
- #size-cells = <1>;
- ranges = <0 0 0x0 0x1f400000 0xc00000>,
- <1 0 0x10000 0x30000000 0>,
- <2 0 0x10000 0x40000000 0>,
- <3 0 0x10000 0x50000000 0>,
- <4 0 0x0 0x1d020000 0x10000>,
- <5 0 0x0 0x1d040000 0x10000>,
- <6 0 0x0 0x1d050000 0x10000>,
- <7 0 0x10000 0x90000000 0>;
-
- cavium,cs-config@0 {
- compatible = "cavium,octeon-3860-bootbus-config";
- cavium,cs-index = <0>;
- cavium,t-adr = <20>;
- cavium,t-ce = <60>;
- cavium,t-oe = <60>;
- cavium,t-we = <45>;
- cavium,t-rd-hld = <35>;
- cavium,t-wr-hld = <45>;
- cavium,t-pause = <0>;
- cavium,t-wait = <0>;
- cavium,t-page = <35>;
- cavium,t-rd-dly = <0>;
-
- cavium,pages = <0>;
- cavium,bus-width = <8>;
- };
- cavium,cs-config@4 {
- compatible = "cavium,octeon-3860-bootbus-config";
- cavium,cs-index = <4>;
- cavium,t-adr = <320>;
- cavium,t-ce = <320>;
- cavium,t-oe = <320>;
- cavium,t-we = <320>;
- cavium,t-rd-hld = <320>;
- cavium,t-wr-hld = <320>;
- cavium,t-pause = <320>;
- cavium,t-wait = <320>;
- cavium,t-page = <320>;
- cavium,t-rd-dly = <0>;
-
- cavium,pages = <0>;
- cavium,bus-width = <8>;
- };
- cavium,cs-config@5 {
- compatible = "cavium,octeon-3860-bootbus-config";
- cavium,cs-index = <5>;
- cavium,t-adr = <5>;
- cavium,t-ce = <300>;
- cavium,t-oe = <125>;
- cavium,t-we = <150>;
- cavium,t-rd-hld = <100>;
- cavium,t-wr-hld = <30>;
- cavium,t-pause = <0>;
- cavium,t-wait = <30>;
- cavium,t-page = <320>;
- cavium,t-rd-dly = <0>;
-
- cavium,pages = <0>;
- cavium,bus-width = <16>;
- };
- cavium,cs-config@6 {
- compatible = "cavium,octeon-3860-bootbus-config";
- cavium,cs-index = <6>;
- cavium,t-adr = <5>;
- cavium,t-ce = <300>;
- cavium,t-oe = <270>;
- cavium,t-we = <150>;
- cavium,t-rd-hld = <100>;
- cavium,t-wr-hld = <70>;
- cavium,t-pause = <0>;
- cavium,t-wait = <0>;
- cavium,t-page = <320>;
- cavium,t-rd-dly = <0>;
-
- cavium,pages = <0>;
- cavium,wait-mode;
- cavium,bus-width = <16>;
- };
-
- flash0: nor@0,0 {
- compatible = "cfi-flash";
- reg = <0 0 0x800000>;
- #address-cells = <1>;
- #size-cells = <1>;
- };
-
led0: led-display@4,0 {
compatible = "avago,hdsp-253x";
reg = <4 0x20 0x20>, <4 0 0x20>;
@@ -515,17 +338,6 @@
};
};
- dma0: dma-engine@1180000000100 {
- compatible = "cavium,octeon-5750-bootbus-dma";
- reg = <0x11800 0x00000100 0x0 0x8>;
- interrupts = <0 63>;
- };
- dma1: dma-engine@1180000000108 {
- compatible = "cavium,octeon-5750-bootbus-dma";
- reg = <0x11800 0x00000108 0x0 0x8>;
- interrupts = <0 63>;
- };
-
uctl: uctl@118006f000000 {
compatible = "cavium,octeon-6335-uctl";
reg = <0x11800 0x6f000000 0x0 0x100>;
@@ -552,21 +364,10 @@
};
usbn: usbn@1180068000000 {
- compatible = "cavium,octeon-5750-usbn";
- reg = <0x11800 0x68000000 0x0 0x1000>;
- ranges; /* Direct mapping */
- #address-cells = <2>;
- #size-cells = <2>;
/* 12MHz, 24MHz and 48MHz allowed */
refclk-frequency = <12000000>;
/* Either "crystal" or "external" */
refclk-type = "crystal";
-
- usbc@16f0010000000 {
- compatible = "cavium,octeon-5750-usbc";
- reg = <0x16f00 0x10000000 0x0 0x80000>;
- interrupts = <0 56>;
- };
};
};
@@ -587,16 +388,4 @@
usbn = &usbn;
led0 = &led0;
};
-
- dsr1000n-leds {
- compatible = "gpio-leds";
- usb1 {
- label = "usb1";
- gpios = <&gpio 9 1>; /* Active low */
- };
- usb2 {
- label = "usb2";
- gpios = <&gpio 10 1>; /* Active low */
- };
- };
};
diff --git a/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dtsi b/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dtsi
new file mode 100644
index 000000000000..5302148e05a3
--- /dev/null
+++ b/arch/mips/boot/dts/cavium-octeon/octeon_3xxx.dtsi
@@ -0,0 +1,231 @@
+/* OCTEON 3XXX DTS common parts. */
+
+/dts-v1/;
+
+/ {
+ compatible = "cavium,octeon-3860";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ interrupt-parent = <&ciu>;
+
+ soc@0 {
+ compatible = "simple-bus";
+ #address-cells = <2>;
+ #size-cells = <2>;
+ ranges; /* Direct mapping */
+
+ ciu: interrupt-controller@1070000000000 {
+ compatible = "cavium,octeon-3860-ciu";
+ interrupt-controller;
+ /* Interrupts are specified by two parts:
+ * 1) Controller register (0 or 1)
+ * 2) Bit within the register (0..63)
+ */
+ #interrupt-cells = <2>;
+ reg = <0x10700 0x00000000 0x0 0x7000>;
+ };
+
+ gpio: gpio-controller@1070000000800 {
+ #gpio-cells = <2>;
+ compatible = "cavium,octeon-3860-gpio";
+ reg = <0x10700 0x00000800 0x0 0x100>;
+ gpio-controller;
+ /* Interrupts are specified by two parts:
+ * 1) GPIO pin number (0..15)
+ * 2) Triggering (1 - edge rising
+ * 2 - edge falling
+ * 4 - level active high
+ * 8 - level active low)
+ */
+ interrupt-controller;
+ #interrupt-cells = <2>;
+			/* The GPIO pins connect to 16 consecutive CIU bits */
+ interrupts = <0 16>, <0 17>, <0 18>, <0 19>,
+ <0 20>, <0 21>, <0 22>, <0 23>,
+ <0 24>, <0 25>, <0 26>, <0 27>,
+ <0 28>, <0 29>, <0 30>, <0 31>;
+ };
+
+ smi0: mdio@1180000001800 {
+ compatible = "cavium,octeon-3860-mdio";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x11800 0x00001800 0x0 0x40>;
+ };
+
+ pip: pip@11800a0000000 {
+ compatible = "cavium,octeon-3860-pip";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0x11800 0xa0000000 0x0 0x2000>;
+
+ interface@0 {
+ compatible = "cavium,octeon-3860-pip-interface";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <0>; /* interface */
+
+ ethernet@0 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x0>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ ethernet@1 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x1>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ ethernet@2 {
+ compatible = "cavium,octeon-3860-pip-port";
+ reg = <0x2>; /* Port */
+ local-mac-address = [ 00 00 00 00 00 00 ];
+ };
+ };
+
+ interface@1 {
+ compatible = "cavium,octeon-3860-pip-interface";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ reg = <1>; /* interface */
+ };
+ };
+
+ twsi0: i2c@1180000001000 {
+ #address-cells = <1>;
+ #size-cells = <0>;
+ compatible = "cavium,octeon-3860-twsi";
+ reg = <0x11800 0x00001000 0x0 0x200>;
+ interrupts = <0 45>;
+ clock-frequency = <100000>;
+ };
+
+ uart0: serial@1180000000800 {
+ compatible = "cavium,octeon-3860-uart","ns16550";
+ reg = <0x11800 0x00000800 0x0 0x400>;
+ clock-frequency = <0>;
+ current-speed = <115200>;
+ reg-shift = <3>;
+ interrupts = <0 34>;
+ };
+
+ bootbus: bootbus@1180000000000 {
+ compatible = "cavium,octeon-3860-bootbus";
+ reg = <0x11800 0x00000000 0x0 0x200>;
+ /* The chip select number and offset */
+ #address-cells = <2>;
+ /* The size of the chip select region */
+ #size-cells = <1>;
+ ranges = <0 0 0x0 0x1f400000 0xc00000>,
+ <1 0 0x10000 0x30000000 0>,
+ <2 0 0x10000 0x40000000 0>,
+ <3 0 0x10000 0x50000000 0>,
+ <4 0 0x0 0x1d020000 0x10000>,
+ <5 0 0x0 0x1d040000 0x10000>,
+ <6 0 0x0 0x1d050000 0x10000>,
+ <7 0 0x10000 0x90000000 0>;
+
+ cavium,cs-config@0 {
+ compatible = "cavium,octeon-3860-bootbus-config";
+ cavium,cs-index = <0>;
+ cavium,t-adr = <20>;
+ cavium,t-ce = <60>;
+ cavium,t-oe = <60>;
+ cavium,t-we = <45>;
+ cavium,t-rd-hld = <35>;
+ cavium,t-wr-hld = <45>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <0>;
+ cavium,t-page = <35>;
+ cavium,t-rd-dly = <0>;
+
+ cavium,pages = <0>;
+ cavium,bus-width = <8>;
+ };
+ cavium,cs-config@4 {
+ compatible = "cavium,octeon-3860-bootbus-config";
+ cavium,cs-index = <4>;
+ cavium,t-adr = <320>;
+ cavium,t-ce = <320>;
+ cavium,t-oe = <320>;
+ cavium,t-we = <320>;
+ cavium,t-rd-hld = <320>;
+ cavium,t-wr-hld = <320>;
+ cavium,t-pause = <320>;
+ cavium,t-wait = <320>;
+ cavium,t-page = <320>;
+ cavium,t-rd-dly = <0>;
+
+ cavium,pages = <0>;
+ cavium,bus-width = <8>;
+ };
+ cavium,cs-config@5 {
+ compatible = "cavium,octeon-3860-bootbus-config";
+ cavium,cs-index = <5>;
+ cavium,t-adr = <5>;
+ cavium,t-ce = <300>;
+ cavium,t-oe = <125>;
+ cavium,t-we = <150>;
+ cavium,t-rd-hld = <100>;
+ cavium,t-wr-hld = <30>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <30>;
+ cavium,t-page = <320>;
+ cavium,t-rd-dly = <0>;
+
+ cavium,pages = <0>;
+ cavium,bus-width = <16>;
+ };
+ cavium,cs-config@6 {
+ compatible = "cavium,octeon-3860-bootbus-config";
+ cavium,cs-index = <6>;
+ cavium,t-adr = <5>;
+ cavium,t-ce = <300>;
+ cavium,t-oe = <270>;
+ cavium,t-we = <150>;
+ cavium,t-rd-hld = <100>;
+ cavium,t-wr-hld = <70>;
+ cavium,t-pause = <0>;
+ cavium,t-wait = <0>;
+ cavium,t-page = <320>;
+ cavium,t-rd-dly = <0>;
+
+ cavium,pages = <0>;
+ cavium,wait-mode;
+ cavium,bus-width = <16>;
+ };
+
+ flash0: nor@0,0 {
+ compatible = "cfi-flash";
+ reg = <0 0 0x800000>;
+ #address-cells = <1>;
+ #size-cells = <1>;
+ };
+ };
+
+ dma0: dma-engine@1180000000100 {
+ compatible = "cavium,octeon-5750-bootbus-dma";
+ reg = <0x11800 0x00000100 0x0 0x8>;
+ interrupts = <0 63>;
+ };
+
+ dma1: dma-engine@1180000000108 {
+ compatible = "cavium,octeon-5750-bootbus-dma";
+ reg = <0x11800 0x00000108 0x0 0x8>;
+ interrupts = <0 63>;
+ };
+
+ usbn: usbn@1180068000000 {
+ compatible = "cavium,octeon-5750-usbn";
+ reg = <0x11800 0x68000000 0x0 0x1000>;
+ ranges; /* Direct mapping */
+ #address-cells = <2>;
+ #size-cells = <2>;
+
+ usbc@16f0010000000 {
+ compatible = "cavium,octeon-5750-usbc";
+ reg = <0x16f00 0x10000000 0x0 0x80000>;
+ interrupts = <0 56>;
+ };
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/cavium-octeon/ubnt_e100.dts b/arch/mips/boot/dts/cavium-octeon/ubnt_e100.dts
new file mode 100644
index 000000000000..243e5dc444fb
--- /dev/null
+++ b/arch/mips/boot/dts/cavium-octeon/ubnt_e100.dts
@@ -0,0 +1,59 @@
+/*
+ * Device tree source for EdgeRouter Lite.
+ *
+ * Written by: Aaro Koskinen <aaro.koskinen@iki.fi>
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+
+/include/ "octeon_3xxx.dtsi"
+
+/ {
+ model = "ubnt,e100";
+
+ soc@0 {
+ smi0: mdio@1180000001800 {
+ phy5: ethernet-phy@5 {
+ reg = <5>;
+ compatible = "ethernet-phy-ieee802.3-c22";
+ };
+ phy6: ethernet-phy@6 {
+ reg = <6>;
+ compatible = "ethernet-phy-ieee802.3-c22";
+ };
+ phy7: ethernet-phy@7 {
+ reg = <7>;
+ compatible = "ethernet-phy-ieee802.3-c22";
+ };
+ };
+
+ pip: pip@11800a0000000 {
+ interface@0 {
+ ethernet@0 {
+ phy-handle = <&phy7>;
+ };
+ ethernet@1 {
+ phy-handle = <&phy6>;
+ };
+ ethernet@2 {
+ phy-handle = <&phy5>;
+ };
+ };
+ };
+
+ uart0: serial@1180000000800 {
+ clock-frequency = <500000000>;
+ };
+
+ usbn: usbn@1180068000000 {
+ refclk-frequency = <12000000>;
+ refclk-type = "crystal";
+ };
+ };
+
+ aliases {
+ pip = &pip;
+ };
+};
diff --git a/arch/mips/boot/dts/ingenic/jz4740.dtsi b/arch/mips/boot/dts/ingenic/jz4740.dtsi
index 8b2437cd019f..f6ae6ed9c4b1 100644
--- a/arch/mips/boot/dts/ingenic/jz4740.dtsi
+++ b/arch/mips/boot/dts/ingenic/jz4740.dtsi
@@ -5,7 +5,7 @@
#size-cells = <1>;
compatible = "ingenic,jz4740";
- cpuintc: interrupt-controller@0 {
+ cpuintc: interrupt-controller {
#address-cells = <0>;
#interrupt-cells = <1>;
interrupt-controller;
@@ -65,4 +65,18 @@
clocks = <&ext>, <&cgu JZ4740_CLK_UART1>;
clock-names = "baud", "module";
};
+
+ uhc: uhc@13030000 {
+ compatible = "ingenic,jz4740-ohci", "generic-ohci";
+ reg = <0x13030000 0x1000>;
+
+ clocks = <&cgu JZ4740_CLK_UHC>;
+ assigned-clocks = <&cgu JZ4740_CLK_UHC>;
+ assigned-clock-rates = <48000000>;
+
+ interrupt-parent = <&intc>;
+ interrupts = <3>;
+
+ status = "disabled";
+ };
};
diff --git a/arch/mips/boot/dts/lantiq/easy50712.dts b/arch/mips/boot/dts/lantiq/easy50712.dts
index 143b8a37b5e4..b59962585dde 100644
--- a/arch/mips/boot/dts/lantiq/easy50712.dts
+++ b/arch/mips/boot/dts/lantiq/easy50712.dts
@@ -52,7 +52,7 @@
};
gpio: pinmux@E100B10 {
- compatible = "lantiq,pinctrl-xway";
+ compatible = "lantiq,danube-pinctrl";
pinctrl-names = "default";
pinctrl-0 = <&state_default>;
diff --git a/arch/mips/boot/dts/pic32/pic32mzda-clk.dtsi b/arch/mips/boot/dts/pic32/pic32mzda-clk.dtsi
deleted file mode 100644
index ef1335012f43..000000000000
--- a/arch/mips/boot/dts/pic32/pic32mzda-clk.dtsi
+++ /dev/null
@@ -1,236 +0,0 @@
-/*
- * Device Tree Source for PIC32MZDA clock data
- *
- * Purna Chandra Mandal <purna.mandal@microchip.com>
- * Copyright (C) 2015 Microchip Technology Inc. All rights reserved.
- *
- * Licensed under GPLv2 or later.
- */
-
-/* all fixed rate clocks */
-
-/ {
- POSC:posc_clk { /* On-chip primary oscillator */
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <24000000>;
- };
-
- FRC:frc_clk { /* internal FRC oscillator */
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <8000000>;
- };
-
- BFRC:bfrc_clk { /* internal backup FRC oscillator */
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <8000000>;
- };
-
- LPRC:lprc_clk { /* internal low-power FRC oscillator */
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <32000>;
- };
-
- /* UPLL provides clock to USBCORE */
- UPLL:usb_phy_clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <24000000>;
- clock-output-names = "usbphy_clk";
- };
-
- TxCKI:txcki_clk { /* external clock input on TxCLKI pin */
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <4000000>;
- status = "disabled";
- };
-
- /* external clock input on REFCLKIx pin */
- REFIx:refix_clk {
- #clock-cells = <0>;
- compatible = "fixed-clock";
- clock-frequency = <24000000>;
- status = "disabled";
- };
-
- /* PIC32 specific clks */
- pic32_clktree {
- #address-cells = <1>;
- #size-cells = <1>;
- reg = <0x1f801200 0x200>;
- compatible = "microchip,pic32mzda-clk";
- ranges = <0 0x1f801200 0x200>;
-
- /* secondary oscillator; external input on SOSCI pin */
- SOSC:sosc_clk@0 {
- #clock-cells = <0>;
- compatible = "microchip,pic32mzda-sosc";
- clock-frequency = <32768>;
- reg = <0x000 0x10>, /* enable reg */
- <0x1d0 0x10>; /* status reg */
- microchip,bit-mask = <0x02>; /* enable mask */
- microchip,status-bit-mask = <0x10>; /* status-mask*/
- };
-
- FRCDIV:frcdiv_clk {
- #clock-cells = <0>;
- compatible = "microchip,pic32mzda-frcdivclk";
- clocks = <&FRC>;
- clock-output-names = "frcdiv_clk";
- };
-
- /* System PLL clock */
- SYSPLL:spll_clk@020 {
- #clock-cells = <0>;
- compatible = "microchip,pic32mzda-syspll";
- reg = <0x020 0x10>, /* SPLL register */
- <0x1d0 0x10>; /* CLKSTAT register */
- clocks = <&POSC>, <&FRC>;
- clock-output-names = "sys_pll";
- microchip,status-bit-mask = <0x80>; /* SPLLRDY */
- };
-
- /* system clock; mux with postdiv & slew */
- SYSCLK:sys_clk@1c0 {
- #clock-cells = <0>;
- compatible = "microchip,pic32mzda-sysclk-v2";
- reg = <0x1c0 0x04>; /* SLEWCON */
- clocks = <&FRCDIV>, <&SYSPLL>, <&POSC>, <&SOSC>,
- <&LPRC>, <&FRCDIV>;
- microchip,clock-indices = <0>, <1>, <2>, <4>,
- <5>, <7>;
- clock-output-names = "sys_clk";
- };
-
- /* Peripheral bus1 clock */
- PBCLK1:pb1_clk@140 {
- reg = <0x140 0x10>;
- #clock-cells = <0>;
- compatible = "microchip,pic32mzda-pbclk";
- clocks = <&SYSCLK>;
- clock-output-names = "pb1_clk";
- /* used by system modules, not gateable */
- microchip,ignore-unused;
- };
-
- /* Peripheral bus2 clock */
- PBCLK2:pb2_clk@150 {
- reg = <0x150 0x10>;
- #clock-cells = <0>;
- compatible = "microchip,pic32mzda-pbclk";
- clocks = <&SYSCLK>;
- clock-output-names = "pb2_clk";
- /* avoid gating even if unused */
- microchip,ignore-unused;
- };
-
- /* Peripheral bus3 clock */
- PBCLK3:pb3_clk@160 {
- reg = <0x160 0x10>;
- #clock-cells = <0>;
- compatible = "microchip,pic32mzda-pbclk";
- clocks = <&SYSCLK>;
- clock-output-names = "pb3_clk";
- };
-
- /* Peripheral bus4 clock(I/O ports, GPIO) */
- PBCLK4:pb4_clk@170 {
- reg = <0x170 0x10>;
- #clock-cells = <0>;
- compatible = "microchip,pic32mzda-pbclk";
- clocks = <&SYSCLK>;
- clock-output-names = "pb4_clk";
- };
-
- /* Peripheral bus clock */
- PBCLK5:pb5_clk@180 {
- reg = <0x180 0x10>;
- #clock-cells = <0>;
- compatible = "microchip,pic32mzda-pbclk";
- clocks = <&SYSCLK>;
- clock-output-names = "pb5_clk";
- };
-
- /* Peripheral Bus6 clock; */
- PBCLK6:pb6_clk@190 {
- reg = <0x190 0x10>;
- compatible = "microchip,pic32mzda-pbclk";
- clocks = <&SYSCLK>;
- #clock-cells = <0>;
- };
-
- /* Peripheral bus7 clock */
- PBCLK7:pb7_clk@1a0 {
- reg = <0x1a0 0x10>;
- #clock-cells = <0>;
- compatible = "microchip,pic32mzda-pbclk";
- /* CPU is driven by this clock; so named */
- clock-output-names = "cpu_clk";
- clocks = <&SYSCLK>;
- };
-
- /* Reference Oscillator clock for SPI/I2S */
- REFCLKO1:refo1_clk@80 {
- reg = <0x080 0x20>;
- #clock-cells = <0>;
- compatible = "microchip,pic32mzda-refoclk";
- clocks = <&SYSCLK>, <&PBCLK1>, <&POSC>, <&FRC>, <&LPRC>,
- <&SOSC>, <&SYSPLL>, <&REFIx>, <&BFRC>;
- microchip,clock-indices = <0>, <1>, <2>, <3>, <4>,
- <5>, <7>, <8>, <9>;
- clock-output-names = "refo1_clk";
- };
-
- /* Reference Oscillator clock for SQI */
- REFCLKO2:refo2_clk@a0 {
- reg = <0x0a0 0x20>;
- #clock-cells = <0>;
- compatible = "microchip,pic32mzda-refoclk";
- clocks = <&SYSCLK>, <&PBCLK1>, <&POSC>, <&FRC>, <&LPRC>,
- <&SOSC>, <&SYSPLL>, <&REFIx>, <&BFRC>;
- microchip,clock-indices = <0>, <1>, <2>, <3>, <4>,
- <5>, <7>, <8>, <9>;
- clock-output-names = "refo2_clk";
- };
-
- /* Reference Oscillator clock, ADC */
- REFCLKO3:refo3_clk@c0 {
- reg = <0x0c0 0x20>;
- compatible = "microchip,pic32mzda-refoclk";
- clocks = <&SYSCLK>, <&PBCLK1>, <&POSC>, <&FRC>, <&LPRC>,
- <&SOSC>, <&SYSPLL>, <&REFIx>, <&BFRC>;
- microchip,clock-indices = <0>, <1>, <2>, <3>, <4>,
- <5>, <7>, <8>, <9>;
- #clock-cells = <0>;
- clock-output-names = "refo3_clk";
- };
-
- /* Reference Oscillator clock */
- REFCLKO4:refo4_clk@e0 {
- reg = <0x0e0 0x20>;
- compatible = "microchip,pic32mzda-refoclk";
- clocks = <&SYSCLK>, <&PBCLK1>, <&POSC>, <&FRC>, <&LPRC>,
- <&SOSC>, <&SYSPLL>, <&REFIx>, <&BFRC>;
- microchip,clock-indices = <0>, <1>, <2>, <3>, <4>,
- <5>, <7>, <8>, <9>;
- #clock-cells = <0>;
- clock-output-names = "refo4_clk";
- };
-
- /* Reference Oscillator clock, LCD */
- REFCLKO5:refo5_clk@100 {
- reg = <0x100 0x20>;
- compatible = "microchip,pic32mzda-refoclk";
- clocks = <&SYSCLK>,<&PBCLK1>,<&POSC>,<&FRC>,<&LPRC>,
- <&SOSC>,<&SYSPLL>,<&REFIx>,<&BFRC>;
- microchip,clock-indices = <0>, <1>, <2>, <3>, <4>,
- <5>, <7>, <8>, <9>;
- #clock-cells = <0>;
- clock-output-names = "refo5_clk";
- };
- };
-};
diff --git a/arch/mips/boot/dts/pic32/pic32mzda.dtsi b/arch/mips/boot/dts/pic32/pic32mzda.dtsi
index ad9e3318c2ce..5353a639c4fb 100644
--- a/arch/mips/boot/dts/pic32/pic32mzda.dtsi
+++ b/arch/mips/boot/dts/pic32/pic32mzda.dtsi
@@ -6,11 +6,9 @@
* published by the Free Software Foundation.
*
*/
-
+#include <dt-bindings/clock/microchip,pic32-clock.h>
#include <dt-bindings/interrupt-controller/irq.h>
-#include "pic32mzda-clk.dtsi"
-
/ {
#address-cells = <1>;
#size-cells = <1>;
@@ -50,6 +48,29 @@
interrupts = <0 IRQ_TYPE_EDGE_RISING>;
};
+ /* external clock input on TxCLKI pin */
+ txcki: txcki_clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <4000000>;
+ status = "disabled";
+ };
+
+ /* external input on REFCLKIx pin */
+ refix: refix_clk {
+ #clock-cells = <0>;
+ compatible = "fixed-clock";
+ clock-frequency = <24000000>;
+ status = "disabled";
+ };
+
+ rootclk: clock-controller@1f801200 {
+ compatible = "microchip,pic32mzda-clk";
+ reg = <0x1f801200 0x200>;
+ #clock-cells = <1>;
+ microchip,pic32mzda-sosc;
+ };
+
evic: interrupt-controller@1f810000 {
compatible = "microchip,pic32mzda-evic";
interrupt-controller;
@@ -63,7 +84,7 @@
#size-cells = <1>;
compatible = "microchip,pic32mzda-pinctrl";
reg = <0x1f801400 0x400>;
- clocks = <&PBCLK1>;
+ clocks = <&rootclk PB1CLK>;
};
/* PORTA */
@@ -75,7 +96,7 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&PBCLK4>;
+ clocks = <&rootclk PB4CLK>;
microchip,gpio-bank = <0>;
gpio-ranges = <&pic32_pinctrl 0 0 16>;
};
@@ -89,7 +110,7 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&PBCLK4>;
+ clocks = <&rootclk PB4CLK>;
microchip,gpio-bank = <1>;
gpio-ranges = <&pic32_pinctrl 0 16 16>;
};
@@ -103,7 +124,7 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&PBCLK4>;
+ clocks = <&rootclk PB4CLK>;
microchip,gpio-bank = <2>;
gpio-ranges = <&pic32_pinctrl 0 32 16>;
};
@@ -117,7 +138,7 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&PBCLK4>;
+ clocks = <&rootclk PB4CLK>;
microchip,gpio-bank = <3>;
gpio-ranges = <&pic32_pinctrl 0 48 16>;
};
@@ -131,7 +152,7 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&PBCLK4>;
+ clocks = <&rootclk PB4CLK>;
microchip,gpio-bank = <4>;
gpio-ranges = <&pic32_pinctrl 0 64 16>;
};
@@ -145,7 +166,7 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&PBCLK4>;
+ clocks = <&rootclk PB4CLK>;
microchip,gpio-bank = <5>;
gpio-ranges = <&pic32_pinctrl 0 80 16>;
};
@@ -159,7 +180,7 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&PBCLK4>;
+ clocks = <&rootclk PB4CLK>;
microchip,gpio-bank = <6>;
gpio-ranges = <&pic32_pinctrl 0 96 16>;
};
@@ -173,7 +194,7 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&PBCLK4>;
+ clocks = <&rootclk PB4CLK>;
microchip,gpio-bank = <7>;
gpio-ranges = <&pic32_pinctrl 0 112 16>;
};
@@ -189,7 +210,7 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&PBCLK4>;
+ clocks = <&rootclk PB4CLK>;
microchip,gpio-bank = <8>;
gpio-ranges = <&pic32_pinctrl 0 128 16>;
};
@@ -203,7 +224,7 @@
gpio-controller;
interrupt-controller;
#interrupt-cells = <2>;
- clocks = <&PBCLK4>;
+ clocks = <&rootclk PB4CLK>;
microchip,gpio-bank = <9>;
gpio-ranges = <&pic32_pinctrl 0 144 16>;
};
@@ -212,7 +233,7 @@
compatible = "microchip,pic32mzda-sdhci";
reg = <0x1f8ec000 0x100>;
interrupts = <191 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&REFCLKO4>, <&PBCLK5>;
+ clocks = <&rootclk REF4CLK>, <&rootclk PB5CLK>;
clock-names = "base_clk", "sys_clk";
bus-width = <4>;
cap-sd-highspeed;
@@ -225,7 +246,7 @@
interrupts = <112 IRQ_TYPE_LEVEL_HIGH>,
<113 IRQ_TYPE_LEVEL_HIGH>,
<114 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&PBCLK2>;
+ clocks = <&rootclk PB2CLK>;
status = "disabled";
};
@@ -235,7 +256,7 @@
interrupts = <145 IRQ_TYPE_LEVEL_HIGH>,
<146 IRQ_TYPE_LEVEL_HIGH>,
<147 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&PBCLK2>;
+ clocks = <&rootclk PB2CLK>;
status = "disabled";
};
@@ -245,7 +266,7 @@
interrupts = <157 IRQ_TYPE_LEVEL_HIGH>,
<158 IRQ_TYPE_LEVEL_HIGH>,
<159 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&PBCLK2>;
+ clocks = <&rootclk PB2CLK>;
status = "disabled";
};
@@ -255,7 +276,7 @@
interrupts = <170 IRQ_TYPE_LEVEL_HIGH>,
<171 IRQ_TYPE_LEVEL_HIGH>,
<172 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&PBCLK2>;
+ clocks = <&rootclk PB2CLK>;
status = "disabled";
};
@@ -265,7 +286,7 @@
interrupts = <179 IRQ_TYPE_LEVEL_HIGH>,
<180 IRQ_TYPE_LEVEL_HIGH>,
<181 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&PBCLK2>;
+ clocks = <&rootclk PB2CLK>;
status = "disabled";
};
@@ -275,7 +296,7 @@
interrupts = <188 IRQ_TYPE_LEVEL_HIGH>,
<189 IRQ_TYPE_LEVEL_HIGH>,
<190 IRQ_TYPE_LEVEL_HIGH>;
- clocks = <&PBCLK2>;
+ clocks = <&rootclk PB2CLK>;
status = "disabled";
};
};
diff --git a/arch/mips/boot/dts/pic32/pic32mzda_sk.dts b/arch/mips/boot/dts/pic32/pic32mzda_sk.dts
index 5d434a50e85b..fc740102852e 100644
--- a/arch/mips/boot/dts/pic32/pic32mzda_sk.dts
+++ b/arch/mips/boot/dts/pic32/pic32mzda_sk.dts
@@ -95,8 +95,9 @@
pinctrl-names = "default";
pinctrl-0 = <&pinctrl_sdhc1>;
status = "okay";
- assigned-clocks = <&REFCLKO2>,<&REFCLKO4>,<&REFCLKO5>;
- assigned-clock-rates = <50000000>,<25000000>,<40000000>;
+ assigned-clocks = <&rootclk REF2CLK>, <&rootclk REF4CLK>,
+ <&rootclk REF5CLK>;
+ assigned-clock-rates = <50000000>, <25000000>, <40000000>;
};
&pic32_pinctrl {
diff --git a/arch/mips/boot/dts/qca/Makefile b/arch/mips/boot/dts/qca/Makefile
index 2d61455d585d..63a9ddf048c9 100644
--- a/arch/mips/boot/dts/qca/Makefile
+++ b/arch/mips/boot/dts/qca/Makefile
@@ -1,8 +1,9 @@
# All DTBs
dtb-$(CONFIG_ATH79) += ar9132_tl_wr1043nd_v1.dtb
-
-# Select a DTB to build in the kernel
-obj-$(CONFIG_DTB_TL_WR1043ND_V1) += ar9132_tl_wr1043nd_v1.dtb.o
+dtb-$(CONFIG_ATH79) += ar9331_dpt_module.dtb
+dtb-$(CONFIG_ATH79) += ar9331_dragino_ms14.dtb
+dtb-$(CONFIG_ATH79) += ar9331_omega.dtb
+dtb-$(CONFIG_ATH79) += ar9331_tl_mr3020.dtb
# Force kbuild to make empty built-in.o if necessary
obj- += dummy.o
diff --git a/arch/mips/boot/dts/qca/ar9132.dtsi b/arch/mips/boot/dts/qca/ar9132.dtsi
index 3ad4ba9b12fd..302f0a8d2988 100644
--- a/arch/mips/boot/dts/qca/ar9132.dtsi
+++ b/arch/mips/boot/dts/qca/ar9132.dtsi
@@ -1,3 +1,5 @@
+#include <dt-bindings/clock/ath79-clk.h>
+
/ {
compatible = "qca,ar9132";
@@ -11,6 +13,7 @@
cpu@0 {
device_type = "cpu";
compatible = "mips,mips24Kc";
+ clocks = <&pll ATH79_CLK_CPU>;
reg = <0>;
};
};
@@ -52,12 +55,12 @@
#qca,ddr-wb-channel-cells = <1>;
};
- uart@18020000 {
+ uart: uart@18020000 {
compatible = "ns8250";
reg = <0x18020000 0x20>;
interrupts = <3>;
- clocks = <&pll 2>;
+ clocks = <&pll ATH79_CLK_AHB>;
clock-names = "uart";
reg-io-width = <4>;
@@ -83,7 +86,7 @@
};
pll: pll-controller@18050000 {
- compatible = "qca,ar9132-ppl",
+ compatible = "qca,ar9132-pll",
"qca,ar9130-pll";
reg = <0x18050000 0x20>;
@@ -94,13 +97,13 @@
clock-output-names = "cpu", "ddr", "ahb";
};
- wdt@18060008 {
+ wdt: wdt@18060008 {
compatible = "qca,ar7130-wdt";
reg = <0x18060008 0x8>;
interrupts = <4>;
- clocks = <&pll 2>;
+ clocks = <&pll ATH79_CLK_AHB>;
clock-names = "wdt";
};
@@ -125,7 +128,7 @@
};
};
- usb@1b000100 {
+ usb: usb@1b000100 {
compatible = "qca,ar7100-ehci", "generic-ehci";
reg = <0x1b000100 0x100>;
@@ -140,11 +143,11 @@
status = "disabled";
};
- spi@1f000000 {
+ spi: spi@1f000000 {
compatible = "qca,ar9132-spi", "qca,ar7100-spi";
reg = <0x1f000000 0x10>;
- clocks = <&pll 2>;
+ clocks = <&pll ATH79_CLK_AHB>;
clock-names = "ahb";
status = "disabled";
diff --git a/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts b/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
index e535ee3c26a4..3c3b7ce5737b 100644
--- a/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
+++ b/arch/mips/boot/dts/qca/ar9132_tl_wr1043nd_v1.dts
@@ -9,70 +9,17 @@
compatible = "tplink,tl-wr1043nd-v1", "qca,ar9132";
model = "TP-Link TL-WR1043ND Version 1";
- alias {
- serial0 = "/ahb/apb/uart@18020000";
- };
-
memory@0 {
device_type = "memory";
reg = <0x0 0x2000000>;
};
- extosc: oscillator {
+ extosc: ref {
compatible = "fixed-clock";
#clock-cells = <0>;
clock-frequency = <40000000>;
};
- ahb {
- apb {
- uart@18020000 {
- status = "okay";
- };
-
- pll-controller@18050000 {
- clocks = <&extosc>;
- };
- };
-
- usb@1b000100 {
- status = "okay";
- };
-
- spi@1f000000 {
- status = "okay";
- num-cs = <1>;
-
- flash@0 {
- #address-cells = <1>;
- #size-cells = <1>;
- compatible = "s25sl064a";
- reg = <0>;
- spi-max-frequency = <25000000>;
-
- partition@0 {
- label = "u-boot";
- reg = <0x000000 0x020000>;
- };
-
- partition@1 {
- label = "firmware";
- reg = <0x020000 0x7D0000>;
- };
-
- partition@2 {
- label = "art";
- reg = <0x7F0000 0x010000>;
- read-only;
- };
- };
- };
- };
-
- usb-phy {
- status = "okay";
- };
-
gpio-keys {
compatible = "gpio-keys-polled";
#address-cells = <1>;
@@ -118,3 +65,48 @@
};
};
};
+
+&uart {
+ status = "okay";
+};
+
+&pll {
+ clocks = <&extosc>;
+};
+
+&usb {
+ status = "okay";
+};
+
+&usb_phy {
+ status = "okay";
+};
+
+&spi {
+ status = "okay";
+ num-cs = <1>;
+
+ flash@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "s25sl064a";
+ reg = <0>;
+ spi-max-frequency = <25000000>;
+
+ partition@0 {
+ label = "u-boot";
+ reg = <0x000000 0x020000>;
+ };
+
+ partition@1 {
+ label = "firmware";
+ reg = <0x020000 0x7D0000>;
+ };
+
+ partition@2 {
+ label = "art";
+ reg = <0x7F0000 0x010000>;
+ read-only;
+ };
+ };
+};
diff --git a/arch/mips/boot/dts/qca/ar9331.dtsi b/arch/mips/boot/dts/qca/ar9331.dtsi
new file mode 100644
index 000000000000..cf47ed4d8569
--- /dev/null
+++ b/arch/mips/boot/dts/qca/ar9331.dtsi
@@ -0,0 +1,155 @@
+#include <dt-bindings/clock/ath79-clk.h>
+
+/ {
+ compatible = "qca,ar9331";
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ cpus {
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ cpu@0 {
+ device_type = "cpu";
+ compatible = "mips,mips24Kc";
+ clocks = <&pll ATH79_CLK_CPU>;
+ reg = <0>;
+ };
+ };
+
+ cpuintc: interrupt-controller {
+ compatible = "qca,ar7100-cpu-intc";
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+
+ qca,ddr-wb-channel-interrupts = <2>, <3>;
+ qca,ddr-wb-channels = <&ddr_ctrl 3>, <&ddr_ctrl 2>;
+ };
+
+ ref: ref {
+ compatible = "fixed-clock";
+ #clock-cells = <0>;
+ };
+
+ ahb {
+ compatible = "simple-bus";
+ ranges;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ interrupt-parent = <&cpuintc>;
+
+ apb {
+ compatible = "simple-bus";
+ ranges;
+
+ #address-cells = <1>;
+ #size-cells = <1>;
+
+ interrupt-parent = <&miscintc>;
+
+ ddr_ctrl: memory-controller@18000000 {
+ compatible = "qca,ar7240-ddr-controller";
+ reg = <0x18000000 0x100>;
+
+ #qca,ddr-wb-channel-cells = <1>;
+ };
+
+ uart: uart@18020000 {
+ compatible = "qca,ar9330-uart";
+ reg = <0x18020000 0x14>;
+
+ interrupts = <3>;
+
+ clocks = <&ref>;
+ clock-names = "uart";
+
+ status = "disabled";
+ };
+
+ gpio: gpio@18040000 {
+ compatible = "qca,ar7100-gpio";
+ reg = <0x18040000 0x34>;
+ interrupts = <2>;
+
+ ngpios = <30>;
+
+ gpio-controller;
+ #gpio-cells = <2>;
+
+ interrupt-controller;
+ #interrupt-cells = <2>;
+
+ status = "disabled";
+ };
+
+ pll: pll-controller@18050000 {
+ compatible = "qca,ar9330-pll";
+ reg = <0x18050000 0x100>;
+
+ clocks = <&ref>;
+ clock-names = "ref";
+
+ #clock-cells = <1>;
+ };
+
+ miscintc: interrupt-controller@18060010 {
+ compatible = "qca,ar7240-misc-intc";
+ reg = <0x18060010 0x4>;
+
+ interrupt-parent = <&cpuintc>;
+ interrupts = <6>;
+
+ interrupt-controller;
+ #interrupt-cells = <1>;
+ };
+
+ rst: reset-controller@1806001c {
+ compatible = "qca,ar7100-reset";
+ reg = <0x1806001c 0x4>;
+
+ #reset-cells = <1>;
+ };
+ };
+
+ usb: usb@1b000100 {
+ compatible = "chipidea,usb2";
+ reg = <0x1b000000 0x200>;
+
+ interrupts = <3>;
+ resets = <&rst 5>;
+
+ phy-names = "usb-phy";
+ phys = <&usb_phy>;
+
+ status = "disabled";
+ };
+
+ spi: spi@1f000000 {
+ compatible = "qca,ar7100-spi";
+ reg = <0x1f000000 0x10>;
+
+ clocks = <&pll ATH79_CLK_AHB>;
+ clock-names = "ahb";
+
+ #address-cells = <1>;
+ #size-cells = <0>;
+
+ status = "disabled";
+ };
+ };
+
+ usb_phy: usb-phy {
+ compatible = "qca,ar7100-usb-phy";
+
+ reset-names = "usb-phy", "usb-suspend-override";
+ resets = <&rst 4>, <&rst 3>;
+
+ #phy-cells = <0>;
+
+ status = "disabled";
+ };
+};
diff --git a/arch/mips/boot/dts/qca/ar9331_dpt_module.dts b/arch/mips/boot/dts/qca/ar9331_dpt_module.dts
new file mode 100644
index 000000000000..98e74500e79d
--- /dev/null
+++ b/arch/mips/boot/dts/qca/ar9331_dpt_module.dts
@@ -0,0 +1,78 @@
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+#include "ar9331.dtsi"
+
+/ {
+ model = "DPTechnics DPT-Module";
+ compatible = "dptechnics,dpt-module";
+
+ aliases {
+ serial0 = &uart;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x4000000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ system {
+ label = "dpt-module:green:system";
+ gpios = <&gpio 27 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+ };
+
+ gpio-keys-polled {
+ compatible = "gpio-keys-polled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ poll-interval = <100>;
+
+ button@0 {
+ label = "reset";
+ linux,code = <KEY_RESTART>;
+ gpios = <&gpio 11 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&ref {
+ clock-frequency = <25000000>;
+};
+
+&uart {
+ status = "okay";
+};
+
+&gpio {
+ status = "okay";
+};
+
+&usb {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usb_phy {
+ status = "okay";
+};
+
+&spi {
+ num-chipselects = <1>;
+ status = "okay";
+
+ /* Winbond 25Q128FVSG SPI flash */
+ spiflash: w25q128@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "winbond,w25q128", "jedec,spi-nor";
+ spi-max-frequency = <104000000>;
+ reg = <0>;
+ };
+};
diff --git a/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts b/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts
new file mode 100644
index 000000000000..56f832076a69
--- /dev/null
+++ b/arch/mips/boot/dts/qca/ar9331_dragino_ms14.dts
@@ -0,0 +1,102 @@
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+#include "ar9331.dtsi"
+
+/ {
+ model = "Dragino MS14 (Dragino 2)";
+ compatible = "dragino,ms14";
+
+ aliases {
+ serial0 = &uart;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x4000000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ wlan {
+ label = "dragino2:red:wlan";
+ gpios = <&gpio 0 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ lan {
+ label = "dragino2:red:lan";
+ gpios = <&gpio 13 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ wan {
+ label = "dragino2:red:wan";
+ gpios = <&gpio 17 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ system {
+ label = "dragino2:red:system";
+ gpios = <&gpio 28 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+ };
+
+ gpio-keys-polled {
+ compatible = "gpio-keys-polled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ poll-interval = <100>;
+
+ button@0 {
+ label = "jumpstart";
+ linux,code = <KEY_WPS_BUTTON>;
+ gpios = <&gpio 11 GPIO_ACTIVE_LOW>;
+ };
+
+ button@1 {
+ label = "reset";
+ linux,code = <KEY_RESTART>;
+ gpios = <&gpio 12 GPIO_ACTIVE_LOW>;
+ };
+ };
+};
+
+&ref {
+ clock-frequency = <25000000>;
+};
+
+&uart {
+ status = "okay";
+};
+
+&gpio {
+ status = "okay";
+};
+
+&usb {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usb_phy {
+ status = "okay";
+};
+
+&spi {
+ num-chipselects = <1>;
+ status = "okay";
+
+ /* Winbond 25Q128BVFG SPI flash */
+ spiflash: w25q128@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "winbond,w25q128", "jedec,spi-nor";
+ spi-max-frequency = <104000000>;
+ reg = <0>;
+ };
+};
diff --git a/arch/mips/boot/dts/qca/ar9331_omega.dts b/arch/mips/boot/dts/qca/ar9331_omega.dts
new file mode 100644
index 000000000000..b2be3b04479d
--- /dev/null
+++ b/arch/mips/boot/dts/qca/ar9331_omega.dts
@@ -0,0 +1,78 @@
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+#include "ar9331.dtsi"
+
+/ {
+ model = "Onion Omega";
+ compatible = "onion,omega";
+
+ aliases {
+ serial0 = &uart;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x4000000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ system {
+ label = "onion:amber:system";
+ gpios = <&gpio 27 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+ };
+
+ gpio-keys-polled {
+ compatible = "gpio-keys-polled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ poll-interval = <100>;
+
+ button@0 {
+ label = "reset";
+ linux,code = <KEY_RESTART>;
+ gpios = <&gpio 11 GPIO_ACTIVE_HIGH>;
+ };
+ };
+};
+
+&ref {
+ clock-frequency = <25000000>;
+};
+
+&uart {
+ status = "okay";
+};
+
+&gpio {
+ status = "okay";
+};
+
+&usb {
+ dr_mode = "host";
+ status = "okay";
+};
+
+&usb_phy {
+ status = "okay";
+};
+
+&spi {
+ num-chipselects = <1>;
+ status = "okay";
+
+ /* Winbond 25Q128FVSG SPI flash */
+ spiflash: w25q128@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "winbond,w25q128", "jedec,spi-nor";
+ spi-max-frequency = <104000000>;
+ reg = <0>;
+ };
+};
diff --git a/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts b/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts
new file mode 100644
index 000000000000..919cf3b854a5
--- /dev/null
+++ b/arch/mips/boot/dts/qca/ar9331_tl_mr3020.dts
@@ -0,0 +1,118 @@
+/dts-v1/;
+
+#include <dt-bindings/gpio/gpio.h>
+#include <dt-bindings/input/input.h>
+
+#include "ar9331.dtsi"
+
+/ {
+ model = "TP-Link TL-MR3020";
+ compatible = "tplink,tl-mr3020";
+
+ aliases {
+ serial0 = &uart;
+ };
+
+ memory@0 {
+ device_type = "memory";
+ reg = <0x0 0x2000000>;
+ };
+
+ leds {
+ compatible = "gpio-leds";
+
+ wlan {
+ label = "tp-link:green:wlan";
+ gpios = <&gpio 0 GPIO_ACTIVE_HIGH>;
+ default-state = "off";
+ };
+
+ lan {
+ label = "tp-link:green:lan";
+ gpios = <&gpio 17 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ wps {
+ label = "tp-link:green:wps";
+ gpios = <&gpio 26 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+
+ led3g {
+ label = "tp-link:green:3g";
+ gpios = <&gpio 27 GPIO_ACTIVE_LOW>;
+ default-state = "off";
+ };
+ };
+
+ gpio-keys-polled {
+ compatible = "gpio-keys-polled";
+ #address-cells = <1>;
+ #size-cells = <0>;
+ poll-interval = <100>;
+
+ button@0 {
+ label = "wps";
+ linux,code = <KEY_WPS_BUTTON>;
+ gpios = <&gpio 11 GPIO_ACTIVE_HIGH>;
+ };
+
+ button@1 {
+ label = "sw1";
+ linux,code = <BTN_0>;
+ gpios = <&gpio 18 GPIO_ACTIVE_HIGH>;
+ };
+
+ button@2 {
+ label = "sw2";
+ linux,code = <BTN_1>;
+ gpios = <&gpio 20 GPIO_ACTIVE_HIGH>;
+ };
+ };
+
+ reg_usb_vbus: reg_usb_vbus {
+ compatible = "regulator-fixed";
+ regulator-name = "usb_vbus";
+ regulator-min-microvolt = <5000000>;
+ regulator-max-microvolt = <5000000>;
+ gpio = <&gpio 8 GPIO_ACTIVE_HIGH>;
+ enable-active-high;
+ };
+};
+
+&ref {
+ clock-frequency = <25000000>;
+};
+
+&uart {
+ status = "okay";
+};
+
+&gpio {
+ status = "okay";
+};
+
+&usb {
+ dr_mode = "host";
+ vbus-supply = <&reg_usb_vbus>;
+ status = "okay";
+};
+
+&usb_phy {
+ status = "okay";
+};
+
+&spi {
+ num-chipselects = <1>;
+ status = "okay";
+
+ /* Spansion S25FL032PIF SPI flash */
+ spiflash: s25sl032p@0 {
+ #address-cells = <1>;
+ #size-cells = <1>;
+ compatible = "spansion,s25sl032p", "jedec,spi-nor";
+ spi-max-frequency = <104000000>;
+ reg = <0>;
+ };
+};
diff --git a/arch/mips/boot/dts/ralink/mt7620a.dtsi b/arch/mips/boot/dts/ralink/mt7620a.dtsi
index 08bf24fefe9f..793c0c7ca921 100644
--- a/arch/mips/boot/dts/ralink/mt7620a.dtsi
+++ b/arch/mips/boot/dts/ralink/mt7620a.dtsi
@@ -9,7 +9,7 @@
};
};
- cpuintc: cpuintc@0 {
+ cpuintc: cpuintc {
#address-cells = <0>;
#interrupt-cells = <1>;
interrupt-controller;
diff --git a/arch/mips/boot/dts/ralink/rt2880.dtsi b/arch/mips/boot/dts/ralink/rt2880.dtsi
index 182afde2f2e1..fb2faef0ab79 100644
--- a/arch/mips/boot/dts/ralink/rt2880.dtsi
+++ b/arch/mips/boot/dts/ralink/rt2880.dtsi
@@ -9,7 +9,7 @@
};
};
- cpuintc: cpuintc@0 {
+ cpuintc: cpuintc {
#address-cells = <0>;
#interrupt-cells = <1>;
interrupt-controller;
diff --git a/arch/mips/boot/dts/ralink/rt3050.dtsi b/arch/mips/boot/dts/ralink/rt3050.dtsi
index e3203d414fee..d3cb57f985da 100644
--- a/arch/mips/boot/dts/ralink/rt3050.dtsi
+++ b/arch/mips/boot/dts/ralink/rt3050.dtsi
@@ -9,7 +9,7 @@
};
};
- cpuintc: cpuintc@0 {
+ cpuintc: cpuintc {
#address-cells = <0>;
#interrupt-cells = <1>;
interrupt-controller;
diff --git a/arch/mips/boot/dts/ralink/rt3883.dtsi b/arch/mips/boot/dts/ralink/rt3883.dtsi
index 3b131dd0d5ac..3d6fc9afdaf6 100644
--- a/arch/mips/boot/dts/ralink/rt3883.dtsi
+++ b/arch/mips/boot/dts/ralink/rt3883.dtsi
@@ -9,7 +9,7 @@
};
};
- cpuintc: cpuintc@0 {
+ cpuintc: cpuintc {
#address-cells = <0>;
#interrupt-cells = <1>;
interrupt-controller;
diff --git a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
index 686ebd11386d..48d21127c3f3 100644
--- a/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
+++ b/arch/mips/boot/dts/xilfpga/nexys4ddr.dts
@@ -10,7 +10,7 @@
reg = <0x0 0x08000000>;
};
- cpuintc: interrupt-controller@0 {
+ cpuintc: interrupt-controller {
#address-cells = <0>;
#interrupt-cells = <1>;
interrupt-controller;
diff --git a/arch/mips/boot/tools/.gitignore b/arch/mips/boot/tools/.gitignore
new file mode 100644
index 000000000000..be0ed065249b
--- /dev/null
+++ b/arch/mips/boot/tools/.gitignore
@@ -0,0 +1 @@
+relocs
diff --git a/arch/mips/boot/tools/Makefile b/arch/mips/boot/tools/Makefile
new file mode 100644
index 000000000000..d232a68f6c8a
--- /dev/null
+++ b/arch/mips/boot/tools/Makefile
@@ -0,0 +1,8 @@
+
+hostprogs-y += relocs
+relocs-objs += relocs_32.o
+relocs-objs += relocs_64.o
+relocs-objs += relocs_main.o
+PHONY += relocs
+relocs: $(obj)/relocs
+ @:
diff --git a/arch/mips/boot/tools/relocs.c b/arch/mips/boot/tools/relocs.c
new file mode 100644
index 000000000000..b9cbf78527e8
--- /dev/null
+++ b/arch/mips/boot/tools/relocs.c
@@ -0,0 +1,680 @@
+/* This is included from relocs_32/64.c */
+
+#define ElfW(type) _ElfW(ELF_BITS, type)
+#define _ElfW(bits, type) __ElfW(bits, type)
+#define __ElfW(bits, type) Elf##bits##_##type
+
+#define Elf_Addr ElfW(Addr)
+#define Elf_Ehdr ElfW(Ehdr)
+#define Elf_Phdr ElfW(Phdr)
+#define Elf_Shdr ElfW(Shdr)
+#define Elf_Sym ElfW(Sym)
+
+static Elf_Ehdr ehdr;
+
+struct relocs {
+ uint32_t *offset;
+ unsigned long count;
+ unsigned long size;
+};
+
+static struct relocs relocs;
+
+struct section {
+ Elf_Shdr shdr;
+ struct section *link;
+ Elf_Sym *symtab;
+ Elf_Rel *reltab;
+ char *strtab;
+ long shdr_offset;
+};
+static struct section *secs;
+
+static const char * const regex_sym_kernel = {
+/* Symbols matching this regex should never be relocated */
+ "^(__crc_)",
+};
+
+static regex_t sym_regex_c;
+
+static int regex_skip_reloc(const char *sym_name)
+{
+ return !regexec(&sym_regex_c, sym_name, 0, NULL, 0);
+}
+
+static void regex_init(void)
+{
+ char errbuf[128];
+ int err;
+
+ err = regcomp(&sym_regex_c, regex_sym_kernel,
+ REG_EXTENDED|REG_NOSUB);
+
+ if (err) {
+ regerror(err, &sym_regex_c, errbuf, sizeof(errbuf));
+ die("%s", errbuf);
+ }
+}
+
+static const char *rel_type(unsigned type)
+{
+ static const char * const type_name[] = {
+#define REL_TYPE(X)[X] = #X
+ REL_TYPE(R_MIPS_NONE),
+ REL_TYPE(R_MIPS_16),
+ REL_TYPE(R_MIPS_32),
+ REL_TYPE(R_MIPS_REL32),
+ REL_TYPE(R_MIPS_26),
+ REL_TYPE(R_MIPS_HI16),
+ REL_TYPE(R_MIPS_LO16),
+ REL_TYPE(R_MIPS_GPREL16),
+ REL_TYPE(R_MIPS_LITERAL),
+ REL_TYPE(R_MIPS_GOT16),
+ REL_TYPE(R_MIPS_PC16),
+ REL_TYPE(R_MIPS_CALL16),
+ REL_TYPE(R_MIPS_GPREL32),
+ REL_TYPE(R_MIPS_64),
+ REL_TYPE(R_MIPS_HIGHER),
+ REL_TYPE(R_MIPS_HIGHEST),
+ REL_TYPE(R_MIPS_PC21_S2),
+ REL_TYPE(R_MIPS_PC26_S2),
+#undef REL_TYPE
+ };
+ const char *name = "unknown type rel type name";
+
+ if (type < ARRAY_SIZE(type_name) && type_name[type])
+ name = type_name[type];
+ return name;
+}
+
+static const char *sec_name(unsigned shndx)
+{
+ const char *sec_strtab;
+ const char *name;
+
+ sec_strtab = secs[ehdr.e_shstrndx].strtab;
+ if (shndx < ehdr.e_shnum)
+ name = sec_strtab + secs[shndx].shdr.sh_name;
+ else if (shndx == SHN_ABS)
+ name = "ABSOLUTE";
+ else if (shndx == SHN_COMMON)
+ name = "COMMON";
+ else
+ name = "<noname>";
+ return name;
+}
+
+static struct section *sec_lookup(const char *secname)
+{
+ int i;
+
+ for (i = 0; i < ehdr.e_shnum; i++)
+ if (strcmp(secname, sec_name(i)) == 0)
+ return &secs[i];
+
+ return NULL;
+}
+
+static const char *sym_name(const char *sym_strtab, Elf_Sym *sym)
+{
+ const char *name;
+
+ if (sym->st_name)
+ name = sym_strtab + sym->st_name;
+ else
+ name = sec_name(sym->st_shndx);
+ return name;
+}
+
+#if BYTE_ORDER == LITTLE_ENDIAN
+#define le16_to_cpu(val) (val)
+#define le32_to_cpu(val) (val)
+#define le64_to_cpu(val) (val)
+#define be16_to_cpu(val) bswap_16(val)
+#define be32_to_cpu(val) bswap_32(val)
+#define be64_to_cpu(val) bswap_64(val)
+
+#define cpu_to_le16(val) (val)
+#define cpu_to_le32(val) (val)
+#define cpu_to_le64(val) (val)
+#define cpu_to_be16(val) bswap_16(val)
+#define cpu_to_be32(val) bswap_32(val)
+#define cpu_to_be64(val) bswap_64(val)
+#endif
+#if BYTE_ORDER == BIG_ENDIAN
+#define le16_to_cpu(val) bswap_16(val)
+#define le32_to_cpu(val) bswap_32(val)
+#define le64_to_cpu(val) bswap_64(val)
+#define be16_to_cpu(val) (val)
+#define be32_to_cpu(val) (val)
+#define be64_to_cpu(val) (val)
+
+#define cpu_to_le16(val) bswap_16(val)
+#define cpu_to_le32(val) bswap_32(val)
+#define cpu_to_le64(val) bswap_64(val)
+#define cpu_to_be16(val) (val)
+#define cpu_to_be32(val) (val)
+#define cpu_to_be64(val) (val)
+#endif
+
+static uint16_t elf16_to_cpu(uint16_t val)
+{
+ if (ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
+ return le16_to_cpu(val);
+ else
+ return be16_to_cpu(val);
+}
+
+static uint32_t elf32_to_cpu(uint32_t val)
+{
+ if (ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
+ return le32_to_cpu(val);
+ else
+ return be32_to_cpu(val);
+}
+
+static uint32_t cpu_to_elf32(uint32_t val)
+{
+ if (ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
+ return cpu_to_le32(val);
+ else
+ return cpu_to_be32(val);
+}
+
+#define elf_half_to_cpu(x) elf16_to_cpu(x)
+#define elf_word_to_cpu(x) elf32_to_cpu(x)
+
+#if ELF_BITS == 64
+static uint64_t elf64_to_cpu(uint64_t val)
+{
+ if (ehdr.e_ident[EI_DATA] == ELFDATA2LSB)
+ return le64_to_cpu(val);
+ else
+ return be64_to_cpu(val);
+}
+#define elf_addr_to_cpu(x) elf64_to_cpu(x)
+#define elf_off_to_cpu(x) elf64_to_cpu(x)
+#define elf_xword_to_cpu(x) elf64_to_cpu(x)
+#else
+#define elf_addr_to_cpu(x) elf32_to_cpu(x)
+#define elf_off_to_cpu(x) elf32_to_cpu(x)
+#define elf_xword_to_cpu(x) elf32_to_cpu(x)
+#endif
+
+static void read_ehdr(FILE *fp)
+{
+ if (fread(&ehdr, sizeof(ehdr), 1, fp) != 1)
+ die("Cannot read ELF header: %s\n", strerror(errno));
+
+ if (memcmp(ehdr.e_ident, ELFMAG, SELFMAG) != 0)
+ die("No ELF magic\n");
+
+ if (ehdr.e_ident[EI_CLASS] != ELF_CLASS)
+ die("Not a %d bit executable\n", ELF_BITS);
+
+ if ((ehdr.e_ident[EI_DATA] != ELFDATA2LSB) &&
+ (ehdr.e_ident[EI_DATA] != ELFDATA2MSB))
+ die("Unknown ELF Endianness\n");
+
+ if (ehdr.e_ident[EI_VERSION] != EV_CURRENT)
+ die("Unknown ELF version\n");
+
+ /* Convert the fields to native endian */
+ ehdr.e_type = elf_half_to_cpu(ehdr.e_type);
+ ehdr.e_machine = elf_half_to_cpu(ehdr.e_machine);
+ ehdr.e_version = elf_word_to_cpu(ehdr.e_version);
+ ehdr.e_entry = elf_addr_to_cpu(ehdr.e_entry);
+ ehdr.e_phoff = elf_off_to_cpu(ehdr.e_phoff);
+ ehdr.e_shoff = elf_off_to_cpu(ehdr.e_shoff);
+ ehdr.e_flags = elf_word_to_cpu(ehdr.e_flags);
+ ehdr.e_ehsize = elf_half_to_cpu(ehdr.e_ehsize);
+ ehdr.e_phentsize = elf_half_to_cpu(ehdr.e_phentsize);
+ ehdr.e_phnum = elf_half_to_cpu(ehdr.e_phnum);
+ ehdr.e_shentsize = elf_half_to_cpu(ehdr.e_shentsize);
+ ehdr.e_shnum = elf_half_to_cpu(ehdr.e_shnum);
+ ehdr.e_shstrndx = elf_half_to_cpu(ehdr.e_shstrndx);
+
+ if ((ehdr.e_type != ET_EXEC) && (ehdr.e_type != ET_DYN))
+ die("Unsupported ELF header type\n");
+
+ if (ehdr.e_machine != ELF_MACHINE)
+ die("Not for %s\n", ELF_MACHINE_NAME);
+
+ if (ehdr.e_version != EV_CURRENT)
+ die("Unknown ELF version\n");
+
+ if (ehdr.e_ehsize != sizeof(Elf_Ehdr))
+ die("Bad Elf header size\n");
+
+ if (ehdr.e_phentsize != sizeof(Elf_Phdr))
+ die("Bad program header entry\n");
+
+ if (ehdr.e_shentsize != sizeof(Elf_Shdr))
+ die("Bad section header entry\n");
+
+ if (ehdr.e_shstrndx >= ehdr.e_shnum)
+ die("String table index out of bounds\n");
+}
+
+static void read_shdrs(FILE *fp)
+{
+ int i;
+ Elf_Shdr shdr;
+
+ secs = calloc(ehdr.e_shnum, sizeof(struct section));
+ if (!secs)
+ die("Unable to allocate %d section headers\n", ehdr.e_shnum);
+
+ if (fseek(fp, ehdr.e_shoff, SEEK_SET) < 0)
+ die("Seek to %d failed: %s\n", ehdr.e_shoff, strerror(errno));
+
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+
+ sec->shdr_offset = ftell(fp);
+ if (fread(&shdr, sizeof(shdr), 1, fp) != 1)
+ die("Cannot read ELF section headers %d/%d: %s\n",
+ i, ehdr.e_shnum, strerror(errno));
+ sec->shdr.sh_name = elf_word_to_cpu(shdr.sh_name);
+ sec->shdr.sh_type = elf_word_to_cpu(shdr.sh_type);
+ sec->shdr.sh_flags = elf_xword_to_cpu(shdr.sh_flags);
+ sec->shdr.sh_addr = elf_addr_to_cpu(shdr.sh_addr);
+ sec->shdr.sh_offset = elf_off_to_cpu(shdr.sh_offset);
+ sec->shdr.sh_size = elf_xword_to_cpu(shdr.sh_size);
+ sec->shdr.sh_link = elf_word_to_cpu(shdr.sh_link);
+ sec->shdr.sh_info = elf_word_to_cpu(shdr.sh_info);
+ sec->shdr.sh_addralign = elf_xword_to_cpu(shdr.sh_addralign);
+ sec->shdr.sh_entsize = elf_xword_to_cpu(shdr.sh_entsize);
+ if (sec->shdr.sh_link < ehdr.e_shnum)
+ sec->link = &secs[sec->shdr.sh_link];
+ }
+}
+
+static void read_strtabs(FILE *fp)
+{
+ int i;
+
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+
+ if (sec->shdr.sh_type != SHT_STRTAB)
+ continue;
+
+ sec->strtab = malloc(sec->shdr.sh_size);
+ if (!sec->strtab)
+ die("malloc of %d bytes for strtab failed\n",
+ sec->shdr.sh_size);
+
+ if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0)
+ die("Seek to %d failed: %s\n",
+ sec->shdr.sh_offset, strerror(errno));
+
+ if (fread(sec->strtab, 1, sec->shdr.sh_size, fp) !=
+ sec->shdr.sh_size)
+ die("Cannot read symbol table: %s\n", strerror(errno));
+ }
+}
+
+static void read_symtabs(FILE *fp)
+{
+ int i, j;
+
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+ if (sec->shdr.sh_type != SHT_SYMTAB)
+ continue;
+
+ sec->symtab = malloc(sec->shdr.sh_size);
+ if (!sec->symtab)
+ die("malloc of %d bytes for symtab failed\n",
+ sec->shdr.sh_size);
+
+ if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0)
+ die("Seek to %d failed: %s\n",
+ sec->shdr.sh_offset, strerror(errno));
+
+ if (fread(sec->symtab, 1, sec->shdr.sh_size, fp) !=
+ sec->shdr.sh_size)
+ die("Cannot read symbol table: %s\n", strerror(errno));
+
+ for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Sym); j++) {
+ Elf_Sym *sym = &sec->symtab[j];
+
+ sym->st_name = elf_word_to_cpu(sym->st_name);
+ sym->st_value = elf_addr_to_cpu(sym->st_value);
+ sym->st_size = elf_xword_to_cpu(sym->st_size);
+ sym->st_shndx = elf_half_to_cpu(sym->st_shndx);
+ }
+ }
+}
+
+static void read_relocs(FILE *fp)
+{
+ static unsigned long base = 0;
+ int i, j;
+
+ if (!base) {
+ struct section *sec = sec_lookup(".text");
+
+ if (!sec)
+ die("Could not find .text section\n");
+
+ base = sec->shdr.sh_addr;
+ }
+
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+
+ if (sec->shdr.sh_type != SHT_REL_TYPE)
+ continue;
+
+ sec->reltab = malloc(sec->shdr.sh_size);
+ if (!sec->reltab)
+ die("malloc of %d bytes for relocs failed\n",
+ sec->shdr.sh_size);
+
+ if (fseek(fp, sec->shdr.sh_offset, SEEK_SET) < 0)
+ die("Seek to %d failed: %s\n",
+ sec->shdr.sh_offset, strerror(errno));
+
+ if (fread(sec->reltab, 1, sec->shdr.sh_size, fp) !=
+ sec->shdr.sh_size)
+ die("Cannot read symbol table: %s\n", strerror(errno));
+
+ for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
+ Elf_Rel *rel = &sec->reltab[j];
+
+ rel->r_offset = elf_addr_to_cpu(rel->r_offset);
+ /* Set offset into kernel image */
+ rel->r_offset -= base;
+#if (ELF_BITS == 32)
+ rel->r_info = elf_xword_to_cpu(rel->r_info);
+#else
+ /* Convert MIPS64 RELA format - only the symbol
+ * index needs converting to native endianness
+ */
+ rel->r_info = rel->r_info;
+ ELF_R_SYM(rel->r_info) = elf32_to_cpu(ELF_R_SYM(rel->r_info));
+#endif
+#if (SHT_REL_TYPE == SHT_RELA)
+ rel->r_addend = elf_xword_to_cpu(rel->r_addend);
+#endif
+ }
+ }
+}
+
+static void remove_relocs(FILE *fp)
+{
+ int i;
+ Elf_Shdr shdr;
+
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ struct section *sec = &secs[i];
+
+ if (sec->shdr.sh_type != SHT_REL_TYPE)
+ continue;
+
+ if (fseek(fp, sec->shdr_offset, SEEK_SET) < 0)
+ die("Seek to %d failed: %s\n",
+ sec->shdr_offset, strerror(errno));
+
+ if (fread(&shdr, sizeof(shdr), 1, fp) != 1)
+ die("Cannot read ELF section headers %d/%d: %s\n",
+ i, ehdr.e_shnum, strerror(errno));
+
+ /* Set relocation section size to 0, effectively removing it.
+ * This is necessary because objcopy does not support relocations
+ * when creating a 32-bit ELF from a 64-bit one.
+ */
+ shdr.sh_size = 0;
+
+ if (fseek(fp, sec->shdr_offset, SEEK_SET) < 0)
+ die("Seek to %d failed: %s\n",
+ sec->shdr_offset, strerror(errno));
+
+ if (fwrite(&shdr, sizeof(shdr), 1, fp) != 1)
+ die("Cannot write ELF section headers %d/%d: %s\n",
+ i, ehdr.e_shnum, strerror(errno));
+ }
+}
+
+static void add_reloc(struct relocs *r, uint32_t offset, unsigned type)
+{
+ /* Relocation representation in binary table:
+ * |76543210|76543210|76543210|76543210|
+ * | Type   | offset from _text >> 2   |
+ */
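+ /* For example, a reloc of type R_MIPS_26 (4) at offset 0x00040000
+ * from _text is packed as (0x00040000 >> 2) | (4 << 24) = 0x04010000.
+ */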
+ offset >>= 2;
+ if (offset > 0x00FFFFFF)
+ die("Kernel image exceeds maximum size for relocation!\n");
+
+ offset = (offset & 0x00FFFFFF) | ((type & 0xFF) << 24);
+
+ if (r->count == r->size) {
+ unsigned long newsize = r->size + 50000;
+ void *mem = realloc(r->offset, newsize * sizeof(r->offset[0]));
+
+ if (!mem)
+ die("realloc failed\n");
+
+ r->offset = mem;
+ r->size = newsize;
+ }
+ r->offset[r->count++] = offset;
+}
+
+static void walk_relocs(int (*process)(struct section *sec, Elf_Rel *rel,
+ Elf_Sym *sym, const char *symname))
+{
+ int i;
+
+ /* Walk through the relocations */
+ for (i = 0; i < ehdr.e_shnum; i++) {
+ char *sym_strtab;
+ Elf_Sym *sh_symtab;
+ struct section *sec_applies, *sec_symtab;
+ int j;
+ struct section *sec = &secs[i];
+
+ if (sec->shdr.sh_type != SHT_REL_TYPE)
+ continue;
+
+ sec_symtab = sec->link;
+ sec_applies = &secs[sec->shdr.sh_info];
+ if (!(sec_applies->shdr.sh_flags & SHF_ALLOC))
+ continue;
+
+ sh_symtab = sec_symtab->symtab;
+ sym_strtab = sec_symtab->link->strtab;
+ for (j = 0; j < sec->shdr.sh_size/sizeof(Elf_Rel); j++) {
+ Elf_Rel *rel = &sec->reltab[j];
+ Elf_Sym *sym = &sh_symtab[ELF_R_SYM(rel->r_info)];
+ const char *symname = sym_name(sym_strtab, sym);
+
+ process(sec, rel, sym, symname);
+ }
+ }
+}
+
+static int do_reloc(struct section *sec, Elf_Rel *rel, Elf_Sym *sym,
+ const char *symname)
+{
+ unsigned r_type = ELF_R_TYPE(rel->r_info);
+ unsigned bind = ELF_ST_BIND(sym->st_info);
+
+ if ((bind == STB_WEAK) && (sym->st_value == 0)) {
+ /* Don't relocate weak symbols without a target */
+ return 0;
+ }
+
+ if (regex_skip_reloc(symname))
+ return 0;
+
+ switch (r_type) {
+ case R_MIPS_NONE:
+ case R_MIPS_REL32:
+ case R_MIPS_PC16:
+ case R_MIPS_PC21_S2:
+ case R_MIPS_PC26_S2:
+ /*
+ * NONE can be ignored and PC relative relocations don't
+ * need to be adjusted.
+ */
+ case R_MIPS_HIGHEST:
+ case R_MIPS_HIGHER:
+ /* We support relocating within the same 4Gb segment only,
+ * thus leaving the top 32bits unchanged
+ */
+ case R_MIPS_LO16:
+ /* We support relocating by 64k jumps only
+ * thus leaving the bottom 16bits unchanged
+ */
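+ /* For example, moving the kernel up by 0x10000 (64k) leaves every
+ * %lo() value untouched; only the matching %hi() words, handled
+ * below, need patching.
+ */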
+ break;
+
+ case R_MIPS_64:
+ case R_MIPS_32:
+ case R_MIPS_26:
+ case R_MIPS_HI16:
+ add_reloc(&relocs, rel->r_offset, r_type);
+ break;
+
+ default:
+ die("Unsupported relocation type: %s (%d)\n",
+ rel_type(r_type), r_type);
+ break;
+ }
+
+ return 0;
+}
+
+static int write_reloc_as_bin(uint32_t v, FILE *f)
+{
+ unsigned char buf[4];
+
+ v = cpu_to_elf32(v);
+
+ memcpy(buf, &v, sizeof(uint32_t));
+ return fwrite(buf, 1, 4, f);
+}
+
+static int write_reloc_as_text(uint32_t v, FILE *f)
+{
+ int res;
+
+ res = fprintf(f, "\t.long 0x%08"PRIx32"\n", v);
+ if (res < 0)
+ return res;
+ else
+ return sizeof(uint32_t);
+}
+
+static void emit_relocs(int as_text, int as_bin, FILE *outf)
+{
+ int i;
+ int (*write_reloc)(uint32_t, FILE *) = write_reloc_as_bin;
+ int size = 0;
+ int size_reserved;
+ struct section *sec_reloc;
+
+ sec_reloc = sec_lookup(".data.reloc");
+ if (!sec_reloc)
+ die("Could not find relocation section\n");
+
+ size_reserved = sec_reloc->shdr.sh_size;
+
+ /* Collect up the relocations */
+ walk_relocs(do_reloc);
+
+ /* Print the relocations */
+ if (as_text) {
+ /* Print the relocations in a form that
+ * gas will accept.
+ */
+ printf(".section \".data.reloc\",\"a\"\n");
+ printf(".balign 4\n");
+ /* Output text to stdout */
+ write_reloc = write_reloc_as_text;
+ outf = stdout;
+ } else if (as_bin) {
+ /* Output raw binary to stdout */
+ outf = stdout;
+ } else {
+ /* Seek to offset of the relocation section.
+ * Each relocation is then written into the
+ * vmlinux kernel image.
+ */
+ if (fseek(outf, sec_reloc->shdr.sh_offset, SEEK_SET) < 0) {
+ die("Seek to %d failed: %s\n",
+ sec_reloc->shdr.sh_offset, strerror(errno));
+ }
+ }
+
+ for (i = 0; i < relocs.count; i++)
+ size += write_reloc(relocs.offset[i], outf);
+
+ /* Write a terminating zero entry, but only if we've actually written some relocs */
+ if (size)
+ size += write_reloc(0, outf);
+
+ if (size > size_reserved)
+ /* Die, but suggest a value for CONFIG_RELOCATION_TABLE_SIZE
+ * which will fix this problem and allow a bit of headroom
+ * if more kernel features are enabled
+ */
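+ /* For example, a table of 0x00012345 bytes produces a suggested
+ * CONFIG_RELOCATION_TABLE_SIZE of 0x00013000.
+ */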
+ die("Relocations overflow available space!\n" \
+ "Please adjust CONFIG_RELOCATION_TABLE_SIZE " \
+ "to at least 0x%08x\n", (size + 0x1000) & ~0xFFF);
+}
+
+/*
+ * As an aid to debugging problems with different linkers
+ * print summary information about the relocs.
+ * Since different linkers tend to emit the sections in
+ * different orders we use the section names in the output.
+ */
+static int do_reloc_info(struct section *sec, Elf_Rel *rel, ElfW(Sym) *sym,
+ const char *symname)
+{
+ printf("%16s 0x%08x %16s %40s %16s\n",
+ sec_name(sec->shdr.sh_info),
+ (unsigned int)rel->r_offset,
+ rel_type(ELF_R_TYPE(rel->r_info)),
+ symname,
+ sec_name(sym->st_shndx));
+ return 0;
+}
+
+static void print_reloc_info(void)
+{
+ printf("%16s %10s %16s %40s %16s\n",
+ "reloc section",
+ "offset",
+ "reloc type",
+ "symbol",
+ "symbol section");
+ walk_relocs(do_reloc_info);
+}
+
+#if ELF_BITS == 64
+# define process process_64
+#else
+# define process process_32
+#endif
+
+void process(FILE *fp, int as_text, int as_bin,
+ int show_reloc_info, int keep_relocs)
+{
+ regex_init();
+ read_ehdr(fp);
+ read_shdrs(fp);
+ read_strtabs(fp);
+ read_symtabs(fp);
+ read_relocs(fp);
+ if (show_reloc_info) {
+ print_reloc_info();
+ return;
+ }
+ emit_relocs(as_text, as_bin, fp);
+ if (!keep_relocs)
+ remove_relocs(fp);
+}
diff --git a/arch/mips/boot/tools/relocs.h b/arch/mips/boot/tools/relocs.h
new file mode 100644
index 000000000000..3cf676f49e18
--- /dev/null
+++ b/arch/mips/boot/tools/relocs.h
@@ -0,0 +1,45 @@
+#ifndef RELOCS_H
+#define RELOCS_H
+
+#include <stdio.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <stdint.h>
+#include <inttypes.h>
+#include <string.h>
+#include <errno.h>
+#include <unistd.h>
+#include <elf.h>
+#include <byteswap.h>
+#define USE_BSD
+#include <endian.h>
+#include <regex.h>
+
+void die(char *fmt, ...);
+
+/*
+ * Introduced for MIPSr6
+ */
+#ifndef R_MIPS_PC21_S2
+#define R_MIPS_PC21_S2 60
+#endif
+
+#ifndef R_MIPS_PC26_S2
+#define R_MIPS_PC26_S2 61
+#endif
+
+#define ARRAY_SIZE(x) (sizeof(x) / sizeof((x)[0]))
+
+enum symtype {
+ S_ABS,
+ S_REL,
+ S_SEG,
+ S_LIN,
+ S_NSYMTYPES
+};
+
+void process_32(FILE *fp, int as_text, int as_bin,
+ int show_reloc_info, int keep_relocs);
+void process_64(FILE *fp, int as_text, int as_bin,
+ int show_reloc_info, int keep_relocs);
+#endif /* RELOCS_H */
diff --git a/arch/mips/boot/tools/relocs_32.c b/arch/mips/boot/tools/relocs_32.c
new file mode 100644
index 000000000000..915bdc07f5ed
--- /dev/null
+++ b/arch/mips/boot/tools/relocs_32.c
@@ -0,0 +1,17 @@
+#include "relocs.h"
+
+#define ELF_BITS 32
+
+#define ELF_MACHINE EM_MIPS
+#define ELF_MACHINE_NAME "MIPS"
+#define SHT_REL_TYPE SHT_REL
+#define Elf_Rel ElfW(Rel)
+
+#define ELF_CLASS ELFCLASS32
+#define ELF_R_SYM(val) ELF32_R_SYM(val)
+#define ELF_R_TYPE(val) ELF32_R_TYPE(val)
+#define ELF_ST_TYPE(o) ELF32_ST_TYPE(o)
+#define ELF_ST_BIND(o) ELF32_ST_BIND(o)
+#define ELF_ST_VISIBILITY(o) ELF32_ST_VISIBILITY(o)
+
+#include "relocs.c"
diff --git a/arch/mips/boot/tools/relocs_64.c b/arch/mips/boot/tools/relocs_64.c
new file mode 100644
index 000000000000..06066e6ac2f9
--- /dev/null
+++ b/arch/mips/boot/tools/relocs_64.c
@@ -0,0 +1,30 @@
+#include "relocs.h"
+
+#define ELF_BITS 64
+
+#define ELF_MACHINE EM_MIPS
+#define ELF_MACHINE_NAME "MIPS64"
+#define SHT_REL_TYPE SHT_RELA
+#define Elf_Rel Elf64_Rela
+
+typedef uint8_t Elf64_Byte;
+
+typedef union {
+ struct {
+ Elf64_Word r_sym; /* Symbol index. */
+ Elf64_Byte r_ssym; /* Special symbol. */
+ Elf64_Byte r_type3; /* Third relocation. */
+ Elf64_Byte r_type2; /* Second relocation. */
+ Elf64_Byte r_type; /* First relocation. */
+ } fields;
+ Elf64_Xword unused;
+} Elf64_Mips_Rela;
+
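+/* ELF_R_SYM()/ELF_R_TYPE() below read r_info through this union rather
+ * than with the generic ELF64_R_SYM()/ELF64_R_TYPE() macros, since
+ * MIPS64 packs the symbol index and relocation types as shown above.
+ */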
+#define ELF_CLASS ELFCLASS64
+#define ELF_R_SYM(val) (((Elf64_Mips_Rela *)(&val))->fields.r_sym)
+#define ELF_R_TYPE(val) (((Elf64_Mips_Rela *)(&val))->fields.r_type)
+#define ELF_ST_TYPE(o) ELF64_ST_TYPE(o)
+#define ELF_ST_BIND(o) ELF64_ST_BIND(o)
+#define ELF_ST_VISIBILITY(o) ELF64_ST_VISIBILITY(o)
+
+#include "relocs.c"
diff --git a/arch/mips/boot/tools/relocs_main.c b/arch/mips/boot/tools/relocs_main.c
new file mode 100644
index 000000000000..d8fe2343b8d0
--- /dev/null
+++ b/arch/mips/boot/tools/relocs_main.c
@@ -0,0 +1,84 @@
+
+#include <stdio.h>
+#include <stdint.h>
+#include <stdarg.h>
+#include <stdlib.h>
+#include <string.h>
+#include <errno.h>
+#include <endian.h>
+#include <elf.h>
+
+#include "relocs.h"
+
+void die(char *fmt, ...)
+{
+ va_list ap;
+
+ va_start(ap, fmt);
+ vfprintf(stderr, fmt, ap);
+ va_end(ap);
+ exit(1);
+}
+
+static void usage(void)
+{
+ die("relocs [--reloc-info|--text|--bin|--keep] vmlinux\n");
+}
+
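+/* Rough usage sketch (the file name is illustrative):
+ *   relocs vmlinux               patch the packed table into .data.reloc
+ *   relocs --bin vmlinux         dump the packed table to stdout instead
+ *   relocs --reloc-info vmlinux  print a human-readable summary
+ */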
+int main(int argc, char **argv)
+{
+ int show_reloc_info, as_text, as_bin, keep_relocs;
+ const char *fname;
+ FILE *fp;
+ int i;
+ unsigned char e_ident[EI_NIDENT];
+
+ show_reloc_info = 0;
+ as_text = 0;
+ as_bin = 0;
+ keep_relocs = 0;
+ fname = NULL;
+ for (i = 1; i < argc; i++) {
+ char *arg = argv[i];
+
+ if (*arg == '-') {
+ if (strcmp(arg, "--reloc-info") == 0) {
+ show_reloc_info = 1;
+ continue;
+ }
+ if (strcmp(arg, "--text") == 0) {
+ as_text = 1;
+ continue;
+ }
+ if (strcmp(arg, "--bin") == 0) {
+ as_bin = 1;
+ continue;
+ }
+ if (strcmp(arg, "--keep") == 0) {
+ keep_relocs = 1;
+ continue;
+ }
+ } else if (!fname) {
+ fname = arg;
+ continue;
+ }
+ usage();
+ }
+ if (!fname)
+ usage();
+
+ fp = fopen(fname, "r+");
+ if (!fp)
+ die("Cannot open %s: %s\n", fname, strerror(errno));
+
+ if (fread(&e_ident, 1, EI_NIDENT, fp) != EI_NIDENT)
+ die("Cannot read %s: %s", fname, strerror(errno));
+
+ rewind(fp);
+ if (e_ident[EI_CLASS] == ELFCLASS64)
+ process_64(fp, as_text, as_bin, show_reloc_info, keep_relocs);
+ else
+ process_32(fp, as_text, as_bin, show_reloc_info, keep_relocs);
+ fclose(fp);
+ return 0;
+}
diff --git a/arch/mips/cavium-octeon/csrc-octeon.c b/arch/mips/cavium-octeon/csrc-octeon.c
index 1882e6475dd0..23c2344a3552 100644
--- a/arch/mips/cavium-octeon/csrc-octeon.c
+++ b/arch/mips/cavium-octeon/csrc-octeon.c
@@ -19,6 +19,7 @@
#include <asm/octeon/cvmx-ipd-defs.h>
#include <asm/octeon/cvmx-mio-defs.h>
#include <asm/octeon/cvmx-rst-defs.h>
+#include <asm/octeon/cvmx-fpa-defs.h>
static u64 f;
static u64 rdiv;
@@ -65,9 +66,13 @@ void __init octeon_setup_delays(void)
*/
void octeon_init_cvmcount(void)
{
+ u64 clk_reg;
unsigned long flags;
unsigned loops = 2;
+ clk_reg = octeon_has_feature(OCTEON_FEATURE_FPA3) ?
+ CVMX_FPA_CLK_COUNT : CVMX_IPD_CLK_COUNT;
+
/* Clobber loops so GCC will not unroll the following while loop. */
asm("" : "+r" (loops));
@@ -77,18 +82,18 @@ void octeon_init_cvmcount(void)
* which should give more deterministic timing.
*/
while (loops--) {
- u64 ipd_clk_count = cvmx_read_csr(CVMX_IPD_CLK_COUNT);
+ u64 clk_count = cvmx_read_csr(clk_reg);
if (rdiv != 0) {
- ipd_clk_count *= rdiv;
+ clk_count *= rdiv;
if (f != 0) {
asm("dmultu\t%[cnt],%[f]\n\t"
"mfhi\t%[cnt]"
- : [cnt] "+r" (ipd_clk_count)
+ : [cnt] "+r" (clk_count)
: [f] "r" (f)
: "hi", "lo");
}
}
- write_c0_cvmcount(ipd_clk_count);
+ write_c0_cvmcount(clk_count);
}
local_irq_restore(flags);
}
diff --git a/arch/mips/cavium-octeon/dma-octeon.c b/arch/mips/cavium-octeon/dma-octeon.c
index 2cd45f5f9481..fd69528b24fb 100644
--- a/arch/mips/cavium-octeon/dma-octeon.c
+++ b/arch/mips/cavium-octeon/dma-octeon.c
@@ -125,7 +125,7 @@ static phys_addr_t octeon_small_dma_to_phys(struct device *dev,
static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
direction, attrs);
@@ -135,7 +135,7 @@ static dma_addr_t octeon_dma_map_page(struct device *dev, struct page *page,
}
static int octeon_dma_map_sg(struct device *dev, struct scatterlist *sg,
- int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
+ int nents, enum dma_data_direction direction, unsigned long attrs)
{
int r = swiotlb_map_sg_attrs(dev, sg, nents, direction, attrs);
mb();
@@ -157,7 +157,7 @@ static void octeon_dma_sync_sg_for_device(struct device *dev,
}
static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
void *ret;
@@ -189,7 +189,7 @@ static void *octeon_dma_alloc_coherent(struct device *dev, size_t size,
}
static void octeon_dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
+ void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
{
swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
diff --git a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
index 504ed61a47cd..b65a6c1ac016 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-bootmem.c
@@ -668,7 +668,7 @@ int64_t cvmx_bootmem_phy_named_block_alloc(uint64_t size, uint64_t min_addr,
/*
* Round size up to mult of minimum alignment bytes We need
* the actual size allocated to allow for blocks to be
- * coallesced when they are freed. The alloc routine does the
+ * coalesced when they are freed. The alloc routine does the
* same rounding up on all allocations.
*/
size = ALIGN(size, CVMX_BOOTMEM_ALIGNMENT_SIZE);
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
index 36e30d65ba05..ff49fc04500c 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper-board.c
@@ -186,15 +186,6 @@ int cvmx_helper_board_get_mii_address(int ipd_port)
return 7 - ipd_port;
else
return -1;
- case CVMX_BOARD_TYPE_CUST_DSR1000N:
- /*
- * Port 2 connects to Broadcom PHY (B5081). Other ports (0-1)
- * connect to a switch (BCM53115).
- */
- if (ipd_port == 2)
- return 8;
- else
- return -1;
case CVMX_BOARD_TYPE_KONTRON_S1901:
if (ipd_port == CVMX_HELPER_BOARD_MGMT_IPD_PORT)
return 1;
@@ -289,18 +280,6 @@ cvmx_helper_link_info_t __cvmx_helper_board_link_get(int ipd_port)
return result;
}
break;
- case CVMX_BOARD_TYPE_CUST_DSR1000N:
- if (ipd_port == 0 || ipd_port == 1) {
- /* Ports 0 and 1 connect to a switch (BCM53115). */
- result.s.link_up = 1;
- result.s.full_duplex = 1;
- result.s.speed = 1000;
- return result;
- } else {
- /* Port 2 uses a Broadcom PHY (B5081). */
- is_broadcom_phy = 1;
- }
- break;
}
phy_addr = cvmx_helper_board_get_mii_address(ipd_port);
@@ -765,7 +744,6 @@ enum cvmx_helper_board_usb_clock_types __cvmx_helper_board_usb_get_clock_type(vo
case CVMX_BOARD_TYPE_LANAI2_G:
case CVMX_BOARD_TYPE_NIC10E_66:
case CVMX_BOARD_TYPE_UBNT_E100:
- case CVMX_BOARD_TYPE_CUST_DSR1000N:
return USB_CLOCK_TYPE_CRYSTAL_12;
case CVMX_BOARD_TYPE_NIC10E:
return USB_CLOCK_TYPE_REF_12;
diff --git a/arch/mips/cavium-octeon/executive/cvmx-helper.c b/arch/mips/cavium-octeon/executive/cvmx-helper.c
index 376701f41cc2..ff26d0217b87 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-helper.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-helper.c
@@ -87,6 +87,8 @@ int cvmx_helper_get_number_of_interfaces(void)
return 9;
if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN52XX))
return 4;
+ if (OCTEON_IS_MODEL(OCTEON_CN7XXX))
+ return 5;
else
return 3;
}
@@ -260,6 +262,41 @@ static cvmx_helper_interface_mode_t __cvmx_get_mode_octeon2(int interface)
}
/**
+ * @INTERNAL
+ * Return interface mode for CN7XXX.
+ */
+static cvmx_helper_interface_mode_t __cvmx_get_mode_cn7xxx(int interface)
+{
+ union cvmx_gmxx_inf_mode mode;
+
+ mode.u64 = cvmx_read_csr(CVMX_GMXX_INF_MODE(interface));
+
+ switch (interface) {
+ case 0:
+ case 1:
+ switch (mode.cn68xx.mode) {
+ case 0:
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ case 1:
+ case 2:
+ return CVMX_HELPER_INTERFACE_MODE_SGMII;
+ case 3:
+ return CVMX_HELPER_INTERFACE_MODE_XAUI;
+ default:
+ return CVMX_HELPER_INTERFACE_MODE_SGMII;
+ }
+ case 2:
+ return CVMX_HELPER_INTERFACE_MODE_NPI;
+ case 3:
+ return CVMX_HELPER_INTERFACE_MODE_LOOP;
+ case 4:
+ return CVMX_HELPER_INTERFACE_MODE_RGMII;
+ default:
+ return CVMX_HELPER_INTERFACE_MODE_DISABLED;
+ }
+}
+
+/**
* Get the operating mode of an interface. Depending on the Octeon
* chip and configuration, this function returns an enumeration
* of the type of packet I/O supported by an interface.
@@ -278,6 +315,12 @@ cvmx_helper_interface_mode_t cvmx_helper_interface_get_mode(int interface)
return CVMX_HELPER_INTERFACE_MODE_DISABLED;
/*
+ * OCTEON III models
+ */
+ if (OCTEON_IS_MODEL(OCTEON_CN7XXX))
+ return __cvmx_get_mode_cn7xxx(interface);
+
+ /*
* Octeon II models
*/
if (OCTEON_IS_MODEL(OCTEON_CN6XXX) || OCTEON_IS_MODEL(OCTEON_CNF71XX))
diff --git a/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c b/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c
index e59d1b79f24c..2f415d9d0f3c 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-interrupt-decodes.c
@@ -68,7 +68,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
gmx_rx_int_en.s.pause_drp = 1;
/* Skipping gmx_rx_int_en.s.reserved_16_18 */
/*gmx_rx_int_en.s.ifgerr = 1; */
- /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+ /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -89,7 +89,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
/*gmx_rx_int_en.s.phy_spd = 1; */
/*gmx_rx_int_en.s.phy_link = 1; */
/*gmx_rx_int_en.s.ifgerr = 1; */
- /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+ /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -112,7 +112,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
/*gmx_rx_int_en.s.phy_spd = 1; */
/*gmx_rx_int_en.s.phy_link = 1; */
/*gmx_rx_int_en.s.ifgerr = 1; */
- /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+ /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -134,7 +134,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
/*gmx_rx_int_en.s.phy_spd = 1; */
/*gmx_rx_int_en.s.phy_link = 1; */
/*gmx_rx_int_en.s.ifgerr = 1; */
- /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+ /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -156,7 +156,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
/*gmx_rx_int_en.s.phy_spd = 1; */
/*gmx_rx_int_en.s.phy_link = 1; */
/*gmx_rx_int_en.s.ifgerr = 1; */
- /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+ /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -179,7 +179,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
/*gmx_rx_int_en.s.phy_spd = 1; */
/*gmx_rx_int_en.s.phy_link = 1; */
/*gmx_rx_int_en.s.ifgerr = 1; */
- /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+ /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
@@ -209,7 +209,7 @@ void __cvmx_interrupt_gmxx_rxx_int_en_enable(int index, int block)
gmx_rx_int_en.s.pause_drp = 1;
/* Skipping gmx_rx_int_en.s.reserved_16_18 */
/*gmx_rx_int_en.s.ifgerr = 1; */
- /*gmx_rx_int_en.s.coldet = 1; // Collsion detect */
+ /*gmx_rx_int_en.s.coldet = 1; // Collision detect */
/*gmx_rx_int_en.s.falerr = 1; // False carrier error or extend error after slottime */
/*gmx_rx_int_en.s.rsverr = 1; // RGMII reserved opcodes */
/*gmx_rx_int_en.s.pcterr = 1; // Bad Preamble / Protocol */
diff --git a/arch/mips/cavium-octeon/executive/cvmx-pko.c b/arch/mips/cavium-octeon/executive/cvmx-pko.c
index 87be167a7a6a..676fab50dd2b 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-pko.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-pko.c
@@ -189,7 +189,7 @@ void cvmx_pko_initialize_global(void)
/*
* Set the size of the PKO command buffers to an odd number of
* 64bit words. This allows the normal two word send to stay
- * aligned and never span a comamnd word buffer.
+ * aligned and never span a command word buffer.
*/
config.u64 = 0;
config.s.pool = CVMX_FPA_OUTPUT_BUFFER_POOL;
diff --git a/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c b/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
index 3d17fac29359..cc1b1d2a6fa1 100644
--- a/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
+++ b/arch/mips/cavium-octeon/executive/cvmx-sysinfo.c
@@ -32,86 +32,22 @@
#include <linux/module.h>
#include <asm/octeon/cvmx.h>
-#include <asm/octeon/cvmx-spinlock.h>
#include <asm/octeon/cvmx-sysinfo.h>
-/**
+/*
* This structure defines the private state maintained by sysinfo module.
- *
*/
-static struct {
- struct cvmx_sysinfo sysinfo; /* system information */
- cvmx_spinlock_t lock; /* mutex spinlock */
-
-} state = {
- .lock = CVMX_SPINLOCK_UNLOCKED_INITIALIZER
-};
-
+static struct cvmx_sysinfo sysinfo; /* system information */
/*
- * Global variables that define the min/max of the memory region set
- * up for 32 bit userspace access.
- */
-uint64_t linux_mem32_min;
-uint64_t linux_mem32_max;
-uint64_t linux_mem32_wired;
-uint64_t linux_mem32_offset;
-
-/**
- * This function returns the application information as obtained
+ * Returns the application information as obtained
* by the bootloader. This provides the core mask of the cores
* running the same application image, as well as the physical
* memory regions available to the core.
- *
- * Returns Pointer to the boot information structure
- *
*/
struct cvmx_sysinfo *cvmx_sysinfo_get(void)
{
- return &(state.sysinfo);
+ return &sysinfo;
}
EXPORT_SYMBOL(cvmx_sysinfo_get);
-/**
- * This function is used in non-simple executive environments (such as
- * Linux kernel, u-boot, etc.) to configure the minimal fields that
- * are required to use simple executive files directly.
- *
- * Locking (if required) must be handled outside of this
- * function
- *
- * @phy_mem_desc_ptr:
- * Pointer to global physical memory descriptor
- * (bootmem descriptor) @board_type: Octeon board
- * type enumeration
- *
- * @board_rev_major:
- * Board major revision
- * @board_rev_minor:
- * Board minor revision
- * @cpu_clock_hz:
- * CPU clock freqency in hertz
- *
- * Returns 0: Failure
- * 1: success
- */
-int cvmx_sysinfo_minimal_initialize(void *phy_mem_desc_ptr,
- uint16_t board_type,
- uint8_t board_rev_major,
- uint8_t board_rev_minor,
- uint32_t cpu_clock_hz)
-{
-
- /* The sysinfo structure was already initialized */
- if (state.sysinfo.board_type)
- return 0;
-
- memset(&(state.sysinfo), 0x0, sizeof(state.sysinfo));
- state.sysinfo.phy_mem_desc_ptr = phy_mem_desc_ptr;
- state.sysinfo.board_type = board_type;
- state.sysinfo.board_rev_major = board_rev_major;
- state.sysinfo.board_rev_minor = board_rev_minor;
- state.sysinfo.cpu_clock_hz = cpu_clock_hz;
-
- return 1;
-}
diff --git a/arch/mips/cavium-octeon/executive/octeon-model.c b/arch/mips/cavium-octeon/executive/octeon-model.c
index b2104bd9ab3b..d08a2bce653c 100644
--- a/arch/mips/cavium-octeon/executive/octeon-model.c
+++ b/arch/mips/cavium-octeon/executive/octeon-model.c
@@ -71,11 +71,11 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
uint32_t fuse_data = 0;
fus3.u64 = 0;
- if (!OCTEON_IS_MODEL(OCTEON_CN6XXX))
+ if (OCTEON_IS_MODEL(OCTEON_CN3XXX) || OCTEON_IS_MODEL(OCTEON_CN5XXX))
fus3.u64 = cvmx_read_csr(CVMX_L2D_FUS3);
fus_dat2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2);
fus_dat3.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT3);
- num_cores = cvmx_pop(cvmx_read_csr(CVMX_CIU_FUSE));
+ num_cores = cvmx_octeon_num_cores();
/* Make sure the non existent devices look disabled */
switch ((chip_id >> 8) & 0xff) {
@@ -121,6 +121,15 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
* later.
*/
switch (num_cores) {
+ case 48:
+ core_model = "90";
+ break;
+ case 44:
+ core_model = "88";
+ break;
+ case 40:
+ core_model = "85";
+ break;
case 32:
core_model = "80";
break;
@@ -297,7 +306,7 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
if (fus_dat3.s.nozip)
suffix = "SCP";
- if (fus_dat3.s.bar2_en)
+ if (fus_dat3.cn56xx.bar2_en)
suffix = "NSPB2";
}
if (fus3.cn56xx.crip_1024k)
@@ -369,6 +378,73 @@ static const char *__init octeon_model_get_string_buffer(uint32_t chip_id,
else
suffix = "AAP";
break;
+ case 0x94: /* CNF71XX */
+ family = "F71";
+ if (fus_dat3.cnf71xx.nozip)
+ suffix = "SCP";
+ else
+ suffix = "AAP";
+ break;
+ case 0x95: /* CN78XX */
+ if (num_cores == 6) /* Other core counts match generic */
+ core_model = "35";
+ if (OCTEON_IS_MODEL(OCTEON_CN76XX))
+ family = "76";
+ else
+ family = "78";
+ if (fus_dat3.cn78xx.l2c_crip == 2)
+ family = "77";
+ if (fus_dat3.cn78xx.nozip
+ && fus_dat3.cn78xx.nodfa_dte
+ && fus_dat3.cn78xx.nohna_dte) {
+ if (fus_dat3.cn78xx.nozip &&
+ !fus_dat2.cn78xx.raid_en &&
+ fus_dat3.cn78xx.nohna_dte) {
+ suffix = "CP";
+ } else {
+ suffix = "SCP";
+ }
+ } else if (fus_dat2.cn78xx.raid_en == 0)
+ suffix = "HCP";
+ else
+ suffix = "AAP";
+ break;
+ case 0x96: /* CN70XX */
+ family = "70";
+ if (cvmx_read_csr(CVMX_MIO_FUS_PDF) & (0x1ULL << 32))
+ family = "71";
+ if (fus_dat2.cn70xx.nocrypto)
+ suffix = "CP";
+ else if (fus_dat3.cn70xx.nodfa_dte)
+ suffix = "SCP";
+ else
+ suffix = "AAP";
+ break;
+ case 0x97: /* CN73XX */
+ if (num_cores == 6) /* Other core counts match generic */
+ core_model = "35";
+ family = "73";
+ if (fus_dat3.cn73xx.l2c_crip == 2)
+ family = "72";
+ if (fus_dat3.cn73xx.nozip
+ && fus_dat3.cn73xx.nodfa_dte
+ && fus_dat3.cn73xx.nohna_dte) {
+ if (!fus_dat2.cn73xx.raid_en)
+ suffix = "CP";
+ else
+ suffix = "SCP";
+ } else
+ suffix = "AAP";
+ break;
+ case 0x98: /* CN75XX */
+ family = "F75";
+ if (fus_dat3.cn78xx.nozip
+ && fus_dat3.cn78xx.nodfa_dte
+ && fus_dat3.cn78xx.nohna_dte)
+ suffix = "SCP";
+ else
+ suffix = "AAP";
+ break;
default:
family = "XX";
core_model = "XX";
diff --git a/arch/mips/cavium-octeon/octeon-irq.c b/arch/mips/cavium-octeon/octeon-irq.c
index 4f9eb0576884..5a9b87b7993e 100644
--- a/arch/mips/cavium-octeon/octeon-irq.c
+++ b/arch/mips/cavium-octeon/octeon-irq.c
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2004-2014 Cavium, Inc.
+ * Copyright (C) 2004-2016 Cavium, Inc.
*/
#include <linux/of_address.h>
@@ -19,16 +19,53 @@
#include <asm/octeon/octeon.h>
#include <asm/octeon/cvmx-ciu2-defs.h>
+#include <asm/octeon/cvmx-ciu3-defs.h>
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu0_en_mirror);
static DEFINE_PER_CPU(unsigned long, octeon_irq_ciu1_en_mirror);
static DEFINE_PER_CPU(raw_spinlock_t, octeon_irq_ciu_spinlock);
+static DEFINE_PER_CPU(unsigned int, octeon_irq_ciu3_idt_ip2);
+
+static DEFINE_PER_CPU(unsigned int, octeon_irq_ciu3_idt_ip3);
+static DEFINE_PER_CPU(struct octeon_ciu3_info *, octeon_ciu3_info);
+#define CIU3_MBOX_PER_CORE 10
+
+/*
+ * The 8 most significant bits of the intsn identify the interrupt major block.
+ * Each major block might use its own interrupt domain. Thus 256 domains are
+ * needed.
+ */
+#define MAX_CIU3_DOMAINS 256
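+/* e.g. intsn 0x04017 falls in major block 0x04, the software/mbox block */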
+
+typedef irq_hw_number_t (*octeon_ciu3_intsn2hw_t)(struct irq_domain *, unsigned int);
+
+/* Information for each ciu3 in the system */
+struct octeon_ciu3_info {
+ u64 ciu3_addr;
+ int node;
+ struct irq_domain *domain[MAX_CIU3_DOMAINS];
+ octeon_ciu3_intsn2hw_t intsn2hw[MAX_CIU3_DOMAINS];
+};
+
+/* Each ciu3 in the system uses its own data (one ciu3 per node) */
+static struct octeon_ciu3_info *octeon_ciu3_info_per_node[4];
struct octeon_irq_ciu_domain_data {
int num_sum; /* number of sum registers (2 or 3). */
};
-static __read_mostly u8 octeon_irq_ciu_to_irq[8][64];
+/* Register offsets from ciu3_addr */
+#define CIU3_CONST 0x220
+#define CIU3_IDT_CTL(_idt) ((_idt) * 8 + 0x110000)
+#define CIU3_IDT_PP(_idt, _idx) ((_idt) * 32 + (_idx) * 8 + 0x120000)
+#define CIU3_IDT_IO(_idt) ((_idt) * 8 + 0x130000)
+#define CIU3_DEST_PP_INT(_pp_ip) ((_pp_ip) * 8 + 0x200000)
+#define CIU3_DEST_IO_INT(_io) ((_io) * 8 + 0x210000)
+#define CIU3_ISC_CTL(_intsn) ((_intsn) * 8 + 0x80000000)
+#define CIU3_ISC_W1C(_intsn) ((_intsn) * 8 + 0x90000000)
+#define CIU3_ISC_W1S(_intsn) ((_intsn) * 8 + 0xa0000000)
+
+static __read_mostly int octeon_irq_ciu_to_irq[8][64];
struct octeon_ciu_chip_data {
union {
@@ -39,10 +76,11 @@ struct octeon_ciu_chip_data {
struct { /* only used for ciu/ciu2 */
u8 line;
u8 bit;
- u8 gpio_line;
};
};
+ int gpio_line;
int current_cpu; /* Next CPU expected to take this irq */
+ int ciu_node; /* NUMA node number of the CIU */
};
struct octeon_core_chip_data {
@@ -626,6 +664,18 @@ static void octeon_irq_ciu_enable_all_v2(struct irq_data *data)
}
}
+static int octeon_irq_ciu_set_type(struct irq_data *data, unsigned int t)
+{
+ irqd_set_trigger_type(data, t);
+
+ if (t & IRQ_TYPE_EDGE_BOTH)
+ irq_set_handler_locked(data, handle_edge_irq);
+ else
+ irq_set_handler_locked(data, handle_level_irq);
+
+ return IRQ_SET_MASK_OK;
+}
+
static void octeon_irq_gpio_setup(struct irq_data *data)
{
union cvmx_gpio_bit_cfgx cfg;
@@ -663,7 +713,7 @@ static int octeon_irq_ciu_gpio_set_type(struct irq_data *data, unsigned int t)
irqd_set_trigger_type(data, t);
octeon_irq_gpio_setup(data);
- if (irqd_get_trigger_type(data) & IRQ_TYPE_EDGE_BOTH)
+ if (t & IRQ_TYPE_EDGE_BOTH)
irq_set_handler_locked(data, handle_edge_irq);
else
irq_set_handler_locked(data, handle_level_irq);
@@ -863,6 +913,16 @@ static int octeon_irq_ciu_set_affinity_sum2(struct irq_data *data,
}
#endif
+static unsigned int edge_startup(struct irq_data *data)
+{
+ /* ack any pending edge-irq at startup, so there is
+ * an _edge_ to fire on when the event reappears.
+ */
+ data->chip->irq_ack(data);
+ data->chip->irq_enable(data);
+ return 0;
+}
+
/*
* Newer octeon chips have support for lockless CIU operation.
*/
@@ -1158,16 +1218,6 @@ static struct irq_chip *octeon_irq_ciu_chip;
static struct irq_chip *octeon_irq_ciu_chip_edge;
static struct irq_chip *octeon_irq_gpio_chip;
-static bool octeon_irq_virq_in_range(unsigned int virq)
-{
- /* We cannot let it overflow the mapping array. */
- if (virq < (1ul << 8 * sizeof(octeon_irq_ciu_to_irq[0][0])))
- return true;
-
- WARN_ONCE(true, "virq out of range %u.\n", virq);
- return false;
-}
-
static int octeon_irq_ciu_map(struct irq_domain *d,
unsigned int virq, irq_hw_number_t hw)
{
@@ -1176,13 +1226,6 @@ static int octeon_irq_ciu_map(struct irq_domain *d,
unsigned int bit = hw & 63;
struct octeon_irq_ciu_domain_data *dd = d->host_data;
- if (!octeon_irq_virq_in_range(virq))
- return -EINVAL;
-
- /* Don't map irq if it is reserved for GPIO. */
- if (line == 0 && bit >= 16 && bit <32)
- return 0;
-
if (line >= dd->num_sum || octeon_irq_ciu_to_irq[line][bit] != 0)
return -EINVAL;
@@ -1215,12 +1258,9 @@ static int octeon_irq_gpio_map(struct irq_domain *d,
unsigned int line, bit;
int r;
- if (!octeon_irq_virq_in_range(virq))
- return -EINVAL;
-
line = (hw + gpiod->base_hwirq) >> 6;
bit = (hw + gpiod->base_hwirq) & 63;
- if (line > ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
+ if (line >= ARRAY_SIZE(octeon_irq_ciu_to_irq) ||
octeon_irq_ciu_to_irq[line][bit] != 0)
return -EINVAL;
@@ -1502,10 +1542,6 @@ static int __init octeon_irq_init_ciu(
goto err;
}
- r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 0, 56);
- if (r)
- goto err;
-
r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_TWSI2, 0, 59);
if (r)
goto err;
@@ -1519,10 +1555,6 @@ static int __init octeon_irq_init_ciu(
goto err;
}
- r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB1, 1, 17);
- if (r)
- goto err;
-
/* Enable the CIU lines */
set_c0_status(STATUSF_IP3 | STATUSF_IP2);
if (octeon_irq_use_ip4)
@@ -1899,9 +1931,6 @@ static int octeon_irq_ciu2_map(struct irq_domain *d,
unsigned int line = hw >> 6;
unsigned int bit = hw & 63;
- if (!octeon_irq_virq_in_range(virq))
- return -EINVAL;
-
/*
* Don't map irq if it is reserved for GPIO.
* (Line 7 are the GPIO lines.)
@@ -2040,10 +2069,6 @@ static int __init octeon_irq_init_ciu2(
goto err;
}
- r = octeon_irq_force_ciu_mapping(ciu_domain, OCTEON_IRQ_USB0, 3, 44);
- if (r)
- goto err;
-
for (i = 0; i < 4; i++) {
r = octeon_irq_force_ciu_mapping(
ciu_domain, i + OCTEON_IRQ_PCI_INT0, 4, i);
@@ -2294,10 +2319,598 @@ static int __init octeon_irq_init_cib(struct device_node *ciu_node,
return 0;
}
+int octeon_irq_ciu3_xlat(struct irq_domain *d,
+ struct device_node *node,
+ const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type)
+{
+ struct octeon_ciu3_info *ciu3_info = d->host_data;
+ unsigned int hwirq, type, intsn_major;
+ union cvmx_ciu3_iscx_ctl isc;
+
+ if (intsize < 2)
+ return -EINVAL;
+ hwirq = intspec[0];
+ type = intspec[1];
+
+ if (hwirq >= (1 << 20))
+ return -EINVAL;
+
+ intsn_major = hwirq >> 12;
+ switch (intsn_major) {
+ case 0x04: /* Software handled separately. */
+ return -EINVAL;
+ default:
+ break;
+ }
+
+ isc.u64 = cvmx_read_csr(ciu3_info->ciu3_addr + CIU3_ISC_CTL(hwirq));
+ if (!isc.s.imp)
+ return -EINVAL;
+
+ switch (type) {
+ case 4: /* official value for level triggering. */
+ *out_type = IRQ_TYPE_LEVEL_HIGH;
+ break;
+ case 0: /* unofficial value, but we might as well let it work. */
+ case 1: /* official value for edge triggering. */
+ *out_type = IRQ_TYPE_EDGE_RISING;
+ break;
+ default: /* Nothing else is acceptable. */
+ return -EINVAL;
+ }
+
+ *out_hwirq = hwirq;
+
+ return 0;
+}
+
+void octeon_irq_ciu3_enable(struct irq_data *data)
+{
+ int cpu;
+ union cvmx_ciu3_iscx_ctl isc_ctl;
+ union cvmx_ciu3_iscx_w1c isc_w1c;
+ u64 isc_ctl_addr;
+
+ struct octeon_ciu_chip_data *cd;
+
+ cpu = next_cpu_for_irq(data);
+
+ cd = irq_data_get_irq_chip_data(data);
+
+ isc_w1c.u64 = 0;
+ isc_w1c.s.en = 1;
+ cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);
+
+ isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
+ isc_ctl.u64 = 0;
+ isc_ctl.s.en = 1;
+ isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
+ cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
+ cvmx_read_csr(isc_ctl_addr);
+}
+
+void octeon_irq_ciu3_disable(struct irq_data *data)
+{
+ u64 isc_ctl_addr;
+ union cvmx_ciu3_iscx_w1c isc_w1c;
+
+ struct octeon_ciu_chip_data *cd;
+
+ cd = irq_data_get_irq_chip_data(data);
+
+ isc_w1c.u64 = 0;
+ isc_w1c.s.en = 1;
+
+ isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
+ cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);
+ cvmx_write_csr(isc_ctl_addr, 0);
+ cvmx_read_csr(isc_ctl_addr);
+}
+
+void octeon_irq_ciu3_ack(struct irq_data *data)
+{
+ u64 isc_w1c_addr;
+ union cvmx_ciu3_iscx_w1c isc_w1c;
+ struct octeon_ciu_chip_data *cd;
+ u32 trigger_type = irqd_get_trigger_type(data);
+
+ /*
+ * We use a single irq_chip, so we have to do nothing to ack a
+ * level interrupt.
+ */
+ if (!(trigger_type & IRQ_TYPE_EDGE_BOTH))
+ return;
+
+ cd = irq_data_get_irq_chip_data(data);
+
+ isc_w1c.u64 = 0;
+ isc_w1c.s.raw = 1;
+
+ isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
+ cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
+ cvmx_read_csr(isc_w1c_addr);
+}
+
+void octeon_irq_ciu3_mask(struct irq_data *data)
+{
+ union cvmx_ciu3_iscx_w1c isc_w1c;
+ u64 isc_w1c_addr;
+ struct octeon_ciu_chip_data *cd;
+
+ cd = irq_data_get_irq_chip_data(data);
+
+ isc_w1c.u64 = 0;
+ isc_w1c.s.en = 1;
+
+ isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
+ cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
+ cvmx_read_csr(isc_w1c_addr);
+}
+
+void octeon_irq_ciu3_mask_ack(struct irq_data *data)
+{
+ union cvmx_ciu3_iscx_w1c isc_w1c;
+ u64 isc_w1c_addr;
+ struct octeon_ciu_chip_data *cd;
+ u32 trigger_type = irqd_get_trigger_type(data);
+
+ cd = irq_data_get_irq_chip_data(data);
+
+ isc_w1c.u64 = 0;
+ isc_w1c.s.en = 1;
+
+ /*
+ * We use a single irq_chip, so only ack an edge (!level)
+ * interrupt.
+ */
+ if (trigger_type & IRQ_TYPE_EDGE_BOTH)
+ isc_w1c.s.raw = 1;
+
+ isc_w1c_addr = cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn);
+ cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
+ cvmx_read_csr(isc_w1c_addr);
+}
+
+#ifdef CONFIG_SMP
+int octeon_irq_ciu3_set_affinity(struct irq_data *data,
+ const struct cpumask *dest, bool force)
+{
+ union cvmx_ciu3_iscx_ctl isc_ctl;
+ union cvmx_ciu3_iscx_w1c isc_w1c;
+ u64 isc_ctl_addr;
+ int cpu;
+ bool enable_one = !irqd_irq_disabled(data) && !irqd_irq_masked(data);
+ struct octeon_ciu_chip_data *cd = irq_data_get_irq_chip_data(data);
+
+ if (!cpumask_subset(dest, cpumask_of_node(cd->ciu_node)))
+ return -EINVAL;
+
+ if (!enable_one)
+ return IRQ_SET_MASK_OK;
+
+ cd = irq_data_get_irq_chip_data(data);
+ cpu = cpumask_first(dest);
+ if (cpu >= nr_cpu_ids)
+ cpu = smp_processor_id();
+ cd->current_cpu = cpu;
+
+ isc_w1c.u64 = 0;
+ isc_w1c.s.en = 1;
+ cvmx_write_csr(cd->ciu3_addr + CIU3_ISC_W1C(cd->intsn), isc_w1c.u64);
+
+ isc_ctl_addr = cd->ciu3_addr + CIU3_ISC_CTL(cd->intsn);
+ isc_ctl.u64 = 0;
+ isc_ctl.s.en = 1;
+ isc_ctl.s.idt = per_cpu(octeon_irq_ciu3_idt_ip2, cpu);
+ cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
+ cvmx_read_csr(isc_ctl_addr);
+
+ return IRQ_SET_MASK_OK;
+}
+#endif
+
+static struct irq_chip octeon_irq_chip_ciu3 = {
+ .name = "CIU3",
+ .irq_startup = edge_startup,
+ .irq_enable = octeon_irq_ciu3_enable,
+ .irq_disable = octeon_irq_ciu3_disable,
+ .irq_ack = octeon_irq_ciu3_ack,
+ .irq_mask = octeon_irq_ciu3_mask,
+ .irq_mask_ack = octeon_irq_ciu3_mask_ack,
+ .irq_unmask = octeon_irq_ciu3_enable,
+ .irq_set_type = octeon_irq_ciu_set_type,
+#ifdef CONFIG_SMP
+ .irq_set_affinity = octeon_irq_ciu3_set_affinity,
+ .irq_cpu_offline = octeon_irq_cpu_offline_ciu,
+#endif
+};
+
+int octeon_irq_ciu3_mapx(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw, struct irq_chip *chip)
+{
+ struct octeon_ciu3_info *ciu3_info = d->host_data;
+ struct octeon_ciu_chip_data *cd = kzalloc_node(sizeof(*cd), GFP_KERNEL,
+ ciu3_info->node);
+ if (!cd)
+ return -ENOMEM;
+ cd->intsn = hw;
+ cd->current_cpu = -1;
+ cd->ciu3_addr = ciu3_info->ciu3_addr;
+ cd->ciu_node = ciu3_info->node;
+ irq_set_chip_and_handler(virq, chip, handle_edge_irq);
+ irq_set_chip_data(virq, cd);
+
+ return 0;
+}
+
+static int octeon_irq_ciu3_map(struct irq_domain *d,
+ unsigned int virq, irq_hw_number_t hw)
+{
+ return octeon_irq_ciu3_mapx(d, virq, hw, &octeon_irq_chip_ciu3);
+}
+
+static struct irq_domain_ops octeon_dflt_domain_ciu3_ops = {
+ .map = octeon_irq_ciu3_map,
+ .unmap = octeon_irq_free_cd,
+ .xlate = octeon_irq_ciu3_xlat,
+};
+
+static void octeon_irq_ciu3_ip2(void)
+{
+ union cvmx_ciu3_destx_pp_int dest_pp_int;
+ struct octeon_ciu3_info *ciu3_info;
+ u64 ciu3_addr;
+
+ ciu3_info = __this_cpu_read(octeon_ciu3_info);
+ ciu3_addr = ciu3_info->ciu3_addr;
+
+ dest_pp_int.u64 = cvmx_read_csr(ciu3_addr + CIU3_DEST_PP_INT(3 * cvmx_get_local_core_num()));
+
+ if (likely(dest_pp_int.s.intr)) {
+ irq_hw_number_t intsn = dest_pp_int.s.intsn;
+ irq_hw_number_t hw;
+ struct irq_domain *domain;
+ /* Get the domain to use from the major block */
+ int block = intsn >> 12;
+ int ret;
+
+ domain = ciu3_info->domain[block];
+ if (ciu3_info->intsn2hw[block])
+ hw = ciu3_info->intsn2hw[block](domain, intsn);
+ else
+ hw = intsn;
+
+ ret = handle_domain_irq(domain, hw, NULL);
+ if (ret < 0) {
+ union cvmx_ciu3_iscx_w1c isc_w1c;
+ u64 isc_w1c_addr = ciu3_addr + CIU3_ISC_W1C(intsn);
+
+ isc_w1c.u64 = 0;
+ isc_w1c.s.en = 1;
+ cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
+ cvmx_read_csr(isc_w1c_addr);
+ spurious_interrupt();
+ }
+ } else {
+ spurious_interrupt();
+ }
+}
+
+/*
+ * 10 mbox per core starting from zero.
+ * Base mbox is core * 10
+ */
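+/* e.g. core 2, mbox 3 maps to intsn 0x04000 + 10 * 2 + 3 = 0x04017 */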
+static unsigned int octeon_irq_ciu3_base_mbox_intsn(int core)
+{
+ /* SW (mbox) are 0x04 in bits 12..19 */
+ return 0x04000 + CIU3_MBOX_PER_CORE * core;
+}
+
+static unsigned int octeon_irq_ciu3_mbox_intsn_for_core(int core, unsigned int mbox)
+{
+ return octeon_irq_ciu3_base_mbox_intsn(core) + mbox;
+}
+
+static unsigned int octeon_irq_ciu3_mbox_intsn_for_cpu(int cpu, unsigned int mbox)
+{
+ int local_core = octeon_coreid_for_cpu(cpu) & 0x3f;
+
+ return octeon_irq_ciu3_mbox_intsn_for_core(local_core, mbox);
+}
+
+static void octeon_irq_ciu3_mbox(void)
+{
+ union cvmx_ciu3_destx_pp_int dest_pp_int;
+ struct octeon_ciu3_info *ciu3_info;
+ u64 ciu3_addr;
+ int core = cvmx_get_local_core_num();
+
+ ciu3_info = __this_cpu_read(octeon_ciu3_info);
+ ciu3_addr = ciu3_info->ciu3_addr;
+
+ dest_pp_int.u64 = cvmx_read_csr(ciu3_addr + CIU3_DEST_PP_INT(1 + 3 * core));
+
+ if (likely(dest_pp_int.s.intr)) {
+ irq_hw_number_t intsn = dest_pp_int.s.intsn;
+ int mbox = intsn - octeon_irq_ciu3_base_mbox_intsn(core);
+
+ if (likely(mbox >= 0 && mbox < CIU3_MBOX_PER_CORE)) {
+ do_IRQ(mbox + OCTEON_IRQ_MBOX0);
+ } else {
+ union cvmx_ciu3_iscx_w1c isc_w1c;
+ u64 isc_w1c_addr = ciu3_addr + CIU3_ISC_W1C(intsn);
+
+ isc_w1c.u64 = 0;
+ isc_w1c.s.en = 1;
+ cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
+ cvmx_read_csr(isc_w1c_addr);
+ spurious_interrupt();
+ }
+ } else {
+ spurious_interrupt();
+ }
+}
+
+void octeon_ciu3_mbox_send(int cpu, unsigned int mbox)
+{
+ struct octeon_ciu3_info *ciu3_info;
+ unsigned int intsn;
+ union cvmx_ciu3_iscx_w1s isc_w1s;
+ u64 isc_w1s_addr;
+
+ if (WARN_ON_ONCE(mbox >= CIU3_MBOX_PER_CORE))
+ return;
+
+ intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox);
+ ciu3_info = per_cpu(octeon_ciu3_info, cpu);
+ isc_w1s_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1S(intsn);
+
+ isc_w1s.u64 = 0;
+ isc_w1s.s.raw = 1;
+
+ cvmx_write_csr(isc_w1s_addr, isc_w1s.u64);
+ cvmx_read_csr(isc_w1s_addr);
+}
+
+static void octeon_irq_ciu3_mbox_set_enable(struct irq_data *data, int cpu, bool en)
+{
+ struct octeon_ciu3_info *ciu3_info;
+ unsigned int intsn;
+ u64 isc_ctl_addr, isc_w1c_addr;
+ union cvmx_ciu3_iscx_ctl isc_ctl;
+ unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
+
+ intsn = octeon_irq_ciu3_mbox_intsn_for_cpu(cpu, mbox);
+ ciu3_info = per_cpu(octeon_ciu3_info, cpu);
+ isc_w1c_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1C(intsn);
+ isc_ctl_addr = ciu3_info->ciu3_addr + CIU3_ISC_CTL(intsn);
+
+ isc_ctl.u64 = 0;
+ isc_ctl.s.en = 1;
+
+ cvmx_write_csr(isc_w1c_addr, isc_ctl.u64);
+ cvmx_write_csr(isc_ctl_addr, 0);
+ if (en) {
+ unsigned int idt = per_cpu(octeon_irq_ciu3_idt_ip3, cpu);
+
+ isc_ctl.u64 = 0;
+ isc_ctl.s.en = 1;
+ isc_ctl.s.idt = idt;
+ cvmx_write_csr(isc_ctl_addr, isc_ctl.u64);
+ }
+ cvmx_read_csr(isc_ctl_addr);
+}
+
+static void octeon_irq_ciu3_mbox_enable(struct irq_data *data)
+{
+ int cpu;
+ unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
+
+ WARN_ON(mbox >= CIU3_MBOX_PER_CORE);
+
+ for_each_online_cpu(cpu)
+ octeon_irq_ciu3_mbox_set_enable(data, cpu, true);
+}
+
+static void octeon_irq_ciu3_mbox_disable(struct irq_data *data)
+{
+ int cpu;
+ unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
+
+ WARN_ON(mbox >= CIU3_MBOX_PER_CORE);
+
+ for_each_online_cpu(cpu)
+ octeon_irq_ciu3_mbox_set_enable(data, cpu, false);
+}
+
+static void octeon_irq_ciu3_mbox_ack(struct irq_data *data)
+{
+ struct octeon_ciu3_info *ciu3_info;
+ unsigned int intsn;
+ u64 isc_w1c_addr;
+ union cvmx_ciu3_iscx_w1c isc_w1c;
+ unsigned int mbox = data->irq - OCTEON_IRQ_MBOX0;
+
+ intsn = octeon_irq_ciu3_mbox_intsn_for_core(cvmx_get_local_core_num(), mbox);
+
+ isc_w1c.u64 = 0;
+ isc_w1c.s.raw = 1;
+
+ ciu3_info = __this_cpu_read(octeon_ciu3_info);
+ isc_w1c_addr = ciu3_info->ciu3_addr + CIU3_ISC_W1C(intsn);
+ cvmx_write_csr(isc_w1c_addr, isc_w1c.u64);
+ cvmx_read_csr(isc_w1c_addr);
+}
+
+static void octeon_irq_ciu3_mbox_cpu_online(struct irq_data *data)
+{
+ octeon_irq_ciu3_mbox_set_enable(data, smp_processor_id(), true);
+}
+
+static void octeon_irq_ciu3_mbox_cpu_offline(struct irq_data *data)
+{
+ octeon_irq_ciu3_mbox_set_enable(data, smp_processor_id(), false);
+}
+
+static int octeon_irq_ciu3_alloc_resources(struct octeon_ciu3_info *ciu3_info)
+{
+ u64 b = ciu3_info->ciu3_addr;
+ int idt_ip2, idt_ip3, idt_ip4;
+ int unused_idt2;
+ int core = cvmx_get_local_core_num();
+ int i;
+
+ __this_cpu_write(octeon_ciu3_info, ciu3_info);
+
+ /*
+ * 4 idt per core starting from 1 because zero is reserved.
+ * Base idt per core is 4 * core + 1
+ */
+ idt_ip2 = core * 4 + 1;
+ idt_ip3 = core * 4 + 2;
+ idt_ip4 = core * 4 + 3;
+ unused_idt2 = core * 4 + 4;
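+ /* e.g. core 1 gets idt 5 (ip2), 6 (ip3), 7 (ip4) and 8 (unused) */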
+ __this_cpu_write(octeon_irq_ciu3_idt_ip2, idt_ip2);
+ __this_cpu_write(octeon_irq_ciu3_idt_ip3, idt_ip3);
+
+ /* ip2 interrupts for this CPU */
+ cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip2), 0);
+ cvmx_write_csr(b + CIU3_IDT_PP(idt_ip2, 0), 1ull << core);
+ cvmx_write_csr(b + CIU3_IDT_IO(idt_ip2), 0);
+
+ /* ip3 interrupts for this CPU */
+ cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip3), 1);
+ cvmx_write_csr(b + CIU3_IDT_PP(idt_ip3, 0), 1ull << core);
+ cvmx_write_csr(b + CIU3_IDT_IO(idt_ip3), 0);
+
+ /* ip4 interrupts for this CPU */
+ cvmx_write_csr(b + CIU3_IDT_CTL(idt_ip4), 2);
+ cvmx_write_csr(b + CIU3_IDT_PP(idt_ip4, 0), 0);
+ cvmx_write_csr(b + CIU3_IDT_IO(idt_ip4), 0);
+
+ cvmx_write_csr(b + CIU3_IDT_CTL(unused_idt2), 0);
+ cvmx_write_csr(b + CIU3_IDT_PP(unused_idt2, 0), 0);
+ cvmx_write_csr(b + CIU3_IDT_IO(unused_idt2), 0);
+
+ for (i = 0; i < CIU3_MBOX_PER_CORE; i++) {
+ unsigned int intsn = octeon_irq_ciu3_mbox_intsn_for_core(core, i);
+
+ cvmx_write_csr(b + CIU3_ISC_W1C(intsn), 2);
+ cvmx_write_csr(b + CIU3_ISC_CTL(intsn), 0);
+ }
+
+ return 0;
+}
+
+static void octeon_irq_setup_secondary_ciu3(void)
+{
+ struct octeon_ciu3_info *ciu3_info;
+
+ ciu3_info = octeon_ciu3_info_per_node[cvmx_get_node_num()];
+ octeon_irq_ciu3_alloc_resources(ciu3_info);
+ irq_cpu_online();
+
+ /* Enable the CIU lines */
+ set_c0_status(STATUSF_IP3 | STATUSF_IP2);
+ if (octeon_irq_use_ip4)
+ set_c0_status(STATUSF_IP4);
+ else
+ clear_c0_status(STATUSF_IP4);
+}
+
+static struct irq_chip octeon_irq_chip_ciu3_mbox = {
+ .name = "CIU3-M",
+ .irq_enable = octeon_irq_ciu3_mbox_enable,
+ .irq_disable = octeon_irq_ciu3_mbox_disable,
+ .irq_ack = octeon_irq_ciu3_mbox_ack,
+
+ .irq_cpu_online = octeon_irq_ciu3_mbox_cpu_online,
+ .irq_cpu_offline = octeon_irq_ciu3_mbox_cpu_offline,
+ .flags = IRQCHIP_ONOFFLINE_ENABLED,
+};
+
+static int __init octeon_irq_init_ciu3(struct device_node *ciu_node,
+ struct device_node *parent)
+{
+ int i;
+ int node;
+ struct irq_domain *domain;
+ struct octeon_ciu3_info *ciu3_info;
+ const __be32 *zero_addr;
+ u64 base_addr;
+ union cvmx_ciu3_const consts;
+
+ node = 0; /* of_node_to_nid(ciu_node); */
+ ciu3_info = kzalloc_node(sizeof(*ciu3_info), GFP_KERNEL, node);
+
+ if (!ciu3_info)
+ return -ENOMEM;
+
+ zero_addr = of_get_address(ciu_node, 0, NULL, NULL);
+ if (WARN_ON(!zero_addr))
+ return -EINVAL;
+
+ base_addr = of_translate_address(ciu_node, zero_addr);
+ base_addr = (u64)phys_to_virt(base_addr);
+
+ ciu3_info->ciu3_addr = base_addr;
+ ciu3_info->node = node;
+
+ consts.u64 = cvmx_read_csr(base_addr + CIU3_CONST);
+
+ octeon_irq_setup_secondary = octeon_irq_setup_secondary_ciu3;
+
+ octeon_irq_ip2 = octeon_irq_ciu3_ip2;
+ octeon_irq_ip3 = octeon_irq_ciu3_mbox;
+ octeon_irq_ip4 = octeon_irq_ip4_mask;
+
+ if (node == cvmx_get_node_num()) {
+ /* Mips internal */
+ octeon_irq_init_core();
+
+ /* Only do per CPU things if it is the CIU of the boot node. */
+ i = irq_alloc_descs_from(OCTEON_IRQ_MBOX0, 8, node);
+ WARN_ON(i < 0);
+
+ for (i = 0; i < 8; i++)
+ irq_set_chip_and_handler(i + OCTEON_IRQ_MBOX0,
+ &octeon_irq_chip_ciu3_mbox, handle_percpu_irq);
+ }
+
+ /*
+ * Initialize all domains to use the default domain. Specific major
+ * blocks will overwrite the default domain as needed.
+ */
+ domain = irq_domain_add_tree(ciu_node, &octeon_dflt_domain_ciu3_ops,
+ ciu3_info);
+ for (i = 0; i < MAX_CIU3_DOMAINS; i++)
+ ciu3_info->domain[i] = domain;
+
+ octeon_ciu3_info_per_node[node] = ciu3_info;
+
+ if (node == cvmx_get_node_num()) {
+ /* Only do per CPU things if it is the CIU of the boot node. */
+ octeon_irq_ciu3_alloc_resources(ciu3_info);
+ if (node == 0)
+ irq_set_default_host(domain);
+
+ octeon_irq_use_ip4 = false;
+ /* Enable the CIU lines */
+ set_c0_status(STATUSF_IP2 | STATUSF_IP3);
+ clear_c0_status(STATUSF_IP4);
+ }
+
+ return 0;
+}
+
static struct of_device_id ciu_types[] __initdata = {
{.compatible = "cavium,octeon-3860-ciu", .data = octeon_irq_init_ciu},
{.compatible = "cavium,octeon-3860-gpio", .data = octeon_irq_init_gpio},
{.compatible = "cavium,octeon-6880-ciu2", .data = octeon_irq_init_ciu2},
+ {.compatible = "cavium,octeon-7890-ciu3", .data = octeon_irq_init_ciu3},
{.compatible = "cavium,octeon-7130-cib", .data = octeon_irq_init_cib},
{}
};
diff --git a/arch/mips/cavium-octeon/octeon-platform.c b/arch/mips/cavium-octeon/octeon-platform.c
index d113c8ded6e2..b31fbc9d6eae 100644
--- a/arch/mips/cavium-octeon/octeon-platform.c
+++ b/arch/mips/cavium-octeon/octeon-platform.c
@@ -3,32 +3,27 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2004-2011 Cavium Networks
+ * Copyright (C) 2004-2016 Cavium Networks
* Copyright (C) 2008 Wind River Systems
*/
-#include <linux/delay.h>
#include <linux/init.h>
-#include <linux/irq.h>
-#include <linux/i2c.h>
-#include <linux/usb.h>
-#include <linux/dma-mapping.h>
-#include <linux/module.h>
-#include <linux/mutex.h>
-#include <linux/slab.h>
-#include <linux/platform_device.h>
+#include <linux/delay.h>
+#include <linux/etherdevice.h>
#include <linux/of_platform.h>
#include <linux/of_fdt.h>
#include <linux/libfdt.h>
+#include <linux/usb/ehci_def.h>
#include <linux/usb/ehci_pdriver.h>
#include <linux/usb/ohci_pdriver.h>
#include <asm/octeon/octeon.h>
-#include <asm/octeon/cvmx-rnm-defs.h>
-#include <asm/octeon/cvmx-helper.h>
#include <asm/octeon/cvmx-helper-board.h>
#include <asm/octeon/cvmx-uctlx-defs.h>
+#define CVMX_UAHCX_EHCI_USBCMD (CVMX_ADD_IO_SEG(0x00016F0000000010ull))
+#define CVMX_UAHCX_OHCI_USBCMD (CVMX_ADD_IO_SEG(0x00016F0000000408ull))
+
/* Octeon Random Number Generator. */
static int __init octeon_rng_device_init(void)
{
@@ -77,12 +72,36 @@ static DEFINE_MUTEX(octeon2_usb_clocks_mutex);
static int octeon2_usb_clock_start_cnt;
+static int __init octeon2_usb_reset(void)
+{
+ union cvmx_uctlx_clk_rst_ctl clk_rst_ctl;
+ u32 ucmd;
+
+ if (!OCTEON_IS_OCTEON2())
+ return 0;
+
+ clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0));
+ if (clk_rst_ctl.s.hrst) {
+ ucmd = cvmx_read64_uint32(CVMX_UAHCX_EHCI_USBCMD);
+ ucmd &= ~CMD_RUN;
+ cvmx_write64_uint32(CVMX_UAHCX_EHCI_USBCMD, ucmd);
+ mdelay(2);
+ ucmd |= CMD_RESET;
+ cvmx_write64_uint32(CVMX_UAHCX_EHCI_USBCMD, ucmd);
+ ucmd = cvmx_read64_uint32(CVMX_UAHCX_OHCI_USBCMD);
+ ucmd |= CMD_RUN;
+ cvmx_write64_uint32(CVMX_UAHCX_OHCI_USBCMD, ucmd);
+ }
+
+ return 0;
+}
+arch_initcall(octeon2_usb_reset);
+
static void octeon2_usb_clocks_start(struct device *dev)
{
u64 div;
union cvmx_uctlx_if_ena if_ena;
union cvmx_uctlx_clk_rst_ctl clk_rst_ctl;
- union cvmx_uctlx_uphy_ctl_status uphy_ctl_status;
union cvmx_uctlx_uphy_portx_ctl_status port_ctl_status;
int i;
unsigned long io_clk_64_to_ns;
@@ -130,6 +149,17 @@ static void octeon2_usb_clocks_start(struct device *dev)
if_ena.s.en = 1;
cvmx_write_csr(CVMX_UCTLX_IF_ENA(0), if_ena.u64);
+ for (i = 0; i <= 1; i++) {
+ port_ctl_status.u64 =
+ cvmx_read_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0));
+ /* Set txvreftune to 15 to obtain compliant 'eye' diagram. */
+ port_ctl_status.s.txvreftune = 15;
+ port_ctl_status.s.txrisetune = 1;
+ port_ctl_status.s.txpreemphasistune = 1;
+ cvmx_write_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0),
+ port_ctl_status.u64);
+ }
+
/* Step 3: Configure the reference clock, PHY, and HCLK */
clk_rst_ctl.u64 = cvmx_read_csr(CVMX_UCTLX_CLK_RST_CTL(0));
@@ -217,29 +247,10 @@ static void octeon2_usb_clocks_start(struct device *dev)
clk_rst_ctl.s.p_por = 0;
cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
- /* Step 5: Wait 1 ms for the PHY clock to start. */
- mdelay(1);
+ /* Step 5: Wait 3 ms for the PHY clock to start. */
+ mdelay(3);
- /*
- * Step 6: Program the reset input from automatic test
- * equipment field in the UPHY CSR
- */
- uphy_ctl_status.u64 = cvmx_read_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0));
- uphy_ctl_status.s.ate_reset = 1;
- cvmx_write_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0), uphy_ctl_status.u64);
-
- /* Step 7: Wait for at least 10ns. */
- ndelay(10);
-
- /* Step 8: Clear the ATE_RESET field in the UPHY CSR. */
- uphy_ctl_status.s.ate_reset = 0;
- cvmx_write_csr(CVMX_UCTLX_UPHY_CTL_STATUS(0), uphy_ctl_status.u64);
-
- /*
- * Step 9: Wait for at least 20ns for UPHY to output PHY clock
- * signals and OHCI_CLK48
- */
- ndelay(20);
+ /* Steps 6..9 apply to automatic test equipment (ATE) only and are skipped. */
/* Step 10: Configure the OHCI_CLK48 and OHCI_CLK12 clocks. */
/* 10a */
@@ -260,6 +271,20 @@ static void octeon2_usb_clocks_start(struct device *dev)
clk_rst_ctl.s.p_prst = 1;
cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+ /* Step 11b */
+ udelay(1);
+
+ /* Step 11c */
+ clk_rst_ctl.s.p_prst = 0;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
+ /* Step 11d */
+ mdelay(1);
+
+ /* Step 11e */
+ clk_rst_ctl.s.p_prst = 1;
+ cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
+
/* Step 12: Wait 1 uS. */
udelay(1);
@@ -268,21 +293,9 @@ static void octeon2_usb_clocks_start(struct device *dev)
cvmx_write_csr(CVMX_UCTLX_CLK_RST_CTL(0), clk_rst_ctl.u64);
end_clock:
- /* Now we can set some other registers. */
-
- for (i = 0; i <= 1; i++) {
- port_ctl_status.u64 =
- cvmx_read_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0));
- /* Set txvreftune to 15 to obtain compliant 'eye' diagram. */
- port_ctl_status.s.txvreftune = 15;
- port_ctl_status.s.txrisetune = 1;
- port_ctl_status.s.txpreemphasistune = 1;
- cvmx_write_csr(CVMX_UCTLX_UPHY_PORTX_CTL_STATUS(i, 0),
- port_ctl_status.u64);
- }
-
/* Set uSOF cycle period to 60,000 bits. */
cvmx_write_csr(CVMX_UCTLX_EHCI_FLA(0), 0x20ull);
+
exit:
mutex_unlock(&octeon2_usb_clocks_mutex);
}
@@ -310,7 +323,11 @@ static struct usb_ehci_pdata octeon_ehci_pdata = {
#ifdef __BIG_ENDIAN
.big_endian_mmio = 1,
#endif
- .dma_mask_64 = 1,
+ /*
+ * We can DMA from anywhere. But the descriptors must be in
+ * the lower 4GB.
+ */
+ .dma_mask_64 = 0,
.power_on = octeon_ehci_power_on,
.power_off = octeon_ehci_power_off,
};
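
The dma_mask_64 = 0 change above keeps the EHCI platform glue on a 32-bit DMA mask so that its descriptors land below 4 GB. As a hedged sketch only (this is not what the patch does; it merely illustrates the same constraint expressed through the generic DMA API):

#include <linux/dma-mapping.h>

/* Sketch: 64-bit streaming DMA for data, coherent memory below 4 GB. */
static int example_setup_dma(struct device *dev)
{
	int ret;

	/* 64-bit streaming DMA is fine for data buffers... */
	ret = dma_set_mask(dev, DMA_BIT_MASK(64));
	if (ret)
		return ret;
	/* ...but descriptor (coherent) memory must stay below 4 GB. */
	return dma_set_coherent_mask(dev, DMA_BIT_MASK(32));
}
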
@@ -525,10 +542,17 @@ static void __init octeon_fdt_set_phy(int eth, int phy_addr)
static void __init octeon_fdt_set_mac_addr(int n, u64 *pmac)
{
+ const u8 *old_mac;
+ int old_len;
u8 new_mac[6];
u64 mac = *pmac;
int r;
+ old_mac = fdt_getprop(initial_boot_params, n, "local-mac-address",
+ &old_len);
+ if (!old_mac || old_len != 6 || is_valid_ether_addr(old_mac))
+ return;
+
new_mac[0] = (mac >> 40) & 0xff;
new_mac[1] = (mac >> 32) & 0xff;
new_mac[2] = (mac >> 24) & 0xff;
@@ -560,7 +584,7 @@ static void __init octeon_fdt_rm_ethernet(int node)
fdt_nop_node(initial_boot_params, node);
}
-static void __init octeon_fdt_pip_port(int iface, int i, int p, int max, u64 *pmac)
+static void __init octeon_fdt_pip_port(int iface, int i, int p, int max)
{
char name_buffer[20];
int eth;
@@ -583,10 +607,9 @@ static void __init octeon_fdt_pip_port(int iface, int i, int p, int max, u64 *pm
phy_addr = cvmx_helper_board_get_mii_address(ipd_port);
octeon_fdt_set_phy(eth, phy_addr);
- octeon_fdt_set_mac_addr(eth, pmac);
}
-static void __init octeon_fdt_pip_iface(int pip, int idx, u64 *pmac)
+static void __init octeon_fdt_pip_iface(int pip, int idx)
{
char name_buffer[20];
int iface;
@@ -602,7 +625,73 @@ static void __init octeon_fdt_pip_iface(int pip, int idx, u64 *pmac)
count = cvmx_helper_ports_on_interface(idx);
for (p = 0; p < 16; p++)
- octeon_fdt_pip_port(iface, idx, p, count - 1, pmac);
+ octeon_fdt_pip_port(iface, idx, p, count - 1);
+}
+
+void __init octeon_fill_mac_addresses(void)
+{
+ const char *alias_prop;
+ char name_buffer[20];
+ u64 mac_addr_base;
+ int aliases;
+ int pip;
+ int i;
+
+ aliases = fdt_path_offset(initial_boot_params, "/aliases");
+ if (aliases < 0)
+ return;
+
+ mac_addr_base =
+ ((octeon_bootinfo->mac_addr_base[0] & 0xffull)) << 40 |
+ ((octeon_bootinfo->mac_addr_base[1] & 0xffull)) << 32 |
+ ((octeon_bootinfo->mac_addr_base[2] & 0xffull)) << 24 |
+ ((octeon_bootinfo->mac_addr_base[3] & 0xffull)) << 16 |
+ ((octeon_bootinfo->mac_addr_base[4] & 0xffull)) << 8 |
+ (octeon_bootinfo->mac_addr_base[5] & 0xffull);
+
+ for (i = 0; i < 2; i++) {
+ int mgmt;
+
+ snprintf(name_buffer, sizeof(name_buffer), "mix%d", i);
+ alias_prop = fdt_getprop(initial_boot_params, aliases,
+ name_buffer, NULL);
+ if (!alias_prop)
+ continue;
+ mgmt = fdt_path_offset(initial_boot_params, alias_prop);
+ if (mgmt < 0)
+ continue;
+ octeon_fdt_set_mac_addr(mgmt, &mac_addr_base);
+ }
+
+ alias_prop = fdt_getprop(initial_boot_params, aliases, "pip", NULL);
+ if (!alias_prop)
+ return;
+
+ pip = fdt_path_offset(initial_boot_params, alias_prop);
+ if (pip < 0)
+ return;
+
+ for (i = 0; i <= 4; i++) {
+ int iface;
+ int p;
+
+ snprintf(name_buffer, sizeof(name_buffer), "interface@%d", i);
+ iface = fdt_subnode_offset(initial_boot_params, pip,
+ name_buffer);
+ if (iface < 0)
+ continue;
+ for (p = 0; p < 16; p++) {
+ int eth;
+
+ snprintf(name_buffer, sizeof(name_buffer),
+ "ethernet@%x", p);
+ eth = fdt_subnode_offset(initial_boot_params, iface,
+ name_buffer);
+ if (eth < 0)
+ continue;
+ octeon_fdt_set_mac_addr(eth, &mac_addr_base);
+ }
+ }
}
int __init octeon_prune_device_tree(void)
@@ -612,26 +701,20 @@ int __init octeon_prune_device_tree(void)
const char *alias_prop;
char name_buffer[20];
int aliases;
- u64 mac_addr_base;
if (fdt_check_header(initial_boot_params))
panic("Corrupt Device Tree.");
+ WARN(octeon_bootinfo->board_type == CVMX_BOARD_TYPE_CUST_DSR1000N,
+ "Built-in DTB booting is deprecated on %s. Please switch to use appended DTB.",
+ cvmx_board_type_to_string(octeon_bootinfo->board_type));
+
aliases = fdt_path_offset(initial_boot_params, "/aliases");
if (aliases < 0) {
pr_err("Error: No /aliases node in device tree.");
return -EINVAL;
}
-
- mac_addr_base =
- ((octeon_bootinfo->mac_addr_base[0] & 0xffull)) << 40 |
- ((octeon_bootinfo->mac_addr_base[1] & 0xffull)) << 32 |
- ((octeon_bootinfo->mac_addr_base[2] & 0xffull)) << 24 |
- ((octeon_bootinfo->mac_addr_base[3] & 0xffull)) << 16 |
- ((octeon_bootinfo->mac_addr_base[4] & 0xffull)) << 8 |
- (octeon_bootinfo->mac_addr_base[5] & 0xffull);
-
if (OCTEON_IS_MODEL(OCTEON_CN52XX) || OCTEON_IS_MODEL(OCTEON_CN63XX))
max_port = 2;
else if (OCTEON_IS_MODEL(OCTEON_CN56XX) || OCTEON_IS_MODEL(OCTEON_CN68XX))
@@ -660,7 +743,6 @@ int __init octeon_prune_device_tree(void)
} else {
int phy_addr = cvmx_helper_board_get_mii_address(CVMX_HELPER_BOARD_MGMT_IPD_PORT + i);
octeon_fdt_set_phy(mgmt, phy_addr);
- octeon_fdt_set_mac_addr(mgmt, &mac_addr_base);
}
}
}
@@ -670,7 +752,7 @@ int __init octeon_prune_device_tree(void)
int pip = fdt_path_offset(initial_boot_params, pip_path);
if (pip >= 0)
for (i = 0; i <= 4; i++)
- octeon_fdt_pip_iface(pip, i, &mac_addr_base);
+ octeon_fdt_pip_iface(pip, i);
}
/* I2C */
@@ -970,13 +1052,6 @@ end_led:
}
}
- if (octeon_bootinfo->board_type != CVMX_BOARD_TYPE_CUST_DSR1000N) {
- int dsr1000n_leds = fdt_path_offset(initial_boot_params,
- "/dsr1000n-leds");
- if (dsr1000n_leds >= 0)
- fdt_nop_node(initial_boot_params, dsr1000n_leds);
- }
-
return 0;
}
diff --git a/arch/mips/cavium-octeon/setup.c b/arch/mips/cavium-octeon/setup.c
index cd7101fb6227..cb16fcc5f8f0 100644
--- a/arch/mips/cavium-octeon/setup.c
+++ b/arch/mips/cavium-octeon/setup.c
@@ -40,10 +40,26 @@
#include <asm/octeon/octeon.h>
#include <asm/octeon/pci-octeon.h>
-#include <asm/octeon/cvmx-mio-defs.h>
#include <asm/octeon/cvmx-rst-defs.h>
-extern struct plat_smp_ops octeon_smp_ops;
+/*
+ * TRUE for devices having registers with little-endian byte
+ * order, FALSE for registers with native-endian byte order.
+ * PCI mandates little-endian, USB and SATA are configurable,
+ * but we chose little-endian for these.
+ */
+const bool octeon_should_swizzle_table[256] = {
+ [0x00] = true, /* bootbus/CF */
+ [0x1b] = true, /* PCI mmio window */
+ [0x1c] = true, /* PCI mmio window */
+ [0x1d] = true, /* PCI mmio window */
+ [0x1e] = true, /* PCI mmio window */
+ [0x68] = true, /* OCTEON III USB */
+ [0x69] = true, /* OCTEON III USB */
+ [0x6c] = true, /* OCTEON III SATA */
+ [0x6f] = true, /* OCTEON II USB */
+};
+EXPORT_SYMBOL(octeon_should_swizzle_table);
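
The table above only records which of the 256 I/O regions hold little-endian registers; the lookup itself happens in the port's MMIO accessors, which are outside this hunk. Purely as a hypothetical sketch (the readq wrapper and its region_index parameter are invented here for illustration):

/*
 * Hypothetical consumer: byte-swap values read from a region the table
 * marks as little-endian, leave native-endian regions alone.
 */
static inline u64 example_readq(const volatile void __iomem *addr,
				unsigned int region_index)
{
	u64 v = __raw_readq(addr);

	if (octeon_should_swizzle_table[region_index & 0xff])
		v = le64_to_cpu((__force __le64)v);
	return v;
}
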
#ifdef CONFIG_PCI
extern void pci_console_init(const char *arg);
@@ -466,15 +482,25 @@ static void octeon_halt(void)
static char __read_mostly octeon_system_type[80];
-static int __init init_octeon_system_type(void)
+static void __init init_octeon_system_type(void)
{
- snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)",
- cvmx_board_type_to_string(octeon_bootinfo->board_type),
- octeon_model_get_string(read_c0_prid()));
+ char const *board_type;
+
+ board_type = cvmx_board_type_to_string(octeon_bootinfo->board_type);
+ if (board_type == NULL) {
+ struct device_node *root;
+ int ret;
+
+ root = of_find_node_by_path("/");
+ ret = of_property_read_string(root, "model", &board_type);
+ of_node_put(root);
+ if (ret)
+ board_type = "Unsupported Board";
+ }
- return 0;
+ snprintf(octeon_system_type, sizeof(octeon_system_type), "%s (%s)",
+ board_type, octeon_model_get_string(read_c0_prid()));
}
-early_initcall(init_octeon_system_type);
/**
* Return a string representing the system type
@@ -492,8 +518,6 @@ const char *get_system_type(void)
void octeon_user_io_init(void)
{
union octeon_cvmemctl cvmmemctl;
- union cvmx_iob_fau_timeout fau_timeout;
- union cvmx_pow_nw_tim nm_tim;
/* Get the current settings for CP0_CVMMEMCTL_REG */
cvmmemctl.u64 = read_c0_cvmmemctl();
@@ -595,17 +619,27 @@ void octeon_user_io_init(void)
CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE,
CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128);
- /* Set a default for the hardware timeouts */
- fau_timeout.u64 = 0;
- fau_timeout.s.tout_val = 0xfff;
- /* Disable tagwait FAU timeout */
- fau_timeout.s.tout_enb = 0;
- cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_timeout.u64);
+ if (octeon_has_feature(OCTEON_FEATURE_FAU)) {
+ union cvmx_iob_fau_timeout fau_timeout;
- nm_tim.u64 = 0;
- /* 4096 cycles */
- nm_tim.s.nw_tim = 3;
- cvmx_write_csr(CVMX_POW_NW_TIM, nm_tim.u64);
+ /* Set a default for the hardware timeouts */
+ fau_timeout.u64 = 0;
+ fau_timeout.s.tout_val = 0xfff;
+ /* Disable tagwait FAU timeout */
+ fau_timeout.s.tout_enb = 0;
+ cvmx_write_csr(CVMX_IOB_FAU_TIMEOUT, fau_timeout.u64);
+ }
+
+ if ((!OCTEON_IS_MODEL(OCTEON_CN68XX) &&
+ !OCTEON_IS_MODEL(OCTEON_CN7XXX)) ||
+ OCTEON_IS_MODEL(OCTEON_CN70XX)) {
+ union cvmx_pow_nw_tim nm_tim;
+
+ nm_tim.u64 = 0;
+ /* 4096 cycles */
+ nm_tim.s.nw_tim = 3;
+ cvmx_write_csr(CVMX_POW_NW_TIM, nm_tim.u64);
+ }
write_octeon_c0_icacheerr(0);
write_c0_derraddr1(0);
@@ -637,9 +671,22 @@ void __init prom_init(void)
sysinfo = cvmx_sysinfo_get();
memset(sysinfo, 0, sizeof(*sysinfo));
sysinfo->system_dram_size = octeon_bootinfo->dram_size << 20;
- sysinfo->phy_mem_desc_ptr =
- cvmx_phys_to_ptr(octeon_bootinfo->phy_mem_desc_addr);
- sysinfo->core_mask = octeon_bootinfo->core_mask;
+ sysinfo->phy_mem_desc_addr = (u64)phys_to_virt(octeon_bootinfo->phy_mem_desc_addr);
+
+ if ((octeon_bootinfo->major_version > 1) ||
+ (octeon_bootinfo->major_version == 1 &&
+ octeon_bootinfo->minor_version >= 4))
+ cvmx_coremask_copy(&sysinfo->core_mask,
+ &octeon_bootinfo->ext_core_mask);
+ else
+ cvmx_coremask_set64(&sysinfo->core_mask,
+ octeon_bootinfo->core_mask);
+
+ /* Some broken U-Boot versions pass garbage in the upper bits; clear them out. */
+ if (!OCTEON_IS_MODEL(OCTEON_CN78XX))
+ for (i = 512; i < 1024; i++)
+ cvmx_coremask_clear_core(&sysinfo->core_mask, i);
+
sysinfo->exception_base_addr = octeon_bootinfo->exception_base_addr;
sysinfo->cpu_clock_hz = octeon_bootinfo->eclock_hz;
sysinfo->dram_data_rate_hz = octeon_bootinfo->dclock_hz * 2;
@@ -867,7 +914,7 @@ void __init prom_init(void)
#endif
octeon_user_io_init();
- register_smp_ops(&octeon_smp_ops);
+ octeon_setup_smp();
}
/* Exclude a single page from the regions obtained in plat_mem_setup. */
@@ -1079,6 +1126,7 @@ void __init prom_free_prom_memory(void)
}
}
+void __init octeon_fill_mac_addresses(void);
int octeon_prune_device_tree(void);
extern const char __appended_dtb;
@@ -1088,11 +1136,13 @@ void __init device_tree_init(void)
{
const void *fdt;
bool do_prune;
+ bool fill_mac;
#ifdef CONFIG_MIPS_ELF_APPENDED_DTB
if (!fdt_check_header(&__appended_dtb)) {
fdt = &__appended_dtb;
do_prune = false;
+ fill_mac = true;
pr_info("Using appended Device Tree.\n");
} else
#endif
@@ -1101,13 +1151,16 @@ void __init device_tree_init(void)
if (fdt_check_header(fdt))
panic("Corrupt Device Tree passed to kernel.");
do_prune = false;
+ fill_mac = false;
pr_info("Using passed Device Tree.\n");
} else if (OCTEON_IS_MODEL(OCTEON_CN68XX)) {
fdt = &__dtb_octeon_68xx_begin;
do_prune = true;
+ fill_mac = true;
} else {
fdt = &__dtb_octeon_3xxx_begin;
do_prune = true;
+ fill_mac = true;
}
initial_boot_params = (void *)fdt;
@@ -1116,7 +1169,10 @@ void __init device_tree_init(void)
octeon_prune_device_tree();
pr_info("Using internal Device Tree.\n");
}
+ if (fill_mac)
+ octeon_fill_mac_addresses();
unflatten_and_copy_device_tree();
+ init_octeon_system_type();
}
static int __initdata disable_octeon_edac_p;
diff --git a/arch/mips/cavium-octeon/smp.c b/arch/mips/cavium-octeon/smp.c
index b7fa9ae28c36..4d457d602d3b 100644
--- a/arch/mips/cavium-octeon/smp.c
+++ b/arch/mips/cavium-octeon/smp.c
@@ -30,25 +30,55 @@ uint64_t octeon_bootloader_entry_addr;
EXPORT_SYMBOL(octeon_bootloader_entry_addr);
#endif
+static void octeon_icache_flush(void)
+{
+ asm volatile ("synci 0($0)\n");
+}
+
+static void (*octeon_message_functions[8])(void) = {
+ scheduler_ipi,
+ generic_smp_call_function_interrupt,
+ octeon_icache_flush,
+};
+
static irqreturn_t mailbox_interrupt(int irq, void *dev_id)
{
- const int coreid = cvmx_get_core_num();
- uint64_t action;
+ u64 mbox_clrx = CVMX_CIU_MBOX_CLRX(cvmx_get_core_num());
+ u64 action;
+ int i;
+
+ /*
+ * Make sure the function array initialization remains
+ * correct.
+ */
+ BUILD_BUG_ON(SMP_RESCHEDULE_YOURSELF != (1 << 0));
+ BUILD_BUG_ON(SMP_CALL_FUNCTION != (1 << 1));
+ BUILD_BUG_ON(SMP_ICACHE_FLUSH != (1 << 2));
+
+ /*
+ * Load the mailbox register to figure out what we're supposed
+ * to do.
+ */
+ action = cvmx_read_csr(mbox_clrx);
- /* Load the mailbox register to figure out what we're supposed to do */
- action = cvmx_read_csr(CVMX_CIU_MBOX_CLRX(coreid)) & 0xffff;
+ if (OCTEON_IS_MODEL(OCTEON_CN68XX))
+ action &= 0xff;
+ else
+ action &= 0xffff;
/* Clear the mailbox to clear the interrupt */
- cvmx_write_csr(CVMX_CIU_MBOX_CLRX(coreid), action);
+ cvmx_write_csr(mbox_clrx, action);
- if (action & SMP_CALL_FUNCTION)
- generic_smp_call_function_interrupt();
- if (action & SMP_RESCHEDULE_YOURSELF)
- scheduler_ipi();
+ for (i = 0; i < ARRAY_SIZE(octeon_message_functions) && action;) {
+ if (action & 1) {
+ void (*fn)(void) = octeon_message_functions[i];
- /* Check if we've been told to flush the icache */
- if (action & SMP_ICACHE_FLUSH)
- asm volatile ("synci 0($0)\n");
+ if (fn)
+ fn();
+ }
+ action >>= 1;
+ i++;
+ }
return IRQ_HANDLED;
}
@@ -97,13 +127,15 @@ static void octeon_smp_hotplug_setup(void)
#endif
}
-static void octeon_smp_setup(void)
+static void __init octeon_smp_setup(void)
{
const int coreid = cvmx_get_core_num();
int cpus;
int id;
- int core_mask = octeon_get_boot_coremask();
+ struct cvmx_sysinfo *sysinfo = cvmx_sysinfo_get();
+
#ifdef CONFIG_HOTPLUG_CPU
+ int core_mask = octeon_get_boot_coremask();
unsigned int num_cores = cvmx_octeon_num_cores();
#endif
@@ -119,7 +151,7 @@ static void octeon_smp_setup(void)
/* The present CPUs get the lowest CPU numbers. */
cpus = 1;
for (id = 0; id < NR_CPUS; id++) {
- if ((id != coreid) && (core_mask & (1 << id))) {
+ if ((id != coreid) && cvmx_coremask_is_core_set(&sysinfo->core_mask, id)) {
set_cpu_possible(cpus, true);
set_cpu_present(cpus, true);
__cpu_number_map[id] = cpus;
@@ -196,7 +228,7 @@ static void octeon_init_secondary(void)
* Callout to firmware before smp_init
*
*/
-void octeon_prepare_cpus(unsigned int max_cpus)
+static void __init octeon_prepare_cpus(unsigned int max_cpus)
{
/*
* Only the low order mailbox bits are used for IPIs, leave
@@ -239,10 +271,11 @@ static int octeon_cpu_disable(void)
return -ENOTSUPP;
set_cpu_online(cpu, false);
+ calculate_cpu_foreign_map();
cpumask_clear_cpu(cpu, &cpu_callin_map);
octeon_fixup_irqs();
- flush_cache_all();
+ __flush_cache_all();
local_flush_tlb_all();
return 0;
@@ -331,7 +364,7 @@ static int octeon_update_boot_vector(unsigned int cpu)
}
if (!(avail_coremask & (1 << coreid))) {
- /* core not available, assume, that catched by simple-executive */
+ /* core not available, assume, that caught by simple-executive */
cvmx_write_csr(CVMX_CIU_PP_RST, 1 << coreid);
cvmx_write_csr(CVMX_CIU_PP_RST, 0);
}
@@ -352,7 +385,7 @@ static int octeon_cpu_callback(struct notifier_block *nfb,
{
unsigned int cpu = (unsigned long)hcpu;
- switch (action) {
+ switch (action & ~CPU_TASKS_FROZEN) {
case CPU_UP_PREPARE:
octeon_update_boot_vector(cpu);
break;
@@ -388,3 +421,92 @@ struct plat_smp_ops octeon_smp_ops = {
.cpu_die = octeon_cpu_die,
#endif
};
+
+static irqreturn_t octeon_78xx_reched_interrupt(int irq, void *dev_id)
+{
+ scheduler_ipi();
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t octeon_78xx_call_function_interrupt(int irq, void *dev_id)
+{
+ generic_smp_call_function_interrupt();
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t octeon_78xx_icache_flush_interrupt(int irq, void *dev_id)
+{
+ octeon_icache_flush();
+ return IRQ_HANDLED;
+}
+
+/*
+ * Callout to firmware before smp_init
+ */
+static void octeon_78xx_prepare_cpus(unsigned int max_cpus)
+{
+ if (request_irq(OCTEON_IRQ_MBOX0 + 0,
+ octeon_78xx_reched_interrupt,
+ IRQF_PERCPU | IRQF_NO_THREAD, "Scheduler",
+ octeon_78xx_reched_interrupt)) {
+ panic("Cannot request_irq for SchedulerIPI");
+ }
+ if (request_irq(OCTEON_IRQ_MBOX0 + 1,
+ octeon_78xx_call_function_interrupt,
+ IRQF_PERCPU | IRQF_NO_THREAD, "SMP-Call",
+ octeon_78xx_call_function_interrupt)) {
+ panic("Cannot request_irq for SMP-Call");
+ }
+ if (request_irq(OCTEON_IRQ_MBOX0 + 2,
+ octeon_78xx_icache_flush_interrupt,
+ IRQF_PERCPU | IRQF_NO_THREAD, "ICache-Flush",
+ octeon_78xx_icache_flush_interrupt)) {
+ panic("Cannot request_irq for ICache-Flush");
+ }
+}
+
+static void octeon_78xx_send_ipi_single(int cpu, unsigned int action)
+{
+ int i;
+
+ for (i = 0; i < 8; i++) {
+ if (action & 1)
+ octeon_ciu3_mbox_send(cpu, i);
+ action >>= 1;
+ }
+}
+
+static void octeon_78xx_send_ipi_mask(const struct cpumask *mask,
+ unsigned int action)
+{
+ unsigned int cpu;
+
+ for_each_cpu(cpu, mask)
+ octeon_78xx_send_ipi_single(cpu, action);
+}
+
+static struct plat_smp_ops octeon_78xx_smp_ops = {
+ .send_ipi_single = octeon_78xx_send_ipi_single,
+ .send_ipi_mask = octeon_78xx_send_ipi_mask,
+ .init_secondary = octeon_init_secondary,
+ .smp_finish = octeon_smp_finish,
+ .boot_secondary = octeon_boot_secondary,
+ .smp_setup = octeon_smp_setup,
+ .prepare_cpus = octeon_78xx_prepare_cpus,
+#ifdef CONFIG_HOTPLUG_CPU
+ .cpu_disable = octeon_cpu_disable,
+ .cpu_die = octeon_cpu_die,
+#endif
+};
+
+void __init octeon_setup_smp(void)
+{
+ struct plat_smp_ops *ops;
+
+ if (octeon_has_feature(OCTEON_FEATURE_CIU3))
+ ops = &octeon_78xx_smp_ops;
+ else
+ ops = &octeon_smp_ops;
+
+ register_smp_ops(ops);
+}
diff --git a/arch/mips/cobalt/setup.c b/arch/mips/cobalt/setup.c
index 9a8c2fe8d334..c136a18c7221 100644
--- a/arch/mips/cobalt/setup.c
+++ b/arch/mips/cobalt/setup.c
@@ -42,8 +42,8 @@ const char *get_system_type(void)
/*
* Cobalt doesn't have PS/2 keyboard/mouse interfaces,
- * keyboard conntroller is never used.
- * Also PCI-ISA bridge DMA contoroller is never used.
+ * keyboard controller is never used.
+ * Also PCI-ISA bridge DMA controller is never used.
*/
static struct resource cobalt_reserved_resources[] = {
{ /* dma1 */
diff --git a/arch/mips/configs/ath25_defconfig b/arch/mips/configs/ath25_defconfig
new file mode 100644
index 000000000000..2c829950be17
--- /dev/null
+++ b/arch/mips/configs/ath25_defconfig
@@ -0,0 +1,119 @@
+CONFIG_ATH25=y
+# CONFIG_COMPACTION is not set
+CONFIG_HZ_100=y
+# CONFIG_SECCOMP is not set
+# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_SYSVIPC=y
+# CONFIG_CROSS_MEMORY_ATTACH is not set
+# CONFIG_FHANDLE is not set
+CONFIG_HIGH_RES_TIMERS=y
+CONFIG_BLK_DEV_INITRD=y
+# CONFIG_RD_GZIP is not set
+# CONFIG_RD_BZIP2 is not set
+# CONFIG_RD_XZ is not set
+# CONFIG_RD_LZO is not set
+# CONFIG_RD_LZ4 is not set
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
+# CONFIG_AIO is not set
+CONFIG_EMBEDDED=y
+# CONFIG_VM_EVENT_COUNTERS is not set
+# CONFIG_SLUB_DEBUG is not set
+# CONFIG_COMPAT_BRK is not set
+CONFIG_MODULES=y
+CONFIG_MODULE_UNLOAD=y
+# CONFIG_BLK_DEV_BSG is not set
+# CONFIG_IOSCHED_CFQ is not set
+# CONFIG_SUSPEND is not set
+CONFIG_NET=y
+CONFIG_PACKET=y
+CONFIG_UNIX=y
+CONFIG_INET=y
+CONFIG_IP_MULTICAST=y
+CONFIG_IP_ADVANCED_ROUTER=y
+# CONFIG_INET_XFRM_MODE_TRANSPORT is not set
+# CONFIG_INET_XFRM_MODE_TUNNEL is not set
+# CONFIG_INET_XFRM_MODE_BEET is not set
+# CONFIG_IPV6 is not set
+CONFIG_CFG80211=m
+CONFIG_MAC80211=m
+CONFIG_MAC80211_DEBUGFS=y
+CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
+# CONFIG_FIRMWARE_IN_KERNEL is not set
+CONFIG_MTD=y
+CONFIG_MTD_REDBOOT_PARTS=y
+CONFIG_MTD_REDBOOT_DIRECTORY_BLOCK=-2
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_CFI=y
+CONFIG_MTD_CFI_ADV_OPTIONS=y
+CONFIG_MTD_CFI_GEOMETRY=y
+# CONFIG_MTD_MAP_BANK_WIDTH_1 is not set
+# CONFIG_MTD_MAP_BANK_WIDTH_4 is not set
+# CONFIG_MTD_CFI_I2 is not set
+CONFIG_MTD_CFI_AMDSTD=y
+CONFIG_MTD_COMPLEX_MAPPINGS=y
+CONFIG_MTD_PHYSMAP=y
+CONFIG_NETDEVICES=y
+# CONFIG_ETHERNET is not set
+# CONFIG_WLAN_VENDOR_ADMTEK is not set
+CONFIG_ATH5K=m
+# CONFIG_WLAN_VENDOR_ATMEL is not set
+# CONFIG_WLAN_VENDOR_BROADCOM is not set
+# CONFIG_WLAN_VENDOR_CISCO is not set
+# CONFIG_WLAN_VENDOR_INTEL is not set
+# CONFIG_WLAN_VENDOR_INTERSIL is not set
+# CONFIG_WLAN_VENDOR_MARVELL is not set
+# CONFIG_WLAN_VENDOR_MEDIATEK is not set
+# CONFIG_WLAN_VENDOR_RALINK is not set
+# CONFIG_WLAN_VENDOR_REALTEK is not set
+# CONFIG_WLAN_VENDOR_RSI is not set
+# CONFIG_WLAN_VENDOR_ST is not set
+# CONFIG_WLAN_VENDOR_TI is not set
+# CONFIG_WLAN_VENDOR_ZYDAS is not set
+CONFIG_INPUT=m
+# CONFIG_INPUT_KEYBOARD is not set
+# CONFIG_INPUT_MOUSE is not set
+# CONFIG_SERIO is not set
+# CONFIG_VT is not set
+# CONFIG_LEGACY_PTYS is not set
+# CONFIG_DEVKMEM is not set
+CONFIG_SERIAL_8250=y
+CONFIG_SERIAL_8250_CONSOLE=y
+# CONFIG_SERIAL_8250_PCI is not set
+CONFIG_SERIAL_8250_NR_UARTS=1
+CONFIG_SERIAL_8250_RUNTIME_UARTS=1
+# CONFIG_HW_RANDOM is not set
+# CONFIG_HWMON is not set
+# CONFIG_VGA_ARB is not set
+CONFIG_USB=m
+CONFIG_USB_EHCI_HCD=m
+CONFIG_LEDS_CLASS=y
+# CONFIG_IOMMU_SUPPORT is not set
+# CONFIG_DNOTIFY is not set
+# CONFIG_PROC_PAGE_MONITOR is not set
+CONFIG_TMPFS=y
+CONFIG_TMPFS_XATTR=y
+CONFIG_JFFS2_FS=y
+CONFIG_JFFS2_SUMMARY=y
+CONFIG_JFFS2_FS_XATTR=y
+# CONFIG_JFFS2_FS_POSIX_ACL is not set
+# CONFIG_JFFS2_FS_SECURITY is not set
+CONFIG_JFFS2_COMPRESSION_OPTIONS=y
+# CONFIG_JFFS2_ZLIB is not set
+CONFIG_SQUASHFS=y
+CONFIG_SQUASHFS_FILE_DIRECT=y
+CONFIG_SQUASHFS_DECOMP_MULTI_PERCPU=y
+# CONFIG_SQUASHFS_ZLIB is not set
+CONFIG_SQUASHFS_XZ=y
+CONFIG_PRINTK_TIME=y
+# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_STRIP_ASM_SYMS=y
+CONFIG_DEBUG_FS=y
+# CONFIG_SCHED_DEBUG is not set
+# CONFIG_FTRACE is not set
+# CONFIG_XZ_DEC_X86 is not set
+# CONFIG_XZ_DEC_POWERPC is not set
+# CONFIG_XZ_DEC_IA64 is not set
+# CONFIG_XZ_DEC_ARM is not set
+# CONFIG_XZ_DEC_ARMTHUMB is not set
+# CONFIG_XZ_DEC_SPARC is not set
diff --git a/arch/mips/configs/bcm47xx_defconfig b/arch/mips/configs/bcm47xx_defconfig
index 0db4eb319e0a..fad8e964f14c 100644
--- a/arch/mips/configs/bcm47xx_defconfig
+++ b/arch/mips/configs/bcm47xx_defconfig
@@ -23,7 +23,6 @@ CONFIG_IP_MROUTE=y
CONFIG_IP_MROUTE_MULTIPLE_TABLES=y
CONFIG_SYN_COOKIES=y
CONFIG_TCP_CONG_ADVANCED=y
-CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_MULTIPLE_TABLES=y
CONFIG_IPV6_SUBTREES=y
CONFIG_IPV6_MROUTE=y
diff --git a/arch/mips/configs/bcm63xx_defconfig b/arch/mips/configs/bcm63xx_defconfig
index 3fec26410f34..5599a9f1e3c6 100644
--- a/arch/mips/configs/bcm63xx_defconfig
+++ b/arch/mips/configs/bcm63xx_defconfig
@@ -44,6 +44,7 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
# CONFIG_STANDALONE is not set
# CONFIG_PREVENT_FIRMWARE_BUILD is not set
CONFIG_MTD=y
+CONFIG_MTD_BCM63XX_PARTS=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
diff --git a/arch/mips/configs/bigsur_defconfig b/arch/mips/configs/bigsur_defconfig
index e070dac071c8..d20b09d77b53 100644
--- a/arch/mips/configs/bigsur_defconfig
+++ b/arch/mips/configs/bigsur_defconfig
@@ -62,7 +62,6 @@ CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
# CONFIG_INET_LRO is not set
CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_IPV6_OPTIMISTIC_DAD=y
diff --git a/arch/mips/configs/bmips_be_defconfig b/arch/mips/configs/bmips_be_defconfig
index 24dcb90b0f64..acf7785c4cdb 100644
--- a/arch/mips/configs/bmips_be_defconfig
+++ b/arch/mips/configs/bmips_be_defconfig
@@ -36,6 +36,7 @@ CONFIG_DEVTMPFS_MOUNT=y
CONFIG_PRINTK_TIME=y
CONFIG_BRCMSTB_GISB_ARB=y
CONFIG_MTD=y
+CONFIG_MTD_BCM63XX_PARTS=y
CONFIG_MTD_CFI=y
CONFIG_MTD_CFI_INTELEXT=y
CONFIG_MTD_CFI_AMDSTD=y
diff --git a/arch/mips/configs/cavium_octeon_defconfig b/arch/mips/configs/cavium_octeon_defconfig
index e57058d4ec22..d470d08362c0 100644
--- a/arch/mips/configs/cavium_octeon_defconfig
+++ b/arch/mips/configs/cavium_octeon_defconfig
@@ -59,6 +59,8 @@ CONFIG_EEPROM_AT25=y
CONFIG_BLK_DEV_SD=y
CONFIG_ATA=y
CONFIG_SATA_AHCI=y
+CONFIG_SATA_AHCI_PLATFORM=y
+CONFIG_AHCI_OCTEON=y
CONFIG_PATA_OCTEON_CF=y
CONFIG_SATA_SIL=y
CONFIG_NETDEVICES=y
@@ -119,14 +121,16 @@ CONFIG_SPI=y
CONFIG_SPI_OCTEON=y
# CONFIG_HWMON is not set
CONFIG_WATCHDOG=y
-# CONFIG_USB_SUPPORT is not set
-CONFIG_USB_EHCI_BIG_ENDIAN_MMIO=y
-CONFIG_USB_OHCI_BIG_ENDIAN_MMIO=y
-CONFIG_USB_OHCI_LITTLE_ENDIAN=y
+CONFIG_USB=m
+CONFIG_USB_EHCI_HCD=m
+CONFIG_USB_EHCI_HCD_PLATFORM=m
+CONFIG_USB_OHCI_HCD=m
+CONFIG_USB_OHCI_HCD_PLATFORM=m
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_DS1307=y
CONFIG_STAGING=y
CONFIG_OCTEON_ETHERNET=y
+CONFIG_OCTEON_USB=m
# CONFIG_IOMMU_SUPPORT is not set
CONFIG_EXT4_FS=y
CONFIG_EXT4_FS_POSIX_ACL=y
@@ -152,6 +156,9 @@ CONFIG_SECURITY=y
CONFIG_SECURITY_NETWORK=y
CONFIG_CRYPTO_CBC=y
CONFIG_CRYPTO_HMAC=y
-CONFIG_CRYPTO_MD5=y
+CONFIG_CRYPTO_MD5_OCTEON=y
+CONFIG_CRYPTO_SHA1_OCTEON=m
+CONFIG_CRYPTO_SHA256_OCTEON=m
+CONFIG_CRYPTO_SHA512_OCTEON=m
CONFIG_CRYPTO_DES=y
# CONFIG_CRYPTO_ANSI_CPRNG is not set
diff --git a/arch/mips/configs/ci20_defconfig b/arch/mips/configs/ci20_defconfig
index 4e36b6e1869c..43e0ba24470c 100644
--- a/arch/mips/configs/ci20_defconfig
+++ b/arch/mips/configs/ci20_defconfig
@@ -17,13 +17,12 @@ CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_CGROUPS=y
+CONFIG_MEMCG=y
+CONFIG_CGROUP_SCHED=y
CONFIG_CGROUP_FREEZER=y
-CONFIG_CGROUP_DEVICE=y
CONFIG_CPUSETS=y
+CONFIG_CGROUP_DEVICE=y
CONFIG_CGROUP_CPUACCT=y
-CONFIG_MEMCG=y
-CONFIG_MEMCG_KMEM=y
-CONFIG_CGROUP_SCHED=y
CONFIG_NAMESPACES=y
CONFIG_USER_NS=y
CONFIG_CC_OPTIMIZE_FOR_SIZE=y
@@ -52,6 +51,11 @@ CONFIG_DEVTMPFS=y
# CONFIG_ALLOW_DEV_COREDUMP is not set
CONFIG_DMA_CMA=y
CONFIG_CMA_SIZE_MBYTES=32
+CONFIG_MTD=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_JZ4780=y
+CONFIG_MTD_UBI=y
+CONFIG_MTD_UBI_FASTMAP=y
CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_ARC is not set
# CONFIG_NET_CADENCE is not set
@@ -103,7 +107,7 @@ CONFIG_PROC_KCORE=y
# CONFIG_PROC_PAGE_MONITOR is not set
CONFIG_TMPFS=y
CONFIG_CONFIGFS_FS=y
-# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_UBIFS_FS=y
# CONFIG_NETWORK_FILESYSTEMS is not set
CONFIG_NLS=y
CONFIG_NLS_CODEPAGE_437=y
diff --git a/arch/mips/configs/db1xxx_defconfig b/arch/mips/configs/db1xxx_defconfig
index 3bdb72a70364..f0c8971030c4 100644
--- a/arch/mips/configs/db1xxx_defconfig
+++ b/arch/mips/configs/db1xxx_defconfig
@@ -18,7 +18,6 @@ CONFIG_CGROUP_FREEZER=y
CONFIG_CGROUP_DEVICE=y
CONFIG_CPUSETS=y
CONFIG_CGROUP_CPUACCT=y
-CONFIG_RESOURCE_COUNTERS=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_MEMCG_KMEM=y
diff --git a/arch/mips/configs/decstation_defconfig b/arch/mips/configs/decstation_defconfig
index ebc011c51e5a..2b6cb41d5715 100644
--- a/arch/mips/configs/decstation_defconfig
+++ b/arch/mips/configs/decstation_defconfig
@@ -30,7 +30,6 @@ CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_INET6_AH=m
diff --git a/arch/mips/configs/ip22_defconfig b/arch/mips/configs/ip22_defconfig
index 6ba9ce9fcdd5..5d83ff755547 100644
--- a/arch/mips/configs/ip22_defconfig
+++ b/arch/mips/configs/ip22_defconfig
@@ -48,7 +48,6 @@ CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
# CONFIG_INET_LRO is not set
CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_IPV6_OPTIMISTIC_DAD=y
diff --git a/arch/mips/configs/ip27_defconfig b/arch/mips/configs/ip27_defconfig
index 77e9f505f5e4..2b74aee320a1 100644
--- a/arch/mips/configs/ip27_defconfig
+++ b/arch/mips/configs/ip27_defconfig
@@ -43,7 +43,6 @@ CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
CONFIG_TCP_MD5SIG=y
CONFIG_IPV6=y
-CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_IPV6_OPTIMISTIC_DAD=y
diff --git a/arch/mips/configs/jazz_defconfig b/arch/mips/configs/jazz_defconfig
index a5e85e1ee5de..3019fce63cd3 100644
--- a/arch/mips/configs/jazz_defconfig
+++ b/arch/mips/configs/jazz_defconfig
@@ -34,7 +34,6 @@ CONFIG_IP_PIMSM_V2=y
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_INET6_AH=m
diff --git a/arch/mips/configs/lemote2f_defconfig b/arch/mips/configs/lemote2f_defconfig
index d1f198b072a0..5da76e0e120f 100644
--- a/arch/mips/configs/lemote2f_defconfig
+++ b/arch/mips/configs/lemote2f_defconfig
@@ -71,7 +71,6 @@ CONFIG_TCP_CONG_ADVANCED=y
CONFIG_TCP_CONG_BIC=y
CONFIG_DEFAULT_BIC=y
CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_TUNNEL=m
CONFIG_IPV6_MULTIPLE_TABLES=y
diff --git a/arch/mips/configs/ls1b_defconfig b/arch/mips/configs/loongson1b_defconfig
index 1b2cc1fb26a1..c442f27685f4 100644
--- a/arch/mips/configs/ls1b_defconfig
+++ b/arch/mips/configs/loongson1b_defconfig
@@ -1,19 +1,17 @@
CONFIG_MACH_LOONGSON32=y
CONFIG_PREEMPT=y
# CONFIG_SECCOMP is not set
-CONFIG_EXPERIMENTAL=y
# CONFIG_LOCALVERSION_AUTO is not set
+CONFIG_KERNEL_XZ=y
CONFIG_SYSVIPC=y
+CONFIG_HIGH_RES_TIMERS=y
CONFIG_BSD_PROCESS_ACCT=y
CONFIG_BSD_PROCESS_ACCT_V3=y
-CONFIG_HIGH_RES_TIMERS=y
CONFIG_IKCONFIG=y
CONFIG_IKCONFIG_PROC=y
CONFIG_LOG_BUF_SHIFT=16
CONFIG_NAMESPACES=y
-CONFIG_BLK_DEV_INITRD=y
-CONFIG_RD_BZIP2=y
-CONFIG_RD_LZMA=y
+CONFIG_CC_OPTIMIZE_FOR_SIZE=y
CONFIG_EXPERT=y
CONFIG_PERF_EVENTS=y
# CONFIG_COMPAT_BRK is not set
@@ -41,6 +39,12 @@ CONFIG_UEVENT_HELPER_PATH="/sbin/hotplug"
CONFIG_DEVTMPFS=y
CONFIG_DEVTMPFS_MOUNT=y
# CONFIG_STANDALONE is not set
+CONFIG_MTD=y
+CONFIG_MTD_CMDLINE_PARTS=y
+CONFIG_MTD_BLOCK=y
+CONFIG_MTD_NAND=y
+CONFIG_MTD_NAND_LOONGSON1=y
+CONFIG_MTD_UBI=y
CONFIG_BLK_DEV_LOOP=y
CONFIG_SCSI=m
# CONFIG_SCSI_PROC_FS is not set
@@ -48,7 +52,6 @@ CONFIG_BLK_DEV_SD=m
# CONFIG_SCSI_LOWLEVEL is not set
CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_BROADCOM is not set
-# CONFIG_NET_VENDOR_CHELSIO is not set
# CONFIG_NET_VENDOR_INTEL is not set
# CONFIG_NET_VENDOR_MARVELL is not set
# CONFIG_NET_VENDOR_MICREL is not set
@@ -56,7 +59,6 @@ CONFIG_NETDEVICES=y
# CONFIG_NET_VENDOR_SEEQ is not set
# CONFIG_NET_VENDOR_SMSC is not set
CONFIG_STMMAC_ETH=y
-CONFIG_STMMAC_DA=y
# CONFIG_NET_VENDOR_WIZNET is not set
# CONFIG_WLAN is not set
CONFIG_INPUT_EVDEV=y
@@ -69,18 +71,25 @@ CONFIG_LEGACY_PTY_COUNT=8
CONFIG_SERIAL_8250=y
CONFIG_SERIAL_8250_CONSOLE=y
# CONFIG_HW_RANDOM is not set
+CONFIG_GPIOLIB=y
+CONFIG_GPIO_LOONGSON1=y
# CONFIG_HWMON is not set
# CONFIG_VGA_CONSOLE is not set
-CONFIG_USB_HID=m
CONFIG_HID_GENERIC=m
+CONFIG_USB_HID=m
CONFIG_USB=y
CONFIG_USB_ANNOUNCE_NEW_DEVICES=y
CONFIG_USB_EHCI_HCD=y
-CONFIG_USB_EHCI_HCD_PLATFORM=y
# CONFIG_USB_EHCI_TT_NEWSCHED is not set
+CONFIG_USB_EHCI_HCD_PLATFORM=y
CONFIG_USB_STORAGE=m
CONFIG_USB_SERIAL=m
CONFIG_USB_SERIAL_PL2303=m
+CONFIG_NEW_LEDS=y
+CONFIG_LEDS_CLASS=y
+CONFIG_LEDS_GPIO=y
+CONFIG_LEDS_TRIGGERS=y
+CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_DRV_LOONGSON1=y
# CONFIG_IOMMU_SUPPORT is not set
@@ -96,15 +105,21 @@ CONFIG_VFAT_FS=y
CONFIG_PROC_KCORE=y
CONFIG_TMPFS=y
CONFIG_TMPFS_POSIX_ACL=y
-# CONFIG_MISC_FILESYSTEMS is not set
+CONFIG_UBIFS_FS=y
+CONFIG_UBIFS_FS_ADVANCED_COMPR=y
+CONFIG_UBIFS_ATIME_SUPPORT=y
CONFIG_NFS_FS=y
CONFIG_ROOT_NFS=y
CONFIG_NLS_CODEPAGE_437=m
CONFIG_NLS_ISO8859_1=m
+CONFIG_DYNAMIC_DEBUG=y
# CONFIG_ENABLE_WARN_DEPRECATED is not set
# CONFIG_ENABLE_MUST_CHECK is not set
+CONFIG_DEBUG_FS=y
CONFIG_MAGIC_SYSRQ=y
# CONFIG_SCHED_DEBUG is not set
# CONFIG_DEBUG_PREEMPT is not set
# CONFIG_FTRACE is not set
# CONFIG_EARLY_PRINTK is not set
+# CONFIG_CRYPTO_ECHAINIV is not set
+# CONFIG_CRYPTO_HW is not set
diff --git a/arch/mips/configs/loongson3_defconfig b/arch/mips/configs/loongson3_defconfig
index f8bf915c6d6b..7f95c4b3ab2c 100644
--- a/arch/mips/configs/loongson3_defconfig
+++ b/arch/mips/configs/loongson3_defconfig
@@ -25,7 +25,6 @@ CONFIG_TASK_XACCT=y
CONFIG_TASK_IO_ACCOUNTING=y
CONFIG_LOG_BUF_SHIFT=14
CONFIG_CPUSETS=y
-CONFIG_RESOURCE_COUNTERS=y
CONFIG_MEMCG=y
CONFIG_MEMCG_SWAP=y
CONFIG_BLK_CGROUP=y
diff --git a/arch/mips/configs/malta_qemu_32r6_defconfig b/arch/mips/configs/malta_qemu_32r6_defconfig
index 7f50dd67aa8d..65f140e1e872 100644
--- a/arch/mips/configs/malta_qemu_32r6_defconfig
+++ b/arch/mips/configs/malta_qemu_32r6_defconfig
@@ -146,7 +146,7 @@ CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_DISK=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_BACKLIGHT=y
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
diff --git a/arch/mips/configs/maltaaprp_defconfig b/arch/mips/configs/maltaaprp_defconfig
index a9d433a17fcf..799c4338fd5e 100644
--- a/arch/mips/configs/maltaaprp_defconfig
+++ b/arch/mips/configs/maltaaprp_defconfig
@@ -147,7 +147,7 @@ CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_DISK=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_BACKLIGHT=y
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
diff --git a/arch/mips/configs/maltasmvp_eva_defconfig b/arch/mips/configs/maltasmvp_eva_defconfig
index 2774ef064505..31846000530f 100644
--- a/arch/mips/configs/maltasmvp_eva_defconfig
+++ b/arch/mips/configs/maltasmvp_eva_defconfig
@@ -152,7 +152,7 @@ CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_DISK=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_BACKLIGHT=y
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
diff --git a/arch/mips/configs/maltaup_defconfig b/arch/mips/configs/maltaup_defconfig
index 9bbd2218f0bf..a79107da0675 100644
--- a/arch/mips/configs/maltaup_defconfig
+++ b/arch/mips/configs/maltaup_defconfig
@@ -146,7 +146,7 @@ CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_TRIGGERS=y
CONFIG_LEDS_TRIGGER_TIMER=y
-CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_DISK=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_LEDS_TRIGGER_BACKLIGHT=y
CONFIG_LEDS_TRIGGER_DEFAULT_ON=y
diff --git a/arch/mips/configs/mtx1_defconfig b/arch/mips/configs/mtx1_defconfig
index 9b6926d6bb32..f3f60056bc27 100644
--- a/arch/mips/configs/mtx1_defconfig
+++ b/arch/mips/configs/mtx1_defconfig
@@ -51,7 +51,6 @@ CONFIG_INET_IPCOMP=m
CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
-CONFIG_IPV6_PRIVACY=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
diff --git a/arch/mips/configs/nlm_xlp_defconfig b/arch/mips/configs/nlm_xlp_defconfig
index b3d1d37f85ea..b496c25fced6 100644
--- a/arch/mips/configs/nlm_xlp_defconfig
+++ b/arch/mips/configs/nlm_xlp_defconfig
@@ -95,7 +95,6 @@ CONFIG_TCP_CONG_YEAH=m
CONFIG_TCP_CONG_ILLINOIS=m
CONFIG_TCP_MD5SIG=y
CONFIG_IPV6=y
-CONFIG_IPV6_PRIVACY=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
diff --git a/arch/mips/configs/nlm_xlr_defconfig b/arch/mips/configs/nlm_xlr_defconfig
index 3d8016d6cf3e..8e99ad807a57 100644
--- a/arch/mips/configs/nlm_xlr_defconfig
+++ b/arch/mips/configs/nlm_xlr_defconfig
@@ -75,7 +75,6 @@ CONFIG_TCP_CONG_YEAH=m
CONFIG_TCP_CONG_ILLINOIS=m
CONFIG_TCP_MD5SIG=y
CONFIG_IPV6=y
-CONFIG_IPV6_PRIVACY=y
CONFIG_INET6_AH=m
CONFIG_INET6_ESP=m
CONFIG_INET6_IPCOMP=m
diff --git a/arch/mips/configs/rbtx49xx_defconfig b/arch/mips/configs/rbtx49xx_defconfig
index f8bf9b4c1343..43d55e5abacb 100644
--- a/arch/mips/configs/rbtx49xx_defconfig
+++ b/arch/mips/configs/rbtx49xx_defconfig
@@ -90,7 +90,7 @@ CONFIG_NEW_LEDS=y
CONFIG_LEDS_CLASS=y
CONFIG_LEDS_GPIO=y
CONFIG_LEDS_TRIGGERS=y
-CONFIG_LEDS_TRIGGER_IDE_DISK=y
+CONFIG_LEDS_TRIGGER_DISK=y
CONFIG_LEDS_TRIGGER_HEARTBEAT=y
CONFIG_RTC_CLASS=y
CONFIG_RTC_INTF_DEV_UIE_EMUL=y
diff --git a/arch/mips/configs/rm200_defconfig b/arch/mips/configs/rm200_defconfig
index 82db4e3e4cf1..c2b4e3f33a73 100644
--- a/arch/mips/configs/rm200_defconfig
+++ b/arch/mips/configs/rm200_defconfig
@@ -37,7 +37,6 @@ CONFIG_INET_XFRM_MODE_TRANSPORT=m
CONFIG_INET_XFRM_MODE_TUNNEL=m
CONFIG_INET_XFRM_MODE_BEET=m
CONFIG_TCP_MD5SIG=y
-CONFIG_IPV6_PRIVACY=y
CONFIG_IPV6_ROUTER_PREF=y
CONFIG_IPV6_ROUTE_INFO=y
CONFIG_INET6_AH=m
diff --git a/arch/mips/dec/int-handler.S b/arch/mips/dec/int-handler.S
index 8c6f508e59de..d7b99180c6e1 100644
--- a/arch/mips/dec/int-handler.S
+++ b/arch/mips/dec/int-handler.S
@@ -5,7 +5,7 @@
* Written by Ralf Baechle and Andreas Busse, modified for DECstation
* support by Paul Antoine and Harald Koerfgen.
*
- * completly rewritten:
+ * completely rewritten:
* Copyright (C) 1998 Harald Koerfgen
*
* Rewritten extensively for controller-driven IRQ support
diff --git a/arch/mips/dec/setup.c b/arch/mips/dec/setup.c
index a0b8943c8f11..1c3bf9fe926f 100644
--- a/arch/mips/dec/setup.c
+++ b/arch/mips/dec/setup.c
@@ -60,6 +60,7 @@ EXPORT_SYMBOL(dec_kn_slot_size);
int dec_tc_bus;
DEFINE_SPINLOCK(ioasic_ssr_lock);
+EXPORT_SYMBOL(ioasic_ssr_lock);
volatile u32 *ioasic_base;
diff --git a/arch/mips/fw/arc/memory.c b/arch/mips/fw/arc/memory.c
index 5537b94572b2..0d75b5a0bad4 100644
--- a/arch/mips/fw/arc/memory.c
+++ b/arch/mips/fw/arc/memory.c
@@ -9,7 +9,7 @@
* PROM library functions for acquiring/using memory descriptors given to us
* from the ARCS firmware. This is only used when CONFIG_ARC_MEMORY is set
* because on some machines like SGI IP27 the ARC memory configuration data
- * completly bogus and alternate easier to use mechanisms are available.
+ * is completely bogus and alternate, easier-to-use mechanisms are available.
*/
#include <linux/init.h>
#include <linux/kernel.h>
diff --git a/arch/mips/include/asm/Kbuild b/arch/mips/include/asm/Kbuild
index c7fe4d01e79c..9740066cc631 100644
--- a/arch/mips/include/asm/Kbuild
+++ b/arch/mips/include/asm/Kbuild
@@ -1,5 +1,6 @@
# MIPS headers
generic-$(CONFIG_GENERIC_CSUM) += checksum.h
+generic-y += clkdev.h
generic-y += cputime.h
generic-y += current.h
generic-y += dma-contiguous.h
diff --git a/arch/mips/include/asm/addrspace.h b/arch/mips/include/asm/addrspace.h
index 3b0e51d5a613..c5b04e752e97 100644
--- a/arch/mips/include/asm/addrspace.h
+++ b/arch/mips/include/asm/addrspace.h
@@ -45,7 +45,7 @@
/*
* Returns the kernel segment base of a given address
*/
-#define KSEGX(a) ((_ACAST32_ (a)) & 0xe0000000)
+#define KSEGX(a) ((_ACAST32_(a)) & _ACAST32_(0xe0000000))
/*
* Returns the physical address of a CKSEGx / XKPHYS address
diff --git a/arch/mips/include/asm/asmmacro.h b/arch/mips/include/asm/asmmacro.h
index 867f924b05c7..56584a659183 100644
--- a/arch/mips/include/asm/asmmacro.h
+++ b/arch/mips/include/asm/asmmacro.h
@@ -19,6 +19,28 @@
#include <asm/asmmacro-64.h>
#endif
+/*
+ * Helper macros for generating raw instruction encodings.
+ */
+#ifdef CONFIG_CPU_MICROMIPS
+ .macro insn32_if_mm enc
+ .insn
+ .hword ((\enc) >> 16)
+ .hword ((\enc) & 0xffff)
+ .endm
+
+ .macro insn_if_mips enc
+ .endm
+#else
+ .macro insn32_if_mm enc
+ .endm
+
+ .macro insn_if_mips enc
+ .insn
+ .word (\enc)
+ .endm
+#endif
+
#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
.macro local_irq_enable reg=t0
ei
@@ -235,6 +257,7 @@
.macro ld_b wd, off, base
.set push
.set mips32r2
+ .set fp=64
.set msa
ld.b $w\wd, \off(\base)
.set pop
@@ -243,6 +266,7 @@
.macro ld_h wd, off, base
.set push
.set mips32r2
+ .set fp=64
.set msa
ld.h $w\wd, \off(\base)
.set pop
@@ -251,6 +275,7 @@
.macro ld_w wd, off, base
.set push
.set mips32r2
+ .set fp=64
.set msa
ld.w $w\wd, \off(\base)
.set pop
@@ -268,6 +293,7 @@
.macro st_b wd, off, base
.set push
.set mips32r2
+ .set fp=64
.set msa
st.b $w\wd, \off(\base)
.set pop
@@ -276,6 +302,7 @@
.macro st_h wd, off, base
.set push
.set mips32r2
+ .set fp=64
.set msa
st.h $w\wd, \off(\base)
.set pop
@@ -284,6 +311,7 @@
.macro st_w wd, off, base
.set push
.set mips32r2
+ .set fp=64
.set msa
st.w $w\wd, \off(\base)
.set pop
@@ -298,21 +326,21 @@
.set pop
.endm
- .macro copy_u_w ws, n
+ .macro copy_s_w ws, n
.set push
.set mips32r2
.set fp=64
.set msa
- copy_u.w $1, $w\ws[\n]
+ copy_s.w $1, $w\ws[\n]
.set pop
.endm
- .macro copy_u_d ws, n
+ .macro copy_s_d ws, n
.set push
.set mips64r2
.set fp=64
.set msa
- copy_u.d $1, $w\ws[\n]
+ copy_s.d $1, $w\ws[\n]
.set pop
.endm
@@ -335,38 +363,6 @@
.endm
#else
-#ifdef CONFIG_CPU_MICROMIPS
-#define CFC_MSA_INSN 0x587e0056
-#define CTC_MSA_INSN 0x583e0816
-#define LDB_MSA_INSN 0x58000807
-#define LDH_MSA_INSN 0x58000817
-#define LDW_MSA_INSN 0x58000827
-#define LDD_MSA_INSN 0x58000837
-#define STB_MSA_INSN 0x5800080f
-#define STH_MSA_INSN 0x5800081f
-#define STW_MSA_INSN 0x5800082f
-#define STD_MSA_INSN 0x5800083f
-#define COPY_UW_MSA_INSN 0x58f00056
-#define COPY_UD_MSA_INSN 0x58f80056
-#define INSERT_W_MSA_INSN 0x59300816
-#define INSERT_D_MSA_INSN 0x59380816
-#else
-#define CFC_MSA_INSN 0x787e0059
-#define CTC_MSA_INSN 0x783e0819
-#define LDB_MSA_INSN 0x78000820
-#define LDH_MSA_INSN 0x78000821
-#define LDW_MSA_INSN 0x78000822
-#define LDD_MSA_INSN 0x78000823
-#define STB_MSA_INSN 0x78000824
-#define STH_MSA_INSN 0x78000825
-#define STW_MSA_INSN 0x78000826
-#define STD_MSA_INSN 0x78000827
-#define COPY_UW_MSA_INSN 0x78f00059
-#define COPY_UD_MSA_INSN 0x78f80059
-#define INSERT_W_MSA_INSN 0x79300819
-#define INSERT_D_MSA_INSN 0x79380819
-#endif
-
/*
* Temporary until all toolchains in use include MSA support.
*/
@@ -374,8 +370,8 @@
.set push
.set noat
SET_HARDFLOAT
- .insn
- .word CFC_MSA_INSN | (\cs << 11)
+ insn_if_mips 0x787e0059 | (\cs << 11)
+ insn32_if_mm 0x587e0056 | (\cs << 11)
move \rd, $1
.set pop
.endm
@@ -385,7 +381,8 @@
.set noat
SET_HARDFLOAT
move $1, \rs
- .word CTC_MSA_INSN | (\cd << 6)
+ insn_if_mips 0x783e0819 | (\cd << 6)
+ insn32_if_mm 0x583e0816 | (\cd << 6)
.set pop
.endm
@@ -393,8 +390,9 @@
.set push
.set noat
SET_HARDFLOAT
- addu $1, \base, \off
- .word LDB_MSA_INSN | (\wd << 6)
+ PTR_ADDU $1, \base, \off
+ insn_if_mips 0x78000820 | (\wd << 6)
+ insn32_if_mm 0x58000807 | (\wd << 6)
.set pop
.endm
@@ -402,8 +400,9 @@
.set push
.set noat
SET_HARDFLOAT
- addu $1, \base, \off
- .word LDH_MSA_INSN | (\wd << 6)
+ PTR_ADDU $1, \base, \off
+ insn_if_mips 0x78000821 | (\wd << 6)
+ insn32_if_mm 0x58000817 | (\wd << 6)
.set pop
.endm
@@ -411,8 +410,9 @@
.set push
.set noat
SET_HARDFLOAT
- addu $1, \base, \off
- .word LDW_MSA_INSN | (\wd << 6)
+ PTR_ADDU $1, \base, \off
+ insn_if_mips 0x78000822 | (\wd << 6)
+ insn32_if_mm 0x58000827 | (\wd << 6)
.set pop
.endm
@@ -420,8 +420,9 @@
.set push
.set noat
SET_HARDFLOAT
- addu $1, \base, \off
- .word LDD_MSA_INSN | (\wd << 6)
+ PTR_ADDU $1, \base, \off
+ insn_if_mips 0x78000823 | (\wd << 6)
+ insn32_if_mm 0x58000837 | (\wd << 6)
.set pop
.endm
@@ -429,8 +430,9 @@
.set push
.set noat
SET_HARDFLOAT
- addu $1, \base, \off
- .word STB_MSA_INSN | (\wd << 6)
+ PTR_ADDU $1, \base, \off
+ insn_if_mips 0x78000824 | (\wd << 6)
+ insn32_if_mm 0x5800080f | (\wd << 6)
.set pop
.endm
@@ -438,8 +440,9 @@
.set push
.set noat
SET_HARDFLOAT
- addu $1, \base, \off
- .word STH_MSA_INSN | (\wd << 6)
+ PTR_ADDU $1, \base, \off
+ insn_if_mips 0x78000825 | (\wd << 6)
+ insn32_if_mm 0x5800081f | (\wd << 6)
.set pop
.endm
@@ -447,8 +450,9 @@
.set push
.set noat
SET_HARDFLOAT
- addu $1, \base, \off
- .word STW_MSA_INSN | (\wd << 6)
+ PTR_ADDU $1, \base, \off
+ insn_if_mips 0x78000826 | (\wd << 6)
+ insn32_if_mm 0x5800082f | (\wd << 6)
.set pop
.endm
@@ -456,26 +460,27 @@
.set push
.set noat
SET_HARDFLOAT
- addu $1, \base, \off
- .word STD_MSA_INSN | (\wd << 6)
+ PTR_ADDU $1, \base, \off
+ insn_if_mips 0x78000827 | (\wd << 6)
+ insn32_if_mm 0x5800083f | (\wd << 6)
.set pop
.endm
- .macro copy_u_w ws, n
+ .macro copy_s_w ws, n
.set push
.set noat
SET_HARDFLOAT
- .insn
- .word COPY_UW_MSA_INSN | (\n << 16) | (\ws << 11)
+ insn_if_mips 0x78b00059 | (\n << 16) | (\ws << 11)
+ insn32_if_mm 0x58b00056 | (\n << 16) | (\ws << 11)
.set pop
.endm
- .macro copy_u_d ws, n
+ .macro copy_s_d ws, n
.set push
.set noat
SET_HARDFLOAT
- .insn
- .word COPY_UD_MSA_INSN | (\n << 16) | (\ws << 11)
+ insn_if_mips 0x78b80059 | (\n << 16) | (\ws << 11)
+ insn32_if_mm 0x58b80056 | (\n << 16) | (\ws << 11)
.set pop
.endm
@@ -483,7 +488,8 @@
.set push
.set noat
SET_HARDFLOAT
- .word INSERT_W_MSA_INSN | (\n << 16) | (\wd << 6)
+ insn_if_mips 0x79300819 | (\n << 16) | (\wd << 6)
+ insn32_if_mm 0x59300816 | (\n << 16) | (\wd << 6)
.set pop
.endm
@@ -491,46 +497,58 @@
.set push
.set noat
SET_HARDFLOAT
- .word INSERT_D_MSA_INSN | (\n << 16) | (\wd << 6)
+ insn_if_mips 0x79380819 | (\n << 16) | (\wd << 6)
+ insn32_if_mm 0x59380816 | (\n << 16) | (\wd << 6)
.set pop
.endm
#endif
+#ifdef TOOLCHAIN_SUPPORTS_MSA
+#define FPR_BASE_OFFS THREAD_FPR0
+#define FPR_BASE $1
+#else
+#define FPR_BASE_OFFS 0
+#define FPR_BASE \thread
+#endif
+
.macro msa_save_all thread
- st_d 0, THREAD_FPR0, \thread
- st_d 1, THREAD_FPR1, \thread
- st_d 2, THREAD_FPR2, \thread
- st_d 3, THREAD_FPR3, \thread
- st_d 4, THREAD_FPR4, \thread
- st_d 5, THREAD_FPR5, \thread
- st_d 6, THREAD_FPR6, \thread
- st_d 7, THREAD_FPR7, \thread
- st_d 8, THREAD_FPR8, \thread
- st_d 9, THREAD_FPR9, \thread
- st_d 10, THREAD_FPR10, \thread
- st_d 11, THREAD_FPR11, \thread
- st_d 12, THREAD_FPR12, \thread
- st_d 13, THREAD_FPR13, \thread
- st_d 14, THREAD_FPR14, \thread
- st_d 15, THREAD_FPR15, \thread
- st_d 16, THREAD_FPR16, \thread
- st_d 17, THREAD_FPR17, \thread
- st_d 18, THREAD_FPR18, \thread
- st_d 19, THREAD_FPR19, \thread
- st_d 20, THREAD_FPR20, \thread
- st_d 21, THREAD_FPR21, \thread
- st_d 22, THREAD_FPR22, \thread
- st_d 23, THREAD_FPR23, \thread
- st_d 24, THREAD_FPR24, \thread
- st_d 25, THREAD_FPR25, \thread
- st_d 26, THREAD_FPR26, \thread
- st_d 27, THREAD_FPR27, \thread
- st_d 28, THREAD_FPR28, \thread
- st_d 29, THREAD_FPR29, \thread
- st_d 30, THREAD_FPR30, \thread
- st_d 31, THREAD_FPR31, \thread
.set push
.set noat
+#ifdef TOOLCHAIN_SUPPORTS_MSA
+ PTR_ADDU FPR_BASE, \thread, FPR_BASE_OFFS
+#endif
+ st_d 0, THREAD_FPR0 - FPR_BASE_OFFS, FPR_BASE
+ st_d 1, THREAD_FPR1 - FPR_BASE_OFFS, FPR_BASE
+ st_d 2, THREAD_FPR2 - FPR_BASE_OFFS, FPR_BASE
+ st_d 3, THREAD_FPR3 - FPR_BASE_OFFS, FPR_BASE
+ st_d 4, THREAD_FPR4 - FPR_BASE_OFFS, FPR_BASE
+ st_d 5, THREAD_FPR5 - FPR_BASE_OFFS, FPR_BASE
+ st_d 6, THREAD_FPR6 - FPR_BASE_OFFS, FPR_BASE
+ st_d 7, THREAD_FPR7 - FPR_BASE_OFFS, FPR_BASE
+ st_d 8, THREAD_FPR8 - FPR_BASE_OFFS, FPR_BASE
+ st_d 9, THREAD_FPR9 - FPR_BASE_OFFS, FPR_BASE
+ st_d 10, THREAD_FPR10 - FPR_BASE_OFFS, FPR_BASE
+ st_d 11, THREAD_FPR11 - FPR_BASE_OFFS, FPR_BASE
+ st_d 12, THREAD_FPR12 - FPR_BASE_OFFS, FPR_BASE
+ st_d 13, THREAD_FPR13 - FPR_BASE_OFFS, FPR_BASE
+ st_d 14, THREAD_FPR14 - FPR_BASE_OFFS, FPR_BASE
+ st_d 15, THREAD_FPR15 - FPR_BASE_OFFS, FPR_BASE
+ st_d 16, THREAD_FPR16 - FPR_BASE_OFFS, FPR_BASE
+ st_d 17, THREAD_FPR17 - FPR_BASE_OFFS, FPR_BASE
+ st_d 18, THREAD_FPR18 - FPR_BASE_OFFS, FPR_BASE
+ st_d 19, THREAD_FPR19 - FPR_BASE_OFFS, FPR_BASE
+ st_d 20, THREAD_FPR20 - FPR_BASE_OFFS, FPR_BASE
+ st_d 21, THREAD_FPR21 - FPR_BASE_OFFS, FPR_BASE
+ st_d 22, THREAD_FPR22 - FPR_BASE_OFFS, FPR_BASE
+ st_d 23, THREAD_FPR23 - FPR_BASE_OFFS, FPR_BASE
+ st_d 24, THREAD_FPR24 - FPR_BASE_OFFS, FPR_BASE
+ st_d 25, THREAD_FPR25 - FPR_BASE_OFFS, FPR_BASE
+ st_d 26, THREAD_FPR26 - FPR_BASE_OFFS, FPR_BASE
+ st_d 27, THREAD_FPR27 - FPR_BASE_OFFS, FPR_BASE
+ st_d 28, THREAD_FPR28 - FPR_BASE_OFFS, FPR_BASE
+ st_d 29, THREAD_FPR29 - FPR_BASE_OFFS, FPR_BASE
+ st_d 30, THREAD_FPR30 - FPR_BASE_OFFS, FPR_BASE
+ st_d 31, THREAD_FPR31 - FPR_BASE_OFFS, FPR_BASE
SET_HARDFLOAT
_cfcmsa $1, MSA_CSR
sw $1, THREAD_MSA_CSR(\thread)
@@ -543,40 +561,46 @@
SET_HARDFLOAT
lw $1, THREAD_MSA_CSR(\thread)
_ctcmsa MSA_CSR, $1
- .set pop
- ld_d 0, THREAD_FPR0, \thread
- ld_d 1, THREAD_FPR1, \thread
- ld_d 2, THREAD_FPR2, \thread
- ld_d 3, THREAD_FPR3, \thread
- ld_d 4, THREAD_FPR4, \thread
- ld_d 5, THREAD_FPR5, \thread
- ld_d 6, THREAD_FPR6, \thread
- ld_d 7, THREAD_FPR7, \thread
- ld_d 8, THREAD_FPR8, \thread
- ld_d 9, THREAD_FPR9, \thread
- ld_d 10, THREAD_FPR10, \thread
- ld_d 11, THREAD_FPR11, \thread
- ld_d 12, THREAD_FPR12, \thread
- ld_d 13, THREAD_FPR13, \thread
- ld_d 14, THREAD_FPR14, \thread
- ld_d 15, THREAD_FPR15, \thread
- ld_d 16, THREAD_FPR16, \thread
- ld_d 17, THREAD_FPR17, \thread
- ld_d 18, THREAD_FPR18, \thread
- ld_d 19, THREAD_FPR19, \thread
- ld_d 20, THREAD_FPR20, \thread
- ld_d 21, THREAD_FPR21, \thread
- ld_d 22, THREAD_FPR22, \thread
- ld_d 23, THREAD_FPR23, \thread
- ld_d 24, THREAD_FPR24, \thread
- ld_d 25, THREAD_FPR25, \thread
- ld_d 26, THREAD_FPR26, \thread
- ld_d 27, THREAD_FPR27, \thread
- ld_d 28, THREAD_FPR28, \thread
- ld_d 29, THREAD_FPR29, \thread
- ld_d 30, THREAD_FPR30, \thread
- ld_d 31, THREAD_FPR31, \thread
- .endm
+#ifdef TOOLCHAIN_SUPPORTS_MSA
+ PTR_ADDU FPR_BASE, \thread, FPR_BASE_OFFS
+#endif
+ ld_d 0, THREAD_FPR0 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 1, THREAD_FPR1 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 2, THREAD_FPR2 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 3, THREAD_FPR3 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 4, THREAD_FPR4 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 5, THREAD_FPR5 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 6, THREAD_FPR6 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 7, THREAD_FPR7 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 8, THREAD_FPR8 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 9, THREAD_FPR9 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 10, THREAD_FPR10 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 11, THREAD_FPR11 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 12, THREAD_FPR12 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 13, THREAD_FPR13 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 14, THREAD_FPR14 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 15, THREAD_FPR15 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 16, THREAD_FPR16 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 17, THREAD_FPR17 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 18, THREAD_FPR18 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 19, THREAD_FPR19 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 20, THREAD_FPR20 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 21, THREAD_FPR21 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 22, THREAD_FPR22 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 23, THREAD_FPR23 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 24, THREAD_FPR24 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 25, THREAD_FPR25 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 26, THREAD_FPR26 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 27, THREAD_FPR27 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 28, THREAD_FPR28 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 29, THREAD_FPR29 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 30, THREAD_FPR30 - FPR_BASE_OFFS, FPR_BASE
+ ld_d 31, THREAD_FPR31 - FPR_BASE_OFFS, FPR_BASE
+ .set pop
+ .endm
+
+#undef FPR_BASE_OFFS
+#undef FPR_BASE
.macro msa_init_upper wd
#ifdef CONFIG_64BIT
diff --git a/arch/mips/include/asm/atomic.h b/arch/mips/include/asm/atomic.h
index 835b402e4574..0ab176bdb8e8 100644
--- a/arch/mips/include/asm/atomic.h
+++ b/arch/mips/include/asm/atomic.h
@@ -66,7 +66,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
" " #asm_op " %0, %2 \n" \
" sc %0, %1 \n" \
" .set mips0 \n" \
- : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "=&r" (temp), "+" GCC_OFF_SMALL_ASM() (v->counter) \
: "Ir" (i)); \
} while (unlikely(!temp)); \
} else { \
@@ -79,12 +79,10 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
}
#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
-static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
+static __inline__ int atomic_##op##_return_relaxed(int i, atomic_t * v) \
{ \
int result; \
\
- smp_mb__before_llsc(); \
- \
if (kernel_uses_llsc && R10000_LLSC_WAR) { \
int temp; \
\
@@ -125,23 +123,84 @@ static __inline__ int atomic_##op##_return(int i, atomic_t * v) \
raw_local_irq_restore(flags); \
} \
\
- smp_llsc_mb(); \
+ return result; \
+}
+
+#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
+static __inline__ int atomic_fetch_##op##_relaxed(int i, atomic_t * v) \
+{ \
+ int result; \
+ \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ int temp; \
+ \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ "1: ll %1, %2 # atomic_fetch_" #op " \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " sc %0, %2 \n" \
+ " beqzl %0, 1b \n" \
+ " move %0, %1 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ int temp; \
+ \
+ do { \
+ __asm__ __volatile__( \
+ " .set "MIPS_ISA_LEVEL" \n" \
+ " ll %1, %2 # atomic_fetch_" #op " \n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " sc %0, %2 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i)); \
+ } while (unlikely(!result)); \
+ \
+ result = temp; \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ result = v->counter; \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ } \
\
return result; \
}
#define ATOMIC_OPS(op, c_op, asm_op) \
ATOMIC_OP(op, c_op, asm_op) \
- ATOMIC_OP_RETURN(op, c_op, asm_op)
+ ATOMIC_OP_RETURN(op, c_op, asm_op) \
+ ATOMIC_FETCH_OP(op, c_op, asm_op)
ATOMIC_OPS(add, +=, addu)
ATOMIC_OPS(sub, -=, subu)
-ATOMIC_OP(and, &=, and)
-ATOMIC_OP(or, |=, or)
-ATOMIC_OP(xor, ^=, xor)
+#define atomic_add_return_relaxed atomic_add_return_relaxed
+#define atomic_sub_return_relaxed atomic_sub_return_relaxed
+#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
+#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
+
+#undef ATOMIC_OPS
+#define ATOMIC_OPS(op, c_op, asm_op) \
+ ATOMIC_OP(op, c_op, asm_op) \
+ ATOMIC_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC_OPS(and, &=, and)
+ATOMIC_OPS(or, |=, or)
+ATOMIC_OPS(xor, ^=, xor)
+
+#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
+#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
+#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
#undef ATOMIC_OPS
+#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP
@@ -362,12 +421,10 @@ static __inline__ void atomic64_##op(long i, atomic64_t * v) \
}
#define ATOMIC64_OP_RETURN(op, c_op, asm_op) \
-static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
+static __inline__ long atomic64_##op##_return_relaxed(long i, atomic64_t * v) \
{ \
long result; \
\
- smp_mb__before_llsc(); \
- \
if (kernel_uses_llsc && R10000_LLSC_WAR) { \
long temp; \
\
@@ -409,22 +466,85 @@ static __inline__ long atomic64_##op##_return(long i, atomic64_t * v) \
raw_local_irq_restore(flags); \
} \
\
- smp_llsc_mb(); \
+ return result; \
+}
+
+#define ATOMIC64_FETCH_OP(op, c_op, asm_op) \
+static __inline__ long atomic64_fetch_##op##_relaxed(long i, atomic64_t * v) \
+{ \
+ long result; \
+ \
+ if (kernel_uses_llsc && R10000_LLSC_WAR) { \
+ long temp; \
+ \
+ __asm__ __volatile__( \
+ " .set arch=r4000 \n" \
+ "1: lld %1, %2 # atomic64_fetch_" #op "\n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " scd %0, %2 \n" \
+ " beqzl %0, 1b \n" \
+ " move %0, %1 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "+" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i)); \
+ } else if (kernel_uses_llsc) { \
+ long temp; \
+ \
+ do { \
+ __asm__ __volatile__( \
+ " .set "MIPS_ISA_LEVEL" \n" \
+ " lld %1, %2 # atomic64_fetch_" #op "\n" \
+ " " #asm_op " %0, %1, %3 \n" \
+ " scd %0, %2 \n" \
+ " .set mips0 \n" \
+ : "=&r" (result), "=&r" (temp), \
+ "=" GCC_OFF_SMALL_ASM() (v->counter) \
+ : "Ir" (i), GCC_OFF_SMALL_ASM() (v->counter) \
+ : "memory"); \
+ } while (unlikely(!result)); \
+ \
+ result = temp; \
+ } else { \
+ unsigned long flags; \
+ \
+ raw_local_irq_save(flags); \
+ result = v->counter; \
+ v->counter c_op i; \
+ raw_local_irq_restore(flags); \
+ } \
\
return result; \
}
#define ATOMIC64_OPS(op, c_op, asm_op) \
ATOMIC64_OP(op, c_op, asm_op) \
- ATOMIC64_OP_RETURN(op, c_op, asm_op)
+ ATOMIC64_OP_RETURN(op, c_op, asm_op) \
+ ATOMIC64_FETCH_OP(op, c_op, asm_op)
ATOMIC64_OPS(add, +=, daddu)
ATOMIC64_OPS(sub, -=, dsubu)
-ATOMIC64_OP(and, &=, and)
-ATOMIC64_OP(or, |=, or)
-ATOMIC64_OP(xor, ^=, xor)
+
+#define atomic64_add_return_relaxed atomic64_add_return_relaxed
+#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
+#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
+#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
+
+#undef ATOMIC64_OPS
+#define ATOMIC64_OPS(op, c_op, asm_op) \
+ ATOMIC64_OP(op, c_op, asm_op) \
+ ATOMIC64_FETCH_OP(op, c_op, asm_op)
+
+ATOMIC64_OPS(and, &=, and)
+ATOMIC64_OPS(or, |=, or)
+ATOMIC64_OPS(xor, ^=, xor)
+
+#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
+#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
+#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
#undef ATOMIC64_OPS
+#undef ATOMIC64_FETCH_OP
#undef ATOMIC64_OP_RETURN
#undef ATOMIC64_OP
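
The relaxed return/fetch variants above drop the explicit smp_mb__before_llsc()/smp_llsc_mb() barriers; the fully ordered atomic_add_return()/atomic_fetch_add() wrappers are generated from these by <linux/atomic.h>. A minimal sketch of how the two new families differ (illustrative code, not part of the patch; the demo function name is made up):

    #include <linux/atomic.h>

    /* fetch_* returns the value before the op, *_return the value after it. */
    static void atomic_fetch_vs_return_demo(void)
    {
            atomic_t v = ATOMIC_INIT(5);
            int before, after;

            before = atomic_fetch_add_relaxed(3, &v);  /* before == 5,  v is now 8  */
            after  = atomic_add_return_relaxed(3, &v); /* after  == 11, v is now 11 */

            (void)before;
            (void)after;
    }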
diff --git a/arch/mips/include/asm/bitops.h b/arch/mips/include/asm/bitops.h
index ce9666cf1499..fa57cef12a46 100644
--- a/arch/mips/include/asm/bitops.h
+++ b/arch/mips/include/asm/bitops.h
@@ -19,25 +19,10 @@
#include <asm/byteorder.h> /* sigh ... */
#include <asm/compiler.h>
#include <asm/cpu-features.h>
+#include <asm/llsc.h>
#include <asm/sgidefs.h>
#include <asm/war.h>
-#if _MIPS_SZLONG == 32
-#define SZLONG_LOG 5
-#define SZLONG_MASK 31UL
-#define __LL "ll "
-#define __SC "sc "
-#define __INS "ins "
-#define __EXT "ext "
-#elif _MIPS_SZLONG == 64
-#define SZLONG_LOG 6
-#define SZLONG_MASK 63UL
-#define __LL "lld "
-#define __SC "scd "
-#define __INS "dins "
-#define __EXT "dext "
-#endif
-
/*
* These are the "slower" versions of the functions and are in bitops.c.
* These functions call raw_local_irq_{save,restore}().
diff --git a/arch/mips/include/asm/bitrev.h b/arch/mips/include/asm/bitrev.h
new file mode 100644
index 000000000000..bc739a404ae3
--- /dev/null
+++ b/arch/mips/include/asm/bitrev.h
@@ -0,0 +1,30 @@
+#ifndef __MIPS_ASM_BITREV_H__
+#define __MIPS_ASM_BITREV_H__
+
+#include <linux/swab.h>
+
+static __always_inline __attribute_const__ u32 __arch_bitrev32(u32 x)
+{
+ u32 ret;
+
+ asm("bitswap %0, %1" : "=r"(ret) : "r"(__swab32(x)));
+ return ret;
+}
+
+static __always_inline __attribute_const__ u16 __arch_bitrev16(u16 x)
+{
+ u16 ret;
+
+ asm("bitswap %0, %1" : "=r"(ret) : "r"(__swab16(x)));
+ return ret;
+}
+
+static __always_inline __attribute_const__ u8 __arch_bitrev8(u8 x)
+{
+ u8 ret;
+
+ asm("bitswap %0, %1" : "=r"(ret) : "r"(x));
+ return ret;
+}
+
+#endif /* __MIPS_ASM_BITREV_H__ */
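
MIPSr6 bitswap reverses the bits inside each byte, so a full 32-bit reversal is a byte swap followed by bitswap, which is why __arch_bitrev32() feeds __swab32(x) into the instruction. A portable reference for comparison (illustrative only, not part of the patch):

    #include <linux/types.h>

    /* Plain-C equivalent of a full 32-bit bit reversal. */
    static u32 bitrev32_reference(u32 x)
    {
            u32 r = 0;
            int i;

            for (i = 0; i < 32; i++) {
                    r = (r << 1) | (x & 1);
                    x >>= 1;
            }
            return r;
    }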
diff --git a/arch/mips/include/asm/bmips.h b/arch/mips/include/asm/bmips.h
index 6d25ad33ec78..a92aee7b977a 100644
--- a/arch/mips/include/asm/bmips.h
+++ b/arch/mips/include/asm/bmips.h
@@ -88,6 +88,7 @@ extern unsigned long bmips_tp1_irqs;
extern void bmips_ebase_setup(void);
extern asmlinkage void plat_wired_tlb_setup(void);
+extern void bmips_cpu_setup(void);
static inline unsigned long bmips_read_zscm_reg(unsigned int offset)
{
diff --git a/arch/mips/include/asm/bootinfo.h b/arch/mips/include/asm/bootinfo.h
index b603804caac5..ee9f5f2d18fc 100644
--- a/arch/mips/include/asm/bootinfo.h
+++ b/arch/mips/include/asm/bootinfo.h
@@ -127,6 +127,10 @@ extern char arcs_cmdline[COMMAND_LINE_SIZE];
*/
extern unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
+#ifdef CONFIG_USE_OF
+extern unsigned long fw_passed_dtb;
+#endif
+
/*
* Platform memory detection hook called by setup_arch
*/
@@ -144,4 +148,22 @@ static inline void plat_swiotlb_setup(void) {}
#endif /* CONFIG_SWIOTLB */
+#ifdef CONFIG_USE_OF
+/**
+ * plat_get_fdt() - Return a pointer to the platform's device tree blob
+ *
+ * This function provides a platform independent API to get a pointer to the
+ * flattened device tree blob. The interface between bootloader and kernel
+ * is not consistent across platforms so it is necessary to provide this
+ * API such that common startup code can locate the FDT.
+ *
+ * This is used by the KASLR code to get command line arguments and random
+ * seed from the device tree. Any platform wishing to use KASLR should
+ * provide this API and select SYS_SUPPORTS_RELOCATABLE.
+ *
+ * Return: Pointer to the flattened device tree blob.
+ */
+extern void *plat_get_fdt(void);
+#endif /* CONFIG_USE_OF */
+
#endif /* _ASM_BOOTINFO_H */
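
One plausible platform-side implementation of the new hook (a sketch under assumptions, not from the patch: it assumes the platform records the bootloader-passed DTB in fw_passed_dtb and links a fallback DTB at __dtb_start):

    #include <asm/bootinfo.h>

    extern char __dtb_start[];      /* built-in fallback DTB, if configured */

    void *plat_get_fdt(void)
    {
            /* Prefer the DTB handed over by the bootloader. */
            if (fw_passed_dtb)
                    return (void *)fw_passed_dtb;

            return __dtb_start;
    }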
diff --git a/arch/mips/include/asm/cacheflush.h b/arch/mips/include/asm/cacheflush.h
index 723229f4cf27..34ed22ec6c33 100644
--- a/arch/mips/include/asm/cacheflush.h
+++ b/arch/mips/include/asm/cacheflush.h
@@ -51,7 +51,6 @@ extern void (*flush_cache_range)(struct vm_area_struct *vma,
unsigned long start, unsigned long end);
extern void (*flush_cache_page)(struct vm_area_struct *vma, unsigned long page, unsigned long pfn);
extern void __flush_dcache_page(struct page *page);
-extern void __flush_icache_page(struct vm_area_struct *vma, struct page *page);
#define ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE 1
static inline void flush_dcache_page(struct page *page)
@@ -77,11 +76,6 @@ static inline void flush_anon_page(struct vm_area_struct *vma,
static inline void flush_icache_page(struct vm_area_struct *vma,
struct page *page)
{
- if (!cpu_has_ic_fills_f_dc && (vma->vm_flags & VM_EXEC) &&
- Page_dcache_dirty(page)) {
- __flush_icache_page(vma, page);
- ClearPageDcacheDirty(page);
- }
}
extern void (*flush_icache_range)(unsigned long start, unsigned long end);
@@ -132,6 +126,7 @@ static inline void kunmap_noncoherent(void)
static inline void flush_kernel_dcache_page(struct page *page)
{
BUG_ON(cpu_has_dc_aliases && PageHighMem(page));
+ flush_dcache_page(page);
}
/*
diff --git a/arch/mips/include/asm/cacheops.h b/arch/mips/include/asm/cacheops.h
index c3212ff26723..8031fbc6b69a 100644
--- a/arch/mips/include/asm/cacheops.h
+++ b/arch/mips/include/asm/cacheops.h
@@ -21,6 +21,7 @@
#define Cache_I 0x00
#define Cache_D 0x01
#define Cache_T 0x02
+#define Cache_V 0x02 /* Loongson-3 */
#define Cache_S 0x03
#define Index_Writeback_Inv 0x00
@@ -107,4 +108,9 @@
*/
#define Hit_Invalidate_I_Loongson2 (Cache_I | 0x00)
+/*
+ * Loongson3-specific cacheops
+ */
+#define Index_Writeback_Inv_V (Cache_V | Index_Writeback_Inv)
+
#endif /* __ASM_CACHEOPS_H */
diff --git a/arch/mips/include/asm/clkdev.h b/arch/mips/include/asm/clkdev.h
deleted file mode 100644
index 1b3ad7b09dc1..000000000000
--- a/arch/mips/include/asm/clkdev.h
+++ /dev/null
@@ -1,27 +0,0 @@
-/*
- * based on arch/arm/include/asm/clkdev.h
- *
- * Copyright (C) 2008 Russell King.
- *
- * This program is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License version 2 as
- * published by the Free Software Foundation.
- *
- * Helper for the clk API to assist looking up a struct clk.
- */
-#ifndef __ASM_CLKDEV_H
-#define __ASM_CLKDEV_H
-
-#include <linux/slab.h>
-
-#ifndef CONFIG_COMMON_CLK
-#define __clk_get(clk) ({ 1; })
-#define __clk_put(clk) do { } while (0)
-#endif
-
-static inline struct clk_lookup_alloc *__clkdev_alloc(size_t size)
-{
- return kzalloc(size, GFP_KERNEL);
-}
-
-#endif
diff --git a/arch/mips/include/asm/cpu-features.h b/arch/mips/include/asm/cpu-features.h
index eeec8c8e2da2..e961c8a7ea66 100644
--- a/arch/mips/include/asm/cpu-features.h
+++ b/arch/mips/include/asm/cpu-features.h
@@ -35,6 +35,9 @@
#ifndef cpu_has_htw
#define cpu_has_htw (cpu_data[0].options & MIPS_CPU_HTW)
#endif
+#ifndef cpu_has_ldpte
+#define cpu_has_ldpte (cpu_data[0].options & MIPS_CPU_LDPTE)
+#endif
#ifndef cpu_has_rixiex
#define cpu_has_rixiex (cpu_data[0].options & MIPS_CPU_RIXIEX)
#endif
@@ -117,6 +120,21 @@
#ifndef kernel_uses_llsc
#define kernel_uses_llsc cpu_has_llsc
#endif
+#ifndef cpu_has_guestctl0ext
+#define cpu_has_guestctl0ext (cpu_data[0].options & MIPS_CPU_GUESTCTL0EXT)
+#endif
+#ifndef cpu_has_guestctl1
+#define cpu_has_guestctl1 (cpu_data[0].options & MIPS_CPU_GUESTCTL1)
+#endif
+#ifndef cpu_has_guestctl2
+#define cpu_has_guestctl2 (cpu_data[0].options & MIPS_CPU_GUESTCTL2)
+#endif
+#ifndef cpu_has_guestid
+#define cpu_has_guestid (cpu_data[0].options & MIPS_CPU_GUESTID)
+#endif
+#ifndef cpu_has_drg
+#define cpu_has_drg (cpu_data[0].options & MIPS_CPU_DRG)
+#endif
#ifndef cpu_has_mips16
#define cpu_has_mips16 (cpu_data[0].ases & MIPS_ASE_MIPS16)
#endif
@@ -142,8 +160,14 @@
# endif
#endif
+#ifndef cpu_has_lpa
+#define cpu_has_lpa (cpu_data[0].options & MIPS_CPU_LPA)
+#endif
+#ifndef cpu_has_mvh
+#define cpu_has_mvh (cpu_data[0].options & MIPS_CPU_MVH)
+#endif
#ifndef cpu_has_xpa
-#define cpu_has_xpa (cpu_data[0].options & MIPS_CPU_XPA)
+#define cpu_has_xpa (cpu_has_lpa && cpu_has_mvh)
#endif
#ifndef cpu_has_vtag_icache
#define cpu_has_vtag_icache (cpu_data[0].icache.flags & MIPS_CACHE_VTAG)
@@ -180,6 +204,16 @@
#endif
#endif
+/* __builtin_constant_p(cpu_has_mips_r) && cpu_has_mips_r */
+#if !((defined(cpu_has_mips32r1) && cpu_has_mips32r1) || \
+ (defined(cpu_has_mips32r2) && cpu_has_mips32r2) || \
+ (defined(cpu_has_mips32r6) && cpu_has_mips32r6) || \
+ (defined(cpu_has_mips64r1) && cpu_has_mips64r1) || \
+ (defined(cpu_has_mips64r2) && cpu_has_mips64r2) || \
+ (defined(cpu_has_mips64r6) && cpu_has_mips64r6))
+#define CPU_NO_EFFICIENT_FFS 1
+#endif
+
#ifndef cpu_has_mips_1
# define cpu_has_mips_1 (!cpu_has_mips_r6)
#endif
@@ -307,10 +341,18 @@
#define cpu_has_dsp2 (cpu_data[0].ases & MIPS_ASE_DSP2P)
#endif
+#ifndef cpu_has_dsp3
+#define cpu_has_dsp3 (cpu_data[0].ases & MIPS_ASE_DSP3)
+#endif
+
#ifndef cpu_has_mipsmt
#define cpu_has_mipsmt (cpu_data[0].ases & MIPS_ASE_MIPSMT)
#endif
+#ifndef cpu_has_vp
+#define cpu_has_vp (cpu_data[0].options & MIPS_CPU_VP)
+#endif
+
#ifndef cpu_has_userlocal
#define cpu_has_userlocal (cpu_data[0].options & MIPS_CPU_ULRI)
#endif
@@ -421,4 +463,107 @@
#define cpu_has_nan_2008 (cpu_data[0].options & MIPS_CPU_NAN_2008)
#endif
+#ifndef cpu_has_ebase_wg
+# define cpu_has_ebase_wg (cpu_data[0].options & MIPS_CPU_EBASE_WG)
+#endif
+
+#ifndef cpu_has_badinstr
+# define cpu_has_badinstr (cpu_data[0].options & MIPS_CPU_BADINSTR)
+#endif
+
+#ifndef cpu_has_badinstrp
+# define cpu_has_badinstrp (cpu_data[0].options & MIPS_CPU_BADINSTRP)
+#endif
+
+#ifndef cpu_has_contextconfig
+# define cpu_has_contextconfig (cpu_data[0].options & MIPS_CPU_CTXTC)
+#endif
+
+#ifndef cpu_has_perf
+# define cpu_has_perf (cpu_data[0].options & MIPS_CPU_PERF)
+#endif
+
+/*
+ * Guest capabilities
+ */
+#ifndef cpu_guest_has_conf1
+#define cpu_guest_has_conf1 (cpu_data[0].guest.conf & (1 << 1))
+#endif
+#ifndef cpu_guest_has_conf2
+#define cpu_guest_has_conf2 (cpu_data[0].guest.conf & (1 << 2))
+#endif
+#ifndef cpu_guest_has_conf3
+#define cpu_guest_has_conf3 (cpu_data[0].guest.conf & (1 << 3))
+#endif
+#ifndef cpu_guest_has_conf4
+#define cpu_guest_has_conf4 (cpu_data[0].guest.conf & (1 << 4))
+#endif
+#ifndef cpu_guest_has_conf5
+#define cpu_guest_has_conf5 (cpu_data[0].guest.conf & (1 << 5))
+#endif
+#ifndef cpu_guest_has_conf6
+#define cpu_guest_has_conf6 (cpu_data[0].guest.conf & (1 << 6))
+#endif
+#ifndef cpu_guest_has_conf7
+#define cpu_guest_has_conf7 (cpu_data[0].guest.conf & (1 << 7))
+#endif
+#ifndef cpu_guest_has_fpu
+#define cpu_guest_has_fpu (cpu_data[0].guest.options & MIPS_CPU_FPU)
+#endif
+#ifndef cpu_guest_has_watch
+#define cpu_guest_has_watch (cpu_data[0].guest.options & MIPS_CPU_WATCH)
+#endif
+#ifndef cpu_guest_has_contextconfig
+#define cpu_guest_has_contextconfig (cpu_data[0].guest.options & MIPS_CPU_CTXTC)
+#endif
+#ifndef cpu_guest_has_segments
+#define cpu_guest_has_segments (cpu_data[0].guest.options & MIPS_CPU_SEGMENTS)
+#endif
+#ifndef cpu_guest_has_badinstr
+#define cpu_guest_has_badinstr (cpu_data[0].guest.options & MIPS_CPU_BADINSTR)
+#endif
+#ifndef cpu_guest_has_badinstrp
+#define cpu_guest_has_badinstrp (cpu_data[0].guest.options & MIPS_CPU_BADINSTRP)
+#endif
+#ifndef cpu_guest_has_htw
+#define cpu_guest_has_htw (cpu_data[0].guest.options & MIPS_CPU_HTW)
+#endif
+#ifndef cpu_guest_has_msa
+#define cpu_guest_has_msa (cpu_data[0].guest.ases & MIPS_ASE_MSA)
+#endif
+#ifndef cpu_guest_has_kscr
+#define cpu_guest_has_kscr(n) (cpu_data[0].guest.kscratch_mask & (1u << (n)))
+#endif
+#ifndef cpu_guest_has_rw_llb
+#define cpu_guest_has_rw_llb (cpu_has_mips_r6 || (cpu_data[0].guest.options & MIPS_CPU_RW_LLB))
+#endif
+#ifndef cpu_guest_has_perf
+#define cpu_guest_has_perf (cpu_data[0].guest.options & MIPS_CPU_PERF)
+#endif
+#ifndef cpu_guest_has_maar
+#define cpu_guest_has_maar (cpu_data[0].guest.options & MIPS_CPU_MAAR)
+#endif
+
+/*
+ * Guest dynamic capabilities
+ */
+#ifndef cpu_guest_has_dyn_fpu
+#define cpu_guest_has_dyn_fpu (cpu_data[0].guest.options_dyn & MIPS_CPU_FPU)
+#endif
+#ifndef cpu_guest_has_dyn_watch
+#define cpu_guest_has_dyn_watch (cpu_data[0].guest.options_dyn & MIPS_CPU_WATCH)
+#endif
+#ifndef cpu_guest_has_dyn_contextconfig
+#define cpu_guest_has_dyn_contextconfig (cpu_data[0].guest.options_dyn & MIPS_CPU_CTXTC)
+#endif
+#ifndef cpu_guest_has_dyn_perf
+#define cpu_guest_has_dyn_perf (cpu_data[0].guest.options_dyn & MIPS_CPU_PERF)
+#endif
+#ifndef cpu_guest_has_dyn_msa
+#define cpu_guest_has_dyn_msa (cpu_data[0].guest.ases_dyn & MIPS_ASE_MSA)
+#endif
+#ifndef cpu_guest_has_dyn_maar
+#define cpu_guest_has_dyn_maar (cpu_data[0].guest.options_dyn & MIPS_CPU_MAAR)
+#endif
+
#endif /* __ASM_CPU_FEATURES_H */
diff --git a/arch/mips/include/asm/cpu-info.h b/arch/mips/include/asm/cpu-info.h
index e7dc785a91ca..edbe2734a1bf 100644
--- a/arch/mips/include/asm/cpu-info.h
+++ b/arch/mips/include/asm/cpu-info.h
@@ -28,6 +28,15 @@ struct cache_desc {
unsigned char flags; /* Flags describing cache properties */
};
+struct guest_info {
+ unsigned long ases;
+ unsigned long ases_dyn;
+ unsigned long long options;
+ unsigned long long options_dyn;
+ u8 conf;
+ u8 kscratch_mask;
+};
+
/*
* Flag definitions
*/
@@ -40,6 +49,9 @@ struct cache_desc {
struct cpuinfo_mips {
unsigned long asid_cache;
+#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
+ unsigned long asid_mask;
+#endif
/*
* Capability and feature descriptor structure for MIPS CPU
@@ -60,6 +72,7 @@ struct cpuinfo_mips {
int tlbsizeftlbways;
struct cache_desc icache; /* Primary I-cache */
struct cache_desc dcache; /* Primary D or combined I/D cache */
+ struct cache_desc vcache; /* Victim cache, between pcache and scache */
struct cache_desc scache; /* Secondary cache */
struct cache_desc tcache; /* Tertiary/split secondary cache */
int srsets; /* Shadow register sets */
@@ -68,7 +81,7 @@ struct cpuinfo_mips {
#ifdef CONFIG_64BIT
int vmbits; /* Virtual memory size in bits */
#endif
-#ifdef CONFIG_MIPS_MT_SMP
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
/*
* There is not necessarily a 1:1 mapping of VPE num to CPU number
* in particular on multi-core systems.
@@ -91,6 +104,11 @@ struct cpuinfo_mips {
* htw_start/htw_stop calls
*/
unsigned int htw_seq;
+
+ /* VZ & Guest features */
+ struct guest_info guest;
+ unsigned int gtoffset_mask;
+ unsigned int guestid_mask;
} __attribute__((aligned(SMP_CACHE_BYTES)));
extern struct cpuinfo_mips cpu_data[];
@@ -102,7 +120,7 @@ extern void cpu_probe(void);
extern void cpu_report(void);
extern const char *__cpu_name[];
-#define cpu_name_string() __cpu_name[smp_processor_id()]
+#define cpu_name_string() __cpu_name[raw_smp_processor_id()]
struct seq_file;
struct notifier_block;
@@ -125,10 +143,31 @@ struct proc_cpuinfo_notifier_args {
unsigned long n;
};
-#ifdef CONFIG_MIPS_MT_SMP
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
# define cpu_vpe_id(cpuinfo) ((cpuinfo)->vpe_id)
#else
# define cpu_vpe_id(cpuinfo) ({ (void)cpuinfo; 0; })
#endif
+static inline unsigned long cpu_asid_inc(void)
+{
+ return 1 << CONFIG_MIPS_ASID_SHIFT;
+}
+
+static inline unsigned long cpu_asid_mask(struct cpuinfo_mips *cpuinfo)
+{
+#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
+ return cpuinfo->asid_mask;
+#endif
+ return ((1 << CONFIG_MIPS_ASID_BITS) - 1) << CONFIG_MIPS_ASID_SHIFT;
+}
+
+static inline void set_cpu_asid_mask(struct cpuinfo_mips *cpuinfo,
+ unsigned long asid_mask)
+{
+#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
+ cpuinfo->asid_mask = asid_mask;
+#endif
+}
+
#endif /* __ASM_CPU_INFO_H */
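
A minimal sketch (not from the patch; the helper name is made up) of how the new accessors are intended to be used: callers compare or advance ASIDs only through cpu_asid_mask() and cpu_asid_inc(), so a CPU with CONFIG_MIPS_ASID_BITS_VARIABLE can install a narrower mask at probe time via set_cpu_asid_mask() without changing any caller.

    #include <linux/types.h>
    #include <asm/cpu-info.h>

    /* Do two ASID values name the same address space on this CPU? */
    static bool asid_matches(struct cpuinfo_mips *c, unsigned long a,
                             unsigned long b)
    {
            return ((a ^ b) & cpu_asid_mask(c)) == 0;
    }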
diff --git a/arch/mips/include/asm/cpu-type.h b/arch/mips/include/asm/cpu-type.h
index abee2bfd10dc..fbe1881f28fc 100644
--- a/arch/mips/include/asm/cpu-type.h
+++ b/arch/mips/include/asm/cpu-type.h
@@ -77,8 +77,13 @@ static inline int __pure __get_cpu_type(const int cpu_type)
*/
#endif
+#ifdef CONFIG_SYS_HAS_CPU_MIPS32_R6
+ case CPU_M6250:
+#endif
+
#ifdef CONFIG_SYS_HAS_CPU_MIPS64_R6
case CPU_I6400:
+ case CPU_P6600:
#endif
#ifdef CONFIG_SYS_HAS_CPU_R3000
diff --git a/arch/mips/include/asm/cpu.h b/arch/mips/include/asm/cpu.h
index a97ca97285ec..f672df8b26d0 100644
--- a/arch/mips/include/asm/cpu.h
+++ b/arch/mips/include/asm/cpu.h
@@ -42,6 +42,7 @@
#define PRID_COMP_LEXRA 0x0b0000
#define PRID_COMP_NETLOGIC 0x0c0000
#define PRID_COMP_CAVIUM 0x0d0000
+#define PRID_COMP_LOONGSON 0x140000
#define PRID_COMP_INGENIC_D0 0xd00000 /* JZ4740, JZ4750 */
#define PRID_COMP_INGENIC_D1 0xd10000 /* JZ4770, JZ4775 */
#define PRID_COMP_INGENIC_E1 0xe10000 /* JZ4780 */
@@ -118,9 +119,11 @@
#define PRID_IMP_INTERAPTIV_MP 0xa100
#define PRID_IMP_PROAPTIV_UP 0xa200
#define PRID_IMP_PROAPTIV_MP 0xa300
+#define PRID_IMP_P6600 0xa400
#define PRID_IMP_M5150 0xa700
#define PRID_IMP_P5600 0xa800
#define PRID_IMP_I6400 0xa900
+#define PRID_IMP_M6250 0xab00
/*
* These are the PRID's for when 23:16 == PRID_COMP_SIBYTE
@@ -169,6 +172,8 @@
#define PRID_IMP_CAVIUM_CNF71XX 0x9400
#define PRID_IMP_CAVIUM_CN78XX 0x9500
#define PRID_IMP_CAVIUM_CN70XX 0x9600
+#define PRID_IMP_CAVIUM_CN73XX 0x9700
+#define PRID_IMP_CAVIUM_CNF75XX 0x9800
/*
* These are the PRID's for when 23:16 == PRID_COMP_INGENIC_*
@@ -237,9 +242,10 @@
#define PRID_REV_LOONGSON1B 0x0020
#define PRID_REV_LOONGSON2E 0x0002
#define PRID_REV_LOONGSON2F 0x0003
-#define PRID_REV_LOONGSON3A 0x0005
+#define PRID_REV_LOONGSON3A_R1 0x0005
#define PRID_REV_LOONGSON3B_R1 0x0006
#define PRID_REV_LOONGSON3B_R2 0x0007
+#define PRID_REV_LOONGSON3A_R2 0x0008
/*
* Older processors used to encode processor version and revision in two
@@ -307,8 +313,8 @@ enum cpu_type_enum {
CPU_4KC, CPU_4KEC, CPU_4KSC, CPU_24K, CPU_34K, CPU_1004K, CPU_74K,
CPU_ALCHEMY, CPU_PR4450, CPU_BMIPS32, CPU_BMIPS3300, CPU_BMIPS4350,
CPU_BMIPS4380, CPU_BMIPS5000, CPU_JZRISC, CPU_LOONGSON1, CPU_M14KC,
- CPU_M14KEC, CPU_INTERAPTIV, CPU_P5600, CPU_PROAPTIV, CPU_1074K, CPU_M5150,
- CPU_I6400,
+ CPU_M14KEC, CPU_INTERAPTIV, CPU_P5600, CPU_PROAPTIV, CPU_1074K,
+ CPU_M5150, CPU_I6400, CPU_P6600, CPU_M6250,
/*
* MIPS64 class processors
@@ -346,48 +352,68 @@ enum cpu_type_enum {
MIPS_CPU_ISA_M64R6)
/*
+ * Private version of BIT_ULL() to escape include file recursion hell.
+ * We soon will have to switch to another mechanism that will work with
+ * more than 64 bits anyway.
+ */
+#define MBIT_ULL(bit) (1ULL << (bit))
+
+/*
* CPU Option encodings
*/
-#define MIPS_CPU_TLB 0x00000001ull /* CPU has TLB */
-#define MIPS_CPU_4KEX 0x00000002ull /* "R4K" exception model */
-#define MIPS_CPU_3K_CACHE 0x00000004ull /* R3000-style caches */
-#define MIPS_CPU_4K_CACHE 0x00000008ull /* R4000-style caches */
-#define MIPS_CPU_TX39_CACHE 0x00000010ull /* TX3900-style caches */
-#define MIPS_CPU_FPU 0x00000020ull /* CPU has FPU */
-#define MIPS_CPU_32FPR 0x00000040ull /* 32 dbl. prec. FP registers */
-#define MIPS_CPU_COUNTER 0x00000080ull /* Cycle count/compare */
-#define MIPS_CPU_WATCH 0x00000100ull /* watchpoint registers */
-#define MIPS_CPU_DIVEC 0x00000200ull /* dedicated interrupt vector */
-#define MIPS_CPU_VCE 0x00000400ull /* virt. coherence conflict possible */
-#define MIPS_CPU_CACHE_CDEX_P 0x00000800ull /* Create_Dirty_Exclusive CACHE op */
-#define MIPS_CPU_CACHE_CDEX_S 0x00001000ull /* ... same for seconary cache ... */
-#define MIPS_CPU_MCHECK 0x00002000ull /* Machine check exception */
-#define MIPS_CPU_EJTAG 0x00004000ull /* EJTAG exception */
-#define MIPS_CPU_NOFPUEX 0x00008000ull /* no FPU exception */
-#define MIPS_CPU_LLSC 0x00010000ull /* CPU has ll/sc instructions */
-#define MIPS_CPU_INCLUSIVE_CACHES 0x00020000ull /* P-cache subset enforced */
-#define MIPS_CPU_PREFETCH 0x00040000ull /* CPU has usable prefetch */
-#define MIPS_CPU_VINT 0x00080000ull /* CPU supports MIPSR2 vectored interrupts */
-#define MIPS_CPU_VEIC 0x00100000ull /* CPU supports MIPSR2 external interrupt controller mode */
-#define MIPS_CPU_ULRI 0x00200000ull /* CPU has ULRI feature */
-#define MIPS_CPU_PCI 0x00400000ull /* CPU has Perf Ctr Int indicator */
-#define MIPS_CPU_RIXI 0x00800000ull /* CPU has TLB Read/eXec Inhibit */
-#define MIPS_CPU_MICROMIPS 0x01000000ull /* CPU has microMIPS capability */
-#define MIPS_CPU_TLBINV 0x02000000ull /* CPU supports TLBINV/F */
-#define MIPS_CPU_SEGMENTS 0x04000000ull /* CPU supports Segmentation Control registers */
-#define MIPS_CPU_EVA 0x80000000ull /* CPU supports Enhanced Virtual Addressing */
-#define MIPS_CPU_HTW 0x100000000ull /* CPU support Hardware Page Table Walker */
-#define MIPS_CPU_RIXIEX 0x200000000ull /* CPU has unique exception codes for {Read, Execute}-Inhibit exceptions */
-#define MIPS_CPU_MAAR 0x400000000ull /* MAAR(I) registers are present */
-#define MIPS_CPU_FRE 0x800000000ull /* FRE & UFE bits implemented */
-#define MIPS_CPU_RW_LLB 0x1000000000ull /* LLADDR/LLB writes are allowed */
-#define MIPS_CPU_XPA 0x2000000000ull /* CPU supports Extended Physical Addressing */
-#define MIPS_CPU_CDMM 0x4000000000ull /* CPU has Common Device Memory Map */
-#define MIPS_CPU_BP_GHIST 0x8000000000ull /* R12K+ Branch Prediction Global History */
-#define MIPS_CPU_SP 0x10000000000ull /* Small (1KB) page support */
-#define MIPS_CPU_FTLB 0x20000000000ull /* CPU has Fixed-page-size TLB */
-#define MIPS_CPU_NAN_LEGACY 0x40000000000ull /* Legacy NaN implemented */
-#define MIPS_CPU_NAN_2008 0x80000000000ull /* 2008 NaN implemented */
+#define MIPS_CPU_TLB MBIT_ULL( 0) /* CPU has TLB */
+#define MIPS_CPU_4KEX MBIT_ULL( 1) /* "R4K" exception model */
+#define MIPS_CPU_3K_CACHE MBIT_ULL( 2) /* R3000-style caches */
+#define MIPS_CPU_4K_CACHE MBIT_ULL( 3) /* R4000-style caches */
+#define MIPS_CPU_TX39_CACHE MBIT_ULL( 4) /* TX3900-style caches */
+#define MIPS_CPU_FPU MBIT_ULL( 5) /* CPU has FPU */
+#define MIPS_CPU_32FPR MBIT_ULL( 6) /* 32 dbl. prec. FP registers */
+#define MIPS_CPU_COUNTER MBIT_ULL( 7) /* Cycle count/compare */
+#define MIPS_CPU_WATCH MBIT_ULL( 8) /* watchpoint registers */
+#define MIPS_CPU_DIVEC MBIT_ULL( 9) /* dedicated interrupt vector */
+#define MIPS_CPU_VCE MBIT_ULL(10) /* virt. coherence conflict possible */
+#define MIPS_CPU_CACHE_CDEX_P MBIT_ULL(11) /* Create_Dirty_Exclusive CACHE op */
+#define MIPS_CPU_CACHE_CDEX_S MBIT_ULL(12) /* ... same for secondary cache ... */
+#define MIPS_CPU_MCHECK MBIT_ULL(13) /* Machine check exception */
+#define MIPS_CPU_EJTAG MBIT_ULL(14) /* EJTAG exception */
+#define MIPS_CPU_NOFPUEX MBIT_ULL(15) /* no FPU exception */
+#define MIPS_CPU_LLSC MBIT_ULL(16) /* CPU has ll/sc instructions */
+#define MIPS_CPU_INCLUSIVE_CACHES MBIT_ULL(17) /* P-cache subset enforced */
+#define MIPS_CPU_PREFETCH MBIT_ULL(18) /* CPU has usable prefetch */
+#define MIPS_CPU_VINT MBIT_ULL(19) /* CPU supports MIPSR2 vectored interrupts */
+#define MIPS_CPU_VEIC MBIT_ULL(20) /* CPU supports MIPSR2 external interrupt controller mode */
+#define MIPS_CPU_ULRI MBIT_ULL(21) /* CPU has ULRI feature */
+#define MIPS_CPU_PCI MBIT_ULL(22) /* CPU has Perf Ctr Int indicator */
+#define MIPS_CPU_RIXI MBIT_ULL(23) /* CPU has TLB Read/eXec Inhibit */
+#define MIPS_CPU_MICROMIPS MBIT_ULL(24) /* CPU has microMIPS capability */
+#define MIPS_CPU_TLBINV MBIT_ULL(25) /* CPU supports TLBINV/F */
+#define MIPS_CPU_SEGMENTS MBIT_ULL(26) /* CPU supports Segmentation Control registers */
+#define MIPS_CPU_EVA MBIT_ULL(27) /* CPU supports Enhanced Virtual Addressing */
+#define MIPS_CPU_HTW MBIT_ULL(28) /* CPU supports Hardware Page Table Walker */
+#define MIPS_CPU_RIXIEX MBIT_ULL(29) /* CPU has unique exception codes for {Read, Execute}-Inhibit exceptions */
+#define MIPS_CPU_MAAR MBIT_ULL(30) /* MAAR(I) registers are present */
+#define MIPS_CPU_FRE MBIT_ULL(31) /* FRE & UFE bits implemented */
+#define MIPS_CPU_RW_LLB MBIT_ULL(32) /* LLADDR/LLB writes are allowed */
+#define MIPS_CPU_LPA MBIT_ULL(33) /* CPU supports Large Physical Addressing */
+#define MIPS_CPU_CDMM MBIT_ULL(34) /* CPU has Common Device Memory Map */
+#define MIPS_CPU_BP_GHIST MBIT_ULL(35) /* R12K+ Branch Prediction Global History */
+#define MIPS_CPU_SP MBIT_ULL(36) /* Small (1KB) page support */
+#define MIPS_CPU_FTLB MBIT_ULL(37) /* CPU has Fixed-page-size TLB */
+#define MIPS_CPU_NAN_LEGACY MBIT_ULL(38) /* Legacy NaN implemented */
+#define MIPS_CPU_NAN_2008 MBIT_ULL(39) /* 2008 NaN implemented */
+#define MIPS_CPU_VP MBIT_ULL(40) /* MIPSr6 Virtual Processors (multi-threading) */
+#define MIPS_CPU_LDPTE MBIT_ULL(41) /* CPU has ldpte/lddir instructions */
+#define MIPS_CPU_MVH MBIT_ULL(42) /* CPU supports MFHC0/MTHC0 */
+#define MIPS_CPU_EBASE_WG MBIT_ULL(43) /* CPU has EBase.WG */
+#define MIPS_CPU_BADINSTR MBIT_ULL(44) /* CPU has BadInstr register */
+#define MIPS_CPU_BADINSTRP MBIT_ULL(45) /* CPU has BadInstrP register */
+#define MIPS_CPU_CTXTC MBIT_ULL(46) /* CPU has [X]ConfigContext registers */
+#define MIPS_CPU_PERF MBIT_ULL(47) /* CPU has MIPS performance counters */
+#define MIPS_CPU_GUESTCTL0EXT MBIT_ULL(48) /* CPU has VZ GuestCtl0Ext register */
+#define MIPS_CPU_GUESTCTL1 MBIT_ULL(49) /* CPU has VZ GuestCtl1 register */
+#define MIPS_CPU_GUESTCTL2 MBIT_ULL(50) /* CPU has VZ GuestCtl2 register */
+#define MIPS_CPU_GUESTID MBIT_ULL(51) /* CPU uses VZ ASE GuestID feature */
+#define MIPS_CPU_DRG MBIT_ULL(52) /* CPU has VZ Direct Root to Guest (DRG) */
/*
* CPU ASE encodings
@@ -401,5 +427,6 @@ enum cpu_type_enum {
#define MIPS_ASE_DSP2P 0x00000040 /* Signal Processing ASE Rev 2 */
#define MIPS_ASE_VZ 0x00000080 /* Virtualization ASE */
#define MIPS_ASE_MSA 0x00000100 /* MIPS SIMD Architecture */
+#define MIPS_ASE_DSP3 0x00000200 /* Signal Processing ASE Rev 3 */
#endif /* _ASM_CPU_H */
diff --git a/arch/mips/include/asm/dsemul.h b/arch/mips/include/asm/dsemul.h
new file mode 100644
index 000000000000..a6e067801f23
--- /dev/null
+++ b/arch/mips/include/asm/dsemul.h
@@ -0,0 +1,92 @@
+/*
+ * Copyright (C) 2016 Imagination Technologies
+ * Author: Paul Burton <paul.burton@imgtec.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __MIPS_ASM_DSEMUL_H__
+#define __MIPS_ASM_DSEMUL_H__
+
+#include <asm/break.h>
+#include <asm/inst.h>
+
+/* Break instruction with special math emu break code set */
+#define BREAK_MATH(micromips) (((micromips) ? 0x7 : 0xd) | (BRK_MEMU << 16))
+
+/* When used as a frame index, indicates the lack of a frame */
+#define BD_EMUFRAME_NONE ((int)BIT(31))
+
+struct mm_struct;
+struct pt_regs;
+struct task_struct;
+
+/**
+ * mips_dsemul() - 'Emulate' an instruction from a branch delay slot
+ * @regs: User thread register context.
+ * @ir: The instruction to be 'emulated'.
+ * @branch_pc: The PC of the branch instruction.
+ * @cont_pc: The PC to continue at following 'emulation'.
+ *
+ * Emulate or execute an arbitrary MIPS instruction within the context of
+ * the current user thread. This is used primarily to handle instructions
+ * in the delay slots of emulated branch instructions, for example FP
+ * branch instructions on systems without an FPU.
+ *
+ * Return: Zero on success, negative if ir is a NOP, signal number on failure.
+ */
+extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
+ unsigned long branch_pc, unsigned long cont_pc);
+
+/**
+ * do_dsemulret() - Return from a delay slot 'emulation' frame
+ * @xcp: User thread register context.
+ *
+ * Call in response to the BRK_MEMU break instruction used to return to
+ * the kernel from branch delay slot 'emulation' frames following a call
+ * to mips_dsemul(). Restores the user thread PC to the value that was
+ * passed as the cont_pc parameter to mips_dsemul().
+ *
+ * Return: True if an emulation frame was returned from, else false.
+ */
+extern bool do_dsemulret(struct pt_regs *xcp);
+
+/**
+ * dsemul_thread_cleanup() - Cleanup thread 'emulation' frame
+ * @tsk: The task structure associated with the thread
+ *
+ * If the thread @tsk has a branch delay slot 'emulation' frame
+ * allocated to it then free that frame.
+ *
+ * Return: True if a frame was freed, else false.
+ */
+extern bool dsemul_thread_cleanup(struct task_struct *tsk);
+
+/**
+ * dsemul_thread_rollback() - Rollback from an 'emulation' frame
+ * @regs: User thread register context.
+ *
+ * If the current thread, whose register context is represented by @regs,
+ * is executing within a delay slot 'emulation' frame then exit that
+ * frame. The PC will be rolled back to the branch if the instruction
+ * that was being 'emulated' has not yet executed, or advanced to the
+ * continuation PC if it has.
+ *
+ * Return: True if a frame was exited, else false.
+ */
+extern bool dsemul_thread_rollback(struct pt_regs *regs);
+
+/**
+ * dsemul_mm_cleanup() - Cleanup per-mm delay slot 'emulation' state
+ * @mm: The struct mm_struct to cleanup state for.
+ *
+ * Cleanup state for the given @mm, ensuring that any memory allocated
+ * for delay slot 'emulation' book-keeping is freed. This is to be called
+ * before @mm is freed in order to avoid memory leaks.
+ */
+extern void dsemul_mm_cleanup(struct mm_struct *mm);
+
+#endif /* __MIPS_ASM_DSEMUL_H__ */
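
A hedged sketch of the calling convention described by the kernel-doc above (the wrapper and its name are invented for illustration; the real caller is the FPU/branch emulator): the delay-slot instruction is passed to mips_dsemul() together with the branch PC and the continuation PC, and a negative return means the instruction was a NOP and can simply be stepped over.

    #include <asm/dsemul.h>
    #include <asm/ptrace.h>

    static int run_delay_slot(struct pt_regs *regs, mips_instruction ds_insn,
                              unsigned long branch_pc, unsigned long cont_pc)
    {
            int err = mips_dsemul(regs, ds_insn, branch_pc, cont_pc);

            if (err < 0) {                  /* delay slot held a NOP */
                    regs->cp0_epc = cont_pc;
                    return 0;
            }

            return err;                     /* 0 on success, else a signal number */
    }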
diff --git a/arch/mips/include/asm/elf.h b/arch/mips/include/asm/elf.h
index e090fc388e02..2b3dc2973670 100644
--- a/arch/mips/include/asm/elf.h
+++ b/arch/mips/include/asm/elf.h
@@ -111,6 +111,11 @@
#define R_MIPS_CALLHI16 30
#define R_MIPS_CALLLO16 31
/*
+ * Introduced for MIPSr6.
+ */
+#define R_MIPS_PC21_S2 60
+#define R_MIPS_PC26_S2 61
+/*
* This range is reserved for vendor specific relocations.
*/
#define R_MIPS_LOVENDOR 100
@@ -170,16 +175,14 @@
#define SHF_MIPS_NAMES 0x02000000
#define SHF_MIPS_NODUPES 0x01000000
-#ifndef ELF_ARCH
-/* ELF register definitions */
-#define ELF_NGREG 45
-#define ELF_NFPREG 33
-
-typedef unsigned long elf_greg_t;
-typedef elf_greg_t elf_gregset_t[ELF_NGREG];
-
-typedef double elf_fpreg_t;
-typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
+#define MIPS_ABI_FP_ANY 0 /* FP ABI doesn't matter */
+#define MIPS_ABI_FP_DOUBLE 1 /* -mdouble-float */
+#define MIPS_ABI_FP_SINGLE 2 /* -msingle-float */
+#define MIPS_ABI_FP_SOFT 3 /* -msoft-float */
+#define MIPS_ABI_FP_OLD_64 4 /* -mips32r2 -mfp64 */
+#define MIPS_ABI_FP_XX 5 /* -mfpxx */
+#define MIPS_ABI_FP_64 6 /* -mips32r2 -mfp64 */
+#define MIPS_ABI_FP_64A 7 /* -mips32r2 -mfp64 -mno-odd-spreg */
struct mips_elf_abiflags_v0 {
uint16_t version; /* Version of flags structure */
@@ -196,16 +199,54 @@ struct mips_elf_abiflags_v0 {
uint32_t flags2;
};
-#define MIPS_ABI_FP_ANY 0 /* FP ABI doesn't matter */
-#define MIPS_ABI_FP_DOUBLE 1 /* -mdouble-float */
-#define MIPS_ABI_FP_SINGLE 2 /* -msingle-float */
-#define MIPS_ABI_FP_SOFT 3 /* -msoft-float */
-#define MIPS_ABI_FP_OLD_64 4 /* -mips32r2 -mfp64 */
-#define MIPS_ABI_FP_XX 5 /* -mfpxx */
-#define MIPS_ABI_FP_64 6 /* -mips32r2 -mfp64 */
-#define MIPS_ABI_FP_64A 7 /* -mips32r2 -mfp64 -mno-odd-spreg */
+#ifndef ELF_ARCH
+/* ELF register definitions */
+#define ELF_NGREG 45
+#define ELF_NFPREG 33
+
+typedef unsigned long elf_greg_t;
+typedef elf_greg_t elf_gregset_t[ELF_NGREG];
+
+typedef double elf_fpreg_t;
+typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
#ifdef CONFIG_32BIT
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch elfo32_check_arch
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS32
+
+#endif /* CONFIG_32BIT */
+
+#ifdef CONFIG_64BIT
+/*
+ * This is used to ensure we don't load something for the wrong architecture.
+ */
+#define elf_check_arch elfn64_check_arch
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#define ELF_CLASS ELFCLASS64
+
+#endif /* CONFIG_64BIT */
+
+/*
+ * These are used to set parameters in the core dumps.
+ */
+#ifdef __MIPSEB__
+#define ELF_DATA ELFDATA2MSB
+#elif defined(__MIPSEL__)
+#define ELF_DATA ELFDATA2LSB
+#endif
+#define ELF_ARCH EM_MIPS
+
+#endif /* !defined(ELF_ARCH) */
/*
* In order to be sure that we don't attempt to execute an O32 binary which
@@ -219,10 +260,15 @@ struct mips_elf_abiflags_v0 {
# define __MIPS_O32_FP64_MUST_BE_ZERO EF_MIPS_FP64
#endif
+#define mips_elf_check_machine(x) ((x)->e_machine == EM_MIPS)
+
+#define vmcore_elf32_check_arch mips_elf_check_machine
+#define vmcore_elf64_check_arch mips_elf_check_machine
+
/*
- * This is used to ensure we don't load something for the wrong architecture.
+ * Return non-zero if HDR identifies an o32 ELF binary.
*/
-#define elf_check_arch(hdr) \
+#define elfo32_check_arch(hdr) \
({ \
int __res = 1; \
struct elfhdr *__h = (hdr); \
@@ -243,17 +289,9 @@ struct mips_elf_abiflags_v0 {
})
/*
- * These are used to set parameters in the core dumps.
- */
-#define ELF_CLASS ELFCLASS32
-
-#endif /* CONFIG_32BIT */
-
-#ifdef CONFIG_64BIT
-/*
- * This is used to ensure we don't load something for the wrong architecture.
+ * Return non-zero if HDR identifies an n64 ELF binary.
*/
-#define elf_check_arch(hdr) \
+#define elfn64_check_arch(hdr) \
({ \
int __res = 1; \
struct elfhdr *__h = (hdr); \
@@ -267,28 +305,23 @@ struct mips_elf_abiflags_v0 {
})
/*
- * These are used to set parameters in the core dumps.
+ * Return non-zero if HDR identifies an n32 ELF binary.
*/
-#define ELF_CLASS ELFCLASS64
-
-#endif /* CONFIG_64BIT */
-
-/*
- * These are used to set parameters in the core dumps.
- */
-#ifdef __MIPSEB__
-#define ELF_DATA ELFDATA2MSB
-#elif defined(__MIPSEL__)
-#define ELF_DATA ELFDATA2LSB
-#endif
-#define ELF_ARCH EM_MIPS
-
-#endif /* !defined(ELF_ARCH) */
-
-#define mips_elf_check_machine(x) ((x)->e_machine == EM_MIPS)
-
-#define vmcore_elf32_check_arch mips_elf_check_machine
-#define vmcore_elf64_check_arch mips_elf_check_machine
+#define elfn32_check_arch(hdr) \
+({ \
+ int __res = 1; \
+ struct elfhdr *__h = (hdr); \
+ \
+ if (!mips_elf_check_machine(__h)) \
+ __res = 0; \
+ if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
+ __res = 0; \
+ if (((__h->e_flags & EF_MIPS_ABI2) == 0) || \
+ ((__h->e_flags & EF_MIPS_ABI) != 0)) \
+ __res = 0; \
+ \
+ __res; \
+})
struct mips_abi;
@@ -300,17 +333,16 @@ extern struct mips_abi mips_abi_n32;
#define SET_PERSONALITY2(ex, state) \
do { \
- if (personality(current->personality) != PER_LINUX) \
- set_personality(PER_LINUX); \
- \
clear_thread_flag(TIF_HYBRID_FPREGS); \
set_thread_flag(TIF_32BIT_FPREGS); \
\
- mips_set_personality_fp(state); \
- \
current->thread.abi = &mips_abi; \
\
+ mips_set_personality_fp(state); \
mips_set_personality_nan(state); \
+ \
+ if (personality(current->personality) != PER_LINUX) \
+ set_personality(PER_LINUX); \
} while (0)
#endif /* CONFIG_32BIT */
@@ -321,6 +353,7 @@ do { \
#define __SET_PERSONALITY32_N32() \
do { \
set_thread_flag(TIF_32BIT_ADDR); \
+ \
current->thread.abi = &mips_abi_n32; \
} while (0)
#else
@@ -336,9 +369,9 @@ do { \
clear_thread_flag(TIF_HYBRID_FPREGS); \
set_thread_flag(TIF_32BIT_FPREGS); \
\
- mips_set_personality_fp(state); \
- \
current->thread.abi = &mips_abi_32; \
+ \
+ mips_set_personality_fp(state); \
} while (0)
#else
#define __SET_PERSONALITY32_O32(ex, state) \
@@ -425,6 +458,7 @@ extern const char *__elf_platform;
#define ELF_ET_DYN_BASE (TASK_SIZE / 3 * 2)
#endif
+/* update AT_VECTOR_SIZE_ARCH if the number of NEW_AUX_ENT entries changes */
#define ARCH_DLINFO \
do { \
NEW_AUX_ENT(AT_SYSINFO_EHDR, \
@@ -465,4 +499,7 @@ extern int arch_check_elf(void *ehdr, bool has_interpreter, void *interp_ehdr,
extern void mips_set_personality_nan(struct arch_elf_state *state);
extern void mips_set_personality_fp(struct arch_elf_state *state);
+#define elf_read_implies_exec(ex, stk) mips_elf_read_implies_exec(&(ex), stk)
+extern int mips_elf_read_implies_exec(void *elf_ex, int exstack);
+
#endif /* _ASM_ELF_H */
diff --git a/arch/mips/include/asm/fpu_emulator.h b/arch/mips/include/asm/fpu_emulator.h
index 3225c3c0724b..355dc25172e7 100644
--- a/arch/mips/include/asm/fpu_emulator.h
+++ b/arch/mips/include/asm/fpu_emulator.h
@@ -24,7 +24,7 @@
#define _ASM_FPU_EMULATOR_H
#include <linux/sched.h>
-#include <asm/break.h>
+#include <asm/dsemul.h>
#include <asm/thread_info.h>
#include <asm/inst.h>
#include <asm/local.h>
@@ -60,27 +60,16 @@ do { \
#define MIPS_FPU_EMU_INC_STATS(M) do { } while (0)
#endif /* CONFIG_DEBUG_FS */
-extern int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
- unsigned long cpc);
-extern int do_dsemulret(struct pt_regs *xcp);
extern int fpu_emulator_cop1Handler(struct pt_regs *xcp,
struct mips_fpu_struct *ctx, int has_fpu,
void *__user *fault_addr);
int process_fpemu_return(int sig, void __user *fault_addr,
unsigned long fcr31);
+int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+ unsigned long *contpc);
int mm_isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
unsigned long *contpc);
-/*
- * Instruction inserted following the badinst to further tag the sequence
- */
-#define BD_COOKIE 0x0000bd36 /* tne $0, $0 with baggage */
-
-/*
- * Break instruction with special math emu break code set
- */
-#define BREAK_MATH(micromips) (((micromips) ? 0x7 : 0xd) | (BRK_MEMU << 16))
-
#define SIGNALLING_NAN 0x7ff800007ff80000LL
static inline void fpu_emulator_init_fpu(void)
diff --git a/arch/mips/include/asm/hazards.h b/arch/mips/include/asm/hazards.h
index 7b99efd31074..e0fecf206f2c 100644
--- a/arch/mips/include/asm/hazards.h
+++ b/arch/mips/include/asm/hazards.h
@@ -22,7 +22,8 @@
/*
* TLB hazards
*/
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) && !defined(CONFIG_CPU_CAVIUM_OCTEON)
+#if (defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)) && \
+ !defined(CONFIG_CPU_CAVIUM_OCTEON) && !defined(CONFIG_LOONGSON3_ENHANCEMENT)
/*
* MIPSR2 defines ehb for hazard avoidance
@@ -57,8 +58,8 @@
* address of a label as argument to inline assembler. Gas otoh has the
* annoying difference between la and dla which are only usable for 32-bit
* rsp. 64-bit code, so can't be used without conditional compilation.
- * The alterantive is switching the assembler to 64-bit code which happens
- * to work right even for 32-bit code ...
+ * The alternative is switching the assembler to 64-bit code which happens
+ * to work right even for 32-bit code...
*/
#define instruction_hazard() \
do { \
@@ -132,8 +133,8 @@ do { \
* address of a label as argument to inline assembler. Gas otoh has the
* annoying difference between la and dla which are only usable for 32-bit
* rsp. 64-bit code, so can't be used without conditional compilation.
- * The alterantive is switching the assembler to 64-bit code which happens
- * to work right even for 32-bit code ...
+ * The alternative is switching the assembler to 64-bit code which happens
+ * to work right even for 32-bit code...
*/
#define __instruction_hazard() \
do { \
@@ -155,8 +156,8 @@ do { \
} while (0)
#elif defined(CONFIG_MIPS_ALCHEMY) || defined(CONFIG_CPU_CAVIUM_OCTEON) || \
- defined(CONFIG_CPU_LOONGSON2) || defined(CONFIG_CPU_R10000) || \
- defined(CONFIG_CPU_R5500) || defined(CONFIG_CPU_XLR)
+ defined(CONFIG_CPU_LOONGSON2) || defined(CONFIG_LOONGSON3_ENHANCEMENT) || \
+ defined(CONFIG_CPU_R10000) || defined(CONFIG_CPU_R5500) || defined(CONFIG_CPU_XLR)
/*
* R10000 rocks - all hazards handled in hardware, so this becomes a nobrainer.
diff --git a/arch/mips/include/asm/highmem.h b/arch/mips/include/asm/highmem.h
index 01880b34a209..64f2500d891b 100644
--- a/arch/mips/include/asm/highmem.h
+++ b/arch/mips/include/asm/highmem.h
@@ -19,8 +19,10 @@
#ifdef __KERNEL__
+#include <linux/bug.h>
#include <linux/interrupt.h>
#include <linux/uaccess.h>
+#include <asm/cpu-features.h>
#include <asm/kmap_types.h>
/* undef for production */
@@ -50,7 +52,7 @@ extern void *kmap_atomic(struct page *page);
extern void __kunmap_atomic(void *kvaddr);
extern void *kmap_atomic_pfn(unsigned long pfn);
-#define flush_cache_kmaps() flush_cache_all()
+#define flush_cache_kmaps() BUG_ON(cpu_has_dc_aliases)
extern void kmap_init(void);
diff --git a/arch/mips/include/asm/io.h b/arch/mips/include/asm/io.h
index 2b4dc7ad53b8..ecabc00c1e66 100644
--- a/arch/mips/include/asm/io.h
+++ b/arch/mips/include/asm/io.h
@@ -304,10 +304,10 @@ static inline void iounmap(const volatile void __iomem *addr)
#undef __IS_KSEG1
}
-#ifdef CONFIG_CPU_CAVIUM_OCTEON
-#define war_octeon_io_reorder_wmb() wmb()
+#if defined(CONFIG_CPU_CAVIUM_OCTEON) || defined(CONFIG_LOONGSON3_ENHANCEMENT)
+#define war_io_reorder_wmb() wmb()
#else
-#define war_octeon_io_reorder_wmb() do { } while (0)
+#define war_io_reorder_wmb() do { } while (0)
#endif
#define __BUILD_MEMORY_SINGLE(pfx, bwlq, type, irq) \
@@ -318,7 +318,7 @@ static inline void pfx##write##bwlq(type val, \
volatile type *__mem; \
type __val; \
\
- war_octeon_io_reorder_wmb(); \
+ war_io_reorder_wmb(); \
\
__mem = (void *)__swizzle_addr_##bwlq((unsigned long)(mem)); \
\
@@ -387,7 +387,7 @@ static inline void pfx##out##bwlq##p(type val, unsigned long port) \
volatile type *__addr; \
type __val; \
\
- war_octeon_io_reorder_wmb(); \
+ war_io_reorder_wmb(); \
\
__addr = (void *)__swizzle_addr_##bwlq(mips_io_port_base + port); \
\
diff --git a/arch/mips/include/asm/irq_regs.h b/arch/mips/include/asm/irq_regs.h
index 33bd2a06de57..8c48d6dd1d78 100644
--- a/arch/mips/include/asm/irq_regs.h
+++ b/arch/mips/include/asm/irq_regs.h
@@ -18,4 +18,14 @@ static inline struct pt_regs *get_irq_regs(void)
return current_thread_info()->regs;
}
+static inline struct pt_regs *set_irq_regs(struct pt_regs *new_regs)
+{
+ struct pt_regs *old_regs;
+
+ old_regs = get_irq_regs();
+ current_thread_info()->regs = new_regs;
+
+ return old_regs;
+}
+
#endif /* __ASM_IRQ_REGS_H */
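
The new set_irq_regs() completes the usual get/set pair; a typical use in an interrupt dispatch path saves and restores the previous pointer around the handlers (illustrative sketch, the function name is made up):

    #include <asm/irq_regs.h>

    static void example_irq_dispatch(struct pt_regs *regs)
    {
            struct pt_regs *old_regs = set_irq_regs(regs);

            /* ... run the platform's interrupt handlers here ... */

            set_irq_regs(old_regs);
    }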
diff --git a/arch/mips/include/asm/irqflags.h b/arch/mips/include/asm/irqflags.h
index 65c351e328cc..9d3610be2323 100644
--- a/arch/mips/include/asm/irqflags.h
+++ b/arch/mips/include/asm/irqflags.h
@@ -41,7 +41,12 @@ static inline unsigned long arch_local_irq_save(void)
" .set push \n"
" .set reorder \n"
" .set noat \n"
+#if defined(CONFIG_CPU_LOONGSON3)
+ " mfc0 %[flags], $12 \n"
+ " di \n"
+#else
" di %[flags] \n"
+#endif
" andi %[flags], 1 \n"
" " __stringify(__irq_disable_hazard) " \n"
" .set pop \n"
diff --git a/arch/mips/include/asm/kvm_host.h b/arch/mips/include/asm/kvm_host.h
index f6b12790716c..b54bcadd8aec 100644
--- a/arch/mips/include/asm/kvm_host.h
+++ b/arch/mips/include/asm/kvm_host.h
@@ -19,6 +19,9 @@
#include <linux/threads.h>
#include <linux/spinlock.h>
+#include <asm/inst.h>
+#include <asm/mipsregs.h>
+
/* MIPS KVM register ids */
#define MIPS_CP0_32(_R, _S) \
(KVM_REG_MIPS_CP0 | KVM_REG_SIZE_U32 | (8 * (_R) + (_S)))
@@ -53,6 +56,12 @@
#define KVM_REG_MIPS_CP0_CONFIG7 MIPS_CP0_32(16, 7)
#define KVM_REG_MIPS_CP0_XCONTEXT MIPS_CP0_64(20, 0)
#define KVM_REG_MIPS_CP0_ERROREPC MIPS_CP0_64(30, 0)
+#define KVM_REG_MIPS_CP0_KSCRATCH1 MIPS_CP0_64(31, 2)
+#define KVM_REG_MIPS_CP0_KSCRATCH2 MIPS_CP0_64(31, 3)
+#define KVM_REG_MIPS_CP0_KSCRATCH3 MIPS_CP0_64(31, 4)
+#define KVM_REG_MIPS_CP0_KSCRATCH4 MIPS_CP0_64(31, 5)
+#define KVM_REG_MIPS_CP0_KSCRATCH5 MIPS_CP0_64(31, 6)
+#define KVM_REG_MIPS_CP0_KSCRATCH6 MIPS_CP0_64(31, 7)
#define KVM_MAX_VCPUS 1
@@ -65,8 +74,14 @@
-/* Special address that contains the comm page, used for reducing # of traps */
-#define KVM_GUEST_COMMPAGE_ADDR 0x0
+/*
+ * Special address that contains the comm page, used for reducing # of traps.
+ * This needs to be within 32KB of 0x0 (so the zero register can be used), but
+ * preferably not at 0x0 so that most kernel NULL pointer dereferences can be
+ * caught.
+ */
+#define KVM_GUEST_COMMPAGE_ADDR ((PAGE_SIZE > 0x8000) ? 0 : \
+ (0x8000 - PAGE_SIZE))
#define KVM_GUEST_KERNEL_MODE(vcpu) ((kvm_read_c0_guest_status(vcpu->arch.cop0) & (ST0_EXL | ST0_ERL)) || \
((kvm_read_c0_guest_status(vcpu->arch.cop0) & KSU_USER) == 0))
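
The new comm page placement is easiest to see with concrete page sizes; a short worked note (illustrative, not part of the patch):

    /*
     * PAGE_SIZE ==  4KB:  0x8000 - 0x1000 = 0x7000
     * PAGE_SIZE == 16KB:  0x8000 - 0x4000 = 0x4000
     * PAGE_SIZE == 64KB:  PAGE_SIZE > 0x8000, so the page stays at 0x0
     *
     * Any address below 0x8000 fits the signed 16-bit offset of a load/store
     * relative to $zero, and keeping the page away from 0x0 (when it fits)
     * preserves faults on most guest NULL pointer dereferences.
     */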
@@ -74,7 +89,7 @@
#define KVM_GUEST_KUSEG 0x00000000UL
#define KVM_GUEST_KSEG0 0x40000000UL
#define KVM_GUEST_KSEG23 0x60000000UL
-#define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0x60000000)
+#define KVM_GUEST_KSEGX(a) ((_ACAST32_(a)) & 0xe0000000)
#define KVM_GUEST_CPHYSADDR(a) ((_ACAST32_(a)) & 0x1fffffff)
#define KVM_GUEST_CKSEG0ADDR(a) (KVM_GUEST_CPHYSADDR(a) | KVM_GUEST_KSEG0)
@@ -93,9 +108,6 @@
#define KVM_INVALID_ADDR 0xdeadbeef
extern atomic_t kvm_mips_instance;
-extern kvm_pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
-extern void (*kvm_mips_release_pfn_clean)(kvm_pfn_t pfn);
-extern bool (*kvm_mips_is_error_pfn)(kvm_pfn_t pfn);
struct kvm_vm_stat {
u32 remote_tlb_flush;
@@ -122,31 +134,10 @@ struct kvm_vcpu_stat {
u32 flush_dcache_exits;
u32 halt_successful_poll;
u32 halt_attempted_poll;
+ u32 halt_poll_invalid;
u32 halt_wakeup;
};
-enum kvm_mips_exit_types {
- WAIT_EXITS,
- CACHE_EXITS,
- SIGNAL_EXITS,
- INT_EXITS,
- COP_UNUSABLE_EXITS,
- TLBMOD_EXITS,
- TLBMISS_LD_EXITS,
- TLBMISS_ST_EXITS,
- ADDRERR_ST_EXITS,
- ADDRERR_LD_EXITS,
- SYSCALL_EXITS,
- RESVD_INST_EXITS,
- BREAK_INST_EXITS,
- TRAP_INST_EXITS,
- MSA_FPE_EXITS,
- FPE_EXITS,
- MSA_DISABLED_EXITS,
- FLUSH_DCACHE_EXITS,
- MAX_KVM_MIPS_EXIT_TYPES
-};
-
struct kvm_arch_memory_slot {
};
@@ -214,73 +205,6 @@ struct mips_coproc {
#define MIPS_CP0_CONFIG4_SEL 4
#define MIPS_CP0_CONFIG5_SEL 5
-/* Config0 register bits */
-#define CP0C0_M 31
-#define CP0C0_K23 28
-#define CP0C0_KU 25
-#define CP0C0_MDU 20
-#define CP0C0_MM 17
-#define CP0C0_BM 16
-#define CP0C0_BE 15
-#define CP0C0_AT 13
-#define CP0C0_AR 10
-#define CP0C0_MT 7
-#define CP0C0_VI 3
-#define CP0C0_K0 0
-
-/* Config1 register bits */
-#define CP0C1_M 31
-#define CP0C1_MMU 25
-#define CP0C1_IS 22
-#define CP0C1_IL 19
-#define CP0C1_IA 16
-#define CP0C1_DS 13
-#define CP0C1_DL 10
-#define CP0C1_DA 7
-#define CP0C1_C2 6
-#define CP0C1_MD 5
-#define CP0C1_PC 4
-#define CP0C1_WR 3
-#define CP0C1_CA 2
-#define CP0C1_EP 1
-#define CP0C1_FP 0
-
-/* Config2 Register bits */
-#define CP0C2_M 31
-#define CP0C2_TU 28
-#define CP0C2_TS 24
-#define CP0C2_TL 20
-#define CP0C2_TA 16
-#define CP0C2_SU 12
-#define CP0C2_SS 8
-#define CP0C2_SL 4
-#define CP0C2_SA 0
-
-/* Config3 Register bits */
-#define CP0C3_M 31
-#define CP0C3_ISA_ON_EXC 16
-#define CP0C3_ULRI 13
-#define CP0C3_DSPP 10
-#define CP0C3_LPA 7
-#define CP0C3_VEIC 6
-#define CP0C3_VInt 5
-#define CP0C3_SP 4
-#define CP0C3_MT 2
-#define CP0C3_SM 1
-#define CP0C3_TL 0
-
-/* MMU types, the first four entries have the same layout as the
- CP0C0_MT field. */
-enum mips_mmu_types {
- MMU_TYPE_NONE,
- MMU_TYPE_R4000,
- MMU_TYPE_RESERVED,
- MMU_TYPE_FMT,
- MMU_TYPE_R3000,
- MMU_TYPE_R6000,
- MMU_TYPE_R8000
-};
-
/* Resume Flags */
#define RESUME_FLAG_DR (1<<0) /* Reload guest nonvolatile state? */
#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
@@ -297,11 +221,6 @@ enum emulation_result {
EMULATE_PRIV_FAIL,
};
-#define MIPS3_PG_G 0x00000001 /* Global; ignore ASID if in lo0 & lo1 */
-#define MIPS3_PG_V 0x00000002 /* Valid */
-#define MIPS3_PG_NV 0x00000000
-#define MIPS3_PG_D 0x00000004 /* Dirty */
-
#define mips3_paddr_to_tlbpfn(x) \
(((unsigned long)(x) >> MIPS3_PG_SHIFT) & MIPS3_PG_FRAME)
#define mips3_tlbpfn_to_paddr(x) \
@@ -311,40 +230,37 @@ enum emulation_result {
#define MIPS3_PG_FRAME 0x3fffffc0
#define VPN2_MASK 0xffffe000
-#define TLB_IS_GLOBAL(x) (((x).tlb_lo0 & MIPS3_PG_G) && \
- ((x).tlb_lo1 & MIPS3_PG_G))
+#define KVM_ENTRYHI_ASID MIPS_ENTRYHI_ASID
+#define TLB_IS_GLOBAL(x) ((x).tlb_lo[0] & (x).tlb_lo[1] & ENTRYLO_G)
#define TLB_VPN2(x) ((x).tlb_hi & VPN2_MASK)
-#define TLB_ASID(x) ((x).tlb_hi & ASID_MASK)
-#define TLB_IS_VALID(x, va) (((va) & (1 << PAGE_SHIFT)) \
- ? ((x).tlb_lo1 & MIPS3_PG_V) \
- : ((x).tlb_lo0 & MIPS3_PG_V))
+#define TLB_ASID(x) ((x).tlb_hi & KVM_ENTRYHI_ASID)
+#define TLB_LO_IDX(x, va) (((va) >> PAGE_SHIFT) & 1)
+#define TLB_IS_VALID(x, va) ((x).tlb_lo[TLB_LO_IDX(x, va)] & ENTRYLO_V)
#define TLB_HI_VPN2_HIT(x, y) ((TLB_VPN2(x) & ~(x).tlb_mask) == \
((y) & VPN2_MASK & ~(x).tlb_mask))
#define TLB_HI_ASID_HIT(x, y) (TLB_IS_GLOBAL(x) || \
- TLB_ASID(x) == ((y) & ASID_MASK))
+ TLB_ASID(x) == ((y) & KVM_ENTRYHI_ASID))
struct kvm_mips_tlb {
long tlb_mask;
long tlb_hi;
- long tlb_lo0;
- long tlb_lo1;
+ long tlb_lo[2];
};
-#define KVM_MIPS_FPU_FPU 0x1
-#define KVM_MIPS_FPU_MSA 0x2
+#define KVM_MIPS_AUX_FPU 0x1
+#define KVM_MIPS_AUX_MSA 0x2
#define KVM_MIPS_GUEST_TLB_SIZE 64
struct kvm_vcpu_arch {
- void *host_ebase, *guest_ebase;
+ void *guest_ebase;
+ int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
unsigned long host_stack;
unsigned long host_gp;
/* Host CP0 registers used when handling exits from guest */
unsigned long host_cp0_badvaddr;
- unsigned long host_cp0_cause;
unsigned long host_cp0_epc;
- unsigned long host_cp0_entryhi;
- uint32_t guest_inst;
+ u32 host_cp0_cause;
/* GPRS */
unsigned long gprs[32];
@@ -354,8 +270,8 @@ struct kvm_vcpu_arch {
/* FPU State */
struct mips_fpu_struct fpu;
- /* Which FPU state is loaded (KVM_MIPS_FPU_*) */
- unsigned int fpu_inuse;
+ /* Which auxiliary state is loaded (KVM_MIPS_AUX_*) */
+ unsigned int aux_inuse;
/* COP0 State */
struct mips_coproc *cop0;
@@ -367,11 +283,11 @@ struct kvm_vcpu_arch {
struct hrtimer comparecount_timer;
/* Count timer control KVM register */
- uint32_t count_ctl;
+ u32 count_ctl;
/* Count bias from the raw time */
- uint32_t count_bias;
+ u32 count_bias;
/* Frequency of timer in Hz */
- uint32_t count_hz;
+ u32 count_hz;
/* Dynamic nanosecond bias (multiple of count_period) to avoid overflow */
s64 count_dyn_bias;
/* Resume time */
@@ -385,7 +301,7 @@ struct kvm_vcpu_arch {
/* Bitmask of pending exceptions to be cleared */
unsigned long pending_exceptions_clr;
- unsigned long pending_load_cause;
+ u32 pending_load_cause;
/* Save/Restore the entryhi register when are are preempted/scheduled back in */
unsigned long preempt_entryhi;
@@ -394,8 +310,8 @@ struct kvm_vcpu_arch {
struct kvm_mips_tlb guest_tlb[KVM_MIPS_GUEST_TLB_SIZE];
/* Cached guest kernel/user ASIDs */
- uint32_t guest_user_asid[NR_CPUS];
- uint32_t guest_kernel_asid[NR_CPUS];
+ u32 guest_user_asid[NR_CPUS];
+ u32 guest_kernel_asid[NR_CPUS];
struct mm_struct guest_kernel_mm, guest_user_mm;
int last_sched_cpu;
@@ -405,6 +321,7 @@ struct kvm_vcpu_arch {
u8 fpu_enabled;
u8 msa_enabled;
+ u8 kscratch_enabled;
};
@@ -458,6 +375,18 @@ struct kvm_vcpu_arch {
#define kvm_write_c0_guest_config7(cop0, val) (cop0->reg[MIPS_CP0_CONFIG][7] = (val))
#define kvm_read_c0_guest_errorepc(cop0) (cop0->reg[MIPS_CP0_ERROR_PC][0])
#define kvm_write_c0_guest_errorepc(cop0, val) (cop0->reg[MIPS_CP0_ERROR_PC][0] = (val))
+#define kvm_read_c0_guest_kscratch1(cop0) (cop0->reg[MIPS_CP0_DESAVE][2])
+#define kvm_read_c0_guest_kscratch2(cop0) (cop0->reg[MIPS_CP0_DESAVE][3])
+#define kvm_read_c0_guest_kscratch3(cop0) (cop0->reg[MIPS_CP0_DESAVE][4])
+#define kvm_read_c0_guest_kscratch4(cop0) (cop0->reg[MIPS_CP0_DESAVE][5])
+#define kvm_read_c0_guest_kscratch5(cop0) (cop0->reg[MIPS_CP0_DESAVE][6])
+#define kvm_read_c0_guest_kscratch6(cop0) (cop0->reg[MIPS_CP0_DESAVE][7])
+#define kvm_write_c0_guest_kscratch1(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][2] = (val))
+#define kvm_write_c0_guest_kscratch2(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][3] = (val))
+#define kvm_write_c0_guest_kscratch3(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][4] = (val))
+#define kvm_write_c0_guest_kscratch4(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][5] = (val))
+#define kvm_write_c0_guest_kscratch5(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][6] = (val))
+#define kvm_write_c0_guest_kscratch6(cop0, val) (cop0->reg[MIPS_CP0_DESAVE][7] = (val))
/*
* Some of the guest registers may be modified asynchronously (e.g. from a
@@ -471,7 +400,7 @@ static inline void _kvm_atomic_set_c0_guest_reg(unsigned long *reg,
unsigned long temp;
do {
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 \n"
" or %0, %2 \n"
" " __SC "%0, %1 \n"
@@ -487,7 +416,7 @@ static inline void _kvm_atomic_clear_c0_guest_reg(unsigned long *reg,
unsigned long temp;
do {
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 \n"
" and %0, %2 \n"
" " __SC "%0, %1 \n"
@@ -504,7 +433,7 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
unsigned long temp;
do {
__asm__ __volatile__(
- " .set mips3 \n"
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
" " __LL "%0, %1 \n"
" and %0, %2 \n"
" or %0, %3 \n"
@@ -539,7 +468,7 @@ static inline void _kvm_atomic_change_c0_guest_reg(unsigned long *reg,
static inline bool kvm_mips_guest_can_have_fpu(struct kvm_vcpu_arch *vcpu)
{
- return (!__builtin_constant_p(cpu_has_fpu) || cpu_has_fpu) &&
+ return (!__builtin_constant_p(raw_cpu_has_fpu) || raw_cpu_has_fpu) &&
vcpu->fpu_enabled;
}
@@ -586,9 +515,11 @@ struct kvm_mips_callbacks {
void (*dequeue_io_int)(struct kvm_vcpu *vcpu,
struct kvm_mips_interrupt *irq);
int (*irq_deliver)(struct kvm_vcpu *vcpu, unsigned int priority,
- uint32_t cause);
+ u32 cause);
int (*irq_clear)(struct kvm_vcpu *vcpu, unsigned int priority,
- uint32_t cause);
+ u32 cause);
+ unsigned long (*num_regs)(struct kvm_vcpu *vcpu);
+ int (*copy_reg_indices)(struct kvm_vcpu *vcpu, u64 __user *indices);
int (*get_one_reg)(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg, s64 *v);
int (*set_one_reg)(struct kvm_vcpu *vcpu,
@@ -602,8 +533,13 @@ int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks);
/* Debug: dump vcpu state */
int kvm_arch_vcpu_dump_regs(struct kvm_vcpu *vcpu);
-/* Trampoline ASM routine to start running in "Guest" context */
-extern int __kvm_mips_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+extern int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu);
+
+/* Building of entry/exception code */
+int kvm_mips_entry_setup(void);
+void *kvm_mips_build_vcpu_run(void *addr);
+void *kvm_mips_build_exception(void *addr, void *handler);
+void *kvm_mips_build_exit(void *addr);
/* FPU/MSA context management */
void __kvm_save_fpu(struct kvm_vcpu_arch *vcpu);
@@ -619,11 +555,11 @@ void kvm_drop_fpu(struct kvm_vcpu *vcpu);
void kvm_lose_fpu(struct kvm_vcpu *vcpu);
/* TLB handling */
-uint32_t kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
+u32 kvm_get_kernel_asid(struct kvm_vcpu *vcpu);
-uint32_t kvm_get_user_asid(struct kvm_vcpu *vcpu);
+u32 kvm_get_user_asid(struct kvm_vcpu *vcpu);
-uint32_t kvm_get_commpage_asid (struct kvm_vcpu *vcpu);
+u32 kvm_get_commpage_asid (struct kvm_vcpu *vcpu);
extern int kvm_mips_handle_kseg0_tlb_fault(unsigned long badbaddr,
struct kvm_vcpu *vcpu);
@@ -632,22 +568,24 @@ extern int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
struct kvm_vcpu *vcpu);
extern int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
- struct kvm_mips_tlb *tlb,
- unsigned long *hpa0,
- unsigned long *hpa1);
+ struct kvm_mips_tlb *tlb);
-extern enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_handle_tlbmod(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
extern void kvm_mips_dump_host_tlbs(void);
extern void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu);
+extern int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
+ unsigned long entrylo0,
+ unsigned long entrylo1,
+ int flush_dcache_mask);
extern void kvm_mips_flush_host_tlb(int skip_kseg0);
extern int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long entryhi);
@@ -664,90 +602,90 @@ extern void kvm_mips_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvm_mips_vcpu_put(struct kvm_vcpu *vcpu);
/* Emulation */
-uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu);
-enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause);
+u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu);
+enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause);
-extern enum emulation_result kvm_mips_emulate_inst(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_inst(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_syscall(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_handle_ri(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_handle_ri(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-extern enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
- uint32_t *opc,
+extern enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
extern enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
struct kvm_run *run);
-uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu);
-void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count);
-void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare);
+u32 kvm_mips_read_count(struct kvm_vcpu *vcpu);
+void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count);
+void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack);
void kvm_mips_init_count(struct kvm_vcpu *vcpu);
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl);
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume);
@@ -756,27 +694,27 @@ void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu);
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu);
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu);
-enum emulation_result kvm_mips_check_privilege(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_check_privilege(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-enum emulation_result kvm_mips_emulate_cache(uint32_t inst,
- uint32_t *opc,
- uint32_t cause,
+enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
+ u32 *opc,
+ u32 cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-enum emulation_result kvm_mips_emulate_CP0(uint32_t inst,
- uint32_t *opc,
- uint32_t cause,
+enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
+ u32 *opc,
+ u32 cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-enum emulation_result kvm_mips_emulate_store(uint32_t inst,
- uint32_t cause,
+enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
+ u32 cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
-enum emulation_result kvm_mips_emulate_load(uint32_t inst,
- uint32_t cause,
+enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
+ u32 cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu);
@@ -786,13 +724,13 @@ unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu);
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu);
/* Dynamic binary translation */
-extern int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
- struct kvm_vcpu *vcpu);
-extern int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+extern int kvm_mips_trans_cache_index(union mips_instruction inst,
+ u32 *opc, struct kvm_vcpu *vcpu);
+extern int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
struct kvm_vcpu *vcpu);
-extern int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc,
+extern int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
struct kvm_vcpu *vcpu);
-extern int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc,
+extern int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
struct kvm_vcpu *vcpu);
/* Misc */
@@ -812,5 +750,6 @@ static inline void kvm_arch_vcpu_uninit(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_sched_in(struct kvm_vcpu *vcpu, int cpu) {}
static inline void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu) {}
static inline void kvm_arch_vcpu_unblocking(struct kvm_vcpu *vcpu) {}
+static inline void kvm_arch_vcpu_block_finish(struct kvm_vcpu *vcpu) {}
#endif /* __MIPS_KVM_HOST_H__ */
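
The new KVM_MIPS_AUX_* flags above replace the FPU-only bookkeeping: aux_inuse is a plain bitmask, so FPU and MSA context can be tracked independently. A minimal standalone sketch of how such a mask is typically queried and updated (illustrative only; the values printed and the layout below are assumptions, not code from this patch):

    #include <stdio.h>

    #define KVM_MIPS_AUX_FPU 0x1	/* FPU register state currently loaded */
    #define KVM_MIPS_AUX_MSA 0x2	/* MSA register state currently loaded */

    int main(void)
    {
    	unsigned int aux_inuse = 0;

    	aux_inuse |= KVM_MIPS_AUX_FPU;		/* FPU context becomes live */
    	if (!(aux_inuse & KVM_MIPS_AUX_MSA))
    		puts("MSA context not loaded");

    	aux_inuse &= ~KVM_MIPS_AUX_FPU;		/* FPU context dropped again */
    	printf("aux_inuse = %#x\n", aux_inuse);	/* prints 0 */
    	return 0;
    }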
diff --git a/arch/mips/include/asm/llsc.h b/arch/mips/include/asm/llsc.h
new file mode 100644
index 000000000000..c6d17d171147
--- /dev/null
+++ b/arch/mips/include/asm/llsc.h
@@ -0,0 +1,28 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Macros for 32/64-bit neutral inline assembler
+ */
+
+#ifndef __ASM_LLSC_H
+#define __ASM_LLSC_H
+
+#if _MIPS_SZLONG == 32
+#define SZLONG_LOG 5
+#define SZLONG_MASK 31UL
+#define __LL "ll "
+#define __SC "sc "
+#define __INS "ins "
+#define __EXT "ext "
+#elif _MIPS_SZLONG == 64
+#define SZLONG_LOG 6
+#define SZLONG_MASK 63UL
+#define __LL "lld "
+#define __SC "scd "
+#define __INS "dins "
+#define __EXT "dext "
+#endif
+
+#endif /* __ASM_LLSC_H */
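
SZLONG_LOG and SZLONG_MASK let word-size-neutral code split a bit number into a word index and a bit offset, while __LL/__SC pick ll/sc or lld/scd to match the register width. A small host-side sketch of the index/offset split (assumption: this mirrors the usual bitops-style use of these constants; it is not code from this patch):

    #include <stdio.h>

    #if __SIZEOF_LONG__ == 4
    # define SZLONG_LOG	5
    # define SZLONG_MASK	31UL
    #else
    # define SZLONG_LOG	6
    # define SZLONG_MASK	63UL
    #endif

    int main(void)
    {
    	unsigned long bits[4] = { 0 };
    	unsigned int nr = 70;				/* arbitrary bit number */
    	unsigned long *word = bits + (nr >> SZLONG_LOG);
    	unsigned long mask = 1UL << (nr & SZLONG_MASK);

    	*word |= mask;					/* set bit 'nr' */
    	printf("word %lu, bit %lu\n",
    	       (unsigned long)(nr >> SZLONG_LOG), nr & SZLONG_MASK);
    	return 0;
    }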
diff --git a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
index ca8077afac4a..456ddba152c4 100644
--- a/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
+++ b/arch/mips/include/asm/mach-au1x00/au1xxx_dbdma.h
@@ -100,7 +100,7 @@ typedef volatile struct au1xxx_ddma_desc {
u32 dscr_nxtptr; /* Next descriptor pointer (mostly) */
/*
* First 32 bytes are HW specific!!!
- * Lets have some SW data following -- make sure it's 32 bytes.
+ * Let's have some SW data following -- make sure it's 32 bytes.
*/
u32 sw_status;
u32 sw_context;
diff --git a/arch/mips/include/asm/mach-au1x00/gpio-au1300.h b/arch/mips/include/asm/mach-au1x00/gpio-au1300.h
index ce02894271c6..d607d643b973 100644
--- a/arch/mips/include/asm/mach-au1x00/gpio-au1300.h
+++ b/arch/mips/include/asm/mach-au1x00/gpio-au1300.h
@@ -140,7 +140,7 @@ static inline int au1300_gpio_getinitlvl(unsigned int gpio)
* Cases 1 and 3 are intended for boards which want to provide their own
* GPIO namespace and -operations (i.e. for example you have 8 GPIOs
* which are in part provided by spare Au1300 GPIO pins and in part by
-* an external FPGA but you still want them to be accssible in linux
+* an external FPGA but you still want them to be accessible in linux
* as gpio0-7. The board can of course use the alchemy_gpioX_* functions
* as required).
*/
diff --git a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
index 466fc85899f4..c4e856f27040 100644
--- a/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
+++ b/arch/mips/include/asm/mach-bcm63xx/bcm63xx_dev_enet.h
@@ -22,7 +22,7 @@ struct bcm63xx_enet_platform_data {
int has_phy_interrupt;
int phy_interrupt;
- /* if has_phy, use autonegociated pause parameters or force
+ /* if has_phy, use autonegotiated pause parameters or force
* them */
int pause_auto;
int pause_rx;
diff --git a/arch/mips/include/asm/mach-bmips/cpu-feature-overrides.h b/arch/mips/include/asm/mach-bmips/cpu-feature-overrides.h
new file mode 100644
index 000000000000..fa0583e1ce0d
--- /dev/null
+++ b/arch/mips/include/asm/mach-bmips/cpu-feature-overrides.h
@@ -0,0 +1,14 @@
+#ifndef __ASM_MACH_BMIPS_CPU_FEATURE_OVERRIDES_H
+#define __ASM_MACH_BMIPS_CPU_FEATURE_OVERRIDES_H
+
+/* Invariants across all BMIPS processors */
+#define cpu_has_vtag_icache 0
+#define cpu_icache_snoops_remote_store 1
+
+/* Processor ISA compatibility is MIPS32R1 */
+#define cpu_has_mips32r1 1
+#define cpu_has_mips32r2 0
+#define cpu_has_mips64r1 0
+#define cpu_has_mips64r2 0
+
+#endif /* __ASM_MACH_BMIPS_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-bmips/ioremap.h b/arch/mips/include/asm/mach-bmips/ioremap.h
new file mode 100644
index 000000000000..29c7a7bb7080
--- /dev/null
+++ b/arch/mips/include/asm/mach-bmips/ioremap.h
@@ -0,0 +1,33 @@
+#ifndef __ASM_MACH_BMIPS_IOREMAP_H
+#define __ASM_MACH_BMIPS_IOREMAP_H
+
+#include <linux/types.h>
+
+static inline phys_addr_t fixup_bigphys_addr(phys_addr_t phys_addr, phys_addr_t size)
+{
+ return phys_addr;
+}
+
+static inline int is_bmips_internal_registers(phys_addr_t offset)
+{
+ if (offset >= 0xfff80000)
+ return 1;
+
+ return 0;
+}
+
+static inline void __iomem *plat_ioremap(phys_addr_t offset, unsigned long size,
+ unsigned long flags)
+{
+ if (is_bmips_internal_registers(offset))
+ return (void __iomem *)offset;
+
+ return NULL;
+}
+
+static inline int plat_iounmap(const volatile void __iomem *addr)
+{
+ return is_bmips_internal_registers((unsigned long)addr);
+}
+
+#endif /* __ASM_MACH_BMIPS_IOREMAP_H */
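
plat_ioremap() above short-circuits ioremap for BMIPS internal registers: any physical offset at or above 0xfff80000 is handed back directly as the mapping, everything else returns NULL and falls through to the generic path, and plat_iounmap() uses the same test to decide whether there is anything to tear down. A quick standalone check of that address test (illustrative; the plain C types are assumptions so the snippet compiles outside the kernel):

    #include <assert.h>
    #include <stdint.h>

    static int is_bmips_internal_registers(uint64_t offset)
    {
    	return offset >= 0xfff80000u;
    }

    int main(void)
    {
    	assert(is_bmips_internal_registers(0xfff80000u));	/* register window */
    	assert(!is_bmips_internal_registers(0x10000000u));	/* ordinary address */
    	return 0;
    }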
diff --git a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
index d68e685cde60..bd8b9bbe1771 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/cpu-feature-overrides.h
@@ -55,7 +55,7 @@
#define cpu_has_mipsmt 0
#define cpu_has_vint 0
#define cpu_has_veic 0
-#define cpu_hwrena_impl_bits 0xc0000000
+#define cpu_hwrena_impl_bits (MIPS_HWRENA_IMPL1 | MIPS_HWRENA_IMPL2)
#define cpu_has_wsbh 1
#define cpu_has_rixi (cpu_data[0].cputype != CPU_CAVIUM_OCTEON)
diff --git a/arch/mips/include/asm/mach-cavium-octeon/irq.h b/arch/mips/include/asm/mach-cavium-octeon/irq.h
index cceae32a0732..64b86b9d30fe 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/irq.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/irq.h
@@ -42,8 +42,6 @@ enum octeon_irq {
OCTEON_IRQ_TIMER1,
OCTEON_IRQ_TIMER2,
OCTEON_IRQ_TIMER3,
- OCTEON_IRQ_USB0,
- OCTEON_IRQ_USB1,
#ifndef CONFIG_PCI_MSI
OCTEON_IRQ_LAST = 127
#endif
diff --git a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
index cf92fe733995..c4873e8594ef 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/kernel-entry-init.h
@@ -141,7 +141,7 @@ octeon_main_processor:
.endm
/*
- * Do SMP slave processor setup necessary before we can savely execute C code.
+ * Do SMP slave processor setup necessary before we can safely execute C code.
*/
.macro smp_slave_setup
.endm
diff --git a/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h b/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
index 374eefafb320..0cf5ac1f7245 100644
--- a/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
+++ b/arch/mips/include/asm/mach-cavium-octeon/mangle-port.h
@@ -12,6 +12,14 @@
#ifdef __BIG_ENDIAN
+static inline bool __should_swizzle_bits(volatile void *a)
+{
+ extern const bool octeon_should_swizzle_table[];
+
+ unsigned long did = ((unsigned long)a >> 40) & 0xff;
+ return octeon_should_swizzle_table[did];
+}
+
# define __swizzle_addr_b(port) (port)
# define __swizzle_addr_w(port) (port)
# define __swizzle_addr_l(port) (port)
@@ -19,6 +27,8 @@
#else /* __LITTLE_ENDIAN */
+#define __should_swizzle_bits(a) false
+
static inline bool __should_swizzle_addr(unsigned long p)
{
/* boot bus? */
@@ -35,40 +45,14 @@ static inline bool __should_swizzle_addr(unsigned long p)
#endif /* __BIG_ENDIAN */
-/*
- * Sane hardware offers swapping of PCI/ISA I/O space accesses in hardware;
- * less sane hardware forces software to fiddle with this...
- *
- * Regardless, if the host bus endianness mismatches that of PCI/ISA, then
- * you can't have the numerical value of data and byte addresses within
- * multibyte quantities both preserved at the same time. Hence two
- * variations of functions: non-prefixed ones that preserve the value
- * and prefixed ones that preserve byte addresses. The latters are
- * typically used for moving raw data between a peripheral and memory (cf.
- * string I/O functions), hence the "__mem_" prefix.
- */
-#if defined(CONFIG_SWAP_IO_SPACE)
# define ioswabb(a, x) (x)
# define __mem_ioswabb(a, x) (x)
-# define ioswabw(a, x) le16_to_cpu(x)
+# define ioswabw(a, x) (__should_swizzle_bits(a) ? le16_to_cpu(x) : x)
# define __mem_ioswabw(a, x) (x)
-# define ioswabl(a, x) le32_to_cpu(x)
+# define ioswabl(a, x) (__should_swizzle_bits(a) ? le32_to_cpu(x) : x)
# define __mem_ioswabl(a, x) (x)
-# define ioswabq(a, x) le64_to_cpu(x)
+# define ioswabq(a, x) (__should_swizzle_bits(a) ? le64_to_cpu(x) : x)
# define __mem_ioswabq(a, x) (x)
-#else
-
-# define ioswabb(a, x) (x)
-# define __mem_ioswabb(a, x) (x)
-# define ioswabw(a, x) (x)
-# define __mem_ioswabw(a, x) cpu_to_le16(x)
-# define ioswabl(a, x) (x)
-# define __mem_ioswabl(a, x) cpu_to_le32(x)
-# define ioswabq(a, x) (x)
-# define __mem_ioswabq(a, x) cpu_to_le32(x)
-
-#endif
-
#endif /* __ASM_MACH_GENERIC_MANGLE_PORT_H */
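
With the CONFIG_SWAP_IO_SPACE split removed, big-endian Octeon now decides per access whether to byte-swap, keyed on the device-id bits 47:40 of the I/O address via octeon_should_swizzle_table[]. A standalone sketch of that decision (the table contents, function name and explicit 64-bit address parameter are assumptions for illustration, not code from this patch):

    #include <stdbool.h>
    #include <stdint.h>
    #include <stdio.h>

    static bool should_swizzle_table[256];	/* stand-in for octeon_should_swizzle_table[] */

    /* Swap a 32-bit value only if the device selected by address bits 47:40 needs it. */
    static uint32_t ioswabl_sketch(uint64_t addr, uint32_t x)
    {
    	uint64_t did = (addr >> 40) & 0xff;

    	return should_swizzle_table[did] ? __builtin_bswap32(x) : x;
    }

    int main(void)
    {
    	should_swizzle_table[0x01] = true;

    	printf("%08x\n", ioswabl_sketch(0x0000010000001000ull, 0x11223344)); /* 44332211 */
    	printf("%08x\n", ioswabl_sketch(0x0000000000001000ull, 0x11223344)); /* 11223344 */
    	return 0;
    }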
diff --git a/arch/mips/include/asm/mach-generic/kernel-entry-init.h b/arch/mips/include/asm/mach-generic/kernel-entry-init.h
index 13b0751b010a..a229297c880b 100644
--- a/arch/mips/include/asm/mach-generic/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-generic/kernel-entry-init.h
@@ -16,7 +16,7 @@
.endm
/*
- * Do SMP slave processor setup necessary before we can savely execute C code.
+ * Do SMP slave processor setup necessary before we can safely execute C code.
*/
.macro smp_slave_setup
.endm
diff --git a/arch/mips/include/asm/mach-ip27/dma-coherence.h b/arch/mips/include/asm/mach-ip27/dma-coherence.h
index 1daa64412569..04d862020ac9 100644
--- a/arch/mips/include/asm/mach-ip27/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip27/dma-coherence.h
@@ -64,7 +64,7 @@ static inline void plat_post_dma_flush(struct device *dev)
static inline int plat_device_is_coherent(struct device *dev)
{
- return 1; /* IP27 non-cohernet mode is unsupported */
+ return 1; /* IP27 non-coherent mode is unsupported */
}
#endif /* __ASM_MACH_IP27_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-ip27/irq.h b/arch/mips/include/asm/mach-ip27/irq.h
index cf4384bfa846..b0b7261ff3ad 100644
--- a/arch/mips/include/asm/mach-ip27/irq.h
+++ b/arch/mips/include/asm/mach-ip27/irq.h
@@ -11,7 +11,7 @@
#define __ASM_MACH_IP27_IRQ_H
/*
- * A hardwired interrupt number is completly stupid for this system - a
+ * A hardwired interrupt number is completely stupid for this system - a
 * large configuration might have thousands if not tens of thousands of
* interrupts.
*/
diff --git a/arch/mips/include/asm/mach-ip27/kernel-entry-init.h b/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
index b087cb83da3a..f992c1db876b 100644
--- a/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-ip27/kernel-entry-init.h
@@ -81,7 +81,7 @@
.endm
/*
- * Do SMP slave processor setup necessary before we can savely execute C code.
+ * Do SMP slave processor setup necessary before we can safely execute C code.
*/
.macro smp_slave_setup
GET_NASID_ASM t1
diff --git a/arch/mips/include/asm/mach-ip32/dma-coherence.h b/arch/mips/include/asm/mach-ip32/dma-coherence.h
index 0a0b0e2ced60..7bdf212587a0 100644
--- a/arch/mips/include/asm/mach-ip32/dma-coherence.h
+++ b/arch/mips/include/asm/mach-ip32/dma-coherence.h
@@ -86,7 +86,7 @@ static inline void plat_post_dma_flush(struct device *dev)
static inline int plat_device_is_coherent(struct device *dev)
{
- return 0; /* IP32 is non-cohernet */
+ return 0; /* IP32 is non-coherent */
}
#endif /* __ASM_MACH_IP32_DMA_COHERENCE_H */
diff --git a/arch/mips/include/asm/mach-jz4740/gpio.h b/arch/mips/include/asm/mach-jz4740/gpio.h
index bf8c3e1860e7..7c7708a23baa 100644
--- a/arch/mips/include/asm/mach-jz4740/gpio.h
+++ b/arch/mips/include/asm/mach-jz4740/gpio.h
@@ -27,7 +27,7 @@ enum jz_gpio_function {
/*
Usually a driver for a SoC component has to request several gpio pins and
- configure them as funcion pins.
+ configure them as function pins.
jz_gpio_bulk_request can be used to ease this process.
Usually one would do something like:
diff --git a/arch/mips/include/asm/mach-jz4740/jz4740_nand.h b/arch/mips/include/asm/mach-jz4740/jz4740_nand.h
index 398733e3e2cf..7f7b0fc554da 100644
--- a/arch/mips/include/asm/mach-jz4740/jz4740_nand.h
+++ b/arch/mips/include/asm/mach-jz4740/jz4740_nand.h
@@ -27,7 +27,7 @@ struct jz_nand_platform_data {
unsigned char banks[JZ_NAND_NUM_BANKS];
- void (*ident_callback)(struct platform_device *, struct nand_chip *,
+ void (*ident_callback)(struct platform_device *, struct mtd_info *,
struct mtd_partition **, int *num_partitions);
};
diff --git a/arch/mips/include/asm/mach-jz4740/platform.h b/arch/mips/include/asm/mach-jz4740/platform.h
index 32cfbe6a191b..073b8bfbb3b3 100644
--- a/arch/mips/include/asm/mach-jz4740/platform.h
+++ b/arch/mips/include/asm/mach-jz4740/platform.h
@@ -19,7 +19,6 @@
#include <linux/platform_device.h>
-extern struct platform_device jz4740_usb_ohci_device;
extern struct platform_device jz4740_udc_device;
extern struct platform_device jz4740_udc_xceiv_device;
extern struct platform_device jz4740_mmc_device;
diff --git a/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
index 98d6a2f14aaf..8e9b022c3594 100644
--- a/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
+++ b/arch/mips/include/asm/mach-lantiq/falcon/lantiq_soc.h
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
*/
#ifndef _LTQ_FALCON_H__
@@ -22,7 +22,7 @@
/*
* during early_printk no ioremap possible at this early stage
- * lets use KSEG1 instead
+ * let's use KSEG1 instead
*/
#define LTQ_ASC0_BASE_ADDR 0x1E100C00
#define LTQ_EARLY_ASC KSEG1ADDR(LTQ_ASC0_BASE_ADDR)
diff --git a/arch/mips/include/asm/mach-lantiq/lantiq.h b/arch/mips/include/asm/mach-lantiq/lantiq.h
index 4e5ae6523cb4..8064d7a4b33d 100644
--- a/arch/mips/include/asm/mach-lantiq/lantiq.h
+++ b/arch/mips/include/asm/mach-lantiq/lantiq.h
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
*/
#ifndef _LANTIQ_H__
#define _LANTIQ_H__
diff --git a/arch/mips/include/asm/mach-lantiq/lantiq_platform.h b/arch/mips/include/asm/mach-lantiq/lantiq_platform.h
index e23bf7c9a2d0..17d2fdcdaef4 100644
--- a/arch/mips/include/asm/mach-lantiq/lantiq_platform.h
+++ b/arch/mips/include/asm/mach-lantiq/lantiq_platform.h
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
*/
#ifndef _LANTIQ_PLATFORM_H__
diff --git a/arch/mips/include/asm/mach-lantiq/xway/irq.h b/arch/mips/include/asm/mach-lantiq/xway/irq.h
index a1471d2dd0d2..83e5f03cccb5 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/irq.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/irq.h
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
*/
#ifndef __LANTIQ_IRQ_H
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
index 5eadfe582529..141076325307 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_irq.h
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
*/
#ifndef _LANTIQ_XWAY_IRQ_H__
diff --git a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
index dd6005b75e0c..17b41bb5991f 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/lantiq_soc.h
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
*/
#ifndef _LTQ_XWAY_H__
@@ -75,7 +75,7 @@ extern __iomem void *ltq_cgu_membase;
/*
* during early_printk no ioremap is possible
- * lets use KSEG1 instead
+ * let's use KSEG1 instead
*/
#define LTQ_ASC1_BASE_ADDR 0x1E100C00
#define LTQ_EARLY_ASC KSEG1ADDR(LTQ_ASC1_BASE_ADDR)
diff --git a/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h b/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
index 5f8693d5ab12..4901833498f7 100644
--- a/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
+++ b/arch/mips/include/asm/mach-lantiq/xway/xway_dma.h
@@ -12,7 +12,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
*
- * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2011 John Crispin <john@phrozen.org>
*/
#ifndef LTQ_DMA_H__
diff --git a/arch/mips/include/asm/mach-loongson32/cpufreq.h b/arch/mips/include/asm/mach-loongson32/cpufreq.h
index 6843fa1a608d..2f1ecb081223 100644
--- a/arch/mips/include/asm/mach-loongson32/cpufreq.h
+++ b/arch/mips/include/asm/mach-loongson32/cpufreq.h
@@ -9,7 +9,6 @@
* option) any later version.
*/
-
#ifndef __ASM_MACH_LOONGSON32_CPUFREQ_H
#define __ASM_MACH_LOONGSON32_CPUFREQ_H
diff --git a/arch/mips/include/asm/mach-loongson32/dma.h b/arch/mips/include/asm/mach-loongson32/dma.h
new file mode 100644
index 000000000000..ad1dec743ccc
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson32/dma.h
@@ -0,0 +1,25 @@
+/*
+ * Copyright (c) 2015 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * Loongson 1 DMA platform support.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __ASM_MACH_LOONGSON32_DMA_H
+#define __ASM_MACH_LOONGSON32_DMA_H
+
+#define LS1X_DMA_CHANNEL0 0
+#define LS1X_DMA_CHANNEL1 1
+#define LS1X_DMA_CHANNEL2 2
+
+struct plat_ls1x_dma {
+ int nr_channels;
+};
+
+extern struct plat_ls1x_dma ls1b_dma_pdata;
+
+#endif /* __ASM_MACH_LOONGSON32_DMA_H */
diff --git a/arch/mips/include/asm/mach-loongson32/irq.h b/arch/mips/include/asm/mach-loongson32/irq.h
index 0d35b994e8d2..c1c744197de4 100644
--- a/arch/mips/include/asm/mach-loongson32/irq.h
+++ b/arch/mips/include/asm/mach-loongson32/irq.h
@@ -9,7 +9,6 @@
* option) any later version.
*/
-
#ifndef __ASM_MACH_LOONGSON32_IRQ_H
#define __ASM_MACH_LOONGSON32_IRQ_H
diff --git a/arch/mips/include/asm/mach-loongson32/loongson1.h b/arch/mips/include/asm/mach-loongson32/loongson1.h
index 12aa129aad80..978f6df8970a 100644
--- a/arch/mips/include/asm/mach-loongson32/loongson1.h
+++ b/arch/mips/include/asm/mach-loongson32/loongson1.h
@@ -9,7 +9,6 @@
* option) any later version.
*/
-
#ifndef __ASM_MACH_LOONGSON32_LOONGSON1_H
#define __ASM_MACH_LOONGSON32_LOONGSON1_H
@@ -18,6 +17,9 @@
/* Loongson 1 Register Bases */
#define LS1X_MUX_BASE 0x1fd00420
#define LS1X_INTC_BASE 0x1fd01040
+#define LS1X_GPIO0_BASE 0x1fd010c0
+#define LS1X_GPIO1_BASE 0x1fd010c4
+#define LS1X_DMAC_BASE 0x1fd01160
#define LS1X_EHCI_BASE 0x1fe00000
#define LS1X_OHCI_BASE 0x1fe08000
#define LS1X_GMAC0_BASE 0x1fe10000
diff --git a/arch/mips/include/asm/mach-loongson32/nand.h b/arch/mips/include/asm/mach-loongson32/nand.h
new file mode 100644
index 000000000000..e274912e9de1
--- /dev/null
+++ b/arch/mips/include/asm/mach-loongson32/nand.h
@@ -0,0 +1,30 @@
+/*
+ * Copyright (c) 2015 Zhang, Keguang <keguang.zhang@gmail.com>
+ *
+ * Loongson 1 NAND platform support.
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License as published by the
+ * Free Software Foundation; either version 2 of the License, or (at your
+ * option) any later version.
+ */
+
+#ifndef __ASM_MACH_LOONGSON32_NAND_H
+#define __ASM_MACH_LOONGSON32_NAND_H
+
+#include <linux/dmaengine.h>
+#include <linux/mtd/partitions.h>
+
+struct plat_ls1x_nand {
+ struct mtd_partition *parts;
+ unsigned int nr_parts;
+
+ int hold_cycle;
+ int wait_cycle;
+};
+
+extern struct plat_ls1x_nand ls1b_nand_pdata;
+
+bool ls1x_dma_filter_fn(struct dma_chan *chan, void *param);
+
+#endif /* __ASM_MACH_LOONGSON32_NAND_H */
diff --git a/arch/mips/include/asm/mach-loongson32/platform.h b/arch/mips/include/asm/mach-loongson32/platform.h
index c32f03f3f72c..672531aa9bef 100644
--- a/arch/mips/include/asm/mach-loongson32/platform.h
+++ b/arch/mips/include/asm/mach-loongson32/platform.h
@@ -7,20 +7,28 @@
* option) any later version.
*/
-
#ifndef __ASM_MACH_LOONGSON32_PLATFORM_H
#define __ASM_MACH_LOONGSON32_PLATFORM_H
#include <linux/platform_device.h>
+#include <dma.h>
+#include <nand.h>
+
extern struct platform_device ls1x_uart_pdev;
extern struct platform_device ls1x_cpufreq_pdev;
+extern struct platform_device ls1x_dma_pdev;
extern struct platform_device ls1x_eth0_pdev;
extern struct platform_device ls1x_eth1_pdev;
extern struct platform_device ls1x_ehci_pdev;
+extern struct platform_device ls1x_gpio0_pdev;
+extern struct platform_device ls1x_gpio1_pdev;
+extern struct platform_device ls1x_nand_pdev;
extern struct platform_device ls1x_rtc_pdev;
-extern void __init ls1x_clk_init(void);
-extern void __init ls1x_serial_setup(struct platform_device *pdev);
+void __init ls1x_clk_init(void);
+void __init ls1x_dma_set_platdata(struct plat_ls1x_dma *pdata);
+void __init ls1x_nand_set_platdata(struct plat_ls1x_nand *pdata);
+void __init ls1x_serial_set_uartclk(struct platform_device *pdev);
#endif /* __ASM_MACH_LOONGSON32_PLATFORM_H */
diff --git a/arch/mips/include/asm/mach-loongson32/regs-clk.h b/arch/mips/include/asm/mach-loongson32/regs-clk.h
index 1f5a715ac841..4d56fc38f0c4 100644
--- a/arch/mips/include/asm/mach-loongson32/regs-clk.h
+++ b/arch/mips/include/asm/mach-loongson32/regs-clk.h
@@ -19,18 +19,18 @@
#define LS1X_CLK_PLL_DIV LS1X_CLK_REG(0x4)
/* Clock PLL Divisor Register Bits */
-#define DIV_DC_EN (0x1 << 31)
-#define DIV_DC_RST (0x1 << 30)
-#define DIV_CPU_EN (0x1 << 25)
-#define DIV_CPU_RST (0x1 << 24)
-#define DIV_DDR_EN (0x1 << 19)
-#define DIV_DDR_RST (0x1 << 18)
-#define RST_DC_EN (0x1 << 5)
-#define RST_DC (0x1 << 4)
-#define RST_DDR_EN (0x1 << 3)
-#define RST_DDR (0x1 << 2)
-#define RST_CPU_EN (0x1 << 1)
-#define RST_CPU 0x1
+#define DIV_DC_EN BIT(31)
+#define DIV_DC_RST BIT(30)
+#define DIV_CPU_EN BIT(25)
+#define DIV_CPU_RST BIT(24)
+#define DIV_DDR_EN BIT(19)
+#define DIV_DDR_RST BIT(18)
+#define RST_DC_EN BIT(5)
+#define RST_DC BIT(4)
+#define RST_DDR_EN BIT(3)
+#define RST_DDR BIT(2)
+#define RST_CPU_EN BIT(1)
+#define RST_CPU BIT(0)
#define DIV_DC_SHIFT 26
#define DIV_CPU_SHIFT 20
diff --git a/arch/mips/include/asm/mach-loongson32/regs-mux.h b/arch/mips/include/asm/mach-loongson32/regs-mux.h
index 8302d92f2da2..7c394f93cb9e 100644
--- a/arch/mips/include/asm/mach-loongson32/regs-mux.h
+++ b/arch/mips/include/asm/mach-loongson32/regs-mux.h
@@ -19,49 +19,49 @@
#define LS1X_MUX_CTRL1 LS1X_MUX_REG(0x4)
/* MUX CTRL0 Register Bits */
-#define UART0_USE_PWM23 (0x1 << 28)
-#define UART0_USE_PWM01 (0x1 << 27)
-#define UART1_USE_LCD0_5_6_11 (0x1 << 26)
-#define I2C2_USE_CAN1 (0x1 << 25)
-#define I2C1_USE_CAN0 (0x1 << 24)
-#define NAND3_USE_UART5 (0x1 << 23)
-#define NAND3_USE_UART4 (0x1 << 22)
-#define NAND3_USE_UART1_DAT (0x1 << 21)
-#define NAND3_USE_UART1_CTS (0x1 << 20)
-#define NAND3_USE_PWM23 (0x1 << 19)
-#define NAND3_USE_PWM01 (0x1 << 18)
-#define NAND2_USE_UART5 (0x1 << 17)
-#define NAND2_USE_UART4 (0x1 << 16)
-#define NAND2_USE_UART1_DAT (0x1 << 15)
-#define NAND2_USE_UART1_CTS (0x1 << 14)
-#define NAND2_USE_PWM23 (0x1 << 13)
-#define NAND2_USE_PWM01 (0x1 << 12)
-#define NAND1_USE_UART5 (0x1 << 11)
-#define NAND1_USE_UART4 (0x1 << 10)
-#define NAND1_USE_UART1_DAT (0x1 << 9)
-#define NAND1_USE_UART1_CTS (0x1 << 8)
-#define NAND1_USE_PWM23 (0x1 << 7)
-#define NAND1_USE_PWM01 (0x1 << 6)
-#define GMAC1_USE_UART1 (0x1 << 4)
-#define GMAC1_USE_UART0 (0x1 << 3)
-#define LCD_USE_UART0_DAT (0x1 << 2)
-#define LCD_USE_UART15 (0x1 << 1)
-#define LCD_USE_UART0 0x1
+#define UART0_USE_PWM23 BIT(28)
+#define UART0_USE_PWM01 BIT(27)
+#define UART1_USE_LCD0_5_6_11 BIT(26)
+#define I2C2_USE_CAN1 BIT(25)
+#define I2C1_USE_CAN0 BIT(24)
+#define NAND3_USE_UART5 BIT(23)
+#define NAND3_USE_UART4 BIT(22)
+#define NAND3_USE_UART1_DAT BIT(21)
+#define NAND3_USE_UART1_CTS BIT(20)
+#define NAND3_USE_PWM23 BIT(19)
+#define NAND3_USE_PWM01 BIT(18)
+#define NAND2_USE_UART5 BIT(17)
+#define NAND2_USE_UART4 BIT(16)
+#define NAND2_USE_UART1_DAT BIT(15)
+#define NAND2_USE_UART1_CTS BIT(14)
+#define NAND2_USE_PWM23 BIT(13)
+#define NAND2_USE_PWM01 BIT(12)
+#define NAND1_USE_UART5 BIT(11)
+#define NAND1_USE_UART4 BIT(10)
+#define NAND1_USE_UART1_DAT BIT(9)
+#define NAND1_USE_UART1_CTS BIT(8)
+#define NAND1_USE_PWM23 BIT(7)
+#define NAND1_USE_PWM01 BIT(6)
+#define GMAC1_USE_UART1 BIT(4)
+#define GMAC1_USE_UART0 BIT(3)
+#define LCD_USE_UART0_DAT BIT(2)
+#define LCD_USE_UART15 BIT(1)
+#define LCD_USE_UART0 BIT(0)
/* MUX CTRL1 Register Bits */
-#define USB_RESET (0x1 << 31)
-#define SPI1_CS_USE_PWM01 (0x1 << 24)
-#define SPI1_USE_CAN (0x1 << 23)
-#define DISABLE_DDR_CONFSPACE (0x1 << 20)
-#define DDR32TO16EN (0x1 << 16)
-#define GMAC1_SHUT (0x1 << 13)
-#define GMAC0_SHUT (0x1 << 12)
-#define USB_SHUT (0x1 << 11)
-#define UART1_3_USE_CAN1 (0x1 << 5)
-#define UART1_2_USE_CAN0 (0x1 << 4)
-#define GMAC1_USE_TXCLK (0x1 << 3)
-#define GMAC0_USE_TXCLK (0x1 << 2)
-#define GMAC1_USE_PWM23 (0x1 << 1)
-#define GMAC0_USE_PWM01 0x1
+#define USB_RESET BIT(31)
+#define SPI1_CS_USE_PWM01 BIT(24)
+#define SPI1_USE_CAN BIT(23)
+#define DISABLE_DDR_CONFSPACE BIT(20)
+#define DDR32TO16EN BIT(16)
+#define GMAC1_SHUT BIT(13)
+#define GMAC0_SHUT BIT(12)
+#define USB_SHUT BIT(11)
+#define UART1_3_USE_CAN1 BIT(5)
+#define UART1_2_USE_CAN0 BIT(4)
+#define GMAC1_USE_TXCLK BIT(3)
+#define GMAC0_USE_TXCLK BIT(2)
+#define GMAC1_USE_PWM23 BIT(1)
+#define GMAC0_USE_PWM01 BIT(0)
#endif /* __ASM_MACH_LOONGSON32_REGS_MUX_H */
diff --git a/arch/mips/include/asm/mach-loongson32/regs-pwm.h b/arch/mips/include/asm/mach-loongson32/regs-pwm.h
index 69f174ed13a4..4119600ce79a 100644
--- a/arch/mips/include/asm/mach-loongson32/regs-pwm.h
+++ b/arch/mips/include/asm/mach-loongson32/regs-pwm.h
@@ -19,11 +19,11 @@
#define PWM_CTRL 0xc
/* PWM Control Register Bits */
-#define CNT_RST (0x1 << 7)
-#define INT_SR (0x1 << 6)
-#define INT_EN (0x1 << 5)
-#define PWM_SINGLE (0x1 << 4)
-#define PWM_OE (0x1 << 3)
-#define CNT_EN 0x1
+#define CNT_RST BIT(7)
+#define INT_SR BIT(6)
+#define INT_EN BIT(5)
+#define PWM_SINGLE BIT(4)
+#define PWM_OE BIT(3)
+#define CNT_EN BIT(0)
#endif /* __ASM_MACH_LOONGSON32_REGS_PWM_H */
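
The regs-clk.h, regs-mux.h and regs-pwm.h changes above are a mechanical rewrite of (0x1 << n) literals to BIT(n). In the kernel BIT(n) expands to (1UL << (n)), so the values are unchanged and single-bit masks such as DIV_DC_EN or USB_RESET become unsigned long instead of a signed int shifted into the sign bit. A quick standalone check (BIT() is restated here so the snippet compiles on its own):

    #include <assert.h>

    #define BIT(nr) (1UL << (nr))	/* as in the kernel's bitops header */

    int main(void)
    {
    	assert(BIT(31) == 0x80000000UL);	/* DIV_DC_EN, USB_RESET */
    	assert(BIT(0)  == 0x1UL);		/* RST_CPU, CNT_EN, LCD_USE_UART0 */
    	return 0;
    }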
diff --git a/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h b/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
index 98963c2c7be4..89328a3d44d8 100644
--- a/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
+++ b/arch/mips/include/asm/mach-loongson64/cpu-feature-overrides.h
@@ -16,11 +16,6 @@
#ifndef __ASM_MACH_LOONGSON64_CPU_FEATURE_OVERRIDES_H
#define __ASM_MACH_LOONGSON64_CPU_FEATURE_OVERRIDES_H
-#define cpu_dcache_line_size() 32
-#define cpu_icache_line_size() 32
-#define cpu_scache_line_size() 32
-
-
#define cpu_has_32fpr 1
#define cpu_has_3k_cache 0
#define cpu_has_4k_cache 1
@@ -31,24 +26,17 @@
#define cpu_has_counter 1
#define cpu_has_dc_aliases (PAGE_SIZE < 0x4000)
#define cpu_has_divec 0
-#define cpu_has_dsp 0
-#define cpu_has_dsp2 0
#define cpu_has_ejtag 0
-#define cpu_has_ic_fills_f_dc 0
#define cpu_has_inclusive_pcaches 1
#define cpu_has_llsc 1
#define cpu_has_mcheck 0
#define cpu_has_mdmx 0
#define cpu_has_mips16 0
-#define cpu_has_mips32r2 0
#define cpu_has_mips3d 0
-#define cpu_has_mips64r2 0
#define cpu_has_mipsmt 0
-#define cpu_has_prefetch 0
#define cpu_has_smartmips 0
#define cpu_has_tlb 1
#define cpu_has_tx39_cache 0
-#define cpu_has_userlocal 0
#define cpu_has_vce 0
#define cpu_has_veic 0
#define cpu_has_vint 0
@@ -56,6 +44,10 @@
#define cpu_has_watch 1
#define cpu_has_local_ebase 0
-#define cpu_has_wsbh IS_ENABLED(CONFIG_CPU_LOONGSON3)
+#ifdef CONFIG_CPU_LOONGSON3
+#define cpu_has_wsbh 1
+#define cpu_has_ic_fills_f_dc 1
+#define cpu_hwrena_impl_bits 0xc0000000
+#endif
#endif /* __ASM_MACH_LOONGSON64_CPU_FEATURE_OVERRIDES_H */
diff --git a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
index 3f2f84f6c401..8393bc548987 100644
--- a/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-loongson64/kernel-entry-init.h
@@ -23,8 +23,15 @@
or t0, (0x1 << 7)
mtc0 t0, $16, 3
/* Set ELPA on LOONGSON3 pagegrain */
- li t0, (0x1 << 29)
+ mfc0 t0, $5, 1
+ or t0, (0x1 << 29)
mtc0 t0, $5, 1
+#ifdef CONFIG_LOONGSON3_ENHANCEMENT
+ /* Enable STFill Buffer */
+ mfc0 t0, $16, 6
+ or t0, 0x100
+ mtc0 t0, $16, 6
+#endif
_ehb
.set pop
#endif
@@ -42,8 +49,15 @@
or t0, (0x1 << 7)
mtc0 t0, $16, 3
/* Set ELPA on LOONGSON3 pagegrain */
- li t0, (0x1 << 29)
+ mfc0 t0, $5, 1
+ or t0, (0x1 << 29)
mtc0 t0, $5, 1
+#ifdef CONFIG_LOONGSON3_ENHANCEMENT
+ /* Enable STFill Buffer */
+ mfc0 t0, $16, 6
+ or t0, 0x100
+ mtc0 t0, $16, 6
+#endif
_ehb
.set pop
#endif
diff --git a/arch/mips/include/asm/mach-loongson64/loongson_hwmon.h b/arch/mips/include/asm/mach-loongson64/loongson_hwmon.h
index 4431fc54a36c..74230d0ca98b 100644
--- a/arch/mips/include/asm/mach-loongson64/loongson_hwmon.h
+++ b/arch/mips/include/asm/mach-loongson64/loongson_hwmon.h
@@ -24,7 +24,7 @@ struct temp_range {
u8 level;
};
-#define CONSTANT_SPEED_POLICY 0 /* at constent speed */
+#define CONSTANT_SPEED_POLICY 0 /* at constant speed */
#define STEP_SPEED_POLICY 1 /* use up/down arrays to describe policy */
#define KERNEL_HELPER_POLICY 2 /* kernel as a helper to fan control */
diff --git a/arch/mips/include/asm/mach-malta/kernel-entry-init.h b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
index 0cf8622db27f..ab03eb3fadac 100644
--- a/arch/mips/include/asm/mach-malta/kernel-entry-init.h
+++ b/arch/mips/include/asm/mach-malta/kernel-entry-init.h
@@ -56,7 +56,7 @@
(0 << MIPS_SEGCFG_PA_SHIFT) | \
(1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
or t0, t2
- mtc0 t0, $5, 2
+ mtc0 t0, CP0_SEGCTL0
/* SegCtl1 */
li t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
@@ -67,7 +67,7 @@
(0 << MIPS_SEGCFG_PA_SHIFT) | \
(1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
ins t0, t1, 16, 3
- mtc0 t0, $5, 3
+ mtc0 t0, CP0_SEGCTL1
/* SegCtl2 */
li t0, ((MIPS_SEGCFG_MUSUK << MIPS_SEGCFG_AM_SHIFT) | \
@@ -77,7 +77,7 @@
(4 << MIPS_SEGCFG_PA_SHIFT) | \
(1 << MIPS_SEGCFG_EU_SHIFT)) << 16)
or t0, t2
- mtc0 t0, $5, 4
+ mtc0 t0, CP0_SEGCTL2
jal mips_ihb
mfc0 t0, $16, 5
diff --git a/arch/mips/include/asm/mach-ralink/mt7620.h b/arch/mips/include/asm/mach-ralink/mt7620.h
index 455d406e8ddf..a73350b07fdf 100644
--- a/arch/mips/include/asm/mach-ralink/mt7620.h
+++ b/arch/mips/include/asm/mach-ralink/mt7620.h
@@ -7,7 +7,7 @@
*
* Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
*/
#ifndef _MT7620_REGS_H_
@@ -72,6 +72,7 @@
#define SYSCFG0_DRAM_TYPE_SDRAM 0
#define SYSCFG0_DRAM_TYPE_DDR1 1
#define SYSCFG0_DRAM_TYPE_DDR2 2
+#define SYSCFG0_DRAM_TYPE_UNKNOWN 3
#define SYSCFG0_DRAM_TYPE_DDR2_MT7628 0
#define SYSCFG0_DRAM_TYPE_DDR1_MT7628 1
diff --git a/arch/mips/include/asm/mach-ralink/mt7621.h b/arch/mips/include/asm/mach-ralink/mt7621.h
index 610b61e3f9df..a672e06fa5fd 100644
--- a/arch/mips/include/asm/mach-ralink/mt7621.h
+++ b/arch/mips/include/asm/mach-ralink/mt7621.h
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright (C) 2015 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2015 John Crispin <john@phrozen.org>
*/
#ifndef _MT7621_REGS_H_
diff --git a/arch/mips/include/asm/mach-ralink/pinmux.h b/arch/mips/include/asm/mach-ralink/pinmux.h
index be106cb2e26d..ba8ac331af0c 100644
--- a/arch/mips/include/asm/mach-ralink/pinmux.h
+++ b/arch/mips/include/asm/mach-ralink/pinmux.h
@@ -3,7 +3,7 @@
* it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
*
- * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
*/
#ifndef _RT288X_PINMUX_H__
diff --git a/arch/mips/include/asm/mach-ralink/ralink_regs.h b/arch/mips/include/asm/mach-ralink/ralink_regs.h
index 4c9fba68c8b2..9df1a53bcb36 100644
--- a/arch/mips/include/asm/mach-ralink/ralink_regs.h
+++ b/arch/mips/include/asm/mach-ralink/ralink_regs.h
@@ -1,7 +1,7 @@
/*
* Ralink SoC register definitions
*
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
* Copyright (C) 2008-2010 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
*
diff --git a/arch/mips/include/asm/mach-ralink/rt288x.h b/arch/mips/include/asm/mach-ralink/rt288x.h
index 03ad716acb42..25ae1042d57b 100644
--- a/arch/mips/include/asm/mach-ralink/rt288x.h
+++ b/arch/mips/include/asm/mach-ralink/rt288x.h
@@ -7,7 +7,7 @@
*
* Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
*/
#ifndef _RT288X_REGS_H_
diff --git a/arch/mips/include/asm/mach-ralink/rt305x.h b/arch/mips/include/asm/mach-ralink/rt305x.h
index 2eea79331a14..ac2d65c04b5f 100644
--- a/arch/mips/include/asm/mach-ralink/rt305x.h
+++ b/arch/mips/include/asm/mach-ralink/rt305x.h
@@ -7,7 +7,7 @@
*
* Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
*/
#ifndef _RT305X_REGS_H_
diff --git a/arch/mips/include/asm/mips-cm.h b/arch/mips/include/asm/mips-cm.h
index b196825a1de9..58e7874e9347 100644
--- a/arch/mips/include/asm/mips-cm.h
+++ b/arch/mips/include/asm/mips-cm.h
@@ -28,7 +28,7 @@ extern void __iomem *mips_cm_l2sync_base;
* This function returns the physical base address of the Coherence Manager
* global control block, or 0 if no Coherence Manager is present. It provides
* a default implementation which reads the CMGCRBase register where available,
- * and may be overriden by platforms which determine this address in a
+ * and may be overridden by platforms which determine this address in a
* different way by defining a function with the same prototype except for the
* name mips_cm_phys_base (without underscores).
*/
@@ -208,6 +208,7 @@ BUILD_CM_RW(l2_config, MIPS_CM_GCB_OFS + 0x130)
BUILD_CM_RW(sys_config2, MIPS_CM_GCB_OFS + 0x150)
BUILD_CM_RW(l2_pft_control, MIPS_CM_GCB_OFS + 0x300)
BUILD_CM_RW(l2_pft_control_b, MIPS_CM_GCB_OFS + 0x308)
+BUILD_CM_RW(bev_base, MIPS_CM_GCB_OFS + 0x680)
/* Core Local & Core Other register accessor functions */
BUILD_CM_Cx_RW(reset_release, 0x00)
@@ -290,8 +291,8 @@ BUILD_CM_Cx_R_(tcid_8_priority, 0x80)
#define CM_GCR_GIC_BASE_GICEN_MSK (_ULCAST_(0x1) << 0)
/* GCR_CPC_BASE register fields */
-#define CM_GCR_CPC_BASE_CPCBASE_SHF 17
-#define CM_GCR_CPC_BASE_CPCBASE_MSK (_ULCAST_(0x7fff) << 17)
+#define CM_GCR_CPC_BASE_CPCBASE_SHF 15
+#define CM_GCR_CPC_BASE_CPCBASE_MSK (_ULCAST_(0x1ffff) << 15)
#define CM_GCR_CPC_BASE_CPCEN_SHF 0
#define CM_GCR_CPC_BASE_CPCEN_MSK (_ULCAST_(0x1) << 0)
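
The CPC_BASE field of GCR_CPC_BASE is widened here from bits 31:17 to bits 31:15, which lets the CPC register block be placed on a 32 KiB boundary rather than a 128 KiB one. A standalone check of the two masks (values restated with plain 1UL arithmetic in place of _ULCAST_()):

    #include <assert.h>

    int main(void)
    {
    	assert((0x7ffful  << 17) == 0xfffe0000ul);	/* old mask: bits 31:17 */
    	assert((0x1fffful << 15) == 0xffff8000ul);	/* new mask: bits 31:15 */
    	return 0;
    }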
@@ -461,7 +462,10 @@ static inline unsigned int mips_cm_max_vp_width(void)
if (mips_cm_revision() >= CM_REV_CM3)
return read_gcr_sys_config2() & CM_GCR_SYS_CONFIG2_MAXVPW_MSK;
- return smp_num_siblings;
+ if (IS_ENABLED(CONFIG_SMP))
+ return smp_num_siblings;
+
+ return 1;
}
/**
@@ -505,7 +509,7 @@ extern void mips_cm_unlock_other(void);
#else /* !CONFIG_MIPS_CM */
-static inline void mips_cm_lock_other(unsigned int core) { }
+static inline void mips_cm_lock_other(unsigned int core, unsigned int vp) { }
static inline void mips_cm_unlock_other(void) { }
#endif /* !CONFIG_MIPS_CM */
diff --git a/arch/mips/include/asm/mips-cpc.h b/arch/mips/include/asm/mips-cpc.h
index e09035239e53..8c519f9827a3 100644
--- a/arch/mips/include/asm/mips-cpc.h
+++ b/arch/mips/include/asm/mips-cpc.h
@@ -106,6 +106,9 @@ BUILD_CPC_R_(revision, MIPS_CPC_GCB_OFS + 0x20)
BUILD_CPC_Cx_RW(cmd, 0x00)
BUILD_CPC_Cx_RW(stat_conf, 0x08)
BUILD_CPC_Cx_RW(other, 0x10)
+BUILD_CPC_Cx_RW(vp_stop, 0x20)
+BUILD_CPC_Cx_RW(vp_run, 0x28)
+BUILD_CPC_Cx_RW(vp_running, 0x30)
/* CPC_Cx_CMD register fields */
#define CPC_Cx_CMD_SHF 0
diff --git a/arch/mips/include/asm/mips-r2-to-r6-emul.h b/arch/mips/include/asm/mips-r2-to-r6-emul.h
index 1f6ea8352ca9..20621e1ca238 100644
--- a/arch/mips/include/asm/mips-r2-to-r6-emul.h
+++ b/arch/mips/include/asm/mips-r2-to-r6-emul.h
@@ -79,7 +79,7 @@ struct r2_decoder_table {
};
-extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
+extern void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
const char *str);
#ifndef CONFIG_MIPSR2_TO_R6_EMULATOR
diff --git a/arch/mips/include/asm/mips_mt.h b/arch/mips/include/asm/mips_mt.h
index f6ba004a7711..aa4cca060e0a 100644
--- a/arch/mips/include/asm/mips_mt.h
+++ b/arch/mips/include/asm/mips_mt.h
@@ -1,5 +1,5 @@
/*
- * Definitions and decalrations for MIPS MT support that are common between
+ * Definitions and declarations for MIPS MT support that are common between
* the VSMP, and AP/SP kernel models.
*/
#ifndef __ASM_MIPS_MT_H
diff --git a/arch/mips/include/asm/mipsregs.h b/arch/mips/include/asm/mipsregs.h
index 3ad19ad04d8a..def9d8d13f6e 100644
--- a/arch/mips/include/asm/mipsregs.h
+++ b/arch/mips/include/asm/mipsregs.h
@@ -48,15 +48,24 @@
#define CP0_CONF $3
#define CP0_CONTEXT $4
#define CP0_PAGEMASK $5
+#define CP0_SEGCTL0 $5, 2
+#define CP0_SEGCTL1 $5, 3
+#define CP0_SEGCTL2 $5, 4
#define CP0_WIRED $6
#define CP0_INFO $7
-#define CP0_HWRENA $7, 0
+#define CP0_HWRENA $7
#define CP0_BADVADDR $8
#define CP0_BADINSTR $8, 1
#define CP0_COUNT $9
#define CP0_ENTRYHI $10
+#define CP0_GUESTCTL1 $10, 4
+#define CP0_GUESTCTL2 $10, 5
+#define CP0_GUESTCTL3 $10, 6
#define CP0_COMPARE $11
+#define CP0_GUESTCTL0EXT $11, 4
#define CP0_STATUS $12
+#define CP0_GUESTCTL0 $12, 6
+#define CP0_GTOFFSET $12, 7
#define CP0_CAUSE $13
#define CP0_EPC $14
#define CP0_PRID $15
@@ -229,6 +238,8 @@
/* MIPS32/64 EntryHI bit definitions */
#define MIPS_ENTRYHI_EHINV (_ULCAST_(1) << 10)
+#define MIPS_ENTRYHI_ASIDX (_ULCAST_(0x3) << 8)
+#define MIPS_ENTRYHI_ASID (_ULCAST_(0xff) << 0)
/*
* R4x00 interrupt enable / cause bits
@@ -390,6 +401,8 @@
#define CAUSEF_IP7 (_ULCAST_(1) << 15)
#define CAUSEB_FDCI 21
#define CAUSEF_FDCI (_ULCAST_(1) << 21)
+#define CAUSEB_WP 22
+#define CAUSEF_WP (_ULCAST_(1) << 22)
#define CAUSEB_IV 23
#define CAUSEF_IV (_ULCAST_(1) << 23)
#define CAUSEB_PCI 26
@@ -520,6 +533,7 @@
#define TX49_CONF_CWFON (_ULCAST_(1) << 27)
/* Bits specific to the MIPS32/64 PRA. */
+#define MIPS_CONF_VI (_ULCAST_(1) << 3)
#define MIPS_CONF_MT (_ULCAST_(7) << 7)
#define MIPS_CONF_MT_TLB (_ULCAST_(1) << 7)
#define MIPS_CONF_MT_FTLB (_ULCAST_(4) << 7)
@@ -611,7 +625,8 @@
#define MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT (_ULCAST_(1) << 14)
#define MIPS_CONF4_MMUEXTDEF_FTLBSIZEEXT (_ULCAST_(2) << 14)
#define MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT (_ULCAST_(3) << 14)
-#define MIPS_CONF4_KSCREXIST (_ULCAST_(255) << 16)
+#define MIPS_CONF4_KSCREXIST_SHIFT (16)
+#define MIPS_CONF4_KSCREXIST (_ULCAST_(255) << MIPS_CONF4_KSCREXIST_SHIFT)
#define MIPS_CONF4_VTLBSIZEEXT_SHIFT (24)
#define MIPS_CONF4_VTLBSIZEEXT (_ULCAST_(15) << MIPS_CONF4_VTLBSIZEEXT_SHIFT)
#define MIPS_CONF4_AE (_ULCAST_(1) << 28)
@@ -623,6 +638,7 @@
#define MIPS_CONF5_MRP (_ULCAST_(1) << 3)
#define MIPS_CONF5_LLB (_ULCAST_(1) << 4)
#define MIPS_CONF5_MVH (_ULCAST_(1) << 5)
+#define MIPS_CONF5_VP (_ULCAST_(1) << 7)
#define MIPS_CONF5_FRE (_ULCAST_(1) << 8)
#define MIPS_CONF5_UFE (_ULCAST_(1) << 9)
#define MIPS_CONF5_MSAEN (_ULCAST_(1) << 27)
@@ -633,6 +649,8 @@
#define MIPS_CONF6_SYND (_ULCAST_(1) << 13)
/* proAptiv FTLB on/off bit */
#define MIPS_CONF6_FTLBEN (_ULCAST_(1) << 15)
+/* Loongson-3 FTLB on/off bit */
+#define MIPS_CONF6_FTLBDIS (_ULCAST_(1) << 22)
/* FTLB probability bits */
#define MIPS_CONF6_FTLBP_SHIFT (16)
@@ -645,12 +663,38 @@
/* FTLB probability bits for R6 */
#define MIPS_CONF7_FTLBP_SHIFT (18)
+/* WatchLo* register definitions */
+#define MIPS_WATCHLO_IRW (_ULCAST_(0x7) << 0)
+
+/* WatchHi* register definitions */
+#define MIPS_WATCHHI_M (_ULCAST_(1) << 31)
+#define MIPS_WATCHHI_G (_ULCAST_(1) << 30)
+#define MIPS_WATCHHI_WM (_ULCAST_(0x3) << 28)
+#define MIPS_WATCHHI_WM_R_RVA (_ULCAST_(0) << 28)
+#define MIPS_WATCHHI_WM_R_GPA (_ULCAST_(1) << 28)
+#define MIPS_WATCHHI_WM_G_GVA (_ULCAST_(2) << 28)
+#define MIPS_WATCHHI_EAS (_ULCAST_(0x3) << 24)
+#define MIPS_WATCHHI_ASID (_ULCAST_(0xff) << 16)
+#define MIPS_WATCHHI_MASK (_ULCAST_(0x1ff) << 3)
+#define MIPS_WATCHHI_I (_ULCAST_(1) << 2)
+#define MIPS_WATCHHI_R (_ULCAST_(1) << 1)
+#define MIPS_WATCHHI_W (_ULCAST_(1) << 0)
+#define MIPS_WATCHHI_IRW (_ULCAST_(0x7) << 0)
+
/* MAAR bit definitions */
#define MIPS_MAAR_ADDR ((BIT_ULL(BITS_PER_LONG - 12) - 1) << 12)
#define MIPS_MAAR_ADDR_SHIFT 12
#define MIPS_MAAR_S (_ULCAST_(1) << 1)
#define MIPS_MAAR_V (_ULCAST_(1) << 0)
+/* EBase bit definitions */
+#define MIPS_EBASE_CPUNUM_SHIFT 0
+#define MIPS_EBASE_CPUNUM (_ULCAST_(0x3ff) << 0)
+#define MIPS_EBASE_WG_SHIFT 11
+#define MIPS_EBASE_WG (_ULCAST_(1) << 11)
+#define MIPS_EBASE_BASE_SHIFT 12
+#define MIPS_EBASE_BASE (~_ULCAST_((1 << MIPS_EBASE_BASE_SHIFT) - 1))
+
/* CMGCRBase bit definitions */
#define MIPS_CMGCRB_BASE 11
#define MIPS_CMGCRF_BASE (~_ULCAST_((1 << MIPS_CMGCRB_BASE) - 1))
@@ -686,6 +730,8 @@
#define MIPS_PWFIELD_PTEI_SHIFT 0
#define MIPS_PWFIELD_PTEI_MASK 0x0000003f
+#define MIPS_PWSIZE_PS_SHIFT 30
+#define MIPS_PWSIZE_PS_MASK 0x40000000
#define MIPS_PWSIZE_GDW_SHIFT 24
#define MIPS_PWSIZE_GDW_MASK 0x3f000000
#define MIPS_PWSIZE_UDW_SHIFT 18
@@ -699,6 +745,12 @@
#define MIPS_PWCTL_PWEN_SHIFT 31
#define MIPS_PWCTL_PWEN_MASK 0x80000000
+#define MIPS_PWCTL_XK_SHIFT 28
+#define MIPS_PWCTL_XK_MASK 0x10000000
+#define MIPS_PWCTL_XS_SHIFT 27
+#define MIPS_PWCTL_XS_MASK 0x08000000
+#define MIPS_PWCTL_XU_SHIFT 26
+#define MIPS_PWCTL_XU_MASK 0x04000000
#define MIPS_PWCTL_DPH_SHIFT 7
#define MIPS_PWCTL_DPH_MASK 0x00000080
#define MIPS_PWCTL_HUGEPG_SHIFT 6
@@ -706,6 +758,94 @@
#define MIPS_PWCTL_PSN_SHIFT 0
#define MIPS_PWCTL_PSN_MASK 0x0000003f
+/* GuestCtl0 fields */
+#define MIPS_GCTL0_GM_SHIFT 31
+#define MIPS_GCTL0_GM (_ULCAST_(1) << MIPS_GCTL0_GM_SHIFT)
+#define MIPS_GCTL0_RI_SHIFT 30
+#define MIPS_GCTL0_RI (_ULCAST_(1) << MIPS_GCTL0_RI_SHIFT)
+#define MIPS_GCTL0_MC_SHIFT 29
+#define MIPS_GCTL0_MC (_ULCAST_(1) << MIPS_GCTL0_MC_SHIFT)
+#define MIPS_GCTL0_CP0_SHIFT 28
+#define MIPS_GCTL0_CP0 (_ULCAST_(1) << MIPS_GCTL0_CP0_SHIFT)
+#define MIPS_GCTL0_AT_SHIFT 26
+#define MIPS_GCTL0_AT (_ULCAST_(0x3) << MIPS_GCTL0_AT_SHIFT)
+#define MIPS_GCTL0_GT_SHIFT 25
+#define MIPS_GCTL0_GT (_ULCAST_(1) << MIPS_GCTL0_GT_SHIFT)
+#define MIPS_GCTL0_CG_SHIFT 24
+#define MIPS_GCTL0_CG (_ULCAST_(1) << MIPS_GCTL0_CG_SHIFT)
+#define MIPS_GCTL0_CF_SHIFT 23
+#define MIPS_GCTL0_CF (_ULCAST_(1) << MIPS_GCTL0_CF_SHIFT)
+#define MIPS_GCTL0_G1_SHIFT 22
+#define MIPS_GCTL0_G1 (_ULCAST_(1) << MIPS_GCTL0_G1_SHIFT)
+#define MIPS_GCTL0_G0E_SHIFT 19
+#define MIPS_GCTL0_G0E (_ULCAST_(1) << MIPS_GCTL0_G0E_SHIFT)
+#define MIPS_GCTL0_PT_SHIFT 18
+#define MIPS_GCTL0_PT (_ULCAST_(1) << MIPS_GCTL0_PT_SHIFT)
+#define MIPS_GCTL0_RAD_SHIFT 9
+#define MIPS_GCTL0_RAD (_ULCAST_(1) << MIPS_GCTL0_RAD_SHIFT)
+#define MIPS_GCTL0_DRG_SHIFT 8
+#define MIPS_GCTL0_DRG (_ULCAST_(1) << MIPS_GCTL0_DRG_SHIFT)
+#define MIPS_GCTL0_G2_SHIFT 7
+#define MIPS_GCTL0_G2 (_ULCAST_(1) << MIPS_GCTL0_G2_SHIFT)
+#define MIPS_GCTL0_GEXC_SHIFT 2
+#define MIPS_GCTL0_GEXC (_ULCAST_(0x1f) << MIPS_GCTL0_GEXC_SHIFT)
+#define MIPS_GCTL0_SFC2_SHIFT 1
+#define MIPS_GCTL0_SFC2 (_ULCAST_(1) << MIPS_GCTL0_SFC2_SHIFT)
+#define MIPS_GCTL0_SFC1_SHIFT 0
+#define MIPS_GCTL0_SFC1 (_ULCAST_(1) << MIPS_GCTL0_SFC1_SHIFT)
+
+/* GuestCtl0.AT Guest address translation control */
+#define MIPS_GCTL0_AT_ROOT 1 /* Guest MMU under Root control */
+#define MIPS_GCTL0_AT_GUEST 3 /* Guest MMU under Guest control */
+
+/* GuestCtl0.GExcCode Hypervisor exception cause codes */
+#define MIPS_GCTL0_GEXC_GPSI 0 /* Guest Privileged Sensitive Instruction */
+#define MIPS_GCTL0_GEXC_GSFC 1 /* Guest Software Field Change */
+#define MIPS_GCTL0_GEXC_HC 2 /* Hypercall */
+#define MIPS_GCTL0_GEXC_GRR 3 /* Guest Reserved Instruction Redirect */
+#define MIPS_GCTL0_GEXC_GVA 8 /* Guest Virtual Address available */
+#define MIPS_GCTL0_GEXC_GHFC 9 /* Guest Hardware Field Change */
+#define MIPS_GCTL0_GEXC_GPA 10 /* Guest Physical Address available */
+
+/* GuestCtl0Ext fields */
+#define MIPS_GCTL0EXT_RPW_SHIFT 8
+#define MIPS_GCTL0EXT_RPW (_ULCAST_(0x3) << MIPS_GCTL0EXT_RPW_SHIFT)
+#define MIPS_GCTL0EXT_NCC_SHIFT 6
+#define MIPS_GCTL0EXT_NCC (_ULCAST_(0x3) << MIPS_GCTL0EXT_NCC_SHIFT)
+#define MIPS_GCTL0EXT_CGI_SHIFT 4
+#define MIPS_GCTL0EXT_CGI (_ULCAST_(1) << MIPS_GCTL0EXT_CGI_SHIFT)
+#define MIPS_GCTL0EXT_FCD_SHIFT 3
+#define MIPS_GCTL0EXT_FCD (_ULCAST_(1) << MIPS_GCTL0EXT_FCD_SHIFT)
+#define MIPS_GCTL0EXT_OG_SHIFT 2
+#define MIPS_GCTL0EXT_OG (_ULCAST_(1) << MIPS_GCTL0EXT_OG_SHIFT)
+#define MIPS_GCTL0EXT_BG_SHIFT 1
+#define MIPS_GCTL0EXT_BG (_ULCAST_(1) << MIPS_GCTL0EXT_BG_SHIFT)
+#define MIPS_GCTL0EXT_MG_SHIFT 0
+#define MIPS_GCTL0EXT_MG (_ULCAST_(1) << MIPS_GCTL0EXT_MG_SHIFT)
+
+/* GuestCtl0Ext.RPW Root page walk configuration */
+#define MIPS_GCTL0EXT_RPW_BOTH 0 /* Root PW for GPA->RPA and RVA->RPA */
+#define MIPS_GCTL0EXT_RPW_GPA 2 /* Root PW for GPA->RPA */
+#define MIPS_GCTL0EXT_RPW_RVA 3 /* Root PW for RVA->RPA */
+
+/* GuestCtl0Ext.NCC Nested cache coherency attributes */
+#define MIPS_GCTL0EXT_NCC_IND 0 /* Guest CCA independent of Root CCA */
+#define MIPS_GCTL0EXT_NCC_MOD 1 /* Guest CCA modified by Root CCA */
+
+/* GuestCtl1 fields */
+#define MIPS_GCTL1_ID_SHIFT 0
+#define MIPS_GCTL1_ID_WIDTH 8
+#define MIPS_GCTL1_ID (_ULCAST_(0xff) << MIPS_GCTL1_ID_SHIFT)
+#define MIPS_GCTL1_RID_SHIFT 16
+#define MIPS_GCTL1_RID_WIDTH 8
+#define MIPS_GCTL1_RID (_ULCAST_(0xff) << MIPS_GCTL1_RID_SHIFT)
+#define MIPS_GCTL1_EID_SHIFT 24
+#define MIPS_GCTL1_EID_WIDTH 8
+#define MIPS_GCTL1_EID (_ULCAST_(0xff) << MIPS_GCTL1_EID_SHIFT)
+
+/* GuestID reserved for root context */
+#define MIPS_GCTL1_ROOT_GUESTID 0
+
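To show how the GuestCtl1 fields combine, here is an editor's sketch (the helper name is made up; it relies on the read/write_c0_guestctl1() accessors defined later in this header) of assigning a GuestID while leaving the root ID field untouched:

static inline void vz_set_guestid(unsigned int guestid)
{
	unsigned int gctl1 = read_c0_guestctl1();

	gctl1 &= ~MIPS_GCTL1_ID;
	gctl1 |= (guestid << MIPS_GCTL1_ID_SHIFT) & MIPS_GCTL1_ID;
	write_c0_guestctl1(gctl1);
}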
/* CDMMBase register bit definitions */
#define MIPS_CDMMBASE_SIZE_SHIFT 0
#define MIPS_CDMMBASE_SIZE (_ULCAST_(511) << MIPS_CDMMBASE_SIZE_SHIFT)
@@ -714,6 +854,24 @@
#define MIPS_CDMMBASE_ADDR_SHIFT 11
#define MIPS_CDMMBASE_ADDR_START 15
+/* RDHWR register numbers */
+#define MIPS_HWR_CPUNUM 0 /* CPU number */
+#define MIPS_HWR_SYNCISTEP 1 /* SYNCI step size */
+#define MIPS_HWR_CC 2 /* Cycle counter */
+#define MIPS_HWR_CCRES 3 /* Cycle counter resolution */
+#define MIPS_HWR_ULR 29 /* UserLocal */
+#define MIPS_HWR_IMPL1 30 /* Implementation dependent */
+#define MIPS_HWR_IMPL2 31 /* Implementation dependent */
+
+/* Bits in HWREna register */
+#define MIPS_HWRENA_CPUNUM (_ULCAST_(1) << MIPS_HWR_CPUNUM)
+#define MIPS_HWRENA_SYNCISTEP (_ULCAST_(1) << MIPS_HWR_SYNCISTEP)
+#define MIPS_HWRENA_CC (_ULCAST_(1) << MIPS_HWR_CC)
+#define MIPS_HWRENA_CCRES (_ULCAST_(1) << MIPS_HWR_CCRES)
+#define MIPS_HWRENA_ULR (_ULCAST_(1) << MIPS_HWR_ULR)
+#define MIPS_HWRENA_IMPL1 (_ULCAST_(1) << MIPS_HWR_IMPL1)
+#define MIPS_HWRENA_IMPL2 (_ULCAST_(1) << MIPS_HWR_IMPL2)
+
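These bits gate which hardware registers RDHWR may read from user mode. A hedged sketch of enabling the common ones, assuming the usual write_c0_hwrena() accessor for CP0 register $7 sel 0 (defined elsewhere in this header, not in this hunk):

static inline void enable_user_rdhwr(void)
{
	/* Let user space read the cycle counter, its resolution and UserLocal */
	write_c0_hwrena(MIPS_HWRENA_CC | MIPS_HWRENA_CCRES | MIPS_HWRENA_ULR);
}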
/*
* Bitfields in the TX39 family CP0 Configuration Register 3
*/
@@ -757,6 +915,15 @@
/* Disable Branch Return Cache */
#define R10K_DIAG_D_BRC (_ULCAST_(1) << 22)
+/* Flush ITLB */
+#define LOONGSON_DIAG_ITLB (_ULCAST_(1) << 2)
+/* Flush DTLB */
+#define LOONGSON_DIAG_DTLB (_ULCAST_(1) << 3)
+/* Flush VTLB */
+#define LOONGSON_DIAG_VTLB (_ULCAST_(1) << 12)
+/* Flush FTLB */
+#define LOONGSON_DIAG_FTLB (_ULCAST_(1) << 13)
+
/*
* Coprocessor 1 (FPU) register names
*/
@@ -909,6 +1076,33 @@ static inline int mm_insn_16bit(u16 insn)
}
/*
+ * Helper macros for generating raw instruction encodings in inline asm.
+ */
+#ifdef CONFIG_CPU_MICROMIPS
+#define _ASM_INSN16_IF_MM(_enc) \
+ ".insn\n\t" \
+ ".hword (" #_enc ")\n\t"
+#define _ASM_INSN32_IF_MM(_enc) \
+ ".insn\n\t" \
+ ".hword ((" #_enc ") >> 16)\n\t" \
+ ".hword ((" #_enc ") & 0xffff)\n\t"
+#else
+#define _ASM_INSN_IF_MIPS(_enc) \
+ ".insn\n\t" \
+ ".word (" #_enc ")\n\t"
+#endif
+
+#ifndef _ASM_INSN16_IF_MM
+#define _ASM_INSN16_IF_MM(_enc)
+#endif
+#ifndef _ASM_INSN32_IF_MM
+#define _ASM_INSN32_IF_MM(_enc)
+#endif
+#ifndef _ASM_INSN_IF_MIPS
+#define _ASM_INSN_IF_MIPS(_enc)
+#endif
+
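For orientation, this pair of helpers emits either the classic MIPS encoding or the microMIPS one, never both; on a non-microMIPS build, for example:

/*
 * _ASM_INSN_IF_MIPS(0x42000004) expands to ".insn\n\t" ".word (0x42000004)\n\t"
 * while _ASM_INSN16_IF_MM()/_ASM_INSN32_IF_MM() expand to nothing, and the
 * reverse holds when CONFIG_CPU_MICROMIPS is set.
 */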
+/*
* TLB Invalidate Flush
*/
static inline void tlbinvf(void)
@@ -916,7 +1110,9 @@ static inline void tlbinvf(void)
__asm__ __volatile__(
".set push\n\t"
".set noreorder\n\t"
- ".word 0x42000004\n\t" /* tlbinvf */
+ "# tlbinvf\n\t"
+ _ASM_INSN_IF_MIPS(0x42000004)
+ _ASM_INSN32_IF_MM(0x0000537c)
".set pop");
}
@@ -1137,9 +1333,9 @@ do { \
" .set push \n" \
" .set noat \n" \
" .set mips32r2 \n" \
- " .insn \n" \
" # mfhc0 $1, %1 \n" \
- " .word (0x40410000 | ((%1 & 0x1f) << 11)) \n" \
+ _ASM_INSN_IF_MIPS(0x40410000 | ((%1 & 0x1f) << 11)) \
+ _ASM_INSN32_IF_MM(0x002000f4 | ((%1 & 0x1f) << 16)) \
" move %0, $1 \n" \
" .set pop \n" \
: "=r" (__res) \
@@ -1155,8 +1351,8 @@ do { \
" .set mips32r2 \n" \
" move $1, %0 \n" \
" # mthc0 $1, %1 \n" \
- " .insn \n" \
- " .word (0x40c10000 | ((%1 & 0x1f) << 11)) \n" \
+ _ASM_INSN_IF_MIPS(0x40c10000 | ((%1 & 0x1f) << 11)) \
+ _ASM_INSN32_IF_MM(0x002002f4 | ((%1 & 0x1f) << 16)) \
" .set pop \n" \
: \
: "r" (value), "i" (register)); \
@@ -1186,9 +1382,15 @@ do { \
#define read_c0_context() __read_ulong_c0_register($4, 0)
#define write_c0_context(val) __write_ulong_c0_register($4, 0, val)
+#define read_c0_contextconfig() __read_32bit_c0_register($4, 1)
+#define write_c0_contextconfig(val) __write_32bit_c0_register($4, 1, val)
+
#define read_c0_userlocal() __read_ulong_c0_register($4, 2)
#define write_c0_userlocal(val) __write_ulong_c0_register($4, 2, val)
+#define read_c0_xcontextconfig() __read_ulong_c0_register($4, 3)
+#define write_c0_xcontextconfig(val) __write_ulong_c0_register($4, 3, val)
+
#define read_c0_pagemask() __read_32bit_c0_register($5, 0)
#define write_c0_pagemask(val) __write_32bit_c0_register($5, 0, val)
@@ -1206,6 +1408,9 @@ do { \
#define read_c0_badvaddr() __read_ulong_c0_register($8, 0)
#define write_c0_badvaddr(val) __write_ulong_c0_register($8, 0, val)
+#define read_c0_badinstr() __read_32bit_c0_register($8, 1)
+#define read_c0_badinstrp() __read_32bit_c0_register($8, 2)
+
#define read_c0_count() __read_32bit_c0_register($9, 0)
#define write_c0_count(val) __write_32bit_c0_register($9, 0, val)
@@ -1218,9 +1423,21 @@ do { \
#define read_c0_entryhi() __read_ulong_c0_register($10, 0)
#define write_c0_entryhi(val) __write_ulong_c0_register($10, 0, val)
+#define read_c0_guestctl1() __read_32bit_c0_register($10, 4)
+#define write_c0_guestctl1(val) __write_32bit_c0_register($10, 4, val)
+
+#define read_c0_guestctl2() __read_32bit_c0_register($10, 5)
+#define write_c0_guestctl2(val) __write_32bit_c0_register($10, 5, val)
+
+#define read_c0_guestctl3() __read_32bit_c0_register($10, 6)
+#define write_c0_guestctl3(val) __write_32bit_c0_register($10, 6, val)
+
#define read_c0_compare() __read_32bit_c0_register($11, 0)
#define write_c0_compare(val) __write_32bit_c0_register($11, 0, val)
+#define read_c0_guestctl0ext() __read_32bit_c0_register($11, 4)
+#define write_c0_guestctl0ext(val) __write_32bit_c0_register($11, 4, val)
+
#define read_c0_compare2() __read_32bit_c0_register($11, 6) /* pnx8550 */
#define write_c0_compare2(val) __write_32bit_c0_register($11, 6, val)
@@ -1231,6 +1448,12 @@ do { \
#define write_c0_status(val) __write_32bit_c0_register($12, 0, val)
+#define read_c0_guestctl0() __read_32bit_c0_register($12, 6)
+#define write_c0_guestctl0(val) __write_32bit_c0_register($12, 6, val)
+
+#define read_c0_gtoffset() __read_32bit_c0_register($12, 7)
+#define write_c0_gtoffset(val) __write_32bit_c0_register($12, 7, val)
+
#define read_c0_cause() __read_32bit_c0_register($13, 0)
#define write_c0_cause(val) __write_32bit_c0_register($13, 0, val)
@@ -1416,6 +1639,9 @@ do { \
#define read_c0_ebase() __read_32bit_c0_register($15, 1)
#define write_c0_ebase(val) __write_32bit_c0_register($15, 1, val)
+#define read_c0_ebase_64() __read_64bit_c0_register($15, 1)
+#define write_c0_ebase_64(val) __write_64bit_c0_register($15, 1, val)
+
#define read_c0_cdmmbase() __read_ulong_c0_register($15, 2)
#define write_c0_cdmmbase(val) __write_ulong_c0_register($15, 2, val)
@@ -1442,6 +1668,12 @@ do { \
#define read_c0_pwctl() __read_32bit_c0_register($6, 6)
#define write_c0_pwctl(val) __write_32bit_c0_register($6, 6, val)
+#define read_c0_pgd() __read_64bit_c0_register($9, 7)
+#define write_c0_pgd(val) __write_64bit_c0_register($9, 7, val)
+
+#define read_c0_kpgd() __read_64bit_c0_register($31, 7)
+#define write_c0_kpgd(val) __write_64bit_c0_register($31, 7, val)
+
/* Cavium OCTEON (cnMIPS) */
#define read_c0_cvmcount() __read_ulong_c0_register($9, 6)
#define write_c0_cvmcount(val) __write_ulong_c0_register($9, 6, val)
@@ -1507,6 +1739,321 @@ do { \
#define write_c0_brcm_sleepcount(val) __write_32bit_c0_register($22, 7, val)
/*
+ * Macros to access the guest system control coprocessor
+ */
+
+#ifdef TOOLCHAIN_SUPPORTS_VIRT
+
+#define __read_32bit_gc0_register(source, sel) \
+({ int __res; \
+ __asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\tmips32r2\n\t" \
+ ".set\tvirt\n\t" \
+ "mfgc0\t%0, $%1, %2\n\t" \
+ ".set\tpop" \
+ : "=r" (__res) \
+ : "i" (source), "i" (sel)); \
+ __res; \
+})
+
+#define __read_64bit_gc0_register(source, sel) \
+({ unsigned long long __res; \
+ __asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\tmips64r2\n\t" \
+ ".set\tvirt\n\t" \
+ "dmfgc0\t%0, $%1, %2\n\t" \
+ ".set\tpop" \
+ : "=r" (__res) \
+ : "i" (source), "i" (sel)); \
+ __res; \
+})
+
+#define __write_32bit_gc0_register(register, sel, value) \
+do { \
+ __asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\tmips32r2\n\t" \
+ ".set\tvirt\n\t" \
+ "mtgc0\t%z0, $%1, %2\n\t" \
+ ".set\tpop" \
+ : : "Jr" ((unsigned int)(value)), \
+ "i" (register), "i" (sel)); \
+} while (0)
+
+#define __write_64bit_gc0_register(register, sel, value) \
+do { \
+ __asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\tmips64r2\n\t" \
+ ".set\tvirt\n\t" \
+ "dmtgc0\t%z0, $%1, %2\n\t" \
+ ".set\tpop" \
+ : : "Jr" (value), \
+ "i" (register), "i" (sel)); \
+} while (0)
+
+#else /* TOOLCHAIN_SUPPORTS_VIRT */
+
+#define __read_32bit_gc0_register(source, sel) \
+({ int __res; \
+ __asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\tnoat\n\t" \
+ "# mfgc0\t$1, $%1, %2\n\t" \
+ _ASM_INSN_IF_MIPS(0x40610000 | %1 << 11 | %2) \
+ _ASM_INSN32_IF_MM(0x002004fc | %1 << 16 | %2 << 11) \
+ "move\t%0, $1\n\t" \
+ ".set\tpop" \
+ : "=r" (__res) \
+ : "i" (source), "i" (sel)); \
+ __res; \
+})
+
+#define __read_64bit_gc0_register(source, sel) \
+({ unsigned long long __res; \
+ __asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\tnoat\n\t" \
+ "# dmfgc0\t$1, $%1, %2\n\t" \
+ _ASM_INSN_IF_MIPS(0x40610100 | %1 << 11 | %2) \
+ _ASM_INSN32_IF_MM(0x582004fc | %1 << 16 | %2 << 11) \
+ "move\t%0, $1\n\t" \
+ ".set\tpop" \
+ : "=r" (__res) \
+ : "i" (source), "i" (sel)); \
+ __res; \
+})
+
+#define __write_32bit_gc0_register(register, sel, value) \
+do { \
+ __asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\tnoat\n\t" \
+ "move\t$1, %z0\n\t" \
+ "# mtgc0\t$1, $%1, %2\n\t" \
+ _ASM_INSN_IF_MIPS(0x40610200 | %1 << 11 | %2) \
+ _ASM_INSN32_IF_MM(0x002006fc | %1 << 16 | %2 << 11) \
+ ".set\tpop" \
+ : : "Jr" ((unsigned int)(value)), \
+ "i" (register), "i" (sel)); \
+} while (0)
+
+#define __write_64bit_gc0_register(register, sel, value) \
+do { \
+ __asm__ __volatile__( \
+ ".set\tpush\n\t" \
+ ".set\tnoat\n\t" \
+ "move\t$1, %z0\n\t" \
+ "# dmtgc0\t$1, $%1, %2\n\t" \
+ _ASM_INSN_IF_MIPS(0x40610300 | %1 << 11 | %2) \
+ _ASM_INSN32_IF_MM(0x582006fc | %1 << 16 | %2 << 11) \
+ ".set\tpop" \
+ : : "Jr" (value), \
+ "i" (register), "i" (sel)); \
+} while (0)
+
+#endif /* !TOOLCHAIN_SUPPORTS_VIRT */
+
+#define __read_ulong_gc0_register(reg, sel) \
+ ((sizeof(unsigned long) == 4) ? \
+ (unsigned long) __read_32bit_gc0_register(reg, sel) : \
+ (unsigned long) __read_64bit_gc0_register(reg, sel))
+
+#define __write_ulong_gc0_register(reg, sel, val) \
+do { \
+ if (sizeof(unsigned long) == 4) \
+ __write_32bit_gc0_register(reg, sel, val); \
+ else \
+ __write_64bit_gc0_register(reg, sel, val); \
+} while (0)
+
+#define read_gc0_index() __read_32bit_gc0_register(0, 0)
+#define write_gc0_index(val) __write_32bit_gc0_register(0, 0, val)
+
+#define read_gc0_entrylo0() __read_ulong_gc0_register(2, 0)
+#define write_gc0_entrylo0(val) __write_ulong_gc0_register(2, 0, val)
+
+#define read_gc0_entrylo1() __read_ulong_gc0_register(3, 0)
+#define write_gc0_entrylo1(val) __write_ulong_gc0_register(3, 0, val)
+
+#define read_gc0_context() __read_ulong_gc0_register(4, 0)
+#define write_gc0_context(val) __write_ulong_gc0_register(4, 0, val)
+
+#define read_gc0_contextconfig() __read_32bit_gc0_register(4, 1)
+#define write_gc0_contextconfig(val) __write_32bit_gc0_register(4, 1, val)
+
+#define read_gc0_userlocal() __read_ulong_gc0_register(4, 2)
+#define write_gc0_userlocal(val) __write_ulong_gc0_register(4, 2, val)
+
+#define read_gc0_xcontextconfig() __read_ulong_gc0_register(4, 3)
+#define write_gc0_xcontextconfig(val) __write_ulong_gc0_register(4, 3, val)
+
+#define read_gc0_pagemask() __read_32bit_gc0_register(5, 0)
+#define write_gc0_pagemask(val) __write_32bit_gc0_register(5, 0, val)
+
+#define read_gc0_pagegrain() __read_32bit_gc0_register(5, 1)
+#define write_gc0_pagegrain(val) __write_32bit_gc0_register(5, 1, val)
+
+#define read_gc0_segctl0() __read_ulong_gc0_register(5, 2)
+#define write_gc0_segctl0(val) __write_ulong_gc0_register(5, 2, val)
+
+#define read_gc0_segctl1() __read_ulong_gc0_register(5, 3)
+#define write_gc0_segctl1(val) __write_ulong_gc0_register(5, 3, val)
+
+#define read_gc0_segctl2() __read_ulong_gc0_register(5, 4)
+#define write_gc0_segctl2(val) __write_ulong_gc0_register(5, 4, val)
+
+#define read_gc0_pwbase() __read_ulong_gc0_register(5, 5)
+#define write_gc0_pwbase(val) __write_ulong_gc0_register(5, 5, val)
+
+#define read_gc0_pwfield() __read_ulong_gc0_register(5, 6)
+#define write_gc0_pwfield(val) __write_ulong_gc0_register(5, 6, val)
+
+#define read_gc0_pwsize() __read_ulong_gc0_register(5, 7)
+#define write_gc0_pwsize(val) __write_ulong_gc0_register(5, 7, val)
+
+#define read_gc0_wired() __read_32bit_gc0_register(6, 0)
+#define write_gc0_wired(val) __write_32bit_gc0_register(6, 0, val)
+
+#define read_gc0_pwctl() __read_32bit_gc0_register(6, 6)
+#define write_gc0_pwctl(val) __write_32bit_gc0_register(6, 6, val)
+
+#define read_gc0_hwrena() __read_32bit_gc0_register(7, 0)
+#define write_gc0_hwrena(val) __write_32bit_gc0_register(7, 0, val)
+
+#define read_gc0_badvaddr() __read_ulong_gc0_register(8, 0)
+#define write_gc0_badvaddr(val) __write_ulong_gc0_register(8, 0, val)
+
+#define read_gc0_badinstr() __read_32bit_gc0_register(8, 1)
+#define write_gc0_badinstr(val) __write_32bit_gc0_register(8, 1, val)
+
+#define read_gc0_badinstrp() __read_32bit_gc0_register(8, 2)
+#define write_gc0_badinstrp(val) __write_32bit_gc0_register(8, 2, val)
+
+#define read_gc0_count() __read_32bit_gc0_register(9, 0)
+
+#define read_gc0_entryhi() __read_ulong_gc0_register(10, 0)
+#define write_gc0_entryhi(val) __write_ulong_gc0_register(10, 0, val)
+
+#define read_gc0_compare() __read_32bit_gc0_register(11, 0)
+#define write_gc0_compare(val) __write_32bit_gc0_register(11, 0, val)
+
+#define read_gc0_status() __read_32bit_gc0_register(12, 0)
+#define write_gc0_status(val) __write_32bit_gc0_register(12, 0, val)
+
+#define read_gc0_intctl() __read_32bit_gc0_register(12, 1)
+#define write_gc0_intctl(val) __write_32bit_gc0_register(12, 1, val)
+
+#define read_gc0_cause() __read_32bit_gc0_register(13, 0)
+#define write_gc0_cause(val) __write_32bit_gc0_register(13, 0, val)
+
+#define read_gc0_epc() __read_ulong_gc0_register(14, 0)
+#define write_gc0_epc(val) __write_ulong_gc0_register(14, 0, val)
+
+#define read_gc0_ebase() __read_32bit_gc0_register(15, 1)
+#define write_gc0_ebase(val) __write_32bit_gc0_register(15, 1, val)
+
+#define read_gc0_ebase_64() __read_64bit_gc0_register(15, 1)
+#define write_gc0_ebase_64(val) __write_64bit_gc0_register(15, 1, val)
+
+#define read_gc0_config() __read_32bit_gc0_register(16, 0)
+#define read_gc0_config1() __read_32bit_gc0_register(16, 1)
+#define read_gc0_config2() __read_32bit_gc0_register(16, 2)
+#define read_gc0_config3() __read_32bit_gc0_register(16, 3)
+#define read_gc0_config4() __read_32bit_gc0_register(16, 4)
+#define read_gc0_config5() __read_32bit_gc0_register(16, 5)
+#define read_gc0_config6() __read_32bit_gc0_register(16, 6)
+#define read_gc0_config7() __read_32bit_gc0_register(16, 7)
+#define write_gc0_config(val) __write_32bit_gc0_register(16, 0, val)
+#define write_gc0_config1(val) __write_32bit_gc0_register(16, 1, val)
+#define write_gc0_config2(val) __write_32bit_gc0_register(16, 2, val)
+#define write_gc0_config3(val) __write_32bit_gc0_register(16, 3, val)
+#define write_gc0_config4(val) __write_32bit_gc0_register(16, 4, val)
+#define write_gc0_config5(val) __write_32bit_gc0_register(16, 5, val)
+#define write_gc0_config6(val) __write_32bit_gc0_register(16, 6, val)
+#define write_gc0_config7(val) __write_32bit_gc0_register(16, 7, val)
+
+#define read_gc0_watchlo0() __read_ulong_gc0_register(18, 0)
+#define read_gc0_watchlo1() __read_ulong_gc0_register(18, 1)
+#define read_gc0_watchlo2() __read_ulong_gc0_register(18, 2)
+#define read_gc0_watchlo3() __read_ulong_gc0_register(18, 3)
+#define read_gc0_watchlo4() __read_ulong_gc0_register(18, 4)
+#define read_gc0_watchlo5() __read_ulong_gc0_register(18, 5)
+#define read_gc0_watchlo6() __read_ulong_gc0_register(18, 6)
+#define read_gc0_watchlo7() __read_ulong_gc0_register(18, 7)
+#define write_gc0_watchlo0(val) __write_ulong_gc0_register(18, 0, val)
+#define write_gc0_watchlo1(val) __write_ulong_gc0_register(18, 1, val)
+#define write_gc0_watchlo2(val) __write_ulong_gc0_register(18, 2, val)
+#define write_gc0_watchlo3(val) __write_ulong_gc0_register(18, 3, val)
+#define write_gc0_watchlo4(val) __write_ulong_gc0_register(18, 4, val)
+#define write_gc0_watchlo5(val) __write_ulong_gc0_register(18, 5, val)
+#define write_gc0_watchlo6(val) __write_ulong_gc0_register(18, 6, val)
+#define write_gc0_watchlo7(val) __write_ulong_gc0_register(18, 7, val)
+
+#define read_gc0_watchhi0() __read_32bit_gc0_register(19, 0)
+#define read_gc0_watchhi1() __read_32bit_gc0_register(19, 1)
+#define read_gc0_watchhi2() __read_32bit_gc0_register(19, 2)
+#define read_gc0_watchhi3() __read_32bit_gc0_register(19, 3)
+#define read_gc0_watchhi4() __read_32bit_gc0_register(19, 4)
+#define read_gc0_watchhi5() __read_32bit_gc0_register(19, 5)
+#define read_gc0_watchhi6() __read_32bit_gc0_register(19, 6)
+#define read_gc0_watchhi7() __read_32bit_gc0_register(19, 7)
+#define write_gc0_watchhi0(val) __write_32bit_gc0_register(19, 0, val)
+#define write_gc0_watchhi1(val) __write_32bit_gc0_register(19, 1, val)
+#define write_gc0_watchhi2(val) __write_32bit_gc0_register(19, 2, val)
+#define write_gc0_watchhi3(val) __write_32bit_gc0_register(19, 3, val)
+#define write_gc0_watchhi4(val) __write_32bit_gc0_register(19, 4, val)
+#define write_gc0_watchhi5(val) __write_32bit_gc0_register(19, 5, val)
+#define write_gc0_watchhi6(val) __write_32bit_gc0_register(19, 6, val)
+#define write_gc0_watchhi7(val) __write_32bit_gc0_register(19, 7, val)
+
+#define read_gc0_xcontext() __read_ulong_gc0_register(20, 0)
+#define write_gc0_xcontext(val) __write_ulong_gc0_register(20, 0, val)
+
+#define read_gc0_perfctrl0() __read_32bit_gc0_register(25, 0)
+#define write_gc0_perfctrl0(val) __write_32bit_gc0_register(25, 0, val)
+#define read_gc0_perfcntr0() __read_32bit_gc0_register(25, 1)
+#define write_gc0_perfcntr0(val) __write_32bit_gc0_register(25, 1, val)
+#define read_gc0_perfcntr0_64() __read_64bit_gc0_register(25, 1)
+#define write_gc0_perfcntr0_64(val) __write_64bit_gc0_register(25, 1, val)
+#define read_gc0_perfctrl1() __read_32bit_gc0_register(25, 2)
+#define write_gc0_perfctrl1(val) __write_32bit_gc0_register(25, 2, val)
+#define read_gc0_perfcntr1() __read_32bit_gc0_register(25, 3)
+#define write_gc0_perfcntr1(val) __write_32bit_gc0_register(25, 3, val)
+#define read_gc0_perfcntr1_64() __read_64bit_gc0_register(25, 3)
+#define write_gc0_perfcntr1_64(val) __write_64bit_gc0_register(25, 3, val)
+#define read_gc0_perfctrl2() __read_32bit_gc0_register(25, 4)
+#define write_gc0_perfctrl2(val) __write_32bit_gc0_register(25, 4, val)
+#define read_gc0_perfcntr2() __read_32bit_gc0_register(25, 5)
+#define write_gc0_perfcntr2(val) __write_32bit_gc0_register(25, 5, val)
+#define read_gc0_perfcntr2_64() __read_64bit_gc0_register(25, 5)
+#define write_gc0_perfcntr2_64(val) __write_64bit_gc0_register(25, 5, val)
+#define read_gc0_perfctrl3() __read_32bit_gc0_register(25, 6)
+#define write_gc0_perfctrl3(val) __write_32bit_gc0_register(25, 6, val)
+#define read_gc0_perfcntr3() __read_32bit_gc0_register(25, 7)
+#define write_gc0_perfcntr3(val) __write_32bit_gc0_register(25, 7, val)
+#define read_gc0_perfcntr3_64() __read_64bit_gc0_register(25, 7)
+#define write_gc0_perfcntr3_64(val) __write_64bit_gc0_register(25, 7, val)
+
+#define read_gc0_errorepc() __read_ulong_gc0_register(30, 0)
+#define write_gc0_errorepc(val) __write_ulong_gc0_register(30, 0, val)
+
+#define read_gc0_kscratch1() __read_ulong_gc0_register(31, 2)
+#define read_gc0_kscratch2() __read_ulong_gc0_register(31, 3)
+#define read_gc0_kscratch3() __read_ulong_gc0_register(31, 4)
+#define read_gc0_kscratch4() __read_ulong_gc0_register(31, 5)
+#define read_gc0_kscratch5() __read_ulong_gc0_register(31, 6)
+#define read_gc0_kscratch6() __read_ulong_gc0_register(31, 7)
+#define write_gc0_kscratch1(val) __write_ulong_gc0_register(31, 2, val)
+#define write_gc0_kscratch2(val) __write_ulong_gc0_register(31, 3, val)
+#define write_gc0_kscratch3(val) __write_ulong_gc0_register(31, 4, val)
+#define write_gc0_kscratch4(val) __write_ulong_gc0_register(31, 5, val)
+#define write_gc0_kscratch5(val) __write_ulong_gc0_register(31, 6, val)
+#define write_gc0_kscratch6(val) __write_ulong_gc0_register(31, 7, val)
+
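The accessors above are what a VZ hypervisor uses to context-switch guest CP0 state. A rough usage sketch (editor's illustration; the helper name is made up, and any required hazard barriers are the caller's responsibility, as elsewhere in this file):

static inline void vz_swap_guest_userlocal(unsigned long *saved, unsigned long next)
{
	*saved = read_gc0_userlocal();
	write_gc0_userlocal(next);
}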
+/*
* Macros to access the floating point coprocessor control registers
*/
#define _read_32bit_cp1_register(source, gas_hardfloat) \
@@ -1762,7 +2309,6 @@ do { \
#else
-#ifdef CONFIG_CPU_MICROMIPS
#define rddsp(mask) \
({ \
unsigned int __res; \
@@ -1771,8 +2317,8 @@ do { \
" .set push \n" \
" .set noat \n" \
" # rddsp $1, %x1 \n" \
- " .hword ((0x0020067c | (%x1 << 14)) >> 16) \n" \
- " .hword ((0x0020067c | (%x1 << 14)) & 0xffff) \n" \
+ _ASM_INSN_IF_MIPS(0x7c000cb8 | (%x1 << 16)) \
+ _ASM_INSN32_IF_MM(0x0020067c | (%x1 << 14)) \
" move %0, $1 \n" \
" .set pop \n" \
: "=r" (__res) \
@@ -1787,22 +2333,22 @@ do { \
" .set noat \n" \
" move $1, %0 \n" \
" # wrdsp $1, %x1 \n" \
- " .hword ((0x0020167c | (%x1 << 14)) >> 16) \n" \
- " .hword ((0x0020167c | (%x1 << 14)) & 0xffff) \n" \
+ _ASM_INSN_IF_MIPS(0x7c2004f8 | (%x1 << 11)) \
+ _ASM_INSN32_IF_MM(0x0020167c | (%x1 << 14)) \
" .set pop \n" \
: \
: "r" (val), "i" (mask)); \
} while (0)
-#define _umips_dsp_mfxxx(ins) \
+#define _dsp_mfxxx(ins) \
({ \
unsigned long __treg; \
\
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
- " .hword 0x0001 \n" \
- " .hword %x1 \n" \
+ _ASM_INSN_IF_MIPS(0x00000810 | %X1) \
+ _ASM_INSN32_IF_MM(0x0001007c | %x1) \
" move %0, $1 \n" \
" .set pop \n" \
: "=r" (__treg) \
@@ -1810,101 +2356,28 @@ do { \
__treg; \
})
-#define _umips_dsp_mtxxx(val, ins) \
+#define _dsp_mtxxx(val, ins) \
do { \
__asm__ __volatile__( \
" .set push \n" \
" .set noat \n" \
" move $1, %0 \n" \
- " .hword 0x0001 \n" \
- " .hword %x1 \n" \
+ _ASM_INSN_IF_MIPS(0x00200011 | %X1) \
+ _ASM_INSN32_IF_MM(0x0001207c | %x1) \
" .set pop \n" \
: \
: "r" (val), "i" (ins)); \
} while (0)
-#define _umips_dsp_mflo(reg) _umips_dsp_mfxxx((reg << 14) | 0x107c)
-#define _umips_dsp_mfhi(reg) _umips_dsp_mfxxx((reg << 14) | 0x007c)
-
-#define _umips_dsp_mtlo(val, reg) _umips_dsp_mtxxx(val, ((reg << 14) | 0x307c))
-#define _umips_dsp_mthi(val, reg) _umips_dsp_mtxxx(val, ((reg << 14) | 0x207c))
-
-#define mflo0() _umips_dsp_mflo(0)
-#define mflo1() _umips_dsp_mflo(1)
-#define mflo2() _umips_dsp_mflo(2)
-#define mflo3() _umips_dsp_mflo(3)
-
-#define mfhi0() _umips_dsp_mfhi(0)
-#define mfhi1() _umips_dsp_mfhi(1)
-#define mfhi2() _umips_dsp_mfhi(2)
-#define mfhi3() _umips_dsp_mfhi(3)
+#ifdef CONFIG_CPU_MICROMIPS
-#define mtlo0(x) _umips_dsp_mtlo(x, 0)
-#define mtlo1(x) _umips_dsp_mtlo(x, 1)
-#define mtlo2(x) _umips_dsp_mtlo(x, 2)
-#define mtlo3(x) _umips_dsp_mtlo(x, 3)
+#define _dsp_mflo(reg) _dsp_mfxxx((reg << 14) | 0x1000)
+#define _dsp_mfhi(reg) _dsp_mfxxx((reg << 14) | 0x0000)
-#define mthi0(x) _umips_dsp_mthi(x, 0)
-#define mthi1(x) _umips_dsp_mthi(x, 1)
-#define mthi2(x) _umips_dsp_mthi(x, 2)
-#define mthi3(x) _umips_dsp_mthi(x, 3)
+#define _dsp_mtlo(val, reg) _dsp_mtxxx(val, ((reg << 14) | 0x1000))
+#define _dsp_mthi(val, reg) _dsp_mtxxx(val, ((reg << 14) | 0x0000))
#else /* !CONFIG_CPU_MICROMIPS */
-#define rddsp(mask) \
-({ \
- unsigned int __res; \
- \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " # rddsp $1, %x1 \n" \
- " .word 0x7c000cb8 | (%x1 << 16) \n" \
- " move %0, $1 \n" \
- " .set pop \n" \
- : "=r" (__res) \
- : "i" (mask)); \
- __res; \
-})
-
-#define wrdsp(val, mask) \
-do { \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " move $1, %0 \n" \
- " # wrdsp $1, %x1 \n" \
- " .word 0x7c2004f8 | (%x1 << 11) \n" \
- " .set pop \n" \
- : \
- : "r" (val), "i" (mask)); \
-} while (0)
-
-#define _dsp_mfxxx(ins) \
-({ \
- unsigned long __treg; \
- \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " .word (0x00000810 | %1) \n" \
- " move %0, $1 \n" \
- " .set pop \n" \
- : "=r" (__treg) \
- : "i" (ins)); \
- __treg; \
-})
-
-#define _dsp_mtxxx(val, ins) \
-do { \
- __asm__ __volatile__( \
- " .set push \n" \
- " .set noat \n" \
- " move $1, %0 \n" \
- " .word (0x00200011 | %1) \n" \
- " .set pop \n" \
- : \
- : "r" (val), "i" (ins)); \
-} while (0)
#define _dsp_mflo(reg) _dsp_mfxxx((reg << 21) | 0x0002)
#define _dsp_mfhi(reg) _dsp_mfxxx((reg << 21) | 0x0000)
@@ -1912,6 +2385,8 @@ do { \
#define _dsp_mtlo(val, reg) _dsp_mtxxx(val, ((reg << 11) | 0x0002))
#define _dsp_mthi(val, reg) _dsp_mtxxx(val, ((reg << 11) | 0x0000))
+#endif /* CONFIG_CPU_MICROMIPS */
+
#define mflo0() _dsp_mflo(0)
#define mflo1() _dsp_mflo(1)
#define mflo2() _dsp_mflo(2)
@@ -1932,7 +2407,6 @@ do { \
#define mthi2(x) _dsp_mthi(x, 2)
#define mthi3(x) _dsp_mthi(x, 3)
-#endif /* CONFIG_CPU_MICROMIPS */
#endif
/*
@@ -2001,47 +2475,164 @@ static inline void tlb_write_random(void)
".set reorder");
}
+#ifdef TOOLCHAIN_SUPPORTS_VIRT
+
/*
- * Manipulate bits in a c0 register.
+ * Guest TLB operations.
+ *
+ * It is the responsibility of the caller to take care of any TLB hazards.
+ */
+static inline void guest_tlb_probe(void)
+{
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set noreorder\n\t"
+ ".set virt\n\t"
+ "tlbgp\n\t"
+ ".set pop");
+}
+
+static inline void guest_tlb_read(void)
+{
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set noreorder\n\t"
+ ".set virt\n\t"
+ "tlbgr\n\t"
+ ".set pop");
+}
+
+static inline void guest_tlb_write_indexed(void)
+{
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set noreorder\n\t"
+ ".set virt\n\t"
+ "tlbgwi\n\t"
+ ".set pop");
+}
+
+static inline void guest_tlb_write_random(void)
+{
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set noreorder\n\t"
+ ".set virt\n\t"
+ "tlbgwr\n\t"
+ ".set pop");
+}
+
+/*
+ * Guest TLB Invalidate Flush
+ */
+static inline void guest_tlbinvf(void)
+{
+ __asm__ __volatile__(
+ ".set push\n\t"
+ ".set noreorder\n\t"
+ ".set virt\n\t"
+ "tlbginvf\n\t"
+ ".set pop");
+}
+
+#else /* TOOLCHAIN_SUPPORTS_VIRT */
+
+/*
+ * Guest TLB operations.
+ *
+ * It is the responsibility of the caller to take care of any TLB hazards.
*/
-#define __BUILD_SET_C0(name) \
+static inline void guest_tlb_probe(void)
+{
+ __asm__ __volatile__(
+ "# tlbgp\n\t"
+ _ASM_INSN_IF_MIPS(0x42000010)
+ _ASM_INSN32_IF_MM(0x0000017c));
+}
+
+static inline void guest_tlb_read(void)
+{
+ __asm__ __volatile__(
+ "# tlbgr\n\t"
+ _ASM_INSN_IF_MIPS(0x42000009)
+ _ASM_INSN32_IF_MM(0x0000117c));
+}
+
+static inline void guest_tlb_write_indexed(void)
+{
+ __asm__ __volatile__(
+ "# tlbgwi\n\t"
+ _ASM_INSN_IF_MIPS(0x4200000a)
+ _ASM_INSN32_IF_MM(0x0000217c));
+}
+
+static inline void guest_tlb_write_random(void)
+{
+ __asm__ __volatile__(
+ "# tlbgwr\n\t"
+ _ASM_INSN_IF_MIPS(0x4200000e)
+ _ASM_INSN32_IF_MM(0x0000317c));
+}
+
+/*
+ * Guest TLB Invalidate Flush
+ */
+static inline void guest_tlbinvf(void)
+{
+ __asm__ __volatile__(
+ "# tlbginvf\n\t"
+ _ASM_INSN_IF_MIPS(0x4200000c)
+ _ASM_INSN32_IF_MM(0x0000517c));
+}
+
+#endif /* !TOOLCHAIN_SUPPORTS_VIRT */
+
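Putting the guest TLB primitives together, a guest TLB lookup follows the same pattern as a root TLB probe. The sketch below is illustrative only (the function name is made up and it assumes the hazard barriers from <asm/hazards.h> are available to the caller):

static inline int vz_guest_tlb_lookup(unsigned long entryhi)
{
	int idx;

	write_gc0_entryhi(entryhi);
	mtc0_tlbw_hazard();
	guest_tlb_probe();
	tlb_probe_hazard();
	idx = read_gc0_index();		/* negative if no matching guest entry */
	if (idx >= 0)
		guest_tlb_read();	/* pull the entry into guest EntryHi/EntryLo */
	return idx;
}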
+/*
+ * Manipulate bits in a register.
+ */
+#define __BUILD_SET_COMMON(name) \
static inline unsigned int \
-set_c0_##name(unsigned int set) \
+set_##name(unsigned int set) \
{ \
unsigned int res, new; \
\
- res = read_c0_##name(); \
+ res = read_##name(); \
new = res | set; \
- write_c0_##name(new); \
+ write_##name(new); \
\
return res; \
} \
\
static inline unsigned int \
-clear_c0_##name(unsigned int clear) \
+clear_##name(unsigned int clear) \
{ \
unsigned int res, new; \
\
- res = read_c0_##name(); \
+ res = read_##name(); \
new = res & ~clear; \
- write_c0_##name(new); \
+ write_##name(new); \
\
return res; \
} \
\
static inline unsigned int \
-change_c0_##name(unsigned int change, unsigned int val) \
+change_##name(unsigned int change, unsigned int val) \
{ \
unsigned int res, new; \
\
- res = read_c0_##name(); \
+ res = read_##name(); \
new = res & ~change; \
new |= (val & change); \
- write_c0_##name(new); \
+ write_##name(new); \
\
return res; \
}
+/*
+ * Manipulate bits in a c0 register.
+ */
+#define __BUILD_SET_C0(name) __BUILD_SET_COMMON(c0_##name)
+
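Each __BUILD_SET_C0(foo) invocation below therefore generates set_c0_foo(), clear_c0_foo() and change_c0_foo(), all returning the previous register value, so, for instance, interrupts can be masked and restored with:

	unsigned int old = clear_c0_status(ST0_IE);
	/* ... critical section with interrupts masked ... */
	write_c0_status(old);	/* restore the previous Status value */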
__BUILD_SET_C0(status)
__BUILD_SET_C0(cause)
__BUILD_SET_C0(config)
@@ -2050,6 +2641,11 @@ __BUILD_SET_C0(intcontrol)
__BUILD_SET_C0(intctl)
__BUILD_SET_C0(srsmap)
__BUILD_SET_C0(pagegrain)
+__BUILD_SET_C0(guestctl0)
+__BUILD_SET_C0(guestctl0ext)
+__BUILD_SET_C0(guestctl1)
+__BUILD_SET_C0(guestctl2)
+__BUILD_SET_C0(guestctl3)
__BUILD_SET_C0(brcm_config_0)
__BUILD_SET_C0(brcm_bus_pll)
__BUILD_SET_C0(brcm_reset)
@@ -2059,12 +2655,21 @@ __BUILD_SET_C0(brcm_config)
__BUILD_SET_C0(brcm_mode)
/*
+ * Manipulate bits in a guest c0 register.
+ */
+#define __BUILD_SET_GC0(name) __BUILD_SET_COMMON(gc0_##name)
+
+__BUILD_SET_GC0(status)
+__BUILD_SET_GC0(cause)
+__BUILD_SET_GC0(ebase)
+
+/*
* Return low 10 bits of ebase.
* Note that under KVM (MIPSVZ) this returns vcpu id.
*/
static inline unsigned int get_ebase_cpunum(void)
{
- return read_c0_ebase() & 0x3ff;
+ return read_c0_ebase() & MIPS_EBASE_CPUNUM;
}
#endif /* !__ASSEMBLY__ */
diff --git a/arch/mips/include/asm/mmu.h b/arch/mips/include/asm/mmu.h
index 1afa1f986df8..f6ba08d77931 100644
--- a/arch/mips/include/asm/mmu.h
+++ b/arch/mips/include/asm/mmu.h
@@ -2,11 +2,20 @@
#define __ASM_MMU_H
#include <linux/atomic.h>
+#include <linux/spinlock.h>
+#include <linux/wait.h>
typedef struct {
unsigned long asid[NR_CPUS];
void *vdso;
atomic_t fp_mode_switching;
+
+ /* lock to be held whilst modifying bd_emupage_allocmap */
+ spinlock_t bd_emupage_lock;
+ /* bitmap tracking allocation of emuframes in the bd emupage */
+ unsigned long *bd_emupage_allocmap;
+ /* wait queue for threads requiring an emuframe */
+ wait_queue_head_t bd_emupage_queue;
} mm_context_t;
#endif /* __ASM_MMU_H */
diff --git a/arch/mips/include/asm/mmu_context.h b/arch/mips/include/asm/mmu_context.h
index 45914b59824c..ddd57ade1aa8 100644
--- a/arch/mips/include/asm/mmu_context.h
+++ b/arch/mips/include/asm/mmu_context.h
@@ -16,6 +16,7 @@
#include <linux/smp.h>
#include <linux/slab.h>
#include <asm/cacheflush.h>
+#include <asm/dsemul.h>
#include <asm/hazards.h>
#include <asm/tlbflush.h>
#include <asm-generic/mm_hooks.h>
@@ -65,37 +66,32 @@ extern unsigned long pgd_current[];
back_to_back_c0_hazard(); \
TLBMISS_HANDLER_SETUP_PGD(swapper_pg_dir)
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT*/
-#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
-#define ASID_INC 0x40
-#define ASID_MASK 0xfc0
-
-#elif defined(CONFIG_CPU_R8000)
-
-#define ASID_INC 0x10
-#define ASID_MASK 0xff0
-
-#else /* FIXME: not correct for R6000 */
+/*
+ * All unused by hardware upper bits will be considered
+ * as a software asid extension.
+ */
+static unsigned long asid_version_mask(unsigned int cpu)
+{
+ unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
-#define ASID_INC 0x1
-#define ASID_MASK 0xff
+ return ~(asid_mask | (asid_mask - 1));
+}
-#endif
+static unsigned long asid_first_version(unsigned int cpu)
+{
+ return ~asid_version_mask(cpu) + 1;
+}
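/*
 * Worked example (editor's note, not part of the patch): with an 8 bit
 * hardware ASID, cpu_asid_mask() is 0xff, so asid_version_mask() is
 * ~(0xff | 0xfe) == ~0xffUL, i.e. all bits above the ASID field, and
 * asid_first_version() is ~(~0xffUL) + 1 == 0x100, the first value with
 * a non-zero version part.
 */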
#define cpu_context(cpu, mm) ((mm)->context.asid[cpu])
-#define cpu_asid(cpu, mm) (cpu_context((cpu), (mm)) & ASID_MASK)
#define asid_cache(cpu) (cpu_data[cpu].asid_cache)
+#define cpu_asid(cpu, mm) \
+ (cpu_context((cpu), (mm)) & cpu_asid_mask(&cpu_data[cpu]))
static inline void enter_lazy_tlb(struct mm_struct *mm, struct task_struct *tsk)
{
}
-/*
- * All unused by hardware upper bits will be considered
- * as a software asid extension.
- */
-#define ASID_VERSION_MASK ((unsigned long)~(ASID_MASK|(ASID_MASK-1)))
-#define ASID_FIRST_VERSION ((unsigned long)(~ASID_VERSION_MASK) + 1)
/* Normal, classic MIPS get_new_mmu_context */
static inline void
@@ -104,7 +100,7 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
extern void kvm_local_flush_tlb_all(void);
unsigned long asid = asid_cache(cpu);
- if (! ((asid += ASID_INC) & ASID_MASK) ) {
+ if (!((asid += cpu_asid_inc()) & cpu_asid_mask(&cpu_data[cpu]))) {
if (cpu_has_vtag_icache)
flush_icache_all();
#ifdef CONFIG_KVM
@@ -113,7 +109,7 @@ get_new_mmu_context(struct mm_struct *mm, unsigned long cpu)
local_flush_tlb_all(); /* start new asid cycle */
#endif
if (!asid) /* fix version if needed */
- asid = ASID_FIRST_VERSION;
+ asid = asid_first_version(cpu);
}
cpu_context(cpu, mm) = asid_cache(cpu) = asid;
@@ -133,6 +129,10 @@ init_new_context(struct task_struct *tsk, struct mm_struct *mm)
atomic_set(&mm->context.fp_mode_switching, 0);
+ mm->context.bd_emupage_allocmap = NULL;
+ spin_lock_init(&mm->context.bd_emupage_lock);
+ init_waitqueue_head(&mm->context.bd_emupage_queue);
+
return 0;
}
@@ -145,7 +145,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
htw_stop();
/* Check if our ASID is of an older version and thus invalid */
- if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & ASID_VERSION_MASK)
+ if ((cpu_context(cpu, next) ^ asid_cache(cpu)) & asid_version_mask(cpu))
get_new_mmu_context(next, cpu);
write_c0_entryhi(cpu_asid(cpu, next));
TLBMISS_HANDLER_SETUP_PGD(next->pgd);
@@ -167,6 +167,7 @@ static inline void switch_mm(struct mm_struct *prev, struct mm_struct *next,
*/
static inline void destroy_context(struct mm_struct *mm)
{
+ dsemul_mm_cleanup(mm);
}
#define deactivate_mm(tsk, mm) do { } while (0)
diff --git a/arch/mips/include/asm/msa.h b/arch/mips/include/asm/msa.h
index bbb85fe21642..8967b475ab10 100644
--- a/arch/mips/include/asm/msa.h
+++ b/arch/mips/include/asm/msa.h
@@ -147,6 +147,19 @@ static inline void restore_msa(struct task_struct *t)
_restore_msa(t);
}
+static inline void init_msa_upper(void)
+{
+ /*
+ * Check cpu_has_msa only if it's a constant. This will allow the
+ * compiler to optimise out code for CPUs without MSA without adding
+ * an extra redundant check for CPUs with MSA.
+ */
+ if (__builtin_constant_p(cpu_has_msa) && !cpu_has_msa)
+ return;
+
+ _init_msa_upper();
+}
+
#ifdef TOOLCHAIN_SUPPORTS_MSA
#define __BUILD_MSA_CTL_REG(name, cs) \
@@ -155,6 +168,7 @@ static inline unsigned int read_msa_##name(void) \
unsigned int reg; \
__asm__ __volatile__( \
" .set push\n" \
+ " .set fp=64\n" \
" .set msa\n" \
" cfcmsa %0, $" #cs "\n" \
" .set pop\n" \
@@ -166,6 +180,7 @@ static inline void write_msa_##name(unsigned int val) \
{ \
__asm__ __volatile__( \
" .set push\n" \
+ " .set fp=64\n" \
" .set msa\n" \
" ctcmsa $" #cs ", %0\n" \
" .set pop\n" \
@@ -179,13 +194,6 @@ static inline void write_msa_##name(unsigned int val) \
* allow compilation with toolchains that do not support MSA. Once all
* toolchains in use support MSA these can be removed.
*/
-#ifdef CONFIG_CPU_MICROMIPS
-#define CFC_MSA_INSN 0x587e0056
-#define CTC_MSA_INSN 0x583e0816
-#else
-#define CFC_MSA_INSN 0x787e0059
-#define CTC_MSA_INSN 0x783e0819
-#endif
#define __BUILD_MSA_CTL_REG(name, cs) \
static inline unsigned int read_msa_##name(void) \
@@ -194,11 +202,12 @@ static inline unsigned int read_msa_##name(void) \
__asm__ __volatile__( \
" .set push\n" \
" .set noat\n" \
- " .insn\n" \
- " .word %1 | (" #cs " << 11)\n" \
+ " # cfcmsa $1, $%1\n" \
+ _ASM_INSN_IF_MIPS(0x787e0059 | %1 << 11) \
+ _ASM_INSN32_IF_MM(0x587e0056 | %1 << 11) \
" move %0, $1\n" \
" .set pop\n" \
- : "=r"(reg) : "i"(CFC_MSA_INSN)); \
+ : "=r"(reg) : "i"(cs)); \
return reg; \
} \
\
@@ -208,10 +217,11 @@ static inline void write_msa_##name(unsigned int val) \
" .set push\n" \
" .set noat\n" \
" move $1, %0\n" \
- " .insn\n" \
- " .word %1 | (" #cs " << 6)\n" \
+ " # ctcmsa $%1, $1\n" \
+ _ASM_INSN_IF_MIPS(0x783e0819 | %1 << 6) \
+ _ASM_INSN32_IF_MM(0x583e0816 | %1 << 6) \
" .set pop\n" \
- : : "r"(val), "i"(CTC_MSA_INSN)); \
+ : : "r"(val), "i"(cs)); \
}
#endif /* !TOOLCHAIN_SUPPORTS_MSA */
diff --git a/arch/mips/include/asm/octeon/cvmx-bootinfo.h b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
index d92cf59bdae6..62787765575e 100644
--- a/arch/mips/include/asm/octeon/cvmx-bootinfo.h
+++ b/arch/mips/include/asm/octeon/cvmx-bootinfo.h
@@ -32,6 +32,8 @@
#ifndef __CVMX_BOOTINFO_H__
#define __CVMX_BOOTINFO_H__
+#include "cvmx-coremask.h"
+
/*
* Current major and minor versions of the CVMX bootinfo block that is
* passed from the bootloader to the application. This is versioned
@@ -39,7 +41,7 @@
* versions.
*/
#define CVMX_BOOTINFO_MAJ_VER 1
-#define CVMX_BOOTINFO_MIN_VER 3
+#define CVMX_BOOTINFO_MIN_VER 4
#if (CVMX_BOOTINFO_MAJ_VER == 1)
#define CVMX_BOOTINFO_OCTEON_SERIAL_LEN 20
@@ -124,6 +126,13 @@ struct cvmx_bootinfo {
*/
uint64_t fdt_addr;
#endif
+#if (CVMX_BOOTINFO_MIN_VER >= 4)
+ /*
+ * Coremask used for processors with more than 32 cores
+ * or with OCI. This replaces core_mask.
+ */
+ struct cvmx_coremask ext_core_mask;
+#endif
#else /* __BIG_ENDIAN */
/*
* Little-Endian: When the CPU mode is switched to
@@ -177,6 +186,9 @@ struct cvmx_bootinfo {
#if (CVMX_BOOTINFO_MIN_VER >= 3)
uint64_t fdt_addr;
#endif
+#if (CVMX_BOOTINFO_MIN_VER >= 4)
+ struct cvmx_coremask ext_core_mask;
+#endif
#endif
};
@@ -388,7 +400,7 @@ static inline const char *cvmx_board_type_to_string(enum
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_KONTRON_S1901)
ENUM_BRD_TYPE_CASE(CVMX_BOARD_TYPE_CUST_PRIVATE_MAX)
}
- return "Unsupported Board";
+ return NULL;
}
#define ENUM_CHIP_TYPE_CASE(x) \
diff --git a/arch/mips/include/asm/octeon/cvmx-ciu3-defs.h b/arch/mips/include/asm/octeon/cvmx-ciu3-defs.h
new file mode 100644
index 000000000000..547f778f5b05
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-ciu3-defs.h
@@ -0,0 +1,353 @@
+/*
+ * Copyright (c) 2003-2016 Cavium Inc.
+ *
+ * This file is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License, Version 2, as
+ * published by the Free Software Foundation.
+ *
+ * This file is distributed in the hope that it will be useful, but
+ * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
+ * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
+ * NONINFRINGEMENT. See the GNU General Public License for more
+ * details.
+ *
+ */
+
+#ifndef __CVMX_CIU3_DEFS_H__
+#define __CVMX_CIU3_DEFS_H__
+
+#define CVMX_CIU3_FUSE CVMX_ADD_IO_SEG(0x00010100000001A0ull)
+#define CVMX_CIU3_BIST CVMX_ADD_IO_SEG(0x00010100000001C0ull)
+#define CVMX_CIU3_CONST CVMX_ADD_IO_SEG(0x0001010000000220ull)
+#define CVMX_CIU3_CTL CVMX_ADD_IO_SEG(0x00010100000000E0ull)
+#define CVMX_CIU3_DESTX_IO_INT(offset) (CVMX_ADD_IO_SEG(0x0001010000210000ull) + ((offset) & 7) * 8)
+#define CVMX_CIU3_DESTX_PP_INT(offset) (CVMX_ADD_IO_SEG(0x0001010000200000ull) + ((offset) & 255) * 8)
+#define CVMX_CIU3_GSTOP CVMX_ADD_IO_SEG(0x0001010000000140ull)
+#define CVMX_CIU3_IDTX_CTL(offset) (CVMX_ADD_IO_SEG(0x0001010000110000ull) + ((offset) & 255) * 8)
+#define CVMX_CIU3_IDTX_IO(offset) (CVMX_ADD_IO_SEG(0x0001010000130000ull) + ((offset) & 255) * 8)
+#define CVMX_CIU3_IDTX_PPX(offset, block_id) (CVMX_ADD_IO_SEG(0x0001010000120000ull) + ((block_id) & 255) * 0x20ull)
+#define CVMX_CIU3_INTR_RAM_ECC_CTL CVMX_ADD_IO_SEG(0x0001010000000260ull)
+#define CVMX_CIU3_INTR_RAM_ECC_ST CVMX_ADD_IO_SEG(0x0001010000000280ull)
+#define CVMX_CIU3_INTR_READY CVMX_ADD_IO_SEG(0x00010100000002A0ull)
+#define CVMX_CIU3_INTR_SLOWDOWN CVMX_ADD_IO_SEG(0x0001010000000240ull)
+#define CVMX_CIU3_ISCX_CTL(offset) (CVMX_ADD_IO_SEG(0x0001010080000000ull) + ((offset) & 1048575) * 8)
+#define CVMX_CIU3_ISCX_W1C(offset) (CVMX_ADD_IO_SEG(0x0001010090000000ull) + ((offset) & 1048575) * 8)
+#define CVMX_CIU3_ISCX_W1S(offset) (CVMX_ADD_IO_SEG(0x00010100A0000000ull) + ((offset) & 1048575) * 8)
+#define CVMX_CIU3_NMI CVMX_ADD_IO_SEG(0x0001010000000160ull)
+#define CVMX_CIU3_SISCX(offset) (CVMX_ADD_IO_SEG(0x0001010000220000ull) + ((offset) & 255) * 8)
+#define CVMX_CIU3_TIMX(offset) (CVMX_ADD_IO_SEG(0x0001010000010000ull) + ((offset) & 15) * 8)
+
+union cvmx_ciu3_bist {
+ uint64_t u64;
+ struct cvmx_ciu3_bist_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_9_63 : 55;
+ uint64_t bist : 9;
+#else
+ uint64_t bist : 9;
+ uint64_t reserved_9_63 : 55;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_const {
+ uint64_t u64;
+ struct cvmx_ciu3_const_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t dests_io : 16;
+ uint64_t pintsn : 16;
+ uint64_t dests_pp : 16;
+ uint64_t idt : 16;
+#else
+ uint64_t idt : 16;
+ uint64_t dests_pp : 16;
+ uint64_t pintsn : 16;
+ uint64_t dests_io : 16;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_ctl {
+ uint64_t u64;
+ struct cvmx_ciu3_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t mcd_sel : 2;
+ uint64_t iscmem_le : 1;
+ uint64_t seq_dis : 1;
+ uint64_t cclk_dis : 1;
+#else
+ uint64_t cclk_dis : 1;
+ uint64_t seq_dis : 1;
+ uint64_t iscmem_le : 1;
+ uint64_t mcd_sel : 2;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_destx_io_int {
+ uint64_t u64;
+ struct cvmx_ciu3_destx_io_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_52_63 : 12;
+ uint64_t intsn : 20;
+ uint64_t reserved_10_31 : 22;
+ uint64_t intidt : 8;
+ uint64_t newint : 1;
+ uint64_t intr : 1;
+#else
+ uint64_t intr : 1;
+ uint64_t newint : 1;
+ uint64_t intidt : 8;
+ uint64_t reserved_10_31 : 22;
+ uint64_t intsn : 20;
+ uint64_t reserved_52_63 : 12;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_destx_pp_int {
+ uint64_t u64;
+ struct cvmx_ciu3_destx_pp_int_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_52_63 : 12;
+ uint64_t intsn : 20;
+ uint64_t reserved_10_31 : 22;
+ uint64_t intidt : 8;
+ uint64_t newint : 1;
+ uint64_t intr : 1;
+#else
+ uint64_t intr : 1;
+ uint64_t newint : 1;
+ uint64_t intidt : 8;
+ uint64_t reserved_10_31 : 22;
+ uint64_t intsn : 20;
+ uint64_t reserved_52_63 : 12;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_gstop {
+ uint64_t u64;
+ struct cvmx_ciu3_gstop_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_1_63 : 63;
+ uint64_t gstop : 1;
+#else
+ uint64_t gstop : 1;
+ uint64_t reserved_1_63 : 63;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_idtx_ctl {
+ uint64_t u64;
+ struct cvmx_ciu3_idtx_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_52_63 : 12;
+ uint64_t intsn : 20;
+ uint64_t reserved_4_31 : 28;
+ uint64_t intr : 1;
+ uint64_t newint : 1;
+ uint64_t ip_num : 2;
+#else
+ uint64_t ip_num : 2;
+ uint64_t newint : 1;
+ uint64_t intr : 1;
+ uint64_t reserved_4_31 : 28;
+ uint64_t intsn : 20;
+ uint64_t reserved_52_63 : 12;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_idtx_io {
+ uint64_t u64;
+ struct cvmx_ciu3_idtx_io_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_5_63 : 59;
+ uint64_t io : 5;
+#else
+ uint64_t io : 5;
+ uint64_t reserved_5_63 : 59;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_idtx_ppx {
+ uint64_t u64;
+ struct cvmx_ciu3_idtx_ppx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t pp : 48;
+#else
+ uint64_t pp : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_intr_ram_ecc_ctl {
+ uint64_t u64;
+ struct cvmx_ciu3_intr_ram_ecc_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t flip_synd : 2;
+ uint64_t ecc_ena : 1;
+#else
+ uint64_t ecc_ena : 1;
+ uint64_t flip_synd : 2;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_intr_ram_ecc_st {
+ uint64_t u64;
+ struct cvmx_ciu3_intr_ram_ecc_st_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_52_63 : 12;
+ uint64_t addr : 20;
+ uint64_t reserved_6_31 : 26;
+ uint64_t sisc_dbe : 1;
+ uint64_t sisc_sbe : 1;
+ uint64_t idt_dbe : 1;
+ uint64_t idt_sbe : 1;
+ uint64_t isc_dbe : 1;
+ uint64_t isc_sbe : 1;
+#else
+ uint64_t isc_sbe : 1;
+ uint64_t isc_dbe : 1;
+ uint64_t idt_sbe : 1;
+ uint64_t idt_dbe : 1;
+ uint64_t sisc_sbe : 1;
+ uint64_t sisc_dbe : 1;
+ uint64_t reserved_6_31 : 26;
+ uint64_t addr : 20;
+ uint64_t reserved_52_63 : 12;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_intr_ready {
+ uint64_t u64;
+ struct cvmx_ciu3_intr_ready_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_46_63 : 18;
+ uint64_t index : 14;
+ uint64_t reserved_1_31 : 31;
+ uint64_t ready : 1;
+#else
+ uint64_t ready : 1;
+ uint64_t reserved_1_31 : 31;
+ uint64_t index : 14;
+ uint64_t reserved_46_63 : 18;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_intr_slowdown {
+ uint64_t u64;
+ struct cvmx_ciu3_intr_slowdown_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_3_63 : 61;
+ uint64_t ctl : 3;
+#else
+ uint64_t ctl : 3;
+ uint64_t reserved_3_63 : 61;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_iscx_ctl {
+ uint64_t u64;
+ struct cvmx_ciu3_iscx_ctl_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_24_63 : 40;
+ uint64_t idt : 8;
+ uint64_t imp : 1;
+ uint64_t reserved_2_14 : 13;
+ uint64_t en : 1;
+ uint64_t raw : 1;
+#else
+ uint64_t raw : 1;
+ uint64_t en : 1;
+ uint64_t reserved_2_14 : 13;
+ uint64_t imp : 1;
+ uint64_t idt : 8;
+ uint64_t reserved_24_63 : 40;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_iscx_w1c {
+ uint64_t u64;
+ struct cvmx_ciu3_iscx_w1c_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t en : 1;
+ uint64_t raw : 1;
+#else
+ uint64_t raw : 1;
+ uint64_t en : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_iscx_w1s {
+ uint64_t u64;
+ struct cvmx_ciu3_iscx_w1s_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_2_63 : 62;
+ uint64_t en : 1;
+ uint64_t raw : 1;
+#else
+ uint64_t raw : 1;
+ uint64_t en : 1;
+ uint64_t reserved_2_63 : 62;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_nmi {
+ uint64_t u64;
+ struct cvmx_ciu3_nmi_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63 : 16;
+ uint64_t nmi : 48;
+#else
+ uint64_t nmi : 48;
+ uint64_t reserved_48_63 : 16;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_siscx {
+ uint64_t u64;
+ struct cvmx_ciu3_siscx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t en : 64;
+#else
+ uint64_t en : 64;
+#endif
+ } s;
+};
+
+union cvmx_ciu3_timx {
+ uint64_t u64;
+ struct cvmx_ciu3_timx_s {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_37_63 : 27;
+ uint64_t one_shot : 1;
+ uint64_t len : 36;
+#else
+ uint64_t len : 36;
+ uint64_t one_shot : 1;
+ uint64_t reserved_37_63 : 27;
+#endif
+ } s;
+};
+
+#endif
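These register layouts are consumed through the usual Octeon CSR accessors. A hedged sketch (it assumes cvmx_read_csr() from cvmx.h; the helper name is illustrative) of checking whether CIU3 interrupt delivery reports itself ready:

static inline bool ciu3_intr_ready(void)
{
	union cvmx_ciu3_intr_ready ready;

	ready.u64 = cvmx_read_csr(CVMX_CIU3_INTR_READY);
	return ready.s.ready != 0;
}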
diff --git a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
index 8d05d9069823..a07a36f7d814 100644
--- a/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
+++ b/arch/mips/include/asm/octeon/cvmx-cmd-queue.h
@@ -146,7 +146,7 @@ typedef struct {
* This structure contains the global state of all command queues.
* It is stored in a bootmem named block and shared by all
* applications running on Octeon. Tickets are stored in a differnet
- * cahce line that queue information to reduce the contention on the
+ * cache line that queue information to reduce the contention on the
* ll/sc used to get a ticket. If this is not the case, the update
* of queue state causes the ll/sc to fail quite often.
*/
diff --git a/arch/mips/include/asm/octeon/cvmx-config.h b/arch/mips/include/asm/octeon/cvmx-config.h
index f7dd17d0dc22..f4f1996e0fac 100644
--- a/arch/mips/include/asm/octeon/cvmx-config.h
+++ b/arch/mips/include/asm/octeon/cvmx-config.h
@@ -33,7 +33,7 @@
/* Packet buffers */
#define CVMX_FPA_PACKET_POOL (0)
#define CVMX_FPA_PACKET_POOL_SIZE CVMX_FPA_POOL_0_SIZE
-/* Work queue entrys */
+/* Work queue entries */
#define CVMX_FPA_WQE_POOL (1)
#define CVMX_FPA_WQE_POOL_SIZE CVMX_FPA_POOL_1_SIZE
/* PKO queue command buffers */
diff --git a/arch/mips/include/asm/octeon/cvmx-coremask.h b/arch/mips/include/asm/octeon/cvmx-coremask.h
new file mode 100644
index 000000000000..097dc096db84
--- /dev/null
+++ b/arch/mips/include/asm/octeon/cvmx-coremask.h
@@ -0,0 +1,89 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (c) 2016 Cavium Inc. (support@cavium.com).
+ *
+ */
+
+/*
+ * Module to support operations on a bitmap of cores. A coremask can be used to
+ * select a specific core, a group of cores, or all available cores, for
+ * initialization and differentiation of roles within a single shared binary
+ * executable image.
+ *
+ * The core numbers used in this file are the same value as what is found in
+ * the COP0_EBASE register and the rdhwr 0 instruction.
+ *
+ * For the CN78XX and other multi-node environments the core numbers are not
+ * contiguous. The core numbers for the CN78XX are as follows:
+ *
+ * Node 0: Cores 0 - 47
+ * Node 1: Cores 128 - 175
+ * Node 2: Cores 256 - 303
+ * Node 3: Cores 384 - 431
+ *
+ */
+
+#ifndef __CVMX_COREMASK_H__
+#define __CVMX_COREMASK_H__
+
+#define CVMX_MIPS_MAX_CORES 1024
+/* bits per holder */
+#define CVMX_COREMASK_ELTSZ 64
+
+/* cvmx_coremask_t's size in u64 */
+#define CVMX_COREMASK_BMPSZ (CVMX_MIPS_MAX_CORES / CVMX_COREMASK_ELTSZ)
+
+
+/* cvmx_coremask_t */
+struct cvmx_coremask {
+ u64 coremask_bitmap[CVMX_COREMASK_BMPSZ];
+};
+
+/*
+ * Is ``core'' set in the coremask?
+ */
+static inline bool cvmx_coremask_is_core_set(const struct cvmx_coremask *pcm,
+ int core)
+{
+ int n, i;
+
+ n = core % CVMX_COREMASK_ELTSZ;
+ i = core / CVMX_COREMASK_ELTSZ;
+
+ return (pcm->coremask_bitmap[i] & ((u64)1 << n)) != 0;
+}
+
+/*
+ * Make a copy of a coremask
+ */
+static inline void cvmx_coremask_copy(struct cvmx_coremask *dest,
+ const struct cvmx_coremask *src)
+{
+ memcpy(dest, src, sizeof(*dest));
+}
+
+/*
+ * Set the lower 64-bit of the coremask.
+ */
+static inline void cvmx_coremask_set64(struct cvmx_coremask *pcm,
+ uint64_t coremask_64)
+{
+ pcm->coremask_bitmap[0] = coremask_64;
+}
+
+/*
+ * Clear ``core'' from the coremask.
+ */
+static inline void cvmx_coremask_clear_core(struct cvmx_coremask *pcm, int core)
+{
+ int n, i;
+
+ n = core % CVMX_COREMASK_ELTSZ;
+ i = core / CVMX_COREMASK_ELTSZ;
+ pcm->coremask_bitmap[i] &= ~(1ull << n);
+}
+
+#endif /* __CVMX_COREMASK_H__ */
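Usage sketch (editor's illustration; the helper name and its caller are assumptions, not part of this header): seed a coremask from a legacy 64-bit mask and test membership.

static inline bool core_in_legacy_mask(uint64_t legacy_mask, int core)
{
	struct cvmx_coremask cm = { };	/* zero the whole bitmap first */

	cvmx_coremask_set64(&cm, legacy_mask);
	return cvmx_coremask_is_core_set(&cm, core);
}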
diff --git a/arch/mips/include/asm/octeon/cvmx-fpa-defs.h b/arch/mips/include/asm/octeon/cvmx-fpa-defs.h
index 1d79e3c7040d..887ff8e1f715 100644
--- a/arch/mips/include/asm/octeon/cvmx-fpa-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-fpa-defs.h
@@ -66,6 +66,7 @@
#define CVMX_FPA_WART_CTL (CVMX_ADD_IO_SEG(0x00011800280000D8ull))
#define CVMX_FPA_WART_STATUS (CVMX_ADD_IO_SEG(0x00011800280000E0ull))
#define CVMX_FPA_WQE_THRESHOLD (CVMX_ADD_IO_SEG(0x0001180028000468ull))
+#define CVMX_FPA_CLK_COUNT (CVMX_ADD_IO_SEG(0x00012800000000F0ull))
union cvmx_fpa_addr_range_error {
uint64_t u64;
diff --git a/arch/mips/include/asm/octeon/cvmx-helper-board.h b/arch/mips/include/asm/octeon/cvmx-helper-board.h
index 893320375aef..cda93aee712c 100644
--- a/arch/mips/include/asm/octeon/cvmx-helper-board.h
+++ b/arch/mips/include/asm/octeon/cvmx-helper-board.h
@@ -94,7 +94,7 @@ extern int cvmx_helper_board_get_mii_address(int ipd_port);
* @phy_addr: The address of the PHY to program
* @link_flags:
* Flags to control autonegotiation. Bit 0 is autonegotiation
- * enable/disable to maintain backware compatibility.
+ * enable/disable to maintain backward compatibility.
* @link_info: Link speed to program. If the speed is zero and autonegotiation
* is enabled, all possible negotiation speeds are advertised.
*
diff --git a/arch/mips/include/asm/octeon/cvmx-ipd.h b/arch/mips/include/asm/octeon/cvmx-ipd.h
index e13490ebbb27..cbdc14b77435 100644
--- a/arch/mips/include/asm/octeon/cvmx-ipd.h
+++ b/arch/mips/include/asm/octeon/cvmx-ipd.h
@@ -39,7 +39,7 @@
enum cvmx_ipd_mode {
CVMX_IPD_OPC_MODE_STT = 0LL, /* All blocks DRAM, not cached in L2 */
- CVMX_IPD_OPC_MODE_STF = 1LL, /* All bloccks into L2 */
+ CVMX_IPD_OPC_MODE_STF = 1LL, /* All blocks into L2 */
CVMX_IPD_OPC_MODE_STF1_STT = 2LL, /* 1st block L2, rest DRAM */
CVMX_IPD_OPC_MODE_STF2_STT = 3LL /* 1st, 2nd blocks L2, rest DRAM */
};
diff --git a/arch/mips/include/asm/octeon/cvmx-mio-defs.h b/arch/mips/include/asm/octeon/cvmx-mio-defs.h
index bb0ae338a460..5196c04eee41 100644
--- a/arch/mips/include/asm/octeon/cvmx-mio-defs.h
+++ b/arch/mips/include/asm/octeon/cvmx-mio-defs.h
@@ -1481,7 +1481,9 @@ union cvmx_mio_fus_dat2 {
uint64_t u64;
struct cvmx_mio_fus_dat2_s {
#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_48_63:16;
+ uint64_t reserved_59_63:5;
+ uint64_t run_platform:3;
+ uint64_t gbl_pwr_throttle:8;
uint64_t fus118:1;
uint64_t rom_info:10;
uint64_t power_limit:2;
@@ -1513,7 +1515,9 @@ union cvmx_mio_fus_dat2 {
uint64_t power_limit:2;
uint64_t rom_info:10;
uint64_t fus118:1;
- uint64_t reserved_48_63:16;
+ uint64_t gbl_pwr_throttle:8;
+ uint64_t run_platform:3;
+ uint64_t reserved_59_63:5;
#endif
} s;
struct cvmx_mio_fus_dat2_cn30xx {
@@ -1837,50 +1841,192 @@ union cvmx_mio_fus_dat2 {
#endif
} cn68xx;
struct cvmx_mio_fus_dat2_cn68xx cn68xxp1;
+ struct cvmx_mio_fus_dat2_cn70xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_48_63:16;
+ uint64_t fus118:1;
+ uint64_t rom_info:10;
+ uint64_t power_limit:2;
+ uint64_t dorm_crypto:1;
+ uint64_t fus318:1;
+ uint64_t raid_en:1;
+ uint64_t reserved_31_29:3;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t reserved_25_24:2;
+ uint64_t chip_id:8;
+ uint64_t reserved_15_0:16;
+#else
+ uint64_t reserved_15_0:16;
+ uint64_t chip_id:8;
+ uint64_t reserved_25_24:2;
+ uint64_t nocrypto:1;
+ uint64_t nomul:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t reserved_31_29:3;
+ uint64_t raid_en:1;
+ uint64_t fus318:1;
+ uint64_t dorm_crypto:1;
+ uint64_t power_limit:2;
+ uint64_t rom_info:10;
+ uint64_t fus118:1;
+ uint64_t reserved_48_63:16;
+#endif
+ } cn70xx;
+ struct cvmx_mio_fus_dat2_cn70xx cn70xxp1;
+ struct cvmx_mio_fus_dat2_cn73xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63:5;
+ uint64_t run_platform:3;
+ uint64_t gbl_pwr_throttle:8;
+ uint64_t fus118:1;
+ uint64_t rom_info:10;
+ uint64_t power_limit:2;
+ uint64_t dorm_crypto:1;
+ uint64_t fus318:1;
+ uint64_t raid_en:1;
+ uint64_t reserved_31_29:3;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t reserved_25_24:2;
+ uint64_t chip_id:8;
+ uint64_t reserved_15_0:16;
+#else
+ uint64_t reserved_15_0:16;
+ uint64_t chip_id:8;
+ uint64_t reserved_25_24:2;
+ uint64_t nocrypto:1;
+ uint64_t nomul:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t reserved_31_29:3;
+ uint64_t raid_en:1;
+ uint64_t fus318:1;
+ uint64_t dorm_crypto:1;
+ uint64_t power_limit:2;
+ uint64_t rom_info:10;
+ uint64_t fus118:1;
+ uint64_t gbl_pwr_throttle:8;
+ uint64_t run_platform:3;
+ uint64_t reserved_59_63:5;
+#endif
+ } cn73xx;
+ struct cvmx_mio_fus_dat2_cn78xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63:5;
+ uint64_t run_platform:3;
+ uint64_t reserved_48_55:8;
+ uint64_t fus118:1;
+ uint64_t rom_info:10;
+ uint64_t power_limit:2;
+ uint64_t dorm_crypto:1;
+ uint64_t fus318:1;
+ uint64_t raid_en:1;
+ uint64_t reserved_31_29:3;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t reserved_25_24:2;
+ uint64_t chip_id:8;
+ uint64_t reserved_0_15:16;
+#else
+ uint64_t reserved_0_15:16;
+ uint64_t chip_id:8;
+ uint64_t reserved_25_24:2;
+ uint64_t nocrypto:1;
+ uint64_t nomul:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t reserved_31_29:3;
+ uint64_t raid_en:1;
+ uint64_t fus318:1;
+ uint64_t dorm_crypto:1;
+ uint64_t power_limit:2;
+ uint64_t rom_info:10;
+ uint64_t fus118:1;
+ uint64_t reserved_48_55:8;
+ uint64_t run_platform:3;
+ uint64_t reserved_59_63:5;
+#endif
+ } cn78xx;
+ struct cvmx_mio_fus_dat2_cn78xxp2 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t reserved_59_63:5;
+ uint64_t run_platform:3;
+ uint64_t gbl_pwr_throttle:8;
+ uint64_t fus118:1;
+ uint64_t rom_info:10;
+ uint64_t power_limit:2;
+ uint64_t dorm_crypto:1;
+ uint64_t fus318:1;
+ uint64_t raid_en:1;
+ uint64_t reserved_31_29:3;
+ uint64_t nodfa_cp2:1;
+ uint64_t nomul:1;
+ uint64_t nocrypto:1;
+ uint64_t reserved_25_24:2;
+ uint64_t chip_id:8;
+ uint64_t reserved_0_15:16;
+#else
+ uint64_t reserved_0_15:16;
+ uint64_t chip_id:8;
+ uint64_t reserved_25_24:2;
+ uint64_t nocrypto:1;
+ uint64_t nomul:1;
+ uint64_t nodfa_cp2:1;
+ uint64_t reserved_31_29:3;
+ uint64_t raid_en:1;
+ uint64_t fus318:1;
+ uint64_t dorm_crypto:1;
+ uint64_t power_limit:2;
+ uint64_t rom_info:10;
+ uint64_t fus118:1;
+ uint64_t gbl_pwr_throttle:8;
+ uint64_t run_platform:3;
+ uint64_t reserved_59_63:5;
+#endif
+ } cn78xxp2;
struct cvmx_mio_fus_dat2_cn61xx cnf71xx;
+ struct cvmx_mio_fus_dat2_cn73xx cnf75xx;
};
union cvmx_mio_fus_dat3 {
uint64_t u64;
struct cvmx_mio_fus_dat3_s {
#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_58_63:6;
+ uint64_t ema0:6;
uint64_t pll_ctl:10;
uint64_t dfa_info_dte:3;
uint64_t dfa_info_clm:4;
- uint64_t reserved_40_40:1;
- uint64_t ema:2;
+ uint64_t pll_alt_matrix:1;
+ uint64_t reserved_38_39:2;
uint64_t efus_lck_rsv:1;
uint64_t efus_lck_man:1;
uint64_t pll_half_dis:1;
uint64_t l2c_crip:3;
- uint64_t pll_div4:1;
- uint64_t reserved_29_30:2;
- uint64_t bar2_en:1;
+ uint64_t reserved_28_31:4;
uint64_t efus_lck:1;
uint64_t efus_ign:1;
uint64_t nozip:1;
uint64_t nodfa_dte:1;
- uint64_t icache:24;
+ uint64_t reserved_0_23:24;
#else
- uint64_t icache:24;
+ uint64_t reserved_0_23:24;
uint64_t nodfa_dte:1;
uint64_t nozip:1;
uint64_t efus_ign:1;
uint64_t efus_lck:1;
- uint64_t bar2_en:1;
- uint64_t reserved_29_30:2;
- uint64_t pll_div4:1;
+ uint64_t reserved_28_31:4;
uint64_t l2c_crip:3;
uint64_t pll_half_dis:1;
uint64_t efus_lck_man:1;
uint64_t efus_lck_rsv:1;
- uint64_t ema:2;
- uint64_t reserved_40_40:1;
+ uint64_t reserved_38_39:2;
+ uint64_t pll_alt_matrix:1;
uint64_t dfa_info_clm:4;
uint64_t dfa_info_dte:3;
uint64_t pll_ctl:10;
- uint64_t reserved_58_63:6;
+ uint64_t ema0:6;
#endif
} s;
struct cvmx_mio_fus_dat3_cn30xx {
@@ -2022,7 +2168,239 @@ union cvmx_mio_fus_dat3 {
struct cvmx_mio_fus_dat3_cn61xx cn66xx;
struct cvmx_mio_fus_dat3_cn61xx cn68xx;
struct cvmx_mio_fus_dat3_cn61xx cn68xxp1;
+ struct cvmx_mio_fus_dat3_cn70xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t ema0:6;
+ uint64_t pll_ctl:10;
+ uint64_t dfa_info_dte:3;
+ uint64_t dfa_info_clm:4;
+ uint64_t pll_alt_matrix:1;
+ uint64_t pll_bwadj_denom:2;
+ uint64_t efus_lck_rsv:1;
+ uint64_t efus_lck_man:1;
+ uint64_t pll_half_dis:1;
+ uint64_t l2c_crip:3;
+ uint64_t use_int_refclk:1;
+ uint64_t zip_info:2;
+ uint64_t bar2_sz_conf:1;
+ uint64_t efus_lck:1;
+ uint64_t efus_ign:1;
+ uint64_t nozip:1;
+ uint64_t nodfa_dte:1;
+ uint64_t ema1:6;
+ uint64_t reserved_0_17:18;
+#else
+ uint64_t reserved_0_17:18;
+ uint64_t ema1:6;
+ uint64_t nodfa_dte:1;
+ uint64_t nozip:1;
+ uint64_t efus_ign:1;
+ uint64_t efus_lck:1;
+ uint64_t bar2_sz_conf:1;
+ uint64_t zip_info:2;
+ uint64_t use_int_refclk:1;
+ uint64_t l2c_crip:3;
+ uint64_t pll_half_dis:1;
+ uint64_t efus_lck_man:1;
+ uint64_t efus_lck_rsv:1;
+ uint64_t pll_bwadj_denom:2;
+ uint64_t pll_alt_matrix:1;
+ uint64_t dfa_info_clm:4;
+ uint64_t dfa_info_dte:3;
+ uint64_t pll_ctl:10;
+ uint64_t ema0:6;
+#endif
+ } cn70xx;
+ struct cvmx_mio_fus_dat3_cn70xxp1 {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t ema0:6;
+ uint64_t pll_ctl:10;
+ uint64_t dfa_info_dte:3;
+ uint64_t dfa_info_clm:4;
+ uint64_t reserved_38_40:3;
+ uint64_t efus_lck_rsv:1;
+ uint64_t efus_lck_man:1;
+ uint64_t pll_half_dis:1;
+ uint64_t l2c_crip:3;
+ uint64_t reserved_31_31:1;
+ uint64_t zip_info:2;
+ uint64_t bar2_sz_conf:1;
+ uint64_t efus_lck:1;
+ uint64_t efus_ign:1;
+ uint64_t nozip:1;
+ uint64_t nodfa_dte:1;
+ uint64_t ema1:6;
+ uint64_t reserved_0_17:18;
+#else
+ uint64_t reserved_0_17:18;
+ uint64_t ema1:6;
+ uint64_t nodfa_dte:1;
+ uint64_t nozip:1;
+ uint64_t efus_ign:1;
+ uint64_t efus_lck:1;
+ uint64_t bar2_sz_conf:1;
+ uint64_t zip_info:2;
+ uint64_t reserved_31_31:1;
+ uint64_t l2c_crip:3;
+ uint64_t pll_half_dis:1;
+ uint64_t efus_lck_man:1;
+ uint64_t efus_lck_rsv:1;
+ uint64_t reserved_38_40:3;
+ uint64_t dfa_info_clm:4;
+ uint64_t dfa_info_dte:3;
+ uint64_t pll_ctl:10;
+ uint64_t ema0:6;
+#endif
+ } cn70xxp1;
+ struct cvmx_mio_fus_dat3_cn73xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t ema0:6;
+ uint64_t pll_ctl:10;
+ uint64_t dfa_info_dte:3;
+ uint64_t dfa_info_clm:4;
+ uint64_t pll_alt_matrix:1;
+ uint64_t pll_bwadj_denom:2;
+ uint64_t efus_lck_rsv:1;
+ uint64_t efus_lck_man:1;
+ uint64_t pll_half_dis:1;
+ uint64_t l2c_crip:3;
+ uint64_t use_int_refclk:1;
+ uint64_t zip_info:2;
+ uint64_t bar2_sz_conf:1;
+ uint64_t efus_lck:1;
+ uint64_t efus_ign:1;
+ uint64_t nozip:1;
+ uint64_t nodfa_dte:1;
+ uint64_t ema1:6;
+ uint64_t nohna_dte:1;
+ uint64_t hna_info_dte:3;
+ uint64_t hna_info_clm:4;
+ uint64_t reserved_9_9:1;
+ uint64_t core_pll_mul:5;
+ uint64_t pnr_pll_mul:4;
+#else
+ uint64_t pnr_pll_mul:4;
+ uint64_t core_pll_mul:5;
+ uint64_t reserved_9_9:1;
+ uint64_t hna_info_clm:4;
+ uint64_t hna_info_dte:3;
+ uint64_t nohna_dte:1;
+ uint64_t ema1:6;
+ uint64_t nodfa_dte:1;
+ uint64_t nozip:1;
+ uint64_t efus_ign:1;
+ uint64_t efus_lck:1;
+ uint64_t bar2_sz_conf:1;
+ uint64_t zip_info:2;
+ uint64_t use_int_refclk:1;
+ uint64_t l2c_crip:3;
+ uint64_t pll_half_dis:1;
+ uint64_t efus_lck_man:1;
+ uint64_t efus_lck_rsv:1;
+ uint64_t pll_bwadj_denom:2;
+ uint64_t pll_alt_matrix:1;
+ uint64_t dfa_info_clm:4;
+ uint64_t dfa_info_dte:3;
+ uint64_t pll_ctl:10;
+ uint64_t ema0:6;
+#endif
+ } cn73xx;
+ struct cvmx_mio_fus_dat3_cn78xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t ema0:6;
+ uint64_t pll_ctl:10;
+ uint64_t dfa_info_dte:3;
+ uint64_t dfa_info_clm:4;
+ uint64_t reserved_38_40:3;
+ uint64_t efus_lck_rsv:1;
+ uint64_t efus_lck_man:1;
+ uint64_t pll_half_dis:1;
+ uint64_t l2c_crip:3;
+ uint64_t reserved_31_31:1;
+ uint64_t zip_info:2;
+ uint64_t bar2_sz_conf:1;
+ uint64_t efus_lck:1;
+ uint64_t efus_ign:1;
+ uint64_t nozip:1;
+ uint64_t nodfa_dte:1;
+ uint64_t ema1:6;
+ uint64_t nohna_dte:1;
+ uint64_t hna_info_dte:3;
+ uint64_t hna_info_clm:4;
+ uint64_t reserved_0_9:10;
+#else
+ uint64_t reserved_0_9:10;
+ uint64_t hna_info_clm:4;
+ uint64_t hna_info_dte:3;
+ uint64_t nohna_dte:1;
+ uint64_t ema1:6;
+ uint64_t nodfa_dte:1;
+ uint64_t nozip:1;
+ uint64_t efus_ign:1;
+ uint64_t efus_lck:1;
+ uint64_t bar2_sz_conf:1;
+ uint64_t zip_info:2;
+ uint64_t reserved_31_31:1;
+ uint64_t l2c_crip:3;
+ uint64_t pll_half_dis:1;
+ uint64_t efus_lck_man:1;
+ uint64_t efus_lck_rsv:1;
+ uint64_t reserved_38_40:3;
+ uint64_t dfa_info_clm:4;
+ uint64_t dfa_info_dte:3;
+ uint64_t pll_ctl:10;
+ uint64_t ema0:6;
+#endif
+ } cn78xx;
+ struct cvmx_mio_fus_dat3_cn73xx cn78xxp2;
struct cvmx_mio_fus_dat3_cn61xx cnf71xx;
+ struct cvmx_mio_fus_dat3_cnf75xx {
+#ifdef __BIG_ENDIAN_BITFIELD
+ uint64_t ema0:6;
+ uint64_t pll_ctl:10;
+ uint64_t dfa_info_dte:3;
+ uint64_t dfa_info_clm:4;
+ uint64_t pll_alt_matrix:1;
+ uint64_t pll_bwadj_denom:2;
+ uint64_t efus_lck_rsv:1;
+ uint64_t efus_lck_man:1;
+ uint64_t pll_half_dis:1;
+ uint64_t l2c_crip:3;
+ uint64_t use_int_refclk:1;
+ uint64_t zip_info:2;
+ uint64_t bar2_sz_conf:1;
+ uint64_t efus_lck:1;
+ uint64_t efus_ign:1;
+ uint64_t nozip:1;
+ uint64_t nodfa_dte:1;
+ uint64_t ema1:6;
+ uint64_t reserved_9_17:9;
+ uint64_t core_pll_mul:5;
+ uint64_t pnr_pll_mul:4;
+#else
+ uint64_t pnr_pll_mul:4;
+ uint64_t core_pll_mul:5;
+ uint64_t reserved_9_17:9;
+ uint64_t ema1:6;
+ uint64_t nodfa_dte:1;
+ uint64_t nozip:1;
+ uint64_t efus_ign:1;
+ uint64_t efus_lck:1;
+ uint64_t bar2_sz_conf:1;
+ uint64_t zip_info:2;
+ uint64_t use_int_refclk:1;
+ uint64_t l2c_crip:3;
+ uint64_t pll_half_dis:1;
+ uint64_t efus_lck_man:1;
+ uint64_t efus_lck_rsv:1;
+ uint64_t pll_bwadj_denom:2;
+ uint64_t pll_alt_matrix:1;
+ uint64_t dfa_info_clm:4;
+ uint64_t dfa_info_dte:3;
+ uint64_t pll_ctl:10;
+ uint64_t ema0:6;
+#endif
+ } cnf75xx;
};
union cvmx_mio_fus_ema {
diff --git a/arch/mips/include/asm/octeon/cvmx-mpi-defs.h b/arch/mips/include/asm/octeon/cvmx-mpi-defs.h
deleted file mode 100644
index 4615b102625b..000000000000
--- a/arch/mips/include/asm/octeon/cvmx-mpi-defs.h
+++ /dev/null
@@ -1,328 +0,0 @@
-/***********************license start***************
- * Author: Cavium Networks
- *
- * Contact: support@caviumnetworks.com
- * This file is part of the OCTEON SDK
- *
- * Copyright (c) 2003-2012 Cavium Networks
- *
- * This file is free software; you can redistribute it and/or modify
- * it under the terms of the GNU General Public License, Version 2, as
- * published by the Free Software Foundation.
- *
- * This file is distributed in the hope that it will be useful, but
- * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
- * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
- * NONINFRINGEMENT. See the GNU General Public License for more
- * details.
- *
- * You should have received a copy of the GNU General Public License
- * along with this file; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
- * or visit http://www.gnu.org/licenses/.
- *
- * This file may also be available under a different license from Cavium.
- * Contact Cavium Networks for more information
- ***********************license end**************************************/
-
-#ifndef __CVMX_MPI_DEFS_H__
-#define __CVMX_MPI_DEFS_H__
-
-#define CVMX_MPI_CFG (CVMX_ADD_IO_SEG(0x0001070000001000ull))
-#define CVMX_MPI_DATX(offset) (CVMX_ADD_IO_SEG(0x0001070000001080ull) + ((offset) & 15) * 8)
-#define CVMX_MPI_STS (CVMX_ADD_IO_SEG(0x0001070000001008ull))
-#define CVMX_MPI_TX (CVMX_ADD_IO_SEG(0x0001070000001010ull))
-
-union cvmx_mpi_cfg {
- uint64_t u64;
- struct cvmx_mpi_cfg_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_29_63:35;
- uint64_t clkdiv:13;
- uint64_t csena3:1;
- uint64_t csena2:1;
- uint64_t csena1:1;
- uint64_t csena0:1;
- uint64_t cslate:1;
- uint64_t tritx:1;
- uint64_t idleclks:2;
- uint64_t cshi:1;
- uint64_t csena:1;
- uint64_t int_ena:1;
- uint64_t lsbfirst:1;
- uint64_t wireor:1;
- uint64_t clk_cont:1;
- uint64_t idlelo:1;
- uint64_t enable:1;
-#else
- uint64_t enable:1;
- uint64_t idlelo:1;
- uint64_t clk_cont:1;
- uint64_t wireor:1;
- uint64_t lsbfirst:1;
- uint64_t int_ena:1;
- uint64_t csena:1;
- uint64_t cshi:1;
- uint64_t idleclks:2;
- uint64_t tritx:1;
- uint64_t cslate:1;
- uint64_t csena0:1;
- uint64_t csena1:1;
- uint64_t csena2:1;
- uint64_t csena3:1;
- uint64_t clkdiv:13;
- uint64_t reserved_29_63:35;
-#endif
- } s;
- struct cvmx_mpi_cfg_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_29_63:35;
- uint64_t clkdiv:13;
- uint64_t reserved_12_15:4;
- uint64_t cslate:1;
- uint64_t tritx:1;
- uint64_t idleclks:2;
- uint64_t cshi:1;
- uint64_t csena:1;
- uint64_t int_ena:1;
- uint64_t lsbfirst:1;
- uint64_t wireor:1;
- uint64_t clk_cont:1;
- uint64_t idlelo:1;
- uint64_t enable:1;
-#else
- uint64_t enable:1;
- uint64_t idlelo:1;
- uint64_t clk_cont:1;
- uint64_t wireor:1;
- uint64_t lsbfirst:1;
- uint64_t int_ena:1;
- uint64_t csena:1;
- uint64_t cshi:1;
- uint64_t idleclks:2;
- uint64_t tritx:1;
- uint64_t cslate:1;
- uint64_t reserved_12_15:4;
- uint64_t clkdiv:13;
- uint64_t reserved_29_63:35;
-#endif
- } cn30xx;
- struct cvmx_mpi_cfg_cn31xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_29_63:35;
- uint64_t clkdiv:13;
- uint64_t reserved_11_15:5;
- uint64_t tritx:1;
- uint64_t idleclks:2;
- uint64_t cshi:1;
- uint64_t csena:1;
- uint64_t int_ena:1;
- uint64_t lsbfirst:1;
- uint64_t wireor:1;
- uint64_t clk_cont:1;
- uint64_t idlelo:1;
- uint64_t enable:1;
-#else
- uint64_t enable:1;
- uint64_t idlelo:1;
- uint64_t clk_cont:1;
- uint64_t wireor:1;
- uint64_t lsbfirst:1;
- uint64_t int_ena:1;
- uint64_t csena:1;
- uint64_t cshi:1;
- uint64_t idleclks:2;
- uint64_t tritx:1;
- uint64_t reserved_11_15:5;
- uint64_t clkdiv:13;
- uint64_t reserved_29_63:35;
-#endif
- } cn31xx;
- struct cvmx_mpi_cfg_cn30xx cn50xx;
- struct cvmx_mpi_cfg_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_29_63:35;
- uint64_t clkdiv:13;
- uint64_t reserved_14_15:2;
- uint64_t csena1:1;
- uint64_t csena0:1;
- uint64_t cslate:1;
- uint64_t tritx:1;
- uint64_t idleclks:2;
- uint64_t cshi:1;
- uint64_t reserved_6_6:1;
- uint64_t int_ena:1;
- uint64_t lsbfirst:1;
- uint64_t wireor:1;
- uint64_t clk_cont:1;
- uint64_t idlelo:1;
- uint64_t enable:1;
-#else
- uint64_t enable:1;
- uint64_t idlelo:1;
- uint64_t clk_cont:1;
- uint64_t wireor:1;
- uint64_t lsbfirst:1;
- uint64_t int_ena:1;
- uint64_t reserved_6_6:1;
- uint64_t cshi:1;
- uint64_t idleclks:2;
- uint64_t tritx:1;
- uint64_t cslate:1;
- uint64_t csena0:1;
- uint64_t csena1:1;
- uint64_t reserved_14_15:2;
- uint64_t clkdiv:13;
- uint64_t reserved_29_63:35;
-#endif
- } cn61xx;
- struct cvmx_mpi_cfg_cn66xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_29_63:35;
- uint64_t clkdiv:13;
- uint64_t csena3:1;
- uint64_t csena2:1;
- uint64_t reserved_12_13:2;
- uint64_t cslate:1;
- uint64_t tritx:1;
- uint64_t idleclks:2;
- uint64_t cshi:1;
- uint64_t reserved_6_6:1;
- uint64_t int_ena:1;
- uint64_t lsbfirst:1;
- uint64_t wireor:1;
- uint64_t clk_cont:1;
- uint64_t idlelo:1;
- uint64_t enable:1;
-#else
- uint64_t enable:1;
- uint64_t idlelo:1;
- uint64_t clk_cont:1;
- uint64_t wireor:1;
- uint64_t lsbfirst:1;
- uint64_t int_ena:1;
- uint64_t reserved_6_6:1;
- uint64_t cshi:1;
- uint64_t idleclks:2;
- uint64_t tritx:1;
- uint64_t cslate:1;
- uint64_t reserved_12_13:2;
- uint64_t csena2:1;
- uint64_t csena3:1;
- uint64_t clkdiv:13;
- uint64_t reserved_29_63:35;
-#endif
- } cn66xx;
- struct cvmx_mpi_cfg_cn61xx cnf71xx;
-};
-
-union cvmx_mpi_datx {
- uint64_t u64;
- struct cvmx_mpi_datx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_8_63:56;
- uint64_t data:8;
-#else
- uint64_t data:8;
- uint64_t reserved_8_63:56;
-#endif
- } s;
- struct cvmx_mpi_datx_s cn30xx;
- struct cvmx_mpi_datx_s cn31xx;
- struct cvmx_mpi_datx_s cn50xx;
- struct cvmx_mpi_datx_s cn61xx;
- struct cvmx_mpi_datx_s cn66xx;
- struct cvmx_mpi_datx_s cnf71xx;
-};
-
-union cvmx_mpi_sts {
- uint64_t u64;
- struct cvmx_mpi_sts_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_13_63:51;
- uint64_t rxnum:5;
- uint64_t reserved_1_7:7;
- uint64_t busy:1;
-#else
- uint64_t busy:1;
- uint64_t reserved_1_7:7;
- uint64_t rxnum:5;
- uint64_t reserved_13_63:51;
-#endif
- } s;
- struct cvmx_mpi_sts_s cn30xx;
- struct cvmx_mpi_sts_s cn31xx;
- struct cvmx_mpi_sts_s cn50xx;
- struct cvmx_mpi_sts_s cn61xx;
- struct cvmx_mpi_sts_s cn66xx;
- struct cvmx_mpi_sts_s cnf71xx;
-};
-
-union cvmx_mpi_tx {
- uint64_t u64;
- struct cvmx_mpi_tx_s {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_22_63:42;
- uint64_t csid:2;
- uint64_t reserved_17_19:3;
- uint64_t leavecs:1;
- uint64_t reserved_13_15:3;
- uint64_t txnum:5;
- uint64_t reserved_5_7:3;
- uint64_t totnum:5;
-#else
- uint64_t totnum:5;
- uint64_t reserved_5_7:3;
- uint64_t txnum:5;
- uint64_t reserved_13_15:3;
- uint64_t leavecs:1;
- uint64_t reserved_17_19:3;
- uint64_t csid:2;
- uint64_t reserved_22_63:42;
-#endif
- } s;
- struct cvmx_mpi_tx_cn30xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_17_63:47;
- uint64_t leavecs:1;
- uint64_t reserved_13_15:3;
- uint64_t txnum:5;
- uint64_t reserved_5_7:3;
- uint64_t totnum:5;
-#else
- uint64_t totnum:5;
- uint64_t reserved_5_7:3;
- uint64_t txnum:5;
- uint64_t reserved_13_15:3;
- uint64_t leavecs:1;
- uint64_t reserved_17_63:47;
-#endif
- } cn30xx;
- struct cvmx_mpi_tx_cn30xx cn31xx;
- struct cvmx_mpi_tx_cn30xx cn50xx;
- struct cvmx_mpi_tx_cn61xx {
-#ifdef __BIG_ENDIAN_BITFIELD
- uint64_t reserved_21_63:43;
- uint64_t csid:1;
- uint64_t reserved_17_19:3;
- uint64_t leavecs:1;
- uint64_t reserved_13_15:3;
- uint64_t txnum:5;
- uint64_t reserved_5_7:3;
- uint64_t totnum:5;
-#else
- uint64_t totnum:5;
- uint64_t reserved_5_7:3;
- uint64_t txnum:5;
- uint64_t reserved_13_15:3;
- uint64_t leavecs:1;
- uint64_t reserved_17_19:3;
- uint64_t csid:1;
- uint64_t reserved_21_63:43;
-#endif
- } cn61xx;
- struct cvmx_mpi_tx_s cn66xx;
- struct cvmx_mpi_tx_cn61xx cnf71xx;
-};
-
-#endif
diff --git a/arch/mips/include/asm/octeon/cvmx-pow.h b/arch/mips/include/asm/octeon/cvmx-pow.h
index 51531563f8dc..410bb70e5aac 100644
--- a/arch/mips/include/asm/octeon/cvmx-pow.h
+++ b/arch/mips/include/asm/octeon/cvmx-pow.h
@@ -2051,7 +2051,7 @@ static inline void cvmx_pow_tag_sw_desched(uint32_t tag,
}
/**
- * Descchedules the current work queue entry.
+ * Deschedules the current work queue entry.
*
* @no_sched: no schedule flag value to be set on the work queue
* entry. If this is set the entry will not be
diff --git a/arch/mips/include/asm/octeon/cvmx-sysinfo.h b/arch/mips/include/asm/octeon/cvmx-sysinfo.h
index 2131197422e5..c6c3ee39c69d 100644
--- a/arch/mips/include/asm/octeon/cvmx-sysinfo.h
+++ b/arch/mips/include/asm/octeon/cvmx-sysinfo.h
@@ -4,7 +4,7 @@
* Contact: support@caviumnetworks.com
* This file is part of the OCTEON SDK
*
- * Copyright (c) 2003-2008 Cavium Networks
+ * Copyright (c) 2003-2016 Cavium, Inc.
*
* This file is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License, Version 2, as
@@ -32,6 +32,8 @@
#ifndef __CVMX_SYSINFO_H__
#define __CVMX_SYSINFO_H__
+#include "cvmx-coremask.h"
+
#define OCTEON_SERIAL_LEN 20
/**
* Structure describing application specific information.
@@ -50,8 +52,7 @@ struct cvmx_sysinfo {
uint64_t system_dram_size;
/* ptr to memory descriptor block */
- void *phy_mem_desc_ptr;
-
+ uint64_t phy_mem_desc_addr;
/* Application image specific variables */
/* stack top address (virtual) */
@@ -63,7 +64,7 @@ struct cvmx_sysinfo {
/* heap size in bytes */
uint32_t heap_size;
/* coremask defining cores running application */
- uint32_t core_mask;
+ struct cvmx_coremask core_mask;
/* Deprecated, use cvmx_coremask_first_core() to select init core */
uint32_t init_core;
@@ -121,32 +122,4 @@ struct cvmx_sysinfo {
extern struct cvmx_sysinfo *cvmx_sysinfo_get(void);
-/**
- * This function is used in non-simple executive environments (such as
- * Linux kernel, u-boot, etc.) to configure the minimal fields that
- * are required to use simple executive files directly.
- *
- * Locking (if required) must be handled outside of this
- * function
- *
- * @phy_mem_desc_ptr: Pointer to global physical memory descriptor
- * (bootmem descriptor) @board_type: Octeon board
- * type enumeration
- *
- * @board_rev_major:
- * Board major revision
- * @board_rev_minor:
- * Board minor revision
- * @cpu_clock_hz:
- * CPU clock freqency in hertz
- *
- * Returns 0: Failure
- * 1: success
- */
-extern int cvmx_sysinfo_minimal_initialize(void *phy_mem_desc_ptr,
- uint16_t board_type,
- uint8_t board_rev_major,
- uint8_t board_rev_minor,
- uint32_t cpu_clock_hz);
-
#endif /* __CVMX_SYSINFO_H__ */
diff --git a/arch/mips/include/asm/octeon/cvmx.h b/arch/mips/include/asm/octeon/cvmx.h
index 19e139c9f337..2530e8731c8a 100644
--- a/arch/mips/include/asm/octeon/cvmx.h
+++ b/arch/mips/include/asm/octeon/cvmx.h
@@ -57,6 +57,7 @@ enum cvmx_mips_space {
#include <asm/octeon/cvmx-sysinfo.h>
#include <asm/octeon/cvmx-ciu-defs.h>
+#include <asm/octeon/cvmx-ciu3-defs.h>
#include <asm/octeon/cvmx-gpio-defs.h>
#include <asm/octeon/cvmx-iob-defs.h>
#include <asm/octeon/cvmx-ipd-defs.h>
@@ -189,7 +190,7 @@ static inline uint64_t cvmx_ptr_to_phys(void *ptr)
static inline void *cvmx_phys_to_ptr(uint64_t physical_address)
{
if (sizeof(void *) == 8) {
- /* Just set the top bit, avoiding any TLB uglyness */
+ /* Just set the top bit, avoiding any TLB ugliness */
return CASTPTR(void,
CVMX_ADD_SEG(CVMX_MIPS_SPACE_XKPHYS,
physical_address));
@@ -341,6 +342,21 @@ static inline unsigned int cvmx_get_core_num(void)
return core_num;
}
+/* Maximum # of bits to define core in node */
+#define CVMX_NODE_NO_SHIFT 7
+#define CVMX_NODE_MASK 0x3
+static inline unsigned int cvmx_get_node_num(void)
+{
+ unsigned int core_num = cvmx_get_core_num();
+
+ return (core_num >> CVMX_NODE_NO_SHIFT) & CVMX_NODE_MASK;
+}
+
+static inline unsigned int cvmx_get_local_core_num(void)
+{
+ return cvmx_get_core_num() & ((1 << CVMX_NODE_NO_SHIFT) - 1);
+}
+
/**
* Returns the number of bits set in the provided value.
* Simple wrapper for POP instruction.
@@ -448,8 +464,15 @@ static inline uint64_t cvmx_get_cycle_global(void)
/* Return the number of cores available in the chip */
static inline uint32_t cvmx_octeon_num_cores(void)
{
- uint32_t ciu_fuse = (uint32_t) cvmx_read_csr(CVMX_CIU_FUSE) & 0xffff;
- return cvmx_pop(ciu_fuse);
+ u64 ciu_fuse_reg;
+ u64 ciu_fuse;
+
+ if (OCTEON_IS_OCTEON3() && !OCTEON_IS_MODEL(OCTEON_CN70XX))
+ ciu_fuse_reg = CVMX_CIU3_FUSE;
+ else
+ ciu_fuse_reg = CVMX_CIU_FUSE;
+ ciu_fuse = cvmx_read_csr(ciu_fuse_reg);
+ return cvmx_dpop(ciu_fuse);
}
#endif /* __CVMX_H__ */
diff --git a/arch/mips/include/asm/octeon/octeon-feature.h b/arch/mips/include/asm/octeon/octeon-feature.h
index 3ed10a8d7865..a19ca3b2775c 100644
--- a/arch/mips/include/asm/octeon/octeon-feature.h
+++ b/arch/mips/include/asm/octeon/octeon-feature.h
@@ -81,6 +81,10 @@ enum octeon_feature {
OCTEON_FEATURE_HFA,
OCTEON_FEATURE_DFM,
OCTEON_FEATURE_CIU2,
+ OCTEON_FEATURE_CIU3,
+ /* Octeon has FPA first seen on 78XX */
+ OCTEON_FEATURE_FPA3,
+ OCTEON_FEATURE_FAU,
OCTEON_MAX_FEATURE
};
@@ -110,7 +114,7 @@ static inline int octeon_has_crypto(void)
* Returns Non zero if the feature exists. Zero if the feature does not
* exist.
*/
-static inline int octeon_has_feature(enum octeon_feature feature)
+static inline bool octeon_has_feature(enum octeon_feature feature)
{
switch (feature) {
case OCTEON_FEATURE_SAAD:
@@ -122,7 +126,7 @@ static inline int octeon_has_feature(enum octeon_feature feature)
fus_2.u64 = cvmx_read_csr(CVMX_MIO_FUS_DAT2);
return !fus_2.s.nocrypto && !fus_2.s.nomul && fus_2.s.dorm_crypto;
} else {
- return 0;
+ return false;
}
case OCTEON_FEATURE_PCIE:
@@ -190,11 +194,20 @@ static inline int octeon_has_feature(enum octeon_feature feature)
case OCTEON_FEATURE_CIU2:
return OCTEON_IS_MODEL(OCTEON_CN68XX);
+ case OCTEON_FEATURE_CIU3:
+ case OCTEON_FEATURE_FPA3:
+ return OCTEON_IS_MODEL(OCTEON_CN78XX)
+ || OCTEON_IS_MODEL(OCTEON_CNF75XX)
+ || OCTEON_IS_MODEL(OCTEON_CN73XX);
+ case OCTEON_FEATURE_FAU:
+ return !(OCTEON_IS_MODEL(OCTEON_CN78XX)
+ || OCTEON_IS_MODEL(OCTEON_CNF75XX)
+ || OCTEON_IS_MODEL(OCTEON_CN73XX));
default:
break;
}
- return 0;
+ return false;
}
#endif /* __OCTEON_FEATURE_H__ */
diff --git a/arch/mips/include/asm/octeon/octeon-model.h b/arch/mips/include/asm/octeon/octeon-model.h
index 92b377e36dac..6c68517c2770 100644
--- a/arch/mips/include/asm/octeon/octeon-model.h
+++ b/arch/mips/include/asm/octeon/octeon-model.h
@@ -74,7 +74,12 @@
* CN7XXX models with new revision encoding
*/
+#define OCTEON_CNF75XX_PASS1_0 0x000d9800
+#define OCTEON_CNF75XX (OCTEON_CNF75XX_PASS1_0 | OM_IGNORE_REVISION)
+#define OCTEON_CNF75XX_PASS1_X (OCTEON_CNF75XX_PASS1_0 | OM_IGNORE_MINOR_REVISION)
+
#define OCTEON_CN73XX_PASS1_0 0x000d9700
+#define OCTEON_CN73XX_PASS1_1 0x000d9701
#define OCTEON_CN73XX (OCTEON_CN73XX_PASS1_0 | OM_IGNORE_REVISION)
#define OCTEON_CN73XX_PASS1_X (OCTEON_CN73XX_PASS1_0 | \
OM_IGNORE_MINOR_REVISION)
diff --git a/arch/mips/include/asm/octeon/octeon.h b/arch/mips/include/asm/octeon/octeon.h
index de9f74ee5dd0..07c0516ef4d5 100644
--- a/arch/mips/include/asm/octeon/octeon.h
+++ b/arch/mips/include/asm/octeon/octeon.h
@@ -299,6 +299,31 @@ static inline void octeon_npi_write32(uint64_t address, uint32_t val)
cvmx_read64_uint32(address ^ 4);
}
+#ifdef CONFIG_SMP
+void octeon_setup_smp(void);
+#else
+static inline void octeon_setup_smp(void) {}
+#endif
+
+struct irq_domain;
+struct device_node;
+struct irq_data;
+struct irq_chip;
+void octeon_ciu3_mbox_send(int cpu, unsigned int mbox);
+int octeon_irq_ciu3_xlat(struct irq_domain *d,
+ struct device_node *node,
+ const u32 *intspec,
+ unsigned int intsize,
+ unsigned long *out_hwirq,
+ unsigned int *out_type);
+void octeon_irq_ciu3_enable(struct irq_data *data);
+void octeon_irq_ciu3_disable(struct irq_data *data);
+void octeon_irq_ciu3_ack(struct irq_data *data);
+void octeon_irq_ciu3_mask(struct irq_data *data);
+void octeon_irq_ciu3_mask_ack(struct irq_data *data);
+int octeon_irq_ciu3_mapx(struct irq_domain *d, unsigned int virq,
+ irq_hw_number_t hw, struct irq_chip *chip);
+
/* Octeon multiplier save/restore routines from octeon_switch.S */
void octeon_mult_save(void);
void octeon_mult_restore(void);
diff --git a/arch/mips/include/asm/page.h b/arch/mips/include/asm/page.h
index 21ed7150fec3..ea0cd9773914 100644
--- a/arch/mips/include/asm/page.h
+++ b/arch/mips/include/asm/page.h
@@ -162,16 +162,34 @@ typedef struct { unsigned long pgprot; } pgprot_t;
/*
* __pa()/__va() should be used only during mem init.
*/
-#ifdef CONFIG_64BIT
-#define __pa(x) \
-({ \
- unsigned long __x = (unsigned long)(x); \
- __x < CKSEG0 ? XPHYSADDR(__x) : CPHYSADDR(__x); \
-})
-#else
-#define __pa(x) \
- ((unsigned long)(x) - PAGE_OFFSET + PHYS_OFFSET)
-#endif
+static inline unsigned long ___pa(unsigned long x)
+{
+ if (config_enabled(CONFIG_64BIT)) {
+ /*
+ * For MIPS64 the virtual address may either be in one of
+ * the compatibility segements ckseg0 or ckseg1, or it may
+ * be in xkphys.
+ */
+ return x < CKSEG0 ? XPHYSADDR(x) : CPHYSADDR(x);
+ }
+
+ if (!config_enabled(CONFIG_EVA)) {
+ /*
+ * We're using the standard MIPS32 legacy memory map, ie.
+ * the address x is going to be in kseg0 or kseg1. We can
+ * handle either case by masking out the desired bits using
+ * CPHYSADDR.
+ */
+ return CPHYSADDR(x);
+ }
+
+ /*
+ * EVA is in use so the memory map could be anything, making it not
+ * safe to just mask out bits.
+ */
+ return x - PAGE_OFFSET + PHYS_OFFSET;
+}
+#define __pa(x) ___pa((unsigned long)(x))
#define __va(x) ((void *)((unsigned long)(x) + PAGE_OFFSET - PHYS_OFFSET))
#include <asm/io.h>
@@ -229,8 +247,10 @@ extern int __virt_addr_valid(const volatile void *kaddr);
#define virt_addr_valid(kaddr) \
__virt_addr_valid((const volatile void *) (kaddr))
-#define VM_DATA_DEFAULT_FLAGS (VM_READ | VM_WRITE | VM_EXEC | \
- VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
+#define VM_DATA_DEFAULT_FLAGS \
+ (VM_READ | VM_WRITE | \
+ ((current->personality & READ_IMPLIES_EXEC) ? VM_EXEC : 0) | \
+ VM_MAYREAD | VM_MAYWRITE | VM_MAYEXEC)
#define UNCAC_ADDR(addr) ((addr) - PAGE_OFFSET + UNCAC_BASE)
#define CAC_ADDR(addr) ((addr) - UNCAC_BASE + PAGE_OFFSET)
diff --git a/arch/mips/include/asm/pci.h b/arch/mips/include/asm/pci.h
index 8c16fb7b8fdb..9b63cd41213d 100644
--- a/arch/mips/include/asm/pci.h
+++ b/arch/mips/include/asm/pci.h
@@ -43,8 +43,6 @@ struct pci_controller {
and XFree86. Eventually will be removed. */
unsigned int need_domain_info;
- int iommu;
-
/* Optional access methods for reading/writing the bus number
of the PCI controller */
int (*get_busno)(void);
@@ -82,16 +80,6 @@ extern int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
#define HAVE_ARCH_PCI_RESOURCE_TO_USER
-static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
- const struct resource *rsrc, resource_size_t *start,
- resource_size_t *end)
-{
- phys_addr_t size = resource_size(rsrc);
-
- *start = fixup_bigphys_addr(rsrc->start, size);
- *end = rsrc->start + size;
-}
-
/*
* Dynamic DMA mapping stuff.
* MIPS has everything mapped statically.
@@ -106,11 +94,11 @@ static inline void pci_resource_to_user(const struct pci_dev *dev, int bar,
struct pci_dev;
/*
- * The PCI address space does equal the physical memory address space. The
- * networking and block device layers use this boolean for bounce buffer
- * decisions. This is set if any hose does not have an IOMMU.
+ * The PCI address space does equal the physical memory address space.
+ * The networking and block device layers use this boolean for bounce
+ * buffer decisions.
*/
-extern unsigned int PCI_DMA_BUS_IS_PHYS;
+#define PCI_DMA_BUS_IS_PHYS (1)
#ifdef CONFIG_PCI_DOMAINS
#define pci_domain_nr(bus) ((struct pci_controller *)(bus)->sysdata)->index
diff --git a/arch/mips/include/asm/pci/bridge.h b/arch/mips/include/asm/pci/bridge.h
index 8d7a63b52ac7..3206245d1ed6 100644
--- a/arch/mips/include/asm/pci/bridge.h
+++ b/arch/mips/include/asm/pci/bridge.h
@@ -269,16 +269,16 @@ typedef struct bridge_err_cmdword_s {
union {
u32 cmd_word;
struct {
- u32 didn:4, /* Destination ID */
- sidn:4, /* Source ID */
- pactyp:4, /* Packet type */
- tnum:5, /* Trans Number */
- coh:1, /* Coh Transacti */
- ds:2, /* Data size */
- gbr:1, /* GBR enable */
- vbpm:1, /* VBPM message */
+ u32 didn:4, /* Destination ID */
+ sidn:4, /* Source ID */
+ pactyp:4, /* Packet type */
+ tnum:5, /* Trans Number */
+ coh:1, /* Coh Transaction */
+ ds:2, /* Data size */
+ gbr:1, /* GBR enable */
+ vbpm:1, /* VBPM message */
error:1, /* Error occurred */
- barr:1, /* Barrier op */
+ barr:1, /* Barrier op */
rsvd:8;
} berr_st;
} berr_un;
diff --git a/arch/mips/include/asm/pgalloc.h b/arch/mips/include/asm/pgalloc.h
index b336037e8768..93c079a1cfc8 100644
--- a/arch/mips/include/asm/pgalloc.h
+++ b/arch/mips/include/asm/pgalloc.h
@@ -69,7 +69,7 @@ static inline pte_t *pte_alloc_one_kernel(struct mm_struct *mm,
{
pte_t *pte;
- pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT|__GFP_ZERO, PTE_ORDER);
+ pte = (pte_t *) __get_free_pages(GFP_KERNEL|__GFP_ZERO, PTE_ORDER);
return pte;
}
@@ -79,7 +79,7 @@ static inline struct page *pte_alloc_one(struct mm_struct *mm,
{
struct page *pte;
- pte = alloc_pages(GFP_KERNEL | __GFP_REPEAT, PTE_ORDER);
+ pte = alloc_pages(GFP_KERNEL, PTE_ORDER);
if (!pte)
return NULL;
clear_highpage(pte);
@@ -113,7 +113,7 @@ static inline pmd_t *pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
pmd_t *pmd;
- pmd = (pmd_t *) __get_free_pages(GFP_KERNEL|__GFP_REPEAT, PMD_ORDER);
+ pmd = (pmd_t *) __get_free_pages(GFP_KERNEL, PMD_ORDER);
if (pmd)
pmd_init((unsigned long)pmd, (unsigned long)invalid_pte_table);
return pmd;
diff --git a/arch/mips/include/asm/pgtable-32.h b/arch/mips/include/asm/pgtable-32.h
index 832e2167d00f..d21f3da7bdb6 100644
--- a/arch/mips/include/asm/pgtable-32.h
+++ b/arch/mips/include/asm/pgtable-32.h
@@ -103,8 +103,8 @@ static inline void pmd_clear(pmd_t *pmdp)
pmd_val(*pmdp) = ((unsigned long) invalid_pte_table);
}
-#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
-#define pte_page(x) pfn_to_page(pte_pfn(x))
+#if defined(CONFIG_XPA)
+
#define pte_pfn(x) (((unsigned long)((x).pte_high >> _PFN_SHIFT)) | (unsigned long)((x).pte_low << _PAGE_PRESENT_SHIFT))
static inline pte_t
pfn_pte(unsigned long pfn, pgprot_t prot)
@@ -118,9 +118,21 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
return pte;
}
-#else
+#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
-#define pte_page(x) pfn_to_page(pte_pfn(x))
+#define pte_pfn(x) ((unsigned long)((x).pte_high >> 6))
+
+static inline pte_t pfn_pte(unsigned long pfn, pgprot_t prot)
+{
+ pte_t pte;
+
+ pte.pte_high = (pfn << 6) | (pgprot_val(prot) & 0x3f);
+ pte.pte_low = pgprot_val(prot);
+
+ return pte;
+}
+
+#else
#ifdef CONFIG_CPU_VR41XX
#define pte_pfn(x) ((unsigned long)((x).pte >> (PAGE_SHIFT + 2)))
@@ -131,6 +143,8 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
#endif
#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */
+#define pte_page(x) pfn_to_page(pte_pfn(x))
+
#define __pgd_offset(address) pgd_index(address)
#define __pud_offset(address) (((address) >> PUD_SHIFT) & (PTRS_PER_PUD-1))
#define __pmd_offset(address) (((address) >> PMD_SHIFT) & (PTRS_PER_PMD-1))
@@ -166,7 +180,7 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
#else
-#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_XPA)
/* Swap entries must have VALID and GLOBAL bits cleared. */
#define __swp_type(x) (((x).val >> 4) & 0x1f)
@@ -175,6 +189,15 @@ pfn_pte(unsigned long pfn, pgprot_t prot)
#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high })
#define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val })
+#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+
+/* Swap entries must have VALID and GLOBAL bits cleared. */
+#define __swp_type(x) (((x).val >> 2) & 0x1f)
+#define __swp_offset(x) ((x).val >> 7)
+#define __swp_entry(type, offset) ((swp_entry_t) { ((type) << 2) | ((offset) << 7) })
+#define __pte_to_swp_entry(pte) ((swp_entry_t) { (pte).pte_high })
+#define __swp_entry_to_pte(x) ((pte_t) { 0, (x).val })
+
#else
/*
* Constraints:
diff --git a/arch/mips/include/asm/pgtable-64.h b/arch/mips/include/asm/pgtable-64.h
index cf661a2fb141..514cbc0a6a67 100644
--- a/arch/mips/include/asm/pgtable-64.h
+++ b/arch/mips/include/asm/pgtable-64.h
@@ -17,7 +17,7 @@
#include <asm/cachectl.h>
#include <asm/fixmap.h>
-#ifdef CONFIG_PAGE_SIZE_64KB
+#if defined(CONFIG_PAGE_SIZE_64KB) && !defined(CONFIG_MIPS_VA_BITS_48)
#include <asm-generic/pgtable-nopmd.h>
#else
#include <asm-generic/pgtable-nopud.h>
@@ -90,7 +90,11 @@
#define PTE_ORDER 0
#endif
#ifdef CONFIG_PAGE_SIZE_16KB
-#define PGD_ORDER 0
+#ifdef CONFIG_MIPS_VA_BITS_48
+#define PGD_ORDER 1
+#else
+#define PGD_ORDER 0
+#endif
#define PUD_ORDER aieeee_attempt_to_allocate_pud
#define PMD_ORDER 0
#define PTE_ORDER 0
@@ -104,7 +108,11 @@
#ifdef CONFIG_PAGE_SIZE_64KB
#define PGD_ORDER 0
#define PUD_ORDER aieeee_attempt_to_allocate_pud
+#ifdef CONFIG_MIPS_VA_BITS_48
+#define PMD_ORDER 0
+#else
#define PMD_ORDER aieeee_attempt_to_allocate_pmd
+#endif
#define PTE_ORDER 0
#endif
@@ -114,11 +122,7 @@
#endif
#define PTRS_PER_PTE ((PAGE_SIZE << PTE_ORDER) / sizeof(pte_t))
-#if PGDIR_SIZE >= TASK_SIZE64
-#define USER_PTRS_PER_PGD (1)
-#else
-#define USER_PTRS_PER_PGD (TASK_SIZE64 / PGDIR_SIZE)
-#endif
+#define USER_PTRS_PER_PGD ((TASK_SIZE64 / PGDIR_SIZE)?(TASK_SIZE64 / PGDIR_SIZE):1)
#define FIRST_USER_ADDRESS 0UL
/*
diff --git a/arch/mips/include/asm/pgtable-bits.h b/arch/mips/include/asm/pgtable-bits.h
index 97b313882678..f88a48cd68b2 100644
--- a/arch/mips/include/asm/pgtable-bits.h
+++ b/arch/mips/include/asm/pgtable-bits.h
@@ -32,149 +32,132 @@
* unpredictable things. The code (when it is written) to deal with
* this problem will be in the update_mmu_cache() code for the r4k.
*/
-#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_XPA)
/*
- * The following bits are implemented by the TLB hardware
+ * Page table bit offsets used for 64 bit physical addressing on
+ * MIPS32r5 with XPA.
*/
-#define _PAGE_NO_EXEC_SHIFT 0
-#define _PAGE_NO_EXEC (1 << _PAGE_NO_EXEC_SHIFT)
-#define _PAGE_NO_READ_SHIFT (_PAGE_NO_EXEC_SHIFT + 1)
-#define _PAGE_NO_READ (1 << _PAGE_NO_READ_SHIFT)
-#define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1)
-#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
-#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1)
-#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
-#define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1)
-#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT)
-#define _CACHE_SHIFT (_PAGE_DIRTY_SHIFT + 1)
-#define _CACHE_MASK (7 << _CACHE_SHIFT)
-
-/*
- * The following bits are implemented in software
- */
-#define _PAGE_PRESENT_SHIFT (24)
-#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
-#define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1)
-#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
-#define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1)
-#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
-#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1)
-#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
-#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
-#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
-
-#define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
+enum pgtable_bits {
+ /* Used by TLB hardware (placed in EntryLo*) */
+ _PAGE_NO_EXEC_SHIFT,
+ _PAGE_NO_READ_SHIFT,
+ _PAGE_GLOBAL_SHIFT,
+ _PAGE_VALID_SHIFT,
+ _PAGE_DIRTY_SHIFT,
+ _CACHE_SHIFT,
+
+ /* Used only by software (masked out before writing EntryLo*) */
+ _PAGE_PRESENT_SHIFT = 24,
+ _PAGE_WRITE_SHIFT,
+ _PAGE_ACCESSED_SHIFT,
+ _PAGE_MODIFIED_SHIFT,
+};
/*
* Bits for extended EntryLo0/EntryLo1 registers
*/
#define _PFNX_MASK 0xffffff
-#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
/*
- * The following bits are implemented in software
+ * Page table bit offsets used for 36 bit physical addressing on MIPS32,
+ * for example with Alchemy or Netlogic XLP/XLR.
*/
-#define _PAGE_PRESENT_SHIFT (0)
-#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
-#define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1)
-#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
-#define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1)
-#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
-#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1)
-#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
-#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
-#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
+enum pgtable_bits {
+ /* Used by TLB hardware (placed in EntryLo*) */
+ _PAGE_GLOBAL_SHIFT,
+ _PAGE_VALID_SHIFT,
+ _PAGE_DIRTY_SHIFT,
+ _CACHE_SHIFT,
+
+ /* Used only by software (masked out before writing EntryLo*) */
+ _PAGE_PRESENT_SHIFT = _CACHE_SHIFT + 3,
+ _PAGE_NO_READ_SHIFT,
+ _PAGE_WRITE_SHIFT,
+ _PAGE_ACCESSED_SHIFT,
+ _PAGE_MODIFIED_SHIFT,
+};
-/*
- * The following bits are implemented by the TLB hardware
- */
-#define _PAGE_GLOBAL_SHIFT (_PAGE_MODIFIED_SHIFT + 4)
-#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
-#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1)
-#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
-#define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1)
-#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT)
-#define _CACHE_UNCACHED_SHIFT (_PAGE_DIRTY_SHIFT + 1)
-#define _CACHE_UNCACHED (1 << _CACHE_UNCACHED_SHIFT)
-#define _CACHE_MASK _CACHE_UNCACHED
+#elif defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
-#define _PFN_SHIFT PAGE_SHIFT
+/* Page table bits used for r3k systems */
+enum pgtable_bits {
+ /* Used only by software (writes to EntryLo ignored) */
+ _PAGE_PRESENT_SHIFT,
+ _PAGE_NO_READ_SHIFT,
+ _PAGE_WRITE_SHIFT,
+ _PAGE_ACCESSED_SHIFT,
+ _PAGE_MODIFIED_SHIFT,
+
+ /* Used by TLB hardware (placed in EntryLo) */
+ _PAGE_GLOBAL_SHIFT = 8,
+ _PAGE_VALID_SHIFT,
+ _PAGE_DIRTY_SHIFT,
+ _CACHE_UNCACHED_SHIFT,
+};
#else
-/*
- * Below are the "Normal" R4K cases
- */
-/*
- * The following bits are implemented in software
- */
-#define _PAGE_PRESENT_SHIFT 0
+/* Page table bits used for r4k systems */
+enum pgtable_bits {
+ /* Used only by software (masked out before writing EntryLo*) */
+ _PAGE_PRESENT_SHIFT,
+#if !defined(CONFIG_CPU_HAS_RIXI)
+ _PAGE_NO_READ_SHIFT,
+#endif
+ _PAGE_WRITE_SHIFT,
+ _PAGE_ACCESSED_SHIFT,
+ _PAGE_MODIFIED_SHIFT,
+#if defined(CONFIG_64BIT) && defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
+ _PAGE_HUGE_SHIFT,
+#endif
+
+ /* Used by TLB hardware (placed in EntryLo*) */
+#if defined(CONFIG_CPU_HAS_RIXI)
+ _PAGE_NO_EXEC_SHIFT,
+ _PAGE_NO_READ_SHIFT,
+#endif
+ _PAGE_GLOBAL_SHIFT,
+ _PAGE_VALID_SHIFT,
+ _PAGE_DIRTY_SHIFT,
+ _CACHE_SHIFT,
+};
+
+#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32) */
+
+/* Used only by software */
#define _PAGE_PRESENT (1 << _PAGE_PRESENT_SHIFT)
-/* R2 or later cores check for RI/XI support to determine _PAGE_READ */
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
-#define _PAGE_WRITE_SHIFT (_PAGE_PRESENT_SHIFT + 1)
-#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
-#else
-#define _PAGE_READ_SHIFT (_PAGE_PRESENT_SHIFT + 1)
-#define _PAGE_READ (1 << _PAGE_READ_SHIFT)
-#define _PAGE_WRITE_SHIFT (_PAGE_READ_SHIFT + 1)
#define _PAGE_WRITE (1 << _PAGE_WRITE_SHIFT)
-#endif
-#define _PAGE_ACCESSED_SHIFT (_PAGE_WRITE_SHIFT + 1)
#define _PAGE_ACCESSED (1 << _PAGE_ACCESSED_SHIFT)
-#define _PAGE_MODIFIED_SHIFT (_PAGE_ACCESSED_SHIFT + 1)
#define _PAGE_MODIFIED (1 << _PAGE_MODIFIED_SHIFT)
-
#if defined(CONFIG_64BIT) && defined(CONFIG_MIPS_HUGE_TLB_SUPPORT)
-/* Huge TLB page */
-#define _PAGE_HUGE_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
-#define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT)
-#endif /* CONFIG_64BIT && CONFIG_MIPS_HUGE_TLB_SUPPORT */
-
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
-/* XI - page cannot be executed */
-#ifdef _PAGE_HUGE_SHIFT
-#define _PAGE_NO_EXEC_SHIFT (_PAGE_HUGE_SHIFT + 1)
-#else
-#define _PAGE_NO_EXEC_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
+# define _PAGE_HUGE (1 << _PAGE_HUGE_SHIFT)
#endif
-#define _PAGE_NO_EXEC (cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0)
-
-/* RI - page cannot be read */
-#define _PAGE_READ_SHIFT (_PAGE_NO_EXEC_SHIFT + 1)
-#define _PAGE_READ (cpu_has_rixi ? 0 : (1 << _PAGE_READ_SHIFT))
-#define _PAGE_NO_READ_SHIFT _PAGE_READ_SHIFT
-#define _PAGE_NO_READ (cpu_has_rixi ? (1 << _PAGE_READ_SHIFT) : 0)
-#endif /* defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6) */
-
-#if defined(_PAGE_NO_READ_SHIFT)
-#define _PAGE_GLOBAL_SHIFT (_PAGE_NO_READ_SHIFT + 1)
-#elif defined(_PAGE_HUGE_SHIFT)
-#define _PAGE_GLOBAL_SHIFT (_PAGE_HUGE_SHIFT + 1)
-#else
-#define _PAGE_GLOBAL_SHIFT (_PAGE_MODIFIED_SHIFT + 1)
+
+/* Used by TLB hardware (placed in EntryLo*) */
+#if defined(CONFIG_XPA)
+# define _PAGE_NO_EXEC (1 << _PAGE_NO_EXEC_SHIFT)
+#elif defined(CONFIG_CPU_HAS_RIXI)
+# define _PAGE_NO_EXEC (cpu_has_rixi ? (1 << _PAGE_NO_EXEC_SHIFT) : 0)
#endif
+#define _PAGE_NO_READ (1 << _PAGE_NO_READ_SHIFT)
#define _PAGE_GLOBAL (1 << _PAGE_GLOBAL_SHIFT)
-
-#define _PAGE_VALID_SHIFT (_PAGE_GLOBAL_SHIFT + 1)
#define _PAGE_VALID (1 << _PAGE_VALID_SHIFT)
-#define _PAGE_DIRTY_SHIFT (_PAGE_VALID_SHIFT + 1)
#define _PAGE_DIRTY (1 << _PAGE_DIRTY_SHIFT)
-#define _CACHE_SHIFT (_PAGE_DIRTY_SHIFT + 1)
-#define _CACHE_MASK (7 << _CACHE_SHIFT)
-
-#define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
-
-#endif /* defined(CONFIG_PHYS_ADDR_T_64BIT && defined(CONFIG_CPU_MIPS32) */
+#if defined(CONFIG_CPU_R3000) || defined(CONFIG_CPU_TX39XX)
+# define _CACHE_UNCACHED (1 << _CACHE_UNCACHED_SHIFT)
+# define _CACHE_MASK _CACHE_UNCACHED
+# define _PFN_SHIFT PAGE_SHIFT
+#else
+# define _CACHE_MASK (7 << _CACHE_SHIFT)
+# define _PFN_SHIFT (PAGE_SHIFT - 12 + _CACHE_SHIFT + 3)
+#endif
#ifndef _PAGE_NO_EXEC
#define _PAGE_NO_EXEC 0
#endif
-#ifndef _PAGE_NO_READ
-#define _PAGE_NO_READ 0
-#endif
#define _PAGE_SILENT_READ _PAGE_VALID
#define _PAGE_SILENT_WRITE _PAGE_DIRTY
@@ -191,14 +174,13 @@
*/
-#ifndef __ASSEMBLY__
/*
* pte_to_entrylo converts a page table entry (PTE) into a Mips
* entrylo0/1 value.
*/
static inline uint64_t pte_to_entrylo(unsigned long pte_val)
{
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
+#ifdef CONFIG_CPU_HAS_RIXI
if (cpu_has_rixi) {
int sa;
#ifdef CONFIG_32BIT
@@ -218,7 +200,6 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val)
return pte_val >> _PAGE_GLOBAL_SHIFT;
}
-#endif
/*
* Cache attributes
@@ -274,7 +255,7 @@ static inline uint64_t pte_to_entrylo(unsigned long pte_val)
#define _CACHE_UNCACHED_ACCELERATED (7<<_CACHE_SHIFT)
#endif
-#define __READABLE (_PAGE_SILENT_READ | _PAGE_READ | _PAGE_ACCESSED)
+#define __READABLE (_PAGE_SILENT_READ | _PAGE_ACCESSED)
#define __WRITEABLE (_PAGE_SILENT_WRITE | _PAGE_WRITE | _PAGE_MODIFIED)
#define _PAGE_CHG_MASK (_PAGE_ACCESSED | _PAGE_MODIFIED | \
diff --git a/arch/mips/include/asm/pgtable.h b/arch/mips/include/asm/pgtable.h
index 9a4fe0133ff1..70128d3f770a 100644
--- a/arch/mips/include/asm/pgtable.h
+++ b/arch/mips/include/asm/pgtable.h
@@ -23,18 +23,19 @@
struct mm_struct;
struct vm_area_struct;
-#define PAGE_NONE __pgprot(_PAGE_PRESENT | _CACHE_CACHABLE_NONCOHERENT)
-#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | _PAGE_READ | \
+#define PAGE_NONE __pgprot(_PAGE_PRESENT | _PAGE_NO_READ | \
_page_cachable_default)
-#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_NO_EXEC | \
+#define PAGE_SHARED __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
_page_cachable_default)
-#define PAGE_READONLY __pgprot(_PAGE_PRESENT | _PAGE_READ | \
+#define PAGE_COPY __pgprot(_PAGE_PRESENT | _PAGE_NO_EXEC | \
+ _page_cachable_default)
+#define PAGE_READONLY __pgprot(_PAGE_PRESENT | \
_page_cachable_default)
#define PAGE_KERNEL __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
_PAGE_GLOBAL | _page_cachable_default)
#define PAGE_KERNEL_NC __pgprot(_PAGE_PRESENT | __READABLE | __WRITEABLE | \
_PAGE_GLOBAL | _CACHE_CACHABLE_NONCOHERENT)
-#define PAGE_USERIO __pgprot(_PAGE_PRESENT | _PAGE_READ | _PAGE_WRITE | \
+#define PAGE_USERIO __pgprot(_PAGE_PRESENT | _PAGE_WRITE | \
_page_cachable_default)
#define PAGE_KERNEL_UNCACHED __pgprot(_PAGE_PRESENT | __READABLE | \
__WRITEABLE | _PAGE_GLOBAL | _CACHE_UNCACHED)
@@ -127,10 +128,19 @@ do { \
} \
} while(0)
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval);
+
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
-#define pte_none(pte) (!(((pte).pte_high) & ~_PAGE_GLOBAL))
+#ifdef CONFIG_XPA
+# define pte_none(pte) (!(((pte).pte_high) & ~_PAGE_GLOBAL))
+#else
+# define pte_none(pte) (!(((pte).pte_low | (pte).pte_high) & ~_PAGE_GLOBAL))
+#endif
+
#define pte_present(pte) ((pte).pte_low & _PAGE_PRESENT)
+#define pte_no_exec(pte) ((pte).pte_low & _PAGE_NO_EXEC)
static inline void set_pte(pte_t *ptep, pte_t pte)
{
@@ -138,17 +148,23 @@ static inline void set_pte(pte_t *ptep, pte_t pte)
smp_wmb();
ptep->pte_low = pte.pte_low;
+#ifdef CONFIG_XPA
if (pte.pte_high & _PAGE_GLOBAL) {
+#else
+ if (pte.pte_low & _PAGE_GLOBAL) {
+#endif
pte_t *buddy = ptep_buddy(ptep);
/*
* Make sure the buddy is global too (if it's !none,
* it better already be global)
*/
- if (pte_none(*buddy))
+ if (pte_none(*buddy)) {
+ if (!IS_ENABLED(CONFIG_XPA))
+ buddy->pte_low |= _PAGE_GLOBAL;
buddy->pte_high |= _PAGE_GLOBAL;
+ }
}
}
-#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
@@ -156,8 +172,13 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
htw_stop();
/* Preserve global status for the pair */
- if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
- null.pte_high = _PAGE_GLOBAL;
+ if (IS_ENABLED(CONFIG_XPA)) {
+ if (ptep_buddy(ptep)->pte_high & _PAGE_GLOBAL)
+ null.pte_high = _PAGE_GLOBAL;
+ } else {
+ if (ptep_buddy(ptep)->pte_low & _PAGE_GLOBAL)
+ null.pte_low = null.pte_high = _PAGE_GLOBAL;
+ }
set_pte_at(mm, addr, ptep, null);
htw_start();
@@ -166,6 +187,7 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
#define pte_none(pte) (!(pte_val(pte) & ~_PAGE_GLOBAL))
#define pte_present(pte) (pte_val(pte) & _PAGE_PRESENT)
+#define pte_no_exec(pte) (pte_val(pte) & _PAGE_NO_EXEC)
/*
* Certain architectures need to do special things when pte's
@@ -187,30 +209,42 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
* For SMP, multiple CPUs can race, so we need to do
* this atomically.
*/
-#ifdef CONFIG_64BIT
-#define LL_INSN "lld"
-#define SC_INSN "scd"
-#else /* CONFIG_32BIT */
-#define LL_INSN "ll"
-#define SC_INSN "sc"
-#endif
unsigned long page_global = _PAGE_GLOBAL;
unsigned long tmp;
- __asm__ __volatile__ (
- " .set push\n"
- " .set noreorder\n"
- "1: " LL_INSN " %[tmp], %[buddy]\n"
- " bnez %[tmp], 2f\n"
- " or %[tmp], %[tmp], %[global]\n"
- " " SC_INSN " %[tmp], %[buddy]\n"
- " beqz %[tmp], 1b\n"
- " nop\n"
- "2:\n"
- " .set pop"
- : [buddy] "+m" (buddy->pte),
- [tmp] "=&r" (tmp)
+ if (kernel_uses_llsc && R10000_LLSC_WAR) {
+ __asm__ __volatile__ (
+ " .set arch=r4000 \n"
+ " .set push \n"
+ " .set noreorder \n"
+ "1:" __LL "%[tmp], %[buddy] \n"
+ " bnez %[tmp], 2f \n"
+ " or %[tmp], %[tmp], %[global] \n"
+ __SC "%[tmp], %[buddy] \n"
+ " beqzl %[tmp], 1b \n"
+ " nop \n"
+ "2: \n"
+ " .set pop \n"
+ " .set mips0 \n"
+ : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
: [global] "r" (page_global));
+ } else if (kernel_uses_llsc) {
+ __asm__ __volatile__ (
+ " .set "MIPS_ISA_ARCH_LEVEL" \n"
+ " .set push \n"
+ " .set noreorder \n"
+ "1:" __LL "%[tmp], %[buddy] \n"
+ " bnez %[tmp], 2f \n"
+ " or %[tmp], %[tmp], %[global] \n"
+ __SC "%[tmp], %[buddy] \n"
+ " beqz %[tmp], 1b \n"
+ " nop \n"
+ "2: \n"
+ " .set pop \n"
+ " .set mips0 \n"
+ : [buddy] "+m" (buddy->pte), [tmp] "=&r" (tmp)
+ : [global] "r" (page_global));
+ }
#else /* !CONFIG_SMP */
if (pte_none(*buddy))
pte_val(*buddy) = pte_val(*buddy) | _PAGE_GLOBAL;
@@ -218,7 +252,6 @@ static inline void set_pte(pte_t *ptep, pte_t pteval)
}
#endif
}
-#define set_pte_at(mm, addr, ptep, pteval) set_pte(ptep, pteval)
static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *ptep)
{
@@ -234,6 +267,22 @@ static inline void pte_clear(struct mm_struct *mm, unsigned long addr, pte_t *pt
}
#endif
+static inline void set_pte_at(struct mm_struct *mm, unsigned long addr,
+ pte_t *ptep, pte_t pteval)
+{
+ extern void __update_cache(unsigned long address, pte_t pte);
+
+ if (!pte_present(pteval))
+ goto cache_sync_done;
+
+ if (pte_present(*ptep) && (pte_pfn(*ptep) == pte_pfn(pteval)))
+ goto cache_sync_done;
+
+ __update_cache(addr, pteval);
+cache_sync_done:
+ set_pte(ptep, pteval);
+}
+
/*
* (pmds are folded into puds so this doesn't get actually called,
* but the define is needed for a generic inline function.)
@@ -270,6 +319,8 @@ static inline int pte_young(pte_t pte) { return pte.pte_low & _PAGE_ACCESSED; }
static inline pte_t pte_wrprotect(pte_t pte)
{
pte.pte_low &= ~_PAGE_WRITE;
+ if (!IS_ENABLED(CONFIG_XPA))
+ pte.pte_low &= ~_PAGE_SILENT_WRITE;
pte.pte_high &= ~_PAGE_SILENT_WRITE;
return pte;
}
@@ -277,6 +328,8 @@ static inline pte_t pte_wrprotect(pte_t pte)
static inline pte_t pte_mkclean(pte_t pte)
{
pte.pte_low &= ~_PAGE_MODIFIED;
+ if (!IS_ENABLED(CONFIG_XPA))
+ pte.pte_low &= ~_PAGE_SILENT_WRITE;
pte.pte_high &= ~_PAGE_SILENT_WRITE;
return pte;
}
@@ -284,6 +337,8 @@ static inline pte_t pte_mkclean(pte_t pte)
static inline pte_t pte_mkold(pte_t pte)
{
pte.pte_low &= ~_PAGE_ACCESSED;
+ if (!IS_ENABLED(CONFIG_XPA))
+ pte.pte_low &= ~_PAGE_SILENT_READ;
pte.pte_high &= ~_PAGE_SILENT_READ;
return pte;
}
@@ -291,24 +346,33 @@ static inline pte_t pte_mkold(pte_t pte)
static inline pte_t pte_mkwrite(pte_t pte)
{
pte.pte_low |= _PAGE_WRITE;
- if (pte.pte_low & _PAGE_MODIFIED)
+ if (pte.pte_low & _PAGE_MODIFIED) {
+ if (!IS_ENABLED(CONFIG_XPA))
+ pte.pte_low |= _PAGE_SILENT_WRITE;
pte.pte_high |= _PAGE_SILENT_WRITE;
+ }
return pte;
}
static inline pte_t pte_mkdirty(pte_t pte)
{
pte.pte_low |= _PAGE_MODIFIED;
- if (pte.pte_low & _PAGE_WRITE)
+ if (pte.pte_low & _PAGE_WRITE) {
+ if (!IS_ENABLED(CONFIG_XPA))
+ pte.pte_low |= _PAGE_SILENT_WRITE;
pte.pte_high |= _PAGE_SILENT_WRITE;
+ }
return pte;
}
static inline pte_t pte_mkyoung(pte_t pte)
{
pte.pte_low |= _PAGE_ACCESSED;
- if (pte.pte_low & _PAGE_READ)
+ if (!(pte.pte_low & _PAGE_NO_READ)) {
+ if (!IS_ENABLED(CONFIG_XPA))
+ pte.pte_low |= _PAGE_SILENT_READ;
pte.pte_high |= _PAGE_SILENT_READ;
+ }
return pte;
}
#else
@@ -353,13 +417,8 @@ static inline pte_t pte_mkdirty(pte_t pte)
static inline pte_t pte_mkyoung(pte_t pte)
{
pte_val(pte) |= _PAGE_ACCESSED;
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
if (!(pte_val(pte) & _PAGE_NO_READ))
pte_val(pte) |= _PAGE_SILENT_READ;
- else
-#endif
- if (pte_val(pte) & _PAGE_READ)
- pte_val(pte) |= _PAGE_SILENT_READ;
return pte;
}
@@ -411,34 +470,41 @@ static inline pgprot_t pgprot_writecombine(pgprot_t _prot)
*/
#define mk_pte(page, pgprot) pfn_pte(page_to_pfn(page), (pgprot))
-#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_XPA)
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
pte.pte_low &= (_PAGE_MODIFIED | _PAGE_ACCESSED | _PFNX_MASK);
pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
pte.pte_low |= pgprot_val(newprot) & ~_PFNX_MASK;
- pte.pte_high |= pgprot_val(newprot) & ~_PFN_MASK;
+ pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
+ return pte;
+}
+#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
+{
+ pte.pte_low &= _PAGE_CHG_MASK;
+ pte.pte_high &= (_PFN_MASK | _CACHE_MASK);
+ pte.pte_low |= pgprot_val(newprot);
+ pte.pte_high |= pgprot_val(newprot) & ~(_PFN_MASK | _CACHE_MASK);
return pte;
}
#else
static inline pte_t pte_modify(pte_t pte, pgprot_t newprot)
{
- return __pte((pte_val(pte) & _PAGE_CHG_MASK) | pgprot_val(newprot));
+ return __pte((pte_val(pte) & _PAGE_CHG_MASK) |
+ (pgprot_val(newprot) & ~_PAGE_CHG_MASK));
}
#endif
extern void __update_tlb(struct vm_area_struct *vma, unsigned long address,
pte_t pte);
-extern void __update_cache(struct vm_area_struct *vma, unsigned long address,
- pte_t pte);
static inline void update_mmu_cache(struct vm_area_struct *vma,
unsigned long address, pte_t *ptep)
{
pte_t pte = *ptep;
__update_tlb(vma, address, pte);
- __update_cache(vma, address, pte);
}
static inline void update_mmu_cache_pmd(struct vm_area_struct *vma,
@@ -468,6 +534,7 @@ static inline int io_remap_pfn_range(struct vm_area_struct *vma,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
+#define has_transparent_hugepage has_transparent_hugepage
extern int has_transparent_hugepage(void);
static inline int pmd_trans_huge(pmd_t pmd)
@@ -542,13 +609,8 @@ static inline pmd_t pmd_mkyoung(pmd_t pmd)
{
pmd_val(pmd) |= _PAGE_ACCESSED;
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
if (!(pmd_val(pmd) & _PAGE_NO_READ))
pmd_val(pmd) |= _PAGE_SILENT_READ;
- else
-#endif
- if (pmd_val(pmd) & _PAGE_READ)
- pmd_val(pmd) |= _PAGE_SILENT_READ;
return pmd;
}
@@ -571,7 +633,8 @@ static inline struct page *pmd_page(pmd_t pmd)
static inline pmd_t pmd_modify(pmd_t pmd, pgprot_t newprot)
{
- pmd_val(pmd) = (pmd_val(pmd) & _PAGE_CHG_MASK) | pgprot_val(newprot);
+ pmd_val(pmd) = (pmd_val(pmd) & (_PAGE_CHG_MASK | _PAGE_HUGE)) |
+ (pgprot_val(newprot) & ~_PAGE_CHG_MASK);
return pmd;
}
diff --git a/arch/mips/include/asm/processor.h b/arch/mips/include/asm/processor.h
index 041153f5cf93..0d36c87acbe2 100644
--- a/arch/mips/include/asm/processor.h
+++ b/arch/mips/include/asm/processor.h
@@ -11,12 +11,14 @@
#ifndef _ASM_PROCESSOR_H
#define _ASM_PROCESSOR_H
+#include <linux/atomic.h>
#include <linux/cpumask.h>
#include <linux/threads.h>
#include <asm/cachectl.h>
#include <asm/cpu.h>
#include <asm/cpu-info.h>
+#include <asm/dsemul.h>
#include <asm/mipsregs.h>
#include <asm/prefetch.h>
@@ -63,7 +65,11 @@ extern unsigned int vced_count, vcei_count;
* 8192EB ...
*/
#define TASK_SIZE32 0x7fff8000UL
-#define TASK_SIZE64 0x10000000000UL
+#ifdef CONFIG_MIPS_VA_BITS_48
+#define TASK_SIZE64 (0x1UL << ((cpu_data[0].vmbits>48)?48:cpu_data[0].vmbits))
+#else
+#define TASK_SIZE64 0x10000000000UL
+#endif
#define TASK_SIZE (test_thread_flag(TIF_32BIT_ADDR) ? TASK_SIZE32 : TASK_SIZE64)
#define STACK_TOP_MAX TASK_SIZE64
@@ -74,7 +80,11 @@ extern unsigned int vced_count, vcei_count;
#endif
-#define STACK_TOP (TASK_SIZE & PAGE_MASK)
+/*
+ * One page above the stack is used for branch delay slot "emulation".
+ * See dsemul.c for details.
+ */
+#define STACK_TOP ((TASK_SIZE & PAGE_MASK) - PAGE_SIZE)
/*
* This decides where the kernel will search for a free chunk of vm
@@ -252,6 +262,12 @@ struct thread_struct {
/* Saved fpu/fpu emulator stuff. */
struct mips_fpu_struct fpu FPU_ALIGN;
+ /* Assigned branch delay slot 'emulation' frame */
+ atomic_t bd_emu_frame;
+ /* PC of the branch from a branch delay slot 'emulation' */
+ unsigned long bd_emu_branch_pc;
+ /* PC to continue from following a branch delay slot 'emulation' */
+ unsigned long bd_emu_cont_pc;
#ifdef CONFIG_MIPS_MT_FPAFF
/* Emulated instruction count */
unsigned long emulated_fp;
@@ -319,6 +335,10 @@ struct thread_struct {
* FPU affinity state (null if not FPAFF) \
*/ \
FPAFF_INIT \
+ /* Delay slot emulation */ \
+ .bd_emu_frame = ATOMIC_INIT(BD_EMUFRAME_NONE), \
+ .bd_emu_branch_pc = 0, \
+ .bd_emu_cont_pc = 0, \
/* \
* Saved DSP stuff \
*/ \
@@ -355,6 +375,10 @@ extern unsigned long thread_saved_pc(struct task_struct *tsk);
*/
extern void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp);
+static inline void flush_thread(void)
+{
+}
+
unsigned long get_wchan(struct task_struct *p);
#define __KSTK_TOS(tsk) ((unsigned long)task_stack_page(tsk) + \
diff --git a/arch/mips/include/asm/r4kcache.h b/arch/mips/include/asm/r4kcache.h
index 38902bf97adc..667ca3c467b7 100644
--- a/arch/mips/include/asm/r4kcache.h
+++ b/arch/mips/include/asm/r4kcache.h
@@ -210,7 +210,11 @@ static inline void protected_writeback_dcache_line(unsigned long addr)
static inline void protected_writeback_scache_line(unsigned long addr)
{
+#ifdef CONFIG_EVA
+ protected_cachee_op(Hit_Writeback_Inv_SD, addr);
+#else
protected_cache_op(Hit_Writeback_Inv_SD, addr);
+#endif
}
/*
diff --git a/arch/mips/include/asm/seccomp.h b/arch/mips/include/asm/seccomp.h
index 1d8a2e2c75c1..d886d6f7687a 100644
--- a/arch/mips/include/asm/seccomp.h
+++ b/arch/mips/include/asm/seccomp.h
@@ -2,27 +2,32 @@
#include <linux/unistd.h>
-/*
- * Kludge alert:
- *
- * The generic seccomp code currently allows only a single compat ABI. Until
- * this is fixed we priorize O32 as the compat ABI over N32.
- */
-#ifdef CONFIG_MIPS32_O32
-
-#define __NR_seccomp_read_32 4003
-#define __NR_seccomp_write_32 4004
-#define __NR_seccomp_exit_32 4001
-#define __NR_seccomp_sigreturn_32 4193 /* rt_sigreturn */
-
-#elif defined(CONFIG_MIPS32_N32)
-
-#define __NR_seccomp_read_32 6000
-#define __NR_seccomp_write_32 6001
-#define __NR_seccomp_exit_32 6058
-#define __NR_seccomp_sigreturn_32 6211 /* rt_sigreturn */
-
-#endif /* CONFIG_MIPS32_O32 */
+#ifdef CONFIG_COMPAT
+static inline const int *get_compat_mode1_syscalls(void)
+{
+ static const int syscalls_O32[] = {
+ __NR_O32_Linux + 3, __NR_O32_Linux + 4,
+ __NR_O32_Linux + 1, __NR_O32_Linux + 193,
+ 0, /* null terminated */
+ };
+ static const int syscalls_N32[] = {
+ __NR_N32_Linux + 0, __NR_N32_Linux + 1,
+ __NR_N32_Linux + 58, __NR_N32_Linux + 211,
+ 0, /* null terminated */
+ };
+
+ if (IS_ENABLED(CONFIG_MIPS32_O32) && test_thread_flag(TIF_32BIT_REGS))
+ return syscalls_O32;
+
+ if (IS_ENABLED(CONFIG_MIPS32_N32))
+ return syscalls_N32;
+
+ BUG();
+}
+
+#define get_compat_mode1_syscalls get_compat_mode1_syscalls
+
+#endif /* CONFIG_COMPAT */
#include <asm-generic/seccomp.h>
diff --git a/arch/mips/include/asm/setup.h b/arch/mips/include/asm/setup.h
index d7bfdeba9e84..4f5279a8308d 100644
--- a/arch/mips/include/asm/setup.h
+++ b/arch/mips/include/asm/setup.h
@@ -21,6 +21,7 @@ extern void *set_vi_handler(int n, vi_handler_t addr);
extern void *set_except_vector(int n, void *addr);
extern unsigned long ebase;
+extern unsigned int hwrena;
extern void per_cpu_trap_init(bool);
extern void cpu_cache_init(void);
diff --git a/arch/mips/include/asm/sgi/hpc3.h b/arch/mips/include/asm/sgi/hpc3.h
index 59920b345942..c0e3dc0293a7 100644
--- a/arch/mips/include/asm/sgi/hpc3.h
+++ b/arch/mips/include/asm/sgi/hpc3.h
@@ -39,7 +39,7 @@ struct hpc3_pbus_dmacregs {
volatile u32 pbdma_dptr; /* pbus dma channel desc ptr */
u32 _unused0[0x1000/4 - 2]; /* padding */
volatile u32 pbdma_ctrl; /* pbus dma channel control register has
- * copletely different meaning for read
+ * completely different meaning for read
* compared with write */
/* read */
#define HPC3_PDMACTRL_INT 0x00000001 /* interrupt (cleared after read) */
@@ -147,7 +147,7 @@ struct hpc3_ethregs {
#define HPC3_EPCFG_P1 0x000f /* Cycles to spend in P1 state for PIO */
#define HPC3_EPCFG_P2 0x00f0 /* Cycles to spend in P2 state for PIO */
#define HPC3_EPCFG_P3 0x0f00 /* Cycles to spend in P3 state for PIO */
-#define HPC3_EPCFG_TST 0x1000 /* Diagnistic ram test feature bit */
+#define HPC3_EPCFG_TST 0x1000 /* Diagnostic ram test feature bit */
u32 _unused2[0x1000/4 - 8]; /* padding */
diff --git a/arch/mips/include/asm/sgiarcs.h b/arch/mips/include/asm/sgiarcs.h
index 26ddfff28c8e..105a9479ac5f 100644
--- a/arch/mips/include/asm/sgiarcs.h
+++ b/arch/mips/include/asm/sgiarcs.h
@@ -144,7 +144,7 @@ struct linux_tinfo {
struct linux_vdirent {
ULONG namelen;
unsigned char attr;
- char fname[32]; /* XXX imperical, should be a define */
+ char fname[32]; /* XXX empirical, should be a define */
};
/* Other stuff for files. */
@@ -179,7 +179,7 @@ struct linux_finfo {
enum linux_devtypes dtype;
unsigned long namelen;
unsigned char attr;
- char name[32]; /* XXX imperical, should be define */
+ char name[32]; /* XXX empirical, should be define */
};
/* This describes the vector containing function pointers to the ARC
diff --git a/arch/mips/include/asm/sibyte/bcm1480_regs.h b/arch/mips/include/asm/sibyte/bcm1480_regs.h
index ec0dacf6f0cb..32a84837b8fa 100644
--- a/arch/mips/include/asm/sibyte/bcm1480_regs.h
+++ b/arch/mips/include/asm/sibyte/bcm1480_regs.h
@@ -415,8 +415,8 @@
(cpu)*BCM1480_IMR_ALIAS_MAILBOX_SPACING)
#define A_BCM1480_IMR_ALIAS_MAILBOX_REGISTER(cpu, reg) (A_BCM1480_IMR_ALIAS_MAILBOX(cpu)+(reg))
-#define R_BCM1480_IMR_ALIAS_MAILBOX_0 0x0000 /* 0x0x0 */
-#define R_BCM1480_IMR_ALIAS_MAILBOX_0_SET 0x0008 /* 0x0x8 */
+#define R_BCM1480_IMR_ALIAS_MAILBOX_0 0x0000
+#define R_BCM1480_IMR_ALIAS_MAILBOX_0_SET 0x0008
/*
* these macros work together to build the address of a mailbox
diff --git a/arch/mips/include/asm/signal.h b/arch/mips/include/asm/signal.h
index 003e273eff4c..23d6b8015c79 100644
--- a/arch/mips/include/asm/signal.h
+++ b/arch/mips/include/asm/signal.h
@@ -11,11 +11,17 @@
#include <uapi/asm/signal.h>
+#ifdef CONFIG_MIPS32_O32
+extern struct mips_abi mips_abi_32;
-#ifdef CONFIG_TRAD_SIGNALS
-#define sig_uses_siginfo(ka) ((ka)->sa.sa_flags & SA_SIGINFO)
+#define sig_uses_siginfo(ka, abi) \
+ ((abi != &mips_abi_32) ? 1 : \
+ ((ka)->sa.sa_flags & SA_SIGINFO))
#else
-#define sig_uses_siginfo(ka) (1)
+#define sig_uses_siginfo(ka, abi) \
+ (IS_ENABLED(CONFIG_64BIT) ? 1 : \
+ (IS_ENABLED(CONFIG_TRAD_SIGNALS) ? \
+ ((ka)->sa.sa_flags & SA_SIGINFO) : 1) )
#endif
#include <asm/sigcontext.h>
diff --git a/arch/mips/include/asm/smp-cps.h b/arch/mips/include/asm/smp-cps.h
index 326c16ebd589..2ae1f61a4a95 100644
--- a/arch/mips/include/asm/smp-cps.h
+++ b/arch/mips/include/asm/smp-cps.h
@@ -29,7 +29,7 @@ extern struct core_boot_config *mips_cps_core_bootcfg;
extern void mips_cps_core_entry(void);
extern void mips_cps_core_init(void);
-extern struct vpe_boot_config *mips_cps_boot_vpes(void);
+extern void mips_cps_boot_vpes(struct core_boot_config *cfg, unsigned vpe);
extern void mips_cps_pm_save(void);
extern void mips_cps_pm_restore(void);
diff --git a/arch/mips/include/asm/smp.h b/arch/mips/include/asm/smp.h
index 03722d4326a1..8bc6c70a4030 100644
--- a/arch/mips/include/asm/smp.h
+++ b/arch/mips/include/asm/smp.h
@@ -23,7 +23,7 @@
extern int smp_num_siblings;
extern cpumask_t cpu_sibling_map[];
extern cpumask_t cpu_core_map[];
-extern cpumask_t cpu_foreign_map;
+extern cpumask_t cpu_foreign_map[];
#define raw_smp_processor_id() (current_thread_info()->cpu)
@@ -53,6 +53,8 @@ extern cpumask_t cpu_coherent_mask;
extern void asmlinkage smp_bootstrap(void);
+extern void calculate_cpu_foreign_map(void);
+
/*
* this function sends a 'reschedule' IPI to another CPU.
* it goes straight through and wastes no time serializing
diff --git a/arch/mips/include/asm/sn/ioc3.h b/arch/mips/include/asm/sn/ioc3.h
index e33f0363235b..feb385180f87 100644
--- a/arch/mips/include/asm/sn/ioc3.h
+++ b/arch/mips/include/asm/sn/ioc3.h
@@ -355,7 +355,7 @@ struct ioc3_etxd {
#define SSCR_PAUSE_STATE 0x40000000 /* sets when PAUSE takes effect */
#define SSCR_RESET 0x80000000 /* reset DMA channels */
-/* all producer/comsumer pointers are the same bitfield */
+/* all producer/consumer pointers are the same bitfield */
#define PROD_CONS_PTR_4K 0x00000ff8 /* for 4K buffers */
#define PROD_CONS_PTR_1K 0x000003f8 /* for 1K buffers */
#define PROD_CONS_PTR_OFF 3
diff --git a/arch/mips/include/asm/sn/sn0/hubio.h b/arch/mips/include/asm/sn/sn0/hubio.h
index 5998b13e9764..57ece90f8cf1 100644
--- a/arch/mips/include/asm/sn/sn0/hubio.h
+++ b/arch/mips/include/asm/sn/sn0/hubio.h
@@ -628,7 +628,7 @@ typedef union h1_icrbb_u {
/*
* Values for field imsgtype
*/
-#define IIO_ICRB_IMSGT_XTALK 0 /* Incoming Meessage from Xtalk */
+#define IIO_ICRB_IMSGT_XTALK 0 /* Incoming Message from Xtalk */
#define IIO_ICRB_IMSGT_BTE 1 /* Incoming message from BTE */
#define IIO_ICRB_IMSGT_SN0NET 2 /* Incoming message from SN0 net */
#define IIO_ICRB_IMSGT_CRB 3 /* Incoming message from CRB ??? */
diff --git a/arch/mips/include/asm/spinlock.h b/arch/mips/include/asm/spinlock.h
index 40196bebe849..f485afe51514 100644
--- a/arch/mips/include/asm/spinlock.h
+++ b/arch/mips/include/asm/spinlock.h
@@ -12,6 +12,7 @@
#include <linux/compiler.h>
#include <asm/barrier.h>
+#include <asm/processor.h>
#include <asm/compiler.h>
#include <asm/war.h>
@@ -48,8 +49,22 @@ static inline int arch_spin_value_unlocked(arch_spinlock_t lock)
}
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)
-#define arch_spin_unlock_wait(x) \
- while (arch_spin_is_locked(x)) { cpu_relax(); }
+
+static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
+{
+ u16 owner = READ_ONCE(lock->h.serving_now);
+ smp_rmb();
+ for (;;) {
+ arch_spinlock_t tmp = READ_ONCE(*lock);
+
+ if (tmp.h.serving_now == tmp.h.ticket ||
+ tmp.h.serving_now != owner)
+ break;
+
+ cpu_relax();
+ }
+ smp_acquire__after_ctrl_dep();
+}
static inline int arch_spin_is_contended(arch_spinlock_t *lock)
{
diff --git a/arch/mips/include/asm/switch_to.h b/arch/mips/include/asm/switch_to.h
index 28b5d84a5022..ebb5c0f2f90d 100644
--- a/arch/mips/include/asm/switch_to.h
+++ b/arch/mips/include/asm/switch_to.h
@@ -105,7 +105,7 @@ do { \
__clear_software_ll_bit(); \
if (cpu_has_userlocal) \
write_c0_userlocal(task_thread_info(next)->tp_value); \
- __restore_watch(); \
+ __restore_watch(next); \
(last) = resume(prev, next, task_thread_info(next)); \
} while (0)
diff --git a/arch/mips/include/asm/syscall.h b/arch/mips/include/asm/syscall.h
index 47bc45a67e9b..d87882513ee3 100644
--- a/arch/mips/include/asm/syscall.h
+++ b/arch/mips/include/asm/syscall.h
@@ -99,7 +99,7 @@ static inline void syscall_get_arguments(struct task_struct *task,
{
int ret;
/* O32 ABI syscall() - Either 64-bit with O32 or 32-bit */
- if ((config_enabled(CONFIG_32BIT) ||
+ if ((IS_ENABLED(CONFIG_32BIT) ||
test_tsk_thread_flag(task, TIF_32BIT_REGS)) &&
(regs->regs[2] == __NR_syscall))
i++;
diff --git a/arch/mips/include/asm/uaccess.h b/arch/mips/include/asm/uaccess.h
index 095ecafe6bd3..11b965f98d95 100644
--- a/arch/mips/include/asm/uaccess.h
+++ b/arch/mips/include/asm/uaccess.h
@@ -88,14 +88,14 @@ extern u64 __ua_limit;
*/
static inline bool eva_kernel_access(void)
{
- if (!config_enabled(CONFIG_EVA))
+ if (!IS_ENABLED(CONFIG_EVA))
return false;
return segment_eq(get_fs(), get_ds());
}
/*
- * Is a address valid? This does a straighforward calculation rather
+ * Is an address valid? This does a straightforward calculation rather
* than tests.
*
* Address valid if:
diff --git a/arch/mips/include/asm/uasm.h b/arch/mips/include/asm/uasm.h
index fc1cdd25fcda..f7929f65f7ca 100644
--- a/arch/mips/include/asm/uasm.h
+++ b/arch/mips/include/asm/uasm.h
@@ -104,8 +104,13 @@ Ip_u1s2(_bltz);
Ip_u1s2(_bltzl);
Ip_u1u2s3(_bne);
Ip_u2s3u1(_cache);
+Ip_u1u2(_cfc1);
+Ip_u2u1(_cfcmsa);
+Ip_u1u2(_ctc1);
+Ip_u2u1(_ctcmsa);
Ip_u2u1s3(_daddiu);
Ip_u3u1u2(_daddu);
+Ip_u1(_di);
Ip_u2u1msbu3(_dins);
Ip_u2u1msbu3(_dinsm);
Ip_u1u2(_divu);
@@ -141,6 +146,8 @@ Ip_u1(_mfhi);
Ip_u1(_mflo);
Ip_u1u2u3(_mtc0);
Ip_u1u2u3(_mthc0);
+Ip_u1(_mthi);
+Ip_u1(_mtlo);
Ip_u3u1u2(_mul);
Ip_u3u1u2(_or);
Ip_u2u1u3(_ori);
@@ -171,7 +178,8 @@ Ip_u2u1(_wsbh);
Ip_u3u1u2(_xor);
Ip_u2u1u3(_xori);
Ip_u2u1(_yield);
-
+Ip_u1u2(_ldpte);
+Ip_u2u1u3(_lddir);
/* Handle labels. */
struct uasm_label {
diff --git a/arch/mips/include/asm/watch.h b/arch/mips/include/asm/watch.h
index 20126ec79359..6ffe3eadf105 100644
--- a/arch/mips/include/asm/watch.h
+++ b/arch/mips/include/asm/watch.h
@@ -12,21 +12,21 @@
#include <asm/mipsregs.h>
-void mips_install_watch_registers(void);
+void mips_install_watch_registers(struct task_struct *t);
void mips_read_watch_registers(void);
void mips_clear_watch_registers(void);
void mips_probe_watch_registers(struct cpuinfo_mips *c);
#ifdef CONFIG_HARDWARE_WATCHPOINTS
-#define __restore_watch() do { \
+#define __restore_watch(task) do { \
if (unlikely(test_bit(TIF_LOAD_WATCH, \
- &current_thread_info()->flags))) { \
- mips_install_watch_registers(); \
+ &task_thread_info(task)->flags))) { \
+ mips_install_watch_registers(task); \
} \
} while (0)
#else
-#define __restore_watch() do {} while (0)
+#define __restore_watch(task) do {} while (0)
#endif
#endif /* _ASM_WATCH_H */
diff --git a/arch/mips/include/uapi/asm/auxvec.h b/arch/mips/include/uapi/asm/auxvec.h
index c9c7195272c4..45ba259a3618 100644
--- a/arch/mips/include/uapi/asm/auxvec.h
+++ b/arch/mips/include/uapi/asm/auxvec.h
@@ -14,4 +14,6 @@
/* Location of VDSO image. */
#define AT_SYSINFO_EHDR 33
+#define AT_VECTOR_SIZE_ARCH 1 /* entries in ARCH_DLINFO */
+
#endif /* __ASM_AUXVEC_H */
diff --git a/arch/mips/include/uapi/asm/inst.h b/arch/mips/include/uapi/asm/inst.h
index ddea53e3a9bb..77429d1622b3 100644
--- a/arch/mips/include/uapi/asm/inst.h
+++ b/arch/mips/include/uapi/asm/inst.h
@@ -21,20 +21,20 @@
enum major_op {
spec_op, bcond_op, j_op, jal_op,
beq_op, bne_op, blez_op, bgtz_op,
- addi_op, cbcond0_op = addi_op, addiu_op, slti_op, sltiu_op,
+ addi_op, pop10_op = addi_op, addiu_op, slti_op, sltiu_op,
andi_op, ori_op, xori_op, lui_op,
cop0_op, cop1_op, cop2_op, cop1x_op,
beql_op, bnel_op, blezl_op, bgtzl_op,
- daddi_op, cbcond1_op = daddi_op, daddiu_op, ldl_op, ldr_op,
+ daddi_op, pop30_op = daddi_op, daddiu_op, ldl_op, ldr_op,
spec2_op, jalx_op, mdmx_op, msa_op = mdmx_op, spec3_op,
lb_op, lh_op, lwl_op, lw_op,
lbu_op, lhu_op, lwr_op, lwu_op,
sb_op, sh_op, swl_op, sw_op,
sdl_op, sdr_op, swr_op, cache_op,
ll_op, lwc1_op, lwc2_op, bc6_op = lwc2_op, pref_op,
- lld_op, ldc1_op, ldc2_op, beqzcjic_op = ldc2_op, ld_op,
+ lld_op, ldc1_op, ldc2_op, pop66_op = ldc2_op, ld_op,
sc_op, swc1_op, swc2_op, balc6_op = swc2_op, major_3b_op,
- scd_op, sdc1_op, sdc2_op, bnezcjialc_op = sdc2_op, sd_op
+ scd_op, sdc1_op, sdc2_op, pop76_op = sdc2_op, sd_op
};
/*
@@ -93,6 +93,50 @@ enum spec3_op {
};
/*
+ * Bits 10-6 minor opcode for r6 spec mult/div encodings
+ */
+enum mult_op {
+ mult_mult_op = 0x0,
+ mult_mul_op = 0x2,
+ mult_muh_op = 0x3,
+};
+enum multu_op {
+ multu_multu_op = 0x0,
+ multu_mulu_op = 0x2,
+ multu_muhu_op = 0x3,
+};
+enum div_op {
+ div_div_op = 0x0,
+ div_div6_op = 0x2,
+ div_mod_op = 0x3,
+};
+enum divu_op {
+ divu_divu_op = 0x0,
+ divu_divu6_op = 0x2,
+ divu_modu_op = 0x3,
+};
+enum dmult_op {
+ dmult_dmult_op = 0x0,
+ dmult_dmul_op = 0x2,
+ dmult_dmuh_op = 0x3,
+};
+enum dmultu_op {
+ dmultu_dmultu_op = 0x0,
+ dmultu_dmulu_op = 0x2,
+ dmultu_dmuhu_op = 0x3,
+};
+enum ddiv_op {
+ ddiv_ddiv_op = 0x0,
+ ddiv_ddiv6_op = 0x2,
+ ddiv_dmod_op = 0x3,
+};
+enum ddivu_op {
+ ddivu_ddivu_op = 0x0,
+ ddivu_ddivu6_op = 0x2,
+ ddivu_dmodu_op = 0x3,
+};
+
+/*
* rt field of bcond opcodes.
*/
enum rt_op {
@@ -103,7 +147,7 @@ enum rt_op {
bltzal_op, bgezal_op, bltzall_op, bgezall_op,
rt_op_0x14, rt_op_0x15, rt_op_0x16, rt_op_0x17,
rt_op_0x18, rt_op_0x19, rt_op_0x1a, rt_op_0x1b,
- bposge32_op, rt_op_0x1d, rt_op_0x1e, rt_op_0x1f
+ bposge32_op, rt_op_0x1d, rt_op_0x1e, synci_op
};
/*
@@ -167,6 +211,7 @@ enum cop1_sdw_func {
fceill_op = 0x0a, ffloorl_op = 0x0b,
fround_op = 0x0c, ftrunc_op = 0x0d,
fceil_op = 0x0e, ffloor_op = 0x0f,
+ fsel_op = 0x10,
fmovc_op = 0x11, fmovz_op = 0x12,
fmovn_op = 0x13, fseleqz_op = 0x14,
frecip_op = 0x15, frsqrt_op = 0x16,
@@ -204,6 +249,16 @@ enum mad_func {
};
/*
+ * func field for page table walker (Loongson-3).
+ */
+enum ptw_func {
+ lwdir_op = 0x00,
+ lwpte_op = 0x01,
+ lddir_op = 0x02,
+ ldpte_op = 0x03,
+};
+
+/*
* func field for special3 lx opcodes (Cavium Octeon).
*/
enum lx_func {
@@ -227,6 +282,21 @@ enum bshfl_func {
};
/*
+ * MSA minor opcodes.
+ */
+enum msa_func {
+ msa_elm_op = 0x19,
+};
+
+/*
+ * MSA ELM opcodes.
+ */
+enum msa_elm {
+ msa_ctc_op = 0x3e,
+ msa_cfc_op = 0x7e,
+};
+
+/*
* func field for MSA MI10 format.
*/
enum msa_mi10_func {
@@ -253,7 +323,7 @@ enum mm_major_op {
mm_pool32b_op, mm_pool16b_op, mm_lhu16_op, mm_andi16_op,
mm_addiu32_op, mm_lhu32_op, mm_sh32_op, mm_lh32_op,
mm_pool32i_op, mm_pool16c_op, mm_lwsp16_op, mm_pool16d_op,
- mm_ori32_op, mm_pool32f_op, mm_reserved1_op, mm_reserved2_op,
+ mm_ori32_op, mm_pool32f_op, mm_pool32s_op, mm_reserved2_op,
mm_pool32c_op, mm_lwgp16_op, mm_lw16_op, mm_pool16e_op,
mm_xori32_op, mm_jals32_op, mm_addiupc_op, mm_reserved3_op,
mm_reserved4_op, mm_pool16f_op, mm_sb16_op, mm_beqz16_op,
@@ -349,7 +419,10 @@ enum mm_32axf_minor_op {
mm_mflo32_op = 0x075,
mm_jalrhb_op = 0x07c,
mm_tlbwi_op = 0x08d,
+ mm_mthi32_op = 0x0b5,
mm_tlbwr_op = 0x0cd,
+ mm_mtlo32_op = 0x0f5,
+ mm_di_op = 0x11d,
mm_jalrs_op = 0x13c,
mm_jalrshb_op = 0x17c,
mm_sync_op = 0x1ad,
@@ -468,6 +541,13 @@ enum mm_32f_73_minor_op {
};
/*
+ * (microMIPS) POOL32S minor opcodes.
+ */
+enum mm_32s_minor_op {
+ mm_32s_elm_op = 0x16,
+};
+
+/*
* (microMIPS) POOL16C minor opcodes.
*/
enum mm_16c_minor_op {
@@ -575,6 +655,36 @@ struct r_format { /* Register format */
;))))))
};
+struct c0r_format { /* C0 register format */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rs : 5,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int rd : 5,
+ __BITFIELD_FIELD(unsigned int z: 8,
+ __BITFIELD_FIELD(unsigned int sel : 3,
+ ;))))))
+};
+
+struct mfmc0_format { /* MFMC0 register format */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int rs : 5,
+ __BITFIELD_FIELD(unsigned int rt : 5,
+ __BITFIELD_FIELD(unsigned int rd : 5,
+ __BITFIELD_FIELD(unsigned int re : 5,
+ __BITFIELD_FIELD(unsigned int sc : 1,
+ __BITFIELD_FIELD(unsigned int : 2,
+ __BITFIELD_FIELD(unsigned int sel : 3,
+ ;))))))))
+};
+
+struct co_format { /* C0 CO format */
+ __BITFIELD_FIELD(unsigned int opcode : 6,
+ __BITFIELD_FIELD(unsigned int co : 1,
+ __BITFIELD_FIELD(unsigned int code : 19,
+ __BITFIELD_FIELD(unsigned int func : 6,
+ ;))))
+};
+
struct p_format { /* Performance counter format (R10000) */
__BITFIELD_FIELD(unsigned int opcode : 6,
__BITFIELD_FIELD(unsigned int rs : 5,
@@ -926,6 +1036,9 @@ union mips_instruction {
struct u_format u_format;
struct c_format c_format;
struct r_format r_format;
+ struct c0r_format c0r_format;
+ struct mfmc0_format mfmc0_format;
+ struct co_format co_format;
struct p_format p_format;
struct f_format f_format;
struct ma_format ma_format;
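
The new c0r_format, mfmc0_format and co_format views make it possible to pick apart CP0 instructions field by field through union mips_instruction. A minimal sketch of how the co_format view might be used (field names come from the hunk above; the helper itself is hypothetical):

static bool insn_is_cop0_co(union mips_instruction insn)
{
	/* cop0_op with the CO bit set selects the coprocessor-0
	 * "CO" encodings (TLB ops, ERET, WAIT, ...). */
	return insn.co_format.opcode == cop0_op && insn.co_format.co;
}
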
diff --git a/arch/mips/include/uapi/asm/siginfo.h b/arch/mips/include/uapi/asm/siginfo.h
index cc49dc240d67..8069cf766603 100644
--- a/arch/mips/include/uapi/asm/siginfo.h
+++ b/arch/mips/include/uapi/asm/siginfo.h
@@ -28,7 +28,7 @@
#define __ARCH_SIGSYS
-#include <uapi/asm-generic/siginfo.h>
+#include <asm-generic/siginfo.h>
/* We can't use generic siginfo_t, because our si_code and si_errno are swapped */
typedef struct siginfo {
@@ -42,13 +42,13 @@ typedef struct siginfo {
/* kill() */
struct {
- pid_t _pid; /* sender's pid */
+ __kernel_pid_t _pid; /* sender's pid */
__ARCH_SI_UID_T _uid; /* sender's uid */
} _kill;
/* POSIX.1b timers */
struct {
- timer_t _tid; /* timer id */
+ __kernel_timer_t _tid; /* timer id */
int _overrun; /* overrun count */
char _pad[sizeof( __ARCH_SI_UID_T) - sizeof(int)];
sigval_t _sigval; /* same as below */
@@ -57,26 +57,26 @@ typedef struct siginfo {
/* POSIX.1b signals */
struct {
- pid_t _pid; /* sender's pid */
+ __kernel_pid_t _pid; /* sender's pid */
__ARCH_SI_UID_T _uid; /* sender's uid */
sigval_t _sigval;
} _rt;
/* SIGCHLD */
struct {
- pid_t _pid; /* which child */
+ __kernel_pid_t _pid; /* which child */
__ARCH_SI_UID_T _uid; /* sender's uid */
int _status; /* exit code */
- clock_t _utime;
- clock_t _stime;
+ __kernel_clock_t _utime;
+ __kernel_clock_t _stime;
} _sigchld;
/* IRIX SIGCHLD */
struct {
- pid_t _pid; /* which child */
- clock_t _utime;
+ __kernel_pid_t _pid; /* which child */
+ __kernel_clock_t _utime;
int _status; /* exit code */
- clock_t _stime;
+ __kernel_clock_t _stime;
} _irix_sigchld;
/* SIGILL, SIGFPE, SIGSEGV, SIGBUS */
@@ -123,6 +123,4 @@ typedef struct siginfo {
#define SI_TIMER __SI_CODE(__SI_TIMER, -3) /* sent by timer expiration */
#define SI_MESGQ __SI_CODE(__SI_MESGQ, -4) /* sent by real time mesq state change */
-#include <asm-generic/siginfo.h>
-
#endif /* _UAPI_ASM_SIGINFO_H */
diff --git a/arch/mips/include/uapi/asm/unistd.h b/arch/mips/include/uapi/asm/unistd.h
index 3129795de940..24ad815c7f38 100644
--- a/arch/mips/include/uapi/asm/unistd.h
+++ b/arch/mips/include/uapi/asm/unistd.h
@@ -381,16 +381,18 @@
#define __NR_membarrier (__NR_Linux + 358)
#define __NR_mlock2 (__NR_Linux + 359)
#define __NR_copy_file_range (__NR_Linux + 360)
+#define __NR_preadv2 (__NR_Linux + 361)
+#define __NR_pwritev2 (__NR_Linux + 362)
/*
* Offset of the last Linux o32 flavoured syscall
*/
-#define __NR_Linux_syscalls 360
+#define __NR_Linux_syscalls 362
#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
#define __NR_O32_Linux 4000
-#define __NR_O32_Linux_syscalls 360
+#define __NR_O32_Linux_syscalls 362
#if _MIPS_SIM == _MIPS_SIM_ABI64
@@ -719,16 +721,18 @@
#define __NR_membarrier (__NR_Linux + 318)
#define __NR_mlock2 (__NR_Linux + 319)
#define __NR_copy_file_range (__NR_Linux + 320)
+#define __NR_preadv2 (__NR_Linux + 321)
+#define __NR_pwritev2 (__NR_Linux + 322)
/*
* Offset of the last Linux 64-bit flavoured syscall
*/
-#define __NR_Linux_syscalls 320
+#define __NR_Linux_syscalls 322
#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 */
#define __NR_64_Linux 5000
-#define __NR_64_Linux_syscalls 320
+#define __NR_64_Linux_syscalls 322
#if _MIPS_SIM == _MIPS_SIM_NABI32
@@ -1061,15 +1065,17 @@
#define __NR_membarrier (__NR_Linux + 322)
#define __NR_mlock2 (__NR_Linux + 323)
#define __NR_copy_file_range (__NR_Linux + 324)
+#define __NR_preadv2 (__NR_Linux + 325)
+#define __NR_pwritev2 (__NR_Linux + 326)
/*
* Offset of the last N32 flavoured syscall
*/
-#define __NR_Linux_syscalls 324
+#define __NR_Linux_syscalls 326
#endif /* _MIPS_SIM == _MIPS_SIM_NABI32 */
#define __NR_N32_Linux 6000
-#define __NR_N32_Linux_syscalls 324
+#define __NR_N32_Linux_syscalls 326
#endif /* _UAPI_ASM_UNISTD_H */
diff --git a/arch/mips/jz4740/board-qi_lb60.c b/arch/mips/jz4740/board-qi_lb60.c
index 934b15b5b575..258fd03c9ef5 100644
--- a/arch/mips/jz4740/board-qi_lb60.c
+++ b/arch/mips/jz4740/board-qi_lb60.c
@@ -39,8 +39,6 @@
#include "clock.h"
-static bool is_avt2;
-
/* GPIOs */
#define QI_LB60_GPIO_SD_CD JZ_GPIO_PORTD(0)
#define QI_LB60_GPIO_SD_VCC_EN_N JZ_GPIO_PORTD(2)
@@ -50,20 +48,6 @@ static bool is_avt2;
#define QI_LB60_GPIO_KEYIN8 JZ_GPIO_PORTD(26)
/* NAND */
-static struct nand_ecclayout qi_lb60_ecclayout_1gb = {
- .eccbytes = 36,
- .eccpos = {
- 6, 7, 8, 9, 10, 11, 12, 13,
- 14, 15, 16, 17, 18, 19, 20, 21,
- 22, 23, 24, 25, 26, 27, 28, 29,
- 30, 31, 32, 33, 34, 35, 36, 37,
- 38, 39, 40, 41
- },
- .oobfree = {
- { .offset = 2, .length = 4 },
- { .offset = 42, .length = 22 }
- },
-};
/* Early prototypes of the QI LB60 had only 1GB of NAND.
* In order to support these devices as well the partition and ecc layout is
@@ -86,25 +70,6 @@ static struct mtd_partition qi_lb60_partitions_1gb[] = {
},
};
-static struct nand_ecclayout qi_lb60_ecclayout_2gb = {
- .eccbytes = 72,
- .eccpos = {
- 12, 13, 14, 15, 16, 17, 18, 19,
- 20, 21, 22, 23, 24, 25, 26, 27,
- 28, 29, 30, 31, 32, 33, 34, 35,
- 36, 37, 38, 39, 40, 41, 42, 43,
- 44, 45, 46, 47, 48, 49, 50, 51,
- 52, 53, 54, 55, 56, 57, 58, 59,
- 60, 61, 62, 63, 64, 65, 66, 67,
- 68, 69, 70, 71, 72, 73, 74, 75,
- 76, 77, 78, 79, 80, 81, 82, 83
- },
- .oobfree = {
- { .offset = 2, .length = 10 },
- { .offset = 84, .length = 44 },
- },
-};
-
static struct mtd_partition qi_lb60_partitions_2gb[] = {
{
.name = "NAND BOOT partition",
@@ -123,19 +88,67 @@ static struct mtd_partition qi_lb60_partitions_2gb[] = {
},
};
+static int qi_lb60_ooblayout_ecc(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ if (section)
+ return -ERANGE;
+
+ oobregion->length = 36;
+ oobregion->offset = 6;
+
+ if (mtd->oobsize == 128) {
+ oobregion->length *= 2;
+ oobregion->offset *= 2;
+ }
+
+ return 0;
+}
+
+static int qi_lb60_ooblayout_free(struct mtd_info *mtd, int section,
+ struct mtd_oob_region *oobregion)
+{
+ int eccbytes = 36, eccoff = 6;
+
+ if (section > 1)
+ return -ERANGE;
+
+ if (mtd->oobsize == 128) {
+ eccbytes *= 2;
+ eccoff *= 2;
+ }
+
+ if (!section) {
+ oobregion->offset = 2;
+ oobregion->length = eccoff - 2;
+ } else {
+ oobregion->offset = eccoff + eccbytes;
+ oobregion->length = mtd->oobsize - oobregion->offset;
+ }
+
+ return 0;
+}
+
+static const struct mtd_ooblayout_ops qi_lb60_ooblayout_ops = {
+ .ecc = qi_lb60_ooblayout_ecc,
+ .free = qi_lb60_ooblayout_free,
+};
+
static void qi_lb60_nand_ident(struct platform_device *pdev,
- struct nand_chip *chip, struct mtd_partition **partitions,
+ struct mtd_info *mtd, struct mtd_partition **partitions,
int *num_partitions)
{
+ struct nand_chip *chip = mtd_to_nand(mtd);
+
if (chip->page_shift == 12) {
- chip->ecc.layout = &qi_lb60_ecclayout_2gb;
*partitions = qi_lb60_partitions_2gb;
*num_partitions = ARRAY_SIZE(qi_lb60_partitions_2gb);
} else {
- chip->ecc.layout = &qi_lb60_ecclayout_1gb;
*partitions = qi_lb60_partitions_1gb;
*num_partitions = ARRAY_SIZE(qi_lb60_partitions_1gb);
}
+
+ mtd_set_ooblayout(mtd, &qi_lb60_ooblayout_ops);
}
static struct jz_nand_platform_data qi_lb60_nand_pdata = {
@@ -367,43 +380,12 @@ static struct jz4740_mmc_platform_data qi_lb60_mmc_pdata = {
.power_active_low = 1,
};
-/* OHCI */
-static struct regulator_consumer_supply avt2_usb_regulator_consumer =
- REGULATOR_SUPPLY("vbus", "jz4740-ohci");
-
-static struct regulator_init_data avt2_usb_regulator_init_data = {
- .num_consumer_supplies = 1,
- .consumer_supplies = &avt2_usb_regulator_consumer,
- .constraints = {
- .name = "USB power",
- .min_uV = 5000000,
- .max_uV = 5000000,
- .valid_modes_mask = REGULATOR_MODE_NORMAL,
- .valid_ops_mask = REGULATOR_CHANGE_STATUS,
- },
-};
-
-static struct fixed_voltage_config avt2_usb_regulator_data = {
- .supply_name = "USB power",
- .microvolts = 5000000,
- .gpio = JZ_GPIO_PORTB(17),
- .init_data = &avt2_usb_regulator_init_data,
-};
-
-static struct platform_device avt2_usb_regulator_device = {
- .name = "reg-fixed-voltage",
- .id = -1,
- .dev = {
- .platform_data = &avt2_usb_regulator_data,
- }
-};
-
+/* beeper */
static struct pwm_lookup qi_lb60_pwm_lookup[] = {
PWM_LOOKUP("jz4740-pwm", 4, "pwm-beeper", NULL, 0,
PWM_POLARITY_NORMAL),
};
-/* beeper */
static struct platform_device qi_lb60_pwm_beeper = {
.name = "pwm-beeper",
.id = -1,
@@ -487,11 +469,6 @@ static int __init qi_lb60_init_platform_devices(void)
spi_register_board_info(qi_lb60_spi_board_info,
ARRAY_SIZE(qi_lb60_spi_board_info));
- if (is_avt2) {
- platform_device_register(&avt2_usb_regulator_device);
- platform_device_register(&jz4740_usb_ohci_device);
- }
-
pwm_add_table(qi_lb60_pwm_lookup, ARRAY_SIZE(qi_lb60_pwm_lookup));
return platform_add_devices(jz_platform_devices,
@@ -499,19 +476,9 @@ static int __init qi_lb60_init_platform_devices(void)
}
-static __init int board_avt2(char *str)
-{
- qi_lb60_mmc_pdata.card_detect_active_low = 1;
- is_avt2 = true;
-
- return 1;
-}
-__setup("avt2", board_avt2);
-
static int __init qi_lb60_board_setup(void)
{
- printk(KERN_INFO "Qi Hardware JZ4740 QI %s setup\n",
- is_avt2 ? "AVT2" : "LB60");
+ printk(KERN_INFO "Qi Hardware JZ4740 QI LB60 setup\n");
board_gpio_setup();
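
The two mtd_ooblayout_ops callbacks added above reproduce the information that used to live in the deleted nand_ecclayout tables: for 64-byte OOB (the 1 GB parts) the ECC area is bytes 6-41 with free bytes 2-5 and 42-63, and for 128-byte OOB both offset and length double, giving ECC bytes 12-83 with free bytes 2-11 and 84-127. A minimal sketch of querying those callbacks (the dump helper is hypothetical; only the mtd fields used by the callbacks are assumed):

static void qi_lb60_dump_oob_layout(struct mtd_info *mtd)
{
	struct mtd_oob_region region;
	int section;

	/* Single ECC section */
	if (!qi_lb60_ooblayout_ecc(mtd, 0, &region))
		pr_info("ECC bytes: %u..%u\n", region.offset,
			region.offset + region.length - 1);

	/* Two free sections: before and after the ECC bytes */
	for (section = 0; section < 2; section++) {
		if (qi_lb60_ooblayout_free(mtd, section, &region))
			break;
		pr_info("free bytes: %u..%u\n", region.offset,
			region.offset + region.length - 1);
	}
}
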
diff --git a/arch/mips/jz4740/platform.c b/arch/mips/jz4740/platform.c
index e8a463b9b663..2f1dab35c061 100644
--- a/arch/mips/jz4740/platform.c
+++ b/arch/mips/jz4740/platform.c
@@ -32,31 +32,6 @@
#include "clock.h"
-/* OHCI controller */
-static struct resource jz4740_usb_ohci_resources[] = {
- {
- .start = JZ4740_UHC_BASE_ADDR,
- .end = JZ4740_UHC_BASE_ADDR + 0x1000 - 1,
- .flags = IORESOURCE_MEM,
- },
- {
- .start = JZ4740_IRQ_UHC,
- .end = JZ4740_IRQ_UHC,
- .flags = IORESOURCE_IRQ,
- },
-};
-
-struct platform_device jz4740_usb_ohci_device = {
- .name = "jz4740-ohci",
- .id = -1,
- .dev = {
- .dma_mask = &jz4740_usb_ohci_device.dev.coherent_dma_mask,
- .coherent_dma_mask = DMA_BIT_MASK(32),
- },
- .num_resources = ARRAY_SIZE(jz4740_usb_ohci_resources),
- .resource = jz4740_usb_ohci_resources,
-};
-
/* USB Device Controller */
struct platform_device jz4740_udc_xceiv_device = {
.name = "usb_phy_generic",
diff --git a/arch/mips/jz4740/setup.c b/arch/mips/jz4740/setup.c
index 510fc0d962f2..6d0152321819 100644
--- a/arch/mips/jz4740/setup.c
+++ b/arch/mips/jz4740/setup.c
@@ -20,7 +20,6 @@
#include <linux/kernel.h>
#include <linux/libfdt.h>
#include <linux/of_fdt.h>
-#include <linux/of_platform.h>
#include <asm/bootinfo.h>
#include <asm/prom.h>
@@ -74,16 +73,9 @@ void __init device_tree_init(void)
unflatten_and_copy_device_tree();
}
-static int __init populate_machine(void)
-{
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
- return 0;
-}
-arch_initcall(populate_machine);
-
const char *get_system_type(void)
{
- if (config_enabled(CONFIG_MACH_JZ4780))
+ if (IS_ENABLED(CONFIG_MACH_JZ4780))
return "JZ4780";
return "JZ4740";
diff --git a/arch/mips/kernel/Makefile b/arch/mips/kernel/Makefile
index b0988fd62fcc..4a603a3ea657 100644
--- a/arch/mips/kernel/Makefile
+++ b/arch/mips/kernel/Makefile
@@ -44,7 +44,7 @@ obj-$(CONFIG_CPU_CAVIUM_OCTEON) += r4k_fpu.o octeon_switch.o
obj-$(CONFIG_SMP) += smp.o
obj-$(CONFIG_SMP_UP) += smp-up.o
-obj-$(CONFIG_CPU_BMIPS) += smp-bmips.o bmips_vec.o
+obj-$(CONFIG_CPU_BMIPS) += smp-bmips.o bmips_vec.o bmips_5xxx_init.o
obj-$(CONFIG_MIPS_MT) += mips-mt.o
obj-$(CONFIG_MIPS_MT_FPAFF) += mips-mt-fpaff.o
@@ -71,7 +71,7 @@ obj-$(CONFIG_32BIT) += scall32-o32.o
obj-$(CONFIG_64BIT) += scall64-64.o
obj-$(CONFIG_MIPS32_COMPAT) += linux32.o ptrace32.o signal32.o
obj-$(CONFIG_MIPS32_N32) += binfmt_elfn32.o scall64-n32.o signal_n32.o
-obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o
+obj-$(CONFIG_MIPS32_O32) += binfmt_elfo32.o scall64-o32.o signal_o32.o
obj-$(CONFIG_KGDB) += kgdb.o
obj-$(CONFIG_PROC_FS) += proc.o
@@ -83,6 +83,8 @@ obj-$(CONFIG_I8253) += i8253.o
obj-$(CONFIG_GPIO_TXX9) += gpio_txx9.o
+obj-$(CONFIG_RELOCATABLE) += relocate.o
+
obj-$(CONFIG_KEXEC) += machine_kexec.o relocate_kernel.o crash.o
obj-$(CONFIG_CRASH_DUMP) += crash_dump.o
obj-$(CONFIG_EARLY_PRINTK) += early_printk.o
diff --git a/arch/mips/kernel/asm-offsets.c b/arch/mips/kernel/asm-offsets.c
index 154e2039ea5e..fae2f9447792 100644
--- a/arch/mips/kernel/asm-offsets.c
+++ b/arch/mips/kernel/asm-offsets.c
@@ -14,6 +14,7 @@
#include <linux/mm.h>
#include <linux/kbuild.h>
#include <linux/suspend.h>
+#include <asm/cpu-info.h>
#include <asm/pm.h>
#include <asm/ptrace.h>
#include <asm/processor.h>
@@ -341,59 +342,6 @@ void output_pm_defines(void)
void output_kvm_defines(void)
{
COMMENT(" KVM/MIPS Specfic offsets. ");
- DEFINE(VCPU_ARCH_SIZE, sizeof(struct kvm_vcpu_arch));
- OFFSET(VCPU_RUN, kvm_vcpu, run);
- OFFSET(VCPU_HOST_ARCH, kvm_vcpu, arch);
-
- OFFSET(VCPU_HOST_EBASE, kvm_vcpu_arch, host_ebase);
- OFFSET(VCPU_GUEST_EBASE, kvm_vcpu_arch, guest_ebase);
-
- OFFSET(VCPU_HOST_STACK, kvm_vcpu_arch, host_stack);
- OFFSET(VCPU_HOST_GP, kvm_vcpu_arch, host_gp);
-
- OFFSET(VCPU_HOST_CP0_BADVADDR, kvm_vcpu_arch, host_cp0_badvaddr);
- OFFSET(VCPU_HOST_CP0_CAUSE, kvm_vcpu_arch, host_cp0_cause);
- OFFSET(VCPU_HOST_EPC, kvm_vcpu_arch, host_cp0_epc);
- OFFSET(VCPU_HOST_ENTRYHI, kvm_vcpu_arch, host_cp0_entryhi);
-
- OFFSET(VCPU_GUEST_INST, kvm_vcpu_arch, guest_inst);
-
- OFFSET(VCPU_R0, kvm_vcpu_arch, gprs[0]);
- OFFSET(VCPU_R1, kvm_vcpu_arch, gprs[1]);
- OFFSET(VCPU_R2, kvm_vcpu_arch, gprs[2]);
- OFFSET(VCPU_R3, kvm_vcpu_arch, gprs[3]);
- OFFSET(VCPU_R4, kvm_vcpu_arch, gprs[4]);
- OFFSET(VCPU_R5, kvm_vcpu_arch, gprs[5]);
- OFFSET(VCPU_R6, kvm_vcpu_arch, gprs[6]);
- OFFSET(VCPU_R7, kvm_vcpu_arch, gprs[7]);
- OFFSET(VCPU_R8, kvm_vcpu_arch, gprs[8]);
- OFFSET(VCPU_R9, kvm_vcpu_arch, gprs[9]);
- OFFSET(VCPU_R10, kvm_vcpu_arch, gprs[10]);
- OFFSET(VCPU_R11, kvm_vcpu_arch, gprs[11]);
- OFFSET(VCPU_R12, kvm_vcpu_arch, gprs[12]);
- OFFSET(VCPU_R13, kvm_vcpu_arch, gprs[13]);
- OFFSET(VCPU_R14, kvm_vcpu_arch, gprs[14]);
- OFFSET(VCPU_R15, kvm_vcpu_arch, gprs[15]);
- OFFSET(VCPU_R16, kvm_vcpu_arch, gprs[16]);
- OFFSET(VCPU_R17, kvm_vcpu_arch, gprs[17]);
- OFFSET(VCPU_R18, kvm_vcpu_arch, gprs[18]);
- OFFSET(VCPU_R19, kvm_vcpu_arch, gprs[19]);
- OFFSET(VCPU_R20, kvm_vcpu_arch, gprs[20]);
- OFFSET(VCPU_R21, kvm_vcpu_arch, gprs[21]);
- OFFSET(VCPU_R22, kvm_vcpu_arch, gprs[22]);
- OFFSET(VCPU_R23, kvm_vcpu_arch, gprs[23]);
- OFFSET(VCPU_R24, kvm_vcpu_arch, gprs[24]);
- OFFSET(VCPU_R25, kvm_vcpu_arch, gprs[25]);
- OFFSET(VCPU_R26, kvm_vcpu_arch, gprs[26]);
- OFFSET(VCPU_R27, kvm_vcpu_arch, gprs[27]);
- OFFSET(VCPU_R28, kvm_vcpu_arch, gprs[28]);
- OFFSET(VCPU_R29, kvm_vcpu_arch, gprs[29]);
- OFFSET(VCPU_R30, kvm_vcpu_arch, gprs[30]);
- OFFSET(VCPU_R31, kvm_vcpu_arch, gprs[31]);
- OFFSET(VCPU_LO, kvm_vcpu_arch, lo);
- OFFSET(VCPU_HI, kvm_vcpu_arch, hi);
- OFFSET(VCPU_PC, kvm_vcpu_arch, pc);
- BLANK();
OFFSET(VCPU_FPR0, kvm_vcpu_arch, fpu.fpr[0]);
OFFSET(VCPU_FPR1, kvm_vcpu_arch, fpu.fpr[1]);
@@ -431,14 +379,6 @@ void output_kvm_defines(void)
OFFSET(VCPU_FCR31, kvm_vcpu_arch, fpu.fcr31);
OFFSET(VCPU_MSA_CSR, kvm_vcpu_arch, fpu.msacsr);
BLANK();
-
- OFFSET(VCPU_COP0, kvm_vcpu_arch, cop0);
- OFFSET(VCPU_GUEST_KERNEL_ASID, kvm_vcpu_arch, guest_kernel_asid);
- OFFSET(VCPU_GUEST_USER_ASID, kvm_vcpu_arch, guest_user_asid);
-
- OFFSET(COP0_TLB_HI, mips_coproc, reg[MIPS_CP0_TLB_HI][0]);
- OFFSET(COP0_STATUS, mips_coproc, reg[MIPS_CP0_STATUS][0]);
- BLANK();
}
#ifdef CONFIG_MIPS_CPS
diff --git a/arch/mips/kernel/binfmt_elfn32.c b/arch/mips/kernel/binfmt_elfn32.c
index 1b992c6e3d8e..58ad63d7eb42 100644
--- a/arch/mips/kernel/binfmt_elfn32.c
+++ b/arch/mips/kernel/binfmt_elfn32.c
@@ -30,21 +30,7 @@ typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
/*
* This is used to ensure we don't load something for the wrong architecture.
*/
-#define elf_check_arch(hdr) \
-({ \
- int __res = 1; \
- struct elfhdr *__h = (hdr); \
- \
- if (!mips_elf_check_machine(__h)) \
- __res = 0; \
- if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
- __res = 0; \
- if (((__h->e_flags & EF_MIPS_ABI2) == 0) || \
- ((__h->e_flags & EF_MIPS_ABI) != 0)) \
- __res = 0; \
- \
- __res; \
-})
+#define elf_check_arch elfn32_check_arch
#define TASK32_SIZE 0x7fff8000UL
#undef ELF_ET_DYN_BASE
diff --git a/arch/mips/kernel/binfmt_elfo32.c b/arch/mips/kernel/binfmt_elfo32.c
index abd3affe5fb3..49fb881481f7 100644
--- a/arch/mips/kernel/binfmt_elfo32.c
+++ b/arch/mips/kernel/binfmt_elfo32.c
@@ -28,39 +28,9 @@ typedef double elf_fpreg_t;
typedef elf_fpreg_t elf_fpregset_t[ELF_NFPREG];
/*
- * In order to be sure that we don't attempt to execute an O32 binary which
- * requires 64 bit FP (FR=1) on a system which does not support it we refuse
- * to execute any binary which has bits specified by the following macro set
- * in its ELF header flags.
- */
-#ifdef CONFIG_MIPS_O32_FP64_SUPPORT
-# define __MIPS_O32_FP64_MUST_BE_ZERO 0
-#else
-# define __MIPS_O32_FP64_MUST_BE_ZERO EF_MIPS_FP64
-#endif
-
-/*
* This is used to ensure we don't load something for the wrong architecture.
*/
-#define elf_check_arch(hdr) \
-({ \
- int __res = 1; \
- struct elfhdr *__h = (hdr); \
- \
- if (!mips_elf_check_machine(__h)) \
- __res = 0; \
- if (__h->e_ident[EI_CLASS] != ELFCLASS32) \
- __res = 0; \
- if ((__h->e_flags & EF_MIPS_ABI2) != 0) \
- __res = 0; \
- if (((__h->e_flags & EF_MIPS_ABI) != 0) && \
- ((__h->e_flags & EF_MIPS_ABI) != EF_MIPS_ABI_O32)) \
- __res = 0; \
- if (__h->e_flags & __MIPS_O32_FP64_MUST_BE_ZERO) \
- __res = 0; \
- \
- __res; \
-})
+#define elf_check_arch elfo32_check_arch
#ifdef CONFIG_KVM_GUEST
#define TASK32_SIZE 0x3fff8000UL
diff --git a/arch/mips/kernel/bmips_5xxx_init.S b/arch/mips/kernel/bmips_5xxx_init.S
new file mode 100644
index 000000000000..adaa82e00f2b
--- /dev/null
+++ b/arch/mips/kernel/bmips_5xxx_init.S
@@ -0,0 +1,753 @@
+
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 2011-2012 by Broadcom Corporation
+ *
+ * Init for bmips 5000.
+ * Used to init second core in dual core 5000's.
+ */
+
+#include <linux/init.h>
+
+#include <asm/asm.h>
+#include <asm/asmmacro.h>
+#include <asm/cacheops.h>
+#include <asm/regdef.h>
+#include <asm/mipsregs.h>
+#include <asm/stackframe.h>
+#include <asm/addrspace.h>
+#include <asm/hazards.h>
+#include <asm/bmips.h>
+
+#ifdef CONFIG_CPU_BMIPS5000
+
+
+#define cacheop(kva, size, linesize, op) \
+ .set noreorder ; \
+ addu t1, kva, size ; \
+ subu t2, linesize, 1 ; \
+ not t2 ; \
+ and t0, kva, t2 ; \
+ addiu t1, t1, -1 ; \
+ and t1, t2 ; \
+9: cache op, 0(t0) ; \
+ bne t0, t1, 9b ; \
+ addu t0, linesize ; \
+ .set reorder ;
+
+
+
+#define IS_SHIFT 22
+#define IL_SHIFT 19
+#define IA_SHIFT 16
+#define DS_SHIFT 13
+#define DL_SHIFT 10
+#define DA_SHIFT 7
+#define IS_MASK 7
+#define IL_MASK 7
+#define IA_MASK 7
+#define DS_MASK 7
+#define DL_MASK 7
+#define DA_MASK 7
+#define ICE_MASK 0x80000000
+#define DCE_MASK 0x40000000
+
+#define CP0_BRCM_CONFIG0 $22, 0
+#define CP0_BRCM_MODE $22, 1
+#define CP0_CONFIG_K0_MASK 7
+
+#define CP0_ICACHE_TAG_LO $28
+#define CP0_ICACHE_DATA_LO $28, 1
+#define CP0_DCACHE_TAG_LO $28, 2
+#define CP0_D_SEC_CACHE_DATA_LO $28, 3
+#define CP0_ICACHE_TAG_HI $29
+#define CP0_ICACHE_DATA_HI $29, 1
+#define CP0_DCACHE_TAG_HI $29, 2
+
+#define CP0_BRCM_MODE_Luc_MASK (1 << 11)
+#define CP0_BRCM_CONFIG0_CWF_MASK (1 << 20)
+#define CP0_BRCM_CONFIG0_TSE_MASK (1 << 19)
+#define CP0_BRCM_MODE_SET_MASK (1 << 7)
+#define CP0_BRCM_MODE_ClkRATIO_MASK (7 << 4)
+#define CP0_BRCM_MODE_BrPRED_MASK (3 << 24)
+#define CP0_BRCM_MODE_BrPRED_SHIFT 24
+#define CP0_BRCM_MODE_BrHIST_MASK (0x1f << 20)
+#define CP0_BRCM_MODE_BrHIST_SHIFT 20
+
+/* ZSC L2 Cache Register Access Register Definitions */
+#define BRCM_ZSC_ALL_REGS_SELECT 0x7 << 24
+
+#define BRCM_ZSC_CONFIG_REG 0 << 3
+#define BRCM_ZSC_REQ_BUFFER_REG 2 << 3
+#define BRCM_ZSC_RBUS_ADDR_MAPPING_REG0 4 << 3
+#define BRCM_ZSC_RBUS_ADDR_MAPPING_REG1 6 << 3
+#define BRCM_ZSC_RBUS_ADDR_MAPPING_REG2 8 << 3
+
+#define BRCM_ZSC_SCB0_ADDR_MAPPING_REG0 0xa << 3
+#define BRCM_ZSC_SCB0_ADDR_MAPPING_REG1 0xc << 3
+
+#define BRCM_ZSC_SCB1_ADDR_MAPPING_REG0 0xe << 3
+#define BRCM_ZSC_SCB1_ADDR_MAPPING_REG1 0x10 << 3
+
+#define BRCM_ZSC_CONFIG_LMB1En 1 << (15)
+#define BRCM_ZSC_CONFIG_LMB0En 1 << (14)
+
+/* branch prediction values */
+
+#define BRCM_BrPRED_ALL_TAKEN (0x0)
+#define BRCM_BrPRED_ALL_NOT_TAKEN (0x1)
+#define BRCM_BrPRED_BHT_ENABLE (0x2)
+#define BRCM_BrPRED_PREDICT_BACKWARD (0x3)
+
+
+
+.align 2
+/*
+ * Function: size_i_cache
+ * Arguments: None
+ * Returns: v0 = i cache size, v1 = I cache line size
+ * Description: compute the I-cache size and I-cache line size
+ * Trashes: v0, v1, a0, t0
+ *
+ * pseudo code:
+ *
+ */
+
+LEAF(size_i_cache)
+ .set noreorder
+
+ mfc0 a0, CP0_CONFIG, 1
+ move t0, a0
+
+ /*
+ * Determine sets per way: IS
+ *
+ * This field contains the number of sets (i.e., indices) per way of
+ * the instruction cache:
+ * i) 0x0: 64, ii) 0x1: 128, iii) 0x2: 256, iv) 0x3: 512, v) 0x4: 1k
+ * vi) 0x5 - 0x7: Reserved.
+ */
+
+ srl a0, a0, IS_SHIFT
+ and a0, a0, IS_MASK
+
+ /* sets per way = (64<<IS) */
+
+ li v0, 0x40
+ sllv v0, v0, a0
+
+ /*
+ * Determine line size
+ *
+ * This field contains the line size of the instruction cache:
+ * i) 0x0: No I-cache present, ii) 0x3: 16 bytes, iii) 0x4: 32 bytes,
+ * iv) 0x5: 64 bytes, v) the rest: Reserved.
+ */
+
+ move a0, t0
+
+ srl a0, a0, IL_SHIFT
+ and a0, a0, IL_MASK
+
+ beqz a0, no_i_cache
+ nop
+
+ /* line size = 2 ^ (IL+1) */
+
+ addi a0, a0, 1
+ li v1, 1
+ sll v1, v1, a0
+
+ /* v0 now has sets per way, multiply it by line size now
+ * that will give the set size
+ */
+
+ sll v0, v0, a0
+
+ /*
+ * Determine set associativity
+ *
+ * This field contains the set associativity of the instruction cache.
+ * i) 0x0: Direct mapped, ii) 0x1: 2-way, iii) 0x2: 3-way, iv) 0x3:
+ * 4-way, v) 0x4 - 0x7: Reserved.
+ */
+
+ move a0, t0
+
+ srl a0, a0, IA_SHIFT
+ and a0, a0, IA_MASK
+ addi a0, a0, 0x1
+
+ /* v0 has the set size, multiply it by
+ * set associativity, to get the cache size
+ */
+
+ multu v0, a0 /*multu is interlocked, so no need to insert nops */
+ mflo v0
+ b 1f
+ nop
+
+no_i_cache:
+ move v0, zero
+ move v1, zero
+1:
+ jr ra
+ nop
+ .set reorder
+
+END(size_i_cache)
+
+/*
+ * Function: size_d_cache
+ * Arguments: None
+ * Returns: v0 = d cache size, v1 = d cache line size
+ * Description: compute the D-cache size and D-cache line size.
+ * Trashes: v0, v1, a0, t0
+ *
+ */
+
+LEAF(size_d_cache)
+ .set noreorder
+
+ mfc0 a0, CP0_CONFIG, 1
+ move t0, a0
+
+ /*
+ * Determine sets per way: DS
+ *
+ * This field contains the number of sets (i.e., indices) per way of
+ * the data cache:
+ * i) 0x0: 64, ii) 0x1: 128, iii) 0x2: 256, iv) 0x3: 512, v) 0x4: 1k
+ * vi) 0x5 - 0x7: Reserved.
+ */
+
+ srl a0, a0, DS_SHIFT
+ and a0, a0, DS_MASK
+
+ /* sets per way = (64<<DS) */
+
+ li v0, 0x40
+ sllv v0, v0, a0
+
+ /*
+ * Determine line size
+ *
+ * This field contains the line size of the data cache:
+ * i) 0x0: No D-cache present, ii) 0x3: 16 bytes, iii) 0x4: 32 bytes,
+ * iv) 0x5: 64 bytes, v) the rest: Reserved.
+ */
+ move a0, t0
+
+ srl a0, a0, DL_SHIFT
+ and a0, a0, DL_MASK
+
+ beqz a0, no_d_cache
+ nop
+
+ /* line size = 2 ^ (DL+1) */
+
+ addi a0, a0, 1
+ li v1, 1
+ sll v1, v1, a0
+
+ /* v0 now has sets per way, multiply it by line size now
+ * that will give the set size
+ */
+
+ sll v0, v0, a0
+
+ /* determine set associativity
+ *
+ * This field contains the set associativity of the data cache.
+ * i) 0x0: Direct mapped, ii) 0x1: 2-way, iii) 0x2: 3-way, iv) 0x3:
+ * 4-way, v) 0x4 - 0x7: Reserved.
+ */
+
+ move a0, t0
+
+ srl a0, a0, DA_SHIFT
+ and a0, a0, DA_MASK
+ addi a0, a0, 0x1
+
+ /* v0 has the set size, multiply it by
+ * set associativity, to get the cache size
+ */
+
+ multu v0, a0 /*multu is interlocked, so no need to insert nops */
+ mflo v0
+
+ b 1f
+ nop
+
+no_d_cache:
+ move v0, zero
+ move v1, zero
+1:
+ jr ra
+ nop
+ .set reorder
+
+END(size_d_cache)
+
+
+/*
+ * Function: enable_ID
+ * Arguments: None
+ * Returns: None
+ * Description: Enable I and D caches, initialize I and D-caches, also set
+ * hardware delay for d-cache (TP0).
+ * Trashes: t0
+ *
+ */
+ .global enable_ID
+ .ent enable_ID
+ .set noreorder
+enable_ID:
+ mfc0 t0, CP0_BRCM_CONFIG0
+ or t0, t0, (ICE_MASK | DCE_MASK)
+ mtc0 t0, CP0_BRCM_CONFIG0
+ jr ra
+ nop
+
+ .end enable_ID
+ .set reorder
+
+
+/*
+ * Function: l1_init
+ * Arguments: None
+ * Returns: None
+ * Description: Enable I and D caches, and initialize I and D-caches
+ * Trashes: a0, v0, v1, t0, t1, t2, t8
+ *
+ */
+ .globl l1_init
+ .ent l1_init
+ .set noreorder
+l1_init:
+
+ /* save return address */
+ move t8, ra
+
+
+ /* initialize I and D cache Data and Tag registers. */
+ mtc0 zero, CP0_ICACHE_TAG_LO
+ mtc0 zero, CP0_ICACHE_TAG_HI
+ mtc0 zero, CP0_ICACHE_DATA_LO
+ mtc0 zero, CP0_ICACHE_DATA_HI
+ mtc0 zero, CP0_DCACHE_TAG_LO
+ mtc0 zero, CP0_DCACHE_TAG_HI
+
+ /* Enable Caches before Clearing. If the caches are disabled
+ * then the cache operations to clear the cache will be ignored
+ */
+
+ jal enable_ID
+ nop
+
+ jal size_i_cache /* v0 = i-cache size, v1 = i-cache line size */
+ nop
+
+ /* run uncached in kseg 1 */
+ la k0, 1f
+ lui k1, 0x2000
+ or k0, k1, k0
+ jr k0
+ nop
+1:
+
+ /*
+ * set K0 cache mode
+ */
+
+ mfc0 t0, CP0_CONFIG
+ and t0, t0, ~CP0_CONFIG_K0_MASK
+ or t0, t0, 3 /* Write Back mode */
+ mtc0 t0, CP0_CONFIG
+
+ /*
+ * Initialize instruction cache.
+ */
+
+ li a0, KSEG0
+ cacheop(a0, v0, v1, Index_Store_Tag_I)
+
+ /*
+ * Now we can run from I-$, kseg 0
+ */
+ la k0, 1f
+ lui k1, 0x2000
+ or k0, k1, k0
+ xor k0, k1, k0
+ jr k0
+ nop
+1:
+ /*
+ * Initialize data cache.
+ */
+
+ jal size_d_cache /* v0 = d-cache size, v1 = d-cache line size */
+ nop
+
+
+ li a0, KSEG0
+ cacheop(a0, v0, v1, Index_Store_Tag_D)
+
+ jr t8
+ nop
+
+ .end l1_init
+ .set reorder
+
+
+/*
+ * Function: set_other_config
+ * Arguments: none
+ * Returns: None
+ * Description: initialize remaining configuration to defaults.
+ * Trashes: t0, t1
+ *
+ * pseudo code:
+ *
+ */
+LEAF(set_other_config)
+ .set noreorder
+
+ /* enable Bus error for I-fetch */
+ mfc0 t0, CP0_CACHEERR, 0
+ li t1, 0x4
+ or t0, t1
+ mtc0 t0, CP0_CACHEERR, 0
+
+ /* enable Bus error for Load */
+ mfc0 t0, CP0_CACHEERR, 1
+ li t1, 0x4
+ or t0, t1
+ mtc0 t0, CP0_CACHEERR, 1
+
+ /* enable Bus Error for Store */
+ mfc0 t0, CP0_CACHEERR, 2
+ li t1, 0x4
+ or t0, t1
+ mtc0 t0, CP0_CACHEERR, 2
+
+ jr ra
+ nop
+ .set reorder
+END(set_other_config)
+
+/*
+ * Function: set_branch_pred
+ * Arguments: none
+ * Returns: None
+ * Description:
+ * Trashes: t0, t1
+ *
+ * pseudo code:
+ *
+ */
+
+LEAF(set_branch_pred)
+ .set noreorder
+ mfc0 t0, CP0_BRCM_MODE
+ li t1, ~(CP0_BRCM_MODE_BrPRED_MASK | CP0_BRCM_MODE_BrHIST_MASK )
+ and t0, t0, t1
+
+ /* enable Branch prediction */
+ li t1, BRCM_BrPRED_BHT_ENABLE
+ sll t1, CP0_BRCM_MODE_BrPRED_SHIFT
+ or t0, t0, t1
+
+ /* set history count to 8 */
+ li t1, 8
+ sll t1, CP0_BRCM_MODE_BrHIST_SHIFT
+ or t0, t0, t1
+
+ mtc0 t0, CP0_BRCM_MODE
+ jr ra
+ nop
+ .set reorder
+END(set_branch_pred)
+
+
+/*
+ * Function: set_luc
+ * Arguments: set link uncached.
+ * Returns: None
+ * Description:
+ * Trashes: t0, t1
+ *
+ */
+LEAF(set_luc)
+ .set noreorder
+ mfc0 t0, CP0_BRCM_MODE
+ li t1, ~(CP0_BRCM_MODE_Luc_MASK)
+ and t0, t0, t1
+
+ /* set Luc */
+ ori t0, t0, CP0_BRCM_MODE_Luc_MASK
+
+ mtc0 t0, CP0_BRCM_MODE
+ jr ra
+ nop
+ .set reorder
+END(set_luc)
+
+/*
+ * Function: set_cwf_tse
+ * Arguments: set CWF and TSE bits
+ * Returns: None
+ * Description:
+ * Trashes: t0, t1
+ *
+ */
+LEAF(set_cwf_tse)
+ .set noreorder
+ mfc0 t0, CP0_BRCM_CONFIG0
+ li t1, (CP0_BRCM_CONFIG0_CWF_MASK | CP0_BRCM_CONFIG0_TSE_MASK)
+ or t0, t0, t1
+
+ mtc0 t0, CP0_BRCM_CONFIG0
+ jr ra
+ nop
+ .set reorder
+END(set_cwf_tse)
+
+/*
+ * Function: set_clock_ratio
+ * Arguments: set clock ratio specified by a0
+ * Returns: None
+ * Description:
+ * Trashes: v0, v1, a0, a1
+ *
+ * pseudo code:
+ *
+ */
+LEAF(set_clock_ratio)
+ .set noreorder
+
+ mfc0 t0, CP0_BRCM_MODE
+ li t1, ~(CP0_BRCM_MODE_SET_MASK | CP0_BRCM_MODE_ClkRATIO_MASK)
+ and t0, t0, t1
+ li t1, CP0_BRCM_MODE_SET_MASK
+ or t0, t0, t1
+ or t0, t0, a0
+ mtc0 t0, CP0_BRCM_MODE
+ jr ra
+ nop
+ .set reorder
+END(set_clock_ratio)
+/*
+ * Function: set_zephyr
+ * Arguments: None
+ * Returns: None
+ * Description: Set any zephyr bits
+ * Trashes: t0 & t1
+ *
+ */
+LEAF(set_zephyr)
+ .set noreorder
+
+ /* enable read/write of CP0 #22 sel. 8 */
+ li t0, 0x5a455048
+ .word 0x4088b00f /* mtc0 t0, $22, 15 */
+
+ .word 0x4008b008 /* mfc0 t0, $22, 8 */
+ li t1, 0x09008000 /* turn off pref, jtb */
+ or t0, t0, t1
+ .word 0x4088b008 /* mtc0 t0, $22, 8 */
+ sync
+
+ /* disable read/write of CP0 #22 sel 8 */
+ li t0, 0x0
+ .word 0x4088b00f /* mtc0 t0, $22, 15 */
+
+
+ jr ra
+ nop
+ .set reorder
+
+END(set_zephyr)
+
+
+/*
+ * Function: set_llmb
+ * Arguments: a0=0 disable llmb, a0=1 enables llmb
+ * Returns: None
+ * Description:
+ * Trashes: t0, t1, t2
+ *
+ * pseudo code:
+ *
+ */
+LEAF(set_llmb)
+ .set noreorder
+
+ li t2, 0x90000000 | BRCM_ZSC_ALL_REGS_SELECT | BRCM_ZSC_CONFIG_REG
+ sync
+ cache 0x7, 0x0(t2)
+ sync
+ mfc0 t0, CP0_D_SEC_CACHE_DATA_LO
+ li t1, ~(BRCM_ZSC_CONFIG_LMB1En | BRCM_ZSC_CONFIG_LMB0En)
+ and t0, t0, t1
+
+ beqz a0, svlmb
+ nop
+
+enable_lmb:
+ li t1, (BRCM_ZSC_CONFIG_LMB1En | BRCM_ZSC_CONFIG_LMB0En)
+ or t0, t0, t1
+
+svlmb:
+ mtc0 t0, CP0_D_SEC_CACHE_DATA_LO
+ sync
+ cache 0xb, 0x0(t2)
+ sync
+
+ jr ra
+ nop
+ .set reorder
+
+END(set_llmb)
+/*
+ * Function: core_init
+ * Arguments: none
+ * Returns: None
+ * Description: initialize core related configuration
+ * Trashes: v0,v1,a0,a1,t8
+ *
+ * pseudo code:
+ *
+ */
+ .globl core_init
+ .ent core_init
+ .set noreorder
+core_init:
+ move t8, ra
+
+ /* set Zephyr bits. */
+ bal set_zephyr
+ nop
+
+#if ENABLE_FPU==1
+ /* initialize the Floating point unit (both TPs) */
+ bal init_fpu
+ nop
+#endif
+
+ /* set low latency memory bus */
+ li a0, 1
+ bal set_llmb
+ nop
+
+ /* set branch prediction (TP0 only) */
+ bal set_branch_pred
+ nop
+
+ /* set link uncached */
+ bal set_luc
+ nop
+
+ /* set CWF and TSE */
+ bal set_cwf_tse
+ nop
+
+ /*
+ *set clock ratio by setting 1 to 'set'
+ * and 0 to ClkRatio, (TP0 only)
+ */
+ li a0, 0
+ bal set_clock_ratio
+ nop
+
+ /* set other configuration to defaults */
+ bal set_other_config
+ nop
+
+ move ra, t8
+ jr ra
+ nop
+
+ .set reorder
+ .end core_init
+
+/*
+ * Function: clear_jump_target_buffer
+ * Arguments: None
+ * Returns: None
+ * Description:
+ * Trashes: t0, t1, t2
+ *
+ */
+#define RESET_CALL_RETURN_STACK_THIS_THREAD (0x06<<16)
+#define RESET_JUMP_TARGET_BUFFER_THIS_THREAD (0x04<<16)
+#define JTB_CS_CNTL_MASK (0xFF<<16)
+
+ .globl clear_jump_target_buffer
+ .ent clear_jump_target_buffer
+ .set noreorder
+clear_jump_target_buffer:
+
+ mfc0 t0, $22, 2
+ nop
+ nop
+
+ li t1, ~JTB_CS_CNTL_MASK
+ and t0, t0, t1
+ li t2, RESET_CALL_RETURN_STACK_THIS_THREAD
+ or t0, t0, t2
+ mtc0 t0, $22, 2
+ nop
+ nop
+
+ and t0, t0, t1
+ li t2, RESET_JUMP_TARGET_BUFFER_THIS_THREAD
+ or t0, t0, t2
+ mtc0 t0, $22, 2
+ nop
+ nop
+ jr ra
+ nop
+
+ .end clear_jump_target_buffer
+ .set reorder
+/*
+ * Function: bmips_cache_init
+ * Arguments: None
+ * Returns: None
+ * Description: Enable I and D caches, and initialize I and D-caches
+ * Trashes: v0, v1, t0, t1, t2, t5, t7, t8
+ *
+ */
+ .globl bmips_5xxx_init
+ .ent bmips_5xxx_init
+ .set noreorder
+bmips_5xxx_init:
+
+ /* save return address and A0 */
+ move t7, ra
+ move t5, a0
+
+ jal l1_init
+ nop
+
+ jal core_init
+ nop
+
+ jal clear_jump_target_buffer
+ nop
+
+ mtc0 zero, CP0_CAUSE
+
+ move a0, t5
+ jr t7
+ nop
+
+ .end bmips_5xxx_init
+ .set reorder
+
+
+#endif
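
size_i_cache and size_d_cache above both decode CP0 Config1 the same way: sets per way = 64 << S-field, line size = 2^(L-field + 1) bytes (0 means no cache), ways = A-field + 1, and cache size = sets * line size * ways. The same arithmetic in C, as an illustrative sketch rather than anything in the patch (shift and mask values copied from the defines at the top of the file):

static unsigned int bmips_icache_size(unsigned int config1)
{
	unsigned int sets = 64 << ((config1 >> 22) & 7);	/* IS field */
	unsigned int il   = (config1 >> 19) & 7;		/* IL field */
	unsigned int ways = ((config1 >> 16) & 7) + 1;		/* IA field */

	if (!il)
		return 0;				/* no I-cache present */

	/* line size = 2^(IL+1) bytes */
	return sets * (2u << il) * ways;
}
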
diff --git a/arch/mips/kernel/bmips_vec.S b/arch/mips/kernel/bmips_vec.S
index 86495072a922..921a5fa55da6 100644
--- a/arch/mips/kernel/bmips_vec.S
+++ b/arch/mips/kernel/bmips_vec.S
@@ -88,12 +88,13 @@ NESTED(bmips_reset_nmi_vec, PT_SIZE, sp)
li k1, (1 << 19)
mfc0 k0, CP0_STATUS
and k0, k1
- beqz k0, bmips_smp_entry
+ beqz k0, soft_reset
#if defined(CONFIG_CPU_BMIPS5000)
mfc0 k0, CP0_PRID
li k1, PRID_IMP_BMIPS5000
- andi k0, 0xff00
+ /* mask with PRID_IMP_BMIPS5000 to cover both variants */
+ andi k0, PRID_IMP_BMIPS5000
bne k0, k1, 1f
/* if we're not on core 0, this must be the SMP boot signal */
@@ -125,13 +126,48 @@ NESTED(bmips_reset_nmi_vec, PT_SIZE, sp)
.set arch=r4000
eret
+#ifdef CONFIG_SMP
+soft_reset:
+
+#if defined(CONFIG_CPU_BMIPS5000)
+ mfc0 k0, CP0_PRID
+ andi k0, 0xff00
+ li k1, PRID_IMP_BMIPS5200
+ bne k0, k1, bmips_smp_entry
+
+ /* if running on TP 1, jump to bmips_smp_entry */
+ mfc0 k0, $22
+ li k1, (1 << 24)
+ and k1, k0
+ bnez k1, bmips_smp_entry
+ nop
+
+ /*
+ * Running on TP0; this cannot be core 0 (the boot core).
+ * Check for soft reset, which indicates a warm boot.
+ */
+ mfc0 k0, $12
+ li k1, (1 << 20)
+ and k0, k1
+ beqz k0, bmips_smp_entry
+
+ /*
+ * Warm boot.
+ * Cache init is only done on TP0
+ */
+ la k0, bmips_5xxx_init
+ jalr k0
+ nop
+
+ b bmips_smp_entry
+ nop
+#endif
+
/***********************************************************************
* CPU1 reset vector (used for the initial boot only)
* This is still part of bmips_reset_nmi_vec().
***********************************************************************/
-#ifdef CONFIG_SMP
-
bmips_smp_entry:
/* set up CP0 STATUS; enable FPU */
@@ -166,10 +202,12 @@ bmips_smp_entry:
2:
#endif /* CONFIG_CPU_BMIPS4350 || CONFIG_CPU_BMIPS4380 */
#if defined(CONFIG_CPU_BMIPS5000)
- /* set exception vector base */
+ /* mask with PRID_IMP_BMIPS5000 to cover both variants */
li k1, PRID_IMP_BMIPS5000
+ andi k0, PRID_IMP_BMIPS5000
bne k0, k1, 3f
+ /* set exception vector base */
la k0, ebase
lw k0, 0(k0)
mtc0 k0, $15, 1
@@ -263,6 +301,8 @@ LEAF(bmips_enable_xks01)
#endif /* CONFIG_CPU_BMIPS4380 */
#if defined(CONFIG_CPU_BMIPS5000)
li t1, PRID_IMP_BMIPS5000
+ /* mask with PRID_IMP_BMIPS5000 to cover both variants */
+ andi t2, PRID_IMP_BMIPS5000
bne t2, t1, 2f
mfc0 t0, $22, 5
diff --git a/arch/mips/kernel/branch.c b/arch/mips/kernel/branch.c
index d8f9b357b222..46c227fc98f5 100644
--- a/arch/mips/kernel/branch.c
+++ b/arch/mips/kernel/branch.c
@@ -481,7 +481,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
/*
* OK we are here either because we hit a NAL
* instruction or because we are emulating an
- * old bltzal{,l} one. Lets figure out what the
+ * old bltzal{,l} one. Let's figure out what the
* case really is.
*/
if (!insn.i_format.rs) {
@@ -515,7 +515,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
/*
* OK we are here either because we hit a BAL
* instruction or because we are emulating an
- * old bgezal{,l} one. Lets figure out what the
+ * old bgezal{,l} one. Let's figure out what the
* case really is.
*/
if (!insn.i_format.rs) {
@@ -688,21 +688,9 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
}
lose_fpu(1); /* Save FPU state for the emulator. */
reg = insn.i_format.rt;
- bit = 0;
- switch (insn.i_format.rs) {
- case bc1eqz_op:
- /* Test bit 0 */
- if (get_fpr32(&current->thread.fpu.fpr[reg], 0)
- & 0x1)
- bit = 1;
- break;
- case bc1nez_op:
- /* Test bit 0 */
- if (!(get_fpr32(&current->thread.fpu.fpr[reg], 0)
- & 0x1))
- bit = 1;
- break;
- }
+ bit = get_fpr32(&current->thread.fpu.fpr[reg], 0) & 0x1;
+ if (insn.i_format.rs == bc1eqz_op)
+ bit = !bit;
own_fpu(1);
if (bit)
epc = epc + 4 +
@@ -802,7 +790,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
epc += 4 + (insn.i_format.simmediate << 2);
regs->cp0_epc = epc;
break;
- case beqzcjic_op:
+ case pop66_op:
if (!cpu_has_mips_r6) {
ret = -SIGILL;
break;
@@ -810,7 +798,7 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
/* Compact branch: BEQZC || JIC */
regs->cp0_epc += 8;
break;
- case bnezcjialc_op:
+ case pop76_op:
if (!cpu_has_mips_r6) {
ret = -SIGILL;
break;
@@ -821,8 +809,8 @@ int __compute_return_epc_for_insn(struct pt_regs *regs,
regs->cp0_epc += 8;
break;
#endif
- case cbcond0_op:
- case cbcond1_op:
+ case pop10_op:
+ case pop30_op:
/* Only valid for MIPS R6 */
if (!cpu_has_mips_r6) {
ret = -SIGILL;
diff --git a/arch/mips/kernel/cevt-r4k.c b/arch/mips/kernel/cevt-r4k.c
index 8dfe6a6e1480..804d2a2a19fe 100644
--- a/arch/mips/kernel/cevt-r4k.c
+++ b/arch/mips/kernel/cevt-r4k.c
@@ -28,6 +28,83 @@ static int mips_next_event(unsigned long delta,
return res;
}
+/**
+ * calculate_min_delta() - Calculate a good minimum delta for mips_next_event().
+ *
+ * Running under virtualisation can introduce overhead into mips_next_event() in
+ * the form of hypervisor emulation of CP0_Count/CP0_Compare registers,
+ * potentially with an unnatural frequency, which makes a fixed min_delta_ns
+ * value inappropriate as it may be too small.
+ *
+ * It can also introduce occasional latency from the guest being descheduled.
+ *
+ * This function calculates a good minimum delta based roughly on the 75th
+ * percentile of the time taken to do the mips_next_event() sequence, in order
+ * to handle potentially higher overhead while also eliminating outliers due to
+ * unpredictable hypervisor latency (which can be handled by retries).
+ *
+ * Return: An appropriate minimum delta for the clock event device.
+ */
+static unsigned int calculate_min_delta(void)
+{
+ unsigned int cnt, i, j, k, l;
+ unsigned int buf1[4], buf2[3];
+ unsigned int min_delta;
+
+ /*
+ * Calculate the median of 5 75th percentiles of 5 samples of how long
+ * it takes to set CP0_Compare = CP0_Count + delta.
+ */
+ for (i = 0; i < 5; ++i) {
+ for (j = 0; j < 5; ++j) {
+ /*
+ * This is like the code in mips_next_event(), and
+ * directly measures the borderline "safe" delta.
+ */
+ cnt = read_c0_count();
+ write_c0_compare(cnt);
+ cnt = read_c0_count() - cnt;
+
+ /* Sorted insert into buf1 */
+ for (k = 0; k < j; ++k) {
+ if (cnt < buf1[k]) {
+ l = min_t(unsigned int,
+ j, ARRAY_SIZE(buf1) - 1);
+ for (; l > k; --l)
+ buf1[l] = buf1[l - 1];
+ break;
+ }
+ }
+ if (k < ARRAY_SIZE(buf1))
+ buf1[k] = cnt;
+ }
+
+ /* Sorted insert of 75th percentile into buf2 */
+ for (k = 0; k < i; ++k) {
+ if (buf1[ARRAY_SIZE(buf1) - 1] < buf2[k]) {
+ l = min_t(unsigned int,
+ i, ARRAY_SIZE(buf2) - 1);
+ for (; l > k; --l)
+ buf2[l] = buf2[l - 1];
+ break;
+ }
+ }
+ if (k < ARRAY_SIZE(buf2))
+ buf2[k] = buf1[ARRAY_SIZE(buf1) - 1];
+ }
+
+ /* Use 2 * median of 75th percentiles */
+ min_delta = buf2[ARRAY_SIZE(buf2) - 1] * 2;
+
+ /* Don't go too low */
+ if (min_delta < 0x300)
+ min_delta = 0x300;
+
+ pr_debug("%s: median 75th percentile=%#x, min_delta=%#x\n",
+ __func__, buf2[ARRAY_SIZE(buf2) - 1], min_delta);
+ return min_delta;
+}
+
DEFINE_PER_CPU(struct clock_event_device, mips_clockevent_device);
int cp0_timer_irq_installed;
@@ -177,7 +254,7 @@ int r4k_clockevent_init(void)
{
unsigned int cpu = smp_processor_id();
struct clock_event_device *cd;
- unsigned int irq;
+ unsigned int irq, min_delta;
if (!cpu_has_counter || !mips_hpt_frequency)
return -ENXIO;
@@ -199,11 +276,7 @@ int r4k_clockevent_init(void)
CLOCK_EVT_FEAT_C3STOP |
CLOCK_EVT_FEAT_PERCPU;
- clockevent_set_clock(cd, mips_hpt_frequency);
-
- /* Calculate the min / max delta */
- cd->max_delta_ns = clockevent_delta2ns(0x7fffffff, cd);
- cd->min_delta_ns = clockevent_delta2ns(0x300, cd);
+ min_delta = calculate_min_delta();
cd->rating = 300;
cd->irq = irq;
@@ -211,7 +284,7 @@ int r4k_clockevent_init(void)
cd->set_next_event = mips_next_event;
cd->event_handler = mips_event_handler;
- clockevents_register_device(cd);
+ clockevents_config_and_register(cd, mips_hpt_frequency, min_delta, 0x7fffffff);
if (cp0_timer_irq_installed)
return 0;
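
The buffers in calculate_min_delta() implement a sorted insert that keeps only the smallest entries: buf1[] holds the four smallest of five timing samples, so buf1[3] lands on roughly the 75th percentile and a single hypervisor-induced outlier is simply dropped; buf2[] then keeps the three smallest of the five percentiles, making buf2[2] their median. A stand-alone sketch of that keep-the-smallest-N insert (hypothetical helper, not code from the patch):

static void keep_smallest(unsigned int *buf, unsigned int n,
			  unsigned int seen, unsigned int sample)
{
	unsigned int k, l;

	/* Find the slot where the new sample belongs */
	for (k = 0; k < seen && k < n; k++)
		if (sample < buf[k])
			break;
	if (k >= n)
		return;			/* larger than everything kept */

	/* Shift larger entries up, dropping the largest if buf is full */
	l = (seen < n - 1) ? seen : n - 1;
	for (; l > k; l--)
		buf[l] = buf[l - 1];
	buf[k] = sample;
}

For example, feeding the samples {5, 40, 6, 7, 6} through a four-entry buffer keeps {5, 6, 6, 7}, so the 40-cycle outlier never influences the resulting minimum delta.
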
diff --git a/arch/mips/kernel/cps-vec.S b/arch/mips/kernel/cps-vec.S
index ac81edd44563..59476a607add 100644
--- a/arch/mips/kernel/cps-vec.S
+++ b/arch/mips/kernel/cps-vec.S
@@ -18,9 +18,12 @@
#include <asm/mipsmtregs.h>
#include <asm/pm.h>
+#define GCR_CPC_BASE_OFS 0x0088
#define GCR_CL_COHERENCE_OFS 0x2008
#define GCR_CL_ID_OFS 0x2028
+#define CPC_CL_VC_RUN_OFS 0x2028
+
.extern mips_cm_base
.set noreorder
@@ -60,6 +63,37 @@
nop
.endm
+ /*
+ * Set dest to non-zero if the core supports MIPSr6 multithreading
+ * (ie. VPs), else zero. If MIPSr6 multithreading is not supported then
+ * branch to nomt.
+ */
+ .macro has_vp dest, nomt
+ mfc0 \dest, CP0_CONFIG, 1
+ bgez \dest, \nomt
+ mfc0 \dest, CP0_CONFIG, 2
+ bgez \dest, \nomt
+ mfc0 \dest, CP0_CONFIG, 3
+ bgez \dest, \nomt
+ mfc0 \dest, CP0_CONFIG, 4
+ bgez \dest, \nomt
+ mfc0 \dest, CP0_CONFIG, 5
+ andi \dest, \dest, MIPS_CONF5_VP
+ beqz \dest, \nomt
+ nop
+ .endm
+
+ /* Calculate an uncached address for the CM GCRs */
+ .macro cmgcrb dest
+ .set push
+ .set noat
+ MFC0 $1, CP0_CMGCRBASE
+ PTR_SLL $1, $1, 4
+ PTR_LI \dest, UNCAC_BASE
+ PTR_ADDU \dest, \dest, $1
+ .set pop
+ .endm
+
.section .text.cps-vec
.balign 0x1000
@@ -90,120 +124,64 @@ not_nmi:
li t0, ST0_CU1 | ST0_CU0 | ST0_BEV | STATUS_BITDEPS
mtc0 t0, CP0_STATUS
- /*
- * Clear the bits used to index the caches. Note that the architecture
- * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
- * be valid for all MIPS32 CPUs, even those for which said writes are
- * unnecessary.
- */
- mtc0 zero, CP0_TAGLO, 0
- mtc0 zero, CP0_TAGHI, 0
- mtc0 zero, CP0_TAGLO, 2
- mtc0 zero, CP0_TAGHI, 2
- ehb
-
- /* Primary cache configuration is indicated by Config1 */
- mfc0 v0, CP0_CONFIG, 1
-
- /* Detect I-cache line size */
- _EXT t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
- beqz t0, icache_done
- li t1, 2
- sllv t0, t1, t0
-
- /* Detect I-cache size */
- _EXT t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
- xori t2, t1, 0x7
- beqz t2, 1f
- li t3, 32
- addiu t1, t1, 1
- sllv t1, t3, t1
-1: /* At this point t1 == I-cache sets per way */
- _EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
- addiu t2, t2, 1
- mul t1, t1, t0
- mul t1, t1, t2
-
- li a0, CKSEG0
- PTR_ADD a1, a0, t1
-1: cache Index_Store_Tag_I, 0(a0)
- PTR_ADD a0, a0, t0
- bne a0, a1, 1b
+ /* Skip cache & coherence setup if we're already coherent */
+ cmgcrb v1
+ lw s7, GCR_CL_COHERENCE_OFS(v1)
+ bnez s7, 1f
nop
-icache_done:
- /* Detect D-cache line size */
- _EXT t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
- beqz t0, dcache_done
- li t1, 2
- sllv t0, t1, t0
-
- /* Detect D-cache size */
- _EXT t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
- xori t2, t1, 0x7
- beqz t2, 1f
- li t3, 32
- addiu t1, t1, 1
- sllv t1, t3, t1
-1: /* At this point t1 == D-cache sets per way */
- _EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
- addiu t2, t2, 1
- mul t1, t1, t0
- mul t1, t1, t2
+ /* Initialize the L1 caches */
+ jal mips_cps_cache_init
+ nop
- li a0, CKSEG0
- PTR_ADDU a1, a0, t1
- PTR_SUBU a1, a1, t0
-1: cache Index_Store_Tag_D, 0(a0)
- bne a0, a1, 1b
- PTR_ADD a0, a0, t0
-dcache_done:
+ /* Enter the coherent domain */
+ li t0, 0xff
+ sw t0, GCR_CL_COHERENCE_OFS(v1)
+ ehb
/* Set Kseg0 CCA to that in s0 */
- mfc0 t0, CP0_CONFIG
+1: mfc0 t0, CP0_CONFIG
ori t0, 0x7
xori t0, 0x7
or t0, t0, s0
mtc0 t0, CP0_CONFIG
ehb
- /* Calculate an uncached address for the CM GCRs */
- MFC0 v1, CP0_CMGCRBASE
- PTR_SLL v1, v1, 4
- PTR_LI t0, UNCAC_BASE
- PTR_ADDU v1, v1, t0
-
- /* Enter the coherent domain */
- li t0, 0xff
- sw t0, GCR_CL_COHERENCE_OFS(v1)
- ehb
-
/* Jump to kseg0 */
PTR_LA t0, 1f
jr t0
nop
/*
- * We're up, cached & coherent. Perform any further required core-level
- * initialisation.
+ * We're up, cached & coherent. Perform any EVA initialization necessary
+ * before we access memory.
*/
-1: jal mips_cps_core_init
+1: eva_init
+
+ /* Retrieve boot configuration pointers */
+ jal mips_cps_get_bootcfg
+ nop
+
+ /* Skip core-level init if we started up coherent */
+ bnez s7, 1f
nop
- /* Do any EVA initialization if necessary */
- eva_init
+ /* Perform any further required core-level initialisation */
+ jal mips_cps_core_init
+ nop
/*
* Boot any other VPEs within this core that should be online, and
* deactivate this VPE if it should be offline.
*/
+ move a1, t9
jal mips_cps_boot_vpes
- nop
+ move a0, v0
/* Off we go! */
- PTR_L t1, VPEBOOTCFG_PC(v0)
- PTR_L gp, VPEBOOTCFG_GP(v0)
- PTR_L sp, VPEBOOTCFG_SP(v0)
+1: PTR_L t1, VPEBOOTCFG_PC(v1)
+ PTR_L gp, VPEBOOTCFG_GP(v1)
+ PTR_L sp, VPEBOOTCFG_SP(v1)
jr t1
nop
END(mips_cps_core_entry)
@@ -245,7 +223,6 @@ LEAF(excep_intex)
.org 0x480
LEAF(excep_ejtag)
- DUMP_EXCEP("EJTAG")
PTR_LA k0, ejtag_debug_handler
jr k0
nop
@@ -323,22 +300,35 @@ LEAF(mips_cps_core_init)
nop
END(mips_cps_core_init)
-LEAF(mips_cps_boot_vpes)
- /* Retrieve CM base address */
- PTR_LA t0, mips_cm_base
- PTR_L t0, 0(t0)
-
+/**
+ * mips_cps_get_bootcfg() - retrieve boot configuration pointers
+ *
+ * Returns: pointer to struct core_boot_config in v0, pointer to
+ * struct vpe_boot_config in v1, VPE ID in t9
+ */
+LEAF(mips_cps_get_bootcfg)
/* Calculate a pointer to this cores struct core_boot_config */
+ cmgcrb t0
lw t0, GCR_CL_ID_OFS(t0)
li t1, COREBOOTCFG_SIZE
mul t0, t0, t1
PTR_LA t1, mips_cps_core_bootcfg
PTR_L t1, 0(t1)
- PTR_ADDU t0, t0, t1
+ PTR_ADDU v0, t0, t1
/* Calculate this VPEs ID. If the core doesn't support MT use 0 */
li t9, 0
-#ifdef CONFIG_MIPS_MT_SMP
+#if defined(CONFIG_CPU_MIPSR6)
+ has_vp ta2, 1f
+
+ /*
+ * Assume non-contiguous numbering. Perhaps some day we'll need
+ * to handle contiguous VP numbering, but no such systems yet
+ * exist.
+ */
+ mfc0 t9, $3, 1
+ andi t9, t9, 0xff
+#elif defined(CONFIG_MIPS_MT_SMP)
has_mt ta2, 1f
/* Find the number of VPEs present in the core */
@@ -362,22 +352,43 @@ LEAF(mips_cps_boot_vpes)
1: /* Calculate a pointer to this VPEs struct vpe_boot_config */
li t1, VPEBOOTCFG_SIZE
- mul v0, t9, t1
- PTR_L ta3, COREBOOTCFG_VPECONFIG(t0)
- PTR_ADDU v0, v0, ta3
+ mul v1, t9, t1
+ PTR_L ta3, COREBOOTCFG_VPECONFIG(v0)
+ PTR_ADDU v1, v1, ta3
-#ifdef CONFIG_MIPS_MT_SMP
-
- /* If the core doesn't support MT then return */
- bnez ta2, 1f
- nop
jr ra
nop
+ END(mips_cps_get_bootcfg)
+
+LEAF(mips_cps_boot_vpes)
+ PTR_L ta2, COREBOOTCFG_VPEMASK(a0)
+ PTR_L ta3, COREBOOTCFG_VPECONFIG(a0)
+
+#if defined(CONFIG_CPU_MIPSR6)
+
+ has_vp t0, 5f
+
+ /* Find base address of CPC */
+ cmgcrb t3
+ PTR_L t1, GCR_CPC_BASE_OFS(t3)
+ PTR_LI t2, ~0x7fff
+ and t1, t1, t2
+ PTR_LI t2, UNCAC_BASE
+ PTR_ADD t1, t1, t2
+
+ /* Set VC_RUN to the VPE mask */
+ PTR_S ta2, CPC_CL_VC_RUN_OFS(t1)
+ ehb
+
+#elif defined(CONFIG_MIPS_MT)
.set push
.set mt
-1: /* Enter VPE configuration state */
+ /* If the core doesn't support MT then return */
+ has_mt t0, 5f
+
+ /* Enter VPE configuration state */
dvpe
PTR_LA t1, 1f
jr.hb t1
@@ -388,7 +399,6 @@ LEAF(mips_cps_boot_vpes)
ehb
/* Loop through each VPE */
- PTR_L ta2, COREBOOTCFG_VPEMASK(t0)
move t8, ta2
li ta1, 0
@@ -431,6 +441,21 @@ LEAF(mips_cps_boot_vpes)
mfc0 t0, CP0_CONFIG
mttc0 t0, CP0_CONFIG
+ /*
+ * Copy the EVA config from this VPE if the CPU supports it.
+ * CONFIG3 must exist to be running MT startup - just read it.
+ */
+ mfc0 t0, CP0_CONFIG, 3
+ and t0, t0, MIPS_CONF3_SC
+ beqz t0, 3f
+ nop
+ mfc0 t0, CP0_SEGCTL0
+ mttc0 t0, CP0_SEGCTL0
+ mfc0 t0, CP0_SEGCTL1
+ mttc0 t0, CP0_SEGCTL1
+ mfc0 t0, CP0_SEGCTL2
+ mttc0 t0, CP0_SEGCTL2
+3:
/* Ensure no software interrupts are pending */
mttc0 zero, CP0_CAUSE
mttc0 zero, CP0_STATUS
@@ -465,7 +490,7 @@ LEAF(mips_cps_boot_vpes)
/* Check whether this VPE is meant to be running */
li t0, 1
- sll t0, t0, t9
+ sll t0, t0, a1
and t0, t0, t8
bnez t0, 2f
nop
@@ -482,10 +507,84 @@ LEAF(mips_cps_boot_vpes)
#endif /* CONFIG_MIPS_MT_SMP */
/* Return */
- jr ra
+5: jr ra
nop
END(mips_cps_boot_vpes)
+LEAF(mips_cps_cache_init)
+ /*
+ * Clear the bits used to index the caches. Note that the architecture
+ * dictates that writing to any of TagLo or TagHi selects 0 or 2 should
+ * be valid for all MIPS32 CPUs, even those for which said writes are
+ * unnecessary.
+ */
+ mtc0 zero, CP0_TAGLO, 0
+ mtc0 zero, CP0_TAGHI, 0
+ mtc0 zero, CP0_TAGLO, 2
+ mtc0 zero, CP0_TAGHI, 2
+ ehb
+
+ /* Primary cache configuration is indicated by Config1 */
+ mfc0 v0, CP0_CONFIG, 1
+
+ /* Detect I-cache line size */
+ _EXT t0, v0, MIPS_CONF1_IL_SHF, MIPS_CONF1_IL_SZ
+ beqz t0, icache_done
+ li t1, 2
+ sllv t0, t1, t0
+
+ /* Detect I-cache size */
+ _EXT t1, v0, MIPS_CONF1_IS_SHF, MIPS_CONF1_IS_SZ
+ xori t2, t1, 0x7
+ beqz t2, 1f
+ li t3, 32
+ addiu t1, t1, 1
+ sllv t1, t3, t1
+1: /* At this point t1 == I-cache sets per way */
+ _EXT t2, v0, MIPS_CONF1_IA_SHF, MIPS_CONF1_IA_SZ
+ addiu t2, t2, 1
+ mul t1, t1, t0
+ mul t1, t1, t2
+
+ li a0, CKSEG0
+ PTR_ADD a1, a0, t1
+1: cache Index_Store_Tag_I, 0(a0)
+ PTR_ADD a0, a0, t0
+ bne a0, a1, 1b
+ nop
+icache_done:
+
+ /* Detect D-cache line size */
+ _EXT t0, v0, MIPS_CONF1_DL_SHF, MIPS_CONF1_DL_SZ
+ beqz t0, dcache_done
+ li t1, 2
+ sllv t0, t1, t0
+
+ /* Detect D-cache size */
+ _EXT t1, v0, MIPS_CONF1_DS_SHF, MIPS_CONF1_DS_SZ
+ xori t2, t1, 0x7
+ beqz t2, 1f
+ li t3, 32
+ addiu t1, t1, 1
+ sllv t1, t3, t1
+1: /* At this point t1 == D-cache sets per way */
+ _EXT t2, v0, MIPS_CONF1_DA_SHF, MIPS_CONF1_DA_SZ
+ addiu t2, t2, 1
+ mul t1, t1, t0
+ mul t1, t1, t2
+
+ li a0, CKSEG0
+ PTR_ADDU a1, a0, t1
+ PTR_SUBU a1, a1, t0
+1: cache Index_Store_Tag_D, 0(a0)
+ bne a0, a1, 1b
+ PTR_ADD a0, a0, t0
+dcache_done:
+
+ jr ra
+ nop
+ END(mips_cps_cache_init)
+
#if defined(CONFIG_MIPS_CPS_PM) && defined(CONFIG_CPU_PM)
/* Calculate a pointer to this CPUs struct mips_static_suspend_state */
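
The cmgcrb macro introduced above is just the CP0 CMGCRBase value shifted left by four (the register stores the physical GCR base >> 4) plus UNCAC_BASE, giving an uncached view of the Coherence Manager registers before the caches are usable. A small C illustration of the arithmetic, assuming the 32-bit KSEG1 base and a made-up register value:

#include <stdint.h>
#include <stdio.h>

#define UNCAC_BASE	0xa0000000ul	/* 32-bit KSEG1; assumption for this demo */

static unsigned long cm_gcr_base(uint32_t cmgcrbase)
{
	unsigned long phys = (unsigned long)cmgcrbase << 4;

	return UNCAC_BASE + phys;	/* uncached mapping of the GCR block */
}

int main(void)
{
	/* Hypothetical register value: GCRs at physical 0x1fbf8000 */
	uint32_t cmgcrbase = 0x1fbf8000u >> 4;

	printf("GCRs at %#lx\n", cm_gcr_base(cmgcrbase));
	return 0;
}
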
diff --git a/arch/mips/kernel/cpu-bugs64.c b/arch/mips/kernel/cpu-bugs64.c
index 6392dbe504fb..a378e44688f5 100644
--- a/arch/mips/kernel/cpu-bugs64.c
+++ b/arch/mips/kernel/cpu-bugs64.c
@@ -244,7 +244,7 @@ static inline void check_daddi(void)
panic(bug64hit, !DADDI_WAR ? daddiwar : nowar);
}
-int daddiu_bug = config_enabled(CONFIG_CPU_MIPSR6) ? 0 : -1;
+int daddiu_bug = IS_ENABLED(CONFIG_CPU_MIPSR6) ? 0 : -1;
static inline void check_daddiu(void)
{
@@ -314,7 +314,7 @@ static inline void check_daddiu(void)
void __init check_bugs64_early(void)
{
- if (!config_enabled(CONFIG_CPU_MIPSR6)) {
+ if (!IS_ENABLED(CONFIG_CPU_MIPSR6)) {
check_mult_sh();
check_daddiu();
}
@@ -322,6 +322,6 @@ void __init check_bugs64_early(void)
void __init check_bugs64(void)
{
- if (!config_enabled(CONFIG_CPU_MIPSR6))
+ if (!IS_ENABLED(CONFIG_CPU_MIPSR6))
check_daddi();
}
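
The config_enabled() -> IS_ENABLED() conversions here (and throughout the series) rely on a preprocessor trick that yields 1 or 0 whether or not the CONFIG_ symbol is defined, so the test can live in ordinary C instead of #ifdef. A simplified, self-contained illustration of that trick (not the kernel's exact kconfig.h, which also checks the _MODULE variant):

#include <stdio.h>

#define __ARG_PLACEHOLDER_1 0,
#define __take_second_arg(__ignored, val, ...) val
#define __is_defined(x) ___is_defined(x)
#define ___is_defined(val) ____is_defined(__ARG_PLACEHOLDER_##val)
#define ____is_defined(arg1_or_junk) __take_second_arg(arg1_or_junk 1, 0)
#define MY_IS_ENABLED(option) __is_defined(option)

#define CONFIG_DEMO_ON 1
/* CONFIG_DEMO_OFF deliberately left undefined */

int main(void)
{
	int on = MY_IS_ENABLED(CONFIG_DEMO_ON) ? 0 : -1;	/* mirrors daddiu_bug */
	int off = MY_IS_ENABLED(CONFIG_DEMO_OFF) ? 0 : -1;

	printf("on: %d, off: %d\n", on, off);
	return 0;
}

When the option is defined to 1, __ARG_PLACEHOLDER_1 expands to "0," and the second argument (1) is selected; when it is undefined, the pasted token is junk and the macro falls through to 0.
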
diff --git a/arch/mips/kernel/cpu-probe.c b/arch/mips/kernel/cpu-probe.c
index b725b713b9f8..a88d44247cc8 100644
--- a/arch/mips/kernel/cpu-probe.c
+++ b/arch/mips/kernel/cpu-probe.c
@@ -539,6 +539,7 @@ static int set_ftlb_enable(struct cpuinfo_mips *c, int enable)
switch (c->cputype) {
case CPU_PROAPTIV:
case CPU_P5600:
+ case CPU_P6600:
/* proAptiv & related cores use Config6 to enable the FTLB */
config = read_c0_config6();
/* Clear the old probability value */
@@ -561,6 +562,19 @@ static int set_ftlb_enable(struct cpuinfo_mips *c, int enable)
write_c0_config7(config | (calculate_ftlb_probability(c)
<< MIPS_CONF7_FTLBP_SHIFT));
break;
+ case CPU_LOONGSON3:
+ /* Flush ITLB, DTLB, VTLB and FTLB */
+ write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB |
+ LOONGSON_DIAG_VTLB | LOONGSON_DIAG_FTLB);
+ /* Loongson-3 cores use Config6 to enable the FTLB */
+ config = read_c0_config6();
+ if (enable)
+ /* Enable FTLB */
+ write_c0_config6(config & ~MIPS_CONF6_FTLBDIS);
+ else
+ /* Disable FTLB */
+ write_c0_config6(config | MIPS_CONF6_FTLBDIS);
+ break;
default:
return 1;
}
@@ -634,6 +648,8 @@ static inline unsigned int decode_config1(struct cpuinfo_mips *c)
if (config1 & MIPS_CONF1_MD)
c->ases |= MIPS_ASE_MDMX;
+ if (config1 & MIPS_CONF1_PC)
+ c->options |= MIPS_CPU_PERF;
if (config1 & MIPS_CONF1_WR)
c->options |= MIPS_CPU_WATCH;
if (config1 & MIPS_CONF1_CA)
@@ -673,18 +689,25 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
if (config3 & MIPS_CONF3_SM) {
c->ases |= MIPS_ASE_SMARTMIPS;
- c->options |= MIPS_CPU_RIXI;
+ c->options |= MIPS_CPU_RIXI | MIPS_CPU_CTXTC;
}
if (config3 & MIPS_CONF3_RXI)
c->options |= MIPS_CPU_RIXI;
+ if (config3 & MIPS_CONF3_CTXTC)
+ c->options |= MIPS_CPU_CTXTC;
if (config3 & MIPS_CONF3_DSP)
c->ases |= MIPS_ASE_DSP;
- if (config3 & MIPS_CONF3_DSP2P)
+ if (config3 & MIPS_CONF3_DSP2P) {
c->ases |= MIPS_ASE_DSP2P;
+ if (cpu_has_mips_r6)
+ c->ases |= MIPS_ASE_DSP3;
+ }
if (config3 & MIPS_CONF3_VINT)
c->options |= MIPS_CPU_VINT;
if (config3 & MIPS_CONF3_VEIC)
c->options |= MIPS_CPU_VEIC;
+ if (config3 & MIPS_CONF3_LPA)
+ c->options |= MIPS_CPU_LPA;
if (config3 & MIPS_CONF3_MT)
c->ases |= MIPS_ASE_MIPSMT;
if (config3 & MIPS_CONF3_ULRI)
@@ -695,6 +718,10 @@ static inline unsigned int decode_config3(struct cpuinfo_mips *c)
c->ases |= MIPS_ASE_VZ;
if (config3 & MIPS_CONF3_SC)
c->options |= MIPS_CPU_SEGMENTS;
+ if (config3 & MIPS_CONF3_BI)
+ c->options |= MIPS_CPU_BADINSTR;
+ if (config3 & MIPS_CONF3_BP)
+ c->options |= MIPS_CPU_BADINSTRP;
if (config3 & MIPS_CONF3_MSA)
c->ases |= MIPS_ASE_MSA;
if (config3 & MIPS_CONF3_PW) {
@@ -715,6 +742,7 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c)
unsigned int newcf4;
unsigned int mmuextdef;
unsigned int ftlb_page = MIPS_CONF4_FTLBPAGESIZE;
+ unsigned long asid_mask;
config4 = read_c0_config4();
@@ -773,7 +801,20 @@ static inline unsigned int decode_config4(struct cpuinfo_mips *c)
}
}
- c->kscratch_mask = (config4 >> 16) & 0xff;
+ c->kscratch_mask = (config4 & MIPS_CONF4_KSCREXIST)
+ >> MIPS_CONF4_KSCREXIST_SHIFT;
+
+ asid_mask = MIPS_ENTRYHI_ASID;
+ if (config4 & MIPS_CONF4_AE)
+ asid_mask |= MIPS_ENTRYHI_ASIDX;
+ set_cpu_asid_mask(c, asid_mask);
+
+ /*
+ * Warn if the computed ASID mask doesn't match the mask the kernel
+ * is built for. This may indicate either a serious problem or an
+ * easy optimisation opportunity, but either way should be addressed.
+ */
+ WARN_ON(asid_mask != cpu_asid_mask(c));
return config4 & MIPS_CONF_M;
}
@@ -792,10 +833,10 @@ static inline unsigned int decode_config5(struct cpuinfo_mips *c)
c->options |= MIPS_CPU_MAAR;
if (config5 & MIPS_CONF5_LLB)
c->options |= MIPS_CPU_RW_LLB;
-#ifdef CONFIG_XPA
if (config5 & MIPS_CONF5_MVH)
- c->options |= MIPS_CPU_XPA;
-#endif
+ c->options |= MIPS_CPU_MVH;
+ if (cpu_has_mips_r6 && (config5 & MIPS_CONF5_VP))
+ c->options |= MIPS_CPU_VP;
return config5 & MIPS_CONF_M;
}
@@ -826,17 +867,43 @@ static void decode_configs(struct cpuinfo_mips *c)
if (ok)
ok = decode_config5(c);
- mips_probe_watch_registers(c);
-
- if (cpu_has_rixi) {
- /* Enable the RIXI exceptions */
- set_c0_pagegrain(PG_IEC);
- back_to_back_c0_hazard();
- /* Verify the IEC bit is set */
- if (read_c0_pagegrain() & PG_IEC)
- c->options |= MIPS_CPU_RIXIEX;
+ /* Probe the EBase.WG bit */
+ if (cpu_has_mips_r2_r6) {
+ u64 ebase;
+ unsigned int status;
+
+ /* {read,write}_c0_ebase_64() may be UNDEFINED prior to r6 */
+ ebase = cpu_has_mips64r6 ? read_c0_ebase_64()
+ : (s32)read_c0_ebase();
+ if (ebase & MIPS_EBASE_WG) {
+ /* WG bit already set, we can avoid the clumsy probe */
+ c->options |= MIPS_CPU_EBASE_WG;
+ } else {
+ /* It's UNDEFINED to change EBase while BEV=0 */
+ status = read_c0_status();
+ write_c0_status(status | ST0_BEV);
+ irq_enable_hazard();
+ /*
+ * On pre-r6 cores, this may well clobber the upper bits
+ * of EBase. This is hard to avoid without potentially
+ * hitting UNDEFINED dm*c0 behaviour if EBase is 32-bit.
+ */
+ if (cpu_has_mips64r6)
+ write_c0_ebase_64(ebase | MIPS_EBASE_WG);
+ else
+ write_c0_ebase(ebase | MIPS_EBASE_WG);
+ back_to_back_c0_hazard();
+ /* Restore BEV */
+ write_c0_status(status);
+ if (read_c0_ebase() & MIPS_EBASE_WG) {
+ c->options |= MIPS_CPU_EBASE_WG;
+ write_c0_ebase(ebase);
+ }
+ }
}
+ mips_probe_watch_registers(c);
+
#ifndef CONFIG_MIPS_CPS
if (cpu_has_mips_r2_r6) {
c->core = get_ebase_cpunum();
@@ -846,6 +913,235 @@ static void decode_configs(struct cpuinfo_mips *c)
#endif
}
+/*
+ * Probe for certain guest capabilities by writing config bits and reading back.
+ * Finally write back the original value.
+ */
+#define probe_gc0_config(name, maxconf, bits) \
+do { \
+ unsigned int tmp; \
+ tmp = read_gc0_##name(); \
+ write_gc0_##name(tmp | (bits)); \
+ back_to_back_c0_hazard(); \
+ maxconf = read_gc0_##name(); \
+ write_gc0_##name(tmp); \
+} while (0)
+
+/*
+ * Probe for dynamic guest capabilities by changing certain config bits and
+ * reading back to see if they change. Finally write back the original value.
+ */
+#define probe_gc0_config_dyn(name, maxconf, dynconf, bits) \
+do { \
+ maxconf = read_gc0_##name(); \
+ write_gc0_##name(maxconf ^ (bits)); \
+ back_to_back_c0_hazard(); \
+ dynconf = maxconf ^ read_gc0_##name(); \
+ write_gc0_##name(maxconf); \
+ maxconf |= dynconf; \
+} while (0)
+
+static inline unsigned int decode_guest_config0(struct cpuinfo_mips *c)
+{
+ unsigned int config0;
+
+ probe_gc0_config(config, config0, MIPS_CONF_M);
+
+ if (config0 & MIPS_CONF_M)
+ c->guest.conf |= BIT(1);
+ return config0 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_guest_config1(struct cpuinfo_mips *c)
+{
+ unsigned int config1, config1_dyn;
+
+ probe_gc0_config_dyn(config1, config1, config1_dyn,
+ MIPS_CONF_M | MIPS_CONF1_PC | MIPS_CONF1_WR |
+ MIPS_CONF1_FP);
+
+ if (config1 & MIPS_CONF1_FP)
+ c->guest.options |= MIPS_CPU_FPU;
+ if (config1_dyn & MIPS_CONF1_FP)
+ c->guest.options_dyn |= MIPS_CPU_FPU;
+
+ if (config1 & MIPS_CONF1_WR)
+ c->guest.options |= MIPS_CPU_WATCH;
+ if (config1_dyn & MIPS_CONF1_WR)
+ c->guest.options_dyn |= MIPS_CPU_WATCH;
+
+ if (config1 & MIPS_CONF1_PC)
+ c->guest.options |= MIPS_CPU_PERF;
+ if (config1_dyn & MIPS_CONF1_PC)
+ c->guest.options_dyn |= MIPS_CPU_PERF;
+
+ if (config1 & MIPS_CONF_M)
+ c->guest.conf |= BIT(2);
+ return config1 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_guest_config2(struct cpuinfo_mips *c)
+{
+ unsigned int config2;
+
+ probe_gc0_config(config2, config2, MIPS_CONF_M);
+
+ if (config2 & MIPS_CONF_M)
+ c->guest.conf |= BIT(3);
+ return config2 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_guest_config3(struct cpuinfo_mips *c)
+{
+ unsigned int config3, config3_dyn;
+
+ probe_gc0_config_dyn(config3, config3, config3_dyn,
+ MIPS_CONF_M | MIPS_CONF3_MSA | MIPS_CONF3_CTXTC);
+
+ if (config3 & MIPS_CONF3_CTXTC)
+ c->guest.options |= MIPS_CPU_CTXTC;
+ if (config3_dyn & MIPS_CONF3_CTXTC)
+ c->guest.options_dyn |= MIPS_CPU_CTXTC;
+
+ if (config3 & MIPS_CONF3_PW)
+ c->guest.options |= MIPS_CPU_HTW;
+
+ if (config3 & MIPS_CONF3_SC)
+ c->guest.options |= MIPS_CPU_SEGMENTS;
+
+ if (config3 & MIPS_CONF3_BI)
+ c->guest.options |= MIPS_CPU_BADINSTR;
+ if (config3 & MIPS_CONF3_BP)
+ c->guest.options |= MIPS_CPU_BADINSTRP;
+
+ if (config3 & MIPS_CONF3_MSA)
+ c->guest.ases |= MIPS_ASE_MSA;
+ if (config3_dyn & MIPS_CONF3_MSA)
+ c->guest.ases_dyn |= MIPS_ASE_MSA;
+
+ if (config3 & MIPS_CONF_M)
+ c->guest.conf |= BIT(4);
+ return config3 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_guest_config4(struct cpuinfo_mips *c)
+{
+ unsigned int config4;
+
+ probe_gc0_config(config4, config4,
+ MIPS_CONF_M | MIPS_CONF4_KSCREXIST);
+
+ c->guest.kscratch_mask = (config4 & MIPS_CONF4_KSCREXIST)
+ >> MIPS_CONF4_KSCREXIST_SHIFT;
+
+ if (config4 & MIPS_CONF_M)
+ c->guest.conf |= BIT(5);
+ return config4 & MIPS_CONF_M;
+}
+
+static inline unsigned int decode_guest_config5(struct cpuinfo_mips *c)
+{
+ unsigned int config5, config5_dyn;
+
+ probe_gc0_config_dyn(config5, config5, config5_dyn,
+ MIPS_CONF_M | MIPS_CONF5_MRP);
+
+ if (config5 & MIPS_CONF5_MRP)
+ c->guest.options |= MIPS_CPU_MAAR;
+ if (config5_dyn & MIPS_CONF5_MRP)
+ c->guest.options_dyn |= MIPS_CPU_MAAR;
+
+ if (config5 & MIPS_CONF5_LLB)
+ c->guest.options |= MIPS_CPU_RW_LLB;
+
+ if (config5 & MIPS_CONF_M)
+ c->guest.conf |= BIT(6);
+ return config5 & MIPS_CONF_M;
+}
+
+static inline void decode_guest_configs(struct cpuinfo_mips *c)
+{
+ unsigned int ok;
+
+ ok = decode_guest_config0(c);
+ if (ok)
+ ok = decode_guest_config1(c);
+ if (ok)
+ ok = decode_guest_config2(c);
+ if (ok)
+ ok = decode_guest_config3(c);
+ if (ok)
+ ok = decode_guest_config4(c);
+ if (ok)
+ decode_guest_config5(c);
+}
+
+static inline void cpu_probe_guestctl0(struct cpuinfo_mips *c)
+{
+ unsigned int guestctl0, temp;
+
+ guestctl0 = read_c0_guestctl0();
+
+ if (guestctl0 & MIPS_GCTL0_G0E)
+ c->options |= MIPS_CPU_GUESTCTL0EXT;
+ if (guestctl0 & MIPS_GCTL0_G1)
+ c->options |= MIPS_CPU_GUESTCTL1;
+ if (guestctl0 & MIPS_GCTL0_G2)
+ c->options |= MIPS_CPU_GUESTCTL2;
+ if (!(guestctl0 & MIPS_GCTL0_RAD)) {
+ c->options |= MIPS_CPU_GUESTID;
+
+ /*
+ * Probe for Direct Root to Guest (DRG). Set GuestCtl1.RID = 0
+ * first, otherwise all data accesses will be fully virtualised
+ * as if they were performed by guest mode.
+ */
+ write_c0_guestctl1(0);
+ tlbw_use_hazard();
+
+ write_c0_guestctl0(guestctl0 | MIPS_GCTL0_DRG);
+ back_to_back_c0_hazard();
+ temp = read_c0_guestctl0();
+
+ if (temp & MIPS_GCTL0_DRG) {
+ write_c0_guestctl0(guestctl0);
+ c->options |= MIPS_CPU_DRG;
+ }
+ }
+}
+
+static inline void cpu_probe_guestctl1(struct cpuinfo_mips *c)
+{
+ if (cpu_has_guestid) {
+ /* determine the number of bits of GuestID available */
+ write_c0_guestctl1(MIPS_GCTL1_ID);
+ back_to_back_c0_hazard();
+ c->guestid_mask = (read_c0_guestctl1() & MIPS_GCTL1_ID)
+ >> MIPS_GCTL1_ID_SHIFT;
+ write_c0_guestctl1(0);
+ }
+}
+
+static inline void cpu_probe_gtoffset(struct cpuinfo_mips *c)
+{
+ /* determine the number of bits of GTOffset available */
+ write_c0_gtoffset(0xffffffff);
+ back_to_back_c0_hazard();
+ c->gtoffset_mask = read_c0_gtoffset();
+ write_c0_gtoffset(0);
+}
+
+static inline void cpu_probe_vz(struct cpuinfo_mips *c)
+{
+ cpu_probe_guestctl0(c);
+ if (cpu_has_guestctl1)
+ cpu_probe_guestctl1(c);
+
+ cpu_probe_gtoffset(c);
+
+ decode_guest_configs(c);
+}
+
#define R4K_OPTS (MIPS_CPU_TLB | MIPS_CPU_4KEX | MIPS_CPU_4K_CACHE \
| MIPS_CPU_COUNTER)
@@ -1172,7 +1468,7 @@ static inline void cpu_probe_legacy(struct cpuinfo_mips *c, unsigned int cpu)
set_isa(c, MIPS_CPU_ISA_III);
c->fpu_msk31 |= FPU_CSR_CONDX;
break;
- case PRID_REV_LOONGSON3A:
+ case PRID_REV_LOONGSON3A_R1:
c->cputype = CPU_LOONGSON3;
__cpu_name[cpu] = "ICT Loongson-3";
set_elf_platform(cpu, "loongson3a");
@@ -1314,6 +1610,10 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_P5600;
__cpu_name[cpu] = "MIPS P5600";
break;
+ case PRID_IMP_P6600:
+ c->cputype = CPU_P6600;
+ __cpu_name[cpu] = "MIPS P6600";
+ break;
case PRID_IMP_I6400:
c->cputype = CPU_I6400;
__cpu_name[cpu] = "MIPS I6400";
@@ -1322,6 +1622,10 @@ static inline void cpu_probe_mips(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_M5150;
__cpu_name[cpu] = "MIPS M5150";
break;
+ case PRID_IMP_M6250:
+ c->cputype = CPU_M6250;
+ __cpu_name[cpu] = "MIPS M6250";
+ break;
}
decode_configs(c);
@@ -1435,6 +1739,7 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
c->cputype = CPU_BMIPS4380;
__cpu_name[cpu] = "Broadcom BMIPS4380";
set_elf_platform(cpu, "bmips4380");
+ c->options |= MIPS_CPU_RIXI;
} else {
c->cputype = CPU_BMIPS4350;
__cpu_name[cpu] = "Broadcom BMIPS4350";
@@ -1445,9 +1750,12 @@ static inline void cpu_probe_broadcom(struct cpuinfo_mips *c, unsigned int cpu)
case PRID_IMP_BMIPS5000:
case PRID_IMP_BMIPS5200:
c->cputype = CPU_BMIPS5000;
- __cpu_name[cpu] = "Broadcom BMIPS5000";
+ if ((c->processor_id & PRID_IMP_MASK) == PRID_IMP_BMIPS5200)
+ __cpu_name[cpu] = "Broadcom BMIPS5200";
+ else
+ __cpu_name[cpu] = "Broadcom BMIPS5000";
set_elf_platform(cpu, "bmips5000");
- c->options |= MIPS_CPU_ULRI;
+ c->options |= MIPS_CPU_ULRI | MIPS_CPU_RIXI;
break;
}
}
@@ -1481,6 +1789,8 @@ platform:
set_elf_platform(cpu, "octeon2");
break;
case PRID_IMP_CAVIUM_CN70XX:
+ case PRID_IMP_CAVIUM_CN73XX:
+ case PRID_IMP_CAVIUM_CNF75XX:
case PRID_IMP_CAVIUM_CN78XX:
c->cputype = CPU_CAVIUM_OCTEON3;
__cpu_name[cpu] = "Cavium Octeon III";
@@ -1493,6 +1803,29 @@ platform:
}
}
+static inline void cpu_probe_loongson(struct cpuinfo_mips *c, unsigned int cpu)
+{
+ switch (c->processor_id & PRID_IMP_MASK) {
+ case PRID_IMP_LOONGSON_64: /* Loongson-2/3 */
+ switch (c->processor_id & PRID_REV_MASK) {
+ case PRID_REV_LOONGSON3A_R2:
+ c->cputype = CPU_LOONGSON3;
+ __cpu_name[cpu] = "ICT Loongson-3";
+ set_elf_platform(cpu, "loongson3a");
+ set_isa(c, MIPS_CPU_ISA_M64R2);
+ break;
+ }
+
+ decode_configs(c);
+ c->options |= MIPS_CPU_TLBINV | MIPS_CPU_LDPTE;
+ c->writecombine = _CACHE_UNCACHED_ACCELERATED;
+ break;
+ default:
+ panic("Unknown Loongson Processor ID!");
+ break;
+ }
+}
+
static inline void cpu_probe_ingenic(struct cpuinfo_mips *c, unsigned int cpu)
{
decode_configs(c);
@@ -1640,6 +1973,9 @@ void cpu_probe(void)
case PRID_COMP_CAVIUM:
cpu_probe_cavium(c, cpu);
break;
+ case PRID_COMP_LOONGSON:
+ cpu_probe_loongson(c, cpu);
+ break;
case PRID_COMP_INGENIC_D0:
case PRID_COMP_INGENIC_D1:
case PRID_COMP_INGENIC_E1:
@@ -1660,6 +1996,15 @@ void cpu_probe(void)
*/
BUG_ON(current_cpu_type() != c->cputype);
+ if (cpu_has_rixi) {
+ /* Enable the RIXI exceptions */
+ set_c0_pagegrain(PG_IEC);
+ back_to_back_c0_hazard();
+ /* Verify the IEC bit is set */
+ if (read_c0_pagegrain() & PG_IEC)
+ c->options |= MIPS_CPU_RIXIEX;
+ }
+
if (mips_fpu_disabled)
c->options &= ~MIPS_CPU_FPU;
@@ -1699,6 +2044,9 @@ void cpu_probe(void)
elf_hwcap |= HWCAP_MIPS_MSA;
}
+ if (cpu_has_vz)
+ cpu_probe_vz(c);
+
cpu_probe_vmbits(c);
#ifdef CONFIG_64BIT
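
The probe_gc0_config_dyn() macro added for VZ discovers which guest Config bits are writable by flipping them, reading back, and restoring the original value; whatever changed is recorded as a dynamic capability, and maxconf ends up with "set now or settable". A minimal sketch of that flip-and-compare idea against a simulated register (the register model below is hypothetical: bits 0-3 writable, bits 4-7 hard-wired):

#include <stdint.h>
#include <stdio.h>

static uint32_t hw_reg = 0x35;			/* current value */
static const uint32_t hw_rw_mask = 0x0f;	/* only these bits accept writes */

static uint32_t reg_read(void) { return hw_reg; }

static void reg_write(uint32_t v)
{
	hw_reg = (hw_reg & ~hw_rw_mask) | (v & hw_rw_mask);
}

int main(void)
{
	uint32_t bits = 0xff;		/* capability bits being probed */
	uint32_t orig, maxconf, dynconf;

	orig = reg_read();
	reg_write(orig ^ bits);		/* try to flip them all */
	dynconf = orig ^ reg_read();	/* bits that actually changed */
	reg_write(orig);		/* restore the original value */
	maxconf = orig | dynconf;	/* set now, or settable dynamically */

	printf("set now %#x, writable %#x, maxconf %#x\n",
	       orig & bits, dynconf, maxconf);
	return 0;
}

The real macro additionally places a back_to_back_c0_hazard() between the write and the read-back, since CP0 updates are not instantaneous.
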
diff --git a/arch/mips/kernel/crash.c b/arch/mips/kernel/crash.c
index d434d5d5ae6e..610f0f3bdb34 100644
--- a/arch/mips/kernel/crash.c
+++ b/arch/mips/kernel/crash.c
@@ -14,12 +14,22 @@ static int crashing_cpu = -1;
static cpumask_t cpus_in_crash = CPU_MASK_NONE;
#ifdef CONFIG_SMP
-static void crash_shutdown_secondary(void *ignore)
+static void crash_shutdown_secondary(void *passed_regs)
{
- struct pt_regs *regs;
+ struct pt_regs *regs = passed_regs;
int cpu = smp_processor_id();
- regs = task_pt_regs(current);
+ /*
+ * If we are passed registers, use those. Otherwise get the
+ * regs from the last interrupt, which should be correct, as
+ * we are in an interrupt. But if the regs are not there,
+ * pull them from the top of the stack. They are probably
+ * wrong, but we need something to keep from crashing again.
+ */
+ if (!regs)
+ regs = get_irq_regs();
+ if (!regs)
+ regs = task_pt_regs(current);
if (!cpu_online(cpu))
return;
diff --git a/arch/mips/kernel/csrc-r4k.c b/arch/mips/kernel/csrc-r4k.c
index 1f910563fdf6..d76275da54cb 100644
--- a/arch/mips/kernel/csrc-r4k.c
+++ b/arch/mips/kernel/csrc-r4k.c
@@ -23,7 +23,7 @@ static struct clocksource clocksource_mips = {
.flags = CLOCK_SOURCE_IS_CONTINUOUS,
};
-static u64 notrace r4k_read_sched_clock(void)
+static u64 __maybe_unused notrace r4k_read_sched_clock(void)
{
return read_c0_count();
}
@@ -82,7 +82,9 @@ int __init init_r4k_clocksource(void)
clocksource_register_hz(&clocksource_mips, mips_hpt_frequency);
+#ifndef CONFIG_CPU_FREQ
sched_clock_register(r4k_read_sched_clock, 32, mips_hpt_frequency);
+#endif
return 0;
}
diff --git a/arch/mips/kernel/elf.c b/arch/mips/kernel/elf.c
index c3c234dc0c07..6430bff21fff 100644
--- a/arch/mips/kernel/elf.c
+++ b/arch/mips/kernel/elf.c
@@ -8,9 +8,12 @@
* option) any later version.
*/
+#include <linux/binfmts.h>
#include <linux/elf.h>
+#include <linux/export.h>
#include <linux/sched.h>
+#include <asm/cpu-features.h>
#include <asm/cpu-info.h>
/* Whether to accept legacy-NaN and 2008-NaN user binaries. */
@@ -88,7 +91,7 @@ int arch_elf_pt_proc(void *_ehdr, void *_phdr, struct file *elf,
elf32 = ehdr->e32.e_ident[EI_CLASS] == ELFCLASS32;
flags = elf32 ? ehdr->e32.e_flags : ehdr->e64.e_flags;
- /* Lets see if this is an O32 ELF */
+ /* Let's see if this is an O32 ELF */
if (elf32) {
if (flags & EF_MIPS_FP64) {
/*
@@ -179,7 +182,7 @@ int arch_check_elf(void *_ehdr, bool has_interpreter, void *_interp_ehdr,
return -ELIBBAD;
}
- if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
+ if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
return 0;
fp_abi = state->fp_abi;
@@ -285,7 +288,7 @@ void mips_set_personality_fp(struct arch_elf_state *state)
* not be worried about N32/N64 binaries.
*/
- if (!config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
+ if (!IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
return;
switch (state->overall_fp_mode) {
@@ -326,3 +329,19 @@ void mips_set_personality_nan(struct arch_elf_state *state)
BUG();
}
}
+
+int mips_elf_read_implies_exec(void *elf_ex, int exstack)
+{
+ if (exstack != EXSTACK_DISABLE_X) {
+ /* The binary doesn't request a non-executable stack */
+ return 1;
+ }
+
+ if (!cpu_has_rixi) {
+ /* The CPU doesn't support non-executable memory */
+ return 1;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL(mips_elf_read_implies_exec);
diff --git a/arch/mips/kernel/genex.S b/arch/mips/kernel/genex.S
index baa7b6fc0a60..17326a90d53c 100644
--- a/arch/mips/kernel/genex.S
+++ b/arch/mips/kernel/genex.S
@@ -130,7 +130,7 @@ LEAF(__r4k_wait)
/* end of rollback region (the region size must be power of two) */
1:
jr ra
- nop
+ nop
.set pop
END(__r4k_wait)
@@ -172,7 +172,7 @@ NESTED(handle_int, PT_SIZE, sp)
mfc0 k0, CP0_EPC
.set noreorder
j k0
- rfe
+ rfe
#else
and k0, ST0_IE
bnez k0, 1f
@@ -189,7 +189,7 @@ NESTED(handle_int, PT_SIZE, sp)
LONG_L s0, TI_REGS($28)
LONG_S sp, TI_REGS($28)
PTR_LA ra, ret_from_irq
- PTR_LA v0, plat_irq_dispatch
+ PTR_LA v0, plat_irq_dispatch
jr v0
#ifdef CONFIG_CPU_MICROMIPS
nop
@@ -292,7 +292,7 @@ ejtag_return:
MFC0 k0, CP0_DESAVE
.set mips32
deret
- .set pop
+ .set pop
END(ejtag_debug_handler)
/*
@@ -329,10 +329,10 @@ NESTED(nmi_handler, PT_SIZE, sp)
* Clear BEV - required for page fault exception handler to work
*/
mfc0 k0, CP0_STATUS
- ori k0, k0, ST0_EXL
+ ori k0, k0, ST0_EXL
li k1, ~(ST0_BEV | ST0_ERL)
- and k0, k0, k1
- mtc0 k0, CP0_STATUS
+ and k0, k0, k1
+ mtc0 k0, CP0_STATUS
_ehb
SAVE_ALL
move a0, sp
@@ -396,7 +396,7 @@ NESTED(nmi_handler, PT_SIZE, sp)
.macro __BUILD_count exception
LONG_L t0,exception_count_\exception
- LONG_ADDIU t0, 1
+ LONG_ADDIU t0, 1
LONG_S t0,exception_count_\exception
.comm exception_count\exception, 8, 8
.endm
@@ -455,10 +455,10 @@ NESTED(nmi_handler, PT_SIZE, sp)
.set noreorder
/* check if TLB contains a entry for EPC */
MFC0 k1, CP0_ENTRYHI
- andi k1, 0xff /* ASID_MASK */
+ andi k1, MIPS_ENTRYHI_ASID | MIPS_ENTRYHI_ASIDX
MFC0 k0, CP0_EPC
- PTR_SRL k0, _PAGE_SHIFT + 1
- PTR_SLL k0, _PAGE_SHIFT + 1
+ PTR_SRL k0, _PAGE_SHIFT + 1
+ PTR_SLL k0, _PAGE_SHIFT + 1
or k1, k0
MTC0 k1, CP0_ENTRYHI
mtc0_tlbw_hazard
@@ -478,27 +478,27 @@ NESTED(nmi_handler, PT_SIZE, sp)
/* microMIPS: 0x007d6b3c: rdhwr v1,$29 */
MFC0 k1, CP0_EPC
#if defined(CONFIG_CPU_MICROMIPS) || defined(CONFIG_CPU_MIPS32_R2) || defined(CONFIG_CPU_MIPS64_R2)
- and k0, k1, 1
- beqz k0, 1f
- xor k1, k0
- lhu k0, (k1)
- lhu k1, 2(k1)
- ins k1, k0, 16, 16
- lui k0, 0x007d
- b docheck
- ori k0, 0x6b3c
+ and k0, k1, 1
+ beqz k0, 1f
+ xor k1, k0
+ lhu k0, (k1)
+ lhu k1, 2(k1)
+ ins k1, k0, 16, 16
+ lui k0, 0x007d
+ b docheck
+ ori k0, 0x6b3c
1:
- lui k0, 0x7c03
- lw k1, (k1)
- ori k0, 0xe83b
+ lui k0, 0x7c03
+ lw k1, (k1)
+ ori k0, 0xe83b
#else
- andi k0, k1, 1
- bnez k0, handle_ri
- lui k0, 0x7c03
- lw k1, (k1)
- ori k0, 0xe83b
+ andi k0, k1, 1
+ bnez k0, handle_ri
+ lui k0, 0x7c03
+ lw k1, (k1)
+ ori k0, 0xe83b
#endif
- .set reorder
+ .set reorder
docheck:
bne k0, k1, handle_ri /* if not ours */
diff --git a/arch/mips/kernel/head.S b/arch/mips/kernel/head.S
index 4e4cc5b9a771..cf052204eb0a 100644
--- a/arch/mips/kernel/head.S
+++ b/arch/mips/kernel/head.S
@@ -21,7 +21,6 @@
#include <asm/asmmacro.h>
#include <asm/irqflags.h>
#include <asm/regdef.h>
-#include <asm/pgtable-bits.h>
#include <asm/mipsregs.h>
#include <asm/stackframe.h>
@@ -94,21 +93,24 @@ NESTED(kernel_entry, 16, sp) # kernel entry point
jr t0
0:
+#ifdef CONFIG_USE_OF
#ifdef CONFIG_MIPS_RAW_APPENDED_DTB
- PTR_LA t0, __appended_dtb
+ PTR_LA t2, __appended_dtb
#ifdef CONFIG_CPU_BIG_ENDIAN
li t1, 0xd00dfeed
#else
li t1, 0xedfe0dd0
#endif
- lw t2, (t0)
- bne t1, t2, not_found
- nop
+ lw t0, (t2)
+ beq t0, t1, dtb_found
+#endif
+ li t1, -2
+ beq a0, t1, dtb_found
+ move t2, a1
- move a1, t0
- PTR_LI a0, -2
-not_found:
+ li t2, 0
+dtb_found:
#endif
PTR_LA t0, __bss_start # clear .bss
LONG_S zero, (t0)
@@ -123,6 +125,10 @@ not_found:
LONG_S a2, fw_arg2
LONG_S a3, fw_arg3
+#ifdef CONFIG_USE_OF
+ LONG_S t2, fw_passed_dtb
+#endif
+
MTC0 zero, CP0_CONTEXT # clear context register
PTR_LA $28, init_thread_union
/* Set the SP after an empty pt_regs. */
@@ -132,7 +138,27 @@ not_found:
set_saved_sp sp, t0, t1
PTR_SUBU sp, 4 * SZREG # init stack pointer
+#ifdef CONFIG_RELOCATABLE
+ /* Copy kernel and apply the relocations */
+ jal relocate_kernel
+
+ /* Repoint the sp into the new kernel image */
+ PTR_LI sp, _THREAD_SIZE - 32 - PT_SIZE
+ PTR_ADDU sp, $28
+ set_saved_sp sp, t0, t1
+ PTR_SUBU sp, 4 * SZREG # init stack pointer
+
+ /*
+ * relocate_kernel returns the entry point either
+ * in the relocated kernel or the original if for
+ * some reason relocation failed - jump there now
+ * with instruction hazard barrier because of the
+ * newly sync'd icache.
+ */
+ jr.hb v0
+#else
j start_kernel
+#endif
END(kernel_entry)
#ifdef CONFIG_SMP
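
The reworked head.S code accepts a DTB either appended to the kernel image or handed over per the UHI convention (a0 == -2 with the blob's address in a1), and stashes the winner in fw_passed_dtb. The appended case is detected purely by the flattened-device-tree magic: 0xd00dfeed in big-endian byte order, which a little-endian load sees as 0xedfe0dd0. A small sketch of that check:

#include <stdint.h>
#include <stdio.h>
#include <string.h>

#define FDT_MAGIC	0xd00dfeedu	/* big-endian on the wire */

static int looks_like_dtb(const void *blob)
{
	uint32_t word;

	memcpy(&word, blob, sizeof(word));	/* native-endian load, like lw */
	return word == FDT_MAGIC || word == 0xedfe0dd0u;
}

int main(void)
{
	/* First bytes of any FDT blob, and of something that is not one */
	static const uint8_t dtb_header[4] = { 0xd0, 0x0d, 0xfe, 0xed };
	static const uint8_t not_a_dtb[4] = { 0x7f, 'E', 'L', 'F' };

	printf("dtb_header: %d\n", looks_like_dtb(dtb_header));
	printf("not_a_dtb:  %d\n", looks_like_dtb(not_a_dtb));
	return 0;
}

The assembly only ever compares against the one constant that matches the configured endianness; accepting both here just keeps the demo host-independent.
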
diff --git a/arch/mips/kernel/idle.c b/arch/mips/kernel/idle.c
index 46794d64c0bf..60ab4c44d305 100644
--- a/arch/mips/kernel/idle.c
+++ b/arch/mips/kernel/idle.c
@@ -181,6 +181,11 @@ void __init check_wait(void)
case CPU_XLP:
cpu_wait = r4k_wait;
break;
+ case CPU_LOONGSON3:
+ if ((c->processor_id & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2)
+ cpu_wait = r4k_wait;
+ break;
+
case CPU_BMIPS5000:
cpu_wait = r4k_wait_irqoff;
break;
diff --git a/arch/mips/kernel/irq.c b/arch/mips/kernel/irq.c
index 8eb5af805964..f25f7eab7307 100644
--- a/arch/mips/kernel/irq.c
+++ b/arch/mips/kernel/irq.c
@@ -54,6 +54,9 @@ void __init init_IRQ(void)
for (i = 0; i < NR_IRQS; i++)
irq_set_noprobe(i);
+ if (cpu_has_veic)
+ clear_c0_status(ST0_IM);
+
arch_init_irq();
}
diff --git a/arch/mips/kernel/mips-cm.c b/arch/mips/kernel/mips-cm.c
index 1448c1f43d4e..659e6d3ae335 100644
--- a/arch/mips/kernel/mips-cm.c
+++ b/arch/mips/kernel/mips-cm.c
@@ -24,7 +24,7 @@ static char *cm2_tr[8] = {
"0x04", "cpc", "0x06", "0x07"
};
-/* CM3 Tag ECC transation type */
+/* CM3 Tag ECC transaction type */
static char *cm3_tr[16] = {
[0x0] = "ReqNoData",
[0x1] = "0x1",
@@ -251,7 +251,7 @@ int mips_cm_probe(void)
mips_cm_probe_l2sync();
/* determine register width for this CM */
- mips_cm_is64 = config_enabled(CONFIG_64BIT) && (mips_cm_revision() >= CM_REV_CM3);
+ mips_cm_is64 = IS_ENABLED(CONFIG_64BIT) && (mips_cm_revision() >= CM_REV_CM3);
for_each_possible_cpu(cpu)
spin_lock_init(&per_cpu(cm_core_lock, cpu));
diff --git a/arch/mips/kernel/mips-r2-to-r6-emul.c b/arch/mips/kernel/mips-r2-to-r6-emul.c
index 1f5aac7f9ec3..c3372cac6db2 100644
--- a/arch/mips/kernel/mips-r2-to-r6-emul.c
+++ b/arch/mips/kernel/mips-r2-to-r6-emul.c
@@ -28,6 +28,7 @@
#include <asm/inst.h>
#include <asm/mips-r2-to-r6-emul.h>
#include <asm/local.h>
+#include <asm/mipsregs.h>
#include <asm/ptrace.h>
#include <asm/uaccess.h>
@@ -83,7 +84,7 @@ static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
(s32)MIPSInst_SIMM(ir);
return 0;
case daddiu_op:
- if (config_enabled(CONFIG_32BIT))
+ if (IS_ENABLED(CONFIG_32BIT))
break;
if (MIPSInst_RT(ir))
@@ -142,7 +143,7 @@ static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
(u32)regs->regs[MIPSInst_RT(ir)]);
return 0;
case dsll_op:
- if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
+ if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir))
break;
if (MIPSInst_RD(ir))
@@ -151,7 +152,7 @@ static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
MIPSInst_FD(ir));
return 0;
case dsrl_op:
- if (config_enabled(CONFIG_32BIT) || MIPSInst_RS(ir))
+ if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_RS(ir))
break;
if (MIPSInst_RD(ir))
@@ -160,7 +161,7 @@ static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
MIPSInst_FD(ir));
return 0;
case daddu_op:
- if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
+ if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir))
break;
if (MIPSInst_RD(ir))
@@ -169,7 +170,7 @@ static inline int mipsr6_emul(struct pt_regs *regs, u32 ir)
(u64)regs->regs[MIPSInst_RT(ir)];
return 0;
case dsubu_op:
- if (config_enabled(CONFIG_32BIT) || MIPSInst_FD(ir))
+ if (IS_ENABLED(CONFIG_32BIT) || MIPSInst_FD(ir))
break;
if (MIPSInst_RD(ir))
@@ -282,7 +283,7 @@ static int jr_func(struct pt_regs *regs, u32 ir)
err = mipsr6_emul(regs, nir);
if (err > 0) {
regs->cp0_epc = nepc;
- err = mips_dsemul(regs, nir, cepc);
+ err = mips_dsemul(regs, nir, epc, cepc);
if (err == SIGILL)
err = SIGEMT;
MIPS_R2_STATS(dsemul);
@@ -497,7 +498,7 @@ static int dmult_func(struct pt_regs *regs, u32 ir)
s64 res;
s64 rt, rs;
- if (config_enabled(CONFIG_32BIT))
+ if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
rt = regs->regs[MIPSInst_RT(ir)];
@@ -529,7 +530,7 @@ static int dmultu_func(struct pt_regs *regs, u32 ir)
u64 res;
u64 rt, rs;
- if (config_enabled(CONFIG_32BIT))
+ if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
rt = regs->regs[MIPSInst_RT(ir)];
@@ -560,7 +561,7 @@ static int ddiv_func(struct pt_regs *regs, u32 ir)
{
s64 rt, rs;
- if (config_enabled(CONFIG_32BIT))
+ if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
rt = regs->regs[MIPSInst_RT(ir)];
@@ -585,7 +586,7 @@ static int ddivu_func(struct pt_regs *regs, u32 ir)
{
u64 rt, rs;
- if (config_enabled(CONFIG_32BIT))
+ if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
rt = regs->regs[MIPSInst_RT(ir)];
@@ -824,7 +825,7 @@ static int dclz_func(struct pt_regs *regs, u32 ir)
u64 res;
u64 rs;
- if (config_enabled(CONFIG_32BIT))
+ if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
if (!MIPSInst_RD(ir))
@@ -851,7 +852,7 @@ static int dclo_func(struct pt_regs *regs, u32 ir)
u64 res;
u64 rs;
- if (config_enabled(CONFIG_32BIT))
+ if (IS_ENABLED(CONFIG_32BIT))
return SIGILL;
if (!MIPSInst_RD(ir))
@@ -940,42 +941,42 @@ repeat:
switch (rt) {
case tgei_op:
if ((long)regs->regs[rs] >= MIPSInst_SIMM(inst))
- do_trap_or_bp(regs, 0, "TGEI");
+ do_trap_or_bp(regs, 0, 0, "TGEI");
MIPS_R2_STATS(traps);
break;
case tgeiu_op:
if (regs->regs[rs] >= MIPSInst_UIMM(inst))
- do_trap_or_bp(regs, 0, "TGEIU");
+ do_trap_or_bp(regs, 0, 0, "TGEIU");
MIPS_R2_STATS(traps);
break;
case tlti_op:
if ((long)regs->regs[rs] < MIPSInst_SIMM(inst))
- do_trap_or_bp(regs, 0, "TLTI");
+ do_trap_or_bp(regs, 0, 0, "TLTI");
MIPS_R2_STATS(traps);
break;
case tltiu_op:
if (regs->regs[rs] < MIPSInst_UIMM(inst))
- do_trap_or_bp(regs, 0, "TLTIU");
+ do_trap_or_bp(regs, 0, 0, "TLTIU");
MIPS_R2_STATS(traps);
break;
case teqi_op:
if (regs->regs[rs] == MIPSInst_SIMM(inst))
- do_trap_or_bp(regs, 0, "TEQI");
+ do_trap_or_bp(regs, 0, 0, "TEQI");
MIPS_R2_STATS(traps);
break;
case tnei_op:
if (regs->regs[rs] != MIPSInst_SIMM(inst))
- do_trap_or_bp(regs, 0, "TNEI");
+ do_trap_or_bp(regs, 0, 0, "TNEI");
MIPS_R2_STATS(traps);
@@ -1032,7 +1033,7 @@ repeat:
if (nir) {
err = mipsr6_emul(regs, nir);
if (err > 0) {
- err = mips_dsemul(regs, nir, cpc);
+ err = mips_dsemul(regs, nir, epc, cpc);
if (err == SIGILL)
err = SIGEMT;
MIPS_R2_STATS(dsemul);
@@ -1081,7 +1082,7 @@ repeat:
if (nir) {
err = mipsr6_emul(regs, nir);
if (err > 0) {
- err = mips_dsemul(regs, nir, cpc);
+ err = mips_dsemul(regs, nir, epc, cpc);
if (err == SIGILL)
err = SIGEMT;
MIPS_R2_STATS(dsemul);
@@ -1148,7 +1149,7 @@ repeat:
if (nir) {
err = mipsr6_emul(regs, nir);
if (err > 0) {
- err = mips_dsemul(regs, nir, cpc);
+ err = mips_dsemul(regs, nir, epc, cpc);
if (err == SIGILL)
err = SIGEMT;
MIPS_R2_STATS(dsemul);
@@ -1251,10 +1252,10 @@ fpu_emul:
" j 10b\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
- " .word 1b,8b\n"
- " .word 2b,8b\n"
- " .word 3b,8b\n"
- " .word 4b,8b\n"
+ STR(PTR) " 1b,8b\n"
+ STR(PTR) " 2b,8b\n"
+ STR(PTR) " 3b,8b\n"
+ STR(PTR) " 4b,8b\n"
" .previous\n"
" .set pop\n"
: "+&r"(rt), "=&r"(rs),
@@ -1326,10 +1327,10 @@ fpu_emul:
" j 10b\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
- " .word 1b,8b\n"
- " .word 2b,8b\n"
- " .word 3b,8b\n"
- " .word 4b,8b\n"
+ STR(PTR) " 1b,8b\n"
+ STR(PTR) " 2b,8b\n"
+ STR(PTR) " 3b,8b\n"
+ STR(PTR) " 4b,8b\n"
" .previous\n"
" .set pop\n"
: "+&r"(rt), "=&r"(rs),
@@ -1397,10 +1398,10 @@ fpu_emul:
" j 9b\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
- " .word 1b,8b\n"
- " .word 2b,8b\n"
- " .word 3b,8b\n"
- " .word 4b,8b\n"
+ STR(PTR) " 1b,8b\n"
+ STR(PTR) " 2b,8b\n"
+ STR(PTR) " 3b,8b\n"
+ STR(PTR) " 4b,8b\n"
" .previous\n"
" .set pop\n"
: "+&r"(rt), "=&r"(rs),
@@ -1467,10 +1468,10 @@ fpu_emul:
" j 9b\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
- " .word 1b,8b\n"
- " .word 2b,8b\n"
- " .word 3b,8b\n"
- " .word 4b,8b\n"
+ STR(PTR) " 1b,8b\n"
+ STR(PTR) " 2b,8b\n"
+ STR(PTR) " 3b,8b\n"
+ STR(PTR) " 4b,8b\n"
" .previous\n"
" .set pop\n"
: "+&r"(rt), "=&r"(rs),
@@ -1483,7 +1484,7 @@ fpu_emul:
break;
case ldl_op:
- if (config_enabled(CONFIG_32BIT)) {
+ if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
@@ -1582,14 +1583,14 @@ fpu_emul:
" j 9b\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
- " .word 1b,8b\n"
- " .word 2b,8b\n"
- " .word 3b,8b\n"
- " .word 4b,8b\n"
- " .word 5b,8b\n"
- " .word 6b,8b\n"
- " .word 7b,8b\n"
- " .word 0b,8b\n"
+ STR(PTR) " 1b,8b\n"
+ STR(PTR) " 2b,8b\n"
+ STR(PTR) " 3b,8b\n"
+ STR(PTR) " 4b,8b\n"
+ STR(PTR) " 5b,8b\n"
+ STR(PTR) " 6b,8b\n"
+ STR(PTR) " 7b,8b\n"
+ STR(PTR) " 0b,8b\n"
" .previous\n"
" .set pop\n"
: "+&r"(rt), "=&r"(rs),
@@ -1602,7 +1603,7 @@ fpu_emul:
break;
case ldr_op:
- if (config_enabled(CONFIG_32BIT)) {
+ if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
@@ -1701,14 +1702,14 @@ fpu_emul:
" j 9b\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
- " .word 1b,8b\n"
- " .word 2b,8b\n"
- " .word 3b,8b\n"
- " .word 4b,8b\n"
- " .word 5b,8b\n"
- " .word 6b,8b\n"
- " .word 7b,8b\n"
- " .word 0b,8b\n"
+ STR(PTR) " 1b,8b\n"
+ STR(PTR) " 2b,8b\n"
+ STR(PTR) " 3b,8b\n"
+ STR(PTR) " 4b,8b\n"
+ STR(PTR) " 5b,8b\n"
+ STR(PTR) " 6b,8b\n"
+ STR(PTR) " 7b,8b\n"
+ STR(PTR) " 0b,8b\n"
" .previous\n"
" .set pop\n"
: "+&r"(rt), "=&r"(rs),
@@ -1721,7 +1722,7 @@ fpu_emul:
break;
case sdl_op:
- if (config_enabled(CONFIG_32BIT)) {
+ if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
@@ -1820,14 +1821,14 @@ fpu_emul:
" j 9b\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
- " .word 1b,8b\n"
- " .word 2b,8b\n"
- " .word 3b,8b\n"
- " .word 4b,8b\n"
- " .word 5b,8b\n"
- " .word 6b,8b\n"
- " .word 7b,8b\n"
- " .word 0b,8b\n"
+ STR(PTR) " 1b,8b\n"
+ STR(PTR) " 2b,8b\n"
+ STR(PTR) " 3b,8b\n"
+ STR(PTR) " 4b,8b\n"
+ STR(PTR) " 5b,8b\n"
+ STR(PTR) " 6b,8b\n"
+ STR(PTR) " 7b,8b\n"
+ STR(PTR) " 0b,8b\n"
" .previous\n"
" .set pop\n"
: "+&r"(rt), "=&r"(rs),
@@ -1839,7 +1840,7 @@ fpu_emul:
break;
case sdr_op:
- if (config_enabled(CONFIG_32BIT)) {
+ if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
@@ -1938,14 +1939,14 @@ fpu_emul:
" j 9b\n"
" .previous\n"
" .section __ex_table,\"a\"\n"
- " .word 1b,8b\n"
- " .word 2b,8b\n"
- " .word 3b,8b\n"
- " .word 4b,8b\n"
- " .word 5b,8b\n"
- " .word 6b,8b\n"
- " .word 7b,8b\n"
- " .word 0b,8b\n"
+ STR(PTR) " 1b,8b\n"
+ STR(PTR) " 2b,8b\n"
+ STR(PTR) " 3b,8b\n"
+ STR(PTR) " 4b,8b\n"
+ STR(PTR) " 5b,8b\n"
+ STR(PTR) " 6b,8b\n"
+ STR(PTR) " 7b,8b\n"
+ STR(PTR) " 0b,8b\n"
" .previous\n"
" .set pop\n"
: "+&r"(rt), "=&r"(rs),
@@ -2000,7 +2001,7 @@ fpu_emul:
"j 2b\n"
".previous\n"
".section __ex_table,\"a\"\n"
- ".word 1b, 3b\n"
+ STR(PTR) " 1b,3b\n"
".previous\n"
: "=&r"(res), "+&r"(err)
: "r"(vaddr), "i"(SIGSEGV)
@@ -2058,7 +2059,7 @@ fpu_emul:
"j 2b\n"
".previous\n"
".section __ex_table,\"a\"\n"
- ".word 1b, 3b\n"
+ STR(PTR) " 1b,3b\n"
".previous\n"
: "+&r"(res), "+&r"(err)
: "r"(vaddr), "i"(SIGSEGV));
@@ -2071,7 +2072,7 @@ fpu_emul:
break;
case lld_op:
- if (config_enabled(CONFIG_32BIT)) {
+ if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
@@ -2119,7 +2120,7 @@ fpu_emul:
"j 2b\n"
".previous\n"
".section __ex_table,\"a\"\n"
- ".word 1b, 3b\n"
+ STR(PTR) " 1b,3b\n"
".previous\n"
: "=&r"(res), "+&r"(err)
: "r"(vaddr), "i"(SIGSEGV)
@@ -2132,7 +2133,7 @@ fpu_emul:
break;
case scd_op:
- if (config_enabled(CONFIG_32BIT)) {
+ if (IS_ENABLED(CONFIG_32BIT)) {
err = SIGILL;
break;
}
@@ -2182,7 +2183,7 @@ fpu_emul:
"j 2b\n"
".previous\n"
".section __ex_table,\"a\"\n"
- ".word 1b, 3b\n"
+ STR(PTR) " 1b,3b\n"
".previous\n"
: "+&r"(res), "+&r"(err)
: "r"(vaddr), "i"(SIGSEGV));
@@ -2201,7 +2202,7 @@ fpu_emul:
}
/*
- * Lets not return to userland just yet. It's constly and
+ * Let's not return to userland just yet. It's costly and
* it's likely we have more R2 instructions to emulate
*/
if (!err && (pass++ < MIPS_R2_EMUL_TOTAL_PASS)) {
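
The .word -> STR(PTR) conversions above matter on 64-bit kernels: __ex_table entries must be pointer-sized, and PTR expands to the right assembler directive for the build. The construct relies on two-level stringification so the macro is expanded before it is turned into text for the inline asm. An illustrative stand-alone version (definitions here are simplified, not the kernel's asm.h):

#include <stdio.h>

#ifdef DEMO_64BIT
#define PTR .dword
#else
#define PTR .word
#endif

#define __STR(x) #x
#define STR(x) __STR(x)		/* expand PTR first, then stringify */

int main(void)
{
	/* What the __ex_table fragment of the inline asm expands to */
	const char *entry = STR(PTR) " 1b,8b\n";

	printf("%s", entry);				/* ".word 1b,8b" or ".dword 1b,8b" */
	printf("without expansion: %s\n", __STR(PTR));	/* always "PTR" */
	return 0;
}
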
diff --git a/arch/mips/kernel/module-rela.c b/arch/mips/kernel/module-rela.c
index 2b70723071c3..781168834456 100644
--- a/arch/mips/kernel/module-rela.c
+++ b/arch/mips/kernel/module-rela.c
@@ -16,6 +16,7 @@
* Copyright (C) 2001 Rusty Russell.
* Copyright (C) 2003, 2004 Ralf Baechle (ralf@linux-mips.org)
* Copyright (C) 2005 Thiemo Seufer
+ * Copyright (C) 2015 Imagination Technologies Ltd.
*/
#include <linux/elf.h>
@@ -35,15 +36,13 @@ static int apply_r_mips_32_rela(struct module *me, u32 *location, Elf_Addr v)
static int apply_r_mips_26_rela(struct module *me, u32 *location, Elf_Addr v)
{
if (v % 4) {
- pr_err("module %s: dangerous R_MIPS_26 RELArelocation\n",
+ pr_err("module %s: dangerous R_MIPS_26 RELA relocation\n",
me->name);
return -ENOEXEC;
}
if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
- printk(KERN_ERR
- "module %s: relocation overflow\n",
- me->name);
+ pr_err("module %s: relocation overflow\n", me->name);
return -ENOEXEC;
}
@@ -67,6 +66,48 @@ static int apply_r_mips_lo16_rela(struct module *me, u32 *location, Elf_Addr v)
return 0;
}
+static int apply_r_mips_pc_rela(struct module *me, u32 *location, Elf_Addr v,
+ unsigned bits)
+{
+ unsigned long mask = GENMASK(bits - 1, 0);
+ unsigned long se_bits;
+ long offset;
+
+ if (v % 4) {
+ pr_err("module %s: dangerous R_MIPS_PC%u RELA relocation\n",
+ me->name, bits);
+ return -ENOEXEC;
+ }
+
+ offset = ((long)v - (long)location) >> 2;
+
+ /* check that bits from the sign bit onwards are identical - i.e. we didn't overflow */
+ se_bits = (offset & BIT(bits - 1)) ? ~0ul : 0;
+ if ((offset & ~mask) != (se_bits & ~mask)) {
+ pr_err("module %s: relocation overflow\n", me->name);
+ return -ENOEXEC;
+ }
+
+ *location = (*location & ~mask) | (offset & mask);
+
+ return 0;
+}
+
+static int apply_r_mips_pc16_rela(struct module *me, u32 *location, Elf_Addr v)
+{
+ return apply_r_mips_pc_rela(me, location, v, 16);
+}
+
+static int apply_r_mips_pc21_rela(struct module *me, u32 *location, Elf_Addr v)
+{
+ return apply_r_mips_pc_rela(me, location, v, 21);
+}
+
+static int apply_r_mips_pc26_rela(struct module *me, u32 *location, Elf_Addr v)
+{
+ return apply_r_mips_pc_rela(me, location, v, 26);
+}
+
static int apply_r_mips_64_rela(struct module *me, u32 *location, Elf_Addr v)
{
*(Elf_Addr *)location = v;
@@ -99,9 +140,12 @@ static int (*reloc_handlers_rela[]) (struct module *me, u32 *location,
[R_MIPS_26] = apply_r_mips_26_rela,
[R_MIPS_HI16] = apply_r_mips_hi16_rela,
[R_MIPS_LO16] = apply_r_mips_lo16_rela,
+ [R_MIPS_PC16] = apply_r_mips_pc16_rela,
[R_MIPS_64] = apply_r_mips_64_rela,
[R_MIPS_HIGHER] = apply_r_mips_higher_rela,
- [R_MIPS_HIGHEST] = apply_r_mips_highest_rela
+ [R_MIPS_HIGHEST] = apply_r_mips_highest_rela,
+ [R_MIPS_PC21_S2] = apply_r_mips_pc21_rela,
+ [R_MIPS_PC26_S2] = apply_r_mips_pc26_rela,
};
int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
@@ -109,9 +153,10 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
struct module *me)
{
Elf_Mips_Rela *rel = (void *) sechdrs[relsec].sh_addr;
+ int (*handler)(struct module *me, u32 *location, Elf_Addr v);
Elf_Sym *sym;
u32 *location;
- unsigned int i;
+ unsigned int i, type;
Elf_Addr v;
int res;
@@ -125,18 +170,30 @@ int apply_relocate_add(Elf_Shdr *sechdrs, const char *strtab,
/* This is the symbol it is referring to */
sym = (Elf_Sym *)sechdrs[symindex].sh_addr
+ ELF_MIPS_R_SYM(rel[i]);
- if (IS_ERR_VALUE(sym->st_value)) {
+ if (sym->st_value >= -MAX_ERRNO) {
/* Ignore unresolved weak symbol */
if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
continue;
- printk(KERN_WARNING "%s: Unknown symbol %s\n",
+ pr_warn("%s: Unknown symbol %s\n",
me->name, strtab + sym->st_name);
return -ENOENT;
}
- v = sym->st_value + rel[i].r_addend;
+ type = ELF_MIPS_R_TYPE(rel[i]);
+
+ if (type < ARRAY_SIZE(reloc_handlers_rela))
+ handler = reloc_handlers_rela[type];
+ else
+ handler = NULL;
- res = reloc_handlers_rela[ELF_MIPS_R_TYPE(rel[i])](me, location, v);
+ if (!handler) {
+ pr_err("%s: Unknown relocation type %u\n",
+ me->name, type);
+ return -EINVAL;
+ }
+
+ v = sym->st_value + rel[i].r_addend;
+ res = handler(me, location, v);
if (res)
return res;
}
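
The new R_MIPS_PC16/PC21_S2/PC26_S2 handlers all funnel into the same helper: the word offset is (target - location) >> 2, and the overflow test checks that every bit above the field matches the field's sign bit before patching the low bits of the instruction. A stand-alone sketch of that arithmetic (GENMASK/BIT are simplified local definitions and the test values are made up):

#include <stdint.h>
#include <stdio.h>

#define BIT(n)		(1ul << (n))
#define GENMASK(h, l)	(((~0ul) << (l)) & (~0ul >> (8 * sizeof(long) - 1 - (h))))

static int apply_pc_relative(uint32_t *location, unsigned long target,
			     unsigned int bits)
{
	unsigned long mask = GENMASK(bits - 1, 0);
	unsigned long se_bits;
	long offset;

	if (target % 4)
		return -1;			/* misaligned target */

	offset = ((long)target - (long)(unsigned long)location) >> 2;

	/* bits from the sign bit upwards must all match, or we overflowed */
	se_bits = (offset & BIT(bits - 1)) ? ~0ul : 0;
	if (((unsigned long)offset & ~mask) != (se_bits & ~mask))
		return -2;			/* out of range for this field */

	*location = (uint32_t)((*location & ~mask) |
			       ((unsigned long)offset & mask));
	return 0;
}

int main(void)
{
	uint32_t insn = 0x10000000;	/* pretend branch with a zero offset field */
	unsigned long here = (unsigned long)&insn;

	/* In range for a 16-bit field (+/- 32767 words) */
	printf("near: %d\n", apply_pc_relative(&insn, here + 0x100, 16));
	/* Far beyond that: must be rejected */
	printf("far:  %d\n", apply_pc_relative(&insn, here + (1ul << 20), 16));
	return 0;
}

The REL flavour in module.c differs only in first recovering the implicit addend from the instruction word and sign-extending it before adding the displacement.
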
diff --git a/arch/mips/kernel/module.c b/arch/mips/kernel/module.c
index 1833f5171ccd..79850e376ef6 100644
--- a/arch/mips/kernel/module.c
+++ b/arch/mips/kernel/module.c
@@ -73,8 +73,7 @@ static int apply_r_mips_26_rel(struct module *me, u32 *location, Elf_Addr v)
}
if ((v & 0xf0000000) != (((unsigned long)location + 4) & 0xf0000000)) {
- printk(KERN_ERR
- "module %s: relocation overflow\n",
+ pr_err("module %s: relocation overflow\n",
me->name);
return -ENOEXEC;
}
@@ -183,13 +182,62 @@ out_danger:
return -ENOEXEC;
}
+static int apply_r_mips_pc_rel(struct module *me, u32 *location, Elf_Addr v,
+ unsigned bits)
+{
+ unsigned long mask = GENMASK(bits - 1, 0);
+ unsigned long se_bits;
+ long offset;
+
+ if (v % 4) {
+ pr_err("module %s: dangerous R_MIPS_PC%u REL relocation\n",
+ me->name, bits);
+ return -ENOEXEC;
+ }
+
+ /* retrieve & sign extend implicit addend */
+ offset = *location & mask;
+ offset |= (offset & BIT(bits - 1)) ? ~mask : 0;
+
+ offset += ((long)v - (long)location) >> 2;
+
+ /* check that bits from the sign bit onwards are identical - i.e. we didn't overflow */
+ se_bits = (offset & BIT(bits - 1)) ? ~0ul : 0;
+ if ((offset & ~mask) != (se_bits & ~mask)) {
+ pr_err("module %s: relocation overflow\n", me->name);
+ return -ENOEXEC;
+ }
+
+ *location = (*location & ~mask) | (offset & mask);
+
+ return 0;
+}
+
+static int apply_r_mips_pc16_rel(struct module *me, u32 *location, Elf_Addr v)
+{
+ return apply_r_mips_pc_rel(me, location, v, 16);
+}
+
+static int apply_r_mips_pc21_rel(struct module *me, u32 *location, Elf_Addr v)
+{
+ return apply_r_mips_pc_rel(me, location, v, 21);
+}
+
+static int apply_r_mips_pc26_rel(struct module *me, u32 *location, Elf_Addr v)
+{
+ return apply_r_mips_pc_rel(me, location, v, 26);
+}
+
static int (*reloc_handlers_rel[]) (struct module *me, u32 *location,
Elf_Addr v) = {
[R_MIPS_NONE] = apply_r_mips_none,
[R_MIPS_32] = apply_r_mips_32_rel,
[R_MIPS_26] = apply_r_mips_26_rel,
[R_MIPS_HI16] = apply_r_mips_hi16_rel,
- [R_MIPS_LO16] = apply_r_mips_lo16_rel
+ [R_MIPS_LO16] = apply_r_mips_lo16_rel,
+ [R_MIPS_PC16] = apply_r_mips_pc16_rel,
+ [R_MIPS_PC21_S2] = apply_r_mips_pc21_rel,
+ [R_MIPS_PC26_S2] = apply_r_mips_pc26_rel,
};
int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
@@ -197,9 +245,10 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
struct module *me)
{
Elf_Mips_Rel *rel = (void *) sechdrs[relsec].sh_addr;
+ int (*handler)(struct module *me, u32 *location, Elf_Addr v);
Elf_Sym *sym;
u32 *location;
- unsigned int i;
+ unsigned int i, type;
Elf_Addr v;
int res;
@@ -214,18 +263,30 @@ int apply_relocate(Elf_Shdr *sechdrs, const char *strtab,
/* This is the symbol it is referring to */
sym = (Elf_Sym *)sechdrs[symindex].sh_addr
+ ELF_MIPS_R_SYM(rel[i]);
- if (IS_ERR_VALUE(sym->st_value)) {
+ if (sym->st_value >= -MAX_ERRNO) {
/* Ignore unresolved weak symbol */
if (ELF_ST_BIND(sym->st_info) == STB_WEAK)
continue;
- printk(KERN_WARNING "%s: Unknown symbol %s\n",
- me->name, strtab + sym->st_name);
+ pr_warn("%s: Unknown symbol %s\n",
+ me->name, strtab + sym->st_name);
return -ENOENT;
}
- v = sym->st_value;
+ type = ELF_MIPS_R_TYPE(rel[i]);
+
+ if (type < ARRAY_SIZE(reloc_handlers_rel))
+ handler = reloc_handlers_rel[type];
+ else
+ handler = NULL;
- res = reloc_handlers_rel[ELF_MIPS_R_TYPE(rel[i])](me, location, v);
+ if (!handler) {
+ pr_err("%s: Unknown relocation type %u\n",
+ me->name, type);
+ return -EINVAL;
+ }
+
+ v = sym->st_value;
+ res = handler(me, location, v);
if (res)
return res;
}
diff --git a/arch/mips/kernel/perf_event.c b/arch/mips/kernel/perf_event.c
index c1cf9c6c3f77..d64056e0bb56 100644
--- a/arch/mips/kernel/perf_event.c
+++ b/arch/mips/kernel/perf_event.c
@@ -25,8 +25,8 @@
* the user stack callchains, we will add it here.
*/
-static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
- unsigned long reg29)
+static void save_raw_perf_callchain(struct perf_callchain_entry_ctx *entry,
+ unsigned long reg29)
{
unsigned long *sp = (unsigned long *)reg29;
unsigned long addr;
@@ -35,14 +35,14 @@ static void save_raw_perf_callchain(struct perf_callchain_entry *entry,
addr = *sp++;
if (__kernel_text_address(addr)) {
perf_callchain_store(entry, addr);
- if (entry->nr >= PERF_MAX_STACK_DEPTH)
+ if (entry->nr >= entry->max_stack)
break;
}
}
}
-void perf_callchain_kernel(struct perf_callchain_entry *entry,
- struct pt_regs *regs)
+void perf_callchain_kernel(struct perf_callchain_entry_ctx *entry,
+ struct pt_regs *regs)
{
unsigned long sp = regs->regs[29];
#ifdef CONFIG_KALLSYMS
@@ -59,7 +59,7 @@ void perf_callchain_kernel(struct perf_callchain_entry *entry,
}
do {
perf_callchain_store(entry, pc);
- if (entry->nr >= PERF_MAX_STACK_DEPTH)
+ if (entry->nr >= entry->max_stack)
break;
pc = unwind_stack(current, &sp, pc, &ra);
} while (pc);
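
The perf change swaps the compile-time PERF_MAX_STACK_DEPTH cut-off for the per-context entry->max_stack, now that the limit travels in the entry context. A tiny sketch of the depth handling with simplified stand-in types (not the real perf_callchain_entry_ctx):

#include <stdio.h>

struct callchain_ctx {
	unsigned int nr;
	unsigned int max_stack;
	unsigned long ip[64];
};

static void callchain_store(struct callchain_ctx *entry, unsigned long addr)
{
	if (entry->nr < entry->max_stack)
		entry->ip[entry->nr++] = addr;
}

int main(void)
{
	/* Pretend return addresses recovered while unwinding the stack */
	unsigned long frames[] = { 0x80101000, 0x80102000, 0x80103000, 0x80104000 };
	struct callchain_ctx entry = { .nr = 0, .max_stack = 3 };
	unsigned int i;

	for (i = 0; i < sizeof(frames) / sizeof(frames[0]); i++) {
		callchain_store(&entry, frames[i]);
		if (entry.nr >= entry.max_stack)
			break;		/* same cut-off the patch applies */
	}

	printf("recorded %u of %zu frames\n", entry.nr,
	       sizeof(frames) / sizeof(frames[0]));
	return 0;
}
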
diff --git a/arch/mips/kernel/perf_event_mipsxx.c b/arch/mips/kernel/perf_event_mipsxx.c
index d7b8dd43147a..d3ba9f4105b5 100644
--- a/arch/mips/kernel/perf_event_mipsxx.c
+++ b/arch/mips/kernel/perf_event_mipsxx.c
@@ -101,8 +101,6 @@ struct mips_pmu {
static struct mips_pmu mipspmu;
-#define M_CONFIG1_PC (1 << 4)
-
#define M_PERFCTL_EXL (1 << 0)
#define M_PERFCTL_KERNEL (1 << 1)
#define M_PERFCTL_SUPERVISOR (1 << 2)
@@ -530,7 +528,7 @@ static void mipspmu_enable(struct pmu *pmu)
/*
* MIPS performance counters can be per-TC. The control registers can
- * not be directly accessed accross CPUs. Hence if we want to do global
+ * not be directly accessed across CPUs. Hence if we want to do global
* control, we need cross CPU calls. on_each_cpu() can help us, but we
* can not make sure this function is called with interrupts enabled. So
* here we pause local counters and then grab a rwlock and leave the
@@ -754,7 +752,7 @@ static void handle_associated_event(struct cpu_hw_events *cpuc,
static int __n_counters(void)
{
- if (!(read_c0_config1() & M_CONFIG1_PC))
+ if (!cpu_has_perf)
return 0;
if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
return 1;
@@ -825,6 +823,16 @@ static const struct mips_perf_event mipsxxcore_event_map2
[PERF_COUNT_HW_BRANCH_MISSES] = { 0x27, CNTR_ODD, T },
};
+static const struct mips_perf_event i6400_event_map[PERF_COUNT_HW_MAX] = {
+ [PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN | CNTR_ODD },
+ [PERF_COUNT_HW_INSTRUCTIONS] = { 0x01, CNTR_EVEN | CNTR_ODD },
+ /* These only count dcache, not icache */
+ [PERF_COUNT_HW_CACHE_REFERENCES] = { 0x45, CNTR_EVEN | CNTR_ODD },
+ [PERF_COUNT_HW_CACHE_MISSES] = { 0x48, CNTR_EVEN | CNTR_ODD },
+ [PERF_COUNT_HW_BRANCH_INSTRUCTIONS] = { 0x15, CNTR_EVEN | CNTR_ODD },
+ [PERF_COUNT_HW_BRANCH_MISSES] = { 0x16, CNTR_EVEN | CNTR_ODD },
+};
+
static const struct mips_perf_event loongson3_event_map[PERF_COUNT_HW_MAX] = {
[PERF_COUNT_HW_CPU_CYCLES] = { 0x00, CNTR_EVEN },
[PERF_COUNT_HW_INSTRUCTIONS] = { 0x00, CNTR_ODD },
@@ -1015,6 +1023,46 @@ static const struct mips_perf_event mipsxxcore_cache_map2
},
};
+static const struct mips_perf_event i6400_cache_map
+ [PERF_COUNT_HW_CACHE_MAX]
+ [PERF_COUNT_HW_CACHE_OP_MAX]
+ [PERF_COUNT_HW_CACHE_RESULT_MAX] = {
+[C(L1D)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x46, CNTR_EVEN | CNTR_ODD },
+ [C(RESULT_MISS)] = { 0x49, CNTR_EVEN | CNTR_ODD },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x47, CNTR_EVEN | CNTR_ODD },
+ [C(RESULT_MISS)] = { 0x4a, CNTR_EVEN | CNTR_ODD },
+ },
+},
+[C(L1I)] = {
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x84, CNTR_EVEN | CNTR_ODD },
+ [C(RESULT_MISS)] = { 0x85, CNTR_EVEN | CNTR_ODD },
+ },
+},
+[C(DTLB)] = {
+ /* Can't distinguish read & write */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x40, CNTR_EVEN | CNTR_ODD },
+ [C(RESULT_MISS)] = { 0x41, CNTR_EVEN | CNTR_ODD },
+ },
+ [C(OP_WRITE)] = {
+ [C(RESULT_ACCESS)] = { 0x40, CNTR_EVEN | CNTR_ODD },
+ [C(RESULT_MISS)] = { 0x41, CNTR_EVEN | CNTR_ODD },
+ },
+},
+[C(BPU)] = {
+ /* Conditional branches / mispredicted */
+ [C(OP_READ)] = {
+ [C(RESULT_ACCESS)] = { 0x15, CNTR_EVEN | CNTR_ODD },
+ [C(RESULT_MISS)] = { 0x16, CNTR_EVEN | CNTR_ODD },
+ },
+},
+};
+
static const struct mips_perf_event loongson3_cache_map
[PERF_COUNT_HW_CACHE_MAX]
[PERF_COUNT_HW_CACHE_OP_MAX]
@@ -1556,6 +1604,7 @@ static const struct mips_perf_event *mipsxx_pmu_map_raw_event(u64 config)
#endif
break;
case CPU_P5600:
+ case CPU_P6600:
case CPU_I6400:
/* 8-bit event numbers */
raw_id = config & 0x1ff;
@@ -1718,11 +1767,16 @@ init_hw_perf_events(void)
mipspmu.general_event_map = &mipsxxcore_event_map2;
mipspmu.cache_event_map = &mipsxxcore_cache_map2;
break;
- case CPU_I6400:
- mipspmu.name = "mips/I6400";
+ case CPU_P6600:
+ mipspmu.name = "mips/P6600";
mipspmu.general_event_map = &mipsxxcore_event_map2;
mipspmu.cache_event_map = &mipsxxcore_cache_map2;
break;
+ case CPU_I6400:
+ mipspmu.name = "mips/I6400";
+ mipspmu.general_event_map = &i6400_event_map;
+ mipspmu.cache_event_map = &i6400_cache_map;
+ break;
case CPU_1004K:
mipspmu.name = "mips/1004K";
mipspmu.general_event_map = &mipsxxcore_event_map;
diff --git a/arch/mips/kernel/pm-cps.c b/arch/mips/kernel/pm-cps.c
index f63a289977cc..5b31a9405ebc 100644
--- a/arch/mips/kernel/pm-cps.c
+++ b/arch/mips/kernel/pm-cps.c
@@ -148,7 +148,7 @@ int cps_pm_enter_state(enum cps_pm_state state)
}
/* Setup the VPE to run mips_cps_pm_restore when started again */
- if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
+ if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
/* Power gating relies upon CPS SMP */
if (!mips_cps_smp_in_use())
return -EINVAL;
@@ -224,11 +224,18 @@ static void __init cps_gen_cache_routine(u32 **pp, struct uasm_label **pl,
uasm_build_label(pl, *pp, lbl);
/* Generate the cache ops */
- for (i = 0; i < unroll_lines; i++)
- uasm_i_cache(pp, op, i * cache->linesz, t0);
+ for (i = 0; i < unroll_lines; i++) {
+ if (cpu_has_mips_r6) {
+ uasm_i_cache(pp, op, 0, t0);
+ uasm_i_addiu(pp, t0, t0, cache->linesz);
+ } else {
+ uasm_i_cache(pp, op, i * cache->linesz, t0);
+ }
+ }
- /* Update the base address */
- uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz);
+ if (!cpu_has_mips_r6)
+ /* Update the base address */
+ uasm_i_addiu(pp, t0, t0, unroll_lines * cache->linesz);
/* Loop if we haven't reached the end address yet */
uasm_il_bne(pp, pr, t0, t1, lbl);
@@ -380,7 +387,7 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
memset(labels, 0, sizeof(labels));
memset(relocs, 0, sizeof(relocs));
- if (config_enabled(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
+ if (IS_ENABLED(CONFIG_CPU_PM) && state == CPS_PM_POWER_GATED) {
/* Power gating relies upon CPS SMP */
if (!mips_cps_smp_in_use())
goto out_err;
@@ -472,7 +479,7 @@ static void * __init cps_gen_entry_code(unsigned cpu, enum cps_pm_state state)
/*
* Disable all but self interventions. The load from COHCTL is defined
* by the interAptiv & proAptiv SUMs as ensuring that the operation
- * resulting from the preceeding store is complete.
+ * resulting from the preceding store is complete.
*/
uasm_i_addiu(&p, t0, zero, 1 << cpu_data[cpu].core);
uasm_i_sw(&p, t0, 0, r_pcohctl);
diff --git a/arch/mips/kernel/pm.c b/arch/mips/kernel/pm.c
index fefdf39d3df3..dc814892133c 100644
--- a/arch/mips/kernel/pm.c
+++ b/arch/mips/kernel/pm.c
@@ -56,7 +56,7 @@ static void mips_cpu_restore(void)
write_c0_userlocal(current_thread_info()->tp_value);
/* Restore watch registers */
- __restore_watch();
+ __restore_watch(current);
}
/**
diff --git a/arch/mips/kernel/proc.c b/arch/mips/kernel/proc.c
index 298b2b773d12..97dc01b03631 100644
--- a/arch/mips/kernel/proc.c
+++ b/arch/mips/kernel/proc.c
@@ -114,6 +114,7 @@ static int show_cpuinfo(struct seq_file *m, void *v)
if (cpu_has_smartmips) seq_printf(m, "%s", " smartmips");
if (cpu_has_dsp) seq_printf(m, "%s", " dsp");
if (cpu_has_dsp2) seq_printf(m, "%s", " dsp2");
+ if (cpu_has_dsp3) seq_printf(m, "%s", " dsp3");
if (cpu_has_mipsmt) seq_printf(m, "%s", " mt");
if (cpu_has_mmips) seq_printf(m, "%s", " micromips");
if (cpu_has_vz) seq_printf(m, "%s", " vz");
diff --git a/arch/mips/kernel/process.c b/arch/mips/kernel/process.c
index eddd5fd6fdfa..7429ad09fbe3 100644
--- a/arch/mips/kernel/process.c
+++ b/arch/mips/kernel/process.c
@@ -30,6 +30,7 @@
#include <asm/asm.h>
#include <asm/bootinfo.h>
#include <asm/cpu.h>
+#include <asm/dsemul.h>
#include <asm/dsp.h>
#include <asm/fpu.h>
#include <asm/msa.h>
@@ -68,17 +69,20 @@ void start_thread(struct pt_regs * regs, unsigned long pc, unsigned long sp)
lose_fpu(0);
clear_thread_flag(TIF_MSA_CTX_LIVE);
clear_used_math();
+ atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
init_dsp();
regs->cp0_epc = pc;
regs->regs[29] = sp;
}
-void exit_thread(void)
-{
-}
-
-void flush_thread(void)
+void exit_thread(struct task_struct *tsk)
{
+ /*
+ * User threads may have allocated a delay slot emulation frame.
+ * If so, clean up that allocation.
+ */
+ if (!(current->flags & PF_KTHREAD))
+ dsemul_thread_cleanup(tsk);
}
int arch_dup_task_struct(struct task_struct *dst, struct task_struct *src)
@@ -167,6 +171,8 @@ int copy_thread(unsigned long clone_flags, unsigned long usp,
clear_tsk_thread_flag(p, TIF_FPUBOUND);
#endif /* CONFIG_MIPS_MT_FPAFF */
+ atomic_set(&p->thread.bd_emu_frame, BD_EMUFRAME_NONE);
+
if (clone_flags & CLONE_SETTLS)
ti->tp_value = regs->regs[7];
@@ -353,7 +359,7 @@ static int get_frame_info(struct mips_frame_info *info)
return 0;
if (info->pc_offset < 0) /* leaf */
return 1;
- /* prologue seems boggus... */
+ /* prologue seems bogus... */
err:
return -1;
}
@@ -455,7 +461,7 @@ unsigned long notrace unwind_stack_by_address(unsigned long stack_page,
*sp + sizeof(*regs) <= stack_page + THREAD_SIZE - 32) {
regs = (struct pt_regs *)*sp;
pc = regs->cp0_epc;
- if (__kernel_text_address(pc)) {
+ if (!user_mode(regs) && __kernel_text_address(pc)) {
*sp = regs->regs[29];
*ra = regs->regs[31];
return pc;
@@ -580,11 +586,19 @@ int mips_get_process_fp_mode(struct task_struct *task)
return value;
}
+static void prepare_for_fp_mode_switch(void *info)
+{
+ struct mm_struct *mm = info;
+
+ if (current->mm == mm)
+ lose_fpu(1);
+}
+
int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
{
const unsigned int known_bits = PR_FP_MODE_FR | PR_FP_MODE_FRE;
- unsigned long switch_count;
struct task_struct *t;
+ int max_users;
/* Check the value is valid */
if (value & ~known_bits)
@@ -601,6 +615,9 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
if (!(value & PR_FP_MODE_FR) && cpu_has_fpu && cpu_has_mips_r6)
return -EOPNOTSUPP;
+ /* Proceed with the mode switch */
+ preempt_disable();
+
/* Save FP & vector context, then disable FPU & MSA */
if (task->signal == current->signal)
lose_fpu(1);
@@ -610,31 +627,17 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
smp_mb__after_atomic();
/*
- * If there are multiple online CPUs then wait until all threads whose
- * FP mode is about to change have been context switched. This approach
- * allows us to only worry about whether an FP mode switch is in
- * progress when FP is first used in a tasks time slice. Pretty much all
- * of the mode switch overhead can thus be confined to cases where mode
- * switches are actually occuring. That is, to here. However for the
- * thread performing the mode switch it may take a while...
+ * If there are multiple online CPUs then force any which are running
+ * threads in this process to lose their FPU context, which they can't
+ * regain until fp_mode_switching is cleared later.
*/
if (num_online_cpus() > 1) {
- spin_lock_irq(&task->sighand->siglock);
-
- for_each_thread(task, t) {
- if (t == current)
- continue;
+ /* No need to send an IPI for the local CPU */
+ max_users = (task->mm == current->mm) ? 1 : 0;
- switch_count = t->nvcsw + t->nivcsw;
-
- do {
- spin_unlock_irq(&task->sighand->siglock);
- cond_resched();
- spin_lock_irq(&task->sighand->siglock);
- } while ((t->nvcsw + t->nivcsw) == switch_count);
- }
-
- spin_unlock_irq(&task->sighand->siglock);
+ if (atomic_read(&current->mm->mm_users) > max_users)
+ smp_call_function(prepare_for_fp_mode_switch,
+ (void *)current->mm, 1);
}
/*
@@ -659,6 +662,7 @@ int mips_set_process_fp_mode(struct task_struct *task, unsigned int value)
/* Allow threads to use FP again */
atomic_set(&task->mm->context.fp_mode_switching, 0);
+ preempt_enable();
return 0;
}
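
The reworked mips_set_process_fp_mode() above is the backend of the PR_SET_FP_MODE prctl. As a hedged illustration (not from the patch), a userspace caller might look like the sketch below; the PR_* fallback definitions simply mirror the standard values from <linux/prctl.h> in case older userland headers lack them.

/*
 * Illustrative only, not part of the patch: request an FP mode switch for
 * the whole process via prctl().
 */
#include <stdio.h>
#include <sys/prctl.h>

#ifndef PR_SET_FP_MODE
# define PR_SET_FP_MODE	45
# define PR_GET_FP_MODE	46
# define PR_FP_MODE_FR	(1 << 0)	/* 64-bit FP registers */
# define PR_FP_MODE_FRE	(1 << 1)	/* 32-bit compatibility on FR=1 */
#endif

int main(void)
{
	printf("current FP mode: %#x\n", prctl(PR_GET_FP_MODE, 0, 0, 0, 0));

	/* Ask for FR=1; every thread in the process is switched together */
	if (prctl(PR_SET_FP_MODE, PR_FP_MODE_FR, 0, 0, 0))
		perror("PR_SET_FP_MODE");
	else
		printf("new FP mode: %#x\n", prctl(PR_GET_FP_MODE, 0, 0, 0, 0));

	return 0;
}
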
diff --git a/arch/mips/kernel/ptrace.c b/arch/mips/kernel/ptrace.c
index a5279b2f3198..6103b24d1bfc 100644
--- a/arch/mips/kernel/ptrace.c
+++ b/arch/mips/kernel/ptrace.c
@@ -57,8 +57,7 @@ static void init_fp_ctx(struct task_struct *target)
/* Begin with data registers set to all 1s... */
memset(&target->thread.fpu.fpr, ~0, sizeof(target->thread.fpu.fpr));
- /* ...and FCSR zeroed */
- target->thread.fpu.fcr31 = 0;
+ /* FCSR has been preset by `mips_set_personality_nan'. */
/*
* Record that the target has "used" math, such that the context
@@ -80,6 +79,22 @@ void ptrace_disable(struct task_struct *child)
}
/*
+ * Poke at FCSR according to its mask. Don't set the cause bits as
+ * this is currently not handled correctly in FP context restoration
+ * and will cause an oops if a corresponding enable bit is set.
+ */
+static void ptrace_setfcr31(struct task_struct *child, u32 value)
+{
+ u32 fcr31;
+ u32 mask;
+
+ value &= ~FPU_CSR_ALL_X;
+ fcr31 = child->thread.fpu.fcr31;
+ mask = boot_cpu_data.fpu_msk31;
+ child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
+}
+
+/*
* Read a general register set. We always use the 64-bit format, even
* for 32-bit kernels and for 32-bit processes on a 64-bit kernel.
* Registers are sign extended to fill the available space.
@@ -159,9 +174,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
{
union fpureg *fregs;
u64 fpr_val;
- u32 fcr31;
u32 value;
- u32 mask;
int i;
if (!access_ok(VERIFY_READ, data, 33 * 8))
@@ -176,9 +189,7 @@ int ptrace_setfpregs(struct task_struct *child, __u32 __user *data)
}
__get_user(value, data + 64);
- fcr31 = child->thread.fpu.fcr31;
- mask = boot_cpu_data.fpu_msk31;
- child->thread.fpu.fcr31 = (value & ~mask) | (fcr31 & mask);
+ ptrace_setfcr31(child, value);
/* FIR may not be written. */
@@ -210,7 +221,8 @@ int ptrace_get_watch_regs(struct task_struct *child,
for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
__put_user(child->thread.watch.mips3264.watchlo[i],
&addr->WATCH_STYLE.watchlo[i]);
- __put_user(child->thread.watch.mips3264.watchhi[i] & 0xfff,
+ __put_user(child->thread.watch.mips3264.watchhi[i] &
+ (MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW),
&addr->WATCH_STYLE.watchhi[i]);
__put_user(boot_cpu_data.watch_reg_masks[i],
&addr->WATCH_STYLE.watch_masks[i]);
@@ -252,12 +264,12 @@ int ptrace_set_watch_regs(struct task_struct *child,
}
#endif
__get_user(ht[i], &addr->WATCH_STYLE.watchhi[i]);
- if (ht[i] & ~0xff8)
+ if (ht[i] & ~MIPS_WATCHHI_MASK)
return -EINVAL;
}
/* Install them. */
for (i = 0; i < boot_cpu_data.watch_reg_use_cnt; i++) {
- if (lt[i] & 7)
+ if (lt[i] & MIPS_WATCHLO_IRW)
watch_active = 1;
child->thread.watch.mips3264.watchlo[i] = lt[i];
/* Set the G bit. */
@@ -805,7 +817,7 @@ long arch_ptrace(struct task_struct *child, long request,
break;
#endif
case FPC_CSR:
- child->thread.fpu.fcr31 = data & ~FPU_CSR_ALL_X;
+ ptrace_setfcr31(child, data);
break;
case DSP_BASE ... DSP_BASE + 5: {
dspreg_t *dregs;
@@ -876,17 +888,16 @@ long arch_ptrace(struct task_struct *child, long request,
*/
asmlinkage long syscall_trace_enter(struct pt_regs *regs, long syscall)
{
- long ret = 0;
user_exit();
current_thread_info()->syscall = syscall;
- if (secure_computing() == -1)
- return -1;
-
if (test_thread_flag(TIF_SYSCALL_TRACE) &&
tracehook_report_syscall_entry(regs))
- ret = -1;
+ return -1;
+
+ if (secure_computing(NULL) == -1)
+ return -1;
if (unlikely(test_thread_flag(TIF_SYSCALL_TRACEPOINT)))
trace_sys_enter(regs, regs->regs[2]);
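
The new ptrace_setfcr31() helper applies a read-modify-write rule: cause bits are always stripped, and any bit covered by the CPU's read-only fpu_msk31 mask keeps its previous value. A standalone sketch of that rule follows; it is not kernel code, and the mask value used in main() is an arbitrary example.

/*
 * Standalone sketch, not part of the patch: the FCSR update rule used by
 * ptrace_setfcr31() above.
 */
#include <stdint.h>
#include <stdio.h>

#define FCSR_CAUSE_BITS	0x0003f000u	/* bits 17:12, FPU_CSR_ALL_X */

static uint32_t setfcr31(uint32_t old_fcr31, uint32_t value, uint32_t ro_mask)
{
	value &= ~FCSR_CAUSE_BITS;	/* never plant cause bits */
	return (value & ~ro_mask) | (old_fcr31 & ro_mask);
}

int main(void)
{
	/* Debugger writes all ones; only writable, non-cause bits stick */
	uint32_t ro_mask = 0x01000000;	/* example mask only */
	uint32_t fcr31 = setfcr31(0x00000000, 0xffffffff, ro_mask);

	printf("resulting FCSR: %#x\n", fcr31);
	return 0;
}
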
diff --git a/arch/mips/kernel/r4k_fpu.S b/arch/mips/kernel/r4k_fpu.S
index 17732f876eff..56d86b09c917 100644
--- a/arch/mips/kernel/r4k_fpu.S
+++ b/arch/mips/kernel/r4k_fpu.S
@@ -244,17 +244,17 @@ LEAF(\name)
.set push
.set noat
#ifdef CONFIG_64BIT
- copy_u_d \wr, 1
+ copy_s_d \wr, 1
EX sd $1, \off(\base)
#elif defined(CONFIG_CPU_LITTLE_ENDIAN)
- copy_u_w \wr, 2
+ copy_s_w \wr, 2
EX sw $1, \off(\base)
- copy_u_w \wr, 3
+ copy_s_w \wr, 3
EX sw $1, (\off+4)(\base)
#else /* CONFIG_CPU_BIG_ENDIAN */
- copy_u_w \wr, 2
+ copy_s_w \wr, 2
EX sw $1, (\off+4)(\base)
- copy_u_w \wr, 3
+ copy_s_w \wr, 3
EX sw $1, \off(\base)
#endif
.set pop
diff --git a/arch/mips/kernel/r4k_switch.S b/arch/mips/kernel/r4k_switch.S
index 92cd0516ecf5..2f0a3b223c97 100644
--- a/arch/mips/kernel/r4k_switch.S
+++ b/arch/mips/kernel/r4k_switch.S
@@ -15,7 +15,6 @@
#include <asm/fpregdef.h>
#include <asm/mipsregs.h>
#include <asm/asm-offsets.h>
-#include <asm/pgtable-bits.h>
#include <asm/regdef.h>
#include <asm/stackframe.h>
#include <asm/thread_info.h>
diff --git a/arch/mips/kernel/relocate.c b/arch/mips/kernel/relocate.c
new file mode 100644
index 000000000000..ca1cc30c0891
--- /dev/null
+++ b/arch/mips/kernel/relocate.c
@@ -0,0 +1,386 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Support for Kernel relocation at boot time
+ *
+ * Copyright (C) 2015, Imagination Technologies Ltd.
+ * Authors: Matt Redfearn (matt.redfearn@imgtec.com)
+ */
+#include <asm/bootinfo.h>
+#include <asm/cacheflush.h>
+#include <asm/fw/fw.h>
+#include <asm/sections.h>
+#include <asm/setup.h>
+#include <asm/timex.h>
+#include <linux/elf.h>
+#include <linux/kernel.h>
+#include <linux/libfdt.h>
+#include <linux/of_fdt.h>
+#include <linux/sched.h>
+#include <linux/start_kernel.h>
+#include <linux/string.h>
+#include <linux/printk.h>
+
+#define RELOCATED(x) ((void *)((long)x + offset))
+
+extern u32 _relocation_start[]; /* End kernel image / start relocation table */
+extern u32 _relocation_end[]; /* End relocation table */
+
+extern long __start___ex_table; /* Start exception table */
+extern long __stop___ex_table; /* End exception table */
+
+static inline u32 __init get_synci_step(void)
+{
+ u32 res;
+
+ __asm__("rdhwr %0, $1" : "=r" (res));
+
+ return res;
+}
+
+static void __init sync_icache(void *kbase, unsigned long kernel_length)
+{
+ void *kend = kbase + kernel_length;
+ u32 step = get_synci_step();
+
+ do {
+ __asm__ __volatile__(
+ "synci 0(%0)"
+ : /* no output */
+ : "r" (kbase));
+
+ kbase += step;
+ } while (kbase < kend);
+
+ /* Completion barrier */
+ __sync();
+}
+
+static int __init apply_r_mips_64_rel(u32 *loc_orig, u32 *loc_new, long offset)
+{
+ *(u64 *)loc_new += offset;
+
+ return 0;
+}
+
+static int __init apply_r_mips_32_rel(u32 *loc_orig, u32 *loc_new, long offset)
+{
+ *loc_new += offset;
+
+ return 0;
+}
+
+static int __init apply_r_mips_26_rel(u32 *loc_orig, u32 *loc_new, long offset)
+{
+ unsigned long target_addr = (*loc_orig) & 0x03ffffff;
+
+ if (offset % 4) {
+ pr_err("Dangerous R_MIPS_26 REL relocation\n");
+ return -ENOEXEC;
+ }
+
+ /* Original target address */
+ target_addr <<= 2;
+ target_addr += (unsigned long)loc_orig & ~0x03ffffff;
+
+ /* Get the new target address */
+ target_addr += offset;
+
+ if ((target_addr & 0xf0000000) != ((unsigned long)loc_new & 0xf0000000)) {
+ pr_err("R_MIPS_26 REL relocation overflow\n");
+ return -ENOEXEC;
+ }
+
+ target_addr -= (unsigned long)loc_new & ~0x03ffffff;
+ target_addr >>= 2;
+
+ *loc_new = (*loc_new & ~0x03ffffff) | (target_addr & 0x03ffffff);
+
+ return 0;
+}
+
+
+static int __init apply_r_mips_hi16_rel(u32 *loc_orig, u32 *loc_new, long offset)
+{
+ unsigned long insn = *loc_orig;
+	unsigned long insn = *loc_orig;
+	unsigned long target = (insn & 0xffff) << 16; /* high 16 bits of target */
+
+ target += offset;
+
+ *loc_new = (insn & ~0xffff) | ((target >> 16) & 0xffff);
+ return 0;
+}
+
+static int (*reloc_handlers_rel[]) (u32 *, u32 *, long) __initdata = {
+ [R_MIPS_64] = apply_r_mips_64_rel,
+ [R_MIPS_32] = apply_r_mips_32_rel,
+ [R_MIPS_26] = apply_r_mips_26_rel,
+ [R_MIPS_HI16] = apply_r_mips_hi16_rel,
+};
+
+int __init do_relocations(void *kbase_old, void *kbase_new, long offset)
+{
+ u32 *r;
+ u32 *loc_orig;
+ u32 *loc_new;
+ int type;
+ int res;
+
+ for (r = _relocation_start; r < _relocation_end; r++) {
+ /* Sentinel for last relocation */
+ if (*r == 0)
+ break;
+
+ type = (*r >> 24) & 0xff;
+ loc_orig = (void *)(kbase_old + ((*r & 0x00ffffff) << 2));
+ loc_new = RELOCATED(loc_orig);
+
+ if (reloc_handlers_rel[type] == NULL) {
+ /* Unsupported relocation */
+ pr_err("Unhandled relocation type %d at 0x%pK\n",
+ type, loc_orig);
+ return -ENOEXEC;
+ }
+
+ res = reloc_handlers_rel[type](loc_orig, loc_new, offset);
+ if (res)
+ return res;
+ }
+
+ return 0;
+}
+
+/*
+ * The exception table is filled in by the relocs tool after vmlinux is linked.
+ * It must be relocated separately since there will not be any relocation
+ * information for it filled in by the linker.
+ */
+static int __init relocate_exception_table(long offset)
+{
+ unsigned long *etable_start, *etable_end, *e;
+
+ etable_start = RELOCATED(&__start___ex_table);
+ etable_end = RELOCATED(&__stop___ex_table);
+
+ for (e = etable_start; e < etable_end; e++)
+ *e += offset;
+
+ return 0;
+}
+
+#ifdef CONFIG_RANDOMIZE_BASE
+
+static inline __init unsigned long rotate_xor(unsigned long hash,
+ const void *area, size_t size)
+{
+ size_t i;
+ unsigned long *ptr = (unsigned long *)area;
+
+ for (i = 0; i < size / sizeof(hash); i++) {
+ /* Rotate by odd number of bits and XOR. */
+ hash = (hash << ((sizeof(hash) * 8) - 7)) | (hash >> 7);
+ hash ^= ptr[i];
+ }
+
+ return hash;
+}
+
+static inline __init unsigned long get_random_boot(void)
+{
+ unsigned long entropy = random_get_entropy();
+ unsigned long hash = 0;
+
+ /* Attempt to create a simple but unpredictable starting entropy. */
+ hash = rotate_xor(hash, linux_banner, strlen(linux_banner));
+
+ /* Add in any runtime entropy we can get */
+ hash = rotate_xor(hash, &entropy, sizeof(entropy));
+
+#if defined(CONFIG_USE_OF)
+ /* Get any additional entropy passed in device tree */
+ {
+ int node, len;
+ u64 *prop;
+
+ node = fdt_path_offset(initial_boot_params, "/chosen");
+ if (node >= 0) {
+ prop = fdt_getprop_w(initial_boot_params, node,
+ "kaslr-seed", &len);
+ if (prop && (len == sizeof(u64)))
+ hash = rotate_xor(hash, prop, sizeof(*prop));
+ }
+ }
+#endif /* CONFIG_USE_OF */
+
+ return hash;
+}
+
+static inline __init bool kaslr_disabled(void)
+{
+ char *str;
+
+#if defined(CONFIG_CMDLINE_BOOL)
+ const char *builtin_cmdline = CONFIG_CMDLINE;
+
+ str = strstr(builtin_cmdline, "nokaslr");
+ if (str == builtin_cmdline ||
+ (str > builtin_cmdline && *(str - 1) == ' '))
+ return true;
+#endif
+ str = strstr(arcs_cmdline, "nokaslr");
+ if (str == arcs_cmdline || (str > arcs_cmdline && *(str - 1) == ' '))
+ return true;
+
+ return false;
+}
+
+static inline void __init *determine_relocation_address(void)
+{
+ /* Choose a new address for the kernel */
+ unsigned long kernel_length;
+ void *dest = &_text;
+ unsigned long offset;
+
+ if (kaslr_disabled())
+ return dest;
+
+ kernel_length = (long)_end - (long)(&_text);
+
+ offset = get_random_boot() << 16;
+ offset &= (CONFIG_RANDOMIZE_BASE_MAX_OFFSET - 1);
+ if (offset < kernel_length)
+ offset += ALIGN(kernel_length, 0xffff);
+
+ return RELOCATED(dest);
+}
+
+#else
+
+static inline void __init *determine_relocation_address(void)
+{
+ /*
+	 * Choose a new address for the kernel.
+	 * For now we'll hard code the destination.
+ */
+ return (void *)0xffffffff81000000;
+}
+
+#endif
+
+static inline int __init relocation_addr_valid(void *loc_new)
+{
+ if ((unsigned long)loc_new & 0x0000ffff) {
+ /* Inappropriately aligned new location */
+ return 0;
+ }
+ if ((unsigned long)loc_new < (unsigned long)&_end) {
+ /* New location overlaps original kernel */
+ return 0;
+ }
+ return 1;
+}
+
+void *__init relocate_kernel(void)
+{
+ void *loc_new;
+ unsigned long kernel_length;
+ unsigned long bss_length;
+ long offset = 0;
+ int res = 1;
+ /* Default to original kernel entry point */
+ void *kernel_entry = start_kernel;
+
+ /* Get the command line */
+ fw_init_cmdline();
+#if defined(CONFIG_USE_OF)
+ /* Deal with the device tree */
+ early_init_dt_scan(plat_get_fdt());
+ if (boot_command_line[0]) {
+ /* Boot command line was passed in device tree */
+ strlcpy(arcs_cmdline, boot_command_line, COMMAND_LINE_SIZE);
+ }
+#endif /* CONFIG_USE_OF */
+
+ kernel_length = (long)(&_relocation_start) - (long)(&_text);
+ bss_length = (long)&__bss_stop - (long)&__bss_start;
+
+ loc_new = determine_relocation_address();
+
+ /* Sanity check relocation address */
+ if (relocation_addr_valid(loc_new))
+ offset = (unsigned long)loc_new - (unsigned long)(&_text);
+
+ /* Reset the command line now so we don't end up with a duplicate */
+ arcs_cmdline[0] = '\0';
+
+ if (offset) {
+		/* Copy the kernel to its new location */
+ memcpy(loc_new, &_text, kernel_length);
+
+ /* Perform relocations on the new kernel */
+ res = do_relocations(&_text, loc_new, offset);
+ if (res < 0)
+ goto out;
+
+ /* Sync the caches ready for execution of new kernel */
+ sync_icache(loc_new, kernel_length);
+
+ res = relocate_exception_table(offset);
+ if (res < 0)
+ goto out;
+
+ /*
+ * The original .bss has already been cleared, and
+		 * some variables such as command line parameters have been
+		 * stored to it, so make a copy in the new location.
+ */
+ memcpy(RELOCATED(&__bss_start), &__bss_start, bss_length);
+
+ /* The current thread is now within the relocated image */
+ __current_thread_info = RELOCATED(&init_thread_union);
+
+ /* Return the new kernel's entry point */
+ kernel_entry = RELOCATED(start_kernel);
+ }
+out:
+ return kernel_entry;
+}
+
+/*
+ * Show relocation information on panic.
+ */
+void show_kernel_relocation(const char *level)
+{
+ unsigned long offset;
+
+ offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS);
+
+ if (IS_ENABLED(CONFIG_RELOCATABLE) && offset > 0) {
+ printk(level);
+ pr_cont("Kernel relocated by 0x%pK\n", (void *)offset);
+ pr_cont(" .text @ 0x%pK\n", _text);
+ pr_cont(" .data @ 0x%pK\n", _sdata);
+ pr_cont(" .bss @ 0x%pK\n", __bss_start);
+ }
+}
+
+static int kernel_location_notifier_fn(struct notifier_block *self,
+ unsigned long v, void *p)
+{
+ show_kernel_relocation(KERN_EMERG);
+ return NOTIFY_DONE;
+}
+
+static struct notifier_block kernel_location_notifier = {
+ .notifier_call = kernel_location_notifier_fn
+};
+
+static int __init register_kernel_offset_dumper(void)
+{
+ atomic_notifier_chain_register(&panic_notifier_list,
+ &kernel_location_notifier);
+ return 0;
+}
+__initcall(register_kernel_offset_dumper);
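
To make the KASLR seed path in the new relocate.c easier to follow, here is a standalone userspace rendering of the rotate-and-XOR mixing plus the offset derivation. It is illustrative only: the banner string and the 16MB maximum offset are placeholders, and the real kernel additionally mixes in random_get_entropy() and an optional "kaslr-seed" device tree property, then skips past the kernel image if the offset would overlap it.

/*
 * Illustrative only, not part of the patch: the entropy mixing behind
 * determine_relocation_address().
 */
#include <stdio.h>
#include <string.h>

static unsigned long rotate_xor(unsigned long hash, const void *area,
				size_t size)
{
	const unsigned long *ptr = area;
	size_t i;

	for (i = 0; i < size / sizeof(hash); i++) {
		/* rotate by an odd number of bits (7) and XOR in a word */
		hash = (hash << (sizeof(hash) * 8 - 7)) | (hash >> 7);
		hash ^= ptr[i];
	}
	return hash;
}

int main(void)
{
	const char banner[] = "Linux version 4.7.0 (example build banner)";
	unsigned long max_offset = 0x1000000;	/* 16MB, for illustration */
	unsigned long hash, offset;

	hash = rotate_xor(0, banner, strlen(banner));
	offset = (hash << 16) & (max_offset - 1);

	printf("hash %#lx, derived relocation offset %#lx\n", hash, offset);
	return 0;
}

The offset is always a multiple of 64KB because the hash is shifted left by 16 bits before masking, which matches the 0xffff alignment check in relocation_addr_valid().
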
diff --git a/arch/mips/kernel/scall32-o32.S b/arch/mips/kernel/scall32-o32.S
index a56317444bda..c8e43e0c4066 100644
--- a/arch/mips/kernel/scall32-o32.S
+++ b/arch/mips/kernel/scall32-o32.S
@@ -35,7 +35,6 @@ NESTED(handle_sys, PT_SIZE, sp)
lw t1, PT_EPC(sp) # skip syscall on return
- subu v0, v0, __NR_O32_Linux # check syscall number
addiu t1, 4 # skip to next instruction
sw t1, PT_EPC(sp)
@@ -89,6 +88,7 @@ loads_done:
and t0, t1
bnez t0, syscall_trace_entry # -> yes
syscall_common:
+ subu v0, v0, __NR_O32_Linux # check syscall number
sltiu t0, v0, __NR_O32_Linux_syscalls + 1
beqz t0, illegal_syscall
@@ -118,24 +118,23 @@ o32_syscall_exit:
syscall_trace_entry:
SAVE_STATIC
- move s0, v0
move a0, sp
/*
* syscall number is in v0 unless we called syscall(__NR_###)
* where the real syscall number is in a0
*/
- addiu a1, v0, __NR_O32_Linux
- bnez v0, 1f /* __NR_syscall at offset 0 */
+ move a1, v0
+ subu t2, v0, __NR_O32_Linux
+ bnez t2, 1f /* __NR_syscall at offset 0 */
lw a1, PT_R4(sp)
1: jal syscall_trace_enter
bltz v0, 1f # seccomp failed? Skip syscall
- move v0, s0 # restore syscall
-
RESTORE_STATIC
+ lw v0, PT_R2(sp) # Restore syscall (maybe modified)
lw a0, PT_R4(sp) # Restore argument registers
lw a1, PT_R5(sp)
lw a2, PT_R6(sp)
@@ -596,3 +595,5 @@ EXPORT(sys_call_table)
PTR sys_membarrier
PTR sys_mlock2
PTR sys_copy_file_range /* 4360 */
+ PTR sys_preadv2
+ PTR sys_pwritev2
diff --git a/arch/mips/kernel/scall64-64.S b/arch/mips/kernel/scall64-64.S
index 2b2dc14610d0..e6ede125059f 100644
--- a/arch/mips/kernel/scall64-64.S
+++ b/arch/mips/kernel/scall64-64.S
@@ -82,15 +82,14 @@ n64_syscall_exit:
syscall_trace_entry:
SAVE_STATIC
- move s0, v0
move a0, sp
move a1, v0
jal syscall_trace_enter
bltz v0, 1f # seccomp failed? Skip syscall
- move v0, s0
RESTORE_STATIC
+ ld v0, PT_R2(sp) # Restore syscall (maybe modified)
ld a0, PT_R4(sp) # Restore argument registers
ld a1, PT_R5(sp)
ld a2, PT_R6(sp)
@@ -434,4 +433,6 @@ EXPORT(sys_call_table)
PTR sys_membarrier
PTR sys_mlock2
PTR sys_copy_file_range /* 5320 */
+ PTR sys_preadv2
+ PTR sys_pwritev2
.size sys_call_table,.-sys_call_table
diff --git a/arch/mips/kernel/scall64-n32.S b/arch/mips/kernel/scall64-n32.S
index 2bf5c8593d91..51d3988933f8 100644
--- a/arch/mips/kernel/scall64-n32.S
+++ b/arch/mips/kernel/scall64-n32.S
@@ -42,9 +42,6 @@ NESTED(handle_sysn32, PT_SIZE, sp)
#endif
beqz t0, not_n32_scall
- dsll t0, v0, 3 # offset into table
- ld t2, (sysn32_call_table - (__NR_N32_Linux * 8))(t0)
-
sd a3, PT_R26(sp) # save a3 for syscall restarting
li t1, _TIF_WORK_SYSCALL_ENTRY
@@ -53,6 +50,9 @@ NESTED(handle_sysn32, PT_SIZE, sp)
bnez t0, n32_syscall_trace_entry
syscall_common:
+ dsll t0, v0, 3 # offset into table
+ ld t2, (sysn32_call_table - (__NR_N32_Linux * 8))(t0)
+
jalr t2 # Do The Real Thing (TM)
li t0, -EMAXERRNO - 1 # error?
@@ -71,21 +71,25 @@ syscall_common:
n32_syscall_trace_entry:
SAVE_STATIC
- move s0, t2
move a0, sp
move a1, v0
jal syscall_trace_enter
bltz v0, 1f # seccomp failed? Skip syscall
- move t2, s0
RESTORE_STATIC
+ ld v0, PT_R2(sp) # Restore syscall (maybe modified)
ld a0, PT_R4(sp) # Restore argument registers
ld a1, PT_R5(sp)
ld a2, PT_R6(sp)
ld a3, PT_R7(sp)
ld a4, PT_R8(sp)
ld a5, PT_R9(sp)
+
+ dsubu t2, v0, __NR_N32_Linux # check (new) syscall number
+ sltiu t0, t2, __NR_N32_Linux_syscalls + 1
+ beqz t0, not_n32_scall
+
j syscall_common
1: j syscall_exit
@@ -344,7 +348,7 @@ EXPORT(sysn32_call_table)
PTR sys_ni_syscall /* available, was setaltroot */
PTR sys_add_key
PTR sys_request_key
- PTR sys_keyctl /* 6245 */
+ PTR compat_sys_keyctl /* 6245 */
PTR sys_set_thread_area
PTR sys_inotify_init
PTR sys_inotify_add_watch
@@ -424,4 +428,6 @@ EXPORT(sysn32_call_table)
PTR sys_membarrier
PTR sys_mlock2
PTR sys_copy_file_range
+ PTR compat_sys_preadv2 /* 6325 */
+ PTR compat_sys_pwritev2
.size sysn32_call_table,.-sysn32_call_table
diff --git a/arch/mips/kernel/scall64-o32.S b/arch/mips/kernel/scall64-o32.S
index c5b759e584c7..6efa7136748f 100644
--- a/arch/mips/kernel/scall64-o32.S
+++ b/arch/mips/kernel/scall64-o32.S
@@ -52,9 +52,6 @@ NESTED(handle_sys, PT_SIZE, sp)
sll a2, a2, 0
sll a3, a3, 0
- dsll t0, v0, 3 # offset into table
- ld t2, (sys32_call_table - (__NR_O32_Linux * 8))(t0)
-
sd a3, PT_R26(sp) # save a3 for syscall restarting
/*
@@ -88,6 +85,9 @@ loads_done:
bnez t0, trace_a_syscall
syscall_common:
+ dsll t0, v0, 3 # offset into table
+ ld t2, (sys32_call_table - (__NR_O32_Linux * 8))(t0)
+
jalr t2 # Do The Real Thing (TM)
li t0, -EMAXERRNO - 1 # error?
@@ -112,7 +112,6 @@ trace_a_syscall:
sd a6, PT_R10(sp)
sd a7, PT_R11(sp) # For indirect syscalls
- move s0, t2 # Save syscall pointer
move a0, sp
/*
* absolute syscall number is in v0 unless we called syscall(__NR_###)
@@ -133,8 +132,8 @@ trace_a_syscall:
bltz v0, 1f # seccomp failed? Skip syscall
- move t2, s0
RESTORE_STATIC
+ ld v0, PT_R2(sp) # Restore syscall (maybe modified)
ld a0, PT_R4(sp) # Restore argument registers
ld a1, PT_R5(sp)
ld a2, PT_R6(sp)
@@ -143,6 +142,11 @@ trace_a_syscall:
ld a5, PT_R9(sp)
ld a6, PT_R10(sp)
ld a7, PT_R11(sp) # For indirect syscalls
+
+ dsubu t0, v0, __NR_O32_Linux # check (new) syscall number
+ sltiu t0, t0, __NR_O32_Linux_syscalls + 1
+ beqz t0, not_o32_scall
+
j syscall_common
1: j syscall_exit
@@ -500,7 +504,7 @@ EXPORT(sys32_call_table)
PTR sys_ni_syscall /* available, was setaltroot */
PTR sys_add_key /* 4280 */
PTR sys_request_key
- PTR sys_keyctl
+ PTR compat_sys_keyctl
PTR sys_set_thread_area
PTR sys_inotify_init
PTR sys_inotify_add_watch /* 4285 */
@@ -579,4 +583,6 @@ EXPORT(sys32_call_table)
PTR sys_membarrier
PTR sys_mlock2
PTR sys_copy_file_range /* 4360 */
+ PTR compat_sys_preadv2
+ PTR compat_sys_pwritev2
.size sys32_call_table,.-sys32_call_table
diff --git a/arch/mips/kernel/segment.c b/arch/mips/kernel/segment.c
index 87bc74a5a518..2703f218202e 100644
--- a/arch/mips/kernel/segment.c
+++ b/arch/mips/kernel/segment.c
@@ -26,17 +26,20 @@ static void build_segment_config(char *str, unsigned int cfg)
/*
* Access modes MK, MSK and MUSK are mapped segments. Therefore
- * there is no direct physical address mapping.
+ * there is no direct physical address mapping unless it becomes
+ * unmapped uncached at error level due to EU.
*/
- if ((am == 0) || (am > 3)) {
+ if ((am == 0) || (am > 3) || (cfg & MIPS_SEGCFG_EU))
str += sprintf(str, " %03lx",
((cfg & MIPS_SEGCFG_PA) >> MIPS_SEGCFG_PA_SHIFT));
+ else
+ str += sprintf(str, " UND");
+
+ if ((am == 0) || (am > 3))
str += sprintf(str, " %01ld",
((cfg & MIPS_SEGCFG_C) >> MIPS_SEGCFG_C_SHIFT));
- } else {
- str += sprintf(str, " UND");
+ else
str += sprintf(str, " U");
- }
/* Exception configuration. */
str += sprintf(str, " %01ld\n",
diff --git a/arch/mips/kernel/setup.c b/arch/mips/kernel/setup.c
index 4f607341a793..36cf8d65c47d 100644
--- a/arch/mips/kernel/setup.c
+++ b/arch/mips/kernel/setup.c
@@ -26,6 +26,7 @@
#include <linux/sizes.h>
#include <linux/device.h>
#include <linux/dma-contiguous.h>
+#include <linux/decompress/generic.h>
#include <asm/addrspace.h>
#include <asm/bootinfo.h>
@@ -52,13 +53,6 @@ struct screen_info screen_info;
#endif
/*
- * Despite it's name this variable is even if we don't have PCI
- */
-unsigned int PCI_DMA_BUS_IS_PHYS;
-
-EXPORT_SYMBOL(PCI_DMA_BUS_IS_PHYS);
-
-/*
* Setup information
*
* These are initialized so they are in the .data section
@@ -250,6 +244,35 @@ disable:
return 0;
}
+/* In some conditions (e.g. big endian bootloader with a little endian
+ kernel), the initrd might appear byte swapped. Try to detect this and
+ byte swap it if needed. */
+static void __init maybe_bswap_initrd(void)
+{
+#if defined(CONFIG_CPU_CAVIUM_OCTEON)
+ u64 buf;
+
+ /* Check for CPIO signature */
+ if (!memcmp((void *)initrd_start, "070701", 6))
+ return;
+
+ /* Check for compressed initrd */
+ if (decompress_method((unsigned char *)initrd_start, 8, NULL))
+ return;
+
+ /* Try again with a byte swapped header */
+ buf = swab64p((u64 *)initrd_start);
+ if (!memcmp(&buf, "070701", 6) ||
+ decompress_method((unsigned char *)(&buf), 8, NULL)) {
+ unsigned long i;
+
+ pr_info("Byteswapped initrd detected\n");
+ for (i = initrd_start; i < ALIGN(initrd_end, 8); i += 8)
+ swab64s((u64 *)i);
+ }
+#endif
+}
+
static void __init finalize_initrd(void)
{
unsigned long size = initrd_end - initrd_start;
@@ -263,6 +286,8 @@ static void __init finalize_initrd(void)
goto disable;
}
+ maybe_bswap_initrd();
+
reserve_bootmem(__pa(initrd_start), size, BOOTMEM_DEFAULT);
initrd_below_start_ok = 1;
@@ -469,6 +494,29 @@ static void __init bootmem_init(void)
*/
reserve_bootmem(PFN_PHYS(mapstart), bootmap_size, BOOTMEM_DEFAULT);
+#ifdef CONFIG_RELOCATABLE
+ /*
+ * The kernel reserves all memory below its _end symbol as bootmem,
+ * but the kernel may now be at a much higher address. The memory
+ * between the original and new locations may be returned to the system.
+ */
+ if (__pa_symbol(_text) > __pa_symbol(VMLINUX_LOAD_ADDRESS)) {
+ unsigned long offset;
+ extern void show_kernel_relocation(const char *level);
+
+ offset = __pa_symbol(_text) - __pa_symbol(VMLINUX_LOAD_ADDRESS);
+ free_bootmem(__pa_symbol(VMLINUX_LOAD_ADDRESS), offset);
+
+#if defined(CONFIG_DEBUG_KERNEL) && defined(CONFIG_DEBUG_INFO)
+ /*
+		 * This information is necessary when debugging the kernel,
+		 * but is a security vulnerability otherwise!
+ */
+ show_kernel_relocation(KERN_INFO);
+#endif
+ }
+#endif
+
/*
* Reserve initrd memory if needed.
*/
@@ -624,6 +672,8 @@ static void __init request_crashkernel(struct resource *res)
#define USE_PROM_CMDLINE IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_BOOTLOADER)
#define USE_DTB_CMDLINE IS_ENABLED(CONFIG_MIPS_CMDLINE_FROM_DTB)
#define EXTEND_WITH_PROM IS_ENABLED(CONFIG_MIPS_CMDLINE_DTB_EXTEND)
+#define BUILTIN_EXTEND_WITH_PROM \
+ IS_ENABLED(CONFIG_MIPS_CMDLINE_BUILTIN_EXTEND)
static void __init arch_mem_init(char **cmdline_p)
{
@@ -657,15 +707,23 @@ static void __init arch_mem_init(char **cmdline_p)
strlcpy(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
if (EXTEND_WITH_PROM && arcs_cmdline[0]) {
- strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
+ if (boot_command_line[0])
+ strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
}
#if defined(CONFIG_CMDLINE_BOOL)
if (builtin_cmdline[0]) {
- strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
+ if (boot_command_line[0])
+ strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
strlcat(boot_command_line, builtin_cmdline, COMMAND_LINE_SIZE);
}
+
+ if (BUILTIN_EXTEND_WITH_PROM && arcs_cmdline[0]) {
+ if (boot_command_line[0])
+ strlcat(boot_command_line, " ", COMMAND_LINE_SIZE);
+ strlcat(boot_command_line, arcs_cmdline, COMMAND_LINE_SIZE);
+ }
#endif
#endif
strlcpy(command_line, boot_command_line, COMMAND_LINE_SIZE);
@@ -706,6 +764,9 @@ static void __init arch_mem_init(char **cmdline_p)
for_each_memblock(reserved, reg)
if (reg->size != 0)
reserve_bootmem(reg->base, reg->size, BOOTMEM_DEFAULT);
+
+ reserve_bootmem_region(__pa_symbol(&__nosave_begin),
+ __pa_symbol(&__nosave_end)); /* Reserve for hibernation */
}
static void __init resource_init(void)
@@ -814,6 +875,10 @@ void __init setup_arch(char **cmdline_p)
unsigned long kernelsp[NR_CPUS];
unsigned long fw_arg0, fw_arg1, fw_arg2, fw_arg3;
+#ifdef CONFIG_USE_OF
+unsigned long fw_passed_dtb;
+#endif
+
#ifdef CONFIG_DEBUG_FS
struct dentry *mips_debugfs_dir;
static int __init debugfs_mips(void)
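
The maybe_bswap_initrd() logic added above boils down to one observation: a newc-format cpio archive begins with the ASCII magic "070701", so if that magic only shows up after a 64-bit byte swap, the whole image was loaded with the wrong endianness. A minimal standalone sketch of that core check follows; it is not part of the patch, uses a hand-built example header, and omits the second test the kernel performs via decompress_method() for compressed images.

/*
 * Standalone sketch, not part of the patch: detect a 64-bit byte swapped
 * newc cpio header.  The example bytes are "07070100" as a wrong-endian
 * loader would have left them in memory.
 */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

int main(void)
{
	unsigned char hdr[8] = { '0', '0', '1', '0', '7', '0', '7', '0' };
	uint64_t buf;

	if (!memcmp(hdr, "070701", 6)) {
		puts("initrd looks sane");
		return 0;
	}

	/* Try again with the first 64-bit word byte swapped */
	memcpy(&buf, hdr, sizeof(buf));
	buf = __builtin_bswap64(buf);
	if (!memcmp(&buf, "070701", 6))
		puts("byteswapped initrd detected; swab64 the whole image");
	else
		puts("not a cpio archive");

	return 0;
}
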
diff --git a/arch/mips/kernel/signal.c b/arch/mips/kernel/signal.c
index bf792e2839a6..9e224469c788 100644
--- a/arch/mips/kernel/signal.c
+++ b/arch/mips/kernel/signal.c
@@ -165,7 +165,7 @@ static int save_msa_extcontext(void __user *buf)
* should already have been done when handling scalar FP
* context.
*/
- BUG_ON(config_enabled(CONFIG_EVA));
+ BUG_ON(IS_ENABLED(CONFIG_EVA));
err = __put_user(read_msa_csr(), &msa->csr);
err |= _save_msa_all_upper(&msa->wr);
@@ -195,6 +195,9 @@ static int restore_msa_extcontext(void __user *buf, unsigned int size)
unsigned int csr;
int i, err;
+ if (!IS_ENABLED(CONFIG_CPU_HAS_MSA))
+ return SIGSYS;
+
if (size != sizeof(*msa))
return -EINVAL;
@@ -212,7 +215,7 @@ static int restore_msa_extcontext(void __user *buf, unsigned int size)
* scalar FP context, so FPU & MSA should have already been
* disabled whilst handling scalar FP context.
*/
- BUG_ON(config_enabled(CONFIG_EVA));
+ BUG_ON(IS_ENABLED(CONFIG_EVA));
write_msa_csr(csr);
err |= _restore_msa_all_upper(&msa->wr);
@@ -312,7 +315,7 @@ int protected_save_fp_context(void __user *sc)
* EVA does not have userland equivalents of ldc1 or sdc1, so
* save to the kernel FP context & copy that to userland below.
*/
- if (config_enabled(CONFIG_EVA))
+ if (IS_ENABLED(CONFIG_EVA))
lose_fpu(1);
while (1) {
@@ -375,7 +378,7 @@ int protected_restore_fp_context(void __user *sc)
* disable the FPU here such that the code below simply copies to
* the kernel FP context.
*/
- if (config_enabled(CONFIG_EVA))
+ if (IS_ENABLED(CONFIG_EVA))
lose_fpu(0);
while (1) {
@@ -398,8 +401,8 @@ int protected_restore_fp_context(void __user *sc)
}
fp_done:
- if (used & USED_EXTCONTEXT)
- err |= restore_extcontext(sc_to_extcontext(sc));
+ if (!err && (used & USED_EXTCONTEXT))
+ err = restore_extcontext(sc_to_extcontext(sc));
return err ?: sig;
}
@@ -767,15 +770,15 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
sigset_t *oldset = sigmask_to_save();
int ret;
struct mips_abi *abi = current->thread.abi;
-#ifdef CONFIG_CPU_MICROMIPS
- void *vdso;
- unsigned long tmp = (unsigned long)current->mm->context.vdso;
-
- set_isa16_mode(tmp);
- vdso = (void *)tmp;
-#else
void *vdso = current->mm->context.vdso;
-#endif
+
+ /*
+ * If we were emulating a delay slot instruction, exit that frame such
+ * that addresses in the sigframe are as expected for userland and we
+ * don't have a problem if we reuse the thread's frame for an
+ * instruction within the signal handler.
+ */
+ dsemul_thread_rollback(regs);
if (regs->regs[0]) {
switch(regs->regs[2]) {
@@ -798,7 +801,7 @@ static void handle_signal(struct ksignal *ksig, struct pt_regs *regs)
regs->regs[0] = 0; /* Don't deal with this again. */
}
- if (sig_uses_siginfo(&ksig->ka))
+ if (sig_uses_siginfo(&ksig->ka, abi))
ret = abi->setup_rt_frame(vdso + abi->vdso->off_rt_sigreturn,
ksig, regs, oldset);
else
diff --git a/arch/mips/kernel/signal32.c b/arch/mips/kernel/signal32.c
index 4909639aa35b..97b7c51b8251 100644
--- a/arch/mips/kernel/signal32.c
+++ b/arch/mips/kernel/signal32.c
@@ -6,129 +6,26 @@
* Copyright (C) 1991, 1992 Linus Torvalds
* Copyright (C) 1994 - 2000, 2006 Ralf Baechle
* Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2016, Imagination Technologies Ltd.
*/
-#include <linux/cache.h>
-#include <linux/compat.h>
-#include <linux/sched.h>
-#include <linux/mm.h>
-#include <linux/smp.h>
+#include <linux/compiler.h>
+#include <linux/errno.h>
#include <linux/kernel.h>
#include <linux/signal.h>
#include <linux/syscalls.h>
-#include <linux/errno.h>
-#include <linux/wait.h>
-#include <linux/ptrace.h>
-#include <linux/suspend.h>
-#include <linux/compiler.h>
-#include <linux/uaccess.h>
-#include <asm/abi.h>
-#include <asm/asm.h>
+#include <asm/compat.h>
#include <asm/compat-signal.h>
-#include <linux/bitops.h>
-#include <asm/cacheflush.h>
-#include <asm/sim.h>
-#include <asm/ucontext.h>
-#include <asm/fpu.h>
-#include <asm/war.h>
-#include <asm/dsp.h>
+#include <asm/uaccess.h>
+#include <asm/unistd.h>
#include "signal-common.h"
-/*
- * Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
- */
-#define __NR_O32_restart_syscall 4253
-
/* 32-bit compatibility types */
typedef unsigned int __sighandler32_t;
typedef void (*vfptr_t)(void);
-struct ucontext32 {
- u32 uc_flags;
- s32 uc_link;
- compat_stack_t uc_stack;
- struct sigcontext32 uc_mcontext;
- compat_sigset_t uc_sigmask; /* mask last for extensibility */
-};
-
-struct sigframe32 {
- u32 sf_ass[4]; /* argument save space for o32 */
- u32 sf_pad[2]; /* Was: signal trampoline */
- struct sigcontext32 sf_sc;
- compat_sigset_t sf_mask;
-};
-
-struct rt_sigframe32 {
- u32 rs_ass[4]; /* argument save space for o32 */
- u32 rs_pad[2]; /* Was: signal trampoline */
- compat_siginfo_t rs_info;
- struct ucontext32 rs_uc;
-};
-
-static int setup_sigcontext32(struct pt_regs *regs,
- struct sigcontext32 __user *sc)
-{
- int err = 0;
- int i;
-
- err |= __put_user(regs->cp0_epc, &sc->sc_pc);
-
- err |= __put_user(0, &sc->sc_regs[0]);
- for (i = 1; i < 32; i++)
- err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
-
- err |= __put_user(regs->hi, &sc->sc_mdhi);
- err |= __put_user(regs->lo, &sc->sc_mdlo);
- if (cpu_has_dsp) {
- err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
- err |= __put_user(mfhi1(), &sc->sc_hi1);
- err |= __put_user(mflo1(), &sc->sc_lo1);
- err |= __put_user(mfhi2(), &sc->sc_hi2);
- err |= __put_user(mflo2(), &sc->sc_lo2);
- err |= __put_user(mfhi3(), &sc->sc_hi3);
- err |= __put_user(mflo3(), &sc->sc_lo3);
- }
-
- /*
- * Save FPU state to signal context. Signal handler
- * will "inherit" current FPU state.
- */
- err |= protected_save_fp_context(sc);
-
- return err;
-}
-
-static int restore_sigcontext32(struct pt_regs *regs,
- struct sigcontext32 __user *sc)
-{
- int err = 0;
- s32 treg;
- int i;
-
- /* Always make any pending restarted system calls return -EINTR */
- current->restart_block.fn = do_no_restart_syscall;
-
- err |= __get_user(regs->cp0_epc, &sc->sc_pc);
- err |= __get_user(regs->hi, &sc->sc_mdhi);
- err |= __get_user(regs->lo, &sc->sc_mdlo);
- if (cpu_has_dsp) {
- err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
- err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
- err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
- err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
- err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
- err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
- err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
- }
-
- for (i = 1; i < 32; i++)
- err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
-
- return err ?: protected_restore_fp_context(sc);
-}
-
/*
* Atomically swap in the new signal mask, and wait for a signal.
*/
@@ -227,6 +124,12 @@ int copy_siginfo_to_user32(compat_siginfo_t __user *to, const siginfo_t *from)
err |= __put_user(from->si_uid, &to->si_uid);
err |= __put_user(from->si_int, &to->si_int);
break;
+ case __SI_SYS >> 16:
+ err |= __copy_to_user(&to->si_call_addr, &from->si_call_addr,
+ sizeof(compat_uptr_t));
+ err |= __put_user(from->si_syscall, &to->si_syscall);
+ err |= __put_user(from->si_arch, &to->si_arch);
+ break;
}
}
return err;
@@ -241,176 +144,3 @@ int copy_siginfo_from_user32(siginfo_t *to, compat_siginfo_t __user *from)
return 0;
}
-
-asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
-{
- struct sigframe32 __user *frame;
- sigset_t blocked;
- int sig;
-
- frame = (struct sigframe32 __user *) regs.regs[29];
- if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
- goto badframe;
- if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask))
- goto badframe;
-
- set_current_blocked(&blocked);
-
- sig = restore_sigcontext32(&regs, &frame->sf_sc);
- if (sig < 0)
- goto badframe;
- else if (sig)
- force_sig(sig, current);
-
- /*
- * Don't let your children do this ...
- */
- __asm__ __volatile__(
- "move\t$29, %0\n\t"
- "j\tsyscall_exit"
- :/* no outputs */
- :"r" (&regs));
- /* Unreached */
-
-badframe:
- force_sig(SIGSEGV, current);
-}
-
-asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
-{
- struct rt_sigframe32 __user *frame;
- sigset_t set;
- int sig;
-
- frame = (struct rt_sigframe32 __user *) regs.regs[29];
- if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
- goto badframe;
- if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
- goto badframe;
-
- set_current_blocked(&set);
-
- sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext);
- if (sig < 0)
- goto badframe;
- else if (sig)
- force_sig(sig, current);
-
- if (compat_restore_altstack(&frame->rs_uc.uc_stack))
- goto badframe;
-
- /*
- * Don't let your children do this ...
- */
- __asm__ __volatile__(
- "move\t$29, %0\n\t"
- "j\tsyscall_exit"
- :/* no outputs */
- :"r" (&regs));
- /* Unreached */
-
-badframe:
- force_sig(SIGSEGV, current);
-}
-
-static int setup_frame_32(void *sig_return, struct ksignal *ksig,
- struct pt_regs *regs, sigset_t *set)
-{
- struct sigframe32 __user *frame;
- int err = 0;
-
- frame = get_sigframe(ksig, regs, sizeof(*frame));
- if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
- return -EFAULT;
-
- err |= setup_sigcontext32(regs, &frame->sf_sc);
- err |= __copy_conv_sigset_to_user(&frame->sf_mask, set);
-
- if (err)
- return -EFAULT;
-
- /*
- * Arguments to signal handler:
- *
- * a0 = signal number
- * a1 = 0 (should be cause)
- * a2 = pointer to struct sigcontext
- *
- * $25 and c0_epc point to the signal handler, $29 points to the
- * struct sigframe.
- */
- regs->regs[ 4] = ksig->sig;
- regs->regs[ 5] = 0;
- regs->regs[ 6] = (unsigned long) &frame->sf_sc;
- regs->regs[29] = (unsigned long) frame;
- regs->regs[31] = (unsigned long) sig_return;
- regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
-
- DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
- current->comm, current->pid,
- frame, regs->cp0_epc, regs->regs[31]);
-
- return 0;
-}
-
-static int setup_rt_frame_32(void *sig_return, struct ksignal *ksig,
- struct pt_regs *regs, sigset_t *set)
-{
- struct rt_sigframe32 __user *frame;
- int err = 0;
-
- frame = get_sigframe(ksig, regs, sizeof(*frame));
- if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
- return -EFAULT;
-
- /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */
- err |= copy_siginfo_to_user32(&frame->rs_info, &ksig->info);
-
- /* Create the ucontext. */
- err |= __put_user(0, &frame->rs_uc.uc_flags);
- err |= __put_user(0, &frame->rs_uc.uc_link);
- err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
- err |= setup_sigcontext32(regs, &frame->rs_uc.uc_mcontext);
- err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set);
-
- if (err)
- return -EFAULT;
-
- /*
- * Arguments to signal handler:
- *
- * a0 = signal number
- * a1 = 0 (should be cause)
- * a2 = pointer to ucontext
- *
- * $25 and c0_epc point to the signal handler, $29 points to
- * the struct rt_sigframe32.
- */
- regs->regs[ 4] = ksig->sig;
- regs->regs[ 5] = (unsigned long) &frame->rs_info;
- regs->regs[ 6] = (unsigned long) &frame->rs_uc;
- regs->regs[29] = (unsigned long) frame;
- regs->regs[31] = (unsigned long) sig_return;
- regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
-
- DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
- current->comm, current->pid,
- frame, regs->cp0_epc, regs->regs[31]);
-
- return 0;
-}
-
-/*
- * o32 compatibility on 64-bit kernels, without DSP ASE
- */
-struct mips_abi mips_abi_32 = {
- .setup_frame = setup_frame_32,
- .setup_rt_frame = setup_rt_frame_32,
- .restart = __NR_O32_restart_syscall,
-
- .off_sc_fpregs = offsetof(struct sigcontext32, sc_fpregs),
- .off_sc_fpc_csr = offsetof(struct sigcontext32, sc_fpc_csr),
- .off_sc_used_math = offsetof(struct sigcontext32, sc_used_math),
-
- .vdso = &vdso_image_o32,
-};
diff --git a/arch/mips/kernel/signal_o32.c b/arch/mips/kernel/signal_o32.c
new file mode 100644
index 000000000000..5e169fc5ca5c
--- /dev/null
+++ b/arch/mips/kernel/signal_o32.c
@@ -0,0 +1,285 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Copyright (C) 1991, 1992 Linus Torvalds
+ * Copyright (C) 1994 - 2000, 2006 Ralf Baechle
+ * Copyright (C) 1999, 2000 Silicon Graphics, Inc.
+ * Copyright (C) 2016, Imagination Technologies Ltd.
+ */
+#include <linux/compiler.h>
+#include <linux/errno.h>
+#include <linux/signal.h>
+#include <linux/uaccess.h>
+
+#include <asm/abi.h>
+#include <asm/compat-signal.h>
+#include <asm/dsp.h>
+#include <asm/sim.h>
+#include <asm/unistd.h>
+
+#include "signal-common.h"
+
+/*
+ * Including <asm/unistd.h> would give use the 64-bit syscall numbers ...
+ */
+#define __NR_O32_restart_syscall 4253
+
+struct sigframe32 {
+ u32 sf_ass[4]; /* argument save space for o32 */
+ u32 sf_pad[2]; /* Was: signal trampoline */
+ struct sigcontext32 sf_sc;
+ compat_sigset_t sf_mask;
+};
+
+struct ucontext32 {
+ u32 uc_flags;
+ s32 uc_link;
+ compat_stack_t uc_stack;
+ struct sigcontext32 uc_mcontext;
+ compat_sigset_t uc_sigmask; /* mask last for extensibility */
+};
+
+struct rt_sigframe32 {
+ u32 rs_ass[4]; /* argument save space for o32 */
+ u32 rs_pad[2]; /* Was: signal trampoline */
+ compat_siginfo_t rs_info;
+ struct ucontext32 rs_uc;
+};
+
+static int setup_sigcontext32(struct pt_regs *regs,
+ struct sigcontext32 __user *sc)
+{
+ int err = 0;
+ int i;
+
+ err |= __put_user(regs->cp0_epc, &sc->sc_pc);
+
+ err |= __put_user(0, &sc->sc_regs[0]);
+ for (i = 1; i < 32; i++)
+ err |= __put_user(regs->regs[i], &sc->sc_regs[i]);
+
+ err |= __put_user(regs->hi, &sc->sc_mdhi);
+ err |= __put_user(regs->lo, &sc->sc_mdlo);
+ if (cpu_has_dsp) {
+ err |= __put_user(rddsp(DSP_MASK), &sc->sc_dsp);
+ err |= __put_user(mfhi1(), &sc->sc_hi1);
+ err |= __put_user(mflo1(), &sc->sc_lo1);
+ err |= __put_user(mfhi2(), &sc->sc_hi2);
+ err |= __put_user(mflo2(), &sc->sc_lo2);
+ err |= __put_user(mfhi3(), &sc->sc_hi3);
+ err |= __put_user(mflo3(), &sc->sc_lo3);
+ }
+
+ /*
+ * Save FPU state to signal context. Signal handler
+ * will "inherit" current FPU state.
+ */
+ err |= protected_save_fp_context(sc);
+
+ return err;
+}
+
+static int restore_sigcontext32(struct pt_regs *regs,
+ struct sigcontext32 __user *sc)
+{
+ int err = 0;
+ s32 treg;
+ int i;
+
+ /* Always make any pending restarted system calls return -EINTR */
+ current->restart_block.fn = do_no_restart_syscall;
+
+ err |= __get_user(regs->cp0_epc, &sc->sc_pc);
+ err |= __get_user(regs->hi, &sc->sc_mdhi);
+ err |= __get_user(regs->lo, &sc->sc_mdlo);
+ if (cpu_has_dsp) {
+ err |= __get_user(treg, &sc->sc_hi1); mthi1(treg);
+ err |= __get_user(treg, &sc->sc_lo1); mtlo1(treg);
+ err |= __get_user(treg, &sc->sc_hi2); mthi2(treg);
+ err |= __get_user(treg, &sc->sc_lo2); mtlo2(treg);
+ err |= __get_user(treg, &sc->sc_hi3); mthi3(treg);
+ err |= __get_user(treg, &sc->sc_lo3); mtlo3(treg);
+ err |= __get_user(treg, &sc->sc_dsp); wrdsp(treg, DSP_MASK);
+ }
+
+ for (i = 1; i < 32; i++)
+ err |= __get_user(regs->regs[i], &sc->sc_regs[i]);
+
+ return err ?: protected_restore_fp_context(sc);
+}
+
+static int setup_frame_32(void *sig_return, struct ksignal *ksig,
+ struct pt_regs *regs, sigset_t *set)
+{
+ struct sigframe32 __user *frame;
+ int err = 0;
+
+ frame = get_sigframe(ksig, regs, sizeof(*frame));
+ if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
+ return -EFAULT;
+
+ err |= setup_sigcontext32(regs, &frame->sf_sc);
+ err |= __copy_conv_sigset_to_user(&frame->sf_mask, set);
+
+ if (err)
+ return -EFAULT;
+
+ /*
+ * Arguments to signal handler:
+ *
+ * a0 = signal number
+ * a1 = 0 (should be cause)
+ * a2 = pointer to struct sigcontext
+ *
+ * $25 and c0_epc point to the signal handler, $29 points to the
+ * struct sigframe.
+ */
+ regs->regs[ 4] = ksig->sig;
+ regs->regs[ 5] = 0;
+ regs->regs[ 6] = (unsigned long) &frame->sf_sc;
+ regs->regs[29] = (unsigned long) frame;
+ regs->regs[31] = (unsigned long) sig_return;
+ regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
+
+ DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
+ current->comm, current->pid,
+ frame, regs->cp0_epc, regs->regs[31]);
+
+ return 0;
+}
+
+asmlinkage void sys32_rt_sigreturn(nabi_no_regargs struct pt_regs regs)
+{
+ struct rt_sigframe32 __user *frame;
+ sigset_t set;
+ int sig;
+
+ frame = (struct rt_sigframe32 __user *) regs.regs[29];
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_conv_sigset_from_user(&set, &frame->rs_uc.uc_sigmask))
+ goto badframe;
+
+ set_current_blocked(&set);
+
+ sig = restore_sigcontext32(&regs, &frame->rs_uc.uc_mcontext);
+ if (sig < 0)
+ goto badframe;
+ else if (sig)
+ force_sig(sig, current);
+
+ if (compat_restore_altstack(&frame->rs_uc.uc_stack))
+ goto badframe;
+
+ /*
+ * Don't let your children do this ...
+ */
+ __asm__ __volatile__(
+ "move\t$29, %0\n\t"
+ "j\tsyscall_exit"
+ :/* no outputs */
+ :"r" (&regs));
+ /* Unreached */
+
+badframe:
+ force_sig(SIGSEGV, current);
+}
+
+static int setup_rt_frame_32(void *sig_return, struct ksignal *ksig,
+ struct pt_regs *regs, sigset_t *set)
+{
+ struct rt_sigframe32 __user *frame;
+ int err = 0;
+
+ frame = get_sigframe(ksig, regs, sizeof(*frame));
+ if (!access_ok(VERIFY_WRITE, frame, sizeof (*frame)))
+ return -EFAULT;
+
+ /* Convert (siginfo_t -> compat_siginfo_t) and copy to user. */
+ err |= copy_siginfo_to_user32(&frame->rs_info, &ksig->info);
+
+ /* Create the ucontext. */
+ err |= __put_user(0, &frame->rs_uc.uc_flags);
+ err |= __put_user(0, &frame->rs_uc.uc_link);
+ err |= __compat_save_altstack(&frame->rs_uc.uc_stack, regs->regs[29]);
+ err |= setup_sigcontext32(regs, &frame->rs_uc.uc_mcontext);
+ err |= __copy_conv_sigset_to_user(&frame->rs_uc.uc_sigmask, set);
+
+ if (err)
+ return -EFAULT;
+
+ /*
+ * Arguments to signal handler:
+ *
+ * a0 = signal number
+ * a1 = 0 (should be cause)
+ * a2 = pointer to ucontext
+ *
+ * $25 and c0_epc point to the signal handler, $29 points to
+ * the struct rt_sigframe32.
+ */
+ regs->regs[ 4] = ksig->sig;
+ regs->regs[ 5] = (unsigned long) &frame->rs_info;
+ regs->regs[ 6] = (unsigned long) &frame->rs_uc;
+ regs->regs[29] = (unsigned long) frame;
+ regs->regs[31] = (unsigned long) sig_return;
+ regs->cp0_epc = regs->regs[25] = (unsigned long) ksig->ka.sa.sa_handler;
+
+ DEBUGP("SIG deliver (%s:%d): sp=0x%p pc=0x%lx ra=0x%lx\n",
+ current->comm, current->pid,
+ frame, regs->cp0_epc, regs->regs[31]);
+
+ return 0;
+}
+
+/*
+ * o32 compatibility on 64-bit kernels, without DSP ASE
+ */
+struct mips_abi mips_abi_32 = {
+ .setup_frame = setup_frame_32,
+ .setup_rt_frame = setup_rt_frame_32,
+ .restart = __NR_O32_restart_syscall,
+
+ .off_sc_fpregs = offsetof(struct sigcontext32, sc_fpregs),
+ .off_sc_fpc_csr = offsetof(struct sigcontext32, sc_fpc_csr),
+ .off_sc_used_math = offsetof(struct sigcontext32, sc_used_math),
+
+ .vdso = &vdso_image_o32,
+};
+
+
+asmlinkage void sys32_sigreturn(nabi_no_regargs struct pt_regs regs)
+{
+ struct sigframe32 __user *frame;
+ sigset_t blocked;
+ int sig;
+
+ frame = (struct sigframe32 __user *) regs.regs[29];
+ if (!access_ok(VERIFY_READ, frame, sizeof(*frame)))
+ goto badframe;
+ if (__copy_conv_sigset_from_user(&blocked, &frame->sf_mask))
+ goto badframe;
+
+ set_current_blocked(&blocked);
+
+ sig = restore_sigcontext32(&regs, &frame->sf_sc);
+ if (sig < 0)
+ goto badframe;
+ else if (sig)
+ force_sig(sig, current);
+
+ /*
+ * Don't let your children do this ...
+ */
+ __asm__ __volatile__(
+ "move\t$29, %0\n\t"
+ "j\tsyscall_exit"
+ :/* no outputs */
+ :"r" (&regs));
+ /* Unreached */
+
+badframe:
+ force_sig(SIGSEGV, current);
+}
diff --git a/arch/mips/kernel/smp-bmips.c b/arch/mips/kernel/smp-bmips.c
index 78cf8c2f1de0..6d0f1321e084 100644
--- a/arch/mips/kernel/smp-bmips.c
+++ b/arch/mips/kernel/smp-bmips.c
@@ -243,6 +243,7 @@ static void bmips_init_secondary(void)
break;
case CPU_BMIPS5000:
write_c0_brcm_action(ACTION_CLR_IPI(smp_processor_id(), 0));
+ current_cpu_data.core = (read_c0_brcm_config() >> 25) & 3;
break;
}
}
@@ -362,6 +363,7 @@ static int bmips_cpu_disable(void)
pr_info("SMP: CPU%d is offline\n", cpu);
set_cpu_online(cpu, false);
+ calculate_cpu_foreign_map();
cpumask_clear_cpu(cpu, &cpu_callin_map);
clear_c0_status(IE_IRQ5);
@@ -565,3 +567,90 @@ asmlinkage void __weak plat_wired_tlb_setup(void)
* once the wired entries are present.
*/
}
+
+void __init bmips_cpu_setup(void)
+{
+ void __iomem __maybe_unused *cbr = BMIPS_GET_CBR();
+ u32 __maybe_unused cfg;
+
+ switch (current_cpu_type()) {
+ case CPU_BMIPS3300:
+ /* Set BIU to async mode */
+ set_c0_brcm_bus_pll(BIT(22));
+ __sync();
+
+ /* put the BIU back in sync mode */
+ clear_c0_brcm_bus_pll(BIT(22));
+
+ /* clear BHTD to enable branch history table */
+ clear_c0_brcm_reset(BIT(16));
+
+ /* Flush and enable RAC */
+ cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
+ __raw_writel(cfg | 0x100, BMIPS_RAC_CONFIG);
+ __raw_readl(cbr + BMIPS_RAC_CONFIG);
+
+ cfg = __raw_readl(cbr + BMIPS_RAC_CONFIG);
+ __raw_writel(cfg | 0xf, BMIPS_RAC_CONFIG);
+ __raw_readl(cbr + BMIPS_RAC_CONFIG);
+
+ cfg = __raw_readl(cbr + BMIPS_RAC_ADDRESS_RANGE);
+ __raw_writel(cfg | 0x0fff0000, cbr + BMIPS_RAC_ADDRESS_RANGE);
+ __raw_readl(cbr + BMIPS_RAC_ADDRESS_RANGE);
+ break;
+
+ case CPU_BMIPS4380:
+ /* CBG workaround for early BMIPS4380 CPUs */
+ switch (read_c0_prid()) {
+ case 0x2a040:
+ case 0x2a042:
+ case 0x2a044:
+ case 0x2a060:
+ cfg = __raw_readl(cbr + BMIPS_L2_CONFIG);
+ __raw_writel(cfg & ~0x07000000, cbr + BMIPS_L2_CONFIG);
+ __raw_readl(cbr + BMIPS_L2_CONFIG);
+ }
+
+ /* clear BHTD to enable branch history table */
+ clear_c0_brcm_config_0(BIT(21));
+
+ /* XI/ROTR enable */
+ set_c0_brcm_config_0(BIT(23));
+ set_c0_brcm_cmt_ctrl(BIT(15));
+ break;
+
+ case CPU_BMIPS5000:
+ /* enable RDHWR, BRDHWR */
+ set_c0_brcm_config(BIT(17) | BIT(21));
+
+ /* Disable JTB */
+ __asm__ __volatile__(
+ " .set noreorder\n"
+ " li $8, 0x5a455048\n"
+ " .word 0x4088b00f\n" /* mtc0 t0, $22, 15 */
+ " .word 0x4008b008\n" /* mfc0 t0, $22, 8 */
+ " li $9, 0x00008000\n"
+ " or $8, $8, $9\n"
+ " .word 0x4088b008\n" /* mtc0 t0, $22, 8 */
+ " sync\n"
+ " li $8, 0x0\n"
+ " .word 0x4088b00f\n" /* mtc0 t0, $22, 15 */
+ " .set reorder\n"
+ : : : "$8", "$9");
+
+ /* XI enable */
+ set_c0_brcm_config(BIT(27));
+
+ /* enable MIPS32R2 ROR instruction for XI TLB handlers */
+ __asm__ __volatile__(
+ " li $8, 0x5a455048\n"
+ " .word 0x4088b00f\n" /* mtc0 $8, $22, 15 */
+ " nop; nop; nop\n"
+ " .word 0x4008b008\n" /* mfc0 $8, $22, 8 */
+ " lui $9, 0x0100\n"
+ " or $8, $9\n"
+ " .word 0x4088b008\n" /* mtc0 $8, $22, 8 */
+ : : : "$8", "$9");
+ break;
+ }
+}
diff --git a/arch/mips/kernel/smp-cps.c b/arch/mips/kernel/smp-cps.c
index 253e1409338c..e9d9fc6c754c 100644
--- a/arch/mips/kernel/smp-cps.c
+++ b/arch/mips/kernel/smp-cps.c
@@ -27,15 +27,27 @@
#include <asm/time.h>
#include <asm/uasm.h>
+static bool threads_disabled;
static DECLARE_BITMAP(core_power, NR_CPUS);
struct core_boot_config *mips_cps_core_bootcfg;
+static int __init setup_nothreads(char *s)
+{
+ threads_disabled = true;
+ return 0;
+}
+early_param("nothreads", setup_nothreads);
+
static unsigned core_vpe_count(unsigned core)
{
unsigned cfg;
- if (!config_enabled(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
+ if (threads_disabled)
+ return 1;
+
+ if ((!IS_ENABLED(CONFIG_MIPS_MT_SMP) || !cpu_has_mipsmt)
+ && (!IS_ENABLED(CONFIG_CPU_MIPSR6) || !cpu_has_vp))
return 1;
mips_cm_lock_other(core, 0);
@@ -47,11 +59,12 @@ static unsigned core_vpe_count(unsigned core)
static void __init cps_smp_setup(void)
{
unsigned int ncores, nvpes, core_vpes;
+ unsigned long core_entry;
int c, v;
/* Detect & record VPE topology */
ncores = mips_cm_numcores();
- pr_info("VPE topology ");
+ pr_info("%s topology ", cpu_has_mips_r6 ? "VP" : "VPE");
for (c = nvpes = 0; c < ncores; c++) {
core_vpes = core_vpe_count(c);
pr_cont("%c%u", c ? ',' : '{', core_vpes);
@@ -62,7 +75,7 @@ static void __init cps_smp_setup(void)
for (v = 0; v < min_t(int, core_vpes, NR_CPUS - nvpes); v++) {
cpu_data[nvpes + v].core = c;
-#ifdef CONFIG_MIPS_MT_SMP
+#if defined(CONFIG_MIPS_MT_SMP) || defined(CONFIG_CPU_MIPSR6)
cpu_data[nvpes + v].vpe_id = v;
#endif
}
@@ -91,6 +104,11 @@ static void __init cps_smp_setup(void)
/* Make core 0 coherent with everything */
write_gcr_cl_coherence(0xff);
+ if (mips_cm_revision() >= CM_REV_CM3) {
+ core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
+ write_gcr_bev_base(core_entry);
+ }
+
#ifdef CONFIG_MIPS_MT_FPAFF
/* If we have an FPU, enroll ourselves in the FPU-full mask */
if (cpu_has_fpu)
@@ -188,7 +206,7 @@ err_out:
}
}
-static void boot_core(unsigned core)
+static void boot_core(unsigned int core, unsigned int vpe_id)
{
u32 access, stat, seq_state;
unsigned timeout;
@@ -213,6 +231,19 @@ static void boot_core(unsigned core)
if (mips_cpc_present()) {
/* Reset the core */
mips_cpc_lock_other(core);
+
+ if (mips_cm_revision() >= CM_REV_CM3) {
+ /* Run only the requested VP following the reset */
+ write_cpc_co_vp_stop(0xf);
+ write_cpc_co_vp_run(1 << vpe_id);
+
+ /*
+ * Ensure that the VP_RUN register is written before the
+ * core leaves reset.
+ */
+ wmb();
+ }
+
write_cpc_co_cmd(CPC_Cx_CMD_RESET);
timeout = 100;
@@ -250,7 +281,10 @@ static void boot_core(unsigned core)
static void remote_vpe_boot(void *dummy)
{
- mips_cps_boot_vpes();
+ unsigned core = current_cpu_data.core;
+ struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
+
+ mips_cps_boot_vpes(core_cfg, cpu_vpe_id(&current_cpu_data));
}
static void cps_boot_secondary(int cpu, struct task_struct *idle)
@@ -259,6 +293,7 @@ static void cps_boot_secondary(int cpu, struct task_struct *idle)
unsigned vpe_id = cpu_vpe_id(&cpu_data[cpu]);
struct core_boot_config *core_cfg = &mips_cps_core_bootcfg[core];
struct vpe_boot_config *vpe_cfg = &core_cfg->vpe_config[vpe_id];
+ unsigned long core_entry;
unsigned int remote;
int err;
@@ -272,10 +307,17 @@ static void cps_boot_secondary(int cpu, struct task_struct *idle)
if (!test_bit(core, core_power)) {
/* Boot a VPE on a powered down core */
- boot_core(core);
+ boot_core(core, vpe_id);
goto out;
}
+ if (cpu_has_vp) {
+ mips_cm_lock_other(core, vpe_id);
+ core_entry = CKSEG1ADDR((unsigned long)mips_cps_core_entry);
+ write_gcr_co_reset_base(core_entry);
+ mips_cm_unlock_other();
+ }
+
if (core != current_cpu_data.core) {
/* Boot a VPE on another powered up core */
for (remote = 0; remote < NR_CPUS; remote++) {
@@ -293,10 +335,10 @@ static void cps_boot_secondary(int cpu, struct task_struct *idle)
goto out;
}
- BUG_ON(!cpu_has_mipsmt);
+ BUG_ON(!cpu_has_mipsmt && !cpu_has_vp);
/* Boot a VPE on this core */
- mips_cps_boot_vpes();
+ mips_cps_boot_vpes(core_cfg, vpe_id);
out:
preempt_enable();
}
@@ -307,8 +349,23 @@ static void cps_init_secondary(void)
if (cpu_has_mipsmt)
dmt();
- change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 | STATUSF_IP4 |
- STATUSF_IP5 | STATUSF_IP6 | STATUSF_IP7);
+ if (mips_cm_revision() >= CM_REV_CM3) {
+ unsigned ident = gic_read_local_vp_id();
+
+ /*
+ * Ensure that our calculation of the VP ID matches up with
+ * what the GIC reports, otherwise we'll have configured
+ * interrupts incorrectly.
+ */
+ BUG_ON(ident != mips_cm_vp_id(smp_processor_id()));
+ }
+
+ if (cpu_has_veic)
+ clear_c0_status(ST0_IM);
+ else
+ change_c0_status(ST0_IM, STATUSF_IP2 | STATUSF_IP3 |
+ STATUSF_IP4 | STATUSF_IP5 |
+ STATUSF_IP6 | STATUSF_IP7);
}
static void cps_smp_finish(void)
@@ -341,6 +398,7 @@ static int cps_cpu_disable(void)
atomic_sub(1 << cpu_vpe_id(&current_cpu_data), &core_cfg->vpe_mask);
smp_mb__after_atomic();
set_cpu_online(cpu, false);
+ calculate_cpu_foreign_map();
cpumask_clear_cpu(cpu, &cpu_callin_map);
return 0;
@@ -355,14 +413,16 @@ static enum {
void play_dead(void)
{
- unsigned cpu, core;
+ unsigned int cpu, core, vpe_id;
local_irq_disable();
idle_task_exit();
cpu = smp_processor_id();
cpu_death = CPU_DEATH_POWER;
- if (cpu_has_mipsmt) {
+ pr_debug("CPU%d going offline\n", cpu);
+
+ if (cpu_has_mipsmt || cpu_has_vp) {
core = cpu_data[cpu].core;
/* Look for another online VPE within the core */
@@ -383,10 +443,21 @@ void play_dead(void)
complete(&cpu_death_chosen);
if (cpu_death == CPU_DEATH_HALT) {
- /* Halt this TC */
- write_c0_tchalt(TCHALT_H);
- instruction_hazard();
+ vpe_id = cpu_vpe_id(&cpu_data[cpu]);
+
+ pr_debug("Halting core %d VP%d\n", core, vpe_id);
+ if (cpu_has_mipsmt) {
+ /* Halt this TC */
+ write_c0_tchalt(TCHALT_H);
+ instruction_hazard();
+ } else if (cpu_has_vp) {
+ write_cpc_cl_vp_stop(1 << vpe_id);
+
+ /* Ensure that the VP_STOP register is written */
+ wmb();
+ }
} else {
+ pr_debug("Gating power to core %d\n", core);
/* Power down the core */
cps_pm_enter_state(CPS_PM_POWER_GATED);
}
@@ -413,6 +484,7 @@ static void wait_for_sibling_halt(void *ptr_cpu)
static void cps_cpu_die(unsigned int cpu)
{
unsigned core = cpu_data[cpu].core;
+ unsigned int vpe_id = cpu_vpe_id(&cpu_data[cpu]);
unsigned stat;
int err;
@@ -441,10 +513,12 @@ static void cps_cpu_die(unsigned int cpu)
* in which case the CPC will refuse to power down the core.
*/
do {
+ mips_cm_lock_other(core, vpe_id);
mips_cpc_lock_other(core);
stat = read_cpc_co_stat_conf();
stat &= CPC_Cx_STAT_CONF_SEQSTATE_MSK;
mips_cpc_unlock_other();
+ mips_cm_unlock_other();
} while (stat != CPC_Cx_STAT_CONF_SEQSTATE_D0 &&
stat != CPC_Cx_STAT_CONF_SEQSTATE_D2 &&
stat != CPC_Cx_STAT_CONF_SEQSTATE_U2);
@@ -461,6 +535,12 @@ static void cps_cpu_die(unsigned int cpu)
(void *)(unsigned long)cpu, 1);
if (err)
panic("Failed to call remote sibling CPU\n");
+ } else if (cpu_has_vp) {
+ do {
+ mips_cm_lock_other(core, vpe_id);
+ stat = read_cpc_co_vp_running();
+ mips_cm_unlock_other();
+ } while (stat & (1 << vpe_id));
}
}
diff --git a/arch/mips/kernel/smp.c b/arch/mips/kernel/smp.c
index 37708d9af638..f95f094f36e4 100644
--- a/arch/mips/kernel/smp.c
+++ b/arch/mips/kernel/smp.c
@@ -72,7 +72,7 @@ EXPORT_SYMBOL(cpu_core_map);
 * A logical cpu mask containing only one VPE per core to
* reduce the number of IPIs on large MT systems.
*/
-cpumask_t cpu_foreign_map __read_mostly;
+cpumask_t cpu_foreign_map[NR_CPUS] __read_mostly;
EXPORT_SYMBOL(cpu_foreign_map);
/* representing cpus for which sibling maps can be computed */
@@ -124,7 +124,7 @@ static inline void set_cpu_core_map(int cpu)
* Calculate a new cpu_foreign_map mask whenever a
* new cpu appears or disappears.
*/
-static inline void calculate_cpu_foreign_map(void)
+void calculate_cpu_foreign_map(void)
{
int i, k, core_present;
cpumask_t temp_foreign_map;
@@ -141,7 +141,9 @@ static inline void calculate_cpu_foreign_map(void)
cpumask_set_cpu(i, &temp_foreign_map);
}
- cpumask_copy(&cpu_foreign_map, &temp_foreign_map);
+ for_each_online_cpu(i)
+ cpumask_andnot(&cpu_foreign_map[i],
+ &temp_foreign_map, &cpu_sibling_map[i]);
}
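
For reference, cpumask_andnot() computes dst = src1 & ~src2 (returning whether dst is non-empty), so each CPU's foreign map ends up as "every CPU that has an online sibling, minus the VPEs sharing my own core". A minimal sketch using the same names as the hunk above:

	/* dst = src1 & ~src2 */
	cpumask_andnot(&cpu_foreign_map[cpu],
		       &temp_foreign_map,      /* CPUs with an online sibling */
		       &cpu_sibling_map[cpu]); /* VPEs on this CPU's own core */
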
struct plat_smp_ops *mp_ops;
@@ -254,7 +256,17 @@ static int __init mips_smp_ipi_init(void)
if (node && !ipidomain)
ipidomain = irq_find_matching_host(NULL, DOMAIN_BUS_IPI);
- BUG_ON(!ipidomain);
+ /*
+ * There are systems which only use IPI domains some of the time,
+ * depending upon configuration we don't know until runtime. An
+ * example is Malta where we may compile in support for GIC & the
+ * MT ASE, but run on a system which has multiple VPEs in a single
+ * core and doesn't include a GIC. Until all IPI implementations
+ * have been converted to use IPI domains the best we can do here
+ * is to return & hope some other code sets up the IPIs.
+ */
+ if (!ipidomain)
+ return 0;
call_virq = irq_reserve_ipi(ipidomain, cpu_possible_mask);
BUG_ON(!call_virq);
@@ -334,16 +346,9 @@ asmlinkage void start_secondary(void)
static void stop_this_cpu(void *dummy)
{
/*
- * Remove this CPU. Be a bit slow here and
- * set the bits for every online CPU so we don't miss
- * any IPI whilst taking this VPE down.
+ * Remove this CPU:
*/
- cpumask_copy(&cpu_foreign_map, cpu_online_mask);
-
- /* Make it visible to every other CPU */
- smp_mb();
-
set_cpu_online(smp_processor_id(), false);
calculate_cpu_foreign_map();
local_irq_disable();
@@ -502,10 +507,17 @@ void flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned l
smp_on_other_tlbs(flush_tlb_range_ipi, &fd);
} else {
unsigned int cpu;
+ int exec = vma->vm_flags & VM_EXEC;
for_each_online_cpu(cpu) {
+ /*
+ * flush_cache_range() will only fully flush icache if
+ * the VMA is executable, otherwise we must invalidate
+ * ASID without it appearing to has_valid_asid() as if
+ * mm has been completely unused by that CPU.
+ */
if (cpu != smp_processor_id() && cpu_context(cpu, mm))
- cpu_context(cpu, mm) = 0;
+ cpu_context(cpu, mm) = !exec;
}
}
local_flush_tlb_range(vma, start, end);
@@ -550,8 +562,14 @@ void flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
unsigned int cpu;
for_each_online_cpu(cpu) {
+ /*
+ * flush_cache_page() only does partial flushes, so
+ * invalidate ASID without it appearing to
+ * has_valid_asid() as if mm has been completely unused
+ * by that CPU.
+ */
if (cpu != smp_processor_id() && cpu_context(cpu, vma->vm_mm))
- cpu_context(cpu, vma->vm_mm) = 0;
+ cpu_context(cpu, vma->vm_mm) = 1;
}
}
local_flush_tlb_page(vma, page);
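
The choice of 1 (or !exec) rather than 0 matters because of how the cache code decides whether an mm has ever been live on a CPU; a sketch of the assumed helper the comments refer to (illustrative, not part of this patch):

	/* Assumed shape of has_valid_asid(), per the comments above: */
	static inline int has_valid_asid(const struct mm_struct *mm)
	{
		int i;

		for_each_online_cpu(i)
			if (cpu_context(i, mm))
				return 1;	/* some CPU still considers mm live */
		return 0;
	}

	/*
	 * Writing 0 would make the mm look completely unused, letting partial
	 * cache flushes be skipped; writing a small non-zero value still forces
	 * a fresh ASID allocation while keeping the mm "known" to that CPU.
	 */
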
diff --git a/arch/mips/kernel/spram.c b/arch/mips/kernel/spram.c
index 8489c88f9932..d6e6cf75114d 100644
--- a/arch/mips/kernel/spram.c
+++ b/arch/mips/kernel/spram.c
@@ -210,6 +210,7 @@ void spram_config(void)
case CPU_P5600:
case CPU_QEMU_GENERIC:
case CPU_I6400:
+ case CPU_P6600:
config0 = read_c0_config();
/* FIXME: addresses are Malta specific */
if (config0 & (1<<24)) {
diff --git a/arch/mips/kernel/traps.c b/arch/mips/kernel/traps.c
index bf14da9f3e33..3de85be2486a 100644
--- a/arch/mips/kernel/traps.c
+++ b/arch/mips/kernel/traps.c
@@ -56,6 +56,7 @@
#include <asm/pgtable.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
+#include <asm/siginfo.h>
#include <asm/tlbdebug.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
@@ -144,7 +145,7 @@ static void show_backtrace(struct task_struct *task, const struct pt_regs *regs)
if (!task)
task = current;
- if (raw_show_trace || !__kernel_text_address(pc)) {
+ if (raw_show_trace || user_mode(regs) || !__kernel_text_address(pc)) {
show_raw_backtrace(sp);
return;
}
@@ -398,11 +399,8 @@ void __noreturn die(const char *str, struct pt_regs *regs)
if (in_interrupt())
panic("Fatal exception in interrupt");
- if (panic_on_oops) {
- printk(KERN_EMERG "Fatal exception: panic in 5 seconds");
- ssleep(5);
+ if (panic_on_oops)
panic("Fatal exception");
- }
if (regs && kexec_should_crash(current))
crash_kexec(regs);
@@ -621,17 +619,17 @@ static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
perf_sw_event(PERF_COUNT_SW_EMULATION_FAULTS,
1, regs, 0);
switch (rd) {
- case 0: /* CPU number */
+ case MIPS_HWR_CPUNUM: /* CPU number */
regs->regs[rt] = smp_processor_id();
return 0;
- case 1: /* SYNCI length */
+ case MIPS_HWR_SYNCISTEP: /* SYNCI length */
regs->regs[rt] = min(current_cpu_data.dcache.linesz,
current_cpu_data.icache.linesz);
return 0;
- case 2: /* Read count register */
+ case MIPS_HWR_CC: /* Read count register */
regs->regs[rt] = read_c0_count();
return 0;
- case 3: /* Count register resolution */
+ case MIPS_HWR_CCRES: /* Count register resolution */
switch (current_cpu_type()) {
case CPU_20KC:
case CPU_25KF:
@@ -641,7 +639,7 @@ static int simulate_rdhwr(struct pt_regs *regs, int rd, int rt)
regs->regs[rt] = 2;
}
return 0;
- case 29:
+ case MIPS_HWR_ULR: /* Read UserLocal register */
regs->regs[rt] = ti->tp_value;
return 0;
default:
@@ -706,6 +704,7 @@ asmlinkage void do_ov(struct pt_regs *regs)
int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
{
struct siginfo si = { 0 };
+ struct vm_area_struct *vma;
switch (sig) {
case 0:
@@ -746,7 +745,8 @@ int process_fpemu_return(int sig, void __user *fault_addr, unsigned long fcr31)
si.si_addr = fault_addr;
si.si_signo = sig;
down_read(&current->mm->mmap_sem);
- if (find_vma(current->mm, (unsigned long)fault_addr))
+ vma = find_vma(current->mm, (unsigned long)fault_addr);
+ if (vma && (vma->vm_start <= (unsigned long)fault_addr))
si.si_code = SEGV_ACCERR;
else
si.si_code = SEGV_MAPERR;
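
The extra vm_start test is needed because find_vma() returns the first VMA whose end lies above the address, which may itself start above it; only when the VMA really covers the faulting address is the mapping present but inaccessible. Condensed form of the decision above:

	vma = find_vma(current->mm, (unsigned long)fault_addr);
	if (vma && vma->vm_start <= (unsigned long)fault_addr)
		si.si_code = SEGV_ACCERR;	/* mapped, but the access was refused */
	else
		si.si_code = SEGV_MAPERR;	/* no VMA actually covers the address */
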
@@ -871,7 +871,7 @@ out:
exception_exit(prev_state);
}
-void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
+void do_trap_or_bp(struct pt_regs *regs, unsigned int code, int si_code,
const char *str)
{
siginfo_t info = { 0 };
@@ -928,7 +928,13 @@ void do_trap_or_bp(struct pt_regs *regs, unsigned int code,
default:
scnprintf(b, sizeof(b), "%s instruction in kernel code", str);
die_if_kernel(b, regs);
- force_sig(SIGTRAP, current);
+ if (si_code) {
+ info.si_signo = SIGTRAP;
+ info.si_code = si_code;
+ force_sig_info(SIGTRAP, &info, current);
+ } else {
+ force_sig(SIGTRAP, current);
+ }
}
}
@@ -1012,7 +1018,7 @@ asmlinkage void do_bp(struct pt_regs *regs)
break;
}
- do_trap_or_bp(regs, bcode, "Break");
+ do_trap_or_bp(regs, bcode, TRAP_BRKPT, "Break");
out:
set_fs(seg);
@@ -1054,7 +1060,7 @@ asmlinkage void do_tr(struct pt_regs *regs)
tcode = (opcode >> 6) & ((1 << 10) - 1);
}
- do_trap_or_bp(regs, tcode, "Trap");
+ do_trap_or_bp(regs, tcode, 0, "Trap");
out:
set_fs(seg);
@@ -1115,19 +1121,7 @@ no_r2_instr:
if (unlikely(compute_return_epc(regs) < 0))
goto out;
- if (get_isa16_mode(regs->cp0_epc)) {
- unsigned short mmop[2] = { 0 };
-
- if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
- status = SIGSEGV;
- if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
- status = SIGSEGV;
- opcode = mmop[0];
- opcode = (opcode << 16) | mmop[1];
-
- if (status < 0)
- status = simulate_rdhwr_mm(regs, opcode);
- } else {
+ if (!get_isa16_mode(regs->cp0_epc)) {
if (unlikely(get_user(opcode, epc) < 0))
status = SIGSEGV;
@@ -1142,6 +1136,18 @@ no_r2_instr:
if (status < 0)
status = simulate_fp(regs, opcode, old_epc, old31);
+ } else if (cpu_has_mmips) {
+ unsigned short mmop[2] = { 0 };
+
+ if (unlikely(get_user(mmop[0], (u16 __user *)epc + 0) < 0))
+ status = SIGSEGV;
+ if (unlikely(get_user(mmop[1], (u16 __user *)epc + 1) < 0))
+ status = SIGSEGV;
+ opcode = mmop[0];
+ opcode = (opcode << 16) | mmop[1];
+
+ if (status < 0)
+ status = simulate_rdhwr_mm(regs, opcode);
}
if (status < 0)
@@ -1242,7 +1248,7 @@ static int enable_restore_fp_context(int msa)
err = init_fpu();
if (msa && !err) {
enable_msa();
- _init_msa_upper();
+ init_msa_upper();
set_thread_flag(TIF_USEDMSA);
set_thread_flag(TIF_MSA_CTX_LIVE);
}
@@ -1305,7 +1311,7 @@ static int enable_restore_fp_context(int msa)
*/
prior_msa = test_and_set_thread_flag(TIF_MSA_CTX_LIVE);
if (!prior_msa && was_fpu_owner) {
- _init_msa_upper();
+ init_msa_upper();
goto out;
}
@@ -1322,7 +1328,7 @@ static int enable_restore_fp_context(int msa)
* of each vector register such that it cannot see data left
* behind by another task.
*/
- _init_msa_upper();
+ init_msa_upper();
} else {
/* We need to restore the vector context. */
restore_msa(current);
@@ -1349,7 +1355,6 @@ asmlinkage void do_cpu(struct pt_regs *regs)
unsigned long fcr31;
unsigned int cpid;
int status, err;
- unsigned long __maybe_unused flags;
int sig;
prev_state = exception_enter();
@@ -1492,17 +1497,15 @@ asmlinkage void do_mdmx(struct pt_regs *regs)
*/
asmlinkage void do_watch(struct pt_regs *regs)
{
+ siginfo_t info = { .si_signo = SIGTRAP, .si_code = TRAP_HWBKPT };
enum ctx_state prev_state;
- u32 cause;
prev_state = exception_enter();
/*
* Clear WP (bit 22) bit of cause register so we don't loop
* forever.
*/
- cause = read_c0_cause();
- cause &= ~(1 << 22);
- write_c0_cause(cause);
+ clear_c0_cause(CAUSEF_WP);
/*
* If the current thread has the watch registers loaded, save
@@ -1512,7 +1515,7 @@ asmlinkage void do_watch(struct pt_regs *regs)
if (test_tsk_thread_flag(current, TIF_LOAD_WATCH)) {
mips_read_watch_registers();
local_irq_enable();
- force_sig(SIGTRAP, current);
+ force_sig_info(SIGTRAP, &info, current);
} else {
mips_clear_watch_registers();
local_irq_enable();
@@ -1639,6 +1642,7 @@ static inline void parity_protection_init(void)
case CPU_P5600:
case CPU_QEMU_GENERIC:
case CPU_I6400:
+ case CPU_P6600:
{
#define ERRCTL_PE 0x80000000
#define ERRCTL_L2P 0x00800000
@@ -1769,7 +1773,8 @@ asmlinkage void do_ftlb(void)
/* For the moment, report the problem and hang. */
if ((cpu_has_mips_r2_r6) &&
- ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS)) {
+ (((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_MIPS) ||
+ ((current_cpu_data.processor_id & 0xff0000) == PRID_COMP_LOONGSON))) {
pr_err("FTLB error exception, cp0_ecc=0x%08x:\n",
read_c0_ecc());
pr_err("cp0_errorepc == %0*lx\n", field, read_c0_errorepc());
@@ -1856,6 +1861,7 @@ void __noreturn nmi_exception_handler(struct pt_regs *regs)
#define VECTORSPACING 0x100 /* for EI/VI mode */
unsigned long ebase;
+EXPORT_SYMBOL_GPL(ebase);
unsigned long exception_handlers[32];
unsigned long vi_handlers[64];
@@ -2060,16 +2066,22 @@ static void configure_status(void)
status_set);
}
+unsigned int hwrena;
+EXPORT_SYMBOL_GPL(hwrena);
+
/* configure HWRENA register */
static void configure_hwrena(void)
{
- unsigned int hwrena = cpu_hwrena_impl_bits;
+ hwrena = cpu_hwrena_impl_bits;
if (cpu_has_mips_r2_r6)
- hwrena |= 0x0000000f;
+ hwrena |= MIPS_HWRENA_CPUNUM |
+ MIPS_HWRENA_SYNCISTEP |
+ MIPS_HWRENA_CC |
+ MIPS_HWRENA_CCRES;
if (!noulri && cpu_has_userlocal)
- hwrena |= (1 << 29);
+ hwrena |= MIPS_HWRENA_ULR;
if (hwrena)
write_c0_hwrena(hwrena);
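
The named bits replace the previous magic numbers one for one; judging from the constants being removed (0x0000000f and 1 << 29), the assumed definitions are:

	#define MIPS_HWRENA_CPUNUM	BIT(0)	/* RDHWR 0: CPU number          */
	#define MIPS_HWRENA_SYNCISTEP	BIT(1)	/* RDHWR 1: SYNCI step size     */
	#define MIPS_HWRENA_CC		BIT(2)	/* RDHWR 2: cycle counter       */
	#define MIPS_HWRENA_CCRES	BIT(3)	/* RDHWR 3: counter resolution  */
	#define MIPS_HWRENA_ULR		BIT(29)	/* RDHWR 29: UserLocal register */
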
@@ -2111,6 +2123,13 @@ void per_cpu_trap_init(bool is_boot_cpu)
* o read IntCtl.IPFDC to determine the fast debug channel interrupt
*/
if (cpu_has_mips_r2_r6) {
+ /*
+ * We shouldn't trust a secondary core has a sane EBASE register
+ * so use the one calculated by the boot CPU.
+ */
+ if (!is_boot_cpu)
+ write_c0_ebase(ebase);
+
cp0_compare_irq_shift = CAUSEB_TI - CAUSEB_IP;
cp0_compare_irq = (read_c0_intctl() >> INTCTLB_IPTI) & 7;
cp0_perfcount_irq = (read_c0_intctl() >> INTCTLB_IPPCI) & 7;
@@ -2126,7 +2145,7 @@ void per_cpu_trap_init(bool is_boot_cpu)
}
if (!cpu_data[cpu].asid_cache)
- cpu_data[cpu].asid_cache = ASID_FIRST_VERSION;
+ cpu_data[cpu].asid_cache = asid_first_version(cpu);
atomic_inc(&init_mm.mm_count);
current->active_mm = &init_mm;
@@ -2214,7 +2233,7 @@ void __init trap_init(void)
/*
* Copy the generic exception handlers to their final destination.
- * This will be overriden later as suitable for a particular
+ * This will be overridden later as suitable for a particular
* configuration.
*/
set_handler(0x180, &except_vec3_generic, 0x80);
diff --git a/arch/mips/kernel/unaligned.c b/arch/mips/kernel/unaligned.c
index 490cea569d57..f1c308dbbc4a 100644
--- a/arch/mips/kernel/unaligned.c
+++ b/arch/mips/kernel/unaligned.c
@@ -885,7 +885,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
{
union mips_instruction insn;
unsigned long value;
- unsigned int res;
+ unsigned int res, preempted;
unsigned long origpc;
unsigned long orig31;
void __user *fault_addr = NULL;
@@ -1025,7 +1025,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
- if (config_enabled(CONFIG_EVA)) {
+ if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds()))
LoadHW(addr, value, res);
else
@@ -1044,7 +1044,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, 4))
goto sigbus;
- if (config_enabled(CONFIG_EVA)) {
+ if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds()))
LoadW(addr, value, res);
else
@@ -1063,7 +1063,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, 2))
goto sigbus;
- if (config_enabled(CONFIG_EVA)) {
+ if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds()))
LoadHWU(addr, value, res);
else
@@ -1131,7 +1131,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
- if (config_enabled(CONFIG_EVA)) {
+ if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds()))
StoreHW(addr, value, res);
else
@@ -1151,7 +1151,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
compute_return_epc(regs);
value = regs->regs[insn.i_format.rt];
- if (config_enabled(CONFIG_EVA)) {
+ if (IS_ENABLED(CONFIG_EVA)) {
if (segment_eq(get_fs(), get_ds()))
StoreW(addr, value, res);
else
@@ -1191,6 +1191,7 @@ static void emulate_load_store_insn(struct pt_regs *regs,
case ldc1_op:
case swc1_op:
case sdc1_op:
+ case cop1x_op:
die_if_kernel("Unaligned FP access in kernel code", regs);
BUG_ON(!used_math());
@@ -1226,27 +1227,36 @@ static void emulate_load_store_insn(struct pt_regs *regs,
if (!access_ok(VERIFY_READ, addr, sizeof(*fpr)))
goto sigbus;
- /*
- * Disable preemption to avoid a race between copying
- * state from userland, migrating to another CPU and
- * updating the hardware vector register below.
- */
- preempt_disable();
-
- res = __copy_from_user_inatomic(fpr, addr,
- sizeof(*fpr));
- if (res)
- goto fault;
-
- /*
- * Update the hardware register if it is in use by the
- * task in this quantum, in order to avoid having to
- * save & restore the whole vector context.
- */
- if (test_thread_flag(TIF_USEDMSA))
- write_msa_wr(wd, fpr, df);
+ do {
+ /*
+ * If we have live MSA context keep track of
+ * whether we get preempted in order to avoid
+ * the register context we load being clobbered
+ * by the live context as it's saved during
+ * preemption. If we don't have live context
+ * then it can't be saved to clobber the value
+ * we load.
+ */
+ preempted = test_thread_flag(TIF_USEDMSA);
+
+ res = __copy_from_user_inatomic(fpr, addr,
+ sizeof(*fpr));
+ if (res)
+ goto fault;
- preempt_enable();
+ /*
+ * Update the hardware register if it is in use
+ * by the task in this quantum, in order to
+ * avoid having to save & restore the whole
+ * vector context.
+ */
+ preempt_disable();
+ if (test_thread_flag(TIF_USEDMSA)) {
+ write_msa_wr(wd, fpr, df);
+ preempted = 0;
+ }
+ preempt_enable();
+ } while (preempted);
break;
case msa_st_op:
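
Because the hunk above interleaves removed and added lines, the resulting load-side retry loop reads more easily in one piece; a condensed restatement (same logic as the patch, no new behaviour):

	do {
		/* remember whether live MSA context existed before the copy */
		preempted = test_thread_flag(TIF_USEDMSA);

		res = __copy_from_user_inatomic(fpr, addr, sizeof(*fpr));
		if (res)
			goto fault;

		preempt_disable();
		if (test_thread_flag(TIF_USEDMSA)) {
			write_msa_wr(wd, fpr, df);	/* hardware updated directly  */
			preempted = 0;			/* value cannot be clobbered  */
		}
		preempt_enable();
	} while (preempted);			/* re-copy if a switch intervened */
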
diff --git a/arch/mips/kernel/vdso.c b/arch/mips/kernel/vdso.c
index 975e99759bab..9abe447a4b48 100644
--- a/arch/mips/kernel/vdso.c
+++ b/arch/mips/kernel/vdso.c
@@ -104,7 +104,18 @@ int arch_setup_additional_pages(struct linux_binprm *bprm, int uses_interp)
struct resource gic_res;
int ret;
- down_write(&mm->mmap_sem);
+ if (down_write_killable(&mm->mmap_sem))
+ return -EINTR;
+
+ /* Map delay slot emulation page */
+ base = mmap_region(NULL, STACK_TOP, PAGE_SIZE,
+ VM_READ|VM_WRITE|VM_EXEC|
+ VM_MAYREAD|VM_MAYWRITE|VM_MAYEXEC,
+ 0);
+ if (IS_ERR_VALUE(base)) {
+ ret = base;
+ goto out;
+ }
/*
* Determine total area size. This includes the VDSO data itself, the
diff --git a/arch/mips/kernel/vmlinux.lds.S b/arch/mips/kernel/vmlinux.lds.S
index 54d653ee17e1..a82c178d0bb9 100644
--- a/arch/mips/kernel/vmlinux.lds.S
+++ b/arch/mips/kernel/vmlinux.lds.S
@@ -136,6 +136,27 @@ SECTIONS
#ifdef CONFIG_SMP
PERCPU_SECTION(1 << CONFIG_MIPS_L1_CACHE_SHIFT)
#endif
+
+#ifdef CONFIG_RELOCATABLE
+ . = ALIGN(4);
+
+ .data.reloc : {
+ _relocation_start = .;
+ /*
+ * Space for relocation table
+ * This needs to be filled so that the
+ * relocs tool can overwrite the content.
+ * An invalid value is left at the start of the
+ * section to abort relocation if the table
+ * has not been filled in.
+ */
+ LONG(0xFFFFFFFF);
+ FILL(0);
+ . += CONFIG_RELOCATION_TABLE_SIZE - 4;
+ _relocation_end = .;
+ }
+#endif
+
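
The 0xFFFFFFFF sentinel lets the early relocation code tell an unprocessed table from a filled one. A minimal sketch of that check, assuming the table is consumed starting at _relocation_start (the consumer itself is not part of this hunk):

	extern u32 _relocation_start[], _relocation_end[];

	static bool relocation_table_filled(void)
	{
		/* the relocs tool overwrites the first word when it fills the table */
		return _relocation_start[0] != 0xFFFFFFFF;
	}
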
#ifdef CONFIG_MIPS_RAW_APPENDED_DTB
__appended_dtb = .;
/* leave space for appended DTB */
diff --git a/arch/mips/kernel/watch.c b/arch/mips/kernel/watch.c
index 2a03abb5bd2c..19fcab7348b1 100644
--- a/arch/mips/kernel/watch.c
+++ b/arch/mips/kernel/watch.c
@@ -15,10 +15,9 @@
* Install the watch registers for the current thread. A maximum of
* four registers are installed although the machine may have more.
*/
-void mips_install_watch_registers(void)
+void mips_install_watch_registers(struct task_struct *t)
{
- struct mips3264_watch_reg_state *watches =
- &current->thread.watch.mips3264;
+ struct mips3264_watch_reg_state *watches = &t->thread.watch.mips3264;
switch (current_cpu_data.watch_reg_use_cnt) {
default:
BUG();
@@ -26,16 +25,20 @@ void mips_install_watch_registers(void)
write_c0_watchlo3(watches->watchlo[3]);
/* Write 1 to the I, R, and W bits to clear them, and
1 to G so all ASIDs are trapped. */
- write_c0_watchhi3(0x40000007 | watches->watchhi[3]);
+ write_c0_watchhi3(MIPS_WATCHHI_G | MIPS_WATCHHI_IRW |
+ watches->watchhi[3]);
case 3:
write_c0_watchlo2(watches->watchlo[2]);
- write_c0_watchhi2(0x40000007 | watches->watchhi[2]);
+ write_c0_watchhi2(MIPS_WATCHHI_G | MIPS_WATCHHI_IRW |
+ watches->watchhi[2]);
case 2:
write_c0_watchlo1(watches->watchlo[1]);
- write_c0_watchhi1(0x40000007 | watches->watchhi[1]);
+ write_c0_watchhi1(MIPS_WATCHHI_G | MIPS_WATCHHI_IRW |
+ watches->watchhi[1]);
case 1:
write_c0_watchlo0(watches->watchlo[0]);
- write_c0_watchhi0(0x40000007 | watches->watchhi[0]);
+ write_c0_watchhi0(MIPS_WATCHHI_G | MIPS_WATCHHI_IRW |
+ watches->watchhi[0]);
}
}
@@ -52,22 +55,26 @@ void mips_read_watch_registers(void)
default:
BUG();
case 4:
- watches->watchhi[3] = (read_c0_watchhi3() & 0x0fff);
+ watches->watchhi[3] = (read_c0_watchhi3() &
+ (MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW));
case 3:
- watches->watchhi[2] = (read_c0_watchhi2() & 0x0fff);
+ watches->watchhi[2] = (read_c0_watchhi2() &
+ (MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW));
case 2:
- watches->watchhi[1] = (read_c0_watchhi1() & 0x0fff);
+ watches->watchhi[1] = (read_c0_watchhi1() &
+ (MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW));
case 1:
- watches->watchhi[0] = (read_c0_watchhi0() & 0x0fff);
+ watches->watchhi[0] = (read_c0_watchhi0() &
+ (MIPS_WATCHHI_MASK | MIPS_WATCHHI_IRW));
}
if (current_cpu_data.watch_reg_use_cnt == 1 &&
- (watches->watchhi[0] & 7) == 0) {
+ (watches->watchhi[0] & MIPS_WATCHHI_IRW) == 0) {
/* Pathological case of release 1 architecture that
* doesn't set the condition bits. We assume that
* since we got here, the watch condition was met and
* signal that the conditions requested in watchlo
* were met. */
- watches->watchhi[0] |= (watches->watchlo[0] & 7);
+ watches->watchhi[0] |= (watches->watchlo[0] & MIPS_WATCHHI_IRW);
}
}
@@ -110,86 +117,86 @@ void mips_probe_watch_registers(struct cpuinfo_mips *c)
* Check which of the I,R and W bits are supported, then
* disable the register.
*/
- write_c0_watchlo0(7);
+ write_c0_watchlo0(MIPS_WATCHLO_IRW);
back_to_back_c0_hazard();
t = read_c0_watchlo0();
write_c0_watchlo0(0);
- c->watch_reg_masks[0] = t & 7;
+ c->watch_reg_masks[0] = t & MIPS_WATCHLO_IRW;
/* Write the mask bits and read them back to determine which
* can be used. */
c->watch_reg_count = 1;
c->watch_reg_use_cnt = 1;
t = read_c0_watchhi0();
- write_c0_watchhi0(t | 0xff8);
+ write_c0_watchhi0(t | MIPS_WATCHHI_MASK);
back_to_back_c0_hazard();
t = read_c0_watchhi0();
- c->watch_reg_masks[0] |= (t & 0xff8);
- if ((t & 0x80000000) == 0)
+ c->watch_reg_masks[0] |= (t & MIPS_WATCHHI_MASK);
+ if ((t & MIPS_WATCHHI_M) == 0)
return;
- write_c0_watchlo1(7);
+ write_c0_watchlo1(MIPS_WATCHLO_IRW);
back_to_back_c0_hazard();
t = read_c0_watchlo1();
write_c0_watchlo1(0);
- c->watch_reg_masks[1] = t & 7;
+ c->watch_reg_masks[1] = t & MIPS_WATCHLO_IRW;
c->watch_reg_count = 2;
c->watch_reg_use_cnt = 2;
t = read_c0_watchhi1();
- write_c0_watchhi1(t | 0xff8);
+ write_c0_watchhi1(t | MIPS_WATCHHI_MASK);
back_to_back_c0_hazard();
t = read_c0_watchhi1();
- c->watch_reg_masks[1] |= (t & 0xff8);
- if ((t & 0x80000000) == 0)
+ c->watch_reg_masks[1] |= (t & MIPS_WATCHHI_MASK);
+ if ((t & MIPS_WATCHHI_M) == 0)
return;
- write_c0_watchlo2(7);
+ write_c0_watchlo2(MIPS_WATCHLO_IRW);
back_to_back_c0_hazard();
t = read_c0_watchlo2();
write_c0_watchlo2(0);
- c->watch_reg_masks[2] = t & 7;
+ c->watch_reg_masks[2] = t & MIPS_WATCHLO_IRW;
c->watch_reg_count = 3;
c->watch_reg_use_cnt = 3;
t = read_c0_watchhi2();
- write_c0_watchhi2(t | 0xff8);
+ write_c0_watchhi2(t | MIPS_WATCHHI_MASK);
back_to_back_c0_hazard();
t = read_c0_watchhi2();
- c->watch_reg_masks[2] |= (t & 0xff8);
- if ((t & 0x80000000) == 0)
+ c->watch_reg_masks[2] |= (t & MIPS_WATCHHI_MASK);
+ if ((t & MIPS_WATCHHI_M) == 0)
return;
- write_c0_watchlo3(7);
+ write_c0_watchlo3(MIPS_WATCHLO_IRW);
back_to_back_c0_hazard();
t = read_c0_watchlo3();
write_c0_watchlo3(0);
- c->watch_reg_masks[3] = t & 7;
+ c->watch_reg_masks[3] = t & MIPS_WATCHLO_IRW;
c->watch_reg_count = 4;
c->watch_reg_use_cnt = 4;
t = read_c0_watchhi3();
- write_c0_watchhi3(t | 0xff8);
+ write_c0_watchhi3(t | MIPS_WATCHHI_MASK);
back_to_back_c0_hazard();
t = read_c0_watchhi3();
- c->watch_reg_masks[3] |= (t & 0xff8);
- if ((t & 0x80000000) == 0)
+ c->watch_reg_masks[3] |= (t & MIPS_WATCHHI_MASK);
+ if ((t & MIPS_WATCHHI_M) == 0)
return;
/* We use at most 4, but probe and report up to 8. */
c->watch_reg_count = 5;
t = read_c0_watchhi4();
- if ((t & 0x80000000) == 0)
+ if ((t & MIPS_WATCHHI_M) == 0)
return;
c->watch_reg_count = 6;
t = read_c0_watchhi5();
- if ((t & 0x80000000) == 0)
+ if ((t & MIPS_WATCHHI_M) == 0)
return;
c->watch_reg_count = 7;
t = read_c0_watchhi6();
- if ((t & 0x80000000) == 0)
+ if ((t & MIPS_WATCHHI_M) == 0)
return;
c->watch_reg_count = 8;
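
The symbolic names substitute for the raw constants used before; from the values being replaced (7, 0x40000007, 0x0fff, 0xff8 and 0x80000000) the assumed definitions are:

	#define MIPS_WATCHLO_IRW	(BIT(0) | BIT(1) | BIT(2))	/* W, R, I enables       */
	#define MIPS_WATCHHI_IRW	(BIT(0) | BIT(1) | BIT(2))	/* W, R, I status bits   */
	#define MIPS_WATCHHI_MASK	(0x1ff << 3)			/* address mask (0xff8)  */
	#define MIPS_WATCHHI_G		BIT(30)				/* match all ASIDs       */
	#define MIPS_WATCHHI_M		BIT(31)				/* more registers follow */
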
diff --git a/arch/mips/kvm/Kconfig b/arch/mips/kvm/Kconfig
index 2ae12825529f..7c56d6b124d1 100644
--- a/arch/mips/kvm/Kconfig
+++ b/arch/mips/kvm/Kconfig
@@ -17,6 +17,7 @@ if VIRTUALIZATION
config KVM
tristate "Kernel-based Virtual Machine (KVM) support"
depends on HAVE_KVM
+ select EXPORT_UASM
select PREEMPT_NOTIFIERS
select ANON_INODES
select KVM_MMIO
diff --git a/arch/mips/kvm/Makefile b/arch/mips/kvm/Makefile
index 637ebbebd549..847429de780d 100644
--- a/arch/mips/kvm/Makefile
+++ b/arch/mips/kvm/Makefile
@@ -7,9 +7,10 @@ EXTRA_CFLAGS += -Ivirt/kvm -Iarch/mips/kvm
common-objs-$(CONFIG_CPU_HAS_MSA) += msa.o
-kvm-objs := $(common-objs-y) mips.o emulate.o locore.o \
+kvm-objs := $(common-objs-y) mips.o emulate.o entry.o \
interrupt.o stats.o commpage.o \
dyntrans.o trap_emul.o fpu.o
+kvm-objs += mmu.o
obj-$(CONFIG_KVM) += kvm.o
obj-y += callback.o tlb.o
diff --git a/arch/mips/kvm/commpage.c b/arch/mips/kvm/commpage.c
index 2d6e976d1add..a36b77e1705c 100644
--- a/arch/mips/kvm/commpage.c
+++ b/arch/mips/kvm/commpage.c
@@ -4,7 +4,7 @@
* for more details.
*
* commpage, currently used for Virtual COP0 registers.
- * Mapped into the guest kernel @ 0x0.
+ * Mapped into the guest kernel @ KVM_GUEST_COMMPAGE_ADDR.
*
* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
* Authors: Sanjay Lal <sanjayl@kymasys.com>
diff --git a/arch/mips/kvm/dyntrans.c b/arch/mips/kvm/dyntrans.c
index f1527a465c1b..d280894915ed 100644
--- a/arch/mips/kvm/dyntrans.c
+++ b/arch/mips/kvm/dyntrans.c
@@ -11,6 +11,7 @@
#include <linux/errno.h>
#include <linux/err.h>
+#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
@@ -20,125 +21,114 @@
#include "commpage.h"
-#define SYNCI_TEMPLATE 0x041f0000
-#define SYNCI_BASE(x) (((x) >> 21) & 0x1f)
-#define SYNCI_OFFSET ((x) & 0xffff)
+/**
+ * kvm_mips_trans_replace() - Replace trapping instruction in guest memory.
+ * @vcpu: Virtual CPU.
+ * @opc: PC of instruction to replace.
+ * @replace: Instruction to write
+ */
+static int kvm_mips_trans_replace(struct kvm_vcpu *vcpu, u32 *opc,
+ union mips_instruction replace)
+{
+ unsigned long paddr, flags;
+ void *vaddr;
+
+ if (KVM_GUEST_KSEGX((unsigned long)opc) == KVM_GUEST_KSEG0) {
+ paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
+ (unsigned long)opc);
+ vaddr = kmap_atomic(pfn_to_page(PHYS_PFN(paddr)));
+ vaddr += paddr & ~PAGE_MASK;
+ memcpy(vaddr, (void *)&replace, sizeof(u32));
+ local_flush_icache_range((unsigned long)vaddr,
+ (unsigned long)vaddr + 32);
+ kunmap_atomic(vaddr);
+ } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
+ local_irq_save(flags);
+ memcpy((void *)opc, (void *)&replace, sizeof(u32));
+ local_flush_icache_range((unsigned long)opc,
+ (unsigned long)opc + 32);
+ local_irq_restore(flags);
+ } else {
+ kvm_err("%s: Invalid address: %p\n", __func__, opc);
+ return -EFAULT;
+ }
-#define LW_TEMPLATE 0x8c000000
-#define CLEAR_TEMPLATE 0x00000020
-#define SW_TEMPLATE 0xac000000
+ return 0;
+}
-int kvm_mips_trans_cache_index(uint32_t inst, uint32_t *opc,
+int kvm_mips_trans_cache_index(union mips_instruction inst, u32 *opc,
struct kvm_vcpu *vcpu)
{
- int result = 0;
- unsigned long kseg0_opc;
- uint32_t synci_inst = 0x0;
+ union mips_instruction nop_inst = { 0 };
/* Replace the CACHE instruction, with a NOP */
- kseg0_opc =
- CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
- (vcpu, (unsigned long) opc));
- memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
- local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
-
- return result;
+ return kvm_mips_trans_replace(vcpu, opc, nop_inst);
}
/*
* Address based CACHE instructions are transformed into synci(s). A little
* heavy for just D-cache invalidates, but avoids an expensive trap
*/
-int kvm_mips_trans_cache_va(uint32_t inst, uint32_t *opc,
+int kvm_mips_trans_cache_va(union mips_instruction inst, u32 *opc,
struct kvm_vcpu *vcpu)
{
- int result = 0;
- unsigned long kseg0_opc;
- uint32_t synci_inst = SYNCI_TEMPLATE, base, offset;
-
- base = (inst >> 21) & 0x1f;
- offset = inst & 0xffff;
- synci_inst |= (base << 21);
- synci_inst |= offset;
-
- kseg0_opc =
- CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
- (vcpu, (unsigned long) opc));
- memcpy((void *)kseg0_opc, (void *)&synci_inst, sizeof(uint32_t));
- local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
-
- return result;
+ union mips_instruction synci_inst = { 0 };
+
+ synci_inst.i_format.opcode = bcond_op;
+ synci_inst.i_format.rs = inst.i_format.rs;
+ synci_inst.i_format.rt = synci_op;
+ if (cpu_has_mips_r6)
+ synci_inst.i_format.simmediate = inst.spec3_format.simmediate;
+ else
+ synci_inst.i_format.simmediate = inst.i_format.simmediate;
+
+ return kvm_mips_trans_replace(vcpu, opc, synci_inst);
}
-int kvm_mips_trans_mfc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+int kvm_mips_trans_mfc0(union mips_instruction inst, u32 *opc,
+ struct kvm_vcpu *vcpu)
{
- int32_t rt, rd, sel;
- uint32_t mfc0_inst;
- unsigned long kseg0_opc, flags;
-
- rt = (inst >> 16) & 0x1f;
- rd = (inst >> 11) & 0x1f;
- sel = inst & 0x7;
+ union mips_instruction mfc0_inst = { 0 };
+ u32 rd, sel;
- if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
- mfc0_inst = CLEAR_TEMPLATE;
- mfc0_inst |= ((rt & 0x1f) << 16);
- } else {
- mfc0_inst = LW_TEMPLATE;
- mfc0_inst |= ((rt & 0x1f) << 16);
- mfc0_inst |= offsetof(struct kvm_mips_commpage,
- cop0.reg[rd][sel]);
- }
+ rd = inst.c0r_format.rd;
+ sel = inst.c0r_format.sel;
- if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
- kseg0_opc =
- CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
- (vcpu, (unsigned long) opc));
- memcpy((void *)kseg0_opc, (void *)&mfc0_inst, sizeof(uint32_t));
- local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
- } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
- local_irq_save(flags);
- memcpy((void *)opc, (void *)&mfc0_inst, sizeof(uint32_t));
- local_flush_icache_range((unsigned long)opc,
- (unsigned long)opc + 32);
- local_irq_restore(flags);
+ if (rd == MIPS_CP0_ERRCTL && sel == 0) {
+ mfc0_inst.r_format.opcode = spec_op;
+ mfc0_inst.r_format.rd = inst.c0r_format.rt;
+ mfc0_inst.r_format.func = add_op;
} else {
- kvm_err("%s: Invalid address: %p\n", __func__, opc);
- return -EFAULT;
+ mfc0_inst.i_format.opcode = lw_op;
+ mfc0_inst.i_format.rt = inst.c0r_format.rt;
+ mfc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
+ offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
+ mfc0_inst.i_format.simmediate |= 4;
+#endif
}
- return 0;
+ return kvm_mips_trans_replace(vcpu, opc, mfc0_inst);
}
-int kvm_mips_trans_mtc0(uint32_t inst, uint32_t *opc, struct kvm_vcpu *vcpu)
+int kvm_mips_trans_mtc0(union mips_instruction inst, u32 *opc,
+ struct kvm_vcpu *vcpu)
{
- int32_t rt, rd, sel;
- uint32_t mtc0_inst = SW_TEMPLATE;
- unsigned long kseg0_opc, flags;
-
- rt = (inst >> 16) & 0x1f;
- rd = (inst >> 11) & 0x1f;
- sel = inst & 0x7;
-
- mtc0_inst |= ((rt & 0x1f) << 16);
- mtc0_inst |= offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
-
- if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
- kseg0_opc =
- CKSEG0ADDR(kvm_mips_translate_guest_kseg0_to_hpa
- (vcpu, (unsigned long) opc));
- memcpy((void *)kseg0_opc, (void *)&mtc0_inst, sizeof(uint32_t));
- local_flush_icache_range(kseg0_opc, kseg0_opc + 32);
- } else if (KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
- local_irq_save(flags);
- memcpy((void *)opc, (void *)&mtc0_inst, sizeof(uint32_t));
- local_flush_icache_range((unsigned long)opc,
- (unsigned long)opc + 32);
- local_irq_restore(flags);
- } else {
- kvm_err("%s: Invalid address: %p\n", __func__, opc);
- return -EFAULT;
- }
-
- return 0;
+ union mips_instruction mtc0_inst = { 0 };
+ u32 rd, sel;
+
+ rd = inst.c0r_format.rd;
+ sel = inst.c0r_format.sel;
+
+ mtc0_inst.i_format.opcode = sw_op;
+ mtc0_inst.i_format.rt = inst.c0r_format.rt;
+ mtc0_inst.i_format.simmediate = KVM_GUEST_COMMPAGE_ADDR |
+ offsetof(struct kvm_mips_commpage, cop0.reg[rd][sel]);
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ if (sizeof(vcpu->arch.cop0->reg[0][0]) == 8)
+ mtc0_inst.i_format.simmediate |= 4;
+#endif
+
+ return kvm_mips_trans_replace(vcpu, opc, mtc0_inst);
}
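
The simmediate |= 4 adjustment exists because each commpage CP0 slot is 8 bytes wide when the host stores registers as 64-bit values, while the translated lw/sw only moves 32 bits; on a big-endian kernel the live half of the slot sits at byte offset 4. An illustration (host-side, not patch code):

	union {
		u64 slot;	/* 8-byte commpage slot                      */
		u32 half[2];	/* half[0] at offset 0, half[1] at offset 4  */
	} reg;

	/*
	 * Little endian: the 32-bit value lives at offset 0 (half[0]).
	 * Big endian:    it lives at offset 4 (half[1]), hence the |= 4
	 *                when building the lw/sw immediate.
	 */
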
diff --git a/arch/mips/kvm/emulate.c b/arch/mips/kvm/emulate.c
index b37954cc880d..6eb52b9c9818 100644
--- a/arch/mips/kvm/emulate.c
+++ b/arch/mips/kvm/emulate.c
@@ -52,7 +52,7 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
goto unaligned;
/* Read the instruction */
- insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
+ insn.word = kvm_get_inst((u32 *) epc, vcpu);
if (insn.word == KVM_INVALID_INST)
return KVM_INVALID_INST;
@@ -161,9 +161,12 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
nextpc = epc;
break;
- case blez_op: /* not really i_format */
- case blezl_op:
- /* rt field assumed to be zero */
+ case blez_op: /* POP06 */
+#ifndef CONFIG_CPU_MIPSR6
+ case blezl_op: /* removed in R6 */
+#endif
+ if (insn.i_format.rt != 0)
+ goto compact_branch;
if ((long)arch->gprs[insn.i_format.rs] <= 0)
epc = epc + 4 + (insn.i_format.simmediate << 2);
else
@@ -171,9 +174,12 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
nextpc = epc;
break;
- case bgtz_op:
- case bgtzl_op:
- /* rt field assumed to be zero */
+ case bgtz_op: /* POP07 */
+#ifndef CONFIG_CPU_MIPSR6
+ case bgtzl_op: /* removed in R6 */
+#endif
+ if (insn.i_format.rt != 0)
+ goto compact_branch;
if ((long)arch->gprs[insn.i_format.rs] > 0)
epc = epc + 4 + (insn.i_format.simmediate << 2);
else
@@ -185,6 +191,40 @@ unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
case cop1_op:
kvm_err("%s: unsupported cop1_op\n", __func__);
break;
+
+#ifdef CONFIG_CPU_MIPSR6
+ /* R6 added the following compact branches with forbidden slots */
+ case blezl_op: /* POP26 */
+ case bgtzl_op: /* POP27 */
+ /* only rt == 0 isn't compact branch */
+ if (insn.i_format.rt != 0)
+ goto compact_branch;
+ break;
+ case pop10_op:
+ case pop30_op:
+ /* only rs == rt == 0 is reserved, rest are compact branches */
+ if (insn.i_format.rs != 0 || insn.i_format.rt != 0)
+ goto compact_branch;
+ break;
+ case pop66_op:
+ case pop76_op:
+ /* only rs == 0 isn't compact branch */
+ if (insn.i_format.rs != 0)
+ goto compact_branch;
+ break;
+compact_branch:
+ /*
+ * If we've hit an exception on the forbidden slot, then
+ * the branch must not have been taken.
+ */
+ epc += 8;
+ nextpc = epc;
+ break;
+#else
+compact_branch:
+ /* Compact branches not supported before R6 */
+ break;
+#endif
}
return nextpc;
@@ -198,7 +238,7 @@ sigill:
return nextpc;
}
-enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
+enum emulation_result update_pc(struct kvm_vcpu *vcpu, u32 cause)
{
unsigned long branch_pc;
enum emulation_result er = EMULATE_DONE;
@@ -243,7 +283,7 @@ static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
*
* Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
*/
-static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
+static u32 kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
{
s64 now_ns, periods;
u64 delta;
@@ -300,14 +340,33 @@ static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
*
* Returns: The current value of the guest CP0_Count register.
*/
-static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
+static u32 kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
- ktime_t expires;
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ ktime_t expires, threshold;
+ u32 count, compare;
int running;
- /* Is the hrtimer pending? */
+ /* Calculate the biased and scaled guest CP0_Count */
+ count = vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
+ compare = kvm_read_c0_guest_compare(cop0);
+
+ /*
+ * Find whether CP0_Count has reached the closest timer interrupt. If
+ * not, we shouldn't inject it.
+ */
+ if ((s32)(count - compare) < 0)
+ return count;
+
+ /*
+ * The CP0_Count we're going to return has already reached the closest
+ * timer interrupt. Quickly check if it really is a new interrupt by
+ * looking at whether the interval until the hrtimer expiry time is
+ * less than 1/4 of the timer period.
+ */
expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
- if (ktime_compare(now, expires) >= 0) {
+ threshold = ktime_add_ns(now, vcpu->arch.count_period / 4);
+ if (ktime_before(expires, threshold)) {
/*
* Cancel it while we handle it so there's no chance of
* interference with the timeout handler.
@@ -329,8 +388,7 @@ static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
}
}
- /* Return the biased and scaled guest CP0_Count */
- return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
+ return count;
}
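
The signed comparison handles CP0_Count wrap-around; a worked example with illustrative values:

	u32 count   = 0x00000010;	/* Count has just wrapped past zero      */
	u32 compare = 0xfffffff0;	/* Compare was programmed shortly before */

	/*
	 * Unsigned, count < compare would wrongly read as "not yet due".
	 * The signed difference (s32)(count - compare) = 0x20 is positive,
	 * so the timer has in fact passed Compare and an interrupt is due.
	 */
	bool due = (s32)(count - compare) >= 0;		/* true here */
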
/**
@@ -342,7 +400,7 @@ static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
*
* Returns: The current guest CP0_Count value.
*/
-uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
+u32 kvm_mips_read_count(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -369,8 +427,7 @@ uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
*
* Returns: The ktime at the point of freeze.
*/
-static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
- uint32_t *count)
+static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu, u32 *count)
{
ktime_t now;
@@ -401,16 +458,16 @@ static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
* Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
*/
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
- ktime_t now, uint32_t count)
+ ktime_t now, u32 count)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
- uint32_t compare;
+ u32 compare;
u64 delta;
ktime_t expire;
/* Calculate timeout (wrap 0 to 2^32) */
compare = kvm_read_c0_guest_compare(cop0);
- delta = (u64)(uint32_t)(compare - count - 1) + 1;
+ delta = (u64)(u32)(compare - count - 1) + 1;
delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
expire = ktime_add_ns(now, delta);
@@ -420,39 +477,13 @@ static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
}
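
The delta calculation likewise relies on 32-bit wrap-around: it yields the number of Count ticks until Compare is next reached, with count == compare mapping to a full 2^32-tick period rather than zero. A worked example (count_hz stands in for vcpu->arch.count_hz):

	u32 compare = 100, count = 90;
	u64 delta = (u64)(u32)(compare - count - 1) + 1;	/* 10 ticks             */

	/* count == compare: (u32)(-1) + 1 = 0x100000000, a full timer period     */

	delta = div_u64(delta * NSEC_PER_SEC, count_hz);	/* ticks -> nanoseconds */
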
/**
- * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
- * @vcpu: Virtual CPU.
- *
- * Recalculates and updates the expiry time of the hrtimer. This can be used
- * after timer parameters have been altered which do not depend on the time that
- * the change occurs (in those cases kvm_mips_freeze_hrtimer() and
- * kvm_mips_resume_hrtimer() are used directly).
- *
- * It is guaranteed that no timer interrupts will be lost in the process.
- *
- * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
- */
-static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
-{
- ktime_t now;
- uint32_t count;
-
- /*
- * freeze_hrtimer takes care of a timer interrupts <= count, and
- * resume_hrtimer the hrtimer takes care of a timer interrupts > count.
- */
- now = kvm_mips_freeze_hrtimer(vcpu, &count);
- kvm_mips_resume_hrtimer(vcpu, now, count);
-}
-
-/**
* kvm_mips_write_count() - Modify the count and update timer.
* @vcpu: Virtual CPU.
* @count: Guest CP0_Count value to set.
*
* Sets the CP0_Count value and updates the timer accordingly.
*/
-void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count)
+void kvm_mips_write_count(struct kvm_vcpu *vcpu, u32 count)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
ktime_t now;
@@ -540,23 +571,42 @@ int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
* kvm_mips_write_compare() - Modify compare and update timer.
* @vcpu: Virtual CPU.
* @compare: New CP0_Compare value.
+ * @ack: Whether to acknowledge timer interrupt.
*
* Update CP0_Compare to a new value and update the timeout.
+ * If @ack, atomically acknowledge any pending timer interrupt, otherwise ensure
+ * any pending timer interrupt is preserved.
*/
-void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
+void kvm_mips_write_compare(struct kvm_vcpu *vcpu, u32 compare, bool ack)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
+ int dc;
+ u32 old_compare = kvm_read_c0_guest_compare(cop0);
+ ktime_t now;
+ u32 count;
/* if unchanged, must just be an ack */
- if (kvm_read_c0_guest_compare(cop0) == compare)
+ if (old_compare == compare) {
+ if (!ack)
+ return;
+ kvm_mips_callbacks->dequeue_timer_int(vcpu);
+ kvm_write_c0_guest_compare(cop0, compare);
return;
+ }
+
+ /* freeze_hrtimer() takes care of timer interrupts <= count */
+ dc = kvm_mips_count_disabled(vcpu);
+ if (!dc)
+ now = kvm_mips_freeze_hrtimer(vcpu, &count);
+
+ if (ack)
+ kvm_mips_callbacks->dequeue_timer_int(vcpu);
- /* Update compare */
kvm_write_c0_guest_compare(cop0, compare);
- /* Update timeout if count enabled */
- if (!kvm_mips_count_disabled(vcpu))
- kvm_mips_update_hrtimer(vcpu);
+ /* resume_hrtimer() takes care of timer interrupts > count */
+ if (!dc)
+ kvm_mips_resume_hrtimer(vcpu, now, count);
}
/**
@@ -574,7 +624,7 @@ void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
- uint32_t count;
+ u32 count;
ktime_t now;
/* Stop hrtimer */
@@ -621,7 +671,7 @@ void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
- uint32_t count;
+ u32 count;
kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
@@ -650,7 +700,7 @@ int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
s64 changed = count_ctl ^ vcpu->arch.count_ctl;
s64 delta;
ktime_t expire, now;
- uint32_t count, compare;
+ u32 count, compare;
/* Only allow defined bits to be changed */
if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
@@ -676,7 +726,7 @@ int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
*/
count = kvm_read_c0_guest_count(cop0);
compare = kvm_read_c0_guest_compare(cop0);
- delta = (u64)(uint32_t)(compare - count - 1) + 1;
+ delta = (u64)(u32)(compare - count - 1) + 1;
delta = div_u64(delta * NSEC_PER_SEC,
vcpu->arch.count_hz);
expire = ktime_add_ns(vcpu->arch.count_resume, delta);
@@ -765,7 +815,7 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
vcpu->arch.pending_exceptions);
++vcpu->stat.wait_exits;
- trace_kvm_exit(vcpu, WAIT_EXITS);
+ trace_kvm_exit(vcpu, KVM_TRACE_EXIT_WAIT);
if (!vcpu->arch.pending_exceptions) {
vcpu->arch.wait = 1;
kvm_vcpu_block(vcpu);
@@ -790,9 +840,9 @@ enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
- uint32_t pc = vcpu->arch.pc;
+ unsigned long pc = vcpu->arch.pc;
- kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
+ kvm_err("[%#lx] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
return EMULATE_FAIL;
}
@@ -802,11 +852,11 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
struct mips_coproc *cop0 = vcpu->arch.cop0;
int index = kvm_read_c0_guest_index(cop0);
struct kvm_mips_tlb *tlb = NULL;
- uint32_t pc = vcpu->arch.pc;
+ unsigned long pc = vcpu->arch.pc;
if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
kvm_debug("%s: illegal index: %d\n", __func__, index);
- kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+ kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
pc, index, kvm_read_c0_guest_entryhi(cop0),
kvm_read_c0_guest_entrylo0(cop0),
kvm_read_c0_guest_entrylo1(cop0),
@@ -823,10 +873,10 @@ enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
- tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
- tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
+ tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
+ tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);
- kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
+ kvm_debug("[%#lx] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
pc, index, kvm_read_c0_guest_entryhi(cop0),
kvm_read_c0_guest_entrylo0(cop0),
kvm_read_c0_guest_entrylo1(cop0),
@@ -840,7 +890,7 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_mips_tlb *tlb = NULL;
- uint32_t pc = vcpu->arch.pc;
+ unsigned long pc = vcpu->arch.pc;
int index;
get_random_bytes(&index, sizeof(index));
@@ -856,10 +906,10 @@ enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
- tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
- tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
+ tlb->tlb_lo[0] = kvm_read_c0_guest_entrylo0(cop0);
+ tlb->tlb_lo[1] = kvm_read_c0_guest_entrylo1(cop0);
- kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
+ kvm_debug("[%#lx] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
pc, index, kvm_read_c0_guest_entryhi(cop0),
kvm_read_c0_guest_entrylo0(cop0),
kvm_read_c0_guest_entrylo1(cop0));
@@ -871,14 +921,14 @@ enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
long entryhi = kvm_read_c0_guest_entryhi(cop0);
- uint32_t pc = vcpu->arch.pc;
+ unsigned long pc = vcpu->arch.pc;
int index = -1;
index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
kvm_write_c0_guest_index(cop0, index);
- kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
+ kvm_debug("[%#lx] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
index);
return EMULATE_DONE;
@@ -911,8 +961,8 @@ unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
*/
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
{
- /* Config4 is optional */
- unsigned int mask = MIPS_CONF_M;
+ /* Config4 and ULRI are optional */
+ unsigned int mask = MIPS_CONF_M | MIPS_CONF3_ULRI;
/* Permit MSA to be present if MSA is supported */
if (kvm_mips_guest_can_have_msa(&vcpu->arch))
@@ -931,7 +981,12 @@ unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
{
/* Config5 is optional */
- return MIPS_CONF_M;
+ unsigned int mask = MIPS_CONF_M;
+
+ /* KScrExist */
+ mask |= (unsigned int)vcpu->arch.kscratch_enabled << 16;
+
+ return mask;
}
/**
@@ -962,14 +1017,14 @@ unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
return mask;
}
-enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
- uint32_t cause, struct kvm_run *run,
+enum emulation_result kvm_mips_emulate_CP0(union mips_instruction inst,
+ u32 *opc, u32 cause,
+ struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result er = EMULATE_DONE;
- int32_t rt, rd, copz, sel, co_bit, op;
- uint32_t pc = vcpu->arch.pc;
+ u32 rt, rd, sel;
unsigned long curr_pc;
/*
@@ -981,16 +1036,8 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
if (er == EMULATE_FAIL)
return er;
- copz = (inst >> 21) & 0x1f;
- rt = (inst >> 16) & 0x1f;
- rd = (inst >> 11) & 0x1f;
- sel = inst & 0x7;
- co_bit = (inst >> 25) & 1;
-
- if (co_bit) {
- op = (inst) & 0xff;
-
- switch (op) {
+ if (inst.co_format.co) {
+ switch (inst.co_format.func) {
case tlbr_op: /* Read indexed TLB entry */
er = kvm_mips_emul_tlbr(vcpu);
break;
@@ -1009,47 +1056,58 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
case eret_op:
er = kvm_mips_emul_eret(vcpu);
goto dont_update_pc;
- break;
case wait_op:
er = kvm_mips_emul_wait(vcpu);
break;
}
} else {
- switch (copz) {
+ rt = inst.c0r_format.rt;
+ rd = inst.c0r_format.rd;
+ sel = inst.c0r_format.sel;
+
+ switch (inst.c0r_format.rs) {
case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
cop0->stat[rd][sel]++;
#endif
/* Get reg */
if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
- vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu);
+ vcpu->arch.gprs[rt] =
+ (s32)kvm_mips_read_count(vcpu);
} else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
} else {
- vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+ vcpu->arch.gprs[rt] = (s32)cop0->reg[rd][sel];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
}
- kvm_debug
- ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
- pc, rd, sel, rt, vcpu->arch.gprs[rt]);
-
+ trace_kvm_hwr(vcpu, KVM_TRACE_MFC0,
+ KVM_TRACE_COP0(rd, sel),
+ vcpu->arch.gprs[rt]);
break;
case dmfc_op:
vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
+
+ trace_kvm_hwr(vcpu, KVM_TRACE_DMFC0,
+ KVM_TRACE_COP0(rd, sel),
+ vcpu->arch.gprs[rt]);
break;
case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
cop0->stat[rd][sel]++;
#endif
+ trace_kvm_hwr(vcpu, KVM_TRACE_MTC0,
+ KVM_TRACE_COP0(rd, sel),
+ vcpu->arch.gprs[rt]);
+
if ((rd == MIPS_CP0_TLB_INDEX)
&& (vcpu->arch.gprs[rt] >=
KVM_MIPS_GUEST_TLB_SIZE)) {
@@ -1067,16 +1125,15 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
kvm_read_c0_guest_ebase(cop0));
} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
- uint32_t nasid =
- vcpu->arch.gprs[rt] & ASID_MASK;
+ u32 nasid =
+ vcpu->arch.gprs[rt] & KVM_ENTRYHI_ASID;
if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
((kvm_read_c0_guest_entryhi(cop0) &
- ASID_MASK) != nasid)) {
- kvm_debug("MTCz, change ASID from %#lx to %#lx\n",
+ KVM_ENTRYHI_ASID) != nasid)) {
+ trace_kvm_asid_change(vcpu,
kvm_read_c0_guest_entryhi(cop0)
- & ASID_MASK,
- vcpu->arch.gprs[rt]
- & ASID_MASK);
+ & KVM_ENTRYHI_ASID,
+ nasid);
/* Blow away the shadow host TLBs */
kvm_mips_flush_host_tlb(1);
@@ -1089,15 +1146,11 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
goto done;
} else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
- kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
- pc, kvm_read_c0_guest_compare(cop0),
- vcpu->arch.gprs[rt]);
-
/* If we are writing to COMPARE */
/* Clear pending timer interrupt, if any */
- kvm_mips_callbacks->dequeue_timer_int(vcpu);
kvm_mips_write_compare(vcpu,
- vcpu->arch.gprs[rt]);
+ vcpu->arch.gprs[rt],
+ true);
} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
unsigned int old_val, val, change;
@@ -1144,7 +1197,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
* it first.
*/
if (change & ST0_CU1 && !(val & ST0_FR) &&
- vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+ vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
kvm_lose_fpu(vcpu);
/*
@@ -1155,7 +1208,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
* the near future.
*/
if (change & ST0_CU1 &&
- vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
+ vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
change_c0_status(ST0_CU1, val);
preempt_enable();
@@ -1190,7 +1243,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
* context is already loaded.
*/
if (change & MIPS_CONF5_FRE &&
- vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
+ vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
change_c0_config5(MIPS_CONF5_FRE, val);
/*
@@ -1200,7 +1253,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
* quickly enabled again in the near future.
*/
if (change & MIPS_CONF5_MSAEN &&
- vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+ vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
change_c0_config5(MIPS_CONF5_MSAEN,
val);
@@ -1208,7 +1261,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
kvm_write_c0_guest_config5(cop0, val);
} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
- uint32_t old_cause, new_cause;
+ u32 old_cause, new_cause;
old_cause = kvm_read_c0_guest_cause(cop0);
new_cause = vcpu->arch.gprs[rt];
@@ -1222,20 +1275,30 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
else
kvm_mips_count_enable_cause(vcpu);
}
+ } else if ((rd == MIPS_CP0_HWRENA) && (sel == 0)) {
+ u32 mask = MIPS_HWRENA_CPUNUM |
+ MIPS_HWRENA_SYNCISTEP |
+ MIPS_HWRENA_CC |
+ MIPS_HWRENA_CCRES;
+
+ if (kvm_read_c0_guest_config3(cop0) &
+ MIPS_CONF3_ULRI)
+ mask |= MIPS_HWRENA_ULR;
+ cop0->reg[rd][sel] = vcpu->arch.gprs[rt] & mask;
} else {
cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
}
-
- kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
- rd, sel, cop0->reg[rd][sel]);
break;
case dmtc_op:
kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
vcpu->arch.pc, rt, rd, sel);
+ trace_kvm_hwr(vcpu, KVM_TRACE_DMTC0,
+ KVM_TRACE_COP0(rd, sel),
+ vcpu->arch.gprs[rt]);
er = EMULATE_FAIL;
break;
@@ -1247,7 +1310,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
vcpu->arch.gprs[rt] =
kvm_read_c0_guest_status(cop0);
/* EI */
- if (inst & 0x20) {
+ if (inst.mfmc0_format.sc) {
kvm_debug("[%#lx] mfmc0_op: EI\n",
vcpu->arch.pc);
kvm_set_c0_guest_status(cop0, ST0_IE);
@@ -1261,9 +1324,8 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
case wrpgpr_op:
{
- uint32_t css =
- cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
- uint32_t pss =
+ u32 css = cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
+ u32 pss =
(cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
/*
* We don't support any shadow register sets, so
@@ -1280,7 +1342,7 @@ enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
break;
default:
kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
- vcpu->arch.pc, copz);
+ vcpu->arch.pc, inst.c0r_format.rs);
er = EMULATE_FAIL;
break;
}
@@ -1301,13 +1363,14 @@ dont_update_pc:
return er;
}
-enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
+enum emulation_result kvm_mips_emulate_store(union mips_instruction inst,
+ u32 cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DO_MMIO;
- int32_t op, base, rt, offset;
- uint32_t bytes;
+ u32 rt;
+ u32 bytes;
void *data = run->mmio.data;
unsigned long curr_pc;
@@ -1320,12 +1383,9 @@ enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
if (er == EMULATE_FAIL)
return er;
- rt = (inst >> 16) & 0x1f;
- base = (inst >> 21) & 0x1f;
- offset = inst & 0xffff;
- op = (inst >> 26) & 0x3f;
+ rt = inst.i_format.rt;
- switch (op) {
+ switch (inst.i_format.opcode) {
case sb_op:
bytes = 1;
if (bytes > sizeof(run->mmio.data)) {
@@ -1346,7 +1406,7 @@ enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
*(u8 *) data = vcpu->arch.gprs[rt];
kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
- *(uint8_t *) data);
+ *(u8 *) data);
break;
@@ -1368,11 +1428,11 @@ enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
run->mmio.is_write = 1;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 1;
- *(uint32_t *) data = vcpu->arch.gprs[rt];
+ *(u32 *) data = vcpu->arch.gprs[rt];
kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
- vcpu->arch.gprs[rt], *(uint32_t *) data);
+ vcpu->arch.gprs[rt], *(u32 *) data);
break;
case sh_op:
@@ -1393,15 +1453,16 @@ enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
run->mmio.is_write = 1;
vcpu->mmio_needed = 1;
vcpu->mmio_is_write = 1;
- *(uint16_t *) data = vcpu->arch.gprs[rt];
+ *(u16 *) data = vcpu->arch.gprs[rt];
kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
- vcpu->arch.gprs[rt], *(uint32_t *) data);
+ vcpu->arch.gprs[rt], *(u32 *) data);
break;
default:
- kvm_err("Store not yet supported");
+ kvm_err("Store not yet supported (inst=0x%08x)\n",
+ inst.word);
er = EMULATE_FAIL;
break;
}
@@ -1413,18 +1474,16 @@ enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
return er;
}
-enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
- struct kvm_run *run,
+enum emulation_result kvm_mips_emulate_load(union mips_instruction inst,
+ u32 cause, struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DO_MMIO;
- int32_t op, base, rt, offset;
- uint32_t bytes;
+ u32 op, rt;
+ u32 bytes;
- rt = (inst >> 16) & 0x1f;
- base = (inst >> 21) & 0x1f;
- offset = inst & 0xffff;
- op = (inst >> 26) & 0x3f;
+ rt = inst.i_format.rt;
+ op = inst.i_format.opcode;
vcpu->arch.pending_load_cause = cause;
vcpu->arch.io_gpr = rt;
@@ -1510,7 +1569,8 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
break;
default:
- kvm_err("Load not yet supported");
+ kvm_err("Load not yet supported (inst=0x%08x)\n",
+ inst.word);
er = EMULATE_FAIL;
break;
}
@@ -1518,40 +1578,15 @@ enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
return er;
}
-int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
-{
- unsigned long offset = (va & ~PAGE_MASK);
- struct kvm *kvm = vcpu->kvm;
- unsigned long pa;
- gfn_t gfn;
- kvm_pfn_t pfn;
-
- gfn = va >> PAGE_SHIFT;
-
- if (gfn >= kvm->arch.guest_pmap_npages) {
- kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn);
- kvm_mips_dump_host_tlbs();
- kvm_arch_vcpu_dump_regs(vcpu);
- return -1;
- }
- pfn = kvm->arch.guest_pmap[gfn];
- pa = (pfn << PAGE_SHIFT) | offset;
-
- kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va,
- CKSEG0ADDR(pa));
-
- local_flush_icache_range(CKSEG0ADDR(pa), 32);
- return 0;
-}
-
-enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
- uint32_t cause,
+enum emulation_result kvm_mips_emulate_cache(union mips_instruction inst,
+ u32 *opc, u32 cause,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
enum emulation_result er = EMULATE_DONE;
- int32_t offset, cache, op_inst, op, base;
+ u32 cache, op_inst, op, base;
+ s16 offset;
struct kvm_vcpu_arch *arch = &vcpu->arch;
unsigned long va;
unsigned long curr_pc;
@@ -1565,9 +1600,12 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
if (er == EMULATE_FAIL)
return er;
- base = (inst >> 21) & 0x1f;
- op_inst = (inst >> 16) & 0x1f;
- offset = (int16_t)inst;
+ base = inst.i_format.rs;
+ op_inst = inst.i_format.rt;
+ if (cpu_has_mips_r6)
+ offset = inst.spec3_format.simmediate;
+ else
+ offset = inst.i_format.simmediate;
cache = op_inst & CacheOp_Cache;
op = op_inst & CacheOp_Op;
@@ -1620,11 +1658,11 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
*/
index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
(kvm_read_c0_guest_entryhi
- (cop0) & ASID_MASK));
+ (cop0) & KVM_ENTRYHI_ASID));
if (index < 0) {
- vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
vcpu->arch.host_cp0_badvaddr = va;
+ vcpu->arch.pc = curr_pc;
er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
vcpu);
preempt_enable();
@@ -1636,6 +1674,8 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
* invalid exception to the guest
*/
if (!TLB_IS_VALID(*tlb, va)) {
+ vcpu->arch.host_cp0_badvaddr = va;
+ vcpu->arch.pc = curr_pc;
er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
run, vcpu);
preempt_enable();
@@ -1645,9 +1685,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
* We fault an entry from the guest tlb to the
* shadow host TLB
*/
- kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
- NULL,
- NULL);
+ kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb);
}
}
} else {
@@ -1655,7 +1693,7 @@ enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
cache, op, base, arch->gprs[base], offset);
er = EMULATE_FAIL;
preempt_enable();
- goto dont_update_pc;
+ goto done;
}
@@ -1683,33 +1721,37 @@ skip_fault:
kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
cache, op, base, arch->gprs[base], offset);
er = EMULATE_FAIL;
- preempt_enable();
- goto dont_update_pc;
}
preempt_enable();
+done:
+ /* Rollback PC only if emulation was unsuccessful */
+ if (er == EMULATE_FAIL)
+ vcpu->arch.pc = curr_pc;
dont_update_pc:
- /* Rollback PC */
- vcpu->arch.pc = curr_pc;
-done:
+ /*
+ * This is for exceptions whose emulation updates the PC, so do not
+ * overwrite the PC under any circumstances
+ */
+
return er;
}
-enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
+enum emulation_result kvm_mips_emulate_inst(u32 cause, u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
+ union mips_instruction inst;
enum emulation_result er = EMULATE_DONE;
- uint32_t inst;
/* Fetch the instruction. */
if (cause & CAUSEF_BD)
opc += 1;
- inst = kvm_get_inst(opc, vcpu);
+ inst.word = kvm_get_inst(opc, vcpu);
- switch (((union mips_instruction)inst).r_format.opcode) {
+ switch (inst.r_format.opcode) {
case cop0_op:
er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
break;
@@ -1726,15 +1768,31 @@ enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
er = kvm_mips_emulate_load(inst, cause, run, vcpu);
break;
+#ifndef CONFIG_CPU_MIPSR6
case cache_op:
++vcpu->stat.cache_exits;
- trace_kvm_exit(vcpu, CACHE_EXITS);
+ trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
break;
+#else
+ case spec3_op:
+ switch (inst.spec3_format.func) {
+ case cache6_op:
+ ++vcpu->stat.cache_exits;
+ trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
+ er = kvm_mips_emulate_cache(inst, opc, cause, run,
+ vcpu);
+ break;
+ default:
+ goto unknown;
+ };
+ break;
+unknown:
+#endif
default:
kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
- inst);
+ inst.word);
kvm_arch_vcpu_dump_regs(vcpu);
er = EMULATE_FAIL;
break;
@@ -1743,8 +1801,8 @@ enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
return er;
}
-enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_syscall(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -1778,15 +1836,15 @@ enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
return er;
}
-enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_tlbmiss_ld(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch;
unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+ (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
/* save old pc */
@@ -1824,8 +1882,8 @@ enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
return EMULATE_DONE;
}
-enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_tlbinv_ld(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -1833,7 +1891,7 @@ enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
struct kvm_vcpu_arch *arch = &vcpu->arch;
unsigned long entryhi =
(vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+ (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
/* save old pc */
@@ -1870,15 +1928,15 @@ enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
return EMULATE_DONE;
}
-enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_tlbmiss_st(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch;
unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+ (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
/* save old pc */
@@ -1914,15 +1972,15 @@ enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
return EMULATE_DONE;
}
-enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_tlbinv_st(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch;
unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+ (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
/* save old pc */
@@ -1959,7 +2017,7 @@ enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
}
/* TLBMOD: store into address matching TLB with Dirty bit off */
-enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
+enum emulation_result kvm_mips_handle_tlbmod(u32 cause, u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -1967,7 +2025,7 @@ enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
#ifdef DEBUG
struct mips_coproc *cop0 = vcpu->arch.cop0;
unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+ (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
int index;
/* If address not in the guest TLB, then we are in trouble */
@@ -1987,14 +2045,14 @@ enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
return er;
}
-enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_tlbmod(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
+ (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
struct kvm_vcpu_arch *arch = &vcpu->arch;
if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
@@ -2030,8 +2088,8 @@ enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
return EMULATE_DONE;
}
-enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_fpu_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -2059,8 +2117,8 @@ enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
return EMULATE_DONE;
}
-enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_ri_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -2094,8 +2152,8 @@ enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
return er;
}
-enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_bp_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -2129,8 +2187,8 @@ enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
return er;
}
-enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_trap_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -2164,8 +2222,8 @@ enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
return er;
}
-enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_msafpe_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -2199,8 +2257,8 @@ enum emulation_result kvm_mips_emulate_msafpe_exc(unsigned long cause,
return er;
}
-enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_fpe_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -2234,8 +2292,8 @@ enum emulation_result kvm_mips_emulate_fpe_exc(unsigned long cause,
return er;
}
-enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_emulate_msadis_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -2269,22 +2327,7 @@ enum emulation_result kvm_mips_emulate_msadis_exc(unsigned long cause,
return er;
}
-/* ll/sc, rdhwr, sync emulation */
-
-#define OPCODE 0xfc000000
-#define BASE 0x03e00000
-#define RT 0x001f0000
-#define OFFSET 0x0000ffff
-#define LL 0xc0000000
-#define SC 0xe0000000
-#define SPEC0 0x00000000
-#define SPEC3 0x7c000000
-#define RD 0x0000f800
-#define FUNC 0x0000003f
-#define SYNC 0x0000000f
-#define RDHWR 0x0000003b
-
-enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
+enum emulation_result kvm_mips_handle_ri(u32 cause, u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
@@ -2292,7 +2335,7 @@ enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
struct kvm_vcpu_arch *arch = &vcpu->arch;
enum emulation_result er = EMULATE_DONE;
unsigned long curr_pc;
- uint32_t inst;
+ union mips_instruction inst;
/*
* Update PC and hold onto current PC in case there is
@@ -2307,17 +2350,22 @@ enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
if (cause & CAUSEF_BD)
opc += 1;
- inst = kvm_get_inst(opc, vcpu);
+ inst.word = kvm_get_inst(opc, vcpu);
- if (inst == KVM_INVALID_INST) {
+ if (inst.word == KVM_INVALID_INST) {
kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
return EMULATE_FAIL;
}
- if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
+ if (inst.r_format.opcode == spec3_op &&
+ inst.r_format.func == rdhwr_op &&
+ inst.r_format.rs == 0 &&
+ (inst.r_format.re >> 3) == 0) {
int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
- int rd = (inst & RD) >> 11;
- int rt = (inst & RT) >> 16;
+ int rd = inst.r_format.rd;
+ int rt = inst.r_format.rt;
+ int sel = inst.r_format.re & 0x7;
+
/* If usermode, check RDHWR rd is allowed by guest HWREna */
if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
@@ -2325,17 +2373,17 @@ enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
goto emulate_ri;
}
switch (rd) {
- case 0: /* CPU number */
- arch->gprs[rt] = 0;
+ case MIPS_HWR_CPUNUM: /* CPU number */
+ arch->gprs[rt] = vcpu->vcpu_id;
break;
- case 1: /* SYNCI length */
+ case MIPS_HWR_SYNCISTEP: /* SYNCI length */
arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
current_cpu_data.icache.linesz);
break;
- case 2: /* Read count register */
- arch->gprs[rt] = kvm_mips_read_count(vcpu);
+ case MIPS_HWR_CC: /* Read count register */
+ arch->gprs[rt] = (s32)kvm_mips_read_count(vcpu);
break;
- case 3: /* Count register resolution */
+ case MIPS_HWR_CCRES: /* Count register resolution */
switch (current_cpu_data.cputype) {
case CPU_20KC:
case CPU_25KF:
@@ -2345,7 +2393,7 @@ enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
arch->gprs[rt] = 2;
}
break;
- case 29:
+ case MIPS_HWR_ULR: /* Read UserLocal register */
arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
break;
@@ -2353,8 +2401,12 @@ enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
goto emulate_ri;
}
+
+ trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR, KVM_TRACE_HWR(rd, sel),
+ vcpu->arch.gprs[rt]);
} else {
- kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
+ kvm_debug("Emulate RI not supported @ %p: %#x\n",
+ opc, inst.word);
goto emulate_ri;
}
@@ -2387,19 +2439,19 @@ enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
switch (run->mmio.len) {
case 4:
- *gpr = *(int32_t *) run->mmio.data;
+ *gpr = *(s32 *) run->mmio.data;
break;
case 2:
if (vcpu->mmio_needed == 2)
- *gpr = *(int16_t *) run->mmio.data;
+ *gpr = *(s16 *) run->mmio.data;
else
- *gpr = *(uint16_t *)run->mmio.data;
+ *gpr = *(u16 *)run->mmio.data;
break;
case 1:
if (vcpu->mmio_needed == 2)
- *gpr = *(int8_t *) run->mmio.data;
+ *gpr = *(s8 *) run->mmio.data;
else
*gpr = *(u8 *) run->mmio.data;
break;
@@ -2414,12 +2466,12 @@ done:
return er;
}
-static enum emulation_result kvm_mips_emulate_exc(unsigned long cause,
- uint32_t *opc,
+static enum emulation_result kvm_mips_emulate_exc(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
- uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+ u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_vcpu_arch *arch = &vcpu->arch;
enum emulation_result er = EMULATE_DONE;
@@ -2452,13 +2504,13 @@ static enum emulation_result kvm_mips_emulate_exc(unsigned long cause,
return er;
}
-enum emulation_result kvm_mips_check_privilege(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_check_privilege(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DONE;
- uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+ u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
@@ -2548,18 +2600,18 @@ enum emulation_result kvm_mips_check_privilege(unsigned long cause,
* (2) TLB entry is present in the Guest TLB but not in the shadow, in this
* case we inject the TLB from the Guest TLB into the shadow host TLB
*/
-enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
- uint32_t *opc,
+enum emulation_result kvm_mips_handle_tlbmiss(u32 cause,
+ u32 *opc,
struct kvm_run *run,
struct kvm_vcpu *vcpu)
{
enum emulation_result er = EMULATE_DONE;
- uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+ u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
unsigned long va = vcpu->arch.host_cp0_badvaddr;
int index;
- kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
- vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
+ kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx\n",
+ vcpu->arch.host_cp0_badvaddr);
/*
* KVM would not have got the exception if this entry was valid in the
@@ -2569,7 +2621,8 @@ enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
*/
index = kvm_mips_guest_tlb_lookup(vcpu,
(va & VPN2_MASK) |
- (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) & ASID_MASK));
+ (kvm_read_c0_guest_entryhi(vcpu->arch.cop0) &
+ KVM_ENTRYHI_ASID));
if (index < 0) {
if (exccode == EXCCODE_TLBL) {
er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
@@ -2601,13 +2654,12 @@ enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
}
} else {
kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
- tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
+ tlb->tlb_hi, tlb->tlb_lo[0], tlb->tlb_lo[1]);
/*
* OK we have a Guest TLB entry, now inject it into the
* shadow host TLB
*/
- kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
- NULL);
+ kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb);
}
}
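(Aside, not part of the patch: the store/load/cache/rdhwr hunks above replace manual field extraction such as "rt = (inst >> 16) & 0x1f" with the named bitfields of union mips_instruction from asm/inst.h. A minimal user-space sketch of the same decoding idea follows; the union below is a simplified, hypothetical stand-in for the kernel definition and assumes a little-endian host, so treat it as an illustration only:)

    #include <stdint.h>
    #include <stdio.h>

    /* Simplified stand-in for the i-format view of union mips_instruction. */
    union mips_inst {
            uint32_t word;
            struct {
                    int          simmediate : 16;   /* signed 16-bit offset */
                    unsigned int rt         : 5;    /* target register */
                    unsigned int rs         : 5;    /* base register */
                    unsigned int opcode     : 6;    /* major opcode */
            } i_format;
    };

    int main(void)
    {
            union mips_inst inst = { .word = 0xafbf001c }; /* sw ra,28(sp) */

            /* Equivalent to the old (inst >> 26) & 0x3f, (inst >> 16) & 0x1f, ... */
            printf("opcode=%u rs=%u rt=%u offset=%d\n",
                   (unsigned)inst.i_format.opcode, (unsigned)inst.i_format.rs,
                   (unsigned)inst.i_format.rt, inst.i_format.simmediate);
            return 0;
    }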
diff --git a/arch/mips/kvm/entry.c b/arch/mips/kvm/entry.c
new file mode 100644
index 000000000000..6a02b3a3fa65
--- /dev/null
+++ b/arch/mips/kvm/entry.c
@@ -0,0 +1,701 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * Generation of main entry point for the guest, exception handling.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ *
+ * Copyright (C) 2016 Imagination Technologies Ltd.
+ */
+
+#include <linux/kvm_host.h>
+#include <asm/msa.h>
+#include <asm/setup.h>
+#include <asm/uasm.h>
+
+/* Register names */
+#define ZERO 0
+#define AT 1
+#define V0 2
+#define V1 3
+#define A0 4
+#define A1 5
+
+#if _MIPS_SIM == _MIPS_SIM_ABI32
+#define T0 8
+#define T1 9
+#define T2 10
+#define T3 11
+#endif /* _MIPS_SIM == _MIPS_SIM_ABI32 */
+
+#if _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32
+#define T0 12
+#define T1 13
+#define T2 14
+#define T3 15
+#endif /* _MIPS_SIM == _MIPS_SIM_ABI64 || _MIPS_SIM == _MIPS_SIM_NABI32 */
+
+#define S0 16
+#define S1 17
+#define T9 25
+#define K0 26
+#define K1 27
+#define GP 28
+#define SP 29
+#define RA 31
+
+/* Some CP0 registers */
+#define C0_HWRENA 7, 0
+#define C0_BADVADDR 8, 0
+#define C0_ENTRYHI 10, 0
+#define C0_STATUS 12, 0
+#define C0_CAUSE 13, 0
+#define C0_EPC 14, 0
+#define C0_EBASE 15, 1
+#define C0_CONFIG5 16, 5
+#define C0_DDATA_LO 28, 3
+#define C0_ERROREPC 30, 0
+
+#define CALLFRAME_SIZ 32
+
+#ifdef CONFIG_64BIT
+#define ST0_KX_IF_64 ST0_KX
+#else
+#define ST0_KX_IF_64 0
+#endif
+
+static unsigned int scratch_vcpu[2] = { C0_DDATA_LO };
+static unsigned int scratch_tmp[2] = { C0_ERROREPC };
+
+enum label_id {
+ label_fpu_1 = 1,
+ label_msa_1,
+ label_return_to_host,
+ label_kernel_asid,
+ label_exit_common,
+};
+
+UASM_L_LA(_fpu_1)
+UASM_L_LA(_msa_1)
+UASM_L_LA(_return_to_host)
+UASM_L_LA(_kernel_asid)
+UASM_L_LA(_exit_common)
+
+static void *kvm_mips_build_enter_guest(void *addr);
+static void *kvm_mips_build_ret_from_exit(void *addr);
+static void *kvm_mips_build_ret_to_guest(void *addr);
+static void *kvm_mips_build_ret_to_host(void *addr);
+
+/**
+ * kvm_mips_entry_setup() - Perform global setup for entry code.
+ *
+ * Perform global setup for entry code, such as choosing a scratch register.
+ *
+ * Returns: 0 on success.
+ * -errno on failure.
+ */
+int kvm_mips_entry_setup(void)
+{
+ /*
+ * We prefer to use KScratchN registers if they are available over the
+ * defaults above, which may not work on all cores.
+ */
+ unsigned int kscratch_mask = cpu_data[0].kscratch_mask & 0xfc;
+
+ /* Pick a scratch register for storing VCPU */
+ if (kscratch_mask) {
+ scratch_vcpu[0] = 31;
+ scratch_vcpu[1] = ffs(kscratch_mask) - 1;
+ kscratch_mask &= ~BIT(scratch_vcpu[1]);
+ }
+
+ /* Pick a scratch register to use as a temp for saving state */
+ if (kscratch_mask) {
+ scratch_tmp[0] = 31;
+ scratch_tmp[1] = ffs(kscratch_mask) - 1;
+ kscratch_mask &= ~BIT(scratch_tmp[1]);
+ }
+
+ return 0;
+}
+
+static void kvm_mips_build_save_scratch(u32 **p, unsigned int tmp,
+ unsigned int frame)
+{
+ /* Save the VCPU scratch register value in cp0_epc of the stack frame */
+ UASM_i_MFC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
+ UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
+
+ /* Save the temp scratch register value in cp0_cause of stack frame */
+ if (scratch_tmp[0] == 31) {
+ UASM_i_MFC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
+ UASM_i_SW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
+ }
+}
+
+static void kvm_mips_build_restore_scratch(u32 **p, unsigned int tmp,
+ unsigned int frame)
+{
+ /*
+ * Restore host scratch register values saved by
+ * kvm_mips_build_save_scratch().
+ */
+ UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_epc), frame);
+ UASM_i_MTC0(p, tmp, scratch_vcpu[0], scratch_vcpu[1]);
+
+ if (scratch_tmp[0] == 31) {
+ UASM_i_LW(p, tmp, offsetof(struct pt_regs, cp0_cause), frame);
+ UASM_i_MTC0(p, tmp, scratch_tmp[0], scratch_tmp[1]);
+ }
+}
+
+/**
+ * build_set_exc_base() - Assemble code to write exception base address.
+ * @p: Code buffer pointer.
+ * @reg: Source register (generated code may set WG bit in @reg).
+ *
+ * Assemble code to modify the exception base address in the EBase register,
+ * using the appropriately sized access and setting the WG bit if necessary.
+ */
+static inline void build_set_exc_base(u32 **p, unsigned int reg)
+{
+ if (cpu_has_ebase_wg) {
+ /* Set WG so that all the bits get written */
+ uasm_i_ori(p, reg, reg, MIPS_EBASE_WG);
+ UASM_i_MTC0(p, reg, C0_EBASE);
+ } else {
+ uasm_i_mtc0(p, reg, C0_EBASE);
+ }
+}
+
+/**
+ * kvm_mips_build_vcpu_run() - Assemble function to start running a guest VCPU.
+ * @addr: Address to start writing code.
+ *
+ * Assemble the start of the vcpu_run function to run a guest VCPU. The function
+ * conforms to the following prototype:
+ *
+ * int vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu);
+ *
+ * The exit from the guest and return to the caller is handled by the code
+ * generated by kvm_mips_build_ret_to_host().
+ *
+ * Returns: Next address after end of written function.
+ */
+void *kvm_mips_build_vcpu_run(void *addr)
+{
+ u32 *p = addr;
+ unsigned int i;
+
+ /*
+ * A0: run
+ * A1: vcpu
+ */
+
+ /* k0/k1 not being used in host kernel context */
+ UASM_i_ADDIU(&p, K1, SP, -(int)sizeof(struct pt_regs));
+ for (i = 16; i < 32; ++i) {
+ if (i == 24)
+ i = 28;
+ UASM_i_SW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
+ }
+
+ /* Save host status */
+ uasm_i_mfc0(&p, V0, C0_STATUS);
+ UASM_i_SW(&p, V0, offsetof(struct pt_regs, cp0_status), K1);
+
+ /* Save scratch registers, will be used to store pointer to vcpu etc */
+ kvm_mips_build_save_scratch(&p, V1, K1);
+
+ /* VCPU scratch register has pointer to vcpu */
+ UASM_i_MTC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
+
+ /* Offset into vcpu->arch */
+ UASM_i_ADDIU(&p, K1, A1, offsetof(struct kvm_vcpu, arch));
+
+ /*
+ * Save the host stack to VCPU, used for exception processing
+ * when we exit from the Guest
+ */
+ UASM_i_SW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);
+
+ /* Save the kernel gp as well */
+ UASM_i_SW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);
+
+ /*
+ * Setup status register for running the guest in UM, interrupts
+ * are disabled
+ */
+ UASM_i_LA(&p, K0, ST0_EXL | KSU_USER | ST0_BEV | ST0_KX_IF_64);
+ uasm_i_mtc0(&p, K0, C0_STATUS);
+ uasm_i_ehb(&p);
+
+ /* load up the new EBASE */
+ UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
+ build_set_exc_base(&p, K0);
+
+ /*
+ * Now that the new EBASE has been loaded, unset BEV, set
+ * interrupt mask as it was but make sure that timer interrupts
+ * are enabled
+ */
+ uasm_i_addiu(&p, K0, ZERO, ST0_EXL | KSU_USER | ST0_IE | ST0_KX_IF_64);
+ uasm_i_andi(&p, V0, V0, ST0_IM);
+ uasm_i_or(&p, K0, K0, V0);
+ uasm_i_mtc0(&p, K0, C0_STATUS);
+ uasm_i_ehb(&p);
+
+ p = kvm_mips_build_enter_guest(p);
+
+ return p;
+}
+
+/**
+ * kvm_mips_build_enter_guest() - Assemble code to resume guest execution.
+ * @addr: Address to start writing code.
+ *
+ * Assemble the code to resume guest execution. This code is common between the
+ * initial entry into the guest from the host, and returning from the exit
+ * handler back to the guest.
+ *
+ * Returns: Next address after end of written function.
+ */
+static void *kvm_mips_build_enter_guest(void *addr)
+{
+ u32 *p = addr;
+ unsigned int i;
+ struct uasm_label labels[2];
+ struct uasm_reloc relocs[2];
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ /* Set Guest EPC */
+ UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, pc), K1);
+ UASM_i_MTC0(&p, T0, C0_EPC);
+
+ /* Set the ASID for the Guest Kernel */
+ UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, cop0), K1);
+ UASM_i_LW(&p, T0, offsetof(struct mips_coproc, reg[MIPS_CP0_STATUS][0]),
+ T0);
+ uasm_i_andi(&p, T0, T0, KSU_USER | ST0_ERL | ST0_EXL);
+ uasm_i_xori(&p, T0, T0, KSU_USER);
+ uasm_il_bnez(&p, &r, T0, label_kernel_asid);
+ UASM_i_ADDIU(&p, T1, K1,
+ offsetof(struct kvm_vcpu_arch, guest_kernel_asid));
+ /* else user */
+ UASM_i_ADDIU(&p, T1, K1,
+ offsetof(struct kvm_vcpu_arch, guest_user_asid));
+ uasm_l_kernel_asid(&l, p);
+
+ /* t1: contains the base of the ASID array, need to get the cpu id */
+ /* smp_processor_id */
+ uasm_i_lw(&p, T2, offsetof(struct thread_info, cpu), GP);
+ /* x4 */
+ uasm_i_sll(&p, T2, T2, 2);
+ UASM_i_ADDU(&p, T3, T1, T2);
+ uasm_i_lw(&p, K0, 0, T3);
+#ifdef CONFIG_MIPS_ASID_BITS_VARIABLE
+ /* x sizeof(struct cpuinfo_mips)/4 */
+ uasm_i_addiu(&p, T3, ZERO, sizeof(struct cpuinfo_mips)/4);
+ uasm_i_mul(&p, T2, T2, T3);
+
+ UASM_i_LA_mostly(&p, AT, (long)&cpu_data[0].asid_mask);
+ UASM_i_ADDU(&p, AT, AT, T2);
+ UASM_i_LW(&p, T2, uasm_rel_lo((long)&cpu_data[0].asid_mask), AT);
+ uasm_i_and(&p, K0, K0, T2);
+#else
+ uasm_i_andi(&p, K0, K0, MIPS_ENTRYHI_ASID);
+#endif
+ uasm_i_mtc0(&p, K0, C0_ENTRYHI);
+ uasm_i_ehb(&p);
+
+ /* Disable RDHWR access */
+ uasm_i_mtc0(&p, ZERO, C0_HWRENA);
+
+ /* load the guest context from VCPU and return */
+ for (i = 1; i < 32; ++i) {
+ /* Guest k0/k1 loaded later */
+ if (i == K0 || i == K1)
+ continue;
+ UASM_i_LW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
+ }
+
+#ifndef CONFIG_CPU_MIPSR6
+ /* Restore hi/lo */
+ UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, hi), K1);
+ uasm_i_mthi(&p, K0);
+
+ UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, lo), K1);
+ uasm_i_mtlo(&p, K0);
+#endif
+
+ /* Restore the guest's k0/k1 registers */
+ UASM_i_LW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
+ UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
+
+ /* Jump to guest */
+ uasm_i_eret(&p);
+
+ uasm_resolve_relocs(relocs, labels);
+
+ return p;
+}
+
+/**
+ * kvm_mips_build_exception() - Assemble first level guest exception handler.
+ * @addr: Address to start writing code.
+ * @handler: Address of common handler (within range of @addr).
+ *
+ * Assemble exception vector code for guest execution. The generated vector will
+ * branch to the common exception handler generated by kvm_mips_build_exit().
+ *
+ * Returns: Next address after end of written function.
+ */
+void *kvm_mips_build_exception(void *addr, void *handler)
+{
+ u32 *p = addr;
+ struct uasm_label labels[2];
+ struct uasm_reloc relocs[2];
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ /* Save guest k1 into scratch register */
+ UASM_i_MTC0(&p, K1, scratch_tmp[0], scratch_tmp[1]);
+
+ /* Get the VCPU pointer from the VCPU scratch register */
+ UASM_i_MFC0(&p, K1, scratch_vcpu[0], scratch_vcpu[1]);
+ UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
+
+ /* Save guest k0 into VCPU structure */
+ UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, gprs[K0]), K1);
+
+ /* Branch to the common handler */
+ uasm_il_b(&p, &r, label_exit_common);
+ uasm_i_nop(&p);
+
+ uasm_l_exit_common(&l, handler);
+ uasm_resolve_relocs(relocs, labels);
+
+ return p;
+}
+
+/**
+ * kvm_mips_build_exit() - Assemble common guest exit handler.
+ * @addr: Address to start writing code.
+ *
+ * Assemble the generic guest exit handling code. This is called by the
+ * exception vectors (generated by kvm_mips_build_exception()), and calls
+ * kvm_mips_handle_exit(), then either resumes the guest or returns to the host
+ * depending on the return value.
+ *
+ * Returns: Next address after end of written function.
+ */
+void *kvm_mips_build_exit(void *addr)
+{
+ u32 *p = addr;
+ unsigned int i;
+ struct uasm_label labels[3];
+ struct uasm_reloc relocs[3];
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ /*
+ * Generic Guest exception handler. We end up here when the guest
+ * does something that causes a trap to kernel mode.
+ *
+ * Both k0/k1 registers will have already been saved (k0 into the vcpu
+ * structure, and k1 into the scratch_tmp register).
+ *
+ * The k1 register will already contain the kvm_vcpu_arch pointer.
+ */
+
+ /* Start saving Guest context to VCPU */
+ for (i = 0; i < 32; ++i) {
+ /* Guest k0/k1 saved later */
+ if (i == K0 || i == K1)
+ continue;
+ UASM_i_SW(&p, i, offsetof(struct kvm_vcpu_arch, gprs[i]), K1);
+ }
+
+#ifndef CONFIG_CPU_MIPSR6
+ /* We need to save hi/lo and restore them on the way out */
+ uasm_i_mfhi(&p, T0);
+ UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, hi), K1);
+
+ uasm_i_mflo(&p, T0);
+ UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, lo), K1);
+#endif
+
+ /* Finally save guest k1 to VCPU */
+ uasm_i_ehb(&p);
+ UASM_i_MFC0(&p, T0, scratch_tmp[0], scratch_tmp[1]);
+ UASM_i_SW(&p, T0, offsetof(struct kvm_vcpu_arch, gprs[K1]), K1);
+
+ /* Now that context has been saved, we can use other registers */
+
+ /* Restore vcpu */
+ UASM_i_MFC0(&p, A1, scratch_vcpu[0], scratch_vcpu[1]);
+ uasm_i_move(&p, S1, A1);
+
+ /* Restore run (vcpu->run) */
+ UASM_i_LW(&p, A0, offsetof(struct kvm_vcpu, run), A1);
+ /* Save pointer to run in s0, will be saved by the compiler */
+ uasm_i_move(&p, S0, A0);
+
+ /*
+ * Save Host level EPC, BadVaddr and Cause to VCPU, useful to process
+ * the exception
+ */
+ UASM_i_MFC0(&p, K0, C0_EPC);
+ UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, pc), K1);
+
+ UASM_i_MFC0(&p, K0, C0_BADVADDR);
+ UASM_i_SW(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_badvaddr),
+ K1);
+
+ uasm_i_mfc0(&p, K0, C0_CAUSE);
+ uasm_i_sw(&p, K0, offsetof(struct kvm_vcpu_arch, host_cp0_cause), K1);
+
+ /* Now restore the host state just enough to run the handlers */
+
+ /* Switch EBASE to the one used by Linux */
+ /* load up the host EBASE */
+ uasm_i_mfc0(&p, V0, C0_STATUS);
+
+ uasm_i_lui(&p, AT, ST0_BEV >> 16);
+ uasm_i_or(&p, K0, V0, AT);
+
+ uasm_i_mtc0(&p, K0, C0_STATUS);
+ uasm_i_ehb(&p);
+
+ UASM_i_LA_mostly(&p, K0, (long)&ebase);
+ UASM_i_LW(&p, K0, uasm_rel_lo((long)&ebase), K0);
+ build_set_exc_base(&p, K0);
+
+ if (raw_cpu_has_fpu) {
+ /*
+ * If FPU is enabled, save FCR31 and clear it so that later
+ * ctc1's don't trigger FPE for pending exceptions.
+ */
+ uasm_i_lui(&p, AT, ST0_CU1 >> 16);
+ uasm_i_and(&p, V1, V0, AT);
+ uasm_il_beqz(&p, &r, V1, label_fpu_1);
+ uasm_i_nop(&p);
+ uasm_i_cfc1(&p, T0, 31);
+ uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.fcr31),
+ K1);
+ uasm_i_ctc1(&p, ZERO, 31);
+ uasm_l_fpu_1(&l, p);
+ }
+
+ if (cpu_has_msa) {
+ /*
+ * If MSA is enabled, save MSACSR and clear it so that later
+ * instructions don't trigger MSAFPE for pending exceptions.
+ */
+ uasm_i_mfc0(&p, T0, C0_CONFIG5);
+ uasm_i_ext(&p, T0, T0, 27, 1); /* MIPS_CONF5_MSAEN */
+ uasm_il_beqz(&p, &r, T0, label_msa_1);
+ uasm_i_nop(&p);
+ uasm_i_cfcmsa(&p, T0, MSA_CSR);
+ uasm_i_sw(&p, T0, offsetof(struct kvm_vcpu_arch, fpu.msacsr),
+ K1);
+ uasm_i_ctcmsa(&p, MSA_CSR, ZERO);
+ uasm_l_msa_1(&l, p);
+ }
+
+ /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
+ uasm_i_addiu(&p, AT, ZERO, ~(ST0_EXL | KSU_USER | ST0_IE));
+ uasm_i_and(&p, V0, V0, AT);
+ uasm_i_lui(&p, AT, ST0_CU0 >> 16);
+ uasm_i_or(&p, V0, V0, AT);
+ uasm_i_mtc0(&p, V0, C0_STATUS);
+ uasm_i_ehb(&p);
+
+ /* Load up host GP */
+ UASM_i_LW(&p, GP, offsetof(struct kvm_vcpu_arch, host_gp), K1);
+
+ /* Need a stack before we can jump to "C" */
+ UASM_i_LW(&p, SP, offsetof(struct kvm_vcpu_arch, host_stack), K1);
+
+ /* Saved host state */
+ UASM_i_ADDIU(&p, SP, SP, -(int)sizeof(struct pt_regs));
+
+ /*
+ * XXXKYMA do we need to load the host ASID, maybe not because the
+ * kernel entries are marked GLOBAL, need to verify
+ */
+
+ /* Restore host scratch registers, as we'll have clobbered them */
+ kvm_mips_build_restore_scratch(&p, K0, SP);
+
+ /* Restore RDHWR access */
+ UASM_i_LA_mostly(&p, K0, (long)&hwrena);
+ uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
+ uasm_i_mtc0(&p, K0, C0_HWRENA);
+
+ /* Jump to handler */
+ /*
+ * XXXKYMA: not sure if this is safe, how large is the stack??
+ * Now jump to the kvm_mips_handle_exit() to see if we can deal
+ * with this in the kernel
+ */
+ UASM_i_LA(&p, T9, (unsigned long)kvm_mips_handle_exit);
+ uasm_i_jalr(&p, RA, T9);
+ UASM_i_ADDIU(&p, SP, SP, -CALLFRAME_SIZ);
+
+ uasm_resolve_relocs(relocs, labels);
+
+ p = kvm_mips_build_ret_from_exit(p);
+
+ return p;
+}
+
+/**
+ * kvm_mips_build_ret_from_exit() - Assemble guest exit return handler.
+ * @addr: Address to start writing code.
+ *
+ * Assemble the code to handle the return from kvm_mips_handle_exit(), either
+ * resuming the guest or returning to the host depending on the return value.
+ *
+ * Returns: Next address after end of written function.
+ */
+static void *kvm_mips_build_ret_from_exit(void *addr)
+{
+ u32 *p = addr;
+ struct uasm_label labels[2];
+ struct uasm_reloc relocs[2];
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+
+ /* Return from handler; make sure interrupts are disabled */
+ uasm_i_di(&p, ZERO);
+ uasm_i_ehb(&p);
+
+ /*
+ * XXXKYMA: k0/k1 could have been blown away if we processed
+ * an exception while we were handling the exception from the
+ * guest, reload k1
+ */
+
+ uasm_i_move(&p, K1, S1);
+ UASM_i_ADDIU(&p, K1, K1, offsetof(struct kvm_vcpu, arch));
+
+ /*
+ * Check return value, should tell us if we are returning to the
+ * host (handle I/O etc.) or resuming the guest
+ */
+ uasm_i_andi(&p, T0, V0, RESUME_HOST);
+ uasm_il_bnez(&p, &r, T0, label_return_to_host);
+ uasm_i_nop(&p);
+
+ p = kvm_mips_build_ret_to_guest(p);
+
+ uasm_l_return_to_host(&l, p);
+ p = kvm_mips_build_ret_to_host(p);
+
+ uasm_resolve_relocs(relocs, labels);
+
+ return p;
+}
+
+/**
+ * kvm_mips_build_ret_to_guest() - Assemble code to return to the guest.
+ * @addr: Address to start writing code.
+ *
+ * Assemble the code to handle return from the guest exit handler
+ * (kvm_mips_handle_exit()) back to the guest.
+ *
+ * Returns: Next address after end of written function.
+ */
+static void *kvm_mips_build_ret_to_guest(void *addr)
+{
+ u32 *p = addr;
+
+ /* Put the saved pointer to vcpu (s1) back into the scratch register */
+ UASM_i_MTC0(&p, S1, scratch_vcpu[0], scratch_vcpu[1]);
+
+ /* Load up the Guest EBASE to minimize the window where BEV is set */
+ UASM_i_LW(&p, T0, offsetof(struct kvm_vcpu_arch, guest_ebase), K1);
+
+ /* Switch EBASE back to the one used by KVM */
+ uasm_i_mfc0(&p, V1, C0_STATUS);
+ uasm_i_lui(&p, AT, ST0_BEV >> 16);
+ uasm_i_or(&p, K0, V1, AT);
+ uasm_i_mtc0(&p, K0, C0_STATUS);
+ uasm_i_ehb(&p);
+ build_set_exc_base(&p, T0);
+
+ /* Setup status register for running guest in UM */
+ uasm_i_ori(&p, V1, V1, ST0_EXL | KSU_USER | ST0_IE);
+ UASM_i_LA(&p, AT, ~(ST0_CU0 | ST0_MX));
+ uasm_i_and(&p, V1, V1, AT);
+ uasm_i_mtc0(&p, V1, C0_STATUS);
+ uasm_i_ehb(&p);
+
+ p = kvm_mips_build_enter_guest(p);
+
+ return p;
+}
+
+/**
+ * kvm_mips_build_ret_to_host() - Assemble code to return to the host.
+ * @addr: Address to start writing code.
+ *
+ * Assemble the code to handle return from the guest exit handler
+ * (kvm_mips_handle_exit()) back to the host, i.e. to the caller of the vcpu_run
+ * function generated by kvm_mips_build_vcpu_run().
+ *
+ * Returns: Next address after end of written function.
+ */
+static void *kvm_mips_build_ret_to_host(void *addr)
+{
+ u32 *p = addr;
+ unsigned int i;
+
+ /* EBASE is already pointing to Linux */
+ UASM_i_LW(&p, K1, offsetof(struct kvm_vcpu_arch, host_stack), K1);
+ UASM_i_ADDIU(&p, K1, K1, -(int)sizeof(struct pt_regs));
+
+ /*
+ * r2/v0 is the return code, shift it down by 2 (arithmetic)
+ * to recover the err code
+ */
+ uasm_i_sra(&p, K0, V0, 2);
+ uasm_i_move(&p, V0, K0);
+
+ /* Load context saved on the host stack */
+ for (i = 16; i < 31; ++i) {
+ if (i == 24)
+ i = 28;
+ UASM_i_LW(&p, i, offsetof(struct pt_regs, regs[i]), K1);
+ }
+
+ /* Restore RDHWR access */
+ UASM_i_LA_mostly(&p, K0, (long)&hwrena);
+ uasm_i_lw(&p, K0, uasm_rel_lo((long)&hwrena), K0);
+ uasm_i_mtc0(&p, K0, C0_HWRENA);
+
+ /* Restore RA, which is the address we will return to */
+ UASM_i_LW(&p, RA, offsetof(struct pt_regs, regs[RA]), K1);
+ uasm_i_jr(&p, RA);
+ uasm_i_nop(&p);
+
+ return p;
+}
+
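(Aside, not part of the patch: every builder in entry.c above follows the same uasm emit pattern, that is a u32 * cursor into the code buffer which each uasm_i_*()/UASM_i_*() call advances by one instruction, uasm_il_*() calls that record relocations against labels placed later with uasm_l_*(), and a final uasm_resolve_relocs() to patch the branches. The condensed helper below is hypothetical, written as if it sat alongside the functions above and reusing their V0 register index, label_return_to_host id and uasm_l_return_to_host() label helper, purely to show the shape of that pattern:)

    static void *kvm_mips_build_uasm_sketch(void *addr)
    {
            u32 *p = addr;
            struct uasm_label labels[2];
            struct uasm_reloc relocs[2];
            struct uasm_label *l = labels;
            struct uasm_reloc *r = relocs;

            memset(labels, 0, sizeof(labels));
            memset(relocs, 0, sizeof(relocs));

            /* Branch forward to a label that has not been placed yet */
            uasm_il_bnez(&p, &r, V0, label_return_to_host);
            uasm_i_nop(&p);                         /* branch delay slot */

            /* ... more emitted instructions would go here ... */

            uasm_l_return_to_host(&l, p);           /* place the label */
            uasm_i_nop(&p);

            /* Patch the recorded branch now that the label is known */
            uasm_resolve_relocs(relocs, labels);

            return p;       /* next free slot, as the real builders return */
    }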
diff --git a/arch/mips/kvm/fpu.S b/arch/mips/kvm/fpu.S
index 531fbf5131c0..16f17c6390dd 100644
--- a/arch/mips/kvm/fpu.S
+++ b/arch/mips/kvm/fpu.S
@@ -14,13 +14,16 @@
#include <asm/mipsregs.h>
#include <asm/regdef.h>
+/* preprocessor replaces the fp in ".set fp=64" with $30 otherwise */
+#undef fp
+
.set noreorder
.set noat
LEAF(__kvm_save_fpu)
.set push
- .set mips64r2
SET_HARDFLOAT
+ .set fp=64
mfc0 t0, CP0_STATUS
sll t0, t0, 5 # is Status.FR set?
bgez t0, 1f # no: skip odd doubles
@@ -63,8 +66,8 @@ LEAF(__kvm_save_fpu)
LEAF(__kvm_restore_fpu)
.set push
- .set mips64r2
SET_HARDFLOAT
+ .set fp=64
mfc0 t0, CP0_STATUS
sll t0, t0, 5 # is Status.FR set?
bgez t0, 1f # no: skip odd doubles
diff --git a/arch/mips/kvm/interrupt.c b/arch/mips/kvm/interrupt.c
index 95f790663b0c..ad28dac6b7e9 100644
--- a/arch/mips/kvm/interrupt.c
+++ b/arch/mips/kvm/interrupt.c
@@ -22,12 +22,12 @@
#include "interrupt.h"
-void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
+void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
set_bit(priority, &vcpu->arch.pending_exceptions);
}
-void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority)
+void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
clear_bit(priority, &vcpu->arch.pending_exceptions);
}
@@ -114,10 +114,10 @@ void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
/* Deliver the interrupt of the corresponding priority, if possible. */
int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
- uint32_t cause)
+ u32 cause)
{
int allowed = 0;
- uint32_t exccode;
+ u32 exccode;
struct kvm_vcpu_arch *arch = &vcpu->arch;
struct mips_coproc *cop0 = vcpu->arch.cop0;
@@ -196,12 +196,12 @@ int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
}
int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
- uint32_t cause)
+ u32 cause)
{
return 1;
}
-void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause)
+void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause)
{
unsigned long *pending = &vcpu->arch.pending_exceptions;
unsigned long *pending_clr = &vcpu->arch.pending_exceptions_clr;
diff --git a/arch/mips/kvm/interrupt.h b/arch/mips/kvm/interrupt.h
index 4ab4bdfad703..fb118a2c8379 100644
--- a/arch/mips/kvm/interrupt.h
+++ b/arch/mips/kvm/interrupt.h
@@ -28,16 +28,13 @@
#define MIPS_EXC_MAX 12
/* XXXSL More to follow */
-extern char mips32_exception[], mips32_exceptionEnd[];
-extern char mips32_GuestException[], mips32_GuestExceptionEnd[];
-
#define C_TI (_ULCAST_(1) << 30)
#define KVM_MIPS_IRQ_DELIVER_ALL_AT_ONCE (0)
#define KVM_MIPS_IRQ_CLEAR_ALL_AT_ONCE (0)
-void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
-void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, uint32_t priority);
+void kvm_mips_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
+void kvm_mips_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority);
int kvm_mips_pending_timer(struct kvm_vcpu *vcpu);
void kvm_mips_queue_timer_int_cb(struct kvm_vcpu *vcpu);
@@ -47,7 +44,7 @@ void kvm_mips_queue_io_int_cb(struct kvm_vcpu *vcpu,
void kvm_mips_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
struct kvm_mips_interrupt *irq);
int kvm_mips_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
- uint32_t cause);
+ u32 cause);
int kvm_mips_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
- uint32_t cause);
-void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, uint32_t cause);
+ u32 cause);
+void kvm_mips_deliver_interrupts(struct kvm_vcpu *vcpu, u32 cause);
diff --git a/arch/mips/kvm/locore.S b/arch/mips/kvm/locore.S
deleted file mode 100644
index 81687ab1b523..000000000000
--- a/arch/mips/kvm/locore.S
+++ /dev/null
@@ -1,654 +0,0 @@
-/*
- * This file is subject to the terms and conditions of the GNU General Public
- * License. See the file "COPYING" in the main directory of this archive
- * for more details.
- *
- * Main entry point for the guest, exception handling.
- *
- * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
- * Authors: Sanjay Lal <sanjayl@kymasys.com>
- */
-
-#include <asm/asm.h>
-#include <asm/asmmacro.h>
-#include <asm/regdef.h>
-#include <asm/mipsregs.h>
-#include <asm/stackframe.h>
-#include <asm/asm-offsets.h>
-
-#define _C_LABEL(x) x
-#define MIPSX(name) mips32_ ## name
-#define CALLFRAME_SIZ 32
-
-/*
- * VECTOR
- * exception vector entrypoint
- */
-#define VECTOR(x, regmask) \
- .ent _C_LABEL(x),0; \
- EXPORT(x);
-
-#define VECTOR_END(x) \
- EXPORT(x);
-
-/* Overload, Danger Will Robinson!! */
-#define PT_HOST_ASID PT_BVADDR
-#define PT_HOST_USERLOCAL PT_EPC
-
-#define CP0_DDATA_LO $28,3
-
-/* Resume Flags */
-#define RESUME_FLAG_HOST (1<<1) /* Resume host? */
-
-#define RESUME_GUEST 0
-#define RESUME_HOST RESUME_FLAG_HOST
-
-/*
- * __kvm_mips_vcpu_run: entry point to the guest
- * a0: run
- * a1: vcpu
- */
- .set noreorder
- .set noat
-
-FEXPORT(__kvm_mips_vcpu_run)
- /* k0/k1 not being used in host kernel context */
- INT_ADDIU k1, sp, -PT_SIZE
- LONG_S $0, PT_R0(k1)
- LONG_S $1, PT_R1(k1)
- LONG_S $2, PT_R2(k1)
- LONG_S $3, PT_R3(k1)
-
- LONG_S $4, PT_R4(k1)
- LONG_S $5, PT_R5(k1)
- LONG_S $6, PT_R6(k1)
- LONG_S $7, PT_R7(k1)
-
- LONG_S $8, PT_R8(k1)
- LONG_S $9, PT_R9(k1)
- LONG_S $10, PT_R10(k1)
- LONG_S $11, PT_R11(k1)
- LONG_S $12, PT_R12(k1)
- LONG_S $13, PT_R13(k1)
- LONG_S $14, PT_R14(k1)
- LONG_S $15, PT_R15(k1)
- LONG_S $16, PT_R16(k1)
- LONG_S $17, PT_R17(k1)
-
- LONG_S $18, PT_R18(k1)
- LONG_S $19, PT_R19(k1)
- LONG_S $20, PT_R20(k1)
- LONG_S $21, PT_R21(k1)
- LONG_S $22, PT_R22(k1)
- LONG_S $23, PT_R23(k1)
- LONG_S $24, PT_R24(k1)
- LONG_S $25, PT_R25(k1)
-
- /*
- * XXXKYMA k0/k1 not saved, not being used if we got here through
- * an ioctl()
- */
-
- LONG_S $28, PT_R28(k1)
- LONG_S $29, PT_R29(k1)
- LONG_S $30, PT_R30(k1)
- LONG_S $31, PT_R31(k1)
-
- /* Save hi/lo */
- mflo v0
- LONG_S v0, PT_LO(k1)
- mfhi v1
- LONG_S v1, PT_HI(k1)
-
- /* Save host status */
- mfc0 v0, CP0_STATUS
- LONG_S v0, PT_STATUS(k1)
-
- /* Save host ASID, shove it into the BVADDR location */
- mfc0 v1, CP0_ENTRYHI
- andi v1, 0xff
- LONG_S v1, PT_HOST_ASID(k1)
-
- /* Save DDATA_LO, will be used to store pointer to vcpu */
- mfc0 v1, CP0_DDATA_LO
- LONG_S v1, PT_HOST_USERLOCAL(k1)
-
- /* DDATA_LO has pointer to vcpu */
- mtc0 a1, CP0_DDATA_LO
-
- /* Offset into vcpu->arch */
- INT_ADDIU k1, a1, VCPU_HOST_ARCH
-
- /*
- * Save the host stack to VCPU, used for exception processing
- * when we exit from the Guest
- */
- LONG_S sp, VCPU_HOST_STACK(k1)
-
- /* Save the kernel gp as well */
- LONG_S gp, VCPU_HOST_GP(k1)
-
- /*
- * Setup status register for running the guest in UM, interrupts
- * are disabled
- */
- li k0, (ST0_EXL | KSU_USER | ST0_BEV)
- mtc0 k0, CP0_STATUS
- ehb
-
- /* load up the new EBASE */
- LONG_L k0, VCPU_GUEST_EBASE(k1)
- mtc0 k0, CP0_EBASE
-
- /*
- * Now that the new EBASE has been loaded, unset BEV, set
- * interrupt mask as it was but make sure that timer interrupts
- * are enabled
- */
- li k0, (ST0_EXL | KSU_USER | ST0_IE)
- andi v0, v0, ST0_IM
- or k0, k0, v0
- mtc0 k0, CP0_STATUS
- ehb
-
- /* Set Guest EPC */
- LONG_L t0, VCPU_PC(k1)
- mtc0 t0, CP0_EPC
-
-FEXPORT(__kvm_mips_load_asid)
- /* Set the ASID for the Guest Kernel */
- PTR_L t0, VCPU_COP0(k1)
- LONG_L t0, COP0_STATUS(t0)
- andi t0, KSU_USER | ST0_ERL | ST0_EXL
- xori t0, KSU_USER
- bnez t0, 1f /* If kernel */
- INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
- INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
-1:
- /* t1: contains the base of the ASID array, need to get the cpu id */
- LONG_L t2, TI_CPU($28) /* smp_processor_id */
- INT_SLL t2, t2, 2 /* x4 */
- REG_ADDU t3, t1, t2
- LONG_L k0, (t3)
- andi k0, k0, 0xff
- mtc0 k0, CP0_ENTRYHI
- ehb
-
- /* Disable RDHWR access */
- mtc0 zero, CP0_HWRENA
-
- /* Now load up the Guest Context from VCPU */
- LONG_L $1, VCPU_R1(k1)
- LONG_L $2, VCPU_R2(k1)
- LONG_L $3, VCPU_R3(k1)
-
- LONG_L $4, VCPU_R4(k1)
- LONG_L $5, VCPU_R5(k1)
- LONG_L $6, VCPU_R6(k1)
- LONG_L $7, VCPU_R7(k1)
-
- LONG_L $8, VCPU_R8(k1)
- LONG_L $9, VCPU_R9(k1)
- LONG_L $10, VCPU_R10(k1)
- LONG_L $11, VCPU_R11(k1)
- LONG_L $12, VCPU_R12(k1)
- LONG_L $13, VCPU_R13(k1)
- LONG_L $14, VCPU_R14(k1)
- LONG_L $15, VCPU_R15(k1)
- LONG_L $16, VCPU_R16(k1)
- LONG_L $17, VCPU_R17(k1)
- LONG_L $18, VCPU_R18(k1)
- LONG_L $19, VCPU_R19(k1)
- LONG_L $20, VCPU_R20(k1)
- LONG_L $21, VCPU_R21(k1)
- LONG_L $22, VCPU_R22(k1)
- LONG_L $23, VCPU_R23(k1)
- LONG_L $24, VCPU_R24(k1)
- LONG_L $25, VCPU_R25(k1)
-
- /* k0/k1 loaded up later */
-
- LONG_L $28, VCPU_R28(k1)
- LONG_L $29, VCPU_R29(k1)
- LONG_L $30, VCPU_R30(k1)
- LONG_L $31, VCPU_R31(k1)
-
- /* Restore hi/lo */
- LONG_L k0, VCPU_LO(k1)
- mtlo k0
-
- LONG_L k0, VCPU_HI(k1)
- mthi k0
-
-FEXPORT(__kvm_mips_load_k0k1)
- /* Restore the guest's k0/k1 registers */
- LONG_L k0, VCPU_R26(k1)
- LONG_L k1, VCPU_R27(k1)
-
- /* Jump to guest */
- eret
-
-VECTOR(MIPSX(exception), unknown)
-/* Find out what mode we came from and jump to the proper handler. */
- mtc0 k0, CP0_ERROREPC #01: Save guest k0
- ehb #02:
-
- mfc0 k0, CP0_EBASE #02: Get EBASE
- INT_SRL k0, k0, 10 #03: Get rid of CPUNum
- INT_SLL k0, k0, 10 #04
- LONG_S k1, 0x3000(k0) #05: Save k1 @ offset 0x3000
- INT_ADDIU k0, k0, 0x2000 #06: Exception handler is
- # installed @ offset 0x2000
- j k0 #07: jump to the function
- nop #08: branch delay slot
-VECTOR_END(MIPSX(exceptionEnd))
-.end MIPSX(exception)
-
-/*
- * Generic Guest exception handler. We end up here when the guest
- * does something that causes a trap to kernel mode.
- */
-NESTED (MIPSX(GuestException), CALLFRAME_SIZ, ra)
- /* Get the VCPU pointer from DDTATA_LO */
- mfc0 k1, CP0_DDATA_LO
- INT_ADDIU k1, k1, VCPU_HOST_ARCH
-
- /* Start saving Guest context to VCPU */
- LONG_S $0, VCPU_R0(k1)
- LONG_S $1, VCPU_R1(k1)
- LONG_S $2, VCPU_R2(k1)
- LONG_S $3, VCPU_R3(k1)
- LONG_S $4, VCPU_R4(k1)
- LONG_S $5, VCPU_R5(k1)
- LONG_S $6, VCPU_R6(k1)
- LONG_S $7, VCPU_R7(k1)
- LONG_S $8, VCPU_R8(k1)
- LONG_S $9, VCPU_R9(k1)
- LONG_S $10, VCPU_R10(k1)
- LONG_S $11, VCPU_R11(k1)
- LONG_S $12, VCPU_R12(k1)
- LONG_S $13, VCPU_R13(k1)
- LONG_S $14, VCPU_R14(k1)
- LONG_S $15, VCPU_R15(k1)
- LONG_S $16, VCPU_R16(k1)
- LONG_S $17, VCPU_R17(k1)
- LONG_S $18, VCPU_R18(k1)
- LONG_S $19, VCPU_R19(k1)
- LONG_S $20, VCPU_R20(k1)
- LONG_S $21, VCPU_R21(k1)
- LONG_S $22, VCPU_R22(k1)
- LONG_S $23, VCPU_R23(k1)
- LONG_S $24, VCPU_R24(k1)
- LONG_S $25, VCPU_R25(k1)
-
- /* Guest k0/k1 saved later */
-
- LONG_S $28, VCPU_R28(k1)
- LONG_S $29, VCPU_R29(k1)
- LONG_S $30, VCPU_R30(k1)
- LONG_S $31, VCPU_R31(k1)
-
- /* We need to save hi/lo and restore them on the way out */
- mfhi t0
- LONG_S t0, VCPU_HI(k1)
-
- mflo t0
- LONG_S t0, VCPU_LO(k1)
-
- /* Finally save guest k0/k1 to VCPU */
- mfc0 t0, CP0_ERROREPC
- LONG_S t0, VCPU_R26(k1)
-
- /* Get GUEST k1 and save it in VCPU */
- PTR_LI t1, ~0x2ff
- mfc0 t0, CP0_EBASE
- and t0, t0, t1
- LONG_L t0, 0x3000(t0)
- LONG_S t0, VCPU_R27(k1)
-
- /* Now that context has been saved, we can use other registers */
-
- /* Restore vcpu */
- mfc0 a1, CP0_DDATA_LO
- move s1, a1
-
- /* Restore run (vcpu->run) */
- LONG_L a0, VCPU_RUN(a1)
- /* Save pointer to run in s0, will be saved by the compiler */
- move s0, a0
-
- /*
- * Save Host level EPC, BadVaddr and Cause to VCPU, useful to
- * process the exception
- */
- mfc0 k0,CP0_EPC
- LONG_S k0, VCPU_PC(k1)
-
- mfc0 k0, CP0_BADVADDR
- LONG_S k0, VCPU_HOST_CP0_BADVADDR(k1)
-
- mfc0 k0, CP0_CAUSE
- LONG_S k0, VCPU_HOST_CP0_CAUSE(k1)
-
- mfc0 k0, CP0_ENTRYHI
- LONG_S k0, VCPU_HOST_ENTRYHI(k1)
-
- /* Now restore the host state just enough to run the handlers */
-
- /* Switch EBASE to the one used by Linux */
- /* load up the host EBASE */
- mfc0 v0, CP0_STATUS
-
- .set at
- or k0, v0, ST0_BEV
- .set noat
-
- mtc0 k0, CP0_STATUS
- ehb
-
- LONG_L k0, VCPU_HOST_EBASE(k1)
- mtc0 k0,CP0_EBASE
-
- /*
- * If FPU is enabled, save FCR31 and clear it so that later ctc1's don't
- * trigger FPE for pending exceptions.
- */
- .set at
- and v1, v0, ST0_CU1
- beqz v1, 1f
- nop
- .set push
- SET_HARDFLOAT
- cfc1 t0, fcr31
- sw t0, VCPU_FCR31(k1)
- ctc1 zero,fcr31
- .set pop
- .set noat
-1:
-
-#ifdef CONFIG_CPU_HAS_MSA
- /*
- * If MSA is enabled, save MSACSR and clear it so that later
- * instructions don't trigger MSAFPE for pending exceptions.
- */
- mfc0 t0, CP0_CONFIG3
- ext t0, t0, 28, 1 /* MIPS_CONF3_MSAP */
- beqz t0, 1f
- nop
- mfc0 t0, CP0_CONFIG5
- ext t0, t0, 27, 1 /* MIPS_CONF5_MSAEN */
- beqz t0, 1f
- nop
- _cfcmsa t0, MSA_CSR
- sw t0, VCPU_MSA_CSR(k1)
- _ctcmsa MSA_CSR, zero
-1:
-#endif
-
- /* Now that the new EBASE has been loaded, unset BEV and KSU_USER */
- .set at
- and v0, v0, ~(ST0_EXL | KSU_USER | ST0_IE)
- or v0, v0, ST0_CU0
- .set noat
- mtc0 v0, CP0_STATUS
- ehb
-
- /* Load up host GP */
- LONG_L gp, VCPU_HOST_GP(k1)
-
- /* Need a stack before we can jump to "C" */
- LONG_L sp, VCPU_HOST_STACK(k1)
-
- /* Saved host state */
- INT_ADDIU sp, sp, -PT_SIZE
-
- /*
- * XXXKYMA do we need to load the host ASID, maybe not because the
- * kernel entries are marked GLOBAL, need to verify
- */
-
- /* Restore host DDATA_LO */
- LONG_L k0, PT_HOST_USERLOCAL(sp)
- mtc0 k0, CP0_DDATA_LO
-
- /* Restore RDHWR access */
- PTR_LI k0, 0x2000000F
- mtc0 k0, CP0_HWRENA
-
- /* Jump to handler */
-FEXPORT(__kvm_mips_jump_to_handler)
- /*
- * XXXKYMA: not sure if this is safe, how large is the stack??
- * Now jump to the kvm_mips_handle_exit() to see if we can deal
- * with this in the kernel
- */
- PTR_LA t9, kvm_mips_handle_exit
- jalr.hb t9
- INT_ADDIU sp, sp, -CALLFRAME_SIZ /* BD Slot */
-
- /* Return from handler Make sure interrupts are disabled */
- di
- ehb
-
- /*
- * XXXKYMA: k0/k1 could have been blown away if we processed
- * an exception while we were handling the exception from the
- * guest, reload k1
- */
-
- move k1, s1
- INT_ADDIU k1, k1, VCPU_HOST_ARCH
-
- /*
- * Check return value, should tell us if we are returning to the
- * host (handle I/O etc)or resuming the guest
- */
- andi t0, v0, RESUME_HOST
- bnez t0, __kvm_mips_return_to_host
- nop
-
-__kvm_mips_return_to_guest:
- /* Put the saved pointer to vcpu (s1) back into the DDATA_LO Register */
- mtc0 s1, CP0_DDATA_LO
-
- /* Load up the Guest EBASE to minimize the window where BEV is set */
- LONG_L t0, VCPU_GUEST_EBASE(k1)
-
- /* Switch EBASE back to the one used by KVM */
- mfc0 v1, CP0_STATUS
- .set at
- or k0, v1, ST0_BEV
- .set noat
- mtc0 k0, CP0_STATUS
- ehb
- mtc0 t0, CP0_EBASE
-
- /* Setup status register for running guest in UM */
- .set at
- or v1, v1, (ST0_EXL | KSU_USER | ST0_IE)
- and v1, v1, ~(ST0_CU0 | ST0_MX)
- .set noat
- mtc0 v1, CP0_STATUS
- ehb
-
- /* Set Guest EPC */
- LONG_L t0, VCPU_PC(k1)
- mtc0 t0, CP0_EPC
-
- /* Set the ASID for the Guest Kernel */
- PTR_L t0, VCPU_COP0(k1)
- LONG_L t0, COP0_STATUS(t0)
- andi t0, KSU_USER | ST0_ERL | ST0_EXL
- xori t0, KSU_USER
- bnez t0, 1f /* If kernel */
- INT_ADDIU t1, k1, VCPU_GUEST_KERNEL_ASID /* (BD) */
- INT_ADDIU t1, k1, VCPU_GUEST_USER_ASID /* else user */
-1:
- /* t1: contains the base of the ASID array, need to get the cpu id */
- LONG_L t2, TI_CPU($28) /* smp_processor_id */
- INT_SLL t2, t2, 2 /* x4 */
- REG_ADDU t3, t1, t2
- LONG_L k0, (t3)
- andi k0, k0, 0xff
- mtc0 k0, CP0_ENTRYHI
- ehb
-
- /* Disable RDHWR access */
- mtc0 zero, CP0_HWRENA
-
- /* load the guest context from VCPU and return */
- LONG_L $0, VCPU_R0(k1)
- LONG_L $1, VCPU_R1(k1)
- LONG_L $2, VCPU_R2(k1)
- LONG_L $3, VCPU_R3(k1)
- LONG_L $4, VCPU_R4(k1)
- LONG_L $5, VCPU_R5(k1)
- LONG_L $6, VCPU_R6(k1)
- LONG_L $7, VCPU_R7(k1)
- LONG_L $8, VCPU_R8(k1)
- LONG_L $9, VCPU_R9(k1)
- LONG_L $10, VCPU_R10(k1)
- LONG_L $11, VCPU_R11(k1)
- LONG_L $12, VCPU_R12(k1)
- LONG_L $13, VCPU_R13(k1)
- LONG_L $14, VCPU_R14(k1)
- LONG_L $15, VCPU_R15(k1)
- LONG_L $16, VCPU_R16(k1)
- LONG_L $17, VCPU_R17(k1)
- LONG_L $18, VCPU_R18(k1)
- LONG_L $19, VCPU_R19(k1)
- LONG_L $20, VCPU_R20(k1)
- LONG_L $21, VCPU_R21(k1)
- LONG_L $22, VCPU_R22(k1)
- LONG_L $23, VCPU_R23(k1)
- LONG_L $24, VCPU_R24(k1)
- LONG_L $25, VCPU_R25(k1)
-
- /* k0/k1 loaded later */
- LONG_L $28, VCPU_R28(k1)
- LONG_L $29, VCPU_R29(k1)
- LONG_L $30, VCPU_R30(k1)
- LONG_L $31, VCPU_R31(k1)
-
-FEXPORT(__kvm_mips_skip_guest_restore)
- LONG_L k0, VCPU_HI(k1)
- mthi k0
-
- LONG_L k0, VCPU_LO(k1)
- mtlo k0
-
- LONG_L k0, VCPU_R26(k1)
- LONG_L k1, VCPU_R27(k1)
-
- eret
-
-__kvm_mips_return_to_host:
- /* EBASE is already pointing to Linux */
- LONG_L k1, VCPU_HOST_STACK(k1)
- INT_ADDIU k1,k1, -PT_SIZE
-
- /* Restore host DDATA_LO */
- LONG_L k0, PT_HOST_USERLOCAL(k1)
- mtc0 k0, CP0_DDATA_LO
-
- /* Restore host ASID */
- LONG_L k0, PT_HOST_ASID(sp)
- andi k0, 0xff
- mtc0 k0,CP0_ENTRYHI
- ehb
-
- /* Load context saved on the host stack */
- LONG_L $0, PT_R0(k1)
- LONG_L $1, PT_R1(k1)
-
- /*
- * r2/v0 is the return code, shift it down by 2 (arithmetic)
- * to recover the err code
- */
- INT_SRA k0, v0, 2
- move $2, k0
-
- LONG_L $3, PT_R3(k1)
- LONG_L $4, PT_R4(k1)
- LONG_L $5, PT_R5(k1)
- LONG_L $6, PT_R6(k1)
- LONG_L $7, PT_R7(k1)
- LONG_L $8, PT_R8(k1)
- LONG_L $9, PT_R9(k1)
- LONG_L $10, PT_R10(k1)
- LONG_L $11, PT_R11(k1)
- LONG_L $12, PT_R12(k1)
- LONG_L $13, PT_R13(k1)
- LONG_L $14, PT_R14(k1)
- LONG_L $15, PT_R15(k1)
- LONG_L $16, PT_R16(k1)
- LONG_L $17, PT_R17(k1)
- LONG_L $18, PT_R18(k1)
- LONG_L $19, PT_R19(k1)
- LONG_L $20, PT_R20(k1)
- LONG_L $21, PT_R21(k1)
- LONG_L $22, PT_R22(k1)
- LONG_L $23, PT_R23(k1)
- LONG_L $24, PT_R24(k1)
- LONG_L $25, PT_R25(k1)
-
- /* Host k0/k1 were not saved */
-
- LONG_L $28, PT_R28(k1)
- LONG_L $29, PT_R29(k1)
- LONG_L $30, PT_R30(k1)
-
- LONG_L k0, PT_HI(k1)
- mthi k0
-
- LONG_L k0, PT_LO(k1)
- mtlo k0
-
- /* Restore RDHWR access */
- PTR_LI k0, 0x2000000F
- mtc0 k0, CP0_HWRENA
-
- /* Restore RA, which is the address we will return to */
- LONG_L ra, PT_R31(k1)
- j ra
- nop
-
-VECTOR_END(MIPSX(GuestExceptionEnd))
-.end MIPSX(GuestException)
-
-MIPSX(exceptions):
- ####
- ##### The exception handlers.
- #####
- .word _C_LABEL(MIPSX(GuestException)) # 0
- .word _C_LABEL(MIPSX(GuestException)) # 1
- .word _C_LABEL(MIPSX(GuestException)) # 2
- .word _C_LABEL(MIPSX(GuestException)) # 3
- .word _C_LABEL(MIPSX(GuestException)) # 4
- .word _C_LABEL(MIPSX(GuestException)) # 5
- .word _C_LABEL(MIPSX(GuestException)) # 6
- .word _C_LABEL(MIPSX(GuestException)) # 7
- .word _C_LABEL(MIPSX(GuestException)) # 8
- .word _C_LABEL(MIPSX(GuestException)) # 9
- .word _C_LABEL(MIPSX(GuestException)) # 10
- .word _C_LABEL(MIPSX(GuestException)) # 11
- .word _C_LABEL(MIPSX(GuestException)) # 12
- .word _C_LABEL(MIPSX(GuestException)) # 13
- .word _C_LABEL(MIPSX(GuestException)) # 14
- .word _C_LABEL(MIPSX(GuestException)) # 15
- .word _C_LABEL(MIPSX(GuestException)) # 16
- .word _C_LABEL(MIPSX(GuestException)) # 17
- .word _C_LABEL(MIPSX(GuestException)) # 18
- .word _C_LABEL(MIPSX(GuestException)) # 19
- .word _C_LABEL(MIPSX(GuestException)) # 20
- .word _C_LABEL(MIPSX(GuestException)) # 21
- .word _C_LABEL(MIPSX(GuestException)) # 22
- .word _C_LABEL(MIPSX(GuestException)) # 23
- .word _C_LABEL(MIPSX(GuestException)) # 24
- .word _C_LABEL(MIPSX(GuestException)) # 25
- .word _C_LABEL(MIPSX(GuestException)) # 26
- .word _C_LABEL(MIPSX(GuestException)) # 27
- .word _C_LABEL(MIPSX(GuestException)) # 28
- .word _C_LABEL(MIPSX(GuestException)) # 29
- .word _C_LABEL(MIPSX(GuestException)) # 30
- .word _C_LABEL(MIPSX(GuestException)) # 31
diff --git a/arch/mips/kvm/mips.c b/arch/mips/kvm/mips.c
index 70ef1a43c114..a6ea084b4d9d 100644
--- a/arch/mips/kvm/mips.c
+++ b/arch/mips/kvm/mips.c
@@ -9,6 +9,7 @@
* Authors: Sanjay Lal <sanjayl@kymasys.com>
*/
+#include <linux/bitops.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kdebug.h>
@@ -56,6 +57,7 @@ struct kvm_stats_debugfs_item debugfs_entries[] = {
{ "flush_dcache", VCPU_STAT(flush_dcache_exits), KVM_STAT_VCPU },
{ "halt_successful_poll", VCPU_STAT(halt_successful_poll), KVM_STAT_VCPU },
{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll), KVM_STAT_VCPU },
+ { "halt_poll_invalid", VCPU_STAT(halt_poll_invalid), KVM_STAT_VCPU },
{ "halt_wakeup", VCPU_STAT(halt_wakeup), KVM_STAT_VCPU },
{NULL}
};
@@ -146,7 +148,7 @@ void kvm_mips_free_vcpus(struct kvm *kvm)
/* Put the pages we reserved for the guest pmap */
for (i = 0; i < kvm->arch.guest_pmap_npages; i++) {
if (kvm->arch.guest_pmap[i] != KVM_INVALID_PAGE)
- kvm_mips_release_pfn_clean(kvm->arch.guest_pmap[i]);
+ kvm_release_pfn_clean(kvm->arch.guest_pmap[i]);
}
kfree(kvm->arch.guest_pmap);
@@ -243,10 +245,27 @@ void kvm_arch_commit_memory_region(struct kvm *kvm,
}
}
+static inline void dump_handler(const char *symbol, void *start, void *end)
+{
+ u32 *p;
+
+ pr_debug("LEAF(%s)\n", symbol);
+
+ pr_debug("\t.set push\n");
+ pr_debug("\t.set noreorder\n");
+
+ for (p = start; p < (u32 *)end; ++p)
+ pr_debug("\t.word\t0x%08x\t\t# %p\n", *p, p);
+
+ pr_debug("\t.set\tpop\n");
+
+ pr_debug("\tEND(%s)\n", symbol);
+}
+
struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
{
- int err, size, offset;
- void *gebase;
+ int err, size;
+ void *gebase, *p, *handler;
int i;
struct kvm_vcpu *vcpu = kzalloc(sizeof(struct kvm_vcpu), GFP_KERNEL);
@@ -272,9 +291,6 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
else
size = 0x4000;
- /* Save Linux EBASE */
- vcpu->arch.host_ebase = (void *)read_c0_ebase();
-
gebase = kzalloc(ALIGN(size, PAGE_SIZE), GFP_KERNEL);
if (!gebase) {
@@ -284,35 +300,53 @@ struct kvm_vcpu *kvm_arch_vcpu_create(struct kvm *kvm, unsigned int id)
kvm_debug("Allocated %d bytes for KVM Exception Handlers @ %p\n",
ALIGN(size, PAGE_SIZE), gebase);
+ /*
+ * Check new ebase actually fits in CP0_EBase. The lack of a write gate
+ * limits us to the low 512MB of physical address space. If the memory
+ * we allocate is out of range, just give up now.
+ */
+ if (!cpu_has_ebase_wg && virt_to_phys(gebase) >= 0x20000000) {
+ kvm_err("CP0_EBase.WG required for guest exception base %pK\n",
+ gebase);
+ err = -ENOMEM;
+ goto out_free_gebase;
+ }
+
/* Save new ebase */
vcpu->arch.guest_ebase = gebase;
- /* Copy L1 Guest Exception handler to correct offset */
+ /* Build guest exception vectors dynamically in unmapped memory */
+ handler = gebase + 0x2000;
/* TLB Refill, EXL = 0 */
- memcpy(gebase, mips32_exception,
- mips32_exceptionEnd - mips32_exception);
+ kvm_mips_build_exception(gebase, handler);
/* General Exception Entry point */
- memcpy(gebase + 0x180, mips32_exception,
- mips32_exceptionEnd - mips32_exception);
+ kvm_mips_build_exception(gebase + 0x180, handler);
/* For vectored interrupts poke the exception code @ all offsets 0-7 */
for (i = 0; i < 8; i++) {
kvm_debug("L1 Vectored handler @ %p\n",
gebase + 0x200 + (i * VECTORSPACING));
- memcpy(gebase + 0x200 + (i * VECTORSPACING), mips32_exception,
- mips32_exceptionEnd - mips32_exception);
+ kvm_mips_build_exception(gebase + 0x200 + i * VECTORSPACING,
+ handler);
}
- /* General handler, relocate to unmapped space for sanity's sake */
- offset = 0x2000;
- kvm_debug("Installing KVM Exception handlers @ %p, %#x bytes\n",
- gebase + offset,
- mips32_GuestExceptionEnd - mips32_GuestException);
+ /* General exit handler */
+ p = handler;
+ p = kvm_mips_build_exit(p);
- memcpy(gebase + offset, mips32_GuestException,
- mips32_GuestExceptionEnd - mips32_GuestException);
+ /* Guest entry routine */
+ vcpu->arch.vcpu_run = p;
+ p = kvm_mips_build_vcpu_run(p);
+
+ /* Dump the generated code */
+ pr_debug("#include <asm/asm.h>\n");
+ pr_debug("#include <asm/regdef.h>\n");
+ pr_debug("\n");
+ dump_handler("kvm_vcpu_run", vcpu->arch.vcpu_run, p);
+ dump_handler("kvm_gen_exc", gebase + 0x180, gebase + 0x200);
+ dump_handler("kvm_exit", gebase + 0x2000, vcpu->arch.vcpu_run);
/* Invalidate the icache for these ranges */
local_flush_icache_range((unsigned long)gebase,
@@ -398,17 +432,19 @@ int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run)
kvm_mips_deliver_interrupts(vcpu,
kvm_read_c0_guest_cause(vcpu->arch.cop0));
- __kvm_guest_enter();
+ guest_enter_irqoff();
/* Disable hardware page table walking while in guest */
htw_stop();
- r = __kvm_mips_vcpu_run(run, vcpu);
+ trace_kvm_enter(vcpu);
+ r = vcpu->arch.vcpu_run(run, vcpu);
+ trace_kvm_out(vcpu);
/* Re-enable HTW before enabling interrupts */
htw_start();
- __kvm_guest_exit();
+ guest_exit_irqoff();
local_irq_enable();
if (vcpu->sigset_active)
@@ -497,8 +533,10 @@ static u64 kvm_mips_get_one_regs[] = {
KVM_REG_MIPS_R30,
KVM_REG_MIPS_R31,
+#ifndef CONFIG_CPU_MIPSR6
KVM_REG_MIPS_HI,
KVM_REG_MIPS_LO,
+#endif
KVM_REG_MIPS_PC,
KVM_REG_MIPS_CP0_INDEX,
@@ -529,6 +567,104 @@ static u64 kvm_mips_get_one_regs[] = {
KVM_REG_MIPS_COUNT_HZ,
};
+static u64 kvm_mips_get_one_regs_fpu[] = {
+ KVM_REG_MIPS_FCR_IR,
+ KVM_REG_MIPS_FCR_CSR,
+};
+
+static u64 kvm_mips_get_one_regs_msa[] = {
+ KVM_REG_MIPS_MSA_IR,
+ KVM_REG_MIPS_MSA_CSR,
+};
+
+static u64 kvm_mips_get_one_regs_kscratch[] = {
+ KVM_REG_MIPS_CP0_KSCRATCH1,
+ KVM_REG_MIPS_CP0_KSCRATCH2,
+ KVM_REG_MIPS_CP0_KSCRATCH3,
+ KVM_REG_MIPS_CP0_KSCRATCH4,
+ KVM_REG_MIPS_CP0_KSCRATCH5,
+ KVM_REG_MIPS_CP0_KSCRATCH6,
+};
+
+static unsigned long kvm_mips_num_regs(struct kvm_vcpu *vcpu)
+{
+ unsigned long ret;
+
+ ret = ARRAY_SIZE(kvm_mips_get_one_regs);
+ if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
+ ret += ARRAY_SIZE(kvm_mips_get_one_regs_fpu) + 48;
+ /* odd doubles */
+ if (boot_cpu_data.fpu_id & MIPS_FPIR_F64)
+ ret += 16;
+ }
+ if (kvm_mips_guest_can_have_msa(&vcpu->arch))
+ ret += ARRAY_SIZE(kvm_mips_get_one_regs_msa) + 32;
+ ret += __arch_hweight8(vcpu->arch.kscratch_enabled);
+ ret += kvm_mips_callbacks->num_regs(vcpu);
+
+ return ret;
+}
+
+static int kvm_mips_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
+{
+ u64 index;
+ unsigned int i;
+
+ if (copy_to_user(indices, kvm_mips_get_one_regs,
+ sizeof(kvm_mips_get_one_regs)))
+ return -EFAULT;
+ indices += ARRAY_SIZE(kvm_mips_get_one_regs);
+
+ if (kvm_mips_guest_can_have_fpu(&vcpu->arch)) {
+ if (copy_to_user(indices, kvm_mips_get_one_regs_fpu,
+ sizeof(kvm_mips_get_one_regs_fpu)))
+ return -EFAULT;
+ indices += ARRAY_SIZE(kvm_mips_get_one_regs_fpu);
+
+ for (i = 0; i < 32; ++i) {
+ index = KVM_REG_MIPS_FPR_32(i);
+ if (copy_to_user(indices, &index, sizeof(index)))
+ return -EFAULT;
+ ++indices;
+
+ /* skip odd doubles if no F64 */
+ if (i & 1 && !(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
+ continue;
+
+ index = KVM_REG_MIPS_FPR_64(i);
+ if (copy_to_user(indices, &index, sizeof(index)))
+ return -EFAULT;
+ ++indices;
+ }
+ }
+
+ if (kvm_mips_guest_can_have_msa(&vcpu->arch)) {
+ if (copy_to_user(indices, kvm_mips_get_one_regs_msa,
+ sizeof(kvm_mips_get_one_regs_msa)))
+ return -EFAULT;
+ indices += ARRAY_SIZE(kvm_mips_get_one_regs_msa);
+
+ for (i = 0; i < 32; ++i) {
+ index = KVM_REG_MIPS_VEC_128(i);
+ if (copy_to_user(indices, &index, sizeof(index)))
+ return -EFAULT;
+ ++indices;
+ }
+ }
+
+ for (i = 0; i < 6; ++i) {
+ if (!(vcpu->arch.kscratch_enabled & BIT(i + 2)))
+ continue;
+
+ if (copy_to_user(indices, &kvm_mips_get_one_regs_kscratch[i],
+ sizeof(kvm_mips_get_one_regs_kscratch[i])))
+ return -EFAULT;
+ ++indices;
+ }
+
+ return kvm_mips_callbacks->copy_reg_indices(vcpu, indices);
+}
+
static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg)
{
@@ -544,12 +680,14 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
case KVM_REG_MIPS_R0 ... KVM_REG_MIPS_R31:
v = (long)vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0];
break;
+#ifndef CONFIG_CPU_MIPSR6
case KVM_REG_MIPS_HI:
v = (long)vcpu->arch.hi;
break;
case KVM_REG_MIPS_LO:
v = (long)vcpu->arch.lo;
break;
+#endif
case KVM_REG_MIPS_PC:
v = (long)vcpu->arch.pc;
break;
@@ -678,17 +816,37 @@ static int kvm_mips_get_reg(struct kvm_vcpu *vcpu,
case KVM_REG_MIPS_CP0_ERROREPC:
v = (long)kvm_read_c0_guest_errorepc(cop0);
break;
+ case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
+ idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
+ if (!(vcpu->arch.kscratch_enabled & BIT(idx)))
+ return -EINVAL;
+ switch (idx) {
+ case 2:
+ v = (long)kvm_read_c0_guest_kscratch1(cop0);
+ break;
+ case 3:
+ v = (long)kvm_read_c0_guest_kscratch2(cop0);
+ break;
+ case 4:
+ v = (long)kvm_read_c0_guest_kscratch3(cop0);
+ break;
+ case 5:
+ v = (long)kvm_read_c0_guest_kscratch4(cop0);
+ break;
+ case 6:
+ v = (long)kvm_read_c0_guest_kscratch5(cop0);
+ break;
+ case 7:
+ v = (long)kvm_read_c0_guest_kscratch6(cop0);
+ break;
+ }
+ break;
/* registers to be handled specially */
- case KVM_REG_MIPS_CP0_COUNT:
- case KVM_REG_MIPS_COUNT_CTL:
- case KVM_REG_MIPS_COUNT_RESUME:
- case KVM_REG_MIPS_COUNT_HZ:
+ default:
ret = kvm_mips_callbacks->get_one_reg(vcpu, reg, &v);
if (ret)
return ret;
break;
- default:
- return -EINVAL;
}
if ((reg->id & KVM_REG_SIZE_MASK) == KVM_REG_SIZE_U64) {
u64 __user *uaddr64 = (u64 __user *)(long)reg->addr;
@@ -745,12 +903,14 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
case KVM_REG_MIPS_R1 ... KVM_REG_MIPS_R31:
vcpu->arch.gprs[reg->id - KVM_REG_MIPS_R0] = v;
break;
+#ifndef CONFIG_CPU_MIPSR6
case KVM_REG_MIPS_HI:
vcpu->arch.hi = v;
break;
case KVM_REG_MIPS_LO:
vcpu->arch.lo = v;
break;
+#endif
case KVM_REG_MIPS_PC:
vcpu->arch.pc = v;
break;
@@ -849,22 +1009,34 @@ static int kvm_mips_set_reg(struct kvm_vcpu *vcpu,
case KVM_REG_MIPS_CP0_ERROREPC:
kvm_write_c0_guest_errorepc(cop0, v);
break;
+ case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
+ idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
+ if (!(vcpu->arch.kscratch_enabled & BIT(idx)))
+ return -EINVAL;
+ switch (idx) {
+ case 2:
+ kvm_write_c0_guest_kscratch1(cop0, v);
+ break;
+ case 3:
+ kvm_write_c0_guest_kscratch2(cop0, v);
+ break;
+ case 4:
+ kvm_write_c0_guest_kscratch3(cop0, v);
+ break;
+ case 5:
+ kvm_write_c0_guest_kscratch4(cop0, v);
+ break;
+ case 6:
+ kvm_write_c0_guest_kscratch5(cop0, v);
+ break;
+ case 7:
+ kvm_write_c0_guest_kscratch6(cop0, v);
+ break;
+ }
+ break;
/* registers to be handled specially */
- case KVM_REG_MIPS_CP0_COUNT:
- case KVM_REG_MIPS_CP0_COMPARE:
- case KVM_REG_MIPS_CP0_CAUSE:
- case KVM_REG_MIPS_CP0_CONFIG:
- case KVM_REG_MIPS_CP0_CONFIG1:
- case KVM_REG_MIPS_CP0_CONFIG2:
- case KVM_REG_MIPS_CP0_CONFIG3:
- case KVM_REG_MIPS_CP0_CONFIG4:
- case KVM_REG_MIPS_CP0_CONFIG5:
- case KVM_REG_MIPS_COUNT_CTL:
- case KVM_REG_MIPS_COUNT_RESUME:
- case KVM_REG_MIPS_COUNT_HZ:
- return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
default:
- return -EINVAL;
+ return kvm_mips_callbacks->set_one_reg(vcpu, reg, v);
}
return 0;
}
@@ -917,23 +1089,18 @@ long kvm_arch_vcpu_ioctl(struct file *filp, unsigned int ioctl,
}
case KVM_GET_REG_LIST: {
struct kvm_reg_list __user *user_list = argp;
- u64 __user *reg_dest;
struct kvm_reg_list reg_list;
unsigned n;
if (copy_from_user(&reg_list, user_list, sizeof(reg_list)))
return -EFAULT;
n = reg_list.n;
- reg_list.n = ARRAY_SIZE(kvm_mips_get_one_regs);
+ reg_list.n = kvm_mips_num_regs(vcpu);
if (copy_to_user(user_list, &reg_list, sizeof(reg_list)))
return -EFAULT;
if (n < reg_list.n)
return -E2BIG;
- reg_dest = user_list->reg;
- if (copy_to_user(reg_dest, kvm_mips_get_one_regs,
- sizeof(kvm_mips_get_one_regs)))
- return -EFAULT;
- return 0;
+ return kvm_mips_copy_reg_indices(vcpu, user_list->reg);
}
case KVM_NMI:
/* Treat the NMI as a CPU reset */
@@ -1079,7 +1246,8 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
r = KVM_COALESCED_MMIO_PAGE_OFFSET;
break;
case KVM_CAP_MIPS_FPU:
- r = !!cpu_has_fpu;
+ /* We don't handle systems with inconsistent cpu_has_fpu */
+ r = !!raw_cpu_has_fpu;
break;
case KVM_CAP_MIPS_MSA:
/*
@@ -1211,7 +1379,7 @@ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
static void kvm_mips_set_c0_status(void)
{
- uint32_t status = read_c0_status();
+ u32 status = read_c0_status();
if (cpu_has_dsp)
status |= (ST0_MX);
@@ -1225,9 +1393,9 @@ static void kvm_mips_set_c0_status(void)
*/
int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
- uint32_t cause = vcpu->arch.host_cp0_cause;
- uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
+ u32 exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -1249,6 +1417,7 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvm_debug("kvm_mips_handle_exit: cause: %#x, PC: %p, kvm_run: %p, kvm_vcpu: %p\n",
cause, opc, run, vcpu);
+ trace_kvm_exit(vcpu, exccode);
/*
* Do a privilege check, if in UM most of these exit conditions end up
@@ -1268,7 +1437,6 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvm_debug("[%d]EXCCODE_INT @ %p\n", vcpu->vcpu_id, opc);
++vcpu->stat.int_exits;
- trace_kvm_exit(vcpu, INT_EXITS);
if (need_resched())
cond_resched();
@@ -1280,7 +1448,6 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
kvm_debug("EXCCODE_CPU: @ PC: %p\n", opc);
++vcpu->stat.cop_unusable_exits;
- trace_kvm_exit(vcpu, COP_UNUSABLE_EXITS);
ret = kvm_mips_callbacks->handle_cop_unusable(vcpu);
/* XXXKYMA: Might need to return to user space */
if (run->exit_reason == KVM_EXIT_IRQ_WINDOW_OPEN)
@@ -1289,7 +1456,6 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
case EXCCODE_MOD:
++vcpu->stat.tlbmod_exits;
- trace_kvm_exit(vcpu, TLBMOD_EXITS);
ret = kvm_mips_callbacks->handle_tlb_mod(vcpu);
break;
@@ -1299,7 +1465,6 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
badvaddr);
++vcpu->stat.tlbmiss_st_exits;
- trace_kvm_exit(vcpu, TLBMISS_ST_EXITS);
ret = kvm_mips_callbacks->handle_tlb_st_miss(vcpu);
break;
@@ -1308,61 +1473,51 @@ int kvm_mips_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
cause, opc, badvaddr);
++vcpu->stat.tlbmiss_ld_exits;
- trace_kvm_exit(vcpu, TLBMISS_LD_EXITS);
ret = kvm_mips_callbacks->handle_tlb_ld_miss(vcpu);
break;
case EXCCODE_ADES:
++vcpu->stat.addrerr_st_exits;
- trace_kvm_exit(vcpu, ADDRERR_ST_EXITS);
ret = kvm_mips_callbacks->handle_addr_err_st(vcpu);
break;
case EXCCODE_ADEL:
++vcpu->stat.addrerr_ld_exits;
- trace_kvm_exit(vcpu, ADDRERR_LD_EXITS);
ret = kvm_mips_callbacks->handle_addr_err_ld(vcpu);
break;
case EXCCODE_SYS:
++vcpu->stat.syscall_exits;
- trace_kvm_exit(vcpu, SYSCALL_EXITS);
ret = kvm_mips_callbacks->handle_syscall(vcpu);
break;
case EXCCODE_RI:
++vcpu->stat.resvd_inst_exits;
- trace_kvm_exit(vcpu, RESVD_INST_EXITS);
ret = kvm_mips_callbacks->handle_res_inst(vcpu);
break;
case EXCCODE_BP:
++vcpu->stat.break_inst_exits;
- trace_kvm_exit(vcpu, BREAK_INST_EXITS);
ret = kvm_mips_callbacks->handle_break(vcpu);
break;
case EXCCODE_TR:
++vcpu->stat.trap_inst_exits;
- trace_kvm_exit(vcpu, TRAP_INST_EXITS);
ret = kvm_mips_callbacks->handle_trap(vcpu);
break;
case EXCCODE_MSAFPE:
++vcpu->stat.msa_fpe_exits;
- trace_kvm_exit(vcpu, MSA_FPE_EXITS);
ret = kvm_mips_callbacks->handle_msa_fpe(vcpu);
break;
case EXCCODE_FPE:
++vcpu->stat.fpe_exits;
- trace_kvm_exit(vcpu, FPE_EXITS);
ret = kvm_mips_callbacks->handle_fpe(vcpu);
break;
case EXCCODE_MSADIS:
++vcpu->stat.msa_disabled_exits;
- trace_kvm_exit(vcpu, MSA_DISABLED_EXITS);
ret = kvm_mips_callbacks->handle_msa_disabled(vcpu);
break;
@@ -1389,11 +1544,13 @@ skip_emul:
run->exit_reason = KVM_EXIT_INTR;
ret = (-EINTR << 2) | RESUME_HOST;
++vcpu->stat.signal_exits;
- trace_kvm_exit(vcpu, SIGNAL_EXITS);
+ trace_kvm_exit(vcpu, KVM_TRACE_EXIT_SIGNAL);
}
}
if (ret == RESUME_GUEST) {
+ trace_kvm_reenter(vcpu);
+
/*
* If FPU / MSA are enabled (i.e. the guest's FPU / MSA context
* is live), restore FCR31 / MSACSR.
@@ -1439,7 +1596,7 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
* not to clobber the status register directly via the commpage.
*/
if (cpu_has_msa && sr & ST0_CU1 && !(sr & ST0_FR) &&
- vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA)
+ vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
kvm_lose_fpu(vcpu);
/*
@@ -1454,9 +1611,12 @@ void kvm_own_fpu(struct kvm_vcpu *vcpu)
enable_fpu_hazard();
/* If guest FPU state not active, restore it now */
- if (!(vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)) {
+ if (!(vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
__kvm_restore_fpu(&vcpu->arch);
- vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
+ vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_FPU);
+ } else {
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_FPU);
}
preempt_enable();
@@ -1483,8 +1643,8 @@ void kvm_own_msa(struct kvm_vcpu *vcpu)
* interacts with MSA state, so play it safe and save it first.
*/
if (!(sr & ST0_FR) &&
- (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU |
- KVM_MIPS_FPU_MSA)) == KVM_MIPS_FPU_FPU)
+ (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU |
+ KVM_MIPS_AUX_MSA)) == KVM_MIPS_AUX_FPU)
kvm_lose_fpu(vcpu);
change_c0_status(ST0_CU1 | ST0_FR, sr);
@@ -1498,22 +1658,26 @@ void kvm_own_msa(struct kvm_vcpu *vcpu)
set_c0_config5(MIPS_CONF5_MSAEN);
enable_fpu_hazard();
- switch (vcpu->arch.fpu_inuse & (KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA)) {
- case KVM_MIPS_FPU_FPU:
+ switch (vcpu->arch.aux_inuse & (KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA)) {
+ case KVM_MIPS_AUX_FPU:
/*
* Guest FPU state already loaded, only restore upper MSA state
*/
__kvm_restore_msa_upper(&vcpu->arch);
- vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
+ vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE, KVM_TRACE_AUX_MSA);
break;
case 0:
/* Neither FPU or MSA already active, restore full MSA state */
__kvm_restore_msa(&vcpu->arch);
- vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_MSA;
+ vcpu->arch.aux_inuse |= KVM_MIPS_AUX_MSA;
if (kvm_mips_guest_has_fpu(&vcpu->arch))
- vcpu->arch.fpu_inuse |= KVM_MIPS_FPU_FPU;
+ vcpu->arch.aux_inuse |= KVM_MIPS_AUX_FPU;
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_RESTORE,
+ KVM_TRACE_AUX_FPU_MSA);
break;
default:
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_ENABLE, KVM_TRACE_AUX_MSA);
break;
}
@@ -1525,13 +1689,15 @@ void kvm_own_msa(struct kvm_vcpu *vcpu)
void kvm_drop_fpu(struct kvm_vcpu *vcpu)
{
preempt_disable();
- if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
+ if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
disable_msa();
- vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_MSA;
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_MSA);
+ vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_MSA;
}
- if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
+ if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
clear_c0_status(ST0_CU1 | ST0_FR);
- vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_DISCARD, KVM_TRACE_AUX_FPU);
+ vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
}
preempt_enable();
}
@@ -1547,26 +1713,31 @@ void kvm_lose_fpu(struct kvm_vcpu *vcpu)
*/
preempt_disable();
- if (cpu_has_msa && vcpu->arch.fpu_inuse & KVM_MIPS_FPU_MSA) {
+ if (cpu_has_msa && vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
set_c0_config5(MIPS_CONF5_MSAEN);
enable_fpu_hazard();
__kvm_save_msa(&vcpu->arch);
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU_MSA);
/* Disable MSA & FPU */
disable_msa();
- if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
+ if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
clear_c0_status(ST0_CU1 | ST0_FR);
- vcpu->arch.fpu_inuse &= ~(KVM_MIPS_FPU_FPU | KVM_MIPS_FPU_MSA);
- } else if (vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU) {
+ disable_fpu_hazard();
+ }
+ vcpu->arch.aux_inuse &= ~(KVM_MIPS_AUX_FPU | KVM_MIPS_AUX_MSA);
+ } else if (vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU) {
set_c0_status(ST0_CU1);
enable_fpu_hazard();
__kvm_save_fpu(&vcpu->arch);
- vcpu->arch.fpu_inuse &= ~KVM_MIPS_FPU_FPU;
+ vcpu->arch.aux_inuse &= ~KVM_MIPS_AUX_FPU;
+ trace_kvm_aux(vcpu, KVM_TRACE_AUX_SAVE, KVM_TRACE_AUX_FPU);
/* Disable FPU */
clear_c0_status(ST0_CU1 | ST0_FR);
+ disable_fpu_hazard();
}
preempt_enable();
}
@@ -1624,6 +1795,10 @@ static int __init kvm_mips_init(void)
{
int ret;
+ ret = kvm_mips_entry_setup();
+ if (ret)
+ return ret;
+
ret = kvm_init(NULL, sizeof(struct kvm_vcpu), 0, THIS_MODULE);
if (ret)
@@ -1631,18 +1806,6 @@ static int __init kvm_mips_init(void)
register_die_notifier(&kvm_mips_csr_die_notifier);
- /*
- * On MIPS, kernel modules are executed from "mapped space", which
- * requires TLBs. The TLB handling code is statically linked with
- * the rest of the kernel (tlb.c) to avoid the possibility of
- * double faulting. The issue is that the TLB code references
- * routines that are part of the KVM module, which are only
- * available once the module is loaded.
- */
- kvm_mips_gfn_to_pfn = gfn_to_pfn;
- kvm_mips_release_pfn_clean = kvm_release_pfn_clean;
- kvm_mips_is_error_pfn = is_error_pfn;
-
return 0;
}
@@ -1650,10 +1813,6 @@ static void __exit kvm_mips_exit(void)
{
kvm_exit();
- kvm_mips_gfn_to_pfn = NULL;
- kvm_mips_release_pfn_clean = NULL;
- kvm_mips_is_error_pfn = NULL;
-
unregister_die_notifier(&kvm_mips_csr_die_notifier);
}
diff --git a/arch/mips/kvm/mmu.c b/arch/mips/kvm/mmu.c
new file mode 100644
index 000000000000..57319ee57c4f
--- /dev/null
+++ b/arch/mips/kvm/mmu.c
@@ -0,0 +1,375 @@
+/*
+ * This file is subject to the terms and conditions of the GNU General Public
+ * License. See the file "COPYING" in the main directory of this archive
+ * for more details.
+ *
+ * KVM/MIPS MMU handling in the KVM module.
+ *
+ * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
+ * Authors: Sanjay Lal <sanjayl@kymasys.com>
+ */
+
+#include <linux/highmem.h>
+#include <linux/kvm_host.h>
+#include <asm/mmu_context.h>
+
+static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
+{
+ int cpu = smp_processor_id();
+
+ return vcpu->arch.guest_kernel_asid[cpu] &
+ cpu_asid_mask(&cpu_data[cpu]);
+}
+
+static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
+{
+ int cpu = smp_processor_id();
+
+ return vcpu->arch.guest_user_asid[cpu] &
+ cpu_asid_mask(&cpu_data[cpu]);
+}
+
+static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
+{
+ int srcu_idx, err = 0;
+ kvm_pfn_t pfn;
+
+ if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
+ return 0;
+
+ srcu_idx = srcu_read_lock(&kvm->srcu);
+ pfn = gfn_to_pfn(kvm, gfn);
+
+ if (is_error_pfn(pfn)) {
+ kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
+ err = -EFAULT;
+ goto out;
+ }
+
+ kvm->arch.guest_pmap[gfn] = pfn;
+out:
+ srcu_read_unlock(&kvm->srcu, srcu_idx);
+ return err;
+}
+
+/* Translate guest KSEG0 addresses to Host PA */
+unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
+ unsigned long gva)
+{
+ gfn_t gfn;
+ unsigned long offset = gva & ~PAGE_MASK;
+ struct kvm *kvm = vcpu->kvm;
+
+ if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
+ kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
+ __builtin_return_address(0), gva);
+ return KVM_INVALID_PAGE;
+ }
+
+ gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
+
+ if (gfn >= kvm->arch.guest_pmap_npages) {
+ kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
+ gva);
+ return KVM_INVALID_PAGE;
+ }
+
+ if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
+ return KVM_INVALID_ADDR;
+
+ return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
+}
+
+/* XXXKYMA: Must be called with interrupts disabled */
+int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
+ struct kvm_vcpu *vcpu)
+{
+ gfn_t gfn;
+ kvm_pfn_t pfn0, pfn1;
+ unsigned long vaddr = 0;
+ unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+ struct kvm *kvm = vcpu->kvm;
+ const int flush_dcache_mask = 0;
+ int ret;
+
+ if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
+ kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
+ kvm_mips_dump_host_tlbs();
+ return -1;
+ }
+
+ gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
+ if (gfn >= kvm->arch.guest_pmap_npages) {
+ kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
+ gfn, badvaddr);
+ kvm_mips_dump_host_tlbs();
+ return -1;
+ }
+ vaddr = badvaddr & (PAGE_MASK << 1);
+
+ if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
+ return -1;
+
+ if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
+ return -1;
+
+ pfn0 = kvm->arch.guest_pmap[gfn & ~0x1];
+ pfn1 = kvm->arch.guest_pmap[gfn | 0x1];
+
+ entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
+ ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
+ ENTRYLO_D | ENTRYLO_V;
+ entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
+ ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
+ ENTRYLO_D | ENTRYLO_V;
+
+ preempt_disable();
+ entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
+ ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+ flush_dcache_mask);
+ preempt_enable();
+
+ return ret;
+}
+
+int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
+ struct kvm_mips_tlb *tlb)
+{
+ unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
+ struct kvm *kvm = vcpu->kvm;
+ kvm_pfn_t pfn0, pfn1;
+ int ret;
+
+ if ((tlb->tlb_hi & VPN2_MASK) == 0) {
+ pfn0 = 0;
+ pfn1 = 0;
+ } else {
+ if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo[0])
+ >> PAGE_SHIFT) < 0)
+ return -1;
+
+ if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo[1])
+ >> PAGE_SHIFT) < 0)
+ return -1;
+
+ pfn0 = kvm->arch.guest_pmap[
+ mips3_tlbpfn_to_paddr(tlb->tlb_lo[0]) >> PAGE_SHIFT];
+ pfn1 = kvm->arch.guest_pmap[
+ mips3_tlbpfn_to_paddr(tlb->tlb_lo[1]) >> PAGE_SHIFT];
+ }
+
+ /* Get attributes from the Guest TLB */
+ entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
+ ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
+ (tlb->tlb_lo[0] & ENTRYLO_D) |
+ (tlb->tlb_lo[0] & ENTRYLO_V);
+ entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
+ ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
+ (tlb->tlb_lo[1] & ENTRYLO_D) |
+ (tlb->tlb_lo[1] & ENTRYLO_V);
+
+ kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
+ tlb->tlb_lo[0], tlb->tlb_lo[1]);
+
+ preempt_disable();
+ entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
+ kvm_mips_get_kernel_asid(vcpu) :
+ kvm_mips_get_user_asid(vcpu));
+ ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
+ tlb->tlb_mask);
+ preempt_enable();
+
+ return ret;
+}
+
+void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
+ struct kvm_vcpu *vcpu)
+{
+ unsigned long asid = asid_cache(cpu);
+
+ asid += cpu_asid_inc();
+ if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
+ if (cpu_has_vtag_icache)
+ flush_icache_all();
+
+ kvm_local_flush_tlb_all(); /* start new asid cycle */
+
+ if (!asid) /* fix version if needed */
+ asid = asid_first_version(cpu);
+ }
+
+ cpu_context(cpu, mm) = asid_cache(cpu) = asid;
+}
+
+/**
+ * kvm_mips_migrate_count() - Migrate timer.
+ * @vcpu: Virtual CPU.
+ *
+ * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
+ * if it was running prior to being cancelled.
+ *
+ * Must be called when the VCPU is migrated to a different CPU to ensure that
+ * timer expiry during guest execution interrupts the guest and causes the
+ * interrupt to be delivered in a timely manner.
+ */
+static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
+{
+ if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
+ hrtimer_restart(&vcpu->arch.comparecount_timer);
+}
+
+/* Restore ASID once we are scheduled back after preemption */
+void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
+{
+ unsigned long asid_mask = cpu_asid_mask(&cpu_data[cpu]);
+ unsigned long flags;
+ int newasid = 0;
+
+ kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
+
+ /* Allocate new kernel and user ASIDs if needed */
+
+ local_irq_save(flags);
+
+ if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
+ asid_version_mask(cpu)) {
+ kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
+ vcpu->arch.guest_kernel_asid[cpu] =
+ vcpu->arch.guest_kernel_mm.context.asid[cpu];
+ kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
+ vcpu->arch.guest_user_asid[cpu] =
+ vcpu->arch.guest_user_mm.context.asid[cpu];
+ newasid++;
+
+ kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
+ cpu_context(cpu, current->mm));
+ kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
+ cpu, vcpu->arch.guest_kernel_asid[cpu]);
+ kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
+ vcpu->arch.guest_user_asid[cpu]);
+ }
+
+ if (vcpu->arch.last_sched_cpu != cpu) {
+ kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
+ vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
+ /*
+ * Migrate the timer interrupt to the current CPU so that it
+ * always interrupts the guest and synchronously triggers a
+ * guest timer interrupt.
+ */
+ kvm_mips_migrate_count(vcpu);
+ }
+
+ if (!newasid) {
+ /*
+ * If we preempted while the guest was executing, then reload
+ * the pre-empted ASID
+ */
+ if (current->flags & PF_VCPU) {
+ write_c0_entryhi(vcpu->arch.
+ preempt_entryhi & asid_mask);
+ ehb();
+ }
+ } else {
+ /* New ASIDs were allocated for the VM */
+
+ /*
+ * Were we in guest context? If so then the pre-empted ASID is
+ * no longer valid, we need to set it to what it should be based
+ * on the mode of the Guest (Kernel/User)
+ */
+ if (current->flags & PF_VCPU) {
+ if (KVM_GUEST_KERNEL_MODE(vcpu))
+ write_c0_entryhi(vcpu->arch.
+ guest_kernel_asid[cpu] &
+ asid_mask);
+ else
+ write_c0_entryhi(vcpu->arch.
+ guest_user_asid[cpu] &
+ asid_mask);
+ ehb();
+ }
+ }
+
+ /* restore guest state to registers */
+ kvm_mips_callbacks->vcpu_set_regs(vcpu);
+
+ local_irq_restore(flags);
+
+}
+
+/* ASID can change if another task is scheduled during preemption */
+void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
+{
+ unsigned long flags;
+ int cpu;
+
+ local_irq_save(flags);
+
+ cpu = smp_processor_id();
+
+ vcpu->arch.preempt_entryhi = read_c0_entryhi();
+ vcpu->arch.last_sched_cpu = cpu;
+
+ /* save guest state in registers */
+ kvm_mips_callbacks->vcpu_get_regs(vcpu);
+
+ if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
+ asid_version_mask(cpu))) {
+ kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
+ cpu_context(cpu, current->mm));
+ drop_mmu_context(current->mm, cpu);
+ }
+ write_c0_entryhi(cpu_asid(cpu, current->mm));
+ ehb();
+
+ local_irq_restore(flags);
+}
+
+u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
+{
+ struct mips_coproc *cop0 = vcpu->arch.cop0;
+ unsigned long paddr, flags, vpn2, asid;
+ unsigned long va = (unsigned long)opc;
+ void *vaddr;
+ u32 inst;
+ int index;
+
+ if (KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0 ||
+ KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
+ local_irq_save(flags);
+ index = kvm_mips_host_tlb_lookup(vcpu, va);
+ if (index >= 0) {
+ inst = *(opc);
+ } else {
+ vpn2 = va & VPN2_MASK;
+ asid = kvm_read_c0_guest_entryhi(cop0) &
+ KVM_ENTRYHI_ASID;
+ index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
+ if (index < 0) {
+ kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
+ __func__, opc, vcpu, read_c0_entryhi());
+ kvm_mips_dump_host_tlbs();
+ kvm_mips_dump_guest_tlbs(vcpu);
+ local_irq_restore(flags);
+ return KVM_INVALID_INST;
+ }
+ kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
+ &vcpu->arch.
+ guest_tlb[index]);
+ inst = *(opc);
+ }
+ local_irq_restore(flags);
+ } else if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
+ paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu, va);
+ vaddr = kmap_atomic(pfn_to_page(PHYS_PFN(paddr)));
+ vaddr += paddr & ~PAGE_MASK;
+ inst = *(u32 *)vaddr;
+ kunmap_atomic(vaddr);
+ } else {
+ kvm_err("%s: illegal address: %p\n", __func__, opc);
+ return KVM_INVALID_INST;
+ }
+
+ return inst;
+}
diff --git a/arch/mips/kvm/stats.c b/arch/mips/kvm/stats.c
index 888bb67070ac..53f851a61554 100644
--- a/arch/mips/kvm/stats.c
+++ b/arch/mips/kvm/stats.c
@@ -11,27 +11,6 @@
#include <linux/kvm_host.h>
-char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES] = {
- "WAIT",
- "CACHE",
- "Signal",
- "Interrupt",
- "COP0/1 Unusable",
- "TLB Mod",
- "TLB Miss (LD)",
- "TLB Miss (ST)",
- "Address Err (ST)",
- "Address Error (LD)",
- "System Call",
- "Reserved Inst",
- "Break Inst",
- "Trap Inst",
- "MSA FPE",
- "FPE",
- "MSA Disabled",
- "D-Cache Flushes",
-};
-
char *kvm_cop0_str[N_MIPS_COPROC_REGS] = {
"Index",
"Random",
diff --git a/arch/mips/kvm/tlb.c b/arch/mips/kvm/tlb.c
index a08c43946247..254377d8e0b9 100644
--- a/arch/mips/kvm/tlb.c
+++ b/arch/mips/kvm/tlb.c
@@ -14,7 +14,7 @@
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/delay.h>
-#include <linux/module.h>
+#include <linux/export.h>
#include <linux/kvm_host.h>
#include <linux/srcu.h>
@@ -24,6 +24,7 @@
#include <asm/pgtable.h>
#include <asm/cacheflush.h>
#include <asm/tlb.h>
+#include <asm/tlbdebug.h>
#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
@@ -32,32 +33,26 @@
#define KVM_GUEST_PC_TLB 0
#define KVM_GUEST_SP_TLB 1
-#define PRIx64 "llx"
-
atomic_t kvm_mips_instance;
EXPORT_SYMBOL_GPL(kvm_mips_instance);
-/* These function pointers are initialized once the KVM module is loaded */
-kvm_pfn_t (*kvm_mips_gfn_to_pfn)(struct kvm *kvm, gfn_t gfn);
-EXPORT_SYMBOL_GPL(kvm_mips_gfn_to_pfn);
-
-void (*kvm_mips_release_pfn_clean)(kvm_pfn_t pfn);
-EXPORT_SYMBOL_GPL(kvm_mips_release_pfn_clean);
-
-bool (*kvm_mips_is_error_pfn)(kvm_pfn_t pfn);
-EXPORT_SYMBOL_GPL(kvm_mips_is_error_pfn);
-
-uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
+static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
+ int cpu = smp_processor_id();
+
+ return vcpu->arch.guest_kernel_asid[cpu] &
+ cpu_asid_mask(&cpu_data[cpu]);
}
-uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
+static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
- return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
+ int cpu = smp_processor_id();
+
+ return vcpu->arch.guest_user_asid[cpu] &
+ cpu_asid_mask(&cpu_data[cpu]);
}
-inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
+inline u32 kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
{
return vcpu->kvm->arch.commpage_tlb;
}
@@ -66,49 +61,15 @@ inline uint32_t kvm_mips_get_commpage_asid(struct kvm_vcpu *vcpu)
void kvm_mips_dump_host_tlbs(void)
{
- unsigned long old_entryhi;
- unsigned long old_pagemask;
- struct kvm_mips_tlb tlb;
unsigned long flags;
- int i;
local_irq_save(flags);
- old_entryhi = read_c0_entryhi();
- old_pagemask = read_c0_pagemask();
-
kvm_info("HOST TLBs:\n");
- kvm_info("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);
-
- for (i = 0; i < current_cpu_data.tlbsize; i++) {
- write_c0_index(i);
- mtc0_tlbw_hazard();
+ dump_tlb_regs();
+ pr_info("\n");
+ dump_tlb_all();
- tlb_read();
- tlbw_use_hazard();
-
- tlb.tlb_hi = read_c0_entryhi();
- tlb.tlb_lo0 = read_c0_entrylo0();
- tlb.tlb_lo1 = read_c0_entrylo1();
- tlb.tlb_mask = read_c0_pagemask();
-
- kvm_info("TLB%c%3d Hi 0x%08lx ",
- (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
- i, tlb.tlb_hi);
- kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
- (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
- (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
- (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
- (tlb.tlb_lo0 >> 3) & 7);
- kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
- (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
- (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
- (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
- (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
- }
- write_c0_entryhi(old_entryhi);
- write_c0_pagemask(old_pagemask);
- mtc0_tlbw_hazard();
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_host_tlbs);
@@ -125,74 +86,24 @@ void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
tlb = vcpu->arch.guest_tlb[i];
kvm_info("TLB%c%3d Hi 0x%08lx ",
- (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
+ (tlb.tlb_lo[0] | tlb.tlb_lo[1]) & ENTRYLO_V
+ ? ' ' : '*',
i, tlb.tlb_hi);
- kvm_info("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
- (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
- (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
- (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
- (tlb.tlb_lo0 >> 3) & 7);
- kvm_info("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
- (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
- (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
- (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
- (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
+ kvm_info("Lo0=0x%09llx %c%c attr %lx ",
+ (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[0]),
+ (tlb.tlb_lo[0] & ENTRYLO_D) ? 'D' : ' ',
+ (tlb.tlb_lo[0] & ENTRYLO_G) ? 'G' : ' ',
+ (tlb.tlb_lo[0] & ENTRYLO_C) >> ENTRYLO_C_SHIFT);
+ kvm_info("Lo1=0x%09llx %c%c attr %lx sz=%lx\n",
+ (u64) mips3_tlbpfn_to_paddr(tlb.tlb_lo[1]),
+ (tlb.tlb_lo[1] & ENTRYLO_D) ? 'D' : ' ',
+ (tlb.tlb_lo[1] & ENTRYLO_G) ? 'G' : ' ',
+ (tlb.tlb_lo[1] & ENTRYLO_C) >> ENTRYLO_C_SHIFT,
+ tlb.tlb_mask);
}
}
EXPORT_SYMBOL_GPL(kvm_mips_dump_guest_tlbs);
-static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
-{
- int srcu_idx, err = 0;
- kvm_pfn_t pfn;
-
- if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
- return 0;
-
- srcu_idx = srcu_read_lock(&kvm->srcu);
- pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
-
- if (kvm_mips_is_error_pfn(pfn)) {
- kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
- err = -EFAULT;
- goto out;
- }
-
- kvm->arch.guest_pmap[gfn] = pfn;
-out:
- srcu_read_unlock(&kvm->srcu, srcu_idx);
- return err;
-}
-
-/* Translate guest KSEG0 addresses to Host PA */
-unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
- unsigned long gva)
-{
- gfn_t gfn;
- uint32_t offset = gva & ~PAGE_MASK;
- struct kvm *kvm = vcpu->kvm;
-
- if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
- kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
- __builtin_return_address(0), gva);
- return KVM_INVALID_PAGE;
- }
-
- gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);
-
- if (gfn >= kvm->arch.guest_pmap_npages) {
- kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
- gva);
- return KVM_INVALID_PAGE;
- }
-
- if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
- return KVM_INVALID_ADDR;
-
- return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
-}
-EXPORT_SYMBOL_GPL(kvm_mips_translate_guest_kseg0_to_hpa);
-
/* XXXKYMA: Must be called with interrupts disabled */
/* set flush_dcache_mask == 0 if no dcache flush required */
int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
@@ -236,12 +147,12 @@ int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
/* Flush D-cache */
if (flush_dcache_mask) {
- if (entrylo0 & MIPS3_PG_V) {
+ if (entrylo0 & ENTRYLO_V) {
++vcpu->stat.flush_dcache_exits;
flush_data_cache_page((entryhi & VPN2_MASK) &
~flush_dcache_mask);
}
- if (entrylo1 & MIPS3_PG_V) {
+ if (entrylo1 & ENTRYLO_V) {
++vcpu->stat.flush_dcache_exits;
flush_data_cache_page(((entryhi & VPN2_MASK) &
~flush_dcache_mask) |
@@ -252,91 +163,35 @@ int kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
/* Restore old ASID */
write_c0_entryhi(old_entryhi);
mtc0_tlbw_hazard();
- tlbw_use_hazard();
local_irq_restore(flags);
return 0;
}
-
-/* XXXKYMA: Must be called with interrupts disabled */
-int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
- struct kvm_vcpu *vcpu)
-{
- gfn_t gfn;
- kvm_pfn_t pfn0, pfn1;
- unsigned long vaddr = 0;
- unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
- int even;
- struct kvm *kvm = vcpu->kvm;
- const int flush_dcache_mask = 0;
-
- if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
- kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
- kvm_mips_dump_host_tlbs();
- return -1;
- }
-
- gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
- if (gfn >= kvm->arch.guest_pmap_npages) {
- kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
- gfn, badvaddr);
- kvm_mips_dump_host_tlbs();
- return -1;
- }
- even = !(gfn & 0x1);
- vaddr = badvaddr & (PAGE_MASK << 1);
-
- if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
- return -1;
-
- if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
- return -1;
-
- if (even) {
- pfn0 = kvm->arch.guest_pmap[gfn];
- pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
- } else {
- pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
- pfn1 = kvm->arch.guest_pmap[gfn];
- }
-
- entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
- entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
- (1 << 2) | (0x1 << 1);
- entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
- (1 << 2) | (0x1 << 1);
-
- return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
- flush_dcache_mask);
-}
-EXPORT_SYMBOL_GPL(kvm_mips_handle_kseg0_tlb_fault);
+EXPORT_SYMBOL_GPL(kvm_mips_host_tlb_write);
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
struct kvm_vcpu *vcpu)
{
- kvm_pfn_t pfn0, pfn1;
+ kvm_pfn_t pfn;
unsigned long flags, old_entryhi = 0, vaddr = 0;
- unsigned long entrylo0 = 0, entrylo1 = 0;
+ unsigned long entrylo[2] = { 0, 0 };
+ unsigned int pair_idx;
- pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
- pfn1 = 0;
- entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
- (1 << 2) | (0x1 << 1);
- entrylo1 = 0;
+ pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage));
+ pair_idx = (badvaddr >> PAGE_SHIFT) & 1;
+ entrylo[pair_idx] = mips3_paddr_to_tlbpfn(pfn << PAGE_SHIFT) |
+ ((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
+ ENTRYLO_D | ENTRYLO_V;
local_irq_save(flags);
old_entryhi = read_c0_entryhi();
vaddr = badvaddr & (PAGE_MASK << 1);
write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
- mtc0_tlbw_hazard();
- write_c0_entrylo0(entrylo0);
- mtc0_tlbw_hazard();
- write_c0_entrylo1(entrylo1);
- mtc0_tlbw_hazard();
+ write_c0_entrylo0(entrylo[0]);
+ write_c0_entrylo1(entrylo[1]);
write_c0_index(kvm_mips_get_commpage_asid(vcpu));
mtc0_tlbw_hazard();
tlb_write_indexed();
- mtc0_tlbw_hazard();
tlbw_use_hazard();
kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
@@ -346,63 +201,12 @@ int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
/* Restore old ASID */
write_c0_entryhi(old_entryhi);
mtc0_tlbw_hazard();
- tlbw_use_hazard();
local_irq_restore(flags);
return 0;
}
EXPORT_SYMBOL_GPL(kvm_mips_handle_commpage_tlb_fault);
-int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
- struct kvm_mips_tlb *tlb,
- unsigned long *hpa0,
- unsigned long *hpa1)
-{
- unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
- struct kvm *kvm = vcpu->kvm;
- kvm_pfn_t pfn0, pfn1;
-
- if ((tlb->tlb_hi & VPN2_MASK) == 0) {
- pfn0 = 0;
- pfn1 = 0;
- } else {
- if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
- >> PAGE_SHIFT) < 0)
- return -1;
-
- if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
- >> PAGE_SHIFT) < 0)
- return -1;
-
- pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0)
- >> PAGE_SHIFT];
- pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1)
- >> PAGE_SHIFT];
- }
-
- if (hpa0)
- *hpa0 = pfn0 << PAGE_SHIFT;
-
- if (hpa1)
- *hpa1 = pfn1 << PAGE_SHIFT;
-
- /* Get attributes from the Guest TLB */
- entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
- kvm_mips_get_kernel_asid(vcpu) :
- kvm_mips_get_user_asid(vcpu));
- entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
- (tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
- entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
- (tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);
-
- kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
- tlb->tlb_lo0, tlb->tlb_lo1);
-
- return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
- tlb->tlb_mask);
-}
-EXPORT_SYMBOL_GPL(kvm_mips_handle_mapped_seg_tlb_fault);
-
int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
{
int i;
@@ -418,7 +222,7 @@ int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
}
kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
- __func__, entryhi, index, tlb[i].tlb_lo0, tlb[i].tlb_lo1);
+ __func__, entryhi, index, tlb[i].tlb_lo[0], tlb[i].tlb_lo[1]);
return index;
}
@@ -450,7 +254,6 @@ int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
/* Restore old ASID */
write_c0_entryhi(old_entryhi);
mtc0_tlbw_hazard();
- tlbw_use_hazard();
local_irq_restore(flags);
@@ -481,21 +284,16 @@ int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
if (idx > 0) {
write_c0_entryhi(UNIQUE_ENTRYHI(idx));
- mtc0_tlbw_hazard();
-
write_c0_entrylo0(0);
- mtc0_tlbw_hazard();
-
write_c0_entrylo1(0);
mtc0_tlbw_hazard();
tlb_write_indexed();
- mtc0_tlbw_hazard();
+ tlbw_use_hazard();
}
write_c0_entryhi(old_entryhi);
mtc0_tlbw_hazard();
- tlbw_use_hazard();
local_irq_restore(flags);
@@ -523,61 +321,39 @@ void kvm_mips_flush_host_tlb(int skip_kseg0)
/* Blast 'em all away. */
for (entry = 0; entry < maxentry; entry++) {
write_c0_index(entry);
- mtc0_tlbw_hazard();
if (skip_kseg0) {
+ mtc0_tlbr_hazard();
tlb_read();
- tlbw_use_hazard();
+ tlb_read_hazard();
entryhi = read_c0_entryhi();
/* Don't blow away guest kernel entries */
if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0)
continue;
+
+ write_c0_pagemask(old_pagemask);
}
/* Make sure all entries differ. */
write_c0_entryhi(UNIQUE_ENTRYHI(entry));
- mtc0_tlbw_hazard();
write_c0_entrylo0(0);
- mtc0_tlbw_hazard();
write_c0_entrylo1(0);
mtc0_tlbw_hazard();
tlb_write_indexed();
- mtc0_tlbw_hazard();
+ tlbw_use_hazard();
}
- tlbw_use_hazard();
-
write_c0_entryhi(old_entryhi);
write_c0_pagemask(old_pagemask);
mtc0_tlbw_hazard();
- tlbw_use_hazard();
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_mips_flush_host_tlb);
-void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
- struct kvm_vcpu *vcpu)
-{
- unsigned long asid = asid_cache(cpu);
-
- asid += ASID_INC;
- if (!(asid & ASID_MASK)) {
- if (cpu_has_vtag_icache)
- flush_icache_all();
-
- kvm_local_flush_tlb_all(); /* start new asid cycle */
-
- if (!asid) /* fix version if needed */
- asid = ASID_FIRST_VERSION;
- }
-
- cpu_context(cpu, mm) = asid_cache(cpu) = asid;
-}
-
void kvm_local_flush_tlb_all(void)
{
unsigned long flags;
@@ -597,183 +373,12 @@ void kvm_local_flush_tlb_all(void)
write_c0_index(entry);
mtc0_tlbw_hazard();
tlb_write_indexed();
+ tlbw_use_hazard();
entry++;
}
- tlbw_use_hazard();
write_c0_entryhi(old_ctx);
mtc0_tlbw_hazard();
local_irq_restore(flags);
}
EXPORT_SYMBOL_GPL(kvm_local_flush_tlb_all);
-
-/**
- * kvm_mips_migrate_count() - Migrate timer.
- * @vcpu: Virtual CPU.
- *
- * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
- * if it was running prior to being cancelled.
- *
- * Must be called when the VCPU is migrated to a different CPU to ensure that
- * timer expiry during guest execution interrupts the guest and causes the
- * interrupt to be delivered in a timely manner.
- */
-static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
-{
- if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
- hrtimer_restart(&vcpu->arch.comparecount_timer);
-}
-
-/* Restore ASID once we are scheduled back after preemption */
-void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
-{
- unsigned long flags;
- int newasid = 0;
-
- kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
-
- /* Allocate new kernel and user ASIDs if needed */
-
- local_irq_save(flags);
-
- if ((vcpu->arch.guest_kernel_asid[cpu] ^ asid_cache(cpu)) &
- ASID_VERSION_MASK) {
- kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
- vcpu->arch.guest_kernel_asid[cpu] =
- vcpu->arch.guest_kernel_mm.context.asid[cpu];
- kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
- vcpu->arch.guest_user_asid[cpu] =
- vcpu->arch.guest_user_mm.context.asid[cpu];
- newasid++;
-
- kvm_debug("[%d]: cpu_context: %#lx\n", cpu,
- cpu_context(cpu, current->mm));
- kvm_debug("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
- cpu, vcpu->arch.guest_kernel_asid[cpu]);
- kvm_debug("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
- vcpu->arch.guest_user_asid[cpu]);
- }
-
- if (vcpu->arch.last_sched_cpu != cpu) {
- kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
- vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
- /*
- * Migrate the timer interrupt to the current CPU so that it
- * always interrupts the guest and synchronously triggers a
- * guest timer interrupt.
- */
- kvm_mips_migrate_count(vcpu);
- }
-
- if (!newasid) {
- /*
- * If we preempted while the guest was executing, then reload
- * the pre-empted ASID
- */
- if (current->flags & PF_VCPU) {
- write_c0_entryhi(vcpu->arch.
- preempt_entryhi & ASID_MASK);
- ehb();
- }
- } else {
- /* New ASIDs were allocated for the VM */
-
- /*
- * Were we in guest context? If so then the pre-empted ASID is
- * no longer valid, we need to set it to what it should be based
- * on the mode of the Guest (Kernel/User)
- */
- if (current->flags & PF_VCPU) {
- if (KVM_GUEST_KERNEL_MODE(vcpu))
- write_c0_entryhi(vcpu->arch.
- guest_kernel_asid[cpu] &
- ASID_MASK);
- else
- write_c0_entryhi(vcpu->arch.
- guest_user_asid[cpu] &
- ASID_MASK);
- ehb();
- }
- }
-
- /* restore guest state to registers */
- kvm_mips_callbacks->vcpu_set_regs(vcpu);
-
- local_irq_restore(flags);
-
-}
-EXPORT_SYMBOL_GPL(kvm_arch_vcpu_load);
-
-/* ASID can change if another task is scheduled during preemption */
-void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
-{
- unsigned long flags;
- uint32_t cpu;
-
- local_irq_save(flags);
-
- cpu = smp_processor_id();
-
- vcpu->arch.preempt_entryhi = read_c0_entryhi();
- vcpu->arch.last_sched_cpu = cpu;
-
- /* save guest state in registers */
- kvm_mips_callbacks->vcpu_get_regs(vcpu);
-
- if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
- ASID_VERSION_MASK)) {
- kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
- cpu_context(cpu, current->mm));
- drop_mmu_context(current->mm, cpu);
- }
- write_c0_entryhi(cpu_asid(cpu, current->mm));
- ehb();
-
- local_irq_restore(flags);
-}
-EXPORT_SYMBOL_GPL(kvm_arch_vcpu_put);
-
-uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
-{
- struct mips_coproc *cop0 = vcpu->arch.cop0;
- unsigned long paddr, flags, vpn2, asid;
- uint32_t inst;
- int index;
-
- if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
- KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
- local_irq_save(flags);
- index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
- if (index >= 0) {
- inst = *(opc);
- } else {
- vpn2 = (unsigned long) opc & VPN2_MASK;
- asid = kvm_read_c0_guest_entryhi(cop0) & ASID_MASK;
- index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
- if (index < 0) {
- kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
- __func__, opc, vcpu, read_c0_entryhi());
- kvm_mips_dump_host_tlbs();
- local_irq_restore(flags);
- return KVM_INVALID_INST;
- }
- kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
- &vcpu->arch.
- guest_tlb[index],
- NULL, NULL);
- inst = *(opc);
- }
- local_irq_restore(flags);
- } else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
- paddr =
- kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
- (unsigned long) opc);
- inst = *(uint32_t *) CKSEG0ADDR(paddr);
- } else {
- kvm_err("%s: illegal address: %p\n", __func__, opc);
- return KVM_INVALID_INST;
- }
-
- return inst;
-}
-EXPORT_SYMBOL_GPL(kvm_get_inst);
diff --git a/arch/mips/kvm/trace.h b/arch/mips/kvm/trace.h
index bd6437f67dc0..c858cf168078 100644
--- a/arch/mips/kvm/trace.h
+++ b/arch/mips/kvm/trace.h
@@ -17,8 +17,75 @@
#define TRACE_INCLUDE_PATH .
#define TRACE_INCLUDE_FILE trace
-/* Tracepoints for VM exits */
-extern char *kvm_mips_exit_types_str[MAX_KVM_MIPS_EXIT_TYPES];
+/*
+ * Tracepoints for VM enters
+ */
+DECLARE_EVENT_CLASS(kvm_transition,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu),
+ TP_STRUCT__entry(
+ __field(unsigned long, pc)
+ ),
+
+ TP_fast_assign(
+ __entry->pc = vcpu->arch.pc;
+ ),
+
+ TP_printk("PC: 0x%08lx",
+ __entry->pc)
+);
+
+DEFINE_EVENT(kvm_transition, kvm_enter,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu));
+
+DEFINE_EVENT(kvm_transition, kvm_reenter,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu));
+
+DEFINE_EVENT(kvm_transition, kvm_out,
+ TP_PROTO(struct kvm_vcpu *vcpu),
+ TP_ARGS(vcpu));
+
+/* The first 32 exit reasons correspond to Cause.ExcCode */
+#define KVM_TRACE_EXIT_INT 0
+#define KVM_TRACE_EXIT_TLBMOD 1
+#define KVM_TRACE_EXIT_TLBMISS_LD 2
+#define KVM_TRACE_EXIT_TLBMISS_ST 3
+#define KVM_TRACE_EXIT_ADDRERR_LD 4
+#define KVM_TRACE_EXIT_ADDRERR_ST 5
+#define KVM_TRACE_EXIT_SYSCALL 8
+#define KVM_TRACE_EXIT_BREAK_INST 9
+#define KVM_TRACE_EXIT_RESVD_INST 10
+#define KVM_TRACE_EXIT_COP_UNUSABLE 11
+#define KVM_TRACE_EXIT_TRAP_INST 13
+#define KVM_TRACE_EXIT_MSA_FPE 14
+#define KVM_TRACE_EXIT_FPE 15
+#define KVM_TRACE_EXIT_MSA_DISABLED 21
+/* Further exit reasons */
+#define KVM_TRACE_EXIT_WAIT 32
+#define KVM_TRACE_EXIT_CACHE 33
+#define KVM_TRACE_EXIT_SIGNAL 34
+
+/* Tracepoints for VM exits */
+#define kvm_trace_symbol_exit_types \
+ { KVM_TRACE_EXIT_INT, "Interrupt" }, \
+ { KVM_TRACE_EXIT_TLBMOD, "TLB Mod" }, \
+ { KVM_TRACE_EXIT_TLBMISS_LD, "TLB Miss (LD)" }, \
+ { KVM_TRACE_EXIT_TLBMISS_ST, "TLB Miss (ST)" }, \
+ { KVM_TRACE_EXIT_ADDRERR_LD, "Address Error (LD)" }, \
+	{ KVM_TRACE_EXIT_ADDRERR_ST,	"Address Error (ST)" },	\
+ { KVM_TRACE_EXIT_SYSCALL, "System Call" }, \
+ { KVM_TRACE_EXIT_BREAK_INST, "Break Inst" }, \
+ { KVM_TRACE_EXIT_RESVD_INST, "Reserved Inst" }, \
+ { KVM_TRACE_EXIT_COP_UNUSABLE, "COP0/1 Unusable" }, \
+ { KVM_TRACE_EXIT_TRAP_INST, "Trap Inst" }, \
+ { KVM_TRACE_EXIT_MSA_FPE, "MSA FPE" }, \
+ { KVM_TRACE_EXIT_FPE, "FPE" }, \
+ { KVM_TRACE_EXIT_MSA_DISABLED, "MSA Disabled" }, \
+ { KVM_TRACE_EXIT_WAIT, "WAIT" }, \
+ { KVM_TRACE_EXIT_CACHE, "CACHE" }, \
+ { KVM_TRACE_EXIT_SIGNAL, "Signal" }
TRACE_EVENT(kvm_exit,
TP_PROTO(struct kvm_vcpu *vcpu, unsigned int reason),
@@ -34,10 +101,173 @@ TRACE_EVENT(kvm_exit,
),
TP_printk("[%s]PC: 0x%08lx",
- kvm_mips_exit_types_str[__entry->reason],
+ __print_symbolic(__entry->reason,
+ kvm_trace_symbol_exit_types),
__entry->pc)
);
+#define KVM_TRACE_MFC0 0
+#define KVM_TRACE_MTC0 1
+#define KVM_TRACE_DMFC0 2
+#define KVM_TRACE_DMTC0 3
+#define KVM_TRACE_RDHWR 4
+
+#define KVM_TRACE_HWR_COP0 0
+#define KVM_TRACE_HWR_HWR 1
+
+#define KVM_TRACE_COP0(REG, SEL) ((KVM_TRACE_HWR_COP0 << 8) | \
+ ((REG) << 3) | (SEL))
+#define KVM_TRACE_HWR(REG, SEL) ((KVM_TRACE_HWR_HWR << 8) | \
+ ((REG) << 3) | (SEL))
+
+#define kvm_trace_symbol_hwr_ops \
+ { KVM_TRACE_MFC0, "MFC0" }, \
+ { KVM_TRACE_MTC0, "MTC0" }, \
+ { KVM_TRACE_DMFC0, "DMFC0" }, \
+ { KVM_TRACE_DMTC0, "DMTC0" }, \
+ { KVM_TRACE_RDHWR, "RDHWR" }
+
+#define kvm_trace_symbol_hwr_cop \
+ { KVM_TRACE_HWR_COP0, "COP0" }, \
+ { KVM_TRACE_HWR_HWR, "HWR" }
+
+#define kvm_trace_symbol_hwr_regs \
+ { KVM_TRACE_COP0( 0, 0), "Index" }, \
+ { KVM_TRACE_COP0( 2, 0), "EntryLo0" }, \
+ { KVM_TRACE_COP0( 3, 0), "EntryLo1" }, \
+ { KVM_TRACE_COP0( 4, 0), "Context" }, \
+ { KVM_TRACE_COP0( 4, 2), "UserLocal" }, \
+ { KVM_TRACE_COP0( 5, 0), "PageMask" }, \
+ { KVM_TRACE_COP0( 6, 0), "Wired" }, \
+ { KVM_TRACE_COP0( 7, 0), "HWREna" }, \
+ { KVM_TRACE_COP0( 8, 0), "BadVAddr" }, \
+ { KVM_TRACE_COP0( 9, 0), "Count" }, \
+ { KVM_TRACE_COP0(10, 0), "EntryHi" }, \
+ { KVM_TRACE_COP0(11, 0), "Compare" }, \
+ { KVM_TRACE_COP0(12, 0), "Status" }, \
+ { KVM_TRACE_COP0(12, 1), "IntCtl" }, \
+ { KVM_TRACE_COP0(12, 2), "SRSCtl" }, \
+ { KVM_TRACE_COP0(13, 0), "Cause" }, \
+ { KVM_TRACE_COP0(14, 0), "EPC" }, \
+ { KVM_TRACE_COP0(15, 0), "PRId" }, \
+ { KVM_TRACE_COP0(15, 1), "EBase" }, \
+ { KVM_TRACE_COP0(16, 0), "Config" }, \
+ { KVM_TRACE_COP0(16, 1), "Config1" }, \
+ { KVM_TRACE_COP0(16, 2), "Config2" }, \
+ { KVM_TRACE_COP0(16, 3), "Config3" }, \
+ { KVM_TRACE_COP0(16, 4), "Config4" }, \
+ { KVM_TRACE_COP0(16, 5), "Config5" }, \
+ { KVM_TRACE_COP0(16, 7), "Config7" }, \
+ { KVM_TRACE_COP0(26, 0), "ECC" }, \
+ { KVM_TRACE_COP0(30, 0), "ErrorEPC" }, \
+ { KVM_TRACE_COP0(31, 2), "KScratch1" }, \
+ { KVM_TRACE_COP0(31, 3), "KScratch2" }, \
+ { KVM_TRACE_COP0(31, 4), "KScratch3" }, \
+ { KVM_TRACE_COP0(31, 5), "KScratch4" }, \
+ { KVM_TRACE_COP0(31, 6), "KScratch5" }, \
+ { KVM_TRACE_COP0(31, 7), "KScratch6" }, \
+ { KVM_TRACE_HWR( 0, 0), "CPUNum" }, \
+ { KVM_TRACE_HWR( 1, 0), "SYNCI_Step" }, \
+ { KVM_TRACE_HWR( 2, 0), "CC" }, \
+ { KVM_TRACE_HWR( 3, 0), "CCRes" }, \
+ { KVM_TRACE_HWR(29, 0), "ULR" }
+
+TRACE_EVENT(kvm_hwr,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op, unsigned int reg,
+ unsigned long val),
+ TP_ARGS(vcpu, op, reg, val),
+ TP_STRUCT__entry(
+ __field(unsigned long, val)
+ __field(u16, reg)
+ __field(u8, op)
+ ),
+
+ TP_fast_assign(
+ __entry->val = val;
+ __entry->reg = reg;
+ __entry->op = op;
+ ),
+
+ TP_printk("%s %s (%s:%u:%u) 0x%08lx",
+ __print_symbolic(__entry->op,
+ kvm_trace_symbol_hwr_ops),
+ __print_symbolic(__entry->reg,
+ kvm_trace_symbol_hwr_regs),
+ __print_symbolic(__entry->reg >> 8,
+ kvm_trace_symbol_hwr_cop),
+ (__entry->reg >> 3) & 0x1f,
+ __entry->reg & 0x7,
+ __entry->val)
+);
+
+#define KVM_TRACE_AUX_RESTORE 0
+#define KVM_TRACE_AUX_SAVE 1
+#define KVM_TRACE_AUX_ENABLE 2
+#define KVM_TRACE_AUX_DISABLE 3
+#define KVM_TRACE_AUX_DISCARD 4
+
+#define KVM_TRACE_AUX_FPU 1
+#define KVM_TRACE_AUX_MSA 2
+#define KVM_TRACE_AUX_FPU_MSA 3
+
+#define kvm_trace_symbol_aux_op \
+ { KVM_TRACE_AUX_RESTORE, "restore" }, \
+ { KVM_TRACE_AUX_SAVE, "save" }, \
+ { KVM_TRACE_AUX_ENABLE, "enable" }, \
+ { KVM_TRACE_AUX_DISABLE, "disable" }, \
+ { KVM_TRACE_AUX_DISCARD, "discard" }
+
+#define kvm_trace_symbol_aux_state \
+ { KVM_TRACE_AUX_FPU, "FPU" }, \
+ { KVM_TRACE_AUX_MSA, "MSA" }, \
+ { KVM_TRACE_AUX_FPU_MSA, "FPU & MSA" }
+
+TRACE_EVENT(kvm_aux,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int op,
+ unsigned int state),
+ TP_ARGS(vcpu, op, state),
+ TP_STRUCT__entry(
+ __field(unsigned long, pc)
+ __field(u8, op)
+ __field(u8, state)
+ ),
+
+ TP_fast_assign(
+ __entry->pc = vcpu->arch.pc;
+ __entry->op = op;
+ __entry->state = state;
+ ),
+
+ TP_printk("%s %s PC: 0x%08lx",
+ __print_symbolic(__entry->op,
+ kvm_trace_symbol_aux_op),
+ __print_symbolic(__entry->state,
+ kvm_trace_symbol_aux_state),
+ __entry->pc)
+);
+
+TRACE_EVENT(kvm_asid_change,
+ TP_PROTO(struct kvm_vcpu *vcpu, unsigned int old_asid,
+ unsigned int new_asid),
+ TP_ARGS(vcpu, old_asid, new_asid),
+ TP_STRUCT__entry(
+ __field(unsigned long, pc)
+ __field(u8, old_asid)
+ __field(u8, new_asid)
+ ),
+
+ TP_fast_assign(
+ __entry->pc = vcpu->arch.pc;
+ __entry->old_asid = old_asid;
+ __entry->new_asid = new_asid;
+ ),
+
+ TP_printk("PC: 0x%08lx old: 0x%02x new: 0x%02x",
+ __entry->pc,
+ __entry->old_asid,
+ __entry->new_asid)
+);
+
#endif /* _TRACE_KVM_H */
/* This part must be outside protection */
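The kvm_hwr tracepoint above packs the COP0/HWR flag, register number and select field into a single u16 so that __print_symbolic() can print the register name while the format string still recovers the raw (set, reg, sel) triple. A minimal userspace sketch of that encoding and of the shifts used by the TP_printk() format (illustration only, not part of the patch):

/*
 * Mirror of the KVM_TRACE_COP0()/KVM_TRACE_HWR() encoding above: bit 8
 * selects COP0 vs RDHWR, bits 7:3 hold the register, bits 2:0 the select.
 */
#include <stdio.h>

#define TRACE_HWR_COP0  0
#define TRACE_HWR_HWR   1

#define TRACE_COP0(reg, sel)    ((TRACE_HWR_COP0 << 8) | ((reg) << 3) | (sel))
#define TRACE_HWR(reg, sel)     ((TRACE_HWR_HWR << 8) | ((reg) << 3) | (sel))

int main(void)
{
        unsigned int entryhi = TRACE_COP0(10, 0);       /* CP0 EntryHi (10, 0) */
        unsigned int ulr = TRACE_HWR(29, 0);            /* RDHWR ULR (29, 0) */

        /* Decode the fields exactly the way the tracepoint's format does. */
        printf("EntryHi: set=%u reg=%u sel=%u (raw 0x%x)\n",
               entryhi >> 8, (entryhi >> 3) & 0x1f, entryhi & 0x7, entryhi);
        printf("ULR:     set=%u reg=%u sel=%u (raw 0x%x)\n",
               ulr >> 8, (ulr >> 3) & 0x1f, ulr & 0x7, ulr);
        return 0;
}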
diff --git a/arch/mips/kvm/trap_emul.c b/arch/mips/kvm/trap_emul.c
index ad988000563f..091553942bcb 100644
--- a/arch/mips/kvm/trap_emul.c
+++ b/arch/mips/kvm/trap_emul.c
@@ -21,7 +21,7 @@
static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
{
gpa_t gpa;
- uint32_t kseg = KSEGX(gva);
+ gva_t kseg = KSEGX(gva);
if ((kseg == CKSEG0) || (kseg == CKSEG1))
gpa = CPHYSADDR(gva);
@@ -40,8 +40,8 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -87,15 +87,15 @@ static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
|| KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
- kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ kvm_debug("USER/KSEG23 ADDR TLB MOD fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
er = kvm_mips_handle_tlbmod(cause, opc, run, vcpu);
@@ -111,14 +111,14 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
* when we are not using HIGHMEM. Need to address this in a
* HIGHMEM kernel
*/
- kvm_err("TLB MOD fault not handled, cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ kvm_err("TLB MOD fault not handled, cause %#x, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
kvm_mips_dump_host_tlbs();
kvm_arch_vcpu_dump_regs(vcpu);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
} else {
- kvm_err("Illegal TLB Mod fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ kvm_err("Illegal TLB Mod fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
kvm_mips_dump_host_tlbs();
kvm_arch_vcpu_dump_regs(vcpu);
@@ -128,59 +128,12 @@ static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
return ret;
}
-static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
-{
- struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
- unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
- unsigned long cause = vcpu->arch.host_cp0_cause;
- enum emulation_result er = EMULATE_DONE;
- int ret = RESUME_GUEST;
-
- if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
- && KVM_GUEST_KERNEL_MODE(vcpu)) {
- if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- } else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
- || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
- kvm_debug("USER ADDR TLB LD fault: cause %#lx, PC: %p, BadVaddr: %#lx\n",
- cause, opc, badvaddr);
- er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu);
- if (er == EMULATE_DONE)
- ret = RESUME_GUEST;
- else {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- } else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
- /*
- * All KSEG0 faults are handled by KVM, as the guest kernel does
- * not expect to ever get them
- */
- if (kvm_mips_handle_kseg0_tlb_fault
- (vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- } else {
- kvm_err("Illegal TLB LD fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
- cause, opc, badvaddr);
- kvm_mips_dump_host_tlbs();
- kvm_arch_vcpu_dump_regs(vcpu);
- run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
- ret = RESUME_HOST;
- }
- return ret;
-}
-
-static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
+static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -192,8 +145,8 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
}
} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
|| KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
- kvm_debug("USER ADDR TLB ST fault: PC: %#lx, BadVaddr: %#lx\n",
- vcpu->arch.pc, badvaddr);
+ kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
+ store ? "ST" : "LD", cause, opc, badvaddr);
/*
* User Address (UA) fault, this could happen if
@@ -213,14 +166,18 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
ret = RESUME_HOST;
}
} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
+ /*
+ * All KSEG0 faults are handled by KVM, as the guest kernel does
+ * not expect to ever get them
+ */
if (kvm_mips_handle_kseg0_tlb_fault
(vcpu->arch.host_cp0_badvaddr, vcpu) < 0) {
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
}
} else {
- kvm_err("Illegal TLB ST fault address , cause %#lx, PC: %p, BadVaddr: %#lx\n",
- cause, opc, badvaddr);
+ kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
+ store ? "ST" : "LD", cause, opc, badvaddr);
kvm_mips_dump_host_tlbs();
kvm_arch_vcpu_dump_regs(vcpu);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
@@ -229,12 +186,22 @@ static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
return ret;
}
+static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
+{
+ return kvm_trap_emul_handle_tlb_miss(vcpu, true);
+}
+
+static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
+{
+ return kvm_trap_emul_handle_tlb_miss(vcpu, false);
+}
+
static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -251,7 +218,7 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
ret = RESUME_HOST;
}
} else {
- kvm_err("Address Error (STORE): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
@@ -262,9 +229,9 @@ static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -280,7 +247,7 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
ret = RESUME_HOST;
}
} else {
- kvm_err("Address Error (LOAD): cause %#lx, PC: %p, BadVaddr: %#lx\n",
+ kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
cause, opc, badvaddr);
run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
ret = RESUME_HOST;
@@ -292,8 +259,8 @@ static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -310,8 +277,8 @@ static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -328,8 +295,8 @@ static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -346,8 +313,8 @@ static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 __user *opc = (u32 __user *)vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -364,8 +331,8 @@ static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 __user *opc = (u32 __user *)vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -382,8 +349,8 @@ static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
{
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *)vcpu->arch.pc;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 __user *opc = (u32 __user *)vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -407,8 +374,8 @@ static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
struct kvm_run *run = vcpu->run;
- uint32_t __user *opc = (uint32_t __user *) vcpu->arch.pc;
- unsigned long cause = vcpu->arch.host_cp0_cause;
+ u32 __user *opc = (u32 __user *) vcpu->arch.pc;
+ u32 cause = vcpu->arch.host_cp0_cause;
enum emulation_result er = EMULATE_DONE;
int ret = RESUME_GUEST;
@@ -451,24 +418,41 @@ static int kvm_trap_emul_vm_init(struct kvm *kvm)
static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
+ vcpu->arch.kscratch_enabled = 0xfc;
+
return 0;
}
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
struct mips_coproc *cop0 = vcpu->arch.cop0;
- uint32_t config1;
+ u32 config, config1;
int vcpu_id = vcpu->vcpu_id;
/*
* Arch specific stuff, set up config registers properly so that the
- * guest will come up as expected, for now we simulate a MIPS 24kc
+ * guest will come up as expected
*/
+#ifndef CONFIG_CPU_MIPSR6
+ /* r2-r5, simulate a MIPS 24kc */
kvm_write_c0_guest_prid(cop0, 0x00019300);
- /* Have config1, Cacheable, noncoherent, write-back, write allocate */
- kvm_write_c0_guest_config(cop0, MIPS_CONF_M | (0x3 << CP0C0_K0) |
- (0x1 << CP0C0_AR) |
- (MMU_TYPE_R4000 << CP0C0_MT));
+#else
+ /* r6+, simulate a generic QEMU machine */
+ kvm_write_c0_guest_prid(cop0, 0x00010000);
+#endif
+ /*
+ * Have config1, Cacheable, noncoherent, write-back, write allocate.
+ * Endianness, arch revision & virtually tagged icache should match
+ * host.
+ */
+ config = read_c0_config() & MIPS_CONF_AR;
+ config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
+#ifdef CONFIG_CPU_BIG_ENDIAN
+ config |= CONF_BE;
+#endif
+ if (cpu_has_vtag_icache)
+ config |= MIPS_CONF_VI;
+ kvm_write_c0_guest_config(cop0, config);
/* Read the cache characteristics from the host Config1 Register */
config1 = (read_c0_config1() & ~0x7f);
@@ -478,9 +462,8 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);
/* We unset some bits that we aren't emulating */
- config1 &=
- ~((1 << CP0C1_C2) | (1 << CP0C1_MD) | (1 << CP0C1_PC) |
- (1 << CP0C1_WR) | (1 << CP0C1_CA));
+ config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
+ MIPS_CONF1_WR | MIPS_CONF1_CA);
kvm_write_c0_guest_config1(cop0, config1);
/* Have config3, no tertiary/secondary caches implemented */
@@ -500,16 +483,28 @@ static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));
/*
- * Setup IntCtl defaults, compatibilty mode for timer interrupts (HW5)
+ * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
*/
kvm_write_c0_guest_intctl(cop0, 0xFC000000);
/* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
- kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 | (vcpu_id & 0xFF));
+ kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
+ (vcpu_id & MIPS_EBASE_CPUNUM));
return 0;
}
+static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
+{
+ return 0;
+}
+
+static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
+ u64 __user *indices)
+{
+ return 0;
+}
+
static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
const struct kvm_one_reg *reg,
s64 *v)
@@ -546,7 +541,7 @@ static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
kvm_mips_write_count(vcpu, v);
break;
case KVM_REG_MIPS_CP0_COMPARE:
- kvm_mips_write_compare(vcpu, v);
+ kvm_mips_write_compare(vcpu, v, false);
break;
case KVM_REG_MIPS_CP0_CAUSE:
/*
@@ -659,6 +654,8 @@ static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
.dequeue_io_int = kvm_mips_dequeue_io_int_cb,
.irq_deliver = kvm_mips_irq_deliver_cb,
.irq_clear = kvm_mips_irq_clear_cb,
+ .num_regs = kvm_trap_emul_num_regs,
+ .copy_reg_indices = kvm_trap_emul_copy_reg_indices,
.get_one_reg = kvm_trap_emul_get_one_reg,
.set_one_reg = kvm_trap_emul_set_one_reg,
.vcpu_get_regs = kvm_trap_emul_vcpu_get_regs,
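The first hunk in this file changes kvm_trap_emul_gva_to_gpa_cb() to keep the KSEGX() result in a gva_t rather than a uint32_t, matching the type of the address it is derived from. For reference, the unmapped-segment translation it performs boils down to masking off the segment bits; a small userspace sketch using the standard MIPS32 memory-map constants (illustration only, not part of the patch):

#include <stdio.h>

#define KSEGX(a)        ((a) & 0xe0000000u)     /* segment selector bits */
#define CPHYSADDR(a)    ((a) & 0x1fffffffu)     /* low 512MB physical alias */
#define CKSEG0          0x80000000u             /* cached, unmapped */
#define CKSEG1          0xa0000000u             /* uncached, unmapped */

int main(void)
{
        unsigned int gva = 0xa0001234u;         /* a KSEG1 address */

        if (KSEGX(gva) == CKSEG0 || KSEGX(gva) == CKSEG1)
                printf("gva 0x%08x -> gpa 0x%08x\n", gva, CPHYSADDR(gva));
        else
                printf("gva 0x%08x is mapped, would need a TLB lookup\n", gva);
        return 0;
}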
diff --git a/arch/mips/lantiq/Kconfig b/arch/mips/lantiq/Kconfig
index e10d33342b30..177769dbb0e8 100644
--- a/arch/mips/lantiq/Kconfig
+++ b/arch/mips/lantiq/Kconfig
@@ -25,7 +25,17 @@ config SOC_FALCON
endchoice
choice
- prompt "Devicetree"
+ prompt "Built-in device tree"
+ help
+ Legacy bootloaders do not pass a DTB pointer to the kernel, so
+ if a "wrapper" is not being used, the kernel will need to include
+ a device tree that matches the target board.
+
+ The builtin DTB will only be used if the firmware does not supply
+ a valid DTB.
+
+config LANTIQ_DT_NONE
+ bool "None"
config DT_EASY50712
bool "Easy50712"
diff --git a/arch/mips/lantiq/Makefile b/arch/mips/lantiq/Makefile
index 690257ab86d6..2718652e7466 100644
--- a/arch/mips/lantiq/Makefile
+++ b/arch/mips/lantiq/Makefile
@@ -1,4 +1,4 @@
-# Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+# Copyright (C) 2010 John Crispin <john@phrozen.org>
#
# This program is free software; you can redistribute it and/or modify it
# under the terms of the GNU General Public License version 2 as published
diff --git a/arch/mips/lantiq/clk.c b/arch/mips/lantiq/clk.c
index a0706fd4ce0a..149f0513c4f5 100644
--- a/arch/mips/lantiq/clk.c
+++ b/arch/mips/lantiq/clk.c
@@ -4,7 +4,7 @@
* by the Free Software Foundation.
*
* Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
*/
#include <linux/io.h>
#include <linux/export.h>
diff --git a/arch/mips/lantiq/clk.h b/arch/mips/lantiq/clk.h
index 7376ce817eda..e806e048ffc2 100644
--- a/arch/mips/lantiq/clk.h
+++ b/arch/mips/lantiq/clk.h
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
*/
#ifndef _LTQ_CLK_H__
diff --git a/arch/mips/lantiq/early_printk.c b/arch/mips/lantiq/early_printk.c
index 9b28d0940ef4..44bccaee822b 100644
--- a/arch/mips/lantiq/early_printk.c
+++ b/arch/mips/lantiq/early_printk.c
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
*/
#include <linux/cpu.h>
diff --git a/arch/mips/lantiq/falcon/prom.c b/arch/mips/lantiq/falcon/prom.c
index aa9497947859..75315c0a9fc3 100644
--- a/arch/mips/lantiq/falcon/prom.c
+++ b/arch/mips/lantiq/falcon/prom.c
@@ -4,7 +4,7 @@
* by the Free Software Foundation.
*
* Copyright (C) 2012 Thomas Langer <thomas.langer@lantiq.com>
- * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
*/
#include <linux/kernel.h>
diff --git a/arch/mips/lantiq/falcon/reset.c b/arch/mips/lantiq/falcon/reset.c
index 568248253426..7a535d72f541 100644
--- a/arch/mips/lantiq/falcon/reset.c
+++ b/arch/mips/lantiq/falcon/reset.c
@@ -4,7 +4,7 @@
* by the Free Software Foundation.
*
* Copyright (C) 2012 Thomas Langer <thomas.langer@lantiq.com>
- * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
*/
#include <linux/init.h>
diff --git a/arch/mips/lantiq/falcon/sysctrl.c b/arch/mips/lantiq/falcon/sysctrl.c
index 7edcd4946fc1..2a1b3021589c 100644
--- a/arch/mips/lantiq/falcon/sysctrl.c
+++ b/arch/mips/lantiq/falcon/sysctrl.c
@@ -4,7 +4,7 @@
* by the Free Software Foundation.
*
* Copyright (C) 2011 Thomas Langer <thomas.langer@lantiq.com>
- * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2011 John Crispin <john@phrozen.org>
*/
#include <linux/ioport.h>
diff --git a/arch/mips/lantiq/irq.c b/arch/mips/lantiq/irq.c
index 2e7f60c9fc5d..8ac0e5994ed2 100644
--- a/arch/mips/lantiq/irq.c
+++ b/arch/mips/lantiq/irq.c
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
* Copyright (C) 2010 Thomas Langer <thomas.langer@lantiq.com>
*/
@@ -66,7 +66,7 @@ int gic_present;
#endif
static int exin_avail;
-static struct resource ltq_eiu_irq[MAX_EIU];
+static u32 ltq_eiu_irq[MAX_EIU];
static void __iomem *ltq_icu_membase[MAX_IM];
static void __iomem *ltq_eiu_membase;
static struct irq_domain *ltq_domain;
@@ -75,7 +75,7 @@ static int ltq_perfcount_irq;
int ltq_eiu_get_irq(int exin)
{
if (exin < exin_avail)
- return ltq_eiu_irq[exin].start;
+ return ltq_eiu_irq[exin];
return -1;
}
@@ -125,8 +125,8 @@ static int ltq_eiu_settype(struct irq_data *d, unsigned int type)
{
int i;
- for (i = 0; i < MAX_EIU; i++) {
- if (d->hwirq == ltq_eiu_irq[i].start) {
+ for (i = 0; i < exin_avail; i++) {
+ if (d->hwirq == ltq_eiu_irq[i]) {
int val = 0;
int edge = 0;
@@ -173,8 +173,8 @@ static unsigned int ltq_startup_eiu_irq(struct irq_data *d)
int i;
ltq_enable_irq(d);
- for (i = 0; i < MAX_EIU; i++) {
- if (d->hwirq == ltq_eiu_irq[i].start) {
+ for (i = 0; i < exin_avail; i++) {
+ if (d->hwirq == ltq_eiu_irq[i]) {
/* by default we are low level triggered */
ltq_eiu_settype(d, IRQF_TRIGGER_LOW);
/* clear all pending */
@@ -195,8 +195,8 @@ static void ltq_shutdown_eiu_irq(struct irq_data *d)
int i;
ltq_disable_irq(d);
- for (i = 0; i < MAX_EIU; i++) {
- if (d->hwirq == ltq_eiu_irq[i].start) {
+ for (i = 0; i < exin_avail; i++) {
+ if (d->hwirq == ltq_eiu_irq[i]) {
/* disable */
ltq_eiu_w32(ltq_eiu_r32(LTQ_EIU_EXIN_INEN) & ~BIT(i),
LTQ_EIU_EXIN_INEN);
@@ -206,7 +206,7 @@ static void ltq_shutdown_eiu_irq(struct irq_data *d)
}
static struct irq_chip ltq_irq_type = {
- "icu",
+ .name = "icu",
.irq_enable = ltq_enable_irq,
.irq_disable = ltq_disable_irq,
.irq_unmask = ltq_enable_irq,
@@ -216,7 +216,7 @@ static struct irq_chip ltq_irq_type = {
};
static struct irq_chip ltq_eiu_type = {
- "eiu",
+ .name = "eiu",
.irq_startup = ltq_startup_eiu_irq,
.irq_shutdown = ltq_shutdown_eiu_irq,
.irq_enable = ltq_enable_irq,
@@ -341,10 +341,10 @@ static int icu_map(struct irq_domain *d, unsigned int irq, irq_hw_number_t hw)
return 0;
for (i = 0; i < exin_avail; i++)
- if (hw == ltq_eiu_irq[i].start)
+ if (hw == ltq_eiu_irq[i])
chip = &ltq_eiu_type;
- irq_set_chip_and_handler(hw, chip, handle_level_irq);
+ irq_set_chip_and_handler(irq, chip, handle_level_irq);
return 0;
}
@@ -439,14 +439,15 @@ int __init icu_of_init(struct device_node *node, struct device_node *parent)
eiu_node = of_find_compatible_node(NULL, NULL, "lantiq,eiu-xway");
if (eiu_node && !of_address_to_resource(eiu_node, 0, &res)) {
/* find out how many external irq sources we have */
- exin_avail = of_irq_count(eiu_node);
+ exin_avail = of_property_count_u32_elems(eiu_node,
+ "lantiq,eiu-irqs");
if (exin_avail > MAX_EIU)
exin_avail = MAX_EIU;
- ret = of_irq_to_resource_table(eiu_node,
+ ret = of_property_read_u32_array(eiu_node, "lantiq,eiu-irqs",
ltq_eiu_irq, exin_avail);
- if (ret != exin_avail)
+ if (ret)
panic("failed to load external irq resources");
if (!request_mem_region(res.start, resource_size(&res),
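The EIU changes above replace the per-IRQ struct resource table with a bare u32 array read from a "lantiq,eiu-irqs" devicetree property. A hedged sketch of the expected property shape and of the generic OF helpers used to read it back (the node name and IRQ numbers are made-up examples; of_property_count_u32_elems() and of_property_read_u32_array() are the stock kernel helpers):

#include <linux/kernel.h>
#include <linux/of.h>

/*
 * Devicetree side (hypothetical values):
 *
 *      eiu: eiu@101000 {
 *              compatible = "lantiq,eiu-xway";
 *              lantiq,eiu-irqs = <166 135 66>;
 *      };
 */
static u32 example_eiu_irq[6];

static int example_read_eiu_irqs(struct device_node *np)
{
        int n = of_property_count_u32_elems(np, "lantiq,eiu-irqs");

        if (n < 0)
                return n;               /* property missing or malformed */
        if (n > (int)ARRAY_SIZE(example_eiu_irq))
                n = ARRAY_SIZE(example_eiu_irq);
        return of_property_read_u32_array(np, "lantiq,eiu-irqs",
                                          example_eiu_irq, n);
}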
diff --git a/arch/mips/lantiq/prom.c b/arch/mips/lantiq/prom.c
index 297bcaa6b5d3..4cbb000e778e 100644
--- a/arch/mips/lantiq/prom.c
+++ b/arch/mips/lantiq/prom.c
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
*/
#include <linux/export.h>
@@ -65,6 +65,8 @@ static void __init prom_init_cmdline(void)
void __init plat_mem_setup(void)
{
+ void *dtb;
+
ioport_resource.start = IOPORT_RESOURCE_START;
ioport_resource.end = IOPORT_RESOURCE_END;
iomem_resource.start = IOMEM_RESOURCE_START;
@@ -72,11 +74,18 @@ void __init plat_mem_setup(void)
set_io_port_base((unsigned long) KSEG1);
+ if (fw_passed_dtb) /* UHI interface */
+ dtb = (void *)fw_passed_dtb;
+ else if (__dtb_start != __dtb_end)
+ dtb = (void *)__dtb_start;
+ else
+ panic("no dtb found");
+
/*
- * Load the builtin devicetree. This causes the chosen node to be
+ * Load the devicetree. This causes the chosen node to be
* parsed resulting in our memory appearing
*/
- __dt_setup_arch(__dtb_start);
+ __dt_setup_arch(dtb);
}
void __init device_tree_init(void)
diff --git a/arch/mips/lantiq/prom.h b/arch/mips/lantiq/prom.h
index bfd2d58c1d69..4b6576c50250 100644
--- a/arch/mips/lantiq/prom.h
+++ b/arch/mips/lantiq/prom.h
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
*/
#ifndef _LTQ_PROM_H__
diff --git a/arch/mips/lantiq/xway/clk.c b/arch/mips/lantiq/xway/clk.c
index 07f6d5b0b65e..41fc30d8ef89 100644
--- a/arch/mips/lantiq/xway/clk.c
+++ b/arch/mips/lantiq/xway/clk.c
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
* Copyright (C) 2013-2015 Lantiq Beteiligungs-GmbH & Co.KG
*/
diff --git a/arch/mips/lantiq/xway/dcdc.c b/arch/mips/lantiq/xway/dcdc.c
index ae8e930f5283..08f7abaadfe5 100644
--- a/arch/mips/lantiq/xway/dcdc.c
+++ b/arch/mips/lantiq/xway/dcdc.c
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
* Copyright (C) 2010 Sameer Ahmad, Lantiq GmbH
*/
diff --git a/arch/mips/lantiq/xway/dma.c b/arch/mips/lantiq/xway/dma.c
index 34a116e840d8..cef811755123 100644
--- a/arch/mips/lantiq/xway/dma.c
+++ b/arch/mips/lantiq/xway/dma.c
@@ -12,7 +12,7 @@
* along with this program; if not, write to the Free Software
* Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
*
- * Copyright (C) 2011 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2011 John Crispin <john@phrozen.org>
*/
#include <linux/init.h>
diff --git a/arch/mips/lantiq/xway/gptu.c b/arch/mips/lantiq/xway/gptu.c
index f1492b2db017..0f1bbea1a816 100644
--- a/arch/mips/lantiq/xway/gptu.c
+++ b/arch/mips/lantiq/xway/gptu.c
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
* Copyright (C) 2012 Lantiq GmbH
*/
diff --git a/arch/mips/lantiq/xway/prom.c b/arch/mips/lantiq/xway/prom.c
index 8f6e02f1e965..9475b2510adb 100644
--- a/arch/mips/lantiq/xway/prom.c
+++ b/arch/mips/lantiq/xway/prom.c
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
* Copyright (C) 2013-2015 Lantiq Beteiligungs-GmbH & Co.KG
*/
diff --git a/arch/mips/lantiq/xway/reset.c b/arch/mips/lantiq/xway/reset.c
index bc29bb349e94..83fd65d76e81 100644
--- a/arch/mips/lantiq/xway/reset.c
+++ b/arch/mips/lantiq/xway/reset.c
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
* Copyright (C) 2013-2015 Lantiq Beteiligungs-GmbH & Co.KG
*/
@@ -258,7 +258,7 @@ static int ltq_reset_device(struct reset_controller_dev *rcdev,
return ltq_deassert_device(rcdev, id);
}
-static struct reset_control_ops reset_ops = {
+static const struct reset_control_ops reset_ops = {
.reset = ltq_reset_device,
.assert = ltq_assert_device,
.deassert = ltq_deassert_device,
diff --git a/arch/mips/lantiq/xway/sysctrl.c b/arch/mips/lantiq/xway/sysctrl.c
index 80554e8f6037..236193b5210b 100644
--- a/arch/mips/lantiq/xway/sysctrl.c
+++ b/arch/mips/lantiq/xway/sysctrl.c
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright (C) 2011-2012 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2011-2012 John Crispin <john@phrozen.org>
* Copyright (C) 2013-2015 Lantiq Beteiligungs-GmbH & Co.KG
*/
diff --git a/arch/mips/lantiq/xway/vmmc.c b/arch/mips/lantiq/xway/vmmc.c
index d001bc38908a..4625495f9230 100644
--- a/arch/mips/lantiq/xway/vmmc.c
+++ b/arch/mips/lantiq/xway/vmmc.c
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
*/
#include <linux/module.h>
diff --git a/arch/mips/lantiq/xway/xrx200_phy_fw.c b/arch/mips/lantiq/xway/xrx200_phy_fw.c
index 199094a40c15..71e518c1e7e7 100644
--- a/arch/mips/lantiq/xway/xrx200_phy_fw.c
+++ b/arch/mips/lantiq/xway/xrx200_phy_fw.c
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
*/
#include <linux/delay.h>
@@ -112,6 +112,6 @@ static struct platform_driver xway_phy_driver = {
module_platform_driver(xway_phy_driver);
-MODULE_AUTHOR("John Crispin <blogic@openwrt.org>");
+MODULE_AUTHOR("John Crispin <john@phrozen.org>");
MODULE_DESCRIPTION("Lantiq XRX200 PHY Firmware Loader");
MODULE_LICENSE("GPL");
diff --git a/arch/mips/lasat/picvue_proc.c b/arch/mips/lasat/picvue_proc.c
index b42095880667..27533c109f92 100644
--- a/arch/mips/lasat/picvue_proc.c
+++ b/arch/mips/lasat/picvue_proc.c
@@ -43,7 +43,7 @@ static int pvc_line_proc_show(struct seq_file *m, void *v)
{
int lineno = *(int *)m->private;
- if (lineno < 0 || lineno > PVC_NLINES) {
+ if (lineno < 0 || lineno >= PVC_NLINES) {
printk(KERN_WARNING "proc_read_line: invalid lineno %d\n", lineno);
return 0;
}
@@ -67,7 +67,7 @@ static ssize_t pvc_line_proc_write(struct file *file, const char __user *buf,
char kbuf[PVC_LINELEN];
size_t len;
- BUG_ON(lineno < 0 || lineno > PVC_NLINES);
+ BUG_ON(lineno < 0 || lineno >= PVC_NLINES);
len = min(count, sizeof(kbuf) - 1);
if (copy_from_user(kbuf, buf, len))
diff --git a/arch/mips/lib/ashldi3.c b/arch/mips/lib/ashldi3.c
index beb80f316095..927dc94a030f 100644
--- a/arch/mips/lib/ashldi3.c
+++ b/arch/mips/lib/ashldi3.c
@@ -2,7 +2,7 @@
#include "libgcc.h"
-long long __ashldi3(long long u, word_type b)
+long long notrace __ashldi3(long long u, word_type b)
{
DWunion uu, w;
word_type bm;
diff --git a/arch/mips/lib/ashrdi3.c b/arch/mips/lib/ashrdi3.c
index c884a912b660..9fdf1a598428 100644
--- a/arch/mips/lib/ashrdi3.c
+++ b/arch/mips/lib/ashrdi3.c
@@ -2,7 +2,7 @@
#include "libgcc.h"
-long long __ashrdi3(long long u, word_type b)
+long long notrace __ashrdi3(long long u, word_type b)
{
DWunion uu, w;
word_type bm;
diff --git a/arch/mips/lib/bswapdi.c b/arch/mips/lib/bswapdi.c
index 77e5f9c1f005..e3e77aa52c95 100644
--- a/arch/mips/lib/bswapdi.c
+++ b/arch/mips/lib/bswapdi.c
@@ -1,6 +1,6 @@
#include <linux/module.h>
-unsigned long long __bswapdi2(unsigned long long u)
+unsigned long long notrace __bswapdi2(unsigned long long u)
{
return (((u) & 0xff00000000000000ull) >> 56) |
(((u) & 0x00ff000000000000ull) >> 40) |
diff --git a/arch/mips/lib/bswapsi.c b/arch/mips/lib/bswapsi.c
index 2b302ff121d2..530a8afe6fda 100644
--- a/arch/mips/lib/bswapsi.c
+++ b/arch/mips/lib/bswapsi.c
@@ -1,6 +1,6 @@
#include <linux/module.h>
-unsigned int __bswapsi2(unsigned int u)
+unsigned int notrace __bswapsi2(unsigned int u)
{
return (((u) & 0xff000000) >> 24) |
(((u) & 0x00ff0000) >> 8) |
diff --git a/arch/mips/lib/cmpdi2.c b/arch/mips/lib/cmpdi2.c
index 8c1306437ed1..06857da96993 100644
--- a/arch/mips/lib/cmpdi2.c
+++ b/arch/mips/lib/cmpdi2.c
@@ -2,7 +2,7 @@
#include "libgcc.h"
-word_type __cmpdi2(long long a, long long b)
+word_type notrace __cmpdi2(long long a, long long b)
{
const DWunion au = {
.ll = a
diff --git a/arch/mips/lib/dump_tlb.c b/arch/mips/lib/dump_tlb.c
index 92a37319efbe..0f80b936e75e 100644
--- a/arch/mips/lib/dump_tlb.c
+++ b/arch/mips/lib/dump_tlb.c
@@ -19,6 +19,8 @@ void dump_tlb_regs(void)
pr_info("Index : %0x\n", read_c0_index());
pr_info("PageMask : %0x\n", read_c0_pagemask());
+ if (cpu_has_guestid)
+ pr_info("GuestCtl1: %0x\n", read_c0_guestctl1());
pr_info("EntryHi : %0*lx\n", field, read_c0_entryhi());
pr_info("EntryLo0 : %0*lx\n", field, read_c0_entrylo0());
pr_info("EntryLo1 : %0*lx\n", field, read_c0_entrylo1());
@@ -72,7 +74,10 @@ static void dump_tlb(int first, int last)
{
unsigned long s_entryhi, entryhi, asid;
unsigned long long entrylo0, entrylo1, pa;
- unsigned int s_index, s_pagemask, pagemask, c0, c1, i;
+ unsigned int s_index, s_pagemask, s_guestctl1 = 0;
+ unsigned int pagemask, guestctl1 = 0, c0, c1, i;
+ unsigned long asidmask = cpu_asid_mask(&current_cpu_data);
+ int asidwidth = DIV_ROUND_UP(ilog2(asidmask) + 1, 4);
#ifdef CONFIG_32BIT
bool xpa = cpu_has_xpa && (read_c0_pagegrain() & PG_ELPA);
int pwidth = xpa ? 11 : 8;
@@ -86,7 +91,9 @@ static void dump_tlb(int first, int last)
s_pagemask = read_c0_pagemask();
s_entryhi = read_c0_entryhi();
s_index = read_c0_index();
- asid = s_entryhi & 0xff;
+ asid = s_entryhi & asidmask;
+ if (cpu_has_guestid)
+ s_guestctl1 = read_c0_guestctl1();
for (i = first; i <= last; i++) {
write_c0_index(i);
@@ -97,6 +104,8 @@ static void dump_tlb(int first, int last)
entryhi = read_c0_entryhi();
entrylo0 = read_c0_entrylo0();
entrylo1 = read_c0_entrylo1();
+ if (cpu_has_guestid)
+ guestctl1 = read_c0_guestctl1();
/* EHINV bit marks entire entry as invalid */
if (cpu_has_tlbinv && entryhi & MIPS_ENTRYHI_EHINV)
@@ -115,7 +124,7 @@ static void dump_tlb(int first, int last)
* due to duplicate TLB entry.
*/
if (!((entrylo0 | entrylo1) & ENTRYLO_G) &&
- (entryhi & 0xff) != asid)
+ (entryhi & asidmask) != asid)
continue;
/*
@@ -126,15 +135,19 @@ static void dump_tlb(int first, int last)
c0 = (entrylo0 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
c1 = (entrylo1 & ENTRYLO_C) >> ENTRYLO_C_SHIFT;
- printk("va=%0*lx asid=%02lx\n",
+ printk("va=%0*lx asid=%0*lx",
vwidth, (entryhi & ~0x1fffUL),
- entryhi & 0xff);
+ asidwidth, entryhi & asidmask);
+ if (cpu_has_guestid)
+ printk(" gid=%02lx",
+ (guestctl1 & MIPS_GCTL1_RID)
+ >> MIPS_GCTL1_RID_SHIFT);
/* RI/XI are in awkward places, so mask them off separately */
pa = entrylo0 & ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);
if (xpa)
pa |= (unsigned long long)readx_c0_entrylo0() << 30;
pa = (pa << 6) & PAGE_MASK;
- printk("\t[");
+ printk("\n\t[");
if (cpu_has_rixi)
printk("ri=%d xi=%d ",
(entrylo0 & MIPS_ENTRYLO_RI) ? 1 : 0,
@@ -164,6 +177,8 @@ static void dump_tlb(int first, int last)
write_c0_entryhi(s_entryhi);
write_c0_index(s_index);
write_c0_pagemask(s_pagemask);
+ if (cpu_has_guestid)
+ write_c0_guestctl1(s_guestctl1);
}
void dump_tlb_all(void)
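dump_tlb() now sizes the printed ASID field from the runtime ASID mask instead of hard-coding two hex digits, via asidwidth = DIV_ROUND_UP(ilog2(asidmask) + 1, 4). A quick worked check of that arithmetic for 8-bit and 10-bit ASID masks (standalone illustration, not kernel code):

#include <stdio.h>

#define DIV_ROUND_UP(n, d)      (((n) + (d) - 1) / (d))

/* Position of the highest set bit, i.e. what the kernel's ilog2() returns. */
static int highest_bit(unsigned long v)
{
        int l = -1;

        while (v) {
                v >>= 1;
                l++;
        }
        return l;
}

int main(void)
{
        unsigned long masks[] = { 0xff, 0x3ff };        /* 8- and 10-bit ASIDs */
        unsigned int i;

        for (i = 0; i < sizeof(masks) / sizeof(masks[0]); i++)
                printf("asidmask 0x%lx -> %d hex digit(s)\n", masks[i],
                       DIV_ROUND_UP(highest_bit(masks[i]) + 1, 4));
        return 0;
}

That yields two digits for the classic 0xff mask and three for a 10-bit mask, which is why the va=.../asid=... line now prints with a variable width.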
diff --git a/arch/mips/lib/lshrdi3.c b/arch/mips/lib/lshrdi3.c
index dcf8d6810b7c..364547449c65 100644
--- a/arch/mips/lib/lshrdi3.c
+++ b/arch/mips/lib/lshrdi3.c
@@ -2,7 +2,7 @@
#include "libgcc.h"
-long long __lshrdi3(long long u, word_type b)
+long long notrace __lshrdi3(long long u, word_type b)
{
DWunion uu, w;
word_type bm;
diff --git a/arch/mips/lib/memcpy.S b/arch/mips/lib/memcpy.S
index 9245e1705e69..6c303a94a196 100644
--- a/arch/mips/lib/memcpy.S
+++ b/arch/mips/lib/memcpy.S
@@ -256,7 +256,7 @@
/*
* Macro to build the __copy_user common code
- * Arguements:
+ * Arguments:
* mode : LEGACY_MODE or EVA_MODE
* from : Source operand. USEROP or KERNELOP
* to : Destination operand. USEROP or KERNELOP
diff --git a/arch/mips/lib/memset.S b/arch/mips/lib/memset.S
index 8f0019a2e5c8..18a1ccd4d134 100644
--- a/arch/mips/lib/memset.S
+++ b/arch/mips/lib/memset.S
@@ -228,10 +228,12 @@
.hidden __memset
.endif
+#ifdef CONFIG_CPU_MIPSR6
.Lbyte_fixup\@:
PTR_SUBU a2, $0, t0
jr ra
PTR_ADDIU a2, 1
+#endif /* CONFIG_CPU_MIPSR6 */
.Lfirst_fixup\@:
jr ra
diff --git a/arch/mips/lib/r3k_dump_tlb.c b/arch/mips/lib/r3k_dump_tlb.c
index cfcbb5218b59..744f4a7bc49d 100644
--- a/arch/mips/lib/r3k_dump_tlb.c
+++ b/arch/mips/lib/r3k_dump_tlb.c
@@ -29,9 +29,10 @@ static void dump_tlb(int first, int last)
{
int i;
unsigned int asid;
- unsigned long entryhi, entrylo0;
+ unsigned long entryhi, entrylo0, asid_mask;
- asid = read_c0_entryhi() & ASID_MASK;
+ asid_mask = cpu_asid_mask(&current_cpu_data);
+ asid = read_c0_entryhi() & asid_mask;
for (i = first; i <= last; i++) {
write_c0_index(i<<8);
@@ -46,7 +47,7 @@ static void dump_tlb(int first, int last)
/* Unused entries have a virtual address of KSEG0. */
if ((entryhi & PAGE_MASK) != KSEG0 &&
(entrylo0 & R3K_ENTRYLO_G ||
- (entryhi & ASID_MASK) == asid)) {
+ (entryhi & asid_mask) == asid)) {
/*
* Only print entries in use
*/
@@ -55,7 +56,7 @@ static void dump_tlb(int first, int last)
printk("va=%08lx asid=%08lx"
" [pa=%06lx n=%d d=%d v=%d g=%d]",
entryhi & PAGE_MASK,
- entryhi & ASID_MASK,
+ entryhi & asid_mask,
entrylo0 & PAGE_MASK,
(entrylo0 & R3K_ENTRYLO_N) ? 1 : 0,
(entrylo0 & R3K_ENTRYLO_D) ? 1 : 0,
diff --git a/arch/mips/lib/ucmpdi2.c b/arch/mips/lib/ucmpdi2.c
index bb4cb2f828ea..bd599f58234c 100644
--- a/arch/mips/lib/ucmpdi2.c
+++ b/arch/mips/lib/ucmpdi2.c
@@ -2,7 +2,7 @@
#include "libgcc.h"
-word_type __ucmpdi2(unsigned long long a, unsigned long long b)
+word_type notrace __ucmpdi2(unsigned long long a, unsigned long long b)
{
const DWunion au = {.ll = a};
const DWunion bu = {.ll = b};
diff --git a/arch/mips/loongson32/common/platform.c b/arch/mips/loongson32/common/platform.c
index ddf1d4cbf31e..f2c714d8fb60 100644
--- a/arch/mips/loongson32/common/platform.c
+++ b/arch/mips/loongson32/common/platform.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
+ * Copyright (c) 2011-2016 Zhang, Keguang <keguang.zhang@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -10,14 +10,17 @@
#include <linux/clk.h>
#include <linux/dma-mapping.h>
#include <linux/err.h>
+#include <linux/mtd/partitions.h>
+#include <linux/sizes.h>
#include <linux/phy.h>
#include <linux/serial_8250.h>
#include <linux/stmmac.h>
#include <linux/usb/ehci_pdriver.h>
-#include <asm-generic/sizes.h>
-#include <cpufreq.h>
#include <loongson1.h>
+#include <cpufreq.h>
+#include <dma.h>
+#include <nand.h>
/* 8250/16550 compatible UART */
#define LS1X_UART(_id) \
@@ -45,7 +48,7 @@ struct platform_device ls1x_uart_pdev = {
},
};
-void __init ls1x_serial_setup(struct platform_device *pdev)
+void __init ls1x_serial_set_uartclk(struct platform_device *pdev)
{
struct clk *clk;
struct plat_serial8250_port *p;
@@ -77,6 +80,42 @@ struct platform_device ls1x_cpufreq_pdev = {
},
};
+/* DMA */
+static struct resource ls1x_dma_resources[] = {
+ [0] = {
+ .start = LS1X_DMAC_BASE,
+ .end = LS1X_DMAC_BASE + SZ_4 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ .start = LS1X_DMA0_IRQ,
+ .end = LS1X_DMA0_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+ [2] = {
+ .start = LS1X_DMA1_IRQ,
+ .end = LS1X_DMA1_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+ [3] = {
+ .start = LS1X_DMA2_IRQ,
+ .end = LS1X_DMA2_IRQ,
+ .flags = IORESOURCE_IRQ,
+ },
+};
+
+struct platform_device ls1x_dma_pdev = {
+ .name = "ls1x-dma",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(ls1x_dma_resources),
+ .resource = ls1x_dma_resources,
+};
+
+void __init ls1x_dma_set_platdata(struct plat_ls1x_dma *pdata)
+{
+ ls1x_dma_pdev.dev.platform_data = pdata;
+}
+
/* Synopsys Ethernet GMAC */
static struct stmmac_mdio_bus_data ls1x_mdio_bus_data = {
.phy_mask = 0,
@@ -198,6 +237,64 @@ struct platform_device ls1x_eth1_pdev = {
},
};
+/* GPIO */
+static struct resource ls1x_gpio0_resources[] = {
+ [0] = {
+ .start = LS1X_GPIO0_BASE,
+ .end = LS1X_GPIO0_BASE + SZ_4 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device ls1x_gpio0_pdev = {
+ .name = "ls1x-gpio",
+ .id = 0,
+ .num_resources = ARRAY_SIZE(ls1x_gpio0_resources),
+ .resource = ls1x_gpio0_resources,
+};
+
+static struct resource ls1x_gpio1_resources[] = {
+ [0] = {
+ .start = LS1X_GPIO1_BASE,
+ .end = LS1X_GPIO1_BASE + SZ_4 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+};
+
+struct platform_device ls1x_gpio1_pdev = {
+ .name = "ls1x-gpio",
+ .id = 1,
+ .num_resources = ARRAY_SIZE(ls1x_gpio1_resources),
+ .resource = ls1x_gpio1_resources,
+};
+
+/* NAND Flash */
+static struct resource ls1x_nand_resources[] = {
+ [0] = {
+ .start = LS1X_NAND_BASE,
+ .end = LS1X_NAND_BASE + SZ_32 - 1,
+ .flags = IORESOURCE_MEM,
+ },
+ [1] = {
+ /* DMA channel 0 is dedicated to NAND */
+ .start = LS1X_DMA_CHANNEL0,
+ .end = LS1X_DMA_CHANNEL0,
+ .flags = IORESOURCE_DMA,
+ },
+};
+
+struct platform_device ls1x_nand_pdev = {
+ .name = "ls1x-nand",
+ .id = -1,
+ .num_resources = ARRAY_SIZE(ls1x_nand_resources),
+ .resource = ls1x_nand_resources,
+};
+
+void __init ls1x_nand_set_platdata(struct plat_ls1x_nand *pdata)
+{
+ ls1x_nand_pdev.dev.platform_data = pdata;
+}
+
/* USB EHCI */
static u64 ls1x_ehci_dmamask = DMA_BIT_MASK(32);
diff --git a/arch/mips/loongson32/common/reset.c b/arch/mips/loongson32/common/reset.c
index c41e4ca56ab4..8a1d9cc5a134 100644
--- a/arch/mips/loongson32/common/reset.c
+++ b/arch/mips/loongson32/common/reset.c
@@ -9,12 +9,13 @@
#include <linux/io.h>
#include <linux/pm.h>
+#include <linux/sizes.h>
#include <asm/idle.h>
#include <asm/reboot.h>
#include <loongson1.h>
-static void __iomem *wdt_base;
+static void __iomem *wdt_reg_base;
static void ls1x_halt(void)
{
@@ -26,9 +27,9 @@ static void ls1x_halt(void)
static void ls1x_restart(char *command)
{
- __raw_writel(0x1, wdt_base + WDT_EN);
- __raw_writel(0x1, wdt_base + WDT_TIMER);
- __raw_writel(0x1, wdt_base + WDT_SET);
+ __raw_writel(0x1, wdt_reg_base + WDT_EN);
+ __raw_writel(0x1, wdt_reg_base + WDT_TIMER);
+ __raw_writel(0x1, wdt_reg_base + WDT_SET);
ls1x_halt();
}
@@ -40,8 +41,8 @@ static void ls1x_power_off(void)
static int __init ls1x_reboot_setup(void)
{
- wdt_base = ioremap_nocache(LS1X_WDT_BASE, 0x0f);
- if (!wdt_base)
+ wdt_reg_base = ioremap_nocache(LS1X_WDT_BASE, (SZ_4 + SZ_8));
+ if (!wdt_reg_base)
panic("Failed to remap watchdog registers");
_machine_restart = ls1x_restart;
diff --git a/arch/mips/loongson32/common/time.c b/arch/mips/loongson32/common/time.c
index 0996b025eeef..ff224f0020e5 100644
--- a/arch/mips/loongson32/common/time.c
+++ b/arch/mips/loongson32/common/time.c
@@ -9,6 +9,7 @@
#include <linux/clk.h>
#include <linux/interrupt.h>
+#include <linux/sizes.h>
#include <asm/time.h>
#include <loongson1.h>
@@ -35,25 +36,25 @@
DEFINE_RAW_SPINLOCK(ls1x_timer_lock);
-static void __iomem *timer_base;
+static void __iomem *timer_reg_base;
static uint32_t ls1x_jiffies_per_tick;
static inline void ls1x_pwmtimer_set_period(uint32_t period)
{
- __raw_writel(period, timer_base + PWM_HRC);
- __raw_writel(period, timer_base + PWM_LRC);
+ __raw_writel(period, timer_reg_base + PWM_HRC);
+ __raw_writel(period, timer_reg_base + PWM_LRC);
}
static inline void ls1x_pwmtimer_restart(void)
{
- __raw_writel(0x0, timer_base + PWM_CNT);
- __raw_writel(INT_EN | CNT_EN, timer_base + PWM_CTRL);
+ __raw_writel(0x0, timer_reg_base + PWM_CNT);
+ __raw_writel(INT_EN | CNT_EN, timer_reg_base + PWM_CTRL);
}
void __init ls1x_pwmtimer_init(void)
{
- timer_base = ioremap(LS1X_TIMER_BASE, 0xf);
- if (!timer_base)
+ timer_reg_base = ioremap_nocache(LS1X_TIMER_BASE, SZ_16);
+ if (!timer_reg_base)
panic("Failed to remap timer registers");
ls1x_jiffies_per_tick = DIV_ROUND_CLOSEST(mips_hpt_frequency, HZ);
@@ -86,7 +87,7 @@ static cycle_t ls1x_clocksource_read(struct clocksource *cs)
*/
jifs = jiffies;
/* read the count */
- count = __raw_readl(timer_base + PWM_CNT);
+ count = __raw_readl(timer_reg_base + PWM_CNT);
/*
* It's possible for count to appear to go the wrong way for this
@@ -131,7 +132,7 @@ static int ls1x_clockevent_set_state_periodic(struct clock_event_device *cd)
raw_spin_lock(&ls1x_timer_lock);
ls1x_pwmtimer_set_period(ls1x_jiffies_per_tick);
ls1x_pwmtimer_restart();
- __raw_writel(INT_EN | CNT_EN, timer_base + PWM_CTRL);
+ __raw_writel(INT_EN | CNT_EN, timer_reg_base + PWM_CTRL);
raw_spin_unlock(&ls1x_timer_lock);
return 0;
@@ -140,7 +141,7 @@ static int ls1x_clockevent_set_state_periodic(struct clock_event_device *cd)
static int ls1x_clockevent_tick_resume(struct clock_event_device *cd)
{
raw_spin_lock(&ls1x_timer_lock);
- __raw_writel(INT_EN | CNT_EN, timer_base + PWM_CTRL);
+ __raw_writel(INT_EN | CNT_EN, timer_reg_base + PWM_CTRL);
raw_spin_unlock(&ls1x_timer_lock);
return 0;
@@ -149,8 +150,8 @@ static int ls1x_clockevent_tick_resume(struct clock_event_device *cd)
static int ls1x_clockevent_set_state_shutdown(struct clock_event_device *cd)
{
raw_spin_lock(&ls1x_timer_lock);
- __raw_writel(__raw_readl(timer_base + PWM_CTRL) & ~CNT_EN,
- timer_base + PWM_CTRL);
+ __raw_writel(__raw_readl(timer_reg_base + PWM_CTRL) & ~CNT_EN,
+ timer_reg_base + PWM_CTRL);
raw_spin_unlock(&ls1x_timer_lock);
return 0;
@@ -220,7 +221,7 @@ void __init plat_time_init(void)
#ifdef CONFIG_CEVT_CSRC_LS1X
/* setup LS1X PWM timer */
- clk = clk_get(NULL, "ls1x_pwmtimer");
+ clk = clk_get(NULL, "ls1x-pwmtimer");
if (IS_ERR(clk))
panic("unable to get timer clock, err=%ld", PTR_ERR(clk));
diff --git a/arch/mips/loongson32/ls1b/board.c b/arch/mips/loongson32/ls1b/board.c
index 58daeea25739..38a1d404be1b 100644
--- a/arch/mips/loongson32/ls1b/board.c
+++ b/arch/mips/loongson32/ls1b/board.c
@@ -1,5 +1,5 @@
/*
- * Copyright (c) 2011 Zhang, Keguang <keguang.zhang@gmail.com>
+ * Copyright (c) 2011-2016 Zhang, Keguang <keguang.zhang@gmail.com>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License as published by the
@@ -7,26 +7,83 @@
* option) any later version.
*/
+#include <linux/leds.h>
+#include <linux/mtd/partitions.h>
+#include <linux/sizes.h>
+
+#include <loongson1.h>
+#include <dma.h>
+#include <nand.h>
#include <platform.h>
+struct plat_ls1x_dma ls1x_dma_pdata = {
+ .nr_channels = 3,
+};
+
+static struct mtd_partition ls1x_nand_parts[] = {
+ {
+ .name = "kernel",
+ .offset = 0,
+ .size = SZ_16M,
+ },
+ {
+ .name = "rootfs",
+ .offset = MTDPART_OFS_APPEND,
+ .size = MTDPART_SIZ_FULL,
+ },
+};
+
+struct plat_ls1x_nand ls1x_nand_pdata = {
+ .parts = ls1x_nand_parts,
+ .nr_parts = ARRAY_SIZE(ls1x_nand_parts),
+ .hold_cycle = 0x2,
+ .wait_cycle = 0xc,
+};
+
+static const struct gpio_led ls1x_gpio_leds[] __initconst = {
+ {
+ .name = "LED9",
+ .default_trigger = "heartbeat",
+ .gpio = 38,
+ .active_low = 1,
+ .default_state = LEDS_GPIO_DEFSTATE_OFF,
+ }, {
+ .name = "LED6",
+ .default_trigger = "nand-disk",
+ .gpio = 39,
+ .active_low = 1,
+ .default_state = LEDS_GPIO_DEFSTATE_OFF,
+ },
+};
+
+static const struct gpio_led_platform_data ls1x_led_pdata __initconst = {
+ .num_leds = ARRAY_SIZE(ls1x_gpio_leds),
+ .leds = ls1x_gpio_leds,
+};
+
static struct platform_device *ls1b_platform_devices[] __initdata = {
&ls1x_uart_pdev,
&ls1x_cpufreq_pdev,
+ &ls1x_dma_pdev,
&ls1x_eth0_pdev,
&ls1x_eth1_pdev,
&ls1x_ehci_pdev,
+ &ls1x_gpio0_pdev,
+ &ls1x_gpio1_pdev,
+ &ls1x_nand_pdev,
&ls1x_rtc_pdev,
};
static int __init ls1b_platform_init(void)
{
- int err;
+ ls1x_serial_set_uartclk(&ls1x_uart_pdev);
+ ls1x_dma_set_platdata(&ls1x_dma_pdata);
+ ls1x_nand_set_platdata(&ls1x_nand_pdata);
- ls1x_serial_setup(&ls1x_uart_pdev);
+ gpio_led_register_device(-1, &ls1x_led_pdata);
- err = platform_add_devices(ls1b_platform_devices,
+ return platform_add_devices(ls1b_platform_devices,
ARRAY_SIZE(ls1b_platform_devices));
- return err;
}
arch_initcall(ls1b_platform_init);
diff --git a/arch/mips/loongson64/Platform b/arch/mips/loongson64/Platform
index 85d808924c94..0fce4608aa88 100644
--- a/arch/mips/loongson64/Platform
+++ b/arch/mips/loongson64/Platform
@@ -31,7 +31,7 @@ cflags-$(CONFIG_CPU_LOONGSON3) += -Wa,--trap
# can't easily be used safely within the kbuild framework.
#
ifeq ($(call cc-ifversion, -ge, 0409, y), y)
- ifeq ($(call ld-ifversion, -ge, 22500000, y), y)
+ ifeq ($(call ld-ifversion, -ge, 225000000, y), y)
cflags-$(CONFIG_CPU_LOONGSON3) += \
$(call cc-option,-march=loongson3a -U_MIPS_ISA -D_MIPS_ISA=_MIPS_ISA_MIPS64)
else
diff --git a/arch/mips/loongson64/common/dma-swiotlb.c b/arch/mips/loongson64/common/dma-swiotlb.c
index 4ffa6fc81c8f..1a80b6f73ab2 100644
--- a/arch/mips/loongson64/common/dma-swiotlb.c
+++ b/arch/mips/loongson64/common/dma-swiotlb.c
@@ -10,7 +10,7 @@
#include <dma-coherence.h>
static void *loongson_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
void *ret;
@@ -41,7 +41,7 @@ static void *loongson_dma_alloc_coherent(struct device *dev, size_t size,
}
static void loongson_dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
+ void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
{
swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
@@ -49,7 +49,7 @@ static void loongson_dma_free_coherent(struct device *dev, size_t size,
static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size,
enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
dma_addr_t daddr = swiotlb_map_page(dev, page, offset, size,
dir, attrs);
@@ -59,9 +59,9 @@ static dma_addr_t loongson_dma_map_page(struct device *dev, struct page *page,
static int loongson_dma_map_sg(struct device *dev, struct scatterlist *sg,
int nents, enum dma_data_direction dir,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
- int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, NULL);
+ int r = swiotlb_map_sg_attrs(dev, sg, nents, dir, 0);
mb();
return r;
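These hunks track the kernel-wide change of the DMA-mapping attrs argument from a struct dma_attrs pointer to a plain unsigned long bitmask, so callers now pass 0 or an OR of DMA_ATTR_* flags directly (note the swiotlb_map_sg_attrs() call above now passes 0 rather than NULL). A hedged sketch of a driver-side caller under the new convention (the device and scatterlist are hypothetical):

#include <linux/dma-mapping.h>
#include <linux/scatterlist.h>

/* Map a scatterlist for device reads, skipping the CPU cache sync. */
static int example_map_sg(struct device *dev, struct scatterlist *sg, int nents)
{
        return dma_map_sg_attrs(dev, sg, nents, DMA_TO_DEVICE,
                                DMA_ATTR_SKIP_CPU_SYNC);
}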
diff --git a/arch/mips/loongson64/common/env.c b/arch/mips/loongson64/common/env.c
index d6d07ad56180..57d590ac8004 100644
--- a/arch/mips/loongson64/common/env.c
+++ b/arch/mips/loongson64/common/env.c
@@ -105,6 +105,10 @@ void __init prom_init_env(void)
loongson_chiptemp[1] = 0x900010001fe0019c;
loongson_chiptemp[2] = 0x900020001fe0019c;
loongson_chiptemp[3] = 0x900030001fe0019c;
+ loongson_freqctrl[0] = 0x900000001fe001d0;
+ loongson_freqctrl[1] = 0x900010001fe001d0;
+ loongson_freqctrl[2] = 0x900020001fe001d0;
+ loongson_freqctrl[3] = 0x900030001fe001d0;
loongson_sysconf.ht_control_base = 0x90000EFDFB000000;
loongson_sysconf.workarounds = WORKAROUND_CPUFREQ;
} else if (ecpu->cputype == Loongson_3B) {
@@ -187,7 +191,8 @@ void __init prom_init_env(void)
case PRID_REV_LOONGSON2F:
cpu_clock_freq = 797000000;
break;
- case PRID_REV_LOONGSON3A:
+ case PRID_REV_LOONGSON3A_R1:
+ case PRID_REV_LOONGSON3A_R2:
cpu_clock_freq = 900000000;
break;
case PRID_REV_LOONGSON3B_R1:
diff --git a/arch/mips/loongson64/loongson-3/Makefile b/arch/mips/loongson64/loongson-3/Makefile
index 622fead5ebc9..44bc1482158b 100644
--- a/arch/mips/loongson64/loongson-3/Makefile
+++ b/arch/mips/loongson64/loongson-3/Makefile
@@ -1,7 +1,7 @@
#
# Makefile for Loongson-3 family machines
#
-obj-y += irq.o cop2-ex.o platform.o
+obj-y += irq.o cop2-ex.o platform.o acpi_init.o
obj-$(CONFIG_SMP) += smp.o
diff --git a/arch/mips/loongson64/loongson-3/acpi_init.c b/arch/mips/loongson64/loongson-3/acpi_init.c
new file mode 100644
index 000000000000..dbdad79ead8f
--- /dev/null
+++ b/arch/mips/loongson64/loongson-3/acpi_init.c
@@ -0,0 +1,150 @@
+#include <linux/io.h>
+#include <linux/init.h>
+#include <linux/ioport.h>
+#include <linux/export.h>
+
+#define SBX00_ACPI_IO_BASE 0x800
+#define SBX00_ACPI_IO_SIZE 0x100
+
+#define ACPI_PM_EVT_BLK (SBX00_ACPI_IO_BASE + 0x00) /* 4 bytes */
+#define ACPI_PM_CNT_BLK (SBX00_ACPI_IO_BASE + 0x04) /* 2 bytes */
+#define ACPI_PMA_CNT_BLK (SBX00_ACPI_IO_BASE + 0x0F) /* 1 byte */
+#define ACPI_PM_TMR_BLK (SBX00_ACPI_IO_BASE + 0x18) /* 4 bytes */
+#define ACPI_GPE0_BLK (SBX00_ACPI_IO_BASE + 0x10) /* 8 bytes */
+#define ACPI_END (SBX00_ACPI_IO_BASE + 0x80)
+
+#define PM_INDEX 0xCD6
+#define PM_DATA 0xCD7
+#define PM2_INDEX 0xCD0
+#define PM2_DATA 0xCD1
+
+/*
+ * The SCI interrupt needs the ACPI I/O space; allocate it here
+ */
+
+static int __init register_acpi_resource(void)
+{
+ request_region(SBX00_ACPI_IO_BASE, SBX00_ACPI_IO_SIZE, "acpi");
+ return 0;
+}
+
+static void pmio_write_index(u16 index, u8 reg, u8 value)
+{
+ outb(reg, index);
+ outb(value, index + 1);
+}
+
+static u8 pmio_read_index(u16 index, u8 reg)
+{
+ outb(reg, index);
+ return inb(index + 1);
+}
+
+void pm_iowrite(u8 reg, u8 value)
+{
+ pmio_write_index(PM_INDEX, reg, value);
+}
+EXPORT_SYMBOL(pm_iowrite);
+
+u8 pm_ioread(u8 reg)
+{
+ return pmio_read_index(PM_INDEX, reg);
+}
+EXPORT_SYMBOL(pm_ioread);
+
+void pm2_iowrite(u8 reg, u8 value)
+{
+ pmio_write_index(PM2_INDEX, reg, value);
+}
+EXPORT_SYMBOL(pm2_iowrite);
+
+u8 pm2_ioread(u8 reg)
+{
+ return pmio_read_index(PM2_INDEX, reg);
+}
+EXPORT_SYMBOL(pm2_ioread);
+
+static void acpi_hw_clear_status(void)
+{
+ u16 value;
+
+ /* PMStatus: Clear WakeStatus/PwrBtnStatus */
+ value = inw(ACPI_PM_EVT_BLK);
+ value |= (1 << 8 | 1 << 15);
+ outw(value, ACPI_PM_EVT_BLK);
+
+ /* GPEStatus: Clear all generated events */
+ outl(inl(ACPI_GPE0_BLK), ACPI_GPE0_BLK);
+}
+
+void acpi_registers_setup(void)
+{
+ u32 value;
+
+ /* PM Status Base */
+ pm_iowrite(0x20, ACPI_PM_EVT_BLK & 0xff);
+ pm_iowrite(0x21, ACPI_PM_EVT_BLK >> 8);
+
+ /* PM Control Base */
+ pm_iowrite(0x22, ACPI_PM_CNT_BLK & 0xff);
+ pm_iowrite(0x23, ACPI_PM_CNT_BLK >> 8);
+
+ /* GPM Base */
+ pm_iowrite(0x28, ACPI_GPE0_BLK & 0xff);
+ pm_iowrite(0x29, ACPI_GPE0_BLK >> 8);
+
+ /* ACPI End */
+ pm_iowrite(0x2e, ACPI_END & 0xff);
+ pm_iowrite(0x2f, ACPI_END >> 8);
+
+ /* IO Decode: when AcpiDecodeEnable is set, the south bridge uses the
+ * contents of PM registers 0x20~0x2B to decode the ACPI I/O address. */
+ pm_iowrite(0x0e, 1 << 3);
+
+ /* SCI_EN set */
+ outw(1, ACPI_PM_CNT_BLK);
+
+ /* Enable to generate SCI */
+ pm_iowrite(0x10, pm_ioread(0x10) | 1);
+
+ /* GPM3/GPM9 enable */
+ value = inl(ACPI_GPE0_BLK + 4);
+ outl(value | (1 << 14) | (1 << 22), ACPI_GPE0_BLK + 4);
+
+ /* Set GPM9 as input */
+ pm_iowrite(0x8d, pm_ioread(0x8d) & (~(1 << 1)));
+
+ /* Set GPM9 as non-output */
+ pm_iowrite(0x94, pm_ioread(0x94) | (1 << 3));
+
+ /* GPM3 config ACPI trigger SCIOUT */
+ pm_iowrite(0x33, pm_ioread(0x33) & (~(3 << 4)));
+
+ /* GPM9 config ACPI trigger SCIOUT */
+ pm_iowrite(0x3d, pm_ioread(0x3d) & (~(3 << 2)));
+
+ /* GPM3 config falling edge trigger */
+ pm_iowrite(0x37, pm_ioread(0x37) & (~(1 << 6)));
+
+ /* No wait for STPGNT# in ACPI Sx state */
+ pm_iowrite(0x7c, pm_ioread(0x7c) | (1 << 6));
+
+ /* Set GPM3 pull-down enable */
+ value = pm2_ioread(0xf6);
+ value |= ((1 << 7) | (1 << 3));
+ pm2_iowrite(0xf6, value);
+
+ /* Set GPM9 pull-down enable */
+ value = pm2_ioread(0xf8);
+ value |= ((1 << 5) | (1 << 1));
+ pm2_iowrite(0xf8, value);
+}
+
+int __init sbx00_acpi_init(void)
+{
+ register_acpi_resource();
+ acpi_registers_setup();
+ acpi_hw_clear_status();
+
+ return 0;
+}
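
Review note: pm_iowrite()/pm_ioread() above implement the classic index/data
port idiom - the register number is latched through PM_INDEX and the value is
then transferred through PM_DATA. A minimal, hypothetical user-space model of
that idiom follows; the pm_regs array, model_* helpers and port behaviour are
illustrative stand-ins, not the real SBX00 hardware interface.

#include <stdint.h>
#include <stdio.h>

static uint8_t pm_regs[256];	/* stand-in for the south bridge's PM register file */
static uint8_t pm_index;	/* last value written to the "index" port */

/* Model of outb(reg, PM_INDEX): latch the register number */
static void model_select(uint8_t reg)	{ pm_index = reg; }
/* Model of outb(val, PM_DATA) / inb(PM_DATA): access the selected register */
static void model_write(uint8_t val)	{ pm_regs[pm_index] = val; }
static uint8_t model_read(void)		{ return pm_regs[pm_index]; }

int main(void)
{
	/* Read-modify-write of register 0x10, mirroring what
	 * acpi_registers_setup() does with
	 * pm_iowrite(0x10, pm_ioread(0x10) | 1) to enable SCI generation. */
	model_select(0x10);
	uint8_t v = model_read();
	model_select(0x10);
	model_write(v | 1);

	printf("reg 0x10 = 0x%02x\n", pm_regs[0x10]);
	return 0;
}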
diff --git a/arch/mips/loongson64/loongson-3/hpet.c b/arch/mips/loongson64/loongson-3/hpet.c
index a2631a52ca99..4788bea62a6a 100644
--- a/arch/mips/loongson64/loongson-3/hpet.c
+++ b/arch/mips/loongson64/loongson-3/hpet.c
@@ -13,8 +13,8 @@
#define SMBUS_PCI_REG64 0x64
#define SMBUS_PCI_REGB4 0xb4
-#define HPET_MIN_CYCLES 64
-#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES + (HPET_MIN_CYCLES >> 1))
+#define HPET_MIN_CYCLES 16
+#define HPET_MIN_PROG_DELTA (HPET_MIN_CYCLES * 12)
static DEFINE_SPINLOCK(hpet_lock);
DEFINE_PER_CPU(struct clock_event_device, hpet_clockevent_device);
@@ -157,14 +157,14 @@ static int hpet_tick_resume(struct clock_event_device *evt)
static int hpet_next_event(unsigned long delta,
struct clock_event_device *evt)
{
- unsigned int cnt;
- int res;
+ u32 cnt;
+ s32 res;
cnt = hpet_read(HPET_COUNTER);
- cnt += delta;
+ cnt += (u32) delta;
hpet_write(HPET_T0_CMP, cnt);
- res = (int)(cnt - hpet_read(HPET_COUNTER));
+ res = (s32)(cnt - hpet_read(HPET_COUNTER));
return res < HPET_MIN_CYCLES ? -ETIME : 0;
}
@@ -212,7 +212,7 @@ static void hpet_setup(void)
/* set hpet base address */
smbus_write(SMBUS_PCI_REGB4, HPET_ADDR);
- /* enable decodeing of access to HPET MMIO*/
+ /* enable decoding of access to HPET MMIO*/
smbus_enable(SMBUS_PCI_REG40, (1 << 28));
/* HPET irq enable */
@@ -230,7 +230,7 @@ void __init setup_hpet_timer(void)
cd = &per_cpu(hpet_clockevent_device, cpu);
cd->name = "hpet";
- cd->rating = 320;
+ cd->rating = 100;
cd->features = CLOCK_EVT_FEAT_PERIODIC | CLOCK_EVT_FEAT_ONESHOT;
cd->set_state_shutdown = hpet_set_state_shutdown;
cd->set_state_periodic = hpet_set_state_periodic;
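
Review note: the hpet_next_event() change above relies on modular u32
arithmetic - the programmed comparator value may wrap past zero, and casting
the difference back to s32 recovers the signed distance between the compare
value and the current counter. A small stand-alone check of that property
(plain host C, independent of the actual HPET registers):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HPET_MIN_CYCLES 16

/* Signed distance from 'now' to 'cmp' under 32-bit wraparound. */
static int32_t hpet_distance(uint32_t cmp, uint32_t now)
{
	return (int32_t)(cmp - now);
}

int main(void)
{
	/* Counter close to wrapping: cmp has already wrapped past zero. */
	uint32_t now = 0xfffffff0u;
	uint32_t cmp = now + 64;		/* 0x00000030 after the wrap */

	assert(hpet_distance(cmp, now) == 64);	/* still 64 cycles ahead */
	assert(hpet_distance(now, cmp) == -64);	/* and negative the other way */

	/* The driver returns -ETIME when fewer than HPET_MIN_CYCLES remain. */
	printf("can program: %d\n", hpet_distance(cmp, now) >= HPET_MIN_CYCLES);
	return 0;
}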
diff --git a/arch/mips/loongson64/loongson-3/irq.c b/arch/mips/loongson64/loongson-3/irq.c
index 0f75b6b3d218..8e7649088353 100644
--- a/arch/mips/loongson64/loongson-3/irq.c
+++ b/arch/mips/loongson64/loongson-3/irq.c
@@ -24,19 +24,21 @@ static void ht_irqdispatch(void)
}
}
+#define UNUSED_IPS (CAUSEF_IP5 | CAUSEF_IP4 | CAUSEF_IP1 | CAUSEF_IP0)
+
void mach_irq_dispatch(unsigned int pending)
{
if (pending & CAUSEF_IP7)
do_IRQ(LOONGSON_TIMER_IRQ);
#if defined(CONFIG_SMP)
- else if (pending & CAUSEF_IP6)
+ if (pending & CAUSEF_IP6)
loongson3_ipi_interrupt(NULL);
#endif
- else if (pending & CAUSEF_IP3)
+ if (pending & CAUSEF_IP3)
ht_irqdispatch();
- else if (pending & CAUSEF_IP2)
+ if (pending & CAUSEF_IP2)
do_IRQ(LOONGSON_UART_IRQ);
- else {
+ if (pending & UNUSED_IPS) {
pr_err("%s : spurious interrupt\n", __func__);
spurious_interrupt();
}
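
Review note: the irq.c hunk above drops the else-if chain so that every
pending cause bit is serviced in one dispatch, and the spurious-interrupt path
only fires for lines that really are unused. A compact model of that pattern
(bit positions follow the MIPS Cause IP field; the printf handlers are
placeholders for the real do_IRQ()/IPI handlers):

#include <stdint.h>
#include <stdio.h>

#define IP0 (1u << 8)
#define IP1 (1u << 9)
#define IP2 (1u << 10)
#define IP3 (1u << 11)
#define IP4 (1u << 12)
#define IP5 (1u << 13)
#define IP6 (1u << 14)
#define IP7 (1u << 15)
#define UNUSED_IPS (IP5 | IP4 | IP1 | IP0)

static void dispatch(uint32_t pending)
{
	/* Each source is checked independently; several may fire at once. */
	if (pending & IP7) printf("timer\n");
	if (pending & IP6) printf("ipi\n");
	if (pending & IP3) printf("ht\n");
	if (pending & IP2) printf("uart\n");
	if (pending & UNUSED_IPS) printf("spurious\n");
}

int main(void)
{
	dispatch(IP7 | IP2);	/* with the old else-if chain only the timer ran */
	return 0;
}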
diff --git a/arch/mips/loongson64/loongson-3/numa.c b/arch/mips/loongson64/loongson-3/numa.c
index 6f9e010cec4d..282c5a8c2fcd 100644
--- a/arch/mips/loongson64/loongson-3/numa.c
+++ b/arch/mips/loongson64/loongson-3/numa.c
@@ -213,10 +213,10 @@ static void __init node_mem_init(unsigned int node)
BOOTMEM_DEFAULT);
if (node == 0 && node_end_pfn(0) >= (0xffffffff >> PAGE_SHIFT)) {
- /* Reserve 0xff800000~0xffffffff for RS780E integrated GPU */
+ /* Reserve 0xfe000000~0xffffffff for RS780E integrated GPU */
reserve_bootmem_node(NODE_DATA(node),
- (node_addrspace_offset | 0xff800000),
- 8 << 20, BOOTMEM_DEFAULT);
+ (node_addrspace_offset | 0xfe000000),
+ 32 << 20, BOOTMEM_DEFAULT);
}
sparse_memory_present_with_active_regions(node);
diff --git a/arch/mips/loongson64/loongson-3/smp.c b/arch/mips/loongson64/loongson-3/smp.c
index 509832a9836c..2fec6f753a35 100644
--- a/arch/mips/loongson64/loongson-3/smp.c
+++ b/arch/mips/loongson64/loongson-3/smp.c
@@ -417,11 +417,11 @@ static int loongson3_cpu_disable(void)
return -EBUSY;
set_cpu_online(cpu, false);
+ calculate_cpu_foreign_map();
cpumask_clear_cpu(cpu, &cpu_callin_map);
local_irq_save(flags);
fixup_irqs();
local_irq_restore(flags);
- flush_cache_all();
local_flush_tlb_all();
return 0;
@@ -440,7 +440,7 @@ static void loongson3_cpu_die(unsigned int cpu)
* flush all L1 entries at first. Then, another core (usually Core 0) can
* safely disable the clock of the target core. loongson3_play_dead() is
* called via CKSEG1 (uncached and unmmaped) */
-static void loongson3a_play_dead(int *state_addr)
+static void loongson3a_r1_play_dead(int *state_addr)
{
register int val;
register long cpuid, core, node, count;
@@ -502,6 +502,89 @@ static void loongson3a_play_dead(int *state_addr)
: "a1");
}
+static void loongson3a_r2_play_dead(int *state_addr)
+{
+ register int val;
+ register long cpuid, core, node, count;
+ register void *addr, *base, *initfunc;
+
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noreorder \n"
+ " li %[addr], 0x80000000 \n" /* KSEG0 */
+ "1: cache 0, 0(%[addr]) \n" /* flush L1 ICache */
+ " cache 0, 1(%[addr]) \n"
+ " cache 0, 2(%[addr]) \n"
+ " cache 0, 3(%[addr]) \n"
+ " cache 1, 0(%[addr]) \n" /* flush L1 DCache */
+ " cache 1, 1(%[addr]) \n"
+ " cache 1, 2(%[addr]) \n"
+ " cache 1, 3(%[addr]) \n"
+ " addiu %[sets], %[sets], -1 \n"
+ " bnez %[sets], 1b \n"
+ " addiu %[addr], %[addr], 0x40 \n"
+ " li %[addr], 0x80000000 \n" /* KSEG0 */
+ "2: cache 2, 0(%[addr]) \n" /* flush L1 VCache */
+ " cache 2, 1(%[addr]) \n"
+ " cache 2, 2(%[addr]) \n"
+ " cache 2, 3(%[addr]) \n"
+ " cache 2, 4(%[addr]) \n"
+ " cache 2, 5(%[addr]) \n"
+ " cache 2, 6(%[addr]) \n"
+ " cache 2, 7(%[addr]) \n"
+ " cache 2, 8(%[addr]) \n"
+ " cache 2, 9(%[addr]) \n"
+ " cache 2, 10(%[addr]) \n"
+ " cache 2, 11(%[addr]) \n"
+ " cache 2, 12(%[addr]) \n"
+ " cache 2, 13(%[addr]) \n"
+ " cache 2, 14(%[addr]) \n"
+ " cache 2, 15(%[addr]) \n"
+ " addiu %[vsets], %[vsets], -1 \n"
+ " bnez %[vsets], 2b \n"
+ " addiu %[addr], %[addr], 0x40 \n"
+ " li %[val], 0x7 \n" /* *state_addr = CPU_DEAD; */
+ " sw %[val], (%[state_addr]) \n"
+ " sync \n"
+ " cache 21, (%[state_addr]) \n" /* flush entry of *state_addr */
+ " .set pop \n"
+ : [addr] "=&r" (addr), [val] "=&r" (val)
+ : [state_addr] "r" (state_addr),
+ [sets] "r" (cpu_data[smp_processor_id()].dcache.sets),
+ [vsets] "r" (cpu_data[smp_processor_id()].vcache.sets));
+
+ __asm__ __volatile__(
+ " .set push \n"
+ " .set noreorder \n"
+ " .set mips64 \n"
+ " mfc0 %[cpuid], $15, 1 \n"
+ " andi %[cpuid], 0x3ff \n"
+ " dli %[base], 0x900000003ff01000 \n"
+ " andi %[core], %[cpuid], 0x3 \n"
+ " sll %[core], 8 \n" /* get core id */
+ " or %[base], %[base], %[core] \n"
+ " andi %[node], %[cpuid], 0xc \n"
+ " dsll %[node], 42 \n" /* get node id */
+ " or %[base], %[base], %[node] \n"
+ "1: li %[count], 0x100 \n" /* wait for init loop */
+ "2: bnez %[count], 2b \n" /* limit mailbox access */
+ " addiu %[count], -1 \n"
+ " ld %[initfunc], 0x20(%[base]) \n" /* get PC via mailbox */
+ " beqz %[initfunc], 1b \n"
+ " nop \n"
+ " ld $sp, 0x28(%[base]) \n" /* get SP via mailbox */
+ " ld $gp, 0x30(%[base]) \n" /* get GP via mailbox */
+ " ld $a1, 0x38(%[base]) \n"
+ " jr %[initfunc] \n" /* jump to initial PC */
+ " nop \n"
+ " .set pop \n"
+ : [core] "=&r" (core), [node] "=&r" (node),
+ [base] "=&r" (base), [cpuid] "=&r" (cpuid),
+ [count] "=&r" (count), [initfunc] "=&r" (initfunc)
+ : /* No Input */
+ : "a1");
+}
+
static void loongson3b_play_dead(int *state_addr)
{
register int val;
@@ -573,13 +656,18 @@ void play_dead(void)
void (*play_dead_at_ckseg1)(int *);
idle_task_exit();
- switch (loongson_sysconf.cputype) {
- case Loongson_3A:
+ switch (read_c0_prid() & PRID_REV_MASK) {
+ case PRID_REV_LOONGSON3A_R1:
default:
play_dead_at_ckseg1 =
- (void *)CKSEG1ADDR((unsigned long)loongson3a_play_dead);
+ (void *)CKSEG1ADDR((unsigned long)loongson3a_r1_play_dead);
+ break;
+ case PRID_REV_LOONGSON3A_R2:
+ play_dead_at_ckseg1 =
+ (void *)CKSEG1ADDR((unsigned long)loongson3a_r2_play_dead);
break;
- case Loongson_3B:
+ case PRID_REV_LOONGSON3B_R1:
+ case PRID_REV_LOONGSON3B_R2:
play_dead_at_ckseg1 =
(void *)CKSEG1ADDR((unsigned long)loongson3b_play_dead);
break;
@@ -594,9 +682,9 @@ void loongson3_disable_clock(int cpu)
uint64_t core_id = cpu_data[cpu].core;
uint64_t package_id = cpu_data[cpu].package;
- if (loongson_sysconf.cputype == Loongson_3A) {
+ if ((read_c0_prid() & PRID_REV_MASK) == PRID_REV_LOONGSON3A_R1) {
LOONGSON_CHIPCFG(package_id) &= ~(1 << (12 + core_id));
- } else if (loongson_sysconf.cputype == Loongson_3B) {
+ } else {
if (!(loongson_sysconf.workarounds & WORKAROUND_CPUHOTPLUG))
LOONGSON_FREQCTRL(package_id) &= ~(1 << (core_id * 4 + 3));
}
@@ -607,9 +695,9 @@ void loongson3_enable_clock(int cpu)
uint64_t core_id = cpu_data[cpu].core;
uint64_t package_id = cpu_data[cpu].package;
- if (loongson_sysconf.cputype == Loongson_3A) {
+ if ((read_c0_prid() & PRID_REV_MASK) == PRID_REV_LOONGSON3A_R1) {
LOONGSON_CHIPCFG(package_id) |= 1 << (12 + core_id);
- } else if (loongson_sysconf.cputype == Loongson_3B) {
+ } else {
if (!(loongson_sysconf.workarounds & WORKAROUND_CPUHOTPLUG))
LOONGSON_FREQCTRL(package_id) |= 1 << (core_id * 4 + 3);
}
diff --git a/arch/mips/math-emu/Makefile b/arch/mips/math-emu/Makefile
index a19641d3ac23..e9bbc2a6526f 100644
--- a/arch/mips/math-emu/Makefile
+++ b/arch/mips/math-emu/Makefile
@@ -4,9 +4,9 @@
obj-y += cp1emu.o ieee754dp.o ieee754sp.o ieee754.o \
dp_div.o dp_mul.o dp_sub.o dp_add.o dp_fsp.o dp_cmp.o dp_simple.o \
- dp_tint.o dp_fint.o dp_maddf.o dp_msubf.o dp_2008class.o dp_fmin.o dp_fmax.o \
+ dp_tint.o dp_fint.o dp_maddf.o dp_2008class.o dp_fmin.o dp_fmax.o \
sp_div.o sp_mul.o sp_sub.o sp_add.o sp_fdp.o sp_cmp.o sp_simple.o \
- sp_tint.o sp_fint.o sp_maddf.o sp_msubf.o sp_2008class.o sp_fmin.o sp_fmax.o \
+ sp_tint.o sp_fint.o sp_maddf.o sp_2008class.o sp_fmin.o sp_fmax.o \
dsemul.o
lib-y += ieee754d.o \
diff --git a/arch/mips/math-emu/cp1emu.c b/arch/mips/math-emu/cp1emu.c
index cdfd44ffa51c..36775d20b0e7 100644
--- a/arch/mips/math-emu/cp1emu.c
+++ b/arch/mips/math-emu/cp1emu.c
@@ -434,8 +434,8 @@ static int microMIPS32_to_MIPS32(union mips_instruction *insn_ptr)
* a single subroutine should be used across both
* modules.
*/
-static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
- unsigned long *contpc)
+int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
+ unsigned long *contpc)
{
union mips_instruction insn = (union mips_instruction)dec_insn.insn;
unsigned int fcr31;
@@ -445,9 +445,11 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
case spec_op:
switch (insn.r_format.func) {
case jalr_op:
- regs->regs[insn.r_format.rd] =
- regs->cp0_epc + dec_insn.pc_inc +
- dec_insn.next_pc_inc;
+ if (insn.r_format.rd != 0) {
+ regs->regs[insn.r_format.rd] =
+ regs->cp0_epc + dec_insn.pc_inc +
+ dec_insn.next_pc_inc;
+ }
/* Fall through */
case jr_op:
/* For R6, JR already emulated in jalr_op */
@@ -625,8 +627,8 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
- case cbcond0_op:
- case cbcond1_op:
+ case pop10_op:
+ case pop30_op:
if (!cpu_has_mips_r6)
break;
if (insn.i_format.rt && !insn.i_format.rs)
@@ -681,14 +683,14 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
dec_insn.next_pc_inc;
return 1;
- case beqzcjic_op:
+ case pop66_op:
if (!cpu_has_mips_r6)
break;
*contpc = regs->cp0_epc + dec_insn.pc_inc +
dec_insn.next_pc_inc;
return 1;
- case bnezcjialc_op:
+ case pop76_op:
if (!cpu_has_mips_r6)
break;
if (!insn.i_format.rs)
@@ -782,10 +784,10 @@ static int isBranchInstr(struct pt_regs *regs, struct mm_decoded_insn dec_insn,
*/
static inline int cop1_64bit(struct pt_regs *xcp)
{
- if (config_enabled(CONFIG_64BIT) && !config_enabled(CONFIG_MIPS32_O32))
+ if (IS_ENABLED(CONFIG_64BIT) && !IS_ENABLED(CONFIG_MIPS32_O32))
return 1;
- else if (config_enabled(CONFIG_32BIT) &&
- !config_enabled(CONFIG_MIPS_O32_FP64_SUPPORT))
+ else if (IS_ENABLED(CONFIG_32BIT) &&
+ !IS_ENABLED(CONFIG_MIPS_O32_FP64_SUPPORT))
return 0;
return !test_thread_flag(TIF_32BIT_FPREGS);
@@ -973,9 +975,10 @@ static int cop1Emulate(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
struct mm_decoded_insn dec_insn, void *__user *fault_addr)
{
unsigned long contpc = xcp->cp0_epc + dec_insn.pc_inc;
- unsigned int cond, cbit;
+ unsigned int cond, cbit, bit0;
mips_instruction ir;
int likely, pc_inc;
+ union fpureg *fpr;
u32 __user *wva;
u64 __user *dva;
u32 wval;
@@ -1187,14 +1190,14 @@ emul:
return SIGILL;
cond = likely = 0;
+ fpr = &current->thread.fpu.fpr[MIPSInst_RT(ir)];
+ bit0 = get_fpr32(fpr, 0) & 0x1;
switch (MIPSInst_RS(ir)) {
case bc1eqz_op:
- if (get_fpr32(&current->thread.fpu.fpr[MIPSInst_RT(ir)], 0) & 0x1)
- cond = 1;
+ cond = bit0 == 0;
break;
case bc1nez_op:
- if (!(get_fpr32(&current->thread.fpu.fpr[MIPSInst_RT(ir)], 0) & 0x1))
- cond = 1;
+ cond = bit0 != 0;
break;
}
goto branch_common;
@@ -1265,7 +1268,7 @@ branch_common:
* instruction in the dslot.
*/
sig = mips_dsemul(xcp, ir,
- contpc);
+ bcpc, contpc);
if (sig < 0)
break;
if (sig)
@@ -1320,7 +1323,7 @@ branch_common:
* Single step the non-cp1
* instruction in the dslot
*/
- sig = mips_dsemul(xcp, ir, contpc);
+ sig = mips_dsemul(xcp, ir, bcpc, contpc);
if (sig < 0)
break;
if (sig)
@@ -1674,7 +1677,7 @@ static int fpu_emu(struct pt_regs *xcp, struct mips_fpu_struct *ctx,
union ieee754sp(*b) (union ieee754sp, union ieee754sp);
union ieee754sp(*u) (union ieee754sp);
} handler;
- union ieee754sp fs, ft;
+ union ieee754sp fd, fs, ft;
switch (MIPSInst_FUNC(ir)) {
/* binary ops */
@@ -1945,6 +1948,17 @@ copcsr:
rfmt = w_fmt;
goto copcsr;
+ case fsel_op:
+ if (!cpu_has_mips_r6)
+ return SIGILL;
+
+ SPFROMREG(fd, MIPSInst_FD(ir));
+ if (fd.bits & 0x1)
+ SPFROMREG(rv.s, MIPSInst_FT(ir));
+ else
+ SPFROMREG(rv.s, MIPSInst_FS(ir));
+ break;
+
case fcvtl_op:
if (!cpu_has_mips_3_4_5_64_r2_r6)
return SIGILL;
@@ -1993,7 +2007,7 @@ copcsr:
}
case d_fmt: {
- union ieee754dp fs, ft;
+ union ieee754dp fd, fs, ft;
union {
union ieee754dp(*b) (union ieee754dp, union ieee754dp);
union ieee754dp(*u) (union ieee754dp);
@@ -2243,6 +2257,17 @@ dcopuop:
rfmt = w_fmt;
goto copcsr;
+ case fsel_op:
+ if (!cpu_has_mips_r6)
+ return SIGILL;
+
+ DPFROMREG(fd, MIPSInst_FD(ir));
+ if (fd.bits & 0x1)
+ DPFROMREG(rv.d, MIPSInst_FT(ir));
+ else
+ DPFROMREG(rv.d, MIPSInst_FS(ir));
+ break;
+
case fcvtl_op:
if (!cpu_has_mips_3_4_5_64_r2_r6)
return SIGILL;
diff --git a/arch/mips/math-emu/dp_maddf.c b/arch/mips/math-emu/dp_maddf.c
index 119eda9fa1ea..4a2d03c72959 100644
--- a/arch/mips/math-emu/dp_maddf.c
+++ b/arch/mips/math-emu/dp_maddf.c
@@ -14,8 +14,12 @@
#include "ieee754dp.h"
-union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x,
- union ieee754dp y)
+enum maddf_flags {
+ maddf_negate_product = 1 << 0,
+};
+
+static union ieee754dp _dp_maddf(union ieee754dp z, union ieee754dp x,
+ union ieee754dp y, enum maddf_flags flags)
{
int re;
int rs;
@@ -32,16 +36,15 @@ union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x,
COMPXDP;
COMPYDP;
-
- u64 zm; int ze; int zs __maybe_unused; int zc;
+ COMPZDP;
EXPLODEXDP;
EXPLODEYDP;
- EXPLODEDP(z, zc, zs, ze, zm)
+ EXPLODEZDP;
FLUSHXDP;
FLUSHYDP;
- FLUSHDP(z, zc, zs, ze, zm);
+ FLUSHZDP;
ieee754_clearcx();
@@ -50,7 +53,7 @@ union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x,
ieee754_setcx(IEEE754_INVALID_OPERATION);
return ieee754dp_nanxcpt(z);
case IEEE754_CLASS_DNORM:
- DPDNORMx(zm, ze);
+ DPDNORMZ;
/* QNAN is handled separately below */
}
@@ -154,13 +157,15 @@ union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x,
re = xe + ye;
rs = xs ^ ys;
+ if (flags & maddf_negate_product)
+ rs ^= 1;
/* shunt to top of word */
xm <<= 64 - (DP_FBITS + 1);
ym <<= 64 - (DP_FBITS + 1);
/*
- * Multiply 32 bits xm, ym to give high 32 bits rm with stickness.
+ * Multiply 64 bits xm, ym to give high 64 bits rm with stickiness.
*/
/* 32 * 32 => 64 */
@@ -198,7 +203,7 @@ union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x,
if ((s64) rm < 0) {
rm = (rm >> (64 - (DP_FBITS + 1 + 3))) |
((rm << (DP_FBITS + 1 + 3)) != 0);
- re++;
+ re++;
} else {
rm = (rm >> (64 - (DP_FBITS + 1 + 3 + 1))) |
((rm << (DP_FBITS + 1 + 3 + 1)) != 0);
@@ -263,3 +268,15 @@ union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x,
return ieee754dp_format(zs, ze, zm);
}
+
+union ieee754dp ieee754dp_maddf(union ieee754dp z, union ieee754dp x,
+ union ieee754dp y)
+{
+ return _dp_maddf(z, x, y, 0);
+}
+
+union ieee754dp ieee754dp_msubf(union ieee754dp z, union ieee754dp x,
+ union ieee754dp y)
+{
+ return _dp_maddf(z, x, y, maddf_negate_product);
+}
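
Review note: the refactor above folds MSUBF into the MADDF path - negating the
sign of the intermediate product (rs ^= 1 under maddf_negate_product) turns
fd + fs*ft into fd - fs*ft while reusing the same fused add/round code. A
hedged illustration of that sign identity using host doubles (it does not
model the soft-float rounding of the real fused path):

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Model of the shared helper: optionally negate the product, then add. */
static double fused(double z, double x, double y, bool negate_product)
{
	double prod = x * y;

	if (negate_product)
		prod = -prod;
	return z + prod;
}

static double maddf(double z, double x, double y) { return fused(z, x, y, false); }
static double msubf(double z, double x, double y) { return fused(z, x, y, true); }

int main(void)
{
	assert(maddf(8.0, 2.0, 3.0) == 14.0);	/* 8 + 2*3 */
	assert(msubf(8.0, 2.0, 3.0) ==  2.0);	/* 8 - 2*3 */
	printf("maddf/msubf model ok\n");
	return 0;
}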
diff --git a/arch/mips/math-emu/dp_msubf.c b/arch/mips/math-emu/dp_msubf.c
deleted file mode 100644
index 12241262f856..000000000000
--- a/arch/mips/math-emu/dp_msubf.c
+++ /dev/null
@@ -1,269 +0,0 @@
-/*
- * IEEE754 floating point arithmetic
- * double precision: MSUB.f (Fused Multiply Subtract)
- * MSUBF.fmt: FPR[fd] = FPR[fd] - (FPR[fs] x FPR[ft])
- *
- * MIPS floating point support
- * Copyright (C) 2015 Imagination Technologies, Ltd.
- * Author: Markos Chandras <markos.chandras@imgtec.com>
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; version 2 of the License.
- */
-
-#include "ieee754dp.h"
-
-union ieee754dp ieee754dp_msubf(union ieee754dp z, union ieee754dp x,
- union ieee754dp y)
-{
- int re;
- int rs;
- u64 rm;
- unsigned lxm;
- unsigned hxm;
- unsigned lym;
- unsigned hym;
- u64 lrm;
- u64 hrm;
- u64 t;
- u64 at;
- int s;
-
- COMPXDP;
- COMPYDP;
-
- u64 zm; int ze; int zs __maybe_unused; int zc;
-
- EXPLODEXDP;
- EXPLODEYDP;
- EXPLODEDP(z, zc, zs, ze, zm)
-
- FLUSHXDP;
- FLUSHYDP;
- FLUSHDP(z, zc, zs, ze, zm);
-
- ieee754_clearcx();
-
- switch (zc) {
- case IEEE754_CLASS_SNAN:
- ieee754_setcx(IEEE754_INVALID_OPERATION);
- return ieee754dp_nanxcpt(z);
- case IEEE754_CLASS_DNORM:
- DPDNORMx(zm, ze);
- /* QNAN is handled separately below */
- }
-
- switch (CLPAIR(xc, yc)) {
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
- case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
- case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
- case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
- return ieee754dp_nanxcpt(y);
-
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
- return ieee754dp_nanxcpt(x);
-
- case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
- case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
- case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
- return y;
-
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
- return x;
-
-
- /*
- * Infinity handling
- */
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
- case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
- if (zc == IEEE754_CLASS_QNAN)
- return z;
- ieee754_setcx(IEEE754_INVALID_OPERATION);
- return ieee754dp_indef();
-
- case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
- case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
- if (zc == IEEE754_CLASS_QNAN)
- return z;
- return ieee754dp_inf(xs ^ ys);
-
- case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
- case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
- case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
- case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
- if (zc == IEEE754_CLASS_INF)
- return ieee754dp_inf(zs);
- /* Multiplication is 0 so just return z */
- return z;
-
- case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
- DPDNORMX;
-
- case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
- if (zc == IEEE754_CLASS_QNAN)
- return z;
- else if (zc == IEEE754_CLASS_INF)
- return ieee754dp_inf(zs);
- DPDNORMY;
- break;
-
- case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
- if (zc == IEEE754_CLASS_QNAN)
- return z;
- else if (zc == IEEE754_CLASS_INF)
- return ieee754dp_inf(zs);
- DPDNORMX;
- break;
-
- case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
- if (zc == IEEE754_CLASS_QNAN)
- return z;
- else if (zc == IEEE754_CLASS_INF)
- return ieee754dp_inf(zs);
- /* fall through to real computations */
- }
-
- /* Finally get to do some computation */
-
- /*
- * Do the multiplication bit first
- *
- * rm = xm * ym, re = xe + ye basically
- *
- * At this point xm and ym should have been normalized.
- */
- assert(xm & DP_HIDDEN_BIT);
- assert(ym & DP_HIDDEN_BIT);
-
- re = xe + ye;
- rs = xs ^ ys;
-
- /* shunt to top of word */
- xm <<= 64 - (DP_FBITS + 1);
- ym <<= 64 - (DP_FBITS + 1);
-
- /*
- * Multiply 32 bits xm, ym to give high 32 bits rm with stickness.
- */
-
- /* 32 * 32 => 64 */
-#define DPXMULT(x, y) ((u64)(x) * (u64)y)
-
- lxm = xm;
- hxm = xm >> 32;
- lym = ym;
- hym = ym >> 32;
-
- lrm = DPXMULT(lxm, lym);
- hrm = DPXMULT(hxm, hym);
-
- t = DPXMULT(lxm, hym);
-
- at = lrm + (t << 32);
- hrm += at < lrm;
- lrm = at;
-
- hrm = hrm + (t >> 32);
-
- t = DPXMULT(hxm, lym);
-
- at = lrm + (t << 32);
- hrm += at < lrm;
- lrm = at;
-
- hrm = hrm + (t >> 32);
-
- rm = hrm | (lrm != 0);
-
- /*
- * Sticky shift down to normal rounding precision.
- */
- if ((s64) rm < 0) {
- rm = (rm >> (64 - (DP_FBITS + 1 + 3))) |
- ((rm << (DP_FBITS + 1 + 3)) != 0);
- re++;
- } else {
- rm = (rm >> (64 - (DP_FBITS + 1 + 3 + 1))) |
- ((rm << (DP_FBITS + 1 + 3 + 1)) != 0);
- }
- assert(rm & (DP_HIDDEN_BIT << 3));
-
- /* And now the subtraction */
-
- /* flip sign of r and handle as add */
- rs ^= 1;
-
- assert(zm & DP_HIDDEN_BIT);
-
- /*
- * Provide guard,round and stick bit space.
- */
- zm <<= 3;
-
- if (ze > re) {
- /*
- * Have to shift y fraction right to align.
- */
- s = ze - re;
- rm = XDPSRS(rm, s);
- re += s;
- } else if (re > ze) {
- /*
- * Have to shift x fraction right to align.
- */
- s = re - ze;
- zm = XDPSRS(zm, s);
- ze += s;
- }
- assert(ze == re);
- assert(ze <= DP_EMAX);
-
- if (zs == rs) {
- /*
- * Generate 28 bit result of adding two 27 bit numbers
- * leaving result in xm, xs and xe.
- */
- zm = zm + rm;
-
- if (zm >> (DP_FBITS + 1 + 3)) { /* carry out */
- zm = XDPSRS1(zm);
- ze++;
- }
- } else {
- if (zm >= rm) {
- zm = zm - rm;
- } else {
- zm = rm - zm;
- zs = rs;
- }
- if (zm == 0)
- return ieee754dp_zero(ieee754_csr.rm == FPU_CSR_RD);
-
- /*
- * Normalize to rounding precision.
- */
- while ((zm >> (DP_FBITS + 3)) == 0) {
- zm <<= 1;
- ze--;
- }
- }
-
- return ieee754dp_format(zs, ze, zm);
-}
diff --git a/arch/mips/math-emu/dp_mul.c b/arch/mips/math-emu/dp_mul.c
index d0901f03fa19..87d0b44b0614 100644
--- a/arch/mips/math-emu/dp_mul.c
+++ b/arch/mips/math-emu/dp_mul.c
@@ -125,7 +125,7 @@ union ieee754dp ieee754dp_mul(union ieee754dp x, union ieee754dp y)
ym <<= 64 - (DP_FBITS + 1);
/*
- * Multiply 32 bits xm, ym to give high 32 bits rm with stickness.
+ * Multiply 64 bits xm, ym to give high 64 bits rm with stickiness.
*/
/* 32 * 32 => 64 */
@@ -163,7 +163,7 @@ union ieee754dp ieee754dp_mul(union ieee754dp x, union ieee754dp y)
if ((s64) rm < 0) {
rm = (rm >> (64 - (DP_FBITS + 1 + 3))) |
((rm << (DP_FBITS + 1 + 3)) != 0);
- re++;
+ re++;
} else {
rm = (rm >> (64 - (DP_FBITS + 1 + 3 + 1))) |
((rm << (DP_FBITS + 1 + 3 + 1)) != 0);
diff --git a/arch/mips/math-emu/dsemul.c b/arch/mips/math-emu/dsemul.c
index 46b964d2b79c..72a4642eee2c 100644
--- a/arch/mips/math-emu/dsemul.c
+++ b/arch/mips/math-emu/dsemul.c
@@ -1,3 +1,6 @@
+#include <linux/err.h>
+#include <linux/slab.h>
+
#include <asm/branch.h>
#include <asm/cacheflush.h>
#include <asm/fpu_emulator.h>
@@ -5,43 +8,211 @@
#include <asm/mipsregs.h>
#include <asm/uaccess.h>
-#include "ieee754.h"
-
-/*
- * Emulate the arbritrary instruction ir at xcp->cp0_epc. Required when
- * we have to emulate the instruction in a COP1 branch delay slot. Do
- * not change cp0_epc due to the instruction
+/**
+ * struct emuframe - The 'emulation' frame structure
+ * @emul: The instruction to 'emulate'.
+ * @badinst: A break instruction to cause a return to the kernel.
*
- * According to the spec:
- * 1) it shouldn't be a branch :-)
- * 2) it can be a COP instruction :-(
- * 3) if we are tring to run a protected memory space we must take
- * special care on memory access instructions :-(
- */
-
-/*
- * "Trampoline" return routine to catch exception following
- * execution of delay-slot instruction execution.
+ * This structure defines the frames placed within the delay slot emulation
+ * page in response to a call to mips_dsemul(). Each thread may be allocated
+ * only one frame at any given time. The kernel stores within it the
+ * instruction to be 'emulated' followed by a break instruction, then
+ * executes the frame in user mode. The break causes a trap to the kernel
+ * which leads to do_dsemulret() being called unless the instruction in
+ * @emul causes a trap itself, is a branch, or a signal is delivered to
+ * the thread. In these cases the allocated frame will either be reused by
+ * a subsequent delay slot 'emulation', or be freed during signal delivery or
+ * upon thread exit.
+ *
+ * This approach is used because:
+ *
+ * - Actually emulating all instructions isn't feasible. We would need to
+ * be able to handle instructions from all revisions of the MIPS ISA,
+ * all ASEs & all vendor instruction set extensions. This would be a
+ * whole lot of work & continual maintenance burden as new instructions
+ * are introduced, and in the case of some vendor extensions may not
+ * even be possible. Thus we need to take the approach of actually
+ * executing the instruction.
+ *
+ * - We must execute the instruction within user context. If we were to
+ * execute the instruction in kernel mode then it would have access to
+ * kernel resources without very careful checks, leaving us with a
+ * high potential for security or stability issues to arise.
+ *
+ * - We used to place the frame on the user's stack, but this requires
+ * that the stack be executable. This is bad for security, so the
+ * per-process page is now used instead.
+ *
+ * - The instruction in @emul may be something entirely invalid for a
+ * delay slot. The user may (intentionally or otherwise) place a branch
+ * in a delay slot, or a kernel mode instruction, or something else
+ * which generates an exception. Thus we can't rely upon the break in
+ * @badinst always being hit. For this reason we track the index of the
+ * frame allocated to each thread, allowing us to clean it up at later
+ * points such as signal delivery or thread exit.
+ *
+ * - The user may generate a fake struct emuframe if they wish, invoking
+ * the BRK_MEMU break instruction themselves. We must therefore not
+ * trust that BRK_MEMU means there's actually a valid frame allocated
+ * to the thread, and must not allow the user to do anything they
+ * couldn't already.
*/
-
struct emuframe {
mips_instruction emul;
mips_instruction badinst;
- mips_instruction cookie;
- unsigned long epc;
};
-/*
- * Set up an emulation frame for instruction IR, from a delay slot of
- * a branch jumping to CPC. Return 0 if successful, -1 if no emulation
- * required, otherwise a signal number causing a frame setup failure.
- */
-int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
+static const int emupage_frame_count = PAGE_SIZE / sizeof(struct emuframe);
+
+static inline __user struct emuframe *dsemul_page(void)
+{
+ return (__user struct emuframe *)STACK_TOP;
+}
+
+static int alloc_emuframe(void)
+{
+ mm_context_t *mm_ctx = &current->mm->context;
+ int idx;
+
+retry:
+ spin_lock(&mm_ctx->bd_emupage_lock);
+
+ /* Ensure we have an allocation bitmap */
+ if (!mm_ctx->bd_emupage_allocmap) {
+ mm_ctx->bd_emupage_allocmap =
+ kcalloc(BITS_TO_LONGS(emupage_frame_count),
+ sizeof(unsigned long),
+ GFP_ATOMIC);
+
+ if (!mm_ctx->bd_emupage_allocmap) {
+ idx = BD_EMUFRAME_NONE;
+ goto out_unlock;
+ }
+ }
+
+ /* Attempt to allocate a single bit/frame */
+ idx = bitmap_find_free_region(mm_ctx->bd_emupage_allocmap,
+ emupage_frame_count, 0);
+ if (idx < 0) {
+ /*
+ * Failed to allocate a frame. We'll wait until one becomes
+ * available. We unlock the page so that other threads actually
+ * get the opportunity to free their frames, which means
+ * technically the result of bitmap_full may be incorrect.
+ * However the worst case is that we repeat all this and end up
+ * back here again.
+ */
+ spin_unlock(&mm_ctx->bd_emupage_lock);
+ if (!wait_event_killable(mm_ctx->bd_emupage_queue,
+ !bitmap_full(mm_ctx->bd_emupage_allocmap,
+ emupage_frame_count)))
+ goto retry;
+
+ /* Received a fatal signal - just give in */
+ return BD_EMUFRAME_NONE;
+ }
+
+ /* Success! */
+ pr_debug("allocate emuframe %d to %d\n", idx, current->pid);
+out_unlock:
+ spin_unlock(&mm_ctx->bd_emupage_lock);
+ return idx;
+}
+
+static void free_emuframe(int idx, struct mm_struct *mm)
+{
+ mm_context_t *mm_ctx = &mm->context;
+
+ spin_lock(&mm_ctx->bd_emupage_lock);
+
+ pr_debug("free emuframe %d from %d\n", idx, current->pid);
+ bitmap_clear(mm_ctx->bd_emupage_allocmap, idx, 1);
+
+ /* If some thread is waiting for a frame, now's its chance */
+ wake_up(&mm_ctx->bd_emupage_queue);
+
+ spin_unlock(&mm_ctx->bd_emupage_lock);
+}
+
+static bool within_emuframe(struct pt_regs *regs)
+{
+ unsigned long base = (unsigned long)dsemul_page();
+
+ if (regs->cp0_epc < base)
+ return false;
+ if (regs->cp0_epc >= (base + PAGE_SIZE))
+ return false;
+
+ return true;
+}
+
+bool dsemul_thread_cleanup(struct task_struct *tsk)
+{
+ int fr_idx;
+
+ /* Clear any allocated frame, retrieving its index */
+ fr_idx = atomic_xchg(&tsk->thread.bd_emu_frame, BD_EMUFRAME_NONE);
+
+ /* If no frame was allocated, we're done */
+ if (fr_idx == BD_EMUFRAME_NONE)
+ return false;
+
+ task_lock(tsk);
+
+ /* Free the frame that this thread had allocated */
+ if (tsk->mm)
+ free_emuframe(fr_idx, tsk->mm);
+
+ task_unlock(tsk);
+ return true;
+}
+
+bool dsemul_thread_rollback(struct pt_regs *regs)
+{
+ struct emuframe __user *fr;
+ int fr_idx;
+
+ /* Do nothing if we're not executing from a frame */
+ if (!within_emuframe(regs))
+ return false;
+
+ /* Find the frame being executed */
+ fr_idx = atomic_read(&current->thread.bd_emu_frame);
+ if (fr_idx == BD_EMUFRAME_NONE)
+ return false;
+ fr = &dsemul_page()[fr_idx];
+
+ /*
+ * If the PC is at the emul instruction, roll back to the branch. If
+ * PC is at the badinst (break) instruction, we've already emulated the
+ * instruction so progress to the continue PC. If it's anything else
+ * then something is amiss & the user has branched into some other area
+ * of the emupage - we'll free the allocated frame anyway.
+ */
+ if (msk_isa16_mode(regs->cp0_epc) == (unsigned long)&fr->emul)
+ regs->cp0_epc = current->thread.bd_emu_branch_pc;
+ else if (msk_isa16_mode(regs->cp0_epc) == (unsigned long)&fr->badinst)
+ regs->cp0_epc = current->thread.bd_emu_cont_pc;
+
+ atomic_set(&current->thread.bd_emu_frame, BD_EMUFRAME_NONE);
+ free_emuframe(fr_idx, current->mm);
+ return true;
+}
+
+void dsemul_mm_cleanup(struct mm_struct *mm)
+{
+ mm_context_t *mm_ctx = &mm->context;
+
+ kfree(mm_ctx->bd_emupage_allocmap);
+}
+
+int mips_dsemul(struct pt_regs *regs, mips_instruction ir,
+ unsigned long branch_pc, unsigned long cont_pc)
{
int isa16 = get_isa16_mode(regs->cp0_epc);
mips_instruction break_math;
struct emuframe __user *fr;
- int err;
+ int err, fr_idx;
/* NOP is easy */
if (ir == 0)
@@ -60,7 +231,7 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
unsigned int rs;
s32 v;
- rs = (((insn.mm_a_format.rs + 0x1e) & 0xf) + 2);
+ rs = (((insn.mm_a_format.rs + 0xe) & 0xf) + 2);
v = regs->cp0_epc & ~3;
v += insn.mm_a_format.simmediate << 2;
regs->regs[rs] = (long)v;
@@ -68,30 +239,20 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
}
}
- pr_debug("dsemul %lx %lx\n", regs->cp0_epc, cpc);
+ pr_debug("dsemul 0x%08lx cont at 0x%08lx\n", regs->cp0_epc, cont_pc);
- /*
- * The strategy is to push the instruction onto the user stack
- * and put a trap after it which we can catch and jump to
- * the required address any alternative apart from full
- * instruction emulation!!.
- *
- * Algorithmics used a system call instruction, and
- * borrowed that vector. MIPS/Linux version is a bit
- * more heavyweight in the interests of portability and
- * multiprocessor support. For Linux we use a BREAK 514
- * instruction causing a breakpoint exception.
- */
- break_math = BREAK_MATH(isa16);
-
- /* Ensure that the two instructions are in the same cache line */
- fr = (struct emuframe __user *)
- ((regs->regs[29] - sizeof(struct emuframe)) & ~0x7);
-
- /* Verify that the stack pointer is not competely insane */
- if (unlikely(!access_ok(VERIFY_WRITE, fr, sizeof(struct emuframe))))
+ /* Allocate a frame if we don't already have one */
+ fr_idx = atomic_read(&current->thread.bd_emu_frame);
+ if (fr_idx == BD_EMUFRAME_NONE)
+ fr_idx = alloc_emuframe();
+ if (fr_idx == BD_EMUFRAME_NONE)
return SIGBUS;
+ fr = &dsemul_page()[fr_idx];
+
+ /* Retrieve the appropriately encoded break instruction */
+ break_math = BREAK_MATH(isa16);
+ /* Write the instructions to the frame */
if (isa16) {
err = __put_user(ir >> 16,
(u16 __user *)(&fr->emul));
@@ -106,84 +267,36 @@ int mips_dsemul(struct pt_regs *regs, mips_instruction ir, unsigned long cpc)
err |= __put_user(break_math, &fr->badinst);
}
- err |= __put_user((mips_instruction)BD_COOKIE, &fr->cookie);
- err |= __put_user(cpc, &fr->epc);
-
if (unlikely(err)) {
MIPS_FPU_EMU_INC_STATS(errors);
+ free_emuframe(fr_idx, current->mm);
return SIGBUS;
}
+ /* Record the PC of the branch, PC to continue from & frame index */
+ current->thread.bd_emu_branch_pc = branch_pc;
+ current->thread.bd_emu_cont_pc = cont_pc;
+ atomic_set(&current->thread.bd_emu_frame, fr_idx);
+
+ /* Change user register context to execute the frame */
regs->cp0_epc = (unsigned long)&fr->emul | isa16;
+ /* Ensure the icache observes our newly written frame */
flush_cache_sigtramp((unsigned long)&fr->emul);
return 0;
}
-int do_dsemulret(struct pt_regs *xcp)
+bool do_dsemulret(struct pt_regs *xcp)
{
- int isa16 = get_isa16_mode(xcp->cp0_epc);
- struct emuframe __user *fr;
- unsigned long epc;
- u32 insn, cookie;
- int err = 0;
- u16 instr[2];
-
- fr = (struct emuframe __user *)
- (msk_isa16_mode(xcp->cp0_epc) - sizeof(mips_instruction));
-
- /*
- * If we can't even access the area, something is very wrong, but we'll
- * leave that to the default handling
- */
- if (!access_ok(VERIFY_READ, fr, sizeof(struct emuframe)))
- return 0;
-
- /*
- * Do some sanity checking on the stackframe:
- *
- * - Is the instruction pointed to by the EPC an BREAK_MATH?
- * - Is the following memory word the BD_COOKIE?
- */
- if (isa16) {
- err = __get_user(instr[0],
- (u16 __user *)(&fr->badinst));
- err |= __get_user(instr[1],
- (u16 __user *)((long)(&fr->badinst) + 2));
- insn = (instr[0] << 16) | instr[1];
- } else {
- err = __get_user(insn, &fr->badinst);
- }
- err |= __get_user(cookie, &fr->cookie);
-
- if (unlikely(err ||
- insn != BREAK_MATH(isa16) || cookie != BD_COOKIE)) {
+ /* Cleanup the allocated frame, returning if there wasn't one */
+ if (!dsemul_thread_cleanup(current)) {
MIPS_FPU_EMU_INC_STATS(errors);
- return 0;
- }
-
- /*
- * At this point, we are satisfied that it's a BD emulation trap. Yes,
- * a user might have deliberately put two malformed and useless
- * instructions in a row in his program, in which case he's in for a
- * nasty surprise - the next instruction will be treated as a
- * continuation address! Alas, this seems to be the only way that we
- * can handle signals, recursion, and longjmps() in the context of
- * emulating the branch delay instruction.
- */
-
- pr_debug("dsemulret\n");
-
- if (__get_user(epc, &fr->epc)) { /* Saved EPC */
- /* This is not a good situation to be in */
- force_sig(SIGBUS, current);
-
- return 0;
+ return false;
}
/* Set EPC to return to post-branch instruction */
- xcp->cp0_epc = epc;
- MIPS_FPU_EMU_INC_STATS(ds_emul);
- return 1;
+ xcp->cp0_epc = current->thread.bd_emu_cont_pc;
+ pr_debug("dsemulret to 0x%08lx\n", xcp->cp0_epc);
+ return true;
}
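
Review note: as the struct emuframe comment above describes, each mm gets a
single page of frames and a bitmap tracks which slots are in use, so at most
PAGE_SIZE / sizeof(struct emuframe) threads can hold a frame at once. A
simplified user-space model of that slot allocator follows; the 4 KiB page
size, the two-word frame layout and the alloc_frame()/free_frame() names are
assumptions for illustration, not the kernel's bd_emupage helpers.

#include <stdint.h>
#include <stdio.h>

#define PAGE_SIZE 4096

struct emuframe {
	uint32_t emul;		/* instruction to execute */
	uint32_t badinst;	/* break instruction trapping back to the kernel */
};

#define FRAME_COUNT (PAGE_SIZE / sizeof(struct emuframe))

static unsigned long allocmap[(FRAME_COUNT + 63) / 64];

/* Find and claim a free frame index, or return -1 if the page is full. */
static int alloc_frame(void)
{
	for (unsigned int i = 0; i < FRAME_COUNT; i++) {
		if (!(allocmap[i / 64] & (1UL << (i % 64)))) {
			allocmap[i / 64] |= 1UL << (i % 64);
			return (int)i;
		}
	}
	return -1;
}

static void free_frame(int idx)
{
	allocmap[idx / 64] &= ~(1UL << (idx % 64));
}

int main(void)
{
	int a = alloc_frame(), b = alloc_frame();

	printf("%zu frames per page; got slots %d and %d\n",
	       (size_t)FRAME_COUNT, a, b);
	free_frame(a);
	free_frame(b);
	return 0;
}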
diff --git a/arch/mips/math-emu/ieee754dp.c b/arch/mips/math-emu/ieee754dp.c
index ad3c73436777..465a0342ed4c 100644
--- a/arch/mips/math-emu/ieee754dp.c
+++ b/arch/mips/math-emu/ieee754dp.c
@@ -54,10 +54,13 @@ union ieee754dp __cold ieee754dp_nanxcpt(union ieee754dp r)
assert(ieee754dp_issnan(r));
ieee754_setcx(IEEE754_INVALID_OPERATION);
- if (ieee754_csr.nan2008)
+ if (ieee754_csr.nan2008) {
DPMANT(r) |= DP_MBIT(DP_FBITS - 1);
- else
- r = ieee754dp_indef();
+ } else {
+ DPMANT(r) &= ~DP_MBIT(DP_FBITS - 1);
+ if (!ieee754dp_isnan(r))
+ DPMANT(r) |= DP_MBIT(DP_FBITS - 2);
+ }
return r;
}
@@ -97,7 +100,7 @@ union ieee754dp ieee754dp_format(int sn, int xe, u64 xm)
{
assert(xm); /* we don't gen exact zeros (probably should) */
- assert((xm >> (DP_FBITS + 1 + 3)) == 0); /* no execess */
+ assert((xm >> (DP_FBITS + 1 + 3)) == 0); /* no excess */
assert(xm & (DP_HIDDEN_BIT << 3));
if (xe < DP_EMIN) {
@@ -165,7 +168,7 @@ union ieee754dp ieee754dp_format(int sn, int xe, u64 xm)
/* strip grs bits */
xm >>= 3;
- assert((xm >> (DP_FBITS + 1)) == 0); /* no execess */
+ assert((xm >> (DP_FBITS + 1)) == 0); /* no excess */
assert(xe >= DP_EMIN);
if (xe > DP_EMAX) {
@@ -198,7 +201,7 @@ union ieee754dp ieee754dp_format(int sn, int xe, u64 xm)
ieee754_setcx(IEEE754_UNDERFLOW);
return builddp(sn, DP_EMIN - 1 + DP_EBIAS, xm);
} else {
- assert((xm >> (DP_FBITS + 1)) == 0); /* no execess */
+ assert((xm >> (DP_FBITS + 1)) == 0); /* no excess */
assert(xm & DP_HIDDEN_BIT);
return builddp(sn, xe + DP_EBIAS, xm & ~DP_HIDDEN_BIT);
diff --git a/arch/mips/math-emu/ieee754dp.h b/arch/mips/math-emu/ieee754dp.h
index e2babd98fee3..9ba023004eb6 100644
--- a/arch/mips/math-emu/ieee754dp.h
+++ b/arch/mips/math-emu/ieee754dp.h
@@ -60,6 +60,7 @@ static inline int ieee754dp_finite(union ieee754dp x)
while ((m >> DP_FBITS) == 0) { m <<= 1; e--; }
#define DPDNORMX DPDNORMx(xm, xe)
#define DPDNORMY DPDNORMx(ym, ye)
+#define DPDNORMZ DPDNORMx(zm, ze)
static inline union ieee754dp builddp(int s, int bx, u64 m)
{
diff --git a/arch/mips/math-emu/ieee754int.h b/arch/mips/math-emu/ieee754int.h
index ed7bb277b3e0..8bc2f6963324 100644
--- a/arch/mips/math-emu/ieee754int.h
+++ b/arch/mips/math-emu/ieee754int.h
@@ -55,6 +55,9 @@ static inline int ieee754_class_nan(int xc)
#define COMPYSP \
unsigned ym; int ye; int ys; int yc
+#define COMPZSP \
+ unsigned zm; int ze; int zs; int zc
+
#define EXPLODESP(v, vc, vs, ve, vm) \
{ \
vs = SPSIGN(v); \
@@ -81,6 +84,7 @@ static inline int ieee754_class_nan(int xc)
}
#define EXPLODEXSP EXPLODESP(x, xc, xs, xe, xm)
#define EXPLODEYSP EXPLODESP(y, yc, ys, ye, ym)
+#define EXPLODEZSP EXPLODESP(z, zc, zs, ze, zm)
#define COMPXDP \
@@ -89,6 +93,9 @@ static inline int ieee754_class_nan(int xc)
#define COMPYDP \
u64 ym; int ye; int ys; int yc
+#define COMPZDP \
+ u64 zm; int ze; int zs; int zc
+
#define EXPLODEDP(v, vc, vs, ve, vm) \
{ \
vm = DPMANT(v); \
@@ -115,6 +122,7 @@ static inline int ieee754_class_nan(int xc)
}
#define EXPLODEXDP EXPLODEDP(x, xc, xs, xe, xm)
#define EXPLODEYDP EXPLODEDP(y, yc, ys, ye, ym)
+#define EXPLODEZDP EXPLODEDP(z, zc, zs, ze, zm)
#define FLUSHDP(v, vc, vs, ve, vm) \
if (vc==IEEE754_CLASS_DNORM) { \
@@ -140,7 +148,9 @@ static inline int ieee754_class_nan(int xc)
#define FLUSHXDP FLUSHDP(x, xc, xs, xe, xm)
#define FLUSHYDP FLUSHDP(y, yc, ys, ye, ym)
+#define FLUSHZDP FLUSHDP(z, zc, zs, ze, zm)
#define FLUSHXSP FLUSHSP(x, xc, xs, xe, xm)
#define FLUSHYSP FLUSHSP(y, yc, ys, ye, ym)
+#define FLUSHZSP FLUSHSP(z, zc, zs, ze, zm)
#endif /* __IEEE754INT_H */
diff --git a/arch/mips/math-emu/ieee754sp.c b/arch/mips/math-emu/ieee754sp.c
index def00ffc50fc..260e68965907 100644
--- a/arch/mips/math-emu/ieee754sp.c
+++ b/arch/mips/math-emu/ieee754sp.c
@@ -54,10 +54,13 @@ union ieee754sp __cold ieee754sp_nanxcpt(union ieee754sp r)
assert(ieee754sp_issnan(r));
ieee754_setcx(IEEE754_INVALID_OPERATION);
- if (ieee754_csr.nan2008)
+ if (ieee754_csr.nan2008) {
SPMANT(r) |= SP_MBIT(SP_FBITS - 1);
- else
- r = ieee754sp_indef();
+ } else {
+ SPMANT(r) &= ~SP_MBIT(SP_FBITS - 1);
+ if (!ieee754sp_isnan(r))
+ SPMANT(r) |= SP_MBIT(SP_FBITS - 2);
+ }
return r;
}
@@ -97,7 +100,7 @@ union ieee754sp ieee754sp_format(int sn, int xe, unsigned xm)
{
assert(xm); /* we don't gen exact zeros (probably should) */
- assert((xm >> (SP_FBITS + 1 + 3)) == 0); /* no execess */
+ assert((xm >> (SP_FBITS + 1 + 3)) == 0); /* no excess */
assert(xm & (SP_HIDDEN_BIT << 3));
if (xe < SP_EMIN) {
@@ -138,7 +141,8 @@ union ieee754sp ieee754sp_format(int sn, int xe, unsigned xm)
} else {
/* sticky right shift es bits
*/
- SPXSRSXn(es);
+ xm = XSPSRS(xm, es);
+ xe += es;
assert((xm & (SP_HIDDEN_BIT << 3)) == 0);
assert(xe == SP_EMIN);
}
@@ -163,7 +167,7 @@ union ieee754sp ieee754sp_format(int sn, int xe, unsigned xm)
/* strip grs bits */
xm >>= 3;
- assert((xm >> (SP_FBITS + 1)) == 0); /* no execess */
+ assert((xm >> (SP_FBITS + 1)) == 0); /* no excess */
assert(xe >= SP_EMIN);
if (xe > SP_EMAX) {
@@ -196,7 +200,7 @@ union ieee754sp ieee754sp_format(int sn, int xe, unsigned xm)
ieee754_setcx(IEEE754_UNDERFLOW);
return buildsp(sn, SP_EMIN - 1 + SP_EBIAS, xm);
} else {
- assert((xm >> (SP_FBITS + 1)) == 0); /* no execess */
+ assert((xm >> (SP_FBITS + 1)) == 0); /* no excess */
assert(xm & SP_HIDDEN_BIT);
return buildsp(sn, xe + SP_EBIAS, xm & ~SP_HIDDEN_BIT);
diff --git a/arch/mips/math-emu/ieee754sp.h b/arch/mips/math-emu/ieee754sp.h
index 374a3f00a589..8476067075fe 100644
--- a/arch/mips/math-emu/ieee754sp.h
+++ b/arch/mips/math-emu/ieee754sp.h
@@ -46,25 +46,24 @@ static inline int ieee754sp_finite(union ieee754sp x)
}
/* 3bit extended single precision sticky right shift */
-#define SPXSRSXn(rs) \
- (xe += rs, \
- xm = (rs > (SP_FBITS+3))?1:((xm) >> (rs)) | ((xm) << (32-(rs)) != 0))
+#define XSPSRS(v, rs) \
+ ((rs > (SP_FBITS+3))?1:((v) >> (rs)) | ((v) << (32-(rs)) != 0))
-#define SPXSRSX1() \
- (xe++, (xm = (xm >> 1) | (xm & 1)))
+#define XSPSRS1(m) \
+ ((m >> 1) | (m & 1))
-#define SPXSRSYn(rs) \
- (ye+=rs, \
- ym = (rs > (SP_FBITS+3))?1:((ym) >> (rs)) | ((ym) << (32-(rs)) != 0))
+#define SPXSRSX1() \
+ (xe++, (xm = XSPSRS1(xm)))
#define SPXSRSY1() \
- (ye++, (ym = (ym >> 1) | (ym & 1)))
+ (ye++, (ym = XSPSRS1(ym)))
/* convert denormal to normalized with extended exponent */
#define SPDNORMx(m,e) \
while ((m >> SP_FBITS) == 0) { m <<= 1; e--; }
#define SPDNORMX SPDNORMx(xm, xe)
#define SPDNORMY SPDNORMx(ym, ye)
+#define SPDNORMZ SPDNORMx(zm, ze)
static inline union ieee754sp buildsp(int s, int bx, unsigned m)
{
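
Review note: the new XSPSRS macro above performs a "sticky" right shift - any
bits shifted out are OR'd into the result's least significant bit so the later
rounding step still sees that the discarded part was non-zero. A small
stand-alone version of the same operation (it assumes a shift count of at
least 1, as in the callers, to avoid an undefined 32-bit shift):

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define SP_FBITS 23

/* 3-bit-extended single precision sticky right shift, shift count >= 1. */
static uint32_t xspsrs(uint32_t v, int rs)
{
	if (rs > SP_FBITS + 3)
		return 1;			/* everything became sticky */
	return (v >> rs) | ((v << (32 - rs)) != 0);
}

int main(void)
{
	/* 0b1001 >> 2 discards 0b01; the sticky bit records that loss. */
	assert(xspsrs(0x9, 2) == 0x3);
	/* 0b1000 >> 2 discards only zeros, so no sticky bit is set. */
	assert(xspsrs(0x8, 2) == 0x2);
	printf("sticky shift ok\n");
	return 0;
}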
diff --git a/arch/mips/math-emu/sp_add.c b/arch/mips/math-emu/sp_add.c
index f1c87b07d3b4..c55c0c00bca8 100644
--- a/arch/mips/math-emu/sp_add.c
+++ b/arch/mips/math-emu/sp_add.c
@@ -132,13 +132,15 @@ union ieee754sp ieee754sp_add(union ieee754sp x, union ieee754sp y)
* Have to shift y fraction right to align.
*/
s = xe - ye;
- SPXSRSYn(s);
+ ym = XSPSRS(ym, s);
+ ye += s;
} else if (ye > xe) {
/*
* Have to shift x fraction right to align.
*/
s = ye - xe;
- SPXSRSXn(s);
+ xm = XSPSRS(xm, s);
+ xe += s;
}
assert(xe == ye);
assert(xe <= SP_EMAX);
diff --git a/arch/mips/math-emu/sp_maddf.c b/arch/mips/math-emu/sp_maddf.c
index dd1dd83e34eb..a8cd8b4f235e 100644
--- a/arch/mips/math-emu/sp_maddf.c
+++ b/arch/mips/math-emu/sp_maddf.c
@@ -14,8 +14,12 @@
#include "ieee754sp.h"
-union ieee754sp ieee754sp_maddf(union ieee754sp z, union ieee754sp x,
- union ieee754sp y)
+enum maddf_flags {
+ maddf_negate_product = 1 << 0,
+};
+
+static union ieee754sp _sp_maddf(union ieee754sp z, union ieee754sp x,
+ union ieee754sp y, enum maddf_flags flags)
{
int re;
int rs;
@@ -32,15 +36,15 @@ union ieee754sp ieee754sp_maddf(union ieee754sp z, union ieee754sp x,
COMPXSP;
COMPYSP;
- u32 zm; int ze; int zs __maybe_unused; int zc;
+ COMPZSP;
EXPLODEXSP;
EXPLODEYSP;
- EXPLODESP(z, zc, zs, ze, zm)
+ EXPLODEZSP;
FLUSHXSP;
FLUSHYSP;
- FLUSHSP(z, zc, zs, ze, zm);
+ FLUSHZSP;
ieee754_clearcx();
@@ -49,7 +53,7 @@ union ieee754sp ieee754sp_maddf(union ieee754sp z, union ieee754sp x,
ieee754_setcx(IEEE754_INVALID_OPERATION);
return ieee754sp_nanxcpt(z);
case IEEE754_CLASS_DNORM:
- SPDNORMx(zm, ze);
+ SPDNORMZ;
/* QNAN is handled separately below */
}
@@ -154,6 +158,8 @@ union ieee754sp ieee754sp_maddf(union ieee754sp z, union ieee754sp x,
re = xe + ye;
rs = xs ^ ys;
+ if (flags & maddf_negate_product)
+ rs ^= 1;
/* shunt to top of word */
xm <<= 32 - (SP_FBITS + 1);
@@ -208,16 +214,18 @@ union ieee754sp ieee754sp_maddf(union ieee754sp z, union ieee754sp x,
if (ze > re) {
/*
- * Have to shift y fraction right to align.
+ * Have to shift r fraction right to align.
*/
s = ze - re;
- SPXSRSYn(s);
+ rm = XSPSRS(rm, s);
+ re += s;
} else if (re > ze) {
/*
- * Have to shift x fraction right to align.
+ * Have to shift z fraction right to align.
*/
s = re - ze;
- SPXSRSYn(s);
+ zm = XSPSRS(zm, s);
+ ze += s;
}
assert(ze == re);
assert(ze <= SP_EMAX);
@@ -230,7 +238,8 @@ union ieee754sp ieee754sp_maddf(union ieee754sp z, union ieee754sp x,
zm = zm + rm;
if (zm >> (SP_FBITS + 1 + 3)) { /* carry out */
- SPXSRSX1();
+ zm = XSPSRS1(zm);
+ ze++;
}
} else {
if (zm >= rm) {
@@ -253,3 +262,15 @@ union ieee754sp ieee754sp_maddf(union ieee754sp z, union ieee754sp x,
}
return ieee754sp_format(zs, ze, zm);
}
+
+union ieee754sp ieee754sp_maddf(union ieee754sp z, union ieee754sp x,
+ union ieee754sp y)
+{
+ return _sp_maddf(z, x, y, 0);
+}
+
+union ieee754sp ieee754sp_msubf(union ieee754sp z, union ieee754sp x,
+ union ieee754sp y)
+{
+ return _sp_maddf(z, x, y, maddf_negate_product);
+}
diff --git a/arch/mips/math-emu/sp_msubf.c b/arch/mips/math-emu/sp_msubf.c
deleted file mode 100644
index 81c38b980d69..000000000000
--- a/arch/mips/math-emu/sp_msubf.c
+++ /dev/null
@@ -1,258 +0,0 @@
-/*
- * IEEE754 floating point arithmetic
- * single precision: MSUB.f (Fused Multiply Subtract)
- * MSUBF.fmt: FPR[fd] = FPR[fd] - (FPR[fs] x FPR[ft])
- *
- * MIPS floating point support
- * Copyright (C) 2015 Imagination Technologies, Ltd.
- * Author: Markos Chandras <markos.chandras@imgtec.com>
- *
- * This program is free software; you can distribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; version 2 of the License.
- */
-
-#include "ieee754sp.h"
-
-union ieee754sp ieee754sp_msubf(union ieee754sp z, union ieee754sp x,
- union ieee754sp y)
-{
- int re;
- int rs;
- unsigned rm;
- unsigned short lxm;
- unsigned short hxm;
- unsigned short lym;
- unsigned short hym;
- unsigned lrm;
- unsigned hrm;
- unsigned t;
- unsigned at;
- int s;
-
- COMPXSP;
- COMPYSP;
- u32 zm; int ze; int zs __maybe_unused; int zc;
-
- EXPLODEXSP;
- EXPLODEYSP;
- EXPLODESP(z, zc, zs, ze, zm)
-
- FLUSHXSP;
- FLUSHYSP;
- FLUSHSP(z, zc, zs, ze, zm);
-
- ieee754_clearcx();
-
- switch (zc) {
- case IEEE754_CLASS_SNAN:
- ieee754_setcx(IEEE754_INVALID_OPERATION);
- return ieee754sp_nanxcpt(z);
- case IEEE754_CLASS_DNORM:
- SPDNORMx(zm, ze);
- /* QNAN is handled separately below */
- }
-
- switch (CLPAIR(xc, yc)) {
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_SNAN):
- case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_SNAN):
- case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_SNAN):
- case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_SNAN):
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_SNAN):
- return ieee754sp_nanxcpt(y);
-
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_SNAN):
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_QNAN):
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_ZERO):
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_NORM):
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_DNORM):
- case CLPAIR(IEEE754_CLASS_SNAN, IEEE754_CLASS_INF):
- return ieee754sp_nanxcpt(x);
-
- case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_QNAN):
- case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_QNAN):
- case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_QNAN):
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_QNAN):
- return y;
-
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_QNAN):
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_ZERO):
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_NORM):
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_DNORM):
- case CLPAIR(IEEE754_CLASS_QNAN, IEEE754_CLASS_INF):
- return x;
-
- /*
- * Infinity handling
- */
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_ZERO):
- case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_INF):
- if (zc == IEEE754_CLASS_QNAN)
- return z;
- ieee754_setcx(IEEE754_INVALID_OPERATION);
- return ieee754sp_indef();
-
- case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_INF):
- case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_INF):
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_NORM):
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_DNORM):
- case CLPAIR(IEEE754_CLASS_INF, IEEE754_CLASS_INF):
- if (zc == IEEE754_CLASS_QNAN)
- return z;
- return ieee754sp_inf(xs ^ ys);
-
- case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_ZERO):
- case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_NORM):
- case CLPAIR(IEEE754_CLASS_ZERO, IEEE754_CLASS_DNORM):
- case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_ZERO):
- case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_ZERO):
- if (zc == IEEE754_CLASS_INF)
- return ieee754sp_inf(zs);
- /* Multiplication is 0 so just return z */
- return z;
-
- case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_DNORM):
- SPDNORMX;
-
- case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_DNORM):
- if (zc == IEEE754_CLASS_QNAN)
- return z;
- else if (zc == IEEE754_CLASS_INF)
- return ieee754sp_inf(zs);
- SPDNORMY;
- break;
-
- case CLPAIR(IEEE754_CLASS_DNORM, IEEE754_CLASS_NORM):
- if (zc == IEEE754_CLASS_QNAN)
- return z;
- else if (zc == IEEE754_CLASS_INF)
- return ieee754sp_inf(zs);
- SPDNORMX;
- break;
-
- case CLPAIR(IEEE754_CLASS_NORM, IEEE754_CLASS_NORM):
- if (zc == IEEE754_CLASS_QNAN)
- return z;
- else if (zc == IEEE754_CLASS_INF)
- return ieee754sp_inf(zs);
- /* fall through to real compuation */
- }
-
- /* Finally get to do some computation */
-
- /*
- * Do the multiplication bit first
- *
- * rm = xm * ym, re = xe + ye basically
- *
- * At this point xm and ym should have been normalized.
- */
-
- /* rm = xm * ym, re = xe+ye basically */
- assert(xm & SP_HIDDEN_BIT);
- assert(ym & SP_HIDDEN_BIT);
-
- re = xe + ye;
- rs = xs ^ ys;
-
- /* shunt to top of word */
- xm <<= 32 - (SP_FBITS + 1);
- ym <<= 32 - (SP_FBITS + 1);
-
- /*
- * Multiply 32 bits xm, ym to give high 32 bits rm with stickness.
- */
- lxm = xm & 0xffff;
- hxm = xm >> 16;
- lym = ym & 0xffff;
- hym = ym >> 16;
-
- lrm = lxm * lym; /* 16 * 16 => 32 */
- hrm = hxm * hym; /* 16 * 16 => 32 */
-
- t = lxm * hym; /* 16 * 16 => 32 */
- at = lrm + (t << 16);
- hrm += at < lrm;
- lrm = at;
- hrm = hrm + (t >> 16);
-
- t = hxm * lym; /* 16 * 16 => 32 */
- at = lrm + (t << 16);
- hrm += at < lrm;
- lrm = at;
- hrm = hrm + (t >> 16);
-
- rm = hrm | (lrm != 0);
-
- /*
- * Sticky shift down to normal rounding precision.
- */
- if ((int) rm < 0) {
- rm = (rm >> (32 - (SP_FBITS + 1 + 3))) |
- ((rm << (SP_FBITS + 1 + 3)) != 0);
- re++;
- } else {
- rm = (rm >> (32 - (SP_FBITS + 1 + 3 + 1))) |
- ((rm << (SP_FBITS + 1 + 3 + 1)) != 0);
- }
- assert(rm & (SP_HIDDEN_BIT << 3));
-
- /* And now the subtraction */
-
- /* Flip sign of r and handle as add */
- rs ^= 1;
-
- assert(zm & SP_HIDDEN_BIT);
-
- /*
- * Provide guard,round and stick bit space.
- */
- zm <<= 3;
-
- if (ze > re) {
- /*
- * Have to shift y fraction right to align.
- */
- s = ze - re;
- SPXSRSYn(s);
- } else if (re > ze) {
- /*
- * Have to shift x fraction right to align.
- */
- s = re - ze;
- SPXSRSYn(s);
- }
- assert(ze == re);
- assert(ze <= SP_EMAX);
-
- if (zs == rs) {
- /*
- * Generate 28 bit result of adding two 27 bit numbers
- * leaving result in zm, zs and ze.
- */
- zm = zm + rm;
-
- if (zm >> (SP_FBITS + 1 + 3)) { /* carry out */
- SPXSRSX1(); /* shift preserving sticky */
- }
- } else {
- if (zm >= rm) {
- zm = zm - rm;
- } else {
- zm = rm - zm;
- zs = rs;
- }
- if (zm == 0)
- return ieee754sp_zero(ieee754_csr.rm == FPU_CSR_RD);
-
- /*
- * Normalize in extended single precision
- */
- while ((zm >> (SP_MBITS + 3)) == 0) {
- zm <<= 1;
- ze--;
- }
-
- }
- return ieee754sp_format(zs, ze, zm);
-}
diff --git a/arch/mips/math-emu/sp_sub.c b/arch/mips/math-emu/sp_sub.c
index ec5f937a8b3e..dc998ed47295 100644
--- a/arch/mips/math-emu/sp_sub.c
+++ b/arch/mips/math-emu/sp_sub.c
@@ -134,13 +134,15 @@ union ieee754sp ieee754sp_sub(union ieee754sp x, union ieee754sp y)
* have to shift y fraction right to align
*/
s = xe - ye;
- SPXSRSYn(s);
+ ym = XSPSRS(ym, s);
+ ye += s;
} else if (ye > xe) {
/*
* have to shift x fraction right to align
*/
s = ye - xe;
- SPXSRSXn(s);
+ xm = XSPSRS(xm, s);
+ xe += s;
}
assert(xe == ye);
assert(xe <= SP_EMAX);
diff --git a/arch/mips/mm/c-r4k.c b/arch/mips/mm/c-r4k.c
index caac3d747a90..cd72805b64a7 100644
--- a/arch/mips/mm/c-r4k.c
+++ b/arch/mips/mm/c-r4k.c
@@ -40,6 +40,51 @@
#include <asm/mips-cm.h>
/*
+ * Bits describing what cache ops an SMP callback function may perform.
+ *
+ * R4K_HIT - Virtual user or kernel address based cache operations. The
+ * active_mm must be checked before using user addresses, falling
+ * back to kmap.
+ * R4K_INDEX - Index based cache operations.
+ */
+
+#define R4K_HIT BIT(0)
+#define R4K_INDEX BIT(1)
+
+/**
+ * r4k_op_needs_ipi() - Decide if a cache op needs to be done on every core.
+ * @type: Type of cache operations (R4K_HIT or R4K_INDEX).
+ *
+ * Decides whether a cache op needs to be performed on every core in the system.
+ * This may change depending on the @type of cache operation, as well as the set
+ * of online CPUs, so preemption should be disabled by the caller to prevent CPU
+ * hotplug from changing the result.
+ *
+ * Returns: 1 if the cache operation @type should be done on every core in
+ * the system.
+ * 0 if the cache operation @type is globalized and only needs to
+ *		be performed on a single CPU.
+ */
+static inline bool r4k_op_needs_ipi(unsigned int type)
+{
+ /* The MIPS Coherence Manager (CM) globalizes address-based cache ops */
+ if (type == R4K_HIT && mips_cm_present())
+ return false;
+
+ /*
+ * Hardware doesn't globalize the required cache ops, so SMP calls may
+ * be needed, but only if there are foreign CPUs (non-siblings with
+ * separate caches).
+ */
+ /* cpu_foreign_map[] undeclared when !CONFIG_SMP */
+#ifdef CONFIG_SMP
+ return !cpumask_empty(&cpu_foreign_map[0]);
+#else
+ return false;
+#endif
+}
+
+/*
* Special Variant of smp_call_function for use by cache functions:
*
* o No return value
@@ -48,35 +93,23 @@
* primary cache.
* o doesn't disable interrupts on the local CPU
*/
-static inline void r4k_on_each_cpu(void (*func) (void *info), void *info)
+static inline void r4k_on_each_cpu(unsigned int type,
+ void (*func)(void *info), void *info)
{
preempt_disable();
-
- /*
- * The Coherent Manager propagates address-based cache ops to other
- * cores but not index-based ops. However, r4k_on_each_cpu is used
- * in both cases so there is no easy way to tell what kind of op is
- * executed to the other cores. The best we can probably do is
- * to restrict that call when a CM is not present because both
- * CM-based SMP protocols (CMP & CPS) restrict index-based cache ops.
- */
- if (!mips_cm_present())
- smp_call_function_many(&cpu_foreign_map, func, info, 1);
+ if (r4k_op_needs_ipi(type))
+ smp_call_function_many(&cpu_foreign_map[smp_processor_id()],
+ func, info, 1);
func(info);
preempt_enable();
}
-#if defined(CONFIG_MIPS_CMP) || defined(CONFIG_MIPS_CPS)
-#define cpu_has_safe_index_cacheops 0
-#else
-#define cpu_has_safe_index_cacheops 1
-#endif
-
/*
* Must die.
*/
static unsigned long icache_size __read_mostly;
static unsigned long dcache_size __read_mostly;
+static unsigned long vcache_size __read_mostly;
static unsigned long scache_size __read_mostly;
/*
@@ -447,6 +480,11 @@ static inline void local_r4k___flush_cache_all(void * args)
r4k_blast_scache();
break;
+ case CPU_BMIPS5000:
+ r4k_blast_scache();
+ __sync();
+ break;
+
default:
r4k_blast_dcache();
r4k_blast_icache();
@@ -456,22 +494,44 @@ static inline void local_r4k___flush_cache_all(void * args)
static void r4k___flush_cache_all(void)
{
- r4k_on_each_cpu(local_r4k___flush_cache_all, NULL);
+ r4k_on_each_cpu(R4K_INDEX, local_r4k___flush_cache_all, NULL);
}
-static inline int has_valid_asid(const struct mm_struct *mm)
+/**
+ * has_valid_asid() - Determine if an mm already has an ASID.
+ * @mm: Memory map.
+ * @type: R4K_HIT or R4K_INDEX, type of cache op.
+ *
+ * Determines whether @mm already has an ASID on any of the CPUs which cache ops
+ * of type @type within an r4k_on_each_cpu() call will affect. If
+ * r4k_on_each_cpu() does an SMP call to a single VPE in each core, then the
+ * scope of the operation is confined to sibling CPUs, otherwise all online CPUs
+ * will need to be checked.
+ *
+ * Must be called in non-preemptive context.
+ *
+ * Returns: 1 if the CPUs affected by @type cache ops have an ASID for @mm.
+ * 0 otherwise.
+ */
+static inline int has_valid_asid(const struct mm_struct *mm, unsigned int type)
{
-#ifdef CONFIG_MIPS_MT_SMP
- int i;
+ unsigned int i;
+ const cpumask_t *mask = cpu_present_mask;
- for_each_online_cpu(i)
+ /* cpu_sibling_map[] undeclared when !CONFIG_SMP */
+#ifdef CONFIG_SMP
+ /*
+ * If r4k_on_each_cpu does SMP calls, it does them to a single VPE in
+ * each foreign core, so we only need to worry about siblings.
+ * Otherwise we need to worry about all present CPUs.
+ */
+ if (r4k_op_needs_ipi(type))
+ mask = &cpu_sibling_map[smp_processor_id()];
+#endif
+ for_each_cpu(i, mask)
if (cpu_context(i, mm))
return 1;
-
return 0;
-#else
- return cpu_context(smp_processor_id(), mm);
-#endif
}
static void r4k__flush_cache_vmap(void)
@@ -484,15 +544,26 @@ static void r4k__flush_cache_vunmap(void)
r4k_blast_dcache();
}
+/*
+ * Note: flush_tlb_range() assumes flush_cache_range() sufficiently flushes
+ * whole caches when vma is executable.
+ */
static inline void local_r4k_flush_cache_range(void * args)
{
struct vm_area_struct *vma = args;
int exec = vma->vm_flags & VM_EXEC;
- if (!(has_valid_asid(vma->vm_mm)))
+ if (!has_valid_asid(vma->vm_mm, R4K_INDEX))
return;
- r4k_blast_dcache();
+ /*
+ * If dcache can alias, we must blast it since mapping is changing.
+ * If executable, we must ensure any dirty lines are written back far
+ * enough to be visible to icache.
+ */
+ if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
+ r4k_blast_dcache();
+ /* If executable, blast stale lines from icache */
if (exec)
r4k_blast_icache();
}
@@ -502,15 +573,15 @@ static void r4k_flush_cache_range(struct vm_area_struct *vma,
{
int exec = vma->vm_flags & VM_EXEC;
- if (cpu_has_dc_aliases || (exec && !cpu_has_ic_fills_f_dc))
- r4k_on_each_cpu(local_r4k_flush_cache_range, vma);
+ if (cpu_has_dc_aliases || exec)
+ r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_range, vma);
}
static inline void local_r4k_flush_cache_mm(void * args)
{
struct mm_struct *mm = args;
- if (!has_valid_asid(mm))
+ if (!has_valid_asid(mm, R4K_INDEX))
return;
/*
@@ -535,7 +606,7 @@ static void r4k_flush_cache_mm(struct mm_struct *mm)
if (!cpu_has_dc_aliases)
return;
- r4k_on_each_cpu(local_r4k_flush_cache_mm, mm);
+ r4k_on_each_cpu(R4K_INDEX, local_r4k_flush_cache_mm, mm);
}
struct flush_cache_page_args {
@@ -560,10 +631,10 @@ static inline void local_r4k_flush_cache_page(void *args)
void *vaddr;
/*
- * If ownes no valid ASID yet, cannot possibly have gotten
+ * If owns no valid ASID yet, cannot possibly have gotten
* this page into the cache.
*/
- if (!has_valid_asid(mm))
+ if (!has_valid_asid(mm, R4K_HIT))
return;
addr &= PAGE_MASK;
@@ -630,7 +701,7 @@ static void r4k_flush_cache_page(struct vm_area_struct *vma,
args.addr = addr;
args.pfn = pfn;
- r4k_on_each_cpu(local_r4k_flush_cache_page, &args);
+ r4k_on_each_cpu(R4K_HIT, local_r4k_flush_cache_page, &args);
}
static inline void local_r4k_flush_data_cache_page(void * addr)
@@ -643,18 +714,23 @@ static void r4k_flush_data_cache_page(unsigned long addr)
if (in_atomic())
local_r4k_flush_data_cache_page((void *)addr);
else
- r4k_on_each_cpu(local_r4k_flush_data_cache_page, (void *) addr);
+ r4k_on_each_cpu(R4K_HIT, local_r4k_flush_data_cache_page,
+ (void *) addr);
}
struct flush_icache_range_args {
unsigned long start;
unsigned long end;
+ unsigned int type;
};
-static inline void local_r4k_flush_icache_range(unsigned long start, unsigned long end)
+static inline void __local_r4k_flush_icache_range(unsigned long start,
+ unsigned long end,
+ unsigned int type)
{
if (!cpu_has_ic_fills_f_dc) {
- if (end - start >= dcache_size) {
+ if (type == R4K_INDEX ||
+ (type & R4K_INDEX && end - start >= dcache_size)) {
r4k_blast_dcache();
} else {
R4600_HIT_CACHEOP_WAR_IMPL;
@@ -662,7 +738,8 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned lo
}
}
- if (end - start > icache_size)
+ if (type == R4K_INDEX ||
+ (type & R4K_INDEX && end - start > icache_size))
r4k_blast_icache();
else {
switch (boot_cpu_type()) {
@@ -688,23 +765,52 @@ static inline void local_r4k_flush_icache_range(unsigned long start, unsigned lo
#endif
}
+static inline void local_r4k_flush_icache_range(unsigned long start,
+ unsigned long end)
+{
+ __local_r4k_flush_icache_range(start, end, R4K_HIT | R4K_INDEX);
+}
+
static inline void local_r4k_flush_icache_range_ipi(void *args)
{
struct flush_icache_range_args *fir_args = args;
unsigned long start = fir_args->start;
unsigned long end = fir_args->end;
+ unsigned int type = fir_args->type;
- local_r4k_flush_icache_range(start, end);
+ __local_r4k_flush_icache_range(start, end, type);
}
static void r4k_flush_icache_range(unsigned long start, unsigned long end)
{
struct flush_icache_range_args args;
+ unsigned long size, cache_size;
args.start = start;
args.end = end;
+ args.type = R4K_HIT | R4K_INDEX;
- r4k_on_each_cpu(local_r4k_flush_icache_range_ipi, &args);
+ /*
+ * Indexed cache ops require an SMP call.
+ * Consider if that can or should be avoided.
+ */
+ preempt_disable();
+ if (r4k_op_needs_ipi(R4K_INDEX) && !r4k_op_needs_ipi(R4K_HIT)) {
+ /*
+ * If address-based cache ops don't require an SMP call, then
+ * use them exclusively for small flushes.
+ */
+		size = end - start;
+ cache_size = icache_size;
+ if (!cpu_has_ic_fills_f_dc) {
+ size *= 2;
+ cache_size += dcache_size;
+ }
+ if (size <= cache_size)
+ args.type &= ~R4K_INDEX;
+ }
+ r4k_on_each_cpu(args.type, local_r4k_flush_icache_range_ipi, &args);
+ preempt_enable();
instruction_hazard();
}
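r4k_flush_icache_range() above first requests both hit and index ops, then drops R4K_INDEX when the range is small enough that address-based ops (which need no IPI here) are cheaper than an indexed blast plus an SMP call; when the icache cannot fill from the dcache, the effective size is doubled and the dcache size added, since both caches must then be walked by address. A sketch of that decision in isolation (illustrative, not kernel code):

/* Return non-zero if address-based (hit) ops should be preferred over
 * indexed ops for flushing [start, end) (illustrative sketch). */
static int prefer_hit_ops(unsigned long start, unsigned long end,
			  unsigned long icache_sz, unsigned long dcache_sz,
			  int ic_fills_from_dc)
{
	unsigned long size = end - start;
	unsigned long cache_size = icache_sz;

	if (!ic_fills_from_dc) {
		size *= 2;		/* both I$ and D$ walked by address */
		cache_size += dcache_sz;
	}
	return size <= cache_size;
}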
@@ -731,7 +837,7 @@ static void r4k_dma_cache_wback_inv(unsigned long addr, unsigned long size)
* subset property so we have to flush the primary caches
* explicitly
*/
- if (cpu_has_safe_index_cacheops && size >= dcache_size) {
+ if (size >= dcache_size) {
r4k_blast_dcache();
} else {
R4600_HIT_CACHEOP_WAR_IMPL;
@@ -768,7 +874,7 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
return;
}
- if (cpu_has_safe_index_cacheops && size >= dcache_size) {
+ if (size >= dcache_size) {
r4k_blast_dcache();
} else {
R4600_HIT_CACHEOP_WAR_IMPL;
@@ -781,25 +887,76 @@ static void r4k_dma_cache_inv(unsigned long addr, unsigned long size)
}
#endif /* CONFIG_DMA_NONCOHERENT || CONFIG_DMA_MAYBE_COHERENT */
+struct flush_cache_sigtramp_args {
+ struct mm_struct *mm;
+ struct page *page;
+ unsigned long addr;
+};
+
/*
* While we're protected against bad userland addresses we don't care
* very much about what happens in that case. Usually a segmentation
* fault will dump the process later on anyway ...
*/
-static void local_r4k_flush_cache_sigtramp(void * arg)
+static void local_r4k_flush_cache_sigtramp(void *args)
{
+ struct flush_cache_sigtramp_args *fcs_args = args;
+ unsigned long addr = fcs_args->addr;
+ struct page *page = fcs_args->page;
+ struct mm_struct *mm = fcs_args->mm;
+ int map_coherent = 0;
+ void *vaddr;
+
unsigned long ic_lsize = cpu_icache_line_size();
unsigned long dc_lsize = cpu_dcache_line_size();
unsigned long sc_lsize = cpu_scache_line_size();
- unsigned long addr = (unsigned long) arg;
+
+ /*
+ * If owns no valid ASID yet, cannot possibly have gotten
+ * this page into the cache.
+ */
+ if (!has_valid_asid(mm, R4K_HIT))
+ return;
+
+ if (mm == current->active_mm) {
+ vaddr = NULL;
+ } else {
+ /*
+ * Use kmap_coherent or kmap_atomic to do flushes for
+ * another ASID than the current one.
+ */
+ map_coherent = (cpu_has_dc_aliases &&
+ page_mapcount(page) &&
+ !Page_dcache_dirty(page));
+ if (map_coherent)
+ vaddr = kmap_coherent(page, addr);
+ else
+ vaddr = kmap_atomic(page);
+ addr = (unsigned long)vaddr + (addr & ~PAGE_MASK);
+ }
R4600_HIT_CACHEOP_WAR_IMPL;
- if (dc_lsize)
- protected_writeback_dcache_line(addr & ~(dc_lsize - 1));
- if (!cpu_icache_snoops_remote_store && scache_size)
- protected_writeback_scache_line(addr & ~(sc_lsize - 1));
+ if (!cpu_has_ic_fills_f_dc) {
+ if (dc_lsize)
+ vaddr ? flush_dcache_line(addr & ~(dc_lsize - 1))
+ : protected_writeback_dcache_line(
+ addr & ~(dc_lsize - 1));
+ if (!cpu_icache_snoops_remote_store && scache_size)
+ vaddr ? flush_scache_line(addr & ~(sc_lsize - 1))
+ : protected_writeback_scache_line(
+ addr & ~(sc_lsize - 1));
+ }
if (ic_lsize)
- protected_flush_icache_line(addr & ~(ic_lsize - 1));
+ vaddr ? flush_icache_line(addr & ~(ic_lsize - 1))
+ : protected_flush_icache_line(addr & ~(ic_lsize - 1));
+
+ if (vaddr) {
+ if (map_coherent)
+ kunmap_coherent();
+ else
+ kunmap_atomic(vaddr);
+ }
+
if (MIPS4K_ICACHE_REFILL_WAR) {
__asm__ __volatile__ (
".set push\n\t"
@@ -824,7 +981,23 @@ static void local_r4k_flush_cache_sigtramp(void * arg)
static void r4k_flush_cache_sigtramp(unsigned long addr)
{
- r4k_on_each_cpu(local_r4k_flush_cache_sigtramp, (void *) addr);
+ struct flush_cache_sigtramp_args args;
+ int npages;
+
+ down_read(&current->mm->mmap_sem);
+
+ npages = get_user_pages_fast(addr, 1, 0, &args.page);
+ if (npages < 1)
+ goto out;
+
+ args.mm = current->mm;
+ args.addr = addr;
+
+ r4k_on_each_cpu(R4K_HIT, local_r4k_flush_cache_sigtramp, &args);
+
+ put_page(args.page);
+out:
+ up_read(&current->mm->mmap_sem);
}
static void r4k_flush_icache_all(void)
@@ -838,6 +1011,15 @@ struct flush_kernel_vmap_range_args {
int size;
};
+static inline void local_r4k_flush_kernel_vmap_range_index(void *args)
+{
+ /*
+ * Aliases only affect the primary caches so don't bother with
+ * S-caches or T-caches.
+ */
+ r4k_blast_dcache();
+}
+
static inline void local_r4k_flush_kernel_vmap_range(void *args)
{
struct flush_kernel_vmap_range_args *vmra = args;
@@ -848,12 +1030,8 @@ static inline void local_r4k_flush_kernel_vmap_range(void *args)
* Aliases only affect the primary caches so don't bother with
* S-caches or T-caches.
*/
- if (cpu_has_safe_index_cacheops && size >= dcache_size)
- r4k_blast_dcache();
- else {
- R4600_HIT_CACHEOP_WAR_IMPL;
- blast_dcache_range(vaddr, vaddr + size);
- }
+ R4600_HIT_CACHEOP_WAR_IMPL;
+ blast_dcache_range(vaddr, vaddr + size);
}
static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
@@ -863,7 +1041,12 @@ static void r4k_flush_kernel_vmap_range(unsigned long vaddr, int size)
args.vaddr = (unsigned long) vaddr;
args.size = size;
- r4k_on_each_cpu(local_r4k_flush_kernel_vmap_range, &args);
+ if (size >= dcache_size)
+ r4k_on_each_cpu(R4K_INDEX,
+ local_r4k_flush_kernel_vmap_range_index, NULL);
+ else
+ r4k_on_each_cpu(R4K_HIT, local_r4k_flush_kernel_vmap_range,
+ &args);
}
static inline void rm7k_erratum31(void)
@@ -1148,6 +1331,8 @@ static void probe_pcache(void)
c->dcache.ways *
c->dcache.linesz;
c->dcache.waybit = 0;
+ if ((prid & PRID_REV_MASK) >= PRID_REV_LOONGSON3A_R2)
+ c->options |= MIPS_CPU_PREFETCH;
break;
case CPU_CAVIUM_OCTEON3:
@@ -1191,7 +1376,7 @@ static void probe_pcache(void)
c->icache.linesz;
c->icache.waybit = __ffs(icache_size/c->icache.ways);
- if (config & 0x8) /* VI bit */
+ if (config & MIPS_CONF_VI)
c->icache.flags |= MIPS_CACHE_VTAG;
/*
@@ -1278,6 +1463,8 @@ static void probe_pcache(void)
case CPU_M5150:
case CPU_QEMU_GENERIC:
case CPU_I6400:
+ case CPU_P6600:
+ case CPU_M6250:
if (!(read_c0_config7() & MIPS_CONF7_IAR) &&
(c->icache.waysize > PAGE_SIZE))
c->icache.flags |= MIPS_CACHE_ALIASES;
@@ -1304,7 +1491,14 @@ static void probe_pcache(void)
break;
case CPU_ALCHEMY:
+ case CPU_I6400:
+ c->icache.flags |= MIPS_CACHE_IC_F_DC;
+ break;
+
+ case CPU_BMIPS5000:
c->icache.flags |= MIPS_CACHE_IC_F_DC;
+ /* Cache aliases are handled in hardware; allow HIGHMEM */
+ c->dcache.flags &= ~MIPS_CACHE_ALIASES;
break;
case CPU_LOONGSON2:
@@ -1328,6 +1522,31 @@ static void probe_pcache(void)
c->dcache.linesz);
}
+static void probe_vcache(void)
+{
+ struct cpuinfo_mips *c = &current_cpu_data;
+ unsigned int config2, lsize;
+
+ if (current_cpu_type() != CPU_LOONGSON3)
+ return;
+
+ config2 = read_c0_config2();
+ if ((lsize = ((config2 >> 20) & 15)))
+ c->vcache.linesz = 2 << lsize;
+ else
+ c->vcache.linesz = lsize;
+
+ c->vcache.sets = 64 << ((config2 >> 24) & 15);
+ c->vcache.ways = 1 + ((config2 >> 16) & 15);
+
+ vcache_size = c->vcache.sets * c->vcache.ways * c->vcache.linesz;
+
+ c->vcache.waybit = 0;
+
+ pr_info("Unified victim cache %ldkB %s, linesize %d bytes.\n",
+ vcache_size >> 10, way_string[c->vcache.ways], c->vcache.linesz);
+}
+
/*
* If you even _breathe_ on this function, look at the gcc output and make sure
* it does not pop things on and off the stack for the cache sizing loop that
@@ -1650,6 +1869,7 @@ void r4k_cache_init(void)
struct cpuinfo_mips *c = &current_cpu_data;
probe_pcache();
+ probe_vcache();
setup_scache();
r4k_blast_dcache_page_setup();
@@ -1671,7 +1891,7 @@ void r4k_cache_init(void)
* This code supports virtually indexed processors and will be
* unnecessarily inefficient on physically indexed processors.
*/
- if (c->dcache.linesz)
+ if (c->dcache.linesz && cpu_has_dc_aliases)
shm_align_mask = max_t( unsigned long,
c->dcache.sets * c->dcache.linesz - 1,
PAGE_SIZE - 1);
@@ -1744,12 +1964,24 @@ void r4k_cache_init(void)
flush_icache_range = (void *)b5k_instruction_hazard;
local_flush_icache_range = (void *)b5k_instruction_hazard;
- /* Cache aliases are handled in hardware; allow HIGHMEM */
- current_cpu_data.dcache.flags &= ~MIPS_CACHE_ALIASES;
/* Optimization: an L2 flush implicitly flushes the L1 */
current_cpu_data.options |= MIPS_CPU_INCLUSIVE_CACHES;
break;
+ case CPU_LOONGSON3:
+ /* Loongson-3 maintains cache coherency by hardware */
+ __flush_cache_all = cache_noop;
+ __flush_cache_vmap = cache_noop;
+ __flush_cache_vunmap = cache_noop;
+ __flush_kernel_vmap_range = (void *)cache_noop;
+ flush_cache_mm = (void *)cache_noop;
+ flush_cache_page = (void *)cache_noop;
+ flush_cache_range = (void *)cache_noop;
+ flush_cache_sigtramp = (void *)cache_noop;
+ flush_icache_all = (void *)cache_noop;
+ flush_data_cache_page = (void *)cache_noop;
+ local_flush_data_cache_page = (void *)cache_noop;
+ break;
}
}
diff --git a/arch/mips/mm/cache.c b/arch/mips/mm/cache.c
index 3f159caf6dbc..bf04c6c479a4 100644
--- a/arch/mips/mm/cache.c
+++ b/arch/mips/mm/cache.c
@@ -16,6 +16,7 @@
#include <linux/mm.h>
#include <asm/cacheflush.h>
+#include <asm/highmem.h>
#include <asm/processor.h>
#include <asm/cpu.h>
#include <asm/cpu-features.h>
@@ -83,8 +84,6 @@ void __flush_dcache_page(struct page *page)
struct address_space *mapping = page_mapping(page);
unsigned long addr;
- if (PageHighMem(page))
- return;
if (mapping && !mapping_mapped(mapping)) {
SetPageDcacheDirty(page);
return;
@@ -95,8 +94,15 @@ void __flush_dcache_page(struct page *page)
* case is for exec env/arg pages and those are %99 certainly going to
* get faulted into the tlb (and thus flushed) anyways.
*/
- addr = (unsigned long) page_address(page);
+ if (PageHighMem(page))
+ addr = (unsigned long)kmap_atomic(page);
+ else
+ addr = (unsigned long)page_address(page);
+
flush_data_cache_page(addr);
+
+ if (PageHighMem(page))
+ __kunmap_atomic((void *)addr);
}
EXPORT_SYMBOL(__flush_dcache_page);
@@ -119,33 +125,28 @@ void __flush_anon_page(struct page *page, unsigned long vmaddr)
EXPORT_SYMBOL(__flush_anon_page);
-void __flush_icache_page(struct vm_area_struct *vma, struct page *page)
-{
- unsigned long addr;
-
- if (PageHighMem(page))
- return;
-
- addr = (unsigned long) page_address(page);
- flush_data_cache_page(addr);
-}
-EXPORT_SYMBOL_GPL(__flush_icache_page);
-
-void __update_cache(struct vm_area_struct *vma, unsigned long address,
- pte_t pte)
+void __update_cache(unsigned long address, pte_t pte)
{
struct page *page;
unsigned long pfn, addr;
- int exec = (vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc;
+ int exec = !pte_no_exec(pte) && !cpu_has_ic_fills_f_dc;
pfn = pte_pfn(pte);
if (unlikely(!pfn_valid(pfn)))
return;
page = pfn_to_page(pfn);
- if (page_mapping(page) && Page_dcache_dirty(page)) {
- addr = (unsigned long) page_address(page);
+ if (Page_dcache_dirty(page)) {
+ if (PageHighMem(page))
+ addr = (unsigned long)kmap_atomic(page);
+ else
+ addr = (unsigned long)page_address(page);
+
if (exec || pages_do_alias(addr, address & PAGE_MASK))
flush_data_cache_page(addr);
+
+ if (PageHighMem(page))
+ __kunmap_atomic((void *)addr);
+
ClearPageDcacheDirty(page);
}
}
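Both __flush_dcache_page() and __update_cache() above now handle highmem pages by mapping them temporarily instead of skipping the flush. A minimal sketch of that pattern (illustrative; assumes a reference to the page is already held):

/* Flush the dcache lines of a page that may live in highmem
 * (illustrative sketch of the pattern used above). */
static void flush_page_dcache(struct page *page)
{
	unsigned long addr;

	if (PageHighMem(page))
		addr = (unsigned long)kmap_atomic(page);
	else
		addr = (unsigned long)page_address(page);

	flush_data_cache_page(addr);

	if (PageHighMem(page))
		__kunmap_atomic((void *)addr);
}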
diff --git a/arch/mips/mm/dma-default.c b/arch/mips/mm/dma-default.c
index 730d394ce5f0..b2eadd6fa9a1 100644
--- a/arch/mips/mm/dma-default.c
+++ b/arch/mips/mm/dma-default.c
@@ -88,19 +88,20 @@ static gfp_t massage_gfp_flags(const struct device *dev, gfp_t gfp)
else
#endif
#if defined(CONFIG_ZONE_DMA32) && defined(CONFIG_ZONE_DMA)
- if (dev->coherent_dma_mask < DMA_BIT_MASK(32))
+ if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(32))
dma_flag = __GFP_DMA;
else if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
dma_flag = __GFP_DMA32;
else
#endif
#if defined(CONFIG_ZONE_DMA32) && !defined(CONFIG_ZONE_DMA)
- if (dev->coherent_dma_mask < DMA_BIT_MASK(64))
+ if (dev == NULL || dev->coherent_dma_mask < DMA_BIT_MASK(64))
dma_flag = __GFP_DMA32;
else
#endif
#if defined(CONFIG_ZONE_DMA) && !defined(CONFIG_ZONE_DMA32)
- if (dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
+ if (dev == NULL ||
+ dev->coherent_dma_mask < DMA_BIT_MASK(sizeof(phys_addr_t) * 8))
dma_flag = __GFP_DMA;
else
#endif
@@ -130,7 +131,7 @@ static void *mips_dma_alloc_noncoherent(struct device *dev, size_t size,
}
static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t * dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
void *ret;
struct page *page = NULL;
@@ -140,7 +141,7 @@ static void *mips_dma_alloc_coherent(struct device *dev, size_t size,
* XXX: seems like the coherent and non-coherent implementations could
* be consolidated.
*/
- if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs))
+ if (attrs & DMA_ATTR_NON_CONSISTENT)
return mips_dma_alloc_noncoherent(dev, size, dma_handle, gfp);
gfp = massage_gfp_flags(dev, gfp);
@@ -175,13 +176,13 @@ static void mips_dma_free_noncoherent(struct device *dev, size_t size,
}
static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
- dma_addr_t dma_handle, struct dma_attrs *attrs)
+ dma_addr_t dma_handle, unsigned long attrs)
{
unsigned long addr = (unsigned long) vaddr;
unsigned int count = PAGE_ALIGN(size) >> PAGE_SHIFT;
struct page *page = NULL;
- if (dma_get_attr(DMA_ATTR_NON_CONSISTENT, attrs)) {
+ if (attrs & DMA_ATTR_NON_CONSISTENT) {
mips_dma_free_noncoherent(dev, size, vaddr, dma_handle);
return;
}
@@ -199,7 +200,7 @@ static void mips_dma_free_coherent(struct device *dev, size_t size, void *vaddr,
static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
void *cpu_addr, dma_addr_t dma_addr, size_t size,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
unsigned long user_count = (vma->vm_end - vma->vm_start) >> PAGE_SHIFT;
unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;
@@ -213,7 +214,7 @@ static int mips_dma_mmap(struct device *dev, struct vm_area_struct *vma,
pfn = page_to_pfn(virt_to_page((void *)addr));
- if (dma_get_attr(DMA_ATTR_WRITE_COMBINE, attrs))
+ if (attrs & DMA_ATTR_WRITE_COMBINE)
vma->vm_page_prot = pgprot_writecombine(vma->vm_page_prot);
else
vma->vm_page_prot = pgprot_noncached(vma->vm_page_prot);
@@ -290,7 +291,7 @@ static inline void __dma_sync(struct page *page,
}
static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
- size_t size, enum dma_data_direction direction, struct dma_attrs *attrs)
+ size_t size, enum dma_data_direction direction, unsigned long attrs)
{
if (cpu_needs_post_dma_flush(dev))
__dma_sync(dma_addr_to_page(dev, dma_addr),
@@ -300,7 +301,7 @@ static void mips_dma_unmap_page(struct device *dev, dma_addr_t dma_addr,
}
static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
- int nents, enum dma_data_direction direction, struct dma_attrs *attrs)
+ int nents, enum dma_data_direction direction, unsigned long attrs)
{
int i;
struct scatterlist *sg;
@@ -321,7 +322,7 @@ static int mips_dma_map_sg(struct device *dev, struct scatterlist *sglist,
static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
unsigned long offset, size_t size, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
if (!plat_device_is_coherent(dev))
__dma_sync(page, offset, size, direction);
@@ -331,7 +332,7 @@ static dma_addr_t mips_dma_map_page(struct device *dev, struct page *page,
static void mips_dma_unmap_sg(struct device *dev, struct scatterlist *sglist,
int nhwentries, enum dma_data_direction direction,
- struct dma_attrs *attrs)
+ unsigned long attrs)
{
int i;
struct scatterlist *sg;
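With the dma-default.c conversion above, DMA attributes are passed as a plain unsigned long bitmask and tested with &, rather than through struct dma_attrs and dma_get_attr(). A hypothetical caller, for illustration only (the helper name and surrounding driver context are assumptions, not part of this patch):

#include <linux/dma-mapping.h>

/* Hypothetical driver helper showing the new-style attrs bitmask. */
static void *alloc_noncoherent_buf(struct device *dev, size_t size,
				   dma_addr_t *handle)
{
	return dma_alloc_attrs(dev, size, handle, GFP_KERNEL,
			       DMA_ATTR_NON_CONSISTENT);
}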
diff --git a/arch/mips/mm/fault.c b/arch/mips/mm/fault.c
index 4b88fa031891..9560ad731120 100644
--- a/arch/mips/mm/fault.c
+++ b/arch/mips/mm/fault.c
@@ -153,7 +153,7 @@ good_area:
* make sure we exit gracefully rather than endlessly redo
* the fault.
*/
- fault = handle_mm_fault(mm, vma, address, flags);
+ fault = handle_mm_fault(vma, address, flags);
if ((fault & VM_FAULT_RETRY) && fatal_signal_pending(current))
return;
diff --git a/arch/mips/mm/init.c b/arch/mips/mm/init.c
index 7e5fa0938c21..a5509e7dcad2 100644
--- a/arch/mips/mm/init.c
+++ b/arch/mips/mm/init.c
@@ -98,8 +98,10 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
idx += in_interrupt() ? FIX_N_COLOURS : 0;
vaddr = __fix_to_virt(FIX_CMAP_END - idx);
pte = mk_pte(page, prot);
-#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+#if defined(CONFIG_XPA)
entrylo = pte_to_entrylo(pte.pte_high);
+#elif defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
+ entrylo = pte.pte_high;
#else
entrylo = pte_to_entrylo(pte_val(pte));
#endif
@@ -110,9 +112,11 @@ static void *__kmap_pgprot(struct page *page, unsigned long addr, pgprot_t prot)
write_c0_entrylo0(entrylo);
write_c0_entrylo1(entrylo);
#ifdef CONFIG_XPA
- entrylo = (pte.pte_low & _PFNX_MASK);
- writex_c0_entrylo0(entrylo);
- writex_c0_entrylo1(entrylo);
+ if (cpu_has_xpa) {
+ entrylo = (pte.pte_low & _PFNX_MASK);
+ writex_c0_entrylo0(entrylo);
+ writex_c0_entrylo1(entrylo);
+ }
#endif
tlbidx = read_c0_wired();
write_c0_wired(tlbidx + 1);
@@ -196,7 +200,7 @@ void copy_to_user_page(struct vm_area_struct *vma,
if (cpu_has_dc_aliases)
SetPageDcacheDirty(page);
}
- if ((vma->vm_flags & VM_EXEC) && !cpu_has_ic_fills_f_dc)
+ if (vma->vm_flags & VM_EXEC)
flush_cache_page(vma, vaddr, page_to_pfn(page));
}
@@ -500,7 +504,7 @@ void free_initrd_mem(unsigned long start, unsigned long end)
void (*free_init_pages_eva)(void *begin, void *end) = NULL;
-void __init_refok free_initmem(void)
+void __ref free_initmem(void)
{
prom_free_prom_memory();
/*
diff --git a/arch/mips/mm/page.c b/arch/mips/mm/page.c
index 885d73ffd6fb..c41953ca6605 100644
--- a/arch/mips/mm/page.c
+++ b/arch/mips/mm/page.c
@@ -188,6 +188,15 @@ static void set_prefetch_parameters(void)
}
break;
+ case CPU_LOONGSON3:
+		/* Loongson-3 only supports Pref_Load/Pref_Store. */
+ pref_bias_clear_store = 128;
+ pref_bias_copy_load = 128;
+ pref_bias_copy_store = 128;
+ pref_src_mode = Pref_Load;
+ pref_dst_mode = Pref_Store;
+ break;
+
default:
pref_bias_clear_store = 128;
pref_bias_copy_load = 256;
diff --git a/arch/mips/mm/sc-debugfs.c b/arch/mips/mm/sc-debugfs.c
index 5eefe3281b24..01f1154cdb0c 100644
--- a/arch/mips/mm/sc-debugfs.c
+++ b/arch/mips/mm/sc-debugfs.c
@@ -73,8 +73,8 @@ static int __init sc_debugfs_init(void)
file = debugfs_create_file("prefetch", S_IRUGO | S_IWUSR, dir,
NULL, &sc_prefetch_fops);
- if (IS_ERR(file))
- return PTR_ERR(file);
+ if (!file)
+ return -ENOMEM;
return 0;
}
diff --git a/arch/mips/mm/sc-ip22.c b/arch/mips/mm/sc-ip22.c
index dc7c5a5214a9..026cb59a914d 100644
--- a/arch/mips/mm/sc-ip22.c
+++ b/arch/mips/mm/sc-ip22.c
@@ -158,7 +158,7 @@ static inline int __init indy_sc_probe(void)
return 1;
}
-/* XXX Check with wje if the Indy caches can differenciate between
+/* XXX Check with wje if the Indy caches can differentiate between
writeback + invalidate and just invalidate. */
static struct bcache_ops indy_sc_ops = {
.bc_enable = indy_sc_enable,
diff --git a/arch/mips/mm/sc-mips.c b/arch/mips/mm/sc-mips.c
index 91dec32c77b7..286a4d5a1884 100644
--- a/arch/mips/mm/sc-mips.c
+++ b/arch/mips/mm/sc-mips.c
@@ -141,6 +141,7 @@ static inline int mips_sc_is_activated(struct cpuinfo_mips *c)
case CPU_P5600:
case CPU_BMIPS5000:
case CPU_QEMU_GENERIC:
+ case CPU_P6600:
if (config2 & (1 << 12))
return 0;
}
diff --git a/arch/mips/mm/sc-rm7k.c b/arch/mips/mm/sc-rm7k.c
index 9ac1efcfbcc7..78f900c59276 100644
--- a/arch/mips/mm/sc-rm7k.c
+++ b/arch/mips/mm/sc-rm7k.c
@@ -161,7 +161,7 @@ static void rm7k_tc_disable(void)
local_irq_save(flags);
blast_rm7k_tcache();
clear_c0_config(RM7K_CONF_TE);
- local_irq_save(flags);
+ local_irq_restore(flags);
}
static void rm7k_sc_disable(void)
diff --git a/arch/mips/mm/tlb-r3k.c b/arch/mips/mm/tlb-r3k.c
index b4f366f7c0f5..1290b995695d 100644
--- a/arch/mips/mm/tlb-r3k.c
+++ b/arch/mips/mm/tlb-r3k.c
@@ -43,7 +43,7 @@ static void local_flush_tlb_from(int entry)
{
unsigned long old_ctx;
- old_ctx = read_c0_entryhi() & ASID_MASK;
+ old_ctx = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
write_c0_entrylo0(0);
while (entry < current_cpu_data.tlbsize) {
write_c0_index(entry << 8);
@@ -81,6 +81,7 @@ void local_flush_tlb_mm(struct mm_struct *mm)
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
unsigned long end)
{
+ unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
struct mm_struct *mm = vma->vm_mm;
int cpu = smp_processor_id();
@@ -89,13 +90,13 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
#ifdef DEBUG_TLB
printk("[tlbrange<%lu,0x%08lx,0x%08lx>]",
- cpu_context(cpu, mm) & ASID_MASK, start, end);
+ cpu_context(cpu, mm) & asid_mask, start, end);
#endif
local_irq_save(flags);
size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
if (size <= current_cpu_data.tlbsize) {
- int oldpid = read_c0_entryhi() & ASID_MASK;
- int newpid = cpu_context(cpu, mm) & ASID_MASK;
+ int oldpid = read_c0_entryhi() & asid_mask;
+ int newpid = cpu_context(cpu, mm) & asid_mask;
start &= PAGE_MASK;
end += PAGE_SIZE - 1;
@@ -159,6 +160,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
+ unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
int cpu = smp_processor_id();
if (cpu_context(cpu, vma->vm_mm) != 0) {
@@ -168,10 +170,10 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
#ifdef DEBUG_TLB
printk("[tlbpage<%lu,0x%08lx>]", cpu_context(cpu, vma->vm_mm), page);
#endif
- newpid = cpu_context(cpu, vma->vm_mm) & ASID_MASK;
+ newpid = cpu_context(cpu, vma->vm_mm) & asid_mask;
page &= PAGE_MASK;
local_irq_save(flags);
- oldpid = read_c0_entryhi() & ASID_MASK;
+ oldpid = read_c0_entryhi() & asid_mask;
write_c0_entryhi(page | newpid);
BARRIER;
tlb_probe();
@@ -190,6 +192,7 @@ finish:
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
+ unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
unsigned long flags;
int idx, pid;
@@ -199,10 +202,10 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
if (current->active_mm != vma->vm_mm)
return;
- pid = read_c0_entryhi() & ASID_MASK;
+ pid = read_c0_entryhi() & asid_mask;
#ifdef DEBUG_TLB
- if ((pid != (cpu_context(cpu, vma->vm_mm) & ASID_MASK)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
+ if ((pid != (cpu_context(cpu, vma->vm_mm) & asid_mask)) || (cpu_context(cpu, vma->vm_mm) == 0)) {
printk("update_mmu_cache: Wheee, bogus tlbpid mmpid=%lu tlbpid=%d\n",
(cpu_context(cpu, vma->vm_mm)), pid);
}
@@ -228,6 +231,7 @@ void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
unsigned long entryhi, unsigned long pagemask)
{
+ unsigned long asid_mask = cpu_asid_mask(&current_cpu_data);
unsigned long flags;
unsigned long old_ctx;
static unsigned long wired = 0;
@@ -243,7 +247,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
local_irq_save(flags);
/* Save old context and create impossible VPN2 value */
- old_ctx = read_c0_entryhi() & ASID_MASK;
+ old_ctx = read_c0_entryhi() & asid_mask;
old_pagemask = read_c0_pagemask();
w = read_c0_wired();
write_c0_wired(w + 1);
@@ -266,7 +270,7 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
#endif
local_irq_save(flags);
- old_ctx = read_c0_entryhi() & ASID_MASK;
+ old_ctx = read_c0_entryhi() & asid_mask;
write_c0_entrylo0(entrylo0);
write_c0_entryhi(entryhi);
write_c0_index(wired);
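Throughout the tlb-r3k.c changes above, the compile-time ASID_MASK is replaced by cpu_asid_mask(&current_cpu_data), so the mask width follows the CPU actually running rather than a fixed constant. A small sketch of the recurring EntryHi pattern (illustrative; the helper name is hypothetical):

/* Hypothetical helper: read the ASID currently loaded in EntryHi,
 * using the mask appropriate for this CPU (illustrative sketch). */
static unsigned long current_entryhi_asid(void)
{
	return read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
}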
diff --git a/arch/mips/mm/tlb-r4k.c b/arch/mips/mm/tlb-r4k.c
index 5037d5868cef..e8b335c16295 100644
--- a/arch/mips/mm/tlb-r4k.c
+++ b/arch/mips/mm/tlb-r4k.c
@@ -19,6 +19,7 @@
#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
+#include <asm/hazards.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlb.h>
@@ -27,25 +28,28 @@
extern void build_tlb_refill_handler(void);
/*
- * LOONGSON2/3 has a 4 entry itlb which is a subset of dtlb,
- * unfortunately, itlb is not totally transparent to software.
+ * LOONGSON-2 has a 4 entry itlb which is a subset of jtlb, LOONGSON-3 has
+ * a 4 entry itlb and a 4 entry dtlb which are subsets of jtlb. Unfortunately,
+ * itlb/dtlb are not totally transparent to software.
*/
-static inline void flush_itlb(void)
+static inline void flush_micro_tlb(void)
{
switch (current_cpu_type()) {
case CPU_LOONGSON2:
+ write_c0_diag(LOONGSON_DIAG_ITLB);
+ break;
case CPU_LOONGSON3:
- write_c0_diag(4);
+ write_c0_diag(LOONGSON_DIAG_ITLB | LOONGSON_DIAG_DTLB);
break;
default:
break;
}
}
-static inline void flush_itlb_vm(struct vm_area_struct *vma)
+static inline void flush_micro_tlb_vm(struct vm_area_struct *vma)
{
if (vma->vm_flags & VM_EXEC)
- flush_itlb();
+ flush_micro_tlb();
}
void local_flush_tlb_all(void)
@@ -92,7 +96,7 @@ void local_flush_tlb_all(void)
tlbw_use_hazard();
write_c0_entryhi(old_ctx);
htw_start();
- flush_itlb();
+ flush_micro_tlb();
local_irq_restore(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);
@@ -158,7 +162,7 @@ void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
} else {
drop_mmu_context(mm, cpu);
}
- flush_itlb();
+ flush_micro_tlb();
local_irq_restore(flags);
}
}
@@ -204,7 +208,7 @@ void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
} else {
local_flush_tlb_all();
}
- flush_itlb();
+ flush_micro_tlb();
local_irq_restore(flags);
}
@@ -239,7 +243,7 @@ void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
finish:
write_c0_entryhi(oldpid);
htw_start();
- flush_itlb_vm(vma);
+ flush_micro_tlb_vm(vma);
local_irq_restore(flags);
}
}
@@ -273,7 +277,7 @@ void local_flush_tlb_one(unsigned long page)
}
write_c0_entryhi(oldpid);
htw_start();
- flush_itlb();
+ flush_micro_tlb();
local_irq_restore(flags);
}
@@ -300,7 +304,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
local_irq_save(flags);
htw_stop();
- pid = read_c0_entryhi() & ASID_MASK;
+ pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
address &= (PAGE_MASK << 1);
write_c0_entryhi(address | pid);
pgdp = pgd_offset(vma->vm_mm, address);
@@ -335,10 +339,12 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
#if defined(CONFIG_PHYS_ADDR_T_64BIT) && defined(CONFIG_CPU_MIPS32)
#ifdef CONFIG_XPA
write_c0_entrylo0(pte_to_entrylo(ptep->pte_high));
- writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
+ if (cpu_has_xpa)
+ writex_c0_entrylo0(ptep->pte_low & _PFNX_MASK);
ptep++;
write_c0_entrylo1(pte_to_entrylo(ptep->pte_high));
- writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
+ if (cpu_has_xpa)
+ writex_c0_entrylo1(ptep->pte_low & _PFNX_MASK);
#else
write_c0_entrylo0(ptep->pte_high);
ptep++;
@@ -356,7 +362,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
}
tlbw_use_hazard();
htw_start();
- flush_itlb_vm(vma);
+ flush_micro_tlb_vm(vma);
local_irq_restore(flags);
}
@@ -399,19 +405,20 @@ void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
-int __init has_transparent_hugepage(void)
+int has_transparent_hugepage(void)
{
- unsigned int mask;
- unsigned long flags;
-
- local_irq_save(flags);
- write_c0_pagemask(PM_HUGE_MASK);
- back_to_back_c0_hazard();
- mask = read_c0_pagemask();
- write_c0_pagemask(PM_DEFAULT_MASK);
+ static unsigned int mask = -1;
- local_irq_restore(flags);
+ if (mask == -1) { /* first call comes during __init */
+ unsigned long flags;
+ local_irq_save(flags);
+ write_c0_pagemask(PM_HUGE_MASK);
+ back_to_back_c0_hazard();
+ mask = read_c0_pagemask();
+ write_c0_pagemask(PM_DEFAULT_MASK);
+ local_irq_restore(flags);
+ }
return mask == PM_HUGE_MASK;
}
@@ -486,6 +493,10 @@ static void r4k_tlb_configure(void)
* be set to fixed-size pages.
*/
write_c0_pagemask(PM_DEFAULT_MASK);
+ back_to_back_c0_hazard();
+ if (read_c0_pagemask() != PM_DEFAULT_MASK)
+ panic("MMU doesn't support PAGE_SIZE=0x%lx", PAGE_SIZE);
+
write_c0_wired(0);
if (current_cpu_type() == CPU_R10000 ||
current_cpu_type() == CPU_R12000 ||
diff --git a/arch/mips/mm/tlb-r8k.c b/arch/mips/mm/tlb-r8k.c
index 138a2ec7cc6b..e86e2e55ad3e 100644
--- a/arch/mips/mm/tlb-r8k.c
+++ b/arch/mips/mm/tlb-r8k.c
@@ -194,7 +194,7 @@ void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
if (current->active_mm != vma->vm_mm)
return;
- pid = read_c0_entryhi() & ASID_MASK;
+ pid = read_c0_entryhi() & cpu_asid_mask(&current_cpu_data);
local_irq_save(flags);
address &= PAGE_MASK;
diff --git a/arch/mips/mm/tlbex.c b/arch/mips/mm/tlbex.c
index 5a04b6f5c6fb..55ce39606cb8 100644
--- a/arch/mips/mm/tlbex.c
+++ b/arch/mips/mm/tlbex.c
@@ -12,7 +12,7 @@
* Copyright (C) 2011 MIPS Technologies, Inc.
*
* ... and the days got worse and worse and now you see
- * I've gone completly out of my mind.
+ * I've gone completely out of my mind.
*
* They're coming to take me a away haha
* they're coming to take me a away hoho hihi haha
@@ -234,20 +234,16 @@ static void output_pgtable_bits_defines(void)
pr_debug("\n");
pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
- pr_define("_PAGE_READ_SHIFT %d\n", _PAGE_READ_SHIFT);
+ pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
pr_define("_PAGE_ACCESSED_SHIFT %d\n", _PAGE_ACCESSED_SHIFT);
pr_define("_PAGE_MODIFIED_SHIFT %d\n", _PAGE_MODIFIED_SHIFT);
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
#endif
-#if defined(CONFIG_CPU_MIPSR2) || defined(CONFIG_CPU_MIPSR6)
- if (cpu_has_rixi) {
#ifdef _PAGE_NO_EXEC_SHIFT
+ if (cpu_has_rixi)
pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
- pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
-#endif
- }
#endif
pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
@@ -284,7 +280,12 @@ static inline void dump_handler(const char *symbol, const u32 *handler, int coun
#define C0_ENTRYLO1 3, 0
#define C0_CONTEXT 4, 0
#define C0_PAGEMASK 5, 0
+#define C0_PWBASE 5, 5
+#define C0_PWFIELD 5, 6
+#define C0_PWSIZE 5, 7
+#define C0_PWCTL 6, 6
#define C0_BADVADDR 8, 0
+#define C0_PGD 9, 7
#define C0_ENTRYHI 10, 0
#define C0_EPC 14, 0
#define C0_XCONTEXT 20, 0
@@ -630,6 +631,11 @@ static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
unsigned int reg)
{
+ if (_PAGE_GLOBAL_SHIFT == 0) {
+ /* pte_t is already in EntryLo format */
+ return;
+ }
+
if (cpu_has_rixi && _PAGE_NO_EXEC) {
if (fill_includes_sw_bits) {
UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
@@ -808,7 +814,10 @@ build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
if (pgd_reg != -1) {
/* pgd is in pgd_reg */
- UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
+ if (cpu_has_ldpte)
+ UASM_i_MFC0(p, ptr, C0_PWBASE);
+ else
+ UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
} else {
#if defined(CONFIG_MIPS_PGD_C0_CONTEXT)
/*
@@ -879,7 +888,7 @@ build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
}
}
if (!did_vmalloc_branch) {
- if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
+ if (single_insn_swpd) {
uasm_il_b(p, r, label_vmalloc_done);
uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
} else {
@@ -1007,39 +1016,40 @@ static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
{
- /*
- * 64bit address support (36bit on a 32bit CPU) in a 32bit
- * Kernel is a special case. Only a few CPUs use it.
- */
- if (config_enabled(CONFIG_PHYS_ADDR_T_64BIT) && !cpu_has_64bits) {
- int pte_off_even = sizeof(pte_t) / 2;
- int pte_off_odd = pte_off_even + sizeof(pte_t);
-#ifdef CONFIG_XPA
- const int scratch = 1; /* Our extra working register */
+ int pte_off_even = 0;
+ int pte_off_odd = sizeof(pte_t);
- uasm_i_addu(p, scratch, 0, ptep);
+#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_PHYS_ADDR_T_64BIT)
+ /* The low 32 bits of EntryLo is stored in pte_high */
+ pte_off_even += offsetof(pte_t, pte_high);
+ pte_off_odd += offsetof(pte_t, pte_high);
#endif
+
+ if (IS_ENABLED(CONFIG_XPA)) {
uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
- uasm_i_lw(p, ptep, pte_off_odd, ptep); /* odd pte */
UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
- UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
UASM_i_MTC0(p, tmp, C0_ENTRYLO0);
- UASM_i_MTC0(p, ptep, C0_ENTRYLO1);
-#ifdef CONFIG_XPA
- uasm_i_lw(p, tmp, 0, scratch);
- uasm_i_lw(p, ptep, sizeof(pte_t), scratch);
- uasm_i_lui(p, scratch, 0xff);
- uasm_i_ori(p, scratch, scratch, 0xffff);
- uasm_i_and(p, tmp, scratch, tmp);
- uasm_i_and(p, ptep, scratch, ptep);
- uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
- uasm_i_mthc0(p, ptep, C0_ENTRYLO1);
-#endif
+
+ if (cpu_has_xpa && !mips_xpa_disabled) {
+ uasm_i_lw(p, tmp, 0, ptep);
+ uasm_i_ext(p, tmp, tmp, 0, 24);
+ uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
+ }
+
+ uasm_i_lw(p, tmp, pte_off_odd, ptep); /* odd pte */
+ UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
+ UASM_i_MTC0(p, tmp, C0_ENTRYLO1);
+
+ if (cpu_has_xpa && !mips_xpa_disabled) {
+ uasm_i_lw(p, tmp, sizeof(pte_t), ptep);
+ uasm_i_ext(p, tmp, tmp, 0, 24);
+ uasm_i_mthc0(p, tmp, C0_ENTRYLO1);
+ }
return;
}
- UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
- UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
+ UASM_i_LW(p, tmp, pte_off_even, ptep); /* get even pte */
+ UASM_i_LW(p, ptep, pte_off_odd, ptep); /* get odd pte */
if (r45k_bvahwbug())
build_tlb_probe_entry(p);
build_convert_pte_to_entrylo(p, tmp);
@@ -1421,6 +1431,108 @@ static void build_r4000_tlb_refill_handler(void)
dump_handler("r4000_tlb_refill", (u32 *)ebase, 64);
}
+static void setup_pw(void)
+{
+ unsigned long pgd_i, pgd_w;
+#ifndef __PAGETABLE_PMD_FOLDED
+ unsigned long pmd_i, pmd_w;
+#endif
+ unsigned long pt_i, pt_w;
+ unsigned long pte_i, pte_w;
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ unsigned long psn;
+
+ psn = ilog2(_PAGE_HUGE); /* bit used to indicate huge page */
+#endif
+ pgd_i = PGDIR_SHIFT; /* 1st level PGD */
+#ifndef __PAGETABLE_PMD_FOLDED
+ pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_ORDER;
+
+ pmd_i = PMD_SHIFT; /* 2nd level PMD */
+ pmd_w = PMD_SHIFT - PAGE_SHIFT;
+#else
+ pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_ORDER;
+#endif
+
+ pt_i = PAGE_SHIFT; /* 3rd level PTE */
+ pt_w = PAGE_SHIFT - 3;
+
+ pte_i = ilog2(_PAGE_GLOBAL);
+ pte_w = 0;
+
+#ifndef __PAGETABLE_PMD_FOLDED
+ write_c0_pwfield(pgd_i << 24 | pmd_i << 12 | pt_i << 6 | pte_i);
+ write_c0_pwsize(1 << 30 | pgd_w << 24 | pmd_w << 12 | pt_w << 6 | pte_w);
+#else
+ write_c0_pwfield(pgd_i << 24 | pt_i << 6 | pte_i);
+ write_c0_pwsize(1 << 30 | pgd_w << 24 | pt_w << 6 | pte_w);
+#endif
+
+#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
+ write_c0_pwctl(1 << 6 | psn);
+#endif
+ write_c0_kpgd(swapper_pg_dir);
+ kscratch_used_mask |= (1 << 7); /* KScratch6 is used for KPGD */
+}
+
+static void build_loongson3_tlb_refill_handler(void)
+{
+ u32 *p = tlb_handler;
+ struct uasm_label *l = labels;
+ struct uasm_reloc *r = relocs;
+
+ memset(labels, 0, sizeof(labels));
+ memset(relocs, 0, sizeof(relocs));
+ memset(tlb_handler, 0, sizeof(tlb_handler));
+
+ if (check_for_high_segbits) {
+ uasm_i_dmfc0(&p, K0, C0_BADVADDR);
+ uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
+ uasm_il_beqz(&p, &r, K1, label_vmalloc);
+ uasm_i_nop(&p);
+
+ uasm_il_bgez(&p, &r, K0, label_large_segbits_fault);
+ uasm_i_nop(&p);
+ uasm_l_vmalloc(&l, p);
+ }
+
+ uasm_i_dmfc0(&p, K1, C0_PGD);
+
+ uasm_i_lddir(&p, K0, K1, 3); /* global page dir */
+#ifndef __PAGETABLE_PMD_FOLDED
+ uasm_i_lddir(&p, K1, K0, 1); /* middle page dir */
+#endif
+ uasm_i_ldpte(&p, K1, 0); /* even */
+ uasm_i_ldpte(&p, K1, 1); /* odd */
+ uasm_i_tlbwr(&p);
+
+ /* restore page mask */
+ if (PM_DEFAULT_MASK >> 16) {
+ uasm_i_lui(&p, K0, PM_DEFAULT_MASK >> 16);
+ uasm_i_ori(&p, K0, K0, PM_DEFAULT_MASK & 0xffff);
+ uasm_i_mtc0(&p, K0, C0_PAGEMASK);
+ } else if (PM_DEFAULT_MASK) {
+ uasm_i_ori(&p, K0, 0, PM_DEFAULT_MASK);
+ uasm_i_mtc0(&p, K0, C0_PAGEMASK);
+ } else {
+ uasm_i_mtc0(&p, 0, C0_PAGEMASK);
+ }
+
+ uasm_i_eret(&p);
+
+ if (check_for_high_segbits) {
+ uasm_l_large_segbits_fault(&l, p);
+ UASM_i_LA(&p, K1, (unsigned long)tlb_do_page_fault_0);
+ uasm_i_jr(&p, K1);
+ uasm_i_nop(&p);
+ }
+
+ uasm_resolve_relocs(relocs, labels);
+ memcpy((void *)(ebase + 0x80), tlb_handler, 0x80);
+ local_flush_icache_range(ebase + 0x80, ebase + 0x100);
+ dump_handler("loongson3_tlb_refill", (u32 *)(ebase + 0x80), 32);
+}
+
extern u32 handle_tlbl[], handle_tlbl_end[];
extern u32 handle_tlbs[], handle_tlbs_end[];
extern u32 handle_tlbm[], handle_tlbm_end[];
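setup_pw() above programs the walker registers: PWField holds the bit position (shift) of each page-table level plus the PTE's global-bit index, and PWSize holds the index width of each level plus the pointer-size bit, with the widths derived from PGDIR_SHIFT, PMD_SHIFT, PAGE_SHIFT and PGD_ORDER. A sketch of the folded-PMD packing with the geometry passed in as parameters (illustrative; the field layout mirrors the writes above, the values are configuration dependent):

/* Pack walker geometry the way setup_pw() does in the folded-PMD case
 * (illustrative sketch; inputs depend on the page-table configuration). */
static void pack_pw_regs(unsigned long pgd_shift, unsigned long pgd_width,
			 unsigned long pt_shift, unsigned long pt_width,
			 unsigned long global_bit)
{
	unsigned long pwfield = pgd_shift << 24 | pt_shift << 6 | global_bit;
	unsigned long pwsize  = 1ul << 30 | pgd_width << 24 | pt_width << 6;

	write_c0_pwfield(pwfield);
	write_c0_pwsize(pwsize);
}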
@@ -1468,7 +1580,10 @@ static void build_setup_pgd(void)
} else {
/* PGD in c0_KScratch */
uasm_i_jr(&p, 31);
- UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
+ if (cpu_has_ldpte)
+ UASM_i_MTC0(&p, a0, C0_PWBASE);
+ else
+ UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
}
#else
#ifdef CONFIG_SMP
@@ -1523,19 +1638,19 @@ iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
static void
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
- unsigned int mode)
+ unsigned int mode, unsigned int scratch)
{
-#ifdef CONFIG_PHYS_ADDR_T_64BIT
unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
+ unsigned int swmode = mode & ~hwmode;
- if (!cpu_has_64bits) {
- const int scratch = 1; /* Our extra working register */
-
- uasm_i_lui(p, scratch, (mode >> 16));
+ if (IS_ENABLED(CONFIG_XPA) && !cpu_has_64bits) {
+ uasm_i_lui(p, scratch, swmode >> 16);
uasm_i_or(p, pte, pte, scratch);
- } else
-#endif
- uasm_i_ori(p, pte, pte, mode);
+ BUG_ON(swmode & 0xffff);
+ } else {
+ uasm_i_ori(p, pte, pte, mode);
+ }
+
#ifdef CONFIG_SMP
# ifdef CONFIG_PHYS_ADDR_T_64BIT
if (cpu_has_64bits)
@@ -1554,6 +1669,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
/* no uasm_i_nop needed */
uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
uasm_i_ori(p, pte, pte, hwmode);
+ BUG_ON(hwmode & ~0xffff);
uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
/* no uasm_i_nop needed */
@@ -1575,6 +1691,7 @@ iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
if (!cpu_has_64bits) {
uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
uasm_i_ori(p, pte, pte, hwmode);
+ BUG_ON(hwmode & ~0xffff);
uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
uasm_i_lw(p, pte, 0, ptr);
}
@@ -1615,9 +1732,8 @@ build_pte_present(u32 **p, struct uasm_reloc **r,
cur = t;
}
uasm_i_andi(p, t, cur,
- (_PAGE_PRESENT | _PAGE_READ) >> _PAGE_PRESENT_SHIFT);
- uasm_i_xori(p, t, t,
- (_PAGE_PRESENT | _PAGE_READ) >> _PAGE_PRESENT_SHIFT);
+ (_PAGE_PRESENT | _PAGE_NO_READ) >> _PAGE_PRESENT_SHIFT);
+ uasm_i_xori(p, t, t, _PAGE_PRESENT >> _PAGE_PRESENT_SHIFT);
uasm_il_bnez(p, r, t, lid);
if (pte == t)
/* You lose the SMP race :-(*/
@@ -1628,11 +1744,11 @@ build_pte_present(u32 **p, struct uasm_reloc **r,
/* Make PTE valid, store result in PTR. */
static void
build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
- unsigned int ptr)
+ unsigned int ptr, unsigned int scratch)
{
unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;
- iPTE_SW(p, r, pte, ptr, mode);
+ iPTE_SW(p, r, pte, ptr, mode, scratch);
}
/*
@@ -1668,12 +1784,12 @@ build_pte_writable(u32 **p, struct uasm_reloc **r,
*/
static void
build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
- unsigned int ptr)
+ unsigned int ptr, unsigned int scratch)
{
unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
| _PAGE_DIRTY);
- iPTE_SW(p, r, pte, ptr, mode);
+ iPTE_SW(p, r, pte, ptr, mode, scratch);
}
/*
@@ -1778,7 +1894,7 @@ static void build_r3000_tlb_load_handler(void)
build_r3000_tlbchange_handler_head(&p, K0, K1);
build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
uasm_i_nop(&p); /* load delay */
- build_make_valid(&p, &r, K0, K1);
+ build_make_valid(&p, &r, K0, K1, -1);
build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
uasm_l_nopage_tlbl(&l, p);
@@ -1809,7 +1925,7 @@ static void build_r3000_tlb_store_handler(void)
build_r3000_tlbchange_handler_head(&p, K0, K1);
build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
uasm_i_nop(&p); /* load delay */
- build_make_write(&p, &r, K0, K1);
+ build_make_write(&p, &r, K0, K1, -1);
build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
uasm_l_nopage_tlbs(&l, p);
@@ -1840,7 +1956,7 @@ static void build_r3000_tlb_modify_handler(void)
build_r3000_tlbchange_handler_head(&p, K0, K1);
build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm);
uasm_i_nop(&p); /* load delay */
- build_make_write(&p, &r, K0, K1);
+ build_make_write(&p, &r, K0, K1, -1);
build_r3000_pte_reload_tlbwi(&p, K0, K1);
uasm_l_nopage_tlbm(&l, p);
@@ -2008,7 +2124,7 @@ static void build_r4000_tlb_load_handler(void)
}
uasm_l_tlbl_goaround1(&l, p);
}
- build_make_valid(&p, &r, wr.r1, wr.r2);
+ build_make_valid(&p, &r, wr.r1, wr.r2, wr.r3);
build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
@@ -2122,7 +2238,7 @@ static void build_r4000_tlb_store_handler(void)
build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
if (m4kc_tlbp_war())
build_tlb_probe_entry(&p);
- build_make_write(&p, &r, wr.r1, wr.r2);
+ build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
@@ -2178,7 +2294,7 @@ static void build_r4000_tlb_modify_handler(void)
if (m4kc_tlbp_war())
build_tlb_probe_entry(&p);
/* Present and writable bits set, set accessed and dirty bits. */
- build_make_write(&p, &r, wr.r1, wr.r2);
+ build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
@@ -2245,8 +2361,9 @@ static void print_htw_config(void)
(config & MIPS_PWFIELD_PTEI_MASK) >> MIPS_PWFIELD_PTEI_SHIFT);
config = read_c0_pwsize();
- pr_debug("PWSize (0x%0*lx): GDW: 0x%02lx UDW: 0x%02lx MDW: 0x%02lx PTW: 0x%02lx PTEW: 0x%02lx\n",
+ pr_debug("PWSize (0x%0*lx): PS: 0x%lx GDW: 0x%02lx UDW: 0x%02lx MDW: 0x%02lx PTW: 0x%02lx PTEW: 0x%02lx\n",
field, config,
+ (config & MIPS_PWSIZE_PS_MASK) >> MIPS_PWSIZE_PS_SHIFT,
(config & MIPS_PWSIZE_GDW_MASK) >> MIPS_PWSIZE_GDW_SHIFT,
(config & MIPS_PWSIZE_UDW_MASK) >> MIPS_PWSIZE_UDW_SHIFT,
(config & MIPS_PWSIZE_MDW_MASK) >> MIPS_PWSIZE_MDW_SHIFT,
@@ -2254,9 +2371,12 @@ static void print_htw_config(void)
(config & MIPS_PWSIZE_PTEW_MASK) >> MIPS_PWSIZE_PTEW_SHIFT);
pwctl = read_c0_pwctl();
- pr_debug("PWCtl (0x%x): PWEn: 0x%x DPH: 0x%x HugePg: 0x%x Psn: 0x%x\n",
+ pr_debug("PWCtl (0x%x): PWEn: 0x%x XK: 0x%x XS: 0x%x XU: 0x%x DPH: 0x%x HugePg: 0x%x Psn: 0x%x\n",
pwctl,
(pwctl & MIPS_PWCTL_PWEN_MASK) >> MIPS_PWCTL_PWEN_SHIFT,
+ (pwctl & MIPS_PWCTL_XK_MASK) >> MIPS_PWCTL_XK_SHIFT,
+ (pwctl & MIPS_PWCTL_XS_MASK) >> MIPS_PWCTL_XS_SHIFT,
+ (pwctl & MIPS_PWCTL_XU_MASK) >> MIPS_PWCTL_XU_SHIFT,
(pwctl & MIPS_PWCTL_DPH_MASK) >> MIPS_PWCTL_DPH_SHIFT,
(pwctl & MIPS_PWCTL_HUGEPG_MASK) >> MIPS_PWCTL_HUGEPG_SHIFT,
(pwctl & MIPS_PWCTL_PSN_MASK) >> MIPS_PWCTL_PSN_SHIFT);
@@ -2311,17 +2431,25 @@ static void config_htw_params(void)
if (CONFIG_PGTABLE_LEVELS >= 3)
pwsize |= ilog2(PTRS_PER_PMD) << MIPS_PWSIZE_MDW_SHIFT;
- /* If XPA has been enabled, PTEs are 64-bit in size. */
- if (config_enabled(CONFIG_64BITS) || (read_c0_pagegrain() & PG_ELPA))
- pwsize |= 1;
+ /* Set pointer size to size of directory pointers */
+ if (IS_ENABLED(CONFIG_64BIT))
+ pwsize |= MIPS_PWSIZE_PS_MASK;
+ /* PTEs may be multiple pointers long (e.g. with XPA) */
+ pwsize |= ((PTE_T_LOG2 - PGD_T_LOG2) << MIPS_PWSIZE_PTEW_SHIFT)
+ & MIPS_PWSIZE_PTEW_MASK;
write_c0_pwsize(pwsize);
/* Make sure everything is set before we enable the HTW */
back_to_back_c0_hazard();
- /* Enable HTW and disable the rest of the pwctl fields */
+ /*
+ * Enable HTW (and only for XUSeg on 64-bit), and disable the rest of
+ * the pwctl fields.
+ */
config = 1 << MIPS_PWCTL_PWEN_SHIFT;
+ if (IS_ENABLED(CONFIG_64BIT))
+ config |= MIPS_PWCTL_XU_MASK;
write_c0_pwctl(config);
pr_info("Hardware Page Table Walker enabled\n");
@@ -2394,6 +2522,9 @@ void build_tlb_refill_handler(void)
*/
static int run_once = 0;
+ if (IS_ENABLED(CONFIG_XPA) && !cpu_has_rixi)
+ panic("Kernels supporting XPA currently require CPUs with RIXI");
+
output_pgtable_bits_defines();
check_pabits();
@@ -2437,13 +2568,18 @@ void build_tlb_refill_handler(void)
break;
default:
+ if (cpu_has_ldpte)
+ setup_pw();
+
if (!run_once) {
scratch_reg = allocate_kscratch();
build_setup_pgd();
build_r4000_tlb_load_handler();
build_r4000_tlb_store_handler();
build_r4000_tlb_modify_handler();
- if (!cpu_has_local_ebase)
+ if (cpu_has_ldpte)
+ build_loongson3_tlb_refill_handler();
+ else if (!cpu_has_local_ebase)
build_r4000_tlb_refill_handler();
flush_tlb_handlers();
run_once++;
diff --git a/arch/mips/mm/uasm-micromips.c b/arch/mips/mm/uasm-micromips.c
index d78178daea4b..277cf52d80e1 100644
--- a/arch/mips/mm/uasm-micromips.c
+++ b/arch/mips/mm/uasm-micromips.c
@@ -53,8 +53,13 @@ static struct insn insn_table_MM[] = {
{ insn_bltzl, 0, 0 },
{ insn_bne, M(mm_bne32_op, 0, 0, 0, 0, 0), RT | RS | BIMM },
{ insn_cache, M(mm_pool32b_op, 0, 0, mm_cache_func, 0, 0), RT | RS | SIMM },
+ { insn_cfc1, M(mm_pool32f_op, 0, 0, 0, mm_cfc1_op, mm_32f_73_op), RT | RS },
+ { insn_cfcmsa, M(mm_pool32s_op, 0, msa_cfc_op, 0, 0, mm_32s_elm_op), RD | RE },
+ { insn_ctc1, M(mm_pool32f_op, 0, 0, 0, mm_ctc1_op, mm_32f_73_op), RT | RS },
+ { insn_ctcmsa, M(mm_pool32s_op, 0, msa_ctc_op, 0, 0, mm_32s_elm_op), RD | RE },
{ insn_daddu, 0, 0 },
{ insn_daddiu, 0, 0 },
+ { insn_di, M(mm_pool32a_op, 0, 0, 0, mm_di_op, mm_pool32axf_op), RS },
{ insn_divu, M(mm_pool32a_op, 0, 0, 0, mm_divu_op, mm_pool32axf_op), RT | RS },
{ insn_dmfc0, 0, 0 },
{ insn_dmtc0, 0, 0 },
@@ -84,6 +89,8 @@ static struct insn insn_table_MM[] = {
{ insn_mfhi, M(mm_pool32a_op, 0, 0, 0, mm_mfhi32_op, mm_pool32axf_op), RS },
{ insn_mflo, M(mm_pool32a_op, 0, 0, 0, mm_mflo32_op, mm_pool32axf_op), RS },
{ insn_mtc0, M(mm_pool32a_op, 0, 0, 0, mm_mtc0_op, mm_pool32axf_op), RT | RS | RD },
+ { insn_mthi, M(mm_pool32a_op, 0, 0, 0, mm_mthi32_op, mm_pool32axf_op), RS },
+ { insn_mtlo, M(mm_pool32a_op, 0, 0, 0, mm_mtlo32_op, mm_pool32axf_op), RS },
{ insn_mul, M(mm_pool32a_op, 0, 0, 0, 0, mm_mul_op), RT | RS | RD },
{ insn_or, M(mm_pool32a_op, 0, 0, 0, 0, mm_or32_op), RT | RS | RD },
{ insn_ori, M(mm_ori32_op, 0, 0, 0, 0, 0), RT | RS | UIMM },
@@ -166,13 +173,15 @@ static void build_insn(u32 **buf, enum opcode opc, ...)
op = ip->match;
va_start(ap, opc);
if (ip->fields & RS) {
- if (opc == insn_mfc0 || opc == insn_mtc0)
+ if (opc == insn_mfc0 || opc == insn_mtc0 ||
+ opc == insn_cfc1 || opc == insn_ctc1)
op |= build_rt(va_arg(ap, u32));
else
op |= build_rs(va_arg(ap, u32));
}
if (ip->fields & RT) {
- if (opc == insn_mfc0 || opc == insn_mtc0)
+ if (opc == insn_mfc0 || opc == insn_mtc0 ||
+ opc == insn_cfc1 || opc == insn_ctc1)
op |= build_rs(va_arg(ap, u32));
else
op |= build_rt(va_arg(ap, u32));
diff --git a/arch/mips/mm/uasm-mips.c b/arch/mips/mm/uasm-mips.c
index b4a837893562..763d3f1edb8a 100644
--- a/arch/mips/mm/uasm-mips.c
+++ b/arch/mips/mm/uasm-mips.c
@@ -65,11 +65,16 @@ static struct insn insn_table[] = {
#ifndef CONFIG_CPU_MIPSR6
{ insn_cache, M(cache_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
#else
- { insn_cache, M6(cache_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 },
+ { insn_cache, M6(spec3_op, 0, 0, 0, cache6_op), RS | RT | SIMM9 },
#endif
+ { insn_cfc1, M(cop1_op, cfc_op, 0, 0, 0, 0), RT | RD },
+ { insn_cfcmsa, M(msa_op, 0, msa_cfc_op, 0, 0, msa_elm_op), RD | RE },
+ { insn_ctc1, M(cop1_op, ctc_op, 0, 0, 0, 0), RT | RD },
+ { insn_ctcmsa, M(msa_op, 0, msa_ctc_op, 0, 0, msa_elm_op), RD | RE },
{ insn_daddiu, M(daddiu_op, 0, 0, 0, 0, 0), RS | RT | SIMM },
{ insn_daddu, M(spec_op, 0, 0, 0, 0, daddu_op), RS | RT | RD },
{ insn_dinsm, M(spec3_op, 0, 0, 0, 0, dinsm_op), RS | RT | RD | RE },
+ { insn_di, M(cop0_op, mfmc0_op, 0, 12, 0, 0), RT },
{ insn_dins, M(spec3_op, 0, 0, 0, 0, dins_op), RS | RT | RD | RE },
{ insn_divu, M(spec_op, 0, 0, 0, 0, divu_op), RS | RT },
{ insn_dmfc0, M(cop0_op, dmfc_op, 0, 0, 0, 0), RT | RD | SET},
@@ -114,7 +119,13 @@ static struct insn insn_table[] = {
{ insn_mflo, M(spec_op, 0, 0, 0, 0, mflo_op), RD },
{ insn_mtc0, M(cop0_op, mtc_op, 0, 0, 0, 0), RT | RD | SET},
{ insn_mthc0, M(cop0_op, mthc0_op, 0, 0, 0, 0), RT | RD | SET},
+ { insn_mthi, M(spec_op, 0, 0, 0, 0, mthi_op), RS },
+ { insn_mtlo, M(spec_op, 0, 0, 0, 0, mtlo_op), RS },
+#ifndef CONFIG_CPU_MIPSR6
{ insn_mul, M(spec2_op, 0, 0, 0, 0, mul_op), RS | RT | RD},
+#else
+ { insn_mul, M(spec_op, 0, 0, 0, mult_mul_op, mult_op), RS | RT | RD},
+#endif
{ insn_ori, M(ori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
{ insn_or, M(spec_op, 0, 0, 0, 0, or_op), RS | RT | RD },
#ifndef CONFIG_CPU_MIPSR6
@@ -153,6 +164,8 @@ static struct insn insn_table[] = {
{ insn_xori, M(xori_op, 0, 0, 0, 0, 0), RS | RT | UIMM },
{ insn_xor, M(spec_op, 0, 0, 0, 0, xor_op), RS | RT | RD },
{ insn_yield, M(spec3_op, 0, 0, 0, 0, yield_op), RS | RD },
+ { insn_ldpte, M(lwc2_op, 0, 0, 0, ldpte_op, mult_op), RS | RD },
+ { insn_lddir, M(lwc2_op, 0, 0, 0, lddir_op, mult_op), RS | RT | RD },
{ insn_invalid, 0, 0 }
};
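
The new table entries above are easier to read with the encoding in mind: each row's template word is the instruction with all operand fields zeroed, built from the standard MIPS field positions, and build_insn() later ORs the runtime operands into whichever fields the flags column lists. A minimal sketch of that template construction (the field shifts are the architectural R-type layout; the helper name is illustrative, not uasm's own macro):

#include <stdint.h>

#define OP_SH   26
#define RS_SH   21
#define RT_SH   16
#define RD_SH   11
#define RE_SH    6
#define FUNC_SH  0

/* Template word with all operand fields zeroed; build_insn()-style code
 * later ORs the runtime operands into the fields its flags list. */
static inline uint32_t mips_insn(uint32_t op, uint32_t rs, uint32_t rt,
                                 uint32_t rd, uint32_t re, uint32_t func)
{
        return op << OP_SH | rs << RS_SH | rt << RT_SH |
               rd << RD_SH | re << RE_SH | func << FUNC_SH;
}

/* e.g. "mthi $8" is SPECIAL (opcode 0) with rs = 8 and function 0x11:
 * mips_insn(0, 8, 0, 0, 0, 0x11) == 0x01000011 */
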
diff --git a/arch/mips/mm/uasm.c b/arch/mips/mm/uasm.c
index 319051c34343..a82970442b8a 100644
--- a/arch/mips/mm/uasm.c
+++ b/arch/mips/mm/uasm.c
@@ -49,17 +49,19 @@ enum opcode {
insn_invalid,
insn_addiu, insn_addu, insn_and, insn_andi, insn_bbit0, insn_bbit1,
insn_beq, insn_beql, insn_bgez, insn_bgezl, insn_bltz, insn_bltzl,
- insn_bne, insn_cache, insn_daddiu, insn_daddu, insn_dins, insn_dinsm,
- insn_divu, insn_dmfc0, insn_dmtc0, insn_drotr, insn_drotr32, insn_dsll,
+ insn_bne, insn_cache, insn_cfc1, insn_cfcmsa, insn_ctc1, insn_ctcmsa,
+ insn_daddiu, insn_daddu, insn_di, insn_dins, insn_dinsm, insn_divu,
+ insn_dmfc0, insn_dmtc0, insn_drotr, insn_drotr32, insn_dsll,
insn_dsll32, insn_dsra, insn_dsrl, insn_dsrl32, insn_dsubu, insn_eret,
insn_ext, insn_ins, insn_j, insn_jal, insn_jalr, insn_jr, insn_lb,
insn_ld, insn_ldx, insn_lh, insn_ll, insn_lld, insn_lui, insn_lw,
insn_lwx, insn_mfc0, insn_mfhc0, insn_mfhi, insn_mflo, insn_mtc0,
- insn_mthc0, insn_mul, insn_or, insn_ori, insn_pref, insn_rfe,
- insn_rotr, insn_sc, insn_scd, insn_sd, insn_sll, insn_sllv, insn_slt,
- insn_sltiu, insn_sltu, insn_sra, insn_srl, insn_srlv, insn_subu,
- insn_sw, insn_sync, insn_syscall, insn_tlbp, insn_tlbr, insn_tlbwi,
- insn_tlbwr, insn_wait, insn_wsbh, insn_xor, insn_xori, insn_yield,
+ insn_mthc0, insn_mthi, insn_mtlo, insn_mul, insn_or, insn_ori,
+ insn_pref, insn_rfe, insn_rotr, insn_sc, insn_scd, insn_sd, insn_sll,
+ insn_sllv, insn_slt, insn_sltiu, insn_sltu, insn_sra, insn_srl,
+ insn_srlv, insn_subu, insn_sw, insn_sync, insn_syscall, insn_tlbp,
+ insn_tlbr, insn_tlbwi, insn_tlbwr, insn_wait, insn_wsbh, insn_xor,
+ insn_xori, insn_yield, insn_lddir, insn_ldpte,
};
struct insn {
@@ -267,10 +269,15 @@ I_u1s2(_bltz)
I_u1s2(_bltzl)
I_u1u2s3(_bne)
I_u2s3u1(_cache)
+I_u1u2(_cfc1)
+I_u2u1(_cfcmsa)
+I_u1u2(_ctc1)
+I_u2u1(_ctcmsa)
I_u1u2u3(_dmfc0)
I_u1u2u3(_dmtc0)
I_u2u1s3(_daddiu)
I_u3u1u2(_daddu)
+I_u1(_di);
I_u1u2(_divu)
I_u2u1u3(_dsll)
I_u2u1u3(_dsll32)
@@ -300,6 +307,8 @@ I_u1(_mfhi)
I_u1(_mflo)
I_u1u2u3(_mtc0)
I_u1u2u3(_mthc0)
+I_u1(_mthi)
+I_u1(_mtlo)
I_u3u1u2(_mul)
I_u2u1u3(_ori)
I_u3u1u2(_or)
@@ -335,6 +344,8 @@ I_u1u2s3(_bbit0);
I_u1u2s3(_bbit1);
I_u3u1u2(_lwx)
I_u3u1u2(_ldx)
+I_u1u2(_ldpte)
+I_u2u1u3(_lddir)
#ifdef CONFIG_CPU_CAVIUM_OCTEON
#include <asm/octeon/octeon.h>
@@ -367,11 +378,7 @@ UASM_EXPORT_SYMBOL(ISAFUNC(uasm_build_label));
int ISAFUNC(uasm_in_compat_space_p)(long addr)
{
/* Is this address in 32bit compat space? */
-#ifdef CONFIG_64BIT
- return (((addr) & 0xffffffff00000000L) == 0xffffffff00000000L);
-#else
- return 1;
-#endif
+ return addr == (int)addr;
}
UASM_EXPORT_SYMBOL(ISAFUNC(uasm_in_compat_space_p));
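
The simplified uasm_in_compat_space_p() above replaces the 64-bit-only upper-half test with a sign-extension round trip: an address is in 32-bit compat space exactly when casting it to a 32-bit signed integer and back preserves it, and on 32-bit builds the test degenerates to "always true", matching the old return 1. A user-space sketch of the same check (64-bit host assumed; not kernel code):

#include <stdio.h>

/* In compat space exactly when the value survives a sign-extending
 * 32-bit round trip. */
static int in_compat_space(long addr)
{
        return addr == (int)addr;
}

int main(void)
{
        /* CKSEG0-style address: representable as a sign-extended 32-bit value */
        printf("%d\n", in_compat_space((long)0xffffffff80001000UL));
        /* XKPHYS-style address: needs the full 64-bit handling */
        printf("%d\n", in_compat_space((long)0x9000000080001000UL));
        return 0;
}
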
diff --git a/arch/mips/mti-malta/malta-dtshim.c b/arch/mips/mti-malta/malta-dtshim.c
index f7133efc5843..151f4882ec8a 100644
--- a/arch/mips/mti-malta/malta-dtshim.c
+++ b/arch/mips/mti-malta/malta-dtshim.c
@@ -31,7 +31,7 @@ static unsigned __init gen_fdt_mem_array(__be32 *mem_array, unsigned long size)
entries = 1;
mem_array[0] = cpu_to_be32(PHYS_OFFSET);
- if (config_enabled(CONFIG_EVA)) {
+ if (IS_ENABLED(CONFIG_EVA)) {
/*
* The current Malta EVA configuration is "special" in that it
* always makes use of addresses in the upper half of the 32 bit
@@ -82,7 +82,7 @@ static void __init append_memory(void *fdt, int root_off)
physical_memsize = 32 << 20;
}
- if (config_enabled(CONFIG_CPU_BIG_ENDIAN)) {
+ if (IS_ENABLED(CONFIG_CPU_BIG_ENDIAN)) {
/*
* SOC-it swaps, or perhaps doesn't swap, when DMA'ing
* the last word of physical memory.
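
The config_enabled() to IS_ENABLED() conversions in this and the following files keep the same behaviour: IS_ENABLED(CONFIG_FOO) is a compile-time constant that is true for =y or =m and usable in ordinary C expressions, so the untaken branch is still parsed and type-checked before being folded away. A hedged sketch of the pattern (CONFIG_EVA stands in for any Kconfig symbol; the helper is illustrative, not kernel code):

#include <linux/kconfig.h>

/* IS_ENABLED() evaluates at compile time, so the dead branch is still
 * type-checked and then optimised out. */
static unsigned long pick_membase(unsigned long low, unsigned long high)
{
        if (IS_ENABLED(CONFIG_EVA))
                return high;
        return low;
}
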
diff --git a/arch/mips/mti-malta/malta-memory.c b/arch/mips/mti-malta/malta-memory.c
index d5f8dae6a797..a47556723b85 100644
--- a/arch/mips/mti-malta/malta-memory.c
+++ b/arch/mips/mti-malta/malta-memory.c
@@ -32,7 +32,7 @@ static void free_init_pages_eva_malta(void *begin, void *end)
void __init fw_meminit(void)
{
- bool eva = config_enabled(CONFIG_EVA);
+ bool eva = IS_ENABLED(CONFIG_EVA);
free_init_pages_eva = eva ? free_init_pages_eva_malta : NULL;
}
diff --git a/arch/mips/mti-malta/malta-setup.c b/arch/mips/mti-malta/malta-setup.c
index 4740c82fb97a..ec5b21678fad 100644
--- a/arch/mips/mti-malta/malta-setup.c
+++ b/arch/mips/mti-malta/malta-setup.c
@@ -248,15 +248,20 @@ static void __init bonito_quirks_setup(void)
#endif
}
+void __init *plat_get_fdt(void)
+{
+ return (void *)__dtb_start;
+}
+
void __init plat_mem_setup(void)
{
unsigned int i;
- void *fdt = __dtb_start;
+ void *fdt = plat_get_fdt();
fdt = malta_dt_shim(fdt);
__dt_setup_arch(fdt);
- if (config_enabled(CONFIG_EVA))
+ if (IS_ENABLED(CONFIG_EVA))
/* EVA has already been configured in mach-malta/kernel-init.h */
pr_info("Enhanced Virtual Addressing (EVA) activated\n");
diff --git a/arch/mips/mti-malta/malta-time.c b/arch/mips/mti-malta/malta-time.c
index b7bf721eabf5..7407da04f8d6 100644
--- a/arch/mips/mti-malta/malta-time.c
+++ b/arch/mips/mti-malta/malta-time.c
@@ -21,6 +21,7 @@
#include <linux/i8253.h>
#include <linux/init.h>
#include <linux/kernel_stat.h>
+#include <linux/math64.h>
#include <linux/sched.h>
#include <linux/spinlock.h>
#include <linux/interrupt.h>
@@ -72,6 +73,8 @@ static void __init estimate_frequencies(void)
{
unsigned long flags;
unsigned int count, start;
+ unsigned char secs1, secs2, ctrl;
+ int secs;
cycle_t giccount = 0, gicstart = 0;
#if defined(CONFIG_KVM_GUEST) && CONFIG_KVM_GUEST_TIMER_FREQ
@@ -81,32 +84,51 @@ static void __init estimate_frequencies(void)
local_irq_save(flags);
- /* Start counter exactly on falling edge of update flag. */
+ if (gic_present)
+ gic_start_count();
+
+ /*
+ * Read counters exactly on rising edge of update flag.
+ * This helps get an accurate reading under virtualisation.
+ */
while (CMOS_READ(RTC_REG_A) & RTC_UIP);
while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));
-
- /* Initialize counters. */
start = read_c0_count();
- if (gic_present) {
- gic_start_count();
+ if (gic_present)
gicstart = gic_read_count();
- }
- /* Read counter exactly on falling edge of update flag. */
+ /* Wait for falling edge before reading RTC. */
while (CMOS_READ(RTC_REG_A) & RTC_UIP);
- while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));
+ secs1 = CMOS_READ(RTC_SECONDS);
+ /* Read counters again exactly on rising edge of update flag. */
+ while (!(CMOS_READ(RTC_REG_A) & RTC_UIP));
count = read_c0_count();
if (gic_present)
giccount = gic_read_count();
+ /* Wait for falling edge before reading RTC again. */
+ while (CMOS_READ(RTC_REG_A) & RTC_UIP);
+ secs2 = CMOS_READ(RTC_SECONDS);
+
+ ctrl = CMOS_READ(RTC_CONTROL);
+
local_irq_restore(flags);
+ if (!(ctrl & RTC_DM_BINARY) || RTC_ALWAYS_BCD) {
+ secs1 = bcd2bin(secs1);
+ secs2 = bcd2bin(secs2);
+ }
+ secs = secs2 - secs1;
+ if (secs < 1)
+ secs += 60;
+
count -= start;
+ count /= secs;
mips_hpt_frequency = count;
if (gic_present) {
- giccount -= gicstart;
+ giccount = div_u64(giccount - gicstart, secs);
gic_frequency = giccount;
}
}
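
The reworked estimate_frequencies() above calibrates against elapsed RTC seconds rather than a single update-flag interval: it samples the CP0 (and GIC) counters on rising edges of the update flag, reads the RTC seconds register on the surrounding falling edges, converts from BCD when the RTC is in BCD mode, and divides the counter delta by the number of seconds, adding 60 if the seconds wrapped past a minute. The arithmetic in isolation looks roughly like this (stand-alone sketch, not the kernel code):

#include <stdint.h>

static unsigned int bcd2bin_u8(unsigned char v)
{
        return (v & 0x0f) + (v >> 4) * 10;      /* classic BCD decode */
}

static uint32_t counts_per_second(uint32_t start, uint32_t end,
                                  unsigned char secs1, unsigned char secs2)
{
        int secs = bcd2bin_u8(secs2) - bcd2bin_u8(secs1);

        if (secs < 1)                   /* seconds wrapped past the minute */
                secs += 60;

        return (end - start) / secs;    /* unsigned subtraction tolerates counter wrap */
}
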
diff --git a/arch/mips/mti-sead3/sead3-setup.c b/arch/mips/mti-sead3/sead3-setup.c
index e43f4801a245..edfcaf06680d 100644
--- a/arch/mips/mti-sead3/sead3-setup.c
+++ b/arch/mips/mti-sead3/sead3-setup.c
@@ -8,7 +8,6 @@
*/
#include <linux/init.h>
#include <linux/libfdt.h>
-#include <linux/of_platform.h>
#include <linux/of_fdt.h>
#include <asm/prom.h>
@@ -83,6 +82,11 @@ static void __init parse_memsize_param(void)
}
}
+void __init *plat_get_fdt(void)
+{
+ return (void *)__dtb_start;
+}
+
void __init plat_mem_setup(void)
{
/* allow command line/bootloader env to override memory size in DT */
@@ -102,10 +106,3 @@ void __init device_tree_init(void)
unflatten_and_copy_device_tree();
}
-
-static int __init customize_machine(void)
-{
- of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL);
- return 0;
-}
-arch_initcall(customize_machine);
diff --git a/arch/mips/net/bpf_jit.c b/arch/mips/net/bpf_jit.c
index 1a8c96035716..39e7b472f0d8 100644
--- a/arch/mips/net/bpf_jit.c
+++ b/arch/mips/net/bpf_jit.c
@@ -426,7 +426,7 @@ static inline void emit_load_ptr(unsigned int dst, unsigned int src,
static inline void emit_load_func(unsigned int reg, ptr imm,
struct jit_ctx *ctx)
{
- if (config_enabled(CONFIG_64BIT)) {
+ if (IS_ENABLED(CONFIG_64BIT)) {
/* At this point imm is always 64-bit */
emit_load_imm(r_tmp, (u64)imm >> 32, ctx);
emit_dsll(r_tmp_imm, r_tmp, 16, ctx); /* left shift by 16 */
@@ -516,7 +516,7 @@ static inline void emit_jr(unsigned int reg, struct jit_ctx *ctx)
static inline u16 align_sp(unsigned int num)
{
/* Double word alignment for 32-bit, quadword for 64-bit */
- unsigned int align = config_enabled(CONFIG_64BIT) ? 16 : 8;
+ unsigned int align = IS_ENABLED(CONFIG_64BIT) ? 16 : 8;
num = (num + (align - 1)) & -align;
return num;
}
@@ -1199,7 +1199,7 @@ void bpf_jit_compile(struct bpf_prog *fp)
memset(&ctx, 0, sizeof(ctx));
- ctx.offsets = kcalloc(fp->len, sizeof(*ctx.offsets), GFP_KERNEL);
+ ctx.offsets = kcalloc(fp->len + 1, sizeof(*ctx.offsets), GFP_KERNEL);
if (ctx.offsets == NULL)
return;
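
The kcalloc() change above grows the per-instruction offset table by one entry; the likely reason (an assumption here, not stated in the hunk) is that the JIT also records the offset one past the last instruction, so branches to the program exit have a valid table slot. A toy illustration of that sizing convention, with hypothetical names:

#include <stdlib.h>

struct offtab {
        unsigned int *off;      /* off[i] = offset of instruction i;
                                   off[len] = end of the generated code (assumed) */
        size_t len;
};

static int offtab_init(struct offtab *t, size_t ninsns)
{
        t->len = ninsns;
        t->off = calloc(ninsns + 1, sizeof(*t->off));   /* note the "+ 1" */
        return t->off ? 0 : -1;
}
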
diff --git a/arch/mips/netlogic/common/nlm-dma.c b/arch/mips/netlogic/common/nlm-dma.c
index 3758715d4ab6..0630693bec2a 100644
--- a/arch/mips/netlogic/common/nlm-dma.c
+++ b/arch/mips/netlogic/common/nlm-dma.c
@@ -45,7 +45,7 @@
static char *nlm_swiotlb;
static void *nlm_dma_alloc_coherent(struct device *dev, size_t size,
- dma_addr_t *dma_handle, gfp_t gfp, struct dma_attrs *attrs)
+ dma_addr_t *dma_handle, gfp_t gfp, unsigned long attrs)
{
/* ignore region specifiers */
gfp &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);
@@ -62,7 +62,7 @@ static void *nlm_dma_alloc_coherent(struct device *dev, size_t size,
}
static void nlm_dma_free_coherent(struct device *dev, size_t size,
- void *vaddr, dma_addr_t dma_handle, struct dma_attrs *attrs)
+ void *vaddr, dma_addr_t dma_handle, unsigned long attrs)
{
swiotlb_free_coherent(dev, size, vaddr, dma_handle);
}
diff --git a/arch/mips/netlogic/common/reset.S b/arch/mips/netlogic/common/reset.S
index edbab9b8691f..c474981a6c0d 100644
--- a/arch/mips/netlogic/common/reset.S
+++ b/arch/mips/netlogic/common/reset.S
@@ -50,7 +50,6 @@
#include <asm/netlogic/xlp-hal/sys.h>
#include <asm/netlogic/xlp-hal/cpucontrol.h>
-#define CP0_EBASE $15
#define SYS_CPU_COHERENT_BASE CKSEG1ADDR(XLP_DEFAULT_IO_BASE) + \
XLP_IO_SYS_OFFSET(0) + XLP_IO_PCI_HDRSZ + \
SYS_CPU_NONCOHERENT_MODE * 4
@@ -92,7 +91,7 @@
* registers. On XLPII CPUs, usual cache instructions work.
*/
.macro xlp_flush_l1_dcache
- mfc0 t0, CP0_EBASE, 0
+ mfc0 t0, CP0_PRID
andi t0, t0, PRID_IMP_MASK
slt t1, t0, 0x1200
beqz t1, 15f
@@ -171,7 +170,7 @@ FEXPORT(nlm_reset_entry)
nop
1: /* Entry point on core wakeup */
- mfc0 t0, CP0_EBASE, 0 /* processor ID */
+ mfc0 t0, CP0_PRID /* processor ID */
andi t0, PRID_IMP_MASK
li t1, 0x1500 /* XLP 9xx */
beq t0, t1, 2f /* does not need to set coherent */
@@ -182,8 +181,8 @@ FEXPORT(nlm_reset_entry)
nop
/* set bit in SYS coherent register for the core */
- mfc0 t0, CP0_EBASE, 1
- mfc0 t1, CP0_EBASE, 1
+ mfc0 t0, CP0_EBASE
+ mfc0 t1, CP0_EBASE
srl t1, 5
andi t1, 0x3 /* t1 <- node */
li t2, 0x40000
@@ -232,7 +231,7 @@ EXPORT(nlm_boot_siblings)
* NOTE: All GPR contents are lost after the mtcr above!
*/
- mfc0 v0, CP0_EBASE, 1
+ mfc0 v0, CP0_EBASE
andi v0, 0x3ff /* v0 <- node/core */
/*
diff --git a/arch/mips/netlogic/common/smpboot.S b/arch/mips/netlogic/common/smpboot.S
index 805355b0bd05..f0cc4c9de2bb 100644
--- a/arch/mips/netlogic/common/smpboot.S
+++ b/arch/mips/netlogic/common/smpboot.S
@@ -48,8 +48,6 @@
#include <asm/netlogic/xlp-hal/sys.h>
#include <asm/netlogic/xlp-hal/cpucontrol.h>
-#define CP0_EBASE $15
-
.set noreorder
.set noat
.set arch=xlr /* for mfcr/mtcr, XLR is sufficient */
@@ -86,7 +84,7 @@ NESTED(nlm_boot_secondary_cpus, 16, sp)
PTR_L gp, 0(t1)
/* a0 has the processor id */
- mfc0 a0, CP0_EBASE, 1
+ mfc0 a0, CP0_EBASE
andi a0, 0x3ff /* a0 <- node/core */
PTR_LA t0, nlm_early_init_secondary
jalr t0
diff --git a/arch/mips/netlogic/xlp/nlm_hal.c b/arch/mips/netlogic/xlp/nlm_hal.c
index 80ec929747c3..25ee69489e5e 100644
--- a/arch/mips/netlogic/xlp/nlm_hal.c
+++ b/arch/mips/netlogic/xlp/nlm_hal.c
@@ -58,7 +58,7 @@ void nlm_node_init(int node)
nodep->coremask = 1; /* node 0, boot cpu */
nodep->sysbase = nlm_get_sys_regbase(node);
nodep->picbase = nlm_get_pic_regbase(node);
- nodep->ebase = read_c0_ebase() & (~((1 << 12) - 1));
+ nodep->ebase = read_c0_ebase() & MIPS_EBASE_BASE;
if (cpu_is_xlp9xx())
nodep->socbus = xlp9xx_get_socbus(node);
else
diff --git a/arch/mips/netlogic/xlr/setup.c b/arch/mips/netlogic/xlr/setup.c
index d118b9aa7647..72ceddc9a03f 100644
--- a/arch/mips/netlogic/xlr/setup.c
+++ b/arch/mips/netlogic/xlr/setup.c
@@ -168,7 +168,7 @@ static void nlm_init_node(void)
nodep = nlm_current_node();
nodep->picbase = nlm_mmio_base(NETLOGIC_IO_PIC_OFFSET);
- nodep->ebase = read_c0_ebase() & (~((1 << 12) - 1));
+ nodep->ebase = read_c0_ebase() & MIPS_EBASE_BASE;
spin_lock_init(&nodep->piclock);
}
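
Both netlogic call sites above swap the open-coded ~((1 << 12) - 1) mask for the named constant; EBase keeps its exception-vector base in bits 31:12, so the two masks select the same bits (MIPS_EBASE_BASE is expected to equal 0xfffff000 here). A quick user-space check of the equivalence:

#include <assert.h>
#include <stdint.h>

int main(void)
{
        uint32_t ebase = 0x80123abc;

        assert((ebase & ~((1u << 12) - 1)) == (ebase & 0xfffff000u));
        assert((ebase & 0xfffff000u) == 0x80123000u);
        return 0;
}
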
diff --git a/arch/mips/oprofile/common.c b/arch/mips/oprofile/common.c
index 3c9ec3ddca84..2f33992f6dff 100644
--- a/arch/mips/oprofile/common.c
+++ b/arch/mips/oprofile/common.c
@@ -77,7 +77,7 @@ int __init oprofile_arch_init(struct oprofile_operations *ops)
struct op_mips_model *lmodel = NULL;
int res;
- switch (current_cpu_type()) {
+ switch (boot_cpu_type()) {
case CPU_5KC:
case CPU_M14KC:
case CPU_M14KEC:
diff --git a/arch/mips/oprofile/op_impl.h b/arch/mips/oprofile/op_impl.h
index 7c2da27ece04..a4e758a39af4 100644
--- a/arch/mips/oprofile/op_impl.h
+++ b/arch/mips/oprofile/op_impl.h
@@ -24,7 +24,7 @@ struct op_counter_config {
unsigned long unit_mask;
};
-/* Per-architecture configury and hooks. */
+/* Per-architecture configure and hooks. */
struct op_mips_model {
void (*reg_setup) (struct op_counter_config *);
void (*cpu_setup) (void *dummy);
diff --git a/arch/mips/oprofile/op_model_loongson3.c b/arch/mips/oprofile/op_model_loongson3.c
index 8bcf7fc40f0d..85f3ee4ab456 100644
--- a/arch/mips/oprofile/op_model_loongson3.c
+++ b/arch/mips/oprofile/op_model_loongson3.c
@@ -168,33 +168,26 @@ static int loongson3_perfcount_handler(void)
return handled;
}
-static int loongson3_cpu_callback(struct notifier_block *nfb,
- unsigned long action, void *hcpu)
+static int loongson3_starting_cpu(unsigned int cpu)
{
- switch (action) {
- case CPU_STARTING:
- case CPU_STARTING_FROZEN:
- write_c0_perflo1(reg.control1);
- write_c0_perflo2(reg.control2);
- break;
- case CPU_DYING:
- case CPU_DYING_FROZEN:
- write_c0_perflo1(0xc0000000);
- write_c0_perflo2(0x40000000);
- break;
- }
-
- return NOTIFY_OK;
+ write_c0_perflo1(reg.control1);
+ write_c0_perflo2(reg.control2);
+ return 0;
}
-static struct notifier_block loongson3_notifier_block = {
- .notifier_call = loongson3_cpu_callback
-};
+static int loongson3_dying_cpu(unsigned int cpu)
+{
+ write_c0_perflo1(0xc0000000);
+ write_c0_perflo2(0x40000000);
+ return 0;
+}
static int __init loongson3_init(void)
{
on_each_cpu(reset_counters, NULL, 1);
- register_hotcpu_notifier(&loongson3_notifier_block);
+ cpuhp_setup_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING,
+ "AP_MIPS_OP_LOONGSON3_STARTING",
+ loongson3_starting_cpu, loongson3_dying_cpu);
save_perf_irq = perf_irq;
perf_irq = loongson3_perfcount_handler;
@@ -204,7 +197,7 @@ static int __init loongson3_init(void)
static void loongson3_exit(void)
{
on_each_cpu(reset_counters, NULL, 1);
- unregister_hotcpu_notifier(&loongson3_notifier_block);
+ cpuhp_remove_state_nocalls(CPUHP_AP_MIPS_OP_LOONGSON3_STARTING);
perf_irq = save_perf_irq;
}
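
The oprofile conversion above moves from a raw CPU notifier to the hotplug state machine: the driver registers one startup and one teardown callback and the core invokes them as CPUs come and go. A hedged sketch of the same pattern using a dynamically allocated online state instead of the fixed STARTING entry the driver uses (all names below are illustrative, not from the driver):

#include <linux/cpuhotplug.h>
#include <linux/init.h>
#include <linux/module.h>

static int hp_state;

static int mydrv_cpu_online(unsigned int cpu)
{
        /* program this CPU's counters/registers here */
        return 0;
}

static int mydrv_cpu_offline(unsigned int cpu)
{
        /* quiesce this CPU's state here */
        return 0;
}

static int __init mydrv_init(void)
{
        int ret;

        ret = cpuhp_setup_state(CPUHP_AP_ONLINE_DYN, "mydrv:online",
                                mydrv_cpu_online, mydrv_cpu_offline);
        if (ret < 0)
                return ret;
        hp_state = ret;         /* dynamic states return the slot they were given */
        return 0;
}

static void __exit mydrv_exit(void)
{
        cpuhp_remove_state(hp_state);
}

module_init(mydrv_init);
module_exit(mydrv_exit);
MODULE_LICENSE("GPL");
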
diff --git a/arch/mips/oprofile/op_model_mipsxx.c b/arch/mips/oprofile/op_model_mipsxx.c
index 8f988a61b7a8..45cb27469fba 100644
--- a/arch/mips/oprofile/op_model_mipsxx.c
+++ b/arch/mips/oprofile/op_model_mipsxx.c
@@ -269,11 +269,9 @@ static int mipsxx_perfcount_handler(void)
return handled;
}
-#define M_CONFIG1_PC (1 << 4)
-
static inline int __n_counters(void)
{
- if (!(read_c0_config1() & M_CONFIG1_PC))
+ if (!cpu_has_perf)
return 0;
if (!(read_c0_perfctrl0() & M_PERFCTL_MORE))
return 1;
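
For context, __n_counters() walks the performance-counter control registers and stops at the first one whose "more counters" bit is clear; the hunk above only changes how the presence of any counters is detected (cpu_has_perf instead of the open-coded Config1 PC bit). The discovery pattern, reduced to a stand-alone sketch with a stand-in accessor:

/* read_ctl() stands in for the per-index PerfCtl accessor. */
static int count_perf_counters(int have_perf, unsigned int (*read_ctl)(int),
                               unsigned int more_bit, int max)
{
        int n;

        if (!have_perf)
                return 0;
        for (n = 0; n < max - 1; n++)
                if (!(read_ctl(n) & more_bit))
                        return n + 1;
        return max;
}
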
diff --git a/arch/mips/pci/fixup-lantiq.c b/arch/mips/pci/fixup-lantiq.c
index c2ce41ea61d7..2b5427d3f35c 100644
--- a/arch/mips/pci/fixup-lantiq.c
+++ b/arch/mips/pci/fixup-lantiq.c
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright (C) 2012 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2012 John Crispin <john@phrozen.org>
*/
#include <linux/of_irq.h>
diff --git a/arch/mips/pci/ops-bridge.c b/arch/mips/pci/ops-bridge.c
index 438319465cb4..57e1463fcd02 100644
--- a/arch/mips/pci/ops-bridge.c
+++ b/arch/mips/pci/ops-bridge.c
@@ -33,9 +33,9 @@ static u32 emulate_ioc3_cfg(int where, int size)
* The Bridge ASIC supports both type 0 and type 1 access. Type 1 is
* not really documented, so right now I can't write code which uses it.
* Therefore we use type 0 accesses for now even though they won't work
- * correcly for PCI-to-PCI bridges.
+ * correctly for PCI-to-PCI bridges.
*
- * The function is complicated by the ultimate brokeness of the IOC3 chip
+ * The function is complicated by the ultimate brokenness of the IOC3 chip
* which is used in SGI systems. The IOC3 can only handle 32-bit PCI
* accesses and does only decode parts of it's address space.
*/
diff --git a/arch/mips/pci/ops-lantiq.c b/arch/mips/pci/ops-lantiq.c
index e5738ee26f4f..f51e10899cc2 100644
--- a/arch/mips/pci/ops-lantiq.c
+++ b/arch/mips/pci/ops-lantiq.c
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
*/
#include <linux/types.h>
diff --git a/arch/mips/pci/pci-alchemy.c b/arch/mips/pci/pci-alchemy.c
index 28952637a862..c8994c156e2d 100644
--- a/arch/mips/pci/pci-alchemy.c
+++ b/arch/mips/pci/pci-alchemy.c
@@ -76,7 +76,7 @@ static void mod_wired_entry(int entry, unsigned long entrylo0,
unsigned long old_ctx;
/* Save old context and create impossible VPN2 value */
- old_ctx = read_c0_entryhi() & 0xff;
+ old_ctx = read_c0_entryhi() & MIPS_ENTRYHI_ASID;
old_pagemask = read_c0_pagemask();
write_c0_index(entry);
write_c0_pagemask(pagemask);
diff --git a/arch/mips/pci/pci-ip32.c b/arch/mips/pci/pci-ip32.c
index b1e061f7fdc7..7ae89d0c7099 100644
--- a/arch/mips/pci/pci-ip32.c
+++ b/arch/mips/pci/pci-ip32.c
@@ -116,7 +116,6 @@ static struct pci_controller mace_pci_controller = {
.pci_ops = &mace_pci_ops,
.mem_resource = &mace_pci_mem_resource,
.io_resource = &mace_pci_io_resource,
- .iommu = 0,
.mem_offset = MACE_PCI_MEM_OFFSET,
.io_offset = 0,
.io_map_base = CKSEG1ADDR(MACEPCI_LOW_IO),
diff --git a/arch/mips/pci/pci-lantiq.c b/arch/mips/pci/pci-lantiq.c
index 6a15dbd085aa..b9deab17ccf2 100644
--- a/arch/mips/pci/pci-lantiq.c
+++ b/arch/mips/pci/pci-lantiq.c
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
*/
#include <linux/types.h>
diff --git a/arch/mips/pci/pci-lantiq.h b/arch/mips/pci/pci-lantiq.h
index 66bf6cd6be3c..0cc71253a497 100644
--- a/arch/mips/pci/pci-lantiq.h
+++ b/arch/mips/pci/pci-lantiq.h
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright (C) 2010 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2010 John Crispin <john@phrozen.org>
*/
#ifndef _LTQ_PCI_H__
diff --git a/arch/mips/pci/pci-mt7620.c b/arch/mips/pci/pci-mt7620.c
index 1ae932c2d78b..6ce816201699 100644
--- a/arch/mips/pci/pci-mt7620.c
+++ b/arch/mips/pci/pci-mt7620.c
@@ -2,7 +2,7 @@
* Ralink MT7620A SoC PCI support
*
* Copyright (C) 2007-2013 Bruce Chang (Mediatek)
- * Copyright (C) 2013-2016 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013-2016 John Crispin <john@phrozen.org>
*
* This program is free software; you can redistribute it and/or modify it
* under the terms of the GNU General Public License version 2 as published
diff --git a/arch/mips/pci/pci-rt2880.c b/arch/mips/pci/pci-rt2880.c
index a245cad4372a..f2a1050168d9 100644
--- a/arch/mips/pci/pci-rt2880.c
+++ b/arch/mips/pci/pci-rt2880.c
@@ -1,7 +1,7 @@
/*
* Ralink RT288x SoC PCI register definitions
*
- * Copyright (C) 2009 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2009 John Crispin <john@phrozen.org>
* Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
*
* Parts of this file are based on Ralink's 2.6.21 BSP
diff --git a/arch/mips/pci/pci.c b/arch/mips/pci/pci.c
index b8a0bf5766f2..b4c02f29663e 100644
--- a/arch/mips/pci/pci.c
+++ b/arch/mips/pci/pci.c
@@ -83,9 +83,6 @@ static void pcibios_scanbus(struct pci_controller *hose)
LIST_HEAD(resources);
struct pci_bus *bus;
- if (!hose->iommu)
- PCI_DMA_BUS_IS_PHYS = 1;
-
if (hose->get_busno && pci_has_flag(PCI_PROBE_ONLY))
next_busno = (*hose->get_busno)();
@@ -115,7 +112,14 @@ static void pcibios_scanbus(struct pci_controller *hose)
need_domain_info = 1;
}
- if (!pci_has_flag(PCI_PROBE_ONLY)) {
+ /*
+ * We insert PCI resources into the iomem_resource and
+ * ioport_resource trees in either pci_bus_claim_resources()
+ * or pci_bus_assign_resources().
+ */
+ if (pci_has_flag(PCI_PROBE_ONLY)) {
+ pci_bus_claim_resources(bus);
+ } else {
pci_bus_size_bridges(bus);
pci_bus_assign_resources(bus);
}
@@ -322,6 +326,16 @@ void pcibios_fixup_bus(struct pci_bus *bus)
EXPORT_SYMBOL(PCIBIOS_MIN_IO);
EXPORT_SYMBOL(PCIBIOS_MIN_MEM);
+void pci_resource_to_user(const struct pci_dev *dev, int bar,
+ const struct resource *rsrc, resource_size_t *start,
+ resource_size_t *end)
+{
+ phys_addr_t size = resource_size(rsrc);
+
+ *start = fixup_bigphys_addr(rsrc->start, size);
+ *end = rsrc->start + size;
+}
+
int pci_mmap_page_range(struct pci_dev *dev, struct vm_area_struct *vma,
enum pci_mmap_state mmap_state, int write_combine)
{
diff --git a/arch/mips/pic32/Kconfig b/arch/mips/pic32/Kconfig
index 1985971b9890..527d37da05ac 100644
--- a/arch/mips/pic32/Kconfig
+++ b/arch/mips/pic32/Kconfig
@@ -14,7 +14,7 @@ config PIC32MZDA
select SYS_HAS_EARLY_PRINTK
select SYS_SUPPORTS_32BIT_KERNEL
select SYS_SUPPORTS_LITTLE_ENDIAN
- select ARCH_REQUIRE_GPIOLIB
+ select GPIOLIB
select COMMON_CLK
select CLKDEV_LOOKUP
select LIBFDT
diff --git a/arch/mips/pic32/pic32mzda/init.c b/arch/mips/pic32/pic32mzda/init.c
index 775ff90a9962..51599710472b 100644
--- a/arch/mips/pic32/pic32mzda/init.c
+++ b/arch/mips/pic32/pic32mzda/init.c
@@ -33,8 +33,8 @@ static ulong get_fdtaddr(void)
{
ulong ftaddr = 0;
- if ((fw_arg0 == -2) && fw_arg1 && !fw_arg2 && !fw_arg3)
- return (ulong)fw_arg1;
+ if (fw_passed_dtb && !fw_arg2 && !fw_arg3)
+ return (ulong)fw_passed_dtb;
if (__dtb_start < __dtb_end)
ftaddr = (ulong)__dtb_start;
@@ -147,8 +147,7 @@ static int __init plat_of_setup(void)
panic("Device tree not present");
pic32_of_prepare_platform_data(pic32_auxdata_lookup);
- if (of_platform_populate(NULL, of_default_bus_match_table,
- pic32_auxdata_lookup, NULL))
+ if (of_platform_default_populate(NULL, pic32_auxdata_lookup, NULL))
panic("Failed to populate DT");
return 0;
diff --git a/arch/mips/pic32/pic32mzda/time.c b/arch/mips/pic32/pic32mzda/time.c
index ca6a62bb10db..62a0a78b6c64 100644
--- a/arch/mips/pic32/pic32mzda/time.c
+++ b/arch/mips/pic32/pic32mzda/time.c
@@ -11,13 +11,12 @@
* FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
* for more details.
*/
-#include <linux/clk.h>
#include <linux/clk-provider.h>
#include <linux/clocksource.h>
#include <linux/init.h>
+#include <linux/irqdomain.h>
#include <linux/of.h>
#include <linux/of_irq.h>
-#include <linux/irqdomain.h>
#include <asm/time.h>
@@ -58,16 +57,12 @@ unsigned int get_c0_compare_int(void)
void __init plat_time_init(void)
{
- struct clk *clk;
+ unsigned long rate = pic32_get_pbclk(7);
of_clk_init(NULL);
- clk = clk_get_sys("cpu_clk", NULL);
- if (IS_ERR(clk))
- panic("unable to get CPU clock, err=%ld", PTR_ERR(clk));
- clk_prepare_enable(clk);
- pr_info("CPU Clock: %ldMHz\n", clk_get_rate(clk) / 1000000);
- mips_hpt_frequency = clk_get_rate(clk) / 2;
+ pr_info("CPU Clock: %ldMHz\n", rate / 1000000);
+ mips_hpt_frequency = rate / 2;
clocksource_probe();
}
diff --git a/arch/mips/pistachio/init.c b/arch/mips/pistachio/init.c
index 96ba2cc9ad3e..1c91cad7988f 100644
--- a/arch/mips/pistachio/init.c
+++ b/arch/mips/pistachio/init.c
@@ -2,6 +2,7 @@
* Pistachio platform setup
*
* Copyright (C) 2014 Google, Inc.
+ * Copyright (C) 2016 Imagination Technologies
*
* This program is free software; you can redistribute it and/or modify it
* under the terms and conditions of the GNU General Public License,
@@ -9,10 +10,10 @@
*/
#include <linux/init.h>
+#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/of_address.h>
#include <linux/of_fdt.h>
-#include <linux/of_platform.h>
#include <asm/cacheflush.h>
#include <asm/dma-coherence.h>
@@ -24,42 +25,50 @@
#include <asm/smp-ops.h>
#include <asm/traps.h>
+/*
+ * Core revision register decoding
+ * Bits 23 to 20: Major rev
+ * Bits 15 to 8: Minor rev
+ * Bits 7 to 0: Maintenance rev
+ */
+#define PISTACHIO_CORE_REV_REG 0xB81483D0
+#define PISTACHIO_CORE_REV_A1 0x00100006
+#define PISTACHIO_CORE_REV_B0 0x00100106
+
const char *get_system_type(void)
{
- return "IMG Pistachio SoC";
-}
+ u32 core_rev;
+ const char *sys_type;
-static void __init plat_setup_iocoherency(void)
-{
- /*
- * Kernel has been configured with software coherency
- * but we might choose to turn it off and use hardware
- * coherency instead.
- */
- if (mips_cm_numiocu() != 0) {
- /* Nothing special needs to be done to enable coherency */
- pr_info("CMP IOCU detected\n");
- hw_coherentio = 1;
- if (coherentio == 0)
- pr_info("Hardware DMA cache coherency disabled\n");
- else
- pr_info("Hardware DMA cache coherency enabled\n");
- } else {
- if (coherentio == 1)
- pr_info("Hardware DMA cache coherency unsupported, but enabled from command line!\n");
- else
- pr_info("Software DMA cache coherency enabled\n");
+ core_rev = __raw_readl((const void *)PISTACHIO_CORE_REV_REG);
+
+ switch (core_rev) {
+ case PISTACHIO_CORE_REV_B0:
+ sys_type = "IMG Pistachio SoC (B0)";
+ break;
+
+ case PISTACHIO_CORE_REV_A1:
+ sys_type = "IMG Pistachio SoC (A1)";
+ break;
+
+ default:
+ sys_type = "IMG Pistachio SoC";
+ break;
}
+
+ return sys_type;
}
-void __init plat_mem_setup(void)
+void __init *plat_get_fdt(void)
{
if (fw_arg0 != -2)
panic("Device-tree not present");
+ return (void *)fw_arg1;
+}
- __dt_setup_arch((void *)fw_arg1);
-
- plat_setup_iocoherency();
+void __init plat_mem_setup(void)
+{
+ __dt_setup_arch(plat_get_fdt());
}
#define DEFAULT_CPC_BASE_ADDR 0x1bde0000
@@ -109,6 +118,8 @@ void __init prom_init(void)
mips_cm_probe();
mips_cpc_probe();
register_cps_smp_ops();
+
+ pr_info("SoC Type: %s\n", get_system_type());
}
void __init prom_free_prom_memory(void)
@@ -122,15 +133,3 @@ void __init device_tree_init(void)
unflatten_and_copy_device_tree();
}
-
-static int __init plat_of_setup(void)
-{
- if (!of_have_populated_dt())
- panic("Device tree not present");
-
- if (of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL))
- panic("Failed to populate DT");
-
- return 0;
-}
-arch_initcall(plat_of_setup);
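
get_system_type() above distinguishes Pistachio revisions by reading the core revision register whose layout the new comment documents (bits 23:20 major, 15:8 minor, 7:0 maintenance). Decoding that layout in isolation, with the two revision values used above as a check (user-space sketch, helper names illustrative):

#include <stdint.h>
#include <stdio.h>

static void print_core_rev(uint32_t rev)
{
        unsigned int major = (rev >> 20) & 0xf;
        unsigned int minor = (rev >> 8) & 0xff;
        unsigned int maint = rev & 0xff;

        printf("core rev %u.%u.%u\n", major, minor, maint);
}

int main(void)
{
        print_core_rev(0x00100106);     /* PISTACHIO_CORE_REV_B0 -> 1.1.6 */
        print_core_rev(0x00100006);     /* PISTACHIO_CORE_REV_A1 -> 1.0.6 */
        return 0;
}
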
diff --git a/arch/mips/pmcs-msp71xx/msp_setup.c b/arch/mips/pmcs-msp71xx/msp_setup.c
index 9d293b3e9130..a63b73610fd4 100644
--- a/arch/mips/pmcs-msp71xx/msp_setup.c
+++ b/arch/mips/pmcs-msp71xx/msp_setup.c
@@ -118,7 +118,7 @@ void msp_restart(char *command)
/* No chip-specific reset code, just jump to the ROM reset vector */
set_c0_status(ST0_BEV | ST0_ERL);
change_c0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);
- flush_cache_all();
+ __flush_cache_all();
write_c0_wired(0);
__asm__ __volatile__("jr\t%0"::"r"(0xbfc00000));
diff --git a/arch/mips/pnx833x/common/setup.c b/arch/mips/pnx833x/common/setup.c
index 99b4d94236cc..8a7443b2535e 100644
--- a/arch/mips/pnx833x/common/setup.c
+++ b/arch/mips/pnx833x/common/setup.c
@@ -38,9 +38,6 @@ extern void pnx833x_machine_power_off(void);
int __init plat_mem_setup(void)
{
- /* fake pci bus to avoid bounce buffers */
- PCI_DMA_BUS_IS_PHYS = 1;
-
/* set mips clock to 320MHz */
#if defined(CONFIG_SOC_PNX8335)
PNX8335_WRITEFIELD(0x17, CLOCK_PLL_CPU_CTL, FREQ);
diff --git a/arch/mips/ralink/Makefile b/arch/mips/ralink/Makefile
index 0d1795a0321e..fe3471533820 100644
--- a/arch/mips/ralink/Makefile
+++ b/arch/mips/ralink/Makefile
@@ -4,7 +4,7 @@
# Makefile for the Ralink common stuff
#
# Copyright (C) 2009-2011 Gabor Juhos <juhosg@openwrt.org>
-# Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+# Copyright (C) 2013 John Crispin <john@phrozen.org>
obj-y := prom.o of.o reset.o
diff --git a/arch/mips/ralink/bootrom.c b/arch/mips/ralink/bootrom.c
index 5403468394fb..e1fa5972a81d 100644
--- a/arch/mips/ralink/bootrom.c
+++ b/arch/mips/ralink/bootrom.c
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
*/
#include <linux/debugfs.h>
diff --git a/arch/mips/ralink/cevt-rt3352.c b/arch/mips/ralink/cevt-rt3352.c
index e46f91f971c5..f24eee04e16a 100644
--- a/arch/mips/ralink/cevt-rt3352.c
+++ b/arch/mips/ralink/cevt-rt3352.c
@@ -3,7 +3,7 @@
* License. See the file "COPYING" in the main directory of this archive
* for more details.
*
- * Copyright (C) 2013 by John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 by John Crispin <john@phrozen.org>
*/
#include <linux/clockchips.h>
@@ -117,11 +117,13 @@ static int systick_set_oneshot(struct clock_event_device *evt)
return 0;
}
-static void __init ralink_systick_init(struct device_node *np)
+static int __init ralink_systick_init(struct device_node *np)
{
+ int ret;
+
systick.membase = of_iomap(np, 0);
if (!systick.membase)
- return;
+ return -ENXIO;
systick_irqaction.name = np->name;
systick.dev.name = np->name;
@@ -131,16 +133,21 @@ static void __init ralink_systick_init(struct device_node *np)
systick.dev.irq = irq_of_parse_and_map(np, 0);
if (!systick.dev.irq) {
pr_err("%s: request_irq failed", np->name);
- return;
+ return -EINVAL;
}
- clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name,
- SYSTICK_FREQ, 301, 16, clocksource_mmio_readl_up);
+ ret = clocksource_mmio_init(systick.membase + SYSTICK_COUNT, np->name,
+ SYSTICK_FREQ, 301, 16,
+ clocksource_mmio_readl_up);
+ if (ret)
+ return ret;
clockevents_register_device(&systick.dev);
pr_info("%s: running - mult: %d, shift: %d\n",
np->name, systick.dev.mult, systick.dev.shift);
+
+ return 0;
}
CLOCKSOURCE_OF_DECLARE(systick, "ralink,cevt-systick", ralink_systick_init);
diff --git a/arch/mips/ralink/clk.c b/arch/mips/ralink/clk.c
index 25c4a61779f1..ebaa7cc0e995 100644
--- a/arch/mips/ralink/clk.c
+++ b/arch/mips/ralink/clk.c
@@ -4,7 +4,7 @@
* by the Free Software Foundation.
*
* Copyright (C) 2011 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
*/
#include <linux/kernel.h>
diff --git a/arch/mips/ralink/common.h b/arch/mips/ralink/common.h
index 8e7d8e618fb9..b8245d0940d6 100644
--- a/arch/mips/ralink/common.h
+++ b/arch/mips/ralink/common.h
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
*/
#ifndef _RALINK_COMMON_H__
diff --git a/arch/mips/ralink/ill_acc.c b/arch/mips/ralink/ill_acc.c
index e10d10b9e82a..765d5ba98fa2 100644
--- a/arch/mips/ralink/ill_acc.c
+++ b/arch/mips/ralink/ill_acc.c
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
*/
#include <linux/interrupt.h>
diff --git a/arch/mips/ralink/irq-gic.c b/arch/mips/ralink/irq-gic.c
index 50d6c55ab1de..2058280450b5 100644
--- a/arch/mips/ralink/irq-gic.c
+++ b/arch/mips/ralink/irq-gic.c
@@ -4,7 +4,7 @@
* by the Free Software Foundation.
*
* Copyright (C) 2015 Nikolay Martynov <mar.kolya@gmail.com>
- * Copyright (C) 2015 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2015 John Crispin <john@phrozen.org>
*/
#include <linux/init.h>
diff --git a/arch/mips/ralink/irq.c b/arch/mips/ralink/irq.c
index 4cf77f358395..4911c1445f1a 100644
--- a/arch/mips/ralink/irq.c
+++ b/arch/mips/ralink/irq.c
@@ -4,7 +4,7 @@
* by the Free Software Foundation.
*
* Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
*/
#include <linux/io.h>
diff --git a/arch/mips/ralink/mt7620.c b/arch/mips/ralink/mt7620.c
index 0d3d1a97895f..3c7c9bf57bf3 100644
--- a/arch/mips/ralink/mt7620.c
+++ b/arch/mips/ralink/mt7620.c
@@ -7,7 +7,7 @@
*
* Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
*/
#include <linux/kernel.h>
@@ -175,7 +175,7 @@ static struct rt2880_pmx_func spi_cs1_grp_mt7628[] = {
};
static struct rt2880_pmx_func spis_grp_mt7628[] = {
- FUNC("pwm", 3, 14, 4),
+ FUNC("pwm_uart2", 3, 14, 4),
FUNC("util", 2, 14, 4),
FUNC("gpio", 1, 14, 4),
FUNC("spis", 0, 14, 4),
@@ -188,6 +188,41 @@ static struct rt2880_pmx_func gpio_grp_mt7628[] = {
FUNC("gpio", 0, 11, 1),
};
+static struct rt2880_pmx_func p4led_kn_grp_mt7628[] = {
+ FUNC("jtag", 3, 30, 1),
+ FUNC("util", 2, 30, 1),
+ FUNC("gpio", 1, 30, 1),
+ FUNC("p4led_kn", 0, 30, 1),
+};
+
+static struct rt2880_pmx_func p3led_kn_grp_mt7628[] = {
+ FUNC("jtag", 3, 31, 1),
+ FUNC("util", 2, 31, 1),
+ FUNC("gpio", 1, 31, 1),
+ FUNC("p3led_kn", 0, 31, 1),
+};
+
+static struct rt2880_pmx_func p2led_kn_grp_mt7628[] = {
+ FUNC("jtag", 3, 32, 1),
+ FUNC("util", 2, 32, 1),
+ FUNC("gpio", 1, 32, 1),
+ FUNC("p2led_kn", 0, 32, 1),
+};
+
+static struct rt2880_pmx_func p1led_kn_grp_mt7628[] = {
+ FUNC("jtag", 3, 33, 1),
+ FUNC("util", 2, 33, 1),
+ FUNC("gpio", 1, 33, 1),
+ FUNC("p1led_kn", 0, 33, 1),
+};
+
+static struct rt2880_pmx_func p0led_kn_grp_mt7628[] = {
+ FUNC("jtag", 3, 34, 1),
+ FUNC("rsvd", 2, 34, 1),
+ FUNC("gpio", 1, 34, 1),
+ FUNC("p0led_kn", 0, 34, 1),
+};
+
static struct rt2880_pmx_func wled_kn_grp_mt7628[] = {
FUNC("rsvd", 3, 35, 1),
FUNC("rsvd", 2, 35, 1),
@@ -195,16 +230,61 @@ static struct rt2880_pmx_func wled_kn_grp_mt7628[] = {
FUNC("wled_kn", 0, 35, 1),
};
+static struct rt2880_pmx_func p4led_an_grp_mt7628[] = {
+ FUNC("jtag", 3, 39, 1),
+ FUNC("util", 2, 39, 1),
+ FUNC("gpio", 1, 39, 1),
+ FUNC("p4led_an", 0, 39, 1),
+};
+
+static struct rt2880_pmx_func p3led_an_grp_mt7628[] = {
+ FUNC("jtag", 3, 40, 1),
+ FUNC("util", 2, 40, 1),
+ FUNC("gpio", 1, 40, 1),
+ FUNC("p3led_an", 0, 40, 1),
+};
+
+static struct rt2880_pmx_func p2led_an_grp_mt7628[] = {
+ FUNC("jtag", 3, 41, 1),
+ FUNC("util", 2, 41, 1),
+ FUNC("gpio", 1, 41, 1),
+ FUNC("p2led_an", 0, 41, 1),
+};
+
+static struct rt2880_pmx_func p1led_an_grp_mt7628[] = {
+ FUNC("jtag", 3, 42, 1),
+ FUNC("util", 2, 42, 1),
+ FUNC("gpio", 1, 42, 1),
+ FUNC("p1led_an", 0, 42, 1),
+};
+
+static struct rt2880_pmx_func p0led_an_grp_mt7628[] = {
+ FUNC("jtag", 3, 43, 1),
+ FUNC("rsvd", 2, 43, 1),
+ FUNC("gpio", 1, 43, 1),
+ FUNC("p0led_an", 0, 43, 1),
+};
+
static struct rt2880_pmx_func wled_an_grp_mt7628[] = {
- FUNC("rsvd", 3, 35, 1),
- FUNC("rsvd", 2, 35, 1),
- FUNC("gpio", 1, 35, 1),
- FUNC("wled_an", 0, 35, 1),
+ FUNC("rsvd", 3, 44, 1),
+ FUNC("rsvd", 2, 44, 1),
+ FUNC("gpio", 1, 44, 1),
+ FUNC("wled_an", 0, 44, 1),
};
#define MT7628_GPIO_MODE_MASK 0x3
+#define MT7628_GPIO_MODE_P4LED_KN 58
+#define MT7628_GPIO_MODE_P3LED_KN 56
+#define MT7628_GPIO_MODE_P2LED_KN 54
+#define MT7628_GPIO_MODE_P1LED_KN 52
+#define MT7628_GPIO_MODE_P0LED_KN 50
#define MT7628_GPIO_MODE_WLED_KN 48
+#define MT7628_GPIO_MODE_P4LED_AN 42
+#define MT7628_GPIO_MODE_P3LED_AN 40
+#define MT7628_GPIO_MODE_P2LED_AN 38
+#define MT7628_GPIO_MODE_P1LED_AN 36
+#define MT7628_GPIO_MODE_P0LED_AN 34
#define MT7628_GPIO_MODE_WLED_AN 32
#define MT7628_GPIO_MODE_PWM1 30
#define MT7628_GPIO_MODE_PWM0 28
@@ -223,9 +303,9 @@ static struct rt2880_pmx_func wled_an_grp_mt7628[] = {
#define MT7628_GPIO_MODE_GPIO 0
static struct rt2880_pmx_group mt7628an_pinmux_data[] = {
- GRP_G("pmw1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ GRP_G("pwm1", pwm1_grp_mt7628, MT7628_GPIO_MODE_MASK,
1, MT7628_GPIO_MODE_PWM1),
- GRP_G("pmw0", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ GRP_G("pwm0", pwm0_grp_mt7628, MT7628_GPIO_MODE_MASK,
1, MT7628_GPIO_MODE_PWM0),
GRP_G("uart2", uart2_grp_mt7628, MT7628_GPIO_MODE_MASK,
1, MT7628_GPIO_MODE_UART2),
@@ -251,8 +331,28 @@ static struct rt2880_pmx_group mt7628an_pinmux_data[] = {
1, MT7628_GPIO_MODE_GPIO),
GRP_G("wled_an", wled_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
1, MT7628_GPIO_MODE_WLED_AN),
+ GRP_G("p0led_an", p0led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P0LED_AN),
+ GRP_G("p1led_an", p1led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P1LED_AN),
+ GRP_G("p2led_an", p2led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P2LED_AN),
+ GRP_G("p3led_an", p3led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P3LED_AN),
+ GRP_G("p4led_an", p4led_an_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P4LED_AN),
GRP_G("wled_kn", wled_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
1, MT7628_GPIO_MODE_WLED_KN),
+ GRP_G("p0led_kn", p0led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P0LED_KN),
+ GRP_G("p1led_kn", p1led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P1LED_KN),
+ GRP_G("p2led_kn", p2led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P2LED_KN),
+ GRP_G("p3led_kn", p3led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P3LED_KN),
+ GRP_G("p4led_kn", p4led_kn_grp_mt7628, MT7628_GPIO_MODE_MASK,
+ 1, MT7628_GPIO_MODE_P4LED_KN),
{ 0 }
};
@@ -581,11 +681,14 @@ void prom_soc_init(struct ralink_soc_info *soc_info)
(rev & CHIP_REV_ECO_MASK));
cfg0 = __raw_readl(sysc + SYSC_REG_SYSTEM_CONFIG0);
- if (is_mt76x8())
+ if (is_mt76x8()) {
dram_type = cfg0 & DRAM_TYPE_MT7628_MASK;
- else
+ } else {
dram_type = (cfg0 >> SYSCFG0_DRAM_TYPE_SHIFT) &
SYSCFG0_DRAM_TYPE_MASK;
+ if (dram_type == SYSCFG0_DRAM_TYPE_UNKNOWN)
+ dram_type = SYSCFG0_DRAM_TYPE_SDRAM;
+ }
soc_info->mem_base = MT7620_DRAM_BASE;
if (is_mt76x8())
diff --git a/arch/mips/ralink/mt7621.c b/arch/mips/ralink/mt7621.c
index e9b9fa3e1e51..a45bbbe97ac5 100644
--- a/arch/mips/ralink/mt7621.c
+++ b/arch/mips/ralink/mt7621.c
@@ -4,7 +4,7 @@
* by the Free Software Foundation.
*
* Copyright (C) 2015 Nikolay Martynov <mar.kolya@gmail.com>
- * Copyright (C) 2015 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2015 John Crispin <john@phrozen.org>
*/
#include <linux/kernel.h>
diff --git a/arch/mips/ralink/of.c b/arch/mips/ralink/of.c
index f9eda5d8f82c..0aa67a2d0ae6 100644
--- a/arch/mips/ralink/of.c
+++ b/arch/mips/ralink/of.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
* Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
*/
#include <linux/io.h>
diff --git a/arch/mips/ralink/prom.c b/arch/mips/ralink/prom.c
index 39a9142f71be..5a73c5e14221 100644
--- a/arch/mips/ralink/prom.c
+++ b/arch/mips/ralink/prom.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2009 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2010 Joonas Lahtinen <joonas.lahtinen@gmail.com>
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
*/
#include <linux/string.h>
diff --git a/arch/mips/ralink/reset.c b/arch/mips/ralink/reset.c
index ee117c4bc4a3..64543d66e76b 100644
--- a/arch/mips/ralink/reset.c
+++ b/arch/mips/ralink/reset.c
@@ -5,7 +5,7 @@
*
* Copyright (C) 2008-2009 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
*/
#include <linux/pm.h>
@@ -61,7 +61,7 @@ static int ralink_reset_device(struct reset_controller_dev *rcdev,
return ralink_deassert_device(rcdev, id);
}
-static struct reset_control_ops reset_ops = {
+static const struct reset_control_ops reset_ops = {
.reset = ralink_reset_device,
.assert = ralink_assert_device,
.deassert = ralink_deassert_device,
diff --git a/arch/mips/ralink/rt288x.c b/arch/mips/ralink/rt288x.c
index 3c84166ebcb7..285796e6d75c 100644
--- a/arch/mips/ralink/rt288x.c
+++ b/arch/mips/ralink/rt288x.c
@@ -7,7 +7,7 @@
*
* Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
*/
#include <linux/kernel.h>
diff --git a/arch/mips/ralink/rt305x.c b/arch/mips/ralink/rt305x.c
index d7c4ba43a428..c8a28c4bf29e 100644
--- a/arch/mips/ralink/rt305x.c
+++ b/arch/mips/ralink/rt305x.c
@@ -7,7 +7,7 @@
*
* Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
*/
#include <linux/kernel.h>
diff --git a/arch/mips/ralink/rt3883.c b/arch/mips/ralink/rt3883.c
index fafec947b27d..4cef9162bd9b 100644
--- a/arch/mips/ralink/rt3883.c
+++ b/arch/mips/ralink/rt3883.c
@@ -7,7 +7,7 @@
*
* Copyright (C) 2008 Imre Kaloz <kaloz@openwrt.org>
* Copyright (C) 2008-2011 Gabor Juhos <juhosg@openwrt.org>
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
*/
#include <linux/kernel.h>
diff --git a/arch/mips/ralink/timer-gic.c b/arch/mips/ralink/timer-gic.c
index 5b4f186bcf95..069771dbec42 100644
--- a/arch/mips/ralink/timer-gic.c
+++ b/arch/mips/ralink/timer-gic.c
@@ -4,7 +4,7 @@
* by the Free Software Foundation.
*
* Copyright (C) 2015 Nikolay Martynov <mar.kolya@gmail.com>
- * Copyright (C) 2015 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2015 John Crispin <john@phrozen.org>
*/
#include <linux/init.h>
diff --git a/arch/mips/ralink/timer.c b/arch/mips/ralink/timer.c
index 82c72a15bf75..b0343ff336c5 100644
--- a/arch/mips/ralink/timer.c
+++ b/arch/mips/ralink/timer.c
@@ -3,7 +3,7 @@
* under the terms of the GNU General Public License version 2 as published
* by the Free Software Foundation.
*
- * Copyright (C) 2013 John Crispin <blogic@openwrt.org>
+ * Copyright (C) 2013 John Crispin <john@phrozen.org>
*/
#include <linux/module.h>
@@ -180,5 +180,5 @@ static struct platform_driver rt_timer_driver = {
module_platform_driver(rt_timer_driver);
MODULE_DESCRIPTION("Ralink RT2880 timer");
-MODULE_AUTHOR("John Crispin <blogic@openwrt.org");
+MODULE_AUTHOR("John Crispin <john@phrozen.org");
MODULE_LICENSE("GPL");
diff --git a/arch/mips/sgi-ip22/ip22-reset.c b/arch/mips/sgi-ip22/ip22-reset.c
index 063c2dd31e72..2f45b0357021 100644
--- a/arch/mips/sgi-ip22/ip22-reset.c
+++ b/arch/mips/sgi-ip22/ip22-reset.c
@@ -7,7 +7,7 @@
*/
#include <linux/linkage.h>
#include <linux/init.h>
-#include <linux/ds1286.h>
+#include <linux/rtc/ds1286.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/kernel.h>
diff --git a/arch/mips/sgi-ip27/ip27-hubio.c b/arch/mips/sgi-ip27/ip27-hubio.c
index 328ceb3c86ec..2abe016a0ffc 100644
--- a/arch/mips/sgi-ip27/ip27-hubio.c
+++ b/arch/mips/sgi-ip27/ip27-hubio.c
@@ -105,7 +105,7 @@ static void hub_setup_prb(nasid_t nasid, int prbnum, int credits)
prb.iprb_ff = force_fire_and_forget ? 1 : 0;
/*
- * Set the appropriate number of PIO cresits for the widget.
+ * Set the appropriate number of PIO credits for the widget.
*/
prb.iprb_xtalkctr = credits;
diff --git a/arch/mips/sgi-ip27/ip27-memory.c b/arch/mips/sgi-ip27/ip27-memory.c
index 8d0eb2643248..f1f88291451e 100644
--- a/arch/mips/sgi-ip27/ip27-memory.c
+++ b/arch/mips/sgi-ip27/ip27-memory.c
@@ -7,7 +7,7 @@
* Copyright (C) 2000 by Silicon Graphics, Inc.
* Copyright (C) 2004 by Christoph Hellwig
*
- * On SGI IP27 the ARC memory configuration data is completly bogus but
+ * On SGI IP27 the ARC memory configuration data is completely bogus but
* alternate easier to use mechanisms are available.
*/
#include <linux/init.h>
diff --git a/arch/mips/sgi-ip27/ip27-nmi.c b/arch/mips/sgi-ip27/ip27-nmi.c
index a2358b44420c..cfceaea92724 100644
--- a/arch/mips/sgi-ip27/ip27-nmi.c
+++ b/arch/mips/sgi-ip27/ip27-nmi.c
@@ -23,7 +23,7 @@ typedef unsigned long machreg_t;
static arch_spinlock_t nmi_lock = __ARCH_SPIN_LOCK_UNLOCKED;
/*
- * Lets see what else we need to do here. Set up sp, gp?
+ * Let's see what else we need to do here. Set up sp, gp?
*/
void nmi_dump(void)
{
diff --git a/arch/mips/sgi-ip27/ip27-xtalk.c b/arch/mips/sgi-ip27/ip27-xtalk.c
index 20f582a2137a..4fe5678ba74d 100644
--- a/arch/mips/sgi-ip27/ip27-xtalk.c
+++ b/arch/mips/sgi-ip27/ip27-xtalk.c
@@ -67,7 +67,7 @@ static int xbow_probe(nasid_t nasid)
return -ENODEV;
/*
- * Okay, here's a xbow. Lets arbitrate and find
+ * Okay, here's a xbow. Let's arbitrate and find
* out if we should initialize it. Set enabled
* hub connected at highest or lowest widget as
* master.
diff --git a/arch/mips/sibyte/Kconfig b/arch/mips/sibyte/Kconfig
index cb9a095f5c5e..707b88441567 100644
--- a/arch/mips/sibyte/Kconfig
+++ b/arch/mips/sibyte/Kconfig
@@ -143,7 +143,8 @@ config SIBYTE_CFE_CONSOLE
config SIBYTE_BUS_WATCHER
bool "Support for Bus Watcher statistics"
depends on SIBYTE_SB1xxx_SOC && \
- (SIBYTE_BCM112X || SIBYTE_SB1250)
+ (SIBYTE_BCM112X || SIBYTE_SB1250 || \
+ SIBYTE_BCM1x55 || SIBYTE_BCM1x80)
help
Handle and keep statistics on the bus error interrupts (COR_ECC,
BAD_ECC, IO_BUS).
diff --git a/arch/mips/sni/rm200.c b/arch/mips/sni/rm200.c
index a046b302623e..160b88000b4b 100644
--- a/arch/mips/sni/rm200.c
+++ b/arch/mips/sni/rm200.c
@@ -263,7 +263,7 @@ spurious_8259A_irq:
static int spurious_irq_mask;
/*
* At this point we can be sure the IRQ is spurious,
- * lets ACK and report it. [once per IRQ]
+ * let's ACK and report it. [once per IRQ]
*/
if (!(spurious_irq_mask & irqmask)) {
printk(KERN_DEBUG
diff --git a/arch/mips/sni/time.c b/arch/mips/sni/time.c
index fb4b3520cdc6..7ee14f41fc25 100644
--- a/arch/mips/sni/time.c
+++ b/arch/mips/sni/time.c
@@ -8,7 +8,6 @@
#include <asm/sni.h>
#include <asm/time.h>
-#include <asm-generic/rtc.h>
#define SNI_CLOCK_TICK_RATE 3686400
#define SNI_COUNTER2_DIV 64
diff --git a/arch/mips/txx9/generic/pci.c b/arch/mips/txx9/generic/pci.c
index a77698ff2b6f..1f6bc9a3036c 100644
--- a/arch/mips/txx9/generic/pci.c
+++ b/arch/mips/txx9/generic/pci.c
@@ -268,7 +268,7 @@ static int txx9_i8259_irq_setup(int irq)
return err;
}
-static void __init_refok quirk_slc90e66_bridge(struct pci_dev *dev)
+static void __ref quirk_slc90e66_bridge(struct pci_dev *dev)
{
int irq; /* PCI/ISA Bridge interrupt */
u8 reg_64;
diff --git a/arch/mips/txx9/generic/setup.c b/arch/mips/txx9/generic/setup.c
index 108f8a8d1640..ada92db92f87 100644
--- a/arch/mips/txx9/generic/setup.c
+++ b/arch/mips/txx9/generic/setup.c
@@ -727,7 +727,7 @@ void __init txx9_iocled_init(unsigned long baseaddr,
int i;
static char *default_triggers[] __initdata = {
"heartbeat",
- "ide-disk",
+ "disk-activity",
"nand-disk",
NULL,
};
diff --git a/arch/mips/txx9/rbtx4939/setup.c b/arch/mips/txx9/rbtx4939/setup.c
index 37030409745c..8b937300fb7f 100644
--- a/arch/mips/txx9/rbtx4939/setup.c
+++ b/arch/mips/txx9/rbtx4939/setup.c
@@ -215,7 +215,7 @@ static int __init rbtx4939_led_probe(struct platform_device *pdev)
int i;
static char *default_triggers[] __initdata = {
"heartbeat",
- "ide-disk",
+ "disk-activity",
"nand-disk",
};
diff --git a/arch/mips/vdso/Makefile b/arch/mips/vdso/Makefile
index ee3617c0c5e2..3b4538ec0102 100644
--- a/arch/mips/vdso/Makefile
+++ b/arch/mips/vdso/Makefile
@@ -5,10 +5,12 @@ obj-vdso-y := elf.o gettimeofday.o sigreturn.o
ccflags-vdso := \
$(filter -I%,$(KBUILD_CFLAGS)) \
$(filter -E%,$(KBUILD_CFLAGS)) \
+ $(filter -mmicromips,$(KBUILD_CFLAGS)) \
$(filter -march=%,$(KBUILD_CFLAGS))
cflags-vdso := $(ccflags-vdso) \
$(filter -W%,$(filter-out -Wa$(comma)%,$(KBUILD_CFLAGS))) \
- -O2 -g -fPIC -fno-common -fno-builtin -G 0 -DDISABLE_BRANCH_PROFILING \
+ -O2 -g -fPIC -fno-strict-aliasing -fno-common -fno-builtin -G 0 \
+ -DDISABLE_BRANCH_PROFILING \
$(call cc-option, -fno-stack-protector)
aflags-vdso := $(ccflags-vdso) \
$(filter -I%,$(KBUILD_CFLAGS)) \
@@ -50,13 +52,17 @@ quiet_cmd_vdsold = VDSO $@
cmd_vdsold = $(CC) $(c_flags) $(VDSO_LDFLAGS) \
-Wl,-T $(filter %.lds,$^) $(filter %.o,$^) -o $@
+# Strip rule for the raw .so files
+$(obj)/%.so.raw: OBJCOPYFLAGS := -S
+$(obj)/%.so.raw: $(obj)/%.so.dbg.raw FORCE
+ $(call if_changed,objcopy)
+
hostprogs-y := genvdso
quiet_cmd_genvdso = GENVDSO $@
define cmd_genvdso
- cp $< $(<:%.dbg=%) && \
- $(OBJCOPY) -S $< $(<:%.dbg=%) && \
- $(obj)/genvdso $< $(<:%.dbg=%) $@ $(VDSO_NAME)
+ $(foreach file,$(filter %.raw,$^),cp $(file) $(file:%.raw=%) &&) \
+ $(obj)/genvdso $(<:%.raw=%) $(<:%.dbg.raw=%) $@ $(VDSO_NAME)
endef
#
@@ -66,7 +72,10 @@ endef
native-abi := $(filter -mabi=%,$(KBUILD_CFLAGS))
targets += $(obj-vdso-y)
-targets += vdso.lds vdso.so.dbg vdso.so vdso-image.c
+targets += vdso.lds
+targets += vdso.so.dbg.raw vdso.so.raw
+targets += vdso.so.dbg vdso.so
+targets += vdso-image.c
obj-vdso := $(obj-vdso-y:%.o=$(obj)/%.o)
@@ -75,10 +84,11 @@ $(obj-vdso): KBUILD_AFLAGS := $(aflags-vdso) $(native-abi)
$(obj)/vdso.lds: KBUILD_CPPFLAGS := $(native-abi)
-$(obj)/vdso.so.dbg: $(obj)/vdso.lds $(obj-vdso) FORCE
+$(obj)/vdso.so.dbg.raw: $(obj)/vdso.lds $(obj-vdso) FORCE
$(call if_changed,vdsold)
-$(obj)/vdso-image.c: $(obj)/vdso.so.dbg $(obj)/genvdso FORCE
+$(obj)/vdso-image.c: $(obj)/vdso.so.dbg.raw $(obj)/vdso.so.raw \
+ $(obj)/genvdso FORCE
$(call if_changed,genvdso)
obj-y += vdso-image.o
@@ -89,7 +99,10 @@ obj-y += vdso-image.o
# Define these outside the ifdef to ensure they are picked up by clean.
targets += $(obj-vdso-y:%.o=%-o32.o)
-targets += vdso-o32.lds vdso-o32.so.dbg vdso-o32.so vdso-o32-image.c
+targets += vdso-o32.lds
+targets += vdso-o32.so.dbg.raw vdso-o32.so.raw
+targets += vdso-o32.so.dbg vdso-o32.so
+targets += vdso-o32-image.c
ifdef CONFIG_MIPS32_O32
@@ -109,11 +122,12 @@ $(obj)/vdso-o32.lds: KBUILD_CPPFLAGS := -mabi=32
$(obj)/vdso-o32.lds: $(src)/vdso.lds.S FORCE
$(call if_changed_dep,cpp_lds_S)
-$(obj)/vdso-o32.so.dbg: $(obj)/vdso-o32.lds $(obj-vdso-o32) FORCE
+$(obj)/vdso-o32.so.dbg.raw: $(obj)/vdso-o32.lds $(obj-vdso-o32) FORCE
$(call if_changed,vdsold)
$(obj)/vdso-o32-image.c: VDSO_NAME := o32
-$(obj)/vdso-o32-image.c: $(obj)/vdso-o32.so.dbg $(obj)/genvdso FORCE
+$(obj)/vdso-o32-image.c: $(obj)/vdso-o32.so.dbg.raw $(obj)/vdso-o32.so.raw \
+ $(obj)/genvdso FORCE
$(call if_changed,genvdso)
obj-y += vdso-o32-image.o
@@ -125,7 +139,10 @@ endif
#
targets += $(obj-vdso-y:%.o=%-n32.o)
-targets += vdso-n32.lds vdso-n32.so.dbg vdso-n32.so vdso-n32-image.c
+targets += vdso-n32.lds
+targets += vdso-n32.so.dbg.raw vdso-n32.so.raw
+targets += vdso-n32.so.dbg vdso-n32.so
+targets += vdso-n32-image.c
ifdef CONFIG_MIPS32_N32
@@ -145,11 +162,12 @@ $(obj)/vdso-n32.lds: KBUILD_CPPFLAGS := -mabi=n32
$(obj)/vdso-n32.lds: $(src)/vdso.lds.S FORCE
$(call if_changed_dep,cpp_lds_S)
-$(obj)/vdso-n32.so.dbg: $(obj)/vdso-n32.lds $(obj-vdso-n32) FORCE
+$(obj)/vdso-n32.so.dbg.raw: $(obj)/vdso-n32.lds $(obj-vdso-n32) FORCE
$(call if_changed,vdsold)
$(obj)/vdso-n32-image.c: VDSO_NAME := n32
-$(obj)/vdso-n32-image.c: $(obj)/vdso-n32.so.dbg $(obj)/genvdso FORCE
+$(obj)/vdso-n32-image.c: $(obj)/vdso-n32.so.dbg.raw $(obj)/vdso-n32.so.raw \
+ $(obj)/genvdso FORCE
$(call if_changed,genvdso)
obj-y += vdso-n32-image.o
diff --git a/arch/mips/vr41xx/common/cmu.c b/arch/mips/vr41xx/common/cmu.c
index 05302bfdd114..89bac9885695 100644
--- a/arch/mips/vr41xx/common/cmu.c
+++ b/arch/mips/vr41xx/common/cmu.c
@@ -3,7 +3,7 @@
*
* Copyright (C) 2001-2002 MontaVista Software Inc.
* Author: Yoichi Yuasa <source@mvista.com>
- * Copuright (C) 2003-2005 Yoichi Yuasa <yuasa@linux-mips.org>
+ * Copyright (C) 2003-2005 Yoichi Yuasa <yuasa@linux-mips.org>
*
* This program is free software; you can redistribute it and/or modify
* it under the terms of the GNU General Public License as published by
diff --git a/arch/mips/vr41xx/common/pmu.c b/arch/mips/vr41xx/common/pmu.c
index d7f755833c3f..39a0db3e2b34 100644
--- a/arch/mips/vr41xx/common/pmu.c
+++ b/arch/mips/vr41xx/common/pmu.c
@@ -73,7 +73,7 @@ static inline void software_reset(void)
default:
set_c0_status(ST0_BEV | ST0_ERL);
change_c0_config(CONF_CM_CMASK, CONF_CM_UNCACHED);
- flush_cache_all();
+ __flush_cache_all();
write_c0_wired(0);
__asm__("jr %0"::"r"(0xbfc00000));
break;
diff --git a/arch/mips/xilfpga/init.c b/arch/mips/xilfpga/init.c
index ce2aee2169ac..602e384a26a2 100644
--- a/arch/mips/xilfpga/init.c
+++ b/arch/mips/xilfpga/init.c
@@ -10,7 +10,6 @@
*/
#include <linux/of_fdt.h>
-#include <linux/of_platform.h>
#include <asm/prom.h>
@@ -43,15 +42,3 @@ void __init device_tree_init(void)
unflatten_and_copy_device_tree();
}
-
-static int __init plat_of_setup(void)
-{
- if (!of_have_populated_dt())
- panic("Device tree not present");
-
- if (of_platform_populate(NULL, of_default_bus_match_table, NULL, NULL))
- panic("Failed to populate DT");
-
- return 0;
-}
-arch_initcall(plat_of_setup);