1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
|
/*
* Idle processing for ARMv6-based Qualcomm SoCs.
* Work around bugs with SWFI.
*
* Copyright (C) 2007 Google, Inc.
* Copyright (c) 2007-2009, Code Aurora Forum. All rights reserved.
*
* This software is licensed under the terms of the GNU General Public
* License version 2, as published by the Free Software Foundation, and
* may be copied, distributed, and modified under those terms.
*
* This program is distributed in the hope that it will be useful,
* but WITHOUT ANY WARRANTY; without even the implied warranty of
* MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
* GNU General Public License for more details.
*
*/
#include <linux/linkage.h>
#include <asm/assembler.h>
/*
 * void msm_arch_idle(void)
 *
 * Enter SWFI (standby / wait-for-interrupt) with both caches disabled,
 * working around the SWFI bugs mentioned in the file header: the data
 * cache is flushed and the D/I cache enable bits are cleared in the
 * control register around the WFI, then the original control register
 * value is restored before returning.
 *
 * Clobbers: r0, r1.
 */
ENTRY(msm_arch_idle)
mov r0, #0
mcr p15, 0, r0, c7, c10, 0 /* flush entire data cache */
mrc p15, 0, r1, c1, c0, 0 /* read current CR */
bic r0, r1, #(1 << 2) /* clear dcache bit */
bic r0, r0, #(1 << 12) /* clear icache bit */
mcr p15, 0, r0, c1, c0, 0 /* disable d/i cache */
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 /* dsb */
mcr p15, 0, r0, c7, c0, 4 /* wait for interrupt */
mcr p15, 0, r1, c1, c0, 0 /* restore d/i cache */
mcr p15, 0, r0, c7, c5, 4 /* isb */
mov pc, lr
/*
 * int msm_pm_collapse(void)
 *
 * Attempt a full power collapse.  Saves the CPU context (r4-r14 plus
 * the cp15 MMU control, TTB, DACR and context ID registers, and the
 * performance-monitor registers when OPROFILE is enabled) into
 * saved_state, then enters SWFI with caches disabled — the same
 * errata workaround as msm_arch_idle above.
 *
 * Returns 0 if the SWFI merely resumed here (power collapse failed).
 * If power was actually lost, execution instead resumes in
 * msm_pm_collapse_exit via the reset vector, which restores the saved
 * context and returns 1 to this function's caller through the saved lr.
 *
 * NOTE: the stmia save order here defines the saved_state layout; the
 * ldmdb sequence in msm_pm_collapse_exit depends on it exactly.
 */
ENTRY(msm_pm_collapse)
ldr r0, =saved_state
stmia r0!, {r4-r14} /* save callee context: r4-r14 */
#if defined(CONFIG_MSM_FIQ_SUPPORT)
cpsid f /* mask FIQs across the collapse attempt */
#endif
mrc p15, 0, r1, c1, c0, 0 /* MMU control */
mrc p15, 0, r2, c2, c0, 0 /* ttb */
mrc p15, 0, r3, c3, c0, 0 /* dacr */
mrc p15, 0, ip, c13, c0, 1 /* context ID */
stmia r0!, {r1-r3, ip} /* layout: MMU ctrl, ttb, dacr, ctx ID */
#if defined(CONFIG_OPROFILE)
mrc p15, 0, r1, c15, c12, 0 /* pmnc */
mrc p15, 0, r2, c15, c12, 1 /* ccnt */
mrc p15, 0, r3, c15, c12, 2 /* pmn0 */
mrc p15, 0, ip, c15, c12, 3 /* pmn1 */
stmia r0!, {r1-r3, ip} /* layout: pmnc, ccnt, pmn0, pmn1 */
#endif
mrc p15, 0, r1, c1, c0, 2 /* read CACR */
stmia r0!, {r1} /* last word before saved_state_end */
mov r0, #0
mcr p15, 0, r0, c7, c10, 0 /* flush entire data cache */
mrc p15, 0, r1, c1, c0, 0 /* read current CR */
bic r0, r1, #(1 << 2) /* clear dcache bit */
bic r0, r0, #(1 << 12) /* clear icache bit */
mcr p15, 0, r0, c1, c0, 0 /* disable d/i cache */
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 /* dsb */
mcr p15, 0, r0, c7, c0, 4 /* wait for interrupt */
mcr p15, 0, r1, c1, c0, 0 /* restore d/i cache */
mcr p15, 0, r0, c7, c5, 4 /* isb */
#if defined(CONFIG_MSM_FIQ_SUPPORT)
cpsie f /* unmask FIQs again */
#endif
ldr r0, =saved_state /* restore registers */
ldmfd r0, {r4-r14}
mov r0, #0 /* return power collapse failed */
mov pc, lr
/*
 * msm_pm_collapse_exit - resume path after a successful power collapse.
 *
 * Entered (presumably from the boot/reset path — confirm against the
 * platform resume code) with the MMU off, executing at this code's
 * physical address.  It restores the cp15 state and r4-r14 saved by
 * msm_pm_collapse, installs a temporary 1:1 section mapping covering
 * this code so the MMU can be enabled in place, jumps to the kernel's
 * virtual mapping, removes the temporary PMD entry, invalidates
 * TLB/branch-predictor/caches, and returns 1 ("power collapse
 * succeeded") through the restored lr to msm_pm_collapse's caller.
 */
ENTRY(msm_pm_collapse_exit)
#if 0 /* serial debug */
mov r0, #0x80000016
mcr p15, 0, r0, c15, c2, 4
mov r0, #0xA9000000
add r0, r0, #0x00A00000 /* UART1 */
/*add r0, r0, #0x00C00000*/ /* UART3 */
mov r1, #'A'
str r1, [r0, #0x00C]
#endif
/*
 * Compute the physical address of saved_state_end in r1:
 * adr yields the current (physical) PC-relative address of
 * msm_pm_collapse_exit while ldr= yields its link-time virtual
 * address; their difference is the phys-virt offset, which is
 * applied to saved_state_end's virtual address.
 */
ldr r1, =saved_state_end
ldr r2, =msm_pm_collapse_exit
adr r3, msm_pm_collapse_exit
add r1, r1, r3
sub r1, r1, r2
/* Walk backwards from saved_state_end, mirroring the stmia save order. */
ldmdb r1!, {r2}
mcr p15, 0, r2, c1, c0, 2 /* restore CACR */
#if defined(CONFIG_OPROFILE)
ldmdb r1!, {r2-r5} /* r2=pmnc r3=ccnt r4=pmn0 r5=pmn1 */
mcr p15, 0, r3, c15, c12, 1 /* ccnt */
mcr p15, 0, r4, c15, c12, 2 /* pmn0 */
mcr p15, 0, r5, c15, c12, 3 /* pmn1 */
mcr p15, 0, r2, c15, c12, 0 /* pmnc */
#endif
ldmdb r1!, {r2-r5} /* r2=MMU ctrl r3=ttb r4=dacr r5=ctx ID */
mcr p15, 0, r4, c3, c0, 0 /* dacr */
mcr p15, 0, r3, c2, c0, 0 /* ttb */
mcr p15, 0, r5, c13, c0, 1 /* context ID */
mov r0, #0
mcr p15, 0, r0, c7, c5, 4 /* isb */
ldmdb r1!, {r4-r14} /* r2 (MMU ctrl) and r3 (ttb) stay live below */
/* Add 1:1 map in the PMD to allow smooth switch when turning on MMU */
and r3, r3, #~0x7F /* mask off lower 7 bits of TTB */
adr r0, msm_pm_mapped_pa /* get address of the mapped instr */
lsr r1, r0, #20 /* get the addr range of addr in MB */
lsl r1, r1, #2 /* multiply by 4 to get to the pg index */
add r3, r3, r1 /* pgd + pgd_index(addr) */
ldr r1, [r3] /* save current entry to r1 */
lsr r0, #20 /* align current addr to 1MB boundary */
lsl r0, #20
/* Create new entry for this 1MB page */
orr r0, r0, #0x400 /* PMD_SECT_AP_WRITE */
orr r0, r0, #0x2 /* PMD_TYPE_SECT|PMD_DOMAIN(DOMAIN_KERNEL) */
str r0, [r3] /* put new entry into the MMU table */
mov r0, #0
mcr p15, 0, r0, c7, c10, 4 /* dsb */
mcr p15, 0, r2, c1, c0, 0 /* MMU control */
mcr p15, 0, r0, c7, c5, 4 /* isb */
msm_pm_mapped_pa:
/* Switch to virtual */
adr r2, msm_pm_pa_to_va /* physical address, via the 1:1 map */
ldr r0, =msm_pm_pa_to_va /* link-time virtual address */
mov pc, r0 /* jump into the kernel mapping */
msm_pm_pa_to_va:
sub r0, r0, r2 /* r0 = virt - phys offset */
/* Restore r1 in MMU table */
add r3, r3, r0 /* PMD entry pointer, now virtual */
str r1, [r3] /* put the original entry back */
mov r0, #0
mcr p15, 0, r0, c7, c10, 0 /* flush entire data cache */
mcr p15, 0, r0, c7, c10, 4 /* dsb */
mcr p15, 0, r0, c7, c5, 4 /* isb */
mcr p15, 0, r0, c8, c7, 0 /* invalidate entire unified TLB */
mcr p15, 0, r0, c7, c5, 6 /* invalidate entire branch target
* cache */
mcr p15, 0, r0, c7, c7, 0 /* invalidate both data and instruction
* cache */
mcr p15, 0, r0, c7, c10, 4 /* dsb */
mcr p15, 0, r0, c7, c5, 4 /* isb */
mov r0, #1 /* return 1: power collapse succeeded */
mov pc, lr /* lr was restored from saved_state above */
nop /* padding; should never execute past the return */
nop
nop
nop
nop
1: b 1b /* catch-all: spin if we ever fall through here */
.data
/*
 * CPU context save area written (forwards, via stmia) by
 * msm_pm_collapse and read back (backwards, via ldmdb from
 * saved_state_end) by msm_pm_collapse_exit.  The layout below must
 * match the stmia sequence in msm_pm_collapse word-for-word, including
 * the OPROFILE-conditional slice.
 */
saved_state:
.space 4 * 11 /* r4-14 */
.space 4 * 4 /* cp15 - MMU control, ttb, dacr, context ID */
#if defined(CONFIG_OPROFILE)
.space 4 * 4 /* more cp15 - pmnc, ccnt, pmn0, pmn1 */
#endif
.space 4 /* cacr */
saved_state_end:
|