path: root/xen/arch/arm/arm32/entry.S
#include <asm/sysregs.h>
#include <asm/regs.h>
#include <asm/alternative.h>
#include <public/xen.h>

/*
 * Short-hands to define the interrupts (A, I, F)
 *
 * _ means the interrupt state will not change
 * X means the state of interrupt X will change
 *
 * To be used with msr cpsr_* only
 */
#define IFLAGS_AIF      PSR_ABT_MASK | PSR_IRQ_MASK | PSR_FIQ_MASK
#define IFLAGS_A_F      PSR_ABT_MASK | PSR_FIQ_MASK

#define SAVE_ONE_BANKED(reg)    mrs r11, reg; str r11, [sp, #UREGS_##reg]
#define RESTORE_ONE_BANKED(reg) ldr r11, [sp, #UREGS_##reg]; msr reg, r11

#define SAVE_BANKED(mode) \
        SAVE_ONE_BANKED(SP_##mode) ; SAVE_ONE_BANKED(LR_##mode) ; SAVE_ONE_BANKED(SPSR_##mode)

#define RESTORE_BANKED(mode) \
        RESTORE_ONE_BANKED(SP_##mode) ; RESTORE_ONE_BANKED(LR_##mode) ; RESTORE_ONE_BANKED(SPSR_##mode)
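
/*
 * For instance, SAVE_BANKED(svc) saves SP_svc, LR_svc and SPSR_svc into the
 * UREGS_SP_svc, UREGS_LR_svc and UREGS_SPSR_svc slots of the current frame,
 * and RESTORE_BANKED(svc) does the reverse.
 */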

/*
 * Actions that need to be done after entering the hypervisor from the
 * guest and before the interrupts are unmasked.
 *
 * @return:
 *  r4: Set to a non-zero value if a pending Abort exception took place.
 *      Otherwise, it will be set to zero.
 */
prepare_context_from_guest:
#ifdef CONFIG_ARM32_HARDEN_BRANCH_PREDICTOR
        /*
         * Restore the vector table to the default as it may have been
         * changed when returning to the guest (see
         * return_to_hypervisor). We need to do that early (i.e. before
         * any interrupts are unmasked) because the hardened vectors require
         * SP to be 8-byte aligned. This does not hold when running in
         */
        ldr r1, =hyp_traps_vector
        mcr p15, 4, r1, c12, c0, 0
        isb
#endif

        ldr r11, =0xffffffff  /* Clobber SP which is only valid for hypervisor frames. */
        str r11, [sp, #UREGS_sp]
        SAVE_ONE_BANKED(SP_usr)
        /* LR_usr is the same physical register as lr and is saved by the caller */
        SAVE_BANKED(svc)
        SAVE_BANKED(abt)
        SAVE_BANKED(und)
        SAVE_BANKED(irq)
        SAVE_BANKED(fiq)
        SAVE_ONE_BANKED(R8_fiq); SAVE_ONE_BANKED(R9_fiq); SAVE_ONE_BANKED(R10_fiq)
        SAVE_ONE_BANKED(R11_fiq); SAVE_ONE_BANKED(R12_fiq);

        /*
         * We may have entered the hypervisor with pending asynchronous
         * Aborts generated by the guest. If we need to categorize them,
         * we have to consume any outstanding asynchronous Abort now.
         * Otherwise, they can be consumed later on.
         */
        alternative_if SKIP_SYNCHRONIZE_SERROR_ENTRY_EXIT
        mov r4, #0              /* r4 := No Abort was consumed */
        b   skip_check
        alternative_else_nop_endif

        /*
         * Consume any pending asynchronous Abort generated by the guest.
         *
         * The only way to consume an Abort is to unmask it. So the Abort
         * exception will be unmasked for a small window and then masked
         * again.
         *
         * It is fine to unmask the asynchronous Abort exception as we fully
         * control the state of the processor and only limited code will
         * be executed if the exception returns (see do_trap_data_abort()).
         *
         * TODO: The asynchronous abort path should be reworked to
         * inject the virtual asynchronous Abort in enter_hypervisor_*
         * rather than do_trap_data_abort(). This should make it easier
         * to understand the path.
         */

        /*
         * Save ELR_hyp to check whether the pending asynchronous Abort
         * exception is taken while we are in the window below.
         */
        mrs r1, ELR_hyp

        /*
         * Force loads and stores to complete before unmasking asynchronous
         * aborts and forcing the delivery of the exception.
         */
        dsb sy

        /*
         * Unmask the asynchronous abort bit. If there is a pending
         * asynchronous abort, the data_abort exception will be taken once
         * the A bit is cleared.
         */
        cpsie a

        /*
         * This is our single instruction exception window. A pending
         * asynchronous abort is guaranteed to occur at the earliest when we
         * unmask it, and at the latest just after the ISB.
         *
         * If a pending abort occurs, the program will jump to data_abort
         * exception handler, and the ELR_hyp will be set to
         * abort_guest_exit_start or abort_guest_exit_end.
         */
        .global abort_guest_exit_start
abort_guest_exit_start:

        isb

        .global abort_guest_exit_end
abort_guest_exit_end:
        /* Mask CPSR asynchronous abort bit, close the checking window. */
        cpsid a

        /*
         * Compare ELR_hyp with the value saved above: if they differ, an
         * asynchronous Abort exception was taken (and handled) within the
         * window.
         */
        mrs r2, ELR_hyp
        cmp r1, r2

        /*
         * Set r4 depending on whether an asynchronous abort was
         * consumed.
         */
        movne r4, #1
        moveq r4, #0

skip_check:
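        /*
         * Tail-call the C helper. It will return straight to our caller
         * (the lr set up by "bl prepare_context_from_guest" in the vector
         * macro below), with r4 still holding the Abort status.
         */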
        b   enter_hypervisor_from_guest_preirq
ENDPROC(prepare_context_from_guest)

        /*
         * Macro to define a trap entry.
         *
         *  @guest_iflags: Optional list of interrupts to unmask when
         *      entering from guest context. As this is used with cpsie,
         *      the lowercase letters (a, i, f) should be used.
         *
         *  @hyp_iflags: Optional list of interrupts to inherit when
         *      entering from hypervisor context. Any interrupts not
         *      listed will be kept unchanged. As this is used with cpsr_*,
         *      IFLAGS_* short-hands should be used.
         */
        .macro vector trap, guest_iflags=n, hyp_iflags=0
        /* Save registers in the stack */
        sub     sp, #(UREGS_SP_usr - UREGS_sp) /* SP, LR, SPSR, PC */
        push    {r0-r12}                       /* Save R0-R12 */
        mrs     r11, ELR_hyp                   /* ELR_hyp is return address */
        str     r11, [sp, #UREGS_pc]

        str     lr, [sp, #UREGS_lr]

        add     r11, sp, #(UREGS_kernel_sizeof + 4)

        str     r11, [sp, #UREGS_sp]
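        /*
         * Note: UREGS_sp, filled above, is only meaningful for traps taken
         * from the hypervisor. For traps from the guest it is clobbered in
         * prepare_context_from_guest.
         */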

        mrc     CP32(r11, HSR)                 /* Save exception syndrome */
        str     r11, [sp, #UREGS_hsr]

        mrs     r11, SPSR
        str     r11, [sp, #UREGS_cpsr]

        /*
         * We need to distinguish whether we came from guest or
         * hypervisor context.
         */
        and     r0, r11, #PSR_MODE_MASK
        cmp     r0, #PSR_MODE_HYP

        bne     1f
        /*
         * Trap from the hypervisor
         *
         * Inherit the state of the interrupts from the hypervisor
         * context. For that we need to use SPSR (stored in r11) and
         * modify CPSR accordingly.
         *
         * CPSR = (CPSR & ~hyp_iflags) | (SPSR & hyp_iflags)
         */
        mrs     r10, cpsr
        bic     r10, r10, #\hyp_iflags
        and     r11, r11, #\hyp_iflags
        orr     r10, r10, r11
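        /*
         * Write the control and extension fields so that A (bit 8), I and
         * F are all updated.
         */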
        msr     cpsr_cx, r10
        b       2f

1:
        /* Trap from the guest */
        /*
         * prepare_context_from_guest will return with r4 set to
         * a non-zero value if an asynchronous Abort was consumed.
         *
         * When an asynchronous Abort has been consumed (r4 != 0), we may have
         * injected a virtual asynchronous Abort to the guest.
         *
         * In this case, the initial exception will be discarded (PC has
         * been adjusted by inject_vabt_exception()). However, we still
         * want to give an opportunity to reschedule the vCPU. So we
         * only want to skip the handling of the initial exception (i.e.
         * do_trap_*()).
         */
        bl      prepare_context_from_guest
        .if     \guest_iflags != n
        cpsie   \guest_iflags
        .endif

        adr     lr, 2f
        cmp     r4, #0
        adrne   lr, return_from_trap
        b       enter_hypervisor_from_guest

2:
        /* We are ready to handle the trap: set up the registers and jump. */
        adr     lr, return_from_trap
        mov     r0, sp
        /*
         * Save the stack pointer in r11. It will be restored after the
         * trap has been handled (see return_from_trap).
         */
        mov     r11, sp
        bic     sp, #7      /* Align the stack pointer (noop on guest trap) */
        b       do_trap_\trap
        .endm

        .align 5
GLOBAL(hyp_traps_vector)
        b trap_reset                    /* 0x00 - Reset */
        b trap_undefined_instruction    /* 0x04 - Undefined Instruction */
        b trap_hypervisor_call          /* 0x08 - Hypervisor Call */
        b trap_prefetch_abort           /* 0x0c - Prefetch Abort */
        b trap_data_abort               /* 0x10 - Data Abort */
        b trap_guest_sync               /* 0x14 - Hypervisor */
        b trap_irq                      /* 0x18 - IRQ */
        b trap_fiq                      /* 0x1c - FIQ */

#ifdef CONFIG_HARDEN_BRANCH_PREDICTOR

        .align 5
GLOBAL(hyp_traps_vector_ic_inv)
        /*
         * We encode the exception entry in the bottom 3 bits of
         * SP, which therefore has to be 8-byte aligned on entry.
         */
        add sp, sp, #1                  /* Reset            7 */
        add sp, sp, #1                  /* Undef            6 */
        add sp, sp, #1                  /* Hypervisor call  5 */
        add sp, sp, #1                  /* Prefetch abort   4 */
        add sp, sp, #1                  /* Data abort       3 */
        add sp, sp, #1                  /* Hypervisor       2 */
        add sp, sp, #1                  /* IRQ              1 */
        nop                             /* FIQ              0 */

        mcr p15, 0, r0, c7, c5, 0       /* ICIALLU */
        isb

        b decode_vectors

        .align 5
GLOBAL(hyp_traps_vector_bp_inv)
        /*
         * We encode the exception entry in the bottom 3 bits of
         * SP, which therefore has to be 8-byte aligned on entry.
         */
        add sp, sp, #1                  /* Reset            7 */
        add sp, sp, #1                  /* Undef            6 */
        add sp, sp, #1                  /* Hypervisor Call  5 */
        add sp, sp, #1                  /* Prefetch abort   4 */
        add sp, sp, #1                  /* Data abort       3 */
        add sp, sp, #1                  /* Hypervisor       2 */
        add sp, sp, #1                  /* IRQ              1 */
        nop                             /* FIQ              0 */

        mcr p15, 0, r0, c7, c5, 6       /* BPIALL */
        isb

decode_vectors:
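/*
 * Each vect_br checks whether the bottom 3 bits of SP match \val: if so,
 * the first XOR clears them (restoring the original 8-byte aligned SP) and
 * we branch to \targ; otherwise the conditional XOR undoes the first one
 * and we fall through to the next vect_br.
 */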
.macro vect_br val, targ
        eor     sp, sp, #\val
        tst     sp, #7
        eorne   sp, sp, #\val
        beq     \targ
.endm

        vect_br 0, trap_fiq
        vect_br 1, trap_irq
        vect_br 2, trap_guest_sync
        vect_br 3, trap_data_abort
        vect_br 4, trap_prefetch_abort
        vect_br 5, trap_hypervisor_call
        vect_br 6, trap_undefined_instruction
        vect_br 7, trap_reset

#endif /* CONFIG_HARDEN_BRANCH_PREDICTOR */

/* Vector not used by the Hypervisor. */
trap_reset:
        vector reset

/*
 * Vector only used by the Hypervisor.
 *
 * While the exception can be executed with all the interrupts (e.g.
 * IRQ) unmasked, the interrupted context may have purposefully masked
 * some of them. So we want to inherit the state from the interrupted
 * context.
 */
trap_undefined_instruction:
        vector undefined_instruction, hyp_iflags=IFLAGS_AIF

/* We should never reach this trap */
trap_hypervisor_call:
        vector hypervisor_call

/*
 * Vector only used by the hypervisor.
 *
 * While the exception can be executed with all the interrupts (e.g.
 * IRQ) unmasked, the interrupted context may have purposefully masked
 * some of them. So we want to inherit the state from the interrupted
 * context.
 */
trap_prefetch_abort:
        vector prefetch_abort, hyp_iflags=IFLAGS_AIF

/*
 * Vector only used by the hypervisor.
 *
 * Data Aborts should be rare and most likely fatal. It is best not to
 * unmask any interrupts to limit the amount of code that can run before
 * the Data Abort is handled.
 */
trap_data_abort:
        vector data_abort

/* Vector only used by the guest. We can unmask Abort/IRQ. */
trap_guest_sync:
        vector guest_sync, guest_iflags=ai


/* Vector used by the hypervisor and the guest. */
trap_irq:
        vector irq, guest_iflags=a, hyp_iflags=IFLAGS_A_F

/*
 * Vector used by the hypervisor and the guest.
 *
 * FIQs are not meant to happen, so we don't unmask any interrupts.
 */
trap_fiq:
        vector fiq

return_from_trap:
        /*
         * Restore the stack pointer from r11. It was saved on exception
         * entry (see the vector macro).
         */
        mov sp, r11
ENTRY(return_to_new_vcpu32)
        ldr r11, [sp, #UREGS_cpsr]
        and r11, #PSR_MODE_MASK
        cmp r11, #PSR_MODE_HYP
        beq return_to_hypervisor
        /* Fall thru */
return_to_guest:
        mov r11, sp
        bic sp, #7 /* Align the stack pointer */
        bl leave_hypervisor_to_guest /* Mask IRQ on return */
        mov sp, r11
        RESTORE_ONE_BANKED(SP_usr)
        /* LR_usr is the same physical register as lr and is restored below */
        RESTORE_BANKED(svc)
        RESTORE_BANKED(abt)
        RESTORE_BANKED(und)
        RESTORE_BANKED(irq)
        RESTORE_BANKED(fiq)
        RESTORE_ONE_BANKED(R8_fiq); RESTORE_ONE_BANKED(R9_fiq); RESTORE_ONE_BANKED(R10_fiq)
        RESTORE_ONE_BANKED(R11_fiq); RESTORE_ONE_BANKED(R12_fiq);
        /* Fall thru */
return_to_hypervisor:
        cpsid ai
        ldr lr, [sp, #UREGS_lr]
        ldr r11, [sp, #UREGS_pc]
        msr ELR_hyp, r11
        ldr r11, [sp, #UREGS_cpsr]
        msr SPSR_cxsf, r11
#ifdef CONFIG_ARM32_HARDEN_BRANCH_PREDICTOR
        /*
         * Hardening the branch predictor may require installing a different
         * vector table before returning to the guest. Those vectors
         * may rely on a register state that does not hold when
         * running in the hypervisor (e.g. SP being 8-byte aligned). So set
         * up HVBAR very late.
         *
         * The default vector table will be restored on exit from the guest
         * (see prepare_context_from_guest).
         */
        mov r9, #0                      /* vector tables = NULL */
        /*
         * Only load the vector table pointer from the per-cpu
         * bp_harden_vecs when returning to the guest.
         */
        and r11, #PSR_MODE_MASK
        cmp r11, #PSR_MODE_HYP
        ldrne r11, =per_cpu__bp_harden_vecs
        mrcne p15, 4, r10, c13, c0, 2   /* r10 = per-cpu offset (HTPIDR) */
        addne r11, r11, r10             /* r11 = offset of the vector tables */
        ldrne r9, [r11]                 /* r9  = vector tables */
        cmp r9, #0                      /* Only update HVBAR when the vector */
        mcrne p15, 4, r9, c12, c0, 0    /* table is not NULL. */
#endif
        pop {r0-r12}
        add sp, #(UREGS_SP_usr - UREGS_sp); /* SP, LR, SPSR, PC */
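        /*
         * Clear the local exclusive monitor so that no stale exclusive
         * state is carried into the context we are returning to.
         */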
        clrex
        eret
        sb

/*
 * struct vcpu *__context_switch(struct vcpu *prev, struct vcpu *next)
 *
 * r0 - prev
 * r1 - next
 *
 * Returns prev in r0
 */
ENTRY(__context_switch)
        add     ip, r0, #VCPU_arch_saved_context
        stmia   ip!, {r4 - sl, fp, sp, lr}      /* Save register state */

        add     r4, r1, #VCPU_arch_saved_context
        ldmia   r4, {r4 - sl, fp, sp, pc}       /* Load registers and return */

/*
 * Local variables:
 * mode: ASM
 * indent-tabs-mode: nil
 * End:
 */