#ifndef __ASM_EVENT_H__
#define __ASM_EVENT_H__

#include <asm/domain.h>

/* Wake v (or interrupt it if running elsewhere) so it notices pending events. */
void vcpu_kick(struct vcpu *v);
/* Set evtchn_upcall_pending for v and inject its event-channel interrupt. */
void vcpu_mark_events_pending(struct vcpu *v);
/* Resync the event-channel interrupt level with evtchn_upcall_pending. */
void vcpu_update_evtchn_irq(struct vcpu *v);
/* Block the calling vCPU unless an event is already pending for it. */
void vcpu_block_unless_event_pending(struct vcpu *v);

/* Events are delivered as a virtual IRQ, so delivery is masked by PSR.I. */
static inline int vcpu_event_delivery_is_enabled(struct vcpu *v)
{
    struct cpu_user_regs *regs = &v->arch.cpu_info->guest_cpu_user_regs;

    return !(regs->cpsr & PSR_IRQ_MASK);
}

/* As local_events_need_delivery(), but ignore whether delivery is masked. */
static inline int local_events_need_delivery_nomask(void)
{
    /*
     * XXX: if the first interrupt has already been delivered, we should
     * check whether any other interrupts with priority higher than the
     * one in GICV_IAR are in the lr_pending queue or in the LR
     * registers and return 1 only in that case.
     * In practice the guest interrupt handler should run with
     * interrupts disabled so this shouldn't be a problem in the general
     * case.
     */
    if ( vgic_vcpu_pending_irq(current) )
        return 1;

    if ( !vcpu_info(current, evtchn_upcall_pending) )
        return 0;

    return vgic_evtchn_irq_pending(current);
}

static inline int local_events_need_delivery(void)
{
    if ( !vcpu_event_delivery_is_enabled(current) )
        return 0;
    return local_events_need_delivery_nomask();
}

static inline void local_event_delivery_enable(void)
{
    struct cpu_user_regs *regs = guest_cpu_user_regs();

    regs->cpsr &= ~PSR_IRQ_MASK;
}

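/*
 * Illustrative sketch only, therefore compiled out: roughly how common
 * code (e.g. the scheduler's blocking path) is expected to combine the
 * helpers above. The pause-flag/softirq handling is a simplified
 * assumption borrowed from xen/sched.h and xen/softirq.h, and the
 * function name is hypothetical; none of this is part of this header.
 */
#if 0
static inline void example_block_current_vcpu(void)
{
    local_event_delivery_enable();                /* unmask delivery */
    set_bit(_VPF_blocked, &current->pause_flags); /* mark us blocked */

    /* Re-check for events /after/ blocking: closes the wakeup-waiting race. */
    if ( local_events_need_delivery() )
        clear_bit(_VPF_blocked, &current->pause_flags);
    else
        raise_softirq(SCHEDULE_SOFTIRQ);
}
#endif
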
/* No arch specific virq definition now. Default to global. */
static inline bool arch_virq_is_global(unsigned int virq)
{
    return true;
}

#endif
/*
* Local variables:
* mode: C
* c-file-style: "BSD"
* c-basic-offset: 4
* indent-tabs-mode: nil
* End:
*/