/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * hvm.h: Hardware virtual machine assist interface definitions.
 *
 * Leendert van Doorn, leendert@watson.ibm.com
 * Copyright (c) 2005, International Business Machines Corporation.
 */

#ifndef __ASM_X86_HVM_HVM_H__
#define __ASM_X86_HVM_HVM_H__

#include <xen/mm.h>

#include <asm/alternative.h>
#include <asm/asm_defns.h>
#include <asm/current.h>
#include <asm/x86_emulate.h>
#include <asm/hvm/asid.h>

struct pirq; /* needed by pi_update_irte */

#ifdef CONFIG_HVM_FEP
/* Permit use of the Forced Emulation Prefix in HVM guests */
extern bool_t opt_hvm_fep;
#else
#define opt_hvm_fep 0
#endif

/* Interrupt acknowledgement sources. */
enum hvm_intsrc {
    hvm_intsrc_none,
    hvm_intsrc_pic,
    hvm_intsrc_lapic,
    hvm_intsrc_nmi,
    hvm_intsrc_mce,
    hvm_intsrc_vector
};
struct hvm_intack {
    uint8_t source; /* enum hvm_intsrc */
    uint8_t vector;
};
#define hvm_intack(src, vec)   ((struct hvm_intack) { hvm_intsrc_##src, vec })
#define hvm_intack_none        hvm_intack(none, 0)
#define hvm_intack_pic(vec)    hvm_intack(pic, vec)
#define hvm_intack_lapic(vec)  hvm_intack(lapic, vec)
#define hvm_intack_nmi         hvm_intack(nmi, 2)
#define hvm_intack_mce         hvm_intack(mce, 18)
#define hvm_intack_vector(vec) hvm_intack(vector, vec)
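
/*
 * Example (illustrative only): hvm_intack_lapic(0x30) expands to
 * ((struct hvm_intack) { hvm_intsrc_lapic, 0x30 }), i.e. an acknowledgement
 * record naming the local APIC as the source of vector 0x30.  The fixed
 * vectors 2 and 18 used above are the NMI and #MC exception vectors.
 */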
enum hvm_intblk {
    hvm_intblk_none,      /* not blocked (deliverable) */
    hvm_intblk_shadow,    /* MOV-SS or STI shadow */
    hvm_intblk_rflags_ie, /* RFLAGS.IE == 0 */
    hvm_intblk_tpr,       /* LAPIC TPR too high */
    hvm_intblk_nmi_iret,  /* NMI blocked until IRET */
    hvm_intblk_arch,      /* SVM/VMX specific reason */
};

/* These happen to be the same as the VMX interrupt shadow definitions. */
#define HVM_INTR_SHADOW_STI    0x00000001
#define HVM_INTR_SHADOW_MOV_SS 0x00000002
#define HVM_INTR_SHADOW_SMI    0x00000004
#define HVM_INTR_SHADOW_NMI    0x00000008

/*
 * HAP superpage capabilities:
 * bit0: whether 2MB superpages are allowed
 * bit1: whether 1GB superpages are allowed
 */
#define HVM_HAP_SUPERPAGE_2MB   0x00000001
#define HVM_HAP_SUPERPAGE_1GB   0x00000002

#define HVM_EVENT_VECTOR_UNSET    (-1)
#define HVM_EVENT_VECTOR_UPDATING (-2)

/* update_guest_cr() flags. */
#define HVM_UPDATE_GUEST_CR3_NOFLUSH 0x00000001

struct hvm_vcpu_nonreg_state {
    union {
        struct {
            uint64_t activity_state;
            uint64_t interruptibility_info;
            uint64_t pending_dbg;
            uint64_t interrupt_status;
        } vmx;
    };
};

/*
 * The hardware virtual machine (HVM) interface abstracts away from the
 * x86/x86_64 CPU virtualization assist specifics. Currently this interface
 * supports Intel's VT-x and AMD's SVM extensions.
 */
struct hvm_function_table {
    char *name;

    /* Support Hardware-Assisted Paging? */
    bool_t hap_supported;

    /* Necessary hardware support for alternate p2m's? */
    bool altp2m_supported;
    bool singlestep_supported;

    /* Hardware virtual interrupt delivery enable? */
    bool virtual_intr_delivery_enabled;

    /* Indicate HAP capabilities. */
    unsigned int hap_capabilities;

    /*
     * Initialise/destroy HVM domain/vcpu resources
     */
    int  (*domain_initialise)(struct domain *d);
    void (*domain_creation_finished)(struct domain *d);
    void (*domain_relinquish_resources)(struct domain *d);
    void (*domain_destroy)(struct domain *d);
    int  (*vcpu_initialise)(struct vcpu *v);
    void (*vcpu_destroy)(struct vcpu *v);

    /* save and load hvm guest cpu context for save/restore */
    void (*save_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);
    int (*load_cpu_ctxt)(struct vcpu *v, struct hvm_hw_cpu *ctxt);

    /* Examine specifics of the guest state. */
    unsigned int (*get_interrupt_shadow)(struct vcpu *v);
    void (*set_interrupt_shadow)(struct vcpu *v, unsigned int intr_shadow);
    void (*get_nonreg_state)(struct vcpu *v,
                             struct hvm_vcpu_nonreg_state *nrs);
    void (*set_nonreg_state)(struct vcpu *v,
                             struct hvm_vcpu_nonreg_state *nrs);
    int (*guest_x86_mode)(struct vcpu *v);
    unsigned int (*get_cpl)(struct vcpu *v);
    void (*get_segment_register)(struct vcpu *v, enum x86_segment seg,
                                 struct segment_register *reg);
    void (*set_segment_register)(struct vcpu *v, enum x86_segment seg,
                                 struct segment_register *reg);

    /* 
     * Re-set the value of CR3 that Xen runs on when handling VM exits.
     */
    void (*update_host_cr3)(struct vcpu *v);

    /*
     * Called to inform HVM layer that a guest CRn or EFER has changed.
     */
    void (*update_guest_cr)(struct vcpu *v, unsigned int cr,
                            unsigned int flags);
    void (*update_guest_efer)(struct vcpu *v);

    void (*cpuid_policy_changed)(struct vcpu *v);

    void (*fpu_leave)(struct vcpu *v);

    int  (*get_guest_pat)(struct vcpu *v, u64 *);
    int  (*set_guest_pat)(struct vcpu *v, u64);

    void (*set_tsc_offset)(struct vcpu *v, u64 offset, u64 at_tsc);

    void (*inject_event)(const struct x86_event *event);

    void (*init_hypercall_page)(void *ptr);

    bool (*event_pending)(const struct vcpu *v);
    bool (*get_pending_event)(struct vcpu *v, struct x86_event *info);
    void (*invlpg)(struct vcpu *v, unsigned long linear);

    int  (*cpu_up_prepare)(unsigned int cpu);
    void (*cpu_dead)(unsigned int cpu);

    int  (*cpu_up)(void);
    void (*cpu_down)(void);

    /* Copy up to 15 bytes from cached instruction bytes at current rIP. */
    unsigned int (*get_insn_bytes)(struct vcpu *v, uint8_t *buf);

    /* Instruction intercepts: non-void return values are X86EMUL codes. */
    void (*wbinvd_intercept)(void);
    void (*fpu_dirty_intercept)(void);
    int (*msr_read_intercept)(unsigned int msr, uint64_t *msr_content);
    int (*msr_write_intercept)(unsigned int msr, uint64_t msr_content);
    void (*handle_cd)(struct vcpu *v, unsigned long value);
    void (*set_info_guest)(struct vcpu *v);
    void (*set_rdtsc_exiting)(struct vcpu *v, bool_t);
    void (*set_descriptor_access_exiting)(struct vcpu *v, bool);

    /* Nested HVM */
    int (*nhvm_vcpu_initialise)(struct vcpu *v);
    void (*nhvm_vcpu_destroy)(struct vcpu *v);
    int (*nhvm_vcpu_reset)(struct vcpu *v);
    int (*nhvm_vcpu_vmexit_event)(struct vcpu *v, const struct x86_event *event);
    uint64_t (*nhvm_vcpu_p2m_base)(struct vcpu *v);
    bool_t (*nhvm_vmcx_guest_intercepts_event)(
        struct vcpu *v, unsigned int vector, int errcode);

    bool_t (*nhvm_vmcx_hap_enabled)(struct vcpu *v);

    enum hvm_intblk (*nhvm_intr_blocked)(struct vcpu *v);
    void (*nhvm_domain_relinquish_resources)(struct domain *d);

    /* Virtual interrupt delivery */
    void (*update_eoi_exit_bitmap)(struct vcpu *v, uint8_t vector, bool set);
    void (*process_isr)(int isr, struct vcpu *v);
    void (*deliver_posted_intr)(struct vcpu *v, u8 vector);
    void (*sync_pir_to_irr)(struct vcpu *v);
    bool (*test_pir)(const struct vcpu *v, uint8_t vector);
    void (*handle_eoi)(uint8_t vector, int isr);
    int (*pi_update_irte)(const struct vcpu *v, const struct pirq *pirq,
                          uint8_t gvec);
    void (*update_vlapic_mode)(struct vcpu *v);

    /* Walk nested p2m */
    int (*nhvm_hap_walk_L1_p2m)(struct vcpu *v, paddr_t L2_gpa,
                                paddr_t *L1_gpa, unsigned int *page_order,
                                uint8_t *p2m_acc, struct npfec npfec);

    void (*enable_msr_interception)(struct domain *d, uint32_t msr);

    /* Alternate p2m */
    void (*altp2m_vcpu_update_p2m)(struct vcpu *v);
    void (*altp2m_vcpu_update_vmfunc_ve)(struct vcpu *v);
    bool_t (*altp2m_vcpu_emulate_ve)(struct vcpu *v);
    int (*altp2m_vcpu_emulate_vmfunc)(const struct cpu_user_regs *regs);

    /* vmtrace */
    int (*vmtrace_control)(struct vcpu *v, bool enable, bool reset);
    int (*vmtrace_output_position)(struct vcpu *v, uint64_t *pos);
    int (*vmtrace_set_option)(struct vcpu *v, uint64_t key, uint64_t value);
    int (*vmtrace_get_option)(struct vcpu *v, uint64_t key, uint64_t *value);
    int (*vmtrace_reset)(struct vcpu *v);

    uint64_t (*get_reg)(struct vcpu *v, unsigned int reg);
    void (*set_reg)(struct vcpu *v, unsigned int reg, uint64_t val);

    /*
     * Parameters and callbacks for hardware-assisted TSC scaling,
     * which are valid only when the hardware feature is available.
     */
    struct {
        /* number of bits of the fractional part of TSC scaling ratio */
        uint8_t  ratio_frac_bits;
        /* maximum-allowed TSC scaling ratio */
        uint64_t max_ratio;

        /* Architecture function to setup TSC scaling ratio */
        void (*setup)(struct vcpu *v);
    } tsc_scaling;
};
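
/*
 * A rough sketch (for illustration only; the exact boot-time wiring lives in
 * hvm.c and the vendor start_vmx()/start_svm() implementations) of how this
 * table is consumed: each vendor returns its filled-in table, which is copied
 * into the global hvm_funcs below and then invoked through the
 * alternative_call()/alternative_vcall() wrappers later in this header.  The
 * selection condition below is hypothetical; the real code keys off the CPU
 * vendor.
 *
 *     const struct hvm_function_table *fns =
 *         cpu_has_vmx ? start_vmx() : start_svm();
 *
 *     if ( fns )
 *         hvm_funcs = *fns;
 */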

extern struct hvm_function_table hvm_funcs;
extern bool_t hvm_enabled;
extern s8 hvm_port80_allowed;

extern const struct hvm_function_table *start_svm(void);
extern const struct hvm_function_table *start_vmx(void);

int hvm_domain_initialise(struct domain *d,
                          const struct xen_domctl_createdomain *config);
void hvm_domain_relinquish_resources(struct domain *d);
void hvm_domain_destroy(struct domain *d);

int hvm_vcpu_initialise(struct vcpu *v);
void hvm_vcpu_destroy(struct vcpu *v);
void hvm_vcpu_down(struct vcpu *v);
int hvm_vcpu_cacheattr_init(struct vcpu *v);
void hvm_vcpu_cacheattr_destroy(struct vcpu *v);
void hvm_vcpu_reset_state(struct vcpu *v, uint16_t cs, uint16_t ip);

void hvm_get_guest_pat(struct vcpu *v, u64 *guest_pat);
int hvm_set_guest_pat(struct vcpu *v, u64 guest_pat);

u64 hvm_get_guest_tsc_fixed(struct vcpu *v, u64 at_tsc);

u64 hvm_scale_tsc(const struct domain *d, u64 tsc);
u64 hvm_get_tsc_scaling_ratio(u32 gtsc_khz);

void hvm_init_guest_time(struct domain *d);
void hvm_set_guest_time(struct vcpu *v, u64 guest_time);
uint64_t hvm_get_guest_time_fixed(const struct vcpu *v, uint64_t at_tsc);

int vmsi_deliver(
    struct domain *d, int vector,
    uint8_t dest, uint8_t dest_mode,
    uint8_t delivery_mode, uint8_t trig_mode);
struct hvm_pirq_dpci;
void vmsi_deliver_pirq(struct domain *d, const struct hvm_pirq_dpci *);
int hvm_girq_dest_2_vcpu_id(struct domain *d, uint8_t dest, uint8_t dest_mode);

enum hvm_intblk
hvm_interrupt_blocked(struct vcpu *v, struct hvm_intack intack);

void hvm_init_hypercall_page(struct domain *d, void *ptr);

void hvm_get_segment_register(struct vcpu *v, enum x86_segment seg,
                              struct segment_register *reg);
void hvm_set_segment_register(struct vcpu *v, enum x86_segment seg,
                              struct segment_register *reg);

void hvm_set_info_guest(struct vcpu *v);

int hvm_vmexit_cpuid(struct cpu_user_regs *regs, unsigned int inst_len);
void hvm_migrate_timers(struct vcpu *v);
void hvm_do_resume(struct vcpu *v);
void hvm_migrate_pirq(struct hvm_pirq_dpci *pirq_dpci, const struct vcpu *v);
void hvm_migrate_pirqs(struct vcpu *v);

void hvm_inject_event(const struct x86_event *event);

int hvm_event_needs_reinjection(uint8_t type, uint8_t vector);

uint8_t hvm_combine_hw_exceptions(uint8_t vec1, uint8_t vec2);

void hvm_set_rdtsc_exiting(struct domain *d, bool_t enable);

enum hvm_task_switch_reason { TSW_jmp, TSW_iret, TSW_call_or_int };
void hvm_task_switch(
    uint16_t tss_sel, enum hvm_task_switch_reason taskswitch_reason,
    int32_t errcode, unsigned int insn_len, unsigned int extra_eflags);

enum hvm_access_type {
    hvm_access_insn_fetch,
    hvm_access_none,
    hvm_access_read,
    hvm_access_write
};

bool hvm_vcpu_virtual_to_linear(
    struct vcpu *v,
    enum x86_segment seg,
    const struct segment_register *reg,
    unsigned long offset,
    unsigned int bytes,
    enum hvm_access_type access_type,
    const struct segment_register *active_cs,
    unsigned long *linear_addr);

static inline bool hvm_virtual_to_linear_addr(
    enum x86_segment seg,
    const struct segment_register *reg,
    unsigned long offset,
    unsigned int bytes,
    enum hvm_access_type access_type,
    const struct segment_register *active_cs,
    unsigned long *linear)
{
    return hvm_vcpu_virtual_to_linear(current, seg, reg, offset, bytes,
                                      access_type, active_cs, linear);
}
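
/*
 * Illustrative use (assumed caller context, not taken from this file):
 * translate a 4-byte guest data read at DS:offset for the current vcpu,
 * applying the usual segmentation checks.  'offset' is an assumed local
 * variable; on success 'linear' holds the translated address.
 *
 *     struct segment_register ds, cs;
 *     unsigned long linear;
 *
 *     hvm_get_segment_register(current, x86_seg_ds, &ds);
 *     hvm_get_segment_register(current, x86_seg_cs, &cs);
 *     if ( hvm_virtual_to_linear_addr(x86_seg_ds, &ds, offset, 4,
 *                                     hvm_access_read, &cs, &linear) )
 *         ...
 */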

void *hvm_map_guest_frame_rw(unsigned long gfn, bool_t permanent,
                             bool_t *writable);
void *hvm_map_guest_frame_ro(unsigned long gfn, bool_t permanent);
void hvm_unmap_guest_frame(void *p, bool_t permanent);
void hvm_mapped_guest_frames_mark_dirty(struct domain *);

int hvm_debug_op(struct vcpu *v, int32_t op);

/* Caller should pause vcpu before calling this function */
void hvm_toggle_singlestep(struct vcpu *v);
void hvm_fast_singlestep(struct vcpu *v, uint16_t p2midx);

int hvm_hap_nested_page_fault(paddr_t gpa, unsigned long gla,
                              struct npfec npfec);

/* Check CR4/EFER values */
const char *hvm_efer_valid(const struct vcpu *v, uint64_t value,
                           signed int cr0_pg);
unsigned long hvm_cr4_guest_valid_bits(const struct domain *d);

int hvm_copy_context_and_params(struct domain *src, struct domain *dst);

int hvm_get_param(struct domain *d, uint32_t index, uint64_t *value);

#ifdef CONFIG_HVM

#define hvm_get_guest_tsc(v) hvm_get_guest_tsc_fixed(v, 0)

#define hvm_tsc_scaling_supported \
    (!!hvm_funcs.tsc_scaling.ratio_frac_bits)

#define hvm_default_tsc_scaling_ratio \
    (1ULL << hvm_funcs.tsc_scaling.ratio_frac_bits)

#define hvm_tsc_scaling_ratio(d) \
    ((d)->arch.hvm.tsc_scaling_ratio)
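
/*
 * The scaling ratio is a fixed-point value with tsc_scaling.ratio_frac_bits
 * fractional bits, so the default ratio above represents 1.0 (no scaling).
 * Illustrative arithmetic only (not a quote of the scaling code): with 48
 * fractional bits, scaling a host TSC value by ratio r gives roughly
 *
 *     guest_tsc = (host_tsc * r) >> 48;
 *
 * so r = 3ULL << 47 corresponds to a guest TSC running at 1.5x the host rate.
 */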

#define hvm_get_guest_time(v) hvm_get_guest_time_fixed(v, 0)

#define hvm_paging_enabled(v) \
    (!!((v)->arch.hvm.guest_cr[0] & X86_CR0_PG))
#define hvm_wp_enabled(v) \
    (!!((v)->arch.hvm.guest_cr[0] & X86_CR0_WP))
#define hvm_pcid_enabled(v) \
    (!!((v)->arch.hvm.guest_cr[4] & X86_CR4_PCIDE))
#define hvm_pae_enabled(v) \
    (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_PAE))
#define hvm_smep_enabled(v) \
    (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_SMEP))
#define hvm_smap_enabled(v) \
    (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_SMAP))
#define hvm_nx_enabled(v) \
    ((v)->arch.hvm.guest_efer & EFER_NXE)
#define hvm_pku_enabled(v) \
    (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_PKE))
#define hvm_pks_enabled(v) \
    (hvm_paging_enabled(v) && ((v)->arch.hvm.guest_cr[4] & X86_CR4_PKS))

/* Can we use superpages in the HAP p2m table? */
#define hap_has_1gb (!!(hvm_funcs.hap_capabilities & HVM_HAP_SUPERPAGE_1GB))
#define hap_has_2mb (!!(hvm_funcs.hap_capabilities & HVM_HAP_SUPERPAGE_2MB))

#define hvm_long_mode_active(v) (!!((v)->arch.hvm.guest_efer & EFER_LMA))

static inline bool hvm_has_set_descriptor_access_exiting(void)
{
    return hvm_funcs.set_descriptor_access_exiting;
}

static inline void hvm_domain_creation_finished(struct domain *d)
{
    if ( hvm_funcs.domain_creation_finished )
        alternative_vcall(hvm_funcs.domain_creation_finished, d);
}

static inline int
hvm_guest_x86_mode(struct vcpu *v)
{
    ASSERT(v == current);
    return alternative_call(hvm_funcs.guest_x86_mode, v);
}

static inline void
hvm_update_host_cr3(struct vcpu *v)
{
    if ( hvm_funcs.update_host_cr3 )
        alternative_vcall(hvm_funcs.update_host_cr3, v);
}

static inline void hvm_update_guest_cr(struct vcpu *v, unsigned int cr)
{
    alternative_vcall(hvm_funcs.update_guest_cr, v, cr, 0);
}

static inline void hvm_update_guest_cr3(struct vcpu *v, bool noflush)
{
    unsigned int flags = noflush ? HVM_UPDATE_GUEST_CR3_NOFLUSH : 0;

    alternative_vcall(hvm_funcs.update_guest_cr, v, 3, flags);
}

static inline void hvm_update_guest_efer(struct vcpu *v)
{
    alternative_vcall(hvm_funcs.update_guest_efer, v);
}

static inline void hvm_cpuid_policy_changed(struct vcpu *v)
{
    alternative_vcall(hvm_funcs.cpuid_policy_changed, v);
}

static inline void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset,
                                      uint64_t at_tsc)
{
    alternative_vcall(hvm_funcs.set_tsc_offset, v, offset, at_tsc);
}

/*
 * Called to ensure that all guest-specific mappings in a tagged TLB are
 * flushed; does *not* flush Xen's TLB entries, and on processors without a
 * tagged TLB it is a no-op.
 */
static inline void hvm_flush_guest_tlbs(void)
{
    if ( hvm_enabled )
        hvm_asid_flush_core();
}

static inline unsigned int
hvm_get_cpl(struct vcpu *v)
{
    return alternative_call(hvm_funcs.get_cpl, v);
}

#define has_hvm_params(d) \
    ((d)->arch.hvm.params != NULL)

#define viridian_feature_mask(d) \
    (has_hvm_params(d) ? (d)->arch.hvm.params[HVM_PARAM_VIRIDIAN] : 0)

#define is_viridian_domain(d) \
    (is_hvm_domain(d) && (viridian_feature_mask(d) & HVMPV_base_freq))

#define is_viridian_vcpu(v) \
    is_viridian_domain((v)->domain)

#define has_viridian_time_ref_count(d) \
    (is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_time_ref_count))

#define has_viridian_apic_assist(d) \
    (is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_apic_assist))

#define has_viridian_synic(d) \
    (is_viridian_domain(d) && (viridian_feature_mask(d) & HVMPV_synic))

static inline void hvm_inject_exception(
    unsigned int vector, unsigned int type,
    unsigned int insn_len, int error_code)
{
    struct x86_event event = {
        .vector = vector,
        .type = type,
        .insn_len = insn_len,
        .error_code = error_code,
    };

    hvm_inject_event(&event);
}

static inline void hvm_inject_hw_exception(unsigned int vector, int errcode)
{
    struct x86_event event = {
        .vector = vector,
        .type = X86_EVENTTYPE_HW_EXCEPTION,
        .error_code = errcode,
    };

    hvm_inject_event(&event);
}

static inline void hvm_inject_page_fault(int errcode, unsigned long cr2)
{
    struct x86_event event = {
        .vector = X86_EXC_PF,
        .type = X86_EVENTTYPE_HW_EXCEPTION,
        .error_code = errcode,
        .cr2 = cr2,
    };

    hvm_inject_event(&event);
}
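
/*
 * Example (assumed caller context): report a write fault on a present
 * mapping at guest linear address 'addr', using the PFEC_* error-code
 * flags from the x86 definitions.
 *
 *     hvm_inject_page_fault(PFEC_page_present | PFEC_write_access, addr);
 */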

static inline bool hvm_event_pending(const struct vcpu *v)
{
    return alternative_call(hvm_funcs.event_pending, v);
}

static inline void hvm_invlpg(struct vcpu *v, unsigned long linear)
{
    alternative_vcall(hvm_funcs.invlpg, v, linear);
}

/* These bits in CR4 are owned by the host. */
#define HVM_CR4_HOST_MASK (mmu_cr4_features & \
    (X86_CR4_VMXE | X86_CR4_PAE | X86_CR4_MCE))

/* These exceptions must always be intercepted. */
#define HVM_TRAP_MASK ((1U << X86_EXC_DB)           | \
                       (1U << X86_EXC_AC) | \
                       (1U << X86_EXC_MC))

/* Called in boot/resume paths.  Must cope with no HVM support. */
static inline int hvm_cpu_up(void)
{
    if ( hvm_funcs.cpu_up )
        return alternative_call(hvm_funcs.cpu_up);

    return 0;
}

/* Called in shutdown paths.  Must cope with no HVM support. */
static inline void hvm_cpu_down(void)
{
    if ( hvm_funcs.cpu_down )
        alternative_vcall(hvm_funcs.cpu_down);
}

static inline unsigned int hvm_get_insn_bytes(struct vcpu *v, uint8_t *buf)
{
    return (hvm_funcs.get_insn_bytes
            ? alternative_call(hvm_funcs.get_insn_bytes, v, buf) : 0);
}

static inline void hvm_invalidate_regs_fields(struct cpu_user_regs *regs)
{
#ifndef NDEBUG
    regs->error_code = 0xbeef;
    regs->entry_vector = 0xbeef;
    regs->saved_upcall_mask = 0xbf;
    regs->cs = 0xbeef;
    regs->ss = 0xbeef;
    regs->ds = 0xbeef;
    regs->es = 0xbeef;
    regs->fs = 0xbeef;
    regs->gs = 0xbeef;
#endif
}

/*
 * Nested HVM
 */

/*
 * Inject a vmexit into the L1 guest: the L1 guest will see a VMEXIT caused
 * by the exception described by 'event'.
 */
static inline int nhvm_vcpu_vmexit_event(
    struct vcpu *v, const struct x86_event *event)
{
    return alternative_call(hvm_funcs.nhvm_vcpu_vmexit_event, v, event);
}

/*
 * Returns the L1 guest's CR3, which points to the page table used to
 * translate L2 guest physical addresses into L1 guest physical addresses.
 */
static inline uint64_t nhvm_vcpu_p2m_base(struct vcpu *v)
{
    return alternative_call(hvm_funcs.nhvm_vcpu_p2m_base, v);
}

/* Returns true when the L1 guest intercepts the specified trap. */
static inline bool_t nhvm_vmcx_guest_intercepts_event(
    struct vcpu *v, unsigned int vector, int errcode)
{
    return alternative_call(hvm_funcs.nhvm_vmcx_guest_intercepts_event, v,
                            vector, errcode);
}

/* Returns true when the L1 guest wants to use HAP to run the L2 guest. */
static inline bool_t nhvm_vmcx_hap_enabled(struct vcpu *v)
{
    return alternative_call(hvm_funcs.nhvm_vmcx_hap_enabled, v);
}

/* interrupt */
static inline enum hvm_intblk nhvm_interrupt_blocked(struct vcpu *v)
{
    return alternative_call(hvm_funcs.nhvm_intr_blocked, v);
}

static inline int nhvm_hap_walk_L1_p2m(
    struct vcpu *v, paddr_t L2_gpa, paddr_t *L1_gpa, unsigned int *page_order,
    uint8_t *p2m_acc, struct npfec npfec)
{
    return alternative_call(hvm_funcs.nhvm_hap_walk_L1_p2m,
        v, L2_gpa, L1_gpa, page_order, p2m_acc, npfec);
}

static inline void hvm_enable_msr_interception(struct domain *d, uint32_t msr)
{
    alternative_vcall(hvm_funcs.enable_msr_interception, d, msr);
}

static inline bool hvm_is_singlestep_supported(void)
{
    return hvm_funcs.singlestep_supported;
}

static inline bool hvm_hap_supported(void)
{
    return hvm_funcs.hap_supported;
}

/* Returns true if the hardware supports alternate p2ms. */
static inline bool hvm_altp2m_supported(void)
{
    return hvm_funcs.altp2m_supported;
}

/* updates the current hardware p2m */
static inline void altp2m_vcpu_update_p2m(struct vcpu *v)
{
    if ( hvm_funcs.altp2m_vcpu_update_p2m )
        alternative_vcall(hvm_funcs.altp2m_vcpu_update_p2m, v);
}

/* updates VMCS fields related to VMFUNC and #VE */
static inline void altp2m_vcpu_update_vmfunc_ve(struct vcpu *v)
{
    if ( hvm_funcs.altp2m_vcpu_update_vmfunc_ve )
        alternative_vcall(hvm_funcs.altp2m_vcpu_update_vmfunc_ve, v);
}

/* emulates #VE */
static inline bool altp2m_vcpu_emulate_ve(struct vcpu *v)
{
    if ( hvm_funcs.altp2m_vcpu_emulate_ve )
    {
        alternative_vcall(hvm_funcs.altp2m_vcpu_emulate_ve, v);
        return true;
    }
    return false;
}

static inline int hvm_vmtrace_control(struct vcpu *v, bool enable, bool reset)
{
    if ( hvm_funcs.vmtrace_control )
        return alternative_call(hvm_funcs.vmtrace_control, v, enable, reset);

    return -EOPNOTSUPP;
}

/* Returns -errno, or a boolean of whether tracing is currently active. */
static inline int hvm_vmtrace_output_position(struct vcpu *v, uint64_t *pos)
{
    if ( hvm_funcs.vmtrace_output_position )
        return alternative_call(hvm_funcs.vmtrace_output_position, v, pos);

    return -EOPNOTSUPP;
}

static inline int hvm_vmtrace_set_option(
    struct vcpu *v, uint64_t key, uint64_t value)
{
    if ( hvm_funcs.vmtrace_set_option )
        return alternative_call(hvm_funcs.vmtrace_set_option, v, key, value);

    return -EOPNOTSUPP;
}

static inline int hvm_vmtrace_get_option(
    struct vcpu *v, uint64_t key, uint64_t *value)
{
    if ( hvm_funcs.vmtrace_get_option )
        return alternative_call(hvm_funcs.vmtrace_get_option, v, key, value);

    return -EOPNOTSUPP;
}

static inline int hvm_vmtrace_reset(struct vcpu *v)
{
    if ( hvm_funcs.vmtrace_reset )
        return alternative_call(hvm_funcs.vmtrace_reset, v);

    return -EOPNOTSUPP;
}

/*
 * Accessors for registers which have per-guest-type or per-vendor locations
 * (e.g. VMCS, msr load/save lists, VMCB, VMLOAD lazy, etc).
 *
 * The caller is responsible for all auditing - these accessors do not fail,
 * but do use domain_crash() for usage errors.
 *
 * Must cope with being called in non-current context.
 */
uint64_t hvm_get_reg(struct vcpu *v, unsigned int reg);
void hvm_set_reg(struct vcpu *v, unsigned int reg, uint64_t val);

/*
 * This must be defined as a macro instead of an inline function,
 * because it uses 'struct vcpu' and 'struct domain' which have
 * not been defined yet.
 */
#define arch_vcpu_block(v) ({                                   \
    struct vcpu *v_ = (v);                                      \
    struct domain *d_ = v_->domain;                             \
    if ( is_hvm_domain(d_) && d_->arch.hvm.pi_ops.vcpu_block )  \
        d_->arch.hvm.pi_ops.vcpu_block(v_);                     \
})

static inline void hvm_get_nonreg_state(struct vcpu *v,
                                        struct hvm_vcpu_nonreg_state *nrs)
{
    if ( hvm_funcs.get_nonreg_state )
        alternative_vcall(hvm_funcs.get_nonreg_state, v, nrs);
}

static inline void hvm_set_nonreg_state(struct vcpu *v,
                                        struct hvm_vcpu_nonreg_state *nrs)
{
    if ( hvm_funcs.set_nonreg_state )
        alternative_vcall(hvm_funcs.set_nonreg_state, v, nrs);
}

static inline int hvm_pi_update_irte(const struct vcpu *v,
                                     const struct pirq *pirq, uint8_t gvec)
{
    return alternative_call(hvm_funcs.pi_update_irte, v, pirq, gvec);
}

static inline void hvm_update_vlapic_mode(struct vcpu *v)
{
    if ( hvm_funcs.update_vlapic_mode )
        alternative_vcall(hvm_funcs.update_vlapic_mode, v);
}

#else  /* CONFIG_HVM */

#define hvm_enabled false

/*
 * List of inline functions above, of which only declarations are
 * needed because DCE will kick in.
 */
int hvm_guest_x86_mode(struct vcpu *v);
void hvm_cpuid_policy_changed(struct vcpu *v);
void hvm_set_tsc_offset(struct vcpu *v, uint64_t offset, uint64_t at_tsc);

/* End of prototype list */

/* Called by code in other headers */
static inline bool hvm_is_singlestep_supported(void)
{
    return false;
}

static inline bool hvm_hap_supported(void)
{
    return false;
}

static inline bool nhvm_vmcx_hap_enabled(const struct vcpu *v)
{
    ASSERT_UNREACHABLE();
    return false;
}


/* Called by common code */
static inline int hvm_cpu_up(void)
{
    return 0;
}

static inline void hvm_cpu_down(void) {}

static inline void hvm_flush_guest_tlbs(void) {}

static inline void hvm_invlpg(const struct vcpu *v, unsigned long linear)
{
    ASSERT_UNREACHABLE();
}

static inline void hvm_domain_creation_finished(struct domain *d)
{
    ASSERT_UNREACHABLE();
}

/*
 * Shadow code needs further cleanup to eliminate some HVM-only paths. For
 * now provide the stubs here but assert they will never be reached.
 */
static inline void hvm_update_host_cr3(const struct vcpu *v)
{
    ASSERT_UNREACHABLE();
}

static inline void hvm_update_guest_cr3(const struct vcpu *v, bool noflush)
{
    ASSERT_UNREACHABLE();
}

static inline unsigned int hvm_get_cpl(const struct vcpu *v)
{
    ASSERT_UNREACHABLE();
    return -1;
}

static inline bool hvm_event_pending(const struct vcpu *v)
{
    return false;
}

static inline void hvm_inject_hw_exception(unsigned int vector, int errcode)
{
    ASSERT_UNREACHABLE();
}

static inline bool hvm_has_set_descriptor_access_exiting(void)
{
    return false;
}

static inline int hvm_vmtrace_control(struct vcpu *v, bool enable, bool reset)
{
    return -EOPNOTSUPP;
}

static inline int hvm_vmtrace_output_position(struct vcpu *v, uint64_t *pos)
{
    return -EOPNOTSUPP;
}

static inline int hvm_vmtrace_set_option(
    struct vcpu *v, uint64_t key, uint64_t value)
{
    return -EOPNOTSUPP;
}

static inline int hvm_vmtrace_get_option(
    struct vcpu *v, uint64_t key, uint64_t *value)
{
    return -EOPNOTSUPP;
}

static inline uint64_t hvm_get_reg(struct vcpu *v, unsigned int reg)
{
    ASSERT_UNREACHABLE();
    return 0;
}
static inline void hvm_set_reg(struct vcpu *v, unsigned int reg, uint64_t val)
{
    ASSERT_UNREACHABLE();
}

#define is_viridian_domain(d) ((void)(d), false)
#define is_viridian_vcpu(v) ((void)(v), false)
#define has_viridian_time_ref_count(d) ((void)(d), false)
#define hvm_long_mode_active(v) ((void)(v), false)
#define hvm_get_guest_time(v) ((void)(v), 0)

#define hvm_tsc_scaling_supported false
#define hap_has_1gb false
#define hap_has_2mb false

#define hvm_paging_enabled(v) ((void)(v), false)
#define hvm_wp_enabled(v) ((void)(v), false)
#define hvm_pcid_enabled(v) ((void)(v), false)
#define hvm_pae_enabled(v) ((void)(v), false)
#define hvm_smep_enabled(v) ((void)(v), false)
#define hvm_smap_enabled(v) ((void)(v), false)
#define hvm_nx_enabled(v) ((void)(v), false)
#define hvm_pku_enabled(v) ((void)(v), false)
#define hvm_pks_enabled(v) ((void)(v), false)

#define arch_vcpu_block(v) ((void)(v))

#endif  /* CONFIG_HVM */

#endif /* __ASM_X86_HVM_HVM_H__ */

/*
 * Local variables:
 * mode: C
 * c-file-style: "BSD"
 * c-basic-offset: 4
 * tab-width: 4
 * indent-tabs-mode: nil
 * End:
 */