/** @file
 * HM - VMX Structures and Definitions. (VMM)
 */

/*
 * Copyright (C) 2006-2013 Oracle Corporation
 *
 * This file is part of VirtualBox Open Source Edition (OSE), as
 * available from http://www.virtualbox.org. This file is free software;
 * you can redistribute it and/or modify it under the terms of the GNU
 * General Public License (GPL) as published by the Free Software
 * Foundation, in version 2 as it comes in the "COPYING" file of the
 * VirtualBox OSE distribution. VirtualBox OSE is distributed in the
 * hope that it will be useful, but WITHOUT ANY WARRANTY of any kind.
 *
 * The contents of this file may alternatively be used under the terms
 * of the Common Development and Distribution License Version 1.0
 * (CDDL) only, as it comes in the "COPYING.CDDL" file of the
 * VirtualBox OSE distribution, in which case the provisions of the
 * CDDL are applicable instead of those of the GPL.
 *
 * You may elect to license modified versions of this file under the
 * terms and conditions of either the GPL or the CDDL or both.
 */

#ifndef ___VBox_vmm_hm_vmx_h
#define ___VBox_vmm_hm_vmx_h

#include <VBox/types.h>
#include <VBox/err.h>
#include <iprt/x86.h>
#include <iprt/assert.h>

/* In Visual C++ versions prior to 2012, the vmx intrinsics are only available
   when targeting AMD64. */
#if RT_INLINE_ASM_USES_INTRIN >= 16 && defined(RT_ARCH_AMD64)
# include <intrin.h>
/* We always want them as intrinsics, no functions. */
# pragma intrinsic(__vmx_on)
# pragma intrinsic(__vmx_off)
# pragma intrinsic(__vmx_vmclear)
# pragma intrinsic(__vmx_vmptrld)
# pragma intrinsic(__vmx_vmread)
# pragma intrinsic(__vmx_vmwrite)
# define VMX_USE_MSC_INTRINSICS 1
#else
# define VMX_USE_MSC_INTRINSICS 0
#endif
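
/*
 * Illustrative sketch (not part of the original header): selecting between the
 * MSC intrinsic and a hand-written assembly helper for VMWRITE.  The helper
 * name VMXWriteVmcsFieldAsm is hypothetical and used only for illustration.
 *
 *     static int vmxWriteVmcsField(size_t uField, size_t uValue)
 *     {
 *     #if VMX_USE_MSC_INTRINSICS
 *         return __vmx_vmwrite(uField, uValue) == 0 ? VINF_SUCCESS : VERR_INVALID_PARAMETER;
 *     #else
 *         return VMXWriteVmcsFieldAsm(uField, uValue);    // hypothetical asm helper
 *     #endif
 *     }
 */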


/** @defgroup grp_vmx   vmx Types and Definitions
 * @ingroup grp_hm
 * @{
 */

/** @def HMVMXCPU_GST_SET_UPDATED
 * Sets a guest-state-updated flag.
 *
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   fFlag   The flag to set.
 */
#define HMVMXCPU_GST_SET_UPDATED(pVCpu, fFlag)        (ASMAtomicUoOrU32(&(pVCpu)->hm.s.vmx.fUpdatedGuestState, (fFlag)))

/** @def HMVMXCPU_GST_IS_SET
 * Checks whether all the flags in the specified guest-state-updated set are set.
 *
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   fFlag   The flag to check.
 */
#define HMVMXCPU_GST_IS_SET(pVCpu, fFlag)             ((ASMAtomicUoReadU32(&(pVCpu)->hm.s.vmx.fUpdatedGuestState) & (fFlag)) == (fFlag))

/** @def HMVMXCPU_GST_IS_UPDATED
 * Checks whether one or more of the flags in the specified guest-state-updated
 * set are updated.
 *
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   fFlags  The flags to check for.
 */
#define HMVMXCPU_GST_IS_UPDATED(pVCpu, fFlags)        RT_BOOL(ASMAtomicUoReadU32(&(pVCpu)->hm.s.vmx.fUpdatedGuestState) & (fFlags))

/** @def HMVMXCPU_GST_RESET_TO
 * Resets the guest-state-updated flags to the specified value.
 *
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   fFlags  The new value.
 */
#define HMVMXCPU_GST_RESET_TO(pVCpu, fFlags)          (ASMAtomicUoWriteU32(&(pVCpu)->hm.s.vmx.fUpdatedGuestState, (fFlags)))

/** @def HMVMXCPU_GST_VALUE
 * Returns the current guest-state-updated flags value.
 *
 * @param   pVCpu   Pointer to the VMCPU.
 */
#define HMVMXCPU_GST_VALUE(pVCpu)                     (ASMAtomicUoReadU32(&(pVCpu)->hm.s.vmx.fUpdatedGuestState))
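
/*
 * Illustrative usage sketch: HMVMX_UPDATED_GUEST_RIP and hmR0VmxSaveGuestRip are
 * placeholders (not defined in this header) for a guest-state flag and the
 * routine that refreshes that state from the VMCS.
 *
 *     HMVMXCPU_GST_SET_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP);   // after reading RIP from the VMCS
 *     ...
 *     if (!HMVMXCPU_GST_IS_UPDATED(pVCpu, HMVMX_UPDATED_GUEST_RIP))
 *         rc = hmR0VmxSaveGuestRip(pVCpu, pMixedCtx);             // hypothetical refresh routine
 */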

/** @name Host-state restoration flags.
 * @{
 */
/* If you change these values don't forget to update the assembly defines as well! */
#define VMX_RESTORE_HOST_SEL_DS               RT_BIT(0)
#define VMX_RESTORE_HOST_SEL_ES               RT_BIT(1)
#define VMX_RESTORE_HOST_SEL_FS               RT_BIT(2)
#define VMX_RESTORE_HOST_SEL_GS               RT_BIT(3)
#define VMX_RESTORE_HOST_SEL_TR               RT_BIT(4)
#define VMX_RESTORE_HOST_GDTR                 RT_BIT(5)
#define VMX_RESTORE_HOST_IDTR                 RT_BIT(6)
#define VMX_RESTORE_HOST_REQUIRED             RT_BIT(7)
/** @} */

/**
 * Host-state restoration structure.
 * This holds host-state fields that require manual restoration.
 * Assembly version found in hm_vmx.mac (should be automatically verified).
 */
typedef struct VMXRESTOREHOST
{
    RTSEL       uHostSelDS;     /* 0x00 */
    RTSEL       uHostSelES;     /* 0x02 */
    RTSEL       uHostSelFS;     /* 0x04 */
    RTSEL       uHostSelGS;     /* 0x06 */
    RTSEL       uHostSelTR;     /* 0x08 */
    uint8_t     abPadding0[4];
    X86XDTR64   HostGdtr;       /**< 0x0e - should be aligned by its 64-bit member.  */
    uint8_t     abPadding1[6];
    X86XDTR64   HostIdtr;       /**< 0x1e - should be aligned by its 64-bit member. */
    uint64_t    uHostFSBase;    /* 0x28 */
    uint64_t    uHostGSBase;    /* 0x30 */
} VMXRESTOREHOST;
/** Pointer to VMXRESTOREHOST. */
typedef VMXRESTOREHOST *PVMXRESTOREHOST;
AssertCompileSize(X86XDTR64, 10);
AssertCompileMemberOffset(VMXRESTOREHOST, HostGdtr.uAddr, 16);
AssertCompileMemberOffset(VMXRESTOREHOST, HostIdtr.uAddr, 32);
AssertCompileMemberOffset(VMXRESTOREHOST, uHostFSBase,    40);
AssertCompileSize(VMXRESTOREHOST, 56);
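
/*
 * Illustrative sketch: recording a host selector for manual restoration.  The
 * fRestoreHostFlags field and its location are placeholders for whatever
 * structure embeds VMXRESTOREHOST and the flag word checked by the assembly
 * restore code.
 *
 *     pVCpu->hm.s.vmx.RestoreHost.uHostSelFS = ASMGetFS();
 *     pVCpu->hm.s.vmx.fRestoreHostFlags     |= VMX_RESTORE_HOST_SEL_FS | VMX_RESTORE_HOST_REQUIRED;
 */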

/** @name VMX HM-error codes for VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO.
 *  UFC = Unsupported Feature Combination.
 * @{
 */
/** Unsupported pin-based VM-execution controls combo. */
#define VMX_UFC_CTRL_PIN_EXEC                                   0
/** Unsupported processor-based VM-execution controls combo. */
#define VMX_UFC_CTRL_PROC_EXEC                                  1
/** Unsupported processor-based VM-execution controls combo for MOV-DRx exiting. */
#define VMX_UFC_CTRL_PROC_MOV_DRX_EXIT                          2
/** Unsupported VM-entry controls combo. */
#define VMX_UFC_CTRL_ENTRY                                      3
/** Unsupported VM-exit controls combo. */
#define VMX_UFC_CTRL_EXIT                                       4
/** MSR storage capacity of the VMCS autoload/store area is not sufficient
 *  for storing host MSRs. */
#define VMX_UFC_INSUFFICIENT_HOST_MSR_STORAGE                   5
/** MSR storage capacity of the VMCS autoload/store area is not sufficient
 *  for storing guest MSRs. */
#define VMX_UFC_INSUFFICIENT_GUEST_MSR_STORAGE                  6
/** Invalid VMCS size. */
#define VMX_UFC_INVALID_VMCS_SIZE                               7
/** @} */

/** @name VMX HM-error codes for VERR_VMX_INVALID_GUEST_STATE.
 *  IGS = Invalid Guest State.
 * @{
 */
/** An error occurred while checking invalid-guest-state. */
#define VMX_IGS_ERROR                                           0
/** The invalid guest-state checks did not find any reason for the failure. */
#define VMX_IGS_REASON_NOT_FOUND                                1
/** CR0 fixed1 bits invalid. */
#define VMX_IGS_CR0_FIXED1                                      2
/** CR0 fixed0 bits invalid. */
#define VMX_IGS_CR0_FIXED0                                      3
/** CR0.PG and CR0.PE invalid VT-x/host combination. */
#define VMX_IGS_CR0_PG_PE_COMBO                                 4
/** CR4 fixed1 bits invalid. */
#define VMX_IGS_CR4_FIXED1                                      5
/** CR4 fixed0 bits invalid. */
#define VMX_IGS_CR4_FIXED0                                      6
/** Reserved bits in VMCS' DEBUGCTL MSR field not set to 0 when
 *  VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG is used. */
#define VMX_IGS_DEBUGCTL_MSR_RESERVED                           7
/** CR0.PG not set for long-mode when not using unrestricted guest. */
#define VMX_IGS_CR0_PG_LONGMODE                                 8
/** CR4.PAE not set for long-mode guest when not using unrestricted guest. */
#define VMX_IGS_CR4_PAE_LONGMODE                                9
/** CR4.PCIDE set for 32-bit guest. */
#define VMX_IGS_CR4_PCIDE                                       10
/** VMCS' DR7 reserved bits not set to 0. */
#define VMX_IGS_DR7_RESERVED                                    11
/** VMCS' PERF_GLOBAL MSR reserved bits not set to 0. */
#define VMX_IGS_PERF_GLOBAL_MSR_RESERVED                        12
/** VMCS' EFER MSR reserved bits not set to 0. */
#define VMX_IGS_EFER_MSR_RESERVED                               13
/** VMCS' EFER MSR.LMA does not match the IA32e mode guest control. */
#define VMX_IGS_EFER_LMA_GUEST_MODE_MISMATCH                    14
/** VMCS' EFER MSR.LMA does not match CR0.PG of the guest when not using
 *  unrestricted guest. */
#define VMX_IGS_EFER_LMA_PG_MISMATCH                            15
/** CS.Attr.P bit invalid. */
#define VMX_IGS_CS_ATTR_P_INVALID                               16
/** CS.Attr reserved bits not set to 0.  */
#define VMX_IGS_CS_ATTR_RESERVED                                17
/** CS.Attr.G bit invalid. */
#define VMX_IGS_CS_ATTR_G_INVALID                               18
/** CS is unusable. */
#define VMX_IGS_CS_ATTR_UNUSABLE                                19
/** CS and SS DPL unequal. */
#define VMX_IGS_CS_SS_ATTR_DPL_UNEQUAL                          20
/** CS and SS DPL mismatch. */
#define VMX_IGS_CS_SS_ATTR_DPL_MISMATCH                         21
/** CS Attr.Type invalid. */
#define VMX_IGS_CS_ATTR_TYPE_INVALID                            22
/** CS and SS RPL unequal. */
#define VMX_IGS_SS_CS_RPL_UNEQUAL                               23
/** SS.Attr.DPL and SS RPL unequal. */
#define VMX_IGS_SS_ATTR_DPL_RPL_UNEQUAL                         24
/** SS.Attr.DPL invalid for segment type. */
#define VMX_IGS_SS_ATTR_DPL_INVALID                             25
/** SS.Attr.Type invalid. */
#define VMX_IGS_SS_ATTR_TYPE_INVALID                            26
/** SS.Attr.P bit invalid. */
#define VMX_IGS_SS_ATTR_P_INVALID                               27
/** SS.Attr reserved bits not set to 0. */
#define VMX_IGS_SS_ATTR_RESERVED                                28
/** SS.Attr.G bit invalid. */
#define VMX_IGS_SS_ATTR_G_INVALID                               29
/** DS.Attr.A bit invalid. */
#define VMX_IGS_DS_ATTR_A_INVALID                               30
/** DS.Attr.P bit invalid. */
#define VMX_IGS_DS_ATTR_P_INVALID                               31
/** DS.Attr.DPL and DS RPL unequal. */
#define VMX_IGS_DS_ATTR_DPL_RPL_UNEQUAL                         32
/** DS.Attr reserved bits not set to 0. */
#define VMX_IGS_DS_ATTR_RESERVED                                33
/** DS.Attr.G bit invalid. */
#define VMX_IGS_DS_ATTR_G_INVALID                               34
/** DS.Attr.Type invalid. */
#define VMX_IGS_DS_ATTR_TYPE_INVALID                            35
/** ES.Attr.A bit invalid. */
#define VMX_IGS_ES_ATTR_A_INVALID                               36
/** ES.Attr.P bit invalid. */
#define VMX_IGS_ES_ATTR_P_INVALID                               37
/** ES.Attr.DPL and ES RPL unequal. */
#define VMX_IGS_ES_ATTR_DPL_RPL_UNEQUAL                         38
/** ES.Attr reserved bits not set to 0. */
#define VMX_IGS_ES_ATTR_RESERVED                                39
/** ES.Attr.G bit invalid. */
#define VMX_IGS_ES_ATTR_G_INVALID                               40
/** ES.Attr.Type invalid. */
#define VMX_IGS_ES_ATTR_TYPE_INVALID                            41
/** FS.Attr.A bit invalid. */
#define VMX_IGS_FS_ATTR_A_INVALID                               42
/** FS.Attr.P bit invalid. */
#define VMX_IGS_FS_ATTR_P_INVALID                               43
/** FS.Attr.DPL and FS RPL unequal. */
#define VMX_IGS_FS_ATTR_DPL_RPL_UNEQUAL                         44
/** FS.Attr reserved bits not set to 0. */
#define VMX_IGS_FS_ATTR_RESERVED                                45
/** FS.Attr.G bit invalid. */
#define VMX_IGS_FS_ATTR_G_INVALID                               46
/** FS.Attr.Type invalid. */
#define VMX_IGS_FS_ATTR_TYPE_INVALID                            47
/** GS.Attr.A bit invalid. */
#define VMX_IGS_GS_ATTR_A_INVALID                               48
/** GS.Attr.P bit invalid. */
#define VMX_IGS_GS_ATTR_P_INVALID                               49
/** GS.Attr.DPL and GS RPL unequal. */
#define VMX_IGS_GS_ATTR_DPL_RPL_UNEQUAL                         50
/** GS.Attr reserved bits not set to 0. */
#define VMX_IGS_GS_ATTR_RESERVED                                51
/** GS.Attr.G bit invalid. */
#define VMX_IGS_GS_ATTR_G_INVALID                               52
/** GS.Attr.Type invalid. */
#define VMX_IGS_GS_ATTR_TYPE_INVALID                            53
/** V86 mode CS.Base invalid. */
#define VMX_IGS_V86_CS_BASE_INVALID                             54
/** V86 mode CS.Limit invalid. */
#define VMX_IGS_V86_CS_LIMIT_INVALID                            55
/** V86 mode CS.Attr invalid. */
#define VMX_IGS_V86_CS_ATTR_INVALID                             56
/** V86 mode SS.Base invalid. */
#define VMX_IGS_V86_SS_BASE_INVALID                             57
/** V86 mode SS.Limit invalid. */
#define VMX_IGS_V86_SS_LIMIT_INVALID                            58
/** V86 mode SS.Attr invalid. */
#define VMX_IGS_V86_SS_ATTR_INVALID                             59
/** V86 mode DS.Base invalid. */
#define VMX_IGS_V86_DS_BASE_INVALID                             60
/** V86 mode DS.Limit invalid. */
#define VMX_IGS_V86_DS_LIMIT_INVALID                            61
/** V86 mode DS.Attr invalid. */
#define VMX_IGS_V86_DS_ATTR_INVALID                             62
/** V86 mode ES.Base invalid. */
#define VMX_IGS_V86_ES_BASE_INVALID                             63
/** V86 mode ES.Limit invalid. */
#define VMX_IGS_V86_ES_LIMIT_INVALID                            64
/** V86 mode ES.Attr invalid. */
#define VMX_IGS_V86_ES_ATTR_INVALID                             65
/** V86 mode FS.Base invalid. */
#define VMX_IGS_V86_FS_BASE_INVALID                             66
/** V86 mode FS.Limit invalid. */
#define VMX_IGS_V86_FS_LIMIT_INVALID                            67
/** V86 mode FS.Attr invalid. */
#define VMX_IGS_V86_FS_ATTR_INVALID                             68
/** V86 mode GS.Base invalid. */
#define VMX_IGS_V86_GS_BASE_INVALID                             69
/** V86 mode GS.Limit invalid. */
#define VMX_IGS_V86_GS_LIMIT_INVALID                            70
/** V86 mode GS.Attr invalid. */
#define VMX_IGS_V86_GS_ATTR_INVALID                             71
/** Longmode CS.Base invalid. */
#define VMX_IGS_LONGMODE_CS_BASE_INVALID                        72
/** Longmode SS.Base invalid. */
#define VMX_IGS_LONGMODE_SS_BASE_INVALID                        73
/** Longmode DS.Base invalid. */
#define VMX_IGS_LONGMODE_DS_BASE_INVALID                        74
/** Longmode ES.Base invalid. */
#define VMX_IGS_LONGMODE_ES_BASE_INVALID                        75
/** SYSENTER ESP is not canonical. */
#define VMX_IGS_SYSENTER_ESP_NOT_CANONICAL                      76
/** SYSENTER EIP is not canonical. */
#define VMX_IGS_SYSENTER_EIP_NOT_CANONICAL                      77
/** PAT MSR invalid. */
#define VMX_IGS_PAT_MSR_INVALID                                 78
/** PAT MSR reserved bits not set to 0. */
#define VMX_IGS_PAT_MSR_RESERVED                                79
/** GDTR.Base is not canonical. */
#define VMX_IGS_GDTR_BASE_NOT_CANONICAL                         80
/** IDTR.Base is not canonical. */
#define VMX_IGS_IDTR_BASE_NOT_CANONICAL                         81
/** GDTR.Limit invalid. */
#define VMX_IGS_GDTR_LIMIT_INVALID                              82
/** IDTR.Limit invalid. */
#define VMX_IGS_IDTR_LIMIT_INVALID                              83
/** Longmode RIP is invalid. */
#define VMX_IGS_LONGMODE_RIP_INVALID                            84
/** RFLAGS reserved bits not set to 0. */
#define VMX_IGS_RFLAGS_RESERVED                                 85
/** RFLAGS RA1 reserved bits not set to 1. */
#define VMX_IGS_RFLAGS_RESERVED1                                86
/** RFLAGS.VM (V86 mode) invalid. */
#define VMX_IGS_RFLAGS_VM_INVALID                               87
/** RFLAGS.IF invalid. */
#define VMX_IGS_RFLAGS_IF_INVALID                               88
/** Activity state invalid. */
#define VMX_IGS_ACTIVITY_STATE_INVALID                          89
/** Activity state HLT invalid when SS.Attr.DPL is not zero. */
#define VMX_IGS_ACTIVITY_STATE_HLT_INVALID                      90
/** Activity state ACTIVE invalid when block-by-STI or MOV SS. */
#define VMX_IGS_ACTIVITY_STATE_ACTIVE_INVALID                   91
/** Activity state SIPI WAIT invalid. */
#define VMX_IGS_ACTIVITY_STATE_SIPI_WAIT_INVALID                92
/** Interruptibility state reserved bits not set to 0. */
#define VMX_IGS_INTERRUPTIBILITY_STATE_RESERVED                 93
/** Interruptibility state cannot be block-by-STI -and- MOV SS. */
#define VMX_IGS_INTERRUPTIBILITY_STATE_STI_MOVSS_INVALID        94
/** Interruptibility state block-by-STI invalid for EFLAGS. */
#define VMX_IGS_INTERRUPTIBILITY_STATE_STI_EFL_INVALID          95
/** Interruptibility state invalid while trying to deliver external
 *  interrupt. */
#define VMX_IGS_INTERRUPTIBILITY_STATE_EXT_INT_INVALID          96
/** Interruptibility state block-by-MOVSS invalid while trying to deliver an
 *  NMI. */
#define VMX_IGS_INTERRUPTIBILITY_STATE_MOVSS_INVALID            97
/** Interruptibility state block-by-SMI invalid when CPU is not in SMM. */
#define VMX_IGS_INTERRUPTIBILITY_STATE_SMI_INVALID              98
/** Interruptibility state block-by-SMI invalid when trying to enter SMM. */
#define VMX_IGS_INTERRUPTIBILITY_STATE_SMI_SMM_INVALID          99
/** Interruptibility state block-by-STI (maybe) invalid when trying to deliver
 *  an NMI. */
#define VMX_IGS_INTERRUPTIBILITY_STATE_STI_INVALID              100
/** Interruptibility state block-by-NMI invalid when virtual-NMIs control is
 *  active. */
#define VMX_IGS_INTERRUPTIBILITY_STATE_NMI_INVALID              101
/** Pending debug exceptions reserved bits not set to 0. */
#define VMX_IGS_PENDING_DEBUG_RESERVED                          102
/** Longmode pending debug exceptions reserved bits not set to 0. */
#define VMX_IGS_LONGMODE_PENDING_DEBUG_RESERVED                 103
/** Pending debug exceptions.BS bit is not set when it should be. */
#define VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_SET                   104
/** Pending debug exceptions.BS bit is not clear when it should be. */
#define VMX_IGS_PENDING_DEBUG_XCPT_BS_NOT_CLEAR                 105
/** VMCS link pointer reserved bits not set to 0. */
#define VMX_IGS_VMCS_LINK_PTR_RESERVED                          106
/** TR cannot index into LDT, TI bit MBZ. */
#define VMX_IGS_TR_TI_INVALID                                   107
/** LDTR cannot index into LDT. TI bit MBZ. */
#define VMX_IGS_LDTR_TI_INVALID                                 108
/** TR.Base is not canonical. */
#define VMX_IGS_TR_BASE_NOT_CANONICAL                           109
/** FS.Base is not canonical. */
#define VMX_IGS_FS_BASE_NOT_CANONICAL                           110
/** GS.Base is not canonical. */
#define VMX_IGS_GS_BASE_NOT_CANONICAL                           111
/** LDTR.Base is not canonical. */
#define VMX_IGS_LDTR_BASE_NOT_CANONICAL                         112
/** TR is unusable. */
#define VMX_IGS_TR_ATTR_UNUSABLE                                113
/** TR.Attr.S bit invalid. */
#define VMX_IGS_TR_ATTR_S_INVALID                               114
/** TR is not present. */
#define VMX_IGS_TR_ATTR_P_INVALID                               115
/** TR.Attr reserved bits not set to 0. */
#define VMX_IGS_TR_ATTR_RESERVED                                116
/** TR.Attr.G bit invalid. */
#define VMX_IGS_TR_ATTR_G_INVALID                               117
/** Longmode TR.Attr.Type invalid. */
#define VMX_IGS_LONGMODE_TR_ATTR_TYPE_INVALID                   118
/** TR.Attr.Type invalid. */
#define VMX_IGS_TR_ATTR_TYPE_INVALID                            119
/** CS.Attr.S invalid. */
#define VMX_IGS_CS_ATTR_S_INVALID                               120
/** CS.Attr.DPL invalid. */
#define VMX_IGS_CS_ATTR_DPL_INVALID                             121
/** PAE PDPTE reserved bits not set to 0. */
#define VMX_IGS_PAE_PDPTE_RESERVED                              123
/** @} */

/** @name VMX VMCS-Read cache indices.
 * @{
 */
# define VMX_VMCS_GUEST_ES_BASE_CACHE_IDX                       0
# define VMX_VMCS_GUEST_CS_BASE_CACHE_IDX                       1
# define VMX_VMCS_GUEST_SS_BASE_CACHE_IDX                       2
# define VMX_VMCS_GUEST_DS_BASE_CACHE_IDX                       3
# define VMX_VMCS_GUEST_FS_BASE_CACHE_IDX                       4
# define VMX_VMCS_GUEST_GS_BASE_CACHE_IDX                       5
# define VMX_VMCS_GUEST_LDTR_BASE_CACHE_IDX                     6
# define VMX_VMCS_GUEST_TR_BASE_CACHE_IDX                       7
# define VMX_VMCS_GUEST_GDTR_BASE_CACHE_IDX                     8
# define VMX_VMCS_GUEST_IDTR_BASE_CACHE_IDX                     9
# define VMX_VMCS_GUEST_RSP_CACHE_IDX                           10
# define VMX_VMCS_GUEST_RIP_CACHE_IDX                           11
# define VMX_VMCS_GUEST_SYSENTER_ESP_CACHE_IDX                  12
# define VMX_VMCS_GUEST_SYSENTER_EIP_CACHE_IDX                  13
# define VMX_VMCS_RO_EXIT_QUALIFICATION_CACHE_IDX               14
# define VMX_VMCS_MAX_CACHE_IDX                                 (VMX_VMCS_RO_EXIT_QUALIFICATION_CACHE_IDX + 1)
# define VMX_VMCS_GUEST_CR3_CACHE_IDX                           15
# define VMX_VMCS_MAX_NESTED_PAGING_CACHE_IDX                   (VMX_VMCS_GUEST_CR3_CACHE_IDX + 1)
/** @} */

/** @name VMX EPT paging structures
 * @{
 */

/**
 * Number of page table entries in the EPT. (PDPTE/PDE/PTE)
 */
#define EPT_PG_ENTRIES          X86_PG_PAE_ENTRIES

/**
 * EPT PML4 Entry. Bit view.
 * @todo uint64_t isn't safe for bitfields (gcc pedantic warnings, and IIRC,
 *       this did cause trouble with one compiler/version).
 */
#pragma pack(1)
typedef struct EPTPML4EBITS
{
    /** Present bit. */
    uint64_t    u1Present       : 1;
    /** Writable bit. */
    uint64_t    u1Write         : 1;
    /** Executable bit. */
    uint64_t    u1Execute       : 1;
    /** Reserved (must be 0). */
    uint64_t    u5Reserved      : 5;
    /** Available for software. */
    uint64_t    u4Available     : 4;
    /** Physical address of the next level (PDPT). Restricted by maximum physical address width of the cpu. */
    uint64_t    u40PhysAddr     : 40;
    /** Available for software. */
    uint64_t    u12Available    : 12;
} EPTPML4EBITS;
#pragma pack()
AssertCompileSize(EPTPML4EBITS, 8);

/** Bits 12-51 - - EPT - Physical Page number of the next level. */
#define EPT_PML4E_PG_MASK       X86_PML4E_PG_MASK
/** The page shift to get the PML4 index. */
#define EPT_PML4_SHIFT          X86_PML4_SHIFT
/** The PML4 index mask (apply to a shifted page address). */
#define EPT_PML4_MASK           X86_PML4_MASK

/**
 * EPT PML4E.
 */
#pragma pack(1)
typedef union EPTPML4E
{
    /** Normal view. */
    EPTPML4EBITS    n;
    /** Unsigned integer view. */
    X86PGPAEUINT    u;
    /** 64 bit unsigned integer view. */
    uint64_t        au64[1];
    /** 32 bit unsigned integer view. */
    uint32_t        au32[2];
} EPTPML4E;
#pragma pack()
/** Pointer to a PML4 table entry. */
typedef EPTPML4E *PEPTPML4E;
/** Pointer to a const PML4 table entry. */
typedef const EPTPML4E *PCEPTPML4E;
AssertCompileSize(EPTPML4E, 8);

/**
 * EPT PML4 Table.
 */
#pragma pack(1)
typedef struct EPTPML4
{
    EPTPML4E    a[EPT_PG_ENTRIES];
} EPTPML4;
#pragma pack()
/** Pointer to an EPT PML4 Table. */
typedef EPTPML4 *PEPTPML4;
/** Pointer to a const EPT PML4 Table. */
typedef const EPTPML4 *PCEPTPML4;

/**
 * EPT Page Directory Pointer Entry. Bit view.
 */
#pragma pack(1)
typedef struct EPTPDPTEBITS
{
    /** Present bit. */
    uint64_t    u1Present       : 1;
    /** Writable bit. */
    uint64_t    u1Write         : 1;
    /** Executable bit. */
    uint64_t    u1Execute       : 1;
    /** Reserved (must be 0). */
    uint64_t    u5Reserved      : 5;
    /** Available for software. */
    uint64_t    u4Available     : 4;
    /** Physical address of the next level (PD). Restricted by maximum physical address width of the cpu. */
    uint64_t    u40PhysAddr     : 40;
    /** Available for software. */
    uint64_t    u12Available    : 12;
} EPTPDPTEBITS;
#pragma pack()
AssertCompileSize(EPTPDPTEBITS, 8);

/** Bits 12-51 - - EPT - Physical Page number of the next level. */
#define EPT_PDPTE_PG_MASK       X86_PDPE_PG_MASK
/** The page shift to get the PDPT index. */
#define EPT_PDPT_SHIFT          X86_PDPT_SHIFT
/** The PDPT index mask (apply to a shifted page address). */
#define EPT_PDPT_MASK           X86_PDPT_MASK_AMD64

/**
 * EPT Page Directory Pointer.
 */
#pragma pack(1)
typedef union EPTPDPTE
{
    /** Normal view. */
    EPTPDPTEBITS    n;
    /** Unsigned integer view. */
    X86PGPAEUINT    u;
    /** 64 bit unsigned integer view. */
    uint64_t        au64[1];
    /** 32 bit unsigned integer view. */
    uint32_t        au32[2];
} EPTPDPTE;
#pragma pack()
/** Pointer to an EPT Page Directory Pointer Entry. */
typedef EPTPDPTE *PEPTPDPTE;
/** Pointer to a const EPT Page Directory Pointer Entry. */
typedef const EPTPDPTE *PCEPTPDPTE;
AssertCompileSize(EPTPDPTE, 8);

/**
 * EPT Page Directory Pointer Table.
 */
#pragma pack(1)
typedef struct EPTPDPT
{
    EPTPDPTE    a[EPT_PG_ENTRIES];
} EPTPDPT;
#pragma pack()
/** Pointer to an EPT Page Directory Pointer Table. */
typedef EPTPDPT *PEPTPDPT;
/** Pointer to a const EPT Page Directory Pointer Table. */
typedef const EPTPDPT *PCEPTPDPT;


/**
 * EPT Page Directory Table Entry. Bit view.
 */
#pragma pack(1)
typedef struct EPTPDEBITS
{
    /** Present bit. */
    uint64_t    u1Present       : 1;
    /** Writable bit. */
    uint64_t    u1Write         : 1;
    /** Executable bit. */
    uint64_t    u1Execute       : 1;
    /** Reserved (must be 0). */
    uint64_t    u4Reserved      : 4;
    /** Big page (must be 0 here). */
    uint64_t    u1Size          : 1;
    /** Available for software. */
    uint64_t    u4Available     : 4;
    /** Physical address of page table. Restricted by maximum physical address width of the cpu. */
    uint64_t    u40PhysAddr     : 40;
    /** Available for software. */
    uint64_t    u12Available    : 12;
} EPTPDEBITS;
#pragma pack()
AssertCompileSize(EPTPDEBITS, 8);

/** Bits 12-51 - - EPT - Physical Page number of the next level. */
#define EPT_PDE_PG_MASK         X86_PDE_PAE_PG_MASK
/** The page shift to get the PD index. */
#define EPT_PD_SHIFT            X86_PD_PAE_SHIFT
/** The PD index mask (apply to a shifted page address). */
#define EPT_PD_MASK             X86_PD_PAE_MASK

/**
 * EPT 2MB Page Directory Table Entry. Bit view.
 */
#pragma pack(1)
typedef struct EPTPDE2MBITS
{
    /** Present bit. */
    uint64_t    u1Present       : 1;
    /** Writable bit. */
    uint64_t    u1Write         : 1;
    /** Executable bit. */
    uint64_t    u1Execute       : 1;
    /** EPT Table Memory Type. MBZ for non-leaf nodes. */
    uint64_t    u3EMT           : 3;
    /** Ignore PAT memory type */
    uint64_t    u1IgnorePAT     : 1;
    /** Big page (must be 1 here). */
    uint64_t    u1Size          : 1;
    /** Available for software. */
    uint64_t    u4Available     : 4;
    /** Reserved (must be 0). */
    uint64_t    u9Reserved      : 9;
    /** Physical address of the 2MB page. Restricted by maximum physical address width of the cpu. */
    uint64_t    u31PhysAddr     : 31;
    /** Available for software. */
    uint64_t    u12Available    : 12;
} EPTPDE2MBITS;
#pragma pack()
AssertCompileSize(EPTPDE2MBITS, 8);

/** Bits 21-51 - - EPT - Physical page number of the 2MB page. */
#define EPT_PDE2M_PG_MASK       X86_PDE2M_PAE_PG_MASK

/**
 * EPT Page Directory Table Entry.
 */
#pragma pack(1)
typedef union EPTPDE
{
    /** Normal view. */
    EPTPDEBITS      n;
    /** 2MB view (big). */
    EPTPDE2MBITS    b;
    /** Unsigned integer view. */
    X86PGPAEUINT    u;
    /** 64 bit unsigned integer view. */
    uint64_t        au64[1];
    /** 32 bit unsigned integer view. */
    uint32_t        au32[2];
} EPTPDE;
#pragma pack()
/** Pointer to an EPT Page Directory Table Entry. */
typedef EPTPDE *PEPTPDE;
/** Pointer to a const EPT Page Directory Table Entry. */
typedef const EPTPDE *PCEPTPDE;
AssertCompileSize(EPTPDE, 8);

/**
 * EPT Page Directory Table.
 */
#pragma pack(1)
typedef struct EPTPD
{
    EPTPDE      a[EPT_PG_ENTRIES];
} EPTPD;
#pragma pack()
/** Pointer to an EPT Page Directory Table. */
typedef EPTPD *PEPTPD;
/** Pointer to a const EPT Page Directory Table. */
typedef const EPTPD *PCEPTPD;


/**
 * EPT Page Table Entry. Bit view.
 */
#pragma pack(1)
typedef struct EPTPTEBITS
{
    /** 0 - Present bit.
     * @remark This is a convenience "misnomer".  The bit actually indicates
     *         read access and the CPU will consider an entry with any of the
     *         first three bits set as present.  Since all our valid entries
     *         will have this bit set, it can be used as a present indicator
     *         and allow some code sharing. */
    uint64_t    u1Present       : 1;
    /** 1 - Writable bit. */
    uint64_t    u1Write         : 1;
    /** 2 - Executable bit. */
    uint64_t    u1Execute       : 1;
    /** 5:3 - EPT Memory Type. MBZ for non-leaf nodes. */
    uint64_t    u3EMT           : 3;
    /** 6 - Ignore PAT memory type */
    uint64_t    u1IgnorePAT     : 1;
    /** 11:7 - Available for software. */
    uint64_t    u5Available     : 5;
    /** 51:12 - Physical address of page. Restricted by maximum physical
     *  address width of the cpu. */
    uint64_t    u40PhysAddr     : 40;
    /** 63:52 - Available for software. */
    uint64_t    u12Available    : 12;
} EPTPTEBITS;
#pragma pack()
AssertCompileSize(EPTPTEBITS, 8);

/** Bits 12-51 - - EPT - Physical page number of the final page. */
#define EPT_PTE_PG_MASK         X86_PTE_PAE_PG_MASK
/** The page shift to get the EPT PTE index. */
#define EPT_PT_SHIFT            X86_PT_PAE_SHIFT
/** The EPT PT index mask (apply to a shifted page address). */
#define EPT_PT_MASK             X86_PT_PAE_MASK

/**
 * EPT Page Table Entry.
 */
#pragma pack(1)
typedef union EPTPTE
{
    /** Normal view. */
    EPTPTEBITS      n;
    /** Unsigned integer view. */
    X86PGPAEUINT    u;
    /** 64 bit unsigned integer view. */
    uint64_t        au64[1];
    /** 32 bit unsigned integer view. */
    uint32_t        au32[2];
} EPTPTE;
#pragma pack()
/** Pointer to an EPT Page Table Entry. */
typedef EPTPTE *PEPTPTE;
/** Pointer to a const EPT Page Table Entry. */
typedef const EPTPTE *PCEPTPTE;
AssertCompileSize(EPTPTE, 8);

/**
 * EPT Page Table.
 */
#pragma pack(1)
typedef struct EPTPT
{
    EPTPTE      a[EPT_PG_ENTRIES];
} EPTPT;
#pragma pack()
/** Pointer to an extended page table. */
typedef EPTPT *PEPTPT;
/** Pointer to a const extended page table. */
typedef const EPTPT *PCEPTPT;
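
/*
 * Illustrative sketch: deriving the four EPT table indices for a guest-physical
 * address with the shift/mask macros above (4-level walk, 4KB pages).  GCPhys
 * and pPt are placeholders for the guest-physical address and a pointer to the
 * EPT page table reached by the walk.
 *
 *     unsigned iPml4 = (GCPhys >> EPT_PML4_SHIFT) & EPT_PML4_MASK;
 *     unsigned iPdpt = (GCPhys >> EPT_PDPT_SHIFT) & EPT_PDPT_MASK;
 *     unsigned iPd   = (GCPhys >> EPT_PD_SHIFT)   & EPT_PD_MASK;
 *     unsigned iPt   = (GCPhys >> EPT_PT_SHIFT)   & EPT_PT_MASK;
 *     EPTPTE   Pte   = pPt->a[iPt];
 */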

/**
 * VPID flush types.
 */
typedef enum
{
    /** Invalidate a specific page. */
    VMX_FLUSH_VPID_INDIV_ADDR                    = 0,
    /** Invalidate one context (specific VPID). */
    VMX_FLUSH_VPID_SINGLE_CONTEXT                = 1,
    /** Invalidate all contexts (all VPIDs). */
    VMX_FLUSH_VPID_ALL_CONTEXTS                  = 2,
    /** Invalidate a single VPID context retaining global mappings. */
    VMX_FLUSH_VPID_SINGLE_CONTEXT_RETAIN_GLOBALS = 3,
    /** Unsupported by VirtualBox. */
    VMX_FLUSH_VPID_NOT_SUPPORTED                 = 0xbad,
    /** Unsupported by CPU. */
    VMX_FLUSH_VPID_NONE                          = 0xb00,
    /** 32bit hackishness. */
    VMX_FLUSH_VPID_32BIT_HACK                    = 0x7fffffff
} VMX_FLUSH_VPID;

/**
 * EPT flush types.
 */
typedef enum
{
    /** Invalidate one context (specific EPT). */
    VMX_FLUSH_EPT_SINGLE_CONTEXT                = 1,
    /** Invalidate all contexts (all EPTs). */
    VMX_FLUSH_EPT_ALL_CONTEXTS                  = 2,
    /** Unsupported by VirtualBox.   */
    VMX_FLUSH_EPT_NOT_SUPPORTED                 = 0xbad,
    /** Unsupported by CPU. */
    VMX_FLUSH_EPT_NONE                          = 0xb00,
    /** 32bit hackishness. */
    VMX_FLUSH_EPT_32BIT_HACK                    = 0x7fffffff
} VMX_FLUSH_EPT;
/** @} */

/** @name MSR autoload/store elements
 * @{
 */
#pragma pack(1)
typedef struct
{
    uint32_t    u32Msr;
    uint32_t    u32Reserved;
    uint64_t    u64Value;
} VMXAUTOMSR;
#pragma pack()
/** Pointer to an MSR load/store element. */
typedef VMXAUTOMSR *PVMXAUTOMSR;
/** Pointer to a const MSR load/store element. */
typedef const VMXAUTOMSR *PCVMXAUTOMSR;
/** @} */
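
/*
 * Illustrative sketch: filling one autoload/store element.  pMsrArea is a
 * placeholder for the page backing the VMCS MSR autoload/store area, and
 * MSR_K6_STAR is used purely as an example MSR index.
 *
 *     PVMXAUTOMSR pMsr  = (PVMXAUTOMSR)pMsrArea;
 *     pMsr->u32Msr      = MSR_K6_STAR;
 *     pMsr->u32Reserved = 0;
 *     pMsr->u64Value    = ASMRdMsr(MSR_K6_STAR);
 */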

/** @name VMX-capability qword
 * @{
 */
#pragma pack(1)
typedef union
{
    struct
    {
        /** Bits set here -must- be set in the corresponding VM-execution controls. */
        uint32_t        disallowed0;
        /** Bits cleared here -must- be cleared in the corresponding VM-execution
         *  controls. */
        uint32_t        allowed1;
    } n;
    uint64_t            u;
} VMX_CAPABILITY;
#pragma pack()
/** @} */
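
/*
 * Illustrative sketch: clamping a requested set of VM-execution controls
 * against a capability MSR.  Bits set in disallowed0 must remain set and bits
 * clear in allowed1 must remain clear; fDesiredCtls is a placeholder.
 *
 *     uint32_t fVal = (fDesiredCtls | VmxCaps.n.disallowed0) & VmxCaps.n.allowed1;
 *     if ((fVal & fDesiredCtls) != fDesiredCtls)
 *         return VERR_HM_UNSUPPORTED_CPU_FEATURE_COMBO;   // a requested feature is unavailable
 */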

/** @name VMX MSRs.
 *  @{
 */
typedef struct VMXMSRS
{
    uint64_t                u64FeatureCtrl;
    uint64_t                u64BasicInfo;
    VMX_CAPABILITY          VmxPinCtls;
    VMX_CAPABILITY          VmxProcCtls;
    VMX_CAPABILITY          VmxProcCtls2;
    VMX_CAPABILITY          VmxExit;
    VMX_CAPABILITY          VmxEntry;
    uint64_t                u64Misc;
    uint64_t                u64Cr0Fixed0;
    uint64_t                u64Cr0Fixed1;
    uint64_t                u64Cr4Fixed0;
    uint64_t                u64Cr4Fixed1;
    uint64_t                u64VmcsEnum;
    uint64_t                u64Vmfunc;
    uint64_t                u64EptVpidCaps;
} VMXMSRS;
/** Pointer to a VMXMSRS struct. */
typedef VMXMSRS *PVMXMSRS;
/** @} */

/** @name VMX EFLAGS reserved bits.
 * @{
 */
/** And-mask for setting reserved bits to zero */
#define VMX_EFLAGS_RESERVED_0                                   (~0xffc08028)
/** Or-mask for setting reserved bits to 1 */
#define VMX_EFLAGS_RESERVED_1                                   0x00000002
/** @} */
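
/*
 * Illustrative sketch: sanitizing a guest EFLAGS value with the masks above
 * (clear the must-be-zero bits, set the must-be-one bit 1).  uGuestEFlags is a
 * placeholder for the raw guest value.
 *
 *     uint32_t fEFlags = (uGuestEFlags & VMX_EFLAGS_RESERVED_0) | VMX_EFLAGS_RESERVED_1;
 */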

/** @name VMX Basic Exit Reasons.
 * @{
 */
/** -1 Invalid exit code */
#define VMX_EXIT_INVALID                                        -1
/** 0 Exception or non-maskable interrupt (NMI). */
#define VMX_EXIT_XCPT_OR_NMI                                    0
/** 1 External interrupt. */
#define VMX_EXIT_EXT_INT                                        1
/** 2 Triple fault. */
#define VMX_EXIT_TRIPLE_FAULT                                   2
/** 3 INIT signal. */
#define VMX_EXIT_INIT_SIGNAL                                    3
/** 4 Start-up IPI (SIPI). */
#define VMX_EXIT_SIPI                                           4
/** 5 I/O system-management interrupt (SMI). */
#define VMX_EXIT_IO_SMI                                         5
/** 6 Other SMI. */
#define VMX_EXIT_SMI                                            6
/** 7 Interrupt window exiting. */
#define VMX_EXIT_INT_WINDOW                                     7
/** 8 NMI window exiting. */
#define VMX_EXIT_NMI_WINDOW                                     8
/** 9 Task switch. */
#define VMX_EXIT_TASK_SWITCH                                    9
/** 10 Guest software attempted to execute CPUID. */
#define VMX_EXIT_CPUID                                          10
/** 11 Guest software attempted to execute GETSEC. */
#define VMX_EXIT_GETSEC                                         11
/** 12 Guest software attempted to execute HLT. */
#define VMX_EXIT_HLT                                            12
/** 13 Guest software attempted to execute INVD. */
#define VMX_EXIT_INVD                                           13
/** 14 Guest software attempted to execute INVLPG. */
#define VMX_EXIT_INVLPG                                         14
/** 15 Guest software attempted to execute RDPMC. */
#define VMX_EXIT_RDPMC                                          15
/** 16 Guest software attempted to execute RDTSC. */
#define VMX_EXIT_RDTSC                                          16
/** 17 Guest software attempted to execute RSM in SMM. */
#define VMX_EXIT_RSM                                            17
/** 18 Guest software executed VMCALL. */
#define VMX_EXIT_VMCALL                                         18
/** 19 Guest software executed VMCLEAR. */
#define VMX_EXIT_VMCLEAR                                        19
/** 20 Guest software executed VMLAUNCH. */
#define VMX_EXIT_VMLAUNCH                                       20
/** 21 Guest software executed VMPTRLD. */
#define VMX_EXIT_VMPTRLD                                        21
/** 22 Guest software executed VMPTRST. */
#define VMX_EXIT_VMPTRST                                        22
/** 23 Guest software executed VMREAD. */
#define VMX_EXIT_VMREAD                                         23
/** 24 Guest software executed VMRESUME. */
#define VMX_EXIT_VMRESUME                                       24
/** 25 Guest software executed VMWRITE. */
#define VMX_EXIT_VMWRITE                                        25
/** 26 Guest software executed VMXOFF. */
#define VMX_EXIT_VMXOFF                                         26
/** 27 Guest software executed VMXON. */
#define VMX_EXIT_VMXON                                          27
/** 28 Control-register accesses. */
#define VMX_EXIT_MOV_CRX                                        28
/** 29 Debug-register accesses. */
#define VMX_EXIT_MOV_DRX                                        29
/** 30 I/O instruction. */
#define VMX_EXIT_IO_INSTR                                       30
/** 31 RDMSR. Guest software attempted to execute RDMSR. */
#define VMX_EXIT_RDMSR                                          31
/** 32 WRMSR. Guest software attempted to execute WRMSR. */
#define VMX_EXIT_WRMSR                                          32
/** 33 VM-entry failure due to invalid guest state. */
#define VMX_EXIT_ERR_INVALID_GUEST_STATE                        33
/** 34 VM-entry failure due to MSR loading. */
#define VMX_EXIT_ERR_MSR_LOAD                                   34
/** 36 Guest software executed MWAIT. */
#define VMX_EXIT_MWAIT                                          36
/** 37 VM exit due to monitor trap flag. */
#define VMX_EXIT_MTF                                            37
/** 39 Guest software attempted to execute MONITOR. */
#define VMX_EXIT_MONITOR                                        39
/** 40 Guest software attempted to execute PAUSE. */
#define VMX_EXIT_PAUSE                                          40
/** 41 VM-entry failure due to machine-check. */
#define VMX_EXIT_ERR_MACHINE_CHECK                              41
/** 43 TPR below threshold. Guest software executed MOV to CR8. */
#define VMX_EXIT_TPR_BELOW_THRESHOLD                            43
/** 44 APIC access. Guest software attempted to access memory at a physical address on the APIC-access page. */
#define VMX_EXIT_APIC_ACCESS                                    44
/** 46 Access to GDTR or IDTR. Guest software attempted to execute LGDT, LIDT, SGDT, or SIDT. */
#define VMX_EXIT_XDTR_ACCESS                                    46
/** 47 Access to LDTR or TR. Guest software attempted to execute LLDT, LTR, SLDT, or STR. */
#define VMX_EXIT_TR_ACCESS                                      47
/** 48 EPT violation. An attempt to access memory with a guest-physical address was disallowed by the configuration of the EPT paging structures. */
#define VMX_EXIT_EPT_VIOLATION                                  48
/** 49 EPT misconfiguration. An attempt to access memory with a guest-physical address encountered a misconfigured EPT paging-structure entry. */
#define VMX_EXIT_EPT_MISCONFIG                                  49
/** 50 INVEPT. Guest software attempted to execute INVEPT. */
#define VMX_EXIT_INVEPT                                         50
/** 51 RDTSCP. Guest software attempted to execute RDTSCP. */
#define VMX_EXIT_RDTSCP                                         51
/** 52 VMX-preemption timer expired. The preemption timer counted down to zero. */
#define VMX_EXIT_PREEMPT_TIMER                                  52
/** 53 INVVPID. Guest software attempted to execute INVVPID. */
#define VMX_EXIT_INVVPID                                        53
/** 54 WBINVD. Guest software attempted to execute WBINVD. */
#define VMX_EXIT_WBINVD                                         54
/** 55 XSETBV. Guest software attempted to execute XSETBV. */
#define VMX_EXIT_XSETBV                                         55
/** 57 RDRAND. Guest software attempted to execute RDRAND. */
#define VMX_EXIT_RDRAND                                         57
/** 58 INVPCID. Guest software attempted to execute INVPCID. */
#define VMX_EXIT_INVPCID                                        58
/** 59 VMFUNC. Guest software attempted to execute VMFUNC. */
#define VMX_EXIT_VMFUNC                                         59
/** The maximum exit value (inclusive). */
#define VMX_EXIT_MAX                                            (VMX_EXIT_VMFUNC)
/** @} */


/** @name VM Instruction Errors
 * @{
 */
/** VMCALL executed in VMX root operation. */
#define VMX_ERROR_VMCALL                                        1
/** VMCLEAR with invalid physical address. */
#define VMX_ERROR_VMCLEAR_INVALID_PHYS_ADDR                     2
/** VMCLEAR with VMXON pointer. */
#define VMX_ERROR_VMCLEAR_INVALID_VMXON_PTR                     3
/** VMLAUNCH with non-clear VMCS. */
#define VMX_ERROR_VMLAUCH_NON_CLEAR_VMCS                        4
/** VMRESUME with non-launched VMCS. */
#define VMX_ERROR_VMRESUME_NON_LAUNCHED_VMCS                    5
/** VMRESUME with a corrupted VMCS (indicates corruption of the current VMCS). */
#define VMX_ERROR_VMRESUME_CORRUPTED_VMCS                       6
/** VM-entry with invalid control field(s). */
#define VMX_ERROR_VMENTRY_INVALID_CONTROL_FIELDS                7
/** VM-entry with invalid host-state field(s). */
#define VMX_ERROR_VMENTRY_INVALID_HOST_STATE                    8
/** VMPTRLD with invalid physical address. */
#define VMX_ERROR_VMPTRLD_INVALID_PHYS_ADDR                     9
/** VMPTRLD with VMXON pointer. */
#define VMX_ERROR_VMPTRLD_VMXON_PTR                             10
/** VMPTRLD with incorrect VMCS revision identifier. */
#define VMX_ERROR_VMPTRLD_WRONG_VMCS_REVISION                   11
/** VMREAD/VMWRITE from/to unsupported VMCS component. */
#define VMX_ERROR_VMREAD_INVALID_COMPONENT                      12
#define VMX_ERROR_VMWRITE_INVALID_COMPONENT                     VMX_ERROR_VMREAD_INVALID_COMPONENT
/** VMWRITE to read-only VMCS component. */
#define VMX_ERROR_VMWRITE_READONLY_COMPONENT                    13
/** VMXON executed in VMX root operation. */
#define VMX_ERROR_VMXON_IN_VMX_ROOT_OP                          15
/** VM entry with invalid executive-VMCS pointer. */
#define VMX_ERROR_VMENTRY_INVALID_VMCS_EXEC_PTR                 16
/** VM entry with non-launched executive VMCS. */
#define VMX_ERROR_VMENTRY_NON_LAUNCHED_EXEC_VMCS                17
/** VM entry with executive-VMCS pointer not VMXON pointer. */
#define VMX_ERROR_VMENTRY_EXEC_VMCS_PTR                         18
/** VMCALL with non-clear VMCS. */
#define VMX_ERROR_VMCALL_NON_CLEAR_VMCS                         19
/** VMCALL with invalid VM-exit control fields. */
#define VMX_ERROR_VMCALL_INVALID_VMEXIT_FIELDS                  20
/** VMCALL with incorrect MSEG revision identifier. */
#define VMX_ERROR_VMCALL_INVALID_MSEG_REVISION                  22
/** VMXOFF under dual-monitor treatment of SMIs and SMM. */
#define VMX_ERROR_VMXOFF_DUAL_MONITOR                           23
/** VMCALL with invalid SMM-monitor features. */
#define VMX_ERROR_VMCALL_INVALID_SMM_MONITOR                    24
/** VM entry with invalid VM-execution control fields in executive VMCS. */
#define VMX_ERROR_VMENTRY_INVALID_VM_EXEC_CTRL                  25
/** VM entry with events blocked by MOV SS. */
#define VMX_ERROR_VMENTRY_MOV_SS                                26
/** Invalid operand to INVEPT/INVVPID. */
#define VMX_ERROR_INVEPTVPID_INVALID_OPERAND                    28

/** @} */


/** @name VMX MSRs - Basic VMX information.
 * @{
 */
/** VMCS revision identifier used by the processor. */
#define MSR_IA32_VMX_BASIC_INFO_VMCS_ID(a)                      ((a) & 0x7FFFFFFF)
/** Size of the VMCS. */
#define MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(a)                    (((a) >> 32) & 0x1FFF)
/** Width of physical address used for the VMCS.
 *  0 -> limited to the available amount of physical ram
 *  1 -> within the first 4 GB
 */
#define MSR_IA32_VMX_BASIC_INFO_VMCS_PHYS_WIDTH(a)              (((a) >> 48) & 1)
/** Whether the processor supports the dual-monitor treatment of system-management interrupts and system-management code. (always 1) */
#define MSR_IA32_VMX_BASIC_INFO_VMCS_DUAL_MON(a)                (((a) >> 49) & 1)
/** Memory type that must be used for the VMCS. */
#define MSR_IA32_VMX_BASIC_INFO_VMCS_MEM_TYPE(a)                (((a) >> 50) & 0xF)
/** Whether the processor provides additional information for exits due to INS/OUTS. */
#define MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(a)                RT_BOOL((a) & RT_BIT_64(54))
/** @} */
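
/*
 * Illustrative sketch: decoding the basic VMX information MSR with the
 * accessors above.  MSR_IA32_VMX_BASIC_INFO is assumed to be the MSR index
 * constant (0x480) provided by iprt/x86.h.
 *
 *     uint64_t u64Basic = ASMRdMsr(MSR_IA32_VMX_BASIC_INFO);
 *     uint32_t uVmcsId  = MSR_IA32_VMX_BASIC_INFO_VMCS_ID(u64Basic);
 *     uint32_t cbVmcs   = MSR_IA32_VMX_BASIC_INFO_VMCS_SIZE(u64Basic);
 *     bool     fInsOuts = MSR_IA32_VMX_BASIC_INFO_VMCS_INS_OUTS(u64Basic);
 */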


/** @name VMX MSRs - Misc VMX info.
 * @{
 */
/** Relationship between the preemption timer and tsc; count down every time bit x of the tsc changes. */
#define MSR_IA32_VMX_MISC_PREEMPT_TSC_BIT(a)                    ((a) & 0x1f)
/** Whether VM-exit stores EFER.LMA into the "IA32e mode guest" field. */
#define MSR_IA32_VMX_MISC_STORE_EFERLMA_VMEXIT(a)               (((a) >> 5) & 1)
/** Activity states supported by the implementation. */
#define MSR_IA32_VMX_MISC_ACTIVITY_STATES(a)                    (((a) >> 6) & 0x7)
/** Number of CR3 target values supported by the processor. (0-256) */
#define MSR_IA32_VMX_MISC_CR3_TARGET(a)                         (((a) >> 16) & 0x1FF)
/** Maximum nr of MSRs in the VMCS. (N+1)*512. */
#define MSR_IA32_VMX_MISC_MAX_MSR(a)                            (((((a) >> 25) & 0x7) + 1) * 512)
/** Whether RDMSR can be used to read IA32_SMBASE_MSR in SMM. */
#define MSR_IA32_VMX_MISC_RDMSR_SMBASE_MSR_SMM(a)               (((a) >> 15) & 1)
/** Whether bit 2 of IA32_SMM_MONITOR_CTL can be set to 1. */
#define MSR_IA32_VMX_MISC_SMM_MONITOR_CTL_B2(a)                 (((a) >> 28) & 1)
/** Whether VMWRITE can be used to write VM-exit information fields. */
#define MSR_IA32_VMX_MISC_VMWRITE_VMEXIT_INFO(a)                (((a) >> 29) & 1)
/** MSEG revision identifier used by the processor. */
#define MSR_IA32_VMX_MISC_MSEG_ID(a)                            ((a) >> 32)
/** @} */


/** @name VMX MSRs - VMCS enumeration field info
 * @{
 */
/** Highest field index. */
#define MSR_IA32_VMX_VMCS_ENUM_HIGHEST_INDEX(a)                 (((a) >> 1) & 0x1FF)
/** @} */


/** @name MSR_IA32_VMX_EPT_VPID_CAPS; EPT and VPID capabilities MSR
 * @{
 */
#define MSR_IA32_VMX_EPT_VPID_CAP_RWX_X_ONLY                             RT_BIT_64(0)
#define MSR_IA32_VMX_EPT_VPID_CAP_RWX_W_ONLY                             RT_BIT_64(1)
#define MSR_IA32_VMX_EPT_VPID_CAP_RWX_WX_ONLY                            RT_BIT_64(2)
#define MSR_IA32_VMX_EPT_VPID_CAP_GAW_21_BITS                            RT_BIT_64(3)
#define MSR_IA32_VMX_EPT_VPID_CAP_GAW_30_BITS                            RT_BIT_64(4)
#define MSR_IA32_VMX_EPT_VPID_CAP_GAW_39_BITS                            RT_BIT_64(5)
#define MSR_IA32_VMX_EPT_VPID_CAP_GAW_48_BITS                            RT_BIT_64(6)
#define MSR_IA32_VMX_EPT_VPID_CAP_GAW_57_BITS                            RT_BIT_64(7)
#define MSR_IA32_VMX_EPT_VPID_CAP_EMT_UC                                 RT_BIT_64(8)
#define MSR_IA32_VMX_EPT_VPID_CAP_EMT_WC                                 RT_BIT_64(9)
#define MSR_IA32_VMX_EPT_VPID_CAP_EMT_WT                                 RT_BIT_64(12)
#define MSR_IA32_VMX_EPT_VPID_CAP_EMT_WP                                 RT_BIT_64(13)
#define MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB                                 RT_BIT_64(14)
#define MSR_IA32_VMX_EPT_VPID_CAP_SP_21_BITS                             RT_BIT_64(16)
#define MSR_IA32_VMX_EPT_VPID_CAP_SP_30_BITS                             RT_BIT_64(17)
#define MSR_IA32_VMX_EPT_VPID_CAP_SP_39_BITS                             RT_BIT_64(18)
#define MSR_IA32_VMX_EPT_VPID_CAP_SP_48_BITS                             RT_BIT_64(19)
#define MSR_IA32_VMX_EPT_VPID_CAP_INVEPT                                 RT_BIT_64(20)
#define MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT                  RT_BIT_64(25)
#define MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS                    RT_BIT_64(26)
#define MSR_IA32_VMX_EPT_VPID_CAP_INVVPID                                RT_BIT_64(32)
#define MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_INDIV_ADDR                     RT_BIT_64(40)
#define MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT                 RT_BIT_64(41)
#define MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS                   RT_BIT_64(42)
#define MSR_IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS  RT_BIT_64(43)

/** @} */
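
/* Illustrative usage sketch, not part of this header: capability checks a caller might
 * perform on the raw MSR_IA32_VMX_EPT_VPID_CAPS value before relying on EPT with a
 * 4-level page walk and single-context INVEPT flushing.  The vmxSketch* name is
 * invented for the example. */
DECLINLINE(bool) vmxSketchHasUsableEpt(uint64_t uEptVpidCaps)
{
    return (uEptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_GAW_48_BITS)            /* 4-level (48-bit) walk. */
        && (uEptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_EMT_WB)                 /* Write-back paging structures. */
        && (uEptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT)                 /* INVEPT supported at all. */
        && (uEptVpidCaps & MSR_IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT); /* Per-EPTP flushing. */
}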

/** @name Extended Page Table Pointer (EPTP)
 * @{
 */
/** Uncacheable EPT paging structure memory type. */
#define VMX_EPT_MEMTYPE_UC                                      0
/** Write-back EPT paging structure memory type. */
#define VMX_EPT_MEMTYPE_WB                                      6
/** Shift value to get the EPT page walk length (bits 5-3) */
#define VMX_EPT_PAGE_WALK_LENGTH_SHIFT                          3
/** Mask value to get the EPT page walk length (bits 5-3) */
#define VMX_EPT_PAGE_WALK_LENGTH_MASK                           7
/** Default EPT page-walk length, encoded as the actual page-walk length minus
 *  one (3 = 4-level walk). */
/** @} */
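
/* Illustrative usage sketch, not part of this header: composing an EPTP value from a
 * 4 KB aligned PML4 physical address, using write-back paging-structure memory and the
 * default 4-level walk (encoded as length - 1 in bits 5:3).  The vmxSketch* name is
 * invented for the example. */
DECLINLINE(uint64_t) vmxSketchMakeEptp(RTHCPHYS HCPhysEptPml4)
{
    return HCPhysEptPml4
         | ((uint64_t)VMX_EPT_PAGE_WALK_LENGTH_DEFAULT << VMX_EPT_PAGE_WALK_LENGTH_SHIFT)
         | VMX_EPT_MEMTYPE_WB;
}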


/** @name VMCS field encoding - 16 bits guest fields
 * @{
 */
#define VMX_VMCS16_GUEST_FIELD_VPID                             0x0
#define VMX_VMCS16_GUEST_FIELD_ES                               0x800
#define VMX_VMCS16_GUEST_FIELD_CS                               0x802
#define VMX_VMCS16_GUEST_FIELD_SS                               0x804
#define VMX_VMCS16_GUEST_FIELD_DS                               0x806
#define VMX_VMCS16_GUEST_FIELD_FS                               0x808
#define VMX_VMCS16_GUEST_FIELD_GS                               0x80A
#define VMX_VMCS16_GUEST_FIELD_LDTR                             0x80C
#define VMX_VMCS16_GUEST_FIELD_TR                               0x80E
/** @} */

/** @name VMCS field encoding - 16 bits host fields
 * @{
 */
#define VMX_VMCS16_HOST_FIELD_ES                                0xC00
#define VMX_VMCS16_HOST_FIELD_CS                                0xC02
#define VMX_VMCS16_HOST_FIELD_SS                                0xC04
#define VMX_VMCS16_HOST_FIELD_DS                                0xC06
#define VMX_VMCS16_HOST_FIELD_FS                                0xC08
#define VMX_VMCS16_HOST_FIELD_GS                                0xC0A
#define VMX_VMCS16_HOST_FIELD_TR                                0xC0C
/** @} */

/** @name VMCS field encoding - 64 bits host fields
 * @{
 */
#define VMX_VMCS64_HOST_FIELD_PAT_FULL                          0x2C00
#define VMX_VMCS64_HOST_FIELD_PAT_HIGH                          0x2C01
#define VMX_VMCS64_HOST_FIELD_EFER_FULL                         0x2C02
#define VMX_VMCS64_HOST_FIELD_EFER_HIGH                         0x2C03
#define VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_FULL                   0x2C04      /**< MSR IA32_PERF_GLOBAL_CTRL */
#define VMX_VMCS64_HOST_PERF_GLOBAL_CTRL_HIGH                   0x2C05      /**< MSR IA32_PERF_GLOBAL_CTRL */
/** @} */


/** @name VMCS field encoding - 64 Bits control fields
 * @{
 */
#define VMX_VMCS64_CTRL_IO_BITMAP_A_FULL                        0x2000
#define VMX_VMCS64_CTRL_IO_BITMAP_A_HIGH                        0x2001
#define VMX_VMCS64_CTRL_IO_BITMAP_B_FULL                        0x2002
#define VMX_VMCS64_CTRL_IO_BITMAP_B_HIGH                        0x2003

/* Optional */
#define VMX_VMCS64_CTRL_MSR_BITMAP_FULL                         0x2004
#define VMX_VMCS64_CTRL_MSR_BITMAP_HIGH                         0x2005

#define VMX_VMCS64_CTRL_EXIT_MSR_STORE_FULL                     0x2006
#define VMX_VMCS64_CTRL_EXIT_MSR_STORE_HIGH                     0x2007
#define VMX_VMCS64_CTRL_EXIT_MSR_LOAD_FULL                      0x2008
#define VMX_VMCS64_CTRL_EXIT_MSR_LOAD_HIGH                      0x2009

#define VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_FULL                     0x200A
#define VMX_VMCS64_CTRL_ENTRY_MSR_LOAD_HIGH                     0x200B

#define VMX_VMCS64_CTRL_EXEC_VMCS_PTR_FULL                      0x200C
#define VMX_VMCS64_CTRL_EXEC_VMCS_PTR_HIGH                      0x200D

#define VMX_VMCS64_CTRL_TSC_OFFSET_FULL                         0x2010
#define VMX_VMCS64_CTRL_TSC_OFFSET_HIGH                         0x2011

/** Optional (VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW) */
#define VMX_VMCS64_CTRL_VAPIC_PAGEADDR_FULL                     0x2012
#define VMX_VMCS64_CTRL_VAPIC_PAGEADDR_HIGH                     0x2013

/** Optional (VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC) */
#define VMX_VMCS64_CTRL_APIC_ACCESSADDR_FULL                    0x2014
#define VMX_VMCS64_CTRL_APIC_ACCESSADDR_HIGH                    0x2015

/** Optional (VMX_VMCS_CTRL_PROC_EXEC2_VMFUNC) */
#define VMX_VMCS64_CTRL_VMFUNC_CTRLS_FULL                       0x2018
#define VMX_VMCS64_CTRL_VMFUNC_CTRLS_HIGH                       0x2019

/** Extended page table pointer. */
#define VMX_VMCS64_CTRL_EPTP_FULL                               0x201a
#define VMX_VMCS64_CTRL_EPTP_HIGH                               0x201b

/** Extended page table pointer lists. */
#define VMX_VMCS64_CTRL_EPTP_LIST_FULL                          0x2024
#define VMX_VMCS64_CTRL_EPTP_LIST_HIGH                          0x2025

/** VM-exit guest physical address. */
#define VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_FULL                    0x2400
#define VMX_VMCS64_EXIT_GUEST_PHYS_ADDR_HIGH                    0x2401
/** @} */


/** @name VMCS field encoding - 64 Bits guest fields
 * @{
 */
#define VMX_VMCS64_GUEST_VMCS_LINK_PTR_FULL                     0x2800
#define VMX_VMCS64_GUEST_VMCS_LINK_PTR_HIGH                     0x2801
#define VMX_VMCS64_GUEST_DEBUGCTL_FULL                          0x2802      /**< MSR IA32_DEBUGCTL */
#define VMX_VMCS64_GUEST_DEBUGCTL_HIGH                          0x2803      /**< MSR IA32_DEBUGCTL */
#define VMX_VMCS64_GUEST_PAT_FULL                               0x2804
#define VMX_VMCS64_GUEST_PAT_HIGH                               0x2805
#define VMX_VMCS64_GUEST_EFER_FULL                              0x2806
#define VMX_VMCS64_GUEST_EFER_HIGH                              0x2807
#define VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_FULL                  0x2808      /**< MSR IA32_PERF_GLOBAL_CTRL */
#define VMX_VMCS64_GUEST_PERF_GLOBAL_CTRL_HIGH                  0x2809      /**< MSR IA32_PERF_GLOBAL_CTRL */
#define VMX_VMCS64_GUEST_PDPTE0_FULL                            0x280A
#define VMX_VMCS64_GUEST_PDPTE0_HIGH                            0x280B
#define VMX_VMCS64_GUEST_PDPTE1_FULL                            0x280C
#define VMX_VMCS64_GUEST_PDPTE1_HIGH                            0x280D
#define VMX_VMCS64_GUEST_PDPTE2_FULL                            0x280E
#define VMX_VMCS64_GUEST_PDPTE2_HIGH                            0x280F
#define VMX_VMCS64_GUEST_PDPTE3_FULL                            0x2810
#define VMX_VMCS64_GUEST_PDPTE3_HIGH                            0x2811
/** @} */


/** @name VMCS field encoding - 32 Bits control fields
 * @{
 */
#define VMX_VMCS32_CTRL_PIN_EXEC                                0x4000
#define VMX_VMCS32_CTRL_PROC_EXEC                               0x4002
#define VMX_VMCS32_CTRL_EXCEPTION_BITMAP                        0x4004
#define VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MASK                    0x4006
#define VMX_VMCS32_CTRL_PAGEFAULT_ERROR_MATCH                   0x4008
#define VMX_VMCS32_CTRL_CR3_TARGET_COUNT                        0x400A
#define VMX_VMCS32_CTRL_EXIT                                    0x400C
#define VMX_VMCS32_CTRL_EXIT_MSR_STORE_COUNT                    0x400E
#define VMX_VMCS32_CTRL_EXIT_MSR_LOAD_COUNT                     0x4010
#define VMX_VMCS32_CTRL_ENTRY                                   0x4012
#define VMX_VMCS32_CTRL_ENTRY_MSR_LOAD_COUNT                    0x4014
#define VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO                 0x4016
#define VMX_VMCS32_CTRL_ENTRY_EXCEPTION_ERRCODE                 0x4018
#define VMX_VMCS32_CTRL_ENTRY_INSTR_LENGTH                      0x401A
#define VMX_VMCS32_CTRL_TPR_THRESHOLD                           0x401C
#define VMX_VMCS32_CTRL_PROC_EXEC2                              0x401E
/** @} */


/** @name VMX_VMCS_CTRL_PIN_EXEC
 * @{
 */
/** External interrupts cause VM exits if set; otherwise dispatched through the guest's IDT. */
#define VMX_VMCS_CTRL_PIN_EXEC_EXT_INT_EXIT                     RT_BIT(0)
/** Non-maskable interrupts cause VM exits if set; otherwise dispatched through the guest's IDT. */
#define VMX_VMCS_CTRL_PIN_EXEC_NMI_EXIT                         RT_BIT(3)
/** Virtual NMIs. */
#define VMX_VMCS_CTRL_PIN_EXEC_VIRTUAL_NMI                      RT_BIT(5)
/** Activate VMX preemption timer. */
#define VMX_VMCS_CTRL_PIN_EXEC_PREEMPT_TIMER                    RT_BIT(6)
/* All other bits are reserved and must be set according to MSR IA32_VMX_PROCBASED_CTLS. */
/** @} */

/** @name VMX_VMCS_CTRL_PROC_EXEC
 * @{
 */
/** VM Exit as soon as RFLAGS.IF=1 and no blocking is active. */
#define VMX_VMCS_CTRL_PROC_EXEC_INT_WINDOW_EXIT                 RT_BIT(2)
/** Use timestamp counter offset. */
#define VMX_VMCS_CTRL_PROC_EXEC_USE_TSC_OFFSETTING              RT_BIT(3)
/** VM Exit when executing the HLT instruction. */
#define VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT                        RT_BIT(7)
/** VM Exit when executing the INVLPG instruction. */
#define VMX_VMCS_CTRL_PROC_EXEC_INVLPG_EXIT                     RT_BIT(9)
/** VM Exit when executing the MWAIT instruction. */
#define VMX_VMCS_CTRL_PROC_EXEC_MWAIT_EXIT                      RT_BIT(10)
/** VM Exit when executing the RDPMC instruction. */
#define VMX_VMCS_CTRL_PROC_EXEC_RDPMC_EXIT                      RT_BIT(11)
/** VM Exit when executing the RDTSC/RDTSCP instruction. */
#define VMX_VMCS_CTRL_PROC_EXEC_RDTSC_EXIT                      RT_BIT(12)
/** VM Exit when executing the MOV to CR3 instruction. (forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs) */
#define VMX_VMCS_CTRL_PROC_EXEC_CR3_LOAD_EXIT                   RT_BIT(15)
/** VM Exit when executing the MOV from CR3 instruction. (forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs) */
#define VMX_VMCS_CTRL_PROC_EXEC_CR3_STORE_EXIT                  RT_BIT(16)
/** VM Exit on CR8 loads. */
#define VMX_VMCS_CTRL_PROC_EXEC_CR8_LOAD_EXIT                   RT_BIT(19)
/** VM Exit on CR8 stores. */
#define VMX_VMCS_CTRL_PROC_EXEC_CR8_STORE_EXIT                  RT_BIT(20)
/** Use TPR shadow. */
#define VMX_VMCS_CTRL_PROC_EXEC_USE_TPR_SHADOW                  RT_BIT(21)
/** VM Exit when virtual NMI blocking is disabled. */
#define VMX_VMCS_CTRL_PROC_EXEC_NMI_WINDOW_EXIT                 RT_BIT(22)
/** VM Exit when executing a MOV DRx instruction. */
#define VMX_VMCS_CTRL_PROC_EXEC_MOV_DR_EXIT                     RT_BIT(23)
/** VM Exit when executing IO instructions. */
#define VMX_VMCS_CTRL_PROC_EXEC_UNCOND_IO_EXIT                  RT_BIT(24)
/** Use IO bitmaps. */
#define VMX_VMCS_CTRL_PROC_EXEC_USE_IO_BITMAPS                  RT_BIT(25)
/** Monitor trap flag. */
#define VMX_VMCS_CTRL_PROC_EXEC_MONITOR_TRAP_FLAG               RT_BIT(27)
/** Use MSR bitmaps. */
#define VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS                 RT_BIT(28)
/** VM Exit when executing the MONITOR instruction. */
#define VMX_VMCS_CTRL_PROC_EXEC_MONITOR_EXIT                    RT_BIT(29)
/** VM Exit when executing the PAUSE instruction. */
#define VMX_VMCS_CTRL_PROC_EXEC_PAUSE_EXIT                      RT_BIT(30)
/** Determines whether the secondary processor based VM-execution controls are used. */
#define VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL         RT_BIT(31)
/** @} */
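
/* Illustrative usage sketch, not part of this header: composing a minimal set of
 * processor-based execution controls.  Real code must additionally fold in the
 * allowed-0/allowed-1 settings reported by the IA32_VMX_PROCBASED_CTLS capability MSR
 * before writing the result to VMX_VMCS32_CTRL_PROC_EXEC.  The vmxSketch* name is
 * invented for the example. */
DECLINLINE(uint32_t) vmxSketchBasicProcCtls(void)
{
    return VMX_VMCS_CTRL_PROC_EXEC_HLT_EXIT                  /* Intercept HLT. */
         | VMX_VMCS_CTRL_PROC_EXEC_USE_MSR_BITMAPS           /* Filter MSR accesses via bitmaps. */
         | VMX_VMCS_CTRL_PROC_EXEC_USE_IO_BITMAPS            /* Filter port I/O via bitmaps. */
         | VMX_VMCS_CTRL_PROC_EXEC_USE_SECONDARY_EXEC_CTRL;  /* Enable the VMX_VMCS_CTRL_PROC_EXEC2 bits. */
}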

/** @name VMX_VMCS_CTRL_PROC_EXEC2
 * @{
 */
/** Virtualize APIC access. */
#define VMX_VMCS_CTRL_PROC_EXEC2_VIRT_APIC                      RT_BIT(0)
/** EPT supported/enabled. */
#define VMX_VMCS_CTRL_PROC_EXEC2_EPT                            RT_BIT(1)
/** Descriptor table instructions cause VM-exits. */
#define VMX_VMCS_CTRL_PROC_EXEC2_DESCRIPTOR_TABLE_EXIT          RT_BIT(2)
/** RDTSCP supported/enabled. */
#define VMX_VMCS_CTRL_PROC_EXEC2_RDTSCP                         RT_BIT(3)
/** Virtualize x2APIC mode. */
#define VMX_VMCS_CTRL_PROC_EXEC2_VIRT_X2APIC                    RT_BIT(4)
/** VPID supported/enabled. */
#define VMX_VMCS_CTRL_PROC_EXEC2_VPID                           RT_BIT(5)
/** VM Exit when executing the WBINVD instruction. */
#define VMX_VMCS_CTRL_PROC_EXEC2_WBINVD_EXIT                    RT_BIT(6)
/** Unrestricted guest execution. */
#define VMX_VMCS_CTRL_PROC_EXEC2_UNRESTRICTED_GUEST             RT_BIT(7)
/** A specified number of PAUSE-loop iterations causes a VM-exit. */
#define VMX_VMCS_CTRL_PROC_EXEC2_PAUSE_LOOP_EXIT                RT_BIT(10)
/** VM Exit when executing RDRAND instructions. */
#define VMX_VMCS_CTRL_PROC_EXEC2_RDRAND_EXIT                    RT_BIT(11)
/** Enables INVPCID instructions. */
#define VMX_VMCS_CTRL_PROC_EXEC2_INVPCID                        RT_BIT(12)
/** Enables VMFUNC instructions. */
#define VMX_VMCS_CTRL_PROC_EXEC2_VMFUNC                         RT_BIT(13)
/** @} */


/** @name VMX_VMCS_CTRL_ENTRY
 * @{
 */
/** Load guest debug controls (dr7 & IA32_DEBUGCTL_MSR) (forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs) */
#define VMX_VMCS_CTRL_ENTRY_LOAD_DEBUG                          RT_BIT(2)
/** 64-bit (IA-32e) guest mode. Must be 0 for CPUs that don't support AMD64. */
#define VMX_VMCS_CTRL_ENTRY_IA32E_MODE_GUEST                    RT_BIT(9)
/** In SMM mode after VM-entry. */
#define VMX_VMCS_CTRL_ENTRY_ENTRY_SMM                           RT_BIT(10)
/** Deactivate the dual-monitor treatment of SMIs and SMM; must be zero for VM-entry outside of SMM. */
#define VMX_VMCS_CTRL_ENTRY_DEACTIVATE_DUALMON                  RT_BIT(11)
/** Whether the guest IA32_PERF_GLOBAL_CTRL MSR is loaded on VM entry. */
#define VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PERF_MSR                 RT_BIT(13)
/** Whether the guest IA32_PAT MSR is loaded on VM entry. */
#define VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_PAT_MSR                  RT_BIT(14)
/** Whether the guest IA32_EFER MSR is loaded on VM entry. */
#define VMX_VMCS_CTRL_ENTRY_LOAD_GUEST_EFER_MSR                 RT_BIT(15)
/** @} */


/** @name VMX_VMCS_CTRL_EXIT
 * @{
 */
/** Save guest debug controls (dr7 & IA32_DEBUGCTL_MSR) (forced to 1 on the 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs) */
#define VMX_VMCS_CTRL_EXIT_SAVE_DEBUG                           RT_BIT(2)
/** Return to long mode after a VM-exit. */
#define VMX_VMCS_CTRL_EXIT_HOST_ADDR_SPACE_SIZE                 RT_BIT(9)
/** Whether the IA32_PERF_GLOBAL_CTRL MSR is loaded on VM exit. */
#define VMX_VMCS_CTRL_EXIT_LOAD_PERF_MSR                        RT_BIT(12)
/** Acknowledge external interrupts with the IRQ controller if one caused a VM-exit. */
#define VMX_VMCS_CTRL_EXIT_ACK_EXT_INT                          RT_BIT(15)
/** Whether the guest IA32_PAT MSR is saved on VM exit. */
#define VMX_VMCS_CTRL_EXIT_SAVE_GUEST_PAT_MSR                   RT_BIT(18)
/** Whether the host IA32_PAT MSR is loaded on VM exit. */
#define VMX_VMCS_CTRL_EXIT_LOAD_HOST_PAT_MSR                    RT_BIT(19)
/** Whether the guest IA32_EFER MSR is saved on VM exit. */
#define VMX_VMCS_CTRL_EXIT_SAVE_GUEST_EFER_MSR                  RT_BIT(20)
/** Whether the host IA32_EFER MSR is loaded on VM exit. */
#define VMX_VMCS_CTRL_EXIT_LOAD_HOST_EFER_MSR                   RT_BIT(21)
/** Whether the value of the VMX preemption timer is saved on every VM exit. */
#define VMX_VMCS_CTRL_EXIT_SAVE_VMX_PREEMPT_TIMER               RT_BIT(22)
/** @} */


/** @name VMX_VMCS_CTRL_VMFUNC
 * @{
 */
/** EPTP-switching function changes the value of the EPTP to one chosen from the EPTP list. */
#define VMX_VMCS_CTRL_VMFUNC_EPTP_SWITCHING            RT_BIT_64(0)
/** @} */


/**  @name VMCS field encoding - 32 Bits read-only fields
 * @{
 */
#define VMX_VMCS32_RO_VM_INSTR_ERROR                              0x4400
#define VMX_VMCS32_RO_EXIT_REASON                                 0x4402
#define VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO                      0x4404
#define VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE                0x4406
#define VMX_VMCS32_RO_IDT_INFO                                    0x4408
#define VMX_VMCS32_RO_IDT_ERROR_CODE                              0x440A
#define VMX_VMCS32_RO_EXIT_INSTR_LENGTH                           0x440C
#define VMX_VMCS32_RO_EXIT_INSTR_INFO                             0x440E
/** @} */

/** @name VMX_VMCS32_RO_EXIT_REASON
 * @{
 */
#define VMX_EXIT_REASON_BASIC(a)                                  ((a) & 0xffff)
/** @} */

/** @name VMX_VMCS32_CTRL_ENTRY_INTERRUPTION_INFO
 * @{
 */
#define VMX_ENTRY_INTERRUPTION_INFO_IS_VALID(a)                   RT_BOOL((a) & RT_BIT(31))
#define VMX_ENTRY_INTERRUPTION_INFO_TYPE_SHIFT                    8
#define VMX_ENTRY_INTERRUPTION_INFO_TYPE(a)                       (((a) >> VMX_ENTRY_INTERRUPTION_INFO_TYPE_SHIFT) & 7)
/** @} */


/** @name VMX_VMCS32_RO_EXIT_INTERRUPTION_INFO
 * @{
 */
#define VMX_EXIT_INTERRUPTION_INFO_VECTOR(a)                      ((a) & 0xff)
#define VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT                     8
#define VMX_EXIT_INTERRUPTION_INFO_TYPE(a)                        (((a) >> VMX_EXIT_INTERRUPTION_INFO_TYPE_SHIFT) & 7)
#define VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID               RT_BIT(11)
#define VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(a)         RT_BOOL((a) & VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_VALID)
#define VMX_EXIT_INTERRUPTION_INFO_NMI_UNBLOCK(a)                 ((a) & RT_BIT(12))
#define VMX_EXIT_INTERRUPTION_INFO_VALID                          RT_BIT(31)
#define VMX_EXIT_INTERRUPTION_INFO_IS_VALID(a)                    RT_BOOL((a) & RT_BIT(31))
/** Construct an IRQ event injection value from the exit interruption info value (same except that bit 12 is reserved). */
#define VMX_VMCS_CTRL_ENTRY_IRQ_INFO_FROM_EXIT_INT_INFO(a)        ((a) & ~RT_BIT(12))
/** @} */

/** @name VMX_VMCS_RO_EXIT_INTERRUPTION_INFO_TYPE
 * @{
 */
#define VMX_EXIT_INTERRUPTION_INFO_TYPE_EXT_INT                   0
#define VMX_EXIT_INTERRUPTION_INFO_TYPE_NMI                       2
#define VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT                   3
#define VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_INT                    4
#define VMX_EXIT_INTERRUPTION_INFO_TYPE_PRIV_SW_XCPT              5
#define VMX_EXIT_INTERRUPTION_INFO_TYPE_SW_XCPT                   6
/** @} */
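
/* Illustrative usage sketch, not part of this header: classifying a VM-exit
 * interruption-information value as a hardware exception that pushed an error code
 * (the error code itself is in VMX_VMCS32_RO_EXIT_INTERRUPTION_ERROR_CODE).  The
 * vmxSketch* name is invented for the example. */
DECLINLINE(bool) vmxSketchIsXcptWithErrCd(uint32_t uExitIntInfo)
{
    return VMX_EXIT_INTERRUPTION_INFO_IS_VALID(uExitIntInfo)
        && VMX_EXIT_INTERRUPTION_INFO_TYPE(uExitIntInfo) == VMX_EXIT_INTERRUPTION_INFO_TYPE_HW_XCPT
        && VMX_EXIT_INTERRUPTION_INFO_ERROR_CODE_IS_VALID(uExitIntInfo);
}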

/** @name VMX_VMCS32_RO_IDT_VECTORING_INFO
 * @{
 */
#define VMX_IDT_VECTORING_INFO_VECTOR(a)                          ((a) & 0xff)
#define VMX_IDT_VECTORING_INFO_TYPE_SHIFT                         8
#define VMX_IDT_VECTORING_INFO_TYPE(a)                            (((a) >> VMX_IDT_VECTORING_INFO_TYPE_SHIFT) & 7)
#define VMX_IDT_VECTORING_INFO_ERROR_CODE_VALID                   RT_BIT(11)
#define VMX_IDT_VECTORING_INFO_ERROR_CODE_IS_VALID(a)             RT_BOOL((a) & VMX_IDT_VECTORING_INFO_ERROR_CODE_VALID)
#define VMX_IDT_VECTORING_INFO_VALID(a)                           ((a) & RT_BIT(31))
#define VMX_ENTRY_INT_INFO_FROM_EXIT_IDT_INFO(a)                  ((a) & ~RT_BIT(12))
/** @} */

/** @name VMX_VMCS_RO_IDT_VECTORING_INFO_TYPE
 * @{
 */
#define VMX_IDT_VECTORING_INFO_TYPE_EXT_INT                       0
#define VMX_IDT_VECTORING_INFO_TYPE_NMI                           2
#define VMX_IDT_VECTORING_INFO_TYPE_HW_XCPT                       3
#define VMX_IDT_VECTORING_INFO_TYPE_SW_INT                        4
#define VMX_IDT_VECTORING_INFO_TYPE_PRIV_SW_XCPT                  5
#define VMX_IDT_VECTORING_INFO_TYPE_SW_XCPT                       6
/** @} */


/**  @name VMCS field encoding - 32 Bits guest state fields
 * @{
 */
#define VMX_VMCS32_GUEST_ES_LIMIT                                 0x4800
#define VMX_VMCS32_GUEST_CS_LIMIT                                 0x4802
#define VMX_VMCS32_GUEST_SS_LIMIT                                 0x4804
#define VMX_VMCS32_GUEST_DS_LIMIT                                 0x4806
#define VMX_VMCS32_GUEST_FS_LIMIT                                 0x4808
#define VMX_VMCS32_GUEST_GS_LIMIT                                 0x480A
#define VMX_VMCS32_GUEST_LDTR_LIMIT                               0x480C
#define VMX_VMCS32_GUEST_TR_LIMIT                                 0x480E
#define VMX_VMCS32_GUEST_GDTR_LIMIT                               0x4810
#define VMX_VMCS32_GUEST_IDTR_LIMIT                               0x4812
#define VMX_VMCS32_GUEST_ES_ACCESS_RIGHTS                         0x4814
#define VMX_VMCS32_GUEST_CS_ACCESS_RIGHTS                         0x4816
#define VMX_VMCS32_GUEST_SS_ACCESS_RIGHTS                         0x4818
#define VMX_VMCS32_GUEST_DS_ACCESS_RIGHTS                         0x481A
#define VMX_VMCS32_GUEST_FS_ACCESS_RIGHTS                         0x481C
#define VMX_VMCS32_GUEST_GS_ACCESS_RIGHTS                         0x481E
#define VMX_VMCS32_GUEST_LDTR_ACCESS_RIGHTS                       0x4820
#define VMX_VMCS32_GUEST_TR_ACCESS_RIGHTS                         0x4822
#define VMX_VMCS32_GUEST_INTERRUPTIBILITY_STATE                   0x4824
#define VMX_VMCS32_GUEST_ACTIVITY_STATE                           0x4826
#define VMX_VMCS32_GUEST_SYSENTER_CS                              0x482A  /**< MSR IA32_SYSENTER_CS */
#define VMX_VMCS32_GUEST_PREEMPT_TIMER_VALUE                      0x482E
/** @} */


/** @name VMX_VMCS_GUEST_ACTIVITY_STATE
 * @{
 */
/** The logical processor is active. */
#define VMX_VMCS_GUEST_ACTIVITY_ACTIVE                           0x0
/** The logical processor is inactive because it executed a HLT instruction. */
#define VMX_VMCS_GUEST_ACTIVITY_HLT                              0x1
/** The logical processor is inactive, because of a triple fault or other serious error. */
#define VMX_VMCS_GUEST_ACTIVITY_SHUTDOWN                         0x2
/** The logical processor is inactive because it is waiting for a startup IPI (SIPI). */
#define VMX_VMCS_GUEST_ACTIVITY_SIPI_WAIT                        0x3
/** @} */


/** @name VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE
 * @{
 */
#define VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI         RT_BIT(0)
#define VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS       RT_BIT(1)
#define VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_SMI         RT_BIT(2)
#define VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_NMI         RT_BIT(3)
/** @} */
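
/* Illustrative usage sketch, not part of this header: event injection is typically
 * deferred while the guest is in an STI or MOV SS interrupt shadow.  The vmxSketch*
 * name is invented for the example. */
DECLINLINE(bool) vmxSketchIsInInterruptShadow(uint32_t uIntrState)
{
    return (uIntrState & (  VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_STI
                          | VMX_VMCS_GUEST_INTERRUPTIBILITY_STATE_BLOCK_MOVSS)) != 0;
}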


/** @name VMCS field encoding - 32 Bits host state fields
 * @{
 */
#define VMX_VMCS32_HOST_SYSENTER_CS                             0x4C00
/** @} */

/** @name Natural width control fields
 * @{
 */
#define VMX_VMCS_CTRL_CR0_MASK                                  0x6000
#define VMX_VMCS_CTRL_CR4_MASK                                  0x6002
#define VMX_VMCS_CTRL_CR0_READ_SHADOW                           0x6004
#define VMX_VMCS_CTRL_CR4_READ_SHADOW                           0x6006
#define VMX_VMCS_CTRL_CR3_TARGET_VAL0                           0x6008
#define VMX_VMCS_CTRL_CR3_TARGET_VAL1                           0x600A
#define VMX_VMCS_CTRL_CR3_TARGET_VAL2                           0x600C
#define VMX_VMCS_CTRL_CR3_TARGET_VAL31                          0x600E
/** @} */


/** @name Natural width read-only data fields
 * @{
 */
#define VMX_VMCS_RO_EXIT_QUALIFICATION                          0x6400
#define VMX_VMCS_RO_IO_RCX                                      0x6402
#define VMX_VMCS_RO_IO_RSX                                      0x6404
#define VMX_VMCS_RO_IO_RDI                                      0x6406
#define VMX_VMCS_RO_IO_RIP                                      0x6408
#define VMX_VMCS_RO_EXIT_GUEST_LINEAR_ADDR                      0x640A
/** @} */


/** @name VMX_VMCS_RO_EXIT_QUALIFICATION
 * @{
 */
/** 0-2:  Debug register number */
#define VMX_EXIT_QUALIFICATION_DRX_REGISTER(a)                  ((a) & 7)
/** 3:    Reserved; cleared to 0. */
#define VMX_EXIT_QUALIFICATION_DRX_RES1(a)                      (((a) >> 3) & 1)
/** 4:    Direction of move (0 = write, 1 = read) */
#define VMX_EXIT_QUALIFICATION_DRX_DIRECTION(a)                 (((a) >> 4) & 1)
/** 5-7:  Reserved; cleared to 0. */
#define VMX_EXIT_QUALIFICATION_DRX_RES2(a)                      (((a) >> 5) & 7)
/** 8-11: General purpose register number. */
#define VMX_EXIT_QUALIFICATION_DRX_GENREG(a)                    (((a) >> 8) & 0xF)
/** Rest: reserved. */
/** @} */

/** @name VMX_EXIT_QUALIFICATION_DRX_DIRECTION values
 * @{
 */
#define VMX_EXIT_QUALIFICATION_DRX_DIRECTION_WRITE              0
#define VMX_EXIT_QUALIFICATION_DRX_DIRECTION_READ               1
/** @} */
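
/* Illustrative usage sketch, not part of this header: pulling the debug and general
 * purpose register numbers out of a MOV DRx exit qualification.  The vmxSketch* name
 * and out-parameters are invented for the example. */
DECLINLINE(bool) vmxSketchIsDrxRead(uint64_t uExitQual, uint32_t *piDrReg, uint32_t *piGenReg)
{
    *piDrReg  = (uint32_t)VMX_EXIT_QUALIFICATION_DRX_REGISTER(uExitQual);
    *piGenReg = (uint32_t)VMX_EXIT_QUALIFICATION_DRX_GENREG(uExitQual);
    return VMX_EXIT_QUALIFICATION_DRX_DIRECTION(uExitQual) == VMX_EXIT_QUALIFICATION_DRX_DIRECTION_READ;
}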



/** @name CRx accesses
 * @{
 */
/** 0-3:   Control register number (0 for CLTS & LMSW) */
#define VMX_EXIT_QUALIFICATION_CRX_REGISTER(a)                  ((a) & 0xF)
/** 4-5:   Access type. */
#define VMX_EXIT_QUALIFICATION_CRX_ACCESS(a)                    (((a) >> 4) & 3)
/** 6:     LMSW operand type */
#define VMX_EXIT_QUALIFICATION_CRX_LMSW_OP(a)                   (((a) >> 6) & 1)
/** 7:     Reserved; cleared to 0. */
#define VMX_EXIT_QUALIFICATION_CRX_RES1(a)                      (((a) >> 7) & 1)
/** 8-11:  General purpose register number (0 for CLTS & LMSW). */
#define VMX_EXIT_QUALIFICATION_CRX_GENREG(a)                    (((a) >> 8) & 0xF)
/** 12-15: Reserved; cleared to 0. */
#define VMX_EXIT_QUALIFICATION_CRX_RES2(a)                      (((a) >> 12) & 0xF)
/** 16-31: LMSW source data (else 0). */
#define VMX_EXIT_QUALIFICATION_CRX_LMSW_DATA(a)                 (((a) >> 16) & 0xFFFF)
/** Rest: reserved. */
/** @} */

/** @name VMX_EXIT_QUALIFICATION_CRX_ACCESS
 * @{
 */
#define VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE                 0
#define VMX_EXIT_QUALIFICATION_CRX_ACCESS_READ                  1
#define VMX_EXIT_QUALIFICATION_CRX_ACCESS_CLTS                  2
#define VMX_EXIT_QUALIFICATION_CRX_ACCESS_LMSW                  3
/** @} */
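
/* Illustrative usage sketch, not part of this header: distinguishing a MOV to CRx from
 * the other control-register access exits and extracting the registers involved.  The
 * vmxSketch* name and out-parameters are invented for the example. */
DECLINLINE(bool) vmxSketchIsMovToCrx(uint64_t uExitQual, uint32_t *piCrReg, uint32_t *piGenReg)
{
    *piCrReg  = (uint32_t)VMX_EXIT_QUALIFICATION_CRX_REGISTER(uExitQual);
    *piGenReg = (uint32_t)VMX_EXIT_QUALIFICATION_CRX_GENREG(uExitQual);
    return VMX_EXIT_QUALIFICATION_CRX_ACCESS(uExitQual) == VMX_EXIT_QUALIFICATION_CRX_ACCESS_WRITE;
}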

/** @name VMX_EXIT_QUALIFICATION_TASK_SWITCH
 * @{
 */
#define VMX_EXIT_QUALIFICATION_TASK_SWITCH_SELECTOR(a)          ((a) & 0xffff)
#define VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE(a)              (((a) >> 30) & 0x3)
/** Task switch caused by a call instruction. */
#define VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_CALL            0
/** Task switch caused by an iret instruction. */
#define VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IRET            1
/** Task switch caused by a jmp instruction. */
#define VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_JMP             2
/** Task switch caused by an interrupt gate. */
#define VMX_EXIT_QUALIFICATION_TASK_SWITCH_TYPE_IDT             3
/** @} */


/** @name VMX_EXIT_EPT_VIOLATION
 * @{
 */
/** Set if the violation was caused by a data read. */
#define VMX_EXIT_QUALIFICATION_EPT_DATA_READ                    RT_BIT(0)
/** Set if the violation was caused by a data write. */
#define VMX_EXIT_QUALIFICATION_EPT_DATA_WRITE                   RT_BIT(1)
/** Set if the violation was caused by an instruction fetch. */
#define VMX_EXIT_QUALIFICATION_EPT_INSTR_FETCH                  RT_BIT(2)
/** AND of the present bit of all EPT structures. */
#define VMX_EXIT_QUALIFICATION_EPT_ENTRY_PRESENT                RT_BIT(3)
/** AND of the write bit of all EPT structures. */
#define VMX_EXIT_QUALIFICATION_EPT_ENTRY_WRITE                  RT_BIT(4)
/** AND of the execute bit of all EPT structures. */
#define VMX_EXIT_QUALIFICATION_EPT_ENTRY_EXECUTE                RT_BIT(5)
/** Set if the guest linear address field contains the faulting address. */
#define VMX_EXIT_QUALIFICATION_EPT_GUEST_ADDR_VALID             RT_BIT(7)
/** If bit 7 is one (reserved otherwise):
 *  1 - the access was to the guest-physical address that is the translation of the faulting linear address.
 *  0 - the access was to a paging-structure entry (page walk or access/dirty bit update).
 */
#define VMX_EXIT_QUALIFICATION_EPT_TRANSLATED_ACCESS            RT_BIT(8)
/** @} */
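
/* Illustrative usage sketch, not part of this header: a common first-level
 * classification of an EPT violation exit qualification.  The vmxSketch* name is
 * invented for the example. */
DECLINLINE(bool) vmxSketchIsEptWriteViolation(uint64_t uExitQual)
{
    /* True for a data write that was denied because the EPT entries do not allow writing. */
    return (uExitQual & VMX_EXIT_QUALIFICATION_EPT_DATA_WRITE)
        && !(uExitQual & VMX_EXIT_QUALIFICATION_EPT_ENTRY_WRITE);
}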


/** @name VMX_EXIT_PORT_IO
 * @{
 */
/** 0-2:   IO operation width. */
#define VMX_EXIT_QUALIFICATION_IO_WIDTH(a)                      ((a) & 7)
/** 3:     IO operation direction. */
#define VMX_EXIT_QUALIFICATION_IO_DIRECTION(a)                  (((a) >> 3) & 1)
/** 4:     String IO operation (INS / OUTS). */
#define VMX_EXIT_QUALIFICATION_IO_IS_STRING(a)                  RT_BOOL((a) & RT_BIT_64(4))
/** 5:     Repeated IO operation. */
#define VMX_EXIT_QUALIFICATION_IO_IS_REP(a)                     RT_BOOL((a) & RT_BIT_64(5))
/** 6:     Operand encoding. */
#define VMX_EXIT_QUALIFICATION_IO_ENCODING(a)                   (((a) >> 6) & 1)
/** 16-31: IO Port (0-0xffff). */
#define VMX_EXIT_QUALIFICATION_IO_PORT(a)                       (((a) >> 16) & 0xffff)
/* Rest reserved. */
/** @} */

/** @name VMX_EXIT_QUALIFICATION_IO_DIRECTION
 * @{
 */
#define VMX_EXIT_QUALIFICATION_IO_DIRECTION_OUT                 0
#define VMX_EXIT_QUALIFICATION_IO_DIRECTION_IN                  1
/** @} */


/** @name VMX_EXIT_QUALIFICATION_IO_ENCODING
 * @{
 */
#define VMX_EXIT_QUALIFICATION_IO_ENCODING_DX                   0
#define VMX_EXIT_QUALIFICATION_IO_ENCODING_IMM                  1
/** @} */
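
/* Illustrative usage sketch, not part of this header: decoding a port I/O exit
 * qualification into its port, access size and direction.  The vmxSketch* name and
 * out-parameters are invented for the example. */
DECLINLINE(bool) vmxSketchDecodeIoExit(uint64_t uExitQual, uint16_t *puPort, uint32_t *pcbAccess)
{
    *puPort    = (uint16_t)VMX_EXIT_QUALIFICATION_IO_PORT(uExitQual);
    *pcbAccess = (uint32_t)VMX_EXIT_QUALIFICATION_IO_WIDTH(uExitQual) + 1; /* Width field is size minus one. */
    return VMX_EXIT_QUALIFICATION_IO_DIRECTION(uExitQual) == VMX_EXIT_QUALIFICATION_IO_DIRECTION_IN;
}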

/** @name VMX_EXIT_APIC_ACCESS
 * @{
 */
/** 0-11:   If the APIC-access VM exit is due to a linear access, the offset of access within the APIC page. */
#define VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(a)            ((a) & 0xfff)
/** 12-15:  Access type. */
#define VMX_EXIT_QUALIFICATION_APIC_ACCESS_TYPE(a)              (((a) >> 12) & 0xF)
/* Rest reserved. */
/** @} */


/** @name VMX_EXIT_QUALIFICATION_APIC_ACCESS_TYPE; access types
 * @{
 */
/** Linear read access. */
#define VMX_APIC_ACCESS_TYPE_LINEAR_READ                        0
/** Linear write access. */
#define VMX_APIC_ACCESS_TYPE_LINEAR_WRITE                       1
/** Linear instruction fetch access. */
#define VMX_APIC_ACCESS_TYPE_LINEAR_INSTR_FETCH                 2
/** Linear read/write access during event delivery. */
#define VMX_APIC_ACCESS_TYPE_LINEAR_EVENT_DELIVERY              3
/** Physical read/write access during event delivery. */
#define VMX_APIC_ACCESS_TYPE_PHYSICAL_EVENT_DELIVERY            10
/** Physical access for an instruction fetch or during instruction execution. */
#define VMX_APIC_ACCESS_TYPE_PHYSICAL_INSTR                     15
/** @} */

/** @} */
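
/* Illustrative usage sketch, not part of this header: checking for a plain linear write
 * to the virtual-APIC page and recovering the offset that was accessed.  The vmxSketch*
 * name and out-parameter are invented for the example. */
DECLINLINE(bool) vmxSketchIsLinearApicWrite(uint64_t uExitQual, uint32_t *poffAccess)
{
    *poffAccess = (uint32_t)VMX_EXIT_QUALIFICATION_APIC_ACCESS_OFFSET(uExitQual);
    return VMX_EXIT_QUALIFICATION_APIC_ACCESS_TYPE(uExitQual) == VMX_APIC_ACCESS_TYPE_LINEAR_WRITE;
}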

/** @name VMCS field encoding - Natural width guest state fields
 * @{
 */
#define VMX_VMCS_GUEST_CR0                                      0x6800
#define VMX_VMCS_GUEST_CR3                                      0x6802
#define VMX_VMCS_GUEST_CR4                                      0x6804
#define VMX_VMCS_GUEST_ES_BASE                                  0x6806
#define VMX_VMCS_GUEST_CS_BASE                                  0x6808
#define VMX_VMCS_GUEST_SS_BASE                                  0x680A
#define VMX_VMCS_GUEST_DS_BASE                                  0x680C
#define VMX_VMCS_GUEST_FS_BASE                                  0x680E
#define VMX_VMCS_GUEST_GS_BASE                                  0x6810
#define VMX_VMCS_GUEST_LDTR_BASE                                0x6812
#define VMX_VMCS_GUEST_TR_BASE                                  0x6814
#define VMX_VMCS_GUEST_GDTR_BASE                                0x6816
#define VMX_VMCS_GUEST_IDTR_BASE                                0x6818
#define VMX_VMCS_GUEST_DR7                                      0x681A
#define VMX_VMCS_GUEST_RSP                                      0x681C
#define VMX_VMCS_GUEST_RIP                                      0x681E
#define VMX_VMCS_GUEST_RFLAGS                                   0x6820
#define VMX_VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS                 0x6822
#define VMX_VMCS_GUEST_SYSENTER_ESP                             0x6824  /**< MSR IA32_SYSENTER_ESP */
#define VMX_VMCS_GUEST_SYSENTER_EIP                             0x6826  /**< MSR IA32_SYSENTER_EIP */
/** @} */


/** @name VMX_VMCS_GUEST_DEBUG_EXCEPTIONS
 * @{
 */
/** Hardware breakpoint 0 was met. */
#define VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_B0                      RT_BIT(0)
/** Hardware breakpoint 1 was met. */
#define VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_B1                      RT_BIT(1)
/** Hardware breakpoint 2 was met. */
#define VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_B2                      RT_BIT(2)
/** Hardware breakpoint 3 was met. */
#define VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_B3                      RT_BIT(3)
/** At least one data or IO breakpoint was hit. */
#define VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BREAKPOINT_ENABLED      RT_BIT(12)
/** A debug exception would have been triggered by single-step execution mode. */
#define VMX_VMCS_GUEST_DEBUG_EXCEPTIONS_BS                      RT_BIT(14)
/** Bits 4-11, 13 and 15-63 are reserved. */

/** @} */

/** @name VMCS field encoding - Natural width host state fields
 * @{
 */
#define VMX_VMCS_HOST_CR0                                       0x6C00
#define VMX_VMCS_HOST_CR3                                       0x6C02
#define VMX_VMCS_HOST_CR4                                       0x6C04
#define VMX_VMCS_HOST_FS_BASE                                   0x6C06
#define VMX_VMCS_HOST_GS_BASE                                   0x6C08
#define VMX_VMCS_HOST_TR_BASE                                   0x6C0A
#define VMX_VMCS_HOST_GDTR_BASE                                 0x6C0C
#define VMX_VMCS_HOST_IDTR_BASE                                 0x6C0E
#define VMX_VMCS_HOST_SYSENTER_ESP                              0x6C10
#define VMX_VMCS_HOST_SYSENTER_EIP                              0x6C12
#define VMX_VMCS_HOST_RSP                                       0x6C14
#define VMX_VMCS_HOST_RIP                                       0x6C16
/** @} */

/** @} */


/** @defgroup grp_vmx_asm   VMX assembly helpers
 * @ingroup grp_vmx
 * @{
 */

/**
 * Restores host-state fields that do not need to be restored on every VM-exit.
 *
 * @returns VBox status code.
 * @param   fRestoreHostFlags   Flags indicating which host registers need to be
 *                              restored.
 * @param   pRestoreHost        Pointer to the host-restore structure.
 */
DECLASM(int) VMXRestoreHostState(uint32_t fRestoreHostFlags, PVMXRESTOREHOST pRestoreHost);


/**
 * Dispatches an NMI to the host.
 */
DECLASM(int) VMXDispatchHostNmi(void);


/**
 * Executes VMXON
 *
 * @returns VBox status code
 * @param   pVMXOn      Physical address of VMXON structure
 */
#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS) || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
DECLASM(int) VMXEnable(RTHCPHYS pVMXOn);
#else
DECLINLINE(int) VMXEnable(RTHCPHYS pVMXOn)
{
# if RT_INLINE_ASM_GNU_STYLE
    int rc = VINF_SUCCESS;
    __asm__ __volatile__ (
       "push     %3                                             \n\t"
       "push     %2                                             \n\t"
       ".byte    0xF3, 0x0F, 0xC7, 0x34, 0x24  # VMXON [esp]    \n\t"
       "ja       2f                                             \n\t"
       "je       1f                                             \n\t"
       "movl     $"RT_XSTR(VERR_VMX_INVALID_VMXON_PTR)", %0     \n\t"
       "jmp      2f                                             \n\t"
       "1:                                                      \n\t"
       "movl     $"RT_XSTR(VERR_VMX_VMXON_FAILED)", %0          \n\t"
       "2:                                                      \n\t"
       "add      $8, %%esp                                      \n\t"
       :"=rm"(rc)
       :"0"(VINF_SUCCESS),
        "ir"((uint32_t)pVMXOn),        /* don't allow direct memory reference here, */
        "ir"((uint32_t)(pVMXOn >> 32)) /* this would not work with -fomit-frame-pointer */
       :"memory"
       );
    return rc;

# elif VMX_USE_MSC_INTRINSICS
    unsigned char rcMsc = __vmx_on(&pVMXOn);
    if (RT_LIKELY(rcMsc == 0))
        return VINF_SUCCESS;
    return rcMsc == 2 ? VERR_VMX_INVALID_VMXON_PTR : VERR_VMX_VMXON_FAILED;

# else
    int rc = VINF_SUCCESS;
    __asm
    {
        push    dword ptr [pVMXOn+4]
        push    dword ptr [pVMXOn]
        _emit   0xF3
        _emit   0x0F
        _emit   0xC7
        _emit   0x34
        _emit   0x24     /* VMXON [esp] */
        jnc     vmxon_good
        mov     dword ptr [rc], VERR_VMX_INVALID_VMXON_PTR
        jmp     the_end

vmxon_good:
        jnz     the_end
        mov     dword ptr [rc], VERR_VMX_VMXON_FAILED
the_end:
        add     esp, 8
    }
    return rc;
# endif
}
#endif


/**
 * Executes VMXOFF
 */
#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS) || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
DECLASM(void) VMXDisable(void);
#else
DECLINLINE(void) VMXDisable(void)
{
# if RT_INLINE_ASM_GNU_STYLE
    __asm__ __volatile__ (
       ".byte 0x0F, 0x01, 0xC4  # VMXOFF                        \n\t"
       );

# elif VMX_USE_MSC_INTRINSICS
    __vmx_off();

# else
    __asm
    {
        _emit   0x0F
        _emit   0x01
        _emit   0xC4   /* VMXOFF */
    }
# endif
}
#endif


/**
 * Executes VMCLEAR
 *
 * @returns VBox status code
 * @param   pVMCS       Physical address of VM control structure
 */
#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS) || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
DECLASM(int) VMXClearVmcs(RTHCPHYS pVMCS);
#else
DECLINLINE(int) VMXClearVmcs(RTHCPHYS pVMCS)
{
# if RT_INLINE_ASM_GNU_STYLE
    int rc = VINF_SUCCESS;
    __asm__ __volatile__ (
       "push    %3                                              \n\t"
       "push    %2                                              \n\t"
       ".byte   0x66, 0x0F, 0xC7, 0x34, 0x24  # VMCLEAR [esp]   \n\t"
       "jnc     1f                                              \n\t"
       "movl    $"RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0       \n\t"
       "1:                                                      \n\t"
       "add     $8, %%esp                                       \n\t"
       :"=rm"(rc)
       :"0"(VINF_SUCCESS),
        "ir"((uint32_t)pVMCS),        /* don't allow direct memory reference here, */
        "ir"((uint32_t)(pVMCS >> 32)) /* this would not work with -fomit-frame-pointer */
       :"memory"
       );
    return rc;

# elif VMX_USE_MSC_INTRINSICS
    unsigned char rcMsc = __vmx_vmclear(&pVMCS);
    if (RT_LIKELY(rcMsc == 0))
        return VINF_SUCCESS;
    return VERR_VMX_INVALID_VMCS_PTR;

# else
    int rc = VINF_SUCCESS;
    __asm
    {
        push    dword ptr [pVMCS+4]
        push    dword ptr [pVMCS]
        _emit   0x66
        _emit   0x0F
        _emit   0xC7
        _emit   0x34
        _emit   0x24     /* VMCLEAR [esp] */
        jnc     success
        mov     dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
success:
        add     esp, 8
    }
    return rc;
# endif
}
#endif


/**
 * Executes VMPTRLD
 *
 * @returns VBox status code
 * @param   pVMCS       Physical address of VMCS structure
 */
#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS) || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
DECLASM(int) VMXActivateVmcs(RTHCPHYS pVMCS);
#else
DECLINLINE(int) VMXActivateVmcs(RTHCPHYS pVMCS)
{
# if RT_INLINE_ASM_GNU_STYLE
    int rc = VINF_SUCCESS;
    __asm__ __volatile__ (
       "push    %3                                              \n\t"
       "push    %2                                              \n\t"
       ".byte   0x0F, 0xC7, 0x34, 0x24  # VMPTRLD [esp]         \n\t"
       "jnc     1f                                              \n\t"
       "movl    $"RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0       \n\t"
       "1:                                                      \n\t"
       "add     $8, %%esp                                       \n\t"
       :"=rm"(rc)
       :"0"(VINF_SUCCESS),
        "ir"((uint32_t)pVMCS),        /* don't allow direct memory reference here, */
        "ir"((uint32_t)(pVMCS >> 32)) /* this will not work with -fomit-frame-pointer */
       );
    return rc;

# elif VMX_USE_MSC_INTRINSICS
    unsigned char rcMsc = __vmx_vmptrld(&pVMCS);
    if (RT_LIKELY(rcMsc == 0))
        return VINF_SUCCESS;
    return VERR_VMX_INVALID_VMCS_PTR;

# else
    int rc = VINF_SUCCESS;
    __asm
    {
        push    dword ptr [pVMCS+4]
        push    dword ptr [pVMCS]
        _emit   0x0F
        _emit   0xC7
        _emit   0x34
        _emit   0x24     /* VMPTRLD [esp] */
        jnc     success
        mov     dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR

success:
        add     esp, 8
    }
    return rc;
# endif
}
#endif

/**
 * Executes VMPTRST
 *
 * @returns VBox status code
 * @param pVMCS    Address that will receive the current VMCS pointer
 */
DECLASM(int) VMXGetActivatedVmcs(RTHCPHYS *pVMCS);

/**
 * Executes VMWRITE
 *
 * @returns VBox status code
 * @retval  VINF_SUCCESS
 * @retval  VERR_VMX_INVALID_VMCS_PTR
 * @retval  VERR_VMX_INVALID_VMCS_FIELD
 *
 * @param   idxField        VMCS index
 * @param   u32Val          32-bit value
 *
 * @remarks The values of the two status codes can be ORed together, the result
 *          will be VERR_VMX_INVALID_VMCS_PTR.
 */
#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS) || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
DECLASM(int) VMXWriteVmcs32(uint32_t idxField, uint32_t u32Val);
#else
DECLINLINE(int) VMXWriteVmcs32(uint32_t idxField, uint32_t u32Val)
{
# if RT_INLINE_ASM_GNU_STYLE
    int rc = VINF_SUCCESS;
    __asm__ __volatile__ (
       ".byte  0x0F, 0x79, 0xC2        # VMWRITE eax, edx       \n\t"
       "ja     2f                                               \n\t"
       "je     1f                                               \n\t"
       "movl   $"RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0        \n\t"
       "jmp    2f                                               \n\t"
       "1:                                                      \n\t"
       "movl   $"RT_XSTR(VERR_VMX_INVALID_VMCS_FIELD)", %0      \n\t"
       "2:                                                      \n\t"
       :"=rm"(rc)
       :"0"(VINF_SUCCESS),
        "a"(idxField),
        "d"(u32Val)
       );
    return rc;

# elif VMX_USE_MSC_INTRINSICS
     unsigned char rcMsc = __vmx_vmwrite(idxField, u32Val);
     if (RT_LIKELY(rcMsc == 0))
         return VINF_SUCCESS;
     return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;

# else
    int rc = VINF_SUCCESS;
    __asm
    {
        push   dword ptr [u32Val]
        mov    eax, [idxField]
        _emit  0x0F
        _emit  0x79
        _emit  0x04
        _emit  0x24     /* VMWRITE eax, [esp] */
        jnc    valid_vmcs
        mov    dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
        jmp    the_end

valid_vmcs:
        jnz    the_end
        mov    dword ptr [rc], VERR_VMX_INVALID_VMCS_FIELD
the_end:
        add    esp, 4
    }
    return rc;
# endif
}
#endif
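
/* Illustrative usage sketch, not part of this header; the vmxSketch* name is invented
 * for the example.  It shows the usual ordering of the helpers above when taking a CPU
 * into VMX operation and loading a VMCS, assuming CR4.VMXE is already set and that the
 * first dword of both regions holds the revision identifier from MSR_IA32_VMX_BASIC. */
DECLINLINE(int) vmxSketchEnterAndLoadVmcs(RTHCPHYS HCPhysVmxOn, RTHCPHYS HCPhysVmcs)
{
    int rc = VMXEnable(HCPhysVmxOn);            /* VMXON */
    if (RT_SUCCESS(rc))
    {
        rc = VMXClearVmcs(HCPhysVmcs);          /* VMCLEAR: put the VMCS into the clear state. */
        if (RT_SUCCESS(rc))
            rc = VMXActivateVmcs(HCPhysVmcs);   /* VMPTRLD: make it the current VMCS. */
        if (RT_FAILURE(rc))
            VMXDisable();                       /* VMXOFF on failure. */
    }
    return rc;
}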

/**
 * Executes VMWRITE
 *
 * @returns VBox status code
 * @retval  VINF_SUCCESS
 * @retval  VERR_VMX_INVALID_VMCS_PTR
 * @retval  VERR_VMX_INVALID_VMCS_FIELD
 *
 * @param   idxField        VMCS index
 * @param   u64Val          16, 32 or 64-bit value
 *
 * @remarks The values of the two status codes can be ORed together, the result
 *          will be VERR_VMX_INVALID_VMCS_PTR.
 */
#if !defined(RT_ARCH_X86) || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
# if !VMX_USE_MSC_INTRINSICS || ARCH_BITS != 64
DECLASM(int) VMXWriteVmcs64(uint32_t idxField, uint64_t u64Val);
# else  /* VMX_USE_MSC_INTRINSICS */
DECLINLINE(int) VMXWriteVmcs64(uint32_t idxField, uint64_t u64Val)
{
    unsigned char rcMsc = __vmx_vmwrite(idxField, u64Val);
    if (RT_LIKELY(rcMsc == 0))
        return VINF_SUCCESS;
    return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;
}
# endif /* VMX_USE_MSC_INTRINSICS */
#else
# define VMXWriteVmcs64(idxField, u64Val)    VMXWriteVmcs64Ex(pVCpu, idxField, u64Val) /** @todo dead ugly, picking up pVCpu like this */
VMMR0DECL(int) VMXWriteVmcs64Ex(PVMCPU pVCpu, uint32_t idxField, uint64_t u64Val);
#endif

#ifdef VBOX_WITH_HYBRID_32BIT_KERNEL
# define VMXWriteVmcsHstN(idxField, uVal)       HMVMX_IS_64BIT_HOST_MODE() ?                     \
                                                   VMXWriteVmcs64(idxField, uVal)                 \
                                                 : VMXWriteVmcs32(idxField, uVal)
# define VMXWriteVmcsGstN(idxField, u64Val)     (pVCpu->CTX_SUFF(pVM)->hm.s.fAllow64BitGuests) ? \
                                                   VMXWriteVmcs64(idxField, u64Val)               \
                                                 : VMXWriteVmcs32(idxField, u64Val)
#elif ARCH_BITS == 32
# define VMXWriteVmcsHstN                       VMXWriteVmcs32
# define VMXWriteVmcsGstN(idxField, u64Val)     VMXWriteVmcs64Ex(pVCpu, idxField, u64Val)
#else  /* ARCH_BITS == 64 */
# define VMXWriteVmcsHstN                       VMXWriteVmcs64
# define VMXWriteVmcsGstN                       VMXWriteVmcs64
#endif


/**
 * Invalidate a page using INVEPT
 * @returns VBox status code
 * @param   enmFlush    Type of flush
 * @param   pDescriptor Descriptor
 */
DECLASM(int) VMXR0InvEPT(VMX_FLUSH_EPT enmFlush, uint64_t *pDescriptor);

/**
 * Invalidate a page using INVVPID
 * @returns VBox status code
 * @param   enmFlush    Type of flush
 * @param   pDescriptor Descriptor
 */
DECLASM(int) VMXR0InvVPID(VMX_FLUSH_VPID enmFlush, uint64_t *pDescriptor);

/**
 * Executes VMREAD
 *
 * @returns VBox status code
 * @retval  VINF_SUCCESS
 * @retval  VERR_VMX_INVALID_VMCS_PTR
 * @retval  VERR_VMX_INVALID_VMCS_FIELD
 *
 * @param   idxField        VMCS index
 * @param   pData           Where to store the VMCS field value
 *
 * @remarks The values of the two status codes can be ORed together, the result
 *          will be VERR_VMX_INVALID_VMCS_PTR.
 */
#if ((RT_INLINE_ASM_EXTERNAL || !defined(RT_ARCH_X86)) && !VMX_USE_MSC_INTRINSICS) || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
DECLASM(int) VMXReadVmcs32(uint32_t idxField, uint32_t *pData);
#else
DECLINLINE(int) VMXReadVmcs32(uint32_t idxField, uint32_t *pData)
{
# if RT_INLINE_ASM_GNU_STYLE
    int rc = VINF_SUCCESS;
    __asm__ __volatile__ (
       "movl   $"RT_XSTR(VINF_SUCCESS)", %0                      \n\t"
       ".byte  0x0F, 0x78, 0xc2        # VMREAD eax, edx         \n\t"
       "ja     2f                                                \n\t"
       "je     1f                                                \n\t"
       "movl   $"RT_XSTR(VERR_VMX_INVALID_VMCS_PTR)", %0         \n\t"
       "jmp    2f                                                \n\t"
       "1:                                                       \n\t"
       "movl   $"RT_XSTR(VERR_VMX_INVALID_VMCS_FIELD)", %0       \n\t"
       "2:                                                       \n\t"
       :"=&r"(rc),
        "=d"(*pData)
       :"a"(idxField),
        "d"(0)
       );
    return rc;

# elif VMX_USE_MSC_INTRINSICS
    unsigned char rcMsc;
#  if ARCH_BITS == 32
    rcMsc = __vmx_vmread(idxField, pData);
#  else
    uint64_t u64Tmp;
    rcMsc = __vmx_vmread(idxField, &u64Tmp);
    *pData = (uint32_t)u64Tmp;
#  endif
    if (RT_LIKELY(rcMsc == 0))
        return VINF_SUCCESS;
    return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;

# else
    int rc = VINF_SUCCESS;
    __asm
    {
        sub     esp, 4
        mov     dword ptr [esp], 0
        mov     eax, [idxField]
        _emit   0x0F
        _emit   0x78
        _emit   0x04
        _emit   0x24     /* VMREAD eax, [esp] */
        mov     edx, pData
        pop     dword ptr [edx]
        jnc     valid_vmcs
        mov     dword ptr [rc], VERR_VMX_INVALID_VMCS_PTR
        jmp     the_end

valid_vmcs:
        jnz     the_end
        mov     dword ptr [rc], VERR_VMX_INVALID_VMCS_FIELD
the_end:
    }
    return rc;
# endif
}
#endif

/**
 * Executes VMREAD
 *
 * @returns VBox status code
 * @retval  VINF_SUCCESS
 * @retval  VERR_VMX_INVALID_VMCS_PTR
 * @retval  VERR_VMX_INVALID_VMCS_FIELD
 *
 * @param   idxField        VMCS index
 * @param   pData           Where to store the VMCS field value
 *
 * @remarks The values of the two status codes can be ORed together, the result
 *          will be VERR_VMX_INVALID_VMCS_PTR.
 */
#if (!defined(RT_ARCH_X86) && !VMX_USE_MSC_INTRINSICS) || defined(VBOX_WITH_HYBRID_32BIT_KERNEL)
DECLASM(int) VMXReadVmcs64(uint32_t idxField, uint64_t *pData);
#else
DECLINLINE(int) VMXReadVmcs64(uint32_t idxField, uint64_t *pData)
{
# if VMX_USE_MSC_INTRINSICS
    unsigned char rcMsc;
#  if ARCH_BITS == 32
    size_t        uLow;
    size_t        uHigh;
    rcMsc  = __vmx_vmread(idxField, &uLow);
    rcMsc |= __vmx_vmread(idxField + 1, &uHigh);
    *pData = RT_MAKE_U64(uLow, uHigh);
#  else
    rcMsc = __vmx_vmread(idxField, pData);
#  endif
    if (RT_LIKELY(rcMsc == 0))
        return VINF_SUCCESS;
    return rcMsc == 2 ? VERR_VMX_INVALID_VMCS_PTR : VERR_VMX_INVALID_VMCS_FIELD;

# elif ARCH_BITS == 32
    int rc;
    uint32_t val_hi, val;
    rc  = VMXReadVmcs32(idxField, &val);
    rc |= VMXReadVmcs32(idxField + 1, &val_hi);
    AssertRC(rc);
    *pData = RT_MAKE_U64(val, val_hi);
    return rc;

# else
#  error "Shouldn't be here..."
# endif
}
#endif
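
/* Illustrative usage sketch, not part of this header: reading a natural-width guest
 * field with the helper above, assuming a current VMCS is loaded and, for simplicity,
 * a 64-bit host.  The vmxSketch* name is invented for the example. */
DECLINLINE(uint64_t) vmxSketchReadGuestRip(void)
{
    uint64_t u64GuestRip = 0;
    int rc = VMXReadVmcs64(VMX_VMCS_GUEST_RIP, &u64GuestRip);
    AssertRC(rc);
    return u64GuestRip;
}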

/**
 * Gets the last instruction error value from the current VMCS
 *
 * @returns error value
 */
DECLINLINE(uint32_t) VMXGetLastError(void)
{
#if ARCH_BITS == 64
    uint64_t uLastError = 0;
    int rc = VMXReadVmcs64(VMX_VMCS32_RO_VM_INSTR_ERROR, &uLastError);
    AssertRC(rc);
    return (uint32_t)uLastError;

#else /* 32-bit host: */
    uint32_t uLastError = 0;
    int rc = VMXReadVmcs32(VMX_VMCS32_RO_VM_INSTR_ERROR, &uLastError);
    AssertRC(rc);
    return uLastError;
#endif
}

#ifdef IN_RING0
VMMR0DECL(int) VMXR0InvalidatePage(PVM pVM, PVMCPU pVCpu, RTGCPTR GCVirt);
VMMR0DECL(int) VMXR0InvalidatePhysPage(PVM pVM, PVMCPU pVCpu, RTGCPHYS GCPhys);
#endif /* IN_RING0 */

/** @} */

#endif