Repository: donnaskiez/ac
Branch: master
Commit: bd7ecfd50348
Files: 79
Total size: 1.8 MB
Directory structure:
gitextract_efljrolz/
├── .clang-format
├── .clang-format-cpp
├── .gitattributes
├── .gitignore
├── LICENSE.md
├── README.md
├── ac.sln
├── driver/
│ ├── apc.c
│ ├── apc.h
│ ├── arch.asm
│ ├── callbacks.c
│ ├── callbacks.h
│ ├── common.h
│ ├── containers/
│ │ ├── map.c
│ │ ├── map.h
│ │ ├── tree.c
│ │ └── tree.h
│ ├── cpp.hint
│ ├── crypt.c
│ ├── crypt.h
│ ├── driver.c
│ ├── driver.h
│ ├── driver.inf
│ ├── driver.vcxproj
│ ├── driver.vcxproj.filters
│ ├── hv.c
│ ├── hv.h
│ ├── hw.c
│ ├── hw.h
│ ├── ia32.h
│ ├── imports.c
│ ├── imports.h
│ ├── integrity.c
│ ├── integrity.h
│ ├── io.c
│ ├── io.h
│ ├── lib/
│ │ ├── stdlib.c
│ │ └── stdlib.h
│ ├── modules.c
│ ├── modules.h
│ ├── pe.c
│ ├── pe.h
│ ├── pool.c
│ ├── pool.h
│ ├── session.c
│ ├── session.h
│ ├── thread.c
│ ├── thread.h
│ ├── types/
│ │ ├── tpm12.h
│ │ ├── tpm20.h
│ │ ├── tpmptp.h
│ │ └── types.h
│ ├── util.c
│ └── util.h
├── module/
│ ├── client/
│ │ ├── message_queue.cpp
│ │ ├── message_queue.h
│ │ ├── pipe.cpp
│ │ └── pipe.h
│ ├── common.h
│ ├── crypt/
│ │ ├── crypt.cpp
│ │ └── crypt.h
│ ├── dispatcher/
│ │ ├── dispatcher.cpp
│ │ ├── dispatcher.h
│ │ ├── threadpool.cpp
│ │ ├── threadpool.h
│ │ ├── timer.cpp
│ │ └── timer.h
│ ├── helper.cpp
│ ├── helper.h
│ ├── imports.cpp
│ ├── imports.h
│ ├── kernel_interface/
│ │ ├── kernel_interface.cpp
│ │ └── kernel_interface.h
│ ├── main.cpp
│ ├── module.cpp
│ ├── module.h
│ ├── module.vcxproj
│ └── module.vcxproj.filters
└── server/
└── main.go
================================================
FILE CONTENTS
================================================
================================================
FILE: .clang-format
================================================
BasedOnStyle: webkit
AccessModifierOffset: -4
AlignAfterOpenBracket: AlwaysBreak
AlignConsecutiveAssignments: false
AlignConsecutiveDeclarations: false
AlignConsecutiveMacros: true
AlignEscapedNewlines: Left
AlignOperands: true
AlignTrailingComments: true
AllowAllArgumentsOnNextLine: false
AllowShortBlocksOnASingleLine: true
AllowShortCaseLabelsOnASingleLine: true
AllowShortIfStatementsOnASingleLine: false
AllowShortLoopsOnASingleLine: false
AlwaysBreakAfterReturnType: TopLevel
AlwaysBreakBeforeMultilineStrings: false
AlwaysBreakTemplateDeclarations: true #false
BinPackArguments: false
BinPackParameters: false
AllowShortFunctionsOnASingleLine: false
AllowAllParametersOfDeclarationOnNextLine: true
PenaltyBreakBeforeFirstCallParameter: 0
BreakBeforeBraces: Stroustrup
BraceWrapping:
AfterCaseLabel: true
AfterClass: true
AfterControlStatement: true
AfterEnum: true
AfterFunction: true
AfterNamespace: false
AfterStruct: true
AfterUnion: true
AfterExternBlock: false
BeforeCatch: true
BeforeElse: true
BreakBeforeBinaryOperators: None
BreakBeforeTernaryOperators: true
BreakConstructorInitializers: AfterColon
BreakStringLiterals: false
ColumnLimit: 80
CommentPragmas: '^begin_wpp|^end_wpp|^FUNC |^USESUFFIX '
ConstructorInitializerAllOnOneLineOrOnePerLine: true
ConstructorInitializerIndentWidth: 4
ContinuationIndentWidth: 4
Cpp11BracedListStyle: true
DerivePointerAlignment: false
ExperimentalAutoDetectBinPacking: false
IndentCaseLabels: false
IndentPPDirectives: AfterHash
IndentWidth: 4
KeepEmptyLinesAtTheStartOfBlocks: false
Language: Cpp
MacroBlockBegin: '^BEGIN_MODULE$|^BEGIN_TEST_CLASS$|^BEGIN_TEST_METHOD$'
MacroBlockEnd: '^END_MODULE$|^END_TEST_CLASS$|^END_TEST_METHOD$'
MaxEmptyLinesToKeep: 1
NamespaceIndentation: None #All
PointerAlignment: Left
ReflowComments: true
SortIncludes: true
SpaceAfterCStyleCast: false
SpaceBeforeAssignmentOperators: true
SpaceBeforeCtorInitializerColon: true
SpaceBeforeParens: ControlStatements
SpaceBeforeRangeBasedForLoopColon: true
SpaceInEmptyParentheses: false
SpacesInAngles: false
SpacesInCStyleCastParentheses: false
SpacesInParentheses: false
SpacesInSquareBrackets: false
Standard: Cpp11
StatementMacros: [
'EXTERN_C',
'PAGED',
'PAGEDX',
'NONPAGED',
'PNPCODE',
'INITCODE',
'_At_',
'_When_',
'_Success_',
'_Check_return_',
'_Must_inspect_result_',
'_IRQL_requires_same_',
'_IRQL_requires_',
'_IRQL_requires_max_',
'_IRQL_requires_min_',
'_IRQL_saves_',
'_IRQL_restores_',
'_IRQL_saves_global_',
'_IRQL_restores_global_',
'_IRQL_raises_',
'_IRQL_lowers_',
'_Acquires_lock_',
'_Releases_lock_',
'_Acquires_exclusive_lock_',
'_Releases_exclusive_lock_',
'_Acquires_shared_lock_',
'_Releases_shared_lock_',
'_Requires_lock_held_',
'_Use_decl_annotations_',
'_Guarded_by_',
'__drv_preferredFunction',
'__drv_allocatesMem',
'__drv_freesMem',
]
TabWidth: '4'
UseTab: Never
================================================
FILE: .clang-format-cpp
================================================
---
BasedOnStyle: LLVM
...
================================================
FILE: .gitattributes
================================================
###############################################################################
# Set default behavior to automatically normalize line endings.
###############################################################################
* text=auto
###############################################################################
# Set default behavior for command prompt diff.
#
# This is needed for earlier builds of msysgit that do not have it on by
# default for csharp files.
# Note: This is only used by command line
###############################################################################
#*.cs diff=csharp
###############################################################################
# Set the merge driver for project and solution files
#
# Merging from the command prompt will add diff markers to the files if there
# are conflicts (Merging from VS is not affected by the settings below, in VS
# the diff markers are never inserted). Diff markers may cause the following
# file extensions to fail to load in VS. An alternative would be to treat
# these files as binary and thus will always conflict and require user
# intervention with every merge. To do so, just uncomment the entries below
###############################################################################
#*.sln merge=binary
#*.csproj merge=binary
#*.vbproj merge=binary
#*.vcxproj merge=binary
#*.vcproj merge=binary
#*.dbproj merge=binary
#*.fsproj merge=binary
#*.lsproj merge=binary
#*.wixproj merge=binary
#*.modelproj merge=binary
#*.sqlproj merge=binary
#*.wwaproj merge=binary
###############################################################################
# behavior for image files
#
# image files are treated as binary by default.
###############################################################################
#*.jpg binary
#*.png binary
#*.gif binary
###############################################################################
# diff behavior for common document formats
#
# Convert binary document formats to text before diffing them. This feature
# is only available from the command line. Turn it on by uncommenting the
# entries below.
###############################################################################
#*.doc diff=astextplain
#*.DOC diff=astextplain
#*.docx diff=astextplain
#*.DOCX diff=astextplain
#*.dot diff=astextplain
#*.DOT diff=astextplain
#*.pdf diff=astextplain
#*.PDF diff=astextplain
#*.rtf diff=astextplain
#*.RTF diff=astextplain
================================================
FILE: .gitignore
================================================
## Ignore Visual Studio temporary files, build results, and
## files generated by popular Visual Studio add-ons.
##
## Get latest from https://github.com/github/gitignore/blob/master/VisualStudio.gitignore
# User-specific files
*.rsuser
*.suo
*.user
*.userosscache
*.sln.docstates
# User-specific files (MonoDevelop/Xamarin Studio)
*.userprefs
# Mono auto generated files
mono_crash.*
# Build results
[Dd]ebug/
[Dd]ebugPublic/
[Rr]elease/
[Rr]eleases/
x64/
x86/
[Ww][Ii][Nn]32/
[Aa][Rr][Mm]/
[Aa][Rr][Mm]64/
bld/
[Bb]in/
[Oo]bj/
[Oo]ut/
[Ll]og/
[Ll]ogs/
# Visual Studio 2015/2017 cache/options directory
.vs/
# Uncomment if you have tasks that create the project's static files in wwwroot
#wwwroot/
# Visual Studio 2017 auto generated files
Generated\ Files/
# MSTest test Results
[Tt]est[Rr]esult*/
[Bb]uild[Ll]og.*
# NUnit
*.VisualState.xml
TestResult.xml
nunit-*.xml
# Build Results of an ATL Project
[Dd]ebugPS/
[Rr]eleasePS/
dlldata.c
# Benchmark Results
BenchmarkDotNet.Artifacts/
# .NET Core
project.lock.json
project.fragment.lock.json
artifacts/
# ASP.NET Scaffolding
ScaffoldingReadMe.txt
# StyleCop
StyleCopReport.xml
# Files built by Visual Studio
*_i.c
*_p.c
*_h.h
*.ilk
*.meta
*.obj
*.iobj
*.pch
*.pdb
*.ipdb
*.pgc
*.pgd
*.rsp
*.sbr
*.tlb
*.tli
*.tlh
*.tmp
*.tmp_proj
*_wpftmp.csproj
*.log
*.vspscc
*.vssscc
.builds
*.pidb
*.svclog
*.scc
# Chutzpah Test files
_Chutzpah*
# Visual C++ cache files
ipch/
*.aps
*.ncb
*.opendb
*.opensdf
*.sdf
*.cachefile
*.VC.db
*.VC.VC.opendb
# Visual Studio profiler
*.psess
*.vsp
*.vspx
*.sap
# Visual Studio Trace Files
*.e2e
# TFS 2012 Local Workspace
$tf/
# Guidance Automation Toolkit
*.gpState
# ReSharper is a .NET coding add-in
_ReSharper*/
*.[Rr]e[Ss]harper
*.DotSettings.user
# TeamCity is a build add-in
_TeamCity*
# DotCover is a Code Coverage Tool
*.dotCover
# AxoCover is a Code Coverage Tool
.axoCover/*
!.axoCover/settings.json
# Coverlet is a free, cross platform Code Coverage Tool
coverage*.json
coverage*.xml
coverage*.info
# Visual Studio code coverage results
*.coverage
*.coveragexml
# NCrunch
_NCrunch_*
.*crunch*.local.xml
nCrunchTemp_*
# MightyMoose
*.mm.*
AutoTest.Net/
# Web workbench (sass)
.sass-cache/
# Installshield output folder
[Ee]xpress/
# DocProject is a documentation generator add-in
DocProject/buildhelp/
DocProject/Help/*.HxT
DocProject/Help/*.HxC
DocProject/Help/*.hhc
DocProject/Help/*.hhk
DocProject/Help/*.hhp
DocProject/Help/Html2
DocProject/Help/html
# Click-Once directory
publish/
# Publish Web Output
*.[Pp]ublish.xml
*.azurePubxml
# Note: Comment the next line if you want to checkin your web deploy settings,
# but database connection strings (with potential passwords) will be unencrypted
*.pubxml
*.publishproj
# Microsoft Azure Web App publish settings. Comment the next line if you want to
# checkin your Azure Web App publish settings, but sensitive information contained
# in these scripts will be unencrypted
PublishScripts/
# NuGet Packages
*.nupkg
# NuGet Symbol Packages
*.snupkg
# The packages folder can be ignored because of Package Restore
**/[Pp]ackages/*
# except build/, which is used as an MSBuild target.
!**/[Pp]ackages/build/
# Uncomment if necessary; however, it will generally be regenerated when needed
#!**/[Pp]ackages/repositories.config
# NuGet v3's project.json files produces more ignorable files
*.nuget.props
*.nuget.targets
# Microsoft Azure Build Output
csx/
*.build.csdef
# Microsoft Azure Emulator
ecf/
rcf/
# Windows Store app package directories and files
AppPackages/
BundleArtifacts/
Package.StoreAssociation.xml
_pkginfo.txt
*.appx
*.appxbundle
*.appxupload
# Visual Studio cache files
# files ending in .cache can be ignored
*.[Cc]ache
# but keep track of directories ending in .cache
!?*.[Cc]ache/
# Others
ClientBin/
~$*
*~
*.dbmdl
*.dbproj.schemaview
*.jfm
*.pfx
*.publishsettings
orleans.codegen.cs
# Including strong name files can present a security risk
# (https://github.com/github/gitignore/pull/2483#issue-259490424)
#*.snk
# Since there are multiple workflows, uncomment next line to ignore bower_components
# (https://github.com/github/gitignore/pull/1529#issuecomment-104372622)
#bower_components/
# RIA/Silverlight projects
Generated_Code/
# Backup & report files from converting an old project file
# to a newer Visual Studio version. Backup files are not needed,
# because we have git ;-)
_UpgradeReport_Files/
Backup*/
UpgradeLog*.XML
UpgradeLog*.htm
ServiceFabricBackup/
*.rptproj.bak
# SQL Server files
*.mdf
*.ldf
*.ndf
# Business Intelligence projects
*.rdl.data
*.bim.layout
*.bim_*.settings
*.rptproj.rsuser
*- [Bb]ackup.rdl
*- [Bb]ackup ([0-9]).rdl
*- [Bb]ackup ([0-9][0-9]).rdl
# Microsoft Fakes
FakesAssemblies/
# GhostDoc plugin setting file
*.GhostDoc.xml
# Node.js Tools for Visual Studio
.ntvs_analysis.dat
node_modules/
# Visual Studio 6 build log
*.plg
# Visual Studio 6 workspace options file
*.opt
# Visual Studio 6 auto-generated workspace file (contains which files were open etc.)
*.vbw
# Visual Studio LightSwitch build output
**/*.HTMLClient/GeneratedArtifacts
**/*.DesktopClient/GeneratedArtifacts
**/*.DesktopClient/ModelManifest.xml
**/*.Server/GeneratedArtifacts
**/*.Server/ModelManifest.xml
_Pvt_Extensions
# Paket dependency manager
.paket/paket.exe
paket-files/
# FAKE - F# Make
.fake/
# CodeRush personal settings
.cr/personal
# Python Tools for Visual Studio (PTVS)
__pycache__/
*.pyc
# Cake - Uncomment if you are using it
# tools/**
# !tools/packages.config
# Tabs Studio
*.tss
# Telerik's JustMock configuration file
*.jmconfig
# BizTalk build output
*.btp.cs
*.btm.cs
*.odx.cs
*.xsd.cs
# OpenCover UI analysis results
OpenCover/
# Azure Stream Analytics local run output
ASALocalRun/
# MSBuild Binary and Structured Log
*.binlog
# NVidia Nsight GPU debugger configuration file
*.nvuser
# MFractors (Xamarin productivity tool) working folder
.mfractor/
# Local History for Visual Studio
.localhistory/
# BeatPulse healthcheck temp database
healthchecksdb
# Backup folder for Package Reference Convert tool in Visual Studio 2017
MigrationBackup/
# Ionide (cross platform F# VS Code tools) working folder
.ionide/
# Fody - auto-generated XML schema
FodyWeavers.xsd
================================================
FILE: LICENSE.md
================================================
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
Copyright (C) 2007 Free Software Foundation, Inc.
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU Affero General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
Copyright (C)
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published
by the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see .
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
.
================================================
FILE: README.md
================================================
# ac
open source anti cheat (lol) which I made for fun.
# features
- Attached thread detection
- Process module .text section integrity checks
- NMI stackwalking via isr iretq
- APC, DPC stackwalking
- Return address exception hooking detection
- Chained .data pointer detection (iffy)
- Handle stripping via obj callbacks
- Process handle table enumeration
- System module device object verification
- System module .text integrity checks
- Removal of threads cid table entry detection
- Driver dispatch routine validation
- Extraction of various hardware identifiers
- EPT hook detection
- Various image integrity checks both of driver + module
- Hypervisor detection
- HalDispatch and HalPrivateDispatch routine validation
- Dynamic import resolving & encryption
- Malicious PCI device detection via configuration space scanning
- Win32kBase_DxgInterface routine validation
# architecture
- todo!
# planned features
There's a long list of features I still want to implement, the question is whether I can be bothered implementing them. I would say I'd accept pull requests for new features but I would expect high quality code and thorough testing with verifier (both inside a vm and bare metal).
# example
- I have recorded an example of the program running with CS2. Note that vac was obviously disabled. *If you decide to test with a steam game do not forget to launch in insecure mode*
- Shown are the kernel `VERBOSE` level logs in DebugView along with the usermode application console and some additional performance benchmarking things.
- [You can find the video here](https://youtu.be/b3mH7w8pOxs)
# known issues
- [See the issues page](https://github.com/donnaskiez/ac/issues)
- Feel free to open a new issue if you find any bugs
# windows versions tested:
- Win10 22H2
- Win11 22H2
# how to build
Requires [Visual Studio](https://visualstudio.microsoft.com/downloads/) and the [WDK](https://learn.microsoft.com/en-us/windows-hardware/drivers/download-the-wdk) for compilation.
## test signing mode
Before we continue, ensure you enable test signing mode as this driver is not signed.
1. Open a command prompt as Administrator
2. Enter the following commands:
```bash
bcdedit -set TESTSIGNING on
bcdedit /debug on
```
3. Restart Windows
## building and running the project
1. Clone the project i.e `git clone git@github.com:donnaskiez/ac.git`
2. Open the project in visual studio
3. Select `Release - No Server - Win10` or `Release - No Server - Win11` depending on the version of Windows you will be running the driver on.
4. Build the project in visual studio, if you experience any build issues - check the drivers project settings are the following:
- `Inf2Cat -> General -> Use Local Time` to `Yes`
- `C/C++ -> Treat Warnings As Errors` to `No`
- `C/C++ -> Spectre Mitigation` to `Disabled`
5. Move the `driver.sys` file located in `ac\x64\Release - No Server\` into the `Windows\System32\Drivers` directory
- You can rename the driver if you would like
6. Use the [OSR Loader](https://www.osronline.com/article.cfm%5Earticle=157.htm) and select `driver.sys` (or whatever you named it) that you moved to the Windows drivers folder. *DO NOT REGISTER THE SERVICE YET*.
7. Under `Service Start` select `System`. This is VERY important!
8. Click `Register Service`. *Do NOT click* `Start Service`!
9. Restart Windows.
10. Once restarted, open the program you would like to protect. This could be anything i.e cs2, notepad etc.
- if you do use a game to test, ensure the games anti-cheat is turned off before testing
11. Open your dll injector of choice (I simply use [Process Hacker](https://processhacker.sourceforge.io/))
12. Inject the dll found in `ac\x64\Release - No Server\` named `user.dll` into the target program
Logs will be printed to both the terminal output and the kernel debugger. See below for configuring kernel debugger output.
Note: The server is not needed for the program to function properly.
# how to configure kernel debugging output
The kernel driver is setup to log at 4 distinct levels:
```C
#define LOG_ERROR_LEVEL
#define LOG_WARNING_LEVEL
#define LOG_INFO_LEVEL
#define LOG_VERBOSE_LEVEL
```
As the names suggest, `ERROR_LEVEL` is for errors, `WARNING_LEVEL` is for warnings. `INFO_LEVEL` is for general information regarding what requests the driver is processing and `VERBOSE_LEVEL` contains very detailed information for each request.
## creating the registry key
If you are unfamiliar with the kernel debugging mask, you probably need to set one up. If you already have a debugging mask setup, you can skip to `setting the mask` below.
1. Open the Registry Editor
2. Copy and paste `Computer\HKEY_LOCAL_MACHINE\SYSTEM\CurrentControlSet\Control\Session Manager` into the bar at the top and press enter
3. On the left hand side, right click `Session Manager` and select `New -> Key`
4. Name the key `Debug Print Filter`
5. On the left hand side you should now see `Debug Print Filter`, right click and select `New -> DWORD (32 bit) Value`
6. Name the key `DEFAULT`
## setting the mask
1. Within the `Debug Print Filter` registry, double click the key named `DEFAULT`
2. Determine the level(s) of logging you would like to see. For most people interested I would set either `INFO_LEVEL` or `VERBOSE_LEVEL`. Remember that if you set `INFO_LEVEL`, you will see all `INFO_LEVEL`, `WARNING_LEVEL` and `ERROR_LEVEL` logs. Ie you see all logs above and including your set level.
```
ERROR_LEVEL = 0x3
WARNING_LEVEL = 0x7
INFO_LEVEL = 0xf
VERBOSE_LEVEL = 0x1f
```
3. Enter the value for the given logging level (seen above)
4. Click `Ok` and restart Windows.
## filtering debug output
If you choose to use `INFO_LEVEL` or `VERBOSE_LEVEL` there may be many logs from the kernel so we want to filter them out.
### windbg
With WinDbg connected to the target:
1. Pause the target using the `Break` button
2. Use the command: `.ofilter donna-ac*`
### debugview
1. Click `Edit -> Filter/Highlight`
2. Set the `Include` string to `donna-ac*`
## License
We have decided to put this Project under **AGPL-3.0**!
https://choosealicense.com/licenses/agpl-3.0/
# contact
feel free to dm me on discord or uc @donnaskiez
================================================
FILE: ac.sln
================================================
Microsoft Visual Studio Solution File, Format Version 12.00
# Visual Studio Version 17
VisualStudioVersion = 17.5.33502.453
MinimumVisualStudioVersion = 10.0.40219.1
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "driver", "driver\driver.vcxproj", "{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}"
EndProject
Project("{8BC9CEB8-8B4A-11D0-8D11-00A0C91BC942}") = "module", "module\module.vcxproj", "{3B18467A-4358-45EF-81B1-5C6F9B0B6728}"
EndProject
Global
GlobalSection(SolutionConfigurationPlatforms) = preSolution
Debug|Any CPU = Debug|Any CPU
Debug|ARM64 = Debug|ARM64
Debug|x64 = Debug|x64
Debug|x86 = Debug|x86
Release - No Server - Win10|Any CPU = Release - No Server - Win10|Any CPU
Release - No Server - Win10|ARM64 = Release - No Server - Win10|ARM64
Release - No Server - Win10|x64 = Release - No Server - Win10|x64
Release - No Server - Win10|x86 = Release - No Server - Win10|x86
Release - No Server - Win11|Any CPU = Release - No Server - Win11|Any CPU
Release - No Server - Win11|ARM64 = Release - No Server - Win11|ARM64
Release - No Server - Win11|x64 = Release - No Server - Win11|x64
Release - No Server - Win11|x86 = Release - No Server - Win11|x86
Release|Any CPU = Release|Any CPU
Release|ARM64 = Release|ARM64
Release|x64 = Release|x64
Release|x86 = Release|x86
EndGlobalSection
GlobalSection(ProjectConfigurationPlatforms) = postSolution
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Debug|Any CPU.ActiveCfg = Debug|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Debug|Any CPU.Build.0 = Debug|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Debug|Any CPU.Deploy.0 = Debug|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Debug|ARM64.ActiveCfg = Debug|ARM64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Debug|ARM64.Build.0 = Debug|ARM64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Debug|ARM64.Deploy.0 = Debug|ARM64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Debug|x64.ActiveCfg = Debug|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Debug|x64.Build.0 = Debug|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Debug|x64.Deploy.0 = Debug|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Debug|x86.ActiveCfg = Debug|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Debug|x86.Build.0 = Debug|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Debug|x86.Deploy.0 = Debug|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release - No Server - Win10|Any CPU.ActiveCfg = Release - Win10|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release - No Server - Win10|Any CPU.Build.0 = Release - Win10|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release - No Server - Win10|Any CPU.Deploy.0 = Release - Win10|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release - No Server - Win10|ARM64.ActiveCfg = Release - Win10|ARM64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release - No Server - Win10|ARM64.Build.0 = Release - Win10|ARM64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release - No Server - Win10|ARM64.Deploy.0 = Release - Win10|ARM64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release - No Server - Win10|x64.ActiveCfg = Release - Win10|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release - No Server - Win10|x64.Build.0 = Release - Win10|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release - No Server - Win10|x64.Deploy.0 = Release - Win10|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release - No Server - Win10|x86.ActiveCfg = Release - Win10|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release - No Server - Win10|x86.Build.0 = Release - Win10|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release - No Server - Win10|x86.Deploy.0 = Release - Win10|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release - No Server - Win11|Any CPU.ActiveCfg = Release - Win11|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release - No Server - Win11|Any CPU.Build.0 = Release - Win11|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release - No Server - Win11|Any CPU.Deploy.0 = Release - Win11|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release - No Server - Win11|ARM64.ActiveCfg = Release - Win10|ARM64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release - No Server - Win11|ARM64.Build.0 = Release - Win10|ARM64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release - No Server - Win11|ARM64.Deploy.0 = Release - Win10|ARM64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release - No Server - Win11|x64.ActiveCfg = Release - Win10|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release - No Server - Win11|x64.Build.0 = Release - Win10|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release - No Server - Win11|x64.Deploy.0 = Release - Win10|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release - No Server - Win11|x86.ActiveCfg = Release - Win10|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release - No Server - Win11|x86.Build.0 = Release - Win10|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release - No Server - Win11|x86.Deploy.0 = Release - Win10|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release|Any CPU.ActiveCfg = Release|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release|Any CPU.Build.0 = Release|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release|Any CPU.Deploy.0 = Release|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release|ARM64.ActiveCfg = Release|ARM64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release|ARM64.Build.0 = Release|ARM64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release|ARM64.Deploy.0 = Release|ARM64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release|x64.ActiveCfg = Release|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release|x64.Build.0 = Release|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release|x64.Deploy.0 = Release|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release|x86.ActiveCfg = Release|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release|x86.Build.0 = Release|x64
{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}.Release|x86.Deploy.0 = Release|x64
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Debug|Any CPU.ActiveCfg = Debug|x64
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Debug|Any CPU.Build.0 = Debug|x64
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Debug|ARM64.ActiveCfg = Debug|x64
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Debug|ARM64.Build.0 = Debug|x64
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Debug|x64.ActiveCfg = Debug|x64
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Debug|x64.Build.0 = Debug|x64
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Debug|x86.ActiveCfg = Debug|Win32
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Debug|x86.Build.0 = Debug|Win32
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Release - No Server - Win10|Any CPU.ActiveCfg = Release - No Server|x64
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Release - No Server - Win10|Any CPU.Build.0 = Release - No Server|x64
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Release - No Server - Win10|ARM64.ActiveCfg = Release - No Server|x64
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Release - No Server - Win10|ARM64.Build.0 = Release - No Server|x64
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Release - No Server - Win10|x64.ActiveCfg = Release - No Server|x64
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Release - No Server - Win10|x64.Build.0 = Release - No Server|x64
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Release - No Server - Win10|x86.ActiveCfg = Release - No Server|Win32
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Release - No Server - Win10|x86.Build.0 = Release - No Server|Win32
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Release - No Server - Win11|Any CPU.ActiveCfg = Release - No Server|x64
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Release - No Server - Win11|Any CPU.Build.0 = Release - No Server|x64
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Release - No Server - Win11|ARM64.ActiveCfg = Release - No Server|x64
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Release - No Server - Win11|ARM64.Build.0 = Release - No Server|x64
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Release - No Server - Win11|x64.ActiveCfg = Release - No Server|x64
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Release - No Server - Win11|x64.Build.0 = Release - No Server|x64
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Release - No Server - Win11|x86.ActiveCfg = Release - No Server|Win32
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Release - No Server - Win11|x86.Build.0 = Release - No Server|Win32
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Release|Any CPU.ActiveCfg = Release - No Server|x64
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Release|Any CPU.Build.0 = Release - No Server|x64
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Release|ARM64.ActiveCfg = Release - No Server|x64
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Release|ARM64.Build.0 = Release - No Server|x64
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Release|x64.ActiveCfg = Release - No Server|x64
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Release|x64.Build.0 = Release - No Server|x64
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Release|x86.ActiveCfg = Release - No Server|Win32
{3B18467A-4358-45EF-81B1-5C6F9B0B6728}.Release|x86.Build.0 = Release - No Server|Win32
EndGlobalSection
GlobalSection(SolutionProperties) = preSolution
HideSolutionNode = FALSE
EndGlobalSection
GlobalSection(ExtensibilityGlobals) = postSolution
SolutionGuid = {5F2C89ED-CBEA-4DAD-8576-94C53821D2E8}
EndGlobalSection
EndGlobal
================================================
FILE: driver/apc.c
================================================
#include "apc.h"
#include "driver.h"
#include "imports.h"
#include "lib/stdlib.h"
/*
 * Copies the APC context pointer stored at slot Index into *Context while
 * holding the driver configuration lock. *Context receives NULL when the
 * slot is empty.
 */
VOID
GetApcContextByIndex(_Out_ PVOID* Context, _In_ UINT32 Index)
{
    /* Index is a zero-based slot, so it must be strictly below the array
     * capacity; the original `<=` allowed an out-of-bounds read at
     * Index == MAXIMUM_APC_CONTEXTS. */
    NT_ASSERT(Index < MAXIMUM_APC_CONTEXTS);
    AcquireDriverConfigLock();
    *Context = (PVOID)GetApcContextArray()[Index];
    ReleaseDriverConfigLock();
}
/*
 * Looks up the APC context whose header carries ContextIdentifier.
 * *Context receives the matching context, or NULL when no context with
 * that id exists. Callers (IncrementApcCount, FreeApcAndDecrementApcCount)
 * rely on the NULL result to detect a missing context.
 */
VOID
GetApcContext(_Out_ PVOID* Context, _In_ UINT32 ContextIdentifier)
{
    NT_ASSERT(ContextIdentifier <= MAXIMUM_APC_CONTEXTS);
    PAPC_CONTEXT_HEADER header = NULL;
    /* The original left *Context untouched on a miss, handing the caller
     * an uninitialised _Out_ value. Always produce a defined result. */
    *Context = NULL;
    AcquireDriverConfigLock();
    for (UINT32 index = 0; index < MAXIMUM_APC_CONTEXTS; index++) {
        header = GetApcContextArray()[index];
        if (!header)
            continue;
        if (header->context_id != ContextIdentifier)
            continue;
        *Context = header;
        break;
    }
    ReleaseDriverConfigLock();
}
/*
* No need to hold the lock here as the thread freeing the APCs will
* already hold the configuration lock. We also dont want to release and
* reclaim the lock before calling this function since we need to ensure
* we hold the lock during the entire decrement and free process.
*/
/*
 * Frees Context and clears its slot in the context array, but only when
 * no APCs referencing it remain queued (count == 0).
 *
 * Returns TRUE when the structure was freed, FALSE when it is still in
 * use or is not present in the array. Caller must already hold the
 * driver configuration lock (see the comment above).
 */
BOOLEAN
FreeApcContextStructure(_Inout_ PAPC_CONTEXT_HEADER Context)
{
    /* The original asserted `Context <= MAXIMUM_APC_CONTEXTS`, comparing
     * a pointer against an array capacity — a copy-paste from the
     * id-based routines. Assert the pointer is valid instead. */
    NT_ASSERT(Context != NULL);
    /* The array base is loop-invariant; fetch it once. */
    PUINT64 entry = GetApcContextArray();
    for (UINT32 index = 0; index < MAXIMUM_APC_CONTEXTS; index++) {
        if (entry[index] != (UINT64)Context)
            continue;
        /* Refuse to free while queued APCs still reference this context. */
        if (Context->count > 0)
            return FALSE;
        ImpExFreePoolWithTag(Context, POOL_TAG_APC);
        entry[index] = 0;
        return TRUE;
    }
    return FALSE;
}
/*
 * Bumps the queued-APC counter of the context identified by ContextId.
 * Silently does nothing when no such context exists.
 */
VOID
IncrementApcCount(_In_ UINT32 ContextId)
{
    NT_ASSERT(ContextId <= MAXIMUM_APC_CONTEXTS);
    PAPC_CONTEXT_HEADER context = NULL;
    GetApcContext(&context, ContextId);
    if (!context)
        return;
    /* The counter is shared with the freeing threads, so mutate it only
     * under the configuration lock. */
    AcquireDriverConfigLock();
    context->count += 1;
    ReleaseDriverConfigLock();
}
/*
 * Releases the KAPC allocation and drops the queued-APC counter of the
 * owning context. Called from APC rundown / kernel routines once an APC
 * has finished executing.
 */
VOID
FreeApcAndDecrementApcCount(_Inout_ PRKAPC Apc, _In_ UINT32 ContextId)
{
    NT_ASSERT(Apc != NULL);
    NT_ASSERT(ContextId <= MAXIMUM_APC_CONTEXTS);
    PAPC_CONTEXT_HEADER owner = NULL;
    /* The APC itself is freed unconditionally; the counter update only
     * happens when the owning context can still be found. */
    ImpExFreePoolWithTag(Apc, POOL_TAG_APC);
    GetApcContext(&owner, ContextId);
    if (!owner)
        return;
    AcquireDriverConfigLock();
    owner->count -= 1;
    ReleaseDriverConfigLock();
}
/*
 * The reason we use a query model rather than checking the count of queued APCs
 * after each APC free and decrement is that the lock will be recursively
 * acquired by freeing threads (i.e. executing APCs) rather than APC allocation
 * threads. The reason for this is that freeing threads are executing at a
 * higher IRQL than the APC allocation thread, hence they are granted higher
 * priority by the scheduler when determining which thread will acquire the
 * lock next:
*
* [+] Freeing thread -> ApcKernelRoutine IRQL: 1 (APC_LEVEL)
* [+] Allocation thread -> ValidateThreadViaKernelApcCallback IRQL: 0
* (PASSIVE_LEVEL)
*
 * As a result, once an APC is executed and reaches the freeing stage, it will
 * acquire the lock and decrement it. Then, if at least 1 APC execution thread
 * is waiting on the lock, it will be prioritised due to its higher IRQL and the
* cycle will continue. Eventually, the count will reach 0 due to recursive
* acquisition by the executing APC threads and then the function will free the
* APC context structure. This will then cause a bug check the next time a
* thread accesses the context structure and hence not good :c.
*
* So to combat this, we add in a flag specifying whether or not an allocation
* of APCs is in progress, and even if the count is 0 we will not free the
* context structure until the count is 0 and allocation_in_progress is 0. We
* can then call this function alongside other query callbacks via IOCTL to
* constantly monitor the status of open APC contexts.
*/
NTSTATUS
QueryActiveApcContextsForCompletion()
{
PAPC_CONTEXT_HEADER entry = NULL;
AcquireDriverConfigLock();
for (UINT32 index = 0; index < MAXIMUM_APC_CONTEXTS; index++) {
GetApcContextByIndex(&entry, index);
if (!entry)
continue;
if (entry->count > 0 || entry->allocation_in_progress == TRUE)
continue;
switch (entry->context_id) {
case APC_CONTEXT_ID_STACKWALK:
FreeApcStackwalkApcContextInformation(entry);
FreeApcContextStructure(entry);
break;
}
}
ReleaseDriverConfigLock();
return STATUS_SUCCESS;
}
/*
 * Stores Context in the first free slot of the APC context array. Does
 * nothing when the driver is unloading (no new APC work may start) or
 * when every slot is occupied.
 */
VOID
InsertApcContext(_In_ PVOID Context)
{
    NT_ASSERT(Context != NULL);
    PUINT64 slots = NULL;
    if (IsDriverUnloading())
        return;
    AcquireDriverConfigLock();
    /* The array base does not change; the original re-fetched it on every
     * iteration. */
    slots = GetApcContextArray();
    for (UINT32 index = 0; index < MAXIMUM_APC_CONTEXTS; index++) {
        if (slots[index] == NULL) {
            slots[index] = (UINT64)Context;
            break;
        }
    }
    ReleaseDriverConfigLock();
}
/*
* The driver config structure holds an array of pointers to APC context
* structures. These APC context structures are unique to each APC operation
* that this driver will perform. For example, a single context will manage all
* APCs that are used to stackwalk, whilst another context will be used to
* manage all APCs used to query a threads memory for example.
*
* Due to the nature of APCs, its important to keep a total or count of the
* number of APCs we have allocated and queued to threads. This information is
* stored in the APC_CONTEXT_HEADER which all APC context structures will
* contain as the first entry in their structure. It holds the ContextId which
* is a unique identifier for the type of APC operation it is managing aswell as
* the number of currently queued APCs.
*
 * When an APC is allocated and queued, we increment this count. When an APC is
 * completed and freed, we decrement this counter and free the APC itself. If
 * all APCs have been freed and the counter is 0, the following objects will be
 * freed:
*
* 1. Any additional allocations used by the APC stored in the context structure
* 2. The APC context structure for the given APC operation
 * 3. The APC context entry in g_DriverConfig->apc_contexts will be zero'd.
*
* It's important to remember that the driver can unload when pending APC's have
* not been freed due to the limitations windows places on APCs, however I am in
* the process of finding a solution for this.
*/
BOOLEAN
DrvUnloadFreeAllApcContextStructures()
{
PUINT64 entry = NULL;
PAPC_CONTEXT_HEADER context = NULL;
LARGE_INTEGER delay = {.QuadPart = -ABSOLUTE(SECONDS(1))};
AcquireDriverConfigLock();
for (UINT32 index = 0; index < MAXIMUM_APC_CONTEXTS; index++) {
entry = GetApcContextArray();
if (entry[index] == NULL)
continue;
context = entry[index];
if (context->count > 0) {
DEBUG_VERBOSE(
"Still active APCs: Index: %lx, Count: %lx",
index,
context->count);
KeDelayExecutionThread(KernelMode, FALSE, &delay);
ReleaseDriverConfigLock();
return FALSE;
}
ImpExFreePoolWithTag(context, POOL_TAG_APC);
}
ReleaseDriverConfigLock();
return TRUE;
}
================================================
FILE: driver/apc.h
================================================
#ifndef APC_H
#define APC_H

/* The redundant self-include of apc.h (harmless under the guard, but
 * pointless) was removed. */
#include "common.h"
#include "driver.h"
#include "imports.h"

/* Fetches the context stored at a slot index (NULL when empty). */
VOID
GetApcContextByIndex(_Out_ PVOID* Context, _In_ UINT32 Index);

/* Fetches the context whose header matches ContextIdentifier. */
VOID
GetApcContext(_Out_ PVOID* Context, _In_ UINT32 ContextIdentifier);

/* Frees Context when its queued-APC count is zero; caller must hold the
 * driver configuration lock. */
BOOLEAN
FreeApcContextStructure(_Inout_ PAPC_CONTEXT_HEADER Context);

/* Increments the queued-APC counter for the given context id. */
VOID
IncrementApcCount(_In_ UINT32 ContextId);

/* Frees a completed APC and decrements its context's counter. */
VOID
FreeApcAndDecrementApcCount(_Inout_ PRKAPC Apc, _In_ UINT32 ContextId);

/* Polls open contexts and frees those with no outstanding APCs. */
NTSTATUS
QueryActiveApcContextsForCompletion();

/* Stores a context in the first free slot of the context array. */
VOID
InsertApcContext(_In_ PVOID Context);

/* Unload-time teardown of all remaining APC contexts. */
BOOLEAN
DrvUnloadFreeAllApcContextStructures();

#endif
================================================
FILE: driver/arch.asm
================================================
.code
; Tests the emulation of the INVD instruction
;
; source and references:
;
; https://secret.club/2020/04/13/how-anti-cheats-detect-system-emulation.html#invdwbinvd
; https://www.felixcloutier.com/x86/invd
; https://www.felixcloutier.com/x86/wbinvd
;
; Returns int
; Detects INVD emulation: returns 0 on real hardware, non-zero when the
; platform fails to discard (rather than write back) the cache line.
TestINVDEmulation PROC
pushfq ; preserve RFLAGS, since CLI below clears IF
cli ; keep interrupts away from the cache experiment
push 1 ; push some dummy data onto the stack which will exist in writeback cache
wbinvd ; flush the internal cpu caches and write back all modified cache
; lines to main memory
mov byte ptr [rsp], 0 ; set our dummy value to 0, this takes place inside writeback memory
invd ; flush the internal caches, however this instruction will not write
; back to system memory as opposed to wbinvd, meaning our previous
; instruction which only operated on cached writeback data and not
; system memory has been invalidated.
pop rax ; on a real system as a result of our data update instruction being
; invalidated, the result will be 1. On a system that does not
; properly implement INVD, the result will be 0 as the instruction does
; not properly flush the caches.
xor rax, 1 ; invert result so function returns same way as all verification methods
popfq ; restore RFLAGS (re-enables interrupts if they were enabled)
ret
TestINVDEmulation ENDP
END
================================================
FILE: driver/callbacks.c
================================================
#include "callbacks.h"
#include "containers/map.h"
#include "containers/tree.h"
#include "crypt.h"
#include "driver.h"
#include "imports.h"
#include "lib/stdlib.h"
#include "modules.h"
#include "pool.h"
#include "session.h"
#include "thread.h"
#include "util.h"
#define PROCESS_HASHMAP_BUCKET_COUNT 101
STATIC
BOOLEAN
EnumHandleCallback(
_In_ PHANDLE_TABLE HandleTable,
_In_ PHANDLE_TABLE_ENTRY Entry,
_In_ HANDLE Handle,
_In_ PVOID Context);
#ifdef ALLOC_PRAGMA
# pragma alloc_text(PAGE, ObPostOpCallbackRoutine)
# pragma alloc_text(PAGE, ObPreOpCallbackRoutine)
# pragma alloc_text(PAGE, EnumHandleCallback)
# pragma alloc_text(PAGE, EnumerateProcessHandles)
# pragma alloc_text(PAGE, InitialiseThreadList)
# pragma alloc_text(PAGE, ExUnlockHandleTableEntry)
#endif
/*
 * RB-tree enumeration callback used on unload: drops the references that
 * ThreadCreateNotifyRoutine took on the thread and its owning process.
 */
VOID
CleanupThreadListFreeCallback(_In_ PTHREAD_LIST_ENTRY ThreadListEntry)
{
    ImpObDereferenceObject(ThreadListEntry->thread);
    ImpObDereferenceObject(ThreadListEntry->owning_process);
}
/*
 * Marks the process hashmap inactive (so the notify callback becomes a
 * no-op) before removing the process-creation callback itself.
 */
VOID
UnregisterProcessCreateNotifyRoutine()
{
    RtlHashmapSetInactive(GetProcessHashmap());
    ImpPsSetCreateProcessNotifyRoutine(ProcessCreateNotifyRoutine, TRUE);
}
/*
 * Deactivates the driver list then removes the image-load callback.
 * NOTE(review): this calls PsRemoveLoadImageNotifyRoutine directly while
 * the sibling unregister routines go through the Imp* wrappers — confirm
 * whether an Imp wrapper exists and should be used here for consistency.
 */
VOID
UnregisterImageLoadNotifyRoutine()
{
    InterlockedExchange(&GetDriverList()->active, FALSE);
    PsRemoveLoadImageNotifyRoutine(ImageLoadNotifyRoutineCallback);
}
/*
 * Deactivates the thread tree (so the notify callback becomes a no-op)
 * before removing the thread-creation callback itself.
 */
VOID
UnregisterThreadCreateNotifyRoutine()
{
    InterlockedExchange(&GetThreadTree()->active, FALSE);
    ImpPsRemoveCreateThreadNotifyRoutine(ThreadCreateNotifyRoutine);
}
/*
 * Unload-time teardown of the thread tree: dereference every tracked
 * thread/process, then destroy the tree itself.
 */
VOID
CleanupThreadListOnDriverUnload()
{
    RtlRbTreeEnumerate(GetThreadTree(), CleanupThreadListFreeCallback, NULL);
    RtlRbTreeDeleteTree(GetThreadTree());
}
/*
 * Unload-time teardown of the driver list: pops and frees every
 * DRIVER_LIST_ENTRY under the list lock.
 */
VOID
CleanupDriverListOnDriverUnload()
{
    PLIST_ENTRY entry = NULL;
    PDRIVER_LIST_HEAD head = GetDriverList();
    PDRIVER_LIST_ENTRY driver = NULL;
    ImpKeAcquireGuardedMutex(&head->lock);
    while (!IsListEmpty(&head->list_entry)) {
        entry = RemoveHeadList(&head->list_entry);
        driver = CONTAINING_RECORD(entry, DRIVER_LIST_ENTRY, list_entry);
        /* Free the containing record, not the embedded LIST_ENTRY: the
         * pool allocation starts at the DRIVER_LIST_ENTRY, and the two
         * addresses only coincide when list_entry happens to be the
         * first member. The original freed `entry`. */
        ExFreePoolWithTag(driver, POOL_TAG_DRIVER_LIST);
    }
    ImpKeReleaseGuardedMutex(&head->lock);
}
/*
 * Walks the driver list under its lock, invoking CallbackRoutine on each
 * entry with the caller-supplied Context.
 */
VOID
EnumerateDriverListWithCallbackRoutine(
    _In_ DRIVERLIST_CALLBACK_ROUTINE CallbackRoutine, _In_opt_ PVOID Context)
{
    NT_ASSERT(CallbackRoutine != NULL);
    PDRIVER_LIST_HEAD head = GetDriverList();
    PLIST_ENTRY node = NULL;
    ImpKeAcquireGuardedMutex(&head->lock);
    if (CallbackRoutine) {
        for (node = head->list_entry.Flink; node != &head->list_entry;
             node = node->Flink) {
            CallbackRoutine(
                CONTAINING_RECORD(node, DRIVER_LIST_ENTRY, list_entry),
                Context);
        }
    }
    ImpKeReleaseGuardedMutex(&head->lock);
}
/*
 * Copies the fields of a driver-list entry into the extended-module-info
 * layout used by user mode.
 * NOTE(review): the copy length is sizeof(Extended->FullPathName); this
 * assumes Entry->path is at least that large — confirm against the
 * DRIVER_LIST_ENTRY declaration.
 */
VOID
DriverListEntryToExtendedModuleInfo(
    _In_ PDRIVER_LIST_ENTRY Entry, _Out_ PRTL_MODULE_EXTENDED_INFO Extended)
{
    Extended->ImageBase = Entry->ImageBase;
    Extended->ImageSize = Entry->ImageSize;
    IntCopyMemory(
        Extended->FullPathName,
        Entry->path,
        sizeof(Extended->FullPathName));
}
/*
 * Builds the initial list of loaded kernel modules and hashes each
 * module's text section. 32-bit modules cannot be hashed yet
 * (HashModule returns STATUS_INVALID_IMAGE_WIN_32) and are placed on a
 * deferred list to be hashed later via the work item allocated here.
 *
 * Returns STATUS_INSUFFICIENT_RESOURCES when the work item cannot be
 * allocated, the enumeration status on failure, otherwise
 * STATUS_SUCCESS.
 */
NTSTATUS
InitialiseDriverList()
{
    PAGED_CODE();
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    SYSTEM_MODULES modules = {0};
    PDRIVER_LIST_ENTRY entry = NULL;
    PRTL_MODULE_EXTENDED_INFO module_entry = NULL;
    PDRIVER_LIST_HEAD head = GetDriverList();
    InterlockedExchange(&head->active, TRUE);
    InitializeListHead(&head->list_entry);
    InitializeListHead(&head->deferred_list);
    KeInitializeGuardedMutex(&head->lock);
    head->can_hash_x86 = FALSE;
    /* Work item later queued (see ProcessCreateNotifyRoutine) to hash
     * the deferred x86 modules. */
    head->work_item = IoAllocateWorkItem(GetDriverDeviceObject());
    if (!head->work_item)
        return STATUS_INSUFFICIENT_RESOURCES;
    status = GetSystemModuleInformation(&modules);
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("GetSystemModuleInformation failed with status %x", status);
        return status;
    }
    KeAcquireGuardedMutex(&head->lock);
    /* skip hal.dll and ntoskrnl.exe */
    for (UINT32 index = 2; index < modules.module_count; index++) {
        entry = ImpExAllocatePool2(
            POOL_FLAG_NON_PAGED,
            sizeof(DRIVER_LIST_ENTRY),
            POOL_TAG_DRIVER_LIST);
        /* best-effort: skip modules we cannot allocate a node for */
        if (!entry)
            continue;
        module_entry = &((PRTL_MODULE_EXTENDED_INFO)modules.address)[index];
        entry->hashed = TRUE;
        entry->ImageBase = module_entry->ImageBase;
        entry->ImageSize = module_entry->ImageSize;
        IntCopyMemory(
            entry->path,
            module_entry->FullPathName,
            sizeof(module_entry->FullPathName));
        status = HashModule(module_entry, entry->text_hash);
        if (status == STATUS_INVALID_IMAGE_WIN_32) {
            DEBUG_ERROR(
                "32 bit module not hashed, will hash later. %x",
                status);
            entry->hashed = FALSE;
            entry->x86 = TRUE;
            /* queued for deferred hashing once x86 hashing is possible */
            InsertHeadList(&head->deferred_list, &entry->deferred_entry);
        }
        else if (!NT_SUCCESS(status)) {
            DEBUG_ERROR("HashModule failed with status %x", status);
            entry->hashed = FALSE;
        }
        /* entry is tracked even when hashing failed */
        InsertHeadList(&head->list_entry, &entry->list_entry);
    }
    KeReleaseGuardedMutex(&head->lock);
    head->active = TRUE;
    if (modules.address)
        ImpExFreePoolWithTag(modules.address, SYSTEM_MODULES_POOL);
    return STATUS_SUCCESS;
}
/*
* I actually think a spinlock here for the driver list is what we want rather
* then a mutex, but implementing a spinlock has its challenges... todo: have a
* think!
*/
/*
 * Searches the driver list for the entry whose ImageBase equals
 * ImageBase. *Entry receives the match, or NULL when no entry has that
 * base. The list lock is held for the duration of the scan.
 */
VOID
FindDriverEntryByBaseAddress(
    _In_ PVOID ImageBase, _Out_ PDRIVER_LIST_ENTRY* Entry)
{
    NT_ASSERT(ImageBase != NULL);
    NT_ASSERT(Entry != NULL);
    PDRIVER_LIST_HEAD head = GetDriverList();
    PLIST_ENTRY node = NULL;
    PDRIVER_LIST_ENTRY candidate = NULL;
    *Entry = NULL;
    ImpKeAcquireGuardedMutex(&head->lock);
    for (node = head->list_entry.Flink; node != &head->list_entry;
         node = node->Flink) {
        candidate = CONTAINING_RECORD(node, DRIVER_LIST_ENTRY, list_entry);
        if (candidate->ImageBase == ImageBase) {
            *Entry = candidate;
            break;
        }
    }
    ImpKeReleaseGuardedMutex(&head->lock);
}
/*
 * Hashmap key comparator: both arguments point at HANDLE (process id)
 * keys; equal handles mean a match.
 */
STATIC
BOOLEAN
ProcessHashmapCompareFunction(_In_ PVOID Struct1, _In_ PVOID Struct2)
{
    NT_ASSERT(Struct1 != NULL);
    NT_ASSERT(Struct2 != NULL);
    return (*(PHANDLE)Struct1 == *(PHANDLE)Struct2) ? TRUE : FALSE;
}
/*
 * Hashmap hash function: truncate the key and take it modulo the bucket
 * count.
 */
STATIC
UINT32
ProcessHashmapHashFunction(_In_ UINT64 Key)
{
    UINT32 truncated = (UINT32)Key;
    return truncated % PROCESS_HASHMAP_BUCKET_COUNT;
}
/*
 * Records a user-mode image load in the per-process module hashmap:
 * allocates a PROCESS_MAP_MODULE_ENTRY from the lookaside pool and
 * appends it to the owning process's module list.
 */
STATIC
VOID
ImageLoadInsertNonSystemImageIntoProcessHashmap(
    _In_ PIMAGE_INFO ImageInfo,
    _In_ HANDLE ProcessId,
    _In_opt_ PUNICODE_STRING FullImageName)
{
    INT32 index = 0;
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    PEPROCESS process = NULL;
    PRTL_HASHMAP map = GetProcessHashmap();
    PPROCESS_LIST_ENTRY entry = NULL;
    PPROCESS_MAP_MODULE_ENTRY module = NULL;
    PPROCESS_MODULE_MAP_CONTEXT context = NULL;
    if (!map->active)
        return;
    /* Only used to validate that ProcessId still names a live process;
     * the reference it takes is dropped before returning (the original
     * leaked it on every call). */
    status = PsLookupProcessByProcessId(ProcessId, &process);
    if (!NT_SUCCESS(status))
        return;
    index = RtlHashmapHashKeyAndAcquireBucket(map, ProcessId);
    if (index == STATUS_INVALID_HASHMAP_INDEX)
        goto deref;
    entry = RtlHashmapEntryLookup(GetProcessHashmap(), index, &ProcessId);
    /* critical error has occured */
    if (!entry) {
        DEBUG_ERROR("RtlLookupEntryHashmap failed.");
        goto end;
    }
    context = (PPROCESS_MODULE_MAP_CONTEXT)map->context;
    module = ExAllocateFromLookasideListEx(&context->pool);
    if (!module)
        goto end;
    /* for now lets just do base and size */
    module->base = ImageInfo->ImageBase;
    module->size = ImageInfo->ImageSize;
    /*
     * 1. We dont care if this errors
     * 2. There is a bug with the conversion need 2 look into...
     */
    if (FullImageName)
        UnicodeToCharBufString(
            FullImageName,
            module->path,
            sizeof(module->path));
    InsertTailList(&entry->module_list, &module->entry);
    entry->list_count++;
end:
    RtlHashmapReleaseBucket(map, index);
deref:
    ImpObDereferenceObject(process);
}
/*
 * Image-load notify callback. User-mode images are recorded in the
 * per-process module hashmap; new system images are appended to the
 * driver list with their text section hashed (x86 images are marked for
 * deferred hashing).
 *
 * Removed from the original: an unused `hash:` label, the unused
 * `ansi_path` local, and UNREFERENCED_PARAMETER(ProcessId) — ProcessId
 * IS referenced (it is forwarded to the hashmap insert).
 */
VOID
ImageLoadNotifyRoutineCallback(
    _In_opt_ PUNICODE_STRING FullImageName,
    _In_ HANDLE ProcessId,
    _In_ PIMAGE_INFO ImageInfo)
{
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    PDRIVER_LIST_ENTRY entry = NULL;
    RTL_MODULE_EXTENDED_INFO module = {0};
    PDRIVER_LIST_HEAD head = GetDriverList();
    if (InterlockedExchange(&head->active, head->active) == FALSE)
        return;
    if (ImageInfo->SystemModeImage == FALSE) {
        ImageLoadInsertNonSystemImageIntoProcessHashmap(
            ImageInfo,
            ProcessId,
            FullImageName);
        return;
    }
    FindDriverEntryByBaseAddress(ImageInfo->ImageBase, &entry);
    /* image already tracked -> nothing to do */
    if (entry)
        return;
    entry = ExAllocatePool2(
        POOL_FLAG_NON_PAGED,
        sizeof(DRIVER_LIST_ENTRY),
        POOL_TAG_DRIVER_LIST);
    if (!entry)
        return;
    entry->hashed = TRUE;
    entry->x86 = FALSE;
    entry->ImageBase = ImageInfo->ImageBase;
    entry->ImageSize = ImageInfo->ImageSize;
    module.ImageBase = ImageInfo->ImageBase;
    module.ImageSize = ImageInfo->ImageSize;
    if (FullImageName) {
        UnicodeToCharBufString(
            FullImageName,
            module.FullPathName,
            sizeof(module.FullPathName));
        IntCopyMemory(
            entry->path,
            module.FullPathName,
            sizeof(module.FullPathName));
    }
    DEBUG_VERBOSE("New system image ansi: %s", entry->path);
    status = HashModule(&module, &entry->text_hash);
    if (status == STATUS_INVALID_IMAGE_WIN_32) {
        DEBUG_ERROR("32 bit module not hashed, will hash later. %x", status);
        entry->x86 = TRUE;
        entry->hashed = FALSE;
    }
    else if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("HashModule failed with status %x", status);
        entry->hashed = FALSE;
    }
    KeAcquireGuardedMutex(&head->lock);
    InsertHeadList(&head->list_entry, &entry->list_entry);
    KeReleaseGuardedMutex(&head->lock);
}
/* assumes map lock is held */
VOID
FreeProcessEntryModuleList(
_In_ PPROCESS_LIST_ENTRY Entry, _In_opt_ PVOID Context)
{
UNREFERENCED_PARAMETER(Context);
NT_ASSERT(Entry != NULL);
PRTL_HASHMAP map = GetProcessHashmap();
PLIST_ENTRY list = NULL;
PPROCESS_MAP_MODULE_ENTRY list_entry = NULL;
PPROCESS_MODULE_MAP_CONTEXT context = map->context;
while (!IsListEmpty(&Entry->module_list)) {
list = RemoveTailList(&Entry->module_list);
list_entry = CONTAINING_RECORD(list, PROCESS_MAP_MODULE_ENTRY, entry);
ExFreeToLookasideListEx(&context->pool, list_entry);
}
}
/*
 * Invokes Callback on each module recorded for ProcessId until the
 * callback returns TRUE or the list is exhausted. The bucket lock is
 * held across the whole enumeration. (The unused local `ret` from the
 * original was removed.)
 */
VOID
EnumerateProcessModuleList(
    _In_ HANDLE ProcessId,
    _In_ PROCESS_MODULE_CALLBACK Callback,
    _In_opt_ PVOID Context)
{
    INT32 index = 0;
    PRTL_HASHMAP map = GetProcessHashmap();
    PPROCESS_LIST_ENTRY entry = NULL;
    PLIST_ENTRY list = NULL;
    PPROCESS_MAP_MODULE_ENTRY module = NULL;
    if (!map->active)
        return;
    index = RtlHashmapHashKeyAndAcquireBucket(map, ProcessId);
    if (index == STATUS_INVALID_HASHMAP_INDEX)
        return;
    entry = RtlHashmapEntryLookup(map, index, &ProcessId);
    if (!entry)
        goto end;
    for (list = entry->module_list.Flink; list != &entry->module_list;
         list = list->Flink) {
        module = CONTAINING_RECORD(list, PROCESS_MAP_MODULE_ENTRY, entry);
        /* TRUE from the callback terminates the enumeration early */
        if (Callback(module, Context))
            goto end;
    }
end:
    RtlHashmapReleaseBucket(map, index);
}
/*
 * Locates the active session's own user-mode module (matched by base
 * address and size) in the process hashmap and invokes Callback on it.
 */
VOID
FindOurUserModeModuleEntry(
    _In_ PROCESS_MODULE_CALLBACK Callback, _In_opt_ PVOID Context)
{
    NT_ASSERT(Callback != NULL);
    INT32 index = 0;
    PRTL_HASHMAP map = GetProcessHashmap();
    PPROCESS_LIST_ENTRY entry = NULL;
    PACTIVE_SESSION session = GetActiveSession();
    PLIST_ENTRY list = NULL;
    PPROCESS_MAP_MODULE_ENTRY module = NULL;
    if (!map->active)
        return;
    index = RtlHashmapHashKeyAndAcquireBucket(map, session->km_handle);
    if (index == STATUS_INVALID_HASHMAP_INDEX)
        return;
    entry = RtlHashmapEntryLookup(map, index, &session->km_handle);
    /* Bug fix: the original did a bare `return` here, leaving the bucket
     * lock acquired above held forever. */
    if (!entry)
        goto end;
    for (list = entry->module_list.Flink; list != &entry->module_list;
         list = list->Flink) {
        module = CONTAINING_RECORD(list, PROCESS_MAP_MODULE_ENTRY, entry);
        if (module->base == session->module.base_address &&
            module->size == session->module.size) {
            Callback(module, Context);
            goto end;
        }
    }
end:
    RtlHashmapReleaseBucket(map, index);
}
/*
 * Unload-time teardown of the process hashmap: marks it inactive, frees
 * every per-process module list, frees all bucket entries, then destroys
 * the lookaside pool and the map itself.
 */
VOID
CleanupProcessHashmap()
{
    PRTL_HASHMAP map = GetProcessHashmap();
    PRTL_HASHMAP_ENTRY entry = NULL;
    PRTL_HASHMAP_ENTRY temp = NULL;
    PLIST_ENTRY list = NULL;
    PPROCESS_MODULE_MAP_CONTEXT context = NULL;
    /* stop the notify callbacks from touching the map while we tear it
     * down */
    RtlHashmapSetInactive(map);
    /* First, free all module lists */
    RtlHashmapEnumerate(map, FreeProcessEntryModuleList, NULL);
    /* Then drain each bucket's entry chain under its own lock */
    for (UINT32 index = 0; index < map->bucket_count; index++) {
        entry = &map->buckets[index];
        KeAcquireGuardedMutex(&map->locks[index]);
        while (!IsListEmpty(&entry->entry)) {
            list = RemoveHeadList(&entry->entry);
            temp = CONTAINING_RECORD(list, RTL_HASHMAP_ENTRY, entry);
            ExFreePoolWithTag(temp, POOL_TAG_HASHMAP);
        }
        KeReleaseGuardedMutex(&map->locks[index]);
    }
    /* finally destroy the backing pool and the map structure */
    context = map->context;
    ExDeleteLookasideListEx(&context->pool);
    ExFreePoolWithTag(map->context, POOL_TAG_HASHMAP);
    RtlHashmapDelete(map);
}
/*
 * Creates the process hashmap and its lookaside-backed module-entry
 * pool. On any failure all partially-created resources are released.
 *
 * Returns STATUS_INSUFFICIENT_RESOURCES, a lookaside/창hashmap creation
 * status on failure, otherwise STATUS_SUCCESS.
 */
NTSTATUS
InitialiseProcessHashmap()
{
    PAGED_CODE();
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    PPROCESS_MODULE_MAP_CONTEXT context = NULL;
    /* context owns the lookaside pool used for module entries */
    context = ExAllocatePool2(
        POOL_FLAG_NON_PAGED,
        sizeof(PROCESS_MODULE_MAP_CONTEXT),
        POOL_TAG_HASHMAP);
    if (!context)
        return STATUS_INSUFFICIENT_RESOURCES;
    status = ExInitializeLookasideListEx(
        &context->pool,
        NULL,
        NULL,
        NonPagedPoolNx,
        0,
        sizeof(PROCESS_MAP_MODULE_ENTRY),
        POOL_TAG_MODULE_LIST,
        0);
    if (!NT_SUCCESS(status)) {
        ExFreePoolWithTag(context, POOL_TAG_HASHMAP);
        return status;
    }
    status = RtlHashmapCreate(
        PROCESS_HASHMAP_BUCKET_COUNT,
        sizeof(PROCESS_LIST_ENTRY),
        ProcessHashmapHashFunction,
        ProcessHashmapCompareFunction,
        context,
        GetProcessHashmap());
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("RtlCreateHashmap: %lx", status);
        /* unwind the lookaside list and its owning context */
        ExDeleteLookasideListEx(&context->pool);
        ExFreePoolWithTag(context, POOL_TAG_HASHMAP);
        return status;
    }
    return status;
}
/*
 * RB-tree comparator: orders nodes by thread id. Key is the probe value,
 * Object is the stored node's key.
 */
STATIC
UINT32
ThreadListTreeCompare(_In_ PVOID Key, _In_ PVOID Object)
{
    NT_ASSERT(Key != NULL);
    NT_ASSERT(Object != NULL);
    HANDLE node_tid = *((PHANDLE)Object);
    HANDLE probe_tid = *((PHANDLE)Key);
    if (probe_tid == node_tid)
        return RB_TREE_EQUAL;
    return (probe_tid < node_tid) ? RB_TREE_LESS_THAN : RB_TREE_GREATER_THAN;
}
/*
 * Creates the RB tree that tracks every thread in the system and marks
 * it active so the thread-creation callback starts populating it.
 */
NTSTATUS
InitialiseThreadList()
{
    PRB_TREE tree = GetThreadTree();
    NTSTATUS status = RtlRbTreeCreate(
        ThreadListTreeCompare,
        sizeof(THREAD_LIST_ENTRY),
        tree);
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("RtlRbTreeCreate: %x", status);
        return status;
    }
    tree->active = TRUE;
    return status;
}
/*
 * Looks up the tree node for ThreadId under the tree lock. *Entry
 * receives the node, or NULL when the thread is not tracked.
 */
VOID
FindThreadListEntryByThreadAddress(
    _In_ HANDLE ThreadId, _Out_ PTHREAD_LIST_ENTRY* Entry)
{
    PRB_TREE tree = GetThreadTree();
    RtlRbTreeAcquireLock(tree);
    *Entry = RtlRbTreeFindNodeObject(tree, &ThreadId);
    RtlRbTreeReleaselock(tree);
}
/*
 * Deferred x86 hashing may start once winlogon.exe is seen and the work
 * item allocated at init time is available to queue.
 */
FORCEINLINE
STATIC
BOOLEAN
CanInitiateDeferredHashing(_In_ LPCSTR ProcessName, _In_ PDRIVER_LIST_HEAD Head)
{
    if (IntCompareString(ProcessName, "winlogon.exe"))
        return FALSE;
    return Head->work_item ? TRUE : FALSE;
}
#ifdef DEBUG
/*
 * Debug-only hashmap enumeration callback: dumps a process entry and its
 * recorded modules to the verbose log.
 */
STATIC
VOID
PrintHashmapCallback(_In_ PPROCESS_LIST_ENTRY Entry, _In_opt_ PVOID Context)
{
    UNREFERENCED_PARAMETER(Context);
    PLIST_ENTRY node = NULL;
    PPROCESS_MAP_MODULE_ENTRY mod_entry = NULL;
    DEBUG_VERBOSE("Process ID: %p", Entry->process_id);
    for (node = Entry->module_list.Flink; node != &Entry->module_list;
         node = node->Flink) {
        mod_entry = CONTAINING_RECORD(node, PROCESS_MAP_MODULE_ENTRY, entry);
        DEBUG_VERBOSE(
            " -> Module Base: %p, size: %lx, path: %s",
            (PVOID)mod_entry->base,
            mod_entry->size,
            mod_entry->path);
    }
}
/*
 * Debug helper: logs every process entry (and its modules) currently in
 * the process hashmap.
 */
VOID
EnumerateAndPrintProcessHashmap()
{
    RtlHashmapEnumerate(GetProcessHashmap(), PrintHashmapCallback, NULL);
}
#endif
/*
 * Process create/exit notify callback. On create, inserts a hashmap
 * entry owning references to the process and its parent; on exit, drops
 * those references and deletes the entry.
 *
 * Fixes over the original:
 *  - the exit-path lookup passed ProcessId where the bucket index is
 *    expected (every other call site passes `index`);
 *  - the references taken by the two Imp lookups were leaked on every
 *    early return and on the exit path (they are only owned by the map
 *    on a successful insert).
 */
VOID
ProcessCreateNotifyRoutine(
    _In_ HANDLE ParentId, _In_ HANDLE ProcessId, _In_ BOOLEAN Create)
{
    INT32 index = 0;
    PKPROCESS parent = NULL;
    PKPROCESS process = NULL;
    PDRIVER_LIST_HEAD driver_list = GetDriverList();
    LPCSTR process_name = NULL;
    PRTL_HASHMAP map = GetProcessHashmap();
    PPROCESS_LIST_ENTRY entry = NULL;
    if (!map->active)
        return;
    ImpPsLookupProcessByProcessId(ParentId, &parent);
    ImpPsLookupProcessByProcessId(ProcessId, &process);
    if (!parent || !process)
        goto deref;
    process_name = ImpPsGetProcessImageFileName(process);
    index = RtlHashmapHashKeyAndAcquireBucket(map, ProcessId);
    if (index == STATUS_INVALID_HASHMAP_INDEX)
        goto deref;
    if (Create) {
        entry = RtlHashmapEntryInsert(map, index);
        if (!entry)
            goto end;
        entry->process_id = ProcessId;
        entry->process = process;
        entry->parent = parent;
        InitializeListHead(&entry->module_list);
        entry->list_count = 0;
        /* ownership of both references moved into the map entry */
        parent = NULL;
        process = NULL;
        /*
         * Notify to our driver that we can hash x86 modules, and hash
         * any x86 modules that werent hashed.
         */
        if (CanInitiateDeferredHashing(process_name, driver_list)) {
            IoQueueWorkItem(
                driver_list->work_item,
                DeferredModuleHashingCallback,
                NormalWorkQueue,
                NULL);
        }
    }
    else {
        entry = RtlHashmapEntryLookup(map, index, &ProcessId);
        if (!entry) {
            DEBUG_ERROR("UNABLE TO FIND PROCESS NODE!!!");
            goto end;
        }
        /* drop the references stored at create time */
        ImpObDereferenceObject(entry->parent);
        ImpObDereferenceObject(entry->process);
        FreeProcessEntryModuleList(entry, NULL);
        RtlHashmapEntryDelete(map, index, &ProcessId);
    }
end:
    RtlHashmapReleaseBucket(map, index);
deref:
    /* release any references this invocation still owns */
    if (parent)
        ImpObDereferenceObject(parent);
    if (process)
        ImpObDereferenceObject(process);
}
/*
 * Thread create/exit notify callback. On create, inserts a tree node
 * owning references to the thread and its process; on exit, drops those
 * references and removes the node.
 *
 * Fix over the original: the references taken by the two Imp lookups
 * were leaked on every early return and on the exit path (they are only
 * owned by the tree node after a successful insert).
 */
VOID
ThreadCreateNotifyRoutine(
    _In_ HANDLE ProcessId, _In_ HANDLE ThreadId, _In_ BOOLEAN Create)
{
    PTHREAD_LIST_ENTRY entry = NULL;
    PKTHREAD thread = NULL;
    PKPROCESS process = NULL;
    PRB_TREE tree = GetThreadTree();
    /* ensure we don't insert new entries if we are unloading */
    if (!tree->active)
        return;
    ImpPsLookupThreadByThreadId(ThreadId, &thread);
    ImpPsLookupProcessByProcessId(ProcessId, &process);
    if (!thread || !process)
        goto deref;
    RtlRbTreeAcquireLock(tree);
    if (Create) {
        entry = RtlRbTreeInsertNode(tree, &ThreadId);
        if (!entry)
            goto end;
        entry->thread_id = ThreadId;
        entry->thread = thread;
        entry->owning_process = process;
        entry->apc = NULL;
        entry->apc_queued = FALSE;
        /* ownership of both references moved into the tree node */
        thread = NULL;
        process = NULL;
    }
    else {
        entry = RtlRbTreeFindNodeObject(tree, &ThreadId);
        if (!entry)
            goto end;
        /* drop the references stored at create time */
        ImpObDereferenceObject(entry->thread);
        ImpObDereferenceObject(entry->owning_process);
        RtlRbTreeDeleteNode(tree, &ThreadId);
    }
end:
    RtlRbTreeReleaselock(tree);
deref:
    /* release any references this invocation still owns */
    if (thread)
        ImpObDereferenceObject(thread);
    if (process)
        ImpObDereferenceObject(process);
}
/*
 * Post-operation object callback. Intentionally a no-op: all handle
 * filtering happens in ObPreOpCallbackRoutine, but a post-op routine is
 * still registered alongside it.
 */
VOID
ObPostOpCallbackRoutine(
    _In_ PVOID RegistrationContext,
    _In_ POB_POST_OPERATION_INFORMATION OperationInformation)
{
    PAGED_CODE();
    UNREFERENCED_PARAMETER(RegistrationContext);
    UNREFERENCED_PARAMETER(OperationInformation);
}
#define MAX_PROCESS_NAME_LENGTH 30
/* Processes whose handle opens to the protected process are tolerated at
 * open time; their handles are downgraded later by the handle-table
 * scan. Index macros name the row of each process. */
#define PROCESS_HANDLE_OPEN_DOWNGRADE_COUNT 4
#define DOWNGRADE_LSASS 0
#define DOWNGRADE_CSRSS 1
#define DOWNGRADE_WERFAULT 2
#define DOWNGRADE_MSMPENG 3
CHAR PROCESS_HANDLE_OPEN_DOWNGRADE[PROCESS_HANDLE_OPEN_DOWNGRADE_COUNT]
                                  [MAX_PROCESS_NAME_LENGTH] = {
                                      "lsass.exe",
                                      "csrss.exe",
                                      "WerFault.exe",
                                      "MsMpEng.exe"};
/* Processes whose handle opens are access-stripped but not reported (see
 * ObPreOpCallbackRoutine). */
#define PROCESS_HANDLE_OPEN_WHITELIST_COUNT 3
CHAR PROCESS_HANDLE_OPEN_WHITELIST[PROCESS_HANDLE_OPEN_WHITELIST_COUNT]
                                  [MAX_PROCESS_NAME_LENGTH] = {
                                      "Discord.exe",
                                      "svchost.exe",
                                      "explorer.exe"};
/*
 * TRUE when ProcessName appears in the handle-open whitelist (stripped
 * but never reported).
 */
STATIC
BOOLEAN
IsWhitelistedHandleOpenProcess(_In_ LPCSTR ProcessName)
{
    UINT32 slot = 0;
    while (slot < PROCESS_HANDLE_OPEN_WHITELIST_COUNT) {
        if (IntCompareString(
                ProcessName,
                PROCESS_HANDLE_OPEN_WHITELIST[slot]) == 0)
            return TRUE;
        slot++;
    }
    return FALSE;
}
/*
 * TRUE when ProcessName appears in the downgrade list (system processes
 * whose handles are tolerated now and downgraded later).
 */
STATIC
BOOLEAN
IsDowngradeHandleOpenProcess(_In_ LPCSTR ProcessName)
{
    UINT32 slot = 0;
    while (slot < PROCESS_HANDLE_OPEN_DOWNGRADE_COUNT) {
        if (IntCompareString(
                ProcessName,
                PROCESS_HANDLE_OPEN_DOWNGRADE[slot]) == 0)
            return TRUE;
        slot++;
    }
    return FALSE;
}
// https://www.sysnative.com/forums/threads/object-headers-handles-and-types.34987/
/*
 * Decodes the object-header address from a handle-table entry's
 * ObjectPointerBits. The original macro body carried a stray trailing
 * semicolon (which breaks any use in expression context) and left the
 * parameter unparenthesised; both are fixed here.
 */
#define GET_OBJECT_HEADER_FROM_HANDLE(x) (((x) << 4) | 0xffff000000000000)
/*
 * Pre-operation object callback for process-handle create/duplicate.
 * Strips dangerous access rights from handles targeting the protected
 * process and reports offending opens to user mode (unless the creator
 * is whitelisted). Always returns OB_PREOP_SUCCESS — filtering is done
 * purely by rewriting the desired access masks.
 */
OB_PREOP_CALLBACK_STATUS
ObPreOpCallbackRoutine(
    _In_ PVOID RegistrationContext,
    _In_ POB_PRE_OPERATION_INFORMATION OperationInformation)
{
    PAGED_CODE();
    UNREFERENCED_PARAMETER(RegistrationContext);
    /* access mask to completely strip permissions */
    ACCESS_MASK deny_access = SYNCHRONIZE | PROCESS_TERMINATE;
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    PEPROCESS process_creator = PsGetCurrentProcess();
    PEPROCESS protected_process = NULL;
    PEPROCESS target_process = (PEPROCESS)OperationInformation->Object;
    HANDLE process_creator_id = ImpPsGetProcessId(process_creator);
    LONG protected_process_id = 0;
    LPCSTR process_creator_name = NULL;
    LPCSTR target_process_name = NULL;
    LPCSTR protected_process_name = NULL;
    POB_CALLBACKS_CONFIG configuration = NULL;
    UINT32 report_size = 0;
    /*
     * This is to prevent the condition where the thread executing this
     * function is scheduled whilst we are cleaning up the callbacks on
     * driver unload. We must hold the driver config lock to ensure the pool
     * containing the callback configuration lock is not freed
     */
    SessionGetCallbackConfiguration(&configuration);
    if (!configuration)
        return OB_PREOP_SUCCESS;
    ImpKeAcquireGuardedMutex(&configuration->lock);
    SessionGetProcessId(&protected_process_id);
    SessionGetProcess(&protected_process);
    if (!protected_process_id || !protected_process)
        goto end;
    process_creator_name = ImpPsGetProcessImageFileName(process_creator);
    target_process_name = ImpPsGetProcessImageFileName(target_process);
    protected_process_name = ImpPsGetProcessImageFileName(protected_process);
    if (!protected_process_name || !target_process_name)
        goto end;
    /* only handles that target the protected process are of interest */
    if (IntCompareString(protected_process_name, target_process_name))
        goto end;
    /*
     * WerFault is some windows 11 application that cries when it
     * cant get a handle, so well allow it for now... todo; learn
     * more about it
     *
     * todo: perform stricter checks rather then the image name.
     * perhapds check some certificate or something.
     */
    if (IsDowngradeHandleOpenProcess(process_creator_name) ||
        !IntCompareString(process_creator_name, target_process_name)) {
        /* We will downgrade these handles later */
        // DEBUG_LOG("Handles created by CSRSS, LSASS and
        // WerFault are allowed for now...");
    }
    else if (target_process == process_creator) {
        // DEBUG_LOG("handles made by NOTEPAD r okay :)");
        /* handles created by the game (notepad) are okay */
    }
    else {
        OperationInformation->Parameters->CreateHandleInformation
            .DesiredAccess = deny_access;
        OperationInformation->Parameters->DuplicateHandleInformation
            .DesiredAccess = deny_access;
        /*
         * These processes will constantly open handles to any
         * open process for various reasons, so we will still
         * strip them but we won't report them.. for now
         * atleast.
         */
        if (IsWhitelistedHandleOpenProcess(process_creator_name))
            goto end;
        report_size = CryptRequestRequiredBufferLength(
            sizeof(OPEN_HANDLE_FAILURE_REPORT));
        POPEN_HANDLE_FAILURE_REPORT report = ImpExAllocatePool2(
            POOL_FLAG_NON_PAGED,
            report_size,
            REPORT_POOL_TAG);
        if (!report)
            goto end;
        INIT_REPORT_PACKET(report, REPORT_ILLEGAL_HANDLE_OPERATION, 0);
        report->is_kernel_handle = OperationInformation->KernelHandle;
        report->process_id = process_creator_id;
        report->thread_id = ImpPsGetCurrentThreadId();
        report->access = OperationInformation->Parameters
                             ->CreateHandleInformation.DesiredAccess;
        IntCopyMemory(
            report->process_name,
            process_creator_name,
            HANDLE_REPORT_PROCESS_NAME_MAX_LENGTH);
        status = CryptEncryptBuffer(report, report_size);
        if (!NT_SUCCESS(status)) {
            DEBUG_ERROR("CryptEncryptBuffer: %x", status);
            /* Bug fix: the original passed report_size as the pool tag;
             * the allocation above was tagged REPORT_POOL_TAG. */
            ExFreePoolWithTag(report, REPORT_POOL_TAG);
            goto end;
        }
        IrpQueueSchedulePacket(report, report_size);
    }
end:
    ImpKeReleaseGuardedMutex(&configuration->lock);
    return OB_PREOP_SUCCESS;
}
/* stolen from ReactOS xD */
/*
 * Releases the lock bit (bit 0 of VolatileLowValue) on a handle-table
 * entry and wakes any waiters blocked on the table's contention event.
 */
VOID NTAPI
ExUnlockHandleTableEntry(
    IN PHANDLE_TABLE HandleTable, IN PHANDLE_TABLE_ENTRY HandleTableEntry)
{
    INT64 old_value;
    PAGED_CODE();
    /* Set the lock bit and make sure it wasn't earlier */
    old_value = InterlockedOr((PLONG)&HandleTableEntry->VolatileLowValue, 1);
    /* Unblock any waiters */
#pragma warning(push)
/* Bug fix: `disable : C6387` is invalid — MSVC warning pragmas take bare
 * numbers (6387: a parameter could be NULL; the NULL here is deliberate). */
#pragma warning(disable : 6387)
    ImpExfUnblockPushLock(&HandleTable->HandleContentionEvent, NULL);
#pragma warning(pop)
}
/*
 * Extracts the granted access mask from a handle-table entry's
 * GrantedAccessBits field.
 */
FORCEINLINE
STATIC
ACCESS_MASK
GetHandleAccessMask(_In_ PHANDLE_TABLE_ENTRY Entry)
{
    return (ACCESS_MASK)Entry->GrantedAccessBits;
}
/* Object-type names compared against OBJECT_TYPE.Name when classifying
 * handle-table entries in EnumHandleCallback. */
static UNICODE_STRING OBJECT_TYPE_PROCESS = RTL_CONSTANT_STRING(L"Process");
static UNICODE_STRING OBJECT_TYPE_THREAD = RTL_CONSTANT_STRING(L"Thread");
/*
 * Invoked by ExEnumHandleTable for each entry in a process' handle table.
 * If the entry references our protected process, dangerous access rights
 * are stripped from the entry in place and a report is queued. Always
 * returns FALSE so enumeration continues, and always unlocks the entry
 * (ExEnumHandleTable hands it to us locked).
 */
STATIC
BOOLEAN
EnumHandleCallback(
    _In_ PHANDLE_TABLE HandleTable,
    _In_ PHANDLE_TABLE_ENTRY Entry,
    _In_ HANDLE Handle,
    _In_ PVOID Context)
{
    PAGED_CODE();
    UNREFERENCED_PARAMETER(Context);
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    PVOID object = NULL;
    PVOID object_header = NULL;
    POBJECT_TYPE object_type = NULL;
    PEPROCESS process = NULL;
    PEPROCESS protected_process = NULL;
    LPCSTR process_name = NULL;
    LPCSTR protected_process_name = NULL;
    ACCESS_MASK handle_access_mask = 0;
    UINT32 report_size = 0;
    object_header = GET_OBJECT_HEADER_FROM_HANDLE(Entry->ObjectPointerBits);
    /* The object body follows the 0x30 (48) byte object header. */
    object = (PVOID)((uintptr_t)object_header + OBJECT_HEADER_SIZE);
    object_type = ImpObGetObjectType(object);
    /* TODO: check for threads aswell */
    if (ImpRtlCompareUnicodeString(
            &object_type->Name,
            &OBJECT_TYPE_PROCESS,
            TRUE)) {
        goto end;
    }
    process = (PEPROCESS)object;
    process_name = ImpPsGetProcessImageFileName(process);
    SessionGetProcess(&protected_process);
    protected_process_name = ImpPsGetProcessImageFileName(protected_process);
    /* Only handles that reference the protected process are of interest. */
    if (IntCompareString(process_name, protected_process_name))
        goto end;
    DEBUG_VERBOSE(
        "Handle references our protected process with access mask: %lx",
        (ACCESS_MASK)Entry->GrantedAccessBits);
    handle_access_mask = GetHandleAccessMask(Entry);
    /* These permissions can be stripped from every process
     * including CSRSS and LSASS */
    if (handle_access_mask & PROCESS_CREATE_PROCESS) {
        Entry->GrantedAccessBits &= ~PROCESS_CREATE_PROCESS;
        DEBUG_VERBOSE("Stripped PROCESS_CREATE_PROCESS");
    }
    if (handle_access_mask & PROCESS_CREATE_THREAD) {
        Entry->GrantedAccessBits &= ~PROCESS_CREATE_THREAD;
        DEBUG_VERBOSE("Stripped PROCESS_CREATE_THREAD");
    }
    if (handle_access_mask & PROCESS_DUP_HANDLE) {
        Entry->GrantedAccessBits &= ~PROCESS_DUP_HANDLE;
        DEBUG_VERBOSE("Stripped PROCESS_DUP_HANDLE");
    }
    if (handle_access_mask & PROCESS_QUERY_INFORMATION) {
        Entry->GrantedAccessBits &= ~PROCESS_QUERY_INFORMATION;
        DEBUG_VERBOSE("Stripped PROCESS_QUERY_INFORMATION");
    }
    if (handle_access_mask & PROCESS_QUERY_LIMITED_INFORMATION) {
        Entry->GrantedAccessBits &= ~PROCESS_QUERY_LIMITED_INFORMATION;
        DEBUG_VERBOSE("Stripped PROCESS_QUERY_LIMITED_INFORMATION");
    }
    if (handle_access_mask & PROCESS_VM_READ) {
        Entry->GrantedAccessBits &= ~PROCESS_VM_READ;
        DEBUG_VERBOSE("Stripped PROCESS_VM_READ");
    }
    /* Critical system processes keep the remaining rights and are not
     * reported. */
    if (!IntCompareString(process_name, "csrss.exe") ||
        !IntCompareString(process_name, "lsass.exe")) {
        DEBUG_VERBOSE(
            "Required system process allowed, only stripping some permissions");
        goto end;
    }
    /* Permissions beyond here can only be stripped from non
     * critical processes */
    if (handle_access_mask & PROCESS_SET_INFORMATION) {
        Entry->GrantedAccessBits &= ~PROCESS_SET_INFORMATION;
        DEBUG_VERBOSE("Stripped PROCESS_SET_INFORMATION");
    }
    if (handle_access_mask & PROCESS_SET_QUOTA) {
        Entry->GrantedAccessBits &= ~PROCESS_SET_QUOTA;
        DEBUG_VERBOSE("Stripped PROCESS_SET_QUOTA");
    }
    if (handle_access_mask & PROCESS_SUSPEND_RESUME) {
        Entry->GrantedAccessBits &= ~PROCESS_SUSPEND_RESUME;
        DEBUG_VERBOSE("Stripped PROCESS_SUSPEND_RESUME ");
    }
    if (handle_access_mask & PROCESS_TERMINATE) {
        Entry->GrantedAccessBits &= ~PROCESS_TERMINATE;
        DEBUG_VERBOSE("Stripped PROCESS_TERMINATE");
    }
    if (handle_access_mask & PROCESS_VM_OPERATION) {
        Entry->GrantedAccessBits &= ~PROCESS_VM_OPERATION;
        DEBUG_VERBOSE("Stripped PROCESS_VM_OPERATION");
    }
    if (handle_access_mask & PROCESS_VM_WRITE) {
        Entry->GrantedAccessBits &= ~PROCESS_VM_WRITE;
        DEBUG_VERBOSE("Stripped PROCESS_VM_WRITE");
    }
    report_size =
        CryptRequestRequiredBufferLength(sizeof(OPEN_HANDLE_FAILURE_REPORT));
    POPEN_HANDLE_FAILURE_REPORT report =
        ImpExAllocatePool2(POOL_FLAG_NON_PAGED, report_size, REPORT_POOL_TAG);
    if (!report)
        goto end;
    /*
     * Using the same report structure as the ObRegisterCallbacks
     * report since both of these reports are closely related by the
     * fact they are triggered by a process either opening a handle
     * to our protected process or have a valid open handle to it. I
     * also don't think its worth creating another queue
     * specifically for open handle reports since they will be rare.
     */
    INIT_REPORT_PACKET(report, REPORT_ILLEGAL_HANDLE_OPERATION, 0);
    report->is_kernel_handle = Entry->Attributes & OBJ_KERNEL_HANDLE;
    report->process_id = ImpPsGetProcessId(process);
    report->thread_id = 0;
    report->access = handle_access_mask;
    IntCopyMemory(
        &report->process_name,
        process_name,
        HANDLE_REPORT_PROCESS_NAME_MAX_LENGTH);
    status = CryptEncryptBuffer(report, report_size);
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("CryptEncryptBuffer: %lx", status);
        /* The second argument is the pool tag, not the allocation size. */
        ImpExFreePoolWithTag(report, REPORT_POOL_TAG);
        goto end;
    }
    IrpQueueSchedulePacket(report, report_size);
end:
    ExUnlockHandleTableEntry(HandleTable, Entry);
    return FALSE;
}
/*
 * Walk every handle owned by the process tracked in Entry, invoking
 * EnumHandleCallback for each handle table entry. The system process is
 * skipped. Returns STATUS_INVALID_ADDRESS if the handle table pointer
 * read from the EPROCESS is NULL or not a valid address.
 */
NTSTATUS
EnumerateProcessHandles(_In_ PPROCESS_LIST_ENTRY Entry, _In_opt_ PVOID Context)
{
    /* Handles are stored in pageable memory */
    PAGED_CODE();
    UNREFERENCED_PARAMETER(Context);
    NT_ASSERT(Entry != NULL);
    if (!Entry)
        return STATUS_INVALID_PARAMETER;
    if (Entry->process == PsInitialSystemProcess)
        return STATUS_SUCCESS;
    /* Read the handle table pointer at its known EPROCESS offset. */
    PHANDLE_TABLE table = *(PHANDLE_TABLE*)((uintptr_t)Entry->process +
                                            EPROCESS_HANDLE_TABLE_OFFSET);
    if (!table || !ImpMmIsAddressValid(table))
        return STATUS_INVALID_ADDRESS;
#pragma warning(push)
#pragma warning(suppress : 6387)
    ImpExEnumHandleTable(table, EnumHandleCallback, NULL, NULL);
#pragma warning(pop)
    return STATUS_SUCCESS;
}
#define REPEAT_TIME_10_SEC 10000
/*
 * Re-hash the user-mode module described by Entry and compare the digest
 * against the hash captured at session start (Context is the active
 * session). Logs an error on hash failure or mismatch; verbose on match.
 */
STATIC
VOID
TimerObjectValidateProcessModuleCallback(
    _In_ PPROCESS_MAP_MODULE_ENTRY Entry, _In_opt_ PVOID Context)
{
    NT_ASSERT(Entry != NULL);
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    PACTIVE_SESSION session = (PACTIVE_SESSION)Context;
    CHAR digest[SHA_256_HASH_LENGTH] = {0};
    if (!ARGUMENT_PRESENT(Context))
        return;
    status = HashUserModule(Entry, digest, sizeof(digest));
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("HashUserModule: %x", status);
        return;
    }
    /* IntCompareMemory yields the count of matching bytes. */
    if (IntCompareMemory(digest, session->module.module_hash, sizeof(digest)) ==
        sizeof(digest)) {
        DEBUG_VERBOSE("User module hash valid.");
        return;
    }
    DEBUG_ERROR("User module hash not matching!! MODIFIED!");
}
/*
 * PASSIVE_LEVEL work item queued by the timer DPC. Runs the periodic
 * integrity checks: driver dispatch-routine validation, driver image
 * validation, and (when a session is active) user module hash checks.
 * Clears timer->state on exit so the DPC may queue the next run.
 */
STATIC
VOID
TimerObjectWorkItemRoutine(
    _In_ PDEVICE_OBJECT DeviceObject, _In_opt_ PVOID Context)
{
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    PTIMER_OBJECT timer = (PTIMER_OBJECT)Context;
    PDRIVER_LIST_HEAD list = GetDriverList();
    PACTIVE_SESSION session = GetActiveSession();
    UNREFERENCED_PARAMETER(DeviceObject);
    if (!ARGUMENT_PRESENT(Context))
        return;
    if (!list->active)
        goto end;
    DEBUG_VERBOSE("Integrity check timer callback invoked.");
    RtlRbTreePrintCurrentStatistics(GetThreadTree());
    /* Previously logged a placeholder ("l"); emit a meaningful message. */
    if (!ValidateOurDriversDispatchRoutines()) {
        DEBUG_ERROR("ValidateOurDriversDispatchRoutines failed.");
    }
    status = ValidateOurDriverImage();
    if (!NT_SUCCESS(status))
        DEBUG_ERROR("ValidateOurDriverImage failed with status %x", status);
    KeAcquireGuardedMutex(&session->lock);
    if (!session->is_session_active) {
        KeReleaseGuardedMutex(&session->lock);
        goto end;
    }
    FindOurUserModeModuleEntry(
        TimerObjectValidateProcessModuleCallback,
        session);
    KeReleaseGuardedMutex(&session->lock);
end:
    /* Mark the work item complete so the DPC can schedule another. */
    InterlockedExchange(&timer->state, FALSE);
}
/*
 * This routine is executed every x seconds, and is run at IRQL = DISPATCH_LEVEL
 */
STATIC
VOID
TimerObjectCallbackRoutine(
    _In_ PKDPC Dpc,
    _In_opt_ PVOID DeferredContext,
    _In_opt_ PVOID SystemArgument1,
    _In_opt_ PVOID SystemArgument2)
{
    UNREFERENCED_PARAMETER(Dpc);
    UNREFERENCED_PARAMETER(SystemArgument1);
    UNREFERENCED_PARAMETER(SystemArgument2);
    NT_ASSERT(DeferredContext != NULL);
    if (!HasDriverLoaded() || !ARGUMENT_PRESENT(DeferredContext))
        return;
    PTIMER_OBJECT timer = (PTIMER_OBJECT)DeferredContext;
    /*
     * We don't want to queue our work item if the previous one hasn't
     * executed. The separate read-then-InterlockedExchange used before was
     * not atomic; claim the slot with a single compare-exchange instead
     * (this file's convention is that the state flag is only touched via
     * Interlocked intrinsics).
     */
    if (InterlockedCompareExchange(&timer->state, TRUE, FALSE) != FALSE)
        return;
    /* we queue a work item because DPCs run at IRQL = DISPATCH_LEVEL and we
     * need certain routines which cannot be run at an IRQL this high.*/
    IoQueueWorkItem(
        timer->work_item,
        TimerObjectWorkItemRoutine,
        BackgroundWorkQueue,
        timer);
}
/*
 * Allocate the timer's work item and start the periodic integrity-check
 * timer: first fire after 5 seconds, then every REPEAT_TIME_10_SEC ms,
 * each expiry queueing TimerObjectCallbackRoutine as a DPC.
 */
NTSTATUS
InitialiseTimerObject(_Out_ PTIMER_OBJECT Timer)
{
    LARGE_INTEGER first_fire = {.QuadPart = -ABSOLUTE(SECONDS(5))};
    PIO_WORKITEM item = IoAllocateWorkItem(GetDriverDeviceObject());
    if (!item)
        return STATUS_MEMORY_NOT_ALLOCATED;
    Timer->work_item = item;
    KeInitializeDpc(&Timer->dpc, TimerObjectCallbackRoutine, Timer);
    KeInitializeTimer(&Timer->timer);
    KeSetTimerEx(&Timer->timer, first_fire, REPEAT_TIME_10_SEC, &Timer->dpc);
    DEBUG_VERBOSE("Successfully initialised global timer callback.");
    return STATUS_SUCCESS;
}
/*
 * Tear down the periodic integrity-check timer. Ordering matters:
 * cancel the timer FIRST so it cannot queue another DPC, then flush
 * in-flight DPCs, then wait for any outstanding work item before freeing
 * it. The previous order (flush before cancel) left a window in which
 * the still-armed periodic timer could fire again and queue the work
 * item after IoFreeWorkItem - a use-after-free.
 */
VOID
CleanupDriverTimerObjects(_Inout_ PTIMER_OBJECT Timer)
{
    /* Stop the periodic timer so no further DPCs are queued. */
    KeCancelTimer(&Timer->timer);
    /* this routine blocks until all queued DPCs on all processors have
     * executed. */
    KeFlushQueuedDpcs();
    /* wait for our work item to complete */
    while (Timer->state)
        YieldProcessor();
    /* now its safe to free our work item, pools etc. */
    IoFreeWorkItem(Timer->work_item);
    DEBUG_VERBOSE("Freed timer objects.");
}
VOID
UnregisterProcessObCallbacks()
{
PAGED_CODE();
PACTIVE_SESSION config = GetActiveSession();
AcquireDriverConfigLock();
if (config->callback_configuration.registration_handle) {
ImpObUnRegisterCallbacks(
config->callback_configuration.registration_handle);
config->callback_configuration.registration_handle = NULL;
}
ReleaseDriverConfigLock();
}
/*
 * Register pre/post operation callbacks for process handle creation and
 * duplication via ObRegisterCallbacks. The registration handle is stored
 * in the active session under the driver config lock.
 */
NTSTATUS
RegisterProcessObCallbacks()
{
    PAGED_CODE();
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    PACTIVE_SESSION session = GetActiveSession();
    OB_CALLBACK_REGISTRATION registration = {0};
    OB_OPERATION_REGISTRATION operation = {0};
    DEBUG_VERBOSE("Enabling ObRegisterCallbacks.");
    AcquireDriverConfigLock();
    /* Watch both handle creation and duplication on process objects. */
    operation.ObjectType = PsProcessType;
    operation.Operations =
        OB_OPERATION_HANDLE_CREATE | OB_OPERATION_HANDLE_DUPLICATE;
    operation.PreOperation = ObPreOpCallbackRoutine;
    operation.PostOperation = ObPostOpCallbackRoutine;
    registration.Version = OB_FLT_REGISTRATION_VERSION;
    registration.OperationRegistration = &operation;
    registration.OperationRegistrationCount = 1;
    registration.RegistrationContext = NULL;
    status = ImpObRegisterCallbacks(
        &registration,
        &session->callback_configuration.registration_handle);
    if (!NT_SUCCESS(status))
        DEBUG_ERROR("ObRegisterCallbacks failed with status %x", status);
    ReleaseDriverConfigLock();
    return status;
}
/*
 * One-time initialisation of the guarded mutex that protects the
 * ob-callback registration handle held in the session configuration.
 */
VOID
InitialiseObCallbacksConfiguration(_Out_ PACTIVE_SESSION ProcessConfig)
{
    POB_CALLBACKS_CONFIG config = &ProcessConfig->callback_configuration;
    ImpKeInitializeGuardedMutex(&config->lock);
}
================================================
FILE: driver/callbacks.h
================================================
#ifndef CALLBACKS_H
#define CALLBACKS_H
#include "driver.h"
#include "common.h"
/* NOTE(review): the include target below was lost (likely an angle-bracket
 * system header stripped during extraction) - restore it before building. */
#include
/* Per-entry callback used when walking the thread list. */
typedef void (*THREADLIST_CALLBACK_ROUTINE)(
    _In_ PTHREAD_LIST_ENTRY ThreadListEntry, _In_opt_ PVOID Context);
#define DRIVER_PATH_LENGTH 0x100
/* NOTE(review): also defined in common.h - keep the two in sync. */
#define SHA_256_HASH_LENGTH 32
/* Tracks one loaded kernel driver: image range, path and .text hash. */
typedef struct _DRIVER_LIST_ENTRY {
    LIST_ENTRY list_entry;
    PVOID ImageBase;
    ULONG ImageSize;
    BOOLEAN hashed;
    BOOLEAN x86;
    CHAR path[DRIVER_PATH_LENGTH];
    CHAR text_hash[SHA_256_HASH_LENGTH];
    /*
     * This LIST_ENTRY is to be used for modules where the hashing needs to
     * be deferred. For example, when x86 modules can't be hashed on driver
     * load.
     */
    LIST_ENTRY deferred_entry;
} DRIVER_LIST_ENTRY, *PDRIVER_LIST_ENTRY;
/* Per-entry callback used when walking the driver list. */
typedef void (*DRIVERLIST_CALLBACK_ROUTINE)(
    _In_ PDRIVER_LIST_ENTRY DriverListEntry, _In_opt_ PVOID Context);
/* Per-module callback used when walking a process' module map. */
typedef BOOLEAN (*PROCESS_MODULE_CALLBACK)(_In_ PPROCESS_MAP_MODULE_ENTRY Entry,
                                           _In_opt_ PVOID Context);
NTSTATUS
InitialiseDriverList();
/* Undocumented kernel routine reimplementation (see callbacks.c). */
VOID NTAPI
ExUnlockHandleTableEntry(IN PHANDLE_TABLE HandleTable,
                         IN PHANDLE_TABLE_ENTRY HandleTableEntry);
/* ObRegisterCallbacks pre/post operation routines for process handles. */
VOID
ObPostOpCallbackRoutine(_In_ PVOID RegistrationContext,
                        _In_ POB_POST_OPERATION_INFORMATION
                            OperationInformation);
OB_PREOP_CALLBACK_STATUS
ObPreOpCallbackRoutine(_In_ PVOID RegistrationContext,
                       _In_ POB_PRE_OPERATION_INFORMATION OperationInformation);
NTSTATUS
InitialiseThreadList();
/* PsSet*NotifyRoutine callbacks. */
VOID
ThreadCreateNotifyRoutine(_In_ HANDLE ProcessId,
                          _In_ HANDLE ThreadId,
                          _In_ BOOLEAN Create);
VOID
ProcessCreateNotifyRoutine(_In_ HANDLE ParentId,
                           _In_ HANDLE ProcessId,
                           _In_ BOOLEAN Create);
VOID
CleanupThreadListOnDriverUnload();
VOID
FindThreadListEntryByThreadAddress(_In_ HANDLE ThreadId,
                                   _Out_ PTHREAD_LIST_ENTRY* Entry);
VOID
FindDriverEntryByBaseAddress(_In_ PVOID ImageBase,
                             _Out_ PDRIVER_LIST_ENTRY* Entry);
VOID
CleanupDriverListOnDriverUnload();
VOID
ImageLoadNotifyRoutineCallback(_In_opt_ PUNICODE_STRING FullImageName,
                               _In_ HANDLE ProcessId,
                               _In_ PIMAGE_INFO ImageInfo);
/* Periodic integrity-check timer lifecycle. */
NTSTATUS
InitialiseTimerObject(_Out_ PTIMER_OBJECT Timer);
VOID
CleanupDriverTimerObjects(_Inout_ PTIMER_OBJECT Timer);
VOID
UnregisterProcessCreateNotifyRoutine();
VOID
UnregisterImageLoadNotifyRoutine();
VOID
UnregisterThreadCreateNotifyRoutine();
VOID
UnregisterProcessObCallbacks();
NTSTATUS
RegisterProcessObCallbacks();
VOID
InitialiseObCallbacksConfiguration(_Out_ PACTIVE_SESSION ProcessConfig);
VOID
EnumerateDriverListWithCallbackRoutine(
    _In_ DRIVERLIST_CALLBACK_ROUTINE CallbackRoutine, _In_opt_ PVOID Context);
VOID
DriverListEntryToExtendedModuleInfo(_In_ PDRIVER_LIST_ENTRY Entry,
                                    _Out_ PRTL_MODULE_EXTENDED_INFO Extended);
/* Process hashmap and handle enumeration helpers. */
NTSTATUS
InitialiseProcessHashmap();
NTSTATUS
EnumerateProcessHandles(_In_ PPROCESS_LIST_ENTRY Entry, _In_opt_ PVOID Context);
VOID
EnumerateAndPrintProcessHashmap();
VOID
CleanupProcessHashmap();
VOID
EnumerateProcessModuleList(_In_ HANDLE ProcessId,
                           _In_ PROCESS_MODULE_CALLBACK Callback,
                           _In_opt_ PVOID Context);
VOID
FindOurUserModeModuleEntry(_In_ PROCESS_MODULE_CALLBACK Callback,
                           _In_opt_ PVOID Context);
#endif
================================================
FILE: driver/common.h
================================================
#ifndef COMMON_H
#define COMMON_H
#include
#include
#include "io.h"
#include "types/types.h"
#include
/*
* For numbers < 32, these are equivalent to 0ul << x.
*
* For an item to be printed, its bitwise AND'd with the set filter. If the
* result is non zero the log will be printed.
*/
#define LOG_ERROR_LEVEL 1
#define LOG_WARNING_LEVEL 2
#define LOG_INFO_LEVEL 3
#define LOG_VERBOSE_LEVEL 4
#define DPFLTR_MASK 0x80000000
#define DEBUG_ERROR(fmt, ...) \
DbgPrintEx(DPFLTR_DEFAULT_ID, \
LOG_ERROR_LEVEL, \
"donna-ac : [ERROR] ::: " fmt "\n", \
##__VA_ARGS__)
#define DEBUG_WARNING(fmt, ...) \
DbgPrintEx(DPFLTR_DEFAULT_ID, \
LOG_WARNING_LEVEL, \
"donna-ac : [WARNING] : " fmt "\n", \
##__VA_ARGS__)
#define DEBUG_INFO(fmt, ...) \
DbgPrintEx(DPFLTR_DEFAULT_ID, \
LOG_INFO_LEVEL, \
"donna-ac : [INFO] :::: " fmt "\n", \
##__VA_ARGS__)
#define DEBUG_VERBOSE(fmt, ...) \
DbgPrintEx(DPFLTR_DEFAULT_ID, \
LOG_VERBOSE_LEVEL, \
"donna-ac : [VERBOSE] : " fmt "\n", \
##__VA_ARGS__)
#define HEX_DUMP(fmt, ...) \
DbgPrintEx(DPFLTR_DEFAULT_ID, LOG_VERBOSE_LEVEL, fmt, ##__VA_ARGS__)
#define STATIC static
#define INLINE inline
#define MAX_MODULE_PATH 260
#define RVA(Cast, Base, Rel) \
((Cast)((DWORD_PTR)(Base) + (DWORD_PTR)(Rel)))
#define ARRAYLEN(len, type) ((len) / sizeof(type))
/*
* Interlocked intrinsics are only atomic with respect to other InterlockedXxx
* functions, so all reads and writes to the THREAD_LIST->active flag must be
* with Interlocked instrinsics to ensure atomicity.
*/
typedef struct _THREAD_LIST_HEAD {
SINGLE_LIST_ENTRY start;
volatile BOOLEAN active;
KGUARDED_MUTEX lock;
LOOKASIDE_LIST_EX lookaside_list;
} THREAD_LIST_HEAD, *PTHREAD_LIST_HEAD;
typedef struct _DRIVER_LIST_HEAD {
LIST_ENTRY list_entry;
volatile ULONG count;
volatile BOOLEAN active;
KGUARDED_MUTEX lock;
/* modules that need to be hashed later. */
PIO_WORKITEM work_item;
LIST_ENTRY deferred_list;
volatile BOOLEAN deferred_complete;
volatile LONG can_hash_x86;
} DRIVER_LIST_HEAD, *PDRIVER_LIST_HEAD;
typedef struct _THREAD_LIST_ENTRY {
HANDLE thread_id;
PKTHREAD thread;
PKPROCESS owning_process;
BOOLEAN apc_queued;
PKAPC apc;
} THREAD_LIST_ENTRY, *PTHREAD_LIST_ENTRY;
typedef struct _PROCESS_MODULE_MAP_CONTEXT {
LOOKASIDE_LIST_EX pool;
} PROCESS_MODULE_MAP_CONTEXT, *PPROCESS_MODULE_MAP_CONTEXT;
typedef struct _PROCESS_MAP_MODULE_ENTRY {
LIST_ENTRY entry;
UINT64 base;
UINT32 size;
CHAR path[MAX_MODULE_PATH];
} PROCESS_MAP_MODULE_ENTRY, *PPROCESS_MAP_MODULE_ENTRY;
typedef struct _PROCESS_LIST_ENTRY {
/* IMPORTANT THIS IS FIRST!*/
HANDLE process_id;
PEPROCESS process;
PEPROCESS parent;
LIST_ENTRY module_list;
volatile UINT32 list_count;
} PROCESS_LIST_ENTRY, *PPROCESS_LIST_ENTRY;
/*
* ioctl_flag consists of the first 16 bits of the Function part of the CTL code
* cookie_value consists of a static 16 bit value generated by the user mode app
* on startup which is then passed to the driver and stored.
*/
typedef union _SECURITY_COOKIE {
struct {
UINT32 ioctl_flag : 16;
UINT32 cookie_value : 16;
} bits;
UINT32 flags;
} SECURITY_COOKIE, *PSECURITY_COOKIE;
/* Periodic integrity-check timer: a KTIMER drives a DPC, which defers the
 * real work to a PASSIVE_LEVEL work item. `state` is the single-slot
 * "work item outstanding" flag shared between the DPC and the work item. */
typedef struct _TIMER_OBJECT {
    /*
     * state = 1: callback in progress
     * state = 0: no callback in progress (i.e safe to free and unregister)
     */
    volatile LONG state;
    KTIMER timer;
    KDPC dpc;
    PIO_WORKITEM work_item;
} TIMER_OBJECT, *PTIMER_OBJECT;
typedef enum _ENVIRONMENT_TYPE {
NativeWindows = 0,
Vmware,
VirtualBox
} ENVIRONMENT_TYPE;
typedef enum _PROCESSOR_TYPE {
Unknown = 0,
GenuineIntel,
AuthenticAmd
} PROCESSOR_TYPE;
#define VENDOR_STRING_MAX_LENGTH 256
#define DRIVER_PATH_MAX_LENGTH 512
#define MOTHERBOARD_SERIAL_CODE_LENGTH 64
#define DEVICE_DRIVE_0_SERIAL_CODE_LENGTH 64
#define MAX_REPORTS_PER_IRP 20
#define POOL_TAG_STRINGS 'strs'
#define IOCTL_STORAGE_QUERY_PROPERTY 0x002D1400
#define MAXIMUM_APC_CONTEXTS 10
typedef struct _SYSTEM_INFORMATION {
CHAR motherboard_serial[MOTHERBOARD_SERIAL_CODE_LENGTH];
CHAR drive_0_serial[DEVICE_DRIVE_0_SERIAL_CODE_LENGTH];
CHAR vendor[VENDOR_STRING_MAX_LENGTH];
BOOLEAN virtualised_environment;
ENVIRONMENT_TYPE environment;
PROCESSOR_TYPE processor;
RTL_OSVERSIONINFOW os_information;
} SYSTEM_INFORMATION, *PSYSTEM_INFORMATION;
typedef struct _OB_CALLBACKS_CONFIG {
PVOID registration_handle;
KGUARDED_MUTEX lock;
} OB_CALLBACKS_CONFIG, *POB_CALLBACKS_CONFIG;
typedef struct _DEFERRED_REPORT {
LIST_ENTRY list_entry;
PVOID buffer;
UINT32 buffer_size;
} DEFERRED_REPORT, *PDEFERRED_REPORT;
typedef struct _DEFERRED_REPORTS_LIST {
LIST_ENTRY head;
UINT32 count;
KGUARDED_MUTEX lock;
} DEFERRED_REPORTS_LIST, *PDEFERRED_REPORTS_LIST;
#define EVENT_COUNT 5
typedef struct _IRP_QUEUE_HEAD {
LIST_ENTRY queue;
volatile UINT32 irp_count;
volatile UINT32 total_reports_completed;
volatile UINT32 total_irps_completed;
volatile UINT32 total_heartbeats_completed;
IO_CSQ csq;
KGUARDED_MUTEX lock;
DEFERRED_REPORTS_LIST deferred_reports;
} IRP_QUEUE_HEAD, *PIRP_QUEUE_HEAD;
typedef struct _IRP_QUEUE_ENTRY {
SINGLE_LIST_ENTRY entry;
PIRP irp;
} IRP_QUEUE_ENTRY, *PIRP_QUEUE_ENTRY;
/*
* This structure can change at anytime based on whether
* the target process to protect is open / closed / changes etc.
*/
#define AES_256_KEY_SIZE 32
#define AES_256_IV_SIZE 16
typedef struct _HEARTBEAT_CONFIGURATION {
volatile UINT32 counter;
/* Signifies if a heartbeat callback routine is currently executing. */
volatile UINT32 active;
LARGE_INTEGER seed;
/*
* We actually want the timer and DPC objects to be allocated, so that each
* time our heartbeat callback routine is run, we can remove the timer and
* add a new timer. This makes it harder to identify our heartbeat timers.
*/
PKTIMER timer;
PKDPC dpc;
PIO_WORKITEM work_item;
} HEARTBEAT_CONFIGURATION, *PHEARTBEAT_CONFIGURATION;
#define SHA_256_HASH_LENGTH 32
/* Contains information on our user mode module. */
typedef struct _MODULE_INFORMATION {
PVOID base_address;
UINT32 size;
CHAR path[MAX_MODULE_PATH];
CHAR module_hash[SHA_256_HASH_LENGTH];
} MODULE_INFORMATION, *PMODULE_INFORMATION;
typedef struct _SESSION_INITIATION_PACKET {
UINT32 cookie;
PVOID process_id;
UCHAR aes_key[AES_256_KEY_SIZE];
UCHAR aes_iv[AES_256_IV_SIZE];
MODULE_INFORMATION module_info;
} SESSION_INITIATION_PACKET, *PSESSION_INITIATION_PACKET;
/* State for the single active user-mode client session: protected process,
 * ob-callback registration, crypto material, module hash and heartbeat. */
typedef struct _ACTIVE_SESSION {
    volatile BOOLEAN is_session_active;
    PVOID um_handle;
    PVOID km_handle;
    PEPROCESS process;
    OB_CALLBACKS_CONFIG callback_configuration;
    struct {
        UINT32 cookie;
        UINT32 magic_number;
        /*
         * NOTE(review): these declare arrays of PUCHAR (32/16 pointers),
         * not byte buffers. If they are meant to hold raw AES key/IV
         * material they should likely be UCHAR arrays - confirm against
         * the code that fills them before changing.
         */
        PUCHAR aes_key[AES_256_KEY_SIZE];
        PUCHAR iv[AES_256_IV_SIZE];
        BCRYPT_KEY_HANDLE key_handle;
        PUCHAR key_object;
        UINT32 key_object_length;
    };
    struct SESSION_STATISTICS {
        UINT32 irps_received;
        UINT32 report_count;
        UINT32 heartbeat_count;
    };
    MODULE_INFORMATION module;
    HEARTBEAT_CONFIGURATION heartbeat_config;
    KGUARDED_MUTEX lock;
} ACTIVE_SESSION, *PACTIVE_SESSION;
#define NMI_CONTEXT_POOL '7331'
#define STACK_FRAMES_POOL 'loop'
#define INVALID_DRIVER_LIST_HEAD_POOL 'rwar'
#define INVALID_DRIVER_LIST_ENTRY_POOL 'gaah'
#define POOL_TAG_APC 'apcc'
#define POOL_TAG_CRYPT 'tpcr'
#define POOL_TAG_HW 'hwhw'
#define POOL_TAG_DPC 'apcc'
#define POOL_TAG_HEARTBEAT 'teab'
#define SYSTEM_MODULES_POOL 'halb'
#define THREAD_DATA_POOL 'doof'
#define PROC_AFFINITY_POOL 'eeee'
#define TEMP_BUFFER_POOL 'ffff'
#define DRIVER_PATH_POOL_TAG 'path'
#define POOL_TAG_INTEGRITY 'intg'
#define POOL_TAG_MODULE_MEMORY_BUF 'lolo'
#define POOL_TAG_MODULE_MEMORY_BUF_2 'leeo'
#define POOL_TAG_HASH_OBJECT 'hobj'
#define POOL_TAG_RESULTING_HASH 'hash'
#define POOL_TAG_SAVE_EX_REGIONS 'sexc'
#define POOL_DUMP_BLOCK_TAG 'dump'
#define POOL_DEBUGGER_DATA_TAG 'data'
#define PROCESS_ADDRESS_LIST_TAG 'addr'
#define ANALYSE_PROCESS_TAG 'anls'
#define INVALID_PROCESS_REPORT_TAG 'invd'
#define QUEUE_POOL_TAG 'qqqq'
#define REPORT_QUEUE_TEMP_BUFFER_TAG 'temp'
#define REPORT_POOL_TAG 'repo'
#define MODULES_REPORT_POOL_TAG 'modu'
#define POOL_TAG_LIST_ITEM 'tsil'
#define POOL_TAG_THREAD_LIST 'list'
#define POOL_TAG_PROCESS_LIST 'plis'
#define POOL_TAG_USER_MODULE_LIST 'resu'
#define POOL_TAG_USER_MODULE_NODE 'edon'
#define POOL_TAG_DRIVER_LIST 'drvl'
#define POOL_TAG_IRP_QUEUE 'irpp'
#define POOL_TAG_TIMER 'time'
#define POOL_TAG_MODULE_LIST 'elom'
#define POOL_TAG_RB_TREE 'eert'
#define POOL_TAG_HASHMAP 'hsah'
#define IA32_APERF_MSR 0x000000E8
#define ERROR -1
#define STACK_FRAME_POOL_SIZE 0x200
#define NUMBER_HASH_BUCKETS 37
#define KTHREAD_STACK_BASE_OFFSET 0x030
#define KTHREAD_STACK_LIMIT_OFFSET 0x038
#define KTHREAD_THREADLIST_OFFSET 0x2f8
#define KTHREAD_APC_STATE_OFFSET 0x098
#define KTHREAD_START_ADDRESS_OFFSET 0x450
#define KTHREAD_MISC_FLAGS_OFFSET 0x074
#define KTHREAD_WAIT_IRQL_OFFSET 0x186
#define KTHREAD_PREVIOUS_MODE_OFFSET 0x232
#define KTHREAD_STATE_OFFSET 0x184
#define KTHREAD_MISC_FLAGS_APC_QUEUEABLE 14
#define KTHREAD_MISC_FLAGS_ALERTABLE 4
#define EPROCESS_PEAK_VIRTUAL_SIZE_OFFSET 0x490
#define EPROCESS_VAD_ROOT_OFFSET 0x7d8
#define EPROCESS_OBJECT_TABLE_OFFSET 0x570
#define EPROCESS_IMAGE_NAME_OFFSET 0x5a8
#define EPROCESS_PEB_OFFSET 0x550
#define EPROCESS_SECTION_BASE_OFFSET 0x520
#define EPROCESS_IMAGE_FILE_NAME_OFFSET 0x5a8
#define EPROCESS_HANDLE_TABLE_OFFSET 0x570
#define EPROCESS_PLIST_ENTRY_OFFSET 0x448
#define KPROCESS_THREADLIST_OFFSET 0x030
#define KPROCESS_DIRECTORY_TABLE_BASE_OFFSET 0x028
#define OBJECT_HEADER_SIZE 0x30
#define OBJECT_HEADER_TYPE_INDEX_OFFSET 0x018
#define POOL_HEADER_BLOCK_SIZE_OFFSET 0x02
#define POOL_HEADER_TAG_OFFSET 0x04
#define KPROCESS_OFFSET_FROM_POOL_HEADER_SIZE_1 0x70
#define KPROCESS_OFFSET_FROM_POOL_HEADER_SIZE_2 0x80
#define KPROCESS_OFFSET_FROM_POOL_HEADER_SIZE_3 0x30
#define EPROCESS_SIZE 0xa40
#define KPCRB_CURRENT_THREAD 0x8
#define IA32_GS_BASE 0xc0000101
#define KPRCB_OFFSET_FROM_GS_BASE 0x180
#define MODULE_VALIDATION_FAILURE_MAX_REPORT_COUNT 20
#define IMAGE_DIRECTORY_ENTRY_EXPORT 0
#define IMAGE_DIRECTORY_ENTRY_IMPORT 1
#define IMAGE_DIRECTORY_ENTRY_RESOURCE 2
#define IMAGE_DIRECTORY_ENTRY_EXCEPTION 3
#define IMAGE_DIRECTORY_ENTRY_SECURITY 4
#define IMAGE_DIRECTORY_ENTRY_BASERELOC 5
#define IMAGE_DIRECTORY_ENTRY_DEBUG 6
#define IMAGE_DIRECTORY_ENTRY_COPYRIGHT 7
#define IMAGE_DIRECTORY_ENTRY_GLOBALPTR 8 /* (MIPS GP) */
#define IMAGE_DIRECTORY_ENTRY_TLS 9
#define IMAGE_DIRECTORY_ENTRY_LOAD_CONFIG 10
#define IMAGE_DIRECTORY_ENTRY_BOUND_IMPORT 11
#define IMAGE_DIRECTORY_ENTRY_IAT 12 /* Import Address Table */
#define IMAGE_DIRECTORY_ENTRY_DELAY_IMPORT 13
#define IMAGE_DIRECTORY_ENTRY_COM_DESCRIPTOR 14
/*
* Generic macros that allow you to quickly determine whether
* or not a page table entry is present or may forward to a
* large page of data, rather than another page table (applies
* only to PDPTEs and PDEs)
*
* Some nice macros courtesy of:
* https://www.unknowncheats.me/forum/general-programming-and-reversing/523359-introduction-physical-memory.html
*/
#define PAGE_1GB_SHIFT 30
#define PAGE_1GB_OFFSET(x) (x & (~(MAXUINT64 << PAGE_1GB_SHIFT)))
#define PAGE_2MB_SHIFT 21
#define PAGE_2MB_OFFSET(x) (x & (~(MAXUINT64 << PAGE_2MB_SHIFT)))
#define PAGE_4KB_SHIFT 12
#define PAGE_4KB_OFFSET(x) (x & (~(MAXUINT64 << PAGE_4KB_SHIFT)))
typedef struct _KAFFINITY_EX {
USHORT Count;
USHORT Size;
ULONG Reserved;
ULONGLONG Bitmap[20];
} KAFFINITY_EX, *PKAFFINITY_EX;
typedef struct _OBJECT_DIRECTORY_ENTRY {
struct _OBJECT_DIRECTORY_ENTRY* ChainLink;
PVOID Object;
ULONG HashValue;
} OBJECT_DIRECTORY_ENTRY, *POBJECT_DIRECTORY_ENTRY;
typedef struct _OBJECT_DIRECTORY {
POBJECT_DIRECTORY_ENTRY HashBuckets[NUMBER_HASH_BUCKETS];
EX_PUSH_LOCK Lock;
struct _DEVICE_MAP* DeviceMap;
ULONG SessionId;
PVOID NamespaceEntry;
ULONG Flags;
} OBJECT_DIRECTORY, *POBJECT_DIRECTORY;
typedef struct _DEVICE_MAP {
struct _OBJECT_DIRECTORY* DosDevicesDirectory;
struct _OBJECT_DIRECTORY* GlobalDosDevicesDirectory;
ULONG ReferenceCount;
ULONG DriveMap;
UCHAR DriveType[32];
} DEVICE_MAP, *PDEVICE_MAP;
typedef struct _RTL_MODULE_EXTENDED_INFO {
PVOID ImageBase;
ULONG ImageSize;
USHORT FileNameOffset;
CHAR FullPathName[0x100];
} RTL_MODULE_EXTENDED_INFO, *PRTL_MODULE_EXTENDED_INFO;
/*
Thread Information Block: (GS register)
SEH frame: 0x00
Stack Base: 0x08
Stack Limit: 0x10
SubSystemTib: 0x18
Fiber Data: 0x20
Arbitrary Data: 0x28
TEB: 0x30
Environment Pointer: 0x38
Process ID: 0x40
Current Thread ID: 0x48
Active RPC Handle: 0x50
Thread Local Storage Array: 0x58
PEB: 0x60
Last error number: 0x68
Count Owned Critical Sections: 0x6C
CSR Client Thread: 0x70
Win32 Thread Information: 0x78
...
*/
typedef struct _OBJECT_TYPE {
LIST_ENTRY TypeList;
UNICODE_STRING Name;
PVOID DefaultObject;
UCHAR Index;
ULONG TotalNumberOfObjects;
ULONG TotalNumberOfHandles;
ULONG HighWaterNumberOfObjects;
ULONG HighWaterNumberOfHandles;
PVOID TypeInfo; //_OBJECT_TYPE_INITIALIZER
EX_PUSH_LOCK TypeLock;
ULONG Key;
LIST_ENTRY CallbackList;
} OBJECT_TYPE, *POBJECT_TYPE;
typedef struct _PEB_LDR_DATA {
BYTE Reserved1[8];
PVOID Reserved2[3];
LIST_ENTRY InMemoryOrderModuleList;
} PEB_LDR_DATA, *PPEB_LDR_DATA;
typedef struct _LDR_DATA_TABLE_ENTRY {
PVOID Reserved1[2];
LIST_ENTRY InMemoryOrderLinks;
PVOID Reserved2[2];
PVOID DllBase;
PVOID Reserved3[2];
UNICODE_STRING FullDllName;
BYTE Reserved4[8];
PVOID Reserved5[3];
#pragma warning(push)
#pragma warning(disable : 4201) // we'll always use the Microsoft compiler
union {
ULONG CheckSum;
PVOID Reserved6;
} DUMMYUNIONNAME;
#pragma warning(pop)
ULONG TimeDateStamp;
} LDR_DATA_TABLE_ENTRY, *PLDR_DATA_TABLE_ENTRY;
typedef struct _PEB {
BYTE Reserved1[2];
BYTE BeingDebugged;
BYTE Reserved2[1];
PVOID Reserved3[2];
PPEB_LDR_DATA Ldr;
PVOID ProcessParameters;
PVOID Reserved4[3];
PVOID AtlThunkSListPtr;
PVOID Reserved5;
ULONG Reserved6;
PVOID Reserved7;
ULONG Reserved8;
ULONG AtlThunkSListPtr32;
PVOID Reserved9[45];
BYTE Reserved10[96];
PVOID PostProcessInitRoutine;
BYTE Reserved11[128];
PVOID Reserved12[1];
ULONG SessionId;
} PEB, *PPEB;
typedef struct _PEB32 {
UCHAR InheritedAddressSpace;
UCHAR ReadImageFileExecOptions;
UCHAR BeingDebugged;
UCHAR BitField;
ULONG Mutant;
ULONG ImageBaseAddress;
ULONG Ldr;
ULONG ProcessParameters;
ULONG SubSystemData;
ULONG ProcessHeap;
ULONG FastPebLock;
ULONG AtlThunkSListPtr;
ULONG IFEOKey;
ULONG CrossProcessFlags;
ULONG UserSharedInfoPtr;
ULONG SystemReserved;
ULONG AtlThunkSListPtr32;
ULONG ApiSetMap;
} PEB32, *PPEB32;
typedef struct _PEB_LDR_DATA32 {
ULONG Length;
UCHAR Initialized;
ULONG SsHandle;
LIST_ENTRY32 InLoadOrderModuleList;
LIST_ENTRY32 InMemoryOrderModuleList;
LIST_ENTRY32 InInitializationOrderModuleList;
} PEB_LDR_DATA32, *PPEB_LDR_DATA32;
typedef struct _LDR_DATA_TABLE_ENTRY32 {
LIST_ENTRY32 InLoadOrderLinks;
LIST_ENTRY32 InMemoryOrderLinks;
LIST_ENTRY32 InInitializationOrderLinks;
ULONG DllBase;
ULONG EntryPoint;
ULONG SizeOfImage;
UNICODE_STRING32 FullDllName;
UNICODE_STRING32 BaseDllName;
ULONG Flags;
USHORT LoadCount;
USHORT TlsIndex;
LIST_ENTRY32 HashLinks;
ULONG TimeDateStamp;
} LDR_DATA_TABLE_ENTRY32, *PLDR_DATA_TABLE_ENTRY32;
typedef struct _HANDLE_TABLE_ENTRY_INFO {
ULONG AuditMask;
ULONG MaxRelativeAccessMask;
} HANDLE_TABLE_ENTRY_INFO, *PHANDLE_TABLE_ENTRY_INFO;
typedef union _EXHANDLE {
struct {
int TagBits : 2;
int Index : 30;
} u;
void* GenericHandleOverlay;
ULONG_PTR Value;
} EXHANDLE, *PEXHANDLE;
#pragma warning(disable : 4214 4201)
#pragma pack(push, 1)
typedef struct _POOL_HEADER // Size=16
{
union {
struct {
unsigned long PreviousSize : 8; // Size=4 Offset=0 BitOffset=0
// BitCount=8
unsigned long PoolIndex : 8; // Size=4 Offset=0
// BitOffset=8 BitCount=8
unsigned long BlockSize : 8; // Size=4 Offset=0
// BitOffset=16 BitCount=8
unsigned long PoolType : 8; // Size=4 Offset=0
// BitOffset=24 BitCount=8
};
unsigned long Ulong1; // Size=4 Offset=0
};
unsigned long PoolTag; // Size=4 Offset=4
union {
struct _EPROCESS* ProcessBilled; // Size=8 Offset=8
struct {
unsigned short AllocatorBackTraceIndex; // Size=2 Offset=8
unsigned short PoolTagHash; // Size=2 Offset=10
};
};
} POOL_HEADER, *PPOOL_HEADER;
#pragma pack(pop)
typedef struct _HANDLE_TABLE_ENTRY // Size=16
{
union {
ULONG_PTR VolatileLowValue; // Size=8 Offset=0
ULONG_PTR LowValue; // Size=8 Offset=0
struct _HANDLE_TABLE_ENTRY_INFO* InfoTable; // Size=8 Offset=0
struct {
ULONG_PTR Unlocked : 1; // Size=8 Offset=0 BitOffset=0
// BitCount=1
ULONG_PTR RefCnt : 16; // Size=8 Offset=0 BitOffset=1
// BitCount=16
ULONG_PTR Attributes : 3; // Size=8 Offset=0
// BitOffset=17 BitCount=3
ULONG_PTR
ObjectPointerBits : 44; // Size=8 Offset=0 BitOffset=20
// BitCount=44
};
};
union {
ULONG_PTR HighValue; // Size=8 Offset=8
struct _HANDLE_TABLE_ENTRY* NextFreeHandleEntry; // Size=8 Offset=8
union _EXHANDLE LeafHandleValue; // Size=8 Offset=8
struct {
ULONG GrantedAccessBits : 25; // Size=4 Offset=8
// BitOffset=0 BitCount=25
ULONG NoRightsUpgrade : 1; // Size=4 Offset=8
// BitOffset=25 BitCount=1
ULONG Spare : 6; // Size=4 Offset=8 BitOffset=26
// BitCount=6
};
};
ULONG TypeInfo; // Size=4 Offset=12
} HANDLE_TABLE_ENTRY, *PHANDLE_TABLE_ENTRY;
typedef struct _HANDLE_TABLE_FREE_LIST {
EX_PUSH_LOCK FreeListLock;
PHANDLE_TABLE_ENTRY FirstFreeHandleEntry;
PHANDLE_TABLE_ENTRY LastFreeHandleEntry;
LONG HandleCount;
ULONG HighWaterMark;
} HANDLE_TABLE_FREE_LIST, *PHANDLE_TABLE_FREE_LIST;
typedef struct _HANDLE_TRACE_DB_ENTRY {
CLIENT_ID ClientId;
PVOID Handle;
ULONG Type;
PVOID StackTrace[16];
} HANDLE_TRACE_DB_ENTRY, *PHANDLE_TRACE_DB_ENTRY;
typedef struct _HANDLE_TRACE_DEBUG_INFO {
LONG RefCount;
ULONG TableSize;
ULONG BitMaskFlags;
FAST_MUTEX CloseCompactionLock;
ULONG CurrentStackIndex;
HANDLE_TRACE_DB_ENTRY TraceDb[1];
} HANDLE_TRACE_DEBUG_INFO, *PHANDLE_TRACE_DEBUG_INFO;
typedef struct _HANDLE_TABLE {
ULONG NextHandleNeedingPool;
LONG ExtraInfoPages;
ULONGLONG TableCode;
PEPROCESS QuotaProcess;
LIST_ENTRY HandleTableList;
ULONG UniqueProcessId;
union {
ULONG Flags;
struct {
UCHAR StrictFIFO : 1;
UCHAR EnableHandleExceptions : 1;
UCHAR Rundown : 1;
UCHAR Duplicated : 1;
UCHAR RaiseUMExceptionOnInvalidHandleClose : 1;
};
};
EX_PUSH_LOCK HandleContentionEvent;
EX_PUSH_LOCK HandleTableLock;
union {
HANDLE_TABLE_FREE_LIST FreeLists[1];
UCHAR ActualEntry[32];
};
struct _HANDLE_TRACE_DEBUG_INFO* DebugInfo;
} HANDLE_TABLE, *PHANDLE_TABLE;
typedef BOOLEAN (*EX_ENUMERATE_HANDLE_ROUTINE)(IN PHANDLE_TABLE_ENTRY
HandleTableEntry,
IN HANDLE Handle,
IN PVOID EnumParameter);
/* Undocumented object-manager structures (nt!_OBJECT_CREATE_INFORMATION,
 * nt!_OBJECT_HEADER). The object header immediately precedes the object
 * body in memory; Body marks the start of the caller-visible object. */
typedef struct _OBJECT_CREATE_INFORMATION {
ULONG Attributes;
PVOID RootDirectory;
CHAR ProbeMode;
ULONG PagedPoolCharge;
ULONG NonPagedPoolCharge;
ULONG SecurityDescriptorCharge;
PVOID SecurityDescriptor;
struct _SECURITY_QUALITY_OF_SERVICE* SecurityQos;
struct _SECURITY_QUALITY_OF_SERVICE SecurityQualityOfService;
} OBJECT_CREATE_INFORMATION, *POBJECT_CREATE_INFORMATION;
typedef struct _OBJECT_HEADER {
LONGLONG PointerCount;
union {
LONGLONG HandleCount;
PVOID NextToFree;
};
EX_PUSH_LOCK Lock;
/* Index into the object-type table (obfuscated on modern builds). */
UCHAR TypeIndex;
union {
UCHAR TraceFlags;
struct {
UCHAR DbgRefTrace : 1;
UCHAR DbgTracePermanent : 1;
};
};
UCHAR InfoMask;
union {
UCHAR Flags;
struct {
UCHAR NewObject : 1;
UCHAR KernelObject : 1;
UCHAR KernelOnlyAccess : 1;
UCHAR ExclusiveObject : 1;
UCHAR PermanentObject : 1;
UCHAR DefaultSecurityQuota : 1;
UCHAR SingleHandleEntry : 1;
UCHAR DeletedInline : 1;
};
};
ULONG Reserved;
union {
POBJECT_CREATE_INFORMATION ObjectCreateInfo;
PVOID QuotaBlockCharged;
};
PVOID SecurityDescriptor;
/* Start of the object body; &Header->Body is the object pointer. */
QUAD Body;
} OBJECT_HEADER, *POBJECT_HEADER;
/* PE section characteristics flags and section header (mirrors winnt.h,
 * redeclared here for kernel-mode use without the SDK headers). */
#define IMAGE_SCN_MEM_EXECUTE 0x20000000
#define IMAGE_SCN_MEM_READ 0x40000000
#define IMAGE_SCN_MEM_WRITE 0x80000000
#define IMAGE_SIZEOF_SHORT_NAME 8
typedef struct _IMAGE_SECTION_HEADER {
unsigned char Name[IMAGE_SIZEOF_SHORT_NAME];
union {
unsigned long PhysicalAddress;
unsigned long VirtualSize;
} Misc;
unsigned long VirtualAddress;
unsigned long SizeOfRawData;
unsigned long PointerToRawData;
unsigned long PointerToRelocations;
unsigned long PointerToLinenumbers;
unsigned short NumberOfRelocations;
unsigned short NumberOfLinenumbers;
unsigned long Characteristics;
} IMAGE_SECTION_HEADER, *PIMAGE_SECTION_HEADER;
/* PE COFF file header and data-directory entry (mirrors winnt.h). */
typedef struct _IMAGE_FILE_HEADER {
unsigned short Machine;
unsigned short NumberOfSections;
unsigned long TimeDateStamp;
unsigned long PointerToSymbolTable;
unsigned long NumberOfSymbols;
unsigned short SizeOfOptionalHeader;
unsigned short Characteristics;
} IMAGE_FILE_HEADER, *PIMAGE_FILE_HEADER;
typedef struct _IMAGE_DATA_DIRECTORY {
unsigned long VirtualAddress;
unsigned long Size;
} IMAGE_DATA_DIRECTORY, *PIMAGE_DATA_DIRECTORY;
/* 64-bit PE optional header (mirrors winnt.h; Magic == 0x20b for PE32+). */
#define IMAGE_NUMBEROF_DIRECTORY_ENTRIES 16
typedef struct _IMAGE_OPTIONAL_HEADER64 {
unsigned short Magic;
unsigned char MajorLinkerVersion;
unsigned char MinorLinkerVersion;
unsigned long SizeOfCode;
unsigned long SizeOfInitializedData;
unsigned long SizeOfUninitializedData;
unsigned long AddressOfEntryPoint;
unsigned long BaseOfCode;
ULONGLONG ImageBase;
unsigned long SectionAlignment;
unsigned long FileAlignment;
unsigned short MajorOperatingSystemVersion;
unsigned short MinorOperatingSystemVersion;
unsigned short MajorImageVersion;
unsigned short MinorImageVersion;
unsigned short MajorSubsystemVersion;
unsigned short MinorSubsystemVersion;
unsigned long Win32VersionValue;
unsigned long SizeOfImage;
unsigned long SizeOfHeaders;
unsigned long CheckSum;
unsigned short Subsystem;
unsigned short DllCharacteristics;
ULONGLONG SizeOfStackReserve;
ULONGLONG SizeOfStackCommit;
ULONGLONG SizeOfHeapReserve;
ULONGLONG SizeOfHeapCommit;
unsigned long LoaderFlags;
unsigned long NumberOfRvaAndSizes;
IMAGE_DATA_DIRECTORY DataDirectory[IMAGE_NUMBEROF_DIRECTORY_ENTRIES];
} IMAGE_OPTIONAL_HEADER64, *PIMAGE_OPTIONAL_HEADER64;
/* 32-bit PE optional header (mirrors winnt.h; Magic == 0x10b for PE32).
 * Note BaseOfData exists only in the 32-bit variant. */
typedef unsigned long DWORD;
typedef unsigned short WORD;
typedef struct _IMAGE_OPTIONAL_HEADER32 {
WORD Magic;
BYTE MajorLinkerVersion;
BYTE MinorLinkerVersion;
DWORD SizeOfCode;
DWORD SizeOfInitializedData;
DWORD SizeOfUninitializedData;
DWORD AddressOfEntryPoint;
DWORD BaseOfCode;
DWORD BaseOfData;
DWORD ImageBase;
DWORD SectionAlignment;
DWORD FileAlignment;
WORD MajorOperatingSystemVersion;
WORD MinorOperatingSystemVersion;
WORD MajorImageVersion;
WORD MinorImageVersion;
WORD MajorSubsystemVersion;
WORD MinorSubsystemVersion;
DWORD Win32VersionValue;
DWORD SizeOfImage;
DWORD SizeOfHeaders;
DWORD CheckSum;
WORD Subsystem;
WORD DllCharacteristics;
DWORD SizeOfStackReserve;
DWORD SizeOfStackCommit;
DWORD SizeOfHeapReserve;
DWORD SizeOfHeapCommit;
DWORD LoaderFlags;
DWORD NumberOfRvaAndSizes;
IMAGE_DATA_DIRECTORY DataDirectory[IMAGE_NUMBEROF_DIRECTORY_ENTRIES];
} IMAGE_OPTIONAL_HEADER32, *PIMAGE_OPTIONAL_HEADER32;
/* DOS stub header; e_magic == 0x5A4D ("MZ"), e_lfanew points to the
 * NT headers. */
typedef struct _IMAGE_DOS_HEADER { // DOS .EXE header
unsigned short e_magic; // Magic number
unsigned short e_cblp; // Bytes on last page of file
unsigned short e_cp; // Pages in file
unsigned short e_crlc; // Relocations
unsigned short e_cparhdr; // Size of header in paragraphs
unsigned short e_minalloc; // Minimum extra paragraphs needed
unsigned short e_maxalloc; // Maximum extra paragraphs needed
unsigned short e_ss; // Initial (relative) SS value
unsigned short e_sp; // Initial SP value
unsigned short e_csum; // Checksum
unsigned short e_ip; // Initial IP value
unsigned short e_cs; // Initial (relative) CS value
unsigned short e_lfarlc; // File address of relocation table
unsigned short e_ovno; // Overlay number
unsigned short e_res[4]; // Reserved words
unsigned short e_oemid; // OEM identifier (for e_oeminfo)
unsigned short e_oeminfo; // OEM information; e_oemid specific
unsigned short e_res2[10]; // Reserved words
LONG e_lfanew; // File address of new exe header
} IMAGE_DOS_HEADER, *PIMAGE_DOS_HEADER;
/* Kernel loader module entry; nodes of nt!PsLoadedModuleList. Layout is
 * undocumented — NOTE(review): version-dependent, verify on target build. */
typedef struct _KLDR_DATA_TABLE_ENTRY {
LIST_ENTRY InLoadOrderLinks;
PVOID ExceptionTable;
ULONG ExceptionTableSize;
// ULONG padding on IA64
PVOID GpValue;
PVOID NonPagedDebugInfo;
PVOID DllBase;
PVOID EntryPoint;
ULONG SizeOfImage;
UNICODE_STRING FullDllName;
UNICODE_STRING BaseDllName;
ULONG Flags;
USHORT LoadCount;
USHORT __Unused5;
PVOID SectionPointer;
ULONG CheckSum;
// ULONG padding on IA64
PVOID LoadedImports;
PVOID PatchInformation;
} KLDR_DATA_TABLE_ENTRY, *PKLDR_DATA_TABLE_ENTRY;
/* PE export directory (mirrors winnt.h) plus a local NT-headers shape used
 * by IMAGE_FIRST_SECTION. NOTE(review): LOCAL_NT_HEADER embeds the 32-bit
 * optional header, but the macro only uses its field offset, which is the
 * same for both widths, so it is valid for 64-bit images too. */
typedef struct _IMAGE_EXPORT_DIRECTORY {
DWORD Characteristics;
DWORD TimeDateStamp;
WORD MajorVersion;
WORD MinorVersion;
DWORD Name;
DWORD Base;
DWORD NumberOfFunctions;
DWORD NumberOfNames;
DWORD AddressOfFunctions;
DWORD AddressOfNames;
DWORD AddressOfNameOrdinals;
} IMAGE_EXPORT_DIRECTORY, *PIMAGE_EXPORT_DIRECTORY;
typedef struct _LOCAL_NT_HEADER {
unsigned long Signature;
IMAGE_FILE_HEADER FileHeader;
IMAGE_OPTIONAL_HEADER32 OptionalHeader;
} LOCAL_NT_HEADER, *PLOCAL_NT_HEADER;
#define IMAGE_FIRST_SECTION(ntheader) \
((PIMAGE_SECTION_HEADER)((ULONG_PTR)(ntheader) + \
FIELD_OFFSET(LOCAL_NT_HEADER, OptionalHeader) + \
((ntheader))->FileHeader.SizeOfOptionalHeader))
/* creds: https://www.unknowncheats.me/forum/2602838-post2.html */
/* Debugger data block header and big-pool query structures for
 * ZwQuerySystemInformation(SystemBigPoolInformation = 0x42). */
typedef struct _DBGKD_DEBUG_DATA_HEADER64 {
LIST_ENTRY64 List;
ULONG OwnerTag;
ULONG Size;
} DBGKD_DEBUG_DATA_HEADER64, *PDBGKD_DEBUG_DATA_HEADER64;
typedef NTSTATUS(__stdcall* ZwQuerySystemInformation)(
_In_ UINT32 SystemInformationClass,
_Inout_ PVOID SystemInformation,
_In_ ULONG SystemInformationLength,
_Out_opt_ PULONG ReturnLength);
#define SYSTEM_BIGPOOL_INFORMATION_ID 0x42
typedef struct _SYSTEM_BIGPOOL_ENTRY {
union {
PVOID VirtualAddress;
/* Low bit of the address flags a non-paged allocation. */
ULONG_PTR NonPaged : 1;
};
SIZE_T SizeInBytes;
union {
UCHAR Tag[4];
ULONG TagUlong;
};
} SYSTEM_BIGPOOL_ENTRY, *PSYSTEM_BIGPOOL_ENTRY;
typedef struct _SYSTEM_BIGPOOL_INFORMATION {
ULONG Count;
_Field_size_(Count) SYSTEM_BIGPOOL_ENTRY AllocatedInfo[1];
} SYSTEM_BIGPOOL_INFORMATION, *PSYSTEM_BIGPOOL_INFORMATION;
/* Kernel debugger data block (nt!KdDebuggerDataBlock). Field order is
 * fixed by the debugger protocol — do not reorder or insert members. */
typedef struct _KDDEBUGGER_DATA64 {
DBGKD_DEBUG_DATA_HEADER64 Header;
ULONG64 KernBase;
ULONG64 BreakpointWithStatus;
ULONG64 SavedContext;
USHORT ThCallbackStack;
USHORT NextCallback;
USHORT FramePointer;
USHORT PaeEnabled;
ULONG64 KiCallUserMode;
ULONG64 KeUserCallbackDispatcher;
ULONG64 PsLoadedModuleList;
ULONG64 PsActiveProcessHead;
ULONG64 PspCidTable;
ULONG64 ExpSystemResourcesList;
ULONG64 ExpPagedPoolDescriptor;
ULONG64 ExpNumberOfPagedPools;
ULONG64 KeTimeIncrement;
ULONG64 KeBugCheckCallbackListHead;
ULONG64 KiBugcheckData;
ULONG64 IopErrorLogListHead;
ULONG64 ObpRootDirectoryObject;
ULONG64 ObpTypeObjectType;
ULONG64 MmSystemCacheStart;
ULONG64 MmSystemCacheEnd;
ULONG64 MmSystemCacheWs;
ULONG64 MmPfnDatabase;
ULONG64 MmSystemPtesStart;
ULONG64 MmSystemPtesEnd;
ULONG64 MmSubsectionBase;
ULONG64 MmNumberOfPagingFiles;
ULONG64 MmLowestPhysicalPage;
ULONG64 MmHighestPhysicalPage;
ULONG64 MmNumberOfPhysicalPages;
ULONG64 MmMaximumNonPagedPoolInBytes;
ULONG64 MmNonPagedSystemStart;
ULONG64 MmNonPagedPoolStart;
ULONG64 MmNonPagedPoolEnd;
ULONG64 MmPagedPoolStart;
ULONG64 MmPagedPoolEnd;
ULONG64 MmPagedPoolInformation;
ULONG64 MmPageSize;
ULONG64 MmSizeOfPagedPoolInBytes;
ULONG64 MmTotalCommitLimit;
ULONG64 MmTotalCommittedPages;
ULONG64 MmSharedCommit;
ULONG64 MmDriverCommit;
ULONG64 MmProcessCommit;
ULONG64 MmPagedPoolCommit;
ULONG64 MmExtendedCommit;
ULONG64 MmZeroedPageListHead;
ULONG64 MmFreePageListHead;
ULONG64 MmStandbyPageListHead;
ULONG64 MmModifiedPageListHead;
ULONG64 MmModifiedNoWritePageListHead;
ULONG64 MmAvailablePages;
ULONG64 MmResidentAvailablePages;
ULONG64 PoolTrackTable;
ULONG64 NonPagedPoolDescriptor;
ULONG64 MmHighestUserAddress;
ULONG64 MmSystemRangeStart;
ULONG64 MmUserProbeAddress;
ULONG64 KdPrintCircularBuffer;
ULONG64 KdPrintCircularBufferEnd;
ULONG64 KdPrintWritePointer;
ULONG64 KdPrintRolloverCount;
ULONG64 MmLoadedUserImageList;
ULONG64 NtBuildLab;
ULONG64 KiNormalSystemCall;
ULONG64 KiProcessorBlock;
ULONG64 MmUnloadedDrivers;
ULONG64 MmLastUnloadedDriver;
ULONG64 MmTriageActionTaken;
ULONG64 MmSpecialPoolTag;
ULONG64 KernelVerifier;
ULONG64 MmVerifierData;
ULONG64 MmAllocatedNonPagedPool;
ULONG64 MmPeakCommitment;
ULONG64 MmTotalCommitLimitMaximum;
ULONG64 CmNtCSDVersion;
ULONG64 MmPhysicalMemoryBlock;
ULONG64 MmSessionBase;
ULONG64 MmSessionSize;
ULONG64 MmSystemParentTablePage;
ULONG64 MmVirtualTranslationBase;
USHORT OffsetKThreadNextProcessor;
USHORT OffsetKThreadTeb;
USHORT OffsetKThreadKernelStack;
USHORT OffsetKThreadInitialStack;
USHORT OffsetKThreadApcProcess;
USHORT OffsetKThreadState;
USHORT OffsetKThreadBStore;
USHORT OffsetKThreadBStoreLimit;
USHORT SizeEProcess;
USHORT OffsetEprocessPeb;
USHORT OffsetEprocessParentCID;
USHORT OffsetEprocessDirectoryTableBase;
USHORT SizePrcb;
USHORT OffsetPrcbDpcRoutine;
USHORT OffsetPrcbCurrentThread;
USHORT OffsetPrcbMhz;
USHORT OffsetPrcbCpuType;
USHORT OffsetPrcbVendorString;
USHORT OffsetPrcbProcStateContext;
USHORT OffsetPrcbNumber;
USHORT SizeEThread;
ULONG64 KdPrintCircularBufferPtr;
ULONG64 KdPrintBufferSize;
ULONG64 KeLoaderBlock;
USHORT SizePcr;
USHORT OffsetPcrSelfPcr;
USHORT OffsetPcrCurrentPrcb;
USHORT OffsetPcrContainedPrcb;
USHORT OffsetPcrInitialBStore;
USHORT OffsetPcrBStoreLimit;
USHORT OffsetPcrInitialStack;
USHORT OffsetPcrStackLimit;
USHORT OffsetPrcbPcrPage;
USHORT OffsetPrcbProcStateSpecialReg;
USHORT GdtR0Code;
USHORT GdtR0Data;
USHORT GdtR0Pcr;
USHORT GdtR3Code;
USHORT GdtR3Data;
USHORT GdtR3Teb;
USHORT GdtLdt;
USHORT GdtTss;
USHORT Gdt64R3CmCode;
USHORT Gdt64R3CmTeb;
ULONG64 IopNumTriageDumpDataBlocks;
ULONG64 IopTriageDumpDataBlocks;
} KDDEBUGGER_DATA64, *PKDDEBUGGER_DATA64;
/* Fields appended to the debugger data block on newer Windows builds;
 * follows KDDEBUGGER_DATA64 in memory. */
typedef struct _KDDEBUGGER_DATA_ADDITION64 {
ULONG64 VfCrashDataBlock;
ULONG64 MmBadPagesDetected;
ULONG64 MmZeroedPageSingleBitErrorsDetected;
ULONG64 EtwpDebuggerData;
USHORT OffsetPrcbContext;
USHORT OffsetPrcbMaxBreakpoints;
USHORT OffsetPrcbMaxWatchpoints;
ULONG OffsetKThreadStackLimit;
ULONG OffsetKThreadStackBase;
ULONG OffsetKThreadQueueListEntry;
ULONG OffsetEThreadIrpList;
USHORT OffsetPrcbIdleThread;
USHORT OffsetPrcbNormalDpcState;
USHORT OffsetPrcbDpcStack;
USHORT OffsetPrcbIsrStack;
USHORT SizeKDPC_STACK_FRAME;
USHORT OffsetKPriQueueThreadListHead;
USHORT OffsetKThreadWaitReason;
USHORT Padding;
ULONG64 PteBase;
ULONG64 RetpolineStubFunctionTable;
ULONG RetpolineStubFunctionTableSize;
ULONG RetpolineStubOffset;
ULONG RetpolineStubSize;
} KDDEBUGGER_DATA_ADDITION64, *PKDDEBUGGER_DATA_ADDITION64;
/* Crash-dump header filled by KeCapturePersistentThreadState. Offsets are
 * pinned by the C_ASSERTs later in this file — do not alter the layout. */
typedef struct _DUMP_HEADER {
ULONG Signature;
ULONG ValidDump;
ULONG MajorVersion;
ULONG MinorVersion;
ULONG_PTR DirectoryTableBase;
ULONG_PTR PfnDataBase;
PLIST_ENTRY PsLoadedModuleList;
PLIST_ENTRY PsActiveProcessHead;
ULONG MachineImageType;
ULONG NumberProcessors;
ULONG BugCheckCode;
ULONG_PTR BugCheckParameter1;
ULONG_PTR BugCheckParameter2;
ULONG_PTR BugCheckParameter3;
ULONG_PTR BugCheckParameter4;
CHAR VersionUser[32];
struct _KDDEBUGGER_DATA64* KdDebuggerDataBlock;
} DUMP_HEADER, *PDUMP_HEADER;
/* x64 4-level paging structures: virtual-address decomposition and the
 * PML4E/PDPTE/PDE/PTE entry formats, including the 1GB/2MB large-page
 * variants. Bit positions follow the Intel SDM Vol. 3A layout. */
typedef union _VIRTUAL_MEMORY_ADDRESS {
struct {
UINT64 PageIndex : 12; /* 0:11 */
UINT64 PtIndex : 9; /* 12:20 */
UINT64 PdIndex : 9; /* 21:29 */
UINT64 PdptIndex : 9; /* 30:38 */
UINT64 Pml4Index : 9; /* 39:47 */
UINT64 Unused : 16; /* 48:63 */
} Bits;
UINT64 BitAddress;
} VIRTUAL_ADDRESS, *PVIRTUAL_ADDRESS;
typedef union _PML4_ENTRY {
struct {
UINT64 Present : 1; /* 0 */
UINT64 ReadWrite : 1; /* 1 */
UINT64 UserSupervisor : 1; /* 2 */
UINT64 PageWriteThrough : 1; /* 3 */
UINT64 PageCacheDisable : 1; /* 4 */
UINT64 Accessed : 1; /* 5 */
UINT64 _Ignored0 : 1; /* 6 */
UINT64 _Reserved0 : 1; /* 7 */
UINT64 _Ignored1 : 4; /* 11:8 */
UINT64 PhysicalAddress : 40; /* 51:12 */
UINT64 _Ignored2 : 11; /* 62:52 */
UINT64 ExecuteDisable : 1; /* 63 */
} Bits;
UINT64 BitAddress;
} PML4E;
typedef union _PDPT_ENTRY {
struct {
UINT64 Present : 1; /* 0 */
UINT64 ReadWrite : 1; /* 1 */
UINT64 UserSupervisor : 1; /* 2 */
UINT64 PageWriteThrough : 1; /* 3 */
UINT64 PageCacheDisable : 1; /* 4 */
UINT64 Accessed : 1; /* 5 */
UINT64 _Ignored0 : 1; /* 6 */
UINT64 PageSize : 1; /* 7 */
UINT64 _Ignored1 : 4; /* 11:8 */
UINT64 PhysicalAddress : 40; /* 51:12 */
UINT64 _Ignored2 : 11; /* 62:52 */
UINT64 ExecuteDisable : 1; /* 63 */
} Bits;
UINT64 BitAddress;
} PDPTE;
typedef union _PD_ENTRY {
struct {
UINT64 Present : 1; /* 0 */
UINT64 ReadWrite : 1; /* 1 */
UINT64 UserSupervisor : 1; /* 2 */
UINT64 PageWriteThrough : 1; /* 3 */
UINT64 PageCacheDisable : 1; /* 4 */
UINT64 Accessed : 1; /* 5 */
UINT64 _Ignored0 : 1; /* 6 */
UINT64 PageSize : 1; /* 7 */
UINT64 _Ignored1 : 4; /* 11:8 */
UINT64 PhysicalAddress : 38; /* 49:12 */
UINT64 _Reserved0 : 2; /* 51:50 */
UINT64 _Ignored2 : 11; /* 62:52 */
UINT64 ExecuteDisable : 1; /* 63 */
} Bits;
UINT64 BitAddress;
} PDE;
typedef union _PT_ENTRY {
struct {
UINT64 Present : 1; /* 0 */
UINT64 ReadWrite : 1; /* 1 */
UINT64 UserSupervisor : 1; /* 2 */
UINT64 PageWriteThrough : 1; /* 3 */
UINT64 PageCacheDisable : 1; /* 4 */
UINT64 Accessed : 1; /* 5 */
UINT64 Dirty : 1; /* 6 */
UINT64 PageAttributeTable : 1; /* 7 */
UINT64 Global : 1; /* 8 */
UINT64 _Ignored0 : 3; /* 11:9 */
UINT64 PhysicalAddress : 38; /* 49:12 */
UINT64 _Reserved0 : 2; /* 51:50 */
UINT64 _Ignored1 : 7; /* 58:52 */
UINT64 ProtectionKey : 4; /* 62:59 */
UINT64 ExecuteDisable : 1; /* 63 */
} Bits;
UINT64 BitAddress;
} PTE;
typedef union _PDPT_ENTRY_LARGE {
struct {
UINT64 Present : 1; /* 0 */
UINT64 ReadWrite : 1; /* 1 */
UINT64 UserSupervisor : 1; /* 2 */
UINT64 PageWriteThrough : 1; /* 3 */
UINT64 PageCacheDisable : 1; /* 4 */
UINT64 Accessed : 1; /* 5 */
UINT64 Dirty : 1; /* 6 */
UINT64 PageSize : 1; /* 7 */
UINT64 Global : 1; /* 8 */
UINT64 _Ignored0 : 3; /* 11:9 */
UINT64 PageAttributeTable : 1; /* 12 */
UINT64 _Reserved0 : 17; /* 29:13 */
UINT64 PhysicalAddress : 22; /* 51:30 */
UINT64 _Ignored1 : 7; /* 58:52 */
UINT64 ProtectionKey : 4; /* 62:59 */
UINT64 ExecuteDisable : 1; /* 63 */
} Bits;
UINT64 BitAddress;
} PDPTE_LARGE;
typedef union _PD_ENTRY_LARGE {
struct {
UINT64 Present : 1; /* 0 */
UINT64 ReadWrite : 1; /* 1 */
UINT64 UserSupervisor : 1; /* 2 */
UINT64 PageWriteThrough : 1; /* 3 */
UINT64 PageCacheDisable : 1; /* 4 */
UINT64 Accessed : 1; /* 5 */
UINT64 Dirty : 1; /* 6 */
UINT64 PageSize : 1; /* 7 */
UINT64 Global : 1; /* 8 */
UINT64 _Ignored0 : 3; /* 11:9 */
/* NOTE(review): field name is misspelled ("Talbe"); left as-is since
 * renaming would break any existing references elsewhere in the driver. */
UINT64 PageAttributeTalbe : 1; /* 12 */
UINT64 _Reserved0 : 8; /* 20:13 */
UINT64 PhysicalAddress : 29; /* 49:21 */
UINT64 _Reserved1 : 2; /* 51:50 */
UINT64 _Ignored1 : 7; /* 58:52 */
UINT64 ProtectionKey : 4; /* 62:59 */
UINT64 ExecuteDisable : 1; /* 63 */
} Bits;
UINT64 BitAddress;
} PDE_LARGE;
// typedef struct _KAPC_STATE
//{
// LIST_ENTRY ApcListHead[ MaximumMode ];
// struct _KPROCESS* Process;
// union {
// UCHAR InProgressFlags;
// struct
// {
// BOOLEAN KernelApcInProgress : 1;
// BOOLEAN SpecialApcInProgress : 1;
// };
// };
//
// BOOLEAN KernelApcPending;
// union {
// BOOLEAN UserApcPendingAll;
// struct
// {
// BOOLEAN SpecialUserApcPending : 1;
// BOOLEAN UserApcPending : 1;
// };
// };
// } KAPC_STATE, * PKAPC_STATE, * PRKAPC_STATE;
/* SMBIOS raw-table structures (type 1 = system info, type 2 = baseboard).
 * String members hold 1-based indices into the trailing string-set, not
 * the strings themselves. */
typedef struct _RAW_SMBIOS_DATA {
BYTE Used20CallingMethod;
BYTE SMBIOSMajorVersion;
BYTE SMBIOSMinorVersion;
BYTE DmiRevision;
UINT32 Length;
BYTE SMBIOSTableData[1];
} RAW_SMBIOS_DATA, *PRAW_SMBIOS_DATA;
typedef struct _SMBIOS_TABLE_HEADER {
UCHAR Type;
UCHAR Length;
USHORT Handle;
PCHAR TableData;
} SMBIOS_TABLE_HEADER, *PSMBIOS_TABLE_HEADER;
typedef struct _RAW_SMBIOS_TABLE_01 {
UCHAR Type;
UCHAR Length;
USHORT Handle;
UCHAR Manufacturer;
UCHAR ProductName;
UCHAR Version;
UCHAR SerialNumber;
UCHAR UUID[16];
UCHAR WakeUpType;
UCHAR SKUNumber;
UCHAR Family;
} RAW_SMBIOS_TABLE_01, *PRAW_SMBIOS_TABLE_01;
typedef struct _RAW_SMBIOS_TABLE_02 {
UCHAR Type;
UCHAR Length;
USHORT Handle;
BYTE Manufacturer;
BYTE Product;
BYTE Version;
BYTE SerialNumber;
BYTE AssetTag;
BYTE FeatureFlags;
BYTE LocationInChassis;
UINT16 ChassisHandle;
BYTE BoardType;
BYTE NumberOfContainedObjectHandles;
BYTE ContainedObjectHandles[256];
} RAW_SMBIOS_TABLE_02, *PRAW_SMBIOS_TABLE_02;
typedef struct _RTL_RELATIVE_NAME {
UNICODE_STRING RelativeName;
HANDLE ContainingDirectory;
void* CurDirRef;
} RTL_RELATIVE_NAME, *PRTL_RELATIVE_NAME;
/* Storage-stack property query types (mirrors ntddstor.h), used with
 * IOCTL_STORAGE_QUERY_PROPERTY to read device descriptors. */
typedef struct _STORAGE_DESCRIPTOR_HEADER {
ULONG Version;
ULONG Size;
} STORAGE_DESCRIPTOR_HEADER, *PSTORAGE_DESCRIPTOR_HEADER;
typedef enum _STORAGE_BUS_TYPE {
BusTypeUnknown = 0x00,
BusTypeScsi,
BusTypeAtapi,
BusTypeAta,
BusType1394,
BusTypeSsa,
BusTypeFibre,
BusTypeUsb,
BusTypeRAID,
BusTypeMaxReserved = 0x7F
} STORAGE_BUS_TYPE,
*PSTORAGE_BUS_TYPE;
typedef enum _STORAGE_SET_TYPE {
PropertyStandardSet = 0, // Sets the descriptor
PropertyExistsSet, // Used to test whether the descriptor is supported
PropertySetMaxDefined // use to validate the value
} STORAGE_SET_TYPE,
*PSTORAGE_SET_TYPE;
//
// define some initial property id's
//
typedef enum _STORAGE_QUERY_TYPE {
PropertyStandardQuery = 0, // Retrieves the descriptor
PropertyExistsQuery, // Used to test whether the descriptor is supported
PropertyMaskQuery, // Used to retrieve a mask of writeable fields in the
// descriptor
PropertyQueryMaxDefined // use to validate the value
} STORAGE_QUERY_TYPE,
*PSTORAGE_QUERY_TYPE;
typedef enum _STORAGE_PROPERTY_ID {
StorageDeviceProperty = 0,
StorageAdapterProperty,
StorageDeviceIdProperty,
StorageDeviceUniqueIdProperty, // See storduid.h for details
StorageDeviceWriteCacheProperty,
StorageMiniportProperty,
StorageAccessAlignmentProperty,
StorageDeviceSeekPenaltyProperty,
StorageDeviceTrimProperty,
StorageDeviceWriteAggregationProperty,
StorageDeviceDeviceTelemetryProperty,
StorageDeviceLBProvisioningProperty,
StorageDevicePowerProperty,
StorageDeviceCopyOffloadProperty,
StorageDeviceResiliencyProperty,
StorageDeviceMediumProductType,
StorageAdapterRpmbProperty,
StorageAdapterCryptoProperty,
StorageDeviceIoCapabilityProperty = 48,
StorageAdapterProtocolSpecificProperty,
StorageDeviceProtocolSpecificProperty,
StorageAdapterTemperatureProperty,
StorageDeviceTemperatureProperty,
StorageAdapterPhysicalTopologyProperty,
StorageDevicePhysicalTopologyProperty,
StorageDeviceAttributesProperty,
StorageDeviceManagementStatus,
StorageAdapterSerialNumberProperty,
StorageDeviceLocationProperty,
StorageDeviceNumaProperty,
StorageDeviceZonedDeviceProperty,
StorageDeviceUnsafeShutdownCount,
StorageDeviceEnduranceProperty,
StorageDeviceLedStateProperty,
StorageDeviceSelfEncryptionProperty = 64,
StorageFruIdProperty,
} STORAGE_PROPERTY_ID,
*PSTORAGE_PROPERTY_ID;
typedef struct _STORAGE_PROPERTY_QUERY {
STORAGE_PROPERTY_ID PropertyId;
STORAGE_QUERY_TYPE QueryType;
UCHAR AdditionalParameters[1];
} STORAGE_PROPERTY_QUERY, *PSTORAGE_PROPERTY_QUERY;
typedef struct _STORAGE_DEVICE_DESCRIPTOR {
ULONG Version;
ULONG Size;
UCHAR DeviceType;
UCHAR DeviceTypeModifier;
BOOLEAN RemovableMedia;
BOOLEAN CommandQueueing;
ULONG VendorIdOffset;
ULONG ProductIdOffset;
ULONG ProductRevisionOffset;
ULONG SerialNumberOffset;
STORAGE_BUS_TYPE BusType;
ULONG RawPropertiesLength;
UCHAR RawDeviceProperties[1];
} STORAGE_DEVICE_DESCRIPTOR, *PSTORAGE_DEVICE_DESCRIPTOR;
/* Prototypes for exported-but-undocumented kernel routines resolved at
 * link/runtime. NOTE(review): these signatures are reverse-engineered and
 * may differ between Windows versions — verify before calling. */
NTKERNELAPI
BOOLEAN
ExEnumHandleTable(__in PHANDLE_TABLE HandleTable,
__in EX_ENUMERATE_HANDLE_ROUTINE EnumHandleProcedure,
__in PVOID EnumParameter,
__out_opt PHANDLE Handle);
NTKERNELAPI
POBJECT_TYPE
NTAPI
ObGetObjectType(_In_ PVOID Object);
typedef struct _EX_PUSH_LOCK_WAIT_BLOCK* PEX_PUSH_LOCK_WAIT_BLOCK;
NTKERNELAPI
VOID FASTCALL
ExfUnblockPushLock(_Inout_ PEX_PUSH_LOCK PushLock,
_Inout_opt_ PEX_PUSH_LOCK_WAIT_BLOCK WaitBlock);
LPCSTR
NTSYSAPI
NTAPI
PsGetProcessImageFileName(PEPROCESS Process);
EXTERN_C
VOID
KeInitializeAffinityEx(PKAFFINITY_EX affinity);
EXTERN_C
VOID
KeAddProcessorAffinityEx(PKAFFINITY_EX affinity, INT num);
/* Sends an NMI to every processor in the affinity set. */
EXTERN_C
VOID
HalSendNMI(PKAFFINITY_EX affinity);
NTSTATUS
RtlQueryModuleInformation(ULONG* InformationLength,
ULONG SizePerModule,
PVOID InformationBuffer);
NTSTATUS
NTAPI
NtSetInformationProcess(_In_ HANDLE ProcessHandle,
_In_ PROCESSINFOCLASS ProcessInformationClass,
_In_ PVOID ProcessInformation,
_In_ ULONG ProcessInformationLength);
NTSYSAPI
ULONG
NTAPI
KeCapturePersistentThreadState(__in PCONTEXT Context,
__in_opt PKTHREAD Thread,
__in ULONG BugCheckCode,
__in ULONG_PTR BugCheckParameter1,
__in ULONG_PTR BugCheckParameter2,
__in ULONG_PTR BugCheckParameter3,
__in ULONG_PTR BugCheckParameter4,
__in PDUMP_HEADER DumpHeader);
BOOLEAN NTAPI
RtlDosPathNameToRelativeNtPathName_U(_In_ PCWSTR DosFileName,
_Out_ PUNICODE_STRING NtFileName,
_Out_opt_ PWSTR* FilePath,
_Out_opt_ PRTL_RELATIVE_NAME RelativeName);
/* APC routine types and KeInitializeApc/KeInsertQueueApc prototypes; these
 * mirror the definitions used internally by the kernel's APC machinery. */
typedef _Function_class_(KNORMAL_ROUTINE) _IRQL_requires_(PASSIVE_LEVEL)
_IRQL_requires_same_
VOID NTAPI
KNORMAL_ROUTINE(_In_opt_ PVOID NormalContext,
_In_opt_ PVOID SystemArgument1,
_In_opt_ PVOID SystemArgument2);
typedef KNORMAL_ROUTINE* PKNORMAL_ROUTINE;
typedef _Function_class_(KRUNDOWN_ROUTINE) _IRQL_requires_(PASSIVE_LEVEL)
_IRQL_requires_same_
VOID NTAPI
KRUNDOWN_ROUTINE(_In_ PRKAPC Apc);
typedef KRUNDOWN_ROUTINE* PKRUNDOWN_ROUTINE;
typedef _Function_class_(KKERNEL_ROUTINE) _IRQL_requires_(APC_LEVEL)
_IRQL_requires_same_
VOID NTAPI
KKERNEL_ROUTINE(_In_ PRKAPC Apc,
_Inout_ _Deref_pre_maybenull_ PKNORMAL_ROUTINE* NormalRoutine,
_Inout_ _Deref_pre_maybenull_ PVOID* NormalContext,
_Inout_ _Deref_pre_maybenull_ PVOID* SystemArgument1,
_Inout_ _Deref_pre_maybenull_ PVOID* SystemArgument2);
typedef KKERNEL_ROUTINE* PKKERNEL_ROUTINE;
typedef enum _KAPC_ENVIRONMENT {
OriginalApcEnvironment,
AttachedApcEnvironment,
CurrentApcEnvironment,
InsertApcEnvironment
} KAPC_ENVIRONMENT,
*PKAPC_ENVIRONMENT;
NTKERNELAPI
VOID NTAPI
KeInitializeApc(_Out_ PRKAPC Apc,
_In_ PRKTHREAD Thread,
_In_ KAPC_ENVIRONMENT Environment,
_In_ PKKERNEL_ROUTINE KernelRoutine,
_In_opt_ PKRUNDOWN_ROUTINE RundownRoutine,
_In_opt_ PKNORMAL_ROUTINE NormalRoutine,
_In_ KPROCESSOR_MODE Mode,
_In_opt_ PVOID NormalContext);
NTSTATUS
NTAPI
MmCopyVirtualMemory(PEPROCESS SourceProcess,
PVOID SourceAddress,
PEPROCESS TargetProcess,
PVOID TargetAddress,
SIZE_T BufferSize,
KPROCESSOR_MODE PreviousMode,
PSIZE_T ReturnSize);
NTKERNELAPI
BOOLEAN
NTAPI
KeInsertQueueApc(_Inout_ PRKAPC Apc,
_In_opt_ PVOID SystemArgument1,
_In_opt_ PVOID SystemArgument2,
_In_ KPRIORITY Increment);
/* Compile-time layout checks: DUMP_HEADER must match what
 * KeCapturePersistentThreadState writes (note the alignment gap between
 * BugCheckCode at 0x38 and BugCheckParameter1 at 0x40). */
C_ASSERT(FIELD_OFFSET(DUMP_HEADER, Signature) == 0);
C_ASSERT(FIELD_OFFSET(DUMP_HEADER, ValidDump) == 4);
C_ASSERT(FIELD_OFFSET(DUMP_HEADER, MajorVersion) == 8);
C_ASSERT(FIELD_OFFSET(DUMP_HEADER, MinorVersion) == 0xc);
C_ASSERT(FIELD_OFFSET(DUMP_HEADER, DirectoryTableBase) == 0x10);
C_ASSERT(FIELD_OFFSET(DUMP_HEADER, PfnDataBase) == 0x18);
C_ASSERT(FIELD_OFFSET(DUMP_HEADER, PsLoadedModuleList) == 0x20);
C_ASSERT(FIELD_OFFSET(DUMP_HEADER, PsActiveProcessHead) == 0x28);
C_ASSERT(FIELD_OFFSET(DUMP_HEADER, MachineImageType) == 0x30);
C_ASSERT(FIELD_OFFSET(DUMP_HEADER, NumberProcessors) == 0x34);
C_ASSERT(FIELD_OFFSET(DUMP_HEADER, BugCheckCode) == 0x38);
C_ASSERT(FIELD_OFFSET(DUMP_HEADER, BugCheckParameter1) == 0x40);
C_ASSERT(FIELD_OFFSET(DUMP_HEADER, BugCheckParameter2) == 0x48);
C_ASSERT(FIELD_OFFSET(DUMP_HEADER, BugCheckParameter3) == 0x50);
C_ASSERT(FIELD_OFFSET(DUMP_HEADER, BugCheckParameter4) == 0x58);
C_ASSERT(FIELD_OFFSET(DUMP_HEADER, KdDebuggerDataBlock) == 0x80);
/* Offset of the debugger data within the captured dump block, and the
 * size of the scratch block passed to KeCapturePersistentThreadState. */
#ifndef _WIN64
# define KDDEBUGGER_DATA_OFFSET 0x1068
#else
# define KDDEBUGGER_DATA_OFFSET 0x2080
#endif
#ifndef _WIN64
# define DUMP_BLOCK_SIZE 0x20000
#else
# define DUMP_BLOCK_SIZE 0x40000
#endif
/* Misc. constants: IA32_GS_BASE MSR number, KPCR/TSS offsets, the machine
 * frame pushed on interrupt entry, DPC broadcast helpers, and the standard
 * process access-right masks. */
#define IA32_GS_BASE 0xc0000101
#define KPCR_TSS_BASE_OFFSET 0x008
#define TSS_IST_OFFSET 0x01c
#define WINDOWS_USERMODE_MAX_ADDRESS 0x00007FFFFFFFFFFF
/* Hardware-pushed interrupt frame layout (x64). */
typedef struct _MACHINE_FRAME {
UINT64 rip;
UINT64 cs;
UINT64 eflags;
UINT64 rsp;
UINT64 ss;
} MACHINE_FRAME, *PMACHINE_FRAME;
NTKERNELAPI
_IRQL_requires_max_(APC_LEVEL)
_IRQL_requires_min_(PASSIVE_LEVEL)
_IRQL_requires_same_
VOID
KeGenericCallDpc(_In_ PKDEFERRED_ROUTINE Routine, _In_opt_ PVOID Context);
NTKERNELAPI
_IRQL_requires_(DISPATCH_LEVEL)
_IRQL_requires_same_
VOID
KeSignalCallDpcDone(_In_ PVOID SystemArgument1);
PEPROCESS
NTAPI
PsGetNextProcess(IN PEPROCESS OldProcess OPTIONAL);
PETHREAD
NTAPI
PsGetNextProcessThread(IN PEPROCESS Process, IN PETHREAD Thread OPTIONAL);
/* Helpers for building KeDelayExecutionThread intervals (100ns units;
 * negative means relative). */
#define ABSOLUTE(wait) (wait)
#define RELATIVE(wait) (-(wait))
#define NANOSECONDS(nanos) (((signed __int64)(nanos)) / 100L)
#define MICROSECONDS(micros) (((signed __int64)(micros)) * NANOSECONDS(1000L))
#define MILLISECONDS(milli) (((signed __int64)(milli)) * MICROSECONDS(1000L))
#define SECONDS(seconds) (((signed __int64)(seconds)) * MILLISECONDS(1000L))
// https://learn.microsoft.com/en-us/windows/win32/procthread/process-security-and-access-rights
#define PROCESS_CREATE_PROCESS 0x0080
#define PROCESS_TERMINATE 0x0001
#define PROCESS_CREATE_THREAD 0x0002
#define PROCESS_QUERY_INFORMATION 0x0400
#define PROCESS_QUERY_LIMITED_INFORMATION 0x1000
#define PROCESS_SET_INFORMATION 0x0200
#define PROCESS_SET_QUOTA 0x0100
#define PROCESS_SUSPEND_RESUME 0x0800
#define PROCESS_VM_OPERATION 0x0008
#define PROCESS_VM_READ 0x0010
#define PROCESS_VM_WRITE 0x0020
/* 64-bit NT headers (Signature == "PE\0\0"). */
typedef struct _NT_HEADER_64 {
UINT32 Signature;
IMAGE_FILE_HEADER FileHeader;
IMAGE_OPTIONAL_HEADER64 OptionalHeader;
} NT_HEADER_64, *PNT_HEADER_64;
#endif
================================================
FILE: driver/containers/map.c
================================================
#include "map.h"
#include "../lib/stdlib.h"
/* Tears down a hashmap created by RtlHashmapCreate.
 *
 * BUGFIX: entries still linked into buckets were previously never returned
 * to the lookaside list — ExDeleteLookasideListEx only releases the list's
 * internal free cache, so every live entry leaked. Drain each bucket first.
 *
 * Caller must guarantee no other thread is using the map (no bucket locks
 * are taken here). */
VOID
RtlHashmapDelete(_In_ PRTL_HASHMAP Hashmap)
{
    PLIST_ENTRY list_head = NULL;
    PLIST_ENTRY list_entry = NULL;
    PLIST_ENTRY next_entry = NULL;
    PRTL_HASHMAP_ENTRY entry = NULL;

    for (UINT32 index = 0; index < Hashmap->bucket_count; index++) {
        list_head = &Hashmap->buckets[index].entry;
        list_entry = list_head->Flink;

        while (list_entry != list_head) {
            /* Capture Flink before the node is freed. */
            next_entry = list_entry->Flink;
            entry = CONTAINING_RECORD(list_entry, RTL_HASHMAP_ENTRY, entry);
            RemoveEntryList(&entry->entry);
            ExFreeToLookasideListEx(&Hashmap->pool, entry);
            list_entry = next_entry;
        }
    }

    ExFreePoolWithTag(Hashmap->buckets, POOL_TAG_HASHMAP);
    ExFreePoolWithTag(Hashmap->locks, POOL_TAG_HASHMAP);
    ExDeleteLookasideListEx(&Hashmap->pool);
}
/* Marks the hashmap inactive so subsequent insert/lookup/delete calls fail
 * fast; called before RtlHashmapDelete to quiesce the map. Note: plain
 * store on a volatile flag — readers may briefly observe the old value. */
VOID
RtlHashmapSetInactive(_Inout_ PRTL_HASHMAP Hashmap)
{
Hashmap->active = FALSE;
}
/* Initialises a caller-allocated RTL_HASHMAP: allocates the bucket array
 * and one guarded mutex per bucket, and sets up a lookaside list from
 * which chained entries are later allocated.
 *
 * Returns STATUS_INVALID_PARAMETER if either callback is NULL,
 * STATUS_INSUFFICIENT_RESOURCES on allocation failure, otherwise the
 * status of ExInitializeLookasideListEx. */
NTSTATUS
RtlHashmapCreate(
_In_ UINT32 BucketCount,
_In_ UINT32 EntryObjectSize,
_In_ HASH_FUNCTION HashFunction,
_In_ COMPARE_FUNCTION CompareFunction,
_In_opt_ PVOID Context,
_Out_ PRTL_HASHMAP Hashmap)
{
NTSTATUS status = STATUS_UNSUCCESSFUL;
/* Per-entry size: common header plus the caller's trailing object. */
UINT32 entry_size = sizeof(RTL_HASHMAP_ENTRY) + EntryObjectSize;
PRTL_HASHMAP_ENTRY entry = NULL;
if (!CompareFunction || !HashFunction)
return STATUS_INVALID_PARAMETER;
/* NOTE(review): BucketCount * entry_size can overflow for large inputs,
 * and the bucket array is indexed by sizeof(RTL_HASHMAP_ENTRY) strides
 * below while being allocated with entry_size strides here — the extra
 * per-bucket object space is never addressed. Confirm intent. */
Hashmap->buckets = ExAllocatePool2(
POOL_FLAG_NON_PAGED,
BucketCount * entry_size,
POOL_TAG_HASHMAP);
if (!Hashmap->buckets)
return STATUS_INSUFFICIENT_RESOURCES;
Hashmap->locks = ExAllocatePool2(
POOL_FLAG_NON_PAGED,
sizeof(KGUARDED_MUTEX) * BucketCount,
POOL_TAG_HASHMAP);
if (!Hashmap->locks) {
ExFreePoolWithTag(Hashmap->buckets, POOL_TAG_HASHMAP);
return STATUS_INSUFFICIENT_RESOURCES;
}
/* Each bucket header doubles as the head of its (initially empty)
 * circular entry list, with a dedicated guarded mutex. */
for (UINT32 index = 0; index < BucketCount; index++) {
entry = &Hashmap->buckets[index];
entry->in_use = FALSE;
InitializeListHead(&entry->entry);
KeInitializeGuardedMutex(&Hashmap->locks[index]);
}
status = ExInitializeLookasideListEx(
&Hashmap->pool,
NULL,
NULL,
NonPagedPoolNx,
0,
entry_size,
POOL_TAG_HASHMAP,
0);
if (!NT_SUCCESS(status)) {
DEBUG_ERROR("ExInitializeLookasideListEx: %x", status);
ExFreePoolWithTag(Hashmap->buckets, POOL_TAG_HASHMAP);
ExFreePoolWithTag(Hashmap->locks, POOL_TAG_HASHMAP);
return status;
}
Hashmap->bucket_count = BucketCount;
Hashmap->hash_function = HashFunction;
Hashmap->compare_function = CompareFunction;
Hashmap->object_size = EntryObjectSize;
Hashmap->active = TRUE;
Hashmap->context = Context;
return STATUS_SUCCESS;
}
/* Scans the bucket chain for an entry that was previously freed in place
 * (in_use == FALSE). If one exists it is claimed (marked in-use) and
 * returned; otherwise NULL. The bucket head itself is never visited. */
FORCEINLINE
STATIC
PRTL_HASHMAP_ENTRY
RtlpHashmapFindUnusedEntry(_In_ PLIST_ENTRY Head)
{
    PRTL_HASHMAP_ENTRY candidate = NULL;
    PLIST_ENTRY link = NULL;

    for (link = Head->Flink; link != Head; link = link->Flink) {
        candidate = CONTAINING_RECORD(link, RTL_HASHMAP_ENTRY, entry);

        if (!candidate->in_use) {
            candidate->in_use = TRUE;
            return candidate;
        }
    }

    return NULL;
}
/* Allocates a fresh chain entry from the map's lookaside list and marks it
 * in-use. Returns NULL on allocation failure. */
FORCEINLINE
STATIC
PRTL_HASHMAP_ENTRY
RtlpHashmapAllocateBucketEntry(_In_ PRTL_HASHMAP Hashmap)
{
    PRTL_HASHMAP_ENTRY allocation =
        ExAllocateFromLookasideListEx(&Hashmap->pool);

    if (allocation)
        allocation->in_use = TRUE;

    return allocation;
}
/* TRUE when Index addresses a valid bucket, FALSE otherwise. */
FORCEINLINE
STATIC
BOOLEAN
RtlpHashmapIsIndexInRange(_In_ PRTL_HASHMAP Hashmap, _In_ UINT32 Index)
{
    if (Index >= Hashmap->bucket_count)
        return FALSE;

    return TRUE;
}
/* Hashes Key into a bucket index and acquires that bucket's guarded mutex.
 * Returns the acquired index, or -1 (STATUS_INVALID_HASHMAP_INDEX) if the
 * hash function produced an out-of-range index — in which case no lock is
 * held and the caller must bail out. */
INT32
RtlHashmapHashKeyAndAcquireBucket(_Inout_ PRTL_HASHMAP Hashmap, _In_ UINT64 Key)
{
    UINT32 bucket_index = Hashmap->hash_function(Key);

    if (RtlpHashmapIsIndexInRange(Hashmap, bucket_index)) {
        KeAcquireGuardedMutex(&Hashmap->locks[bucket_index]);
        return (INT32)bucket_index;
    }

    return -1;
}
/* Releases the bucket mutex acquired by RtlHashmapHashKeyAndAcquireBucket.
 * Index must be the value that routine returned (>= 0). */
VOID
RtlHashmapReleaseBucket(_Inout_ PRTL_HASHMAP Hashmap, _In_ UINT32 Index)
{
/* No index check here, assuming we exit the caller early if we fail on
 * acquisition */
KeReleaseGuardedMutex(&Hashmap->locks[Index]);
}
/* Inserts a new entry into the bucket identified by Index (as returned by
 * RtlHashmapHashKeyAndAcquireBucket) and returns a pointer to the entry's
 * caller-defined object region, or NULL on failure.
 *
 * Assumes the bucket lock is held.
 *
 * BUGFIX: previously a local "index = 0" shadowed the Index parameter, so
 * every insertion landed in bucket 0 regardless of the key's hash.
 * BUGFIX: the reuse path returned the RTL_HASHMAP_ENTRY header instead of
 * the object region, unlike the allocation path — both now return
 * entry->object, matching RtlHashmapEntryLookup's contract. */
PVOID
RtlHashmapEntryInsert(_In_ PRTL_HASHMAP Hashmap, _In_ UINT32 Index)
{
    PLIST_ENTRY list_head = NULL;
    PRTL_HASHMAP_ENTRY entry = NULL;
    PRTL_HASHMAP_ENTRY new_entry = NULL;

    if (!Hashmap->active)
        return NULL;

    list_head = &Hashmap->buckets[Index].entry;

    /* Prefer recycling a previously-deleted slot already in the chain. */
    entry = RtlpHashmapFindUnusedEntry(list_head);
    if (entry)
        return entry->object;

    new_entry = RtlpHashmapAllocateBucketEntry(Hashmap);
    if (!new_entry) {
        DEBUG_ERROR("Failed to allocate new entry");
        return NULL;
    }

    InsertHeadList(list_head, &new_entry->entry);
    return new_entry->object;
}
/* Returns a pointer to the start of the entry's caller-defined data, i.e.
 * &RTL_HASHMAP_ENTRY->object, or NULL if no in-use entry in the bucket
 * matches Compare (per the map's compare_function).
 *
 * Assumes the bucket lock is held.
 *
 * BUGFIX: previously a local "index = 0" shadowed the Index parameter, so
 * only bucket 0 was ever searched.
 * BUGFIX: the old walk followed Flink through the circular list with a
 * "while (entry)" condition that could never become false, looping forever
 * whenever the entry was absent; the traversal now stops when it wraps
 * back to the list head. */
PVOID
RtlHashmapEntryLookup(
    _In_ PRTL_HASHMAP Hashmap, _In_ UINT32 Index, _In_ PVOID Compare)
{
    PLIST_ENTRY list_head = NULL;
    PLIST_ENTRY list_entry = NULL;
    PRTL_HASHMAP_ENTRY entry = NULL;

    if (!Hashmap->active)
        return NULL;

    list_head = &Hashmap->buckets[Index].entry;

    for (list_entry = list_head->Flink; list_entry != list_head;
         list_entry = list_entry->Flink) {
        entry = CONTAINING_RECORD(list_entry, RTL_HASHMAP_ENTRY, entry);

        if (entry->in_use &&
            Hashmap->compare_function(entry->object, Compare))
            return entry->object;
    }

    DEBUG_ERROR("Unable to find entry in hashmap.");
    return NULL;
}
/* Removes the first in-use entry in bucket Index matching Compare,
 * returning it to the lookaside list. Returns TRUE if an entry was
 * deleted, FALSE otherwise.
 *
 * Assumes the bucket lock is held.
 *
 * BUGFIX: previously a local "index = 0" shadowed the Index parameter, so
 * deletions only ever searched bucket 0. The old "entry == list_head"
 * branch was also removed: the loop condition excludes the head, and since
 * the LIST_ENTRY is the first member of RTL_HASHMAP_ENTRY the comparison
 * could never be true — it was unreachable dead code. */
BOOLEAN
RtlHashmapEntryDelete(
    _Inout_ PRTL_HASHMAP Hashmap, _In_ UINT32 Index, _In_ PVOID Compare)
{
    PLIST_ENTRY list_head = NULL;
    PLIST_ENTRY list_entry = NULL;
    PRTL_HASHMAP_ENTRY entry = NULL;

    if (!Hashmap->active)
        return FALSE;

    list_head = &Hashmap->buckets[Index].entry;

    for (list_entry = list_head->Flink; list_entry != list_head;
         list_entry = list_entry->Flink) {
        entry = CONTAINING_RECORD(list_entry, RTL_HASHMAP_ENTRY, entry);

        if (entry->in_use &&
            Hashmap->compare_function(entry->object, Compare)) {
            RemoveEntryList(&entry->entry);
            ExFreeToLookasideListEx(&Hashmap->pool, entry);
            return TRUE;
        }
    }

    return FALSE;
}
/* Invokes EnumerationCallback on every in-use entry's object region,
 * acquiring and releasing each bucket's lock in turn (the caller must NOT
 * already hold any bucket lock, or this deadlocks).
 *
 * BUGFIX: list_head was assigned &Hashmap->buckets[index] — a
 * PRTL_HASHMAP_ENTRY — where a PLIST_ENTRY is required. It only worked
 * because the LIST_ENTRY happens to be the struct's first member; take the
 * embedded list head explicitly instead.
 *
 * NOTE(review): the callback is invoked with entry->object although
 * ENUMERATE_HASHMAP is declared to take PRTL_HASHMAP_ENTRY — the typedef
 * in map.h looks wrong; existing callers receive the object pointer. */
VOID
RtlHashmapEnumerate(
    _In_ PRTL_HASHMAP Hashmap,
    _In_ ENUMERATE_HASHMAP EnumerationCallback,
    _In_opt_ PVOID Context)
{
    PLIST_ENTRY list_head = NULL;
    PLIST_ENTRY list_entry = NULL;
    PRTL_HASHMAP_ENTRY entry = NULL;

    for (UINT32 index = 0; index < Hashmap->bucket_count; index++) {
        KeAcquireGuardedMutex(&Hashmap->locks[index]);

        list_head = &Hashmap->buckets[index].entry;
        list_entry = list_head->Flink;

        while (list_entry != list_head) {
            entry = CONTAINING_RECORD(list_entry, RTL_HASHMAP_ENTRY, entry);

            if (entry->in_use == TRUE)
                EnumerationCallback(entry->object, Context);

            list_entry = list_entry->Flink;
        }

        KeReleaseGuardedMutex(&Hashmap->locks[index]);
    }
}
================================================
FILE: driver/containers/map.h
================================================
#ifndef MAP_H
#define MAP_H

#include "../common.h"

/* To improve efficiency, each entry contains a common header
 * RTL_HASHMAP_ENTRY*, reducing the need to store a seperate pointer to the
 * entrys data. */
typedef struct _RTL_HASHMAP_ENTRY {
    /* Links the entry into its bucket's circular list. The bucket array
     * element itself is the list head and doubles as the first storage
     * slot of the bucket. */
    LIST_ENTRY entry;

    /* Non-zero while this slot holds a live object. */
    UINT32 in_use;

    /* Caller-defined payload of object_size bytes (flexible array member,
     * allocated immediately after the header). */
    CHAR object[];
} RTL_HASHMAP_ENTRY, *PRTL_HASHMAP_ENTRY;

/* Maps a 64-bit key to a bucket index. */
typedef UINT32 (*HASH_FUNCTION)(_In_ UINT64 Key);

/* Struct1 being the node being compared to the value in Struct 2*/
typedef BOOLEAN (*COMPARE_FUNCTION)(_In_ PVOID Struct1, _In_ PVOID Struct2);

typedef struct _RTL_HASHMAP {
    /* Array of RTL_HASHMAP_ENTRIES with length = bucket_count */
    PRTL_HASHMAP_ENTRY buckets;

    /* per bucket locks */
    PKGUARDED_MUTEX locks;

    /* Number of buckets, ideally a prime number */
    UINT32 bucket_count;

    /* Size of each custom object existing after the RTL_HASHMAP_ENTRY */
    UINT32 object_size;

    /* Pointer to caller-designated callback routines */
    HASH_FUNCTION hash_function;
    COMPARE_FUNCTION compare_function;

    /* in the future bucket entries will use this */
    LOOKASIDE_LIST_EX pool;

    /* user allocated context */
    PVOID context;

    /* Cleared by RtlHashmapSetInactive; checked by the entry routines so
     * a torn-down map rejects further operations. */
    volatile UINT32 active;
} RTL_HASHMAP, *PRTL_HASHMAP;

/* Callback for RtlHashmapEnumerate.
 * NOTE(review): the .c enumeration routine passes entry->object, not the
 * PRTL_HASHMAP_ENTRY header this typedef declares - confirm intended type. */
typedef VOID (*ENUMERATE_HASHMAP)(_In_ PRTL_HASHMAP_ENTRY Entry,
                                  _In_opt_ PVOID Context);

/* Returned by RtlHashmapHashKeyAndAcquireBucket on a bad bucket index. */
#define STATUS_INVALID_HASHMAP_INDEX -1

/* Hashmap is caller allocated */
NTSTATUS
RtlHashmapCreate(_In_ UINT32 BucketCount,
                 _In_ UINT32 EntryObjectSize,
                 _In_ HASH_FUNCTION HashFunction,
                 _In_ COMPARE_FUNCTION CompareFunction,
                 _In_opt_ PVOID Context,
                 _Out_ PRTL_HASHMAP Hashmap);

/* Returns a pointer to the new entry's object storage, or NULL. Assumes the
 * bucket lock is held. */
PVOID
RtlHashmapEntryInsert(_In_ PRTL_HASHMAP Hashmap, _In_ UINT32 Index);

/* Returns the matching entry's object, or NULL. Assumes the bucket lock is
 * held. */
PVOID
RtlHashmapEntryLookup(_In_ PRTL_HASHMAP Hashmap,
                      _In_ UINT32 Index,
                      _In_ PVOID Compare);

/* Returns TRUE if a matching entry was removed. Assumes the bucket lock is
 * held. */
BOOLEAN
RtlHashmapEntryDelete(_Inout_ PRTL_HASHMAP Hashmap,
                      _In_ UINT32 Index,
                      _In_ PVOID Compare);

/* Walks every bucket (taking each bucket lock) and invokes the callback for
 * each in-use entry. */
VOID
RtlHashmapEnumerate(_In_ PRTL_HASHMAP Hashmap,
                    _In_ ENUMERATE_HASHMAP EnumerationCallback,
                    _In_opt_ PVOID Context);

VOID
RtlHashmapDelete(_In_ PRTL_HASHMAP Hashmap);

/* Hashes Key, acquires the owning bucket's lock and returns the bucket
 * index, or STATUS_INVALID_HASHMAP_INDEX on failure. */
INT32
RtlHashmapHashKeyAndAcquireBucket(_Inout_ PRTL_HASHMAP Hashmap,
                                  _In_ UINT64 Key);

VOID
RtlHashmapReleaseBucket(_Inout_ PRTL_HASHMAP Hashmap, _In_ UINT32 Index);

VOID
RtlHashmapSetInactive(_Inout_ PRTL_HASHMAP Hashmap);

#endif
================================================
FILE: driver/containers/tree.c
================================================
#include "tree.h"
#include "../lib/stdlib.h"
/*
* Basic red-black tree implementation. Currently, the enumeration routines are
* recursive, which may not be the best idea given the environment this is meant
* for (kernel mode). We can always fix that up later though :).
*
* Example of a Red-Black Tree:
*
* grandparent(B)
* |
* +--------+--------+
* | |
* parent(R) uncle(R)
* |
* +-----+-----+
* | |
* Node(R) sibling(B)
* |
* +-----+
* | |
* child ...
*
* Legend:
* - 'B' represents a Black node
* - 'R' represents a Red node
*
* Labels for components during insert and delete fix-up:
*
* - Node: The newly inserted node that may cause a violation.
* - Parent: The parent of the newly inserted node.
* - Grandparent: The grandparent of the newly inserted node.
* - Uncle: The sibling of the parent node.
* - Sibling: The sibling of the node to be deleted or fixed.
* - Child: The child of the node to be deleted or fixed.
*
* In this example:
* - Each 'B' is a black node.
* - Each 'R' is a red node.
* - The labels illustrate a typical structure that might be encountered during
* the insertion or deletion process, where the new node, its parent,
* grandparent, uncle, sibling, and child are involved in the rebalancing
* operations.
*
* Resources used:
* https://www.kernel.org/doc/Documentation/rbtree.txt
* https://github.com/torvalds/linux/blob/master/lib/rbtree.c
* https://www.osronline.com/article.cfm%5Earticle=516.htm
* https://learn.microsoft.com/en-us/windows-hardware/drivers/ddi/ntddk/ns-ntddk-_rtl_avl_table
* (for structure ideas)
*
*/
/* Interlocked statistics helpers. The counters are declared volatile on
 * RB_TREE so they can be updated without holding the tree lock. */
FORCEINLINE
STATIC
VOID
RtlpRbTreeIncrementInsertionCount(_In_ PRB_TREE Tree)
{
    /* Lifetime total of successful insertions. */
    InterlockedIncrement(&Tree->insertion_count);
}

FORCEINLINE
STATIC
VOID
RtlpRbTreeIncrementDeletionCount(_In_ PRB_TREE Tree)
{
    /* Lifetime total of deletions. */
    InterlockedIncrement(&Tree->deletion_count);
}

FORCEINLINE
STATIC
VOID
RtlpRbTreeIncrementNodeCount(_In_ PRB_TREE Tree)
{
    /* Current number of live nodes. */
    InterlockedIncrement(&Tree->node_count);
}

FORCEINLINE
STATIC
VOID
RtlpRbTreeDecrementNodeCount(_In_ PRB_TREE Tree)
{
    InterlockedDecrement(&Tree->node_count);
}
/* Dumps the tree's address and live counters via verbose debug prints.
 * Lock-free: reads of the volatile counters may be slightly stale. */
VOID
RtlRbTreePrintCurrentStatistics(_In_ PRB_TREE Tree)
{
    DEBUG_VERBOSE("Tree: %llx", (UINT64)Tree);
    DEBUG_VERBOSE("Node count: %lx", Tree->node_count);
    DEBUG_VERBOSE("Insertion count: %lx", Tree->insertion_count);
    DEBUG_VERBOSE("Deletion count: %lx", Tree->deletion_count);
}
/**
 * Initialises a caller allocated RB_TREE structure.
 *
 * > `RB_COMPARE compare`:
 *   - Caller-supplied comparison routine used to order keys in the tree.
 *
 * > `UINT32 object_size`:
 *   - Size of the per-node payload. Each node allocation is
 *     ObjectSize + sizeof(RB_TREE_NODE); this is also the lookaside list
 *     block size.
 *
 * > `LOOKASIDE_LIST_EX pool`:
 *   - Lookaside list providing fast fixed-size allocations for nodes.
 *
 * Returns STATUS_INVALID_PARAMETER on bad arguments, otherwise the status
 * of the lookaside list initialisation.
 */
NTSTATUS
RtlRbTreeCreate(
    _In_ RB_COMPARE Compare, _In_ UINT32 ObjectSize, _Out_ PRB_TREE Tree)
{
    NTSTATUS status = STATUS_UNSUCCESSFUL;

    if (!ARGUMENT_PRESENT(Compare) || ObjectSize == 0)
        return STATUS_INVALID_PARAMETER;

    status = ExInitializeLookasideListEx(
        &Tree->pool,
        NULL,
        NULL,
        NonPagedPoolNx,
        0,
        ObjectSize + sizeof(RB_TREE_NODE),
        POOL_TAG_RB_TREE,
        0);

    if (!NT_SUCCESS(status))
        return status;

    /* BUGFIX: Tree is caller allocated (_Out_) and may contain garbage.
     * root was never cleared, so the first insert could chase a stale
     * pointer; object_size was never stored at all. */
    Tree->root = NULL;
    Tree->object_size = ObjectSize;
    Tree->active = TRUE;

    Tree->compare = Compare;
    Tree->deletion_count = 0;
    Tree->insertion_count = 0;
    Tree->node_count = 0;
    KeInitializeGuardedMutex(&Tree->lock);
    return STATUS_SUCCESS;
}
/* Performs a left rotation about Node to rebalance the tree: Node sinks to
 * become the left child of its former right child ("pivot"), and the
 * pivot's old left subtree becomes Node's new right subtree.
 *
 *      (Node)                      (pivot)
 *      /    \                      /     \
 *    (A)   (pivot)     ->       (Node)   (C)
 *          /     \              /    \
 *        (B)     (C)          (A)    (B)
 */
STATIC
VOID
RtlpRbTreeRotateLeft(_In_ PRB_TREE Tree, _In_ PRB_TREE_NODE Node)
{
    PRB_TREE_NODE pivot = Node->right;
    PRB_TREE_NODE anchor = Node->parent;

    /* Move the pivot's left subtree across to be Node's right subtree. */
    Node->right = pivot->left;
    if (Node->right)
        Node->right->parent = Node;

    /* Hang the pivot where Node used to hang (or make it the root). */
    pivot->parent = anchor;
    if (!anchor)
        Tree->root = pivot;
    else if (anchor->left == Node)
        anchor->left = pivot;
    else
        anchor->right = pivot;

    /* Finally, Node descends to become the pivot's left child. */
    pivot->left = Node;
    Node->parent = pivot;
}
/* Performs a right rotation about Node: the mirror image of
 * RtlpRbTreeRotateLeft. Node sinks to become the right child of its former
 * left child ("pivot"), and the pivot's old right subtree becomes Node's
 * new left subtree.
 *
 *         (Node)                 (pivot)
 *         /    \                 /     \
 *     (pivot)  (C)    ->      (A)     (Node)
 *     /     \                         /    \
 *   (A)     (B)                     (B)    (C)
 */
STATIC
VOID
RtlpRbTreeRotateRight(_In_ PRB_TREE Tree, _In_ PRB_TREE_NODE Node)
{
    PRB_TREE_NODE pivot = Node->left;
    PRB_TREE_NODE anchor = Node->parent;

    /* Move the pivot's right subtree across to be Node's left subtree. */
    Node->left = pivot->right;
    if (Node->left)
        Node->left->parent = Node;

    /* Hang the pivot where Node used to hang (or make it the root). */
    pivot->parent = anchor;
    if (!anchor)
        Tree->root = pivot;
    else if (anchor->right == Node)
        anchor->right = pivot;
    else
        anchor->left = pivot;

    /* Finally, Node descends to become the pivot's right child. */
    pivot->right = Node;
    Node->parent = pivot;
}
/*
 * Restores the red-black invariants after an insertion. The freshly
 * inserted Node is red; the only property that can be violated is
 * "a red node has no red child", which is repaired bottom-up with
 * recolouring and at most two rotations.
 *
 * Example scenario:
 *
 *      (Grandparent)                  (Parent)
 *      /        \                     /       \
 *  (Parent)   (Uncle)     ->      (Node)   (Grandparent)
 *    /                                      /       \
 * (Node)                                (Left)     (Uncle)
 */
STATIC
VOID
RtlpRbTreeFixupInsert(_In_ PRB_TREE Tree, _In_ PRB_TREE_NODE Node)
{
    PRB_TREE_NODE uncle = NULL;
    PRB_TREE_NODE parent = NULL;
    PRB_TREE_NODE grandparent = NULL;

    /* Only a red parent can violate the red-red rule; the root's parent is
     * NULL, which also terminates the climb. */
    while ((parent = Node->parent) && parent->colour == red) {
        grandparent = parent->parent;

        if (parent == grandparent->left) {
            uncle = grandparent->right;

            /* Case 1: red uncle - recolour and continue from grandparent. */
            if (uncle && uncle->colour == red) {
                parent->colour = black;
                uncle->colour = black;
                grandparent->colour = red;
                Node = grandparent;
            }
            else {
                /* Case 2: Node is the inner child - rotate it outward
                 * first so case 3 applies. */
                if (Node == parent->right) {
                    RtlpRbTreeRotateLeft(Tree, parent);
                    Node = parent;
                    parent = Node->parent;
                }

                /* Case 3: recolour and rotate the grandparent down. */
                parent->colour = black;
                grandparent->colour = red;
                RtlpRbTreeRotateRight(Tree, grandparent);
            }
        }
        else {
            /* Mirror image of the three cases above. */
            uncle = grandparent->left;

            if (uncle && uncle->colour == red) {
                parent->colour = black;
                uncle->colour = black;
                grandparent->colour = red;
                Node = grandparent;
            }
            else {
                if (Node == parent->left) {
                    RtlpRbTreeRotateRight(Tree, parent);
                    Node = parent;
                    parent = Node->parent;
                }

                parent->colour = black;
                grandparent->colour = red;
                RtlpRbTreeRotateLeft(Tree, grandparent);
            }
        }
    }

    /* The root is always black. */
    Tree->root->colour = black;
}
/*
 * ASSUMES LOCK IS HELD!
 *
 * Inserts a new node keyed by Key and returns a pointer to the node's
 * object storage for the caller to populate. If a node comparing EQUAL to
 * Key already exists, the fresh allocation is released and the EXISTING
 * node's object is returned instead. Returns NULL only on allocation
 * failure.
 *
 * After the plain BST insertion, RtlpRbTreeFixupInsert restores the
 * red-black properties.
 */
PVOID
RtlRbTreeInsertNode(_In_ PRB_TREE Tree, _In_ PVOID Key)
{
    UINT32 result = 0;
    PRB_TREE_NODE node = NULL;
    PRB_TREE_NODE parent = NULL;
    PRB_TREE_NODE current = NULL;

    /* Fixed-size block of sizeof(RB_TREE_NODE) + object_size. Note the
     * object payload is NOT zeroed here; the caller initialises it. */
    node = ExAllocateFromLookasideListEx(&Tree->pool);
    if (!node)
        return NULL;

    /* New nodes are always inserted red so only the red-red property can
     * be violated, which the fixup routine repairs. */
    node->parent = NULL;
    node->left = NULL;
    node->right = NULL;
    node->colour = red;

    /* Standard BST descent to find the attachment point. */
    current = Tree->root;
    while (current) {
        parent = current;
        result = Tree->compare(Key, current->object);

        if (result == RB_TREE_LESS_THAN) {
            current = current->left;
        }
        else if (result == RB_TREE_GREATER_THAN) {
            current = current->right;
        }
        else {
            ExFreeToLookasideListEx(&Tree->pool, node);
            /* Since we allocate and free a node, no housekeeping regarding
             * stats needs to be done. */
            return current->object;
        }
    }

    /* `result` still holds the comparison against `parent`, which decides
     * the side; an empty tree (parent == NULL) makes the node the root. */
    node->parent = parent;
    if (!parent)
        Tree->root = node;
    else if (result == RB_TREE_LESS_THAN)
        parent->left = node;
    else
        parent->right = node;

    RtlpRbTreeFixupInsert(Tree, node);

    RtlpRbTreeIncrementInsertionCount(Tree);
    RtlpRbTreeIncrementNodeCount(Tree);
    return node->object;
}
/*
 * ASSUMES LOCK IS HELD!
 *
 * Returns the node holding the smallest key in the subtree rooted at Node,
 * i.e. the left-most descendant. Used by the deletion path to locate the
 * in-order successor. Node must not be NULL.
 */
STATIC
PRB_TREE_NODE
RtlpRbTreeMinimum(_In_ PRB_TREE_NODE Node)
{
    PRB_TREE_NODE current = Node;

    /* In an ordered binary tree the minimum lives at the bottom of the
     * left spine. */
    for (; current->left != NULL; current = current->left)
        ;

    return current;
}
/*
 * ASSUMES LOCK IS HELD!
 *
 * Restores the red-black properties after a black node has been removed.
 * Node is the "double black" child that took the removed node's place; the
 * loop pushes the extra blackness up the tree, recolouring and rotating via
 * the sibling until balance is restored.
 *
 * NOTE(review): unlike the textbook (sentinel-based) algorithm, the sibling
 * can legitimately be NULL here, hence the extra NULL guards; when the
 * sibling is NULL the "both nephews black" recolour branch is skipped and
 * the loop re-tests from the same Node - confirm termination for that
 * shape against the insertion invariants.
 */
STATIC
VOID
RtlpRbTreeFixupDelete(_In_ PRB_TREE Tree, _In_ PRB_TREE_NODE Node)
{
    PRB_TREE_NODE sibling = NULL;

    while (Node != Tree->root && Node->colour == black) {
        if (Node == Node->parent->left) {
            sibling = Node->parent->right;

            /* Case 1: red sibling - rotate so the sibling becomes black. */
            if (sibling && sibling->colour == red) {
                sibling->colour = black;
                Node->parent->colour = red;
                RtlpRbTreeRotateLeft(Tree, Node->parent);
                sibling = Node->parent->right;
            }

            /* Case 2: both of the sibling's children are black (NULL counts
             * as black) - recolour and push the problem to the parent. */
            if (sibling && (!sibling->left || sibling->left->colour == black) &&
                (!sibling->right || sibling->right->colour == black)) {
                sibling->colour = red;
                Node = Node->parent;
            }
            else {
                /* Case 3: far nephew black - rotate the sibling to convert
                 * into case 4. */
                if (sibling &&
                    (!sibling->right || sibling->right->colour == black)) {
                    if (sibling->left)
                        sibling->left->colour = black;
                    sibling->colour = red;
                    RtlpRbTreeRotateRight(Tree, sibling);
                    sibling = Node->parent->right;
                }

                /* Case 4: far nephew red - final rotation fixes the black
                 * deficit; terminate by jumping to the root. */
                if (sibling) {
                    sibling->colour = Node->parent->colour;
                    Node->parent->colour = black;
                    if (sibling->right)
                        sibling->right->colour = black;
                    RtlpRbTreeRotateLeft(Tree, Node->parent);
                }
                Node = Tree->root;
            }
        }
        else {
            /* Mirror image of the four cases above. */
            sibling = Node->parent->left;

            if (sibling && sibling->colour == red) {
                sibling->colour = black;
                Node->parent->colour = red;
                RtlpRbTreeRotateRight(Tree, Node->parent);
                sibling = Node->parent->left;
            }

            if (sibling &&
                (!sibling->right || sibling->right->colour == black) &&
                (!sibling->left || sibling->left->colour == black)) {
                sibling->colour = red;
                Node = Node->parent;
            }
            else {
                if (sibling &&
                    (!sibling->left || sibling->left->colour == black)) {
                    if (sibling->right)
                        sibling->right->colour = black;
                    sibling->colour = red;
                    RtlpRbTreeRotateLeft(Tree, sibling);
                    sibling = Node->parent->left;
                }

                if (sibling) {
                    sibling->colour = Node->parent->colour;
                    Node->parent->colour = black;
                    if (sibling->left)
                        sibling->left->colour = black;
                    RtlpRbTreeRotateRight(Tree, Node->parent);
                }
                Node = Tree->root;
            }
        }
    }

    /* Absorb any remaining "extra black". */
    Node->colour = black;
}
/*
 * ASSUMES LOCK IS HELD!
 *
 * Splices the subtree rooted at Replacement into the position currently
 * occupied by Target, updating the downlink from Target's parent (or the
 * tree root) and Replacement's parent pointer. Target's own child links
 * are deliberately left untouched - the caller rewires them.
 *
 *   (Parent)                 (Parent)
 *   /      \        ->       /      \
 * (Target)  S          (Replacement) S
 */
STATIC
VOID
RtlpRbTreeTransplant(
    _In_ PRB_TREE Tree,
    _In_ PRB_TREE_NODE Target,
    _In_ PRB_TREE_NODE Replacement)
{
    PRB_TREE_NODE anchor = Target->parent;

    /* Redirect whichever downlink referenced Target. */
    if (anchor == NULL)
        Tree->root = Replacement;
    else if (anchor->left == Target)
        anchor->left = Replacement;
    else
        anchor->right = Replacement;

    /* Replacement may be NULL when Target had no child on that side. */
    if (Replacement != NULL)
        Replacement->parent = anchor;
}
/* Internal lookup: returns the RB_TREE_NODE matching Key, or NULL. Callers
 * wanting the node's payload should use RtlRbTreeFindNodeObject instead. */
STATIC
PRB_TREE_NODE
RtlpRbTreeFindNode(_In_ PRB_TREE Tree, _In_ PVOID Key)
{
    PRB_TREE_NODE node = Tree->root;

    /* Ordinary BST descent driven by the caller's comparator. */
    while (node != NULL) {
        INT32 order = Tree->compare(Key, node->object);

        if (order == RB_TREE_EQUAL)
            break;

        node = (order == RB_TREE_LESS_THAN) ? node->left : node->right;
    }

    return node;
}
/*
 * ASSUMES LOCK IS HELD!
 *
 * Removes the node matching Key (if any), re-links a replacement into its
 * place, rebalances when a black node was removed, and returns the node's
 * memory to the lookaside pool. No-op when Key is not present.
 *
 * NOTE(review): the rebalance pass is only run when `child` is non-NULL;
 * the classic CLRS algorithm fixes up a NULL (sentinel) child as well.
 * Confirm black-height consistency when a black leaf is removed.
 */
VOID
RtlRbTreeDeleteNode(_In_ PRB_TREE Tree, _In_ PVOID Key)
{
    PRB_TREE_NODE target = NULL;
    PRB_TREE_NODE child = NULL;
    PRB_TREE_NODE successor = NULL;
    COLOUR colour = {0};

    /* We want the node not the object */
    target = RtlpRbTreeFindNode(Tree, Key);
    if (!target)
        return;

    /* `colour` records the colour of the node physically removed from the
     * tree; only a removed BLACK node requires fixup. */
    colour = target->colour;

    if (!target->left) {
        /* At most one (right) child: splice it straight in. */
        child = target->right;
        RtlpRbTreeTransplant(Tree, target, target->right);
    }
    else if (!target->right) {
        /* Only a left child: splice it straight in. */
        child = target->left;
        RtlpRbTreeTransplant(Tree, target, target->left);
    }
    else {
        /* Two children: the in-order successor (minimum of the right
         * subtree) takes target's place and adopts its colour. */
        successor = RtlpRbTreeMinimum(target->right);
        colour = successor->colour;
        child = successor->right;

        if (successor->parent == target) {
            if (child)
                child->parent = successor;
        }
        else {
            /* Detach the successor from deeper in the right subtree before
             * giving it target's right child. */
            RtlpRbTreeTransplant(Tree, successor, successor->right);
            successor->right = target->right;
            successor->right->parent = successor;
        }

        RtlpRbTreeTransplant(Tree, target, successor);
        successor->left = target->left;
        successor->left->parent = successor;
        successor->colour = target->colour;
    }

    if (colour == black && child)
        RtlpRbTreeFixupDelete(Tree, child);

    ExFreeToLookasideListEx(&Tree->pool, target);

    RtlpRbTreeIncrementDeletionCount(Tree);
    RtlpRbTreeDecrementNodeCount(Tree);
}
/* Public API that is used to find the node object for an associated key.
 * Should be used externally when wanting to find an object with a key
 * value. If you are wanting to get the node itself, use the
 * RtlpRbTreeFindNode routine.
 *
 * Returns the matching node's object storage, or NULL when Key is absent.
 * CONSISTENCY: previously duplicated the BST descent inline; now delegates
 * to RtlpRbTreeFindNode so there is a single traversal implementation. */
PVOID
RtlRbTreeFindNodeObject(_In_ PRB_TREE Tree, _In_ PVOID Key)
{
    PRB_TREE_NODE node = RtlpRbTreeFindNode(Tree, Key);
    return node ? node->object : NULL;
}
/* In-order traversal invoking Callback(object, Context) on every node of
 * the subtree rooted at Node.
 *
 * IMPROVEMENT: rewritten iteratively using the parent pointers. The file
 * header notes the recursive enumeration "may not be the best idea" in
 * kernel mode - an unbalanced-depth recursion risks exhausting the small
 * kernel stack; this version uses O(1) stack regardless of tree size. */
STATIC
VOID
RtlpRbTreeEnumerate(
    _In_ PRB_TREE_NODE Node,
    _In_ RB_ENUM_CALLBACK Callback,
    _In_opt_ PVOID Context)
{
    PRB_TREE_NODE current = Node;
    /* Parent of the subtree root - the ascent must never climb past it. */
    PRB_TREE_NODE boundary = Node ? Node->parent : NULL;
    PRB_TREE_NODE parent = NULL;

    if (!current)
        return;

    /* Start at the left-most (smallest) node of the subtree. */
    while (current->left)
        current = current->left;

    while (current && current != boundary) {
        Callback(current->object, Context);

        if (current->right) {
            /* Successor is the minimum of the right subtree. */
            current = current->right;
            while (current->left)
                current = current->left;
        }
        else {
            /* Climb until we leave a left child behind; stopping at the
             * boundary ends the traversal. */
            parent = current->parent;
            while (parent != boundary && current == parent->right) {
                current = parent;
                parent = parent->parent;
            }
            current = parent;
        }
    }
}
/* Public enumeration entry point: acquires the tree lock and performs an
 * in-order walk, invoking Callback(object, Context) per node.
 *
 * BUGFIX: the root was previously tested for NULL BEFORE acquiring the
 * lock (a time-of-check/time-of-use race against concurrent insert or
 * delete); the check now happens under the lock. */
VOID
RtlRbTreeEnumerate(
    _In_ PRB_TREE Tree, _In_ RB_ENUM_CALLBACK Callback, _In_opt_ PVOID Context)
{
    RtlRbTreeAcquireLock(Tree);

    if (Tree->root != NULL)
        RtlpRbTreeEnumerate(Tree->root, Callback, Context);

    RtlRbTreeReleaselock(Tree);
}
/* Recursively prints each node's key (assumed to be a HANDLE-sized value at
 * the start of the object payload) and colour, in ascending key order. */
STATIC
VOID
RtlpPrintInOrder(PRB_TREE_NODE Node)
{
    if (!Node)
        return;

    /* Left subtree, then this node, then right subtree. */
    RtlpPrintInOrder(Node->left);

    DbgPrintEx(
        DPFLTR_DEFAULT_ID,
        DPFLTR_INFO_LEVEL,
        "Node: Key=%p, Color=%s\n",
        *((PHANDLE)Node->object),
        (Node->colour == red) ? "Red" : "Black");

    RtlpPrintInOrder(Node->right);
}
/* Debug aid: dumps every node (key + colour) in ascending order, bracketed
 * by banner lines. Takes the tree lock for the duration of the walk. */
VOID
RtlRbTreeInOrderPrint(_In_ PRB_TREE Tree)
{
    DEBUG_ERROR("*************************************************");
    DEBUG_ERROR("<><><><>STARTING IN ORDER PRINT <><><><><><");

    RtlRbTreeAcquireLock(Tree);
    RtlpPrintInOrder(Tree->root);
    RtlRbTreeReleaselock(Tree);

    DEBUG_ERROR("<><><><>ENDING IN ORDER PRINT <><><><><><");
    DEBUG_ERROR("*************************************************");
}
/* Frees every node in the subtree rooted at Node back to the lookaside
 * pool. Post-order: both children must be released before their parent,
 * since freeing the parent invalidates the child links. */
STATIC
VOID
RtlpRbTreeDeleteSubtree(_In_ PRB_TREE Tree, _In_ PRB_TREE_NODE Node)
{
    if (!Node)
        return;

    RtlpRbTreeDeleteSubtree(Tree, Node->left);
    RtlpRbTreeDeleteSubtree(Tree, Node->right);

    ExFreeToLookasideListEx(&Tree->pool, Node);
}
/* Tears the tree down: marks it inactive, frees every node back to the
 * lookaside pool and deletes the pool itself.
 *
 * BUGFIX: Tree->root is now cleared after the subtree is freed - it
 * previously remained a dangling pointer into the destroyed pool. */
VOID
RtlRbTreeDeleteTree(_In_ PRB_TREE Tree)
{
    /* Flag first so new users observing `active` back off. */
    Tree->active = FALSE;

    RtlRbTreeAcquireLock(Tree);
    RtlpRbTreeDeleteSubtree(Tree, Tree->root);
    Tree->root = NULL;
    ExDeleteLookasideListEx(&Tree->pool);
    RtlRbTreeReleaselock(Tree);
}
================================================
FILE: driver/containers/tree.h
================================================
#ifndef TREE_H
#define TREE_H

#include "../common.h"

/* Result codes returned by the caller-supplied RB_COMPARE routine. */
#define RB_TREE_EQUAL        0
#define RB_TREE_LESS_THAN    1
#define RB_TREE_GREATER_THAN 2

typedef enum _COLOUR { red, black } COLOUR;

typedef struct _RB_TREE_NODE {
    struct _RB_TREE_NODE* parent;
    struct _RB_TREE_NODE* left;
    struct _RB_TREE_NODE* right;
    COLOUR colour;

    /* Caller-defined payload of object_size bytes (flexible array member,
     * allocated in the same lookaside block as the node header). */
    CHAR object[];
} RB_TREE_NODE, *PRB_TREE_NODE;

/* Compares Key against a node's object; returns one of the RB_TREE_*
 * codes above. */
typedef UINT32 (*RB_COMPARE)(_In_ PVOID Key, _In_ PVOID Object);

typedef struct _RB_TREE {
    PRB_TREE_NODE root;

    /* Guards all structural access; see RtlRbTreeAcquireLock below. */
    KGUARDED_MUTEX lock;

    /* Caller-supplied ordering routine. */
    RB_COMPARE compare;

    /* Fixed-size allocator for nodes: block size is
     * sizeof(RB_TREE_NODE) + object_size. */
    LOOKASIDE_LIST_EX pool;

    /* Size of the per-node payload stored in RB_TREE_NODE.object. */
    UINT32 object_size;

    UINT32 active;

    /* Statistics; updated with interlocked operations. */
    volatile UINT32 node_count;
    volatile UINT32 insertion_count;
    volatile UINT32 deletion_count;
} RB_TREE, *PRB_TREE;

typedef VOID (*RB_CALLBACK)(PRB_TREE_NODE Node);

/* Enumeration callback: receives the node's object payload, not the node. */
typedef VOID (*RB_ENUM_CALLBACK)(_In_ PVOID Object, _In_opt_ PVOID Context);

/* Inserts a node for Key and returns its object storage; returns the
 * existing object if Key is already present, NULL on allocation failure.
 * Assumes the tree lock is held. */
PVOID
RtlRbTreeInsertNode(_In_ PRB_TREE Tree, _In_ PVOID Key);

/* Initialises a caller-allocated RB_TREE. */
NTSTATUS
RtlRbTreeCreate(_In_ RB_COMPARE Compare,
                _In_ UINT32 ObjectSize,
                _Out_ PRB_TREE Tree);

/* Removes the node matching Key, if present. Assumes the lock is held. */
VOID
RtlRbTreeDeleteNode(_In_ PRB_TREE Tree, _In_ PVOID Key);

/* Returns the object for Key, or NULL. */
PVOID
RtlRbTreeFindNodeObject(_In_ PRB_TREE Tree, _In_ PVOID Key);

/* In-order enumeration; acquires the tree lock internally. */
VOID
RtlRbTreeEnumerate(_In_ PRB_TREE Tree,
                   _In_ RB_ENUM_CALLBACK Callback,
                   _In_opt_ PVOID Context);

#define ENUMERATE_THREADS(callback, context) \
    RtlRbTreeEnumerate(GetThreadTree(), callback, context)

/* Frees every node and deletes the lookaside pool. */
VOID
RtlRbTreeDeleteTree(_In_ PRB_TREE Tree);

/* Debug: prints every node's key and colour in order. */
VOID
RtlRbTreeInOrderPrint(_In_ PRB_TREE Tree);

FORCEINLINE
STATIC
VOID
RtlRbTreeAcquireLock(_Inout_ PRB_TREE Tree)
{
    KeAcquireGuardedMutex(&Tree->lock);
}

FORCEINLINE
STATIC
VOID
RtlRbTreeReleaselock(_Inout_ PRB_TREE Tree)
{
    KeReleaseGuardedMutex(&Tree->lock);
}

/* Debug: dumps the tree's statistics counters. */
VOID
RtlRbTreePrintCurrentStatistics(_In_ PRB_TREE Tree);

#endif
================================================
FILE: driver/cpp.hint
================================================
// Hint files help the Visual Studio IDE interpret Visual C++ identifiers
// such as names of functions and macros.
// For more information see https://go.microsoft.com/fwlink/?linkid=865984
#define _Inout_ _SAL2_Source_(_Inout_, (), _Prepost_valid_)
#define _Inout_
#define _In_ _SAL2_Source_(_In_, (), _Pre1_impl_(__notnull_impl_notref) _Pre_valid_impl_ _Deref_pre1_impl_(__readaccess_impl_notref))
#define _In_
#define STATIC
#define VOID
#define INLINE
================================================
FILE: driver/crypt.c
================================================
#include "crypt.h"
#include "driver.h"
#include "imports.h"
#include "lib/stdlib.h"
#include "session.h"
#include "types/tpm20.h"
#include "types/tpmptp.h"
#include "util.h"
#include <immintrin.h>
#include <bcrypt.h>
/* Produces a 64-bit pseudo-random key by stitching together two 32-bit
 * values from RtlRandomEx. Seed is advanced by each call. Not
 * cryptographically strong - used for XOR obfuscation keys only. */
FORCEINLINE
STATIC
UINT64
CryptGenerateRandomKey64(_In_ PUINT32 Seed)
{
    UINT64 high = RtlRandomEx(Seed);
    UINT64 low = RtlRandomEx(Seed);

    return (high << 32) | low;
}
/* Builds a 256-bit XOR key from four 64-bit pseudo-random lanes, seeding
 * the generator from the timestamp counter. */
STATIC
__m256i
CryptXorKeyGenerate_m256i()
{
    UINT32 seed = (UINT32)__rdtsc();
    UINT64 lanes[4] = {0};

    for (UINT32 index = 0; index < 4; index++)
        lanes[index] = CryptGenerateRandomKey64(&seed);

    return _mm256_set_epi64x(lanes[0], lanes[1], lanes[2], lanes[3]);
}
/* Returns a single 64-bit XOR key seeded from the timestamp counter. */
UINT64
CryptXorKeyGenerate_uint64()
{
    UINT32 tsc_seed = (UINT32)__rdtsc();
    UINT64 key = CryptGenerateRandomKey64(&tsc_seed);

    return key;
}
/* Generates a fresh 256-bit imports key and XOR-encrypts the imports array
 * in place with it, 32 bytes (4 entries) at a time.
 *
 * BUGFIX: the `&current_block` argument had been mangled (corrupted
 * identifier) in the previous revision; restored so each block is copied
 * into an aligned local before the SIMD load.
 *
 * NOTE: only Entries / 4 full blocks are processed - any trailing
 * Entries % 4 entries are left unencrypted by this loop. */
VOID
CryptEncryptImportsArray(_In_ PUINT64 Array, _In_ UINT32 Entries)
{
    __m256i* imports_key = GetDriverImportsKey();
    UINT32 block_size = sizeof(__m256i) / sizeof(UINT64);
    UINT32 block_count = Entries / block_size;

    *imports_key = CryptXorKeyGenerate_m256i();

    /*
     * Here we break down the import array into blocks of 32 bytes. Each
     * block is loaded into an SSE register, xored with the key, and then
     * copied back into the array.
     */
    for (UINT32 block_index = 0; block_index < block_count; block_index++) {
        __m256i current_block = {0};
        __m256i load_block = {0};
        __m256i xored_block = {0};

        /* Copy through a local so the (possibly unaligned) array memory is
         * never loaded directly. */
        IntCopyMemory(
            &current_block,
            &Array[block_index * block_size],
            sizeof(__m256i));

        load_block = _mm256_loadu_si256(&current_block);
        xored_block = _mm256_xor_si256(load_block, *imports_key);

        IntCopyMemory(
            &Array[block_index * block_size],
            &xored_block,
            sizeof(__m256i));
    }
}
/* Decrypts (XORs) one 32-byte block of the imports array with the
 * driver-wide imports key and returns the plaintext block by value; the
 * array itself is left encrypted. */
STATIC
INLINE
__m256i
CryptDecryptImportBlock(_In_ PUINT64 Array, _In_ UINT32 BlockIndex)
{
    __m256i load_block = {0};
    __m256i* imports_key = GetDriverImportsKey();
    UINT32 block_size = sizeof(__m256i) / sizeof(UINT64);

    /* Copy through a local so unaligned array memory is never loaded
     * directly into a YMM register. */
    IntCopyMemory(
        &load_block,
        &Array[BlockIndex * block_size],
        sizeof(__m256i));

    /* XOR is its own inverse: applying the key again yields plaintext. */
    return _mm256_xor_si256(load_block, *imports_key);
}
/* Maps a flat array index onto (block index, index within block) for a
 * fixed BlockSize (entries per SIMD block).
 *
 * IMPROVEMENT: the previous implementation walked EntryIndex down to the
 * preceding multiple of BlockSize one step at a time - that is exactly
 * integer division and modulo, computed here in O(1). A BlockSize of 0
 * (which previously caused undefined behaviour) now yields (0, 0). */
FORCEINLINE
INLINE
VOID
CryptFindContainingBlockForArrayIndex(
    _In_ UINT32 EntryIndex,
    _In_ UINT32 BlockSize,
    _Out_ PUINT32 ContainingBlockIndex,
    _Out_ PUINT32 BlockSubIndex)
{
    if (BlockSize == 0) {
        *ContainingBlockIndex = 0;
        *BlockSubIndex = 0;
        return;
    }

    *ContainingBlockIndex = EntryIndex / BlockSize;
    *BlockSubIndex = EntryIndex % BlockSize;
}
/* Decrypts and returns the single 64-bit import pointer at EntryIndex
 * without decrypting the rest of the array: locates the containing 32-byte
 * block, XOR-decrypts just that block into registers, then extracts the
 * requested 64-bit lane. */
UINT64
CryptDecryptImportsArrayEntry(
    _In_ PUINT64 Array, _In_ UINT32 Entries, _In_ UINT32 EntryIndex)
{
    __m256i original_block = {0};
    __m128i original_half = {0};
    UINT32 block_size = sizeof(__m256i) / sizeof(UINT64);
    UINT32 containing_block_index = 0;
    UINT32 block_sub_index = 0;
    UINT64 pointer = 0;

    /* Which 4-entry block holds EntryIndex, and at what offset within it. */
    CryptFindContainingBlockForArrayIndex(
        EntryIndex,
        block_size,
        &containing_block_index,
        &block_sub_index);

    original_block = CryptDecryptImportBlock(Array, containing_block_index);

    /* _mm_extract_epi64 requires a compile-time lane index, hence the
     * branching over the four possible sub-indices: lanes 0-1 live in the
     * low 128-bit half, lanes 2-3 in the high half. */
    if (block_sub_index < 2) {
        original_half = _mm256_extracti128_si256(original_block, 0);

        if (block_sub_index < 1)
            pointer = _mm_extract_epi64(original_half, 0);
        else
            pointer = _mm_extract_epi64(original_half, 1);
    }
    else {
        original_half = _mm256_extracti128_si256(original_block, 1);

        if (block_sub_index == 2)
            pointer = _mm_extract_epi64(original_half, 0);
        else
            pointer = _mm_extract_epi64(original_half, 1);
    }

    return pointer;
}
/* Allocates and populates a BCRYPT_KEY_DATA_BLOB for importing the
 * session's AES-256 key via BCryptImportKey. The key bytes follow the
 * header contiguously. Caller frees with POOL_TAG_CRYPT. Returns NULL on
 * allocation failure.
 *
 * IMPROVEMENT: the key destination was computed with integer arithmetic
 * ((UINT64)blob + size); now uses byte-pointer arithmetic, which is the
 * type-correct equivalent. */
STATIC
PBCRYPT_KEY_DATA_BLOB_HEADER
CryptBuildBlobForKeyImport(_In_ PACTIVE_SESSION Session)
{
    PBCRYPT_KEY_DATA_BLOB_HEADER blob = ExAllocatePool2(
        POOL_FLAG_NON_PAGED,
        sizeof(BCRYPT_KEY_DATA_BLOB_HEADER) + AES_256_KEY_SIZE,
        POOL_TAG_CRYPT);

    if (!blob)
        return NULL;

    blob->dwMagic = BCRYPT_KEY_DATA_BLOB_MAGIC;
    blob->dwVersion = BCRYPT_KEY_DATA_BLOB_VERSION1;
    blob->cbKeyData = AES_256_KEY_SIZE;

    /* Key material sits immediately after the header. */
    IntCopyMemory(
        (PUCHAR)blob + sizeof(BCRYPT_KEY_DATA_BLOB_HEADER),
        Session->aes_key,
        AES_256_KEY_SIZE);

    return blob;
}
#define AES_256_BLOCK_SIZE 16

/* Rounds BufferLength up to the next multiple of the AES block size (16
 * bytes) - the ciphertext buffer size block-mode BCryptEncrypt requires.
 *
 * IMPROVEMENT: removed a large block of dead, commented-out BCryptEncrypt
 * probing code; the arithmetic below is the whole contract. */
UINT32
CryptRequestRequiredBufferLength(_In_ UINT32 BufferLength)
{
    return (BufferLength + AES_256_BLOCK_SIZE - 1) / AES_256_BLOCK_SIZE *
           AES_256_BLOCK_SIZE;
}
/* Encrypts in place! The first AES_256_BLOCK_SIZE bytes of Buffer are left
 * as plaintext; everything after is AES-encrypted with the session key.
 *
 * BUGFIX: BufferLength <= AES_256_BLOCK_SIZE previously underflowed the
 * unsigned subtraction below and handed BCryptEncrypt an enormous length;
 * such buffers are now rejected up front. */
NTSTATUS
CryptEncryptBuffer(_In_ PVOID Buffer, _In_ UINT32 BufferLength)
{
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    UINT32 data_copied = 0;
    PACTIVE_SESSION session = GetActiveSession();
    UCHAR local_iv[sizeof(session->iv)] = {0};
    UINT64 buffer = (UINT64)Buffer;
    UINT32 length = BufferLength;

    if (BufferLength <= AES_256_BLOCK_SIZE)
        return STATUS_BUFFER_TOO_SMALL;

    /* The IV is consumed during every encrypt / decrypt procedure, so to ensure
     * we have access to the iv we need to create a local copy.*/
    IntCopyMemory(local_iv, session->iv, sizeof(session->iv));

    /* We arent encrypting the first 16 bytes */
    buffer = buffer + AES_256_BLOCK_SIZE;
    length = length - AES_256_BLOCK_SIZE;

    /* Input and output aliases the same region: in-place encryption. */
    status = BCryptEncrypt(
        session->key_handle,
        buffer,
        length,
        NULL,
        local_iv,
        sizeof(local_iv),
        buffer,
        length,
        &data_copied,
        0);

    if (!NT_SUCCESS(status))
        DEBUG_ERROR("CryptEncryptBuffer -> BCryptEncrypt: %x", status);

    return status;
}
/* Lock is held by the caller.
 *
 * Tears down the session's AES material: destroys the BCrypt key handle,
 * frees the backing key-object buffer and resets its recorded length.
 * Each pointer is NULLed after release so repeated calls are harmless. */
VOID
CryptCloseSessionCryptObjects()
{
    PACTIVE_SESSION session = GetActiveSession();

    if (session->key_handle != NULL) {
        BCryptDestroyKey(session->key_handle);
        session->key_handle = NULL;
    }

    if (session->key_object != NULL) {
        ExFreePoolWithTag(session->key_object, POOL_TAG_CRYPT);
        session->key_object = NULL;
    }

    session->key_object_length = 0;
}
/* Imports the session's AES-256 key into the BCrypt AES provider: queries
 * the provider's key-object size, allocates the backing buffer, and
 * performs BCryptImportKey from a temporary key-data blob.
 *
 * BUGFIX: on BCryptImportKey failure the key_object buffer was freed but
 * session->key_object was left dangling, inviting a double free from
 * CryptCloseSessionCryptObjects; the pointer and length are now reset. */
NTSTATUS
CryptInitialiseSessionCryptObjects()
{
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    UINT32 data_copied = 0;
    PACTIVE_SESSION session = GetActiveSession();
    PBCRYPT_KEY_DATA_BLOB_HEADER blob = NULL;
    BCRYPT_ALG_HANDLE* handle = GetCryptHandle_AES();

    blob = CryptBuildBlobForKeyImport(session);
    if (!blob)
        return STATUS_INSUFFICIENT_RESOURCES;

    /* How large must the provider's opaque key object be? */
    status = BCryptGetProperty(
        *handle,
        BCRYPT_OBJECT_LENGTH,
        &session->key_object_length,
        sizeof(UINT32),
        &data_copied,
        0);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("BCryptGetProperty: %x", status);
        goto end;
    }

    session->key_object = ExAllocatePool2(
        POOL_FLAG_NON_PAGED,
        session->key_object_length,
        POOL_TAG_CRYPT);

    if (!session->key_object) {
        status = STATUS_INSUFFICIENT_RESOURCES;
        goto end;
    }

    DEBUG_INFO(
        "key object: %llx, key_object_length: %lx",
        session->key_object,
        session->key_object_length);

    status = BCryptImportKey(
        *handle,
        NULL,
        BCRYPT_KEY_DATA_BLOB,
        &session->key_handle,
        session->key_object,
        session->key_object_length,
        blob,
        sizeof(BCRYPT_KEY_DATA_BLOB_HEADER) + AES_256_KEY_SIZE,
        0);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("BCryptImportKey: %x", status);
        ExFreePoolWithTag(session->key_object, POOL_TAG_CRYPT);
        session->key_object = NULL;
        session->key_object_length = 0;
        goto end;
    }

end:
    /* The blob only carries the key material into BCryptImportKey; it is
     * never needed afterwards. */
    if (blob)
        ExFreePoolWithTag(blob, POOL_TAG_CRYPT);

    return status;
}
/* Opens the kernel-dispatch AES algorithm provider and stores the handle
 * in the driver-global slot returned by GetCryptHandle_AES. */
NTSTATUS
CryptInitialiseProvider()
{
    BCRYPT_ALG_HANDLE* handle = GetCryptHandle_AES();
    NTSTATUS status = BCryptOpenAlgorithmProvider(
        handle,
        BCRYPT_AES_ALGORITHM,
        NULL,
        BCRYPT_PROV_DISPATCH);

    if (!NT_SUCCESS(status))
        DEBUG_ERROR("BCryptOpenAlgorithmProvider: %x", status);

    return status;
}
VOID
CryptCloseProvider()
{
BCRYPT_ALG_HANDLE* handle = GetCryptHandle_AES();
BCryptCloseAlgorithmProvider(*handle, 0);
}
/*
* Basic TPM EK Extraction implementation. Various sources were used alongside
* the various TPM specification manuals.
*
* https://github.com/tianocore/edk2
* https://github.com/microsoft/ms-tpm-20-ref
* https://github.com/SyncUD/tpm-mmio
*/
#define TPM20_INTEL_BASE_PHYSICAL 0xfed40000
#define TPM20_OBJECT_HANDLE_EK 0x81010001
#define TPM20_PTP_NO_VALID_CHIP 0xFF
STATIC
BOOLEAN
TpmIsPlatformSupported()
{
    PSYSTEM_INFORMATION system = GetDriverConfigSystemInformation();

    /* The fixed PTP MMIO base used below (TPM20_INTEL_BASE_PHYSICAL) is
     * Intel specific, so only GenuineIntel systems are supported. */
    if (system->processor == AuthenticAmd) {
        /* fix: corrected "TpmPlatformSuport" / "process type" typos */
        DEBUG_ERROR(
            "TpmPlatformSupport unavailable on processor type: AuthenticAmd");
        return FALSE;
    }

    if (system->processor == GenuineIntel)
        return TRUE;

    return FALSE;
}
STATIC
NTSTATUS
TpmCheckPtpRegisterPresence(_In_ PVOID Register, _Out_ PUINT32 Result)
{
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    UINT8 chip = 0;

    *Result = FALSE;

    /* Read one byte from the PTP register window; 0xFF
     * (TPM20_PTP_NO_VALID_CHIP) indicates no TPM chip responded. */
    status = MapAndReadPhysical(Register, sizeof(chip), &chip, sizeof(chip));

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("MapAndReadPhysical: %x", status);
        return status;
    }

    if (chip != TPM20_PTP_NO_VALID_CHIP)
        *Result = TRUE;

    return status;
}
FORCEINLINE
STATIC
TPM2_PTP_INTERFACE_TYPE
TpmExtractInterfaceTypeFromCapabilityAndId(
    _In_ PTP_CRB_INTERFACE_IDENTIFIER* Identifier,
    _In_ PTP_FIFO_INTERFACE_CAPABILITY* Capability)
{
    /* Classify the PTP interface: the interface type bits select the
     * family, and the version / capability bits must agree before the
     * corresponding interface is reported. Anything else maps to
     * Tpm2PtpInterfaceMax (unknown). */
    switch (Identifier->Bits.InterfaceType) {
    case PTP_INTERFACE_IDENTIFIER_INTERFACE_TYPE_CRB:
        if (Identifier->Bits.InterfaceVersion ==
                PTP_INTERFACE_IDENTIFIER_INTERFACE_VERSION_CRB &&
            Identifier->Bits.CapCRB != 0)
            return Tpm2PtpInterfaceCrb;
        break;

    case PTP_INTERFACE_IDENTIFIER_INTERFACE_TYPE_FIFO:
        if (Identifier->Bits.InterfaceVersion ==
                PTP_INTERFACE_IDENTIFIER_INTERFACE_VERSION_FIFO &&
            Identifier->Bits.CapFIFO != 0 &&
            Capability->Bits.InterfaceVersion ==
                INTERFACE_CAPABILITY_INTERFACE_VERSION_PTP)
            return Tpm2PtpInterfaceFifo;
        break;

    case PTP_INTERFACE_IDENTIFIER_INTERFACE_TYPE_TIS:
        return Tpm2PtpInterfaceTis;

    default:
        break;
    }

    return Tpm2PtpInterfaceMax;
}
/*
 * Assumes the presence of the register has already been confirmed via
 * TpmCheckPtpRegisterPresence.
 */
STATIC
NTSTATUS
TpmGetPtpInterfaceType(
    _In_ PVOID Register, _Out_ TPM2_PTP_INTERFACE_TYPE* InterfaceType)
{
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    PTP_CRB_INTERFACE_IDENTIFIER id = {0};
    PTP_FIFO_INTERFACE_CAPABILITY cap = {0};

    *InterfaceType = 0;

    /* Read the InterfaceId register of the CRB register layout. */
    status = MapAndReadPhysical(
        (UINT64)&((PTP_CRB_REGISTERS*)Register)->InterfaceId,
        sizeof(id),
        &id,
        sizeof(id));

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("MapAndReadPhysical: %x", status);
        return status;
    }

    /* Read the InterfaceCapability register of the FIFO register layout. */
    status = MapAndReadPhysical(
        (UINT64)&((PTP_FIFO_REGISTERS*)Register)->InterfaceCapability,
        sizeof(cap),
        &cap,
        sizeof(cap));

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("MapAndReadPhysical: %x", status);
        return status;
    }

    *InterfaceType = TpmExtractInterfaceTypeFromCapabilityAndId(&id, &cap);
    return status;
}
NTSTATUS
TpmExtractEndorsementKey()
{
NTSTATUS status = STATUS_UNSUCCESSFUL;
BOOLEAN presence = FALSE;
TPM2_PTP_INTERFACE_TYPE type = {0};
if (!TpmIsPlatformSupported())
return STATUS_NOT_SUPPORTED;
status = TpmCheckPtpRegisterPresence(TPM20_INTEL_BASE_PHYSICAL, &presence);
if (!NT_SUCCESS(status)) {
DEBUG_ERROR("TpmCheckPtpRegisterPresence: %x", status);
return status;
}
if (!presence) {
DEBUG_INFO("TPM2.0 PTP Presence not detected.");
return STATUS_UNSUCCESSFUL;
}
status = TpmGetPtpInterfaceType(TPM20_INTEL_BASE_PHYSICAL, &type);
if (!NT_SUCCESS(status)) {
DEBUG_ERROR("TpmGetPtpInterfaceType: %x", status);
return status;
}
DEBUG_INFO("TPM2.0 PTP Interface Type: %x", (UINT32)type);
return status;
}
/*
 * Compute the SHA-256 digest of Buffer.
 *
 * On success *HashResult receives a POOL_TAG_INTEGRITY allocation holding the
 * digest and *HashResultSize its length; ownership passes to the caller. On
 * failure both outputs are NULL/0 and nothing is leaked.
 */
NTSTATUS
CryptHashBuffer_sha256(
    _In_ PVOID Buffer,
    _In_ ULONG BufferSize,
    _Out_ PVOID* HashResult,
    _Out_ PULONG HashResultSize)
{
    PAGED_CODE();

    NTSTATUS status = STATUS_UNSUCCESSFUL;
    BCRYPT_ALG_HANDLE* algo_handle = GetCryptHandle_Sha256();
    BCRYPT_HASH_HANDLE hash_handle = NULL;
    ULONG bytes_copied = 0;
    ULONG resulting_hash_size = 0;
    ULONG hash_object_size = 0;
    PCHAR hash_object = NULL;
    PCHAR resulting_hash = NULL;

    *HashResult = NULL;
    *HashResultSize = 0;

    /*
     * Request the size of the hash object buffer, this is different then
     * the buffer that will store the resulting hash, instead this will be
     * used to store the hash object used to create the hash.
     */
    status = BCryptGetProperty(
        *algo_handle,
        BCRYPT_OBJECT_LENGTH,
        (PCHAR)&hash_object_size,
        sizeof(ULONG),
        &bytes_copied,
        0);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("BCryptGetProperty failed with status %x", status);
        goto end;
    }

    hash_object = ImpExAllocatePool2(
        POOL_FLAG_NON_PAGED,
        hash_object_size,
        POOL_TAG_INTEGRITY);

    if (!hash_object) {
        status = STATUS_MEMORY_NOT_ALLOCATED;
        goto end;
    }

    /*
     * This call gets the size of the resulting hash, which we will use to
     * allocate the resulting hash buffer.
     */
    status = BCryptGetProperty(
        *algo_handle,
        BCRYPT_HASH_LENGTH,
        (PCHAR)&resulting_hash_size,
        sizeof(ULONG),
        &bytes_copied,
        0);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("BCryptGetProperty failed with status %x", status);
        goto end;
    }

    resulting_hash = ImpExAllocatePool2(
        POOL_FLAG_NON_PAGED,
        resulting_hash_size,
        POOL_TAG_INTEGRITY);

    if (!resulting_hash) {
        status = STATUS_MEMORY_NOT_ALLOCATED;
        goto end;
    }

    /*
     * Here we create our hash object and store it in the hash_object
     * buffer.
     */
    status = BCryptCreateHash(
        *algo_handle,
        &hash_handle,
        hash_object,
        hash_object_size,
        NULL,
        0,
        0);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("BCryptCreateHash failed with status %x", status);
        goto end;
    }

    /*
     * This function hashes the buffer, but does NOT store it in our
     * resulting buffer yet, we need to call BCryptFinishHash to retrieve
     * the final hash.
     */
    status = BCryptHashData(hash_handle, Buffer, BufferSize, 0);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("BCryptHashData failed with status %x", status);
        goto end;
    }

    /*
     * As said in the previous comment, this is where we retrieve the final
     * hash and store it in our output buffer.
     */
    status = BCryptFinishHash(
        hash_handle,
        resulting_hash,
        resulting_hash_size,
        0);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("BCryptFinishHash failed with status %x", status);
        goto end;
    }

    *HashResult = resulting_hash;
    *HashResultSize = resulting_hash_size;

end:
    if (hash_handle)
        BCryptDestroyHash(hash_handle);

    if (hash_object)
        ImpExFreePoolWithTag(hash_object, POOL_TAG_INTEGRITY);

    /* fix: resulting_hash previously leaked whenever a call after its
     * allocation failed; only the success path hands it to the caller. */
    if (!NT_SUCCESS(status) && resulting_hash)
        ImpExFreePoolWithTag(resulting_hash, POOL_TAG_INTEGRITY);

    return status;
}
================================================
FILE: driver/crypt.h
================================================
#ifndef CRYPT_H
#define CRYPT_H
#include "common.h"
#define XOR_ROTATION_AMT 13
FORCEINLINE
VOID
CryptEncryptPointer64(_Inout_ PUINT64 Pointer, _In_ UINT64 Key)
{
    /* XOR with the key, then rotate left by XOR_ROTATION_AMT; the inverse
     * is CryptDecryptPointer64. */
    UINT64 mixed = *Pointer ^ Key;
    *Pointer = _rotl64(mixed, XOR_ROTATION_AMT);
}
FORCEINLINE
VOID
CryptDecryptPointer64(_Inout_ PUINT64 Pointer, _In_ UINT64 Key)
{
    /* Inverse of CryptEncryptPointer64: undo the rotation, then the XOR. */
    UINT64 unrotated = _rotr64(*Pointer, XOR_ROTATION_AMT);
    *Pointer = unrotated ^ Key;
}
/* Decrypt a copy of *Pointer and return it, leaving the stored encrypted
 * value untouched.
 * NOTE(review): temp is volatile, presumably to keep the compiler from
 * caching/propagating the decrypted value in a register or in memory longer
 * than intended - confirm before simplifying. */
FORCEINLINE
UINT64
CryptDecryptPointerOutOfPlace64(_In_ PUINT64 Pointer, _In_ UINT64 Key)
{
    volatile UINT64 temp = *Pointer;
    CryptDecryptPointer64(&temp, Key);
    return temp;
}
VOID
CryptEncryptImportsArray(_In_ PUINT64 Array, _In_ UINT32 Entries);
UINT64
CryptDecryptImportsArrayEntry(_In_ PUINT64 Array,
_In_ UINT32 Entries,
_In_ UINT32 EntryIndex);
NTSTATUS
CryptInitialiseProvider();
UINT32
CryptRequestRequiredBufferLength(_In_ UINT32 BufferLength);
NTSTATUS
CryptEncryptBuffer(_In_ PVOID Buffer, _In_ UINT32 BufferLength);
NTSTATUS
CryptInitialiseSessionCryptObjects();
VOID
CryptCloseSessionCryptObjects();
VOID
CryptCloseProvider();
NTSTATUS
TpmExtractEndorsementKey();
UINT64
CryptXorKeyGenerate_uint64();
VOID
CryptEncryptPointer64(_Inout_ PUINT64 Pointer, _In_ UINT64 Key);
VOID
CryptDecryptPointer64(_Inout_ PUINT64 Pointer, _In_ UINT64 Key);
UINT64
CryptDecryptPointerOutOfPlace64(_In_ PUINT64 Pointer, _In_ UINT64 Key);
NTSTATUS
CryptHashBuffer_sha256(_In_ PVOID Buffer,
_In_ ULONG BufferSize,
_Out_ PVOID* HashResult,
_Out_ PULONG HashResultSize);
#endif
================================================
FILE: driver/driver.c
================================================
#include "driver.h"
#include "apc.h"
#include "callbacks.h"
#include "common.h"
#include "crypt.h"
#include "hv.h"
#include "hw.h"
#include "imports.h"
#include "integrity.h"
#include "io.h"
#include "lib/stdlib.h"
#include "modules.h"
#include "pool.h"
#include "session.h"
#include "thread.h"
#include
STATIC
VOID
DriverUnload(_In_ PDRIVER_OBJECT DriverObject);
_Function_class_(DRIVER_INITIALIZE) _IRQL_requires_same_
NTSTATUS
DriverEntry(
_In_ PDRIVER_OBJECT DriverObject, _In_ PUNICODE_STRING RegistryPath);
STATIC
NTSTATUS
RegistryPathQueryCallbackRoutine(
IN PWSTR ValueName,
IN ULONG ValueType,
IN PVOID ValueData,
IN ULONG ValueLength,
IN PVOID Context,
IN PVOID EntryContext);
STATIC
VOID
DrvUnloadUnregisterObCallbacks();
STATIC
VOID
DrvUnloadFreeConfigStrings();
STATIC
VOID
DrvUnloadFreeThreadList();
STATIC
VOID
DrvUnloadFreeProcessList();
STATIC
NTSTATUS
DrvLoadEnableNotifyRoutines();
STATIC
NTSTATUS
DrvLoadInitialiseDriverConfig(
_In_ PDRIVER_OBJECT DriverObject, _In_ PUNICODE_STRING RegistryPath);
#ifdef ALLOC_PRAGMA
# pragma alloc_text(INIT, DriverEntry)
# pragma alloc_text(PAGE, GetDriverName)
# pragma alloc_text(PAGE, GetDriverPath)
# pragma alloc_text(PAGE, GetDriverRegistryPath)
# pragma alloc_text(PAGE, GetDriverDeviceName)
# pragma alloc_text(PAGE, GetDriverSymbolicLink)
# pragma alloc_text(PAGE, GetDriverConfigSystemInformation)
# pragma alloc_text(PAGE, RegistryPathQueryCallbackRoutine)
# pragma alloc_text(PAGE, DrvUnloadUnregisterObCallbacks)
# pragma alloc_text(PAGE, DrvUnloadFreeConfigStrings)
# pragma alloc_text(PAGE, DrvUnloadFreeThreadList)
/* fix: DrvLoadEnableNotifyRoutines was listed twice */
# pragma alloc_text(PAGE, DrvLoadEnableNotifyRoutines)
# pragma alloc_text(PAGE, DrvLoadInitialiseDriverConfig)
#endif
/* Global driver state. Lives in the device extension created in DriverEntry
 * and is reached through the encrypted g_DriverConfig pointer (see
 * GetDecryptedDriverConfig). */
typedef struct _DRIVER_CONFIG {
    volatile UINT32 nmi_status;                /* manipulated via Interlocked* in Is/UnsetNmiInProgress */
    UNICODE_STRING unicode_driver_name;        /* driver name + ".sys", built from the registry */
    ANSI_STRING ansi_driver_name;              /* ANSI copy of unicode_driver_name */
    PUNICODE_STRING device_name;               /* points at g_DeviceName */
    PUNICODE_STRING device_symbolic_link;      /* points at g_DeviceSymbolicLink */
    UNICODE_STRING driver_path;                /* ImagePath registry value */
    UNICODE_STRING registry_path;
    SYSTEM_INFORMATION system_information;     /* cpu vendor, smbios serials, os version */
    PVOID apc_contexts[MAXIMUM_APC_CONTEXTS];
    PDRIVER_OBJECT driver_object;
    PDEVICE_OBJECT device_object;
    volatile BOOLEAN unload_in_progress;       /* set in DriverUnload; read via IsDriverUnloading */
    KGUARDED_MUTEX lock;                       /* guards config access; see Acquire/ReleaseDriverConfigLock */
    SYS_MODULE_VAL_CONTEXT sys_val_context;
    IRP_QUEUE_HEAD irp_queue;
    TIMER_OBJECT integrity_check_timer;
    ACTIVE_SESSION session_information;
    RB_TREE thread_tree;
    DRIVER_LIST_HEAD driver_list;
    RTL_HASHMAP process_hashmap;
    SHARED_MAPPING mapping;
    BOOLEAN has_driver_loaded;                 /* set once DriverEntry completes */
    BCRYPT_ALG_HANDLE aes_hash;                /* AES provider (CryptInitialiseProvider) */
    BCRYPT_ALG_HANDLE sha256_hash;             /* SHA-256 provider (InitialiseHashingAlgorithmProvider) */
} DRIVER_CONFIG, *PDRIVER_CONFIG;
UNICODE_STRING g_DeviceName = RTL_CONSTANT_STRING(L"\\Device\\DonnaAC");
UNICODE_STRING g_DeviceSymbolicLink = RTL_CONSTANT_STRING(L"\\??\\DonnaAC");
/* xor key generated on driver entry used to encrypt the imports array. Kept in
* here since imports array is encrypted before the device extension is
* allocated.*/
__m256i g_ImportsKey;
/* XOR key that encrypts the DeviceObject->DeviceExtension pointer as well as
 * our g_DriverConfig pointer. It would arguably be better not to use the
 * device extension at all, but this works for now. */
UINT64 g_DeviceExtensionKey;
/*
* Rather then getting the driver state from the device object passed to our
* IOCTL handlers, store a pointer to the device extension here and abstract it
* with getters which can be accessed globally. The reason for this is because
* there isnt a way for us to pass a context structure to some of notify
* routines so I think it's better to do it this way.
*
* Note that the device extension pointer should be encrypted
*/
PDRIVER_CONFIG g_DriverConfig = NULL;
/* Its not ideal that this isnt inlined, but it causes errors with the
* decryption process and subsequently causes deadlocks / invalid pointer errors
* etc. Will need to look into it.*/
/* Decrypt the global config pointer out-of-place; the stored g_DriverConfig
 * value itself stays encrypted at rest. */
DECLSPEC_NOINLINE
PDRIVER_CONFIG
GetDecryptedDriverConfig()
{
    return (PDRIVER_CONFIG)CryptDecryptPointerOutOfPlace64(
        (PUINT64)&g_DriverConfig,
        g_DeviceExtensionKey);
}
#define POOL_TAG_CONFIG 'conf'
STATIC
VOID
EncryptDeviceExtensionPointers(_In_ PDEVICE_OBJECT DeviceObject)
{
    /* Encrypt both globals that lead back to the driver state.
     * fix: explicit (PUINT64) casts - the arguments are typed pointers, not
     * PUINT64, matching the cast already used in GetDecryptedDriverConfig. */
    CryptEncryptPointer64((PUINT64)&g_DriverConfig, g_DeviceExtensionKey);
    CryptEncryptPointer64(
        (PUINT64)&DeviceObject->DeviceExtension,
        g_DeviceExtensionKey);
}
STATIC
VOID
DecryptDeviceExtensionPointers(_In_ PDEVICE_OBJECT DeviceObject)
{
    /* Inverse of EncryptDeviceExtensionPointers; called during unload.
     * fix: explicit (PUINT64) casts for the typed pointer arguments. */
    CryptDecryptPointer64((PUINT64)&g_DriverConfig, g_DeviceExtensionKey);
    CryptDecryptPointer64(
        (PUINT64)&DeviceObject->DeviceExtension,
        g_DeviceExtensionKey);
}
/* Address of the XOR key protecting g_DriverConfig / the device extension. */
PUINT64
GetDriverDeviceExtensionKey()
{
    return &g_DeviceExtensionKey;
}
/* Address of the key used to encrypt the dynamic imports array. */
__m256i*
GetDriverImportsKey()
{
    return &g_ImportsKey;
}
/* Mark driver initialisation as complete (queried via HasDriverLoaded). */
STATIC
VOID
SetDriverLoadedFlag()
{
    PAGED_CODE();
    GetDecryptedDriverConfig()->has_driver_loaded = TRUE;
}
/* Accessors for fields of the (decrypted) global driver config. */
BCRYPT_ALG_HANDLE*
GetCryptHandle_Sha256()
{
    PAGED_CODE();
    return &GetDecryptedDriverConfig()->sha256_hash;
}
PRTL_HASHMAP
GetProcessHashmap()
{
    PAGED_CODE();
    return &GetDecryptedDriverConfig()->process_hashmap;
}
BCRYPT_ALG_HANDLE*
GetCryptHandle_AES()
{
    PAGED_CODE();
    return &GetDecryptedDriverConfig()->aes_hash;
}
BOOLEAN
HasDriverLoaded()
{
    PAGED_CODE();
    return GetDecryptedDriverConfig()->has_driver_loaded;
}
/* Decrement the NMI-in-progress counter set by IsNmiInProgress. */
VOID
UnsetNmiInProgressFlag()
{
    PAGED_CODE();
    InterlockedDecrement(&GetDecryptedDriverConfig()->nmi_status);
}
/* NOTE(review): despite the "Is" name this is a test-and-set - when
 * nmi_status compares equal to FALSE it is atomically set to TRUE, and the
 * previous value is returned. nmi_status is a volatile UINT32 passed to
 * LONG-based Interlocked APIs; same width, but the signedness mismatch
 * relies on an implicit conversion - confirm intended. */
BOOLEAN
IsNmiInProgress()
{
    PAGED_CODE();
    return InterlockedCompareExchange(
        &GetDecryptedDriverConfig()->nmi_status,
        TRUE,
        FALSE) != 0;
}
/* Accessor for the shared user/kernel mapping state. */
PSHARED_MAPPING
GetSharedMappingConfig()
{
    PAGED_CODE();
    return &GetDecryptedDriverConfig()->mapping;
}
/* Acquire the guarded mutex protecting the driver config. */
VOID
AcquireDriverConfigLock()
{
    PAGED_CODE();
    ImpKeAcquireGuardedMutex(&GetDecryptedDriverConfig()->lock);
}
/* Release the guarded mutex protecting the driver config. */
VOID
ReleaseDriverConfigLock()
{
    PAGED_CODE();
    ImpKeReleaseGuardedMutex(&GetDecryptedDriverConfig()->lock);
}
/* The fixed-size array of outstanding APC context pointers. */
PUINT64
GetApcContextArray()
{
    PAGED_CODE();
    return (PUINT64)GetDecryptedDriverConfig()->apc_contexts;
}
/* Atomic read of unload_in_progress, implemented as exchange-with-self.
 * NOTE(review): the field is declared volatile BOOLEAN (1 byte) while
 * InterlockedExchange operates on a 32-bit LONG, so the store touches 3
 * bytes past the field; struct padding before the KGUARDED_MUTEX probably
 * absorbs this, but the field should arguably be a volatile LONG - confirm.
 * The two separate GetDecryptedDriverConfig() calls also mean the argument
 * pair is not computed atomically. */
BOOLEAN
IsDriverUnloading()
{
    PAGED_CODE();
    return InterlockedExchange(
        &GetDecryptedDriverConfig()->unload_in_progress,
        GetDecryptedDriverConfig()->unload_in_progress);
}
/* Accessors returning pointers into the decrypted driver config. */
PACTIVE_SESSION
GetActiveSession()
{
    PAGED_CODE();
    return &GetDecryptedDriverConfig()->session_information;
}
/* Returns the ANSI driver name buffer (includes the ".sys" suffix). */
LPCSTR
GetDriverName()
{
    PAGED_CODE();
    return GetDecryptedDriverConfig()->ansi_driver_name.Buffer;
}
PDEVICE_OBJECT
GetDriverDeviceObject()
{
    PAGED_CODE();
    return GetDecryptedDriverConfig()->device_object;
}
PDRIVER_OBJECT
GetDriverObject()
{
    PAGED_CODE();
    return GetDecryptedDriverConfig()->driver_object;
}
PIRP_QUEUE_HEAD
GetIrpQueueHead()
{
    PAGED_CODE();
    return &GetDecryptedDriverConfig()->irp_queue;
}
PSYS_MODULE_VAL_CONTEXT
GetSystemModuleValidationContext()
{
    PAGED_CODE();
    return &GetDecryptedDriverConfig()->sys_val_context;
}
PUNICODE_STRING
GetDriverPath()
{
    PAGED_CODE();
    return &GetDecryptedDriverConfig()->driver_path;
}
PUNICODE_STRING
GetDriverRegistryPath()
{
    PAGED_CODE();
    return &GetDecryptedDriverConfig()->registry_path;
}
/* Returns the device name (points at g_DeviceName).
 * fix: device_name is already a PUNICODE_STRING; taking its address
 * returned a UNICODE_STRING** through a PUNICODE_STRING return type. */
PUNICODE_STRING
GetDriverDeviceName()
{
    PAGED_CODE();
    return GetDecryptedDriverConfig()->device_name;
}
/* Returns the symbolic link name (points at g_DeviceSymbolicLink).
 * fix: device_symbolic_link is already a PUNICODE_STRING; taking its
 * address returned the wrong pointer type (see GetDriverDeviceName). */
PUNICODE_STRING
GetDriverSymbolicLink()
{
    PAGED_CODE();
    return GetDecryptedDriverConfig()->device_symbolic_link;
}
/* Cached system environment data gathered at driver load. */
PSYSTEM_INFORMATION
GetDriverConfigSystemInformation()
{
    PAGED_CODE();
    return &GetDecryptedDriverConfig()->system_information;
}
PRB_TREE
GetThreadTree()
{
    PAGED_CODE();
    return &GetDecryptedDriverConfig()->thread_tree;
}
PDRIVER_LIST_HEAD
GetDriverList()
{
    PAGED_CODE();
    return &GetDecryptedDriverConfig()->driver_list;
}
/*
 * The question is: what happens if we attempt to register our callbacks after
 * we unregister them, but before we free the pool? That would be a race.
 *
 * To guard against this we also acquire the driver lock here. Alternatively
 * the structure could be stored in the .data section, which would avoid the
 * problem entirely, but that has not been done yet.
 *
 * This is not optimal, but it works.
 */
/* Unregister the ObRegisterCallbacks process callbacks. */
STATIC
VOID
DrvUnloadUnregisterObCallbacks()
{
    PAGED_CODE();
    UnregisterProcessObCallbacks();
}
/* Free the registry-derived name strings allocated in
 * RegistryPathQueryCallbackRoutine / DrvLoadRetrieveDriverNameFromRegistry. */
STATIC
VOID
DrvUnloadFreeConfigStrings()
{
    PAGED_CODE();
    PDRIVER_CONFIG cfg = GetDecryptedDriverConfig();
    if (cfg->unicode_driver_name.Buffer)
        ImpExFreePoolWithTag(cfg->unicode_driver_name.Buffer, POOL_TAG_STRINGS);
    if (cfg->driver_path.Buffer)
        ImpExFreePoolWithTag(cfg->driver_path.Buffer, POOL_TAG_STRINGS);
    if (cfg->ansi_driver_name.Buffer)
        ImpRtlFreeAnsiString(&cfg->ansi_driver_name);
}
/* Delete the \??\DonnaAC symbolic link if it was created. */
STATIC
VOID
DrvUnloadDeleteSymbolicLink()
{
    if (GetDecryptedDriverConfig()->device_symbolic_link)
        ImpIoDeleteSymbolicLink(
            GetDecryptedDriverConfig()->device_symbolic_link);
}
/* Thin unload wrappers; each delegates to the owning subsystem. */
STATIC
VOID
DrvUnloadFreeThreadList()
{
    PAGED_CODE();
    CleanupThreadListOnDriverUnload();
}
STATIC
VOID
DrvUnloadFreeDriverList()
{
    PAGED_CODE();
    CleanupDriverListOnDriverUnload();
}
STATIC
VOID
DrvUnloadFreeTimerObject()
{
    PAGED_CODE();
    CleanupDriverTimerObjects(
        &GetDecryptedDriverConfig()->integrity_check_timer);
}
STATIC
VOID
DrvUnloadFreeProcessList()
{
    PAGED_CODE();
    CleanupProcessHashmap();
}
/* Tear down the system-module validation context on unload. */
STATIC
VOID
DrvUnloadFreeModuleValidationContext()
{
    PAGED_CODE();
    CleanupValidationContextOnUnload(
        &GetDecryptedDriverConfig()->sys_val_context);
}
/* Close the SHA-256 provider opened in InitialiseHashingAlgorithmProvider. */
STATIC
VOID
CloseHashingAlgorithmProvider()
{
    BCRYPT_ALG_HANDLE* handle = GetCryptHandle_Sha256();
    BCryptCloseAlgorithmProvider(*handle, 0);
}
/* Driver unload: teardown order matters - APC contexts must drain before
 * callbacks are removed, and the device extension pointers must be
 * decrypted before IoDeleteDevice frees the extension. */
STATIC
VOID
DriverUnload(_In_ PDRIVER_OBJECT DriverObject)
{
    DEBUG_VERBOSE("Unloading...");
    /* NOTE(review): InterlockedExchange on the 1-byte volatile BOOLEAN
     * unload_in_progress performs a 32-bit access - see IsDriverUnloading;
     * confirm struct padding covers it. */
    InterlockedExchange(&GetDecryptedDriverConfig()->unload_in_progress, TRUE);
    /* Spin until every outstanding APC context structure has been freed. */
    while (DrvUnloadFreeAllApcContextStructures() == FALSE)
        YieldProcessor();
    DrvUnloadFreeTimerObject();
    DrvUnloadFreeModuleValidationContext();
    DrvUnloadUnregisterObCallbacks();
    UnregisterThreadCreateNotifyRoutine();
    UnregisterProcessCreateNotifyRoutine();
    UnregisterImageLoadNotifyRoutine();
    DrvUnloadFreeThreadList();
    DrvUnloadFreeProcessList();
    DrvUnloadFreeDriverList();
    CryptCloseProvider();
    CloseHashingAlgorithmProvider();
    DrvUnloadFreeConfigStrings();
    DrvUnloadDeleteSymbolicLink();
    /* Restore the plaintext DeviceExtension pointer before deletion. */
    DecryptDeviceExtensionPointers(DriverObject->DeviceObject);
    ImpIoDeleteDevice(DriverObject->DeviceObject);
    DEBUG_INFO("Driver successfully unloaded.");
}
/*
 * Register the image-load, thread-create and process-create notify
 * routines. On any failure, every routine registered so far is unwound so
 * the caller observes all-or-nothing behaviour.
 */
STATIC
NTSTATUS
DrvLoadEnableNotifyRoutines()
{
    PAGED_CODE();

    NTSTATUS status = STATUS_UNSUCCESSFUL;

    DEBUG_VERBOSE("Enabling driver wide notify routines.");

    status = PsSetLoadImageNotifyRoutine(ImageLoadNotifyRoutineCallback);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR(
            "PsSetLoadImageNotifyRoutine failed with status %x",
            status);
        return status;
    }

    status = ImpPsSetCreateThreadNotifyRoutine(ThreadCreateNotifyRoutine);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR(
            "PsSetCreateThreadNotifyRoutine failed with status %x",
            status);
        goto unwind_image_load;
    }

    status =
        ImpPsSetCreateProcessNotifyRoutine(ProcessCreateNotifyRoutine, FALSE);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR(
            "PsSetCreateProcessNotifyRoutine failed with status %x",
            status);
        goto unwind_thread_create;
    }

    DEBUG_VERBOSE("Successfully enabled driver wide notify routines.");
    return status;

unwind_thread_create:
    ImpPsRemoveCreateThreadNotifyRoutine(ThreadCreateNotifyRoutine);
unwind_image_load:
    PsRemoveLoadImageNotifyRoutine(ImageLoadNotifyRoutineCallback);
    return status;
}
STATIC
NTSTATUS
DrvLoadSetupDriverLists()
{
PAGED_CODE();
NTSTATUS status = STATUS_UNSUCCESSFUL;
status = InitialiseDriverList();
if (!NT_SUCCESS(status)) {
UnregisterProcessCreateNotifyRoutine();
UnregisterThreadCreateNotifyRoutine();
UnregisterImageLoadNotifyRoutine();
DEBUG_ERROR("InitialiseDriverList failed with status %x", status);
return status;
}
status = InitialiseThreadList();
if (!NT_SUCCESS(status)) {
DEBUG_ERROR("InitialiseThreadList failed with status %x", status);
UnregisterProcessCreateNotifyRoutine();
UnregisterThreadCreateNotifyRoutine();
UnregisterImageLoadNotifyRoutine();
CleanupDriverListOnDriverUnload();
return status;
}
status = InitialiseProcessHashmap();
if (!NT_SUCCESS(status)) {
DEBUG_ERROR("InitialiseProcessList failed with status %x", status);
UnregisterProcessCreateNotifyRoutine();
UnregisterThreadCreateNotifyRoutine();
UnregisterImageLoadNotifyRoutine();
CleanupDriverListOnDriverUnload();
CleanupThreadListOnDriverUnload();
return status;
}
return status;
}
/*
* Regular routines
*/
/*
 * RtlQueryRegistryValues callback: captures the "ImagePath" and
 * "DisplayName" values from our service key into the driver config.
 *
 * NOTE(review): if the second allocation fails after the first value was
 * captured, the first buffer is not freed here - presumably the caller's
 * failure path (DrvUnloadFreeConfigStrings) covers it; confirm.
 */
STATIC
NTSTATUS
RegistryPathQueryCallbackRoutine(
    IN PWSTR ValueName,
    IN ULONG ValueType,
    IN PVOID ValueData,
    IN ULONG ValueLength,
    IN PVOID Context,
    IN PVOID EntryContext)
{
    PAGED_CODE();
    UNICODE_STRING value_name = {0};
    UNICODE_STRING image_path = RTL_CONSTANT_STRING(L"ImagePath");
    UNICODE_STRING display_name = RTL_CONSTANT_STRING(L"DisplayName");
    UNICODE_STRING value = {0};
    PVOID temp_buffer = NULL;
    ImpRtlInitUnicodeString(&value_name, ValueName);
    PDRIVER_CONFIG cfg = GetDecryptedDriverConfig();
    /* RtlCompareUnicodeString returns 0 (== FALSE) when equal. */
    if (ImpRtlCompareUnicodeString(&value_name, &image_path, FALSE) == FALSE) {
        temp_buffer =
            ImpExAllocatePool2(POOL_FLAG_PAGED, ValueLength, POOL_TAG_STRINGS);
        if (!temp_buffer)
            return STATUS_MEMORY_NOT_ALLOCATED;
        IntCopyMemory(temp_buffer, ValueData, ValueLength);
        cfg->driver_path.Buffer = (PWCH)temp_buffer;
        cfg->driver_path.Length = ValueLength;
        cfg->driver_path.MaximumLength = ValueLength;
    }
    if (ImpRtlCompareUnicodeString(&value_name, &display_name, FALSE) ==
        FALSE) {
        /* NOTE(review): +20 bytes appears to be headroom for appending
         * L".sys" (10 bytes incl. NUL); confirm the extra slack and that
         * Length should really be ValueLength + 20 rather than the actual
         * string length. */
        temp_buffer = ImpExAllocatePool2(
            POOL_FLAG_PAGED,
            ValueLength + 20,
            POOL_TAG_STRINGS);
        if (!temp_buffer)
            return STATUS_MEMORY_NOT_ALLOCATED;
        IntCopyMemory(temp_buffer, ValueData, ValueLength);
        /* -2 rewinds over the copied wide NUL so ".sys" overwrites it. */
        IntWideStringCopy(
            (PWCH)((UINT64)temp_buffer + ValueLength - 2),
            L".sys");
        cfg->unicode_driver_name.Buffer = (PWCH)temp_buffer;
        cfg->unicode_driver_name.Length = ValueLength + 20;
        cfg->unicode_driver_name.MaximumLength = ValueLength + 20;
    }
    return STATUS_SUCCESS;
}
/*
 * Values returned from CPUID that are equal to the CPU vendor string.
 */
#define CPUID_AUTHENTIC_AMD_EBX 0x68747541
#define CPUID_AUTHENTIC_AMD_EDX 0x69746e65
#define CPUID_AUTHENTIC_AMD_ECX 0x444d4163
#define CPUID_GENUINE_INTEL_EBX 0x756e6547
#define CPUID_GENUINE_INTEL_EDX 0x49656e69
#define CPUID_GENUINE_INTEL_ECX 0x6c65746e
#define EBX_REGISTER 1
#define ECX_REGISTER 2
#define EDX_REGISTER 3
/*
 * Identify the CPU vendor via CPUID leaf 0 and record it in the driver
 * config. Returns STATUS_UNSUCCESSFUL for unrecognised vendors.
 */
STATIC
NTSTATUS
GetSystemProcessorType()
{
    UINT32 regs[4] = {0};
    PDRIVER_CONFIG cfg = GetDecryptedDriverConfig();

    /* Leaf 0 places the 12-byte vendor string across EBX, EDX, ECX. */
    __cpuid(regs, 0);

    DEBUG_VERBOSE(
        "Cpuid: EBX: %lx, ECX: %lx, EDX: %lx",
        regs[EBX_REGISTER],
        regs[ECX_REGISTER],
        regs[EDX_REGISTER]);

    if (regs[EBX_REGISTER] == CPUID_AUTHENTIC_AMD_EBX &&
        regs[ECX_REGISTER] == CPUID_AUTHENTIC_AMD_ECX &&
        regs[EDX_REGISTER] == CPUID_AUTHENTIC_AMD_EDX) {
        cfg->system_information.processor = AuthenticAmd;
        return STATUS_SUCCESS;
    }

    if (regs[EBX_REGISTER] == CPUID_GENUINE_INTEL_EBX &&
        regs[ECX_REGISTER] == CPUID_GENUINE_INTEL_ECX &&
        regs[EDX_REGISTER] == CPUID_GENUINE_INTEL_EDX) {
        cfg->system_information.processor = GenuineIntel;
        return STATUS_SUCCESS;
    }

    cfg->system_information.processor = Unknown;
    return STATUS_UNSUCCESSFUL;
}
/*
* Even though we are technically not meant to be operating when running under a
* virtualized system, it is still useful to test the attainment of system
* information under a virtualized system for testing purposes.
*/
/*
 * Read the SMBIOS vendor string, classify the environment (VMware,
 * VirtualBox or native), then fetch the motherboard serial from the
 * environment-specific SMBIOS table/index. VirtualBox is currently
 * unsupported for the serial lookup.
 */
STATIC
NTSTATUS
ParseSmbiosForGivenSystemEnvironment()
{
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    PDRIVER_CONFIG cfg = GetDecryptedDriverConfig();
    status = ParseSMBIOSTable(
        &cfg->system_information.vendor,
        VENDOR_STRING_MAX_LENGTH,
        SmbiosInformation,
        SMBIOS_VENDOR_STRING_SUB_INDEX);
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("ParseSMBIOSTable failed with status %x", status);
        return status;
    }
    /* Vendor substrings identify the hypervisor product. */
    if (IntFindSubstring(&cfg->system_information.vendor, "VMware, Inc"))
        cfg->system_information.environment = Vmware;
    else if (IntFindSubstring(&cfg->system_information.vendor, "innotek GmbH"))
        cfg->system_information.environment = VirtualBox;
    else
        cfg->system_information.environment = NativeWindows;
    /* The serial number lives in a different table/index per environment. */
    switch (cfg->system_information.environment) {
    case NativeWindows: {
        status = ParseSMBIOSTable(
            &cfg->system_information.motherboard_serial,
            MOTHERBOARD_SERIAL_CODE_LENGTH,
            VendorSpecificInformation,
            SMBIOS_NATIVE_SERIAL_NUMBER_SUB_INDEX);
        break;
    }
    case Vmware: {
        status = ParseSMBIOSTable(
            &cfg->system_information.motherboard_serial,
            MOTHERBOARD_SERIAL_CODE_LENGTH,
            SystemInformation,
            SMBIOS_VMWARE_SERIAL_NUMBER_SUB_INDEX);
        break;
    }
    case VirtualBox:
    default:
        DEBUG_WARNING("Environment type not supported.");
        return STATUS_NOT_SUPPORTED;
    }
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("ParseSMBIOSTable 2 failed with status %x", status);
        return status;
    }
    return status;
}
/*
 * Populate cfg->system_information: virtualisation heuristic, OS version,
 * CPU vendor, SMBIOS data and the serial of physical drive 0. Any failure
 * aborts driver load.
 */
STATIC
NTSTATUS
DrvLoadGatherSystemEnvironmentSettings()
{
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    PDRIVER_CONFIG cfg = GetDecryptedDriverConfig();
    /* APERF MSR timing heuristic flags a hypervisor-hosted system. */
    if (APERFMsrTimingCheck())
        cfg->system_information.virtualised_environment = TRUE;
    status = GetOsVersionInformation(&cfg->system_information.os_information);
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("GetOsVersionInformation failed with status %x", status);
        return status;
    }
    status = GetSystemProcessorType();
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("GetSystemProcessorType failed with status %x", status);
        return status;
    }
    status = ParseSmbiosForGivenSystemEnvironment();
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR(
            "ParseSmbiosForGivenSystemEnvironment failed with status %x",
            status);
        return status;
    }
    status = GetHardDiskDriveSerialNumber(
        &cfg->system_information.drive_0_serial,
        sizeof(cfg->system_information.drive_0_serial));
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR(
            "GetHardDiskDriverSerialNumber failed with status %x",
            status);
        return status;
    }
    DEBUG_VERBOSE(
        "OS Major Version: %lx, Minor Version: %lx, Build Number: %lx",
        cfg->system_information.os_information.dwMajorVersion,
        cfg->system_information.os_information.dwMinorVersion,
        cfg->system_information.os_information.dwBuildNumber);
    DEBUG_VERBOSE("Environment type: %lx", cfg->system_information.environment);
    DEBUG_VERBOSE("Processor type: %lx", cfg->system_information.processor);
    DEBUG_VERBOSE(
        "Motherboard serial: %s",
        cfg->system_information.motherboard_serial);
    DEBUG_VERBOSE("Drive 0 serial: %s", cfg->system_information.drive_0_serial);
    return status;
}
/*
 * Query our service registry key for "ImagePath" and "DisplayName" (handled
 * in RegistryPathQueryCallbackRoutine) and derive the ANSI driver name from
 * the captured unicode name.
 */
STATIC
NTSTATUS
DrvLoadRetrieveDriverNameFromRegistry(_In_ PUNICODE_STRING RegistryPath)
{
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    PDRIVER_CONFIG cfg = GetDecryptedDriverConfig();
    /* Three entries: two queries plus the zeroed terminator in query[2]. */
    RTL_QUERY_REGISTRY_TABLE query[3] = {0};

    query[0].Flags = RTL_QUERY_REGISTRY_NOEXPAND;
    query[0].Name = L"ImagePath";
    query[0].DefaultType = REG_MULTI_SZ;
    query[0].DefaultLength = 0;
    query[0].DefaultData = NULL;
    query[0].EntryContext = NULL;
    query[0].QueryRoutine = RegistryPathQueryCallbackRoutine;

    query[1].Flags = RTL_QUERY_REGISTRY_NOEXPAND;
    query[1].Name = L"DisplayName";
    query[1].DefaultType = REG_SZ;
    query[1].DefaultLength = 0;
    query[1].DefaultData = NULL;
    query[1].EntryContext = NULL;
    query[1].QueryRoutine = RegistryPathQueryCallbackRoutine;

    /* fix: pass the table itself (array decays to the expected
     * PRTL_QUERY_REGISTRY_TABLE); "&query" was a pointer-to-array. */
    status = RtlxQueryRegistryValues(
        RTL_REGISTRY_ABSOLUTE,
        RegistryPath->Buffer,
        query,
        NULL,
        NULL);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("RtlxQueryRegistryValues failed with status %x", status);
        return status;
    }

    /*
     * The registry path contains the name of the driver i.e Driver, but
     * does not contain the .sys extension. Lets add it to our stored driver
     * name since we need the .sys extension when querying the system
     * modules for our driver.
     */
    status = ImpRtlUnicodeStringToAnsiString(
        &cfg->ansi_driver_name,
        &cfg->unicode_driver_name,
        TRUE);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR(
            "RtlUnicodeStringToAnsiString failed with status %x",
            status);
    }

    return status;
}
/*
 * First-stage config initialisation: lock, callback configuration, registry
 * derived names, system environment data, integrity timer and IRP queue.
 */
STATIC
NTSTATUS
DrvLoadInitialiseDriverConfig(
    _In_ PDRIVER_OBJECT DriverObject, _In_ PUNICODE_STRING RegistryPath)
{
    PAGED_CODE();
    DEBUG_VERBOSE("Initialising driver configuration");
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    PDRIVER_CONFIG cfg = GetDecryptedDriverConfig();
    ImpKeInitializeGuardedMutex(&cfg->lock);
    /* NOTE(review): IrpQueueInitialise is invoked again below with its
     * return checked - this first call's status is discarded. Looks like a
     * redundant duplicate; confirm and remove one of them. */
    IrpQueueInitialise();
    SessionInitialiseCallbackConfiguration();
    cfg->unload_in_progress = FALSE;
    cfg->system_information.virtualised_environment = FALSE;
    cfg->sys_val_context.active = FALSE;
    status = DrvLoadRetrieveDriverNameFromRegistry(RegistryPath);
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR(
            "DrvLoadRetrieveDriverNameFromRegistry failed with status %x",
            status);
        return status;
    }
    /* when this function failed, we bugcheck in freeconfigstrings todo: fix */
    status = DrvLoadGatherSystemEnvironmentSettings();
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR(
            "GatherSystemEnvironmentSettings failed with status %x",
            status);
        return status;
    }
    status = InitialiseTimerObject(&cfg->integrity_check_timer);
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("InitialiseTimerObject failed with status %x", status);
        return status;
    }
    status = IrpQueueInitialise();
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("IrpQueueInitialise failed with status %x", status);
        return status;
    }
    DEBUG_VERBOSE("driver name: %s", cfg->ansi_driver_name.Buffer);
    return status;
}
STATIC
NTSTATUS
InitialiseHashingAlgorithmProvider()
{
    /* Open the kernel SHA-256 provider; the handle is stored in the driver
     * config and released by CloseHashingAlgorithmProvider. */
    BCRYPT_ALG_HANDLE* sha256 = GetCryptHandle_Sha256();
    NTSTATUS status = BCryptOpenAlgorithmProvider(
        sha256,
        BCRYPT_SHA256_ALGORITHM,
        NULL,
        BCRYPT_PROV_DISPATCH);

    if (!NT_SUCCESS(status))
        DEBUG_ERROR("BCryptOpenAlgorithmProvider: %x", status);

    return status;
}
/*
 * Driver entry point. Order matters: the device-extension key is generated
 * before any pointer encryption, the imports are resolved before any Imp*
 * call, and each failure path unwinds exactly what earlier steps created.
 * (fix: removed the unused local "BOOLEAN flag".)
 */
NTSTATUS
DriverEntry(_In_ PDRIVER_OBJECT DriverObject, _In_ PUNICODE_STRING RegistryPath)
{
    NTSTATUS status = STATUS_UNSUCCESSFUL;

    /* Wire up the dispatch routines before the device can be opened. */
    DriverObject->MajorFunction[IRP_MJ_CREATE] = DeviceCreate;
    DriverObject->MajorFunction[IRP_MJ_CLOSE] = DeviceClose;
    DriverObject->MajorFunction[IRP_MJ_DEVICE_CONTROL] = DeviceControl;
    DriverObject->DriverUnload = DriverUnload;

    /* Key that encrypts g_DriverConfig and the DeviceExtension pointer. */
    g_DeviceExtensionKey = CryptXorKeyGenerate_uint64();

    status = ImpResolveDynamicImports(DriverObject);

    if (!NT_SUCCESS(status))
        return STATUS_FAILED_DRIVER_ENTRY;

    DEBUG_VERBOSE("Beginning driver entry routine...");

    /* The DRIVER_CONFIG structure lives in the device extension. */
    status = ImpIoCreateDevice(
        DriverObject,
        sizeof(DRIVER_CONFIG),
        &g_DeviceName,
        FILE_DEVICE_UNKNOWN,
        FILE_DEVICE_SECURE_OPEN,
        FALSE,
        &DriverObject->DeviceObject);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("IoCreateDevice failed with status %x", status);
        return status;
    }

    g_DriverConfig = DriverObject->DeviceObject->DeviceExtension;
    g_DriverConfig->device_object = DriverObject->DeviceObject;
    g_DriverConfig->driver_object = DriverObject;
    g_DriverConfig->device_name = &g_DeviceName;
    g_DriverConfig->device_symbolic_link = &g_DeviceSymbolicLink;

    /* From here on the config is only reachable via
     * GetDecryptedDriverConfig(). */
    EncryptDeviceExtensionPointers(DriverObject->DeviceObject);

    status = DrvLoadInitialiseDriverConfig(DriverObject, RegistryPath);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR(
            "InitialiseDriverConfigOnDriverEntry failed with status %x",
            status);
        DrvUnloadFreeConfigStrings();
        ImpIoDeleteDevice(GetDecryptedDriverConfig()->device_object);
        return status;
    }

    status = SessionInitialiseStructure();

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("SessionInitialiseStructure failed with status %x", status);
        DrvUnloadFreeConfigStrings();
        DrvUnloadFreeTimerObject();
        ImpIoDeleteDevice(GetDecryptedDriverConfig()->device_object);
        return status;
    }

    status = IoCreateSymbolicLink(
        GetDecryptedDriverConfig()->device_symbolic_link,
        GetDecryptedDriverConfig()->device_name);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("IoCreateSymbolicLink failed with status %x", status);
        DrvUnloadFreeConfigStrings();
        DrvUnloadFreeTimerObject();
        ImpIoDeleteDevice(GetDecryptedDriverConfig()->device_object);
        return status;
    }

    status = DrvLoadEnableNotifyRoutines();

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("EnablenotifyRoutines failed with status %x", status);
        DrvUnloadFreeConfigStrings();
        DrvUnloadFreeTimerObject();
        DrvUnloadDeleteSymbolicLink();
        ImpIoDeleteDevice(GetDecryptedDriverConfig()->device_object);
        return status;
    }

    status = InitialiseHashingAlgorithmProvider();

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR(
            "InitialiseHashingAlgorithmProvider failed with status %x",
            status);
        DrvUnloadFreeConfigStrings();
        DrvUnloadFreeTimerObject();
        DrvUnloadDeleteSymbolicLink();
        ImpIoDeleteDevice(GetDecryptedDriverConfig()->device_object);
        return status;
    }

    status = DrvLoadSetupDriverLists();

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("DrvLoadSetupDriverLists failed with status %x", status);
        CloseHashingAlgorithmProvider();
        DrvUnloadFreeConfigStrings();
        DrvUnloadFreeTimerObject();
        DrvUnloadDeleteSymbolicLink();
        ImpIoDeleteDevice(GetDecryptedDriverConfig()->device_object);
        return status;
    }

    SetDriverLoadedFlag();

    /* Best effort: a missing/unsupported TPM is not fatal to driver load. */
    TpmExtractEndorsementKey();

    // PoolScanForManualMappedDrivers();

    DEBUG_INFO("Driver Entry Complete.");
    return STATUS_SUCCESS;
}
================================================
FILE: driver/driver.h
================================================
#ifndef DRIVER_H
#define DRIVER_H
#include "common.h"
/* NOTE(review): the header name on the next include was lost during text
 * extraction -- original target unknown, confirm against the repository. */
#include
#include "modules.h"
#include "integrity.h"
#include "callbacks.h"
#include "containers/map.h"
#include "containers/tree.h"
/* Accessors for the driver-wide BCrypt algorithm provider handles. */
BCRYPT_ALG_HANDLE*
GetCryptHandle_AES();
BCRYPT_ALG_HANDLE*
GetCryptHandle_Sha256();
NTSTATUS
QueryActiveApcContextsForCompletion();
/* Accessors for core driver-global objects and configuration strings. */
LPCSTR
GetDriverName();
PDEVICE_OBJECT
GetDriverDeviceObject();
PDRIVER_OBJECT
GetDriverObject();
PIRP_QUEUE_HEAD
GetIrpQueueHead();
PSYS_MODULE_VAL_CONTEXT
GetSystemModuleValidationContext();
PUNICODE_STRING
GetDriverPath();
PUNICODE_STRING
GetDriverRegistryPath();
PUNICODE_STRING
GetDriverDeviceName();
PUNICODE_STRING
GetDriverSymbolicLink();
PSYSTEM_INFORMATION
GetDriverConfigSystemInformation();
PRB_TREE
GetThreadTree();
PDRIVER_LIST_HEAD
GetDriverList();
PUINT64
GetApcContextArray();
/* Serialize access to the shared driver configuration. */
VOID
AcquireDriverConfigLock();
VOID
ReleaseDriverConfigLock();
/* Driver lifecycle / state flags. */
BOOLEAN
IsDriverUnloading();
PACTIVE_SESSION
GetActiveSession();
PSHARED_MAPPING
GetSharedMappingConfig();
VOID
UnsetNmiInProgressFlag();
BOOLEAN
IsNmiInProgress();
BOOLEAN
HasDriverLoaded();
PRTL_HASHMAP
GetProcessHashmap();
/* Keys used to encrypt the import table / device extension at rest. */
__m256i*
GetDriverImportsKey();
PUINT64
GetDriverDeviceExtensionKey();
#endif
================================================
FILE: driver/driver.inf
================================================
;
; driver.inf
;
[Version]
Signature="$WINDOWS NT$"
Class=System ; TODO: specify appropriate Class
ClassGuid={4d36e97d-e325-11ce-bfc1-08002be10318} ; TODO: specify appropriate ClassGuid
Provider=%ManufacturerName%
CatalogFile=driver.cat
DriverVer= ; TODO: set DriverVer in stampinf property pages
PnpLockdown=1
[DestinationDirs]
DefaultDestDir = 12
driver_Device_CoInstaller_CopyFiles = 11
[SourceDisksNames]
1 = %DiskName%,,,""
[SourceDisksFiles]
driver.sys = 1,,
WdfCoInstaller$KMDFCOINSTALLERVERSION$.dll=1 ; make sure the number matches with SourceDisksNames
;*****************************************
; Install Section
;*****************************************
[Manufacturer]
%ManufacturerName%=Standard,NT$ARCH$
[Standard.NT$ARCH$]
%driver.DeviceDesc%=driver_Device, Root\driver ; TODO: edit hw-id
[driver_Device.NT]
CopyFiles=Drivers_Dir
[Drivers_Dir]
driver.sys
;-------------- Service installation
[driver_Device.NT.Services]
AddService = driver,%SPSVCINST_ASSOCSERVICE%, driver_Service_Inst
; -------------- driver driver install sections
[driver_Service_Inst]
DisplayName = %driver.SVCDESC%
ServiceType = 1 ; SERVICE_KERNEL_DRIVER
StartType = 3 ; SERVICE_DEMAND_START
ErrorControl = 1 ; SERVICE_ERROR_NORMAL
ServiceBinary = %12%\driver.sys
;
;--- driver_Device Coinstaller installation ------
;
[driver_Device.NT.CoInstallers]
AddReg=driver_Device_CoInstaller_AddReg
CopyFiles=driver_Device_CoInstaller_CopyFiles
[driver_Device_CoInstaller_AddReg]
HKR,,CoInstallers32,0x00010000, "WdfCoInstaller$KMDFCOINSTALLERVERSION$.dll,WdfCoInstaller"
[driver_Device_CoInstaller_CopyFiles]
WdfCoInstaller$KMDFCOINSTALLERVERSION$.dll
[driver_Device.NT.Wdf]
KmdfService = driver, driver_wdfsect
[driver_wdfsect]
KmdfLibraryVersion = $KMDFVERSION$
[Strings]
SPSVCINST_ASSOCSERVICE= 0x00000002
ManufacturerName="" ;TODO: Replace with your manufacturer name
DiskName = "driver Installation Disk"
driver.DeviceDesc = "driver Device"
driver.SVCDESC = "driver Service"
================================================
FILE: driver/driver.vcxproj
================================================
Debugx64Release - Win10ARM64Release - Win10x64Release - Win11ARM64Release - Win11x64Releasex64DebugARM64ReleaseARM64{0AE83EC6-DDEA-4EDE-B1B2-1B2AB1E8BB54}{1bc93793-694f-48fe-9372-81e2b05556fd}v4.512.0Debugx64driver$(LatestTargetPlatformVersion)Windows10trueWindowsKernelModeDriver10.0DriverKMDFUniversalfalseWindows10falseWindowsKernelModeDriver10.0DriverKMDFUniversalfalseWindows10falseWindowsKernelModeDriver10.0DriverKMDFUniversalfalseWindows10falseWindowsKernelModeDriver10.0NTDDI_WIN10_VBDriverKMDFUniversalfalse
<_NT_TARGET_VERSION>0xA00000C
Windows10trueWindowsKernelModeDriver10.0DriverKMDFUniversalWindows10falseWindowsKernelModeDriver10.0DriverKMDFUniversalWindows10falseWindowsKernelModeDriver10.0DriverKMDFUniversalWindows10falseWindowsKernelModeDriver10.0DriverKMDFUniversalDbgengKernelDebuggertrueDbgengKernelDebuggertruetruefalsefalseDbgengKernelDebuggertruetruefalsefalseDbgengKernelDebuggertruetruefalsefalse$(SolutionDir)$(Platform)\$(Configuration)\DbgengKernelDebuggerDbgengKernelDebuggerDbgengKernelDebuggerDbgengKernelDebuggersha256false/INTEGRITYCHECK %(AdditionalOptions)Cng.lib;%(AdditionalDependencies)sha256falsetrue/INTEGRITYCHECK %(AdditionalOptions)Cng.lib;%(AdditionalDependencies)sha256falsetrue/INTEGRITYCHECK %(AdditionalOptions)Cng.lib;%(AdditionalDependencies)sha256falsetrue/INTEGRITYCHECK %(AdditionalOptions)Cng.lib;netio.lib;%(AdditionalDependencies)sha256sha256sha256sha256
================================================
FILE: driver/driver.vcxproj.filters
================================================
{4FC737F1-C7A5-4376-A066-2A32D752A2FF}cpp;c;cc;cxx;def;odl;idl;hpj;bat;asm;asmx{93995380-89BD-4b04-88EB-625FBE52EBFB}h;hpp;hxx;hm;inl;inc;xsd{67DA6AB6-F800-4c08-8B7A-83BB121AAD01}rc;ico;cur;bmp;dlg;rc2;rct;bin;rgs;gif;jpg;jpeg;jpe;resx;tiff;tif;png;wav;mfcribbon-ms{8E41214B-6785-4CFE-B992-037D68949A14}inf;inv;inx;mof;mc;Driver FilesSource FilesSource FilesSource FilesSource FilesSource FilesSource FilesSource FilesSource FilesSource FilesSource FilesSource FilesSource FilesSource FilesSource FilesSource FilesSource FilesSource FilesSource FilesHeader FilesHeader FilesHeader FilesHeader FilesHeader FilesHeader FilesHeader FilesHeader FilesHeader FilesHeader FilesHeader FilesHeader FilesHeader FilesHeader FilesHeader FilesHeader FilesHeader FilesHeader FilesHeader FilesHeader FilesHeader FilesHeader FilesHeader FilesHeader FilesSource Files
================================================
FILE: driver/hv.c
================================================
#include "hv.h"
#include "common.h"
#include "imports.h"
#include "io.h"
#include "lib/stdlib.h"
#include
#ifdef ALLOC_PRAGMA
# pragma alloc_text(PAGE, PerformVirtualizationDetection)
#endif
#define TOTAL_ITERATION_COUNT 20
/*
* TODO: Perform the test in a loop and average the delta out, then compare it
* to an instruction such as FYL2XP1 (source: secret.club) which has an average
* execution time slightly higher then the CPUID instruction then compare the
* two. If the average time for the CPUID instruction is higher then the average
* time for the FYL2XP1 instruction it is a dead giveaway we are running on a
* virtualized system.
*
* reference: https://secret.club/2020/01/12/battleye-hypervisor-detection.html
*/
/*
 * Detect hypervisor presence via the IA32_APERF counter: pin the thread to
 * one logical processor, raise IRQL and disable interrupts, then sample
 * APERF around a CPUID. On some hypervisors (e.g. VMware) APERF reads as 0,
 * so a zero delta is treated as "virtualized". Returns TRUE when the delta
 * is zero.
 */
BOOLEAN
APERFMsrTimingCheck()
{
KAFFINITY new_affinity = {0};
KAFFINITY old_affinity = {0};
UINT64 old_irql = 0;
UINT64 aperf_delta = 0;
UINT64 aperf_before = 0;
UINT64 aperf_after = 0;
INT cpuid_result[4];
/*
* First thing we do is we lock the current thread to the logical
* processor its executing on.
*/
new_affinity = (KAFFINITY)(1ull << KeGetCurrentProcessorNumber());
old_affinity = ImpKeSetSystemAffinityThreadEx(new_affinity);
/*
* Once we've locked our thread to the current core, we save the old
* irql and raise to HIGH_LEVEL to ensure the chance our thread is
* preempted by a thread with a higher IRQL is extremely low.
*/
old_irql = __readcr8();
__writecr8(HIGH_LEVEL);
/*
* Then we also disable interrupts, once again making sure our thread
* is not preempted.
*/
_disable();
/*
* Once our thread is ready for the test, we read the APERF from the
* MSR register and store it. We then execute a CPUID instruction
* which we don't really care about and immediately after read the APERF
* counter once again and store it in a seperate variable.
*/
/*
* NOTE(review): shifting the raw MSR value left by 32 discards the upper
* 32 bits of APERF, so the delta below is effectively
* ((after - before) mod 2^32) << 32. If the intent is only "did APERF
* advance at all", the shifts look unnecessary and could mask a nonzero
* delta whose low 32 bits are zero -- confirm before changing.
*/
aperf_before = __readmsr(IA32_APERF_MSR) << 32;
__cpuid(cpuid_result, 1);
aperf_after = __readmsr(IA32_APERF_MSR) << 32;
/*
* Once we have performed our test, we want to make sure we are not
* hogging the cpu time from other threads, so we reverse the initial
* preparation process. i.e we first enable interrupts, lower our irql
* to the threads previous irql before it was raised and then restore
* the threads affinity back to its original affinity.
*/
_enable();
__writecr8(old_irql);
ImpKeRevertToUserAffinityThreadEx(old_affinity);
/*
* Now the only thing left to do is calculate the change. Now, on some
* VMs such as VMWARE the aperf value will be 0, meaning the change will
* be 0. This is a dead giveaway we are executing in a VM.
*/
aperf_delta = aperf_after - aperf_before;
return aperf_delta == 0 ? TRUE : FALSE;
}
/*
 * IOCTL handler: run the hypervisor-detection checks and copy a
 * HYPERVISOR_DETECTION_REPORT back to user mode via the IRP's system
 * buffer (METHOD_BUFFERED).
 *
 * Irp - buffered-I/O request; the output buffer must hold at least
 *       sizeof(HYPERVISOR_DETECTION_REPORT) bytes (validated below).
 *
 * Returns STATUS_SUCCESS on completion, or the validation failure status.
 */
NTSTATUS
PerformVirtualizationDetection(_Inout_ PIRP Irp)
{
PAGED_CODE();
NTSTATUS status = STATUS_UNSUCCESSFUL;
HYPERVISOR_DETECTION_REPORT report = {0};
/* Reject requests whose output buffer is too small before writing. */
status = ValidateIrpOutputBuffer(Irp, sizeof(HYPERVISOR_DETECTION_REPORT));
if (!NT_SUCCESS(status)) {
DEBUG_ERROR("ValidateIrpOutputBuffer failed with status %x", status);
return status;
}
/* Two independent checks: APERF timing and INVD emulation (asm helper). */
report.aperf_msr_timing_check = APERFMsrTimingCheck();
report.invd_emulation_check = TestINVDEmulation();
/* Information = number of bytes the I/O manager copies back to the caller. */
Irp->IoStatus.Information = sizeof(HYPERVISOR_DETECTION_REPORT);
IntCopyMemory(
Irp->AssociatedIrp.SystemBuffer,
&report,
sizeof(HYPERVISOR_DETECTION_REPORT));
return STATUS_SUCCESS;
}
================================================
FILE: driver/hv.h
================================================
#ifndef HV_H
#define HV_H
/* NOTE(review): include target lost during text extraction -- confirm. */
#include
#include "common.h"
/* IOCTL entry point: fills a HYPERVISOR_DETECTION_REPORT in the IRP. */
NTSTATUS
PerformVirtualizationDetection(_Inout_ PIRP Irp);
/* TRUE when the APERF MSR delta around CPUID is zero (VM indicator). */
BOOLEAN
APERFMsrTimingCheck();
/* Implemented externally (presumably arch.asm) -- verify linkage. */
extern INT
TestINVDEmulation();
#endif
================================================
FILE: driver/hw.c
================================================
#include "hw.h"
#include "crypt.h"
#include "imports.h"
#include "lib/stdlib.h"
#include "modules.h"
/* Offsets into the PCI configuration-space header (see layout comment below). */
#define PCI_VENDOR_ID_OFFSET 0x00
#define PCI_DEVICE_ID_OFFSET 0x02
#define FLAGGED_DEVICE_ID_COUNT 2
/* Known-bad PCIe DeviceIDs associated with DMA attack hardware. */
USHORT FLAGGED_DEVICE_IDS[FLAGGED_DEVICE_ID_COUNT] = {
0x0666, // default PCIe Squirrel DeviceID (used by PCI Leech)
0xffff};
/* Per-device callback invoked by EnumeratePciDeviceObjects for each PDO. */
typedef NTSTATUS (*PCI_DEVICE_CALLBACK)(
_In_ PDEVICE_OBJECT DeviceObject, _In_opt_ PVOID Context);
/*
* Every PCI device has a set of registers commonly referred to as the PCI
* configuration space. In modern PCI-e devices an extended configuration space
* was implemented. These configuration spaces are mapped into main memory and
* this allows us to read/write to the registers.
*
* The configuration space consists of a standard header, containing information
* such as the DeviceID, VendorID, Status and so on. Below is the header schema
* including offsets.
*
* | Offset 0x00: Header Type
* | Offset 0x01: Multi-Function Device Indicator
* | Offset 0x02: Device ID (Low Byte)
* | Offset 0x03: Device ID (High Byte)
* | Offset 0x04: Status Register (16 bits)
* | Offset 0x06: Command Register (16 bits)
* | Offset 0x08: Class Code
* | Offset 0x09: Subclass Code
* | Offset 0x0A: Prog IF (Programming Interface)
* | Offset 0x0B: Revision ID
* | Offset 0x0C: BIST (Built-in Self-Test)
* | Offset 0x0D: Header Type (Secondary)
* | Offset 0x0E: Latency Timer
* | Offset 0x0F: Cache Line Size
* | Offset 0x10: Base Address Register 0 (BAR0) - 32 bits
* | Offset 0x14: Base Address Register 1 (BAR1) - 32 bits
* | Offset 0x18: Base Address Register 2 (BAR2) - 32 bits
* | Offset 0x1C: Base Address Register 3 (BAR3) - 32 bits
* | Offset 0x20: Base Address Register 4 (BAR4) - 32 bits
* | Offset 0x24: Base Address Register 5 (BAR5) - 32 bits
* | Offset 0x28: Cardbus CIS Pointer (for Cardbus bridges)
* | Offset 0x2C: Subsystem Vendor ID
* | Offset 0x2E: Subsystem ID
* | Offset 0x30: Expansion ROM Base Address
* | Offset 0x34: Reserved
* | Offset 0x38: Reserved
* | Offset 0x3C: Max_Lat (Maximum Latency)
* | Offset 0x3D: Min_Gnt (Minimum Grant)
* | Offset 0x3E: Interrupt Pin
* | Offset 0x3F: Interrupt Line
*
* We can use this to then query important information from PCI devices within
* the device tree. To keep up with modern windows kernel programming, we can
* make use of the IRP_MN_READ_CONFIG code, which as the name suggests, reads
* from a PCI devices configuration space.
*/
/*
 * Read BufferLength bytes starting at Offset from DeviceObject's PCI
 * configuration space by sending an IRP_MN_READ_CONFIG PnP request down
 * the device stack.
 *
 * NOTE(review): IoBuildSynchronousFsdRequest is documented for read/write/
 * flush/shutdown majors, and PnP convention expects IoStatus.Status to be
 * preset to STATUS_NOT_SUPPORTED before forwarding -- this works in practice
 * but is worth confirming against the WDK documentation.
 *
 * NOTE(review): Buffer is annotated _Out_opt_ but is passed to the bus
 * driver unchecked; a NULL Buffer with nonzero BufferLength would be
 * written to by the lower driver -- confirm callers never do this.
 */
STATIC
NTSTATUS
QueryPciDeviceConfigurationSpace(
_In_ PDEVICE_OBJECT DeviceObject,
_In_ UINT32 Offset,
_Out_opt_ PVOID Buffer,
_In_ UINT32 BufferLength)
{
NTSTATUS status = STATUS_UNSUCCESSFUL;
KEVENT event = {0};
IO_STATUS_BLOCK io = {0};
PIRP irp = NULL;
PIO_STACK_LOCATION packet = NULL;
if (BufferLength == 0)
return STATUS_BUFFER_TOO_SMALL;
/* Signaled by the I/O manager when the synchronous request completes. */
KeInitializeEvent(&event, NotificationEvent, FALSE);
/*
* we dont need to free this IRP as the IO manager will free it when the
* request is completed
*/
irp = IoBuildSynchronousFsdRequest(
IRP_MJ_PNP,
DeviceObject,
NULL,
0,
NULL,
&event,
&io);
if (!irp) {
DEBUG_ERROR("IoBuildSynchronousFsdRequest failed with no status.");
return STATUS_INSUFFICIENT_RESOURCES;
}
/* Fill in the next stack location for the target (lower) driver. */
packet = IoGetNextIrpStackLocation(irp);
packet->MinorFunction = IRP_MN_READ_CONFIG;
packet->Parameters.ReadWriteConfig.WhichSpace = PCI_WHICHSPACE_CONFIG;
packet->Parameters.ReadWriteConfig.Offset = Offset;
packet->Parameters.ReadWriteConfig.Buffer = Buffer;
packet->Parameters.ReadWriteConfig.Length = BufferLength;
status = IoCallDriver(DeviceObject, irp);
/* If the lower driver pended the request, wait for completion and take
 * the final status from the IO_STATUS_BLOCK. */
if (status == STATUS_PENDING) {
KeWaitForSingleObject(&event, Executive, KernelMode, FALSE, NULL);
status = io.Status;
}
if (!NT_SUCCESS(status))
DEBUG_ERROR(
"Failed to read configuration space with status %x",
status);
return status;
}
/*
* NOTE: Caller is responsible for freeing the array.
*/
/*
 * Build an array of every DEVICE_OBJECT owned by DriverObject.
 *
 * DriverObject      - driver whose device objects are enumerated.
 * DeviceObjectArray - receives a non-paged pool array (POOL_TAG_HW) of
 *                     referenced PDEVICE_OBJECT pointers; NULL on failure.
 * ArrayEntries      - receives the number of entries in the array.
 *
 * NOTE: Caller is responsible for freeing the array with
 * ExFreePoolWithTag(.., POOL_TAG_HW) and for dereferencing every device
 * object in it (IoEnumerateDeviceObjectList references each one).
 */
STATIC
NTSTATUS
EnumerateDriverObjectDeviceObjects(
    _In_ PDRIVER_OBJECT DriverObject,
    _Out_ PDEVICE_OBJECT** DeviceObjectArray,
    _Out_ PUINT32 ArrayEntries)
{
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    UINT32 object_count = 0;
    PDEVICE_OBJECT* buffer = NULL;
    UINT32 buffer_size = 0;
    *DeviceObjectArray = NULL;
    *ArrayEntries = 0;
    /* Probe with a zero-length list: expected to fail with
     * STATUS_BUFFER_TOO_SMALL and yield the required object count. */
    status = IoEnumerateDeviceObjectList(DriverObject, NULL, 0, &object_count);
    if (status != STATUS_BUFFER_TOO_SMALL) {
        DEBUG_ERROR(
            "IoEnumerateDeviceObjectList failed with status %x",
            status);
        return status;
    }
    /* Fix: size the buffer by the element type rather than sizeof(UINT64);
     * the two only coincide on 64-bit builds. */
    buffer_size = object_count * sizeof(PDEVICE_OBJECT);
    buffer = ExAllocatePool2(POOL_FLAG_NON_PAGED, buffer_size, POOL_TAG_HW);
    if (!buffer)
        return STATUS_INSUFFICIENT_RESOURCES;
    status = IoEnumerateDeviceObjectList(
        DriverObject,
        buffer,
        buffer_size,
        &object_count);
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR(
            "IoEnumerateDeviceObjectList failed with status %x",
            status);
        ExFreePoolWithTag(buffer, POOL_TAG_HW);
        return status;
    }
    DEBUG_VERBOSE(
        "EnumerateDriverObjectDeviceObjects: Object Count: %lx",
        object_count);
    *DeviceObjectArray = buffer;
    *ArrayEntries = object_count;
    return status;
}
/*
* While this isnt a perfect check to determine whether a DEVICE_OBJECT is
* indeed a PDO or FDO, this is Peters preferred method... hence it is now my
* preferred method... :smiling_imp:
*/
/*
 * Heuristic PDO test: device objects created by a bus driver carry the
 * DO_BUS_ENUMERATED_DEVICE flag, so treat any object with that flag set
 * as a physical device object.
 */
STATIC
BOOLEAN
IsDeviceObjectValidPdo(_In_ PDEVICE_OBJECT DeviceObject)
{
    if (DeviceObject->Flags & DO_BUS_ENUMERATED_DEVICE)
        return TRUE;
    return FALSE;
}
/*
* Windows splits DEVICE_OBJECTS up into 2 categories:
*
* Physical Device Object (PDO)
* Functional Device Object (FDO)
*
* A PDO represents each device that is connected to a physical bus. Each PDO
* has an associated DEVICE_NODE. An FDO represents the functionality of the
* device. Its how the system interacts with the device objects.
*
* More information can be found here:
* https://learn.microsoft.com/en-gb/windows-hardware/drivers/gettingstarted/device-nodes-and-device-stacks
*
* A device stack can have multiple PDO's, but can only have one FDO. This means
* to access each PCI device on the system, we can enumerate all device objects
* given the PCI FDO which is called pci.sys.
*/
/*
 * Walk every device object owned by \Driver\pci (the PCI bus FDO driver),
 * and invoke CallbackRoutine(Context) on each object that looks like a PDO.
 * Callback failures are logged but do not stop the enumeration; every
 * enumerated object is dereferenced exactly once before returning.
 */
NTSTATUS
EnumeratePciDeviceObjects(
_In_ PCI_DEVICE_CALLBACK CallbackRoutine, _In_opt_ PVOID Context)
{
NTSTATUS status = STATUS_UNSUCCESSFUL;
UNICODE_STRING pci = RTL_CONSTANT_STRING(L"\\Driver\\pci");
PDRIVER_OBJECT pci_driver_object = NULL;
PDEVICE_OBJECT* pci_device_objects = NULL;
PDEVICE_OBJECT current_device = NULL;
UINT32 pci_device_objects_count = 0;
status = GetDriverObjectByDriverName(&pci, &pci_driver_object);
if (!NT_SUCCESS(status)) {
DEBUG_ERROR(
"GetDriverObjectByDriverName failed with status %x",
status);
return status;
}
/* Array entries come back referenced; we own one reference per object. */
status = EnumerateDriverObjectDeviceObjects(
pci_driver_object,
&pci_device_objects,
&pci_device_objects_count);
if (!NT_SUCCESS(status)) {
DEBUG_ERROR(
"EnumerateDriverObjectDeviceObjects failed with status %x",
status);
return status;
}
for (UINT32 index = 0; index < pci_device_objects_count; index++) {
current_device = pci_device_objects[index];
/* make sure we have a valid PDO */
if (!IsDeviceObjectValidPdo(current_device)) {
ObDereferenceObject(current_device);
continue;
}
status = CallbackRoutine(current_device, Context);
if (!NT_SUCCESS(status))
DEBUG_ERROR(
"EnumeratePciDeviceObjects CallbackRoutine failed with status %x",
status);
/* Drop the enumeration reference regardless of callback outcome. */
ObDereferenceObject(current_device);
}
if (pci_device_objects)
ExFreePoolWithTag(pci_device_objects, POOL_TAG_HW);
return status;
}
/*
 * Return TRUE when the configuration-space header's DeviceID matches any
 * entry in the FLAGGED_DEVICE_IDS blacklist, FALSE otherwise.
 */
BOOLEAN
IsPciConfigurationSpaceFlagged(_In_ PPCI_COMMON_HEADER Configuration)
{
    UINT32 entry = 0;
    while (entry < FLAGGED_DEVICE_ID_COUNT) {
        if (FLAGGED_DEVICE_IDS[entry] == Configuration->DeviceID)
            return TRUE;
        entry++;
    }
    return FALSE;
}
/*
 * Build, encrypt and queue a BLACKLISTED_PCIE_DEVICE_REPORT for the given
 * device so user mode is notified of the flagged hardware. Best-effort:
 * allocation or encryption failure silently drops the report (errors are
 * logged only).
 */
STATIC
VOID
ReportBlacklistedPcieDevice(
_In_ PDEVICE_OBJECT DeviceObject, _In_ PPCI_COMMON_HEADER Header)
{
NTSTATUS status = STATUS_UNSUCCESSFUL;
UINT32 len = 0;
PBLACKLISTED_PCIE_DEVICE_REPORT report = NULL;
/* Length includes whatever padding the crypt layer requires. */
len = CryptRequestRequiredBufferLength(
sizeof(BLACKLISTED_PCIE_DEVICE_REPORT));
report = ImpExAllocatePool2(POOL_FLAG_NON_PAGED, len, REPORT_POOL_TAG);
if (!report)
return;
INIT_REPORT_PACKET(report, REPORT_BLACKLISTED_PCIE_DEVICE, 0);
report->device_object = (UINT64)DeviceObject;
report->device_id = Header->DeviceID;
report->vendor_id = Header->VendorID;
/* Packet is encrypted in place before being handed to the IRP queue. */
status = CryptEncryptBuffer(report, len);
if (!NT_SUCCESS(status)) {
DEBUG_ERROR("CryptEncryptBuffer: %lx", status);
ImpExFreePoolWithTag(report, len);
return;
}
IrpQueueSchedulePacket(report, len);
}
/*
 * PCI_DEVICE_CALLBACK: read the device's configuration-space header and,
 * if its DeviceID is on the blacklist, emit a report packet. Context is
 * unused.
 */
STATIC
NTSTATUS
PciDeviceQueryCallback(_In_ PDEVICE_OBJECT DeviceObject, _In_opt_ PVOID Context)
{
UNREFERENCED_PARAMETER(Context);
NTSTATUS status = STATUS_UNSUCCESSFUL;
PCI_COMMON_HEADER header = {0};
/* Read the full common header starting at offset 0 (vendor ID). */
status = QueryPciDeviceConfigurationSpace(
DeviceObject,
PCI_VENDOR_ID_OFFSET,
&header,
sizeof(PCI_COMMON_HEADER));
if (!NT_SUCCESS(status)) {
DEBUG_ERROR(
"QueryPciDeviceConfigurationSpace failed with status %x",
status);
return status;
}
if (IsPciConfigurationSpaceFlagged(&header)) {
DEBUG_VERBOSE(
"Flagged DeviceID found. Device: %llx, DeviceId: %lx",
(UINT64)DeviceObject,
header.DeviceID);
ReportBlacklistedPcieDevice(DeviceObject, &header);
}
return status;
}
/*
 * Scan all PCI PDOs for blacklisted hardware, reporting any match via
 * PciDeviceQueryCallback. Returns the enumeration status.
 */
NTSTATUS
ValidatePciDevices()
{
    NTSTATUS result = EnumeratePciDeviceObjects(PciDeviceQueryCallback, NULL);
    if (!NT_SUCCESS(result))
        DEBUG_ERROR("EnumeratePciDeviceObjects failed with status %x", result);
    return result;
}
================================================
FILE: driver/hw.h
================================================
#ifndef HW_H
#define HW_H
#include "common.h"
/* Enumerate PCI devices and report any with blacklisted DeviceIDs. */
NTSTATUS
ValidatePciDevices();
#endif
================================================
FILE: driver/ia32.h
================================================
/** @file */
#pragma once
/* Fixed-width integer aliases used throughout this generated header. */
typedef unsigned char UINT8;
typedef unsigned short UINT16;
typedef unsigned int UINT32;
typedef unsigned long long UINT64;
#if defined(_MSC_EXTENSIONS)
# pragma warning(push)
# pragma warning(disable : 4201)
#endif
/**
* @defgroup INTEL_MANUAL \
* Intel Manual
*
* @remarks All references are based on Intel(R) 64 and IA-32 architectures software developer's
* manual combined volumes: 1, 2A, 2B, 2C, 2D, 3A, 3B, 3C, 3D, and 4 (June 2021) and Intel(R)
* Virtualization Technology for Directed I/O (April 2021)
* @{
*/
/**
* @defgroup CONTROL_REGISTERS \
* Control registers
*
* Control registers (CR0, CR1, CR2, CR3, and CR4) determine operating mode of the processor and the
* characteristics of the currently executing task. These registers are 32 bits in all 32-bit modes
* and compatibility mode. In 64-bit mode, control registers are expanded to 64 bits. The MOV CRn
* instructions are used to manipulate the register bits. Operand-size prefixes for these
* instructions are ignored. The following is also true:
* - The control registers can be read and loaded (or modified) using the
* move-to-or-from-control-registers forms of the MOV instruction. In protected mode, the MOV
* instructions allow the control registers to be read or loaded (at privilege level 0 only). This
* restriction means that application programs or operating-system procedures (running at privilege
* levels 1, 2, or 3) are prevented from reading or loading the control registers.
* - Bits 63:32 of CR0 and CR4 are reserved and must be written with zeros. Writing a nonzero value
* to any of the upper 32 bits results in a general-protection exception, \#GP(0).
* - All 64 bits of CR2 are writable by software.
* - Bits 51:40 of CR3 are reserved and must be 0.
* - The MOV CRn instructions do not check that addresses written to CR2 and CR3 are within the
* linear-address or physical-address limitations of the implementation.
* - Register CR8 is available in 64-bit mode only. The control registers are summarized below, and
* each architecturally defined control field in these control registers is described individually.
* - CR0 - Contains system control flags that control operating mode and states of the processor.
* - CR1 - Reserved.
* - CR2 - Contains the page-fault linear address (the linear address that caused a page fault).
* - CR3 - Contains the physical address of the base of the paging-structure hierarchy and two flags
* (PCD and PWT). Only the most-significant bits (less the lower 12 bits) of the base address are
* specified; the lower 12 bits of the address are assumed to be 0. The first paging structure must
* thus be aligned to a page (4-KByte) boundary. The PCD and PWT flags control caching of that
* paging structure in the processor's internal data caches (they do not control TLB caching of
* page-directory information). When using the physical address extension, the CR3 register contains
* the base address of the page-directorypointer table. In IA-32e mode, the CR3 register contains
* the base address of the PML4 table.
* - CR4 - Contains a group of flags that enable several architectural extensions, and indicate
* operating system or executive support for specific processor capabilities.
* - CR8 - Provides read and write access to the Task Priority Register (TPR). It specifies the
* priority threshold value that operating systems use to control the priority class of external
* interrupts allowed to interrupt the processor. This register is available only in 64-bit mode.
* However, interrupt filtering continues to apply in compatibility mode.
*
* @see Vol3A[2.5(CONTROL REGISTERS)] (reference)
* @{
*/
/* Bitfield view of the CR0 control register (generated from the Intel SDM). */
typedef union
{
struct
{
/**
* @brief Protection Enable
*
* [Bit 0] Enables protected mode when set; enables real-address mode when clear.
* This flag does not enable paging directly. It only enables segment-level
* protection. To enable paging, both the PE and PG flags must be set.
*
* @see Vol3A[9.9(Mode Switching)]
*/
UINT64 ProtectionEnable : 1;
#define CR0_PROTECTION_ENABLE_BIT 0
#define CR0_PROTECTION_ENABLE_FLAG 0x01
#define CR0_PROTECTION_ENABLE_MASK 0x01
#define CR0_PROTECTION_ENABLE(_) (((_) >> 0) & 0x01)
/**
* @brief Monitor Coprocessor
*
* [Bit 1] Controls the interaction of the WAIT (or FWAIT) instruction with the TS
* flag (bit 3 of CR0). If the MP flag is set, a WAIT instruction generates a
* device-not-available exception (\#NM) if the TS flag is also set. If the MP flag
* is clear, the WAIT instruction ignores the setting of the TS flag.
*/
UINT64 MonitorCoprocessor : 1;
#define CR0_MONITOR_COPROCESSOR_BIT 1
#define CR0_MONITOR_COPROCESSOR_FLAG 0x02
#define CR0_MONITOR_COPROCESSOR_MASK 0x01
#define CR0_MONITOR_COPROCESSOR(_) (((_) >> 1) & 0x01)
/**
* @brief FPU Emulation
*
* [Bit 2] Indicates that the processor does not have an internal or external x87
* FPU when set; indicates an x87 FPU is present when clear. This flag also affects
* the execution of MMX/SSE/SSE2/SSE3/SSSE3/SSE4 instructions. When the EM flag is
* set, execution of an x87 FPU instruction generates a device-not-available
* exception (\#NM). This flag must be set when the processor does not have an
* internal x87 FPU or is not connected to an external math coprocessor. Setting
* this flag forces all floating-point instructions to be handled by software
* emulation. Also, when the EM flag is set, execution of an MMX instruction causes
* an invalid-opcode exception (\#UD) to be generated. Thus, if an IA-32 or Intel 64
* processor incorporates MMX technology, the EM flag must be set to 0 to enable
* execution of MMX instructions. Similarly for SSE/SSE2/SSE3/SSSE3/SSE4 extensions,
* when the EM flag is set, execution of most SSE/SSE2/SSE3/SSSE3/SSE4 instructions
* causes an invalid opcode exception (\#UD) to be generated. If an IA-32 or Intel
* 64 processor incorporates the SSE/SSE2/SSE3/SSSE3/SSE4 extensions, the EM flag
* must be set to 0 to enable execution of these extensions.
* SSE/SSE2/SSE3/SSSE3/SSE4 instructions not affected by the EM flag include: PAUSE,
* PREFETCHh, SFENCE, LFENCE, MFENCE, MOVNTI, CLFLUSH, CRC32, and POPCNT.
*/
UINT64 EmulateFpu : 1;
#define CR0_EMULATE_FPU_BIT 2
#define CR0_EMULATE_FPU_FLAG 0x04
#define CR0_EMULATE_FPU_MASK 0x01
#define CR0_EMULATE_FPU(_) (((_) >> 2) & 0x01)
/**
* @brief Task Switched
*
* [Bit 3] Allows the saving of the x87 FPU/MMX/SSE/SSE2/SSE3/SSSE3/SSE4 context on
* a task switch to be delayed until an x87 FPU/MMX/SSE/SSE2/SSE3/SSSE3/SSE4
* instruction is actually executed by the new task. The processor sets this flag on
* every task switch and tests it when executing x87
* FPU/MMX/SSE/SSE2/SSE3/SSSE3/SSE4 instructions.
* - If the TS flag is set and the EM flag (bit 2 of CR0) is clear, a
* device-not-available exception (\#NM) is raised prior to the execution of any x87
* FPU/MMX/SSE/SSE2/SSE3/SSSE3/SSE4 instruction; with the exception of PAUSE,
* PREFETCHh, SFENCE, LFENCE, MFENCE, MOVNTI, CLFLUSH, CRC32, and POPCNT.
* - If the TS flag is set and the MP flag (bit 1 of CR0) and EM flag are clear, an
* \#NM exception is not raised prior to the execution of an x87 FPU WAIT/FWAIT
* instruction.
* - If the EM flag is set, the setting of the TS flag has no effect on the
* execution of x87 FPU/MMX/SSE/SSE2/SSE3/SSSE3/SSE4 instructions. The processor
* does not automatically save the context of the x87 FPU, XMM, and MXCSR registers
* on a task switch. Instead, it sets the TS flag, which causes the processor to
* raise an \#NM exception whenever it encounters an x87
* FPU/MMX/SSE/SSE2/SSE3/SSSE3/SSE4 instruction in the instruction stream for the
* new task (with the exception of the instructions listed above). The fault handler
* for the \#NM exception can then be used to clear the TS flag (with the CLTS
* instruction) and save the context of the x87 FPU, XMM, and MXCSR registers. If
* the task never encounters an x87 FPU/MMX/SSE/SSE2/SSE3/SSSE3/SSE4 instruction,
* the x87 FPU/MMX/SSE/SSE2/SSE3/SSSE3/SSE4 context is never saved.
*/
UINT64 TaskSwitched : 1;
#define CR0_TASK_SWITCHED_BIT 3
#define CR0_TASK_SWITCHED_FLAG 0x08
#define CR0_TASK_SWITCHED_MASK 0x01
#define CR0_TASK_SWITCHED(_) (((_) >> 3) & 0x01)
/**
* @brief Extension Type
*
* [Bit 4] Reserved in the Pentium 4, Intel Xeon, P6 family, and Pentium processors.
* In the Pentium 4, Intel Xeon, and P6 family processors, this flag is hardcoded
* to 1. In the Intel386 and Intel486 processors, this flag indicates support of
* Intel 387 DX math coprocessor instructions when set.
*/
UINT64 ExtensionType : 1;
#define CR0_EXTENSION_TYPE_BIT 4
#define CR0_EXTENSION_TYPE_FLAG 0x10
#define CR0_EXTENSION_TYPE_MASK 0x01
#define CR0_EXTENSION_TYPE(_) (((_) >> 4) & 0x01)
/**
* @brief Numeric Error
*
* [Bit 5] Enables the native (internal) mechanism for reporting x87 FPU errors when
* set; enables the PC-style x87 FPU error reporting mechanism when clear. When the
* NE flag is clear and the IGNNE\# input is asserted, x87 FPU errors are ignored.
* When the NE flag is clear and the IGNNE\# input is deasserted, an unmasked x87
* FPU error causes the processor to assert the FERR\# pin to generate an external
* interrupt and to stop instruction execution immediately before executing the next
* waiting floating-point instruction or WAIT/FWAIT instruction. The FERR\# pin is
* intended to drive an input to an external interrupt controller (the FERR\# pin
* emulates the ERROR\# pin of the Intel 287 and Intel 387 DX math coprocessors).
* The NE flag, IGNNE\# pin, and FERR\# pin are used with external logic to
* implement PC-style error reporting. Using FERR\# and IGNNE\# to handle
* floating-point exceptions is deprecated by modern operating systems; this
* non-native approach also limits newer processors to operate with one logical
* processor active.
*
* @see Vol1[8.7(Handling x87 FPU Exceptions in Software)]
* @see Vol1[A.1(APPENDIX A | EFLAGS Cross-Reference)]
*/
UINT64 NumericError : 1;
#define CR0_NUMERIC_ERROR_BIT 5
#define CR0_NUMERIC_ERROR_FLAG 0x20
#define CR0_NUMERIC_ERROR_MASK 0x01
#define CR0_NUMERIC_ERROR(_) (((_) >> 5) & 0x01)
UINT64 Reserved1 : 10;
/**
* @brief Write Protect
*
* [Bit 16] When set, inhibits supervisor-level procedures from writing into
* readonly pages; when clear, allows supervisor-level procedures to write into
* read-only pages (regardless of the U/S bit setting). This flag facilitates
* implementation of the copy-onwrite method of creating a new process (forking)
* used by operating systems such as UNIX.
*
* @see Vol3A[4.1.3(Paging-Mode Modifiers)]
* @see Vol3A[4.6(ACCESS RIGHTS)]
*/
UINT64 WriteProtect : 1;
#define CR0_WRITE_PROTECT_BIT 16
#define CR0_WRITE_PROTECT_FLAG 0x10000
#define CR0_WRITE_PROTECT_MASK 0x01
#define CR0_WRITE_PROTECT(_) (((_) >> 16) & 0x01)
UINT64 Reserved2 : 1;
/**
* @brief Alignment Mask
*
* [Bit 18] Enables automatic alignment checking when set; disables alignment
* checking when clear. Alignment checking is performed only when the AM flag is
* set, the AC flag in the EFLAGS register is set, CPL is 3, and the processor is
* operating in either protected or virtual-8086 mode.
*/
UINT64 AlignmentMask : 1;
#define CR0_ALIGNMENT_MASK_BIT 18
#define CR0_ALIGNMENT_MASK_FLAG 0x40000
#define CR0_ALIGNMENT_MASK_MASK 0x01
#define CR0_ALIGNMENT_MASK(_) (((_) >> 18) & 0x01)
UINT64 Reserved3 : 10;
/**
* @brief Not Write-through
*
* [Bit 29] When the NW and CD flags are clear, write-back (for Pentium 4, Intel
* Xeon, P6 family, and Pentium processors) or write-through (for Intel486
* processors) is enabled for writes that hit the cache and invalidation cycles are
* enabled.
*/
UINT64 NotWriteThrough : 1;
#define CR0_NOT_WRITE_THROUGH_BIT 29
#define CR0_NOT_WRITE_THROUGH_FLAG 0x20000000
#define CR0_NOT_WRITE_THROUGH_MASK 0x01
#define CR0_NOT_WRITE_THROUGH(_) (((_) >> 29) & 0x01)
/**
* @brief Cache Disable
*
* [Bit 30] When the CD and NW flags are clear, caching of memory locations for the
* whole of physical memory in the processor's internal (and external) caches is
* enabled. When the CD flag is set, caching is restricted. To prevent the processor
* from accessing and updating its caches, the CD flag must be set and the caches
* must be invalidated so that no cache hits can occur.
*
* @see Vol3A[11.5.3(Preventing Caching)]
* @see Vol3A[11.5(CACHE CONTROL)]
*/
UINT64 CacheDisable : 1;
#define CR0_CACHE_DISABLE_BIT 30
#define CR0_CACHE_DISABLE_FLAG 0x40000000
#define CR0_CACHE_DISABLE_MASK 0x01
#define CR0_CACHE_DISABLE(_) (((_) >> 30) & 0x01)
/**
* @brief Paging Enable
*
* [Bit 31] Enables paging when set; disables paging when clear. When paging is
* disabled, all linear addresses are treated as physical addresses. The PG flag has
* no effect if the PE flag (bit 0 of register CR0) is not also set; setting the PG
* flag when the PE flag is clear causes a general-protection exception (\#GP).
* On Intel 64 processors, enabling and disabling IA-32e mode operation also
* requires modifying CR0.PG.
*
* @see Vol3A[4(PAGING)]
*/
UINT64 PagingEnable : 1;
#define CR0_PAGING_ENABLE_BIT 31
#define CR0_PAGING_ENABLE_FLAG 0x80000000
#define CR0_PAGING_ENABLE_MASK 0x01
#define CR0_PAGING_ENABLE(_) (((_) >> 31) & 0x01)
UINT64 Reserved4 : 32;
} Fields;
UINT64 AsUInt;
} CR0;
/**
 * @brief Control Register 3 (CR3)
 *
 * Holds the physical address of the first paging structure used for
 * linear-address translation, together with two bits that select the
 * memory type used when the processor accesses that structure.
 */
typedef union
{
    struct
    {
        /* [Bits 2:0] Reserved. */
        UINT64 Reserved1 : 3;

        /**
         * @brief Page-level Write-Through
         *
         * [Bit 3] Controls the memory type used to access the first paging structure of the
         * current paging-structure hierarchy. This bit is not used if paging is disabled,
         * with PAE paging, or with 4-level paging if CR4.PCIDE=1.
         *
         * @see Vol3A[4.9(PAGING AND MEMORY TYPING)]
         */
        UINT64 PageLevelWriteThrough : 1;
#define CR3_PAGE_LEVEL_WRITE_THROUGH_BIT 3
#define CR3_PAGE_LEVEL_WRITE_THROUGH_FLAG 0x08
#define CR3_PAGE_LEVEL_WRITE_THROUGH_MASK 0x01
#define CR3_PAGE_LEVEL_WRITE_THROUGH(_) (((_) >> 3) & 0x01)

        /**
         * @brief Page-level Cache Disable
         *
         * [Bit 4] Controls the memory type used to access the first paging structure of the
         * current paging-structure hierarchy. This bit is not used if paging is disabled,
         * with PAE paging, or with 4-level paging if CR4.PCIDE=1.
         *
         * @see Vol3A[4.9(PAGING AND MEMORY TYPING)]
         */
        UINT64 PageLevelCacheDisable : 1;
#define CR3_PAGE_LEVEL_CACHE_DISABLE_BIT 4
#define CR3_PAGE_LEVEL_CACHE_DISABLE_FLAG 0x10
#define CR3_PAGE_LEVEL_CACHE_DISABLE_MASK 0x01
#define CR3_PAGE_LEVEL_CACHE_DISABLE(_) (((_) >> 4) & 0x01)

        /* [Bits 11:5] Reserved. */
        UINT64 Reserved2 : 7;

        /**
         * @brief Address of page directory
         *
         * [Bits 47:12] Physical address of the 4-KByte aligned page directory (32-bit
         * paging) or PML4 table (64-bit paging) used for linear-address translation.
         *
         * @see Vol3A[4.3(32-BIT PAGING)]
         * @see Vol3A[4.5(4-LEVEL PAGING)]
         */
        UINT64 AddressOfPageDirectory : 36;
#define CR3_ADDRESS_OF_PAGE_DIRECTORY_BIT 12
#define CR3_ADDRESS_OF_PAGE_DIRECTORY_FLAG 0xFFFFFFFFF000
#define CR3_ADDRESS_OF_PAGE_DIRECTORY_MASK 0xFFFFFFFFF
#define CR3_ADDRESS_OF_PAGE_DIRECTORY(_) (((_) >> 12) & 0xFFFFFFFFF)

        /* [Bits 63:48] Reserved. */
        UINT64 Reserved3 : 16;
    };

    /* Raw 64-bit register value. */
    UINT64 AsUInt;
} CR3;
// typedef union
//{
// struct
// {
// /**
// * @brief Virtual-8086 Mode Extensions
// *
// * [Bit 0] Enables interrupt- and exception-handling extensions in virtual-8086 mode
//when set; disables the extensions when
// * clear. Use of the virtual mode extensions can improve the performance of
//virtual-8086 applications by eliminating the
// * overhead of calling the virtual- 8086 monitor to handle interrupts and exceptions
//that occur while executing an 8086
// * program and, instead, redirecting the interrupts and exceptions back to the 8086
//program's handlers. It also provides
// * hardware support for a virtual interrupt flag (VIF) to improve reliability of
//running 8086 programs in multitasking and
// * multiple-processor environments.
// *
// * @see Vol3B[20.3(INTERRUPT AND EXCEPTION HANDLING IN VIRTUAL-8086 MODE)]
// */
// UINT64 VirtualModeExtensions : 1;
// #define CR4_VIRTUAL_MODE_EXTENSIONS_BIT 0
// #define CR4_VIRTUAL_MODE_EXTENSIONS_FLAG 0x01
// #define CR4_VIRTUAL_MODE_EXTENSIONS_MASK 0x01
// #define CR4_VIRTUAL_MODE_EXTENSIONS(_) (((_) >> 0) & 0x01)
//
// /**
// * @brief Protected-Mode Virtual Interrupts
// *
// * [Bit 1] Enables hardware support for a virtual interrupt flag (VIF) in protected
//mode when set; disables the VIF flag in
// * protected mode when clear.
// *
// * @see Vol3B[20.4(PROTECTED-MODE VIRTUAL INTERRUPTS)]
// */
// UINT64 ProtectedModeVirtualInterrupts : 1;
// #define CR4_PROTECTED_MODE_VIRTUAL_INTERRUPTS_BIT 1
// #define CR4_PROTECTED_MODE_VIRTUAL_INTERRUPTS_FLAG 0x02
// #define CR4_PROTECTED_MODE_VIRTUAL_INTERRUPTS_MASK 0x01
// #define CR4_PROTECTED_MODE_VIRTUAL_INTERRUPTS(_) (((_) >> 1) & 0x01)
//
// /**
// * @brief Time Stamp Disable
// *
// * [Bit 2] Restricts the execution of the RDTSC instruction to procedures running at
//privilege level 0 when set; allows
// * RDTSC instruction to be executed at any privilege level when clear. This bit also
//applies to the RDTSCP instruction if
// * supported (if CPUID.80000001H:EDX[27] = 1).
// */
// UINT64 TimestampDisable : 1;
// #define CR4_TIMESTAMP_DISABLE_BIT 2
// #define CR4_TIMESTAMP_DISABLE_FLAG 0x04
// #define CR4_TIMESTAMP_DISABLE_MASK 0x01
// #define CR4_TIMESTAMP_DISABLE(_) (((_) >> 2) & 0x01)
//
// /**
// * @brief Debugging Extensions
// *
// * [Bit 3] References to debug registers DR4 and DR5 cause an undefined opcode (\#UD)
//exception to be generated when set;
// * when clear, processor aliases references to registers DR4 and DR5 for compatibility
//with software written to run on
// * earlier IA-32 processors.
// *
// * @see Vol3B[17.2.2(Debug Registers DR4 and DR5)]
// */
// UINT64 DebuggingExtensions : 1;
// #define CR4_DEBUGGING_EXTENSIONS_BIT 3
// #define CR4_DEBUGGING_EXTENSIONS_FLAG 0x08
// #define CR4_DEBUGGING_EXTENSIONS_MASK 0x01
// #define CR4_DEBUGGING_EXTENSIONS(_) (((_) >> 3) & 0x01)
//
// /**
// * @brief Page Size Extensions
// *
// * [Bit 4] Enables 4-MByte pages with 32-bit paging when set; restricts 32-bit paging
//to pages of 4 KBytes when clear.
// *
// * @see Vol3A[4.3(32-BIT PAGING)]
// */
// UINT64 PageSizeExtensions : 1;
// #define CR4_PAGE_SIZE_EXTENSIONS_BIT 4
// #define CR4_PAGE_SIZE_EXTENSIONS_FLAG 0x10
// #define CR4_PAGE_SIZE_EXTENSIONS_MASK 0x01
// #define CR4_PAGE_SIZE_EXTENSIONS(_) (((_) >> 4) & 0x01)
//
// /**
// * @brief Physical Address Extension
// *
// * [Bit 5] When set, enables paging to produce physical addresses with more than 32
//bits. When clear, restricts physical
// * addresses to 32 bits. PAE must be set before entering IA-32e mode.
// *
// * @see Vol3A[4(PAGING)]
// */
// UINT64 PhysicalAddressExtension : 1;
// #define CR4_PHYSICAL_ADDRESS_EXTENSION_BIT 5
// #define CR4_PHYSICAL_ADDRESS_EXTENSION_FLAG 0x20
// #define CR4_PHYSICAL_ADDRESS_EXTENSION_MASK 0x01
// #define CR4_PHYSICAL_ADDRESS_EXTENSION(_) (((_) >> 5) & 0x01)
//
// /**
// * @brief Machine-Check Enable
// *
// * [Bit 6] Enables the machine-check exception when set; disables the machine-check
//exception when clear.
// *
// * @see Vol3B[15(MACHINE-CHECK ARCHITECTURE)]
// */
// UINT64 MachineCheckEnable : 1;
// #define CR4_MACHINE_CHECK_ENABLE_BIT 6
// #define CR4_MACHINE_CHECK_ENABLE_FLAG 0x40
// #define CR4_MACHINE_CHECK_ENABLE_MASK 0x01
// #define CR4_MACHINE_CHECK_ENABLE(_) (((_) >> 6) & 0x01)
//
// /**
// * @brief Page Global Enable
// *
// * [Bit 7] (Introduced in the P6 family processors.) Enables the global page feature
//when set; disables the global page
// * feature when clear. The global page feature allows frequently used or shared pages
//to be marked as global to all users
// * (done with the global flag, bit 8, in a page-directory or page-table entry). Global
//pages are not flushed from the
// * translation-lookaside buffer (TLB) on a task switch or a write to register CR3.
//When enabling the global page feature,
// * paging must be enabled (by setting the PG flag in control register CR0) before the
//PGE flag is set. Reversing this
// * sequence may affect program correctness, and processor performance will be
//impacted.
// *
// * @see Vol3A[4.10(CACHING TRANSLATION INFORMATION)]
// */
// UINT64 PageGlobalEnable : 1;
// #define CR4_PAGE_GLOBAL_ENABLE_BIT 7
// #define CR4_PAGE_GLOBAL_ENABLE_FLAG 0x80
// #define CR4_PAGE_GLOBAL_ENABLE_MASK 0x01
// #define CR4_PAGE_GLOBAL_ENABLE(_) (((_) >> 7) & 0x01)
//
// /**
// * @brief Performance-Monitoring Counter Enable
// *
// * [Bit 8] Enables execution of the RDPMC instruction for programs or procedures
//running at any protection level when set;
// * RDPMC instruction can be executed only at protection level 0 when clear.
// */
// UINT64 PerformanceMonitoringCounterEnable : 1;
// #define CR4_PERFORMANCE_MONITORING_COUNTER_ENABLE_BIT 8
// #define CR4_PERFORMANCE_MONITORING_COUNTER_ENABLE_FLAG 0x100
// #define CR4_PERFORMANCE_MONITORING_COUNTER_ENABLE_MASK 0x01
// #define CR4_PERFORMANCE_MONITORING_COUNTER_ENABLE(_) (((_) >> 8) & 0x01)
//
// /**
// * @brief Operating System Support for FXSAVE and FXRSTOR instructions
// *
// * [Bit 9] When set, this flag:
// * -# indicates to software that the operating system supports the use of the FXSAVE
//and FXRSTOR instructions,
// * -# enables the FXSAVE and FXRSTOR instructions to save and restore the contents of
//the XMM and MXCSR registers along
// * with the contents of the x87 FPU and MMX registers, and
// * -# enables the processor to execute SSE/SSE2/SSE3/SSSE3/SSE4 instructions, with the
//exception of the PAUSE, PREFETCHh,
// * SFENCE, LFENCE, MFENCE, MOVNTI, CLFLUSH, CRC32, and POPCNT.
// * If this flag is clear, the FXSAVE and FXRSTOR instructions will save and restore
//the contents of the x87 FPU and MMX
// * registers, but they may not save and restore the contents of the XMM and MXCSR
//registers. Also, the processor will
// * generate an invalid opcode exception (\#UD) if it attempts to execute any
//SSE/SSE2/SSE3 instruction, with the exception
// * of PAUSE, PREFETCHh, SFENCE, LFENCE, MFENCE, MOVNTI, CLFLUSH, CRC32, and POPCNT.
//The operating system or executive must
// * explicitly set this flag.
// *
// * @remarks CPUID feature flag FXSR indicates availability of the FXSAVE/FXRSTOR
//instructions. The OSFXSR bit provides
// * operating system software with a means of enabling FXSAVE/FXRSTOR to
//save/restore the contents of the X87 FPU, XMM and
// * MXCSR registers. Consequently OSFXSR bit indicates that the operating
//system provides context switch support for
// * SSE/SSE2/SSE3/SSSE3/SSE4.
// */
// UINT64 OsFxsaveFxrstorSupport : 1;
// #define CR4_OS_FXSAVE_FXRSTOR_SUPPORT_BIT 9
// #define CR4_OS_FXSAVE_FXRSTOR_SUPPORT_FLAG 0x200
// #define CR4_OS_FXSAVE_FXRSTOR_SUPPORT_MASK 0x01
// #define CR4_OS_FXSAVE_FXRSTOR_SUPPORT(_) (((_) >> 9) & 0x01)
//
// /**
// * @brief Operating System Support for Unmasked SIMD Floating-Point Exceptions
// *
// * [Bit 10] Operating System Support for Unmasked SIMD Floating-Point Exceptions -
//When set, indicates that the operating
// * system supports the handling of unmasked SIMD floating-point exceptions through an
//exception handler that is invoked
// * when a SIMD floating-point exception (\#XM) is generated. SIMD floating-point
//exceptions are only generated by
// * SSE/SSE2/SSE3/SSE4.1 SIMD floating-point instructions.
// * The operating system or executive must explicitly set this flag. If this flag is
//not set, the processor will generate an
// * invalid opcode exception (\#UD) whenever it detects an unmasked SIMD floating-point
//exception.
// */
// UINT64 OsXmmExceptionSupport : 1;
// #define CR4_OS_XMM_EXCEPTION_SUPPORT_BIT 10
// #define CR4_OS_XMM_EXCEPTION_SUPPORT_FLAG 0x400
// #define CR4_OS_XMM_EXCEPTION_SUPPORT_MASK 0x01
// #define CR4_OS_XMM_EXCEPTION_SUPPORT(_) (((_) >> 10) & 0x01)
//
// /**
// * @brief User-Mode Instruction Prevention
// *
// * [Bit 11] When set, the following instructions cannot be executed if CPL > 0: SGDT,
//SIDT, SLDT, SMSW, and STR. An attempt
// * at such execution causes a general-protection exception (\#GP).
// */
// UINT64 UsermodeInstructionPrevention : 1;
// #define CR4_USERMODE_INSTRUCTION_PREVENTION_BIT 11
// #define CR4_USERMODE_INSTRUCTION_PREVENTION_FLAG 0x800
// #define CR4_USERMODE_INSTRUCTION_PREVENTION_MASK 0x01
// #define CR4_USERMODE_INSTRUCTION_PREVENTION(_) (((_) >> 11) & 0x01)
//
// /**
// * @brief 57-bit Linear Addresses
// *
// * [Bit 12] When set in IA-32e mode, the processor uses 5-level paging to translate
//57-bit linear addresses. When clear in
// * IA-32e mode, the processor uses 4-level paging to translate 48-bit linear
//addresses. This bit cannot be modified in
// * IA-32e mode.
// *
// * @see Vol3C[4(PAGING)]
// */
// UINT64 LinearAddresses57Bit : 1;
// #define CR4_LINEAR_ADDRESSES_57_BIT_BIT 12
// #define CR4_LINEAR_ADDRESSES_57_BIT_FLAG 0x1000
// #define CR4_LINEAR_ADDRESSES_57_BIT_MASK 0x01
// #define CR4_LINEAR_ADDRESSES_57_BIT(_) (((_) >> 12) & 0x01)
//
// /**
// * @brief VMX-Enable
// *
// * [Bit 13] Enables VMX operation when set.
// *
// * @see Vol3C[23(INTRODUCTION TO VIRTUAL MACHINE EXTENSIONS)]
// */
// UINT64 VmxEnable : 1;
// #define CR4_VMX_ENABLE_BIT 13
// #define CR4_VMX_ENABLE_FLAG 0x2000
// #define CR4_VMX_ENABLE_MASK 0x01
// #define CR4_VMX_ENABLE(_) (((_) >> 13) & 0x01)
//
// /**
// * @brief SMX-Enable
// *
// * [Bit 14] Enables SMX operation when set.
// *
// * @see Vol2[6(SAFER MODE EXTENSIONS REFERENCE)]
// */
// UINT64 SmxEnable : 1;
// #define CR4_SMX_ENABLE_BIT 14
// #define CR4_SMX_ENABLE_FLAG 0x4000
// #define CR4_SMX_ENABLE_MASK 0x01
// #define CR4_SMX_ENABLE(_) (((_) >> 14) & 0x01)
// UINT64 Reserved1 : 1;
//
// /**
// * @brief FSGSBASE-Enable
// *
// * [Bit 16] Enables the instructions RDFSBASE, RDGSBASE, WRFSBASE, and WRGSBASE.
// */
// UINT64 FsgsbaseEnable : 1;
// #define CR4_FSGSBASE_ENABLE_BIT 16
// #define CR4_FSGSBASE_ENABLE_FLAG 0x10000
// #define CR4_FSGSBASE_ENABLE_MASK 0x01
// #define CR4_FSGSBASE_ENABLE(_) (((_) >> 16) & 0x01)
//
// /**
// * @brief PCID-Enable
// *
// * [Bit 17] Enables process-context identifiers (PCIDs) when set. Can be set only in
//IA-32e mode (if IA32_EFER.LMA = 1).
// *
// * @see Vol3A[4.10.1(Process-Context Identifiers (PCIDs))]
// */
// UINT64 PcidEnable : 1;
// #define CR4_PCID_ENABLE_BIT 17
// #define CR4_PCID_ENABLE_FLAG 0x20000
// #define CR4_PCID_ENABLE_MASK 0x01
// #define CR4_PCID_ENABLE(_) (((_) >> 17) & 0x01)
//
// /**
// * @brief XSAVE and Processor Extended States-Enable
// *
// * [Bit 18] When set, this flag:
// * -# indicates (via CPUID.01H:ECX.OSXSAVE[bit 27]) that the operating system supports
//the use of the XGETBV, XSAVE and
// * XRSTOR instructions by general software;
// * -# enables the XSAVE and XRSTOR instructions to save and restore the x87 FPU state
//(including MMX registers), the SSE
// * state (XMM registers and MXCSR), along with other processor extended states enabled
//in XCR0;
// * -# enables the processor to execute XGETBV and XSETBV instructions in order to read
//and write XCR0.
// *
// * @see Vol3A[2.6(EXTENDED CONTROL REGISTERS (INCLUDING XCR0))]
// * @see Vol3A[13(SYSTEM PROGRAMMING FOR INSTRUCTION SET EXTENSIONS AND PROCESSOR
//EXTENDED)]
// */
// UINT64 OsXsave : 1;
// #define CR4_OS_XSAVE_BIT 18
// #define CR4_OS_XSAVE_FLAG 0x40000
// #define CR4_OS_XSAVE_MASK 0x01
// #define CR4_OS_XSAVE(_) (((_) >> 18) & 0x01)
//
// /**
// * @brief Key-Locker-Enable
// *
// * [Bit 19] When set, the LOADIWKEY instruction is enabled; in addition, if support
//for the AES Key Locker instructions has
// * been activated by system firmware, CPUID.19H:EBX.AESKLE[bit 0] is enumerated as 1
//and the AES Key Locker instructions
// * are enabled. When clear, CPUID.19H:EBX.AESKLE[bit 0] is enumerated as 0 and
//execution of any Key Locker instruction
// * causes an invalid-opcode exception (\#UD).
// */
// UINT64 KeyLockerEnable : 1;
// #define CR4_KEY_LOCKER_ENABLE_BIT 19
// #define CR4_KEY_LOCKER_ENABLE_FLAG 0x80000
// #define CR4_KEY_LOCKER_ENABLE_MASK 0x01
// #define CR4_KEY_LOCKER_ENABLE(_) (((_) >> 19) & 0x01)
//
// /**
// * @brief SMEP-Enable
// *
// * [Bit 20] Enables supervisor-mode execution prevention (SMEP) when set.
// *
// * @see Vol3A[4.6(ACCESS RIGHTS)]
// */
// UINT64 SmepEnable : 1;
// #define CR4_SMEP_ENABLE_BIT 20
// #define CR4_SMEP_ENABLE_FLAG 0x100000
// #define CR4_SMEP_ENABLE_MASK 0x01
// #define CR4_SMEP_ENABLE(_) (((_) >> 20) & 0x01)
//
// /**
// * @brief SMAP-Enable
// *
// * [Bit 21] Enables supervisor-mode access prevention (SMAP) when set.
// *
// * @see Vol3A[4.6(ACCESS RIGHTS)]
// */
// UINT64 SmapEnable : 1;
// #define CR4_SMAP_ENABLE_BIT 21
// #define CR4_SMAP_ENABLE_FLAG 0x200000
// #define CR4_SMAP_ENABLE_MASK 0x01
// #define CR4_SMAP_ENABLE(_) (((_) >> 21) & 0x01)
//
// /**
// * @brief Protection-Key-Enable
// *
// * [Bit 22] Enables 4-level paging to associate each linear address with a protection
//key. The PKRU register specifies, for
// * each protection key, whether user-mode linear addresses with that protection key
//can be read or written. This bit also
// * enables access to the PKRU register using the RDPKRU and WRPKRU instructions.
// */
// UINT64 ProtectionKeyEnable : 1;
// #define CR4_PROTECTION_KEY_ENABLE_BIT 22
// #define CR4_PROTECTION_KEY_ENABLE_FLAG 0x400000
// #define CR4_PROTECTION_KEY_ENABLE_MASK 0x01
// #define CR4_PROTECTION_KEY_ENABLE(_) (((_) >> 22) & 0x01)
//
// /**
// * @brief Control-flow Enforcement Technology
// *
// * [Bit 23] Enables control-flow enforcement technology when set. This flag can be set
//only if CR0.WP is set, and it must
// * be clear before CR0.WP can be cleared.
// *
// * @see Vol1[18(CONTROL-FLOW ENFORCEMENT TECHNOLOGY (CET))]
// */
// UINT64 ControlFlowEnforcementEnable : 1;
// #define CR4_CONTROL_FLOW_ENFORCEMENT_ENABLE_BIT 23
// #define CR4_CONTROL_FLOW_ENFORCEMENT_ENABLE_FLAG 0x800000
// #define CR4_CONTROL_FLOW_ENFORCEMENT_ENABLE_MASK 0x01
// #define CR4_CONTROL_FLOW_ENFORCEMENT_ENABLE(_) (((_) >> 23) & 0x01)
//
// /**
// * @brief Enable protection keys for supervisor-mode pages
// *
// * [Bit 24] 4-level paging and 5-level paging associate each supervisor-mode linear
//address with a protection key. When
// * set, this flag allows use of the IA32_PKRS MSR to specify, for each protection key,
//whether supervisor-mode linear
// * addresses with that protection key can be read or written.
// */
// UINT64 ProtectionKeyForSupervisorModeEnable : 1;
// #define CR4_PROTECTION_KEY_FOR_SUPERVISOR_MODE_ENABLE_BIT 24
// #define CR4_PROTECTION_KEY_FOR_SUPERVISOR_MODE_ENABLE_FLAG 0x1000000
// #define CR4_PROTECTION_KEY_FOR_SUPERVISOR_MODE_ENABLE_MASK 0x01
// #define CR4_PROTECTION_KEY_FOR_SUPERVISOR_MODE_ENABLE(_) (((_) >> 24) & 0x01)
// UINT64 Reserved2 : 39;
// };
//
// UINT64 AsUInt;
// } CR4;
/**
 * @brief Control Register 8 (CR8)
 *
 * Holds the task priority level used as the threshold for blocking
 * lower-priority interrupts; available in 64-bit mode.
 */
typedef union
{
    struct
    {
        /**
         * @brief Task Priority Level
         *
         * [Bits 3:0] This sets the threshold value corresponding to the
         * highest-priority interrupt to be blocked. A value of 0 means all
         * interrupts are enabled. This field is available in 64-bit mode.
         * A value of 15 means all interrupts will be disabled.
         */
        UINT64 TaskPriorityLevel : 4;
#define CR8_TASK_PRIORITY_LEVEL_BIT 0
#define CR8_TASK_PRIORITY_LEVEL_FLAG 0x0F
#define CR8_TASK_PRIORITY_LEVEL_MASK 0x0F
#define CR8_TASK_PRIORITY_LEVEL(_) (((_) >> 0) & 0x0F)

        /**
         * @brief Reserved
         *
         * [Bits 63:4] Reserved and must be written with zeros. Failure to do
         * this causes a general-protection exception.
         */
        UINT64 Reserved : 60;
#define CR8_RESERVED_BIT 4
#define CR8_RESERVED_FLAG 0xFFFFFFFFFFFFFFF0
#define CR8_RESERVED_MASK 0xFFFFFFFFFFFFFFF
#define CR8_RESERVED(_) (((_) >> 4) & 0xFFFFFFFFFFFFFFF)
    };

    /* Raw 64-bit register value. */
    UINT64 AsUInt;
} CR8;
/**
* @}
*/
/**
* @defgroup DEBUG_REGISTERS \
* Debug registers
*
* Eight debug registers control the debug operation of the processor. These registers can be
* written to and read using the move to/from debug register form of the MOV instruction. A debug
* register may be the source or destination operand for one of these instructions. Debug registers
* are privileged resources; a MOV instruction that accesses these registers can only be executed in
* real-address mode, in SMM or in protected mode at a CPL of 0. An attempt to read or write the
* debug registers from any other privilege level generates a general-protection exception (\#GP).
* The primary function of the debug registers is to set up and monitor from 1 to 4 breakpoints,
 * numbered 0 through 3. For each breakpoint, the following information can be specified:
* - The linear address where the breakpoint is to occur.
* - The length of the breakpoint location: 1, 2, 4, or 8 bytes.
* - The operation that must be performed at the address for a debug exception to be generated.
* - Whether the breakpoint is enabled.
* - Whether the breakpoint condition was present when the debug exception was generated.
*
* @see Vol3B[17.2.4(Debug Control Register (DR7))]
* @see Vol3B[17.2(DEBUG REGISTERS)] (reference)
* @{
*/
/**
 * @brief Debug status register (DR6)
 *
 * Reports the debug conditions that were in effect at the time the most
 * recent debug exception (\#DB) was generated.
 */
typedef union
{
    struct
    {
        /**
         * @brief B0 through B3 (breakpoint condition detected) flags
         *
         * [Bits 3:0] Indicates (when set) that its associated breakpoint condition was met
         * when a debug exception was generated. These flags are set if the condition
         * described for each breakpoint by the LENn, and R/Wn flags in debug control
         * register DR7 is true. They may or may not be set if the breakpoint is not enabled
         * by the Ln or the Gn flags in register DR7. Therefore on a \#DB, a debug handler
         * should check only those B0-B3 bits which correspond to an enabled breakpoint.
         */
        UINT64 BreakpointCondition : 4;
#define DR6_BREAKPOINT_CONDITION_BIT 0
#define DR6_BREAKPOINT_CONDITION_FLAG 0x0F
#define DR6_BREAKPOINT_CONDITION_MASK 0x0F
#define DR6_BREAKPOINT_CONDITION(_) (((_) >> 0) & 0x0F)

        /* [Bits 12:4] Reserved. */
        UINT64 Reserved1 : 9;

        /**
         * @brief BD (debug register access detected) flag
         *
         * [Bit 13] Indicates that the next instruction in the instruction stream accesses
         * one of the debug registers (DR0 through DR7). This flag is enabled when the GD
         * (general detect) flag in debug control register DR7 is set.
         *
         * @see Vol3B[17.2.4(Debug Control Register (DR7))]
         */
        UINT64 DebugRegisterAccessDetected : 1;
#define DR6_DEBUG_REGISTER_ACCESS_DETECTED_BIT 13
#define DR6_DEBUG_REGISTER_ACCESS_DETECTED_FLAG 0x2000
#define DR6_DEBUG_REGISTER_ACCESS_DETECTED_MASK 0x01
#define DR6_DEBUG_REGISTER_ACCESS_DETECTED(_) (((_) >> 13) & 0x01)

        /**
         * @brief BS (single step) flag
         *
         * [Bit 14] Indicates (when set) that the debug exception was triggered by the
         * single-step execution mode (enabled with the TF flag in the EFLAGS register).
         * The single-step mode is the highest-priority debug exception. When the BS flag
         * is set, any of the other debug status bits also may be set.
         */
        UINT64 SingleInstruction : 1;
#define DR6_SINGLE_INSTRUCTION_BIT 14
#define DR6_SINGLE_INSTRUCTION_FLAG 0x4000
#define DR6_SINGLE_INSTRUCTION_MASK 0x01
#define DR6_SINGLE_INSTRUCTION(_) (((_) >> 14) & 0x01)

        /**
         * @brief BT (task switch) flag
         *
         * [Bit 15] Indicates (when set) that the debug exception resulted from a task
         * switch where the T flag (debug trap flag) in the TSS of the target task was
         * set. There is no flag in DR7 to enable or disable this exception; the T flag
         * of the TSS is the only enabling flag.
         *
         * @see Vol3B[17.2.3(Debug Status Register (DR6))]
         */
        UINT64 TaskSwitch : 1;
#define DR6_TASK_SWITCH_BIT 15
#define DR6_TASK_SWITCH_FLAG 0x8000
#define DR6_TASK_SWITCH_MASK 0x01
#define DR6_TASK_SWITCH(_) (((_) >> 15) & 0x01)

        /**
         * @brief RTM (restricted transactional memory) flag
         *
         * [Bit 16] Indicates (when clear) that a debug exception (\#DB) or breakpoint
         * exception (\#BP) occurred inside an RTM region while advanced debugging of RTM
         * transactional regions was enabled. This bit is set for any other debug exception
         * (including all those that occur when advanced debugging of RTM transactional
         * regions is not enabled). This bit is always 1 if the processor does not support
         * RTM.
         *
         * @see Vol3B[17.3.3(Debug Exceptions, Breakpoint Exceptions, and Restricted
         * Transactional Memory (RTM))]
         */
        UINT64 RestrictedTransactionalMemory : 1;
#define DR6_RESTRICTED_TRANSACTIONAL_MEMORY_BIT 16
#define DR6_RESTRICTED_TRANSACTIONAL_MEMORY_FLAG 0x10000
#define DR6_RESTRICTED_TRANSACTIONAL_MEMORY_MASK 0x01
#define DR6_RESTRICTED_TRANSACTIONAL_MEMORY(_) (((_) >> 16) & 0x01)

        /* [Bits 63:17] Reserved. */
        UINT64 Reserved2 : 47;
    };

    /* Raw 64-bit register value. */
    UINT64 AsUInt;
} DR6;
/**
 * @brief Debug control register (DR7)
 *
 * Enables/disables the four hardware breakpoints (DR0-DR3) and specifies
 * the trigger condition and length for each.
 *
 * @see Vol3B[17.2.4(Debug Control Register (DR7))]
 */
typedef union
{
    struct
    {
        /**
         * @brief L0 through L3 (local breakpoint enable) flags (bits 0, 2, 4, and 6)
         *
         * [Bit 0] Enables (when set) the breakpoint condition for the associated breakpoint
         * for the current task. When a breakpoint condition is detected and its associated
         * Ln flag is set, a debug exception is generated. The processor automatically
         * clears these flags on every task switch to avoid unwanted breakpoint conditions
         * in the new task.
         */
        UINT64 LocalBreakpoint0 : 1;
#define DR7_LOCAL_BREAKPOINT_0_BIT 0
#define DR7_LOCAL_BREAKPOINT_0_FLAG 0x01
#define DR7_LOCAL_BREAKPOINT_0_MASK 0x01
#define DR7_LOCAL_BREAKPOINT_0(_) (((_) >> 0) & 0x01)

        /**
         * @brief G0 through G3 (global breakpoint enable) flags (bits 1, 3, 5, and 7)
         *
         * [Bit 1] Enables (when set) the breakpoint condition for the associated breakpoint
         * for all tasks. When a breakpoint condition is detected and its associated Gn flag
         * is set, a debug exception is generated. The processor does not clear these flags
         * on a task switch, allowing a breakpoint to be enabled for all tasks.
         */
        UINT64 GlobalBreakpoint0 : 1;
#define DR7_GLOBAL_BREAKPOINT_0_BIT 1
#define DR7_GLOBAL_BREAKPOINT_0_FLAG 0x02
#define DR7_GLOBAL_BREAKPOINT_0_MASK 0x01
#define DR7_GLOBAL_BREAKPOINT_0(_) (((_) >> 1) & 0x01)

        /* [Bit 2] L1: local breakpoint enable for DR1 (see LocalBreakpoint0). */
        UINT64 LocalBreakpoint1 : 1;
#define DR7_LOCAL_BREAKPOINT_1_BIT 2
#define DR7_LOCAL_BREAKPOINT_1_FLAG 0x04
#define DR7_LOCAL_BREAKPOINT_1_MASK 0x01
#define DR7_LOCAL_BREAKPOINT_1(_) (((_) >> 2) & 0x01)

        /* [Bit 3] G1: global breakpoint enable for DR1 (see GlobalBreakpoint0). */
        UINT64 GlobalBreakpoint1 : 1;
#define DR7_GLOBAL_BREAKPOINT_1_BIT 3
#define DR7_GLOBAL_BREAKPOINT_1_FLAG 0x08
#define DR7_GLOBAL_BREAKPOINT_1_MASK 0x01
#define DR7_GLOBAL_BREAKPOINT_1(_) (((_) >> 3) & 0x01)

        /* [Bit 4] L2: local breakpoint enable for DR2 (see LocalBreakpoint0). */
        UINT64 LocalBreakpoint2 : 1;
#define DR7_LOCAL_BREAKPOINT_2_BIT 4
#define DR7_LOCAL_BREAKPOINT_2_FLAG 0x10
#define DR7_LOCAL_BREAKPOINT_2_MASK 0x01
#define DR7_LOCAL_BREAKPOINT_2(_) (((_) >> 4) & 0x01)

        /* [Bit 5] G2: global breakpoint enable for DR2 (see GlobalBreakpoint0). */
        UINT64 GlobalBreakpoint2 : 1;
#define DR7_GLOBAL_BREAKPOINT_2_BIT 5
#define DR7_GLOBAL_BREAKPOINT_2_FLAG 0x20
#define DR7_GLOBAL_BREAKPOINT_2_MASK 0x01
#define DR7_GLOBAL_BREAKPOINT_2(_) (((_) >> 5) & 0x01)

        /* [Bit 6] L3: local breakpoint enable for DR3 (see LocalBreakpoint0). */
        UINT64 LocalBreakpoint3 : 1;
#define DR7_LOCAL_BREAKPOINT_3_BIT 6
#define DR7_LOCAL_BREAKPOINT_3_FLAG 0x40
#define DR7_LOCAL_BREAKPOINT_3_MASK 0x01
#define DR7_LOCAL_BREAKPOINT_3(_) (((_) >> 6) & 0x01)

        /* [Bit 7] G3: global breakpoint enable for DR3 (see GlobalBreakpoint0). */
        UINT64 GlobalBreakpoint3 : 1;
#define DR7_GLOBAL_BREAKPOINT_3_BIT 7
#define DR7_GLOBAL_BREAKPOINT_3_FLAG 0x80
#define DR7_GLOBAL_BREAKPOINT_3_MASK 0x01
#define DR7_GLOBAL_BREAKPOINT_3(_) (((_) >> 7) & 0x01)

        /**
         * @brief LE (local exact breakpoint enable)
         *
         * [Bit 8] This feature is not supported in the P6 family processors, later IA-32
         * processors, and Intel 64 processors. When set, these flags cause the processor to
         * detect the exact instruction that caused a data breakpoint condition. For
         * backward and forward compatibility with other Intel processors, we recommend that
         * the LE and GE flags be set to 1 if exact breakpoints are required.
         */
        UINT64 LocalExactBreakpoint : 1;
#define DR7_LOCAL_EXACT_BREAKPOINT_BIT 8
#define DR7_LOCAL_EXACT_BREAKPOINT_FLAG 0x100
#define DR7_LOCAL_EXACT_BREAKPOINT_MASK 0x01
#define DR7_LOCAL_EXACT_BREAKPOINT(_) (((_) >> 8) & 0x01)

        /* [Bit 9] GE: global exact breakpoint enable (see LocalExactBreakpoint). */
        UINT64 GlobalExactBreakpoint : 1;
#define DR7_GLOBAL_EXACT_BREAKPOINT_BIT 9
#define DR7_GLOBAL_EXACT_BREAKPOINT_FLAG 0x200
#define DR7_GLOBAL_EXACT_BREAKPOINT_MASK 0x01
#define DR7_GLOBAL_EXACT_BREAKPOINT(_) (((_) >> 9) & 0x01)

        /* [Bit 10] Reserved. */
        UINT64 Reserved1 : 1;

        /**
         * @brief RTM (restricted transactional memory) flag
         *
         * [Bit 11] Enables (when set) advanced debugging of RTM transactional regions. This
         * advanced debugging is enabled only if IA32_DEBUGCTL.RTM is also set.
         *
         * @see Vol3B[17.3.3(Debug Exceptions, Breakpoint Exceptions, and Restricted
         * Transactional Memory (RTM))]
         */
        UINT64 RestrictedTransactionalMemory : 1;
#define DR7_RESTRICTED_TRANSACTIONAL_MEMORY_BIT 11
#define DR7_RESTRICTED_TRANSACTIONAL_MEMORY_FLAG 0x800
#define DR7_RESTRICTED_TRANSACTIONAL_MEMORY_MASK 0x01
#define DR7_RESTRICTED_TRANSACTIONAL_MEMORY(_) (((_) >> 11) & 0x01)

        /* [Bit 12] Reserved. */
        UINT64 Reserved2 : 1;

        /**
         * @brief GD (general detect enable) flag
         *
         * [Bit 13] Enables (when set) debug-register protection, which causes a debug
         * exception to be generated prior to any MOV instruction that accesses a debug
         * register. When such a condition is detected, the BD flag in debug status register
         * DR6 is set prior to generating the exception. This condition is provided to
         * support in-circuit emulators. When the emulator needs to access the debug
         * registers, emulator software can set the GD flag to prevent interference from the
         * program currently executing on the processor. The processor clears the GD flag
         * upon entering to the debug exception handler, to allow the handler access to the
         * debug registers.
         */
        UINT64 GeneralDetect : 1;
#define DR7_GENERAL_DETECT_BIT 13
#define DR7_GENERAL_DETECT_FLAG 0x2000
#define DR7_GENERAL_DETECT_MASK 0x01
#define DR7_GENERAL_DETECT(_) (((_) >> 13) & 0x01)

        /* [Bits 15:14] Reserved. */
        UINT64 Reserved3 : 2;

        /**
         * @brief R/W0 through R/W3 (read/write) fields (bits 16, 17, 20, 21, 24, 25, 28,
         * and 29)
         *
         * [Bits 17:16] Specifies the breakpoint condition for the corresponding breakpoint.
         * The DE (debug extensions) flag in control register CR4 determines how the bits in
         * the R/Wn fields are interpreted. When the DE flag is set, the processor
         * interprets bits as follows:
         * - 00 - Break on instruction execution only.
         * - 01 - Break on data writes only.
         * - 10 - Break on I/O reads or writes.
         * - 11 - Break on data reads or writes but not instruction fetches.
         * When the DE flag is clear, the processor interprets the R/Wn bits the same as for
         * the Intel386(TM) and Intel486(TM) processors, which is as follows:
         * - 00 - Break on instruction execution only.
         * - 01 - Break on data writes only.
         * - 10 - Undefined.
         * - 11 - Break on data reads or writes but not instruction fetches.
         */
        UINT64 ReadWrite0 : 2;
#define DR7_READ_WRITE_0_BIT 16
#define DR7_READ_WRITE_0_FLAG 0x30000
#define DR7_READ_WRITE_0_MASK 0x03
#define DR7_READ_WRITE_0(_) (((_) >> 16) & 0x03)

        /**
         * @brief LEN0 through LEN3 (Length) fields (bits 18, 19, 22, 23, 26, 27, 30, and
         * 31)
         *
         * [Bits 19:18] Specify the size of the memory location at the address specified in
         * the corresponding breakpoint address register (DR0 through DR3). These fields are
         * interpreted as follows:
         * - 00 - 1-byte length.
         * - 01 - 2-byte length.
         * - 10 - Undefined (or 8 byte length, see note below).
         * - 11 - 4-byte length.
         * If the corresponding RWn field in register DR7 is 00 (instruction execution),
         * then the LENn field should also be 00. The effect of using other lengths is
         * undefined.
         *
         * @see Vol3B[17.2.5(Breakpoint Field Recognition)]
         */
        UINT64 Length0 : 2;
#define DR7_LENGTH_0_BIT 18
#define DR7_LENGTH_0_FLAG 0xC0000
#define DR7_LENGTH_0_MASK 0x03
#define DR7_LENGTH_0(_) (((_) >> 18) & 0x03)

        /* [Bits 21:20] R/W1: breakpoint condition for DR1 (see ReadWrite0). */
        UINT64 ReadWrite1 : 2;
#define DR7_READ_WRITE_1_BIT 20
#define DR7_READ_WRITE_1_FLAG 0x300000
#define DR7_READ_WRITE_1_MASK 0x03
#define DR7_READ_WRITE_1(_) (((_) >> 20) & 0x03)

        /* [Bits 23:22] LEN1: breakpoint length for DR1 (see Length0). */
        UINT64 Length1 : 2;
#define DR7_LENGTH_1_BIT 22
#define DR7_LENGTH_1_FLAG 0xC00000
#define DR7_LENGTH_1_MASK 0x03
#define DR7_LENGTH_1(_) (((_) >> 22) & 0x03)

        /* [Bits 25:24] R/W2: breakpoint condition for DR2 (see ReadWrite0). */
        UINT64 ReadWrite2 : 2;
#define DR7_READ_WRITE_2_BIT 24
#define DR7_READ_WRITE_2_FLAG 0x3000000
#define DR7_READ_WRITE_2_MASK 0x03
#define DR7_READ_WRITE_2(_) (((_) >> 24) & 0x03)

        /* [Bits 27:26] LEN2: breakpoint length for DR2 (see Length0). */
        UINT64 Length2 : 2;
#define DR7_LENGTH_2_BIT 26
#define DR7_LENGTH_2_FLAG 0xC000000
#define DR7_LENGTH_2_MASK 0x03
#define DR7_LENGTH_2(_) (((_) >> 26) & 0x03)

        /* [Bits 29:28] R/W3: breakpoint condition for DR3 (see ReadWrite0). */
        UINT64 ReadWrite3 : 2;
#define DR7_READ_WRITE_3_BIT 28
#define DR7_READ_WRITE_3_FLAG 0x30000000
#define DR7_READ_WRITE_3_MASK 0x03
#define DR7_READ_WRITE_3(_) (((_) >> 28) & 0x03)

        /* [Bits 31:30] LEN3: breakpoint length for DR3 (see Length0). */
        UINT64 Length3 : 2;
#define DR7_LENGTH_3_BIT 30
#define DR7_LENGTH_3_FLAG 0xC0000000
#define DR7_LENGTH_3_MASK 0x03
#define DR7_LENGTH_3(_) (((_) >> 30) & 0x03)

        /* [Bits 63:32] Reserved. */
        UINT64 Reserved4 : 32;
    };

    /* Raw 64-bit register value. */
    UINT64 AsUInt;
} DR7;
/**
* @}
*/
/**
* @defgroup CPUID \
* CPUID
*
* @see Vol2A[3.2(CPUID)] (reference)
* @{
*/
/**
* @brief Returns CPUID's Highest Value for Basic Processor Information and the Vendor
* Identification String
*
* When CPUID executes with EAX set to 0, the processor returns the highest value the CPUID
* recognizes for returning basic processor information. The value is returned in the EAX register
* and is processor specific. A vendor identification string is also returned in EBX, EDX, and ECX.
* For Intel processors, the string is "GenuineIntel" and is expressed:
* - EBX <- 756e6547h (* "Genu", with G in the low eight bits of BL *)
* - EDX <- 49656e69h (* "ineI", with i in the low eight bits of DL *)
* - ECX <- 6c65746eh (* "ntel", with n in the low eight bits of CL *)
*/
#define CPUID_SIGNATURE 0x00000000
/**
 * @brief Register layout returned by CPUID leaf 0 (CPUID_SIGNATURE).
 *
 * EAX holds the maximum input value for basic CPUID information; EBX, EDX
 * and ECX together spell the vendor identification string ("GenuineIntel"
 * on Intel processors). Note the struct stores the registers in
 * EBX/ECX/EDX order, so the string fragments appear as "Genu", "ntel",
 * "ineI".
 */
typedef struct
{
    /**
     * @brief EAX
     *
     * Maximum Input Value for Basic CPUID Information.
     */
    UINT32 MaxCpuidInputValue;

    /**
     * @brief EBX
     *
     * "Genu" (with 'G' in the low eight bits of BL).
     */
    UINT32 EbxValueGenu;

    /**
     * @brief ECX
     *
     * "ntel" (with 'n' in the low eight bits of CL).
     */
    UINT32 EcxValueNtel;

    /**
     * @brief EDX
     *
     * "ineI" (with 'i' in the low eight bits of DL).
     */
    UINT32 EdxValueInei;
} CPUID_EAX_00;
/**
* @brief Returns Model, Family, Stepping Information, Additional Information and Feature
* Information
*
* Returns:
* * Model, Family, Stepping Information in EAX
* * Additional Information in EBX
* * Feature Information in ECX and EDX
*/
#define CPUID_VERSION_INFORMATION 0x00000001
typedef struct
{
/**
* @brief When CPUID executes with EAX set to 01H, version information is returned in EAX
*/
union
{
struct
{
UINT32 SteppingId : 4;
#define CPUID_VERSION_INFORMATION_STEPPING_ID_BIT 0
#define CPUID_VERSION_INFORMATION_STEPPING_ID_FLAG 0x0F
#define CPUID_VERSION_INFORMATION_STEPPING_ID_MASK 0x0F
#define CPUID_VERSION_INFORMATION_STEPPING_ID(_) (((_) >> 0) & 0x0F)
UINT32 Model : 4;
#define CPUID_VERSION_INFORMATION_MODEL_BIT 4
#define CPUID_VERSION_INFORMATION_MODEL_FLAG 0xF0
#define CPUID_VERSION_INFORMATION_MODEL_MASK 0x0F
#define CPUID_VERSION_INFORMATION_MODEL(_) (((_) >> 4) & 0x0F)
UINT32 FamilyId : 4;
#define CPUID_VERSION_INFORMATION_FAMILY_ID_BIT 8
#define CPUID_VERSION_INFORMATION_FAMILY_ID_FLAG 0xF00
#define CPUID_VERSION_INFORMATION_FAMILY_ID_MASK 0x0F
#define CPUID_VERSION_INFORMATION_FAMILY_ID(_) (((_) >> 8) & 0x0F)
/**
* [Bits 13:12] - 0 - Original OEM Processor
* - 1 - Intel OverDrive(R) Processor
* - 2 - Dual processor (not applicable to Intel486 processors)
* - 3 - Intel reserved
*/
UINT32 ProcessorType : 2;
#define CPUID_VERSION_INFORMATION_PROCESSOR_TYPE_BIT 12
#define CPUID_VERSION_INFORMATION_PROCESSOR_TYPE_FLAG 0x3000
#define CPUID_VERSION_INFORMATION_PROCESSOR_TYPE_MASK 0x03
#define CPUID_VERSION_INFORMATION_PROCESSOR_TYPE(_) (((_) >> 12) & 0x03)
UINT32 Reserved1 : 2;
/**
* [Bits 19:16] The Extended Model ID needs to be examined only when the
* Family ID is 06H or 0FH.
*/
UINT32 ExtendedModelId : 4;
#define CPUID_VERSION_INFORMATION_EXTENDED_MODEL_ID_BIT 16
#define CPUID_VERSION_INFORMATION_EXTENDED_MODEL_ID_FLAG 0xF0000
#define CPUID_VERSION_INFORMATION_EXTENDED_MODEL_ID_MASK 0x0F
#define CPUID_VERSION_INFORMATION_EXTENDED_MODEL_ID(_) (((_) >> 16) & 0x0F)
/**
* [Bits 27:20] The Extended Family ID needs to be examined only when the
* Family ID is 0FH.
*/
UINT32 ExtendedFamilyId : 8;
#define CPUID_VERSION_INFORMATION_EXTENDED_FAMILY_ID_BIT 20
#define CPUID_VERSION_INFORMATION_EXTENDED_FAMILY_ID_FLAG 0xFF00000
#define CPUID_VERSION_INFORMATION_EXTENDED_FAMILY_ID_MASK 0xFF
#define CPUID_VERSION_INFORMATION_EXTENDED_FAMILY_ID(_) (((_) >> 20) & 0xFF)
UINT32 Reserved2 : 4;
};
UINT32 AsUInt;
} CpuidVersionInformation;
/**
* @brief When CPUID executes with EAX set to 01H, additional information is returned to the
* EBX register
*/
union
{
struct
{
/**
* [Bits 7:0] This number provides an entry into a brand string table that
* contains brand strings for IA-32 processors. More information about this
* field is provided later in this section.
*/
UINT32 BrandIndex : 8;
#define CPUID_ADDITIONAL_INFORMATION_BRAND_INDEX_BIT 0
#define CPUID_ADDITIONAL_INFORMATION_BRAND_INDEX_FLAG 0xFF
#define CPUID_ADDITIONAL_INFORMATION_BRAND_INDEX_MASK 0xFF
#define CPUID_ADDITIONAL_INFORMATION_BRAND_INDEX(_) (((_) >> 0) & 0xFF)
/**
* @brief Value * 8 = cache line size in bytes; used also by CLFLUSHOPT
*
* [Bits 15:8] This number indicates the size of the cache line flushed by
* the CLFLUSH and CLFLUSHOPT instructions in 8-byte increments. This field
* was introduced in the Pentium 4 processor.
*/
UINT32 ClflushLineSize : 8;
#define CPUID_ADDITIONAL_INFORMATION_CLFLUSH_LINE_SIZE_BIT 8
#define CPUID_ADDITIONAL_INFORMATION_CLFLUSH_LINE_SIZE_FLAG 0xFF00
#define CPUID_ADDITIONAL_INFORMATION_CLFLUSH_LINE_SIZE_MASK 0xFF
#define CPUID_ADDITIONAL_INFORMATION_CLFLUSH_LINE_SIZE(_) (((_) >> 8) & 0xFF)
/**
* [Bits 23:16] Maximum number of addressable IDs for logical processors in
* this physical package.
*
* @remarks The nearest power-of-2 integer that is not smaller than
* EBX[23:16] is the number of unique initial APIC IDs reserved for
* addressing different logical processors in a physical package. This field
* is only valid if CPUID.1.EDX.HTT[bit 28] = 1.
*/
UINT32 MaxAddressableIds : 8;
#define CPUID_ADDITIONAL_INFORMATION_MAX_ADDRESSABLE_IDS_BIT 16
#define CPUID_ADDITIONAL_INFORMATION_MAX_ADDRESSABLE_IDS_FLAG 0xFF0000
#define CPUID_ADDITIONAL_INFORMATION_MAX_ADDRESSABLE_IDS_MASK 0xFF
#define CPUID_ADDITIONAL_INFORMATION_MAX_ADDRESSABLE_IDS(_) (((_) >> 16) & 0xFF)
/**
* [Bits 31:24] This number is the 8-bit ID that is assigned to the local
* APIC on the processor during power up. This field was introduced in the
* Pentium 4 processor.
*/
UINT32 InitialApicId : 8;
#define CPUID_ADDITIONAL_INFORMATION_INITIAL_APIC_ID_BIT 24
#define CPUID_ADDITIONAL_INFORMATION_INITIAL_APIC_ID_FLAG 0xFF000000
#define CPUID_ADDITIONAL_INFORMATION_INITIAL_APIC_ID_MASK 0xFF
#define CPUID_ADDITIONAL_INFORMATION_INITIAL_APIC_ID(_) (((_) >> 24) & 0xFF)
};
UINT32 AsUInt;
} CpuidAdditionalInformation;
/**
* @brief When CPUID executes with EAX set to 01H, feature information is returned in ECX
* and EDX
*/
union
{
struct
{
/**
* @brief Streaming SIMD Extensions 3 (SSE3)
*
* [Bit 0] A value of 1 indicates the processor supports this technology.
*/
UINT32 StreamingSimdExtensions3 : 1;
#define CPUID_FEATURE_INFORMATION_ECX_STREAMING_SIMD_EXTENSIONS_3_BIT 0
#define CPUID_FEATURE_INFORMATION_ECX_STREAMING_SIMD_EXTENSIONS_3_FLAG 0x01
#define CPUID_FEATURE_INFORMATION_ECX_STREAMING_SIMD_EXTENSIONS_3_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_STREAMING_SIMD_EXTENSIONS_3(_) (((_) >> 0) & 0x01)
/**
* @brief PCLMULQDQ instruction
*
* [Bit 1] A value of 1 indicates the processor supports the PCLMULQDQ
* instruction.
*/
UINT32 PclmulqdqInstruction : 1;
#define CPUID_FEATURE_INFORMATION_ECX_PCLMULQDQ_INSTRUCTION_BIT 1
#define CPUID_FEATURE_INFORMATION_ECX_PCLMULQDQ_INSTRUCTION_FLAG 0x02
#define CPUID_FEATURE_INFORMATION_ECX_PCLMULQDQ_INSTRUCTION_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_PCLMULQDQ_INSTRUCTION(_) (((_) >> 1) & 0x01)
/**
* @brief 64-bit DS Area
*
* [Bit 2] A value of 1 indicates the processor supports DS area using
* 64-bit layout.
*/
UINT32 DsArea64BitLayout : 1;
#define CPUID_FEATURE_INFORMATION_ECX_DS_AREA_64BIT_LAYOUT_BIT 2
#define CPUID_FEATURE_INFORMATION_ECX_DS_AREA_64BIT_LAYOUT_FLAG 0x04
#define CPUID_FEATURE_INFORMATION_ECX_DS_AREA_64BIT_LAYOUT_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_DS_AREA_64BIT_LAYOUT(_) (((_) >> 2) & 0x01)
/**
* @brief MONITOR/MWAIT instruction
*
* [Bit 3] A value of 1 indicates the processor supports this feature.
*/
UINT32 MonitorMwaitInstruction : 1;
#define CPUID_FEATURE_INFORMATION_ECX_MONITOR_MWAIT_INSTRUCTION_BIT 3
#define CPUID_FEATURE_INFORMATION_ECX_MONITOR_MWAIT_INSTRUCTION_FLAG 0x08
#define CPUID_FEATURE_INFORMATION_ECX_MONITOR_MWAIT_INSTRUCTION_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_MONITOR_MWAIT_INSTRUCTION(_) (((_) >> 3) & 0x01)
/**
* @brief CPL Qualified Debug Store
*
* [Bit 4] A value of 1 indicates the processor supports the extensions to
* the Debug Store feature to allow for branch message storage qualified by
* CPL.
*/
UINT32 CplQualifiedDebugStore : 1;
#define CPUID_FEATURE_INFORMATION_ECX_CPL_QUALIFIED_DEBUG_STORE_BIT 4
#define CPUID_FEATURE_INFORMATION_ECX_CPL_QUALIFIED_DEBUG_STORE_FLAG 0x10
#define CPUID_FEATURE_INFORMATION_ECX_CPL_QUALIFIED_DEBUG_STORE_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_CPL_QUALIFIED_DEBUG_STORE(_) (((_) >> 4) & 0x01)
/**
* @brief Virtual Machine Extensions
*
* [Bit 5] A value of 1 indicates that the processor supports this
* technology.
*/
UINT32 VirtualMachineExtensions : 1;
#define CPUID_FEATURE_INFORMATION_ECX_VIRTUAL_MACHINE_EXTENSIONS_BIT 5
#define CPUID_FEATURE_INFORMATION_ECX_VIRTUAL_MACHINE_EXTENSIONS_FLAG 0x20
#define CPUID_FEATURE_INFORMATION_ECX_VIRTUAL_MACHINE_EXTENSIONS_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_VIRTUAL_MACHINE_EXTENSIONS(_) (((_) >> 5) & 0x01)
/**
* @brief Safer Mode Extensions
*
* [Bit 6] A value of 1 indicates that the processor supports this
* technology.
*
* @see Vol2[6(SAFER MODE EXTENSIONS REFERENCE)]
*/
UINT32 SaferModeExtensions : 1;
#define CPUID_FEATURE_INFORMATION_ECX_SAFER_MODE_EXTENSIONS_BIT 6
#define CPUID_FEATURE_INFORMATION_ECX_SAFER_MODE_EXTENSIONS_FLAG 0x40
#define CPUID_FEATURE_INFORMATION_ECX_SAFER_MODE_EXTENSIONS_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_SAFER_MODE_EXTENSIONS(_) (((_) >> 6) & 0x01)
/**
* @brief Enhanced Intel SpeedStep(R) technology
*
* [Bit 7] A value of 1 indicates that the processor supports this
* technology.
*/
UINT32 EnhancedIntelSpeedstepTechnology : 1;
#define CPUID_FEATURE_INFORMATION_ECX_ENHANCED_INTEL_SPEEDSTEP_TECHNOLOGY_BIT 7
#define CPUID_FEATURE_INFORMATION_ECX_ENHANCED_INTEL_SPEEDSTEP_TECHNOLOGY_FLAG 0x80
#define CPUID_FEATURE_INFORMATION_ECX_ENHANCED_INTEL_SPEEDSTEP_TECHNOLOGY_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_ENHANCED_INTEL_SPEEDSTEP_TECHNOLOGY(_) (((_) >> 7) & 0x01)
/**
* @brief Thermal Monitor 2
*
* [Bit 8] A value of 1 indicates whether the processor supports this
* technology.
*/
UINT32 ThermalMonitor2 : 1;
#define CPUID_FEATURE_INFORMATION_ECX_THERMAL_MONITOR_2_BIT 8
#define CPUID_FEATURE_INFORMATION_ECX_THERMAL_MONITOR_2_FLAG 0x100
#define CPUID_FEATURE_INFORMATION_ECX_THERMAL_MONITOR_2_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_THERMAL_MONITOR_2(_) (((_) >> 8) & 0x01)
/**
* @brief Supplemental Streaming SIMD Extensions 3 (SSSE3)
*
* [Bit 9] A value of 1 indicates the presence of the Supplemental Streaming
* SIMD Extensions 3 (SSSE3). A value of 0 indicates the instruction
* extensions are not present in the processor.
*/
UINT32 SupplementalStreamingSimdExtensions3 : 1;
#define CPUID_FEATURE_INFORMATION_ECX_SUPPLEMENTAL_STREAMING_SIMD_EXTENSIONS_3_BIT 9
#define CPUID_FEATURE_INFORMATION_ECX_SUPPLEMENTAL_STREAMING_SIMD_EXTENSIONS_3_FLAG 0x200
#define CPUID_FEATURE_INFORMATION_ECX_SUPPLEMENTAL_STREAMING_SIMD_EXTENSIONS_3_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_SUPPLEMENTAL_STREAMING_SIMD_EXTENSIONS_3(_) \
(((_) >> 9) & 0x01)
/**
* @brief L1 Context ID
*
* [Bit 10] A value of 1 indicates the L1 data cache mode can be set to
* either adaptive mode or shared mode. A value of 0 indicates this feature
* is not supported. See definition of the IA32_MISC_ENABLE MSR Bit 24 (L1
* Data Cache Context Mode) for details.
*/
UINT32 L1ContextId : 1;
#define CPUID_FEATURE_INFORMATION_ECX_L1_CONTEXT_ID_BIT 10
#define CPUID_FEATURE_INFORMATION_ECX_L1_CONTEXT_ID_FLAG 0x400
#define CPUID_FEATURE_INFORMATION_ECX_L1_CONTEXT_ID_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_L1_CONTEXT_ID(_) (((_) >> 10) & 0x01)
/**
* @brief IA32_DEBUG_INTERFACE MSR for silicon debug
*
* [Bit 11] A value of 1 indicates the processor supports
* IA32_DEBUG_INTERFACE MSR for silicon debug.
*/
UINT32 SiliconDebug : 1;
#define CPUID_FEATURE_INFORMATION_ECX_SILICON_DEBUG_BIT 11
#define CPUID_FEATURE_INFORMATION_ECX_SILICON_DEBUG_FLAG 0x800
#define CPUID_FEATURE_INFORMATION_ECX_SILICON_DEBUG_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_SILICON_DEBUG(_) (((_) >> 11) & 0x01)
/**
* @brief FMA extensions using YMM state
*
* [Bit 12] A value of 1 indicates the processor supports FMA (Fused
* Multiple Add) extensions using YMM state.
*/
UINT32 FmaExtensions : 1;
#define CPUID_FEATURE_INFORMATION_ECX_FMA_EXTENSIONS_BIT 12
#define CPUID_FEATURE_INFORMATION_ECX_FMA_EXTENSIONS_FLAG 0x1000
#define CPUID_FEATURE_INFORMATION_ECX_FMA_EXTENSIONS_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_FMA_EXTENSIONS(_) (((_) >> 12) & 0x01)
/**
* @brief CMPXCHG16B instruction
*
* [Bit 13] A value of 1 indicates that the feature is available.
*/
UINT32 Cmpxchg16BInstruction : 1;
#define CPUID_FEATURE_INFORMATION_ECX_CMPXCHG16B_INSTRUCTION_BIT 13
#define CPUID_FEATURE_INFORMATION_ECX_CMPXCHG16B_INSTRUCTION_FLAG 0x2000
#define CPUID_FEATURE_INFORMATION_ECX_CMPXCHG16B_INSTRUCTION_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_CMPXCHG16B_INSTRUCTION(_) (((_) >> 13) & 0x01)
/**
* @brief xTPR Update Control
*
* [Bit 14] A value of 1 indicates that the processor supports changing
* IA32_MISC_ENABLE[bit 23].
*/
UINT32 XtprUpdateControl : 1;
#define CPUID_FEATURE_INFORMATION_ECX_XTPR_UPDATE_CONTROL_BIT 14
#define CPUID_FEATURE_INFORMATION_ECX_XTPR_UPDATE_CONTROL_FLAG 0x4000
#define CPUID_FEATURE_INFORMATION_ECX_XTPR_UPDATE_CONTROL_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_XTPR_UPDATE_CONTROL(_) (((_) >> 14) & 0x01)
/**
* @brief Perfmon and Debug Capability
*
* [Bit 15] A value of 1 indicates the processor supports the performance
* and debug feature indication MSR IA32_PERF_CAPABILITIES.
*/
UINT32 PerfmonAndDebugCapability : 1;
#define CPUID_FEATURE_INFORMATION_ECX_PERFMON_AND_DEBUG_CAPABILITY_BIT 15
#define CPUID_FEATURE_INFORMATION_ECX_PERFMON_AND_DEBUG_CAPABILITY_FLAG 0x8000
#define CPUID_FEATURE_INFORMATION_ECX_PERFMON_AND_DEBUG_CAPABILITY_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_PERFMON_AND_DEBUG_CAPABILITY(_) (((_) >> 15) & 0x01)
UINT32 Reserved1 : 1;
/**
* @brief Process-context identifiers
*
* [Bit 17] A value of 1 indicates that the processor supports PCIDs and
* that software may set CR4.PCIDE to 1.
*/
UINT32 ProcessContextIdentifiers : 1;
#define CPUID_FEATURE_INFORMATION_ECX_PROCESS_CONTEXT_IDENTIFIERS_BIT 17
#define CPUID_FEATURE_INFORMATION_ECX_PROCESS_CONTEXT_IDENTIFIERS_FLAG 0x20000
#define CPUID_FEATURE_INFORMATION_ECX_PROCESS_CONTEXT_IDENTIFIERS_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_PROCESS_CONTEXT_IDENTIFIERS(_) (((_) >> 17) & 0x01)
/**
* @brief Direct Cache Access
*
* [Bit 18] A value of 1 indicates the processor supports the ability to
* prefetch data from a memory mapped device (Direct Cache Access).
*/
UINT32 DirectCacheAccess : 1;
#define CPUID_FEATURE_INFORMATION_ECX_DIRECT_CACHE_ACCESS_BIT 18
#define CPUID_FEATURE_INFORMATION_ECX_DIRECT_CACHE_ACCESS_FLAG 0x40000
#define CPUID_FEATURE_INFORMATION_ECX_DIRECT_CACHE_ACCESS_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_DIRECT_CACHE_ACCESS(_) (((_) >> 18) & 0x01)
/**
* @brief SSE4.1 support
*
* [Bit 19] A value of 1 indicates that the processor supports SSE4.1.
*/
UINT32 Sse41Support : 1;
#define CPUID_FEATURE_INFORMATION_ECX_SSE41_SUPPORT_BIT 19
#define CPUID_FEATURE_INFORMATION_ECX_SSE41_SUPPORT_FLAG 0x80000
#define CPUID_FEATURE_INFORMATION_ECX_SSE41_SUPPORT_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_SSE41_SUPPORT(_) (((_) >> 19) & 0x01)
/**
* @brief SSE4.2 support
*
* [Bit 20] A value of 1 indicates that the processor supports SSE4.2.
*/
UINT32 Sse42Support : 1;
#define CPUID_FEATURE_INFORMATION_ECX_SSE42_SUPPORT_BIT 20
#define CPUID_FEATURE_INFORMATION_ECX_SSE42_SUPPORT_FLAG 0x100000
#define CPUID_FEATURE_INFORMATION_ECX_SSE42_SUPPORT_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_SSE42_SUPPORT(_) (((_) >> 20) & 0x01)
/**
* @brief x2APIC support
*
* [Bit 21] A value of 1 indicates that the processor supports x2APIC
* feature.
*/
UINT32 X2ApicSupport : 1;
#define CPUID_FEATURE_INFORMATION_ECX_X2APIC_SUPPORT_BIT 21
#define CPUID_FEATURE_INFORMATION_ECX_X2APIC_SUPPORT_FLAG 0x200000
#define CPUID_FEATURE_INFORMATION_ECX_X2APIC_SUPPORT_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_X2APIC_SUPPORT(_) (((_) >> 21) & 0x01)
/**
* @brief MOVBE instruction
*
* [Bit 22] A value of 1 indicates that the processor supports MOVBE
* instruction.
*/
UINT32 MovbeInstruction : 1;
#define CPUID_FEATURE_INFORMATION_ECX_MOVBE_INSTRUCTION_BIT 22
#define CPUID_FEATURE_INFORMATION_ECX_MOVBE_INSTRUCTION_FLAG 0x400000
#define CPUID_FEATURE_INFORMATION_ECX_MOVBE_INSTRUCTION_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_MOVBE_INSTRUCTION(_) (((_) >> 22) & 0x01)
/**
* @brief POPCNT instruction
*
* [Bit 23] A value of 1 indicates that the processor supports the POPCNT
* instruction.
*/
UINT32 PopcntInstruction : 1;
#define CPUID_FEATURE_INFORMATION_ECX_POPCNT_INSTRUCTION_BIT 23
#define CPUID_FEATURE_INFORMATION_ECX_POPCNT_INSTRUCTION_FLAG 0x800000
#define CPUID_FEATURE_INFORMATION_ECX_POPCNT_INSTRUCTION_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_POPCNT_INSTRUCTION(_) (((_) >> 23) & 0x01)
/**
* @brief TSC Deadline
*
* [Bit 24] A value of 1 indicates that the processor's local APIC timer
* supports one-shot operation using a TSC deadline value.
*/
UINT32 TscDeadline : 1;
#define CPUID_FEATURE_INFORMATION_ECX_TSC_DEADLINE_BIT 24
#define CPUID_FEATURE_INFORMATION_ECX_TSC_DEADLINE_FLAG 0x1000000
#define CPUID_FEATURE_INFORMATION_ECX_TSC_DEADLINE_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_TSC_DEADLINE(_) (((_) >> 24) & 0x01)
/**
* @brief AESNI instruction extensions
*
* [Bit 25] A value of 1 indicates that the processor supports the AESNI
* instruction extensions.
*/
UINT32 AesniInstructionExtensions : 1;
#define CPUID_FEATURE_INFORMATION_ECX_AESNI_INSTRUCTION_EXTENSIONS_BIT 25
#define CPUID_FEATURE_INFORMATION_ECX_AESNI_INSTRUCTION_EXTENSIONS_FLAG 0x2000000
#define CPUID_FEATURE_INFORMATION_ECX_AESNI_INSTRUCTION_EXTENSIONS_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_AESNI_INSTRUCTION_EXTENSIONS(_) (((_) >> 25) & 0x01)
/**
* @brief XSAVE/XRSTOR instruction extensions
*
* [Bit 26] A value of 1 indicates that the processor supports the
* XSAVE/XRSTOR processor extended states feature, the XSETBV/XGETBV
* instructions, and XCR0.
*/
UINT32 XsaveXrstorInstruction : 1;
#define CPUID_FEATURE_INFORMATION_ECX_XSAVE_XRSTOR_INSTRUCTION_BIT 26
#define CPUID_FEATURE_INFORMATION_ECX_XSAVE_XRSTOR_INSTRUCTION_FLAG 0x4000000
#define CPUID_FEATURE_INFORMATION_ECX_XSAVE_XRSTOR_INSTRUCTION_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_XSAVE_XRSTOR_INSTRUCTION(_) (((_) >> 26) & 0x01)
/**
* @brief CR4.OSXSAVE[bit 18] set
*
* [Bit 27] A value of 1 indicates that the OS has set CR4.OSXSAVE[bit 18]
* to enable XSETBV/XGETBV instructions to access XCR0 and to support
* processor extended state management using XSAVE/XRSTOR.
*/
UINT32 OsxSave : 1;
#define CPUID_FEATURE_INFORMATION_ECX_OSX_SAVE_BIT 27
#define CPUID_FEATURE_INFORMATION_ECX_OSX_SAVE_FLAG 0x8000000
#define CPUID_FEATURE_INFORMATION_ECX_OSX_SAVE_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_OSX_SAVE(_) (((_) >> 27) & 0x01)
/**
* @brief AVX instruction extensions support
*
* [Bit 28] A value of 1 indicates the processor supports the AVX
* instruction extensions.
*/
UINT32 AvxSupport : 1;
#define CPUID_FEATURE_INFORMATION_ECX_AVX_SUPPORT_BIT 28
#define CPUID_FEATURE_INFORMATION_ECX_AVX_SUPPORT_FLAG 0x10000000
#define CPUID_FEATURE_INFORMATION_ECX_AVX_SUPPORT_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_AVX_SUPPORT(_) (((_) >> 28) & 0x01)
/**
* @brief 16-bit floating-point conversion instructions support
*
* [Bit 29] A value of 1 indicates that processor supports 16-bit
* floating-point conversion instructions.
*/
UINT32 HalfPrecisionConversionInstructions : 1;
#define CPUID_FEATURE_INFORMATION_ECX_HALF_PRECISION_CONVERSION_INSTRUCTIONS_BIT 29
#define CPUID_FEATURE_INFORMATION_ECX_HALF_PRECISION_CONVERSION_INSTRUCTIONS_FLAG 0x20000000
#define CPUID_FEATURE_INFORMATION_ECX_HALF_PRECISION_CONVERSION_INSTRUCTIONS_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_HALF_PRECISION_CONVERSION_INSTRUCTIONS(_) (((_) >> 29) & 0x01)
/**
* @brief RDRAND instruction support
*
* [Bit 30] A value of 1 indicates that processor supports RDRAND
* instruction.
*/
UINT32 RdrandInstruction : 1;
#define CPUID_FEATURE_INFORMATION_ECX_RDRAND_INSTRUCTION_BIT 30
#define CPUID_FEATURE_INFORMATION_ECX_RDRAND_INSTRUCTION_FLAG 0x40000000
#define CPUID_FEATURE_INFORMATION_ECX_RDRAND_INSTRUCTION_MASK 0x01
#define CPUID_FEATURE_INFORMATION_ECX_RDRAND_INSTRUCTION(_) (((_) >> 30) & 0x01)
UINT32 Reserved2 : 1;
};
UINT32 AsUInt;
} CpuidFeatureInformationEcx;
/**
* @brief When CPUID executes with EAX set to 01H, feature information is returned in ECX
* and EDX
*/
union
{
struct
{
/**
* @brief Floating Point Unit On-Chip
*
* [Bit 0] The processor contains an x87 FPU.
*/
UINT32 FloatingPointUnitOnChip : 1;
#define CPUID_FEATURE_INFORMATION_EDX_FLOATING_POINT_UNIT_ON_CHIP_BIT 0
#define CPUID_FEATURE_INFORMATION_EDX_FLOATING_POINT_UNIT_ON_CHIP_FLAG 0x01
#define CPUID_FEATURE_INFORMATION_EDX_FLOATING_POINT_UNIT_ON_CHIP_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_FLOATING_POINT_UNIT_ON_CHIP(_) (((_) >> 0) & 0x01)
/**
* @brief Virtual 8086 Mode Enhancements
*
* [Bit 1] Virtual 8086 mode enhancements, including CR4.VME for controlling
* the feature, CR4.PVI for protected mode virtual interrupts, software
* interrupt indirection, expansion of the TSS with the software indirection
* bitmap, and EFLAGS.VIF and EFLAGS.VIP flags.
*/
UINT32 Virtual8086ModeEnhancements : 1;
#define CPUID_FEATURE_INFORMATION_EDX_VIRTUAL_8086_MODE_ENHANCEMENTS_BIT 1
#define CPUID_FEATURE_INFORMATION_EDX_VIRTUAL_8086_MODE_ENHANCEMENTS_FLAG 0x02
#define CPUID_FEATURE_INFORMATION_EDX_VIRTUAL_8086_MODE_ENHANCEMENTS_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_VIRTUAL_8086_MODE_ENHANCEMENTS(_) (((_) >> 1) & 0x01)
/**
* @brief Debugging Extensions
*
* [Bit 2] Support for I/O breakpoints, including CR4.DE for controlling the
* feature, and optional trapping of accesses to DR4 and DR5.
*/
UINT32 DebuggingExtensions : 1;
#define CPUID_FEATURE_INFORMATION_EDX_DEBUGGING_EXTENSIONS_BIT 2
#define CPUID_FEATURE_INFORMATION_EDX_DEBUGGING_EXTENSIONS_FLAG 0x04
#define CPUID_FEATURE_INFORMATION_EDX_DEBUGGING_EXTENSIONS_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_DEBUGGING_EXTENSIONS(_) (((_) >> 2) & 0x01)
/**
* @brief Page Size Extension
*
* [Bit 3] Large pages of size 4 MByte are supported, including CR4.PSE for
* controlling the feature, the defined dirty bit in PDE (Page Directory
* Entries), optional reserved bit trapping in CR3, PDEs, and PTEs.
*/
UINT32 PageSizeExtension : 1;
#define CPUID_FEATURE_INFORMATION_EDX_PAGE_SIZE_EXTENSION_BIT 3
#define CPUID_FEATURE_INFORMATION_EDX_PAGE_SIZE_EXTENSION_FLAG 0x08
#define CPUID_FEATURE_INFORMATION_EDX_PAGE_SIZE_EXTENSION_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_PAGE_SIZE_EXTENSION(_) (((_) >> 3) & 0x01)
/**
* @brief Time Stamp Counter
*
* [Bit 4] The RDTSC instruction is supported, including CR4.TSD for
* controlling privilege.
*/
UINT32 TimestampCounter : 1;
#define CPUID_FEATURE_INFORMATION_EDX_TIMESTAMP_COUNTER_BIT 4
#define CPUID_FEATURE_INFORMATION_EDX_TIMESTAMP_COUNTER_FLAG 0x10
#define CPUID_FEATURE_INFORMATION_EDX_TIMESTAMP_COUNTER_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_TIMESTAMP_COUNTER(_) (((_) >> 4) & 0x01)
/**
* @brief Model Specific Registers RDMSR and WRMSR Instructions
*
* [Bit 5] The RDMSR and WRMSR instructions are supported. Some of the MSRs
* are implementation dependent.
*/
UINT32 RdmsrWrmsrInstructions : 1;
#define CPUID_FEATURE_INFORMATION_EDX_RDMSR_WRMSR_INSTRUCTIONS_BIT 5
#define CPUID_FEATURE_INFORMATION_EDX_RDMSR_WRMSR_INSTRUCTIONS_FLAG 0x20
#define CPUID_FEATURE_INFORMATION_EDX_RDMSR_WRMSR_INSTRUCTIONS_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_RDMSR_WRMSR_INSTRUCTIONS(_) (((_) >> 5) & 0x01)
/**
* @brief Physical Address Extension
*
* [Bit 6] Physical addresses greater than 32 bits are supported: extended
* page table entry formats, an extra level in the page translation tables
* is defined, 2-MByte pages are supported instead of 4 Mbyte pages if PAE
* bit is 1.
*/
UINT32 PhysicalAddressExtension : 1;
#define CPUID_FEATURE_INFORMATION_EDX_PHYSICAL_ADDRESS_EXTENSION_BIT 6
#define CPUID_FEATURE_INFORMATION_EDX_PHYSICAL_ADDRESS_EXTENSION_FLAG 0x40
#define CPUID_FEATURE_INFORMATION_EDX_PHYSICAL_ADDRESS_EXTENSION_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_PHYSICAL_ADDRESS_EXTENSION(_) (((_) >> 6) & 0x01)
/**
* @brief Machine Check Exception
*
* [Bit 7] Exception 18 is defined for Machine Checks, including CR4.MCE for
* controlling the feature. This feature does not define the model-specific
* implementations of machine-check error logging, reporting, and processor
* shutdowns. Machine Check exception handlers may have to depend on
* processor version to do model specific processing of the exception, or
* test for the presence of the Machine Check feature.
*/
UINT32 MachineCheckException : 1;
#define CPUID_FEATURE_INFORMATION_EDX_MACHINE_CHECK_EXCEPTION_BIT 7
#define CPUID_FEATURE_INFORMATION_EDX_MACHINE_CHECK_EXCEPTION_FLAG 0x80
#define CPUID_FEATURE_INFORMATION_EDX_MACHINE_CHECK_EXCEPTION_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_MACHINE_CHECK_EXCEPTION(_) (((_) >> 7) & 0x01)
/**
* @brief CMPXCHG8B Instruction
*
* [Bit 8] The compare-and-exchange 8 bytes (64 bits) instruction is
* supported (implicitly locked and atomic).
*/
UINT32 Cmpxchg8B : 1;
#define CPUID_FEATURE_INFORMATION_EDX_CMPXCHG8B_BIT 8
#define CPUID_FEATURE_INFORMATION_EDX_CMPXCHG8B_FLAG 0x100
#define CPUID_FEATURE_INFORMATION_EDX_CMPXCHG8B_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_CMPXCHG8B(_) (((_) >> 8) & 0x01)
/**
* @brief APIC On-Chip
*
* [Bit 9] The processor contains an Advanced Programmable Interrupt
* Controller (APIC), responding to memory mapped commands in the physical
* address range FFFE0000H to FFFE0FFFH (by default - some processors permit
* the APIC to be relocated).
*/
UINT32 ApicOnChip : 1;
#define CPUID_FEATURE_INFORMATION_EDX_APIC_ON_CHIP_BIT 9
#define CPUID_FEATURE_INFORMATION_EDX_APIC_ON_CHIP_FLAG 0x200
#define CPUID_FEATURE_INFORMATION_EDX_APIC_ON_CHIP_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_APIC_ON_CHIP(_) (((_) >> 9) & 0x01)
UINT32 Reserved1 : 1;
/**
* @brief SYSENTER and SYSEXIT Instructions
*
* [Bit 11] The SYSENTER and SYSEXIT and associated MSRs are supported.
*/
UINT32 SysenterSysexitInstructions : 1;
#define CPUID_FEATURE_INFORMATION_EDX_SYSENTER_SYSEXIT_INSTRUCTIONS_BIT 11
#define CPUID_FEATURE_INFORMATION_EDX_SYSENTER_SYSEXIT_INSTRUCTIONS_FLAG 0x800
#define CPUID_FEATURE_INFORMATION_EDX_SYSENTER_SYSEXIT_INSTRUCTIONS_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_SYSENTER_SYSEXIT_INSTRUCTIONS(_) (((_) >> 11) & 0x01)
/**
* @brief Memory Type Range Registers
*
* [Bit 12] MTRRs are supported. The MTRRcap MSR contains feature bits that
* describe what memory types are supported, how many variable MTRRs are
* supported, and whether fixed MTRRs are supported.
*/
UINT32 MemoryTypeRangeRegisters : 1;
#define CPUID_FEATURE_INFORMATION_EDX_MEMORY_TYPE_RANGE_REGISTERS_BIT 12
#define CPUID_FEATURE_INFORMATION_EDX_MEMORY_TYPE_RANGE_REGISTERS_FLAG 0x1000
#define CPUID_FEATURE_INFORMATION_EDX_MEMORY_TYPE_RANGE_REGISTERS_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_MEMORY_TYPE_RANGE_REGISTERS(_) (((_) >> 12) & 0x01)
/**
* @brief Page Global Bit
*
* [Bit 13] The global bit is supported in paging-structure entries that map
* a page, indicating TLB entries that are common to different processes and
* need not be flushed. The CR4.PGE bit controls this feature.
*/
UINT32 PageGlobalBit : 1;
#define CPUID_FEATURE_INFORMATION_EDX_PAGE_GLOBAL_BIT_BIT 13
#define CPUID_FEATURE_INFORMATION_EDX_PAGE_GLOBAL_BIT_FLAG 0x2000
#define CPUID_FEATURE_INFORMATION_EDX_PAGE_GLOBAL_BIT_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_PAGE_GLOBAL_BIT(_) (((_) >> 13) & 0x01)
/**
* @brief Machine Check Architecture
*
* [Bit 14] A value of 1 indicates the Machine Check Architecture of
* reporting machine errors is supported. The MCG_CAP MSR contains feature
* bits describing how many banks of error reporting MSRs are supported.
*/
UINT32 MachineCheckArchitecture : 1;
#define CPUID_FEATURE_INFORMATION_EDX_MACHINE_CHECK_ARCHITECTURE_BIT 14
#define CPUID_FEATURE_INFORMATION_EDX_MACHINE_CHECK_ARCHITECTURE_FLAG 0x4000
#define CPUID_FEATURE_INFORMATION_EDX_MACHINE_CHECK_ARCHITECTURE_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_MACHINE_CHECK_ARCHITECTURE(_) (((_) >> 14) & 0x01)
/**
* @brief Conditional Move Instructions
*
* [Bit 15] The conditional move instruction CMOV is supported. In addition,
* if x87 FPU is present as indicated by the CPUID.FPU feature bit, then the
* FCOMI and FCMOV instructions are supported
*/
UINT32 ConditionalMoveInstructions : 1;
#define CPUID_FEATURE_INFORMATION_EDX_CONDITIONAL_MOVE_INSTRUCTIONS_BIT 15
#define CPUID_FEATURE_INFORMATION_EDX_CONDITIONAL_MOVE_INSTRUCTIONS_FLAG 0x8000
#define CPUID_FEATURE_INFORMATION_EDX_CONDITIONAL_MOVE_INSTRUCTIONS_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_CONDITIONAL_MOVE_INSTRUCTIONS(_) (((_) >> 15) & 0x01)
/**
* @brief Page Attribute Table
*
* [Bit 16] Page Attribute Table is supported. This feature augments the
* Memory Type Range Registers (MTRRs), allowing an operating system to
* specify attributes of memory accessed through a linear address on a 4KB
* granularity.
*/
UINT32 PageAttributeTable : 1;
#define CPUID_FEATURE_INFORMATION_EDX_PAGE_ATTRIBUTE_TABLE_BIT 16
#define CPUID_FEATURE_INFORMATION_EDX_PAGE_ATTRIBUTE_TABLE_FLAG 0x10000
#define CPUID_FEATURE_INFORMATION_EDX_PAGE_ATTRIBUTE_TABLE_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_PAGE_ATTRIBUTE_TABLE(_) (((_) >> 16) & 0x01)
/**
* @brief 36-Bit Page Size Extension
*
* [Bit 17] 4-MByte pages addressing physical memory beyond 4 GBytes are
* supported with 32-bit paging. This feature indicates that upper bits of
* the physical address of a 4-MByte page are encoded in bits 20:13 of the
* page-directory entry. Such physical addresses are limited by MAXPHYADDR
* and may be up to 40 bits in size.
*/
UINT32 PageSizeExtension36Bit : 1;
#define CPUID_FEATURE_INFORMATION_EDX_PAGE_SIZE_EXTENSION_36BIT_BIT 17
#define CPUID_FEATURE_INFORMATION_EDX_PAGE_SIZE_EXTENSION_36BIT_FLAG 0x20000
#define CPUID_FEATURE_INFORMATION_EDX_PAGE_SIZE_EXTENSION_36BIT_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_PAGE_SIZE_EXTENSION_36BIT(_) (((_) >> 17) & 0x01)
/**
* @brief Processor Serial Number
*
* [Bit 18] The processor supports the 96-bit processor identification
* number feature and the feature is enabled.
*/
UINT32 ProcessorSerialNumber : 1;
#define CPUID_FEATURE_INFORMATION_EDX_PROCESSOR_SERIAL_NUMBER_BIT 18
#define CPUID_FEATURE_INFORMATION_EDX_PROCESSOR_SERIAL_NUMBER_FLAG 0x40000
#define CPUID_FEATURE_INFORMATION_EDX_PROCESSOR_SERIAL_NUMBER_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_PROCESSOR_SERIAL_NUMBER(_) (((_) >> 18) & 0x01)
/**
* @brief CLFLUSH Instruction
*
* [Bit 19] CLFLUSH Instruction is supported.
*/
UINT32 Clflush : 1;
#define CPUID_FEATURE_INFORMATION_EDX_CLFLUSH_BIT 19
#define CPUID_FEATURE_INFORMATION_EDX_CLFLUSH_FLAG 0x80000
#define CPUID_FEATURE_INFORMATION_EDX_CLFLUSH_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_CLFLUSH(_) (((_) >> 19) & 0x01)
UINT32 Reserved2 : 1;
/**
* @brief Debug Store
*
* [Bit 21] The processor supports the ability to write debug information
* into a memory resident buffer. This feature is used by the branch trace
* store (BTS) and processor event-based sampling (PEBS) facilities.
*
* @see Vol3C[23(INTRODUCTION TO VIRTUAL MACHINE EXTENSIONS)]
*/
UINT32 DebugStore : 1;
#define CPUID_FEATURE_INFORMATION_EDX_DEBUG_STORE_BIT 21
#define CPUID_FEATURE_INFORMATION_EDX_DEBUG_STORE_FLAG 0x200000
#define CPUID_FEATURE_INFORMATION_EDX_DEBUG_STORE_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_DEBUG_STORE(_) (((_) >> 21) & 0x01)
/**
* @brief Thermal Monitor and Software Controlled Clock Facilities
*
* [Bit 22] The processor implements internal MSRs that allow processor
* temperature to be monitored and processor performance to be modulated in
* predefined duty cycles under software control.
*/
UINT32 ThermalControlMsrsForAcpi : 1;
#define CPUID_FEATURE_INFORMATION_EDX_THERMAL_CONTROL_MSRS_FOR_ACPI_BIT 22
#define CPUID_FEATURE_INFORMATION_EDX_THERMAL_CONTROL_MSRS_FOR_ACPI_FLAG 0x400000
#define CPUID_FEATURE_INFORMATION_EDX_THERMAL_CONTROL_MSRS_FOR_ACPI_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_THERMAL_CONTROL_MSRS_FOR_ACPI(_) (((_) >> 22) & 0x01)
/**
* @brief Intel MMX Technology
*
* [Bit 23] The processor supports the Intel MMX technology.
*/
UINT32 MmxSupport : 1;
#define CPUID_FEATURE_INFORMATION_EDX_MMX_SUPPORT_BIT 23
#define CPUID_FEATURE_INFORMATION_EDX_MMX_SUPPORT_FLAG 0x800000
#define CPUID_FEATURE_INFORMATION_EDX_MMX_SUPPORT_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_MMX_SUPPORT(_) (((_) >> 23) & 0x01)
/**
* @brief FXSAVE and FXRSTOR Instructions
*
* [Bit 24] The FXSAVE and FXRSTOR instructions are supported for fast save
* and restore of the floating point context. Presence of this bit also
* indicates that CR4.OSFXSR is available for an operating system to
* indicate that it supports the FXSAVE and FXRSTOR instructions.
*/
UINT32 FxsaveFxrstorInstructions : 1;
#define CPUID_FEATURE_INFORMATION_EDX_FXSAVE_FXRSTOR_INSTRUCTIONS_BIT 24
#define CPUID_FEATURE_INFORMATION_EDX_FXSAVE_FXRSTOR_INSTRUCTIONS_FLAG 0x1000000
#define CPUID_FEATURE_INFORMATION_EDX_FXSAVE_FXRSTOR_INSTRUCTIONS_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_FXSAVE_FXRSTOR_INSTRUCTIONS(_) (((_) >> 24) & 0x01)
/**
* @brief SSE extensions support
*
* [Bit 25] The processor supports the SSE extensions.
*/
UINT32 SseSupport : 1;
#define CPUID_FEATURE_INFORMATION_EDX_SSE_SUPPORT_BIT 25
#define CPUID_FEATURE_INFORMATION_EDX_SSE_SUPPORT_FLAG 0x2000000
#define CPUID_FEATURE_INFORMATION_EDX_SSE_SUPPORT_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_SSE_SUPPORT(_) (((_) >> 25) & 0x01)
/**
* @brief SSE2 extensions support
*
* [Bit 26] The processor supports the SSE2 extensions.
*/
UINT32 Sse2Support : 1;
#define CPUID_FEATURE_INFORMATION_EDX_SSE2_SUPPORT_BIT 26
#define CPUID_FEATURE_INFORMATION_EDX_SSE2_SUPPORT_FLAG 0x4000000
#define CPUID_FEATURE_INFORMATION_EDX_SSE2_SUPPORT_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_SSE2_SUPPORT(_) (((_) >> 26) & 0x01)
/**
* @brief Self Snoop
*
* [Bit 27] The processor supports the management of conflicting memory
* types by performing a snoop of its own cache structure for transactions
* issued to the bus.
*/
UINT32 SelfSnoop : 1;
#define CPUID_FEATURE_INFORMATION_EDX_SELF_SNOOP_BIT 27
#define CPUID_FEATURE_INFORMATION_EDX_SELF_SNOOP_FLAG 0x8000000
#define CPUID_FEATURE_INFORMATION_EDX_SELF_SNOOP_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_SELF_SNOOP(_) (((_) >> 27) & 0x01)
/**
* @brief Max APIC IDs reserved field is Valid
*
* [Bit 28] A value of 0 for HTT indicates there is only a single logical
* processor in the package and software should assume only a single APIC ID
* is reserved. A value of 1 for HTT indicates the value in
* CPUID.1.EBX[23:16] (the Maximum number of addressable IDs for logical
* processors in this package) is valid for the package.
*/
UINT32 HyperThreadingTechnology : 1;
#define CPUID_FEATURE_INFORMATION_EDX_HYPER_THREADING_TECHNOLOGY_BIT 28
#define CPUID_FEATURE_INFORMATION_EDX_HYPER_THREADING_TECHNOLOGY_FLAG 0x10000000
#define CPUID_FEATURE_INFORMATION_EDX_HYPER_THREADING_TECHNOLOGY_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_HYPER_THREADING_TECHNOLOGY(_) (((_) >> 28) & 0x01)
/**
* @brief Thermal Monitor
*
* [Bit 29] The processor implements the thermal monitor automatic thermal
* control circuitry (TCC).
*/
UINT32 ThermalMonitor : 1;
#define CPUID_FEATURE_INFORMATION_EDX_THERMAL_MONITOR_BIT 29
#define CPUID_FEATURE_INFORMATION_EDX_THERMAL_MONITOR_FLAG 0x20000000
#define CPUID_FEATURE_INFORMATION_EDX_THERMAL_MONITOR_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_THERMAL_MONITOR(_) (((_) >> 29) & 0x01)
UINT32 Reserved3 : 1;
/**
* @brief Pending Break Enable
*
* [Bit 31] The processor supports the use of the FERR\#/PBE\# pin when the
* processor is in the stop-clock state (STPCLK\# is asserted) to signal the
* processor that an interrupt is pending and that the processor should
* return to normal operation to handle the interrupt. Bit 10 (PBE enable)
* in the IA32_MISC_ENABLE MSR enables this capability.
*/
UINT32 PendingBreakEnable : 1;
#define CPUID_FEATURE_INFORMATION_EDX_PENDING_BREAK_ENABLE_BIT 31
#define CPUID_FEATURE_INFORMATION_EDX_PENDING_BREAK_ENABLE_FLAG 0x80000000
#define CPUID_FEATURE_INFORMATION_EDX_PENDING_BREAK_ENABLE_MASK 0x01
#define CPUID_FEATURE_INFORMATION_EDX_PENDING_BREAK_ENABLE(_) (((_) >> 31) & 0x01)
};
UINT32 AsUInt;
} CpuidFeatureInformationEdx;
} CPUID_EAX_01;
/**
* @brief Deterministic Cache Parameters Leaf
*
* When CPUID executes with EAX set to 04H and ECX contains an index value, the processor returns
* encoded data that describe a set of deterministic cache parameters (for the cache level
* associated with the input in ECX). Valid index values start from 0. Software can enumerate the
* deterministic cache parameters for each level of the cache hierarchy starting with an index value
* of 0, until the parameters report the value associated with the cache type field is 0. The
* architecturally defined fields reported by deterministic cache parameters are documented in Table
* 3-8. This Cache Size in Bytes
* - = (Ways + 1) * (Partitions + 1) * (Line_Size + 1) * (Sets + 1)
* - = (EBX[31:22] + 1) * (EBX[21:12] + 1) * (EBX[11:0] + 1) * (ECX + 1)
* The CPUID leaf 04H also reports data that can be used to derive the topology of processor cores
* in a physical package. This information is constant for all valid index values. Software can
* query the raw data reported by executing CPUID with EAX=04H and ECX=0 and use it as part of the
* topology enumeration algorithm.
*
* @see Vol3A[8(Multiple-Processor Management)]
*/
#define CPUID_CACHE_PARAMETERS 0x00000004
typedef struct
{
/* Each union below maps exactly one 32-bit output register of
 * CPUID.(EAX=04H, ECX=index); the bitfields must always sum to 32 bits. */
/* EAX: cache type, level, and sharing topology. */
union
{
struct
{
/**
* [Bits 4:0] - 0 = Null - No more caches.
* - 1 = Data Cache.
* - 2 = Instruction Cache.
* - 3 = Unified Cache.
* - 4-31 = Reserved.
*/
UINT32 CacheTypeField : 5;
#define CPUID_EAX_CACHE_TYPE_FIELD_BIT 0
#define CPUID_EAX_CACHE_TYPE_FIELD_FLAG 0x1F
#define CPUID_EAX_CACHE_TYPE_FIELD_MASK 0x1F
#define CPUID_EAX_CACHE_TYPE_FIELD(_) (((_) >> 0) & 0x1F)
/**
* [Bits 7:5] Cache Level (starts at 1).
*/
UINT32 CacheLevel : 3;
#define CPUID_EAX_CACHE_LEVEL_BIT 5
#define CPUID_EAX_CACHE_LEVEL_FLAG 0xE0
#define CPUID_EAX_CACHE_LEVEL_MASK 0x07
#define CPUID_EAX_CACHE_LEVEL(_) (((_) >> 5) & 0x07)
/**
* [Bit 8] Self Initializing cache level (does not need SW initialization).
*/
UINT32 SelfInitializingCacheLevel : 1;
#define CPUID_EAX_SELF_INITIALIZING_CACHE_LEVEL_BIT 8
#define CPUID_EAX_SELF_INITIALIZING_CACHE_LEVEL_FLAG 0x100
#define CPUID_EAX_SELF_INITIALIZING_CACHE_LEVEL_MASK 0x01
#define CPUID_EAX_SELF_INITIALIZING_CACHE_LEVEL(_) (((_) >> 8) & 0x01)
/**
* [Bit 9] Fully Associative cache.
*/
UINT32 FullyAssociativeCache : 1;
#define CPUID_EAX_FULLY_ASSOCIATIVE_CACHE_BIT 9
#define CPUID_EAX_FULLY_ASSOCIATIVE_CACHE_FLAG 0x200
#define CPUID_EAX_FULLY_ASSOCIATIVE_CACHE_MASK 0x01
#define CPUID_EAX_FULLY_ASSOCIATIVE_CACHE(_) (((_) >> 9) & 0x01)
UINT32 Reserved1 : 4; /* Bits 13:10 are reserved. */
/**
* [Bits 25:14] Maximum number of addressable IDs for logical processors
* sharing this cache.
*
* @note Add one to the return value to get the result.
* The nearest power-of-2 integer that is not smaller than (1 +
* EAX[25:14]) is the number of unique initial APIC IDs reserved for
* addressing different logical processors sharing this cache.
*/
UINT32 MaxAddressableIdsForLogicalProcessorsSharingThisCache : 12;
#define CPUID_EAX_MAX_ADDRESSABLE_IDS_FOR_LOGICAL_PROCESSORS_SHARING_THIS_CACHE_BIT 14
#define CPUID_EAX_MAX_ADDRESSABLE_IDS_FOR_LOGICAL_PROCESSORS_SHARING_THIS_CACHE_FLAG 0x3FFC000
#define CPUID_EAX_MAX_ADDRESSABLE_IDS_FOR_LOGICAL_PROCESSORS_SHARING_THIS_CACHE_MASK 0xFFF
#define CPUID_EAX_MAX_ADDRESSABLE_IDS_FOR_LOGICAL_PROCESSORS_SHARING_THIS_CACHE(_) \
(((_) >> 14) & 0xFFF)
/**
* [Bits 31:26] Maximum number of addressable IDs for processor cores in the
* physical package.
*
* @note Add one to the return value to get the result.
* The nearest power-of-2 integer that is not smaller than (1 +
* EAX[31:26]) is the number of unique Core_IDs reserved for addressing
* different processor cores in a physical package. Core ID is a subset of
* bits of the initial APIC ID. The returned value is constant for valid
* initial values in ECX. Valid ECX values start from 0.
*/
UINT32 MaxAddressableIdsForProcessorCoresInPhysicalPackage : 6;
#define CPUID_EAX_MAX_ADDRESSABLE_IDS_FOR_PROCESSOR_CORES_IN_PHYSICAL_PACKAGE_BIT 26
#define CPUID_EAX_MAX_ADDRESSABLE_IDS_FOR_PROCESSOR_CORES_IN_PHYSICAL_PACKAGE_FLAG 0xFC000000
#define CPUID_EAX_MAX_ADDRESSABLE_IDS_FOR_PROCESSOR_CORES_IN_PHYSICAL_PACKAGE_MASK 0x3F
#define CPUID_EAX_MAX_ADDRESSABLE_IDS_FOR_PROCESSOR_CORES_IN_PHYSICAL_PACKAGE(_) \
(((_) >> 26) & 0x3F)
};
UINT32 AsUInt;
} Eax;
/* EBX: line size, partitions, and ways of associativity.
 * All three values use minus-one encoding (add 1 to get the real value). */
union
{
struct
{
/**
* [Bits 11:0] System Coherency Line Size.
*
* @note Add one to the return value to get the result.
*/
UINT32 SystemCoherencyLineSize : 12;
#define CPUID_EBX_SYSTEM_COHERENCY_LINE_SIZE_BIT 0
#define CPUID_EBX_SYSTEM_COHERENCY_LINE_SIZE_FLAG 0xFFF
#define CPUID_EBX_SYSTEM_COHERENCY_LINE_SIZE_MASK 0xFFF
#define CPUID_EBX_SYSTEM_COHERENCY_LINE_SIZE(_) (((_) >> 0) & 0xFFF)
/**
* [Bits 21:12] Physical Line partitions.
*
* @note Add one to the return value to get the result.
*/
UINT32 PhysicalLinePartitions : 10;
#define CPUID_EBX_PHYSICAL_LINE_PARTITIONS_BIT 12
#define CPUID_EBX_PHYSICAL_LINE_PARTITIONS_FLAG 0x3FF000
#define CPUID_EBX_PHYSICAL_LINE_PARTITIONS_MASK 0x3FF
#define CPUID_EBX_PHYSICAL_LINE_PARTITIONS(_) (((_) >> 12) & 0x3FF)
/**
* [Bits 31:22] Ways of associativity.
*
* @note Add one to the return value to get the result.
*/
UINT32 WaysOfAssociativity : 10;
#define CPUID_EBX_WAYS_OF_ASSOCIATIVITY_BIT 22
#define CPUID_EBX_WAYS_OF_ASSOCIATIVITY_FLAG 0xFFC00000
#define CPUID_EBX_WAYS_OF_ASSOCIATIVITY_MASK 0x3FF
#define CPUID_EBX_WAYS_OF_ASSOCIATIVITY(_) (((_) >> 22) & 0x3FF)
};
UINT32 AsUInt;
} Ebx;
/* ECX: number of sets (minus-one encoded). */
union
{
struct
{
/**
* [Bits 31:0] Number of Sets.
*
* @note Add one to the return value to get the result.
*/
UINT32 NumberOfSets : 32;
#define CPUID_ECX_NUMBER_OF_SETS_BIT 0
#define CPUID_ECX_NUMBER_OF_SETS_FLAG 0xFFFFFFFF
#define CPUID_ECX_NUMBER_OF_SETS_MASK 0xFFFFFFFF
#define CPUID_ECX_NUMBER_OF_SETS(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ecx;
/* EDX: WBINVD/INVD scope, inclusiveness, and cache-indexing flags. */
union
{
struct
{
/**
* @brief Write-Back Invalidate/Invalidate
*
* [Bit 0] - 0 = WBINVD/INVD from threads sharing this cache acts upon lower
* level caches for threads sharing this cache.
* - 1 = WBINVD/INVD is not guaranteed to act upon lower level caches of
* non-originating threads sharing this cache.
*/
UINT32 WriteBackInvalidate : 1;
#define CPUID_EDX_WRITE_BACK_INVALIDATE_BIT 0
#define CPUID_EDX_WRITE_BACK_INVALIDATE_FLAG 0x01
#define CPUID_EDX_WRITE_BACK_INVALIDATE_MASK 0x01
#define CPUID_EDX_WRITE_BACK_INVALIDATE(_) (((_) >> 0) & 0x01)
/**
* @brief Cache Inclusiveness
*
* [Bit 1] - 0 = Cache is not inclusive of lower cache levels.
* - 1 = Cache is inclusive of lower cache levels.
*/
UINT32 CacheInclusiveness : 1;
#define CPUID_EDX_CACHE_INCLUSIVENESS_BIT 1
#define CPUID_EDX_CACHE_INCLUSIVENESS_FLAG 0x02
#define CPUID_EDX_CACHE_INCLUSIVENESS_MASK 0x01
#define CPUID_EDX_CACHE_INCLUSIVENESS(_) (((_) >> 1) & 0x01)
/**
* @brief Complex Cache Indexing
*
* [Bit 2] - 0 = Direct mapped cache.
* - 1 = A complex function is used to index the cache, potentially using
* all address bits.
*/
UINT32 ComplexCacheIndexing : 1;
#define CPUID_EDX_COMPLEX_CACHE_INDEXING_BIT 2
#define CPUID_EDX_COMPLEX_CACHE_INDEXING_FLAG 0x04
#define CPUID_EDX_COMPLEX_CACHE_INDEXING_MASK 0x01
#define CPUID_EDX_COMPLEX_CACHE_INDEXING(_) (((_) >> 2) & 0x01)
UINT32 Reserved1 : 29; /* Bits 31:3 are reserved. */
};
UINT32 AsUInt;
} Edx;
} CPUID_EAX_04;
/**
* @brief MONITOR/MWAIT Leaf
*
* When CPUID executes with EAX set to 05H, the processor returns information about features
* available to MONITOR/MWAIT instructions. The MONITOR instruction is used for address-range
* monitoring in conjunction with MWAIT instruction. The MWAIT instruction optionally provides
* additional extensions for advanced power management.
*/
#define CPUID_MONITOR_MWAIT 0x00000005
typedef struct
{
/* Each union below maps exactly one 32-bit output register of
 * CPUID.(EAX=05H); the bitfields must always sum to 32 bits. */
/* EAX: smallest monitor-line size. */
union
{
struct
{
/**
* [Bits 15:0] Smallest monitor-line size in bytes (default is processor's
* monitor granularity).
*/
UINT32 SmallestMonitorLineSize : 16;
#define CPUID_EAX_SMALLEST_MONITOR_LINE_SIZE_BIT 0
#define CPUID_EAX_SMALLEST_MONITOR_LINE_SIZE_FLAG 0xFFFF
#define CPUID_EAX_SMALLEST_MONITOR_LINE_SIZE_MASK 0xFFFF
#define CPUID_EAX_SMALLEST_MONITOR_LINE_SIZE(_) (((_) >> 0) & 0xFFFF)
UINT32 Reserved1 : 16;
};
UINT32 AsUInt;
} Eax;
/* EBX: largest monitor-line size. */
union
{
struct
{
/**
* [Bits 15:0] Largest monitor-line size in bytes (default is processor's
* monitor granularity).
*/
UINT32 LargestMonitorLineSize : 16;
#define CPUID_EBX_LARGEST_MONITOR_LINE_SIZE_BIT 0
#define CPUID_EBX_LARGEST_MONITOR_LINE_SIZE_FLAG 0xFFFF
#define CPUID_EBX_LARGEST_MONITOR_LINE_SIZE_MASK 0xFFFF
#define CPUID_EBX_LARGEST_MONITOR_LINE_SIZE(_) (((_) >> 0) & 0xFFFF)
UINT32 Reserved1 : 16;
};
UINT32 AsUInt;
} Ebx;
/* ECX: MONITOR/MWAIT extension enumeration flags. */
union
{
struct
{
/**
* [Bit 0] Enumeration of Monitor-Mwait extensions (beyond EAX and EBX
* registers) supported.
*/
UINT32 EnumerationOfMonitorMwaitExtensions : 1;
#define CPUID_ECX_ENUMERATION_OF_MONITOR_MWAIT_EXTENSIONS_BIT 0
#define CPUID_ECX_ENUMERATION_OF_MONITOR_MWAIT_EXTENSIONS_FLAG 0x01
#define CPUID_ECX_ENUMERATION_OF_MONITOR_MWAIT_EXTENSIONS_MASK 0x01
#define CPUID_ECX_ENUMERATION_OF_MONITOR_MWAIT_EXTENSIONS(_) (((_) >> 0) & 0x01)
/**
* [Bit 1] Supports treating interrupts as break-event for MWAIT, even when
* interrupts disabled.
*/
UINT32 SupportsTreatingInterruptsAsBreakEventForMwait : 1;
#define CPUID_ECX_SUPPORTS_TREATING_INTERRUPTS_AS_BREAK_EVENT_FOR_MWAIT_BIT 1
#define CPUID_ECX_SUPPORTS_TREATING_INTERRUPTS_AS_BREAK_EVENT_FOR_MWAIT_FLAG 0x02
#define CPUID_ECX_SUPPORTS_TREATING_INTERRUPTS_AS_BREAK_EVENT_FOR_MWAIT_MASK 0x01
#define CPUID_ECX_SUPPORTS_TREATING_INTERRUPTS_AS_BREAK_EVENT_FOR_MWAIT(_) (((_) >> 1) & 0x01)
UINT32 Reserved1 : 30;
};
UINT32 AsUInt;
} Ecx;
/* EDX: number of sub C-states supported via MWAIT, one 4-bit field per
 * C-state (C0 in bits 3:0 up through C7 in bits 31:28). */
union
{
struct
{
/**
* [Bits 3:0] Number of C0 sub C-states supported using MWAIT.
*/
UINT32 NumberOfC0SubCStates : 4;
#define CPUID_EDX_NUMBER_OF_C0_SUB_C_STATES_BIT 0
#define CPUID_EDX_NUMBER_OF_C0_SUB_C_STATES_FLAG 0x0F
#define CPUID_EDX_NUMBER_OF_C0_SUB_C_STATES_MASK 0x0F
#define CPUID_EDX_NUMBER_OF_C0_SUB_C_STATES(_) (((_) >> 0) & 0x0F)
/**
* [Bits 7:4] Number of C1 sub C-states supported using MWAIT.
*/
UINT32 NumberOfC1SubCStates : 4;
#define CPUID_EDX_NUMBER_OF_C1_SUB_C_STATES_BIT 4
#define CPUID_EDX_NUMBER_OF_C1_SUB_C_STATES_FLAG 0xF0
#define CPUID_EDX_NUMBER_OF_C1_SUB_C_STATES_MASK 0x0F
#define CPUID_EDX_NUMBER_OF_C1_SUB_C_STATES(_) (((_) >> 4) & 0x0F)
/**
* [Bits 11:8] Number of C2 sub C-states supported using MWAIT.
*/
UINT32 NumberOfC2SubCStates : 4;
#define CPUID_EDX_NUMBER_OF_C2_SUB_C_STATES_BIT 8
#define CPUID_EDX_NUMBER_OF_C2_SUB_C_STATES_FLAG 0xF00
#define CPUID_EDX_NUMBER_OF_C2_SUB_C_STATES_MASK 0x0F
#define CPUID_EDX_NUMBER_OF_C2_SUB_C_STATES(_) (((_) >> 8) & 0x0F)
/**
* [Bits 15:12] Number of C3 sub C-states supported using MWAIT.
*/
UINT32 NumberOfC3SubCStates : 4;
#define CPUID_EDX_NUMBER_OF_C3_SUB_C_STATES_BIT 12
#define CPUID_EDX_NUMBER_OF_C3_SUB_C_STATES_FLAG 0xF000
#define CPUID_EDX_NUMBER_OF_C3_SUB_C_STATES_MASK 0x0F
#define CPUID_EDX_NUMBER_OF_C3_SUB_C_STATES(_) (((_) >> 12) & 0x0F)
/**
* [Bits 19:16] Number of C4 sub C-states supported using MWAIT.
*/
UINT32 NumberOfC4SubCStates : 4;
#define CPUID_EDX_NUMBER_OF_C4_SUB_C_STATES_BIT 16
#define CPUID_EDX_NUMBER_OF_C4_SUB_C_STATES_FLAG 0xF0000
#define CPUID_EDX_NUMBER_OF_C4_SUB_C_STATES_MASK 0x0F
#define CPUID_EDX_NUMBER_OF_C4_SUB_C_STATES(_) (((_) >> 16) & 0x0F)
/**
* [Bits 23:20] Number of C5 sub C-states supported using MWAIT.
*/
UINT32 NumberOfC5SubCStates : 4;
#define CPUID_EDX_NUMBER_OF_C5_SUB_C_STATES_BIT 20
#define CPUID_EDX_NUMBER_OF_C5_SUB_C_STATES_FLAG 0xF00000
#define CPUID_EDX_NUMBER_OF_C5_SUB_C_STATES_MASK 0x0F
#define CPUID_EDX_NUMBER_OF_C5_SUB_C_STATES(_) (((_) >> 20) & 0x0F)
/**
* [Bits 27:24] Number of C6 sub C-states supported using MWAIT.
*/
UINT32 NumberOfC6SubCStates : 4;
#define CPUID_EDX_NUMBER_OF_C6_SUB_C_STATES_BIT 24
#define CPUID_EDX_NUMBER_OF_C6_SUB_C_STATES_FLAG 0xF000000
#define CPUID_EDX_NUMBER_OF_C6_SUB_C_STATES_MASK 0x0F
#define CPUID_EDX_NUMBER_OF_C6_SUB_C_STATES(_) (((_) >> 24) & 0x0F)
/**
* [Bits 31:28] Number of C7 sub C-states supported using MWAIT.
*/
UINT32 NumberOfC7SubCStates : 4;
#define CPUID_EDX_NUMBER_OF_C7_SUB_C_STATES_BIT 28
#define CPUID_EDX_NUMBER_OF_C7_SUB_C_STATES_FLAG 0xF0000000
#define CPUID_EDX_NUMBER_OF_C7_SUB_C_STATES_MASK 0x0F
#define CPUID_EDX_NUMBER_OF_C7_SUB_C_STATES(_) (((_) >> 28) & 0x0F)
};
UINT32 AsUInt;
} Edx;
} CPUID_EAX_05;
/**
* @brief Thermal and Power Management Leaf
*
* When CPUID executes with EAX set to 06H, the processor returns information about thermal and
* power management features.
*/
#define CPUID_THERMAL_AND_POWER_MANAGEMENT 0x00000006
typedef struct
{
/* Each union below maps exactly one 32-bit output register of
 * CPUID.(EAX=06H); the bitfields must always sum to 32 bits. */
/* EAX: thermal sensor, turbo boost, and HWP capability flags. */
union
{
struct
{
/**
* [Bit 0] Digital temperature sensor is supported if set.
*/
UINT32 TemperatureSensorSupported : 1;
#define CPUID_EAX_TEMPERATURE_SENSOR_SUPPORTED_BIT 0
#define CPUID_EAX_TEMPERATURE_SENSOR_SUPPORTED_FLAG 0x01
#define CPUID_EAX_TEMPERATURE_SENSOR_SUPPORTED_MASK 0x01
#define CPUID_EAX_TEMPERATURE_SENSOR_SUPPORTED(_) (((_) >> 0) & 0x01)
/**
* [Bit 1] Intel Turbo Boost Technology available (see description of
* IA32_MISC_ENABLE[38]).
*/
UINT32 IntelTurboBoostTechnologyAvailable : 1;
#define CPUID_EAX_INTEL_TURBO_BOOST_TECHNOLOGY_AVAILABLE_BIT 1
#define CPUID_EAX_INTEL_TURBO_BOOST_TECHNOLOGY_AVAILABLE_FLAG 0x02
#define CPUID_EAX_INTEL_TURBO_BOOST_TECHNOLOGY_AVAILABLE_MASK 0x01
#define CPUID_EAX_INTEL_TURBO_BOOST_TECHNOLOGY_AVAILABLE(_) (((_) >> 1) & 0x01)
/**
* [Bit 2] ARAT. APIC-Timer-always-running feature is supported if set.
*/
UINT32 ApicTimerAlwaysRunning : 1;
#define CPUID_EAX_APIC_TIMER_ALWAYS_RUNNING_BIT 2
#define CPUID_EAX_APIC_TIMER_ALWAYS_RUNNING_FLAG 0x04
#define CPUID_EAX_APIC_TIMER_ALWAYS_RUNNING_MASK 0x01
#define CPUID_EAX_APIC_TIMER_ALWAYS_RUNNING(_) (((_) >> 2) & 0x01)
UINT32 Reserved1 : 1; /* Bit 3 is reserved. */
/**
* [Bit 4] PLN. Power limit notification controls are supported if set.
*/
UINT32 PowerLimitNotification : 1;
#define CPUID_EAX_POWER_LIMIT_NOTIFICATION_BIT 4
#define CPUID_EAX_POWER_LIMIT_NOTIFICATION_FLAG 0x10
#define CPUID_EAX_POWER_LIMIT_NOTIFICATION_MASK 0x01
#define CPUID_EAX_POWER_LIMIT_NOTIFICATION(_) (((_) >> 4) & 0x01)
/**
* [Bit 5] ECMD. Clock modulation duty cycle extension is supported if set.
*/
UINT32 ClockModulationDuty : 1;
#define CPUID_EAX_CLOCK_MODULATION_DUTY_BIT 5
#define CPUID_EAX_CLOCK_MODULATION_DUTY_FLAG 0x20
#define CPUID_EAX_CLOCK_MODULATION_DUTY_MASK 0x01
#define CPUID_EAX_CLOCK_MODULATION_DUTY(_) (((_) >> 5) & 0x01)
/**
* [Bit 6] PTM. Package thermal management is supported if set.
*/
UINT32 PackageThermalManagement : 1;
#define CPUID_EAX_PACKAGE_THERMAL_MANAGEMENT_BIT 6
#define CPUID_EAX_PACKAGE_THERMAL_MANAGEMENT_FLAG 0x40
#define CPUID_EAX_PACKAGE_THERMAL_MANAGEMENT_MASK 0x01
#define CPUID_EAX_PACKAGE_THERMAL_MANAGEMENT(_) (((_) >> 6) & 0x01)
/**
* [Bit 7] HWP. HWP base registers (IA32_PM_ENABLE[bit 0],
* IA32_HWP_CAPABILITIES, IA32_HWP_REQUEST, IA32_HWP_STATUS) are supported
* if set.
*/
UINT32 HwpBaseRegisters : 1;
#define CPUID_EAX_HWP_BASE_REGISTERS_BIT 7
#define CPUID_EAX_HWP_BASE_REGISTERS_FLAG 0x80
#define CPUID_EAX_HWP_BASE_REGISTERS_MASK 0x01
#define CPUID_EAX_HWP_BASE_REGISTERS(_) (((_) >> 7) & 0x01)
/**
* [Bit 8] HWP_Notification. IA32_HWP_INTERRUPT MSR is supported if set.
*/
UINT32 HwpNotification : 1;
#define CPUID_EAX_HWP_NOTIFICATION_BIT 8
#define CPUID_EAX_HWP_NOTIFICATION_FLAG 0x100
#define CPUID_EAX_HWP_NOTIFICATION_MASK 0x01
#define CPUID_EAX_HWP_NOTIFICATION(_) (((_) >> 8) & 0x01)
/**
* [Bit 9] HWP_Activity_Window. IA32_HWP_REQUEST[bits 41:32] is supported if
* set.
*/
UINT32 HwpActivityWindow : 1;
#define CPUID_EAX_HWP_ACTIVITY_WINDOW_BIT 9
#define CPUID_EAX_HWP_ACTIVITY_WINDOW_FLAG 0x200
#define CPUID_EAX_HWP_ACTIVITY_WINDOW_MASK 0x01
#define CPUID_EAX_HWP_ACTIVITY_WINDOW(_) (((_) >> 9) & 0x01)
/**
* [Bit 10] HWP_Energy_Performance_Preference. IA32_HWP_REQUEST[bits 31:24]
* is supported if set.
*/
UINT32 HwpEnergyPerformancePreference : 1;
#define CPUID_EAX_HWP_ENERGY_PERFORMANCE_PREFERENCE_BIT 10
#define CPUID_EAX_HWP_ENERGY_PERFORMANCE_PREFERENCE_FLAG 0x400
#define CPUID_EAX_HWP_ENERGY_PERFORMANCE_PREFERENCE_MASK 0x01
#define CPUID_EAX_HWP_ENERGY_PERFORMANCE_PREFERENCE(_) (((_) >> 10) & 0x01)
/**
* [Bit 11] HWP_Package_Level_Request. IA32_HWP_REQUEST_PKG MSR is supported
* if set.
*/
UINT32 HwpPackageLevelRequest : 1;
#define CPUID_EAX_HWP_PACKAGE_LEVEL_REQUEST_BIT 11
#define CPUID_EAX_HWP_PACKAGE_LEVEL_REQUEST_FLAG 0x800
#define CPUID_EAX_HWP_PACKAGE_LEVEL_REQUEST_MASK 0x01
#define CPUID_EAX_HWP_PACKAGE_LEVEL_REQUEST(_) (((_) >> 11) & 0x01)
UINT32 Reserved2 : 1; /* Bit 12 is reserved. */
/**
* [Bit 13] HDC. HDC base registers IA32_PKG_HDC_CTL, IA32_PM_CTL1,
* IA32_THREAD_STALL MSRs are supported if set.
*/
UINT32 Hdc : 1;
#define CPUID_EAX_HDC_BIT 13
#define CPUID_EAX_HDC_FLAG 0x2000
#define CPUID_EAX_HDC_MASK 0x01
#define CPUID_EAX_HDC(_) (((_) >> 13) & 0x01)
/**
* [Bit 14] Intel(R) Turbo Boost Max Technology 3.0 available.
*/
UINT32 IntelTurboBoostMaxTechnology3Available : 1;
#define CPUID_EAX_INTEL_TURBO_BOOST_MAX_TECHNOLOGY_3_AVAILABLE_BIT 14
#define CPUID_EAX_INTEL_TURBO_BOOST_MAX_TECHNOLOGY_3_AVAILABLE_FLAG 0x4000
#define CPUID_EAX_INTEL_TURBO_BOOST_MAX_TECHNOLOGY_3_AVAILABLE_MASK 0x01
#define CPUID_EAX_INTEL_TURBO_BOOST_MAX_TECHNOLOGY_3_AVAILABLE(_) (((_) >> 14) & 0x01)
/**
* [Bit 15] HWP Capabilities. Highest Performance change is supported if
* set.
*/
UINT32 HwpCapabilities : 1;
#define CPUID_EAX_HWP_CAPABILITIES_BIT 15
#define CPUID_EAX_HWP_CAPABILITIES_FLAG 0x8000
#define CPUID_EAX_HWP_CAPABILITIES_MASK 0x01
#define CPUID_EAX_HWP_CAPABILITIES(_) (((_) >> 15) & 0x01)
/**
* [Bit 16] HWP PECI override is supported if set.
*/
UINT32 HwpPeciOverride : 1;
#define CPUID_EAX_HWP_PECI_OVERRIDE_BIT 16
#define CPUID_EAX_HWP_PECI_OVERRIDE_FLAG 0x10000
#define CPUID_EAX_HWP_PECI_OVERRIDE_MASK 0x01
#define CPUID_EAX_HWP_PECI_OVERRIDE(_) (((_) >> 16) & 0x01)
/**
* [Bit 17] Flexible HWP is supported if set.
*/
UINT32 FlexibleHwp : 1;
#define CPUID_EAX_FLEXIBLE_HWP_BIT 17
#define CPUID_EAX_FLEXIBLE_HWP_FLAG 0x20000
#define CPUID_EAX_FLEXIBLE_HWP_MASK 0x01
#define CPUID_EAX_FLEXIBLE_HWP(_) (((_) >> 17) & 0x01)
/**
* [Bit 18] Fast access mode for the IA32_HWP_REQUEST MSR is supported if
* set.
*/
UINT32 FastAccessModeForHwpRequestMsr : 1;
#define CPUID_EAX_FAST_ACCESS_MODE_FOR_HWP_REQUEST_MSR_BIT 18
#define CPUID_EAX_FAST_ACCESS_MODE_FOR_HWP_REQUEST_MSR_FLAG 0x40000
#define CPUID_EAX_FAST_ACCESS_MODE_FOR_HWP_REQUEST_MSR_MASK 0x01
#define CPUID_EAX_FAST_ACCESS_MODE_FOR_HWP_REQUEST_MSR(_) (((_) >> 18) & 0x01)
UINT32 Reserved3 : 1; /* Bit 19 is reserved. */
/**
* [Bit 20] Ignoring Idle Logical Processor HWP request is supported if set.
*/
UINT32 IgnoringIdleLogicalProcessorHwpRequest : 1;
#define CPUID_EAX_IGNORING_IDLE_LOGICAL_PROCESSOR_HWP_REQUEST_BIT 20
#define CPUID_EAX_IGNORING_IDLE_LOGICAL_PROCESSOR_HWP_REQUEST_FLAG 0x100000
#define CPUID_EAX_IGNORING_IDLE_LOGICAL_PROCESSOR_HWP_REQUEST_MASK 0x01
#define CPUID_EAX_IGNORING_IDLE_LOGICAL_PROCESSOR_HWP_REQUEST(_) (((_) >> 20) & 0x01)
UINT32 Reserved4 : 2; /* Bits 22:21 are reserved. */
/**
* [Bit 23] Intel Thread Director supported if set. IA32_HW_FEEDBACK_CHAR
* and IA32_HW_FEEDBACK_THREAD_CONFIG MSRs are supported if set.
*/
UINT32 IntelThreadDirector : 1;
#define CPUID_EAX_INTEL_THREAD_DIRECTOR_BIT 23
#define CPUID_EAX_INTEL_THREAD_DIRECTOR_FLAG 0x800000
#define CPUID_EAX_INTEL_THREAD_DIRECTOR_MASK 0x01
#define CPUID_EAX_INTEL_THREAD_DIRECTOR(_) (((_) >> 23) & 0x01)
UINT32 Reserved5 : 8; /* Bits 31:24 are reserved. */
};
UINT32 AsUInt;
} Eax;
/* EBX: digital thermal sensor interrupt-threshold count. */
union
{
struct
{
/**
* [Bits 3:0] Number of Interrupt Thresholds in Digital Thermal Sensor.
*/
UINT32 NumberOfInterruptThresholdsInThermalSensor : 4;
#define CPUID_EBX_NUMBER_OF_INTERRUPT_THRESHOLDS_IN_THERMAL_SENSOR_BIT 0
#define CPUID_EBX_NUMBER_OF_INTERRUPT_THRESHOLDS_IN_THERMAL_SENSOR_FLAG 0x0F
#define CPUID_EBX_NUMBER_OF_INTERRUPT_THRESHOLDS_IN_THERMAL_SENSOR_MASK 0x0F
#define CPUID_EBX_NUMBER_OF_INTERRUPT_THRESHOLDS_IN_THERMAL_SENSOR(_) (((_) >> 0) & 0x0F)
UINT32 Reserved1 : 28;
};
UINT32 AsUInt;
} Ebx;
/* ECX: hardware coordination feedback and energy-bias capability fields. */
union
{
struct
{
/**
* [Bit 0] Hardware Coordination Feedback Capability (Presence of IA32_MPERF
* and IA32_APERF). The capability to provide a measure of delivered
* processor performance (since last reset of the counters), as a percentage
* of the expected processor performance when running at the TSC frequency.
*/
UINT32 HardwareCoordinationFeedbackCapability : 1;
#define CPUID_ECX_HARDWARE_COORDINATION_FEEDBACK_CAPABILITY_BIT 0
#define CPUID_ECX_HARDWARE_COORDINATION_FEEDBACK_CAPABILITY_FLAG 0x01
#define CPUID_ECX_HARDWARE_COORDINATION_FEEDBACK_CAPABILITY_MASK 0x01
#define CPUID_ECX_HARDWARE_COORDINATION_FEEDBACK_CAPABILITY(_) (((_) >> 0) & 0x01)
UINT32 Reserved1 : 2;
/*
 * NOTE(review): per the Intel SDM, CPUID.06H:ECX bit 3 is SETBH (the
 * processor supports performance-energy bias preference and the
 * IA32_ENERGY_PERF_BIAS MSR), while ECX[15:8] is the number of Intel
 * Thread Director classes. The names/descriptions of this field and
 * PerformanceEnergyBiasPreference below look swapped relative to the
 * SDM — the bit positions themselves are correct; confirm against the
 * upstream ia32-doc definitions before renaming anything.
 */
/**
* [Bit 3] Number of Intel Thread Director classes supported by the
* processor. Information for that many classes is written into the Intel
* Thread Director Table by the hardware.
*/
UINT32 NumberOfIntelThreadDirectorClasses : 1;
#define CPUID_ECX_NUMBER_OF_INTEL_THREAD_DIRECTOR_CLASSES_BIT 3
#define CPUID_ECX_NUMBER_OF_INTEL_THREAD_DIRECTOR_CLASSES_FLAG 0x08
#define CPUID_ECX_NUMBER_OF_INTEL_THREAD_DIRECTOR_CLASSES_MASK 0x01
#define CPUID_ECX_NUMBER_OF_INTEL_THREAD_DIRECTOR_CLASSES(_) (((_) >> 3) & 0x01)
UINT32 Reserved2 : 4;
/**
* [Bits 15:8] The processor supports performance-energy bias preference if
* CPUID.06H:ECX.SETBH[bit 3] is set and it also implies the presence of a
* new architectural MSR called IA32_ENERGY_PERF_BIAS (1B0H).
*/
UINT32 PerformanceEnergyBiasPreference : 8;
#define CPUID_ECX_PERFORMANCE_ENERGY_BIAS_PREFERENCE_BIT 8
#define CPUID_ECX_PERFORMANCE_ENERGY_BIAS_PREFERENCE_FLAG 0xFF00
#define CPUID_ECX_PERFORMANCE_ENERGY_BIAS_PREFERENCE_MASK 0xFF
#define CPUID_ECX_PERFORMANCE_ENERGY_BIAS_PREFERENCE(_) (((_) >> 8) & 0xFF)
UINT32 Reserved3 : 16;
};
UINT32 AsUInt;
} Ecx;
/* EDX: entirely reserved for this leaf. */
union
{
struct
{
/**
* [Bits 31:0] EDX is reserved.
*/
UINT32 Reserved : 32;
#define CPUID_EDX_RESERVED_BIT 0
#define CPUID_EDX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EDX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EDX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Edx;
} CPUID_EAX_06;
/**
* @brief Structured Extended Feature Flags Enumeration Leaf (Output depends on ECX input value)
*
* When CPUID executes with EAX set to 07H and ECX = 0, the processor returns information about the
* maximum input value for sub-leaves that contain extended feature flags. When CPUID executes with
* EAX set to 07H and the input value of ECX is invalid (see leaf 07H entry in Table 3-8), the
* processor returns 0 in EAX/EBX/ECX/EDX. In subleaf 0, EAX returns the maximum input value of the
* highest leaf 7 sub-leaf, and EBX, ECX & EDX contain information of extended feature flags.
*/
#define CPUID_STRUCTURED_EXTENDED_FEATURE_FLAGS 0x00000007
typedef struct
{
union
{
struct
{
/**
* [Bits 31:0] Reports the maximum input value for supported leaf 7
* sub-leaves.
*/
UINT32 NumberOfSubLeaves : 32;
#define CPUID_EAX_NUMBER_OF_SUB_LEAVES_BIT 0
#define CPUID_EAX_NUMBER_OF_SUB_LEAVES_FLAG 0xFFFFFFFF
#define CPUID_EAX_NUMBER_OF_SUB_LEAVES_MASK 0xFFFFFFFF
#define CPUID_EAX_NUMBER_OF_SUB_LEAVES(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bit 0] Supports RDFSBASE/RDGSBASE/WRFSBASE/WRGSBASE if 1.
*/
UINT32 Fsgsbase : 1;
#define CPUID_EBX_FSGSBASE_BIT 0
#define CPUID_EBX_FSGSBASE_FLAG 0x01
#define CPUID_EBX_FSGSBASE_MASK 0x01
#define CPUID_EBX_FSGSBASE(_) (((_) >> 0) & 0x01)
/**
* [Bit 1] IA32_TSC_ADJUST MSR is supported if 1.
*/
UINT32 Ia32TscAdjustMsr : 1;
#define CPUID_EBX_IA32_TSC_ADJUST_MSR_BIT 1
#define CPUID_EBX_IA32_TSC_ADJUST_MSR_FLAG 0x02
#define CPUID_EBX_IA32_TSC_ADJUST_MSR_MASK 0x01
#define CPUID_EBX_IA32_TSC_ADJUST_MSR(_) (((_) >> 1) & 0x01)
/**
* [Bit 2] Supports Intel(R) Software Guard Extensions (Intel(R) SGX
* Extensions) if 1.
*/
UINT32 Sgx : 1;
#define CPUID_EBX_SGX_BIT 2
#define CPUID_EBX_SGX_FLAG 0x04
#define CPUID_EBX_SGX_MASK 0x01
#define CPUID_EBX_SGX(_) (((_) >> 2) & 0x01)
/**
* [Bit 3] BMI1.
*/
UINT32 Bmi1 : 1;
#define CPUID_EBX_BMI1_BIT 3
#define CPUID_EBX_BMI1_FLAG 0x08
#define CPUID_EBX_BMI1_MASK 0x01
#define CPUID_EBX_BMI1(_) (((_) >> 3) & 0x01)
/**
* [Bit 4] HLE.
*/
UINT32 Hle : 1;
#define CPUID_EBX_HLE_BIT 4
#define CPUID_EBX_HLE_FLAG 0x10
#define CPUID_EBX_HLE_MASK 0x01
#define CPUID_EBX_HLE(_) (((_) >> 4) & 0x01)
/**
* [Bit 5] AVX2.
*/
UINT32 Avx2 : 1;
#define CPUID_EBX_AVX2_BIT 5
#define CPUID_EBX_AVX2_FLAG 0x20
#define CPUID_EBX_AVX2_MASK 0x01
#define CPUID_EBX_AVX2(_) (((_) >> 5) & 0x01)
/**
* [Bit 6] x87 FPU Data Pointer updated only on x87 exceptions if 1.
*/
UINT32 FdpExcptnOnly : 1;
#define CPUID_EBX_FDP_EXCPTN_ONLY_BIT 6
#define CPUID_EBX_FDP_EXCPTN_ONLY_FLAG 0x40
#define CPUID_EBX_FDP_EXCPTN_ONLY_MASK 0x01
#define CPUID_EBX_FDP_EXCPTN_ONLY(_) (((_) >> 6) & 0x01)
/**
* [Bit 7] Supports Supervisor-Mode Execution Prevention if 1.
*/
UINT32 Smep : 1;
#define CPUID_EBX_SMEP_BIT 7
#define CPUID_EBX_SMEP_FLAG 0x80
#define CPUID_EBX_SMEP_MASK 0x01
#define CPUID_EBX_SMEP(_) (((_) >> 7) & 0x01)
/**
* [Bit 8] BMI2.
*/
UINT32 Bmi2 : 1;
#define CPUID_EBX_BMI2_BIT 8
#define CPUID_EBX_BMI2_FLAG 0x100
#define CPUID_EBX_BMI2_MASK 0x01
#define CPUID_EBX_BMI2(_) (((_) >> 8) & 0x01)
/**
* [Bit 9] Supports Enhanced REP MOVSB/STOSB if 1.
*/
UINT32 EnhancedRepMovsbStosb : 1;
#define CPUID_EBX_ENHANCED_REP_MOVSB_STOSB_BIT 9
#define CPUID_EBX_ENHANCED_REP_MOVSB_STOSB_FLAG 0x200
#define CPUID_EBX_ENHANCED_REP_MOVSB_STOSB_MASK 0x01
#define CPUID_EBX_ENHANCED_REP_MOVSB_STOSB(_) (((_) >> 9) & 0x01)
/**
* [Bit 10] If 1, supports INVPCID instruction for system software that
* manages process-context identifiers.
*/
UINT32 Invpcid : 1;
#define CPUID_EBX_INVPCID_BIT 10
#define CPUID_EBX_INVPCID_FLAG 0x400
#define CPUID_EBX_INVPCID_MASK 0x01
#define CPUID_EBX_INVPCID(_) (((_) >> 10) & 0x01)
/**
* [Bit 11] RTM.
*/
UINT32 Rtm : 1;
#define CPUID_EBX_RTM_BIT 11
#define CPUID_EBX_RTM_FLAG 0x800
#define CPUID_EBX_RTM_MASK 0x01
#define CPUID_EBX_RTM(_) (((_) >> 11) & 0x01)
/**
* [Bit 12] Supports Intel(R) Resource Director Technology (Intel(R) RDT)
* Monitoring capability if 1.
*/
UINT32 RdtM : 1;
#define CPUID_EBX_RDT_M_BIT 12
#define CPUID_EBX_RDT_M_FLAG 0x1000
#define CPUID_EBX_RDT_M_MASK 0x01
#define CPUID_EBX_RDT_M(_) (((_) >> 12) & 0x01)
/**
* [Bit 13] Deprecates FPU CS and FPU DS values if 1.
*/
UINT32 Deprecates : 1;
#define CPUID_EBX_DEPRECATES_BIT 13
#define CPUID_EBX_DEPRECATES_FLAG 0x2000
#define CPUID_EBX_DEPRECATES_MASK 0x01
#define CPUID_EBX_DEPRECATES(_) (((_) >> 13) & 0x01)
/**
* [Bit 14] Supports Intel(R) Memory Protection Extensions if 1.
*/
UINT32 Mpx : 1;
#define CPUID_EBX_MPX_BIT 14
#define CPUID_EBX_MPX_FLAG 0x4000
#define CPUID_EBX_MPX_MASK 0x01
#define CPUID_EBX_MPX(_) (((_) >> 14) & 0x01)
/**
* [Bit 15] Supports Intel(R) Resource Director Technology (Intel(R) RDT)
* Allocation capability if 1.
*/
UINT32 Rdt : 1;
#define CPUID_EBX_RDT_BIT 15
#define CPUID_EBX_RDT_FLAG 0x8000
#define CPUID_EBX_RDT_MASK 0x01
#define CPUID_EBX_RDT(_) (((_) >> 15) & 0x01)
/**
* [Bit 16] AVX512F.
*/
UINT32 Avx512F : 1;
#define CPUID_EBX_AVX512F_BIT 16
#define CPUID_EBX_AVX512F_FLAG 0x10000
#define CPUID_EBX_AVX512F_MASK 0x01
#define CPUID_EBX_AVX512F(_) (((_) >> 16) & 0x01)
/**
* [Bit 17] AVX512DQ.
*/
UINT32 Avx512Dq : 1;
#define CPUID_EBX_AVX512DQ_BIT 17
#define CPUID_EBX_AVX512DQ_FLAG 0x20000
#define CPUID_EBX_AVX512DQ_MASK 0x01
#define CPUID_EBX_AVX512DQ(_) (((_) >> 17) & 0x01)
/**
* [Bit 18] RDSEED.
*/
UINT32 Rdseed : 1;
#define CPUID_EBX_RDSEED_BIT 18
#define CPUID_EBX_RDSEED_FLAG 0x40000
#define CPUID_EBX_RDSEED_MASK 0x01
#define CPUID_EBX_RDSEED(_) (((_) >> 18) & 0x01)
/**
* [Bit 19] ADX.
*/
UINT32 Adx : 1;
#define CPUID_EBX_ADX_BIT 19
#define CPUID_EBX_ADX_FLAG 0x80000
#define CPUID_EBX_ADX_MASK 0x01
#define CPUID_EBX_ADX(_) (((_) >> 19) & 0x01)
/**
* [Bit 20] Supports Supervisor-Mode Access Prevention (and the CLAC/STAC
* instructions) if 1.
*/
UINT32 Smap : 1;
#define CPUID_EBX_SMAP_BIT 20
#define CPUID_EBX_SMAP_FLAG 0x100000
#define CPUID_EBX_SMAP_MASK 0x01
#define CPUID_EBX_SMAP(_) (((_) >> 20) & 0x01)
/**
* [Bit 21] AVX512_IFMA.
*/
UINT32 Avx512Ifma : 1;
#define CPUID_EBX_AVX512_IFMA_BIT 21
#define CPUID_EBX_AVX512_IFMA_FLAG 0x200000
#define CPUID_EBX_AVX512_IFMA_MASK 0x01
#define CPUID_EBX_AVX512_IFMA(_) (((_) >> 21) & 0x01)
UINT32 Reserved1 : 1;
/**
* [Bit 23] CLFLUSHOPT.
*/
UINT32 Clflushopt : 1;
#define CPUID_EBX_CLFLUSHOPT_BIT 23
#define CPUID_EBX_CLFLUSHOPT_FLAG 0x800000
#define CPUID_EBX_CLFLUSHOPT_MASK 0x01
#define CPUID_EBX_CLFLUSHOPT(_) (((_) >> 23) & 0x01)
/**
* [Bit 24] CLWB.
*/
UINT32 Clwb : 1;
#define CPUID_EBX_CLWB_BIT 24
#define CPUID_EBX_CLWB_FLAG 0x1000000
#define CPUID_EBX_CLWB_MASK 0x01
#define CPUID_EBX_CLWB(_) (((_) >> 24) & 0x01)
/**
* [Bit 25] Intel Processor Trace.
*/
UINT32 Intel : 1;
#define CPUID_EBX_INTEL_BIT 25
#define CPUID_EBX_INTEL_FLAG 0x2000000
#define CPUID_EBX_INTEL_MASK 0x01
#define CPUID_EBX_INTEL(_) (((_) >> 25) & 0x01)
/**
* [Bit 26] (Intel(R) Xeon Phi(TM) only).
*/
UINT32 Avx512Pf : 1;
#define CPUID_EBX_AVX512PF_BIT 26
#define CPUID_EBX_AVX512PF_FLAG 0x4000000
#define CPUID_EBX_AVX512PF_MASK 0x01
#define CPUID_EBX_AVX512PF(_) (((_) >> 26) & 0x01)
/**
* [Bit 27] (Intel(R) Xeon Phi(TM) only).
*/
UINT32 Avx512Er : 1;
#define CPUID_EBX_AVX512ER_BIT 27
#define CPUID_EBX_AVX512ER_FLAG 0x8000000
#define CPUID_EBX_AVX512ER_MASK 0x01
#define CPUID_EBX_AVX512ER(_) (((_) >> 27) & 0x01)
/**
* [Bit 28] AVX512CD.
*/
UINT32 Avx512Cd : 1;
#define CPUID_EBX_AVX512CD_BIT 28
#define CPUID_EBX_AVX512CD_FLAG 0x10000000
#define CPUID_EBX_AVX512CD_MASK 0x01
#define CPUID_EBX_AVX512CD(_) (((_) >> 28) & 0x01)
/**
* [Bit 29] Supports Intel(R) Secure Hash Algorithm Extensions (Intel(R) SHA
* Extensions) if 1.
*/
UINT32 Sha : 1;
#define CPUID_EBX_SHA_BIT 29
#define CPUID_EBX_SHA_FLAG 0x20000000
#define CPUID_EBX_SHA_MASK 0x01
#define CPUID_EBX_SHA(_) (((_) >> 29) & 0x01)
/**
* [Bit 30] AVX512BW.
*/
UINT32 Avx512Bw : 1;
#define CPUID_EBX_AVX512BW_BIT 30
#define CPUID_EBX_AVX512BW_FLAG 0x40000000
#define CPUID_EBX_AVX512BW_MASK 0x01
#define CPUID_EBX_AVX512BW(_) (((_) >> 30) & 0x01)
/**
* [Bit 31] AVX512VL.
*/
UINT32 Avx512Vl : 1;
#define CPUID_EBX_AVX512VL_BIT 31
#define CPUID_EBX_AVX512VL_FLAG 0x80000000
#define CPUID_EBX_AVX512VL_MASK 0x01
#define CPUID_EBX_AVX512VL(_) (((_) >> 31) & 0x01)
};
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
* [Bit 0] (Intel(R) Xeon Phi(TM) only).
*/
UINT32 Prefetchwt1 : 1;
#define CPUID_ECX_PREFETCHWT1_BIT 0
#define CPUID_ECX_PREFETCHWT1_FLAG 0x01
#define CPUID_ECX_PREFETCHWT1_MASK 0x01
#define CPUID_ECX_PREFETCHWT1(_) (((_) >> 0) & 0x01)
/**
* [Bit 1] AVX512_VBMI.
*/
UINT32 Avx512Vbmi : 1;
#define CPUID_ECX_AVX512_VBMI_BIT 1
#define CPUID_ECX_AVX512_VBMI_FLAG 0x02
#define CPUID_ECX_AVX512_VBMI_MASK 0x01
#define CPUID_ECX_AVX512_VBMI(_) (((_) >> 1) & 0x01)
/**
* [Bit 2] Supports user-mode instruction prevention if 1.
*/
UINT32 Umip : 1;
#define CPUID_ECX_UMIP_BIT 2
#define CPUID_ECX_UMIP_FLAG 0x04
#define CPUID_ECX_UMIP_MASK 0x01
#define CPUID_ECX_UMIP(_) (((_) >> 2) & 0x01)
/**
* [Bit 3] Supports protection keys for user-mode pages if 1.
*/
UINT32 Pku : 1;
#define CPUID_ECX_PKU_BIT 3
#define CPUID_ECX_PKU_FLAG 0x08
#define CPUID_ECX_PKU_MASK 0x01
#define CPUID_ECX_PKU(_) (((_) >> 3) & 0x01)
/**
* [Bit 4] If 1, OS has set CR4.PKE to enable protection keys (and the
* RDPKRU/WRPKRU instructions).
*/
UINT32 Ospke : 1;
#define CPUID_ECX_OSPKE_BIT 4
#define CPUID_ECX_OSPKE_FLAG 0x10
#define CPUID_ECX_OSPKE_MASK 0x01
#define CPUID_ECX_OSPKE(_) (((_) >> 4) & 0x01)
/**
* [Bit 5] WAITPKG.
*/
UINT32 Waitpkg : 1;
#define CPUID_ECX_WAITPKG_BIT 5
#define CPUID_ECX_WAITPKG_FLAG 0x20
#define CPUID_ECX_WAITPKG_MASK 0x01
#define CPUID_ECX_WAITPKG(_) (((_) >> 5) & 0x01)
/**
* [Bit 6] AVX512_VBMI2.
*/
UINT32 Avx512Vbmi2 : 1;
#define CPUID_ECX_AVX512_VBMI2_BIT 6
#define CPUID_ECX_AVX512_VBMI2_FLAG 0x40
#define CPUID_ECX_AVX512_VBMI2_MASK 0x01
#define CPUID_ECX_AVX512_VBMI2(_) (((_) >> 6) & 0x01)
/**
* [Bit 7] Supports CET shadow stack features if 1. Processors that set this
* bit define bits 1:0 of the IA32_U_CET and IA32_S_CET MSRs. Enumerates
* support for the following MSRs: IA32_INTERRUPT_SPP_TABLE_ADDR,
* IA32_PL3_SSP, IA32_PL2_SSP, IA32_PL1_SSP, and IA32_PL0_SSP.
*/
UINT32 CetSs : 1;
#define CPUID_ECX_CET_SS_BIT 7
#define CPUID_ECX_CET_SS_FLAG 0x80
#define CPUID_ECX_CET_SS_MASK 0x01
#define CPUID_ECX_CET_SS(_) (((_) >> 7) & 0x01)
/**
* [Bit 8] GFNI.
*/
UINT32 Gfni : 1;
#define CPUID_ECX_GFNI_BIT 8
#define CPUID_ECX_GFNI_FLAG 0x100
#define CPUID_ECX_GFNI_MASK 0x01
#define CPUID_ECX_GFNI(_) (((_) >> 8) & 0x01)
/**
* [Bit 9] VAES.
*/
UINT32 Vaes : 1;
#define CPUID_ECX_VAES_BIT 9
#define CPUID_ECX_VAES_FLAG 0x200
#define CPUID_ECX_VAES_MASK 0x01
#define CPUID_ECX_VAES(_) (((_) >> 9) & 0x01)
/**
* [Bit 10] VPCLMULQDQ.
*/
UINT32 Vpclmulqdq : 1;
#define CPUID_ECX_VPCLMULQDQ_BIT 10
#define CPUID_ECX_VPCLMULQDQ_FLAG 0x400
#define CPUID_ECX_VPCLMULQDQ_MASK 0x01
#define CPUID_ECX_VPCLMULQDQ(_) (((_) >> 10) & 0x01)
/**
* [Bit 11] AVX512_VNNI.
*/
UINT32 Avx512Vnni : 1;
#define CPUID_ECX_AVX512_VNNI_BIT 11
#define CPUID_ECX_AVX512_VNNI_FLAG 0x800
#define CPUID_ECX_AVX512_VNNI_MASK 0x01
#define CPUID_ECX_AVX512_VNNI(_) (((_) >> 11) & 0x01)
/**
* [Bit 12] AVX512_BITALG.
*/
UINT32 Avx512Bitalg : 1;
#define CPUID_ECX_AVX512_BITALG_BIT 12
#define CPUID_ECX_AVX512_BITALG_FLAG 0x1000
#define CPUID_ECX_AVX512_BITALG_MASK 0x01
#define CPUID_ECX_AVX512_BITALG(_) (((_) >> 12) & 0x01)
/**
* [Bit 13] If 1, the following MSRs are supported: IA32_TME_CAPABILITY,
* IA32_TME_ACTIVATE, IA32_TME_EXCLUDE_MASK, and IA32_TME_EXCLUDE_BASE.
*/
UINT32 TmeEn : 1;
#define CPUID_ECX_TME_EN_BIT 13
#define CPUID_ECX_TME_EN_FLAG 0x2000
#define CPUID_ECX_TME_EN_MASK 0x01
#define CPUID_ECX_TME_EN(_) (((_) >> 13) & 0x01)
/**
* [Bit 14] AVX512_VPOPCNTDQ.
*/
UINT32 Avx512Vpopcntdq : 1;
#define CPUID_ECX_AVX512_VPOPCNTDQ_BIT 14
#define CPUID_ECX_AVX512_VPOPCNTDQ_FLAG 0x4000
#define CPUID_ECX_AVX512_VPOPCNTDQ_MASK 0x01
#define CPUID_ECX_AVX512_VPOPCNTDQ(_) (((_) >> 14) & 0x01)
UINT32 Reserved1 : 1;
/**
* [Bit 16] Supports 57-bit linear addresses and five-level paging if 1.
*/
UINT32 La57 : 1;
#define CPUID_ECX_LA57_BIT 16
#define CPUID_ECX_LA57_FLAG 0x10000
#define CPUID_ECX_LA57_MASK 0x01
#define CPUID_ECX_LA57(_) (((_) >> 16) & 0x01)
/**
* [Bits 21:17] The value of MAWAU used by the BNDLDX and BNDSTX
* instructions in 64-bit mode.
*/
UINT32 Mawau : 5;
#define CPUID_ECX_MAWAU_BIT 17
#define CPUID_ECX_MAWAU_FLAG 0x3E0000
#define CPUID_ECX_MAWAU_MASK 0x1F
#define CPUID_ECX_MAWAU(_) (((_) >> 17) & 0x1F)
/**
* [Bit 22] RDPID and IA32_TSC_AUX are available if 1.
*/
UINT32 Rdpid : 1;
#define CPUID_ECX_RDPID_BIT 22
#define CPUID_ECX_RDPID_FLAG 0x400000
#define CPUID_ECX_RDPID_MASK 0x01
#define CPUID_ECX_RDPID(_) (((_) >> 22) & 0x01)
/**
* [Bit 23] KL. Supports Key Locker if 1.
*/
UINT32 Kl : 1;
#define CPUID_ECX_KL_BIT 23
#define CPUID_ECX_KL_FLAG 0x800000
#define CPUID_ECX_KL_MASK 0x01
#define CPUID_ECX_KL(_) (((_) >> 23) & 0x01)
UINT32 Reserved2 : 1;
/**
* [Bit 25] Supports cache line demote if 1.
*/
UINT32 Cldemote : 1;
#define CPUID_ECX_CLDEMOTE_BIT 25
#define CPUID_ECX_CLDEMOTE_FLAG 0x2000000
#define CPUID_ECX_CLDEMOTE_MASK 0x01
#define CPUID_ECX_CLDEMOTE(_) (((_) >> 25) & 0x01)
UINT32 Reserved3 : 1;
/**
* [Bit 27] Supports MOVDIRI if 1.
*/
UINT32 Movdiri : 1;
#define CPUID_ECX_MOVDIRI_BIT 27
#define CPUID_ECX_MOVDIRI_FLAG 0x8000000
#define CPUID_ECX_MOVDIRI_MASK 0x01
#define CPUID_ECX_MOVDIRI(_) (((_) >> 27) & 0x01)
/**
* [Bit 28] Supports MOVDIR64B if 1.
*/
UINT32 Movdir64B : 1;
#define CPUID_ECX_MOVDIR64B_BIT 28
#define CPUID_ECX_MOVDIR64B_FLAG 0x10000000
#define CPUID_ECX_MOVDIR64B_MASK 0x01
#define CPUID_ECX_MOVDIR64B(_) (((_) >> 28) & 0x01)
UINT32 Reserved4 : 1;
/**
* [Bit 30] Supports SGX Launch Configuration if 1.
*/
UINT32 SgxLc : 1;
#define CPUID_ECX_SGX_LC_BIT 30
#define CPUID_ECX_SGX_LC_FLAG 0x40000000
#define CPUID_ECX_SGX_LC_MASK 0x01
#define CPUID_ECX_SGX_LC(_) (((_) >> 30) & 0x01)
/**
* [Bit 31] Supports protection keys for supervisor-mode pages if 1.
*/
UINT32 Pks : 1;
#define CPUID_ECX_PKS_BIT 31
#define CPUID_ECX_PKS_FLAG 0x80000000
#define CPUID_ECX_PKS_MASK 0x01
#define CPUID_ECX_PKS(_) (((_) >> 31) & 0x01)
};
UINT32 AsUInt;
} Ecx;
union
{
struct
{
UINT32 Reserved1 : 2;
/**
* [Bit 2] (Intel(R) Xeon Phi(TM) only.)
*/
UINT32 Avx5124Vnniw : 1;
#define CPUID_EDX_AVX512_4VNNIW_BIT 2
#define CPUID_EDX_AVX512_4VNNIW_FLAG 0x04
#define CPUID_EDX_AVX512_4VNNIW_MASK 0x01
#define CPUID_EDX_AVX512_4VNNIW(_) (((_) >> 2) & 0x01)
/**
* [Bit 3] (Intel(R) Xeon Phi(TM) only.)
*/
UINT32 Avx5124Fmaps : 1;
#define CPUID_EDX_AVX512_4FMAPS_BIT 3
#define CPUID_EDX_AVX512_4FMAPS_FLAG 0x08
#define CPUID_EDX_AVX512_4FMAPS_MASK 0x01
#define CPUID_EDX_AVX512_4FMAPS(_) (((_) >> 3) & 0x01)
/**
* [Bit 4] Fast Short REP MOV.
*/
UINT32 FastShortRepMov : 1;
#define CPUID_EDX_FAST_SHORT_REP_MOV_BIT 4
#define CPUID_EDX_FAST_SHORT_REP_MOV_FLAG 0x10
#define CPUID_EDX_FAST_SHORT_REP_MOV_MASK 0x01
#define CPUID_EDX_FAST_SHORT_REP_MOV(_) (((_) >> 4) & 0x01)
UINT32 Reserved2 : 3;
/**
* [Bit 8] AVX512_VP2INTERSECT.
*/
UINT32 Avx512Vp2Intersect : 1;
#define CPUID_EDX_AVX512_VP2INTERSECT_BIT 8
#define CPUID_EDX_AVX512_VP2INTERSECT_FLAG 0x100
#define CPUID_EDX_AVX512_VP2INTERSECT_MASK 0x01
#define CPUID_EDX_AVX512_VP2INTERSECT(_) (((_) >> 8) & 0x01)
UINT32 Reserved3 : 1;
/**
* [Bit 10] MD_CLEAR supported.
*/
UINT32 MdClear : 1;
#define CPUID_EDX_MD_CLEAR_BIT 10
#define CPUID_EDX_MD_CLEAR_FLAG 0x400
#define CPUID_EDX_MD_CLEAR_MASK 0x01
#define CPUID_EDX_MD_CLEAR(_) (((_) >> 10) & 0x01)
UINT32 Reserved4 : 3;
/**
* [Bit 14] SERIALIZE supported.
*/
UINT32 Serialize : 1;
#define CPUID_EDX_SERIALIZE_BIT 14
#define CPUID_EDX_SERIALIZE_FLAG 0x4000
#define CPUID_EDX_SERIALIZE_MASK 0x01
#define CPUID_EDX_SERIALIZE(_) (((_) >> 14) & 0x01)
/**
* [Bit 15] If 1, the processor is identified as a hybrid part.
*/
UINT32 Hybrid : 1;
#define CPUID_EDX_HYBRID_BIT 15
#define CPUID_EDX_HYBRID_FLAG 0x8000
#define CPUID_EDX_HYBRID_MASK 0x01
#define CPUID_EDX_HYBRID(_) (((_) >> 15) & 0x01)
UINT32 Reserved5 : 2;
/**
* [Bit 18] Supports PCONFIG if 1.
*/
UINT32 Pconfig : 1;
#define CPUID_EDX_PCONFIG_BIT 18
#define CPUID_EDX_PCONFIG_FLAG 0x40000
#define CPUID_EDX_PCONFIG_MASK 0x01
#define CPUID_EDX_PCONFIG(_) (((_) >> 18) & 0x01)
UINT32 Reserved6 : 1;
/**
* [Bit 20] Supports CET indirect branch tracking features if 1. Processors
* that set this bit define bits 5:2 and bits 63:10 of the IA32_U_CET and
* IA32_S_CET MSRs.
*/
UINT32 CetIbt : 1;
#define CPUID_EDX_CET_IBT_BIT 20
#define CPUID_EDX_CET_IBT_FLAG 0x100000
#define CPUID_EDX_CET_IBT_MASK 0x01
#define CPUID_EDX_CET_IBT(_) (((_) >> 20) & 0x01)
UINT32 Reserved7 : 5;
/**
* [Bit 26] Enumerates support for indirect branch restricted speculation
* (IBRS) and the indirect branch predictor barrier (IBPB). Processors that
* set this bit support the IA32_SPEC_CTRL MSR and the IA32_PRED_CMD MSR.
* They allow software to set IA32_SPEC_CTRL[0] (IBRS) and IA32_PRED_CMD[0]
* (IBPB).
*/
UINT32 IbrsIbpb : 1;
#define CPUID_EDX_IBRS_IBPB_BIT 26
#define CPUID_EDX_IBRS_IBPB_FLAG 0x4000000
#define CPUID_EDX_IBRS_IBPB_MASK 0x01
#define CPUID_EDX_IBRS_IBPB(_) (((_) >> 26) & 0x01)
/**
* [Bit 27] Enumerates support for single thread indirect branch predictors
* (STIBP). Processors that set this bit support the IA32_SPEC_CTRL MSR.
* They allow software to set IA32_SPEC_CTRL[1] (STIBP).
*/
UINT32 Stibp : 1;
#define CPUID_EDX_STIBP_BIT 27
#define CPUID_EDX_STIBP_FLAG 0x8000000
#define CPUID_EDX_STIBP_MASK 0x01
#define CPUID_EDX_STIBP(_) (((_) >> 27) & 0x01)
/**
* [Bit 28] Enumerates support for L1D_FLUSH. Processors that set this bit
* support the IA32_FLUSH_CMD MSR. They allow software to set
* IA32_FLUSH_CMD[0] (L1D_FLUSH).
*/
UINT32 L1DFlush : 1;
#define CPUID_EDX_L1D_FLUSH_BIT 28
#define CPUID_EDX_L1D_FLUSH_FLAG 0x10000000
#define CPUID_EDX_L1D_FLUSH_MASK 0x01
#define CPUID_EDX_L1D_FLUSH(_) (((_) >> 28) & 0x01)
/**
* [Bit 29] Enumerates support for the IA32_ARCH_CAPABILITIES MSR.
*/
UINT32 Ia32ArchCapabilities : 1;
#define CPUID_EDX_IA32_ARCH_CAPABILITIES_BIT 29
#define CPUID_EDX_IA32_ARCH_CAPABILITIES_FLAG 0x20000000
#define CPUID_EDX_IA32_ARCH_CAPABILITIES_MASK 0x01
#define CPUID_EDX_IA32_ARCH_CAPABILITIES(_) (((_) >> 29) & 0x01)
/**
* [Bit 30] Enumerates support for the IA32_CORE_CAPABILITIES MSR.
*/
UINT32 Ia32CoreCapabilities : 1;
#define CPUID_EDX_IA32_CORE_CAPABILITIES_BIT 30
#define CPUID_EDX_IA32_CORE_CAPABILITIES_FLAG 0x40000000
#define CPUID_EDX_IA32_CORE_CAPABILITIES_MASK 0x01
#define CPUID_EDX_IA32_CORE_CAPABILITIES(_) (((_) >> 30) & 0x01)
/**
* [Bit 31] Enumerates support for Speculative Store Bypass Disable (SSBD).
* Processors that set this bit support the IA32_SPEC_CTRL MSR. They allow
* software to set IA32_SPEC_CTRL[2] (SSBD).
*/
UINT32 Ssbd : 1;
#define CPUID_EDX_SSBD_BIT 31
#define CPUID_EDX_SSBD_FLAG 0x80000000
#define CPUID_EDX_SSBD_MASK 0x01
#define CPUID_EDX_SSBD(_) (((_) >> 31) & 0x01)
};
UINT32 AsUInt;
} Edx;
} CPUID_EAX_07;
/**
* @brief Direct Cache Access Information Leaf
*
* When CPUID executes with EAX set to 09H, the processor returns information about Direct Cache
* Access capabilities.
*/
#define CPUID_DIRECT_CACHE_ACCESS_INFORMATION 0x00000009
typedef struct
{
/* Leaf 09H: only EAX carries data (IA32_PLATFORM_DCA_CAP[31:0]);
 * EBX, ECX and EDX are all reserved for this leaf. */
union
{
struct
{
/**
* [Bits 31:0] Value of bits [31:0] of IA32_PLATFORM_DCA_CAP MSR (address
* 1F8H).
*/
UINT32 Ia32PlatformDcaCap : 32;
#define CPUID_EAX_IA32_PLATFORM_DCA_CAP_BIT 0
#define CPUID_EAX_IA32_PLATFORM_DCA_CAP_FLAG 0xFFFFFFFF
#define CPUID_EAX_IA32_PLATFORM_DCA_CAP_MASK 0xFFFFFFFF
#define CPUID_EAX_IA32_PLATFORM_DCA_CAP(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bits 31:0] EBX is reserved.
*/
UINT32 Reserved : 32;
#define CPUID_EBX_RESERVED_BIT 0
#define CPUID_EBX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EBX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EBX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
* [Bits 31:0] ECX is reserved.
*/
UINT32 Reserved : 32;
#define CPUID_ECX_RESERVED_BIT 0
#define CPUID_ECX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_ECX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_ECX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
* [Bits 31:0] EDX is reserved.
*/
UINT32 Reserved : 32;
#define CPUID_EDX_RESERVED_BIT 0
#define CPUID_EDX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EDX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EDX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Edx;
} CPUID_EAX_09;
/**
* @brief Architectural Performance Monitoring Leaf
*
* When CPUID executes with EAX set to 0AH, the processor returns information about support for
* architectural performance monitoring capabilities. Architectural performance monitoring is
* supported if the version ID is greater than Pn 0. For each version of architectural performance
* monitoring capability, software must enumerate this leaf to discover the programming facilities
* and the architectural performance events available in the processor.
*
* @see Vol3C[23(Introduction to Virtual-Machine Extensions)]
*/
#define CPUID_ARCHITECTURAL_PERFORMANCE_MONITORING 0x0000000A
typedef struct
{
/* Leaf 0AH: EAX describes the PMU version ID and general-purpose counter
 * geometry, EBX is a bit vector of architectural events that are NOT
 * available (1 = unavailable), EDX describes fixed-function counters
 * (meaningful when the version ID in EAX > 1); ECX is reserved. */
union
{
struct
{
/**
* [Bits 7:0] Version ID of architectural performance monitoring.
*/
UINT32 VersionIdOfArchitecturalPerformanceMonitoring : 8;
#define CPUID_EAX_VERSION_ID_OF_ARCHITECTURAL_PERFORMANCE_MONITORING_BIT 0
#define CPUID_EAX_VERSION_ID_OF_ARCHITECTURAL_PERFORMANCE_MONITORING_FLAG 0xFF
#define CPUID_EAX_VERSION_ID_OF_ARCHITECTURAL_PERFORMANCE_MONITORING_MASK 0xFF
#define CPUID_EAX_VERSION_ID_OF_ARCHITECTURAL_PERFORMANCE_MONITORING(_) (((_) >> 0) & 0xFF)
/**
* [Bits 15:8] Number of general-purpose performance monitoring counter per
* logical processor.
*/
UINT32 NumberOfPerformanceMonitoringCounterPerLogicalProcessor : 8;
#define CPUID_EAX_NUMBER_OF_PERFORMANCE_MONITORING_COUNTER_PER_LOGICAL_PROCESSOR_BIT 8
#define CPUID_EAX_NUMBER_OF_PERFORMANCE_MONITORING_COUNTER_PER_LOGICAL_PROCESSOR_FLAG 0xFF00
#define CPUID_EAX_NUMBER_OF_PERFORMANCE_MONITORING_COUNTER_PER_LOGICAL_PROCESSOR_MASK 0xFF
#define CPUID_EAX_NUMBER_OF_PERFORMANCE_MONITORING_COUNTER_PER_LOGICAL_PROCESSOR(_) \
(((_) >> 8) & 0xFF)
/**
* [Bits 23:16] Bit width of general-purpose, performance monitoring
* counter.
*/
UINT32 BitWidthOfPerformanceMonitoringCounter : 8;
#define CPUID_EAX_BIT_WIDTH_OF_PERFORMANCE_MONITORING_COUNTER_BIT 16
#define CPUID_EAX_BIT_WIDTH_OF_PERFORMANCE_MONITORING_COUNTER_FLAG 0xFF0000
#define CPUID_EAX_BIT_WIDTH_OF_PERFORMANCE_MONITORING_COUNTER_MASK 0xFF
#define CPUID_EAX_BIT_WIDTH_OF_PERFORMANCE_MONITORING_COUNTER(_) (((_) >> 16) & 0xFF)
/**
* [Bits 31:24] Length of EBX bit vector to enumerate architectural
* performance monitoring events.
*/
UINT32 EbxBitVectorLength : 8;
#define CPUID_EAX_EBX_BIT_VECTOR_LENGTH_BIT 24
#define CPUID_EAX_EBX_BIT_VECTOR_LENGTH_FLAG 0xFF000000
#define CPUID_EAX_EBX_BIT_VECTOR_LENGTH_MASK 0xFF
#define CPUID_EAX_EBX_BIT_VECTOR_LENGTH(_) (((_) >> 24) & 0xFF)
};
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bit 0] Core cycle event not available if 1.
*/
UINT32 CoreCycleEventNotAvailable : 1;
#define CPUID_EBX_CORE_CYCLE_EVENT_NOT_AVAILABLE_BIT 0
#define CPUID_EBX_CORE_CYCLE_EVENT_NOT_AVAILABLE_FLAG 0x01
#define CPUID_EBX_CORE_CYCLE_EVENT_NOT_AVAILABLE_MASK 0x01
#define CPUID_EBX_CORE_CYCLE_EVENT_NOT_AVAILABLE(_) (((_) >> 0) & 0x01)
/**
* [Bit 1] Instruction retired event not available if 1.
*/
UINT32 InstructionRetiredEventNotAvailable : 1;
#define CPUID_EBX_INSTRUCTION_RETIRED_EVENT_NOT_AVAILABLE_BIT 1
#define CPUID_EBX_INSTRUCTION_RETIRED_EVENT_NOT_AVAILABLE_FLAG 0x02
#define CPUID_EBX_INSTRUCTION_RETIRED_EVENT_NOT_AVAILABLE_MASK 0x01
#define CPUID_EBX_INSTRUCTION_RETIRED_EVENT_NOT_AVAILABLE(_) (((_) >> 1) & 0x01)
/**
* [Bit 2] Reference cycles event not available if 1.
*/
UINT32 ReferenceCyclesEventNotAvailable : 1;
#define CPUID_EBX_REFERENCE_CYCLES_EVENT_NOT_AVAILABLE_BIT 2
#define CPUID_EBX_REFERENCE_CYCLES_EVENT_NOT_AVAILABLE_FLAG 0x04
#define CPUID_EBX_REFERENCE_CYCLES_EVENT_NOT_AVAILABLE_MASK 0x01
#define CPUID_EBX_REFERENCE_CYCLES_EVENT_NOT_AVAILABLE(_) (((_) >> 2) & 0x01)
/**
* [Bit 3] Last-level cache reference event not available if 1.
*/
UINT32 LastLevelCacheReferenceEventNotAvailable : 1;
#define CPUID_EBX_LAST_LEVEL_CACHE_REFERENCE_EVENT_NOT_AVAILABLE_BIT 3
#define CPUID_EBX_LAST_LEVEL_CACHE_REFERENCE_EVENT_NOT_AVAILABLE_FLAG 0x08
#define CPUID_EBX_LAST_LEVEL_CACHE_REFERENCE_EVENT_NOT_AVAILABLE_MASK 0x01
#define CPUID_EBX_LAST_LEVEL_CACHE_REFERENCE_EVENT_NOT_AVAILABLE(_) (((_) >> 3) & 0x01)
/**
* [Bit 4] Last-level cache misses event not available if 1.
*/
UINT32 LastLevelCacheMissesEventNotAvailable : 1;
#define CPUID_EBX_LAST_LEVEL_CACHE_MISSES_EVENT_NOT_AVAILABLE_BIT 4
#define CPUID_EBX_LAST_LEVEL_CACHE_MISSES_EVENT_NOT_AVAILABLE_FLAG 0x10
#define CPUID_EBX_LAST_LEVEL_CACHE_MISSES_EVENT_NOT_AVAILABLE_MASK 0x01
#define CPUID_EBX_LAST_LEVEL_CACHE_MISSES_EVENT_NOT_AVAILABLE(_) (((_) >> 4) & 0x01)
/**
* [Bit 5] Branch instruction retired event not available if 1.
*/
UINT32 BranchInstructionRetiredEventNotAvailable : 1;
#define CPUID_EBX_BRANCH_INSTRUCTION_RETIRED_EVENT_NOT_AVAILABLE_BIT 5
#define CPUID_EBX_BRANCH_INSTRUCTION_RETIRED_EVENT_NOT_AVAILABLE_FLAG 0x20
#define CPUID_EBX_BRANCH_INSTRUCTION_RETIRED_EVENT_NOT_AVAILABLE_MASK 0x01
#define CPUID_EBX_BRANCH_INSTRUCTION_RETIRED_EVENT_NOT_AVAILABLE(_) (((_) >> 5) & 0x01)
/**
* [Bit 6] Branch mispredict retired event not available if 1.
*/
UINT32 BranchMispredictRetiredEventNotAvailable : 1;
#define CPUID_EBX_BRANCH_MISPREDICT_RETIRED_EVENT_NOT_AVAILABLE_BIT 6
#define CPUID_EBX_BRANCH_MISPREDICT_RETIRED_EVENT_NOT_AVAILABLE_FLAG 0x40
#define CPUID_EBX_BRANCH_MISPREDICT_RETIRED_EVENT_NOT_AVAILABLE_MASK 0x01
#define CPUID_EBX_BRANCH_MISPREDICT_RETIRED_EVENT_NOT_AVAILABLE(_) (((_) >> 6) & 0x01)
UINT32 Reserved1 : 25;
};
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
* [Bits 31:0] ECX is reserved.
*/
UINT32 Reserved : 32;
#define CPUID_ECX_RESERVED_BIT 0
#define CPUID_ECX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_ECX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_ECX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
* [Bits 4:0] Number of fixed-function performance counters (if Version ID >
* 1).
*/
UINT32 NumberOfFixedFunctionPerformanceCounters : 5;
#define CPUID_EDX_NUMBER_OF_FIXED_FUNCTION_PERFORMANCE_COUNTERS_BIT 0
#define CPUID_EDX_NUMBER_OF_FIXED_FUNCTION_PERFORMANCE_COUNTERS_FLAG 0x1F
#define CPUID_EDX_NUMBER_OF_FIXED_FUNCTION_PERFORMANCE_COUNTERS_MASK 0x1F
#define CPUID_EDX_NUMBER_OF_FIXED_FUNCTION_PERFORMANCE_COUNTERS(_) (((_) >> 0) & 0x1F)
/**
* [Bits 12:5] Bit width of fixed-function performance counters (if Version
* ID > 1).
*/
UINT32 BitWidthOfFixedFunctionPerformanceCounters : 8;
#define CPUID_EDX_BIT_WIDTH_OF_FIXED_FUNCTION_PERFORMANCE_COUNTERS_BIT 5
#define CPUID_EDX_BIT_WIDTH_OF_FIXED_FUNCTION_PERFORMANCE_COUNTERS_FLAG 0x1FE0
#define CPUID_EDX_BIT_WIDTH_OF_FIXED_FUNCTION_PERFORMANCE_COUNTERS_MASK 0xFF
#define CPUID_EDX_BIT_WIDTH_OF_FIXED_FUNCTION_PERFORMANCE_COUNTERS(_) (((_) >> 5) & 0xFF)
UINT32 Reserved1 : 2;
/**
* [Bit 15] AnyThread deprecation.
*/
UINT32 AnyThreadDeprecation : 1;
#define CPUID_EDX_ANY_THREAD_DEPRECATION_BIT 15
#define CPUID_EDX_ANY_THREAD_DEPRECATION_FLAG 0x8000
#define CPUID_EDX_ANY_THREAD_DEPRECATION_MASK 0x01
#define CPUID_EDX_ANY_THREAD_DEPRECATION(_) (((_) >> 15) & 0x01)
UINT32 Reserved2 : 16;
};
UINT32 AsUInt;
} Edx;
} CPUID_EAX_0A;
/**
* @brief Extended Topology Enumeration Leaf
*
* When CPUID executes with EAX set to 0BH, the processor returns information about extended
* topology enumeration data. Software must detect the presence of CPUID leaf 0BH by verifying
* - the highest leaf index supported by CPUID is >= 0BH, and
* - CPUID.0BH:EBX[15:0] reports a non-zero value.
*
* @note Most of Leaf 0BH output depends on the initial value in ECX. The EDX output of leaf 0BH is
* always valid and does not vary with input value in ECX. Output value in ECX[7:0] always equals
* input value in ECX[7:0]. Sub-leaf index 0 enumerates SMT level. Each subsequent higher sub-leaf
* index enumerates a higher-level topological entity in hierarchical order. For sub-leaves that
* return an invalid level-type of 0 in ECX[15:8]; EAX and EBX will return 0. If an input value n in
* ECX returns the invalid level-type of 0 in ECX[15:8], other input values with ECX > n also return
* 0 in ECX[15:8].
*/
#define CPUID_EXTENDED_TOPOLOGY 0x0000000B
typedef struct
{
/* Leaf 0BH (sub-leaf selected by the initial ECX value): EAX gives the
 * x2APIC-ID shift for this topology level, EBX the factory-shipped logical
 * processor count at this level, ECX echoes the level number plus a level
 * type (0 = invalid, 1 = SMT, 2 = Core), and EDX the current x2APIC ID. */
union
{
struct
{
/**
* [Bits 4:0] Number of bits to shift right on x2APIC ID to get a unique
* topology ID of the next level type. All logical processors with the same
* next level ID share current level.
*
* @note Software should use this field (EAX[4:0]) to enumerate processor
* topology of the system.
*/
UINT32 X2ApicIdToUniqueTopologyIdShift : 5;
#define CPUID_EAX_X2APIC_ID_TO_UNIQUE_TOPOLOGY_ID_SHIFT_BIT 0
#define CPUID_EAX_X2APIC_ID_TO_UNIQUE_TOPOLOGY_ID_SHIFT_FLAG 0x1F
#define CPUID_EAX_X2APIC_ID_TO_UNIQUE_TOPOLOGY_ID_SHIFT_MASK 0x1F
#define CPUID_EAX_X2APIC_ID_TO_UNIQUE_TOPOLOGY_ID_SHIFT(_) (((_) >> 0) & 0x1F)
UINT32 Reserved1 : 27;
};
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bits 15:0] Number of logical processors at this level type. The number
* reflects configuration as shipped by Intel.
*
* @note Software must not use EBX[15:0] to enumerate processor topology of
* the system. This value in this field (EBX[15:0]) is only intended for
* display/diagnostic purposes. The actual number of logical processors
* available to BIOS/OS/Applications may be different from the value of
* EBX[15:0], depending on software and platform hardware configurations.
*/
UINT32 NumberOfLogicalProcessorsAtThisLevelType : 16;
#define CPUID_EBX_NUMBER_OF_LOGICAL_PROCESSORS_AT_THIS_LEVEL_TYPE_BIT 0
#define CPUID_EBX_NUMBER_OF_LOGICAL_PROCESSORS_AT_THIS_LEVEL_TYPE_FLAG 0xFFFF
#define CPUID_EBX_NUMBER_OF_LOGICAL_PROCESSORS_AT_THIS_LEVEL_TYPE_MASK 0xFFFF
#define CPUID_EBX_NUMBER_OF_LOGICAL_PROCESSORS_AT_THIS_LEVEL_TYPE(_) (((_) >> 0) & 0xFFFF)
UINT32 Reserved1 : 16;
};
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
* [Bits 7:0] Level number. Same value in ECX input.
*/
UINT32 LevelNumber : 8;
#define CPUID_ECX_LEVEL_NUMBER_BIT 0
#define CPUID_ECX_LEVEL_NUMBER_FLAG 0xFF
#define CPUID_ECX_LEVEL_NUMBER_MASK 0xFF
#define CPUID_ECX_LEVEL_NUMBER(_) (((_) >> 0) & 0xFF)
/**
* [Bits 15:8] Level type.
*
* @note The value of the "level type" field is not related to level numbers
* in any way, higher "level type" values do not mean higher levels. Level
* type field has the following encoding:
* - 0: Invalid.
* - 1: SMT.
* - 2: Core.
* - 3-255: Reserved.
*/
UINT32 LevelType : 8;
#define CPUID_ECX_LEVEL_TYPE_BIT 8
#define CPUID_ECX_LEVEL_TYPE_FLAG 0xFF00
#define CPUID_ECX_LEVEL_TYPE_MASK 0xFF
#define CPUID_ECX_LEVEL_TYPE(_) (((_) >> 8) & 0xFF)
UINT32 Reserved1 : 16;
};
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
* [Bits 31:0] x2APIC ID of the current logical processor.
*/
UINT32 X2ApicId : 32;
#define CPUID_EDX_X2APIC_ID_BIT 0
#define CPUID_EDX_X2APIC_ID_FLAG 0xFFFFFFFF
#define CPUID_EDX_X2APIC_ID_MASK 0xFFFFFFFF
#define CPUID_EDX_X2APIC_ID(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Edx;
} CPUID_EAX_0B;
/**
* @defgroup CPUID_EAX_0D \
* EAX = 0x0D
*
* When CPUID executes with EAX set to 0DH and ECX = 0, the processor returns information about the
* bit-vector representation of all processor state extensions that are supported in the processor
* and storage size requirements of the XSAVE/XRSTOR area. When CPUID executes with EAX set to 0DH
* and ECX = n (n > 1, and is a valid sub-leaf index), the processor returns information about the
* size and offset of each processor extended state save area within the XSAVE/XRSTOR area. Software
* can use the forward-extendable technique depicted below to query the valid sub-leaves and obtain
* size and offset information for each processor extended state save area:
For i = 2 to 62 //
* sub-leaf 1 is reserved IF (CPUID.(EAX=0DH, ECX=0):VECTOR[i] = 1) // VECTOR is the 64-bit value of
* EDX:EAX Execute CPUID.(EAX=0DH, ECX = i) to examine size and offset for sub-leaf i; FI;
* @{
*/
#define CPUID_EXTENDED_STATE_INFORMATION 0x0000000D
/**
* @brief Processor Extended State Enumeration Main Leaf (EAX = 0DH, ECX = 0)
*/
typedef struct
{
/* Leaf 0DH, sub-leaf 0 (main leaf): EAX/EDX together form the 64-bit mask
 * of XCR0 bits the processor supports; EBX is the XSAVE area size for the
 * features currently enabled in XCR0, ECX the size for all supported
 * features. Note MpxState and Avx512State are multi-bit fields (2 and 3
 * bits respectively) covering their component state groups. */
union
{
struct
{
/**
* [Bit 0] x87 state.
*/
UINT32 X87State : 1;
#define CPUID_EAX_X87_STATE_BIT 0
#define CPUID_EAX_X87_STATE_FLAG 0x01
#define CPUID_EAX_X87_STATE_MASK 0x01
#define CPUID_EAX_X87_STATE(_) (((_) >> 0) & 0x01)
/**
* [Bit 1] SSE state.
*/
UINT32 SseState : 1;
#define CPUID_EAX_SSE_STATE_BIT 1
#define CPUID_EAX_SSE_STATE_FLAG 0x02
#define CPUID_EAX_SSE_STATE_MASK 0x01
#define CPUID_EAX_SSE_STATE(_) (((_) >> 1) & 0x01)
/**
* [Bit 2] AVX state.
*/
UINT32 AvxState : 1;
#define CPUID_EAX_AVX_STATE_BIT 2
#define CPUID_EAX_AVX_STATE_FLAG 0x04
#define CPUID_EAX_AVX_STATE_MASK 0x01
#define CPUID_EAX_AVX_STATE(_) (((_) >> 2) & 0x01)
/**
* [Bits 4:3] MPX state.
*/
UINT32 MpxState : 2;
#define CPUID_EAX_MPX_STATE_BIT 3
#define CPUID_EAX_MPX_STATE_FLAG 0x18
#define CPUID_EAX_MPX_STATE_MASK 0x03
#define CPUID_EAX_MPX_STATE(_) (((_) >> 3) & 0x03)
/**
* [Bits 7:5] AVX-512 state.
*/
UINT32 Avx512State : 3;
#define CPUID_EAX_AVX_512_STATE_BIT 5
#define CPUID_EAX_AVX_512_STATE_FLAG 0xE0
#define CPUID_EAX_AVX_512_STATE_MASK 0x07
#define CPUID_EAX_AVX_512_STATE(_) (((_) >> 5) & 0x07)
/**
* [Bit 8] Used for IA32_XSS.
*/
UINT32 UsedForIa32Xss1 : 1;
#define CPUID_EAX_USED_FOR_IA32_XSS_1_BIT 8
#define CPUID_EAX_USED_FOR_IA32_XSS_1_FLAG 0x100
#define CPUID_EAX_USED_FOR_IA32_XSS_1_MASK 0x01
#define CPUID_EAX_USED_FOR_IA32_XSS_1(_) (((_) >> 8) & 0x01)
/**
* [Bit 9] PKRU state.
*/
UINT32 PkruState : 1;
#define CPUID_EAX_PKRU_STATE_BIT 9
#define CPUID_EAX_PKRU_STATE_FLAG 0x200
#define CPUID_EAX_PKRU_STATE_MASK 0x01
#define CPUID_EAX_PKRU_STATE(_) (((_) >> 9) & 0x01)
UINT32 Reserved1 : 3;
/**
* [Bit 13] Used for IA32_XSS.
*/
UINT32 UsedForIa32Xss2 : 1;
#define CPUID_EAX_USED_FOR_IA32_XSS_2_BIT 13
#define CPUID_EAX_USED_FOR_IA32_XSS_2_FLAG 0x2000
#define CPUID_EAX_USED_FOR_IA32_XSS_2_MASK 0x01
#define CPUID_EAX_USED_FOR_IA32_XSS_2(_) (((_) >> 13) & 0x01)
UINT32 Reserved2 : 18;
};
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bits 31:0] Maximum size (bytes, from the beginning of the XSAVE/XRSTOR
* save area) required by enabled features in XCR0. May be different than
* ECX if some features at the end of the XSAVE save area are not enabled.
*/
UINT32 MaxSizeRequiredByEnabledFeaturesInXcr0 : 32;
#define CPUID_EBX_MAX_SIZE_REQUIRED_BY_ENABLED_FEATURES_IN_XCR0_BIT 0
#define CPUID_EBX_MAX_SIZE_REQUIRED_BY_ENABLED_FEATURES_IN_XCR0_FLAG 0xFFFFFFFF
#define CPUID_EBX_MAX_SIZE_REQUIRED_BY_ENABLED_FEATURES_IN_XCR0_MASK 0xFFFFFFFF
#define CPUID_EBX_MAX_SIZE_REQUIRED_BY_ENABLED_FEATURES_IN_XCR0(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
* [Bits 31:0] Maximum size (bytes, from the beginning of the XSAVE/XRSTOR
* save area) of the XSAVE/XRSTOR save area required by all supported
* features in the processor, i.e., all the valid bit fields in XCR0.
*/
UINT32 MaxSizeOfXsaveXrstorSaveArea : 32;
#define CPUID_ECX_MAX_SIZE_OF_XSAVE_XRSTOR_SAVE_AREA_BIT 0
#define CPUID_ECX_MAX_SIZE_OF_XSAVE_XRSTOR_SAVE_AREA_FLAG 0xFFFFFFFF
#define CPUID_ECX_MAX_SIZE_OF_XSAVE_XRSTOR_SAVE_AREA_MASK 0xFFFFFFFF
#define CPUID_ECX_MAX_SIZE_OF_XSAVE_XRSTOR_SAVE_AREA(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
* [Bits 31:0] Reports the supported bits of the upper 32 bits of XCR0.
* XCR0[n+32] can be set to 1 only if EDX[n] is 1.
*/
UINT32 Xcr0SupportedBits : 32;
#define CPUID_EDX_XCR0_SUPPORTED_BITS_BIT 0
#define CPUID_EDX_XCR0_SUPPORTED_BITS_FLAG 0xFFFFFFFF
#define CPUID_EDX_XCR0_SUPPORTED_BITS_MASK 0xFFFFFFFF
#define CPUID_EDX_XCR0_SUPPORTED_BITS(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Edx;
} CPUID_EAX_0D_ECX_00;
/**
 * @brief Processor Extended State Enumeration Sub-leaf (EAX = 0DH, ECX = 1)
 */
typedef struct
{
/* Leaf 0DH, sub-leaf 1 (per the CPUID_EAX_0D_ECX_01 name and the XSAVE
 * feature bits below): EAX enumerates XSAVEC/XGETBV(ECX=1)/XSAVES support,
 * EBX the XSAVE area size for states enabled in XCR0 | IA32_XSS, and
 * ECX/EDX the supported IA32_XSS bits. */
union
{
struct
{
UINT32 Reserved1 : 1;
/**
* [Bit 1] Supports XSAVEC and the compacted form of XRSTOR if set.
*/
UINT32 SupportsXsavecAndCompactedXrstor : 1;
#define CPUID_EAX_SUPPORTS_XSAVEC_AND_COMPACTED_XRSTOR_BIT 1
#define CPUID_EAX_SUPPORTS_XSAVEC_AND_COMPACTED_XRSTOR_FLAG 0x02
#define CPUID_EAX_SUPPORTS_XSAVEC_AND_COMPACTED_XRSTOR_MASK 0x01
#define CPUID_EAX_SUPPORTS_XSAVEC_AND_COMPACTED_XRSTOR(_) (((_) >> 1) & 0x01)
/**
* [Bit 2] Supports XGETBV with ECX = 1 if set.
*/
UINT32 SupportsXgetbvWithEcx1 : 1;
#define CPUID_EAX_SUPPORTS_XGETBV_WITH_ECX_1_BIT 2
#define CPUID_EAX_SUPPORTS_XGETBV_WITH_ECX_1_FLAG 0x04
#define CPUID_EAX_SUPPORTS_XGETBV_WITH_ECX_1_MASK 0x01
#define CPUID_EAX_SUPPORTS_XGETBV_WITH_ECX_1(_) (((_) >> 2) & 0x01)
/**
* [Bit 3] Supports XSAVES/XRSTORS and IA32_XSS if set.
*/
UINT32 SupportsXsaveXrstorAndIa32Xss : 1;
#define CPUID_EAX_SUPPORTS_XSAVE_XRSTOR_AND_IA32_XSS_BIT 3
#define CPUID_EAX_SUPPORTS_XSAVE_XRSTOR_AND_IA32_XSS_FLAG 0x08
#define CPUID_EAX_SUPPORTS_XSAVE_XRSTOR_AND_IA32_XSS_MASK 0x01
#define CPUID_EAX_SUPPORTS_XSAVE_XRSTOR_AND_IA32_XSS(_) (((_) >> 3) & 0x01)
UINT32 Reserved2 : 28;
};
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bits 31:0] The size in bytes of the XSAVE area containing all states
* enabled by XCR0 | IA32_XSS.
*/
/* NOTE(review): "Aread" below is a typo for "Area" in the generated
 * header; left unchanged because renaming the field/macros would break
 * any existing callers. */
UINT32 SizeOfXsaveAread : 32;
#define CPUID_EBX_SIZE_OF_XSAVE_AREAD_BIT 0
#define CPUID_EBX_SIZE_OF_XSAVE_AREAD_FLAG 0xFFFFFFFF
#define CPUID_EBX_SIZE_OF_XSAVE_AREAD_MASK 0xFFFFFFFF
#define CPUID_EBX_SIZE_OF_XSAVE_AREAD(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
* [Bits 7:0] Used for XCR0.
*/
UINT32 UsedForXcr01 : 8;
#define CPUID_ECX_USED_FOR_XCR0_1_BIT 0
#define CPUID_ECX_USED_FOR_XCR0_1_FLAG 0xFF
#define CPUID_ECX_USED_FOR_XCR0_1_MASK 0xFF
#define CPUID_ECX_USED_FOR_XCR0_1(_) (((_) >> 0) & 0xFF)
/**
* [Bit 8] PT state.
*/
UINT32 PtState : 1;
#define CPUID_ECX_PT_STATE_BIT 8
#define CPUID_ECX_PT_STATE_FLAG 0x100
#define CPUID_ECX_PT_STATE_MASK 0x01
#define CPUID_ECX_PT_STATE(_) (((_) >> 8) & 0x01)
/**
* [Bit 9] Used for XCR0.
*/
UINT32 UsedForXcr02 : 1;
#define CPUID_ECX_USED_FOR_XCR0_2_BIT 9
#define CPUID_ECX_USED_FOR_XCR0_2_FLAG 0x200
#define CPUID_ECX_USED_FOR_XCR0_2_MASK 0x01
#define CPUID_ECX_USED_FOR_XCR0_2(_) (((_) >> 9) & 0x01)
UINT32 Reserved1 : 1;
/**
* [Bit 11] CET user state.
*/
UINT32 CetUserState : 1;
#define CPUID_ECX_CET_USER_STATE_BIT 11
#define CPUID_ECX_CET_USER_STATE_FLAG 0x800
#define CPUID_ECX_CET_USER_STATE_MASK 0x01
#define CPUID_ECX_CET_USER_STATE(_) (((_) >> 11) & 0x01)
/**
* [Bit 12] CET supervisor state.
*/
UINT32 CetSupervisorState : 1;
#define CPUID_ECX_CET_SUPERVISOR_STATE_BIT 12
#define CPUID_ECX_CET_SUPERVISOR_STATE_FLAG 0x1000
#define CPUID_ECX_CET_SUPERVISOR_STATE_MASK 0x01
#define CPUID_ECX_CET_SUPERVISOR_STATE(_) (((_) >> 12) & 0x01)
/**
* [Bit 13] HDC state.
*/
UINT32 HdcState : 1;
#define CPUID_ECX_HDC_STATE_BIT 13
#define CPUID_ECX_HDC_STATE_FLAG 0x2000
#define CPUID_ECX_HDC_STATE_MASK 0x01
#define CPUID_ECX_HDC_STATE(_) (((_) >> 13) & 0x01)
UINT32 Reserved2 : 1;
/**
* [Bit 15] LBR state.
*/
UINT32 LbrState : 1;
#define CPUID_ECX_LBR_STATE_BIT 15
#define CPUID_ECX_LBR_STATE_FLAG 0x8000
#define CPUID_ECX_LBR_STATE_MASK 0x01
#define CPUID_ECX_LBR_STATE(_) (((_) >> 15) & 0x01)
/**
* [Bit 16] HWP state.
*/
UINT32 HwpState : 1;
#define CPUID_ECX_HWP_STATE_BIT 16
#define CPUID_ECX_HWP_STATE_FLAG 0x10000
#define CPUID_ECX_HWP_STATE_MASK 0x01
#define CPUID_ECX_HWP_STATE(_) (((_) >> 16) & 0x01)
UINT32 Reserved3 : 15;
};
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
* [Bits 31:0] Reports the supported bits of the upper 32 bits of the
* IA32_XSS MSR. IA32_XSS[n+32] can be set to 1 only if EDX[n] is 1
*/
UINT32 SupportedUpperIa32XssBits : 32;
#define CPUID_EDX_SUPPORTED_UPPER_IA32_XSS_BITS_BIT 0
#define CPUID_EDX_SUPPORTED_UPPER_IA32_XSS_BITS_FLAG 0xFFFFFFFF
#define CPUID_EDX_SUPPORTED_UPPER_IA32_XSS_BITS_MASK 0xFFFFFFFF
#define CPUID_EDX_SUPPORTED_UPPER_IA32_XSS_BITS(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Edx;
} CPUID_EAX_0D_ECX_01;
/**
* @brief Processor Extended State Enumeration Sub-leaves (EAX = 0DH, ECX = n, n > 1)
*
* @note Leaf 0DH output depends on the initial value in ECX. Each sub-leaf index (starting at
* position 2) is supported if it corresponds to a supported bit in either the XCR0 register or the
* IA32_XSS MSR. If ECX contains an invalid sub-leaf index, EAX/EBX/ECX/EDX return 0. Sub-leaf n (0
* <= n <= 31) is invalid if sub-leaf 0 returns 0 in EAX[n] and sub-leaf 1 returns 0 in ECX[n].
* Sub-leaf n (32 <= n <= 63) is invalid if sub-leaf 0 returns 0 in EDX[n-32] and sub-leaf 1 returns
* 0 in EDX[n-32].
*/
typedef struct
{
/* Naming convention: every bitfield FOO below has companion macros
FOO_BIT (bit position), FOO_FLAG (in-place mask), FOO_MASK (post-shift
mask) and FOO(_) (extractor: shift then mask). */
union
{
struct
{
/**
* [Bits 31:0] The size in bytes (from the offset specified in EBX) of the
* save area for an extended state feature associated with a valid sub-leaf
* index, n.
*/
/* NOTE(review): the generated field name "Ia32PlatformDcaCap" is
misleading for leaf 0DH sub-leaf n — per the comment above it reports the
save-area size. Name kept unchanged for source compatibility. */
UINT32 Ia32PlatformDcaCap : 32;
#define CPUID_EAX_IA32_PLATFORM_DCA_CAP_BIT 0
#define CPUID_EAX_IA32_PLATFORM_DCA_CAP_FLAG 0xFFFFFFFF
#define CPUID_EAX_IA32_PLATFORM_DCA_CAP_MASK 0xFFFFFFFF
#define CPUID_EAX_IA32_PLATFORM_DCA_CAP(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bits 31:0] The offset in bytes of this extended state component's save
* area from the beginning of the XSAVE/XRSTOR area. This field reports 0 if
* the sub-leaf index, n, does not map to a valid bit in the XCR0 register.
*/
UINT32 Reserved : 32;
#define CPUID_EBX_RESERVED_BIT 0
#define CPUID_EBX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EBX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EBX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
* [Bit 0] Is set if the bit n (corresponding to the sub-leaf index) is
* supported in the IA32_XSS MSR; it is clear if bit n is instead supported
* in XCR0.
*/
UINT32 Ecx2 : 1;
#define CPUID_ECX_ECX_2_BIT 0
#define CPUID_ECX_ECX_2_FLAG 0x01
#define CPUID_ECX_ECX_2_MASK 0x01
#define CPUID_ECX_ECX_2(_) (((_) >> 0) & 0x01)
/**
* [Bit 1] Is set if, when the compacted format of an XSAVE area is used,
* this extended state component located on the next 64-byte boundary
* following the preceding state component (otherwise, it is located
* immediately following the preceding state component).
*/
UINT32 Ecx1 : 1;
#define CPUID_ECX_ECX_1_BIT 1
#define CPUID_ECX_ECX_1_FLAG 0x02
#define CPUID_ECX_ECX_1_MASK 0x01
#define CPUID_ECX_ECX_1(_) (((_) >> 1) & 0x01)
UINT32 Reserved1 : 30;
};
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
* [Bits 31:0] This field reports 0 if the sub-leaf index, n, is invalid;
* otherwise it is reserved.
*/
UINT32 Reserved : 32;
#define CPUID_EDX_RESERVED_BIT 0
#define CPUID_EDX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EDX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EDX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Edx;
} CPUID_EAX_0D_ECX_N;
/**
* @}
*/
/**
* @defgroup CPUID_EAX_0F \
* EAX = 0x0F
*
* When CPUID executes with EAX set to 0FH and ECX = 0, the processor returns information about the
* bit-vector representation of QoS monitoring resource types that are supported in the processor
* and maximum range of RMID values the processor can use to monitor of any supported resource
* types. Each bit, starting from bit 1, corresponds to a specific resource type if the bit is set.
* The bit position corresponds to the sub-leaf index (or ResID) that software must use to query QoS
* monitoring capability available for that type. See Table 3-8. When CPUID executes with EAX set to
* 0FH and ECX = n (n >= 1, and is a valid ResID), the processor returns information software can
* use to program IA32_PQR_ASSOC, IA32_QM_EVTSEL MSRs before reading QoS data from the IA32_QM_CTR
* MSR.
* @{
*/
#define CPUID_INTEL_RESOURCE_DIRECTOR_TECHNOLOGY_MONITORING_INFORMATION 0x0000000F
/**
* @brief Intel Resource Director Technology (Intel RDT) Monitoring Enumeration Sub-leaf (EAX = 0FH,
* ECX = 0)
*
* @note Leaf 0FH output depends on the initial value in ECX. Sub-leaf index 0 reports valid
* resource type starting at bit position 1 of EDX.
*/
typedef struct
{
/* CPUID output registers EAX-EDX for leaf 0FH, sub-leaf 0 (Intel RDT
monitoring enumeration). Only EBX (max RMID) and EDX bit 1 carry data. */
union
{
struct
{
/**
* [Bits 31:0] EAX is reserved.
*/
UINT32 Reserved : 32;
#define CPUID_EAX_RESERVED_BIT 0
#define CPUID_EAX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EAX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EAX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bits 31:0] Maximum range (zero-based) of RMID within this physical
* processor of all types.
*/
UINT32 RmidMaxRange : 32;
#define CPUID_EBX_RMID_MAX_RANGE_BIT 0
#define CPUID_EBX_RMID_MAX_RANGE_FLAG 0xFFFFFFFF
#define CPUID_EBX_RMID_MAX_RANGE_MASK 0xFFFFFFFF
#define CPUID_EBX_RMID_MAX_RANGE(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
* [Bits 31:0] ECX is reserved.
*/
UINT32 Reserved : 32;
#define CPUID_ECX_RESERVED_BIT 0
#define CPUID_ECX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_ECX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_ECX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ecx;
union
{
struct
{
UINT32 Reserved1 : 1;
/**
* [Bit 1] Supports L3 Cache Intel RDT Monitoring if 1.
*/
UINT32 SupportsL3CacheIntelRdtMonitoring : 1;
#define CPUID_EDX_SUPPORTS_L3_CACHE_INTEL_RDT_MONITORING_BIT 1
#define CPUID_EDX_SUPPORTS_L3_CACHE_INTEL_RDT_MONITORING_FLAG 0x02
#define CPUID_EDX_SUPPORTS_L3_CACHE_INTEL_RDT_MONITORING_MASK 0x01
#define CPUID_EDX_SUPPORTS_L3_CACHE_INTEL_RDT_MONITORING(_) (((_) >> 1) & 0x01)
UINT32 Reserved2 : 30;
};
UINT32 AsUInt;
} Edx;
} CPUID_EAX_0F_ECX_00;
/**
* @brief L3 Cache Intel RDT Monitoring Capability Enumeration Sub-leaf (EAX = 0FH, ECX = 1)
*
* @note Leaf 0FH output depends on the initial value in ECX.
*/
typedef struct
{
/* CPUID output registers EAX-EDX for leaf 0FH, sub-leaf 1 (L3 cache RDT
monitoring capability): conversion factor, max RMID and event support. */
union
{
struct
{
/**
* [Bits 31:0] EAX is reserved.
*/
UINT32 Reserved : 32;
#define CPUID_EAX_RESERVED_BIT 0
#define CPUID_EAX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EAX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EAX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bits 31:0] Conversion factor from reported IA32_QM_CTR value to
* occupancy metric (bytes).
*/
UINT32 ConversionFactor : 32;
#define CPUID_EBX_CONVERSION_FACTOR_BIT 0
#define CPUID_EBX_CONVERSION_FACTOR_FLAG 0xFFFFFFFF
#define CPUID_EBX_CONVERSION_FACTOR_MASK 0xFFFFFFFF
#define CPUID_EBX_CONVERSION_FACTOR(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
* [Bits 31:0] Maximum range (zero-based) of RMID within this physical
* processor of all types.
*/
UINT32 RmidMaxRange : 32;
#define CPUID_ECX_RMID_MAX_RANGE_BIT 0
#define CPUID_ECX_RMID_MAX_RANGE_FLAG 0xFFFFFFFF
#define CPUID_ECX_RMID_MAX_RANGE_MASK 0xFFFFFFFF
#define CPUID_ECX_RMID_MAX_RANGE(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
* [Bit 0] Supports L3 occupancy monitoring if 1.
*/
UINT32 SupportsL3OccupancyMonitoring : 1;
#define CPUID_EDX_SUPPORTS_L3_OCCUPANCY_MONITORING_BIT 0
#define CPUID_EDX_SUPPORTS_L3_OCCUPANCY_MONITORING_FLAG 0x01
#define CPUID_EDX_SUPPORTS_L3_OCCUPANCY_MONITORING_MASK 0x01
#define CPUID_EDX_SUPPORTS_L3_OCCUPANCY_MONITORING(_) (((_) >> 0) & 0x01)
/**
* [Bit 1] Supports L3 Total Bandwidth monitoring if 1.
*/
UINT32 SupportsL3TotalBandwidthMonitoring : 1;
#define CPUID_EDX_SUPPORTS_L3_TOTAL_BANDWIDTH_MONITORING_BIT 1
#define CPUID_EDX_SUPPORTS_L3_TOTAL_BANDWIDTH_MONITORING_FLAG 0x02
#define CPUID_EDX_SUPPORTS_L3_TOTAL_BANDWIDTH_MONITORING_MASK 0x01
#define CPUID_EDX_SUPPORTS_L3_TOTAL_BANDWIDTH_MONITORING(_) (((_) >> 1) & 0x01)
/**
* [Bit 2] Supports L3 Local Bandwidth monitoring if 1.
*/
UINT32 SupportsL3LocalBandwidthMonitoring : 1;
#define CPUID_EDX_SUPPORTS_L3_LOCAL_BANDWIDTH_MONITORING_BIT 2
#define CPUID_EDX_SUPPORTS_L3_LOCAL_BANDWIDTH_MONITORING_FLAG 0x04
#define CPUID_EDX_SUPPORTS_L3_LOCAL_BANDWIDTH_MONITORING_MASK 0x01
#define CPUID_EDX_SUPPORTS_L3_LOCAL_BANDWIDTH_MONITORING(_) (((_) >> 2) & 0x01)
UINT32 Reserved1 : 29;
};
UINT32 AsUInt;
} Edx;
} CPUID_EAX_0F_ECX_01;
/**
* @}
*/
/**
* @defgroup CPUID_EAX_10 \
* EAX = 0x10
*
* When CPUID executes with EAX set to 10H and ECX = 0, the processor returns information about the
* bit-vector representation of QoS Enforcement resource types that are supported in the processor.
* Each bit, starting from bit 1, corresponds to a specific resource type if the bit is set. The bit
* position corresponds to the sub-leaf index (or ResID) that software must use to query QoS
* enforcement capability available for that type. When CPUID executes with EAX set to 10H and ECX =
* n (n >= 1, and is a valid ResID), the processor returns information about available classes of
* service and range of QoS mask MSRs that software can use to configure each class of services
* using capability bit masks in the QoS Mask registers, IA32_resourceType_Mask_n.
* @{
*/
#define CPUID_INTEL_RESOURCE_DIRECTOR_TECHNOLOGY_ALLOCATION_INFORMATION 0x00000010
/**
* @brief Intel Resource Director Technology (Intel RDT) Allocation Enumeration Sub-leaf (EAX = 10H,
* ECX = 0)
*
* @note Leaf 10H output depends on the initial value in ECX. Sub-leaf index 0 reports valid
* resource identification (ResID) starting at bit position 1 of EBX.
*/
typedef struct
{
/* CPUID output registers EAX-EDX for leaf 10H, sub-leaf 0 (Intel RDT
allocation enumeration). EBX bits 1-3 advertise the valid ResIDs that may
be queried as further sub-leaves of this leaf. */
union
{
struct
{
/**
* [Bits 31:0] Value of bits [31:0] of IA32_PLATFORM_DCA_CAP MSR (address
* 1F8H).
*/
UINT32 Ia32PlatformDcaCap : 32;
#define CPUID_EAX_IA32_PLATFORM_DCA_CAP_BIT 0
#define CPUID_EAX_IA32_PLATFORM_DCA_CAP_FLAG 0xFFFFFFFF
#define CPUID_EAX_IA32_PLATFORM_DCA_CAP_MASK 0xFFFFFFFF
#define CPUID_EAX_IA32_PLATFORM_DCA_CAP(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Eax;
union
{
struct
{
UINT32 Reserved1 : 1;
/**
* [Bit 1] Supports L3 Cache Allocation Technology if 1.
*/
UINT32 SupportsL3CacheAllocationTechnology : 1;
#define CPUID_EBX_SUPPORTS_L3_CACHE_ALLOCATION_TECHNOLOGY_BIT 1
#define CPUID_EBX_SUPPORTS_L3_CACHE_ALLOCATION_TECHNOLOGY_FLAG 0x02
#define CPUID_EBX_SUPPORTS_L3_CACHE_ALLOCATION_TECHNOLOGY_MASK 0x01
#define CPUID_EBX_SUPPORTS_L3_CACHE_ALLOCATION_TECHNOLOGY(_) (((_) >> 1) & 0x01)
/**
* [Bit 2] Supports L2 Cache Allocation Technology if 1.
*/
UINT32 SupportsL2CacheAllocationTechnology : 1;
#define CPUID_EBX_SUPPORTS_L2_CACHE_ALLOCATION_TECHNOLOGY_BIT 2
#define CPUID_EBX_SUPPORTS_L2_CACHE_ALLOCATION_TECHNOLOGY_FLAG 0x04
#define CPUID_EBX_SUPPORTS_L2_CACHE_ALLOCATION_TECHNOLOGY_MASK 0x01
#define CPUID_EBX_SUPPORTS_L2_CACHE_ALLOCATION_TECHNOLOGY(_) (((_) >> 2) & 0x01)
/**
* [Bit 3] Supports Memory Bandwidth Allocation if 1.
*/
UINT32 SupportsMemoryBandwidthAllocation : 1;
#define CPUID_EBX_SUPPORTS_MEMORY_BANDWIDTH_ALLOCATION_BIT 3
#define CPUID_EBX_SUPPORTS_MEMORY_BANDWIDTH_ALLOCATION_FLAG 0x08
#define CPUID_EBX_SUPPORTS_MEMORY_BANDWIDTH_ALLOCATION_MASK 0x01
#define CPUID_EBX_SUPPORTS_MEMORY_BANDWIDTH_ALLOCATION(_) (((_) >> 3) & 0x01)
UINT32 Reserved2 : 28;
};
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
* [Bits 31:0] ECX is reserved.
*/
UINT32 Reserved : 32;
#define CPUID_ECX_RESERVED_BIT 0
#define CPUID_ECX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_ECX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_ECX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
* [Bits 31:0] EDX is reserved.
*/
UINT32 Reserved : 32;
#define CPUID_EDX_RESERVED_BIT 0
#define CPUID_EDX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EDX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EDX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Edx;
} CPUID_EAX_10_ECX_00;
/**
* @brief L3 Cache Allocation Technology Enumeration Sub-leaf (EAX = 10H, ECX = ResID = 1)
*
* @note Leaf 10H output depends on the initial value in ECX.
*/
typedef struct
{
/* CPUID output registers EAX-EDX for leaf 10H, ResID 1 (L3 CAT):
capacity-mask length, contention map, CDP support and highest COS. */
union
{
struct
{
/**
* [Bits 4:0] Length of the capacity bit mask for the corresponding ResID
* using minus-one notation.
*/
UINT32 LengthOfCapacityBitMask : 5;
#define CPUID_EAX_LENGTH_OF_CAPACITY_BIT_MASK_BIT 0
#define CPUID_EAX_LENGTH_OF_CAPACITY_BIT_MASK_FLAG 0x1F
#define CPUID_EAX_LENGTH_OF_CAPACITY_BIT_MASK_MASK 0x1F
#define CPUID_EAX_LENGTH_OF_CAPACITY_BIT_MASK(_) (((_) >> 0) & 0x1F)
UINT32 Reserved1 : 27;
};
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bits 31:0] Bit-granular map of isolation/contention of allocation units.
*/
UINT32 Ebx0 : 32;
#define CPUID_EBX_EBX_0_BIT 0
#define CPUID_EBX_EBX_0_FLAG 0xFFFFFFFF
#define CPUID_EBX_EBX_0_MASK 0xFFFFFFFF
#define CPUID_EBX_EBX_0(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ebx;
union
{
struct
{
UINT32 Reserved1 : 2;
/**
* [Bit 2] Code and Data Prioritization Technology supported if 1.
*/
/* NOTE(review): the identifier spelling "Priorization" comes from the
generator; kept as-is so existing references continue to compile. */
UINT32 CodeAndDataPriorizationTechnologySupported : 1;
#define CPUID_ECX_CODE_AND_DATA_PRIORIZATION_TECHNOLOGY_SUPPORTED_BIT 2
#define CPUID_ECX_CODE_AND_DATA_PRIORIZATION_TECHNOLOGY_SUPPORTED_FLAG 0x04
#define CPUID_ECX_CODE_AND_DATA_PRIORIZATION_TECHNOLOGY_SUPPORTED_MASK 0x01
#define CPUID_ECX_CODE_AND_DATA_PRIORIZATION_TECHNOLOGY_SUPPORTED(_) (((_) >> 2) & 0x01)
UINT32 Reserved2 : 29;
};
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
* [Bits 15:0] Highest COS number supported for this ResID.
*/
UINT32 HighestCosNumberSupported : 16;
#define CPUID_EDX_HIGHEST_COS_NUMBER_SUPPORTED_BIT 0
#define CPUID_EDX_HIGHEST_COS_NUMBER_SUPPORTED_FLAG 0xFFFF
#define CPUID_EDX_HIGHEST_COS_NUMBER_SUPPORTED_MASK 0xFFFF
#define CPUID_EDX_HIGHEST_COS_NUMBER_SUPPORTED(_) (((_) >> 0) & 0xFFFF)
UINT32 Reserved1 : 16;
};
UINT32 AsUInt;
} Edx;
} CPUID_EAX_10_ECX_01;
/**
* @brief L2 Cache Allocation Technology Enumeration Sub-leaf (EAX = 10H, ECX = ResID = 2)
*
* @note Leaf 10H output depends on the initial value in ECX.
*/
typedef struct
{
/* CPUID output registers EAX-EDX for leaf 10H, ResID 2 (L2 CAT). Same
layout as ResID 1 except ECX is entirely reserved (no CDP bit). */
union
{
struct
{
/**
* [Bits 4:0] Length of the capacity bit mask for the corresponding ResID
* using minus-one notation.
*/
UINT32 LengthOfCapacityBitMask : 5;
#define CPUID_EAX_LENGTH_OF_CAPACITY_BIT_MASK_BIT 0
#define CPUID_EAX_LENGTH_OF_CAPACITY_BIT_MASK_FLAG 0x1F
#define CPUID_EAX_LENGTH_OF_CAPACITY_BIT_MASK_MASK 0x1F
#define CPUID_EAX_LENGTH_OF_CAPACITY_BIT_MASK(_) (((_) >> 0) & 0x1F)
UINT32 Reserved1 : 27;
};
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bits 31:0] Bit-granular map of isolation/contention of allocation units.
*/
UINT32 Ebx0 : 32;
#define CPUID_EBX_EBX_0_BIT 0
#define CPUID_EBX_EBX_0_FLAG 0xFFFFFFFF
#define CPUID_EBX_EBX_0_MASK 0xFFFFFFFF
#define CPUID_EBX_EBX_0(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
* [Bits 31:0] ECX is reserved.
*/
UINT32 Reserved : 32;
#define CPUID_ECX_RESERVED_BIT 0
#define CPUID_ECX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_ECX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_ECX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
* [Bits 15:0] Highest COS number supported for this ResID.
*/
UINT32 HighestCosNumberSupported : 16;
#define CPUID_EDX_HIGHEST_COS_NUMBER_SUPPORTED_BIT 0
#define CPUID_EDX_HIGHEST_COS_NUMBER_SUPPORTED_FLAG 0xFFFF
#define CPUID_EDX_HIGHEST_COS_NUMBER_SUPPORTED_MASK 0xFFFF
#define CPUID_EDX_HIGHEST_COS_NUMBER_SUPPORTED(_) (((_) >> 0) & 0xFFFF)
UINT32 Reserved1 : 16;
};
UINT32 AsUInt;
} Edx;
} CPUID_EAX_10_ECX_02;
/**
* @brief Memory Bandwidth Allocation Enumeration Sub-leaf (EAX = 10H, ECX = ResID = 3)
*
* @note Leaf 10H output depends on the initial value in ECX.
*/
typedef struct
{
/* CPUID output registers EAX-EDX for leaf 10H, ResID 3 (Memory Bandwidth
Allocation): max throttling value, delay-linearity flag and highest COS. */
union
{
struct
{
/**
* [Bits 11:0] Reports the maximum MBA throttling value supported for the
* corresponding ResID using minus-one notation.
*/
UINT32 MaxMbaThrottlingValue : 12;
#define CPUID_EAX_MAX_MBA_THROTTLING_VALUE_BIT 0
#define CPUID_EAX_MAX_MBA_THROTTLING_VALUE_FLAG 0xFFF
#define CPUID_EAX_MAX_MBA_THROTTLING_VALUE_MASK 0xFFF
#define CPUID_EAX_MAX_MBA_THROTTLING_VALUE(_) (((_) >> 0) & 0xFFF)
UINT32 Reserved1 : 20;
};
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bits 31:0] EBX is reserved.
*/
UINT32 Reserved : 32;
#define CPUID_EBX_RESERVED_BIT 0
#define CPUID_EBX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EBX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EBX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ebx;
union
{
struct
{
UINT32 Reserved1 : 2;
/**
* [Bit 2] Reports whether the response of the delay values is linear.
*/
UINT32 ResponseOfDelayIsLinear : 1;
#define CPUID_ECX_RESPONSE_OF_DELAY_IS_LINEAR_BIT 2
#define CPUID_ECX_RESPONSE_OF_DELAY_IS_LINEAR_FLAG 0x04
#define CPUID_ECX_RESPONSE_OF_DELAY_IS_LINEAR_MASK 0x01
#define CPUID_ECX_RESPONSE_OF_DELAY_IS_LINEAR(_) (((_) >> 2) & 0x01)
UINT32 Reserved2 : 29;
};
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
* [Bits 15:0] Highest COS number supported for this ResID.
*/
UINT32 HighestCosNumberSupported : 16;
#define CPUID_EDX_HIGHEST_COS_NUMBER_SUPPORTED_BIT 0
#define CPUID_EDX_HIGHEST_COS_NUMBER_SUPPORTED_FLAG 0xFFFF
#define CPUID_EDX_HIGHEST_COS_NUMBER_SUPPORTED_MASK 0xFFFF
#define CPUID_EDX_HIGHEST_COS_NUMBER_SUPPORTED(_) (((_) >> 0) & 0xFFFF)
UINT32 Reserved1 : 16;
};
UINT32 AsUInt;
} Edx;
} CPUID_EAX_10_ECX_03;
/**
* @}
*/
/**
* @defgroup CPUID_EAX_12 \
* EAX = 0x12
*
* When CPUID executes with EAX set to 12H and ECX = 0H, the processor returns information about
* Intel SGX capabilities. When CPUID executes with EAX set to 12H and ECX = 1H, the processor
* returns information about Intel SGX attributes. When CPUID executes with EAX set to 12H and ECX =
* n (n > 1), the processor returns information about Intel SGX Enclave Page Cache.
* @{
*/
#define CPUID_INTEL_SGX 0x00000012
/**
* @brief Intel SGX Capability Enumeration Leaf, sub-leaf 0 (EAX = 12H, ECX = 0)
*
* @note Leaf 12H sub-leaf 0 (ECX = 0) is supported if CPUID.(EAX=07H, ECX=0H):EBX[SGX] = 1.
*/
typedef struct
{
/* CPUID output registers EAX-EDX for leaf 12H, sub-leaf 0 (SGX
capability): supported SGX leaf-function collections, MISCSELECT vector
and maximum enclave sizes (log2, per EDX comments below). */
union
{
struct
{
/**
* [Bit 0] If 1, Indicates Intel SGX supports the collection of SGX1 leaf
* functions.
*/
UINT32 Sgx1 : 1;
#define CPUID_EAX_SGX1_BIT 0
#define CPUID_EAX_SGX1_FLAG 0x01
#define CPUID_EAX_SGX1_MASK 0x01
#define CPUID_EAX_SGX1(_) (((_) >> 0) & 0x01)
/**
* [Bit 1] If 1, Indicates Intel SGX supports the collection of SGX2 leaf
* functions.
*/
UINT32 Sgx2 : 1;
#define CPUID_EAX_SGX2_BIT 1
#define CPUID_EAX_SGX2_FLAG 0x02
#define CPUID_EAX_SGX2_MASK 0x01
#define CPUID_EAX_SGX2(_) (((_) >> 1) & 0x01)
UINT32 Reserved1 : 3;
/**
* [Bit 5] If 1, indicates Intel SGX supports ENCLV instruction leaves
* EINCVIRTCHILD, EDECVIRTCHILD, and ESETCONTEXT.
*/
UINT32 SgxEnclvAdvanced : 1;
#define CPUID_EAX_SGX_ENCLV_ADVANCED_BIT 5
#define CPUID_EAX_SGX_ENCLV_ADVANCED_FLAG 0x20
#define CPUID_EAX_SGX_ENCLV_ADVANCED_MASK 0x01
#define CPUID_EAX_SGX_ENCLV_ADVANCED(_) (((_) >> 5) & 0x01)
/**
* [Bit 6] If 1, indicates Intel SGX supports ENCLS instruction leaves
* ETRACKC, ERDINFO, ELDBC, and ELDUC.
*/
UINT32 SgxEnclsAdvanced : 1;
#define CPUID_EAX_SGX_ENCLS_ADVANCED_BIT 6
#define CPUID_EAX_SGX_ENCLS_ADVANCED_FLAG 0x40
#define CPUID_EAX_SGX_ENCLS_ADVANCED_MASK 0x01
#define CPUID_EAX_SGX_ENCLS_ADVANCED(_) (((_) >> 6) & 0x01)
UINT32 Reserved2 : 25;
};
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bits 31:0] Bit vector of supported extended SGX features.
*/
UINT32 Miscselect : 32;
#define CPUID_EBX_MISCSELECT_BIT 0
#define CPUID_EBX_MISCSELECT_FLAG 0xFFFFFFFF
#define CPUID_EBX_MISCSELECT_MASK 0xFFFFFFFF
#define CPUID_EBX_MISCSELECT(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
* [Bits 31:0] ECX is reserved.
*/
UINT32 Reserved : 32;
#define CPUID_ECX_RESERVED_BIT 0
#define CPUID_ECX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_ECX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_ECX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
* [Bits 7:0] The maximum supported enclave size in non-64-bit mode is
* 2^(EDX[7:0]).
*/
UINT32 MaxEnclaveSizeNot64 : 8;
#define CPUID_EDX_MAX_ENCLAVE_SIZE_NOT64_BIT 0
#define CPUID_EDX_MAX_ENCLAVE_SIZE_NOT64_FLAG 0xFF
#define CPUID_EDX_MAX_ENCLAVE_SIZE_NOT64_MASK 0xFF
#define CPUID_EDX_MAX_ENCLAVE_SIZE_NOT64(_) (((_) >> 0) & 0xFF)
/**
* [Bits 15:8] The maximum supported enclave size in 64-bit mode is
* 2^(EDX[15:8]).
*/
UINT32 MaxEnclaveSize64 : 8;
#define CPUID_EDX_MAX_ENCLAVE_SIZE_64_BIT 8
#define CPUID_EDX_MAX_ENCLAVE_SIZE_64_FLAG 0xFF00
#define CPUID_EDX_MAX_ENCLAVE_SIZE_64_MASK 0xFF
#define CPUID_EDX_MAX_ENCLAVE_SIZE_64(_) (((_) >> 8) & 0xFF)
UINT32 Reserved1 : 16;
};
UINT32 AsUInt;
} Edx;
} CPUID_EAX_12_ECX_00;
/**
* @brief Intel SGX Attributes Enumeration Leaf, sub-leaf 1 (EAX = 12H, ECX = 1)
*
* @note Leaf 12H sub-leaf 1 (ECX = 1) is supported if CPUID.(EAX=07H, ECX=0H):EBX[SGX] = 1.
*/
typedef struct
{
/* CPUID output registers EAX-EDX for leaf 12H, sub-leaf 1: together the
four registers form the valid 128-bit SECS.ATTRIBUTES mask for ECREATE
(EAX = bits 31:0 ... EDX = bits 127:96). */
union
{
struct
{
/**
* [Bits 31:0] Reports the valid bits of SECS.ATTRIBUTES[31:0] that software
* can set with ECREATE.
*/
UINT32 ValidSecsAttributes0 : 32;
#define CPUID_EAX_VALID_SECS_ATTRIBUTES_0_BIT 0
#define CPUID_EAX_VALID_SECS_ATTRIBUTES_0_FLAG 0xFFFFFFFF
#define CPUID_EAX_VALID_SECS_ATTRIBUTES_0_MASK 0xFFFFFFFF
#define CPUID_EAX_VALID_SECS_ATTRIBUTES_0(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bits 31:0] Reports the valid bits of SECS.ATTRIBUTES[63:32] that
* software can set with ECREATE.
*/
UINT32 ValidSecsAttributes1 : 32;
#define CPUID_EBX_VALID_SECS_ATTRIBUTES_1_BIT 0
#define CPUID_EBX_VALID_SECS_ATTRIBUTES_1_FLAG 0xFFFFFFFF
#define CPUID_EBX_VALID_SECS_ATTRIBUTES_1_MASK 0xFFFFFFFF
#define CPUID_EBX_VALID_SECS_ATTRIBUTES_1(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
* [Bits 31:0] Reports the valid bits of SECS.ATTRIBUTES[95:64] that
* software can set with ECREATE.
*/
UINT32 ValidSecsAttributes2 : 32;
#define CPUID_ECX_VALID_SECS_ATTRIBUTES_2_BIT 0
#define CPUID_ECX_VALID_SECS_ATTRIBUTES_2_FLAG 0xFFFFFFFF
#define CPUID_ECX_VALID_SECS_ATTRIBUTES_2_MASK 0xFFFFFFFF
#define CPUID_ECX_VALID_SECS_ATTRIBUTES_2(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
* [Bits 31:0] Reports the valid bits of SECS.ATTRIBUTES[127:96] that
* software can set with ECREATE.
*/
UINT32 ValidSecsAttributes3 : 32;
#define CPUID_EDX_VALID_SECS_ATTRIBUTES_3_BIT 0
#define CPUID_EDX_VALID_SECS_ATTRIBUTES_3_FLAG 0xFFFFFFFF
#define CPUID_EDX_VALID_SECS_ATTRIBUTES_3_MASK 0xFFFFFFFF
#define CPUID_EDX_VALID_SECS_ATTRIBUTES_3(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Edx;
} CPUID_EAX_12_ECX_01;
/**
* @brief Intel SGX EPC Enumeration Leaf, sub-leaves (EAX = 12H, ECX = 2 or higher)
*
* @note Leaf 12H sub-leaf 2 or higher (ECX >= 2) is supported if CPUID.(EAX=07H, ECX=0H):EBX[SGX]
* = 1. This structure describes sub-leaf type 0.
*/
typedef struct
{
/* CPUID output registers EAX-EDX for leaf 12H, sub-leaves >= 2 when
EAX[3:0] = 0: the sub-leaf is invalid and all other output is zero. */
union
{
struct
{
/**
* [Bits 3:0] Sub-leaf Type 0. Indicates this sub-leaf is invalid.
*/
UINT32 SubLeafType : 4;
#define CPUID_EAX_SUB_LEAF_TYPE_BIT 0
#define CPUID_EAX_SUB_LEAF_TYPE_FLAG 0x0F
#define CPUID_EAX_SUB_LEAF_TYPE_MASK 0x0F
#define CPUID_EAX_SUB_LEAF_TYPE(_) (((_) >> 0) & 0x0F)
UINT32 Reserved1 : 28;
};
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bits 31:0] EBX is zero.
*/
UINT32 Zero : 32;
#define CPUID_EBX_ZERO_BIT 0
#define CPUID_EBX_ZERO_FLAG 0xFFFFFFFF
#define CPUID_EBX_ZERO_MASK 0xFFFFFFFF
#define CPUID_EBX_ZERO(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
* [Bits 31:0] ECX is zero.
*/
UINT32 Zero : 32;
#define CPUID_ECX_ZERO_BIT 0
#define CPUID_ECX_ZERO_FLAG 0xFFFFFFFF
#define CPUID_ECX_ZERO_MASK 0xFFFFFFFF
#define CPUID_ECX_ZERO(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
* [Bits 31:0] EDX is zero.
*/
UINT32 Zero : 32;
#define CPUID_EDX_ZERO_BIT 0
#define CPUID_EDX_ZERO_FLAG 0xFFFFFFFF
#define CPUID_EDX_ZERO_MASK 0xFFFFFFFF
#define CPUID_EDX_ZERO(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Edx;
} CPUID_EAX_12_ECX_02P_SLT_0;
/**
* @brief Intel SGX EPC Enumeration Leaf, sub-leaves (EAX = 12H, ECX = 2 or higher)
*
* @note Leaf 12H sub-leaf 2 or higher (ECX >= 2) is supported if CPUID.(EAX=07H, ECX=0H):EBX[SGX]
* = 1. This structure describes sub-leaf type 1.
*/
typedef struct
{
/* CPUID output registers EAX-EDX for leaf 12H, sub-leaves >= 2 when
EAX[3:0] = 1: an EPC section. The 52-bit physical base is split across
EAX[31:12] (low) and EBX[19:0] (high); the section size is split the same
way across ECX[31:12] and EDX[19:0]. */
union
{
struct
{
/**
* [Bits 3:0] Sub-leaf Type 1. This sub-leaf enumerates an EPC section.
* EBX:EAX and EDX:ECX provide information on the Enclave Page Cache (EPC)
* section.
*/
UINT32 SubLeafType : 4;
#define CPUID_EAX_SUB_LEAF_TYPE_BIT 0
#define CPUID_EAX_SUB_LEAF_TYPE_FLAG 0x0F
#define CPUID_EAX_SUB_LEAF_TYPE_MASK 0x0F
#define CPUID_EAX_SUB_LEAF_TYPE(_) (((_) >> 0) & 0x0F)
UINT32 Reserved1 : 8;
/**
* [Bits 31:12] Bits 31:12 of the physical address of the base of the EPC
* section.
*/
UINT32 EpcBasePhysicalAddress1 : 20;
#define CPUID_EAX_EPC_BASE_PHYSICAL_ADDRESS_1_BIT 12
#define CPUID_EAX_EPC_BASE_PHYSICAL_ADDRESS_1_FLAG 0xFFFFF000
#define CPUID_EAX_EPC_BASE_PHYSICAL_ADDRESS_1_MASK 0xFFFFF
#define CPUID_EAX_EPC_BASE_PHYSICAL_ADDRESS_1(_) (((_) >> 12) & 0xFFFFF)
};
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bits 19:0] Bits 51:32 of the physical address of the base of the EPC
* section.
*/
UINT32 EpcBasePhysicalAddress2 : 20;
#define CPUID_EBX_EPC_BASE_PHYSICAL_ADDRESS_2_BIT 0
#define CPUID_EBX_EPC_BASE_PHYSICAL_ADDRESS_2_FLAG 0xFFFFF
#define CPUID_EBX_EPC_BASE_PHYSICAL_ADDRESS_2_MASK 0xFFFFF
#define CPUID_EBX_EPC_BASE_PHYSICAL_ADDRESS_2(_) (((_) >> 0) & 0xFFFFF)
UINT32 Reserved1 : 12;
};
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
* [Bits 3:0] EPC section property encoding defined as follows:
* - If EAX[3:0] 0000b, then all bits of the EDX:ECX pair are enumerated as
* 0.
* - If EAX[3:0] 0001b, then this section has confidentiality and integrity
* protection. All other encodings are reserved.
*/
UINT32 EpcSectionProperty : 4;
#define CPUID_ECX_EPC_SECTION_PROPERTY_BIT 0
#define CPUID_ECX_EPC_SECTION_PROPERTY_FLAG 0x0F
#define CPUID_ECX_EPC_SECTION_PROPERTY_MASK 0x0F
#define CPUID_ECX_EPC_SECTION_PROPERTY(_) (((_) >> 0) & 0x0F)
UINT32 Reserved1 : 8;
/**
* [Bits 31:12] Bits 31:12 of the size of the corresponding EPC section
* within the Processor Reserved Memory.
*/
UINT32 EpcSize1 : 20;
#define CPUID_ECX_EPC_SIZE_1_BIT 12
#define CPUID_ECX_EPC_SIZE_1_FLAG 0xFFFFF000
#define CPUID_ECX_EPC_SIZE_1_MASK 0xFFFFF
#define CPUID_ECX_EPC_SIZE_1(_) (((_) >> 12) & 0xFFFFF)
};
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
* [Bits 19:0] Bits 51:32 of the size of the corresponding EPC section
* within the Processor Reserved Memory.
*/
UINT32 EpcSize2 : 20;
#define CPUID_EDX_EPC_SIZE_2_BIT 0
#define CPUID_EDX_EPC_SIZE_2_FLAG 0xFFFFF
#define CPUID_EDX_EPC_SIZE_2_MASK 0xFFFFF
#define CPUID_EDX_EPC_SIZE_2(_) (((_) >> 0) & 0xFFFFF)
UINT32 Reserved1 : 12;
};
UINT32 AsUInt;
} Edx;
} CPUID_EAX_12_ECX_02P_SLT_1;
/**
* @}
*/
/**
* @defgroup CPUID_EAX_14 \
* EAX = 0x14
*
* When CPUID executes with EAX set to 14H and ECX = 0H, the processor returns information about
* Intel Processor Trace extensions. When CPUID executes with EAX set to 14H and ECX = n (n > 0 and
* less than the number of non-zero bits in CPUID.(EAX=14H, ECX= 0H).EAX), the processor returns
* information about packet generation in Intel Processor Trace.
* @{
*/
#define CPUID_INTEL_PROCESSOR_TRACE_INFORMATION 0x00000014
/**
* @brief Intel Processor Trace Enumeration Main Leaf (EAX = 14H, ECX = 0)
*
* @note Leaf 14H main leaf (ECX = 0).
*/
typedef struct
{
/* CPUID output registers EAX-EDX for leaf 14H, sub-leaf 0 (Intel PT main
leaf): EAX reports the max sub-leaf, EBX/ECX report feature flags. */
union
{
struct
{
/**
* [Bits 31:0] Reports the maximum sub-leaf supported in leaf 14H.
*/
UINT32 MaxSubLeaf : 32;
#define CPUID_EAX_MAX_SUB_LEAF_BIT 0
#define CPUID_EAX_MAX_SUB_LEAF_FLAG 0xFFFFFFFF
#define CPUID_EAX_MAX_SUB_LEAF_MASK 0xFFFFFFFF
#define CPUID_EAX_MAX_SUB_LEAF(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bit 0] If 1, indicates that IA32_RTIT_CTL.CR3Filter can be set to 1, and
* that IA32_RTIT_CR3_MATCH MSR can be accessed.
*/
UINT32 Flag0 : 1;
#define CPUID_EBX_FLAG0_BIT 0
#define CPUID_EBX_FLAG0_FLAG 0x01
#define CPUID_EBX_FLAG0_MASK 0x01
#define CPUID_EBX_FLAG0(_) (((_) >> 0) & 0x01)
/**
* [Bit 1] If 1, indicates support of Configurable PSB and Cycle-Accurate
* Mode.
*/
UINT32 Flag1 : 1;
#define CPUID_EBX_FLAG1_BIT 1
#define CPUID_EBX_FLAG1_FLAG 0x02
#define CPUID_EBX_FLAG1_MASK 0x01
#define CPUID_EBX_FLAG1(_) (((_) >> 1) & 0x01)
/**
* [Bit 2] If 1, indicates support of IP Filtering, TraceStop filtering, and
* preservation of Intel PT MSRs across warm reset.
*/
UINT32 Flag2 : 1;
#define CPUID_EBX_FLAG2_BIT 2
#define CPUID_EBX_FLAG2_FLAG 0x04
#define CPUID_EBX_FLAG2_MASK 0x01
#define CPUID_EBX_FLAG2(_) (((_) >> 2) & 0x01)
/**
* [Bit 3] If 1, indicates support of MTC timing packet and suppression of
* COFI-based packets.
*/
UINT32 Flag3 : 1;
#define CPUID_EBX_FLAG3_BIT 3
#define CPUID_EBX_FLAG3_FLAG 0x08
#define CPUID_EBX_FLAG3_MASK 0x01
#define CPUID_EBX_FLAG3(_) (((_) >> 3) & 0x01)
/**
* [Bit 4] If 1, indicates support of PTWRITE. Writes can set
* IA32_RTIT_CTL[12] (PTWEn) and IA32_RTIT_CTL[5] (FUPonPTW), and PTWRITE
* can generate packets.
*/
UINT32 Flag4 : 1;
#define CPUID_EBX_FLAG4_BIT 4
#define CPUID_EBX_FLAG4_FLAG 0x10
#define CPUID_EBX_FLAG4_MASK 0x01
#define CPUID_EBX_FLAG4(_) (((_) >> 4) & 0x01)
/**
* [Bit 5] If 1, indicates support of Power Event Trace. Writes can set
* IA32_RTIT_CTL[4] (PwrEvtEn), enabling Power Event Trace packet
* generation.
*/
UINT32 Flag5 : 1;
#define CPUID_EBX_FLAG5_BIT 5
#define CPUID_EBX_FLAG5_FLAG 0x20
#define CPUID_EBX_FLAG5_MASK 0x01
#define CPUID_EBX_FLAG5(_) (((_) >> 5) & 0x01)
/**
* [Bit 6] If 1, indicates support for PSB and PMI preservation. Writes can
* set IA32_RTIT_CTL[56] (InjectPsbPmiOnEnable), enabling the processor to
* set IA32_RTIT_STATUS[7] (PendTopaPMI) and/or IA32_RTIT_STATUS[6]
* (PendPSB) in order to preserve ToPA PMIs and/or PSBs otherwise lost due
* to Intel PT disable. Writes can also set PendToPAPMI and PendPSB.
*/
UINT32 Flag6 : 1;
#define CPUID_EBX_FLAG6_BIT 6
#define CPUID_EBX_FLAG6_FLAG 0x40
#define CPUID_EBX_FLAG6_MASK 0x01
#define CPUID_EBX_FLAG6(_) (((_) >> 6) & 0x01)
/**
* [Bit 7] If 1, writes can set IA32_RTIT_CTL[31] (EventEn), enabling Event
* Trace packet generation.
*/
UINT32 Flag7 : 1;
#define CPUID_EBX_FLAG7_BIT 7
#define CPUID_EBX_FLAG7_FLAG 0x80
#define CPUID_EBX_FLAG7_MASK 0x01
#define CPUID_EBX_FLAG7(_) (((_) >> 7) & 0x01)
/**
* [Bit 8] If 1, writes can set IA32_RTIT_CTL[55] (DisTNT), disabling TNT
* packet generation.
*/
UINT32 Flag8 : 1;
#define CPUID_EBX_FLAG8_BIT 8
#define CPUID_EBX_FLAG8_FLAG 0x100
#define CPUID_EBX_FLAG8_MASK 0x01
#define CPUID_EBX_FLAG8(_) (((_) >> 8) & 0x01)
UINT32 Reserved1 : 23;
};
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
* [Bit 0] If 1, Tracing can be enabled with IA32_RTIT_CTL.ToPA = 1, hence
* utilizing the ToPA output scheme; IA32_RTIT_OUTPUT_BASE and
* IA32_RTIT_OUTPUT_MASK_PTRS MSRs can be accessed.
*/
UINT32 Flag0 : 1;
#define CPUID_ECX_FLAG0_BIT 0
#define CPUID_ECX_FLAG0_FLAG 0x01
#define CPUID_ECX_FLAG0_MASK 0x01
#define CPUID_ECX_FLAG0(_) (((_) >> 0) & 0x01)
/**
* [Bit 1] If 1, ToPA tables can hold any number of output entries, up to
* the maximum allowed by the MaskOrTableOffset field of
* IA32_RTIT_OUTPUT_MASK_PTRS.
*/
UINT32 Flag1 : 1;
#define CPUID_ECX_FLAG1_BIT 1
#define CPUID_ECX_FLAG1_FLAG 0x02
#define CPUID_ECX_FLAG1_MASK 0x01
#define CPUID_ECX_FLAG1(_) (((_) >> 1) & 0x01)
/**
* [Bit 2] If 1, indicates support of Single-Range Output scheme.
*/
UINT32 Flag2 : 1;
#define CPUID_ECX_FLAG2_BIT 2
#define CPUID_ECX_FLAG2_FLAG 0x04
#define CPUID_ECX_FLAG2_MASK 0x01
#define CPUID_ECX_FLAG2(_) (((_) >> 2) & 0x01)
/**
* [Bit 3] If 1, indicates support of output to Trace Transport subsystem.
*/
UINT32 Flag3 : 1;
#define CPUID_ECX_FLAG3_BIT 3
#define CPUID_ECX_FLAG3_FLAG 0x08
#define CPUID_ECX_FLAG3_MASK 0x01
#define CPUID_ECX_FLAG3(_) (((_) >> 3) & 0x01)
UINT32 Reserved1 : 27;
/**
* [Bit 31] If 1, generated packets which contain IP payloads have LIP
* values, which include the CS base component.
*/
UINT32 Flag31 : 1;
#define CPUID_ECX_FLAG31_BIT 31
#define CPUID_ECX_FLAG31_FLAG 0x80000000
#define CPUID_ECX_FLAG31_MASK 0x01
#define CPUID_ECX_FLAG31(_) (((_) >> 31) & 0x01)
};
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
* [Bits 31:0] EDX is reserved.
*/
UINT32 Reserved : 32;
#define CPUID_EDX_RESERVED_BIT 0
#define CPUID_EDX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EDX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EDX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Edx;
} CPUID_EAX_14_ECX_00;
/**
* @brief Intel Processor Trace Enumeration Sub-leaf (EAX = 14H, ECX = 1)
*/
typedef struct
{
/* Output registers of CPUID(EAX = 14H, ECX = 1). Each member is a union that
 * overlays named bit-fields with the raw 32-bit register value (AsUInt). */
union
{
struct
{
/**
* [Bits 2:0] Number of configurable Address Ranges for filtering.
*/
UINT32 NumberOfConfigurableAddressRangesForFiltering : 3;
#define CPUID_EAX_NUMBER_OF_CONFIGURABLE_ADDRESS_RANGES_FOR_FILTERING_BIT 0
#define CPUID_EAX_NUMBER_OF_CONFIGURABLE_ADDRESS_RANGES_FOR_FILTERING_FLAG 0x07
#define CPUID_EAX_NUMBER_OF_CONFIGURABLE_ADDRESS_RANGES_FOR_FILTERING_MASK 0x07
#define CPUID_EAX_NUMBER_OF_CONFIGURABLE_ADDRESS_RANGES_FOR_FILTERING(_) (((_) >> 0) & 0x07)
UINT32 Reserved1 : 13;
/**
* [Bits 31:16] Bitmap of supported MTC period encodings.
*/
UINT32 BitmapOfSupportedMtcPeriodEncodings : 16;
#define CPUID_EAX_BITMAP_OF_SUPPORTED_MTC_PERIOD_ENCODINGS_BIT 16
#define CPUID_EAX_BITMAP_OF_SUPPORTED_MTC_PERIOD_ENCODINGS_FLAG 0xFFFF0000
#define CPUID_EAX_BITMAP_OF_SUPPORTED_MTC_PERIOD_ENCODINGS_MASK 0xFFFF
#define CPUID_EAX_BITMAP_OF_SUPPORTED_MTC_PERIOD_ENCODINGS(_) (((_) >> 16) & 0xFFFF)
};
/* Raw 32-bit EAX value. */
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bits 15:0] Bitmap of supported Cycle Threshold value encodings.
*/
UINT32 BitmapOfSupportedCycleThresholdValueEncodings : 16;
#define CPUID_EBX_BITMAP_OF_SUPPORTED_CYCLE_THRESHOLD_VALUE_ENCODINGS_BIT 0
#define CPUID_EBX_BITMAP_OF_SUPPORTED_CYCLE_THRESHOLD_VALUE_ENCODINGS_FLAG 0xFFFF
#define CPUID_EBX_BITMAP_OF_SUPPORTED_CYCLE_THRESHOLD_VALUE_ENCODINGS_MASK 0xFFFF
#define CPUID_EBX_BITMAP_OF_SUPPORTED_CYCLE_THRESHOLD_VALUE_ENCODINGS(_) (((_) >> 0) & 0xFFFF)
/**
* [Bits 31:16] Bitmap of supported Configurable PSB frequency encodings.
*/
UINT32 BitmapOfSupportedConfigurablePsbFrequencyEncodings : 16;
#define CPUID_EBX_BITMAP_OF_SUPPORTED_CONFIGURABLE_PSB_FREQUENCY_ENCODINGS_BIT 16
#define CPUID_EBX_BITMAP_OF_SUPPORTED_CONFIGURABLE_PSB_FREQUENCY_ENCODINGS_FLAG 0xFFFF0000
#define CPUID_EBX_BITMAP_OF_SUPPORTED_CONFIGURABLE_PSB_FREQUENCY_ENCODINGS_MASK 0xFFFF
#define CPUID_EBX_BITMAP_OF_SUPPORTED_CONFIGURABLE_PSB_FREQUENCY_ENCODINGS(_) (((_) >> 16) & 0xFFFF)
};
/* Raw 32-bit EBX value. */
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
* [Bits 31:0] ECX is reserved.
*/
UINT32 Reserved : 32;
#define CPUID_ECX_RESERVED_BIT 0
#define CPUID_ECX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_ECX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_ECX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit ECX value. */
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
* [Bits 31:0] EDX is reserved.
*/
UINT32 Reserved : 32;
#define CPUID_EDX_RESERVED_BIT 0
#define CPUID_EDX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EDX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EDX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit EDX value. */
UINT32 AsUInt;
} Edx;
} CPUID_EAX_14_ECX_01;
/**
* @}
*/
/**
* @brief Time Stamp Counter and Nominal Core Crystal Clock Information Leaf
*
* When CPUID executes with EAX set to 15H and ECX = 0H, the processor returns information about
* Time Stamp Counter and Core Crystal Clock.
*
* @note If EBX[31:0] is 0, the TSC/"core crystal clock" ratio is not enumerated.
* EBX[31:0]/EAX[31:0] indicates the ratio of the TSC frequency and the core crystal clock
* frequency. If ECX is 0, the nominal core crystal clock frequency is not enumerated. "TSC
* frequency" = "core crystal clock frequency" * EBX/EAX.
*/
#define CPUID_TIME_STAMP_COUNTER_INFORMATION 0x00000015
typedef struct
{
/* Output registers of CPUID(EAX = 15H): TSC / core crystal clock ratio and
 * nominal crystal frequency. Each member overlays bit-fields with AsUInt. */
union
{
struct
{
/**
* [Bits 31:0] An unsigned integer which is the denominator of the TSC/"core
* crystal clock" ratio.
*/
UINT32 Denominator : 32;
#define CPUID_EAX_DENOMINATOR_BIT 0
#define CPUID_EAX_DENOMINATOR_FLAG 0xFFFFFFFF
#define CPUID_EAX_DENOMINATOR_MASK 0xFFFFFFFF
#define CPUID_EAX_DENOMINATOR(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit EAX value. */
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bits 31:0] An unsigned integer which is the numerator of the TSC/"core
* crystal clock" ratio.
*/
UINT32 Numerator : 32;
#define CPUID_EBX_NUMERATOR_BIT 0
#define CPUID_EBX_NUMERATOR_FLAG 0xFFFFFFFF
#define CPUID_EBX_NUMERATOR_MASK 0xFFFFFFFF
#define CPUID_EBX_NUMERATOR(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit EBX value. */
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
* [Bits 31:0] An unsigned integer which is the nominal frequency of the
* core crystal clock in Hz.
*/
UINT32 NominalFrequency : 32;
#define CPUID_ECX_NOMINAL_FREQUENCY_BIT 0
#define CPUID_ECX_NOMINAL_FREQUENCY_FLAG 0xFFFFFFFF
#define CPUID_ECX_NOMINAL_FREQUENCY_MASK 0xFFFFFFFF
#define CPUID_ECX_NOMINAL_FREQUENCY(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit ECX value. */
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
* [Bits 31:0] EDX is reserved.
*/
UINT32 Reserved : 32;
#define CPUID_EDX_RESERVED_BIT 0
#define CPUID_EDX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EDX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EDX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit EDX value. */
UINT32 AsUInt;
} Edx;
} CPUID_EAX_15;
/**
* @brief Processor Frequency Information Leaf
*
* When CPUID executes with EAX set to 16H, the processor returns information about Processor
* Frequency Information.
*
* @note Data is returned from this interface in accordance with the processor's specification and
* does not reflect actual values. Suitable use of this data includes the display of processor
* information in like manner to the processor brand string and for determining the appropriate
* range to use when displaying processor information e.g. frequency history graphs. The returned
* information should not be used for any other purpose as the returned information does not
* accurately correlate to information / counters returned by other processor interfaces.
* While a processor may support the Processor Frequency Information leaf, fields that return
* a value of zero are not supported.
*/
#define CPUID_PROCESSOR_FREQUENCY_INFORMATION 0x00000016
typedef struct
{
/* Output registers of CPUID(EAX = 16H): processor base/max/bus frequencies in
 * MHz. NOTE: the field name "Procesor" (sic) is kept as-is; it is part of the
 * public interface of this generated header. */
union
{
struct
{
/**
* [Bits 15:0] Processor Base Frequency (in MHz).
*/
UINT32 ProcesorBaseFrequencyMhz : 16;
#define CPUID_EAX_PROCESOR_BASE_FREQUENCY_MHZ_BIT 0
#define CPUID_EAX_PROCESOR_BASE_FREQUENCY_MHZ_FLAG 0xFFFF
#define CPUID_EAX_PROCESOR_BASE_FREQUENCY_MHZ_MASK 0xFFFF
#define CPUID_EAX_PROCESOR_BASE_FREQUENCY_MHZ(_) (((_) >> 0) & 0xFFFF)
UINT32 Reserved1 : 16;
};
/* Raw 32-bit EAX value. */
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bits 15:0] Maximum Frequency (in MHz).
*/
UINT32 ProcessorMaximumFrequencyMhz : 16;
#define CPUID_EBX_PROCESSOR_MAXIMUM_FREQUENCY_MHZ_BIT 0
#define CPUID_EBX_PROCESSOR_MAXIMUM_FREQUENCY_MHZ_FLAG 0xFFFF
#define CPUID_EBX_PROCESSOR_MAXIMUM_FREQUENCY_MHZ_MASK 0xFFFF
#define CPUID_EBX_PROCESSOR_MAXIMUM_FREQUENCY_MHZ(_) (((_) >> 0) & 0xFFFF)
UINT32 Reserved1 : 16;
};
/* Raw 32-bit EBX value. */
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
* [Bits 15:0] Bus (Reference) Frequency (in MHz).
*/
UINT32 BusFrequencyMhz : 16;
#define CPUID_ECX_BUS_FREQUENCY_MHZ_BIT 0
#define CPUID_ECX_BUS_FREQUENCY_MHZ_FLAG 0xFFFF
#define CPUID_ECX_BUS_FREQUENCY_MHZ_MASK 0xFFFF
#define CPUID_ECX_BUS_FREQUENCY_MHZ(_) (((_) >> 0) & 0xFFFF)
UINT32 Reserved1 : 16;
};
/* Raw 32-bit ECX value. */
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
* [Bits 31:0] EDX is reserved.
*/
UINT32 Reserved : 32;
#define CPUID_EDX_RESERVED_BIT 0
#define CPUID_EDX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EDX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EDX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit EDX value. */
UINT32 AsUInt;
} Edx;
} CPUID_EAX_16;
/**
* @defgroup CPUID_EAX_17 \
* EAX = 0x17
*
* When CPUID executes with EAX set to 17H, the processor returns information about the
* System-On-Chip Vendor Attribute Enumeration.
* @{
*/
#define CPUID_SOC_VENDOR_INFORMATION 0x00000017
/**
* @brief System-On-Chip Vendor Attribute Enumeration Main Leaf (EAX = 17H, ECX = 0)
*
* @note Leaf 17H main leaf (ECX = 0). Leaf 17H output depends on the initial value in ECX. Leaf 17H
* sub-leaves 1 through 3 reports SOC Vendor Brand String. Leaf 17H is valid if MaxSOCID_Index >= 3.
* Leaf 17H sub-leaves 4 and above are reserved.
*/
typedef struct
{
/* Output registers of CPUID(EAX = 17H, ECX = 0): SOC vendor attribute main
 * leaf (max sub-leaf index, vendor ID, project ID, stepping ID). */
union
{
struct
{
/**
* [Bits 31:0] Reports the maximum input value of supported sub-leaf in leaf
* 17H.
*/
UINT32 MaxSocIdIndex : 32;
#define CPUID_EAX_MAX_SOC_ID_INDEX_BIT 0
#define CPUID_EAX_MAX_SOC_ID_INDEX_FLAG 0xFFFFFFFF
#define CPUID_EAX_MAX_SOC_ID_INDEX_MASK 0xFFFFFFFF
#define CPUID_EAX_MAX_SOC_ID_INDEX(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit EAX value. */
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bits 15:0] SOC Vendor ID.
*/
UINT32 SocVendorId : 16;
#define CPUID_EBX_SOC_VENDOR_ID_BIT 0
#define CPUID_EBX_SOC_VENDOR_ID_FLAG 0xFFFF
#define CPUID_EBX_SOC_VENDOR_ID_MASK 0xFFFF
#define CPUID_EBX_SOC_VENDOR_ID(_) (((_) >> 0) & 0xFFFF)
/**
* [Bit 16] If 1, the SOC Vendor ID field is assigned via an industry
* standard enumeration scheme. Otherwise, the SOC Vendor ID field is
* assigned by Intel.
*/
UINT32 IsVendorScheme : 1;
#define CPUID_EBX_IS_VENDOR_SCHEME_BIT 16
#define CPUID_EBX_IS_VENDOR_SCHEME_FLAG 0x10000
#define CPUID_EBX_IS_VENDOR_SCHEME_MASK 0x01
#define CPUID_EBX_IS_VENDOR_SCHEME(_) (((_) >> 16) & 0x01)
UINT32 Reserved1 : 15;
};
/* Raw 32-bit EBX value. */
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
* [Bits 31:0] A unique number an SOC vendor assigns to its SOC projects.
*/
UINT32 ProjectId : 32;
#define CPUID_ECX_PROJECT_ID_BIT 0
#define CPUID_ECX_PROJECT_ID_FLAG 0xFFFFFFFF
#define CPUID_ECX_PROJECT_ID_MASK 0xFFFFFFFF
#define CPUID_ECX_PROJECT_ID(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit ECX value. */
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
* [Bits 31:0] A unique number within an SOC project that an SOC vendor
* assigns.
*/
UINT32 SteppingId : 32;
#define CPUID_EDX_STEPPING_ID_BIT 0
#define CPUID_EDX_STEPPING_ID_FLAG 0xFFFFFFFF
#define CPUID_EDX_STEPPING_ID_MASK 0xFFFFFFFF
#define CPUID_EDX_STEPPING_ID(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit EDX value. */
UINT32 AsUInt;
} Edx;
} CPUID_EAX_17_ECX_00;
/**
* @brief System-On-Chip Vendor Attribute Enumeration Sub-leaf (EAX = 17H, ECX = 1..3)
*
* @note Leaf 17H output depends on the initial value in ECX. SOC Vendor Brand String is a UTF-8
* encoded string padded with trailing bytes of 00H. The complete SOC Vendor Brand String is
* constructed by concatenating in ascending order of EAX:EBX:ECX:EDX and from the sub-leaf 1
* fragment towards sub-leaf 3.
*/
typedef struct
{
/* Output registers of CPUID(EAX = 17H, ECX = 1..3): four 4-byte fragments of
 * the UTF-8 SOC Vendor Brand String, ordered EAX, EBX, ECX, EDX. */
union
{
struct
{
/**
* [Bits 31:0] SOC Vendor Brand String. UTF-8 encoded string.
*/
UINT32 SocVendorBrandString : 32;
#define CPUID_EAX_SOC_VENDOR_BRAND_STRING_BIT 0
#define CPUID_EAX_SOC_VENDOR_BRAND_STRING_FLAG 0xFFFFFFFF
#define CPUID_EAX_SOC_VENDOR_BRAND_STRING_MASK 0xFFFFFFFF
#define CPUID_EAX_SOC_VENDOR_BRAND_STRING(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit EAX value. */
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bits 31:0] SOC Vendor Brand String. UTF-8 encoded string.
*/
UINT32 SocVendorBrandString : 32;
#define CPUID_EBX_SOC_VENDOR_BRAND_STRING_BIT 0
#define CPUID_EBX_SOC_VENDOR_BRAND_STRING_FLAG 0xFFFFFFFF
#define CPUID_EBX_SOC_VENDOR_BRAND_STRING_MASK 0xFFFFFFFF
#define CPUID_EBX_SOC_VENDOR_BRAND_STRING(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit EBX value. */
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
* [Bits 31:0] SOC Vendor Brand String. UTF-8 encoded string.
*/
UINT32 SocVendorBrandString : 32;
#define CPUID_ECX_SOC_VENDOR_BRAND_STRING_BIT 0
#define CPUID_ECX_SOC_VENDOR_BRAND_STRING_FLAG 0xFFFFFFFF
#define CPUID_ECX_SOC_VENDOR_BRAND_STRING_MASK 0xFFFFFFFF
#define CPUID_ECX_SOC_VENDOR_BRAND_STRING(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit ECX value. */
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
* [Bits 31:0] SOC Vendor Brand String. UTF-8 encoded string.
*/
UINT32 SocVendorBrandString : 32;
#define CPUID_EDX_SOC_VENDOR_BRAND_STRING_BIT 0
#define CPUID_EDX_SOC_VENDOR_BRAND_STRING_FLAG 0xFFFFFFFF
#define CPUID_EDX_SOC_VENDOR_BRAND_STRING_MASK 0xFFFFFFFF
#define CPUID_EDX_SOC_VENDOR_BRAND_STRING(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit EDX value. */
UINT32 AsUInt;
} Edx;
} CPUID_EAX_17_ECX_01_03;
/**
* @brief System-On-Chip Vendor Attribute Enumeration Sub-leaves (EAX = 17H, ECX > MaxSOCID_Index)
*
* @note Leaf 17H output depends on the initial value in ECX.
*/
typedef struct
{
/* Output registers of CPUID(EAX = 17H, ECX > MaxSOCID_Index): all four
 * registers are reserved and return 0 for out-of-range sub-leaves. */
union
{
struct
{
/**
* [Bits 31:0] Reserved = 0.
*/
UINT32 Reserved : 32;
#define CPUID_EAX_RESERVED_BIT 0
#define CPUID_EAX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EAX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EAX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit EAX value. */
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bits 31:0] Reserved = 0.
*/
UINT32 Reserved : 32;
#define CPUID_EBX_RESERVED_BIT 0
#define CPUID_EBX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EBX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EBX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit EBX value. */
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
* [Bits 31:0] Reserved = 0.
*/
UINT32 Reserved : 32;
#define CPUID_ECX_RESERVED_BIT 0
#define CPUID_ECX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_ECX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_ECX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit ECX value. */
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
* [Bits 31:0] Reserved = 0.
*/
UINT32 Reserved : 32;
#define CPUID_EDX_RESERVED_BIT 0
#define CPUID_EDX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EDX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EDX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit EDX value. */
UINT32 AsUInt;
} Edx;
} CPUID_EAX_17_ECX_N;
/**
* @}
*/
/**
* @defgroup CPUID_EAX_18 \
* EAX = 0x18
*
* When CPUID executes with EAX set to 18H, the processor returns information about the
* Deterministic Address Translation Parameters.
* @{
*/
#define CPUID_DETERMINISTIC_ADDRESS_TRANSLATION_PARAMETERS 0x00000018
/**
* @brief Deterministic Address Translation Parameters Main Leaf (EAX = 18H, ECX = 0)
*
* @note Each sub-leaf enumerates a different address translation structure.
* If ECX contains an invalid sub-leaf index, EAX/EBX/ECX/EDX return 0. Sub-leaf index n is
* invalid if n exceeds the value that sub-leaf 0 returns in EAX. A sub-leaf index is also invalid
* if EDX[4:0] returns 0. Valid sub-leaves do not need to be contiguous or in any particular order.
* A valid sub-leaf may be in a higher input ECX value than an invalid sub-leaf or than a valid
* sub-leaf of a higher or lower-level structure.
*/
typedef struct
{
/* Output registers of CPUID(EAX = 18H, ECX = 0): deterministic address
 * translation parameters, main leaf. EDX[4:0] == 0 marks an invalid sub-leaf. */
union
{
struct
{
/**
* [Bits 31:0] Reports the maximum input value of supported sub-leaf in leaf
* 18H.
*/
UINT32 MaxSubLeaf : 32;
#define CPUID_EAX_MAX_SUB_LEAF_BIT 0
#define CPUID_EAX_MAX_SUB_LEAF_FLAG 0xFFFFFFFF
#define CPUID_EAX_MAX_SUB_LEAF_MASK 0xFFFFFFFF
#define CPUID_EAX_MAX_SUB_LEAF(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit EAX value. */
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bit 0] 4K page size entries supported by this structure.
*/
UINT32 PageEntries4KbSupported : 1;
#define CPUID_EBX_PAGE_ENTRIES_4KB_SUPPORTED_BIT 0
#define CPUID_EBX_PAGE_ENTRIES_4KB_SUPPORTED_FLAG 0x01
#define CPUID_EBX_PAGE_ENTRIES_4KB_SUPPORTED_MASK 0x01
#define CPUID_EBX_PAGE_ENTRIES_4KB_SUPPORTED(_) (((_) >> 0) & 0x01)
/**
* [Bit 1] 2MB page size entries supported by this structure.
*/
UINT32 PageEntries2MbSupported : 1;
#define CPUID_EBX_PAGE_ENTRIES_2MB_SUPPORTED_BIT 1
#define CPUID_EBX_PAGE_ENTRIES_2MB_SUPPORTED_FLAG 0x02
#define CPUID_EBX_PAGE_ENTRIES_2MB_SUPPORTED_MASK 0x01
#define CPUID_EBX_PAGE_ENTRIES_2MB_SUPPORTED(_) (((_) >> 1) & 0x01)
/**
* [Bit 2] 4MB page size entries supported by this structure.
*/
UINT32 PageEntries4MbSupported : 1;
#define CPUID_EBX_PAGE_ENTRIES_4MB_SUPPORTED_BIT 2
#define CPUID_EBX_PAGE_ENTRIES_4MB_SUPPORTED_FLAG 0x04
#define CPUID_EBX_PAGE_ENTRIES_4MB_SUPPORTED_MASK 0x01
#define CPUID_EBX_PAGE_ENTRIES_4MB_SUPPORTED(_) (((_) >> 2) & 0x01)
/**
* [Bit 3] 1 GB page size entries supported by this structure.
*/
UINT32 PageEntries1GbSupported : 1;
#define CPUID_EBX_PAGE_ENTRIES_1GB_SUPPORTED_BIT 3
#define CPUID_EBX_PAGE_ENTRIES_1GB_SUPPORTED_FLAG 0x08
#define CPUID_EBX_PAGE_ENTRIES_1GB_SUPPORTED_MASK 0x01
#define CPUID_EBX_PAGE_ENTRIES_1GB_SUPPORTED(_) (((_) >> 3) & 0x01)
UINT32 Reserved1 : 4;
/**
* [Bits 10:8] Partitioning (0: Soft partitioning between the logical
* processors sharing this structure).
*/
UINT32 Partitioning : 3;
#define CPUID_EBX_PARTITIONING_BIT 8
#define CPUID_EBX_PARTITIONING_FLAG 0x700
#define CPUID_EBX_PARTITIONING_MASK 0x07
#define CPUID_EBX_PARTITIONING(_) (((_) >> 8) & 0x07)
UINT32 Reserved2 : 5;
/**
* [Bits 31:16] W = Ways of associativity.
*/
UINT32 WaysOfAssociativity00 : 16;
#define CPUID_EBX_WAYS_OF_ASSOCIATIVITY_00_BIT 16
#define CPUID_EBX_WAYS_OF_ASSOCIATIVITY_00_FLAG 0xFFFF0000
#define CPUID_EBX_WAYS_OF_ASSOCIATIVITY_00_MASK 0xFFFF
#define CPUID_EBX_WAYS_OF_ASSOCIATIVITY_00(_) (((_) >> 16) & 0xFFFF)
};
/* Raw 32-bit EBX value. */
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
* [Bits 31:0] Number of Sets.
*/
UINT32 NumberOfSets : 32;
#define CPUID_ECX_NUMBER_OF_SETS_BIT 0
#define CPUID_ECX_NUMBER_OF_SETS_FLAG 0xFFFFFFFF
#define CPUID_ECX_NUMBER_OF_SETS_MASK 0xFFFFFFFF
#define CPUID_ECX_NUMBER_OF_SETS(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit ECX value. */
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
* [Bits 4:0] Translation cache type field.
* - 00000b: Null (indicates this sub-leaf is not valid).
* - 00001b: Data TLB.
* - 00010b: Instruction TLB.
* - 00011b: Unified TLB.
* All other encodings are reserved.
*
* @note Some unified TLBs will allow a single TLB entry to satisfy data
* read/write and instruction fetches. Others will require separate entries
* (e.g., one loaded on data read/write and another loaded on an instruction
* fetch) . Please see the Intel(R) 64 and IA-32 Architectures Optimization
* Reference Manual for details of a particular product.
*/
UINT32 TranslationCacheTypeField : 5;
#define CPUID_EDX_TRANSLATION_CACHE_TYPE_FIELD_BIT 0
#define CPUID_EDX_TRANSLATION_CACHE_TYPE_FIELD_FLAG 0x1F
#define CPUID_EDX_TRANSLATION_CACHE_TYPE_FIELD_MASK 0x1F
#define CPUID_EDX_TRANSLATION_CACHE_TYPE_FIELD(_) (((_) >> 0) & 0x1F)
/**
* [Bits 7:5] Translation cache level (starts at 1).
*/
UINT32 TranslationCacheLevel : 3;
#define CPUID_EDX_TRANSLATION_CACHE_LEVEL_BIT 5
#define CPUID_EDX_TRANSLATION_CACHE_LEVEL_FLAG 0xE0
#define CPUID_EDX_TRANSLATION_CACHE_LEVEL_MASK 0x07
#define CPUID_EDX_TRANSLATION_CACHE_LEVEL(_) (((_) >> 5) & 0x07)
/**
* [Bit 8] Fully associative structure.
*/
UINT32 FullyAssociativeStructure : 1;
#define CPUID_EDX_FULLY_ASSOCIATIVE_STRUCTURE_BIT 8
#define CPUID_EDX_FULLY_ASSOCIATIVE_STRUCTURE_FLAG 0x100
#define CPUID_EDX_FULLY_ASSOCIATIVE_STRUCTURE_MASK 0x01
#define CPUID_EDX_FULLY_ASSOCIATIVE_STRUCTURE(_) (((_) >> 8) & 0x01)
UINT32 Reserved1 : 5;
/**
* [Bits 25:14] Maximum number of addressable IDs for logical processors
* sharing this translation cache.
*
* @note Add one to the return value to get the result.
*/
UINT32 MaxAddressableIdsForLogicalProcessors : 12;
#define CPUID_EDX_MAX_ADDRESSABLE_IDS_FOR_LOGICAL_PROCESSORS_BIT 14
#define CPUID_EDX_MAX_ADDRESSABLE_IDS_FOR_LOGICAL_PROCESSORS_FLAG 0x3FFC000
#define CPUID_EDX_MAX_ADDRESSABLE_IDS_FOR_LOGICAL_PROCESSORS_MASK 0xFFF
#define CPUID_EDX_MAX_ADDRESSABLE_IDS_FOR_LOGICAL_PROCESSORS(_) (((_) >> 14) & 0xFFF)
UINT32 Reserved2 : 6;
};
/* Raw 32-bit EDX value. */
UINT32 AsUInt;
} Edx;
} CPUID_EAX_18_ECX_00;
/**
* @brief Deterministic Address Translation Parameters Sub-leaf (EAX = 18H, ECX >= 1)
*
* @note Each sub-leaf enumerates a different address translation structure.
* If ECX contains an invalid sub-leaf index, EAX/EBX/ECX/EDX return 0. Sub-leaf index n is
* invalid if n exceeds the value that sub-leaf 0 returns in EAX. A sub-leaf index is also invalid
* if EDX[4:0] returns 0. Valid sub-leaves do not need to be contiguous or in any particular order.
* A valid sub-leaf may be in a higher input ECX value than an invalid sub-leaf or than a valid
* sub-leaf of a higher or lower-level structure.
*/
typedef struct
{
/* Output registers of CPUID(EAX = 18H, ECX >= 1): same layout as the main
 * leaf except EAX is reserved. EDX[4:0] == 0 marks an invalid sub-leaf. */
union
{
struct
{
/**
* [Bits 31:0] EAX is reserved.
*/
UINT32 Reserved : 32;
#define CPUID_EAX_RESERVED_BIT 0
#define CPUID_EAX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EAX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EAX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit EAX value. */
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bit 0] 4K page size entries supported by this structure.
*/
UINT32 PageEntries4KbSupported : 1;
#define CPUID_EBX_PAGE_ENTRIES_4KB_SUPPORTED_BIT 0
#define CPUID_EBX_PAGE_ENTRIES_4KB_SUPPORTED_FLAG 0x01
#define CPUID_EBX_PAGE_ENTRIES_4KB_SUPPORTED_MASK 0x01
#define CPUID_EBX_PAGE_ENTRIES_4KB_SUPPORTED(_) (((_) >> 0) & 0x01)
/**
* [Bit 1] 2MB page size entries supported by this structure.
*/
UINT32 PageEntries2MbSupported : 1;
#define CPUID_EBX_PAGE_ENTRIES_2MB_SUPPORTED_BIT 1
#define CPUID_EBX_PAGE_ENTRIES_2MB_SUPPORTED_FLAG 0x02
#define CPUID_EBX_PAGE_ENTRIES_2MB_SUPPORTED_MASK 0x01
#define CPUID_EBX_PAGE_ENTRIES_2MB_SUPPORTED(_) (((_) >> 1) & 0x01)
/**
* [Bit 2] 4MB page size entries supported by this structure.
*/
UINT32 PageEntries4MbSupported : 1;
#define CPUID_EBX_PAGE_ENTRIES_4MB_SUPPORTED_BIT 2
#define CPUID_EBX_PAGE_ENTRIES_4MB_SUPPORTED_FLAG 0x04
#define CPUID_EBX_PAGE_ENTRIES_4MB_SUPPORTED_MASK 0x01
#define CPUID_EBX_PAGE_ENTRIES_4MB_SUPPORTED(_) (((_) >> 2) & 0x01)
/**
* [Bit 3] 1 GB page size entries supported by this structure.
*/
UINT32 PageEntries1GbSupported : 1;
#define CPUID_EBX_PAGE_ENTRIES_1GB_SUPPORTED_BIT 3
#define CPUID_EBX_PAGE_ENTRIES_1GB_SUPPORTED_FLAG 0x08
#define CPUID_EBX_PAGE_ENTRIES_1GB_SUPPORTED_MASK 0x01
#define CPUID_EBX_PAGE_ENTRIES_1GB_SUPPORTED(_) (((_) >> 3) & 0x01)
UINT32 Reserved1 : 4;
/**
* [Bits 10:8] Partitioning (0: Soft partitioning between the logical
* processors sharing this structure).
*/
UINT32 Partitioning : 3;
#define CPUID_EBX_PARTITIONING_BIT 8
#define CPUID_EBX_PARTITIONING_FLAG 0x700
#define CPUID_EBX_PARTITIONING_MASK 0x07
#define CPUID_EBX_PARTITIONING(_) (((_) >> 8) & 0x07)
UINT32 Reserved2 : 5;
/**
* [Bits 31:16] W = Ways of associativity.
*/
UINT32 WaysOfAssociativity01 : 16;
#define CPUID_EBX_WAYS_OF_ASSOCIATIVITY_01_BIT 16
#define CPUID_EBX_WAYS_OF_ASSOCIATIVITY_01_FLAG 0xFFFF0000
#define CPUID_EBX_WAYS_OF_ASSOCIATIVITY_01_MASK 0xFFFF
#define CPUID_EBX_WAYS_OF_ASSOCIATIVITY_01(_) (((_) >> 16) & 0xFFFF)
};
/* Raw 32-bit EBX value. */
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
* [Bits 31:0] Number of Sets.
*/
UINT32 NumberOfSets : 32;
#define CPUID_ECX_NUMBER_OF_SETS_BIT 0
#define CPUID_ECX_NUMBER_OF_SETS_FLAG 0xFFFFFFFF
#define CPUID_ECX_NUMBER_OF_SETS_MASK 0xFFFFFFFF
#define CPUID_ECX_NUMBER_OF_SETS(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit ECX value. */
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
* [Bits 4:0] Translation cache type field.
* - 00000b: Null (indicates this sub-leaf is not valid).
* - 00001b: Data TLB.
* - 00010b: Instruction TLB.
* - 00011b: Unified TLB.
* All other encodings are reserved.
*
* @note Some unified TLBs will allow a single TLB entry to satisfy data
* read/write and instruction fetches. Others will require separate entries
* (e.g., one loaded on data read/write and another loaded on an instruction
* fetch) . Please see the Intel(R) 64 and IA-32 Architectures Optimization
* Reference Manual for details of a particular product.
*/
UINT32 TranslationCacheTypeField : 5;
#define CPUID_EDX_TRANSLATION_CACHE_TYPE_FIELD_BIT 0
#define CPUID_EDX_TRANSLATION_CACHE_TYPE_FIELD_FLAG 0x1F
#define CPUID_EDX_TRANSLATION_CACHE_TYPE_FIELD_MASK 0x1F
#define CPUID_EDX_TRANSLATION_CACHE_TYPE_FIELD(_) (((_) >> 0) & 0x1F)
/**
* [Bits 7:5] Translation cache level (starts at 1).
*/
UINT32 TranslationCacheLevel : 3;
#define CPUID_EDX_TRANSLATION_CACHE_LEVEL_BIT 5
#define CPUID_EDX_TRANSLATION_CACHE_LEVEL_FLAG 0xE0
#define CPUID_EDX_TRANSLATION_CACHE_LEVEL_MASK 0x07
#define CPUID_EDX_TRANSLATION_CACHE_LEVEL(_) (((_) >> 5) & 0x07)
/**
* [Bit 8] Fully associative structure.
*/
UINT32 FullyAssociativeStructure : 1;
#define CPUID_EDX_FULLY_ASSOCIATIVE_STRUCTURE_BIT 8
#define CPUID_EDX_FULLY_ASSOCIATIVE_STRUCTURE_FLAG 0x100
#define CPUID_EDX_FULLY_ASSOCIATIVE_STRUCTURE_MASK 0x01
#define CPUID_EDX_FULLY_ASSOCIATIVE_STRUCTURE(_) (((_) >> 8) & 0x01)
UINT32 Reserved1 : 5;
/**
* [Bits 25:14] Maximum number of addressable IDs for logical processors
* sharing this translation cache.
*
* @note Add one to the return value to get the result.
*/
UINT32 MaxAddressableIdsForLogicalProcessors : 12;
#define CPUID_EDX_MAX_ADDRESSABLE_IDS_FOR_LOGICAL_PROCESSORS_BIT 14
#define CPUID_EDX_MAX_ADDRESSABLE_IDS_FOR_LOGICAL_PROCESSORS_FLAG 0x3FFC000
#define CPUID_EDX_MAX_ADDRESSABLE_IDS_FOR_LOGICAL_PROCESSORS_MASK 0xFFF
#define CPUID_EDX_MAX_ADDRESSABLE_IDS_FOR_LOGICAL_PROCESSORS(_) (((_) >> 14) & 0xFFF)
UINT32 Reserved2 : 6;
};
/* Raw 32-bit EDX value. */
UINT32 AsUInt;
} Edx;
} CPUID_EAX_18_ECX_01P;
/**
* @}
*/
/**
* @brief Extended Function CPUID Information
*
* When CPUID executes with EAX set to 80000000H, the processor returns the highest value the
* processor recognizes for returning extended processor information. The value is returned in the
* EAX register and is processor specific.
*/
#define CPUID_EXTENDED_FUNCTION_INFORMATION 0x80000000
typedef struct
{
/* Output registers of CPUID(EAX = 80000000H): EAX holds the highest supported
 * extended-function leaf; EBX/ECX/EDX are reserved. */
union
{
struct
{
/**
* [Bits 31:0] Maximum Input Value for Extended Function CPUID Information.
*/
UINT32 MaxExtendedFunctions : 32;
#define CPUID_EAX_MAX_EXTENDED_FUNCTIONS_BIT 0
#define CPUID_EAX_MAX_EXTENDED_FUNCTIONS_FLAG 0xFFFFFFFF
#define CPUID_EAX_MAX_EXTENDED_FUNCTIONS_MASK 0xFFFFFFFF
#define CPUID_EAX_MAX_EXTENDED_FUNCTIONS(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit EAX value. */
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bits 31:0] EBX is reserved.
*/
UINT32 Reserved : 32;
#define CPUID_EBX_RESERVED_BIT 0
#define CPUID_EBX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EBX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EBX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit EBX value. */
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
* [Bits 31:0] ECX is reserved.
*/
UINT32 Reserved : 32;
#define CPUID_ECX_RESERVED_BIT 0
#define CPUID_ECX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_ECX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_ECX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit ECX value. */
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
* [Bits 31:0] EDX is reserved.
*/
UINT32 Reserved : 32;
#define CPUID_EDX_RESERVED_BIT 0
#define CPUID_EDX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EDX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EDX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit EDX value. */
UINT32 AsUInt;
} Edx;
} CPUID_EAX_80000000;
/**
* Extended Function CPUID Information.
*/
#define CPUID_EXTENDED_CPU_SIGNATURE 0x80000001
typedef struct
{
/* Output registers of CPUID(EAX = 80000001H): extended CPU signature and
 * feature bits (LAHF/SAHF, LZCNT, PREFETCHW, SYSCALL, NX, 1-GB pages,
 * RDTSCP, Intel 64). EAX and EBX are reserved. */
union
{
struct
{
/**
* [Bits 31:0] EAX is reserved.
*/
UINT32 Reserved : 32;
#define CPUID_EAX_RESERVED_BIT 0
#define CPUID_EAX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EAX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EAX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit EAX value. */
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bits 31:0] EBX is reserved.
*/
UINT32 Reserved : 32;
#define CPUID_EBX_RESERVED_BIT 0
#define CPUID_EBX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EBX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EBX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit EBX value. */
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
* [Bit 0] LAHF/SAHF available in 64-bit mode.
*/
UINT32 LahfSahfAvailableIn64BitMode : 1;
#define CPUID_ECX_LAHF_SAHF_AVAILABLE_IN_64_BIT_MODE_BIT 0
#define CPUID_ECX_LAHF_SAHF_AVAILABLE_IN_64_BIT_MODE_FLAG 0x01
#define CPUID_ECX_LAHF_SAHF_AVAILABLE_IN_64_BIT_MODE_MASK 0x01
#define CPUID_ECX_LAHF_SAHF_AVAILABLE_IN_64_BIT_MODE(_) (((_) >> 0) & 0x01)
UINT32 Reserved1 : 4;
/**
* [Bit 5] LZCNT.
*/
UINT32 Lzcnt : 1;
#define CPUID_ECX_LZCNT_BIT 5
#define CPUID_ECX_LZCNT_FLAG 0x20
#define CPUID_ECX_LZCNT_MASK 0x01
#define CPUID_ECX_LZCNT(_) (((_) >> 5) & 0x01)
UINT32 Reserved2 : 2;
/**
* [Bit 8] PREFETCHW.
*/
UINT32 Prefetchw : 1;
#define CPUID_ECX_PREFETCHW_BIT 8
#define CPUID_ECX_PREFETCHW_FLAG 0x100
#define CPUID_ECX_PREFETCHW_MASK 0x01
#define CPUID_ECX_PREFETCHW(_) (((_) >> 8) & 0x01)
UINT32 Reserved3 : 23;
};
/* Raw 32-bit ECX value. */
UINT32 AsUInt;
} Ecx;
union
{
struct
{
UINT32 Reserved1 : 11;
/**
* [Bit 11] SYSCALL/SYSRET available in 64-bit mode.
*/
UINT32 SyscallSysretAvailableIn64BitMode : 1;
#define CPUID_EDX_SYSCALL_SYSRET_AVAILABLE_IN_64_BIT_MODE_BIT 11
#define CPUID_EDX_SYSCALL_SYSRET_AVAILABLE_IN_64_BIT_MODE_FLAG 0x800
#define CPUID_EDX_SYSCALL_SYSRET_AVAILABLE_IN_64_BIT_MODE_MASK 0x01
#define CPUID_EDX_SYSCALL_SYSRET_AVAILABLE_IN_64_BIT_MODE(_) (((_) >> 11) & 0x01)
UINT32 Reserved2 : 8;
/**
* [Bit 20] Execute Disable Bit available.
*/
UINT32 ExecuteDisableBitAvailable : 1;
#define CPUID_EDX_EXECUTE_DISABLE_BIT_AVAILABLE_BIT 20
#define CPUID_EDX_EXECUTE_DISABLE_BIT_AVAILABLE_FLAG 0x100000
#define CPUID_EDX_EXECUTE_DISABLE_BIT_AVAILABLE_MASK 0x01
#define CPUID_EDX_EXECUTE_DISABLE_BIT_AVAILABLE(_) (((_) >> 20) & 0x01)
UINT32 Reserved3 : 5;
/**
* [Bit 26] 1-GByte pages are available if 1.
*/
UINT32 Pages1GbAvailable : 1;
#define CPUID_EDX_PAGES_1GB_AVAILABLE_BIT 26
#define CPUID_EDX_PAGES_1GB_AVAILABLE_FLAG 0x4000000
#define CPUID_EDX_PAGES_1GB_AVAILABLE_MASK 0x01
#define CPUID_EDX_PAGES_1GB_AVAILABLE(_) (((_) >> 26) & 0x01)
/**
* [Bit 27] RDTSCP and IA32_TSC_AUX are available if 1.
*/
UINT32 RdtscpAvailable : 1;
#define CPUID_EDX_RDTSCP_AVAILABLE_BIT 27
#define CPUID_EDX_RDTSCP_AVAILABLE_FLAG 0x8000000
#define CPUID_EDX_RDTSCP_AVAILABLE_MASK 0x01
#define CPUID_EDX_RDTSCP_AVAILABLE(_) (((_) >> 27) & 0x01)
UINT32 Reserved4 : 1;
/**
* [Bit 29] Intel(R) 64 Architecture available if 1.
*/
UINT32 Ia64Available : 1;
#define CPUID_EDX_IA64_AVAILABLE_BIT 29
#define CPUID_EDX_IA64_AVAILABLE_FLAG 0x20000000
#define CPUID_EDX_IA64_AVAILABLE_MASK 0x01
#define CPUID_EDX_IA64_AVAILABLE(_) (((_) >> 29) & 0x01)
UINT32 Reserved5 : 2;
};
/* Raw 32-bit EDX value. */
UINT32 AsUInt;
} Edx;
} CPUID_EAX_80000001;
/**
* Extended Function CPUID Information.
*/
#define CPUID_BRAND_STRING1 0x80000002
/**
* Extended Function CPUID Information.
*/
#define CPUID_BRAND_STRING2 0x80000003
/**
* Extended Function CPUID Information.
*/
#define CPUID_BRAND_STRING3 0x80000004
typedef struct
{
/* Output registers of CPUID(EAX = 80000002H): the first 16 bytes of the
 * processor brand string, 4 bytes per register, ordered EAX, EBX, ECX, EDX. */
union
{
struct
{
/**
* [Bits 31:0] Processor Brand String.
*/
UINT32 ProcessorBrandString1 : 32;
#define CPUID_EAX_PROCESSOR_BRAND_STRING_1_BIT 0
#define CPUID_EAX_PROCESSOR_BRAND_STRING_1_FLAG 0xFFFFFFFF
#define CPUID_EAX_PROCESSOR_BRAND_STRING_1_MASK 0xFFFFFFFF
#define CPUID_EAX_PROCESSOR_BRAND_STRING_1(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit EAX value. */
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bits 31:0] Processor Brand String Continued.
*/
UINT32 ProcessorBrandString2 : 32;
#define CPUID_EBX_PROCESSOR_BRAND_STRING_2_BIT 0
#define CPUID_EBX_PROCESSOR_BRAND_STRING_2_FLAG 0xFFFFFFFF
#define CPUID_EBX_PROCESSOR_BRAND_STRING_2_MASK 0xFFFFFFFF
#define CPUID_EBX_PROCESSOR_BRAND_STRING_2(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit EBX value. */
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
* [Bits 31:0] Processor Brand String Continued.
*/
UINT32 ProcessorBrandString3 : 32;
#define CPUID_ECX_PROCESSOR_BRAND_STRING_3_BIT 0
#define CPUID_ECX_PROCESSOR_BRAND_STRING_3_FLAG 0xFFFFFFFF
#define CPUID_ECX_PROCESSOR_BRAND_STRING_3_MASK 0xFFFFFFFF
#define CPUID_ECX_PROCESSOR_BRAND_STRING_3(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit ECX value. */
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
* [Bits 31:0] Processor Brand String Continued.
*/
UINT32 ProcessorBrandString4 : 32;
#define CPUID_EDX_PROCESSOR_BRAND_STRING_4_BIT 0
#define CPUID_EDX_PROCESSOR_BRAND_STRING_4_FLAG 0xFFFFFFFF
#define CPUID_EDX_PROCESSOR_BRAND_STRING_4_MASK 0xFFFFFFFF
#define CPUID_EDX_PROCESSOR_BRAND_STRING_4(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit EDX value. */
UINT32 AsUInt;
} Edx;
} CPUID_EAX_80000002;
/**
* @brief Extended Function CPUID Information
*/
typedef struct
{
/* Output registers of CPUID(EAX = 80000003H): the second 16 bytes of the
 * processor brand string, 4 bytes per register, ordered EAX, EBX, ECX, EDX. */
union
{
struct
{
/**
* [Bits 31:0] Processor Brand String Continued.
*/
UINT32 ProcessorBrandString5 : 32;
#define CPUID_EAX_PROCESSOR_BRAND_STRING_5_BIT 0
#define CPUID_EAX_PROCESSOR_BRAND_STRING_5_FLAG 0xFFFFFFFF
#define CPUID_EAX_PROCESSOR_BRAND_STRING_5_MASK 0xFFFFFFFF
#define CPUID_EAX_PROCESSOR_BRAND_STRING_5(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit EAX value. */
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
* [Bits 31:0] Processor Brand String Continued.
*/
UINT32 ProcessorBrandString6 : 32;
#define CPUID_EBX_PROCESSOR_BRAND_STRING_6_BIT 0
#define CPUID_EBX_PROCESSOR_BRAND_STRING_6_FLAG 0xFFFFFFFF
#define CPUID_EBX_PROCESSOR_BRAND_STRING_6_MASK 0xFFFFFFFF
#define CPUID_EBX_PROCESSOR_BRAND_STRING_6(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit EBX value. */
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
* [Bits 31:0] Processor Brand String Continued.
*/
UINT32 ProcessorBrandString7 : 32;
#define CPUID_ECX_PROCESSOR_BRAND_STRING_7_BIT 0
#define CPUID_ECX_PROCESSOR_BRAND_STRING_7_FLAG 0xFFFFFFFF
#define CPUID_ECX_PROCESSOR_BRAND_STRING_7_MASK 0xFFFFFFFF
#define CPUID_ECX_PROCESSOR_BRAND_STRING_7(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit ECX value. */
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
* [Bits 31:0] Processor Brand String Continued.
*/
UINT32 ProcessorBrandString8 : 32;
#define CPUID_EDX_PROCESSOR_BRAND_STRING_8_BIT 0
#define CPUID_EDX_PROCESSOR_BRAND_STRING_8_FLAG 0xFFFFFFFF
#define CPUID_EDX_PROCESSOR_BRAND_STRING_8_MASK 0xFFFFFFFF
#define CPUID_EDX_PROCESSOR_BRAND_STRING_8(_) (((_) >> 0) & 0xFFFFFFFF)
};
/* Raw 32-bit EDX value. */
UINT32 AsUInt;
} Edx;
} CPUID_EAX_80000003;
/**
 * @brief Extended Function CPUID Information
 *
 * Output layout for CPUID leaf 0x80000004: the final block of the
 * processor brand string, continued across EAX, EBX, ECX and EDX.
 * Each register can be read whole via its AsUInt member or through
 * the single 32-bit bit-field.
 */
typedef struct
{
union
{
struct
{
/**
 * [Bits 31:0] Processor Brand String Continued.
 */
UINT32 ProcessorBrandString9 : 32;
#define CPUID_EAX_PROCESSOR_BRAND_STRING_9_BIT 0
#define CPUID_EAX_PROCESSOR_BRAND_STRING_9_FLAG 0xFFFFFFFF
#define CPUID_EAX_PROCESSOR_BRAND_STRING_9_MASK 0xFFFFFFFF
#define CPUID_EAX_PROCESSOR_BRAND_STRING_9(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
 * [Bits 31:0] Processor Brand String Continued.
 */
UINT32 ProcessorBrandString10 : 32;
#define CPUID_EBX_PROCESSOR_BRAND_STRING_10_BIT 0
#define CPUID_EBX_PROCESSOR_BRAND_STRING_10_FLAG 0xFFFFFFFF
#define CPUID_EBX_PROCESSOR_BRAND_STRING_10_MASK 0xFFFFFFFF
#define CPUID_EBX_PROCESSOR_BRAND_STRING_10(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
 * [Bits 31:0] Processor Brand String Continued.
 */
UINT32 ProcessorBrandString11 : 32;
#define CPUID_ECX_PROCESSOR_BRAND_STRING_11_BIT 0
#define CPUID_ECX_PROCESSOR_BRAND_STRING_11_FLAG 0xFFFFFFFF
#define CPUID_ECX_PROCESSOR_BRAND_STRING_11_MASK 0xFFFFFFFF
#define CPUID_ECX_PROCESSOR_BRAND_STRING_11(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
 * [Bits 31:0] Processor Brand String Continued.
 */
UINT32 ProcessorBrandString12 : 32;
#define CPUID_EDX_PROCESSOR_BRAND_STRING_12_BIT 0
#define CPUID_EDX_PROCESSOR_BRAND_STRING_12_FLAG 0xFFFFFFFF
#define CPUID_EDX_PROCESSOR_BRAND_STRING_12_MASK 0xFFFFFFFF
#define CPUID_EDX_PROCESSOR_BRAND_STRING_12(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Edx;
} CPUID_EAX_80000004;
/**
 * @brief Extended Function CPUID Information
 *
 * Output layout for CPUID leaf 0x80000005. All four registers are
 * reserved in this leaf; the type exists so callers can still address
 * each register uniformly via AsUInt.
 */
typedef struct
{
union
{
struct
{
/**
 * [Bits 31:0] EAX is reserved.
 */
UINT32 Reserved : 32;
#define CPUID_EAX_RESERVED_BIT 0
#define CPUID_EAX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EAX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EAX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
 * [Bits 31:0] EBX is reserved.
 */
UINT32 Reserved : 32;
#define CPUID_EBX_RESERVED_BIT 0
#define CPUID_EBX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EBX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EBX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
 * [Bits 31:0] ECX is reserved.
 */
UINT32 Reserved : 32;
#define CPUID_ECX_RESERVED_BIT 0
#define CPUID_ECX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_ECX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_ECX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
 * [Bits 31:0] EDX is reserved.
 */
UINT32 Reserved : 32;
#define CPUID_EDX_RESERVED_BIT 0
#define CPUID_EDX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EDX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EDX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Edx;
} CPUID_EAX_80000005;
/**
 * Extended Function CPUID Information.
 *
 * Output layout for CPUID leaf 0x80000006 (extended cache info). The
 * usable data is in ECX (line size, L2 associativity encoding, cache
 * size in 1K units); EAX, EBX and EDX are reserved.
 */
#define CPUID_EXTENDED_CACHE_INFO 0x80000006
typedef struct
{
union
{
struct
{
/**
 * [Bits 31:0] EAX is reserved.
 */
UINT32 Reserved : 32;
#define CPUID_EAX_RESERVED_BIT 0
#define CPUID_EAX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EAX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EAX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
 * [Bits 31:0] EBX is reserved.
 */
UINT32 Reserved : 32;
#define CPUID_EBX_RESERVED_BIT 0
#define CPUID_EBX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EBX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EBX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
 * [Bits 7:0] Cache Line size in bytes.
 */
UINT32 CacheLineSizeInBytes : 8;
#define CPUID_ECX_CACHE_LINE_SIZE_IN_BYTES_BIT 0
#define CPUID_ECX_CACHE_LINE_SIZE_IN_BYTES_FLAG 0xFF
#define CPUID_ECX_CACHE_LINE_SIZE_IN_BYTES_MASK 0xFF
#define CPUID_ECX_CACHE_LINE_SIZE_IN_BYTES(_) (((_) >> 0) & 0xFF)
UINT32 Reserved1 : 4;
/**
 * [Bits 15:12] L2 Associativity field.
 * L2 associativity field encodings:
 * - 00H - Disabled.
 * - 01H - Direct mapped.
 * - 02H - 2-way.
 * - 04H - 4-way.
 * - 06H - 8-way.
 * - 08H - 16-way.
 * - 0FH - Fully associative.
 */
UINT32 L2AssociativityField : 4;
#define CPUID_ECX_L2_ASSOCIATIVITY_FIELD_BIT 12
#define CPUID_ECX_L2_ASSOCIATIVITY_FIELD_FLAG 0xF000
#define CPUID_ECX_L2_ASSOCIATIVITY_FIELD_MASK 0x0F
#define CPUID_ECX_L2_ASSOCIATIVITY_FIELD(_) (((_) >> 12) & 0x0F)
/**
 * [Bits 31:16] Cache size in 1K units.
 */
UINT32 CacheSizeIn1KUnits : 16;
#define CPUID_ECX_CACHE_SIZE_IN_1K_UNITS_BIT 16
#define CPUID_ECX_CACHE_SIZE_IN_1K_UNITS_FLAG 0xFFFF0000
#define CPUID_ECX_CACHE_SIZE_IN_1K_UNITS_MASK 0xFFFF
#define CPUID_ECX_CACHE_SIZE_IN_1K_UNITS(_) (((_) >> 16) & 0xFFFF)
};
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
 * [Bits 31:0] EDX is reserved.
 */
UINT32 Reserved : 32;
#define CPUID_EDX_RESERVED_BIT 0
#define CPUID_EDX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EDX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EDX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Edx;
} CPUID_EAX_80000006;
/**
 * Extended Function CPUID Information.
 *
 * Output layout for CPUID leaf 0x80000007. Only EDX[8] (invariant TSC
 * support) is defined; the remaining registers and bits are reserved.
 */
#define CPUID_EXTENDED_TIME_STAMP_COUNTER 0x80000007
typedef struct
{
union
{
struct
{
/**
 * [Bits 31:0] EAX is reserved.
 */
UINT32 Reserved : 32;
#define CPUID_EAX_RESERVED_BIT 0
#define CPUID_EAX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EAX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EAX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
 * [Bits 31:0] EBX is reserved.
 */
UINT32 Reserved : 32;
#define CPUID_EBX_RESERVED_BIT 0
#define CPUID_EBX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EBX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EBX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
 * [Bits 31:0] ECX is reserved.
 */
UINT32 Reserved : 32;
#define CPUID_ECX_RESERVED_BIT 0
#define CPUID_ECX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_ECX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_ECX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ecx;
union
{
struct
{
UINT32 Reserved1 : 8;
/**
 * [Bit 8] Invariant TSC available if 1.
 */
UINT32 InvariantTscAvailable : 1;
#define CPUID_EDX_INVARIANT_TSC_AVAILABLE_BIT 8
#define CPUID_EDX_INVARIANT_TSC_AVAILABLE_FLAG 0x100
#define CPUID_EDX_INVARIANT_TSC_AVAILABLE_MASK 0x01
#define CPUID_EDX_INVARIANT_TSC_AVAILABLE(_) (((_) >> 8) & 0x01)
UINT32 Reserved2 : 23;
};
UINT32 AsUInt;
} Edx;
} CPUID_EAX_80000007;
/**
 * Extended Function CPUID Information.
 *
 * Output layout for CPUID leaf 0x80000008. EAX reports the number of
 * physical (bits 7:0) and linear (bits 15:8) address bits supported;
 * EBX, ECX and EDX are reserved in this definition.
 */
#define CPUID_EXTENDED_VIRTUAL_PHYSICAL_ADDRESS_SIZE 0x80000008
typedef struct
{
/**
 * @brief Linear/Physical Address size
 */
union
{
struct
{
/**
 * [Bits 7:0] Number of Physical Address Bits.
 */
UINT32 NumberOfPhysicalAddressBits : 8;
#define CPUID_EAX_NUMBER_OF_PHYSICAL_ADDRESS_BITS_BIT 0
#define CPUID_EAX_NUMBER_OF_PHYSICAL_ADDRESS_BITS_FLAG 0xFF
#define CPUID_EAX_NUMBER_OF_PHYSICAL_ADDRESS_BITS_MASK 0xFF
#define CPUID_EAX_NUMBER_OF_PHYSICAL_ADDRESS_BITS(_) (((_) >> 0) & 0xFF)
/**
 * [Bits 15:8] Number of Linear Address Bits.
 */
UINT32 NumberOfLinearAddressBits : 8;
#define CPUID_EAX_NUMBER_OF_LINEAR_ADDRESS_BITS_BIT 8
#define CPUID_EAX_NUMBER_OF_LINEAR_ADDRESS_BITS_FLAG 0xFF00
#define CPUID_EAX_NUMBER_OF_LINEAR_ADDRESS_BITS_MASK 0xFF
#define CPUID_EAX_NUMBER_OF_LINEAR_ADDRESS_BITS(_) (((_) >> 8) & 0xFF)
UINT32 Reserved1 : 16;
};
UINT32 AsUInt;
} Eax;
union
{
struct
{
/**
 * [Bits 31:0] EBX is reserved.
 */
UINT32 Reserved : 32;
#define CPUID_EBX_RESERVED_BIT 0
#define CPUID_EBX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EBX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EBX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ebx;
union
{
struct
{
/**
 * [Bits 31:0] ECX is reserved.
 */
UINT32 Reserved : 32;
#define CPUID_ECX_RESERVED_BIT 0
#define CPUID_ECX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_ECX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_ECX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Ecx;
union
{
struct
{
/**
 * [Bits 31:0] EDX is reserved.
 */
UINT32 Reserved : 32;
#define CPUID_EDX_RESERVED_BIT 0
#define CPUID_EDX_RESERVED_FLAG 0xFFFFFFFF
#define CPUID_EDX_RESERVED_MASK 0xFFFFFFFF
#define CPUID_EDX_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
};
UINT32 AsUInt;
} Edx;
} CPUID_EAX_80000008;
/**
 * @}
 */
/**
 * @defgroup MODEL_SPECIFIC_REGISTERS \
 * Model Specific Registers
 *
 * @see Vol2A[3.2(CPUID)] (reference)
 * @{
 */
/**
 * @defgroup IA32_P5_MC \
 * IA32_P5_MC_(x)
 *
 * When machine-check exceptions are enabled for the Pentium processor (MCE flag is set in control
 * register CR4), the machine-check exception handler uses the RDMSR instruction to read the error
 * type from the P5_MC_TYPE register and the machine check address from the P5_MC_ADDR register. The
 * handler then normally reports these register values to the system console before aborting
 * execution.
 *
 * @see Vol3B[15.10.2(Pentium Processor Machine-Check Exception Handling)] (reference)
 * @{
 */
/* NOTE: each constant below is an MSR index intended to be passed to RDMSR/WRMSR. */
/**
 * Machine-check exception address.
 *
 * @remarks 05_01H
 * @see Vol4[2.22(MSRS IN PENTIUM PROCESSORS)]
 */
#define IA32_P5_MC_ADDR 0x00000000
/**
 * Machine-check exception type.
 *
 * @remarks 05_01H
 * @see Vol4[2.22(MSRS IN PENTIUM PROCESSORS)]
 */
#define IA32_P5_MC_TYPE 0x00000001
/**
 * @}
 */
/**
 * System coherence line size.
 *
 * @remarks 0F_03H
 * @see Vol3A[8.10.5(Monitor/Mwait Address Range Determination)]
 * @see Vol3A[8.10.5(Monitor/Mwait Address Range Determination)] (reference)
 */
#define IA32_MONITOR_FILTER_LINE_SIZE 0x00000006
/**
 * Value as returned by instruction RDTSC.
 *
 * @remarks 05_01H
 * @see Vol3B[17.17(TIME-STAMP COUNTER)]
 */
#define IA32_TIME_STAMP_COUNTER 0x00000010
/**
 * The operating system can use this MSR to determine "slot" information for the processor and the
 * proper microcode update to load.
 *
 * @remarks 06_01H
 */
#define IA32_PLATFORM_ID 0x00000017
/* Bit layout of the IA32_PLATFORM_ID MSR; read/write the whole 64-bit value via AsUInt. */
typedef union
{
struct
{
UINT64 Reserved1 : 50;
/**
 * @brief Platform Id (RO)
 *
 * [Bits 52:50] Contains information concerning the intended platform for the
 * processor.
 *
 *
 * 52 | 51 | 50 | _
 * --:|:--:|:---|-----------------
 * 0 | 0 | 0 | Processor Flag 0
 * 0 | 0 | 1 | Processor Flag 1
 * 0 | 1 | 0 | Processor Flag 2
 * 0 | 1 | 1 | Processor Flag 3
 * 1 | 0 | 0 | Processor Flag 4
 * 1 | 0 | 1 | Processor Flag 5
 * 1 | 1 | 0 | Processor Flag 6
 * 1 | 1 | 1 | Processor Flag 7
 */
UINT64 PlatformId : 3;
#define IA32_PLATFORM_ID_PLATFORM_ID_BIT 50
#define IA32_PLATFORM_ID_PLATFORM_ID_FLAG 0x1C000000000000
#define IA32_PLATFORM_ID_PLATFORM_ID_MASK 0x07
#define IA32_PLATFORM_ID_PLATFORM_ID(_) (((_) >> 50) & 0x07)
UINT64 Reserved2 : 11;
};
UINT64 AsUInt;
} IA32_PLATFORM_ID_REGISTER;
/**
 * This register holds the APIC base address, permitting the relocation of the APIC memory map.
 *
 * @remarks 06_01H
 * @see Vol3A[10.4.4(Local APIC Status and Location)]
 * @see Vol3A[10.4.5(Relocating the Local APIC Registers)]
 */
#define IA32_APIC_BASE 0x0000001B
/* Bit layout of IA32_APIC_BASE. ApicBase holds bits 47:12 of the physical
 * base address (shift left 12 to form the address). */
typedef union
{
struct
{
UINT64 Reserved1 : 8;
/**
 * [Bit 8] BSP flag.
 */
UINT64 BspFlag : 1;
#define IA32_APIC_BASE_BSP_FLAG_BIT 8
#define IA32_APIC_BASE_BSP_FLAG_FLAG 0x100
#define IA32_APIC_BASE_BSP_FLAG_MASK 0x01
#define IA32_APIC_BASE_BSP_FLAG(_) (((_) >> 8) & 0x01)
UINT64 Reserved2 : 1;
/**
 * [Bit 10] Enable x2APIC mode.
 */
UINT64 EnableX2ApicMode : 1;
#define IA32_APIC_BASE_ENABLE_X2APIC_MODE_BIT 10
#define IA32_APIC_BASE_ENABLE_X2APIC_MODE_FLAG 0x400
#define IA32_APIC_BASE_ENABLE_X2APIC_MODE_MASK 0x01
#define IA32_APIC_BASE_ENABLE_X2APIC_MODE(_) (((_) >> 10) & 0x01)
/**
 * [Bit 11] APIC Global Enable.
 */
UINT64 ApicGlobalEnable : 1;
#define IA32_APIC_BASE_APIC_GLOBAL_ENABLE_BIT 11
#define IA32_APIC_BASE_APIC_GLOBAL_ENABLE_FLAG 0x800
#define IA32_APIC_BASE_APIC_GLOBAL_ENABLE_MASK 0x01
#define IA32_APIC_BASE_APIC_GLOBAL_ENABLE(_) (((_) >> 11) & 0x01)
/**
 * [Bits 47:12] APIC Base.
 */
UINT64 ApicBase : 36;
#define IA32_APIC_BASE_APIC_BASE_BIT 12
#define IA32_APIC_BASE_APIC_BASE_FLAG 0xFFFFFFFFF000
#define IA32_APIC_BASE_APIC_BASE_MASK 0xFFFFFFFFF
#define IA32_APIC_BASE_APIC_BASE(_) (((_) >> 12) & 0xFFFFFFFFF)
UINT64 Reserved3 : 16;
};
UINT64 AsUInt;
} IA32_APIC_BASE_REGISTER;
/**
 * Control Features in Intel 64 Processor.
 *
 * @remarks If any one enumeration condition for defined bit field holds.
 */
#define IA32_FEATURE_CONTROL 0x0000003A
/* Bit layout of IA32_FEATURE_CONTROL. Once LockBit is set the MSR is
 * read-only until reset (writes cause #GP), so check it before attempting
 * to enable VMX/SMX features. */
typedef union
{
struct
{
/**
 * @brief Lock bit (R/WO)
 *
 * [Bit 0] When set, locks this MSR from being written; writes to this bit will
 * result in GP(0).
 *
 * @note Once the Lock bit is set, the contents of this register cannot be modified.
 * Therefore the lock bit must be set after configuring support for Intel
 * Virtualization Technology and prior to transferring control to an option ROM or
 * the OS. Hence, once the Lock bit is set, the entire IA32_FEATURE_CONTROL contents
 * are preserved across RESET when PWRGOOD is not deasserted.
 * @remarks If any one enumeration condition for defined bit field position greater
 * than bit 0 holds.
 */
UINT64 LockBit : 1;
#define IA32_FEATURE_CONTROL_LOCK_BIT_BIT 0
#define IA32_FEATURE_CONTROL_LOCK_BIT_FLAG 0x01
#define IA32_FEATURE_CONTROL_LOCK_BIT_MASK 0x01
#define IA32_FEATURE_CONTROL_LOCK_BIT(_) (((_) >> 0) & 0x01)
/**
 * @brief Enable VMX inside SMX operation (R/WL)
 *
 * [Bit 1] This bit enables a system executive to use VMX in conjunction with SMX to
 * support Intel(R) Trusted Execution Technology. BIOS must set this bit only when
 * the CPUID function 1 returns VMX feature flag and SMX feature flag set (ECX bits
 * 5 and 6 respectively).
 *
 * @remarks If CPUID.01H:ECX[5] = 1 && CPUID.01H:ECX[6] = 1
 */
UINT64 EnableVmxInsideSmx : 1;
#define IA32_FEATURE_CONTROL_ENABLE_VMX_INSIDE_SMX_BIT 1
#define IA32_FEATURE_CONTROL_ENABLE_VMX_INSIDE_SMX_FLAG 0x02
#define IA32_FEATURE_CONTROL_ENABLE_VMX_INSIDE_SMX_MASK 0x01
#define IA32_FEATURE_CONTROL_ENABLE_VMX_INSIDE_SMX(_) (((_) >> 1) & 0x01)
/**
 * @brief Enable VMX outside SMX operation (R/WL)
 *
 * [Bit 2] This bit enables VMX for a system executive that does not require SMX.
 * BIOS must set this bit only when the CPUID function 1 returns the VMX feature
 * flag set (ECX bit 5).
 *
 * @remarks If CPUID.01H:ECX[5] = 1
 */
UINT64 EnableVmxOutsideSmx : 1;
#define IA32_FEATURE_CONTROL_ENABLE_VMX_OUTSIDE_SMX_BIT 2
#define IA32_FEATURE_CONTROL_ENABLE_VMX_OUTSIDE_SMX_FLAG 0x04
#define IA32_FEATURE_CONTROL_ENABLE_VMX_OUTSIDE_SMX_MASK 0x01
#define IA32_FEATURE_CONTROL_ENABLE_VMX_OUTSIDE_SMX(_) (((_) >> 2) & 0x01)
UINT64 Reserved1 : 5;
/**
 * @brief SENTER Local Function Enable (R/WL)
 *
 * [Bits 14:8] When set, each bit in the field represents an enable control for a
 * corresponding SENTER function. This field is supported only if CPUID.1:ECX.[bit
 * 6] is set.
 *
 * @remarks If CPUID.01H:ECX[6] = 1
 */
UINT64 SenterLocalFunctionEnables : 7;
#define IA32_FEATURE_CONTROL_SENTER_LOCAL_FUNCTION_ENABLES_BIT 8
#define IA32_FEATURE_CONTROL_SENTER_LOCAL_FUNCTION_ENABLES_FLAG 0x7F00
#define IA32_FEATURE_CONTROL_SENTER_LOCAL_FUNCTION_ENABLES_MASK 0x7F
#define IA32_FEATURE_CONTROL_SENTER_LOCAL_FUNCTION_ENABLES(_) (((_) >> 8) & 0x7F)
/**
 * @brief SENTER Global Enable (R/WL)
 *
 * [Bit 15] This bit must be set to enable SENTER leaf functions. This bit is
 * supported only if CPUID.1:ECX.[bit 6] is set.
 *
 * @remarks If CPUID.01H:ECX[6] = 1
 */
UINT64 SenterGlobalEnable : 1;
#define IA32_FEATURE_CONTROL_SENTER_GLOBAL_ENABLE_BIT 15
#define IA32_FEATURE_CONTROL_SENTER_GLOBAL_ENABLE_FLAG 0x8000
#define IA32_FEATURE_CONTROL_SENTER_GLOBAL_ENABLE_MASK 0x01
#define IA32_FEATURE_CONTROL_SENTER_GLOBAL_ENABLE(_) (((_) >> 15) & 0x01)
UINT64 Reserved2 : 1;
/**
 * @brief SGX Launch Control Enable (R/WL)
 *
 * [Bit 17] This bit must be set to enable runtime reconfiguration of SGX Launch
 * Control via the IA32_SGXLEPUBKEYHASHn MSR.
 *
 * @remarks If CPUID.(EAX=07H, ECX=0H): ECX[30] = 1
 */
UINT64 SgxLaunchControlEnable : 1;
#define IA32_FEATURE_CONTROL_SGX_LAUNCH_CONTROL_ENABLE_BIT 17
#define IA32_FEATURE_CONTROL_SGX_LAUNCH_CONTROL_ENABLE_FLAG 0x20000
#define IA32_FEATURE_CONTROL_SGX_LAUNCH_CONTROL_ENABLE_MASK 0x01
#define IA32_FEATURE_CONTROL_SGX_LAUNCH_CONTROL_ENABLE(_) (((_) >> 17) & 0x01)
/**
 * @brief SGX Global Enable (R/WL)
 *
 * [Bit 18] This bit must be set to enable SGX leaf functions.
 *
 * @remarks If CPUID.(EAX=07H, ECX=0H): EBX[2] = 1
 */
UINT64 SgxGlobalEnable : 1;
#define IA32_FEATURE_CONTROL_SGX_GLOBAL_ENABLE_BIT 18
#define IA32_FEATURE_CONTROL_SGX_GLOBAL_ENABLE_FLAG 0x40000
#define IA32_FEATURE_CONTROL_SGX_GLOBAL_ENABLE_MASK 0x01
#define IA32_FEATURE_CONTROL_SGX_GLOBAL_ENABLE(_) (((_) >> 18) & 0x01)
UINT64 Reserved3 : 1;
/**
 * @brief LMCE On (R/WL)
 *
 * [Bit 20] When set, system software can program the MSRs associated with LMCE to
 * configure delivery of some machine check exceptions to a single logical
 * processor.
 *
 * @remarks If IA32_MCG_CAP[27] = 1
 */
UINT64 LmceOn : 1;
#define IA32_FEATURE_CONTROL_LMCE_ON_BIT 20
#define IA32_FEATURE_CONTROL_LMCE_ON_FLAG 0x100000
#define IA32_FEATURE_CONTROL_LMCE_ON_MASK 0x01
#define IA32_FEATURE_CONTROL_LMCE_ON(_) (((_) >> 20) & 0x01)
UINT64 Reserved4 : 43;
};
UINT64 AsUInt;
} IA32_FEATURE_CONTROL_REGISTER;
/**
 * Per Logical Processor TSC Adjust.
 *
 * @remarks If CPUID.(EAX=07H, ECX=0H): EBX[1] = 1
 */
#define IA32_TSC_ADJUST 0x0000003B
/* Plain 64-bit value (no sub-fields), so a struct rather than a union is used. */
typedef struct
{
/**
 * Local offset value of the IA32_TSC for a logical processor. Reset value is zero. A write
 * to IA32_TSC will modify the local offset in IA32_TSC_ADJUST and the content of IA32_TSC,
 * but does not affect the internal invariant TSC hardware.
 */
UINT64 ThreadAdjust;
} IA32_TSC_ADJUST_REGISTER;
/**
 * Speculation Control. The MSR bits are defined as logical processor scope. On some core
 * implementations, the bits may impact sibling logical processors on the same core. This MSR has a
 * value of 0 after reset and is unaffected by INIT\# or SIPI\#.
 *
 * @remarks If any one of the enumeration conditions for defined bit field positions holds.
 */
#define IA32_SPEC_CTRL 0x00000048
/* Bit layout of IA32_SPEC_CTRL (speculative-execution mitigation controls). */
typedef union
{
struct
{
/**
 * [Bit 0] IBRS: Indirect Branch Restricted Speculation (IBRS). Restricts
 * speculation of indirect branch.
 *
 * @remarks If CPUID.(EAX=07H,ECX=0):EDX[26]=1
 */
UINT64 Ibrs : 1;
#define IA32_SPEC_CTRL_IBRS_BIT 0
#define IA32_SPEC_CTRL_IBRS_FLAG 0x01
#define IA32_SPEC_CTRL_IBRS_MASK 0x01
#define IA32_SPEC_CTRL_IBRS(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] STIBP: Single Thread Indirect Branch Predictors (STIBP). Prevents
 * indirect branch predictions on all logical processors on the core from being
 * controlled by any sibling logical processor in the same core.
 *
 * @remarks If CPUID.(EAX=07H,ECX=0):EDX[27]=1
 */
UINT64 Stibp : 1;
#define IA32_SPEC_CTRL_STIBP_BIT 1
#define IA32_SPEC_CTRL_STIBP_FLAG 0x02
#define IA32_SPEC_CTRL_STIBP_MASK 0x01
#define IA32_SPEC_CTRL_STIBP(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] SSBD: Speculative Store Bypass Disable (SSBD). Delays speculative
 * execution of a load until the addresses for all older stores are known.
 *
 * @remarks If CPUID.(EAX=07H,ECX=0):EDX[31]=1
 */
UINT64 Ssbd : 1;
#define IA32_SPEC_CTRL_SSBD_BIT 2
#define IA32_SPEC_CTRL_SSBD_FLAG 0x04
#define IA32_SPEC_CTRL_SSBD_MASK 0x01
#define IA32_SPEC_CTRL_SSBD(_) (((_) >> 2) & 0x01)
UINT64 Reserved1 : 61;
};
UINT64 AsUInt;
} IA32_SPEC_CTRL_REGISTER;
/**
 * Prediction Command. Gives software a way to issue commands that affect the state of predictors.
 *
 * @remarks If any one of the enumeration conditions for defined bit field positions holds.
 */
#define IA32_PRED_CMD 0x00000049
/* Bit layout of IA32_PRED_CMD; only bit 0 (IBPB) is defined. */
typedef union
{
struct
{
/**
 * [Bit 0] IBPB: Indirect Branch Prediction Barrier (IBPB).
 *
 * @remarks If CPUID.(EAX=07H,ECX=0):EDX[26]=1
 */
UINT64 Ibpb : 1;
#define IA32_PRED_CMD_IBPB_BIT 0
#define IA32_PRED_CMD_IBPB_FLAG 0x01
#define IA32_PRED_CMD_IBPB_MASK 0x01
#define IA32_PRED_CMD_IBPB(_) (((_) >> 0) & 0x01)
UINT64 Reserved1 : 63;
};
UINT64 AsUInt;
} IA32_PRED_CMD_REGISTER;
/**
 * @brief BIOS Update Trigger (W)
 *
 * Executing a WRMSR instruction to this MSR causes a microcode update to be loaded into the
 * processor. A processor may prevent writing to this MSR when loading guest states on VM entries or
 * saving guest states on VM exits.
 *
 * @remarks 06_01H
 * @see Vol3A[9.11.6(Microcode Update Loader)]
 */
#define IA32_BIOS_UPDATE_TRIGGER 0x00000079
/**
 * @brief BIOS Update Signature (RO)
 *
 * Returns the microcode update signature following the execution of CPUID.01H. A processor may
 * prevent writing to this MSR when loading guest states on VM entries or saving guest states on VM
 * exits.
 *
 * @remarks 06_01H
 */
#define IA32_BIOS_UPDATE_SIGNATURE 0x0000008B
/* Layout of IA32_BIOS_UPDATE_SIGNATURE: low 32 bits reserved, high 32 bits
 * hold the currently loaded microcode-update signature. */
typedef union
{
struct
{
/**
 * [Bits 31:0] Reserved.
 */
UINT64 Reserved : 32;
#define IA32_BIOS_UPDATE_SIGNATURE_RESERVED_BIT 0
#define IA32_BIOS_UPDATE_SIGNATURE_RESERVED_FLAG 0xFFFFFFFF
#define IA32_BIOS_UPDATE_SIGNATURE_RESERVED_MASK 0xFFFFFFFF
#define IA32_BIOS_UPDATE_SIGNATURE_RESERVED(_) (((_) >> 0) & 0xFFFFFFFF)
/**
 * @brief Microcode update signature
 *
 * [Bits 63:32] This field contains the signature of the currently loaded microcode
 * update when read following the execution of the CPUID instruction, function 1. It
 * is required that this register field be pre-loaded with zero prior to executing
 * the CPUID, function 1. If the field remains equal to zero, then there is no
 * microcode update loaded. Another nonzero value will be the signature.
 *
 * @see Vol3A[9.11.7.1(Determining the Signature)] (reference)
 */
UINT64 MicrocodeUpdateSignature : 32;
#define IA32_BIOS_UPDATE_SIGNATURE_MICROCODE_UPDATE_SIGNATURE_BIT 32
#define IA32_BIOS_UPDATE_SIGNATURE_MICROCODE_UPDATE_SIGNATURE_FLAG 0xFFFFFFFF00000000
#define IA32_BIOS_UPDATE_SIGNATURE_MICROCODE_UPDATE_SIGNATURE_MASK 0xFFFFFFFF
#define IA32_BIOS_UPDATE_SIGNATURE_MICROCODE_UPDATE_SIGNATURE(_) (((_) >> 32) & 0xFFFFFFFF)
};
UINT64 AsUInt;
} IA32_BIOS_UPDATE_SIGNATURE_REGISTER;
/**
 * @defgroup IA32_SGXLEPUBKEYHASH \
 * IA32_SGXLEPUBKEYHASH[(64*n+63):(64*n)]
 *
 * Bits (64*n+63):(64*n) of the SHA256 digest of the SIGSTRUCT.MODULUS for SGX Launch Enclave. On
 * reset, the default value is the digest of Intel's signing key.
 *
 * @remarks Read permitted If CPUID.(EAX=12H,ECX=0H): EAX[0]=1 && CPUID.(EAX=07H,ECX=0H):ECX[30]=1.
 * Write permitted if CPUID.(EAX=12H,ECX=0H): EAX[0]=1 && IA32_FEATURE_CONTROL[17] = 1 &&
 * IA32_FEATURE_CONTROL[0] = 1.
 * @{
 */
/* Four consecutive MSRs; each holds one 64-bit slice of the 256-bit SHA256 digest. */
#define IA32_SGXLEPUBKEYHASH0 0x0000008C
#define IA32_SGXLEPUBKEYHASH1 0x0000008D
#define IA32_SGXLEPUBKEYHASH2 0x0000008E
#define IA32_SGXLEPUBKEYHASH3 0x0000008F
/**
 * @}
 */
/**
 * SMM Monitor Configuration.
 *
 * @remarks If CPUID.01H: ECX[5]=1 || CPUID.01H: ECX[6] = 1
 */
#define IA32_SMM_MONITOR_CTL 0x0000009B
/* Bit layout of IA32_SMM_MONITOR_CTL. MsegBase holds bits 31:12 of the MSEG
 * physical base address (shift left 12 to form the address). */
typedef union
{
struct
{
/**
 * @brief Valid (R/W)
 *
 * [Bit 0] The STM may be invoked using VMCALL only if this bit is 1. Because VMCALL
 * is used to activate the dual-monitor treatment, the dual-monitor treatment cannot
 * be activated if the bit is 0. This bit is cleared when the logical processor is
 * reset.
 *
 * @see Vol3C[34.15.6(Activating the Dual-Monitor Treatment)]
 * @see Vol3C[34.15.5(Enabling the Dual-Monitor Treatment)] (reference)
 */
UINT64 Valid : 1;
#define IA32_SMM_MONITOR_CTL_VALID_BIT 0
#define IA32_SMM_MONITOR_CTL_VALID_FLAG 0x01
#define IA32_SMM_MONITOR_CTL_VALID_MASK 0x01
#define IA32_SMM_MONITOR_CTL_VALID(_) (((_) >> 0) & 0x01)
UINT64 Reserved1 : 1;
/**
 * @brief Controls SMI unblocking by VMXOFF
 *
 * [Bit 2] Determines whether executions of VMXOFF unblock SMIs under the default
 * treatment of SMIs and SMM. Executions of VMXOFF unblock SMIs unless bit 2 is 1
 * (the value of bit 0 is irrelevant).
 *
 * @remarks If IA32_VMX_MISC[28]
 * @see Vol3C[34.14.4(VMXOFF and SMI Unblocking)]
 * @see Vol3C[34.15.5(Enabling the Dual-Monitor Treatment)] (reference)
 */
UINT64 SmiUnblockingByVmxoff : 1;
#define IA32_SMM_MONITOR_CTL_SMI_UNBLOCKING_BY_VMXOFF_BIT 2
#define IA32_SMM_MONITOR_CTL_SMI_UNBLOCKING_BY_VMXOFF_FLAG 0x04
#define IA32_SMM_MONITOR_CTL_SMI_UNBLOCKING_BY_VMXOFF_MASK 0x01
#define IA32_SMM_MONITOR_CTL_SMI_UNBLOCKING_BY_VMXOFF(_) (((_) >> 2) & 0x01)
UINT64 Reserved2 : 9;
/**
 * @brief MSEG Base (R/W)
 *
 * [Bits 31:12] Value that, when shifted left 12 bits, is the physical address of
 * MSEG (the MSEG base address).
 *
 * @see Vol3C[34.15.5(Enabling the Dual-Monitor Treatment)] (reference)
 */
UINT64 MsegBase : 20;
#define IA32_SMM_MONITOR_CTL_MSEG_BASE_BIT 12
#define IA32_SMM_MONITOR_CTL_MSEG_BASE_FLAG 0xFFFFF000
#define IA32_SMM_MONITOR_CTL_MSEG_BASE_MASK 0xFFFFF
#define IA32_SMM_MONITOR_CTL_MSEG_BASE(_) (((_) >> 12) & 0xFFFFF)
UINT64 Reserved3 : 32;
};
UINT64 AsUInt;
} IA32_SMM_MONITOR_CTL_REGISTER;
/* In-memory MSEG header layout consumed by the processor when the STM is
 * activated (see the Vol3C references below); field order must not change. */
typedef struct
{
/**
 * @brief MSEG revision identifier
 *
 * Different processors may use different MSEG revision identifiers. These identifiers
 * enable software to avoid using an MSEG header formatted for one processor on a processor
 * that uses a different format. Software can discover the MSEG revision identifier that a
 * processor uses by reading the VMX capability MSR IA32_VMX_MISC.
 *
 * @see Vol3D[A.6(MISCELLANEOUS DATA)]
 */
UINT32 MsegHeaderRevision;
/**
 * @brief SMM-transfer monitor features field
 *
 * Bits 31:1 of this field are reserved and must be zero. Bit 0 of the field is the IA-32e
 * mode SMM feature bit. It indicates whether the logical processor will be in IA-32e mode
 * after the STM is activated.
 *
 * @see Vol3C[34.15.6(Activating the Dual-Monitor Treatment)]
 */
UINT32 MonitorFeatures;
/**
 * Define values for the MonitorFeatures field of MSEG_HEADER.
 */
#define IA32_STM_FEATURES_IA32E 0x00000001
/**
 * Fields that determine how processor state is loaded when the STM is activated. SMM code
 * should establish these fields so that activating of the STM invokes the STM's
 * initialization code.
 *
 * @see Vol3C[34.15.6.5(Loading Host State)]
 */
UINT32 GdtrLimit;
UINT32 GdtrBaseOffset;
UINT32 CsSelector;
UINT32 EipOffset;
UINT32 EspOffset;
UINT32 Cr3Offset;
} IA32_MSEG_HEADER;
/**
 * Base address of the logical processor's SMRAM image.
 *
 * @remarks If IA32_VMX_MISC[15]
 */
#define IA32_SMBASE 0x0000009E
/**
 * @defgroup IA32_PMC \
 * IA32_PMC(n)
 *
 * General Performance Counters.
 *
 * @remarks If CPUID.0AH: EAX[15:8] > n
 * @{
 */
/* Eight consecutive MSR indices (0xC1..0xC8), one per general-purpose counter. */
#define IA32_PMC0 0x000000C1
#define IA32_PMC1 0x000000C2
#define IA32_PMC2 0x000000C3
#define IA32_PMC3 0x000000C4
#define IA32_PMC4 0x000000C5
#define IA32_PMC5 0x000000C6
#define IA32_PMC6 0x000000C7
#define IA32_PMC7 0x000000C8
/**
 * @}
 */
/**
 * TSC Frequency Clock Counter.
 *
 * @remarks If CPUID.06H: ECX[0] = 1
 */
#define IA32_MPERF 0x000000E7
/* Plain 64-bit counter value (no sub-fields); paired with IA32_APERF below. */
typedef struct
{
/**
 * @brief C0 TSC Frequency Clock Count
 *
 * Increments at fixed interval (relative to TSC freq.) when the logical processor is in C0.
 * Cleared upon overflow / wrap-around of IA32_APERF.
 */
UINT64 C0Mcnt;
} IA32_MPERF_REGISTER;
/**
 * Actual Performance Clock Counter
 *
 * @remarks If CPUID.06H: ECX[0] = 1
 */
#define IA32_APERF 0x000000E8
/* Plain 64-bit counter value (no sub-fields); paired with IA32_MPERF above. */
typedef struct
{
/**
 * @brief C0 Actual Frequency Clock Count
 *
 * Accumulates core clock counts at the coordinated clock frequency, when the logical
 * processor is in C0. Cleared upon overflow / wrap-around of IA32_MPERF.
 */
UINT64 C0Acnt;
} IA32_APERF_REGISTER;
/**
 * MTRR Capability.
 *
 * @see Vol3A[11.11.2.1(IA32_MTRR_DEF_TYPE MSR)]
 * @see Vol3A[11.11.1(MTRR Feature Identification)] (reference)
 */
#define IA32_MTRR_CAPABILITIES 0x000000FE
/* Bit layout of the (read-only) MTRR capability MSR. */
typedef union
{
struct
{
/**
 * @brief VCNT (variable range registers count) field
 *
 * [Bits 7:0] Indicates the number of variable ranges implemented on the processor.
 */
UINT64 VariableRangeCount : 8;
#define IA32_MTRR_CAPABILITIES_VARIABLE_RANGE_COUNT_BIT 0
#define IA32_MTRR_CAPABILITIES_VARIABLE_RANGE_COUNT_FLAG 0xFF
#define IA32_MTRR_CAPABILITIES_VARIABLE_RANGE_COUNT_MASK 0xFF
#define IA32_MTRR_CAPABILITIES_VARIABLE_RANGE_COUNT(_) (((_) >> 0) & 0xFF)
/**
 * @brief FIX (fixed range registers supported) flag
 *
 * [Bit 8] Fixed range MTRRs (IA32_MTRR_FIX64K_00000 through IA32_MTRR_FIX4K_0F8000)
 * are supported when set; no fixed range registers are supported when clear.
 */
UINT64 FixedRangeSupported : 1;
#define IA32_MTRR_CAPABILITIES_FIXED_RANGE_SUPPORTED_BIT 8
#define IA32_MTRR_CAPABILITIES_FIXED_RANGE_SUPPORTED_FLAG 0x100
#define IA32_MTRR_CAPABILITIES_FIXED_RANGE_SUPPORTED_MASK 0x01
#define IA32_MTRR_CAPABILITIES_FIXED_RANGE_SUPPORTED(_) (((_) >> 8) & 0x01)
UINT64 Reserved1 : 1;
/**
 * @brief WC (write combining) flag
 *
 * [Bit 10] The write-combining (WC) memory type is supported when set; the WC type
 * is not supported when clear.
 */
UINT64 WcSupported : 1;
#define IA32_MTRR_CAPABILITIES_WC_SUPPORTED_BIT 10
#define IA32_MTRR_CAPABILITIES_WC_SUPPORTED_FLAG 0x400
#define IA32_MTRR_CAPABILITIES_WC_SUPPORTED_MASK 0x01
#define IA32_MTRR_CAPABILITIES_WC_SUPPORTED(_) (((_) >> 10) & 0x01)
/**
 * @brief SMRR (System-Management Range Register) flag
 *
 * [Bit 11] The system-management range register (SMRR) interface is supported when
 * bit 11 is set; the SMRR interface is not supported when clear.
 */
UINT64 SmrrSupported : 1;
#define IA32_MTRR_CAPABILITIES_SMRR_SUPPORTED_BIT 11
#define IA32_MTRR_CAPABILITIES_SMRR_SUPPORTED_FLAG 0x800
#define IA32_MTRR_CAPABILITIES_SMRR_SUPPORTED_MASK 0x01
#define IA32_MTRR_CAPABILITIES_SMRR_SUPPORTED(_) (((_) >> 11) & 0x01)
UINT64 Reserved2 : 52;
};
UINT64 AsUInt;
} IA32_MTRR_CAPABILITIES_REGISTER;
/**
 * Enumeration of Architectural Features.
 *
 * @remarks If CPUID.(EAX=07H,ECX=0):EDX[29]=1
 */
#define IA32_ARCH_CAPABILITIES 0x0000010A
/* Bit layout of IA32_ARCH_CAPABILITIES (speculative-execution vulnerability
 * enumeration); a set "NO" bit means the processor is NOT susceptible. */
typedef union
{
struct
{
/**
 * [Bit 0] RDCL_NO: The processor is not susceptible to Rogue Data Cache Load
 * (RDCL).
 */
UINT64 RdclNo : 1;
#define IA32_ARCH_CAPABILITIES_RDCL_NO_BIT 0
#define IA32_ARCH_CAPABILITIES_RDCL_NO_FLAG 0x01
#define IA32_ARCH_CAPABILITIES_RDCL_NO_MASK 0x01
#define IA32_ARCH_CAPABILITIES_RDCL_NO(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] IBRS_ALL: The processor supports enhanced IBRS.
 */
UINT64 IbrsAll : 1;
#define IA32_ARCH_CAPABILITIES_IBRS_ALL_BIT 1
#define IA32_ARCH_CAPABILITIES_IBRS_ALL_FLAG 0x02
#define IA32_ARCH_CAPABILITIES_IBRS_ALL_MASK 0x01
#define IA32_ARCH_CAPABILITIES_IBRS_ALL(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] RSBA: The processor supports RSB Alternate. Alternative branch predictors
 * may be used by RET instructions when the RSB is empty. SW using retpoline may be
 * affected by this behavior.
 */
UINT64 Rsba : 1;
#define IA32_ARCH_CAPABILITIES_RSBA_BIT 2
#define IA32_ARCH_CAPABILITIES_RSBA_FLAG 0x04
#define IA32_ARCH_CAPABILITIES_RSBA_MASK 0x01
#define IA32_ARCH_CAPABILITIES_RSBA(_) (((_) >> 2) & 0x01)
/**
 * [Bit 3] SKIP_L1DFL_VMENTRY: A value of 1 indicates the hypervisor need not flush
 * the L1D on VM entry.
 */
UINT64 SkipL1DflVmentry : 1;
#define IA32_ARCH_CAPABILITIES_SKIP_L1DFL_VMENTRY_BIT 3
#define IA32_ARCH_CAPABILITIES_SKIP_L1DFL_VMENTRY_FLAG 0x08
#define IA32_ARCH_CAPABILITIES_SKIP_L1DFL_VMENTRY_MASK 0x01
#define IA32_ARCH_CAPABILITIES_SKIP_L1DFL_VMENTRY(_) (((_) >> 3) & 0x01)
/**
 * [Bit 4] SSB_NO: Processor is not susceptible to Speculative Store Bypass.
 */
UINT64 SsbNo : 1;
#define IA32_ARCH_CAPABILITIES_SSB_NO_BIT 4
#define IA32_ARCH_CAPABILITIES_SSB_NO_FLAG 0x10
#define IA32_ARCH_CAPABILITIES_SSB_NO_MASK 0x01
#define IA32_ARCH_CAPABILITIES_SSB_NO(_) (((_) >> 4) & 0x01)
/**
 * [Bit 5] MDS_NO: Processor is not susceptible to Microarchitectural Data Sampling
 * (MDS).
 */
UINT64 MdsNo : 1;
#define IA32_ARCH_CAPABILITIES_MDS_NO_BIT 5
#define IA32_ARCH_CAPABILITIES_MDS_NO_FLAG 0x20
#define IA32_ARCH_CAPABILITIES_MDS_NO_MASK 0x01
#define IA32_ARCH_CAPABILITIES_MDS_NO(_) (((_) >> 5) & 0x01)
/**
 * [Bit 6] IF_PSCHANGE_MC_NO: The processor is not susceptible to a machine check
 * error due to modifying the size of a code page without TLB invalidation.
 */
UINT64 IfPschangeMcNo : 1;
#define IA32_ARCH_CAPABILITIES_IF_PSCHANGE_MC_NO_BIT 6
#define IA32_ARCH_CAPABILITIES_IF_PSCHANGE_MC_NO_FLAG 0x40
#define IA32_ARCH_CAPABILITIES_IF_PSCHANGE_MC_NO_MASK 0x01
#define IA32_ARCH_CAPABILITIES_IF_PSCHANGE_MC_NO(_) (((_) >> 6) & 0x01)
/**
 * [Bit 7] TSX_CTRL: If 1, indicates presence of IA32_TSX_CTRL MSR.
 */
UINT64 TsxCtrl : 1;
#define IA32_ARCH_CAPABILITIES_TSX_CTRL_BIT 7
#define IA32_ARCH_CAPABILITIES_TSX_CTRL_FLAG 0x80
#define IA32_ARCH_CAPABILITIES_TSX_CTRL_MASK 0x01
#define IA32_ARCH_CAPABILITIES_TSX_CTRL(_) (((_) >> 7) & 0x01)
/**
 * [Bit 8] TAA_NO: If 1, processor is not affected by TAA.
 */
UINT64 TaaNo : 1;
#define IA32_ARCH_CAPABILITIES_TAA_NO_BIT 8
#define IA32_ARCH_CAPABILITIES_TAA_NO_FLAG 0x100
#define IA32_ARCH_CAPABILITIES_TAA_NO_MASK 0x01
#define IA32_ARCH_CAPABILITIES_TAA_NO(_) (((_) >> 8) & 0x01)
UINT64 Reserved1 : 55;
};
UINT64 AsUInt;
} IA32_ARCH_CAPABILITIES_REGISTER;
/**
 * Flush Command. Gives software a way to invalidate structures with finer granularity than other
 * architectural methods.
 *
 * @remarks If any one of the enumeration conditions for defined bit field positions holds.
 */
#define IA32_FLUSH_CMD 0x0000010B
/**
 * Bit layout of the IA32_FLUSH_CMD MSR. As with the other register unions in this
 * header, each field carries companion macros: _BIT (bit position), _FLAG (mask
 * shifted into place), _MASK (field-width mask) and an extractor taking the raw
 * 64-bit value. NOTE: the bitfield declaration order defines the register layout
 * and must not be changed.
 */
typedef union
{
struct
{
/**
 * [Bit 0] L1D_FLUSH: Writeback and invalidate the L1 data cache.
 *
 * @remarks If CPUID.(EAX=07H,ECX=0):EDX[28]=1
 */
UINT64 L1DFlush : 1;
#define IA32_FLUSH_CMD_L1D_FLUSH_BIT 0
#define IA32_FLUSH_CMD_L1D_FLUSH_FLAG 0x01
#define IA32_FLUSH_CMD_L1D_FLUSH_MASK 0x01
#define IA32_FLUSH_CMD_L1D_FLUSH(_) (((_) >> 0) & 0x01)
UINT64 Reserved1 : 63;
};
/* Full raw 64-bit contents of the register. */
UINT64 AsUInt;
} IA32_FLUSH_CMD_REGISTER;
/**
 * TSX Control. Controls Intel TSX behavior: disabling RTM transactional execution
 * and hiding the RTM/HLE feature bits from CPUID (see the RTM_DISABLE and
 * TSX_CPUID_CLEAR fields below).
 *
 * NOTE(review): the upstream generated header carried a copy-pasted "Flush Command"
 * description here; the field definitions below show this is the TSX control MSR.
 *
 * @remarks Thread scope. Not architecturally serializing.
 * Available when CPUID.ARCH_CAP(EAX=7H,ECX = 0):EDX[29] = 1 and IA32_ARCH_CAPABILITIES.bit
 * 7 = 1.
 */
#define IA32_TSX_CTRL 0x00000122
/* Bit layout of IA32_TSX_CTRL; declaration order defines the layout — do not reorder. */
typedef union
{
struct
{
/**
 * [Bit 0] RTM_DISABLE: When set to 1, XBEGIN will always abort with EAX code 0.
 */
UINT64 RtmDisable : 1;
#define IA32_TSX_CTRL_RTM_DISABLE_BIT 0
#define IA32_TSX_CTRL_RTM_DISABLE_FLAG 0x01
#define IA32_TSX_CTRL_RTM_DISABLE_MASK 0x01
#define IA32_TSX_CTRL_RTM_DISABLE(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] TSX_CPUID_CLEAR: When set to 1, CPUID.07H.EBX.RTM [bit 11] and
 * CPUID.07H.EBX.HLE [bit 4] report 0. When set to 0 and the SKU supports TSX, these
 * bits will return 1.
 */
UINT64 TsxCpuidClear : 1;
#define IA32_TSX_CTRL_TSX_CPUID_CLEAR_BIT 1
#define IA32_TSX_CTRL_TSX_CPUID_CLEAR_FLAG 0x02
#define IA32_TSX_CTRL_TSX_CPUID_CLEAR_MASK 0x01
#define IA32_TSX_CTRL_TSX_CPUID_CLEAR(_) (((_) >> 1) & 0x01)
UINT64 Reserved1 : 62;
};
/* Full raw 64-bit contents of the register. */
UINT64 AsUInt;
} IA32_TSX_CTRL_REGISTER;
/**
 * @brief SYSENTER_CS_MSR (R/W)
 *
 * The lower 16 bits of this MSR are the segment selector for the privilege level 0 code segment.
 * This value is also used to determine the segment selector of the privilege level 0 stack segment.
 * This value cannot indicate a null selector.
 *
 * @remarks 06_01H
 * @see Vol2B[4.3(Instructions (M-U) | SYSCALL - Fast System Call)] (reference)
 */
#define IA32_SYSENTER_CS 0x00000174
/* Bit layout of IA32_SYSENTER_CS; only the low 16 bits (CS selector) are meaningful. */
typedef union
{
struct
{
/**
 * [Bits 15:0] CS Selector.
 */
UINT64 CsSelector : 16;
#define IA32_SYSENTER_CS_CS_SELECTOR_BIT 0
#define IA32_SYSENTER_CS_CS_SELECTOR_FLAG 0xFFFF
#define IA32_SYSENTER_CS_CS_SELECTOR_MASK 0xFFFF
#define IA32_SYSENTER_CS_CS_SELECTOR(_) (((_) >> 0) & 0xFFFF)
/**
 * [Bits 31:16] Not used.
 *
 * @remarks Can be read and written.
 */
UINT64 NotUsed1 : 16;
#define IA32_SYSENTER_CS_NOT_USED_1_BIT 16
#define IA32_SYSENTER_CS_NOT_USED_1_FLAG 0xFFFF0000
#define IA32_SYSENTER_CS_NOT_USED_1_MASK 0xFFFF
#define IA32_SYSENTER_CS_NOT_USED_1(_) (((_) >> 16) & 0xFFFF)
/**
 * [Bits 63:32] Not used.
 *
 * @remarks Writes ignored; reads return zero.
 */
UINT64 NotUsed2 : 32;
#define IA32_SYSENTER_CS_NOT_USED_2_BIT 32
#define IA32_SYSENTER_CS_NOT_USED_2_FLAG 0xFFFFFFFF00000000
#define IA32_SYSENTER_CS_NOT_USED_2_MASK 0xFFFFFFFF
#define IA32_SYSENTER_CS_NOT_USED_2(_) (((_) >> 32) & 0xFFFFFFFF)
};
/* Full raw 64-bit contents of the register. */
UINT64 AsUInt;
} IA32_SYSENTER_CS_REGISTER;
/**
 * @brief SYSENTER_ESP_MSR (R/W)
 *
 * The value of this MSR is loaded into RSP (thus, this value contains the stack pointer for the
 * privilege level 0 stack). This value cannot represent a non-canonical address. In protected mode,
 * only bits 31:0 are loaded.
 *
 * @remarks 06_01H
 * @see Vol2B[4.3(Instructions (M-U) | SYSCALL - Fast System Call)] (reference)
 */
/* No bit-layout union follows: per the description above this MSR holds a full address. */
#define IA32_SYSENTER_ESP 0x00000175
/**
 * @brief SYSENTER_EIP_MSR (R/W)
 *
 * The value of this MSR is loaded into RIP (thus, this value references the first instruction of
 * the selected operating procedure or routine). In protected mode, only bits 31:0 are loaded.
 *
 * @remarks 06_01H
 * @see Vol2B[4.3(Instructions (M-U) | SYSCALL - Fast System Call)] (reference)
 */
/* No bit-layout union follows: per the description above this MSR holds a full address. */
#define IA32_SYSENTER_EIP 0x00000176
/**
 * Global Machine Check Capability.
 *
 * @remarks 06_01H
 */
#define IA32_MCG_CAP 0x00000179
/**
 * Bit layout of IA32_MCG_CAP. Read-only capability register: reports the number of
 * machine-check banks and which optional machine-check facilities (MCG_CTL, extended
 * state, CMCI, thresholding, SER, ELOG, LMCE) are present.
 */
typedef union
{
struct
{
/**
 * [Bits 7:0] Number of reporting banks.
 */
UINT64 Count : 8;
#define IA32_MCG_CAP_COUNT_BIT 0
#define IA32_MCG_CAP_COUNT_FLAG 0xFF
#define IA32_MCG_CAP_COUNT_MASK 0xFF
#define IA32_MCG_CAP_COUNT(_) (((_) >> 0) & 0xFF)
/**
 * [Bit 8] IA32_MCG_CTL is present if this bit is set.
 */
UINT64 McgCtlP : 1;
#define IA32_MCG_CAP_MCG_CTL_P_BIT 8
#define IA32_MCG_CAP_MCG_CTL_P_FLAG 0x100
#define IA32_MCG_CAP_MCG_CTL_P_MASK 0x01
#define IA32_MCG_CAP_MCG_CTL_P(_) (((_) >> 8) & 0x01)
/**
 * [Bit 9] Extended machine check state registers are present if this bit is set.
 */
UINT64 McgExtP : 1;
#define IA32_MCG_CAP_MCG_EXT_P_BIT 9
#define IA32_MCG_CAP_MCG_EXT_P_FLAG 0x200
#define IA32_MCG_CAP_MCG_EXT_P_MASK 0x01
#define IA32_MCG_CAP_MCG_EXT_P(_) (((_) >> 9) & 0x01)
/**
 * [Bit 10] Support for corrected MC error event is present.
 *
 * @remarks 06_01H
 */
UINT64 McpCmciP : 1;
#define IA32_MCG_CAP_MCP_CMCI_P_BIT 10
#define IA32_MCG_CAP_MCP_CMCI_P_FLAG 0x400
#define IA32_MCG_CAP_MCP_CMCI_P_MASK 0x01
#define IA32_MCG_CAP_MCP_CMCI_P(_) (((_) >> 10) & 0x01)
/**
 * [Bit 11] Threshold-based error status register are present if this bit is set.
 */
UINT64 McgTesP : 1;
#define IA32_MCG_CAP_MCG_TES_P_BIT 11
#define IA32_MCG_CAP_MCG_TES_P_FLAG 0x800
#define IA32_MCG_CAP_MCG_TES_P_MASK 0x01
#define IA32_MCG_CAP_MCG_TES_P(_) (((_) >> 11) & 0x01)
UINT64 Reserved1 : 4;
/**
 * [Bits 23:16] Number of extended machine check state registers present.
 */
UINT64 McgExtCnt : 8;
#define IA32_MCG_CAP_MCG_EXT_CNT_BIT 16
#define IA32_MCG_CAP_MCG_EXT_CNT_FLAG 0xFF0000
#define IA32_MCG_CAP_MCG_EXT_CNT_MASK 0xFF
#define IA32_MCG_CAP_MCG_EXT_CNT(_) (((_) >> 16) & 0xFF)
/**
 * [Bit 24] The processor supports software error recovery if this bit is set.
 */
UINT64 McgSerP : 1;
#define IA32_MCG_CAP_MCG_SER_P_BIT 24
#define IA32_MCG_CAP_MCG_SER_P_FLAG 0x1000000
#define IA32_MCG_CAP_MCG_SER_P_MASK 0x01
#define IA32_MCG_CAP_MCG_SER_P(_) (((_) >> 24) & 0x01)
UINT64 Reserved2 : 1;
/**
 * [Bit 26] Indicates that the processor allows platform firmware to be invoked when
 * an error is detected so that it may provide additional platform specific
 * information in an ACPI format "Generic Error Data Entry" that augments the data
 * included in machine check bank registers.
 *
 * @remarks 06_3EH
 */
UINT64 McgElogP : 1;
#define IA32_MCG_CAP_MCG_ELOG_P_BIT 26
#define IA32_MCG_CAP_MCG_ELOG_P_FLAG 0x4000000
#define IA32_MCG_CAP_MCG_ELOG_P_MASK 0x01
#define IA32_MCG_CAP_MCG_ELOG_P(_) (((_) >> 26) & 0x01)
/**
 * [Bit 27] Indicates that the processor supports extended state in IA32_MCG_STATUS
 * and associated MSR necessary to configure Local Machine Check Exception (LMCE).
 *
 * @remarks 06_3EH
 */
UINT64 McgLmceP : 1;
#define IA32_MCG_CAP_MCG_LMCE_P_BIT 27
#define IA32_MCG_CAP_MCG_LMCE_P_FLAG 0x8000000
#define IA32_MCG_CAP_MCG_LMCE_P_MASK 0x01
#define IA32_MCG_CAP_MCG_LMCE_P(_) (((_) >> 27) & 0x01)
UINT64 Reserved3 : 36;
};
/* Full raw 64-bit contents of the register. */
UINT64 AsUInt;
} IA32_MCG_CAP_REGISTER;
/**
 * Global Machine Check Status.
 *
 * @remarks 06_01H
 */
#define IA32_MCG_STATUS 0x0000017A
/**
 * Bit layout of IA32_MCG_STATUS: status flags examined by a machine-check
 * exception handler (restart/error IP validity, MC-in-progress, LMCE signaled).
 */
typedef union
{
struct
{
/**
 * [Bit 0] Restart IP valid.
 *
 * @remarks 06_01H
 */
UINT64 Ripv : 1;
#define IA32_MCG_STATUS_RIPV_BIT 0
#define IA32_MCG_STATUS_RIPV_FLAG 0x01
#define IA32_MCG_STATUS_RIPV_MASK 0x01
#define IA32_MCG_STATUS_RIPV(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] Error IP valid.
 *
 * @remarks 06_01H
 */
UINT64 Eipv : 1;
#define IA32_MCG_STATUS_EIPV_BIT 1
#define IA32_MCG_STATUS_EIPV_FLAG 0x02
#define IA32_MCG_STATUS_EIPV_MASK 0x01
#define IA32_MCG_STATUS_EIPV(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] Machine check in progress.
 *
 * @remarks 06_01H
 */
UINT64 Mcip : 1;
#define IA32_MCG_STATUS_MCIP_BIT 2
#define IA32_MCG_STATUS_MCIP_FLAG 0x04
#define IA32_MCG_STATUS_MCIP_MASK 0x01
#define IA32_MCG_STATUS_MCIP(_) (((_) >> 2) & 0x01)
/**
 * [Bit 3] If IA32_MCG_CAP.LMCE_P[27] = 1.
 */
UINT64 LmceS : 1;
#define IA32_MCG_STATUS_LMCE_S_BIT 3
#define IA32_MCG_STATUS_LMCE_S_FLAG 0x08
#define IA32_MCG_STATUS_LMCE_S_MASK 0x01
#define IA32_MCG_STATUS_LMCE_S(_) (((_) >> 3) & 0x01)
UINT64 Reserved1 : 60;
};
/* Full raw 64-bit contents of the register. */
UINT64 AsUInt;
} IA32_MCG_STATUS_REGISTER;
/**
 * Global Machine Check Control.
 *
 * @remarks If IA32_MCG_CAP.CTL_P[8] = 1
 */
#define IA32_MCG_CTL 0x0000017B
/**
 * @defgroup IA32_PERFEVTSEL \
 *           IA32_PERFEVTSEL(n)
 *
 * Performance Event Select Register n.
 *
 * @remarks If CPUID.0AH: EAX[15:8] > n
 * @{
 */
#define IA32_PERFEVTSEL0 0x00000186
#define IA32_PERFEVTSEL1 0x00000187
#define IA32_PERFEVTSEL2 0x00000188
#define IA32_PERFEVTSEL3 0x00000189
/**
 * Bit layout shared by all IA32_PERFEVTSELx MSRs: selects a performance event
 * (event code + unit mask) and the qualifiers/counter controls for it.
 */
typedef union
{
struct
{
/**
 * [Bits 7:0] Selects a performance event logic unit.
 */
UINT64 EventSelect : 8;
#define IA32_PERFEVTSEL_EVENT_SELECT_BIT 0
#define IA32_PERFEVTSEL_EVENT_SELECT_FLAG 0xFF
#define IA32_PERFEVTSEL_EVENT_SELECT_MASK 0xFF
#define IA32_PERFEVTSEL_EVENT_SELECT(_) (((_) >> 0) & 0xFF)
/**
 * [Bits 15:8] Qualifies the microarchitectural condition to detect on the selected
 * event logic.
 */
UINT64 UMask : 8;
#define IA32_PERFEVTSEL_U_MASK_BIT 8
#define IA32_PERFEVTSEL_U_MASK_FLAG 0xFF00
#define IA32_PERFEVTSEL_U_MASK_MASK 0xFF
#define IA32_PERFEVTSEL_U_MASK(_) (((_) >> 8) & 0xFF)
/**
 * [Bit 16] Counts while in privilege level is not ring 0.
 */
UINT64 Usr : 1;
#define IA32_PERFEVTSEL_USR_BIT 16
#define IA32_PERFEVTSEL_USR_FLAG 0x10000
#define IA32_PERFEVTSEL_USR_MASK 0x01
#define IA32_PERFEVTSEL_USR(_) (((_) >> 16) & 0x01)
/**
 * [Bit 17] Counts while in privilege level is ring 0.
 */
UINT64 Os : 1;
#define IA32_PERFEVTSEL_OS_BIT 17
#define IA32_PERFEVTSEL_OS_FLAG 0x20000
#define IA32_PERFEVTSEL_OS_MASK 0x01
#define IA32_PERFEVTSEL_OS(_) (((_) >> 17) & 0x01)
/**
 * [Bit 18] Enables edge detection if set.
 */
UINT64 Edge : 1;
#define IA32_PERFEVTSEL_EDGE_BIT 18
#define IA32_PERFEVTSEL_EDGE_FLAG 0x40000
#define IA32_PERFEVTSEL_EDGE_MASK 0x01
#define IA32_PERFEVTSEL_EDGE(_) (((_) >> 18) & 0x01)
/**
 * [Bit 19] Enables pin control.
 */
UINT64 Pc : 1;
#define IA32_PERFEVTSEL_PC_BIT 19
#define IA32_PERFEVTSEL_PC_FLAG 0x80000
#define IA32_PERFEVTSEL_PC_MASK 0x01
#define IA32_PERFEVTSEL_PC(_) (((_) >> 19) & 0x01)
/**
 * [Bit 20] Enables interrupt on counter overflow.
 */
UINT64 Intr : 1;
#define IA32_PERFEVTSEL_INTR_BIT 20
#define IA32_PERFEVTSEL_INTR_FLAG 0x100000
#define IA32_PERFEVTSEL_INTR_MASK 0x01
#define IA32_PERFEVTSEL_INTR(_) (((_) >> 20) & 0x01)
/**
 * [Bit 21] When set to 1, it enables counting the associated event conditions
 * occurring across all logical processors sharing a processor core. When set to 0,
 * the counter only increments the associated event conditions occurring in the
 * logical processor which programmed the MSR.
 */
UINT64 AnyThread : 1;
#define IA32_PERFEVTSEL_ANY_THREAD_BIT 21
#define IA32_PERFEVTSEL_ANY_THREAD_FLAG 0x200000
#define IA32_PERFEVTSEL_ANY_THREAD_MASK 0x01
#define IA32_PERFEVTSEL_ANY_THREAD(_) (((_) >> 21) & 0x01)
/**
 * [Bit 22] Enables the corresponding performance counter to commence counting when
 * this bit is set.
 */
UINT64 En : 1;
#define IA32_PERFEVTSEL_EN_BIT 22
#define IA32_PERFEVTSEL_EN_FLAG 0x400000
#define IA32_PERFEVTSEL_EN_MASK 0x01
#define IA32_PERFEVTSEL_EN(_) (((_) >> 22) & 0x01)
/**
 * [Bit 23] Invert the CMASK.
 */
UINT64 Inv : 1;
#define IA32_PERFEVTSEL_INV_BIT 23
#define IA32_PERFEVTSEL_INV_FLAG 0x800000
#define IA32_PERFEVTSEL_INV_MASK 0x01
#define IA32_PERFEVTSEL_INV(_) (((_) >> 23) & 0x01)
/**
 * [Bits 31:24] When CMASK is not zero, the corresponding performance counter
 * increments each cycle if the event count is greater than or equal to the CMASK.
 */
UINT64 Cmask : 8;
#define IA32_PERFEVTSEL_CMASK_BIT 24
#define IA32_PERFEVTSEL_CMASK_FLAG 0xFF000000
#define IA32_PERFEVTSEL_CMASK_MASK 0xFF
#define IA32_PERFEVTSEL_CMASK(_) (((_) >> 24) & 0xFF)
UINT64 Reserved1 : 32;
};
/* Full raw 64-bit contents of the register. */
UINT64 AsUInt;
} IA32_PERFEVTSEL_REGISTER;
/**
 * @}
 */
/**
 * Current Performance Status.
 *
 * @remarks 0F_03H
 * @see Vol3B[14.1.1(Software Interface For Initiating Performance State Transitions)]
 */
#define IA32_PERF_STATUS 0x00000198
/* Bit layout of IA32_PERF_STATUS: reports the current P-state value in the low 16 bits. */
typedef union
{
struct
{
/**
 * [Bits 15:0] Current performance State Value.
 */
UINT64 StateValue : 16;
#define IA32_PERF_STATUS_STATE_VALUE_BIT 0
#define IA32_PERF_STATUS_STATE_VALUE_FLAG 0xFFFF
#define IA32_PERF_STATUS_STATE_VALUE_MASK 0xFFFF
#define IA32_PERF_STATUS_STATE_VALUE(_) (((_) >> 0) & 0xFFFF)
UINT64 Reserved1 : 48;
};
/* Full raw 64-bit contents of the register. */
UINT64 AsUInt;
} IA32_PERF_STATUS_REGISTER;
/**
 * @brief Performance Control (R/W)
 *
 * Performance Control. Software makes a request for a new Performance state (P-State) by writing
 * this MSR.
 *
 * @remarks 0F_03H
 * @see Vol3B[14.1.1(Software Interface For Initiating Performance State Transitions)]
 */
#define IA32_PERF_CTL 0x00000199
/* Bit layout of IA32_PERF_CTL: requested P-state in the low 16 bits, IDA engage at bit 32. */
typedef union
{
struct
{
/**
 * [Bits 15:0] Target performance State Value.
 */
UINT64 TargetStateValue : 16;
#define IA32_PERF_CTL_TARGET_STATE_VALUE_BIT 0
#define IA32_PERF_CTL_TARGET_STATE_VALUE_FLAG 0xFFFF
#define IA32_PERF_CTL_TARGET_STATE_VALUE_MASK 0xFFFF
#define IA32_PERF_CTL_TARGET_STATE_VALUE(_) (((_) >> 0) & 0xFFFF)
UINT64 Reserved1 : 16;
/**
 * [Bit 32] IDA Engage.
 *
 * @remarks 06_0FH (Mobile only)
 */
UINT64 IdaEngage : 1;
#define IA32_PERF_CTL_IDA_ENGAGE_BIT 32
#define IA32_PERF_CTL_IDA_ENGAGE_FLAG 0x100000000
#define IA32_PERF_CTL_IDA_ENGAGE_MASK 0x01
#define IA32_PERF_CTL_IDA_ENGAGE(_) (((_) >> 32) & 0x01)
UINT64 Reserved2 : 31;
};
/* Full raw 64-bit contents of the register. */
UINT64 AsUInt;
} IA32_PERF_CTL_REGISTER;
/**
 * Clock Modulation Control.
 *
 * @remarks If CPUID.01H:EDX[22] = 1
 * @see Vol3B[14.7.3(Software Controlled Clock Modulation)]
 */
#define IA32_CLOCK_MODULATION 0x0000019A
/* Bit layout of IA32_CLOCK_MODULATION: duty-cycle encoding (bits 3:0) plus enable (bit 4). */
typedef union
{
struct
{
/**
 * [Bit 0] Extended On-Demand Clock Modulation Duty Cycle.
 *
 * @remarks If CPUID.06H:EAX[5] = 1
 */
UINT64 ExtendedOnDemandClockModulationDutyCycle : 1;
#define IA32_CLOCK_MODULATION_EXTENDED_ON_DEMAND_CLOCK_MODULATION_DUTY_CYCLE_BIT 0
#define IA32_CLOCK_MODULATION_EXTENDED_ON_DEMAND_CLOCK_MODULATION_DUTY_CYCLE_FLAG 0x01
#define IA32_CLOCK_MODULATION_EXTENDED_ON_DEMAND_CLOCK_MODULATION_DUTY_CYCLE_MASK 0x01
#define IA32_CLOCK_MODULATION_EXTENDED_ON_DEMAND_CLOCK_MODULATION_DUTY_CYCLE(_) (((_) >> 0) & 0x01)
/**
 * @brief On-Demand Clock Modulation Duty Cycle
 *
 * [Bits 3:1] On-Demand Clock Modulation Duty Cycle: Specific encoded values for
 * target duty cycle modulation.
 *
 * @remarks If CPUID.01H:EDX[22] = 1
 */
UINT64 OnDemandClockModulationDutyCycle : 3;
#define IA32_CLOCK_MODULATION_ON_DEMAND_CLOCK_MODULATION_DUTY_CYCLE_BIT 1
#define IA32_CLOCK_MODULATION_ON_DEMAND_CLOCK_MODULATION_DUTY_CYCLE_FLAG 0x0E
#define IA32_CLOCK_MODULATION_ON_DEMAND_CLOCK_MODULATION_DUTY_CYCLE_MASK 0x07
#define IA32_CLOCK_MODULATION_ON_DEMAND_CLOCK_MODULATION_DUTY_CYCLE(_) (((_) >> 1) & 0x07)
/**
 * @brief On-Demand Clock Modulation Enable
 *
 * [Bit 4] On-Demand Clock Modulation Enable: Set 1 to enable modulation.
 *
 * @remarks If CPUID.01H:EDX[22] = 1
 */
UINT64 OnDemandClockModulationEnable : 1;
#define IA32_CLOCK_MODULATION_ON_DEMAND_CLOCK_MODULATION_ENABLE_BIT 4
#define IA32_CLOCK_MODULATION_ON_DEMAND_CLOCK_MODULATION_ENABLE_FLAG 0x10
#define IA32_CLOCK_MODULATION_ON_DEMAND_CLOCK_MODULATION_ENABLE_MASK 0x01
#define IA32_CLOCK_MODULATION_ON_DEMAND_CLOCK_MODULATION_ENABLE(_) (((_) >> 4) & 0x01)
UINT64 Reserved1 : 59;
};
/* Full raw 64-bit contents of the register. */
UINT64 AsUInt;
} IA32_CLOCK_MODULATION_REGISTER;
/**
 * @brief Thermal Interrupt Control (R/W)
 *
 * Thermal Interrupt Control. Enables and disables the generation of an interrupt on temperature
 * transitions detected with the processor's thermal sensors and thermal monitor.
 *
 * @remarks If CPUID.01H:EDX[22] = 1
 * @see Vol3B[14.7.2(Thermal Monitor)]
 */
#define IA32_THERM_INTERRUPT 0x0000019B
/**
 * Bit layout of IA32_THERM_INTERRUPT: per-event interrupt enables plus two
 * programmable temperature thresholds (value + enable each).
 */
typedef union
{
struct
{
/**
 * [Bit 0] High-Temperature Interrupt Enable.
 *
 * @remarks If CPUID.01H:EDX[22] = 1
 */
UINT64 HighTemperatureInterruptEnable : 1;
#define IA32_THERM_INTERRUPT_HIGH_TEMPERATURE_INTERRUPT_ENABLE_BIT 0
#define IA32_THERM_INTERRUPT_HIGH_TEMPERATURE_INTERRUPT_ENABLE_FLAG 0x01
#define IA32_THERM_INTERRUPT_HIGH_TEMPERATURE_INTERRUPT_ENABLE_MASK 0x01
#define IA32_THERM_INTERRUPT_HIGH_TEMPERATURE_INTERRUPT_ENABLE(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] Low-Temperature Interrupt Enable.
 *
 * @remarks If CPUID.01H:EDX[22] = 1
 */
UINT64 LowTemperatureInterruptEnable : 1;
#define IA32_THERM_INTERRUPT_LOW_TEMPERATURE_INTERRUPT_ENABLE_BIT 1
#define IA32_THERM_INTERRUPT_LOW_TEMPERATURE_INTERRUPT_ENABLE_FLAG 0x02
#define IA32_THERM_INTERRUPT_LOW_TEMPERATURE_INTERRUPT_ENABLE_MASK 0x01
#define IA32_THERM_INTERRUPT_LOW_TEMPERATURE_INTERRUPT_ENABLE(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] PROCHOT\# Interrupt Enable.
 *
 * @remarks If CPUID.01H:EDX[22] = 1
 */
UINT64 ProchotInterruptEnable : 1;
#define IA32_THERM_INTERRUPT_PROCHOT_INTERRUPT_ENABLE_BIT 2
#define IA32_THERM_INTERRUPT_PROCHOT_INTERRUPT_ENABLE_FLAG 0x04
#define IA32_THERM_INTERRUPT_PROCHOT_INTERRUPT_ENABLE_MASK 0x01
#define IA32_THERM_INTERRUPT_PROCHOT_INTERRUPT_ENABLE(_) (((_) >> 2) & 0x01)
/**
 * [Bit 3] FORCEPR\# Interrupt Enable.
 *
 * @remarks If CPUID.01H:EDX[22] = 1
 */
UINT64 ForceprInterruptEnable : 1;
#define IA32_THERM_INTERRUPT_FORCEPR_INTERRUPT_ENABLE_BIT 3
#define IA32_THERM_INTERRUPT_FORCEPR_INTERRUPT_ENABLE_FLAG 0x08
#define IA32_THERM_INTERRUPT_FORCEPR_INTERRUPT_ENABLE_MASK 0x01
#define IA32_THERM_INTERRUPT_FORCEPR_INTERRUPT_ENABLE(_) (((_) >> 3) & 0x01)
/**
 * [Bit 4] Critical Temperature Interrupt Enable.
 *
 * @remarks If CPUID.01H:EDX[22] = 1
 */
UINT64 CriticalTemperatureInterruptEnable : 1;
#define IA32_THERM_INTERRUPT_CRITICAL_TEMPERATURE_INTERRUPT_ENABLE_BIT 4
#define IA32_THERM_INTERRUPT_CRITICAL_TEMPERATURE_INTERRUPT_ENABLE_FLAG 0x10
#define IA32_THERM_INTERRUPT_CRITICAL_TEMPERATURE_INTERRUPT_ENABLE_MASK 0x01
#define IA32_THERM_INTERRUPT_CRITICAL_TEMPERATURE_INTERRUPT_ENABLE(_) (((_) >> 4) & 0x01)
UINT64 Reserved1 : 3;
/**
 * [Bits 14:8] Threshold \#1 Value
 *
 * @remarks If CPUID.01H:EDX[22] = 1
 */
UINT64 Threshold1Value : 7;
#define IA32_THERM_INTERRUPT_THRESHOLD1_VALUE_BIT 8
#define IA32_THERM_INTERRUPT_THRESHOLD1_VALUE_FLAG 0x7F00
#define IA32_THERM_INTERRUPT_THRESHOLD1_VALUE_MASK 0x7F
#define IA32_THERM_INTERRUPT_THRESHOLD1_VALUE(_) (((_) >> 8) & 0x7F)
/**
 * [Bit 15] Threshold \#1 Interrupt Enable.
 *
 * @remarks If CPUID.01H:EDX[22] = 1
 */
UINT64 Threshold1InterruptEnable : 1;
#define IA32_THERM_INTERRUPT_THRESHOLD1_INTERRUPT_ENABLE_BIT 15
#define IA32_THERM_INTERRUPT_THRESHOLD1_INTERRUPT_ENABLE_FLAG 0x8000
#define IA32_THERM_INTERRUPT_THRESHOLD1_INTERRUPT_ENABLE_MASK 0x01
#define IA32_THERM_INTERRUPT_THRESHOLD1_INTERRUPT_ENABLE(_) (((_) >> 15) & 0x01)
/**
 * [Bits 22:16] Threshold \#2 Value.
 *
 * @remarks If CPUID.01H:EDX[22] = 1
 */
UINT64 Threshold2Value : 7;
#define IA32_THERM_INTERRUPT_THRESHOLD2_VALUE_BIT 16
#define IA32_THERM_INTERRUPT_THRESHOLD2_VALUE_FLAG 0x7F0000
#define IA32_THERM_INTERRUPT_THRESHOLD2_VALUE_MASK 0x7F
#define IA32_THERM_INTERRUPT_THRESHOLD2_VALUE(_) (((_) >> 16) & 0x7F)
/**
 * [Bit 23] Threshold \#2 Interrupt Enable.
 *
 * @remarks If CPUID.01H:EDX[22] = 1
 */
UINT64 Threshold2InterruptEnable : 1;
#define IA32_THERM_INTERRUPT_THRESHOLD2_INTERRUPT_ENABLE_BIT 23
#define IA32_THERM_INTERRUPT_THRESHOLD2_INTERRUPT_ENABLE_FLAG 0x800000
#define IA32_THERM_INTERRUPT_THRESHOLD2_INTERRUPT_ENABLE_MASK 0x01
#define IA32_THERM_INTERRUPT_THRESHOLD2_INTERRUPT_ENABLE(_) (((_) >> 23) & 0x01)
/**
 * [Bit 24] Power Limit Notification Enable.
 *
 * @remarks If CPUID.06H:EAX[4] = 1
 */
UINT64 PowerLimitNotificationEnable : 1;
#define IA32_THERM_INTERRUPT_POWER_LIMIT_NOTIFICATION_ENABLE_BIT 24
#define IA32_THERM_INTERRUPT_POWER_LIMIT_NOTIFICATION_ENABLE_FLAG 0x1000000
#define IA32_THERM_INTERRUPT_POWER_LIMIT_NOTIFICATION_ENABLE_MASK 0x01
#define IA32_THERM_INTERRUPT_POWER_LIMIT_NOTIFICATION_ENABLE(_) (((_) >> 24) & 0x01)
UINT64 Reserved2 : 39;
};
/* Full raw 64-bit contents of the register. */
UINT64 AsUInt;
} IA32_THERM_INTERRUPT_REGISTER;
/**
 * @brief Thermal Status Information (RO)
 *
 * Thermal Status Information. Contains status information about the processor's thermal sensor and
 * automatic thermal monitoring facilities.
 *
 * @remarks If CPUID.01H:EDX[22] = 1
 * @see Vol3B[14.7.2(Thermal Monitor)]
 */
#define IA32_THERM_STATUS 0x0000019C
/**
 * Bit layout of IA32_THERM_STATUS. Events come in status/log pairs: the status bit
 * reflects the current condition, the corresponding log bit records that the
 * condition occurred. Bits 31:16 carry the digital temperature readout and its
 * resolution/validity.
 */
typedef union
{
struct
{
/**
 * [Bit 0] Thermal Status
 *
 * @remarks If CPUID.01H:EDX[22] = 1
 */
UINT64 ThermalStatus : 1;
#define IA32_THERM_STATUS_THERMAL_STATUS_BIT 0
#define IA32_THERM_STATUS_THERMAL_STATUS_FLAG 0x01
#define IA32_THERM_STATUS_THERMAL_STATUS_MASK 0x01
#define IA32_THERM_STATUS_THERMAL_STATUS(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] Thermal Status Log
 *
 * @remarks If CPUID.01H:EDX[22] = 1
 */
UINT64 ThermalStatusLog : 1;
#define IA32_THERM_STATUS_THERMAL_STATUS_LOG_BIT 1
#define IA32_THERM_STATUS_THERMAL_STATUS_LOG_FLAG 0x02
#define IA32_THERM_STATUS_THERMAL_STATUS_LOG_MASK 0x01
#define IA32_THERM_STATUS_THERMAL_STATUS_LOG(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] PROCHOT \# or FORCEPR\# event
 *
 * @remarks If CPUID.01H:EDX[22] = 1
 */
UINT64 ProchotForceprEvent : 1;
#define IA32_THERM_STATUS_PROCHOT_FORCEPR_EVENT_BIT 2
#define IA32_THERM_STATUS_PROCHOT_FORCEPR_EVENT_FLAG 0x04
#define IA32_THERM_STATUS_PROCHOT_FORCEPR_EVENT_MASK 0x01
#define IA32_THERM_STATUS_PROCHOT_FORCEPR_EVENT(_) (((_) >> 2) & 0x01)
/**
 * [Bit 3] PROCHOT \# or FORCEPR\# log
 *
 * @remarks If CPUID.01H:EDX[22] = 1
 */
UINT64 ProchotForceprLog : 1;
#define IA32_THERM_STATUS_PROCHOT_FORCEPR_LOG_BIT 3
#define IA32_THERM_STATUS_PROCHOT_FORCEPR_LOG_FLAG 0x08
#define IA32_THERM_STATUS_PROCHOT_FORCEPR_LOG_MASK 0x01
#define IA32_THERM_STATUS_PROCHOT_FORCEPR_LOG(_) (((_) >> 3) & 0x01)
/**
 * [Bit 4] Critical Temperature Status
 *
 * @remarks If CPUID.01H:EDX[22] = 1
 */
UINT64 CriticalTemperatureStatus : 1;
#define IA32_THERM_STATUS_CRITICAL_TEMPERATURE_STATUS_BIT 4
#define IA32_THERM_STATUS_CRITICAL_TEMPERATURE_STATUS_FLAG 0x10
#define IA32_THERM_STATUS_CRITICAL_TEMPERATURE_STATUS_MASK 0x01
#define IA32_THERM_STATUS_CRITICAL_TEMPERATURE_STATUS(_) (((_) >> 4) & 0x01)
/**
 * [Bit 5] Critical Temperature Status log
 *
 * @remarks If CPUID.01H:EDX[22] = 1
 */
UINT64 CriticalTemperatureStatusLog : 1;
#define IA32_THERM_STATUS_CRITICAL_TEMPERATURE_STATUS_LOG_BIT 5
#define IA32_THERM_STATUS_CRITICAL_TEMPERATURE_STATUS_LOG_FLAG 0x20
#define IA32_THERM_STATUS_CRITICAL_TEMPERATURE_STATUS_LOG_MASK 0x01
#define IA32_THERM_STATUS_CRITICAL_TEMPERATURE_STATUS_LOG(_) (((_) >> 5) & 0x01)
/**
 * [Bit 6] Thermal Threshold \#1 Status
 *
 * @remarks If CPUID.01H:ECX[8] = 1
 */
UINT64 ThermalThreshold1Status : 1;
#define IA32_THERM_STATUS_THERMAL_THRESHOLD1_STATUS_BIT 6
#define IA32_THERM_STATUS_THERMAL_THRESHOLD1_STATUS_FLAG 0x40
#define IA32_THERM_STATUS_THERMAL_THRESHOLD1_STATUS_MASK 0x01
#define IA32_THERM_STATUS_THERMAL_THRESHOLD1_STATUS(_) (((_) >> 6) & 0x01)
/**
 * [Bit 7] Thermal Threshold \#1 log
 *
 * @remarks If CPUID.01H:ECX[8] = 1
 */
UINT64 ThermalThreshold1Log : 1;
#define IA32_THERM_STATUS_THERMAL_THRESHOLD1_LOG_BIT 7
#define IA32_THERM_STATUS_THERMAL_THRESHOLD1_LOG_FLAG 0x80
#define IA32_THERM_STATUS_THERMAL_THRESHOLD1_LOG_MASK 0x01
#define IA32_THERM_STATUS_THERMAL_THRESHOLD1_LOG(_) (((_) >> 7) & 0x01)
/**
 * [Bit 8] Thermal Threshold \#2 Status
 *
 * @remarks If CPUID.01H:ECX[8] = 1
 */
UINT64 ThermalThreshold2Status : 1;
#define IA32_THERM_STATUS_THERMAL_THRESHOLD2_STATUS_BIT 8
#define IA32_THERM_STATUS_THERMAL_THRESHOLD2_STATUS_FLAG 0x100
#define IA32_THERM_STATUS_THERMAL_THRESHOLD2_STATUS_MASK 0x01
#define IA32_THERM_STATUS_THERMAL_THRESHOLD2_STATUS(_) (((_) >> 8) & 0x01)
/**
 * [Bit 9] Thermal Threshold \#2 log
 *
 * @remarks If CPUID.01H:ECX[8] = 1
 */
UINT64 ThermalThreshold2Log : 1;
#define IA32_THERM_STATUS_THERMAL_THRESHOLD2_LOG_BIT 9
#define IA32_THERM_STATUS_THERMAL_THRESHOLD2_LOG_FLAG 0x200
#define IA32_THERM_STATUS_THERMAL_THRESHOLD2_LOG_MASK 0x01
#define IA32_THERM_STATUS_THERMAL_THRESHOLD2_LOG(_) (((_) >> 9) & 0x01)
/**
 * [Bit 10] Power Limitation Status
 *
 * @remarks If CPUID.06H:EAX[4] = 1
 */
UINT64 PowerLimitationStatus : 1;
#define IA32_THERM_STATUS_POWER_LIMITATION_STATUS_BIT 10
#define IA32_THERM_STATUS_POWER_LIMITATION_STATUS_FLAG 0x400
#define IA32_THERM_STATUS_POWER_LIMITATION_STATUS_MASK 0x01
#define IA32_THERM_STATUS_POWER_LIMITATION_STATUS(_) (((_) >> 10) & 0x01)
/**
 * [Bit 11] Power Limitation log
 *
 * @remarks If CPUID.06H:EAX[4] = 1
 */
UINT64 PowerLimitationLog : 1;
#define IA32_THERM_STATUS_POWER_LIMITATION_LOG_BIT 11
#define IA32_THERM_STATUS_POWER_LIMITATION_LOG_FLAG 0x800
#define IA32_THERM_STATUS_POWER_LIMITATION_LOG_MASK 0x01
#define IA32_THERM_STATUS_POWER_LIMITATION_LOG(_) (((_) >> 11) & 0x01)
/**
 * [Bit 12] Current Limit Status
 *
 * @remarks If CPUID.06H:EAX[7] = 1
 */
UINT64 CurrentLimitStatus : 1;
#define IA32_THERM_STATUS_CURRENT_LIMIT_STATUS_BIT 12
#define IA32_THERM_STATUS_CURRENT_LIMIT_STATUS_FLAG 0x1000
#define IA32_THERM_STATUS_CURRENT_LIMIT_STATUS_MASK 0x01
#define IA32_THERM_STATUS_CURRENT_LIMIT_STATUS(_) (((_) >> 12) & 0x01)
/**
 * [Bit 13] Current Limit log
 *
 * @remarks If CPUID.06H:EAX[7] = 1
 */
UINT64 CurrentLimitLog : 1;
#define IA32_THERM_STATUS_CURRENT_LIMIT_LOG_BIT 13
#define IA32_THERM_STATUS_CURRENT_LIMIT_LOG_FLAG 0x2000
#define IA32_THERM_STATUS_CURRENT_LIMIT_LOG_MASK 0x01
#define IA32_THERM_STATUS_CURRENT_LIMIT_LOG(_) (((_) >> 13) & 0x01)
/**
 * [Bit 14] Cross Domain Limit Status
 *
 * @remarks If CPUID.06H:EAX[7] = 1
 */
UINT64 CrossDomainLimitStatus : 1;
#define IA32_THERM_STATUS_CROSS_DOMAIN_LIMIT_STATUS_BIT 14
#define IA32_THERM_STATUS_CROSS_DOMAIN_LIMIT_STATUS_FLAG 0x4000
#define IA32_THERM_STATUS_CROSS_DOMAIN_LIMIT_STATUS_MASK 0x01
#define IA32_THERM_STATUS_CROSS_DOMAIN_LIMIT_STATUS(_) (((_) >> 14) & 0x01)
/**
 * [Bit 15] Cross Domain Limit log
 *
 * @remarks If CPUID.06H:EAX[7] = 1
 */
UINT64 CrossDomainLimitLog : 1;
#define IA32_THERM_STATUS_CROSS_DOMAIN_LIMIT_LOG_BIT 15
#define IA32_THERM_STATUS_CROSS_DOMAIN_LIMIT_LOG_FLAG 0x8000
#define IA32_THERM_STATUS_CROSS_DOMAIN_LIMIT_LOG_MASK 0x01
#define IA32_THERM_STATUS_CROSS_DOMAIN_LIMIT_LOG(_) (((_) >> 15) & 0x01)
/**
 * [Bits 22:16] Digital Readout
 *
 * @remarks If CPUID.06H:EAX[0] = 1
 */
UINT64 DigitalReadout : 7;
#define IA32_THERM_STATUS_DIGITAL_READOUT_BIT 16
#define IA32_THERM_STATUS_DIGITAL_READOUT_FLAG 0x7F0000
#define IA32_THERM_STATUS_DIGITAL_READOUT_MASK 0x7F
#define IA32_THERM_STATUS_DIGITAL_READOUT(_) (((_) >> 16) & 0x7F)
UINT64 Reserved1 : 4;
/**
 * [Bits 30:27] Resolution in Degrees Celsius
 *
 * @remarks If CPUID.06H:EAX[0] = 1
 */
UINT64 ResolutionInDegreesCelsius : 4;
#define IA32_THERM_STATUS_RESOLUTION_IN_DEGREES_CELSIUS_BIT 27
#define IA32_THERM_STATUS_RESOLUTION_IN_DEGREES_CELSIUS_FLAG 0x78000000
#define IA32_THERM_STATUS_RESOLUTION_IN_DEGREES_CELSIUS_MASK 0x0F
#define IA32_THERM_STATUS_RESOLUTION_IN_DEGREES_CELSIUS(_) (((_) >> 27) & 0x0F)
/**
 * [Bit 31] Reading Valid
 *
 * @remarks If CPUID.06H:EAX[0] = 1
 */
UINT64 ReadingValid : 1;
#define IA32_THERM_STATUS_READING_VALID_BIT 31
#define IA32_THERM_STATUS_READING_VALID_FLAG 0x80000000
#define IA32_THERM_STATUS_READING_VALID_MASK 0x01
#define IA32_THERM_STATUS_READING_VALID(_) (((_) >> 31) & 0x01)
UINT64 Reserved2 : 32;
};
/* Full raw 64-bit contents of the register. */
UINT64 AsUInt;
} IA32_THERM_STATUS_REGISTER;
/**
* @brief Enable Misc. Processor Features (R/W)
*
* Allows a variety of processor functions to be enabled and disabled.
*/
#define IA32_MISC_ENABLE 0x000001A0
typedef union
{
struct
{
/**
* @brief Fast-Strings Enable
*
* [Bit 0] When set, the fast-strings feature (for REP MOVS and REP STORS) is
* enabled (default). When clear, fast-strings are disabled.
*
* @remarks 0F_0H
*/
UINT64 FastStringsEnable : 1;
#define IA32_MISC_ENABLE_FAST_STRINGS_ENABLE_BIT 0
#define IA32_MISC_ENABLE_FAST_STRINGS_ENABLE_FLAG 0x01
#define IA32_MISC_ENABLE_FAST_STRINGS_ENABLE_MASK 0x01
#define IA32_MISC_ENABLE_FAST_STRINGS_ENABLE(_) (((_) >> 0) & 0x01)
UINT64 Reserved1 : 2;
/**
* @brief Automatic Thermal Control Circuit Enable (R/W)
*
* [Bit 3] - 1 = Setting this bit enables the thermal control circuit (TCC) portion
* of the Intel Thermal Monitor feature. This allows the processor to automatically
* reduce power consumption in response to TCC activation.
* - 0 = Disabled.
*
* @note In some products clearing this bit might be ignored in critical thermal
* conditions, and TM1, TM2 and adaptive thermal throttling will still be activated.
* The default value of this field varies with product.
* @remarks 0F_0H
*/
UINT64 AutomaticThermalControlCircuitEnable : 1;
#define IA32_MISC_ENABLE_AUTOMATIC_THERMAL_CONTROL_CIRCUIT_ENABLE_BIT 3
#define IA32_MISC_ENABLE_AUTOMATIC_THERMAL_CONTROL_CIRCUIT_ENABLE_FLAG 0x08
#define IA32_MISC_ENABLE_AUTOMATIC_THERMAL_CONTROL_CIRCUIT_ENABLE_MASK 0x01
#define IA32_MISC_ENABLE_AUTOMATIC_THERMAL_CONTROL_CIRCUIT_ENABLE(_) (((_) >> 3) & 0x01)
UINT64 Reserved2 : 3;
/**
* @brief Performance Monitoring Available (R)
*
* [Bit 7] - 1 = Performance monitoring enabled.
* - 0 = Performance monitoring disabled.
*
* @remarks 0F_0H
*/
UINT64 PerformanceMonitoringAvailable : 1;
#define IA32_MISC_ENABLE_PERFORMANCE_MONITORING_AVAILABLE_BIT 7
#define IA32_MISC_ENABLE_PERFORMANCE_MONITORING_AVAILABLE_FLAG 0x80
#define IA32_MISC_ENABLE_PERFORMANCE_MONITORING_AVAILABLE_MASK 0x01
#define IA32_MISC_ENABLE_PERFORMANCE_MONITORING_AVAILABLE(_) (((_) >> 7) & 0x01)
UINT64 Reserved3 : 3;
/**
* @brief Branch Trace Storage Unavailable (RO)
*
* [Bit 11] - 1 = Processor doesn't support branch trace storage (BTS).
* - 0 = BTS is supported.
*
* @remarks 0F_0H
*/
UINT64 BranchTraceStorageUnavailable : 1;
#define IA32_MISC_ENABLE_BRANCH_TRACE_STORAGE_UNAVAILABLE_BIT 11
#define IA32_MISC_ENABLE_BRANCH_TRACE_STORAGE_UNAVAILABLE_FLAG 0x800
#define IA32_MISC_ENABLE_BRANCH_TRACE_STORAGE_UNAVAILABLE_MASK 0x01
#define IA32_MISC_ENABLE_BRANCH_TRACE_STORAGE_UNAVAILABLE(_) (((_) >> 11) & 0x01)
/**
* @brief Processor Event Based Sampling (PEBS) Unavailable (RO)
*
* [Bit 12] - 1 = PEBS is not supported.
* - 0 = PEBS is supported.
*
* @remarks 06_0FH
*/
UINT64 ProcessorEventBasedSamplingUnavailable : 1;
#define IA32_MISC_ENABLE_PROCESSOR_EVENT_BASED_SAMPLING_UNAVAILABLE_BIT 12
#define IA32_MISC_ENABLE_PROCESSOR_EVENT_BASED_SAMPLING_UNAVAILABLE_FLAG 0x1000
#define IA32_MISC_ENABLE_PROCESSOR_EVENT_BASED_SAMPLING_UNAVAILABLE_MASK 0x01
#define IA32_MISC_ENABLE_PROCESSOR_EVENT_BASED_SAMPLING_UNAVAILABLE(_) (((_) >> 12) & 0x01)
UINT64 Reserved4 : 3;
/**
* @brief Enhanced Intel SpeedStep Technology Enable (R/W)
*
* [Bit 16] - 0 = Enhanced Intel SpeedStep Technology disabled.
* - 1 = Enhanced Intel SpeedStep Technology enabled.
*
* @remarks If CPUID.01H: ECX[7] = 1
*/
UINT64 EnhancedIntelSpeedstepTechnologyEnable : 1;
#define IA32_MISC_ENABLE_ENHANCED_INTEL_SPEEDSTEP_TECHNOLOGY_ENABLE_BIT 16
#define IA32_MISC_ENABLE_ENHANCED_INTEL_SPEEDSTEP_TECHNOLOGY_ENABLE_FLAG 0x10000
#define IA32_MISC_ENABLE_ENHANCED_INTEL_SPEEDSTEP_TECHNOLOGY_ENABLE_MASK 0x01
#define IA32_MISC_ENABLE_ENHANCED_INTEL_SPEEDSTEP_TECHNOLOGY_ENABLE(_) (((_) >> 16) & 0x01)
UINT64 Reserved5 : 1;
/**
* @brief ENABLE MONITOR FSM (R/W)
*
* [Bit 18] When this bit is set to 0, the MONITOR feature flag is not set
* (CPUID.01H:ECX[bit3] = 0). This indicates that MONITOR/MWAIT are not supported.
* Software attempts to execute MONITOR/MWAIT will cause \#UD when this bit is 0.
* When this bit is set to 1 (default), MONITOR/MWAIT are supported
* (CPUID.01H:ECX[bit 3] = 1). If the SSE3 feature flag ECX[0] is not set
* (CPUID.01H:ECX[bit 0] = 0), the OS must not attempt to alter this bit. BIOS must
* leave it in the default state. Writing this bit when the SSE3 feature flag is set
* to 0 may generate a \#GP exception.
*
* @remarks 0F_03H
*/
UINT64 EnableMonitorFsm : 1;
#define IA32_MISC_ENABLE_ENABLE_MONITOR_FSM_BIT 18
#define IA32_MISC_ENABLE_ENABLE_MONITOR_FSM_FLAG 0x40000
#define IA32_MISC_ENABLE_ENABLE_MONITOR_FSM_MASK 0x01
#define IA32_MISC_ENABLE_ENABLE_MONITOR_FSM(_) (((_) >> 18) & 0x01)
UINT64 Reserved6 : 3;
/**
* @brief Limit CPUID Maxval (R/W)
*
* [Bit 22] When this bit is set to 1, CPUID.00H returns a maximum value in EAX[7:0]
* of 2. BIOS should contain a setup question that allows users to specify when the
* installed OS does not support CPUID functions greater than 2. Before setting this
* bit, BIOS must execute the CPUID.0H and examine the maximum value returned in
* EAX[7:0]. If the maximum value is greater than 2, this bit is supported.
* Otherwise, this bit is not supported. Setting this bit when the maximum value is
* not greater than 2 may generate a \#GP exception. Setting this bit may cause
* unexpected behavior in software that depends on the availability of CPUID leaves
* greater than 2.
*
* @remarks 0F_03H
*/
UINT64 LimitCpuidMaxval : 1;
#define IA32_MISC_ENABLE_LIMIT_CPUID_MAXVAL_BIT 22
#define IA32_MISC_ENABLE_LIMIT_CPUID_MAXVAL_FLAG 0x400000
#define IA32_MISC_ENABLE_LIMIT_CPUID_MAXVAL_MASK 0x01
#define IA32_MISC_ENABLE_LIMIT_CPUID_MAXVAL(_) (((_) >> 22) & 0x01)
/**
* @brief xTPR Message Disable (R/W)
*
* [Bit 23] When set to 1, xTPR messages are disabled. xTPR messages are optional
* messages that allow the processor to inform the chipset of its priority.
*
* @remarks If CPUID.01H:ECX[14] = 1
*/
UINT64 XtprMessageDisable : 1;
#define IA32_MISC_ENABLE_XTPR_MESSAGE_DISABLE_BIT 23
#define IA32_MISC_ENABLE_XTPR_MESSAGE_DISABLE_FLAG 0x800000
#define IA32_MISC_ENABLE_XTPR_MESSAGE_DISABLE_MASK 0x01
#define IA32_MISC_ENABLE_XTPR_MESSAGE_DISABLE(_) (((_) >> 23) & 0x01)
UINT64 Reserved7 : 10;
/**
* @brief XD Bit Disable (R/W)
*
* [Bit 34] When set to 1, the Execute Disable Bit feature (XD Bit) is disabled and
* the XD Bit extended feature flag will be clear (CPUID.80000001H: EDX[20]=0). When
* set to a 0 (default), the Execute Disable Bit feature (if available) allows the
* OS to enable PAE paging and take advantage of data only pages. BIOS must not
* alter the contents of this bit location, if XD bit is not supported. Writing this
* bit to 1 when the XD Bit extended feature flag is set to 0 may generate a \#GP
* exception.
*
* @remarks If CPUID.80000001H:EDX[20] = 1
*/
UINT64 XdBitDisable : 1;
#define IA32_MISC_ENABLE_XD_BIT_DISABLE_BIT 34
#define IA32_MISC_ENABLE_XD_BIT_DISABLE_FLAG 0x400000000
#define IA32_MISC_ENABLE_XD_BIT_DISABLE_MASK 0x01
#define IA32_MISC_ENABLE_XD_BIT_DISABLE(_) (((_) >> 34) & 0x01)
UINT64 Reserved8 : 29;
};
UINT64 AsUInt;
} IA32_MISC_ENABLE_REGISTER;
/**
 * Performance Energy Bias Hint.
 *
 * @remarks If CPUID.6H:ECX[3] = 1
 * @note 64-bit MSR image: a 4-bit hint field plus 60 reserved bits; AsUInt aliases the raw value.
 */
#define IA32_ENERGY_PERF_BIAS 0x000001B0
typedef union
{
struct
{
/**
 * @brief Power Policy Preference
 *
 * [Bits 3:0] - 0 indicates preference to highest performance.
 * - 15 indicates preference to maximize energy saving.
 */
UINT64 PowerPolicyPreference : 4;
#define IA32_ENERGY_PERF_BIAS_POWER_POLICY_PREFERENCE_BIT 0
#define IA32_ENERGY_PERF_BIAS_POWER_POLICY_PREFERENCE_FLAG 0x0F
#define IA32_ENERGY_PERF_BIAS_POWER_POLICY_PREFERENCE_MASK 0x0F
#define IA32_ENERGY_PERF_BIAS_POWER_POLICY_PREFERENCE(_) (((_) >> 0) & 0x0F)
/* Bits 63:4 reserved. */
UINT64 Reserved1 : 60;
};
UINT64 AsUInt;
} IA32_ENERGY_PERF_BIAS_REGISTER;
/**
 * @brief Package Thermal Status Information (RO)
 *
 * Package Thermal Status Information. Contains status information about the package's thermal
 * sensor.
 *
 * @remarks If CPUID.06H: EAX[6] = 1
 * @see Vol3B[14.8(PACKAGE LEVEL THERMAL MANAGEMENT)]
 * @note Each status bit is paired with a *Log bit — presumably a sticky latch of the
 *       corresponding event; confirm against Vol3B 14.8 before relying on that.
 */
#define IA32_PACKAGE_THERM_STATUS 0x000001B1
typedef union
{
struct
{
/**
 * [Bit 0] Pkg Thermal Status
 */
UINT64 ThermalStatus : 1;
#define IA32_PACKAGE_THERM_STATUS_THERMAL_STATUS_BIT 0
#define IA32_PACKAGE_THERM_STATUS_THERMAL_STATUS_FLAG 0x01
#define IA32_PACKAGE_THERM_STATUS_THERMAL_STATUS_MASK 0x01
#define IA32_PACKAGE_THERM_STATUS_THERMAL_STATUS(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] Pkg Thermal Status Log
 */
UINT64 ThermalStatusLog : 1;
#define IA32_PACKAGE_THERM_STATUS_THERMAL_STATUS_LOG_BIT 1
#define IA32_PACKAGE_THERM_STATUS_THERMAL_STATUS_LOG_FLAG 0x02
#define IA32_PACKAGE_THERM_STATUS_THERMAL_STATUS_LOG_MASK 0x01
#define IA32_PACKAGE_THERM_STATUS_THERMAL_STATUS_LOG(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] Pkg PROCHOT \# event
 */
UINT64 ProchotEvent : 1;
#define IA32_PACKAGE_THERM_STATUS_PROCHOT_EVENT_BIT 2
#define IA32_PACKAGE_THERM_STATUS_PROCHOT_EVENT_FLAG 0x04
#define IA32_PACKAGE_THERM_STATUS_PROCHOT_EVENT_MASK 0x01
#define IA32_PACKAGE_THERM_STATUS_PROCHOT_EVENT(_) (((_) >> 2) & 0x01)
/**
 * [Bit 3] Pkg PROCHOT \# log
 */
UINT64 ProchotLog : 1;
#define IA32_PACKAGE_THERM_STATUS_PROCHOT_LOG_BIT 3
#define IA32_PACKAGE_THERM_STATUS_PROCHOT_LOG_FLAG 0x08
#define IA32_PACKAGE_THERM_STATUS_PROCHOT_LOG_MASK 0x01
#define IA32_PACKAGE_THERM_STATUS_PROCHOT_LOG(_) (((_) >> 3) & 0x01)
/**
 * [Bit 4] Pkg Critical Temperature Status
 */
UINT64 CriticalTemperatureStatus : 1;
#define IA32_PACKAGE_THERM_STATUS_CRITICAL_TEMPERATURE_STATUS_BIT 4
#define IA32_PACKAGE_THERM_STATUS_CRITICAL_TEMPERATURE_STATUS_FLAG 0x10
#define IA32_PACKAGE_THERM_STATUS_CRITICAL_TEMPERATURE_STATUS_MASK 0x01
#define IA32_PACKAGE_THERM_STATUS_CRITICAL_TEMPERATURE_STATUS(_) (((_) >> 4) & 0x01)
/**
 * [Bit 5] Pkg Critical Temperature Status Log
 */
UINT64 CriticalTemperatureStatusLog : 1;
#define IA32_PACKAGE_THERM_STATUS_CRITICAL_TEMPERATURE_STATUS_LOG_BIT 5
#define IA32_PACKAGE_THERM_STATUS_CRITICAL_TEMPERATURE_STATUS_LOG_FLAG 0x20
#define IA32_PACKAGE_THERM_STATUS_CRITICAL_TEMPERATURE_STATUS_LOG_MASK 0x01
#define IA32_PACKAGE_THERM_STATUS_CRITICAL_TEMPERATURE_STATUS_LOG(_) (((_) >> 5) & 0x01)
/**
 * [Bit 6] Pkg Thermal Threshold \#1 Status
 */
UINT64 ThermalThreshold1Status : 1;
#define IA32_PACKAGE_THERM_STATUS_THERMAL_THRESHOLD1_STATUS_BIT 6
#define IA32_PACKAGE_THERM_STATUS_THERMAL_THRESHOLD1_STATUS_FLAG 0x40
#define IA32_PACKAGE_THERM_STATUS_THERMAL_THRESHOLD1_STATUS_MASK 0x01
#define IA32_PACKAGE_THERM_STATUS_THERMAL_THRESHOLD1_STATUS(_) (((_) >> 6) & 0x01)
/**
 * [Bit 7] Pkg Thermal Threshold \#1 log
 */
UINT64 ThermalThreshold1Log : 1;
#define IA32_PACKAGE_THERM_STATUS_THERMAL_THRESHOLD1_LOG_BIT 7
#define IA32_PACKAGE_THERM_STATUS_THERMAL_THRESHOLD1_LOG_FLAG 0x80
#define IA32_PACKAGE_THERM_STATUS_THERMAL_THRESHOLD1_LOG_MASK 0x01
#define IA32_PACKAGE_THERM_STATUS_THERMAL_THRESHOLD1_LOG(_) (((_) >> 7) & 0x01)
/**
 * [Bit 8] Pkg Thermal Threshold \#2 Status
 */
UINT64 ThermalThreshold2Status : 1;
#define IA32_PACKAGE_THERM_STATUS_THERMAL_THRESHOLD2_STATUS_BIT 8
#define IA32_PACKAGE_THERM_STATUS_THERMAL_THRESHOLD2_STATUS_FLAG 0x100
#define IA32_PACKAGE_THERM_STATUS_THERMAL_THRESHOLD2_STATUS_MASK 0x01
#define IA32_PACKAGE_THERM_STATUS_THERMAL_THRESHOLD2_STATUS(_) (((_) >> 8) & 0x01)
/**
 * [Bit 9] Pkg Thermal Threshold \#2 log
 */
UINT64 ThermalThreshold2Log : 1;
#define IA32_PACKAGE_THERM_STATUS_THERMAL_THRESHOLD2_LOG_BIT 9
#define IA32_PACKAGE_THERM_STATUS_THERMAL_THRESHOLD2_LOG_FLAG 0x200
#define IA32_PACKAGE_THERM_STATUS_THERMAL_THRESHOLD2_LOG_MASK 0x01
#define IA32_PACKAGE_THERM_STATUS_THERMAL_THRESHOLD2_LOG(_) (((_) >> 9) & 0x01)
/**
 * [Bit 10] Pkg Power Limitation Status
 */
UINT64 PowerLimitationStatus : 1;
#define IA32_PACKAGE_THERM_STATUS_POWER_LIMITATION_STATUS_BIT 10
#define IA32_PACKAGE_THERM_STATUS_POWER_LIMITATION_STATUS_FLAG 0x400
#define IA32_PACKAGE_THERM_STATUS_POWER_LIMITATION_STATUS_MASK 0x01
#define IA32_PACKAGE_THERM_STATUS_POWER_LIMITATION_STATUS(_) (((_) >> 10) & 0x01)
/**
 * [Bit 11] Pkg Power Limitation log
 */
UINT64 PowerLimitationLog : 1;
#define IA32_PACKAGE_THERM_STATUS_POWER_LIMITATION_LOG_BIT 11
#define IA32_PACKAGE_THERM_STATUS_POWER_LIMITATION_LOG_FLAG 0x800
#define IA32_PACKAGE_THERM_STATUS_POWER_LIMITATION_LOG_MASK 0x01
#define IA32_PACKAGE_THERM_STATUS_POWER_LIMITATION_LOG(_) (((_) >> 11) & 0x01)
/* Bits 15:12 reserved. */
UINT64 Reserved1 : 4;
/**
 * [Bits 22:16] Pkg Digital Readout
 */
UINT64 DigitalReadout : 7;
#define IA32_PACKAGE_THERM_STATUS_DIGITAL_READOUT_BIT 16
#define IA32_PACKAGE_THERM_STATUS_DIGITAL_READOUT_FLAG 0x7F0000
#define IA32_PACKAGE_THERM_STATUS_DIGITAL_READOUT_MASK 0x7F
#define IA32_PACKAGE_THERM_STATUS_DIGITAL_READOUT(_) (((_) >> 16) & 0x7F)
/* Bits 63:23 reserved. */
UINT64 Reserved2 : 41;
};
UINT64 AsUInt;
} IA32_PACKAGE_THERM_STATUS_REGISTER;
/**
 * @brief Package Thermal Interrupt Control (RO)
 *
 * Enables and disables the generation of an interrupt on temperature transitions detected with the
 * package's thermal sensor.
 *
 * NOTE(review): the "(RO)" tag above conflicts with the enable/disable description — a
 * control MSR of this kind should be R/W; confirm against Intel SDM Vol. 4 Table 2-2.
 *
 * @remarks If CPUID.06H: EAX[6] = 1
 * @see Vol3B[14.8(PACKAGE LEVEL THERMAL MANAGEMENT)]
 */
#define IA32_PACKAGE_THERM_INTERRUPT 0x000001B2
typedef union
{
struct
{
/**
 * [Bit 0] Pkg High-Temperature Interrupt Enable.
 */
UINT64 HighTemperatureInterruptEnable : 1;
#define IA32_PACKAGE_THERM_INTERRUPT_HIGH_TEMPERATURE_INTERRUPT_ENABLE_BIT 0
#define IA32_PACKAGE_THERM_INTERRUPT_HIGH_TEMPERATURE_INTERRUPT_ENABLE_FLAG 0x01
#define IA32_PACKAGE_THERM_INTERRUPT_HIGH_TEMPERATURE_INTERRUPT_ENABLE_MASK 0x01
#define IA32_PACKAGE_THERM_INTERRUPT_HIGH_TEMPERATURE_INTERRUPT_ENABLE(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] Pkg Low-Temperature Interrupt Enable.
 */
UINT64 LowTemperatureInterruptEnable : 1;
#define IA32_PACKAGE_THERM_INTERRUPT_LOW_TEMPERATURE_INTERRUPT_ENABLE_BIT 1
#define IA32_PACKAGE_THERM_INTERRUPT_LOW_TEMPERATURE_INTERRUPT_ENABLE_FLAG 0x02
#define IA32_PACKAGE_THERM_INTERRUPT_LOW_TEMPERATURE_INTERRUPT_ENABLE_MASK 0x01
#define IA32_PACKAGE_THERM_INTERRUPT_LOW_TEMPERATURE_INTERRUPT_ENABLE(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] Pkg PROCHOT\# Interrupt Enable.
 */
UINT64 ProchotInterruptEnable : 1;
#define IA32_PACKAGE_THERM_INTERRUPT_PROCHOT_INTERRUPT_ENABLE_BIT 2
#define IA32_PACKAGE_THERM_INTERRUPT_PROCHOT_INTERRUPT_ENABLE_FLAG 0x04
#define IA32_PACKAGE_THERM_INTERRUPT_PROCHOT_INTERRUPT_ENABLE_MASK 0x01
#define IA32_PACKAGE_THERM_INTERRUPT_PROCHOT_INTERRUPT_ENABLE(_) (((_) >> 2) & 0x01)
/* Bit 3 reserved. */
UINT64 Reserved1 : 1;
/**
 * [Bit 4] Pkg Overheat Interrupt Enable.
 */
UINT64 OverheatInterruptEnable : 1;
#define IA32_PACKAGE_THERM_INTERRUPT_OVERHEAT_INTERRUPT_ENABLE_BIT 4
#define IA32_PACKAGE_THERM_INTERRUPT_OVERHEAT_INTERRUPT_ENABLE_FLAG 0x10
#define IA32_PACKAGE_THERM_INTERRUPT_OVERHEAT_INTERRUPT_ENABLE_MASK 0x01
#define IA32_PACKAGE_THERM_INTERRUPT_OVERHEAT_INTERRUPT_ENABLE(_) (((_) >> 4) & 0x01)
/* Bits 7:5 reserved. */
UINT64 Reserved2 : 3;
/**
 * [Bits 14:8] Pkg Threshold \#1 Value
 */
UINT64 Threshold1Value : 7;
#define IA32_PACKAGE_THERM_INTERRUPT_THRESHOLD1_VALUE_BIT 8
#define IA32_PACKAGE_THERM_INTERRUPT_THRESHOLD1_VALUE_FLAG 0x7F00
#define IA32_PACKAGE_THERM_INTERRUPT_THRESHOLD1_VALUE_MASK 0x7F
#define IA32_PACKAGE_THERM_INTERRUPT_THRESHOLD1_VALUE(_) (((_) >> 8) & 0x7F)
/**
 * [Bit 15] Pkg Threshold \#1 Interrupt Enable.
 */
UINT64 Threshold1InterruptEnable : 1;
#define IA32_PACKAGE_THERM_INTERRUPT_THRESHOLD1_INTERRUPT_ENABLE_BIT 15
#define IA32_PACKAGE_THERM_INTERRUPT_THRESHOLD1_INTERRUPT_ENABLE_FLAG 0x8000
#define IA32_PACKAGE_THERM_INTERRUPT_THRESHOLD1_INTERRUPT_ENABLE_MASK 0x01
#define IA32_PACKAGE_THERM_INTERRUPT_THRESHOLD1_INTERRUPT_ENABLE(_) (((_) >> 15) & 0x01)
/**
 * [Bits 22:16] Pkg Threshold \#2 Value.
 */
UINT64 Threshold2Value : 7;
#define IA32_PACKAGE_THERM_INTERRUPT_THRESHOLD2_VALUE_BIT 16
#define IA32_PACKAGE_THERM_INTERRUPT_THRESHOLD2_VALUE_FLAG 0x7F0000
#define IA32_PACKAGE_THERM_INTERRUPT_THRESHOLD2_VALUE_MASK 0x7F
#define IA32_PACKAGE_THERM_INTERRUPT_THRESHOLD2_VALUE(_) (((_) >> 16) & 0x7F)
/**
 * [Bit 23] Pkg Threshold \#2 Interrupt Enable.
 */
UINT64 Threshold2InterruptEnable : 1;
#define IA32_PACKAGE_THERM_INTERRUPT_THRESHOLD2_INTERRUPT_ENABLE_BIT 23
#define IA32_PACKAGE_THERM_INTERRUPT_THRESHOLD2_INTERRUPT_ENABLE_FLAG 0x800000
#define IA32_PACKAGE_THERM_INTERRUPT_THRESHOLD2_INTERRUPT_ENABLE_MASK 0x01
#define IA32_PACKAGE_THERM_INTERRUPT_THRESHOLD2_INTERRUPT_ENABLE(_) (((_) >> 23) & 0x01)
/**
 * [Bit 24] Pkg Power Limit Notification Enable.
 */
UINT64 PowerLimitNotificationEnable : 1;
#define IA32_PACKAGE_THERM_INTERRUPT_POWER_LIMIT_NOTIFICATION_ENABLE_BIT 24
#define IA32_PACKAGE_THERM_INTERRUPT_POWER_LIMIT_NOTIFICATION_ENABLE_FLAG 0x1000000
#define IA32_PACKAGE_THERM_INTERRUPT_POWER_LIMIT_NOTIFICATION_ENABLE_MASK 0x01
#define IA32_PACKAGE_THERM_INTERRUPT_POWER_LIMIT_NOTIFICATION_ENABLE(_) (((_) >> 24) & 0x01)
/* Bits 63:25 reserved. */
UINT64 Reserved3 : 39;
};
UINT64 AsUInt;
} IA32_PACKAGE_THERM_INTERRUPT_REGISTER;
/**
 * Trace/Profile Resource Control.
 *
 * Debug-control MSR: last-branch-record (LBR), branch-trace (TR/BTS/BTM) and
 * counter-freeze controls, as documented per field below.
 *
 * @remarks 06_0EH
 */
#define IA32_DEBUGCTL 0x000001D9
typedef union
{
struct
{
/**
 * [Bit 0] Setting this bit to 1 enables the processor to record a running trace of
 * the most recent branches taken by the processor in the LBR stack.
 *
 * @remarks 06_01H
 */
UINT64 Lbr : 1;
#define IA32_DEBUGCTL_LBR_BIT 0
#define IA32_DEBUGCTL_LBR_FLAG 0x01
#define IA32_DEBUGCTL_LBR_MASK 0x01
#define IA32_DEBUGCTL_LBR(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] Setting this bit to 1 enables the processor to treat EFLAGS.TF as
 * single-step on branches instead of single-step on instructions.
 *
 * @remarks 06_01H
 */
UINT64 Btf : 1;
#define IA32_DEBUGCTL_BTF_BIT 1
#define IA32_DEBUGCTL_BTF_FLAG 0x02
#define IA32_DEBUGCTL_BTF_MASK 0x01
#define IA32_DEBUGCTL_BTF(_) (((_) >> 1) & 0x01)
/* Bits 5:2 reserved. */
UINT64 Reserved1 : 4;
/**
 * [Bit 6] Setting this bit to 1 enables branch trace messages to be sent.
 *
 * @remarks 06_0EH
 */
UINT64 Tr : 1;
#define IA32_DEBUGCTL_TR_BIT 6
#define IA32_DEBUGCTL_TR_FLAG 0x40
#define IA32_DEBUGCTL_TR_MASK 0x01
#define IA32_DEBUGCTL_TR(_) (((_) >> 6) & 0x01)
/**
 * [Bit 7] Setting this bit enables branch trace messages (BTMs) to be logged in a
 * BTS buffer.
 *
 * @remarks 06_0EH
 */
UINT64 Bts : 1;
#define IA32_DEBUGCTL_BTS_BIT 7
#define IA32_DEBUGCTL_BTS_FLAG 0x80
#define IA32_DEBUGCTL_BTS_MASK 0x01
#define IA32_DEBUGCTL_BTS(_) (((_) >> 7) & 0x01)
/**
 * [Bit 8] When clear, BTMs are logged in a BTS buffer in circular fashion. When
 * this bit is set, an interrupt is generated by the BTS facility when the BTS
 * buffer is full.
 *
 * @remarks 06_0EH
 */
UINT64 Btint : 1;
#define IA32_DEBUGCTL_BTINT_BIT 8
#define IA32_DEBUGCTL_BTINT_FLAG 0x100
#define IA32_DEBUGCTL_BTINT_MASK 0x01
#define IA32_DEBUGCTL_BTINT(_) (((_) >> 8) & 0x01)
/**
 * [Bit 9] When set, BTS or BTM is skipped if CPL = 0.
 *
 * @remarks 06_0FH
 */
UINT64 BtsOffOs : 1;
#define IA32_DEBUGCTL_BTS_OFF_OS_BIT 9
#define IA32_DEBUGCTL_BTS_OFF_OS_FLAG 0x200
#define IA32_DEBUGCTL_BTS_OFF_OS_MASK 0x01
#define IA32_DEBUGCTL_BTS_OFF_OS(_) (((_) >> 9) & 0x01)
/**
 * [Bit 10] When set, BTS or BTM is skipped if CPL > 0.
 *
 * @remarks 06_0FH
 */
UINT64 BtsOffUsr : 1;
#define IA32_DEBUGCTL_BTS_OFF_USR_BIT 10
#define IA32_DEBUGCTL_BTS_OFF_USR_FLAG 0x400
#define IA32_DEBUGCTL_BTS_OFF_USR_MASK 0x01
#define IA32_DEBUGCTL_BTS_OFF_USR(_) (((_) >> 10) & 0x01)
/**
 * [Bit 11] When set, the LBR stack is frozen on a PMI request.
 *
 * @remarks If CPUID.01H: ECX[15] = 1 && CPUID.0AH: EAX[7:0] > 1
 */
UINT64 FreezeLbrsOnPmi : 1;
#define IA32_DEBUGCTL_FREEZE_LBRS_ON_PMI_BIT 11
#define IA32_DEBUGCTL_FREEZE_LBRS_ON_PMI_FLAG 0x800
#define IA32_DEBUGCTL_FREEZE_LBRS_ON_PMI_MASK 0x01
#define IA32_DEBUGCTL_FREEZE_LBRS_ON_PMI(_) (((_) >> 11) & 0x01)
/**
 * [Bit 12] When set, each ENABLE bit of the global counter control MSR are frozen
 * (address 38FH) on a PMI request.
 *
 * @remarks If CPUID.01H: ECX[15] = 1 && CPUID.0AH: EAX[7:0] > 1
 */
UINT64 FreezePerfmonOnPmi : 1;
#define IA32_DEBUGCTL_FREEZE_PERFMON_ON_PMI_BIT 12
#define IA32_DEBUGCTL_FREEZE_PERFMON_ON_PMI_FLAG 0x1000
#define IA32_DEBUGCTL_FREEZE_PERFMON_ON_PMI_MASK 0x01
#define IA32_DEBUGCTL_FREEZE_PERFMON_ON_PMI(_) (((_) >> 12) & 0x01)
/**
 * [Bit 13] When set, enables the logical processor to receive and generate PMI on
 * behalf of the uncore.
 *
 * @remarks 06_1AH
 */
UINT64 EnableUncorePmi : 1;
#define IA32_DEBUGCTL_ENABLE_UNCORE_PMI_BIT 13
#define IA32_DEBUGCTL_ENABLE_UNCORE_PMI_FLAG 0x2000
#define IA32_DEBUGCTL_ENABLE_UNCORE_PMI_MASK 0x01
#define IA32_DEBUGCTL_ENABLE_UNCORE_PMI(_) (((_) >> 13) & 0x01)
/**
 * [Bit 14] When set, freezes perfmon and trace messages while in SMM.
 *
 * @remarks If IA32_PERF_CAPABILITIES[12] = 1
 */
UINT64 FreezeWhileSmm : 1;
#define IA32_DEBUGCTL_FREEZE_WHILE_SMM_BIT 14
#define IA32_DEBUGCTL_FREEZE_WHILE_SMM_FLAG 0x4000
#define IA32_DEBUGCTL_FREEZE_WHILE_SMM_MASK 0x01
#define IA32_DEBUGCTL_FREEZE_WHILE_SMM(_) (((_) >> 14) & 0x01)
/**
 * [Bit 15] When set, enables DR7 debug bit on XBEGIN.
 *
 * @remarks If (CPUID.(EAX=07H, ECX=0):EBX[11] = 1)
 */
UINT64 RtmDebug : 1;
#define IA32_DEBUGCTL_RTM_DEBUG_BIT 15
#define IA32_DEBUGCTL_RTM_DEBUG_FLAG 0x8000
#define IA32_DEBUGCTL_RTM_DEBUG_MASK 0x01
#define IA32_DEBUGCTL_RTM_DEBUG(_) (((_) >> 15) & 0x01)
/* Bits 63:16 reserved. */
UINT64 Reserved2 : 48;
};
UINT64 AsUInt;
} IA32_DEBUGCTL_REGISTER;
/**
 * @brief SMRR Base Address (Writeable only in SMM)
 *
 * SMRR Base Address. Base address of SMM memory range.
 *
 * @remarks If IA32_MTRRCAP.SMRR[11] = 1
 * @note Pairs with IA32_SMRR_PHYSMASK (next definition) to describe the SMM range.
 */
#define IA32_SMRR_PHYSBASE 0x000001F2
typedef union
{
struct
{
/**
 * @brief Type
 *
 * [Bits 7:0] Type. Specifies memory type of the range.
 */
UINT64 Type : 8;
#define IA32_SMRR_PHYSBASE_TYPE_BIT 0
#define IA32_SMRR_PHYSBASE_TYPE_FLAG 0xFF
#define IA32_SMRR_PHYSBASE_TYPE_MASK 0xFF
#define IA32_SMRR_PHYSBASE_TYPE(_) (((_) >> 0) & 0xFF)
/* Bits 11:8 reserved. */
UINT64 Reserved1 : 4;
/**
 * [Bits 31:12] SMRR physical Base Address.
 */
UINT64 SmrrPhysicalBaseAddress : 20;
#define IA32_SMRR_PHYSBASE_SMRR_PHYSICAL_BASE_ADDRESS_BIT 12
#define IA32_SMRR_PHYSBASE_SMRR_PHYSICAL_BASE_ADDRESS_FLAG 0xFFFFF000
#define IA32_SMRR_PHYSBASE_SMRR_PHYSICAL_BASE_ADDRESS_MASK 0xFFFFF
#define IA32_SMRR_PHYSBASE_SMRR_PHYSICAL_BASE_ADDRESS(_) (((_) >> 12) & 0xFFFFF)
/* Bits 63:32 reserved. */
UINT64 Reserved2 : 32;
};
UINT64 AsUInt;
} IA32_SMRR_PHYSBASE_REGISTER;
/**
 * @brief SMRR Range Mask (Writeable only in SMM)
 *
 * Range Mask of SMM memory range.
 *
 * @remarks If IA32_MTRRCAP[SMRR] = 1
 * @note Companion to IA32_SMRR_PHYSBASE; the mask selects which address bits take
 *       part in the range match, analogous to the variable-range MTRR pair.
 */
#define IA32_SMRR_PHYSMASK 0x000001F3
typedef union
{
struct
{
/* Bits 10:0 reserved. */
UINT64 Reserved1 : 11;
/**
 * [Bit 11] Enable range mask.
 */
UINT64 EnableRangeMask : 1;
#define IA32_SMRR_PHYSMASK_ENABLE_RANGE_MASK_BIT 11
#define IA32_SMRR_PHYSMASK_ENABLE_RANGE_MASK_FLAG 0x800
#define IA32_SMRR_PHYSMASK_ENABLE_RANGE_MASK_MASK 0x01
#define IA32_SMRR_PHYSMASK_ENABLE_RANGE_MASK(_) (((_) >> 11) & 0x01)
/**
 * [Bits 31:12] SMRR address range mask.
 */
UINT64 SmrrAddressRangeMask : 20;
#define IA32_SMRR_PHYSMASK_SMRR_ADDRESS_RANGE_MASK_BIT 12
#define IA32_SMRR_PHYSMASK_SMRR_ADDRESS_RANGE_MASK_FLAG 0xFFFFF000
#define IA32_SMRR_PHYSMASK_SMRR_ADDRESS_RANGE_MASK_MASK 0xFFFFF
#define IA32_SMRR_PHYSMASK_SMRR_ADDRESS_RANGE_MASK(_) (((_) >> 12) & 0xFFFFF)
/* Bits 63:32 reserved. */
UINT64 Reserved2 : 32;
};
UINT64 AsUInt;
} IA32_SMRR_PHYSMASK_REGISTER;
/**
 * DCA Capability.
 *
 * @remarks If CPUID.01H: ECX[18] = 1
 */
#define IA32_PLATFORM_DCA_CAP 0x000001F8
/**
 * If set, CPU supports Prefetch-Hint type.
 *
 * @remarks If CPUID.01H: ECX[18] = 1
 */
#define IA32_CPU_DCA_CAP 0x000001F9
/**
 * DCA type 0 Status and Control register.
 *
 * @remarks If CPUID.01H: ECX[18] = 1
 */
#define IA32_DCA_0_CAP 0x000001FA
typedef union
{
struct
{
/**
 * [Bit 0] Set by HW when DCA is fuseenabled and no defeatures are set.
 */
UINT64 DcaActive : 1;
#define IA32_DCA_0_CAP_DCA_ACTIVE_BIT 0
#define IA32_DCA_0_CAP_DCA_ACTIVE_FLAG 0x01
#define IA32_DCA_0_CAP_DCA_ACTIVE_MASK 0x01
#define IA32_DCA_0_CAP_DCA_ACTIVE(_) (((_) >> 0) & 0x01)
/**
 * [Bits 2:1] TRANSACTION.
 */
UINT64 Transaction : 2;
#define IA32_DCA_0_CAP_TRANSACTION_BIT 1
#define IA32_DCA_0_CAP_TRANSACTION_FLAG 0x06
#define IA32_DCA_0_CAP_TRANSACTION_MASK 0x03
#define IA32_DCA_0_CAP_TRANSACTION(_) (((_) >> 1) & 0x03)
/**
 * [Bits 6:3] DCA_TYPE.
 */
UINT64 DcaType : 4;
#define IA32_DCA_0_CAP_DCA_TYPE_BIT 3
#define IA32_DCA_0_CAP_DCA_TYPE_FLAG 0x78
#define IA32_DCA_0_CAP_DCA_TYPE_MASK 0x0F
#define IA32_DCA_0_CAP_DCA_TYPE(_) (((_) >> 3) & 0x0F)
/**
 * [Bits 10:7] DCA_QUEUE_SIZE.
 */
UINT64 DcaQueueSize : 4;
#define IA32_DCA_0_CAP_DCA_QUEUE_SIZE_BIT 7
#define IA32_DCA_0_CAP_DCA_QUEUE_SIZE_FLAG 0x780
#define IA32_DCA_0_CAP_DCA_QUEUE_SIZE_MASK 0x0F
#define IA32_DCA_0_CAP_DCA_QUEUE_SIZE(_) (((_) >> 7) & 0x0F)
/* Bits 12:11 reserved. */
UINT64 Reserved1 : 2;
/**
 * [Bits 16:13] Writes will update the register but have no HW side-effect.
 */
UINT64 DcaDelay : 4;
#define IA32_DCA_0_CAP_DCA_DELAY_BIT 13
#define IA32_DCA_0_CAP_DCA_DELAY_FLAG 0x1E000
#define IA32_DCA_0_CAP_DCA_DELAY_MASK 0x0F
#define IA32_DCA_0_CAP_DCA_DELAY(_) (((_) >> 13) & 0x0F)
/* Bits 23:17 reserved. */
UINT64 Reserved2 : 7;
/**
 * [Bit 24] SW can request DCA block by setting this bit.
 */
UINT64 SwBlock : 1;
#define IA32_DCA_0_CAP_SW_BLOCK_BIT 24
#define IA32_DCA_0_CAP_SW_BLOCK_FLAG 0x1000000
#define IA32_DCA_0_CAP_SW_BLOCK_MASK 0x01
#define IA32_DCA_0_CAP_SW_BLOCK(_) (((_) >> 24) & 0x01)
/* Bit 25 reserved. */
UINT64 Reserved3 : 1;
/**
 * [Bit 26] Set when DCA is blocked by HW (e.g. CR0.CD = 1).
 */
UINT64 HwBlock : 1;
#define IA32_DCA_0_CAP_HW_BLOCK_BIT 26
#define IA32_DCA_0_CAP_HW_BLOCK_FLAG 0x4000000
#define IA32_DCA_0_CAP_HW_BLOCK_MASK 0x01
#define IA32_DCA_0_CAP_HW_BLOCK(_) (((_) >> 26) & 0x01)
/* Bits 63:27 reserved. */
UINT64 Reserved4 : 37;
};
UINT64 AsUInt;
} IA32_DCA_0_CAP_REGISTER;
/**
 * @defgroup IA32_MTRR_PHYSBASE \
 *           IA32_MTRR_PHYSBASE(n)
 *
 * IA32_MTRR_PHYSBASE(0-9).
 *
 * @remarks If CPUID.01H: EDX.MTRR[12] = 1
 * @see Vol3A[11.11.2.3(Variable Range MTRRs)]
 * @note Each PHYSBASEn MSR pairs with the PHYSMASKn MSR at the next (odd) address.
 * @{
 */
typedef union
{
struct
{
/**
 * [Bits 7:0] Specifies the memory type for the range.
 */
UINT64 Type : 8;
#define IA32_MTRR_PHYSBASE_TYPE_BIT 0
#define IA32_MTRR_PHYSBASE_TYPE_FLAG 0xFF
#define IA32_MTRR_PHYSBASE_TYPE_MASK 0xFF
#define IA32_MTRR_PHYSBASE_TYPE(_) (((_) >> 0) & 0xFF)
/* Bits 11:8 reserved. */
UINT64 Reserved1 : 4;
/**
 * [Bits 47:12] Specifies the base address of the address range. This 24-bit value,
 * in the case where MAXPHYADDR is 36 bits, is extended by 12 bits at the low end to
 * form the base address (this automatically aligns the address on a 4-KByte
 * boundary).
 */
UINT64 PageFrameNumber : 36;
#define IA32_MTRR_PHYSBASE_PAGE_FRAME_NUMBER_BIT 12
#define IA32_MTRR_PHYSBASE_PAGE_FRAME_NUMBER_FLAG 0xFFFFFFFFF000
#define IA32_MTRR_PHYSBASE_PAGE_FRAME_NUMBER_MASK 0xFFFFFFFFF
#define IA32_MTRR_PHYSBASE_PAGE_FRAME_NUMBER(_) (((_) >> 12) & 0xFFFFFFFFF)
/* Bits 63:48 reserved. */
UINT64 Reserved2 : 16;
};
UINT64 AsUInt;
} IA32_MTRR_PHYSBASE_REGISTER;
/* Variable-range MTRR base MSRs occupy even addresses 0x200..0x212. */
#define IA32_MTRR_PHYSBASE0 0x00000200
#define IA32_MTRR_PHYSBASE1 0x00000202
#define IA32_MTRR_PHYSBASE2 0x00000204
#define IA32_MTRR_PHYSBASE3 0x00000206
#define IA32_MTRR_PHYSBASE4 0x00000208
#define IA32_MTRR_PHYSBASE5 0x0000020A
#define IA32_MTRR_PHYSBASE6 0x0000020C
#define IA32_MTRR_PHYSBASE7 0x0000020E
#define IA32_MTRR_PHYSBASE8 0x00000210
#define IA32_MTRR_PHYSBASE9 0x00000212
/**
 * @}
 */
/**
 * @defgroup IA32_MTRR_PHYSMASK \
 *           IA32_MTRR_PHYSMASK(n)
 *
 * IA32_MTRR_PHYSMASK(0-9).
 *
 * @remarks If CPUID.01H: EDX.MTRR[12] = 1
 * @see Vol3A[11.11.2.3(Variable Range MTRRs)]
 * @note Companion mask register for each IA32_MTRR_PHYSBASE(n) above.
 * @{
 */
typedef union
{
struct
{
/* Bits 10:0 reserved. */
UINT64 Reserved1 : 11;
/**
 * [Bit 11] Enables the register pair when set; disables register pair when clear.
 */
UINT64 Valid : 1;
#define IA32_MTRR_PHYSMASK_VALID_BIT 11
#define IA32_MTRR_PHYSMASK_VALID_FLAG 0x800
#define IA32_MTRR_PHYSMASK_VALID_MASK 0x01
#define IA32_MTRR_PHYSMASK_VALID(_) (((_) >> 11) & 0x01)
/**
 * [Bits 47:12] Specifies a mask (24 bits if the maximum physical address size is 36
 * bits, 28 bits if the maximum physical address size is 40 bits). The mask
 * determines the range of the region being mapped, according to the following
 * relationships:
 * - Address_Within_Range AND PhysMask = PhysBase AND PhysMask
 * - This value is extended by 12 bits at the low end to form the mask value.
 * - The width of the PhysMask field depends on the maximum physical address size
 * supported by the processor. CPUID.80000008H reports the maximum physical address
 * size supported by the processor. If CPUID.80000008H is not available, software
 * may assume that the processor supports a 36-bit physical address size.
 *
 * @see Vol3A[11.11.3(Example Base and Mask Calculations)]
 */
UINT64 PageFrameNumber : 36;
#define IA32_MTRR_PHYSMASK_PAGE_FRAME_NUMBER_BIT 12
#define IA32_MTRR_PHYSMASK_PAGE_FRAME_NUMBER_FLAG 0xFFFFFFFFF000
#define IA32_MTRR_PHYSMASK_PAGE_FRAME_NUMBER_MASK 0xFFFFFFFFF
#define IA32_MTRR_PHYSMASK_PAGE_FRAME_NUMBER(_) (((_) >> 12) & 0xFFFFFFFFF)
/* Bits 63:48 reserved. */
UINT64 Reserved2 : 16;
};
UINT64 AsUInt;
} IA32_MTRR_PHYSMASK_REGISTER;
/* Variable-range MTRR mask MSRs occupy odd addresses 0x201..0x213. */
#define IA32_MTRR_PHYSMASK0 0x00000201
#define IA32_MTRR_PHYSMASK1 0x00000203
#define IA32_MTRR_PHYSMASK2 0x00000205
#define IA32_MTRR_PHYSMASK3 0x00000207
#define IA32_MTRR_PHYSMASK4 0x00000209
#define IA32_MTRR_PHYSMASK5 0x0000020B
#define IA32_MTRR_PHYSMASK6 0x0000020D
#define IA32_MTRR_PHYSMASK7 0x0000020F
#define IA32_MTRR_PHYSMASK8 0x00000211
#define IA32_MTRR_PHYSMASK9 0x00000213
/**
 * @}
 */
/**
 * @defgroup IA32_MTRR_FIX \
 *           IA32_MTRR_FIX(x)
 *
 * IA32_MTRR_FIX(x).
 *
 * Fixed-range MTRRs: *_BASE/*_SIZE give the physical range covered per entry,
 * and the remaining constants are the MSR addresses.
 *
 * @remarks If CPUID.01H: EDX.MTRR[12] = 1
 * @see Vol3A[11.11.2.2(Fixed Range MTRRs)]
 * @{
 */
/**
 * @defgroup IA32_MTRR_FIX64K \
 *           IA32_MTRR_FIX64K(x)
 *
 * IA32_MTRR_FIX64K(x).
 * @{
 */
#define IA32_MTRR_FIX64K_BASE 0x00000000
#define IA32_MTRR_FIX64K_SIZE 0x00010000
#define IA32_MTRR_FIX64K_00000 0x00000250
/**
 * @}
 */
/**
 * @defgroup IA32_MTRR_FIX16K \
 *           IA32_MTRR_FIX16K(x)
 *
 * IA32_MTRR_FIX16K(x).
 * @{
 */
#define IA32_MTRR_FIX16K_BASE 0x00080000
#define IA32_MTRR_FIX16K_SIZE 0x00004000
#define IA32_MTRR_FIX16K_80000 0x00000258
#define IA32_MTRR_FIX16K_A0000 0x00000259
/**
 * @}
 */
/**
 * @defgroup IA32_MTRR_FIX4K \
 *           IA32_MTRR_FIX4K(x)
 *
 * IA32_MTRR_FIX4K(x).
 * @{
 */
#define IA32_MTRR_FIX4K_BASE 0x000C0000
#define IA32_MTRR_FIX4K_SIZE 0x00001000
#define IA32_MTRR_FIX4K_C0000 0x00000268
#define IA32_MTRR_FIX4K_C8000 0x00000269
#define IA32_MTRR_FIX4K_D0000 0x0000026A
#define IA32_MTRR_FIX4K_D8000 0x0000026B
#define IA32_MTRR_FIX4K_E0000 0x0000026C
#define IA32_MTRR_FIX4K_E8000 0x0000026D
#define IA32_MTRR_FIX4K_F0000 0x0000026E
#define IA32_MTRR_FIX4K_F8000 0x0000026F
/**
 * @}
 */
/**
 * Architecture defined number of fixed range MTRRs (1 for 64k, 2 for 16k, 8 for 4k).
 *
 * 11 fixed-range MSRs, each describing 8 sub-ranges: (1 + 2 + 8) * 8 = 88.
 */
#define IA32_MTRR_FIX_COUNT ((1 + 2 + 8) * 8)
/**
 * Architecture defined number of variable range MTRRs. See: Table 2-2. IA-32 Architectural MSRs
 *
 * @see Vol3A[2.1(ARCHITECTURAL MSRS)]
 */
#define IA32_MTRR_VARIABLE_COUNT 0x0000000A
/**
 * A size of array to store all possible MTRRs.
 *
 * 88 fixed sub-ranges + 10 variable ranges = 98 entries.
 */
#define IA32_MTRR_COUNT (IA32_MTRR_FIX_COUNT + IA32_MTRR_VARIABLE_COUNT)
/**
 * @}
 */
/**
 * IA32_PAT.
 *
 * Page Attribute Table MSR: eight 3-bit memory-type entries (PA0-PA7), one per
 * 8-bit lane of the 64-bit value (the upper 5 bits of each lane are reserved).
 *
 * @remarks If CPUID.01H: EDX.MTRR[16] = 1
 */
#define IA32_PAT 0x00000277
typedef union
{
struct
{
/**
 * [Bits 2:0] PA0.
 */
UINT64 Pa0 : 3;
#define IA32_PAT_PA0_BIT 0
#define IA32_PAT_PA0_FLAG 0x07
#define IA32_PAT_PA0_MASK 0x07
#define IA32_PAT_PA0(_) (((_) >> 0) & 0x07)
UINT64 Reserved1 : 5;
/**
 * [Bits 10:8] PA1.
 */
UINT64 Pa1 : 3;
#define IA32_PAT_PA1_BIT 8
#define IA32_PAT_PA1_FLAG 0x700
#define IA32_PAT_PA1_MASK 0x07
#define IA32_PAT_PA1(_) (((_) >> 8) & 0x07)
UINT64 Reserved2 : 5;
/**
 * [Bits 18:16] PA2.
 */
UINT64 Pa2 : 3;
#define IA32_PAT_PA2_BIT 16
#define IA32_PAT_PA2_FLAG 0x70000
#define IA32_PAT_PA2_MASK 0x07
#define IA32_PAT_PA2(_) (((_) >> 16) & 0x07)
UINT64 Reserved3 : 5;
/**
 * [Bits 26:24] PA3.
 */
UINT64 Pa3 : 3;
#define IA32_PAT_PA3_BIT 24
#define IA32_PAT_PA3_FLAG 0x7000000
#define IA32_PAT_PA3_MASK 0x07
#define IA32_PAT_PA3(_) (((_) >> 24) & 0x07)
UINT64 Reserved4 : 5;
/**
 * [Bits 34:32] PA4.
 */
UINT64 Pa4 : 3;
#define IA32_PAT_PA4_BIT 32
#define IA32_PAT_PA4_FLAG 0x700000000
#define IA32_PAT_PA4_MASK 0x07
#define IA32_PAT_PA4(_) (((_) >> 32) & 0x07)
UINT64 Reserved5 : 5;
/**
 * [Bits 42:40] PA5.
 */
UINT64 Pa5 : 3;
#define IA32_PAT_PA5_BIT 40
#define IA32_PAT_PA5_FLAG 0x70000000000
#define IA32_PAT_PA5_MASK 0x07
#define IA32_PAT_PA5(_) (((_) >> 40) & 0x07)
UINT64 Reserved6 : 5;
/**
 * [Bits 50:48] PA6.
 */
UINT64 Pa6 : 3;
#define IA32_PAT_PA6_BIT 48
#define IA32_PAT_PA6_FLAG 0x7000000000000
#define IA32_PAT_PA6_MASK 0x07
#define IA32_PAT_PA6(_) (((_) >> 48) & 0x07)
UINT64 Reserved7 : 5;
/**
 * [Bits 58:56] PA7.
 */
UINT64 Pa7 : 3;
#define IA32_PAT_PA7_BIT 56
#define IA32_PAT_PA7_FLAG 0x700000000000000
#define IA32_PAT_PA7_MASK 0x07
#define IA32_PAT_PA7(_) (((_) >> 56) & 0x07)
UINT64 Reserved8 : 5;
};
UINT64 AsUInt;
} IA32_PAT_REGISTER;
/**
 * @defgroup IA32_MC_CTL2 \
 *           IA32_MC(i)_CTL2
 *
 * MSR to enable/disable CMCI capability for bank n.
 *
 * @remarks If IA32_MCG_CAP[10] = 1 && IA32_MCG_CAP[7:0] > n
 * @see Vol3B[15.3.2.5(IA32_MCi_CTL2 MSRs)]
 * @{
 */
/* One CTL2 MSR per machine-check bank, contiguous from 0x280. */
#define IA32_MC0_CTL2 0x00000280
#define IA32_MC1_CTL2 0x00000281
#define IA32_MC2_CTL2 0x00000282
#define IA32_MC3_CTL2 0x00000283
#define IA32_MC4_CTL2 0x00000284
#define IA32_MC5_CTL2 0x00000285
#define IA32_MC6_CTL2 0x00000286
#define IA32_MC7_CTL2 0x00000287
#define IA32_MC8_CTL2 0x00000288
#define IA32_MC9_CTL2 0x00000289
#define IA32_MC10_CTL2 0x0000028A
#define IA32_MC11_CTL2 0x0000028B
#define IA32_MC12_CTL2 0x0000028C
#define IA32_MC13_CTL2 0x0000028D
#define IA32_MC14_CTL2 0x0000028E
#define IA32_MC15_CTL2 0x0000028F
#define IA32_MC16_CTL2 0x00000290
#define IA32_MC17_CTL2 0x00000291
#define IA32_MC18_CTL2 0x00000292
#define IA32_MC19_CTL2 0x00000293
#define IA32_MC20_CTL2 0x00000294
#define IA32_MC21_CTL2 0x00000295
#define IA32_MC22_CTL2 0x00000296
#define IA32_MC23_CTL2 0x00000297
#define IA32_MC24_CTL2 0x00000298
#define IA32_MC25_CTL2 0x00000299
#define IA32_MC26_CTL2 0x0000029A
#define IA32_MC27_CTL2 0x0000029B
#define IA32_MC28_CTL2 0x0000029C
#define IA32_MC29_CTL2 0x0000029D
#define IA32_MC30_CTL2 0x0000029E
#define IA32_MC31_CTL2 0x0000029F
typedef union
{
struct
{
/**
 * [Bits 14:0] Corrected error count threshold.
 */
UINT64 CorrectedErrorCountThreshold : 15;
#define IA32_MC_CTL2_CORRECTED_ERROR_COUNT_THRESHOLD_BIT 0
#define IA32_MC_CTL2_CORRECTED_ERROR_COUNT_THRESHOLD_FLAG 0x7FFF
#define IA32_MC_CTL2_CORRECTED_ERROR_COUNT_THRESHOLD_MASK 0x7FFF
#define IA32_MC_CTL2_CORRECTED_ERROR_COUNT_THRESHOLD(_) (((_) >> 0) & 0x7FFF)
/* Bits 29:15 reserved. */
UINT64 Reserved1 : 15;
/**
 * [Bit 30] CMCI_EN.
 */
UINT64 CmciEn : 1;
#define IA32_MC_CTL2_CMCI_EN_BIT 30
#define IA32_MC_CTL2_CMCI_EN_FLAG 0x40000000
#define IA32_MC_CTL2_CMCI_EN_MASK 0x01
#define IA32_MC_CTL2_CMCI_EN(_) (((_) >> 30) & 0x01)
/* Bits 63:31 reserved. */
UINT64 Reserved2 : 33;
};
UINT64 AsUInt;
} IA32_MC_CTL2_REGISTER;
/**
 * @}
 */
/**
 * IA32_MTRR_DEF_TYPE.
 *
 * Default memory type plus global fixed-range/MTRR enable switches.
 *
 * @remarks If CPUID.01H: EDX.MTRR[12] = 1
 */
#define IA32_MTRR_DEF_TYPE 0x000002FF
typedef union
{
struct
{
/**
 * [Bits 2:0] Default Memory Type.
 */
UINT64 DefaultMemoryType : 3;
#define IA32_MTRR_DEF_TYPE_DEFAULT_MEMORY_TYPE_BIT 0
#define IA32_MTRR_DEF_TYPE_DEFAULT_MEMORY_TYPE_FLAG 0x07
#define IA32_MTRR_DEF_TYPE_DEFAULT_MEMORY_TYPE_MASK 0x07
#define IA32_MTRR_DEF_TYPE_DEFAULT_MEMORY_TYPE(_) (((_) >> 0) & 0x07)
/* Bits 9:3 reserved. */
UINT64 Reserved1 : 7;
/**
 * [Bit 10] Fixed Range MTRR Enable.
 */
UINT64 FixedRangeMtrrEnable : 1;
#define IA32_MTRR_DEF_TYPE_FIXED_RANGE_MTRR_ENABLE_BIT 10
#define IA32_MTRR_DEF_TYPE_FIXED_RANGE_MTRR_ENABLE_FLAG 0x400
#define IA32_MTRR_DEF_TYPE_FIXED_RANGE_MTRR_ENABLE_MASK 0x01
#define IA32_MTRR_DEF_TYPE_FIXED_RANGE_MTRR_ENABLE(_) (((_) >> 10) & 0x01)
/**
 * [Bit 11] MTRR Enable.
 */
UINT64 MtrrEnable : 1;
#define IA32_MTRR_DEF_TYPE_MTRR_ENABLE_BIT 11
#define IA32_MTRR_DEF_TYPE_MTRR_ENABLE_FLAG 0x800
#define IA32_MTRR_DEF_TYPE_MTRR_ENABLE_MASK 0x01
#define IA32_MTRR_DEF_TYPE_MTRR_ENABLE(_) (((_) >> 11) & 0x01)
/* Bits 63:12 reserved. */
UINT64 Reserved2 : 52;
};
UINT64 AsUInt;
} IA32_MTRR_DEF_TYPE_REGISTER;
/**
 * @defgroup IA32_FIXED_CTR \
 *           IA32_FIXED_CTR(n)
 *
 * Fixed-Function Performance Counter n.
 *
 * @remarks If CPUID.0AH: EDX[4:0] > n
 * @{
 */
/**
 * Counts Instr_Retired.Any.
 */
#define IA32_FIXED_CTR0 0x00000309
/**
 * Counts CPU_CLK_Unhalted.Core
 */
#define IA32_FIXED_CTR1 0x0000030A
/**
 * Counts CPU_CLK_Unhalted.Ref
 */
#define IA32_FIXED_CTR2 0x0000030B
/**
 * @}
 */
/**
 * Read Only MSR that enumerates the existence of performance monitoring features.
 *
 * @remarks If CPUID.01H: ECX[15] = 1
 * @note Capability bits only — this MSR describes what the PMU supports; it does
 *       not enable anything itself.
 */
#define IA32_PERF_CAPABILITIES 0x00000345
typedef union
{
struct
{
/**
 * [Bits 5:0] LBR format.
 */
UINT64 LbrFormat : 6;
#define IA32_PERF_CAPABILITIES_LBR_FORMAT_BIT 0
#define IA32_PERF_CAPABILITIES_LBR_FORMAT_FLAG 0x3F
#define IA32_PERF_CAPABILITIES_LBR_FORMAT_MASK 0x3F
#define IA32_PERF_CAPABILITIES_LBR_FORMAT(_) (((_) >> 0) & 0x3F)
/**
 * [Bit 6] PEBS Trap.
 */
UINT64 PebsTrap : 1;
#define IA32_PERF_CAPABILITIES_PEBS_TRAP_BIT 6
#define IA32_PERF_CAPABILITIES_PEBS_TRAP_FLAG 0x40
#define IA32_PERF_CAPABILITIES_PEBS_TRAP_MASK 0x01
#define IA32_PERF_CAPABILITIES_PEBS_TRAP(_) (((_) >> 6) & 0x01)
/**
 * [Bit 7] PEBSSaveArchRegs.
 */
UINT64 PebsSaveArchRegs : 1;
#define IA32_PERF_CAPABILITIES_PEBS_SAVE_ARCH_REGS_BIT 7
#define IA32_PERF_CAPABILITIES_PEBS_SAVE_ARCH_REGS_FLAG 0x80
#define IA32_PERF_CAPABILITIES_PEBS_SAVE_ARCH_REGS_MASK 0x01
#define IA32_PERF_CAPABILITIES_PEBS_SAVE_ARCH_REGS(_) (((_) >> 7) & 0x01)
/**
 * [Bits 11:8] PEBS Record Format.
 */
UINT64 PebsRecordFormat : 4;
#define IA32_PERF_CAPABILITIES_PEBS_RECORD_FORMAT_BIT 8
#define IA32_PERF_CAPABILITIES_PEBS_RECORD_FORMAT_FLAG 0xF00
#define IA32_PERF_CAPABILITIES_PEBS_RECORD_FORMAT_MASK 0x0F
#define IA32_PERF_CAPABILITIES_PEBS_RECORD_FORMAT(_) (((_) >> 8) & 0x0F)
/**
 * [Bit 12] Freeze while SMM is supported.
 */
UINT64 FreezeWhileSmmIsSupported : 1;
#define IA32_PERF_CAPABILITIES_FREEZE_WHILE_SMM_IS_SUPPORTED_BIT 12
#define IA32_PERF_CAPABILITIES_FREEZE_WHILE_SMM_IS_SUPPORTED_FLAG 0x1000
#define IA32_PERF_CAPABILITIES_FREEZE_WHILE_SMM_IS_SUPPORTED_MASK 0x01
#define IA32_PERF_CAPABILITIES_FREEZE_WHILE_SMM_IS_SUPPORTED(_) (((_) >> 12) & 0x01)
/**
 * [Bit 13] Full width of counter writable via IA32_A_PMCx.
 */
UINT64 FullWidthCounterWrite : 1;
#define IA32_PERF_CAPABILITIES_FULL_WIDTH_COUNTER_WRITE_BIT 13
#define IA32_PERF_CAPABILITIES_FULL_WIDTH_COUNTER_WRITE_FLAG 0x2000
#define IA32_PERF_CAPABILITIES_FULL_WIDTH_COUNTER_WRITE_MASK 0x01
#define IA32_PERF_CAPABILITIES_FULL_WIDTH_COUNTER_WRITE(_) (((_) >> 13) & 0x01)
/* Bits 63:14 reserved. */
UINT64 Reserved1 : 50;
};
UINT64 AsUInt;
} IA32_PERF_CAPABILITIES_REGISTER;
/**
 * @brief Fixed-Function Performance Counter Control (R/W)
 *
 * Fixed-Function Performance Counter Control. Counter increments while the results of ANDing
 * respective enable bit in IA32_PERF_GLOBAL_CTRL with the corresponding OS or USR bits in this MSR
 * is true.
 *
 * @remarks If CPUID.0AH: EAX[7:0] > 1
 */
#define IA32_FIXED_CTR_CTRL 0x0000038D
typedef union
{
/* Each fixed counter n owns a 4-bit control group at bits [4n+3:4n]: OS, Usr, AnyThread, PMI. */
struct
{
/**
 * [Bit 0] EN0_OS: Enable Fixed Counter 0 to count while CPL = 0.
 */
UINT64 En0Os : 1;
#define IA32_FIXED_CTR_CTRL_EN0_OS_BIT 0
#define IA32_FIXED_CTR_CTRL_EN0_OS_FLAG 0x01
#define IA32_FIXED_CTR_CTRL_EN0_OS_MASK 0x01
#define IA32_FIXED_CTR_CTRL_EN0_OS(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] EN0_Usr: Enable Fixed Counter 0 to count while CPL > 0.
 */
UINT64 En0Usr : 1;
#define IA32_FIXED_CTR_CTRL_EN0_USR_BIT 1
#define IA32_FIXED_CTR_CTRL_EN0_USR_FLAG 0x02
#define IA32_FIXED_CTR_CTRL_EN0_USR_MASK 0x01
#define IA32_FIXED_CTR_CTRL_EN0_USR(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] AnyThread: When set to 1, it enables counting the associated event
 * conditions occurring across all logical processors sharing a processor core. When
 * set to 0, the counter only increments the associated event conditions occurring
 * in the logical processor which programmed the MSR.
 */
UINT64 AnyThread0 : 1;
#define IA32_FIXED_CTR_CTRL_ANY_THREAD0_BIT 2
#define IA32_FIXED_CTR_CTRL_ANY_THREAD0_FLAG 0x04
#define IA32_FIXED_CTR_CTRL_ANY_THREAD0_MASK 0x01
#define IA32_FIXED_CTR_CTRL_ANY_THREAD0(_) (((_) >> 2) & 0x01)
/**
 * [Bit 3] EN0_PMI: Enable PMI when fixed counter 0 overflows.
 */
UINT64 En0Pmi : 1;
#define IA32_FIXED_CTR_CTRL_EN0_PMI_BIT 3
#define IA32_FIXED_CTR_CTRL_EN0_PMI_FLAG 0x08
#define IA32_FIXED_CTR_CTRL_EN0_PMI_MASK 0x01
#define IA32_FIXED_CTR_CTRL_EN0_PMI(_) (((_) >> 3) & 0x01)
/**
 * [Bit 4] EN1_OS: Enable Fixed Counter 1 to count while CPL = 0.
 */
UINT64 En1Os : 1;
#define IA32_FIXED_CTR_CTRL_EN1_OS_BIT 4
#define IA32_FIXED_CTR_CTRL_EN1_OS_FLAG 0x10
#define IA32_FIXED_CTR_CTRL_EN1_OS_MASK 0x01
#define IA32_FIXED_CTR_CTRL_EN1_OS(_) (((_) >> 4) & 0x01)
/**
 * [Bit 5] EN1_Usr: Enable Fixed Counter 1 to count while CPL > 0.
 */
UINT64 En1Usr : 1;
#define IA32_FIXED_CTR_CTRL_EN1_USR_BIT 5
#define IA32_FIXED_CTR_CTRL_EN1_USR_FLAG 0x20
#define IA32_FIXED_CTR_CTRL_EN1_USR_MASK 0x01
#define IA32_FIXED_CTR_CTRL_EN1_USR(_) (((_) >> 5) & 0x01)
/**
 * [Bit 6] AnyThread: When set to 1, it enables counting the associated event
 * conditions occurring across all logical processors sharing a processor core. When
 * set to 0, the counter only increments the associated event conditions occurring
 * in the logical processor which programmed the MSR.
 *
 * @remarks If CPUID.0AH: EAX[7:0] > 2
 */
UINT64 AnyThread1 : 1;
#define IA32_FIXED_CTR_CTRL_ANY_THREAD1_BIT 6
#define IA32_FIXED_CTR_CTRL_ANY_THREAD1_FLAG 0x40
#define IA32_FIXED_CTR_CTRL_ANY_THREAD1_MASK 0x01
#define IA32_FIXED_CTR_CTRL_ANY_THREAD1(_) (((_) >> 6) & 0x01)
/**
 * [Bit 7] EN1_PMI: Enable PMI when fixed counter 1 overflows.
 */
UINT64 En1Pmi : 1;
#define IA32_FIXED_CTR_CTRL_EN1_PMI_BIT 7
#define IA32_FIXED_CTR_CTRL_EN1_PMI_FLAG 0x80
#define IA32_FIXED_CTR_CTRL_EN1_PMI_MASK 0x01
#define IA32_FIXED_CTR_CTRL_EN1_PMI(_) (((_) >> 7) & 0x01)
/**
 * [Bit 8] EN2_OS: Enable Fixed Counter 2 to count while CPL = 0.
 */
UINT64 En2Os : 1;
#define IA32_FIXED_CTR_CTRL_EN2_OS_BIT 8
#define IA32_FIXED_CTR_CTRL_EN2_OS_FLAG 0x100
#define IA32_FIXED_CTR_CTRL_EN2_OS_MASK 0x01
#define IA32_FIXED_CTR_CTRL_EN2_OS(_) (((_) >> 8) & 0x01)
/**
 * [Bit 9] EN2_Usr: Enable Fixed Counter 2 to count while CPL > 0.
 */
UINT64 En2Usr : 1;
#define IA32_FIXED_CTR_CTRL_EN2_USR_BIT 9
#define IA32_FIXED_CTR_CTRL_EN2_USR_FLAG 0x200
#define IA32_FIXED_CTR_CTRL_EN2_USR_MASK 0x01
#define IA32_FIXED_CTR_CTRL_EN2_USR(_) (((_) >> 9) & 0x01)
/**
 * [Bit 10] AnyThread: When set to 1, it enables counting the associated event
 * conditions occurring across all logical processors sharing a processor core. When
 * set to 0, the counter only increments the associated event conditions occurring
 * in the logical processor which programmed the MSR.
 *
 * @remarks If CPUID.0AH: EAX[7:0] > 2
 */
UINT64 AnyThread2 : 1;
#define IA32_FIXED_CTR_CTRL_ANY_THREAD2_BIT 10
#define IA32_FIXED_CTR_CTRL_ANY_THREAD2_FLAG 0x400
#define IA32_FIXED_CTR_CTRL_ANY_THREAD2_MASK 0x01
#define IA32_FIXED_CTR_CTRL_ANY_THREAD2(_) (((_) >> 10) & 0x01)
/**
 * [Bit 11] EN2_PMI: Enable PMI when fixed counter 2 overflows.
 */
UINT64 En2Pmi : 1;
#define IA32_FIXED_CTR_CTRL_EN2_PMI_BIT 11
#define IA32_FIXED_CTR_CTRL_EN2_PMI_FLAG 0x800
#define IA32_FIXED_CTR_CTRL_EN2_PMI_MASK 0x01
#define IA32_FIXED_CTR_CTRL_EN2_PMI(_) (((_) >> 11) & 0x01)
UINT64 Reserved1 : 52;
};
/* Raw 64-bit value of the MSR; aliases the bit-field struct above. */
UINT64 AsUInt;
} IA32_FIXED_CTR_CTRL_REGISTER;
/**
 * Global Performance Counter Status.
 *
 * @remarks If CPUID.0AH: EAX[7:0] > 0
 */
#define IA32_PERF_GLOBAL_STATUS 0x0000038E
typedef union
{
/* Read-side overflow/status view; IA32_PERF_GLOBAL_STATUS_RESET clears and
   IA32_PERF_GLOBAL_STATUS_SET sets the corresponding bits. */
struct
{
/**
 * [Bit 0] Ovf_PMC0: Overflow status of IA32_PMC0.
 *
 * @remarks If CPUID.0AH: EAX[15:8] > 0
 */
UINT64 OvfPmc0 : 1;
#define IA32_PERF_GLOBAL_STATUS_OVF_PMC0_BIT 0
#define IA32_PERF_GLOBAL_STATUS_OVF_PMC0_FLAG 0x01
#define IA32_PERF_GLOBAL_STATUS_OVF_PMC0_MASK 0x01
#define IA32_PERF_GLOBAL_STATUS_OVF_PMC0(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] Ovf_PMC1: Overflow status of IA32_PMC1.
 *
 * @remarks If CPUID.0AH: EAX[15:8] > 1
 */
UINT64 OvfPmc1 : 1;
#define IA32_PERF_GLOBAL_STATUS_OVF_PMC1_BIT 1
#define IA32_PERF_GLOBAL_STATUS_OVF_PMC1_FLAG 0x02
#define IA32_PERF_GLOBAL_STATUS_OVF_PMC1_MASK 0x01
#define IA32_PERF_GLOBAL_STATUS_OVF_PMC1(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] Ovf_PMC2: Overflow status of IA32_PMC2.
 *
 * @remarks If CPUID.0AH: EAX[15:8] > 2
 */
UINT64 OvfPmc2 : 1;
#define IA32_PERF_GLOBAL_STATUS_OVF_PMC2_BIT 2
#define IA32_PERF_GLOBAL_STATUS_OVF_PMC2_FLAG 0x04
#define IA32_PERF_GLOBAL_STATUS_OVF_PMC2_MASK 0x01
#define IA32_PERF_GLOBAL_STATUS_OVF_PMC2(_) (((_) >> 2) & 0x01)
/**
 * [Bit 3] Ovf_PMC3: Overflow status of IA32_PMC3.
 *
 * @remarks If CPUID.0AH: EAX[15:8] > 3
 */
UINT64 OvfPmc3 : 1;
#define IA32_PERF_GLOBAL_STATUS_OVF_PMC3_BIT 3
#define IA32_PERF_GLOBAL_STATUS_OVF_PMC3_FLAG 0x08
#define IA32_PERF_GLOBAL_STATUS_OVF_PMC3_MASK 0x01
#define IA32_PERF_GLOBAL_STATUS_OVF_PMC3(_) (((_) >> 3) & 0x01)
UINT64 Reserved1 : 28;
/**
 * [Bit 32] Ovf_FixedCtr0: Overflow status of IA32_FIXED_CTR0.
 *
 * @remarks If CPUID.0AH: EAX[7:0] > 1
 */
UINT64 OvfFixedctr0 : 1;
#define IA32_PERF_GLOBAL_STATUS_OVF_FIXEDCTR0_BIT 32
#define IA32_PERF_GLOBAL_STATUS_OVF_FIXEDCTR0_FLAG 0x100000000
#define IA32_PERF_GLOBAL_STATUS_OVF_FIXEDCTR0_MASK 0x01
#define IA32_PERF_GLOBAL_STATUS_OVF_FIXEDCTR0(_) (((_) >> 32) & 0x01)
/**
 * [Bit 33] Ovf_FixedCtr1: Overflow status of IA32_FIXED_CTR1.
 *
 * @remarks If CPUID.0AH: EAX[7:0] > 1
 */
UINT64 OvfFixedctr1 : 1;
#define IA32_PERF_GLOBAL_STATUS_OVF_FIXEDCTR1_BIT 33
#define IA32_PERF_GLOBAL_STATUS_OVF_FIXEDCTR1_FLAG 0x200000000
#define IA32_PERF_GLOBAL_STATUS_OVF_FIXEDCTR1_MASK 0x01
#define IA32_PERF_GLOBAL_STATUS_OVF_FIXEDCTR1(_) (((_) >> 33) & 0x01)
/**
 * [Bit 34] Ovf_FixedCtr2: Overflow status of IA32_FIXED_CTR2.
 *
 * @remarks If CPUID.0AH: EAX[7:0] > 1
 */
UINT64 OvfFixedctr2 : 1;
#define IA32_PERF_GLOBAL_STATUS_OVF_FIXEDCTR2_BIT 34
#define IA32_PERF_GLOBAL_STATUS_OVF_FIXEDCTR2_FLAG 0x400000000
#define IA32_PERF_GLOBAL_STATUS_OVF_FIXEDCTR2_MASK 0x01
#define IA32_PERF_GLOBAL_STATUS_OVF_FIXEDCTR2(_) (((_) >> 34) & 0x01)
UINT64 Reserved2 : 20;
/**
 * [Bit 55] Trace_ToPA_PMI: A PMI occurred due to a ToPA entry memory buffer that
 * was completely filled.
 *
 * @remarks If (CPUID.(EAX=07H, ECX=0):EBX[25] = 1) && IA32_RTIT_CTL.ToPA = 1
 */
UINT64 TraceTopaPmi : 1;
#define IA32_PERF_GLOBAL_STATUS_TRACE_TOPA_PMI_BIT 55
#define IA32_PERF_GLOBAL_STATUS_TRACE_TOPA_PMI_FLAG 0x80000000000000
#define IA32_PERF_GLOBAL_STATUS_TRACE_TOPA_PMI_MASK 0x01
#define IA32_PERF_GLOBAL_STATUS_TRACE_TOPA_PMI(_) (((_) >> 55) & 0x01)
UINT64 Reserved3 : 2;
/**
 * [Bit 58] LBR_Frz. LBRs are frozen due to:
 * * IA32_DEBUGCTL.FREEZE_LBR_ON_PMI=1.
 * * The LBR stack overflowed.
 *
 * @remarks If CPUID.0AH: EAX[7:0] > 3
 */
UINT64 LbrFrz : 1;
#define IA32_PERF_GLOBAL_STATUS_LBR_FRZ_BIT 58
#define IA32_PERF_GLOBAL_STATUS_LBR_FRZ_FLAG 0x400000000000000
#define IA32_PERF_GLOBAL_STATUS_LBR_FRZ_MASK 0x01
#define IA32_PERF_GLOBAL_STATUS_LBR_FRZ(_) (((_) >> 58) & 0x01)
/**
 * [Bit 59] CTR_Frz. Performance counters in the core PMU are frozen due to:
 * * IA32_DEBUGCTL.FREEZE_PERFMON_ON_PMI=1.
 * * One or more core PMU counters overflowed.
 *
 * @remarks If CPUID.0AH: EAX[7:0] > 3
 */
UINT64 CtrFrz : 1;
#define IA32_PERF_GLOBAL_STATUS_CTR_FRZ_BIT 59
#define IA32_PERF_GLOBAL_STATUS_CTR_FRZ_FLAG 0x800000000000000
#define IA32_PERF_GLOBAL_STATUS_CTR_FRZ_MASK 0x01
#define IA32_PERF_GLOBAL_STATUS_CTR_FRZ(_) (((_) >> 59) & 0x01)
/**
 * [Bit 60] ASCI: Data in the performance counters in the core PMU may include
 * contributions from the direct or indirect operation Intel SGX to protect an
 * enclave.
 *
 * @remarks If CPUID.(EAX=07H, ECX=0):EBX[2] = 1
 */
UINT64 Asci : 1;
#define IA32_PERF_GLOBAL_STATUS_ASCI_BIT 60
#define IA32_PERF_GLOBAL_STATUS_ASCI_FLAG 0x1000000000000000
#define IA32_PERF_GLOBAL_STATUS_ASCI_MASK 0x01
#define IA32_PERF_GLOBAL_STATUS_ASCI(_) (((_) >> 60) & 0x01)
/**
 * [Bit 61] Uncore counter overflow status.
 *
 * @remarks If CPUID.0AH: EAX[7:0] > 2
 */
UINT64 OvfUncore : 1;
#define IA32_PERF_GLOBAL_STATUS_OVF_UNCORE_BIT 61
#define IA32_PERF_GLOBAL_STATUS_OVF_UNCORE_FLAG 0x2000000000000000
#define IA32_PERF_GLOBAL_STATUS_OVF_UNCORE_MASK 0x01
#define IA32_PERF_GLOBAL_STATUS_OVF_UNCORE(_) (((_) >> 61) & 0x01)
/**
 * [Bit 62] OvfBuf: DS SAVE area Buffer overflow status.
 *
 * @remarks If CPUID.0AH: EAX[7:0] > 0
 */
UINT64 OvfBuf : 1;
#define IA32_PERF_GLOBAL_STATUS_OVF_BUF_BIT 62
#define IA32_PERF_GLOBAL_STATUS_OVF_BUF_FLAG 0x4000000000000000
#define IA32_PERF_GLOBAL_STATUS_OVF_BUF_MASK 0x01
#define IA32_PERF_GLOBAL_STATUS_OVF_BUF(_) (((_) >> 62) & 0x01)
/**
 * [Bit 63] CondChgd: Status bits of this register have changed.
 *
 * @remarks If CPUID.0AH: EAX[7:0] > 0
 */
UINT64 CondChgd : 1;
#define IA32_PERF_GLOBAL_STATUS_COND_CHGD_BIT 63
#define IA32_PERF_GLOBAL_STATUS_COND_CHGD_FLAG 0x8000000000000000
#define IA32_PERF_GLOBAL_STATUS_COND_CHGD_MASK 0x01
#define IA32_PERF_GLOBAL_STATUS_COND_CHGD(_) (((_) >> 63) & 0x01)
};
/* Raw 64-bit value of the MSR; aliases the bit-field struct above. */
UINT64 AsUInt;
} IA32_PERF_GLOBAL_STATUS_REGISTER;
/**
 * @brief Global Performance Counter Control (R/W)
 *
 * Global Performance Counter Control. Counter increments while the result of ANDing the respective
 * enable bit in this MSR with the corresponding OS or USR bits in the general-purpose or fixed
 * counter control MSR is true.
 *
 * @remarks If CPUID.0AH: EAX[7:0] > 0
 */
#define IA32_PERF_GLOBAL_CTRL 0x0000038F
typedef union
{
/* Low 32 bits gate the general-purpose PMCs; high 32 bits gate the fixed-function counters. */
struct
{
/**
 * [Bits 31:0] EN_PMC(n). Enable bitmask. Only the first n-1 bits are valid. Bits
 * 31:n are reserved.
 *
 * @remarks If CPUID.0AH: EAX[15:8] > n
 */
UINT64 EnPmcn : 32;
#define IA32_PERF_GLOBAL_CTRL_EN_PMCN_BIT 0
#define IA32_PERF_GLOBAL_CTRL_EN_PMCN_FLAG 0xFFFFFFFF
#define IA32_PERF_GLOBAL_CTRL_EN_PMCN_MASK 0xFFFFFFFF
#define IA32_PERF_GLOBAL_CTRL_EN_PMCN(_) (((_) >> 0) & 0xFFFFFFFF)
/**
 * [Bits 63:32] EN_FIXED_CTR(n). Enable bitmask. Only the first n-1 bits are valid.
 * Bits 31:n are reserved.
 *
 * @remarks If CPUID.0AH: EDX[4:0] > n
 */
UINT64 EnFixedCtrn : 32;
#define IA32_PERF_GLOBAL_CTRL_EN_FIXED_CTRN_BIT 32
#define IA32_PERF_GLOBAL_CTRL_EN_FIXED_CTRN_FLAG 0xFFFFFFFF00000000
#define IA32_PERF_GLOBAL_CTRL_EN_FIXED_CTRN_MASK 0xFFFFFFFF
#define IA32_PERF_GLOBAL_CTRL_EN_FIXED_CTRN(_) (((_) >> 32) & 0xFFFFFFFF)
};
/* Raw 64-bit value of the MSR; aliases the bit-field struct above. */
UINT64 AsUInt;
} IA32_PERF_GLOBAL_CTRL_REGISTER;
/**
 * Global Performance Counter Overflow Reset Control.
 *
 * @remarks If CPUID.0AH: EAX[7:0] > 3
 */
#define IA32_PERF_GLOBAL_STATUS_RESET 0x00000390
typedef union
{
/* Write-1-to-clear companion of IA32_PERF_GLOBAL_STATUS: each bit position mirrors
   the same-numbered status bit in that register. */
struct
{
/**
 * [Bits 31:0] Set 1 to clear Ovf_PMC(n) bit. Clear bitmask. Only the first n-1 bits
 * are valid. Bits 31:n are reserved.
 *
 * @remarks If CPUID.0AH: EAX[15:8] > n
 */
UINT64 ClearOvfPmcn : 32;
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_OVF_PMCN_BIT 0
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_OVF_PMCN_FLAG 0xFFFFFFFF
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_OVF_PMCN_MASK 0xFFFFFFFF
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_OVF_PMCN(_) (((_) >> 0) & 0xFFFFFFFF)
/**
 * [Bits 34:32] Set 1 to clear Ovf_FIXED_CTR(n) bit. Clear bitmask. Only the first
 * n-1 bits are valid. Bits 31:n are reserved.
 *
 * @remarks If CPUID.0AH: EDX[4:0] > n
 */
UINT64 ClearOvfFixedCtrn : 3;
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_OVF_FIXED_CTRN_BIT 32
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_OVF_FIXED_CTRN_FLAG 0x700000000
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_OVF_FIXED_CTRN_MASK 0x07
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_OVF_FIXED_CTRN(_) (((_) >> 32) & 0x07)
UINT64 Reserved1 : 20;
/**
 * [Bit 55] Set 1 to clear Trace_ToPA_PMI bit.
 *
 * @remarks If (CPUID.(EAX=07H, ECX=0):EBX[25] = 1) && IA32_RTIT_CTL.ToPA = 1
 */
UINT64 ClearTraceTopaPmi : 1;
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_TRACE_TOPA_PMI_BIT 55
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_TRACE_TOPA_PMI_FLAG 0x80000000000000
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_TRACE_TOPA_PMI_MASK 0x01
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_TRACE_TOPA_PMI(_) (((_) >> 55) & 0x01)
UINT64 Reserved2 : 2;
/**
 * [Bit 58] Set 1 to clear LBR_Frz bit.
 *
 * @remarks If CPUID.0AH: EAX[7:0] > 3
 */
UINT64 ClearLbrFrz : 1;
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_LBR_FRZ_BIT 58
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_LBR_FRZ_FLAG 0x400000000000000
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_LBR_FRZ_MASK 0x01
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_LBR_FRZ(_) (((_) >> 58) & 0x01)
/**
 * [Bit 59] Set 1 to clear CTR_Frz bit.
 *
 * @remarks If CPUID.0AH: EAX[7:0] > 3
 */
UINT64 ClearCtrFrz : 1;
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_CTR_FRZ_BIT 59
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_CTR_FRZ_FLAG 0x800000000000000
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_CTR_FRZ_MASK 0x01
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_CTR_FRZ(_) (((_) >> 59) & 0x01)
/**
 * [Bit 60] Set 1 to clear ASCI bit.
 *
 * @remarks If CPUID.0AH: EAX[7:0] > 3
 */
UINT64 ClearAsci : 1;
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_ASCI_BIT 60
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_ASCI_FLAG 0x1000000000000000
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_ASCI_MASK 0x01
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_ASCI(_) (((_) >> 60) & 0x01)
/**
 * [Bit 61] Set 1 to clear Ovf_Uncore bit.
 *
 * @remarks 06_2EH
 */
UINT64 ClearOvfUncore : 1;
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_OVF_UNCORE_BIT 61
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_OVF_UNCORE_FLAG 0x2000000000000000
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_OVF_UNCORE_MASK 0x01
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_OVF_UNCORE(_) (((_) >> 61) & 0x01)
/**
 * [Bit 62] Set 1 to clear OvfBuf bit.
 *
 * @remarks If CPUID.0AH: EAX[7:0] > 0
 */
UINT64 ClearOvfBuf : 1;
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_OVF_BUF_BIT 62
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_OVF_BUF_FLAG 0x4000000000000000
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_OVF_BUF_MASK 0x01
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_OVF_BUF(_) (((_) >> 62) & 0x01)
/**
 * [Bit 63] Set 1 to clear CondChgd bit.
 *
 * @remarks If CPUID.0AH: EAX[7:0] > 0
 */
UINT64 ClearCondChgd : 1;
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_COND_CHGD_BIT 63
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_COND_CHGD_FLAG 0x8000000000000000
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_COND_CHGD_MASK 0x01
#define IA32_PERF_GLOBAL_STATUS_RESET_CLEAR_COND_CHGD(_) (((_) >> 63) & 0x01)
};
/* Raw 64-bit value of the MSR; aliases the bit-field struct above. */
UINT64 AsUInt;
} IA32_PERF_GLOBAL_STATUS_RESET_REGISTER;
/**
 * Global Performance Counter Overflow Set Control.
 *
 * @remarks If CPUID.0AH: EAX[7:0] > 3
 */
#define IA32_PERF_GLOBAL_STATUS_SET 0x00000391
typedef union
{
/* Write-1-to-set companion of IA32_PERF_GLOBAL_STATUS: same bit layout, except bit 63
   (CondChgd) is reserved here and cannot be set by software. */
struct
{
/**
 * [Bits 31:0] Set 1 to cause Ovf_PMC(n) = 1. Set bitmask. Only the first n-1 bits
 * are valid. Bits 31:n are reserved.
 *
 * @remarks If CPUID.0AH: EAX[15:8] > n
 */
UINT64 OvfPmcn : 32;
#define IA32_PERF_GLOBAL_STATUS_SET_OVF_PMCN_BIT 0
#define IA32_PERF_GLOBAL_STATUS_SET_OVF_PMCN_FLAG 0xFFFFFFFF
#define IA32_PERF_GLOBAL_STATUS_SET_OVF_PMCN_MASK 0xFFFFFFFF
#define IA32_PERF_GLOBAL_STATUS_SET_OVF_PMCN(_) (((_) >> 0) & 0xFFFFFFFF)
/**
 * [Bits 34:32] Set 1 to cause Ovf_FIXED_CTR(n) = 1. Set bitmask. Only the first n-1
 * bits are valid. Bits 31:n are reserved.
 *
 * @remarks If CPUID.0AH: EDX[4:0] > n
 */
UINT64 OvfFixedCtrn : 3;
#define IA32_PERF_GLOBAL_STATUS_SET_OVF_FIXED_CTRN_BIT 32
#define IA32_PERF_GLOBAL_STATUS_SET_OVF_FIXED_CTRN_FLAG 0x700000000
#define IA32_PERF_GLOBAL_STATUS_SET_OVF_FIXED_CTRN_MASK 0x07
#define IA32_PERF_GLOBAL_STATUS_SET_OVF_FIXED_CTRN(_) (((_) >> 32) & 0x07)
UINT64 Reserved1 : 20;
/**
 * [Bit 55] Set 1 to cause Trace_ToPA_PMI = 1.
 *
 * @remarks If CPUID.0AH: EAX[7:0] > 3
 */
UINT64 TraceTopaPmi : 1;
#define IA32_PERF_GLOBAL_STATUS_SET_TRACE_TOPA_PMI_BIT 55
#define IA32_PERF_GLOBAL_STATUS_SET_TRACE_TOPA_PMI_FLAG 0x80000000000000
#define IA32_PERF_GLOBAL_STATUS_SET_TRACE_TOPA_PMI_MASK 0x01
#define IA32_PERF_GLOBAL_STATUS_SET_TRACE_TOPA_PMI(_) (((_) >> 55) & 0x01)
UINT64 Reserved2 : 2;
/**
 * [Bit 58] Set 1 to cause LBR_Frz = 1.
 *
 * @remarks If CPUID.0AH: EAX[7:0] > 3
 */
UINT64 LbrFrz : 1;
#define IA32_PERF_GLOBAL_STATUS_SET_LBR_FRZ_BIT 58
#define IA32_PERF_GLOBAL_STATUS_SET_LBR_FRZ_FLAG 0x400000000000000
#define IA32_PERF_GLOBAL_STATUS_SET_LBR_FRZ_MASK 0x01
#define IA32_PERF_GLOBAL_STATUS_SET_LBR_FRZ(_) (((_) >> 58) & 0x01)
/**
 * [Bit 59] Set 1 to cause CTR_Frz = 1.
 *
 * @remarks If CPUID.0AH: EAX[7:0] > 3
 */
UINT64 CtrFrz : 1;
#define IA32_PERF_GLOBAL_STATUS_SET_CTR_FRZ_BIT 59
#define IA32_PERF_GLOBAL_STATUS_SET_CTR_FRZ_FLAG 0x800000000000000
#define IA32_PERF_GLOBAL_STATUS_SET_CTR_FRZ_MASK 0x01
#define IA32_PERF_GLOBAL_STATUS_SET_CTR_FRZ(_) (((_) >> 59) & 0x01)
/**
 * [Bit 60] Set 1 to cause ASCI = 1.
 *
 * @remarks If CPUID.0AH: EAX[7:0] > 3
 */
UINT64 Asci : 1;
#define IA32_PERF_GLOBAL_STATUS_SET_ASCI_BIT 60
#define IA32_PERF_GLOBAL_STATUS_SET_ASCI_FLAG 0x1000000000000000
#define IA32_PERF_GLOBAL_STATUS_SET_ASCI_MASK 0x01
#define IA32_PERF_GLOBAL_STATUS_SET_ASCI(_) (((_) >> 60) & 0x01)
/**
 * [Bit 61] Set 1 to cause Ovf_Uncore = 1.
 *
 * @remarks 06_2EH
 */
UINT64 OvfUncore : 1;
#define IA32_PERF_GLOBAL_STATUS_SET_OVF_UNCORE_BIT 61
#define IA32_PERF_GLOBAL_STATUS_SET_OVF_UNCORE_FLAG 0x2000000000000000
#define IA32_PERF_GLOBAL_STATUS_SET_OVF_UNCORE_MASK 0x01
#define IA32_PERF_GLOBAL_STATUS_SET_OVF_UNCORE(_) (((_) >> 61) & 0x01)
/**
 * [Bit 62] Set 1 to cause OvfBuf = 1.
 *
 * @remarks If CPUID.0AH: EAX[7:0] > 3
 */
UINT64 OvfBuf : 1;
#define IA32_PERF_GLOBAL_STATUS_SET_OVF_BUF_BIT 62
#define IA32_PERF_GLOBAL_STATUS_SET_OVF_BUF_FLAG 0x4000000000000000
#define IA32_PERF_GLOBAL_STATUS_SET_OVF_BUF_MASK 0x01
#define IA32_PERF_GLOBAL_STATUS_SET_OVF_BUF(_) (((_) >> 62) & 0x01)
UINT64 Reserved3 : 1;
};
/* Raw 64-bit value of the MSR; aliases the bit-field struct above. */
UINT64 AsUInt;
} IA32_PERF_GLOBAL_STATUS_SET_REGISTER;
/**
 * Indicator that core perfmon interface is in use.
 *
 * @remarks If CPUID.0AH: EAX[7:0] > 3
 */
#define IA32_PERF_GLOBAL_INUSE 0x00000392
typedef union
{
/* Bit-field view of the MSR; fields are declared LSB-first and together cover all 64 bits. */
struct
{
/**
 * [Bits 31:0] IA32_PERFEVTSEL(n) in use. Status bitmask. Only the first n-1 bits
 * are valid. Bits 31:n are reserved.
 *
 * @remarks If CPUID.0AH: EAX[15:8] > n
 */
UINT64 Ia32PerfevtselnInUse : 32;
#define IA32_PERF_GLOBAL_INUSE_IA32_PERFEVTSELN_IN_USE_BIT 0
#define IA32_PERF_GLOBAL_INUSE_IA32_PERFEVTSELN_IN_USE_FLAG 0xFFFFFFFF
#define IA32_PERF_GLOBAL_INUSE_IA32_PERFEVTSELN_IN_USE_MASK 0xFFFFFFFF
#define IA32_PERF_GLOBAL_INUSE_IA32_PERFEVTSELN_IN_USE(_) (((_) >> 0) & 0xFFFFFFFF)
/**
 * [Bits 34:32] IA32_FIXED_CTR(n) in use. Status bitmask. Only the first n-1 bits
 * are valid. Bits 31:n are reserved.
 */
UINT64 Ia32FixedCtrnInUse : 3;
#define IA32_PERF_GLOBAL_INUSE_IA32_FIXED_CTRN_IN_USE_BIT 32
#define IA32_PERF_GLOBAL_INUSE_IA32_FIXED_CTRN_IN_USE_FLAG 0x700000000
#define IA32_PERF_GLOBAL_INUSE_IA32_FIXED_CTRN_IN_USE_MASK 0x07
#define IA32_PERF_GLOBAL_INUSE_IA32_FIXED_CTRN_IN_USE(_) (((_) >> 32) & 0x07)
UINT64 Reserved1 : 28;
/**
 * [Bit 63] PMI in use.
 */
UINT64 PmiInUse : 1;
#define IA32_PERF_GLOBAL_INUSE_PMI_IN_USE_BIT 63
#define IA32_PERF_GLOBAL_INUSE_PMI_IN_USE_FLAG 0x8000000000000000
#define IA32_PERF_GLOBAL_INUSE_PMI_IN_USE_MASK 0x01
#define IA32_PERF_GLOBAL_INUSE_PMI_IN_USE(_) (((_) >> 63) & 0x01)
};
/* Raw 64-bit value of the MSR; aliases the bit-field struct above. */
UINT64 AsUInt;
} IA32_PERF_GLOBAL_INUSE_REGISTER;
/**
 * PEBS Control.
 *
 * @remarks If CPUID.0AH: EAX[7:0] > 3
 */
#define IA32_PEBS_ENABLE 0x000003F1
typedef union
{
/* Only bit 0 is architecturally defined here; bits 3:1 and 35:32 are reserved or
   model specific, per the field comments below. */
struct
{
/**
 * [Bit 0] Enable PEBS on IA32_PMC0.
 *
 * @remarks 06_0FH
 */
UINT64 EnablePebs : 1;
#define IA32_PEBS_ENABLE_ENABLE_PEBS_BIT 0
#define IA32_PEBS_ENABLE_ENABLE_PEBS_FLAG 0x01
#define IA32_PEBS_ENABLE_ENABLE_PEBS_MASK 0x01
#define IA32_PEBS_ENABLE_ENABLE_PEBS(_) (((_) >> 0) & 0x01)
/**
 * [Bits 3:1] Reserved or model specific.
 */
UINT64 Reservedormodelspecific1 : 3;
#define IA32_PEBS_ENABLE_RESERVEDORMODELSPECIFIC1_BIT 1
#define IA32_PEBS_ENABLE_RESERVEDORMODELSPECIFIC1_FLAG 0x0E
#define IA32_PEBS_ENABLE_RESERVEDORMODELSPECIFIC1_MASK 0x07
#define IA32_PEBS_ENABLE_RESERVEDORMODELSPECIFIC1(_) (((_) >> 1) & 0x07)
UINT64 Reserved1 : 28;
/**
 * [Bits 35:32] Reserved or model specific.
 */
UINT64 Reservedormodelspecific2 : 4;
#define IA32_PEBS_ENABLE_RESERVEDORMODELSPECIFIC2_BIT 32
#define IA32_PEBS_ENABLE_RESERVEDORMODELSPECIFIC2_FLAG 0xF00000000
#define IA32_PEBS_ENABLE_RESERVEDORMODELSPECIFIC2_MASK 0x0F
#define IA32_PEBS_ENABLE_RESERVEDORMODELSPECIFIC2(_) (((_) >> 32) & 0x0F)
UINT64 Reserved2 : 28;
};
/* Raw 64-bit value of the MSR; aliases the bit-field struct above. */
UINT64 AsUInt;
} IA32_PEBS_ENABLE_REGISTER;
/**
 * @defgroup IA32_MC_CTL \
 * IA32_MC(i)_CTL
 *
 * IA32_MC(0-28)_CTL.
 *
 * @remarks If IA32_MCG_CAP.CNT > n
 * @{
 */
/* Each machine-check bank owns four consecutive MSRs; IA32_MC(i)_CTL = 0x400 + 4*i. */
#define IA32_MC0_CTL 0x00000400
#define IA32_MC1_CTL 0x00000404
#define IA32_MC2_CTL 0x00000408
#define IA32_MC3_CTL 0x0000040C
#define IA32_MC4_CTL 0x00000410
#define IA32_MC5_CTL 0x00000414
#define IA32_MC6_CTL 0x00000418
#define IA32_MC7_CTL 0x0000041C
#define IA32_MC8_CTL 0x00000420
#define IA32_MC9_CTL 0x00000424
#define IA32_MC10_CTL 0x00000428
#define IA32_MC11_CTL 0x0000042C
#define IA32_MC12_CTL 0x00000430
#define IA32_MC13_CTL 0x00000434
#define IA32_MC14_CTL 0x00000438
#define IA32_MC15_CTL 0x0000043C
#define IA32_MC16_CTL 0x00000440
#define IA32_MC17_CTL 0x00000444
#define IA32_MC18_CTL 0x00000448
#define IA32_MC19_CTL 0x0000044C
#define IA32_MC20_CTL 0x00000450
#define IA32_MC21_CTL 0x00000454
#define IA32_MC22_CTL 0x00000458
#define IA32_MC23_CTL 0x0000045C
#define IA32_MC24_CTL 0x00000460
#define IA32_MC25_CTL 0x00000464
#define IA32_MC26_CTL 0x00000468
#define IA32_MC27_CTL 0x0000046C
#define IA32_MC28_CTL 0x00000470
/**
 * @}
 */
/**
 * @defgroup IA32_MC_STATUS \
 * IA32_MC(i)_STATUS
 *
 * IA32_MC(0-28)_STATUS.
 *
 * @remarks If IA32_MCG_CAP.CNT > n
 * @{
 */
/* Second MSR of each machine-check bank; IA32_MC(i)_STATUS = 0x401 + 4*i. */
#define IA32_MC0_STATUS 0x00000401
#define IA32_MC1_STATUS 0x00000405
#define IA32_MC2_STATUS 0x00000409
#define IA32_MC3_STATUS 0x0000040D
#define IA32_MC4_STATUS 0x00000411
#define IA32_MC5_STATUS 0x00000415
#define IA32_MC6_STATUS 0x00000419
#define IA32_MC7_STATUS 0x0000041D
#define IA32_MC8_STATUS 0x00000421
#define IA32_MC9_STATUS 0x00000425
#define IA32_MC10_STATUS 0x00000429
#define IA32_MC11_STATUS 0x0000042D
#define IA32_MC12_STATUS 0x00000431
#define IA32_MC13_STATUS 0x00000435
#define IA32_MC14_STATUS 0x00000439
#define IA32_MC15_STATUS 0x0000043D
#define IA32_MC16_STATUS 0x00000441
#define IA32_MC17_STATUS 0x00000445
#define IA32_MC18_STATUS 0x00000449
#define IA32_MC19_STATUS 0x0000044D
#define IA32_MC20_STATUS 0x00000451
#define IA32_MC21_STATUS 0x00000455
#define IA32_MC22_STATUS 0x00000459
#define IA32_MC23_STATUS 0x0000045D
#define IA32_MC24_STATUS 0x00000461
#define IA32_MC25_STATUS 0x00000465
#define IA32_MC26_STATUS 0x00000469
#define IA32_MC27_STATUS 0x0000046D
#define IA32_MC28_STATUS 0x00000471
/**
 * @}
 */
/**
 * @defgroup IA32_MC_ADDR \
 * IA32_MC(i)_ADDR
 *
 * IA32_MC(0-28)_ADDR.
 *
 * @remarks If IA32_MCG_CAP.CNT > n
 * @{
 */
/* Third MSR of each machine-check bank; IA32_MC(i)_ADDR = 0x402 + 4*i. */
#define IA32_MC0_ADDR 0x00000402
#define IA32_MC1_ADDR 0x00000406
#define IA32_MC2_ADDR 0x0000040A
#define IA32_MC3_ADDR 0x0000040E
#define IA32_MC4_ADDR 0x00000412
#define IA32_MC5_ADDR 0x00000416
#define IA32_MC6_ADDR 0x0000041A
#define IA32_MC7_ADDR 0x0000041E
#define IA32_MC8_ADDR 0x00000422
#define IA32_MC9_ADDR 0x00000426
#define IA32_MC10_ADDR 0x0000042A
#define IA32_MC11_ADDR 0x0000042E
#define IA32_MC12_ADDR 0x00000432
#define IA32_MC13_ADDR 0x00000436
#define IA32_MC14_ADDR 0x0000043A
#define IA32_MC15_ADDR 0x0000043E
#define IA32_MC16_ADDR 0x00000442
#define IA32_MC17_ADDR 0x00000446
#define IA32_MC18_ADDR 0x0000044A
#define IA32_MC19_ADDR 0x0000044E
#define IA32_MC20_ADDR 0x00000452
#define IA32_MC21_ADDR 0x00000456
#define IA32_MC22_ADDR 0x0000045A
#define IA32_MC23_ADDR 0x0000045E
#define IA32_MC24_ADDR 0x00000462
#define IA32_MC25_ADDR 0x00000466
#define IA32_MC26_ADDR 0x0000046A
#define IA32_MC27_ADDR 0x0000046E
#define IA32_MC28_ADDR 0x00000472
/**
 * @}
 */
/**
 * @defgroup IA32_MC_MISC \
 * IA32_MC(i)_MISC
 *
 * IA32_MC(0-28)_MISC.
 *
 * @remarks If IA32_MCG_CAP.CNT > n
 * @{
 */
/* Fourth MSR of each machine-check bank; IA32_MC(i)_MISC = 0x403 + 4*i. */
#define IA32_MC0_MISC 0x00000403
#define IA32_MC1_MISC 0x00000407
#define IA32_MC2_MISC 0x0000040B
#define IA32_MC3_MISC 0x0000040F
#define IA32_MC4_MISC 0x00000413
#define IA32_MC5_MISC 0x00000417
#define IA32_MC6_MISC 0x0000041B
#define IA32_MC7_MISC 0x0000041F
#define IA32_MC8_MISC 0x00000423
#define IA32_MC9_MISC 0x00000427
#define IA32_MC10_MISC 0x0000042B
#define IA32_MC11_MISC 0x0000042F
#define IA32_MC12_MISC 0x00000433
#define IA32_MC13_MISC 0x00000437
#define IA32_MC14_MISC 0x0000043B
#define IA32_MC15_MISC 0x0000043F
#define IA32_MC16_MISC 0x00000443
#define IA32_MC17_MISC 0x00000447
#define IA32_MC18_MISC 0x0000044B
#define IA32_MC19_MISC 0x0000044F
#define IA32_MC20_MISC 0x00000453
#define IA32_MC21_MISC 0x00000457
#define IA32_MC22_MISC 0x0000045B
#define IA32_MC23_MISC 0x0000045F
#define IA32_MC24_MISC 0x00000463
#define IA32_MC25_MISC 0x00000467
#define IA32_MC26_MISC 0x0000046B
#define IA32_MC27_MISC 0x0000046F
#define IA32_MC28_MISC 0x00000473
/**
 * @}
 */
/**
 * Reporting Register of Basic VMX Capabilities.
 *
 * @remarks If CPUID.01H:ECX.[5] = 1
 * @see Vol3D[A.1(BASIC VMX INFORMATION)]
 * @see Vol3D[A.1(Basic VMX Information)] (reference)
 */
#define IA32_VMX_BASIC 0x00000480
typedef union
{
/* Read-only capability MSR: software reads this to size/identify VMXON and VMCS
   regions before entering VMX operation. */
struct
{
/**
 * @brief VMCS revision identifier used by the processor
 *
 * [Bits 30:0] 31-bit VMCS revision identifier used by the processor. Processors
 * that use the same VMCS revision identifier use the same size for VMCS regions.
 */
UINT64 VmcsRevisionId : 31;
#define IA32_VMX_BASIC_VMCS_REVISION_ID_BIT 0
#define IA32_VMX_BASIC_VMCS_REVISION_ID_FLAG 0x7FFFFFFF
#define IA32_VMX_BASIC_VMCS_REVISION_ID_MASK 0x7FFFFFFF
#define IA32_VMX_BASIC_VMCS_REVISION_ID(_) (((_) >> 0) & 0x7FFFFFFF)
/**
 * [Bit 31] Bit 31 is always 0.
 */
UINT64 MustBeZero : 1;
#define IA32_VMX_BASIC_MUST_BE_ZERO_BIT 31
#define IA32_VMX_BASIC_MUST_BE_ZERO_FLAG 0x80000000
#define IA32_VMX_BASIC_MUST_BE_ZERO_MASK 0x01
#define IA32_VMX_BASIC_MUST_BE_ZERO(_) (((_) >> 31) & 0x01)
/**
 * @brief Size of the VMCS
 *
 * [Bits 44:32] Report the number of bytes that software should allocate for the
 * VMXON region and any VMCS region. It is a value greater than 0 and at most 4096
 * (bit 44 is set if and only if bits 43:32 are clear).
 */
UINT64 VmcsSizeInBytes : 13;
#define IA32_VMX_BASIC_VMCS_SIZE_IN_BYTES_BIT 32
#define IA32_VMX_BASIC_VMCS_SIZE_IN_BYTES_FLAG 0x1FFF00000000
#define IA32_VMX_BASIC_VMCS_SIZE_IN_BYTES_MASK 0x1FFF
#define IA32_VMX_BASIC_VMCS_SIZE_IN_BYTES(_) (((_) >> 32) & 0x1FFF)
UINT64 Reserved1 : 3;
/**
 * @brief Width of physical address used for the VMCS
 * - 0 -> limited to the available amount of physical RAM
 * - 1 -> within the first 4 GB
 *
 * [Bit 48] Indicates the width of the physical addresses that may be used for the
 * VMXON region, each VMCS, and data structures referenced by pointers in a VMCS
 * (I/O bitmaps, virtual-APIC page, MSR areas for VMX transitions). If the bit is 0,
 * these addresses are limited to the processor's physical-address width.2 If the
 * bit is 1, these addresses are limited to 32 bits. This bit is always 0 for
 * processors that support Intel 64 architecture.
 */
UINT64 VmcsPhysicalAddressWidth : 1;
#define IA32_VMX_BASIC_VMCS_PHYSICAL_ADDRESS_WIDTH_BIT 48
#define IA32_VMX_BASIC_VMCS_PHYSICAL_ADDRESS_WIDTH_FLAG 0x1000000000000
#define IA32_VMX_BASIC_VMCS_PHYSICAL_ADDRESS_WIDTH_MASK 0x01
#define IA32_VMX_BASIC_VMCS_PHYSICAL_ADDRESS_WIDTH(_) (((_) >> 48) & 0x01)
/**
 * @brief Whether the processor supports the dual-monitor treatment of
 * system-management interrupts and system-management code (always 1)
 *
 * [Bit 49] Read as 1, the logical processor supports the dual-monitor treatment of
 * system-management interrupts and system-management mode.
 *
 * @see Vol3C[34.15(DUAL-MONITOR TREATMENT OF SMIs AND SMM)]
 */
UINT64 DualMonitorSupport : 1;
#define IA32_VMX_BASIC_DUAL_MONITOR_SUPPORT_BIT 49
#define IA32_VMX_BASIC_DUAL_MONITOR_SUPPORT_FLAG 0x2000000000000
#define IA32_VMX_BASIC_DUAL_MONITOR_SUPPORT_MASK 0x01
#define IA32_VMX_BASIC_DUAL_MONITOR_SUPPORT(_) (((_) >> 49) & 0x01)
/**
 * @brief Memory type that must be used for the VMCS
 *
 * [Bits 53:50] Report the memory type that should be used for the VMCS, for data
 * structures referenced by pointers in the VMCS (I/O bitmaps, virtual-APIC page,
 * MSR areas for VMX transitions), and for the MSEG header. If software needs to
 * access these data structures (e.g., to modify the contents of the MSR bitmaps),
 * it can configure the paging structures to map them into the linear-address space.
 * If it does so, it should establish mappings that use the memory type reported
 * bits 53:50 in this MSR.
 * As of this writing, all processors that support VMX operation indicate the
 * write-back type.
 */
UINT64 MemoryType : 4;
#define IA32_VMX_BASIC_MEMORY_TYPE_BIT 50
#define IA32_VMX_BASIC_MEMORY_TYPE_FLAG 0x3C000000000000
#define IA32_VMX_BASIC_MEMORY_TYPE_MASK 0x0F
#define IA32_VMX_BASIC_MEMORY_TYPE(_) (((_) >> 50) & 0x0F)
/**
 * @brief Whether the processor provides additional information for exits due to
 * INS/OUTS
 *
 * [Bit 54] When set to 1, the processor reports information in the VM-exit
 * instruction-information field on VM exits due to execution of the INS and OUTS
 * instructions. This reporting is done only if this bit is read as 1.
 *
 * @see Vol3C[27.2.4(Information for VM Exits Due to Instruction Execution)]
 */
UINT64 InsOutsReporting : 1;
#define IA32_VMX_BASIC_INS_OUTS_REPORTING_BIT 54
#define IA32_VMX_BASIC_INS_OUTS_REPORTING_FLAG 0x40000000000000
#define IA32_VMX_BASIC_INS_OUTS_REPORTING_MASK 0x01
#define IA32_VMX_BASIC_INS_OUTS_REPORTING(_) (((_) >> 54) & 0x01)
/**
 * @brief Whether default 1 bits in control MSRs (pin/proc/exit/entry) may be
 * cleared to 0 and that 'true' control MSRs are supported
 *
 * [Bit 55] Is read as 1 if any VMX controls that default to 1 may be cleared to 0.
 * It also reports support for the VMX capability MSRs IA32_VMX_TRUE_PINBASED_CTLS,
 * IA32_VMX_TRUE_PROCBASED_CTLS, IA32_VMX_TRUE_EXIT_CTLS, and
 * IA32_VMX_TRUE_ENTRY_CTLS.
 *
 * @see Vol3D[A.2(RESERVED CONTROLS AND DEFAULT SETTINGS)]
 * @see Vol3D[A.3.1(Pin-Based VM-Execution Controls)]
 * @see Vol3D[A.3.2(Primary Processor-Based VM-Execution Controls)]
 * @see Vol3D[A.4(VM-EXIT CONTROLS)]
 * @see Vol3D[A.5(VM-ENTRY CONTROLS)]
 */
UINT64 VmxControls : 1;
#define IA32_VMX_BASIC_VMX_CONTROLS_BIT 55
#define IA32_VMX_BASIC_VMX_CONTROLS_FLAG 0x80000000000000
#define IA32_VMX_BASIC_VMX_CONTROLS_MASK 0x01
#define IA32_VMX_BASIC_VMX_CONTROLS(_) (((_) >> 55) & 0x01)
UINT64 Reserved2 : 8;
};
/* Raw 64-bit value of the MSR; aliases the bit-field struct above. */
UINT64 AsUInt;
} IA32_VMX_BASIC_REGISTER;
/**
 * Capability Reporting Register of Pin-Based VM-Execution Controls.
 *
 * @remarks If CPUID.01H:ECX.[5] = 1
 *
 * NOTE: As a VMX capability MSR, bits 31:0 report the allowed 0-settings and
 * bits 63:32 report the allowed 1-settings of these controls; the bit fields
 * below describe the control-bit positions themselves.
 *
 * @see Vol3D[A.3.1(Pin-Based VM-Execution Controls)]
 * @see Vol3C[24.6.1(Pin-Based VM-Execution Controls)] (reference)
 */
#define IA32_VMX_PINBASED_CTLS 0x00000481
typedef union
{
struct
{
/**
* @brief External interrupts cause VM-exits if set; otherwise dispatched through
* the guest's IDT
*
* [Bit 0] If this control is 1, external interrupts cause VM exits. Otherwise, they
* are delivered normally through the guest interrupt-descriptor table (IDT). If
* this control is 1, the value of RFLAGS.IF does not affect interrupt blocking.
*/
UINT64 ExternalInterruptExiting : 1;
#define IA32_VMX_PINBASED_CTLS_EXTERNAL_INTERRUPT_EXITING_BIT 0
#define IA32_VMX_PINBASED_CTLS_EXTERNAL_INTERRUPT_EXITING_FLAG 0x01
#define IA32_VMX_PINBASED_CTLS_EXTERNAL_INTERRUPT_EXITING_MASK 0x01
#define IA32_VMX_PINBASED_CTLS_EXTERNAL_INTERRUPT_EXITING(_) (((_) >> 0) & 0x01)
UINT64 Reserved1 : 2;
/**
* @brief Non-maskable interrupts cause VM-exits if set; otherwise dispatched
* through the guest's IDT
*
* [Bit 3] If this control is 1, non-maskable interrupts (NMIs) cause VM exits.
* Otherwise, they are delivered normally using descriptor 2 of the IDT. This
* control also determines interactions between IRET and blocking by NMI.
*
* @see Vol3C[25.3(CHANGES TO INSTRUCTION BEHAVIOR IN VMX NON-ROOT OPERATION)]
*/
UINT64 NmiExiting : 1;
#define IA32_VMX_PINBASED_CTLS_NMI_EXITING_BIT 3
#define IA32_VMX_PINBASED_CTLS_NMI_EXITING_FLAG 0x08
#define IA32_VMX_PINBASED_CTLS_NMI_EXITING_MASK 0x01
#define IA32_VMX_PINBASED_CTLS_NMI_EXITING(_) (((_) >> 3) & 0x01)
UINT64 Reserved2 : 1;
/**
* @brief Virtual NMIs
*
* [Bit 5] If this control is 1, NMIs are never blocked and the "blocking by NMI"
* bit (bit 3) in the interruptibility-state field indicates "virtual-NMI blocking".
* This control also interacts with the "NMI-window exiting" VM-execution control.
*
* @see Vol3C[24.6.2(Processor-Based VM-Execution Controls)]
*/
UINT64 VirtualNmi : 1;
#define IA32_VMX_PINBASED_CTLS_VIRTUAL_NMI_BIT 5
#define IA32_VMX_PINBASED_CTLS_VIRTUAL_NMI_FLAG 0x20
#define IA32_VMX_PINBASED_CTLS_VIRTUAL_NMI_MASK 0x01
#define IA32_VMX_PINBASED_CTLS_VIRTUAL_NMI(_) (((_) >> 5) & 0x01)
/**
* @brief Activate VMX preemption timer
*
* [Bit 6] If this control is 1, the VMX-preemption timer counts down in VMX
* non-root operation. A VM exit occurs when the timer counts down to zero.
*
* @see Vol3C[25.5.1(VMX-Preemption Timer)]
* @see Vol3C[25.2(OTHER CAUSES OF VM EXITS)]
*/
UINT64 ActivateVmxPreemptionTimer : 1;
#define IA32_VMX_PINBASED_CTLS_ACTIVATE_VMX_PREEMPTION_TIMER_BIT 6
#define IA32_VMX_PINBASED_CTLS_ACTIVATE_VMX_PREEMPTION_TIMER_FLAG 0x40
#define IA32_VMX_PINBASED_CTLS_ACTIVATE_VMX_PREEMPTION_TIMER_MASK 0x01
#define IA32_VMX_PINBASED_CTLS_ACTIVATE_VMX_PREEMPTION_TIMER(_) (((_) >> 6) & 0x01)
/**
* @brief Process interrupts with the posted-interrupt notification vector
*
* [Bit 7] If this control is 1, the processor treats interrupts with the
* posted-interrupt notification vector specially, updating the virtual-APIC page
* with posted-interrupt requests.
*
* @see Vol3C[24.6.8(Controls for APIC Virtualization)]
* @see Vol3C[29.6(POSTED-INTERRUPT PROCESSING)]
*/
UINT64 ProcessPostedInterrupts : 1;
#define IA32_VMX_PINBASED_CTLS_PROCESS_POSTED_INTERRUPTS_BIT 7
#define IA32_VMX_PINBASED_CTLS_PROCESS_POSTED_INTERRUPTS_FLAG 0x80
#define IA32_VMX_PINBASED_CTLS_PROCESS_POSTED_INTERRUPTS_MASK 0x01
#define IA32_VMX_PINBASED_CTLS_PROCESS_POSTED_INTERRUPTS(_) (((_) >> 7) & 0x01)
UINT64 Reserved3 : 56;
};
/**
* Raw 64-bit value of the MSR.
*/
UINT64 AsUInt;
} IA32_VMX_PINBASED_CTLS_REGISTER;
/**
 * Capability Reporting Register of Primary Processor-Based VM-Execution Controls.
 *
 * @remarks If CPUID.01H:ECX.[5] = 1
 *
 * NOTE: As a VMX capability MSR, bits 31:0 report the allowed 0-settings and
 * bits 63:32 report the allowed 1-settings of these controls; the bit fields
 * below describe the control-bit positions themselves.
 *
 * @see Vol3D[A.3.2(Primary Processor-Based VM-Execution Controls)]
 * @see Vol3C[24.6.2(Processor-Based VM-Execution Controls)] (reference)
 */
#define IA32_VMX_PROCBASED_CTLS 0x00000482
typedef union
{
struct
{
UINT64 Reserved1 : 2;
/**
* @brief VM-exit as soon as RFLAGS.IF=1 and no blocking is active
*
* [Bit 2] If this control is 1, a VM exit occurs at the beginning of any
* instruction if RFLAGS.IF = 1 and there are no other blocking of interrupts.
*
* @see Vol3C[24.4.2(Guest Non-Register State)]
*/
UINT64 InterruptWindowExiting : 1;
#define IA32_VMX_PROCBASED_CTLS_INTERRUPT_WINDOW_EXITING_BIT 2
#define IA32_VMX_PROCBASED_CTLS_INTERRUPT_WINDOW_EXITING_FLAG 0x04
#define IA32_VMX_PROCBASED_CTLS_INTERRUPT_WINDOW_EXITING_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS_INTERRUPT_WINDOW_EXITING(_) (((_) >> 2) & 0x01)
/**
* @brief Use timestamp counter offset
*
* [Bit 3] This control determines whether executions of RDTSC, executions of
* RDTSCP, and executions of RDMSR that read from the IA32_TIME_STAMP_COUNTER MSR
* return a value modified by the TSC offset field.
*
* @see Vol3C[24.6.5(Time-Stamp Counter Offset and Multiplier)]
* @see Vol3C[25.3(CHANGES TO INSTRUCTION BEHAVIOR IN VMX NON-ROOT OPERATION)]
*/
UINT64 UseTscOffsetting : 1;
#define IA32_VMX_PROCBASED_CTLS_USE_TSC_OFFSETTING_BIT 3
#define IA32_VMX_PROCBASED_CTLS_USE_TSC_OFFSETTING_FLAG 0x08
#define IA32_VMX_PROCBASED_CTLS_USE_TSC_OFFSETTING_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS_USE_TSC_OFFSETTING(_) (((_) >> 3) & 0x01)
UINT64 Reserved2 : 3;
/**
* @brief VM-exit when executing the HLT instruction
*
* [Bit 7] This control determines whether executions of HLT cause VM exits.
*/
UINT64 HltExiting : 1;
#define IA32_VMX_PROCBASED_CTLS_HLT_EXITING_BIT 7
#define IA32_VMX_PROCBASED_CTLS_HLT_EXITING_FLAG 0x80
#define IA32_VMX_PROCBASED_CTLS_HLT_EXITING_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS_HLT_EXITING(_) (((_) >> 7) & 0x01)
UINT64 Reserved3 : 1;
/**
* @brief VM-exit when executing the INVLPG instruction
*
* [Bit 9] This control determines whether executions of INVLPG cause VM exits.
*/
UINT64 InvlpgExiting : 1;
#define IA32_VMX_PROCBASED_CTLS_INVLPG_EXITING_BIT 9
#define IA32_VMX_PROCBASED_CTLS_INVLPG_EXITING_FLAG 0x200
#define IA32_VMX_PROCBASED_CTLS_INVLPG_EXITING_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS_INVLPG_EXITING(_) (((_) >> 9) & 0x01)
/**
* @brief VM-exit when executing the MWAIT instruction
*
* [Bit 10] This control determines whether executions of MWAIT cause VM exits.
*/
UINT64 MwaitExiting : 1;
#define IA32_VMX_PROCBASED_CTLS_MWAIT_EXITING_BIT 10
#define IA32_VMX_PROCBASED_CTLS_MWAIT_EXITING_FLAG 0x400
#define IA32_VMX_PROCBASED_CTLS_MWAIT_EXITING_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS_MWAIT_EXITING(_) (((_) >> 10) & 0x01)
/**
* @brief VM-exit when executing the RDPMC instruction
*
* [Bit 11] This control determines whether executions of RDPMC cause VM exits.
*/
UINT64 RdpmcExiting : 1;
#define IA32_VMX_PROCBASED_CTLS_RDPMC_EXITING_BIT 11
#define IA32_VMX_PROCBASED_CTLS_RDPMC_EXITING_FLAG 0x800
#define IA32_VMX_PROCBASED_CTLS_RDPMC_EXITING_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS_RDPMC_EXITING(_) (((_) >> 11) & 0x01)
/**
* @brief VM-exit when executing the RDTSC/RDTSCP instruction
*
* [Bit 12] This control determines whether executions of RDTSC and RDTSCP cause VM
* exits.
*/
UINT64 RdtscExiting : 1;
#define IA32_VMX_PROCBASED_CTLS_RDTSC_EXITING_BIT 12
#define IA32_VMX_PROCBASED_CTLS_RDTSC_EXITING_FLAG 0x1000
#define IA32_VMX_PROCBASED_CTLS_RDTSC_EXITING_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS_RDTSC_EXITING(_) (((_) >> 12) & 0x01)
UINT64 Reserved4 : 2;
/**
* @brief VM-exit when executing the MOV to CR3 instruction (forced to 1 on the
* 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs)
*
* [Bit 15] In conjunction with the CR3-target controls, this control determines
* whether executions of MOV to CR3 cause VM exits. The first processors to support
* the virtual-machine extensions supported only the 1-setting of this control.
*
* @see Vol3C[24.6.7(CR3-Target Controls)]
* @see Vol3C[25.1.3(Instructions That Cause VM Exits Conditionally)]
*/
UINT64 Cr3LoadExiting : 1;
#define IA32_VMX_PROCBASED_CTLS_CR3_LOAD_EXITING_BIT 15
#define IA32_VMX_PROCBASED_CTLS_CR3_LOAD_EXITING_FLAG 0x8000
#define IA32_VMX_PROCBASED_CTLS_CR3_LOAD_EXITING_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS_CR3_LOAD_EXITING(_) (((_) >> 15) & 0x01)
/**
* @brief VM-exit when executing the MOV from CR3 instruction (forced to 1 on the
* 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs)
*
* [Bit 16] This control determines whether executions of MOV from CR3 cause VM
* exits. The first processors to support the virtual-machine extensions supported
* only the 1-setting of this control.
*/
UINT64 Cr3StoreExiting : 1;
#define IA32_VMX_PROCBASED_CTLS_CR3_STORE_EXITING_BIT 16
#define IA32_VMX_PROCBASED_CTLS_CR3_STORE_EXITING_FLAG 0x10000
#define IA32_VMX_PROCBASED_CTLS_CR3_STORE_EXITING_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS_CR3_STORE_EXITING(_) (((_) >> 16) & 0x01)
/**
* @brief Determines whether the tertiary processor based VM-execution controls are
* used
*
* [Bit 17] This control determines whether the tertiary processor-based
* VM-execution controls are used. If this control is 0, the logical processor
* operates as if all the tertiary processor-based VM-execution controls were also
* 0.
*/
UINT64 ActivateTertiaryControls : 1;
#define IA32_VMX_PROCBASED_CTLS_ACTIVATE_TERTIARY_CONTROLS_BIT 17
#define IA32_VMX_PROCBASED_CTLS_ACTIVATE_TERTIARY_CONTROLS_FLAG 0x20000
#define IA32_VMX_PROCBASED_CTLS_ACTIVATE_TERTIARY_CONTROLS_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS_ACTIVATE_TERTIARY_CONTROLS(_) (((_) >> 17) & 0x01)
UINT64 Reserved5 : 1;
/**
* @brief VM-exit on CR8 loads
*
* [Bit 19] This control determines whether executions of MOV to CR8 cause VM exits.
*/
UINT64 Cr8LoadExiting : 1;
#define IA32_VMX_PROCBASED_CTLS_CR8_LOAD_EXITING_BIT 19
#define IA32_VMX_PROCBASED_CTLS_CR8_LOAD_EXITING_FLAG 0x80000
#define IA32_VMX_PROCBASED_CTLS_CR8_LOAD_EXITING_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS_CR8_LOAD_EXITING(_) (((_) >> 19) & 0x01)
/**
* @brief VM-exit on CR8 stores
*
* [Bit 20] This control determines whether executions of MOV from CR8 cause VM
* exits.
*/
UINT64 Cr8StoreExiting : 1;
#define IA32_VMX_PROCBASED_CTLS_CR8_STORE_EXITING_BIT 20
#define IA32_VMX_PROCBASED_CTLS_CR8_STORE_EXITING_FLAG 0x100000
#define IA32_VMX_PROCBASED_CTLS_CR8_STORE_EXITING_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS_CR8_STORE_EXITING(_) (((_) >> 20) & 0x01)
/**
* @brief Use TPR shadow
*
* [Bit 21] Setting this control to 1 enables TPR virtualization and other
* APIC-virtualization features.
*
* @see Vol3C[29(APIC VIRTUALIZATION AND VIRTUAL INTERRUPTS)]
*/
UINT64 UseTprShadow : 1;
#define IA32_VMX_PROCBASED_CTLS_USE_TPR_SHADOW_BIT 21
#define IA32_VMX_PROCBASED_CTLS_USE_TPR_SHADOW_FLAG 0x200000
#define IA32_VMX_PROCBASED_CTLS_USE_TPR_SHADOW_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS_USE_TPR_SHADOW(_) (((_) >> 21) & 0x01)
/**
* @brief VM-exit when virtual NMI blocking is disabled
*
* [Bit 22] If this control is 1, a VM exit occurs at the beginning of any
* instruction if there is no virtual-NMI blocking.
*
* @see Vol3C[24.4.2(Guest Non-Register State)]
*/
UINT64 NmiWindowExiting : 1;
#define IA32_VMX_PROCBASED_CTLS_NMI_WINDOW_EXITING_BIT 22
#define IA32_VMX_PROCBASED_CTLS_NMI_WINDOW_EXITING_FLAG 0x400000
#define IA32_VMX_PROCBASED_CTLS_NMI_WINDOW_EXITING_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS_NMI_WINDOW_EXITING(_) (((_) >> 22) & 0x01)
/**
* @brief VM-exit when executing a MOV DRx instruction
*
* [Bit 23] This control determines whether executions of MOV DR cause VM exits.
*/
UINT64 MovDrExiting : 1;
#define IA32_VMX_PROCBASED_CTLS_MOV_DR_EXITING_BIT 23
#define IA32_VMX_PROCBASED_CTLS_MOV_DR_EXITING_FLAG 0x800000
#define IA32_VMX_PROCBASED_CTLS_MOV_DR_EXITING_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS_MOV_DR_EXITING(_) (((_) >> 23) & 0x01)
/**
* @brief VM-exit when executing IO instructions
*
* [Bit 24] This control determines whether executions of I/O instructions (IN,
* INS/INSB/INSW/INSD, OUT, and OUTS/OUTSB/OUTSW/OUTSD) cause VM exits.
*/
UINT64 UnconditionalIoExiting : 1;
#define IA32_VMX_PROCBASED_CTLS_UNCONDITIONAL_IO_EXITING_BIT 24
#define IA32_VMX_PROCBASED_CTLS_UNCONDITIONAL_IO_EXITING_FLAG 0x1000000
#define IA32_VMX_PROCBASED_CTLS_UNCONDITIONAL_IO_EXITING_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS_UNCONDITIONAL_IO_EXITING(_) (((_) >> 24) & 0x01)
/**
* @brief Use IO bitmaps
*
* [Bit 25] This control determines whether I/O bitmaps are used to restrict
* executions of I/O instructions For this control, "0" means "do not use I/O
* bitmaps" and "1" means "use I/O bitmaps." If the I/O bitmaps are used, the
* setting of the "unconditional I/O exiting" control is ignored.
*
* @see Vol3C[24.6.4(I/O-Bitmap Addresses)]
* @see Vol3C[25.1.3(Instructions That Cause VM Exits Conditionally)]
*/
UINT64 UseIoBitmaps : 1;
#define IA32_VMX_PROCBASED_CTLS_USE_IO_BITMAPS_BIT 25
#define IA32_VMX_PROCBASED_CTLS_USE_IO_BITMAPS_FLAG 0x2000000
#define IA32_VMX_PROCBASED_CTLS_USE_IO_BITMAPS_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS_USE_IO_BITMAPS(_) (((_) >> 25) & 0x01)
UINT64 Reserved6 : 1;
/**
* @brief Monitor trap flag
*
* [Bit 27] If this control is 1, the monitor trap flag debugging feature is
* enabled.
*
* @see Vol3C[25.5.2(Monitor Trap Flag)]
*/
UINT64 MonitorTrapFlag : 1;
#define IA32_VMX_PROCBASED_CTLS_MONITOR_TRAP_FLAG_BIT 27
#define IA32_VMX_PROCBASED_CTLS_MONITOR_TRAP_FLAG_FLAG 0x8000000
#define IA32_VMX_PROCBASED_CTLS_MONITOR_TRAP_FLAG_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS_MONITOR_TRAP_FLAG(_) (((_) >> 27) & 0x01)
/**
* @brief Use MSR bitmaps
*
* [Bit 28] This control determines whether MSR bitmaps are used to control
* execution of the RDMSR and WRMSR instructions. For this control, "0" means "do
* not use MSR bitmaps" and "1" means "use MSR bitmaps." If the MSR bitmaps are not
* used, all executions of the RDMSR and WRMSR instructions cause VM exits.
*
* @see Vol3C[24.6.9(MSR-Bitmap Address)]
* @see Vol3C[25.1.3(Instructions That Cause VM Exits Conditionally)]
*/
UINT64 UseMsrBitmaps : 1;
#define IA32_VMX_PROCBASED_CTLS_USE_MSR_BITMAPS_BIT 28
#define IA32_VMX_PROCBASED_CTLS_USE_MSR_BITMAPS_FLAG 0x10000000
#define IA32_VMX_PROCBASED_CTLS_USE_MSR_BITMAPS_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS_USE_MSR_BITMAPS(_) (((_) >> 28) & 0x01)
/**
* @brief VM-exit when executing the MONITOR instruction
*
* [Bit 29] This control determines whether executions of MONITOR cause VM exits.
*/
UINT64 MonitorExiting : 1;
#define IA32_VMX_PROCBASED_CTLS_MONITOR_EXITING_BIT 29
#define IA32_VMX_PROCBASED_CTLS_MONITOR_EXITING_FLAG 0x20000000
#define IA32_VMX_PROCBASED_CTLS_MONITOR_EXITING_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS_MONITOR_EXITING(_) (((_) >> 29) & 0x01)
/**
* @brief VM-exit when executing the PAUSE instruction
*
* [Bit 30] This control determines whether executions of PAUSE cause VM exits.
*/
UINT64 PauseExiting : 1;
#define IA32_VMX_PROCBASED_CTLS_PAUSE_EXITING_BIT 30
#define IA32_VMX_PROCBASED_CTLS_PAUSE_EXITING_FLAG 0x40000000
#define IA32_VMX_PROCBASED_CTLS_PAUSE_EXITING_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS_PAUSE_EXITING(_) (((_) >> 30) & 0x01)
/**
* @brief Determines whether the secondary processor based VM-execution controls are
* used
*
* [Bit 31] This control determines whether the secondary processor-based
* VM-execution controls are used. If this control is 0, the logical processor
* operates as if all the secondary processor-based VM-execution controls were also
* 0.
*/
UINT64 ActivateSecondaryControls : 1;
#define IA32_VMX_PROCBASED_CTLS_ACTIVATE_SECONDARY_CONTROLS_BIT 31
#define IA32_VMX_PROCBASED_CTLS_ACTIVATE_SECONDARY_CONTROLS_FLAG 0x80000000
#define IA32_VMX_PROCBASED_CTLS_ACTIVATE_SECONDARY_CONTROLS_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS_ACTIVATE_SECONDARY_CONTROLS(_) (((_) >> 31) & 0x01)
UINT64 Reserved7 : 32;
};
/**
* Raw 64-bit value of the MSR.
*/
UINT64 AsUInt;
} IA32_VMX_PROCBASED_CTLS_REGISTER;
/**
 * Capability Reporting Register of Primary VM-Exit Controls.
 *
 * @remarks If CPUID.01H:ECX.[5] = 1
 *
 * NOTE: As a VMX capability MSR, bits 31:0 report the allowed 0-settings and
 * bits 63:32 report the allowed 1-settings of these controls; the bit fields
 * below describe the control-bit positions themselves.
 *
 * @see Vol3D[A.4.1(Primary VM-Exit Controls)]
 * @see Vol3C[24.7.1(VM-Exit Controls)] (reference)
 */
#define IA32_VMX_EXIT_CTLS 0x00000483
typedef union
{
struct
{
UINT64 Reserved1 : 2;
/**
* @brief Save guest debug controls (dr7 & IA32_DEBUGCTL_MSR) (forced to 1 on the
* 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs)
*
* [Bit 2] This control determines whether DR7 and the IA32_DEBUGCTL MSR are saved
* on VM exit. The first processors to support the virtual-machine extensions
* supported only the 1-setting of this control.
*/
UINT64 SaveDebugControls : 1;
#define IA32_VMX_EXIT_CTLS_SAVE_DEBUG_CONTROLS_BIT 2
#define IA32_VMX_EXIT_CTLS_SAVE_DEBUG_CONTROLS_FLAG 0x04
#define IA32_VMX_EXIT_CTLS_SAVE_DEBUG_CONTROLS_MASK 0x01
#define IA32_VMX_EXIT_CTLS_SAVE_DEBUG_CONTROLS(_) (((_) >> 2) & 0x01)
UINT64 Reserved2 : 6;
/**
* @brief Return to long mode after a VM-exit
*
* [Bit 9] On processors that support Intel 64 architecture, this control determines
* whether a logical processor is in 64-bit mode after the next VM exit. Its value
* is loaded into CS.L, IA32_EFER.LME, and IA32_EFER.LMA on every VM exit.1 This
* control must be 0 on processors that do not support Intel 64 architecture.
*/
UINT64 HostAddressSpaceSize : 1;
#define IA32_VMX_EXIT_CTLS_HOST_ADDRESS_SPACE_SIZE_BIT 9
#define IA32_VMX_EXIT_CTLS_HOST_ADDRESS_SPACE_SIZE_FLAG 0x200
#define IA32_VMX_EXIT_CTLS_HOST_ADDRESS_SPACE_SIZE_MASK 0x01
#define IA32_VMX_EXIT_CTLS_HOST_ADDRESS_SPACE_SIZE(_) (((_) >> 9) & 0x01)
UINT64 Reserved3 : 2;
/**
* @brief Whether the IA32_PERF_GLOBAL_CTRL MSR is loaded on VM-exit
*
* [Bit 12] This control determines whether the IA32_PERF_GLOBAL_CTRL MSR is loaded
* on VM exit.
*/
UINT64 LoadIa32PerfGlobalCtrl : 1;
#define IA32_VMX_EXIT_CTLS_LOAD_IA32_PERF_GLOBAL_CTRL_BIT 12
#define IA32_VMX_EXIT_CTLS_LOAD_IA32_PERF_GLOBAL_CTRL_FLAG 0x1000
#define IA32_VMX_EXIT_CTLS_LOAD_IA32_PERF_GLOBAL_CTRL_MASK 0x01
#define IA32_VMX_EXIT_CTLS_LOAD_IA32_PERF_GLOBAL_CTRL(_) (((_) >> 12) & 0x01)
UINT64 Reserved4 : 2;
/**
* @brief Acknowledge external interrupts with the irq controller if one caused a
* VM-exit
*
* [Bit 15] This control affects VM exits due to external interrupts:
* - If such a VM exit occurs and this control is 1, the logical processor
* acknowledges the interrupt controller, acquiring the interrupt's vector. The
* vector is stored in the VM-exit interruption-information field, which is marked
* valid.
* - If such a VM exit occurs and this control is 0, the interrupt is not
* acknowledged and the VM-exit interruption-information field is marked invalid.
*/
UINT64 AcknowledgeInterruptOnExit : 1;
#define IA32_VMX_EXIT_CTLS_ACKNOWLEDGE_INTERRUPT_ON_EXIT_BIT 15
#define IA32_VMX_EXIT_CTLS_ACKNOWLEDGE_INTERRUPT_ON_EXIT_FLAG 0x8000
#define IA32_VMX_EXIT_CTLS_ACKNOWLEDGE_INTERRUPT_ON_EXIT_MASK 0x01
#define IA32_VMX_EXIT_CTLS_ACKNOWLEDGE_INTERRUPT_ON_EXIT(_) (((_) >> 15) & 0x01)
UINT64 Reserved5 : 2;
/**
* @brief Whether the guest IA32_PAT MSR is saved on VM-exit
*
* [Bit 18] This control determines whether the IA32_PAT MSR is saved on VM exit.
*/
UINT64 SaveIa32Pat : 1;
#define IA32_VMX_EXIT_CTLS_SAVE_IA32_PAT_BIT 18
#define IA32_VMX_EXIT_CTLS_SAVE_IA32_PAT_FLAG 0x40000
#define IA32_VMX_EXIT_CTLS_SAVE_IA32_PAT_MASK 0x01
#define IA32_VMX_EXIT_CTLS_SAVE_IA32_PAT(_) (((_) >> 18) & 0x01)
/**
* @brief Whether the host IA32_PAT MSR is loaded on VM-exit
*
* [Bit 19] This control determines whether the IA32_PAT MSR is loaded on VM exit.
*/
UINT64 LoadIa32Pat : 1;
#define IA32_VMX_EXIT_CTLS_LOAD_IA32_PAT_BIT 19
#define IA32_VMX_EXIT_CTLS_LOAD_IA32_PAT_FLAG 0x80000
#define IA32_VMX_EXIT_CTLS_LOAD_IA32_PAT_MASK 0x01
#define IA32_VMX_EXIT_CTLS_LOAD_IA32_PAT(_) (((_) >> 19) & 0x01)
/**
* @brief Whether the guest IA32_EFER MSR is saved on VM-exit
*
* [Bit 20] This control determines whether the IA32_EFER MSR is saved on VM exit.
*/
UINT64 SaveIa32Efer : 1;
#define IA32_VMX_EXIT_CTLS_SAVE_IA32_EFER_BIT 20
#define IA32_VMX_EXIT_CTLS_SAVE_IA32_EFER_FLAG 0x100000
#define IA32_VMX_EXIT_CTLS_SAVE_IA32_EFER_MASK 0x01
#define IA32_VMX_EXIT_CTLS_SAVE_IA32_EFER(_) (((_) >> 20) & 0x01)
/**
* @brief Whether the host IA32_EFER MSR is loaded on VM-exit
*
* [Bit 21] This control determines whether the IA32_EFER MSR is loaded on VM exit.
*/
UINT64 LoadIa32Efer : 1;
#define IA32_VMX_EXIT_CTLS_LOAD_IA32_EFER_BIT 21
#define IA32_VMX_EXIT_CTLS_LOAD_IA32_EFER_FLAG 0x200000
#define IA32_VMX_EXIT_CTLS_LOAD_IA32_EFER_MASK 0x01
#define IA32_VMX_EXIT_CTLS_LOAD_IA32_EFER(_) (((_) >> 21) & 0x01)
/**
* @brief Whether the value of the VMX preemption timer is saved on every VM-exit
*
* [Bit 22] This control determines whether the value of the VMX-preemption timer is
* saved on VM exit.
*/
UINT64 SaveVmxPreemptionTimerValue : 1;
#define IA32_VMX_EXIT_CTLS_SAVE_VMX_PREEMPTION_TIMER_VALUE_BIT 22
#define IA32_VMX_EXIT_CTLS_SAVE_VMX_PREEMPTION_TIMER_VALUE_FLAG 0x400000
#define IA32_VMX_EXIT_CTLS_SAVE_VMX_PREEMPTION_TIMER_VALUE_MASK 0x01
#define IA32_VMX_EXIT_CTLS_SAVE_VMX_PREEMPTION_TIMER_VALUE(_) (((_) >> 22) & 0x01)
/**
* [Bit 23] This control determines whether the IA32_BNDCFGS MSR is cleared on VM
* exit.
*/
UINT64 ClearIa32Bndcfgs : 1;
#define IA32_VMX_EXIT_CTLS_CLEAR_IA32_BNDCFGS_BIT 23
#define IA32_VMX_EXIT_CTLS_CLEAR_IA32_BNDCFGS_FLAG 0x800000
#define IA32_VMX_EXIT_CTLS_CLEAR_IA32_BNDCFGS_MASK 0x01
#define IA32_VMX_EXIT_CTLS_CLEAR_IA32_BNDCFGS(_) (((_) >> 23) & 0x01)
/**
* [Bit 24] If this control is 1, Intel Processor Trace does not produce a paging
* information packet (PIP) on a VM exit or a VMCS packet on an SMM VM exit.
*
* @see Vol3C[35(INTEL(R) PROCESSOR TRACE)]
*/
UINT64 ConcealVmxFromPt : 1;
#define IA32_VMX_EXIT_CTLS_CONCEAL_VMX_FROM_PT_BIT 24
#define IA32_VMX_EXIT_CTLS_CONCEAL_VMX_FROM_PT_FLAG 0x1000000
#define IA32_VMX_EXIT_CTLS_CONCEAL_VMX_FROM_PT_MASK 0x01
#define IA32_VMX_EXIT_CTLS_CONCEAL_VMX_FROM_PT(_) (((_) >> 24) & 0x01)
/**
* [Bit 25] This control determines whether the IA32_RTIT_CTL MSR is cleared on VM
* exit.
*
* @see Vol3C[35(INTEL(R) PROCESSOR TRACE)]
*/
UINT64 ClearIa32RtitCtl : 1;
#define IA32_VMX_EXIT_CTLS_CLEAR_IA32_RTIT_CTL_BIT 25
#define IA32_VMX_EXIT_CTLS_CLEAR_IA32_RTIT_CTL_FLAG 0x2000000
#define IA32_VMX_EXIT_CTLS_CLEAR_IA32_RTIT_CTL_MASK 0x01
#define IA32_VMX_EXIT_CTLS_CLEAR_IA32_RTIT_CTL(_) (((_) >> 25) & 0x01)
/**
* [Bit 26] This control determines whether the IA32_LBR_CTL MSR is cleared on VM
* exit.
*/
UINT64 ClearIa32LbrCtl : 1;
#define IA32_VMX_EXIT_CTLS_CLEAR_IA32_LBR_CTL_BIT 26
#define IA32_VMX_EXIT_CTLS_CLEAR_IA32_LBR_CTL_FLAG 0x4000000
#define IA32_VMX_EXIT_CTLS_CLEAR_IA32_LBR_CTL_MASK 0x01
#define IA32_VMX_EXIT_CTLS_CLEAR_IA32_LBR_CTL(_) (((_) >> 26) & 0x01)
UINT64 Reserved6 : 1;
/**
* [Bit 28] This control determines whether CET-related MSRs and SPP are loaded on
* VM exit.
*
* @see Vol1[18(CONTROL-FLOW ENFORCEMENT TECHNOLOGY (CET))]
*/
UINT64 LoadIa32CetState : 1;
#define IA32_VMX_EXIT_CTLS_LOAD_IA32_CET_STATE_BIT 28
#define IA32_VMX_EXIT_CTLS_LOAD_IA32_CET_STATE_FLAG 0x10000000
#define IA32_VMX_EXIT_CTLS_LOAD_IA32_CET_STATE_MASK 0x01
#define IA32_VMX_EXIT_CTLS_LOAD_IA32_CET_STATE(_) (((_) >> 28) & 0x01)
/**
* [Bit 29] This control determines whether the IA32_PKRS MSR is loaded on VM exit.
*/
UINT64 LoadIa32Pkrs : 1;
#define IA32_VMX_EXIT_CTLS_LOAD_IA32_PKRS_BIT 29
#define IA32_VMX_EXIT_CTLS_LOAD_IA32_PKRS_FLAG 0x20000000
#define IA32_VMX_EXIT_CTLS_LOAD_IA32_PKRS_MASK 0x01
#define IA32_VMX_EXIT_CTLS_LOAD_IA32_PKRS(_) (((_) >> 29) & 0x01)
UINT64 Reserved7 : 1;
/**
* [Bit 31] This control determines whether the secondary VM-exit controls are used.
* If this control is 0, the logical processor operates as if all the secondary
* VM-exit controls were also 0.
*/
UINT64 ActivateSecondaryControls : 1;
#define IA32_VMX_EXIT_CTLS_ACTIVATE_SECONDARY_CONTROLS_BIT 31
#define IA32_VMX_EXIT_CTLS_ACTIVATE_SECONDARY_CONTROLS_FLAG 0x80000000
#define IA32_VMX_EXIT_CTLS_ACTIVATE_SECONDARY_CONTROLS_MASK 0x01
#define IA32_VMX_EXIT_CTLS_ACTIVATE_SECONDARY_CONTROLS(_) (((_) >> 31) & 0x01)
UINT64 Reserved8 : 32;
};
/**
* Raw 64-bit value of the MSR.
*/
UINT64 AsUInt;
} IA32_VMX_EXIT_CTLS_REGISTER;
/**
 * Capability Reporting Register of VM-Entry Controls.
 *
 * @remarks If CPUID.01H:ECX.[5] = 1
 *
 * NOTE: As a VMX capability MSR, bits 31:0 report the allowed 0-settings and
 * bits 63:32 report the allowed 1-settings of these controls; the bit fields
 * below describe the control-bit positions themselves.
 *
 * @see Vol3D[A.5(VM-ENTRY CONTROLS)]
 * @see Vol3C[24.8.1(VM-Entry Controls)] (reference)
 */
#define IA32_VMX_ENTRY_CTLS 0x00000484
typedef union
{
struct
{
UINT64 Reserved1 : 2;
/**
* @brief Load guest debug controls (dr7 & IA32_DEBUGCTL_MSR) (forced to 1 on the
* 'first' VT-x capable CPUs; this actually includes the newest Nehalem CPUs)
*
* [Bit 2] This control determines whether DR7 and the IA32_DEBUGCTL MSR are loaded
* on VM entry. The first processors to support the virtual-machine extensions
* supported only the 1-setting of this control.
*/
UINT64 LoadDebugControls : 1;
#define IA32_VMX_ENTRY_CTLS_LOAD_DEBUG_CONTROLS_BIT 2
#define IA32_VMX_ENTRY_CTLS_LOAD_DEBUG_CONTROLS_FLAG 0x04
#define IA32_VMX_ENTRY_CTLS_LOAD_DEBUG_CONTROLS_MASK 0x01
#define IA32_VMX_ENTRY_CTLS_LOAD_DEBUG_CONTROLS(_) (((_) >> 2) & 0x01)
UINT64 Reserved2 : 6;
/**
* @brief 64 bits guest mode. Must be 0 for CPUs that don't support AMD64
*
* [Bit 9] On processors that support Intel 64 architecture, this control determines
* whether the logical processor is in IA-32e mode after VM entry. Its value is
* loaded into IA32_EFER.LMA as part of VM entry. This control must be 0 on
* processors that do not support Intel 64 architecture.
*/
UINT64 Ia32EModeGuest : 1;
#define IA32_VMX_ENTRY_CTLS_IA32E_MODE_GUEST_BIT 9
#define IA32_VMX_ENTRY_CTLS_IA32E_MODE_GUEST_FLAG 0x200
#define IA32_VMX_ENTRY_CTLS_IA32E_MODE_GUEST_MASK 0x01
#define IA32_VMX_ENTRY_CTLS_IA32E_MODE_GUEST(_) (((_) >> 9) & 0x01)
/**
* @brief In SMM mode after VM-entry
*
* [Bit 10] This control determines whether the logical processor is in
* system-management mode (SMM) after VM entry. This control must be 0 for any VM
* entry from outside SMM.
*/
UINT64 EntryToSmm : 1;
#define IA32_VMX_ENTRY_CTLS_ENTRY_TO_SMM_BIT 10
#define IA32_VMX_ENTRY_CTLS_ENTRY_TO_SMM_FLAG 0x400
#define IA32_VMX_ENTRY_CTLS_ENTRY_TO_SMM_MASK 0x01
#define IA32_VMX_ENTRY_CTLS_ENTRY_TO_SMM(_) (((_) >> 10) & 0x01)
/**
* @brief Disable dual treatment of SMI and SMM; must be zero for VM-entry outside
* of SMM
*
* [Bit 11] If set to 1, the default treatment of SMIs and SMM is in effect after
* the VM entry. This control must be 0 for any VM entry from outside SMM
*
* @see Vol3C[34.15.7(Deactivating the Dual-Monitor Treatment)]
*/
UINT64 DeactivateDualMonitorTreatment : 1;
#define IA32_VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MONITOR_TREATMENT_BIT 11
#define IA32_VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MONITOR_TREATMENT_FLAG 0x800
#define IA32_VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MONITOR_TREATMENT_MASK 0x01
#define IA32_VMX_ENTRY_CTLS_DEACTIVATE_DUAL_MONITOR_TREATMENT(_) (((_) >> 11) & 0x01)
UINT64 Reserved3 : 1;
/**
* @brief Whether the guest IA32_PERF_GLOBAL_CTRL MSR is loaded on VM-entry
*
* [Bit 13] This control determines whether the IA32_PERF_GLOBAL_CTRL MSR is loaded
* on VM entry.
*/
UINT64 LoadIa32PerfGlobalCtrl : 1;
#define IA32_VMX_ENTRY_CTLS_LOAD_IA32_PERF_GLOBAL_CTRL_BIT 13
#define IA32_VMX_ENTRY_CTLS_LOAD_IA32_PERF_GLOBAL_CTRL_FLAG 0x2000
#define IA32_VMX_ENTRY_CTLS_LOAD_IA32_PERF_GLOBAL_CTRL_MASK 0x01
#define IA32_VMX_ENTRY_CTLS_LOAD_IA32_PERF_GLOBAL_CTRL(_) (((_) >> 13) & 0x01)
/**
* @brief Whether the guest IA32_PAT MSR is loaded on VM-entry
*
* [Bit 14] This control determines whether the IA32_PAT MSR is loaded on VM entry.
*/
UINT64 LoadIa32Pat : 1;
#define IA32_VMX_ENTRY_CTLS_LOAD_IA32_PAT_BIT 14
#define IA32_VMX_ENTRY_CTLS_LOAD_IA32_PAT_FLAG 0x4000
#define IA32_VMX_ENTRY_CTLS_LOAD_IA32_PAT_MASK 0x01
#define IA32_VMX_ENTRY_CTLS_LOAD_IA32_PAT(_) (((_) >> 14) & 0x01)
/**
* @brief Whether the guest IA32_EFER MSR is loaded on VM-entry
*
* [Bit 15] This control determines whether the IA32_EFER MSR is loaded on VM entry.
*/
UINT64 LoadIa32Efer : 1;
#define IA32_VMX_ENTRY_CTLS_LOAD_IA32_EFER_BIT 15
#define IA32_VMX_ENTRY_CTLS_LOAD_IA32_EFER_FLAG 0x8000
#define IA32_VMX_ENTRY_CTLS_LOAD_IA32_EFER_MASK 0x01
#define IA32_VMX_ENTRY_CTLS_LOAD_IA32_EFER(_) (((_) >> 15) & 0x01)
/**
* [Bit 16] This control determines whether the IA32_BNDCFGS MSR is loaded on VM
* entry.
*/
UINT64 LoadIa32Bndcfgs : 1;
#define IA32_VMX_ENTRY_CTLS_LOAD_IA32_BNDCFGS_BIT 16
#define IA32_VMX_ENTRY_CTLS_LOAD_IA32_BNDCFGS_FLAG 0x10000
#define IA32_VMX_ENTRY_CTLS_LOAD_IA32_BNDCFGS_MASK 0x01
#define IA32_VMX_ENTRY_CTLS_LOAD_IA32_BNDCFGS(_) (((_) >> 16) & 0x01)
/**
* [Bit 17] If this control is 1, Intel Processor Trace does not produce a paging
* information packet (PIP) on a VM entry or a VMCS packet on a VM entry that
* returns from SMM.
*
* @see Vol3C[35(INTEL(R) PROCESSOR TRACE)]
*/
UINT64 ConcealVmxFromPt : 1;
#define IA32_VMX_ENTRY_CTLS_CONCEAL_VMX_FROM_PT_BIT 17
#define IA32_VMX_ENTRY_CTLS_CONCEAL_VMX_FROM_PT_FLAG 0x20000
#define IA32_VMX_ENTRY_CTLS_CONCEAL_VMX_FROM_PT_MASK 0x01
#define IA32_VMX_ENTRY_CTLS_CONCEAL_VMX_FROM_PT(_) (((_) >> 17) & 0x01)
/**
* [Bit 18] This control determines whether the IA32_RTIT_CTL MSR is loaded on VM
* entry.
*/
UINT64 LoadIa32RtitCtl : 1;
#define IA32_VMX_ENTRY_CTLS_LOAD_IA32_RTIT_CTL_BIT 18
#define IA32_VMX_ENTRY_CTLS_LOAD_IA32_RTIT_CTL_FLAG 0x40000
#define IA32_VMX_ENTRY_CTLS_LOAD_IA32_RTIT_CTL_MASK 0x01
#define IA32_VMX_ENTRY_CTLS_LOAD_IA32_RTIT_CTL(_) (((_) >> 18) & 0x01)
UINT64 Reserved4 : 1;
/**
* [Bit 20] This control determines whether CET-related MSRs and SPP are loaded on
* VM entry.
*/
UINT64 LoadCetState : 1;
#define IA32_VMX_ENTRY_CTLS_LOAD_CET_STATE_BIT 20
#define IA32_VMX_ENTRY_CTLS_LOAD_CET_STATE_FLAG 0x100000
#define IA32_VMX_ENTRY_CTLS_LOAD_CET_STATE_MASK 0x01
#define IA32_VMX_ENTRY_CTLS_LOAD_CET_STATE(_) (((_) >> 20) & 0x01)
/**
* [Bit 21] This control determines whether the IA32_LBR_CTL MSR is loaded on VM
* entry.
*/
UINT64 LoadIa32LbrCtl : 1;
#define IA32_VMX_ENTRY_CTLS_LOAD_IA32_LBR_CTL_BIT 21
#define IA32_VMX_ENTRY_CTLS_LOAD_IA32_LBR_CTL_FLAG 0x200000
#define IA32_VMX_ENTRY_CTLS_LOAD_IA32_LBR_CTL_MASK 0x01
#define IA32_VMX_ENTRY_CTLS_LOAD_IA32_LBR_CTL(_) (((_) >> 21) & 0x01)
/**
* [Bit 22] This control determines whether the IA32_PKRS MSR is loaded on VM entry.
*/
UINT64 LoadIa32Pkrs : 1;
#define IA32_VMX_ENTRY_CTLS_LOAD_IA32_PKRS_BIT 22
#define IA32_VMX_ENTRY_CTLS_LOAD_IA32_PKRS_FLAG 0x400000
#define IA32_VMX_ENTRY_CTLS_LOAD_IA32_PKRS_MASK 0x01
#define IA32_VMX_ENTRY_CTLS_LOAD_IA32_PKRS(_) (((_) >> 22) & 0x01)
UINT64 Reserved5 : 41;
};
/**
* Raw 64-bit value of the MSR.
*/
UINT64 AsUInt;
} IA32_VMX_ENTRY_CTLS_REGISTER;
/**
 * Reporting Register of Miscellaneous VMX Capabilities.
 *
 * Read-only capability MSR describing miscellaneous VMX implementation
 * properties: the preemption-timer/TSC rate relationship, supported activity
 * states, CR3-target count, recommended MSR-list sizes, and the MSEG revision
 * identifier.
 *
 * @remarks If CPUID.01H:ECX.[5] = 1
 * @see Vol3D[A.6(MISCELLANEOUS DATA)]
 * @see Vol3D[A.6(Miscellaneous Data)] (reference)
 */
#define IA32_VMX_MISC 0x00000485
typedef union
{
struct
{
/**
 * @brief Relationship between the preemption timer and tsc; count down every time
 * bit x of the tsc changes
 *
 * [Bits 4:0] Report a value X that specifies the relationship between the rate of
 * the VMX-preemption timer and that of the timestamp counter (TSC). Specifically,
 * the VMX-preemption timer (if it is active) counts down by 1 every time bit X in
 * the TSC changes due to a TSC increment.
 */
UINT64 PreemptionTimerTscRelationship : 5;
#define IA32_VMX_MISC_PREEMPTION_TIMER_TSC_RELATIONSHIP_BIT 0
#define IA32_VMX_MISC_PREEMPTION_TIMER_TSC_RELATIONSHIP_FLAG 0x1F
#define IA32_VMX_MISC_PREEMPTION_TIMER_TSC_RELATIONSHIP_MASK 0x1F
#define IA32_VMX_MISC_PREEMPTION_TIMER_TSC_RELATIONSHIP(_) (((_) >> 0) & 0x1F)
/**
 * @brief Whether VM-exit stores EFER.LMA into the "IA32e mode guest" field
 *
 * [Bit 5] When set to 1, VM exits store the value of IA32_EFER.LMA into the "IA-32e
 * mode guest" VM-entry control. This bit is read as 1 on any logical processor that
 * supports the 1-setting of the "unrestricted guest" VM-execution control.
 *
 * @see Vol3C[27.2(RECORDING VM-EXIT INFORMATION AND UPDATING VM-ENTRY CONTROL
 * FIELDS)]
 */
UINT64 StoreEferLmaOnVmexit : 1;
#define IA32_VMX_MISC_STORE_EFER_LMA_ON_VMEXIT_BIT 5
#define IA32_VMX_MISC_STORE_EFER_LMA_ON_VMEXIT_FLAG 0x20
#define IA32_VMX_MISC_STORE_EFER_LMA_ON_VMEXIT_MASK 0x01
#define IA32_VMX_MISC_STORE_EFER_LMA_ON_VMEXIT(_) (((_) >> 5) & 0x01)
/**
 * @brief Activity states supported by the implementation
 *
 * [Bits 8:6] Report, as a bitmap, the activity states supported by the
 * implementation:
 * - Bit 6 reports (if set) the support for activity state 1 (HLT).
 * - Bit 7 reports (if set) the support for activity state 2 (shutdown).
 * - Bit 8 reports (if set) the support for activity state 3 (wait-for-SIPI).
 * If an activity state is not supported, the implementation causes a VM entry to
 * fail if it attempts to establish that activity state. All implementations support
 * VM entry to activity state 0 (active).
 */
UINT64 ActivityStates : 3;
#define IA32_VMX_MISC_ACTIVITY_STATES_BIT 6
#define IA32_VMX_MISC_ACTIVITY_STATES_FLAG 0x1C0
#define IA32_VMX_MISC_ACTIVITY_STATES_MASK 0x07
#define IA32_VMX_MISC_ACTIVITY_STATES(_) (((_) >> 6) & 0x07)
/**
 * [Bits 13:9] Reserved.
 */
UINT64 Reserved1 : 5;
/**
 * @brief Intel Processor Trace (Intel PT) can be used in VMX operation
 *
 * [Bit 14] When set to 1, Intel(R) Processor Trace (Intel PT) can be used in VMX
 * operation. If the processor supports Intel PT but does not allow it to be used in
 * VMX operation, execution of VMXON clears IA32_RTIT_CTL.TraceEn; any attempt to
 * write IA32_RTIT_CTL while in VMX operation (including VMX root operation) causes
 * a general-protection exception.
 *
 * @see Vol3C[30.3(VMX INSTRUCTIONS | VMXON-Enter VMX Operation)]
 */
UINT64 IntelPtAvailableInVmx : 1;
#define IA32_VMX_MISC_INTEL_PT_AVAILABLE_IN_VMX_BIT 14
#define IA32_VMX_MISC_INTEL_PT_AVAILABLE_IN_VMX_FLAG 0x4000
#define IA32_VMX_MISC_INTEL_PT_AVAILABLE_IN_VMX_MASK 0x01
#define IA32_VMX_MISC_INTEL_PT_AVAILABLE_IN_VMX(_) (((_) >> 14) & 0x01)
/**
 * @brief Whether RDMSR can be used to read IA32_SMBASE_MSR in SMM
 *
 * [Bit 15] When set to 1, the RDMSR instruction can be used in system-management
 * mode (SMM) to read the IA32_SMBASE MSR (MSR address 9EH).
 *
 * @see Vol3C[34.15.6.3(Saving Guest State)]
 */
UINT64 RdmsrCanReadIa32SmbaseMsrInSmm : 1;
#define IA32_VMX_MISC_RDMSR_CAN_READ_IA32_SMBASE_MSR_IN_SMM_BIT 15
#define IA32_VMX_MISC_RDMSR_CAN_READ_IA32_SMBASE_MSR_IN_SMM_FLAG 0x8000
#define IA32_VMX_MISC_RDMSR_CAN_READ_IA32_SMBASE_MSR_IN_SMM_MASK 0x01
#define IA32_VMX_MISC_RDMSR_CAN_READ_IA32_SMBASE_MSR_IN_SMM(_) (((_) >> 15) & 0x01)
/**
 * @brief Number of CR3 target values supported by the processor (0-256)
 *
 * [Bits 24:16] Indicate the number of CR3-target values supported by the processor.
 * This number is a value between 0 and 256, inclusive (bit 24 is set if and only if
 * bits 23:16 are clear).
 */
UINT64 Cr3TargetCount : 9;
#define IA32_VMX_MISC_CR3_TARGET_COUNT_BIT 16
#define IA32_VMX_MISC_CR3_TARGET_COUNT_FLAG 0x1FF0000
#define IA32_VMX_MISC_CR3_TARGET_COUNT_MASK 0x1FF
#define IA32_VMX_MISC_CR3_TARGET_COUNT(_) (((_) >> 16) & 0x1FF)
/**
 * @brief Maximum number of MSRs in the VMCS. (N+1)*512
 *
 * [Bits 27:25] Used to compute the recommended maximum number of MSRs that should
 * appear in the VM-exit MSR-store list, the VM-exit MSR-load list, or the VM-entry
 * MSR-load list. Specifically, if the value bits 27:25 of IA32_VMX_MISC is N, then
 * 512 * (N + 1) is the recommended maximum number of MSRs to be included in each
 * list. If the limit is exceeded, undefined processor behavior may result
 * (including a machine check during the VMX transition).
 */
UINT64 MaxNumberOfMsr : 3;
#define IA32_VMX_MISC_MAX_NUMBER_OF_MSR_BIT 25
#define IA32_VMX_MISC_MAX_NUMBER_OF_MSR_FLAG 0xE000000
#define IA32_VMX_MISC_MAX_NUMBER_OF_MSR_MASK 0x07
#define IA32_VMX_MISC_MAX_NUMBER_OF_MSR(_) (((_) >> 25) & 0x07)
/**
 * @brief Whether bit 2 of IA32_SMM_MONITOR_CTL can be set to 1
 *
 * [Bit 28] When set to 1, bit 2 of the IA32_SMM_MONITOR_CTL can be set to 1. VMXOFF
 * unblocks SMIs unless IA32_SMM_MONITOR_CTL[bit 2] is 1.
 *
 * @see Vol3C[34.14.4(VMXOFF and SMI Unblocking)]
 */
UINT64 SmmMonitorCtlB2 : 1;
#define IA32_VMX_MISC_SMM_MONITOR_CTL_B2_BIT 28
#define IA32_VMX_MISC_SMM_MONITOR_CTL_B2_FLAG 0x10000000
#define IA32_VMX_MISC_SMM_MONITOR_CTL_B2_MASK 0x01
#define IA32_VMX_MISC_SMM_MONITOR_CTL_B2(_) (((_) >> 28) & 0x01)
/**
 * @brief Whether VMWRITE can be used to write VM-exit information fields
 *
 * [Bit 29] When set to 1, software can use VMWRITE to write to any supported field
 * in the VMCS; otherwise, VMWRITE cannot be used to modify VM-exit information
 * fields.
 */
UINT64 VmwriteVmexitInfo : 1;
#define IA32_VMX_MISC_VMWRITE_VMEXIT_INFO_BIT 29
#define IA32_VMX_MISC_VMWRITE_VMEXIT_INFO_FLAG 0x20000000
#define IA32_VMX_MISC_VMWRITE_VMEXIT_INFO_MASK 0x01
#define IA32_VMX_MISC_VMWRITE_VMEXIT_INFO(_) (((_) >> 29) & 0x01)
/**
 * [Bit 30] When set to 1, VM entry allows injection of a software interrupt,
 * software exception, or privileged software exception with an instruction length
 * of 0.
 */
UINT64 ZeroLengthInstructionVmentryInjection : 1;
#define IA32_VMX_MISC_ZERO_LENGTH_INSTRUCTION_VMENTRY_INJECTION_BIT 30
#define IA32_VMX_MISC_ZERO_LENGTH_INSTRUCTION_VMENTRY_INJECTION_FLAG 0x40000000
#define IA32_VMX_MISC_ZERO_LENGTH_INSTRUCTION_VMENTRY_INJECTION_MASK 0x01
#define IA32_VMX_MISC_ZERO_LENGTH_INSTRUCTION_VMENTRY_INJECTION(_) (((_) >> 30) & 0x01)
/**
 * [Bit 31] Reserved.
 */
UINT64 Reserved2 : 1;
/**
 * @brief MSEG revision identifier used by the processor
 *
 * [Bits 63:32] Report the 32-bit MSEG revision identifier used by the processor.
 */
UINT64 MsegId : 32;
#define IA32_VMX_MISC_MSEG_ID_BIT 32
#define IA32_VMX_MISC_MSEG_ID_FLAG 0xFFFFFFFF00000000
#define IA32_VMX_MISC_MSEG_ID_MASK 0xFFFFFFFF
#define IA32_VMX_MISC_MSEG_ID(_) (((_) >> 32) & 0xFFFFFFFF)
};
UINT64 AsUInt;
} IA32_VMX_MISC_REGISTER;
/**
 * Capability Reporting Register of CR0 Bits Fixed to 0.
 *
 * Per SDM A.7: each CR0 bit reported as 1 in this MSR must be 1 in CR0 while
 * the processor is in VMX operation (the bit is "fixed to 1"); entering VMX
 * operation with such a bit clear fails.
 *
 * @remarks If CPUID.01H:ECX.[5] = 1
 * @see Vol3D[A.7(VMX-FIXED BITS IN CR0)]
 * @see Vol3D[A.7(VMX-Fixed Bits in CR0)] (reference)
 */
#define IA32_VMX_CR0_FIXED0 0x00000486
/**
 * Capability Reporting Register of CR0 Bits Fixed to 1.
 *
 * Per SDM A.7: each CR0 bit reported as 0 in this MSR must be 0 in CR0 while
 * the processor is in VMX operation (the bit is "fixed to 0").
 *
 * @remarks If CPUID.01H:ECX.[5] = 1
 * @see Vol3D[A.7(VMX-FIXED BITS IN CR0)]
 * @see Vol3D[A.7(VMX-Fixed Bits in CR0)] (reference)
 */
#define IA32_VMX_CR0_FIXED1 0x00000487
/**
 * Capability Reporting Register of CR4 Bits Fixed to 0.
 *
 * Per SDM A.8: each CR4 bit reported as 1 in this MSR must be 1 in CR4 while
 * the processor is in VMX operation (the bit is "fixed to 1").
 *
 * @remarks If CPUID.01H:ECX.[5] = 1
 * @see Vol3D[A.8(VMX-FIXED BITS IN CR4)]
 * @see Vol3D[A.8(VMX-Fixed Bits in CR4)] (reference)
 */
#define IA32_VMX_CR4_FIXED0 0x00000488
/**
 * Capability Reporting Register of CR4 Bits Fixed to 1.
 *
 * Per SDM A.8: each CR4 bit reported as 0 in this MSR must be 0 in CR4 while
 * the processor is in VMX operation (the bit is "fixed to 0").
 *
 * @remarks If CPUID.01H:ECX.[5] = 1
 * @see Vol3D[A.8(VMX-FIXED BITS IN CR4)]
 * @see Vol3D[A.8(VMX-Fixed Bits in CR4)] (reference)
 */
#define IA32_VMX_CR4_FIXED1 0x00000489
/**
 * Capability Reporting Register of VMCS Field Enumeration.
 *
 * Read-only MSR whose layout mirrors a VMCS field encoding; software uses the
 * HighestIndexValue field to enumerate the VMCS fields the processor supports.
 *
 * @remarks If CPUID.01H:ECX.[5] = 1
 * @see Vol3D[A.9(VMCS ENUMERATION)]
 * @see Vol3D[A.9(VMCS Enumeration)] (reference)
 */
#define IA32_VMX_VMCS_ENUM 0x0000048A
typedef union
{
struct
{
/**
 * [Bit 0] Indicates access type.
 */
UINT64 AccessType : 1;
#define IA32_VMX_VMCS_ENUM_ACCESS_TYPE_BIT 0
#define IA32_VMX_VMCS_ENUM_ACCESS_TYPE_FLAG 0x01
#define IA32_VMX_VMCS_ENUM_ACCESS_TYPE_MASK 0x01
#define IA32_VMX_VMCS_ENUM_ACCESS_TYPE(_) (((_) >> 0) & 0x01)
/**
 * [Bits 9:1] Highest index value used for any VMCS encoding.
 */
UINT64 HighestIndexValue : 9;
#define IA32_VMX_VMCS_ENUM_HIGHEST_INDEX_VALUE_BIT 1
#define IA32_VMX_VMCS_ENUM_HIGHEST_INDEX_VALUE_FLAG 0x3FE
#define IA32_VMX_VMCS_ENUM_HIGHEST_INDEX_VALUE_MASK 0x1FF
#define IA32_VMX_VMCS_ENUM_HIGHEST_INDEX_VALUE(_) (((_) >> 1) & 0x1FF)
/**
 * [Bits 11:10] Indicate the field's type.
 */
UINT64 FieldType : 2;
#define IA32_VMX_VMCS_ENUM_FIELD_TYPE_BIT 10
#define IA32_VMX_VMCS_ENUM_FIELD_TYPE_FLAG 0xC00
#define IA32_VMX_VMCS_ENUM_FIELD_TYPE_MASK 0x03
#define IA32_VMX_VMCS_ENUM_FIELD_TYPE(_) (((_) >> 10) & 0x03)
/**
 * [Bit 12] Reserved.
 */
UINT64 Reserved1 : 1;
/**
 * [Bits 14:13] Indicate the field's width.
 */
UINT64 FieldWidth : 2;
#define IA32_VMX_VMCS_ENUM_FIELD_WIDTH_BIT 13
#define IA32_VMX_VMCS_ENUM_FIELD_WIDTH_FLAG 0x6000
#define IA32_VMX_VMCS_ENUM_FIELD_WIDTH_MASK 0x03
#define IA32_VMX_VMCS_ENUM_FIELD_WIDTH(_) (((_) >> 13) & 0x03)
/**
 * [Bits 63:15] Reserved.
 */
UINT64 Reserved2 : 49;
};
UINT64 AsUInt;
} IA32_VMX_VMCS_ENUM_REGISTER;
/**
 * Capability Reporting Register of Secondary Processor-Based VM-Execution Controls.
 *
 * @remarks If ( CPUID.01H:ECX.[5] && IA32_VMX_PROCBASED_CTLS[63] )
 * @see Vol3D[A.3.3(Secondary Processor-Based VM-Execution Controls)]
 * @see Vol3D[24.6.2(Processor-Based VM-Execution Controls)] (reference)
 */
#define IA32_VMX_PROCBASED_CTLS2 0x0000048B
typedef union
{
struct
{
/**
 * @brief Virtualize APIC access
 *
 * [Bit 0] If this control is 1, the logical processor treats specially accesses to
 * the page with the APIC-access address.
 *
 * @see Vol3C[29.4(VIRTUALIZING MEMORY-MAPPED APIC ACCESSES)]
 */
UINT64 VirtualizeApicAccesses : 1;
#define IA32_VMX_PROCBASED_CTLS2_VIRTUALIZE_APIC_ACCESSES_BIT 0
#define IA32_VMX_PROCBASED_CTLS2_VIRTUALIZE_APIC_ACCESSES_FLAG 0x01
#define IA32_VMX_PROCBASED_CTLS2_VIRTUALIZE_APIC_ACCESSES_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS2_VIRTUALIZE_APIC_ACCESSES(_) (((_) >> 0) & 0x01)
/**
 * @brief EPT supported/enabled
 *
 * [Bit 1] If this control is 1, extended page tables (EPT) are enabled.
 *
 * @see Vol3C[28.2(THE EXTENDED PAGE TABLE MECHANISM (EPT))]
 */
UINT64 EnableEpt : 1;
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_EPT_BIT 1
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_EPT_FLAG 0x02
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_EPT_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_EPT(_) (((_) >> 1) & 0x01)
/**
 * @brief Descriptor table instructions cause VM-exits
 *
 * [Bit 2] This control determines whether executions of LGDT, LIDT, LLDT, LTR,
 * SGDT, SIDT, SLDT, and STR cause VM exits.
 */
UINT64 DescriptorTableExiting : 1;
#define IA32_VMX_PROCBASED_CTLS2_DESCRIPTOR_TABLE_EXITING_BIT 2
#define IA32_VMX_PROCBASED_CTLS2_DESCRIPTOR_TABLE_EXITING_FLAG 0x04
#define IA32_VMX_PROCBASED_CTLS2_DESCRIPTOR_TABLE_EXITING_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS2_DESCRIPTOR_TABLE_EXITING(_) (((_) >> 2) & 0x01)
/**
 * @brief RDTSCP supported/enabled
 *
 * [Bit 3] If this control is 0, any execution of RDTSCP causes an invalid-opcode
 * exception (\#UD).
 */
UINT64 EnableRdtscp : 1;
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_RDTSCP_BIT 3
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_RDTSCP_FLAG 0x08
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_RDTSCP_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_RDTSCP(_) (((_) >> 3) & 0x01)
/**
 * @brief Virtualize x2APIC mode
 *
 * [Bit 4] If this control is 1, the logical processor treats specially RDMSR and
 * WRMSR to APIC MSRs (in the range 800H-8FFH).
 *
 * @see Vol3C[29.5(VIRTUALIZING MSR-BASED APIC ACCESSES)]
 */
UINT64 VirtualizeX2ApicMode : 1;
#define IA32_VMX_PROCBASED_CTLS2_VIRTUALIZE_X2APIC_MODE_BIT 4
#define IA32_VMX_PROCBASED_CTLS2_VIRTUALIZE_X2APIC_MODE_FLAG 0x10
#define IA32_VMX_PROCBASED_CTLS2_VIRTUALIZE_X2APIC_MODE_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS2_VIRTUALIZE_X2APIC_MODE(_) (((_) >> 4) & 0x01)
/**
 * @brief VPID supported/enabled
 *
 * [Bit 5] If this control is 1, cached translations of linear addresses are
 * associated with a virtual-processor identifier (VPID).
 *
 * @see Vol3C[28.1(VIRTUAL PROCESSOR IDENTIFIERS (VPIDS))]
 */
UINT64 EnableVpid : 1;
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_VPID_BIT 5
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_VPID_FLAG 0x20
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_VPID_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_VPID(_) (((_) >> 5) & 0x01)
/**
 * @brief VM-exit when executing the WBINVD instruction
 *
 * [Bit 6] This control determines whether executions of WBINVD cause VM exits.
 */
UINT64 WbinvdExiting : 1;
#define IA32_VMX_PROCBASED_CTLS2_WBINVD_EXITING_BIT 6
#define IA32_VMX_PROCBASED_CTLS2_WBINVD_EXITING_FLAG 0x40
#define IA32_VMX_PROCBASED_CTLS2_WBINVD_EXITING_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS2_WBINVD_EXITING(_) (((_) >> 6) & 0x01)
/**
 * @brief Unrestricted guest execution
 *
 * [Bit 7] This control determines whether guest software may run in unpaged
 * protected mode or in real-address mode.
 */
UINT64 UnrestrictedGuest : 1;
#define IA32_VMX_PROCBASED_CTLS2_UNRESTRICTED_GUEST_BIT 7
#define IA32_VMX_PROCBASED_CTLS2_UNRESTRICTED_GUEST_FLAG 0x80
#define IA32_VMX_PROCBASED_CTLS2_UNRESTRICTED_GUEST_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS2_UNRESTRICTED_GUEST(_) (((_) >> 7) & 0x01)
/**
 * @brief APIC register virtualization
 *
 * [Bit 8] If this control is 1, the logical processor virtualizes certain APIC
 * accesses.
 *
 * @see Vol3C[29.4(VIRTUALIZING MEMORY-MAPPED APIC ACCESSES)]
 * @see Vol3C[29.5(VIRTUALIZING MSR-BASED APIC ACCESSES)]
 */
UINT64 ApicRegisterVirtualization : 1;
#define IA32_VMX_PROCBASED_CTLS2_APIC_REGISTER_VIRTUALIZATION_BIT 8
#define IA32_VMX_PROCBASED_CTLS2_APIC_REGISTER_VIRTUALIZATION_FLAG 0x100
#define IA32_VMX_PROCBASED_CTLS2_APIC_REGISTER_VIRTUALIZATION_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS2_APIC_REGISTER_VIRTUALIZATION(_) (((_) >> 8) & 0x01)
/**
 * @brief Virtual-interrupt delivery
 *
 * [Bit 9] This control enables the evaluation and delivery of pending virtual
 * interrupts as well as the emulation of writes to the APIC registers that control
 * interrupt prioritization.
 */
UINT64 VirtualInterruptDelivery : 1;
#define IA32_VMX_PROCBASED_CTLS2_VIRTUAL_INTERRUPT_DELIVERY_BIT 9
#define IA32_VMX_PROCBASED_CTLS2_VIRTUAL_INTERRUPT_DELIVERY_FLAG 0x200
#define IA32_VMX_PROCBASED_CTLS2_VIRTUAL_INTERRUPT_DELIVERY_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS2_VIRTUAL_INTERRUPT_DELIVERY(_) (((_) >> 9) & 0x01)
/**
 * @brief A specified number of pause loops cause a VM-exit
 *
 * [Bit 10] This control determines whether a series of executions of PAUSE can
 * cause a VM exit.
 *
 * @see Vol3C[24.6.13(Controls for PAUSE-Loop Exiting)]
 * @see Vol3C[25.1.3(Instructions That Cause VM Exits Conditionally)]
 */
UINT64 PauseLoopExiting : 1;
#define IA32_VMX_PROCBASED_CTLS2_PAUSE_LOOP_EXITING_BIT 10
#define IA32_VMX_PROCBASED_CTLS2_PAUSE_LOOP_EXITING_FLAG 0x400
#define IA32_VMX_PROCBASED_CTLS2_PAUSE_LOOP_EXITING_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS2_PAUSE_LOOP_EXITING(_) (((_) >> 10) & 0x01)
/**
 * @brief VM-exit when executing RDRAND instructions
 *
 * [Bit 11] This control determines whether executions of RDRAND cause VM exits.
 */
UINT64 RdrandExiting : 1;
#define IA32_VMX_PROCBASED_CTLS2_RDRAND_EXITING_BIT 11
#define IA32_VMX_PROCBASED_CTLS2_RDRAND_EXITING_FLAG 0x800
#define IA32_VMX_PROCBASED_CTLS2_RDRAND_EXITING_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS2_RDRAND_EXITING(_) (((_) >> 11) & 0x01)
/**
 * @brief Enables INVPCID instructions
 *
 * [Bit 12] If this control is 0, any execution of INVPCID causes a \#UD.
 */
UINT64 EnableInvpcid : 1;
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_INVPCID_BIT 12
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_INVPCID_FLAG 0x1000
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_INVPCID_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_INVPCID(_) (((_) >> 12) & 0x01)
/**
 * @brief Enables VMFUNC instructions
 *
 * [Bit 13] Setting this control to 1 enables use of the VMFUNC instruction in VMX
 * non-root operation.
 *
 * @see Vol3C[25.5.5(VM Functions)]
 */
UINT64 EnableVmFunctions : 1;
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_VM_FUNCTIONS_BIT 13
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_VM_FUNCTIONS_FLAG 0x2000
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_VM_FUNCTIONS_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_VM_FUNCTIONS(_) (((_) >> 13) & 0x01)
/**
 * @brief Enables VMCS shadowing
 *
 * [Bit 14] If this control is 1, executions of VMREAD and VMWRITE in VMX non-root
 * operation may access a shadow VMCS (instead of causing VM exits).
 *
 * @see Vol3C[24.10(VMCS TYPES: ORDINARY AND SHADOW)]
 * @see Vol3C[30.3(VMX INSTRUCTIONS)]
 */
UINT64 VmcsShadowing : 1;
#define IA32_VMX_PROCBASED_CTLS2_VMCS_SHADOWING_BIT 14
#define IA32_VMX_PROCBASED_CTLS2_VMCS_SHADOWING_FLAG 0x4000
#define IA32_VMX_PROCBASED_CTLS2_VMCS_SHADOWING_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS2_VMCS_SHADOWING(_) (((_) >> 14) & 0x01)
/**
 * @brief Enables ENCLS VM-exits
 *
 * [Bit 15] If this control is 1, executions of ENCLS consult the ENCLS-exiting
 * bitmap to determine whether the instruction causes a VM exit.
 *
 * @see Vol3C[24.6.16(ENCLS-Exiting Bitmap)]
 * @see Vol3C[25.1.3(Instructions That Cause VM Exits Conditionally)]
 */
UINT64 EnableEnclsExiting : 1;
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_ENCLS_EXITING_BIT 15
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_ENCLS_EXITING_FLAG 0x8000
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_ENCLS_EXITING_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_ENCLS_EXITING(_) (((_) >> 15) & 0x01)
/**
 * @brief VM-exit when executing RDSEED
 *
 * [Bit 16] This control determines whether executions of RDSEED cause VM exits.
 */
UINT64 RdseedExiting : 1;
#define IA32_VMX_PROCBASED_CTLS2_RDSEED_EXITING_BIT 16
#define IA32_VMX_PROCBASED_CTLS2_RDSEED_EXITING_FLAG 0x10000
#define IA32_VMX_PROCBASED_CTLS2_RDSEED_EXITING_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS2_RDSEED_EXITING(_) (((_) >> 16) & 0x01)
/**
 * @brief Enables page-modification logging
 *
 * [Bit 17] If this control is 1, an access to a guest-physical address that sets an
 * EPT dirty bit first adds an entry to the page-modification log.
 *
 * @see Vol3C[28.2.5(Page-Modification Logging)]
 */
UINT64 EnablePml : 1;
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_PML_BIT 17
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_PML_FLAG 0x20000
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_PML_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_PML(_) (((_) >> 17) & 0x01)
/**
 * @brief Controls whether EPT violations may cause virtualization exceptions (\#VE)
 *
 * [Bit 18] If this control is 1, EPT violations may cause virtualization exceptions
 * (\#VE) instead of VM exits.
 *
 * @see Vol3C[25.5.6(Virtualization Exceptions)]
 */
UINT64 EptViolation : 1;
#define IA32_VMX_PROCBASED_CTLS2_EPT_VIOLATION_BIT 18
#define IA32_VMX_PROCBASED_CTLS2_EPT_VIOLATION_FLAG 0x40000
#define IA32_VMX_PROCBASED_CTLS2_EPT_VIOLATION_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS2_EPT_VIOLATION(_) (((_) >> 18) & 0x01)
/**
 * @brief Conceal VMX non-root operation from Intel processor trace (PT)
 *
 * [Bit 19] If this control is 1, Intel Processor Trace suppresses from PIPs an
 * indication that the processor was in VMX non-root operation and omits a VMCS
 * packet from any PSB+ produced in VMX non-root operation.
 *
 * @see Vol3C[35(INTEL(R) PROCESSOR TRACE)]
 */
UINT64 ConcealVmxFromPt : 1;
#define IA32_VMX_PROCBASED_CTLS2_CONCEAL_VMX_FROM_PT_BIT 19
#define IA32_VMX_PROCBASED_CTLS2_CONCEAL_VMX_FROM_PT_FLAG 0x80000
#define IA32_VMX_PROCBASED_CTLS2_CONCEAL_VMX_FROM_PT_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS2_CONCEAL_VMX_FROM_PT(_) (((_) >> 19) & 0x01)
/**
 * @brief Enables XSAVES/XRSTORS instructions
 *
 * [Bit 20] If this control is 0, any execution of XSAVES or XRSTORS causes a \#UD.
 */
UINT64 EnableXsaves : 1;
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_XSAVES_BIT 20
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_XSAVES_FLAG 0x100000
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_XSAVES_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_XSAVES(_) (((_) >> 20) & 0x01)
/**
 * [Bit 21] Reserved.
 */
UINT64 Reserved1 : 1;
/**
 * [Bit 22] If this control is 1, EPT execute permissions are based on whether the
 * linear address being accessed is supervisor mode or user mode.
 *
 * @see Vol3C[28(VMX SUPPORT FOR ADDRESS TRANSLATION)]
 */
UINT64 ModeBasedExecuteControlForEpt : 1;
#define IA32_VMX_PROCBASED_CTLS2_MODE_BASED_EXECUTE_CONTROL_FOR_EPT_BIT 22
#define IA32_VMX_PROCBASED_CTLS2_MODE_BASED_EXECUTE_CONTROL_FOR_EPT_FLAG 0x400000
#define IA32_VMX_PROCBASED_CTLS2_MODE_BASED_EXECUTE_CONTROL_FOR_EPT_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS2_MODE_BASED_EXECUTE_CONTROL_FOR_EPT(_) (((_) >> 22) & 0x01)
/**
 * [Bit 23] If this control is 1, EPT write permissions may be specified at the
 * granularity of 128 bytes.
 *
 * @see Vol3C[28.2.4(Sub-Page Write Permissions)]
 */
UINT64 SubPageWritePermissionsForEpt : 1;
#define IA32_VMX_PROCBASED_CTLS2_SUB_PAGE_WRITE_PERMISSIONS_FOR_EPT_BIT 23
#define IA32_VMX_PROCBASED_CTLS2_SUB_PAGE_WRITE_PERMISSIONS_FOR_EPT_FLAG 0x800000
#define IA32_VMX_PROCBASED_CTLS2_SUB_PAGE_WRITE_PERMISSIONS_FOR_EPT_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS2_SUB_PAGE_WRITE_PERMISSIONS_FOR_EPT(_) (((_) >> 23) & 0x01)
/**
 * [Bit 24] If this control is 1, all output addresses used by Intel Processor Trace
 * are treated as guest-physical addresses and translated using EPT.
 *
 * @see Vol3C[25.5.3(Translation of Guest-Physical Addresses Using EPT)]
 */
UINT64 PtUsesGuestPhysicalAddresses : 1;
#define IA32_VMX_PROCBASED_CTLS2_PT_USES_GUEST_PHYSICAL_ADDRESSES_BIT 24
#define IA32_VMX_PROCBASED_CTLS2_PT_USES_GUEST_PHYSICAL_ADDRESSES_FLAG 0x1000000
#define IA32_VMX_PROCBASED_CTLS2_PT_USES_GUEST_PHYSICAL_ADDRESSES_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS2_PT_USES_GUEST_PHYSICAL_ADDRESSES(_) (((_) >> 24) & 0x01)
/**
 * @brief Use TSC scaling
 *
 * [Bit 25] This control determines whether executions of RDTSC, executions of
 * RDTSCP, and executions of RDMSR that read from the IA32_TIME_STAMP_COUNTER MSR
 * return a value modified by the TSC multiplier field.
 *
 * @see Vol3C[24.6.5(Time-Stamp Counter Offset and Multiplier)]
 * @see Vol3C[25.3(CHANGES TO INSTRUCTION BEHAVIOR IN VMX NON-ROOT OPERATION)]
 */
UINT64 UseTscScaling : 1;
#define IA32_VMX_PROCBASED_CTLS2_USE_TSC_SCALING_BIT 25
#define IA32_VMX_PROCBASED_CTLS2_USE_TSC_SCALING_FLAG 0x2000000
#define IA32_VMX_PROCBASED_CTLS2_USE_TSC_SCALING_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS2_USE_TSC_SCALING(_) (((_) >> 25) & 0x01)
/**
 * @brief Enables TPAUSE/UMONITOR/UMWAIT instructions
 *
 * [Bit 26] If this control is 0, any execution of TPAUSE, UMONITOR, or UMWAIT
 * causes a \#UD.
 */
UINT64 EnableUserWaitPause : 1;
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_USER_WAIT_PAUSE_BIT 26
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_USER_WAIT_PAUSE_FLAG 0x4000000
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_USER_WAIT_PAUSE_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_USER_WAIT_PAUSE(_) (((_) >> 26) & 0x01)
/**
 * [Bit 27] Reserved.
 */
UINT64 Reserved2 : 1;
/**
 * @brief Enables ENCLV VM-exits
 *
 * [Bit 28] If this control is 1, executions of ENCLV consult the ENCLV-exiting
 * bitmap to determine whether the instruction causes a VM exit.
 *
 * @see Vol3C[24.6.17(ENCLV-Exiting Bitmap)]
 * @see Vol3C[25.1.3(Instructions That Cause VM Exits Conditionally)]
 */
UINT64 EnableEnclvExiting : 1;
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_ENCLV_EXITING_BIT 28
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_ENCLV_EXITING_FLAG 0x10000000
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_ENCLV_EXITING_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS2_ENABLE_ENCLV_EXITING(_) (((_) >> 28) & 0x01)
/**
 * [Bits 63:29] Reserved.
 */
UINT64 Reserved3 : 35;
};
UINT64 AsUInt;
} IA32_VMX_PROCBASED_CTLS2_REGISTER;
/**
 * Capability Reporting Register of EPT and VPID.
 *
 * @remarks If ( CPUID.01H:ECX.[5] && IA32_VMX_PROCBASED_CTLS[63] && (IA32_VMX_PROCBASED_CTLS2[33]
 * || IA32_VMX_PROCBASED_CTLS2[37]) )
 * @see Vol3D[A.10(VPID AND EPT CAPABILITIES)]
 * @see Vol3D[A.10(VPID and EPT Capabilities)] (reference)
 */
#define IA32_VMX_EPT_VPID_CAP 0x0000048C
typedef union
{
struct
{
/**
 * [Bit 0] When set to 1, the processor supports execute-only translations by EPT.
 * This support allows software to configure EPT paging-structure entries in which
 * bits 1:0 are clear (indicating that data accesses are not allowed) and bit 2 is
 * set (indicating that instruction fetches are allowed).
 */
UINT64 ExecuteOnlyPages : 1;
#define IA32_VMX_EPT_VPID_CAP_EXECUTE_ONLY_PAGES_BIT 0
#define IA32_VMX_EPT_VPID_CAP_EXECUTE_ONLY_PAGES_FLAG 0x01
#define IA32_VMX_EPT_VPID_CAP_EXECUTE_ONLY_PAGES_MASK 0x01
#define IA32_VMX_EPT_VPID_CAP_EXECUTE_ONLY_PAGES(_) (((_) >> 0) & 0x01)
/**
 * [Bits 5:1] Reserved.
 */
UINT64 Reserved1 : 5;
/**
 * [Bit 6] Indicates support for a page-walk length of 4.
 */
UINT64 PageWalkLength4 : 1;
#define IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_4_BIT 6
#define IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_4_FLAG 0x40
#define IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_4_MASK 0x01
#define IA32_VMX_EPT_VPID_CAP_PAGE_WALK_LENGTH_4(_) (((_) >> 6) & 0x01)
/**
 * [Bit 7] Reserved.
 */
UINT64 Reserved2 : 1;
/**
 * [Bit 8] When set to 1, the logical processor allows software to configure the EPT
 * paging-structure memory type to be uncacheable (UC).
 *
 * @see Vol3C[24.6.11(Extended-Page-Table Pointer (EPTP))]
 */
UINT64 MemoryTypeUncacheable : 1;
#define IA32_VMX_EPT_VPID_CAP_MEMORY_TYPE_UNCACHEABLE_BIT 8
#define IA32_VMX_EPT_VPID_CAP_MEMORY_TYPE_UNCACHEABLE_FLAG 0x100
#define IA32_VMX_EPT_VPID_CAP_MEMORY_TYPE_UNCACHEABLE_MASK 0x01
#define IA32_VMX_EPT_VPID_CAP_MEMORY_TYPE_UNCACHEABLE(_) (((_) >> 8) & 0x01)
/**
 * [Bits 13:9] Reserved.
 */
UINT64 Reserved3 : 5;
/**
 * [Bit 14] When set to 1, the logical processor allows software to configure the
 * EPT paging-structure memory type to be write-back (WB).
 */
UINT64 MemoryTypeWriteBack : 1;
#define IA32_VMX_EPT_VPID_CAP_MEMORY_TYPE_WRITE_BACK_BIT 14
#define IA32_VMX_EPT_VPID_CAP_MEMORY_TYPE_WRITE_BACK_FLAG 0x4000
#define IA32_VMX_EPT_VPID_CAP_MEMORY_TYPE_WRITE_BACK_MASK 0x01
#define IA32_VMX_EPT_VPID_CAP_MEMORY_TYPE_WRITE_BACK(_) (((_) >> 14) & 0x01)
/**
 * [Bit 15] Reserved.
 */
UINT64 Reserved4 : 1;
/**
 * [Bit 16] When set to 1, the logical processor allows software to configure a EPT
 * PDE to map a 2-Mbyte page (by setting bit 7 in the EPT PDE).
 */
UINT64 Pde2MbPages : 1;
#define IA32_VMX_EPT_VPID_CAP_PDE_2MB_PAGES_BIT 16
#define IA32_VMX_EPT_VPID_CAP_PDE_2MB_PAGES_FLAG 0x10000
#define IA32_VMX_EPT_VPID_CAP_PDE_2MB_PAGES_MASK 0x01
#define IA32_VMX_EPT_VPID_CAP_PDE_2MB_PAGES(_) (((_) >> 16) & 0x01)
/**
 * [Bit 17] When set to 1, the logical processor allows software to configure a EPT
 * PDPTE to map a 1-Gbyte page (by setting bit 7 in the EPT PDPTE).
 */
UINT64 Pdpte1GbPages : 1;
#define IA32_VMX_EPT_VPID_CAP_PDPTE_1GB_PAGES_BIT 17
#define IA32_VMX_EPT_VPID_CAP_PDPTE_1GB_PAGES_FLAG 0x20000
#define IA32_VMX_EPT_VPID_CAP_PDPTE_1GB_PAGES_MASK 0x01
#define IA32_VMX_EPT_VPID_CAP_PDPTE_1GB_PAGES(_) (((_) >> 17) & 0x01)
/**
 * [Bits 19:18] Reserved.
 */
UINT64 Reserved5 : 2;
/**
 * [Bit 20] If bit 20 is read as 1, the INVEPT instruction is supported.
 *
 * @see Vol3C[30(VMX INSTRUCTION REFERENCE)]
 * @see Vol3C[28.3.3.1(Operations that Invalidate Cached Mappings)]
 */
UINT64 Invept : 1;
#define IA32_VMX_EPT_VPID_CAP_INVEPT_BIT 20
#define IA32_VMX_EPT_VPID_CAP_INVEPT_FLAG 0x100000
#define IA32_VMX_EPT_VPID_CAP_INVEPT_MASK 0x01
#define IA32_VMX_EPT_VPID_CAP_INVEPT(_) (((_) >> 20) & 0x01)
/**
 * [Bit 21] When set to 1, accessed and dirty flags for EPT are supported.
 *
 * @see Vol3C[28.2.4(Accessed and Dirty Flags for EPT)]
 */
UINT64 EptAccessedAndDirtyFlags : 1;
#define IA32_VMX_EPT_VPID_CAP_EPT_ACCESSED_AND_DIRTY_FLAGS_BIT 21
#define IA32_VMX_EPT_VPID_CAP_EPT_ACCESSED_AND_DIRTY_FLAGS_FLAG 0x200000
#define IA32_VMX_EPT_VPID_CAP_EPT_ACCESSED_AND_DIRTY_FLAGS_MASK 0x01
#define IA32_VMX_EPT_VPID_CAP_EPT_ACCESSED_AND_DIRTY_FLAGS(_) (((_) >> 21) & 0x01)
/**
 * [Bit 22] When set to 1, the processor reports advanced VM-exit information for
 * EPT violations. This reporting is done only if this bit is read as 1.
 *
 * @see Vol3C[27.2.1(Basic VM-Exit Information)]
 */
UINT64 AdvancedVmexitEptViolationsInformation : 1;
#define IA32_VMX_EPT_VPID_CAP_ADVANCED_VMEXIT_EPT_VIOLATIONS_INFORMATION_BIT 22
#define IA32_VMX_EPT_VPID_CAP_ADVANCED_VMEXIT_EPT_VIOLATIONS_INFORMATION_FLAG 0x400000
#define IA32_VMX_EPT_VPID_CAP_ADVANCED_VMEXIT_EPT_VIOLATIONS_INFORMATION_MASK 0x01
#define IA32_VMX_EPT_VPID_CAP_ADVANCED_VMEXIT_EPT_VIOLATIONS_INFORMATION(_) (((_) >> 22) & 0x01)
/**
 * [Bit 23] If bit 23 is read as 1, supervisor shadow-stack control is supported.
 *
 * @see Vol3C[28.3.3.2(EPT Violations)]
 */
UINT64 SupervisorShadowStack : 1;
#define IA32_VMX_EPT_VPID_CAP_SUPERVISOR_SHADOW_STACK_BIT 23
#define IA32_VMX_EPT_VPID_CAP_SUPERVISOR_SHADOW_STACK_FLAG 0x800000
#define IA32_VMX_EPT_VPID_CAP_SUPERVISOR_SHADOW_STACK_MASK 0x01
#define IA32_VMX_EPT_VPID_CAP_SUPERVISOR_SHADOW_STACK(_) (((_) >> 23) & 0x01)
/**
 * [Bit 24] Reserved.
 */
UINT64 Reserved6 : 1;
/**
 * [Bit 25] When set to 1, the single-context INVEPT type is supported.
 *
 * @see Vol3C[30(VMX INSTRUCTION REFERENCE)]
 * @see Vol3C[28.3.3.1(Operations that Invalidate Cached Mappings)]
 */
UINT64 InveptSingleContext : 1;
#define IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT_BIT 25
#define IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT_FLAG 0x2000000
#define IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT_MASK 0x01
#define IA32_VMX_EPT_VPID_CAP_INVEPT_SINGLE_CONTEXT(_) (((_) >> 25) & 0x01)
/**
 * [Bit 26] When set to 1, the all-context INVEPT type is supported.
 *
 * @see Vol3C[30(VMX INSTRUCTION REFERENCE)]
 * @see Vol3C[28.3.3.1(Operations that Invalidate Cached Mappings)]
 */
UINT64 InveptAllContexts : 1;
#define IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS_BIT 26
#define IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS_FLAG 0x4000000
#define IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS_MASK 0x01
#define IA32_VMX_EPT_VPID_CAP_INVEPT_ALL_CONTEXTS(_) (((_) >> 26) & 0x01)
/**
 * [Bits 31:27] Reserved.
 */
UINT64 Reserved7 : 5;
/**
 * [Bit 32] When set to 1, the INVVPID instruction is supported.
 */
UINT64 Invvpid : 1;
#define IA32_VMX_EPT_VPID_CAP_INVVPID_BIT 32
#define IA32_VMX_EPT_VPID_CAP_INVVPID_FLAG 0x100000000
#define IA32_VMX_EPT_VPID_CAP_INVVPID_MASK 0x01
#define IA32_VMX_EPT_VPID_CAP_INVVPID(_) (((_) >> 32) & 0x01)
/**
 * [Bits 39:33] Reserved.
 */
UINT64 Reserved8 : 7;
/**
 * [Bit 40] When set to 1, the individual-address INVVPID type is supported.
 */
UINT64 InvvpidIndividualAddress : 1;
#define IA32_VMX_EPT_VPID_CAP_INVVPID_INDIVIDUAL_ADDRESS_BIT 40
#define IA32_VMX_EPT_VPID_CAP_INVVPID_INDIVIDUAL_ADDRESS_FLAG 0x10000000000
#define IA32_VMX_EPT_VPID_CAP_INVVPID_INDIVIDUAL_ADDRESS_MASK 0x01
#define IA32_VMX_EPT_VPID_CAP_INVVPID_INDIVIDUAL_ADDRESS(_) (((_) >> 40) & 0x01)
/**
 * [Bit 41] When set to 1, the single-context INVVPID type is supported.
 */
UINT64 InvvpidSingleContext : 1;
#define IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_BIT 41
#define IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_FLAG 0x20000000000
#define IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_MASK 0x01
#define IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT(_) (((_) >> 41) & 0x01)
/**
 * [Bit 42] When set to 1, the all-context INVVPID type is supported.
 */
UINT64 InvvpidAllContexts : 1;
#define IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS_BIT 42
#define IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS_FLAG 0x40000000000
#define IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS_MASK 0x01
#define IA32_VMX_EPT_VPID_CAP_INVVPID_ALL_CONTEXTS(_) (((_) >> 42) & 0x01)
/**
 * [Bit 43] When set to 1, the single-context-retaining-globals INVVPID type is
 * supported.
 */
UINT64 InvvpidSingleContextRetainGlobals : 1;
#define IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS_BIT 43
#define IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS_FLAG 0x80000000000
#define IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS_MASK 0x01
#define IA32_VMX_EPT_VPID_CAP_INVVPID_SINGLE_CONTEXT_RETAIN_GLOBALS(_) (((_) >> 43) & 0x01)
/**
 * [Bits 47:44] Reserved.
 */
UINT64 Reserved9 : 4;
/**
 * [Bits 53:48] Enumerate the maximum HLAT prefix size. It is expected that any
 * processor that supports the 1-setting of the "enable HLAT" VM-execution control
 * will enumerate this value as 1.
 *
 * @see Vol3A[4.5.1(Ordinary Paging and HLAT Paging)]
 */
UINT64 MaxHlatPrefixSize : 6;
#define IA32_VMX_EPT_VPID_CAP_MAX_HLAT_PREFIX_SIZE_BIT 48
#define IA32_VMX_EPT_VPID_CAP_MAX_HLAT_PREFIX_SIZE_FLAG 0x3F000000000000
#define IA32_VMX_EPT_VPID_CAP_MAX_HLAT_PREFIX_SIZE_MASK 0x3F
#define IA32_VMX_EPT_VPID_CAP_MAX_HLAT_PREFIX_SIZE(_) (((_) >> 48) & 0x3F)
/**
 * [Bits 63:54] Reserved.
 */
UINT64 Reserved10 : 10;
};
UINT64 AsUInt;
} IA32_VMX_EPT_VPID_CAP_REGISTER;
/**
* @defgroup IA32_VMX_TRUE_CTLS \
* IA32_VMX_TRUE_(x)_CTLS
*
* Capability Reporting Register of Pin-Based VM-Execution Flex Controls, Primary Processor-Based
* VM-Execution Flex Controls, VM-Exit Flex Controls and VM-Entry Flex Controls.
*
* @remarks If ( CPUID.01H:ECX.[5] = 1 && IA32_VMX_BASIC[55] )
* @see Vol3D[A.3.1(Pin-Based VM-Execution Controls)]
* @see Vol3D[A.3.2(Primary Processor-Based VM-Execution Controls)]
* @see Vol3D[A.4(VM-EXIT CONTROLS)]
* @see Vol3D[A.5(VM-ENTRY CONTROLS)]
* @see Vol3D[A.3.1(Pin-Based VMExecution Controls)] (reference)
* @see Vol3D[A.3.2(Primary Processor-Based VM-Execution Controls)] (reference)
* @see Vol3D[A.4(VM-Exit Controls)] (reference)
* @see Vol3D[A.5(VM-Entry Controls)] (reference)
* @{
*/
#define IA32_VMX_TRUE_PINBASED_CTLS 0x0000048D
#define IA32_VMX_TRUE_PROCBASED_CTLS 0x0000048E
#define IA32_VMX_TRUE_EXIT_CTLS 0x0000048F
#define IA32_VMX_TRUE_ENTRY_CTLS 0x00000490
typedef union
{
struct
{
/**
* [Bits 31:0] Indicate the allowed 0-settings of these controls. VM entry allows
* control X to be 0 if bit X in the MSR is cleared to 0; if bit X in the MSR is set
* to 1, VM entry fails if control X is 0.
*/
UINT64 Allowed0Settings : 32;
#define IA32_VMX_TRUE_CTLS_ALLOWED_0_SETTINGS_BIT 0
#define IA32_VMX_TRUE_CTLS_ALLOWED_0_SETTINGS_FLAG 0xFFFFFFFF
#define IA32_VMX_TRUE_CTLS_ALLOWED_0_SETTINGS_MASK 0xFFFFFFFF
#define IA32_VMX_TRUE_CTLS_ALLOWED_0_SETTINGS(_) (((_) >> 0) & 0xFFFFFFFF)
/**
* [Bits 63:32] Indicate the allowed 1-settings of these controls. VM entry allows
* control X to be 1 if bit 32+X in the MSR is set to 1; if bit 32+X in the MSR is
* cleared to 0, VM entry fails if control X is 1.
*/
UINT64 Allowed1Settings : 32;
#define IA32_VMX_TRUE_CTLS_ALLOWED_1_SETTINGS_BIT 32
#define IA32_VMX_TRUE_CTLS_ALLOWED_1_SETTINGS_FLAG 0xFFFFFFFF00000000
#define IA32_VMX_TRUE_CTLS_ALLOWED_1_SETTINGS_MASK 0xFFFFFFFF
#define IA32_VMX_TRUE_CTLS_ALLOWED_1_SETTINGS(_) (((_) >> 32) & 0xFFFFFFFF)
};
/**
* The complete 64-bit raw value of the MSR.
*/
UINT64 AsUInt;
} IA32_VMX_TRUE_CTLS_REGISTER;
/**
* @}
*/
/**
* Capability Reporting Register of VMFunction Controls.
*
* @remarks If ( CPUID.01H:ECX.[5] = 1 && IA32_VMX_BASIC[55] )
* @see Vol3D[A.11(VM FUNCTIONS)]
* @see Vol3D[24.6.14(VM-Function Controls)] (reference)
*/
#define IA32_VMX_VMFUNC 0x00000491
typedef union
{
struct
{
/**
* [Bit 0] The EPTP-switching VM function changes the EPT pointer to a value chosen
* from the EPTP list.
*
* @see Vol3C[25.5.5.3(EPTP Switching)]
*/
UINT64 EptpSwitching : 1;
#define IA32_VMX_VMFUNC_EPTP_SWITCHING_BIT 0
#define IA32_VMX_VMFUNC_EPTP_SWITCHING_FLAG 0x01
#define IA32_VMX_VMFUNC_EPTP_SWITCHING_MASK 0x01
#define IA32_VMX_VMFUNC_EPTP_SWITCHING(_) (((_) >> 0) & 0x01)
/**
* [Bits 63:1] Reserved.
*/
UINT64 Reserved1 : 63;
};
/**
* The complete 64-bit raw value of the MSR.
*/
UINT64 AsUInt;
} IA32_VMX_VMFUNC_REGISTER;
/**
* Capability Reporting Register of Tertiary Processor-Based VM-Execution Controls.
*
* @remarks If ( CPUID.01H:ECX.[5] && IA32_VMX_PROCBASED_CTLS[49] )
* @see Vol3D[A.3.4(Tertiary Processor-Based VM-Execution Controls)]
* @see Vol3D[24.6.2(Processor-Based VM-Execution Controls)] (reference)
*/
#define IA32_VMX_PROCBASED_CTLS3 0x00000492
typedef union
{
struct
{
/**
* @brief Executions of LOADIWKEY cause VM exits
*
* [Bit 0] This control determines whether executions of LOADIWKEY cause VM exits.
*/
UINT64 LoadiwkeyExiting : 1;
#define IA32_VMX_PROCBASED_CTLS3_LOADIWKEY_EXITING_BIT 0
#define IA32_VMX_PROCBASED_CTLS3_LOADIWKEY_EXITING_FLAG 0x01
#define IA32_VMX_PROCBASED_CTLS3_LOADIWKEY_EXITING_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS3_LOADIWKEY_EXITING(_) (((_) >> 0) & 0x01)
/**
* @brief Enables hypervisor-managed linear-address translation
*
* [Bit 1] This control enables hypervisor-managed linear-address translation.
*
* @see Vol3A[4.5.1(Ordinary Paging and HLAT Paging)]
*/
UINT64 EnableHlat : 1;
#define IA32_VMX_PROCBASED_CTLS3_ENABLE_HLAT_BIT 1
#define IA32_VMX_PROCBASED_CTLS3_ENABLE_HLAT_FLAG 0x02
#define IA32_VMX_PROCBASED_CTLS3_ENABLE_HLAT_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS3_ENABLE_HLAT(_) (((_) >> 1) & 0x01)
/**
* @brief If this control is 1, EPT permissions can be specified to allow writes
* only for paging-related control updates
*
* [Bit 2] If this control is 1, EPT permissions can be specified to allow writes
* only for paging-related control updates.
*
* @see Vol3C[28.3.3.2(EPT Violations)]
*/
UINT64 EptPagingWrite : 1;
#define IA32_VMX_PROCBASED_CTLS3_EPT_PAGING_WRITE_BIT 2
#define IA32_VMX_PROCBASED_CTLS3_EPT_PAGING_WRITE_FLAG 0x04
#define IA32_VMX_PROCBASED_CTLS3_EPT_PAGING_WRITE_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS3_EPT_PAGING_WRITE(_) (((_) >> 2) & 0x01)
/**
* [Bit 3] If this control is 1, EPT permissions can be specified to prevent
* accesses using linear addresses verification whose translation has certain
* properties.
*
* @see Vol3C[28.3.3.2(EPT Violations)]
*/
UINT64 GuestPaging : 1;
#define IA32_VMX_PROCBASED_CTLS3_GUEST_PAGING_BIT 3
#define IA32_VMX_PROCBASED_CTLS3_GUEST_PAGING_FLAG 0x08
#define IA32_VMX_PROCBASED_CTLS3_GUEST_PAGING_MASK 0x01
#define IA32_VMX_PROCBASED_CTLS3_GUEST_PAGING(_) (((_) >> 3) & 0x01)
/**
* [Bits 63:4] Reserved.
*/
UINT64 Reserved1 : 60;
};
/**
* The complete 64-bit raw value of the MSR.
*/
UINT64 AsUInt;
} IA32_VMX_PROCBASED_CTLS3_REGISTER;
/**
* Capability Reporting Register of Secondary VM-Exit Controls.
*
* @remarks If ( CPUID.01H:ECX.[5] && IA32_VMX_EXIT_CTLS[63] )
* @see Vol3D[A.4.2(Secondary VM-Exit Controls)]
* @see Vol3C[24.7.1(VM-Exit Controls)] (reference)
*/
#define IA32_VMX_EXIT_CTLS2 0x00000493
typedef union
{
struct
{
/**
* [Bits 63:0] Reserved (no secondary VM-exit controls are defined here).
*/
UINT64 Reserved : 64;
#define IA32_VMX_EXIT_CTLS2_RESERVED_BIT 0
#define IA32_VMX_EXIT_CTLS2_RESERVED_FLAG 0xFFFFFFFFFFFFFFFF
#define IA32_VMX_EXIT_CTLS2_RESERVED_MASK 0xFFFFFFFFFFFFFFFF
#define IA32_VMX_EXIT_CTLS2_RESERVED(_) (((_) >> 0) & 0xFFFFFFFFFFFFFFFF)
};
/**
* The complete 64-bit raw value of the MSR.
*/
UINT64 AsUInt;
} IA32_VMX_EXIT_CTLS2_REGISTER;
/**
* @defgroup IA32_A_PMC \
* IA32_A_PMC(n)
*
* Full Width Writable IA32_PMC(n) Alias.
*
* @remarks (If CPUID.0AH: EAX[15:8] > 0) && IA32_PERF_CAPABILITIES[13] = 1
* @{
*/
/* MSR addresses 4C1H..4C8H: full-width writable aliases for performance counters 0..7. */
#define IA32_A_PMC0 0x000004C1
#define IA32_A_PMC1 0x000004C2
#define IA32_A_PMC2 0x000004C3
#define IA32_A_PMC3 0x000004C4
#define IA32_A_PMC4 0x000004C5
#define IA32_A_PMC5 0x000004C6
#define IA32_A_PMC6 0x000004C7
#define IA32_A_PMC7 0x000004C8
/**
* @}
*/
/**
* Allows software to signal some MCEs to only a single logical processor in the system.
*
* @remarks If IA32_MCG_CAP.LMCE_P = 1
* @see Vol3B[15.3.1.4(IA32_MCG_EXT_CTL MSR)]
*/
#define IA32_MCG_EXT_CTL 0x000004D0
typedef union
{
struct
{
/**
* [Bit 0] LMCE enable. When set, allows some machine check errors to be signaled to
* only a single logical processor (local machine check) rather than broadcast.
*/
UINT64 LmceEn : 1;
#define IA32_MCG_EXT_CTL_LMCE_EN_BIT 0
#define IA32_MCG_EXT_CTL_LMCE_EN_FLAG 0x01
#define IA32_MCG_EXT_CTL_LMCE_EN_MASK 0x01
#define IA32_MCG_EXT_CTL_LMCE_EN(_) (((_) >> 0) & 0x01)
/**
* [Bits 63:1] Reserved.
*/
UINT64 Reserved1 : 63;
};
/**
* The complete 64-bit raw value of the MSR.
*/
UINT64 AsUInt;
} IA32_MCG_EXT_CTL_REGISTER;
/**
* @brief Status and SVN Threshold of SGX Support for ACM (RO)
*
* Intel SGX only allows launching ACMs with an Intel SGX SVN that is at the same level or higher
* than the expected Intel SGX SVN. The expected Intel SGX SVN is specified by BIOS and locked down
* by the processor on the first successful execution of an Intel SGX instruction that doesn't
* return an error code. Intel SGX provides interfaces for system software to discover whether a non
* faulting Intel SGX instruction has been executed, and evaluate the suitability of the Intel SGX
* SVN value of any ACM that is expected to be launched by the OS or the VMM.
*
* @remarks If CPUID.(EAX=07H, ECX=0H): EBX[2] = 1
* @see Vol3D[41.11.3(Interactions with Authenticated Code Modules (ACMs))] (reference)
*/
#define IA32_SGX_SVN_STATUS 0x00000500
typedef union
{
struct
{
/**
* [Bit 0] - If 1, indicates that a non-faulting Intel SGX instruction has been
* executed, consequently, launching a properly signed ACM but with Intel SGX SVN
* value less than the BIOS specified Intel SGX SVN threshold would lead to an TXT
* shutdown.
* - If 0, indicates that the processor will allow a properly signed ACM to launch
* irrespective of the Intel SGX SVN value of the ACM.
*
* @see Vol3D[41.11.3(Interactions with Authenticated Code Modules (ACMs))]
*/
UINT64 Lock : 1;
#define IA32_SGX_SVN_STATUS_LOCK_BIT 0
#define IA32_SGX_SVN_STATUS_LOCK_FLAG 0x01
#define IA32_SGX_SVN_STATUS_LOCK_MASK 0x01
#define IA32_SGX_SVN_STATUS_LOCK(_) (((_) >> 0) & 0x01)
/**
* [Bits 15:1] Reserved.
*/
UINT64 Reserved1 : 15;
/**
* @brief Reflects the expected threshold of Intel SGX SVN for the SINIT ACM
*
* [Bits 23:16] - If CPUID.01H:ECX.SMX = 1, this field reflects the expected
* threshold of Intel SGX SVN for the SINIT ACM.
* - If CPUID.01H:ECX.SMX = 0, this field is reserved (0).
*
* @see Vol3D[41.11.3(Interactions with Authenticated Code Modules (ACMs))]
*/
UINT64 SgxSvnSinit : 8;
#define IA32_SGX_SVN_STATUS_SGX_SVN_SINIT_BIT 16
#define IA32_SGX_SVN_STATUS_SGX_SVN_SINIT_FLAG 0xFF0000
#define IA32_SGX_SVN_STATUS_SGX_SVN_SINIT_MASK 0xFF
#define IA32_SGX_SVN_STATUS_SGX_SVN_SINIT(_) (((_) >> 16) & 0xFF)
/**
* [Bits 63:24] Reserved.
*/
UINT64 Reserved2 : 40;
};
/**
* The complete 64-bit raw value of the MSR.
*/
UINT64 AsUInt;
} IA32_SGX_SVN_STATUS_REGISTER;
/**
* Trace Output Base Register.
*
* @remarks If ( (CPUID.(EAX=07H, ECX=0):EBX[25] = 1) && ( (CPUID.(EAX=14H,ECX=0): ECX[0] = 1) ||
* (CPUID.(EAX=14H,ECX=0):ECX[2] = 1) ) )
* @see Vol3C[35.2.7.7(IA32_RTIT_OUTPUT_BASE MSR)] (reference)
*/
#define IA32_RTIT_OUTPUT_BASE 0x00000560
typedef union
{
struct
{
/**
* [Bits 6:0] Reserved.
*/
UINT64 Reserved1 : 7;
/**
* @brief Base physical address
*
* [Bits 47:7] The base physical address. How this address is used depends on the
* value of IA32_RTIT_CTL.ToPA:
* - 0: This is the base physical address of a single, contiguous physical output
* region. This could be mapped to DRAM or to MMIO, depending on the value. The base
* address should be aligned with the size of the region, such that none of the 1s
* in the mask value overlap with 1s in the base address. If the base is not
* aligned, an operational error will result.
* - 1: The base physical address of the current ToPA table. The address must be 4K
* aligned. Writing an address in which bits 11:7 are non-zero will not cause a
* \#GP, but an operational error will be signaled once TraceEn is set.
*
* @see Vol3C[35.2.7.8(IA32_RTIT_OUTPUT_MASK_PTRS MSR)]
* @see Vol3C[35.3.9(Operational Errors)]
* @see Vol3C[35.2.6.2(Table of Physical Addresses (ToPA))]
*/
UINT64 BasePhysicalAddress : 41;
#define IA32_RTIT_OUTPUT_BASE_BASE_PHYSICAL_ADDRESS_BIT 7
#define IA32_RTIT_OUTPUT_BASE_BASE_PHYSICAL_ADDRESS_FLAG 0xFFFFFFFFFF80
#define IA32_RTIT_OUTPUT_BASE_BASE_PHYSICAL_ADDRESS_MASK 0x1FFFFFFFFFF
#define IA32_RTIT_OUTPUT_BASE_BASE_PHYSICAL_ADDRESS(_) (((_) >> 7) & 0x1FFFFFFFFFF)
/**
* [Bits 63:48] Reserved.
*/
UINT64 Reserved2 : 16;
};
/**
* The complete 64-bit raw value of the MSR.
*/
UINT64 AsUInt;
} IA32_RTIT_OUTPUT_BASE_REGISTER;
/**
* Trace Output Mask Pointers Register.
*
* @remarks If ( (CPUID.(EAX=07H, ECX=0):EBX[25] = 1) && ( (CPUID.(EAX=14H,ECX=0):ECX[0] = 1) ||
* (CPUID.(EAX=14H,ECX=0):ECX[2] = 1) ) )
* @see Vol3C[35.2.7.8(IA32_RTIT_OUTPUT_MASK_PTRS MSR)] (reference)
*/
#define IA32_RTIT_OUTPUT_MASK_PTRS 0x00000561
typedef union
{
struct
{
/**
* [Bits 6:0] Forced to 1, writes are ignored.
*/
UINT64 LowerMask : 7;
#define IA32_RTIT_OUTPUT_MASK_PTRS_LOWER_MASK_BIT 0
#define IA32_RTIT_OUTPUT_MASK_PTRS_LOWER_MASK_FLAG 0x7F
#define IA32_RTIT_OUTPUT_MASK_PTRS_LOWER_MASK_MASK 0x7F
#define IA32_RTIT_OUTPUT_MASK_PTRS_LOWER_MASK(_) (((_) >> 0) & 0x7F)
/**
* @brief MaskOrTableOffset
*
* [Bits 31:7] The use of this field depends on the value of IA32_RTIT_CTL.ToPA:
* - 0: This field holds bits 31:7 of the mask value for the single, contiguous
* physical output region. The size of this field indicates that regions can be of
* size 128B up to 4GB. This value (combined with the lower 7 bits, which are
* reserved to 1) will be ANDed with the OutputOffset field to determine the next
* write address. All 1s in this field should be consecutive and starting at bit 7,
* otherwise the region will not be contiguous, and an operational error will be
* signaled when TraceEn is set.
* - 1: This field holds bits 27:3 of the offset pointer into the current ToPA
* table. This value can be added to the IA32_RTIT_OUTPUT_BASE value to produce a
* pointer to the current ToPA table entry, which itself is a pointer to the current
* output region. In this scenario, the lower 7 reserved bits are ignored. This
* field supports tables up to 256 MBytes in size.
*
* @see Vol3C[35.3.9(Operational Errors)]
*/
UINT64 MaskOrTableOffset : 25;
#define IA32_RTIT_OUTPUT_MASK_PTRS_MASK_OR_TABLE_OFFSET_BIT 7
#define IA32_RTIT_OUTPUT_MASK_PTRS_MASK_OR_TABLE_OFFSET_FLAG 0xFFFFFF80
#define IA32_RTIT_OUTPUT_MASK_PTRS_MASK_OR_TABLE_OFFSET_MASK 0x1FFFFFF
#define IA32_RTIT_OUTPUT_MASK_PTRS_MASK_OR_TABLE_OFFSET(_) (((_) >> 7) & 0x1FFFFFF)
/**
* @brief Output Offset
*
* [Bits 63:32] The use of this field depends on the value of IA32_RTIT_CTL.ToPA:
* - 0: This is bits 31:0 of the offset pointer into the single, contiguous physical
* output region. This value will be added to the IA32_RTIT_OUTPUT_BASE value to
* form the physical address at which the next byte of packet output data will be
* written. This value must be less than or equal to the MaskOrTableOffset field,
* otherwise an operational error will be signaled when TraceEn is set.
* - 1: This field holds bits 31:0 of the offset pointer into the current ToPA
* output region. This value will be added to the output region base field, found in
* the current ToPA table entry, to form the physical address at which the next byte
* of trace output data will be written. This value must be less than the ToPA entry
* size, otherwise an operational error will be signaled when TraceEn is set.
*
* @see Vol3C[35.3.9(Operational Errors)]
*/
UINT64 OutputOffset : 32;
#define IA32_RTIT_OUTPUT_MASK_PTRS_OUTPUT_OFFSET_BIT 32
#define IA32_RTIT_OUTPUT_MASK_PTRS_OUTPUT_OFFSET_FLAG 0xFFFFFFFF00000000
#define IA32_RTIT_OUTPUT_MASK_PTRS_OUTPUT_OFFSET_MASK 0xFFFFFFFF
#define IA32_RTIT_OUTPUT_MASK_PTRS_OUTPUT_OFFSET(_) (((_) >> 32) & 0xFFFFFFFF)
};
/**
* The complete 64-bit raw value of the MSR.
*/
UINT64 AsUInt;
} IA32_RTIT_OUTPUT_MASK_PTRS_REGISTER;
/**
* Trace Control Register.
*
* @remarks If (CPUID.(EAX=07H, ECX=0):EBX[25] = 1)
* @see Vol3C[35.2.7.2(IA32_RTIT_CTL MSR)] (reference)
*/
#define IA32_RTIT_CTL 0x00000570
typedef union
{
struct
{
/**
* @brief TraceEn
*
* [Bit 0] If 1, enables tracing; else tracing is disabled.
* When this bit transitions from 1 to 0, all buffered packets are flushed out of
* internal buffers. A further store, fence, or architecturally serializing
* instruction may be required to ensure that packet data can be observed at the
* trace endpoint. Note that the processor will clear this bit on \#SMI (Section)
* and warm reset. Other MSR bits of IA32_RTIT_CTL (and other trace configuration
* MSRs) are not impacted by these events.
*
* @see Vol3C[35.2.7.3(Enabling and Disabling Packet Generation with TraceEn)]
*/
UINT64 TraceEnabled : 1;
#define IA32_RTIT_CTL_TRACE_ENABLED_BIT 0
#define IA32_RTIT_CTL_TRACE_ENABLED_FLAG 0x01
#define IA32_RTIT_CTL_TRACE_ENABLED_MASK 0x01
#define IA32_RTIT_CTL_TRACE_ENABLED(_) (((_) >> 0) & 0x01)
/**
* @brief CYCEn
*
* [Bit 1] - 0: Disables CYC Packet.
* - 1: Enables CYC Packet.
*
* @remarks If CPUID.(EAX=14H, ECX=0):EBX.CPSB_CAM[bit 1] = 0
* @see Vol3C[35.4.2.14(Cycle Count (CYC) Packet)]
*/
UINT64 CycEnabled : 1;
#define IA32_RTIT_CTL_CYC_ENABLED_BIT 1
#define IA32_RTIT_CTL_CYC_ENABLED_FLAG 0x02
#define IA32_RTIT_CTL_CYC_ENABLED_MASK 0x01
#define IA32_RTIT_CTL_CYC_ENABLED(_) (((_) >> 1) & 0x01)
/**
* @brief OS
*
* [Bit 2] - 0: Packet generation is disabled when CPL = 0.
* - 1: Packet generation may be enabled when CPL = 0.
*/
UINT64 Os : 1;
#define IA32_RTIT_CTL_OS_BIT 2
#define IA32_RTIT_CTL_OS_FLAG 0x04
#define IA32_RTIT_CTL_OS_MASK 0x01
#define IA32_RTIT_CTL_OS(_) (((_) >> 2) & 0x01)
/**
* @brief User
*
* [Bit 3] - 0: Packet generation is disabled when CPL > 0.
* - 1: Packet generation may be enabled when CPL > 0.
*/
UINT64 User : 1;
#define IA32_RTIT_CTL_USER_BIT 3
#define IA32_RTIT_CTL_USER_FLAG 0x08
#define IA32_RTIT_CTL_USER_MASK 0x01
#define IA32_RTIT_CTL_USER(_) (((_) >> 3) & 0x01)
/**
* @brief PwrEvtEn
*
* [Bit 4] - 0: Power Event Trace packets are disabled.
* - 1: Power Event Trace packets are enabled.
*
* @see Vol3C[35.2.3(Power Event Tracing)]
*/
UINT64 PowerEventTraceEnabled : 1;
#define IA32_RTIT_CTL_POWER_EVENT_TRACE_ENABLED_BIT 4
#define IA32_RTIT_CTL_POWER_EVENT_TRACE_ENABLED_FLAG 0x10
#define IA32_RTIT_CTL_POWER_EVENT_TRACE_ENABLED_MASK 0x01
#define IA32_RTIT_CTL_POWER_EVENT_TRACE_ENABLED(_) (((_) >> 4) & 0x01)
/**
* @brief FUPonPTW
*
* [Bit 5] - 0: PTW packets are not followed by FUPs.
* - 1: PTW packets are followed by FUPs.
*/
UINT64 FupOnPtw : 1;
#define IA32_RTIT_CTL_FUP_ON_PTW_BIT 5
#define IA32_RTIT_CTL_FUP_ON_PTW_FLAG 0x20
#define IA32_RTIT_CTL_FUP_ON_PTW_MASK 0x01
#define IA32_RTIT_CTL_FUP_ON_PTW(_) (((_) >> 5) & 0x01)
/**
* @brief FabricEn
*
* [Bit 6] - 0: Trace output is directed to the memory subsystem, mechanism depends
* on IA32_RTIT_CTL.ToPA.
* - 1: Trace output is directed to the trace transport subsystem,
* IA32_RTIT_CTL.ToPA is ignored.
*
* @remarks If (CPUID.(EAX=07H, ECX=0):ECX[3] = 1) Reserved if CPUID.(EAX=14H,
* ECX=0):ECX[bit 3] = 0
*/
UINT64 FabricEnabled : 1;
#define IA32_RTIT_CTL_FABRIC_ENABLED_BIT 6
#define IA32_RTIT_CTL_FABRIC_ENABLED_FLAG 0x40
#define IA32_RTIT_CTL_FABRIC_ENABLED_MASK 0x01
#define IA32_RTIT_CTL_FABRIC_ENABLED(_) (((_) >> 6) & 0x01)
/**
* @brief CR3 filter
*
* [Bit 7] - 0: Disables CR3 filtering.
* - 1: Enables CR3 filtering.
*/
UINT64 Cr3Filter : 1;
#define IA32_RTIT_CTL_CR3_FILTER_BIT 7
#define IA32_RTIT_CTL_CR3_FILTER_FLAG 0x80
#define IA32_RTIT_CTL_CR3_FILTER_MASK 0x01
#define IA32_RTIT_CTL_CR3_FILTER(_) (((_) >> 7) & 0x01)
/**
* @brief ToPA
*
* [Bit 8] - 0: Single-range output scheme enabled.
* - 1: ToPA output scheme enabled.
*
* @remarks 0: If CPUID.(EAX=14H, ECX=0):ECX.SNGLRGNOUT[bit 2] = 1 and
* IA32_RTIT_CTL.FabricEn=0 1: If CPUID.(EAX=14H, ECX=0):ECX.TOPA[bit 0] = 1, and
* IA32_RTIT_CTL.FabricEn=0 WRMSR to IA32_RTIT_CTL that sets TraceEn but clears this
* bit and FabricEn would cause \#GP: If CPUID.(EAX=14H, ECX=0):ECX.SNGLRGNOUT[bit
* 2] = 0 WRMSR to IA32_RTIT_CTL that sets this bit causes \#GP: If CPUID.(EAX=14H,
* ECX=0):ECX.TOPA[bit 0] = 0
* @see Vol3C[35.2.6.2(Table of Physical Addresses (ToPA))]
*/
UINT64 Topa : 1;
#define IA32_RTIT_CTL_TOPA_BIT 8
#define IA32_RTIT_CTL_TOPA_FLAG 0x100
#define IA32_RTIT_CTL_TOPA_MASK 0x01
#define IA32_RTIT_CTL_TOPA(_) (((_) >> 8) & 0x01)
/**
* @brief MTCEn
*
* [Bit 9] - 0: Disables MTC Packet.
* - 1: Enables MTC Packet.
*
* @remarks If (CPUID.(EAX=07H, ECX=0):EBX[3] = 1) Reserved if CPUID.(EAX=14H,
* ECX=0):EBX.MTC[bit 3] = 0
* @see Vol3C[35.4.2.16(Overflow (OVF) Packet)]
*/
UINT64 MtcEnabled : 1;
#define IA32_RTIT_CTL_MTC_ENABLED_BIT 9
#define IA32_RTIT_CTL_MTC_ENABLED_FLAG 0x200
#define IA32_RTIT_CTL_MTC_ENABLED_MASK 0x01
#define IA32_RTIT_CTL_MTC_ENABLED(_) (((_) >> 9) & 0x01)
/**
* @brief TSCEn
*
* [Bit 10] - 0: Disable TSC packets.
* - 1: Enable TSC packets.
*
* @see Vol3C[35.4.2.11(Timestamp Counter (TSC) Packet)]
*/
UINT64 TscEnabled : 1;
#define IA32_RTIT_CTL_TSC_ENABLED_BIT 10
#define IA32_RTIT_CTL_TSC_ENABLED_FLAG 0x400
#define IA32_RTIT_CTL_TSC_ENABLED_MASK 0x01
#define IA32_RTIT_CTL_TSC_ENABLED(_) (((_) >> 10) & 0x01)
/**
* @brief DisRETC
*
* [Bit 11] - 0: Enable RET compression.
* - 1: Disable RET compression.
*
* @see Vol3C[35.2.1.2(Indirect Transfer COFI)]
*/
UINT64 RetCompressionDisabled : 1;
#define IA32_RTIT_CTL_RET_COMPRESSION_DISABLED_BIT 11
#define IA32_RTIT_CTL_RET_COMPRESSION_DISABLED_FLAG 0x800
#define IA32_RTIT_CTL_RET_COMPRESSION_DISABLED_MASK 0x01
#define IA32_RTIT_CTL_RET_COMPRESSION_DISABLED(_) (((_) >> 11) & 0x01)
/**
* @brief PTWEn
*
* [Bit 12] - 0: PTWRITE packet generation disabled.
* - 1: PTWRITE packet generation enabled.
*/
UINT64 PtwEnabled : 1;
#define IA32_RTIT_CTL_PTW_ENABLED_BIT 12
#define IA32_RTIT_CTL_PTW_ENABLED_FLAG 0x1000
#define IA32_RTIT_CTL_PTW_ENABLED_MASK 0x01
#define IA32_RTIT_CTL_PTW_ENABLED(_) (((_) >> 12) & 0x01)
/**
* @brief BranchEn
*
* [Bit 13] - 0: Disable COFI-based packets.
* - 1: Enable COFI-based packets: FUP, TIP, TIP.PGE, TIP.PGD, TNT, MODE.Exec,
* MODE.TSX.
*
* @see Vol3C[35.2.5.4(Branch Enable (BranchEn))]
*/
UINT64 BranchEnabled : 1;
#define IA32_RTIT_CTL_BRANCH_ENABLED_BIT 13
#define IA32_RTIT_CTL_BRANCH_ENABLED_FLAG 0x2000
#define IA32_RTIT_CTL_BRANCH_ENABLED_MASK 0x01
#define IA32_RTIT_CTL_BRANCH_ENABLED(_) (((_) >> 13) & 0x01)
/**
* @brief MTCFreq
*
* [Bits 17:14] Defines MTC packet Frequency, which is based on the core crystal
* clock, or Always Running Timer (ART). MTC will be sent each time the selected ART
* bit toggles. The following Encodings are defined: 0: ART(0), 1: ART(1), 2:
* ART(2), 3: ART(3), 4: ART(4), 5: ART(5), 6: ART(6), 7: ART(7), 8: ART(8), 9:
* ART(9), 10: ART(10), 11: ART(11), 12: ART(12), 13: ART(13), 14: ART(14), 15:
* ART(15)
*
* @remarks If (CPUID.(EAX=07H, ECX=0):EBX[3] = 1) Reserved if CPUID.(EAX=14H,
* ECX=0):EBX.MTC[bit 3] = 0
* @see Vol3C[35.3.1(Detection of Intel Processor Trace and Capability Enumeration)]
*/
UINT64 MtcFrequency : 4;
#define IA32_RTIT_CTL_MTC_FREQUENCY_BIT 14
#define IA32_RTIT_CTL_MTC_FREQUENCY_FLAG 0x3C000
#define IA32_RTIT_CTL_MTC_FREQUENCY_MASK 0x0F
#define IA32_RTIT_CTL_MTC_FREQUENCY(_) (((_) >> 14) & 0x0F)
/**
* [Bit 18] Reserved.
*/
UINT64 Reserved1 : 1;
/**
* @brief CYCThresh
*
* [Bits 22:19] CYC packet threshold. CYC packets will be sent with the first
* eligible packet after N cycles have passed since the last CYC packet. If
* CycThresh is 0 then N=0, otherwise N is defined as 2(CycThresh-1). The following
* Encodings are defined: 0: 0, 1: 1, 2: 2, 3: 4, 4: 8, 5: 16, 6: 32, 7: 64, 8: 128,
* 9: 256, 10: 512, 11: 1024, 12: 2048, 13: 4096, 14: 8192, 15: 16384
*
* @remarks If (CPUID.(EAX=07H, ECX=0):EBX[1] = 1) Reserved if CPUID.(EAX=14H,
* ECX=0):EBX.CPSB_CAM[bit 1] = 0
* @see Vol3C[35.3.6(Cycle-Accurate Mode)]
* @see Vol3C[35.3.1(Detection of Intel Processor Trace and Capability Enumeration)]
*/
UINT64 CycThreshold : 4;
#define IA32_RTIT_CTL_CYC_THRESHOLD_BIT 19
#define IA32_RTIT_CTL_CYC_THRESHOLD_FLAG 0x780000
#define IA32_RTIT_CTL_CYC_THRESHOLD_MASK 0x0F
#define IA32_RTIT_CTL_CYC_THRESHOLD(_) (((_) >> 19) & 0x0F)
/**
* [Bit 23] Reserved.
*/
UINT64 Reserved2 : 1;
/**
* @brief PSBFreq
*
* [Bits 27:24] Indicates the frequency of PSB packets. PSB packet frequency is
* based on the number of Intel PT packet bytes output, so this field allows the
* user to determine the increment of IA32_IA32_RTIT_STATUS.PacketByteCnt that
* should cause a PSB to be generated. Note that PSB insertion is not precise, but
* the average output bytes per PSB should approximate the SW selected period. The
* following Encodings are defined: 0: 2K, 1: 4K, 2: 8K, 3: 16K, 4: 32K, 5: 64K, 6:
* 128K, 7: 256K, 8: 512K, 9: 1M, 10: 2M, 11: 4M, 12: 8M, 13: 16M, 14: 32M, 15: 64M
*
* @remarks If (CPUID.(EAX=07H, ECX=0):EBX[1] = 1) Reserved if CPUID.(EAX=14H,
* ECX=0):EBX.CPSB_CAM[bit 1] = 0
* @see Vol3C[35.3.1(Detection of Intel Processor Trace and Capability Enumeration)]
*/
UINT64 PsbFrequency : 4;
#define IA32_RTIT_CTL_PSB_FREQUENCY_BIT 24
#define IA32_RTIT_CTL_PSB_FREQUENCY_FLAG 0xF000000
#define IA32_RTIT_CTL_PSB_FREQUENCY_MASK 0x0F
#define IA32_RTIT_CTL_PSB_FREQUENCY(_) (((_) >> 24) & 0x0F)
/**
* [Bits 31:28] Reserved.
*/
UINT64 Reserved3 : 4;
/**
* @brief ADDR0_CFG
*
* [Bits 35:32] Configures the base/limit register pair IA32_RTIT_ADDR0_A/B based on
* the following encodings:
* - 0: ADDR0 range unused.
* - 1: The [IA32_RTIT_ADDR0_A..IA32_RTIT_ADDR0_B] range defines a FilterEn range.
* FilterEn will only be set when the IP is within this range, though other FilterEn
* ranges can additionally be used.
* - 2: The [IA32_RTIT_ADDR0_A..IA32_RTIT_ADDR0_B] range defines a TraceStop range.
* TraceStop will be asserted if code branches into this range.
* - 3..15: Reserved (\#GP).
*
* @remarks If (CPUID.(EAX=07H, ECX=1):EAX[2:0] > 0) Reserved if CPUID.(EAX=14H,
* ECX=1):EBX.RANGECNT[2:0] >= 0
* @see Vol3C[35.2.4.3(Filtering by IP)]
* @see Vol3C[35.4.2.10(Core:Bus Ratio (CBR) Packet)]
*/
UINT64 Addr0Cfg : 4;
#define IA32_RTIT_CTL_ADDR0_CFG_BIT 32
#define IA32_RTIT_CTL_ADDR0_CFG_FLAG 0xF00000000
#define IA32_RTIT_CTL_ADDR0_CFG_MASK 0x0F
#define IA32_RTIT_CTL_ADDR0_CFG(_) (((_) >> 32) & 0x0F)
/**
* @brief ADDR1_CFG
*
* [Bits 39:36] Configures the base/limit register pair IA32_RTIT_ADDR1_A/B based on
* the following encodings:
* - 0: ADDR1 range unused.
* - 1: The [IA32_RTIT_ADDR1_A..IA32_RTIT_ADDR1_B] range defines a FilterEn range.
* FilterEn will only be set when the IP is within this range, though other FilterEn
* ranges can additionally be used.
* - 2: The [IA32_RTIT_ADDR1_A..IA32_RTIT_ADDR1_B] range defines a TraceStop range.
* TraceStop will be asserted if code branches into this range.
* - 3..15: Reserved (\#GP).
*
* @remarks If (CPUID.(EAX=07H, ECX=1):EAX[2:0] > 1) Reserved if CPUID.(EAX=14H,
* ECX=1):EBX.RANGECNT[2:0] < 2
* @see Vol3C[35.2.4.3(Filtering by IP)]
* @see Vol3C[35.4.2.10(Core:Bus Ratio (CBR) Packet)]
*/
UINT64 Addr1Cfg : 4;
#define IA32_RTIT_CTL_ADDR1_CFG_BIT 36
#define IA32_RTIT_CTL_ADDR1_CFG_FLAG 0xF000000000
#define IA32_RTIT_CTL_ADDR1_CFG_MASK 0x0F
#define IA32_RTIT_CTL_ADDR1_CFG(_) (((_) >> 36) & 0x0F)
/**
* @brief ADDR2_CFG
*
* [Bits 43:40] Configures the base/limit register pair IA32_RTIT_ADDR2_A/B based on
* the following encodings:
* - 0: ADDR2 range unused.
* - 1: The [IA32_RTIT_ADDR2_A..IA32_RTIT_ADDR2_B] range defines a FilterEn range.
* FilterEn will only be set when the IP is within this range, though other FilterEn
* ranges can additionally be used.
* - 2: The [IA32_RTIT_ADDR2_A..IA32_RTIT_ADDR2_B] range defines a TraceStop range.
* TraceStop will be asserted if code branches into this range.
* - 3..15: Reserved (\#GP).
*
* @remarks If (CPUID.(EAX=07H, ECX=1):EAX[2:0] > 2) Reserved if CPUID.(EAX=14H,
* ECX=1):EBX.RANGECNT[2:0] < 3
* @see Vol3C[35.2.4.3(Filtering by IP)]
* @see Vol3C[35.4.2.10(Core:Bus Ratio (CBR) Packet)]
*/
UINT64 Addr2Cfg : 4;
#define IA32_RTIT_CTL_ADDR2_CFG_BIT 40
#define IA32_RTIT_CTL_ADDR2_CFG_FLAG 0xF0000000000
#define IA32_RTIT_CTL_ADDR2_CFG_MASK 0x0F
#define IA32_RTIT_CTL_ADDR2_CFG(_) (((_) >> 40) & 0x0F)
/**
* @brief ADDR3_CFG
*
* [Bits 47:44] Configures the base/limit register pair IA32_RTIT_ADDR3_A/B based on
* the following encodings:
* - 0: ADDR3 range unused.
* - 1: The [IA32_RTIT_ADDR3_A..IA32_RTIT_ADDR3_B] range defines a FilterEn range.
* FilterEn will only be set when the IP is within this range, though other FilterEn
* ranges can additionally be used.
* - 2: The [IA32_RTIT_ADDR3_A..IA32_RTIT_ADDR3_B] range defines a TraceStop range.
* TraceStop will be asserted if code branches into this range.
* - 3..15: Reserved (\#GP).
*
* @remarks If (CPUID.(EAX=07H, ECX=1):EAX[2:0] > 3) Reserved if CPUID.(EAX=14H,
* ECX=1):EBX.RANGECNT[2:0] < 4
* @see Vol3C[35.2.4.3(Filtering by IP)]
* @see Vol3C[35.4.2.10(Core:Bus Ratio (CBR) Packet)]
*/
UINT64 Addr3Cfg : 4;
#define IA32_RTIT_CTL_ADDR3_CFG_BIT 44
#define IA32_RTIT_CTL_ADDR3_CFG_FLAG 0xF00000000000
#define IA32_RTIT_CTL_ADDR3_CFG_MASK 0x0F
#define IA32_RTIT_CTL_ADDR3_CFG(_) (((_) >> 44) & 0x0F)
/**
* [Bits 55:48] Reserved.
*/
UINT64 Reserved4 : 8;
/**
* @brief InjectPsbPmiOnEnable
*
* [Bit 56] - 1: Enables use of IA32_RTIT_STATUS bits PendPSB[6] and PendTopaPMI[7].
* - 0: IA32_RTIT_STATUS bits 6 and 7 are ignored.
*
* @remarks Reserved if CPUID.(EAX=14H, ECX=0):EBX.INJECTPSBPMI[6] = 0
* @see Vol3C[35.2.7.4(IA32_RTIT_STATUS MSR)]
*/
UINT64 InjectPsbPmiOnEnable : 1;
#define IA32_RTIT_CTL_INJECT_PSB_PMI_ON_ENABLE_BIT 56
#define IA32_RTIT_CTL_INJECT_PSB_PMI_ON_ENABLE_FLAG 0x100000000000000
#define IA32_RTIT_CTL_INJECT_PSB_PMI_ON_ENABLE_MASK 0x01
#define IA32_RTIT_CTL_INJECT_PSB_PMI_ON_ENABLE(_) (((_) >> 56) & 0x01)
/**
* [Bits 63:57] Reserved.
*/
UINT64 Reserved5 : 7;
};
/**
* The complete 64-bit raw value of the MSR.
*/
UINT64 AsUInt;
} IA32_RTIT_CTL_REGISTER;
/**
* Tracing Status Register.
*
* @remarks If (CPUID.(EAX=07H, ECX=0):EBX[25] = 1)
*/
#define IA32_RTIT_STATUS 0x00000571
typedef union
{
struct
{
/**
* @brief FilterEn (writes ignored)
*
* [Bit 0] This bit is written by the processor, and indicates that tracing is
* allowed for the current IP. Writes are ignored.
*
* @remarks If (CPUID.(EAX=07H, ECX=0):EBX[2] = 1)
* @see Vol3C[35.2.5.5(Filter Enable (FilterEn))]
*/
UINT64 FilterEnabled : 1;
#define IA32_RTIT_STATUS_FILTER_ENABLED_BIT 0
#define IA32_RTIT_STATUS_FILTER_ENABLED_FLAG 0x01
#define IA32_RTIT_STATUS_FILTER_ENABLED_MASK 0x01
#define IA32_RTIT_STATUS_FILTER_ENABLED(_) (((_) >> 0) & 0x01)
/**
* @brief ContexEn (writes ignored)
*
* [Bit 1] The processor sets this bit to indicate that tracing is allowed for the
* current context. Writes are ignored.
*
* @see Vol3C[35.2.5.3(Context Enable (ContextEn))]
*/
UINT64 ContextEnabled : 1;
#define IA32_RTIT_STATUS_CONTEXT_ENABLED_BIT 1
#define IA32_RTIT_STATUS_CONTEXT_ENABLED_FLAG 0x02
#define IA32_RTIT_STATUS_CONTEXT_ENABLED_MASK 0x01
#define IA32_RTIT_STATUS_CONTEXT_ENABLED(_) (((_) >> 1) & 0x01)
/**
* @brief TriggerEn (writes ignored)
*
* [Bit 2] The processor sets this bit to indicate that tracing is enabled. Writes
* are ignored.
*
* @see Vol3C[35.2.5.2(Trigger Enable (TriggerEn))]
*/
UINT64 TriggerEnabled : 1;
#define IA32_RTIT_STATUS_TRIGGER_ENABLED_BIT 2
#define IA32_RTIT_STATUS_TRIGGER_ENABLED_FLAG 0x04
#define IA32_RTIT_STATUS_TRIGGER_ENABLED_MASK 0x01
#define IA32_RTIT_STATUS_TRIGGER_ENABLED(_) (((_) >> 2) & 0x01)
/**
* [Bit 3] Reserved.
*/
UINT64 Reserved1 : 1;
/**
* @brief Error
*
* [Bit 4] The processor sets this bit to indicate that an operational error has
* been encountered. When this bit is set, TriggerEn is cleared to 0 and packet
* generation is disabled. When TraceEn is cleared, software can write this bit.
* Once it is set, only software can clear it. It is not recommended that software
* ever set this bit, except in cases where it is restoring a prior saved state.
*
* @see Vol3C[35.2.6.2(Table of Physical Addresses (ToPA) | ToPA Errors)]
*/
UINT64 Error : 1;
#define IA32_RTIT_STATUS_ERROR_BIT 4
#define IA32_RTIT_STATUS_ERROR_FLAG 0x10
#define IA32_RTIT_STATUS_ERROR_MASK 0x01
#define IA32_RTIT_STATUS_ERROR(_) (((_) >> 4) & 0x01)
/**
* @brief Stopped
*
* [Bit 5] The processor sets this bit to indicate that a ToPA Stop condition has
* been encountered. When this bit is set, TriggerEn is cleared to 0 and packet
* generation is disabled. When TraceEn is cleared, software can write this bit.
* Once it is set, only software can clear it. It is not recommended that software
* ever set this bit, except in cases where it is restoring a prior saved state.
*
* @see Vol3C[35.2.6.2(Table of Physical Addresses (ToPA) | ToPA STOP)]
*/
UINT64 Stopped : 1;
#define IA32_RTIT_STATUS_STOPPED_BIT 5
#define IA32_RTIT_STATUS_STOPPED_FLAG 0x20
#define IA32_RTIT_STATUS_STOPPED_MASK 0x01
#define IA32_RTIT_STATUS_STOPPED(_) (((_) >> 5) & 0x01)
/**
* @brief Pend PSB
*
* [Bit 6] If IA32_RTIT_CTL.InjectPsbPmiOnEnable[56] = 1, the processor sets this
* bit when the threshold for a PSB+ to be inserted has been reached. The processor
* will clear this bit when the PSB+ has been inserted into the trace. If PendPSB =
* 1 and InjectPsbPmiOnEnable = 1 when IA32_RTIT_CTL.TraceEn[0] transitions from 0
* to 1, a PSB+ will be inserted into the trace.
*
* @remarks If CPUID.(EAX=14H, ECX=0):EBX.INJECTPSBPMI[6] = 1
*/
UINT64 PendPsb : 1;
#define IA32_RTIT_STATUS_PEND_PSB_BIT 6
#define IA32_RTIT_STATUS_PEND_PSB_FLAG 0x40
#define IA32_RTIT_STATUS_PEND_PSB_MASK 0x01
#define IA32_RTIT_STATUS_PEND_PSB(_) (((_) >> 6) & 0x01)
/**
* @brief Pend ToPA PMI
*
* [Bit 7] If IA32_RTIT_CTL.InjectPsbPmiOnEnable[56] = 1, the processor sets this
* bit when the threshold for a ToPA PMI to be inserted has been reached. Software
* should clear this bit once the ToPA PMI has been handled. If PendTopaPMI = 1 and
* InjectPsbPmiOnEnable = 1 when IA32_RTIT_CTL.TraceEn[0] transitions from 0 to 1, a
* PMI will be pended.
*
* @remarks If CPUID.(EAX=14H, ECX=0):EBX.INJECTPSBPMI[6] = 1
* @see Vol3C[35.2.6.2(Table of Physical Addresses (ToPA) | ToPA PMI)]
*/
UINT64 PendTopaPmi : 1;
#define IA32_RTIT_STATUS_PEND_TOPA_PMI_BIT 7
#define IA32_RTIT_STATUS_PEND_TOPA_PMI_FLAG 0x80
#define IA32_RTIT_STATUS_PEND_TOPA_PMI_MASK 0x01
#define IA32_RTIT_STATUS_PEND_TOPA_PMI(_) (((_) >> 7) & 0x01)
/**
* [Bits 31:8] Reserved.
*/
UINT64 Reserved2 : 24;
/**
* @brief PacketByteCnt
*
* [Bits 48:32] This field is written by the processor, and holds a count of packet
* bytes that have been sent out. The processor also uses this field to determine
* when the next PSB packet should be inserted. Note that the processor may clear or
* modify this field at any time while IA32_RTIT_CTL.TraceEn=1. It will have a
* stable value when IA32_RTIT_CTL.TraceEn=0.
*
* @remarks If (CPUID.(EAX=07H, ECX=0):EBX[1] > 3)
* @see Vol3C[35.4.2.17(Packet Stream Boundary (PSB) Packet)]
*/
UINT64 PacketByteCount : 17;
#define IA32_RTIT_STATUS_PACKET_BYTE_COUNT_BIT 32
#define IA32_RTIT_STATUS_PACKET_BYTE_COUNT_FLAG 0x1FFFF00000000
#define IA32_RTIT_STATUS_PACKET_BYTE_COUNT_MASK 0x1FFFF
#define IA32_RTIT_STATUS_PACKET_BYTE_COUNT(_) (((_) >> 32) & 0x1FFFF)
/**
* [Bits 63:49] Reserved.
*/
UINT64 Reserved3 : 15;
};
/**
* The complete 64-bit raw value of the MSR.
*/
UINT64 AsUInt;
} IA32_RTIT_STATUS_REGISTER;
/**
 * @brief Trace Filter CR3 Match Register (R/W)
 *
 * The IA32_RTIT_CR3_MATCH register is compared against CR3 when IA32_RTIT_CTL.CR3Filter is 1. Bits
 * 63:5 hold the CR3 address value to match, bits 4:0 are reserved to 0.
 *
 * @remarks If (CPUID.(EAX=07H, ECX=0):EBX[25] = 1)
 * @see Vol3C[35.2.4.2(Filtering by CR3)]
 * @see Vol3C[35.2.7.6(IA32_RTIT_CR3_MATCH MSR)] (reference)
 */
#define IA32_RTIT_CR3_MATCH 0x00000572
/*
 * Bit-field view of the raw 64-bit MSR value. The field order mirrors the
 * hardware layout (least-significant bit first) and must not be reordered.
 * For each field, the companion macros give its bit offset (_BIT), its
 * in-place bit mask (_FLAG), its post-shift width mask (_MASK), and a
 * function-like extractor that isolates the field from a raw value.
 */
typedef union
{
struct
{
UINT64 Reserved1 : 5;
/**
 * [Bits 63:5] CR3[63:5] value to match.
 */
UINT64 Cr3ValueToMatch : 59;
#define IA32_RTIT_CR3_MATCH_CR3_VALUE_TO_MATCH_BIT 5
#define IA32_RTIT_CR3_MATCH_CR3_VALUE_TO_MATCH_FLAG 0xFFFFFFFFFFFFFFE0
#define IA32_RTIT_CR3_MATCH_CR3_VALUE_TO_MATCH_MASK 0x7FFFFFFFFFFFFFF
#define IA32_RTIT_CR3_MATCH_CR3_VALUE_TO_MATCH(_) (((_) >> 5) & 0x7FFFFFFFFFFFFFF)
};
/* Raw 64-bit value as read from / written to the MSR. */
UINT64 AsUInt;
} IA32_RTIT_CR3_MATCH_REGISTER;
/**
 * @defgroup IA32_RTIT_ADDR \
 * IA32_RTIT_ADDR(x)
 *
 * The role of the IA32_RTIT_ADDRn_A/B register pairs, for each n, is determined by the
 * corresponding ADDRn_CFG fields in IA32_RTIT_CTL. The number of these register pairs is enumerated
 * by CPUID.(EAX=14H, ECX=1):EAX.RANGECNT[2:0].
 *
 * NOTE: the A (range start) and B (range end) MSRs interleave in the index
 * space: ADDRn_A sits at 0x580 + 2n and ADDRn_B at 0x581 + 2n, as listed
 * below.
 *
 * @remarks If (CPUID.(EAX=07H, ECX=1):EAX[2:0] > n)
 * @see Vol3C[35.2.7.2(IA32_RTIT_CTL MSR)]
 * @see Vol3C[35.2.7.5(IA32_RTIT_ADDRn_A and IA32_RTIT_ADDRn_B MSRs)] (reference)
 * @{
 */
/**
 * @defgroup IA32_RTIT_ADDR_A \
 * IA32_RTIT_ADDR(n)_A
 *
 * Region n Start Address.
 *
 * @remarks If (CPUID.(EAX=07H, ECX=1):EAX[2:0] > n)
 * @{
 */
#define IA32_RTIT_ADDR0_A 0x00000580
#define IA32_RTIT_ADDR1_A 0x00000582
#define IA32_RTIT_ADDR2_A 0x00000584
#define IA32_RTIT_ADDR3_A 0x00000586
/**
 * @}
 */
/**
 * @defgroup IA32_RTIT_ADDR_B \
 * IA32_RTIT_ADDR(n)_B
 *
 * Region n End Address.
 *
 * @remarks If (CPUID.(EAX=07H, ECX=1):EAX[2:0] > n)
 * @{
 */
#define IA32_RTIT_ADDR0_B 0x00000581
#define IA32_RTIT_ADDR1_B 0x00000583
#define IA32_RTIT_ADDR2_B 0x00000585
#define IA32_RTIT_ADDR3_B 0x00000587
/**
 * @}
 */
/*
 * Layout shared by every IA32_RTIT_ADDRn_A/B MSR above: a 48-bit virtual
 * address plus a 16-bit SignExt_VA field (the sign extension of the address
 * into bits 63:48). Hardware-defined bit layout — do not reorder fields.
 */
typedef union
{
struct
{
/**
 * [Bits 47:0] Virtual Address.
 */
UINT64 VirtualAddress : 48;
#define IA32_RTIT_ADDR_VIRTUAL_ADDRESS_BIT 0
#define IA32_RTIT_ADDR_VIRTUAL_ADDRESS_FLAG 0xFFFFFFFFFFFF
#define IA32_RTIT_ADDR_VIRTUAL_ADDRESS_MASK 0xFFFFFFFFFFFF
#define IA32_RTIT_ADDR_VIRTUAL_ADDRESS(_) (((_) >> 0) & 0xFFFFFFFFFFFF)
/**
 * [Bits 63:48] SignExt_VA.
 */
UINT64 SignExtVa : 16;
#define IA32_RTIT_ADDR_SIGN_EXT_VA_BIT 48
#define IA32_RTIT_ADDR_SIGN_EXT_VA_FLAG 0xFFFF000000000000
#define IA32_RTIT_ADDR_SIGN_EXT_VA_MASK 0xFFFF
#define IA32_RTIT_ADDR_SIGN_EXT_VA(_) (((_) >> 48) & 0xFFFF)
};
/* Raw 64-bit value as read from / written to the MSR. */
UINT64 AsUInt;
} IA32_RTIT_ADDR_REGISTER;
/**
 * @}
 */
/**
 * DS Save Area. Points to the linear address of the first byte of the DS buffer management area,
 * which is used to manage the BTS and PEBS buffers. Returns:
 * - [63:0] The linear address of the first byte of the DS buffer management area, if IA-32e mode is
 * active.
 * - [31:0] The linear address of the first byte of the DS buffer management area, if not in IA-32e
 * mode.
 * - [63:32] Reserved if not in IA-32e mode.
 *
 * The MSR holds a plain linear address, so no bit-field union type is defined
 * for it.
 *
 * @remarks If CPUID.01H:EDX.DS[21] = 1
 * @see Vol3B[18.6.3.4(Debug Store (DS) Mechanism)]
 */
#define IA32_DS_AREA 0x00000600
/**
 * Configure User Mode CET
 *
 * @remarks - Bits 1:0 are defined if CPUID.(EAX=07H,ECX=0H):ECX.CET_SS[07] = 1. - Bits 5:2 and bits
 * 63:10 are defined if CPUID.(EAX=07H,ECX=0H):EDX.CET_IBT[20] = 1.
 */
#define IA32_U_CET 0x000006A0
/*
 * Bit-field view of the 64-bit MSR value. Hardware-defined layout — do not
 * reorder fields. Bits 1:0 control shadow stacks; bits 5:2 and 63:10 control
 * indirect branch tracking (see the per-field CPUID gating in the remarks
 * above).
 */
typedef union
{
struct
{
/**
 * [Bit 0] When set to 1, enable shadow stacks at CPL3.
 *
 * @see Vol1[18(CONTROL-FLOW ENFORCEMENT TECHNOLOGY (CET))]
 */
UINT64 ShStkEn : 1;
#define IA32_U_CET_SH_STK_EN_BIT 0
#define IA32_U_CET_SH_STK_EN_FLAG 0x01
#define IA32_U_CET_SH_STK_EN_MASK 0x01
#define IA32_U_CET_SH_STK_EN(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] When set to 1, enables the WRSSD/WRSSQ instructions.
 *
 * @see Vol1[18(CONTROL-FLOW ENFORCEMENT TECHNOLOGY (CET))]
 */
UINT64 WrShstkEn : 1;
#define IA32_U_CET_WR_SHSTK_EN_BIT 1
#define IA32_U_CET_WR_SHSTK_EN_FLAG 0x02
#define IA32_U_CET_WR_SHSTK_EN_MASK 0x01
#define IA32_U_CET_WR_SHSTK_EN(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] When set to 1, enables indirect branch tracking
 *
 * @see Vol1[18(CONTROL-FLOW ENFORCEMENT TECHNOLOGY (CET))]
 */
UINT64 EndbrEn : 1;
#define IA32_U_CET_ENDBR_EN_BIT 2
#define IA32_U_CET_ENDBR_EN_FLAG 0x04
#define IA32_U_CET_ENDBR_EN_MASK 0x01
#define IA32_U_CET_ENDBR_EN(_) (((_) >> 2) & 0x01)
/**
 * [Bit 3] Enable legacy compatibility treatment for indirect branch tracking.
 *
 * @see Vol1[18(CONTROL-FLOW ENFORCEMENT TECHNOLOGY (CET))]
 */
UINT64 LegIwEn : 1;
#define IA32_U_CET_LEG_IW_EN_BIT 3
#define IA32_U_CET_LEG_IW_EN_FLAG 0x08
#define IA32_U_CET_LEG_IW_EN_MASK 0x01
#define IA32_U_CET_LEG_IW_EN(_) (((_) >> 3) & 0x01)
/**
 * [Bit 4] When set to 1, enables use of no-track prefix for indirect branch
 * tracking.
 *
 * @see Vol1[18(CONTROL-FLOW ENFORCEMENT TECHNOLOGY (CET))]
 */
UINT64 NoTrackEn : 1;
#define IA32_U_CET_NO_TRACK_EN_BIT 4
#define IA32_U_CET_NO_TRACK_EN_FLAG 0x10
#define IA32_U_CET_NO_TRACK_EN_MASK 0x01
#define IA32_U_CET_NO_TRACK_EN(_) (((_) >> 4) & 0x01)
/**
 * [Bit 5] When set to 1, disables suppression of CET indirect branch tracking on
 * legacy compatibility.
 *
 * @see Vol1[18(CONTROL-FLOW ENFORCEMENT TECHNOLOGY (CET))]
 */
UINT64 SuppressDis : 1;
#define IA32_U_CET_SUPPRESS_DIS_BIT 5
#define IA32_U_CET_SUPPRESS_DIS_FLAG 0x20
#define IA32_U_CET_SUPPRESS_DIS_MASK 0x01
#define IA32_U_CET_SUPPRESS_DIS(_) (((_) >> 5) & 0x01)
UINT64 Reserved1 : 4;
/**
 * [Bit 10] When set to 1, indirect branch tracking is suppressed. This bit can be
 * written to 1 only if TRACKER is written as IDLE.
 *
 * @see Vol1[18(CONTROL-FLOW ENFORCEMENT TECHNOLOGY (CET))]
 */
UINT64 Suppress : 1;
#define IA32_U_CET_SUPPRESS_BIT 10
#define IA32_U_CET_SUPPRESS_FLAG 0x400
#define IA32_U_CET_SUPPRESS_MASK 0x01
#define IA32_U_CET_SUPPRESS(_) (((_) >> 10) & 0x01)
/**
 * [Bit 11] Value of the indirect branch tracking state machine. Values: IDLE (0),
 * WAIT_FOR_ENDBRANCH(1).
 *
 * @see Vol1[18(CONTROL-FLOW ENFORCEMENT TECHNOLOGY (CET))]
 */
UINT64 Tracker : 1;
#define IA32_U_CET_TRACKER_BIT 11
#define IA32_U_CET_TRACKER_FLAG 0x800
#define IA32_U_CET_TRACKER_MASK 0x01
#define IA32_U_CET_TRACKER(_) (((_) >> 11) & 0x01)
/**
 * [Bits 63:12] Linear address bits 63:12 of a legacy code page bitmap used for
 * legacy compatibility when indirect branch tracking is enabled. If the processor
 * does not support Intel 64 architecture, these fields have only 32 bits; bits
 * 63:32 of the MSRs are reserved. On processors that support Intel 64 architecture
 * this value cannot represent a non-canonical address. In protected mode, only 31:0
 * are loaded. The linear address written must be aligned to 8 bytes and bits 2:0
 * must be 0 (hardware requires bits 1:0 to be 0).
 *
 * @see Vol1[18(CONTROL-FLOW ENFORCEMENT TECHNOLOGY (CET))]
 */
UINT64 EbLegBitmapBase : 52;
#define IA32_U_CET_EB_LEG_BITMAP_BASE_BIT 12
#define IA32_U_CET_EB_LEG_BITMAP_BASE_FLAG 0xFFFFFFFFFFFFF000
#define IA32_U_CET_EB_LEG_BITMAP_BASE_MASK 0xFFFFFFFFFFFFF
#define IA32_U_CET_EB_LEG_BITMAP_BASE(_) (((_) >> 12) & 0xFFFFFFFFFFFFF)
};
/* Raw 64-bit value as read from / written to the MSR. */
UINT64 AsUInt;
} IA32_U_CET_REGISTER;
/**
 * Configure Supervisor Mode CET
 *
 * @remarks - Bits 1:0 are defined if CPUID.(EAX=07H,ECX=0H):ECX.CET_SS[07] = 1. - Bits 5:2 and bits
 * 63:10 are defined if CPUID.(EAX=07H,ECX=0H):EDX.CET_IBT[20] = 1.
 */
#define IA32_S_CET 0x000006A2
/*
 * Bit-field view of the 64-bit MSR value. The field layout is identical to
 * IA32_U_CET_REGISTER; this MSR governs supervisor mode (shadow stacks at
 * CPL0 per the Bit 0 description). Hardware-defined layout — do not reorder
 * fields.
 */
typedef union
{
struct
{
/**
 * [Bit 0] When set to 1, enable shadow stacks at CPL0.
 *
 * @see Vol1[18(CONTROL-FLOW ENFORCEMENT TECHNOLOGY (CET))]
 */
UINT64 ShStkEn : 1;
#define IA32_S_CET_SH_STK_EN_BIT 0
#define IA32_S_CET_SH_STK_EN_FLAG 0x01
#define IA32_S_CET_SH_STK_EN_MASK 0x01
#define IA32_S_CET_SH_STK_EN(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] When set to 1, enables the WRSSD/WRSSQ instructions.
 *
 * @see Vol1[18(CONTROL-FLOW ENFORCEMENT TECHNOLOGY (CET))]
 */
UINT64 WrShstkEn : 1;
#define IA32_S_CET_WR_SHSTK_EN_BIT 1
#define IA32_S_CET_WR_SHSTK_EN_FLAG 0x02
#define IA32_S_CET_WR_SHSTK_EN_MASK 0x01
#define IA32_S_CET_WR_SHSTK_EN(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] When set to 1, enables indirect branch tracking
 *
 * @see Vol1[18(CONTROL-FLOW ENFORCEMENT TECHNOLOGY (CET))]
 */
UINT64 EndbrEn : 1;
#define IA32_S_CET_ENDBR_EN_BIT 2
#define IA32_S_CET_ENDBR_EN_FLAG 0x04
#define IA32_S_CET_ENDBR_EN_MASK 0x01
#define IA32_S_CET_ENDBR_EN(_) (((_) >> 2) & 0x01)
/**
 * [Bit 3] Enable legacy compatibility treatment for indirect branch tracking.
 *
 * @see Vol1[18(CONTROL-FLOW ENFORCEMENT TECHNOLOGY (CET))]
 */
UINT64 LegIwEn : 1;
#define IA32_S_CET_LEG_IW_EN_BIT 3
#define IA32_S_CET_LEG_IW_EN_FLAG 0x08
#define IA32_S_CET_LEG_IW_EN_MASK 0x01
#define IA32_S_CET_LEG_IW_EN(_) (((_) >> 3) & 0x01)
/**
 * [Bit 4] When set to 1, enables use of no-track prefix for indirect branch
 * tracking.
 *
 * @see Vol1[18(CONTROL-FLOW ENFORCEMENT TECHNOLOGY (CET))]
 */
UINT64 NoTrackEn : 1;
#define IA32_S_CET_NO_TRACK_EN_BIT 4
#define IA32_S_CET_NO_TRACK_EN_FLAG 0x10
#define IA32_S_CET_NO_TRACK_EN_MASK 0x01
#define IA32_S_CET_NO_TRACK_EN(_) (((_) >> 4) & 0x01)
/**
 * [Bit 5] When set to 1, disables suppression of CET indirect branch tracking on
 * legacy compatibility.
 *
 * @see Vol1[18(CONTROL-FLOW ENFORCEMENT TECHNOLOGY (CET))]
 */
UINT64 SuppressDis : 1;
#define IA32_S_CET_SUPPRESS_DIS_BIT 5
#define IA32_S_CET_SUPPRESS_DIS_FLAG 0x20
#define IA32_S_CET_SUPPRESS_DIS_MASK 0x01
#define IA32_S_CET_SUPPRESS_DIS(_) (((_) >> 5) & 0x01)
UINT64 Reserved1 : 4;
/**
 * [Bit 10] When set to 1, indirect branch tracking is suppressed. This bit can be
 * written to 1 only if TRACKER is written as IDLE.
 *
 * @see Vol1[18(CONTROL-FLOW ENFORCEMENT TECHNOLOGY (CET))]
 */
UINT64 Suppress : 1;
#define IA32_S_CET_SUPPRESS_BIT 10
#define IA32_S_CET_SUPPRESS_FLAG 0x400
#define IA32_S_CET_SUPPRESS_MASK 0x01
#define IA32_S_CET_SUPPRESS(_) (((_) >> 10) & 0x01)
/**
 * [Bit 11] Value of the indirect branch tracking state machine. Values: IDLE (0),
 * WAIT_FOR_ENDBRANCH(1).
 *
 * @see Vol1[18(CONTROL-FLOW ENFORCEMENT TECHNOLOGY (CET))]
 */
UINT64 Tracker : 1;
#define IA32_S_CET_TRACKER_BIT 11
#define IA32_S_CET_TRACKER_FLAG 0x800
#define IA32_S_CET_TRACKER_MASK 0x01
#define IA32_S_CET_TRACKER(_) (((_) >> 11) & 0x01)
/**
 * [Bits 63:12] Linear address bits 63:12 of a legacy code page bitmap used for
 * legacy compatibility when indirect branch tracking is enabled. If the processor
 * does not support Intel 64 architecture, these fields have only 32 bits; bits
 * 63:32 of the MSRs are reserved. On processors that support Intel 64 architecture
 * this value cannot represent a non-canonical address. In protected mode, only 31:0
 * are loaded. The linear address written must be aligned to 8 bytes and bits 2:0
 * must be 0 (hardware requires bits 1:0 to be 0).
 *
 * @see Vol1[18(CONTROL-FLOW ENFORCEMENT TECHNOLOGY (CET))]
 */
UINT64 EbLegBitmapBase : 52;
#define IA32_S_CET_EB_LEG_BITMAP_BASE_BIT 12
#define IA32_S_CET_EB_LEG_BITMAP_BASE_FLAG 0xFFFFFFFFFFFFF000
#define IA32_S_CET_EB_LEG_BITMAP_BASE_MASK 0xFFFFFFFFFFFFF
#define IA32_S_CET_EB_LEG_BITMAP_BASE(_) (((_) >> 12) & 0xFFFFFFFFFFFFF)
};
/* Raw 64-bit value as read from / written to the MSR. */
UINT64 AsUInt;
} IA32_S_CET_REGISTER;
/*
 * Shadow-stack pointer MSRs: IA32_PL0_SSP..IA32_PL3_SSP occupy consecutive
 * indices 6A4H..6A7H, one per privilege level. Each holds a plain linear
 * address, so no bit-field union type is defined for them.
 */
/**
 * Linear address to be loaded into SSP on transition to privilege level 0.
 * If the processor does not support Intel 64 architecture, these fields have only 32 bits; bits
 * 63:32 of the MSRs are reserved. On processors that support Intel 64 architecture this value
 * cannot represent a non-canonical address. In protected mode, only 31:0 are loaded. The linear
 * address written must be aligned to 8 bytes and bits 2:0 must be 0 (hardware requires bits 1:0 to
 * be 0).
 *
 * @remarks If CPUID.(EAX=07H, ECX=0H):ECX.CET_SS[07] = 1
 */
#define IA32_PL0_SSP 0x000006A4
/**
 * Linear address to be loaded into SSP on transition to privilege level 1.
 * If the processor does not support Intel 64 architecture, these fields have only 32 bits; bits
 * 63:32 of the MSRs are reserved. On processors that support Intel 64 architecture this value
 * cannot represent a non-canonical address. In protected mode, only 31:0 are loaded. The linear
 * address written must be aligned to 8 bytes and bits 2:0 must be 0 (hardware requires bits 1:0 to
 * be 0).
 *
 * @remarks If CPUID.(EAX=07H, ECX=0H):ECX.CET_SS[07] = 1
 */
#define IA32_PL1_SSP 0x000006A5
/**
 * Linear address to be loaded into SSP on transition to privilege level 2.
 * If the processor does not support Intel 64 architecture, these fields have only 32 bits; bits
 * 63:32 of the MSRs are reserved. On processors that support Intel 64 architecture this value
 * cannot represent a non-canonical address. In protected mode, only 31:0 are loaded. The linear
 * address written must be aligned to 8 bytes and bits 2:0 must be 0 (hardware requires bits 1:0 to
 * be 0).
 *
 * @remarks If CPUID.(EAX=07H, ECX=0H):ECX.CET_SS[07] = 1
 */
#define IA32_PL2_SSP 0x000006A6
/**
 * Linear address to be loaded into SSP on transition to privilege level 3.
 * If the processor does not support Intel 64 architecture, these fields have only 32 bits; bits
 * 63:32 of the MSRs are reserved. On processors that support Intel 64 architecture this value
 * cannot represent a non-canonical address. In protected mode, only 31:0 are loaded. The linear
 * address written must be aligned to 8 bytes and bits 2:0 must be 0 (hardware requires bits 1:0 to
 * be 0).
 *
 * @remarks If CPUID.(EAX=07H, ECX=0H):ECX.CET_SS[07] = 1
 */
#define IA32_PL3_SSP 0x000006A7
/**
 * Linear address of a table of seven shadow stack pointers that are selected in IA-32e mode using
 * the IST index (when not 0) from the interrupt gate descriptor. This MSR is not present on
 * processors that do not support Intel 64 architecture. This field cannot represent a non-canonical
 * address.
 *
 * @remarks If CPUID.(EAX=07H, ECX=0H):ECX.CET_SS[07] = 1
 */
#define IA32_INTERRUPT_SSP_TABLE_ADDR 0x000006A8
/**
 * TSC Target of Local APIC's TSC Deadline Mode.
 *
 * @remarks If CPUID.01H:ECX.[24] = 1
 */
#define IA32_TSC_DEADLINE 0x000006E0
/**
 * Enable/disable HWP.
 *
 * @remarks If CPUID.06H:EAX.[7] = 1
 */
#define IA32_PM_ENABLE 0x00000770
/*
 * Bit-field view of the 64-bit MSR value: a single enable bit plus reserved
 * bits. Hardware-defined layout — do not reorder fields.
 */
typedef union
{
struct
{
/**
 * [Bit 0] HWP_ENABLE.
 *
 * @remarks If CPUID.06H:EAX.[7] = 1
 * @see Vol3B[14.4.2(Enabling HWP)]
 */
UINT64 HwpEnable : 1;
#define IA32_PM_ENABLE_HWP_ENABLE_BIT 0
#define IA32_PM_ENABLE_HWP_ENABLE_FLAG 0x01
#define IA32_PM_ENABLE_HWP_ENABLE_MASK 0x01
#define IA32_PM_ENABLE_HWP_ENABLE(_) (((_) >> 0) & 0x01)
UINT64 Reserved1 : 63;
};
/* Raw 64-bit value as read from / written to the MSR. */
UINT64 AsUInt;
} IA32_PM_ENABLE_REGISTER;
/**
 * HWP Performance Range Enumeration.
 *
 * @remarks If CPUID.06H:EAX.[7] = 1
 */
#define IA32_HWP_CAPABILITIES 0x00000771
/*
 * Bit-field view of the 64-bit MSR value: four 8-bit performance levels in
 * bits 31:0, upper half reserved. Hardware-defined layout — do not reorder
 * fields.
 */
typedef union
{
struct
{
/**
 * [Bits 7:0] Highest_Performance.
 *
 * @remarks If CPUID.06H:EAX.[7] = 1
 * @see Vol3B[14.4.3(HWP Performance Range and Dynamic Capabilities)]
 */
UINT64 HighestPerformance : 8;
#define IA32_HWP_CAPABILITIES_HIGHEST_PERFORMANCE_BIT 0
#define IA32_HWP_CAPABILITIES_HIGHEST_PERFORMANCE_FLAG 0xFF
#define IA32_HWP_CAPABILITIES_HIGHEST_PERFORMANCE_MASK 0xFF
#define IA32_HWP_CAPABILITIES_HIGHEST_PERFORMANCE(_) (((_) >> 0) & 0xFF)
/**
 * [Bits 15:8] Guaranteed_Performance.
 *
 * @remarks If CPUID.06H:EAX.[7] = 1
 * @see Vol3B[14.4.3(HWP Performance Range and Dynamic Capabilities)]
 */
UINT64 GuaranteedPerformance : 8;
#define IA32_HWP_CAPABILITIES_GUARANTEED_PERFORMANCE_BIT 8
#define IA32_HWP_CAPABILITIES_GUARANTEED_PERFORMANCE_FLAG 0xFF00
#define IA32_HWP_CAPABILITIES_GUARANTEED_PERFORMANCE_MASK 0xFF
#define IA32_HWP_CAPABILITIES_GUARANTEED_PERFORMANCE(_) (((_) >> 8) & 0xFF)
/**
 * [Bits 23:16] Most_Efficient_Performance.
 *
 * @remarks If CPUID.06H:EAX.[7] = 1
 * @see Vol3B[14.4.3(HWP Performance Range and Dynamic Capabilities)]
 */
UINT64 MostEfficientPerformance : 8;
#define IA32_HWP_CAPABILITIES_MOST_EFFICIENT_PERFORMANCE_BIT 16
#define IA32_HWP_CAPABILITIES_MOST_EFFICIENT_PERFORMANCE_FLAG 0xFF0000
#define IA32_HWP_CAPABILITIES_MOST_EFFICIENT_PERFORMANCE_MASK 0xFF
#define IA32_HWP_CAPABILITIES_MOST_EFFICIENT_PERFORMANCE(_) (((_) >> 16) & 0xFF)
/**
 * [Bits 31:24] Lowest_Performance.
 *
 * @remarks If CPUID.06H:EAX.[7] = 1
 * @see Vol3B[14.4.3(HWP Performance Range and Dynamic Capabilities)]
 */
UINT64 LowestPerformance : 8;
#define IA32_HWP_CAPABILITIES_LOWEST_PERFORMANCE_BIT 24
#define IA32_HWP_CAPABILITIES_LOWEST_PERFORMANCE_FLAG 0xFF000000
#define IA32_HWP_CAPABILITIES_LOWEST_PERFORMANCE_MASK 0xFF
#define IA32_HWP_CAPABILITIES_LOWEST_PERFORMANCE(_) (((_) >> 24) & 0xFF)
UINT64 Reserved1 : 32;
};
/* Raw 64-bit value as read from / written to the MSR. */
UINT64 AsUInt;
} IA32_HWP_CAPABILITIES_REGISTER;
/**
 * Power Management Control Hints for All Logical Processors in a Package.
 *
 * @remarks If CPUID.06H:EAX.[11] = 1
 */
#define IA32_HWP_REQUEST_PKG 0x00000772
/*
 * Bit-field view of the 64-bit MSR value. Same field layout as the
 * per-logical-processor IA32_HWP_REQUEST below, except there is no
 * Package_Control bit (bit 42 here falls in Reserved1). Hardware-defined
 * layout — do not reorder fields.
 */
typedef union
{
struct
{
/**
 * [Bits 7:0] Minimum_Performance.
 *
 * @remarks If CPUID.06H:EAX.[11] = 1
 * @see Vol3B[14.4.4(Managing HWP)]
 */
UINT64 MinimumPerformance : 8;
#define IA32_HWP_REQUEST_PKG_MINIMUM_PERFORMANCE_BIT 0
#define IA32_HWP_REQUEST_PKG_MINIMUM_PERFORMANCE_FLAG 0xFF
#define IA32_HWP_REQUEST_PKG_MINIMUM_PERFORMANCE_MASK 0xFF
#define IA32_HWP_REQUEST_PKG_MINIMUM_PERFORMANCE(_) (((_) >> 0) & 0xFF)
/**
 * [Bits 15:8] Maximum_Performance.
 *
 * @remarks If CPUID.06H:EAX.[11] = 1
 * @see Vol3B[14.4.4(Managing HWP)]
 */
UINT64 MaximumPerformance : 8;
#define IA32_HWP_REQUEST_PKG_MAXIMUM_PERFORMANCE_BIT 8
#define IA32_HWP_REQUEST_PKG_MAXIMUM_PERFORMANCE_FLAG 0xFF00
#define IA32_HWP_REQUEST_PKG_MAXIMUM_PERFORMANCE_MASK 0xFF
#define IA32_HWP_REQUEST_PKG_MAXIMUM_PERFORMANCE(_) (((_) >> 8) & 0xFF)
/**
 * [Bits 23:16] Desired_Performance.
 *
 * @remarks If CPUID.06H:EAX.[11] = 1
 * @see Vol3B[14.4.4(Managing HWP)]
 */
UINT64 DesiredPerformance : 8;
#define IA32_HWP_REQUEST_PKG_DESIRED_PERFORMANCE_BIT 16
#define IA32_HWP_REQUEST_PKG_DESIRED_PERFORMANCE_FLAG 0xFF0000
#define IA32_HWP_REQUEST_PKG_DESIRED_PERFORMANCE_MASK 0xFF
#define IA32_HWP_REQUEST_PKG_DESIRED_PERFORMANCE(_) (((_) >> 16) & 0xFF)
/**
 * [Bits 31:24] Energy_Performance_Preference.
 *
 * @remarks If CPUID.06H:EAX.[11] = 1 && CPUID.06H:EAX.[10] = 1
 * @see Vol3B[14.4.4(Managing HWP)]
 */
UINT64 EnergyPerformancePreference : 8;
#define IA32_HWP_REQUEST_PKG_ENERGY_PERFORMANCE_PREFERENCE_BIT 24
#define IA32_HWP_REQUEST_PKG_ENERGY_PERFORMANCE_PREFERENCE_FLAG 0xFF000000
#define IA32_HWP_REQUEST_PKG_ENERGY_PERFORMANCE_PREFERENCE_MASK 0xFF
#define IA32_HWP_REQUEST_PKG_ENERGY_PERFORMANCE_PREFERENCE(_) (((_) >> 24) & 0xFF)
/**
 * [Bits 41:32] Activity_Window.
 *
 * @remarks If CPUID.06H:EAX.[11] = 1 && CPUID.06H:EAX.[9] = 1
 * @see Vol3B[14.4.4(Managing HWP)]
 */
UINT64 ActivityWindow : 10;
#define IA32_HWP_REQUEST_PKG_ACTIVITY_WINDOW_BIT 32
#define IA32_HWP_REQUEST_PKG_ACTIVITY_WINDOW_FLAG 0x3FF00000000
#define IA32_HWP_REQUEST_PKG_ACTIVITY_WINDOW_MASK 0x3FF
#define IA32_HWP_REQUEST_PKG_ACTIVITY_WINDOW(_) (((_) >> 32) & 0x3FF)
UINT64 Reserved1 : 22;
};
/* Raw 64-bit value as read from / written to the MSR. */
UINT64 AsUInt;
} IA32_HWP_REQUEST_PKG_REGISTER;
/**
 * Control HWP Native Interrupts.
 *
 * @remarks If CPUID.06H:EAX.[8] = 1
 */
#define IA32_HWP_INTERRUPT 0x00000773
/*
 * Bit-field view of the 64-bit MSR value: two notification-enable bits, the
 * rest reserved. Hardware-defined layout — do not reorder fields.
 */
typedef union
{
struct
{
/**
 * [Bit 0] EN_Guaranteed_Performance_Change.
 *
 * @remarks If CPUID.06H:EAX.[8] = 1
 * @see Vol3B[14.4.6(HWP Notifications)]
 */
UINT64 EnGuaranteedPerformanceChange : 1;
#define IA32_HWP_INTERRUPT_EN_GUARANTEED_PERFORMANCE_CHANGE_BIT 0
#define IA32_HWP_INTERRUPT_EN_GUARANTEED_PERFORMANCE_CHANGE_FLAG 0x01
#define IA32_HWP_INTERRUPT_EN_GUARANTEED_PERFORMANCE_CHANGE_MASK 0x01
#define IA32_HWP_INTERRUPT_EN_GUARANTEED_PERFORMANCE_CHANGE(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] EN_Excursion_Minimum.
 *
 * @remarks If CPUID.06H:EAX.[8] = 1
 * @see Vol3B[14.4.6(HWP Notifications)]
 */
UINT64 EnExcursionMinimum : 1;
#define IA32_HWP_INTERRUPT_EN_EXCURSION_MINIMUM_BIT 1
#define IA32_HWP_INTERRUPT_EN_EXCURSION_MINIMUM_FLAG 0x02
#define IA32_HWP_INTERRUPT_EN_EXCURSION_MINIMUM_MASK 0x01
#define IA32_HWP_INTERRUPT_EN_EXCURSION_MINIMUM(_) (((_) >> 1) & 0x01)
UINT64 Reserved1 : 62;
};
/* Raw 64-bit value as read from / written to the MSR. */
UINT64 AsUInt;
} IA32_HWP_INTERRUPT_REGISTER;
/**
 * Power Management Control Hints to a Logical Processor.
 *
 * @remarks If CPUID.06H:EAX.[7] = 1
 */
#define IA32_HWP_REQUEST 0x00000774
/*
 * Bit-field view of the 64-bit MSR value. Same field layout as
 * IA32_HWP_REQUEST_PKG above, plus the Package_Control bit (bit 42) that
 * defers to the package-level request. Hardware-defined layout — do not
 * reorder fields.
 */
typedef union
{
struct
{
/**
 * [Bits 7:0] Minimum_Performance.
 *
 * @remarks If CPUID.06H:EAX.[7] = 1
 * @see Vol3B[14.4.4(Managing HWP)]
 */
UINT64 MinimumPerformance : 8;
#define IA32_HWP_REQUEST_MINIMUM_PERFORMANCE_BIT 0
#define IA32_HWP_REQUEST_MINIMUM_PERFORMANCE_FLAG 0xFF
#define IA32_HWP_REQUEST_MINIMUM_PERFORMANCE_MASK 0xFF
#define IA32_HWP_REQUEST_MINIMUM_PERFORMANCE(_) (((_) >> 0) & 0xFF)
/**
 * [Bits 15:8] Maximum_Performance.
 *
 * @remarks If CPUID.06H:EAX.[7] = 1
 * @see Vol3B[14.4.4(Managing HWP)]
 */
UINT64 MaximumPerformance : 8;
#define IA32_HWP_REQUEST_MAXIMUM_PERFORMANCE_BIT 8
#define IA32_HWP_REQUEST_MAXIMUM_PERFORMANCE_FLAG 0xFF00
#define IA32_HWP_REQUEST_MAXIMUM_PERFORMANCE_MASK 0xFF
#define IA32_HWP_REQUEST_MAXIMUM_PERFORMANCE(_) (((_) >> 8) & 0xFF)
/**
 * [Bits 23:16] Desired_Performance.
 *
 * @remarks If CPUID.06H:EAX.[7] = 1
 * @see Vol3B[14.4.4(Managing HWP)]
 */
UINT64 DesiredPerformance : 8;
#define IA32_HWP_REQUEST_DESIRED_PERFORMANCE_BIT 16
#define IA32_HWP_REQUEST_DESIRED_PERFORMANCE_FLAG 0xFF0000
#define IA32_HWP_REQUEST_DESIRED_PERFORMANCE_MASK 0xFF
#define IA32_HWP_REQUEST_DESIRED_PERFORMANCE(_) (((_) >> 16) & 0xFF)
/**
 * [Bits 31:24] Energy_Performance_Preference.
 *
 * @remarks If CPUID.06H:EAX.[7] = 1 && CPUID.06H:EAX.[10] = 1
 * @see Vol3B[14.4.4(Managing HWP)]
 */
UINT64 EnergyPerformancePreference : 8;
#define IA32_HWP_REQUEST_ENERGY_PERFORMANCE_PREFERENCE_BIT 24
#define IA32_HWP_REQUEST_ENERGY_PERFORMANCE_PREFERENCE_FLAG 0xFF000000
#define IA32_HWP_REQUEST_ENERGY_PERFORMANCE_PREFERENCE_MASK 0xFF
#define IA32_HWP_REQUEST_ENERGY_PERFORMANCE_PREFERENCE(_) (((_) >> 24) & 0xFF)
/**
 * [Bits 41:32] Activity_Window.
 *
 * @remarks If CPUID.06H:EAX.[7] = 1 && CPUID.06H:EAX.[9] = 1
 * @see Vol3B[14.4.4(Managing HWP)]
 */
UINT64 ActivityWindow : 10;
#define IA32_HWP_REQUEST_ACTIVITY_WINDOW_BIT 32
#define IA32_HWP_REQUEST_ACTIVITY_WINDOW_FLAG 0x3FF00000000
#define IA32_HWP_REQUEST_ACTIVITY_WINDOW_MASK 0x3FF
#define IA32_HWP_REQUEST_ACTIVITY_WINDOW(_) (((_) >> 32) & 0x3FF)
/**
 * [Bit 42] Package_Control.
 *
 * @remarks If CPUID.06H:EAX.[7] = 1 && CPUID.06H:EAX.[11] = 1
 * @see Vol3B[14.4.4(Managing HWP)]
 */
UINT64 PackageControl : 1;
#define IA32_HWP_REQUEST_PACKAGE_CONTROL_BIT 42
#define IA32_HWP_REQUEST_PACKAGE_CONTROL_FLAG 0x40000000000
#define IA32_HWP_REQUEST_PACKAGE_CONTROL_MASK 0x01
#define IA32_HWP_REQUEST_PACKAGE_CONTROL(_) (((_) >> 42) & 0x01)
UINT64 Reserved1 : 21;
};
/* Raw 64-bit value as read from / written to the MSR. */
UINT64 AsUInt;
} IA32_HWP_REQUEST_REGISTER;
/**
 * Log bits indicating changes to Guaranteed & excursions to Minimum.
 *
 * @remarks If CPUID.06H:EAX.[7] = 1
 */
#define IA32_HWP_STATUS 0x00000777
/*
 * Bit-field view of the 64-bit MSR value: two status bits (bit 1 reserved
 * between them), the rest reserved. Hardware-defined layout — do not reorder
 * fields.
 */
typedef union
{
struct
{
/**
 * [Bit 0] Guaranteed_Performance_Change.
 *
 * @remarks If CPUID.06H:EAX.[7] = 1
 * @see Vol3B[14.4.5(HWP Feedback)]
 */
UINT64 GuaranteedPerformanceChange : 1;
#define IA32_HWP_STATUS_GUARANTEED_PERFORMANCE_CHANGE_BIT 0
#define IA32_HWP_STATUS_GUARANTEED_PERFORMANCE_CHANGE_FLAG 0x01
#define IA32_HWP_STATUS_GUARANTEED_PERFORMANCE_CHANGE_MASK 0x01
#define IA32_HWP_STATUS_GUARANTEED_PERFORMANCE_CHANGE(_) (((_) >> 0) & 0x01)
UINT64 Reserved1 : 1;
/**
 * [Bit 2] Excursion_To_Minimum.
 *
 * @remarks If CPUID.06H:EAX.[7] = 1
 * @see Vol3B[14.4.5(HWP Feedback)]
 */
UINT64 ExcursionToMinimum : 1;
#define IA32_HWP_STATUS_EXCURSION_TO_MINIMUM_BIT 2
#define IA32_HWP_STATUS_EXCURSION_TO_MINIMUM_FLAG 0x04
#define IA32_HWP_STATUS_EXCURSION_TO_MINIMUM_MASK 0x01
#define IA32_HWP_STATUS_EXCURSION_TO_MINIMUM(_) (((_) >> 2) & 0x01)
UINT64 Reserved2 : 61;
};
/* Raw 64-bit value as read from / written to the MSR. */
UINT64 AsUInt;
} IA32_HWP_STATUS_REGISTER;
/*
 * x2APIC register MSRs (800H-83FH block). All are gated on x2APIC support
 * (CPUID.01H:ECX.[21]) and x2APIC mode being enabled (IA32_APIC_BASE.[10]).
 * NOTE(review): these indices appear to follow the convention
 * MSR = 800H + (legacy xAPIC MMIO offset >> 4); confirm against SDM Vol. 3A
 * chapter on the extended xAPIC before relying on that mapping.
 */
/**
 * x2APIC ID Register.
 *
 * @remarks If CPUID.01H:ECX.[21] = 1 && IA32_APIC_BASE.[10] = 1
 * @see Vol3A[10.12(EXTENDED XAPIC (X2APIC))]
 */
#define IA32_X2APIC_APICID 0x00000802
/**
 * x2APIC Version Register.
 *
 * @remarks If CPUID.01H:ECX.[21] = 1 && IA32_APIC_BASE.[10] = 1
 */
#define IA32_X2APIC_VERSION 0x00000803
/**
 * x2APIC Task Priority Register.
 *
 * @remarks If CPUID.01H:ECX.[21] = 1 && IA32_APIC_BASE.[10] = 1
 */
#define IA32_X2APIC_TPR 0x00000808
/**
 * x2APIC Processor Priority Register.
 *
 * @remarks If CPUID.01H:ECX.[21] = 1 && IA32_APIC_BASE.[10] = 1
 */
#define IA32_X2APIC_PPR 0x0000080A
/**
 * x2APIC EOI Register.
 *
 * @remarks If CPUID.01H:ECX.[21] = 1 && IA32_APIC_BASE.[10] = 1
 */
#define IA32_X2APIC_EOI 0x0000080B
/**
 * x2APIC Logical Destination Register.
 *
 * @remarks If CPUID.01H:ECX.[21] = 1 && IA32_APIC_BASE.[10] = 1
 */
#define IA32_X2APIC_LDR 0x0000080D
/**
 * x2APIC Spurious Interrupt Vector Register.
 *
 * @remarks If CPUID.01H:ECX.[21] = 1 && IA32_APIC_BASE.[10] = 1
 */
#define IA32_X2APIC_SIVR 0x0000080F
/**
 * @defgroup IA32_X2APIC_ISR \
 * IA32_X2APIC_ISR(n)
 *
 * x2APIC In-Service Register Bits (n * 32 + 31):(n * 32).
 *
 * @remarks If CPUID.01H:ECX.[21] = 1 && IA32_APIC_BASE.[10] = 1
 * @{
 */
#define IA32_X2APIC_ISR0 0x00000810
#define IA32_X2APIC_ISR1 0x00000811
#define IA32_X2APIC_ISR2 0x00000812
#define IA32_X2APIC_ISR3 0x00000813
#define IA32_X2APIC_ISR4 0x00000814
#define IA32_X2APIC_ISR5 0x00000815
#define IA32_X2APIC_ISR6 0x00000816
#define IA32_X2APIC_ISR7 0x00000817
/**
 * @}
 */
/**
 * @defgroup IA32_X2APIC_TMR \
 * IA32_X2APIC_TMR(n)
 *
 * x2APIC Trigger Mode Register Bits (n * 32 + 31):(n * 32).
 *
 * @remarks If CPUID.01H:ECX.[21] = 1 && IA32_APIC_BASE.[10] = 1
 * @{
 */
#define IA32_X2APIC_TMR0 0x00000818
#define IA32_X2APIC_TMR1 0x00000819
#define IA32_X2APIC_TMR2 0x0000081A
#define IA32_X2APIC_TMR3 0x0000081B
#define IA32_X2APIC_TMR4 0x0000081C
#define IA32_X2APIC_TMR5 0x0000081D
#define IA32_X2APIC_TMR6 0x0000081E
#define IA32_X2APIC_TMR7 0x0000081F
/**
 * @}
 */
/**
 * @defgroup IA32_X2APIC_IRR \
 * IA32_X2APIC_IRR(n)
 *
 * x2APIC Interrupt Request Register Bits (n * 32 + 31):(n * 32).
 *
 * @remarks If CPUID.01H:ECX.[21] = 1 && IA32_APIC_BASE.[10] = 1
 * @{
 */
#define IA32_X2APIC_IRR0 0x00000820
#define IA32_X2APIC_IRR1 0x00000821
#define IA32_X2APIC_IRR2 0x00000822
#define IA32_X2APIC_IRR3 0x00000823
#define IA32_X2APIC_IRR4 0x00000824
#define IA32_X2APIC_IRR5 0x00000825
#define IA32_X2APIC_IRR6 0x00000826
#define IA32_X2APIC_IRR7 0x00000827
/**
 * @}
 */
/**
 * x2APIC Error Status Register.
 *
 * @remarks If CPUID.01H:ECX.[21] = 1 && IA32_APIC_BASE.[10] = 1
 */
#define IA32_X2APIC_ESR 0x00000828
/**
 * x2APIC LVT Corrected Machine Check Interrupt Register.
 *
 * @remarks If CPUID.01H:ECX.[21] = 1 && IA32_APIC_BASE.[10] = 1
 */
#define IA32_X2APIC_LVT_CMCI 0x0000082F
/**
 * x2APIC Interrupt Command Register.
 *
 * @remarks If CPUID.01H:ECX.[21] = 1 && IA32_APIC_BASE.[10] = 1
 */
#define IA32_X2APIC_ICR 0x00000830
/**
 * x2APIC LVT Timer Interrupt Register.
 *
 * @remarks If CPUID.01H:ECX.[21] = 1 && IA32_APIC_BASE.[10] = 1
 */
#define IA32_X2APIC_LVT_TIMER 0x00000832
/**
 * x2APIC LVT Thermal Sensor Interrupt Register.
 *
 * @remarks If CPUID.01H:ECX.[21] = 1 && IA32_APIC_BASE.[10] = 1
 */
#define IA32_X2APIC_LVT_THERMAL 0x00000833
/**
 * x2APIC LVT Performance Monitor Interrupt Register.
 *
 * @remarks If CPUID.01H:ECX.[21] = 1 && IA32_APIC_BASE.[10] = 1
 */
#define IA32_X2APIC_LVT_PMI 0x00000834
/**
 * x2APIC LVT LINT0 Register.
 *
 * @remarks If CPUID.01H:ECX.[21] = 1 && IA32_APIC_BASE.[10] = 1
 */
#define IA32_X2APIC_LVT_LINT0 0x00000835
/**
 * x2APIC LVT LINT1 Register.
 *
 * @remarks If CPUID.01H:ECX.[21] = 1 && IA32_APIC_BASE.[10] = 1
 */
#define IA32_X2APIC_LVT_LINT1 0x00000836
/**
 * x2APIC LVT Error Register.
 *
 * @remarks If CPUID.01H:ECX.[21] = 1 && IA32_APIC_BASE.[10] = 1
 */
#define IA32_X2APIC_LVT_ERROR 0x00000837
/**
 * x2APIC Initial Count Register.
 *
 * @remarks If CPUID.01H:ECX.[21] = 1 && IA32_APIC_BASE.[10] = 1
 */
#define IA32_X2APIC_INIT_COUNT 0x00000838
/**
 * x2APIC Current Count Register.
 *
 * @remarks If CPUID.01H:ECX.[21] = 1 && IA32_APIC_BASE.[10] = 1
 */
#define IA32_X2APIC_CUR_COUNT 0x00000839
/**
 * x2APIC Divide Configuration Register.
 *
 * @remarks If CPUID.01H:ECX.[21] = 1 && IA32_APIC_BASE.[10] = 1
 */
#define IA32_X2APIC_DIV_CONF 0x0000083E
/**
 * x2APIC Self IPI Register.
 *
 * @remarks If CPUID.01H:ECX.[21] = 1 && IA32_APIC_BASE.[10] = 1
 */
#define IA32_X2APIC_SELF_IPI 0x0000083F
/**
 * Silicon Debug Feature Control.
 *
 * @remarks If CPUID.01H:ECX.[11] = 1
 */
#define IA32_DEBUG_INTERFACE 0x00000C80
/*
 * Bit-field view of the 64-bit MSR value. Hardware-defined layout — do not
 * reorder fields. Note the Lock bit (30): once set, further writes to the
 * MSR are rejected.
 */
typedef union
{
struct
{
/**
 * @brief Enable (R/W)
 *
 * [Bit 0] BIOS set 1 to enable Silicon debug features. Default is 0.
 *
 * @remarks If CPUID.01H:ECX.[11] = 1
 */
UINT64 Enable : 1;
#define IA32_DEBUG_INTERFACE_ENABLE_BIT 0
#define IA32_DEBUG_INTERFACE_ENABLE_FLAG 0x01
#define IA32_DEBUG_INTERFACE_ENABLE_MASK 0x01
#define IA32_DEBUG_INTERFACE_ENABLE(_) (((_) >> 0) & 0x01)
UINT64 Reserved1 : 29;
/**
 * @brief Lock (R/W)
 *
 * [Bit 30] If 1, locks any further change to the MSR. The lock bit is set
 * automatically on the first SMI assertion even if not explicitly set by BIOS.
 * Default is 0.
 *
 * @remarks If CPUID.01H:ECX.[11] = 1
 */
UINT64 Lock : 1;
#define IA32_DEBUG_INTERFACE_LOCK_BIT 30
#define IA32_DEBUG_INTERFACE_LOCK_FLAG 0x40000000
#define IA32_DEBUG_INTERFACE_LOCK_MASK 0x01
#define IA32_DEBUG_INTERFACE_LOCK(_) (((_) >> 30) & 0x01)
/**
 * @brief Debug Occurred (R/O)
 *
 * [Bit 31] This "sticky bit" is set by hardware to indicate the status of bit 0.
 * Default is 0.
 *
 * @remarks If CPUID.01H:ECX.[11] = 1
 */
UINT64 DebugOccurred : 1;
#define IA32_DEBUG_INTERFACE_DEBUG_OCCURRED_BIT 31
#define IA32_DEBUG_INTERFACE_DEBUG_OCCURRED_FLAG 0x80000000
#define IA32_DEBUG_INTERFACE_DEBUG_OCCURRED_MASK 0x01
#define IA32_DEBUG_INTERFACE_DEBUG_OCCURRED(_) (((_) >> 31) & 0x01)
UINT64 Reserved2 : 32;
};
/* Raw 64-bit value as read from / written to the MSR. */
UINT64 AsUInt;
} IA32_DEBUG_INTERFACE_REGISTER;
/**
 * L3 QOS Configuration.
 *
 * @remarks If ( CPUID.(EAX=10H, ECX=1):ECX.[2] = 1 )
 */
#define IA32_L3_QOS_CFG 0x00000C81
/*
 * Bit-field view of the 64-bit MSR value: a single CDP-enable bit plus
 * reserved bits. Hardware-defined layout — do not reorder fields.
 */
typedef union
{
struct
{
/**
 * @brief Enable (R/W)
 *
 * [Bit 0] Set 1 to enable L3 CAT masks and COS to operate in Code and Data
 * Prioritization (CDP) mode.
 */
UINT64 Enable : 1;
#define IA32_L3_QOS_CFG_ENABLE_BIT 0
#define IA32_L3_QOS_CFG_ENABLE_FLAG 0x01
#define IA32_L3_QOS_CFG_ENABLE_MASK 0x01
#define IA32_L3_QOS_CFG_ENABLE(_) (((_) >> 0) & 0x01)
UINT64 Reserved1 : 63;
};
/* Raw 64-bit value as read from / written to the MSR. */
UINT64 AsUInt;
} IA32_L3_QOS_CFG_REGISTER;
/**
 * L2 QOS Configuration.
 *
 * @remarks If ( CPUID.(EAX=10H, ECX=2):ECX.[2] = 1 )
 */
#define IA32_L2_QOS_CFG 0x00000C82
/*
 * Bit-field view of the 64-bit MSR value: same shape as
 * IA32_L3_QOS_CFG_REGISTER but for the L2 cache. Hardware-defined layout —
 * do not reorder fields.
 */
typedef union
{
struct
{
/**
 * @brief Enable (R/W)
 *
 * [Bit 0] Set 1 to enable L2 CAT masks and COS to operate in Code and Data
 * Prioritization (CDP) mode.
 */
UINT64 Enable : 1;
#define IA32_L2_QOS_CFG_ENABLE_BIT 0
#define IA32_L2_QOS_CFG_ENABLE_FLAG 0x01
#define IA32_L2_QOS_CFG_ENABLE_MASK 0x01
#define IA32_L2_QOS_CFG_ENABLE(_) (((_) >> 0) & 0x01)
UINT64 Reserved1 : 63;
};
/* Raw 64-bit value as read from / written to the MSR. */
UINT64 AsUInt;
} IA32_L2_QOS_CFG_REGISTER;
/**
 * Monitoring Event Select Register.
 *
 * @remarks If ( CPUID.(EAX=07H, ECX=0):EBX.[12] = 1 )
 */
#define IA32_QM_EVTSEL 0x00000C8D
/*
 * Bit-field view of the 64-bit MSR value. Software writes an event ID and an
 * RMID here, then reads the monitored data back through IA32_QM_CTR.
 * Hardware-defined layout — do not reorder fields.
 */
typedef union
{
struct
{
/**
 * @brief Event ID
 *
 * [Bits 7:0] ID of a supported monitoring event to report via IA32_QM_CTR.
 */
UINT64 EventId : 8;
#define IA32_QM_EVTSEL_EVENT_ID_BIT 0
#define IA32_QM_EVTSEL_EVENT_ID_FLAG 0xFF
#define IA32_QM_EVTSEL_EVENT_ID_MASK 0xFF
#define IA32_QM_EVTSEL_EVENT_ID(_) (((_) >> 0) & 0xFF)
UINT64 Reserved1 : 24;
/**
 * @brief Resource Monitoring ID
 *
 * [Bits 63:32] ID for monitoring hardware to report monitored data via IA32_QM_CTR.
 *
 * @remarks Bits [N+31:32] N = Ceil (Log2 (CPUID.(EAX= 0FH,ECX=0H).EBX[31:0] + 1))
 */
UINT64 ResourceMonitoringId : 32;
#define IA32_QM_EVTSEL_RESOURCE_MONITORING_ID_BIT 32
#define IA32_QM_EVTSEL_RESOURCE_MONITORING_ID_FLAG 0xFFFFFFFF00000000
#define IA32_QM_EVTSEL_RESOURCE_MONITORING_ID_MASK 0xFFFFFFFF
#define IA32_QM_EVTSEL_RESOURCE_MONITORING_ID(_) (((_) >> 32) & 0xFFFFFFFF)
};
/* Raw 64-bit value as read from / written to the MSR. */
UINT64 AsUInt;
} IA32_QM_EVTSEL_REGISTER;
/**
 * Monitoring Counter Register.
 *
 * @remarks If ( CPUID.(EAX=07H, ECX=0):EBX.[12] = 1 )
 */
#define IA32_QM_CTR 0x00000C8E
typedef union
{
struct
{
/**
 * [Bits 61:0] Resource Monitored Data.
 */
UINT64 ResourceMonitoredData : 62;
#define IA32_QM_CTR_RESOURCE_MONITORED_DATA_BIT 0
#define IA32_QM_CTR_RESOURCE_MONITORED_DATA_FLAG 0x3FFFFFFFFFFFFFFF
#define IA32_QM_CTR_RESOURCE_MONITORED_DATA_MASK 0x3FFFFFFFFFFFFFFF
#define IA32_QM_CTR_RESOURCE_MONITORED_DATA(_) (((_) >> 0) & 0x3FFFFFFFFFFFFFFF)
/**
 * @brief Unavailable
 *
 * [Bit 62] If 1, indicates data for this RMID is not available or not monitored for
 * this resource or RMID.
 */
UINT64 Unavailable : 1;
#define IA32_QM_CTR_UNAVAILABLE_BIT 62
#define IA32_QM_CTR_UNAVAILABLE_FLAG 0x4000000000000000
#define IA32_QM_CTR_UNAVAILABLE_MASK 0x01
#define IA32_QM_CTR_UNAVAILABLE(_) (((_) >> 62) & 0x01)
/**
 * @brief Error
 *
 * [Bit 63] If 1, indicates an unsupported RMID or event type was written to
 * IA32_PQR_QM_EVTSEL.
 */
UINT64 Error : 1;
#define IA32_QM_CTR_ERROR_BIT 63
#define IA32_QM_CTR_ERROR_FLAG 0x8000000000000000
#define IA32_QM_CTR_ERROR_MASK 0x01
#define IA32_QM_CTR_ERROR(_) (((_) >> 63) & 0x01)
};
/* Raw 64-bit MSR value; the counter is only valid when Error and Unavailable are 0. */
UINT64 AsUInt;
} IA32_QM_CTR_REGISTER;
/**
 * Resource Association Register.
 *
 * @remarks If ( (CPUID.(EAX=07H, ECX=0):EBX[12] = 1) or (CPUID.(EAX=07H, ECX=0):EBX[15] = 1 ) )
 */
#define IA32_PQR_ASSOC 0x00000C8F
typedef union
{
struct
{
/**
 * @brief Resource Monitoring ID (R/W)
 *
 * [Bits 31:0] ID for monitoring hardware to track internal operation, e.g., memory
 * access.
 *
 * @remarks Bits [N-1:0] N = Ceil (Log2 (CPUID.(EAX= 0FH, ECX=0H).EBX[31:0] +1))
 * 31:N Reserved
 */
UINT64 ResourceMonitoringId : 32;
#define IA32_PQR_ASSOC_RESOURCE_MONITORING_ID_BIT 0
#define IA32_PQR_ASSOC_RESOURCE_MONITORING_ID_FLAG 0xFFFFFFFF
#define IA32_PQR_ASSOC_RESOURCE_MONITORING_ID_MASK 0xFFFFFFFF
#define IA32_PQR_ASSOC_RESOURCE_MONITORING_ID(_) (((_) >> 0) & 0xFFFFFFFF)
/**
 * @brief COS (R/W)
 *
 * [Bits 63:32] The class of service (COS) to enforce (on writes); returns the
 * current COS when read.
 *
 * @remarks If ( CPUID.(EAX=07H, ECX=0):EBX.[15] = 1 )
 */
UINT64 Cos : 32;
#define IA32_PQR_ASSOC_COS_BIT 32
#define IA32_PQR_ASSOC_COS_FLAG 0xFFFFFFFF00000000
#define IA32_PQR_ASSOC_COS_MASK 0xFFFFFFFF
#define IA32_PQR_ASSOC_COS(_) (((_) >> 32) & 0xFFFFFFFF)
};
/* Raw 64-bit MSR value; the anonymous struct above decodes its bit-fields. */
UINT64 AsUInt;
} IA32_PQR_ASSOC_REGISTER;
/**
 * Supervisor State of MPX Configuration.
 *
 * @remarks If (CPUID.(EAX=07H, ECX=0H):EBX[14] = 1)
 */
#define IA32_BNDCFGS 0x00000D90
typedef union
{
struct
{
/**
 * [Bit 0] Enable Intel MPX in supervisor mode.
 */
UINT64 Enable : 1;
#define IA32_BNDCFGS_ENABLE_BIT 0
#define IA32_BNDCFGS_ENABLE_FLAG 0x01
#define IA32_BNDCFGS_ENABLE_MASK 0x01
#define IA32_BNDCFGS_ENABLE(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] Preserve the bounds registers for near branch instructions in the absence
 * of the BND prefix.
 */
UINT64 BndPreserve : 1;
#define IA32_BNDCFGS_BND_PRESERVE_BIT 1
#define IA32_BNDCFGS_BND_PRESERVE_FLAG 0x02
#define IA32_BNDCFGS_BND_PRESERVE_MASK 0x01
#define IA32_BNDCFGS_BND_PRESERVE(_) (((_) >> 1) & 0x01)
/* [Bits 11:2] Reserved. */
UINT64 Reserved1 : 10;
/**
 * [Bits 63:12] Base Address of Bound Directory.
 */
UINT64 BoundDirectoryBaseAddress : 52;
#define IA32_BNDCFGS_BOUND_DIRECTORY_BASE_ADDRESS_BIT 12
#define IA32_BNDCFGS_BOUND_DIRECTORY_BASE_ADDRESS_FLAG 0xFFFFFFFFFFFFF000
#define IA32_BNDCFGS_BOUND_DIRECTORY_BASE_ADDRESS_MASK 0xFFFFFFFFFFFFF
#define IA32_BNDCFGS_BOUND_DIRECTORY_BASE_ADDRESS(_) (((_) >> 12) & 0xFFFFFFFFFFFFF)
};
/* Raw 64-bit MSR value; the anonymous struct above decodes its bit-fields. */
UINT64 AsUInt;
} IA32_BNDCFGS_REGISTER;
/**
 * Extended Supervisor State Mask.
 *
 * @remarks If ( CPUID.(0DH, 1):EAX.[3] = 1 )
 */
#define IA32_XSS 0x00000DA0
typedef union
{
struct
{
/* [Bits 7:0] Reserved. */
UINT64 Reserved1 : 8;
/**
 * [Bit 8] Trace Packet Configuration State.
 */
UINT64 TracePacketConfigurationState : 1;
#define IA32_XSS_TRACE_PACKET_CONFIGURATION_STATE_BIT 8
#define IA32_XSS_TRACE_PACKET_CONFIGURATION_STATE_FLAG 0x100
#define IA32_XSS_TRACE_PACKET_CONFIGURATION_STATE_MASK 0x01
#define IA32_XSS_TRACE_PACKET_CONFIGURATION_STATE(_) (((_) >> 8) & 0x01)
UINT64 Reserved2 : 55;
};
/* Raw 64-bit MSR value; the anonymous struct above decodes its bit-fields. */
UINT64 AsUInt;
} IA32_XSS_REGISTER;
/**
 * Package Level Enable/disable HDC.
 *
 * @remarks If CPUID.06H:EAX.[13] = 1
 */
#define IA32_PKG_HDC_CTL 0x00000DB0
typedef union
{
struct
{
/**
 * @brief HDC_Pkg_Enable (R/W)
 *
 * [Bit 0] Force HDC idling or wake up HDC-idled logical processors in the package.
 *
 * @remarks If CPUID.06H:EAX.[13] = 1
 * @see Vol3B[14.5.2(Package level Enabling HDC)]
 */
UINT64 HdcPkgEnable : 1;
#define IA32_PKG_HDC_CTL_HDC_PKG_ENABLE_BIT 0
#define IA32_PKG_HDC_CTL_HDC_PKG_ENABLE_FLAG 0x01
#define IA32_PKG_HDC_CTL_HDC_PKG_ENABLE_MASK 0x01
#define IA32_PKG_HDC_CTL_HDC_PKG_ENABLE(_) (((_) >> 0) & 0x01)
UINT64 Reserved1 : 63;
};
/* Raw 64-bit MSR value; the anonymous struct above decodes its bit-fields. */
UINT64 AsUInt;
} IA32_PKG_HDC_CTL_REGISTER;
/**
 * Enable/disable HWP.
 *
 * @remarks If CPUID.06H:EAX.[13] = 1
 */
#define IA32_PM_CTL1 0x00000DB1
typedef union
{
struct
{
/**
 * @brief HDC_Allow_Block (R/W)
 *
 * [Bit 0] Allow/Block this logical processor for package level HDC control.
 *
 * @remarks If CPUID.06H:EAX.[13] = 1
 * @see Vol3B[14.5.3(Logical-Processor Level HDC Control)]
 */
UINT64 HdcAllowBlock : 1;
#define IA32_PM_CTL1_HDC_ALLOW_BLOCK_BIT 0
#define IA32_PM_CTL1_HDC_ALLOW_BLOCK_FLAG 0x01
#define IA32_PM_CTL1_HDC_ALLOW_BLOCK_MASK 0x01
#define IA32_PM_CTL1_HDC_ALLOW_BLOCK(_) (((_) >> 0) & 0x01)
UINT64 Reserved1 : 63;
};
/* Raw 64-bit MSR value; the anonymous struct above decodes its bit-fields. */
UINT64 AsUInt;
} IA32_PM_CTL1_REGISTER;
/**
 * Per-Logical_Processor HDC Idle Residency.
 *
 * @remarks If CPUID.06H:EAX.[13] = 1
 */
#define IA32_THREAD_STALL 0x00000DB2
/* Plain 64-bit counter MSR, hence a struct rather than a bit-field union. */
typedef struct
{
/**
 * @brief Stall_Cycle_Cnt (R/W)
 *
 * Stalled cycles due to HDC forced idle on this logical processor.
 *
 * @remarks If CPUID.06H:EAX.[13] = 1
 * @see Vol3B[14.5.4.1(IA32_THREAD_STALL)]
 */
UINT64 StallCycleCount;
} IA32_THREAD_STALL_REGISTER;
/**
 * Extended Feature Enables.
 *
 * @remarks If CPUID.80000001:EDX.[20] = 1 || CPUID.80000001:EDX.[29] = 1
 */
#define IA32_EFER 0xC0000080
typedef union
{
struct
{
/**
 * @brief SYSCALL Enable (R/W)
 *
 * [Bit 0] Enables SYSCALL/SYSRET instructions in 64-bit mode.
 */
UINT64 SyscallEnable : 1;
#define IA32_EFER_SYSCALL_ENABLE_BIT 0
#define IA32_EFER_SYSCALL_ENABLE_FLAG 0x01
#define IA32_EFER_SYSCALL_ENABLE_MASK 0x01
#define IA32_EFER_SYSCALL_ENABLE(_) (((_) >> 0) & 0x01)
UINT64 Reserved1 : 7;
/**
 * @brief IA-32e Mode Enable (R/W)
 *
 * [Bit 8] Enables IA-32e mode operation.
 */
UINT64 Ia32EModeEnable : 1;
#define IA32_EFER_IA32E_MODE_ENABLE_BIT 8
#define IA32_EFER_IA32E_MODE_ENABLE_FLAG 0x100
#define IA32_EFER_IA32E_MODE_ENABLE_MASK 0x01
#define IA32_EFER_IA32E_MODE_ENABLE(_) (((_) >> 8) & 0x01)
UINT64 Reserved2 : 1;
/**
 * @brief IA-32e Mode Active (R)
 *
 * [Bit 10] Indicates IA-32e mode is active when set.
 */
UINT64 Ia32EModeActive : 1;
#define IA32_EFER_IA32E_MODE_ACTIVE_BIT 10
#define IA32_EFER_IA32E_MODE_ACTIVE_FLAG 0x400
#define IA32_EFER_IA32E_MODE_ACTIVE_MASK 0x01
#define IA32_EFER_IA32E_MODE_ACTIVE(_) (((_) >> 10) & 0x01)
/**
 * [Bit 11] Execute Disable Bit Enable.
 */
UINT64 ExecuteDisableBitEnable : 1;
#define IA32_EFER_EXECUTE_DISABLE_BIT_ENABLE_BIT 11
#define IA32_EFER_EXECUTE_DISABLE_BIT_ENABLE_FLAG 0x800
#define IA32_EFER_EXECUTE_DISABLE_BIT_ENABLE_MASK 0x01
#define IA32_EFER_EXECUTE_DISABLE_BIT_ENABLE(_) (((_) >> 11) & 0x01)
UINT64 Reserved3 : 52;
};
/* Raw 64-bit MSR value; the anonymous struct above decodes its bit-fields. */
UINT64 AsUInt;
} IA32_EFER_REGISTER;
/**
 * System Call Target Address.
 *
 * Holds the CS/SS segment-selector values loaded by SYSCALL and SYSRET.
 *
 * @remarks If CPUID.80000001:EDX.[29] = 1
 */
#define IA32_STAR 0xC0000081
/**
 * @brief IA-32e Mode System Call Target Address (R/W)
 *
 * Target RIP for the called procedure when SYSCALL is executed in 64-bit mode.
 *
 * @remarks If CPUID.80000001:EDX.[29] = 1
 */
#define IA32_LSTAR 0xC0000082
/**
 * @brief IA-32e Mode System Call Target Address (R/W)
 *
 * Not used, as the SYSCALL instruction is not recognized in compatibility mode.
 *
 * @remarks If CPUID.80000001:EDX.[29] = 1
 */
#define IA32_CSTAR 0xC0000083
/**
 * System Call Flag Mask.
 *
 * RFLAGS bits set in this mask are cleared on SYSCALL entry.
 *
 * @remarks If CPUID.80000001:EDX.[29] = 1
 */
#define IA32_FMASK 0xC0000084
/**
 * Map of BASE Address of FS.
 *
 * @remarks If CPUID.80000001:EDX.[29] = 1
 */
#define IA32_FS_BASE 0xC0000100
/**
 * Map of BASE Address of GS.
 *
 * @remarks If CPUID.80000001:EDX.[29] = 1
 */
#define IA32_GS_BASE 0xC0000101
/**
 * Swap Target of BASE Address of GS.
 *
 * Exchanged with IA32_GS_BASE by the SWAPGS instruction.
 *
 * @remarks If CPUID.80000001:EDX.[29] = 1
 */
#define IA32_KERNEL_GS_BASE 0xC0000102
/**
 * Auxiliary TSC.
 *
 * Read together with the TSC by RDTSCP (and RDPID).
 *
 * @remarks If CPUID.80000001H: EDX[27] = 1 or CPUID.(EAX=7,ECX=0):ECX[bit 22] = 1
 */
#define IA32_TSC_AUX 0xC0000103
typedef union
{
struct
{
/**
 * [Bits 31:0] AUX. Auxiliary signature of TSC.
 */
UINT64 TscAuxiliarySignature : 32;
#define IA32_TSC_AUX_TSC_AUXILIARY_SIGNATURE_BIT 0
#define IA32_TSC_AUX_TSC_AUXILIARY_SIGNATURE_FLAG 0xFFFFFFFF
#define IA32_TSC_AUX_TSC_AUXILIARY_SIGNATURE_MASK 0xFFFFFFFF
#define IA32_TSC_AUX_TSC_AUXILIARY_SIGNATURE(_) (((_) >> 0) & 0xFFFFFFFF)
UINT64 Reserved1 : 32;
};
/* Raw 64-bit MSR value; the anonymous struct above decodes its bit-fields. */
UINT64 AsUInt;
} IA32_TSC_AUX_REGISTER;
/**
* @}
*/
/**
* @defgroup PAGING \
* Paging
* @{
*/
/**
* @defgroup PAGING_32 \
* 32-Bit Paging
*
* A logical processor uses 32-bit paging if CR0.PG = 1 and CR4.PAE = 0. 32-bit paging translates
* 32-bit linear addresses to 40-bit physical addresses. Although 40 bits corresponds to 1 TByte,
* linear addresses are limited to 32 bits; at most 4 GBytes of linear-address space may be accessed
* at any given time. 32-bit paging uses a hierarchy of paging structures to produce a translation
* for a linear address. CR3 is used to locate the first paging-structure, the page directory.
* 32-bit paging may map linear addresses to either 4-KByte pages or 4-MByte pages.
*
* @see Vol3A[4.3(32-BIT PAGING)] (reference)
* @{
*/
/**
 * @brief Format of a 32-Bit Page-Directory Entry that Maps a 4-MByte Page
 */
typedef union
{
struct
{
/**
 * [Bit 0] Present; must be 1 to map a 4-MByte page.
 */
UINT32 Present : 1;
#define PDE_4MB_32_PRESENT_BIT 0
#define PDE_4MB_32_PRESENT_FLAG 0x01
#define PDE_4MB_32_PRESENT_MASK 0x01
#define PDE_4MB_32_PRESENT(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] Read/write; if 0, writes may not be allowed to the 4-MByte page
 * referenced by this entry.
 *
 * @see Vol3A[4.6(Access Rights)]
 */
UINT32 Write : 1;
#define PDE_4MB_32_WRITE_BIT 1
#define PDE_4MB_32_WRITE_FLAG 0x02
#define PDE_4MB_32_WRITE_MASK 0x01
#define PDE_4MB_32_WRITE(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] User/supervisor; if 0, user-mode accesses are not allowed to the 4-MByte
 * page referenced by this entry.
 *
 * @see Vol3A[4.6(Access Rights)]
 */
UINT32 Supervisor : 1;
#define PDE_4MB_32_SUPERVISOR_BIT 2
#define PDE_4MB_32_SUPERVISOR_FLAG 0x04
#define PDE_4MB_32_SUPERVISOR_MASK 0x01
#define PDE_4MB_32_SUPERVISOR(_) (((_) >> 2) & 0x01)
/**
 * [Bit 3] Page-level write-through; indirectly determines the memory type used to
 * access the 4-MByte page referenced by this entry.
 *
 * @see Vol3A[4.9.2(Paging and Memory Typing When the PAT is Supported (Pentium III
 * and More Recent Processor Families))]
 */
UINT32 PageLevelWriteThrough : 1;
#define PDE_4MB_32_PAGE_LEVEL_WRITE_THROUGH_BIT 3
#define PDE_4MB_32_PAGE_LEVEL_WRITE_THROUGH_FLAG 0x08
#define PDE_4MB_32_PAGE_LEVEL_WRITE_THROUGH_MASK 0x01
#define PDE_4MB_32_PAGE_LEVEL_WRITE_THROUGH(_) (((_) >> 3) & 0x01)
/**
 * [Bit 4] Page-level cache disable; indirectly determines the memory type used to
 * access the 4-MByte page referenced by this entry.
 *
 * @see Vol3A[4.9.2(Paging and Memory Typing When the PAT is Supported (Pentium III
 * and More Recent Processor Families))]
 */
UINT32 PageLevelCacheDisable : 1;
#define PDE_4MB_32_PAGE_LEVEL_CACHE_DISABLE_BIT 4
#define PDE_4MB_32_PAGE_LEVEL_CACHE_DISABLE_FLAG 0x10
#define PDE_4MB_32_PAGE_LEVEL_CACHE_DISABLE_MASK 0x01
#define PDE_4MB_32_PAGE_LEVEL_CACHE_DISABLE(_) (((_) >> 4) & 0x01)
/**
 * [Bit 5] Accessed; indicates whether software has accessed the 4-MByte page
 * referenced by this entry.
 *
 * @see Vol3A[4.8(Accessed and Dirty Flags)]
 */
UINT32 Accessed : 1;
#define PDE_4MB_32_ACCESSED_BIT 5
#define PDE_4MB_32_ACCESSED_FLAG 0x20
#define PDE_4MB_32_ACCESSED_MASK 0x01
#define PDE_4MB_32_ACCESSED(_) (((_) >> 5) & 0x01)
/**
 * [Bit 6] Dirty; indicates whether software has written to the 4-MByte page
 * referenced by this entry.
 *
 * @see Vol3A[4.8(Accessed and Dirty Flags)]
 */
UINT32 Dirty : 1;
#define PDE_4MB_32_DIRTY_BIT 6
#define PDE_4MB_32_DIRTY_FLAG 0x40
#define PDE_4MB_32_DIRTY_MASK 0x01
#define PDE_4MB_32_DIRTY(_) (((_) >> 6) & 0x01)
/**
 * [Bit 7] Page size; must be 1 (otherwise, this entry references a page table).
 */
UINT32 LargePage : 1;
#define PDE_4MB_32_LARGE_PAGE_BIT 7
#define PDE_4MB_32_LARGE_PAGE_FLAG 0x80
#define PDE_4MB_32_LARGE_PAGE_MASK 0x01
#define PDE_4MB_32_LARGE_PAGE(_) (((_) >> 7) & 0x01)
/**
 * [Bit 8] Global; if CR4.PGE = 1, determines whether the translation is global;
 * ignored otherwise.
 *
 * @see Vol3A[4.10(Caching Translation Information)]
 */
UINT32 Global : 1;
#define PDE_4MB_32_GLOBAL_BIT 8
#define PDE_4MB_32_GLOBAL_FLAG 0x100
#define PDE_4MB_32_GLOBAL_MASK 0x01
#define PDE_4MB_32_GLOBAL(_) (((_) >> 8) & 0x01)
/**
 * [Bits 11:9] Ignored.
 */
UINT32 Ignored1 : 3;
#define PDE_4MB_32_IGNORED_1_BIT 9
#define PDE_4MB_32_IGNORED_1_FLAG 0xE00
#define PDE_4MB_32_IGNORED_1_MASK 0x07
#define PDE_4MB_32_IGNORED_1(_) (((_) >> 9) & 0x07)
/**
 * [Bit 12] Indirectly determines the memory type used to access the 4-MByte page
 * referenced by this entry.
 *
 * @see Vol3A[4.9.2(Paging and Memory Typing When the PAT is Supported (Pentium III
 * and More Recent Processor Families))]
 */
UINT32 Pat : 1;
#define PDE_4MB_32_PAT_BIT 12
#define PDE_4MB_32_PAT_FLAG 0x1000
#define PDE_4MB_32_PAT_MASK 0x01
#define PDE_4MB_32_PAT(_) (((_) >> 12) & 0x01)
/**
 * [Bits 20:13] Bits (M-1):32 of physical address of the 4-MByte page referenced by
 * this entry.
 */
UINT32 PageFrameNumberLow : 8;
#define PDE_4MB_32_PAGE_FRAME_NUMBER_LOW_BIT 13
#define PDE_4MB_32_PAGE_FRAME_NUMBER_LOW_FLAG 0x1FE000
#define PDE_4MB_32_PAGE_FRAME_NUMBER_LOW_MASK 0xFF
#define PDE_4MB_32_PAGE_FRAME_NUMBER_LOW(_) (((_) >> 13) & 0xFF)
/* [Bit 21] Reserved. */
UINT32 Reserved1 : 1;
/**
 * [Bits 31:22] Bits 31:22 of physical address of the 4-MByte page referenced by
 * this entry.
 */
UINT32 PageFrameNumberHigh : 10;
#define PDE_4MB_32_PAGE_FRAME_NUMBER_HIGH_BIT 22
#define PDE_4MB_32_PAGE_FRAME_NUMBER_HIGH_FLAG 0xFFC00000
#define PDE_4MB_32_PAGE_FRAME_NUMBER_HIGH_MASK 0x3FF
#define PDE_4MB_32_PAGE_FRAME_NUMBER_HIGH(_) (((_) >> 22) & 0x3FF)
};
/* Raw 32-bit value of the page-directory entry. */
UINT32 AsUInt;
} PDE_4MB_32;
/**
 * @brief Format of a 32-Bit Page-Directory Entry that References a Page Table
 */
typedef union
{
struct
{
/**
 * [Bit 0] Present; must be 1 to reference a page table.
 */
UINT32 Present : 1;
#define PDE_32_PRESENT_BIT 0
#define PDE_32_PRESENT_FLAG 0x01
#define PDE_32_PRESENT_MASK 0x01
#define PDE_32_PRESENT(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] Read/write; if 0, writes may not be allowed to the 4-MByte region
 * controlled by this entry.
 *
 * @see Vol3A[4.6(Access Rights)]
 */
UINT32 Write : 1;
#define PDE_32_WRITE_BIT 1
#define PDE_32_WRITE_FLAG 0x02
#define PDE_32_WRITE_MASK 0x01
#define PDE_32_WRITE(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] User/supervisor; if 0, user-mode accesses are not allowed to the 4-MByte
 * region controlled by this entry.
 *
 * @see Vol3A[4.6(Access Rights)]
 */
UINT32 Supervisor : 1;
#define PDE_32_SUPERVISOR_BIT 2
#define PDE_32_SUPERVISOR_FLAG 0x04
#define PDE_32_SUPERVISOR_MASK 0x01
#define PDE_32_SUPERVISOR(_) (((_) >> 2) & 0x01)
/**
 * [Bit 3] Page-level write-through; indirectly determines the memory type used to
 * access the page table referenced by this entry.
 *
 * @see Vol3A[4.9.2(Paging and Memory Typing When the PAT is Supported (Pentium III
 * and More Recent Processor Families))]
 */
UINT32 PageLevelWriteThrough : 1;
#define PDE_32_PAGE_LEVEL_WRITE_THROUGH_BIT 3
#define PDE_32_PAGE_LEVEL_WRITE_THROUGH_FLAG 0x08
#define PDE_32_PAGE_LEVEL_WRITE_THROUGH_MASK 0x01
#define PDE_32_PAGE_LEVEL_WRITE_THROUGH(_) (((_) >> 3) & 0x01)
/**
 * [Bit 4] Page-level cache disable; indirectly determines the memory type used to
 * access the page table referenced by this entry.
 *
 * @see Vol3A[4.9.2(Paging and Memory Typing When the PAT is Supported (Pentium III
 * and More Recent Processor Families))]
 */
UINT32 PageLevelCacheDisable : 1;
#define PDE_32_PAGE_LEVEL_CACHE_DISABLE_BIT 4
#define PDE_32_PAGE_LEVEL_CACHE_DISABLE_FLAG 0x10
#define PDE_32_PAGE_LEVEL_CACHE_DISABLE_MASK 0x01
#define PDE_32_PAGE_LEVEL_CACHE_DISABLE(_) (((_) >> 4) & 0x01)
/**
 * [Bit 5] Accessed; indicates whether this entry has been used for linear-address
 * translation.
 *
 * @see Vol3A[4.8(Accessed and Dirty Flags)]
 */
UINT32 Accessed : 1;
#define PDE_32_ACCESSED_BIT 5
#define PDE_32_ACCESSED_FLAG 0x20
#define PDE_32_ACCESSED_MASK 0x01
#define PDE_32_ACCESSED(_) (((_) >> 5) & 0x01)
/**
 * [Bit 6] Ignored.
 */
UINT32 Ignored1 : 1;
#define PDE_32_IGNORED_1_BIT 6
#define PDE_32_IGNORED_1_FLAG 0x40
#define PDE_32_IGNORED_1_MASK 0x01
#define PDE_32_IGNORED_1(_) (((_) >> 6) & 0x01)
/**
 * [Bit 7] If CR4.PSE = 1, must be 0 (otherwise, this entry maps a 4-MByte page);
 * otherwise, ignored.
 */
UINT32 LargePage : 1;
#define PDE_32_LARGE_PAGE_BIT 7
#define PDE_32_LARGE_PAGE_FLAG 0x80
#define PDE_32_LARGE_PAGE_MASK 0x01
#define PDE_32_LARGE_PAGE(_) (((_) >> 7) & 0x01)
/**
 * [Bits 11:8] Ignored.
 */
UINT32 Ignored2 : 4;
#define PDE_32_IGNORED_2_BIT 8
#define PDE_32_IGNORED_2_FLAG 0xF00
#define PDE_32_IGNORED_2_MASK 0x0F
#define PDE_32_IGNORED_2(_) (((_) >> 8) & 0x0F)
/**
 * [Bits 31:12] Physical address of 4-KByte aligned page table referenced by this
 * entry.
 */
UINT32 PageFrameNumber : 20;
#define PDE_32_PAGE_FRAME_NUMBER_BIT 12
#define PDE_32_PAGE_FRAME_NUMBER_FLAG 0xFFFFF000
#define PDE_32_PAGE_FRAME_NUMBER_MASK 0xFFFFF
#define PDE_32_PAGE_FRAME_NUMBER(_) (((_) >> 12) & 0xFFFFF)
};
/* Raw 32-bit value of the page-directory entry. */
UINT32 AsUInt;
} PDE_32;
/**
 * @brief Format of a 32-Bit Page-Table Entry that Maps a 4-KByte Page
 */
typedef union
{
struct
{
/**
 * [Bit 0] Present; must be 1 to map a 4-KByte page.
 */
UINT32 Present : 1;
#define PTE_32_PRESENT_BIT 0
#define PTE_32_PRESENT_FLAG 0x01
#define PTE_32_PRESENT_MASK 0x01
#define PTE_32_PRESENT(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] Read/write; if 0, writes may not be allowed to the 4-KByte page
 * referenced by this entry.
 *
 * @see Vol3A[4.6(Access Rights)]
 */
UINT32 Write : 1;
#define PTE_32_WRITE_BIT 1
#define PTE_32_WRITE_FLAG 0x02
#define PTE_32_WRITE_MASK 0x01
#define PTE_32_WRITE(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] User/supervisor; if 0, user-mode accesses are not allowed to the 4-KByte
 * page referenced by this entry.
 *
 * @see Vol3A[4.6(Access Rights)]
 */
UINT32 Supervisor : 1;
#define PTE_32_SUPERVISOR_BIT 2
#define PTE_32_SUPERVISOR_FLAG 0x04
#define PTE_32_SUPERVISOR_MASK 0x01
#define PTE_32_SUPERVISOR(_) (((_) >> 2) & 0x01)
/**
 * [Bit 3] Page-level write-through; indirectly determines the memory type used to
 * access the 4-KByte page referenced by this entry.
 *
 * @see Vol3A[4.9.2(Paging and Memory Typing When the PAT is Supported (Pentium III
 * and More Recent Processor Families))]
 */
UINT32 PageLevelWriteThrough : 1;
#define PTE_32_PAGE_LEVEL_WRITE_THROUGH_BIT 3
#define PTE_32_PAGE_LEVEL_WRITE_THROUGH_FLAG 0x08
#define PTE_32_PAGE_LEVEL_WRITE_THROUGH_MASK 0x01
#define PTE_32_PAGE_LEVEL_WRITE_THROUGH(_) (((_) >> 3) & 0x01)
/**
 * [Bit 4] Page-level cache disable; indirectly determines the memory type used to
 * access the 4-KByte page referenced by this entry.
 *
 * @see Vol3A[4.9.2(Paging and Memory Typing When the PAT is Supported (Pentium III
 * and More Recent Processor Families))]
 */
UINT32 PageLevelCacheDisable : 1;
#define PTE_32_PAGE_LEVEL_CACHE_DISABLE_BIT 4
#define PTE_32_PAGE_LEVEL_CACHE_DISABLE_FLAG 0x10
#define PTE_32_PAGE_LEVEL_CACHE_DISABLE_MASK 0x01
#define PTE_32_PAGE_LEVEL_CACHE_DISABLE(_) (((_) >> 4) & 0x01)
/**
 * [Bit 5] Accessed; indicates whether software has accessed the 4-KByte page
 * referenced by this entry.
 *
 * @see Vol3A[4.8(Accessed and Dirty Flags)]
 */
UINT32 Accessed : 1;
#define PTE_32_ACCESSED_BIT 5
#define PTE_32_ACCESSED_FLAG 0x20
#define PTE_32_ACCESSED_MASK 0x01
#define PTE_32_ACCESSED(_) (((_) >> 5) & 0x01)
/**
 * [Bit 6] Dirty; indicates whether software has written to the 4-KByte page
 * referenced by this entry.
 *
 * @see Vol3A[4.8(Accessed and Dirty Flags)]
 */
UINT32 Dirty : 1;
#define PTE_32_DIRTY_BIT 6
#define PTE_32_DIRTY_FLAG 0x40
#define PTE_32_DIRTY_MASK 0x01
#define PTE_32_DIRTY(_) (((_) >> 6) & 0x01)
/**
 * [Bit 7] Indirectly determines the memory type used to access the 4-KByte page
 * referenced by this entry.
 *
 * @see Vol3A[4.9.2(Paging and Memory Typing When the PAT is Supported (Pentium III
 * and More Recent Processor Families))]
 */
UINT32 Pat : 1;
#define PTE_32_PAT_BIT 7
#define PTE_32_PAT_FLAG 0x80
#define PTE_32_PAT_MASK 0x01
#define PTE_32_PAT(_) (((_) >> 7) & 0x01)
/**
 * [Bit 8] Global; if CR4.PGE = 1, determines whether the translation is global;
 * ignored otherwise.
 *
 * @see Vol3A[4.10(Caching Translation Information)]
 */
UINT32 Global : 1;
#define PTE_32_GLOBAL_BIT 8
#define PTE_32_GLOBAL_FLAG 0x100
#define PTE_32_GLOBAL_MASK 0x01
#define PTE_32_GLOBAL(_) (((_) >> 8) & 0x01)
/**
 * [Bits 11:9] Ignored.
 */
UINT32 Ignored1 : 3;
#define PTE_32_IGNORED_1_BIT 9
#define PTE_32_IGNORED_1_FLAG 0xE00
#define PTE_32_IGNORED_1_MASK 0x07
#define PTE_32_IGNORED_1(_) (((_) >> 9) & 0x07)
/**
 * [Bits 31:12] Physical address of the 4-KByte page referenced by this
 * entry.
 */
UINT32 PageFrameNumber : 20;
#define PTE_32_PAGE_FRAME_NUMBER_BIT 12
#define PTE_32_PAGE_FRAME_NUMBER_FLAG 0xFFFFF000
#define PTE_32_PAGE_FRAME_NUMBER_MASK 0xFFFFF
#define PTE_32_PAGE_FRAME_NUMBER(_) (((_) >> 12) & 0xFFFFF)
};
/* Raw 32-bit value of the page-table entry. */
UINT32 AsUInt;
} PTE_32;
/**
 * @brief Format of a common Page-Table Entry
 */
typedef union
{
struct
{
/** [Bit 0] Present. */
UINT32 Present : 1;
#define PT_ENTRY_32_PRESENT_BIT 0
#define PT_ENTRY_32_PRESENT_FLAG 0x01
#define PT_ENTRY_32_PRESENT_MASK 0x01
#define PT_ENTRY_32_PRESENT(_) (((_) >> 0) & 0x01)
/** [Bit 1] Read/write. */
UINT32 Write : 1;
#define PT_ENTRY_32_WRITE_BIT 1
#define PT_ENTRY_32_WRITE_FLAG 0x02
#define PT_ENTRY_32_WRITE_MASK 0x01
#define PT_ENTRY_32_WRITE(_) (((_) >> 1) & 0x01)
/** [Bit 2] User/supervisor. */
UINT32 Supervisor : 1;
#define PT_ENTRY_32_SUPERVISOR_BIT 2
#define PT_ENTRY_32_SUPERVISOR_FLAG 0x04
#define PT_ENTRY_32_SUPERVISOR_MASK 0x01
#define PT_ENTRY_32_SUPERVISOR(_) (((_) >> 2) & 0x01)
/** [Bit 3] Page-level write-through. */
UINT32 PageLevelWriteThrough : 1;
#define PT_ENTRY_32_PAGE_LEVEL_WRITE_THROUGH_BIT 3
#define PT_ENTRY_32_PAGE_LEVEL_WRITE_THROUGH_FLAG 0x08
#define PT_ENTRY_32_PAGE_LEVEL_WRITE_THROUGH_MASK 0x01
#define PT_ENTRY_32_PAGE_LEVEL_WRITE_THROUGH(_) (((_) >> 3) & 0x01)
/** [Bit 4] Page-level cache disable. */
UINT32 PageLevelCacheDisable : 1;
#define PT_ENTRY_32_PAGE_LEVEL_CACHE_DISABLE_BIT 4
#define PT_ENTRY_32_PAGE_LEVEL_CACHE_DISABLE_FLAG 0x10
#define PT_ENTRY_32_PAGE_LEVEL_CACHE_DISABLE_MASK 0x01
#define PT_ENTRY_32_PAGE_LEVEL_CACHE_DISABLE(_) (((_) >> 4) & 0x01)
/** [Bit 5] Accessed. */
UINT32 Accessed : 1;
#define PT_ENTRY_32_ACCESSED_BIT 5
#define PT_ENTRY_32_ACCESSED_FLAG 0x20
#define PT_ENTRY_32_ACCESSED_MASK 0x01
#define PT_ENTRY_32_ACCESSED(_) (((_) >> 5) & 0x01)
/** [Bit 6] Dirty. */
UINT32 Dirty : 1;
#define PT_ENTRY_32_DIRTY_BIT 6
#define PT_ENTRY_32_DIRTY_FLAG 0x40
#define PT_ENTRY_32_DIRTY_MASK 0x01
#define PT_ENTRY_32_DIRTY(_) (((_) >> 6) & 0x01)
/** [Bit 7] Page size (for non-leaf levels) / PAT (for a PTE). */
UINT32 LargePage : 1;
#define PT_ENTRY_32_LARGE_PAGE_BIT 7
#define PT_ENTRY_32_LARGE_PAGE_FLAG 0x80
#define PT_ENTRY_32_LARGE_PAGE_MASK 0x01
#define PT_ENTRY_32_LARGE_PAGE(_) (((_) >> 7) & 0x01)
/** [Bit 8] Global. */
UINT32 Global : 1;
#define PT_ENTRY_32_GLOBAL_BIT 8
#define PT_ENTRY_32_GLOBAL_FLAG 0x100
#define PT_ENTRY_32_GLOBAL_MASK 0x01
#define PT_ENTRY_32_GLOBAL(_) (((_) >> 8) & 0x01)
/**
 * [Bits 11:9] Ignored.
 */
UINT32 Ignored1 : 3;
#define PT_ENTRY_32_IGNORED_1_BIT 9
#define PT_ENTRY_32_IGNORED_1_FLAG 0xE00
#define PT_ENTRY_32_IGNORED_1_MASK 0x07
#define PT_ENTRY_32_IGNORED_1(_) (((_) >> 9) & 0x07)
/**
 * [Bits 31:12] Physical address of the 4-KByte page referenced by this entry.
 */
UINT32 PageFrameNumber : 20;
#define PT_ENTRY_32_PAGE_FRAME_NUMBER_BIT 12
#define PT_ENTRY_32_PAGE_FRAME_NUMBER_FLAG 0xFFFFF000
#define PT_ENTRY_32_PAGE_FRAME_NUMBER_MASK 0xFFFFF
#define PT_ENTRY_32_PAGE_FRAME_NUMBER(_) (((_) >> 12) & 0xFFFFF)
};
/* Raw 32-bit value of the entry. */
UINT32 AsUInt;
} PT_ENTRY_32;
/**
 * @defgroup PAGING_STRUCTURES_ENTRY_COUNT_32 \
 * Paging structures entry counts
 *
 * Paging structures entry counts.
 * @{
 */
/* 0x400 = 1024 entries: a 4-KByte table of 4-byte (32-bit) entries. */
#define PDE_ENTRY_COUNT_32 0x00000400
#define PTE_ENTRY_COUNT_32 0x00000400
/**
* @}
*/
/**
* @}
*/
/**
* @defgroup PAGING_64 \
* 64-Bit (4-Level) Paging
*
* A logical processor uses 4-level paging if CR0.PG = 1, CR4.PAE = 1, and IA32_EFER.LME = 1. With
* 4-level paging, linear address are translated using a hierarchy of in-memory paging structures
* located using the contents of CR3. 4-level paging translates 48-bit linear addresses to 52-bit
* physical addresses. Although 52 bits corresponds to 4 PBytes, linear addresses are limited to 48
* bits; at most 256 TBytes of linear-address space may be accessed at any given time. 4-level
* paging uses a hierarchy of paging structures to produce a translation for a linear address. CR3
* is used to locate the first paging-structure, the PML4 table. Use of CR3 with 4-level paging
* depends on whether process-context identifiers (PCIDs) have been enabled by setting CR4.PCIDE.
*
* @see Vol3A[4.5(4-LEVEL PAGING)] (reference)
* @{
*/
/**
 * @brief Format of a 4-Level PML4 Entry (PML4E) that References a Page-Directory-Pointer Table
 */
typedef union
{
struct
{
/**
 * [Bit 0] Present; must be 1 to reference a page-directory-pointer table.
 */
UINT64 Present : 1;
#define PML4E_64_PRESENT_BIT 0
#define PML4E_64_PRESENT_FLAG 0x01
#define PML4E_64_PRESENT_MASK 0x01
#define PML4E_64_PRESENT(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] Read/write; if 0, writes may not be allowed to the 512-GByte region
 * controlled by this entry.
 *
 * @see Vol3A[4.6(Access Rights)]
 */
UINT64 Write : 1;
#define PML4E_64_WRITE_BIT 1
#define PML4E_64_WRITE_FLAG 0x02
#define PML4E_64_WRITE_MASK 0x01
#define PML4E_64_WRITE(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] User/supervisor; if 0, user-mode accesses are not allowed to the
 * 512-GByte region controlled by this entry.
 *
 * @see Vol3A[4.6(Access Rights)]
 */
UINT64 Supervisor : 1;
#define PML4E_64_SUPERVISOR_BIT 2
#define PML4E_64_SUPERVISOR_FLAG 0x04
#define PML4E_64_SUPERVISOR_MASK 0x01
#define PML4E_64_SUPERVISOR(_) (((_) >> 2) & 0x01)
/**
 * [Bit 3] Page-level write-through; indirectly determines the memory type used to
 * access the page-directory-pointer table referenced by this entry.
 *
 * @see Vol3A[4.9.2(Paging and Memory Typing When the PAT is Supported (Pentium III
 * and More Recent Processor Families))]
 */
UINT64 PageLevelWriteThrough : 1;
#define PML4E_64_PAGE_LEVEL_WRITE_THROUGH_BIT 3
#define PML4E_64_PAGE_LEVEL_WRITE_THROUGH_FLAG 0x08
#define PML4E_64_PAGE_LEVEL_WRITE_THROUGH_MASK 0x01
#define PML4E_64_PAGE_LEVEL_WRITE_THROUGH(_) (((_) >> 3) & 0x01)
/**
 * [Bit 4] Page-level cache disable; indirectly determines the memory type used to
 * access the page-directory-pointer table referenced by this entry.
 *
 * @see Vol3A[4.9.2(Paging and Memory Typing When the PAT is Supported (Pentium III
 * and More Recent Processor Families))]
 */
UINT64 PageLevelCacheDisable : 1;
#define PML4E_64_PAGE_LEVEL_CACHE_DISABLE_BIT 4
#define PML4E_64_PAGE_LEVEL_CACHE_DISABLE_FLAG 0x10
#define PML4E_64_PAGE_LEVEL_CACHE_DISABLE_MASK 0x01
#define PML4E_64_PAGE_LEVEL_CACHE_DISABLE(_) (((_) >> 4) & 0x01)
/**
 * [Bit 5] Accessed; indicates whether this entry has been used for linear-address
 * translation.
 *
 * @see Vol3A[4.8(Accessed and Dirty Flags)]
 */
UINT64 Accessed : 1;
#define PML4E_64_ACCESSED_BIT 5
#define PML4E_64_ACCESSED_FLAG 0x20
#define PML4E_64_ACCESSED_MASK 0x01
#define PML4E_64_ACCESSED(_) (((_) >> 5) & 0x01)
/* [Bit 6] Ignored (a PML4E has no Dirty flag). */
UINT64 Reserved1 : 1;
/**
 * [Bit 7] Reserved (must be 0).
 */
UINT64 MustBeZero : 1;
#define PML4E_64_MUST_BE_ZERO_BIT 7
#define PML4E_64_MUST_BE_ZERO_FLAG 0x80
#define PML4E_64_MUST_BE_ZERO_MASK 0x01
#define PML4E_64_MUST_BE_ZERO(_) (((_) >> 7) & 0x01)
/**
 * [Bits 10:8] Ignored.
 */
UINT64 Ignored1 : 3;
#define PML4E_64_IGNORED_1_BIT 8
#define PML4E_64_IGNORED_1_FLAG 0x700
#define PML4E_64_IGNORED_1_MASK 0x07
#define PML4E_64_IGNORED_1(_) (((_) >> 8) & 0x07)
/**
 * [Bit 11] For ordinary paging, ignored; for HLAT paging, restart (if 1,
 * linear-address translation is restarted with ordinary paging)
 *
 * @see Vol3A[4.5.5(Restart of HLAT Paging)]
 */
UINT64 Restart : 1;
#define PML4E_64_RESTART_BIT 11
#define PML4E_64_RESTART_FLAG 0x800
#define PML4E_64_RESTART_MASK 0x01
#define PML4E_64_RESTART(_) (((_) >> 11) & 0x01)
/**
 * [Bits 47:12] Physical address of 4-KByte aligned page-directory-pointer table
 * referenced by this entry.
 */
UINT64 PageFrameNumber : 36;
#define PML4E_64_PAGE_FRAME_NUMBER_BIT 12
#define PML4E_64_PAGE_FRAME_NUMBER_FLAG 0xFFFFFFFFF000
#define PML4E_64_PAGE_FRAME_NUMBER_MASK 0xFFFFFFFFF
#define PML4E_64_PAGE_FRAME_NUMBER(_) (((_) >> 12) & 0xFFFFFFFFF)
/* [Bits 51:48] Reserved (must be 0). */
UINT64 Reserved2 : 4;
/**
 * [Bits 62:52] Ignored.
 */
UINT64 Ignored2 : 11;
#define PML4E_64_IGNORED_2_BIT 52
#define PML4E_64_IGNORED_2_FLAG 0x7FF0000000000000
#define PML4E_64_IGNORED_2_MASK 0x7FF
#define PML4E_64_IGNORED_2(_) (((_) >> 52) & 0x7FF)
/**
 * [Bit 63] If IA32_EFER.NXE = 1, execute-disable (if 1, instruction fetches are not
 * allowed from the 512-GByte region controlled by this entry); otherwise, reserved
 * (must be 0).
 *
 * @see Vol3A[4.6(Access Rights)]
 */
UINT64 ExecuteDisable : 1;
#define PML4E_64_EXECUTE_DISABLE_BIT 63
#define PML4E_64_EXECUTE_DISABLE_FLAG 0x8000000000000000
#define PML4E_64_EXECUTE_DISABLE_MASK 0x01
#define PML4E_64_EXECUTE_DISABLE(_) (((_) >> 63) & 0x01)
};
/* Raw 64-bit value of the PML4 entry. */
UINT64 AsUInt;
} PML4E_64;
/**
* @brief Format of a 4-Level Page-Directory-Pointer-Table Entry (PDPTE) that Maps a 1-GByte Page
*/
typedef union
{
struct
{
/**
* [Bit 0] Present; must be 1 to map a 1-GByte page.
*/
UINT64 Present : 1;
#define PDPTE_1GB_64_PRESENT_BIT 0
#define PDPTE_1GB_64_PRESENT_FLAG 0x01
#define PDPTE_1GB_64_PRESENT_MASK 0x01
#define PDPTE_1GB_64_PRESENT(_) (((_) >> 0) & 0x01)
/**
* [Bit 1] Read/write; if 0, writes may not be allowed to the 1-GByte page
* referenced by this entry.
*
* @see Vol3A[4.6(Access Rights)]
*/
UINT64 Write : 1;
#define PDPTE_1GB_64_WRITE_BIT 1
#define PDPTE_1GB_64_WRITE_FLAG 0x02
#define PDPTE_1GB_64_WRITE_MASK 0x01
#define PDPTE_1GB_64_WRITE(_) (((_) >> 1) & 0x01)
/**
* [Bit 2] User/supervisor; if 0, user-mode accesses are not allowed to the 1-GByte
* page referenced by this entry.
*
* @see Vol3A[4.6(Access Rights)]
*/
UINT64 Supervisor : 1;
#define PDPTE_1GB_64_SUPERVISOR_BIT 2
#define PDPTE_1GB_64_SUPERVISOR_FLAG 0x04
#define PDPTE_1GB_64_SUPERVISOR_MASK 0x01
#define PDPTE_1GB_64_SUPERVISOR(_) (((_) >> 2) & 0x01)
/**
* [Bit 3] Page-level write-through; indirectly determines the memory type used to
* access the 1-GByte page referenced by this entry.
*
* @see Vol3A[4.9.2(Paging and Memory Typing When the PAT is Supported (Pentium III
* and More Recent Processor Families))]
*/
UINT64 PageLevelWriteThrough : 1;
#define PDPTE_1GB_64_PAGE_LEVEL_WRITE_THROUGH_BIT 3
#define PDPTE_1GB_64_PAGE_LEVEL_WRITE_THROUGH_FLAG 0x08
#define PDPTE_1GB_64_PAGE_LEVEL_WRITE_THROUGH_MASK 0x01
#define PDPTE_1GB_64_PAGE_LEVEL_WRITE_THROUGH(_) (((_) >> 3) & 0x01)
/**
* [Bit 4] Page-level cache disable; indirectly determines the memory type used to
* access the 1-GByte page referenced by this entry.
*
* @see Vol3A[4.9.2(Paging and Memory Typing When the PAT is Supported (Pentium III
* and More Recent Processor Families))]
*/
UINT64 PageLevelCacheDisable : 1;
#define PDPTE_1GB_64_PAGE_LEVEL_CACHE_DISABLE_BIT 4
#define PDPTE_1GB_64_PAGE_LEVEL_CACHE_DISABLE_FLAG 0x10
#define PDPTE_1GB_64_PAGE_LEVEL_CACHE_DISABLE_MASK 0x01
#define PDPTE_1GB_64_PAGE_LEVEL_CACHE_DISABLE(_) (((_) >> 4) & 0x01)
/**
* [Bit 5] Accessed; indicates whether software has accessed the 1-GByte page
* referenced by this entry.
*
* @see Vol3A[4.8(Accessed and Dirty Flags)]
*/
UINT64 Accessed : 1;
#define PDPTE_1GB_64_ACCESSED_BIT 5
#define PDPTE_1GB_64_ACCESSED_FLAG 0x20
#define PDPTE_1GB_64_ACCESSED_MASK 0x01
#define PDPTE_1GB_64_ACCESSED(_) (((_) >> 5) & 0x01)
/**
* [Bit 6] Dirty; indicates whether software has written to the 1-GByte page
* referenced by this entry.
*
* @see Vol3A[4.8(Accessed and Dirty Flags)]
*/
UINT64 Dirty : 1;
#define PDPTE_1GB_64_DIRTY_BIT 6
#define PDPTE_1GB_64_DIRTY_FLAG 0x40
#define PDPTE_1GB_64_DIRTY_MASK 0x01
#define PDPTE_1GB_64_DIRTY(_) (((_) >> 6) & 0x01)
/**
* [Bit 7] Page size; must be 1 (otherwise, this entry references a page directory).
*/
UINT64 LargePage : 1;
#define PDPTE_1GB_64_LARGE_PAGE_BIT 7
#define PDPTE_1GB_64_LARGE_PAGE_FLAG 0x80
#define PDPTE_1GB_64_LARGE_PAGE_MASK 0x01
#define PDPTE_1GB_64_LARGE_PAGE(_) (((_) >> 7) & 0x01)
/**
* [Bit 8] Global; if CR4.PGE = 1, determines whether the translation is global;
* ignored otherwise.
*
* @see Vol3A[4.10(Caching Translation Information)]
*/
UINT64 Global : 1;
#define PDPTE_1GB_64_GLOBAL_BIT 8
#define PDPTE_1GB_64_GLOBAL_FLAG 0x100
#define PDPTE_1GB_64_GLOBAL_MASK 0x01
#define PDPTE_1GB_64_GLOBAL(_) (((_) >> 8) & 0x01)
/**
* [Bits 10:9] Ignored.
*/
UINT64 Ignored1 : 2;
#define PDPTE_1GB_64_IGNORED_1_BIT 9
#define PDPTE_1GB_64_IGNORED_1_FLAG 0x600
#define PDPTE_1GB_64_IGNORED_1_MASK 0x03
#define PDPTE_1GB_64_IGNORED_1(_) (((_) >> 9) & 0x03)
/**
* [Bit 11] For ordinary paging, ignored; for HLAT paging, restart (if 1,
* linear-address translation is restarted with ordinary paging)
*
* @see Vol3A[4.5.5(Restart of HLAT Paging)]
*/
UINT64 Restart : 1;
#define PDPTE_1GB_64_RESTART_BIT 11
#define PDPTE_1GB_64_RESTART_FLAG 0x800
#define PDPTE_1GB_64_RESTART_MASK 0x01
#define PDPTE_1GB_64_RESTART(_) (((_) >> 11) & 0x01)
/**
* [Bit 12] Indirectly determines the memory type used to access the 1-GByte page
* referenced by this entry.
*
* @note The PAT is supported on all processors that support 4-level paging.
* @see Vol3A[4.9.2(Paging and Memory Typing When the PAT is Supported (Pentium III
* and More Recent Processor Families))]
*/
UINT64 Pat : 1;
#define PDPTE_1GB_64_PAT_BIT 12
#define PDPTE_1GB_64_PAT_FLAG 0x1000
#define PDPTE_1GB_64_PAT_MASK 0x01
#define PDPTE_1GB_64_PAT(_) (((_) >> 12) & 0x01)
UINT64 Reserved1 : 17;
/**
* [Bits 47:30] Physical address of the 1-GByte page referenced by this entry.
*/
UINT64 PageFrameNumber : 18;
#define PDPTE_1GB_64_PAGE_FRAME_NUMBER_BIT 30
#define PDPTE_1GB_64_PAGE_FRAME_NUMBER_FLAG 0xFFFFC0000000
#define PDPTE_1GB_64_PAGE_FRAME_NUMBER_MASK 0x3FFFF
#define PDPTE_1GB_64_PAGE_FRAME_NUMBER(_) (((_) >> 30) & 0x3FFFF)
UINT64 Reserved2 : 4;
/**
* [Bits 58:52] Ignored.
*/
UINT64 Ignored2 : 7;
#define PDPTE_1GB_64_IGNORED_2_BIT 52
#define PDPTE_1GB_64_IGNORED_2_FLAG 0x7F0000000000000
#define PDPTE_1GB_64_IGNORED_2_MASK 0x7F
#define PDPTE_1GB_64_IGNORED_2(_) (((_) >> 52) & 0x7F)
/**
* [Bits 62:59] Protection key; if CR4.PKE = 1, determines the protection key of the
* page; ignored otherwise.
*
* @see Vol3A[4.6.2(Protection Keys)]
*/
UINT64 ProtectionKey : 4;
#define PDPTE_1GB_64_PROTECTION_KEY_BIT 59
#define PDPTE_1GB_64_PROTECTION_KEY_FLAG 0x7800000000000000
#define PDPTE_1GB_64_PROTECTION_KEY_MASK 0x0F
#define PDPTE_1GB_64_PROTECTION_KEY(_) (((_) >> 59) & 0x0F)
/**
* [Bit 63] If IA32_EFER.NXE = 1, execute-disable (if 1, instruction fetches are not
* allowed from the 1-GByte page controlled by this entry); otherwise, reserved
* (must be 0).
*
* @see Vol3A[4.6(Access Rights)]
*/
UINT64 ExecuteDisable : 1;
#define PDPTE_1GB_64_EXECUTE_DISABLE_BIT 63
#define PDPTE_1GB_64_EXECUTE_DISABLE_FLAG 0x8000000000000000
#define PDPTE_1GB_64_EXECUTE_DISABLE_MASK 0x01
#define PDPTE_1GB_64_EXECUTE_DISABLE(_) (((_) >> 63) & 0x01)
};
UINT64 AsUInt;
} PDPTE_1GB_64;
/**
 * @brief Format of a 4-Level Page-Directory-Pointer-Table Entry (PDPTE) that References a Page
 * Directory
 *
 * Exact hardware bit layout; field order and widths must not change.
 */
typedef union
{
struct
{
/**
 * [Bit 0] Present; must be 1 to reference a page directory.
 */
UINT64 Present : 1;
#define PDPTE_64_PRESENT_BIT 0
#define PDPTE_64_PRESENT_FLAG 0x01
#define PDPTE_64_PRESENT_MASK 0x01
#define PDPTE_64_PRESENT(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] Read/write; if 0, writes may not be allowed to the 1-GByte region
 * controlled by this entry.
 *
 * @see Vol3A[4.6(Access Rights)]
 */
UINT64 Write : 1;
#define PDPTE_64_WRITE_BIT 1
#define PDPTE_64_WRITE_FLAG 0x02
#define PDPTE_64_WRITE_MASK 0x01
#define PDPTE_64_WRITE(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] User/supervisor; if 0, user-mode accesses are not allowed to the 1-GByte
 * region controlled by this entry.
 *
 * @see Vol3A[4.6(Access Rights)]
 */
UINT64 Supervisor : 1;
#define PDPTE_64_SUPERVISOR_BIT 2
#define PDPTE_64_SUPERVISOR_FLAG 0x04
#define PDPTE_64_SUPERVISOR_MASK 0x01
#define PDPTE_64_SUPERVISOR(_) (((_) >> 2) & 0x01)
/**
 * [Bit 3] Page-level write-through; indirectly determines the memory type used to
 * access the page directory referenced by this entry.
 *
 * @see Vol3A[4.9.2(Paging and Memory Typing When the PAT is Supported (Pentium III
 * and More Recent Processor Families))]
 */
UINT64 PageLevelWriteThrough : 1;
#define PDPTE_64_PAGE_LEVEL_WRITE_THROUGH_BIT 3
#define PDPTE_64_PAGE_LEVEL_WRITE_THROUGH_FLAG 0x08
#define PDPTE_64_PAGE_LEVEL_WRITE_THROUGH_MASK 0x01
#define PDPTE_64_PAGE_LEVEL_WRITE_THROUGH(_) (((_) >> 3) & 0x01)
/**
 * [Bit 4] Page-level cache disable; indirectly determines the memory type used to
 * access the page directory referenced by this entry.
 *
 * @see Vol3A[4.9.2(Paging and Memory Typing When the PAT is Supported (Pentium III
 * and More Recent Processor Families))]
 */
UINT64 PageLevelCacheDisable : 1;
#define PDPTE_64_PAGE_LEVEL_CACHE_DISABLE_BIT 4
#define PDPTE_64_PAGE_LEVEL_CACHE_DISABLE_FLAG 0x10
#define PDPTE_64_PAGE_LEVEL_CACHE_DISABLE_MASK 0x01
#define PDPTE_64_PAGE_LEVEL_CACHE_DISABLE(_) (((_) >> 4) & 0x01)
/**
 * [Bit 5] Accessed; indicates whether this entry has been used for linear-address
 * translation.
 *
 * @see Vol3A[4.8(Accessed and Dirty Flags)]
 */
UINT64 Accessed : 1;
#define PDPTE_64_ACCESSED_BIT 5
#define PDPTE_64_ACCESSED_FLAG 0x20
#define PDPTE_64_ACCESSED_MASK 0x01
#define PDPTE_64_ACCESSED(_) (((_) >> 5) & 0x01)
/**
 * [Bit 6] Reserved.
 */
UINT64 Reserved1 : 1;
/**
 * [Bit 7] Page size; must be 0 (otherwise, this entry maps a 1-GByte page).
 */
UINT64 LargePage : 1;
#define PDPTE_64_LARGE_PAGE_BIT 7
#define PDPTE_64_LARGE_PAGE_FLAG 0x80
#define PDPTE_64_LARGE_PAGE_MASK 0x01
#define PDPTE_64_LARGE_PAGE(_) (((_) >> 7) & 0x01)
/**
 * [Bits 10:8] Ignored.
 */
UINT64 Ignored1 : 3;
#define PDPTE_64_IGNORED_1_BIT 8
#define PDPTE_64_IGNORED_1_FLAG 0x700
#define PDPTE_64_IGNORED_1_MASK 0x07
#define PDPTE_64_IGNORED_1(_) (((_) >> 8) & 0x07)
/**
 * [Bit 11] For ordinary paging, ignored; for HLAT paging, restart (if 1,
 * linear-address translation is restarted with ordinary paging)
 *
 * @see Vol3A[4.5.5(Restart of HLAT Paging)]
 */
UINT64 Restart : 1;
#define PDPTE_64_RESTART_BIT 11
#define PDPTE_64_RESTART_FLAG 0x800
#define PDPTE_64_RESTART_MASK 0x01
#define PDPTE_64_RESTART(_) (((_) >> 11) & 0x01)
/**
 * [Bits 47:12] Physical address of 4-KByte aligned page directory referenced by
 * this entry.
 */
UINT64 PageFrameNumber : 36;
#define PDPTE_64_PAGE_FRAME_NUMBER_BIT 12
#define PDPTE_64_PAGE_FRAME_NUMBER_FLAG 0xFFFFFFFFF000
#define PDPTE_64_PAGE_FRAME_NUMBER_MASK 0xFFFFFFFFF
#define PDPTE_64_PAGE_FRAME_NUMBER(_) (((_) >> 12) & 0xFFFFFFFFF)
/**
 * [Bits 51:48] Reserved (must be 0).
 */
UINT64 Reserved2 : 4;
/**
 * [Bits 62:52] Ignored.
 */
UINT64 Ignored2 : 11;
#define PDPTE_64_IGNORED_2_BIT 52
#define PDPTE_64_IGNORED_2_FLAG 0x7FF0000000000000
#define PDPTE_64_IGNORED_2_MASK 0x7FF
#define PDPTE_64_IGNORED_2(_) (((_) >> 52) & 0x7FF)
/**
 * [Bit 63] If IA32_EFER.NXE = 1, execute-disable (if 1, instruction fetches are not
 * allowed from the 1-GByte region controlled by this entry); otherwise, reserved
 * (must be 0).
 *
 * @see Vol3A[4.6(Access Rights)]
 */
UINT64 ExecuteDisable : 1;
#define PDPTE_64_EXECUTE_DISABLE_BIT 63
#define PDPTE_64_EXECUTE_DISABLE_FLAG 0x8000000000000000
#define PDPTE_64_EXECUTE_DISABLE_MASK 0x01
#define PDPTE_64_EXECUTE_DISABLE(_) (((_) >> 63) & 0x01)
};
/**
 * Raw 64-bit value of the entry.
 */
UINT64 AsUInt;
} PDPTE_64;
/**
 * @brief Format of a 4-Level Page-Directory Entry that Maps a 2-MByte Page
 *
 * Exact hardware bit layout; field order and widths must not change.
 */
typedef union
{
struct
{
/**
 * [Bit 0] Present; must be 1 to map a 2-MByte page.
 */
UINT64 Present : 1;
#define PDE_2MB_64_PRESENT_BIT 0
#define PDE_2MB_64_PRESENT_FLAG 0x01
#define PDE_2MB_64_PRESENT_MASK 0x01
#define PDE_2MB_64_PRESENT(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] Read/write; if 0, writes may not be allowed to the 2-MByte page
 * referenced by this entry.
 *
 * @see Vol3A[4.6(Access Rights)]
 */
UINT64 Write : 1;
#define PDE_2MB_64_WRITE_BIT 1
#define PDE_2MB_64_WRITE_FLAG 0x02
#define PDE_2MB_64_WRITE_MASK 0x01
#define PDE_2MB_64_WRITE(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] User/supervisor; if 0, user-mode accesses are not allowed to the 2-MByte
 * page referenced by this entry.
 *
 * @see Vol3A[4.6(Access Rights)]
 */
UINT64 Supervisor : 1;
#define PDE_2MB_64_SUPERVISOR_BIT 2
#define PDE_2MB_64_SUPERVISOR_FLAG 0x04
#define PDE_2MB_64_SUPERVISOR_MASK 0x01
#define PDE_2MB_64_SUPERVISOR(_) (((_) >> 2) & 0x01)
/**
 * [Bit 3] Page-level write-through; indirectly determines the memory type used to
 * access the 2-MByte page referenced by this entry.
 *
 * @see Vol3A[4.9.2(Paging and Memory Typing When the PAT is Supported (Pentium III
 * and More Recent Processor Families))]
 */
UINT64 PageLevelWriteThrough : 1;
#define PDE_2MB_64_PAGE_LEVEL_WRITE_THROUGH_BIT 3
#define PDE_2MB_64_PAGE_LEVEL_WRITE_THROUGH_FLAG 0x08
#define PDE_2MB_64_PAGE_LEVEL_WRITE_THROUGH_MASK 0x01
#define PDE_2MB_64_PAGE_LEVEL_WRITE_THROUGH(_) (((_) >> 3) & 0x01)
/**
 * [Bit 4] Page-level cache disable; indirectly determines the memory type used to
 * access the 2-MByte page referenced by this entry.
 *
 * @see Vol3A[4.9.2(Paging and Memory Typing When the PAT is Supported (Pentium III
 * and More Recent Processor Families))]
 */
UINT64 PageLevelCacheDisable : 1;
#define PDE_2MB_64_PAGE_LEVEL_CACHE_DISABLE_BIT 4
#define PDE_2MB_64_PAGE_LEVEL_CACHE_DISABLE_FLAG 0x10
#define PDE_2MB_64_PAGE_LEVEL_CACHE_DISABLE_MASK 0x01
#define PDE_2MB_64_PAGE_LEVEL_CACHE_DISABLE(_) (((_) >> 4) & 0x01)
/**
 * [Bit 5] Accessed; indicates whether software has accessed the 2-MByte page
 * referenced by this entry.
 *
 * @see Vol3A[4.8(Accessed and Dirty Flags)]
 */
UINT64 Accessed : 1;
#define PDE_2MB_64_ACCESSED_BIT 5
#define PDE_2MB_64_ACCESSED_FLAG 0x20
#define PDE_2MB_64_ACCESSED_MASK 0x01
#define PDE_2MB_64_ACCESSED(_) (((_) >> 5) & 0x01)
/**
 * [Bit 6] Dirty; indicates whether software has written to the 2-MByte page
 * referenced by this entry.
 *
 * @see Vol3A[4.8(Accessed and Dirty Flags)]
 */
UINT64 Dirty : 1;
#define PDE_2MB_64_DIRTY_BIT 6
#define PDE_2MB_64_DIRTY_FLAG 0x40
#define PDE_2MB_64_DIRTY_MASK 0x01
#define PDE_2MB_64_DIRTY(_) (((_) >> 6) & 0x01)
/**
 * [Bit 7] Page size; must be 1 (otherwise, this entry references a page directory).
 */
UINT64 LargePage : 1;
#define PDE_2MB_64_LARGE_PAGE_BIT 7
#define PDE_2MB_64_LARGE_PAGE_FLAG 0x80
#define PDE_2MB_64_LARGE_PAGE_MASK 0x01
#define PDE_2MB_64_LARGE_PAGE(_) (((_) >> 7) & 0x01)
/**
 * [Bit 8] Global; if CR4.PGE = 1, determines whether the translation is global;
 * ignored otherwise.
 *
 * @see Vol3A[4.10(Caching Translation Information)]
 */
UINT64 Global : 1;
#define PDE_2MB_64_GLOBAL_BIT 8
#define PDE_2MB_64_GLOBAL_FLAG 0x100
#define PDE_2MB_64_GLOBAL_MASK 0x01
#define PDE_2MB_64_GLOBAL(_) (((_) >> 8) & 0x01)
/**
 * [Bits 10:9] Ignored.
 */
UINT64 Ignored1 : 2;
#define PDE_2MB_64_IGNORED_1_BIT 9
#define PDE_2MB_64_IGNORED_1_FLAG 0x600
#define PDE_2MB_64_IGNORED_1_MASK 0x03
#define PDE_2MB_64_IGNORED_1(_) (((_) >> 9) & 0x03)
/**
 * [Bit 11] For ordinary paging, ignored; for HLAT paging, restart (if 1,
 * linear-address translation is restarted with ordinary paging)
 *
 * @see Vol3A[4.5.5(Restart of HLAT Paging)]
 */
UINT64 Restart : 1;
#define PDE_2MB_64_RESTART_BIT 11
#define PDE_2MB_64_RESTART_FLAG 0x800
#define PDE_2MB_64_RESTART_MASK 0x01
#define PDE_2MB_64_RESTART(_) (((_) >> 11) & 0x01)
/**
 * [Bit 12] Indirectly determines the memory type used to access the 2-MByte page
 * referenced by this entry.
 *
 * @note The PAT is supported on all processors that support 4-level paging.
 * @see Vol3A[4.9.2(Paging and Memory Typing When the PAT is Supported (Pentium III
 * and More Recent Processor Families))]
 */
UINT64 Pat : 1;
#define PDE_2MB_64_PAT_BIT 12
#define PDE_2MB_64_PAT_FLAG 0x1000
#define PDE_2MB_64_PAT_MASK 0x01
#define PDE_2MB_64_PAT(_) (((_) >> 12) & 0x01)
/**
 * [Bits 20:13] Reserved (must be 0); a 2-MByte page is 21-bit aligned.
 */
UINT64 Reserved1 : 8;
/**
 * [Bits 47:21] Physical address of the 2-MByte page referenced by this entry.
 */
UINT64 PageFrameNumber : 27;
#define PDE_2MB_64_PAGE_FRAME_NUMBER_BIT 21
#define PDE_2MB_64_PAGE_FRAME_NUMBER_FLAG 0xFFFFFFE00000
#define PDE_2MB_64_PAGE_FRAME_NUMBER_MASK 0x7FFFFFF
#define PDE_2MB_64_PAGE_FRAME_NUMBER(_) (((_) >> 21) & 0x7FFFFFF)
/**
 * [Bits 51:48] Reserved (must be 0).
 */
UINT64 Reserved2 : 4;
/**
 * [Bits 58:52] Ignored.
 */
UINT64 Ignored2 : 7;
#define PDE_2MB_64_IGNORED_2_BIT 52
#define PDE_2MB_64_IGNORED_2_FLAG 0x7F0000000000000
#define PDE_2MB_64_IGNORED_2_MASK 0x7F
#define PDE_2MB_64_IGNORED_2(_) (((_) >> 52) & 0x7F)
/**
 * [Bits 62:59] Protection key; if CR4.PKE = 1, determines the protection key of the
 * page; ignored otherwise.
 *
 * @see Vol3A[4.6.2(Protection Keys)]
 */
UINT64 ProtectionKey : 4;
#define PDE_2MB_64_PROTECTION_KEY_BIT 59
#define PDE_2MB_64_PROTECTION_KEY_FLAG 0x7800000000000000
#define PDE_2MB_64_PROTECTION_KEY_MASK 0x0F
#define PDE_2MB_64_PROTECTION_KEY(_) (((_) >> 59) & 0x0F)
/**
 * [Bit 63] If IA32_EFER.NXE = 1, execute-disable (if 1, instruction fetches are not
 * allowed from the 2-MByte page controlled by this entry); otherwise, reserved
 * (must be 0).
 *
 * @see Vol3A[4.6(Access Rights)]
 */
UINT64 ExecuteDisable : 1;
#define PDE_2MB_64_EXECUTE_DISABLE_BIT 63
#define PDE_2MB_64_EXECUTE_DISABLE_FLAG 0x8000000000000000
#define PDE_2MB_64_EXECUTE_DISABLE_MASK 0x01
#define PDE_2MB_64_EXECUTE_DISABLE(_) (((_) >> 63) & 0x01)
};
/**
 * Raw 64-bit value of the entry.
 */
UINT64 AsUInt;
} PDE_2MB_64;
/**
 * @brief Format of a 4-Level Page-Directory Entry that References a Page Table
 *
 * Exact hardware bit layout; field order and widths must not change.
 */
typedef union
{
struct
{
/**
 * [Bit 0] Present; must be 1 to reference a page table.
 */
UINT64 Present : 1;
#define PDE_64_PRESENT_BIT 0
#define PDE_64_PRESENT_FLAG 0x01
#define PDE_64_PRESENT_MASK 0x01
#define PDE_64_PRESENT(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] Read/write; if 0, writes may not be allowed to the 2-MByte region
 * controlled by this entry.
 *
 * @see Vol3A[4.6(Access Rights)]
 */
UINT64 Write : 1;
#define PDE_64_WRITE_BIT 1
#define PDE_64_WRITE_FLAG 0x02
#define PDE_64_WRITE_MASK 0x01
#define PDE_64_WRITE(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] User/supervisor; if 0, user-mode accesses are not allowed to the 2-MByte
 * region controlled by this entry.
 *
 * @see Vol3A[4.6(Access Rights)]
 */
UINT64 Supervisor : 1;
#define PDE_64_SUPERVISOR_BIT 2
#define PDE_64_SUPERVISOR_FLAG 0x04
#define PDE_64_SUPERVISOR_MASK 0x01
#define PDE_64_SUPERVISOR(_) (((_) >> 2) & 0x01)
/**
 * [Bit 3] Page-level write-through; indirectly determines the memory type used to
 * access the page table referenced by this entry.
 *
 * @see Vol3A[4.9.2(Paging and Memory Typing When the PAT is Supported (Pentium III
 * and More Recent Processor Families))]
 */
UINT64 PageLevelWriteThrough : 1;
#define PDE_64_PAGE_LEVEL_WRITE_THROUGH_BIT 3
#define PDE_64_PAGE_LEVEL_WRITE_THROUGH_FLAG 0x08
#define PDE_64_PAGE_LEVEL_WRITE_THROUGH_MASK 0x01
#define PDE_64_PAGE_LEVEL_WRITE_THROUGH(_) (((_) >> 3) & 0x01)
/**
 * [Bit 4] Page-level cache disable; indirectly determines the memory type used to
 * access the page table referenced by this entry.
 *
 * @see Vol3A[4.9.2(Paging and Memory Typing When the PAT is Supported (Pentium III
 * and More Recent Processor Families))]
 */
UINT64 PageLevelCacheDisable : 1;
#define PDE_64_PAGE_LEVEL_CACHE_DISABLE_BIT 4
#define PDE_64_PAGE_LEVEL_CACHE_DISABLE_FLAG 0x10
#define PDE_64_PAGE_LEVEL_CACHE_DISABLE_MASK 0x01
#define PDE_64_PAGE_LEVEL_CACHE_DISABLE(_) (((_) >> 4) & 0x01)
/**
 * [Bit 5] Accessed; indicates whether this entry has been used for linear-address
 * translation.
 *
 * @see Vol3A[4.8(Accessed and Dirty Flags)]
 */
UINT64 Accessed : 1;
#define PDE_64_ACCESSED_BIT 5
#define PDE_64_ACCESSED_FLAG 0x20
#define PDE_64_ACCESSED_MASK 0x01
#define PDE_64_ACCESSED(_) (((_) >> 5) & 0x01)
/**
 * [Bit 6] Reserved.
 */
UINT64 Reserved1 : 1;
/**
 * [Bit 7] Page size; must be 0 (otherwise, this entry maps a 2-MByte page).
 */
UINT64 LargePage : 1;
#define PDE_64_LARGE_PAGE_BIT 7
#define PDE_64_LARGE_PAGE_FLAG 0x80
#define PDE_64_LARGE_PAGE_MASK 0x01
#define PDE_64_LARGE_PAGE(_) (((_) >> 7) & 0x01)
/**
 * [Bits 10:8] Ignored.
 */
UINT64 Ignored1 : 3;
#define PDE_64_IGNORED_1_BIT 8
#define PDE_64_IGNORED_1_FLAG 0x700
#define PDE_64_IGNORED_1_MASK 0x07
#define PDE_64_IGNORED_1(_) (((_) >> 8) & 0x07)
/**
 * [Bit 11] For ordinary paging, ignored; for HLAT paging, restart (if 1,
 * linear-address translation is restarted with ordinary paging)
 *
 * @see Vol3A[4.5.5(Restart of HLAT Paging)]
 */
UINT64 Restart : 1;
#define PDE_64_RESTART_BIT 11
#define PDE_64_RESTART_FLAG 0x800
#define PDE_64_RESTART_MASK 0x01
#define PDE_64_RESTART(_) (((_) >> 11) & 0x01)
/**
 * [Bits 47:12] Physical address of 4-KByte aligned page table referenced by this
 * entry.
 */
UINT64 PageFrameNumber : 36;
#define PDE_64_PAGE_FRAME_NUMBER_BIT 12
#define PDE_64_PAGE_FRAME_NUMBER_FLAG 0xFFFFFFFFF000
#define PDE_64_PAGE_FRAME_NUMBER_MASK 0xFFFFFFFFF
#define PDE_64_PAGE_FRAME_NUMBER(_) (((_) >> 12) & 0xFFFFFFFFF)
/**
 * [Bits 51:48] Reserved (must be 0).
 */
UINT64 Reserved2 : 4;
/**
 * [Bits 62:52] Ignored.
 */
UINT64 Ignored2 : 11;
#define PDE_64_IGNORED_2_BIT 52
#define PDE_64_IGNORED_2_FLAG 0x7FF0000000000000
#define PDE_64_IGNORED_2_MASK 0x7FF
#define PDE_64_IGNORED_2(_) (((_) >> 52) & 0x7FF)
/**
 * [Bit 63] If IA32_EFER.NXE = 1, execute-disable (if 1, instruction fetches are not
 * allowed from the 2-MByte region controlled by this entry); otherwise, reserved
 * (must be 0).
 *
 * @see Vol3A[4.6(Access Rights)]
 */
UINT64 ExecuteDisable : 1;
#define PDE_64_EXECUTE_DISABLE_BIT 63
#define PDE_64_EXECUTE_DISABLE_FLAG 0x8000000000000000
#define PDE_64_EXECUTE_DISABLE_MASK 0x01
#define PDE_64_EXECUTE_DISABLE(_) (((_) >> 63) & 0x01)
};
/**
 * Raw 64-bit value of the entry.
 */
UINT64 AsUInt;
} PDE_64;
/**
 * @brief Format of a 4-Level Page-Table Entry that Maps a 4-KByte Page
 *
 * Exact hardware bit layout; field order and widths must not change.
 */
typedef union
{
struct
{
/**
 * [Bit 0] Present; must be 1 to map a 4-KByte page.
 */
UINT64 Present : 1;
#define PTE_64_PRESENT_BIT 0
#define PTE_64_PRESENT_FLAG 0x01
#define PTE_64_PRESENT_MASK 0x01
#define PTE_64_PRESENT(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] Read/write; if 0, writes may not be allowed to the 4-KByte page
 * referenced by this entry.
 *
 * @see Vol3A[4.6(Access Rights)]
 */
UINT64 Write : 1;
#define PTE_64_WRITE_BIT 1
#define PTE_64_WRITE_FLAG 0x02
#define PTE_64_WRITE_MASK 0x01
#define PTE_64_WRITE(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] User/supervisor; if 0, user-mode accesses are not allowed to the 4-KByte
 * page referenced by this entry.
 *
 * @see Vol3A[4.6(Access Rights)]
 */
UINT64 Supervisor : 1;
#define PTE_64_SUPERVISOR_BIT 2
#define PTE_64_SUPERVISOR_FLAG 0x04
#define PTE_64_SUPERVISOR_MASK 0x01
#define PTE_64_SUPERVISOR(_) (((_) >> 2) & 0x01)
/**
 * [Bit 3] Page-level write-through; indirectly determines the memory type used to
 * access the 4-KByte page referenced by this entry.
 *
 * @see Vol3A[4.9.2(Paging and Memory Typing When the PAT is Supported (Pentium III
 * and More Recent Processor Families))]
 */
UINT64 PageLevelWriteThrough : 1;
#define PTE_64_PAGE_LEVEL_WRITE_THROUGH_BIT 3
#define PTE_64_PAGE_LEVEL_WRITE_THROUGH_FLAG 0x08
#define PTE_64_PAGE_LEVEL_WRITE_THROUGH_MASK 0x01
#define PTE_64_PAGE_LEVEL_WRITE_THROUGH(_) (((_) >> 3) & 0x01)
/**
 * [Bit 4] Page-level cache disable; indirectly determines the memory type used to
 * access the 4-KByte page referenced by this entry.
 *
 * @see Vol3A[4.9.2(Paging and Memory Typing When the PAT is Supported (Pentium III
 * and More Recent Processor Families))]
 */
UINT64 PageLevelCacheDisable : 1;
#define PTE_64_PAGE_LEVEL_CACHE_DISABLE_BIT 4
#define PTE_64_PAGE_LEVEL_CACHE_DISABLE_FLAG 0x10
#define PTE_64_PAGE_LEVEL_CACHE_DISABLE_MASK 0x01
#define PTE_64_PAGE_LEVEL_CACHE_DISABLE(_) (((_) >> 4) & 0x01)
/**
 * [Bit 5] Accessed; indicates whether software has accessed the 4-KByte page
 * referenced by this entry.
 *
 * @see Vol3A[4.8(Accessed and Dirty Flags)]
 */
UINT64 Accessed : 1;
#define PTE_64_ACCESSED_BIT 5
#define PTE_64_ACCESSED_FLAG 0x20
#define PTE_64_ACCESSED_MASK 0x01
#define PTE_64_ACCESSED(_) (((_) >> 5) & 0x01)
/**
 * [Bit 6] Dirty; indicates whether software has written to the 4-KByte page
 * referenced by this entry.
 *
 * @see Vol3A[4.8(Accessed and Dirty Flags)]
 */
UINT64 Dirty : 1;
#define PTE_64_DIRTY_BIT 6
#define PTE_64_DIRTY_FLAG 0x40
#define PTE_64_DIRTY_MASK 0x01
#define PTE_64_DIRTY(_) (((_) >> 6) & 0x01)
/**
 * [Bit 7] Indirectly determines the memory type used to access the 4-KByte page
 * referenced by this entry.
 *
 * @see Vol3A[4.9.2(Paging and Memory Typing When the PAT is Supported (Pentium III
 * and More Recent Processor Families))]
 */
UINT64 Pat : 1;
#define PTE_64_PAT_BIT 7
#define PTE_64_PAT_FLAG 0x80
#define PTE_64_PAT_MASK 0x01
#define PTE_64_PAT(_) (((_) >> 7) & 0x01)
/**
 * [Bit 8] Global; if CR4.PGE = 1, determines whether the translation is global;
 * ignored otherwise.
 *
 * @see Vol3A[4.10(Caching Translation Information)]
 */
UINT64 Global : 1;
#define PTE_64_GLOBAL_BIT 8
#define PTE_64_GLOBAL_FLAG 0x100
#define PTE_64_GLOBAL_MASK 0x01
#define PTE_64_GLOBAL(_) (((_) >> 8) & 0x01)
/**
 * [Bits 10:9] Ignored.
 */
UINT64 Ignored1 : 2;
#define PTE_64_IGNORED_1_BIT 9
#define PTE_64_IGNORED_1_FLAG 0x600
#define PTE_64_IGNORED_1_MASK 0x03
#define PTE_64_IGNORED_1(_) (((_) >> 9) & 0x03)
/**
 * [Bit 11] For ordinary paging, ignored; for HLAT paging, restart (if 1,
 * linear-address translation is restarted with ordinary paging)
 *
 * @see Vol3A[4.5.5(Restart of HLAT Paging)]
 */
UINT64 Restart : 1;
#define PTE_64_RESTART_BIT 11
#define PTE_64_RESTART_FLAG 0x800
#define PTE_64_RESTART_MASK 0x01
#define PTE_64_RESTART(_) (((_) >> 11) & 0x01)
/**
 * [Bits 47:12] Physical address of the 4-KByte page referenced by this entry.
 */
UINT64 PageFrameNumber : 36;
#define PTE_64_PAGE_FRAME_NUMBER_BIT 12
#define PTE_64_PAGE_FRAME_NUMBER_FLAG 0xFFFFFFFFF000
#define PTE_64_PAGE_FRAME_NUMBER_MASK 0xFFFFFFFFF
#define PTE_64_PAGE_FRAME_NUMBER(_) (((_) >> 12) & 0xFFFFFFFFF)
/**
 * [Bits 51:48] Reserved (must be 0).
 */
UINT64 Reserved1 : 4;
/**
 * [Bits 58:52] Ignored.
 */
UINT64 Ignored2 : 7;
#define PTE_64_IGNORED_2_BIT 52
#define PTE_64_IGNORED_2_FLAG 0x7F0000000000000
#define PTE_64_IGNORED_2_MASK 0x7F
#define PTE_64_IGNORED_2(_) (((_) >> 52) & 0x7F)
/**
 * [Bits 62:59] Protection key; if CR4.PKE = 1, determines the protection key of the
 * page; ignored otherwise.
 *
 * @see Vol3A[4.6.2(Protection Keys)]
 */
UINT64 ProtectionKey : 4;
#define PTE_64_PROTECTION_KEY_BIT 59
#define PTE_64_PROTECTION_KEY_FLAG 0x7800000000000000
#define PTE_64_PROTECTION_KEY_MASK 0x0F
#define PTE_64_PROTECTION_KEY(_) (((_) >> 59) & 0x0F)
/**
 * [Bit 63] If IA32_EFER.NXE = 1, execute-disable (if 1, instruction fetches are not
 * allowed from the 4-KByte page controlled by this entry); otherwise, reserved
 * (must be 0).
 *
 * @see Vol3A[4.6(Access Rights)]
 */
UINT64 ExecuteDisable : 1;
#define PTE_64_EXECUTE_DISABLE_BIT 63
#define PTE_64_EXECUTE_DISABLE_FLAG 0x8000000000000000
#define PTE_64_EXECUTE_DISABLE_MASK 0x01
#define PTE_64_EXECUTE_DISABLE(_) (((_) >> 63) & 0x01)
};
/**
 * Raw 64-bit value of the entry.
 */
UINT64 AsUInt;
} PTE_64;
/**
 * @brief Format of a common Page-Table Entry
 *
 * Level-agnostic view of a 4-level paging entry: only the bit positions common
 * to PML4E/PDPTE/PDE/PTE are named. Some bits change meaning with the level
 * (e.g. bit 7 is PS in a PDE/PDPTE but PAT in a PTE); consult the
 * level-specific types above for the exact semantics.
 */
typedef union
{
struct
{
/**
 * [Bit 0] Present.
 */
UINT64 Present : 1;
#define PT_ENTRY_64_PRESENT_BIT 0
#define PT_ENTRY_64_PRESENT_FLAG 0x01
#define PT_ENTRY_64_PRESENT_MASK 0x01
#define PT_ENTRY_64_PRESENT(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] Read/write.
 */
UINT64 Write : 1;
#define PT_ENTRY_64_WRITE_BIT 1
#define PT_ENTRY_64_WRITE_FLAG 0x02
#define PT_ENTRY_64_WRITE_MASK 0x01
#define PT_ENTRY_64_WRITE(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] User/supervisor.
 */
UINT64 Supervisor : 1;
#define PT_ENTRY_64_SUPERVISOR_BIT 2
#define PT_ENTRY_64_SUPERVISOR_FLAG 0x04
#define PT_ENTRY_64_SUPERVISOR_MASK 0x01
#define PT_ENTRY_64_SUPERVISOR(_) (((_) >> 2) & 0x01)
/**
 * [Bit 3] Page-level write-through.
 */
UINT64 PageLevelWriteThrough : 1;
#define PT_ENTRY_64_PAGE_LEVEL_WRITE_THROUGH_BIT 3
#define PT_ENTRY_64_PAGE_LEVEL_WRITE_THROUGH_FLAG 0x08
#define PT_ENTRY_64_PAGE_LEVEL_WRITE_THROUGH_MASK 0x01
#define PT_ENTRY_64_PAGE_LEVEL_WRITE_THROUGH(_) (((_) >> 3) & 0x01)
/**
 * [Bit 4] Page-level cache disable.
 */
UINT64 PageLevelCacheDisable : 1;
#define PT_ENTRY_64_PAGE_LEVEL_CACHE_DISABLE_BIT 4
#define PT_ENTRY_64_PAGE_LEVEL_CACHE_DISABLE_FLAG 0x10
#define PT_ENTRY_64_PAGE_LEVEL_CACHE_DISABLE_MASK 0x01
#define PT_ENTRY_64_PAGE_LEVEL_CACHE_DISABLE(_) (((_) >> 4) & 0x01)
/**
 * [Bit 5] Accessed.
 */
UINT64 Accessed : 1;
#define PT_ENTRY_64_ACCESSED_BIT 5
#define PT_ENTRY_64_ACCESSED_FLAG 0x20
#define PT_ENTRY_64_ACCESSED_MASK 0x01
#define PT_ENTRY_64_ACCESSED(_) (((_) >> 5) & 0x01)
/**
 * [Bit 6] Dirty (meaningful only in entries that map a page).
 */
UINT64 Dirty : 1;
#define PT_ENTRY_64_DIRTY_BIT 6
#define PT_ENTRY_64_DIRTY_FLAG 0x40
#define PT_ENTRY_64_DIRTY_MASK 0x01
#define PT_ENTRY_64_DIRTY(_) (((_) >> 6) & 0x01)
/**
 * [Bit 7] Page size (PS) in PDPTE/PDE entries; named for that use. In a PTE
 * this bit is PAT instead (see PTE_64).
 */
UINT64 LargePage : 1;
#define PT_ENTRY_64_LARGE_PAGE_BIT 7
#define PT_ENTRY_64_LARGE_PAGE_FLAG 0x80
#define PT_ENTRY_64_LARGE_PAGE_MASK 0x01
#define PT_ENTRY_64_LARGE_PAGE(_) (((_) >> 7) & 0x01)
/**
 * [Bit 8] Global (meaningful only in entries that map a page).
 */
UINT64 Global : 1;
#define PT_ENTRY_64_GLOBAL_BIT 8
#define PT_ENTRY_64_GLOBAL_FLAG 0x100
#define PT_ENTRY_64_GLOBAL_MASK 0x01
#define PT_ENTRY_64_GLOBAL(_) (((_) >> 8) & 0x01)
/**
 * [Bits 10:9] Ignored.
 */
UINT64 Ignored1 : 2;
#define PT_ENTRY_64_IGNORED_1_BIT 9
#define PT_ENTRY_64_IGNORED_1_FLAG 0x600
#define PT_ENTRY_64_IGNORED_1_MASK 0x03
#define PT_ENTRY_64_IGNORED_1(_) (((_) >> 9) & 0x03)
/**
 * [Bit 11] HLAT restart (ignored for ordinary paging).
 */
UINT64 Restart : 1;
#define PT_ENTRY_64_RESTART_BIT 11
#define PT_ENTRY_64_RESTART_FLAG 0x800
#define PT_ENTRY_64_RESTART_MASK 0x01
#define PT_ENTRY_64_RESTART(_) (((_) >> 11) & 0x01)
/**
 * [Bits 47:12] Physical address of the 4-KByte page referenced by this entry.
 */
UINT64 PageFrameNumber : 36;
#define PT_ENTRY_64_PAGE_FRAME_NUMBER_BIT 12
#define PT_ENTRY_64_PAGE_FRAME_NUMBER_FLAG 0xFFFFFFFFF000
#define PT_ENTRY_64_PAGE_FRAME_NUMBER_MASK 0xFFFFFFFFF
#define PT_ENTRY_64_PAGE_FRAME_NUMBER(_) (((_) >> 12) & 0xFFFFFFFFF)
/**
 * [Bits 51:48] Reserved (must be 0).
 */
UINT64 Reserved1 : 4;
/**
 * [Bits 58:52] Ignored.
 */
UINT64 Ignored2 : 7;
#define PT_ENTRY_64_IGNORED_2_BIT 52
#define PT_ENTRY_64_IGNORED_2_FLAG 0x7F0000000000000
#define PT_ENTRY_64_IGNORED_2_MASK 0x7F
#define PT_ENTRY_64_IGNORED_2(_) (((_) >> 52) & 0x7F)
/**
 * [Bits 62:59] Protection key (meaningful only in entries that map a page and
 * when CR4.PKE = 1).
 */
UINT64 ProtectionKey : 4;
#define PT_ENTRY_64_PROTECTION_KEY_BIT 59
#define PT_ENTRY_64_PROTECTION_KEY_FLAG 0x7800000000000000
#define PT_ENTRY_64_PROTECTION_KEY_MASK 0x0F
#define PT_ENTRY_64_PROTECTION_KEY(_) (((_) >> 59) & 0x0F)
/**
 * [Bit 63] Execute-disable (requires IA32_EFER.NXE = 1; reserved otherwise).
 */
UINT64 ExecuteDisable : 1;
#define PT_ENTRY_64_EXECUTE_DISABLE_BIT 63
#define PT_ENTRY_64_EXECUTE_DISABLE_FLAG 0x8000000000000000
#define PT_ENTRY_64_EXECUTE_DISABLE_MASK 0x01
#define PT_ENTRY_64_EXECUTE_DISABLE(_) (((_) >> 63) & 0x01)
};
/**
 * Raw 64-bit value of the entry.
 */
UINT64 AsUInt;
} PT_ENTRY_64;
/**
 * @defgroup PAGING_STRUCTURES_ENTRY_COUNT_64 \
 * Paging structures entry counts
 *
 * Paging structures entry counts. Each 4-KByte paging structure in 4-level
 * paging holds 512 (0x200) 64-bit entries.
 * @{
 */
#define PML4E_ENTRY_COUNT_64 0x00000200
#define PDPTE_ENTRY_COUNT_64 0x00000200
#define PDE_ENTRY_COUNT_64 0x00000200
#define PTE_ENTRY_COUNT_64 0x00000200
/**
 * @}
 */
/**
 * @}
 */
/**
 * @}
 */
/**
 * @brief INVPCID invalidation types (passed in the register operand of INVPCID).
 */
typedef enum
{
/**
 * If the INVPCID type is 0, the logical processor invalidates mappings - except global
 * translations - associated with the PCID specified in the INVPCID descriptor and that would
 * be used to translate the linear address specified in the INVPCID descriptor. (The
 * instruction may also invalidate global translations, as well as mappings associated with
 * other PCIDs and for other linear addresses.)
 */
InvpcidIndividualAddress = 0x00000000,
/**
 * If the INVPCID type is 1, the logical processor invalidates all mappings - except global
 * translations - associated with the PCID specified in the INVPCID descriptor. (The
 * instruction may also invalidate global translations, as well as mappings associated with
 * other PCIDs.)
 */
InvpcidSingleContext = 0x00000001,
/**
 * If the INVPCID type is 2, the logical processor invalidates mappings - including global
 * translations - associated with all PCIDs.
 */
InvpcidAllContextWithGlobals = 0x00000002,
/**
 * If the INVPCID type is 3, the logical processor invalidates mappings - except global
 * translations - associated with all PCIDs. (The instruction may also invalidate global
 * translations.)
 */
InvpcidAllContext = 0x00000003,
} INVPCID_TYPE;
/**
 * @brief INVPCID descriptor (memory operand of INVPCID).
 *
 * NOTE(review): the architectural descriptor is 128 bits (PCID + reserved in
 * the low quadword, linear address in the high quadword), but this union's
 * AsUInt covers only the low 64 bits, and the LINEAR_ADDRESS FLAG/accessor
 * macros below encode shifts/constants wider than 64 bits, so they cannot be
 * used on a 64-bit C integer. Treat this type as a field map, not as a
 * directly usable 128-bit value.
 */
typedef union
{
struct
{
/**
 * [Bits 11:0] PCID whose mappings are to be invalidated (types 0 and 1).
 */
UINT64 Pcid : 12;
#define INVPCID_DESCRIPTOR_PCID_BIT 0
#define INVPCID_DESCRIPTOR_PCID_FLAG 0xFFF
#define INVPCID_DESCRIPTOR_PCID_MASK 0xFFF
#define INVPCID_DESCRIPTOR_PCID(_) (((_) >> 0) & 0xFFF)
/**
 * [Bits 63:12] Must be zero.
 */
UINT64 Reserved1 : 52;
#define INVPCID_DESCRIPTOR_RESERVED1_BIT 12
#define INVPCID_DESCRIPTOR_RESERVED1_FLAG 0xFFFFFFFFFFFFF000
#define INVPCID_DESCRIPTOR_RESERVED1_MASK 0xFFFFFFFFFFFFF
#define INVPCID_DESCRIPTOR_RESERVED1(_) (((_) >> 12) & 0xFFFFFFFFFFFFF)
/**
 * [Bits 127:64] Linear address to invalidate (type 0 only); occupies the
 * second quadword of the 128-bit descriptor.
 */
UINT64 LinearAddress : 64;
#define INVPCID_DESCRIPTOR_LINEAR_ADDRESS_BIT 64
#define INVPCID_DESCRIPTOR_LINEAR_ADDRESS_FLAG 0xFFFFFFFFFFFFFFFF0000000000000000
#define INVPCID_DESCRIPTOR_LINEAR_ADDRESS_MASK 0xFFFFFFFFFFFFFFFF
#define INVPCID_DESCRIPTOR_LINEAR_ADDRESS(_) (((_) >> 64) & 0xFFFFFFFFFFFFFFFF)
};
/**
 * Raw value of the low quadword only (see NOTE above).
 */
UINT64 AsUInt;
} INVPCID_DESCRIPTOR;
/**
* @defgroup SEGMENT_DESCRIPTORS \
* Segment descriptors
* @{
*/
/**
* @brief Pseudo-Descriptor Format (32-bit)
*
* @see Vol3A[3.5.1(Segment Descriptor Tables)] (reference)
*/
#pragma pack(push, 1) /* byte-packed: must match the 6-byte in-memory pseudo-descriptor */
typedef struct
{
/**
 * Limit (size of the descriptor table minus 1).
 */
UINT16 Limit;
/**
 * Base Address (32-bit linear address of the descriptor table).
 */
UINT32 BaseAddress;
} SEGMENT_DESCRIPTOR_REGISTER_32;
#pragma pack(pop)
/**
* @brief Pseudo-Descriptor Format (64-bit)
*
* @see Vol3A[3.5.1(Segment Descriptor Tables)] (reference)
*/
#pragma pack(push, 1) /* byte-packed: must match the 10-byte in-memory pseudo-descriptor */
typedef struct
{
/**
 * Limit (size of the descriptor table minus 1).
 */
UINT16 Limit;
/**
 * Base Address (64-bit linear address of the descriptor table).
 */
UINT64 BaseAddress;
} SEGMENT_DESCRIPTOR_REGISTER_64;
#pragma pack(pop)
/**
* @brief Segment access rights
*
* @see Vol2A[3.2(Instructions (A-L) | LAR-Load Access Rights Byte)] (reference)
*/
typedef union
{
struct
{
UINT32 Reserved1 : 8;
/**
* @brief Type field
*
* [Bits 11:8] Indicates the segment or gate type and specifies the kinds of access
* that can be made to the segment and the direction of growth. The interpretation
* of this field depends on whether the descriptor type flag specifies an
* application (code or data) descriptor or a system descriptor. The encoding of the
* type field is different for code, data, and system descriptors.
*
* @see Vol3A[3.4.5.1(Code- and Data-Segment Descriptor Types)]
*/
UINT32 Type : 4;
#define SEGMENT_ACCESS_RIGHTS_TYPE_BIT 8
#define SEGMENT_ACCESS_RIGHTS_TYPE_FLAG 0xF00
#define SEGMENT_ACCESS_RIGHTS_TYPE_MASK 0x0F
#define SEGMENT_ACCESS_RIGHTS_TYPE(_) (((_) >> 8) & 0x0F)
/**
* @brief S (descriptor type) flag
*
* [Bit 12] Specifies whether the segment descriptor is for a system segment (S flag
* is clear) or a code or data segment (S flag is set).
*/
UINT32 DescriptorType : 1;
#define SEGMENT_ACCESS_RIGHTS_DESCRIPTOR_TYPE_BIT 12
#define SEGMENT_ACCESS_RIGHTS_DESCRIPTOR_TYPE_FLAG 0x1000
#define SEGMENT_ACCESS_RIGHTS_DESCRIPTOR_TYPE_MASK 0x01
#define SEGMENT_ACCESS_RIGHTS_DESCRIPTOR_TYPE(_) (((_) >> 12) & 0x01)
/**
* @brief DPL (descriptor privilege level) field
*
* [Bits 14:13] Specifies the privilege level of the segment. The privilege level
* can range from 0 to 3, with 0 being the most privileged level. The DPL is used to
* control access to the segment. See Section 5.5, "Privilege Levels", for a
* description of the relationship of the DPL to the CPL of the executing code
* segment and the RPL of a segment selector.
*/
UINT32 DescriptorPrivilegeLevel : 2;
#define SEGMENT_ACCESS_RIGHTS_DESCRIPTOR_PRIVILEGE_LEVEL_BIT 13
#define SEGMENT_ACCESS_RIGHTS_DESCRIPTOR_PRIVILEGE_LEVEL_FLAG 0x6000
#define SEGMENT_ACCESS_RIGHTS_DESCRIPTOR_PRIVILEGE_LEVEL_MASK 0x03
#define SEGMENT_ACCESS_RIGHTS_DESCRIPTOR_PRIVILEGE_LEVEL(_) (((_) >> 13) & 0x03)
/**
* @brief P (segment-present) flag
*
* [Bit 15] Indicates whether the segment is present in memory (set) or not present
* (clear). If this flag is clear, the processor generates a segment-not-present
* exception (\#NP) when a segment selector that points to the segment descriptor is
* loaded into a segment register. Memory management software can use this flag to
* control which segments are actually loaded into physical memory at a given time.
* It offers a control in addition to paging for managing virtual memory.
*/
UINT32 Present : 1;
#define SEGMENT_ACCESS_RIGHTS_PRESENT_BIT 15
#define SEGMENT_ACCESS_RIGHTS_PRESENT_FLAG 0x8000
#define SEGMENT_ACCESS_RIGHTS_PRESENT_MASK 0x01
#define SEGMENT_ACCESS_RIGHTS_PRESENT(_) (((_) >> 15) & 0x01)
UINT32 Reserved2 : 4;
/**
* @brief Available bit
*
* [Bit 20] Bit 20 of the second doubleword of the segment descriptor is available
* for use by system software.
*/
UINT32 System : 1;
#define SEGMENT_ACCESS_RIGHTS_SYSTEM_BIT 20
#define SEGMENT_ACCESS_RIGHTS_SYSTEM_FLAG 0x100000
#define SEGMENT_ACCESS_RIGHTS_SYSTEM_MASK 0x01
#define SEGMENT_ACCESS_RIGHTS_SYSTEM(_) (((_) >> 20) & 0x01)
/**
* @brief L (64-bit code segment) flag
*
* [Bit 21] In IA-32e mode, bit 21 of the second doubleword of the segment
* descriptor indicates whether a code segment contains native 64-bit code. A value
* of 1 indicates instructions in this code segment are executed in 64-bit mode. A
* value of 0 indicates the instructions in this code segment are executed in
* compatibility mode. If L-bit is set, then D-bit must be cleared. When not in
* IA-32e mode or for non-code segments, bit 21 is reserved and should always be set
* to 0.
*/
UINT32 LongMode : 1;
#define SEGMENT_ACCESS_RIGHTS_LONG_MODE_BIT 21
#define SEGMENT_ACCESS_RIGHTS_LONG_MODE_FLAG 0x200000
#define SEGMENT_ACCESS_RIGHTS_LONG_MODE_MASK 0x01
#define SEGMENT_ACCESS_RIGHTS_LONG_MODE(_) (((_) >> 21) & 0x01)
/**
* @brief D/B (default operation size/default stack pointer size and/or upper bound)
* flag
*
* [Bit 22] Performs different functions depending on whether the segment descriptor
* is an executable code segment, an expand-down data segment, or a stack segment.
* (This flag should always be set to 1 for 32-bit code and data segments and to 0
* for 16-bit code and data segments.)
* - Executable code segment. The flag is called the D flag and it indicates the
* default length for effective addresses and operands referenced by instructions in
* the segment. If the flag is set, 32-bit addresses and 32-bit or 8-bit operands
* are assumed; if it is clear, 16-bit addresses and 16-bit or 8-bit operands are
* assumed. The instruction prefix 66H can be used to select an operand size other
* than the default, and the prefix 67H can be used select an address size other
* than the default.
* - Stack segment (data segment pointed to by the SS register). The flag is called
* the B (big) flag and it specifies the size of the stack pointer used for implicit
* stack operations (such as pushes, pops, and calls). If the flag is set, a 32-bit
* stack pointer is used, which is stored in the 32-bit ESP register; if the flag is
* clear, a 16-bit stack pointer is used, which is stored in the 16- bit SP
* register. If the stack segment is set up to be an expand-down data segment
* (described in the next paragraph), the B flag also specifies the upper bound of
* the stack segment.
* - Expand-down data segment. The flag is called the B flag and it specifies the
* upper bound of the segment. If the flag is set, the upper bound is FFFFFFFFH (4
* GBytes); if the flag is clear, the upper bound is FFFFH (64 KBytes).
*/
UINT32 DefaultBig : 1;
#define SEGMENT_ACCESS_RIGHTS_DEFAULT_BIG_BIT 22
#define SEGMENT_ACCESS_RIGHTS_DEFAULT_BIG_FLAG 0x400000
#define SEGMENT_ACCESS_RIGHTS_DEFAULT_BIG_MASK 0x01
#define SEGMENT_ACCESS_RIGHTS_DEFAULT_BIG(_) (((_) >> 22) & 0x01)
/**
* @brief G (granularity) flag
*
* [Bit 23] Determines the scaling of the segment limit field. When the granularity
* flag is clear, the segment limit is interpreted in byte units; when flag is set,
* the segment limit is interpreted in 4-KByte units. (This flag does not affect the
* granularity of the base address; it is always byte granular.) When the
* granularity flag is set, the twelve least significant bits of an offset are not
* tested when checking the offset against the segment limit. For example, when the
* granularity flag is set, a limit of 0 results in valid offsets from 0 to 4095.
*/
UINT32 Granularity : 1;
#define SEGMENT_ACCESS_RIGHTS_GRANULARITY_BIT 23
#define SEGMENT_ACCESS_RIGHTS_GRANULARITY_FLAG 0x800000
#define SEGMENT_ACCESS_RIGHTS_GRANULARITY_MASK 0x01
#define SEGMENT_ACCESS_RIGHTS_GRANULARITY(_) (((_) >> 23) & 0x01)
UINT32 Reserved3 : 8;
};
UINT32 AsUInt;
} SEGMENT_ACCESS_RIGHTS;
/**
* @brief General Segment Descriptor (32-bit)
*
* A segment descriptor is a data structure in a GDT or LDT that provides the processor with the
* size and location of a segment, as well as access control and status information. Segment
* descriptors are typically created by compilers, linkers, loaders, or the operating system or
* executive, but not application programs.
*
* @see Vol3A[5.2(FIELDS AND FLAGS USED FOR SEGMENT-LEVEL AND PAGE-LEVEL PROTECTION)]
* @see Vol3A[5.2.1(Code-Segment Descriptor in 64-bit Mode)]
* @see Vol3A[5.8.3(Call Gates)]
* @see Vol3A[6.11(IDT DESCRIPTORS)]
* @see Vol3A[6.14.1(64-Bit Mode IDT)]
* @see Vol3A[7.2.2(TSS Descriptor)]
* @see Vol3A[7.2.3(TSS Descriptor in 64-bit mode)]
* @see Vol3A[7.2.5(Task-Gate Descriptor)]
* @see Vol3A[3.4.5(Segment Descriptors)] (reference)
*/
typedef struct
{
/**
* @brief Segment limit field (15:00)
*
* Specifies the size of the segment. The processor puts together the two segment limit
* fields to form a 20-bit value. The processor interprets the segment limit in one of two
* ways, depending on the setting of the G (granularity) flag:
* - If the granularity flag is clear, the segment size can range from 1 byte to 1 MByte, in
* byte increments.
* - If the granularity flag is set, the segment size can range from 4 KBytes to 4 GBytes,
* in 4-KByte increments. The processor uses the segment limit in two different ways,
* depending on whether the segment is an expand-up or an expand-down segment. For expand-up
* segments, the offset in a logical address can range from 0 to the segment limit. Offsets
* greater than the segment limit generate general-protection exceptions (\#GP, for all
* segments other than SS) or stack-fault exceptions (\#SS for the SS segment). For
* expand-down segments, the segment limit has the reverse function; the offset can range
* from the segment limit plus 1 to FFFFFFFFH or FFFFH, depending on the setting of the B
* flag. Offsets less than or equal to the segment limit generate general-protection
* exceptions or stack-fault exceptions. Decreasing the value in the segment limit field for
* an expand-down segment allocates new memory at the bottom of the segment's address space,
* rather than at the top. IA-32 architecture stacks always grow downwards, making this
* mechanism convenient for expandable stacks.
*
* @see Vol3A[3.4.5.1(Code- and Data-Segment Descriptor Types)]
*/
UINT16 SegmentLimitLow;
/**
* @brief Base address field (15:00)
*
* Defines the location of byte 0 of the segment within the 4-GByte linear address space.
* The processor puts together the three base address fields to form a single 32-bit value.
* Segment base addresses should be aligned to 16-byte boundaries. Although 16-byte
* alignment is not required, this alignment allows programs to maximize performance by
* aligning code and data on 16-byte boundaries.
*/
UINT16 BaseAddressLow;
/**
* @brief Segment descriptor fields (the descriptor's second doubleword)
*
* The union overlays the individual bitfields with AsUInt, the raw 32-bit value of the
* second doubleword of the descriptor.
*/
union
{
struct
{
/**
* [Bits 7:0] Base address field (23:16); see description of $BASE_LOW for
* more details.
*/
UINT32 BaseAddressMiddle : 8;
#define SEGMENT__BASE_ADDRESS_MIDDLE_BIT 0
#define SEGMENT__BASE_ADDRESS_MIDDLE_FLAG 0xFF
#define SEGMENT__BASE_ADDRESS_MIDDLE_MASK 0xFF
#define SEGMENT__BASE_ADDRESS_MIDDLE(_) (((_) >> 0) & 0xFF)
/**
* @brief Type field
*
* [Bits 11:8] Indicates the segment or gate type and specifies the kinds of
* access that can be made to the segment and the direction of growth. The
* interpretation of this field depends on whether the descriptor type flag
* specifies an application (code or data) descriptor or a system
* descriptor. The encoding of the type field is different for code, data,
* and system descriptors.
*
* @see Vol3A[3.4.5.1(Code- and Data-Segment Descriptor Types)]
*/
UINT32 Type : 4;
#define SEGMENT__TYPE_BIT 8
#define SEGMENT__TYPE_FLAG 0xF00
#define SEGMENT__TYPE_MASK 0x0F
#define SEGMENT__TYPE(_) (((_) >> 8) & 0x0F)
/**
* @brief S (descriptor type) flag
*
* [Bit 12] Specifies whether the segment descriptor is for a system segment
* (S flag is clear) or a code or data segment (S flag is set).
*/
UINT32 DescriptorType : 1;
#define SEGMENT__DESCRIPTOR_TYPE_BIT 12
#define SEGMENT__DESCRIPTOR_TYPE_FLAG 0x1000
#define SEGMENT__DESCRIPTOR_TYPE_MASK 0x01
#define SEGMENT__DESCRIPTOR_TYPE(_) (((_) >> 12) & 0x01)
/**
* @brief DPL (descriptor privilege level) field
*
* [Bits 14:13] Specifies the privilege level of the segment. The privilege
* level can range from 0 to 3, with 0 being the most privileged level. The
* DPL is used to control access to the segment. See Section 5.5, "Privilege
* Levels", for a description of the relationship of the DPL to the CPL of
* the executing code segment and the RPL of a segment selector.
*/
UINT32 DescriptorPrivilegeLevel : 2;
#define SEGMENT__DESCRIPTOR_PRIVILEGE_LEVEL_BIT 13
#define SEGMENT__DESCRIPTOR_PRIVILEGE_LEVEL_FLAG 0x6000
#define SEGMENT__DESCRIPTOR_PRIVILEGE_LEVEL_MASK 0x03
#define SEGMENT__DESCRIPTOR_PRIVILEGE_LEVEL(_) (((_) >> 13) & 0x03)
/**
* @brief P (segment-present) flag
*
* [Bit 15] Indicates whether the segment is present in memory (set) or not
* present (clear). If this flag is clear, the processor generates a
* segment-not-present exception (\#NP) when a segment selector that points
* to the segment descriptor is loaded into a segment register. Memory
* management software can use this flag to control which segments are
* actually loaded into physical memory at a given time. It offers a control
* in addition to paging for managing virtual memory.
*/
UINT32 Present : 1;
#define SEGMENT__PRESENT_BIT 15
#define SEGMENT__PRESENT_FLAG 0x8000
#define SEGMENT__PRESENT_MASK 0x01
#define SEGMENT__PRESENT(_) (((_) >> 15) & 0x01)
/**
* [Bits 19:16] Segment limit field (19:16); see description of $LIMIT_LOW
* for more details.
*/
UINT32 SegmentLimitHigh : 4;
#define SEGMENT__SEGMENT_LIMIT_HIGH_BIT 16
#define SEGMENT__SEGMENT_LIMIT_HIGH_FLAG 0xF0000
#define SEGMENT__SEGMENT_LIMIT_HIGH_MASK 0x0F
#define SEGMENT__SEGMENT_LIMIT_HIGH(_) (((_) >> 16) & 0x0F)
/**
* @brief Available bit
*
* [Bit 20] Bit 20 of the second doubleword of the segment descriptor is
* available for use by system software.
*/
UINT32 System : 1;
#define SEGMENT__SYSTEM_BIT 20
#define SEGMENT__SYSTEM_FLAG 0x100000
#define SEGMENT__SYSTEM_MASK 0x01
#define SEGMENT__SYSTEM(_) (((_) >> 20) & 0x01)
/**
* @brief L (64-bit code segment) flag
*
* [Bit 21] In IA-32e mode, bit 21 of the second doubleword of the segment
* descriptor indicates whether a code segment contains native 64-bit code.
* A value of 1 indicates instructions in this code segment are executed in
* 64-bit mode. A value of 0 indicates the instructions in this code segment
* are executed in compatibility mode. If L-bit is set, then D-bit must be
* cleared. When not in IA-32e mode or for non-code segments, bit 21 is
* reserved and should always be set to 0.
*/
UINT32 LongMode : 1;
#define SEGMENT__LONG_MODE_BIT 21
#define SEGMENT__LONG_MODE_FLAG 0x200000
#define SEGMENT__LONG_MODE_MASK 0x01
#define SEGMENT__LONG_MODE(_) (((_) >> 21) & 0x01)
/**
* @brief D/B (default operation size/default stack pointer size and/or
* upper bound) flag
*
* [Bit 22] Performs different functions depending on whether the segment
* descriptor is an executable code segment, an expand-down data segment, or
* a stack segment. (This flag should always be set to 1 for 32-bit code and
* data segments and to 0 for 16-bit code and data segments.)
* - Executable code segment. The flag is called the D flag and it indicates
* the default length for effective addresses and operands referenced by
* instructions in the segment. If the flag is set, 32-bit addresses and
* 32-bit or 8-bit operands are assumed; if it is clear, 16-bit addresses
* and 16-bit or 8-bit operands are assumed. The instruction prefix 66H can
* be used to select an operand size other than the default, and the prefix
* 67H can be used select an address size other than the default.
* - Stack segment (data segment pointed to by the SS register). The flag is
* called the B (big) flag and it specifies the size of the stack pointer
* used for implicit stack operations (such as pushes, pops, and calls). If
* the flag is set, a 32-bit stack pointer is used, which is stored in the
* 32-bit ESP register; if the flag is clear, a 16-bit stack pointer is
* used, which is stored in the 16- bit SP register. If the stack segment is
* set up to be an expand-down data segment (described in the next
* paragraph), the B flag also specifies the upper bound of the stack
* segment.
* - Expand-down data segment. The flag is called the B flag and it
* specifies the upper bound of the segment. If the flag is set, the upper
* bound is FFFFFFFFH (4 GBytes); if the flag is clear, the upper bound is
* FFFFH (64 KBytes).
*/
UINT32 DefaultBig : 1;
#define SEGMENT__DEFAULT_BIG_BIT 22
#define SEGMENT__DEFAULT_BIG_FLAG 0x400000
#define SEGMENT__DEFAULT_BIG_MASK 0x01
#define SEGMENT__DEFAULT_BIG(_) (((_) >> 22) & 0x01)
/**
* @brief G (granularity) flag
*
* [Bit 23] Determines the scaling of the segment limit field. When the
* granularity flag is clear, the segment limit is interpreted in byte
* units; when flag is set, the segment limit is interpreted in 4-KByte
* units. (This flag does not affect the granularity of the base address; it
* is always byte granular.) When the granularity flag is set, the twelve
* least significant bits of an offset are not tested when checking the
* offset against the segment limit. For example, when the granularity flag
* is set, a limit of 0 results in valid offsets from 0 to 4095.
*/
UINT32 Granularity : 1;
#define SEGMENT__GRANULARITY_BIT 23
#define SEGMENT__GRANULARITY_FLAG 0x800000
#define SEGMENT__GRANULARITY_MASK 0x01
#define SEGMENT__GRANULARITY(_) (((_) >> 23) & 0x01)
/**
* [Bits 31:24] Base address field (31:24); see description of $BASE_LOW for
* more details.
*/
UINT32 BaseAddressHigh : 8;
#define SEGMENT__BASE_ADDRESS_HIGH_BIT 24
#define SEGMENT__BASE_ADDRESS_HIGH_FLAG 0xFF000000
#define SEGMENT__BASE_ADDRESS_HIGH_MASK 0xFF
#define SEGMENT__BASE_ADDRESS_HIGH(_) (((_) >> 24) & 0xFF)
};
/**
* Raw 32-bit value of the descriptor's second doubleword.
*/
UINT32 AsUInt;
};
} SEGMENT_DESCRIPTOR_32;
/**
* @brief General Segment Descriptor (64-bit)
*
* A segment descriptor is a data structure in a GDT or LDT that provides the processor with the
* size and location of a segment, as well as access control and status information. Segment
* descriptors are typically created by compilers, linkers, loaders, or the operating system or
* executive, but not application programs.
*
* @see Vol3A[3.4.5(Segment Descriptors)] (reference)
*/
typedef struct
{
/**
* @brief Segment limit field (15:00)
*
* Specifies the size of the segment. The processor puts together the two segment limit
* fields to form a 20-bit value. The processor interprets the segment limit in one of two
* ways, depending on the setting of the G (granularity) flag:
* - If the granularity flag is clear, the segment size can range from 1 byte to 1 MByte, in
* byte increments.
* - If the granularity flag is set, the segment size can range from 4 KBytes to 4 GBytes,
* in 4-KByte increments. The processor uses the segment limit in two different ways,
* depending on whether the segment is an expand-up or an expand-down segment. For expand-up
* segments, the offset in a logical address can range from 0 to the segment limit. Offsets
* greater than the segment limit generate general-protection exceptions (\#GP, for all
* segments other than SS) or stack-fault exceptions (\#SS for the SS segment). For
* expand-down segments, the segment limit has the reverse function; the offset can range
* from the segment limit plus 1 to FFFFFFFFH or FFFFH, depending on the setting of the B
* flag. Offsets less than or equal to the segment limit generate general-protection
* exceptions or stack-fault exceptions. Decreasing the value in the segment limit field for
* an expand-down segment allocates new memory at the bottom of the segment's address space,
* rather than at the top. IA-32 architecture stacks always grow downwards, making this
* mechanism convenient for expandable stacks.
*
* @see Vol3A[3.4.5.1(Code- and Data-Segment Descriptor Types)]
*/
UINT16 SegmentLimitLow;
/**
* @brief Base address field (15:00)
*
* Defines the location of byte 0 of the segment within the 4-GByte linear address space.
* The processor puts together the three base address fields to form a single 32-bit value.
* Segment base addresses should be aligned to 16-byte boundaries. Although 16-byte
* alignment is not required, this alignment allows programs to maximize performance by
* aligning code and data on 16-byte boundaries.
*/
UINT16 BaseAddressLow;
/**
* @brief Segment descriptor fields (the descriptor's second doubleword)
*
* The union overlays the individual bitfields with AsUInt, the raw 32-bit value of the
* second doubleword of the descriptor.
*/
union
{
struct
{
/**
* [Bits 7:0] Base address field (23:16); see description of $BASE_LOW for
* more details.
*/
UINT32 BaseAddressMiddle : 8;
#define SEGMENT__BASE_ADDRESS_MIDDLE_BIT 0
#define SEGMENT__BASE_ADDRESS_MIDDLE_FLAG 0xFF
#define SEGMENT__BASE_ADDRESS_MIDDLE_MASK 0xFF
#define SEGMENT__BASE_ADDRESS_MIDDLE(_) (((_) >> 0) & 0xFF)
/**
* @brief Type field
*
* [Bits 11:8] Indicates the segment or gate type and specifies the kinds of
* access that can be made to the segment and the direction of growth. The
* interpretation of this field depends on whether the descriptor type flag
* specifies an application (code or data) descriptor or a system
* descriptor. The encoding of the type field is different for code, data,
* and system descriptors.
*
* @see Vol3A[3.4.5.1(Code- and Data-Segment Descriptor Types)]
*/
UINT32 Type : 4;
#define SEGMENT__TYPE_BIT 8
#define SEGMENT__TYPE_FLAG 0xF00
#define SEGMENT__TYPE_MASK 0x0F
#define SEGMENT__TYPE(_) (((_) >> 8) & 0x0F)
/**
* @brief S (descriptor type) flag
*
* [Bit 12] Specifies whether the segment descriptor is for a system segment
* (S flag is clear) or a code or data segment (S flag is set).
*/
UINT32 DescriptorType : 1;
#define SEGMENT__DESCRIPTOR_TYPE_BIT 12
#define SEGMENT__DESCRIPTOR_TYPE_FLAG 0x1000
#define SEGMENT__DESCRIPTOR_TYPE_MASK 0x01
#define SEGMENT__DESCRIPTOR_TYPE(_) (((_) >> 12) & 0x01)
/**
* @brief DPL (descriptor privilege level) field
*
* [Bits 14:13] Specifies the privilege level of the segment. The privilege
* level can range from 0 to 3, with 0 being the most privileged level. The
* DPL is used to control access to the segment. See Section 5.5, "Privilege
* Levels", for a description of the relationship of the DPL to the CPL of
* the executing code segment and the RPL of a segment selector.
*/
UINT32 DescriptorPrivilegeLevel : 2;
#define SEGMENT__DESCRIPTOR_PRIVILEGE_LEVEL_BIT 13
#define SEGMENT__DESCRIPTOR_PRIVILEGE_LEVEL_FLAG 0x6000
#define SEGMENT__DESCRIPTOR_PRIVILEGE_LEVEL_MASK 0x03
#define SEGMENT__DESCRIPTOR_PRIVILEGE_LEVEL(_) (((_) >> 13) & 0x03)
/**
* @brief P (segment-present) flag
*
* [Bit 15] Indicates whether the segment is present in memory (set) or not
* present (clear). If this flag is clear, the processor generates a
* segment-not-present exception (\#NP) when a segment selector that points
* to the segment descriptor is loaded into a segment register. Memory
* management software can use this flag to control which segments are
* actually loaded into physical memory at a given time. It offers a control
* in addition to paging for managing virtual memory.
*/
UINT32 Present : 1;
#define SEGMENT__PRESENT_BIT 15
#define SEGMENT__PRESENT_FLAG 0x8000
#define SEGMENT__PRESENT_MASK 0x01
#define SEGMENT__PRESENT(_) (((_) >> 15) & 0x01)
/**
* [Bits 19:16] Segment limit field (19:16); see description of $LIMIT_LOW
* for more details.
*/
UINT32 SegmentLimitHigh : 4;
#define SEGMENT__SEGMENT_LIMIT_HIGH_BIT 16
#define SEGMENT__SEGMENT_LIMIT_HIGH_FLAG 0xF0000
#define SEGMENT__SEGMENT_LIMIT_HIGH_MASK 0x0F
#define SEGMENT__SEGMENT_LIMIT_HIGH(_) (((_) >> 16) & 0x0F)
/**
* @brief Available bit
*
* [Bit 20] Bit 20 of the second doubleword of the segment descriptor is
* available for use by system software.
*/
UINT32 System : 1;
#define SEGMENT__SYSTEM_BIT 20
#define SEGMENT__SYSTEM_FLAG 0x100000
#define SEGMENT__SYSTEM_MASK 0x01
#define SEGMENT__SYSTEM(_) (((_) >> 20) & 0x01)
/**
* @brief L (64-bit code segment) flag
*
* [Bit 21] In IA-32e mode, bit 21 of the second doubleword of the segment
* descriptor indicates whether a code segment contains native 64-bit code.
* A value of 1 indicates instructions in this code segment are executed in
* 64-bit mode. A value of 0 indicates the instructions in this code segment
* are executed in compatibility mode. If L-bit is set, then D-bit must be
* cleared. When not in IA-32e mode or for non-code segments, bit 21 is
* reserved and should always be set to 0.
*/
UINT32 LongMode : 1;
#define SEGMENT__LONG_MODE_BIT 21
#define SEGMENT__LONG_MODE_FLAG 0x200000
#define SEGMENT__LONG_MODE_MASK 0x01
#define SEGMENT__LONG_MODE(_) (((_) >> 21) & 0x01)
/**
* @brief D/B (default operation size/default stack pointer size and/or
* upper bound) flag
*
* [Bit 22] Performs different functions depending on whether the segment
* descriptor is an executable code segment, an expand-down data segment, or
* a stack segment. (This flag should always be set to 1 for 32-bit code and
* data segments and to 0 for 16-bit code and data segments.)
* - Executable code segment. The flag is called the D flag and it indicates
* the default length for effective addresses and operands referenced by
* instructions in the segment. If the flag is set, 32-bit addresses and
* 32-bit or 8-bit operands are assumed; if it is clear, 16-bit addresses
* and 16-bit or 8-bit operands are assumed. The instruction prefix 66H can
* be used to select an operand size other than the default, and the prefix
* 67H can be used select an address size other than the default.
* - Stack segment (data segment pointed to by the SS register). The flag is
* called the B (big) flag and it specifies the size of the stack pointer
* used for implicit stack operations (such as pushes, pops, and calls). If
* the flag is set, a 32-bit stack pointer is used, which is stored in the
* 32-bit ESP register; if the flag is clear, a 16-bit stack pointer is
* used, which is stored in the 16- bit SP register. If the stack segment is
* set up to be an expand-down data segment (described in the next
* paragraph), the B flag also specifies the upper bound of the stack
* segment.
* - Expand-down data segment. The flag is called the B flag and it
* specifies the upper bound of the segment. If the flag is set, the upper
* bound is FFFFFFFFH (4 GBytes); if the flag is clear, the upper bound is
* FFFFH (64 KBytes).
*/
UINT32 DefaultBig : 1;
#define SEGMENT__DEFAULT_BIG_BIT 22
#define SEGMENT__DEFAULT_BIG_FLAG 0x400000
#define SEGMENT__DEFAULT_BIG_MASK 0x01
#define SEGMENT__DEFAULT_BIG(_) (((_) >> 22) & 0x01)
/**
* @brief G (granularity) flag
*
* [Bit 23] Determines the scaling of the segment limit field. When the
* granularity flag is clear, the segment limit is interpreted in byte
* units; when flag is set, the segment limit is interpreted in 4-KByte
* units. (This flag does not affect the granularity of the base address; it
* is always byte granular.) When the granularity flag is set, the twelve
* least significant bits of an offset are not tested when checking the
* offset against the segment limit. For example, when the granularity flag
* is set, a limit of 0 results in valid offsets from 0 to 4095.
*/
UINT32 Granularity : 1;
#define SEGMENT__GRANULARITY_BIT 23
#define SEGMENT__GRANULARITY_FLAG 0x800000
#define SEGMENT__GRANULARITY_MASK 0x01
#define SEGMENT__GRANULARITY(_) (((_) >> 23) & 0x01)
/**
* [Bits 31:24] Base address field (31:24); see description of $BASE_LOW for
* more details.
*/
UINT32 BaseAddressHigh : 8;
#define SEGMENT__BASE_ADDRESS_HIGH_BIT 24
#define SEGMENT__BASE_ADDRESS_HIGH_FLAG 0xFF000000
#define SEGMENT__BASE_ADDRESS_HIGH_MASK 0xFF
#define SEGMENT__BASE_ADDRESS_HIGH(_) (((_) >> 24) & 0xFF)
};
/**
* Raw 32-bit value of the descriptor's second doubleword.
*/
UINT32 AsUInt;
};
/**
* Base address field (63:32); see description of $BASE_LOW for more details.
* This upper doubleword exists only in the expanded 16-byte system-segment
* descriptor format used in IA-32e mode.
*/
UINT32 BaseAddressUpper;
/**
* This field must be set to zero.
*/
UINT32 MustBeZero;
} SEGMENT_DESCRIPTOR_64;
/**
* @brief Interrupt Gate Descriptor (64-bit)
*
* A 16-byte IDT entry holding the 64-bit handler entry point split across
* OffsetLow/OffsetMiddle/OffsetHigh, plus the target code-segment selector
* and gate attributes.
*
* @see Vol3A[6.14.1(64-Bit Mode IDT)] (reference)
*/
typedef struct
{
/**
* Offset to procedure entry point (15:00).
*/
UINT16 OffsetLow;
/**
* Segment selector for destination code segment.
*/
UINT16 SegmentSelector;
/**
* Gate attributes (IST index, type, DPL, present bit) and offset bits 31:16;
* AsUInt overlays the same 32 bits as a raw value.
*/
union
{
struct
{
/**
* [Bits 2:0] Index into the TSS Interrupt Stack Table.
*/
UINT32 InterruptStackTable : 3;
#define SEGMENT__INTERRUPT_STACK_TABLE_BIT 0
#define SEGMENT__INTERRUPT_STACK_TABLE_FLAG 0x07
#define SEGMENT__INTERRUPT_STACK_TABLE_MASK 0x07
#define SEGMENT__INTERRUPT_STACK_TABLE(_) (((_) >> 0) & 0x07)
/**
* [Bits 7:3] This field must be set to zero.
*/
UINT32 MustBeZero0 : 5;
#define SEGMENT__MUST_BE_ZERO_0_BIT 3
#define SEGMENT__MUST_BE_ZERO_0_FLAG 0xF8
#define SEGMENT__MUST_BE_ZERO_0_MASK 0x1F
#define SEGMENT__MUST_BE_ZERO_0(_) (((_) >> 3) & 0x1F)
/**
* [Bits 11:8] Indicates the segment or gate type.
*/
UINT32 Type : 4;
#define SEGMENT__TYPE_BIT 8
#define SEGMENT__TYPE_FLAG 0xF00
#define SEGMENT__TYPE_MASK 0x0F
#define SEGMENT__TYPE(_) (((_) >> 8) & 0x0F)
/**
* [Bit 12] This field must be set to zero.
*/
UINT32 MustBeZero1 : 1;
#define SEGMENT__MUST_BE_ZERO_1_BIT 12
#define SEGMENT__MUST_BE_ZERO_1_FLAG 0x1000
#define SEGMENT__MUST_BE_ZERO_1_MASK 0x01
#define SEGMENT__MUST_BE_ZERO_1(_) (((_) >> 12) & 0x01)
/**
* [Bits 14:13] Specifies the segment privilege level.
*/
UINT32 DescriptorPrivilegeLevel : 2;
#define SEGMENT__DESCRIPTOR_PRIVILEGE_LEVEL_BIT 13
#define SEGMENT__DESCRIPTOR_PRIVILEGE_LEVEL_FLAG 0x6000
#define SEGMENT__DESCRIPTOR_PRIVILEGE_LEVEL_MASK 0x03
#define SEGMENT__DESCRIPTOR_PRIVILEGE_LEVEL(_) (((_) >> 13) & 0x03)
/**
* [Bit 15] Indicates whether the segment is present in memory (set) or not
* present (clear).
*/
UINT32 Present : 1;
#define SEGMENT__PRESENT_BIT 15
#define SEGMENT__PRESENT_FLAG 0x8000
#define SEGMENT__PRESENT_MASK 0x01
#define SEGMENT__PRESENT(_) (((_) >> 15) & 0x01)
/**
* [Bits 31:16] Offset to procedure entry point (31:16).
*/
UINT32 OffsetMiddle : 16;
#define SEGMENT__OFFSET_MIDDLE_BIT 16
#define SEGMENT__OFFSET_MIDDLE_FLAG 0xFFFF0000
#define SEGMENT__OFFSET_MIDDLE_MASK 0xFFFF
#define SEGMENT__OFFSET_MIDDLE(_) (((_) >> 16) & 0xFFFF)
};
UINT32 AsUInt;
};
/**
* Offset to procedure entry point (63:32).
*/
UINT32 OffsetHigh;
/**
* Reserved (upper doubleword padding of the 16-byte gate).
*/
UINT32 Reserved;
} SEGMENT_DESCRIPTOR_INTERRUPT_GATE_64;
#define SEGMENT_DESCRIPTOR_TYPE_SYSTEM 0x00000000
#define SEGMENT_DESCRIPTOR_TYPE_CODE_OR_DATA 0x00000001
/**
* @defgroup SEGMENT_DESCRIPTOR_CODE_AND_DATA_TYPE \
* Code- and Data-Segment Descriptor Types
*
* When the S (descriptor type) flag in a segment descriptor is set, the descriptor is for either a
* code or a data segment. The highest order bit of the type field (bit 11 of the second double word
* of the segment descriptor) then determines whether the descriptor is for a data segment (clear)
* or a code segment (set). For data segments, the three low-order bits of the type field (bits 8,
* 9, and 10) are interpreted as accessed (A), write-enable (W), and expansion-direction (E). See
* Table 3-1 for a description of the encoding of the bits in the type field for code and data
* segments. Data segments can be read-only or read/write segments, depending on the setting of the
* write-enable bit.
*
* @see Vol3A[3.4.5.1(Code- and Data-Segment Descriptor Types)] (reference)
* @{
*/
/**
* Data Read-Only.
*/
#define SEGMENT_DESCRIPTOR_TYPE_DATA_READ_ONLY 0x00000000
/**
* Data Read-Only, accessed.
*/
#define SEGMENT_DESCRIPTOR_TYPE_DATA_READ_ONLY_ACCESSED 0x00000001
/**
* Data Read/Write.
*/
#define SEGMENT_DESCRIPTOR_TYPE_DATA_READ_WRITE 0x00000002
/**
* Data Read/Write, accessed.
*/
#define SEGMENT_DESCRIPTOR_TYPE_DATA_READ_WRITE_ACCESSED 0x00000003
/**
* Data Read-Only, expand-down.
*/
#define SEGMENT_DESCRIPTOR_TYPE_DATA_READ_ONLY_EXPAND_DOWN 0x00000004
/**
* Data Read-Only, expand-down, accessed.
*/
#define SEGMENT_DESCRIPTOR_TYPE_DATA_READ_ONLY_EXPAND_DOWN_ACCESSED 0x00000005
/**
* Data Read/Write, expand-down.
*/
#define SEGMENT_DESCRIPTOR_TYPE_DATA_READ_WRITE_EXPAND_DOWN 0x00000006
/**
* Data Read/Write, expand-down, accessed.
*/
#define SEGMENT_DESCRIPTOR_TYPE_DATA_READ_WRITE_EXPAND_DOWN_ACCESSED 0x00000007
/**
* Code Execute-Only.
*/
#define SEGMENT_DESCRIPTOR_TYPE_CODE_EXECUTE_ONLY 0x00000008
/**
* Code Execute-Only, accessed.
*/
#define SEGMENT_DESCRIPTOR_TYPE_CODE_EXECUTE_ONLY_ACCESSED 0x00000009
/**
* Code Execute/Read.
*/
#define SEGMENT_DESCRIPTOR_TYPE_CODE_EXECUTE_READ 0x0000000A
/**
* Code Execute/Read, accessed.
*/
#define SEGMENT_DESCRIPTOR_TYPE_CODE_EXECUTE_READ_ACCESSED 0x0000000B
/**
* Code Execute-Only, conforming.
*/
#define SEGMENT_DESCRIPTOR_TYPE_CODE_EXECUTE_ONLY_CONFORMING 0x0000000C
/**
* Code Execute-Only, conforming, accessed.
*/
#define SEGMENT_DESCRIPTOR_TYPE_CODE_EXECUTE_ONLY_CONFORMING_ACCESSED 0x0000000D
/**
* Code Execute/Read, conforming.
*/
#define SEGMENT_DESCRIPTOR_TYPE_CODE_EXECUTE_READ_CONFORMING 0x0000000E
/**
* Code Execute/Read, conforming, accessed.
*/
#define SEGMENT_DESCRIPTOR_TYPE_CODE_EXECUTE_READ_CONFORMING_ACCESSED 0x0000000F
/**
* @}
*/
/**
* @defgroup SEGMENT_DESCRIPTOR_SYSTEM_TYPE \
* System Descriptor Types
*
* When the S (descriptor type) flag in a segment descriptor is clear, the descriptor type is a
* system descriptor. The processor recognizes the following types of system descriptors:
* - Local descriptor-table (LDT) segment descriptor.
* - Task-state segment (TSS) descriptor.
* - Call-gate descriptor.
* - Interrupt-gate descriptor.
* - Trap-gate descriptor.
* - Task-gate descriptor.
* These descriptor types fall into two categories: system-segment descriptors and gate descriptors.
* System-segment descriptors point to system segments (LDT and TSS segments). Gate descriptors are
* in themselves "gates," which hold pointers to procedure entry points in code segments (call,
* interrupt, and trap gates) or which hold segment selectors for TSS's (task gates).
*
* @see Vol3A[3.5(SYSTEM DESCRIPTOR TYPES)] (reference)
* @{
*/
/**
* - 32-Bit Mode: Reserved
* - IA-32e Mode: Reserved
*/
#define SEGMENT_DESCRIPTOR_TYPE_RESERVED_1 0x00000000
/**
* - 32-Bit Mode: 16-bit TSS (Available)
* - IA-32e Mode: Reserved
*/
#define SEGMENT_DESCRIPTOR_TYPE_TSS_16_AVAILABLE 0x00000001
/**
* - 32-Bit Mode: LDT
* - IA-32e Mode: LDT
*/
#define SEGMENT_DESCRIPTOR_TYPE_LDT 0x00000002
/**
* - 32-Bit Mode: 16-bit TSS (Busy)
* - IA-32e Mode: Reserved
*/
#define SEGMENT_DESCRIPTOR_TYPE_TSS_16_BUSY 0x00000003
/**
* - 32-Bit Mode: 16-bit Call Gate
* - IA-32e Mode: Reserved
*/
#define SEGMENT_DESCRIPTOR_TYPE_CALL_GATE_16 0x00000004
/**
* - 32-Bit Mode: Task Gate
* - IA-32e Mode: Reserved
*/
#define SEGMENT_DESCRIPTOR_TYPE_TASK_GATE 0x00000005
/**
* - 32-Bit Mode: 16-bit Interrupt Gate
* - IA-32e Mode: Reserved
*/
#define SEGMENT_DESCRIPTOR_TYPE_INTERRUPT_GATE_16 0x00000006
/**
* - 32-Bit Mode: 16-bit Trap Gate
* - IA-32e Mode: Reserved
*/
#define SEGMENT_DESCRIPTOR_TYPE_TRAP_GATE_16 0x00000007
/**
* - 32-Bit Mode: Reserved
* - IA-32e Mode: Reserved
*/
#define SEGMENT_DESCRIPTOR_TYPE_RESERVED_2 0x00000008
/**
* - 32-Bit Mode: 32-bit TSS (Available)
* - IA-32e Mode: 64-bit TSS (Available)
*/
#define SEGMENT_DESCRIPTOR_TYPE_TSS_AVAILABLE 0x00000009
/**
* - 32-Bit Mode: Reserved
* - IA-32e Mode: Reserved
*/
#define SEGMENT_DESCRIPTOR_TYPE_RESERVED_3 0x0000000A
/**
* - 32-Bit Mode: 32-bit TSS (Busy)
* - IA-32e Mode: 64-bit TSS (Busy)
*/
#define SEGMENT_DESCRIPTOR_TYPE_TSS_BUSY 0x0000000B
/**
* - 32-Bit Mode: 32-bit Call Gate
* - IA-32e Mode: 64-bit Call Gate
*/
#define SEGMENT_DESCRIPTOR_TYPE_CALL_GATE 0x0000000C
/**
* - 32-Bit Mode: Reserved
* - IA-32e Mode: Reserved
*/
#define SEGMENT_DESCRIPTOR_TYPE_RESERVED_4 0x0000000D
/**
* - 32-Bit Mode: 32-bit Interrupt Gate
* - IA-32e Mode: 64-bit Interrupt Gate
*/
#define SEGMENT_DESCRIPTOR_TYPE_INTERRUPT_GATE 0x0000000E
/**
* - 32-Bit Mode: 32-bit Trap Gate
* - IA-32e Mode: 64-bit Trap Gate
*/
#define SEGMENT_DESCRIPTOR_TYPE_TRAP_GATE 0x0000000F
/**
* @}
*/
/**
* @brief A segment selector is a 16-bit identifier for a segment. It does not point directly to the
* segment, but instead points to the segment descriptor that defines the segment
*
* @see Vol3A[3.4.2(Segment Selectors)] (reference)
*/
typedef union
{
struct
{
/**
 * [Bits 1:0] Requested Privilege Level (RPL). Specifies the privilege level of the
 * selector. The privilege level can range from 0 to 3, with 0 being the most
 * privileged level.
 *
 * @see Vol3A[5.5(Privilege Levels)]
 */
UINT16 RequestPrivilegeLevel : 2;
#define SEGMENT_SELECTOR_REQUEST_PRIVILEGE_LEVEL_BIT 0
#define SEGMENT_SELECTOR_REQUEST_PRIVILEGE_LEVEL_FLAG 0x03
#define SEGMENT_SELECTOR_REQUEST_PRIVILEGE_LEVEL_MASK 0x03
#define SEGMENT_SELECTOR_REQUEST_PRIVILEGE_LEVEL(_) (((_) >> 0) & 0x03)
/**
 * [Bit 2] Table Indicator (TI). Specifies the descriptor table to use: clearing this
 * flag selects the GDT; setting this flag selects the current LDT.
 */
UINT16 Table : 1;
#define SEGMENT_SELECTOR_TABLE_BIT 2
#define SEGMENT_SELECTOR_TABLE_FLAG 0x04
#define SEGMENT_SELECTOR_TABLE_MASK 0x01
#define SEGMENT_SELECTOR_TABLE(_) (((_) >> 2) & 0x01)
/**
 * [Bits 15:3] Selects one of 8192 descriptors in the GDT or LDT. The processor
 * multiplies the index value by 8 (the number of bytes in a segment descriptor) and
 * adds the result to the base address of the GDT or LDT (from the GDTR or LDTR
 * register, respectively).
 */
UINT16 Index : 13;
#define SEGMENT_SELECTOR_INDEX_BIT 3
#define SEGMENT_SELECTOR_INDEX_FLAG 0xFFF8
#define SEGMENT_SELECTOR_INDEX_MASK 0x1FFF
#define SEGMENT_SELECTOR_INDEX(_) (((_) >> 3) & 0x1FFF)
};
/**
 * The selector as a raw 16-bit value; aliases the bit-field view above so a
 * selector read from a segment register can be assigned and decoded directly.
 */
UINT16 AsUInt;
} SEGMENT_SELECTOR;
/**
* @}
*/
/**
* @brief Task State Segment (64-bit)
*
* @see Vol3C[7.7(Task Management in 64-bit Mode)] (reference)
*/
/*
 * pack(1) is required: the hardware-defined 64-bit TSS layout is exactly 104 bytes
 * and places the UINT64 stack pointers at 4-byte-aligned offsets (Rsp0 is at offset
 * 4, immediately after the 32-bit Reserved0 field), which natural alignment would
 * otherwise pad.
 */
#pragma pack(push, 1)
typedef struct
{
/**
 * Reserved bits. Set to 0.
 */
UINT32 Reserved0;
/**
 * Stack pointer loaded on a privilege transition to ring 0 (RSP for CPL 0).
 */
UINT64 Rsp0;
/**
 * Stack pointer loaded on a privilege transition to ring 1 (RSP for CPL 1).
 */
UINT64 Rsp1;
/**
 * Stack pointer loaded on a privilege transition to ring 2 (RSP for CPL 2).
 */
UINT64 Rsp2;
/**
 * Reserved bits. Set to 0.
 */
UINT64 Reserved1;
/**
 * Interrupt stack table pointer (1).
 */
UINT64 Ist1;
/**
 * Interrupt stack table pointer (2).
 */
UINT64 Ist2;
/**
 * Interrupt stack table pointer (3).
 */
UINT64 Ist3;
/**
 * Interrupt stack table pointer (4).
 */
UINT64 Ist4;
/**
 * Interrupt stack table pointer (5).
 */
UINT64 Ist5;
/**
 * Interrupt stack table pointer (6).
 */
UINT64 Ist6;
/**
 * Interrupt stack table pointer (7).
 */
UINT64 Ist7;
/**
 * Reserved bits. Set to 0.
 */
UINT64 Reserved2;
/**
 * Reserved bits. Set to 0.
 */
UINT16 Reserved3;
/**
 * The 16-bit offset to the I/O permission bit map from the 64-bit TSS base.
 */
UINT16 IoMapBase;
} TASK_STATE_SEGMENT_64;
#pragma pack(pop)
/**
* @defgroup VMX \
* VMX
* @{
*/
/**
* @{
*/
/**
* @defgroup VMX_BASIC_EXIT_REASONS \
* VMX Basic Exit Reasons
*
* VMX Basic Exit Reasons.
*
* @see Vol3D[C(VMX BASIC EXIT REASONS)] (reference)
* @{
*/
/**
* @brief Exception or non-maskable interrupt (NMI)
*
* Either:
* -# Guest software caused an exception and the bit in the exception bitmap associated with
* exception's vector was 1. This case includes executions of BOUND that cause \#BR, executions of
* INT1 (they cause \#DB), executions of INT3 (they cause
* \#BP), executions of INTO that cause \#OF, and executions of UD0, UD1, and UD2 (they cause \#UD).
* -# An NMI was delivered to the logical processor and the "NMI exiting" VM-execution control
* was 1.
*/
#define VMX_EXIT_REASON_EXCEPTION_OR_NMI 0x00000000
/**
* @brief External interrupt
*
* An external interrupt arrived and the "external-interrupt exiting" VM-execution control was 1.
*/
#define VMX_EXIT_REASON_EXTERNAL_INTERRUPT 0x00000001
/**
* @brief Triple fault
*
* The logical processor encountered an exception while attempting to call the double-fault handler
* and that exception did not itself cause a VM exit due to the exception bitmap.
*/
#define VMX_EXIT_REASON_TRIPLE_FAULT 0x00000002
/**
* @brief INIT signal
*
* An INIT signal arrived.
*/
#define VMX_EXIT_REASON_INIT_SIGNAL 0x00000003
/**
* @brief Start-up IPI (SIPI)
*
* A SIPI arrived while the logical processor was in the "wait-for-SIPI" state.
*/
#define VMX_EXIT_REASON_STARTUP_IPI 0x00000004
/**
* @brief I/O system-management interrupt (SMI)
*
* An SMI arrived immediately after retirement of an I/O instruction and caused an SMM VM exit.
*
* @see Vol3C[34.15.2(SMM VM Exits)]
*/
#define VMX_EXIT_REASON_IO_SMI 0x00000005
/**
* @brief Other SMI
*
* An SMI arrived and caused an SMM VM exit but not immediately after retirement of an I/O
* instruction.
*
* @see Vol3C[34.15.2(SMM VM Exits)]
*/
#define VMX_EXIT_REASON_SMI 0x00000006
/**
* @brief Interrupt window exiting
*
* At the beginning of an instruction, RFLAGS.IF was 1; events were not blocked by STI or by MOV SS;
* and the "interrupt-window exiting" VM-execution control was 1.
*/
#define VMX_EXIT_REASON_INTERRUPT_WINDOW 0x00000007
/**
* @brief NMI window exiting
*
* At the beginning of an instruction, there was no virtual-NMI blocking; events were not blocked by
* MOV SS; and the "NMI-window exiting" VM-execution control was 1.
*/
#define VMX_EXIT_REASON_NMI_WINDOW 0x00000008
/**
* @brief Task switch
*
* Guest software attempted a task switch.
*/
#define VMX_EXIT_REASON_TASK_SWITCH 0x00000009
/**
* @brief CPUID
*
* Guest software attempted to execute CPUID.
*/
#define VMX_EXIT_REASON_EXECUTE_CPUID 0x0000000A
/**
* @brief GETSEC
*
* Guest software attempted to execute GETSEC.
*/
#define VMX_EXIT_REASON_EXECUTE_GETSEC 0x0000000B
/**
* @brief HLT
*
* Guest software attempted to execute HLT and the "HLT exiting" VM-execution control was 1.
*/
#define VMX_EXIT_REASON_EXECUTE_HLT 0x0000000C
/**
* @brief INVD
*
* Guest software attempted to execute INVD.
*/
#define VMX_EXIT_REASON_EXECUTE_INVD 0x0000000D
/**
* @brief INVLPG
*
* Guest software attempted to execute INVLPG and the "INVLPG exiting" VM-execution control was 1.
*/
#define VMX_EXIT_REASON_EXECUTE_INVLPG 0x0000000E
/**
* @brief RDPMC
*
* Guest software attempted to execute RDPMC and the "RDPMC exiting" VM-execution control was 1.
*/
#define VMX_EXIT_REASON_EXECUTE_RDPMC 0x0000000F
/**
* @brief RDTSC
*
* Guest software attempted to execute RDTSC and the "RDTSC exiting" VM-execution control was 1.
*/
#define VMX_EXIT_REASON_EXECUTE_RDTSC 0x00000010
/**
* @brief RSM in SMM
*
* Guest software attempted to execute RSM in SMM.
*/
#define VMX_EXIT_REASON_EXECUTE_RSM_IN_SMM 0x00000011
/**
* @brief VMCALL
*
* VMCALL was executed either by guest software (causing an ordinary VM exit) or by the executive
* monitor (causing an SMM VM exit).
*
* @see Vol3C[34.15.2(SMM VM Exits)]
*/
#define VMX_EXIT_REASON_EXECUTE_VMCALL 0x00000012
/**
* @brief VMCLEAR
*
* Guest software attempted to execute VMCLEAR.
*/
#define VMX_EXIT_REASON_EXECUTE_VMCLEAR 0x00000013
/**
* @brief VMLAUNCH
*
* Guest software attempted to execute VMLAUNCH.
*/
#define VMX_EXIT_REASON_EXECUTE_VMLAUNCH 0x00000014
/**
* @brief VMPTRLD
*
* Guest software attempted to execute VMPTRLD.
*/
#define VMX_EXIT_REASON_EXECUTE_VMPTRLD 0x00000015
/**
* @brief VMPTRST
*
* Guest software attempted to execute VMPTRST.
*/
#define VMX_EXIT_REASON_EXECUTE_VMPTRST 0x00000016
/**
* @brief VMREAD
*
* Guest software attempted to execute VMREAD.
*/
#define VMX_EXIT_REASON_EXECUTE_VMREAD 0x00000017
/**
* @brief VMRESUME
*
* Guest software attempted to execute VMRESUME.
*/
#define VMX_EXIT_REASON_EXECUTE_VMRESUME 0x00000018
/**
* @brief VMWRITE
*
* Guest software attempted to execute VMWRITE.
*/
#define VMX_EXIT_REASON_EXECUTE_VMWRITE 0x00000019
/**
* @brief VMXOFF
*
* Guest software attempted to execute VMXOFF.
*/
#define VMX_EXIT_REASON_EXECUTE_VMXOFF 0x0000001A
/**
* @brief VMXON
*
* Guest software attempted to execute VMXON.
*/
#define VMX_EXIT_REASON_EXECUTE_VMXON 0x0000001B
/**
* @brief Control-register accesses
*
* Guest software attempted to access CR0, CR3, CR4, or CR8 using CLTS, LMSW, or MOV CR and the
* VM-execution control fields indicate that a VM exit should occur. This basic exit reason is not
* used for trap-like VM exits following executions of the MOV to CR8 instruction when the "use TPR
* shadow" VM-execution control is 1. Such VM exits instead use basic exit reason 43.
*
* @see Vol3C[25.1(INSTRUCTIONS THAT CAUSE VM EXITS)]
*/
#define VMX_EXIT_REASON_MOV_CR 0x0000001C
/**
* @brief Debug-register accesses
*
* Guest software attempted a MOV to or from a debug register and the "MOV-DR exiting" VM-execution
* control was 1.
*/
#define VMX_EXIT_REASON_MOV_DR 0x0000001D
/**
* @brief I/O instruction
*
* Guest software attempted to execute an I/O instruction and either:
* -# The "use I/O bitmaps" VM-execution control was 0 and the "unconditional I/O exiting"
* VM-execution control was 1.
* -# The "use I/O bitmaps" VM-execution control was 1 and a bit in the I/O bitmap associated with
* one of the ports accessed by the I/O instruction was 1.
*/
#define VMX_EXIT_REASON_EXECUTE_IO_INSTRUCTION 0x0000001E
/**
* @brief RDMSR
*
* Guest software attempted to execute RDMSR and either:
* -# The "use MSR bitmaps" VM-execution control was 0.
* -# The value of RCX is neither in the range 00000000H - 00001FFFH nor in the range C0000000H -
* C0001FFFH.
* -# The value of RCX was in the range 00000000H - 00001FFFH and the nth bit in read bitmap for low
* MSRs is 1, where n was the value of RCX.
* -# The value of RCX is in the range C0000000H - C0001FFFH and the nth bit in read bitmap for high
* MSRs is 1, where n is the value of RCX & 00001FFFH.
*/
#define VMX_EXIT_REASON_EXECUTE_RDMSR 0x0000001F
/**
* @brief WRMSR
*
* Guest software attempted to execute WRMSR and either:
* -# The "use MSR bitmaps" VM-execution control was 0.
* -# The value of RCX is neither in the range 00000000H - 00001FFFH nor in the range C0000000H -
* C0001FFFH.
* -# The value of RCX was in the range 00000000H - 00001FFFH and the nth bit in write bitmap for
* low MSRs is 1, where n was the value of RCX.
* -# The value of RCX is in the range C0000000H - C0001FFFH and the nth bit in write bitmap for
* high MSRs is 1, where n is the value of RCX & 00001FFFH.
*/
#define VMX_EXIT_REASON_EXECUTE_WRMSR 0x00000020
/**
* @brief VM-entry failure due to invalid guest state
*
* A VM entry failed one of the checks identified in Section 26.3.1.
*/
#define VMX_EXIT_REASON_ERROR_INVALID_GUEST_STATE 0x00000021
/**
* @brief VM-entry failure due to MSR loading
*
* A VM entry failed in an attempt to load MSRs. See Section 26.4.
*/
#define VMX_EXIT_REASON_ERROR_MSR_LOAD 0x00000022
/**
* @brief Guest software executed MWAIT
*
* Guest software attempted to execute MWAIT and the "MWAIT exiting" VM-execution control was 1.
*/
#define VMX_EXIT_REASON_EXECUTE_MWAIT 0x00000024
/**
* @brief VM-exit due to monitor trap flag
*
* A VM exit occurred due to the 1-setting of the "monitor trap flag" VM-execution control and
* injection of an MTF VM exit as part of VM entry.
*
* @see Vol3C[25.5.2(Monitor Trap Flag)]
*/
#define VMX_EXIT_REASON_MONITOR_TRAP_FLAG 0x00000025
/**
* @brief Guest software attempted to execute MONITOR
*
* Guest software attempted to execute MONITOR and the "MONITOR exiting" VM-execution control was 1.
*/
#define VMX_EXIT_REASON_EXECUTE_MONITOR 0x00000027
/**
* @brief Guest software attempted to execute PAUSE
*
* Either guest software attempted to execute PAUSE and the "PAUSE exiting" VM-execution control was
* 1 or the "PAUSE-loop exiting" VM-execution control was 1 and guest software executed a PAUSE loop
* with execution time exceeding PLE_Window.
*
* @see Vol3C[25.1.3(Instructions That Cause VM Exits Conditionally)]
*/
#define VMX_EXIT_REASON_EXECUTE_PAUSE 0x00000028
/**
* @brief VM-entry failure due to machine-check
*
* A machine-check event occurred during VM entry.
*
* @see Vol3C[26.8(MACHINE-CHECK EVENTS DURING VM ENTRY)]
*/
#define VMX_EXIT_REASON_ERROR_MACHINE_CHECK 0x00000029
/**
* @brief TPR below threshold
*
* The logical processor determined that the value of bits 7:4 of the byte at offset 080H on the
* virtual-APIC page was below that of the TPR threshold VM-execution control field while the "use
* TPR shadow" VM-execution control was 1, either as part of TPR virtualization or VM entry.
*
* @see Vol3C[29.1.2(TPR Virtualization)]
* @see Vol3C[26.6.7(VM Exits Induced by the TPR Threshold)]
*/
#define VMX_EXIT_REASON_TPR_BELOW_THRESHOLD 0x0000002B
/**
* @brief APIC access
*
* Guest software attempted to access memory at a physical address on the APIC-access page and the
* "virtualize APIC accesses" VM-execution control was 1.
*
* @see Vol3C[29.4(VIRTUALIZING MEMORY-MAPPED APIC ACCESSES)]
*/
#define VMX_EXIT_REASON_APIC_ACCESS 0x0000002C
/**
* @brief Virtualized EOI
*
* EOI virtualization was performed for a virtual interrupt whose vector indexed a bit set in the
* EOI-exit bitmap.
*/
#define VMX_EXIT_REASON_VIRTUALIZED_EOI 0x0000002D
/**
* @brief Access to GDTR or IDTR
*
* Guest software attempted to execute LGDT, LIDT, SGDT, or SIDT and the "descriptor-table exiting"
* VM-execution control was 1.
*/
#define VMX_EXIT_REASON_GDTR_IDTR_ACCESS 0x0000002E
/**
* @brief Access to LDTR or TR
*
* Guest software attempted to execute LLDT, LTR, SLDT, or STR and the "descriptor-table exiting"
* VM-execution control was 1.
*/
#define VMX_EXIT_REASON_LDTR_TR_ACCESS 0x0000002F
/**
* @brief EPT violation
*
* An attempt to access memory with a guest-physical address was disallowed by the configuration of
* the EPT paging structures.
*/
#define VMX_EXIT_REASON_EPT_VIOLATION 0x00000030
/**
* @brief EPT misconfiguration
*
* An attempt to access memory with a guest-physical address encountered a misconfigured EPT
* paging-structure entry.
*/
#define VMX_EXIT_REASON_EPT_MISCONFIGURATION 0x00000031
/**
* @brief INVEPT
*
* Guest software attempted to execute INVEPT.
*/
#define VMX_EXIT_REASON_EXECUTE_INVEPT 0x00000032
/**
* @brief RDTSCP
*
* Guest software attempted to execute RDTSCP and the "enable RDTSCP" and "RDTSC exiting"
* VM-execution controls were both 1.
*/
#define VMX_EXIT_REASON_EXECUTE_RDTSCP 0x00000033
/**
* @brief VMX-preemption timer expired
*
* The preemption timer counted down to zero.
*/
#define VMX_EXIT_REASON_VMX_PREEMPTION_TIMER_EXPIRED 0x00000034
/**
* @brief INVVPID
*
* Guest software attempted to execute INVVPID.
*/
#define VMX_EXIT_REASON_EXECUTE_INVVPID 0x00000035
/**
* @brief WBINVD
*
* Guest software attempted to execute WBINVD and the "WBINVD exiting" VM-execution control was 1.
*/
#define VMX_EXIT_REASON_EXECUTE_WBINVD 0x00000036
/**
* @brief XSETBV - Guest software attempted to execute XSETBV
*
* Guest software attempted to execute XSETBV.
*/
#define VMX_EXIT_REASON_EXECUTE_XSETBV 0x00000037
/**
* @brief APIC write
*
* Guest software completed a write to the virtual-APIC page that must be virtualized by VMM
* software.
*
* @see Vol3C[29.4.3.3(APIC-Write VM Exits)]
*/
#define VMX_EXIT_REASON_APIC_WRITE 0x00000038
/**
* @brief RDRAND
*
* Guest software attempted to execute RDRAND and the "RDRAND exiting" VM-execution control was 1.
*/
#define VMX_EXIT_REASON_EXECUTE_RDRAND 0x00000039
/**
* @brief INVPCID
*
* Guest software attempted to execute INVPCID and the "enable INVPCID" and "INVLPG exiting"
* VM-execution controls were both 1.
*/
#define VMX_EXIT_REASON_EXECUTE_INVPCID 0x0000003A
/**
* @brief VMFUNC
*
* Guest software invoked a VM function with the VMFUNC instruction and the VM function either was
* not enabled or generated a function-specific condition causing a VM exit.
*/
#define VMX_EXIT_REASON_EXECUTE_VMFUNC 0x0000003B
/**
* @brief ENCLS
*
* Guest software attempted to execute ENCLS and "enable ENCLS exiting" VM-execution control was 1
* and either:
* -# EAX < 63 and the corresponding bit in the ENCLS-exiting bitmap is 1; or
* -# EAX >= 63 and bit 63 in the ENCLS-exiting bitmap is 1.
*/
#define VMX_EXIT_REASON_EXECUTE_ENCLS 0x0000003C
/**
* @brief RDSEED
*
* Guest software attempted to execute RDSEED and the "RDSEED exiting" VM-execution control was 1.
*/
#define VMX_EXIT_REASON_EXECUTE_RDSEED 0x0000003D
/**
* @brief Page-modification log full
*
* The processor attempted to create a page-modification log entry and the value of the PML index
* was not in the range 0-511.
*/
#define VMX_EXIT_REASON_PAGE_MODIFICATION_LOG_FULL 0x0000003E
/**
* @brief XSAVES
*
* Guest software attempted to execute XSAVES, the "enable XSAVES/XRSTORS" was 1, and a bit was set
* in the logical-AND of the following three values: EDX:EAX, the IA32_XSS MSR, and the XSS-exiting
* bitmap.
*/
#define VMX_EXIT_REASON_EXECUTE_XSAVES 0x0000003F
/**
* @brief XRSTORS
*
* Guest software attempted to execute XRSTORS, the "enable XSAVES/XRSTORS" was 1, and a bit was set
* in the logical-AND of the following three values: EDX:EAX, the IA32_XSS MSR, and the XSS-exiting
* bitmap.
*/
#define VMX_EXIT_REASON_EXECUTE_XRSTORS 0x00000040
/**
* @}
*/
/**
* @defgroup VMX_INSTRUCTION_ERROR_NUMBERS \
* VM-Instruction Error Numbers
*
* VM-Instruction Error Numbers.
*
* @see Vol3C[30.4(VM INSTRUCTION ERROR NUMBERS)] (reference)
* @{
*/
/**
* VMCALL executed in VMX root operation.
*/
#define VMX_ERROR_VMCALL_IN_VMX_ROOT_OPERATION 0x00000001
/**
* VMCLEAR with invalid physical address.
*/
#define VMX_ERROR_VMCLEAR_INVALID_PHYSICAL_ADDRESS 0x00000002
/**
* VMCLEAR with VMXON pointer.
*/
#define VMX_ERROR_VMCLEAR_INVALID_VMXON_POINTER 0x00000003
/**
 * VMLAUNCH with non-clear VMCS. (Note: the identifier below misspells VMLAUNCH as
 * "VMLAUCH"; the name is kept as-is for source compatibility with existing callers.)
 */
#define VMX_ERROR_VMLAUCH_NON_CLEAR_VMCS 0x00000004
/**
* VMRESUME with non-launched VMCS.
*/
#define VMX_ERROR_VMRESUME_NON_LAUNCHED_VMCS 0x00000005
/**
* VMRESUME after VMXOFF (VMXOFF and VMXON between VMLAUNCH and VMRESUME).
*/
#define VMX_ERROR_VMRESUME_AFTER_VMXOFF 0x00000006
/**
* VM entry with invalid control field(s).
*/
#define VMX_ERROR_VMENTRY_INVALID_CONTROL_FIELDS 0x00000007
/**
* VM entry with invalid host-state field(s).
*/
#define VMX_ERROR_VMENTRY_INVALID_HOST_STATE 0x00000008
/**
* VMPTRLD with invalid physical address.
*/
#define VMX_ERROR_VMPTRLD_INVALID_PHYSICAL_ADDRESS 0x00000009
/**
* VMPTRLD with VMXON pointer.
*/
#define VMX_ERROR_VMPTRLD_VMXON_POINTER 0x0000000A
/**
* VMPTRLD with incorrect VMCS revision identifier.
*/
#define VMX_ERROR_VMPTRLD_INCORRECT_VMCS_REVISION_ID 0x0000000B
/**
* VMREAD/VMWRITE from/to unsupported VMCS component.
*/
#define VMX_ERROR_VMREAD_VMWRITE_INVALID_COMPONENT 0x0000000C
/**
* VMWRITE to read-only VMCS component.
*/
#define VMX_ERROR_VMWRITE_READONLY_COMPONENT 0x0000000D
/**
* VMXON executed in VMX root operation.
*/
#define VMX_ERROR_VMXON_IN_VMX_ROOT_OP 0x0000000F
/**
* VM entry with invalid executive-VMCS pointer.
*/
#define VMX_ERROR_VMENTRY_INVALID_VMCS_EXECUTIVE_POINTER 0x00000010
/**
* VM entry with non-launched executive VMCS.
*/
#define VMX_ERROR_VMENTRY_NON_LAUNCHED_EXECUTIVE_VMCS 0x00000011
/**
* VM entry with executive-VMCS pointer not VMXON pointer (when attempting to deactivate the
* dual-monitor treatment of SMIs and SMM).
*/
#define VMX_ERROR_VMENTRY_EXECUTIVE_VMCS_PTR 0x00000012
/**
* VMCALL with non-clear VMCS (when attempting to activate the dual-monitor treatment of SMIs and
* SMM).
*/
#define VMX_ERROR_VMCALL_NON_CLEAR_VMCS 0x00000013
/**
* VMCALL with invalid VM-exit control fields.
*/
#define VMX_ERROR_VMCALL_INVALID_VMEXIT_FIELDS 0x00000014
/**
* VMCALL with incorrect MSEG revision identifier (when attempting to activate the dual-monitor
* treatment of SMIs and SMM).
*/
#define VMX_ERROR_VMCALL_INVALID_MSEG_REVISION_ID 0x00000016
/**
* VMXOFF under dual-monitor treatment of SMIs and SMM.
*/
#define VMX_ERROR_VMXOFF_DUAL_MONITOR 0x00000017
/**
* VMCALL with invalid SMM-monitor features (when attempting to activate the dual-monitor treatment
* of SMIs and SMM).
*/
#define VMX_ERROR_VMCALL_INVALID_SMM_MONITOR 0x00000018
/**
* VM entry with invalid VM-execution control fields in executive VMCS (when attempting to return
* from SMM).
*/
#define VMX_ERROR_VMENTRY_INVALID_VM_EXECUTION_CONTROL 0x00000019
/**
* VM entry with events blocked by MOV SS.
*/
#define VMX_ERROR_VMENTRY_MOV_SS 0x0000001A
/**
* Invalid operand to INVEPT/INVVPID.
*/
#define VMX_ERROR_INVEPT_INVVPID_INVALID_OPERAND 0x0000001C
/**
* @}
*/
/**
* @defgroup VMX_EXCEPTIONS \
* Virtualization Exceptions
*
* Virtualization Exceptions.
*
* @see Vol3C[25.5.6(Virtualization Exceptions)] (reference)
* @{
*/
typedef struct
{
/**
 * The 32-bit value that would have been saved into the VMCS as an exit reason had a VM exit
 * occurred instead of the virtualization exception. For EPT violations, this value is 48
 * (00000030H).
 */
UINT32 Reason;
/**
 * The value FFFFFFFFH, stored by the processor when the virtualization exception is
 * delivered.
 */
UINT32 ExceptionMask;
/**
 * The 64-bit value that would have been saved into the VMCS as an exit qualification had a
 * VM exit occurred instead of the virtualization exception.
 */
UINT64 Exit;
/**
 * The 64-bit value that would have been saved into the VMCS as a guest-linear address had a
 * VM exit occurred instead of the virtualization exception.
 */
UINT64 GuestLinearAddress;
/**
 * The 64-bit value that would have been saved into the VMCS as a guest-physical address had
 * a VM exit occurred instead of the virtualization exception.
 */
UINT64 GuestPhysicalAddress;
/**
 * The current 16-bit value of the EPTP index VM-execution control.
 *
 * @see Vol3C[24.6.18(Controls for Virtualization Exceptions)]
 * @see Vol3C[25.5.5.3(EPTP Switching)]
 */
UINT16 CurrentEptpIndex;
} VMX_VIRTUALIZATION_EXCEPTION_INFORMATION;
/**
* @}
*/
/**
* @defgroup VMX_BASIC_EXIT_INFORMATION \
* Basic VM-Exit Information
*
* Basic VM-Exit Information.
*
* @see Vol3C[27.2.1(Basic VM-Exit Information)] (reference)
* @{
*/
/**
* @brief Exit Qualification for Debug Exceptions
*/
typedef union
{
struct
{
/**
 * @brief B0 - B3
 *
 * [Bits 3:0] When set, each of these bits indicates that the corresponding
 * breakpoint condition (DR0-DR3) was met. Any of these bits may be set even if its
 * corresponding enabling bit in DR7 is not set.
 */
UINT64 BreakpointCondition : 4;
#define VMX_EXIT_QUALIFICATION_DEBUG_EXCEPTION_BREAKPOINT_CONDITION_BIT 0
#define VMX_EXIT_QUALIFICATION_DEBUG_EXCEPTION_BREAKPOINT_CONDITION_FLAG 0x0F
#define VMX_EXIT_QUALIFICATION_DEBUG_EXCEPTION_BREAKPOINT_CONDITION_MASK 0x0F
#define VMX_EXIT_QUALIFICATION_DEBUG_EXCEPTION_BREAKPOINT_CONDITION(_) (((_) >> 0) & 0x0F)
UINT64 Reserved1 : 9;
/**
 * @brief BD
 *
 * [Bit 13] When set, this bit indicates that the cause of the debug exception is
 * "debug register access detected."
 */
UINT64 DebugRegisterAccessDetected : 1;
#define VMX_EXIT_QUALIFICATION_DEBUG_EXCEPTION_DEBUG_REGISTER_ACCESS_DETECTED_BIT 13
#define VMX_EXIT_QUALIFICATION_DEBUG_EXCEPTION_DEBUG_REGISTER_ACCESS_DETECTED_FLAG 0x2000
#define VMX_EXIT_QUALIFICATION_DEBUG_EXCEPTION_DEBUG_REGISTER_ACCESS_DETECTED_MASK 0x01
#define VMX_EXIT_QUALIFICATION_DEBUG_EXCEPTION_DEBUG_REGISTER_ACCESS_DETECTED(_) \
(((_) >> 13) & 0x01)
/**
 * @brief BS
 *
 * [Bit 14] When set, this bit indicates that the cause of the debug exception is
 * either the execution of a single instruction (if RFLAGS.TF = 1 and
 * IA32_DEBUGCTL.BTF = 0) or a taken branch (if RFLAGS.TF = DEBUGCTL.BTF = 1).
 */
UINT64 SingleInstruction : 1;
#define VMX_EXIT_QUALIFICATION_DEBUG_EXCEPTION_SINGLE_INSTRUCTION_BIT 14
#define VMX_EXIT_QUALIFICATION_DEBUG_EXCEPTION_SINGLE_INSTRUCTION_FLAG 0x4000
#define VMX_EXIT_QUALIFICATION_DEBUG_EXCEPTION_SINGLE_INSTRUCTION_MASK 0x01
#define VMX_EXIT_QUALIFICATION_DEBUG_EXCEPTION_SINGLE_INSTRUCTION(_) (((_) >> 14) & 0x01)
UINT64 Reserved2 : 49;
};
/**
 * The exit qualification as a raw 64-bit value, as read from the VMCS.
 */
UINT64 AsUInt;
} VMX_EXIT_QUALIFICATION_DEBUG_EXCEPTION;
/**
* @brief Exit Qualification for Task Switch
*/
typedef union
{
struct
{
/**
 * [Bits 15:0] Selector of task-state segment (TSS) to which the guest attempted to
 * switch.
 */
UINT64 Selector : 16;
#define VMX_EXIT_QUALIFICATION_TASK_SWITCH_SELECTOR_BIT 0
#define VMX_EXIT_QUALIFICATION_TASK_SWITCH_SELECTOR_FLAG 0xFFFF
#define VMX_EXIT_QUALIFICATION_TASK_SWITCH_SELECTOR_MASK 0xFFFF
#define VMX_EXIT_QUALIFICATION_TASK_SWITCH_SELECTOR(_) (((_) >> 0) & 0xFFFF)
UINT64 Reserved1 : 14;
/**
 * [Bits 31:30] Source of task switch initiation. One of the
 * VMX_EXIT_QUALIFICATION_TYPE_* values below.
 */
UINT64 Source : 2;
#define VMX_EXIT_QUALIFICATION_TASK_SWITCH_SOURCE_BIT 30
#define VMX_EXIT_QUALIFICATION_TASK_SWITCH_SOURCE_FLAG 0xC0000000
#define VMX_EXIT_QUALIFICATION_TASK_SWITCH_SOURCE_MASK 0x03
#define VMX_EXIT_QUALIFICATION_TASK_SWITCH_SOURCE(_) (((_) >> 30) & 0x03)
#define VMX_EXIT_QUALIFICATION_TYPE_CALL_INSTRUCTION 0x00000000
#define VMX_EXIT_QUALIFICATION_TYPE_IRET_INSTRUCTION 0x00000001
#define VMX_EXIT_QUALIFICATION_TYPE_JMP_INSTRUCTION 0x00000002
#define VMX_EXIT_QUALIFICATION_TYPE_TASK_GATE_IN_IDT 0x00000003
UINT64 Reserved2 : 32;
};
/**
 * The exit qualification as a raw 64-bit value, as read from the VMCS.
 */
UINT64 AsUInt;
} VMX_EXIT_QUALIFICATION_TASK_SWITCH;
/**
* @brief Exit Qualification for Control-Register Accesses
*/
typedef union
{
struct
{
/**
 * [Bits 3:0] Number of control register (0 for CLTS and LMSW). Bit 3 is always 0 on
 * processors that do not support Intel 64 architecture as they do not support CR8.
 * One of the VMX_EXIT_QUALIFICATION_REGISTER_CR* values below.
 */
UINT64 ControlRegister : 4;
#define VMX_EXIT_QUALIFICATION_MOV_CR_CONTROL_REGISTER_BIT 0
#define VMX_EXIT_QUALIFICATION_MOV_CR_CONTROL_REGISTER_FLAG 0x0F
#define VMX_EXIT_QUALIFICATION_MOV_CR_CONTROL_REGISTER_MASK 0x0F
#define VMX_EXIT_QUALIFICATION_MOV_CR_CONTROL_REGISTER(_) (((_) >> 0) & 0x0F)
#define VMX_EXIT_QUALIFICATION_REGISTER_CR0 0x00000000
#define VMX_EXIT_QUALIFICATION_REGISTER_CR2 0x00000002
#define VMX_EXIT_QUALIFICATION_REGISTER_CR3 0x00000003
#define VMX_EXIT_QUALIFICATION_REGISTER_CR4 0x00000004
#define VMX_EXIT_QUALIFICATION_REGISTER_CR8 0x00000008
/**
 * [Bits 5:4] Access type. One of the VMX_EXIT_QUALIFICATION_ACCESS_* values below.
 */
UINT64 AccessType : 2;
#define VMX_EXIT_QUALIFICATION_MOV_CR_ACCESS_TYPE_BIT 4
#define VMX_EXIT_QUALIFICATION_MOV_CR_ACCESS_TYPE_FLAG 0x30
#define VMX_EXIT_QUALIFICATION_MOV_CR_ACCESS_TYPE_MASK 0x03
#define VMX_EXIT_QUALIFICATION_MOV_CR_ACCESS_TYPE(_) (((_) >> 4) & 0x03)
#define VMX_EXIT_QUALIFICATION_ACCESS_MOV_TO_CR 0x00000000
#define VMX_EXIT_QUALIFICATION_ACCESS_MOV_FROM_CR 0x00000001
#define VMX_EXIT_QUALIFICATION_ACCESS_CLTS 0x00000002
#define VMX_EXIT_QUALIFICATION_ACCESS_LMSW 0x00000003
/**
 * [Bit 6] LMSW operand type (register or memory). For CLTS and MOV CR, cleared to 0.
 */
UINT64 LmswOperandType : 1;
#define VMX_EXIT_QUALIFICATION_MOV_CR_LMSW_OPERAND_TYPE_BIT 6
#define VMX_EXIT_QUALIFICATION_MOV_CR_LMSW_OPERAND_TYPE_FLAG 0x40
#define VMX_EXIT_QUALIFICATION_MOV_CR_LMSW_OPERAND_TYPE_MASK 0x01
#define VMX_EXIT_QUALIFICATION_MOV_CR_LMSW_OPERAND_TYPE(_) (((_) >> 6) & 0x01)
#define VMX_EXIT_QUALIFICATION_LMSW_OP_REGISTER 0x00000000
#define VMX_EXIT_QUALIFICATION_LMSW_OP_MEMORY 0x00000001
UINT64 Reserved1 : 1;
/**
 * [Bits 11:8] For MOV CR, the general-purpose register operand. One of the
 * VMX_EXIT_QUALIFICATION_GENREG_* values below.
 */
UINT64 GeneralPurposeRegister : 4;
#define VMX_EXIT_QUALIFICATION_MOV_CR_GENERAL_PURPOSE_REGISTER_BIT 8
#define VMX_EXIT_QUALIFICATION_MOV_CR_GENERAL_PURPOSE_REGISTER_FLAG 0xF00
#define VMX_EXIT_QUALIFICATION_MOV_CR_GENERAL_PURPOSE_REGISTER_MASK 0x0F
#define VMX_EXIT_QUALIFICATION_MOV_CR_GENERAL_PURPOSE_REGISTER(_) (((_) >> 8) & 0x0F)
#define VMX_EXIT_QUALIFICATION_GENREG_RAX 0x00000000
#define VMX_EXIT_QUALIFICATION_GENREG_RCX 0x00000001
#define VMX_EXIT_QUALIFICATION_GENREG_RDX 0x00000002
#define VMX_EXIT_QUALIFICATION_GENREG_RBX 0x00000003
#define VMX_EXIT_QUALIFICATION_GENREG_RSP 0x00000004
#define VMX_EXIT_QUALIFICATION_GENREG_RBP 0x00000005
#define VMX_EXIT_QUALIFICATION_GENREG_RSI 0x00000006
#define VMX_EXIT_QUALIFICATION_GENREG_RDI 0x00000007
#define VMX_EXIT_QUALIFICATION_GENREG_R8 0x00000008
#define VMX_EXIT_QUALIFICATION_GENREG_R9 0x00000009
#define VMX_EXIT_QUALIFICATION_GENREG_R10 0x0000000A
#define VMX_EXIT_QUALIFICATION_GENREG_R11 0x0000000B
#define VMX_EXIT_QUALIFICATION_GENREG_R12 0x0000000C
#define VMX_EXIT_QUALIFICATION_GENREG_R13 0x0000000D
#define VMX_EXIT_QUALIFICATION_GENREG_R14 0x0000000E
#define VMX_EXIT_QUALIFICATION_GENREG_R15 0x0000000F
UINT64 Reserved2 : 4;
/**
 * [Bits 31:16] For LMSW, the LMSW source data. For CLTS and MOV CR, cleared to 0.
 */
UINT64 LmswSourceData : 16;
#define VMX_EXIT_QUALIFICATION_MOV_CR_LMSW_SOURCE_DATA_BIT 16
#define VMX_EXIT_QUALIFICATION_MOV_CR_LMSW_SOURCE_DATA_FLAG 0xFFFF0000
#define VMX_EXIT_QUALIFICATION_MOV_CR_LMSW_SOURCE_DATA_MASK 0xFFFF
#define VMX_EXIT_QUALIFICATION_MOV_CR_LMSW_SOURCE_DATA(_) (((_) >> 16) & 0xFFFF)
UINT64 Reserved3 : 32;
};
/**
 * The exit qualification as a raw 64-bit value, as read from the VMCS.
 */
UINT64 AsUInt;
} VMX_EXIT_QUALIFICATION_MOV_CR;
/**
* @brief Exit Qualification for MOV DR
*/
typedef union
{
struct
{
/**
 * [Bits 2:0] Number of debug register. One of the
 * VMX_EXIT_QUALIFICATION_REGISTER_DR* values below (DR4/DR5 are not listed; they
 * alias DR6/DR7).
 */
UINT64 DebugRegister : 3;
#define VMX_EXIT_QUALIFICATION_MOV_DR_DEBUG_REGISTER_BIT 0
#define VMX_EXIT_QUALIFICATION_MOV_DR_DEBUG_REGISTER_FLAG 0x07
#define VMX_EXIT_QUALIFICATION_MOV_DR_DEBUG_REGISTER_MASK 0x07
#define VMX_EXIT_QUALIFICATION_MOV_DR_DEBUG_REGISTER(_) (((_) >> 0) & 0x07)
#define VMX_EXIT_QUALIFICATION_REGISTER_DR0 0x00000000
#define VMX_EXIT_QUALIFICATION_REGISTER_DR1 0x00000001
#define VMX_EXIT_QUALIFICATION_REGISTER_DR2 0x00000002
#define VMX_EXIT_QUALIFICATION_REGISTER_DR3 0x00000003
#define VMX_EXIT_QUALIFICATION_REGISTER_DR6 0x00000006
#define VMX_EXIT_QUALIFICATION_REGISTER_DR7 0x00000007
UINT64 Reserved1 : 1;
/**
 * [Bit 4] Direction of access (0 = MOV to DR; 1 = MOV from DR).
 */
UINT64 DirectionOfAccess : 1;
#define VMX_EXIT_QUALIFICATION_MOV_DR_DIRECTION_OF_ACCESS_BIT 4
#define VMX_EXIT_QUALIFICATION_MOV_DR_DIRECTION_OF_ACCESS_FLAG 0x10
#define VMX_EXIT_QUALIFICATION_MOV_DR_DIRECTION_OF_ACCESS_MASK 0x01
#define VMX_EXIT_QUALIFICATION_MOV_DR_DIRECTION_OF_ACCESS(_) (((_) >> 4) & 0x01)
#define VMX_EXIT_QUALIFICATION_DIRECTION_MOV_TO_DR 0x00000000
#define VMX_EXIT_QUALIFICATION_DIRECTION_MOV_FROM_DR 0x00000001
UINT64 Reserved2 : 3;
/**
 * [Bits 11:8] General-purpose register operand of the MOV DR; encoded like the
 * VMX_EXIT_QUALIFICATION_GENREG_* values (RAX = 0 ... R15 = 15).
 */
UINT64 GeneralPurposeRegister : 4;
#define VMX_EXIT_QUALIFICATION_MOV_DR_GENERAL_PURPOSE_REGISTER_BIT 8
#define VMX_EXIT_QUALIFICATION_MOV_DR_GENERAL_PURPOSE_REGISTER_FLAG 0xF00
#define VMX_EXIT_QUALIFICATION_MOV_DR_GENERAL_PURPOSE_REGISTER_MASK 0x0F
#define VMX_EXIT_QUALIFICATION_MOV_DR_GENERAL_PURPOSE_REGISTER(_) (((_) >> 8) & 0x0F)
UINT64 Reserved3 : 52;
};
/**
 * The exit qualification as a raw 64-bit value, as read from the VMCS.
 */
UINT64 AsUInt;
} VMX_EXIT_QUALIFICATION_MOV_DR;
/**
* @brief Exit Qualification for I/O Instructions
*/
typedef union
{
struct
{
/**
 * [Bits 2:0] Size of access, encoded as (bytes - 1): 0 = 1-byte, 1 = 2-byte,
 * 3 = 4-byte (hence there is no value 2 in the defines below).
 */
UINT64 SizeOfAccess : 3;
#define VMX_EXIT_QUALIFICATION_IO_INSTRUCTION_SIZE_OF_ACCESS_BIT 0
#define VMX_EXIT_QUALIFICATION_IO_INSTRUCTION_SIZE_OF_ACCESS_FLAG 0x07
#define VMX_EXIT_QUALIFICATION_IO_INSTRUCTION_SIZE_OF_ACCESS_MASK 0x07
#define VMX_EXIT_QUALIFICATION_IO_INSTRUCTION_SIZE_OF_ACCESS(_) (((_) >> 0) & 0x07)
#define VMX_EXIT_QUALIFICATION_WIDTH_1_BYTE 0x00000000
#define VMX_EXIT_QUALIFICATION_WIDTH_2_BYTE 0x00000001
#define VMX_EXIT_QUALIFICATION_WIDTH_4_BYTE 0x00000003
/**
 * [Bit 3] Direction of the attempted access (0 = OUT, 1 = IN).
 */
UINT64 DirectionOfAccess : 1;
#define VMX_EXIT_QUALIFICATION_IO_INSTRUCTION_DIRECTION_OF_ACCESS_BIT 3
#define VMX_EXIT_QUALIFICATION_IO_INSTRUCTION_DIRECTION_OF_ACCESS_FLAG 0x08
#define VMX_EXIT_QUALIFICATION_IO_INSTRUCTION_DIRECTION_OF_ACCESS_MASK 0x01
#define VMX_EXIT_QUALIFICATION_IO_INSTRUCTION_DIRECTION_OF_ACCESS(_) (((_) >> 3) & 0x01)
#define VMX_EXIT_QUALIFICATION_DIRECTION_OUT 0x00000000
#define VMX_EXIT_QUALIFICATION_DIRECTION_IN 0x00000001
/**
 * [Bit 4] String instruction (0 = not string; 1 = string, i.e. INS/OUTS).
 */
UINT64 StringInstruction : 1;
#define VMX_EXIT_QUALIFICATION_IO_INSTRUCTION_STRING_INSTRUCTION_BIT 4
#define VMX_EXIT_QUALIFICATION_IO_INSTRUCTION_STRING_INSTRUCTION_FLAG 0x10
#define VMX_EXIT_QUALIFICATION_IO_INSTRUCTION_STRING_INSTRUCTION_MASK 0x01
#define VMX_EXIT_QUALIFICATION_IO_INSTRUCTION_STRING_INSTRUCTION(_) (((_) >> 4) & 0x01)
#define VMX_EXIT_QUALIFICATION_IS_STRING_NOT_STRING 0x00000000
#define VMX_EXIT_QUALIFICATION_IS_STRING_STRING 0x00000001
/**
 * [Bit 5] REP prefixed (0 = not REP; 1 = REP).
 */
UINT64 RepPrefixed : 1;
#define VMX_EXIT_QUALIFICATION_IO_INSTRUCTION_REP_PREFIXED_BIT 5
#define VMX_EXIT_QUALIFICATION_IO_INSTRUCTION_REP_PREFIXED_FLAG 0x20
#define VMX_EXIT_QUALIFICATION_IO_INSTRUCTION_REP_PREFIXED_MASK 0x01
#define VMX_EXIT_QUALIFICATION_IO_INSTRUCTION_REP_PREFIXED(_) (((_) >> 5) & 0x01)
#define VMX_EXIT_QUALIFICATION_IS_REP_NOT_REP 0x00000000
#define VMX_EXIT_QUALIFICATION_IS_REP_REP 0x00000001
/**
 * [Bit 6] Operand encoding (0 = port in DX, 1 = port in immediate operand).
 */
UINT64 OperandEncoding : 1;
#define VMX_EXIT_QUALIFICATION_IO_INSTRUCTION_OPERAND_ENCODING_BIT 6
#define VMX_EXIT_QUALIFICATION_IO_INSTRUCTION_OPERAND_ENCODING_FLAG 0x40
#define VMX_EXIT_QUALIFICATION_IO_INSTRUCTION_OPERAND_ENCODING_MASK 0x01
#define VMX_EXIT_QUALIFICATION_IO_INSTRUCTION_OPERAND_ENCODING(_) (((_) >> 6) & 0x01)
#define VMX_EXIT_QUALIFICATION_ENCODING_DX 0x00000000
#define VMX_EXIT_QUALIFICATION_ENCODING_IMMEDIATE 0x00000001
UINT64 Reserved1 : 9;
/**
 * [Bits 31:16] Port number (as specified in DX or in an immediate operand).
 */
UINT64 PortNumber : 16;
#define VMX_EXIT_QUALIFICATION_IO_INSTRUCTION_PORT_NUMBER_BIT 16
#define VMX_EXIT_QUALIFICATION_IO_INSTRUCTION_PORT_NUMBER_FLAG 0xFFFF0000
#define VMX_EXIT_QUALIFICATION_IO_INSTRUCTION_PORT_NUMBER_MASK 0xFFFF
#define VMX_EXIT_QUALIFICATION_IO_INSTRUCTION_PORT_NUMBER(_) (((_) >> 16) & 0xFFFF)
UINT64 Reserved2 : 32;
};
/**
 * The exit qualification as a raw 64-bit value, as read from the VMCS.
 */
UINT64 AsUInt;
} VMX_EXIT_QUALIFICATION_IO_INSTRUCTION;
/**
* @brief Exit Qualification for APIC-Access VM Exits from Linear Accesses and Guest-Physical
* Accesses
*/
typedef union
{
struct
{
/**
* [Bits 11:0] - If the APIC-access VM exit is due to a linear access, the offset of
* access within the APIC page.
* - Undefined if the APIC-access VM exit is due a guest-physical access.
*/
UINT64 PageOffset : 12;
#define VMX_EXIT_QUALIFICATION_APIC_ACCESS_PAGE_OFFSET_BIT 0
#define VMX_EXIT_QUALIFICATION_APIC_ACCESS_PAGE_OFFSET_FLAG 0xFFF
#define VMX_EXIT_QUALIFICATION_APIC_ACCESS_PAGE_OFFSET_MASK 0xFFF
#define VMX_EXIT_QUALIFICATION_APIC_ACCESS_PAGE_OFFSET(_) (((_) >> 0) & 0xFFF)
/**
* [Bits 15:12] Access type.
*/
UINT64 AccessType : 4;
#define VMX_EXIT_QUALIFICATION_APIC_ACCESS_ACCESS_TYPE_BIT 12
#define VMX_EXIT_QUALIFICATION_APIC_ACCESS_ACCESS_TYPE_FLAG 0xF000
#define VMX_EXIT_QUALIFICATION_APIC_ACCESS_ACCESS_TYPE_MASK 0x0F
#define VMX_EXIT_QUALIFICATION_APIC_ACCESS_ACCESS_TYPE(_) (((_) >> 12) & 0x0F)
/**
* Linear access for a data read during instruction execution.
*/
#define VMX_EXIT_QUALIFICATION_TYPE_LINEAR_READ 0x00000000
/**
* Linear access for a data write during instruction execution.
*/
#define VMX_EXIT_QUALIFICATION_TYPE_LINEAR_WRITE 0x00000001
/**
* Linear access for an instruction fetch.
*/
#define VMX_EXIT_QUALIFICATION_TYPE_LINEAR_INSTRUCTION_FETCH 0x00000002
/**
* Linear access (read or write) during event delivery.
*/
#define VMX_EXIT_QUALIFICATION_TYPE_LINEAR_EVENT_DELIVERY 0x00000003
/**
* Guest-physical access during event delivery.
*/
#define VMX_EXIT_QUALIFICATION_TYPE_PHYSICAL_EVENT_DELIVERY 0x0000000A
/**
* Guest-physical access for an instruction fetch or during instruction execution.
*/
#define VMX_EXIT_QUALIFICATION_TYPE_PHYSICAL_INSTRUCTION_FETCH 0x0000000F
/* [Bits 63:16] Reserved. */
UINT64 Reserved1 : 48;
};
/* Flat 64-bit view of the exit qualification (for raw VMCS reads). */
UINT64 AsUInt;
} VMX_EXIT_QUALIFICATION_APIC_ACCESS;
/**
* @brief Exit Qualification for EPT Violations
*/
typedef union
{
struct
{
/**
* [Bit 0] Set if the access causing the EPT violation was a data read.
*/
UINT64 ReadAccess : 1;
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_READ_ACCESS_BIT 0
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_READ_ACCESS_FLAG 0x01
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_READ_ACCESS_MASK 0x01
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_READ_ACCESS(_) (((_) >> 0) & 0x01)
/**
* [Bit 1] Set if the access causing the EPT violation was a data write.
*/
UINT64 WriteAccess : 1;
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_WRITE_ACCESS_BIT 1
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_WRITE_ACCESS_FLAG 0x02
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_WRITE_ACCESS_MASK 0x01
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_WRITE_ACCESS(_) (((_) >> 1) & 0x01)
/**
* [Bit 2] Set if the access causing the EPT violation was an instruction fetch.
*/
UINT64 ExecuteAccess : 1;
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_EXECUTE_ACCESS_BIT 2
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_EXECUTE_ACCESS_FLAG 0x04
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_EXECUTE_ACCESS_MASK 0x01
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_EXECUTE_ACCESS(_) (((_) >> 2) & 0x01)
/**
* [Bit 3] The logical-AND of bit 0 in the EPT paging-structure entries used to
* translate the guest-physical address of the access causing the EPT violation
* (indicates whether the guest-physical address was readable).
*/
UINT64 EptReadable : 1;
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_EPT_READABLE_BIT 3
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_EPT_READABLE_FLAG 0x08
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_EPT_READABLE_MASK 0x01
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_EPT_READABLE(_) (((_) >> 3) & 0x01)
/**
* [Bit 4] The logical-AND of bit 1 in the EPT paging-structure entries used to
* translate the guest-physical address of the access causing the EPT violation
* (indicates whether the guest-physical address was writeable).
*/
UINT64 EptWriteable : 1;
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_EPT_WRITEABLE_BIT 4
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_EPT_WRITEABLE_FLAG 0x10
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_EPT_WRITEABLE_MASK 0x01
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_EPT_WRITEABLE(_) (((_) >> 4) & 0x01)
/**
* [Bit 5] The logical-AND of bit 2 in the EPT paging-structure entries used to
* translate the guest-physical address of the access causing the EPT violation. If
* the "mode-based execute control for EPT" VM-execution control is 0, this
* indicates whether the guest-physical address was executable. If that control is
* 1, this indicates whether the guest-physical address was executable for
* supervisor-mode linear addresses.
*/
UINT64 EptExecutable : 1;
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_EPT_EXECUTABLE_BIT 5
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_EPT_EXECUTABLE_FLAG 0x20
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_EPT_EXECUTABLE_MASK 0x01
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_EPT_EXECUTABLE(_) (((_) >> 5) & 0x01)
/**
* [Bit 6] If the "mode-based execute control" VM-execution control is 0, the value
* of this bit is undefined. If that control is 1, this bit is the logical-AND of
* bit 10 in the EPT paging-structures entries used to translate the guest-physical
* address of the access causing the EPT violation. In this case, it indicates
* whether the guest-physical address was executable for user-mode linear addresses.
*/
UINT64 EptExecutableForUserMode : 1;
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_EPT_EXECUTABLE_FOR_USER_MODE_BIT 6
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_EPT_EXECUTABLE_FOR_USER_MODE_FLAG 0x40
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_EPT_EXECUTABLE_FOR_USER_MODE_MASK 0x01
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_EPT_EXECUTABLE_FOR_USER_MODE(_) (((_) >> 6) & 0x01)
/**
* [Bit 7] Set if the guest linear-address field is valid. The guest linear-address
* field is valid for all EPT violations except those resulting from an attempt to
* load the guest PDPTEs as part of the execution of the MOV CR instruction.
*/
UINT64 ValidGuestLinearAddress : 1;
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_VALID_GUEST_LINEAR_ADDRESS_BIT 7
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_VALID_GUEST_LINEAR_ADDRESS_FLAG 0x80
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_VALID_GUEST_LINEAR_ADDRESS_MASK 0x01
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_VALID_GUEST_LINEAR_ADDRESS(_) (((_) >> 7) & 0x01)
/**
* [Bit 8] If bit 7 is 1:
* - Set if the access causing the EPT violation is to a guest-physical address that
* is the translation of a linear address.
* - Clear if the access causing the EPT violation is to a paging-structure entry as
* part of a page walk or the update of an accessed or dirty bit. Reserved if bit 7
* is 0 (cleared to 0).
*/
UINT64 CausedByTranslation : 1;
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_CAUSED_BY_TRANSLATION_BIT 8
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_CAUSED_BY_TRANSLATION_FLAG 0x100
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_CAUSED_BY_TRANSLATION_MASK 0x01
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_CAUSED_BY_TRANSLATION(_) (((_) >> 8) & 0x01)
/**
* [Bit 9] This bit is 0 if the linear address is a supervisor-mode linear address
* and 1 if it is a user-mode linear address. Otherwise, this bit is undefined.
*
* @remarks If bit 7 is 1, bit 8 is 1, and the processor supports advanced VM-exit
* information for EPT violations. (If CR0.PG = 0, the translation of every linear
* address is a user-mode linear address and thus this bit will be 1.)
*/
UINT64 UserModeLinearAddress : 1;
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_USER_MODE_LINEAR_ADDRESS_BIT 9
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_USER_MODE_LINEAR_ADDRESS_FLAG 0x200
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_USER_MODE_LINEAR_ADDRESS_MASK 0x01
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_USER_MODE_LINEAR_ADDRESS(_) (((_) >> 9) & 0x01)
/**
* [Bit 10] This bit is 0 if paging translates the linear address to a read-only
* page and 1 if it translates to a read/write page. Otherwise, this bit is
* undefined
*
* @remarks If bit 7 is 1, bit 8 is 1, and the processor supports advanced VM-exit
* information for EPT violations. (If CR0.PG = 0, every linear address is
* read/write and thus this bit will be 1.)
*/
UINT64 ReadableWritablePage : 1;
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_READABLE_WRITABLE_PAGE_BIT 10
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_READABLE_WRITABLE_PAGE_FLAG 0x400
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_READABLE_WRITABLE_PAGE_MASK 0x01
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_READABLE_WRITABLE_PAGE(_) (((_) >> 10) & 0x01)
/**
* [Bit 11] This bit is 0 if paging translates the linear address to an executable
* page and 1 if it translates to an execute-disable page. Otherwise, this bit is
* undefined.
*
* @remarks If bit 7 is 1, bit 8 is 1, and the processor supports advanced VM-exit
* information for EPT violations. (If CR0.PG = 0, CR4.PAE = 0, or IA32_EFER.NXE =
* 0, every linear address is executable and thus this bit will be 0.)
*/
UINT64 ExecuteDisablePage : 1;
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_EXECUTE_DISABLE_PAGE_BIT 11
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_EXECUTE_DISABLE_PAGE_FLAG 0x800
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_EXECUTE_DISABLE_PAGE_MASK 0x01
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_EXECUTE_DISABLE_PAGE(_) (((_) >> 11) & 0x01)
/**
* [Bit 12] NMI unblocking due to IRET.
*/
UINT64 NmiUnblocking : 1;
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_NMI_UNBLOCKING_BIT 12
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_NMI_UNBLOCKING_FLAG 0x1000
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_NMI_UNBLOCKING_MASK 0x01
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_NMI_UNBLOCKING(_) (((_) >> 12) & 0x01)
/**
* [Bit 13] Set if the access causing the EPT violation was a shadow-stack access.
*/
UINT64 ShadowStackAccess : 1;
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_SHADOW_STACK_ACCESS_BIT 13
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_SHADOW_STACK_ACCESS_FLAG 0x2000
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_SHADOW_STACK_ACCESS_MASK 0x01
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_SHADOW_STACK_ACCESS(_) (((_) >> 13) & 0x01)
/**
* [Bit 14] If supervisor shadow-stack control is enabled (by setting bit 7 of
* EPTP), this bit is the same as bit 60 in the EPT paging-structure entry that maps
* the page of the guest-physical address of the access causing the EPT violation.
* Otherwise (or if translation of the guest-physical address terminates before
* reaching an EPT paging-structure entry that maps a page), this bit is undefined.
*/
UINT64 SupervisorShadowStack : 1;
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_SUPERVISOR_SHADOW_STACK_BIT 14
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_SUPERVISOR_SHADOW_STACK_FLAG 0x4000
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_SUPERVISOR_SHADOW_STACK_MASK 0x01
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_SUPERVISOR_SHADOW_STACK(_) (((_) >> 14) & 0x01)
/**
* [Bit 15] This bit is set if the EPT violation was caused as a result of
* guest-paging verification.
*/
UINT64 GuestPagingVerification : 1;
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_GUEST_PAGING_VERIFICATION_BIT 15
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_GUEST_PAGING_VERIFICATION_FLAG 0x8000
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_GUEST_PAGING_VERIFICATION_MASK 0x01
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_GUEST_PAGING_VERIFICATION(_) (((_) >> 15) & 0x01)
/**
* [Bit 16] This bit is set if the access was asynchronous to instruction execution
* not the result of event delivery. (The bit is set if the access is related to
* trace output by Intel PT; see Section 25.5.4.) Otherwise, this bit is cleared.
*/
UINT64 AsynchronousToInstruction : 1;
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_ASYNCHRONOUS_TO_INSTRUCTION_BIT 16
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_ASYNCHRONOUS_TO_INSTRUCTION_FLAG 0x10000
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_ASYNCHRONOUS_TO_INSTRUCTION_MASK 0x01
#define VMX_EXIT_QUALIFICATION_EPT_VIOLATION_ASYNCHRONOUS_TO_INSTRUCTION(_) (((_) >> 16) & 0x01)
/* [Bits 63:17] Reserved. */
UINT64 Reserved1 : 47;
};
/* Flat 64-bit view of the exit qualification (for raw VMCS reads). */
UINT64 AsUInt;
} VMX_EXIT_QUALIFICATION_EPT_VIOLATION;
/**
* @}
*/
/**
* @defgroup VMX_VMEXIT_INSTRUCTION_INFORMATION \
* Information for VM Exits Due to Instruction Execution
*
* Information for VM Exits Due to Instruction Execution.
*
* @see Vol3C[27.2.4(Information for VM Exits Due to Instruction Execution)] (reference)
* @{
*/
/**
* @brief VM-Exit Instruction-Information Field as Used for INS and OUTS
*/
typedef union
{
struct
{
/* [Bits 6:0] Reserved. */
UINT64 Reserved1 : 7;
/**
* @brief Address size
*
* [Bits 9:7] 0: 16-bit
* 1: 32-bit
* 2: 64-bit (used only on processors that support Intel 64 architecture)
* Other values not used.
*/
UINT64 AddressSize : 3;
#define VMX_VMEXIT_INSTRUCTION_INFO_INS_OUTS_ADDRESS_SIZE_BIT 7
#define VMX_VMEXIT_INSTRUCTION_INFO_INS_OUTS_ADDRESS_SIZE_FLAG 0x380
#define VMX_VMEXIT_INSTRUCTION_INFO_INS_OUTS_ADDRESS_SIZE_MASK 0x07
#define VMX_VMEXIT_INSTRUCTION_INFO_INS_OUTS_ADDRESS_SIZE(_) (((_) >> 7) & 0x07)
/* [Bits 14:10] Reserved. */
UINT64 Reserved2 : 5;
/**
* @brief Segment register
*
* [Bits 17:15] 0: ES
* 1: CS
* 2: SS
* 3: DS
* 4: FS
* 5: GS
* Other values not used. Undefined for VM exits due to execution of INS.
*/
UINT64 SegmentRegister : 3;
#define VMX_VMEXIT_INSTRUCTION_INFO_INS_OUTS_SEGMENT_REGISTER_BIT 15
#define VMX_VMEXIT_INSTRUCTION_INFO_INS_OUTS_SEGMENT_REGISTER_FLAG 0x38000
#define VMX_VMEXIT_INSTRUCTION_INFO_INS_OUTS_SEGMENT_REGISTER_MASK 0x07
#define VMX_VMEXIT_INSTRUCTION_INFO_INS_OUTS_SEGMENT_REGISTER(_) (((_) >> 15) & 0x07)
/* [Bits 63:18] Reserved. */
UINT64 Reserved3 : 46;
};
/* Flat 64-bit view of the instruction-information field. */
UINT64 AsUInt;
} VMX_VMEXIT_INSTRUCTION_INFO_INS_OUTS;
/**
* @brief VM-Exit Instruction-Information Field as Used for INVEPT, INVPCID, and INVVPID
*/
typedef union
{
struct
{
/**
* @brief Scaling
*
* [Bits 1:0] 0: no scaling
* 1: scale by 2
* 2: scale by 4
* 3: scale by 8 (used only on processors that support Intel 64 architecture)
* Undefined for instructions with no index register (bit 22 is set).
*/
UINT64 Scaling : 2;
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_SCALING_BIT 0
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_SCALING_FLAG 0x03
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_SCALING_MASK 0x03
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_SCALING(_) (((_) >> 0) & 0x03)
/* [Bits 6:2] Reserved. */
UINT64 Reserved1 : 5;
/**
* @brief Address size
*
* [Bits 9:7] 0: 16-bit
* 1: 32-bit
* 2: 64-bit (used only on processors that support Intel 64 architecture)
* Other values not used.
*/
UINT64 AddressSize : 3;
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_ADDRESS_SIZE_BIT 7
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_ADDRESS_SIZE_FLAG 0x380
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_ADDRESS_SIZE_MASK 0x07
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_ADDRESS_SIZE(_) (((_) >> 7) & 0x07)
/* [Bits 14:10] Reserved. */
UINT64 Reserved2 : 5;
/**
* @brief Segment register
*
* [Bits 17:15] 0: ES
* 1: CS
* 2: SS
* 3: DS
* 4: FS
* 5: GS
* Other values not used.
* (NOTE(review): the "undefined for INS" caveat that appeared here belongs to
* the INS/OUTS format; INVEPT/INVPCID/INVVPID always take a memory operand.)
*/
UINT64 SegmentRegister : 3;
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_SEGMENT_REGISTER_BIT 15
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_SEGMENT_REGISTER_FLAG 0x38000
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_SEGMENT_REGISTER_MASK 0x07
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_SEGMENT_REGISTER(_) (((_) >> 15) & 0x07)
/**
* [Bits 21:18] General-purpose register. Undefined for instructions with no index
* register (bit 22 is set).
*/
UINT64 GeneralPurposeRegister : 4;
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_GENERAL_PURPOSE_REGISTER_BIT 18
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_GENERAL_PURPOSE_REGISTER_FLAG 0x3C0000
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_GENERAL_PURPOSE_REGISTER_MASK 0x0F
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_GENERAL_PURPOSE_REGISTER(_) (((_) >> 18) & 0x0F)
/**
* [Bit 22] IndexReg invalid (0 = valid; 1 = invalid).
*/
UINT64 GeneralPurposeRegisterInvalid : 1;
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_GENERAL_PURPOSE_REGISTER_INVALID_BIT 22
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_GENERAL_PURPOSE_REGISTER_INVALID_FLAG 0x400000
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_GENERAL_PURPOSE_REGISTER_INVALID_MASK 0x01
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_GENERAL_PURPOSE_REGISTER_INVALID(_) \
(((_) >> 22) & 0x01)
/**
* [Bits 26:23] BaseReg (encoded as IndexReg above). Undefined for memory
* instructions with no base register (bit 27 is set).
*/
UINT64 BaseRegister : 4;
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_BASE_REGISTER_BIT 23
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_BASE_REGISTER_FLAG 0x7800000
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_BASE_REGISTER_MASK 0x0F
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_BASE_REGISTER(_) (((_) >> 23) & 0x0F)
/**
* [Bit 27] BaseReg invalid (0 = valid; 1 = invalid).
*/
UINT64 BaseRegisterInvalid : 1;
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_BASE_REGISTER_INVALID_BIT 27
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_BASE_REGISTER_INVALID_FLAG 0x8000000
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_BASE_REGISTER_INVALID_MASK 0x01
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_BASE_REGISTER_INVALID(_) (((_) >> 27) & 0x01)
/**
* [Bits 31:28] Reg2 (same encoding as IndexReg above).
*/
UINT64 Register2 : 4;
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_REGISTER_2_BIT 28
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_REGISTER_2_FLAG 0xF0000000
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_REGISTER_2_MASK 0x0F
#define VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE_REGISTER_2(_) (((_) >> 28) & 0x0F)
/* [Bits 63:32] Reserved. */
UINT64 Reserved3 : 32;
};
/* Flat 64-bit view of the instruction-information field. */
UINT64 AsUInt;
} VMX_VMEXIT_INSTRUCTION_INFO_INVALIDATE;
/**
* @brief VM-Exit Instruction-Information Field as Used for LIDT, LGDT, SIDT, or SGDT
*/
typedef union
{
struct
{
/**
* @brief Scaling
*
* [Bits 1:0] 0: no scaling
* 1: scale by 2
* 2: scale by 4
* 3: scale by 8 (used only on processors that support Intel 64 architecture)
* Undefined for instructions with no index register (bit 22 is set).
*/
UINT64 Scaling : 2;
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_SCALING_BIT 0
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_SCALING_FLAG 0x03
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_SCALING_MASK 0x03
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_SCALING(_) (((_) >> 0) & 0x03)
/* [Bits 6:2] Reserved. */
UINT64 Reserved1 : 5;
/**
* @brief Address size
*
* [Bits 9:7] 0: 16-bit
* 1: 32-bit
* 2: 64-bit (used only on processors that support Intel 64 architecture)
* Other values not used.
*/
UINT64 AddressSize : 3;
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_ADDRESS_SIZE_BIT 7
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_ADDRESS_SIZE_FLAG 0x380
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_ADDRESS_SIZE_MASK 0x07
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_ADDRESS_SIZE(_) (((_) >> 7) & 0x07)
/* [Bit 10] Reserved. */
UINT64 Reserved2 : 1;
/**
* @brief Operand size
*
* [Bit 11] 0: 16-bit
* 1: 32-bit
* Undefined for VM exits from 64-bit mode.
*/
UINT64 OperandSize : 1;
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_OPERAND_SIZE_BIT 11
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_OPERAND_SIZE_FLAG 0x800
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_OPERAND_SIZE_MASK 0x01
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_OPERAND_SIZE(_) (((_) >> 11) & 0x01)
/* [Bits 14:12] Reserved. */
UINT64 Reserved3 : 3;
/**
* @brief Segment register
*
* [Bits 17:15] 0: ES
* 1: CS
* 2: SS
* 3: DS
* 4: FS
* 5: GS
* Other values not used.
*/
UINT64 SegmentRegister : 3;
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_SEGMENT_REGISTER_BIT 15
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_SEGMENT_REGISTER_FLAG 0x38000
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_SEGMENT_REGISTER_MASK 0x07
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_SEGMENT_REGISTER(_) (((_) >> 15) & 0x07)
/**
* [Bits 21:18] General-purpose register. Undefined for instructions with no index
* register (bit 22 is set).
*/
UINT64 GeneralPurposeRegister : 4;
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_GENERAL_PURPOSE_REGISTER_BIT 18
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_GENERAL_PURPOSE_REGISTER_FLAG 0x3C0000
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_GENERAL_PURPOSE_REGISTER_MASK 0x0F
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_GENERAL_PURPOSE_REGISTER(_) \
(((_) >> 18) & 0x0F)
/**
* [Bit 22] IndexReg invalid (0 = valid; 1 = invalid).
*/
UINT64 GeneralPurposeRegisterInvalid : 1;
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_GENERAL_PURPOSE_REGISTER_INVALID_BIT 22
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_GENERAL_PURPOSE_REGISTER_INVALID_FLAG 0x400000
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_GENERAL_PURPOSE_REGISTER_INVALID_MASK 0x01
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_GENERAL_PURPOSE_REGISTER_INVALID(_) \
(((_) >> 22) & 0x01)
/**
* [Bits 26:23] BaseReg (encoded as IndexReg above). Undefined for memory
* instructions with no base register (bit 27 is set).
*/
UINT64 BaseRegister : 4;
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_BASE_REGISTER_BIT 23
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_BASE_REGISTER_FLAG 0x7800000
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_BASE_REGISTER_MASK 0x0F
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_BASE_REGISTER(_) (((_) >> 23) & 0x0F)
/**
* [Bit 27] BaseReg invalid (0 = valid; 1 = invalid).
*/
UINT64 BaseRegisterInvalid : 1;
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_BASE_REGISTER_INVALID_BIT 27
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_BASE_REGISTER_INVALID_FLAG 0x8000000
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_BASE_REGISTER_INVALID_MASK 0x01
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_BASE_REGISTER_INVALID(_) (((_) >> 27) & 0x01)
/**
* @brief Instruction identity
*
* [Bits 29:28] 0: SGDT
* 1: SIDT
* 2: LGDT
* 3: LIDT
*/
UINT64 Instruction : 2;
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_INSTRUCTION_BIT 28
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_INSTRUCTION_FLAG 0x30000000
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_INSTRUCTION_MASK 0x03
#define VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS_INSTRUCTION(_) (((_) >> 28) & 0x03)
/* [Bits 63:30] Reserved. */
UINT64 Reserved4 : 34;
};
/* Flat 64-bit view of the instruction-information field. */
UINT64 AsUInt;
} VMX_VMEXIT_INSTRUCTION_INFO_GDTR_IDTR_ACCESS;
/**
* @brief VM-Exit Instruction-Information Field as Used for LLDT, LTR, SLDT, and STR
*/
typedef union
{
struct
{
/**
* @brief Scaling
*
* [Bits 1:0] 0: no scaling
* 1: scale by 2
* 2: scale by 4
* 3: scale by 8 (used only on processors that support Intel 64 architecture)
* Undefined for instructions with no index register (bit 22 is set).
*/
UINT64 Scaling : 2;
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_SCALING_BIT 0
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_SCALING_FLAG 0x03
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_SCALING_MASK 0x03
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_SCALING(_) (((_) >> 0) & 0x03)
/* [Bit 2] Reserved. */
UINT64 Reserved1 : 1;
/**
* [Bits 6:3] Reg1. Undefined for memory instructions (bit 10 is clear).
*/
UINT64 Reg1 : 4;
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_REG_1_BIT 3
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_REG_1_FLAG 0x78
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_REG_1_MASK 0x0F
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_REG_1(_) (((_) >> 3) & 0x0F)
/**
* @brief Address size
*
* [Bits 9:7] 0: 16-bit
* 1: 32-bit
* 2: 64-bit (used only on processors that support Intel 64 architecture)
* Other values not used. Undefined for register instructions (bit 10 is set).
*/
UINT64 AddressSize : 3;
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_ADDRESS_SIZE_BIT 7
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_ADDRESS_SIZE_FLAG 0x380
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_ADDRESS_SIZE_MASK 0x07
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_ADDRESS_SIZE(_) (((_) >> 7) & 0x07)
/**
* [Bit 10] Mem/Reg (0 = memory; 1 = register).
*/
UINT64 MemoryRegister : 1;
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_MEMORY_REGISTER_BIT 10
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_MEMORY_REGISTER_FLAG 0x400
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_MEMORY_REGISTER_MASK 0x01
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_MEMORY_REGISTER(_) (((_) >> 10) & 0x01)
/* [Bits 14:11] Reserved. */
UINT64 Reserved2 : 4;
/**
* @brief Segment register
*
* [Bits 17:15] 0: ES
* 1: CS
* 2: SS
* 3: DS
* 4: FS
* 5: GS
* Other values not used. Undefined for register instructions (bit 10 is set).
*/
UINT64 SegmentRegister : 3;
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_SEGMENT_REGISTER_BIT 15
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_SEGMENT_REGISTER_FLAG 0x38000
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_SEGMENT_REGISTER_MASK 0x07
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_SEGMENT_REGISTER(_) (((_) >> 15) & 0x07)
/**
* [Bits 21:18] General-purpose register. Undefined for register instructions (bit
* 10 is set) and for memory instructions with no index register (bit 10 is clear
* and bit 22 is set).
*/
UINT64 GeneralPurposeRegister : 4;
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_GENERAL_PURPOSE_REGISTER_BIT 18
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_GENERAL_PURPOSE_REGISTER_FLAG 0x3C0000
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_GENERAL_PURPOSE_REGISTER_MASK 0x0F
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_GENERAL_PURPOSE_REGISTER(_) (((_) >> 18) & 0x0F)
/**
* [Bit 22] IndexReg invalid (0 = valid; 1 = invalid). Undefined for register
* instructions (bit 10 is set).
*/
UINT64 GeneralPurposeRegisterInvalid : 1;
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_GENERAL_PURPOSE_REGISTER_INVALID_BIT 22
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_GENERAL_PURPOSE_REGISTER_INVALID_FLAG 0x400000
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_GENERAL_PURPOSE_REGISTER_INVALID_MASK 0x01
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_GENERAL_PURPOSE_REGISTER_INVALID(_) \
(((_) >> 22) & 0x01)
/**
* [Bits 26:23] BaseReg (encoded as IndexReg above). Undefined for register
* instructions (bit 10 is set) and for memory instructions with no base register
* (bit 10 is clear and bit 27 is set).
*/
UINT64 BaseRegister : 4;
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_BASE_REGISTER_BIT 23
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_BASE_REGISTER_FLAG 0x7800000
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_BASE_REGISTER_MASK 0x0F
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_BASE_REGISTER(_) (((_) >> 23) & 0x0F)
/**
* [Bit 27] BaseReg invalid (0 = valid; 1 = invalid). Undefined for register
* instructions (bit 10 is set).
*/
UINT64 BaseRegisterInvalid : 1;
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_BASE_REGISTER_INVALID_BIT 27
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_BASE_REGISTER_INVALID_FLAG 0x8000000
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_BASE_REGISTER_INVALID_MASK 0x01
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_BASE_REGISTER_INVALID(_) (((_) >> 27) & 0x01)
/**
* @brief Instruction identity
*
* [Bits 29:28] 0: SLDT
* 1: STR
* 2: LLDT
* 3: LTR
*/
UINT64 Instruction : 2;
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_INSTRUCTION_BIT 28
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_INSTRUCTION_FLAG 0x30000000
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_INSTRUCTION_MASK 0x03
#define VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS_INSTRUCTION(_) (((_) >> 28) & 0x03)
/* [Bits 63:30] Reserved. */
UINT64 Reserved3 : 34;
};
/* Flat 64-bit view of the instruction-information field. */
UINT64 AsUInt;
} VMX_VMEXIT_INSTRUCTION_INFO_LDTR_TR_ACCESS;
/**
* @brief VM-Exit Instruction-Information Field as Used for RDRAND and RDSEED
*/
typedef union
{
struct
{
/* [Bits 2:0] Reserved. */
UINT64 Reserved1 : 3;
/**
* [Bits 6:3] Destination register.
*/
UINT64 DestinationRegister : 4;
#define VMX_VMEXIT_INSTRUCTION_INFO_RDRAND_RDSEED_DESTINATION_REGISTER_BIT 3
#define VMX_VMEXIT_INSTRUCTION_INFO_RDRAND_RDSEED_DESTINATION_REGISTER_FLAG 0x78
#define VMX_VMEXIT_INSTRUCTION_INFO_RDRAND_RDSEED_DESTINATION_REGISTER_MASK 0x0F
#define VMX_VMEXIT_INSTRUCTION_INFO_RDRAND_RDSEED_DESTINATION_REGISTER(_) (((_) >> 3) & 0x0F)
/* [Bits 10:7] Reserved. */
UINT64 Reserved2 : 4;
/**
* @brief Operand size
*
* [Bits 12:11] 0: 16-bit
* 1: 32-bit
* 2: 64-bit
* The value 3 is not used.
*/
UINT64 OperandSize : 2;
#define VMX_VMEXIT_INSTRUCTION_INFO_RDRAND_RDSEED_OPERAND_SIZE_BIT 11
#define VMX_VMEXIT_INSTRUCTION_INFO_RDRAND_RDSEED_OPERAND_SIZE_FLAG 0x1800
#define VMX_VMEXIT_INSTRUCTION_INFO_RDRAND_RDSEED_OPERAND_SIZE_MASK 0x03
#define VMX_VMEXIT_INSTRUCTION_INFO_RDRAND_RDSEED_OPERAND_SIZE(_) (((_) >> 11) & 0x03)
/* [Bits 63:13] Reserved. */
UINT64 Reserved3 : 51;
};
/* Flat 64-bit view of the instruction-information field. */
UINT64 AsUInt;
} VMX_VMEXIT_INSTRUCTION_INFO_RDRAND_RDSEED;
/**
* @brief VM-Exit Instruction-Information Field as Used for VMCLEAR, VMPTRLD, VMPTRST, VMXON,
* XRSTORS, and XSAVES
*/
typedef union
{
struct
{
/**
* @brief Scaling
*
* [Bits 1:0] 0: no scaling
* 1: scale by 2
* 2: scale by 4
* 3: scale by 8 (used only on processors that support Intel 64 architecture)
* Undefined for instructions with no index register (bit 22 is set).
*/
UINT64 Scaling : 2;
#define VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES_SCALING_BIT 0
#define VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES_SCALING_FLAG 0x03
#define VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES_SCALING_MASK 0x03
#define VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES_SCALING(_) (((_) >> 0) & 0x03)
/* [Bits 6:2] Reserved. */
UINT64 Reserved1 : 5;
/**
* @brief Address size
*
* [Bits 9:7] 0: 16-bit
* 1: 32-bit
* 2: 64-bit (used only on processors that support Intel 64 architecture)
* Other values not used.
*/
UINT64 AddressSize : 3;
#define VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES_ADDRESS_SIZE_BIT 7
#define VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES_ADDRESS_SIZE_FLAG 0x380
#define VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES_ADDRESS_SIZE_MASK 0x07
#define VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES_ADDRESS_SIZE(_) (((_) >> 7) & 0x07)
/* [Bits 14:10] Reserved. */
UINT64 Reserved2 : 5;
/**
* @brief Segment register
*
* [Bits 17:15] 0: ES
* 1: CS
* 2: SS
* 3: DS
* 4: FS
* 5: GS
* Other values not used.
*/
UINT64 SegmentRegister : 3;
#define VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES_SEGMENT_REGISTER_BIT 15
#define VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES_SEGMENT_REGISTER_FLAG 0x38000
#define VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES_SEGMENT_REGISTER_MASK 0x07
#define VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES_SEGMENT_REGISTER(_) (((_) >> 15) & 0x07)
/**
* [Bits 21:18] General-purpose register. Undefined for instructions with no index
* register (bit 22 is set).
*/
UINT64 GeneralPurposeRegister : 4;
#define VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES_GENERAL_PURPOSE_REGISTER_BIT 18
#define VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES_GENERAL_PURPOSE_REGISTER_FLAG 0x3C0000
#define VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES_GENERAL_PURPOSE_REGISTER_MASK 0x0F
#define VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES_GENERAL_PURPOSE_REGISTER(_) (((_) >> 18) & 0x0F)
/**
* [Bit 22] IndexReg invalid (0 = valid; 1 = invalid).
*/
UINT64 GeneralPurposeRegisterInvalid : 1;
#define VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES_GENERAL_PURPOSE_REGISTER_INVALID_BIT 22
#define VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES_GENERAL_PURPOSE_REGISTER_INVALID_FLAG 0x400000
#define VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES_GENERAL_PURPOSE_REGISTER_INVALID_MASK 0x01
#define VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES_GENERAL_PURPOSE_REGISTER_INVALID(_) \
(((_) >> 22) & 0x01)
/**
* [Bits 26:23] BaseReg (encoded as IndexReg above). Undefined for memory
* instructions with no base register (bit 27 is set).
*/
UINT64 BaseRegister : 4;
#define VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES_BASE_REGISTER_BIT 23
#define VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES_BASE_REGISTER_FLAG 0x7800000
#define VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES_BASE_REGISTER_MASK 0x0F
#define VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES_BASE_REGISTER(_) (((_) >> 23) & 0x0F)
/**
* [Bit 27] BaseReg invalid (0 = valid; 1 = invalid).
*/
UINT64 BaseRegisterInvalid : 1;
#define VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES_BASE_REGISTER_INVALID_BIT 27
#define VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES_BASE_REGISTER_INVALID_FLAG 0x8000000
#define VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES_BASE_REGISTER_INVALID_MASK 0x01
#define VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES_BASE_REGISTER_INVALID(_) (((_) >> 27) & 0x01)
/* [Bits 63:28] Reserved. */
UINT64 Reserved3 : 36;
};
/* Flat 64-bit view of the instruction-information field. */
UINT64 AsUInt;
} VMX_VMEXIT_INSTRUCTION_INFO_VMX_AND_XSAVES;
/**
 * @brief VM-Exit Instruction-Information Field as Used for VMREAD and VMWRITE
 */
typedef union
{
struct
{
/**
 * @brief Scaling
 *
 * [Bits 1:0] 0: no scaling
 * 1: scale by 2
 * 2: scale by 4
 * 3: scale by 8 (used only on processors that support Intel 64 architecture)
 * Undefined for register instructions (bit 10 is set) and for memory instructions
 * with no index register (bit 10 is clear and bit 22 is set).
 */
UINT64 Scaling : 2;
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_SCALING_BIT 0
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_SCALING_FLAG 0x03
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_SCALING_MASK 0x03
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_SCALING(_) (((_) >> 0) & 0x03)
/**
 * [Bit 2] Reserved.
 */
UINT64 Reserved1 : 1;
/**
 * [Bits 6:3] Reg1. Undefined for memory instructions (bit 10 is clear).
 */
UINT64 Register1 : 4;
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_REGISTER_1_BIT 3
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_REGISTER_1_FLAG 0x78
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_REGISTER_1_MASK 0x0F
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_REGISTER_1(_) (((_) >> 3) & 0x0F)
/**
 * @brief Address size
 *
 * [Bits 9:7] 0: 16-bit
 * 1: 32-bit
 * 2: 64-bit (used only on processors that support Intel 64 architecture)
 * Other values not used. Undefined for register instructions (bit 10 is set).
 */
UINT64 AddressSize : 3;
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_ADDRESS_SIZE_BIT 7
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_ADDRESS_SIZE_FLAG 0x380
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_ADDRESS_SIZE_MASK 0x07
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_ADDRESS_SIZE(_) (((_) >> 7) & 0x07)
/**
 * [Bit 10] Mem/Reg (0 = memory; 1 = register).
 */
UINT64 MemoryRegister : 1;
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_MEMORY_REGISTER_BIT 10
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_MEMORY_REGISTER_FLAG 0x400
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_MEMORY_REGISTER_MASK 0x01
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_MEMORY_REGISTER(_) (((_) >> 10) & 0x01)
/**
 * [Bits 14:11] Reserved.
 */
UINT64 Reserved2 : 4;
/**
 * @brief Segment register
 *
 * [Bits 17:15] 0: ES
 * 1: CS
 * 2: SS
 * 3: DS
 * 4: FS
 * 5: GS
 * Other values not used. Undefined for register instructions (bit 10 is set).
 */
UINT64 SegmentRegister : 3;
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_SEGMENT_REGISTER_BIT 15
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_SEGMENT_REGISTER_FLAG 0x38000
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_SEGMENT_REGISTER_MASK 0x07
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_SEGMENT_REGISTER(_) (((_) >> 15) & 0x07)
/**
 * [Bits 21:18] General-purpose register. Undefined for register instructions (bit
 * 10 is set) and for memory instructions with no index register (bit 10 is clear
 * and bit 22 is set).
 */
UINT64 GeneralPurposeRegister : 4;
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_GENERAL_PURPOSE_REGISTER_BIT 18
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_GENERAL_PURPOSE_REGISTER_FLAG 0x3C0000
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_GENERAL_PURPOSE_REGISTER_MASK 0x0F
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_GENERAL_PURPOSE_REGISTER(_) (((_) >> 18) & 0x0F)
/**
 * [Bit 22] IndexReg invalid (0 = valid; 1 = invalid). Undefined for register
 * instructions (bit 10 is set).
 */
UINT64 GeneralPurposeRegisterInvalid : 1;
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_GENERAL_PURPOSE_REGISTER_INVALID_BIT 22
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_GENERAL_PURPOSE_REGISTER_INVALID_FLAG 0x400000
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_GENERAL_PURPOSE_REGISTER_INVALID_MASK 0x01
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_GENERAL_PURPOSE_REGISTER_INVALID(_) \
(((_) >> 22) & 0x01)
/**
 * [Bits 26:23] BaseReg (encoded as Reg1 above). Undefined for register instructions
 * (bit 10 is set) and for memory instructions with no base register (bit 10 is
 * clear and bit 27 is set).
 */
UINT64 BaseRegister : 4;
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_BASE_REGISTER_BIT 23
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_BASE_REGISTER_FLAG 0x7800000
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_BASE_REGISTER_MASK 0x0F
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_BASE_REGISTER(_) (((_) >> 23) & 0x0F)
/**
 * [Bit 27] BaseReg invalid (0 = valid; 1 = invalid).
 */
UINT64 BaseRegisterInvalid : 1;
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_BASE_REGISTER_INVALID_BIT 27
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_BASE_REGISTER_INVALID_FLAG 0x8000000
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_BASE_REGISTER_INVALID_MASK 0x01
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_BASE_REGISTER_INVALID(_) (((_) >> 27) & 0x01)
/**
 * [Bits 31:28] Reg2 (same encoding as IndexReg above).
 */
UINT64 Register2 : 4;
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_REGISTER_2_BIT 28
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_REGISTER_2_FLAG 0xF0000000
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_REGISTER_2_MASK 0x0F
#define VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE_REGISTER_2(_) (((_) >> 28) & 0x0F)
/**
 * [Bits 63:32] Reserved (upper half of the 64-bit field; only bits 31:0 carry
 * instruction information).
 */
UINT64 Reserved3 : 32;
};
UINT64 AsUInt;
} VMX_VMEXIT_INSTRUCTION_INFO_VMREAD_VMWRITE;
/**
* @}
*/
/**
 * @brief - The low 16 bits correspond to bits 23:8 of the upper 32 bits of a 64-bit segment
 * descriptor. While bits 19:16 of code-segment and data-segment descriptors correspond to the upper
 * 4 bits of the segment limit, the corresponding bits (bits 11:8) are reserved in this VMCS field.
 * - Bit 16 indicates an unusable segment. Attempts to use such a segment fault except in
 * 64-bit mode. In general, a segment register is unusable if it has been loaded with a null
 * selector.
 * - Bits 31:17 are reserved
 *
 * @note There are a few exceptions to this statement. For example, a segment with a non-null
 * selector may be unusable following a task switch that fails after its commit point. In contrast,
 * the TR register is usable after processor reset despite having a null selector
 * @see SEGMENT_DESCRIPTOR_32
 * @see SEGMENT_DESCRIPTOR_64
 * @see XXX_ACCESS_RIGHTS fields of 32_BIT_GUEST_STATE_FIELDS
 * @see Vol3C[24.4.2(Guest Non-Register State)] (reference)
 */
typedef union
{
struct
{
/**
 * [Bits 3:0] Segment type.
 */
UINT32 Type : 4;
#define VMX_SEGMENT_ACCESS_RIGHTS_TYPE_BIT 0
#define VMX_SEGMENT_ACCESS_RIGHTS_TYPE_FLAG 0x0F
#define VMX_SEGMENT_ACCESS_RIGHTS_TYPE_MASK 0x0F
#define VMX_SEGMENT_ACCESS_RIGHTS_TYPE(_) (((_) >> 0) & 0x0F)
/**
 * [Bit 4] S - Descriptor type (0 = system; 1 = code or data).
 */
UINT32 DescriptorType : 1;
#define VMX_SEGMENT_ACCESS_RIGHTS_DESCRIPTOR_TYPE_BIT 4
#define VMX_SEGMENT_ACCESS_RIGHTS_DESCRIPTOR_TYPE_FLAG 0x10
#define VMX_SEGMENT_ACCESS_RIGHTS_DESCRIPTOR_TYPE_MASK 0x01
#define VMX_SEGMENT_ACCESS_RIGHTS_DESCRIPTOR_TYPE(_) (((_) >> 4) & 0x01)
/**
 * [Bits 6:5] DPL - Descriptor privilege level.
 */
UINT32 DescriptorPrivilegeLevel : 2;
#define VMX_SEGMENT_ACCESS_RIGHTS_DESCRIPTOR_PRIVILEGE_LEVEL_BIT 5
#define VMX_SEGMENT_ACCESS_RIGHTS_DESCRIPTOR_PRIVILEGE_LEVEL_FLAG 0x60
#define VMX_SEGMENT_ACCESS_RIGHTS_DESCRIPTOR_PRIVILEGE_LEVEL_MASK 0x03
#define VMX_SEGMENT_ACCESS_RIGHTS_DESCRIPTOR_PRIVILEGE_LEVEL(_) (((_) >> 5) & 0x03)
/**
 * [Bit 7] P - Segment present.
 */
UINT32 Present : 1;
#define VMX_SEGMENT_ACCESS_RIGHTS_PRESENT_BIT 7
#define VMX_SEGMENT_ACCESS_RIGHTS_PRESENT_FLAG 0x80
#define VMX_SEGMENT_ACCESS_RIGHTS_PRESENT_MASK 0x01
#define VMX_SEGMENT_ACCESS_RIGHTS_PRESENT(_) (((_) >> 7) & 0x01)
/**
 * [Bits 11:8] Reserved (would hold segment-limit bits 19:16 in a descriptor).
 */
UINT32 Reserved1 : 4;
/**
 * [Bit 12] AVL - Available for use by system software.
 */
UINT32 AvailableBit : 1;
#define VMX_SEGMENT_ACCESS_RIGHTS_AVAILABLE_BIT_BIT 12
#define VMX_SEGMENT_ACCESS_RIGHTS_AVAILABLE_BIT_FLAG 0x1000
#define VMX_SEGMENT_ACCESS_RIGHTS_AVAILABLE_BIT_MASK 0x01
#define VMX_SEGMENT_ACCESS_RIGHTS_AVAILABLE_BIT(_) (((_) >> 12) & 0x01)
/**
 * [Bit 13] Reserved (except for CS). L - 64-bit mode active (for CS only).
 */
UINT32 LongMode : 1;
#define VMX_SEGMENT_ACCESS_RIGHTS_LONG_MODE_BIT 13
#define VMX_SEGMENT_ACCESS_RIGHTS_LONG_MODE_FLAG 0x2000
#define VMX_SEGMENT_ACCESS_RIGHTS_LONG_MODE_MASK 0x01
#define VMX_SEGMENT_ACCESS_RIGHTS_LONG_MODE(_) (((_) >> 13) & 0x01)
/**
 * [Bit 14] D/B - Default operation size (0 = 16-bit segment; 1 = 32-bit segment).
 */
UINT32 DefaultBig : 1;
#define VMX_SEGMENT_ACCESS_RIGHTS_DEFAULT_BIG_BIT 14
#define VMX_SEGMENT_ACCESS_RIGHTS_DEFAULT_BIG_FLAG 0x4000
#define VMX_SEGMENT_ACCESS_RIGHTS_DEFAULT_BIG_MASK 0x01
#define VMX_SEGMENT_ACCESS_RIGHTS_DEFAULT_BIG(_) (((_) >> 14) & 0x01)
/**
 * [Bit 15] G - Granularity.
 */
UINT32 Granularity : 1;
#define VMX_SEGMENT_ACCESS_RIGHTS_GRANULARITY_BIT 15
#define VMX_SEGMENT_ACCESS_RIGHTS_GRANULARITY_FLAG 0x8000
#define VMX_SEGMENT_ACCESS_RIGHTS_GRANULARITY_MASK 0x01
#define VMX_SEGMENT_ACCESS_RIGHTS_GRANULARITY(_) (((_) >> 15) & 0x01)
/**
 * [Bit 16] Segment unusable (0 = usable; 1 = unusable).
 */
UINT32 Unusable : 1;
#define VMX_SEGMENT_ACCESS_RIGHTS_UNUSABLE_BIT 16
#define VMX_SEGMENT_ACCESS_RIGHTS_UNUSABLE_FLAG 0x10000
#define VMX_SEGMENT_ACCESS_RIGHTS_UNUSABLE_MASK 0x01
#define VMX_SEGMENT_ACCESS_RIGHTS_UNUSABLE(_) (((_) >> 16) & 0x01)
/**
 * [Bits 31:17] Reserved.
 */
UINT32 Reserved2 : 15;
};
UINT32 AsUInt;
} VMX_SEGMENT_ACCESS_RIGHTS;
/**
 * @brief The IA-32 architecture includes features that permit certain events to be blocked for a
 * period of time. This field contains information about such blocking
 *
 * @see INTERRUPTIBILITY_STATE of 32_BIT_GUEST_STATE_FIELDS
 * @see Vol3C[24.4.2(Guest Non-Register State)] (reference)
 */
typedef union
{
struct
{
/**
 * [Bit 0] Execution of STI with RFLAGS.IF = 0 blocks maskable interrupts on the
 * instruction boundary following its execution. Setting this bit indicates that
 * this blocking is in effect.
 *
 * @see Vol2B[4(STI-Set Interrupt Flag)]
 */
UINT32 BlockingBySti : 1;
#define VMX_INTERRUPTIBILITY_STATE_BLOCKING_BY_STI_BIT 0
#define VMX_INTERRUPTIBILITY_STATE_BLOCKING_BY_STI_FLAG 0x01
#define VMX_INTERRUPTIBILITY_STATE_BLOCKING_BY_STI_MASK 0x01
#define VMX_INTERRUPTIBILITY_STATE_BLOCKING_BY_STI(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] Execution of a MOV to SS or a POP to SS blocks or suppresses certain
 * debug exceptions as well as interrupts (maskable and nonmaskable) on the
 * instruction boundary following its execution. Setting this bit indicates that
 * this blocking is in effect. This document uses the term "blocking by MOV SS," but
 * it applies equally to POP SS.
 *
 * @see Vol3A[6.8.3(Masking Exceptions and Interrupts When Switching Stacks)]
 */
UINT32 BlockingByMovSs : 1;
#define VMX_INTERRUPTIBILITY_STATE_BLOCKING_BY_MOV_SS_BIT 1
#define VMX_INTERRUPTIBILITY_STATE_BLOCKING_BY_MOV_SS_FLAG 0x02
#define VMX_INTERRUPTIBILITY_STATE_BLOCKING_BY_MOV_SS_MASK 0x01
#define VMX_INTERRUPTIBILITY_STATE_BLOCKING_BY_MOV_SS(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] System-management interrupts (SMIs) are disabled while the processor is
 * in system-management mode (SMM). Setting this bit indicates that blocking of SMIs
 * is in effect.
 *
 * @see Vol3C[34.2(System Management Interrupt (SMI))]
 */
UINT32 BlockingBySmi : 1;
#define VMX_INTERRUPTIBILITY_STATE_BLOCKING_BY_SMI_BIT 2
#define VMX_INTERRUPTIBILITY_STATE_BLOCKING_BY_SMI_FLAG 0x04
#define VMX_INTERRUPTIBILITY_STATE_BLOCKING_BY_SMI_MASK 0x01
#define VMX_INTERRUPTIBILITY_STATE_BLOCKING_BY_SMI(_) (((_) >> 2) & 0x01)
/**
 * [Bit 3] Delivery of a non-maskable interrupt (NMI) or a system-management
 * interrupt (SMI) blocks subsequent NMIs until the next execution of IRET. Setting
 * this bit indicates that blocking of NMIs is in effect. Clearing this bit does not
 * imply that NMIs are not (temporarily) blocked for other reasons. If the "virtual
 * NMIs" VM-execution control is 1, this bit does not control the blocking of NMIs.
 * Instead, it refers to "virtual-NMI blocking" (the fact that guest software is not
 * ready for an NMI).
 *
 * @see Vol3C[6.7.1(Handling Multiple NMIs)]
 * @see Vol3C[25.3(CHANGES TO INSTRUCTION BEHAVIOR IN VMX NON-ROOT OPERATION)]
 * @see Vol3C[24.6.1(Pin-Based VM-Execution Controls)]
 */
UINT32 BlockingByNmi : 1;
#define VMX_INTERRUPTIBILITY_STATE_BLOCKING_BY_NMI_BIT 3
#define VMX_INTERRUPTIBILITY_STATE_BLOCKING_BY_NMI_FLAG 0x08
#define VMX_INTERRUPTIBILITY_STATE_BLOCKING_BY_NMI_MASK 0x01
#define VMX_INTERRUPTIBILITY_STATE_BLOCKING_BY_NMI(_) (((_) >> 3) & 0x01)
/**
 * [Bit 4] A VM exit saves this bit as 1 to indicate that the VM exit was incident
 * to enclave mode.
 */
UINT32 EnclaveInterruption : 1;
#define VMX_INTERRUPTIBILITY_STATE_ENCLAVE_INTERRUPTION_BIT 4
#define VMX_INTERRUPTIBILITY_STATE_ENCLAVE_INTERRUPTION_FLAG 0x10
#define VMX_INTERRUPTIBILITY_STATE_ENCLAVE_INTERRUPTION_MASK 0x01
#define VMX_INTERRUPTIBILITY_STATE_ENCLAVE_INTERRUPTION(_) (((_) >> 4) & 0x01)
/**
 * [Bits 31:5] Reserved.
 */
UINT32 Reserved1 : 27;
};
UINT32 AsUInt;
} VMX_INTERRUPTIBILITY_STATE;
/**
 * @brief Activity states of a logical processor, as reported in the guest
 * activity-state VMCS field.
 */
typedef enum
{
/**
 * The logical processor is executing instructions normally.
 */
VmxActive = 0x00000000,
/**
 * The logical processor is inactive because it executed the HLT instruction.
 */
VmxHlt = 0x00000001,
/**
 * The logical processor is inactive because it incurred a triple fault or some other
 * serious error.
 */
VmxShutdown = 0x00000002,
/**
 * The logical processor is inactive because it is waiting for a startup-IPI (SIPI).
 */
VmxWaitForSipi = 0x00000003,
} VMX_GUEST_ACTIVITY_STATE;
/**
 * @brief IA-32 processors may recognize one or more debug exceptions without immediately delivering
 * them. This field contains information about such exceptions
 *
 * @see Vol3C[24.4.2(Guest Non-Register State)] (reference)
 */
typedef union
{
struct
{
/**
 * [Bit 0] When set, indicates that the corresponding breakpoint condition was met.
 * May be set even if the corresponding enabling bit in DR7 is not set.
 */
UINT64 B0 : 1;
#define VMX_PENDING_DEBUG_EXCEPTIONS_B0_BIT 0
#define VMX_PENDING_DEBUG_EXCEPTIONS_B0_FLAG 0x01
#define VMX_PENDING_DEBUG_EXCEPTIONS_B0_MASK 0x01
#define VMX_PENDING_DEBUG_EXCEPTIONS_B0(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] When set, indicates that the corresponding breakpoint condition was met.
 * May be set even if the corresponding enabling bit in DR7 is not set.
 */
UINT64 B1 : 1;
#define VMX_PENDING_DEBUG_EXCEPTIONS_B1_BIT 1
#define VMX_PENDING_DEBUG_EXCEPTIONS_B1_FLAG 0x02
#define VMX_PENDING_DEBUG_EXCEPTIONS_B1_MASK 0x01
#define VMX_PENDING_DEBUG_EXCEPTIONS_B1(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] When set, indicates that the corresponding breakpoint condition was met.
 * May be set even if the corresponding enabling bit in DR7 is not set.
 */
UINT64 B2 : 1;
#define VMX_PENDING_DEBUG_EXCEPTIONS_B2_BIT 2
#define VMX_PENDING_DEBUG_EXCEPTIONS_B2_FLAG 0x04
#define VMX_PENDING_DEBUG_EXCEPTIONS_B2_MASK 0x01
#define VMX_PENDING_DEBUG_EXCEPTIONS_B2(_) (((_) >> 2) & 0x01)
/**
 * [Bit 3] When set, indicates that the corresponding breakpoint condition was met.
 * May be set even if the corresponding enabling bit in DR7 is not set.
 */
UINT64 B3 : 1;
#define VMX_PENDING_DEBUG_EXCEPTIONS_B3_BIT 3
#define VMX_PENDING_DEBUG_EXCEPTIONS_B3_FLAG 0x08
#define VMX_PENDING_DEBUG_EXCEPTIONS_B3_MASK 0x01
#define VMX_PENDING_DEBUG_EXCEPTIONS_B3(_) (((_) >> 3) & 0x01)
/**
 * [Bits 11:4] Reserved.
 */
UINT64 Reserved1 : 8;
/**
 * [Bit 12] When set, this bit indicates that at least one data or I/O breakpoint
 * was met and was enabled in DR7.
 */
UINT64 EnabledBreakpoint : 1;
#define VMX_PENDING_DEBUG_EXCEPTIONS_ENABLED_BREAKPOINT_BIT 12
#define VMX_PENDING_DEBUG_EXCEPTIONS_ENABLED_BREAKPOINT_FLAG 0x1000
#define VMX_PENDING_DEBUG_EXCEPTIONS_ENABLED_BREAKPOINT_MASK 0x01
#define VMX_PENDING_DEBUG_EXCEPTIONS_ENABLED_BREAKPOINT(_) (((_) >> 12) & 0x01)
/**
 * [Bit 13] Reserved.
 */
UINT64 Reserved2 : 1;
/**
 * [Bit 14] When set, this bit indicates that a debug exception would have been
 * triggered by single-step execution mode.
 */
UINT64 Bs : 1;
#define VMX_PENDING_DEBUG_EXCEPTIONS_BS_BIT 14
#define VMX_PENDING_DEBUG_EXCEPTIONS_BS_FLAG 0x4000
#define VMX_PENDING_DEBUG_EXCEPTIONS_BS_MASK 0x01
#define VMX_PENDING_DEBUG_EXCEPTIONS_BS(_) (((_) >> 14) & 0x01)
/**
 * [Bit 15] Reserved.
 */
UINT64 Reserved3 : 1;
/**
 * [Bit 16] When set, this bit indicates that a debug exception (\#DB) or a
 * breakpoint exception (\#BP) occurred inside an RTM region while advanced
 * debugging of RTM transactional regions was enabled.
 */
UINT64 Rtm : 1;
#define VMX_PENDING_DEBUG_EXCEPTIONS_RTM_BIT 16
#define VMX_PENDING_DEBUG_EXCEPTIONS_RTM_FLAG 0x10000
#define VMX_PENDING_DEBUG_EXCEPTIONS_RTM_MASK 0x01
#define VMX_PENDING_DEBUG_EXCEPTIONS_RTM(_) (((_) >> 16) & 0x01)
/**
 * [Bits 63:17] Reserved.
 */
UINT64 Reserved4 : 47;
};
UINT64 AsUInt;
} VMX_PENDING_DEBUG_EXCEPTIONS;
/**
* @}
*/
/**
 * @brief Format of Exit Reason
 *
 * Exit reason (32 bits). This field encodes the reason for the VM exit and has the structure.
 *
 * @see Vol3C[24.9.1(Basic VM-Exit Information)] (reference)
 */
typedef union
{
struct
{
/**
 * [Bits 15:0] Provides basic information about the cause of the VM exit (if bit 31
 * is clear) or of the VM-entry failure (if bit 31 is set).
 */
UINT32 BasicExitReason : 16;
#define VMX_VMEXIT_REASON_BASIC_EXIT_REASON_BIT 0
#define VMX_VMEXIT_REASON_BASIC_EXIT_REASON_FLAG 0xFFFF
#define VMX_VMEXIT_REASON_BASIC_EXIT_REASON_MASK 0xFFFF
#define VMX_VMEXIT_REASON_BASIC_EXIT_REASON(_) (((_) >> 0) & 0xFFFF)
/**
 * [Bit 16] Always cleared to 0.
 */
UINT32 Always0 : 1;
#define VMX_VMEXIT_REASON_ALWAYS0_BIT 16
#define VMX_VMEXIT_REASON_ALWAYS0_FLAG 0x10000
#define VMX_VMEXIT_REASON_ALWAYS0_MASK 0x01
#define VMX_VMEXIT_REASON_ALWAYS0(_) (((_) >> 16) & 0x01)
/**
 * [Bits 26:17] Reserved.
 */
UINT32 Reserved1 : 10;
#define VMX_VMEXIT_REASON_RESERVED1_BIT 17
#define VMX_VMEXIT_REASON_RESERVED1_FLAG 0x7FE0000
#define VMX_VMEXIT_REASON_RESERVED1_MASK 0x3FF
#define VMX_VMEXIT_REASON_RESERVED1(_) (((_) >> 17) & 0x3FF)
/**
 * [Bit 27] A VM exit saves this bit as 1 to indicate that the VM exit was incident
 * to enclave mode.
 */
UINT32 EnclaveMode : 1;
#define VMX_VMEXIT_REASON_ENCLAVE_MODE_BIT 27
#define VMX_VMEXIT_REASON_ENCLAVE_MODE_FLAG 0x8000000
#define VMX_VMEXIT_REASON_ENCLAVE_MODE_MASK 0x01
#define VMX_VMEXIT_REASON_ENCLAVE_MODE(_) (((_) >> 27) & 0x01)
/**
 * [Bit 28] Pending MTF VM exit.
 */
UINT32 PendingMtfVmExit : 1;
#define VMX_VMEXIT_REASON_PENDING_MTF_VM_EXIT_BIT 28
#define VMX_VMEXIT_REASON_PENDING_MTF_VM_EXIT_FLAG 0x10000000
#define VMX_VMEXIT_REASON_PENDING_MTF_VM_EXIT_MASK 0x01
#define VMX_VMEXIT_REASON_PENDING_MTF_VM_EXIT(_) (((_) >> 28) & 0x01)
/**
 * [Bit 29] VM exit from VMX root operation.
 */
UINT32 VmExitFromVmxRoot : 1;
#define VMX_VMEXIT_REASON_VM_EXIT_FROM_VMX_ROOT_BIT 29
#define VMX_VMEXIT_REASON_VM_EXIT_FROM_VMX_ROOT_FLAG 0x20000000
#define VMX_VMEXIT_REASON_VM_EXIT_FROM_VMX_ROOT_MASK 0x01
#define VMX_VMEXIT_REASON_VM_EXIT_FROM_VMX_ROOT(_) (((_) >> 29) & 0x01)
/**
 * [Bit 30] Reserved.
 */
UINT32 Reserved2 : 1;
#define VMX_VMEXIT_REASON_RESERVED2_BIT 30
#define VMX_VMEXIT_REASON_RESERVED2_FLAG 0x40000000
#define VMX_VMEXIT_REASON_RESERVED2_MASK 0x01
#define VMX_VMEXIT_REASON_RESERVED2(_) (((_) >> 30) & 0x01)
/**
 * [Bit 31] VM-entry failure:
 * - 0 = true VM exit
 * - 1 = VM-entry failure
 */
UINT32 VmEntryFailure : 1;
#define VMX_VMEXIT_REASON_VM_ENTRY_FAILURE_BIT 31
#define VMX_VMEXIT_REASON_VM_ENTRY_FAILURE_FLAG 0x80000000
#define VMX_VMEXIT_REASON_VM_ENTRY_FAILURE_MASK 0x01
#define VMX_VMEXIT_REASON_VM_ENTRY_FAILURE(_) (((_) >> 31) & 0x01)
};
UINT32 AsUInt;
} VMX_VMEXIT_REASON;
/**
 * @brief VMX I/O bitmaps.
 *
 * Two contiguous 4-KByte bitmaps, one bit per I/O port: bitmap A covers ports
 * 0x0000-0x7FFF and bitmap B covers ports 0x8000-0xFFFF (see the range macros
 * below).
 */
typedef struct
{
#define IO_BITMAP_A_MIN 0x00000000
#define IO_BITMAP_A_MAX 0x00007FFF
#define IO_BITMAP_B_MIN 0x00008000
#define IO_BITMAP_B_MAX 0x0000FFFF
UINT8 IoA[4096];
UINT8 IoB[4096];
} VMX_IO_BITMAP;
/**
 * @brief VMX MSR bitmaps.
 *
 * Four 1-KByte bitmaps, one bit per MSR, split by access type (RDMSR/WRMSR):
 * the "low" bitmaps cover MSRs 0x00000000-0x00001FFF and the "high" bitmaps
 * cover MSRs 0xC0000000-0xC0001FFF (see the range macros below).
 */
typedef struct
{
#define MSR_ID_LOW_MIN 0x00000000
#define MSR_ID_LOW_MAX 0x00001FFF
#define MSR_ID_HIGH_MIN 0xC0000000
#define MSR_ID_HIGH_MAX 0xC0001FFF
UINT8 RdmsrLow[1024];
UINT8 RdmsrHigh[1024];
UINT8 WrmsrLow[1024];
UINT8 WrmsrHigh[1024];
} VMX_MSR_BITMAP;
/**
* @defgroup EPT \
* The extended page-table mechanism
*
* The extended page-table mechanism (EPT) is a feature that can be used to support the
* virtualization of physical memory. When EPT is in use, certain addresses that would normally be
* treated as physical addresses (and used to access memory) are instead treated as guest-physical
* addresses. Guest-physical addresses are translated by traversing a set of EPT paging structures
* to produce physical addresses that are used to access memory.
*
* @see Vol3C[28.2(THE EXTENDED PAGE TABLE MECHANISM (EPT))] (reference)
* @{
*/
/**
 * @brief Extended-Page-Table Pointer (EPTP)
 *
 * The extended-page-table pointer (EPTP) contains the address of the base of EPT PML4 table, as
 * well as other EPT configuration information.
 *
 * @see Vol3C[28.2.2(EPT Translation Mechanism)]
 * @see Vol3C[24.6.11(Extended-Page-Table Pointer (EPTP))] (reference)
 */
typedef union
{
struct
{
/**
 * [Bits 2:0] EPT paging-structure memory type:
 * - 0 = Uncacheable (UC)
 * - 6 = Write-back (WB)
 * Other values are reserved.
 *
 * @see Vol3C[28.2.6(EPT and Memory Typing)]
 */
UINT64 MemoryType : 3;
#define EPT_POINTER_MEMORY_TYPE_BIT 0
#define EPT_POINTER_MEMORY_TYPE_FLAG 0x07
#define EPT_POINTER_MEMORY_TYPE_MASK 0x07
#define EPT_POINTER_MEMORY_TYPE(_) (((_) >> 0) & 0x07)
/**
 * [Bits 5:3] This value is 1 less than the EPT page-walk length.
 *
 * @see Vol3C[28.2.6(EPT and Memory Typing)]
 */
UINT64 PageWalkLength : 3;
#define EPT_POINTER_PAGE_WALK_LENGTH_BIT 3
#define EPT_POINTER_PAGE_WALK_LENGTH_FLAG 0x38
#define EPT_POINTER_PAGE_WALK_LENGTH_MASK 0x07
#define EPT_POINTER_PAGE_WALK_LENGTH(_) (((_) >> 3) & 0x07)
#define EPT_PAGE_WALK_LENGTH_4 0x00000003
/**
 * [Bit 6] Setting this control to 1 enables accessed and dirty flags for EPT.
 *
 * @see Vol3C[28.2.4(Accessed and Dirty Flags for EPT)]
 */
UINT64 EnableAccessAndDirtyFlags : 1;
#define EPT_POINTER_ENABLE_ACCESS_AND_DIRTY_FLAGS_BIT 6
#define EPT_POINTER_ENABLE_ACCESS_AND_DIRTY_FLAGS_FLAG 0x40
#define EPT_POINTER_ENABLE_ACCESS_AND_DIRTY_FLAGS_MASK 0x01
#define EPT_POINTER_ENABLE_ACCESS_AND_DIRTY_FLAGS(_) (((_) >> 6) & 0x01)
/**
 * [Bit 7] Setting this control to 1 enables enforcement of access rights for
 * supervisor shadow-stack pages.
 *
 * @see Vol3C[28.3.3.2(EPT Violations)]
 */
UINT64 EnableSupervisorShadowStackPages : 1;
#define EPT_POINTER_ENABLE_SUPERVISOR_SHADOW_STACK_PAGES_BIT 7
#define EPT_POINTER_ENABLE_SUPERVISOR_SHADOW_STACK_PAGES_FLAG 0x80
#define EPT_POINTER_ENABLE_SUPERVISOR_SHADOW_STACK_PAGES_MASK 0x01
#define EPT_POINTER_ENABLE_SUPERVISOR_SHADOW_STACK_PAGES(_) (((_) >> 7) & 0x01)
/**
 * [Bits 11:8] Reserved.
 */
UINT64 Reserved1 : 4;
/**
 * [Bits 47:12] Bits N-1:12 of the physical address of the 4-KByte aligned EPT PML4
 * table.
 */
UINT64 PageFrameNumber : 36;
#define EPT_POINTER_PAGE_FRAME_NUMBER_BIT 12
#define EPT_POINTER_PAGE_FRAME_NUMBER_FLAG 0xFFFFFFFFF000
#define EPT_POINTER_PAGE_FRAME_NUMBER_MASK 0xFFFFFFFFF
#define EPT_POINTER_PAGE_FRAME_NUMBER(_) (((_) >> 12) & 0xFFFFFFFFF)
/**
 * [Bits 63:48] Reserved.
 */
UINT64 Reserved2 : 16;
} Fields;
UINT64 AsUInt;
} EPT_POINTER;
/**
 * @brief Format of an EPT PML4 Entry (PML4E) that References an EPT Page-Directory-Pointer Table
 *
 * A 4-KByte naturally aligned EPT PML4 table is located at the physical address specified in bits
 * 51:12 of the extended-page-table pointer (EPTP), a VM-execution control field. An EPT PML4 table
 * comprises 512 64-bit entries (EPT PML4Es). An EPT PML4E is selected using the physical address
 * defined as follows:
 * - Bits 63:52 are all 0.
 * - Bits 51:12 are from the EPTP.
 * - Bits 11:3 are bits 47:39 of the guest-physical address.
 * - Bits 2:0 are all 0.
 * Because an EPT PML4E is identified using bits 47:39 of the guest-physical address, it controls
 * access to a 512- GByte region of the guest-physical-address space.
 *
 * @see Vol3C[24.6.11(Extended-Page-Table Pointer (EPTP))]
 */
typedef union
{
struct
{
/**
 * [Bit 0] Read access; indicates whether reads are allowed from the 512-GByte
 * region controlled by this entry.
 */
UINT64 ReadAccess : 1;
#define EPT_PML4E_READ_ACCESS_BIT 0
#define EPT_PML4E_READ_ACCESS_FLAG 0x01
#define EPT_PML4E_READ_ACCESS_MASK 0x01
#define EPT_PML4E_READ_ACCESS(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] Write access; indicates whether writes are allowed from the 512-GByte
 * region controlled by this entry.
 */
UINT64 WriteAccess : 1;
#define EPT_PML4E_WRITE_ACCESS_BIT 1
#define EPT_PML4E_WRITE_ACCESS_FLAG 0x02
#define EPT_PML4E_WRITE_ACCESS_MASK 0x01
#define EPT_PML4E_WRITE_ACCESS(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] If the "mode-based execute control for EPT" VM-execution control is 0,
 * execute access; indicates whether instruction fetches are allowed from the
 * 512-GByte region controlled by this entry. If that control is 1, execute access
 * for supervisor-mode linear addresses; indicates whether instruction fetches are
 * allowed from supervisor-mode linear addresses in the 512-GByte region controlled
 * by this entry.
 */
UINT64 ExecuteAccess : 1;
#define EPT_PML4E_EXECUTE_ACCESS_BIT 2
#define EPT_PML4E_EXECUTE_ACCESS_FLAG 0x04
#define EPT_PML4E_EXECUTE_ACCESS_MASK 0x01
#define EPT_PML4E_EXECUTE_ACCESS(_) (((_) >> 2) & 0x01)
/**
 * [Bits 7:3] Reserved.
 */
UINT64 Reserved1 : 5;
/**
 * [Bit 8] If bit 6 of EPTP is 1, accessed flag for EPT; indicates whether software
 * has accessed the 512-GByte region controlled by this entry. Ignored if bit 6 of
 * EPTP is 0.
 *
 * @see Vol3C[28.2.4(Accessed and Dirty Flags for EPT)]
 */
UINT64 Accessed : 1;
#define EPT_PML4E_ACCESSED_BIT 8
#define EPT_PML4E_ACCESSED_FLAG 0x100
#define EPT_PML4E_ACCESSED_MASK 0x01
#define EPT_PML4E_ACCESSED(_) (((_) >> 8) & 0x01)
/**
 * [Bit 9] Reserved.
 */
UINT64 Reserved2 : 1;
/**
 * [Bit 10] Execute access for user-mode linear addresses. If the "mode-based
 * execute control for EPT" VM-execution control is 1, indicates whether instruction
 * fetches are allowed from user-mode linear addresses in the 512-GByte region
 * controlled by this entry. If that control is 0, this bit is ignored.
 */
UINT64 UserModeExecute : 1;
#define EPT_PML4E_USER_MODE_EXECUTE_BIT 10
#define EPT_PML4E_USER_MODE_EXECUTE_FLAG 0x400
#define EPT_PML4E_USER_MODE_EXECUTE_MASK 0x01
#define EPT_PML4E_USER_MODE_EXECUTE(_) (((_) >> 10) & 0x01)
/**
 * [Bit 11] Reserved.
 */
UINT64 Reserved3 : 1;
/**
 * [Bits 47:12] Physical address of 4-KByte aligned EPT page-directory-pointer table
 * referenced by this entry.
 */
UINT64 PageFrameNumber : 36;
#define EPT_PML4E_PAGE_FRAME_NUMBER_BIT 12
#define EPT_PML4E_PAGE_FRAME_NUMBER_FLAG 0xFFFFFFFFF000
#define EPT_PML4E_PAGE_FRAME_NUMBER_MASK 0xFFFFFFFFF
#define EPT_PML4E_PAGE_FRAME_NUMBER(_) (((_) >> 12) & 0xFFFFFFFFF)
/**
 * [Bits 63:48] Reserved.
 */
UINT64 Reserved4 : 16;
} Fields;
UINT64 AsUInt;
} EPT_PML4E;
/**
 * @brief Format of an EPT Page-Directory-Pointer-Table Entry (PDPTE) that Maps a 1-GByte Page
 */
typedef union
{
struct
{
/**
 * [Bit 0] Read access; indicates whether reads are allowed from the 1-GByte page
 * referenced by this entry.
 */
UINT64 ReadAccess : 1;
#define EPT_PDPTE_1GB_READ_ACCESS_BIT 0
#define EPT_PDPTE_1GB_READ_ACCESS_FLAG 0x01
#define EPT_PDPTE_1GB_READ_ACCESS_MASK 0x01
#define EPT_PDPTE_1GB_READ_ACCESS(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] Write access; indicates whether writes are allowed from the 1-GByte page
 * referenced by this entry.
 */
UINT64 WriteAccess : 1;
#define EPT_PDPTE_1GB_WRITE_ACCESS_BIT 1
#define EPT_PDPTE_1GB_WRITE_ACCESS_FLAG 0x02
#define EPT_PDPTE_1GB_WRITE_ACCESS_MASK 0x01
#define EPT_PDPTE_1GB_WRITE_ACCESS(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] If the "mode-based execute control for EPT" VM-execution control is 0,
 * execute access; indicates whether instruction fetches are allowed from the
 * 1-GByte page controlled by this entry. If that control is 1, execute access for
 * supervisor-mode linear addresses; indicates whether instruction fetches are
 * allowed from supervisor-mode linear addresses in the 1-GByte page controlled by
 * this entry.
 */
UINT64 ExecuteAccess : 1;
#define EPT_PDPTE_1GB_EXECUTE_ACCESS_BIT 2
#define EPT_PDPTE_1GB_EXECUTE_ACCESS_FLAG 0x04
#define EPT_PDPTE_1GB_EXECUTE_ACCESS_MASK 0x01
#define EPT_PDPTE_1GB_EXECUTE_ACCESS(_) (((_) >> 2) & 0x01)
/**
 * [Bits 5:3] EPT memory type for this 1-GByte page.
 *
 * @see Vol3C[28.2.6(EPT and Memory Typing)]
 */
UINT64 MemoryType : 3;
#define EPT_PDPTE_1GB_MEMORY_TYPE_BIT 3
#define EPT_PDPTE_1GB_MEMORY_TYPE_FLAG 0x38
#define EPT_PDPTE_1GB_MEMORY_TYPE_MASK 0x07
#define EPT_PDPTE_1GB_MEMORY_TYPE(_) (((_) >> 3) & 0x07)
/**
 * [Bit 6] Ignore PAT memory type for this 1-GByte page.
 *
 * @see Vol3C[28.2.6(EPT and Memory Typing)]
 */
UINT64 IgnorePat : 1;
#define EPT_PDPTE_1GB_IGNORE_PAT_BIT 6
#define EPT_PDPTE_1GB_IGNORE_PAT_FLAG 0x40
#define EPT_PDPTE_1GB_IGNORE_PAT_MASK 0x01
#define EPT_PDPTE_1GB_IGNORE_PAT(_) (((_) >> 6) & 0x01)
/**
 * [Bit 7] Must be 1 (otherwise, this entry references an EPT page directory).
 */
UINT64 LargePage : 1;
#define EPT_PDPTE_1GB_LARGE_PAGE_BIT 7
#define EPT_PDPTE_1GB_LARGE_PAGE_FLAG 0x80
#define EPT_PDPTE_1GB_LARGE_PAGE_MASK 0x01
#define EPT_PDPTE_1GB_LARGE_PAGE(_) (((_) >> 7) & 0x01)
/**
 * [Bit 8] If bit 6 of EPTP is 1, accessed flag for EPT; indicates whether software
 * has accessed the 1-GByte page referenced by this entry. Ignored if bit 6 of EPTP
 * is 0.
 *
 * @see Vol3C[28.2.4(Accessed and Dirty Flags for EPT)]
 */
UINT64 Accessed : 1;
#define EPT_PDPTE_1GB_ACCESSED_BIT 8
#define EPT_PDPTE_1GB_ACCESSED_FLAG 0x100
#define EPT_PDPTE_1GB_ACCESSED_MASK 0x01
#define EPT_PDPTE_1GB_ACCESSED(_) (((_) >> 8) & 0x01)
/**
 * [Bit 9] If bit 6 of EPTP is 1, dirty flag for EPT; indicates whether software has
 * written to the 1-GByte page referenced by this entry. Ignored if bit 6 of EPTP is
 * 0.
 *
 * @see Vol3C[28.2.4(Accessed and Dirty Flags for EPT)]
 */
UINT64 Dirty : 1;
#define EPT_PDPTE_1GB_DIRTY_BIT 9
#define EPT_PDPTE_1GB_DIRTY_FLAG 0x200
#define EPT_PDPTE_1GB_DIRTY_MASK 0x01
#define EPT_PDPTE_1GB_DIRTY(_) (((_) >> 9) & 0x01)
/**
 * [Bit 10] Execute access for user-mode linear addresses. If the "mode-based
 * execute control for EPT" VM-execution control is 1, indicates whether instruction
 * fetches are allowed from user-mode linear addresses in the 1-GByte page
 * controlled by this entry. If that control is 0, this bit is ignored.
 */
UINT64 UserModeExecute : 1;
#define EPT_PDPTE_1GB_USER_MODE_EXECUTE_BIT 10
#define EPT_PDPTE_1GB_USER_MODE_EXECUTE_FLAG 0x400
#define EPT_PDPTE_1GB_USER_MODE_EXECUTE_MASK 0x01
#define EPT_PDPTE_1GB_USER_MODE_EXECUTE(_) (((_) >> 10) & 0x01)
/**
 * [Bits 29:11] Reserved.
 */
UINT64 Reserved1 : 19;
/**
 * [Bits 47:30] Physical address of the 1-GByte page referenced by this entry.
 * (The original comment described a 4-KByte aligned page-directory-pointer table,
 * which applies to a PML4E, not to a PDPTE that maps a 1-GByte page.)
 */
UINT64 PageFrameNumber : 18;
#define EPT_PDPTE_1GB_PAGE_FRAME_NUMBER_BIT 30
#define EPT_PDPTE_1GB_PAGE_FRAME_NUMBER_FLAG 0xFFFFC0000000
#define EPT_PDPTE_1GB_PAGE_FRAME_NUMBER_MASK 0x3FFFF
#define EPT_PDPTE_1GB_PAGE_FRAME_NUMBER(_) (((_) >> 30) & 0x3FFFF)
/**
 * [Bits 56:48] Reserved.
 */
UINT64 Reserved2 : 9;
/**
 * [Bit 57] Verify guest paging. If the "guest-paging verification" VM-execution
 * control is 1, indicates limits on the guest paging structures used to access the
 * 1-GByte page controlled by this entry (see Section 28.3.3.2). If that control is
 * 0, this bit is ignored.
 *
 * @see Vol3C[28.3.3.2(EPT Violations)]
 */
UINT64 VerifyGuestPaging : 1;
#define EPT_PDPTE_1GB_VERIFY_GUEST_PAGING_BIT 57
#define EPT_PDPTE_1GB_VERIFY_GUEST_PAGING_FLAG 0x200000000000000
#define EPT_PDPTE_1GB_VERIFY_GUEST_PAGING_MASK 0x01
#define EPT_PDPTE_1GB_VERIFY_GUEST_PAGING(_) (((_) >> 57) & 0x01)
/**
 * [Bit 58] Paging-write access. If the "EPT paging-write control" VM-execution
 * control is 1, indicates that guest paging may update the 1-GByte page controlled
 * by this entry (see Section 28.3.3.2). If that control is 0, this bit is ignored
 *
 * @see Vol3C[28.3.3.2(EPT Violations)]
 */
UINT64 PagingWriteAccess : 1;
#define EPT_PDPTE_1GB_PAGING_WRITE_ACCESS_BIT 58
#define EPT_PDPTE_1GB_PAGING_WRITE_ACCESS_FLAG 0x400000000000000
#define EPT_PDPTE_1GB_PAGING_WRITE_ACCESS_MASK 0x01
#define EPT_PDPTE_1GB_PAGING_WRITE_ACCESS(_) (((_) >> 58) & 0x01)
/**
 * [Bit 59] Reserved.
 */
UINT64 Reserved3 : 1;
/**
 * [Bit 60] Supervisor shadow stack. If bit 7 of EPTP is 1, indicates whether
 * supervisor shadow stack accesses are allowed to guest-physical addresses in the
 * 1-GByte page mapped by this entry (see Section 28.3.3.2)
 *
 * @see Vol3C[28.3.3.2(EPT Violations)]
 */
UINT64 SupervisorShadowStack : 1;
#define EPT_PDPTE_1GB_SUPERVISOR_SHADOW_STACK_BIT 60
#define EPT_PDPTE_1GB_SUPERVISOR_SHADOW_STACK_FLAG 0x1000000000000000
#define EPT_PDPTE_1GB_SUPERVISOR_SHADOW_STACK_MASK 0x01
#define EPT_PDPTE_1GB_SUPERVISOR_SHADOW_STACK(_) (((_) >> 60) & 0x01)
/**
 * [Bits 62:61] Reserved.
 */
UINT64 Reserved4 : 2;
/**
 * [Bit 63] Suppress \#VE. If the "EPT-violation \#VE" VM-execution control is 1,
 * EPT violations caused by accesses to this page are convertible to virtualization
 * exceptions only if this bit is 0. If "EPT-violation \#VE" VMexecution control is
 * 0, this bit is ignored.
 *
 * @see Vol3C[25.5.6.1(Convertible EPT Violations)]
 */
UINT64 SuppressVe : 1;
#define EPT_PDPTE_1GB_SUPPRESS_VE_BIT 63
#define EPT_PDPTE_1GB_SUPPRESS_VE_FLAG 0x8000000000000000
#define EPT_PDPTE_1GB_SUPPRESS_VE_MASK 0x01
#define EPT_PDPTE_1GB_SUPPRESS_VE(_) (((_) >> 63) & 0x01)
} Fields;
UINT64 AsUInt;
} EPT_PDPTE_1GB;
/**
 * @brief Format of an EPT Page-Directory-Pointer-Table Entry (PDPTE) that References an EPT Page
 * Directory
 */
typedef union
{
struct
{
/**
 * [Bit 0] Read access; indicates whether reads are allowed from the 1-GByte region
 * controlled by this entry.
 */
UINT64 ReadAccess : 1;
#define EPT_PDPTE_READ_ACCESS_BIT 0
#define EPT_PDPTE_READ_ACCESS_FLAG 0x01
#define EPT_PDPTE_READ_ACCESS_MASK 0x01
#define EPT_PDPTE_READ_ACCESS(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] Write access; indicates whether writes are allowed from the 1-GByte
 * region controlled by this entry.
 */
UINT64 WriteAccess : 1;
#define EPT_PDPTE_WRITE_ACCESS_BIT 1
#define EPT_PDPTE_WRITE_ACCESS_FLAG 0x02
#define EPT_PDPTE_WRITE_ACCESS_MASK 0x01
#define EPT_PDPTE_WRITE_ACCESS(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] If the "mode-based execute control for EPT" VM-execution control is 0,
 * execute access; indicates whether instruction fetches are allowed from the
 * 1-GByte region controlled by this entry. If that control is 1, execute access for
 * supervisor-mode linear addresses; indicates whether instruction fetches are
 * allowed from supervisor-mode linear addresses in the 1-GByte region controlled by
 * this entry.
 */
UINT64 ExecuteAccess : 1;
#define EPT_PDPTE_EXECUTE_ACCESS_BIT 2
#define EPT_PDPTE_EXECUTE_ACCESS_FLAG 0x04
#define EPT_PDPTE_EXECUTE_ACCESS_MASK 0x01
#define EPT_PDPTE_EXECUTE_ACCESS(_) (((_) >> 2) & 0x01)
UINT64 Reserved1 : 5;
/**
 * [Bit 8] If bit 6 of EPTP is 1, accessed flag for EPT; indicates whether software
 * has accessed the 1-GByte region controlled by this entry. Ignored if bit 6 of
 * EPTP is 0.
 *
 * @see Vol3C[28.2.4(Accessed and Dirty Flags for EPT)]
 */
UINT64 Accessed : 1;
#define EPT_PDPTE_ACCESSED_BIT 8
#define EPT_PDPTE_ACCESSED_FLAG 0x100
#define EPT_PDPTE_ACCESSED_MASK 0x01
#define EPT_PDPTE_ACCESSED(_) (((_) >> 8) & 0x01)
UINT64 Reserved2 : 1;
/**
 * [Bit 10] Execute access for user-mode linear addresses. If the "mode-based
 * execute control for EPT" VM-execution control is 1, indicates whether instruction
 * fetches are allowed from user-mode linear addresses in the 1-GByte region
 * controlled by this entry. If that control is 0, this bit is ignored.
 */
UINT64 UserModeExecute : 1;
#define EPT_PDPTE_USER_MODE_EXECUTE_BIT 10
#define EPT_PDPTE_USER_MODE_EXECUTE_FLAG 0x400
#define EPT_PDPTE_USER_MODE_EXECUTE_MASK 0x01
#define EPT_PDPTE_USER_MODE_EXECUTE(_) (((_) >> 10) & 0x01)
UINT64 Reserved3 : 1;
/**
 * [Bits 47:12] Physical address of the 4-KByte aligned EPT page directory
 * referenced by this entry. (This non-large form references an EPT page
 * directory, per the @brief above; it does not reference another
 * page-directory-pointer table.)
 */
UINT64 PageFrameNumber : 36;
#define EPT_PDPTE_PAGE_FRAME_NUMBER_BIT 12
#define EPT_PDPTE_PAGE_FRAME_NUMBER_FLAG 0xFFFFFFFFF000
#define EPT_PDPTE_PAGE_FRAME_NUMBER_MASK 0xFFFFFFFFF
#define EPT_PDPTE_PAGE_FRAME_NUMBER(_) (((_) >> 12) & 0xFFFFFFFFF)
UINT64 Reserved4 : 16;
} Fields;
UINT64 AsUInt;
} EPT_PDPTE;
/**
 * @brief Format of an EPT Page-Directory Entry (PDE) that Maps a 2-MByte Page
 */
typedef union
{
struct
{
/**
 * [Bit 0] Read access; indicates whether reads are allowed from the 2-MByte page
 * referenced by this entry.
 */
UINT64 ReadAccess : 1;
#define EPT_PDE_2MB_READ_ACCESS_BIT 0
#define EPT_PDE_2MB_READ_ACCESS_FLAG 0x01
#define EPT_PDE_2MB_READ_ACCESS_MASK 0x01
#define EPT_PDE_2MB_READ_ACCESS(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] Write access; indicates whether writes are allowed from the 2-MByte page
 * referenced by this entry.
 */
UINT64 WriteAccess : 1;
#define EPT_PDE_2MB_WRITE_ACCESS_BIT 1
#define EPT_PDE_2MB_WRITE_ACCESS_FLAG 0x02
#define EPT_PDE_2MB_WRITE_ACCESS_MASK 0x01
#define EPT_PDE_2MB_WRITE_ACCESS(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] If the "mode-based execute control for EPT" VM-execution control is 0,
 * execute access; indicates whether instruction fetches are allowed from the
 * 2-MByte page controlled by this entry. If that control is 1, execute access for
 * supervisor-mode linear addresses; indicates whether instruction fetches are
 * allowed from supervisor-mode linear addresses in the 2-MByte page controlled by
 * this entry.
 */
UINT64 ExecuteAccess : 1;
#define EPT_PDE_2MB_EXECUTE_ACCESS_BIT 2
#define EPT_PDE_2MB_EXECUTE_ACCESS_FLAG 0x04
#define EPT_PDE_2MB_EXECUTE_ACCESS_MASK 0x01
#define EPT_PDE_2MB_EXECUTE_ACCESS(_) (((_) >> 2) & 0x01)
/**
 * [Bits 5:3] EPT memory type for this 2-MByte page.
 *
 * @see Vol3C[28.2.6(EPT and memory Typing)]
 */
UINT64 MemoryType : 3;
#define EPT_PDE_2MB_MEMORY_TYPE_BIT 3
#define EPT_PDE_2MB_MEMORY_TYPE_FLAG 0x38
#define EPT_PDE_2MB_MEMORY_TYPE_MASK 0x07
#define EPT_PDE_2MB_MEMORY_TYPE(_) (((_) >> 3) & 0x07)
/**
 * [Bit 6] Ignore PAT memory type for this 2-MByte page.
 *
 * @see Vol3C[28.2.6(EPT and memory Typing)]
 */
UINT64 IgnorePat : 1;
#define EPT_PDE_2MB_IGNORE_PAT_BIT 6
#define EPT_PDE_2MB_IGNORE_PAT_FLAG 0x40
#define EPT_PDE_2MB_IGNORE_PAT_MASK 0x01
#define EPT_PDE_2MB_IGNORE_PAT(_) (((_) >> 6) & 0x01)
/**
 * [Bit 7] Must be 1 (otherwise, this entry references an EPT page table).
 */
UINT64 LargePage : 1;
#define EPT_PDE_2MB_LARGE_PAGE_BIT 7
#define EPT_PDE_2MB_LARGE_PAGE_FLAG 0x80
#define EPT_PDE_2MB_LARGE_PAGE_MASK 0x01
#define EPT_PDE_2MB_LARGE_PAGE(_) (((_) >> 7) & 0x01)
/**
 * [Bit 8] If bit 6 of EPTP is 1, accessed flag for EPT; indicates whether software
 * has accessed the 2-MByte page referenced by this entry. Ignored if bit 6 of EPTP
 * is 0.
 *
 * @see Vol3C[28.2.4(Accessed and Dirty Flags for EPT)]
 */
UINT64 Accessed : 1;
#define EPT_PDE_2MB_ACCESSED_BIT 8
#define EPT_PDE_2MB_ACCESSED_FLAG 0x100
#define EPT_PDE_2MB_ACCESSED_MASK 0x01
#define EPT_PDE_2MB_ACCESSED(_) (((_) >> 8) & 0x01)
/**
 * [Bit 9] If bit 6 of EPTP is 1, dirty flag for EPT; indicates whether software has
 * written to the 2-MByte page referenced by this entry. Ignored if bit 6 of EPTP is
 * 0.
 *
 * @see Vol3C[28.2.4(Accessed and Dirty Flags for EPT)]
 */
UINT64 Dirty : 1;
#define EPT_PDE_2MB_DIRTY_BIT 9
#define EPT_PDE_2MB_DIRTY_FLAG 0x200
#define EPT_PDE_2MB_DIRTY_MASK 0x01
#define EPT_PDE_2MB_DIRTY(_) (((_) >> 9) & 0x01)
/**
 * [Bit 10] Execute access for user-mode linear addresses. If the "mode-based
 * execute control for EPT" VM-execution control is 1, indicates whether instruction
 * fetches are allowed from user-mode linear addresses in the 2-MByte page
 * controlled by this entry. If that control is 0, this bit is ignored.
 */
UINT64 UserModeExecute : 1;
#define EPT_PDE_2MB_USER_MODE_EXECUTE_BIT 10
#define EPT_PDE_2MB_USER_MODE_EXECUTE_FLAG 0x400
#define EPT_PDE_2MB_USER_MODE_EXECUTE_MASK 0x01
#define EPT_PDE_2MB_USER_MODE_EXECUTE(_) (((_) >> 10) & 0x01)
UINT64 Reserved1 : 10;
/**
 * [Bits 47:21] Physical address of the 2-MByte page referenced by this entry.
 * (2-MByte aligned; this large-page form maps a page directly rather than
 * referencing a lower-level table, per the @brief above and the bit-21 base
 * position of the field.)
 */
UINT64 PageFrameNumber : 27;
#define EPT_PDE_2MB_PAGE_FRAME_NUMBER_BIT 21
#define EPT_PDE_2MB_PAGE_FRAME_NUMBER_FLAG 0xFFFFFFE00000
#define EPT_PDE_2MB_PAGE_FRAME_NUMBER_MASK 0x7FFFFFF
#define EPT_PDE_2MB_PAGE_FRAME_NUMBER(_) (((_) >> 21) & 0x7FFFFFF)
UINT64 Reserved2 : 9;
/**
 * [Bit 57] Verify guest paging. If the "guest-paging verification" VM-execution
 * control is 1, indicates limits on the guest paging structures used to access the
 * 2-MByte page controlled by this entry (see Section 28.3.3.2). If that control is
 * 0, this bit is ignored.
 *
 * @see Vol3C[28.3.3.2(EPT Violations)]
 */
UINT64 VerifyGuestPaging : 1;
#define EPT_PDE_2MB_VERIFY_GUEST_PAGING_BIT 57
#define EPT_PDE_2MB_VERIFY_GUEST_PAGING_FLAG 0x200000000000000
#define EPT_PDE_2MB_VERIFY_GUEST_PAGING_MASK 0x01
#define EPT_PDE_2MB_VERIFY_GUEST_PAGING(_) (((_) >> 57) & 0x01)
/**
 * [Bit 58] Paging-write access. If the "EPT paging-write control" VM-execution
 * control is 1, indicates that guest paging may update the 2-MByte page controlled
 * by this entry (see Section 28.3.3.2). If that control is 0, this bit is ignored
 *
 * @see Vol3C[28.3.3.2(EPT Violations)]
 */
UINT64 PagingWriteAccess : 1;
#define EPT_PDE_2MB_PAGING_WRITE_ACCESS_BIT 58
#define EPT_PDE_2MB_PAGING_WRITE_ACCESS_FLAG 0x400000000000000
#define EPT_PDE_2MB_PAGING_WRITE_ACCESS_MASK 0x01
#define EPT_PDE_2MB_PAGING_WRITE_ACCESS(_) (((_) >> 58) & 0x01)
UINT64 Reserved3 : 1;
/**
 * [Bit 60] Supervisor shadow stack. If bit 7 of EPTP is 1, indicates whether
 * supervisor shadow stack accesses are allowed to guest-physical addresses in the
 * 2-MByte page mapped by this entry (see Section 28.3.3.2)
 *
 * @see Vol3C[28.3.3.2(EPT Violations)]
 */
UINT64 SupervisorShadowStack : 1;
#define EPT_PDE_2MB_SUPERVISOR_SHADOW_STACK_BIT 60
#define EPT_PDE_2MB_SUPERVISOR_SHADOW_STACK_FLAG 0x1000000000000000
#define EPT_PDE_2MB_SUPERVISOR_SHADOW_STACK_MASK 0x01
#define EPT_PDE_2MB_SUPERVISOR_SHADOW_STACK(_) (((_) >> 60) & 0x01)
UINT64 Reserved4 : 2;
/**
 * [Bit 63] Suppress \#VE. If the "EPT-violation \#VE" VM-execution control is 1,
 * EPT violations caused by accesses to this page are convertible to virtualization
 * exceptions only if this bit is 0. If "EPT-violation \#VE" VMexecution control is
 * 0, this bit is ignored.
 *
 * @see Vol3C[25.5.6.1(Convertible EPT Violations)]
 */
UINT64 SuppressVe : 1;
#define EPT_PDE_2MB_SUPPRESS_VE_BIT 63
#define EPT_PDE_2MB_SUPPRESS_VE_FLAG 0x8000000000000000
#define EPT_PDE_2MB_SUPPRESS_VE_MASK 0x01
#define EPT_PDE_2MB_SUPPRESS_VE(_) (((_) >> 63) & 0x01)
} Fields;
UINT64 AsUInt;
} EPT_PDE_2MB;
/**
 * @brief Format of an EPT Page-Directory Entry (PDE) that References an EPT Page Table
 */
typedef union
{
struct
{
/**
 * [Bit 0] Read access; indicates whether reads are allowed from the 2-MByte region
 * controlled by this entry.
 */
UINT64 ReadAccess : 1;
#define EPT_PDE_READ_ACCESS_BIT 0
#define EPT_PDE_READ_ACCESS_FLAG 0x01
#define EPT_PDE_READ_ACCESS_MASK 0x01
#define EPT_PDE_READ_ACCESS(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] Write access; indicates whether writes are allowed from the 2-MByte
 * region controlled by this entry.
 */
UINT64 WriteAccess : 1;
#define EPT_PDE_WRITE_ACCESS_BIT 1
#define EPT_PDE_WRITE_ACCESS_FLAG 0x02
#define EPT_PDE_WRITE_ACCESS_MASK 0x01
#define EPT_PDE_WRITE_ACCESS(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] If the "mode-based execute control for EPT" VM-execution control is 0,
 * execute access; indicates whether instruction fetches are allowed from the
 * 2-MByte region controlled by this entry. If that control is 1, execute access for
 * supervisor-mode linear addresses; indicates whether instruction fetches are
 * allowed from supervisor-mode linear addresses in the 2-MByte region controlled by
 * this entry.
 */
UINT64 ExecuteAccess : 1;
#define EPT_PDE_EXECUTE_ACCESS_BIT 2
#define EPT_PDE_EXECUTE_ACCESS_FLAG 0x04
#define EPT_PDE_EXECUTE_ACCESS_MASK 0x01
#define EPT_PDE_EXECUTE_ACCESS(_) (((_) >> 2) & 0x01)
/**
 * [Bits 7:3] Reserved. Note that bit 7 is the large-page flag in the 2-MByte
 * form (see EPT_PDE_2MB.LargePage); it must be 0 for an entry that references
 * an EPT page table.
 */
UINT64 Reserved1 : 5;
/**
 * [Bit 8] If bit 6 of EPTP is 1, accessed flag for EPT; indicates whether software
 * has accessed the 2-MByte region controlled by this entry. Ignored if bit 6 of
 * EPTP is 0.
 *
 * @see Vol3C[28.2.4(Accessed and Dirty Flags for EPT)]
 */
UINT64 Accessed : 1;
#define EPT_PDE_ACCESSED_BIT 8
#define EPT_PDE_ACCESSED_FLAG 0x100
#define EPT_PDE_ACCESSED_MASK 0x01
#define EPT_PDE_ACCESSED(_) (((_) >> 8) & 0x01)
UINT64 Reserved2 : 1;
/**
 * [Bit 10] Execute access for user-mode linear addresses. If the "mode-based
 * execute control for EPT" VM-execution control is 1, indicates whether instruction
 * fetches are allowed from user-mode linear addresses in the 2-MByte region
 * controlled by this entry. If that control is 0, this bit is ignored.
 */
UINT64 UserModeExecute : 1;
#define EPT_PDE_USER_MODE_EXECUTE_BIT 10
#define EPT_PDE_USER_MODE_EXECUTE_FLAG 0x400
#define EPT_PDE_USER_MODE_EXECUTE_MASK 0x01
#define EPT_PDE_USER_MODE_EXECUTE(_) (((_) >> 10) & 0x01)
UINT64 Reserved3 : 1;
/**
 * [Bits 47:12] Physical address of 4-KByte aligned EPT page table referenced by
 * this entry.
 */
UINT64 PageFrameNumber : 36;
#define EPT_PDE_PAGE_FRAME_NUMBER_BIT 12
#define EPT_PDE_PAGE_FRAME_NUMBER_FLAG 0xFFFFFFFFF000
#define EPT_PDE_PAGE_FRAME_NUMBER_MASK 0xFFFFFFFFF
#define EPT_PDE_PAGE_FRAME_NUMBER(_) (((_) >> 12) & 0xFFFFFFFFF)
UINT64 Reserved4 : 16;
} Fields;
UINT64 AsUInt;
} EPT_PDE;
/**
 * @brief Format of an EPT Page-Table Entry that Maps a 4-KByte Page
 */
typedef union
{
struct
{
/**
 * [Bit 0] Read access; indicates whether reads are allowed from the 4-KByte page
 * referenced by this entry.
 */
UINT64 ReadAccess : 1;
#define EPT_PTE_READ_ACCESS_BIT 0
#define EPT_PTE_READ_ACCESS_FLAG 0x01
#define EPT_PTE_READ_ACCESS_MASK 0x01
#define EPT_PTE_READ_ACCESS(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] Write access; indicates whether writes are allowed from the 4-KByte page
 * referenced by this entry.
 */
UINT64 WriteAccess : 1;
#define EPT_PTE_WRITE_ACCESS_BIT 1
#define EPT_PTE_WRITE_ACCESS_FLAG 0x02
#define EPT_PTE_WRITE_ACCESS_MASK 0x01
#define EPT_PTE_WRITE_ACCESS(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] If the "mode-based execute control for EPT" VM-execution control is 0,
 * execute access; indicates whether instruction fetches are allowed from the
 * 4-KByte page controlled by this entry. If that control is 1, execute access for
 * supervisor-mode linear addresses; indicates whether instruction fetches are
 * allowed from supervisor-mode linear addresses in the 4-KByte page controlled by
 * this entry.
 */
UINT64 ExecuteAccess : 1;
#define EPT_PTE_EXECUTE_ACCESS_BIT 2
#define EPT_PTE_EXECUTE_ACCESS_FLAG 0x04
#define EPT_PTE_EXECUTE_ACCESS_MASK 0x01
#define EPT_PTE_EXECUTE_ACCESS(_) (((_) >> 2) & 0x01)
/**
 * [Bits 5:3] EPT memory type for this 4-KByte page.
 *
 * @see Vol3C[28.2.6(EPT and memory Typing)]
 */
UINT64 MemoryType : 3;
#define EPT_PTE_MEMORY_TYPE_BIT 3
#define EPT_PTE_MEMORY_TYPE_FLAG 0x38
#define EPT_PTE_MEMORY_TYPE_MASK 0x07
#define EPT_PTE_MEMORY_TYPE(_) (((_) >> 3) & 0x07)
/**
 * [Bit 6] Ignore PAT memory type for this 4-KByte page.
 *
 * @see Vol3C[28.2.6(EPT and memory Typing)]
 */
UINT64 IgnorePat : 1;
#define EPT_PTE_IGNORE_PAT_BIT 6
#define EPT_PTE_IGNORE_PAT_FLAG 0x40
#define EPT_PTE_IGNORE_PAT_MASK 0x01
#define EPT_PTE_IGNORE_PAT(_) (((_) >> 6) & 0x01)
/**
 * [Bit 7] Reserved. The large-page flag used at higher levels does not apply
 * here; a PTE always maps a 4-KByte page.
 */
UINT64 Reserved1 : 1;
/**
 * [Bit 8] If bit 6 of EPTP is 1, accessed flag for EPT; indicates whether software
 * has accessed the 4-KByte page referenced by this entry. Ignored if bit 6 of EPTP
 * is 0.
 *
 * @see Vol3C[28.2.4(Accessed and Dirty Flags for EPT)]
 */
UINT64 Accessed : 1;
#define EPT_PTE_ACCESSED_BIT 8
#define EPT_PTE_ACCESSED_FLAG 0x100
#define EPT_PTE_ACCESSED_MASK 0x01
#define EPT_PTE_ACCESSED(_) (((_) >> 8) & 0x01)
/**
 * [Bit 9] If bit 6 of EPTP is 1, dirty flag for EPT; indicates whether software has
 * written to the 4-KByte page referenced by this entry. Ignored if bit 6 of EPTP is
 * 0.
 *
 * @see Vol3C[28.2.4(Accessed and Dirty Flags for EPT)]
 */
UINT64 Dirty : 1;
#define EPT_PTE_DIRTY_BIT 9
#define EPT_PTE_DIRTY_FLAG 0x200
#define EPT_PTE_DIRTY_MASK 0x01
#define EPT_PTE_DIRTY(_) (((_) >> 9) & 0x01)
/**
 * [Bit 10] Execute access for user-mode linear addresses. If the "mode-based
 * execute control for EPT" VM-execution control is 1, indicates whether instruction
 * fetches are allowed from user-mode linear addresses in the 4-KByte page
 * controlled by this entry. If that control is 0, this bit is ignored.
 */
UINT64 UserModeExecute : 1;
#define EPT_PTE_USER_MODE_EXECUTE_BIT 10
#define EPT_PTE_USER_MODE_EXECUTE_FLAG 0x400
#define EPT_PTE_USER_MODE_EXECUTE_MASK 0x01
#define EPT_PTE_USER_MODE_EXECUTE(_) (((_) >> 10) & 0x01)
UINT64 Reserved2 : 1;
/**
 * [Bits 47:12] Physical address of the 4-KByte page referenced by this entry.
 */
UINT64 PageFrameNumber : 36;
#define EPT_PTE_PAGE_FRAME_NUMBER_BIT 12
#define EPT_PTE_PAGE_FRAME_NUMBER_FLAG 0xFFFFFFFFF000
#define EPT_PTE_PAGE_FRAME_NUMBER_MASK 0xFFFFFFFFF
#define EPT_PTE_PAGE_FRAME_NUMBER(_) (((_) >> 12) & 0xFFFFFFFFF)
UINT64 Reserved3 : 9;
/**
 * [Bit 57] Verify guest paging. If the "guest-paging verification" VM-execution
 * control is 1, indicates limits on the guest paging structures used to access the
 * 4-KByte page controlled by this entry (see Section 28.3.3.2). If that control is
 * 0, this bit is ignored.
 *
 * @see Vol3C[28.3.3.2(EPT Violations)]
 */
UINT64 VerifyGuestPaging : 1;
#define EPT_PTE_VERIFY_GUEST_PAGING_BIT 57
#define EPT_PTE_VERIFY_GUEST_PAGING_FLAG 0x200000000000000
#define EPT_PTE_VERIFY_GUEST_PAGING_MASK 0x01
#define EPT_PTE_VERIFY_GUEST_PAGING(_) (((_) >> 57) & 0x01)
/**
 * [Bit 58] Paging-write access. If the "EPT paging-write control" VM-execution
 * control is 1, indicates that guest paging may update the 4-KByte page controlled
 * by this entry (see Section 28.3.3.2). If that control is 0, this bit is ignored
 *
 * @see Vol3C[28.3.3.2(EPT Violations)]
 */
UINT64 PagingWriteAccess : 1;
#define EPT_PTE_PAGING_WRITE_ACCESS_BIT 58
#define EPT_PTE_PAGING_WRITE_ACCESS_FLAG 0x400000000000000
#define EPT_PTE_PAGING_WRITE_ACCESS_MASK 0x01
#define EPT_PTE_PAGING_WRITE_ACCESS(_) (((_) >> 58) & 0x01)
UINT64 Reserved4 : 1;
/**
 * [Bit 60] Supervisor shadow stack. If bit 7 of EPTP is 1, indicates whether
 * supervisor shadow stack accesses are allowed to guest-physical addresses in the
 * 4-KByte page mapped by this entry (see Section 28.3.3.2)
 *
 * @see Vol3C[28.3.3.2(EPT Violations)]
 */
UINT64 SupervisorShadowStack : 1;
#define EPT_PTE_SUPERVISOR_SHADOW_STACK_BIT 60
#define EPT_PTE_SUPERVISOR_SHADOW_STACK_FLAG 0x1000000000000000
#define EPT_PTE_SUPERVISOR_SHADOW_STACK_MASK 0x01
#define EPT_PTE_SUPERVISOR_SHADOW_STACK(_) (((_) >> 60) & 0x01)
/**
 * [Bit 61] Sub-page write permissions. If the "sub-page write permissions for EPT"
 * VM-execution control is 1, writes to individual 128-byte regions of the 4-KByte
 * page referenced by this entry may be allowed even if the page would normally not
 * be writable (see Section 28.3.4). If "sub-page write permissions for EPT"
 * VM-execution control is 0, this bit is ignored.
 *
 * @see Vol3C[28.3.4(Sub-Page Write Permissions)]
 */
UINT64 SubPageWritePermissions : 1;
#define EPT_PTE_SUB_PAGE_WRITE_PERMISSIONS_BIT 61
#define EPT_PTE_SUB_PAGE_WRITE_PERMISSIONS_FLAG 0x2000000000000000
#define EPT_PTE_SUB_PAGE_WRITE_PERMISSIONS_MASK 0x01
#define EPT_PTE_SUB_PAGE_WRITE_PERMISSIONS(_) (((_) >> 61) & 0x01)
UINT64 Reserved5 : 1;
/**
 * [Bit 63] Suppress \#VE. If the "EPT-violation \#VE" VM-execution control is 1,
 * EPT violations caused by accesses to this page are convertible to virtualization
 * exceptions only if this bit is 0. If "EPT-violation \#VE" VMexecution control is
 * 0, this bit is ignored.
 *
 * @see Vol3C[25.5.6.1(Convertible EPT Violations)]
 */
UINT64 SuppressVe : 1;
#define EPT_PTE_SUPPRESS_VE_BIT 63
#define EPT_PTE_SUPPRESS_VE_FLAG 0x8000000000000000
#define EPT_PTE_SUPPRESS_VE_MASK 0x01
#define EPT_PTE_SUPPRESS_VE(_) (((_) >> 63) & 0x01)
} Fields;
UINT64 AsUInt;
} EPT_PTE;
/**
 * @brief Format of a common EPT Entry
 *
 * Generic overlay matching the bit positions shared by the level-specific entry
 * types above (EPT_PDPTE, EPT_PDE, EPT_PTE, and their large-page variants), so
 * code can inspect an entry without knowing its level. AsUInt gives raw access
 * to the full 64-bit value. Fields such as MemoryType, IgnorePat, LargePage and
 * Dirty are only meaningful for the entry forms that define them (see the
 * detailed definitions above).
 */
typedef union
{
struct
{
/** [Bit 0] Read access to the region/page controlled by this entry. */
UINT64 ReadAccess : 1;
#define EPT_ENTRY_READ_ACCESS_BIT 0
#define EPT_ENTRY_READ_ACCESS_FLAG 0x01
#define EPT_ENTRY_READ_ACCESS_MASK 0x01
#define EPT_ENTRY_READ_ACCESS(_) (((_) >> 0) & 0x01)
/** [Bit 1] Write access to the region/page controlled by this entry. */
UINT64 WriteAccess : 1;
#define EPT_ENTRY_WRITE_ACCESS_BIT 1
#define EPT_ENTRY_WRITE_ACCESS_FLAG 0x02
#define EPT_ENTRY_WRITE_ACCESS_MASK 0x01
#define EPT_ENTRY_WRITE_ACCESS(_) (((_) >> 1) & 0x01)
/** [Bit 2] Execute access to the region/page controlled by this entry. */
UINT64 ExecuteAccess : 1;
#define EPT_ENTRY_EXECUTE_ACCESS_BIT 2
#define EPT_ENTRY_EXECUTE_ACCESS_FLAG 0x04
#define EPT_ENTRY_EXECUTE_ACCESS_MASK 0x01
#define EPT_ENTRY_EXECUTE_ACCESS(_) (((_) >> 2) & 0x01)
/** [Bits 5:3] EPT memory type (page-mapping entry forms only). */
UINT64 MemoryType : 3;
#define EPT_ENTRY_MEMORY_TYPE_BIT 3
#define EPT_ENTRY_MEMORY_TYPE_FLAG 0x38
#define EPT_ENTRY_MEMORY_TYPE_MASK 0x07
#define EPT_ENTRY_MEMORY_TYPE(_) (((_) >> 3) & 0x07)
/** [Bit 6] Ignore PAT memory type (page-mapping entry forms only). */
UINT64 IgnorePat : 1;
#define EPT_ENTRY_IGNORE_PAT_BIT 6
#define EPT_ENTRY_IGNORE_PAT_FLAG 0x40
#define EPT_ENTRY_IGNORE_PAT_MASK 0x01
#define EPT_ENTRY_IGNORE_PAT(_) (((_) >> 6) & 0x01)
/** [Bit 7] Set when the entry maps a large page instead of referencing a table. */
UINT64 LargePage : 1;
#define EPT_ENTRY_LARGE_PAGE_BIT 7
#define EPT_ENTRY_LARGE_PAGE_FLAG 0x80
#define EPT_ENTRY_LARGE_PAGE_MASK 0x01
#define EPT_ENTRY_LARGE_PAGE(_) (((_) >> 7) & 0x01)
/** [Bit 8] Accessed flag (when enabled by bit 6 of EPTP). */
UINT64 Accessed : 1;
#define EPT_ENTRY_ACCESSED_BIT 8
#define EPT_ENTRY_ACCESSED_FLAG 0x100
#define EPT_ENTRY_ACCESSED_MASK 0x01
#define EPT_ENTRY_ACCESSED(_) (((_) >> 8) & 0x01)
/** [Bit 9] Dirty flag (page-mapping entry forms, when enabled by bit 6 of EPTP). */
UINT64 Dirty : 1;
#define EPT_ENTRY_DIRTY_BIT 9
#define EPT_ENTRY_DIRTY_FLAG 0x200
#define EPT_ENTRY_DIRTY_MASK 0x01
#define EPT_ENTRY_DIRTY(_) (((_) >> 9) & 0x01)
/** [Bit 10] Execute access for user-mode linear addresses. */
UINT64 UserModeExecute : 1;
#define EPT_ENTRY_USER_MODE_EXECUTE_BIT 10
#define EPT_ENTRY_USER_MODE_EXECUTE_FLAG 0x400
#define EPT_ENTRY_USER_MODE_EXECUTE_MASK 0x01
#define EPT_ENTRY_USER_MODE_EXECUTE(_) (((_) >> 10) & 0x01)
UINT64 Reserved1 : 1;
/** [Bits 47:12] Physical page-frame number of the referenced table or page. */
UINT64 PageFrameNumber : 36;
#define EPT_ENTRY_PAGE_FRAME_NUMBER_BIT 12
#define EPT_ENTRY_PAGE_FRAME_NUMBER_FLAG 0xFFFFFFFFF000
#define EPT_ENTRY_PAGE_FRAME_NUMBER_MASK 0xFFFFFFFFF
#define EPT_ENTRY_PAGE_FRAME_NUMBER(_) (((_) >> 12) & 0xFFFFFFFFF)
UINT64 Reserved2 : 15;
/** [Bit 63] Suppress \#VE (see the EPT_PTE definition for full semantics). */
UINT64 SuppressVe : 1;
#define EPT_ENTRY_SUPPRESS_VE_BIT 63
#define EPT_ENTRY_SUPPRESS_VE_FLAG 0x8000000000000000
#define EPT_ENTRY_SUPPRESS_VE_MASK 0x01
#define EPT_ENTRY_SUPPRESS_VE(_) (((_) >> 63) & 0x01)
} Fields;
UINT64 AsUInt;
} EPT_ENTRY;
/**
 * @defgroup EPT_TABLE_LEVEL \
 * EPT Table level numbers
 *
 * EPT Table level numbers, counting down from PML4E (3) to PTE (0).
 * @{
 */
#define EPT_LEVEL_PML4E 0x00000003
#define EPT_LEVEL_PDPTE 0x00000002
#define EPT_LEVEL_PDE 0x00000001
#define EPT_LEVEL_PTE 0x00000000
/**
 * @}
 */
/**
 * @defgroup EPT_ENTRY_COUNT \
 * EPT Entry counts
 *
 * EPT Entry counts. Every level holds 0x200 (512) eight-byte entries,
 * i.e. exactly one 4-KByte page per table (512 * 8 = 4096).
 * @{
 */
#define EPT_PML4E_ENTRY_COUNT 0x00000200
#define EPT_PDPTE_ENTRY_COUNT 0x00000200
#define EPT_PDE_ENTRY_COUNT 0x00000200
#define EPT_PTE_ENTRY_COUNT 0x00000200
/**
 * @}
 */
/**
 * @}
 */
/**
 * @brief INVEPT instruction types
 *
 * Selects the scope of the TLB/paging-structure-cache invalidation performed by
 * the INVEPT instruction. Only types 1 (single-context) and 2 (all-context) are
 * defined; there is no type 0.
 */
typedef enum
{
/**
 * If the INVEPT type is 1, the logical processor invalidates all guest-physical mappings
 * and combined mappings associated with the EP4TA specified in the INVEPT descriptor.
 * Combined mappings for that EP4TA are invalidated for all VPIDs and all PCIDs. (The
 * instruction may invalidate mappings associated with other EP4TAs.)
 */
InveptSingleContext = 0x00000001,
/**
 * If the INVEPT type is 2, the logical processor invalidates guest-physical mappings and
 * combined mappings associated with all EP4TAs (and, for combined mappings, for all VPIDs
 * and PCIDs).
 */
InveptAllContext = 0x00000002,
} INVEPT_TYPE;
/**
 * @brief INVVPID instruction types
 *
 * Selects the scope of the linear/combined-mapping invalidation performed by
 * the INVVPID instruction (types 0 through 3).
 */
typedef enum
{
/**
 * If the INVVPID type is 0, the logical processor invalidates linear mappings and combined
 * mappings associated with the VPID specified in the INVVPID descriptor and that would be
 * used to translate the linear address specified in of the INVVPID descriptor. Linear
 * mappings and combined mappings for that VPID and linear address are invalidated for all
 * PCIDs and, for combined mappings, all EP4TAs. (The instruction may also invalidate
 * mappings associated with other VPIDs and for other linear addresses).
 */
InvvpidIndividualAddress = 0x00000000,
/**
 * If the INVVPID type is 1, the logical processor invalidates all linear mappings and
 * combined mappings associated with the VPID specified in the INVVPID descriptor. Linear
 * mappings and combined mappings for that VPID are invalidated for all PCIDs and, for
 * combined mappings, all EP4TAs. (The instruction may also invalidate mappings associated
 * with other VPIDs).
 */
InvvpidSingleContext = 0x00000001,
/**
 * If the INVVPID type is 2, the logical processor invalidates linear mappings and combined
 * mappings associated with all VPIDs except VPID 0000H and with all PCIDs. (The instruction
 * may also invalidate linear mappings with VPID 0000H.) Combined mappings are invalidated
 * for all EP4TAs.
 */
InvvpidAllContext = 0x00000002,
/**
 * If the INVVPID type is 3, the logical processor invalidates linear mappings and combined
 * mappings associated with the VPID specified in the INVVPID descriptor. Linear mappings
 * and combined mappings for that VPID are invalidated for all PCIDs and, for combined
 * mappings, all EP4TAs. The logical processor is not required to invalidate information
 * that was used for global translations (although it may do so). (The instruction may also
 * invalidate mappings associated with other VPIDs).
 *
 * @see Vol3C[4.10(Caching Translation Information)]
 */
InvvpidSingleContextRetainingGlobals = 0x00000003,
} INVVPID_TYPE;
/**
 * @brief INVEPT descriptor
 *
 * 128-bit (two UINT64) memory operand of the INVEPT instruction. For a
 * single-context invalidation (InveptSingleContext), EptPointer identifies the
 * EPT hierarchy whose mappings are invalidated.
 */
typedef struct
{
/** EPT pointer (EPTP) selecting the context to invalidate. */
UINT64 EptPointer;
/**
 * Must be zero.
 */
UINT64 Reserved;
} INVEPT_DESCRIPTOR;
/**
 * @brief INVVPID descriptor
 *
 * 128-bit memory operand of the INVVPID instruction. Vpid selects the context;
 * LinearAddress is consulted only for the individual-address invalidation type
 * (InvvpidIndividualAddress).
 */
typedef struct
{
/** Virtual-processor identifier whose mappings are to be invalidated. */
UINT16 Vpid;
/**
 * Must be zero.
 */
UINT16 Reserved1;
/**
 * Must be zero.
 */
UINT32 Reserved2;
/** Linear address (used by the individual-address invalidation type). */
UINT64 LinearAddress;
} INVVPID_DESCRIPTOR;
/**
 * @brief Hypervisor-Managed linear-Address Translation Pointer (HLATP)
 *
 * The hypervisor-managed linear-address translation pointer (HLAT pointer or HLATP) is used by HLAT
 * paging to locate and access the first paging structure used for linear-address translation.
 *
 * @see Vol3A[4.5(4-LEVEL PAGING AND 5-LEVEL PAGING)]
 */
typedef union
{
struct
{
/** [Bits 2:0] Reserved. */
UINT64 Reserved1 : 3;
/**
 * [Bit 3] Page-level write-through; indirectly determines the memory type used to
 * access the first HLAT paging structure during linear-address translation.
 */
UINT64 PageLevelWriteThrough : 1;
#define HLAT_POINTER_PAGE_LEVEL_WRITE_THROUGH_BIT 3
#define HLAT_POINTER_PAGE_LEVEL_WRITE_THROUGH_FLAG 0x08
#define HLAT_POINTER_PAGE_LEVEL_WRITE_THROUGH_MASK 0x01
#define HLAT_POINTER_PAGE_LEVEL_WRITE_THROUGH(_) (((_) >> 3) & 0x01)
/**
 * [Bit 4] Page-level cache disable; indirectly determines the memory type used to
 * access the first HLAT paging structure during linear-address translation.
 */
UINT64 PageLevelCacheDisable : 1;
#define HLAT_POINTER_PAGE_LEVEL_CACHE_DISABLE_BIT 4
#define HLAT_POINTER_PAGE_LEVEL_CACHE_DISABLE_FLAG 0x10
#define HLAT_POINTER_PAGE_LEVEL_CACHE_DISABLE_MASK 0x01
#define HLAT_POINTER_PAGE_LEVEL_CACHE_DISABLE(_) (((_) >> 4) & 0x01)
/** [Bits 11:5] Reserved. */
UINT64 Reserved2 : 7;
/**
 * [Bits 47:12] Guest-physical address (4KB-aligned) of the first HLAT paging
 * structure during linear-address translation)
 */
UINT64 PageFrameNumber : 36;
#define HLAT_POINTER_PAGE_FRAME_NUMBER_BIT 12
#define HLAT_POINTER_PAGE_FRAME_NUMBER_FLAG 0xFFFFFFFFF000
#define HLAT_POINTER_PAGE_FRAME_NUMBER_MASK 0xFFFFFFFFF
#define HLAT_POINTER_PAGE_FRAME_NUMBER(_) (((_) >> 12) & 0xFFFFFFFFF)
/** [Bits 63:48] Reserved. */
UINT64 Reserved3 : 16;
};
UINT64 AsUInt;
} HLAT_POINTER;
/**
 * @brief Format of the VMCS Region
 *
 * A logical processor uses virtual-machine control data structures (VMCSs) while it is in VMX
 * operation. These manage transitions into and out of VMX non-root operation (VM entries and VM
 * exits) as well as processor behavior in VMX non-root operation. This structure is manipulated by
 * the new instructions VMCLEAR, VMPTRLD, VMREAD, and VMWRITE. A VMCS region comprises up to
 * 4-KBytes. The exact size is implementation specific and can be determined by consulting the VMX
 * capability MSR IA32_VMX_BASIC.
 *
 * @see Vol3C[24.2(FORMAT OF THE VMCS REGION)] (reference)
 */
typedef struct
{
struct
{
/**
 * @brief VMCS revision identifier
 *
 * [Bits 30:0] Processors that maintain VMCS data in different formats (see below)
 * use different VMCS revision identifiers. These identifiers enable software to
 * avoid using a VMCS region formatted for one processor on a processor that uses a
 * different format.
 * Software should write the VMCS revision identifier to the VMCS region before
 * using that region for a VMCS. The VMCS revision identifier is never written by
 * the processor; VMPTRLD fails if its operand references a VMCS region whose VMCS
 * revision identifier differs from that used by the processor.
 * Software can discover the VMCS revision identifier that a processor uses by
 * reading the VMX capability MSR IA32_VMX_BASIC.
 *
 * @see Vol3C[24.6.2(Processor-Based VM-Execution Controls)]
 * @see Vol3D[A.1(BASIC VMX INFORMATION)]
 */
UINT32 RevisionId : 31;
/**
 * @brief Shadow-VMCS indicator
 *
 * [Bit 31] Software should clear or set the shadow-VMCS indicator depending on
 * whether the VMCS is to be an ordinary VMCS or a shadow VMCS. VMPTRLD fails if the
 * shadow-VMCS indicator is set and the processor does not support the 1-setting of
 * the "VMCS shadowing" VM-execution control. Software can discover support for this
 * setting by reading the VMX capability MSR IA32_VMX_PROCBASED_CTLS2.
 *
 * @see Vol3C[24.10(VMCS TYPES ORDINARY AND SHADOW)]
 */
UINT32 ShadowVmcsIndicator : 1;
};
/**
 * @brief VMX-abort indicator
 *
 * The contents of these bits do not control processor operation in any way. A logical
 * processor writes a non-zero value into these bits if a VMX abort occurs. Software may
 * also write into this field.
 *
 * @see Vol3D[27.7(VMX Aborts)]
 */
UINT32 AbortIndicator;
/**
 * @brief VMCS data (implementation-specific format)
 *
 * These parts of the VMCS control VMX non-root operation and the VMX transitions.
 * The format of these data is implementation-specific. To ensure proper behavior in VMX
 * operation, software should maintain the VMCS region and related structures in writeback
 * cacheable memory. Future implementations may allow or require a different memory type.
 * Software should consult the VMX capability MSR IA32_VMX_BASIC.
 *
 * Sized so the whole structure is one 4-KByte page: 4088 data bytes plus the
 * 8-byte header (4-byte revision word + 4-byte abort indicator) = 4096.
 *
 * @see Vol3C[24.11.4(Software Access to Related Structures)]
 * @see Vol3D[A.1(BASIC VMX INFORMATION)]
 */
UINT8 Data[4088];
} VMCS;
/**
 * @brief Format of the VMXON Region
 *
 * Before executing VMXON, software allocates a region of memory that the logical processor uses to
 * support VMX operation. This region is called the VMXON region. A VMXON region comprises up to
 * 4-KBytes. The exact size is implementation specific and can be determined by consulting the VMX
 * capability MSR IA32_VMX_BASIC.
 *
 * @see Vol3C[24.11.5(VMXON Region)] (reference)
 */
typedef struct
{
struct
{
/**
 * @brief VMCS revision identifier
 *
 * [Bits 30:0] Before executing VMXON, software should write the VMCS revision
 * identifier to the VMXON region. (Specifically, it should write the 31-bit VMCS
 * revision identifier to bits 30:0 of the first 4 bytes of the VMXON region; bit 31
 * should be cleared to 0.)
 *
 * @see VMCS
 * @see Vol3C[24.2(FORMAT OF THE VMCS REGION)]
 * @see Vol3C[24.11.5(VMXON Region)]
 */
UINT32 RevisionId : 31;
/**
 * [Bit 31] Bit 31 is always 0.
 */
UINT32 MustBeZero : 1;
};
/**
 * @brief VMXON data (implementation-specific format)
 *
 * The format of these data is implementation-specific. To ensure proper behavior in VMX
 * operation, software should not access or modify the VMXON region of a logical processor
 * between execution of VMXON and VMXOFF on that logical processor. Doing otherwise may lead
 * to unpredictable behavior.
 *
 * Sized so the whole structure is one 4-KByte page: 4092 data bytes plus the
 * 4-byte revision word = 4096.
 *
 * @see Vol3C[24.11.4(Software Access to Related Structures)]
 * @see Vol3D[A.1(BASIC VMX INFORMATION)]
 */
UINT8 Data[4092];
} VMXON;
/**
 * @defgroup VMCS_FIELDS \
 * VMCS (VM Control Structure)
 *
 * Every component of the VMCS is encoded by a 32-bit field that can be used by VMREAD and VMWRITE.
 * This enumerates all fields in the VMCS and their encodings. Fields are grouped by width (16-bit,
 * 32-bit, etc.) and type (guest-state, host-state, etc.).
 *
 * @see Vol3D[B(APPENDIX B FIELD ENCODING IN VMCS)] (reference)
 * @{
 */
/**
 * @brief Decomposition of a VMCS field encoding into its access-type, index,
 * type, and width components (the low 16 bits of the encoding; AsUInt holds the
 * packed value).
 */
typedef union
{
struct
{
/**
 * [Bit 0] Access type (0 = full; 1 = high); must be full for 16-bit, 32-bit, and
 * natural-width fields.
 */
UINT16 AccessType : 1;
#define VMCS_COMPONENT_ENCODING_ACCESS_TYPE_BIT 0
#define VMCS_COMPONENT_ENCODING_ACCESS_TYPE_FLAG 0x01
#define VMCS_COMPONENT_ENCODING_ACCESS_TYPE_MASK 0x01
#define VMCS_COMPONENT_ENCODING_ACCESS_TYPE(_) (((_) >> 0) & 0x01)
/**
 * [Bits 9:1] Index.
 */
UINT16 Index : 9;
#define VMCS_COMPONENT_ENCODING_INDEX_BIT 1
#define VMCS_COMPONENT_ENCODING_INDEX_FLAG 0x3FE
#define VMCS_COMPONENT_ENCODING_INDEX_MASK 0x1FF
#define VMCS_COMPONENT_ENCODING_INDEX(_) (((_) >> 1) & 0x1FF)
/**
 * [Bits 11:10] Type:
 * 0: control
 * 1: VM-exit information
 * 2: guest state
 * 3: host state
 */
UINT16 Type : 2;
#define VMCS_COMPONENT_ENCODING_TYPE_BIT 10
#define VMCS_COMPONENT_ENCODING_TYPE_FLAG 0xC00
#define VMCS_COMPONENT_ENCODING_TYPE_MASK 0x03
#define VMCS_COMPONENT_ENCODING_TYPE(_) (((_) >> 10) & 0x03)
/**
 * [Bit 12] Reserved (must be 0).
 */
UINT16 MustBeZero : 1;
#define VMCS_COMPONENT_ENCODING_MUST_BE_ZERO_BIT 12
#define VMCS_COMPONENT_ENCODING_MUST_BE_ZERO_FLAG 0x1000
#define VMCS_COMPONENT_ENCODING_MUST_BE_ZERO_MASK 0x01
#define VMCS_COMPONENT_ENCODING_MUST_BE_ZERO(_) (((_) >> 12) & 0x01)
/**
 * [Bits 14:13] Width:
 * 0: 16-bit
 * 1: 64-bit
 * 2: 32-bit
 * 3: natural-width
 */
UINT16 Width : 2;
#define VMCS_COMPONENT_ENCODING_WIDTH_BIT 13
#define VMCS_COMPONENT_ENCODING_WIDTH_FLAG 0x6000
#define VMCS_COMPONENT_ENCODING_WIDTH_MASK 0x03
#define VMCS_COMPONENT_ENCODING_WIDTH(_) (((_) >> 13) & 0x03)
/** [Bit 15] Reserved. */
UINT16 Reserved1 : 1;
};
UINT16 AsUInt;
} VMCS_COMPONENT_ENCODING;
/**
* @defgroup VMCS_16_BIT \
* 16-Bit Fields
*
* 16-Bit Fields.
*
* @see Vol3D[B.1(16-BIT FIELDS)] (reference)
* @{
*/
/**
* @defgroup VMCS_16_BIT_CONTROL_FIELDS \
* 16-Bit Control Fields
*
* 16-Bit Control Fields.
* @{
*/
/**
* Virtual-processor identifier (VPID).
*
* @remarks This field exists only on processors that support the 1-setting of the "enable VPID"
* VM-execution control.
*/
#define VMCS_CTRL_VIRTUAL_PROCESSOR_IDENTIFIER 0x00000000
/**
* Posted-interrupt notification vector.
*
* @remarks This field exists only on processors that support the 1-setting of the "process posted
* interrupts" VM-execution control.
*/
#define VMCS_CTRL_POSTED_INTERRUPT_NOTIFICATION_VECTOR 0x00000002
/**
* EPTP index.
*
* @remarks This field exists only on processors that support the 1-setting of the "EPT-violation
* \#VE" VM-execution control.
*/
#define VMCS_CTRL_EPTP_INDEX 0x00000004
/**
* HLAT prefix size.
*
* @remarks This field exists only on processors that support the 1-setting of the "enable HLAT"
* VM-execution control.
*/
#define VMCS_CTRL_HLAT_PREFIX_SIZE 0x00000006
/**
* @}
*/
/**
* @defgroup VMCS_16_BIT_GUEST_STATE_FIELDS \
* 16-Bit Guest-State Fields
*
* 16-Bit Guest-State Fields.
* @{
*/
/**
* Guest ES selector.
*/
#define VMCS_GUEST_ES_SELECTOR 0x00000800
/**
* Guest CS selector.
*/
#define VMCS_GUEST_CS_SELECTOR 0x00000802
/**
* Guest SS selector.
*/
#define VMCS_GUEST_SS_SELECTOR 0x00000804
/**
* Guest DS selector.
*/
#define VMCS_GUEST_DS_SELECTOR 0x00000806
/**
* Guest FS selector.
*/
#define VMCS_GUEST_FS_SELECTOR 0x00000808
/**
* Guest GS selector.
*/
#define VMCS_GUEST_GS_SELECTOR 0x0000080A
/**
* Guest LDTR selector.
*/
#define VMCS_GUEST_LDTR_SELECTOR 0x0000080C
/**
* Guest TR selector.
*/
#define VMCS_GUEST_TR_SELECTOR 0x0000080E
/**
* Guest interrupt status.
*
* @remarks This field exists only on processors that support the 1-setting of the
* "virtual-interrupt delivery" VM-execution control.
*/
#define VMCS_GUEST_INTERRUPT_STATUS 0x00000810
/**
* PML index.
*
* @remarks This field exists only on processors that support the 1-setting of the "enable PML"
* VM-execution control.
*/
#define VMCS_GUEST_PML_INDEX 0x00000812
/**
* @}
*/
/**
* @defgroup VMCS_16_BIT_HOST_STATE_FIELDS \
* 16-Bit Host-State Fields
*
* 16-Bit Host-State Fields.
* @{
*/
/**
* Host ES selector.
*/
#define VMCS_HOST_ES_SELECTOR 0x00000C00
/**
* Host CS selector.
*/
#define VMCS_HOST_CS_SELECTOR 0x00000C02
/**
* Host SS selector.
*/
#define VMCS_HOST_SS_SELECTOR 0x00000C04
/**
* Host DS selector.
*/
#define VMCS_HOST_DS_SELECTOR 0x00000C06
/**
* Host FS selector.
*/
#define VMCS_HOST_FS_SELECTOR 0x00000C08
/**
* Host GS selector.
*/
#define VMCS_HOST_GS_SELECTOR 0x00000C0A
/**
* Host TR selector.
*/
#define VMCS_HOST_TR_SELECTOR 0x00000C0C
/**
* @}
*/
/**
* @}
*/
/**
* @defgroup VMCS_64_BIT \
* 64-Bit Fields
*
* 64-Bit Fields.
*
* @see Vol3D[B.2(64-BIT FIELDS)] (reference)
* @{
*/
/**
* @defgroup VMCS_64_BIT_CONTROL_FIELDS \
* 64-Bit Control Fields
*
* 64-Bit Control Fields.
* @{
*/
/**
* Address of I/O bitmap A.
*/
#define VMCS_CTRL_IO_BITMAP_A_ADDRESS 0x00002000
/**
* Address of I/O bitmap B.
*/
#define VMCS_CTRL_IO_BITMAP_B_ADDRESS 0x00002002
/**
* Address of MSR bitmaps.
*/
#define VMCS_CTRL_MSR_BITMAP_ADDRESS 0x00002004
/**
* VM-exit MSR-store address.
*/
#define VMCS_CTRL_VMEXIT_MSR_STORE_ADDRESS 0x00002006
/**
* VM-exit MSR-load address.
*/
#define VMCS_CTRL_VMEXIT_MSR_LOAD_ADDRESS 0x00002008
/**
* VM-entry MSR-load address.
*/
#define VMCS_CTRL_VMENTRY_MSR_LOAD_ADDRESS 0x0000200A
/**
* Executive-VMCS pointer.
*/
#define VMCS_CTRL_EXECUTIVE_VMCS_POINTER 0x0000200C
/**
* PML address.
*/
#define VMCS_CTRL_PML_ADDRESS 0x0000200E
/**
* TSC offset.
*/
#define VMCS_CTRL_TSC_OFFSET 0x00002010
/**
* Virtual-APIC address.
*/
#define VMCS_CTRL_VIRTUAL_APIC_ADDRESS 0x00002012
/**
* APIC-access address.
*/
#define VMCS_CTRL_APIC_ACCESS_ADDRESS 0x00002014
/**
* Posted-interrupt descriptor address
*/
#define VMCS_CTRL_POSTED_INTERRUPT_DESCRIPTOR_ADDRESS 0x00002016
/**
* VM-function controls.
*/
#define VMCS_CTRL_VMFUNC_CONTROLS 0x00002018
/**
* EPT pointer.
*/
#define VMCS_CTRL_EPT_POINTER 0x0000201A
/**
* EOI-exit bitmap 0.
*/
#define VMCS_CTRL_EOI_EXIT_BITMAP_0 0x0000201C
/**
* EOI-exit bitmap 1.
*/
#define VMCS_CTRL_EOI_EXIT_BITMAP_1 0x0000201E
/**
* EOI-exit bitmap 2.
*/
#define VMCS_CTRL_EOI_EXIT_BITMAP_2 0x00002020
/**
* EOI-exit bitmap 3.
*/
#define VMCS_CTRL_EOI_EXIT_BITMAP_3 0x00002022
/**
* EPTP-list address.
*/
#define VMCS_CTRL_EPT_POINTER_LIST_ADDRESS 0x00002024
/**
* VMREAD-bitmap address.
*/
#define VMCS_CTRL_VMREAD_BITMAP_ADDRESS 0x00002026
/**
* VMWRITE-bitmap address.
*/
#define VMCS_CTRL_VMWRITE_BITMAP_ADDRESS 0x00002028
/**
* Virtualization-exception information address.
*/
#define VMCS_CTRL_VIRTUALIZATION_EXCEPTION_INFORMATION_ADDRESS 0x0000202A
/**
* XSS-exiting bitmap.
*/
#define VMCS_CTRL_XSS_EXITING_BITMAP 0x0000202C
/**
* ENCLS-exiting bitmap.
*/
#define VMCS_CTRL_ENCLS_EXITING_BITMAP 0x0000202E
/**
* Sub-page-permission-table pointer.
*/
#define VMCS_CTRL_SUB_PAGE_PERMISSION_TABLE_POINTER 0x00002030
/**
* TSC multiplier.
*/
#define VMCS_CTRL_TSC_MULTIPLIER 0x00002032
/**
* Tertiary processor-based VM-execution controls.
*/
#define VMCS_CTRL_TERTIARY_PROCESSOR_BASED_VM_EXECUTION_CONTROLS 0x00002034
/**
* ENCLV-exiting bitmap.
*/
#define VMCS_CTRL_ENCLV_EXITING_BITMAP 0x00002036
/**
* Hypervisor-managed linear-address translation pointer.
*/
#define VMCS_CTRL_HLAT_POINTER 0x00002040
/**
* Secondary VM-exit controls.
*/
#define VMCS_CTRL_SECONDARY_VMEXIT_CONTROLS 0x00002044
/**
* @}
*/
/**
* @defgroup VMCS_64_BIT_READ_ONLY_DATA_FIELDS \
* 64-Bit Read-Only Data Field
*
* 64-Bit Read-Only Data Field.
* @{
*/
/**
* Guest-physical address.
*/
#define VMCS_GUEST_PHYSICAL_ADDRESS 0x00002400
/**
* @}
*/
/**
* @defgroup VMCS_64_BIT_GUEST_STATE_FIELDS \
* 64-Bit Guest-State Fields
*
* 64-Bit Guest-State Fields.
* @{
*/
/**
* VMCS link pointer.
*/
#define VMCS_GUEST_VMCS_LINK_POINTER 0x00002800
/**
* Guest IA32_DEBUGCTL.
*/
#define VMCS_GUEST_DEBUGCTL 0x00002802
/**
* Guest IA32_PAT.
*/
#define VMCS_GUEST_PAT 0x00002804
/**
* Guest IA32_EFER.
*/
#define VMCS_GUEST_EFER 0x00002806
/**
* Guest IA32_PERF_GLOBAL_CTRL.
*/
#define VMCS_GUEST_PERF_GLOBAL_CTRL 0x00002808
/**
* Guest PDPTE0.
*/
#define VMCS_GUEST_PDPTE0 0x0000280A
/**
* Guest PDPTE1.
*/
#define VMCS_GUEST_PDPTE1 0x0000280C
/**
* Guest PDPTE2.
*/
#define VMCS_GUEST_PDPTE2 0x0000280E
/**
* Guest PDPTE3.
*/
#define VMCS_GUEST_PDPTE3 0x00002810
/**
* Guest IA32_BNDCFGS.
*/
#define VMCS_GUEST_BNDCFGS 0x00002812
/**
* Guest IA32_RTIT_CTL.
*/
#define VMCS_GUEST_RTIT_CTL 0x00002814
/**
* Guest IA32_LBR_CTL.
*/
#define VMCS_GUEST_LBR_CTL 0x00002816
/**
* Guest IA32_PKRS
*/
#define VMCS_GUEST_PKRS 0x00002818
/**
* @}
*/
/**
* @defgroup VMCS_64_BIT_HOST_STATE_FIELDS \
* 64-Bit Host-State Fields
*
* 64-Bit Host-State Fields.
* @{
*/
/**
* Host IA32_PAT.
*/
#define VMCS_HOST_PAT 0x00002C00
/**
* Host IA32_EFER.
*/
#define VMCS_HOST_EFER 0x00002C02
/**
* Host IA32_PERF_GLOBAL_CTRL.
*/
#define VMCS_HOST_PERF_GLOBAL_CTRL 0x00002C04
/**
* Host IA32_PKRS
*/
#define VMCS_HOST_PKRS 0x00002C06
/**
* @}
*/
/**
* @}
*/
/**
* @defgroup VMCS_32_BIT \
* 32-Bit Fields
*
* 32-Bit Fields.
*
* @see Vol3D[B.3(32-BIT FIELDS)] (reference)
* @{
*/
/**
* @defgroup VMCS_32_BIT_CONTROL_FIELDS \
* 32-Bit Control Fields
*
* 32-Bit Control Fields.
* @{
*/
/**
* Pin-based VM-execution controls.
*/
#define VMCS_CTRL_PIN_BASED_VM_EXECUTION_CONTROLS 0x00004000
/**
* Primary processor-based VM-execution controls.
*/
#define VMCS_CTRL_PROCESSOR_BASED_VM_EXECUTION_CONTROLS 0x00004002
/**
* Exception bitmap.
*/
#define VMCS_CTRL_EXCEPTION_BITMAP 0x00004004
/**
* Page-fault error-code mask.
*/
#define VMCS_CTRL_PAGEFAULT_ERROR_CODE_MASK 0x00004006
/**
* Page-fault error-code match.
*/
#define VMCS_CTRL_PAGEFAULT_ERROR_CODE_MATCH 0x00004008
/**
* CR3-target count.
*/
#define VMCS_CTRL_CR3_TARGET_COUNT 0x0000400A
/**
* Primary VM-exit controls.
*/
#define VMCS_CTRL_PRIMARY_VMEXIT_CONTROLS 0x0000400C
/**
* VM-exit MSR-store count.
*/
#define VMCS_CTRL_VMEXIT_MSR_STORE_COUNT 0x0000400E
/**
* VM-exit MSR-load count.
*/
#define VMCS_CTRL_VMEXIT_MSR_LOAD_COUNT 0x00004010
/**
* VM-entry controls.
*/
#define VMCS_CTRL_VMENTRY_CONTROLS 0x00004012
/**
* VM-entry MSR-load count.
*/
#define VMCS_CTRL_VMENTRY_MSR_LOAD_COUNT 0x00004014
/**
* VM-entry interruption-information field.
*/
#define VMCS_CTRL_VMENTRY_INTERRUPTION_INFORMATION_FIELD 0x00004016
/**
* VM-entry exception error code.
*/
#define VMCS_CTRL_VMENTRY_EXCEPTION_ERROR_CODE 0x00004018
/**
* VM-entry instruction length.
*/
#define VMCS_CTRL_VMENTRY_INSTRUCTION_LENGTH 0x0000401A
/**
* TPR threshold.
*/
#define VMCS_CTRL_TPR_THRESHOLD 0x0000401C
/**
* Secondary processor-based VM-execution controls.
*/
#define VMCS_CTRL_SECONDARY_PROCESSOR_BASED_VM_EXECUTION_CONTROLS 0x0000401E
/**
* PLE_Gap.
*/
#define VMCS_CTRL_PLE_GAP 0x00004020
/**
* PLE_Window.
*/
#define VMCS_CTRL_PLE_WINDOW 0x00004022
/**
* @}
*/
/**
* @defgroup VMCS_32_BIT_READ_ONLY_DATA_FIELDS \
* 32-Bit Read-Only Data Fields
*
* 32-Bit Read-Only Data Fields.
* @{
*/
/**
* VM-instruction error.
*/
#define VMCS_VM_INSTRUCTION_ERROR 0x00004400
/**
* Exit reason.
*/
#define VMCS_EXIT_REASON 0x00004402
/**
* VM-exit interruption information.
*/
#define VMCS_VMEXIT_INTERRUPTION_INFORMATION 0x00004404
/**
* VM-exit interruption error code.
*/
#define VMCS_VMEXIT_INTERRUPTION_ERROR_CODE 0x00004406
/**
* IDT-vectoring information field.
*/
#define VMCS_IDT_VECTORING_INFORMATION 0x00004408
/**
* IDT-vectoring error code.
*/
#define VMCS_IDT_VECTORING_ERROR_CODE 0x0000440A
/**
* VM-exit instruction length.
*/
#define VMCS_VMEXIT_INSTRUCTION_LENGTH 0x0000440C
/**
* VM-exit instruction information.
*/
#define VMCS_VMEXIT_INSTRUCTION_INFO 0x0000440E
/**
* @}
*/
/**
* @defgroup VMCS_32_BIT_GUEST_STATE_FIELDS \
* 32-Bit Guest-State Fields
*
* 32-Bit Guest-State Fields.
* @{
*/
/**
* Guest ES limit.
*/
#define VMCS_GUEST_ES_LIMIT 0x00004800
/**
* Guest CS limit.
*/
#define VMCS_GUEST_CS_LIMIT 0x00004802
/**
* Guest SS limit.
*/
#define VMCS_GUEST_SS_LIMIT 0x00004804
/**
* Guest DS limit.
*/
#define VMCS_GUEST_DS_LIMIT 0x00004806
/**
* Guest FS limit.
*/
#define VMCS_GUEST_FS_LIMIT 0x00004808
/**
* Guest GS limit.
*/
#define VMCS_GUEST_GS_LIMIT 0x0000480A
/**
* Guest LDTR limit.
*/
#define VMCS_GUEST_LDTR_LIMIT 0x0000480C
/**
* Guest TR limit.
*/
#define VMCS_GUEST_TR_LIMIT 0x0000480E
/**
* Guest GDTR limit.
*/
#define VMCS_GUEST_GDTR_LIMIT 0x00004810
/**
* Guest IDTR limit.
*/
#define VMCS_GUEST_IDTR_LIMIT 0x00004812
/**
* Guest ES access rights.
*/
#define VMCS_GUEST_ES_ACCESS_RIGHTS 0x00004814
/**
* Guest CS access rights.
*/
#define VMCS_GUEST_CS_ACCESS_RIGHTS 0x00004816
/**
* Guest SS access rights.
*/
#define VMCS_GUEST_SS_ACCESS_RIGHTS 0x00004818
/**
* Guest DS access rights.
*/
#define VMCS_GUEST_DS_ACCESS_RIGHTS 0x0000481A
/**
* Guest FS access rights.
*/
#define VMCS_GUEST_FS_ACCESS_RIGHTS 0x0000481C
/**
* Guest GS access rights.
*/
#define VMCS_GUEST_GS_ACCESS_RIGHTS 0x0000481E
/**
* Guest LDTR access rights.
*/
#define VMCS_GUEST_LDTR_ACCESS_RIGHTS 0x00004820
/**
* Guest TR access rights.
*/
#define VMCS_GUEST_TR_ACCESS_RIGHTS 0x00004822
/**
* Guest interruptibility state.
*/
#define VMCS_GUEST_INTERRUPTIBILITY_STATE 0x00004824
/**
* Guest activity state.
*/
#define VMCS_GUEST_ACTIVITY_STATE 0x00004826
/**
* Guest SMBASE.
*/
#define VMCS_GUEST_SMBASE 0x00004828
/**
* Guest IA32_SYSENTER_CS.
*/
#define VMCS_GUEST_SYSENTER_CS 0x0000482A
/**
* VMX-preemption timer value.
*/
#define VMCS_GUEST_VMX_PREEMPTION_TIMER_VALUE 0x0000482E
/**
* @}
*/
/**
* @defgroup VMCS_32_BIT_HOST_STATE_FIELDS \
* 32-Bit Host-State Field
*
* 32-Bit Host-State Field.
* @{
*/
/**
* Host IA32_SYSENTER_CS.
*/
#define VMCS_HOST_SYSENTER_CS 0x00004C00
/**
* @}
*/
/**
* @}
*/
/**
* @defgroup VMCS_NATURAL_WIDTH \
* Natural-Width Fields
*
* Natural-Width Fields.
*
* @see Vol3D[B.4(NATURAL-WIDTH FIELDS)] (reference)
* @{
*/
/**
* @defgroup VMCS_NATURAL_WIDTH_CONTROL_FIELDS \
* Natural-Width Control Fields
*
* Natural-Width Control Fields
* @{
*/
/**
* CR0 guest/host mask.
*/
#define VMCS_CTRL_CR0_GUEST_HOST_MASK 0x00006000
/**
* CR4 guest/host mask.
*/
#define VMCS_CTRL_CR4_GUEST_HOST_MASK 0x00006002
/**
* CR0 read shadow.
*/
#define VMCS_CTRL_CR0_READ_SHADOW 0x00006004
/**
* CR4 read shadow.
*/
#define VMCS_CTRL_CR4_READ_SHADOW 0x00006006
/**
* CR3-target value 0.
*/
#define VMCS_CTRL_CR3_TARGET_VALUE_0 0x00006008
/**
* CR3-target value 1.
*/
#define VMCS_CTRL_CR3_TARGET_VALUE_1 0x0000600A
/**
* CR3-target value 2.
*/
#define VMCS_CTRL_CR3_TARGET_VALUE_2 0x0000600C
/**
* CR3-target value 3.
*/
#define VMCS_CTRL_CR3_TARGET_VALUE_3 0x0000600E
/**
* @}
*/
/**
* @defgroup VMCS_NATURAL_WIDTH_READ_ONLY_DATA_FIELDS \
* Natural-Width Read-Only Data Fields
*
* Natural-Width Read-Only Data Fields.
* @{
*/
/**
* Exit qualification.
*/
#define VMCS_EXIT_QUALIFICATION 0x00006400
/**
* I/O RCX.
*/
#define VMCS_IO_RCX 0x00006402
/**
* I/O RSI.
*/
#define VMCS_IO_RSI 0x00006404
/**
* I/O RDI.
*/
#define VMCS_IO_RDI 0x00006406
/**
* I/O RIP.
*/
#define VMCS_IO_RIP 0x00006408
/**
* Guest-linear address.
*/
#define VMCS_EXIT_GUEST_LINEAR_ADDRESS 0x0000640A
/**
* @}
*/
/**
* @defgroup VMCS_NATURAL_WIDTH_GUEST_STATE_FIELDS \
* Natural-Width Guest-State Fields
*
* Natural-Width Guest-State Fields.
* @{
*/
/**
* Guest CR0.
*/
#define VMCS_GUEST_CR0 0x00006800
/**
* Guest CR3.
*/
#define VMCS_GUEST_CR3 0x00006802
/**
* Guest CR4.
*/
#define VMCS_GUEST_CR4 0x00006804
/**
* Guest ES base.
*/
#define VMCS_GUEST_ES_BASE 0x00006806
/**
* Guest CS base.
*/
#define VMCS_GUEST_CS_BASE 0x00006808
/**
* Guest SS base.
*/
#define VMCS_GUEST_SS_BASE 0x0000680A
/**
* Guest DS base.
*/
#define VMCS_GUEST_DS_BASE 0x0000680C
/**
* Guest FS base.
*/
#define VMCS_GUEST_FS_BASE 0x0000680E
/**
* Guest GS base.
*/
#define VMCS_GUEST_GS_BASE 0x00006810
/**
* Guest LDTR base.
*/
#define VMCS_GUEST_LDTR_BASE 0x00006812
/**
* Guest TR base.
*/
#define VMCS_GUEST_TR_BASE 0x00006814
/**
* Guest GDTR base.
*/
#define VMCS_GUEST_GDTR_BASE 0x00006816
/**
* Guest IDTR base.
*/
#define VMCS_GUEST_IDTR_BASE 0x00006818
/**
* Guest DR7.
*/
#define VMCS_GUEST_DR7 0x0000681A
/**
* Guest RSP.
*/
#define VMCS_GUEST_RSP 0x0000681C
/**
* Guest RIP.
*/
#define VMCS_GUEST_RIP 0x0000681E
/**
* Guest RFLAGS.
*/
#define VMCS_GUEST_RFLAGS 0x00006820
/**
* Guest pending debug exceptions.
*/
#define VMCS_GUEST_PENDING_DEBUG_EXCEPTIONS 0x00006822
/**
* Guest IA32_SYSENTER_ESP.
*/
#define VMCS_GUEST_SYSENTER_ESP 0x00006824
/**
* Guest IA32_SYSENTER_EIP.
*/
#define VMCS_GUEST_SYSENTER_EIP 0x00006826
/**
 * Guest IA32_S_CET.
 *
 * Natural-width guest-state field. The previous encoding (0x00006C28) fell in the
 * host-state range (0x6C00-) and is not a valid VMCS encoding; per Intel SDM Vol 3D
 * Table B-14 the guest natural-width CET fields are encoded 0x6828/0x682A/0x682C.
 */
#define VMCS_GUEST_S_CET 0x00006828
/**
 * Guest SSP.
 */
#define VMCS_GUEST_SSP 0x0000682A
/**
 * Guest IA32_INTERRUPT_SSP_TABLE_ADDR.
 */
#define VMCS_GUEST_INTERRUPT_SSP_TABLE_ADDR 0x0000682C
/**
* @}
*/
/**
* @defgroup VMCS_NATURAL_WIDTH_HOST_STATE_FIELDS \
* Natural-Width Host-State Fields
*
* Natural-Width Host-State Fields.
* @{
*/
/**
* Host CR0.
*/
#define VMCS_HOST_CR0 0x00006C00
/**
* Host CR3.
*/
#define VMCS_HOST_CR3 0x00006C02
/**
* Host CR4.
*/
#define VMCS_HOST_CR4 0x00006C04
/**
* Host FS base.
*/
#define VMCS_HOST_FS_BASE 0x00006C06
/**
* Host GS base.
*/
#define VMCS_HOST_GS_BASE 0x00006C08
/**
* Host TR base.
*/
#define VMCS_HOST_TR_BASE 0x00006C0A
/**
* Host GDTR base.
*/
#define VMCS_HOST_GDTR_BASE 0x00006C0C
/**
* Host IDTR base.
*/
#define VMCS_HOST_IDTR_BASE 0x00006C0E
/**
* Host IA32_SYSENTER_ESP.
*/
#define VMCS_HOST_SYSENTER_ESP 0x00006C10
/**
* Host IA32_SYSENTER_EIP.
*/
#define VMCS_HOST_SYSENTER_EIP 0x00006C12
/**
* Host RSP.
*/
#define VMCS_HOST_RSP 0x00006C14
/**
* Host RIP.
*/
#define VMCS_HOST_RIP 0x00006C16
/**
* Host IA32_S_CET.
*/
#define VMCS_HOST_S_CET 0x00006C18
/**
* Host SSP.
*/
#define VMCS_HOST_SSP 0x00006C1A
/**
* Host IA32_INTERRUPT_SSP_TABLE_ADDR.
*/
#define VMCS_HOST_INTERRUPT_SSP_TABLE_ADDR 0x00006C1C
/**
* @}
*/
/**
* @}
*/
/**
* @}
*/
/**
* @brief Valid interruption types
*/
typedef enum
{
/**
* External interrupt.
*/
ExternalInterrupt = 0x00000000,
/**
* Non-maskable interrupt (NMI).
*/
NonMaskableInterrupt = 0x00000002,
/**
* Hardware exception (e.g., \#PF).
*/
HardwareException = 0x00000003,
/**
* Software interrupt (INT n).
*/
SoftwareInterrupt = 0x00000004,
/**
* Privileged software exception (INT1).
*/
PrivilegedSoftwareException = 0x00000005,
/**
* Software exception (INT3 or INTO).
*/
SoftwareException = 0x00000006,
/**
* Other event. This type is used for injection of events that are not delivered through the
* IDT.
*/
OtherEvent = 0x00000007,
} INTERRUPTION_TYPE;
/**
* @brief VM entry can be configured to conclude by delivering an event through the IDT (after all
* guest state and MSRs have been loaded). This process is called event injection and is controlled
* by these VM-entry control fields
*
* @see Vol3A[24.8.3(VM-Entry Controls for Event Injection)] (reference)
*/
typedef union
{
struct
{
/**
* @brief Vector of interrupt or exception
*
* [Bits 7:0] Determines which entry in the IDT is used or which other event is
* injected.
*/
UINT32 Vector : 8;
#define VMENTRY_INTERRUPT_INFORMATION_VECTOR_BIT 0
#define VMENTRY_INTERRUPT_INFORMATION_VECTOR_FLAG 0xFF
#define VMENTRY_INTERRUPT_INFORMATION_VECTOR_MASK 0xFF
#define VMENTRY_INTERRUPT_INFORMATION_VECTOR(_) (((_) >> 0) & 0xFF)
/**
* @brief Interruption type
*
* [Bits 10:8] Determines details of how the injection is performed.
*
* @see INTERRUPTION_TYPE
*/
UINT32 InterruptionType : 3;
#define VMENTRY_INTERRUPT_INFORMATION_INTERRUPTION_TYPE_BIT 8
#define VMENTRY_INTERRUPT_INFORMATION_INTERRUPTION_TYPE_FLAG 0x700
#define VMENTRY_INTERRUPT_INFORMATION_INTERRUPTION_TYPE_MASK 0x07
#define VMENTRY_INTERRUPT_INFORMATION_INTERRUPTION_TYPE(_) (((_) >> 8) & 0x07)
/**
* @brief Deliver error code (0 = do not deliver; 1 = deliver)
*
* [Bit 11] Determines whether delivery pushes an error code on the guest stack.
*/
UINT32 DeliverErrorCode : 1;
#define VMENTRY_INTERRUPT_INFORMATION_DELIVER_ERROR_CODE_BIT 11
#define VMENTRY_INTERRUPT_INFORMATION_DELIVER_ERROR_CODE_FLAG 0x800
#define VMENTRY_INTERRUPT_INFORMATION_DELIVER_ERROR_CODE_MASK 0x01
#define VMENTRY_INTERRUPT_INFORMATION_DELIVER_ERROR_CODE(_) (((_) >> 11) & 0x01)
/**
* [Bits 30:12] Reserved.
*/
UINT32 Reserved1 : 19;
/**
* @brief Valid
*
* [Bit 31] VM entry injects an event if and only if the valid bit is 1. The valid
* bit in this field is cleared on every VM exit.
*/
UINT32 Valid : 1;
#define VMENTRY_INTERRUPT_INFORMATION_VALID_BIT 31
#define VMENTRY_INTERRUPT_INFORMATION_VALID_FLAG 0x80000000
#define VMENTRY_INTERRUPT_INFORMATION_VALID_MASK 0x01
#define VMENTRY_INTERRUPT_INFORMATION_VALID(_) (((_) >> 31) & 0x01)
};
UINT32 AsUInt;
} VMENTRY_INTERRUPT_INFORMATION;
/**
 * @brief VM-exit interruption information. On a VM exit due to a vectored event, this field
 * reports the vector, interruption type, and error-code validity of the event that caused
 * the exit, along with NMI-unblocking status.
 *
 * @see Vol3A[24.9.2(Information for VM Exits Due to Vectored Events)] (reference)
 */
typedef union
{
struct
{
/**
* [Bits 7:0] Vector of interrupt or exception.
*/
UINT32 Vector : 8;
#define VMEXIT_INTERRUPT_INFORMATION_VECTOR_BIT 0
#define VMEXIT_INTERRUPT_INFORMATION_VECTOR_FLAG 0xFF
#define VMEXIT_INTERRUPT_INFORMATION_VECTOR_MASK 0xFF
#define VMEXIT_INTERRUPT_INFORMATION_VECTOR(_) (((_) >> 0) & 0xFF)
/**
* [Bits 10:8] Interruption type.
*
* @see INTERRUPTION_TYPE
*/
UINT32 InterruptionType : 3;
#define VMEXIT_INTERRUPT_INFORMATION_INTERRUPTION_TYPE_BIT 8
#define VMEXIT_INTERRUPT_INFORMATION_INTERRUPTION_TYPE_FLAG 0x700
#define VMEXIT_INTERRUPT_INFORMATION_INTERRUPTION_TYPE_MASK 0x07
#define VMEXIT_INTERRUPT_INFORMATION_INTERRUPTION_TYPE(_) (((_) >> 8) & 0x07)
/**
* [Bit 11] Error code valid (0 = not valid; 1 = valid). When set, the VM-exit
* interruption error code field contains a valid error code for the event.
*/
UINT32 ErrorCodeValid : 1;
#define VMEXIT_INTERRUPT_INFORMATION_ERROR_CODE_VALID_BIT 11
#define VMEXIT_INTERRUPT_INFORMATION_ERROR_CODE_VALID_FLAG 0x800
#define VMEXIT_INTERRUPT_INFORMATION_ERROR_CODE_VALID_MASK 0x01
#define VMEXIT_INTERRUPT_INFORMATION_ERROR_CODE_VALID(_) (((_) >> 11) & 0x01)
/**
* [Bit 12] NMI unblocking due to IRET.
*/
UINT32 NmiUnblocking : 1;
#define VMEXIT_INTERRUPT_INFORMATION_NMI_UNBLOCKING_BIT 12
#define VMEXIT_INTERRUPT_INFORMATION_NMI_UNBLOCKING_FLAG 0x1000
#define VMEXIT_INTERRUPT_INFORMATION_NMI_UNBLOCKING_MASK 0x01
#define VMEXIT_INTERRUPT_INFORMATION_NMI_UNBLOCKING(_) (((_) >> 12) & 0x01)
/**
* [Bits 30:13] Reserved.
*/
UINT32 Reserved1 : 18;
/**
* [Bit 31] Valid.
*/
UINT32 Valid : 1;
#define VMEXIT_INTERRUPT_INFORMATION_VALID_BIT 31
#define VMEXIT_INTERRUPT_INFORMATION_VALID_FLAG 0x80000000
#define VMEXIT_INTERRUPT_INFORMATION_VALID_MASK 0x01
#define VMEXIT_INTERRUPT_INFORMATION_VALID(_) (((_) >> 31) & 0x01)
};
UINT32 AsUInt;
} VMEXIT_INTERRUPT_INFORMATION;
/**
* @}
*/
/**
* @defgroup APIC \
* Advanced Programmable Interrupt Controller (APIC)
*
* Software interacts with the local APIC by reading and writing its registers. APIC registers are
* memory-mapped to a 4-KByte region of the processor's physical address space with an initial
* starting address of FEE00000H. For correct APIC operation, this address space must be mapped to
* an area of memory that has been designated as strong uncacheable (UC).
*
* @remarks Registers are 32 bits, 64 bits, or 256 bits in width; all are aligned on 128-bit
* boundaries. All 32-bit registers should be accessed using 128-bit aligned 32-bit loads or stores.
* Some processors may support loads and stores of less than 32 bits to some of the APIC registers.
* This is model specific behavior and is not guaranteed to work on all processors. Any FP/MMX/SSE
* access to an APIC register, or any access that touches bytes 4 through 15 of an APIC register may
* cause undefined behavior and must not be executed. This undefined behavior could include hangs,
* incorrect results or unexpected exceptions, including machine checks, and may vary between
* implementations. Wider registers (64-bit or 256-bit) must be accessed using multiple 32-bit loads
* or stores, with all accesses being 128-bit aligned.
* @see Vol3A[10.4.1(The Local APIC Block Diagram)] (reference)
* @{
*/
/**
* Local APIC Base Address.
*
* @remarks Reserved.
*/
#define APIC_BASE_ADDRESS 0xFEE00000
/**
* Local APIC ID Register.
*/
#define APIC_ID 0x00000020
/**
* Local APIC Version Register.
*/
#define APIC_VERSION 0x00000030
/**
* Task Priority Register (TPR).
*/
#define APIC_TASK_PRIORITY 0x00000080
/**
* Arbitration Priority Register (APR).
*/
#define APIC_ARBITRATION_PRIORITY 0x00000090
/**
* Processor Priority Register (PPR).
*/
#define APIC_PROCESSOR_PRIORITY 0x000000A0
/**
* EOI Register.
*/
#define APIC_EOI 0x000000B0
/**
* Remote Read Register (RRD).
*/
#define APIC_REMOTE_READ 0x000000C0
/**
* Logical Destination Register.
*/
#define APIC_LOGICAL_DESTINATION 0x000000D0
/**
* Destination Format Register.
*
* @see Vol3A[10.6.2.2(Logical Destination Mode)]
*/
#define APIC_DESTINATION_FORMAT 0x000000E0
/**
* Spurious Interrupt Vector Register.
*
* @see Vol3A[10.9(SPURIOUS INTERRUPT)]
*/
#define APIC_SPURIOUS_INTERRUPT_VECTOR 0x000000F0
/**
* In-Service Register (ISR); bits 31:0.
*/
#define APIC_IN_SERVICE_BITS_31_0 0x00000100
/**
* In-Service Register (ISR); bits 63:32.
*/
#define APIC_IN_SERVICE_BITS_63_32 0x00000110
/**
* In-Service Register (ISR); bits 95:64.
*/
#define APIC_IN_SERVICE_BITS_95_64 0x00000120
/**
* In-Service Register (ISR); bits 127:96.
*/
#define APIC_IN_SERVICE_BITS_127_96 0x00000130
/**
* In-Service Register (ISR); bits 159:128.
*/
#define APIC_IN_SERVICE_BITS_159_128 0x00000140
/**
* In-Service Register (ISR); bits 191:160.
*/
#define APIC_IN_SERVICE_BITS_191_160 0x00000150
/**
* In-Service Register (ISR); bits 223:192.
*/
#define APIC_IN_SERVICE_BITS_223_192 0x00000160
/**
* In-Service Register (ISR); bits 255:224.
*/
#define APIC_IN_SERVICE_BITS_255_224 0x00000170
/**
* Trigger Mode Register (TMR); bits 31:0.
*/
#define APIC_TRIGGER_MODE_BITS_31_0 0x00000180
/**
* Trigger Mode Register (TMR); bits 63:32.
*/
#define APIC_TRIGGER_MODE_BITS_63_32 0x00000190
/**
* Trigger Mode Register (TMR); bits 95:64.
*/
#define APIC_TRIGGER_MODE_BITS_95_64 0x000001A0
/**
* Trigger Mode Register (TMR); bits 127:96.
*/
#define APIC_TRIGGER_MODE_BITS_127_96 0x000001B0
/**
* Trigger Mode Register (TMR); bits 159:128.
*/
#define APIC_TRIGGER_MODE_BITS_159_128 0x000001C0
/**
* Trigger Mode Register (TMR); bits 191:160.
*/
#define APIC_TRIGGER_MODE_BITS_191_160 0x000001D0
/**
* Trigger Mode Register (TMR); bits 223:192.
*/
#define APIC_TRIGGER_MODE_BITS_223_192 0x000001E0
/**
* Trigger Mode Register (TMR); bits 255:224.
*/
#define APIC_TRIGGER_MODE_BITS_255_224 0x000001F0
/**
* Interrupt Request Register (IRR); bits 31:0.
*/
#define APIC_INTERRUPT_REQUEST_BITS_31_0 0x00000200
/**
* Interrupt Request Register (IRR); bits 63:32.
*/
#define APIC_INTERRUPT_REQUEST_BITS_63_32 0x00000210
/**
* Interrupt Request Register (IRR); bits 95:64.
*/
#define APIC_INTERRUPT_REQUEST_BITS_95_64 0x00000220
/**
* Interrupt Request Register (IRR); bits 127:96.
*/
#define APIC_INTERRUPT_REQUEST_BITS_127_96 0x00000230
/**
* Interrupt Request Register (IRR); bits 159:128.
*/
#define APIC_INTERRUPT_REQUEST_BITS_159_128 0x00000240
/**
* Interrupt Request Register (IRR); bits 191:160.
*/
#define APIC_INTERRUPT_REQUEST_BITS_191_160 0x00000250
/**
* Interrupt Request Register (IRR); bits 223:192.
*/
#define APIC_INTERRUPT_REQUEST_BITS_223_192 0x00000260
/**
* Interrupt Request Register (IRR); bits 255:224.
*/
#define APIC_INTERRUPT_REQUEST_BITS_255_224 0x00000270
/**
* Error Status Register.
*/
#define APIC_ERROR_STATUS 0x00000280
/**
* LVT Corrected Machine Check Interrupt (CMCI) Register.
*/
#define APIC_LVT_CORRECTED_MACHINE_CHECK_INTERRUPT 0x000002F0
/**
* Interrupt Command Register (ICR); bits 0-31.
*/
#define APIC_INTERRUPT_COMMAND_BITS_0_31 0x00000300
/**
* Interrupt Command Register (ICR); bits 32-63.
*/
#define APIC_INTERRUPT_COMMAND_BITS_32_63 0x00000310
/**
* LVT Timer Register.
*/
#define APIC_LVT_TIMER 0x00000320
/**
* LVT Thermal Sensor Register.
*/
#define APIC_LVT_THERMAL_SENSOR 0x00000330
/**
* LVT Performance Monitoring Counters Register.
*/
#define APIC_LVT_PERFORMANCE_MONITORING_COUNTERS 0x00000340
/**
* LVT LINT0 Register.
*/
#define APIC_LVT_LINT0 0x00000350
/**
* LVT LINT1 Register.
*/
#define APIC_LVT_LINT1 0x00000360
/**
* LVT Error Register.
*/
#define APIC_LVT_ERROR 0x00000370
/**
* Initial Count Register (for Timer).
*/
#define APIC_INITIAL_COUNT 0x00000380
/**
* Current Count Register (for Timer).
*/
#define APIC_CURRENT_COUNT 0x00000390
/**
* Divide Configuration Register (for Timer).
*/
#define APIC_DIVIDE_CONFIGURATION 0x000003E0
/**
* @}
*/
/**
* The 32-bit EFLAGS register contains a group of status flags, a control flag, and a group of
* system flags. The status flags (bits 0, 2, 4, 6, 7, and 11) of the EFLAGS register indicate the
* results of arithmetic instructions, such as the ADD, SUB, MUL, and DIV instructions. The system
* flags and IOPL field in the EFLAGS register control operating-system or executive operations.
*
* @see Vol1[3.4.3(EFLAGS)] (reference)
*/
typedef union
{
struct
{
/**
* @brief Carry flag
*
* [Bit 0] Set if an arithmetic operation generates a carry or a borrow out of the
* mostsignificant bit of the result; cleared otherwise. This flag indicates an
* overflow condition for unsigned-integer arithmetic. It is also used in
* multiple-precision arithmetic.
*/
UINT32 CarryFlag : 1;
#define EFLAGS_CARRY_FLAG_BIT 0
#define EFLAGS_CARRY_FLAG_FLAG 0x01
#define EFLAGS_CARRY_FLAG_MASK 0x01
#define EFLAGS_CARRY_FLAG(_) (((_) >> 0) & 0x01)
/**
* [Bit 1] Reserved - always 1
*/
UINT32 ReadAs1 : 1;
#define EFLAGS_READ_AS_1_BIT 1
#define EFLAGS_READ_AS_1_FLAG 0x02
#define EFLAGS_READ_AS_1_MASK 0x01
#define EFLAGS_READ_AS_1(_) (((_) >> 1) & 0x01)
/**
* @brief Parity flag
*
* [Bit 2] Set if the least-significant byte of the result contains an even number
* of 1 bits; cleared otherwise.
*/
UINT32 ParityFlag : 1;
#define EFLAGS_PARITY_FLAG_BIT 2
#define EFLAGS_PARITY_FLAG_FLAG 0x04
#define EFLAGS_PARITY_FLAG_MASK 0x01
#define EFLAGS_PARITY_FLAG(_) (((_) >> 2) & 0x01)
UINT32 Reserved1 : 1;
/**
* @brief Auxiliary Carry flag
*
* [Bit 4] Set if an arithmetic operation generates a carry or a borrow out of bit 3
* of the result; cleared otherwise. This flag is used in binary-coded decimal (BCD)
* arithmetic.
*/
UINT32 AuxiliaryCarryFlag : 1;
#define EFLAGS_AUXILIARY_CARRY_FLAG_BIT 4
#define EFLAGS_AUXILIARY_CARRY_FLAG_FLAG 0x10
#define EFLAGS_AUXILIARY_CARRY_FLAG_MASK 0x01
#define EFLAGS_AUXILIARY_CARRY_FLAG(_) (((_) >> 4) & 0x01)
UINT32 Reserved2 : 1;
/**
* @brief Zero flag
*
* [Bit 6] Set if the result is zero; cleared otherwise.
*/
UINT32 ZeroFlag : 1;
#define EFLAGS_ZERO_FLAG_BIT 6
#define EFLAGS_ZERO_FLAG_FLAG 0x40
#define EFLAGS_ZERO_FLAG_MASK 0x01
#define EFLAGS_ZERO_FLAG(_) (((_) >> 6) & 0x01)
/**
* @brief Sign flag
*
* [Bit 7] Set equal to the most-significant bit of the result, which is the sign
* bit of a signed integer. (0 indicates a positive value and 1 indicates a negative
* value.)
*/
UINT32 SignFlag : 1;
#define EFLAGS_SIGN_FLAG_BIT 7
#define EFLAGS_SIGN_FLAG_FLAG 0x80
#define EFLAGS_SIGN_FLAG_MASK 0x01
#define EFLAGS_SIGN_FLAG(_) (((_) >> 7) & 0x01)
/**
* @brief Trap flag
*
* [Bit 8] Set to enable single-step mode for debugging; clear to disable
* single-step mode.
*/
UINT32 TrapFlag : 1;
#define EFLAGS_TRAP_FLAG_BIT 8
#define EFLAGS_TRAP_FLAG_FLAG 0x100
#define EFLAGS_TRAP_FLAG_MASK 0x01
#define EFLAGS_TRAP_FLAG(_) (((_) >> 8) & 0x01)
/**
* @brief Interrupt enable flag
*
* [Bit 9] Controls the response of the processor to maskable interrupt requests.
* Set to respond to maskable interrupts; cleared to inhibit maskable interrupts.
*/
UINT32 InterruptEnableFlag : 1;
#define EFLAGS_INTERRUPT_ENABLE_FLAG_BIT 9
#define EFLAGS_INTERRUPT_ENABLE_FLAG_FLAG 0x200
#define EFLAGS_INTERRUPT_ENABLE_FLAG_MASK 0x01
#define EFLAGS_INTERRUPT_ENABLE_FLAG(_) (((_) >> 9) & 0x01)
/**
* @brief Direction flag
*
* [Bit 10] Controls string instructions (MOVS, CMPS, SCAS, LODS, and STOS). Setting
* the DF flag causes the string instructions to auto-decrement (to process strings
* from high addresses to low addresses). Clearing the DF flag causes the string
* instructions to auto-increment (process strings from low addresses to high
* addresses).
*/
UINT32 DirectionFlag : 1;
#define EFLAGS_DIRECTION_FLAG_BIT 10
#define EFLAGS_DIRECTION_FLAG_FLAG 0x400
#define EFLAGS_DIRECTION_FLAG_MASK 0x01
#define EFLAGS_DIRECTION_FLAG(_) (((_) >> 10) & 0x01)
/**
* @brief Overflow flag
*
* [Bit 11] Set if the integer result is too large a positive number or too small a
* negative number (excluding the sign-bit) to fit in the destination operand;
* cleared otherwise. This flag indicates an overflow condition for signed-integer
* (two's complement) arithmetic.
*/
UINT32 OverflowFlag : 1;
#define EFLAGS_OVERFLOW_FLAG_BIT 11
#define EFLAGS_OVERFLOW_FLAG_FLAG 0x800
#define EFLAGS_OVERFLOW_FLAG_MASK 0x01
#define EFLAGS_OVERFLOW_FLAG(_) (((_) >> 11) & 0x01)
/**
* @brief I/O privilege level field
*
* [Bits 13:12] Indicates the I/O privilege level of the currently running program
* or task. The current privilege level (CPL) of the currently running program or
* task must be less than or equal to the I/O privilege level to access the I/O
* address space. The POPF and IRET instructions can modify this field only when
* operating at a CPL of 0.
*/
UINT32 IoPrivilegeLevel : 2;
#define EFLAGS_IO_PRIVILEGE_LEVEL_BIT 12
#define EFLAGS_IO_PRIVILEGE_LEVEL_FLAG 0x3000
#define EFLAGS_IO_PRIVILEGE_LEVEL_MASK 0x03
#define EFLAGS_IO_PRIVILEGE_LEVEL(_) (((_) >> 12) & 0x03)
/**
* @brief Nested task flag
*
* [Bit 14] Controls the chaining of interrupted and called tasks. Set when the
* current task is linked to the previously executed task; cleared when the current
* task is not linked to another task.
*/
UINT32 NestedTaskFlag : 1;
#define EFLAGS_NESTED_TASK_FLAG_BIT 14
#define EFLAGS_NESTED_TASK_FLAG_FLAG 0x4000
#define EFLAGS_NESTED_TASK_FLAG_MASK 0x01
#define EFLAGS_NESTED_TASK_FLAG(_) (((_) >> 14) & 0x01)
UINT32 Reserved3 : 1;
/**
* @brief Resume flag
*
* [Bit 16] Controls the processor's response to debug exceptions.
*/
UINT32 ResumeFlag : 1;
#define EFLAGS_RESUME_FLAG_BIT 16
#define EFLAGS_RESUME_FLAG_FLAG 0x10000
#define EFLAGS_RESUME_FLAG_MASK 0x01
#define EFLAGS_RESUME_FLAG(_) (((_) >> 16) & 0x01)
/**
* @brief Virtual-8086 mode flag
*
* [Bit 17] Set to enable virtual-8086 mode; clear to return to protected mode
* without virtual-8086 mode semantics.
*/
UINT32 Virtual8086ModeFlag : 1;
#define EFLAGS_VIRTUAL_8086_MODE_FLAG_BIT 17
#define EFLAGS_VIRTUAL_8086_MODE_FLAG_FLAG 0x20000
#define EFLAGS_VIRTUAL_8086_MODE_FLAG_MASK 0x01
#define EFLAGS_VIRTUAL_8086_MODE_FLAG(_) (((_) >> 17) & 0x01)
/**
* @brief Alignment check (or access control) flag
*
* [Bit 18] If the AM bit is set in the CR0 register, alignment checking of
* user-mode data accesses is enabled if and only if this flag is 1. If the SMAP bit
* is set in the CR4 register, explicit supervisor-mode data accesses to user-mode
* pages are allowed if and only if this bit is 1.
*
* @see Vol3A[4.6(ACCESS RIGHTS)]
*/
UINT32 AlignmentCheckFlag : 1;
#define EFLAGS_ALIGNMENT_CHECK_FLAG_BIT 18
#define EFLAGS_ALIGNMENT_CHECK_FLAG_FLAG 0x40000
#define EFLAGS_ALIGNMENT_CHECK_FLAG_MASK 0x01
#define EFLAGS_ALIGNMENT_CHECK_FLAG(_) (((_) >> 18) & 0x01)
/**
* @brief Virtual interrupt flag
*
* [Bit 19] Virtual image of the IF flag. Used in conjunction with the VIP flag. (To
* use this flag and the VIP flag the virtual mode extensions are enabled by setting
* the VME flag in control register CR4.)
*/
UINT32 VirtualInterruptFlag : 1;
#define EFLAGS_VIRTUAL_INTERRUPT_FLAG_BIT 19
#define EFLAGS_VIRTUAL_INTERRUPT_FLAG_FLAG 0x80000
#define EFLAGS_VIRTUAL_INTERRUPT_FLAG_MASK 0x01
#define EFLAGS_VIRTUAL_INTERRUPT_FLAG(_) (((_) >> 19) & 0x01)
/**
* @brief Virtual interrupt pending flag
*
* [Bit 20] Set to indicate that an interrupt is pending; clear when no interrupt is
* pending. (Software sets and clears this flag; the processor only reads it.) Used
* in conjunction with the VIF flag.
*/
UINT32 VirtualInterruptPendingFlag : 1;
#define EFLAGS_VIRTUAL_INTERRUPT_PENDING_FLAG_BIT 20
#define EFLAGS_VIRTUAL_INTERRUPT_PENDING_FLAG_FLAG 0x100000
#define EFLAGS_VIRTUAL_INTERRUPT_PENDING_FLAG_MASK 0x01
#define EFLAGS_VIRTUAL_INTERRUPT_PENDING_FLAG(_) (((_) >> 20) & 0x01)
/**
* @brief Identification flag
*
* [Bit 21] The ability of a program to set or clear this flag indicates support for
* the CPUID instruction.
*/
UINT32 IdentificationFlag : 1;
#define EFLAGS_IDENTIFICATION_FLAG_BIT 21
#define EFLAGS_IDENTIFICATION_FLAG_FLAG 0x200000
#define EFLAGS_IDENTIFICATION_FLAG_MASK 0x01
#define EFLAGS_IDENTIFICATION_FLAG(_) (((_) >> 21) & 0x01)
UINT32 Reserved4 : 10;
};
UINT32 AsUInt;
} EFLAGS;
/**
* The 64-bit RFLAGS register contains a group of status flags, a control flag, and a group of
* system flags in 64-bit mode. The upper 32 bits of RFLAGS register is reserved. The lower 32 bits
* of RFLAGS is the same as EFLAGS.
*
* @see EFLAGS
* @see Vol1[3.4.3.4(RFLAGS Register in 64-Bit Mode)] (reference)
*/
// typedef union
//{
// struct
// {
// /**
// * @brief Carry flag
// *
// * [Bit 0] See the description in EFLAGS.
// */
// UINT64 CarryFlag : 1;
// #define RFLAGS_CARRY_FLAG_BIT 0
// #define RFLAGS_CARRY_FLAG_FLAG 0x01
// #define RFLAGS_CARRY_FLAG_MASK 0x01
// #define RFLAGS_CARRY_FLAG(_) (((_) >> 0) & 0x01)
//
// /**
// * [Bit 1] Reserved - always 1
// */
// UINT64 ReadAs1 : 1;
// #define RFLAGS_READ_AS_1_BIT 1
// #define RFLAGS_READ_AS_1_FLAG 0x02
// #define RFLAGS_READ_AS_1_MASK 0x01
// #define RFLAGS_READ_AS_1(_) (((_) >> 1) & 0x01)
//
// /**
// * @brief Parity flag
// *
// * [Bit 2] See the description in EFLAGS.
// */
// UINT64 ParityFlag : 1;
// #define RFLAGS_PARITY_FLAG_BIT 2
// #define RFLAGS_PARITY_FLAG_FLAG 0x04
// #define RFLAGS_PARITY_FLAG_MASK 0x01
// #define RFLAGS_PARITY_FLAG(_) (((_) >> 2) & 0x01)
// UINT64 Reserved1 : 1;
//
// /**
// * @brief Auxiliary Carry flag
// *
// * [Bit 4] See the description in EFLAGS.
// */
// UINT64 AuxiliaryCarryFlag : 1;
// #define RFLAGS_AUXILIARY_CARRY_FLAG_BIT 4
// #define RFLAGS_AUXILIARY_CARRY_FLAG_FLAG 0x10
// #define RFLAGS_AUXILIARY_CARRY_FLAG_MASK 0x01
// #define RFLAGS_AUXILIARY_CARRY_FLAG(_) (((_) >> 4) & 0x01)
// UINT64 Reserved2 : 1;
//
// /**
// * @brief Zero flag
// *
// * [Bit 6] See the description in EFLAGS.
// */
// UINT64 ZeroFlag : 1;
// #define RFLAGS_ZERO_FLAG_BIT 6
// #define RFLAGS_ZERO_FLAG_FLAG 0x40
// #define RFLAGS_ZERO_FLAG_MASK 0x01
// #define RFLAGS_ZERO_FLAG(_) (((_) >> 6) & 0x01)
//
// /**
// * @brief Sign flag
// *
// * [Bit 7] See the description in EFLAGS.
// */
// UINT64 SignFlag : 1;
// #define RFLAGS_SIGN_FLAG_BIT 7
// #define RFLAGS_SIGN_FLAG_FLAG 0x80
// #define RFLAGS_SIGN_FLAG_MASK 0x01
// #define RFLAGS_SIGN_FLAG(_) (((_) >> 7) & 0x01)
//
// /**
// * @brief Trap flag
// *
// * [Bit 8] See the description in EFLAGS.
// */
// UINT64 TrapFlag : 1;
// #define RFLAGS_TRAP_FLAG_BIT 8
// #define RFLAGS_TRAP_FLAG_FLAG 0x100
// #define RFLAGS_TRAP_FLAG_MASK 0x01
// #define RFLAGS_TRAP_FLAG(_) (((_) >> 8) & 0x01)
//
// /**
// * @brief Interrupt enable flag
// *
// * [Bit 9] See the description in EFLAGS.
// */
// UINT64 InterruptEnableFlag : 1;
// #define RFLAGS_INTERRUPT_ENABLE_FLAG_BIT 9
// #define RFLAGS_INTERRUPT_ENABLE_FLAG_FLAG 0x200
// #define RFLAGS_INTERRUPT_ENABLE_FLAG_MASK 0x01
// #define RFLAGS_INTERRUPT_ENABLE_FLAG(_) (((_) >> 9) & 0x01)
//
// /**
// * @brief Direction flag
// *
// * [Bit 10] See the description in EFLAGS.
// */
// UINT64 DirectionFlag : 1;
// #define RFLAGS_DIRECTION_FLAG_BIT 10
// #define RFLAGS_DIRECTION_FLAG_FLAG 0x400
// #define RFLAGS_DIRECTION_FLAG_MASK 0x01
// #define RFLAGS_DIRECTION_FLAG(_) (((_) >> 10) & 0x01)
//
// /**
// * @brief Overflow flag
// *
// * [Bit 11] See the description in EFLAGS.
// */
// UINT64 OverflowFlag : 1;
// #define RFLAGS_OVERFLOW_FLAG_BIT 11
// #define RFLAGS_OVERFLOW_FLAG_FLAG 0x800
// #define RFLAGS_OVERFLOW_FLAG_MASK 0x01
// #define RFLAGS_OVERFLOW_FLAG(_) (((_) >> 11) & 0x01)
//
// /**
// * @brief I/O privilege level field
// *
// * [Bits 13:12] See the description in EFLAGS.
// */
// UINT64 IoPrivilegeLevel : 2;
// #define RFLAGS_IO_PRIVILEGE_LEVEL_BIT 12
// #define RFLAGS_IO_PRIVILEGE_LEVEL_FLAG 0x3000
// #define RFLAGS_IO_PRIVILEGE_LEVEL_MASK 0x03
// #define RFLAGS_IO_PRIVILEGE_LEVEL(_) (((_) >> 12) & 0x03)
//
// /**
// * @brief Nested task flag
// *
// * [Bit 14] See the description in EFLAGS.
// */
// UINT64 NestedTaskFlag : 1;
// #define RFLAGS_NESTED_TASK_FLAG_BIT 14
// #define RFLAGS_NESTED_TASK_FLAG_FLAG 0x4000
// #define RFLAGS_NESTED_TASK_FLAG_MASK 0x01
// #define RFLAGS_NESTED_TASK_FLAG(_) (((_) >> 14) & 0x01)
// UINT64 Reserved3 : 1;
//
// /**
// * @brief Resume flag
// *
// * [Bit 16] See the description in EFLAGS.
// */
// UINT64 ResumeFlag : 1;
// #define RFLAGS_RESUME_FLAG_BIT 16
// #define RFLAGS_RESUME_FLAG_FLAG 0x10000
// #define RFLAGS_RESUME_FLAG_MASK 0x01
// #define RFLAGS_RESUME_FLAG(_) (((_) >> 16) & 0x01)
//
// /**
// * @brief Virtual-8086 mode flag
// *
// * [Bit 17] See the description in EFLAGS.
// */
// UINT64 Virtual8086ModeFlag : 1;
// #define RFLAGS_VIRTUAL_8086_MODE_FLAG_BIT 17
// #define RFLAGS_VIRTUAL_8086_MODE_FLAG_FLAG 0x20000
// #define RFLAGS_VIRTUAL_8086_MODE_FLAG_MASK 0x01
// #define RFLAGS_VIRTUAL_8086_MODE_FLAG(_) (((_) >> 17) & 0x01)
//
// /**
// * @brief Alignment check (or access control) flag
// *
// * [Bit 18] See the description in EFLAGS.
// *
// * @see Vol3A[4.6(ACCESS RIGHTS)]
// */
// UINT64 AlignmentCheckFlag : 1;
// #define RFLAGS_ALIGNMENT_CHECK_FLAG_BIT 18
// #define RFLAGS_ALIGNMENT_CHECK_FLAG_FLAG 0x40000
// #define RFLAGS_ALIGNMENT_CHECK_FLAG_MASK 0x01
// #define RFLAGS_ALIGNMENT_CHECK_FLAG(_) (((_) >> 18) & 0x01)
//
// /**
// * @brief Virtual interrupt flag
// *
// * [Bit 19] See the description in EFLAGS.
// */
// UINT64 VirtualInterruptFlag : 1;
// #define RFLAGS_VIRTUAL_INTERRUPT_FLAG_BIT 19
// #define RFLAGS_VIRTUAL_INTERRUPT_FLAG_FLAG 0x80000
// #define RFLAGS_VIRTUAL_INTERRUPT_FLAG_MASK 0x01
// #define RFLAGS_VIRTUAL_INTERRUPT_FLAG(_) (((_) >> 19) & 0x01)
//
// /**
// * @brief Virtual interrupt pending flag
// *
// * [Bit 20] See the description in EFLAGS.
// */
// UINT64 VirtualInterruptPendingFlag : 1;
// #define RFLAGS_VIRTUAL_INTERRUPT_PENDING_FLAG_BIT 20
// #define RFLAGS_VIRTUAL_INTERRUPT_PENDING_FLAG_FLAG 0x100000
// #define RFLAGS_VIRTUAL_INTERRUPT_PENDING_FLAG_MASK 0x01
// #define RFLAGS_VIRTUAL_INTERRUPT_PENDING_FLAG(_) (((_) >> 20) & 0x01)
//
// /**
// * @brief Identification flag
// *
// * [Bit 21] See the description in EFLAGS.
// */
// UINT64 IdentificationFlag : 1;
// #define RFLAGS_IDENTIFICATION_FLAG_BIT 21
// #define RFLAGS_IDENTIFICATION_FLAG_FLAG 0x200000
// #define RFLAGS_IDENTIFICATION_FLAG_MASK 0x01
// #define RFLAGS_IDENTIFICATION_FLAG(_) (((_) >> 21) & 0x01)
// UINT64 Reserved4 : 42;
// };
//
// UINT64 AsUInt;
// } RFLAGS;
/**
* @defgroup EXCEPTIONS \
* Exceptions
* @{
*/
/**
* @brief Control Protection Exception
*
* @see Vol1[18(CONTROL-FLOW ENFORCEMENT TECHNOLOGY (CET))] (reference)
*/
typedef union
{
struct
{
/**
 * [Bits 14:0] Control protection error code; one of the following values:
 * 1 - NEAR-RET: Indicates the \#CP was caused by a near RET instruction.
 * 2 - FAR-RET/IRET: Indicates the \#CP was caused by a FAR RET or IRET instruction.
 * 3 - ENDBRANCH: Indicates the \#CP was due to a missing ENDBRANCH at the target of
 *     an indirect call or jump instruction.
 * 4 - RSTORSSP: Indicates the \#CP was caused by a shadow-stack-restore token check
 *     failure in the RSTORSSP instruction.
 * 5 - SETSSBSY: Indicates the \#CP was caused by a supervisor shadow stack token
 *     check failure in the SETSSBSY instruction.
 */
UINT32 Cpec : 15;
#define CONTROL_PROTECTION_EXCEPTION_CPEC_BIT 0
#define CONTROL_PROTECTION_EXCEPTION_CPEC_FLAG 0x7FFF
#define CONTROL_PROTECTION_EXCEPTION_CPEC_MASK 0x7FFF
#define CONTROL_PROTECTION_EXCEPTION_CPEC(_) (((_) >> 0) & 0x7FFF)
/**
 * [Bit 15] If set to 1, indicates the \#CP occurred during enclave (SGX) execution.
 */
UINT32 Encl : 1;
#define CONTROL_PROTECTION_EXCEPTION_ENCL_BIT 15
#define CONTROL_PROTECTION_EXCEPTION_ENCL_FLAG 0x8000
#define CONTROL_PROTECTION_EXCEPTION_ENCL_MASK 0x01
#define CONTROL_PROTECTION_EXCEPTION_ENCL(_) (((_) >> 15) & 0x01)
/* [Bits 31:16] Reserved. */
UINT32 Reserved1 : 16;
};
/* Raw 32-bit view of the error code. */
UINT32 AsUInt;
} CONTROL_PROTECTION_EXCEPTION;
/**
* @brief Exceptions that can occur when the instruction is executed in protected mode.
* Each exception is given a mnemonic that consists of a pound sign (\#) followed by two
* letters and an optional error code in parentheses. For example, \#GP(0) denotes a general
* protection exception with an error code of 0
*
* @see Vol2A[3.1.1.13(Protected Mode Exceptions Section)] (reference)
* @see Vol3A[6.3.1(External Interrupts)] (reference)
*/
typedef enum
{
/**
 * #DE - Divide Error.
 * Source: DIV and IDIV instructions.
 * Error Code: No.
 */
DivideError = 0x00000000,
/**
 * #DB - Debug.
 * Source: Any code or data reference.
 * Error Code: No.
 */
Debug = 0x00000001,
/**
 * Nonmaskable Interrupt.
 * Source: Generated externally by asserting the processor's NMI pin or
 * through an NMI request set by the I/O APIC to the local APIC.
 * Error Code: No.
 */
Nmi = 0x00000002,
/**
 * #BP - Breakpoint.
 * Source: INT3 instruction.
 * Error Code: No.
 */
Breakpoint = 0x00000003,
/**
 * #OF - Overflow.
 * Source: INTO instruction.
 * Error Code: No.
 */
Overflow = 0x00000004,
/**
 * #BR - BOUND Range Exceeded.
 * Source: BOUND instruction.
 * Error Code: No.
 */
BoundRangeExceeded = 0x00000005,
/**
 * #UD - Invalid Opcode (Undefined Opcode).
 * Source: UD instruction or reserved opcode.
 * Error Code: No.
 */
InvalidOpcode = 0x00000006,
/**
 * #NM - Device Not Available (No Math Coprocessor).
 * Source: Floating-point or WAIT/FWAIT instruction.
 * Error Code: No.
 */
DeviceNotAvailable = 0x00000007,
/**
 * #DF - Double Fault.
 * Source: Any instruction that can generate an exception, an NMI, or an INTR.
 * Error Code: Yes (zero).
 */
DoubleFault = 0x00000008,
/**
 * Coprocessor Segment Overrun (reserved; no mnemonic).
 * Source: Floating-point instruction.
 * Error Code: No.
 *
 * @note Processors after the Intel386 processor do not generate this exception.
 */
CoprocessorSegmentOverrun = 0x00000009,
/**
 * #TS - Invalid TSS.
 * Source: Task switch or TSS access.
 * Error Code: Yes.
 */
InvalidTss = 0x0000000A,
/**
 * #NP - Segment Not Present.
 * Source: Loading segment registers or accessing system segments.
 * Error Code: Yes.
 */
SegmentNotPresent = 0x0000000B,
/**
 * #SS - Stack Segment Fault.
 * Source: Stack operations and SS register loads.
 * Error Code: Yes.
 */
StackSegmentFault = 0x0000000C,
/**
 * #GP - General Protection.
 * Source: Any memory reference and other protection checks.
 * Error Code: Yes.
 */
GeneralProtection = 0x0000000D,
/**
 * #PF - Page Fault.
 * Source: Any memory reference.
 * Error Code: Yes.
 */
PageFault = 0x0000000E,
/*
 * Vector 0x0F is Intel-reserved and therefore intentionally has no
 * enumerator here; the numbering continues at 0x10.
 */
/**
 * #MF - Floating-Point Error (Math Fault).
 * Source: Floating-point or WAIT/FWAIT instruction.
 * Error Code: No.
 */
X87FloatingPointError = 0x00000010,
/**
 * #AC - Alignment Check.
 * Source: Any data reference in memory.
 * Error Code: Yes.
 */
AlignmentCheck = 0x00000011,
/**
 * #MC - Machine Check.
 * Source: Model dependent machine check errors.
 * Error Code: No.
 */
MachineCheck = 0x00000012,
/**
 * #XM - SIMD Floating-Point Numeric Error.
 * Source: SSE/SSE2/SSE3 floating-point instructions.
 * Error Code: No.
 */
SimdFloatingPointError = 0x00000013,
/**
 * #VE - Virtualization Exception.
 * Source: EPT violations.
 * Error Code: No.
 */
VirtualizationException = 0x00000014,
/**
 * #CP - Control Protection Exception.
 * Source: Control flow transfer attempt violated the control flow enforcement
 * technology constraints.
 * Error Code: Yes.
 */
ControlProtection = 0x00000015,
} EXCEPTION_VECTOR;
/**
* @brief When an exception condition is related to a specific segment selector or IDT vector, the
* processor pushes an error code onto the stack of the exception handler (whether it is a procedure
* or task). The error code resembles a segment selector; however, instead of a TI flag and RPL
* field, the error code contains 3 different flags
*
* @see Vol3A[6.13(ERROR CODE)] (reference)
*/
typedef union
{
struct
{
/**
 * [Bit 0] EXT. When set, indicates that the exception occurred during delivery of an
 * event external to the program, such as an interrupt or an earlier exception. The
 * bit is cleared if the exception occurred during delivery of a software interrupt
 * (INT n, INT3, or INTO).
 */
UINT32 ExternalEvent : 1;
#define EXCEPTION_ERROR_CODE_EXTERNAL_EVENT_BIT 0
#define EXCEPTION_ERROR_CODE_EXTERNAL_EVENT_FLAG 0x01
#define EXCEPTION_ERROR_CODE_EXTERNAL_EVENT_MASK 0x01
#define EXCEPTION_ERROR_CODE_EXTERNAL_EVENT(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] IDT flag. When set, indicates that the index portion of the error code
 * refers to a gate descriptor in the IDT; when clear, indicates that the index
 * refers to a descriptor in the GDT or the current LDT.
 */
UINT32 DescriptorLocation : 1;
#define EXCEPTION_ERROR_CODE_DESCRIPTOR_LOCATION_BIT 1
#define EXCEPTION_ERROR_CODE_DESCRIPTOR_LOCATION_FLAG 0x02
#define EXCEPTION_ERROR_CODE_DESCRIPTOR_LOCATION_MASK 0x01
#define EXCEPTION_ERROR_CODE_DESCRIPTOR_LOCATION(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] TI flag. Only used when the IDT flag (bit 1) is clear. When set, indicates
 * that the index portion of the error code refers to a segment or gate descriptor
 * in the LDT; when clear, it indicates that the index refers to a descriptor in the
 * current GDT.
 */
UINT32 GdtLdt : 1;
#define EXCEPTION_ERROR_CODE_GDT_LDT_BIT 2
#define EXCEPTION_ERROR_CODE_GDT_LDT_FLAG 0x04
#define EXCEPTION_ERROR_CODE_GDT_LDT_MASK 0x01
#define EXCEPTION_ERROR_CODE_GDT_LDT(_) (((_) >> 2) & 0x01)
/**
 * [Bits 15:3] The segment selector index field provides an index into the IDT, GDT,
 * or current LDT to the segment or gate selector being referenced by the error
 * code. In some cases the error code is null (all bits are clear except possibly
 * EXT). A null error code indicates that the error was not caused by a reference to
 * a specific segment or that a null segment selector was referenced in an
 * operation.
 *
 * @note The format of the error code is different for page-fault exceptions (#PF);
 *       see PAGE_FAULT_EXCEPTION.
 */
UINT32 Index : 13;
#define EXCEPTION_ERROR_CODE_INDEX_BIT 3
#define EXCEPTION_ERROR_CODE_INDEX_FLAG 0xFFF8
#define EXCEPTION_ERROR_CODE_INDEX_MASK 0x1FFF
#define EXCEPTION_ERROR_CODE_INDEX(_) (((_) >> 3) & 0x1FFF)
/* [Bits 31:16] Reserved. */
UINT32 Reserved1 : 16;
};
/* Raw 32-bit view of the error code. */
UINT32 AsUInt;
} EXCEPTION_ERROR_CODE;
/**
* @brief Page fault exception
*
* @see Vol3A[4.7(PAGE-FAULT EXCEPTIONS)] (reference)
*/
typedef union
{
struct
{
/**
 * [Bit 0] This flag is 0 if there is no translation for the linear address because
 * the P flag was 0 in one of the paging-structure entries used to translate that
 * address.
 */
UINT32 Present : 1;
#define PAGE_FAULT_EXCEPTION_PRESENT_BIT 0
#define PAGE_FAULT_EXCEPTION_PRESENT_FLAG 0x01
#define PAGE_FAULT_EXCEPTION_PRESENT_MASK 0x01
#define PAGE_FAULT_EXCEPTION_PRESENT(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] If the access causing the page-fault exception was a write, this flag is
 * 1; otherwise, it is 0. This flag describes the access causing the page-fault
 * exception, not the access rights specified by paging.
 */
UINT32 Write : 1;
#define PAGE_FAULT_EXCEPTION_WRITE_BIT 1
#define PAGE_FAULT_EXCEPTION_WRITE_FLAG 0x02
#define PAGE_FAULT_EXCEPTION_WRITE_MASK 0x01
#define PAGE_FAULT_EXCEPTION_WRITE(_) (((_) >> 1) & 0x01)
/**
 * [Bit 2] If a user-mode access caused the page-fault exception, this flag is 1; it
 * is 0 if a supervisor-mode access did so. This flag describes the access causing
 * the page-fault exception, not the access rights specified by paging.
 *
 * @see Vol3A[4.6(ACCESS RIGHTS)]
 */
UINT32 UserModeAccess : 1;
#define PAGE_FAULT_EXCEPTION_USER_MODE_ACCESS_BIT 2
#define PAGE_FAULT_EXCEPTION_USER_MODE_ACCESS_FLAG 0x04
#define PAGE_FAULT_EXCEPTION_USER_MODE_ACCESS_MASK 0x01
#define PAGE_FAULT_EXCEPTION_USER_MODE_ACCESS(_) (((_) >> 2) & 0x01)
/**
 * [Bit 3] This flag is 1 if there is no translation for the linear address because
 * a reserved bit was set in one of the paging-structure entries used to translate
 * that address. (Because reserved bits are not checked in a paging-structure entry
 * whose P flag is 0, bit 3 of the error code can be set only if bit 0 is also set).
 * Bits reserved in the paging-structure entries are reserved for future
 * functionality. Software developers should be aware that such bits may be used in
 * the future and that a paging-structure entry that causes a page-fault exception
 * on one processor might not do so in the future.
 */
UINT32 ReservedBitViolation : 1;
#define PAGE_FAULT_EXCEPTION_RESERVED_BIT_VIOLATION_BIT 3
#define PAGE_FAULT_EXCEPTION_RESERVED_BIT_VIOLATION_FLAG 0x08
#define PAGE_FAULT_EXCEPTION_RESERVED_BIT_VIOLATION_MASK 0x01
#define PAGE_FAULT_EXCEPTION_RESERVED_BIT_VIOLATION(_) (((_) >> 3) & 0x01)
/**
 * [Bit 4] This flag is 1 if (1) the access causing the page-fault exception was an
 * instruction fetch; and (2) either (a) CR4.SMEP = 1; or (b) both (i) CR4.PAE = 1
 * (either PAE paging or 4-level paging is in use); and (ii) IA32_EFER.NXE = 1.
 * Otherwise, the flag is 0. This flag describes the access causing the page-fault
 * exception, not the access rights specified by paging.
 */
UINT32 Execute : 1;
#define PAGE_FAULT_EXCEPTION_EXECUTE_BIT 4
#define PAGE_FAULT_EXCEPTION_EXECUTE_FLAG 0x10
#define PAGE_FAULT_EXCEPTION_EXECUTE_MASK 0x01
#define PAGE_FAULT_EXCEPTION_EXECUTE(_) (((_) >> 4) & 0x01)
/**
 * [Bit 5] This flag is 1 if (1) IA32_EFER.LMA = CR4.PKE = 1; (2) the access causing
 * the page-fault exception was a data access; (3) the linear address was a
 * user-mode address with protection key i; and (4) the PKRU register is such that
 * either (a) ADi = 1; or (b) the following all hold: (i) WDi = 1; (ii) the access
 * is a write access; and (iii) either CR0.WP = 1 or the access causing the
 * page-fault exception was a user-mode access.
 *
 * @see Vol3A[4.6.2(Protection Keys)]
 */
UINT32 ProtectionKeyViolation : 1;
#define PAGE_FAULT_EXCEPTION_PROTECTION_KEY_VIOLATION_BIT 5
#define PAGE_FAULT_EXCEPTION_PROTECTION_KEY_VIOLATION_FLAG 0x20
#define PAGE_FAULT_EXCEPTION_PROTECTION_KEY_VIOLATION_MASK 0x01
#define PAGE_FAULT_EXCEPTION_PROTECTION_KEY_VIOLATION(_) (((_) >> 5) & 0x01)
/**
 * [Bit 6] If the access causing the page-fault exception was a shadow-stack access
 * (including shadow-stack accesses in enclave mode), this flag is 1; otherwise, it
 * is 0. This flag describes the access causing the page-fault exception, not the
 * access rights specified by paging.
 *
 * @see Vol1[18(CONTROL-FLOW ENFORCEMENT TECHNOLOGY (CET))]
 */
UINT32 ShadowStack : 1;
#define PAGE_FAULT_EXCEPTION_SHADOW_STACK_BIT 6
#define PAGE_FAULT_EXCEPTION_SHADOW_STACK_FLAG 0x40
#define PAGE_FAULT_EXCEPTION_SHADOW_STACK_MASK 0x01
#define PAGE_FAULT_EXCEPTION_SHADOW_STACK(_) (((_) >> 6) & 0x01)
/**
 * [Bit 7] This flag is 1 if there is no translation for the linear address using
 * HLAT paging because, in one of the paging-structure entries used to translate
 * that address, either the P flag was 0 or a reserved bit was set. An error code
 * will set this flag only if it clears bit 0 or sets bit 3. This flag will not be
 * set by a page fault resulting from a violation of access rights, nor for one
 * encountered during ordinary paging, including the case in which there has been a
 * restart of HLAT paging.
 *
 * @see Vol3A[4.5.1(Ordinary Paging and HLAT Paging)]
 */
UINT32 Hlat : 1;
#define PAGE_FAULT_EXCEPTION_HLAT_BIT 7
#define PAGE_FAULT_EXCEPTION_HLAT_FLAG 0x80
#define PAGE_FAULT_EXCEPTION_HLAT_MASK 0x01
#define PAGE_FAULT_EXCEPTION_HLAT(_) (((_) >> 7) & 0x01)
/* [Bits 14:8] Reserved. */
UINT32 Reserved1 : 7;
/**
 * [Bit 15] This flag is 1 if the exception is unrelated to paging and resulted from
 * violation of SGX-specific access-control requirements. Because such a violation
 * can occur only if there is no ordinary page fault, this flag is set only if the P
 * flag (bit 0) is 1 and the RSVD flag (bit 3) and the PK flag (bit 5) are both 0.
 */
UINT32 Sgx : 1;
#define PAGE_FAULT_EXCEPTION_SGX_BIT 15
#define PAGE_FAULT_EXCEPTION_SGX_FLAG 0x8000
#define PAGE_FAULT_EXCEPTION_SGX_MASK 0x01
#define PAGE_FAULT_EXCEPTION_SGX(_) (((_) >> 15) & 0x01)
/* [Bits 31:16] Reserved. */
UINT32 Reserved2 : 16;
};
/* Raw 32-bit view of the #PF error code. */
UINT32 AsUInt;
} PAGE_FAULT_EXCEPTION;
/**
* @}
*/
/**
* @defgroup MEMORY_TYPE \
* Memory caching type
*
* The processor allows any area of system memory to be cached in the L1, L2, and L3 caches. In
* individual pages or regions of system memory, it allows the type of caching (also called memory
* type) to be specified.
*
* @see Vol3A[11.11(MEMORY TYPE RANGE REGISTERS (MTRRS))]
* @see Vol3A[11.5(CACHE CONTROL)]
* @see Vol3A[11.3(METHODS OF CACHING AVAILABLE)] (reference)
* @{
*/
/**
 * @brief Strong Uncacheable (UC)
 *
 * System memory locations are not cached. All reads and writes appear on the system bus and are
 * executed in program order without reordering. No speculative memory accesses, page-table walks, or
 * prefetches of speculated branch targets are made. This type of cache-control is useful for
 * memory-mapped I/O devices. When used with normal RAM, it greatly reduces processor performance.
 */
#define MEMORY_TYPE_UNCACHEABLE 0x00000000
/**
 * @brief Write Combining (WC)
 *
 * System memory locations are not cached (as with uncacheable memory) and coherency is not enforced
 * by the processor's bus coherency protocol. Speculative reads are allowed. Writes may be delayed
 * and combined in the write combining buffer (WC buffer) to reduce memory accesses. If the WC
 * buffer is partially filled, the writes may be delayed until the next occurrence of a serializing
 * event; such as, an SFENCE or MFENCE instruction, CPUID execution, a read or write to uncached
 * memory, an interrupt occurrence, or a LOCK instruction execution. This type of cache-control is
 * appropriate for video frame buffers, where the order of writes is unimportant as long as the
 * writes update memory so they can be seen on the graphics display. This memory type is available
 * in the Pentium Pro and Pentium II processors by programming the MTRRs; or in processor families
 * starting from the Pentium III processors by programming the MTRRs or by selecting it through the
 * PAT.
 *
 * @see Vol3A[11.3.1(Buffering of Write Combining Memory Locations)]
 */
#define MEMORY_TYPE_WRITE_COMBINING 0x00000001
/**
 * @brief Write-through (WT)
 *
 * Writes and reads to and from system memory are cached. Reads come from cache lines on cache hits;
 * read misses cause cache fills. Speculative reads are allowed. All writes are written to a cache
 * line (when possible) and through to system memory. When writing through to memory, invalid cache
 * lines are never filled, and valid cache lines are either filled or invalidated. Write combining
 * is allowed. This type of cache-control is appropriate for frame buffers or when there are devices
 * on the system bus that access system memory, but do not perform snooping of memory accesses. It
 * enforces coherency between caches in the processors and system memory.
 *
 * @note Encodings 0x02 and 0x03 are reserved, hence the gap between WC (0x01) and WT (0x04).
 */
#define MEMORY_TYPE_WRITE_THROUGH 0x00000004
/**
 * @brief Write protected (WP)
 *
 * Reads come from cache lines when possible, and read misses cause cache fills. Writes are
 * propagated to the system bus and cause corresponding cache lines on all processors on the bus to
 * be invalidated. Speculative reads are allowed. This memory type is available in processor
 * families starting from the P6 family processors by programming the MTRRs.
 */
#define MEMORY_TYPE_WRITE_PROTECTED 0x00000005
/**
 * @brief Write-back (WB)
 *
 * Writes and reads to and from system memory are cached. Reads come from cache lines on cache hits;
 * read misses cause cache fills. Speculative reads are allowed. Write misses cause cache line fills
 * (in processor families starting with the P6 family processors), and writes are performed entirely
 * in the cache, when possible. Write combining is allowed. The write-back memory type reduces bus
 * traffic by eliminating many unnecessary writes to system memory. Writes to a cache line are not
 * immediately forwarded to system memory; instead, they are accumulated in the cache. The modified
 * cache lines are written to system memory later, when a write-back operation is performed.
 * Write-back operations are triggered when cache lines need to be deallocated, such as when new
 * cache lines are being allocated in a cache that is already full. They also are triggered by the
 * mechanisms used to maintain cache consistency. This type of cache-control provides the best
 * performance, but it requires that all devices that access system memory on the system bus be able
 * to snoop memory accesses to insure system memory and cache coherency.
 */
#define MEMORY_TYPE_WRITE_BACK 0x00000006
/**
 * @brief Uncacheable (UC-)
 *
 * Has same characteristics as the strong uncacheable (UC) memory type, except that this memory type
 * can be overridden by programming the MTRRs for the WC memory type. This memory type is available
 * in processor families starting from the Pentium III processors and can only be selected through
 * the PAT.
 */
#define MEMORY_TYPE_UNCACHEABLE_MINUS 0x00000007
/**
 * @brief Invalid memory type (sentinel)
 *
 * Not an architectural encoding; 0xFF is outside the defined memory-type range and is
 * used by software as an "invalid/unknown memory type" marker.
 */
#define MEMORY_TYPE_INVALID 0x000000FF
/**
* @}
*/
/**
* @defgroup VTD \
* VTD
* @{
*/
/**
* @brief The Root Table Address Register points to a table of root-entries, when the Translation
* Table Mode (TTM) field in the register is 00b
*
* @see VTd[9.1(Root Entry)]
*/
typedef struct
{
/* Low 64 bits of the 128-bit root entry. */
union
{
struct
{
/**
 * [Bit 0] This field indicates whether the root-entry is present.
 * * 0: Indicates the root-entry is not present. All other fields are
 * ignored by hardware.
 * * 1: Indicates the root-entry is present.
 *
 * NOTE: the generic VTD_Lower64_* macro names below are redefined
 * (token-identically, which the preprocessor permits) by the following
 * VT-d context-entry definition in this file.
 */
UINT64 Present : 1;
#define VTD_Lower64_PRESENT_BIT 0
#define VTD_Lower64_PRESENT_FLAG 0x01
#define VTD_Lower64_PRESENT_MASK 0x01
#define VTD_Lower64_PRESENT(_) (((_) >> 0) & 0x01)
/* [Bits 11:1] Reserved. */
UINT64 Reserved1 : 11;
/**
 * [Bits 63:12] Pointer to Context-table for this bus (4KB-aligned physical
 * page frame number). The Context-table is 4KB in size and size aligned.
 * Hardware treats bits 63:HAW as reserved (0), where HAW is the host
 * address width of the platform.
 */
UINT64 ContextTablePointer : 52;
#define VTD_Lower64_CONTEXT_TABLE_POINTER_BIT 12
#define VTD_Lower64_CONTEXT_TABLE_POINTER_FLAG 0xFFFFFFFFFFFFF000
#define VTD_Lower64_CONTEXT_TABLE_POINTER_MASK 0xFFFFFFFFFFFFF
#define VTD_Lower64_CONTEXT_TABLE_POINTER(_) (((_) >> 12) & 0xFFFFFFFFFFFFF)
};
/* Raw view of the low quadword. */
UINT64 AsUInt;
} Lower64;
/* High 64 bits of the 128-bit root entry. */
union
{
struct
{
/**
 * [Bits 63:0] Reserved. Must be 0.
 */
UINT64 Reserved : 64;
#define VTD_Upper64_RESERVED_BIT 0
#define VTD_Upper64_RESERVED_FLAG 0xFFFFFFFFFFFFFFFF
#define VTD_Upper64_RESERVED_MASK 0xFFFFFFFFFFFFFFFF
#define VTD_Upper64_RESERVED(_) (((_) >> 0) & 0xFFFFFFFFFFFFFFFF)
};
/* Raw view of the high quadword. */
UINT64 AsUInt;
} Upper64;
} VTD_ROOT_ENTRY;
/**
 * @brief Context-entries support translation of requests-without-PASID. Context-entries are
 * referenced through root-entries
 *
 * A context-entry is 128 bits wide, modeled here as two 64-bit halves
 * (Lower64 / Upper64). The *_BIT/_FLAG/_MASK macros operate on the raw
 * 64-bit AsUInt value of the corresponding half.
 *
 * @see VTd[9.3(Context Entry)]
 */
typedef struct
{
/* Low 64 bits (bits 63:0) of the 128-bit context-entry. */
union
{
struct
{
/**
 * [Bit 0]
 * * 0: Indicates the context-entry is not present. All other fields except
 * Fault Processing Disable (FPD) field are ignored by hardware.
 * * 1: Indicates the context-entry is present.
 */
UINT64 Present : 1;
#define VTD_Lower64_PRESENT_BIT 0
#define VTD_Lower64_PRESENT_FLAG 0x01
#define VTD_Lower64_PRESENT_MASK 0x01
#define VTD_Lower64_PRESENT(_) (((_) >> 0) & 0x01)
/**
 * [Bit 1] Enables or disables recording/reporting of qualified
 * non-recoverable faults.
 * * 0: Qualified non-recoverable faults are recorded/reported for requests
 * processed through this context-entry.
 * * 1: Qualified non-recoverable faults are not recorded/reported for
 * requests processed through this context-entry. This field is evaluated by
 * hardware irrespective of the setting of the present (P) field.
 */
UINT64 FaultProcessingDisable : 1;
#define VTD_Lower64_FAULT_PROCESSING_DISABLE_BIT 1
#define VTD_Lower64_FAULT_PROCESSING_DISABLE_FLAG 0x02
#define VTD_Lower64_FAULT_PROCESSING_DISABLE_MASK 0x01
#define VTD_Lower64_FAULT_PROCESSING_DISABLE(_) (((_) >> 1) & 0x01)
/**
 * [Bits 3:2] This field is applicable only for requests-without-PASID, as
 * hardware blocks all requests-with-PASID in legacy mode before they can
 * use context table.
 * * 00b: Untranslated requests are translated using second-level paging
 * structures referenced through SLPTPTR field. Translated requests and
 * Translation Requests are blocked.
 * * 01b: Untranslated, Translated and Translation Requests are supported.
 * This encoding is treated as reserved by hardware implementations not
 * supporting Device-TLBs (DT=0 in Extended Capability Register).
 * * 10b: Untranslated requests are processed as pass-through. SLPTPTR field
 * is ignored by hardware. Translated and Translation Requests are blocked.
 * This encoding is treated by hardware as reserved for hardware
 * implementations not supporting Pass Through (PT=0 in Extended Capability
 * Register).
 * * 11b: Reserved.
 */
UINT64 TranslationType : 2;
#define VTD_Lower64_TRANSLATION_TYPE_BIT 2
#define VTD_Lower64_TRANSLATION_TYPE_FLAG 0x0C
#define VTD_Lower64_TRANSLATION_TYPE_MASK 0x03
#define VTD_Lower64_TRANSLATION_TYPE(_) (((_) >> 2) & 0x03)
/**
 * [Bits 11:4] Reserved.
 */
UINT64 Reserved1 : 8;
/**
 * [Bits 63:12] When the Translation-Type (TT) field is 00b or 01b, this
 * field points to the base of second level paging entries (described in
 * Section 9.8). Hardware treats bits 63:HAW as reserved (0), where HAW is
 * the host address width of the platform. This field is ignored by hardware
 * when Translation-Type (TT) field is 10b (pass-through).
 */
UINT64 SecondLevelPageTranslationPointer : 52;
#define VTD_Lower64_SECOND_LEVEL_PAGE_TRANSLATION_POINTER_BIT 12
#define VTD_Lower64_SECOND_LEVEL_PAGE_TRANSLATION_POINTER_FLAG 0xFFFFFFFFFFFFF000
#define VTD_Lower64_SECOND_LEVEL_PAGE_TRANSLATION_POINTER_MASK 0xFFFFFFFFFFFFF
#define VTD_Lower64_SECOND_LEVEL_PAGE_TRANSLATION_POINTER(_) (((_) >> 12) & 0xFFFFFFFFFFFFF)
};
UINT64 AsUInt;
} Lower64;
/* High 64 bits (bits 127:64) of the 128-bit context-entry. */
union
{
struct
{
/**
 * [Bits 2:0] When the Translation-type (TT) field is 00b or 01b, this field
 * indicates the adjusted guest address-width (AGAW) to be used by hardware
 * for the second-level page-table walk. The following encodings are defined
 * for this field:
 * * 000b: Reserved
 * * 001b: 39-bit AGAW (3-level page table)
 * * 010b: 48-bit AGAW (4-level page table)
 * * 011b: 57-bit AGAW (5-level page table)
 * * 100b-111b: Reserved
 * The value specified in this field must match an AGAW value supported by
 * hardware (as reported in the SAGAW field in the Capability Register).
 * When the Translation-type (TT) field indicates pass-through processing
 * (10b), this field must be programmed to indicate the largest AGAW value
 * supported by hardware. Untranslated requests-without-PASID processed
 * through this context-entry and accessing addresses above 2X-1 (where X is
 * the AGAW value indicated by this field) are blocked and treated as
 * translation faults.
 */
UINT64 AddressWidth : 3;
#define VTD_Upper64_ADDRESS_WIDTH_BIT 0
#define VTD_Upper64_ADDRESS_WIDTH_FLAG 0x07
#define VTD_Upper64_ADDRESS_WIDTH_MASK 0x07
#define VTD_Upper64_ADDRESS_WIDTH(_) (((_) >> 0) & 0x07)
/**
 * [Bits 6:3] Hardware ignores the programming of this field.
 */
UINT64 Ignored : 4;
#define VTD_Upper64_IGNORED_BIT 3
#define VTD_Upper64_IGNORED_FLAG 0x78
#define VTD_Upper64_IGNORED_MASK 0x0F
#define VTD_Upper64_IGNORED(_) (((_) >> 3) & 0x0F)
/**
 * [Bit 7] Reserved.
 */
UINT64 Reserved1 : 1;
/**
 * [Bits 17:8] Identifier for the domain to which this context-entry maps.
 * Hardware may use the domain identifier to tag its internal caches. The
 * Capability Register reports the domain-id width supported by hardware.
 * For implementations supporting less than 16-bit domain-ids, unused bits
 * of this field are treated as reserved by hardware. For example, for
 * implementation supporting 8-bit domain-ids, bits 87:80 of this field are
 * treated as reserved. Context-entries programmed with the same domain
 * identifier must always reference same address translation (SLPTPTR
 * field). Context-entries referencing same address translation are
 * recommended to be programmed with same domain id for hardware efficiency.
 * When Caching Mode (CM) field in Capability Register is reported as Set,
 * the domain-id value of zero is architecturally reserved. Software must
 * not use domain-id value of zero when CM is Set.
 */
UINT64 DomainIdentifier : 10;
#define VTD_Upper64_DOMAIN_IDENTIFIER_BIT 8
#define VTD_Upper64_DOMAIN_IDENTIFIER_FLAG 0x3FF00
#define VTD_Upper64_DOMAIN_IDENTIFIER_MASK 0x3FF
#define VTD_Upper64_DOMAIN_IDENTIFIER(_) (((_) >> 8) & 0x3FF)
/**
 * [Bits 63:18] Reserved.
 */
UINT64 Reserved2 : 46;
};
UINT64 AsUInt;
} Upper64;
} VTD_CONTEXT_ENTRY;
/**
 * @defgroup VTD_ENTRY_COUNT \
 * Table entry counts
 *
 * Table entry counts. Both tables hold 0x100 (256) entries: the root table
 * has one entry per PCI bus (indexed by the 8-bit bus number; each root
 * entry points to the context table for that bus), and each context table
 * holds 256 entries — presumably indexed by the 8-bit device:function
 * number; see VTd[9.3(Context Entry)] to confirm.
 * @{
 */
#define VTD_ROOT_ENTRY_COUNT 0x00000100
#define VTD_CONTEXT_ENTRY_COUNT 0x00000100
/**
 * @}
 */
/**
 * Register to report the implementation version. Backward compatibility for the architecture is
 * maintained with new revision numbers, allowing software to load remapping hardware drivers
 * written for prior versions.
 *
 * @remarks VER_REG
 * @see VTd[10.4.1(Version Register)]
 */
/* Byte offset of the 32-bit VER_REG from the remapping unit's register base. */
#define VTD_VERSION 0x00000000
typedef union
{
struct
{
/**
 * @brief Minor Version number (RO)
 *
 * [Bits 3:0] Indicates Minor Version of Implementation.
 */
UINT32 Minor : 4;
#define VTD_VERSION_MINOR_BIT 0
#define VTD_VERSION_MINOR_FLAG 0x0F
#define VTD_VERSION_MINOR_MASK 0x0F
#define VTD_VERSION_MINOR(_) (((_) >> 0) & 0x0F)
/**
 * @brief Major Version number (RO)
 *
 * [Bits 7:4] Indicates Major Version of Implementation.
 */
UINT32 Major : 4;
#define VTD_VERSION_MAJOR_BIT 4
#define VTD_VERSION_MAJOR_FLAG 0xF0
#define VTD_VERSION_MAJOR_MASK 0x0F
#define VTD_VERSION_MAJOR(_) (((_) >> 4) & 0x0F)
/**
 * [Bits 31:8] Reserved.
 */
UINT32 Reserved1 : 24;
};
UINT32 AsUInt;
} VTD_VERSION_REGISTER;
/**
 * Register to report general remapping hardware capabilities.
 *
 * The *_BIT/_FLAG/_MASK macros operate on the raw 64-bit AsUInt value.
 *
 * @remarks CAP_REG
 * @see VTd[10.4.2(Capability Register)]
 */
/* Byte offset of the 64-bit CAP_REG from the remapping unit's register base. */
#define VTD_CAPABILITY 0x00000008
typedef union
{
struct
{
/**
 * @brief Number of domains supported (RO)
 *
 * [Bits 2:0]
 * * 000b: Hardware supports 4-bit domain-ids with support for up to 16 domains.
 * * 001b: Hardware supports 6-bit domain-ids with support for up to 64 domains.
 * * 010b: Hardware supports 8-bit domain-ids with support for up to 256 domains.
 * * 011b: Hardware supports 10-bit domain-ids with support for up to 1024 domains.
 * * 100b: Hardware supports 12-bit domain-ids with support for up to 4K domains.
 * * 101b: Hardware supports 14-bit domain-ids with support for up to 16K domains.
 * * 110b: Hardware supports 16-bit domain-ids with support for up to 64K domains.
 * * 111b: Reserved.
 */
UINT64 NumberOfDomainsSupported : 3;
#define VTD_CAPABILITY_NUMBER_OF_DOMAINS_SUPPORTED_BIT 0
#define VTD_CAPABILITY_NUMBER_OF_DOMAINS_SUPPORTED_FLAG 0x07
#define VTD_CAPABILITY_NUMBER_OF_DOMAINS_SUPPORTED_MASK 0x07
#define VTD_CAPABILITY_NUMBER_OF_DOMAINS_SUPPORTED(_) (((_) >> 0) & 0x07)
/**
 * @brief Advanced Fault Logging (RO)
 *
 * [Bit 3]
 * * 0: Indicates advanced fault logging is not supported. Only primary fault
 * logging is supported.
 * * 1: Indicates advanced fault logging is supported.
 */
UINT64 AdvancedFaultLogging : 1;
#define VTD_CAPABILITY_ADVANCED_FAULT_LOGGING_BIT 3
#define VTD_CAPABILITY_ADVANCED_FAULT_LOGGING_FLAG 0x08
#define VTD_CAPABILITY_ADVANCED_FAULT_LOGGING_MASK 0x01
#define VTD_CAPABILITY_ADVANCED_FAULT_LOGGING(_) (((_) >> 3) & 0x01)
/**
 * @brief Required Write-Buffer Flushing (RO)
 *
 * [Bit 4]
 * * 0: Indicates no write-buffer flushing is needed to ensure changes to
 * memory-resident structures are visible to hardware.
 * * 1: Indicates software must explicitly flush the write buffers to ensure updates
 * made to memory-resident remapping structures are visible to hardware.
 */
UINT64 RequiredWriteBufferFlushing : 1;
#define VTD_CAPABILITY_REQUIRED_WRITE_BUFFER_FLUSHING_BIT 4
#define VTD_CAPABILITY_REQUIRED_WRITE_BUFFER_FLUSHING_FLAG 0x10
#define VTD_CAPABILITY_REQUIRED_WRITE_BUFFER_FLUSHING_MASK 0x01
#define VTD_CAPABILITY_REQUIRED_WRITE_BUFFER_FLUSHING(_) (((_) >> 4) & 0x01)
/**
 * @brief Protected Low-Memory Region (RO)
 *
 * [Bit 5]
 * * 0: Indicates protected low-memory region is not supported.
 * * 1: Indicates protected low-memory region is supported.
 */
UINT64 ProtectedLowMemoryRegion : 1;
#define VTD_CAPABILITY_PROTECTED_LOW_MEMORY_REGION_BIT 5
#define VTD_CAPABILITY_PROTECTED_LOW_MEMORY_REGION_FLAG 0x20
#define VTD_CAPABILITY_PROTECTED_LOW_MEMORY_REGION_MASK 0x01
#define VTD_CAPABILITY_PROTECTED_LOW_MEMORY_REGION(_) (((_) >> 5) & 0x01)
/**
 * @brief Protected High-Memory Region (RO)
 *
 * [Bit 6]
 * * 0: Indicates protected high-memory region is not supported.
 * * 1: Indicates protected high-memory region is supported.
 */
UINT64 ProtectedHighMemoryRegion : 1;
#define VTD_CAPABILITY_PROTECTED_HIGH_MEMORY_REGION_BIT 6
#define VTD_CAPABILITY_PROTECTED_HIGH_MEMORY_REGION_FLAG 0x40
#define VTD_CAPABILITY_PROTECTED_HIGH_MEMORY_REGION_MASK 0x01
#define VTD_CAPABILITY_PROTECTED_HIGH_MEMORY_REGION(_) (((_) >> 6) & 0x01)
/**
 * @brief Caching Mode (RO)
 *
 * [Bit 7] This field applies to all DMA and Interrupt remap tables except FL tables.
 * Hardware will not cache faulting FL-only translations in IOTLB or
 * FL-paging-structure caches.
 * * 0: Not-present and erroneous entries are not cached in any of the remapping
 * caches. Invalidations are not required for modifications to individual not
 * present or invalid entries. However, any modifications that result in decreasing
 * the effective permissions or partial permission increases require invalidations
 * for them to be effective.
 * * 1: Not-present and erroneous mappings may be cached in the remapping caches.
 * Any software updates to the remapping structures (including updates to
 * "not-present" or erroneous entries) require explicit invalidation.
 */
UINT64 CachingMode : 1;
#define VTD_CAPABILITY_CACHING_MODE_BIT 7
#define VTD_CAPABILITY_CACHING_MODE_FLAG 0x80
#define VTD_CAPABILITY_CACHING_MODE_MASK 0x01
#define VTD_CAPABILITY_CACHING_MODE(_) (((_) >> 7) & 0x01)
/**
 * @brief Supported Adjusted Guest Address Widths (RO)
 *
 * [Bits 12:8] This 5-bit field indicates the supported adjusted guest address
 * widths (which in turn represents the levels of page-table walks for the 4KB base
 * page size) supported by the hardware implementation. A value of 1 in any of these
 * bits indicates the corresponding adjusted guest address width is supported. The
 * adjusted guest address widths corresponding to various bit positions within this
 * field are:
 * * 0: Reserved
 * * 1: 39-bit AGAW (3-level page-table)
 * * 2: 48-bit AGAW (4-level page-table)
 * * 3: 57-bit AGAW (5-level page-table)
 * * 4: Reserved
 * Software must ensure that the adjusted guest address width used to set up the
 * page tables is one of the supported guest address widths reported in this field.
 * Hardware implementations reporting second-level translation support (SLTS) field
 * as Clear also report this field as 0.
 */
UINT64 SupportedAdjustedGuestAddressWidths : 5;
#define VTD_CAPABILITY_SUPPORTED_ADJUSTED_GUEST_ADDRESS_WIDTHS_BIT 8
#define VTD_CAPABILITY_SUPPORTED_ADJUSTED_GUEST_ADDRESS_WIDTHS_FLAG 0x1F00
#define VTD_CAPABILITY_SUPPORTED_ADJUSTED_GUEST_ADDRESS_WIDTHS_MASK 0x1F
#define VTD_CAPABILITY_SUPPORTED_ADJUSTED_GUEST_ADDRESS_WIDTHS(_) (((_) >> 8) & 0x1F)
/**
 * [Bits 15:13] Reserved.
 */
UINT64 Reserved1 : 3;
/**
 * @brief Maximum Guest Address Width (RO)
 *
 * [Bits 21:16] This field indicates the maximum guest physical address width
 * supported by second-level translation in remapping hardware. The Maximum Guest
 * Address Width (MGAW) is computed as (N+1), where N is the value reported in this
 * field. For example, a hardware implementation supporting 48-bit MGAW reports a
 * value of 47 (101111b) in this field. If the value in this field is X,
 * untranslated DMA requests with addresses above 2(X+1)-1 that are subjected to
 * second-level translation are blocked by hardware. Device-TLB translation requests
 * to addresses above 2(X+1)-1 that are subjected to second-level translation from
 * allowed devices return a null Translation-Completion Data with R=W=0. Guest
 * addressability for a given DMA request is limited to the minimum of the value
 * reported through this field and the adjusted guest address width of the
 * corresponding page-table structure. (Adjusted guest address widths supported by
 * hardware are reported through the SAGAW field).
 * Implementations must support MGAW at least equal to the physical addressability
 * (host address width) of the platform.
 */
UINT64 MaximumGuestAddressWidth : 6;
#define VTD_CAPABILITY_MAXIMUM_GUEST_ADDRESS_WIDTH_BIT 16
#define VTD_CAPABILITY_MAXIMUM_GUEST_ADDRESS_WIDTH_FLAG 0x3F0000
#define VTD_CAPABILITY_MAXIMUM_GUEST_ADDRESS_WIDTH_MASK 0x3F
#define VTD_CAPABILITY_MAXIMUM_GUEST_ADDRESS_WIDTH(_) (((_) >> 16) & 0x3F)
/**
 * @brief Zero Length Read (RO)
 *
 * [Bit 22]
 * * 0: Indicates the remapping hardware unit blocks (and treats as fault) zero
 * length DMA read requests to write-only pages.
 * * 1: Indicates the remapping hardware unit supports zero length DMA read requests
 * to write-only pages. DMA remapping hardware implementations are recommended to
 * report ZLR field as Set.
 */
UINT64 ZeroLengthRead : 1;
#define VTD_CAPABILITY_ZERO_LENGTH_READ_BIT 22
#define VTD_CAPABILITY_ZERO_LENGTH_READ_FLAG 0x400000
#define VTD_CAPABILITY_ZERO_LENGTH_READ_MASK 0x01
#define VTD_CAPABILITY_ZERO_LENGTH_READ(_) (((_) >> 22) & 0x01)
/**
 * @brief Deprecated (RO)
 *
 * [Bit 23] This field must be reported as 0 to ensure backward compatibility with
 * older software.
 */
UINT64 Deprecated : 1;
#define VTD_CAPABILITY_DEPRECATED_BIT 23
#define VTD_CAPABILITY_DEPRECATED_FLAG 0x800000
#define VTD_CAPABILITY_DEPRECATED_MASK 0x01
#define VTD_CAPABILITY_DEPRECATED(_) (((_) >> 23) & 0x01)
/**
 * @brief Fault-recording Register offset (RO)
 *
 * [Bits 33:24] This field specifies the offset of the first fault recording
 * register relative to the register base address of this remapping hardware unit.
 * If the register base address is X, and the value reported in this field is Y, the
 * address for the first fault recording register is calculated as X+(16*Y).
 */
UINT64 FaultRecordingRegisterOffset : 10;
#define VTD_CAPABILITY_FAULT_RECORDING_REGISTER_OFFSET_BIT 24
#define VTD_CAPABILITY_FAULT_RECORDING_REGISTER_OFFSET_FLAG 0x3FF000000
#define VTD_CAPABILITY_FAULT_RECORDING_REGISTER_OFFSET_MASK 0x3FF
#define VTD_CAPABILITY_FAULT_RECORDING_REGISTER_OFFSET(_) (((_) >> 24) & 0x3FF)
/**
 * @brief Second Level Large Page Support (RO)
 *
 * [Bits 37:34] This field indicates the large page sizes supported by hardware.
 * A value of 1 in any of these bits indicates the corresponding large page size is
 * supported. The large-page sizes corresponding to various bit positions within
 * this field are:
 * * 0: 21-bit offset to page frame (2MB)
 * * 1: 30-bit offset to page frame (1GB)
 * * 2: Reserved
 * * 3: Reserved
 * Hardware implementations supporting a specific large-page size must support all
 * smaller large-page sizes. i.e., only valid values for this field are 0000b,
 * 0001b, 0011b.
 */
UINT64 SecondLevelLargePageSupport : 4;
#define VTD_CAPABILITY_SECOND_LEVEL_LARGE_PAGE_SUPPORT_BIT 34
#define VTD_CAPABILITY_SECOND_LEVEL_LARGE_PAGE_SUPPORT_FLAG 0x3C00000000
#define VTD_CAPABILITY_SECOND_LEVEL_LARGE_PAGE_SUPPORT_MASK 0x0F
#define VTD_CAPABILITY_SECOND_LEVEL_LARGE_PAGE_SUPPORT(_) (((_) >> 34) & 0x0F)
/**
 * [Bit 38] Reserved.
 */
UINT64 Reserved2 : 1;
/**
 * @brief Page Selective Invalidation (RO)
 *
 * [Bit 39]
 * * 0: Hardware supports only global and domain-selective invalidates for IOTLB.
 * * 1: Hardware supports page-selective, domain-selective, and global invalidates
 * for IOTLB. Hardware implementations reporting this field as Set are recommended
 * to support a Maximum Address Mask Value (MAMV) value of at least 9 (or 18 if
 * supporting 1GB pages with second level translation). This field is applicable
 * only for IOTLB invalidations for second-level translation. Irrespective of value
 * reported in this field, implementations supporting SMTS must support page/address
 * selective IOTLB invalidation for first-level translation.
 */
UINT64 PageSelectiveInvalidation : 1;
#define VTD_CAPABILITY_PAGE_SELECTIVE_INVALIDATION_BIT 39
#define VTD_CAPABILITY_PAGE_SELECTIVE_INVALIDATION_FLAG 0x8000000000
#define VTD_CAPABILITY_PAGE_SELECTIVE_INVALIDATION_MASK 0x01
#define VTD_CAPABILITY_PAGE_SELECTIVE_INVALIDATION(_) (((_) >> 39) & 0x01)
/**
 * @brief Number of Fault-recording Registers (RO)
 *
 * [Bits 47:40] Number of fault recording registers is computed as N+1, where N is
 * the value reported in this field. Implementations must support at least one fault
 * recording register (NFR = 0) for each remapping hardware unit in the platform.
 * The maximum number of fault recording registers per remapping hardware unit is
 * 256.
 */
UINT64 NumberOfFaultRecordingRegisters : 8;
#define VTD_CAPABILITY_NUMBER_OF_FAULT_RECORDING_REGISTERS_BIT 40
#define VTD_CAPABILITY_NUMBER_OF_FAULT_RECORDING_REGISTERS_FLAG 0xFF0000000000
#define VTD_CAPABILITY_NUMBER_OF_FAULT_RECORDING_REGISTERS_MASK 0xFF
#define VTD_CAPABILITY_NUMBER_OF_FAULT_RECORDING_REGISTERS(_) (((_) >> 40) & 0xFF)
/**
 * @brief Maximum Address Mask Value (RO)
 *
 * [Bits 53:48] The value in this field indicates the maximum supported value for
 * the Address Mask (AM) field in the Invalidation Address register (IVA_REG), and
 * IOTLB Invalidation Descriptor (iotlb_inv_dsc) used for invalidations of
 * second-level translation.
 * This field is valid when the PSI field in Capability register is reported as Set.
 * Independent of value reported in this field, implementations supporting SMTS must
 * support address-selective PASID-based IOTLB invalidations (p_iotlb_inv_dsc) with
 * any defined address mask.
 */
UINT64 MaximumAddressMaskValue : 6;
#define VTD_CAPABILITY_MAXIMUM_ADDRESS_MASK_VALUE_BIT 48
#define VTD_CAPABILITY_MAXIMUM_ADDRESS_MASK_VALUE_FLAG 0x3F000000000000
#define VTD_CAPABILITY_MAXIMUM_ADDRESS_MASK_VALUE_MASK 0x3F
#define VTD_CAPABILITY_MAXIMUM_ADDRESS_MASK_VALUE(_) (((_) >> 48) & 0x3F)
/**
 * @brief Write Draining (RO)
 *
 * [Bit 54]
 * * 0: Hardware does not support draining of write requests on IOTLB Invalidation.
 * * 1: Hardware supports draining of write requests on IOTLB Invalidation.
 * Hardware implementation with Major Version 2 or higher (VER_REG), always performs
 * required drain without software explicitly requesting a drain in IOTLB
 * invalidation. This field is deprecated and hardware will always report it as 1 to
 * maintain backward compatibility with software.
 */
UINT64 WriteDraining : 1;
#define VTD_CAPABILITY_WRITE_DRAINING_BIT 54
#define VTD_CAPABILITY_WRITE_DRAINING_FLAG 0x40000000000000
#define VTD_CAPABILITY_WRITE_DRAINING_MASK 0x01
#define VTD_CAPABILITY_WRITE_DRAINING(_) (((_) >> 54) & 0x01)
/**
 * @brief Read Draining (RO)
 *
 * [Bit 55]
 * * 0: Hardware does not support draining of read requests on IOTLB Invalidation.
 * * 1: Hardware supports draining of read requests on IOTLB Invalidation.
 * Hardware implementation with Major Version 2 or higher (VER_REG), always performs
 * required drain without software explicitly requesting a drain in IOTLB
 * invalidation. This field is deprecated and hardware will always report it as 1 to
 * maintain backward compatibility with software.
 */
UINT64 ReadDraining : 1;
#define VTD_CAPABILITY_READ_DRAINING_BIT 55
#define VTD_CAPABILITY_READ_DRAINING_FLAG 0x80000000000000
#define VTD_CAPABILITY_READ_DRAINING_MASK 0x01
#define VTD_CAPABILITY_READ_DRAINING(_) (((_) >> 55) & 0x01)
/**
 * @brief First Level 1-GByte Page Support (RO)
 *
 * [Bit 56] A value of 1 in this field indicates 1-GByte page size is supported for
 * first-level translation. Hardware implementation reporting First-level
 * Translation Support (FLTS) as Clear also report this field as Clear.
 */
UINT64 FirstLevel1GbytePageSupport : 1;
#define VTD_CAPABILITY_FIRST_LEVEL_1GBYTE_PAGE_SUPPORT_BIT 56
#define VTD_CAPABILITY_FIRST_LEVEL_1GBYTE_PAGE_SUPPORT_FLAG 0x100000000000000
#define VTD_CAPABILITY_FIRST_LEVEL_1GBYTE_PAGE_SUPPORT_MASK 0x01
#define VTD_CAPABILITY_FIRST_LEVEL_1GBYTE_PAGE_SUPPORT(_) (((_) >> 56) & 0x01)
/**
 * [Bits 58:57] Reserved.
 */
UINT64 Reserved3 : 2;
/**
 * @brief Posted Interrupts Support (RO)
 *
 * [Bit 59]
 * * 0: Hardware does not support Posting of Interrupts.
 * * 1: Hardware supports Posting of Interrupts.
 * Hardware implementation reporting Interrupt Remapping support (IR) field in
 * Extended Capability Register as Clear also report this field as Clear.
 */
UINT64 PostedInterruptsSupport : 1;
#define VTD_CAPABILITY_POSTED_INTERRUPTS_SUPPORT_BIT 59
#define VTD_CAPABILITY_POSTED_INTERRUPTS_SUPPORT_FLAG 0x800000000000000
#define VTD_CAPABILITY_POSTED_INTERRUPTS_SUPPORT_MASK 0x01
#define VTD_CAPABILITY_POSTED_INTERRUPTS_SUPPORT(_) (((_) >> 59) & 0x01)
/**
 * @brief First Level 5-level Paging Support (RO)
 *
 * [Bit 60]
 * * 0: Hardware does not support 5-level paging for first-level translation.
 * * 1: Hardware supports 5-level paging for first-level translation.
 * Hardware implementation reporting First-level Translation Support (FLTS) as Clear
 * also report this field as Clear.
 */
UINT64 FirstLevel5LevelPagingSupport : 1;
#define VTD_CAPABILITY_FIRST_LEVEL_5LEVEL_PAGING_SUPPORT_BIT 60
#define VTD_CAPABILITY_FIRST_LEVEL_5LEVEL_PAGING_SUPPORT_FLAG 0x1000000000000000
#define VTD_CAPABILITY_FIRST_LEVEL_5LEVEL_PAGING_SUPPORT_MASK 0x01
#define VTD_CAPABILITY_FIRST_LEVEL_5LEVEL_PAGING_SUPPORT(_) (((_) >> 60) & 0x01)
/**
 * [Bit 61] Reserved.
 */
UINT64 Reserved4 : 1;
/**
 * @brief Enhanced Set Interrupt Remap Table Pointer Support (RO)
 *
 * [Bit 62]
 * * 0: Hardware does not invalidate all Interrupt remapping hardware translation
 * caches as part of SIRTP flow.
 * * 1: Hardware invalidates all Interrupt remapping hardware translation caches as
 * part of SIRTP flow.
 */
UINT64 EnhancedSetInterruptRemapTablePointerSupport : 1;
#define VTD_CAPABILITY_ENHANCED_SET_INTERRUPT_REMAP_TABLE_POINTER_SUPPORT_BIT 62
#define VTD_CAPABILITY_ENHANCED_SET_INTERRUPT_REMAP_TABLE_POINTER_SUPPORT_FLAG 0x4000000000000000
#define VTD_CAPABILITY_ENHANCED_SET_INTERRUPT_REMAP_TABLE_POINTER_SUPPORT_MASK 0x01
#define VTD_CAPABILITY_ENHANCED_SET_INTERRUPT_REMAP_TABLE_POINTER_SUPPORT(_) (((_) >> 62) & 0x01)
/**
 * @brief Enhanced Set Root Table Pointer Support (RO)
 *
 * [Bit 63]
 * * 0: Hardware does not invalidate all DMA remapping hardware translation caches
 * as part of SRTP flow.
 * * 1: Hardware invalidates all DMA remapping hardware translation caches as part
 * of SRTP flow.
 */
UINT64 EnhancedSetRootTablePointerSupport : 1;
#define VTD_CAPABILITY_ENHANCED_SET_ROOT_TABLE_POINTER_SUPPORT_BIT 63
#define VTD_CAPABILITY_ENHANCED_SET_ROOT_TABLE_POINTER_SUPPORT_FLAG 0x8000000000000000
#define VTD_CAPABILITY_ENHANCED_SET_ROOT_TABLE_POINTER_SUPPORT_MASK 0x01
#define VTD_CAPABILITY_ENHANCED_SET_ROOT_TABLE_POINTER_SUPPORT(_) (((_) >> 63) & 0x01)
};
UINT64 AsUInt;
} VTD_CAPABILITY_REGISTER;
/**
* Register to report remapping hardware extended capabilities
*
* @remarks ECAP_REG
* @see VTd[10.4.3(Extended Capability Register)]
*/
#define VTD_EXTENDED_CAPABILITY 0x00000010
typedef union
{
struct
{
/**
* @brief Page-walk Coherency (RO)
*
* [Bit 0] This field indicates if hardware access to the root, scalable-mode root,
* context, scalable-mode-context, scalable-mode PASIDdirectory, scalable-mode
* PASID-table, and interrupt-remap tables, and legacy-mode second-level paging
* structures are coherent (snooped) or not.
* * 0:Indicates hardware accesses to remapping structures are non-coherent.
* * 1:Indicates hardware accesses to remapping structures are coherent.
* Hardware access to advanced fault log, invalidation queue, invalidation
* semaphore, page-request queue are always snooped.
*/
UINT64 PageWalkCoherency : 1;
#define VTD_EXTENDED_CAPABILITY_PAGE_WALK_COHERENCY_BIT 0
#define VTD_EXTENDED_CAPABILITY_PAGE_WALK_COHERENCY_FLAG 0x01
#define VTD_EXTENDED_CAPABILITY_PAGE_WALK_COHERENCY_MASK 0x01
#define VTD_EXTENDED_CAPABILITY_PAGE_WALK_COHERENCY(_) (((_) >> 0) & 0x01)
/**
* @brief Queued Invalidation support (RO)
*
* [Bit 1]
* * 0: Hardware does not support queued invalidations.
* * 1: Hardware supports queued invalidations.
*/
UINT64 QueuedInvalidationSupport : 1;
#define VTD_EXTENDED_CAPABILITY_QUEUED_INVALIDATION_SUPPORT_BIT 1
#define VTD_EXTENDED_CAPABILITY_QUEUED_INVALIDATION_SUPPORT_FLAG 0x02
#define VTD_EXTENDED_CAPABILITY_QUEUED_INVALIDATION_SUPPORT_MASK 0x01
#define VTD_EXTENDED_CAPABILITY_QUEUED_INVALIDATION_SUPPORT(_) (((_) >> 1) & 0x01)
/**
* @brief Device-TLB support (RO)
*
* [Bit 2]
* * 0: Hardware does not support Device-TLBs.
* * 1: Hardware supports Device-TLBs.
* Hardware implementation reporting Queued Invalidation support (QI) field as Clear
* also report this field as Clear.
*/
UINT64 DeviceTlbSupport : 1;
#define VTD_EXTENDED_CAPABILITY_DEVICE_TLB_SUPPORT_BIT 2
#define VTD_EXTENDED_CAPABILITY_DEVICE_TLB_SUPPORT_FLAG 0x04
#define VTD_EXTENDED_CAPABILITY_DEVICE_TLB_SUPPORT_MASK 0x01
#define VTD_EXTENDED_CAPABILITY_DEVICE_TLB_SUPPORT(_) (((_) >> 2) & 0x01)
/**
* @brief Interrupt Remapping support (RO)
*
* [Bit 3]
* * 0: Hardware does not support interrupt remapping.
* * 1: Hardware supports interrupt remapping.
* Hardware implementation reporting Queued Invalidation support (QI) field as Clear
* also report this field as Clear.
*/
UINT64 InterruptRemappingSupport : 1;
#define VTD_EXTENDED_CAPABILITY_INTERRUPT_REMAPPING_SUPPORT_BIT 3
#define VTD_EXTENDED_CAPABILITY_INTERRUPT_REMAPPING_SUPPORT_FLAG 0x08
#define VTD_EXTENDED_CAPABILITY_INTERRUPT_REMAPPING_SUPPORT_MASK 0x01
#define VTD_EXTENDED_CAPABILITY_INTERRUPT_REMAPPING_SUPPORT(_) (((_) >> 3) & 0x01)
/**
* @brief Extended Interrupt Mode (RO)
*
* [Bit 4]
* * 0: On Intel(R) 64 platforms, hardware supports only 8-bit APIC-IDs (xAPIC
* Mode).
* * 1: On Intel(R) 64 platforms, hardware supports 32-bit APICIDs (x2APIC mode).
* Hardware implementation reporting Interrupt Remapping support (IR) field as Clear
* also report this field as Clear.
*/
UINT64 ExtendedInterruptMode : 1;
#define VTD_EXTENDED_CAPABILITY_EXTENDED_INTERRUPT_MODE_BIT 4
#define VTD_EXTENDED_CAPABILITY_EXTENDED_INTERRUPT_MODE_FLAG 0x10
#define VTD_EXTENDED_CAPABILITY_EXTENDED_INTERRUPT_MODE_MASK 0x01
#define VTD_EXTENDED_CAPABILITY_EXTENDED_INTERRUPT_MODE(_) (((_) >> 4) & 0x01)
/**
* @brief Deprecated1 (RO)
*
* [Bit 5] This field must be reported as 0 to ensure backward compatibility with
* older software.
*/
UINT64 Deprecated1 : 1;
#define VTD_EXTENDED_CAPABILITY_DEPRECATED1_BIT 5
#define VTD_EXTENDED_CAPABILITY_DEPRECATED1_FLAG 0x20
#define VTD_EXTENDED_CAPABILITY_DEPRECATED1_MASK 0x01
#define VTD_EXTENDED_CAPABILITY_DEPRECATED1(_) (((_) >> 5) & 0x01)
/**
* @brief Pass Through (RO)
*
* [Bit 6]
* * 0: Hardware does not support pass-through translation type in context-entries
* and scalable-mode-pasid-table-entries.
* * 1: Hardware supports pass-through translation type in context and
* scalable-mode-pasid-table-entries.
*/
UINT64 PassThrough : 1;
#define VTD_EXTENDED_CAPABILITY_PASS_THROUGH_BIT 6
#define VTD_EXTENDED_CAPABILITY_PASS_THROUGH_FLAG 0x40
#define VTD_EXTENDED_CAPABILITY_PASS_THROUGH_MASK 0x01
#define VTD_EXTENDED_CAPABILITY_PASS_THROUGH(_) (((_) >> 6) & 0x01)
/**
* @brief Snoop Control (RO)
*
* [Bit 7]
* * 0: Hardware does not support 1-setting of the SNP field in the page-table
* entries.
* * 1: Hardware supports the 1-setting of the SNP field in the page-table entries.
* Implementations are recommended to support Snoop Control to support software
* usages that require Snoop Control for assignment of devices behind a remapping
* hardware unit.
*/
UINT64 SnoopControl : 1;
#define VTD_EXTENDED_CAPABILITY_SNOOP_CONTROL_BIT 7
#define VTD_EXTENDED_CAPABILITY_SNOOP_CONTROL_FLAG 0x80
#define VTD_EXTENDED_CAPABILITY_SNOOP_CONTROL_MASK 0x01
#define VTD_EXTENDED_CAPABILITY_SNOOP_CONTROL(_) (((_) >> 7) & 0x01)
/**
* @brief IOTLB Register Offset (RO)
*
* [Bits 17:8] This field specifies the offset to the IOTLB registers relative to
* the register base address of this remapping hardware unit. If the register base
* address is X, and the value reported in this field is Y, the address for the
* IOTLB registers is calculated as X+(16*Y).
*/
UINT64 IotlbRegisterOffset : 10;
#define VTD_EXTENDED_CAPABILITY_IOTLB_REGISTER_OFFSET_BIT 8
#define VTD_EXTENDED_CAPABILITY_IOTLB_REGISTER_OFFSET_FLAG 0x3FF00
#define VTD_EXTENDED_CAPABILITY_IOTLB_REGISTER_OFFSET_MASK 0x3FF
#define VTD_EXTENDED_CAPABILITY_IOTLB_REGISTER_OFFSET(_) (((_) >> 8) & 0x3FF)
UINT64 Reserved1 : 2;
/**
* @brief Maximum Handle Mask Value (RO)
*
* [Bits 23:20] The value in this field indicates the maximum supported value for
* the Interrupt Mask (IM) field in the Interrupt Entry Cache Invalidation
* Descriptor (iec_inv_dsc). This field is unused and is reported as 0 if Interrupt
* Remapping support (IR) field is Clear.
*/
UINT64 MaximumHandleMaskValue : 4;
#define VTD_EXTENDED_CAPABILITY_MAXIMUM_HANDLE_MASK_VALUE_BIT 20
#define VTD_EXTENDED_CAPABILITY_MAXIMUM_HANDLE_MASK_VALUE_FLAG 0xF00000
#define VTD_EXTENDED_CAPABILITY_MAXIMUM_HANDLE_MASK_VALUE_MASK 0x0F
#define VTD_EXTENDED_CAPABILITY_MAXIMUM_HANDLE_MASK_VALUE(_) (((_) >> 20) & 0x0F)
/**
* @brief Deprecated2 (RO)
*
* [Bit 24] In prior versions of this specification this bit was used to enumerate
* "Extended mode address translation" which is now deprecated. This field must be
* reported as 0 to ensure backward compatibility with any software that enables
* extended mode address translation.
*/
UINT64 Deprecated2 : 1;
#define VTD_EXTENDED_CAPABILITY_DEPRECATED2_BIT 24
#define VTD_EXTENDED_CAPABILITY_DEPRECATED2_FLAG 0x1000000
#define VTD_EXTENDED_CAPABILITY_DEPRECATED2_MASK 0x01
#define VTD_EXTENDED_CAPABILITY_DEPRECATED2(_) (((_) >> 24) & 0x01)
/**
* @brief Memory Type Support (RO)
*
* [Bit 25]
* * 0: Hardware does not support Memory Type in first-level translation and
* Extended Memory type in second-level translation.
* * 1: Hardware supports Memory Type in first-level translation and Extended Memory
* type in second-level translation. Hardware implementations reporting Scalable
* Mode Translation Support (SMTS) field as Clear also report this field as Clear.
* Remapping hardware units with, one or more devices that operate in processor
* coherency domain, under its scope must report this field as Set.
*/
UINT64 MemoryTypeSupport : 1;
#define VTD_EXTENDED_CAPABILITY_MEMORY_TYPE_SUPPORT_BIT 25
#define VTD_EXTENDED_CAPABILITY_MEMORY_TYPE_SUPPORT_FLAG 0x2000000
#define VTD_EXTENDED_CAPABILITY_MEMORY_TYPE_SUPPORT_MASK 0x01
#define VTD_EXTENDED_CAPABILITY_MEMORY_TYPE_SUPPORT(_) (((_) >> 25) & 0x01)
/**
* @brief Nested Translation Support (RO)
*
* [Bit 26]
* * 0: Hardware does not support nested translations.
* * 1: Hardware supports nested translations.
* Hardware implementations reporting Scalable Mode Translation Support (SMTS) field
* as Clear or First-level Translation Support (FLTS) field as Clear or Second-level
* Translation Support (SLTS) field as Clear also report this field as Clear.
*/
UINT64 NestedTranslationSupport : 1;
#define VTD_EXTENDED_CAPABILITY_NESTED_TRANSLATION_SUPPORT_BIT 26
#define VTD_EXTENDED_CAPABILITY_NESTED_TRANSLATION_SUPPORT_FLAG 0x4000000
#define VTD_EXTENDED_CAPABILITY_NESTED_TRANSLATION_SUPPORT_MASK 0x01
#define VTD_EXTENDED_CAPABILITY_NESTED_TRANSLATION_SUPPORT(_) (((_) >> 26) & 0x01)
UINT64 Reserved2 : 1;
/**
* @brief Deprecated3 (RO)
*
* [Bit 28] This field must be reported as 0 to ensure backward compatibility with
* older software.
*/
UINT64 Deprecated3 : 1;
#define VTD_EXTENDED_CAPABILITY_DEPRECATED3_BIT 28
#define VTD_EXTENDED_CAPABILITY_DEPRECATED3_FLAG 0x10000000
#define VTD_EXTENDED_CAPABILITY_DEPRECATED3_MASK 0x01
#define VTD_EXTENDED_CAPABILITY_DEPRECATED3(_) (((_) >> 28) & 0x01)
/**
* @brief Page Request Support (RO)
*
* [Bit 29]
* * 0: Hardware does not support page requests.
* * 1: Hardware supports page requests.
* Hardware implementation reporting Device-TLB support (DT) field as Clear or
* Scalable Mode Translation Support (SMTS) field as Clear also report this field as
* Clear.
*/
UINT64 PageRequestSupport : 1;
#define VTD_EXTENDED_CAPABILITY_PAGE_REQUEST_SUPPORT_BIT 29
#define VTD_EXTENDED_CAPABILITY_PAGE_REQUEST_SUPPORT_FLAG 0x20000000
#define VTD_EXTENDED_CAPABILITY_PAGE_REQUEST_SUPPORT_MASK 0x01
#define VTD_EXTENDED_CAPABILITY_PAGE_REQUEST_SUPPORT(_) (((_) >> 29) & 0x01)
/**
* @brief Execute Request Support (RO)
*
* [Bit 30]
* * 0: Hardware does not support requests-with-PASID seeking execute permission.
* * 1: Hardware supports requests-with-PASID seeking execute permission.
* Hardware implementations reporting Process Address Space ID support (PASID) field
* as Clear must report this field as Clear.
*/
UINT64 ExecuteRequestSupport : 1;
#define VTD_EXTENDED_CAPABILITY_EXECUTE_REQUEST_SUPPORT_BIT 30
#define VTD_EXTENDED_CAPABILITY_EXECUTE_REQUEST_SUPPORT_FLAG 0x40000000
#define VTD_EXTENDED_CAPABILITY_EXECUTE_REQUEST_SUPPORT_MASK 0x01
#define VTD_EXTENDED_CAPABILITY_EXECUTE_REQUEST_SUPPORT(_) (((_) >> 30) & 0x01)
UINT64 Reserved3 : 2;
/**
* @brief No Write Flag Support (RO)
*
* [Bit 33]
* * 0: Hardware ignores the 'No Write' (NW) flag in Device-TLB
* translation-requests, and behaves as if NW is always 0.
* * 1: Hardware supports the 'No Write' (NW) flag in Device-TLB
* translation-requests. Hardware implementations reporting Device-TLB support (DT)
* field as Clear also report this field as Clear.
*/
UINT64 NoWriteFlagSupport : 1;
#define VTD_EXTENDED_CAPABILITY_NO_WRITE_FLAG_SUPPORT_BIT 33
#define VTD_EXTENDED_CAPABILITY_NO_WRITE_FLAG_SUPPORT_FLAG 0x200000000
#define VTD_EXTENDED_CAPABILITY_NO_WRITE_FLAG_SUPPORT_MASK 0x01
#define VTD_EXTENDED_CAPABILITY_NO_WRITE_FLAG_SUPPORT(_) (((_) >> 33) & 0x01)
/**
* @brief Extended Accessed Flag Support (RO)
*
* [Bit 34]
* * 0: Hardware does not support the extended-accessed (EA) bit in first-level
* paging-structure entries.
* * 1: Hardware supports the extended-accessed (EA) bit in first-level
* paging-structure entries. Hardware implementations reporting Scalable-Mode
* Page-walk Coherency Support (SWPWCS) as Clear also report this field as Clear.
*/
UINT64 ExtendedAccessedFlagSupport : 1;
#define VTD_EXTENDED_CAPABILITY_EXTENDED_ACCESSED_FLAG_SUPPORT_BIT 34
#define VTD_EXTENDED_CAPABILITY_EXTENDED_ACCESSED_FLAG_SUPPORT_FLAG 0x400000000
#define VTD_EXTENDED_CAPABILITY_EXTENDED_ACCESSED_FLAG_SUPPORT_MASK 0x01
#define VTD_EXTENDED_CAPABILITY_EXTENDED_ACCESSED_FLAG_SUPPORT(_) (((_) >> 34) & 0x01)
/**
* @brief PASID Size Supported (RO)
*
* [Bits 39:35] This field reports the PASID size supported by the remapping
* hardware for requests-with-PASID. A value of N in this field indicates hardware
* supports PASID field of N+1 bits (For example, value of 7 in this field,
* indicates 8-bit PASIDs are supported). Requests-with-PASID with PASID value
* beyond the limit specified by this field are treated as error by the remapping
* hardware. This field is unused and reported as 0 if Scalable Mode Translation
* Support (SMTS) field is Clear.
*/
UINT64 PasidSizeSupported : 5;
#define VTD_EXTENDED_CAPABILITY_PASID_SIZE_SUPPORTED_BIT 35
#define VTD_EXTENDED_CAPABILITY_PASID_SIZE_SUPPORTED_FLAG 0xF800000000
#define VTD_EXTENDED_CAPABILITY_PASID_SIZE_SUPPORTED_MASK 0x1F
#define VTD_EXTENDED_CAPABILITY_PASID_SIZE_SUPPORTED(_) (((_) >> 35) & 0x1F)
/**
* @brief Process Address Space ID Support (RO)
*
* [Bit 40]
* * 0: Hardware does not support requests tagged with Process Address Space IDs.
* * 1: Hardware supports requests tagged with Process Address Space IDs.
* Hardware implementations reporting Scalable Mode Translation Support (SMTS) field
* as Clear also report this field as Clear.
*/
UINT64 ProcessAddressSpaceIdSupport : 1;
#define VTD_EXTENDED_CAPABILITY_PROCESS_ADDRESS_SPACE_ID_SUPPORT_BIT 40
#define VTD_EXTENDED_CAPABILITY_PROCESS_ADDRESS_SPACE_ID_SUPPORT_FLAG 0x10000000000
#define VTD_EXTENDED_CAPABILITY_PROCESS_ADDRESS_SPACE_ID_SUPPORT_MASK 0x01
#define VTD_EXTENDED_CAPABILITY_PROCESS_ADDRESS_SPACE_ID_SUPPORT(_) (((_) >> 40) & 0x01)
/**
* @brief Device-TLB Invalidation Throttle (RO)
*
* [Bit 41]
* * 0: Hardware does not support Device-TLB Invalidation Throttling.
* * 1: Hardware supports Device-TLB Invalidation Throttling.
* Hardware implementations reporting Device-TLB support (DT) as Clear also report
* this field as Clear.
*/
UINT64 DeviceTlbInvalidationThrottle : 1;
#define VTD_EXTENDED_CAPABILITY_DEVICE_TLB_INVALIDATION_THROTTLE_BIT 41
#define VTD_EXTENDED_CAPABILITY_DEVICE_TLB_INVALIDATION_THROTTLE_FLAG 0x20000000000
#define VTD_EXTENDED_CAPABILITY_DEVICE_TLB_INVALIDATION_THROTTLE_MASK 0x01
#define VTD_EXTENDED_CAPABILITY_DEVICE_TLB_INVALIDATION_THROTTLE(_) (((_) >> 41) & 0x01)
/**
* @brief Page-request Drain Support (RO)
*
* [Bit 42]
* * 0: Hardware does not support Page-request Drain (PD) flag in inv_wait_dsc.
* * 1: Hardware supports Page-request Drain (PD) flag in inv_wait_dsc.
* Hardware implementations reporting Device-TLB support (DT) as Clear also report
* this field as Clear.
*/
UINT64 PageRequestDrainSupport : 1;
#define VTD_EXTENDED_CAPABILITY_PAGE_REQUEST_DRAIN_SUPPORT_BIT 42
#define VTD_EXTENDED_CAPABILITY_PAGE_REQUEST_DRAIN_SUPPORT_FLAG 0x40000000000
#define VTD_EXTENDED_CAPABILITY_PAGE_REQUEST_DRAIN_SUPPORT_MASK 0x01
#define VTD_EXTENDED_CAPABILITY_PAGE_REQUEST_DRAIN_SUPPORT(_) (((_) >> 42) & 0x01)
/**
* @brief Scalable Mode Translation Support (RO)
*
* [Bit 43]
* * 0: Hardware does not support Scalable Mode DMA Remapping.
* * 1: Hardware supports Scalable Mode DMA Remapping through scalable-mode
* context-table and PASID-table structures. Hardware implementation reporting
* Queued Invalidation (QI) field as Clear also report this field as Clear. Hardware
* implementation reporting First-Level Translation Support (FLTS), Second-level
* Translation Support (SLTS) and Pass-through Support (PT) as Clear also report
* this field as Clear.
*/
UINT64 ScalableModeTranslationSupport : 1;
#define VTD_EXTENDED_CAPABILITY_SCALABLE_MODE_TRANSLATION_SUPPORT_BIT 43
#define VTD_EXTENDED_CAPABILITY_SCALABLE_MODE_TRANSLATION_SUPPORT_FLAG 0x80000000000
#define VTD_EXTENDED_CAPABILITY_SCALABLE_MODE_TRANSLATION_SUPPORT_MASK 0x01
#define VTD_EXTENDED_CAPABILITY_SCALABLE_MODE_TRANSLATION_SUPPORT(_) (((_) >> 43) & 0x01)
/**
* @brief Virtual Command Support (RO)
*
* [Bit 44]
* * 0: Hardware does not support command submission to virtual-DMA Remapping
* hardware.
* * 1: Hardware does support command submission to virtual- DMA Remapping hardware.
* Hardware implementations of this architecture report a value of 0 in this field.
* Software implementations (emulation) of this architecture may report VCS=1.
* Software managing remapping hardware should be written to handle both values of
* VCS.
*/
UINT64 VirtualCommandSupport : 1;
#define VTD_EXTENDED_CAPABILITY_VIRTUAL_COMMAND_SUPPORT_BIT 44
#define VTD_EXTENDED_CAPABILITY_VIRTUAL_COMMAND_SUPPORT_FLAG 0x100000000000
#define VTD_EXTENDED_CAPABILITY_VIRTUAL_COMMAND_SUPPORT_MASK 0x01
#define VTD_EXTENDED_CAPABILITY_VIRTUAL_COMMAND_SUPPORT(_) (((_) >> 44) & 0x01)
/**
* @brief Second-Level Accessed/Dirty Support (RO)
*
* [Bit 45]
* * 0: Hardware does not support Accessed/Dirty bits in Second- Level translation.
* * 1: Hardware supports Accessed/Dirty bits in Second-Level translation.
* Hardware implementations reporting Scalable-Mode Page-walk Coherency Support
* (SMPWCS) as Clear also report this field as Clear.
*/
UINT64 SecondLevelAccessedDirtySupport : 1;
#define VTD_EXTENDED_CAPABILITY_SECOND_LEVEL_ACCESSED_DIRTY_SUPPORT_BIT 45
#define VTD_EXTENDED_CAPABILITY_SECOND_LEVEL_ACCESSED_DIRTY_SUPPORT_FLAG 0x200000000000
#define VTD_EXTENDED_CAPABILITY_SECOND_LEVEL_ACCESSED_DIRTY_SUPPORT_MASK 0x01
#define VTD_EXTENDED_CAPABILITY_SECOND_LEVEL_ACCESSED_DIRTY_SUPPORT(_) (((_) >> 45) & 0x01)
/**
* @brief Second-level Translation Support (RO)
*
* [Bit 46]
* * 0: Hardware does not support PASID Granular Translation Type of second-level
* (PGTT=010b) in scalable-mode PASIDTable entry.
* * 1: Hardware supports PASID Granular Translation Type of second-level
* (PGTT=010b) in scalable-mode PASID-Table entry.
*/
UINT64 SecondLevelTranslationSupport : 1;
#define VTD_EXTENDED_CAPABILITY_SECOND_LEVEL_TRANSLATION_SUPPORT_BIT 46
#define VTD_EXTENDED_CAPABILITY_SECOND_LEVEL_TRANSLATION_SUPPORT_FLAG 0x400000000000
#define VTD_EXTENDED_CAPABILITY_SECOND_LEVEL_TRANSLATION_SUPPORT_MASK 0x01
#define VTD_EXTENDED_CAPABILITY_SECOND_LEVEL_TRANSLATION_SUPPORT(_) (((_) >> 46) & 0x01)
/**
* @brief First-level Translation Support (RO)
*
* [Bit 47]
* * 0: Hardware does not support PASID Granular Translation Type of first-level
* (PGTT=001b) in scalable-mode PASIDTable entry.
* * 1: Hardware supports PASID Granular Translation Type of first-level (PGTT=001b)
* in scalable-mode PASID-Table entry. Hardware implementations reporting Scalable
* Mode Translation Support (SMTS) as Clear also report this field as Clear
*/
UINT64 FirstLevelTranslationSupport : 1;
#define VTD_EXTENDED_CAPABILITY_FIRST_LEVEL_TRANSLATION_SUPPORT_BIT 47
#define VTD_EXTENDED_CAPABILITY_FIRST_LEVEL_TRANSLATION_SUPPORT_FLAG 0x800000000000
#define VTD_EXTENDED_CAPABILITY_FIRST_LEVEL_TRANSLATION_SUPPORT_MASK 0x01
#define VTD_EXTENDED_CAPABILITY_FIRST_LEVEL_TRANSLATION_SUPPORT(_) (((_) >> 47) & 0x01)
/**
* @brief Scalable-Mode Page-walk Coherency (RO)
*
* [Bit 48]
* * 0: Hardware access to paging structures accessed through PASID-table entry are
* not snooped.
* * 1: Hardware access to paging structures accessed through PASID-table entry are
* snooped if PWSNP field in PASID-table entry is Set. Paging-structures accessed
* through PASID-table entry are not snooped if PWSNP field in PASID-table entry is
* Clear. Hardware implementations reporting Scalable Mode Translation Support
* (SMTS) as Clear also report this field as Clear.
*/
UINT64 ScalableModePageWalkCoherency : 1;
#define VTD_EXTENDED_CAPABILITY_SCALABLE_MODE_PAGE_WALK_COHERENCY_BIT 48
#define VTD_EXTENDED_CAPABILITY_SCALABLE_MODE_PAGE_WALK_COHERENCY_FLAG 0x1000000000000
#define VTD_EXTENDED_CAPABILITY_SCALABLE_MODE_PAGE_WALK_COHERENCY_MASK 0x01
#define VTD_EXTENDED_CAPABILITY_SCALABLE_MODE_PAGE_WALK_COHERENCY(_) (((_) >> 48) & 0x01)
/**
* @brief RID-PASID Support (RO)
*
* [Bit 49]
* * 0: Hardware does not support RID_PASID field in scalable mode context-entry. It
* uses the value of 0 for RID_PASID.
* * 1: Hardware supports the RID_PASID field in scalable-mode context-entry.
* Hardware implementations reporting Scalable Mode Translation Support (SMTS) as
* Clear also report this field as Clear.
*/
UINT64 RidPasidSupport : 1;
#define VTD_EXTENDED_CAPABILITY_RID_PASID_SUPPORT_BIT 49
#define VTD_EXTENDED_CAPABILITY_RID_PASID_SUPPORT_FLAG 0x2000000000000
#define VTD_EXTENDED_CAPABILITY_RID_PASID_SUPPORT_MASK 0x01
#define VTD_EXTENDED_CAPABILITY_RID_PASID_SUPPORT(_) (((_) >> 49) & 0x01)
UINT64 Reserved4 : 2;
/**
* @brief Abort DMA Mode Support (RO)
*
* [Bit 52]
* * 0: Hardware does not support Abort DMA Mode.
* * 1: Hardware supports Abort DMA Mode.
*/
UINT64 AbortDmaModeSupport : 1;
#define VTD_EXTENDED_CAPABILITY_ABORT_DMA_MODE_SUPPORT_BIT 52
#define VTD_EXTENDED_CAPABILITY_ABORT_DMA_MODE_SUPPORT_FLAG 0x10000000000000
#define VTD_EXTENDED_CAPABILITY_ABORT_DMA_MODE_SUPPORT_MASK 0x01
#define VTD_EXTENDED_CAPABILITY_ABORT_DMA_MODE_SUPPORT(_) (((_) >> 52) & 0x01)
/**
* @brief RID_PRIV Support (RO)
*
* [Bit 53]
* * 0: Hardware does not support the RID_PRIV field in the scalable-mode
* context-entry. It uses the value of 0 for RID_PRIV.
* * 1: Hardware supports the RID_PRIV field in the scalable mode context-entry.
* Hardware implementations reporting Supervisor Request Support (SRS) as Clear also
* report this field as Clear.
*/
UINT64 RidPrivSupport : 1;
#define VTD_EXTENDED_CAPABILITY_RID_PRIV_SUPPORT_BIT 53
#define VTD_EXTENDED_CAPABILITY_RID_PRIV_SUPPORT_FLAG 0x20000000000000
#define VTD_EXTENDED_CAPABILITY_RID_PRIV_SUPPORT_MASK 0x01
#define VTD_EXTENDED_CAPABILITY_RID_PRIV_SUPPORT(_) (((_) >> 53) & 0x01)
UINT64 Reserved5 : 10;
};
UINT64 AsUInt;
} VTD_EXTENDED_CAPABILITY_REGISTER;
/**
* Register to control remapping hardware. If multiple control fields in this register need to be
* modified, software must serialize the modifications through multiple writes to this register. For
* example, to update a bit field in this register at offset X with value of Y, software must follow
* below steps:
* 1. Tmp = Read GSTS_REG
* 2. Status = (Tmp & 96FFFFFFh) // Reset the one-shot bits
* 3. if (Y) {Command = (Status | (1 << X))} else {Command = (Status & ~(1 << X))}
* 4. Write Command to GCMD_REG
* 5. Wait until GSTS_REG[X] indicates command is serviced.
*
* @remarks GCMD_REG
* @see VTd[10.4.4(Global Command Register)]
*/
#define VTD_GLOBAL_COMMAND 0x00000018
typedef union
{
struct
{
/* [Bits 22:0] Reserved. */
UINT32 Reserved1 : 23;
/**
* @brief Compatibility Format Interrupt (WO)
*
* [Bit 23] This field is valid only for Intel(R) 64 implementations supporting
* interrupt remapping. Software writes to this field to enable or disable
* Compatibility Format interrupts on Intel(R) 64 platforms. The value in this field
* is effective only when interrupt-remapping is enabled and Extended Interrupt Mode
* (x2APIC mode) is not enabled.
* * 0: Block Compatibility format interrupts.
* * 1: Process Compatibility format interrupts as pass-through (bypass interrupt
* remapping). Hardware reports the status of updating this field through the CFIS
* field in the Global Status register. The value returned on a read of this field
* is undefined.
*/
UINT32 CompatibilityFormatInterrupt : 1;
#define VTD_GLOBAL_COMMAND_COMPATIBILITY_FORMAT_INTERRUPT_BIT 23
#define VTD_GLOBAL_COMMAND_COMPATIBILITY_FORMAT_INTERRUPT_FLAG 0x800000
#define VTD_GLOBAL_COMMAND_COMPATIBILITY_FORMAT_INTERRUPT_MASK 0x01
#define VTD_GLOBAL_COMMAND_COMPATIBILITY_FORMAT_INTERRUPT(_) (((_) >> 23) & 0x01)
/**
* @brief Set Interrupt Remap Table Pointer (WO)
*
* [Bit 24] This field is valid only for implementations supporting interrupt
* remapping. Software sets this field to set/update the interrupt remapping table
* pointer used by hardware. The interrupt remapping table pointer is specified
* through the Interrupt Remapping Table Address (IRTA_REG) register. Hardware
* reports the status of the 'Set Interrupt Remap Table Pointer' operation through
* the IRTPS field in the Global Status register. The 'Set Interrupt Remap Table
* Pointer' operation must be performed before enabling or re-enabling (after
* disabling) interrupt-remapping hardware through the IRE field. Clearing this bit
* has no effect. The value returned on a read of this field is undefined.
*/
UINT32 SetInterruptRemapTablePointer : 1;
#define VTD_GLOBAL_COMMAND_SET_INTERRUPT_REMAP_TABLE_POINTER_BIT 24
#define VTD_GLOBAL_COMMAND_SET_INTERRUPT_REMAP_TABLE_POINTER_FLAG 0x1000000
#define VTD_GLOBAL_COMMAND_SET_INTERRUPT_REMAP_TABLE_POINTER_MASK 0x01
#define VTD_GLOBAL_COMMAND_SET_INTERRUPT_REMAP_TABLE_POINTER(_) (((_) >> 24) & 0x01)
/**
* @brief Interrupt Remapping Enable (WO)
*
* [Bit 25] This field is valid only for implementations supporting interrupt
* remapping.
* * 0: Disable interrupt-remapping hardware
* * 1: Enable interrupt-remapping hardware
* Hardware reports the status of the interrupt remapping enable operation through
* the IRES field in the Global Status register. There may be active interrupt
* requests in the platform when software updates this field. Hardware must enable
* or disable interrupt-remapping logic only at deterministic transaction
* boundaries, so that any in-flight interrupts are either subject to remapping or
* not at all. For implementations reporting the Enhanced Set Interrupt Remap Table
* Pointer Support (ESIRTPS) field as Set, hardware performs global invalidation on
* all Interrupt remapping caches as part of Interrupt Remapping Disable operation.
* Hardware implementations must drain any in-flight interrupts requests queued in
* the Root-Complex before completing the interrupt-remapping enable command and
* reflecting the status of the command through the IRES field in the Global Status
* register.
* The value returned on a read of this field is undefined.
*/
UINT32 InterruptRemappingEnable : 1;
#define VTD_GLOBAL_COMMAND_INTERRUPT_REMAPPING_ENABLE_BIT 25
#define VTD_GLOBAL_COMMAND_INTERRUPT_REMAPPING_ENABLE_FLAG 0x2000000
#define VTD_GLOBAL_COMMAND_INTERRUPT_REMAPPING_ENABLE_MASK 0x01
#define VTD_GLOBAL_COMMAND_INTERRUPT_REMAPPING_ENABLE(_) (((_) >> 25) & 0x01)
/**
* @brief Queued Invalidation Enable (WO)
*
* [Bit 26] This field is valid only for implementations supporting queued
* invalidations. Software writes to this field to enable or disable queued
* invalidations.
* * 0: Disable queued invalidations.
* * 1: Enable use of queued invalidations.
* Hardware reports the status of queued invalidation enable operation through QIES
* field in the Global Status register. The value returned on a read of this field
* is undefined.
*/
UINT32 QueuedInvalidationEnable : 1;
#define VTD_GLOBAL_COMMAND_QUEUED_INVALIDATION_ENABLE_BIT 26
#define VTD_GLOBAL_COMMAND_QUEUED_INVALIDATION_ENABLE_FLAG 0x4000000
#define VTD_GLOBAL_COMMAND_QUEUED_INVALIDATION_ENABLE_MASK 0x01
#define VTD_GLOBAL_COMMAND_QUEUED_INVALIDATION_ENABLE(_) (((_) >> 26) & 0x01)
/**
* @brief Write Buffer Flush (WO)
*
* [Bit 27] This bit is valid only for implementations requiring write buffer
* flushing. Software sets this field to request that hardware flush the
* Root-Complex internal write buffers. This is done to ensure any updates to the
* memory resident remapping structures are not held in any internal write posting
* buffers. Hardware reports the status of the write buffer flushing operation
* through the WBFS field in the Global Status register. Clearing this bit has no
* effect. The value returned on a read of this field is undefined.
*/
UINT32 WriteBufferFlush : 1;
#define VTD_GLOBAL_COMMAND_WRITE_BUFFER_FLUSH_BIT 27
#define VTD_GLOBAL_COMMAND_WRITE_BUFFER_FLUSH_FLAG 0x8000000
#define VTD_GLOBAL_COMMAND_WRITE_BUFFER_FLUSH_MASK 0x01
#define VTD_GLOBAL_COMMAND_WRITE_BUFFER_FLUSH(_) (((_) >> 27) & 0x01)
/**
* @brief Enable Advanced Fault Logging (WO)
*
* [Bit 28] This field is valid only for implementations supporting advanced fault
* logging. Software writes to this field to request hardware to enable or disable
* advanced fault logging:
* * 0: Disable advanced fault logging. In this case, translation faults are
* reported through the Fault Recording registers.
* * 1: Enable use of memory-resident fault log. When enabled, translation faults
* are recorded in the memory-resident log. The fault log pointer must be set in
* hardware (through the SFL field) before enabling advanced fault logging. Hardware
* reports the status of the advanced fault logging enable operation through the
* AFLS field in the Global Status register. The value returned on read of this
* field is undefined.
*/
UINT32 EnableAdvancedFaultLogging : 1;
#define VTD_GLOBAL_COMMAND_ENABLE_ADVANCED_FAULT_LOGGING_BIT 28
#define VTD_GLOBAL_COMMAND_ENABLE_ADVANCED_FAULT_LOGGING_FLAG 0x10000000
#define VTD_GLOBAL_COMMAND_ENABLE_ADVANCED_FAULT_LOGGING_MASK 0x01
#define VTD_GLOBAL_COMMAND_ENABLE_ADVANCED_FAULT_LOGGING(_) (((_) >> 28) & 0x01)
/**
* @brief Set Fault Log (WO)
*
* [Bit 29] This field is valid only for implementations supporting advanced fault
* logging. Software sets this field to request hardware to set/update the fault-log
* pointer used by hardware. The fault-log pointer is specified through Advanced
* Fault Log register. Hardware reports the status of the 'Set Fault Log' operation
* through the FLS field in the Global Status register. The fault log pointer must
* be set before enabling advanced fault logging (through EAFL field). Once advanced
* fault logging is enabled, the fault log pointer may be updated through this field
* while DMA remapping is active. Clearing this bit has no effect. The value
* returned on read of this field is undefined.
*/
UINT32 SetFaultLog : 1;
#define VTD_GLOBAL_COMMAND_SET_FAULT_LOG_BIT 29
#define VTD_GLOBAL_COMMAND_SET_FAULT_LOG_FLAG 0x20000000
#define VTD_GLOBAL_COMMAND_SET_FAULT_LOG_MASK 0x01
#define VTD_GLOBAL_COMMAND_SET_FAULT_LOG(_) (((_) >> 29) & 0x01)
/**
* @brief Set Root Table Pointer (WO)
*
* [Bit 30] Software sets this field to set/update the root-table pointer (and
* translation table mode) used by hardware. The root-table pointer (and translation
* table mode) is specified through the Root Table Address (RTADDR_REG) register.
* Hardware reports the status of the 'Set Root Table Pointer' operation through the
* RTPS field in the Global Status register. The 'Set Root Table Pointer' operation
* must be performed before enabling or re-enabling (after disabling) DMA remapping
* through the TE field. For details on invalidation that software may have to
* perform after the Clearing this bit has no effect. The value returned on a read
* of this field is undefined.
*/
UINT32 SetRootTablePointer : 1;
#define VTD_GLOBAL_COMMAND_SET_ROOT_TABLE_POINTER_BIT 30
#define VTD_GLOBAL_COMMAND_SET_ROOT_TABLE_POINTER_FLAG 0x40000000
#define VTD_GLOBAL_COMMAND_SET_ROOT_TABLE_POINTER_MASK 0x01
#define VTD_GLOBAL_COMMAND_SET_ROOT_TABLE_POINTER(_) (((_) >> 30) & 0x01)
/**
* @brief Translation Enable (WO)
*
* [Bit 31] Software writes to this field to request hardware to enable/disable DMA
* remapping:
* * 0: Disable DMA remapping
* * 1: Enable DMA remapping
* Hardware reports the status of the translation enable operation through the TES
* field in the Global Status register. There may be active DMA requests in the
* platform when software updates this field. Hardware must enable or disable
* remapping logic only at deterministic transaction boundaries, so that any
* in-flight transaction is either subject to remapping or not at all. Hardware
* implementations supporting DMA draining must drain any inflight DMA read/write
* requests queued within the Root-Complex before completing the translation enable
* command and reflecting the status of the command through the TES field in the
* Global Status register. For implementations reporting Scalable Mode Translation
* Support (SMTS) field as Set, hardware performs global invalidation on all DMA
* remapping translation caches as part of Translation Disable operation. The value
* returned on a read of this field is undefined.
*/
UINT32 TranslationEnable : 1;
#define VTD_GLOBAL_COMMAND_TRANSLATION_ENABLE_BIT 31
#define VTD_GLOBAL_COMMAND_TRANSLATION_ENABLE_FLAG 0x80000000
#define VTD_GLOBAL_COMMAND_TRANSLATION_ENABLE_MASK 0x01
#define VTD_GLOBAL_COMMAND_TRANSLATION_ENABLE(_) (((_) >> 31) & 0x01)
};
/* Raw 32-bit value of the whole register, for whole-register reads/writes. */
UINT32 AsUInt;
} VTD_GLOBAL_COMMAND_REGISTER;
/**
* Register to report general remapping hardware status.
*
* @remarks GSTS_REG
* @see VTd[10.4.5(Global Status Register)]
*/
#define VTD_GLOBAL_STATUS 0x0000001C
typedef union
{
struct
{
/* [Bits 22:0] Reserved. */
UINT32 Reserved1 : 23;
/**
* @brief Compatibility Format Interrupt Status (RO)
*
* [Bit 23] This field indicates the status of Compatibility format interrupts on
* Intel(R) 64 implementations supporting interrupt-remapping. The value reported in
* this field is applicable only when interrupt-remapping is enabled and extended
* interrupt mode (x2APIC mode) is not enabled.
* * 0: Compatibility format interrupts are blocked.
* * 1: Compatibility format interrupts are processed as pass-through (bypassing
* interrupt remapping).
*/
UINT32 CompatibilityFormatInterruptStatus : 1;
#define VTD_GLOBAL_STATUS_COMPATIBILITY_FORMAT_INTERRUPT_STATUS_BIT 23
#define VTD_GLOBAL_STATUS_COMPATIBILITY_FORMAT_INTERRUPT_STATUS_FLAG 0x800000
#define VTD_GLOBAL_STATUS_COMPATIBILITY_FORMAT_INTERRUPT_STATUS_MASK 0x01
#define VTD_GLOBAL_STATUS_COMPATIBILITY_FORMAT_INTERRUPT_STATUS(_) (((_) >> 23) & 0x01)
/**
* @brief Interrupt Remapping Table Pointer Status (RO)
*
* [Bit 24] This field indicates the status of the interrupt remapping table pointer
* in hardware. This field is cleared by hardware when software sets the SIRTP field
* in the Global Command register. This field is Set by hardware when hardware
* completes the 'Set Interrupt Remap Table Pointer' operation using the value
* provided in the Interrupt Remapping Table Address register.
*/
UINT32 InterruptRemappingTablePointerStatus : 1;
#define VTD_GLOBAL_STATUS_INTERRUPT_REMAPPING_TABLE_POINTER_STATUS_BIT 24
#define VTD_GLOBAL_STATUS_INTERRUPT_REMAPPING_TABLE_POINTER_STATUS_FLAG 0x1000000
#define VTD_GLOBAL_STATUS_INTERRUPT_REMAPPING_TABLE_POINTER_STATUS_MASK 0x01
#define VTD_GLOBAL_STATUS_INTERRUPT_REMAPPING_TABLE_POINTER_STATUS(_) (((_) >> 24) & 0x01)
/**
* @brief Interrupt Remapping Enable Status (RO)
*
* [Bit 25] This field indicates the status of Interrupt-remapping hardware.
* * 0: Interrupt-remapping hardware is not enabled
* * 1: Interrupt-remapping hardware is enabled
*/
UINT32 InterruptRemappingEnableStatus : 1;
#define VTD_GLOBAL_STATUS_INTERRUPT_REMAPPING_ENABLE_STATUS_BIT 25
#define VTD_GLOBAL_STATUS_INTERRUPT_REMAPPING_ENABLE_STATUS_FLAG 0x2000000
#define VTD_GLOBAL_STATUS_INTERRUPT_REMAPPING_ENABLE_STATUS_MASK 0x01
#define VTD_GLOBAL_STATUS_INTERRUPT_REMAPPING_ENABLE_STATUS(_) (((_) >> 25) & 0x01)
/**
* @brief Queued Invalidation Enable Status (RO)
*
* [Bit 26] This field indicates queued invalidation enable status.
* * 0: queued invalidation is not enabled
* * 1: queued invalidation is enabled
*/
UINT32 QueuedInvalidationEnableStatus : 1;
#define VTD_GLOBAL_STATUS_QUEUED_INVALIDATION_ENABLE_STATUS_BIT 26
#define VTD_GLOBAL_STATUS_QUEUED_INVALIDATION_ENABLE_STATUS_FLAG 0x4000000
#define VTD_GLOBAL_STATUS_QUEUED_INVALIDATION_ENABLE_STATUS_MASK 0x01
#define VTD_GLOBAL_STATUS_QUEUED_INVALIDATION_ENABLE_STATUS(_) (((_) >> 26) & 0x01)
/**
* @brief Write Buffer Flush Status (RO)
*
* [Bit 27] This field is valid only for implementations requiring write buffer
* flushing. This field indicates the status of the write buffer flush command. It
* is
* * Set by hardware when software sets the WBF field in the Global Command
* register.
* * Cleared by hardware when hardware completes the write buffer flushing
* operation.
*/
UINT32 WriteBufferFlushStatus : 1;
#define VTD_GLOBAL_STATUS_WRITE_BUFFER_FLUSH_STATUS_BIT 27
#define VTD_GLOBAL_STATUS_WRITE_BUFFER_FLUSH_STATUS_FLAG 0x8000000
#define VTD_GLOBAL_STATUS_WRITE_BUFFER_FLUSH_STATUS_MASK 0x01
#define VTD_GLOBAL_STATUS_WRITE_BUFFER_FLUSH_STATUS(_) (((_) >> 27) & 0x01)
/**
* @brief Advanced Fault Logging Status (RO)
*
* [Bit 28] This field is valid only for implementations supporting advanced fault
* logging. It indicates the advanced fault logging status:
* * 0: Advanced Fault Logging is not enabled
* * 1: Advanced Fault Logging is enabled
*/
UINT32 AdvancedFaultLoggingStatus : 1;
#define VTD_GLOBAL_STATUS_ADVANCED_FAULT_LOGGING_STATUS_BIT 28
#define VTD_GLOBAL_STATUS_ADVANCED_FAULT_LOGGING_STATUS_FLAG 0x10000000
#define VTD_GLOBAL_STATUS_ADVANCED_FAULT_LOGGING_STATUS_MASK 0x01
#define VTD_GLOBAL_STATUS_ADVANCED_FAULT_LOGGING_STATUS(_) (((_) >> 28) & 0x01)
/**
* @brief Fault Log Status (RO)
*
* [Bit 29] This field:
* * Is cleared by hardware when software Sets the SFL field in the Global Command
* register.
* * Is Set by hardware when hardware completes the 'Set Fault Log Pointer'
* operation using the value provided in the Advanced Fault Log register.
*/
UINT32 FaultLogStatus : 1;
#define VTD_GLOBAL_STATUS_FAULT_LOG_STATUS_BIT 29
#define VTD_GLOBAL_STATUS_FAULT_LOG_STATUS_FLAG 0x20000000
#define VTD_GLOBAL_STATUS_FAULT_LOG_STATUS_MASK 0x01
#define VTD_GLOBAL_STATUS_FAULT_LOG_STATUS(_) (((_) >> 29) & 0x01)
/**
* @brief Root Table Pointer Status (RO)
*
* [Bit 30] This field indicates the status of the root-table pointer in hardware.
* This field is cleared by hardware when software sets the SRTP field in the Global
* Command register. This field is set by hardware when hardware completes the 'Set
* Root Table Pointer' operation using the value provided in the Root Table Address
* register.
*/
UINT32 RootTablePointerStatus : 1;
#define VTD_GLOBAL_STATUS_ROOT_TABLE_POINTER_STATUS_BIT 30
#define VTD_GLOBAL_STATUS_ROOT_TABLE_POINTER_STATUS_FLAG 0x40000000
#define VTD_GLOBAL_STATUS_ROOT_TABLE_POINTER_STATUS_MASK 0x01
#define VTD_GLOBAL_STATUS_ROOT_TABLE_POINTER_STATUS(_) (((_) >> 30) & 0x01)
/**
* @brief Translation Enable Status (RO)
*
* [Bit 31] This field indicates the status of DMA-remapping hardware.
* * 0: DMA remapping is not enabled
* * 1: DMA remapping is enabled
*/
UINT32 TranslationEnableStatus : 1;
#define VTD_GLOBAL_STATUS_TRANSLATION_ENABLE_STATUS_BIT 31
#define VTD_GLOBAL_STATUS_TRANSLATION_ENABLE_STATUS_FLAG 0x80000000
#define VTD_GLOBAL_STATUS_TRANSLATION_ENABLE_STATUS_MASK 0x01
#define VTD_GLOBAL_STATUS_TRANSLATION_ENABLE_STATUS(_) (((_) >> 31) & 0x01)
};
/* Raw 32-bit value of the whole register, for whole-register reads. */
UINT32 AsUInt;
} VTD_GLOBAL_STATUS_REGISTER;
/**
* Register providing the base address of root-table and the translation table mode. Software
* programs the desired values in this register but these values take effect only after software
* executes Set Root Table Pointer command through the SRTP field in the Global Command Register
* (GCMD_REG).
*
* @remarks RTADDR_REG
* @see VTd[10.4.6(Root Table Address Register)]
*/
#define VTD_ROOT_TABLE_ADDRESS 0x00000020
typedef union
{
struct
{
/* [Bits 9:0] Reserved. */
UINT64 Reserved1 : 10;
/**
* @brief Translation Table Mode (RW)
*
* [Bits 11:10] This field specifies the translation mode used for DMA remapping.
* * 00: legacy mode - uses root tables and context tables.
* * 01: scalable mode - uses scalable-mode root tables and scalable mode context
* tables.
* * 10: reserved - in prior version of this specification, this encoding was used
* to enable extended mode which is no longer supported.
* * 11: abort-dma mode.
* For implementations reporting Enhanced SRTP Support (ESRTPS) field as Clear in
* the Capability register, software must not modify this field while DMA remapping
* is active (TES=1 in Global Status register). The value of this field takes effect
* only after software executes Set Root Table Pointer command.
*/
UINT64 TranslationTableMode : 2;
#define VTD_ROOT_TABLE_ADDRESS_TRANSLATION_TABLE_MODE_BIT 10
#define VTD_ROOT_TABLE_ADDRESS_TRANSLATION_TABLE_MODE_FLAG 0xC00
#define VTD_ROOT_TABLE_ADDRESS_TRANSLATION_TABLE_MODE_MASK 0x03
#define VTD_ROOT_TABLE_ADDRESS_TRANSLATION_TABLE_MODE(_) (((_) >> 10) & 0x03)
/**
* @brief Root Table Address (RW)
*
* [Bits 63:12] This field points to the base of the page-aligned, 4KB-sized
* root-table in system memory. Hardware may ignore and not implement bits 63:HAW,
* where HAW is the host address width. The value of this field takes effect only
* after software executes Set Root Table Pointer command.
*/
UINT64 RootTableAddress : 52;
#define VTD_ROOT_TABLE_ADDRESS_ROOT_TABLE_ADDRESS_BIT 12
#define VTD_ROOT_TABLE_ADDRESS_ROOT_TABLE_ADDRESS_FLAG 0xFFFFFFFFFFFFF000
#define VTD_ROOT_TABLE_ADDRESS_ROOT_TABLE_ADDRESS_MASK 0xFFFFFFFFFFFFF
#define VTD_ROOT_TABLE_ADDRESS_ROOT_TABLE_ADDRESS(_) (((_) >> 12) & 0xFFFFFFFFFFFFF)
};
/* Raw 64-bit value of the whole register, for whole-register reads/writes. */
UINT64 AsUInt;
} VTD_ROOT_TABLE_ADDRESS_REGISTER;
/**
* Register to manage context-cache.The act of writing the uppermost byte of the CCMD_REG with the
* ICC field Set causes the hardware to perform the context-cache invalidation.
*
* @remarks CCMD_REG
* @see VTd[10.4.7(Context Command Register)]
*/
#define VTD_CONTEXT_COMMAND 0x00000028
typedef union
{
struct
{
/**
* @brief Domain-ID (RW)
*
* [Bits 15:0] Indicates the id of the domain whose context-entries need to be
* selectively invalidated. This field must be programmed by software for both
* domain selective and device-selective invalidation requests. The Capability
* register reports the domain-id width supported by hardware. Software must ensure
* that the value written to this field is within this limit. Hardware ignores (and
* may not implement) bits 15:N, where N is the supported domain-id width reported
* in the Capability register.
*/
UINT64 DomainId : 16;
#define VTD_CONTEXT_COMMAND_DOMAIN_ID_BIT 0
#define VTD_CONTEXT_COMMAND_DOMAIN_ID_FLAG 0xFFFF
#define VTD_CONTEXT_COMMAND_DOMAIN_ID_MASK 0xFFFF
#define VTD_CONTEXT_COMMAND_DOMAIN_ID(_) (((_) >> 0) & 0xFFFF)
/**
* @brief Source-ID (WO)
*
* [Bits 31:16] Indicates the source-id of the device whose corresponding
* context-entry needs to be selectively invalidated.This field along with the FM
* field must be programmed by software for device-selective invalidation requests.
* The value returned on a read of this field is undefined.
*/
UINT64 SourceId : 16;
#define VTD_CONTEXT_COMMAND_SOURCE_ID_BIT 16
#define VTD_CONTEXT_COMMAND_SOURCE_ID_FLAG 0xFFFF0000
#define VTD_CONTEXT_COMMAND_SOURCE_ID_MASK 0xFFFF
#define VTD_CONTEXT_COMMAND_SOURCE_ID(_) (((_) >> 16) & 0xFFFF)
/**
* @brief Function Mask (WO)
*
* [Bits 33:32] Software may use the Function Mask to perform device-selective
* invalidations on behalf of devices supporting PCI Express Phantom Functions. This
* field specifies which bits of the function number portion (least significant
* three bits) of the SID field to mask when performing device selective
* invalidations.The following encodings are defined for this field:
* * 00: No bits in the SID field masked
* * 01: Mask bit 2 in the SID field
* * 10: Mask bits 2:1 in the SID field
* * 11: Mask bits 2:0 in the SID field
* The context-entries corresponding to the source-ids specified through the SID and
* FM fields must have the domain-id specified in the DID field. The value returned
* on a read of this field is undefined.
*/
UINT64 FunctionMask : 2;
#define VTD_CONTEXT_COMMAND_FUNCTION_MASK_BIT 32
#define VTD_CONTEXT_COMMAND_FUNCTION_MASK_FLAG 0x300000000
#define VTD_CONTEXT_COMMAND_FUNCTION_MASK_MASK 0x03
#define VTD_CONTEXT_COMMAND_FUNCTION_MASK(_) (((_) >> 32) & 0x03)
UINT64 Reserved1 : 25;
/**
* @brief Context Actual Invalidation Granularity (RO)
*
* [Bits 60:59] Hardware reports the granularity at which an invalidation request
* was processed through the CAIG field at the time of reporting invalidation
* completion (by clearing the ICC field). The following are the encodings for this
* field:
* * 00: Error. This indicates hardware detected an incorrect invalidation request
* and ignored the request, e.g., register based invalidation when Translation Table
* Mode (TTM) in Root Table Address Register is not programmed to legacy mode
* (RTADDR_REG.TTM!=00b).
* On hardware implementations with Major Version 6 or higher (VER_REG), all
* invalidation requests through this register are treated as incorrect invalidation
* requests. Software should use the Queued Invalidation interface to perform
* context-cache invalidations for such hardware implementations. Refer to
* Section 6.5 for more details.
* * 01: Global Invalidation performed. This could be in response to a global,
* domain-selective, or device-selective invalidation request.
* * 10: Domain-selective invalidation performed using the domain-id specified by
* software in the DID field. This could be in response to a domain-selective or
* device-selective invalidation request.
* * 11: Device-selective invalidation performed using the source-id and domain-id
* specified by software in the SID and FM fields. This can only be in response to a
* device-selective invalidation request.
*/
UINT64 ContextActualInvalidationGranularity : 2;
#define VTD_CONTEXT_COMMAND_CONTEXT_ACTUAL_INVALIDATION_GRANULARITY_BIT 59
#define VTD_CONTEXT_COMMAND_CONTEXT_ACTUAL_INVALIDATION_GRANULARITY_FLAG 0x1800000000000000
#define VTD_CONTEXT_COMMAND_CONTEXT_ACTUAL_INVALIDATION_GRANULARITY_MASK 0x03
#define VTD_CONTEXT_COMMAND_CONTEXT_ACTUAL_INVALIDATION_GRANULARITY(_) (((_) >> 59) & 0x03)
/**
* @brief Context Invalidation Request Granularity (RW)
*
* [Bits 62:61] Software provides the requested invalidation granularity through
* this field when setting the ICC field:
* * 00: Reserved.
* * 01: Global Invalidation request.
* * 10: Domain-selective invalidation request. The target domain-id must be
* specified in the DID field.
* * 11: Device-selective invalidation request. The target source-id(s) must be
* specified through the SID and FM fields, and the domain-id [that was programmed
* in the context-entry for these device(s)] must be provided in the DID field.
* Hardware implementations may process an invalidation request by performing
* invalidation at a coarser granularity than requested. Hardware indicates
* completion of the invalidation request by clearing the ICC field. At this time,
* hardware also indicates the granularity at which the actual invalidation was
* performed through the CAIG field.
*/
UINT64 ContextInvalidationRequestGranularity : 2;
#define VTD_CONTEXT_COMMAND_CONTEXT_INVALIDATION_REQUEST_GRANULARITY_BIT 61
#define VTD_CONTEXT_COMMAND_CONTEXT_INVALIDATION_REQUEST_GRANULARITY_FLAG 0x6000000000000000
#define VTD_CONTEXT_COMMAND_CONTEXT_INVALIDATION_REQUEST_GRANULARITY_MASK 0x03
#define VTD_CONTEXT_COMMAND_CONTEXT_INVALIDATION_REQUEST_GRANULARITY(_) (((_) >> 61) & 0x03)
/**
* @brief Invalidate Context-Cache (RW)
*
* [Bit 63] Software requests invalidation of context-cache by setting this field.
* Software must also set the requested invalidation granularity by programming the
* CIRG field. Software must read back and check the ICC field is Clear to confirm
* the invalidation is complete. Software must not update this register when this
* field is Set. Hardware clears the ICC field to indicate the invalidation request
* is complete.Hardware also indicates the granularity at which the invalidation
* operation was performed through the CAIG field. Software must submit a
* context-cache invalidation request through this field only when there are no
* invalidation requests pending at this remapping hardware unit. Since information
* from the context-cache may be used by hardware to tag IOTLB entries, software
* must perform domain-selective (or global) invalidation of IOTLB after the
* context-cache invalidation has completed. Hardware implementations reporting a
* write-buffer flushing requirement (RWBF=1 in the Capability register) must
* implicitly perform a write buffer flush before invalidating the context-cache.
* When Translation Table Mode field in Root Table Address register is not setup as
* legacy mode (RTADDR_REG.TTM!=00b), hardware will ignore the value provided by
* software in this register, treat it as an incorrect invalidation request, and
* report a value of 00b in CAIG field.
*/
UINT64 InvalidateContextCache : 1;
#define VTD_CONTEXT_COMMAND_INVALIDATE_CONTEXT_CACHE_BIT 63
#define VTD_CONTEXT_COMMAND_INVALIDATE_CONTEXT_CACHE_FLAG 0x8000000000000000
#define VTD_CONTEXT_COMMAND_INVALIDATE_CONTEXT_CACHE_MASK 0x01
#define VTD_CONTEXT_COMMAND_INVALIDATE_CONTEXT_CACHE(_) (((_) >> 63) & 0x01)
};
UINT64 AsUInt;
} VTD_CONTEXT_COMMAND_REGISTER;
/**
 * Register to provide the DMA address whose corresponding IOTLB entry needs to be invalidated
 * through the corresponding IOTLB Invalidate register. This register is a write-only register. A
 * value returned on a read of this register is undefined.
 *
 * @remarks IVA_REG
 * @see VTd[10.4.8.2(Invalidate Address Register)]
 */
#define VTD_INVALIDATE_ADDRESS 0x00000000
typedef union
{
  struct
  {
    /**
     * @brief Address Mask (WO)
     *
     * [Bits 5:0] The value in this field specifies the number of low order bits of the
     * ADDR field that must be masked for the invalidation operation. This field enables
     * software to request invalidation of contiguous mappings for size-aligned regions.
     * When invalidating mappings for large-pages, software must specify the appropriate
     * mask value. For example, when invalidating mapping for a 2MB page, software must
     * specify an address mask value of at least 9. Hardware implementations report the
     * maximum supported address mask value through the Capability register. A value
     * returned on a read of this field is undefined.
     */
    UINT64 AddressMask : 6;
#define VTD_INVALIDATE_ADDRESS_ADDRESS_MASK_BIT 0
#define VTD_INVALIDATE_ADDRESS_ADDRESS_MASK_FLAG 0x3F
#define VTD_INVALIDATE_ADDRESS_ADDRESS_MASK_MASK 0x3F
#define VTD_INVALIDATE_ADDRESS_ADDRESS_MASK(_) (((_) >> 0) & 0x3F)
    /**
     * @brief Invalidation Hint (WO)
     *
     * [Bit 6] The field provides hints to hardware about preserving or flushing the
     * nonleaf (context-entry) entries that may be cached in hardware:
     * * 0: Software may have modified both leaf and non-leaf second-level
     * paging-structure entries corresponding to mappings specified in the ADDR and AM
     * fields. On a page-selective-within-domain invalidation request, hardware must
     * invalidate the cached entries associated with the mappings specified by DID, ADDR
     * and AM fields, in both IOTLB and paging-structure caches.
     * * 1: Software has not modified any second-level non-leaf paging entries
     * associated with the mappings specified by the ADDR and AM fields. On a
     * page-selective-within-domain invalidation request, hardware may preserve the
     * cached second-level mappings in paging-structure caches. A value returned on a
     * read of this field is undefined.
     */
    UINT64 InvalidationHint : 1;
#define VTD_INVALIDATE_ADDRESS_INVALIDATION_HINT_BIT 6
#define VTD_INVALIDATE_ADDRESS_INVALIDATION_HINT_FLAG 0x40
#define VTD_INVALIDATE_ADDRESS_INVALIDATION_HINT_MASK 0x01
#define VTD_INVALIDATE_ADDRESS_INVALIDATION_HINT(_) (((_) >> 6) & 0x01)
    UINT64 Reserved1 : 5;
    /**
     * @brief Address (WO)
     *
     * [Bits 63:12] Software provides the second-level-input-address that needs to be
     * page selectively invalidated. To make a page-selective-within-domain invalidation
     * request to hardware, software must first write the appropriate fields in this
     * register, and then issue the page-selective-within-domain invalidate command
     * through the IOTLB_REG. Hardware ignores bits 63:N, where N is the maximum guest
     * address width (MGAW) supported. A value returned on a read of this field is
     * undefined.
     */
    UINT64 PageAddress : 52;
#define VTD_INVALIDATE_ADDRESS_PAGE_ADDRESS_BIT 12
#define VTD_INVALIDATE_ADDRESS_PAGE_ADDRESS_FLAG 0xFFFFFFFFFFFFF000
#define VTD_INVALIDATE_ADDRESS_PAGE_ADDRESS_MASK 0xFFFFFFFFFFFFF
#define VTD_INVALIDATE_ADDRESS_PAGE_ADDRESS(_) (((_) >> 12) & 0xFFFFFFFFFFFFF)
  };
  UINT64 AsUInt;
} VTD_INVALIDATE_ADDRESS_REGISTER;
/**
 * Register to invalidate IOTLB. The act of writing the upper byte of the IOTLB_REG with the IVT
 * field Set causes the hardware to perform the IOTLB invalidation.
 *
 * @remarks IOTLB_REG
 * @see VTd[10.4.8.1(IOTLB Invalidate Register)]
 */
#define VTD_IOTLB_INVALIDATE 0x00000008
typedef union
{
  struct
  {
    UINT64 Reserved1 : 32;
    /**
     * @brief Domain-ID (RW)
     *
     * [Bits 47:32] Indicates the ID of the domain whose IOTLB entries need to be
     * selectively invalidated. This field must be programmed by software for
     * domain-selective and page-selective invalidation requests. The Capability register
     * reports the domain-id width supported by hardware. Software must ensure that the
     * value written to this field is within this limit. Hardware may ignore and not
     * implement bits 47:(32+N), where N is the supported domain-id width reported in
     * the Capability register.
     */
    UINT64 DomainId : 16;
#define VTD_IOTLB_INVALIDATE_DOMAIN_ID_BIT 32
#define VTD_IOTLB_INVALIDATE_DOMAIN_ID_FLAG 0xFFFF00000000
#define VTD_IOTLB_INVALIDATE_DOMAIN_ID_MASK 0xFFFF
#define VTD_IOTLB_INVALIDATE_DOMAIN_ID(_) (((_) >> 32) & 0xFFFF)
    /**
     * @brief Drain Writes (RW)
     *
     * [Bit 48] This field is ignored by hardware if the DWD field is reported as Clear
     * in the Capability register. When the DWD field is reported as Set in the
     * Capability register, the following encodings are supported for this field:
     * * 0: Hardware may complete the IOTLB invalidation without draining DMA write
     * requests.
     * * 1: Hardware must drain relevant translated DMA write requests.
     */
    UINT64 DrainWrites : 1;
#define VTD_IOTLB_INVALIDATE_DRAIN_WRITES_BIT 48
#define VTD_IOTLB_INVALIDATE_DRAIN_WRITES_FLAG 0x1000000000000
#define VTD_IOTLB_INVALIDATE_DRAIN_WRITES_MASK 0x01
#define VTD_IOTLB_INVALIDATE_DRAIN_WRITES(_) (((_) >> 48) & 0x01)
    /**
     * @brief Drain Reads (RW)
     *
     * [Bit 49] This field is ignored by hardware if the DRD field is reported as Clear
     * in the Capability register. When the DRD field is reported as Set in the
     * Capability register, the following encodings are supported for this field:
     * * 0: Hardware may complete the IOTLB invalidation without draining DMA read
     * requests.
     * * 1: Hardware must drain DMA read requests.
     */
    UINT64 DrainReads : 1;
#define VTD_IOTLB_INVALIDATE_DRAIN_READS_BIT 49
#define VTD_IOTLB_INVALIDATE_DRAIN_READS_FLAG 0x2000000000000
#define VTD_IOTLB_INVALIDATE_DRAIN_READS_MASK 0x01
#define VTD_IOTLB_INVALIDATE_DRAIN_READS(_) (((_) >> 49) & 0x01)
    UINT64 Reserved2 : 7;
    /**
     * @brief IOTLB Actual Invalidation Granularity (RO)
     *
     * [Bits 58:57] Hardware reports the granularity at which an invalidation request
     * was processed through this field when reporting invalidation completion (by
     * clearing the IVT field). The following are the encodings for this field.
     * * 00: Error. This indicates hardware detected an incorrect invalidation request
     * and ignored the request, e.g., register based invalidation when Translation Table
     * Mode (TTM) in Root Table Address Register is not programmed to legacy mode
     * (RTADDR_REG.TTM!=00b), detected an unsupported address mask value in Invalidate
     * Address register for page-selective invalidation requests. On hardware
     * implementations with Major Version 6 or higher (VER_REG), all invalidation
     * requests through this register are treated as incorrect invalidation requests.
     * Software should use the Queued Invalidation interface to perform IOTLB
     * invalidations for such hardware implementations. Refer to Section 6.5 for more
     * details.
     * * 01: Global Invalidation performed. This could be in response to a global,
     * domain-selective, or page-selective invalidation request.
     * * 10: Domain-selective invalidation performed using the domain-id specified by
     * software in the DID field. This could be in response to a domain-selective or a
     * page-selective invalidation request.
     * * 11: Page-selective-within-domain invalidation performed using the address, mask
     * and hint specified by software in the Invalidate Address register and domain-id
     * specified in DID field. This can be in response to a page-selective-within-domain
     * invalidation request.
     */
    UINT64 IotlbActualInvalidationGranularity : 2;
#define VTD_IOTLB_INVALIDATE_IOTLB_ACTUAL_INVALIDATION_GRANULARITY_BIT 57
#define VTD_IOTLB_INVALIDATE_IOTLB_ACTUAL_INVALIDATION_GRANULARITY_FLAG 0x600000000000000
#define VTD_IOTLB_INVALIDATE_IOTLB_ACTUAL_INVALIDATION_GRANULARITY_MASK 0x03
#define VTD_IOTLB_INVALIDATE_IOTLB_ACTUAL_INVALIDATION_GRANULARITY(_) (((_) >> 57) & 0x03)
    UINT64 Reserved3 : 1;
    /**
     * @brief IOTLB Invalidation Request Granularity (RW)
     *
     * [Bits 61:60] When requesting hardware to invalidate the IOTLB (by setting the IVT
     * field), software writes the requested invalidation granularity through this
     * field. The following are the encodings for the field.
     * * 00: Reserved.
     * * 01: Global invalidation request.
     * * 10: Domain-selective invalidation request. The target domain-id must be
     * specified in the DID field.
     * * 11: Page-selective-within-domain invalidation request. The target address,
     * mask, and invalidation hint must be specified in the Invalidate Address register,
     * and the domain-id must be provided in the DID field. Hardware implementations may
     * process an invalidation request by performing invalidation at a coarser
     * granularity than requested. Hardware indicates completion of the invalidation
     * request by clearing the IVT field. At that time, the granularity at which actual
     * invalidation was performed is reported through the IAIG field.
     */
    UINT64 IotlbInvalidationRequestGranularity : 2;
#define VTD_IOTLB_INVALIDATE_IOTLB_INVALIDATION_REQUEST_GRANULARITY_BIT 60
#define VTD_IOTLB_INVALIDATE_IOTLB_INVALIDATION_REQUEST_GRANULARITY_FLAG 0x3000000000000000
#define VTD_IOTLB_INVALIDATE_IOTLB_INVALIDATION_REQUEST_GRANULARITY_MASK 0x03
#define VTD_IOTLB_INVALIDATE_IOTLB_INVALIDATION_REQUEST_GRANULARITY(_) (((_) >> 60) & 0x03)
    UINT64 Reserved4 : 1;
    /**
     * @brief Invalidate IOTLB (RW)
     *
     * [Bit 63] Software requests IOTLB invalidation by setting this field. Software
     * must also set the requested invalidation granularity by programming the IIRG
     * field. Hardware clears the IVT field to indicate the invalidation request is
     * complete. Hardware also indicates the granularity at which the invalidation
     * operation was performed through the IAIG field. Software must not submit another
     * invalidation request through this register while the IVT field is Set, nor update
     * the associated Invalidate Address register. Software must not submit IOTLB
     * invalidation requests when there is a context-cache invalidation request pending
     * at this remapping hardware unit. Hardware implementations reporting a
     * write-buffer flushing requirement (RWBF=1 in Capability register) must implicitly
     * perform a write buffer flushing before invalidating the IOTLB. Refer to
     * Section 6.8 for write buffer flushing requirements. When Translation Table Mode
     * field in Root Table Address registers is not setup as legacy mode
     * (RTADDR_REG.TTM!=00b), hardware will ignore the value provided by software in
     * this register, treat it as an incorrect invalidation request, and report a value
     * of 00b in IAIG field.
     */
    UINT64 InvalidateIotlb : 1;
#define VTD_IOTLB_INVALIDATE_INVALIDATE_IOTLB_BIT 63
#define VTD_IOTLB_INVALIDATE_INVALIDATE_IOTLB_FLAG 0x8000000000000000
#define VTD_IOTLB_INVALIDATE_INVALIDATE_IOTLB_MASK 0x01
#define VTD_IOTLB_INVALIDATE_INVALIDATE_IOTLB(_) (((_) >> 63) & 0x01)
  };
  UINT64 AsUInt;
} VTD_IOTLB_INVALIDATE_REGISTER;
/**
* @}
*/
/**
 * Extended Control Register 0 (XCR0), the XSAVE feature-enable mask read
 * and written via XGETBV/XSETBV with ECX = 0.
 */
typedef union
{
  struct
  {
    /**
     * [Bit 0] Bit 0 must be 1. An attempt to write 0 to this bit causes a
     * general-protection exception.
     */
    UINT64 X87 : 1;
#define XCR0_X87_BIT 0
#define XCR0_X87_FLAG 0x01
#define XCR0_X87_MASK 0x01
#define XCR0_X87(_) (((_) >> 0) & 0x01)
    /**
     * [Bit 1] If 1, the XSAVE feature set can be used to manage MXCSR and the XMM
     * registers (XMM0-XMM15 in 64-bit mode; otherwise XMM0-XMM7).
     */
    UINT64 Sse : 1;
#define XCR0_SSE_BIT 1
#define XCR0_SSE_FLAG 0x02
#define XCR0_SSE_MASK 0x01
#define XCR0_SSE(_) (((_) >> 1) & 0x01)
    /**
     * [Bit 2] If 1, AVX instructions can be executed and the XSAVE feature set can be
     * used to manage the upper halves of the YMM registers (YMM0-YMM15 in 64-bit mode;
     * otherwise YMM0-YMM7).
     */
    UINT64 Avx : 1;
#define XCR0_AVX_BIT 2
#define XCR0_AVX_FLAG 0x04
#define XCR0_AVX_MASK 0x01
#define XCR0_AVX(_) (((_) >> 2) & 0x01)
    /**
     * [Bit 3] If 1, MPX instructions can be executed and the XSAVE feature set can be
     * used to manage the bounds registers BND0-BND3.
     */
    UINT64 Bndreg : 1;
#define XCR0_BNDREG_BIT 3
#define XCR0_BNDREG_FLAG 0x08
#define XCR0_BNDREG_MASK 0x01
#define XCR0_BNDREG(_) (((_) >> 3) & 0x01)
    /**
     * [Bit 4] If 1, MPX instructions can be executed and the XSAVE feature set can be
     * used to manage the BNDCFGU and BNDSTATUS registers.
     */
    UINT64 Bndcsr : 1;
#define XCR0_BNDCSR_BIT 4
#define XCR0_BNDCSR_FLAG 0x10
#define XCR0_BNDCSR_MASK 0x01
#define XCR0_BNDCSR(_) (((_) >> 4) & 0x01)
    /**
     * [Bit 5] If 1, AVX-512 instructions can be executed and the XSAVE feature set can
     * be used to manage the opmask registers k0-k7.
     */
    UINT64 Opmask : 1;
#define XCR0_OPMASK_BIT 5
#define XCR0_OPMASK_FLAG 0x20
#define XCR0_OPMASK_MASK 0x01
#define XCR0_OPMASK(_) (((_) >> 5) & 0x01)
    /**
     * [Bit 6] If 1, AVX-512 instructions can be executed and the XSAVE feature set can
     * be used to manage the upper halves of the lower ZMM registers (ZMM0-ZMM15 in
     * 64-bit mode; otherwise ZMM0-ZMM7).
     */
    UINT64 ZmmHi256 : 1;
#define XCR0_ZMM_HI256_BIT 6
#define XCR0_ZMM_HI256_FLAG 0x40
#define XCR0_ZMM_HI256_MASK 0x01
#define XCR0_ZMM_HI256(_) (((_) >> 6) & 0x01)
    /**
     * [Bit 7] If 1, AVX-512 instructions can be executed and the XSAVE feature set can
     * be used to manage the upper ZMM registers (ZMM16-ZMM31, only in 64-bit mode).
     */
    UINT64 ZmmHi16 : 1;
#define XCR0_ZMM_HI16_BIT 7
#define XCR0_ZMM_HI16_FLAG 0x80
#define XCR0_ZMM_HI16_MASK 0x01
#define XCR0_ZMM_HI16(_) (((_) >> 7) & 0x01)
    UINT64 Reserved1 : 1;
    /**
     * [Bit 9] If 1, the XSAVE feature set can be used to manage the PKRU register.
     */
    UINT64 Pkru : 1;
#define XCR0_PKRU_BIT 9
#define XCR0_PKRU_FLAG 0x200
#define XCR0_PKRU_MASK 0x01
#define XCR0_PKRU(_) (((_) >> 9) & 0x01)
    UINT64 Reserved2 : 54;
  };
  UINT64 AsUInt;
} XCR0;
/**
* @}
*/
#if defined(_MSC_EXTENSIONS)
# pragma warning(pop)
#endif
================================================
FILE: driver/imports.c
================================================
#include "imports.h"
#include "common.h"
#include "driver.h"
#include "crypt.h"
#include
#include "lib/stdlib.h"
/**
 * Manually resolve an export from the ntoskrnl.exe export address table.
 *
 * The PE headers are walked by hand rather than going through an API such
 * as MmGetSystemRoutineAddress, so the resolution itself does not depend
 * on any routine we later call through the encrypted import table.
 *
 * The export directory holds three parallel arrays:
 *   - AddressOfNames:        RVAs of the export name strings
 *   - AddressOfNameOrdinals: maps a name index to an ordinal
 *   - AddressOfFunctions:    maps an ordinal to a function RVA
 *
 * For each named export the name is compared against ExportName; on a
 * match the chain name index -> ordinal -> RVA -> VA is followed.
 *
 * @param DriverObject Driver object used by FindDriverBaseNoApi to locate
 *                     the ntoskrnl image base without API calls.
 * @param ExportName   ANSI name of the export to resolve.
 *
 * @return Virtual address of the export, or NULL if the image base or the
 *         export cannot be found.
 */
PVOID
ImpResolveNtImport(PDRIVER_OBJECT DriverObject, PCZPSTR ExportName)
{
    PVOID image_base = NULL;
    PIMAGE_DOS_HEADER dos_header = NULL;
    PLOCAL_NT_HEADER nt_header = NULL;
    PIMAGE_OPTIONAL_HEADER64 optional_header = NULL;
    PIMAGE_DATA_DIRECTORY data_dir = NULL;
    PIMAGE_EXPORT_DIRECTORY export_dir = NULL;
    PUINT32 export_name_table = NULL;
    PCHAR name = NULL;
    PUINT16 ordinals_table = NULL;
    PUINT32 export_addr_table = NULL;
    UINT32 ordinal = 0;
    PVOID target_function_addr = NULL;
    UINT32 export_offset = 0;
    image_base = FindDriverBaseNoApi(DriverObject, L"ntoskrnl.exe");
    if (!image_base) {
        DEBUG_ERROR("FindDriverBaseNoApi failed with no status");
        return NULL;
    }
    /* e_lfanew gives the file offset of the NT headers; the export data
     * directory then points us at the export directory itself. */
    dos_header = (PIMAGE_DOS_HEADER)image_base;
    nt_header = (struct _IMAGE_NT_HEADERS64*)((UINT64)image_base +
                                              dos_header->e_lfanew);
    optional_header = (PIMAGE_OPTIONAL_HEADER64)&nt_header->OptionalHeader;
    data_dir = &optional_header->DataDirectory[IMAGE_DIRECTORY_ENTRY_EXPORT];
    /* An image without an export directory has nothing to resolve. */
    if (!data_dir->VirtualAddress)
        return NULL;
    export_dir = (PIMAGE_EXPORT_DIRECTORY)((UINT64)image_base +
                                           data_dir->VirtualAddress);
    export_name_table =
        (PUINT32)((UINT64)image_base + export_dir->AddressOfNames);
    ordinals_table =
        (PUINT16)((UINT64)image_base + export_dir->AddressOfNameOrdinals);
    export_addr_table =
        (PUINT32)((UINT64)image_base + export_dir->AddressOfFunctions);
    /* NumberOfNames is unsigned; use an unsigned index to avoid a
     * signed/unsigned comparison. */
    for (UINT32 index = 0; index < export_dir->NumberOfNames; index++) {
        name = (PCHAR)((UINT64)image_base + export_name_table[index]);
        /* IntCompareString is strcmp-style here: non-zero means no match. */
        if (IntCompareString(name, ExportName))
            continue;
        ordinal = ordinals_table[index];
        export_offset = export_addr_table[ordinal];
        target_function_addr = (PVOID)((UINT64)image_base + export_offset);
        return target_function_addr;
    }
    return NULL;
}
/*
 * Names of every ntoskrnl export the driver resolves dynamically.
 *
 * ORDER IS SIGNIFICANT: ImpResolveDynamicImports stores the resolved
 * pointers sequentially into driver_imports, so entry i here must line up
 * with the i-th slot of DRIVER_IMPORTS (and the *_INDEX constants used by
 * the Imp* wrappers below).
 *
 * The strings in this array need to be hashed at compile time, then we can
 * use the same hash function to compare when we walk the export table.
 */
#define NT_IMPORT_MAX_LENGTH 128
#define NT_IMPORT_COUNT 79
CHAR NT_IMPORTS[NT_IMPORT_COUNT][NT_IMPORT_MAX_LENGTH] = {
    "ObDereferenceObject",
    "PsLookupThreadByThreadId",
    "MmIsAddressValid",
    "PsSetCreateProcessNotifyRoutine",
    "PsRemoveCreateThreadNotifyRoutine",
    "PsGetCurrentThreadId",
    "PsGetProcessId",
    "PsLookupProcessByProcessId",
    "ExEnumHandleTable",
    "ObGetObjectType",
    "ExfUnblockPushLock",
    "PsGetProcessImageFileName",
    "strstr",
    "RtlInitUnicodeString",
    "RtlQueryRegistryValues",
    "MmGetSystemRoutineAddress",
    "RtlUnicodeStringToAnsiString",
    "RtlCopyUnicodeString",
    "RtlFreeAnsiString",
    "KeInitializeGuardedMutex",
    "IoCreateDevice",
    "IoCreateSymbolicLink",
    "IoDeleteDevice",
    "IoDeleteSymbolicLink",
    "ObRegisterCallbacks",
    "ObUnRegisterCallbacks",
    "PsSetCreateThreadNotifyRoutine",
    "KeRevertToUserAffinityThreadEx",
    "KeSetSystemAffinityThreadEx",
    "strnlen",
    "RtlInitAnsiString",
    "RtlAnsiStringToUnicodeString",
    "IoGetCurrentProcess",
    "RtlGetVersion",
    "RtlCompareMemory",
    "ExGetSystemFirmwareTable",
    "IoAllocateWorkItem",
    "IoFreeWorkItem",
    "IoQueueWorkItem",
    "ZwOpenFile",
    "ZwClose",
    "ZwCreateSection",
    "ZwMapViewOfSection",
    "ZwUnmapViewOfSection",
    "MmCopyMemory",
    "ZwDeviceIoControlFile",
    "KeStackAttachProcess",
    "KeUnstackDetachProcess",
    "KeWaitForSingleObject",
    "PsCreateSystemThread",
    "IofCompleteRequest",
    "ObReferenceObjectByHandle",
    "KeDelayExecutionThread",
    "KeRegisterNmiCallback",
    "KeDeregisterNmiCallback",
    "KeQueryActiveProcessorCount",
    "ExAcquirePushLockExclusiveEx",
    "ExReleasePushLockExclusiveEx",
    "PsGetThreadId",
    "RtlCaptureStackBackTrace",
    "ZwOpenDirectoryObject",
    "KeInitializeAffinityEx",
    "KeAddProcessorAffinityEx",
    "RtlQueryModuleInformation",
    "KeInitializeApc",
    "KeInsertQueueApc",
    "KeGenericCallDpc",
    "KeSignalCallDpcDone",
    "MmGetPhysicalMemoryRangesEx2",
    "MmGetVirtualForPhysical",
    "ObfReferenceObject",
    "ExFreePoolWithTag",
    "ExAllocatePool2",
    "KeReleaseGuardedMutex",
    "KeAcquireGuardedMutex",
    "DbgPrintEx",
    "RtlCompareUnicodeString",
    "RtlFreeUnicodeString",
    /* NOTE(review): duplicate of entry 11 ("PsGetProcessImageFileName").
     * The export resolves twice, which is harmless but wastes a slot —
     * verify whether a different import was intended here. */
    "PsGetProcessImageFileName"};
/* Resolved-pointer table; populated by ImpResolveDynamicImports and then
 * encrypted in place via CryptEncryptImportsArray. */
DRIVER_IMPORTS driver_imports = {0};
/**
 * Resolve every routine named in NT_IMPORTS and store the pointers
 * sequentially into driver_imports, then encrypt the whole table.
 *
 * DRIVER_IMPORTS is treated as a flat array of UINT64 slots, so the order
 * of NT_IMPORTS must match the *_INDEX constants used by the Imp* wrappers.
 *
 * @param DriverObject Passed through to ImpResolveNtImport.
 *
 * @return STATUS_SUCCESS when every import resolved, otherwise
 *         STATUS_UNSUCCESSFUL (the table is left partially filled and
 *         unencrypted).
 */
NTSTATUS
ImpResolveDynamicImports(_In_ PDRIVER_OBJECT DriverObject)
{
    PUINT64 imports_array = (PUINT64)&driver_imports;
    for (UINT32 index = 0; index < NT_IMPORT_COUNT; index++) {
        PVOID import = ImpResolveNtImport(DriverObject, NT_IMPORTS[index]);
        if (!import)
            return STATUS_UNSUCCESSFUL;
        /* Explicit cast: C forbids implicit pointer-to-integer conversion. */
        imports_array[index] = (UINT64)import;
    }
    CryptEncryptImportsArray(&driver_imports, IMPORTS_LENGTH);
    return STATUS_SUCCESS;
}
/* Fetch the decrypted ObDereferenceObject pointer from the import table
 * and invoke it. */
VOID
ImpObDereferenceObject(_In_ PVOID Object)
{
    pObDereferenceObject routine =
        (pObDereferenceObject)CryptDecryptImportsArrayEntry(
            &driver_imports, IMPORTS_LENGTH, OB_DEREFERENCE_OBJECT_INDEX);
    routine(Object);
}
/* Fetch the decrypted PsLookupThreadByThreadId pointer and forward. */
NTSTATUS
ImpPsLookupThreadByThreadId(_In_ HANDLE ThreadId, _Out_ PETHREAD* Thread)
{
    pPsLookupThreadByThreadId routine =
        (pPsLookupThreadByThreadId)CryptDecryptImportsArrayEntry(
            &driver_imports,
            IMPORTS_LENGTH,
            PS_LOOKUP_THREAD_BY_THREAD_ID_INDEX);
    return routine(ThreadId, Thread);
}
/* Fetch the decrypted MmIsAddressValid pointer and forward. */
BOOLEAN
ImpMmIsAddressValid(_In_ PVOID VirtualAddress)
{
    pMmIsAddressValid routine =
        (pMmIsAddressValid)CryptDecryptImportsArrayEntry(
            &driver_imports, IMPORTS_LENGTH, MM_IS_ADDRESS_VALID_INDEX);
    return routine(VirtualAddress);
}
/* Fetch the decrypted PsSetCreateProcessNotifyRoutine pointer and forward. */
NTSTATUS
ImpPsSetCreateProcessNotifyRoutine(
    _In_ PCREATE_PROCESS_NOTIFY_ROUTINE NotifyRoutine, _In_ BOOLEAN Remove)
{
    pPsSetCreateProcessNotifyRoutine routine =
        (pPsSetCreateProcessNotifyRoutine)CryptDecryptImportsArrayEntry(
            &driver_imports,
            IMPORTS_LENGTH,
            PS_SET_CREATE_PROCESS_NOTIFY_ROUTINE_INDEX);
    return routine(NotifyRoutine, Remove);
}
/* Fetch the decrypted PsRemoveCreateThreadNotifyRoutine pointer and forward. */
NTSTATUS
ImpPsRemoveCreateThreadNotifyRoutine(
    _In_ PCREATE_THREAD_NOTIFY_ROUTINE NotifyRoutine)
{
    pPsRemoveCreateThreadNotifyRoutine routine =
        (pPsRemoveCreateThreadNotifyRoutine)CryptDecryptImportsArrayEntry(
            &driver_imports,
            IMPORTS_LENGTH,
            PS_REMOVE_CREATE_THREAD_NOTIFY_ROUTINE_INDEX);
    return routine(NotifyRoutine);
}
HANDLE
ImpPsGetCurrentThreadId()
{
pPsGetCurrentThreadId impPsGetCurrentThreadId =
(pPsGetCurrentThreadId)CryptDecryptImportsArrayEntry(
&driver_imports, IMPORTS_LENGTH, PS_GET_CURRENT_THREAD_ID_INDEX);
return impPsGetCurrentThreadId();
}
/* Fetch the decrypted PsGetProcessId pointer and forward. */
HANDLE
ImpPsGetProcessId(_In_ PEPROCESS Process)
{
    pPsGetProcessId routine =
        (pPsGetProcessId)CryptDecryptImportsArrayEntry(
            &driver_imports, IMPORTS_LENGTH, PS_GET_PROCESS_ID_INDEX);
    return routine(Process);
}
/* Fetch the decrypted PsLookupProcessByProcessId pointer and forward. */
NTSTATUS
ImpPsLookupProcessByProcessId(_In_ HANDLE ProcessId, _Out_ PEPROCESS* Process)
{
    pPsLookupProcessByProcessId routine =
        (pPsLookupProcessByProcessId)CryptDecryptImportsArrayEntry(
            &driver_imports,
            IMPORTS_LENGTH,
            PS_LOOKUP_PROCESS_BY_PROCESS_ID_INDEX);
    return routine(ProcessId, Process);
}
/* Fetch the decrypted ExEnumHandleTable pointer and forward all arguments. */
PVOID
ImpExEnumHandleTable(_In_ PHANDLE_TABLE HandleTable,
                     _In_ PVOID Callback,
                     _In_opt_ PVOID Context,
                     _Out_opt_ PHANDLE Handle)
{
    pExEnumHandleTable routine =
        (pExEnumHandleTable)CryptDecryptImportsArrayEntry(
            &driver_imports, IMPORTS_LENGTH, EX_ENUM_HANDLE_TABLE_INDEX);
    return routine(HandleTable, Callback, Context, Handle);
}
/* Fetch the decrypted ObGetObjectType pointer and forward. */
POBJECT_TYPE
ImpObGetObjectType(_In_ PVOID Object)
{
    pObGetObjectType routine =
        (pObGetObjectType)CryptDecryptImportsArrayEntry(
            &driver_imports, IMPORTS_LENGTH, OB_GET_OBJECT_TYPE_INDEX);
    return routine(Object);
}
/* Fetch the decrypted ExfUnblockPushLock pointer and invoke it. */
VOID
ImpExfUnblockPushLock(_In_ PEX_PUSH_LOCK PushLock, _In_ PVOID WaitBlock)
{
    pExfUnblockPushLock routine =
        (pExfUnblockPushLock)CryptDecryptImportsArrayEntry(
            &driver_imports, IMPORTS_LENGTH, EXF_UNBLOCK_PUSH_LOCK_INDEX);
    routine(PushLock, WaitBlock);
}
/* Fetch the decrypted PsGetProcessImageFileName pointer and forward. */
LPCSTR
ImpPsGetProcessImageFileName(_In_ PEPROCESS Process)
{
    pPsGetProcessImageFileName routine =
        (pPsGetProcessImageFileName)CryptDecryptImportsArrayEntry(
            &driver_imports,
            IMPORTS_LENGTH,
            PS_GET_PROCESS_IMAGE_FILE_NAME_INDEX);
    return routine(Process);
}
/* Fetch the decrypted strstr pointer and forward.
 * NOTE(review): the wrapper returns INT while CRT strstr returns a
 * pointer; the pstrstr typedef presumably reflects how callers use the
 * result — confirm against imports.h. */
INT
ImpStrStr(_In_ CHAR* haystack, _In_ CHAR* needle)
{
    pstrstr routine = (pstrstr)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, STRSTR_INDEX);
    return routine(haystack, needle);
}
/* Fetch the decrypted RtlInitUnicodeString pointer and invoke it. */
VOID
ImpRtlInitUnicodeString(_In_ PUNICODE_STRING DestinationString,
                        _In_ PCWSTR SourceString)
{
    pRtlInitUnicodeString routine =
        (pRtlInitUnicodeString)CryptDecryptImportsArrayEntry(
            &driver_imports, IMPORTS_LENGTH, RTL_INIT_UNICODE_STRING_INDEX);
    routine(DestinationString, SourceString);
}
/* Fetch the decrypted RtlQueryRegistryValues pointer and forward all
 * arguments. */
NTSTATUS
ImpRtlQueryRegistryValues(_In_ ULONG RelativeTo,
                          _In_ PCWSTR Path,
                          _In_ PRTL_QUERY_REGISTRY_TABLE QueryTable,
                          _In_opt_ void* Context,
                          _In_ void* Environment)
{
    pRtlQueryRegistryValues routine =
        (pRtlQueryRegistryValues)CryptDecryptImportsArrayEntry(
            &driver_imports, IMPORTS_LENGTH, RTL_QUERY_REGISTRY_VALUES_INDEX);
    return routine(RelativeTo, Path, QueryTable, Context, Environment);
}
/* Fetch the decrypted MmGetSystemRoutineAddress pointer and forward. */
PVOID
ImpMmGetSystemRoutineAddress(_In_ PUNICODE_STRING SystemRoutineName)
{
    pMmGetSystemRoutineAddress routine =
        (pMmGetSystemRoutineAddress)CryptDecryptImportsArrayEntry(
            &driver_imports,
            IMPORTS_LENGTH,
            MM_GET_SYSTEM_ROUTINE_ADDRESS_INDEX);
    return routine(SystemRoutineName);
}
/* Fetch the decrypted RtlUnicodeStringToAnsiString pointer and forward. */
NTSTATUS
ImpRtlUnicodeStringToAnsiString(_In_ PANSI_STRING DestinationString,
                                _In_ PCUNICODE_STRING SourceString,
                                _In_ BOOLEAN AllocateDestinationString)
{
    pRtlUnicodeStringToAnsiString routine =
        (pRtlUnicodeStringToAnsiString)CryptDecryptImportsArrayEntry(
            &driver_imports,
            IMPORTS_LENGTH,
            RTL_UNICODE_STRING_TO_ANSI_STRING_INDEX);
    return routine(
        DestinationString, SourceString, AllocateDestinationString);
}
/* Fetch the decrypted RtlCopyUnicodeString pointer and invoke it. */
VOID
ImpRtlCopyUnicodeString(_In_ PUNICODE_STRING DestinationString,
                        _In_ PCUNICODE_STRING SourceString)
{
    pRtlCopyUnicodeString routine =
        (pRtlCopyUnicodeString)CryptDecryptImportsArrayEntry(
            &driver_imports, IMPORTS_LENGTH, RTL_COPY_UNICODE_STRING_INDEX);
    routine(DestinationString, SourceString);
}
/* Fetch the decrypted RtlFreeAnsiString pointer and invoke it. */
VOID
ImpRtlFreeAnsiString(_In_ PANSI_STRING AnsiString)
{
    pRtlFreeAnsiString routine =
        (pRtlFreeAnsiString)CryptDecryptImportsArrayEntry(
            &driver_imports, IMPORTS_LENGTH, RTL_FREE_ANSI_STRING_INDEX);
    routine(AnsiString);
}
/* Fetch the decrypted KeInitializeGuardedMutex pointer and invoke it. */
VOID
ImpKeInitializeGuardedMutex(_In_ PKGUARDED_MUTEX GuardedMutex)
{
    pKeInitializeGuardedMutex routine =
        (pKeInitializeGuardedMutex)CryptDecryptImportsArrayEntry(
            &driver_imports, IMPORTS_LENGTH, KE_INITIALIZE_GUARDED_MUTEX_INDEX);
    routine(GuardedMutex);
}
/* Fetch the decrypted IoCreateDevice pointer and forward all arguments. */
NTSTATUS
ImpIoCreateDevice(_In_ PDRIVER_OBJECT DriverObject,
                  _In_ ULONG DeviceExtensionSize,
                  _In_opt_ PUNICODE_STRING DeviceName,
                  _In_ DEVICE_TYPE DeviceType,
                  _In_ ULONG DeviceCharacteristics,
                  _In_ BOOLEAN Exclusive,
                  _Out_ PDEVICE_OBJECT* DeviceObject)
{
    pIoCreateDevice routine =
        (pIoCreateDevice)CryptDecryptImportsArrayEntry(
            &driver_imports, IMPORTS_LENGTH, IO_CREATE_DEVICE_INDEX);
    return routine(DriverObject,
                   DeviceExtensionSize,
                   DeviceName,
                   DeviceType,
                   DeviceCharacteristics,
                   Exclusive,
                   DeviceObject);
}
/* Fetch the decrypted IoCreateSymbolicLink pointer and forward. */
NTSTATUS
ImpIoCreateSymbolicLink(_In_ PUNICODE_STRING SymbolicLinkName,
                        _In_ PUNICODE_STRING DeviceName)
{
    pIoCreateSymbolicLink routine =
        (pIoCreateSymbolicLink)CryptDecryptImportsArrayEntry(
            &driver_imports, IMPORTS_LENGTH, IO_CREATE_SYMBOLIC_LINK_INDEX);
    return routine(SymbolicLinkName, DeviceName);
}
VOID
ImpIoDeleteDevice(_In_ PDEVICE_OBJECT DeviceObject)
{
pIoDeleteDevice impIoDeleteDevice =
(pIoDeleteDevice)CryptDecryptImportsArrayEntry(
&driver_imports, IMPORTS_LENGTH, IO_DELETE_DEVICE_INDEX);
impIoDeleteDevice(DeviceObject);
}
VOID
ImpIoDeleteSymbolicLink(_In_ PUNICODE_STRING SymbolicLinkName)
{
pIoDeleteSymbolicLink impIoDeleteSymbolicLink =
(pIoDeleteSymbolicLink)CryptDecryptImportsArrayEntry(
&driver_imports, IMPORTS_LENGTH, IO_DELETE_SYMBOLIC_LINK_INDEX);
impIoDeleteSymbolicLink(SymbolicLinkName);
}
NTSTATUS
ImpObRegisterCallbacks(_In_ POB_CALLBACK_REGISTRATION CallbackRegistration,
_Out_ PVOID* RegistrationHandle)
{
pObRegisterCallbacks impObRegisterCallbacks =
(pObRegisterCallbacks)CryptDecryptImportsArrayEntry(
&driver_imports, IMPORTS_LENGTH, OB_REGISTER_CALLBACKS_INDEX);
return impObRegisterCallbacks(CallbackRegistration, RegistrationHandle);
}
VOID
ImpObUnRegisterCallbacks(_In_ PVOID RegistrationHandle)
{
pObUnRegisterCallbacks impObUnRegisterCallbacks =
(pObUnRegisterCallbacks)CryptDecryptImportsArrayEntry(
&driver_imports, IMPORTS_LENGTH, OB_UNREGISTER_CALLBACKS_INDEX);
impObUnRegisterCallbacks(RegistrationHandle);
}
NTSTATUS
ImpPsSetCreateThreadNotifyRoutine(
_In_ PCREATE_THREAD_NOTIFY_ROUTINE NotifyRoutine)
{
pPsSetCreateThreadNotifyRoutine impPsSetCreateThreadNotifyRoutine =
(pPsSetCreateThreadNotifyRoutine)CryptDecryptImportsArrayEntry(
&driver_imports,
IMPORTS_LENGTH,
PS_SET_CREATE_THREAD_NOTIFY_ROUTINE_INDEX);
return impPsSetCreateThreadNotifyRoutine(NotifyRoutine);
}
VOID
ImpKeRevertToUserAffinityThreadEx(_In_ KAFFINITY Affinity)
{
pKeRevertToUserAffinityThreadEx impKeRevertToUserAffinityThreadEx =
(pKeRevertToUserAffinityThreadEx)CryptDecryptImportsArrayEntry(
&driver_imports,
IMPORTS_LENGTH,
KE_REVERT_TO_USER_AFFINITY_THREAD_EX_INDEX);
impKeRevertToUserAffinityThreadEx(Affinity);
}
KAFFINITY
ImpKeSetSystemAffinityThreadEx(_In_ KAFFINITY Affinity)
{
pKeSetSystemAffinityThreadEx impKeSetSystemAffinityThreadEx =
(pKeSetSystemAffinityThreadEx)CryptDecryptImportsArrayEntry(
&driver_imports,
IMPORTS_LENGTH,
KE_SET_SYSTEM_AFFINITY_THREAD_EX_INDEX);
return impKeSetSystemAffinityThreadEx(Affinity);
}
SIZE_T
ImpStrnlen(_In_ CHAR* str, _In_ SIZE_T maxCount)
{
pstrnlen impStrnlen = (pstrnlen)CryptDecryptImportsArrayEntry(
&driver_imports, IMPORTS_LENGTH, STRNLEN_INDEX);
return impStrnlen(str, maxCount);
}
VOID
ImpRtlInitAnsiString(_In_ PANSI_STRING DestinationString,
_In_ PCSZ SourceString)
{
pRtlInitAnsiString impRtlInitAnsiString =
(pRtlInitAnsiString)CryptDecryptImportsArrayEntry(
&driver_imports, IMPORTS_LENGTH, RTL_INIT_ANSI_STRING_INDEX);
impRtlInitAnsiString(DestinationString, SourceString);
}
NTSTATUS
ImpRtlAnsiStringToUnicodeString(_In_ PUNICODE_STRING DestinationString,
_In_ PCANSI_STRING SourceString,
_In_ BOOLEAN AllocateDestinationString)
{
pRtlAnsiStringToUnicodeString impRtlAnsiStringToUnicodeString =
(pRtlAnsiStringToUnicodeString)CryptDecryptImportsArrayEntry(
&driver_imports,
IMPORTS_LENGTH,
RTL_ANSI_STRING_TO_UNICODE_STRING_INDEX);
return impRtlAnsiStringToUnicodeString(
DestinationString, SourceString, AllocateDestinationString);
}
PEPROCESS
ImpIoGetCurrentProcess()
{
pIoGetCurrentProcess impIoGetCurrentProcess =
(pIoGetCurrentProcess)CryptDecryptImportsArrayEntry(
&driver_imports, IMPORTS_LENGTH, IO_GET_CURRENT_PROCESS_INDEX);
return impIoGetCurrentProcess();
}
NTSTATUS
ImpRtlGetVersion(_Out_ PRTL_OSVERSIONINFOW lpVersionInformation)
{
pRtlGetVersion impRtlGetVersion =
(pRtlGetVersion)CryptDecryptImportsArrayEntry(
&driver_imports, IMPORTS_LENGTH, RTL_GET_VERSION_INDEX);
return impRtlGetVersion(lpVersionInformation);
}
SIZE_T
ImpRtlCompareMemory(_In_ PVOID Source1, _In_ PVOID Source2, _In_ SIZE_T Length)
{
pRtlCompareMemory impRtlCompareMemory =
(pRtlCompareMemory)CryptDecryptImportsArrayEntry(
&driver_imports, IMPORTS_LENGTH, RTL_COMPARE_MEMORY_INDEX);
return impRtlCompareMemory(Source1, Source2, Length);
}
NTSTATUS
ImpExGetSystemFirmwareTable(_In_ ULONG FirmwareTableProviderSignature,
_In_ ULONG FirmwareTableID,
_In_ PVOID pFirmwareTableBuffer,
_In_ ULONG BufferLength,
_Out_ PULONG ReturnLength)
{
pExGetSystemFirmwareTable impExGetSystemFirmwareTable =
(pExGetSystemFirmwareTable)CryptDecryptImportsArrayEntry(
&driver_imports,
IMPORTS_LENGTH,
EX_GET_SYSTEM_FIRMWARE_TABLE_INDEX);
return impExGetSystemFirmwareTable(FirmwareTableProviderSignature,
FirmwareTableID,
pFirmwareTableBuffer,
BufferLength,
ReturnLength);
}
PIO_WORKITEM
ImpIoAllocateWorkItem(_In_ PDEVICE_OBJECT DeviceObject)
{
pIoAllocateWorkItem impIoAllocateWorkItem =
(pIoAllocateWorkItem)CryptDecryptImportsArrayEntry(
&driver_imports, IMPORTS_LENGTH, IO_ALLOCATE_WORK_ITEM_INDEX);
return impIoAllocateWorkItem(DeviceObject);
}
VOID
ImpIoFreeWorkItem(_In_ PIO_WORKITEM WorkItem)
{
pIoFreeWorkItem impIoFreeWorkItem =
(pIoFreeWorkItem)CryptDecryptImportsArrayEntry(
&driver_imports, IMPORTS_LENGTH, IO_FREE_WORK_ITEM_INDEX);
impIoFreeWorkItem(WorkItem);
}
VOID
ImpIoQueueWorkItem(_In_ PIO_WORKITEM IoWorkItem,
_In_ PIO_WORKITEM_ROUTINE WorkerRoutine,
_In_ WORK_QUEUE_TYPE QueueType,
_In_opt_ PVOID Context)
{
pIoQueueWorkItem impIoQueueWorkItem =
(pIoQueueWorkItem)CryptDecryptImportsArrayEntry(
&driver_imports, IMPORTS_LENGTH, IO_QUEUE_WORK_ITEM_INDEX);
impIoQueueWorkItem(IoWorkItem, WorkerRoutine, QueueType, Context);
}
NTSTATUS
ImpZwOpenFile(_Out_ PHANDLE FileHandle,
_In_ ACCESS_MASK DesiredAccess,
_In_ POBJECT_ATTRIBUTES ObjectAttributes,
_Out_ PIO_STATUS_BLOCK IoStatusBlock,
_In_ ULONG ShareAccess,
_In_ ULONG OpenOptions)
{
pZwOpenFile impZwOpenFile = (pZwOpenFile)CryptDecryptImportsArrayEntry(
&driver_imports, IMPORTS_LENGTH, ZW_OPEN_FILE_INDEX);
return impZwOpenFile(FileHandle,
DesiredAccess,
ObjectAttributes,
IoStatusBlock,
ShareAccess,
OpenOptions);
}
NTSTATUS
ImpZwClose(_In_ HANDLE Handle)
{
pZwClose impZwClose = (pZwClose)CryptDecryptImportsArrayEntry(
&driver_imports, IMPORTS_LENGTH, ZW_CLOSE_INDEX);
return impZwClose(Handle);
}
/*
 * Call-through wrappers, continued: section/view management, memory copy,
 * device I/O, process attach and thread creation. Pattern is identical to
 * the other Imp* wrappers - decrypt the import slot, call, forward args.
 */
NTSTATUS
ImpZwCreateSection(_Out_ PHANDLE SectionHandle,
                   _In_ ACCESS_MASK DesiredAccess,
                   _In_opt_ POBJECT_ATTRIBUTES ObjectAttributes,
                   _In_opt_ PLARGE_INTEGER MaximumSize,
                   _In_ ULONG SectionPageProtection,
                   _In_ ULONG AllocationAttributes,
                   _In_opt_ HANDLE FileHandle)
{
    return ((pZwCreateSection)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, ZW_CREATE_SECTION_INDEX))(
        SectionHandle,
        DesiredAccess,
        ObjectAttributes,
        MaximumSize,
        SectionPageProtection,
        AllocationAttributes,
        FileHandle);
}

NTSTATUS
ImpZwMapViewOfSection(_In_ HANDLE SectionHandle,
                      _In_ HANDLE ProcessHandle,
                      _Inout_ PVOID* BaseAddress,
                      _In_ ULONG_PTR ZeroBits,
                      _In_ SIZE_T CommitSize,
                      _Inout_opt_ PLARGE_INTEGER SectionOffset,
                      _Inout_ PSIZE_T ViewSize,
                      _In_ SECTION_INHERIT InheritDisposition,
                      _In_ ULONG AllocationType,
                      _In_ ULONG Win32Protect)
{
    return ((pZwMapViewOfSection)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, ZW_MAP_VIEW_OF_SECTION_INDEX))(
        SectionHandle,
        ProcessHandle,
        BaseAddress,
        ZeroBits,
        CommitSize,
        SectionOffset,
        ViewSize,
        InheritDisposition,
        AllocationType,
        Win32Protect);
}

NTSTATUS
ImpZwUnmapViewOfSection(_In_ HANDLE ProcessHandle, _In_ PVOID BaseAddress)
{
    return ((pZwUnmapViewOfSection)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, ZW_UNMAP_VIEW_OF_SECTION_INDEX))(
        ProcessHandle, BaseAddress);
}

NTSTATUS
ImpMmCopyMemory(_In_ PVOID TargetAddress,
                _In_ MM_COPY_ADDRESS SourceAddress,
                _In_ SIZE_T NumberOfBytes,
                _In_ ULONG Flags,
                _Out_ PSIZE_T NumberOfBytesTransferred)
{
    return ((pMmCopyMemory)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, MM_COPY_MEMORY_INDEX))(
        TargetAddress,
        SourceAddress,
        NumberOfBytes,
        Flags,
        NumberOfBytesTransferred);
}

NTSTATUS
ImpZwDeviceIoControlFile(_In_ HANDLE FileHandle,
                         _In_opt_ HANDLE Event,
                         _In_opt_ PIO_APC_ROUTINE ApcRoutine,
                         _In_opt_ PVOID ApcContext,
                         _Out_ PIO_STATUS_BLOCK IoStatusBlock,
                         _In_ ULONG IoControlCode,
                         _In_opt_ PVOID InputBuffer,
                         _In_ ULONG InputBufferLength,
                         _Out_opt_ PVOID OutputBuffer,
                         _In_ ULONG OutputBufferLength)
{
    return ((pZwDeviceIoControlFile)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, ZW_DEVICE_IO_CONTROL_FILE_INDEX))(
        FileHandle,
        Event,
        ApcRoutine,
        ApcContext,
        IoStatusBlock,
        IoControlCode,
        InputBuffer,
        InputBufferLength,
        OutputBuffer,
        OutputBufferLength);
}

VOID
ImpKeStackAttachProcess(_In_ PRKPROCESS Process, _Out_ PKAPC_STATE ApcState)
{
    ((pKeStackAttachProcess)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, KE_STACK_ATTACH_PROCESS_INDEX))(
        Process, ApcState);
}

VOID
ImpKeUnstackDetachProcess(_In_ PKAPC_STATE ApcState)
{
    ((pKeUnstackDetachProcess)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, KE_UNSTACK_DETACH_PROCESS_INDEX))(
        ApcState);
}

NTSTATUS
ImpKeWaitForSingleObject(_In_ PVOID Object,
                         _In_ KWAIT_REASON WaitReason,
                         _In_ KPROCESSOR_MODE WaitMode,
                         _In_ BOOLEAN Alertable,
                         _In_ PLARGE_INTEGER Timeout)
{
    return ((pKeWaitForSingleObject)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, KE_WAIT_FOR_SINGLE_OBJECT_INDEX))(
        Object, WaitReason, WaitMode, Alertable, Timeout);
}

NTSTATUS
ImpPsCreateSystemThread(_Out_ PHANDLE ThreadHandle,
                        _In_ ULONG DesiredAccess,
                        _In_opt_ POBJECT_ATTRIBUTES ObjectAttributes,
                        _In_opt_ HANDLE ProcessHandle,
                        _Out_opt_ PCLIENT_ID ClientId,
                        _In_ PKSTART_ROUTINE StartRoutine,
                        _In_opt_ PVOID StartContext)
{
    return ((pPsCreateSystemThread)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, PS_CREATE_SYSTEM_THREAD_INDEX))(
        ThreadHandle,
        DesiredAccess,
        ObjectAttributes,
        ProcessHandle,
        ClientId,
        StartRoutine,
        StartContext);
}
/*
 * Call-through wrappers, continued: IRP completion, object references,
 * NMI callbacks, push locks, stack traces and processor affinity. Same
 * decrypt-and-invoke pattern as every other Imp* wrapper in this file.
 */
VOID
ImpIofCompleteRequest(_In_ PIRP Irp, _In_ CCHAR PriorityBoost)
{
    ((pIofCompleteRequest)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, IOF_COMPLETE_REQUEST_INDEX))(
        Irp, PriorityBoost);
}

NTSTATUS
ImpObReferenceObjectByHandle(_In_ HANDLE Handle,
                             _In_ ACCESS_MASK DesiredAccess,
                             _In_opt_ POBJECT_TYPE ObjectType,
                             _In_ KPROCESSOR_MODE AccessMode,
                             _Out_ PVOID* Object,
                             _Out_opt_ POBJECT_HANDLE_INFORMATION
                                 HandleInformation)
{
    return ((pObReferenceObjectByHandle)CryptDecryptImportsArrayEntry(
        &driver_imports,
        IMPORTS_LENGTH,
        OB_REFERENCE_OBJECT_BY_HANDLE_INDEX))(Handle,
                                              DesiredAccess,
                                              ObjectType,
                                              AccessMode,
                                              Object,
                                              HandleInformation);
}

NTSTATUS
ImpKeDelayExecutionThread(_In_ KPROCESSOR_MODE WaitMode,
                          _In_ BOOLEAN Alertable,
                          _In_ PLARGE_INTEGER Interval)
{
    return ((pKeDelayExecutionThread)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, KE_DELAY_EXECUTION_THREAD_INDEX))(
        WaitMode, Alertable, Interval);
}

PVOID
ImpKeRegisterNmiCallback(_In_ PVOID CallbackRoutine, _In_opt_ PVOID Context)
{
    return ((pKeRegisterNmiCallback)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, KE_REGISTER_NMI_CALLBACK_INDEX))(
        CallbackRoutine, Context);
}

NTSTATUS
ImpKeDeregisterNmiCallback(_In_ PVOID Handle)
{
    return ((pKeDeregisterNmiCallback)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, KE_DEREGISTER_NMI_CALLBACK_INDEX))(
        Handle);
}

ULONG
ImpKeQueryActiveProcessorCount(_In_ PKAFFINITY ActiveProcessors)
{
    return ((pKeQueryActiveProcessorCount)CryptDecryptImportsArrayEntry(
        &driver_imports,
        IMPORTS_LENGTH,
        KE_QUERY_ACTIVE_PROCESSOR_COUNT_INDEX))(ActiveProcessors);
}

VOID
ImpExAcquirePushLockExclusiveEx(_Inout_ PEX_PUSH_LOCK PushLock,
                                _In_ ULONG Flags)
{
    ((pExAcquirePushLockExclusiveEx)CryptDecryptImportsArrayEntry(
        &driver_imports,
        IMPORTS_LENGTH,
        EX_ACQUIRE_PUSH_LOCK_EXCLUSIVE_EX_INDEX))(PushLock, Flags);
}

VOID
ImpExReleasePushLockExclusiveEx(_Inout_ PEX_PUSH_LOCK PushLock,
                                _In_ ULONG Flags)
{
    ((pExReleasePushLockExclusiveEx)CryptDecryptImportsArrayEntry(
        &driver_imports,
        IMPORTS_LENGTH,
        EX_RELEASE_PUSH_LOCK_EXCLUSIVE_EX_INDEX))(PushLock, Flags);
}

HANDLE
ImpPsGetThreadId(_In_ PETHREAD Thread)
{
    return ((pPsGetThreadId)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, PS_GET_THREAD_ID_INDEX))(Thread);
}

USHORT
ImpRtlCaptureStackBackTrace(_In_ ULONG FramesToSkip,
                            _In_ ULONG FramesToCapture,
                            _Out_ PVOID* BackTrace,
                            _Out_opt_ PULONG BackTraceHash)
{
    return ((pRtlCaptureStackBackTrace)CryptDecryptImportsArrayEntry(
        &driver_imports,
        IMPORTS_LENGTH,
        RTL_CAPTURE_STACK_BACK_TRACE_INDEX))(
        FramesToSkip, FramesToCapture, BackTrace, BackTraceHash);
}

NTSTATUS
ImpZwOpenDirectoryObject(_Out_ PHANDLE DirectoryHandle,
                         _In_ ACCESS_MASK DesiredAccess,
                         _In_ POBJECT_ATTRIBUTES ObjectAttributes)
{
    return ((pZwOpenDirectoryObject)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, ZW_OPEN_DIRECTORY_OBJECT_INDEX))(
        DirectoryHandle, DesiredAccess, ObjectAttributes);
}

VOID
ImpKeInitializeAffinityEx(_In_ PKAFFINITY_EX AffinityMask)
{
    ((pKeInitializeAffinityEx)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, KE_INITIALIZE_AFFINITY_EX_INDEX))(
        AffinityMask);
}

VOID
ImpKeAddProcessorAffinityEx(_In_ PKAFFINITY_EX Affinity, _In_ INT CoreNumber)
{
    ((pKeAddProcessorAffinityEx)CryptDecryptImportsArrayEntry(
        &driver_imports,
        IMPORTS_LENGTH,
        KE_ADD_PROCESSOR_AFFINITY_EX_INDEX))(Affinity, CoreNumber);
}
/*
 * Call-through wrappers, continued: module enumeration, APCs, DPCs,
 * physical memory queries, pool allocation and guarded mutexes. Same
 * decrypt-and-invoke pattern as the rest of the Imp* family.
 */
NTSTATUS
ImpRtlQueryModuleInformation(_Inout_ ULONG* InformationLength,
                             _In_ ULONG SizePerModule,
                             _In_ PVOID InformationBuffer)
{
    return ((pRtlQueryModuleInformation)CryptDecryptImportsArrayEntry(
        &driver_imports,
        IMPORTS_LENGTH,
        RTL_QUERY_MODULE_INFORMATION_INDEX))(
        InformationLength, SizePerModule, InformationBuffer);
}

VOID
ImpKeInitializeApc(_In_ PKAPC Apc,
                   _In_ PKTHREAD Thread,
                   _In_ KAPC_ENVIRONMENT Environment,
                   _In_ PKKERNEL_ROUTINE KernelRoutine,
                   _In_ PKRUNDOWN_ROUTINE RundownRoutine,
                   _In_ PKNORMAL_ROUTINE NormalRoutine,
                   _In_ KPROCESSOR_MODE ApcMode,
                   _In_ PVOID NormalContext)
{
    ((pKeInitializeApc)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, KE_INITIALIZE_APC_INDEX))(
        Apc,
        Thread,
        Environment,
        KernelRoutine,
        RundownRoutine,
        NormalRoutine,
        ApcMode,
        NormalContext);
}

BOOLEAN
ImpKeInsertQueueApc(_In_ PKAPC Apc,
                    _In_ PVOID SystemArgument1,
                    _In_ PVOID SystemArgument2,
                    _In_ KPRIORITY Increment)
{
    return ((pKeInsertQueueApc)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, KE_INSERT_QUEUE_APC_INDEX))(
        Apc, SystemArgument1, SystemArgument2, Increment);
}

VOID
ImpKeGenericCallDpc(_In_ PKDEFERRED_ROUTINE DpcRoutine, _In_ PVOID Context)
{
    ((pKeGenericCallDpc)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, KE_GENERIC_CALL_DPC_INDEX))(
        DpcRoutine, Context);
}

VOID
ImpKeSignalCallDpcDone(_In_ PVOID SystemArgument1)
{
    ((pKeSignalCallDpcDone)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, KE_SIGNAL_CALL_DPC_DONE_INDEX))(
        SystemArgument1);
}

PPHYSICAL_MEMORY_RANGE
ImpMmGetPhysicalMemoryRangesEx2(_In_ PVOID PartitionObject, _In_ ULONG Flags)
{
    return ((pMmGetPhysicalMemoryRangesEx2)CryptDecryptImportsArrayEntry(
        &driver_imports,
        IMPORTS_LENGTH,
        MM_GET_PHYSICAL_MEMORY_RANGES_EX2_INDEX))(PartitionObject, Flags);
}

PVOID
ImpMmGetVirtualForPhysical(_In_ PHYSICAL_ADDRESS PhysicalAddress)
{
    return ((pMmGetVirtualForPhysical)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, MM_GET_VIRTUAL_FOR_PHYSICAL_INDEX))(
        PhysicalAddress);
}

LONG_PTR
ImpObfReferenceObject(_In_ PVOID Object)
{
    return ((pObfReferenceObject)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, OBF_REFERENCE_OBJECT_INDEX))(Object);
}

VOID
ImpExFreePoolWithTag(_In_ PVOID P, _In_ ULONG Tag)
{
    ((pExFreePoolWithTag)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, EX_FREE_POOL_WITH_TAG_INDEX))(
        P, Tag);
}

PVOID
ImpExAllocatePool2(_In_ POOL_FLAGS Flags,
                   _In_ SIZE_T NumberOfBytes,
                   _In_ ULONG Tag)
{
    return ((pExAllocatePool2)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, EX_ALLOCATE_POOL2_INDEX))(
        Flags, NumberOfBytes, Tag);
}

VOID
ImpKeReleaseGuardedMutex(_In_ PKGUARDED_MUTEX GuardedMutex)
{
    ((pKeReleaseGuardedMutex)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, KE_RELEASE_GUARDED_MUTEX_INDEX))(
        GuardedMutex);
}

VOID
ImpKeAcquireGuardedMutex(_In_ PKGUARDED_MUTEX GuardedMutex)
{
    ((pKeAcquireGuardedMutex)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, KE_ACQUIRE_GUARDED_MUTEX_INDEX))(
        GuardedMutex);
}
/*
 * Variadic wrapper around DbgPrintEx resolved from the encrypted import
 * table.
 *
 * NOTE(review): forwarding a va_list as an argument to a `...` (ellipsis)
 * function is undefined behaviour in C - `impDbgPrintEx` is typed as a
 * variadic pointer (pDbgPrintEx takes `...`), so DbgPrintEx will receive
 * `args` as its first format argument rather than expanding it. Any caller
 * that passes format arguments will print garbage. The correct target for
 * a va_list is vDbgPrintEx / vDbgPrintExWithPrefix; consider adding one of
 * those to the import table and calling it here instead - TODO confirm.
 */
ULONG
ImpDbgPrintEx(_In_ ULONG ComponentId, _In_ ULONG Level, _In_ PCSTR Format, ...)
{
    pDbgPrintEx impDbgPrintEx = (pDbgPrintEx)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, DBG_PRINT_EX_INDEX);
    va_list args;
    va_start(args, Format);
    /* see NOTE(review) above: `args` is not expanded by the callee */
    ULONG result = impDbgPrintEx(ComponentId, Level, Format, args);
    va_end(args);
    return result;
}
/* Compares two UNICODE_STRINGs through the decrypted import table entry,
 * forwarding the case-sensitivity flag and the routine's LONG result. */
LONG
ImpRtlCompareUnicodeString(_In_ PCUNICODE_STRING String1,
                           _In_ PCUNICODE_STRING String2,
                           _In_ BOOLEAN CaseInSensitive)
{
    return ((pRtlCompareUnicodeString)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, RTL_COMPARE_UNICODE_STRING_INDEX))(
        String1, String2, CaseInSensitive);
}

/* Frees a UNICODE_STRING buffer through the decrypted import table entry. */
VOID
ImpRtlFreeUnicodeString(_In_ PUNICODE_STRING UnicodeString)
{
    ((pRtlFreeUnicodeString)CryptDecryptImportsArrayEntry(
        &driver_imports, IMPORTS_LENGTH, RTL_FREE_UNICODE_STRING_INDEX))(
        UnicodeString);
}
================================================
FILE: driver/imports.h
================================================
#ifndef IMPORTS_H
#define IMPORTS_H
#include "common.h"
/* Resolves a single export by name for the wrappers below; presumably walks
 * ntoskrnl's export directory via the driver object - TODO confirm in
 * imports.c. */
PVOID
ImpResolveNtImport(PDRIVER_OBJECT DriverObject, PCZPSTR ExportName);
/* Populates the dynamic import table used by every Imp* wrapper. Must
 * succeed before any wrapper is called. */
NTSTATUS
ImpResolveDynamicImports(_In_ PDRIVER_OBJECT DriverObject);
/* Upper bounds for import-name storage and table capacity. */
#define IMPORT_FUNCTION_MAX_LENGTH 128
#define IMPORT_FUNCTION_COUNT 256
// clang-format off
/*
 * Function-pointer types for every routine reachable through the dynamic
 * import table. One typedef per import; signatures mirror the prototypes
 * the Imp* wrappers forward to. Kept inside the clang-format off region so
 * the compact one-per-line layout is preserved.
 */
typedef void* (*pObDereferenceObject)(void* Object);
typedef void* (*pObReferenceObject)(void* Object);
typedef NTSTATUS (*pPsLookupThreadByThreadId)(HANDLE ThreadId, PETHREAD* Thread);
typedef BOOLEAN (*pMmIsAddressValid)(void* VirtualAddress);
typedef NTSTATUS (*pPsSetCreateProcessNotifyRoutine)(
    PCREATE_PROCESS_NOTIFY_ROUTINE NotifyRoutine, BOOLEAN Remove);
typedef NTSTATUS (*pPsRemoveCreateThreadNotifyRoutine)(
    PCREATE_THREAD_NOTIFY_ROUTINE NotifyRoutine);
typedef HANDLE (*pPsGetCurrentThreadId)(void);
typedef HANDLE (*pPsGetProcessId)(PEPROCESS Process);
typedef NTSTATUS (*pPsLookupProcessByProcessId)(HANDLE ProcessId, PEPROCESS* Process);
typedef void* (*pExEnumHandleTable)(
    PHANDLE_TABLE HandleTable, void* Callback, void* Context, PHANDLE Handle);
typedef POBJECT_TYPE (*pObGetObjectType)(void* Object);
typedef void (*pExfUnblockPushLock)(PEX_PUSH_LOCK PushLock, void* WaitBlock);
typedef LPCSTR (*pPsGetProcessImageFileName)(PEPROCESS Process);
typedef INT (*pstrcmp)(const CHAR* str1, const CHAR* str2);
typedef PCHAR (*pstrstr)(const CHAR* haystack, const CHAR* needle);
typedef void (*pRtlInitUnicodeString)(
    PUNICODE_STRING DestinationString, PCWSTR SourceString);
typedef NTSTATUS (*pRtlQueryRegistryValues)(
    ULONG RelativeTo, PCWSTR Path, PRTL_QUERY_REGISTRY_TABLE QueryTable,
    void* Context, void* Environment);
typedef void* (*pMmGetSystemRoutineAddress)(PUNICODE_STRING SystemRoutineName);
typedef NTSTATUS (*pRtlUnicodeStringToAnsiString)(
    PANSI_STRING DestinationString, PCUNICODE_STRING SourceString,
    BOOLEAN AllocateDestinationString);
typedef void (*pRtlCopyUnicodeString)(
    PUNICODE_STRING DestinationString, PCUNICODE_STRING SourceString);
typedef void (*pRtlFreeAnsiString)(PANSI_STRING AnsiString);
typedef void (*pKeInitializeGuardedMutex)(PKGUARDED_MUTEX GuardedMutex);
typedef NTSTATUS (*pIoCreateDevice)(
    PDRIVER_OBJECT DriverObject, ULONG DeviceExtensionSize,
    PUNICODE_STRING DeviceName, DEVICE_TYPE DeviceType,
    ULONG DeviceCharacteristics, BOOLEAN Exclusive,
    PDEVICE_OBJECT* DeviceObject);
typedef NTSTATUS (*pIoCreateSymbolicLink)(
    PUNICODE_STRING SymbolicLinkName, PUNICODE_STRING DeviceName);
typedef void (*pIoDeleteDevice)(PDEVICE_OBJECT DeviceObject);
typedef void (*pIoDeleteSymbolicLink)(PUNICODE_STRING SymbolicLinkName);
typedef NTSTATUS (*pObRegisterCallbacks)(
    POB_CALLBACK_REGISTRATION CallbackRegistration, void** RegistrationHandle);
typedef void (*pObUnRegisterCallbacks)(void* RegistrationHandle);
typedef NTSTATUS (*pPsSetCreateThreadNotifyRoutine)(
    PCREATE_THREAD_NOTIFY_ROUTINE NotifyRoutine);
typedef void (*pKeRevertToUserAffinityThreadEx)(KAFFINITY Affinity);
typedef KAFFINITY (*pKeSetSystemAffinityThreadEx)(KAFFINITY Affinity);
typedef SIZE_T (*pstrnlen)(const CHAR* str, SIZE_T maxCount);
typedef void (*pRtlInitAnsiString)(
    PANSI_STRING DestinationString, PCSZ SourceString);
typedef NTSTATUS (*pRtlAnsiStringToUnicodeString)(
    PUNICODE_STRING DestinationString, PCANSI_STRING SourceString,
    BOOLEAN AllocateDestinationString);
typedef PEPROCESS (*pIoGetCurrentProcess)(void);
typedef NTSTATUS (*pRtlGetVersion)(PRTL_OSVERSIONINFOW lpVersionInformation);
typedef SIZE_T (*pRtlCompareMemory)(
    const void* Source1, const void* Source2, SIZE_T Length);
typedef NTSTATUS (*pExGetSystemFirmwareTable)(
    ULONG FirmwareTableProviderSignature, ULONG FirmwareTableID,
    void* pFirmwareTableBuffer, ULONG BufferLength, PULONG ReturnLength);
typedef PIO_WORKITEM (*pIoAllocateWorkItem)(PDEVICE_OBJECT DeviceObject);
typedef void (*pIoFreeWorkItem)(PIO_WORKITEM WorkItem);
typedef void (*pIoQueueWorkItem)(
    PIO_WORKITEM IoWorkItem, PIO_WORKITEM_ROUTINE WorkerRoutine,
    WORK_QUEUE_TYPE QueueType, void* Context);
typedef NTSTATUS (*pZwOpenFile)(
    PHANDLE FileHandle, ACCESS_MASK DesiredAccess,
    POBJECT_ATTRIBUTES ObjectAttributes, PIO_STATUS_BLOCK IoStatusBlock,
    ULONG ShareAccess, ULONG OpenOptions);
typedef NTSTATUS (*pZwClose)(HANDLE Handle);
typedef NTSTATUS (*pZwCreateSection)(
    PHANDLE SectionHandle, ACCESS_MASK DesiredAccess,
    POBJECT_ATTRIBUTES ObjectAttributes, PLARGE_INTEGER MaximumSize,
    ULONG SectionPageProtection, ULONG AllocationAttributes,
    HANDLE FileHandle);
typedef NTSTATUS (*pZwMapViewOfSection)(
    HANDLE SectionHandle, HANDLE ProcessHandle, void** BaseAddress,
    ULONG_PTR ZeroBits, SIZE_T CommitSize, PLARGE_INTEGER SectionOffset,
    PSIZE_T ViewSize, SECTION_INHERIT InheritDisposition,
    ULONG AllocationType, ULONG Win32Protect);
typedef NTSTATUS (*pZwUnmapViewOfSection)(HANDLE ProcessHandle, void* BaseAddress);
typedef NTSTATUS (*pMmCopyMemory)(
    PVOID TargetAddress, MM_COPY_ADDRESS SourceAddress, SIZE_T NumberOfBytes,
    ULONG Flags, PSIZE_T NumberOfBytesTransferred);
typedef NTSTATUS (*pZwDeviceIoControlFile)(
    HANDLE FileHandle, HANDLE Event, PIO_APC_ROUTINE ApcRoutine,
    void* ApcContext, PIO_STATUS_BLOCK IoStatusBlock, ULONG IoControlCode,
    void* InputBuffer, ULONG InputBufferLength, void* OutputBuffer,
    ULONG OutputBufferLength);
typedef void (*pKeStackAttachProcess)(PRKPROCESS Process, PKAPC_STATE ApcState);
typedef void (*pKeUnstackDetachProcess)(PKAPC_STATE ApcState);
typedef NTSTATUS (*pKeWaitForSingleObject)(
    void* Object, KWAIT_REASON WaitReason, KPROCESSOR_MODE WaitMode,
    BOOLEAN Alertable, PLARGE_INTEGER Timeout);
typedef NTSTATUS (*pPsCreateSystemThread)(
    PHANDLE ThreadHandle, ULONG DesiredAccess,
    POBJECT_ATTRIBUTES ObjectAttributes, HANDLE ProcessHandle,
    PCLIENT_ID ClientId, PKSTART_ROUTINE StartRoutine, void* StartContext);
typedef void (*pIofCompleteRequest)(PIRP Irp, CCHAR PriorityBoost);
typedef NTSTATUS (*pObReferenceObjectByHandle)(
    HANDLE Handle, ACCESS_MASK DesiredAccess, POBJECT_TYPE ObjectType,
    KPROCESSOR_MODE AccessMode, void** Object,
    POBJECT_HANDLE_INFORMATION HandleInformation);
typedef NTSTATUS (*pKeDelayExecutionThread)(
    KPROCESSOR_MODE WaitMode, BOOLEAN Alertable, PLARGE_INTEGER Interval);
typedef void* (*pKeRegisterNmiCallback)(void* CallbackRoutine, void* Context);
typedef NTSTATUS (*pKeDeregisterNmiCallback)(void* Handle);
typedef ULONG (*pKeQueryActiveProcessorCount)(PKAFFINITY ActiveProcessors);
typedef void (*pExAcquirePushLockExclusiveEx)(PEX_PUSH_LOCK PushLock, ULONG Flags);
typedef void (*pExReleasePushLockExclusiveEx)(PEX_PUSH_LOCK PushLock, ULONG Flags);
typedef HANDLE (*pPsGetThreadId)(PETHREAD Thread);
typedef USHORT (*pRtlCaptureStackBackTrace)(
    ULONG FramesToSkip, ULONG FramesToCapture, void** BackTrace,
    PULONG BackTraceHash);
typedef NTSTATUS (*pZwOpenDirectoryObject)(
    PHANDLE DirectoryHandle, ACCESS_MASK DesiredAccess,
    POBJECT_ATTRIBUTES ObjectAttributes);
typedef void (*pKeInitializeAffinityEx)(PKAFFINITY_EX AffinityMask);
typedef void (*pKeAddProcessorAffinityEx)(PKAFFINITY_EX Affinity, INT CoreNumber);
typedef NTSTATUS (*pRtlQueryModuleInformation)(
    ULONG* InformationLength, ULONG SizePerModule, PVOID InformationBuffer);
typedef void (*pKeInitializeApc)(
    PKAPC Apc, PKTHREAD Thread, KAPC_ENVIRONMENT Environment,
    PKKERNEL_ROUTINE KernelRoutine, PKRUNDOWN_ROUTINE RundownRoutine,
    PKNORMAL_ROUTINE NormalRoutine, KPROCESSOR_MODE ApcMode,
    void* NormalContext);
typedef BOOLEAN (*pKeInsertQueueApc)(
    PKAPC Apc, void* SystemArgument1, void* SystemArgument2,
    KPRIORITY Increment);
typedef void (*pKeGenericCallDpc)(PKDEFERRED_ROUTINE DpcRoutine, void* Context);
typedef void (*pKeSignalCallDpcDone)(void* SystemArgument1);
typedef PPHYSICAL_MEMORY_RANGE (*pMmGetPhysicalMemoryRangesEx2)(
    PVOID PartitionObject, ULONG Flags);
typedef void* (*pMmGetVirtualForPhysical)(PHYSICAL_ADDRESS PhysicalAddress);
typedef LONG_PTR (*pObfReferenceObject)(void* Object);
typedef void (*pExFreePoolWithTag)(void* P, ULONG Tag);
typedef void* (*pExAllocatePool2)(POOL_FLAGS Flags, SIZE_T NumberOfBytes, ULONG Tag);
typedef void (*pKeReleaseGuardedMutex)(PKGUARDED_MUTEX GuardedMutex);
typedef void (*pKeAcquireGuardedMutex)(PKGUARDED_MUTEX GuardedMutex);
typedef ULONG (*pDbgPrintEx)(ULONG ComponentId, ULONG Level, PCSTR Format, ...);
typedef LONG (*pRtlCompareUnicodeString)(
    PCUNICODE_STRING String1, PCUNICODE_STRING String2,
    BOOLEAN CaseInSensitive);
typedef PIO_STACK_LOCATION (*pIoGetCurrentIrpStackLocation)(PIRP Irp);
typedef void (*pRtlFreeUnicodeString)(PUNICODE_STRING UnicodeString);
// clang-format on
/* Slot indices into the encrypted dynamic import table. The ordering must
 * stay in sync with the resolution order used when the table is populated
 * and with the field order of _DRIVER_IMPORTS below. */
#define OB_DEREFERENCE_OBJECT_INDEX 0
#define PS_LOOKUP_THREAD_BY_THREAD_ID_INDEX 1
#define MM_IS_ADDRESS_VALID_INDEX 2
#define PS_SET_CREATE_PROCESS_NOTIFY_ROUTINE_INDEX 3
#define PS_REMOVE_CREATE_THREAD_NOTIFY_ROUTINE_INDEX 4
#define PS_GET_CURRENT_THREAD_ID_INDEX 5
#define PS_GET_PROCESS_ID_INDEX 6
#define PS_LOOKUP_PROCESS_BY_PROCESS_ID_INDEX 7
#define EX_ENUM_HANDLE_TABLE_INDEX 8
#define OB_GET_OBJECT_TYPE_INDEX 9
#define EXF_UNBLOCK_PUSH_LOCK_INDEX 10
#define PS_GET_PROCESS_IMAGE_FILE_NAME_INDEX 11
#define STRSTR_INDEX 12
#define RTL_INIT_UNICODE_STRING_INDEX 13
#define RTL_QUERY_REGISTRY_VALUES_INDEX 14
#define MM_GET_SYSTEM_ROUTINE_ADDRESS_INDEX 15
#define RTL_UNICODE_STRING_TO_ANSI_STRING_INDEX 16
#define RTL_COPY_UNICODE_STRING_INDEX 17
#define RTL_FREE_ANSI_STRING_INDEX 18
#define KE_INITIALIZE_GUARDED_MUTEX_INDEX 19
#define IO_CREATE_DEVICE_INDEX 20
#define IO_CREATE_SYMBOLIC_LINK_INDEX 21
#define IO_DELETE_DEVICE_INDEX 22
#define IO_DELETE_SYMBOLIC_LINK_INDEX 23
#define OB_REGISTER_CALLBACKS_INDEX 24
#define OB_UNREGISTER_CALLBACKS_INDEX 25
#define PS_SET_CREATE_THREAD_NOTIFY_ROUTINE_INDEX 26
#define KE_REVERT_TO_USER_AFFINITY_THREAD_EX_INDEX 27
#define KE_SET_SYSTEM_AFFINITY_THREAD_EX_INDEX 28
#define STRNLEN_INDEX 29
#define RTL_INIT_ANSI_STRING_INDEX 30
#define RTL_ANSI_STRING_TO_UNICODE_STRING_INDEX 31
#define IO_GET_CURRENT_PROCESS_INDEX 32
#define RTL_GET_VERSION_INDEX 33
#define RTL_COMPARE_MEMORY_INDEX 34
#define EX_GET_SYSTEM_FIRMWARE_TABLE_INDEX 35
#define IO_ALLOCATE_WORK_ITEM_INDEX 36
#define IO_FREE_WORK_ITEM_INDEX 37
#define IO_QUEUE_WORK_ITEM_INDEX 38
#define ZW_OPEN_FILE_INDEX 39
#define ZW_CLOSE_INDEX 40
#define ZW_CREATE_SECTION_INDEX 41
#define ZW_MAP_VIEW_OF_SECTION_INDEX 42
#define ZW_UNMAP_VIEW_OF_SECTION_INDEX 43
#define MM_COPY_MEMORY_INDEX 44
#define ZW_DEVICE_IO_CONTROL_FILE_INDEX 45
#define KE_STACK_ATTACH_PROCESS_INDEX 46
#define KE_UNSTACK_DETACH_PROCESS_INDEX 47
#define KE_WAIT_FOR_SINGLE_OBJECT_INDEX 48
#define PS_CREATE_SYSTEM_THREAD_INDEX 49
#define IOF_COMPLETE_REQUEST_INDEX 50
#define OB_REFERENCE_OBJECT_BY_HANDLE_INDEX 51
#define KE_DELAY_EXECUTION_THREAD_INDEX 52
#define KE_REGISTER_NMI_CALLBACK_INDEX 53
#define KE_DEREGISTER_NMI_CALLBACK_INDEX 54
#define KE_QUERY_ACTIVE_PROCESSOR_COUNT_INDEX 55
#define EX_ACQUIRE_PUSH_LOCK_EXCLUSIVE_EX_INDEX 56
#define EX_RELEASE_PUSH_LOCK_EXCLUSIVE_EX_INDEX 57
#define PS_GET_THREAD_ID_INDEX 58
#define RTL_CAPTURE_STACK_BACK_TRACE_INDEX 59
#define ZW_OPEN_DIRECTORY_OBJECT_INDEX 60
#define KE_INITIALIZE_AFFINITY_EX_INDEX 61
#define KE_ADD_PROCESSOR_AFFINITY_EX_INDEX 62
#define RTL_QUERY_MODULE_INFORMATION_INDEX 63
#define KE_INITIALIZE_APC_INDEX 64
#define KE_INSERT_QUEUE_APC_INDEX 65
#define KE_GENERIC_CALL_DPC_INDEX 66
#define KE_SIGNAL_CALL_DPC_DONE_INDEX 67
#define MM_GET_PHYSICAL_MEMORY_RANGES_EX2_INDEX 68
#define MM_GET_VIRTUAL_FOR_PHYSICAL_INDEX 69
#define OBF_REFERENCE_OBJECT_INDEX 70
#define EX_FREE_POOL_WITH_TAG_INDEX 71
#define EX_ALLOCATE_POOL2_INDEX 72
#define KE_RELEASE_GUARDED_MUTEX_INDEX 73
#define KE_ACQUIRE_GUARDED_MUTEX_INDEX 74
#define DBG_PRINT_EX_INDEX 75
#define RTL_COMPARE_UNICODE_STRING_INDEX 76
#define RTL_FREE_UNICODE_STRING_INDEX 77
/* NOTE(review): PS_GET_PROCESS_IMAGE_FILE_NAME_INDEX is already defined as
 * 11 above. This redefinition triggers warning C4005 and silently wins, so
 * every use of the macro resolves to 78, not 11 - yet the _DRIVER_IMPORTS
 * struct below places DrvImpPsGetProcessImage at field position 11. Confirm
 * which slot the import-resolution code actually populates and remove the
 * stale definition. */
#define PS_GET_PROCESS_IMAGE_FILE_NAME_INDEX 78
/*
 * Table of resolved kernel-routine pointers used by the driver in place of
 * direct imports. Each field's position corresponds to one of the *_INDEX
 * macros defined above (e.g. OB_DEREFERENCE_OBJECT_INDEX == 0 is the first
 * field), so field ORDER here is part of the contract — do not reorder.
 */
typedef struct _DRIVER_IMPORTS
{
    pObDereferenceObject DrvImpObDereferenceObject;
    pPsLookupThreadByThreadId DrvImpPsLookupThreadByThreadId;
    pMmIsAddressValid DrvImpMmIsAddressValid;
    pPsSetCreateProcessNotifyRoutine DrvImpPsSetCreateProcessNotifyRoutine;
    pPsRemoveCreateThreadNotifyRoutine DrvImpPsRemoveCreateThreadNotifyRoutine;
    pPsGetCurrentThreadId DrvImpPsGetCurrentThreadId;
    pPsGetProcessId DrvImpPsGetProcessId;
    pPsLookupProcessByProcessId DrvImpPsLookupProcessByProcessId;
    pExEnumHandleTable DrvImpExEnumHandleTable;
    pObGetObjectType DrvImpObGetObjectType;
    pExfUnblockPushLock DrvImpExfUnblockPushLock;
    /* NOTE(review): same function type as DrvImpPsGetProcessImageFileName at
     * the end of this struct — looks like a duplicated slot; confirm both
     * indices are intentionally populated. */
    pPsGetProcessImageFileName DrvImpPsGetProcessImage;
    pstrstr DrvImpstrstr;
    pRtlInitUnicodeString DrvImpRtlInitUnicodeString;
    pRtlQueryRegistryValues DrvImpRtlQueryRegistryValues;
    pMmGetSystemRoutineAddress DrvImpMmGetSystemRoutineAddress;
    pRtlUnicodeStringToAnsiString DrvImpRtlUnicodeStringToAnsiString;
    pRtlCopyUnicodeString DrvImpRtlCopyUnicodeString;
    pRtlFreeAnsiString DrvImpRtlFreeAnsiString;
    pKeInitializeGuardedMutex DrvImpKeInitializeGuardedMutex;
    pIoCreateDevice DrvImpIoCreateDevice;
    pIoCreateSymbolicLink DrvImpIoCreateSymbolicLink;
    pIoDeleteDevice DrvImpIoDeleteDevice;
    pIoDeleteSymbolicLink DrvImpIoDeleteSymbolicLink;
    pObRegisterCallbacks DrvImpObRegisterCallbacks;
    pObUnRegisterCallbacks DrvImpObUnRegisterCallbacks;
    pPsSetCreateThreadNotifyRoutine DrvImpPsSetCreateThreadNotifyRoutine;
    pKeRevertToUserAffinityThreadEx DrvImpKeRevertToUserAffinityThreadEx;
    pKeSetSystemAffinityThreadEx DrvImpKeSetSystemAffinityThreadEx;
    pstrnlen DrvImpstrnlen;
    pRtlInitAnsiString DrvImpRtlInitAnsiString;
    pRtlAnsiStringToUnicodeString DrvImpRtlAnsiStringToUnicodeString;
    pIoGetCurrentProcess DrvImpIoGetCurrentProcess;
    pRtlGetVersion DrvImpRtlGetVersion;
    pRtlCompareMemory DrvImpRtlCompareMemory;
    pExGetSystemFirmwareTable DrvImpExGetSystemFirmwareTable;
    pIoAllocateWorkItem DrvImpIoAllocateWorkItem;
    pIoFreeWorkItem DrvImpIoFreeWorkItem;
    pIoQueueWorkItem DrvImpIoQueueWorkItem;
    pZwOpenFile DrvImpZwOpenFile;
    pZwClose DrvImpZwClose;
    pZwCreateSection DrvImpZwCreateSection;
    pZwMapViewOfSection DrvImpZwMapViewOfSection;
    pZwUnmapViewOfSection DrvImpZwUnmapViewOfSection;
    pMmCopyMemory DrvImpMmCopyMemory;
    pZwDeviceIoControlFile DrvImpZwDeviceIoControlFile;
    pKeStackAttachProcess DrvImpKeStackAttachProcess;
    pKeUnstackDetachProcess DrvImpKeUnstackDetachProcess;
    pKeWaitForSingleObject DrvImpKeWaitForSingleObject;
    pPsCreateSystemThread DrvImpPsCreateSystemThread;
    pIofCompleteRequest DrvImpIofCompleteRequest;
    pObReferenceObjectByHandle DrvImpObReferenceObjectByHandle;
    pKeDelayExecutionThread DrvImpKeDelayExecutionThread;
    pKeRegisterNmiCallback DrvImpKeRegisterNmiCallback;
    pKeDeregisterNmiCallback DrvImpKeDeregisterNmiCallback;
    pKeQueryActiveProcessorCount DrvImpKeQueryActiveProcessorCount;
    pExAcquirePushLockExclusiveEx DrvImpExAcquirePushLockExclusiveEx;
    pExReleasePushLockExclusiveEx DrvImpExReleasePushLockExclusiveEx;
    pPsGetThreadId DrvImpPsGetThreadId;
    pRtlCaptureStackBackTrace DrvImpRtlCaptureStackBackTrace;
    pZwOpenDirectoryObject DrvImpZwOpenDirectoryObject;
    pKeInitializeAffinityEx DrvImpKeInitializeAffinityEx;
    pKeAddProcessorAffinityEx DrvImpKeAddProcessorAffinityEx;
    pRtlQueryModuleInformation DrvImpRtlQueryModuleInformation;
    pKeInitializeApc DrvImpKeInitializeApc;
    pKeInsertQueueApc DrvImpKeInsertQueueApc;
    pKeGenericCallDpc DrvImpKeGenericCallDpc;
    pKeSignalCallDpcDone DrvImpKeSignalCallDpcDone;
    pMmGetPhysicalMemoryRangesEx2 DrvImpMmGetPhysicalMemoryRangesEx2;
    pMmGetVirtualForPhysical DrvImpMmGetVirtualForPhysical;
    pObfReferenceObject DrvImpObfReferenceObject;
    pExFreePoolWithTag DrvImpExFreePoolWithTag;
    pExAllocatePool2 DrvImpExAllocatePool2;
    pKeReleaseGuardedMutex DrvImpKeReleaseGuardedMutex;
    pKeAcquireGuardedMutex DrvImpKeAcquireGuardedMutex;
    pDbgPrintEx DrvImpDbgPrintEx;
    pRtlCompareUnicodeString DrvImpRtlCompareUnicodeString;
    pRtlFreeUnicodeString DrvImpRtlFreeUnicodeString;
    pPsGetProcessImageFileName DrvImpPsGetProcessImageFileName;
    /* Trailing pad slot; purpose not shown here — presumably keeps
     * IMPORTS_LENGTH one past the last real index. TODO confirm. */
    UINT64 dummy;
} DRIVER_IMPORTS, *PDRIVER_IMPORTS;
/* Number of pointer-sized slots in DRIVER_IMPORTS. Parenthesized so the
 * division stays atomic when the macro is used inside a larger expression
 * (e.g. `x % IMPORTS_LENGTH` previously expanded incorrectly). */
#define IMPORTS_LENGTH (sizeof(DRIVER_IMPORTS) / sizeof(UINT64))
VOID
ImpObDereferenceObject(_In_ PVOID Object);
NTSTATUS
ImpPsLookupThreadByThreadId(HANDLE ThreadId, PETHREAD* Thread);
BOOLEAN
ImpMmIsAddressValid(_In_ PVOID VirtualAddress);
NTSTATUS
ImpPsSetCreateProcessNotifyRoutine(PCREATE_PROCESS_NOTIFY_ROUTINE NotifyRoutine, BOOLEAN Remove);
NTSTATUS
ImpPsRemoveCreateThreadNotifyRoutine(PCREATE_THREAD_NOTIFY_ROUTINE NotifyRoutine);
HANDLE
ImpPsGetCurrentThreadId();
HANDLE
ImpPsGetProcessId(PEPROCESS Process);
NTSTATUS
ImpPsLookupProcessByProcessId(HANDLE ProcessId, PEPROCESS* Process);
PVOID
ImpExEnumHandleTable(_In_ PHANDLE_TABLE HandleTable,
_In_ PVOID Callback,
_In_opt_ PVOID Context,
_Out_opt_ PHANDLE Handle);
POBJECT_TYPE
ImpObGetObjectType(_In_ PVOID Object);
VOID
ImpExfUnblockPushLock(_In_ PEX_PUSH_LOCK PushLock, _In_ PVOID WaitBlock);
LPCSTR
ImpPsGetProcessImageFileName(PEPROCESS Process);
INT
ImpStrStr(_In_ CHAR* haystack, _In_ CHAR* needle);
void
ImpRtlInitUnicodeString(PUNICODE_STRING DestinationString, PCWSTR SourceString);
NTSTATUS
ImpRtlQueryRegistryValues(ULONG RelativeTo,
PCWSTR Path,
PRTL_QUERY_REGISTRY_TABLE QueryTable,
void* Context,
void* Environment);
void*
ImpMmGetSystemRoutineAddress(PUNICODE_STRING SystemRoutineName);
NTSTATUS
ImpRtlUnicodeStringToAnsiString(PANSI_STRING DestinationString,
PCUNICODE_STRING SourceString,
BOOLEAN AllocateDestinationString);
void
ImpRtlCopyUnicodeString(PUNICODE_STRING DestinationString, PCUNICODE_STRING SourceString);
void
ImpRtlFreeAnsiString(PANSI_STRING AnsiString);
void
ImpKeInitializeGuardedMutex(PKGUARDED_MUTEX GuardedMutex);
NTSTATUS
ImpIoCreateDevice(PDRIVER_OBJECT DriverObject,
ULONG DeviceExtensionSize,
PUNICODE_STRING DeviceName,
DEVICE_TYPE DeviceType,
ULONG DeviceCharacteristics,
BOOLEAN Exclusive,
PDEVICE_OBJECT* DeviceObject);
NTSTATUS
ImpIoCreateSymbolicLink(PUNICODE_STRING SymbolicLinkName, PUNICODE_STRING DeviceName);
void
ImpIoDeleteDevice(PDEVICE_OBJECT DeviceObject);
void
ImpIoDeleteSymbolicLink(PUNICODE_STRING SymbolicLinkName);
NTSTATUS
ImpObRegisterCallbacks(_In_ POB_CALLBACK_REGISTRATION CallbackRegistration,
_Out_ PVOID* RegistrationHandle);
VOID
ImpObUnRegisterCallbacks(_In_ PVOID RegistrationHandle);
NTSTATUS
ImpPsSetCreateThreadNotifyRoutine(PCREATE_THREAD_NOTIFY_ROUTINE NotifyRoutine);
void
ImpKeRevertToUserAffinityThreadEx(KAFFINITY Affinity);
KAFFINITY
ImpKeSetSystemAffinityThreadEx(KAFFINITY Affinity);
SIZE_T
ImpStrnlen(_In_ CHAR* str, _In_ SIZE_T maxCount);
void
ImpRtlInitAnsiString(PANSI_STRING DestinationString, PCSZ SourceString);
NTSTATUS
ImpRtlAnsiStringToUnicodeString(PUNICODE_STRING DestinationString,
PCANSI_STRING SourceString,
BOOLEAN AllocateDestinationString);
PEPROCESS
ImpIoGetCurrentProcess(void);
NTSTATUS
ImpRtlGetVersion(PRTL_OSVERSIONINFOW lpVersionInformation);
SIZE_T
ImpRtlCompareMemory(_In_ PVOID Source1, _In_ PVOID Source2, _In_ SIZE_T Length);
NTSTATUS
ImpExGetSystemFirmwareTable(_In_ ULONG FirmwareTableProviderSignature,
_In_ ULONG FirmwareTableID,
_In_ PVOID pFirmwareTableBuffer,
_In_ ULONG BufferLength,
_Out_ PULONG ReturnLength);
PIO_WORKITEM
ImpIoAllocateWorkItem(PDEVICE_OBJECT DeviceObject);
void
ImpIoFreeWorkItem(PIO_WORKITEM WorkItem);
VOID
ImpIoQueueWorkItem(_In_ PIO_WORKITEM IoWorkItem,
_In_ PIO_WORKITEM_ROUTINE WorkerRoutine,
_In_ WORK_QUEUE_TYPE QueueType,
_In_opt_ PVOID Context);
NTSTATUS
ImpZwOpenFile(PHANDLE FileHandle,
ACCESS_MASK DesiredAccess,
POBJECT_ATTRIBUTES ObjectAttributes,
PIO_STATUS_BLOCK IoStatusBlock,
ULONG ShareAccess,
ULONG OpenOptions);
NTSTATUS
ImpZwClose(HANDLE Handle);
NTSTATUS
ImpZwCreateSection(PHANDLE SectionHandle,
ACCESS_MASK DesiredAccess,
POBJECT_ATTRIBUTES ObjectAttributes,
PLARGE_INTEGER MaximumSize,
ULONG SectionPageProtection,
ULONG AllocationAttributes,
HANDLE FileHandle);
NTSTATUS
ImpZwMapViewOfSection(_In_ HANDLE SectionHandle,
_In_ HANDLE ProcessHandle,
_Inout_ PVOID* BaseAddress,
_In_ ULONG_PTR ZeroBits,
_In_ SIZE_T CommitSize,
_Inout_opt_ PLARGE_INTEGER SectionOffset,
_Inout_ PSIZE_T ViewSize,
_In_ SECTION_INHERIT InheritDisposition,
_In_ ULONG AllocationType,
_In_ ULONG Win32Protect);
NTSTATUS
ImpZwUnmapViewOfSection(_In_ HANDLE ProcessHandle, _In_ PVOID BaseAddress);
NTSTATUS
ImpMmCopyMemory(PVOID TargetAddress,
MM_COPY_ADDRESS SourceAddress,
SIZE_T NumberOfBytes,
ULONG Flags,
PSIZE_T NumberOfBytesTransferred);
NTSTATUS
ImpZwDeviceIoControlFile(_In_ HANDLE FileHandle,
_In_opt_ HANDLE Event,
_In_opt_ PIO_APC_ROUTINE ApcRoutine,
_In_opt_ PVOID ApcContext,
_Out_ PIO_STATUS_BLOCK IoStatusBlock,
_In_ ULONG IoControlCode,
_In_opt_ PVOID InputBuffer,
_In_ ULONG InputBufferLength,
_Out_opt_ PVOID OutputBuffer,
_In_ ULONG OutputBufferLength);
void
ImpKeStackAttachProcess(PRKPROCESS Process, PKAPC_STATE ApcState);
void
ImpKeUnstackDetachProcess(PKAPC_STATE ApcState);
NTSTATUS
ImpKeWaitForSingleObject(_In_ PVOID Object,
_In_ KWAIT_REASON WaitReason,
_In_ KPROCESSOR_MODE WaitMode,
_In_ BOOLEAN Alertable,
_In_ PLARGE_INTEGER Timeout);
NTSTATUS
ImpPsCreateSystemThread(_Out_ PHANDLE ThreadHandle,
_In_ ULONG DesiredAccess,
_In_opt_ POBJECT_ATTRIBUTES ObjectAttributes,
_In_opt_ HANDLE ProcessHandle,
_Out_opt_ PCLIENT_ID ClientId,
_In_ PKSTART_ROUTINE StartRoutine,
_In_opt_ PVOID StartContext);
void
ImpIofCompleteRequest(PIRP Irp, CCHAR PriorityBoost);
NTSTATUS
ImpObReferenceObjectByHandle(_In_ HANDLE Handle,
_In_ ACCESS_MASK DesiredAccess,
_In_opt_ POBJECT_TYPE ObjectType,
_In_ KPROCESSOR_MODE AccessMode,
_Out_ PVOID* Object,
_Out_opt_ POBJECT_HANDLE_INFORMATION HandleInformation);
NTSTATUS
ImpKeDelayExecutionThread(KPROCESSOR_MODE WaitMode, BOOLEAN Alertable, PLARGE_INTEGER Interval);
PVOID
ImpKeRegisterNmiCallback(_In_ PVOID CallbackRoutine, _In_opt_ PVOID Context);
NTSTATUS
ImpKeDeregisterNmiCallback(_In_ PVOID Handle);
ULONG
ImpKeQueryActiveProcessorCount(PKAFFINITY ActiveProcessors);
void
ImpExAcquirePushLockExclusiveEx(PEX_PUSH_LOCK PushLock, ULONG Flags);
void
ImpExReleasePushLockExclusiveEx(PEX_PUSH_LOCK PushLock, ULONG Flags);
HANDLE
ImpPsGetThreadId(PETHREAD Thread);
USHORT
ImpRtlCaptureStackBackTrace(_In_ ULONG FramesToSkip,
_In_ ULONG FramesToCapture,
_Out_ PVOID* BackTrace,
_Out_opt_ PULONG BackTraceHash);
NTSTATUS
ImpZwOpenDirectoryObject(PHANDLE DirectoryHandle,
ACCESS_MASK DesiredAccess,
POBJECT_ATTRIBUTES ObjectAttributes);
void
ImpKeInitializeAffinityEx(PKAFFINITY_EX AffinityMask);
VOID
ImpKeAddProcessorAffinityEx(_In_ PKAFFINITY_EX affinity, _In_ INT num);
NTSTATUS
ImpRtlQueryModuleInformation(_Inout_ ULONG* InformationLength,
_In_ ULONG SizePerModule,
_In_ PVOID InformationBuffer);
VOID
ImpKeInitializeApc(_In_ PKAPC Apc,
_In_ PKTHREAD Thread,
_In_ KAPC_ENVIRONMENT Environment,
_In_ PKKERNEL_ROUTINE KernelRoutine,
_In_ PKRUNDOWN_ROUTINE RundownRoutine,
_In_ PKNORMAL_ROUTINE NormalRoutine,
_In_ KPROCESSOR_MODE ApcMode,
_In_ PVOID NormalContext);
BOOLEAN
ImpKeInsertQueueApc(_In_ PKAPC Apc,
_In_ PVOID SystemArgument1,
_In_ PVOID SystemArgument2,
_In_ KPRIORITY Increment);
VOID
ImpKeGenericCallDpc(_In_ PKDEFERRED_ROUTINE DpcRoutine, _In_ PVOID Context);
VOID
ImpKeSignalCallDpcDone(_In_ PVOID SystemArgument1);
PPHYSICAL_MEMORY_RANGE
ImpMmGetPhysicalMemoryRangesEx2(_In_ PVOID PartitionObject, _In_ ULONG Flags);
void*
ImpMmGetVirtualForPhysical(_In_ PHYSICAL_ADDRESS PhysicalAddress);
LONG_PTR
ImpObfReferenceObject(_In_ PVOID Object);
VOID
ImpExFreePoolWithTag(_In_ PVOID P, _In_ ULONG Tag);
void*
ImpExAllocatePool2(_In_ POOL_FLAGS Flags, _In_ SIZE_T NumberOfBytes, _In_ ULONG Tag);
VOID
ImpKeReleaseGuardedMutex(_In_ PKGUARDED_MUTEX GuardedMutex);
VOID
ImpKeAcquireGuardedMutex(_In_ PKGUARDED_MUTEX GuardedMutex);
ULONG
ImpDbgPrintEx(_In_ ULONG ComponentId, _In_ ULONG Level, _In_ PCSTR Format, ...);
LONG
ImpRtlCompareUnicodeString(_In_ PCUNICODE_STRING String1,
_In_ PCUNICODE_STRING String2,
_In_ BOOLEAN CaseInSensitive);
VOID
ImpRtlFreeUnicodeString(_In_ PUNICODE_STRING UnicodeString);
#endif
================================================
FILE: driver/integrity.c
================================================
#include "integrity.h"
#include "callbacks.h"
#include "common.h"
#include "crypt.h"
#include "driver.h"
#include "imports.h"
#include "io.h"
#include "lib/stdlib.h"
#include "modules.h"
#include "pe.h"
#include "session.h"
#include "util.h"
#include
#include
#include
// clang-format off
/* Prefix placed at the start of every packed section buffer built by
 * StoreModuleExecutableRegionsInBuffer; describes what follows it. */
typedef struct _INTEGRITY_CHECK_HEADER {
    /* Count of total sections contained within the buffer */
    UINT32 section_count;
    /* Total size of the buffer */
    UINT32 total_size;
} INTEGRITY_CHECK_HEADER, *PINTEGRITY_CHECK_HEADER;

/* Request payload from user mode describing one loaded module to validate
 * (see ValidateProcessLoadedModule). Offsets are process-relative. */
typedef struct _PROCESS_MODULE_INFORMATION {
    /* Pointer to the base of the module*/
    PVOID module_base;
    /* Total size of the module */
    SIZE_T module_size;
    /* Path to the modules executable image*/
    WCHAR module_path[MAX_MODULE_PATH];
} PROCESS_MODULE_INFORMATION, *PPROCESS_MODULE_INFORMATION;

/* Result returned to user mode after a module validation pass. */
typedef struct _PROCESS_MODULE_VALIDATION_RESULT {
    /* Boolean value of whether or not the module image is valid */
    UINT32 is_module_valid;
} PROCESS_MODULE_VALIDATION_RESULT, *PPROCESS_MODULE_VALIDATION_RESULT;

/* In-memory layout of one packed section buffer: check header, then the
 * first section header, then that section's raw bytes (flexible array). */
typedef struct _VAL_INTEGRITY_HEADER {
    /* Header containing information pertaining to the buffer */
    INTEGRITY_CHECK_HEADER integrity_check_header;
    /* Section header */
    IMAGE_SECTION_HEADER section_header;
    /* Pointer to the start of the sections image */
    CHAR section_base[];
} VAL_INTEGRITY_HEADER, *PVAL_INTEGRITY_HEADER;
STATIC
NTSTATUS
InitiateEptFunctionAddressArrays();
STATIC
NTSTATUS
GetModuleInformationByName(
_Out_ PRTL_MODULE_EXTENDED_INFO ModuleInfo,
_In_ LPCSTR ModuleName);
STATIC
NTSTATUS
StoreModuleExecutableRegionsInBuffer(
_Out_ PVOID* Buffer,
_In_ PVOID ModuleBase,
_In_ SIZE_T ModuleSize,
_Out_ PSIZE_T BytesWritten,
_In_ BOOLEAN IsModulex86
);
STATIC
NTSTATUS
MapDiskImageIntoVirtualAddressSpace(
_Inout_ PHANDLE SectionHandle,
_Out_ PVOID* Section,
_In_ PUNICODE_STRING Path,
_Out_ PSIZE_T Size
);
STATIC
VOID
GetNextSMBIOSStructureInTable(
_Inout_ PSMBIOS_TABLE_HEADER* CurrentStructure
);
STATIC
NTSTATUS
GetStringAtIndexFromSMBIOSTable(
_In_ PSMBIOS_TABLE_HEADER Table,
_In_ UINT32 Index,
_In_ PVOID Buffer,
_In_ SIZE_T BufferSize
);
STATIC
NTSTATUS
GetAverageReadTimeAtRoutine(
_In_ PVOID RoutineAddress,
_Out_ PUINT64 AverageTime
);
STATIC
VOID
HeartbeatDpcRoutine(
_In_ PKDPC Dpc,
_In_opt_ PVOID DeferredContext,
_In_opt_ PVOID SystemArgument1,
_In_opt_ PVOID SystemArgument2
);
// clang-format on
#ifdef ALLOC_PRAGMA
# pragma alloc_text(PAGE, GetDriverImageSize)
# pragma alloc_text(PAGE, GetModuleInformationByName)
# pragma alloc_text(PAGE, StoreModuleExecutableRegionsInBuffer)
# pragma alloc_text(PAGE, MapDiskImageIntoVirtualAddressSpace)
# pragma alloc_text(PAGE, RetrieveInMemoryModuleExecutableSections)
# pragma alloc_text(PAGE, GetNextSMBIOSStructureInTable)
# pragma alloc_text(PAGE, GetStringAtIndexFromSMBIOSTable)
# pragma alloc_text(PAGE, ParseSMBIOSTable)
# pragma alloc_text(PAGE, ValidateProcessLoadedModule)
# pragma alloc_text(PAGE, GetHardDiskDriveSerialNumber)
# pragma alloc_text(PAGE, ScanForSignature)
# pragma alloc_text(PAGE, InitiateEptFunctionAddressArrays)
# pragma alloc_text(PAGE, DetectEptHooksInKeyFunctions)
#endif
/*
* note: this can be put into its own function wihtout an IRP as argument then
* it can be used in both the get driver image ioctl handler and the
* CopyDriverExecvutableRegions func
*/
/*
 * Completes the "get driver image size" IOCTL: looks our driver up in the
 * system module list and writes its image size (UINT32, in bytes) into the
 * IRP's system buffer. The module-list allocation is always released before
 * returning.
 */
NTSTATUS
GetDriverImageSize(_Inout_ PIRP Irp)
{
    PAGED_CODE();
    NT_ASSERT(Irp != NULL);
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    LPCSTR driver_name = GetDriverName();
    SYSTEM_MODULES module_list = {0};
    PRTL_MODULE_EXTENDED_INFO entry = NULL;

    status = GetSystemModuleInformation(&module_list);
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("GetSystemModuleInformation failed with status %x", status);
        return status;
    }

    entry = FindSystemModuleByName(driver_name, &module_list);
    if (!entry) {
        DEBUG_ERROR("FindSystemModuleByName failed with no status code");
        status = STATUS_NOT_FOUND;
        goto end;
    }

    /* Output buffer must be large enough for a single UINT32. */
    status = ValidateIrpOutputBuffer(Irp, sizeof(UINT32));
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("ValidateIrpOutputBuffer failed with status %x", status);
        goto end;
    }

    Irp->IoStatus.Information = sizeof(UINT32);
    IntCopyMemory(
        Irp->AssociatedIrp.SystemBuffer,
        &entry->ImageSize,
        sizeof(UINT32));

end:
    if (module_list.address)
        ImpExFreePoolWithTag(module_list.address, SYSTEM_MODULES_POOL);
    return status;
}
/*
 * Fills ModuleInfo with the extended information (base, size, path) of the
 * system module named ModuleName.
 *
 * Fix: the previous implementation ignored ModuleName entirely and always
 * looked up our own driver via GetDriverName() (the old TODO noted this).
 * The routine now honours its parameter; the existing caller passes the
 * driver name, so its behavior is unchanged.
 */
STATIC
NTSTATUS
GetModuleInformationByName(
    _Out_ PRTL_MODULE_EXTENDED_INFO ModuleInfo, _In_ LPCSTR ModuleName)
{
    PAGED_CODE();
    NT_ASSERT(ModuleName != NULL);
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    SYSTEM_MODULES modules = {0};
    PRTL_MODULE_EXTENDED_INFO module_info = NULL;

    status = GetSystemModuleInformation(&modules);
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("GetSystemModuleInformation failed with status %x", status);
        return status;
    }

    module_info = FindSystemModuleByName(ModuleName, &modules);
    if (!module_info) {
        DEBUG_ERROR("FindSystemModuleByName failed with no status");
        ImpExFreePoolWithTag(modules.address, SYSTEM_MODULES_POOL);
        return STATUS_NOT_FOUND;
    }

    /* Copy out before freeing the module list the entry points into. */
    ModuleInfo->FileNameOffset = module_info->FileNameOffset;
    ModuleInfo->ImageBase = module_info->ImageBase;
    ModuleInfo->ImageSize = module_info->ImageSize;
    IntCopyMemory(
        ModuleInfo->FullPathName,
        module_info->FullPathName,
        sizeof(ModuleInfo->FullPathName));

    if (modules.address)
        ImpExFreePoolWithTag(modules.address, SYSTEM_MODULES_POOL);
    return status;
}
#define PE_TYPE_32_BIT 0x10b
/* TRUE iff the section is marked executable (IMAGE_SCN_MEM_EXECUTE set). */
FORCEINLINE
STATIC
BOOLEAN
IsSectionExecutable(_In_ PIMAGE_SECTION_HEADER Section)
{
    if (Section->Characteristics & IMAGE_SCN_MEM_EXECUTE)
        return TRUE;
    return FALSE;
}
/*
 * Decides whether Base may be read. MmIsAddressValid returning FALSE is
 * only treated as unsafe for non-x86 images (x86 modules legitimately fail
 * the check — see the note inside StoreModuleExecutableRegionsInBuffer).
 */
FORCEINLINE
STATIC
BOOLEAN
IsModuleAddressSafe(_In_ PVOID Base, _In_ BOOLEAN x86)
{
    BOOLEAN resident = MmIsAddressValid(Base);
    if (!resident && !x86)
        return FALSE;
    return TRUE;
}
/* Bytes one section contributes to the packed buffer: its header followed
 * by its raw data. */
FORCEINLINE
STATIC
UINT32
GetSectionTotalPacketSize(_In_ PIMAGE_SECTION_HEADER Section)
{
    UINT32 header_size = sizeof(IMAGE_SECTION_HEADER);
    return header_size + Section->SizeOfRawData;
}
/*
 * Populates the buffer prefix: section count plus the total buffer size
 * (section payload + the header itself).
 */
FORCEINLINE
STATIC
VOID
InitIntegrityCheckHeader(
    _Out_ PINTEGRITY_CHECK_HEADER Header,
    _In_ UINT32 SectionCount,
    _In_ UINT32 TotalSize)
{
    UINT32 size_with_header = TotalSize + sizeof(INTEGRITY_CHECK_HEADER);
    Header->section_count = SectionCount;
    Header->total_size = size_with_header;
}
/*
 * Packs every executable section of the PE image at ModuleBase into a
 * freshly allocated non-paged buffer:
 *
 *   [INTEGRITY_CHECK_HEADER][sec hdr][sec data][sec hdr][sec data]...
 *
 * On success *Buffer owns the allocation (POOL_TAG_INTEGRITY) and
 * *BytesWritten is the number of valid bytes. On failure *Buffer is NULL
 * and nothing is leaked.
 *
 * Fixes: PeGetNtHeader result is now NULL-checked, and a zero-section
 * image is rejected — previously `num_sections - 1` underflowed (UINT32)
 * and the loop walked ~4 billion bogus section headers.
 */
STATIC
NTSTATUS
StoreModuleExecutableRegionsInBuffer(
    _Out_ PVOID* Buffer,
    _In_ PVOID ModuleBase,
    _In_ SIZE_T ModuleSize,
    _Out_ PSIZE_T BytesWritten,
    _In_ BOOLEAN IsModulex86)
{
    PAGED_CODE();
    NT_ASSERT(Buffer != NULL);
    NT_ASSERT(ModuleBase != NULL);
    NT_ASSERT(BytesWritten != NULL);
    UINT32 total_packet_size = 0;
    UINT32 num_sections = 0;
    UINT32 num_executable_sections = 0;
    UINT64 buffer_base = 0;
    UINT32 bytes_returned = 0;
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    PNT_HEADER_64 nt_header = NULL;
    PIMAGE_SECTION_HEADER section = NULL;
    MM_COPY_ADDRESS address = {0};
    INTEGRITY_CHECK_HEADER header = {0};

    if (!ModuleBase || !ModuleSize)
        return STATUS_INVALID_PARAMETER;

    /* For context, when we are hashing x86 modules, MmIsAddressValid will
     * return FALSE. Yet we still need protection for when an invalid address
     * is passed for a non-x86 based image. */
    if (!IsModuleAddressSafe(ModuleBase, IsModulex86))
        return STATUS_UNSUCCESSFUL;

    /*
     * The reason we allocate a buffer to temporarily hold the section data
     * is that we don't know the total size until after we iterate over the
     * sections meaning we cant set Irp->IoStatus.Information to the size of
     * our reponse until we enumerate and count all executable sections for
     * the file.
     */
    *BytesWritten = 0;
    *Buffer = ImpExAllocatePool2(
        POOL_FLAG_NON_PAGED,
        ModuleSize + sizeof(INTEGRITY_CHECK_HEADER),
        POOL_TAG_INTEGRITY);

    if (*Buffer == NULL)
        return STATUS_MEMORY_NOT_ALLOCATED;

    /*
     * The IMAGE_DOS_HEADER.e_lfanew stores the offset of the
     * IMAGE_NT_HEADER from the base of the image.
     */
    nt_header = PeGetNtHeader(ModuleBase);

    if (!nt_header) {
        ImpExFreePoolWithTag(*Buffer, POOL_TAG_INTEGRITY);
        *Buffer = NULL;
        return STATUS_INVALID_IMAGE_FORMAT;
    }

    num_sections = GetSectionCount(nt_header);

    /* Reject zero-section images: the `num_sections - 1` loop bound below is
     * unsigned and would otherwise wrap to 0xFFFFFFFF. */
    if (num_sections == 0) {
        ImpExFreePoolWithTag(*Buffer, POOL_TAG_INTEGRITY);
        *Buffer = NULL;
        return STATUS_INVALID_IMAGE_FORMAT;
    }

    /*
     * The IMAGE_FIRST_SECTION macro takes in an IMAGE_NT_HEADER and returns
     * the address of the first section of the PE file.
     */
    section = IMAGE_FIRST_SECTION(nt_header);
    buffer_base = (UINT64)*Buffer + sizeof(INTEGRITY_CHECK_HEADER);

    /* NOTE(review): `num_sections - 1` skips the image's LAST section. Both
     * the in-memory and on-disk passes go through this same routine so the
     * comparison stays consistent, but the usermode peer must mirror this —
     * confirm before changing the bound to num_sections. */
    for (UINT32 index = 0; index < num_sections - 1; index++) {
        if (!IsSectionExecutable(section)) {
            section++;
            continue;
        }

        /* First copy the section header itself... */
        address.VirtualAddress = section;
        status = ImpMmCopyMemory(
            (UINT64)buffer_base + total_packet_size,
            address,
            sizeof(IMAGE_SECTION_HEADER),
            MM_COPY_MEMORY_VIRTUAL,
            &bytes_returned);

        if (!NT_SUCCESS(status)) {
            ImpExFreePoolWithTag(*Buffer, POOL_TAG_INTEGRITY);
            *Buffer = NULL;
            return status;
        }

        /* ...then the section's raw bytes immediately after it. */
        address.VirtualAddress = (UINT64)ModuleBase + section->PointerToRawData;
        status = ImpMmCopyMemory(
            (UINT64)buffer_base + total_packet_size +
                sizeof(IMAGE_SECTION_HEADER),
            address,
            section->SizeOfRawData,
            MM_COPY_MEMORY_VIRTUAL,
            &bytes_returned);

        if (!NT_SUCCESS(status)) {
            ImpExFreePoolWithTag(*Buffer, POOL_TAG_INTEGRITY);
            *Buffer = NULL;
            return status;
        }

        total_packet_size += GetSectionTotalPacketSize(section);
        num_executable_sections++;
        section++;
    }

    InitIntegrityCheckHeader(
        &header,
        num_executable_sections,
        total_packet_size);

    IntCopyMemory(*Buffer, &header, sizeof(INTEGRITY_CHECK_HEADER));
    *BytesWritten = total_packet_size + sizeof(INTEGRITY_CHECK_HEADER);
    return status;
}
/*
 * Maps the executable at Path into system address space via a SEC_IMAGE
 * section. On success, *SectionHandle and *Section are owned by the caller
 * (the file handle itself is closed before returning); *Size receives the
 * mapped view size. On failure both out-params are cleared and all handles
 * opened here are closed.
 *
 * Fixes:
 *  - `io` was declared as PIO_STATUS_BLOCK (a NULL pointer) and its address
 *    passed to ZwOpenFile, which writes a full IO_STATUS_BLOCK (16 bytes on
 *    x64) through that 8-byte slot — stack corruption. It is now a real
 *    IO_STATUS_BLOCK on the stack.
 *  - Removed a dead error block (referencing ObReferenceObjectByHandle)
 *    that sat on a path where status was already known to be successful.
 */
STATIC
NTSTATUS
MapDiskImageIntoVirtualAddressSpace(
    _Inout_ PHANDLE SectionHandle,
    _Out_ PVOID* Section,
    _In_ PUNICODE_STRING Path,
    _Out_ PSIZE_T Size)
{
    PAGED_CODE();
    NT_ASSERT(SectionHandle != NULL);
    NT_ASSERT(Section != NULL);
    NT_ASSERT(Path != NULL);
    NT_ASSERT(Size != NULL);
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    HANDLE handle = NULL;
    OBJECT_ATTRIBUTES oa = {0};
    IO_STATUS_BLOCK io = {0};
    UNICODE_STRING path = {0};

    *Section = NULL;
    *Size = 0;

    ImpRtlInitUnicodeString(&path, Path->Buffer);
    InitializeObjectAttributes(&oa, &path, OBJ_KERNEL_HANDLE, NULL, NULL);

    status = ImpZwOpenFile(&handle, GENERIC_READ, &oa, &io, 0, 0);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("ZwOpenFile failed with status %x", status);
        return status;
    }

    /* Reuse the attributes for an unnamed section object. */
    oa.ObjectName = NULL;

    /*
     * Its important that we set the SEC_IMAGE flag with the PAGE_READONLY
     * flag as we are mapping an executable image.
     */
    status = ImpZwCreateSection(
        SectionHandle,
        SECTION_ALL_ACCESS,
        &oa,
        NULL,
        PAGE_READONLY,
        SEC_IMAGE,
        handle);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("ZwCreateSection failed with status %x", status);
        ImpZwClose(handle);
        *SectionHandle = NULL;
        return status;
    }

    /*
     * Mapping a section with the flag SEC_IMAGE (see function above) tells
     * the os we are mapping an executable image. This then allows the OS to
     * take care of parsing the PE header and dealing with all relocations
     * for us, meaning the mapped image will be identical to the in memory
     * image.
     */
    status = ImpZwMapViewOfSection(
        *SectionHandle,
        ZwCurrentProcess(),
        Section,
        0,
        0,
        NULL,
        Size,
        ViewUnmap,
        MEM_TOP_DOWN,
        PAGE_READONLY);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("ZwMapViewOfSection failed with status %x", status);
        ImpZwClose(handle);
        ImpZwClose(*SectionHandle);
        *SectionHandle = NULL;
        return status;
    }

    /* The mapping keeps the image alive; the file handle is no longer
     * needed. */
    ImpZwClose(handle);
    return status;
}
/*
 * IOCTL handler: snapshots our driver's in-memory executable sections into
 * a packed buffer and copies it to the IRP's output buffer. The scratch
 * buffer is freed on every exit path (StoreModuleExecutableRegionsInBuffer
 * frees and NULLs it itself on failure).
 */
NTSTATUS
RetrieveInMemoryModuleExecutableSections(_Inout_ PIRP Irp)
{
    PAGED_CODE();
    NT_ASSERT(Irp != NULL);
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    SIZE_T packed_length = 0;
    PVOID packed_sections = NULL;
    RTL_MODULE_EXTENDED_INFO module_info = {0};
    LPCSTR driver_name = GetDriverName();

    status = GetModuleInformationByName(&module_info, driver_name);
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("GetModuleInformationByName failed with status %x", status);
        return status;
    }

    status = StoreModuleExecutableRegionsInBuffer(
        &packed_sections,
        module_info.ImageBase,
        module_info.ImageSize,
        &packed_length,
        FALSE);
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR(
            "StoreModuleExecutableRegionsInBuffe failed with status %x",
            status);
        return status;
    }

    status = ValidateIrpOutputBuffer(Irp, packed_length);
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("ValidateIrpOutputBuffer failed with status %x", status);
        goto end;
    }

    Irp->IoStatus.Information = packed_length;
    IntCopyMemory(
        Irp->AssociatedIrp.SystemBuffer, packed_sections, packed_length);

end:
    if (packed_sections)
        ImpExFreePoolWithTag(packed_sections, POOL_TAG_INTEGRITY);
    return status;
}
#define SMBIOS_TABLE 'RSMB'
#define NULL_TERMINATOR '\0'
/*
* From line 727 in the SMBIOS Specification:
*
* 727 Each structure shall be terminated by a double-null (0000h), either
* directly following the 728 formatted area (if no strings are present) or
* directly following the last string. This includes 729 system- and
* OEM-specific structures and allows upper-level software to easily traverse
* the 730 structure table. (See structure-termination examples later in this
* clause.)
*
* TLDR is that if the first two characters proceeding the structure are null
* terminators, then there are no strings, otherwise to find the end of the
* string section simply iterate until there is a double null terminator.
*
* source:
* https://www.dmtf.org/sites/default/files/standards/documents/DSP0134_2.7.1.pdf
*/
/*
 * Advances *CurrentStructure past the current SMBIOS structure and its
 * trailing string set. Per the spec (see comment above), each structure's
 * string area ends with a double NUL; the next structure begins directly
 * after that pair.
 */
STATIC
VOID
GetNextSMBIOSStructureInTable(_Inout_ PSMBIOS_TABLE_HEADER* CurrentStructure)
{
    PAGED_CODE();
    NT_ASSERT(CurrentStructure != NULL);

    /* Strings start immediately after the formatted area. */
    PCHAR cursor =
        (PCHAR)((UINT64)*CurrentStructure + (*CurrentStructure)->Length);

    /* Scan for the terminating double-NUL. */
    while (cursor[0] != NULL_TERMINATOR || cursor[1] != NULL_TERMINATOR)
        cursor++;

    /* Skip both NULs to land on the next structure's header. */
    *CurrentStructure = (PSMBIOS_TABLE_HEADER)(cursor + 2);
}
/*
* Remember that the string index does not start from the beginning of the
* struct. For example, lets take RAW_SMBIOS_TABLE_02: the first string is NOT
* "Type" at index 0, the first string is Manufacturer. So if we want to find
* the SerialNumber, the string index would be 4, as the previous 3 values
* (after the header) are all strings. So remember, the index is into the number
* of strings that exist for the given table, NOT the size of the structure or a
* values index into the struct.
*
* Here we count the number of strings by incrementing the string_count each
* time we pass a null terminator so we know when we're at the beginning of the
* target string.
*/
/*
 * Copies the Index'th string (1-based, per SMBIOS string-reference
 * semantics) from the string set that follows Table into Buffer,
 * character by character.
 *
 * Returns STATUS_SUCCESS when the full target string has been copied,
 * STATUS_NOT_FOUND if the string set ends (double NUL) before the index is
 * reached, STATUS_BUFFER_TOO_SMALL if the string does not fit.
 *
 * NOTE(review): on success the terminating NUL is NOT copied into Buffer —
 * callers presumably pass a zeroed buffer; confirm at call sites.
 */
STATIC
NTSTATUS
GetStringAtIndexFromSMBIOSTable(
    _In_ PSMBIOS_TABLE_HEADER Table,
    _In_ UINT32 Index,
    _In_ PVOID Buffer,
    _In_ SIZE_T BufferSize)
{
    PAGED_CODE();
    NT_ASSERT(Table != NULL);
    NT_ASSERT(Buffer != NULL);
    /* Write offset into Buffer while copying the target string; also reused
     * as a per-string counter (reset each time a string ends). */
    UINT32 current_string_char_index = 0;
    UINT32 string_count = 0;
    /* String area begins right after the formatted area (Table->Length). */
    PCHAR current_string_char = (PCHAR)((UINT64)Table + Table->Length);
    PCHAR next_string_char = current_string_char + 1;
    UINT64 dest = 0;

    for (;;) {
        /* Double NUL == end of this structure's string set. */
        if (*current_string_char == NULL_TERMINATOR &&
            *next_string_char == NULL_TERMINATOR)
            return STATUS_NOT_FOUND;

        if (current_string_char_index >= BufferSize)
            return STATUS_BUFFER_TOO_SMALL;

        /* string_count counts COMPLETED strings, so string_count + 1 is the
         * 1-based index of the string we are currently inside. */
        if (string_count + 1 == Index) {
            /* End of the target string: done (NUL not written — see note). */
            if (*current_string_char == NULL_TERMINATOR)
                return STATUS_SUCCESS;

            dest = (UINT64)Buffer + current_string_char_index;
            IntCopyMemory(dest, current_string_char, sizeof(CHAR));
            current_string_char_index++;
            goto increment;
        }

        /* A NUL outside the target string closes the current string. */
        if (*current_string_char == NULL_TERMINATOR) {
            current_string_char_index = 0;
            string_count++;
        }

    increment:
        current_string_char++;
        next_string_char++;
    }

    /* Unreachable: the loop above always returns. */
    return STATUS_NOT_FOUND;
}
/* Reinterprets the raw firmware-table buffer as RAW_SMBIOS_DATA. */
FORCEINLINE
STATIC
PRAW_SMBIOS_DATA
GetRawSmbiosData(_In_ PVOID FirmwareTable)
{
    PRAW_SMBIOS_DATA data = (PRAW_SMBIOS_DATA)FirmwareTable;
    return data;
}
/* Returns the first structure header inside the raw SMBIOS table data. */
FORCEINLINE
STATIC
PSMBIOS_TABLE_HEADER
GetSmbiosTableHeader(_In_ PRAW_SMBIOS_DATA Data)
{
    PSMBIOS_TABLE_HEADER first = (PSMBIOS_TABLE_HEADER)&Data->SMBIOSTableData[0];
    return first;
}
/*
 * Reads the raw SMBIOS firmware table ('RSMB'), walks it to the first
 * structure of type TableIndex, and copies the string at TableSubIndex
 * (1-based SMBIOS string reference) into Buffer.
 *
 * Fixes: NULL was being used where plain integer zero belongs — both in
 * the `buffer_size` check and as the ULONG FirmwareTableID argument.
 */
NTSTATUS
ParseSMBIOSTable(
    _Out_ PVOID Buffer,
    _In_ SIZE_T BufferSize,
    _In_ SMBIOS_TABLE_INDEX TableIndex,
    _In_ ULONG TableSubIndex)
{
    PAGED_CODE();
    NT_ASSERT(Buffer != NULL);
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    PVOID buffer = NULL;
    ULONG buffer_size = 0;
    ULONG bytes_copied = 0;
    PRAW_SMBIOS_DATA smbios_data = NULL;
    PSMBIOS_TABLE_HEADER header = NULL;

    status =
        ImpExGetSystemFirmwareTable(SMBIOS_TABLE, 0, NULL, 0, &buffer_size);

    /*
     * Because we pass a null buffer here, the NTSTATUS result will be a
     * BUFFER_TOO_SMALL error, so to validate this function call we check
     * the return bytes returned (which indicate required buffer size) is
     * above 0.
     */
    if (buffer_size == 0) {
        DEBUG_ERROR(
            "ExGetSystemFirmwareTable call 1 failed to get required buffer size.");
        return STATUS_BUFFER_TOO_SMALL;
    }

    buffer = ImpExAllocatePool2(
        POOL_FLAG_NON_PAGED,
        buffer_size,
        POOL_TAG_INTEGRITY);

    if (!buffer)
        return STATUS_MEMORY_NOT_ALLOCATED;

    status = ImpExGetSystemFirmwareTable(
        SMBIOS_TABLE,
        0,
        buffer,
        buffer_size,
        &bytes_copied);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR(
            "ExGetSystemFirmwareTable call 2 failed with status %x",
            status);
        goto end;
    }

    smbios_data = GetRawSmbiosData(buffer);
    header = GetSmbiosTableHeader(smbios_data);

    /*
     * The System Information table is equal to Type == 2 and contains the
     * serial number of the motherboard in the computer among various other
     * things.
     *
     * source:
     * https://www.dmtf.org/sites/default/files/standards/documents/DSP0134_2.7.1.pdf
     * line 823
     *
     * NOTE(review): this walk has no bounds check against the end of the
     * table — if no structure of TableIndex exists it will run off the
     * buffer. Consider bounding by the table length; confirm the
     * RAW_SMBIOS_DATA layout before doing so.
     */
    while (header->Type != TableIndex)
        GetNextSMBIOSStructureInTable(&header);

    status = GetStringAtIndexFromSMBIOSTable(
        header,
        TableSubIndex,
        Buffer,
        BufferSize);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR(
            "GetStringAtIndexFromSMBIOSTable failed with status %x",
            status);
        goto end;
    }

end:
    if (buffer)
        ImpExFreePoolWithTag(buffer, POOL_TAG_INTEGRITY);
    return status;
}
/*
 * SHA-256 hashes the raw data of a disk-mapped section and its in-memory
 * counterpart (each packed as [IMAGE_SECTION_HEADER][raw data]). Rejects
 * the pair up front if their raw sizes differ, since the hashes could
 * never match.
 */
STATIC
NTSTATUS
ComputeHashOfSections(
    _In_ PIMAGE_SECTION_HEADER DiskSection,
    _In_ PIMAGE_SECTION_HEADER MemorySection,
    _Out_ PVOID* DiskHash,
    _Out_ PULONG DiskHashSize,
    _Out_ PVOID* MemoryHash,
    _Out_ PULONG MemoryHashSize)
{
    NT_ASSERT(DiskSection != NULL);
    NT_ASSERT(MemorySection != NULL);
    NT_ASSERT(DiskHash != NULL);
    NT_ASSERT(DiskHashSize != NULL);
    NT_ASSERT(MemoryHash != NULL);
    NT_ASSERT(MemoryHashSize != NULL);
    NTSTATUS status = STATUS_UNSUCCESSFUL;

    if (DiskSection->SizeOfRawData != MemorySection->SizeOfRawData) {
        DEBUG_WARNING("Executable section sizes differ between images.");
        return STATUS_INVALID_BUFFER_SIZE;
    }

    /* Raw bytes sit immediately after each packed section header. */
    UINT64 disk_data = (UINT64)DiskSection + sizeof(IMAGE_SECTION_HEADER);
    UINT64 memory_data = (UINT64)MemorySection + sizeof(IMAGE_SECTION_HEADER);

    status = CryptHashBuffer_sha256(
        disk_data,
        DiskSection->SizeOfRawData,
        DiskHash,
        DiskHashSize);
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("CryptHashBuffer_sha256 failed with status %x", status);
        return status;
    }

    status = CryptHashBuffer_sha256(
        memory_data,
        MemorySection->SizeOfRawData,
        MemoryHash,
        MemoryHashSize);
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("CryptHashBuffer_sha256 2 failed with status %x", status);
        return status;
    }

    return status;
}
/* TRUE iff the two hash buffers are byte-identical over Length bytes
 * (IntCompareMemory returns the count of matching leading bytes). */
FORCEINLINE
STATIC
BOOLEAN
CompareHashes(_In_ PVOID Hash1, _In_ PVOID Hash2, _In_ UINT32 Length)
{
    SIZE_T matching = IntCompareMemory(Hash1, Hash2, Length);
    if (matching == Length)
        return TRUE;
    return FALSE;
}
/*
 * Builds, encrypts, and queues a REPORT_INVALID_PROCESS_MODULE packet
 * describing the failed module. Best-effort: allocation or encryption
 * failure simply drops the report. On the queue path, packet ownership
 * transfers to IrpQueueSchedulePacket.
 *
 * Fix: on the encryption-failure path the pool was freed with `len` as the
 * tag argument instead of REPORT_POOL_TAG (ExFreePoolWithTag's second
 * parameter is the tag, not the size).
 */
STATIC
VOID
ReportInvalidProcessModule(_In_ PPROCESS_MODULE_INFORMATION Module)
{
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    UINT32 len = 0;
    PPROCESS_MODULE_VALIDATION_REPORT report = NULL;

    len = CryptRequestRequiredBufferLength(
        sizeof(PROCESS_MODULE_VALIDATION_REPORT));

    report = ImpExAllocatePool2(POOL_FLAG_NON_PAGED, len, REPORT_POOL_TAG);
    if (!report)
        return;

    INIT_REPORT_PACKET(report, REPORT_INVALID_PROCESS_MODULE, 0);

    report->image_base = Module->module_base;
    report->image_size = Module->module_size;
    IntCopyMemory(
        report->module_path,
        Module->module_path,
        sizeof(report->module_path));

    status = CryptEncryptBuffer(report, len);
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("CryptEncryptBuffer: %lx", status);
        ImpExFreePoolWithTag(report, REPORT_POOL_TAG);
        return;
    }

    IrpQueueSchedulePacket(report, len);
}
/*
* Because the infrastructure has already been setup to validate modules in the
* driver, that is how I will validate the usermode modules as well. Another
* reason is that the win32 api makes it very easy to take a snapshot of the
* modules and enumerate them with easy to use functions and macros.
*
* 1. Take a snapshot of the modules in the process from our dll
* 2. pass the image base, image size and the image path to our driver via an
* IRP
* 3. from our driver, to first verify the in memory module, attach to our
* protected process and using the base + size simply use
* StoreModuleExecutableRegionsInBuffer()
* 4. Next we use the path to map the image on disk into memory, and pass the
* image to StoreModuleExecutableRegionsInBuffer() just as we did before.
* 5. With the 2 buffers that contain both images executable regions, we hash
* them and compare for anomalies.
*
 * note: It's important to realise that since these are user mode modules, they
 * are often hooked by various legitimate programs - such as Discord, NVIDIA
 * overlays, etc. So this approach needs to be rethought.
*/
/*
 * Validate a usermode module loaded in the protected process. The in-memory
 * executable sections are captured (while attached to the process), the image
 * on disk is mapped and its executable sections captured, both are hashed,
 * and a mismatch queues an invalid-module report. See the commentary above
 * for the full pipeline.
 *
 * Irp - METHOD_BUFFERED irp whose system buffer holds a
 *       PROCESS_MODULE_INFORMATION (base, size, path from the process'
 *       context).
 *
 * Fixes: the address-of operators in the MapDiskImageIntoVirtualAddressSpace
 * call had been corrupted into mis-encoded characters ("&sect" mojibake);
 * restored. Removed the unused local validation_result.
 */
NTSTATUS
ValidateProcessLoadedModule(_Inout_ PIRP Irp)
{
    PAGED_CODE();
    NT_ASSERT(Irp != NULL);

    NTSTATUS status = STATUS_UNSUCCESSFUL;
    PPROCESS_MODULE_INFORMATION module_info = NULL;
    PKPROCESS process = NULL;
    KAPC_STATE apc_state = {0};
    PVAL_INTEGRITY_HEADER memory_buffer = NULL;
    PVAL_INTEGRITY_HEADER disk_buffer = NULL;
    PVOID memory_hash = NULL;
    PVOID disk_hash = NULL;
    ULONG memory_hash_size = 0;
    ULONG disk_hash_size = 0;
    SIZE_T bytes_written = 0;
    UNICODE_STRING module_path = {0};
    HANDLE section_handle = NULL;
    PVOID section = NULL;
    ULONG section_size = 0;

    /* Reject undersized caller buffers before touching any fields. */
    status = ValidateIrpInputBuffer(Irp, sizeof(PROCESS_MODULE_INFORMATION));

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("ValidateIrpInputBuffer failed with status %x", status);
        return status;
    }

    module_info = (PPROCESS_MODULE_INFORMATION)Irp->AssociatedIrp.SystemBuffer;
    SessionGetProcess(&process);

    ImpRtlInitUnicodeString(&module_path, &module_info->module_path);

    /*
     * Attach because the offsets given are from the process' context.
     */
    ImpKeStackAttachProcess(process, &apc_state);

    status = StoreModuleExecutableRegionsInBuffer(
        &memory_buffer,
        module_info->module_base,
        module_info->module_size,
        &bytes_written,
        FALSE);

    ImpKeUnstackDetachProcess(&apc_state);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR(
            "StoreModuleExecutableRegionsInBuffer failed with status %x",
            status);
        goto end;
    }

    /* Map the on-disk image so its sections can be captured the same way. */
    status = MapDiskImageIntoVirtualAddressSpace(
        &section_handle,
        &section,
        &module_path,
        &section_size);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR(
            "MapDiskImageIntoVirtualAddressSpace failed with status %x",
            status);
        goto end;
    }

    status = StoreModuleExecutableRegionsInBuffer(
        &disk_buffer,
        section,
        section_size,
        &bytes_written,
        FALSE);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR(
            "StoreModuleExecutableRegionsInbuffer 2 failed with status %x",
            status);
        goto end;
    }

    status = ComputeHashOfSections(
        &memory_buffer->section_header,
        &disk_buffer->section_header,
        &disk_hash,
        &disk_hash_size,
        &memory_hash,
        &memory_hash_size);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("ComputeHashOfSections failed with status %x", status);
        goto end;
    }

    /* Any divergence between disk and memory hashes is reported. */
    if (!CompareHashes(disk_hash, memory_hash, memory_hash_size))
        ReportInvalidProcessModule(module_info);

end:
    if (section_handle)
        ImpZwClose(section_handle);
    if (section)
        ImpZwUnmapViewOfSection(ZwCurrentProcess(), section);
    if (memory_buffer)
        ImpExFreePoolWithTag(memory_buffer, POOL_TAG_INTEGRITY);
    if (memory_hash)
        ImpExFreePoolWithTag(memory_hash, POOL_TAG_INTEGRITY);
    if (disk_buffer)
        ImpExFreePoolWithTag(disk_buffer, POOL_TAG_INTEGRITY);
    if (disk_hash)
        ImpExFreePoolWithTag(disk_hash, POOL_TAG_INTEGRITY);

    return status;
}
/*
 * Hash the executable regions of a usermode module belonging to the active
 * session's process and copy the resulting SHA-256 digest into OutBuffer.
 *
 * Entry         - module map entry providing the module base and size.
 * OutBuffer     - receives the digest; only written on success.
 * OutBufferSize - capacity of OutBuffer in bytes.
 *
 * Fix: the buffer-capacity check was inverted (OutBufferSize >
 * memory_hash_size flagged STATUS_BUFFER_TOO_SMALL), which rejected every
 * adequately sized buffer and allowed an overflow for undersized ones. The
 * copy is now refused when the buffer is smaller than the digest.
 */
NTSTATUS
HashUserModule(
    _In_ PPROCESS_MAP_MODULE_ENTRY Entry,
    _Out_ PVOID OutBuffer,
    _In_ UINT32 OutBufferSize)
{
    PAGED_CODE();
    NT_ASSERT(Entry != NULL);
    NT_ASSERT(OutBuffer != NULL);

    NTSTATUS status = STATUS_UNSUCCESSFUL;
    KAPC_STATE apc_state = {0};
    PVAL_INTEGRITY_HEADER memory_buffer = NULL;
    PVOID memory_hash = NULL;
    ULONG memory_hash_size = 0;
    SIZE_T bytes_written = 0;
    PACTIVE_SESSION session = GetActiveSession();

    /*
     * Attach because the offsets given are from the process' context.
     */
    ImpKeStackAttachProcess(session->process, &apc_state);

    status = StoreModuleExecutableRegionsInBuffer(
        &memory_buffer,
        Entry->base,
        Entry->size,
        &bytes_written,
        FALSE);

    ImpKeUnstackDetachProcess(&apc_state);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR(
            "StoreModuleExecutableRegionsInBuffer failed with status %x",
            status);
        goto end;
    }

    status = CryptHashBuffer_sha256(
        memory_buffer->section_base,
        memory_buffer->section_header.SizeOfRawData,
        &memory_hash,
        &memory_hash_size);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("CryptHashBuffer_sha256 failed with status %x", status);
        goto end;
    }

    /* Refuse the copy when the caller's buffer cannot hold the digest. */
    if (OutBufferSize < memory_hash_size) {
        status = STATUS_BUFFER_TOO_SMALL;
        goto end;
    }

    IntCopyMemory(OutBuffer, memory_hash, memory_hash_size);

end:
    if (memory_buffer)
        ImpExFreePoolWithTag(memory_buffer, POOL_TAG_INTEGRITY);
    if (memory_hash)
        ImpExFreePoolWithTag(memory_hash, POOL_TAG_INTEGRITY);

    return status;
}
/*
 * Resolve the serial-number string embedded inside a storage device
 * descriptor; SerialNumberOffset is relative to the descriptor base.
 */
FORCEINLINE
STATIC
PCHAR
GetStorageDescriptorSerialNumber(_In_ PSTORAGE_DEVICE_DESCRIPTOR Descriptor)
{
    UINT64 base = (UINT64)Descriptor;
    UINT64 offset = Descriptor->SerialNumberOffset;

    return (PCHAR)(base + offset);
}
/*
 * Length of the serial string in bytes, including the null terminator, capped
 * at DEVICE_DRIVE_0_SERIAL_CODE_LENGTH.
 */
FORCEINLINE
STATIC
SIZE_T
GetStorageDescriptorSerialLength(_In_ PCHAR SerialNumber)
{
    SIZE_T length =
        IntStringLength(SerialNumber, DEVICE_DRIVE_0_SERIAL_CODE_LENGTH);

    /* +1 so the terminator is preserved when the serial is copied out. */
    return length + 1;
}
/*
 * Initialise a STORAGE_PROPERTY_QUERY for IOCTL_STORAGE_QUERY_PROPERTY.
 * Remaining fields of Query are expected to be zeroed by the caller.
 */
FORCEINLINE
STATIC
VOID
InitStorageProperties(
_Out_ PSTORAGE_PROPERTY_QUERY Query,
_In_ STORAGE_PROPERTY_ID PropertyId,
_In_ STORAGE_QUERY_TYPE QueryType)
{
Query->PropertyId = PropertyId;
Query->QueryType = QueryType;
}
/*
 * TODO: Query PhysicalDrive%n to get the serial numbers for all hard drives;
 * run the command "wmic diskdrive" in a console to cross-check the values.
 */
/*
 * Read the serial number of \\.\PhysicalDrive0 via
 * IOCTL_STORAGE_QUERY_PROPERTY and copy it (null-terminated) into
 * ConfigDrive0Serial.
 *
 * ConfigDrive0Serial  - receives the serial string.
 * ConfigDrive0MaxSize - capacity of that buffer in bytes.
 *
 * Two device-control calls are made: the first retrieves only the
 * STORAGE_DESCRIPTOR_HEADER to learn the full descriptor size, the second
 * retrieves the complete STORAGE_DEVICE_DESCRIPTOR.
 */
NTSTATUS
GetHardDiskDriveSerialNumber(
_Inout_ PVOID ConfigDrive0Serial, _In_ SIZE_T ConfigDrive0MaxSize)
{
PAGED_CODE();
NT_ASSERT(ConfigDrive0Serial != NULL);
NTSTATUS status = STATUS_UNSUCCESSFUL;
HANDLE handle = NULL;
OBJECT_ATTRIBUTES attributes = {0};
IO_STATUS_BLOCK status_block = {0};
STORAGE_PROPERTY_QUERY query = {0};
STORAGE_DESCRIPTOR_HEADER header = {0};
PSTORAGE_DEVICE_DESCRIPTOR descriptor = NULL;
UNICODE_STRING path = {0};
PCHAR serial_number = NULL;
SIZE_T serial_length = 0;
ImpRtlInitUnicodeString(&path, L"\\DosDevices\\PhysicalDrive0");
/*
 * No need to use the flag OBJ_FORCE_ACCESS_CHECK since we arent passing
 * a handle given to us from usermode.
 */
InitializeObjectAttributes(
&attributes,
&path,
OBJ_CASE_INSENSITIVE | OBJ_KERNEL_HANDLE,
NULL,
NULL);
status = ImpZwOpenFile(
&handle,
GENERIC_READ,
&attributes,
&status_block,
NULL,
NULL);
if (!NT_SUCCESS(status)) {
DEBUG_ERROR(
"ZwOpenFile on PhysicalDrive0 failed with status %x",
status);
goto end;
}
InitStorageProperties(&query, StorageDeviceProperty, PropertyStandardQuery);
/* First call: header only, to discover the full descriptor size. */
status = ImpZwDeviceIoControlFile(
handle,
NULL,
NULL,
NULL,
&status_block,
IOCTL_STORAGE_QUERY_PROPERTY,
&query,
sizeof(STORAGE_PROPERTY_QUERY),
&header,
sizeof(STORAGE_DESCRIPTOR_HEADER));
if (!NT_SUCCESS(status)) {
DEBUG_ERROR(
"ZwDeviceIoControlFile first call failed with status %x",
status);
goto end;
}
descriptor = ImpExAllocatePool2(
POOL_FLAG_NON_PAGED,
header.Size,
POOL_TAG_INTEGRITY);
if (!descriptor) {
status = STATUS_MEMORY_NOT_ALLOCATED;
goto end;
}
/* Second call: full descriptor into the exactly-sized buffer. */
status = ImpZwDeviceIoControlFile(
handle,
NULL,
NULL,
NULL,
&status_block,
IOCTL_STORAGE_QUERY_PROPERTY,
&query,
sizeof(STORAGE_PROPERTY_QUERY),
descriptor,
header.Size);
if (!NT_SUCCESS(status)) {
DEBUG_ERROR(
"ZwDeviceIoControlFile second call failed with status %x",
status);
goto end;
}
/*
 * NOTE(review): when the device reports no serial (offset 0) we fall
 * through with the last successful status and the caller's buffer is
 * left untouched - confirm callers handle an unwritten buffer.
 */
if (!descriptor->SerialNumberOffset)
goto end;
serial_number = GetStorageDescriptorSerialNumber(descriptor);
serial_length = GetStorageDescriptorSerialLength(serial_number);
if (serial_length > ConfigDrive0MaxSize) {
status = STATUS_BUFFER_TOO_SMALL;
goto end;
}
/* serial_length already includes the null terminator. */
IntCopyMemory(ConfigDrive0Serial, serial_number, serial_length);
end:
if (handle)
ImpZwClose(handle);
if (descriptor)
ImpExFreePoolWithTag(descriptor, POOL_TAG_INTEGRITY);
return status;
}
/*
 * Scan the byte range [BaseAddress, BaseAddress + MaxLength) for the first
 * occurrence of Signature (SignatureLength bytes, compared verbatim).
 * Returns the address of the first match, or NULL when no match exists.
 *
 * Fix: the original outer loop iterated index < MaxLength while the inner
 * comparison dereferenced BaseAddress + index + sig, reading up to
 * SignatureLength bytes beyond the caller-declared range. The outer bound is
 * now index + SignatureLength <= MaxLength so no byte outside the range is
 * ever touched. A zero-length signature still matches at BaseAddress, as
 * before.
 */
PVOID
ScanForSignature(
    _In_ PVOID BaseAddress,
    _In_ SIZE_T MaxLength,
    _In_ LPCSTR Signature,
    _In_ SIZE_T SignatureLength)
{
    PAGED_CODE();
    NT_ASSERT(BaseAddress != NULL);
    NT_ASSERT(Signature != NULL);

    for (SIZE_T index = 0; index + SignatureLength <= MaxLength; index++) {
        PCHAR candidate = (PCHAR)((UINT64)BaseAddress + index);
        SIZE_T matched = 0;

        while (matched < SignatureLength &&
               candidate[matched] == Signature[matched])
            matched++;

        if (matched == SignatureLength)
            return (PVOID)candidate;
    }

    return NULL;
}
/*
 * Ensure the compiler doesn't optimise out our deliberately redundant read
 * instructions...
 */
#pragma optimize("", off)
/*
 * Time a single memory read of InstructionAddress using the IA32_APERF MSR
 * sampled before and after the access. Optimisations are disabled around
 * this routine so the otherwise-dead read is not eliminated.
 *
 * NOTE(review): both MSR samples are shifted left by 32 before the delta is
 * taken, which discards the high 32 bits of APERF and scales the remainder
 * by 2^32 - presumably intentional to amplify small deltas; confirm against
 * the thresholds used by the EPT detection below.
 */
STATIC
UINT64
MeasureInstructionRead(_In_ PVOID InstructionAddress)
{
NT_ASSERT(InstructionAddress != NULL);
CONST UINT64 start = __readmsr(IA32_APERF_MSR) << 32;
/* The timed access; 'value' is intentionally unused. */
CHAR value = *(PCHAR)InstructionAddress;
return (__readmsr(IA32_APERF_MSR) << 32) - start;
}
#pragma optimize("", on)
/*
 * Average the cost of Count timed reads at Address. Interrupts are disabled
 * and IRQL raised to HIGH_LEVEL for the measurement window so scheduler and
 * interrupt noise cannot skew the samples.
 */
STATIC
UINT64
MeasureReads(_In_ PVOID Address, _In_ ULONG Count)
{
NT_ASSERT(Address != NULL);
NT_ASSERT(Count > 0);
UINT64 read_average = 0;
KIRQL irql = {0};
/* Warm-up read taken outside the timed window (result discarded). */
MeasureInstructionRead(Address);
KeRaiseIrql(HIGH_LEVEL, &irql);
_disable();
for (UINT32 iteration = 0; iteration < Count; iteration++)
read_average += MeasureInstructionRead(Address);
_enable();
KeLowerIrql(irql);
DEBUG_VERBOSE("EPT Detection - Read Average: %llx", read_average);
return read_average / Count;
}
/* Timed-read iterations per routine when sampling. */
#define EPT_CHECK_NUM_ITERATIONS 30
/* Number of baseline (assumed un-hooked) routines. */
#define EPT_CONTROL_FUNCTIONS_COUNT 4
/* Number of commonly EPT-hooked routines to test. */
#define EPT_PROTECTED_FUNCTIONS_COUNT 2
/* Max wide characters per routine name in the arrays below. */
#define EPT_MAX_FUNCTION_NAME_LENGTH 128
/* A read this many times slower than baseline is flagged as hooked. */
#define EPT_EXECUTION_TIME_MULTIPLIER 10
/*
* Even though we test for the presence of a hypervisor, we should still test
* for the presence of EPT hooks on key functions as this is a primary method
* for reversing AC's.
*
* Credits to momo5502 for the idea: https://momo5502.com/blog/?p=255
*
* [+] EPT: Read average: 14991c28f5c2
* [+] no EPT: Read average: 28828f5c28
*
* On average a read when HyperDbg's !epthook is active is around ~125x longer.
* Will need to continue testing with other HV's, however it is a good start.
*/
/*
 * Sample the average timed-read cost at RoutineAddress over
 * EPT_CHECK_NUM_ITERATIONS iterations. Fails when the address is invalid or
 * the measurement comes back zero.
 */
STATIC
NTSTATUS
GetAverageReadTimeAtRoutine(
    _In_ PVOID RoutineAddress, _Out_ PUINT64 AverageTime)
{
    NT_ASSERT(RoutineAddress != NULL);
    NT_ASSERT(AverageTime != NULL);

    if (!RoutineAddress || !AverageTime)
        return STATUS_UNSUCCESSFUL;

    if (!MmIsAddressValid(RoutineAddress))
        return STATUS_INVALID_ADDRESS;

    *AverageTime = MeasureReads(RoutineAddress, EPT_CHECK_NUM_ITERATIONS);

    if (*AverageTime == 0)
        return STATUS_UNSUCCESSFUL;

    return STATUS_SUCCESS;
}
/*
* todo: encrypt both arrays
*
* The goal with the control functions is to find a reference time for an
* average read on a function that is not EPT hooked. To accomplish this I've
* selected some arbitrary, rarely used functions that shouldn't really ever
* have an EPT hook active on them. This will give us a baseline that we can
* then average out to find a relatively accurate average read time.
*
* From here, we have an array of protected functions which are commonly hooked
* via EPT to reverse anti cheats. We then check the read times of these
* functions and compare them to the average of the read times for the control
* functions. If the read threshold exceeds a multiple of 10, we can be fairly
* certain an EPT hook is active.
*
 * Each time we measure the read we perform 30 iterations to ensure we get a
 * consistent result, as well as disabling interrupts + raising IRQL to ensure
 * the test is as accurate as possible.
*
* The following open source Intel VT-X hv's w/ EPT functionality have been
* tested and detected in a non vm environment:
*
* HyperDbg !epthook (https://github.com/HyperDbg/HyperDbg): detected
* DdiMon (https://github.com/tandasat/DdiMon): detected
*/
/* Baseline routines assumed never to carry an EPT hook (see note above). */
WCHAR CONTROL_FUNCTIONS[EPT_CONTROL_FUNCTIONS_COUNT]
[EPT_MAX_FUNCTION_NAME_LENGTH] = {
L"RtlAssert",
L"PsAcquireSiloHardReference",
L"PsDereferencePrimaryToken",
L"ZwCommitEnlistment"};
/* Routines commonly EPT-hooked to reverse anti-cheats. */
WCHAR PROTECTED_FUNCTIONS[EPT_PROTECTED_FUNCTIONS_COUNT]
[EPT_MAX_FUNCTION_NAME_LENGTH] = {
L"ExAllocatePoolWithTag", L"MmCopyMemory"};
/*
 * For whatever reason MmGetSystemRoutineAddress only works once, then every
 * call thereafter fails. So will be storing the routine addresses in arrays
 * since they dont change once the kernel is loaded.
 */
/* Resolved-address caches, placed in a non-paged writable section. */
#pragma section("NonPagedPool", read, write)
__declspec(allocate("NonPagedPool")) UINT64
CONTROL_FUNCTION_ADDRESSES[EPT_CONTROL_FUNCTIONS_COUNT] = {0};
__declspec(allocate("NonPagedPool")) UINT64
PROTECTED_FUNCTION_ADDRESSES[EPT_PROTECTED_FUNCTIONS_COUNT] = {0};
/*
 * Resolve and cache the addresses of the control and protected routines used
 * by the EPT timing checks. MmGetSystemRoutineAddress is only reliable early
 * on (see note above), so addresses are stored once in the non-paged arrays.
 * Fails if any routine cannot be resolved.
 *
 * Fix: the address-of operators on current_function had been corrupted into
 * mis-encoded characters ("&curren" mojibake); restored '&'.
 */
STATIC
NTSTATUS
InitiateEptFunctionAddressArrays()
{
    PAGED_CODE();

    UNICODE_STRING current_function = {0};

    for (UINT32 index = 0; index < EPT_CONTROL_FUNCTIONS_COUNT; index++) {
        ImpRtlInitUnicodeString(&current_function, CONTROL_FUNCTIONS[index]);
        CONTROL_FUNCTION_ADDRESSES[index] =
            ImpMmGetSystemRoutineAddress(&current_function);

        NT_ASSERT(CONTROL_FUNCTION_ADDRESSES[index] != NULL);

        if (!CONTROL_FUNCTION_ADDRESSES[index])
            return STATUS_UNSUCCESSFUL;
    }

    for (UINT32 index = 0; index < EPT_PROTECTED_FUNCTIONS_COUNT; index++) {
        ImpRtlInitUnicodeString(&current_function, PROTECTED_FUNCTIONS[index]);
        PROTECTED_FUNCTION_ADDRESSES[index] =
            ImpMmGetSystemRoutineAddress(&current_function);

        NT_ASSERT(PROTECTED_FUNCTION_ADDRESSES[index] != NULL);

        if (!PROTECTED_FUNCTION_ADDRESSES[index])
            return STATUS_UNSUCCESSFUL;
    }

    return STATUS_SUCCESS;
}
/*
 * Queue an encrypted REPORT_EPT_HOOK packet carrying the baseline and
 * measured read averages plus the routine name.
 *
 * NOTE(review): FunctionName is declared as a single WCHAR but is handed to
 * RtlInitUnicodeString, which expects a PCWSTR - and the caller below passes
 * a routine ADDRESS (UINT64), not a name pointer. The resulting string is
 * almost certainly not the intended function name; the parameter type and
 * call site should be reconciled (e.g. pass PROTECTED_FUNCTIONS[index]).
 */
STATIC
VOID
ReportEptHook(
_In_ UINT64 ControlAverage,
_In_ UINT64 ReadAverage,
_In_ WCHAR FunctionName)
{
NTSTATUS status = STATUS_UNSUCCESSFUL;
UINT32 len = 0;
PEPT_HOOK_REPORT report = NULL;
UNICODE_STRING string = {0};
len = CryptRequestRequiredBufferLength(sizeof(EPT_HOOK_REPORT));
report = ImpExAllocatePool2(POOL_FLAG_NON_PAGED, len, REPORT_POOL_TAG);
if (!report)
return;
INIT_REPORT_PACKET(report, REPORT_EPT_HOOK, 0);
report->control_average = ControlAverage;
report->read_average = ReadAverage;
RtlInitUnicodeString(&string, FunctionName);
status = UnicodeToCharBufString(
&string,
report->function_name,
sizeof(report->function_name));
/* A failed name conversion is logged but does not drop the report. */
if (!NT_SUCCESS(status))
DEBUG_ERROR("UnicodeToCharBufString: %x", status);
status = CryptEncryptBuffer(report, len);
if (!NT_SUCCESS(status)) {
DEBUG_ERROR("CryptEncryptBuffer: %lx", status);
ImpExFreePoolWithTag(report, len);
return;
}
IrpQueueSchedulePacket(report, len);
}
/*
 * Detect EPT hooks on the protected routines by comparing their timed-read
 * averages against a baseline built from the control routines. A protected
 * routine whose average exceeds baseline * EPT_EXECUTION_TIME_MULTIPLIER is
 * reported. See the commentary above for rationale and tested hypervisors.
 */
NTSTATUS
DetectEptHooksInKeyFunctions()
{
PAGED_CODE();
NTSTATUS status = STATUS_UNSUCCESSFUL;
UINT32 control_fails = 0;
UINT64 instruction_time = 0;
UINT64 control_time_sum = 0;
UINT64 control_average = 0;
/* todo: once we call this, we need to set a flag to skip this,
 * otherwise we just return early */
status = InitiateEptFunctionAddressArrays();
if (!NT_SUCCESS(status)) {
DEBUG_ERROR(
"InitiateEptFunctionAddressArrays failed with status %x",
status);
return status;
}
/* Build the baseline; individual failures are tolerated and counted. */
for (UINT32 index = 0; index < EPT_CONTROL_FUNCTIONS_COUNT; index++) {
status = GetAverageReadTimeAtRoutine(
CONTROL_FUNCTION_ADDRESSES[index],
&instruction_time);
if (!NT_SUCCESS(status)) {
DEBUG_ERROR(
"DetectEptPresentOnFunction failed with status %x",
status);
control_fails += 1;
continue;
}
control_time_sum += instruction_time;
}
/* Zero sum implies every control read failed; also guards the divide. */
if (control_time_sum == 0)
return STATUS_UNSUCCESSFUL;
control_average =
control_time_sum / (EPT_CONTROL_FUNCTIONS_COUNT - control_fails);
if (control_average == 0)
return STATUS_UNSUCCESSFUL;
for (UINT32 index = 0; index < EPT_PROTECTED_FUNCTIONS_COUNT; index++) {
status = GetAverageReadTimeAtRoutine(
PROTECTED_FUNCTION_ADDRESSES[index],
&instruction_time);
if (!NT_SUCCESS(status)) {
DEBUG_ERROR(
"DetectEptPresentOnFunction failed with status %x",
status);
continue;
}
/* [+] EPT hook detected at function: ExAllocatePoolWithTag with
 * execution time of: 149b7777777 */
if (control_average * EPT_EXECUTION_TIME_MULTIPLIER <
instruction_time) {
DEBUG_WARNING(
"EPT hook detected at function: %llx with execution time of: %llx",
PROTECTED_FUNCTION_ADDRESSES[index],
instruction_time);
/* NOTE(review): third argument is a routine ADDRESS but
 * ReportEptHook expects a function NAME (WCHAR) - see the note
 * on ReportEptHook; these should be reconciled. */
ReportEptHook(
control_average,
instruction_time,
PROTECTED_FUNCTION_ADDRESSES[index]);
}
}
return status;
}
/*
 * Hashmap enumeration callback: when the entry's process image name is
 * "winlogon.exe", store its PEPROCESS through Context (a PEPROCESS*).
 */
VOID
FindWinLogonProcess(_In_ PPROCESS_LIST_ENTRY Node, _In_opt_ PVOID Context)
{
    NT_ASSERT(Node != NULL);
    NT_ASSERT(Context != NULL);

    PEPROCESS* result = (PEPROCESS*)Context;
    LPCSTR image_name = NULL;

    if (!Context)
        return;

    image_name = ImpPsGetProcessImageFileName(Node->process);

    if (IntCompareString(image_name, "winlogon.exe") == 0)
        *result = Node->process;
}
/*
 * Capture the executable regions of a 32-bit (WOW64-mapped) module. The
 * module's image base is not a valid 64-bit system address, so we attach to
 * winlogon.exe - a 32-bit-compatible address space - before copying. See the
 * commentary in HashModule for why winlogon is used.
 */
STATIC
NTSTATUS
StoreModuleExecutableRegionsx86(
_In_ PRTL_MODULE_EXTENDED_INFO Module,
_In_ PVOID* Buffer,
_In_ PULONG BufferSize)
{
NT_ASSERT(Module != NULL);
NT_ASSERT(Buffer != NULL);
NT_ASSERT(BufferSize != NULL);
NTSTATUS status = STATUS_UNSUCCESSFUL;
PEPROCESS process = NULL;
KAPC_STATE apc_state = {0};
RtlHashmapEnumerate(GetProcessHashmap(), FindWinLogonProcess, &process);
/* WinLogon not started yet: caller defers the hash (see HashModule). */
if (!process)
return STATUS_NOT_FOUND;
ImpKeStackAttachProcess(process, &apc_state);
status = StoreModuleExecutableRegionsInBuffer(
Buffer,
Module->ImageBase,
Module->ImageSize,
BufferSize,
TRUE);
ImpKeUnstackDetachProcess(&apc_state);
if (!NT_SUCCESS(status))
DEBUG_ERROR(
"StoreModuleExecutableRegionsInBuffer-x86 failed with status %x",
status);
return status;
}
/*
 * Mark the driver list as able to hash x86 modules; set once WinLogon has
 * started (see DeferredModuleHashingCallback).
 */
FORCEINLINE
STATIC
VOID
Enablex86Hashing(_In_ PDRIVER_LIST_HEAD Head)
{
Head->can_hash_x86 = TRUE;
}
/*
 * Work-item callback that drains the deferred-module list and hashes each
 * entry. These are the x86 modules that could not be hashed at driver load
 * because WinLogon (our 32-bit attach target) had not started yet. Frees the
 * work item on completion.
 */
VOID
DeferredModuleHashingCallback(
_In_ PDEVICE_OBJECT DeviceObject, _In_opt_ PVOID Context)
{
UNREFERENCED_PARAMETER(Context);
UNREFERENCED_PARAMETER(DeviceObject);
NTSTATUS status = STATUS_UNSUCCESSFUL;
RTL_MODULE_EXTENDED_INFO module = {0};
PDRIVER_LIST_HEAD list = GetDriverList();
PLIST_ENTRY head = &GetDriverList()->deferred_list;
PLIST_ENTRY entry = NULL;
PDRIVER_LIST_ENTRY driver = NULL;
/* From here on, HashModule may take the x86 path. */
Enablex86Hashing(list);
/* RemoveHeadList returns the head itself when the list is empty. */
entry = RemoveHeadList(head);
if (entry == head)
goto end;
while (entry != head) {
driver = CONTAINING_RECORD(entry, DRIVER_LIST_ENTRY, deferred_entry);
DriverListEntryToExtendedModuleInfo(driver, &module);
DEBUG_VERBOSE("Hashing Deferred Module: %s", module.FullPathName);
status = HashModule(&module, &driver->text_hash);
if (!NT_SUCCESS(status)) {
DEBUG_ERROR("HashModule-x86 failed with status %x", status);
/* Entry stays marked unhashed; move on to the next one. */
driver->hashed = FALSE;
entry = RemoveHeadList(head);
continue;
}
driver->hashed = TRUE;
entry = RemoveHeadList(head);
}
end:
DEBUG_VERBOSE("All deferred modules hashed.");
ImpIoFreeWorkItem(list->work_item);
list->work_item = NULL;
}
/*
 * Hash the executable (.text) regions of a system module into Hash (SHA-256
 * digest, SHA_256_HASH_LENGTH bytes expected by callers). x86 modules whose
 * image base is not a valid 64-bit address are handled via the winlogon
 * attach path, or deferred with STATUS_INVALID_IMAGE_WIN_32 until WinLogon
 * is available (see the comment inside).
 */
NTSTATUS
HashModule(_In_ PRTL_MODULE_EXTENDED_INFO Module, _Out_ PVOID Hash)
{
NT_ASSERT(Module != NULL);
NT_ASSERT(Hash != NULL);
NTSTATUS status = STATUS_UNSUCCESSFUL;
ANSI_STRING ansi_string = {0};
UNICODE_STRING path = {0};
ULONG memory_text_size = 0;
PVOID memory_hash = NULL;
ULONG memory_hash_size = 0;
PVAL_INTEGRITY_HEADER memory_buffer = NULL;
ULONG memory_buffer_size = 0;
PDRIVER_LIST_HEAD list = GetDriverList();
ImpRtlInitAnsiString(&ansi_string, Module->FullPathName);
if (!ansi_string.Buffer) {
DEBUG_ERROR("RtlInitAnsiString failed with status %x", status);
return STATUS_UNSUCCESSFUL;
}
/*
 * NOTE(review): 'path' is produced here but never read afterwards - it is
 * only freed at 'end'. Presumably it validates that the path converts
 * cleanly; confirm whether the conversion is still needed.
 */
status = ImpRtlAnsiStringToUnicodeString(&path, &ansi_string, TRUE);
if (!NT_SUCCESS(status)) {
DEBUG_ERROR(
"RtlAnsiStringToUnicodeString failed with status %x",
status);
return status;
}
/*
 * For win32k and related modules, because they are 32bit for us to read
 * the memory we need to attach to a 32 bit process. A simple check is
 * that the 32 bit image base wont be a valid address, while this is
 * hacky it works. Then we simply attach to a 32 bit address space, in
 * our case winlogon, which will allow us to perform the copy.
 *
 * Since the driver loads at system startup, our driver is loaded before
 * the WinLogon process has started, so to combat this return return
 * early with a status code. This will mark the module as not hashed and
 * x86. We will then queue a work item to hash these modules later once
 * WinLogon has started.
 */
if (!ImpMmIsAddressValid(Module->ImageBase) && !list->can_hash_x86) {
status = STATUS_INVALID_IMAGE_WIN_32;
goto end;
}
else if (!ImpMmIsAddressValid(Module->ImageBase) && list->can_hash_x86) {
/*
 * Once the WinLogon process has started, we can then hash new
 * x86 modules.
 */
status = StoreModuleExecutableRegionsx86(
Module,
(PVOID)&memory_buffer,
&memory_buffer_size);
}
else {
status = StoreModuleExecutableRegionsInBuffer(
(PVOID)&memory_buffer,
Module->ImageBase,
Module->ImageSize,
&memory_buffer_size,
FALSE);
}
if (!NT_SUCCESS(status)) {
DEBUG_ERROR(
"StoreModuleExecutableRegionsInbuffer 2 failed with status %x",
status);
goto end;
}
status = CryptHashBuffer_sha256(
memory_buffer->section_base,
memory_buffer->section_header.SizeOfRawData,
&memory_hash,
&memory_hash_size);
if (!NT_SUCCESS(status)) {
DEBUG_VERBOSE("ComputeHashOfSections failed with status %x", status);
goto end;
}
/* Caller's Hash buffer is assumed large enough (SHA_256_HASH_LENGTH). */
IntCopyMemory(Hash, memory_hash, memory_hash_size);
end:
if (memory_buffer)
ImpExFreePoolWithTag(memory_buffer, POOL_TAG_INTEGRITY);
if (memory_hash)
ImpExFreePoolWithTag(memory_hash, POOL_TAG_INTEGRITY);
if (path.Buffer)
ImpRtlFreeUnicodeString(&path);
return status;
}
/*
 * As said in the comment below, in the future we want to be able to copy a
 * small part of the spot where the image has changed, say the next 50 bytes.
 * This would be useful for scanning for any jmp x etc. For now, this will do.
 */
/*
 * Queue an encrypted REPORT_PATCHED_SYSTEM_MODULE packet identifying the
 * system module (base, size, path) whose .text hash no longer matches.
 * Failures are silent.
 */
STATIC
VOID
ReportModifiedSystemImage(_In_ PRTL_MODULE_EXTENDED_INFO Module)
{
NT_ASSERT(Module != NULL);
NTSTATUS status = STATUS_UNSUCCESSFUL;
UINT32 len = 0;
PSYSTEM_MODULE_INTEGRITY_CHECK_REPORT report = NULL;
len = CryptRequestRequiredBufferLength(
sizeof(SYSTEM_MODULE_INTEGRITY_CHECK_REPORT));
report = ImpExAllocatePool2(POOL_FLAG_NON_PAGED, len, REPORT_POOL_TAG);
if (!report)
return;
INIT_REPORT_PACKET(report, REPORT_PATCHED_SYSTEM_MODULE, 0);
report->image_base = Module->ImageBase;
report->image_size = Module->ImageSize;
IntCopyMemory(
report->path_name,
Module->FullPathName,
sizeof(report->path_name));
status = CryptEncryptBuffer(report, len);
if (!NT_SUCCESS(status)) {
DEBUG_ERROR("CryptEncryptBuffer: %lx", status);
ImpExFreePoolWithTag(report, len);
return;
}
IrpQueueSchedulePacket(report, len);
}
/*
 * Re-hash a system module's executable regions and compare against the hash
 * recorded at load time in the driver list. A mismatch queues a
 * patched-module report. x86 modules are currently skipped (known issue).
 */
VOID
ValidateSystemModule(_In_ PRTL_MODULE_EXTENDED_INFO Module)
{
NT_ASSERT(Module != NULL);
NTSTATUS status = STATUS_UNSUCCESSFUL;
PDRIVER_LIST_ENTRY entry = NULL;
PVOID hash = NULL;
hash = ExAllocatePool2(
POOL_FLAG_NON_PAGED,
SHA_256_HASH_LENGTH,
POOL_TAG_INTEGRITY);
if (!hash)
return;
FindDriverEntryByBaseAddress(Module->ImageBase, &entry);
if (!entry) {
DEBUG_ERROR("FindDriverEntryByBaseAddress failed with no status");
goto end;
}
/* For now, there is some issue that sometimes occurs when validing x86
 * modules, for now lets skip them.*/
if (entry->x86)
goto end;
/*
 * Ideally, we would like to have access to the offset into the module that
 * doesnt match, allowing us to copy the next 50 bytes for example. Since we
 * only store the hash, we can only check whether something has changed, but
 * we dont really have access to any information regarding what changed. In
 * the future it might be nice (though requires a fair amount of memory) to
 * store a copy of images on load in the list alongside the hash. That way
 * if there is a change in the hash, we can access the old buffer, perform a
 * memory comparison, and find the point where the change exists.
 */
status = HashModule(Module, hash);
if (!NT_SUCCESS(status)) {
DEBUG_ERROR("HashModule failed with status %x", status);
goto end;
}
/* Compare the fresh hash against the load-time hash for this module. */
if (CompareHashes(hash, entry->text_hash, SHA_256_HASH_LENGTH)) {
DEBUG_VERBOSE(
"Module: %s text regions are valid.",
Module->FullPathName);
}
else {
DEBUG_WARNING(
"**!!** Module: %s text regions are NOT valid **!!**",
Module->FullPathName);
ReportModifiedSystemImage(Module);
}
end:
if (hash)
ExFreePoolWithTag(hash, POOL_TAG_INTEGRITY);
}
/*
 * Queue an encrypted REPORT_SELF_DRIVER_PATCHED packet: our own driver's
 * .text hash no longer matches the load-time hash. Failures are silent.
 */
STATIC
VOID
ReportModifiedSelfDriverImage(_In_ PRTL_MODULE_EXTENDED_INFO Module)
{
NT_ASSERT(Module != NULL);
NTSTATUS status = STATUS_UNSUCCESSFUL;
UINT32 len = 0;
PDRIVER_SELF_INTEGRITY_CHECK_REPORT packet = NULL;
len = CryptRequestRequiredBufferLength(
sizeof(DRIVER_SELF_INTEGRITY_CHECK_REPORT));
packet = ImpExAllocatePool2(POOL_FLAG_NON_PAGED, len, REPORT_POOL_TAG);
if (!packet)
return;
INIT_REPORT_PACKET(packet, REPORT_SELF_DRIVER_PATCHED, 0);
packet->image_base = Module->ImageBase;
packet->image_size = Module->ImageSize;
IntCopyMemory(
packet->path_name,
Module->FullPathName,
sizeof(packet->path_name));
status = CryptEncryptBuffer(packet, len);
if (!NT_SUCCESS(status)) {
DEBUG_ERROR("CryptEncryptBuffer: %lx", status);
ImpExFreePoolWithTag(packet, len);
return;
}
IrpQueueSchedulePacket(packet, len);
}
NTSTATUS
ValidateOurDriverImage()
{
NTSTATUS status = STATUS_UNSUCCESSFUL;
SYSTEM_MODULES modules = {0};
PRTL_MODULE_EXTENDED_INFO module_info = NULL;
PVOID memory_hash = NULL;
ULONG memory_hash_size = 0;
PDRIVER_LIST_ENTRY entry = NULL;
LPCSTR driver_name = GetDriverName();
PUNICODE_STRING path = GetDriverPath();
status = GetSystemModuleInformation(&modules);
if (!NT_SUCCESS(status)) {
DEBUG_ERROR("GetSystemModuleInformation failed with status %x", status);
return status;
}
module_info = FindSystemModuleByName(driver_name, &modules);
if (!module_info) {
DEBUG_ERROR("FindSystemModuleByName failed with no status.");
goto end;
}
memory_hash = ExAllocatePool2(
POOL_FLAG_NON_PAGED,
SHA_256_HASH_LENGTH,
POOL_TAG_INTEGRITY);
if (!memory_hash)
goto end;
FindDriverEntryByBaseAddress(module_info->ImageBase, &entry);
if (!entry) {
DEBUG_ERROR("FindDriverEntryByBaseAddress failed with no status.");
goto end;
}
if (entry->hashed == FALSE) {
DEBUG_WARNING("Our module has not been hashed, returning.");
status = STATUS_HASH_NOT_PRESENT;
goto end;
}
status = HashModule(module_info, memory_hash);
if (!NT_SUCCESS(status)) {
DEBUG_ERROR("HashModule failed with status %x", status);
goto end;
}
/*
* Since we don't pass a return value, I think we would raise an invalid
* module error and stop the users game session ? since module .text
* section error would be a large red flag
*/
if (CompareHashes(memory_hash, entry->text_hash, SHA_256_HASH_LENGTH)) {
DEBUG_VERBOSE("Driver image is valid. Integrity check complete");
}
else {
DEBUG_WARNING("**!!** Driver image is NOT valid. **!!**");
ReportModifiedSelfDriverImage(module_info);
}
end:
if (memory_hash)
ExFreePoolWithTag(memory_hash, POOL_TAG_INTEGRITY);
if (modules.address)
ExFreePoolWithTag(modules.address, SYSTEM_MODULES_POOL);
return status;
}
/* Atomically note one more worker thread operating on the context. */
FORCEINLINE
STATIC
VOID
IncrementActiveThreadCount(_Inout_ PSYS_MODULE_VAL_CONTEXT Context)
{
InterlockedIncrement(&Context->active_thread_count);
}
/* Atomically note one worker thread leaving the context. */
FORCEINLINE
STATIC
VOID
DecrementActiveThreadCount(_Inout_ PSYS_MODULE_VAL_CONTEXT Context)
{
InterlockedDecrement(&Context->active_thread_count);
}
/* Atomically mark the whole verification pass as finished. */
FORCEINLINE
STATIC
VOID
SetVerificationBlockAsComplete(_In_ PSYS_MODULE_VAL_CONTEXT Context)
{
InterlockedExchange(&Context->complete, TRUE);
}
/*
 * Atomically read the shared progress index.
 *
 * Fix: the previous InterlockedExchange(&x, x) loaded x with a plain
 * (non-atomic) read for its second argument and then wrote that possibly
 * stale value back, racing with concurrent updaters. A compare-exchange
 * whose comparand and exchange values are equal (0, 0) performs an atomic
 * read without ever publishing stale data.
 */
FORCEINLINE
STATIC
UINT32
GetCurrentVerificationIndex(_In_ PSYS_MODULE_VAL_CONTEXT Context)
{
    return InterlockedCompareExchange(&Context->current_count, 0, 0);
}
/* Upper bound (exclusive) for this thread's block of modules. */
FORCEINLINE
STATIC
UINT32
GetCurrentVerificationMaxIndex(
_In_ PSYS_MODULE_VAL_CONTEXT Context, _In_ UINT32 Count)
{
return Count + Context->block_size;
}
/* Atomically publish the new shared progress index. */
FORCEINLINE
STATIC
VOID
UpdateCurrentVerificationIndex(
_In_ PSYS_MODULE_VAL_CONTEXT Context, _In_ UINT32 Count)
{
InterlockedExchange(&Context->current_count, Count);
}
/*
 * Work-item routine run by each verification worker thread. Claims a block
 * of module indices [count, count + block_size) and validates every module
 * not already claimed by another thread (the per-module 'validated' flag is
 * taken with an interlocked compare-exchange). The thread that reaches
 * total_count marks the whole pass complete.
 */
STATIC
VOID
SystemModuleVerificationDispatchFunction(
_In_ PDEVICE_OBJECT DeviceObject, _In_ PSYS_MODULE_VAL_CONTEXT Context)
{
UNREFERENCED_PARAMETER(DeviceObject);
NT_ASSERT(Context != NULL);
UINT32 count = 0;
UINT32 max = 0;
IncrementActiveThreadCount(Context);
count = GetCurrentVerificationIndex(Context);
/*
 * theres a race condition here, where if the max is taken after a thread
 * has alredy completed an iteration, meaning the current_count will be +1
 * then what the starting thread is expecting, meaning the final iteration
 * will be off by one. To fix just need to calculate the block max before
 * threads are dispatched. todo!
 */
max = GetCurrentVerificationMaxIndex(Context, count);
for (; count < max && count < Context->total_count; count++) {
DEBUG_VERBOSE(
"ThrId: %lx, Count: %lx, Max: %lx, Total Count: %lx",
PsGetCurrentThreadId(),
count,
max,
Context->total_count);
/* Only the thread that flips validated FALSE->TRUE does the work. */
if (!InterlockedCompareExchange(
&Context->dispatcher_info[count].validated,
TRUE,
FALSE)) {
ValidateSystemModule(&Context->module_info[count]);
}
}
if (count == Context->total_count)
SetVerificationBlockAsComplete(Context);
UpdateCurrentVerificationIndex(Context, count);
DecrementActiveThreadCount(Context);
}
/* Modules processed per worker-thread block. */
#define VALIDATION_BLOCK_SIZE 25
/*
 * Prime the validation context: takes ownership of the dispatcher array and
 * the SystemModules->address buffer (both freed later by
 * FreeModuleVerificationItems).
 */
FORCEINLINE
STATIC
VOID
InitSysModuleValidationContext(
_Out_ PSYS_MODULE_VAL_CONTEXT Context,
_In_ PMODULE_DISPATCHER_HEADER DispatcherArray,
_In_ PSYSTEM_MODULES SystemModules)
{
Context->active_thread_count = 0;
Context->active = TRUE;
Context->complete = FALSE;
Context->dispatcher_info = DispatcherArray;
Context->module_info = SystemModules->address;
Context->total_count = SystemModules->module_count;
Context->block_size = VALIDATION_BLOCK_SIZE;
/* skip hal.dll and ntosrnl.exe */
Context->current_count = 2;
}
/*
* Multithreaded delayed priority work items improve 1% lows by 25% and reduces
* average PC latency by 10% compared to traditional multithreading. This is
* important as having high average fps but low 1% lows just leads to stuttery
* gameplay which in competitive multiplayer games is simply not alright.
 * Overall there is still room for improvement, but from both a statistical
 * and a feel standpoint the gameplay is much smoother (tested in cs2).
*
* A potential idea for further improvement is finding the cores with the least
* cpu usages and setting the worker threads affinity accordingly.
*/
/*
 * Build a fresh validation context: snapshot the loaded system modules and
 * allocate one MODULE_DISPATCHER_HEADER per module (ownership of both
 * buffers passes to the context).
 */
STATIC
NTSTATUS
InitialiseSystemModuleVerificationContext(PSYS_MODULE_VAL_CONTEXT Context)
{
NT_ASSERT(Context != NULL);
NTSTATUS status = STATUS_UNSUCCESSFUL;
SYSTEM_MODULES modules = {0};
PMODULE_DISPATCHER_HEADER dispatcher = NULL;
UINT32 count = 0;
status = GetSystemModuleInformation(&modules);
if (!NT_SUCCESS(status)) {
DEBUG_ERROR("GetSystemModuleInformation failed with status %x", status);
return status;
}
DEBUG_VERBOSE("driver count: %lx", modules.module_count);
count = modules.module_count * sizeof(MODULE_DISPATCHER_HEADER);
dispatcher =
ImpExAllocatePool2(POOL_FLAG_NON_PAGED, count, POOL_TAG_INTEGRITY);
if (!dispatcher) {
/* Dispatcher allocation failed: release the module snapshot too. */
ImpExFreePoolWithTag(modules.address, SYSTEM_MODULES_POOL);
return STATUS_MEMORY_NOT_ALLOCATED;
}
InitSysModuleValidationContext(Context, dispatcher, &modules);
return status;
}
/*
 * Release every allocated verification work item and clear its slot so a
 * later pass can allocate fresh ones.
 */
VOID
FreeWorkItems(_In_ PSYS_MODULE_VAL_CONTEXT Context)
{
    NT_ASSERT(Context != NULL);

    for (UINT32 slot = 0; slot < VERIFICATION_THREAD_COUNT; slot++) {
        PIO_WORKITEM item = Context->work_items[slot];

        if (!item)
            continue;

        ImpIoFreeWorkItem(item);
        Context->work_items[slot] = NULL;
    }
}
/*
 * Free the module snapshot and dispatcher array owned by the context, after
 * spinning until every worker thread has exited.
 */
STATIC
VOID
FreeModuleVerificationItems(_In_ PSYS_MODULE_VAL_CONTEXT Context)
{
NT_ASSERT(Context != NULL);
/* if a thread hasnt completed by this point, something catastrophic has
 * gone wrong and maybe its better not to yield..*/
while (Context->active_thread_count)
YieldProcessor();
if (Context->module_info) {
ImpExFreePoolWithTag(Context->module_info, SYSTEM_MODULES_POOL);
Context->module_info = NULL;
}
if (Context->dispatcher_info) {
ImpExFreePoolWithTag(Context->dispatcher_info, POOL_TAG_INTEGRITY);
Context->dispatcher_info = NULL;
}
}
/*
 * Driver-unload teardown: deactivate the context, then release work items
 * and the module/dispatcher buffers (waits for worker threads internally).
 */
VOID
CleanupValidationContextOnUnload(_In_ PSYS_MODULE_VAL_CONTEXT Context)
{
Context->active = FALSE;
Context->complete = TRUE;
FreeWorkItems(Context);
FreeModuleVerificationItems(Context);
}
/*
 * Queue VERIFICATION_THREAD_COUNT delayed work items that each run
 * SystemModuleVerificationDispatchFunction over the shared context. A
 * failed work-item allocation simply reduces the worker count.
 */
STATIC
VOID
DispatchVerificationWorkerThreads(_In_ PSYS_MODULE_VAL_CONTEXT Context)
{
for (UINT32 index = 0; index < VERIFICATION_THREAD_COUNT; index++) {
Context->work_items[index] =
ImpIoAllocateWorkItem(GetDriverDeviceObject());
if (!Context->work_items[index])
continue;
/* DelayedWorkQueue: see the performance note above this section. */
ImpIoQueueWorkItem(
Context->work_items[index],
SystemModuleVerificationDispatchFunction,
DelayedWorkQueue,
Context);
}
}
/*
 * Periodic entry point driving the multithreaded system-module check. Three
 * states: a completed pass is torn down; an inactive context is (re)built
 * and workers dispatched; an in-progress pass gets its spent work items
 * recycled and a fresh set of workers dispatched to continue it.
 */
NTSTATUS
SystemModuleVerificationDispatcher()
{
NTSTATUS status = STATUS_UNSUCCESSFUL;
PIO_WORKITEM work_item = NULL;
PSYS_MODULE_VAL_CONTEXT context = GetSystemModuleValidationContext();
if (context->complete) {
DEBUG_VERBOSE(
"System modules integrity check complete. Freeing items.");
context->active = FALSE;
context->complete = FALSE;
FreeModuleVerificationItems(context);
FreeWorkItems(context);
return STATUS_SUCCESS;
}
if (!context->active) {
DEBUG_VERBOSE("Context not active, generating new one");
status = InitialiseSystemModuleVerificationContext(context);
if (!NT_SUCCESS(status)) {
DEBUG_ERROR(
"InitialiseSystemModuleVerificationContext failed with status %x",
status);
return status;
}
}
else {
/* Pass still running: recycle last round's work items first. */
FreeWorkItems(context);
}
DispatchVerificationWorkerThreads(context);
DEBUG_VERBOSE(
"All worker threads dispatched for system module validation.");
return STATUS_SUCCESS;
}
/*
 * Fill VersionInfo with the OS version data reported by RtlGetVersion. The
 * query is made into a local structure and the individual fields copied out.
 */
NTSTATUS
GetOsVersionInformation(_Out_ PRTL_OSVERSIONINFOW VersionInfo)
{
    NTSTATUS status = STATUS_ABANDONED;
    RTL_OSVERSIONINFOW local = {0};

    if (!VersionInfo)
        return STATUS_INVALID_PARAMETER;

    status = ImpRtlGetVersion(&local);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("RtlGetVersion failed with status %x", status);
        return status;
    }

    VersionInfo->dwOSVersionInfoSize = local.dwOSVersionInfoSize;
    VersionInfo->dwMajorVersion = local.dwMajorVersion;
    VersionInfo->dwMinorVersion = local.dwMinorVersion;
    VersionInfo->dwBuildNumber = local.dwBuildNumber;
    VersionInfo->dwPlatformId = local.dwPlatformId;

    IntCopyMemory(
        VersionInfo->szCSDVersion,
        local.szCSDVersion,
        sizeof(VersionInfo->szCSDVersion));

    return status;
}
/*
 * Verify our driver object's create/close/device-control dispatch pointers
 * still reference our own handlers; FALSE (with a warning) on any mismatch.
 */
BOOLEAN
ValidateOurDriversDispatchRoutines()
{
    PDRIVER_OBJECT driver = GetDriverObject();
    BOOLEAN create_ok =
        driver->MajorFunction[IRP_MJ_CREATE] == DeviceCreate;
    BOOLEAN close_ok =
        driver->MajorFunction[IRP_MJ_CLOSE] == DeviceClose;
    BOOLEAN control_ok =
        driver->MajorFunction[IRP_MJ_DEVICE_CONTROL] == DeviceControl;

    if (create_ok && close_ok && control_ok)
        return TRUE;

    DEBUG_WARNING(
        "**!!** Drivers dispatch routine has been tampered with. **!!**");
    return FALSE;
}
/*
 * Release the heartbeat DPC and timer allocations, clearing each pointer so
 * repeated teardown is harmless.
 */
STATIC
VOID
FreeHeartbeatObjects(_Inout_ PHEARTBEAT_CONFIGURATION Configuration)
{
    PVOID dpc = Configuration->dpc;
    PVOID timer = Configuration->timer;

    if (dpc != NULL) {
        ImpExFreePoolWithTag(dpc, POOL_TAG_HEARTBEAT);
        Configuration->dpc = NULL;
    }

    if (timer != NULL) {
        ImpExFreePoolWithTag(timer, POOL_TAG_HEARTBEAT);
        Configuration->timer = NULL;
    }
}
/*
 * Allocate the non-paged KDPC and KTIMER used by the heartbeat. On failure
 * nothing is left allocated and STATUS_INSUFFICIENT_RESOURCES is returned.
 *
 * Fix: when the timer allocation failed, the dpc was freed but
 * Configuration->dpc was left pointing at the freed pool block, so a later
 * FreeHeartbeatObjects would double-free it. The pointer is now cleared.
 */
STATIC
NTSTATUS
AllocateHeartbeatObjects(_Inout_ PHEARTBEAT_CONFIGURATION Configuration)
{
    Configuration->dpc = ImpExAllocatePool2(
        POOL_FLAG_NON_PAGED,
        sizeof(KDPC),
        POOL_TAG_HEARTBEAT);

    if (!Configuration->dpc)
        return STATUS_INSUFFICIENT_RESOURCES;

    Configuration->timer = ImpExAllocatePool2(
        POOL_FLAG_NON_PAGED,
        sizeof(KTIMER),
        POOL_TAG_HEARTBEAT);

    if (!Configuration->timer) {
        ImpExFreePoolWithTag(Configuration->dpc, POOL_TAG_HEARTBEAT);
        /* Clear the dangling pointer so FreeHeartbeatObjects can't
         * double-free it. */
        Configuration->dpc = NULL;
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    return STATUS_SUCCESS;
}
#define HEARTBEAT_NANOSECONDS_LOW \
(60ULL * 10000000ULL) // 1 min in 100-nanosecond intervals
#define HEARTBEAT_NANOSECONDS_HIGH \
(240ULL * 10000000ULL) // 4 mins in 100-nanosecond intervals
#define TICKS_TO_100_NS_INTERVALS(tick_count) ((tick_count) * 100000)
/* Generate a random due time between 1 and 4 minutes in 100-nanosecond
* intervals. */
STATIC
LARGE_INTEGER
GenerateHeartbeatDueTime()
{
    LARGE_INTEGER tick_count = {0};
    LARGE_INTEGER due_time = {0};
    UINT64 range = HEARTBEAT_NANOSECONDS_HIGH - HEARTBEAT_NANOSECONDS_LOW;
    UINT64 offset = 0;

    /* Use the system tick counter as a cheap entropy source. */
    KeQueryTickCount(&tick_count);
    offset = TICKS_TO_100_NS_INTERVALS(tick_count.QuadPart) % range;

    /* A negative due time is a relative timeout for KeSetTimer. */
    due_time.QuadPart = -(LONGLONG)(HEARTBEAT_NANOSECONDS_LOW + offset);
    return due_time;
}
/* Initialise the heartbeat DPC + timer pair and arm the one-shot timer
 * with a randomised due time. */
FORCEINLINE
STATIC
VOID
InitialiseHeartbeatObjects(_Inout_ PHEARTBEAT_CONFIGURATION Config)
{
    LARGE_INTEGER due_time = GenerateHeartbeatDueTime();

    KeInitializeDpc(Config->dpc, HeartbeatDpcRoutine, Config);
    KeInitializeTimer(Config->timer);
    KeSetTimer(Config->timer, due_time, Config->dpc);
}
/*
 * Marks a heartbeat as in-flight by bumping the active count. Paired with
 * SetHeartbeatInactive; WaitForHeartbeatCompletion spins until the count
 * returns to zero.
 */
FORCEINLINE
STATIC
VOID
SetHeartbeatActive(_Inout_ PHEARTBEAT_CONFIGURATION Configuration)
{
    InterlockedIncrement(&Configuration->active);
}
/*
 * Marks the heartbeat as no longer in flight; counterpart of
 * SetHeartbeatActive.
 */
FORCEINLINE
STATIC
VOID
SetHeartbeatInactive(_Inout_ PHEARTBEAT_CONFIGURATION Configuration)
{
    InterlockedDecrement(&Configuration->active);
}
/* Blocks until heartbeat execution is complete */
FORCEINLINE
STATIC
VOID
WaitForHeartbeatCompletion(_In_ PHEARTBEAT_CONFIGURATION Configuration)
{
    /*
     * Busy-waits for the active count (set by SetHeartbeatActive) to fall
     * back to zero. NOTE(review): this relies on `active` being declared
     * volatile in HEARTBEAT_CONFIGURATION so the read is re-issued each
     * iteration — confirm against the struct definition.
     */
    while (Configuration->active)
        YieldProcessor();
}
/* Atomically bumps the number of heartbeat packets generated so far. */
FORCEINLINE
STATIC
VOID
IncrementHeartbeatCounter(_In_ PHEARTBEAT_CONFIGURATION Configuration)
{
    InterlockedIncrement(&Configuration->counter);
}
/*
 * Allocate a heartbeat packet of Size bytes and snapshot the IRP queue
 * metrics into it. Returns NULL on allocation failure; otherwise the caller
 * owns the packet.
 */
FORCEINLINE
STATIC
PHEARTBEAT_PACKET
BuildHeartbeatPacket(_In_ UINT32 Size)
{
    PIRP_QUEUE_HEAD queue = GetIrpQueueHead();
    PHEARTBEAT_PACKET packet =
        ImpExAllocatePool2(POOL_FLAG_NON_PAGED, Size, POOL_TAG_HEARTBEAT);

    if (!packet)
        return NULL;

    INIT_HEARTBEAT_PACKET(packet);

    /*
     * Snapshot the metrics under the queue lock. Since they are read before
     * being incremented for the current packet, each value is always 1 less
     * than what the current packet represents.
     */
    KeAcquireGuardedMutex(&queue->lock);
    packet->total_heartbeats_completed = queue->total_heartbeats_completed;
    packet->total_irps_completed = queue->total_irps_completed;
    packet->total_reports_completed = queue->total_reports_completed;
    KeReleaseGuardedMutex(&queue->lock);

    return packet;
}
/*
 * PASSIVE_LEVEL work item queued by HeartbeatDpcRoutine. Builds, encrypts
 * and schedules one heartbeat packet, then recycles the timer/DPC pair with
 * a fresh random due time.
 */
STATIC
VOID
HeartbeatWorkItem(_In_ PDEVICE_OBJECT DeviceObject, _In_opt_ PVOID Context)
{
    UNREFERENCED_PARAMETER(DeviceObject);
    NT_ASSERT(Context != NULL);

    if (!ARGUMENT_PRESENT(Context))
        return;

    UINT32 packet_size = 0;
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    PHEARTBEAT_PACKET packet = NULL;
    PHEARTBEAT_CONFIGURATION config = (PHEARTBEAT_CONFIGURATION)Context;

    DEBUG_VERBOSE("Heartbeat timer alerted. Generating heartbeat packet.");

    SetHeartbeatActive(config);

    packet_size = CryptRequestRequiredBufferLength(sizeof(HEARTBEAT_PACKET));
    packet = BuildHeartbeatPacket(packet_size);

    if (packet) {
        status = CryptEncryptBuffer(packet, packet_size);

        if (!NT_SUCCESS(status)) {
            DEBUG_ERROR("CryptEncryptBuffer: %lx", status);
            ImpExFreePoolWithTag(packet, POOL_TAG_HEARTBEAT);
            goto queue_next;
        }

        /* IrpQueueSchedulePacket takes ownership of the packet buffer. */
        IrpQueueSchedulePacket(packet, packet_size);
        IncrementHeartbeatCounter(config);
    }

queue_next:
    /* Ensure we wait until our heartbeats DPC has terminated. */
    KeFlushQueuedDpcs();
    FreeHeartbeatObjects(config);

    status = AllocateHeartbeatObjects(config);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("AllocateHeartbeatObjects %x", status);
        /* Fix: previously the early return skipped SetHeartbeatInactive,
         * leaving the active count raised forever and causing
         * WaitForHeartbeatCompletion to spin indefinitely on unload. */
        SetHeartbeatInactive(config);
        return;
    }

    InitialiseHeartbeatObjects(config);
    SetHeartbeatInactive(config);
}
/*
 * Fires when the heartbeat timer expires. Defers the actual work to a
 * PASSIVE_LEVEL work item since it runs at DISPATCH_LEVEL here.
 */
STATIC
VOID
HeartbeatDpcRoutine(
    _In_ PKDPC Dpc,
    _In_opt_ PVOID DeferredContext,
    _In_opt_ PVOID SystemArgument1,
    _In_opt_ PVOID SystemArgument2)
{
    PHEARTBEAT_CONFIGURATION heartbeat = NULL;

    UNREFERENCED_PARAMETER(Dpc);
    UNREFERENCED_PARAMETER(SystemArgument1);
    UNREFERENCED_PARAMETER(SystemArgument2);

    if (!ARGUMENT_PRESENT(DeferredContext))
        return;

    heartbeat = (PHEARTBEAT_CONFIGURATION)DeferredContext;

    IoQueueWorkItem(
        heartbeat->work_item,
        HeartbeatWorkItem,
        NormalWorkQueue,
        heartbeat);
}
/*
* The premise behind this initial heartbeat monitor is that at a random
* interval a timer will be set. Once this timer is set, a dpc routine will
* run that will insert a heartbeat packet into the io queue which will be
* processed by user mode. Once the heartbeat is inserted, we queue a work
* item which will wait until the dpc routine is finished, free the current
* timer and work item (this is safe as the timer is removed from the timer
* queue when its alerted) and allocate a new timer and dpc object. We will
 * then initialise them and insert them with another random value.
*
* The goal of this is to make reverse engineering the heartbeat process as
* hard as possible. And while it is only a start, I think its a start in
* the right direction.
*/
/*
 * Set up the heartbeat state: counters, RNG seed, the shared work item and
 * the first timer/DPC pair. On failure all partially-acquired resources are
 * released and the configuration is left unusable (work_item == NULL).
 */
NTSTATUS
InitialiseHeartbeatConfiguration(_Out_ PHEARTBEAT_CONFIGURATION Configuration)
{
    NTSTATUS status = STATUS_UNSUCCESSFUL;

    Configuration->counter = 0;
    Configuration->active = FALSE;
    Configuration->seed = GenerateRandSeed();
    Configuration->work_item = IoAllocateWorkItem(GetDriverDeviceObject());

    if (!Configuration->work_item)
        return STATUS_INSUFFICIENT_RESOURCES;

    status = AllocateHeartbeatObjects(Configuration);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("AllocateHeartbeatObjects %x", status);
        /* Fix: the work item allocated above was previously leaked on this
         * error path. */
        IoFreeWorkItem(Configuration->work_item);
        Configuration->work_item = NULL;
        return status;
    }

    InitialiseHeartbeatObjects(Configuration);
    return status;
}
/*
 * Tear down the heartbeat: wait for any in-flight heartbeat work item to
 * finish, cancel the timer, then free the timer/DPC pair and the work item.
 *
 * NOTE(review): there is a window between WaitForHeartbeatCompletion
 * returning and KeCancelTimer where the timer could fire and re-queue the
 * DPC/work item — confirm callers guarantee no further timer expiry here.
 */
VOID
FreeHeartbeatConfiguration(_Inout_ PHEARTBEAT_CONFIGURATION Configuration)
{
    WaitForHeartbeatCompletion(Configuration);
    KeCancelTimer(Configuration->timer);
    FreeHeartbeatObjects(Configuration);
    IoFreeWorkItem(Configuration->work_item);
}
================================================
FILE: driver/integrity.h
================================================
#ifndef INTEGRITY_H
#define INTEGRITY_H
#include
#include "common.h"
/*
 * Per-module synchronisation header used by the module-validation worker
 * threads to claim a module and record its result.
 */
typedef struct _MODULE_DISPATCHER_HEADER {
    volatile UINT32 validated; // if this is > 0, a thread is already using it
    UINT8 result;              // validation result recorded by the worker
} MODULE_DISPATCHER_HEADER, *PMODULE_DISPATCHER_HEADER;
/* A system module paired with the dispatcher header that synchronises
 * concurrent validation of it. */
typedef struct _SYSTEM_MODULE_INFORMATION {
    MODULE_DISPATCHER_HEADER dispatcher_header;
    RTL_MODULE_EXTENDED_INFO module_information;
} SYSTEM_MODULE_INFORMATION, *PSYSTEM_MODULE_INFORMATION;
#define VERIFICATION_THREAD_COUNT 4
/* Shared context for the multi-threaded system-module validation sweep. */
typedef struct _SYS_MODULE_VAL_CONTEXT {
    /* Stores the number of actively executing worker threads */
    volatile LONG active_thread_count;
    /* determines whether a validation is in progress */
    volatile LONG active;
    /* determines whether a validation is complete */
    volatile LONG complete;
    /* current count of validated modules */
    volatile LONG current_count;
    /* total count of modules */
    UINT32 total_count;
    /* number of modules to validate in a single sweep */
    UINT32 block_size;
    /* pointer to the buffer containing the system module information */
    PRTL_MODULE_EXTENDED_INFO module_info;
    /* pointer to the array of dispatcher info used to synchronise threads */
    PMODULE_DISPATCHER_HEADER dispatcher_info;
    /* array of pointers to work items, used to free work items when
     * complete */
    PIO_WORKITEM work_items[VERIFICATION_THREAD_COUNT];
} SYS_MODULE_VAL_CONTEXT, *PSYS_MODULE_VAL_CONTEXT;
/* SMBIOS structure-table indices used when parsing the firmware tables. */
typedef enum _SMBIOS_TABLE_INDEX {
    SmbiosInformation = 0,
    SystemInformation,
    VendorSpecificInformation,
    ChassisInformation
} SMBIOS_TABLE_INDEX;
#define SMBIOS_VMWARE_SERIAL_NUMBER_SUB_INDEX 3
#define SMBIOS_NATIVE_SERIAL_NUMBER_SUB_INDEX 4
#define SMBIOS_VENDOR_STRING_SUB_INDEX 1
NTSTATUS
GetDriverImageSize(_Inout_ PIRP Irp);
NTSTATUS
RetrieveInMemoryModuleExecutableSections(_Inout_ PIRP Irp);
NTSTATUS
ValidateProcessLoadedModule(_Inout_ PIRP Irp);
NTSTATUS
GetHardDiskDriveSerialNumber(_Inout_ PVOID ConfigDrive0Serial,
_In_ SIZE_T ConfigDrive0MaxSize);
NTSTATUS
ParseSMBIOSTable(_Out_ PVOID Buffer,
_In_ SIZE_T BufferSize,
_In_ SMBIOS_TABLE_INDEX TableIndex,
_In_ ULONG TableSubIndex);
NTSTATUS
DetectEptHooksInKeyFunctions();
PVOID
ScanForSignature(_In_ PVOID BaseAddress,
_In_ SIZE_T MaxLength,
_In_ LPCSTR Signature,
_In_ SIZE_T SignatureLength);
NTSTATUS
GetOsVersionInformation(_Out_ PRTL_OSVERSIONINFOW VersionInfo);
NTSTATUS
SystemModuleVerificationDispatcher();
NTSTATUS
ValidateOurDriverImage();
VOID
CleanupValidationContextOnUnload(_In_ PSYS_MODULE_VAL_CONTEXT Context);
UINT32
CalculateCpuCoreUsage(_In_ UINT32 Core);
NTSTATUS
HashModule(_In_ PRTL_MODULE_EXTENDED_INFO Module, _Out_ PVOID Hash);
VOID
ValidateSystemModule(_In_ PRTL_MODULE_EXTENDED_INFO Module);
BOOLEAN
ValidateOurDriversDispatchRoutines();
VOID
DeferredModuleHashingCallback(_In_ PDEVICE_OBJECT DeviceObject,
_In_opt_ PVOID Context);
VOID
FindWinLogonProcess(_In_ PPROCESS_LIST_ENTRY Node, _In_opt_ PVOID Context);
NTSTATUS
InitialiseHeartbeatConfiguration(
_Out_ PHEARTBEAT_CONFIGURATION Configuration);
VOID
FreeHeartbeatConfiguration(_Inout_ PHEARTBEAT_CONFIGURATION Configuration);
NTSTATUS
HashUserModule(_In_ PPROCESS_MAP_MODULE_ENTRY Entry,
_Out_ PVOID OutBuffer,
_In_ UINT32 OutBufferSize);
#endif
================================================
FILE: driver/io.c
================================================
#include "io.h"
#include "callbacks.h"
#include "containers/map.h"
#include "driver.h"
#include "hv.h"
#include "hw.h"
#include "imports.h"
#include "integrity.h"
#include "lib/stdlib.h"
#include "modules.h"
#include "pool.h"
#include "session.h"
#include "thread.h"
STATIC
NTSTATUS
DispatchApcOperation(_In_ PAPC_OPERATION_ID Operation);
#ifdef ALLOC_PRAGMA
# pragma alloc_text(PAGE, DispatchApcOperation)
# pragma alloc_text(PAGE, DeviceControl)
# pragma alloc_text(PAGE, DeviceClose)
# pragma alloc_text(PAGE, DeviceCreate)
#endif
#define IOCTL_RUN_NMI_CALLBACKS \
CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20001, METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_VALIDATE_DRIVER_OBJECTS \
CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20002, METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_NOTIFY_DRIVER_ON_PROCESS_LAUNCH \
CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20004, METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_HANDLE_REPORTS_IN_CALLBACK_QUEUE \
CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20005, METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_PERFORM_VIRTUALIZATION_CHECK \
CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20006, METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_ENUMERATE_HANDLE_TABLES \
CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20007, METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_RETRIEVE_MODULE_EXECUTABLE_REGIONS \
CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20008, METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_REQUEST_TOTAL_MODULE_SIZE \
CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20009, METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_NOTIFY_DRIVER_ON_PROCESS_TERMINATION \
CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20010, METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_SCAN_FOR_UNLINKED_PROCESS \
CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20011, METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_PERFORM_INTEGRITY_CHECK \
CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20013, METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_DETECT_ATTACHED_THREADS \
CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20014, METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_VALIDATE_PROCESS_LOADED_MODULE \
CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20015, METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_REQUEST_HARDWARE_INFORMATION \
CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20016, METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_INITIATE_APC_OPERATION \
CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20017, METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_CHECK_FOR_EPT_HOOK \
CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20018, METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_LAUNCH_DPC_STACKWALK \
CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20019, METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_VALIDATE_SYSTEM_MODULES \
CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20020, METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_INSERT_IRP_INTO_QUEUE \
CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20021, METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_QUERY_DEFERRED_REPORTS \
CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20022, METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_INITIATE_SHARED_MAPPING \
CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20023, METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_VALIDATE_PCI_DEVICES \
CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20024, METHOD_BUFFERED, FILE_ANY_ACCESS)
#define IOCTL_VALIDATE_WIN32K_TABLES \
CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20025, METHOD_BUFFERED, FILE_ANY_ACCESS)
#define APC_OPERATION_STACKWALK 0x1
/*
* Basic cancel-safe IRP queue implementation. Stores pending IRPs in a list,
* allowing us to dequeue entries to send data back to user mode without being
* invoked by the user mode module via an io completion port.
*
* user mode program will automatically queue another irp when an irp completes,
* ensuring queue has a sufficient supply.
*
 * note: maybe we should use a spinlock here? Don't really want competing
 * threads sleeping. I think a spinlock should be used here.
*/
/*
 * IO_CSQ lock-acquire callback. We serialise with a guarded mutex rather
 * than a spinlock, so the IRQL out-parameter the framework provides is
 * unused. NOTE(review): this takes the session lock, not the IRP queue's
 * own lock — confirm that is the intended guard for the CSQ list.
 */
STATIC
VOID
IrpQueueAcquireLock(_In_ PIO_CSQ Csq, _Out_ PKIRQL Irql)
{
    /* Consistency: mark unused parameters like the sibling CSQ callbacks. */
    UNREFERENCED_PARAMETER(Csq);
    UNREFERENCED_PARAMETER(Irql);

    KeAcquireGuardedMutex(&GetActiveSession()->lock);
}
/* IO_CSQ lock-release callback; counterpart of IrpQueueAcquireLock. */
STATIC
VOID
IrpQueueReleaseLock(_In_ PIO_CSQ Csq, _In_ KIRQL Irql)
{
    /* Consistency: mark unused parameters like the sibling CSQ callbacks. */
    UNREFERENCED_PARAMETER(Csq);
    UNREFERENCED_PARAMETER(Irql);

    KeReleaseGuardedMutex(&GetActiveSession()->lock);
}
/*
 * IO_CSQ peek callback. Called with the lock held. Per the CSQ contract the
 * Irp argument is NULL when the framework wants the first queued IRP (which
 * is how IoCsqRemoveNextIrp(&csq, NULL) invokes it), so it must not be
 * asserted non-NULL. This implementation always reports the head of the
 * list, which is sufficient for our single remove-from-head usage.
 */
STATIC
PIRP
IrpQueuePeekNextEntry(_In_ PIO_CSQ Csq, _In_ PIRP Irp, _In_ PVOID Context)
{
    UNREFERENCED_PARAMETER(Csq);
    UNREFERENCED_PARAMETER(Context);
    /* Fix: removed NT_ASSERT(Irp != NULL) — NULL is a legal argument here
     * and the assert would fire on every checked-build dequeue. */
    UNREFERENCED_PARAMETER(Irp);

    PIRP_QUEUE_HEAD queue = GetIrpQueueHead();

    if (queue->irp_count == 0)
        return NULL;

    return CONTAINING_RECORD(queue->queue.Flink, IRP, Tail.Overlay.ListEntry);
}
/* IO_CSQ remove callback: unlink the IRP and keep the cached count in
 * sync. Called with the queue lock held. */
STATIC
VOID
IrpQueueRemove(_In_ PIO_CSQ Csq, _In_ PIRP Irp)
{
    UNREFERENCED_PARAMETER(Csq);

    RemoveEntryList(&Irp->Tail.Overlay.ListEntry);
    GetIrpQueueHead()->irp_count--;
}
/* TRUE when at least one deferred report is waiting on the queue. */
STATIC
BOOLEAN
IrpQueueIsThereDeferredPackets(_In_ PIRP_QUEUE_HEAD Queue)
{
    if (Queue->deferred_reports.count > 0)
        return TRUE;

    return FALSE;
}
/*
 * Pop the oldest deferred report. Caller must hold deferred_reports.lock
 * and have verified the list is non-empty.
 */
STATIC
PDEFERRED_REPORT
IrpQueueRemoveDeferredPacket(_In_ PIRP_QUEUE_HEAD Queue)
{
    PLIST_ENTRY entry = RemoveHeadList(&Queue->deferred_reports.head);

    /* Fix: translate the list entry back to its containing record instead
     * of implicitly casting — correct regardless of where list_entry sits
     * inside DEFERRED_REPORT. */
    return CONTAINING_RECORD(entry, DEFERRED_REPORT, list_entry);
}
/* Release both the payload buffer and the report wrapper itself. */
STATIC
VOID
IrpQueueFreeDeferredPacket(_In_ PDEFERRED_REPORT Report)
{
    ImpExFreePoolWithTag(Report->buffer, REPORT_POOL_TAG);
    ImpExFreePoolWithTag(Report, REPORT_POOL_TAG);
}
/* Every outbound packet starts with a PACKET_HEADER; read its type. */
FORCEINLINE
STATIC
UINT16
GetPacketType(_In_ PVOID Buffer)
{
    return ((PPACKET_HEADER)Buffer)->packet_type;
}
/* Bump the per-type completion counter plus the overall IRP counter. */
FORCEINLINE
STATIC
VOID
IncrementPacketMetics(_In_ PIRP_QUEUE_HEAD Queue, UINT16 Type)
{
    if (Type == PACKET_TYPE_HEARTBEAT)
        Queue->total_heartbeats_completed++;
    else if (Type == PACKET_TYPE_REPORT)
        Queue->total_reports_completed++;

    Queue->total_irps_completed++;
}
/*
 * Complete a pending IRP with the contents of a deferred report. On success
 * the report is freed here; on validation failure the report is returned
 * untouched and the caller is responsible for freeing it.
 *
 * NOTE(review): this path calls IofCompleteRequest directly while the rest
 * of the file uses ImpIofCompleteRequest — confirm that is intentional.
 */
STATIC
NTSTATUS
IrpQueueCompleteDeferredPacket(_In_ PDEFERRED_REPORT Report, _In_ PIRP Irp)
{
    NT_ASSERT(Report != NULL);

    NTSTATUS status = ValidateIrpOutputBuffer(Irp, Report->buffer_size);
    PIRP_QUEUE_HEAD queue = GetIrpQueueHead();
    UINT16 type = GetPacketType(Report->buffer);

    if (!NT_SUCCESS(status))
        return status;

    IncrementPacketMetics(queue, type);

    IntCopyMemory(
        Irp->AssociatedIrp.SystemBuffer,
        Report->buffer,
        Report->buffer_size);

    Irp->IoStatus.Status = STATUS_SUCCESS;
    Irp->IoStatus.Information = Report->buffer_size;

    IofCompleteRequest(Irp, IO_NO_INCREMENT);
    IrpQueueFreeDeferredPacket(Report);
    return STATUS_SUCCESS;
}
/*
 * When user mode hands us a fresh IRP, first try to satisfy it with a
 * previously deferred report. Returns STATUS_UNSUCCESSFUL when no deferred
 * report was available.
 */
STATIC
NTSTATUS
IrpQueueQueryPendingPackets(_In_ PIRP Irp)
{
    NT_ASSERT(Irp != NULL);

    PIRP_QUEUE_HEAD queue = GetIrpQueueHead();
    PDEFERRED_REPORT report = NULL;
    NTSTATUS status = STATUS_UNSUCCESSFUL;

    /*
     * Important we hold the lock before we call IsThereDeferredReport to
     * prevent the race condition where in the period between when we get a
     * TRUE result and another thread removes the last entry from the list.
     * We then request a deferred report and will receive a null value
     * leading to a bugcheck in the subsequent call to
     * CompleteDeferredReport.
     */
    KeAcquireGuardedMutex(&queue->deferred_reports.lock);
    DEBUG_INFO("deferred packet count: %lx", queue->deferred_reports.count);

    if (IrpQueueIsThereDeferredPackets(queue)) {
        report = IrpQueueRemoveDeferredPacket(queue);

        /* Fix: the packet has left the list, so the count must drop whether
         * or not completion succeeds. Previously the failure path skipped
         * the decrement, leaving count > 0 with an empty list — a later
         * "non-empty" check would then pop the list head itself. */
        queue->deferred_reports.count--;

        status = IrpQueueCompleteDeferredPacket(report, Irp);

        if (!NT_SUCCESS(status)) {
            IrpQueueFreeDeferredPacket(report);
            goto end;
        }
    }

end:
    KeReleaseGuardedMutex(&queue->deferred_reports.lock);
    return status;
}
/* IO_CSQ insert callback: append the IRP and bump the cached count.
 * Called with the queue lock held. */
STATIC
VOID
IrpQueueInsert(_In_ PIO_CSQ Csq, _In_ PIRP Irp)
{
    PIRP_QUEUE_HEAD queue = GetIrpQueueHead();

    /* Consistency: mark the unused parameter as the sibling callbacks do. */
    UNREFERENCED_PARAMETER(Csq);

    InsertTailList(&queue->queue, &Irp->Tail.Overlay.ListEntry);
    queue->irp_count++;
}
/* IO_CSQ cancel callback: complete the cancelled IRP with no payload. */
STATIC
VOID
IrpQueueCompleteCancelledIrp(_In_ PIO_CSQ Csq, _In_ PIRP Irp)
{
    UNREFERENCED_PARAMETER(Csq);

    Irp->IoStatus.Information = 0;
    Irp->IoStatus.Status = STATUS_CANCELLED;
    ImpIofCompleteRequest(Irp, IO_NO_INCREMENT);
}
/*
 * Wrap Buffer in a DEFERRED_REPORT node. Returns NULL on allocation
 * failure; on success the node references (but does not copy) Buffer.
 */
STATIC
PDEFERRED_REPORT
IrpQueueAllocateDeferredPacket(_In_ PVOID Buffer, _In_ UINT32 BufferSize)
{
    PDEFERRED_REPORT packet = NULL;

    NT_ASSERT(Buffer != NULL);
    NT_ASSERT(BufferSize != 0);

    packet = ImpExAllocatePool2(
        POOL_FLAG_NON_PAGED,
        sizeof(DEFERRED_REPORT),
        REPORT_POOL_TAG);

    if (packet) {
        packet->buffer = Buffer;
        packet->buffer_size = BufferSize;
    }

    return packet;
}
#define MAX_DEFERRED_REPORTS_COUNT 256
/*
 * Stash a report for later delivery when no IRP is available. This routine
 * owns Buffer: on every failure path it is freed (previously it leaked when
 * the wrapper allocation failed, violating the ownership contract stated at
 * IrpQueueCompletePacket).
 */
STATIC
VOID
IrpQueueDeferPacket(
    _In_ PIRP_QUEUE_HEAD Queue, _In_ PVOID Buffer, _In_ UINT32 BufferSize)
{
    NT_ASSERT(Queue != NULL);
    NT_ASSERT(Buffer != NULL);

    PDEFERRED_REPORT report = NULL;

    report = IrpQueueAllocateDeferredPacket(Buffer, BufferSize);

    if (!report) {
        /* Fix: we own Buffer — free it rather than leaking it. */
        ImpExFreePoolWithTag(Buffer, REPORT_POOL_TAG);
        return;
    }

    KeAcquireGuardedMutex(&Queue->deferred_reports.lock);

    /*
     * Arbitrary cap (256): if we ever accumulate this many deferred
     * reports, there is probably a catastrophic error somewhere else.
     * Fix: the count is now checked under the lock instead of racing with
     * concurrent inserts/removals.
     */
    if (Queue->deferred_reports.count > MAX_DEFERRED_REPORTS_COUNT) {
        KeReleaseGuardedMutex(&Queue->deferred_reports.lock);
        /* Frees both the wrapper and Buffer. */
        IrpQueueFreeDeferredPacket(report);
        return;
    }

    InsertTailList(&Queue->deferred_reports.head, &report->list_entry);
    Queue->deferred_reports.count++;
    KeReleaseGuardedMutex(&Queue->deferred_reports.lock);
}
/*
 * takes ownership of the buffer, and regardless of the outcome will free it.
 *
 * IMPORTANT: All report buffers must be allocated in non paged memory.
 */
STATIC
NTSTATUS
IrpQueueCompletePacket(_In_ PVOID Buffer, _In_ ULONG BufferSize)
{
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    PIRP_QUEUE_HEAD queue = GetIrpQueueHead();
    PIRP irp = IoCsqRemoveNextIrp(&queue->csq, NULL);
    UINT16 type = GetPacketType(Buffer);

    /*
     * If no irps are available in our queue, lets store it in a deferred
     * reports list which should be checked each time we insert a new irp
     * into the queue.
     */
    if (!irp) {
        /* IrpQueueDeferPacket assumes ownership of Buffer. */
        IrpQueueDeferPacket(queue, Buffer, BufferSize);
        return STATUS_SUCCESS;
    }

    status = ValidateIrpOutputBuffer(irp, BufferSize);

    /*
     * Not sure how we should handle this, for now lets just free the buffer
     * and return a status.
     *
     * NOTE(review): the IRP is completed with STATUS_INSUFFICIENT_RESOURCES
     * even when `status` is e.g. STATUS_BUFFER_TOO_SMALL — confirm user
     * mode does not need the real failure code.
     */
    if (!NT_SUCCESS(status)) {
        ImpExFreePoolWithTag(Buffer, REPORT_POOL_TAG);
        irp->IoStatus.Status = STATUS_INSUFFICIENT_RESOURCES;
        irp->IoStatus.Information = 0;
        ImpIofCompleteRequest(irp, IO_NO_INCREMENT);
        return status;
    }

    IncrementPacketMetics(queue, type);

    irp->IoStatus.Status = STATUS_SUCCESS;
    irp->IoStatus.Information = BufferSize;
    IntCopyMemory(irp->AssociatedIrp.SystemBuffer, Buffer, BufferSize);
    ImpExFreePoolWithTag(Buffer, REPORT_POOL_TAG);
    ImpIofCompleteRequest(irp, IO_NO_INCREMENT);
    return status;
}
/*
 * Due to the fact that many reporting structures are holding a mutex when
 * scheduling a report packet, we need an alternative queueing option from DPCs
 * and spinlocks. Here we will use an array of work items (not yet implemented
 * — packets are currently completed inline by IrpQueueCompletePacket).
 *
 * Hmm this is an interesting issue. Not sure how we shall resolve this, for now
 * this works well enough.
 */
/* Entry point for producers: takes ownership of Buffer and currently
 * completes the packet inline via IrpQueueCompletePacket. */
VOID
IrpQueueSchedulePacket(_In_ PVOID Buffer, _In_ UINT32 BufferLength)
{
    IrpQueueCompletePacket(Buffer, BufferLength);
}
/* Drain and free every deferred report; used during teardown. */
STATIC
VOID
IrpQueueFreeDeferredPackets()
{
    PIRP_QUEUE_HEAD queue = GetIrpQueueHead();
    PDEFERRED_REPORT report = NULL;

    /* just in case... */
    KeAcquireGuardedMutex(&queue->deferred_reports.lock);

    while (IrpQueueIsThereDeferredPackets(queue)) {
        report = IrpQueueRemoveDeferredPacket(queue);
        /* Fix: keep the cached count in sync with the list — previously it
         * was never decremented, leaving a stale non-zero count over an
         * empty list. */
        queue->deferred_reports.count--;
        IrpQueueFreeDeferredPacket(report);
    }

    KeReleaseGuardedMutex(&queue->deferred_reports.lock);
}
/*
 * Initialise the IRP queue: its locks, the pending-IRP and deferred-report
 * lists, and the cancel-safe queue (IO_CSQ) callback table.
 */
NTSTATUS
IrpQueueInitialise()
{
    PIRP_QUEUE_HEAD queue = GetIrpQueueHead();
    NTSTATUS status = STATUS_UNSUCCESSFUL;

    KeInitializeGuardedMutex(&queue->lock);
    KeInitializeGuardedMutex(&queue->deferred_reports.lock);

    InitializeListHead(&queue->queue);
    InitializeListHead(&queue->deferred_reports.head);

    /* Register our callbacks with the cancel-safe queue framework. */
    status = IoCsqInitialize(
        &queue->csq,
        IrpQueueInsert,
        IrpQueueRemove,
        IrpQueuePeekNextEntry,
        IrpQueueAcquireLock,
        IrpQueueReleaseLock,
        IrpQueueCompleteCancelledIrp);

    if (!NT_SUCCESS(status))
        DEBUG_ERROR("IoCsqInitialize failed with status %x", status);

    return status;
}
/*
 * PASSIVE_LEVEL work item queued by SharedMappingDpcRoutine. Reads the
 * operation id user mode wrote into the shared page and dispatches the
 * corresponding scan. work_item_status is raised for the duration so
 * SharedMappingTerminate can wait for us.
 *
 * Failures are logged but deliberately not propagated — each operation is
 * best-effort.
 */
VOID
SharedMappingWorkRoutine(
    _In_ PDEVICE_OBJECT DeviceObject, _In_opt_ PVOID Context)
{
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    HANDLE handle = NULL;
    PSHARED_MAPPING state = (PSHARED_MAPPING)Context;

    /* Signal that a work item is executing (see SharedMappingTerminate). */
    InterlockedIncrement(&state->work_item_status);

    DEBUG_VERBOSE(
        "SharedMapping work routine called. OperationId: %lx",
        state->kernel_buffer->operation_id);

    switch (state->kernel_buffer->operation_id) {
    case ssRunNmiCallbacks:
        DEBUG_INFO("SHARED_STATE_OPERATION_ID: RunNmiCallbacks Received.");
        /* NOTE(review): called with no arguments here but with an Irp in
         * DeviceControl — confirm the prototype. */
        status = HandleNmiIOCTL();

        if (!NT_SUCCESS(status))
            DEBUG_ERROR("RunNmiCallbacks failed with status %lx", status);

        break;
    case ssValidateDriverObjects:
        DEBUG_INFO(
            "SHARED_STATE_OPERATION_ID: ValidateDriverObjects Received.");
        /* Run in a system thread; see the rationale in DeviceControl. */
        status = ImpPsCreateSystemThread(
            &handle,
            PROCESS_ALL_ACCESS,
            NULL,
            NULL,
            NULL,
            HandleValidateDriversIOCTL,
            NULL);

        if (!NT_SUCCESS(status)) {
            DEBUG_ERROR("PsCreateSystemThread failed with status %x", status);
            goto end;
        }

        ImpZwClose(handle);
        break;
    case ssEnumerateHandleTables:
        DEBUG_INFO("SHARED_STATE_OPERATION_ID: EnumerateHandleTables Received");
        /* can maybe implement this better so we can extract a status
         * value */
        RtlHashmapEnumerate(GetProcessHashmap(), EnumerateProcessHandles, NULL);
        break;
    case ssScanForUnlinkedProcesses:
        // DEBUG_INFO(
        //     "SHARED_STATE_OPERATION_ID: ScanForUnlinkedProcesses Received");
        // status = FindUnlinkedProcesses();
        // if (!NT_SUCCESS(status))
        //     DEBUG_ERROR("FindUnlinkedProcesses failed with status %x",
        //     status);
        break;
    case ssPerformModuleIntegrityCheck:
        DEBUG_INFO("SHARED_STATE_OPERATION_ID: PerformIntegrityCheck Received");
        status = ValidateOurDriverImage();

        if (!NT_SUCCESS(status))
            DEBUG_ERROR(
                "VerifyInMemoryImageVsDiskImage failed with status %x",
                status);

        break;
    case ssScanForAttachedThreads:
        DEBUG_INFO(
            "SHARED_STATE_OPERATION_ID: ScanForAttachedThreads Received");
        DetectThreadsAttachedToProtectedProcess();
        break;
    case ssScanForEptHooks:
        DEBUG_INFO("SHARED_STATE_OPERATION_ID: ScanForEptHooks Received");
        status = DetectEptHooksInKeyFunctions();

        if (!NT_SUCCESS(status))
            DEBUG_ERROR(
                "DetectEpthooksInKeyFunctions failed with status %x",
                status);

        break;
    case ssInitiateDpcStackwalk:
        DEBUG_INFO("SHARED_STATE_OPERATION_ID Received");
        status = DispatchStackwalkToEachCpuViaDpc();

        if (!NT_SUCCESS(status))
            DEBUG_ERROR(
                "DispatchStackwalkToEachCpuViaDpc failed with status %x",
                status);

        break;
    case ssValidateSystemModules:
        DEBUG_INFO("SHARED_STATE_OPERATION_ID: ValidateSystemModules Received");
        status = SystemModuleVerificationDispatcher();

        if (!NT_SUCCESS(status))
            DEBUG_ERROR("ValidateSystemModules failed with status %x", status);

        break;
    case ssValidateWin32kDispatchTables:
        DEBUG_INFO(
            "SHARED_STATE_OPERATION_ID: ValidateWin32kDispatchTables Received");
        status = ValidateWin32kDispatchTables();

        if (!NT_SUCCESS(status))
            DEBUG_ERROR(
                "ValidateWin32kDispatchTables failed with status %x",
                status);

        break;
    default: DEBUG_ERROR("Invalid SHARED_STATE_OPERATION_ID Received");
    }

end:
    InterlockedDecrement(&state->work_item_status);
}
/* again, we want to run our routine at apc level not dispatch level */
VOID
SharedMappingDpcRoutine(
    _In_ PKDPC Dpc,
    _In_opt_ PVOID DeferredContext,
    _In_opt_ PVOID SystemArgument1,
    _In_opt_ PVOID SystemArgument2)
{
    /* Consistency with HeartbeatDpcRoutine: mark unused parameters and
     * guard the optional context (fix: DeferredContext was dereferenced
     * without a NULL check despite being _In_opt_). */
    UNREFERENCED_PARAMETER(Dpc);
    UNREFERENCED_PARAMETER(SystemArgument1);
    UNREFERENCED_PARAMETER(SystemArgument2);

    if (!ARGUMENT_PRESENT(DeferredContext))
        return;

    PSHARED_MAPPING mapping = (PSHARED_MAPPING)DeferredContext;

    /* Skip this tick if the mapping is inactive or a work item is already
     * running. */
    if (!mapping->active || mapping->work_item_status)
        return;

    IoQueueWorkItem(
        mapping->work_item,
        SharedMappingWorkRoutine,
        NormalWorkQueue,
        mapping);
}
/* NOTE(review): KeSetTimerEx takes the period in milliseconds — 30000 is
 * 30 seconds, but the name says 15. Confirm the intended repeat period. */
#define REPEAT_TIME_15_SEC 30000
/*
 * Tear down the shared user/kernel mapping: wait for any running work item,
 * stop the periodic timer, and free the work item, MDL and backing page.
 *
 * NOTE(review): the timer is cancelled only after the wait loop, so a DPC
 * firing in between could queue one more work item against freed state —
 * confirm callers serialise against the timer. The user-mode view is not
 * explicitly unmapped here (MmUnmapLockedPages); presumably process
 * teardown covers it — verify.
 */
VOID
SharedMappingTerminate()
{
    PSHARED_MAPPING mapping = GetSharedMappingConfig();

    if (!mapping->active)
        return;

    /* Wait for an in-flight SharedMappingWorkRoutine to finish. */
    while (mapping->work_item_status)
        YieldProcessor();

    mapping->active = FALSE;
    mapping->user_buffer = NULL;
    mapping->size = 0;

    KeCancelTimer(&mapping->timer);
    IoFreeWorkItem(mapping->work_item);
    IoFreeMdl(mapping->mdl);
    ExFreePoolWithTag(mapping->kernel_buffer, POOL_TAG_INTEGRITY);
    RtlZeroMemory(mapping, sizeof(SHARED_MAPPING));
}
/*
 * Allocate the shared-mapping work item and arm the periodic timer that
 * polls the shared page (first due in 30 s, then every REPEAT_TIME_15_SEC
 * milliseconds).
 */
STATIC
NTSTATUS
SharedMappingInitialiseTimer(_In_ PSHARED_MAPPING Mapping)
{
    LARGE_INTEGER due_time = {0};

    /* Negative = relative due time. */
    due_time.QuadPart = -ABSOLUTE(SECONDS(30));

    Mapping->work_item = IoAllocateWorkItem(GetDriverDeviceObject());

    if (!Mapping->work_item) {
        DEBUG_ERROR("IoAllocateWorkItem failed with no status.");
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    KeInitializeDpc(&Mapping->timer_dpc, SharedMappingDpcRoutine, Mapping);
    KeInitializeTimer(&Mapping->timer);
    KeSetTimerEx(
        &Mapping->timer,
        due_time,
        REPEAT_TIME_15_SEC,
        &Mapping->timer_dpc);

    DEBUG_VERBOSE("Initialised shared mapping event timer.");
    return STATUS_SUCCESS;
}
/* Populate the shared-mapping state for a freshly created page mapping. */
STATIC
VOID
InitSharedMappingStructure(
    _Out_ PSHARED_MAPPING Mapping,
    _In_ PVOID KernelBuffer,
    _In_ PVOID UserBuffer,
    _In_ PMDL Mdl)
{
    Mapping->active = TRUE;
    Mapping->work_item_status = FALSE;
    Mapping->size = PAGE_SIZE;
    Mapping->mdl = Mdl;
    Mapping->user_buffer = UserBuffer;
    Mapping->kernel_buffer = (PSHARED_STATE)KernelBuffer;
}
/*
 * Create the shared user/kernel page: allocate a non-paged page, map it
 * into the calling process via an MDL, start the polling timer and return
 * the user-mode address + size through the IRP's output buffer.
 */
STATIC
NTSTATUS
SharedMappingInitialise(_In_ PIRP Irp)
{
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    PMDL mdl = NULL;
    PSHARED_MAPPING mapping = NULL;
    PSHARED_MAPPING_INIT mapping_init = NULL;
    PVOID buffer = NULL;
    PVOID user_buffer = NULL;

    mapping = GetSharedMappingConfig();

    /* TODO: need to copy these out */
    status = ValidateIrpOutputBuffer(Irp, sizeof(SHARED_MAPPING_INIT));

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("ValidateIrpOutputBuffer failed with status %x", status);
        return status;
    }

    /*
     * remember that ExAllocatePool2 zeroes the allocation, so no need to
     * zero
     */
    buffer =
        ExAllocatePool2(POOL_FLAG_NON_PAGED, PAGE_SIZE, POOL_TAG_INTEGRITY);

    if (!buffer)
        return STATUS_INSUFFICIENT_RESOURCES;

    mdl = IoAllocateMdl(buffer, PAGE_SIZE, FALSE, FALSE, NULL);

    if (!mdl) {
        DEBUG_ERROR("IoAllocateMdl failed with no status");
        ExFreePoolWithTag(buffer, POOL_TAG_INTEGRITY);
        return STATUS_INSUFFICIENT_RESOURCES;
    }

    MmBuildMdlForNonPagedPool(mdl);

    /* Mapping into user space can raise; catch rather than bugcheck. */
    __try {
        user_buffer = MmMapLockedPagesSpecifyCache(
            mdl,
            UserMode,
            MmCached,
            NULL,
            FALSE,
            NormalPagePriority | MdlMappingNoExecute);
    }
    __except (EXCEPTION_EXECUTE_HANDLER) {
        status = GetExceptionCode();
        DEBUG_ERROR(
            "MmMapLockedPagesSpecifyCache failed with status %x",
            status);
        IoFreeMdl(mdl);
        ExFreePoolWithTag(buffer, POOL_TAG_INTEGRITY);
        return status;
    }

    InitSharedMappingStructure(mapping, buffer, user_buffer, mdl);

    /* Fix: the timer-init status was previously ignored — a work-item
     * allocation failure left the mapping marked active with no timer and
     * leaked the mapping. Unwind everything on failure. */
    status = SharedMappingInitialiseTimer(mapping);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("SharedMappingInitialiseTimer failed with status %x",
                    status);
        MmUnmapLockedPages(user_buffer, mdl);
        IoFreeMdl(mdl);
        ExFreePoolWithTag(buffer, POOL_TAG_INTEGRITY);
        RtlZeroMemory(mapping, sizeof(SHARED_MAPPING));
        return status;
    }

    mapping_init = (PSHARED_MAPPING_INIT)Irp->AssociatedIrp.SystemBuffer;
    mapping_init->buffer = user_buffer;
    mapping_init->size = PAGE_SIZE;

    return status;
}
/*
 * Dispatch an APC operation requested by user mode. Only the stackwalk
 * operation is currently supported; anything else is rejected.
 */
STATIC
NTSTATUS
DispatchApcOperation(_In_ PAPC_OPERATION_ID Operation)
{
    PAGED_CODE();

    NTSTATUS status = STATUS_UNSUCCESSFUL;

    DEBUG_VERBOSE("Dispatching APC Operation...");

    if (Operation->operation_id != APC_OPERATION_STACKWALK) {
        DEBUG_WARNING("Invalid operation ID passed");
        return STATUS_INVALID_PARAMETER;
    }

    DEBUG_INFO(
        "Initiating APC stackwalk operation with operation id %i",
        Operation->operation_id);

    status = ValidateThreadsViaKernelApc();

    if (!NT_SUCCESS(status))
        DEBUG_ERROR(
            "ValidateThreadsViaKernelApc failed with status %x",
            status);

    return status;
}
/*
* Obviously, its important we check that the input and output buffer sizes for
* each IRP is big enough to hold the incoming and outgoing information.
*
* Another important thing to note is that the windows IO manager will only zero
* out the size of the input buffer. Given that we use METHOD_BUFFERED for all
* communication, the input and output buffer are the same, with the size used
* being that of the greatest buffer passed to DeviceIoControl. The IO manager
 * will then zero out the buffer to the size of the input buffer, so if the
 * output buffer is larger than the input buffer there will be uninitialised
 * memory in the buffer so we must zero out the buffer to the length of the
 * output buffer.
*
* We then set the IoStatus.Information field to the size of the buffer we are
* passing back. If we don't do this and we allocate an output buffer of size
* 0x1000, yet only use 0x100 bytes, the user mode apps output buffer will
* receive 0x100 bytes + 0x900 bytes of uninitialised memory which is an
* information leak.
*/
/*
 * Verify the IRP's output buffer can hold RequiredSize bytes, zero it so
 * uninitialised kernel memory can never leak back to user mode, and record
 * the byte count we will return in IoStatus.Information.
 */
NTSTATUS
ValidateIrpOutputBuffer(_In_ PIRP Irp, _In_ ULONG RequiredSize)
{
    PIO_STACK_LOCATION io = NULL;

    if (!Irp || !RequiredSize)
        return STATUS_INVALID_PARAMETER;

    io = IoGetCurrentIrpStackLocation(Irp);

    if (!io)
        return STATUS_UNSUCCESSFUL;

    if (io->Parameters.DeviceIoControl.OutputBufferLength < RequiredSize)
        return STATUS_BUFFER_TOO_SMALL;

    RtlSecureZeroMemory(Irp->AssociatedIrp.SystemBuffer, RequiredSize);
    Irp->IoStatus.Information = RequiredSize;

    return STATUS_SUCCESS;
}
/*
* Here we just check that the input buffers size matches the expected size..
* It isnt a very secure check but we can work on that later...
*/
/*
 * Check that the IRP's input buffer is exactly RequiredSize bytes. A blunt
 * equality check, but sufficient for our fixed-size request structures.
 */
NTSTATUS
ValidateIrpInputBuffer(_In_ PIRP Irp, _In_ ULONG RequiredSize)
{
    PIO_STACK_LOCATION io = NULL;

    if (!Irp || !RequiredSize)
        return STATUS_INVALID_PARAMETER;

    io = IoGetCurrentIrpStackLocation(Irp);

    if (!io)
        return STATUS_UNSUCCESSFUL;

    if (io->Parameters.DeviceIoControl.InputBufferLength != RequiredSize)
        return STATUS_INVALID_BUFFER_SIZE;

    return STATUS_SUCCESS;
}
NTSTATUS
DeviceControl(_In_ PDEVICE_OBJECT DeviceObject, _Inout_ PIRP Irp)
{
PAGED_CODE();
NTSTATUS status = STATUS_SUCCESS;
PIO_STACK_LOCATION stack_location = IoGetCurrentIrpStackLocation(Irp);
HANDLE handle = NULL;
PKTHREAD thread = NULL;
BOOLEAN security_flag = FALSE;
/*
* LMAO
*/
SessionIsActive(&security_flag);
if (security_flag == FALSE &&
stack_location->Parameters.DeviceIoControl.IoControlCode !=
IOCTL_NOTIFY_DRIVER_ON_PROCESS_LAUNCH) {
status = STATUS_ACCESS_DENIED;
goto end;
}
switch (stack_location->Parameters.DeviceIoControl.IoControlCode) {
case IOCTL_RUN_NMI_CALLBACKS:
DEBUG_INFO("IOCTL_RUN_NMI_CALLBACKS Received.");
status = HandleNmiIOCTL(Irp);
if (!NT_SUCCESS(status))
DEBUG_ERROR("RunNmiCallbacks failed with status %lx", status);
break;
case IOCTL_VALIDATE_DRIVER_OBJECTS:
DEBUG_INFO("IOCTL_VALIDATE_DRIVER_OBJECTS Received.");
/*
* The reason this function is run in a new thread and not the
* thread issuing the IOCTL is because ZwOpenDirectoryObject
* issues a user mode handle if called on the user mode thread
* calling DeviceIoControl. This is a problem because when we
* pass said handle to ObReferenceObjectByHandle it will issue a
* bug check under windows driver verifier.
*/
status = ImpPsCreateSystemThread(
&handle,
PROCESS_ALL_ACCESS,
NULL,
NULL,
NULL,
HandleValidateDriversIOCTL,
NULL);
if (!NT_SUCCESS(status)) {
DEBUG_ERROR("PsCreateSystemThread failed with status %x", status);
goto end;
}
ImpZwClose(handle);
break;
case IOCTL_NOTIFY_DRIVER_ON_PROCESS_LAUNCH:;
DEBUG_INFO("IOCTL_NOTIFY_DRIVER_ON_PROCESS_LAUNCH Received");
status = SessionInitialise(Irp);
if (!NT_SUCCESS(status)) {
DEBUG_ERROR("InitialiseSession failed with status %x", status);
goto end;
}
status = RegisterProcessObCallbacks();
if (!NT_SUCCESS(status))
DEBUG_ERROR("EnableObCallbacks failed with status %x", status);
break;
case IOCTL_HANDLE_REPORTS_IN_CALLBACK_QUEUE:
DEBUG_INFO("IOCTL_HANDLE_REPORTS_IN_CALLBACK_QUEUE Received");
status = QueryActiveApcContextsForCompletion();
if (!NT_SUCCESS(status))
DEBUG_ERROR(
"QueryActiveApcContextsForCompletion failed with status %x",
status);
break;
case IOCTL_PERFORM_VIRTUALIZATION_CHECK:
DEBUG_INFO("IOCTL_PERFORM_VIRTUALIZATION_CHECK Received");
status = PerformVirtualizationDetection(Irp);
if (!NT_SUCCESS(status))
DEBUG_ERROR(
"PerformVirtualizationDetection failed with status %x",
status);
break;
case IOCTL_ENUMERATE_HANDLE_TABLES:
DEBUG_INFO("IOCTL_ENUMERATE_HANDLE_TABLES Received");
/* can maybe implement this better so we can extract a status
* value */
RtlHashmapEnumerate(GetProcessHashmap(), EnumerateProcessHandles, NULL);
break;
case IOCTL_RETRIEVE_MODULE_EXECUTABLE_REGIONS:
DEBUG_VERBOSE("IOCTL_RETRIEVE_MODULE_EXECUTABLE_REGIONS Received");
status = ImpPsCreateSystemThread(
&handle,
PROCESS_ALL_ACCESS,
NULL,
NULL,
NULL,
RetrieveInMemoryModuleExecutableSections,
Irp);
if (!NT_SUCCESS(status)) {
DEBUG_ERROR("PsCreateSystemThread failed with status %x", status);
goto end;
}
status = ImpObReferenceObjectByHandle(
handle,
THREAD_ALL_ACCESS,
*PsThreadType,
KernelMode,
&thread,
NULL);
if (!NT_SUCCESS(status)) {
DEBUG_ERROR(
"ObReferenceObjectbyhandle failed with status %lx",
status);
ImpZwClose(handle);
goto end;
}
ImpKeWaitForSingleObject(thread, Executive, KernelMode, FALSE, NULL);
ImpZwClose(handle);
ImpObDereferenceObject(thread);
break;
case IOCTL_REQUEST_TOTAL_MODULE_SIZE:
DEBUG_INFO("IOCTL_REQUEST_TOTAL_MODULE_SIZE Received");
status = GetDriverImageSize(Irp);
if (!NT_SUCCESS(status))
DEBUG_ERROR("GetDriverImageSize failed with status %x", status);
break;
case IOCTL_NOTIFY_DRIVER_ON_PROCESS_TERMINATION:
DEBUG_INFO("IOCTL_NOTIFY_DRIVER_ON_PROCESS_TERMINATION Received");
SessionTerminate();
UnregisterProcessObCallbacks();
break;
case IOCTL_SCAN_FOR_UNLINKED_PROCESS:
// DEBUG_INFO("IOCTL_SCAN_FOR_UNLINKED_PROCESS Received");
// status = FindUnlinkedProcesses();
// if (!NT_SUCCESS(status))
// DEBUG_ERROR("FindUnlinkedProcesses failed with status %x",
// status);
break;
case IOCTL_PERFORM_INTEGRITY_CHECK:
DEBUG_INFO("IOCTL_PERFORM_INTEGRITY_CHECK Received");
status = ValidateOurDriverImage();
if (!NT_SUCCESS(status))
DEBUG_ERROR(
"VerifyInMemoryImageVsDiskImage failed with status %x",
status);
break;
case IOCTL_DETECT_ATTACHED_THREADS:
DEBUG_INFO("IOCTL_DETECT_ATTACHED_THREADS Received");
DetectThreadsAttachedToProtectedProcess();
break;
case IOCTL_VALIDATE_PROCESS_LOADED_MODULE:
DEBUG_INFO("IOCTL_VALIDATE_PROCESS_LOADED_MODULE Received");
status = ValidateProcessLoadedModule(Irp);
if (!NT_SUCCESS(status))
DEBUG_ERROR(
"ValidateProcessLoadedModule failed with status %x",
status);
break;
case IOCTL_REQUEST_HARDWARE_INFORMATION:;
DEBUG_INFO("IOCTL_REQUEST_HARDWARE_INFORMATION Received");
PSYSTEM_INFORMATION system_information =
GetDriverConfigSystemInformation();
status = ValidateIrpOutputBuffer(Irp, sizeof(SYSTEM_INFORMATION));
if (!NT_SUCCESS(status)) {
DEBUG_ERROR(
"ValidateIrpOutputBuffer failed with status %x",
status);
goto end;
}
Irp->IoStatus.Information = sizeof(SYSTEM_INFORMATION);
IntCopyMemory(
Irp->AssociatedIrp.SystemBuffer,
system_information,
sizeof(SYSTEM_INFORMATION));
break;
case IOCTL_INITIATE_APC_OPERATION:;
DEBUG_INFO("IOCTL_INITIATE_APC_OPERATION Received");
PAPC_OPERATION_ID operation =
(PAPC_OPERATION_ID)Irp->AssociatedIrp.SystemBuffer;
status = DispatchApcOperation(operation);
if (!NT_SUCCESS(status))
DEBUG_ERROR("DispatchApcOperation failed with status %x", status);
break;
case IOCTL_CHECK_FOR_EPT_HOOK:
DEBUG_INFO("IOCTL_CHECK_FOR_EPT_HOOK Received");
status = DetectEptHooksInKeyFunctions();
if (!NT_SUCCESS(status))
DEBUG_ERROR(
"DetectEpthooksInKeyFunctions failed with status %x",
status);
break;
case IOCTL_VALIDATE_SYSTEM_MODULES:
DEBUG_INFO("IOCTL_VALIDATE_SYSTEM_MODULES Received");
status = SystemModuleVerificationDispatcher();
if (!NT_SUCCESS(status))
DEBUG_ERROR("ValidateSystemModules failed with status %x", status);
break;
case IOCTL_LAUNCH_DPC_STACKWALK:
DEBUG_INFO("IOCTL_LAUNCH_DPC_STACKWALK Received");
status = DispatchStackwalkToEachCpuViaDpc();
if (!NT_SUCCESS(status))
DEBUG_ERROR(
"DispatchStackwalkToEachCpuViaDpc failed with status %x",
status);
break;
case IOCTL_INSERT_IRP_INTO_QUEUE:;
// DEBUG_INFO("IOCTL_INSERT_IRP_INTO_QUEUE Received");
PIRP_QUEUE_HEAD queue = GetIrpQueueHead();
/*
* Given the nature of the Windows IO subsystem and the
* cancel-safe queue implementation we use, we need to query for
* deferred reports before insert an irp into the queue. The
* reason for this is the cancel-safe queue will automically
* mark the irp as pending, so if we then use that irp to return
* a deferred report and return success here verifier has a lil
* cry.
*/
/* before we queue our IRP, check if we can complete a deferred
* report */
status = IrpQueueQueryPendingPackets(Irp);
/* if we return success, weve completed the irp, we can return
* success */
if (!NT_SUCCESS(status)) {
/* if there are no deferred reports, store the irp in
* the queue */
IoCsqInsertIrp(&queue->csq, Irp, NULL);
/* we dont want to complete the request */
return STATUS_PENDING;
}
return STATUS_SUCCESS;
case IOCTL_INITIATE_SHARED_MAPPING:
DEBUG_INFO("IOCTL_INITIATE_SHARED_MAPPING Received");
status = SharedMappingInitialise(Irp);
if (!NT_SUCCESS(status))
DEBUG_ERROR(
"SharedMappingInitialise failed with status %x",
status);
break;
case IOCTL_VALIDATE_PCI_DEVICES:
DEBUG_INFO("IOCTL_VALIDATE_PCI_DEVICES Received");
status = ImpPsCreateSystemThread(
&handle,
PROCESS_ALL_ACCESS,
NULL,
NULL,
NULL,
ValidatePciDevices,
NULL);
if (!NT_SUCCESS(status)) {
DEBUG_ERROR("PsCreateSystemThread failed with status %x", status);
goto end;
}
ImpZwClose(handle);
break;
case IOCTL_VALIDATE_WIN32K_TABLES:
DEBUG_INFO("IOCTL_VALIDATE_WIN32K_TABLES Received");
status = ValidateWin32kDispatchTables();
if (!NT_SUCCESS(status))
DEBUG_ERROR(
"ValidateWin32kDispatchTables failed with status %x",
status);
break;
default:
DEBUG_WARNING(
"Invalid IOCTL passed to driver: %lx",
stack_location->Parameters.DeviceIoControl.IoControlCode);
status = STATUS_INVALID_PARAMETER;
break;
}
end:
DEBUG_VERBOSE("Completing IRP with status %x", status);
Irp->IoStatus.Status = status;
IoCompleteRequest(Irp, IO_NO_INCREMENT);
return status;
}
/*
 * IRP_MJ_CLOSE handler. Tears down the active session (if any) when the last
 * handle to the device is closed.
 *
 * Fix: the previous implementation read Irp->IoStatus.Status AFTER calling
 * IoCompleteRequest. Once an IRP is completed it may be freed by the I/O
 * manager, so touching it afterwards is a use-after-free. Set the status
 * before completion and return the same value.
 */
NTSTATUS
DeviceClose(_In_ PDEVICE_OBJECT DeviceObject, _Inout_ PIRP Irp)
{
    PAGED_CODE();
    UNREFERENCED_PARAMETER(DeviceObject);
    DEBUG_INFO("Handle to driver closed.");
    /* TODO: anyone can open a handle which might not begin a session, so
     * gating teardown purely on is_session_active needs rework. */
    if (GetActiveSession()->is_session_active) {
        SessionTerminate();
        UnregisterProcessObCallbacks();
        SharedMappingTerminate();
    }
    Irp->IoStatus.Status = STATUS_SUCCESS;
    Irp->IoStatus.Information = 0;
    IoCompleteRequest(Irp, IO_NO_INCREMENT);
    return STATUS_SUCCESS;
}
/*
 * IRP_MJ_CREATE handler. Simply acknowledges the open.
 *
 * Fix: as with DeviceClose, the old code returned Irp->IoStatus.Status after
 * IoCompleteRequest, which reads the IRP after it may have been freed and
 * returned an uninitialised status. Set the status first, complete, and
 * return the same value.
 */
NTSTATUS
DeviceCreate(_In_ PDEVICE_OBJECT DeviceObject, _Inout_ PIRP Irp)
{
    PAGED_CODE();
    UNREFERENCED_PARAMETER(DeviceObject);
    DEBUG_INFO("Handle to driver opened.");
    Irp->IoStatus.Status = STATUS_SUCCESS;
    Irp->IoStatus.Information = 0;
    IoCompleteRequest(Irp, IO_NO_INCREMENT);
    return STATUS_SUCCESS;
}
================================================
FILE: driver/io.h
================================================
#ifndef IO_H
#define IO_H
/* NOTE(review): the three include directives below have lost their targets
 * (presumably stripped during extraction) — restore the original headers. */
#include
#include
#include
#include "common.h"
/* User-mode supplied description of the buffer to map as shared memory. */
typedef struct _SHARED_MAPPING_INIT {
    PVOID buffer;
    SIZE_T size;
} SHARED_MAPPING_INIT, *PSHARED_MAPPING_INIT;
/* Operation identifiers communicated through SHARED_STATE.operation_id. */
typedef enum _SHARED_STATE_OPERATION_ID {
    ssRunNmiCallbacks = 0,
    ssValidateDriverObjects,
    ssEnumerateHandleTables,
    ssScanForUnlinkedProcesses,
    ssPerformModuleIntegrityCheck,
    ssScanForAttachedThreads,
    ssScanForEptHooks,
    ssInitiateDpcStackwalk,
    ssValidateSystemModules,
    ssValidateWin32kDispatchTables
} SHARED_STATE_OPERATION_ID;
/* State block visible to both user and kernel mode; fields are volatile,
 * presumably because both sides may access them concurrently — confirm
 * against the user-mode consumer. */
typedef struct _SHARED_STATE {
    volatile UINT32 status;
    volatile UINT16 operation_id;
} SHARED_STATE, *PSHARED_STATE;
/* Kernel-side bookkeeping for the user/kernel shared memory region. */
typedef struct _SHARED_MAPPING {
    volatile LONG work_item_status;
    PVOID user_buffer;
    PSHARED_STATE kernel_buffer;
    PMDL mdl;
    SIZE_T size;
    volatile BOOLEAN active;
    KTIMER timer;
    KDPC timer_dpc;
    PIO_WORKITEM work_item;
} SHARED_MAPPING, *PSHARED_MAPPING;
/* IRP major-function dispatch routines registered by the driver. */
NTSTATUS
DeviceControl(_In_ PDEVICE_OBJECT DeviceObject, _Inout_ PIRP Irp);
NTSTATUS
DeviceClose(_In_ PDEVICE_OBJECT DeviceObject, _Inout_ PIRP Irp);
NTSTATUS
DeviceCreate(_In_ PDEVICE_OBJECT DeviceObject, _Inout_ PIRP Irp);
/* Size checks for the IRP's system buffer before reading/writing it. */
NTSTATUS
ValidateIrpOutputBuffer(_In_ PIRP Irp, _In_ ULONG RequiredSize);
NTSTATUS
ValidateIrpInputBuffer(_In_ PIRP Irp, _In_ ULONG RequiredSize);
NTSTATUS
IrpQueueInitialise();
/* Queues an encrypted report packet for delivery to user mode. */
VOID
IrpQueueSchedulePacket(_In_ PVOID Buffer, _In_ UINT32 BufferLength);
#endif
================================================
FILE: driver/lib/stdlib.c
================================================
#include "stdlib.h"
/* Byte-wise copy of Length bytes from Source to Destination. Regions must
 * not overlap (no memmove semantics). */
VOID
IntCopyMemory(_In_ PVOID Destination, _In_ PVOID Source, _In_ SIZE_T Length)
{
    PUCHAR out = (PUCHAR)Destination;
    PUCHAR in = (PUCHAR)Source;
    SIZE_T remaining = Length;
    while (remaining--)
        *out++ = *in++;
}
/* strnlen-like: number of characters before the NUL terminator, capped at
 * MaxLength. */
SIZE_T
IntStringLength(_In_ PCHAR String, _In_ SIZE_T MaxLength)
{
    SIZE_T count = 0;
    for (; count < MaxLength; count++) {
        if (String[count] == '\0')
            break;
    }
    return count;
}
/* Returns the index of the first differing byte, or Length if the buffers
 * are identical over the full range. */
SIZE_T
IntCompareMemory(_In_ PVOID Source1, _In_ PVOID Source2, _In_ SIZE_T Length)
{
    PUCHAR lhs = (PUCHAR)Source1;
    PUCHAR rhs = (PUCHAR)Source2;
    SIZE_T offset = 0;
    while (offset < Length && lhs[offset] == rhs[offset])
        offset++;
    return offset;
}
/* strstr-like: pointer to the first occurrence of String2 within String1,
 * or NULL when absent. An empty needle matches at the start. */
PCHAR
IntFindSubstring(_In_ PCHAR String1, _In_ PCHAR String2)
{
    PCHAR haystack = String1;
    if (*String2 == '\0')
        return String1;
    while (*haystack != '\0') {
        PCHAR h = haystack;
        PCHAR needle = String2;
        /* Advance both cursors while they agree. */
        while (*h != '\0' && *needle != '\0' && *h == *needle) {
            h++;
            needle++;
        }
        /* Needle exhausted means a full match starting at haystack. */
        if (*needle == '\0')
            return haystack;
        haystack++;
    }
    return NULL;
}
/* strcmp-like: 0 when equal, otherwise the (signed) difference of the first
 * mismatching characters. */
INT32
IntCompareString(_In_ PCHAR String1, _In_ PCHAR String2)
{
    for (;; String1++, String2++) {
        /* Stop on the first mismatch, or on a shared terminator (equal). */
        if (*String1 != *String2 || *String1 == '\0')
            return (INT32)(*String1 - *String2);
    }
}
/* wcscpy-like: copies Source (including the terminator) into Destination and
 * returns Destination. The caller guarantees the buffer is large enough. */
PWCHAR
IntWideStringCopy(_In_ PWCHAR Destination, _In_ PWCHAR Source)
{
    PWCHAR cursor = Destination;
    for (;;) {
        *cursor = *Source;
        if (*Source == L'\0')
            break;
        cursor++;
        Source++;
    }
    return Destination;
}
================================================
FILE: driver/lib/stdlib.h
================================================
#ifndef STDLIB_H
#define STDLIB_H
#include "../common.h"
/* Freestanding replacements for common CRT string/memory routines. */
/* Byte-wise copy of Length bytes from Source to Destination. */
VOID
IntCopyMemory(_In_ PVOID Destination, _In_ PVOID Source, _In_ SIZE_T Length);
/* strnlen-like: length of String, capped at MaxLength. */
SIZE_T
IntStringLength(_In_ PCHAR String, _In_ SIZE_T MaxLength);
/* Index of the first differing byte, or Length if the buffers match. */
SIZE_T
IntCompareMemory(_In_ PVOID Source1, _In_ PVOID Source2, _In_ SIZE_T Length);
/* strstr-like: first occurrence of String2 within String1, else NULL. */
PCHAR
IntFindSubstring(_In_ PCHAR String1, _In_ PCHAR String2);
/* strcmp-like: 0 when equal, else difference of first mismatch. */
INT32
IntCompareString(_In_ PCHAR String1, _In_ PCHAR String2);
/* wcscpy-like: copies Source (incl. terminator) into Destination. */
PWCHAR
IntWideStringCopy(_In_ PWCHAR Destination, _In_ PWCHAR Source);
#endif
================================================
FILE: driver/modules.c
================================================
#include "modules.h"
#include "apc.h"
#include "callbacks.h"
#include "containers/tree.h"
#include "crypt.h"
#include "driver.h"
#include "ia32.h"
#include "imports.h"
#include "io.h"
#include "pe.h"
#include "thread.h"
#include "lib/stdlib.h"
/* Pool tag for the whitelist region array allocated per validation pass. */
#define WHITELISTED_MODULE_TAG 'whte'
#define NMI_DELAY 200 * 10000
#define WHITELISTED_MODULE_COUNT 11
#define MODULE_MAX_STRING_SIZE 256
#define NTOSKRNL 0
#define CLASSPNP 1
#define WDF01000 2
/*
 * The modules seen in the array below have been seen to commonly hook other
 * drivers' IOCTL dispatch routines. Its possible to see this by using
 * WinObjEx64 and checking which module each individual dispatch routine lies
 * in. These modules are then addded to the list (in addition to either the
 * driver itself or ntoskrnl) which is seen as a valid region for a drivers
 * dispatch routine to lie within.
 */
CHAR WHITELISTED_MODULES[WHITELISTED_MODULE_COUNT][MODULE_MAX_STRING_SIZE] = {
    "ntoskrnl.exe",
    "CLASSPNP.SYS",
    "Wdf01000.sys",
    "HIDCLASS.SYS",
    "storport.sys",
    "dxgkrnl.sys",
    "ndis.sys",
    "ks.sys",
    "portcls.sys",
    "rdbss.sys",
    "LXCORE.SYS"};
#define MODULE_REPORT_DRIVER_NAME_BUFFER_SIZE 128
#define SYSTEM_IDLE_PROCESS_ID 0
#define SYSTEM_PROCESS_ID 4
#define SVCHOST_PROCESS_ID 8
/* A whitelisted address range. NOTE: as populated by
 * PopulateWhitelistedModuleBuffer, `size` actually holds the region END
 * address (base + image size), not the length — callers compare against it
 * as an upper bound. */
typedef struct _WHITELISTED_REGIONS {
    UINT64 base;
    UINT64 size;
} WHITELISTED_REGIONS, *PWHITELISTED_REGIONS;
/* Per-core data captured by the NMI callback for later analysis. */
typedef struct _NMI_CONTEXT {
    UINT64 interrupted_rip;
    UINT64 interrupted_rsp;
    UINT64 kthread;          /* PsGetCurrentThread() of the interrupted thread */
    UINT32 callback_count;   /* zero means the NMI never fired on this core */
    BOOLEAN user_thread;     /* interrupted rip was a user-mode address */
} NMI_CONTEXT, *PNMI_CONTEXT;
#define DPC_STACKWALK_STACKFRAME_COUNT 10
/* the first 3 frames are isr handlers which we dont care about */
#define DPC_STACKWALK_FRAMES_TO_SKIP 3
/* Per-core data captured by the DPC stackwalk. */
typedef struct _DPC_CONTEXT {
    UINT64 stack_frame[DPC_STACKWALK_STACKFRAME_COUNT];
    UINT16 frames_captured;
    volatile BOOLEAN executed;
} DPC_CONTEXT, *PDPC_CONTEXT;
// clang-format off
/* Forward declarations for file-local routines referenced before their
 * definitions. */
STATIC
VOID
PopulateWhitelistedModuleBuffer(
    _Inout_ PWHITELISTED_REGIONS Whitelist,
    _In_ PSYSTEM_MODULES SystemModules
);
STATIC
NTSTATUS
ValidateDriverObjectsWrapper(
    _In_ PSYSTEM_MODULES SystemModules
);
STATIC
NTSTATUS
AnalyseNmiData(
    _In_ PNMI_CONTEXT NmiContext,
    _In_ PSYSTEM_MODULES SystemModules
);
STATIC
NTSTATUS
LaunchNonMaskableInterrupt();
STATIC
VOID
ApcRundownRoutine(
    _In_ PRKAPC Apc
);
STATIC
VOID
ApcKernelRoutine(
    _In_ PRKAPC Apc,
    _Inout_ _Deref_pre_maybenull_ PKNORMAL_ROUTINE* NormalRoutine,
    _Inout_ _Deref_pre_maybenull_ PVOID* NormalContext,
    _Inout_ _Deref_pre_maybenull_ PVOID* SystemArgument1,
    _Inout_ _Deref_pre_maybenull_ PVOID* SystemArgument2);
STATIC
VOID
ApcNormalRoutine(
    _In_opt_ PVOID NormalContext,
    _In_opt_ PVOID SystemArgument1,
    _In_opt_ PVOID SystemArgument2
);
STATIC
VOID
ValidateThreadViaKernelApcCallback(
    _In_ PTHREAD_LIST_ENTRY ThreadListEntry,
    _Inout_opt_ PVOID Context
);
// clang-format on
/* Place these PASSIVE_LEVEL-only routines (each asserts via PAGED_CODE) in a
 * pageable code section. */
#ifdef ALLOC_PRAGMA
# pragma alloc_text(PAGE, FindSystemModuleByName)
# pragma alloc_text(PAGE, PopulateWhitelistedModuleBuffer)
# pragma alloc_text(PAGE, GetSystemModuleInformation)
# pragma alloc_text(PAGE, ValidateDriverObjectsWrapper)
# pragma alloc_text(PAGE, HandleValidateDriversIOCTL)
# pragma alloc_text(PAGE, IsInstructionPointerInInvalidRegion)
# pragma alloc_text(PAGE, AnalyseNmiData)
# pragma alloc_text(PAGE, LaunchNonMaskableInterrupt)
# pragma alloc_text(PAGE, HandleNmiIOCTL)
# pragma alloc_text(PAGE, ApcRundownRoutine)
# pragma alloc_text(PAGE, ApcKernelRoutine)
# pragma alloc_text(PAGE, ApcNormalRoutine)
# pragma alloc_text(PAGE, ValidateThreadsViaKernelApc)
# pragma alloc_text(PAGE, ValidateThreadViaKernelApcCallback)
#endif
/*
* This returns a reference to an entry in the system modules array retrieved
* via GetSystemModuleInformation. It's important to remember we don't free the
* modules once we retrieve this reference, and instead only free them when we
* are done using it.
*/
PRTL_MODULE_EXTENDED_INFO
FindSystemModuleByName(
    _In_ LPCSTR ModuleName, _In_ PSYSTEM_MODULES SystemModules)
{
    PAGED_CODE();
    PRTL_MODULE_EXTENDED_INFO entries = NULL;
    if (!ModuleName || !SystemModules)
        return NULL;
    entries = (PRTL_MODULE_EXTENDED_INFO)SystemModules->address;
    /* Substring match: ModuleName may be a bare file name while
     * FullPathName carries the complete path. */
    for (INT index = 0; index < SystemModules->module_count; index++) {
        if (IntFindSubstring(entries[index].FullPathName, ModuleName))
            return &entries[index];
    }
    return NULL;
}
/*
 * Fills Whitelist[i] with the loaded address range of WHITELISTED_MODULES[i].
 * Modules not present on this system are skipped, leaving their slot as the
 * caller initialised it (the caller is expected to pass zeroed memory so
 * empty slots never match an address).
 *
 * NOTE: `size` is assigned base + ImageSize, i.e. the region END address,
 * not a length — consumers compare against it as an upper bound.
 */
STATIC
VOID
PopulateWhitelistedModuleBuffer(
    _Inout_ PWHITELISTED_REGIONS Whitelist, _In_ PSYSTEM_MODULES SystemModules)
{
    PAGED_CODE();
    LPCSTR entry = NULL;
    PRTL_MODULE_EXTENDED_INFO module = NULL;
    PWHITELISTED_REGIONS region = NULL;
    for (UINT32 index = 0; index < WHITELISTED_MODULE_COUNT; index++) {
        entry = WHITELISTED_MODULES[index];
        module = FindSystemModuleByName(entry, SystemModules);
        /* not everyone will contain all whitelisted modules */
        if (!module)
            continue;
        region = &Whitelist[index];
        region->base = (UINT64)module->ImageBase;
        region->size = (UINT64)module->ImageBase + module->ImageSize;
    }
}
/* Address of the driver's IRP_MJ_DEVICE_CONTROL handler as an integer. */
STATIC
UINT64
GetDriverMajorDispatchFunction(_In_ PDRIVER_OBJECT Driver)
{
    UINT64 routine = (UINT64)Driver->MajorFunction[IRP_MJ_DEVICE_CONTROL];
    return routine;
}
/*
 * Returns TRUE when Driver's IRP_MJ_DEVICE_CONTROL handler lies outside both
 * the driver's own image and every whitelisted region — a strong hint the
 * dispatch routine has been hooked by a manually mapped driver.
 *
 * Fix: the inner whitelist loop used to declare a second `index` variable
 * that shadowed the outer module-loop index; renamed to `region` to remove
 * the shadowing hazard.
 */
STATIC
BOOLEAN
DoesDriverHaveInvalidDispatchRoutine(
    _In_ PDRIVER_OBJECT Driver,
    _In_ PSYSTEM_MODULES Modules,
    _In_ PWHITELISTED_REGIONS Regions)
{
    PAGED_CODE();
    UINT64 dispatch_function = 0;
    UINT64 module_base = 0;
    UINT64 module_end = 0;
    PRTL_MODULE_EXTENDED_INFO module = NULL;
    dispatch_function = GetDriverMajorDispatchFunction(Driver);
    if (!dispatch_function)
        return FALSE;
    module = (PRTL_MODULE_EXTENDED_INFO)Modules->address;
    for (UINT32 index = 0; index < Modules->module_count; index++) {
        if (module[index].ImageBase != Driver->DriverStart)
            continue;
        /* make sure our driver has a device object which is required
         * for IOCTL */
        if (!Driver->DeviceObject)
            return FALSE;
        module_base = (UINT64)module[index].ImageBase;
        module_end = module_base + module[index].ImageSize;
        /* firstly, check if its inside its own module */
        if (dispatch_function >= module_base && dispatch_function <= module_end)
            return FALSE;
        /*
         * The WDF framework and other low level drivers often hook the
         * dispatch routines when initiating the respective config of
         * their framework or system. With a bit of digging you can view
         * the drivers reponsible for the hooks. What this means is that
         * there will be legit drivers with dispatch routines that point
         * outside of ntoskrnl and their own memory region. So, I have
         * formed a list which contains the drivers that perform these
         * hooks and we iteratively check if the dispatch routine is
         * contained within one of these whitelisted regions. A note on
         * how to imrpove this is the fact that a code cave can be used
         * inside a whitelisted region which then jumps to an invalid
         * region such as a manually mapped driver. So in the future we
         * should implement a function which checks for standard hook
         * implementations like mov rax jmp rax etc.
         *
         * Regions[i].size holds the region END address (see
         * PopulateWhitelistedModuleBuffer), hence the <= comparison.
         */
        for (UINT32 region = 0; region < WHITELISTED_MODULE_COUNT; region++) {
            if (dispatch_function >= Regions[region].base &&
                dispatch_function <= Regions[region].size)
                return FALSE;
        }
        DEBUG_WARNING(
            "Driver with invalid dispatch routine found: %s",
            module[index].FullPathName);
        return TRUE;
    }
    return FALSE;
}
/*
 * Returns TRUE when DriverObject->DriverStart matches the base address of a
 * module in the system module snapshot, FALSE (after logging) otherwise.
 *
 * Fix: the previous code returned STATUS_INVALID_MEMBER — an NTSTATUS — from
 * this BOOLEAN routine when it hit a malformed entry. Any nonzero value is
 * truthy, so a single bad entry made every driver look legitimately backed
 * and silently suppressed detection. Malformed entries are now skipped.
 */
STATIC
BOOLEAN
DoesDriverObjectHaveBackingModule(
    _In_ PSYSTEM_MODULES ModuleInformation, _In_ PDRIVER_OBJECT DriverObject)
{
    PAGED_CODE();
    PRTL_MODULE_EXTENDED_INFO modules = NULL;
    PRTL_MODULE_EXTENDED_INFO entry = NULL;
    modules = (PRTL_MODULE_EXTENDED_INFO)ModuleInformation->address;
    for (UINT32 index = 0; index < ModuleInformation->module_count; index++) {
        entry = &modules[index];
        /* Skip malformed snapshot entries instead of aborting the search. */
        if (entry->ImageSize == 0 || entry->ImageBase == 0)
            continue;
        if (entry->ImageBase == DriverObject->DriverStart) {
            return TRUE;
        }
    }
    DEBUG_WARNING(
        "Driver found with no backing system image at address: %llx",
        (UINT64)DriverObject->DriverStart);
    return FALSE;
}
/* Binds a raw module-info buffer and its entry count into Modules. */
FORCEINLINE
STATIC
VOID
InitSystemModulesStructure(
    _Out_ PSYSTEM_MODULES Modules, _In_ PVOID Buffer, _In_ INT Count)
{
    Modules->module_count = Count;
    Modules->address = Buffer;
}
// https://imphash.medium.com/windows-process-internals-a-few-concepts-to-know-before-jumping-on-memory-forensics-part-3-4a0e195d947b
/*
 * Snapshots the loaded system modules into a non-paged pool buffer owned by
 * the caller (free ModuleInformation->address with SYSTEM_MODULES_POOL).
 *
 * Fix: on the second RtlQueryModuleInformation failure the real status was
 * replaced with STATUS_ABANDONED, masking the actual error from callers; the
 * genuine failure code is now propagated.
 */
NTSTATUS
GetSystemModuleInformation(_Out_ PSYSTEM_MODULES ModuleInformation)
{
    PAGED_CODE();
    ULONG size = 0;
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    PRTL_MODULE_EXTENDED_INFO buffer = NULL;
    if (!ModuleInformation)
        return STATUS_INVALID_PARAMETER;
    /* First call with a NULL buffer retrieves the required size. */
    status = RtlQueryModuleInformation(
        &size,
        sizeof(RTL_MODULE_EXTENDED_INFO),
        NULL);
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("RtlQueryModuleInformation failed with status %x", status);
        return status;
    }
    buffer = ExAllocatePool2(POOL_FLAG_NON_PAGED, size, SYSTEM_MODULES_POOL);
    if (!buffer) {
        DEBUG_ERROR("Failed to allocate pool LOL");
        return STATUS_MEMORY_NOT_ALLOCATED;
    }
    /* Second call fills the buffer we just allocated. */
    status = RtlQueryModuleInformation(
        &size,
        sizeof(RTL_MODULE_EXTENDED_INFO),
        buffer);
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR(
            "RtlQueryModuleInformation 2 failed with status %x",
            status);
        ExFreePoolWithTag(buffer, SYSTEM_MODULES_POOL);
        return status;
    }
    InitSystemModulesStructure(
        ModuleInformation,
        buffer,
        ARRAYLEN(size, RTL_MODULE_EXTENDED_INFO));
    return status;
}
/*
 * Builds, encrypts and queues a MODULE_VALIDATION_FAILURE report describing
 * a suspicious driver object. Best-effort: allocation or encryption failure
 * simply drops the report.
 *
 * Fix: the report was allocated with POOL_TAG_INTEGRITY but freed with
 * REPORT_POOL_TAG — a pool tag mismatch that Driver Verifier flags as a
 * bugcheck. Both sites now use REPORT_POOL_TAG, matching the other report
 * routines in this file.
 */
STATIC
VOID
ReportInvalidDriverObject(_In_ PDRIVER_OBJECT Driver, _In_ UINT32 ReportSubType)
{
    UINT32 len = 0;
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    ANSI_STRING string = {0};
    PMODULE_VALIDATION_FAILURE report = NULL;
    len = CryptRequestRequiredBufferLength(sizeof(MODULE_VALIDATION_FAILURE));
    report = ImpExAllocatePool2(POOL_FLAG_NON_PAGED, len, REPORT_POOL_TAG);
    if (!report)
        return;
    INIT_REPORT_PACKET(report, REPORT_MODULE_VALIDATION_FAILURE, ReportSubType);
    report->driver_base_address = Driver->DriverStart;
    report->driver_size = Driver->DriverSize;
    string.Length = 0;
    string.MaximumLength = MODULE_REPORT_DRIVER_NAME_BUFFER_SIZE;
    string.Buffer = &report->driver_name;
    /* Continue regardless of result */
    ImpRtlUnicodeStringToAnsiString(&string, &Driver->DriverName, FALSE);
    status = CryptEncryptBuffer(report, len);
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("CryptEncryptBuffer: %lx", status);
        ImpExFreePoolWithTag(report, REPORT_POOL_TAG);
        return;
    }
    IrpQueueSchedulePacket(report, len);
}
/* Next entry in a directory bucket's collision chain (NULL at the end). */
FORCEINLINE
STATIC
POBJECT_DIRECTORY_ENTRY
GetNextObject(_In_ POBJECT_DIRECTORY_ENTRY Entry)
{
    POBJECT_DIRECTORY_ENTRY next = Entry->ChainLink;
    return next;
}
/* The object referenced by a directory entry. */
FORCEINLINE
STATIC
PVOID
GetObjectFromDirectory(_In_ POBJECT_DIRECTORY_ENTRY Entry)
{
    PVOID object = Entry->Object;
    return object;
}
/*
 * Walks one \Driver directory bucket's collision chain and validates every
 * driver object on it: each must have a backing system module and a dispatch
 * routine inside a legitimate region; failures are reported to user mode.
 *
 * Fix: removed the `status` local, which was never used.
 */
STATIC
VOID
ValidateDriverObjects(
    _In_ PSYSTEM_MODULES Modules,
    _In_ POBJECT_DIRECTORY_ENTRY Entry,
    _In_ PWHITELISTED_REGIONS Whitelist)
{
    POBJECT_DIRECTORY_ENTRY entry = Entry;
    PDRIVER_OBJECT driver = NULL;
    while (entry) {
        driver = GetObjectFromDirectory(entry);
        if (!DoesDriverObjectHaveBackingModule(Modules, driver)) {
            ReportInvalidDriverObject(driver, REPORT_SUBTYPE_NO_BACKING_MODULE);
        }
        if (DoesDriverHaveInvalidDispatchRoutine(driver, Modules, Whitelist)) {
            ReportInvalidDriverObject(driver, REPORT_SUBTYPE_INVALID_DISPATCH);
        }
        entry = GetNextObject(entry);
    }
}
/* TODO: this function needs to be rewritten. Infact, this entire file needs to
* be rewritten.
* god this is so bad.
*/
/*
 * Opens the \Driver object directory and validates every driver object found
 * in its hash buckets against the loaded-module snapshot.
 *
 * Fixes:
 *  - whitelist allocation failure used to fall through and return
 *    STATUS_SUCCESS; it now returns STATUS_INSUFFICIENT_RESOURCES.
 *  - the NT_SUCCESS check after PopulateWhitelistedModuleBuffer (a VOID
 *    routine) was dead code operating on a stale status and was removed.
 *  - the function now returns the tracked status rather than an
 *    unconditional STATUS_SUCCESS.
 */
STATIC
NTSTATUS
ValidateDriverObjectsWrapper(_In_ PSYSTEM_MODULES SystemModules)
{
    PAGED_CODE();
    HANDLE handle = NULL;
    OBJECT_ATTRIBUTES oa = {0};
    PVOID dir = {0};
    UNICODE_STRING dir_name = {0};
    PWHITELISTED_REGIONS wl = NULL;
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    POBJECT_DIRECTORY dir_object = NULL;
    POBJECT_DIRECTORY_ENTRY bucket = NULL;
    ImpRtlInitUnicodeString(&dir_name, L"\\Driver");
    InitializeObjectAttributes(
        &oa,
        &dir_name,
        OBJ_CASE_INSENSITIVE,
        NULL,
        NULL);
    status = ImpZwOpenDirectoryObject(&handle, DIRECTORY_ALL_ACCESS, &oa);
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("ZwOpenDirectoryObject failed with status %x", status);
        return status;
    }
    status = ImpObReferenceObjectByHandle(
        handle,
        DIRECTORY_ALL_ACCESS,
        NULL,
        KernelMode,
        &dir,
        NULL);
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("ObReferenceObjectByHandle failed with status %x", status);
        ImpZwClose(handle);
        return status;
    }
    /*
     * Windows organises its drivers in object directories (not the same as
     * files directories). For the driver directory, there are 37 entries,
     * each driver is hashed and indexed. If there is a driver with a
     * duplicate index, it is inserted into same index in a linked list
     * using the _OBJECT_DIRECTORY_ENTRY struct. So to enumerate all drivers
     * we visit each entry in the hashmap, enumerate all objects in the
     * linked list at entry j then we increment the hashmap index i. The
     * motivation behind this is that when a driver is accessed, it is
     * brought to the first index in the linked list, so drivers that are
     * accessed the most can be accessed quickly
     */
    dir_object = (POBJECT_DIRECTORY)dir;
    ImpExAcquirePushLockExclusiveEx(&dir_object->Lock, NULL);
    /* ExAllocatePool2 zero-initialises, so whitelist slots for modules not
     * present on this system remain {0,0} and never match an address. */
    wl = ImpExAllocatePool2(
        POOL_FLAG_NON_PAGED,
        WHITELISTED_MODULE_COUNT * sizeof(WHITELISTED_REGIONS),
        WHITELISTED_MODULE_TAG);
    if (!wl) {
        status = STATUS_INSUFFICIENT_RESOURCES;
        goto end;
    }
    PopulateWhitelistedModuleBuffer(wl, SystemModules);
    for (UINT32 index = 0; index < NUMBER_HASH_BUCKETS; index++) {
        bucket = dir_object->HashBuckets[index];
        ValidateDriverObjects(SystemModules, bucket, wl);
    }
    status = STATUS_SUCCESS;
end:
    if (wl)
        ImpExFreePoolWithTag(wl, WHITELISTED_MODULE_TAG);
    ImpExReleasePushLockExclusiveEx(&dir_object->Lock, 0);
    ImpObDereferenceObject(dir);
    ImpZwClose(handle);
    return status;
}
/* TRUE when Rip falls within the user-mode address range. */
FORCEINLINE
STATIC
BOOLEAN
IsUserModeAddress(_In_ UINT64 Rip)
{
    return Rip <= WINDOWS_USERMODE_MAX_ADDRESS;
}
/*
 * IOCTL entry point: snapshots the loaded modules, validates every driver
 * object against them, then frees the snapshot.
 *
 * Fix: removed the unused `length` local and the redundant goto/label that
 * fell straight through to the line below it.
 */
NTSTATUS
HandleValidateDriversIOCTL()
{
    PAGED_CODE();
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    SYSTEM_MODULES modules = {0};
    status = GetSystemModuleInformation(&modules);
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("GetSystemModuleInformation failed with status %x", status);
        return status;
    }
    status = ValidateDriverObjectsWrapper(&modules);
    if (!NT_SUCCESS(status))
        DEBUG_ERROR("ValidateDriverObjects failed with status %x", status);
    /* Free the module snapshot taken above regardless of validation result. */
    if (modules.address)
        ImpExFreePoolWithTag(modules.address, SYSTEM_MODULES_POOL);
    return status;
}
/*
* TODO: this probably doesnt need to return an NTSTATUS, we can just return a
* boolean and remove the out variable.
*/
/* TRUE when Rip lies outside every module in the snapshot — i.e. inside an
 * unbacked (invalid) region. */
BOOLEAN
IsInstructionPointerInInvalidRegion(
    _In_ UINT64 Rip, _In_ PSYSTEM_MODULES SystemModules)
{
    PAGED_CODE();
    PRTL_MODULE_EXTENDED_INFO entries =
        (PRTL_MODULE_EXTENDED_INFO)SystemModules->address;
    /* Note that this does not check for HAL or PatchGuard Execution */
    for (UINT32 index = 0; index < SystemModules->module_count; index++) {
        UINT64 start = (UINT64)entries[index].ImageBase;
        UINT64 finish = start + entries[index].ImageSize;
        BOOLEAN inside = (Rip >= start) && (Rip <= finish);
        if (inside)
            return FALSE;
    }
    return TRUE;
}
/* TRUE when Rip lies within the address range of the given module. */
BOOLEAN
IsInstructionPointerInsideSpecifiedModule(
    _In_ UINT64 Rip, _In_ PRTL_MODULE_EXTENDED_INFO Module)
{
    UINT64 start = (UINT64)Module->ImageBase;
    UINT64 finish = start + Module->ImageSize;
    return (Rip >= start && Rip <= finish) ? TRUE : FALSE;
}
/*
 * Queues a report flagging that our NMIs never ran on some core (a sign a
 * cheat is suppressing NMIs). Best-effort: failures simply drop the report.
 *
 * Fix: the allocation-failure path returned STATUS_INSUFFICIENT_RESOURCES
 * from this VOID routine; replaced with a plain return.
 */
STATIC
VOID
ReportNmiBlocking()
{
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    UINT32 len = 0;
    PNMI_CALLBACK_FAILURE report = NULL;
    len = CryptRequestRequiredBufferLength(sizeof(NMI_CALLBACK_FAILURE));
    report = ImpExAllocatePool2(POOL_FLAG_NON_PAGED, len, REPORT_POOL_TAG);
    if (!report)
        return;
    INIT_REPORT_PACKET(report, REPORT_NMI_CALLBACK_FAILURE, 0);
    /* No specific thread/rip is implicated when NMIs were blocked. */
    report->kthread_address = NULL;
    report->invalid_rip = NULL;
    report->were_nmis_disabled = TRUE;
    status = CryptEncryptBuffer(report, len);
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("CryptEncryptBuffer: %lx", status);
        ImpExFreePoolWithTag(report, REPORT_POOL_TAG);
        return;
    }
    IrpQueueSchedulePacket(report, len);
}
/*
 * Queues a report for a thread interrupted by our NMI that has no valid
 * PspCidTable entry (a hidden system thread). Best-effort: allocation or
 * encryption failure drops the report.
 */
STATIC
VOID
ReportMissingCidTableEntry(_In_ PNMI_CONTEXT Context)
{
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    UINT32 len = 0;
    PHIDDEN_SYSTEM_THREAD_REPORT report = NULL;
    len = CryptRequestRequiredBufferLength(sizeof(HIDDEN_SYSTEM_THREAD_REPORT));
    report = ImpExAllocatePool2(POOL_FLAG_NON_PAGED, len, REPORT_POOL_TAG);
    if (!report)
        return;
    INIT_REPORT_PACKET(report, REPORT_HIDDEN_SYSTEM_THREAD, 0);
    report->found_in_kthreadlist = FALSE; // wip
    report->found_in_pspcidtable = FALSE;
    report->thread_id = ImpPsGetThreadId(Context->kthread);
    report->thread_address = Context->kthread;
    /* Raw copy of the start of the KTHREAD into the report buffer.
     * NOTE(review): Context->kthread is a UINT64 used here as a pointer;
     * this assumes the KTHREAD memory is resident and at least
     * sizeof(report->thread) bytes — confirm this is safe at this IRQL. */
    IntCopyMemory(report->thread, Context->kthread, sizeof(report->thread));
    status = CryptEncryptBuffer(report, len);
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("CryptEncryptBuffer: %lx", status);
        ImpExFreePoolWithTag(report, REPORT_POOL_TAG);
        return;
    }
    IrpQueueSchedulePacket(report, len);
}
/*
 * Queues a report for an interrupted RIP that lies outside any legitimate
 * module (or points at an exception-throwing ret target, per ReportSubCode).
 *
 * Fix: the buffer length was computed from
 * sizeof(HIDDEN_SYSTEM_THREAD_REPORT) — a copy-paste from the routine above
 * — while the packet written is an NMI_CALLBACK_FAILURE. Size now matches
 * the actual packet type.
 */
STATIC
VOID
ReportInvalidRipFoundDuringNmi(
    _In_ PNMI_CONTEXT Context, _In_ UINT32 ReportSubCode)
{
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    UINT32 len = 0;
    PNMI_CALLBACK_FAILURE report = NULL;
    len = CryptRequestRequiredBufferLength(sizeof(NMI_CALLBACK_FAILURE));
    report = ImpExAllocatePool2(POOL_FLAG_NON_PAGED, len, REPORT_POOL_TAG);
    if (!report)
        return;
    INIT_REPORT_PACKET(report, REPORT_NMI_CALLBACK_FAILURE, ReportSubCode);
    report->kthread_address = Context->kthread;
    report->invalid_rip = Context->interrupted_rip;
    report->were_nmis_disabled = FALSE;
    status = CryptEncryptBuffer(report, len);
    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("CryptEncryptBuffer: %lx", status);
        ImpExFreePoolWithTag(report, REPORT_POOL_TAG);
        return;
    }
    IrpQueueSchedulePacket(report, len);
}
#define INSTRUCTION_UD2_BYTE_1 0x0F
#define INSTRUCTION_UD2_BYTE_2 0x0B
#define INSTRUCTION_INT3_BYTE_1 0xCC
/*
 * Returns TRUE when the two bytes at ReturnAddress decode to an instruction
 * that unconditionally raises an exception (UD2 or INT3) — a pattern used by
 * cheats to divert control flow via the exception handler.
 *
 * Fix: MmIsAddressValid and IntCopyMemory take PVOID; the UINT64 address is
 * now cast explicitly instead of relying on implicit integer-to-pointer
 * conversion (an MSVC warning).
 */
STATIC
BOOLEAN
DoesRetInstructionCauseException(_In_ UINT64 ReturnAddress)
{
    /* UD2 instruction is 2 bytes; INT3 is 1 byte. */
    UCHAR opcodes[2] = {0};
    /* we deal with um later */
    if (IsUserModeAddress(ReturnAddress))
        return FALSE;
    if (!MmIsAddressValid((PVOID)ReturnAddress))
        return FALSE;
    /* Shouldn't really ever occur given the check above. */
    __try {
        IntCopyMemory(opcodes, (PVOID)ReturnAddress, sizeof(opcodes));
    }
    __except (EXCEPTION_EXECUTE_HANDLER) {
        return FALSE;
    }
    if (opcodes[0] == INSTRUCTION_UD2_BYTE_1 &&
        opcodes[1] == INSTRUCTION_UD2_BYTE_2)
        return TRUE;
    if (opcodes[0] == INSTRUCTION_INT3_BYTE_1)
        return TRUE;
    DEBUG_VERBOSE(
        "Ret address instruction doesnt unconditionally throw exception");
    return FALSE;
}
/*
* todo: i think we should split this function up into each analysis i.e one for
* the interrupted rip, one for the cid etc.
*/
/*
 * Post-processes the per-core NMI_CONTEXT array: verifies the NMI actually
 * ran on every core, that each interrupted thread has a valid CID table
 * entry, and that each interrupted RIP lies inside a known module.
 *
 * Fix: removed the `status` and `flag` locals, which were never used.
 */
STATIC
NTSTATUS
AnalyseNmiData(_In_ PNMI_CONTEXT NmiContext, _In_ PSYSTEM_MODULES Modules)
{
    PAGED_CODE();
    PNMI_CONTEXT context = NULL;
    if (!NmiContext || !Modules)
        return STATUS_INVALID_PARAMETER;
    for (UINT32 core = 0; core < ImpKeQueryActiveProcessorCount(0); core++) {
        context = &NmiContext[core];
        /* Make sure our NMIs were run */
        if (!context->callback_count) {
            ReportNmiBlocking();
            return STATUS_SUCCESS;
        }
        DEBUG_VERBOSE(
            "Analysing Nmi Data for: cpu number: %i callback count: %lx",
            core,
            context->callback_count);
        /*
         * Our NMI callback allows us to interrupt every running thread
         * on each core. Now it is common practice for malicious
         * programs to either unlink their thread from the KTHREAD
         * linked list or remove their threads entry from the
         * PspCidTable or both. Now the reason an unlinked thread can
         * still be scheduled is because the scheduler keeps a seperate
         * list that it uses to schedule threads. It then places these
         * threads in the KPRCB in either the CurrentThread, IdleThread
         * or NextThread.
         *
         * Since you can't just set a threads affinity to enumerate over
         * all cores and read the KPCRB->CurrentThread (since it will
         * just show your thread) we have to interrupt the thread. So
         * below we are validating that the thread is indeed in our own
         * threads list using our callback routine and then using
         * PsGetThreadId
         *
         * I also want to integrate a way to SAFELY determine whether a
         * thread has been removed from the KTHREADs linked list, maybe
         * PsGetNextProcess ?
         */
        if (!DoesThreadHaveValidCidEntry(context->kthread))
            ReportMissingCidTableEntry(context);
        if (IsInstructionPointerInInvalidRegion(
                context->interrupted_rip,
                Modules))
            ReportInvalidRipFoundDuringNmi(context, 0);
        /* The exception-throwing-ret heuristic only applies to kernel
         * addresses; skip threads interrupted in user mode. */
        if (context->user_thread)
            continue;
        if (DoesRetInstructionCauseException(context->interrupted_rip))
            ReportInvalidRipFoundDuringNmi(
                context,
                REPORT_SUBTYPE_EXCEPTION_THROWING_RET);
    }
    return STATUS_SUCCESS;
}
/* Reads the TSS pointer stored at a fixed offset inside the KPCR. */
FORCEINLINE
STATIC
TASK_STATE_SEGMENT_64*
GetTaskStateSegment(_In_ UINT64 Kpcr)
{
    UINT64 slot = Kpcr + KPCR_TSS_BASE_OFFSET;
    return *(TASK_STATE_SEGMENT_64**)slot;
}
/*
 * The machine frame pushed by the NMI sits immediately below the top of the
 * ISR stack held in Ist3. NOTE(review): this arithmetic assumes Ist3 is an
 * integer-typed field so the subtraction is in bytes, with an implicit
 * conversion to PMACHINE_FRAME on return — confirm against the
 * TASK_STATE_SEGMENT_64 definition in ia32.h.
 */
FORCEINLINE
STATIC
PMACHINE_FRAME
GetIsrMachineFrame(_In_ TASK_STATE_SEGMENT_64* TaskStateSegment)
{
    return TaskStateSegment->Ist3 - sizeof(MACHINE_FRAME);
}
/*
 * NMI callback: runs on each core when our NMI fires and records the
 * interrupted RIP/RSP and current KTHREAD into this core's NMI_CONTEXT slot.
 *
 * Fix: the per-core context pointer was computed via pointer arithmetic on
 * Context BEFORE the ARGUMENT_PRESENT null check — arithmetic on a null
 * pointer is undefined behaviour. The check now happens first.
 */
STATIC BOOLEAN
NmiCallback(_Inout_opt_ PVOID Context, _In_ BOOLEAN Handled)
{
    UNREFERENCED_PARAMETER(Handled);
    ULONG core = 0;
    PNMI_CONTEXT context = NULL;
    UINT64 kpcr = 0;
    TASK_STATE_SEGMENT_64* tss = NULL;
    PMACHINE_FRAME machine_frame = NULL;
    if (!ARGUMENT_PRESENT(Context))
        return TRUE;
    core = KeGetCurrentProcessorNumber();
    context = &((PNMI_CONTEXT)Context)[core];
    /*
     * To find the IRETQ frame (MACHINE_FRAME) we need to find the top of
     * the NMI ISR stack. This is stored at TSS->Ist[3]. To find the TSS, we
     * can read it from KPCR->TSS_BASE. Once we have our TSS, we can read
     * the value at TSS->Ist[3] which points to the top of the ISR stack,
     * and subtract the size of the MACHINE_FRAME struct. Allowing us read
     * the interrupted RIP.
     *
     * The reason this is needed is because RtlCaptureStackBackTrace is not
     * safe to run at IRQL = HIGH_LEVEL, hence we need to manually unwind
     * the ISR stack to find the interrupted rip.
     */
    kpcr = __readmsr(IA32_GS_BASE);
    tss = GetTaskStateSegment(kpcr);
    machine_frame = GetIsrMachineFrame(tss);
    if (IsUserModeAddress(machine_frame->rip))
        context->user_thread = TRUE;
    context->interrupted_rip = machine_frame->rip;
    context->interrupted_rsp = machine_frame->rsp;
    context->kthread = PsGetCurrentThread();
    context->callback_count++;
    return TRUE;
}
/* 200ms expressed in 100-nanosecond units; parenthesized so the macro
 * expands safely in any expression context. */
#define NMI_DELAY_TIME (200 * 10000)

/*
 * Sends an NMI to each active processor, one at a time, waiting between
 * sends so each core's handler can run to completion.
 *
 * Returns STATUS_MEMORY_NOT_ALLOCATED if the affinity mask cannot be
 * allocated, otherwise STATUS_SUCCESS.
 */
STATIC
NTSTATUS
LaunchNonMaskableInterrupt()
{
    PAGED_CODE();

    PKAFFINITY_EX affinity = NULL;
    LARGE_INTEGER delay = {0};

    affinity = ImpExAllocatePool2(
        POOL_FLAG_NON_PAGED,
        sizeof(KAFFINITY_EX),
        PROC_AFFINITY_POOL);

    if (!affinity)
        return STATUS_MEMORY_NOT_ALLOCATED;

    /* Negative QuadPart = relative interval for KeDelayExecutionThread. */
    delay.QuadPart -= NMI_DELAY_TIME;

    for (ULONG core = 0; core < ImpKeQueryActiveProcessorCount(0); core++) {
        /* Re-initialise the mask each iteration so exactly one core is
         * targeted per HalSendNMI call. */
        ImpKeInitializeAffinityEx(affinity);
        ImpKeAddProcessorAffinityEx(affinity, core);

        HalSendNMI(affinity);

        /*
         * Only a single NMI can be active at any given time, so
         * arbitrarily delay execution to allow time for the NMI to be
         * processed
         */
        ImpKeDelayExecutionThread(KernelMode, FALSE, &delay);
    }

    ImpExFreePoolWithTag(affinity, PROC_AFFINITY_POOL);
    return STATUS_SUCCESS;
}
/*
 * IOCTL handler that performs a full NMI-based stackwalk sweep: registers an
 * NMI callback, fires an NMI at every core, then analyses the captured
 * interrupted-RIP data against the current system module list.
 *
 * Returns STATUS_ALREADY_COMMITTED if a sweep is already running, otherwise
 * the status of the last failing step (or STATUS_SUCCESS).
 */
NTSTATUS
HandleNmiIOCTL()
{
    PAGED_CODE();
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    PVOID handle = NULL;
    SYSTEM_MODULES modules = {0};
    PNMI_CONTEXT context = NULL;
    UINT32 size = 0;

    /* One NMI_CONTEXT slot per active processor. */
    size = ImpKeQueryActiveProcessorCount(0) * sizeof(NMI_CONTEXT);

    /* Ensure we don't continue if another NMI operation is in progress.
     * NOTE(review): the in-progress flag is presumably set atomically inside
     * IsNmiInProgress (test-and-set), since every exit path below calls
     * UnsetNmiInProgressFlag but nothing here sets it — confirm against the
     * helper's implementation. */
    if (IsNmiInProgress())
        return STATUS_ALREADY_COMMITTED;

    status = ValidateHalDispatchTables();

    /* do we continue ? probably. (Failure here is logged but deliberately
     * non-fatal; the NMI sweep proceeds regardless.) */
    if (!NT_SUCCESS(status))
        DEBUG_ERROR("ValidateHalDispatchTables failed with status %x", status);

    context = ImpExAllocatePool2(POOL_FLAG_NON_PAGED, size, NMI_CONTEXT_POOL);

    if (!context) {
        UnsetNmiInProgressFlag();
        return STATUS_MEMORY_NOT_ALLOCATED;
    }

    /*
     * We want to register and unregister our callback each time so it
     * becomes harder for people to hook our callback and get up to some
     * funny business
     */
    handle = ImpKeRegisterNmiCallback(NmiCallback, context);

    if (!handle) {
        DEBUG_ERROR("KeRegisterNmiCallback failed with no status.");
        goto end;
    }

    /*
     * We query the system modules each time since they can potentially
     * change at any time
     */
    status = GetSystemModuleInformation(&modules);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("Error retriving system module information");
        goto end;
    }

    status = LaunchNonMaskableInterrupt();

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("Error running NMI callbacks");
        goto end;
    }

    status = AnalyseNmiData(context, &modules);

    if (!NT_SUCCESS(status))
        DEBUG_ERROR("Error analysing nmi data");

end:
    /* Single cleanup path: module list, per-core contexts, NMI callback
     * registration, then the busy flag. */
    if (modules.address)
        ImpExFreePoolWithTag(modules.address, SYSTEM_MODULES_POOL);

    if (context)
        ImpExFreePoolWithTag(context, NMI_CONTEXT_POOL);

    if (handle)
        ImpKeDeregisterNmiCallback(handle);

    UnsetNmiInProgressFlag();
    return status;
}
/*
 * The RundownRoutine is executed if the thread terminates before the APC was
 * delivered to user mode.
 */
STATIC
VOID
ApcRundownRoutine(_In_ PRKAPC Apc)
{
    PAGED_CODE();
    /* The APC will never run, so release its allocation and decrement the
     * outstanding-APC count for the stackwalk context. */
    FreeApcAndDecrementApcCount(Apc, APC_CONTEXT_ID_STACKWALK);
}
/*
 * Builds, encrypts and queues an APC_STACKWALK_REPORT packet flagging the
 * given instruction pointer as residing outside any legitimate module.
 */
STATIC
VOID
ReportApcStackwalkViolation(_In_ UINT64 Rip)
{
    UINT32 packet_len =
        CryptRequestRequiredBufferLength(sizeof(APC_STACKWALK_REPORT));
    PAPC_STACKWALK_REPORT packet = NULL;
    NTSTATUS status = STATUS_UNSUCCESSFUL;

    packet =
        ImpExAllocatePool2(POOL_FLAG_NON_PAGED, packet_len, REPORT_POOL_TAG);

    if (!packet)
        return;

    INIT_REPORT_PACKET(packet, REPORT_APC_STACKWALK, 0);

    packet->invalid_rip = Rip;
    packet->kthread_address = (UINT64)KeGetCurrentThread();
    // report->driver ?? todo!

    status = CryptEncryptBuffer(packet, packet_len);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("CryptEncryptBuffer: %lx", status);
        ImpExFreePoolWithTag(packet, REPORT_POOL_TAG);
        return;
    }

    IrpQueueSchedulePacket(packet, packet_len);
}
/*
 * The KernelRoutine is executed in kernel mode at APC_LEVEL before the APC is
 * delivered. This is also where we want to free our APC object.
 *
 * Captures a stack trace of the current thread and reports any frame that
 * falls outside a legitimate module region.
 */
STATIC
VOID
ApcKernelRoutine(
    _In_ PRKAPC Apc,
    _Inout_ _Deref_pre_maybenull_ PKNORMAL_ROUTINE* NormalRoutine,
    _Inout_ _Deref_pre_maybenull_ PVOID* NormalContext,
    _Inout_ _Deref_pre_maybenull_ PVOID* SystemArgument1,
    _Inout_ _Deref_pre_maybenull_ PVOID* SystemArgument2)
{
    PAGED_CODE();
    PVOID buffer = NULL;
    INT frames_captured = 0;
    UINT64 frame = 0;
    PAPC_STACKWALK_CONTEXT context = NULL;
    PTHREAD_LIST_ENTRY entry = NULL;

    /*
     * Apc->NormalContext holds the address of our context data structure
     * that we passed into KeInitializeApc as the last argument.
     */
    context = (PAPC_STACKWALK_CONTEXT)Apc->NormalContext;

    FindThreadListEntryByThreadAddress(KeGetCurrentThread(), &entry);

    /*
     * Even if the thread list entry cannot be found, we must still free the
     * APC and decrement the outstanding-APC count. The previous code
     * returned here, leaking the KAPC allocation and leaving the count
     * permanently elevated.
     */
    if (!entry)
        goto free;

    buffer = ImpExAllocatePool2(
        POOL_FLAG_NON_PAGED,
        STACK_FRAME_POOL_SIZE,
        POOL_TAG_APC);

    if (!buffer)
        goto free;

    frames_captured = ImpRtlCaptureStackBackTrace(
        NULL,
        STACK_FRAME_POOL_SIZE / sizeof(UINT64),
        buffer,
        NULL);

    if (!frames_captured)
        goto free;

    /* Signed index to match frames_captured's type (INT). */
    for (INT index = 0; index < frames_captured; index++) {
        frame = ((PUINT64)buffer)[index];

        if (IsInstructionPointerInInvalidRegion(frame, context->modules)) {
            ReportApcStackwalkViolation(frame);
        }
    }

free:
    if (buffer)
        ImpExFreePoolWithTag(buffer, POOL_TAG_APC);

    FreeApcAndDecrementApcCount(Apc, APC_CONTEXT_ID_STACKWALK);

    if (entry) {
        entry->apc = NULL;
        entry->apc_queued = FALSE;
    }
}
/*
 * The NormalRoutine is executed in user mode when the APC is delivered.
 * Intentionally empty — all work is done in ApcKernelRoutine; the parameters
 * are referenced only to keep the build clean at high warning levels.
 */
STATIC
VOID
ApcNormalRoutine(
    _In_opt_ PVOID NormalContext,
    _In_opt_ PVOID SystemArgument1,
    _In_opt_ PVOID SystemArgument2)
{
    PAGED_CODE();
    UNREFERENCED_PARAMETER(NormalContext);
    UNREFERENCED_PARAMETER(SystemArgument1);
    UNREFERENCED_PARAMETER(SystemArgument2);
}
#define THREAD_STATE_TERMINATED 4
#define THREAD_STATE_WAIT       5
#define THREAD_STATE_INIT       0

/*
 * Thread-list enumeration callback that queues a stackwalk APC to each
 * eligible system thread. Skips non-system threads, the calling thread and
 * entries with no KTHREAD pointer.
 */
STATIC
VOID
ValidateThreadViaKernelApcCallback(
    _In_ PTHREAD_LIST_ENTRY Entry, _Inout_opt_ PVOID Context)
{
    PAGED_CODE();
    PKAPC apc = NULL;
    PLONG flags = NULL;
    LPCSTR proc_name = NULL;

    if (!ARGUMENT_PRESENT(Context))
        return;

    /*
     * For now, lets only check for system threads. However, we also want to
     * check for threads executing in kernel mode, i.e KTHREAD->PreviousMode
     * == UserMode.
     */
    if (Entry->owning_process != PsInitialSystemProcess)
        return;

    if (Entry->thread == KeGetCurrentThread() || !Entry->thread)
        return;

    /* Only resolved here, after filtering, since it is used for logging
     * only. (The unused prev_mode/state/apc_queueable locals the previous
     * version computed have been removed.) */
    proc_name = ImpPsGetProcessImageFileName(Entry->owning_process);

    DEBUG_VERBOSE(
        "Validating thread: %llx, process name: %s via kernel APC stackwalk.",
        Entry->thread,
        proc_name);

    /*
     * Its possible to set the KThread->ApcQueueable flag to false ensuring
     * that no APCs can be queued to the thread, as KeInsertQueueApc will
     * check this flag before queueing an APC so lets make sure we flip this
     * before before queueing ours. Since we filter out any system threads
     * this should be fine... c:
     */
    flags = RVA(PLONG, Entry->thread, KTHREAD_MISC_FLAGS_OFFSET);

    SetFlag(*flags, KTHREAD_MISC_FLAGS_ALERTABLE);
    SetFlag(*flags, KTHREAD_MISC_FLAGS_APC_QUEUEABLE);

    apc = ImpExAllocatePool2(POOL_FLAG_NON_PAGED, sizeof(KAPC), POOL_TAG_APC);

    if (!apc)
        return;

    ImpKeInitializeApc(
        apc,
        Entry->thread,
        OriginalApcEnvironment,
        ApcKernelRoutine,
        ApcRundownRoutine,
        ApcNormalRoutine,
        KernelMode,
        Context);

    if (!ImpKeInsertQueueApc(apc, NULL, NULL, IO_NO_INCREMENT)) {
        DEBUG_ERROR("KeInsertQueueApc failed with no status.");
        ImpExFreePoolWithTag(apc, POOL_TAG_APC);
        return;
    }

    Entry->apc = apc;
    Entry->apc_queued = TRUE;

    IncrementApcCount(APC_CONTEXT_ID_STACKWALK);
}
FORCEINLINE
STATIC
VOID
SetApcAllocationInProgress(_In_ PAPC_STACKWALK_CONTEXT Context)
{
    /* Marks the context as mid-enumeration so concurrent teardown paths can
     * see APCs are still being allocated/queued. */
    Context->header.allocation_in_progress = TRUE;
}
FORCEINLINE
STATIC
VOID
UnsetApcAllocationInProgress(_In_ PAPC_STACKWALK_CONTEXT Context)
{
    /* Clears the mid-enumeration marker set by SetApcAllocationInProgress. */
    Context->header.allocation_in_progress = FALSE;
}
/*
 * Since NMIs are only executed on the thread that is running on each logical
 * core, it makes sense to make use of APCs that, while can be masked off,
 * provide us to easily issue a callback routine to threads we want a stack
 * trace of. Hence by utilising both APCs and NMIs we get excellent coverage of
 * the entire system.
 *
 * Allocates an APC_STACKWALK_CONTEXT (freed later via the APC context
 * machinery), snapshots the system module list, and queues a stackwalk APC
 * to every eligible thread.
 */
NTSTATUS
ValidateThreadsViaKernelApc()
{
    PAGED_CODE();
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    PAPC_STACKWALK_CONTEXT context = NULL;

    /* First, ensure we dont already have an ongoing operation */
    GetApcContext(&context, APC_CONTEXT_ID_STACKWALK);

    if (context) {
        DEBUG_WARNING("Existing APC_STACKWALK operation already in progress.");
        return STATUS_SUCCESS;
    }

    context = ImpExAllocatePool2(
        POOL_FLAG_NON_PAGED,
        sizeof(APC_STACKWALK_CONTEXT),
        POOL_TAG_APC);

    if (!context)
        return STATUS_MEMORY_NOT_ALLOCATED;

    context->header.context_id = APC_CONTEXT_ID_STACKWALK;

    context->modules = ImpExAllocatePool2(
        POOL_FLAG_NON_PAGED,
        sizeof(SYSTEM_MODULES),
        POOL_TAG_APC);

    if (!context->modules) {
        ImpExFreePoolWithTag(context, POOL_TAG_APC);
        return STATUS_MEMORY_NOT_ALLOCATED;
    }

    status = GetSystemModuleInformation(context->modules);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("GetSystemModuleInformation failed with status %x", status);
        ImpExFreePoolWithTag(context->modules, POOL_TAG_APC);
        ImpExFreePoolWithTag(context, POOL_TAG_APC);
        /* Propagate the real failure status; previously this was masked
         * with STATUS_MEMORY_NOT_ALLOCATED even though no allocation
         * failed. */
        return status;
    }

    InsertApcContext(context);

    SetApcAllocationInProgress(context);
    ENUMERATE_THREADS(ValidateThreadViaKernelApcCallback, context);
    UnsetApcAllocationInProgress(context);

    return status;
}
/*
 * Releases the module snapshot owned by an APC_STACKWALK_CONTEXT. The inner
 * module buffer is freed before the SYSTEM_MODULES wrapper itself.
 */
VOID
FreeApcStackwalkApcContextInformation(_Inout_ PAPC_STACKWALK_CONTEXT Context)
{
    /*
     * Check Context->modules before dereferencing it. The previous code
     * read Context->modules->address first, dereferencing a potentially
     * NULL pointer.
     */
    if (Context->modules) {
        if (Context->modules->address)
            ImpExFreePoolWithTag(
                Context->modules->address,
                SYSTEM_MODULES_POOL);

        ImpExFreePoolWithTag(Context->modules, POOL_TAG_APC);
    }
}
/*
 * DPC routine queued to every core via KeGenericCallDpc. Captures a stack
 * trace of whatever was running on this core into the per-core DPC_CONTEXT
 * slot, then signals completion so KeGenericCallDpc can proceed.
 */
VOID
DpcStackwalkCallbackRoutine(
    _In_ PKDPC Dpc,
    _In_opt_ PVOID DeferredContext,
    _In_opt_ PVOID SystemArgument1,
    _In_opt_ PVOID SystemArgument2)
{
    UNREFERENCED_PARAMETER(Dpc);
    UNREFERENCED_PARAMETER(SystemArgument2);

    PDPC_CONTEXT context = NULL;

    if (!ARGUMENT_PRESENT(DeferredContext))
        return;

    context = &((PDPC_CONTEXT)DeferredContext)[KeGetCurrentProcessorNumber()];

    context->frames_captured = ImpRtlCaptureStackBackTrace(
        DPC_STACKWALK_FRAMES_TO_SKIP,
        DPC_STACKWALK_STACKFRAME_COUNT,
        &context->stack_frame,
        NULL);

    InterlockedExchange(&context->executed, TRUE);

/* MSVC's warning pragma takes a bare warning number; the previous
 * "disable : C6387" spelling is invalid syntax and suppressed nothing. */
#pragma warning(push)
#pragma warning(disable : 6387)
    ImpKeSignalCallDpcDone(SystemArgument1);
#pragma warning(pop)

    DEBUG_VERBOSE(
        "Executed DPC on core: %lx, with %lx frames captured.",
        KeGetCurrentProcessorNumber(),
        context->frames_captured);
}
/*
 * Builds, encrypts and queues a DPC_STACKWALK_REPORT packet for a stack
 * frame found outside any legitimate module region.
 */
STATIC
VOID
ReportDpcStackwalkViolation(
    _In_ PDPC_CONTEXT Context, _In_ UINT64 Frame, _In_ UINT32 ReportSubtype)
{
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    UINT32 len = 0;
    PDPC_STACKWALK_REPORT report = NULL;

    len = CryptRequestRequiredBufferLength(sizeof(DPC_STACKWALK_REPORT));
    report = ImpExAllocatePool2(POOL_FLAG_NON_PAGED, len, REPORT_POOL_TAG);

    if (!report)
        return;

    INIT_REPORT_PACKET(report, REPORT_DPC_STACKWALK, ReportSubtype);

    /* Explicit cast to the report field's integer type, consistent with
     * ReportApcStackwalkViolation. */
    report->kthread_address = (UINT64)PsGetCurrentThread();
    report->invalid_rip = Frame;
    // IntCopyMemory(report->driver,
    //              (UINT64)Context[core].stack_frame[frame]
    //              - 0x50,
    //              APC_STACKWALK_BUFFER_SIZE);

    status = CryptEncryptBuffer(report, len);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("CryptEncryptBuffer: %lx", status);
        ImpExFreePoolWithTag(report, REPORT_POOL_TAG);
        return;
    }

    IrpQueueSchedulePacket(report, len);
}
/*
 * Validates one core's captured DPC stack trace: flags an interrupted RIP
 * sitting on an exception-throwing RET, and any frame that lies outside a
 * legitimate module region. (Unused status/flag locals removed.)
 */
STATIC
VOID
ValidateDpcStackFrame(_In_ PDPC_CONTEXT Context, _In_ PSYSTEM_MODULES Modules)
{
    UINT64 rip = 0;

    /* With regards to this, lets only check the interrupted rip */
    if (DoesRetInstructionCauseException(Context->stack_frame[0]))
        ReportDpcStackwalkViolation(
            Context,
            Context->stack_frame[0],
            REPORT_SUBTYPE_EXCEPTION_THROWING_RET);

    for (UINT32 frame = 0; frame < Context->frames_captured; frame++) {
        rip = Context->stack_frame[frame];

        if (IsInstructionPointerInInvalidRegion(rip, Modules))
            ReportDpcStackwalkViolation(Context, rip, 0);
    }
}
/*
 * Walks every per-core DPC_CONTEXT slot, warning when a core's DPC never
 * ran, and validating each captured stack regardless. (Unused flag local
 * removed; the per-core pointer is now used consistently.)
 */
STATIC
VOID
ValidateDpcCapturedStack(
    _In_ PSYSTEM_MODULES Modules, _In_ PDPC_CONTEXT Context)
{
    PDPC_CONTEXT context = NULL;
    UINT32 count = ImpKeQueryActiveProcessorCount(0);

    for (UINT32 core = 0; core < count; core++) {
        context = &Context[core];

        if (!context->executed)
            DEBUG_WARNING(
                "DPC Stackwalk routine not executed. Core: %lx",
                core);

        ValidateDpcStackFrame(context, Modules);
    }
}
/*
 * Lets use DPCs as another form of stackwalking rather then inter-process
 * interrupts because DPCs run at IRQL = DISPATCH_LEVEL, allowing us to use
 * functions such as RtlCaptureStackBackTrace whereas IPIs run at IRQL =
 * IPI_LEVEL. DPCs are also harder to mask compared to APCs which can be masked
 * with the flip of a bit in the KTHREAD structure.
 */
NTSTATUS
DispatchStackwalkToEachCpuViaDpc()
{
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    SYSTEM_MODULES modules = {0};
    PDPC_CONTEXT dpc_contexts = NULL;
    UINT32 alloc_size =
        ImpKeQueryActiveProcessorCount(0) * sizeof(DPC_CONTEXT);

    /* One DPC_CONTEXT slot per active processor. */
    dpc_contexts =
        ImpExAllocatePool2(POOL_FLAG_NON_PAGED, alloc_size, POOL_TAG_DPC);

    if (!dpc_contexts)
        return STATUS_MEMORY_NOT_ALLOCATED;

    status = GetSystemModuleInformation(&modules);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("GetSystemModuleInformation failed with status %x", status);
        goto end;
    }

    /* KeGenericCallDpc will queue a DPC to each processor with importance =
     * HighImportance. This means our DPC will be inserted into the front of
     * the DPC queue and executed immediately.*/
    ImpKeGenericCallDpc(DpcStackwalkCallbackRoutine, dpc_contexts);

    /* Flush all DPC's in the system to ensure ours have run */
    KeFlushQueuedDpcs();

    ValidateDpcCapturedStack(&modules, dpc_contexts);

end:
    if (modules.address)
        ImpExFreePoolWithTag(modules.address, SYSTEM_MODULES_POOL);

    if (dpc_contexts)
        ImpExFreePoolWithTag(dpc_contexts, POOL_TAG_DPC);

    return status;
}
/* todo: walk the chain of pointers to prevent jmp chaining */
/*
 * Scans a table of function pointers and writes the LAST entry that falls
 * outside a legitimate module region to *Routine, or NULL if every entry is
 * valid.
 */
STATIC
VOID
ValidateTableDispatchRoutines(
    _In_ PVOID* Base,
    _In_ UINT32 Entries,
    _In_ PSYSTEM_MODULES Modules,
    _Out_ PVOID* Routine)
{
    /* Honour the _Out_ contract: *Routine must be defined on every path,
     * including when no invalid entry is found. */
    *Routine = NULL;

    for (UINT32 index = 0; index < Entries; index++) {
        if (!Base[index])
            continue;

        if (IsInstructionPointerInInvalidRegion((UINT64)Base[index], Modules))
            *Routine = Base[index];
    }
}
/*
 * windows version info: https://www.techthoughts.info/windows-version-numbers/
 *
 * sizes:
 * https://www.vergiliusproject.com/kernels/x64/Windows%2011/22H2%20(2022%20Update)/HAL_PRIVATE_DISPATCH
 */
#define HAL_PRIVATE_DISPATCH_W11_22H2_SIZE 0x4f0
#define HAL_PRIVATE_DISPATCH_W10_22H2_SIZE 0x4b0
#define WINDOWS_10_MAX_BUILD_NUMBER        19045

/*
 * Returns the number of routine slots in HalPrivateDispatchTable for the
 * running OS build (table size in bytes divided by pointer size, minus the
 * leading header qword).
 */
STATIC
UINT32
GetHalPrivateDispatchTableRoutineCount(_In_ PRTL_OSVERSIONINFOW VersionInfo)
{
    UINT32 table_size =
        (VersionInfo->dwBuildNumber <= WINDOWS_10_MAX_BUILD_NUMBER)
            ? HAL_PRIVATE_DISPATCH_W10_22H2_SIZE
            : HAL_PRIVATE_DISPATCH_W11_22H2_SIZE;

    return (table_size / sizeof(UINT64)) - 1;
}
/*
 * Validates every routine slot in HalPrivateDispatchTable against the system
 * module list. On success *Routine holds the last invalid routine found, or
 * NULL if the table is clean.
 */
STATIC
NTSTATUS
ValidateHalPrivateDispatchTable(
    _Out_ PVOID* Routine, _In_ PSYSTEM_MODULES Modules)
{
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    PVOID table = NULL;
    UNICODE_STRING string = RTL_CONSTANT_STRING(L"HalPrivateDispatchTable");
    PVOID* base = NULL;
    RTL_OSVERSIONINFOW os_info = {0};
    UINT32 count = 0;

    /* Honour the _Out_ contract on every path, including early returns. */
    *Routine = NULL;

    DEBUG_VERBOSE("Validating HalPrivateDispatchTable.");

    table = ImpMmGetSystemRoutineAddress(&string);

    if (!table)
        return status;

    status = GetOsVersionInformation(&os_info);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("GetOsVersionInformation failed with status %x", status);
        return status;
    }

    /* Skip the leading header qword (table version), then validate each
     * routine slot. Explicit cast replaces the previous implicit
     * UINT64 -> PVOID* conversion. */
    base = (PVOID*)((UINT64)table + sizeof(UINT64));
    count = GetHalPrivateDispatchTableRoutineCount(&os_info);

    ValidateTableDispatchRoutines(base, count, Modules, Routine);

    return status;
}
/*
 * Validates the exported HalDispatchTable routines against the system module
 * list. *Routine receives the FIRST invalid routine found (matching the
 * original early-exit behaviour), or NULL if all are valid.
 *
 * Since windows exports all the function pointers inside the
 * HalDispatchTable, we may aswell make use of them and validate it this
 * way. While it definitely is ugly, it is the safest way to do it.
 *
 * What if there are 2 invalid routines? hmm.. tink.
 */
STATIC
VOID
ValidateHalDispatchTable(_Out_ PVOID* Routine, _In_ PSYSTEM_MODULES Modules)
{
    /*
     * Table-driven replacement for the previous 18-way copy-pasted
     * if/goto chain; the check order is identical. Local (non-static)
     * array because the Hal* names expand to dispatch-table loads.
     */
    const PVOID dispatch_routines[] = {
        HalQuerySystemInformation,
        HalSetSystemInformation,
        HalQueryBusSlots,
        HalReferenceHandlerForBus,
        HalReferenceBusHandler,
        HalDereferenceBusHandler,
        HalInitPnpDriver,
        HalInitPowerManagement,
        HalGetDmaAdapter,
        HalGetInterruptTranslator,
        HalStartMirroring,
        HalEndMirroring,
        HalMirrorPhysicalMemory,
        HalEndOfBoot,
        HalMirrorVerify,
        HalGetCachedAcpiTable,
        HalSetPciErrorHandlerCallback,
        HalGetPrmCache};

    *Routine = NULL;

    DEBUG_VERBOSE("Validating HalDispatchTable.");

    for (UINT32 index = 0;
         index < sizeof(dispatch_routines) / sizeof(dispatch_routines[0]);
         index++) {
        if (IsInstructionPointerInInvalidRegion(
                (UINT64)dispatch_routines[index],
                Modules)) {
            *Routine = dispatch_routines[index];
            return;
        }
    }
}
/*
 * Builds, encrypts and queues a DATA_TABLE_ROUTINE_REPORT for an invalid
 * dispatch-table routine, including a copy of the routine's leading bytes.
 */
STATIC
VOID
ReportDataTableInvalidRoutine(_In_ TABLE_ID TableId, _In_ UINT64 Address)
{
    UINT32 packet_len =
        CryptRequestRequiredBufferLength(sizeof(DATA_TABLE_ROUTINE_REPORT));
    PDATA_TABLE_ROUTINE_REPORT packet = NULL;
    NTSTATUS status = STATUS_UNSUCCESSFUL;

    packet =
        ImpExAllocatePool2(POOL_FLAG_NON_PAGED, packet_len, REPORT_POOL_TAG);

    if (!packet)
        return;

    DEBUG_WARNING(
        "Invalid data table routine found. Table: %lx, Address: %llx",
        TableId,
        Address);

    INIT_REPORT_PACKET(packet, REPORT_DATA_TABLE_ROUTINE, 0);

    packet->table_id = TableId;
    packet->address = Address;
    packet->index = 0;

    /* Capture the first bytes of the flagged routine for analysis. */
    IntCopyMemory(packet->routine, Address, DATA_TABLE_ROUTINE_BUF_SIZE);

    status = CryptEncryptBuffer(packet, packet_len);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("CryptEncryptBuffer: %lx", status);
        ImpExFreePoolWithTag(packet, REPORT_POOL_TAG);
        return;
    }

    IrpQueueSchedulePacket(packet, packet_len);
}
NTSTATUS
ValidateHalDispatchTables()
{
NTSTATUS status = STATUS_UNSUCCESSFUL;
SYSTEM_MODULES modules = {0};
PVOID routine1 = NULL;
PVOID routine2 = NULL;
status = GetSystemModuleInformation(&modules);
if (!NT_SUCCESS(status)) {
DEBUG_ERROR("GetSystemModuleInformation failed with status %x", status);
return status;
}
ValidateHalDispatchTable(&routine1, &modules);
if (routine1)
ReportDataTableInvalidRoutine(HalDispatch, routine1);
else
DEBUG_VERBOSE("HalDispatch dispatch routines are valid.");
status = ValidateHalPrivateDispatchTable(&routine2, &modules);
if (!NT_SUCCESS(status)) {
DEBUG_ERROR(
"ValidateHalPrivateDispatchTable failed with status %x",
status);
goto end;
}
if (routine2)
ReportDataTableInvalidRoutine(HalPrivateDispatch, routine2);
else
DEBUG_VERBOSE("HalPrivateDispatch dispatch routines are valid.");
end:
if (modules.address)
ImpExFreePoolWithTag(modules.address, SYSTEM_MODULES_POOL);
return status;
}
/*
 * Looks up a DRIVER_OBJECT by name by walking the "\Driver" object-manager
 * directory's hash buckets under its push lock.
 *
 * NOTE(review): on the not-found path *DriverObject stays NULL but the
 * return value is still STATUS_SUCCESS — callers must check *DriverObject,
 * not just the status. The directory object reference is dropped before
 * returning, so the returned DRIVER_OBJECT pointer carries no extra
 * reference of its own; confirm callers' lifetime assumptions.
 */
NTSTATUS
GetDriverObjectByDriverName(
    _In_ PUNICODE_STRING DriverName, _Out_ PDRIVER_OBJECT* DriverObject)
{
    HANDLE handle = NULL;
    OBJECT_ATTRIBUTES attributes = {0};
    PVOID dir = {0};
    UNICODE_STRING dir_name = {0};
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    POBJECT_DIRECTORY dir_object = NULL;
    POBJECT_DIRECTORY_ENTRY entry = NULL;
    POBJECT_DIRECTORY_ENTRY sub_entry = NULL;
    PDRIVER_OBJECT driver = NULL;

    *DriverObject = NULL;

    ImpRtlInitUnicodeString(&dir_name, L"\\Driver");

    InitializeObjectAttributes(
        &attributes,
        &dir_name,
        OBJ_CASE_INSENSITIVE,
        NULL,
        NULL);

    status =
        ImpZwOpenDirectoryObject(&handle, DIRECTORY_ALL_ACCESS, &attributes);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("ZwOpenDirectoryObject failed with status %x", status);
        return status;
    }

    /* Convert the handle into a referenced OBJECT_DIRECTORY pointer. */
    status = ImpObReferenceObjectByHandle(
        handle,
        DIRECTORY_ALL_ACCESS,
        NULL,
        KernelMode,
        &dir,
        NULL);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("ObReferenceObjectByHandle failed with status %x", status);
        ImpZwClose(handle);
        return status;
    }

    dir_object = (POBJECT_DIRECTORY)dir;

    /* Hold the directory's push lock while walking the bucket chains so
     * entries cannot be inserted/removed underneath us. */
    ImpExAcquirePushLockExclusiveEx(&dir_object->Lock, NULL);

    for (UINT32 index = 0; index < NUMBER_HASH_BUCKETS; index++) {
        entry = dir_object->HashBuckets[index];

        if (!entry)
            continue;

        sub_entry = entry;

        /* Each bucket is a singly linked chain of directory entries. */
        while (sub_entry) {
            driver = GetObjectFromDirectory(sub_entry);

            /* RtlCompareUnicodeString returns 0 on equality (case
             * sensitive here, since the third argument is FALSE). */
            if (!RtlCompareUnicodeString(
                    DriverName,
                    &driver->DriverName,
                    FALSE)) {
                *DriverObject = driver;
                goto end;
            }

            sub_entry = GetNextObject(sub_entry);
        }
    }

end:
    ImpExReleasePushLockExclusiveEx(&dir_object->Lock, 0);
    ImpObDereferenceObject(dir);
    ImpZwClose(handle);
    return STATUS_SUCCESS;
}
/*
 * Walks the kernel loader's InLoadOrderLinks list (reached through an
 * arbitrary DRIVER_OBJECT's DriverSection) to find a module's image base by
 * its BaseDllName — without calling any enumeration API.
 *
 * NOTE(review): wcscmp assumes BaseDllName.Buffer is NUL-terminated, which
 * UNICODE_STRING does not guarantee — see the todo below. Also, the loop
 * bound compares entry->InLoadOrderLinks.Flink against `first`, so the
 * traversal's start/stop conditions depend on the list's exact layout;
 * confirm before modifying.
 */
PVOID
FindDriverBaseNoApi(_In_ PDRIVER_OBJECT DriverObject, _In_ PWCH Name)
{
    PKLDR_DATA_TABLE_ENTRY first = NULL;
    PKLDR_DATA_TABLE_ENTRY entry = NULL;

    /* first entry contains invalid data, 2nd entry is the kernel */
    first = (PKLDR_DATA_TABLE_ENTRY)DriverObject->DriverSection;
    entry = ((PKLDR_DATA_TABLE_ENTRY)DriverObject->DriverSection)
                ->InLoadOrderLinks.Flink->Flink;

    while (entry->InLoadOrderLinks.Flink != first) {
        /* todo: write our own unicode string comparison function, since
         * the entire point of this is to find exports with no exports.
         */
        if (!wcscmp(entry->BaseDllName.Buffer, Name)) {
            return entry->DllBase;
        }

        entry = entry->InLoadOrderLinks.Flink;
    }

    return NULL;
}
/*
 * Stub: generic dispatch-table validation is not yet implemented. The
 * parameters are referenced so the build stays clean at high warning levels
 * until the implementation lands.
 */
VOID
ValidateDispatchTableRoutines(_In_ PVOID* Table, _In_ UINT32 Entries)
{
    UNREFERENCED_PARAMETER(Table);
    UNREFERENCED_PARAMETER(Entries);
}
/*
 * Returns the first module whose FullPathName contains ModuleName as a
 * substring, or NULL if no module matches.
 */
PRTL_MODULE_EXTENDED_INFO
FindModuleByName(_In_ PSYSTEM_MODULES Modules, _In_ PCHAR ModuleName)
{
    PRTL_MODULE_EXTENDED_INFO module_array =
        (PRTL_MODULE_EXTENDED_INFO)Modules->address;

    for (UINT32 idx = 0; idx < Modules->module_count; idx++) {
        if (IntFindSubstring(module_array[idx].FullPathName, ModuleName))
            return &module_array[idx];
    }

    return NULL;
}
#define KERNEL_LOW_ADDRESS  0xFFFF000000000000
#define KERNEL_HIGH_ADDRESS 0xFFFFFFFFFFFFFFFF

/*
 * Returns TRUE when Address lies in the canonical kernel address range AND
 * is backed by a valid page per MmIsAddressValid.
 */
BOOLEAN
IsValidKernelAddress(_In_ UINT64 Address)
{
    /*
     * KERNEL_HIGH_ADDRESS equals UINT64's maximum value, so the previous
     * "Address <= KERNEL_HIGH_ADDRESS" comparison was a tautology; only
     * the lower bound needs checking.
     */
    if (Address < KERNEL_LOW_ADDRESS)
        return FALSE;

    /* Explicit cast — MmIsAddressValid takes a PVOID. */
    if (!MmIsAddressValid((PVOID)Address))
        return FALSE;

    return TRUE;
}
/*
 * Follows a chain of valid pointers until a pointer is no longer present in the
 * chain, and returns the final pointer. Assumes the argument "Start" contains a
 * valid pointer at its address.
 *
 * The try catch here is also useless. We can work on making this more secure
 * later.
 *
 * NOTE(review): `prev` is initialised to Start itself (not *Start), so when
 * the very first dereference target is not a valid kernel address the
 * function returns the original pointer unchanged — the caller in
 * ValidateWin32kBase_gDxgInterface relies on this to treat un-chained
 * function pointers as their own "ending". This is a loose heuristic for
 * detecting jmp-chained hooks; confirm intent before tightening the types.
 */
PVOID
FindChainedPointerEnding(_In_ PVOID* Start)
{
    PVOID* current = *Start;
    PVOID prev = Start;

    while (IsValidKernelAddress(current)) {
        __try {
            /* Remember the last address that dereferenced cleanly, then
             * step one link further down the chain. */
            prev = current;
            current = *current;
        }
        __except (EXCEPTION_EXECUTE_HANDLER) {
            /* Dereference faulted: the previous link is the chain's end. */
            return prev;
        }
    }

    return prev;
}
/* Number of function pointers in win32kbase!gDxgkInterface (excluding the 3
 * leading housekeeping qwords) — see the windbg dump below. */
#define WIN32KBASE_DXGKRNL_INTERFACE_FUNC_COUNT 98
// clang-format off
/*
* ffffa135`fa847828 fffff805`5c7ccf60
* ffffa135`fa847828 fffff805`5c7ccf60 dxgkrnl!DXG_GUEST_COMPOSITIONOBJECTCHANNEL::ChannelStarted
* ffffa135`fa847830 fffff805`5c7ccf60 dxgkrnl!DXG_GUEST_COMPOSITIONOBJECTCHANNEL::ChannelStarted
* ffffa135`fa847838 fffff805`5c7e4ca0 dxgkrnl!DxgkProcessCallout
* ffffa135`fa847840 fffff805`5c7b2580 dxgkrnl!DxgkNotifyProcessFreezeCallout
* ffffa135`fa847848 fffff805`5c7b2430 dxgkrnl!DxgkNotifyProcessThawCallout
* ffffa135`fa847850 fffff805`5c7daf30 dxgkrnl!DxgkOpenAdapter
* ffffa135`fa847858 fffff805`5c7ff6e0 dxgkrnl!DxgkEnumAdapters2Impl
* ffffa135`fa847860 fffff805`5c839f00 dxgkrnl!DxgkGetMaximumAdapterCount
* ffffa135`fa847868 fffff805`5c7e37c0 dxgkrnl!DxgkCloseAdapterImpl
* ffffa135`fa847870 fffff805`5c7b3970 dxgkrnl!DxgkDestroyDevice
* ffffa135`fa847878 fffff805`5c7c8370 dxgkrnl!DxgkEscape
* ffffa135`fa847880 fffff805`5c7c58d0 dxgkrnl!DxgkGetPresentHistoryInternal
* ffffa135`fa847888 fffff805`5c9569a0 dxgkrnl!DxgkReleaseProcessVidPnSourceOwners
* ffffa135`fa847890 fffff805`5c8f4de0 dxgkrnl!DxgkPollDisplayChildrenInternal
* ffffa135`fa847898 fffff805`5c837390 dxgkrnl!DxgkFlushPresentHistory
* ffffa135`fa8478a0 fffff805`5c802e00 dxgkrnl!DxgkGetPathsModality
* ffffa135`fa8478a8 fffff805`5c82e7c0 dxgkrnl!DxgkFunctionalizePathsModality
* ffffa135`fa8478b0 fffff805`5c82e6d0 dxgkrnl!DxgkApplyPathsModality
* ffffa135`fa8478b8 fffff805`5c819740 dxgkrnl!DxgkFinalizePathsModality
* ffffa135`fa8478c0 fffff805`5c7b01c0 dxgkrnl!DxgkPersistPathsModality
* ffffa135`fa8478c8 fffff805`5c839d80 dxgkrnl!DxgkFreePathsModality
* ffffa135`fa8478d0 fffff805`5c816870 dxgkrnl!DxgkAugmentCdsj
* ffffa135`fa8478d8 fffff805`5c821270 dxgkrnl!DxgkGetPresentHistoryReadyEvent
* ffffa135`fa8478e0 fffff805`5c806eb0 dxgkrnl!DxgkGetDisplayConfigBufferSizes
* ffffa135`fa8478e8 fffff805`5c8070e0 dxgkrnl!DxgkQueryDisplayConfig
* ffffa135`fa8478f0 fffff805`5c9677d0 dxgkrnl!DxgkHandleForceProjectionMonitor
* ffffa135`fa8478f8 fffff805`5c838f10 dxgkrnl!DxgkUpdateCddDevmodeExtraData
* ffffa135`fa847900 fffff805`5c967ca0 dxgkrnl!DxgkProcessDisplayCalloutBatch
* ffffa135`fa847908 fffff805`5c7f8880 dxgkrnl!DxgkDisplayConfigDeviceInfo
* ffffa135`fa847910 fffff805`5c7e11f0 dxgkrnl!DxgkGetAdapterDeviceDesc
* ffffa135`fa847918 fffff805`5c7e9200 dxgkrnl!DxgkGetMonitorInternalInfo
* ffffa135`fa847920 fffff805`5c82a4f0 dxgkrnl!DxgkBeginTopologyTransition
* ffffa135`fa847928 fffff805`5c829f50 dxgkrnl!DxgkCompleteTopologyTransition
* ffffa135`fa847930 fffff805`5c8f4130 dxgkrnl!DxgkNeedToEnableCddPrimary
* ffffa135`fa847938 fffff805`5c82a090 dxgkrnl!DxgkInvalidateMonitorConnections
* ffffa135`fa847940 fffff805`5c807340 dxgkrnl!DxgkWriteDiagEntry
* ffffa135`fa847948 fffff805`5c815800 dxgkrnl!DxgkGetAdapterDefaultScaling
* ffffa135`fa847950 fffff805`5c816240 dxgkrnl!DxgkConvertDisplayConfigCScalingToDdiScaling
* ffffa135`fa847958 fffff805`5c8397e0 dxgkrnl!DxgkGetGlobalRawmodeFlag
* ffffa135`fa847960 fffff805`5c967e70 dxgkrnl!DxgkSetGlobalRawmodeFlag
* ffffa135`fa847968 fffff805`5c839530 dxgkrnl!DxgkQueryModeListCacheLuid
* ffffa135`fa847970 fffff805`5c826ff0 dxgkrnl!DxgkThreadCallout
* ffffa135`fa847978 fffff805`5c829c40 dxgkrnl!DxgkSessionConnected
* ffffa135`fa847980 fffff805`5c829a60 dxgkrnl!DxgkPreSessionDisconnected
* ffffa135`fa847988 fffff805`5c829b90 dxgkrnl!DxgkSessionDisconnected
* ffffa135`fa847990 fffff805`5c844420 dxgkrnl!DxgkSessionReconnected
* ffffa135`fa847998 fffff805`5c8440f0 dxgkrnl!DxgkGetAdapter
* ffffa135`fa8479a0 fffff805`5c844290 dxgkrnl!DxgkReleaseAdapter
* ffffa135`fa8479a8 fffff805`5c82c200 dxgkrnl!DxgkDesktopSwitch
* ffffa135`fa8479b0 fffff805`5c811860 dxgkrnl!DxgkStatusChangeNotify
* ffffa135`fa8479b8 fffff805`5c928fd0 dxgkrnl!DxgkEnableUnorderedWaitsForDevice
* ffffa135`fa8479c0 fffff805`5c839670 dxgkrnl!DxgkCddVerifyCddDevMode
* ffffa135`fa8479c8 fffff805`5c93bf30 dxgkrnl!DxgkIsVidPnSourceOwnerDwm
* ffffa135`fa8479d0 fffff805`5c8377a0 dxgkrnl!DxgkIsVidPnSourceOwnerExclusive
* ffffa135`fa8479d8 fffff805`5c7f8720 dxgkrnl!DxgkGetMonitorDeviceObject
* ffffa135`fa8479e0 fffff805`5c831680 dxgkrnl!DxgkRegisterDwmProcess
* ffffa135`fa8479e8 fffff805`5c8fa0a0 dxgkrnl!DxgkGetSharedResourceAdapterLuid
* ffffa135`fa8479f0 fffff805`5c8e7590 dxgkrnl!DxgkNotifyMonitorDimming
* ffffa135`fa8479f8 fffff805`5c820d10 dxgkrnl!DxgkGetSharedAllocationObjectType
* ffffa135`fa847a00 fffff805`5c820d20 dxgkrnl!DxgkGetSharedSyncObjectType
* ffffa135`fa847a08 fffff805`5c83b1b0 dxgkrnl!DxgkGetDisplayManagerObjectType
* ffffa135`fa847a10 fffff805`5c93be10 dxgkrnl!DxgkGetProcessInterferenceCount
* ffffa135`fa847a18 fffff805`5c839cd0 dxgkrnl!DxgkGetGpuUsageStatistics
* ffffa135`fa847a20 fffff805`5c815320 dxgkrnl!DxgkUpdateGdiInfo
* ffffa135`fa847a28 fffff805`5c8393d0 dxgkrnl!DxgkSetPresenterViewMode
* ffffa135`fa847a30 fffff805`5c836930 dxgkrnl!DxgkGetPresenterViewMode
* ffffa135`fa847a38 fffff805`5c827820 dxgkrnl!DxgkSetProcessStatus
* ffffa135`fa847a40 fffff805`5c7fa180 dxgkrnl!DxgkConvertLegacyQDCAdapterAndIdToActual
* ffffa135`fa847a48 fffff805`5c81b510 dxgkrnl!DxgkDisplayOnOff
* ffffa135`fa847a50 fffff805`5c815c30 dxgkrnl!DxgkIsVirtualizationDisabledForTarget
* ffffa135`fa847a58 fffff805`5c8378f0 dxgkrnl!DxgkIsSourceInHardwareClone
* ffffa135`fa847a60 fffff805`5c96d7d0 dxgkrnl!DxgkProcessLockScreen
* ffffa135`fa847a68 fffff805`5c964bd0 dxgkrnl!DxgkCopyPathsModality
* ffffa135`fa847a70 fffff805`5c964b30 dxgkrnl!DxgkApplyCdsjToPathsModality
* ffffa135`fa847a78 fffff805`5c979410 dxgkrnl!DxgkUpdateDpiInfoForNewOverride
* ffffa135`fa847a80 fffff805`5c839a00 dxgkrnl!DxgkInitializeDpi
* ffffa135`fa847a88 fffff805`5c839930 dxgkrnl!DxgkGetDpiOverrideForSource
* ffffa135`fa847a90 fffff805`5c980420 dxgkrnl!DxgkGetLegacyDpiInfo
* ffffa135`fa847a98 fffff805`5c94e0e0 dxgkrnl!DxgkWin32kSetPointerPosition
* ffffa135`fa847aa0 fffff805`5c94e240 dxgkrnl!DxgkWin32kSetPointerShape
* ffffa135`fa847aa8 fffff805`5c844730 dxgkrnl!DxgkGetUseHWGPUInRemoteSession
* ffffa135`fa847ab0 fffff805`5c945520 dxgkrnl!DxgkLPMDisplayControl
* ffffa135`fa847ab8 fffff805`5c945470 dxgkrnl!DxgkEnableHighPrecisionBrightness
* ffffa135`fa847ac0 fffff805`5c945640 dxgkrnl!DxgkSetHighPrecisionBrightness
* ffffa135`fa847ac8 fffff805`5c844670 dxgkrnl!DxgkChangeD3RequestsState
* ffffa135`fa847ad0 fffff805`5c836b90 dxgkrnl!DxgkGetMonitorEdid
* ffffa135`fa847ad8 fffff805`5c967620 dxgkrnl!DxgkConvertPathsModalityToDisplayConfig
* ffffa135`fa847ae0 fffff805`5c815d40 dxgkrnl!DxgkConvertDisplayConfigToDevMode
* ffffa135`fa847ae8 fffff805`5c7febd0 dxgkrnl!DxgkDDisplayEnumInternal
* ffffa135`fa847af0 fffff805`5c9677a0 dxgkrnl!DxgkGetMonitorDisplayId
* ffffa135`fa847af8 fffff805`5c964c60 dxgkrnl!DxgkEnumerateModesForPathsModality
* ffffa135`fa847b00 fffff805`5c8f0e70 dxgkrnl!DxgCreateLiveDumpWithWdLogs
* ffffa135`fa847b08 fffff805`5c9818d0 dxgkrnl!DxgkDispMgrReferenceObjectByHandle
* ffffa135`fa847b10 fffff805`5c9818b0 dxgkrnl!DxgkDispMgrIsTargetOwned
* ffffa135`fa847b18 fffff805`5c98bb20 dxgkrnl!DxgkCheckDisplayState
* ffffa135`fa847b20 fffff805`5c8363c0 dxgkrnl!DxgkSetKernelDisplayPolicy
* ffffa135`fa847b28 fffff805`5c839720 dxgkrnl!DxgkSendDisplayBrokerMessage
* ffffa135`fa847b30 fffff805`5c96fb30 dxgkrnl!DxgkGetWddmRemoteSessionGdiViewRange
*/
// clang-format on
/*
 * Builds, encrypts and queues a DATA_TABLE_ROUTINE_REPORT for an invalid
 * entry in win32kbase's gDxgkInterface table.
 */
STATIC
VOID
ReportWin32kBase_DxgInterfaceViolation(
    _In_ UINT32 TableIndex, _In_ UINT64 Address)
{
    UINT32 packet_len =
        CryptRequestRequiredBufferLength(sizeof(DATA_TABLE_ROUTINE_REPORT));
    PDATA_TABLE_ROUTINE_REPORT packet = NULL;
    NTSTATUS status = STATUS_UNSUCCESSFUL;

    packet =
        ImpExAllocatePool2(POOL_FLAG_NON_PAGED, packet_len, REPORT_POOL_TAG);

    if (!packet)
        return;

    INIT_REPORT_PACKET(packet, REPORT_DATA_TABLE_ROUTINE, 0);

    packet->table_id = Win32kBase_gDxgInterface;
    packet->address = Address;
    packet->index = TableIndex;
    // todo! report->routine = ??
    // todo: maybe get routine by name from index ?

    status = CryptEncryptBuffer(packet, packet_len);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("CryptEncryptBuffer: %lx", status);
        ImpExFreePoolWithTag(packet, REPORT_POOL_TAG);
        return;
    }

    IrpQueueSchedulePacket(packet, packet_len);
}
/*
 * Validates win32kbase!gDxgkInterface: attaches to winlogon (the table lives
 * in session address space), resolves the export, then verifies every entry
 * — following any pointer chain — lands inside dxgkrnl.sys.
 */
STATIC
NTSTATUS
ValidateWin32kBase_gDxgInterface()
{
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    SYSTEM_MODULES modules = {0};
    PRTL_MODULE_EXTENDED_INFO win32kbase = NULL;
    PRTL_MODULE_EXTENDED_INFO dxgkrnl = NULL;
    KAPC_STATE apc = {0};
    PKPROCESS winlogon = NULL;
    PVOID* dxg_interface = NULL;
    PVOID entry = NULL;

    status = GetSystemModuleInformation(&modules);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("GetSystemModuleInformation failed %x", status);
        return status;
    }

    win32kbase = FindModuleByName(&modules, "win32kbase.sys");

    if (!win32kbase) {
        status = STATUS_UNSUCCESSFUL;
        goto end;
    }

    RtlHashmapEnumerate(GetProcessHashmap(), FindWinLogonProcess, &winlogon);

    if (!winlogon) {
        status = STATUS_UNSUCCESSFUL;
        goto end;
    }

    /* Attach to winlogon so the session-space export is mapped. */
    KeStackAttachProcess(winlogon, &apc);

    dxg_interface = PeFindExportByName(win32kbase->ImageBase, "gDxgkInterface");

    if (!dxg_interface) {
        status = STATUS_UNSUCCESSFUL;
        goto detach;
    }

    /* The functions in this table reside in dxgkrnl.sys */
    dxgkrnl = FindModuleByName(&modules, "dxgkrnl.sys");

    if (!dxgkrnl) {
        status = STATUS_UNSUCCESSFUL;
        goto detach;
    }

    /* first 3 qwords are housekeeping. */
    for (UINT32 index = 3; index < WIN32KBASE_DXGKRNL_INTERFACE_FUNC_COUNT + 3;
         index++) {
        if (!dxg_interface[index])
            continue;

        /* Walk any pointer chain (jmp-chained hook) to its real target
         * before checking which module it lives in. */
        entry = FindChainedPointerEnding((PVOID*)dxg_interface[index]);
#if DEBUG
        DEBUG_INFO("chain entry test: %p", entry);
        DEBUG_INFO("regular entry: %p", dxg_interface[index]);
#endif
        if (!IsInstructionPointerInsideSpecifiedModule((UINT64)entry,
                                                       dxgkrnl)) {
            DEBUG_ERROR("invalid entry!!!");
            ReportWin32kBase_DxgInterfaceViolation(index, (UINT64)entry);
        }
    }

detach:
    KeUnstackDetachProcess(&apc);
end:
    /* Use the Imp* wrapper for consistency with every other free in this
     * file (previously a raw ExFreePoolWithTag call). */
    if (modules.address)
        ImpExFreePoolWithTag(modules.address, SYSTEM_MODULES_POOL);

    return status;
}
/* todo: win32kEngInterface */
/*
 * Entry point for win32k dispatch table validation. Currently only the
 * win32kbase gDxgkInterface table is checked; win32kEngInterface is still
 * a todo (see comment above in the original file).
 */
NTSTATUS
ValidateWin32kDispatchTables()
{
    NTSTATUS status = ValidateWin32kBase_gDxgInterface();

    if (!NT_SUCCESS(status))
        DEBUG_ERROR("ValidateWin32kBase_gDxgInterface: %x", status);

    return status;
}
================================================
FILE: driver/modules.h
================================================
#ifndef MODULES_H
#define MODULES_H

#include
#include

#include "common.h"

/* Identifies which APC-based operation an APC belongs to. */
typedef struct _APC_OPERATION_ID {
    int operation_id;
} APC_OPERATION_ID, *PAPC_OPERATION_ID;

/* system modules information */
typedef struct _SYSTEM_MODULES {
    /* Buffer of module entries returned by the system module query;
     * freed by the owner with the SYSTEM_MODULES_POOL tag. */
    PVOID address;
    INT module_count;
} SYSTEM_MODULES, *PSYSTEM_MODULES;

#define APC_CONTEXT_ID_STACKWALK    0x1
#define APC_CONTEXT_ID_STARTADDRESS 0x2

/* Header shared by all APC context structures; context_id selects the
 * concrete type (APC_CONTEXT_ID_*). */
typedef struct _APC_CONTEXT_HEADER {
    LONG context_id;
    volatile INT count;
    volatile INT allocation_in_progress;
} APC_CONTEXT_HEADER, *PAPC_CONTEXT_HEADER;

typedef struct _APC_STACKWALK_CONTEXT {
    APC_CONTEXT_HEADER header;
    PSYSTEM_MODULES modules;
} APC_STACKWALK_CONTEXT, *PAPC_STACKWALK_CONTEXT;

NTSTATUS
GetSystemModuleInformation(_Out_ PSYSTEM_MODULES ModuleInformation);

NTSTATUS
HandleValidateDriversIOCTL();

PRTL_MODULE_EXTENDED_INFO
FindSystemModuleByName(_In_ LPCSTR ModuleName,
                       _In_ PSYSTEM_MODULES SystemModules);

NTSTATUS
HandleNmiIOCTL();

NTSTATUS
ValidateThreadsViaKernelApc();

VOID
FreeApcStackwalkApcContextInformation(_Inout_ PAPC_STACKWALK_CONTEXT Context);

BOOLEAN
IsInstructionPointerInInvalidRegion(_In_ UINT64 Rip,
                                    _In_ PSYSTEM_MODULES SystemModules);

/* Note: this prototype was previously declared twice in this header; the
 * redundant second declaration has been removed. */
PVOID
FindDriverBaseNoApi(_In_ PDRIVER_OBJECT DriverObject, _In_ PWCH Name);

NTSTATUS
DispatchStackwalkToEachCpuViaDpc();

NTSTATUS
ValidateHalDispatchTables();

NTSTATUS
GetDriverObjectByDriverName(_In_ PUNICODE_STRING DriverName,
                            _Out_ PDRIVER_OBJECT* DriverObject);

NTSTATUS
ValidateWin32kDispatchTables();

#endif
================================================
FILE: driver/pe.c
================================================
#include "pe.h"
#include "lib/stdlib.h"
/*
 * Resolve the NT header of a PE image, validating the image base with
 * MmIsAddressValid before dereferencing. Returns NULL when the base page
 * is not valid or the DOS signature does not match.
 */
PNT_HEADER_64
PeGetNtHeaderSafe(_In_ PVOID Image)
{
    PIMAGE_DOS_HEADER dos_header = (PIMAGE_DOS_HEADER)Image;

    if (!MmIsAddressValid(Image) ||
        dos_header->e_magic != IMAGE_DOS_SIGNATURE)
        return NULL;

    return RVA(PNT_HEADER_64, Image, dos_header->e_lfanew);
}
/*
 * Resolve the NT header of a PE image without any address validation.
 * Callers must guarantee Image points at readable memory. Returns NULL
 * only when the DOS signature does not match.
 */
PNT_HEADER_64
PeGetNtHeader(_In_ PVOID Image)
{
    PIMAGE_DOS_HEADER dos_header = (PIMAGE_DOS_HEADER)Image;

    if (dos_header->e_magic != IMAGE_DOS_SIGNATURE)
        return NULL;

    return RVA(PNT_HEADER_64, Image, dos_header->e_lfanew);
}
/*
 * Return the export data directory of a PE image, or NULL when the image
 * has no NT header or the export entry is outside the directory table.
 */
PIMAGE_DATA_DIRECTORY
PeGetExportDataDirectory(_In_ PVOID Image)
{
    PNT_HEADER_64 nt = PeGetNtHeader(Image);

    /* PeGetNtHeader returns NULL on a bad DOS signature; previously this
     * pointer was dereferenced unconditionally. */
    if (!nt)
        return NULL;

    if (IMAGE_DIRECTORY_ENTRY_EXPORT >= nt->OptionalHeader.NumberOfRvaAndSizes)
        return NULL;

    return (PIMAGE_DATA_DIRECTORY)&nt->OptionalHeader
        .DataDirectory[IMAGE_DIRECTORY_ENTRY_EXPORT];
}
/*
 * Address-validating variant of PeGetExportDataDirectory. Returns NULL
 * when the image base is not valid memory, has no NT header, or the
 * export entry is outside the directory table.
 */
PIMAGE_DATA_DIRECTORY
PeGetExportDataDirectorySafe(_In_ PVOID Image)
{
    PNT_HEADER_64 nt = NULL;

    /* Validate BEFORE parsing; previously PeGetNtHeader (which dereferences
     * Image) ran ahead of the MmIsAddressValid check, defeating the point
     * of the "Safe" variant. */
    if (!MmIsAddressValid(Image))
        return NULL;

    nt = PeGetNtHeaderSafe(Image);
    if (!nt)
        return NULL;

    if (IMAGE_DIRECTORY_ENTRY_EXPORT >= nt->OptionalHeader.NumberOfRvaAndSizes)
        return NULL;

    return (PIMAGE_DATA_DIRECTORY)&nt->OptionalHeader
        .DataDirectory[IMAGE_DIRECTORY_ENTRY_EXPORT];
}
/*
 * Convert a populated export data-directory entry into a pointer to the
 * image's IMAGE_EXPORT_DIRECTORY. Returns NULL when the directory entry
 * is empty (zero RVA or zero size).
 */
PIMAGE_EXPORT_DIRECTORY
PeGetExportDirectory(
    _In_ PVOID Image, _In_ PIMAGE_DATA_DIRECTORY ExportDataDirectory)
{
    BOOLEAN populated = ExportDataDirectory->VirtualAddress != 0 &&
                        ExportDataDirectory->Size != 0;

    if (!populated)
        return NULL;

    return RVA(
        PIMAGE_EXPORT_DIRECTORY, Image, ExportDataDirectory->VirtualAddress);
}
/*
 * Address-validating variant of PeGetExportDirectory: additionally checks
 * the image base with MmIsAddressValid before computing the RVA.
 */
PIMAGE_EXPORT_DIRECTORY
PeGetExportDirectorySafe(
    _In_ PVOID Image, _In_ PIMAGE_DATA_DIRECTORY ExportDataDirectory)
{
    if (!MmIsAddressValid(Image))
        return NULL;

    BOOLEAN populated = ExportDataDirectory->VirtualAddress != 0 &&
                        ExportDataDirectory->Size != 0;

    if (!populated)
        return NULL;

    return RVA(
        PIMAGE_EXPORT_DIRECTORY, Image, ExportDataDirectory->VirtualAddress);
}
/* Return the section count from the PE file header (no validation). */
UINT32
GetSectionCount(_In_ PNT_HEADER_64 Header)
{
    UINT32 section_count = Header->FileHeader.NumberOfSections;
    return section_count;
}
/*
 * Address-validating variant of GetSectionCount. Returns 0 when the
 * header pointer is not valid memory.
 */
UINT32
GetSectionCountSafe(_In_ PNT_HEADER_64 Header)
{
    if (!MmIsAddressValid(Header))
        return 0; /* was `return NULL` - a pointer constant returned from a
                   * UINT32 function */

    return Header->FileHeader.NumberOfSections;
}
/*
 * Look up an export by name in a mapped PE image.
 *
 * Walks the export name table and, on a match, maps the name's ordinal to
 * its function RVA. Returns the mapped address of the export, or NULL when
 * the image has no parseable export data or the name is not present.
 */
PVOID
PeFindExportByName(_In_ PVOID Image, _In_ PCHAR Name)
{
    PNT_HEADER_64 nt = NULL;
    PIMAGE_DATA_DIRECTORY data = NULL;
    PIMAGE_EXPORT_DIRECTORY export = NULL;

    /* Previously an ANSI_STRING was initialised from Name here but never
     * used; removed. */
    nt = PeGetNtHeader(Image);

    if (!nt)
        return NULL;

    data = PeGetExportDataDirectory(Image);

    if (!data)
        return NULL;

    export = PeGetExportDirectory(Image, data);

    if (!export)
        return NULL;

    PUINT32 functions = RVA(PUINT32, Image, export->AddressOfFunctions);
    PUINT32 names = RVA(PUINT32, Image, export->AddressOfNames);
    PUINT16 ordinals = RVA(PUINT16, Image, export->AddressOfNameOrdinals);

    for (UINT32 index = 0; index < export->NumberOfNames; index++) {
        /* Renamed from `export`, which shadowed the export-directory
         * pointer above. */
        PCHAR export_name = RVA(PCHAR, Image, names[index]);

        /* Zero return from IntCompareString is treated as a match. */
        if (!IntCompareString(Name, export_name))
            return RVA(PVOID, Image, functions[ordinals[index]]);
    }

    return NULL;
}
================================================
FILE: driver/pe.h
================================================
#ifndef PE_H
#define PE_H
#include "common.h"
#define IMAGE_DOS_SIGNATURE 0x5a4d /* MZ */
#define IMAGE_NT_SIGNATURE 0x00004550 /* PE00 */
/* Look up an export by name in a mapped PE image; NULL if not found. */
PVOID
PeFindExportByName(_In_ PVOID Image, _In_ PCHAR Name);
/* NT header of a PE image; NULL on bad DOS signature. No address checks. */
PNT_HEADER_64
PeGetNtHeader(_In_ PVOID Image);
/* Export data directory of the image; NULL when absent. */
PIMAGE_DATA_DIRECTORY
PeGetExportDataDirectory(_In_ PVOID Image);
/* Resolve a populated export data-directory entry to the export directory. */
PIMAGE_EXPORT_DIRECTORY
PeGetExportDirectory(_In_ PVOID Image,
_In_ PIMAGE_DATA_DIRECTORY ExportDataDirectory);
/* Section count from the PE file header (no validation). */
UINT32
GetSectionCount(_In_ PNT_HEADER_64 Header);
/* "Safe" variants below additionally validate addresses with
 * MmIsAddressValid before dereferencing. */
PIMAGE_EXPORT_DIRECTORY
PeGetExportDirectorySafe(_In_ PVOID Image,
_In_ PIMAGE_DATA_DIRECTORY ExportDataDirectory);
PIMAGE_DATA_DIRECTORY
PeGetExportDataDirectorySafe(_In_ PVOID Image);
PNT_HEADER_64
PeGetNtHeaderSafe(_In_ PVOID Image);
UINT32
GetSectionCountSafe(_In_ PNT_HEADER_64 Header);
#endif
================================================
FILE: driver/pool.c
================================================
#include "pool.h"
#include "callbacks.h"
#include "crypt.h"
#include "ia32.h"
#include "imports.h"
#include "lib/stdlib.h"
#include
#define PML4_ENTRY_COUNT 512
#define PDPT_ENTRY_COUNT 512
#define PD_ENTRY_COUNT 512
#define PT_ENTRY_COUNT 512
#define LARGE_PAGE_2MB_ENTRIES 0x200
#define LARGE_PAGE_1GB_ENTRIES 0x40000
/* Fixed: the macro previously expanded `index` instead of its `idx`
 * parameter and only compiled because every call site's loop variable
 * happened to be named `index`. */
#define IS_VALID_PAGE(pt, idx) ((pt)[(idx)].Present)
#define IS_LARGE_PAGE(pt) ((pt).LargePage)
STATIC PVOID local_page_copy_buf = NULL;
/*
* Using MmGetPhysicalMemoryRangesEx2(), we can get a block of structures that
* describe the physical memory layout. With each physical page base we are
* going to enumerate, we want to make sure it lies within an appropriate region
* of physical memory, so this function is to check for exactly that.
*/
/*
 * Check whether a physical address falls inside one of the ranges returned
 * by MmGetPhysicalMemoryRangesEx2. The range array is terminated by an
 * entry whose NumberOfBytes is zero.
 */
STATIC
BOOLEAN
PoolpIsAddressInPhysicalRange(
    _In_ UINT64 PhysicalAddress,
    _In_ PPHYSICAL_MEMORY_RANGE PhysicalMemoryRanges)
{
    ULONG index = 0;
    UINT64 start = 0;
    UINT64 end = 0;

    while (PhysicalMemoryRanges[index].NumberOfBytes.QuadPart) {
        start = PhysicalMemoryRanges[index].BaseAddress.QuadPart;
        end = start + PhysicalMemoryRanges[index].NumberOfBytes.QuadPart;

        /* `end` is one past the last byte of the range, so the comparison
         * must be strict; `<=` previously accepted the first address past
         * the range. */
        if (PhysicalAddress >= start && PhysicalAddress < end)
            return TRUE;

        index++;
    }

    return FALSE;
}
/*
 * Invoke the callback for each 4KB chunk of a large page. PageSize is the
 * number of 4KB entries the large page spans (LARGE_PAGE_2MB_ENTRIES or
 * LARGE_PAGE_1GB_ENTRIES). Returns TRUE when a callback requests the walk
 * to stop.
 *
 * NOTE(review): only the base address is validated; pages deeper inside
 * the run are handed to the callback unvalidated - confirm callers
 * tolerate this.
 */
STATIC
BOOLEAN
PoolpScanLargePage(
    _In_ UINT64 PageBase,
    _In_ UINT32 PageSize,
    _In_ PAGE_CALLBACK Callback,
    _In_opt_ PVOID Context)
{
    UINT64 page = 0;

    if (!PageBase || !ImpMmIsAddressValid(PageBase))
        return FALSE;

    /* removed the unused `stop` local */
    for (UINT32 page_index = 0; page_index < PageSize; page_index++) {
        page = PageBase + (page_index * PAGE_SIZE);

        if (Callback(page, PAGE_SIZE, Context))
            return TRUE;
    }

    return FALSE;
}
/*
 * Scan the 4KB page referenced by a PTE. When the frame has no valid
 * virtual mapping, fall back to copying the physical frame into the module
 * scratch buffer (local_page_copy_buf) with MmCopyMemory and scanning the
 * copy instead.
 */
STATIC
BOOLEAN
PoolpScanPageTable(
    _In_ PTE_64 Pte, _In_ PAGE_CALLBACK Callback, _In_opt_ PVOID Context)
{
    UINT64 page = 0;
    PHYSICAL_ADDRESS pa = {0};
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    MM_COPY_ADDRESS addr = {0};
    UINT32 bytes = 0;

    pa.QuadPart = Pte.PageFrameNumber << PAGE_4KB_SHIFT;
    page = MmGetVirtualForPhysical(pa);

    if (!page || !ImpMmIsAddressValid(page)) {
        /* The scratch buffer is only allocated by
         * PoolScanForManualMappedDrivers; bail out rather than hand a NULL
         * destination to MmCopyMemory. */
        if (!local_page_copy_buf)
            return FALSE;

        addr.PhysicalAddress = pa;
        status = MmCopyMemory(
            local_page_copy_buf,
            addr,
            PAGE_SIZE,
            MM_COPY_MEMORY_PHYSICAL,
            &bytes);

        if (!NT_SUCCESS(status))
            return FALSE;

        DEBUG_VERBOSE(
            "valid mm page: %llx, pa: %llx, copied: %lx",
            local_page_copy_buf,
            pa.QuadPart,
            bytes);

        return Callback(local_page_copy_buf, PAGE_SIZE, Context);
    }

    return Callback(page, PAGE_SIZE, Context);
}
/*
 * Walk one page-directory entry. A 2MB large page is handed directly to
 * the large-page scanner; otherwise every present PTE of the referenced
 * page table is visited. Returns TRUE when a callback requests the walk
 * to stop.
 */
STATIC
BOOLEAN
PoolpScanPageDirectory(
    _In_ PDE_64 Pde, _In_ PAGE_CALLBACK Callback, _In_opt_ PVOID Context)
{
    PHYSICAL_ADDRESS physical = {0};
    PDE_2MB_64 large_pde = {0};
    PTE_64* page_table = NULL;

    if (IS_LARGE_PAGE(Pde)) {
        large_pde.AsUInt = Pde.AsUInt;
        physical.QuadPart = large_pde.PageFrameNumber << PAGE_2MB_SHIFT;
        return PoolpScanLargePage(
            ImpMmGetVirtualForPhysical(physical),
            LARGE_PAGE_2MB_ENTRIES,
            Callback,
            Context);
    }

    physical.QuadPart = Pde.PageFrameNumber << PAGE_4KB_SHIFT;
    page_table = ImpMmGetVirtualForPhysical(physical);

    if (!page_table || !ImpMmIsAddressValid(page_table))
        return FALSE;

    for (UINT32 entry = 0; entry < PT_ENTRY_COUNT; entry++) {
        if (!page_table[entry].Present)
            continue;

        if (PoolpScanPageTable(page_table[entry], Callback, Context))
            return TRUE;
    }

    return FALSE;
}
/*
 * Walk one PDPT entry. A 1GB large page is handed directly to the
 * large-page scanner; otherwise every present PDE of the referenced page
 * directory is visited. Returns TRUE when a callback requests the walk
 * to stop.
 */
STATIC
BOOLEAN
PoolpScanPageDirectoryPointerTable(
    _In_ PDPTE_64 Pdpte, _In_ PAGE_CALLBACK Callback, _In_opt_ PVOID Context)
{
    PHYSICAL_ADDRESS physical = {0};
    PDPTE_1GB_64 large_pdpte = {0};
    PDE_64* page_directory = NULL;

    if (IS_LARGE_PAGE(Pdpte)) {
        large_pdpte.AsUInt = Pdpte.AsUInt;
        physical.QuadPart = large_pdpte.PageFrameNumber << PAGE_1GB_SHIFT;
        return PoolpScanLargePage(
            ImpMmGetVirtualForPhysical(physical),
            LARGE_PAGE_1GB_ENTRIES,
            Callback,
            Context);
    }

    physical.QuadPart = Pdpte.PageFrameNumber << PAGE_4KB_SHIFT;
    page_directory = ImpMmGetVirtualForPhysical(physical);

    if (!page_directory || !ImpMmIsAddressValid(page_directory))
        return FALSE;

    for (UINT32 entry = 0; entry < PD_ENTRY_COUNT; entry++) {
        if (!page_directory[entry].Present)
            continue;

        if (PoolpScanPageDirectory(page_directory[entry], Callback, Context))
            return TRUE;
    }

    return FALSE;
}
/*
 * Walk one PML4 entry: resolve its PDPT and scan every present PDPTE.
 * Returns TRUE when a lower-level callback requested the walk to stop.
 */
STATIC
BOOLEAN
PoolpScanPageMapLevel4(
    _In_ PML4E_64 Pml4e, _In_ PAGE_CALLBACK Callback, _In_opt_ PVOID Context)
{
    PDPTE_64* pdpt = NULL;
    PHYSICAL_ADDRESS pa = {0};

    /* removed the unused `stop` local */
    pa.QuadPart = Pml4e.PageFrameNumber << PAGE_4KB_SHIFT;
    pdpt = ImpMmGetVirtualForPhysical(pa);

    if (!pdpt || !ImpMmIsAddressValid(pdpt))
        return FALSE;

    for (UINT32 index = 0; index < PDPT_ENTRY_COUNT; index++) {
        if (!IS_VALID_PAGE(pdpt, index))
            continue;

        if (PoolpScanPageDirectoryPointerTable(pdpt[index], Callback, Context))
            return TRUE;
    }

    return FALSE;
}
/*
 * Walk the current page tables starting from CR3 and invoke Callback for
 * every resident 4KB page encountered. A TRUE return from the lower-level
 * walkers aborts the scan early.
 *
 * NOTE(review): only PML4 slots from 490 upward are walked; 490 appears to
 * be a heuristic cut-off for the address region of interest - confirm
 * against the intended VA range.
 */
NTSTATUS
PoolScanSystemSpace(_In_ PAGE_CALLBACK Callback, _In_opt_ PVOID Context)
{
    CR3 cr3 = {0};
    PHYSICAL_ADDRESS physical = {0};
    PML4E_64* pml4 = NULL;

    NT_ASSERT(Callback != NULL);

    if (!Callback)
        return STATUS_INVALID_PARAMETER;

    cr3.AsUInt = __readcr3();
    physical.QuadPart = cr3.AddressOfPageDirectory << PAGE_4KB_SHIFT;

    pml4 = ImpMmGetVirtualForPhysical(physical);

    if (!pml4 || !ImpMmIsAddressValid(pml4))
        return STATUS_UNSUCCESSFUL;

    for (UINT32 slot = 490; slot < PML4_ENTRY_COUNT; slot++) {
        if (!pml4[slot].Present)
            continue;

        if (PoolpScanPageMapLevel4(pml4[slot], Callback, Context))
            break;
    }

    return STATUS_SUCCESS;
}
/* Credits to Samuel Tulach c:
 * https://tulach.cc/detecting-manually-mapped-drivers/ */
// #36aae000 4d 5a 90
#define GADGET_BYTE_ONE   0x4D // 0xFF
#define GADGET_BYTE_TWO   0x5A // 0x25
#define GADGET_BYTE_THREE 0x90 // 0x25

/*
 * Per-page callback: scan one page for the 3-byte gadget 4D 5A 90
 * (the "MZ\x90" DOS stub prefix). Always returns FALSE so the full
 * system-space walk continues.
 */
STATIC
BOOLEAN
PoolScanForManualMappedDriverCallback(
    _In_ UINT64 Page, _In_ UINT32 PageSize, _In_opt_ PVOID Context)
{
    /* Use unsigned bytes: with a (signed) PCHAR, a buffer byte of 0x90 reads
     * as a negative value and can never compare equal to the int constant
     * 0x90, so the third byte check always failed. */
    PUCHAR byte = (PUCHAR)Page;

    UNREFERENCED_PARAMETER(Context);

    /* Stop 2 bytes before the end of the buffer: the previous bound of
     * `index < PageSize - 1` allowed `byte[index + 2]` to read one byte
     * past the page. */
    for (UINT32 index = 0; index + 2 < PageSize; index++) {
        if (byte[index] == GADGET_BYTE_ONE &&
            byte[index + 1] == GADGET_BYTE_TWO &&
            byte[index + 2] == GADGET_BYTE_THREE) {
            DEBUG_VERBOSE("FOUND!");
        }
    }

    return FALSE;
}
NTSTATUS
PoolScanForManualMappedDrivers()
{
NTSTATUS status = STATUS_UNSUCCESSFUL;
DEBUG_VERBOSE("scanning for gadget");
local_page_copy_buf =
ImpExAllocatePool2(POOL_FLAG_NON_PAGED, PAGE_SIZE, POOL_TAG_INTEGRITY);
PoolScanSystemSpace(PoolScanForManualMappedDriverCallback, NULL);
DEBUG_VERBOSE("fnished scanning");
ImpExFreePoolWithTag(local_page_copy_buf, POOL_TAG_INTEGRITY);
}
================================================
FILE: driver/pool.h
================================================
#ifndef POOL_H
#define POOL_H
#include
#include "common.h"
/* Invoked for every resident page found during a system-space walk.
 * Return TRUE to stop the walk early, FALSE to continue. */
typedef BOOLEAN (*PAGE_CALLBACK)(_In_ UINT64 Page, _In_ UINT32 PageSize, _In_opt_ PVOID Context);
/* Walk the current page tables from CR3 and invoke Callback per page. */
NTSTATUS
PoolScanSystemSpace(_In_ PAGE_CALLBACK Callback, _In_opt_ PVOID Context);
/* Scan system space for manually mapped driver gadget bytes. */
NTSTATUS
PoolScanForManualMappedDrivers();
#endif
================================================
FILE: driver/session.c
================================================
#include "session.h"
#include "crypt.h"
#include "imports.h"
#include "util.h"
#include "lib/stdlib.h"
/*
 * One-time setup of the active session structure: initialise the session
 * guarded mutex and the crypt provider. Returns the crypt provider's
 * initialisation status.
 */
NTSTATUS
SessionInitialiseStructure()
{
    PACTIVE_SESSION session = GetActiveSession();
    NTSTATUS status = STATUS_UNSUCCESSFUL;

    KeInitializeGuardedMutex(&session->lock);

    status = CryptInitialiseProvider();

    if (!NT_SUCCESS(status))
        DEBUG_ERROR("CryptInitialiseProvider: %x", status);

    return status;
}
VOID
SessionInitialiseCallbackConfiguration()
{
InitialiseObCallbacksConfiguration(GetActiveSession());
}
/* Read the session-active flag under the session lock. */
VOID
SessionIsActive(_Out_ PBOOLEAN Flag)
{
    PACTIVE_SESSION session = GetActiveSession();

    KeAcquireGuardedMutex(&session->lock);
    *Flag = session->is_session_active;
    KeReleaseGuardedMutex(&session->lock);
}
/* Read the protected process pointer under the session lock. */
VOID
SessionGetProcess(_Out_ PEPROCESS* Process)
{
    PACTIVE_SESSION session = GetActiveSession();

    KeAcquireGuardedMutex(&session->lock);
    *Process = session->process;
    KeReleaseGuardedMutex(&session->lock);
}
/* Read the kernel-side process id (km_handle) under the session lock. */
VOID
SessionGetProcessId(_Out_ PLONG ProcessId)
{
    PACTIVE_SESSION session = GetActiveSession();

    KeAcquireGuardedMutex(&session->lock);
    *ProcessId = session->km_handle;
    KeReleaseGuardedMutex(&session->lock);
}
/* Hand out a pointer to the session's callback configuration under lock. */
VOID
SessionGetCallbackConfiguration(
    _Out_ POB_CALLBACKS_CONFIG* CallbackConfiguration)
{
    PACTIVE_SESSION session = GetActiveSession();

    KeAcquireGuardedMutex(&session->lock);
    *CallbackConfiguration = &session->callback_configuration;
    KeReleaseGuardedMutex(&session->lock);
}
/* Release the heartbeat configuration of a session that is being torn
 * down. Thin wrapper kept for naming symmetry with the other
 * SessionTerminate* helpers. */
STATIC
VOID
SessionTerminateHeartbeat(_In_ PHEARTBEAT_CONFIGURATION Configuration)
{
FreeHeartbeatConfiguration(Configuration);
}
/*
 * Tear down the active session under the session lock: clear handles and
 * the process pointer, zero the module information, free the heartbeat
 * configuration and close the session crypt objects.
 */
VOID
SessionTerminate()
{
    PACTIVE_SESSION session = GetActiveSession();

    DEBUG_INFO("Termination active session.");

    /* removed the unused `irql` local */
    KeAcquireGuardedMutex(&session->lock);
    session->km_handle = NULL;
    session->um_handle = NULL;
    session->process = NULL;
    session->is_session_active = FALSE;
    RtlZeroMemory(&session->module, sizeof(MODULE_INFORMATION));
    SessionTerminateHeartbeat(&session->heartbeat_config);
    CryptCloseSessionCryptObjects();
    KeReleaseGuardedMutex(&session->lock);
}
/*
 * Enumeration callback: hash our protected usermode module and store the
 * digest in the session structure. Context must be the active session.
 * (Return type for this doesnt matter - the enumerator ignores it.)
 */
STATIC
BOOLEAN
HashOurUserModuleOnEntryCallback(
    _In_ PPROCESS_MAP_MODULE_ENTRY Entry, _In_opt_ PVOID Context)
{
    PACTIVE_SESSION session = (PACTIVE_SESSION)Context;
    NTSTATUS status = STATUS_UNSUCCESSFUL;

    if (!ARGUMENT_PRESENT(Context))
        return FALSE;

    status = HashUserModule(
        Entry,
        session->module.module_hash,
        sizeof(session->module.module_hash));

    if (NT_SUCCESS(status)) {
        DEBUG_VERBOSE("User module hashed!");
        DumpBufferToKernelDebugger(
            session->module.module_hash,
            sizeof(session->module.module_hash));
        return TRUE;
    }

    DEBUG_ERROR("HashUserModule: %lx", status);
    return FALSE;
}
/*
 * Initialise a new session from a usermode SESSION_INITIATION_PACKET.
 *
 * Validates the IRP input buffer, resolves the usermode process id to an
 * EPROCESS, copies the AES key/IV and protected-module information into
 * the session, initialises the per-session crypt objects and the heartbeat
 * configuration, then hashes our usermode module. All session state is
 * mutated under the session guarded mutex.
 *
 * NOTE(review): ImpPsLookupProcessByProcessId references the returned
 * EPROCESS, and that reference is never released on the later failure
 * paths (or by SessionTerminate) - confirm whether a matching
 * ObDereferenceObject is intentionally omitted.
 */
NTSTATUS
SessionInitialise(_In_ PIRP Irp)
{
    NTSTATUS status = STATUS_UNSUCCESSFUL;
    PEPROCESS process = NULL;
    PSESSION_INITIATION_PACKET initiation = NULL;
    PACTIVE_SESSION session = GetActiveSession();

    DEBUG_VERBOSE("Initialising new session.");

    /* removed the unused `irql` local */

    /* The trailing SHA-256 hash field is allowed to be absent. */
    status = ValidateIrpInputBuffer(
        Irp,
        sizeof(SESSION_INITIATION_PACKET) - SHA_256_HASH_LENGTH);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("ValidateIrpInputBuffer failed with status %x", status);
        return status;
    }

    initiation = (PSESSION_INITIATION_PACKET)Irp->AssociatedIrp.SystemBuffer;

    KeAcquireGuardedMutex(&session->lock);

    session->um_handle = initiation->process_id;

    /* What if we pass an invalid handle here? not good. */
    status = ImpPsLookupProcessByProcessId(session->um_handle, &process);

    if (!NT_SUCCESS(status)) {
        status = STATUS_INVALID_PARAMETER;
        goto end;
    }

    session->km_handle = ImpPsGetProcessId(process);
    session->process = process;
    session->cookie = initiation->cookie;

    IntCopyMemory(session->aes_key, initiation->aes_key, AES_256_KEY_SIZE);
    IntCopyMemory(session->iv, initiation->aes_iv, AES_256_IV_SIZE);

    session->module.base_address = initiation->module_info.base_address;
    session->module.size = initiation->module_info.size;

    IntCopyMemory(
        session->module.path,
        initiation->module_info.path,
        MAX_MODULE_PATH);

    DEBUG_VERBOSE("Module base: %llx", session->module.base_address);
    DEBUG_VERBOSE("Module size: %lx ", session->module.size);
    DEBUG_VERBOSE("Module path: %s", session->module.path);

    status = CryptInitialiseSessionCryptObjects();

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("CryptInitialiseSessionCryptObjects: %x", status);
        goto end;
    }

    status = InitialiseHeartbeatConfiguration(&session->heartbeat_config);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("InitialiseHeartbeatConfiguration %x", status);
        goto end;
    }

    FindOurUserModeModuleEntry(HashOurUserModuleOnEntryCallback, session);

    session->is_session_active = TRUE;

end:
    KeReleaseGuardedMutex(&session->lock);
    return status;
}
/*
 * Terminate the protected process using its kernel-side id.
 *
 * NOTE(review): SessionGetProcessId takes a PLONG but receives a PULONG
 * here, and ZwTerminateProcess's first parameter is a process HANDLE while
 * this passes the raw id stored in km_handle - confirm km_handle is in
 * fact usable as a handle value here.
 */
VOID
SessionTerminateProcess()
{
NTSTATUS status = STATUS_UNSUCCESSFUL;
ULONG process_id = 0;
SessionGetProcessId(&process_id);
if (!process_id) {
DEBUG_ERROR("Failed to terminate process as process id is null");
return;
}
/* Make sure we pass a km handle to ZwTerminateProcess and NOT a
 * usermode handle. */
status = ZwTerminateProcess(
process_id,
STATUS_SYSTEM_INTEGRITY_POLICY_VIOLATION);
if (!NT_SUCCESS(status)) {
/*
* We don't want to clear the process config if
* ZwTerminateProcess fails so we can try again.
*/
DEBUG_ERROR("ZwTerminateProcess failed with status %x", status);
return;
}
/* this wont be needed when procloadstuff is implemented */
SessionTerminate();
}
VOID
SessionIncrementIrpsProcessedCount()
{
KeAcquireGuardedMutex(&GetActiveSession()->lock);
GetActiveSession()->irps_received;
KeReleaseGuardedMutex(&GetActiveSession()->lock);
}
VOID
SessionIncrementReportCount()
{
KeAcquireGuardedMutex(&GetActiveSession()->lock);
GetActiveSession()->report_count++;
KeReleaseGuardedMutex(&GetActiveSession()->lock);
}
VOID
SessionIncrementHeartbeatCount()
{
KeAcquireGuardedMutex(&GetActiveSession()->lock);
GetActiveSession()->heartbeat_count++;
KeReleaseGuardedMutex(&GetActiveSession()->lock);
}
================================================
FILE: driver/session.h
================================================
#ifndef SESSION_H
#define SESSION_H
#include "common.h"
#include "driver.h"
/* One-time setup of the active session (mutex + crypt provider). */
NTSTATUS
SessionInitialiseStructure();
/* Initialise the object-callback configuration for the session. */
VOID
SessionInitialiseCallbackConfiguration();
/* Read the session-active flag under the session lock. */
VOID
SessionIsActive(_Out_ PBOOLEAN Flag);
/* Read the protected process pointer under the session lock. */
VOID
SessionGetProcess(_Out_ PEPROCESS* Process);
/* Read the kernel-side process id under the session lock. */
VOID
SessionGetProcessId(_Out_ PLONG ProcessId);
/* Hand out a pointer to the session's callback configuration. */
VOID
SessionGetCallbackConfiguration(
_Out_ POB_CALLBACKS_CONFIG* CallbackConfiguration);
/* Tear down the active session state under the session lock. */
VOID
SessionTerminate();
/* Initialise a new session from a usermode initiation packet IRP. */
NTSTATUS
SessionInitialise(_In_ PIRP Irp);
/* Terminate the protected process, then tear down the session. */
VOID
SessionTerminateProcess();
/* Counter helpers; each increments under the session lock. */
VOID
SessionIncrementIrpsProcessedCount();
VOID
SessionIncrementReportCount();
VOID
SessionIncrementHeartbeatCount();
#endif
================================================
FILE: driver/thread.c
================================================
#include "thread.h"
#include
#include "callbacks.h"
#include "driver.h"
#include "pool.h"
#include "containers/tree.h"
#include "crypt.h"
#include "imports.h"
#include "session.h"
#include "lib/stdlib.h"
#ifdef ALLOC_PRAGMA
# pragma alloc_text(PAGE, DetectThreadsAttachedToProtectedProcess)
# pragma alloc_text(PAGE, DoesThreadHaveValidCidEntry)
#endif
/*
 * Check whether a thread's id still resolves through the PspCidTable.
 *
 * Returns TRUE when the lookup succeeds (or the id belongs to a per-core
 * idle-range thread, see below); FALSE when the cid entry appears to have
 * been removed or disrupted.
 */
BOOLEAN
DoesThreadHaveValidCidEntry(_In_ PETHREAD Thread)
{
    PAGED_CODE();

    NTSTATUS status = STATUS_UNSUCCESSFUL;
    HANDLE thread_id = NULL;
    PETHREAD thread = NULL;

    /*
     * PsGetThreadId simply returns ETHREAD->Cid.UniqueThread
     */
    thread_id = ImpPsGetThreadId(Thread);

    /*
     * For each core on the processor, the first x threads equal to x cores
     * will be assigned a cid equal to its equivalent core. These threads
     * are generally executing the HLT instruction or some other boring
     * stuff while the processor is not busy. The reason this is important
     * is because passing in a handle value of 0 which, even though is a
     * valid cid, returns a non success status meaning we mark it an invalid
     * cid entry even though it is. To combat this we simply add a little
     * check here. The problem is this can be easily bypassed by simply
     * modifying the ETHREAD->Cid.UniqueThread identifier.. So while it isnt
     * a perfect detection method for now it's good enough.
     */
    if ((UINT64)thread_id < (UINT64)ImpKeQueryActiveProcessorCount(NULL))
        return TRUE;

    /*
     * PsLookupThreadByThreadId will use a threads id to find its cid entry,
     * and return the pointer contained in the HANDLE_TABLE entry pointing
     * to the thread object. Meaning if we pass a valid thread id which we
     * retrieved above and dont receive a STATUS_SUCCESS the cid entry could
     * potentially be removed or disrupted..
     */
    status = ImpPsLookupThreadByThreadId(thread_id, &thread);

    if (!NT_SUCCESS(status)) {
        DEBUG_WARNING(
            "Failed to lookup thread by id. PspCidTable entry potentially removed.");
        return FALSE;
    }

    /* PsLookupThreadByThreadId references the returned thread object;
     * release it so repeated scans don't leak a reference per call. */
    ObDereferenceObject(thread);

    return TRUE;
}
/*
* I did not reverse this myself and previously had no idea how you would go
* about detecting KiAttachProcess so credits to KANKOSHEV for the find:
*
* https://github.com/KANKOSHEV/Detect-KeAttachProcess/tree/main
* https://doxygen.reactos.org/d0/dc9/procobj_8c.html#adec6dc539d4a5c0ee7d0f48e24ef0933
*
* To expand on his writeup a little, the offset that he provides is equivalent
* to PKAPC_STATE->Process. This is where KiAttachProcess writes the process
* that thread is attaching to when it's called. The APC_STATE structure holds
* relevant information about the thread's APC state and is quite important
* during context switch scenarios as it's how the thread determines if it has
* any APC's queued.
*/
/*
 * Per-thread callback: report any thread whose KAPC_STATE shows it is
 * attached (KeStackAttachProcess-style) to our protected process while
 * being owned by a different process. A report packet is encrypted and
 * queued for the usermode client.
 */
STATIC VOID
DetectAttachedThreadsProcessCallback(
_In_ PTHREAD_LIST_ENTRY ThreadListEntry, _Inout_opt_ PVOID Context)
{
UNREFERENCED_PARAMETER(Context);
NTSTATUS status = STATUS_UNSUCCESSFUL;
PKAPC_STATE apc_state = NULL;
PEPROCESS protected_process = NULL;
UINT32 packet_size =
CryptRequestRequiredBufferLength(sizeof(ATTACH_PROCESS_REPORT));
SessionGetProcess(&protected_process);
if (!protected_process)
return;
/* Raw offset into the KTHREAD to reach its KAPC_STATE; see the comment
 * block above for the source of this technique. NOTE(review): offset is
 * version-dependent - confirm KTHREAD_APC_STATE_OFFSET matches the
 * supported OS builds. */
apc_state = (PKAPC_STATE)((UINT64)ThreadListEntry->thread +
KTHREAD_APC_STATE_OFFSET);
/*
* We don't care if a thread owned by our protected process is attached
*
* todo: this is filterless and will just report anything, need to have
* a look into what processes actually attach to real games
*/
if (!(apc_state->Process == protected_process &&
ThreadListEntry->owning_process != protected_process)) {
return;
}
DEBUG_WARNING(
"Thread is attached to our protected process: %llx",
(UINT64)ThreadListEntry->thread);
PATTACH_PROCESS_REPORT report =
ImpExAllocatePool2(POOL_FLAG_NON_PAGED, packet_size, REPORT_POOL_TAG);
if (!report)
return;
INIT_REPORT_PACKET(report, REPORT_ILLEGAL_ATTACH_PROCESS, 0);
report->thread_id = ImpPsGetThreadId(ThreadListEntry->thread);
report->thread_address = ThreadListEntry->thread;
status = CryptEncryptBuffer(report, packet_size);
if (!NT_SUCCESS(status)) {
DEBUG_ERROR("CryptEncryptBuffer: %lx", status);
ImpExFreePoolWithTag(report, REPORT_POOL_TAG);
return;
}
/* Packet ownership passes to the IRP queue on success. */
IrpQueueSchedulePacket(report, packet_size);
}
/*
 * Walk the thread tree and flag any thread currently attached to our
 * protected process (see DetectAttachedThreadsProcessCallback).
 */
VOID
DetectThreadsAttachedToProtectedProcess()
{
    PAGED_CODE();

    DEBUG_VERBOSE("Detecting threads attached to our process...");

    RtlRbTreeEnumerate(
        GetThreadTree(), DetectAttachedThreadsProcessCallback, NULL);
}
================================================
FILE: driver/thread.h
================================================
#ifndef THREAD_H
#define THREAD_H
#include
#include "common.h"
#include "callbacks.h"
/* TRUE when the thread's id still resolves through the PspCidTable (or the
 * id falls in the per-core idle-thread range). */
BOOLEAN
DoesThreadHaveValidCidEntry(_In_ PETHREAD Thread);
/* Scan the thread tree for threads attached to the protected process. */
VOID
DetectThreadsAttachedToProtectedProcess();
#endif
================================================
FILE: driver/types/tpm12.h
================================================
/** @file
TPM Specification data structures (TCG TPM Specification Version 1.2 Revision
103) See http://trustedcomputinggroup.org for latest specification updates
Copyright (c) 2006 - 2018, Intel Corporation. All rights reserved.
SPDX-License-Identifier: BSD-2-Clause-Patent
**/
#ifndef _TPM12_H_
#define _TPM12_H_
#include "../common.h"
///
/// The start of TPM return codes
///
#define TPM_BASE 0
//
// All structures MUST be packed on a byte boundary.
//
#pragma pack(1)
//
// Part 2, section 2.2.3: Helper redefinitions
//
///
/// Indicates the conditions where it is required that authorization be
/// presented
///
typedef UINT8 TPM_AUTH_DATA_USAGE;
///
/// The information as to what the payload is in an encrypted structure
///
typedef UINT8 TPM_PAYLOAD_TYPE;
///
/// The version info breakdown
///
typedef UINT8 TPM_VERSION_BYTE;
///
/// The state of the dictionary attack mitigation logic
///
typedef UINT8 TPM_DA_STATE;
///
/// The request or response authorization type
///
typedef UINT16 TPM_TAG;
///
/// The protocol in use
///
typedef UINT16 TPM_PROTOCOL_ID;
///
/// Indicates the start state
///
typedef UINT16 TPM_STARTUP_TYPE;
///
/// The definition of the encryption scheme
///
typedef UINT16 TPM_ENC_SCHEME;
///
/// The definition of the signature scheme
///
typedef UINT16 TPM_SIG_SCHEME;
///
/// The definition of the migration scheme
///
typedef UINT16 TPM_MIGRATE_SCHEME;
///
/// Sets the state of the physical presence mechanism
///
typedef UINT16 TPM_PHYSICAL_PRESENCE;
///
/// Indicates the types of entity that are supported by the TPM
///
typedef UINT16 TPM_ENTITY_TYPE;
///
/// Indicates the permitted usage of the key
///
typedef UINT16 TPM_KEY_USAGE;
///
/// The type of asymmetric encrypted structure in use by the endorsement key
///
typedef UINT16 TPM_EK_TYPE;
///
/// The tag for the structure
///
typedef UINT16 TPM_STRUCTURE_TAG;
///
/// The platform specific spec to which the information relates to
///
typedef UINT16 TPM_PLATFORM_SPECIFIC;
///
/// The command ordinal
///
typedef UINT32 TPM_COMMAND_CODE;
///
/// Identifies a TPM capability area
///
typedef UINT32 TPM_CAPABILITY_AREA;
///
/// Indicates information regarding a key
///
typedef UINT32 TPM_KEY_FLAGS;
///
/// Indicates the type of algorithm
///
typedef UINT32 TPM_ALGORITHM_ID;
///
/// The locality modifier
///
typedef UINT32 TPM_MODIFIER_INDICATOR;
///
/// The actual number of a counter
///
typedef UINT32 TPM_ACTUAL_COUNT;
///
/// Attributes that define what options are in use for a transport session
///
typedef UINT32 TPM_TRANSPORT_ATTRIBUTES;
///
/// Handle to an authorization session
///
typedef UINT32 TPM_AUTHHANDLE;
///
/// Index to a DIR register
///
typedef UINT32 TPM_DIRINDEX;
///
/// The area where a key is held assigned by the TPM
///
typedef UINT32 TPM_KEY_HANDLE;
///
/// Index to a PCR register
///
typedef UINT32 TPM_PCRINDEX;
///
/// The return code from a function
///
typedef UINT32 TPM_RESULT;
///
/// The types of resources that a TPM may have using internal resources
///
typedef UINT32 TPM_RESOURCE_TYPE;
///
/// Allows for controlling of the key when loaded and how to handle TPM_Startup
/// issues
///
typedef UINT32 TPM_KEY_CONTROL;
///
/// The index into the NV storage area
///
typedef UINT32 TPM_NV_INDEX;
///
/// The family ID. Family IDs are automatically assigned a sequence number by
/// the TPM. A trusted process can set the FamilyID value in an individual row
/// to NULL, which invalidates that row. The family ID resets to NULL on each
/// change of TPM Owner.
///
typedef UINT32 TPM_FAMILY_ID;
///
/// IA value used as a label for the most recent verification of this family.
/// Set to zero when not in use.
///
typedef UINT32 TPM_FAMILY_VERIFICATION;
///
/// How the TPM handles var
///
typedef UINT32 TPM_STARTUP_EFFECTS;
///
/// The mode of a symmetric encryption
///
typedef UINT32 TPM_SYM_MODE;
///
/// The family flags
///
typedef UINT32 TPM_FAMILY_FLAGS;
///
/// The index value for the delegate NV table
///
typedef UINT32 TPM_DELEGATE_INDEX;
///
/// The restrictions placed on delegation of CMK commands
///
typedef UINT32 TPM_CMK_DELEGATE;
///
/// The ID value of a monotonic counter
///
typedef UINT32 TPM_COUNT_ID;
///
/// A command to execute
///
typedef UINT32 TPM_REDIT_COMMAND;
///
/// A transport session handle
///
typedef UINT32 TPM_TRANSHANDLE;
///
/// A generic handle could be key, transport etc
///
typedef UINT32 TPM_HANDLE;
///
/// What operation is happening
///
typedef UINT32 TPM_FAMILY_OPERATION;
//
// Part 2, section 2.2.4: Vendor specific
// The following defines allow for the quick specification of a
// vendor specific item.
//
#define TPM_Vendor_Specific32 ((UINT32)0x00000400)
#define TPM_Vendor_Specific8 ((UINT8)0x80)
//
// Part 2, section 3.1: TPM_STRUCTURE_TAG
//
#define TPM_TAG_CONTEXTBLOB ((TPM_STRUCTURE_TAG)0x0001)
#define TPM_TAG_CONTEXT_SENSITIVE ((TPM_STRUCTURE_TAG)0x0002)
#define TPM_TAG_CONTEXTPOINTER ((TPM_STRUCTURE_TAG)0x0003)
#define TPM_TAG_CONTEXTLIST ((TPM_STRUCTURE_TAG)0x0004)
#define TPM_TAG_SIGNINFO ((TPM_STRUCTURE_TAG)0x0005)
#define TPM_TAG_PCR_INFO_LONG ((TPM_STRUCTURE_TAG)0x0006)
#define TPM_TAG_PERSISTENT_FLAGS ((TPM_STRUCTURE_TAG)0x0007)
#define TPM_TAG_VOLATILE_FLAGS ((TPM_STRUCTURE_TAG)0x0008)
#define TPM_TAG_PERSISTENT_DATA ((TPM_STRUCTURE_TAG)0x0009)
#define TPM_TAG_VOLATILE_DATA ((TPM_STRUCTURE_TAG)0x000A)
#define TPM_TAG_SV_DATA ((TPM_STRUCTURE_TAG)0x000B)
#define TPM_TAG_EK_BLOB ((TPM_STRUCTURE_TAG)0x000C)
#define TPM_TAG_EK_BLOB_AUTH ((TPM_STRUCTURE_TAG)0x000D)
#define TPM_TAG_COUNTER_VALUE ((TPM_STRUCTURE_TAG)0x000E)
#define TPM_TAG_TRANSPORT_INTERNAL ((TPM_STRUCTURE_TAG)0x000F)
#define TPM_TAG_TRANSPORT_LOG_IN ((TPM_STRUCTURE_TAG)0x0010)
#define TPM_TAG_TRANSPORT_LOG_OUT ((TPM_STRUCTURE_TAG)0x0011)
#define TPM_TAG_AUDIT_EVENT_IN ((TPM_STRUCTURE_TAG)0x0012)
#define TPM_TAG_AUDIT_EVENT_OUT ((TPM_STRUCTURE_TAG)0x0013)
#define TPM_TAG_CURRENT_TICKS ((TPM_STRUCTURE_TAG)0x0014)
#define TPM_TAG_KEY ((TPM_STRUCTURE_TAG)0x0015)
#define TPM_TAG_STORED_DATA12 ((TPM_STRUCTURE_TAG)0x0016)
#define TPM_TAG_NV_ATTRIBUTES ((TPM_STRUCTURE_TAG)0x0017)
#define TPM_TAG_NV_DATA_PUBLIC ((TPM_STRUCTURE_TAG)0x0018)
#define TPM_TAG_NV_DATA_SENSITIVE ((TPM_STRUCTURE_TAG)0x0019)
#define TPM_TAG_DELEGATIONS ((TPM_STRUCTURE_TAG)0x001A)
#define TPM_TAG_DELEGATE_PUBLIC ((TPM_STRUCTURE_TAG)0x001B)
#define TPM_TAG_DELEGATE_TABLE_ROW ((TPM_STRUCTURE_TAG)0x001C)
#define TPM_TAG_TRANSPORT_AUTH ((TPM_STRUCTURE_TAG)0x001D)
#define TPM_TAG_TRANSPORT_PUBLIC ((TPM_STRUCTURE_TAG)0x001E)
#define TPM_TAG_PERMANENT_FLAGS ((TPM_STRUCTURE_TAG)0x001F)
#define TPM_TAG_STCLEAR_FLAGS ((TPM_STRUCTURE_TAG)0x0020)
#define TPM_TAG_STANY_FLAGS ((TPM_STRUCTURE_TAG)0x0021)
#define TPM_TAG_PERMANENT_DATA ((TPM_STRUCTURE_TAG)0x0022)
#define TPM_TAG_STCLEAR_DATA ((TPM_STRUCTURE_TAG)0x0023)
#define TPM_TAG_STANY_DATA ((TPM_STRUCTURE_TAG)0x0024)
#define TPM_TAG_FAMILY_TABLE_ENTRY ((TPM_STRUCTURE_TAG)0x0025)
#define TPM_TAG_DELEGATE_SENSITIVE ((TPM_STRUCTURE_TAG)0x0026)
#define TPM_TAG_DELG_KEY_BLOB ((TPM_STRUCTURE_TAG)0x0027)
#define TPM_TAG_KEY12 ((TPM_STRUCTURE_TAG)0x0028)
#define TPM_TAG_CERTIFY_INFO2 ((TPM_STRUCTURE_TAG)0x0029)
#define TPM_TAG_DELEGATE_OWNER_BLOB ((TPM_STRUCTURE_TAG)0x002A)
#define TPM_TAG_EK_BLOB_ACTIVATE ((TPM_STRUCTURE_TAG)0x002B)
#define TPM_TAG_DAA_BLOB ((TPM_STRUCTURE_TAG)0x002C)
#define TPM_TAG_DAA_CONTEXT ((TPM_STRUCTURE_TAG)0x002D)
#define TPM_TAG_DAA_ENFORCE ((TPM_STRUCTURE_TAG)0x002E)
#define TPM_TAG_DAA_ISSUER ((TPM_STRUCTURE_TAG)0x002F)
#define TPM_TAG_CAP_VERSION_INFO ((TPM_STRUCTURE_TAG)0x0030)
#define TPM_TAG_DAA_SENSITIVE ((TPM_STRUCTURE_TAG)0x0031)
#define TPM_TAG_DAA_TPM ((TPM_STRUCTURE_TAG)0x0032)
#define TPM_TAG_CMK_MIGAUTH ((TPM_STRUCTURE_TAG)0x0033)
#define TPM_TAG_CMK_SIGTICKET ((TPM_STRUCTURE_TAG)0x0034)
#define TPM_TAG_CMK_MA_APPROVAL ((TPM_STRUCTURE_TAG)0x0035)
#define TPM_TAG_QUOTE_INFO2 ((TPM_STRUCTURE_TAG)0x0036)
#define TPM_TAG_DA_INFO ((TPM_STRUCTURE_TAG)0x0037)
#define TPM_TAG_DA_LIMITED ((TPM_STRUCTURE_TAG)0x0038)
#define TPM_TAG_DA_ACTION_TYPE ((TPM_STRUCTURE_TAG)0x0039)
//
// Part 2, section 4: TPM Types
//
//
// Part 2, section 4.1: TPM_RESOURCE_TYPE
//
#define TPM_RT_KEY \
((TPM_RESOURCE_TYPE)0x00000001) ///< The handle is a key handle and is the
///< result of a LoadKey type operation
#define TPM_RT_AUTH \
((TPM_RESOURCE_TYPE)0x00000002) ///< The handle is an authorization handle.
///< Auth handles come from TPM_OIAP,
///< TPM_OSAP and TPM_DSAP
#define TPM_RT_HASH ((TPM_RESOURCE_TYPE)0x00000003) ///< Reserved for hashes
#define TPM_RT_TRANS \
((TPM_RESOURCE_TYPE)0x00000004) ///< The handle is for a transport session.
///< Transport handles come from
///< TPM_EstablishTransport
#define TPM_RT_CONTEXT \
((TPM_RESOURCE_TYPE)0x00000005) ///< Resource wrapped and held outside the
///< TPM using the context save/restore
///< commands
#define TPM_RT_COUNTER \
((TPM_RESOURCE_TYPE)0x00000006) ///< Reserved for counters
#define TPM_RT_DELEGATE \
((TPM_RESOURCE_TYPE)0x00000007) ///< The handle is for a delegate row. These
///< are the internal rows held in NV
///< storage by the TPM
#define TPM_RT_DAA_TPM \
((TPM_RESOURCE_TYPE)0x00000008) ///< The value is a DAA TPM specific blob
#define TPM_RT_DAA_V0 \
((TPM_RESOURCE_TYPE)0x00000009) ///< The value is a DAA V0 parameter
#define TPM_RT_DAA_V1 \
((TPM_RESOURCE_TYPE)0x0000000A) ///< The value is a DAA V1 parameter
//
// Part 2, section 4.2: TPM_PAYLOAD_TYPE
//
#define TPM_PT_ASYM \
((TPM_PAYLOAD_TYPE)0x01) ///< The entity is an asymmetric key
#define TPM_PT_BIND ((TPM_PAYLOAD_TYPE)0x02) ///< The entity is bound data
#define TPM_PT_MIGRATE \
((TPM_PAYLOAD_TYPE)0x03) ///< The entity is a migration blob
#define TPM_PT_MAINT \
((TPM_PAYLOAD_TYPE)0x04) ///< The entity is a maintenance blob
#define TPM_PT_SEAL ((TPM_PAYLOAD_TYPE)0x05) ///< The entity is sealed data
#define TPM_PT_MIGRATE_RESTRICTED \
((TPM_PAYLOAD_TYPE)0x06) ///< The entity is a restricted-migration
///< asymmetric key
#define TPM_PT_MIGRATE_EXTERNAL \
((TPM_PAYLOAD_TYPE)0x07) ///< The entity is a external migratable key
#define TPM_PT_CMK_MIGRATE \
((TPM_PAYLOAD_TYPE)0x08) ///< The entity is a CMK migratable blob
#define TPM_PT_VENDOR_SPECIFIC \
((TPM_PAYLOAD_TYPE)0x80) ///< 0x80 - 0xFF Vendor specific payloads
//
// Part 2, section 4.3: TPM_ENTITY_TYPE
//
#define TPM_ET_KEYHANDLE ((UINT16)0x0001) ///< The entity is a keyHandle or key
#define TPM_ET_OWNER ((UINT16)0x0002) ///< The entity is the TPM Owner
#define TPM_ET_DATA ((UINT16)0x0003) ///< The entity is some data
#define TPM_ET_SRK ((UINT16)0x0004) ///< The entity is the SRK
#define TPM_ET_KEY ((UINT16)0x0005) ///< The entity is a key or keyHandle
#define TPM_ET_REVOKE ((UINT16)0x0006) ///< The entity is the RevokeTrust value
#define TPM_ET_DEL_OWNER_BLOB \
((UINT16)0x0007) ///< The entity is a delegate owner blob
#define TPM_ET_DEL_ROW ((UINT16)0x0008) ///< The entity is a delegate row
#define TPM_ET_DEL_KEY_BLOB \
((UINT16)0x0009) ///< The entity is a delegate key blob
#define TPM_ET_COUNTER ((UINT16)0x000A) ///< The entity is a counter
#define TPM_ET_NV ((UINT16)0x000B) ///< The entity is a NV index
#define TPM_ET_OPERATOR ((UINT16)0x000C) ///< The entity is the operator
#define TPM_ET_RESERVED_HANDLE \
((UINT16)0x0040) ///< Reserved. This value avoids collisions with the handle
///< MSB setting.
//
// TPM_ENTITY_TYPE MSB Values: The MSB is used to indicate the ADIP encryption
// scheme when applicable
//
#define TPM_ET_XOR ((UINT16)0x0000) ///< ADIP encryption scheme: XOR
#define TPM_ET_AES128 ((UINT16)0x0006) ///< ADIP encryption scheme: AES 128 bits
//
// Part 2, section 4.4.1: Reserved Key Handles
//
#define TPM_KH_SRK \
((TPM_KEY_HANDLE)0x40000000) ///< The handle points to the SRK
#define TPM_KH_OWNER \
((TPM_KEY_HANDLE)0x40000001) ///< The handle points to the TPM Owner
#define TPM_KH_REVOKE \
((TPM_KEY_HANDLE)0x40000002) ///< The handle points to the RevokeTrust value
#define TPM_KH_TRANSPORT \
((TPM_KEY_HANDLE)0x40000003) ///< The handle points to the
///< EstablishTransport static authorization
#define TPM_KH_OPERATOR \
((TPM_KEY_HANDLE)0x40000004) ///< The handle points to the Operator auth
#define TPM_KH_ADMIN \
((TPM_KEY_HANDLE)0x40000005) ///< The handle points to the delegation
///< administration auth
#define TPM_KH_EK \
((TPM_KEY_HANDLE)0x40000006) ///< The handle points to the PUBEK, only
///< usable with TPM_OwnerReadInternalPub
//
// Part 2, section 4.5: TPM_STARTUP_TYPE
//
#define TPM_ST_CLEAR \
((TPM_STARTUP_TYPE)0x0001) ///< The TPM is starting up from a clean state
#define TPM_ST_STATE \
((TPM_STARTUP_TYPE)0x0002) ///< The TPM is starting up from a saved state
#define TPM_ST_DEACTIVATED \
((TPM_STARTUP_TYPE)0x0003) ///< The TPM is to startup and set the
///< deactivated flag to TRUE
//
// Part 2, section 4.6: TPM_STARTUP_EFFECTS
// The table makeup is still an open issue.
//
//
// Part 2, section 4.7: TPM_PROTOCOL_ID
//
#define TPM_PID_OIAP ((TPM_PROTOCOL_ID)0x0001) ///< The OIAP protocol.
#define TPM_PID_OSAP ((TPM_PROTOCOL_ID)0x0002) ///< The OSAP protocol.
#define TPM_PID_ADIP ((TPM_PROTOCOL_ID)0x0003) ///< The ADIP protocol.
#define TPM_PID_ADCP ((TPM_PROTOCOL_ID)0x0004) ///< The ADCP protocol.
#define TPM_PID_OWNER \
((TPM_PROTOCOL_ID)0x0005) ///< The protocol for taking ownership of a TPM.
#define TPM_PID_DSAP ((TPM_PROTOCOL_ID)0x0006) ///< The DSAP protocol
#define TPM_PID_TRANSPORT ((TPM_PROTOCOL_ID)0x0007) ///< The transport protocol
//
// Part 2, section 4.8: TPM_ALGORITHM_ID
// The TPM MUST support the algorithms TPM_ALG_RSA, TPM_ALG_SHA, TPM_ALG_HMAC,
// TPM_ALG_MGF1
//
#define TPM_ALG_RSA ((TPM_ALGORITHM_ID)0x00000001) ///< The RSA algorithm.
#define TPM_ALG_DES ((TPM_ALGORITHM_ID)0x00000002) ///< The DES algorithm
#define TPM_ALG_3DES \
((TPM_ALGORITHM_ID)0x00000003) ///< The 3DES algorithm in EDE mode
#define TPM_ALG_SHA ((TPM_ALGORITHM_ID)0x00000004) ///< The SHA1 algorithm
#define TPM_ALG_HMAC \
((TPM_ALGORITHM_ID)0x00000005) ///< The RFC 2104 HMAC algorithm
#define TPM_ALG_AES128 \
((TPM_ALGORITHM_ID)0x00000006) ///< The AES algorithm, key size 128
#define TPM_ALG_MGF1 \
((TPM_ALGORITHM_ID)0x00000007) ///< The XOR algorithm using MGF1 to create a
///< string the size of the encrypted block
#define TPM_ALG_AES192 ((TPM_ALGORITHM_ID)0x00000008) ///< AES, key size 192
#define TPM_ALG_AES256 ((TPM_ALGORITHM_ID)0x00000009) ///< AES, key size 256
#define TPM_ALG_XOR \
((TPM_ALGORITHM_ID)0x0000000A) ///< XOR using the rolling nonces
//
// Part 2, section 4.9: TPM_PHYSICAL_PRESENCE
//
#define TPM_PHYSICAL_PRESENCE_HW_DISABLE \
((TPM_PHYSICAL_PRESENCE)0x0200) ///< Sets the physicalPresenceHWEnable to
///< FALSE
#define TPM_PHYSICAL_PRESENCE_CMD_DISABLE \
((TPM_PHYSICAL_PRESENCE)0x0100) ///< Sets the physicalPresenceCMDEnable to
///< FALSE
#define TPM_PHYSICAL_PRESENCE_LIFETIME_LOCK \
((TPM_PHYSICAL_PRESENCE)0x0080) ///< Sets the physicalPresenceLifetimeLock
///< to TRUE
#define TPM_PHYSICAL_PRESENCE_HW_ENABLE \
((TPM_PHYSICAL_PRESENCE)0x0040) ///< Sets the physicalPresenceHWEnable to
///< TRUE
#define TPM_PHYSICAL_PRESENCE_CMD_ENABLE \
((TPM_PHYSICAL_PRESENCE)0x0020) ///< Sets the physicalPresenceCMDEnable to
///< TRUE
#define TPM_PHYSICAL_PRESENCE_NOTPRESENT \
((TPM_PHYSICAL_PRESENCE)0x0010) ///< Sets PhysicalPresence = FALSE
#define TPM_PHYSICAL_PRESENCE_PRESENT \
((TPM_PHYSICAL_PRESENCE)0x0008) ///< Sets PhysicalPresence = TRUE
#define TPM_PHYSICAL_PRESENCE_LOCK \
((TPM_PHYSICAL_PRESENCE)0x0004) ///< Sets PhysicalPresenceLock = TRUE
//
// Part 2, section 4.10: TPM_MIGRATE_SCHEME
//
#define TPM_MS_MIGRATE \
((TPM_MIGRATE_SCHEME)0x0001) ///< A public key that can be used with all TPM
///< migration commands other than 'ReWrap'
///< mode.
#define TPM_MS_REWRAP \
((TPM_MIGRATE_SCHEME)0x0002) ///< A public key that can be used for the
///< ReWrap mode of TPM_CreateMigrationBlob.
#define TPM_MS_MAINT \
((TPM_MIGRATE_SCHEME)0x0003) ///< A public key that can be used for the
///< Maintenance commands
#define TPM_MS_RESTRICT_MIGRATE \
((TPM_MIGRATE_SCHEME)0x0004) ///< The key is to be migrated to a Migration
///< Authority.
#define TPM_MS_RESTRICT_APPROVE_DOUBLE \
((TPM_MIGRATE_SCHEME)0x0005) ///< The key is to be migrated to an entity
///< approved by a Migration Authority using
///< double wrapping
//
// Part 2, section 4.11: TPM_EK_TYPE
//
#define TPM_EK_TYPE_ACTIVATE \
((TPM_EK_TYPE)0x0001) ///< The blob MUST be TPM_EK_BLOB_ACTIVATE
#define TPM_EK_TYPE_AUTH \
((TPM_EK_TYPE)0x0002) ///< The blob MUST be TPM_EK_BLOB_AUTH
//
// Part 2, section 4.12: TPM_PLATFORM_SPECIFIC
//
#define TPM_PS_PC_11 \
((TPM_PLATFORM_SPECIFIC)0x0001) ///< PC Specific version 1.1
#define TPM_PS_PC_12 \
((TPM_PLATFORM_SPECIFIC)0x0002) ///< PC Specific version 1.2
#define TPM_PS_PDA_12 \
((TPM_PLATFORM_SPECIFIC)0x0003) ///< PDA Specific version 1.2
#define TPM_PS_Server_12 \
((TPM_PLATFORM_SPECIFIC)0x0004) ///< Server Specific version 1.2
#define TPM_PS_Mobile_12 \
((TPM_PLATFORM_SPECIFIC)0x0005) ///< Mobile Specific version 1.2
//
// Part 2, section 5: Basic Structures
//
///
/// Part 2, section 5.1: TPM_STRUCT_VER
///
typedef struct tdTPM_STRUCT_VER {
UINT8 major;    ///< Major version of the structure layout
UINT8 minor;    ///< Minor version of the structure layout
UINT8 revMajor; ///< Major revision number
UINT8 revMinor; ///< Minor revision number
} TPM_STRUCT_VER;
///
/// Part 2, section 5.3: TPM_VERSION
///
typedef struct tdTPM_VERSION {
TPM_VERSION_BYTE major;
TPM_VERSION_BYTE minor;
UINT8 revMajor;
UINT8 revMinor;
} TPM_VERSION;
#define TPM_SHA1_160_HASH_LEN 0x14
#define TPM_SHA1BASED_NONCE_LEN TPM_SHA1_160_HASH_LEN
///
/// Part 2, section 5.4: TPM_DIGEST
///
typedef struct tdTPM_DIGEST {
UINT8 digest[TPM_SHA1_160_HASH_LEN];
} TPM_DIGEST;
///
/// This SHALL be the digest of the chosen identityLabel and privacyCA for a new
/// TPM identity
///
typedef TPM_DIGEST TPM_CHOSENID_HASH;
///
/// This SHALL be the hash of a list of PCR indexes and PCR values that a key or
/// data is bound to
///
typedef TPM_DIGEST TPM_COMPOSITE_HASH;
///
/// This SHALL be the value of a DIR register
///
typedef TPM_DIGEST TPM_DIRVALUE;
typedef TPM_DIGEST TPM_HMAC;
///
/// The value inside of the PCR
///
typedef TPM_DIGEST TPM_PCRVALUE;
///
/// This SHALL be the value of the current internal audit state
///
typedef TPM_DIGEST TPM_AUDITDIGEST;
///
/// Part 2, section 5.5: TPM_NONCE
///
typedef struct tdTPM_NONCE {
UINT8 nonce[20];
} TPM_NONCE;
///
/// This SHALL be a random value generated by a TPM immediately after the EK is
/// installed in that TPM, whenever an EK is installed in that TPM
///
typedef TPM_NONCE TPM_DAA_TPM_SEED;
///
/// This SHALL be a random value
///
typedef TPM_NONCE TPM_DAA_CONTEXT_SEED;
//
// Part 2, section 5.6: TPM_AUTHDATA
//
///
/// The AuthData data is the information that is saved or passed to provide
/// proof of ownership of an entity
///
typedef UINT8 tdTPM_AUTHDATA[20];
typedef tdTPM_AUTHDATA TPM_AUTHDATA;
///
/// A secret plaintext value used in the authorization process
///
typedef TPM_AUTHDATA TPM_SECRET;
///
/// A ciphertext (encrypted) version of AuthData data. The encryption mechanism
/// depends on the context
///
typedef TPM_AUTHDATA TPM_ENCAUTH;
///
/// Part 2, section 5.7: TPM_KEY_HANDLE_LIST
/// Size of handle is loaded * sizeof(TPM_KEY_HANDLE)
///
typedef struct tdTPM_KEY_HANDLE_LIST {
UINT16 loaded;            ///< Number of valid entries in handle[]
TPM_KEY_HANDLE handle[1]; ///< Variable-length trailing array; actually
                          ///< 'loaded' entries (old-style flexible array)
} TPM_KEY_HANDLE_LIST;
//
// Part 2, section 5.8: TPM_KEY_USAGE values
//
///
/// TPM_KEY_SIGNING SHALL indicate a signing key. The [private] key SHALL be
/// used for signing operations, only. This means that it MUST be a leaf of the
/// Protected Storage key hierarchy.
///
#define TPM_KEY_SIGNING ((UINT16)0x0010)
///
/// TPM_KEY_STORAGE SHALL indicate a storage key. The key SHALL be used to wrap
/// and unwrap other keys in the Protected Storage hierarchy
///
#define TPM_KEY_STORAGE ((UINT16)0x0011)
///
/// TPM_KEY_IDENTITY SHALL indicate an identity key. The key SHALL be used for
/// operations that require a TPM identity, only.
///
#define TPM_KEY_IDENTITY ((UINT16)0x0012)
///
/// TPM_KEY_AUTHCHANGE SHALL indicate an ephemeral key that is in use during
/// the ChangeAuthAsym process, only.
///
#define TPM_KEY_AUTHCHANGE ((UINT16)0x0013)
///
/// TPM_KEY_BIND SHALL indicate a key that can be used for TPM_Bind and
/// TPM_Unbind operations only.
///
#define TPM_KEY_BIND ((UINT16)0x0014)
///
/// TPM_KEY_LEGACY SHALL indicate a key that can perform signing and binding
/// operations. The key MAY be used for both signing and binding operations.
/// The TPM_KEY_LEGACY key type is to allow for use by applications where both
/// signing and encryption operations occur with the same key. The use of this
/// key type is not recommended.
///
#define TPM_KEY_LEGACY ((UINT16)0x0015)
///
/// TPM_KEY_MIGRATE SHALL indicate a key in use for TPM_MigrateKey
///
#define TPM_KEY_MIGRATE ((UINT16)0x0016)
//
// Part 2, section 5.8.1: Mandatory Key Usage Schemes
//
#define TPM_ES_NONE ((TPM_ENC_SCHEME)0x0001)
#define TPM_ES_RSAESPKCSv15 ((TPM_ENC_SCHEME)0x0002)
#define TPM_ES_RSAESOAEP_SHA1_MGF1 ((TPM_ENC_SCHEME)0x0003)
#define TPM_ES_SYM_CNT ((TPM_ENC_SCHEME)0x0004) ///< rev94 defined
#define TPM_ES_SYM_CTR ((TPM_ENC_SCHEME)0x0004)
#define TPM_ES_SYM_OFB ((TPM_ENC_SCHEME)0x0005)
#define TPM_SS_NONE ((TPM_SIG_SCHEME)0x0001)
#define TPM_SS_RSASSAPKCS1v15_SHA1 ((TPM_SIG_SCHEME)0x0002)
#define TPM_SS_RSASSAPKCS1v15_DER ((TPM_SIG_SCHEME)0x0003)
#define TPM_SS_RSASSAPKCS1v15_INFO ((TPM_SIG_SCHEME)0x0004)
//
// Part 2, section 5.9: TPM_AUTH_DATA_USAGE values
//
#define TPM_AUTH_NEVER ((TPM_AUTH_DATA_USAGE)0x00)
#define TPM_AUTH_ALWAYS ((TPM_AUTH_DATA_USAGE)0x01)
#define TPM_AUTH_PRIV_USE_ONLY ((TPM_AUTH_DATA_USAGE)0x03)
///
/// Part 2, section 5.10: TPM_KEY_FLAGS
///
typedef enum tdTPM_KEY_FLAGS {
redirection = 0x00000001,
migratable = 0x00000002,
isVolatile = 0x00000004,
pcrIgnoredOnRead = 0x00000008,
migrateAuthority = 0x00000010
} TPM_KEY_FLAGS_BITS;
///
/// Part 2, section 5.11: TPM_CHANGEAUTH_VALIDATE
///
typedef struct tdTPM_CHANGEAUTH_VALIDATE {
TPM_SECRET newAuthSecret;
TPM_NONCE n1;
} TPM_CHANGEAUTH_VALIDATE;
///
/// Part 2, section 5.12: TPM_MIGRATIONKEYAUTH
/// declared after section 10 to catch declaration of TPM_PUBKEY
///
/// Part 2 section 10.1: TPM_KEY_PARMS
/// [size_is(parmSize)] BYTE* parms;
///
typedef struct tdTPM_KEY_PARMS {
TPM_ALGORITHM_ID algorithmID; ///< Key algorithm (e.g. TPM_ALG_RSA)
TPM_ENC_SCHEME encScheme;     ///< Encryption scheme (TPM_ES_*)
TPM_SIG_SCHEME sigScheme;     ///< Signature scheme (TPM_SS_*)
UINT32 parmSize;              ///< Byte size of the buffer at parms
UINT8* parms;                 ///< [size_is(parmSize)] algorithm-specific blob
} TPM_KEY_PARMS;
///
/// Part 2, section 10.4: TPM_STORE_PUBKEY
///
typedef struct tdTPM_STORE_PUBKEY {
UINT32 keyLength;
UINT8 key[1];
} TPM_STORE_PUBKEY;
///
/// Part 2, section 10.5: TPM_PUBKEY
///
typedef struct tdTPM_PUBKEY {
TPM_KEY_PARMS algorithmParms;
TPM_STORE_PUBKEY pubKey;
} TPM_PUBKEY;
///
/// Part 2, section 5.12: TPM_MIGRATIONKEYAUTH
///
typedef struct tdTPM_MIGRATIONKEYAUTH {
TPM_PUBKEY migrationKey;
TPM_MIGRATE_SCHEME migrationScheme;
TPM_DIGEST digest;
} TPM_MIGRATIONKEYAUTH;
///
/// Part 2, section 5.13: TPM_COUNTER_VALUE
///
typedef struct tdTPM_COUNTER_VALUE {
TPM_STRUCTURE_TAG tag;
UINT8 label[4];
TPM_ACTUAL_COUNT counter;
} TPM_COUNTER_VALUE;
///
/// Part 2, section 5.14: TPM_SIGN_INFO
/// Size of data indicated by dataLen
///
typedef struct tdTPM_SIGN_INFO {
TPM_STRUCTURE_TAG tag;
UINT8 fixed[4];
TPM_NONCE replay;
UINT32 dataLen;
UINT8* data;
} TPM_SIGN_INFO;
///
/// Part 2, section 5.15: TPM_MSA_COMPOSITE
/// Number of migAuthDigest indicated by MSAlist
///
typedef struct tdTPM_MSA_COMPOSITE {
UINT32 MSAlist;
TPM_DIGEST migAuthDigest[1];
} TPM_MSA_COMPOSITE;
///
/// Part 2, section 5.16: TPM_CMK_AUTH
///
typedef struct tdTPM_CMK_AUTH {
TPM_DIGEST migrationAuthorityDigest;
TPM_DIGEST destinationKeyDigest;
TPM_DIGEST sourceKeyDigest;
} TPM_CMK_AUTH;
//
// Part 2, section 5.17: TPM_CMK_DELEGATE
//
#define TPM_CMK_DELEGATE_SIGNING ((TPM_CMK_DELEGATE)BIT31)
#define TPM_CMK_DELEGATE_STORAGE ((TPM_CMK_DELEGATE)BIT30)
#define TPM_CMK_DELEGATE_BIND ((TPM_CMK_DELEGATE)BIT29)
#define TPM_CMK_DELEGATE_LEGACY ((TPM_CMK_DELEGATE)BIT28)
#define TPM_CMK_DELEGATE_MIGRATE ((TPM_CMK_DELEGATE)BIT27)
///
/// Part 2, section 5.18: TPM_SELECT_SIZE
///
typedef struct tdTPM_SELECT_SIZE {
UINT8 major;
UINT8 minor;
UINT16 reqSize;
} TPM_SELECT_SIZE;
///
/// Part 2, section 5,19: TPM_CMK_MIGAUTH
///
typedef struct tdTPM_CMK_MIGAUTH {
TPM_STRUCTURE_TAG tag;
TPM_DIGEST msaDigest;
TPM_DIGEST pubKeyDigest;
} TPM_CMK_MIGAUTH;
///
/// Part 2, section 5.20: TPM_CMK_SIGTICKET
///
typedef struct tdTPM_CMK_SIGTICKET {
TPM_STRUCTURE_TAG tag;
TPM_DIGEST verKeyDigest;
TPM_DIGEST signedData;
} TPM_CMK_SIGTICKET;
///
/// Part 2, section 5.21: TPM_CMK_MA_APPROVAL
///
typedef struct tdTPM_CMK_MA_APPROVAL {
TPM_STRUCTURE_TAG tag;
TPM_DIGEST migrationAuthorityDigest;
} TPM_CMK_MA_APPROVAL;
//
// Part 2, section 6: Command Tags
//
#define TPM_TAG_RQU_COMMAND ((TPM_STRUCTURE_TAG)0x00C1)
#define TPM_TAG_RQU_AUTH1_COMMAND ((TPM_STRUCTURE_TAG)0x00C2)
#define TPM_TAG_RQU_AUTH2_COMMAND ((TPM_STRUCTURE_TAG)0x00C3)
#define TPM_TAG_RSP_COMMAND ((TPM_STRUCTURE_TAG)0x00C4)
#define TPM_TAG_RSP_AUTH1_COMMAND ((TPM_STRUCTURE_TAG)0x00C5)
#define TPM_TAG_RSP_AUTH2_COMMAND ((TPM_STRUCTURE_TAG)0x00C6)
///
/// Part 2, section 7.1: TPM_PERMANENT_FLAGS
///
typedef struct tdTPM_PERMANENT_FLAGS {
TPM_STRUCTURE_TAG tag; ///< TPM_TAG_PERMANENT_FLAGS
BOOLEAN disable;
BOOLEAN ownership;
BOOLEAN deactivated;
BOOLEAN readPubek;
BOOLEAN disableOwnerClear;
BOOLEAN allowMaintenance;
BOOLEAN physicalPresenceLifetimeLock;
BOOLEAN physicalPresenceHWEnable;
BOOLEAN physicalPresenceCMDEnable;
BOOLEAN CEKPUsed;
BOOLEAN TPMpost;
BOOLEAN TPMpostLock;
BOOLEAN FIPS;
// NOTE(review): 'operator' is a reserved keyword in C++; this header cannot
// be included from C++ translation units as-is (the repo contains C++ code
// under module/) -- confirm this header is only ever included from C.
BOOLEAN operator;
BOOLEAN enableRevokeEK;
BOOLEAN nvLocked;
BOOLEAN readSRKPub;
BOOLEAN tpmEstablished;
BOOLEAN maintenanceDone;
BOOLEAN disableFullDALogicInfo;
} TPM_PERMANENT_FLAGS;
//
// Part 2, section 7.1.1: Flag Restrictions (of TPM_PERMANENT_FLAGS)
//
#define TPM_PF_DISABLE ((TPM_CAPABILITY_AREA)1)
#define TPM_PF_OWNERSHIP ((TPM_CAPABILITY_AREA)2)
#define TPM_PF_DEACTIVATED ((TPM_CAPABILITY_AREA)3)
#define TPM_PF_READPUBEK ((TPM_CAPABILITY_AREA)4)
#define TPM_PF_DISABLEOWNERCLEAR ((TPM_CAPABILITY_AREA)5)
#define TPM_PF_ALLOWMAINTENANCE ((TPM_CAPABILITY_AREA)6)
#define TPM_PF_PHYSICALPRESENCELIFETIMELOCK ((TPM_CAPABILITY_AREA)7)
#define TPM_PF_PHYSICALPRESENCEHWENABLE ((TPM_CAPABILITY_AREA)8)
#define TPM_PF_PHYSICALPRESENCECMDENABLE ((TPM_CAPABILITY_AREA)9)
#define TPM_PF_CEKPUSED ((TPM_CAPABILITY_AREA)10)
#define TPM_PF_TPMPOST ((TPM_CAPABILITY_AREA)11)
#define TPM_PF_TPMPOSTLOCK ((TPM_CAPABILITY_AREA)12)
#define TPM_PF_FIPS ((TPM_CAPABILITY_AREA)13)
#define TPM_PF_OPERATOR ((TPM_CAPABILITY_AREA)14)
#define TPM_PF_ENABLEREVOKEEK ((TPM_CAPABILITY_AREA)15)
#define TPM_PF_NV_LOCKED ((TPM_CAPABILITY_AREA)16)
#define TPM_PF_READSRKPUB ((TPM_CAPABILITY_AREA)17)
#define TPM_PF_TPMESTABLISHED ((TPM_CAPABILITY_AREA)18)
#define TPM_PF_MAINTENANCEDONE ((TPM_CAPABILITY_AREA)19)
#define TPM_PF_DISABLEFULLDALOGICINFO ((TPM_CAPABILITY_AREA)20)
///
/// Part 2, section 7.2: TPM_STCLEAR_FLAGS
///
typedef struct tdTPM_STCLEAR_FLAGS {
TPM_STRUCTURE_TAG tag;
BOOLEAN deactivated;
BOOLEAN disableForceClear;
BOOLEAN physicalPresence;
BOOLEAN physicalPresenceLock;
BOOLEAN bGlobalLock;
} TPM_STCLEAR_FLAGS;
//
// Part 2, section 7.2.1: Flag Restrictions (of TPM_STCLEAR_FLAGS)
//
#define TPM_SF_DEACTIVATED ((TPM_CAPABILITY_AREA)1)
#define TPM_SF_DISABLEFORCECLEAR ((TPM_CAPABILITY_AREA)2)
#define TPM_SF_PHYSICALPRESENCE ((TPM_CAPABILITY_AREA)3)
#define TPM_SF_PHYSICALPRESENCELOCK ((TPM_CAPABILITY_AREA)4)
#define TPM_SF_BGLOBALLOCK ((TPM_CAPABILITY_AREA)5)
///
/// Part 2, section 7.3: TPM_STANY_FLAGS
///
typedef struct tdTPM_STANY_FLAGS {
TPM_STRUCTURE_TAG tag;
BOOLEAN postInitialise;
TPM_MODIFIER_INDICATOR localityModifier;
BOOLEAN transportExclusive;
BOOLEAN TOSPresent;
} TPM_STANY_FLAGS;
//
// Part 2, section 7.3.1: Flag Restrictions (of TPM_STANY_FLAGS)
//
#define TPM_AF_POSTINITIALISE ((TPM_CAPABILITY_AREA)1)
#define TPM_AF_LOCALITYMODIFIER ((TPM_CAPABILITY_AREA)2)
#define TPM_AF_TRANSPORTEXCLUSIVE ((TPM_CAPABILITY_AREA)3)
#define TPM_AF_TOSPRESENT ((TPM_CAPABILITY_AREA)4)
//
// All those structures defined in section 7.4, 7.5, 7.6 are not normative and
// thus no definitions here
//
// Part 2, section 7.4: TPM_PERMANENT_DATA
//
#define TPM_MIN_COUNTERS 4 ///< the minimum number of counters is 4
#define TPM_DELEGATE_KEY TPM_KEY
#define TPM_NUM_PCR 16
#define TPM_MAX_NV_WRITE_NOOWNER 64
//
// Part 2, section 7.4.1: PERMANENT_DATA Subcap for SetCapability
//
#define TPM_PD_REVMAJOR ((TPM_CAPABILITY_AREA)1)
#define TPM_PD_REVMINOR ((TPM_CAPABILITY_AREA)2)
#define TPM_PD_TPMPROOF ((TPM_CAPABILITY_AREA)3)
#define TPM_PD_OWNERAUTH ((TPM_CAPABILITY_AREA)4)
#define TPM_PD_OPERATORAUTH ((TPM_CAPABILITY_AREA)5)
#define TPM_PD_MANUMAINTPUB ((TPM_CAPABILITY_AREA)6)
#define TPM_PD_ENDORSEMENTKEY ((TPM_CAPABILITY_AREA)7)
#define TPM_PD_SRK ((TPM_CAPABILITY_AREA)8)
#define TPM_PD_DELEGATEKEY ((TPM_CAPABILITY_AREA)9)
#define TPM_PD_CONTEXTKEY ((TPM_CAPABILITY_AREA)10)
#define TPM_PD_AUDITMONOTONICCOUNTER ((TPM_CAPABILITY_AREA)11)
#define TPM_PD_MONOTONICCOUNTER ((TPM_CAPABILITY_AREA)12)
#define TPM_PD_PCRATTRIB ((TPM_CAPABILITY_AREA)13)
#define TPM_PD_ORDINALAUDITSTATUS ((TPM_CAPABILITY_AREA)14)
#define TPM_PD_AUTHDIR ((TPM_CAPABILITY_AREA)15)
#define TPM_PD_RNGSTATE ((TPM_CAPABILITY_AREA)16)
#define TPM_PD_FAMILYTABLE ((TPM_CAPABILITY_AREA)17)
#define TPM_DELEGATETABLE ((TPM_CAPABILITY_AREA)18)
#define TPM_PD_EKRESET ((TPM_CAPABILITY_AREA)19)
#define TPM_PD_MAXNVBUFSIZE ((TPM_CAPABILITY_AREA)20)
#define TPM_PD_LASTFAMILYID ((TPM_CAPABILITY_AREA)21)
#define TPM_PD_NOOWNERNVWRITE ((TPM_CAPABILITY_AREA)22)
#define TPM_PD_RESTRICTDELEGATE ((TPM_CAPABILITY_AREA)23)
#define TPM_PD_TPMDAASEED ((TPM_CAPABILITY_AREA)24)
#define TPM_PD_DAAPROOF ((TPM_CAPABILITY_AREA)25)
///
/// Part 2, section 7.5: TPM_STCLEAR_DATA
/// available inside TPM only
///
typedef struct tdTPM_STCLEAR_DATA {
TPM_STRUCTURE_TAG tag; ///< TPM_TAG_STCLEAR_DATA
TPM_NONCE contextNonceKey;
TPM_COUNT_ID countID;
UINT32 ownerReference;
BOOLEAN disableResetLock;
TPM_PCRVALUE PCR[TPM_NUM_PCR]; ///< One value per PCR (TPM_NUM_PCR == 16)
UINT32 deferredPhysicalPresence;
} TPM_STCLEAR_DATA;
//
// Part 2, section 7.5.1: STCLEAR_DATA Subcap for SetCapability
//
#define TPM_SD_CONTEXTNONCEKEY ((TPM_CAPABILITY_AREA)0x00000001)
#define TPM_SD_COUNTID ((TPM_CAPABILITY_AREA)0x00000002)
#define TPM_SD_OWNERREFERENCE ((TPM_CAPABILITY_AREA)0x00000003)
#define TPM_SD_DISABLERESETLOCK ((TPM_CAPABILITY_AREA)0x00000004)
#define TPM_SD_PCR ((TPM_CAPABILITY_AREA)0x00000005)
#define TPM_SD_DEFERREDPHYSICALPRESENCE ((TPM_CAPABILITY_AREA)0x00000006)
//
// Part 2, section 7.6.1: STANY_DATA Subcap for SetCapability
//
#define TPM_AD_CONTEXTNONCESESSION ((TPM_CAPABILITY_AREA)1)
#define TPM_AD_AUDITDIGEST ((TPM_CAPABILITY_AREA)2)
#define TPM_AD_CURRENTTICKS ((TPM_CAPABILITY_AREA)3)
#define TPM_AD_CONTEXTCOUNT ((TPM_CAPABILITY_AREA)4)
#define TPM_AD_CONTEXTLIST ((TPM_CAPABILITY_AREA)5)
#define TPM_AD_SESSIONS ((TPM_CAPABILITY_AREA)6)
//
// Part 2, section 8: PCR Structures
//
///
/// Part 2, section 8.1: TPM_PCR_SELECTION
/// Size of pcrSelect[] indicated by sizeOfSelect
///
typedef struct tdTPM_PCR_SELECTION {
UINT16 sizeOfSelect; ///< Number of bytes in pcrSelect[]
UINT8 pcrSelect[1];  ///< Variable-length bitmap of selected PCRs
} TPM_PCR_SELECTION;
///
/// Part 2, section 8.2: TPM_PCR_COMPOSITE
/// Size of pcrValue[] indicated by valueSize
///
typedef struct tdTPM_PCR_COMPOSITE {
TPM_PCR_SELECTION select;
UINT32 valueSize;
TPM_PCRVALUE pcrValue[1];
} TPM_PCR_COMPOSITE;
///
/// Part 2, section 8.3: TPM_PCR_INFO
///
typedef struct tdTPM_PCR_INFO {
TPM_PCR_SELECTION pcrSelection;
TPM_COMPOSITE_HASH digestAtRelease;
TPM_COMPOSITE_HASH digestAtCreation;
} TPM_PCR_INFO;
///
/// Part 2, section 8.6: TPM_LOCALITY_SELECTION
///
typedef UINT8 TPM_LOCALITY_SELECTION;
#define TPM_LOC_FOUR ((UINT8)0x10)
#define TPM_LOC_THREE ((UINT8)0x08)
#define TPM_LOC_TWO ((UINT8)0x04)
#define TPM_LOC_ONE ((UINT8)0x02)
#define TPM_LOC_ZERO ((UINT8)0x01)
///
/// Part 2, section 8.4: TPM_PCR_INFO_LONG
///
typedef struct tdTPM_PCR_INFO_LONG {
TPM_STRUCTURE_TAG tag;
TPM_LOCALITY_SELECTION localityAtCreation;
TPM_LOCALITY_SELECTION localityAtRelease;
TPM_PCR_SELECTION creationPCRSelection;
TPM_PCR_SELECTION releasePCRSelection;
TPM_COMPOSITE_HASH digestAtCreation;
TPM_COMPOSITE_HASH digestAtRelease;
} TPM_PCR_INFO_LONG;
///
/// Part 2, section 8.5: TPM_PCR_INFO_SHORT
///
typedef struct tdTPM_PCR_INFO_SHORT {
TPM_PCR_SELECTION pcrSelection;
TPM_LOCALITY_SELECTION localityAtRelease;
TPM_COMPOSITE_HASH digestAtRelease;
} TPM_PCR_INFO_SHORT;
///
/// Part 2, section 8.8: TPM_PCR_ATTRIBUTES
///
typedef struct tdTPM_PCR_ATTRIBUTES {
BOOLEAN pcrReset;
TPM_LOCALITY_SELECTION pcrExtendLocal;
TPM_LOCALITY_SELECTION pcrResetLocal;
} TPM_PCR_ATTRIBUTES;
//
// Part 2, section 9: Storage Structures
//
///
/// Part 2, section 9.1: TPM_STORED_DATA
/// [size_is(sealInfoSize)] BYTE* sealInfo;
/// [size_is(encDataSize)] BYTE* encData;
///
typedef struct tdTPM_STORED_DATA {
TPM_STRUCT_VER ver;
UINT32 sealInfoSize;
UINT8* sealInfo;
UINT32 encDataSize;
UINT8* encData;
} TPM_STORED_DATA;
///
/// Part 2, section 9.2: TPM_STORED_DATA12
/// [size_is(sealInfoSize)] BYTE* sealInfo;
/// [size_is(encDataSize)] BYTE* encData;
///
typedef struct tdTPM_STORED_DATA12 {
TPM_STRUCTURE_TAG tag;
TPM_ENTITY_TYPE et;
UINT32 sealInfoSize;
UINT8* sealInfo;
UINT32 encDataSize;
UINT8* encData;
} TPM_STORED_DATA12;
///
/// Part 2, section 9.3: TPM_SEALED_DATA
/// [size_is(dataSize)] BYTE* data;
///
typedef struct tdTPM_SEALED_DATA {
TPM_PAYLOAD_TYPE payload;
TPM_SECRET authData;
TPM_NONCE tpmProof;
TPM_DIGEST storedDigest;
UINT32 dataSize;
UINT8* data;
} TPM_SEALED_DATA;
///
/// Part 2, section 9.4: TPM_SYMMETRIC_KEY
/// [size_is(size)] BYTE* data;
///
typedef struct tdTPM_SYMMETRIC_KEY {
TPM_ALGORITHM_ID algId;
TPM_ENC_SCHEME encScheme;
UINT16 dataSize;
UINT8* data;
} TPM_SYMMETRIC_KEY;
///
/// Part 2, section 9.5: TPM_BOUND_DATA
///
typedef struct tdTPM_BOUND_DATA {
TPM_STRUCT_VER ver;
TPM_PAYLOAD_TYPE payload;
UINT8 payloadData[1];
} TPM_BOUND_DATA;
//
// Part 2 section 10: TPM_KEY complex
//
//
// Section 10.1, 10.4, and 10.5 have been defined previously
//
///
/// Part 2, section 10.2: TPM_KEY
/// [size_is(encDataSize)] BYTE* encData;
///
typedef struct tdTPM_KEY {
TPM_STRUCT_VER ver;                ///< Structure version
TPM_KEY_USAGE keyUsage;            ///< Permitted operations (TPM_KEY_*)
TPM_KEY_FLAGS keyFlags;            ///< Attribute bits (TPM_KEY_FLAGS_BITS)
TPM_AUTH_DATA_USAGE authDataUsage; ///< When authorization is required
TPM_KEY_PARMS algorithmParms;      ///< Algorithm and its parameters
UINT32 PCRInfoSize;                ///< Byte size of the buffer at PCRInfo
UINT8* PCRInfo;                    ///< [size_is(PCRInfoSize)] PCR binding data
TPM_STORE_PUBKEY pubKey;           ///< Public portion of the key
UINT32 encDataSize;                ///< Byte size of the buffer at encData
UINT8* encData;                    ///< [size_is(encDataSize)] encrypted
                                   ///< private portion
} TPM_KEY;
///
/// Part 2, section 10.3: TPM_KEY12
/// [size_is(encDataSize)] BYTE* encData;
///
typedef struct tdTPM_KEY12 {
TPM_STRUCTURE_TAG tag;             ///< TPM_TAG_KEY12
UINT16 fill;                       ///< Padding; spec requires 0x0000
TPM_KEY_USAGE keyUsage;            ///< Permitted operations (TPM_KEY_*)
TPM_KEY_FLAGS keyFlags;            ///< Attribute bits (TPM_KEY_FLAGS_BITS)
TPM_AUTH_DATA_USAGE authDataUsage; ///< When authorization is required
TPM_KEY_PARMS algorithmParms;      ///< Algorithm and its parameters
UINT32 PCRInfoSize;                ///< Byte size of the buffer at PCRInfo
UINT8* PCRInfo;                    ///< [size_is(PCRInfoSize)] PCR binding data
TPM_STORE_PUBKEY pubKey;           ///< Public portion of the key
UINT32 encDataSize;                ///< Byte size of the buffer at encData
UINT8* encData;                    ///< [size_is(encDataSize)] encrypted
                                   ///< private portion
} TPM_KEY12;
///
/// Part 2, section 10.7: TPM_STORE_PRIVKEY
/// [size_is(keyLength)] BYTE* key;
///
typedef struct tdTPM_STORE_PRIVKEY {
UINT32 keyLength;
UINT8* key;
} TPM_STORE_PRIVKEY;
///
/// Part 2, section 10.6: TPM_STORE_ASYMKEY
///
typedef struct tdTPM_STORE_ASYMKEY {
// pos len total
TPM_PAYLOAD_TYPE payload; // 0 1 1
TPM_SECRET usageAuth; // 1 20 21
TPM_SECRET migrationAuth; // 21 20 41
TPM_DIGEST pubDataDigest; // 41 20 61
TPM_STORE_PRIVKEY privKey; // 61 132-151 193-214
} TPM_STORE_ASYMKEY;
///
/// Part 2, section 10.8: TPM_MIGRATE_ASYMKEY
/// [size_is(partPrivKeyLen)] BYTE* partPrivKey;
///
typedef struct tdTPM_MIGRATE_ASYMKEY {
// pos len total
TPM_PAYLOAD_TYPE payload; // 0 1 1
TPM_SECRET usageAuth; // 1 20 21
TPM_DIGEST pubDataDigest; // 21 20 41
UINT32 partPrivKeyLen; // 41 4 45
UINT8* partPrivKey; // 45 112-127 157-172
} TPM_MIGRATE_ASYMKEY;
///
/// Part 2, section 10.9: TPM_KEY_CONTROL
///
#define TPM_KEY_CONTROL_OWNER_EVICT ((UINT32)0x00000001)
//
// Part 2, section 11: Signed Structures
//
///
/// Part 2, section 11.1: TPM_CERTIFY_INFO Structure
///
typedef struct tdTPM_CERTIFY_INFO {
TPM_STRUCT_VER version;
TPM_KEY_USAGE keyUsage;
TPM_KEY_FLAGS keyFlags;
TPM_AUTH_DATA_USAGE authDataUsage;
TPM_KEY_PARMS algorithmParms;
TPM_DIGEST pubkeyDigest;
TPM_NONCE data;
BOOLEAN parentPCRStatus;
UINT32 PCRInfoSize;
UINT8* PCRInfo;
} TPM_CERTIFY_INFO;
///
/// Part 2, section 11.2: TPM_CERTIFY_INFO2 Structure
///
typedef struct tdTPM_CERTIFY_INFO2 {
TPM_STRUCTURE_TAG tag;
UINT8 fill;
TPM_PAYLOAD_TYPE payloadType;
TPM_KEY_USAGE keyUsage;
TPM_KEY_FLAGS keyFlags;
TPM_AUTH_DATA_USAGE authDataUsage;
TPM_KEY_PARMS algorithmParms;
TPM_DIGEST pubkeyDigest;
TPM_NONCE data;
BOOLEAN parentPCRStatus;
UINT32 PCRInfoSize;
UINT8* PCRInfo;
UINT32 migrationAuthoritySize;
UINT8* migrationAuthority;
} TPM_CERTIFY_INFO2;
///
/// Part 2, section 11.3 TPM_QUOTE_INFO Structure
///
typedef struct tdTPM_QUOTE_INFO {
TPM_STRUCT_VER version;
UINT8 fixed[4];
TPM_COMPOSITE_HASH digestValue;
TPM_NONCE externalData;
} TPM_QUOTE_INFO;
///
/// Part 2, section 11.4 TPM_QUOTE_INFO2 Structure
///
typedef struct tdTPM_QUOTE_INFO2 {
TPM_STRUCTURE_TAG tag;
UINT8 fixed[4];
TPM_NONCE externalData;
TPM_PCR_INFO_SHORT infoShort;
} TPM_QUOTE_INFO2;
//
// Part 2, section 12: Identity Structures
//
///
/// Part 2, section 12.1 TPM_EK_BLOB
///
typedef struct tdTPM_EK_BLOB {
TPM_STRUCTURE_TAG tag;
TPM_EK_TYPE ekType;
UINT32 blobSize;
UINT8* blob;
} TPM_EK_BLOB;
///
/// Part 2, section 12.2 TPM_EK_BLOB_ACTIVATE
///
typedef struct tdTPM_EK_BLOB_ACTIVATE {
TPM_STRUCTURE_TAG tag;
TPM_SYMMETRIC_KEY sessionKey;
TPM_DIGEST idDigest;
TPM_PCR_INFO_SHORT pcrInfo;
} TPM_EK_BLOB_ACTIVATE;
///
/// Part 2, section 12.3 TPM_EK_BLOB_AUTH
///
typedef struct tdTPM_EK_BLOB_AUTH {
TPM_STRUCTURE_TAG tag;
TPM_SECRET authValue;
} TPM_EK_BLOB_AUTH;
///
/// Part 2, section 12.5 TPM_IDENTITY_CONTENTS
///
typedef struct tdTPM_IDENTITY_CONTENTS {
TPM_STRUCT_VER ver;
UINT32 ordinal;
TPM_CHOSENID_HASH labelPrivCADigest;
TPM_PUBKEY identityPubKey;
} TPM_IDENTITY_CONTENTS;
///
/// Part 2, section 12.6 TPM_IDENTITY_REQ
///
typedef struct tdTPM_IDENTITY_REQ {
UINT32 asymSize;
UINT32 symSize;
TPM_KEY_PARMS asymAlgorithm;
TPM_KEY_PARMS symAlgorithm;
UINT8* asymBlob;
UINT8* symBlob;
} TPM_IDENTITY_REQ;
///
/// Part 2, section 12.7 TPM_IDENTITY_PROOF
///
typedef struct tdTPM_IDENTITY_PROOF {
TPM_STRUCT_VER ver;
UINT32 labelSize;
UINT32 identityBindingSize;
UINT32 endorsementSize;
UINT32 platformSize;
UINT32 conformanceSize;
TPM_PUBKEY identityKey;
UINT8* labelArea;
UINT8* identityBinding;
UINT8* endorsementCredential;
UINT8* platformCredential;
UINT8* conformanceCredential;
} TPM_IDENTITY_PROOF;
///
/// Part 2, section 12.8 TPM_ASYM_CA_CONTENTS
///
typedef struct tdTPM_ASYM_CA_CONTENTS {
TPM_SYMMETRIC_KEY sessionKey;
TPM_DIGEST idDigest;
} TPM_ASYM_CA_CONTENTS;
///
/// Part 2, section 12.9 TPM_SYM_CA_ATTESTATION
///
typedef struct tdTPM_SYM_CA_ATTESTATION {
UINT32 credSize;
TPM_KEY_PARMS algorithm;
UINT8* credential;
} TPM_SYM_CA_ATTESTATION;
///
/// Part 2, section 15: Tick Structures
/// Placed here out of order because definitions are used in section 13.
///
typedef struct tdTPM_CURRENT_TICKS {
TPM_STRUCTURE_TAG tag; ///< TPM_TAG_CURRENT_TICKS
UINT64 currentTicks;   ///< Tick count since start of the timing session
UINT16 tickRate;       ///< Microseconds per tick (per TPM 1.2 spec)
TPM_NONCE tickNonce;   ///< Nonce created at session start
} TPM_CURRENT_TICKS;
///
/// Part 2, section 13: Transport structures
///
///
/// Part 2, section 13.1: TPM _TRANSPORT_PUBLIC
///
typedef struct tdTPM_TRANSPORT_PUBLIC {
TPM_STRUCTURE_TAG tag;
TPM_TRANSPORT_ATTRIBUTES transAttributes;
TPM_ALGORITHM_ID algId;
TPM_ENC_SCHEME encScheme;
} TPM_TRANSPORT_PUBLIC;
//
// Part 2, section 13.1.1 TPM_TRANSPORT_ATTRIBUTES Definitions
//
#define TPM_TRANSPORT_ENCRYPT ((UINT32)BIT0)
#define TPM_TRANSPORT_LOG ((UINT32)BIT1)
#define TPM_TRANSPORT_EXCLUSIVE ((UINT32)BIT2)
///
/// Part 2, section 13.2 TPM_TRANSPORT_INTERNAL
///
typedef struct tdTPM_TRANSPORT_INTERNAL {
TPM_STRUCTURE_TAG tag;
TPM_AUTHDATA authData;
TPM_TRANSPORT_PUBLIC transPublic;
TPM_TRANSHANDLE transHandle;
TPM_NONCE transNonceEven;
TPM_DIGEST transDigest;
} TPM_TRANSPORT_INTERNAL;
///
/// Part 2, section 13.3 TPM_TRANSPORT_LOG_IN structure
///
typedef struct tdTPM_TRANSPORT_LOG_IN {
TPM_STRUCTURE_TAG tag;
TPM_DIGEST parameters;
TPM_DIGEST pubKeyHash;
} TPM_TRANSPORT_LOG_IN;
///
/// Part 2, section 13.4 TPM_TRANSPORT_LOG_OUT structure
///
typedef struct tdTPM_TRANSPORT_LOG_OUT {
TPM_STRUCTURE_TAG tag;
TPM_CURRENT_TICKS currentTicks;
TPM_DIGEST parameters;
TPM_MODIFIER_INDICATOR locality;
} TPM_TRANSPORT_LOG_OUT;
///
/// Part 2, section 13.5 TPM_TRANSPORT_AUTH structure
///
typedef struct tdTPM_TRANSPORT_AUTH {
TPM_STRUCTURE_TAG tag;
TPM_AUTHDATA authData;
} TPM_TRANSPORT_AUTH;
//
// Part 2, section 14: Audit Structures
//
///
/// Part 2, section 14.1 TPM_AUDIT_EVENT_IN structure
///
typedef struct tdTPM_AUDIT_EVENT_IN {
TPM_STRUCTURE_TAG tag;
TPM_DIGEST inputParms; ///< Digest of the audited command's input parameters
TPM_COUNTER_VALUE auditCount;
} TPM_AUDIT_EVENT_IN;
///
/// Part 2, section 14.2 TPM_AUDIT_EVENT_OUT structure
///
typedef struct tdTPM_AUDIT_EVENT_OUT {
TPM_STRUCTURE_TAG tag;
TPM_COMMAND_CODE ordinal; ///< Ordinal of the audited command
TPM_DIGEST outputParms; ///< Digest of the audited command's output parameters
TPM_COUNTER_VALUE auditCount;
TPM_RESULT returnCode; ///< Return code produced by the audited command
} TPM_AUDIT_EVENT_OUT;
//
// Part 2, section 16: Return Codes
//
// Every TPM 1.2 return code is an offset from TPM_BASE (defined earlier in
// this header). TPM_NON_FATAL is the offset used by the trailing group of
// codes (TPM_RETRY and later), which the spec names as non-fatal.
//
#define TPM_VENDOR_ERROR TPM_Vendor_Specific32
#define TPM_NON_FATAL 0x00000800
#define TPM_SUCCESS ((TPM_RESULT)TPM_BASE)
#define TPM_AUTHFAIL ((TPM_RESULT)(TPM_BASE + 1))
#define TPM_BADINDEX ((TPM_RESULT)(TPM_BASE + 2))
#define TPM_BAD_PARAMETER ((TPM_RESULT)(TPM_BASE + 3))
#define TPM_AUDITFAILURE ((TPM_RESULT)(TPM_BASE + 4))
#define TPM_CLEAR_DISABLED ((TPM_RESULT)(TPM_BASE + 5))
#define TPM_DEACTIVATED ((TPM_RESULT)(TPM_BASE + 6))
#define TPM_DISABLED ((TPM_RESULT)(TPM_BASE + 7))
#define TPM_DISABLED_CMD ((TPM_RESULT)(TPM_BASE + 8))
#define TPM_FAIL ((TPM_RESULT)(TPM_BASE + 9))
#define TPM_BAD_ORDINAL ((TPM_RESULT)(TPM_BASE + 10))
#define TPM_INSTALL_DISABLED ((TPM_RESULT)(TPM_BASE + 11))
#define TPM_INVALID_KEYHANDLE ((TPM_RESULT)(TPM_BASE + 12))
#define TPM_KEYNOTFOUND ((TPM_RESULT)(TPM_BASE + 13))
#define TPM_INAPPROPRIATE_ENC ((TPM_RESULT)(TPM_BASE + 14))
#define TPM_MIGRATEFAIL ((TPM_RESULT)(TPM_BASE + 15))
#define TPM_INVALID_PCR_INFO ((TPM_RESULT)(TPM_BASE + 16))
#define TPM_NOSPACE ((TPM_RESULT)(TPM_BASE + 17))
#define TPM_NOSRK ((TPM_RESULT)(TPM_BASE + 18))
#define TPM_NOTSEALED_BLOB ((TPM_RESULT)(TPM_BASE + 19))
#define TPM_OWNER_SET ((TPM_RESULT)(TPM_BASE + 20))
#define TPM_RESOURCES ((TPM_RESULT)(TPM_BASE + 21))
#define TPM_SHORTRANDOM ((TPM_RESULT)(TPM_BASE + 22))
#define TPM_SIZE ((TPM_RESULT)(TPM_BASE + 23))
#define TPM_WRONGPCRVAL ((TPM_RESULT)(TPM_BASE + 24))
#define TPM_BAD_PARAM_SIZE ((TPM_RESULT)(TPM_BASE + 25))
#define TPM_SHA_THREAD ((TPM_RESULT)(TPM_BASE + 26))
#define TPM_SHA_ERROR ((TPM_RESULT)(TPM_BASE + 27))
#define TPM_FAILEDSELFTEST ((TPM_RESULT)(TPM_BASE + 28))
#define TPM_AUTH2FAIL ((TPM_RESULT)(TPM_BASE + 29))
#define TPM_BADTAG ((TPM_RESULT)(TPM_BASE + 30))
#define TPM_IOERROR ((TPM_RESULT)(TPM_BASE + 31))
#define TPM_ENCRYPT_ERROR ((TPM_RESULT)(TPM_BASE + 32))
#define TPM_DECRYPT_ERROR ((TPM_RESULT)(TPM_BASE + 33))
#define TPM_INVALID_AUTHHANDLE ((TPM_RESULT)(TPM_BASE + 34))
#define TPM_NO_ENDORSEMENT ((TPM_RESULT)(TPM_BASE + 35))
#define TPM_INVALID_KEYUSAGE ((TPM_RESULT)(TPM_BASE + 36))
#define TPM_WRONG_ENTITYTYPE ((TPM_RESULT)(TPM_BASE + 37))
#define TPM_INVALID_POSTINIT ((TPM_RESULT)(TPM_BASE + 38))
#define TPM_INAPPROPRIATE_SIG ((TPM_RESULT)(TPM_BASE + 39))
#define TPM_BAD_KEY_PROPERTY ((TPM_RESULT)(TPM_BASE + 40))
#define TPM_BAD_MIGRATION ((TPM_RESULT)(TPM_BASE + 41))
#define TPM_BAD_SCHEME ((TPM_RESULT)(TPM_BASE + 42))
#define TPM_BAD_DATASIZE ((TPM_RESULT)(TPM_BASE + 43))
#define TPM_BAD_MODE ((TPM_RESULT)(TPM_BASE + 44))
#define TPM_BAD_PRESENCE ((TPM_RESULT)(TPM_BASE + 45))
#define TPM_BAD_VERSION ((TPM_RESULT)(TPM_BASE + 46))
#define TPM_NO_WRAP_TRANSPORT ((TPM_RESULT)(TPM_BASE + 47))
#define TPM_AUDITFAIL_UNSUCCESSFUL ((TPM_RESULT)(TPM_BASE + 48))
#define TPM_AUDITFAIL_SUCCESSFUL ((TPM_RESULT)(TPM_BASE + 49))
#define TPM_NOTRESETABLE ((TPM_RESULT)(TPM_BASE + 50))
#define TPM_NOTLOCAL ((TPM_RESULT)(TPM_BASE + 51))
#define TPM_BAD_TYPE ((TPM_RESULT)(TPM_BASE + 52))
#define TPM_INVALID_RESOURCE ((TPM_RESULT)(TPM_BASE + 53))
#define TPM_NOTFIPS ((TPM_RESULT)(TPM_BASE + 54))
#define TPM_INVALID_FAMILY ((TPM_RESULT)(TPM_BASE + 55))
#define TPM_NO_NV_PERMISSION ((TPM_RESULT)(TPM_BASE + 56))
#define TPM_REQUIRES_SIGN ((TPM_RESULT)(TPM_BASE + 57))
#define TPM_KEY_NOTSUPPORTED ((TPM_RESULT)(TPM_BASE + 58))
#define TPM_AUTH_CONFLICT ((TPM_RESULT)(TPM_BASE + 59))
#define TPM_AREA_LOCKED ((TPM_RESULT)(TPM_BASE + 60))
#define TPM_BAD_LOCALITY ((TPM_RESULT)(TPM_BASE + 61))
#define TPM_READ_ONLY ((TPM_RESULT)(TPM_BASE + 62))
#define TPM_PER_NOWRITE ((TPM_RESULT)(TPM_BASE + 63))
#define TPM_FAMILYCOUNT ((TPM_RESULT)(TPM_BASE + 64))
#define TPM_WRITE_LOCKED ((TPM_RESULT)(TPM_BASE + 65))
#define TPM_BAD_ATTRIBUTES ((TPM_RESULT)(TPM_BASE + 66))
#define TPM_INVALID_STRUCTURE ((TPM_RESULT)(TPM_BASE + 67))
#define TPM_KEY_OWNER_CONTROL ((TPM_RESULT)(TPM_BASE + 68))
#define TPM_BAD_COUNTER ((TPM_RESULT)(TPM_BASE + 69))
#define TPM_NOT_FULLWRITE ((TPM_RESULT)(TPM_BASE + 70))
#define TPM_CONTEXT_GAP ((TPM_RESULT)(TPM_BASE + 71))
#define TPM_MAXNVWRITES ((TPM_RESULT)(TPM_BASE + 72))
#define TPM_NOOPERATOR ((TPM_RESULT)(TPM_BASE + 73))
#define TPM_RESOURCEMISSING ((TPM_RESULT)(TPM_BASE + 74))
#define TPM_DELEGATE_LOCK ((TPM_RESULT)(TPM_BASE + 75))
#define TPM_DELEGATE_FAMILY ((TPM_RESULT)(TPM_BASE + 76))
#define TPM_DELEGATE_ADMIN ((TPM_RESULT)(TPM_BASE + 77))
#define TPM_TRANSPORT_NOTEXCLUSIVE ((TPM_RESULT)(TPM_BASE + 78))
#define TPM_OWNER_CONTROL ((TPM_RESULT)(TPM_BASE + 79))
#define TPM_DAA_RESOURCES ((TPM_RESULT)(TPM_BASE + 80))
#define TPM_DAA_INPUT_DATA0 ((TPM_RESULT)(TPM_BASE + 81))
#define TPM_DAA_INPUT_DATA1 ((TPM_RESULT)(TPM_BASE + 82))
#define TPM_DAA_ISSUER_SETTINGS ((TPM_RESULT)(TPM_BASE + 83))
#define TPM_DAA_TPM_SETTINGS ((TPM_RESULT)(TPM_BASE + 84))
#define TPM_DAA_STAGE ((TPM_RESULT)(TPM_BASE + 85))
#define TPM_DAA_ISSUER_VALIDITY ((TPM_RESULT)(TPM_BASE + 86))
#define TPM_DAA_WRONG_W ((TPM_RESULT)(TPM_BASE + 87))
#define TPM_BAD_HANDLE ((TPM_RESULT)(TPM_BASE + 88))
#define TPM_BAD_DELEGATE ((TPM_RESULT)(TPM_BASE + 89))
#define TPM_BADCONTEXT ((TPM_RESULT)(TPM_BASE + 90))
#define TPM_TOOMANYCONTEXTS ((TPM_RESULT)(TPM_BASE + 91))
#define TPM_MA_TICKET_SIGNATURE ((TPM_RESULT)(TPM_BASE + 92))
#define TPM_MA_DESTINATION ((TPM_RESULT)(TPM_BASE + 93))
#define TPM_MA_SOURCE ((TPM_RESULT)(TPM_BASE + 94))
#define TPM_MA_AUTHORITY ((TPM_RESULT)(TPM_BASE + 95))
// Note: no code is defined at TPM_BASE + 96; the numbering resumes at 97.
#define TPM_PERMANENTEK ((TPM_RESULT)(TPM_BASE + 97))
#define TPM_BAD_SIGNATURE ((TPM_RESULT)(TPM_BASE + 98))
#define TPM_NOCONTEXTSPACE ((TPM_RESULT)(TPM_BASE + 99))
#define TPM_RETRY ((TPM_RESULT)(TPM_BASE + TPM_NON_FATAL))
#define TPM_NEEDS_SELFTEST ((TPM_RESULT)(TPM_BASE + TPM_NON_FATAL + 1))
#define TPM_DOING_SELFTEST ((TPM_RESULT)(TPM_BASE + TPM_NON_FATAL + 2))
#define TPM_DEFEND_LOCK_RUNNING ((TPM_RESULT)(TPM_BASE + TPM_NON_FATAL + 3))
//
// Part 2, section 17: Ordinals
//
// Ordinals are 32 bit values. The upper byte contains values that serve as
// flag indicators, the next byte contains values indicating what committee
// designated the ordinal, and the final two bytes contain the Command
// Ordinal Index.
// 3 2 1
// 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0 9 8 7 6 5 4 3 2 1 0
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
// |P|C|V| Reserved| Purview | Command Ordinal Index |
// +-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+-+
//
// Where:
//
// * P is Protected/Unprotected command. When 0 the command is a Protected
// command, when 1 the command is an Unprotected command.
//
// * C is Non-Connection/Connection related command. When 0 this command
// passes through to either the protected (TPM) or unprotected (TSS)
// components.
//
// * V is TPM/Vendor command. When 0 the command is TPM defined, when 1 the
// command is vendor defined.
//
// * All reserved area bits are set to 0.
//
// The TPM_ORD_* values below have all flag bits clear; the two TSC_ORD_*
// values at the end of this table (0x4000000A / 0x4000000B) have the C
// (Connection) flag, bit 30, set.
//
#define TPM_ORD_ActivateIdentity ((TPM_COMMAND_CODE)0x0000007A)
#define TPM_ORD_AuthorizeMigrationKey ((TPM_COMMAND_CODE)0x0000002B)
#define TPM_ORD_CertifyKey ((TPM_COMMAND_CODE)0x00000032)
#define TPM_ORD_CertifyKey2 ((TPM_COMMAND_CODE)0x00000033)
#define TPM_ORD_CertifySelfTest ((TPM_COMMAND_CODE)0x00000052)
#define TPM_ORD_ChangeAuth ((TPM_COMMAND_CODE)0x0000000C)
#define TPM_ORD_ChangeAuthAsymFinish ((TPM_COMMAND_CODE)0x0000000F)
#define TPM_ORD_ChangeAuthAsymStart ((TPM_COMMAND_CODE)0x0000000E)
#define TPM_ORD_ChangeAuthOwner ((TPM_COMMAND_CODE)0x00000010)
#define TPM_ORD_CMK_ApproveMA ((TPM_COMMAND_CODE)0x0000001D)
#define TPM_ORD_CMK_ConvertMigration ((TPM_COMMAND_CODE)0x00000024)
#define TPM_ORD_CMK_CreateBlob ((TPM_COMMAND_CODE)0x0000001B)
#define TPM_ORD_CMK_CreateKey ((TPM_COMMAND_CODE)0x00000013)
#define TPM_ORD_CMK_CreateTicket ((TPM_COMMAND_CODE)0x00000012)
#define TPM_ORD_CMK_SetRestrictions ((TPM_COMMAND_CODE)0x0000001C)
#define TPM_ORD_ContinueSelfTest ((TPM_COMMAND_CODE)0x00000053)
#define TPM_ORD_ConvertMigrationBlob ((TPM_COMMAND_CODE)0x0000002A)
#define TPM_ORD_CreateCounter ((TPM_COMMAND_CODE)0x000000DC)
#define TPM_ORD_CreateEndorsementKeyPair ((TPM_COMMAND_CODE)0x00000078)
#define TPM_ORD_CreateMaintenanceArchive ((TPM_COMMAND_CODE)0x0000002C)
#define TPM_ORD_CreateMigrationBlob ((TPM_COMMAND_CODE)0x00000028)
#define TPM_ORD_CreateRevocableEK ((TPM_COMMAND_CODE)0x0000007F)
#define TPM_ORD_CreateWrapKey ((TPM_COMMAND_CODE)0x0000001F)
#define TPM_ORD_DAA_JOIN ((TPM_COMMAND_CODE)0x00000029)
#define TPM_ORD_DAA_SIGN ((TPM_COMMAND_CODE)0x00000031)
#define TPM_ORD_Delegate_CreateKeyDelegation ((TPM_COMMAND_CODE)0x000000D4)
#define TPM_ORD_Delegate_CreateOwnerDelegation ((TPM_COMMAND_CODE)0x000000D5)
#define TPM_ORD_Delegate_LoadOwnerDelegation ((TPM_COMMAND_CODE)0x000000D8)
#define TPM_ORD_Delegate_Manage ((TPM_COMMAND_CODE)0x000000D2)
#define TPM_ORD_Delegate_ReadTable ((TPM_COMMAND_CODE)0x000000DB)
#define TPM_ORD_Delegate_UpdateVerification ((TPM_COMMAND_CODE)0x000000D1)
#define TPM_ORD_Delegate_VerifyDelegation ((TPM_COMMAND_CODE)0x000000D6)
#define TPM_ORD_DirRead ((TPM_COMMAND_CODE)0x0000001A)
#define TPM_ORD_DirWriteAuth ((TPM_COMMAND_CODE)0x00000019)
#define TPM_ORD_DisableForceClear ((TPM_COMMAND_CODE)0x0000005E)
#define TPM_ORD_DisableOwnerClear ((TPM_COMMAND_CODE)0x0000005C)
#define TPM_ORD_DisablePubekRead ((TPM_COMMAND_CODE)0x0000007E)
#define TPM_ORD_DSAP ((TPM_COMMAND_CODE)0x00000011)
#define TPM_ORD_EstablishTransport ((TPM_COMMAND_CODE)0x000000E6)
#define TPM_ORD_EvictKey ((TPM_COMMAND_CODE)0x00000022)
#define TPM_ORD_ExecuteTransport ((TPM_COMMAND_CODE)0x000000E7)
#define TPM_ORD_Extend ((TPM_COMMAND_CODE)0x00000014)
#define TPM_ORD_FieldUpgrade ((TPM_COMMAND_CODE)0x000000AA)
#define TPM_ORD_FlushSpecific ((TPM_COMMAND_CODE)0x000000BA)
#define TPM_ORD_ForceClear ((TPM_COMMAND_CODE)0x0000005D)
#define TPM_ORD_GetAuditDigest ((TPM_COMMAND_CODE)0x00000085)
#define TPM_ORD_GetAuditDigestSigned ((TPM_COMMAND_CODE)0x00000086)
#define TPM_ORD_GetAuditEvent ((TPM_COMMAND_CODE)0x00000082)
#define TPM_ORD_GetAuditEventSigned ((TPM_COMMAND_CODE)0x00000083)
#define TPM_ORD_GetCapability ((TPM_COMMAND_CODE)0x00000065)
#define TPM_ORD_GetCapabilityOwner ((TPM_COMMAND_CODE)0x00000066)
#define TPM_ORD_GetCapabilitySigned ((TPM_COMMAND_CODE)0x00000064)
#define TPM_ORD_GetOrdinalAuditStatus ((TPM_COMMAND_CODE)0x0000008C)
#define TPM_ORD_GetPubKey ((TPM_COMMAND_CODE)0x00000021)
#define TPM_ORD_GetRandom ((TPM_COMMAND_CODE)0x00000046)
#define TPM_ORD_GetTestResult ((TPM_COMMAND_CODE)0x00000054)
#define TPM_ORD_GetTicks ((TPM_COMMAND_CODE)0x000000F1)
#define TPM_ORD_IncrementCounter ((TPM_COMMAND_CODE)0x000000DD)
#define TPM_ORD_Init ((TPM_COMMAND_CODE)0x00000097)
#define TPM_ORD_KeyControlOwner ((TPM_COMMAND_CODE)0x00000023)
#define TPM_ORD_KillMaintenanceFeature ((TPM_COMMAND_CODE)0x0000002E)
#define TPM_ORD_LoadAuthContext ((TPM_COMMAND_CODE)0x000000B7)
#define TPM_ORD_LoadContext ((TPM_COMMAND_CODE)0x000000B9)
#define TPM_ORD_LoadKey ((TPM_COMMAND_CODE)0x00000020)
#define TPM_ORD_LoadKey2 ((TPM_COMMAND_CODE)0x00000041)
#define TPM_ORD_LoadKeyContext ((TPM_COMMAND_CODE)0x000000B5)
#define TPM_ORD_LoadMaintenanceArchive ((TPM_COMMAND_CODE)0x0000002D)
#define TPM_ORD_LoadManuMaintPub ((TPM_COMMAND_CODE)0x0000002F)
#define TPM_ORD_MakeIdentity ((TPM_COMMAND_CODE)0x00000079)
#define TPM_ORD_MigrateKey ((TPM_COMMAND_CODE)0x00000025)
#define TPM_ORD_NV_DefineSpace ((TPM_COMMAND_CODE)0x000000CC)
#define TPM_ORD_NV_ReadValue ((TPM_COMMAND_CODE)0x000000CF)
#define TPM_ORD_NV_ReadValueAuth ((TPM_COMMAND_CODE)0x000000D0)
#define TPM_ORD_NV_WriteValue ((TPM_COMMAND_CODE)0x000000CD)
#define TPM_ORD_NV_WriteValueAuth ((TPM_COMMAND_CODE)0x000000CE)
#define TPM_ORD_OIAP ((TPM_COMMAND_CODE)0x0000000A)
#define TPM_ORD_OSAP ((TPM_COMMAND_CODE)0x0000000B)
#define TPM_ORD_OwnerClear ((TPM_COMMAND_CODE)0x0000005B)
#define TPM_ORD_OwnerReadInternalPub ((TPM_COMMAND_CODE)0x00000081)
#define TPM_ORD_OwnerReadPubek ((TPM_COMMAND_CODE)0x0000007D)
#define TPM_ORD_OwnerSetDisable ((TPM_COMMAND_CODE)0x0000006E)
#define TPM_ORD_PCR_Reset ((TPM_COMMAND_CODE)0x000000C8)
#define TPM_ORD_PcrRead ((TPM_COMMAND_CODE)0x00000015)
#define TPM_ORD_PhysicalDisable ((TPM_COMMAND_CODE)0x00000070)
#define TPM_ORD_PhysicalEnable ((TPM_COMMAND_CODE)0x0000006F)
#define TPM_ORD_PhysicalSetDeactivated ((TPM_COMMAND_CODE)0x00000072)
#define TPM_ORD_Quote ((TPM_COMMAND_CODE)0x00000016)
#define TPM_ORD_Quote2 ((TPM_COMMAND_CODE)0x0000003E)
#define TPM_ORD_ReadCounter ((TPM_COMMAND_CODE)0x000000DE)
#define TPM_ORD_ReadManuMaintPub ((TPM_COMMAND_CODE)0x00000030)
#define TPM_ORD_ReadPubek ((TPM_COMMAND_CODE)0x0000007C)
#define TPM_ORD_ReleaseCounter ((TPM_COMMAND_CODE)0x000000DF)
#define TPM_ORD_ReleaseCounterOwner ((TPM_COMMAND_CODE)0x000000E0)
#define TPM_ORD_ReleaseTransportSigned ((TPM_COMMAND_CODE)0x000000E8)
#define TPM_ORD_Reset ((TPM_COMMAND_CODE)0x0000005A)
#define TPM_ORD_ResetLockValue ((TPM_COMMAND_CODE)0x00000040)
#define TPM_ORD_RevokeTrust ((TPM_COMMAND_CODE)0x00000080)
#define TPM_ORD_SaveAuthContext ((TPM_COMMAND_CODE)0x000000B6)
#define TPM_ORD_SaveContext ((TPM_COMMAND_CODE)0x000000B8)
#define TPM_ORD_SaveKeyContext ((TPM_COMMAND_CODE)0x000000B4)
#define TPM_ORD_SaveState ((TPM_COMMAND_CODE)0x00000098)
#define TPM_ORD_Seal ((TPM_COMMAND_CODE)0x00000017)
#define TPM_ORD_Sealx ((TPM_COMMAND_CODE)0x0000003D)
#define TPM_ORD_SelfTestFull ((TPM_COMMAND_CODE)0x00000050)
#define TPM_ORD_SetCapability ((TPM_COMMAND_CODE)0x0000003F)
#define TPM_ORD_SetOperatorAuth ((TPM_COMMAND_CODE)0x00000074)
#define TPM_ORD_SetOrdinalAuditStatus ((TPM_COMMAND_CODE)0x0000008D)
#define TPM_ORD_SetOwnerInstall ((TPM_COMMAND_CODE)0x00000071)
#define TPM_ORD_SetOwnerPointer ((TPM_COMMAND_CODE)0x00000075)
#define TPM_ORD_SetRedirection ((TPM_COMMAND_CODE)0x0000009A)
#define TPM_ORD_SetTempDeactivated ((TPM_COMMAND_CODE)0x00000073)
#define TPM_ORD_SHA1Complete ((TPM_COMMAND_CODE)0x000000A2)
#define TPM_ORD_SHA1CompleteExtend ((TPM_COMMAND_CODE)0x000000A3)
#define TPM_ORD_SHA1Start ((TPM_COMMAND_CODE)0x000000A0)
#define TPM_ORD_SHA1Update ((TPM_COMMAND_CODE)0x000000A1)
#define TPM_ORD_Sign ((TPM_COMMAND_CODE)0x0000003C)
#define TPM_ORD_Startup ((TPM_COMMAND_CODE)0x00000099)
#define TPM_ORD_StirRandom ((TPM_COMMAND_CODE)0x00000047)
#define TPM_ORD_TakeOwnership ((TPM_COMMAND_CODE)0x0000000D)
#define TPM_ORD_Terminate_Handle ((TPM_COMMAND_CODE)0x00000096)
#define TPM_ORD_TickStampBlob ((TPM_COMMAND_CODE)0x000000F2)
#define TPM_ORD_UnBind ((TPM_COMMAND_CODE)0x0000001E)
#define TPM_ORD_Unseal ((TPM_COMMAND_CODE)0x00000018)
#define TSC_ORD_PhysicalPresence ((TPM_COMMAND_CODE)0x4000000A)
#define TSC_ORD_ResetEstablishmentBit ((TPM_COMMAND_CODE)0x4000000B)
//
// Part 2, section 18: Context structures
//
///
/// Part 2, section 18.1: TPM_CONTEXT_BLOB
/// The UINT8* members exist only in this in-memory view; on the wire the
/// data areas are serialized inline after their size fields.
///
typedef struct tdTPM_CONTEXT_BLOB {
TPM_STRUCTURE_TAG tag;
TPM_RESOURCE_TYPE resourceType;
TPM_HANDLE handle;
UINT8 label[16];
UINT32 contextCount;
TPM_DIGEST integrityDigest;
UINT32 additionalSize; ///< Size, in bytes, of additionalData
UINT8* additionalData;
UINT32 sensitiveSize; ///< Size, in bytes, of sensitiveData
UINT8* sensitiveData;
} TPM_CONTEXT_BLOB;
///
/// Part 2, section 18.2 TPM_CONTEXT_SENSITIVE
///
typedef struct tdTPM_CONTEXT_SENSITIVE {
TPM_STRUCTURE_TAG tag;
TPM_NONCE contextNonce;
UINT32 internalSize; ///< Size, in bytes, of internalData
UINT8* internalData;
} TPM_CONTEXT_SENSITIVE;
//
// Part 2, section 19: NV Structures
//
//
// Part 2, section 19.1.1: Required TPM_NV_INDEX values
//
#define TPM_NV_INDEX_LOCK ((UINT32)0xffffffff)
#define TPM_NV_INDEX0 ((UINT32)0x00000000)
#define TPM_NV_INDEX_DIR ((UINT32)0x10000001)
#define TPM_NV_INDEX_EKCert ((UINT32)0x0000f000)
#define TPM_NV_INDEX_TPM_CC ((UINT32)0x0000f001)
#define TPM_NV_INDEX_PlatformCert ((UINT32)0x0000f002)
#define TPM_NV_INDEX_Platform_CC ((UINT32)0x0000f003)
//
// Part 2, section 19.1.2: Reserved Index values
//
#define TPM_NV_INDEX_TSS_BASE ((UINT32)0x00011100)
#define TPM_NV_INDEX_PC_BASE ((UINT32)0x00011200)
#define TPM_NV_INDEX_SERVER_BASE ((UINT32)0x00011300)
#define TPM_NV_INDEX_MOBILE_BASE ((UINT32)0x00011400)
#define TPM_NV_INDEX_PERIPHERAL_BASE ((UINT32)0x00011500)
#define TPM_NV_INDEX_GROUP_RESV_BASE ((UINT32)0x00010000)
///
/// Part 2, section 19.2: TPM_NV_ATTRIBUTES
/// attributes is a bitmask built from the TPM_NV_PER_* flags below.
///
typedef struct tdTPM_NV_ATTRIBUTES {
TPM_STRUCTURE_TAG tag;
UINT32 attributes;
} TPM_NV_ATTRIBUTES;
#define TPM_NV_PER_READ_STCLEAR (BIT31)
#define TPM_NV_PER_AUTHREAD (BIT18)
#define TPM_NV_PER_OWNERREAD (BIT17)
#define TPM_NV_PER_PPREAD (BIT16)
#define TPM_NV_PER_GLOBALLOCK (BIT15)
#define TPM_NV_PER_WRITE_STCLEAR (BIT14)
#define TPM_NV_PER_WRITEDEFINE (BIT13)
#define TPM_NV_PER_WRITEALL (BIT12)
#define TPM_NV_PER_AUTHWRITE (BIT2)
#define TPM_NV_PER_OWNERWRITE (BIT1)
#define TPM_NV_PER_PPWRITE (BIT0)
///
/// Part 2, section 19.3: TPM_NV_DATA_PUBLIC
///
typedef struct tdTPM_NV_DATA_PUBLIC {
TPM_STRUCTURE_TAG tag;
TPM_NV_INDEX nvIndex;
TPM_PCR_INFO_SHORT pcrInfoRead;
TPM_PCR_INFO_SHORT pcrInfoWrite;
TPM_NV_ATTRIBUTES permission;
BOOLEAN bReadSTClear;
BOOLEAN bWriteSTClear;
BOOLEAN bWriteDefine;
UINT32 dataSize; ///< Size, in bytes, of the NV data area
} TPM_NV_DATA_PUBLIC;
//
// Part 2, section 20: Delegate Structures
//
#define TPM_DEL_OWNER_BITS ((UINT32)0x00000001)
#define TPM_DEL_KEY_BITS ((UINT32)0x00000002)
///
/// Part 2, section 20.2: Delegate Definitions
/// per1/per2 are bitmasks; which flag set applies (TPM_DELEGATE_* or
/// TPM_KEY_DELEGATE_*) is selected by delegateType (TPM_DEL_*_BITS above).
///
typedef struct tdTPM_DELEGATIONS {
TPM_STRUCTURE_TAG tag;
UINT32 delegateType;
UINT32 per1;
UINT32 per2;
} TPM_DELEGATIONS;
//
// Part 2, section 20.2.1: Owner Permission Settings
//
#define TPM_DELEGATE_SetOrdinalAuditStatus (BIT30)
#define TPM_DELEGATE_DirWriteAuth (BIT29)
#define TPM_DELEGATE_CMK_ApproveMA (BIT28)
#define TPM_DELEGATE_NV_WriteValue (BIT27)
#define TPM_DELEGATE_CMK_CreateTicket (BIT26)
#define TPM_DELEGATE_NV_ReadValue (BIT25)
#define TPM_DELEGATE_Delegate_LoadOwnerDelegation (BIT24)
#define TPM_DELEGATE_DAA_Join (BIT23)
#define TPM_DELEGATE_AuthorizeMigrationKey (BIT22)
#define TPM_DELEGATE_CreateMaintenanceArchive (BIT21)
#define TPM_DELEGATE_LoadMaintenanceArchive (BIT20)
#define TPM_DELEGATE_KillMaintenanceFeature (BIT19)
// Note: "Interal" (sic) below matches the name used by the original
// specification-derived header; do not "fix" it, callers depend on it.
#define TPM_DELEGATE_OwnerReadInteralPub (BIT18)
#define TPM_DELEGATE_ResetLockValue (BIT17)
#define TPM_DELEGATE_OwnerClear (BIT16)
#define TPM_DELEGATE_DisableOwnerClear (BIT15)
#define TPM_DELEGATE_NV_DefineSpace (BIT14)
#define TPM_DELEGATE_OwnerSetDisable (BIT13)
#define TPM_DELEGATE_SetCapability (BIT12)
#define TPM_DELEGATE_MakeIdentity (BIT11)
#define TPM_DELEGATE_ActivateIdentity (BIT10)
#define TPM_DELEGATE_OwnerReadPubek (BIT9)
#define TPM_DELEGATE_DisablePubekRead (BIT8)
#define TPM_DELEGATE_SetRedirection (BIT7)
#define TPM_DELEGATE_FieldUpgrade (BIT6)
#define TPM_DELEGATE_Delegate_UpdateVerification (BIT5)
#define TPM_DELEGATE_CreateCounter (BIT4)
#define TPM_DELEGATE_ReleaseCounterOwner (BIT3)
#define TPM_DELEGATE_DelegateManage (BIT2)
#define TPM_DELEGATE_Delegate_CreateOwnerDelegation (BIT1)
#define TPM_DELEGATE_DAA_Sign (BIT0)
//
// Part 2, section 20.2.3: Key Permission settings
//
#define TPM_KEY_DELEGATE_CMK_ConvertMigration (BIT28)
#define TPM_KEY_DELEGATE_TickStampBlob (BIT27)
#define TPM_KEY_DELEGATE_ChangeAuthAsymStart (BIT26)
#define TPM_KEY_DELEGATE_ChangeAuthAsymFinish (BIT25)
#define TPM_KEY_DELEGATE_CMK_CreateKey (BIT24)
#define TPM_KEY_DELEGATE_MigrateKey (BIT23)
#define TPM_KEY_DELEGATE_LoadKey2 (BIT22)
#define TPM_KEY_DELEGATE_EstablishTransport (BIT21)
#define TPM_KEY_DELEGATE_ReleaseTransportSigned (BIT20)
#define TPM_KEY_DELEGATE_Quote2 (BIT19)
#define TPM_KEY_DELEGATE_Sealx (BIT18)
#define TPM_KEY_DELEGATE_MakeIdentity (BIT17)
#define TPM_KEY_DELEGATE_ActivateIdentity (BIT16)
#define TPM_KEY_DELEGATE_GetAuditDigestSigned (BIT15)
#define TPM_KEY_DELEGATE_Sign (BIT14)
#define TPM_KEY_DELEGATE_CertifyKey2 (BIT13)
#define TPM_KEY_DELEGATE_CertifyKey (BIT12)
#define TPM_KEY_DELEGATE_CreateWrapKey (BIT11)
#define TPM_KEY_DELEGATE_CMK_CreateBlob (BIT10)
#define TPM_KEY_DELEGATE_CreateMigrationBlob (BIT9)
#define TPM_KEY_DELEGATE_ConvertMigrationBlob (BIT8)
#define TPM_KEY_DELEGATE_CreateKeyDelegation (BIT7)
#define TPM_KEY_DELEGATE_ChangeAuth (BIT6)
#define TPM_KEY_DELEGATE_GetPubKey (BIT5)
#define TPM_KEY_DELEGATE_UnBind (BIT4)
#define TPM_KEY_DELEGATE_Quote (BIT3)
#define TPM_KEY_DELEGATE_Unseal (BIT2)
#define TPM_KEY_DELEGATE_Seal (BIT1)
#define TPM_KEY_DELEGATE_LoadKey (BIT0)
//
// Part 2, section 20.3: TPM_FAMILY_FLAGS
//
#define TPM_DELEGATE_ADMIN_LOCK (BIT1)
#define TPM_FAMFLAG_ENABLE (BIT0)
///
/// Part 2, section 20.4: TPM_FAMILY_LABEL
///
typedef struct tdTPM_FAMILY_LABEL {
UINT8 label;
} TPM_FAMILY_LABEL;
///
/// Part 2, section 20.5: TPM_FAMILY_TABLE_ENTRY
///
typedef struct tdTPM_FAMILY_TABLE_ENTRY {
TPM_STRUCTURE_TAG tag;
TPM_FAMILY_LABEL label;
TPM_FAMILY_ID familyID;
TPM_FAMILY_VERIFICATION verificationCount;
TPM_FAMILY_FLAGS flags; ///< Combination of TPM_FAMILY_FLAGS bits (section 20.3 above)
} TPM_FAMILY_TABLE_ENTRY;
//
// Part 2, section 20.6: TPM_FAMILY_TABLE
//
#define TPM_NUM_FAMILY_TABLE_ENTRY_MIN 8
typedef struct tdTPM_FAMILY_TABLE {
TPM_FAMILY_TABLE_ENTRY famTableRow[TPM_NUM_FAMILY_TABLE_ENTRY_MIN];
} TPM_FAMILY_TABLE;
///
/// Part 2, section 20.7: TPM_DELEGATE_LABEL
///
typedef struct tdTPM_DELEGATE_LABEL {
UINT8 label;
} TPM_DELEGATE_LABEL;
///
/// Part 2, section 20.8: TPM_DELEGATE_PUBLIC
///
typedef struct tdTPM_DELEGATE_PUBLIC {
TPM_STRUCTURE_TAG tag;
TPM_DELEGATE_LABEL label;
TPM_PCR_INFO_SHORT pcrInfo;
TPM_DELEGATIONS permissions;
TPM_FAMILY_ID familyID;
TPM_FAMILY_VERIFICATION verificationCount;
} TPM_DELEGATE_PUBLIC;
///
/// Part 2, section 20.9: TPM_DELEGATE_TABLE_ROW
///
typedef struct tdTPM_DELEGATE_TABLE_ROW {
TPM_STRUCTURE_TAG tag;
TPM_DELEGATE_PUBLIC pub;
TPM_SECRET authValue;
} TPM_DELEGATE_TABLE_ROW;
//
// Part 2, section 20.10: TPM_DELEGATE_TABLE
//
#define TPM_NUM_DELEGATE_TABLE_ENTRY_MIN 2
typedef struct tdTPM_DELEGATE_TABLE {
TPM_DELEGATE_TABLE_ROW delRow[TPM_NUM_DELEGATE_TABLE_ENTRY_MIN];
} TPM_DELEGATE_TABLE;
///
/// Part 2, section 20.11: TPM_DELEGATE_SENSITIVE
///
typedef struct tdTPM_DELEGATE_SENSITIVE {
TPM_STRUCTURE_TAG tag;
TPM_SECRET authValue;
} TPM_DELEGATE_SENSITIVE;
///
/// Part 2, section 20.12: TPM_DELEGATE_OWNER_BLOB
/// UINT8* members are in-memory pointers paired with the preceding size fields.
///
typedef struct tdTPM_DELEGATE_OWNER_BLOB {
TPM_STRUCTURE_TAG tag;
TPM_DELEGATE_PUBLIC pub;
TPM_DIGEST integrityDigest;
UINT32 additionalSize; ///< Size, in bytes, of additionalArea
UINT8* additionalArea;
UINT32 sensitiveSize; ///< Size, in bytes, of sensitiveArea
UINT8* sensitiveArea;
} TPM_DELEGATE_OWNER_BLOB;
///
/// Part 2, section 20.13: TPM_DELEGATE_KEY_BLOB
///
typedef struct tdTPM_DELEGATE_KEY_BLOB {
TPM_STRUCTURE_TAG tag;
TPM_DELEGATE_PUBLIC pub;
TPM_DIGEST integrityDigest;
TPM_DIGEST pubKeyDigest;
UINT32 additionalSize; ///< Size, in bytes, of additionalArea
UINT8* additionalArea;
UINT32 sensitiveSize; ///< Size, in bytes, of sensitiveArea
UINT8* sensitiveArea;
} TPM_DELEGATE_KEY_BLOB;
//
// Part 2, section 20.14: TPM_FAMILY_OPERATION Values
//
#define TPM_FAMILY_CREATE ((UINT32)0x00000001)
#define TPM_FAMILY_ENABLE ((UINT32)0x00000002)
#define TPM_FAMILY_ADMIN ((UINT32)0x00000003)
#define TPM_FAMILY_INVALIDATE ((UINT32)0x00000004)
//
// Part 2, section 21.1: TPM_CAPABILITY_AREA for GetCapability
//
#define TPM_CAP_ORD ((TPM_CAPABILITY_AREA)0x00000001)
#define TPM_CAP_ALG ((TPM_CAPABILITY_AREA)0x00000002)
#define TPM_CAP_PID ((TPM_CAPABILITY_AREA)0x00000003)
#define TPM_CAP_FLAG ((TPM_CAPABILITY_AREA)0x00000004)
#define TPM_CAP_PROPERTY ((TPM_CAPABILITY_AREA)0x00000005)
#define TPM_CAP_VERSION ((TPM_CAPABILITY_AREA)0x00000006)
#define TPM_CAP_KEY_HANDLE ((TPM_CAPABILITY_AREA)0x00000007)
#define TPM_CAP_CHECK_LOADED ((TPM_CAPABILITY_AREA)0x00000008)
#define TPM_CAP_SYM_MODE ((TPM_CAPABILITY_AREA)0x00000009)
#define TPM_CAP_KEY_STATUS ((TPM_CAPABILITY_AREA)0x0000000C)
#define TPM_CAP_NV_LIST ((TPM_CAPABILITY_AREA)0x0000000D)
#define TPM_CAP_MFR ((TPM_CAPABILITY_AREA)0x00000010)
#define TPM_CAP_NV_INDEX ((TPM_CAPABILITY_AREA)0x00000011)
#define TPM_CAP_TRANS_ALG ((TPM_CAPABILITY_AREA)0x00000012)
#define TPM_CAP_HANDLE ((TPM_CAPABILITY_AREA)0x00000014)
#define TPM_CAP_TRANS_ES ((TPM_CAPABILITY_AREA)0x00000015)
#define TPM_CAP_AUTH_ENCRYPT ((TPM_CAPABILITY_AREA)0x00000017)
#define TPM_CAP_SELECT_SIZE ((TPM_CAPABILITY_AREA)0x00000018)
#define TPM_CAP_VERSION_VAL ((TPM_CAPABILITY_AREA)0x0000001A)
#define TPM_CAP_FLAG_PERMANENT ((TPM_CAPABILITY_AREA)0x00000108)
#define TPM_CAP_FLAG_VOLATILE ((TPM_CAPABILITY_AREA)0x00000109)
//
// Part 2, section 21.2: CAP_PROPERTY Subcap values for GetCapability
//
#define TPM_CAP_PROP_PCR ((TPM_CAPABILITY_AREA)0x00000101)
#define TPM_CAP_PROP_DIR ((TPM_CAPABILITY_AREA)0x00000102)
#define TPM_CAP_PROP_MANUFACTURER ((TPM_CAPABILITY_AREA)0x00000103)
#define TPM_CAP_PROP_KEYS ((TPM_CAPABILITY_AREA)0x00000104)
#define TPM_CAP_PROP_MIN_COUNTER ((TPM_CAPABILITY_AREA)0x00000107)
#define TPM_CAP_PROP_AUTHSESS ((TPM_CAPABILITY_AREA)0x0000010A)
#define TPM_CAP_PROP_TRANSESS ((TPM_CAPABILITY_AREA)0x0000010B)
#define TPM_CAP_PROP_COUNTERS ((TPM_CAPABILITY_AREA)0x0000010C)
#define TPM_CAP_PROP_MAX_AUTHSESS ((TPM_CAPABILITY_AREA)0x0000010D)
#define TPM_CAP_PROP_MAX_TRANSESS ((TPM_CAPABILITY_AREA)0x0000010E)
#define TPM_CAP_PROP_MAX_COUNTERS ((TPM_CAPABILITY_AREA)0x0000010F)
#define TPM_CAP_PROP_MAX_KEYS ((TPM_CAPABILITY_AREA)0x00000110)
#define TPM_CAP_PROP_OWNER ((TPM_CAPABILITY_AREA)0x00000111)
#define TPM_CAP_PROP_CONTEXT ((TPM_CAPABILITY_AREA)0x00000112)
#define TPM_CAP_PROP_MAX_CONTEXT ((TPM_CAPABILITY_AREA)0x00000113)
#define TPM_CAP_PROP_FAMILYROWS ((TPM_CAPABILITY_AREA)0x00000114)
#define TPM_CAP_PROP_TIS_TIMEOUT ((TPM_CAPABILITY_AREA)0x00000115)
#define TPM_CAP_PROP_STARTUP_EFFECT ((TPM_CAPABILITY_AREA)0x00000116)
#define TPM_CAP_PROP_DELEGATE_ROW ((TPM_CAPABILITY_AREA)0x00000117)
#define TPM_CAP_PROP_DAA_MAX ((TPM_CAPABILITY_AREA)0x00000119)
// Note: the missing TPM_ prefix on the next name is preserved from the
// original specification-derived header; callers depend on this spelling.
#define CAP_PROP_SESSION_DAA ((TPM_CAPABILITY_AREA)0x0000011A)
#define TPM_CAP_PROP_CONTEXT_DIST ((TPM_CAPABILITY_AREA)0x0000011B)
#define TPM_CAP_PROP_DAA_INTERRUPT ((TPM_CAPABILITY_AREA)0x0000011C)
#define TPM_CAP_PROP_SESSIONS ((TPM_CAPABILITY_AREA)0x0000011D)
#define TPM_CAP_PROP_MAX_SESSIONS ((TPM_CAPABILITY_AREA)0x0000011E)
#define TPM_CAP_PROP_CMK_RESTRICTION ((TPM_CAPABILITY_AREA)0x0000011F)
#define TPM_CAP_PROP_DURATION ((TPM_CAPABILITY_AREA)0x00000120)
#define TPM_CAP_PROP_ACTIVE_COUNTER ((TPM_CAPABILITY_AREA)0x00000122)
#define TPM_CAP_PROP_MAX_NV_AVAILABLE ((TPM_CAPABILITY_AREA)0x00000123)
#define TPM_CAP_PROP_INPUT_BUFFER ((TPM_CAPABILITY_AREA)0x00000124)
//
// Part 2, section 21.4: TPM_CAPABILITY_AREA for SetCapability
//
#define TPM_SET_PERM_FLAGS ((TPM_CAPABILITY_AREA)0x00000001)
#define TPM_SET_PERM_DATA ((TPM_CAPABILITY_AREA)0x00000002)
#define TPM_SET_STCLEAR_FLAGS ((TPM_CAPABILITY_AREA)0x00000003)
#define TPM_SET_STCLEAR_DATA ((TPM_CAPABILITY_AREA)0x00000004)
#define TPM_SET_STANY_FLAGS ((TPM_CAPABILITY_AREA)0x00000005)
#define TPM_SET_STANY_DATA ((TPM_CAPABILITY_AREA)0x00000006)
///
/// Part 2, section 21.6: TPM_CAP_VERSION_INFO
/// [size_is(vendorSpecificSize)] BYTE* vendorSpecific;
///
typedef struct tdTPM_CAP_VERSION_INFO {
TPM_STRUCTURE_TAG tag;
TPM_VERSION version;
UINT16 specLevel;
UINT8 errataRev;
UINT8 tpmVendorID[4];
UINT16 vendorSpecificSize; ///< Size, in bytes, of vendorSpecific
UINT8* vendorSpecific;
} TPM_CAP_VERSION_INFO;
///
/// Part 2, section 21.10: TPM_DA_ACTION_TYPE
/// Defined here, before sections 21.7/21.8, because TPM_DA_INFO and
/// TPM_DA_INFO_LIMITED below embed it.
///
typedef struct tdTPM_DA_ACTION_TYPE {
TPM_STRUCTURE_TAG tag;
UINT32 actions; ///< Bitmask of the TPM_DA_ACTION_* flags below
} TPM_DA_ACTION_TYPE;
#define TPM_DA_ACTION_FAILURE_MODE (((UINT32)1) << 3)
#define TPM_DA_ACTION_DEACTIVATE (((UINT32)1) << 2)
#define TPM_DA_ACTION_DISABLE (((UINT32)1) << 1)
#define TPM_DA_ACTION_TIMEOUT (((UINT32)1) << 0)
///
/// Part 2, section 21.7: TPM_DA_INFO
///
typedef struct tdTPM_DA_INFO {
TPM_STRUCTURE_TAG tag;
TPM_DA_STATE state; ///< One of the TPM_DA_STATE_* values below
UINT16 currentCount;
UINT16 thresholdCount;
TPM_DA_ACTION_TYPE actionAtThreshold;
UINT32 actionDependValue;
UINT32 vendorDataSize; ///< Size, in bytes, of vendorData
UINT8* vendorData;
} TPM_DA_INFO;
///
/// Part 2, section 21.8: TPM_DA_INFO_LIMITED
///
typedef struct tdTPM_DA_INFO_LIMITED {
TPM_STRUCTURE_TAG tag;
TPM_DA_STATE state; ///< One of the TPM_DA_STATE_* values below
TPM_DA_ACTION_TYPE actionAtThreshold;
UINT32 vendorDataSize; ///< Size, in bytes, of vendorData
UINT8* vendorData;
} TPM_DA_INFO_LIMITED;
//
// Part 2, section 21.9: TPM_DA_STATE values
//
#define TPM_DA_STATE_INACTIVE ((UINT8)0x00)
#define TPM_DA_STATE_ACTIVE ((UINT8)0x01)
//
// Part 2, section 22: DAA Structures
//
//
// Part 2, section 22.1: Size definitions (all sizes are in bytes)
//
#define TPM_DAA_SIZE_r0 (43)
#define TPM_DAA_SIZE_r1 (43)
#define TPM_DAA_SIZE_r2 (128)
#define TPM_DAA_SIZE_r3 (168)
#define TPM_DAA_SIZE_r4 (219)
#define TPM_DAA_SIZE_NT (20)
#define TPM_DAA_SIZE_v0 (128)
#define TPM_DAA_SIZE_v1 (192)
#define TPM_DAA_SIZE_NE (256)
#define TPM_DAA_SIZE_w (256)
#define TPM_DAA_SIZE_issuerModulus (256)
//
// Part 2, section 22.2: Constant definitions
//
#define TPM_DAA_power0 (104)
#define TPM_DAA_power1 (1024)
///
/// Part 2, section 22.3: TPM_DAA_ISSUER
///
typedef struct tdTPM_DAA_ISSUER {
TPM_STRUCTURE_TAG tag;
TPM_DIGEST DAA_digest_R0;
TPM_DIGEST DAA_digest_R1;
TPM_DIGEST DAA_digest_S0;
TPM_DIGEST DAA_digest_S1;
TPM_DIGEST DAA_digest_n;
TPM_DIGEST DAA_digest_gamma;
UINT8 DAA_generic_q[26];
} TPM_DAA_ISSUER;
///
/// Part 2, section 22.4: TPM_DAA_TPM
///
typedef struct tdTPM_DAA_TPM {
TPM_STRUCTURE_TAG tag;
TPM_DIGEST DAA_digestIssuer;
TPM_DIGEST DAA_digest_v0;
TPM_DIGEST DAA_digest_v1;
TPM_DIGEST DAA_rekey;
UINT32 DAA_count;
} TPM_DAA_TPM;
///
/// Part 2, section 22.5: TPM_DAA_CONTEXT
///
typedef struct tdTPM_DAA_CONTEXT {
TPM_STRUCTURE_TAG tag;
TPM_DIGEST DAA_digestContext;
TPM_DIGEST DAA_digest;
TPM_DAA_CONTEXT_SEED DAA_contextSeed;
UINT8 DAA_scratch[256];
UINT8 DAA_stage; ///< Current stage of the multi-stage DAA protocol
} TPM_DAA_CONTEXT;
///
/// Part 2, section 22.6: TPM_DAA_JOINDATA
///
typedef struct tdTPM_DAA_JOINDATA {
UINT8 DAA_join_u0[128];
UINT8 DAA_join_u1[138];
TPM_DIGEST DAA_digest_n0;
} TPM_DAA_JOINDATA;
///
/// Part 2, section 22.8: TPM_DAA_BLOB
/// UINT8* members are in-memory pointers paired with the preceding size fields.
///
typedef struct tdTPM_DAA_BLOB {
TPM_STRUCTURE_TAG tag;
TPM_RESOURCE_TYPE resourceType;
UINT8 label[16];
TPM_DIGEST blobIntegrity;
UINT32 additionalSize; ///< Size, in bytes, of additionalData
UINT8* additionalData;
UINT32 sensitiveSize; ///< Size, in bytes, of sensitiveData
UINT8* sensitiveData;
} TPM_DAA_BLOB;
///
/// Part 2, section 22.9: TPM_DAA_SENSITIVE
///
typedef struct tdTPM_DAA_SENSITIVE {
TPM_STRUCTURE_TAG tag;
UINT32 internalSize; ///< Size, in bytes, of internalData
UINT8* internalData;
} TPM_DAA_SENSITIVE;
//
// Part 2, section 23: Redirection
//
///
/// Part 2 section 23.1: TPM_REDIR_COMMAND
/// This section defines exactly one value but does not
/// give it a name. The definition of TPM_SetRedirection in Part3
/// refers to exactly one name but does not give its value. We join
/// them here.
///
#define TPM_REDIR_GPIO (0x00000001)
///
/// TPM Command Headers defined in Part 3
/// Fixed header that precedes every TPM 1.2 request.
///
typedef struct tdTPM_RQU_COMMAND_HDR {
TPM_STRUCTURE_TAG tag;
UINT32 paramSize; ///< Total command size in bytes, header included (per spec Part 3)
TPM_COMMAND_CODE ordinal; ///< One of the TPM_ORD_* / TSC_ORD_* values above
} TPM_RQU_COMMAND_HDR;
///
/// TPM Response Headers defined in Part 3
/// Fixed header that precedes every TPM 1.2 response.
///
typedef struct tdTPM_RSP_COMMAND_HDR {
TPM_STRUCTURE_TAG tag;
UINT32 paramSize; ///< Total response size in bytes, header included (per spec Part 3)
TPM_RESULT returnCode; ///< One of the TPM_RESULT codes defined in section 16 above
} TPM_RSP_COMMAND_HDR;
#pragma pack()
#endif
================================================
FILE: driver/types/tpm20.h
================================================
/** @file
TPM2.0 Specification data structures
(Trusted Platform Module Library Specification, Family "2.0", Level 00, Revision 00.96,
@http://www.trustedcomputinggroup.org/resources/tpm_library_specification)
Check http://trustedcomputinggroup.org for latest specification updates.
Copyright (c) 2013 - 2015, Intel Corporation. All rights reserved.
SPDX-License-Identifier: BSD-2-Clause-Patent
**/
#ifndef _TPM20_H_
#define _TPM20_H_
#include "tpm12.h"
#pragma pack (1)
// Annex A Algorithm Constants
// Table 205 - Defines for SHA1 Hash Values
#define SHA1_DIGEST_SIZE 20
#define SHA1_BLOCK_SIZE 64
// Table 206 - Defines for SHA256 Hash Values
#define SHA256_DIGEST_SIZE 32
#define SHA256_BLOCK_SIZE 64
// Table 207 - Defines for SHA384 Hash Values
#define SHA384_DIGEST_SIZE 48
#define SHA384_BLOCK_SIZE 128
// Table 208 - Defines for SHA512 Hash Values
#define SHA512_DIGEST_SIZE 64
#define SHA512_BLOCK_SIZE 128
// Table 209 - Defines for SM3_256 Hash Values
#define SM3_256_DIGEST_SIZE 32
#define SM3_256_BLOCK_SIZE 64
// Table 210 - Defines for Architectural Limits Values
#define MAX_SESSION_NUMBER 3
// Annex B Implementation Definitions
// Table 211 - Defines for Logic Values
#define YES 1
#define NO 0
#define SET 1
#define CLEAR 0
// Table 215 - Defines for RSA Algorithm Constants
#define MAX_RSA_KEY_BITS 2048
#define MAX_RSA_KEY_BYTES ((MAX_RSA_KEY_BITS + 7) / 8)
// Table 216 - Defines for ECC Algorithm Constants
#define MAX_ECC_KEY_BITS 256
#define MAX_ECC_KEY_BYTES ((MAX_ECC_KEY_BITS + 7) / 8)
// Table 217 - Defines for AES Algorithm Constants
#define MAX_AES_KEY_BITS 128
#define MAX_AES_BLOCK_SIZE_BYTES 16
#define MAX_AES_KEY_BYTES ((MAX_AES_KEY_BITS + 7) / 8)
// Table 218 - Defines for SM4 Algorithm Constants
#define MAX_SM4_KEY_BITS 128
#define MAX_SM4_BLOCK_SIZE_BYTES 16
#define MAX_SM4_KEY_BYTES ((MAX_SM4_KEY_BITS + 7) / 8)
// Table 219 - Defines for Symmetric Algorithm Constants
#define MAX_SYM_KEY_BITS MAX_AES_KEY_BITS
#define MAX_SYM_KEY_BYTES MAX_AES_KEY_BYTES
#define MAX_SYM_BLOCK_SIZE MAX_AES_BLOCK_SIZE_BYTES
// Table 220 - Defines for Implementation Values
typedef UINT16 BSIZE;
#define BUFFER_ALIGNMENT 4
#define IMPLEMENTATION_PCR 24
#define PLATFORM_PCR 24
#define DRTM_PCR 17
#define NUM_LOCALITIES 5
#define MAX_HANDLE_NUM 3
#define MAX_ACTIVE_SESSIONS 64
typedef UINT16 CONTEXT_SLOT;
typedef UINT64 CONTEXT_COUNTER;
#define MAX_LOADED_SESSIONS 3
#define MAX_SESSION_NUM 3
#define MAX_LOADED_OBJECTS 3
#define MIN_EVICT_OBJECTS 2
#define PCR_SELECT_MIN ((PLATFORM_PCR + 7) / 8)
#define PCR_SELECT_MAX ((IMPLEMENTATION_PCR + 7) / 8)
#define NUM_POLICY_PCR_GROUP 1
#define NUM_AUTHVALUE_PCR_GROUP 1
#define MAX_CONTEXT_SIZE 4000
#define MAX_DIGEST_BUFFER 1024
#define MAX_NV_INDEX_SIZE 1024
#define MAX_CAP_BUFFER 1024
#define NV_MEMORY_SIZE 16384
#define NUM_STATIC_PCR 16
#define MAX_ALG_LIST_SIZE 64
#define TIMER_PRESCALE 100000
#define PRIMARY_SEED_SIZE 32
#define CONTEXT_ENCRYPT_ALG TPM_ALG_AES
#define CONTEXT_ENCRYPT_KEY_BITS MAX_SYM_KEY_BITS
#define CONTEXT_ENCRYPT_KEY_BYTES ((CONTEXT_ENCRYPT_KEY_BITS + 7) / 8)
#define CONTEXT_INTEGRITY_HASH_ALG TPM_ALG_SHA256
#define CONTEXT_INTEGRITY_HASH_SIZE SHA256_DIGEST_SIZE
#define PROOF_SIZE CONTEXT_INTEGRITY_HASH_SIZE
#define NV_CLOCK_UPDATE_INTERVAL 12
#define NUM_POLICY_PCR 1
#define MAX_COMMAND_SIZE 4096
#define MAX_RESPONSE_SIZE 4096
#define ORDERLY_BITS 8
#define MAX_ORDERLY_COUNT ((1 << ORDERLY_BITS) - 1)
#define ALG_ID_FIRST TPM_ALG_FIRST
#define ALG_ID_LAST TPM_ALG_LAST
#define MAX_SYM_DATA 128
#define MAX_RNG_ENTROPY_SIZE 64
#define RAM_INDEX_SPACE 512
#define RSA_DEFAULT_PUBLIC_EXPONENT 0x00010001
#define CRT_FORMAT_RSA YES
#define PRIVATE_VENDOR_SPECIFIC_BYTES ((MAX_RSA_KEY_BYTES / 2) * ( 3 + CRT_FORMAT_RSA * 2))
// Capability related MAX_ value
#define MAX_CAP_DATA (MAX_CAP_BUFFER - sizeof(TPM_CAP) - sizeof(UINT32))
#define MAX_CAP_ALGS (MAX_CAP_DATA / sizeof(TPMS_ALG_PROPERTY))
#define MAX_CAP_HANDLES (MAX_CAP_DATA / sizeof(TPM_HANDLE))
#define MAX_CAP_CC (MAX_CAP_DATA / sizeof(TPM_CC))
#define MAX_TPM_PROPERTIES (MAX_CAP_DATA / sizeof(TPMS_TAGGED_PROPERTY))
#define MAX_PCR_PROPERTIES (MAX_CAP_DATA / sizeof(TPMS_TAGGED_PCR_SELECT))
#define MAX_ECC_CURVES (MAX_CAP_DATA / sizeof(TPM_ECC_CURVE))
//
// Always set to 5 here, because we want to support all hash algorithms in the BIOS.
//
#define HASH_COUNT 5
// 5 Base Types
// Table 3 - Definition of Base Types
typedef UINT8 BYTE;
// Table 4 - Definition of Types for Documentation Clarity
//
// NOTE: Commented out because these typedefs have the same names as their TPM 1.2 counterparts (the values are identical, so this causes no runtime issue)
//
// typedef UINT32 TPM_ALGORITHM_ID;
// typedef UINT32 TPM_MODIFIER_INDICATOR;
typedef UINT32 TPM_AUTHORIZATION_SIZE;
typedef UINT32 TPM_PARAMETER_SIZE;
typedef UINT16 TPM_KEY_SIZE;
typedef UINT16 TPM_KEY_BITS;
// 6 Constants
// Table 6 - TPM_GENERATED Constants
typedef UINT32 TPM_GENERATED;
#define TPM_GENERATED_VALUE (TPM_GENERATED)(0xff544347)
// Table 7 - TPM_ALG_ID Constants
typedef UINT16 TPM_ALG_ID;
//
// NOTE: Some algorithm defines with the same names as their TPM 1.2 counterparts are commented out below (the values are identical, so this causes no runtime issue)
//
#define TPM_ALG_ERROR (TPM_ALG_ID)(0x0000)
#define TPM_ALG_FIRST (TPM_ALG_ID)(0x0001)
// #define TPM_ALG_RSA (TPM_ALG_ID)(0x0001)
// #define TPM_ALG_SHA (TPM_ALG_ID)(0x0004)
#define TPM_ALG_SHA1 (TPM_ALG_ID)(0x0004)
// #define TPM_ALG_HMAC (TPM_ALG_ID)(0x0005)
#define TPM_ALG_AES (TPM_ALG_ID)(0x0006)
// #define TPM_ALG_MGF1 (TPM_ALG_ID)(0x0007)
#define TPM_ALG_KEYEDHASH (TPM_ALG_ID)(0x0008)
// #define TPM_ALG_XOR (TPM_ALG_ID)(0x000A)
#define TPM_ALG_SHA256 (TPM_ALG_ID)(0x000B)
#define TPM_ALG_SHA384 (TPM_ALG_ID)(0x000C)
#define TPM_ALG_SHA512 (TPM_ALG_ID)(0x000D)
#define TPM_ALG_NULL (TPM_ALG_ID)(0x0010)
#define TPM_ALG_SM3_256 (TPM_ALG_ID)(0x0012)
#define TPM_ALG_SM4 (TPM_ALG_ID)(0x0013)
#define TPM_ALG_RSASSA (TPM_ALG_ID)(0x0014)
#define TPM_ALG_RSAES (TPM_ALG_ID)(0x0015)
#define TPM_ALG_RSAPSS (TPM_ALG_ID)(0x0016)
#define TPM_ALG_OAEP (TPM_ALG_ID)(0x0017)
#define TPM_ALG_ECDSA (TPM_ALG_ID)(0x0018)
#define TPM_ALG_ECDH (TPM_ALG_ID)(0x0019)
#define TPM_ALG_ECDAA (TPM_ALG_ID)(0x001A)
#define TPM_ALG_SM2 (TPM_ALG_ID)(0x001B)
#define TPM_ALG_ECSCHNORR (TPM_ALG_ID)(0x001C)
#define TPM_ALG_ECMQV (TPM_ALG_ID)(0x001D)
#define TPM_ALG_KDF1_SP800_56a (TPM_ALG_ID)(0x0020)
#define TPM_ALG_KDF2 (TPM_ALG_ID)(0x0021)
#define TPM_ALG_KDF1_SP800_108 (TPM_ALG_ID)(0x0022)
#define TPM_ALG_ECC (TPM_ALG_ID)(0x0023)
#define TPM_ALG_SYMCIPHER (TPM_ALG_ID)(0x0025)
#define TPM_ALG_CTR (TPM_ALG_ID)(0x0040)
#define TPM_ALG_OFB (TPM_ALG_ID)(0x0041)
#define TPM_ALG_CBC (TPM_ALG_ID)(0x0042)
#define TPM_ALG_CFB (TPM_ALG_ID)(0x0043)
#define TPM_ALG_ECB (TPM_ALG_ID)(0x0044)
#define TPM_ALG_LAST (TPM_ALG_ID)(0x0044)
// Table 8 - TPM_ECC_CURVE Constants
typedef UINT16 TPM_ECC_CURVE;
#define TPM_ECC_NONE (TPM_ECC_CURVE)(0x0000)
#define TPM_ECC_NIST_P192 (TPM_ECC_CURVE)(0x0001)
#define TPM_ECC_NIST_P224 (TPM_ECC_CURVE)(0x0002)
#define TPM_ECC_NIST_P256 (TPM_ECC_CURVE)(0x0003)
#define TPM_ECC_NIST_P384 (TPM_ECC_CURVE)(0x0004)
#define TPM_ECC_NIST_P521 (TPM_ECC_CURVE)(0x0005)
#define TPM_ECC_BN_P256 (TPM_ECC_CURVE)(0x0010)
#define TPM_ECC_BN_P638 (TPM_ECC_CURVE)(0x0011)
#define TPM_ECC_SM2_P256 (TPM_ECC_CURVE)(0x0020)
// Table 11 - TPM_CC Constants (Numeric Order)
typedef UINT32 TPM_CC;
#define TPM_CC_FIRST (TPM_CC)(0x0000011F)
#define TPM_CC_PP_FIRST (TPM_CC)(0x0000011F)
#define TPM_CC_NV_UndefineSpaceSpecial (TPM_CC)(0x0000011F)
#define TPM_CC_EvictControl (TPM_CC)(0x00000120)
#define TPM_CC_HierarchyControl (TPM_CC)(0x00000121)
#define TPM_CC_NV_UndefineSpace (TPM_CC)(0x00000122)
#define TPM_CC_ChangeEPS (TPM_CC)(0x00000124)
#define TPM_CC_ChangePPS (TPM_CC)(0x00000125)
#define TPM_CC_Clear (TPM_CC)(0x00000126)
#define TPM_CC_ClearControl (TPM_CC)(0x00000127)
#define TPM_CC_ClockSet (TPM_CC)(0x00000128)
#define TPM_CC_HierarchyChangeAuth (TPM_CC)(0x00000129)
#define TPM_CC_NV_DefineSpace (TPM_CC)(0x0000012A)
#define TPM_CC_PCR_Allocate (TPM_CC)(0x0000012B)
#define TPM_CC_PCR_SetAuthPolicy (TPM_CC)(0x0000012C)
#define TPM_CC_PP_Commands (TPM_CC)(0x0000012D)
#define TPM_CC_SetPrimaryPolicy (TPM_CC)(0x0000012E)
#define TPM_CC_FieldUpgradeStart (TPM_CC)(0x0000012F)
#define TPM_CC_ClockRateAdjust (TPM_CC)(0x00000130)
#define TPM_CC_CreatePrimary (TPM_CC)(0x00000131)
#define TPM_CC_NV_GlobalWriteLock (TPM_CC)(0x00000132)
#define TPM_CC_PP_LAST (TPM_CC)(0x00000132)
#define TPM_CC_GetCommandAuditDigest (TPM_CC)(0x00000133)
#define TPM_CC_NV_Increment (TPM_CC)(0x00000134)
#define TPM_CC_NV_SetBits (TPM_CC)(0x00000135)
#define TPM_CC_NV_Extend (TPM_CC)(0x00000136)
#define TPM_CC_NV_Write (TPM_CC)(0x00000137)
#define TPM_CC_NV_WriteLock (TPM_CC)(0x00000138)
#define TPM_CC_DictionaryAttackLockReset (TPM_CC)(0x00000139)
#define TPM_CC_DictionaryAttackParameters (TPM_CC)(0x0000013A)
#define TPM_CC_NV_ChangeAuth (TPM_CC)(0x0000013B)
#define TPM_CC_PCR_Event (TPM_CC)(0x0000013C)
#define TPM_CC_PCR_Reset (TPM_CC)(0x0000013D)
#define TPM_CC_SequenceComplete (TPM_CC)(0x0000013E)
#define TPM_CC_SetAlgorithmSet (TPM_CC)(0x0000013F)
#define TPM_CC_SetCommandCodeAuditStatus (TPM_CC)(0x00000140)
#define TPM_CC_FieldUpgradeData (TPM_CC)(0x00000141)
#define TPM_CC_IncrementalSelfTest (TPM_CC)(0x00000142)
#define TPM_CC_SelfTest (TPM_CC)(0x00000143)
#define TPM_CC_Startup (TPM_CC)(0x00000144)
#define TPM_CC_Shutdown (TPM_CC)(0x00000145)
#define TPM_CC_StirRandom (TPM_CC)(0x00000146)
#define TPM_CC_ActivateCredential (TPM_CC)(0x00000147)
#define TPM_CC_Certify (TPM_CC)(0x00000148)
#define TPM_CC_PolicyNV (TPM_CC)(0x00000149)
#define TPM_CC_CertifyCreation (TPM_CC)(0x0000014A)
#define TPM_CC_Duplicate (TPM_CC)(0x0000014B)
#define TPM_CC_GetTime (TPM_CC)(0x0000014C)
#define TPM_CC_GetSessionAuditDigest (TPM_CC)(0x0000014D)
#define TPM_CC_NV_Read (TPM_CC)(0x0000014E)
#define TPM_CC_NV_ReadLock (TPM_CC)(0x0000014F)
#define TPM_CC_ObjectChangeAuth (TPM_CC)(0x00000150)
#define TPM_CC_PolicySecret (TPM_CC)(0x00000151)
#define TPM_CC_Rewrap (TPM_CC)(0x00000152)
#define TPM_CC_Create (TPM_CC)(0x00000153)
#define TPM_CC_ECDH_ZGen (TPM_CC)(0x00000154)
#define TPM_CC_HMAC (TPM_CC)(0x00000155)
#define TPM_CC_Import (TPM_CC)(0x00000156)
#define TPM_CC_Load (TPM_CC)(0x00000157)
#define TPM_CC_Quote (TPM_CC)(0x00000158)
#define TPM_CC_RSA_Decrypt (TPM_CC)(0x00000159)
#define TPM_CC_HMAC_Start (TPM_CC)(0x0000015B)
#define TPM_CC_SequenceUpdate (TPM_CC)(0x0000015C)
#define TPM_CC_Sign (TPM_CC)(0x0000015D)
#define TPM_CC_Unseal (TPM_CC)(0x0000015E)
#define TPM_CC_PolicySigned (TPM_CC)(0x00000160)
#define TPM_CC_ContextLoad (TPM_CC)(0x00000161)
#define TPM_CC_ContextSave (TPM_CC)(0x00000162)
#define TPM_CC_ECDH_KeyGen (TPM_CC)(0x00000163)
#define TPM_CC_EncryptDecrypt (TPM_CC)(0x00000164)
#define TPM_CC_FlushContext (TPM_CC)(0x00000165)
#define TPM_CC_LoadExternal (TPM_CC)(0x00000167)
#define TPM_CC_MakeCredential (TPM_CC)(0x00000168)
#define TPM_CC_NV_ReadPublic (TPM_CC)(0x00000169)
#define TPM_CC_PolicyAuthorize (TPM_CC)(0x0000016A)
#define TPM_CC_PolicyAuthValue (TPM_CC)(0x0000016B)
#define TPM_CC_PolicyCommandCode (TPM_CC)(0x0000016C)
#define TPM_CC_PolicyCounterTimer (TPM_CC)(0x0000016D)
#define TPM_CC_PolicyCpHash (TPM_CC)(0x0000016E)
#define TPM_CC_PolicyLocality (TPM_CC)(0x0000016F)
#define TPM_CC_PolicyNameHash (TPM_CC)(0x00000170)
#define TPM_CC_PolicyOR (TPM_CC)(0x00000171)
#define TPM_CC_PolicyTicket (TPM_CC)(0x00000172)
#define TPM_CC_ReadPublic (TPM_CC)(0x00000173)
#define TPM_CC_RSA_Encrypt (TPM_CC)(0x00000174)
#define TPM_CC_StartAuthSession (TPM_CC)(0x00000176)
#define TPM_CC_VerifySignature (TPM_CC)(0x00000177)
#define TPM_CC_ECC_Parameters (TPM_CC)(0x00000178)
#define TPM_CC_FirmwareRead (TPM_CC)(0x00000179)
#define TPM_CC_GetCapability (TPM_CC)(0x0000017A)
#define TPM_CC_GetRandom (TPM_CC)(0x0000017B)
#define TPM_CC_GetTestResult (TPM_CC)(0x0000017C)
#define TPM_CC_Hash (TPM_CC)(0x0000017D)
#define TPM_CC_PCR_Read (TPM_CC)(0x0000017E)
#define TPM_CC_PolicyPCR (TPM_CC)(0x0000017F)
#define TPM_CC_PolicyRestart (TPM_CC)(0x00000180)
#define TPM_CC_ReadClock (TPM_CC)(0x00000181)
#define TPM_CC_PCR_Extend (TPM_CC)(0x00000182)
#define TPM_CC_PCR_SetAuthValue (TPM_CC)(0x00000183)
#define TPM_CC_NV_Certify (TPM_CC)(0x00000184)
#define TPM_CC_EventSequenceComplete (TPM_CC)(0x00000185)
#define TPM_CC_HashSequenceStart (TPM_CC)(0x00000186)
#define TPM_CC_PolicyPhysicalPresence (TPM_CC)(0x00000187)
#define TPM_CC_PolicyDuplicationSelect (TPM_CC)(0x00000188)
#define TPM_CC_PolicyGetDigest (TPM_CC)(0x00000189)
#define TPM_CC_TestParms (TPM_CC)(0x0000018A)
#define TPM_CC_Commit (TPM_CC)(0x0000018B)
#define TPM_CC_PolicyPassword (TPM_CC)(0x0000018C)
#define TPM_CC_ZGen_2Phase (TPM_CC)(0x0000018D)
#define TPM_CC_EC_Ephemeral (TPM_CC)(0x0000018E)
#define TPM_CC_LAST (TPM_CC)(0x0000018E)
// Table 15 - TPM_RC Constants (Actions)
typedef UINT32 TPM_RC;
#define TPM_RC_SUCCESS (TPM_RC)(0x000)
#define TPM_RC_BAD_TAG (TPM_RC)(0x030)
#define RC_VER1 (TPM_RC)(0x100)
#define TPM_RC_INITIALIZE (TPM_RC)(RC_VER1 + 0x000)
#define TPM_RC_FAILURE (TPM_RC)(RC_VER1 + 0x001)
#define TPM_RC_SEQUENCE (TPM_RC)(RC_VER1 + 0x003)
#define TPM_RC_PRIVATE (TPM_RC)(RC_VER1 + 0x00B)
#define TPM_RC_HMAC (TPM_RC)(RC_VER1 + 0x019)
#define TPM_RC_DISABLED (TPM_RC)(RC_VER1 + 0x020)
#define TPM_RC_EXCLUSIVE (TPM_RC)(RC_VER1 + 0x021)
#define TPM_RC_AUTH_TYPE (TPM_RC)(RC_VER1 + 0x024)
#define TPM_RC_AUTH_MISSING (TPM_RC)(RC_VER1 + 0x025)
#define TPM_RC_POLICY (TPM_RC)(RC_VER1 + 0x026)
#define TPM_RC_PCR (TPM_RC)(RC_VER1 + 0x027)
#define TPM_RC_PCR_CHANGED (TPM_RC)(RC_VER1 + 0x028)
#define TPM_RC_UPGRADE (TPM_RC)(RC_VER1 + 0x02D)
#define TPM_RC_TOO_MANY_CONTEXTS (TPM_RC)(RC_VER1 + 0x02E)
#define TPM_RC_AUTH_UNAVAILABLE (TPM_RC)(RC_VER1 + 0x02F)
#define TPM_RC_REBOOT (TPM_RC)(RC_VER1 + 0x030)
#define TPM_RC_UNBALANCED (TPM_RC)(RC_VER1 + 0x031)
#define TPM_RC_COMMAND_SIZE (TPM_RC)(RC_VER1 + 0x042)
#define TPM_RC_COMMAND_CODE (TPM_RC)(RC_VER1 + 0x043)
#define TPM_RC_AUTHSIZE (TPM_RC)(RC_VER1 + 0x044)
#define TPM_RC_AUTH_CONTEXT (TPM_RC)(RC_VER1 + 0x045)
#define TPM_RC_NV_RANGE (TPM_RC)(RC_VER1 + 0x046)
#define TPM_RC_NV_SIZE (TPM_RC)(RC_VER1 + 0x047)
#define TPM_RC_NV_LOCKED (TPM_RC)(RC_VER1 + 0x048)
#define TPM_RC_NV_AUTHORIZATION (TPM_RC)(RC_VER1 + 0x049)
#define TPM_RC_NV_UNINITIALIZED (TPM_RC)(RC_VER1 + 0x04A)
#define TPM_RC_NV_SPACE (TPM_RC)(RC_VER1 + 0x04B)
#define TPM_RC_NV_DEFINED (TPM_RC)(RC_VER1 + 0x04C)
#define TPM_RC_BAD_CONTEXT (TPM_RC)(RC_VER1 + 0x050)
#define TPM_RC_CPHASH (TPM_RC)(RC_VER1 + 0x051)
#define TPM_RC_PARENT (TPM_RC)(RC_VER1 + 0x052)
#define TPM_RC_NEEDS_TEST (TPM_RC)(RC_VER1 + 0x053)
#define TPM_RC_NO_RESULT (TPM_RC)(RC_VER1 + 0x054)
#define TPM_RC_SENSITIVE (TPM_RC)(RC_VER1 + 0x055)
#define RC_MAX_FM0 (TPM_RC)(RC_VER1 + 0x07F)
// Format-one (FMT1) response codes, TPM 2.0 Part 2 Table 15.
// Each value is RC_FMT1 (0x080) plus an error number; gaps in the
// numbering (0x006, 0x011, 0x013-0x014, 0x019, 0x01E) are values the
// specification does not assign. FMT1 codes may additionally encode the
// offending handle/parameter/session number in the upper bits.
#define RC_FMT1 (TPM_RC)(0x080)
#define TPM_RC_ASYMMETRIC (TPM_RC)(RC_FMT1 + 0x001)
#define TPM_RC_ATTRIBUTES (TPM_RC)(RC_FMT1 + 0x002)
#define TPM_RC_HASH (TPM_RC)(RC_FMT1 + 0x003)
#define TPM_RC_VALUE (TPM_RC)(RC_FMT1 + 0x004)
#define TPM_RC_HIERARCHY (TPM_RC)(RC_FMT1 + 0x005)
#define TPM_RC_KEY_SIZE (TPM_RC)(RC_FMT1 + 0x007)
#define TPM_RC_MGF (TPM_RC)(RC_FMT1 + 0x008)
#define TPM_RC_MODE (TPM_RC)(RC_FMT1 + 0x009)
#define TPM_RC_TYPE (TPM_RC)(RC_FMT1 + 0x00A)
#define TPM_RC_HANDLE (TPM_RC)(RC_FMT1 + 0x00B)
#define TPM_RC_KDF (TPM_RC)(RC_FMT1 + 0x00C)
#define TPM_RC_RANGE (TPM_RC)(RC_FMT1 + 0x00D)
#define TPM_RC_AUTH_FAIL (TPM_RC)(RC_FMT1 + 0x00E)
#define TPM_RC_NONCE (TPM_RC)(RC_FMT1 + 0x00F)
#define TPM_RC_PP (TPM_RC)(RC_FMT1 + 0x010)
#define TPM_RC_SCHEME (TPM_RC)(RC_FMT1 + 0x012)
#define TPM_RC_SIZE (TPM_RC)(RC_FMT1 + 0x015)
#define TPM_RC_SYMMETRIC (TPM_RC)(RC_FMT1 + 0x016)
#define TPM_RC_TAG (TPM_RC)(RC_FMT1 + 0x017)
#define TPM_RC_SELECTOR (TPM_RC)(RC_FMT1 + 0x018)
#define TPM_RC_INSUFFICIENT (TPM_RC)(RC_FMT1 + 0x01A)
#define TPM_RC_SIGNATURE (TPM_RC)(RC_FMT1 + 0x01B)
#define TPM_RC_KEY (TPM_RC)(RC_FMT1 + 0x01C)
#define TPM_RC_POLICY_FAIL (TPM_RC)(RC_FMT1 + 0x01D)
#define TPM_RC_INTEGRITY (TPM_RC)(RC_FMT1 + 0x01F)
#define TPM_RC_TICKET (TPM_RC)(RC_FMT1 + 0x020)
#define TPM_RC_RESERVED_BITS (TPM_RC)(RC_FMT1 + 0x021)
#define TPM_RC_BAD_AUTH (TPM_RC)(RC_FMT1 + 0x022)
#define TPM_RC_EXPIRED (TPM_RC)(RC_FMT1 + 0x023)
#define TPM_RC_POLICY_CC (TPM_RC)(RC_FMT1 + 0x024)
#define TPM_RC_BINDING (TPM_RC)(RC_FMT1 + 0x025)
#define TPM_RC_CURVE (TPM_RC)(RC_FMT1 + 0x026)
#define TPM_RC_ECC_POINT (TPM_RC)(RC_FMT1 + 0x027)
#define RC_WARN (TPM_RC)(0x900)
#define TPM_RC_CONTEXT_GAP (TPM_RC)(RC_WARN + 0x001)
#define TPM_RC_OBJECT_MEMORY (TPM_RC)(RC_WARN + 0x002)
#define TPM_RC_SESSION_MEMORY (TPM_RC)(RC_WARN + 0x003)
#define TPM_RC_MEMORY (TPM_RC)(RC_WARN + 0x004)
#define TPM_RC_SESSION_HANDLES (TPM_RC)(RC_WARN + 0x005)
#define TPM_RC_OBJECT_HANDLES (TPM_RC)(RC_WARN + 0x006)
#define TPM_RC_LOCALITY (TPM_RC)(RC_WARN + 0x007)
#define TPM_RC_YIELDED (TPM_RC)(RC_WARN + 0x008)
#define TPM_RC_CANCELED (TPM_RC)(RC_WARN + 0x009)
#define TPM_RC_TESTING (TPM_RC)(RC_WARN + 0x00A)
#define TPM_RC_REFERENCE_H0 (TPM_RC)(RC_WARN + 0x010)
#define TPM_RC_REFERENCE_H1 (TPM_RC)(RC_WARN + 0x011)
#define TPM_RC_REFERENCE_H2 (TPM_RC)(RC_WARN + 0x012)
#define TPM_RC_REFERENCE_H3 (TPM_RC)(RC_WARN + 0x013)
#define TPM_RC_REFERENCE_H4 (TPM_RC)(RC_WARN + 0x014)
#define TPM_RC_REFERENCE_H5 (TPM_RC)(RC_WARN + 0x015)
#define TPM_RC_REFERENCE_H6 (TPM_RC)(RC_WARN + 0x016)
#define TPM_RC_REFERENCE_S0 (TPM_RC)(RC_WARN + 0x018)
#define TPM_RC_REFERENCE_S1 (TPM_RC)(RC_WARN + 0x019)
#define TPM_RC_REFERENCE_S2 (TPM_RC)(RC_WARN + 0x01A)
#define TPM_RC_REFERENCE_S3 (TPM_RC)(RC_WARN + 0x01B)
#define TPM_RC_REFERENCE_S4 (TPM_RC)(RC_WARN + 0x01C)
#define TPM_RC_REFERENCE_S5 (TPM_RC)(RC_WARN + 0x01D)
#define TPM_RC_REFERENCE_S6 (TPM_RC)(RC_WARN + 0x01E)
#define TPM_RC_NV_RATE (TPM_RC)(RC_WARN + 0x020)
#define TPM_RC_LOCKOUT (TPM_RC)(RC_WARN + 0x021)
#define TPM_RC_RETRY (TPM_RC)(RC_WARN + 0x022)
#define TPM_RC_NV_UNAVAILABLE (TPM_RC)(RC_WARN + 0x023)
#define TPM_RC_NOT_USED (TPM_RC)(RC_WARN + 0x7F)
#define TPM_RC_H (TPM_RC)(0x000)
#define TPM_RC_P (TPM_RC)(0x040)
#define TPM_RC_S (TPM_RC)(0x800)
#define TPM_RC_1 (TPM_RC)(0x100)
#define TPM_RC_2 (TPM_RC)(0x200)
#define TPM_RC_3 (TPM_RC)(0x300)
#define TPM_RC_4 (TPM_RC)(0x400)
#define TPM_RC_5 (TPM_RC)(0x500)
#define TPM_RC_6 (TPM_RC)(0x600)
#define TPM_RC_7 (TPM_RC)(0x700)
#define TPM_RC_8 (TPM_RC)(0x800)
#define TPM_RC_9 (TPM_RC)(0x900)
#define TPM_RC_A (TPM_RC)(0xA00)
#define TPM_RC_B (TPM_RC)(0xB00)
#define TPM_RC_C (TPM_RC)(0xC00)
#define TPM_RC_D (TPM_RC)(0xD00)
#define TPM_RC_E (TPM_RC)(0xE00)
#define TPM_RC_F (TPM_RC)(0xF00)
#define TPM_RC_N_MASK (TPM_RC)(0xF00)
// Table 16 - TPM_CLOCK_ADJUST Constants
typedef INT8 TPM_CLOCK_ADJUST;
#define TPM_CLOCK_COARSE_SLOWER (TPM_CLOCK_ADJUST)(-3)
#define TPM_CLOCK_MEDIUM_SLOWER (TPM_CLOCK_ADJUST)(-2)
#define TPM_CLOCK_FINE_SLOWER (TPM_CLOCK_ADJUST)(-1)
#define TPM_CLOCK_NO_CHANGE (TPM_CLOCK_ADJUST)(0)
#define TPM_CLOCK_FINE_FASTER (TPM_CLOCK_ADJUST)(1)
#define TPM_CLOCK_MEDIUM_FASTER (TPM_CLOCK_ADJUST)(2)
#define TPM_CLOCK_COARSE_FASTER (TPM_CLOCK_ADJUST)(3)
// Table 17 - TPM_EO Constants
typedef UINT16 TPM_EO;
#define TPM_EO_EQ (TPM_EO)(0x0000)
#define TPM_EO_NEQ (TPM_EO)(0x0001)
#define TPM_EO_SIGNED_GT (TPM_EO)(0x0002)
#define TPM_EO_UNSIGNED_GT (TPM_EO)(0x0003)
#define TPM_EO_SIGNED_LT (TPM_EO)(0x0004)
#define TPM_EO_UNSIGNED_LT (TPM_EO)(0x0005)
#define TPM_EO_SIGNED_GE (TPM_EO)(0x0006)
#define TPM_EO_UNSIGNED_GE (TPM_EO)(0x0007)
#define TPM_EO_SIGNED_LE (TPM_EO)(0x0008)
#define TPM_EO_UNSIGNED_LE (TPM_EO)(0x0009)
#define TPM_EO_BITSET (TPM_EO)(0x000A)
#define TPM_EO_BITCLEAR (TPM_EO)(0x000B)
// Table 18 - TPM_ST Constants
//
// Structure tags. TPM_ST_NO_SESSIONS / TPM_ST_SESSIONS select whether a
// command or response carries authorization sessions; the
// TPM_ST_ATTEST_* values identify the kind of attestation structure
// they precede. (TPM_ST_NULL normalized from 0X8000 to lowercase 0x for
// consistency with every other hex literal in this file; value
// unchanged.)
typedef UINT16 TPM_ST;
#define TPM_ST_RSP_COMMAND (TPM_ST)(0x00C4) // TPM 1.2-compatible response tag
#define TPM_ST_NULL (TPM_ST)(0x8000)
#define TPM_ST_NO_SESSIONS (TPM_ST)(0x8001)
#define TPM_ST_SESSIONS (TPM_ST)(0x8002)
#define TPM_ST_ATTEST_NV (TPM_ST)(0x8014)
#define TPM_ST_ATTEST_COMMAND_AUDIT (TPM_ST)(0x8015)
#define TPM_ST_ATTEST_SESSION_AUDIT (TPM_ST)(0x8016)
#define TPM_ST_ATTEST_CERTIFY (TPM_ST)(0x8017)
#define TPM_ST_ATTEST_QUOTE (TPM_ST)(0x8018)
#define TPM_ST_ATTEST_TIME (TPM_ST)(0x8019)
#define TPM_ST_ATTEST_CREATION (TPM_ST)(0x801A)
#define TPM_ST_CREATION (TPM_ST)(0x8021)
#define TPM_ST_VERIFIED (TPM_ST)(0x8022)
#define TPM_ST_AUTH_SECRET (TPM_ST)(0x8023)
#define TPM_ST_HASHCHECK (TPM_ST)(0x8024)
#define TPM_ST_AUTH_SIGNED (TPM_ST)(0x8025)
#define TPM_ST_FU_MANIFEST (TPM_ST)(0x8029)
// Table 19 - TPM_SU Constants
typedef UINT16 TPM_SU;
#define TPM_SU_CLEAR (TPM_SU)(0x0000)
#define TPM_SU_STATE (TPM_SU)(0x0001)
// Table 20 - TPM_SE Constants
typedef UINT8 TPM_SE;
#define TPM_SE_HMAC (TPM_SE)(0x00)
#define TPM_SE_POLICY (TPM_SE)(0x01)
#define TPM_SE_TRIAL (TPM_SE)(0x03)
// Table 21 - TPM_CAP Constants
typedef UINT32 TPM_CAP;
#define TPM_CAP_FIRST (TPM_CAP)(0x00000000)
#define TPM_CAP_ALGS (TPM_CAP)(0x00000000)
#define TPM_CAP_HANDLES (TPM_CAP)(0x00000001)
#define TPM_CAP_COMMANDS (TPM_CAP)(0x00000002)
#define TPM_CAP_PP_COMMANDS (TPM_CAP)(0x00000003)
#define TPM_CAP_AUDIT_COMMANDS (TPM_CAP)(0x00000004)
#define TPM_CAP_PCRS (TPM_CAP)(0x00000005)
#define TPM_CAP_TPM_PROPERTIES (TPM_CAP)(0x00000006)
#define TPM_CAP_PCR_PROPERTIES (TPM_CAP)(0x00000007)
#define TPM_CAP_ECC_CURVES (TPM_CAP)(0x00000008)
#define TPM_CAP_LAST (TPM_CAP)(0x00000008)
#define TPM_CAP_VENDOR_PROPERTY (TPM_CAP)(0x00000100)
// Table 22 - TPM_PT Constants
typedef UINT32 TPM_PT;
#define TPM_PT_NONE (TPM_PT)(0x00000000)
#define PT_GROUP (TPM_PT)(0x00000100)
#define PT_FIXED (TPM_PT)(PT_GROUP * 1)
#define TPM_PT_FAMILY_INDICATOR (TPM_PT)(PT_FIXED + 0)
#define TPM_PT_LEVEL (TPM_PT)(PT_FIXED + 1)
#define TPM_PT_REVISION (TPM_PT)(PT_FIXED + 2)
#define TPM_PT_DAY_OF_YEAR (TPM_PT)(PT_FIXED + 3)
#define TPM_PT_YEAR (TPM_PT)(PT_FIXED + 4)
#define TPM_PT_MANUFACTURER (TPM_PT)(PT_FIXED + 5)
#define TPM_PT_VENDOR_STRING_1 (TPM_PT)(PT_FIXED + 6)
#define TPM_PT_VENDOR_STRING_2 (TPM_PT)(PT_FIXED + 7)
#define TPM_PT_VENDOR_STRING_3 (TPM_PT)(PT_FIXED + 8)
#define TPM_PT_VENDOR_STRING_4 (TPM_PT)(PT_FIXED + 9)
#define TPM_PT_VENDOR_TPM_TYPE (TPM_PT)(PT_FIXED + 10)
#define TPM_PT_FIRMWARE_VERSION_1 (TPM_PT)(PT_FIXED + 11)
#define TPM_PT_FIRMWARE_VERSION_2 (TPM_PT)(PT_FIXED + 12)
#define TPM_PT_INPUT_BUFFER (TPM_PT)(PT_FIXED + 13)
#define TPM_PT_HR_TRANSIENT_MIN (TPM_PT)(PT_FIXED + 14)
#define TPM_PT_HR_PERSISTENT_MIN (TPM_PT)(PT_FIXED + 15)
#define TPM_PT_HR_LOADED_MIN (TPM_PT)(PT_FIXED + 16)
#define TPM_PT_ACTIVE_SESSIONS_MAX (TPM_PT)(PT_FIXED + 17)
#define TPM_PT_PCR_COUNT (TPM_PT)(PT_FIXED + 18)
#define TPM_PT_PCR_SELECT_MIN (TPM_PT)(PT_FIXED + 19)
#define TPM_PT_CONTEXT_GAP_MAX (TPM_PT)(PT_FIXED + 20)
#define TPM_PT_NV_COUNTERS_MAX (TPM_PT)(PT_FIXED + 22)
#define TPM_PT_NV_INDEX_MAX (TPM_PT)(PT_FIXED + 23)
#define TPM_PT_MEMORY (TPM_PT)(PT_FIXED + 24)
#define TPM_PT_CLOCK_UPDATE (TPM_PT)(PT_FIXED + 25)
#define TPM_PT_CONTEXT_HASH (TPM_PT)(PT_FIXED + 26)
#define TPM_PT_CONTEXT_SYM (TPM_PT)(PT_FIXED + 27)
#define TPM_PT_CONTEXT_SYM_SIZE (TPM_PT)(PT_FIXED + 28)
#define TPM_PT_ORDERLY_COUNT (TPM_PT)(PT_FIXED + 29)
#define TPM_PT_MAX_COMMAND_SIZE (TPM_PT)(PT_FIXED + 30)
#define TPM_PT_MAX_RESPONSE_SIZE (TPM_PT)(PT_FIXED + 31)
#define TPM_PT_MAX_DIGEST (TPM_PT)(PT_FIXED + 32)
#define TPM_PT_MAX_OBJECT_CONTEXT (TPM_PT)(PT_FIXED + 33)
#define TPM_PT_MAX_SESSION_CONTEXT (TPM_PT)(PT_FIXED + 34)
#define TPM_PT_PS_FAMILY_INDICATOR (TPM_PT)(PT_FIXED + 35)
#define TPM_PT_PS_LEVEL (TPM_PT)(PT_FIXED + 36)
#define TPM_PT_PS_REVISION (TPM_PT)(PT_FIXED + 37)
#define TPM_PT_PS_DAY_OF_YEAR (TPM_PT)(PT_FIXED + 38)
#define TPM_PT_PS_YEAR (TPM_PT)(PT_FIXED + 39)
#define TPM_PT_SPLIT_MAX (TPM_PT)(PT_FIXED + 40)
#define TPM_PT_TOTAL_COMMANDS (TPM_PT)(PT_FIXED + 41)
#define TPM_PT_LIBRARY_COMMANDS (TPM_PT)(PT_FIXED + 42)
#define TPM_PT_VENDOR_COMMANDS (TPM_PT)(PT_FIXED + 43)
#define PT_VAR (TPM_PT)(PT_GROUP * 2)
#define TPM_PT_PERMANENT (TPM_PT)(PT_VAR + 0)
#define TPM_PT_STARTUP_CLEAR (TPM_PT)(PT_VAR + 1)
#define TPM_PT_HR_NV_INDEX (TPM_PT)(PT_VAR + 2)
#define TPM_PT_HR_LOADED (TPM_PT)(PT_VAR + 3)
#define TPM_PT_HR_LOADED_AVAIL (TPM_PT)(PT_VAR + 4)
#define TPM_PT_HR_ACTIVE (TPM_PT)(PT_VAR + 5)
#define TPM_PT_HR_ACTIVE_AVAIL (TPM_PT)(PT_VAR + 6)
#define TPM_PT_HR_TRANSIENT_AVAIL (TPM_PT)(PT_VAR + 7)
#define TPM_PT_HR_PERSISTENT (TPM_PT)(PT_VAR + 8)
#define TPM_PT_HR_PERSISTENT_AVAIL (TPM_PT)(PT_VAR + 9)
#define TPM_PT_NV_COUNTERS (TPM_PT)(PT_VAR + 10)
#define TPM_PT_NV_COUNTERS_AVAIL (TPM_PT)(PT_VAR + 11)
#define TPM_PT_ALGORITHM_SET (TPM_PT)(PT_VAR + 12)
#define TPM_PT_LOADED_CURVES (TPM_PT)(PT_VAR + 13)
#define TPM_PT_LOCKOUT_COUNTER (TPM_PT)(PT_VAR + 14)
#define TPM_PT_MAX_AUTH_FAIL (TPM_PT)(PT_VAR + 15)
#define TPM_PT_LOCKOUT_INTERVAL (TPM_PT)(PT_VAR + 16)
#define TPM_PT_LOCKOUT_RECOVERY (TPM_PT)(PT_VAR + 17)
#define TPM_PT_NV_WRITE_RECOVERY (TPM_PT)(PT_VAR + 18)
#define TPM_PT_AUDIT_COUNTER_0 (TPM_PT)(PT_VAR + 19)
#define TPM_PT_AUDIT_COUNTER_1 (TPM_PT)(PT_VAR + 20)
// Table 23 - TPM_PT_PCR Constants
typedef UINT32 TPM_PT_PCR;
#define TPM_PT_PCR_FIRST (TPM_PT_PCR)(0x00000000)
#define TPM_PT_PCR_SAVE (TPM_PT_PCR)(0x00000000)
#define TPM_PT_PCR_EXTEND_L0 (TPM_PT_PCR)(0x00000001)
#define TPM_PT_PCR_RESET_L0 (TPM_PT_PCR)(0x00000002)
#define TPM_PT_PCR_EXTEND_L1 (TPM_PT_PCR)(0x00000003)
#define TPM_PT_PCR_RESET_L1 (TPM_PT_PCR)(0x00000004)
#define TPM_PT_PCR_EXTEND_L2 (TPM_PT_PCR)(0x00000005)
#define TPM_PT_PCR_RESET_L2 (TPM_PT_PCR)(0x00000006)
#define TPM_PT_PCR_EXTEND_L3 (TPM_PT_PCR)(0x00000007)
#define TPM_PT_PCR_RESET_L3 (TPM_PT_PCR)(0x00000008)
#define TPM_PT_PCR_EXTEND_L4 (TPM_PT_PCR)(0x00000009)
#define TPM_PT_PCR_RESET_L4 (TPM_PT_PCR)(0x0000000A)
#define TPM_PT_PCR_NO_INCREMENT (TPM_PT_PCR)(0x00000011)
#define TPM_PT_PCR_DRTM_RESET (TPM_PT_PCR)(0x00000012)
#define TPM_PT_PCR_POLICY (TPM_PT_PCR)(0x00000013)
#define TPM_PT_PCR_AUTH (TPM_PT_PCR)(0x00000014)
#define TPM_PT_PCR_LAST (TPM_PT_PCR)(0x00000014)
// Table 24 - TPM_PS Constants
typedef UINT32 TPM_PS;
#define TPM_PS_MAIN (TPM_PS)(0x00000000)
#define TPM_PS_PC (TPM_PS)(0x00000001)
#define TPM_PS_PDA (TPM_PS)(0x00000002)
#define TPM_PS_CELL_PHONE (TPM_PS)(0x00000003)
#define TPM_PS_SERVER (TPM_PS)(0x00000004)
#define TPM_PS_PERIPHERAL (TPM_PS)(0x00000005)
#define TPM_PS_TSS (TPM_PS)(0x00000006)
#define TPM_PS_STORAGE (TPM_PS)(0x00000007)
#define TPM_PS_AUTHENTICATION (TPM_PS)(0x00000008)
#define TPM_PS_EMBEDDED (TPM_PS)(0x00000009)
#define TPM_PS_HARDCOPY (TPM_PS)(0x0000000A)
#define TPM_PS_INFRASTRUCTURE (TPM_PS)(0x0000000B)
#define TPM_PS_VIRTUALIZATION (TPM_PS)(0x0000000C)
#define TPM_PS_TNC (TPM_PS)(0x0000000D)
#define TPM_PS_MULTI_TENANT (TPM_PS)(0x0000000E)
#define TPM_PS_TC (TPM_PS)(0x0000000F)
// 7 Handles
// Table 25 - Handles Types
//
// NOTE: Commented out because TPM_HANDLE has the same name as its TPM 1.2 counterpart (the value is identical, so this causes no runtime issue)
//
// typedef UINT32 TPM_HANDLE;
// Table 26 - TPM_HT Constants
typedef UINT8 TPM_HT;
#define TPM_HT_PCR (TPM_HT)(0x00)
#define TPM_HT_NV_INDEX (TPM_HT)(0x01)
#define TPM_HT_HMAC_SESSION (TPM_HT)(0x02)
#define TPM_HT_LOADED_SESSION (TPM_HT)(0x02)
#define TPM_HT_POLICY_SESSION (TPM_HT)(0x03)
#define TPM_HT_ACTIVE_SESSION (TPM_HT)(0x03)
#define TPM_HT_PERMANENT (TPM_HT)(0x40)
#define TPM_HT_TRANSIENT (TPM_HT)(0x80)
#define TPM_HT_PERSISTENT (TPM_HT)(0x81)
// Table 27 - TPM_RH Constants
typedef UINT32 TPM_RH;
#define TPM_RH_FIRST (TPM_RH)(0x40000000)
#define TPM_RH_SRK (TPM_RH)(0x40000000)
#define TPM_RH_OWNER (TPM_RH)(0x40000001)
#define TPM_RH_REVOKE (TPM_RH)(0x40000002)
#define TPM_RH_TRANSPORT (TPM_RH)(0x40000003)
#define TPM_RH_OPERATOR (TPM_RH)(0x40000004)
#define TPM_RH_ADMIN (TPM_RH)(0x40000005)
#define TPM_RH_EK (TPM_RH)(0x40000006)
#define TPM_RH_NULL (TPM_RH)(0x40000007)
#define TPM_RH_UNASSIGNED (TPM_RH)(0x40000008)
#define TPM_RS_PW (TPM_RH)(0x40000009)
#define TPM_RH_LOCKOUT (TPM_RH)(0x4000000A)
#define TPM_RH_ENDORSEMENT (TPM_RH)(0x4000000B)
#define TPM_RH_PLATFORM (TPM_RH)(0x4000000C)
#define TPM_RH_PLATFORM_NV (TPM_RH)(0x4000000D)
#define TPM_RH_AUTH_00 (TPM_RH)(0x40000010)
#define TPM_RH_AUTH_FF (TPM_RH)(0x4000010F)
#define TPM_RH_LAST (TPM_RH)(0x4000010F)
// Table 28 - TPM_HC Constants
typedef TPM_HANDLE TPM_HC;
#define HR_HANDLE_MASK (TPM_HC)(0x00FFFFFF)
#define HR_RANGE_MASK (TPM_HC)(0xFF000000)
#define HR_SHIFT (TPM_HC)(24)
#define HR_PCR (TPM_HC)((TPM_HC)TPM_HT_PCR << HR_SHIFT)
#define HR_HMAC_SESSION (TPM_HC)((TPM_HC)TPM_HT_HMAC_SESSION << HR_SHIFT)
#define HR_POLICY_SESSION (TPM_HC)((TPM_HC)TPM_HT_POLICY_SESSION << HR_SHIFT)
#define HR_TRANSIENT (TPM_HC)((TPM_HC)TPM_HT_TRANSIENT << HR_SHIFT)
#define HR_PERSISTENT (TPM_HC)((TPM_HC)TPM_HT_PERSISTENT << HR_SHIFT)
#define HR_NV_INDEX (TPM_HC)((TPM_HC)TPM_HT_NV_INDEX << HR_SHIFT)
#define HR_PERMANENT (TPM_HC)((TPM_HC)TPM_HT_PERMANENT << HR_SHIFT)
#define PCR_FIRST (TPM_HC)(HR_PCR + 0)
#define PCR_LAST (TPM_HC)(PCR_FIRST + IMPLEMENTATION_PCR - 1)
#define HMAC_SESSION_FIRST (TPM_HC)(HR_HMAC_SESSION + 0)
#define HMAC_SESSION_LAST (TPM_HC)(HMAC_SESSION_FIRST + MAX_ACTIVE_SESSIONS - 1)
#define LOADED_SESSION_FIRST (TPM_HC)(HMAC_SESSION_FIRST)
#define LOADED_SESSION_LAST (TPM_HC)(HMAC_SESSION_LAST)
#define POLICY_SESSION_FIRST (TPM_HC)(HR_POLICY_SESSION + 0)
#define POLICY_SESSION_LAST (TPM_HC)(POLICY_SESSION_FIRST + MAX_ACTIVE_SESSIONS - 1)
#define TRANSIENT_FIRST (TPM_HC)(HR_TRANSIENT + 0)
#define ACTIVE_SESSION_FIRST (TPM_HC)(POLICY_SESSION_FIRST)
#define ACTIVE_SESSION_LAST (TPM_HC)(POLICY_SESSION_LAST)
#define TRANSIENT_LAST (TPM_HC)(TRANSIENT_FIRST+MAX_LOADED_OBJECTS - 1)
#define PERSISTENT_FIRST (TPM_HC)(HR_PERSISTENT + 0)
#define PERSISTENT_LAST (TPM_HC)(PERSISTENT_FIRST + 0x00FFFFFF)
#define PLATFORM_PERSISTENT (TPM_HC)(PERSISTENT_FIRST + 0x00800000)
#define NV_INDEX_FIRST (TPM_HC)(HR_NV_INDEX + 0)
#define NV_INDEX_LAST (TPM_HC)(NV_INDEX_FIRST + 0x00FFFFFF)
#define PERMANENT_FIRST (TPM_HC)(TPM_RH_FIRST)
#define PERMANENT_LAST (TPM_HC)(TPM_RH_LAST)
// 8 Attribute Structures
// Table 29 - TPMA_ALGORITHM Bits
//
// Attribute bits describing an algorithm (reported in
// TPMS_ALG_PROPERTY). Bit positions follow TPM 2.0 Part 2, Table 29.
// NOTE(review): this relies on LSB-first bit-field allocation, which is
// implementation-defined in C — correct for the MSVC/GCC little-endian
// ABIs but not guaranteed by the language.
typedef struct {
UINT32 asymmetric : 1; // SET: asymmetric algorithm with public/private parts
UINT32 symmetric : 1; // SET: symmetric block cipher
UINT32 hash : 1; // SET: hash algorithm
UINT32 object : 1; // SET: algorithm usable as an object type
UINT32 reserved4_7 : 4;
UINT32 signing : 1; // SET: signing mode
UINT32 encrypting : 1; // SET: encryption/decryption mode
UINT32 method : 1; // SET: method (e.g. a key-derivation function)
UINT32 reserved11_31 : 21;
} TPMA_ALGORITHM;
// Table 30 - TPMA_OBJECT Bits
//
// Attribute bits of a TPM object (key or data blob). Bit positions
// follow TPM 2.0 Part 2, Table 30; same LSB-first bit-field allocation
// caveat as TPMA_ALGORITHM applies.
typedef struct {
UINT32 reserved1 : 1;
UINT32 fixedTPM : 1; // SET: hierarchy of the object may not change
UINT32 stClear : 1; // SET: object invalidated on TPM2_Startup(CLEAR)
UINT32 reserved4 : 1;
UINT32 fixedParent : 1; // SET: parent of the object may not change
UINT32 sensitiveDataOrigin : 1; // SET: sensitive portion generated by the TPM
UINT32 userWithAuth : 1; // SET: USER-role actions allowed with authValue/HMAC
UINT32 adminWithPolicy : 1; // SET: ADMIN-role actions require a policy session
UINT32 reserved8_9 : 2;
UINT32 noDA : 1; // SET: object not subject to dictionary-attack protection
UINT32 encryptedDuplication : 1; // SET: duplication requires encryption
UINT32 reserved12_15 : 4;
UINT32 restricted : 1; // SET: key usage restricted to TPM-generated structures
UINT32 decrypt : 1; // SET: key may be used to decrypt
UINT32 sign : 1; // SET: key may be used to sign
UINT32 reserved19_31 : 13;
} TPMA_OBJECT;
// Table 31 - TPMA_SESSION Bits
//
// Per-command session attribute byte carried in an authorization area.
// Bit positions follow TPM 2.0 Part 2, Table 31 (LSB-first allocation
// assumed, as elsewhere in this file).
typedef struct {
UINT8 continueSession : 1; // SET: session remains active after the command
UINT8 auditExclusive : 1; // SET: command only executes if audit is exclusive
UINT8 auditReset : 1; // SET: audit digest is reset for this session
UINT8 reserved3_4 : 2;
UINT8 decrypt : 1; // SET: first command parameter is session-encrypted
UINT8 encrypt : 1; // SET: first response parameter is session-encrypted
UINT8 audit : 1; // SET: session is used for audit
} TPMA_SESSION;
// Table 32 - TPMA_LOCALITY Bits
//
// Locality attribute byte: one bit per standard locality 0-4, plus a
// 3-bit field for extended localities.
//
// NOTE: Field names use lower case ("locZero" etc.) to resolve a name
// conflict (original upstream comment).
//
typedef struct {
UINT8 locZero : 1; // SET: locality 0
UINT8 locOne : 1; // SET: locality 1
UINT8 locTwo : 1; // SET: locality 2
UINT8 locThree : 1; // SET: locality 3
UINT8 locFour : 1; // SET: locality 4
UINT8 Extended : 3; // Extended-locality selector (nonzero => extended)
} TPMA_LOCALITY;
// Table 33 - TPMA_PERMANENT Bits
//
// Persistent TPM state flags, readable via
// TPM2_GetCapability(TPM_PT_PERMANENT). Bit positions follow TPM 2.0
// Part 2, Table 33.
typedef struct {
UINT32 ownerAuthSet : 1; // SET: storage hierarchy auth has been set
UINT32 endorsementAuthSet : 1; // SET: endorsement hierarchy auth has been set
UINT32 lockoutAuthSet : 1; // SET: lockout auth has been set
UINT32 reserved3_7 : 5;
UINT32 disableClear : 1; // SET: TPM2_Clear is disabled
UINT32 inLockout : 1; // SET: TPM is in dictionary-attack lockout
UINT32 tpmGeneratedEPS : 1; // SET: EPS was generated by this TPM
UINT32 reserved11_31 : 21;
} TPMA_PERMANENT;
// Table 34 - TPMA_STARTUP_CLEAR Bits
//
// State flags reset on TPM2_Startup(CLEAR), readable via
// TPM2_GetCapability(TPM_PT_STARTUP_CLEAR). Note the non-contiguous
// layout: "orderly" is bit 31 per TPM 2.0 Part 2, Table 34.
typedef struct {
UINT32 phEnable : 1; // SET: platform hierarchy enabled
UINT32 shEnable : 1; // SET: storage hierarchy enabled
UINT32 ehEnable : 1; // SET: endorsement hierarchy enabled
UINT32 reserved3_30 : 28;
UINT32 orderly : 1; // SET: last shutdown was orderly
} TPMA_STARTUP_CLEAR;
// Table 35 - TPMA_MEMORY Bits
//
// Memory-management properties of the TPM implementation, readable via
// TPM2_GetCapability(TPM_PT_MEMORY).
typedef struct {
UINT32 sharedRAM : 1; // SET: RAM is shared with other TPM functions
UINT32 sharedNV : 1; // SET: NV memory is shared with other TPM functions
UINT32 objectCopiedToRam : 1; // SET: objects are copied to RAM when loaded
UINT32 reserved3_31 : 29;
} TPMA_MEMORY;
// Table 36 - TPMA_CC Bits
//
// Per-command attributes reported by
// TPM2_GetCapability(TPM_CAP_COMMANDS). Bit positions follow TPM 2.0
// Part 2, Table 36 (LSB-first allocation assumed, as elsewhere in this
// file).
typedef struct {
UINT32 commandIndex : 16; // Command index (low 16 bits of the TPM_CC)
UINT32 reserved16_21 : 6;
UINT32 nv : 1; // SET: command may write to NV
UINT32 extensive : 1; // SET: command may flush many objects/sessions
UINT32 flushed : 1; // SET: handled transient objects are flushed on completion
UINT32 cHandles : 3; // Number of handles in the command's handle area
UINT32 rHandle : 1; // SET: response carries a handle
UINT32 V : 1; // SET: vendor-specific command
UINT32 Res : 2;
} TPMA_CC;
// 9 Interface Types
// Each TPMI_* alias below is a plain typedef of TPM_HANDLE, TPM_ALG_ID, or
// TPM_ST. The distinct names document the interface-type constraints from the
// TPM 2.0 spec tables; no additional type checking is performed by C.
// Table 37 - TPMI_YES_NO Type
typedef BYTE TPMI_YES_NO;
// Table 38 - TPMI_DH_OBJECT Type
typedef TPM_HANDLE TPMI_DH_OBJECT;
// Table 39 - TPMI_DH_PERSISTENT Type
typedef TPM_HANDLE TPMI_DH_PERSISTENT;
// Table 40 - TPMI_DH_ENTITY Type
typedef TPM_HANDLE TPMI_DH_ENTITY;
// Table 41 - TPMI_DH_PCR Type
typedef TPM_HANDLE TPMI_DH_PCR;
// Table 42 - TPMI_SH_AUTH_SESSION Type
typedef TPM_HANDLE TPMI_SH_AUTH_SESSION;
// Table 43 - TPMI_SH_HMAC Type
typedef TPM_HANDLE TPMI_SH_HMAC;
// Table 44 - TPMI_SH_POLICY Type
typedef TPM_HANDLE TPMI_SH_POLICY;
// Table 45 - TPMI_DH_CONTEXT Type
typedef TPM_HANDLE TPMI_DH_CONTEXT;
// Table 46 - TPMI_RH_HIERARCHY Type
typedef TPM_HANDLE TPMI_RH_HIERARCHY;
// Table 47 - TPMI_RH_HIERARCHY_AUTH Type
typedef TPM_HANDLE TPMI_RH_HIERARCHY_AUTH;
// Table 48 - TPMI_RH_PLATFORM Type
typedef TPM_HANDLE TPMI_RH_PLATFORM;
// Table 49 - TPMI_RH_OWNER Type
typedef TPM_HANDLE TPMI_RH_OWNER;
// Table 50 - TPMI_RH_ENDORSEMENT Type
typedef TPM_HANDLE TPMI_RH_ENDORSEMENT;
// Table 51 - TPMI_RH_PROVISION Type
typedef TPM_HANDLE TPMI_RH_PROVISION;
// Table 52 - TPMI_RH_CLEAR Type
typedef TPM_HANDLE TPMI_RH_CLEAR;
// Table 53 - TPMI_RH_NV_AUTH Type
typedef TPM_HANDLE TPMI_RH_NV_AUTH;
// Table 54 - TPMI_RH_LOCKOUT Type
typedef TPM_HANDLE TPMI_RH_LOCKOUT;
// Table 55 - TPMI_RH_NV_INDEX Type
typedef TPM_HANDLE TPMI_RH_NV_INDEX;
// Table 56 - TPMI_ALG_HASH Type
typedef TPM_ALG_ID TPMI_ALG_HASH;
// Table 57 - TPMI_ALG_ASYM Type
typedef TPM_ALG_ID TPMI_ALG_ASYM;
// Table 58 - TPMI_ALG_SYM Type
typedef TPM_ALG_ID TPMI_ALG_SYM;
// Table 59 - TPMI_ALG_SYM_OBJECT Type
typedef TPM_ALG_ID TPMI_ALG_SYM_OBJECT;
// Table 60 - TPMI_ALG_SYM_MODE Type
typedef TPM_ALG_ID TPMI_ALG_SYM_MODE;
// Table 61 - TPMI_ALG_KDF Type
typedef TPM_ALG_ID TPMI_ALG_KDF;
// Table 62 - TPMI_ALG_SIG_SCHEME Type
typedef TPM_ALG_ID TPMI_ALG_SIG_SCHEME;
// Table 63 - TPMI_ECC_KEY_EXCHANGE Type
typedef TPM_ALG_ID TPMI_ECC_KEY_EXCHANGE;
// Table 64 - TPMI_ST_COMMAND_TAG Type
typedef TPM_ST TPMI_ST_COMMAND_TAG;
// 10 Structure Definitions
// TPM2B_* structures are the spec's size-prefixed buffers: a UINT16 byte
// count followed by a payload sized to the largest value it may carry.
// Table 65 - TPMS_ALGORITHM_DESCRIPTION Structure
typedef struct {
TPM_ALG_ID alg;
TPMA_ALGORITHM attributes;
} TPMS_ALGORITHM_DESCRIPTION;
// Table 66 - TPMU_HA Union
// Digest buffer for every hash algorithm the stack supports; union size is
// the largest digest (sha512).
typedef union {
BYTE sha1[SHA1_DIGEST_SIZE];
BYTE sha256[SHA256_DIGEST_SIZE];
BYTE sm3_256[SM3_256_DIGEST_SIZE];
BYTE sha384[SHA384_DIGEST_SIZE];
BYTE sha512[SHA512_DIGEST_SIZE];
} TPMU_HA;
// Table 67 - TPMT_HA Structure
// A digest tagged with the algorithm that produced it.
typedef struct {
TPMI_ALG_HASH hashAlg;
TPMU_HA digest;
} TPMT_HA;
// Table 68 - TPM2B_DIGEST Structure
typedef struct {
UINT16 size;
BYTE buffer[sizeof (TPMU_HA)]; // large enough for any supported digest
} TPM2B_DIGEST;
// Table 69 - TPM2B_DATA Structure
typedef struct {
UINT16 size;
BYTE buffer[sizeof (TPMT_HA)]; // spec sizes this to a tagged hash
} TPM2B_DATA;
// Table 70 - TPM2B_NONCE Types
typedef TPM2B_DIGEST TPM2B_NONCE;
// Table 71 - TPM2B_AUTH Types
typedef TPM2B_DIGEST TPM2B_AUTH;
// Table 72 - TPM2B_OPERAND Types
typedef TPM2B_DIGEST TPM2B_OPERAND;
// Table 73 - TPM2B_EVENT Structure
typedef struct {
UINT16 size;
BYTE buffer[1024]; // fixed 1 KiB event payload per spec table 73
} TPM2B_EVENT;
// Table 74 - TPM2B_MAX_BUFFER Structure
typedef struct {
UINT16 size;
BYTE buffer[MAX_DIGEST_BUFFER];
} TPM2B_MAX_BUFFER;
// Table 75 - TPM2B_MAX_NV_BUFFER Structure
typedef struct {
UINT16 size;
BYTE buffer[MAX_NV_INDEX_SIZE];
} TPM2B_MAX_NV_BUFFER;
// Table 76 - TPM2B_TIMEOUT Structure
typedef struct {
UINT16 size;
BYTE buffer[sizeof (UINT64)]; // timeout value is at most 8 bytes
} TPM2B_TIMEOUT;
// Table 77 -- TPM2B_IV Structure
typedef struct {
UINT16 size;
BYTE buffer[MAX_SYM_BLOCK_SIZE];
} TPM2B_IV;
// Table 78 - TPMU_NAME Union
// A Name is either a tagged digest (for objects) or a raw handle.
typedef union {
TPMT_HA digest;
TPM_HANDLE handle;
} TPMU_NAME;
// Table 79 - TPM2B_NAME Structure
typedef struct {
UINT16 size;
BYTE name[sizeof (TPMU_NAME)];
} TPM2B_NAME;
// Table 80 - TPMS_PCR_SELECT Structure
// Bitmap selecting PCRs; sizeofSelect gives the number of valid bytes.
typedef struct {
UINT8 sizeofSelect;
BYTE pcrSelect[PCR_SELECT_MAX];
} TPMS_PCR_SELECT;
// Table 81 - TPMS_PCR_SELECTION Structure
// PCR bitmap qualified by the hash bank it applies to.
typedef struct {
TPMI_ALG_HASH hash;
UINT8 sizeofSelect;
BYTE pcrSelect[PCR_SELECT_MAX];
} TPMS_PCR_SELECTION;
// Table 84 - TPMT_TK_CREATION Structure
// The four ticket structures below share an identical layout; they are kept
// distinct so the tag/type system mirrors the spec tables.
typedef struct {
TPM_ST tag;
TPMI_RH_HIERARCHY hierarchy;
TPM2B_DIGEST digest;
} TPMT_TK_CREATION;
// Table 85 - TPMT_TK_VERIFIED Structure
typedef struct {
TPM_ST tag;
TPMI_RH_HIERARCHY hierarchy;
TPM2B_DIGEST digest;
} TPMT_TK_VERIFIED;
// Table 86 - TPMT_TK_AUTH Structure
typedef struct {
TPM_ST tag;
TPMI_RH_HIERARCHY hierarchy;
TPM2B_DIGEST digest;
} TPMT_TK_AUTH;
// Table 87 - TPMT_TK_HASHCHECK Structure
typedef struct {
TPM_ST tag;
TPMI_RH_HIERARCHY hierarchy;
TPM2B_DIGEST digest;
} TPMT_TK_HASHCHECK;
// Table 88 - TPMS_ALG_PROPERTY Structure
typedef struct {
TPM_ALG_ID alg;
TPMA_ALGORITHM algProperties;
} TPMS_ALG_PROPERTY;
// Table 89 - TPMS_TAGGED_PROPERTY Structure
typedef struct {
TPM_PT property;
UINT32 value;
} TPMS_TAGGED_PROPERTY;
// Table 90 - TPMS_TAGGED_PCR_SELECT Structure
typedef struct {
TPM_PT tag;
UINT8 sizeofSelect;
BYTE pcrSelect[PCR_SELECT_MAX];
} TPMS_TAGGED_PCR_SELECT;
// TPML_* structures are the spec's counted lists: a UINT32 element count
// followed by a fixed-capacity array; only the first `count` entries are valid.
// Table 91 - TPML_CC Structure
typedef struct {
UINT32 count;
TPM_CC commandCodes[MAX_CAP_CC];
} TPML_CC;
// Table 92 - TPML_CCA Structure
typedef struct {
UINT32 count;
TPMA_CC commandAttributes[MAX_CAP_CC];
} TPML_CCA;
// Table 93 - TPML_ALG Structure
typedef struct {
UINT32 count;
TPM_ALG_ID algorithms[MAX_ALG_LIST_SIZE];
} TPML_ALG;
// Table 94 - TPML_HANDLE Structure
typedef struct {
UINT32 count;
TPM_HANDLE handle[MAX_CAP_HANDLES];
} TPML_HANDLE;
// Table 95 - TPML_DIGEST Structure
typedef struct {
UINT32 count;
TPM2B_DIGEST digests[8]; // spec fixes this list's capacity at 8
} TPML_DIGEST;
// Table 96 -- TPML_DIGEST_VALUES Structure
typedef struct {
UINT32 count;
TPMT_HA digests[HASH_COUNT];
} TPML_DIGEST_VALUES;
// Table 97 - TPM2B_DIGEST_VALUES Structure
typedef struct {
UINT16 size;
BYTE buffer[sizeof (TPML_DIGEST_VALUES)];
} TPM2B_DIGEST_VALUES;
// Table 98 - TPML_PCR_SELECTION Structure
typedef struct {
UINT32 count;
TPMS_PCR_SELECTION pcrSelections[HASH_COUNT];
} TPML_PCR_SELECTION;
// Table 99 - TPML_ALG_PROPERTY Structure
typedef struct {
UINT32 count;
TPMS_ALG_PROPERTY algProperties[MAX_CAP_ALGS];
} TPML_ALG_PROPERTY;
// Table 100 - TPML_TAGGED_TPM_PROPERTY Structure
typedef struct {
UINT32 count;
TPMS_TAGGED_PROPERTY tpmProperty[MAX_TPM_PROPERTIES];
} TPML_TAGGED_TPM_PROPERTY;
// Table 101 - TPML_TAGGED_PCR_PROPERTY Structure
typedef struct {
UINT32 count;
TPMS_TAGGED_PCR_SELECT pcrProperty[MAX_PCR_PROPERTIES];
} TPML_TAGGED_PCR_PROPERTY;
// Table 102 - TPML_ECC_CURVE Structure
typedef struct {
UINT32 count;
TPM_ECC_CURVE eccCurves[MAX_ECC_CURVES];
} TPML_ECC_CURVE;
// Table 103 - TPMU_CAPABILITIES Union
// One arm per TPM_CAP category; the active arm is selected by the
// `capability` field of TPMS_CAPABILITY_DATA below.
typedef union {
TPML_ALG_PROPERTY algorithms;
TPML_HANDLE handles;
TPML_CCA command;
TPML_CC ppCommands;
TPML_CC auditCommands;
TPML_PCR_SELECTION assignedPCR;
TPML_TAGGED_TPM_PROPERTY tpmProperties;
TPML_TAGGED_PCR_PROPERTY pcrProperties;
TPML_ECC_CURVE eccCurves;
} TPMU_CAPABILITIES;
// Table 104 - TPMS_CAPABILITY_DATA Structure
typedef struct {
TPM_CAP capability;
TPMU_CAPABILITIES data;
} TPMS_CAPABILITY_DATA;
// Table 105 - TPMS_CLOCK_INFO Structure
typedef struct {
UINT64 clock;
UINT32 resetCount;
UINT32 restartCount;
TPMI_YES_NO safe;
} TPMS_CLOCK_INFO;
// Table 106 - TPMS_TIME_INFO Structure
typedef struct {
UINT64 time;
TPMS_CLOCK_INFO clockInfo;
} TPMS_TIME_INFO;
// Table 107 - TPMS_TIME_ATTEST_INFO Structure
typedef struct {
TPMS_TIME_INFO time;
UINT64 firmwareVersion;
} TPMS_TIME_ATTEST_INFO;
// Table 108 - TPMS_CERTIFY_INFO Structure
typedef struct {
TPM2B_NAME name;
TPM2B_NAME qualifiedName;
} TPMS_CERTIFY_INFO;
// Table 109 - TPMS_QUOTE_INFO Structure
typedef struct {
TPML_PCR_SELECTION pcrSelect;
TPM2B_DIGEST pcrDigest;
} TPMS_QUOTE_INFO;
// Table 110 - TPMS_COMMAND_AUDIT_INFO Structure
typedef struct {
UINT64 auditCounter;
TPM_ALG_ID digestAlg;
TPM2B_DIGEST auditDigest;
TPM2B_DIGEST commandDigest;
} TPMS_COMMAND_AUDIT_INFO;
// Table 111 - TPMS_SESSION_AUDIT_INFO Structure
typedef struct {
TPMI_YES_NO exclusiveSession;
TPM2B_DIGEST sessionDigest;
} TPMS_SESSION_AUDIT_INFO;
// Table 112 - TPMS_CREATION_INFO Structure
typedef struct {
TPM2B_NAME objectName;
TPM2B_DIGEST creationHash;
} TPMS_CREATION_INFO;
// Table 113 - TPMS_NV_CERTIFY_INFO Structure
typedef struct {
TPM2B_NAME indexName;
UINT16 offset;
TPM2B_MAX_NV_BUFFER nvContents;
} TPMS_NV_CERTIFY_INFO;
// Table 114 - TPMI_ST_ATTEST Type
typedef TPM_ST TPMI_ST_ATTEST;
// Table 115 - TPMU_ATTEST Union
// One arm per attestation command; selected by TPMS_ATTEST.type below.
typedef union {
TPMS_CERTIFY_INFO certify;
TPMS_CREATION_INFO creation;
TPMS_QUOTE_INFO quote;
TPMS_COMMAND_AUDIT_INFO commandAudit;
TPMS_SESSION_AUDIT_INFO sessionAudit;
TPMS_TIME_ATTEST_INFO time;
TPMS_NV_CERTIFY_INFO nv;
} TPMU_ATTEST;
// Table 116 - TPMS_ATTEST Structure
// Body of a signed attestation blob; `magic` is the TPM_GENERATED marker and
// `type` selects the active arm of `attested`.
typedef struct {
TPM_GENERATED magic;
TPMI_ST_ATTEST type;
TPM2B_NAME qualifiedSigner;
TPM2B_DATA extraData;
TPMS_CLOCK_INFO clockInfo;
UINT64 firmwareVersion;
TPMU_ATTEST attested;
} TPMS_ATTEST;
// Table 117 - TPM2B_ATTEST Structure
typedef struct {
UINT16 size;
BYTE attestationData[sizeof (TPMS_ATTEST)];
} TPM2B_ATTEST;
// Table 118 - TPMS_AUTH_COMMAND Structure
// Per-session authorization area sent with a command.
typedef struct {
TPMI_SH_AUTH_SESSION sessionHandle;
TPM2B_NONCE nonce;
TPMA_SESSION sessionAttributes;
TPM2B_AUTH hmac;
} TPMS_AUTH_COMMAND;
// Table 119 - TPMS_AUTH_RESPONSE Structure
// Per-session authorization area returned with a response (no handle field).
typedef struct {
TPM2B_NONCE nonce;
TPMA_SESSION sessionAttributes;
TPM2B_AUTH hmac;
} TPMS_AUTH_RESPONSE;
// 11 Algorithm Parameters and Structures
// Table 120 - TPMI_AES_KEY_BITS Type
typedef TPM_KEY_BITS TPMI_AES_KEY_BITS;
// Table 121 - TPMI_SM4_KEY_BITS Type
typedef TPM_KEY_BITS TPMI_SM4_KEY_BITS;
// Table 122 - TPMU_SYM_KEY_BITS Union
// Key-size selector per symmetric algorithm. NOTE(review): the member name
// `xor` is valid in C but is a reserved alternative operator token in C++
// (<iso646.h>); this header is presumably C-only — confirm before including
// from C++ translation units.
typedef union {
TPMI_AES_KEY_BITS aes;
TPMI_SM4_KEY_BITS SM4;
TPM_KEY_BITS sym;
TPMI_ALG_HASH xor;
} TPMU_SYM_KEY_BITS;
// Table 123 - TPMU_SYM_MODE Union
typedef union {
TPMI_ALG_SYM_MODE aes;
TPMI_ALG_SYM_MODE SM4;
TPMI_ALG_SYM_MODE sym;
} TPMU_SYM_MODE;
// Table 125 - TPMT_SYM_DEF Structure
// Full symmetric-algorithm selection: algorithm id, key size, and mode.
typedef struct {
TPMI_ALG_SYM algorithm;
TPMU_SYM_KEY_BITS keyBits;
TPMU_SYM_MODE mode;
} TPMT_SYM_DEF;
// Table 126 - TPMT_SYM_DEF_OBJECT Structure
// Same layout as TPMT_SYM_DEF but restricted to object-compatible algorithms.
typedef struct {
TPMI_ALG_SYM_OBJECT algorithm;
TPMU_SYM_KEY_BITS keyBits;
TPMU_SYM_MODE mode;
} TPMT_SYM_DEF_OBJECT;
// Table 127 - TPM2B_SYM_KEY Structure
typedef struct {
UINT16 size;
BYTE buffer[MAX_SYM_KEY_BYTES];
} TPM2B_SYM_KEY;
// Table 128 - TPMS_SYMCIPHER_PARMS Structure
typedef struct {
TPMT_SYM_DEF_OBJECT sym;
} TPMS_SYMCIPHER_PARMS;
// Table 129 - TPM2B_SENSITIVE_DATA Structure
typedef struct {
UINT16 size;
BYTE buffer[MAX_SYM_DATA];
} TPM2B_SENSITIVE_DATA;
// Table 130 - TPMS_SENSITIVE_CREATE Structure
// Secret inputs to object creation: auth value plus optional seed data.
typedef struct {
TPM2B_AUTH userAuth;
TPM2B_SENSITIVE_DATA data;
} TPMS_SENSITIVE_CREATE;
// Table 131 - TPM2B_SENSITIVE_CREATE Structure
typedef struct {
UINT16 size;
TPMS_SENSITIVE_CREATE sensitive;
} TPM2B_SENSITIVE_CREATE;
// Table 132 - TPMS_SCHEME_SIGHASH Structure
// Common shape for hash-parameterized signing schemes; several scheme types
// below are typedef'd to this single-field structure.
typedef struct {
TPMI_ALG_HASH hashAlg;
} TPMS_SCHEME_SIGHASH;
// Table 133 - TPMI_ALG_KEYEDHASH_SCHEME Type
typedef TPM_ALG_ID TPMI_ALG_KEYEDHASH_SCHEME;
// Table 134 - HMAC_SIG_SCHEME Types
typedef TPMS_SCHEME_SIGHASH TPMS_SCHEME_HMAC;
// Table 135 - TPMS_SCHEME_XOR Structure
typedef struct {
TPMI_ALG_HASH hashAlg;
TPMI_ALG_KDF kdf;
} TPMS_SCHEME_XOR;
// Table 136 - TPMU_SCHEME_KEYEDHASH Union
typedef union {
TPMS_SCHEME_HMAC hmac;
TPMS_SCHEME_XOR xor;
} TPMU_SCHEME_KEYEDHASH;
// Table 137 - TPMT_KEYEDHASH_SCHEME Structure
typedef struct {
TPMI_ALG_KEYEDHASH_SCHEME scheme;
TPMU_SCHEME_KEYEDHASH details;
} TPMT_KEYEDHASH_SCHEME;
// Table 138 - RSA_SIG_SCHEMES Types
typedef TPMS_SCHEME_SIGHASH TPMS_SCHEME_RSASSA;
typedef TPMS_SCHEME_SIGHASH TPMS_SCHEME_RSAPSS;
// Table 139 - ECC_SIG_SCHEMES Types
typedef TPMS_SCHEME_SIGHASH TPMS_SCHEME_ECDSA;
typedef TPMS_SCHEME_SIGHASH TPMS_SCHEME_SM2;
typedef TPMS_SCHEME_SIGHASH TPMS_SCHEME_ECSCHNORR;
// Table 140 - TPMS_SCHEME_ECDAA Structure
typedef struct {
TPMI_ALG_HASH hashAlg;
UINT16 count;
} TPMS_SCHEME_ECDAA;
// Table 141 - TPMU_SIG_SCHEME Union
// Scheme parameters for every supported signing algorithm; `any` gives
// untyped access to the common hashAlg field.
typedef union {
TPMS_SCHEME_RSASSA rsassa;
TPMS_SCHEME_RSAPSS rsapss;
TPMS_SCHEME_ECDSA ecdsa;
TPMS_SCHEME_ECDAA ecdaa;
TPMS_SCHEME_ECSCHNORR ecSchnorr;
TPMS_SCHEME_HMAC hmac;
TPMS_SCHEME_SIGHASH any;
} TPMU_SIG_SCHEME;
// Table 142 - TPMT_SIG_SCHEME Structure
typedef struct {
TPMI_ALG_SIG_SCHEME scheme;
TPMU_SIG_SCHEME details;
} TPMT_SIG_SCHEME;
// Table 143 - TPMS_SCHEME_OAEP Structure
typedef struct {
TPMI_ALG_HASH hashAlg;
} TPMS_SCHEME_OAEP;
// Table 144 - TPMS_SCHEME_ECDH Structure
typedef struct {
TPMI_ALG_HASH hashAlg;
} TPMS_SCHEME_ECDH;
// Table 145 - TPMS_SCHEME_MGF1 Structure
typedef struct {
TPMI_ALG_HASH hashAlg;
} TPMS_SCHEME_MGF1;
// Table 146 - TPMS_SCHEME_KDF1_SP800_56a Structure
typedef struct {
TPMI_ALG_HASH hashAlg;
} TPMS_SCHEME_KDF1_SP800_56a;
// Table 147 - TPMS_SCHEME_KDF2 Structure
typedef struct {
TPMI_ALG_HASH hashAlg;
} TPMS_SCHEME_KDF2;
// Table 148 - TPMS_SCHEME_KDF1_SP800_108 Structure
typedef struct {
TPMI_ALG_HASH hashAlg;
} TPMS_SCHEME_KDF1_SP800_108;
// Table 149 - TPMU_KDF_SCHEME Union
typedef union {
TPMS_SCHEME_MGF1 mgf1;
TPMS_SCHEME_KDF1_SP800_56a kdf1_SP800_56a;
TPMS_SCHEME_KDF2 kdf2;
TPMS_SCHEME_KDF1_SP800_108 kdf1_sp800_108;
} TPMU_KDF_SCHEME;
// Table 150 - TPMT_KDF_SCHEME Structure
typedef struct {
TPMI_ALG_KDF scheme;
TPMU_KDF_SCHEME details;
} TPMT_KDF_SCHEME;
// Table 151 - TPMI_ALG_ASYM_SCHEME Type
typedef TPM_ALG_ID TPMI_ALG_ASYM_SCHEME;
// Table 152 - TPMU_ASYM_SCHEME Union
// Scheme parameters for asymmetric algorithms (signing and encryption).
typedef union {
TPMS_SCHEME_RSASSA rsassa;
TPMS_SCHEME_RSAPSS rsapss;
TPMS_SCHEME_OAEP oaep;
TPMS_SCHEME_ECDSA ecdsa;
TPMS_SCHEME_ECDAA ecdaa;
TPMS_SCHEME_ECSCHNORR ecSchnorr;
TPMS_SCHEME_SIGHASH anySig;
} TPMU_ASYM_SCHEME;
// Table 153 - TPMT_ASYM_SCHEME Structure
typedef struct {
TPMI_ALG_ASYM_SCHEME scheme;
TPMU_ASYM_SCHEME details;
} TPMT_ASYM_SCHEME;
// Table 154 - TPMI_ALG_RSA_SCHEME Type
typedef TPM_ALG_ID TPMI_ALG_RSA_SCHEME;
// Table 155 - TPMT_RSA_SCHEME Structure
typedef struct {
TPMI_ALG_RSA_SCHEME scheme;
TPMU_ASYM_SCHEME details;
} TPMT_RSA_SCHEME;
// Table 156 - TPMI_ALG_RSA_DECRYPT Type
typedef TPM_ALG_ID TPMI_ALG_RSA_DECRYPT;
// Table 157 - TPMT_RSA_DECRYPT Structure
typedef struct {
TPMI_ALG_RSA_DECRYPT scheme;
TPMU_ASYM_SCHEME details;
} TPMT_RSA_DECRYPT;
// Table 158 - TPM2B_PUBLIC_KEY_RSA Structure
typedef struct {
UINT16 size;
BYTE buffer[MAX_RSA_KEY_BYTES];
} TPM2B_PUBLIC_KEY_RSA;
// Table 159 - TPMI_RSA_KEY_BITS Type
typedef TPM_KEY_BITS TPMI_RSA_KEY_BITS;
// Table 160 - TPM2B_PRIVATE_KEY_RSA Structure
typedef struct {
UINT16 size;
BYTE buffer[MAX_RSA_KEY_BYTES/2]; // private part holds one prime: half the modulus
} TPM2B_PRIVATE_KEY_RSA;
// Table 161 - TPM2B_ECC_PARAMETER Structure
typedef struct {
UINT16 size;
BYTE buffer[MAX_ECC_KEY_BYTES];
} TPM2B_ECC_PARAMETER;
// Table 162 - TPMS_ECC_POINT Structure
typedef struct {
TPM2B_ECC_PARAMETER x;
TPM2B_ECC_PARAMETER y;
} TPMS_ECC_POINT;
// Table 163 -- TPM2B_ECC_POINT Structure
typedef struct {
UINT16 size;
TPMS_ECC_POINT point;
} TPM2B_ECC_POINT;
// Table 164 - TPMI_ALG_ECC_SCHEME Type
typedef TPM_ALG_ID TPMI_ALG_ECC_SCHEME;
// Table 165 - TPMI_ECC_CURVE Type
typedef TPM_ECC_CURVE TPMI_ECC_CURVE;
// Table 166 - TPMT_ECC_SCHEME Structure
typedef struct {
TPMI_ALG_ECC_SCHEME scheme;
TPMU_SIG_SCHEME details;
} TPMT_ECC_SCHEME;
// Table 167 - TPMS_ALGORITHM_DETAIL_ECC Structure
// Full domain parameters of an ECC curve (prime, coefficients, generator,
// order, cofactor).
typedef struct {
TPM_ECC_CURVE curveID;
UINT16 keySize;
TPMT_KDF_SCHEME kdf;
TPMT_ECC_SCHEME sign;
TPM2B_ECC_PARAMETER p;
TPM2B_ECC_PARAMETER a;
TPM2B_ECC_PARAMETER b;
TPM2B_ECC_PARAMETER gX;
TPM2B_ECC_PARAMETER gY;
TPM2B_ECC_PARAMETER n;
TPM2B_ECC_PARAMETER h;
} TPMS_ALGORITHM_DETAIL_ECC;
// Table 168 - TPMS_SIGNATURE_RSASSA Structure
typedef struct {
TPMI_ALG_HASH hash;
TPM2B_PUBLIC_KEY_RSA sig;
} TPMS_SIGNATURE_RSASSA;
// Table 169 - TPMS_SIGNATURE_RSAPSS Structure
typedef struct {
TPMI_ALG_HASH hash;
TPM2B_PUBLIC_KEY_RSA sig;
} TPMS_SIGNATURE_RSAPSS;
// Table 170 - TPMS_SIGNATURE_ECDSA Structure
typedef struct {
TPMI_ALG_HASH hash;
TPM2B_ECC_PARAMETER signatureR;
TPM2B_ECC_PARAMETER signatureS;
} TPMS_SIGNATURE_ECDSA;
// Table 171 - TPMU_SIGNATURE Union
// All ECC-family signatures reuse the ECDSA layout (r/s pair).
typedef union {
TPMS_SIGNATURE_RSASSA rsassa;
TPMS_SIGNATURE_RSAPSS rsapss;
TPMS_SIGNATURE_ECDSA ecdsa;
TPMS_SIGNATURE_ECDSA sm2;
TPMS_SIGNATURE_ECDSA ecdaa;
TPMS_SIGNATURE_ECDSA ecschnorr;
TPMT_HA hmac;
TPMS_SCHEME_SIGHASH any;
} TPMU_SIGNATURE;
// Table 172 - TPMT_SIGNATURE Structure
typedef struct {
TPMI_ALG_SIG_SCHEME sigAlg;
TPMU_SIGNATURE signature;
} TPMT_SIGNATURE;
// Table 173 - TPMU_ENCRYPTED_SECRET Union
// Buffer sized per key type used to protect the seed; union size is the
// largest arm (rsa).
typedef union {
BYTE ecc[sizeof (TPMS_ECC_POINT)];
BYTE rsa[MAX_RSA_KEY_BYTES];
BYTE symmetric[sizeof (TPM2B_DIGEST)];
BYTE keyedHash[sizeof (TPM2B_DIGEST)];
} TPMU_ENCRYPTED_SECRET;
// Table 174 - TPM2B_ENCRYPTED_SECRET Structure
typedef struct {
UINT16 size;
BYTE secret[sizeof (TPMU_ENCRYPTED_SECRET)];
} TPM2B_ENCRYPTED_SECRET;
// 12 Key/Object Complex
// Table 175 - TPMI_ALG_PUBLIC Type
typedef TPM_ALG_ID TPMI_ALG_PUBLIC;
// Table 176 - TPMU_PUBLIC_ID Union
// The public (unique) portion of a key, shape selected by TPMT_PUBLIC.type.
typedef union {
TPM2B_DIGEST keyedHash;
TPM2B_DIGEST sym;
TPM2B_PUBLIC_KEY_RSA rsa;
TPMS_ECC_POINT ecc;
} TPMU_PUBLIC_ID;
// Table 177 - TPMS_KEYEDHASH_PARMS Structure
typedef struct {
TPMT_KEYEDHASH_SCHEME scheme;
} TPMS_KEYEDHASH_PARMS;
// Table 178 - TPMS_ASYM_PARMS Structure
typedef struct {
TPMT_SYM_DEF_OBJECT symmetric;
TPMT_ASYM_SCHEME scheme;
} TPMS_ASYM_PARMS;
// Table 179 - TPMS_RSA_PARMS Structure
typedef struct {
TPMT_SYM_DEF_OBJECT symmetric;
TPMT_RSA_SCHEME scheme;
TPMI_RSA_KEY_BITS keyBits;
UINT32 exponent;
} TPMS_RSA_PARMS;
// Table 180 - TPMS_ECC_PARMS Structure
typedef struct {
TPMT_SYM_DEF_OBJECT symmetric;
TPMT_ECC_SCHEME scheme;
TPMI_ECC_CURVE curveID;
TPMT_KDF_SCHEME kdf;
} TPMS_ECC_PARMS;
// Table 181 - TPMU_PUBLIC_PARMS Union
typedef union {
TPMS_KEYEDHASH_PARMS keyedHashDetail;
TPMT_SYM_DEF_OBJECT symDetail;
TPMS_RSA_PARMS rsaDetail;
TPMS_ECC_PARMS eccDetail;
TPMS_ASYM_PARMS asymDetail;
} TPMU_PUBLIC_PARMS;
// Table 182 - TPMT_PUBLIC_PARMS Structure
typedef struct {
TPMI_ALG_PUBLIC type;
TPMU_PUBLIC_PARMS parameters;
} TPMT_PUBLIC_PARMS;
// Table 183 - TPMT_PUBLIC Structure
// The full public area of a TPM object; `type` selects the active arms of
// both `parameters` and `unique`.
typedef struct {
TPMI_ALG_PUBLIC type;
TPMI_ALG_HASH nameAlg;
TPMA_OBJECT objectAttributes;
TPM2B_DIGEST authPolicy;
TPMU_PUBLIC_PARMS parameters;
TPMU_PUBLIC_ID unique;
} TPMT_PUBLIC;
// Table 184 - TPM2B_PUBLIC Structure
typedef struct {
UINT16 size;
TPMT_PUBLIC publicArea;
} TPM2B_PUBLIC;
// Table 185 - TPM2B_PRIVATE_VENDOR_SPECIFIC Structure
typedef struct {
UINT16 size;
BYTE buffer[PRIVATE_VENDOR_SPECIFIC_BYTES];
} TPM2B_PRIVATE_VENDOR_SPECIFIC;
// Table 186 - TPMU_SENSITIVE_COMPOSITE Union
// The secret portion of a key, shape selected by TPMT_SENSITIVE.sensitiveType.
typedef union {
TPM2B_PRIVATE_KEY_RSA rsa;
TPM2B_ECC_PARAMETER ecc;
TPM2B_SENSITIVE_DATA bits;
TPM2B_SYM_KEY sym;
TPM2B_PRIVATE_VENDOR_SPECIFIC any;
} TPMU_SENSITIVE_COMPOSITE;
// Table 187 - TPMT_SENSITIVE Structure
typedef struct {
TPMI_ALG_PUBLIC sensitiveType;
TPM2B_AUTH authValue;
TPM2B_DIGEST seedValue;
TPMU_SENSITIVE_COMPOSITE sensitive;
} TPMT_SENSITIVE;
// Table 188 - TPM2B_SENSITIVE Structure
typedef struct {
UINT16 size;
TPMT_SENSITIVE sensitiveArea;
} TPM2B_SENSITIVE;
// Table 189 - _PRIVATE Structure
// Internal layout of an encrypted private blob: outer/inner integrity
// digests wrapping the sensitive area.
typedef struct {
TPM2B_DIGEST integrityOuter;
TPM2B_DIGEST integrityInner;
TPMT_SENSITIVE sensitive;
} _PRIVATE;
// Table 190 - TPM2B_PRIVATE Structure
typedef struct {
UINT16 size;
BYTE buffer[sizeof (_PRIVATE)];
} TPM2B_PRIVATE;
// Table 191 - _ID_OBJECT Structure
typedef struct {
TPM2B_DIGEST integrityHMAC;
TPM2B_DIGEST encIdentity;
} _ID_OBJECT;
// Table 192 - TPM2B_ID_OBJECT Structure
typedef struct {
UINT16 size;
BYTE credential[sizeof (_ID_OBJECT)];
} TPM2B_ID_OBJECT;
// 13 NV Storage Structures
// Table 193 - TPM_NV_INDEX Bits
//
// NOTE: Comment here to resolve conflict
//
// typedef struct {
// UINT32 index : 22;
// UINT32 space : 2;
// UINT32 RH_NV : 8;
// } TPM_NV_INDEX;
// Table 195 - TPMA_NV Bits
// NV index attribute flags. Bit positions follow declaration order: bits 0-6
// are the write/type flags, 7-9 reserved, 10-19 write-lock and read-auth
// flags, 20-24 reserved, 25-31 the remaining state flags (32 bits total).
typedef struct {
UINT32 TPMA_NV_PPWRITE : 1;
UINT32 TPMA_NV_OWNERWRITE : 1;
UINT32 TPMA_NV_AUTHWRITE : 1;
UINT32 TPMA_NV_POLICYWRITE : 1;
UINT32 TPMA_NV_COUNTER : 1;
UINT32 TPMA_NV_BITS : 1;
UINT32 TPMA_NV_EXTEND : 1;
UINT32 reserved7_9 : 3;
UINT32 TPMA_NV_POLICY_DELETE : 1;
UINT32 TPMA_NV_WRITELOCKED : 1;
UINT32 TPMA_NV_WRITEALL : 1;
UINT32 TPMA_NV_WRITEDEFINE : 1;
UINT32 TPMA_NV_WRITE_STCLEAR : 1;
UINT32 TPMA_NV_GLOBALLOCK : 1;
UINT32 TPMA_NV_PPREAD : 1;
UINT32 TPMA_NV_OWNERREAD : 1;
UINT32 TPMA_NV_AUTHREAD : 1;
UINT32 TPMA_NV_POLICYREAD : 1;
UINT32 reserved20_24 : 5;
UINT32 TPMA_NV_NO_DA : 1;
UINT32 TPMA_NV_ORDERLY : 1;
UINT32 TPMA_NV_CLEAR_STCLEAR : 1;
UINT32 TPMA_NV_READLOCKED : 1;
UINT32 TPMA_NV_WRITTEN : 1;
UINT32 TPMA_NV_PLATFORMCREATE : 1;
UINT32 TPMA_NV_READ_STCLEAR : 1;
} TPMA_NV;
// Table 196 - TPMS_NV_PUBLIC Structure
// Public definition of an NV index: its handle, name algorithm, attributes,
// access policy, and data size in bytes.
typedef struct {
TPMI_RH_NV_INDEX nvIndex;
TPMI_ALG_HASH nameAlg;
TPMA_NV attributes;
TPM2B_DIGEST authPolicy;
UINT16 dataSize;
} TPMS_NV_PUBLIC;
// Table 197 - TPM2B_NV_PUBLIC Structure
typedef struct {
UINT16 size;
TPMS_NV_PUBLIC nvPublic;
} TPM2B_NV_PUBLIC;
// 14 Context Data
// Table 198 - TPM2B_CONTEXT_SENSITIVE Structure
typedef struct {
UINT16 size;
BYTE buffer[MAX_CONTEXT_SIZE];
} TPM2B_CONTEXT_SENSITIVE;
// Table 199 - TPMS_CONTEXT_DATA Structure
// Integrity digest plus the encrypted context payload.
typedef struct {
TPM2B_DIGEST integrity;
TPM2B_CONTEXT_SENSITIVE encrypted;
} TPMS_CONTEXT_DATA;
// Table 200 - TPM2B_CONTEXT_DATA Structure
typedef struct {
UINT16 size;
BYTE buffer[sizeof (TPMS_CONTEXT_DATA)];
} TPM2B_CONTEXT_DATA;
// Table 201 - TPMS_CONTEXT Structure
// Saved object/session context as returned by TPM2_ContextSave.
typedef struct {
UINT64 sequence;
TPMI_DH_CONTEXT savedHandle;
TPMI_RH_HIERARCHY hierarchy;
TPM2B_CONTEXT_DATA contextBlob;
} TPMS_CONTEXT;
// 15 Creation Data
// Table 203 - TPMS_CREATION_DATA Structure
// Environment snapshot recorded when an object is created (PCR state,
// locality, parent identity, caller-supplied outside info).
typedef struct {
TPML_PCR_SELECTION pcrSelect;
TPM2B_DIGEST pcrDigest;
TPMA_LOCALITY locality;
TPM_ALG_ID parentNameAlg;
TPM2B_NAME parentName;
TPM2B_NAME parentQualifiedName;
TPM2B_DATA outsideInfo;
} TPMS_CREATION_DATA;
// Table 204 - TPM2B_CREATION_DATA Structure
typedef struct {
UINT16 size;
TPMS_CREATION_DATA creationData;
} TPM2B_CREATION_DATA;
//
// Command Header
//
// Wire headers prefixed to every TPM 2.0 command and response. NOTE(review):
// these sit inside a #pragma pack region closed just below — the opening
// pragma is earlier in the file (outside this chunk); confirm it sets 1-byte
// packing as the wire format requires.
typedef struct {
TPM_ST tag;
UINT32 paramSize;
TPM_CC commandCode;
} TPM2_COMMAND_HEADER;
typedef struct {
TPM_ST tag;
UINT32 paramSize;
TPM_RC responseCode;
} TPM2_RESPONSE_HEADER;
#pragma pack ()
//
// TCG Algorithm Registry
//
// Hash-algorithm capability bitmask values (one bit per algorithm, so they
// can be OR'd together).
#define HASH_ALG_SHA1 0x00000001
#define HASH_ALG_SHA256 0x00000002
#define HASH_ALG_SHA384 0x00000004
#define HASH_ALG_SHA512 0x00000008
#define HASH_ALG_SM3_256 0x00000010
#endif
================================================
FILE: driver/types/tpmptp.h
================================================
/** @file
Platform TPM Profile Specification definition for TPM2.0.
It covers both FIFO and CRB interface.
Copyright (c) 2016 - 2018, Intel Corporation. All rights reserved.
SPDX-License-Identifier: BSD-2-Clause-Patent
**/
#ifndef _TPM_PTP_H_
#define _TPM_PTP_H_
//
// PTP FIFO definition
//
//
// Set structure alignment to 1-byte
//
#pragma pack(1)
//
// Register set map as specified in PTP specification Chapter 5
//
// Memory-mapped FIFO (TIS-style) register bank for one locality. Declared
// under #pragma pack(1); the trailing offset comments are the byte offsets
// from the start of the structure, and the reserved padding brings the total
// size to 0x1000 (one locality page).
typedef struct {
///
/// Used to gain ownership for this particular port.
///
UINT8 Access; // 0
UINT8 Reserved1[7]; // 1
///
/// Controls interrupts.
///
UINT32 IntEnable; // 8
///
/// SIRQ vector to be used by the TPM.
///
UINT8 IntVector; // 0ch
UINT8 Reserved2[3]; // 0dh
///
/// What caused interrupt.
///
UINT32 IntSts; // 10h
///
/// Shows which interrupts are supported by that particular TPM.
///
UINT32 InterfaceCapability; // 14h
///
/// Status Register. Provides status of the TPM.
///
UINT8 Status; // 18h
///
/// Number of consecutive writes that can be done to the TPM.
///
UINT16 BurstCount; // 19h
///
/// Additional Status Register.
///
UINT8 StatusEx; // 1Bh
UINT8 Reserved3[8]; // 1Ch
///
/// Read or write FIFO, depending on transaction.
///
UINT32 DataFifo; // 24h
UINT8 Reserved4[8]; // 28h
///
/// Used to identify the Interface types supported by the TPM.
///
UINT32 InterfaceId; // 30h
UINT8 Reserved5[0x4c]; // 34h
///
/// Extended ReadFIFO or WriteFIFO, depending on the current bus cycle (read
/// or write)
///
UINT32 XDataFifo; // 80h
UINT8 Reserved6[0xe7c]; // 84h
///
/// Vendor ID
///
UINT16 Vid; // 0f00h
///
/// Device ID
///
UINT16 Did; // 0f02h
///
/// Revision ID
///
UINT8 Rid; // 0f04h
UINT8 Reserved[0xfb]; // 0f05h
} PTP_FIFO_REGISTERS;
//
// Restore original structure alignment
//
#pragma pack()
//
// Define pointer types used to access TIS registers on PC
//
// Pointer alias used to overlay PTP_FIFO_REGISTERS on the MMIO base address.
typedef PTP_FIFO_REGISTERS* PTP_FIFO_REGISTERS_PTR;
//
// Define bits of FIFO Interface Identifier Register
//
// Bit-field view of the FIFO InterfaceId register, overlaid on its raw
// 32-bit value via the union; declared widths sum to exactly 32 bits.
typedef union {
struct {
UINT32 InterfaceType : 4;
UINT32 InterfaceVersion : 4;
UINT32 CapLocality : 1;
UINT32 Reserved1 : 2;
UINT32 CapDataXferSizeSupport : 2;
UINT32 CapFIFO : 1;
UINT32 CapCRB : 1;
UINT32 CapIFRes : 2;
UINT32 InterfaceSelector : 2;
UINT32 IntfSelLock : 1;
UINT32 Reserved2 : 4;
UINT32 Reserved3 : 8;
} Bits;
UINT32 Uint32; // raw register value
} PTP_FIFO_INTERFACE_IDENTIFIER;
//
// Define bits of FIFO Interface Capability Register
//
// Bit-field view of the FIFO InterfaceCapability register; declared widths
// sum to exactly 32 bits. InterfaceVersion is decoded with the
// INTERFACE_CAPABILITY_INTERFACE_VERSION_* constants below.
typedef union {
struct {
UINT32 DataAvailIntSupport : 1;
UINT32 StsValidIntSupport : 1;
UINT32 LocalityChangeIntSupport : 1;
UINT32 InterruptLevelHigh : 1;
UINT32 InterruptLevelLow : 1;
UINT32 InterruptEdgeRising : 1;
UINT32 InterruptEdgeFalling : 1;
UINT32 CommandReadyIntSupport : 1;
UINT32 BurstCountStatic : 1;
UINT32 DataTransferSizeSupport : 2;
UINT32 Reserved : 17;
UINT32 InterfaceVersion : 3;
UINT32 Reserved2 : 1;
} Bits;
UINT32 Uint32; // raw register value
} PTP_FIFO_INTERFACE_CAPABILITY;
///
/// InterfaceVersion
///
#define INTERFACE_CAPABILITY_INTERFACE_VERSION_TIS_12 0x0
#define INTERFACE_CAPABILITY_INTERFACE_VERSION_TIS_13 0x2
#define INTERFACE_CAPABILITY_INTERFACE_VERSION_PTP 0x3
//
// Define bits of ACCESS and STATUS registers
//
///
/// This bit is a 1 to indicate that the other bits in this register are valid.
///
#define PTP_FIFO_VALID BIT7
///
/// Indicate that this locality is active.
///
#define PTP_FIFO_ACC_ACTIVE BIT5
///
/// Set to 1 to indicate that this locality had the TPM taken away while
/// this locality had the TIS_PC_ACC_ACTIVE bit set.
///
#define PTP_FIFO_ACC_SEIZED BIT4
///
/// Set to 1 to indicate that TPM MUST reset the
/// TIS_PC_ACC_ACTIVE bit and remove ownership for localities less than the
/// locality that is writing this bit.
///
#define PTP_FIFO_ACC_SEIZE BIT3
///
/// When this bit is 1, another locality is requesting usage of the TPM.
/// NOTE(review): name is misspelled ("PENDIND" for PENDING) in the upstream
/// EDK2 header; kept as-is since renaming would break existing callers.
///
#define PTP_FIFO_ACC_PENDIND BIT2
///
/// Set to 1 to indicate that this locality is requesting to use TPM.
/// NOTE(review): name is misspelled ("RQUUSE" for REQUSE) upstream; kept as-is.
///
#define PTP_FIFO_ACC_RQUUSE BIT1
///
/// A value of 1 indicates that a T/OS has not been established on the platform
///
#define PTP_FIFO_ACC_ESTABLISH BIT0
///
/// This field indicates that STS_DATA and STS_EXPECT are valid
///
#define PTP_FIFO_STS_VALID BIT7
///
/// When this bit is 1, TPM is in the Ready state,
/// indicating it is ready to receive a new command.
///
#define PTP_FIFO_STS_READY BIT6
///
/// Write a 1 to this bit to cause the TPM to execute that command.
///
#define PTP_FIFO_STS_GO BIT5
///
/// This bit indicates that the TPM has data available as a response.
///
#define PTP_FIFO_STS_DATA BIT4
///
/// The TPM sets this bit to a value of 1 when it expects another byte of data
/// for a command.
///
#define PTP_FIFO_STS_EXPECT BIT3
///
/// Indicates that the TPM has completed all self-test actions following a
/// TPM_ContinueSelfTest command.
///
#define PTP_FIFO_STS_SELFTEST_DONE BIT2
///
/// Writes a 1 to this bit to force the TPM to re-send the response.
///
#define PTP_FIFO_STS_RETRY BIT1
///
/// TPM Family Identifier.
/// 00: TPM 1.2 Family
/// 01: TPM 2.0 Family
///
#define PTP_FIFO_STS_EX_TPM_FAMILY (BIT2 | BIT3)
#define PTP_FIFO_STS_EX_TPM_FAMILY_OFFSET (2)
#define PTP_FIFO_STS_EX_TPM_FAMILY_TPM12 (0)
#define PTP_FIFO_STS_EX_TPM_FAMILY_TPM20 (BIT2)
///
/// A write of 1 after tpmGo and before dataAvail aborts the currently executing
/// command, resulting in a response of TPM_RC_CANCELLED. A write of 1 after
/// dataAvail and before tpmGo is ignored by the TPM.
///
#define PTP_FIFO_STS_EX_CANCEL BIT0
//
// PTP CRB definition
//
//
// Set structure alignment to 1-byte
//
#pragma pack(1)
//
// Register set map as specified in PTP specification Chapter 5
//
// Memory-mapped CRB register bank for one locality. Declared under
// #pragma pack(1); trailing comments give byte offsets, and the layout totals
// 0x1000 bytes (control area at 0h-7Fh, data buffer at 80h-0FFFh).
typedef struct {
///
/// Used to determine current state of Locality of the TPM.
///
UINT32 LocalityState; // 0
UINT8 Reserved1[4]; // 4
///
/// Used to gain control of the TPM by this Locality.
///
UINT32 LocalityControl; // 8
///
/// Used to determine whether Locality has been granted or Seized.
///
UINT32 LocalityStatus; // 0ch
UINT8 Reserved2[0x20]; // 10h
///
/// Used to identify the Interface types supported by the TPM.
///
UINT32 InterfaceId; // 30h
///
/// Vendor ID
///
UINT16 Vid; // 34h
///
/// Device ID
///
UINT16 Did; // 36h
///
/// Optional Register used in low memory environments prior to
/// CRB_DATA_BUFFER availability.
///
UINT64 CrbControlExtension; // 38h
///
/// Register used to initiate transactions for the CRB interface.
///
UINT32 CrbControlRequest; // 40h
///
/// Register used by the TPM to provide status of the CRB interface.
///
UINT32 CrbControlStatus; // 44h
///
/// Register used by software to cancel command processing.
///
UINT32 CrbControlCancel; // 48h
///
/// Register used to indicate presence of command or response data in the
/// CRB buffer.
///
UINT32 CrbControlStart; // 4Ch
///
/// Register used to configure and respond to interrupts.
///
UINT32 CrbInterruptEnable; // 50h
UINT32 CrbInterruptStatus; // 54h
///
/// Size of the Command buffer.
///
UINT32 CrbControlCommandSize; // 58h
///
/// Command buffer start address
///
UINT32 CrbControlCommandAddressLow; // 5Ch
UINT32 CrbControlCommandAddressHigh; // 60h
///
/// Size of the Response buffer
///
UINT32 CrbControlResponseSize; // 64h
///
/// Address of the start of the Response buffer
/// NOTE(review): field name is misspelled ("Addrss") in the upstream EDK2
/// header; kept as-is since renaming would break existing callers.
///
UINT64 CrbControlResponseAddrss; // 68h
UINT8 Reserved4[0x10]; // 70h
///
/// Command/Response Data may be defined as large as 3968 (0xF80).
///
UINT8 CrbDataBuffer[0xF80]; // 80h
} PTP_CRB_REGISTERS;
//
// Define pointer types used to access CRB registers on PTP
//
// Pointer alias used to overlay PTP_CRB_REGISTERS on the MMIO base address.
typedef PTP_CRB_REGISTERS* PTP_CRB_REGISTERS_PTR;
//
// Define bits of CRB Interface Identifier Register
//
// Bit-field view of the CRB InterfaceId register, overlaid on its raw 32-bit
// value via the union; declared widths sum to exactly 32 bits. Differs from
// the FIFO variant in bits 9-10 (CapCRBIdleBypass) and the top byte (Rid).
typedef union {
struct {
UINT32 InterfaceType : 4;
UINT32 InterfaceVersion : 4;
UINT32 CapLocality : 1;
UINT32 CapCRBIdleBypass : 1;
UINT32 Reserved1 : 1;
UINT32 CapDataXferSizeSupport : 2;
UINT32 CapFIFO : 1;
UINT32 CapCRB : 1;
UINT32 CapIFRes : 2;
UINT32 InterfaceSelector : 2;
UINT32 IntfSelLock : 1;
UINT32 Reserved2 : 4;
UINT32 Rid : 8;
} Bits;
UINT32 Uint32; // raw register value
} PTP_CRB_INTERFACE_IDENTIFIER;
///
/// InterfaceType
///
/// Values of the 4-bit InterfaceType field of the interface-identifier
/// registers above.
#define PTP_INTERFACE_IDENTIFIER_INTERFACE_TYPE_FIFO 0x0
#define PTP_INTERFACE_IDENTIFIER_INTERFACE_TYPE_CRB 0x1
#define PTP_INTERFACE_IDENTIFIER_INTERFACE_TYPE_TIS 0xF
// Enumerates the interface flavor detected at runtime (Tis/Fifo/Crb);
// Tpm2PtpInterfaceMax is a sentinel, not a real interface.
typedef enum {
Tpm2PtpInterfaceTis,
Tpm2PtpInterfaceFifo,
Tpm2PtpInterfaceCrb,
Tpm2PtpInterfaceMax
} TPM2_PTP_INTERFACE_TYPE;
///
/// InterfaceVersion
///
#define PTP_INTERFACE_IDENTIFIER_INTERFACE_VERSION_FIFO 0x0
#define PTP_INTERFACE_IDENTIFIER_INTERFACE_VERSION_CRB 0x1
///
/// InterfaceSelector
///
#define PTP_INTERFACE_IDENTIFIER_INTERFACE_SELECTOR_FIFO 0x0
#define PTP_INTERFACE_IDENTIFIER_INTERFACE_SELECTOR_CRB 0x1
//
// Define bits of Locality State Register
//
///
/// This bit indicates whether all other bits of this register contain valid
/// values, if it is a 1.
///
#define PTP_CRB_LOCALITY_STATE_TPM_REG_VALID_STATUS BIT7
///
/// 000 - Locality 0
/// 001 - Locality 1
/// 010 - Locality 2
/// 011 - Locality 3
/// 100 - Locality 4
///
/// Active locality is a 3-bit field occupying bits 2-4 of the register.
#define PTP_CRB_LOCALITY_STATE_ACTIVE_LOCALITY_MASK (BIT2 | BIT3 | BIT4)
#define PTP_CRB_LOCALITY_STATE_ACTIVE_LOCALITY_0 (0)
#define PTP_CRB_LOCALITY_STATE_ACTIVE_LOCALITY_1 (BIT2)
#define PTP_CRB_LOCALITY_STATE_ACTIVE_LOCALITY_2 (BIT3)
#define PTP_CRB_LOCALITY_STATE_ACTIVE_LOCALITY_3 (BIT2 | BIT3)
#define PTP_CRB_LOCALITY_STATE_ACTIVE_LOCALITY_4 (BIT4)
///
/// A 0 indicates to the host that no locality is assigned.
/// A 1 indicates a locality has been assigned.
///
#define PTP_CRB_LOCALITY_STATE_LOCALITY_ASSIGNED BIT1
///
/// The TPM clears this bit to 0 upon receipt of _TPM_Hash_End
/// The TPM sets this bit to a 1 when the TPM_LOC_CTRL_x.resetEstablishment
/// field is set to 1.
///
#define PTP_CRB_LOCALITY_STATE_TPM_ESTABLISHED BIT0
//
// Define bits of Locality Control Register
//
///
/// Writes (1): Reset TPM_LOC_STATE_x.tpmEstablished bit if the write occurs
/// from Locality 3 or 4.
///
#define PTP_CRB_LOCALITY_CONTROL_RESET_ESTABLISHMENT_BIT BIT3
///
/// Writes (1): The TPM gives control of the TPM to the locality setting this
/// bit if it is the higher priority locality.
///
#define PTP_CRB_LOCALITY_CONTROL_SEIZE BIT2
///
/// Writes (1): The active Locality is done with the TPM.
///
#define PTP_CRB_LOCALITY_CONTROL_RELINQUISH BIT1
///
/// Writes (1): Interrupt the TPM and generate a locality arbitration algorithm.
///
#define PTP_CRB_LOCALITY_CONTROL_REQUEST_ACCESS BIT0
//
// Define bits of Locality Status Register
//
///
/// 0: A higher locality has not initiated a Seize arbitration process.
/// 1: A higher locality has Seized the TPM from this locality.
///
#define PTP_CRB_LOCALITY_STATUS_BEEN_SEIZED BIT1
///
/// 0: Locality has not been granted to the TPM.
/// 1: Locality has been granted access to the TPM
///
#define PTP_CRB_LOCALITY_STATUS_GRANTED BIT0
//
// Define bits of CRB Control Area Request Register
//
///
/// Used by Software to indicate transition the TPM to and from the Idle state
/// 1: Set by Software to indicate response has been read from the response
/// buffer and TPM can transition to Idle 0: Cleared to 0 by TPM to acknowledge
/// the request when TPM enters Idle state. TPM SHALL complete this transition
/// within TIMEOUT_C.
///
#define PTP_CRB_CONTROL_AREA_REQUEST_GO_IDLE BIT1
///
/// Used by Software to request the TPM transition to the Ready State.
/// 1: Set to 1 by Software to indicate the TPM should be ready to receive a
/// command. 0: Cleared to 0 by TPM to acknowledge the request. TPM SHALL
/// complete this transition within TIMEOUT_C.
///
#define PTP_CRB_CONTROL_AREA_REQUEST_COMMAND_READY BIT0
//
// Define bits of CRB Control Area Status Register
//
///
/// Used by TPM to indicate it is in the Idle State
/// 1: Set by TPM when in the Idle State
/// 0: Cleared by TPM on receipt of TPM_CRB_CTRL_REQ_x.cmdReady when TPM
/// transitions to the Ready State. SHALL be cleared by TIMEOUT_C.
///
#define PTP_CRB_CONTROL_AREA_STATUS_TPM_IDLE BIT1
///
/// Used by the TPM to indicate current status.
/// 1: Set by TPM to indicate a FATAL Error
/// 0: Indicates TPM is operational
///
#define PTP_CRB_CONTROL_AREA_STATUS_TPM_STATUS BIT0
//
// Define bits of CRB Control Cancel Register
//
///
/// Used by software to cancel command processing Reads return correct value
/// Writes (0000 0001h): Cancel a command
/// Writes (0000 0000h): Clears field when command has been cancelled
///
#define PTP_CRB_CONTROL_CANCEL BIT0
//
// Define bits of CRB Control Start Register
//
///
/// When set by software, indicates a command is ready for processing.
/// Writes (0000 0001h): TPM transitions to Command Execution
/// Writes (0000 0000h): TPM clears this field and transitions to Command
/// Completion
///
#define PTP_CRB_CONTROL_START BIT0
//
// Restore original structure alignment
//
#pragma pack()
//
// Default TimeOut value
//
#define PTP_TIMEOUT_A (750 * 1000) // 750ms
#define PTP_TIMEOUT_B (2000 * 1000) // 2s
#define PTP_TIMEOUT_C (200 * 1000) // 200ms
#define PTP_TIMEOUT_D (30 * 1000) // 30ms
#endif
================================================
FILE: driver/types/types.h
================================================
#ifndef TYPES_H
#define TYPES_H
#include "../common.h"
#define REPORT_NMI_CALLBACK_FAILURE 50
#define REPORT_MODULE_VALIDATION_FAILURE 60
#define REPORT_ILLEGAL_HANDLE_OPERATION 70
#define REPORT_INVALID_PROCESS_ALLOCATION 80
#define REPORT_HIDDEN_SYSTEM_THREAD 90
#define REPORT_ILLEGAL_ATTACH_PROCESS 100
#define REPORT_APC_STACKWALK 110
#define REPORT_DPC_STACKWALK 120
#define REPORT_DATA_TABLE_ROUTINE 130
#define REPORT_INVALID_PROCESS_MODULE 140
#define REPORT_PATCHED_SYSTEM_MODULE 150
#define REPORT_SELF_DRIVER_PATCHED 160
#define REPORT_BLACKLISTED_PCIE_DEVICE 170
#define REPORT_EPT_HOOK 180
#define REPORT_SUBTYPE_NO_BACKING_MODULE 0x0
#define REPORT_SUBTYPE_INVALID_DISPATCH 0x1
#define REPORT_SUBTYPE_EXCEPTION_THROWING_RET 0x2
#define PACKET_TYPE_REPORT 0x0
#define PACKET_TYPE_HEARTBEAT 0x1
#define PACKET_MAGIC_NUMBER 0x1337
#define INIT_REPORT_PACKET(report, code, subcode) \
{ \
(report)->header.packet_header.packet_type = PACKET_TYPE_REPORT; \
(report)->header.packet_header.magic_number = PACKET_MAGIC_NUMBER; \
(report)->header.report_code = code; \
(report)->header.report_sub_type = subcode; \
}
#define INIT_HEARTBEAT_PACKET(packet) \
{ \
(packet)->header.packet_header.packet_type = PACKET_TYPE_HEARTBEAT; \
(packet)->header.packet_header.magic_number = PACKET_MAGIC_NUMBER; \
}
/* TODO: the naming here is fucking terrible need to clean everything up */
/* infact lots of the mess in the header files needs to be cleaned up */
/* use a UINT16 rather then enum to explicitly state the size */
/* Common prefix of every packet sent to usermode: the packet kind
 * (PACKET_TYPE_*) plus a fixed magic number for sanity checking. */
typedef struct _PACKET_HEADER {
UINT32 packet_type;
UINT32 magic_number;
} PACKET_HEADER, *PPACKET_HEADER;
/* Unencrypted header structures. Each must be exactly one AES block
 * (16 bytes) — enforced by the static_asserts below — so the encrypted
 * payload starts on a block boundary. */
typedef struct _REPORT_PACKET_HEADER {
PACKET_HEADER packet_header;
UINT32 report_code;
UINT32 report_sub_type;
} REPORT_PACKET_HEADER, *PREPORT_PACKET_HEADER;
typedef struct _HEARTBEAT_PACKET_HEADER {
PACKET_HEADER packet_header;
UINT32 unused[2]; /* padding to reach the 16-byte block size */
} HEARTBEAT_PACKET_HEADER, *PHEARTBEAT_PACKET_HEADER;
#define AES_256_BLOCK_SIZE 16
static_assert(sizeof(HEARTBEAT_PACKET_HEADER) == AES_256_BLOCK_SIZE,
"invalid heartbeat header size");
static_assert(sizeof(REPORT_PACKET_HEADER) == AES_256_BLOCK_SIZE,
"invalid report header size");
typedef enum _TABLE_ID {
HalDispatch = 0,
HalPrivateDispatch,
Win32kBase_gDxgInterface
} TABLE_ID;
typedef struct _HYPERVISOR_DETECTION_REPORT {
REPORT_PACKET_HEADER header;
UINT8 aperf_msr_timing_check;
UINT8 invd_emulation_check;
} HYPERVISOR_DETECTION_REPORT, *PHYPERVISOR_DETECTION_REPORT;
#define APC_STACKWALK_BUFFER_SIZE 500
typedef struct _APC_STACKWALK_REPORT {
REPORT_PACKET_HEADER header;
UINT64 kthread_address;
UINT64 invalid_rip;
CHAR driver[APC_STACKWALK_BUFFER_SIZE];
} APC_STACKWALK_REPORT, *PAPC_STACKWALK_REPORT;
typedef struct _DPC_STACKWALK_REPORT {
REPORT_PACKET_HEADER header;
UINT64 kthread_address;
UINT64 invalid_rip;
CHAR driver[APC_STACKWALK_BUFFER_SIZE];
} DPC_STACKWALK_REPORT, *PDPC_STACKWALK_REPORT;
typedef struct _MODULE_VALIDATION_FAILURE {
REPORT_PACKET_HEADER header;
UINT64 driver_base_address;
UINT64 driver_size;
CHAR driver_name[128];
} MODULE_VALIDATION_FAILURE, *PMODULE_VALIDATION_FAILURE;
#define DATA_TABLE_ROUTINE_BUF_SIZE 256
typedef struct _DATA_TABLE_ROUTINE_REPORT {
REPORT_PACKET_HEADER header;
TABLE_ID table_id;
UINT64 address;
UINT32 index;
CHAR routine[DATA_TABLE_ROUTINE_BUF_SIZE];
} DATA_TABLE_ROUTINE_REPORT, *PDATA_TABLE_ROUTINE_REPORT;
typedef struct _NMI_CALLBACK_FAILURE {
REPORT_PACKET_HEADER header;
UINT8 were_nmis_disabled;
UINT64 kthread_address;
UINT64 invalid_rip;
} NMI_CALLBACK_FAILURE, *PNMI_CALLBACK_FAILURE;
#define REPORT_INVALID_PROCESS_BUFFER_SIZE 500
typedef struct _INVALID_PROCESS_ALLOCATION_REPORT {
REPORT_PACKET_HEADER header;
CHAR process[REPORT_INVALID_PROCESS_BUFFER_SIZE];
} INVALID_PROCESS_ALLOCATION_REPORT, *PINVALID_PROCESS_ALLOCATION_REPORT;
typedef struct _HIDDEN_SYSTEM_THREAD_REPORT {
REPORT_PACKET_HEADER header;
UINT8 found_in_kthreadlist;
UINT8 found_in_pspcidtable;
UINT64 thread_address;
UINT32 thread_id;
CHAR thread[500];
} HIDDEN_SYSTEM_THREAD_REPORT, *PHIDDEN_SYSTEM_THREAD_REPORT;
typedef struct _ATTACH_PROCESS_REPORT {
REPORT_PACKET_HEADER header;
UINT32 thread_id;
UINT64 thread_address;
} ATTACH_PROCESS_REPORT, *PATTACH_PROCESS_REPORT;
typedef struct _KPRCB_THREAD_VALIDATION_CTX {
REPORT_PACKET_HEADER header;
UINT64 thread;
BOOLEAN thread_found_in_pspcidtable;
// BOOLEAN thread_found_in_kthreadlist;
BOOLEAN finished;
} KPRCB_THREAD_VALIDATION_CTX, *PKPRCB_THREAD_VALIDATION_CTX;
#define HANDLE_REPORT_PROCESS_NAME_MAX_LENGTH 64
typedef struct _OPEN_HANDLE_FAILURE_REPORT {
REPORT_PACKET_HEADER header;
UINT32 is_kernel_handle;
UINT32 process_id;
UINT32 thread_id;
UINT32 access;
CHAR process_name[HANDLE_REPORT_PROCESS_NAME_MAX_LENGTH];
} OPEN_HANDLE_FAILURE_REPORT, *POPEN_HANDLE_FAILURE_REPORT;
#define MODULE_PATH_LEN 256
typedef struct _PROCESS_MODULE_VALIDATION_REPORT {
REPORT_PACKET_HEADER header;
UINT64 image_base;
UINT32 image_size;
WCHAR module_path[MODULE_PATH_LEN];
} PROCESS_MODULE_VALIDATION_REPORT, *PPROCESS_MODULE_VALIDATION_REPORT;
typedef struct _HEARTBEAT_PACKET {
HEARTBEAT_PACKET_HEADER header;
UINT32 heartbeat_count;
UINT32 total_reports_completed;
UINT32 total_irps_completed;
UINT32 total_heartbeats_completed;
} HEARTBEAT_PACKET, *PHEARTBEAT_PACKET;
typedef struct _SYSTEM_MODULE_INTEGRITY_CHECK_REPORT {
REPORT_PACKET_HEADER header;
UINT64 image_base;
UINT32 image_size;
CHAR path_name[0x100];
} SYSTEM_MODULE_INTEGRITY_CHECK_REPORT, *PSYSTEM_MODULE_INTEGRITY_CHECK_REPORT;
typedef struct _EPT_HOOK_REPORT {
REPORT_PACKET_HEADER header;
UINT64 control_average;
UINT64 read_average;
CHAR function_name[128];
} EPT_HOOK_REPORT, *PEPT_HOOK_REPORT;
typedef struct _DRIVER_SELF_INTEGRITY_CHECK_REPORT {
REPORT_PACKET_HEADER header;
UINT64 image_base;
UINT32 image_size;
CHAR path_name[0x100];
} DRIVER_SELF_INTEGRITY_CHECK_REPORT, *PDRIVER_SELF_INTEGRITY_CHECK_REPORT;
typedef struct _BLACKLISTED_PCIE_DEVICE_REPORT {
REPORT_PACKET_HEADER header;
UINT64 device_object;
UINT16 device_id;
UINT16 vendor_id;
} BLACKLISTED_PCIE_DEVICE_REPORT, *PBLACKLISTED_PCIE_DEVICE_REPORT;
#endif
================================================
FILE: driver/util.c
================================================
#include "common.h"
#include "lib/stdlib.h"
/*
 * Produce a pseudo-random seed by XOR-mixing the current system time with
 * the tick count. Not cryptographically strong; intended only as an RNG
 * seed.
 */
LARGE_INTEGER
GenerateRandSeed()
{
    LARGE_INTEGER time_now = {0};
    LARGE_INTEGER tick_count = {0};
    LARGE_INTEGER result = {0};

    KeQuerySystemTime(&time_now);
    KeQueryTickCount(&tick_count);

    result.QuadPart = time_now.QuadPart ^ tick_count.QuadPart;
    return result;
}
/*
 * Map a physical address into system space and read ReadLength bytes into
 * OutputBuffer via the register-buffer routines.
 *
 * PhysicalAddress    - physical address to read.
 * ReadLength         - bytes to read; must be 1, 2, 4 or 8.
 * OutputBuffer       - receives the value read.
 * OutputBufferLength - size of OutputBuffer in bytes.
 *
 * Returns STATUS_INVALID_PARAMETER for an unsupported ReadLength,
 * STATUS_BUFFER_TOO_SMALL if the output buffer is too small, and
 * STATUS_UNSUCCESSFUL if the mapping fails.
 */
NTSTATUS
MapAndReadPhysical(
    _In_ UINT64 PhysicalAddress,
    _In_ UINT32 ReadLength,
    _Out_ PVOID OutputBuffer,
    _In_ UINT32 OutputBufferLength)
{
    PVOID va = NULL;
    PHYSICAL_ADDRESS pa = {.QuadPart = PhysicalAddress};

    if (ReadLength > OutputBufferLength)
        return STATUS_BUFFER_TOO_SMALL;

    /* BUG FIX: previously an unsupported length fell through the switch,
     * returning STATUS_SUCCESS while leaving the _Out_ buffer untouched.
     * Reject it before mapping anything. */
    switch (ReadLength) {
    case 1:
    case 2:
    case 4:
    case 8: break;
    default: return STATUS_INVALID_PARAMETER;
    }

    va = MmMapIoSpace(pa, ReadLength, MmNonCached);

    if (!va)
        return STATUS_UNSUCCESSFUL;

    switch (ReadLength) {
    case 1: READ_REGISTER_BUFFER_UCHAR(va, OutputBuffer, 1); break;
    case 2: READ_REGISTER_BUFFER_USHORT(va, OutputBuffer, 1); break;
    case 4: READ_REGISTER_BUFFER_ULONG(va, OutputBuffer, 1); break;
    case 8: READ_REGISTER_BUFFER_ULONG64(va, OutputBuffer, 1); break;
    }

    MmUnmapIoSpace(va, ReadLength);
    return STATUS_SUCCESS;
}
/*
 * Convert a UNICODE_STRING into a NUL-terminated ANSI string in OutBuffer.
 *
 * UnicodeString - source string.
 * OutBuffer     - receives the converted, NUL-terminated string.
 * OutBufferSize - size of OutBuffer in bytes.
 *
 * Returns STATUS_BUFFER_TOO_SMALL if the converted string plus terminator
 * does not fit, or the RtlUnicodeStringToAnsiString error on failure.
 */
NTSTATUS
UnicodeToCharBufString(
    _In_ PUNICODE_STRING UnicodeString,
    _Out_ PVOID OutBuffer,
    _In_ UINT32 OutBufferSize)
{
    ANSI_STRING string = {0};
    NTSTATUS status = STATUS_UNSUCCESSFUL;

    status = RtlUnicodeStringToAnsiString(&string, UnicodeString, TRUE);

    if (!NT_SUCCESS(status)) {
        DEBUG_ERROR("RtlUnicodeStringToAnsiString: %x", status);
        return status;
    }

    /* BUG FIX: ANSI_STRING.Length excludes the terminator and the original
     * never wrote one, so callers treating OutBuffer as a C string read
     * past the copied bytes. Reserve one byte and terminate explicitly. */
    if ((UINT32)string.Length + 1 > OutBufferSize) {
        RtlFreeAnsiString(&string);
        return STATUS_BUFFER_TOO_SMALL;
    }

    IntCopyMemory(OutBuffer, string.Buffer, string.Length);
    ((PCHAR)OutBuffer)[string.Length] = '\0';

    RtlFreeAnsiString(&string);
    return STATUS_SUCCESS;
}
#define BYTES_PER_LINE 16
/*
 * Hex-dump Buffer to the kernel debugger: an offset column, then the hex
 * bytes for the line, then a printable-ASCII column where non-printable
 * bytes are rendered as '.'.
 */
VOID
DumpBufferToKernelDebugger(_In_ PCHAR Buffer, _In_ UINT32 BufferLength)
{
    UINT32 line = 0;
    UINT32 col = 0;

    for (line = 0; line < BufferLength; line += BYTES_PER_LINE) {
        HEX_DUMP("%08x ", line);

        /* hex column; pad short final lines so the ASCII column aligns */
        for (col = 0; col < BYTES_PER_LINE; ++col) {
            if (line + col >= BufferLength) {
                HEX_DUMP(" ");
                continue;
            }
            HEX_DUMP("%02x ", (unsigned char)Buffer[line + col]);
        }

        HEX_DUMP(" ");

        /* ASCII column */
        for (col = 0; col < BYTES_PER_LINE; ++col) {
            if (line + col >= BufferLength)
                continue;

            char symbol = Buffer[line + col];

            if (symbol < 32 || symbol > 126)
                HEX_DUMP(".");
            else
                HEX_DUMP("%c", symbol);
        }

        HEX_DUMP("\n");
    }
}
================================================
FILE: driver/util.h
================================================
#ifndef UTIL_H
#define UTIL_H
#include "common.h"
LARGE_INTEGER
GenerateRandSeed();
NTSTATUS
MapAndReadPhysical(_In_ UINT64 PhysicalAddress,
_In_ UINT32 ReadLength,
_Out_ PVOID OutputBuffer,
_In_ UINT32 OutputBufferLength);
NTSTATUS
UnicodeToCharBufString(_In_ PUNICODE_STRING UnicodeString,
_Out_ PVOID OutBuffer,
_In_ UINT32 OutBufferSize);
VOID
DumpBufferToKernelDebugger(_In_ PCHAR Buffer, _In_ UINT32 BufferLength);
#endif
================================================
FILE: module/client/message_queue.cpp
================================================
#include "message_queue.h"
#include
#define TEST_STEAM_64_ID 123456789;
/*
 * Open the named pipe used to talk to the server. In NO_SERVER builds no
 * pipe is created and queue operations become no-ops.
 * NOTE(review): the template argument was lost in extraction here —
 * presumably std::make_unique<pipe>(PipeName); confirm against the repo.
 */
client::message_queue::message_queue(LPTSTR PipeName) {
#if NO_SERVER
LOG_INFO("No_Server build used. Not opening named pipe.");
#else
this->pipe_interface = std::make_unique(PipeName);
#endif
}
/*
 * Read Size bytes from the server pipe into Buffer; no-op in NO_SERVER
 * builds. NOTE(review): this file uses `#if NO_SERVER` while dispatcher.cpp
 * uses `#ifdef NO_SERVER` — verify NO_SERVER is always defined with a
 * numeric value or unify the two forms.
 */
void client::message_queue::dequeue_message(void *Buffer, size_t Size) {
#if NO_SERVER
return;
#else
this->pipe_interface->read_pipe(Buffer, Size);
#endif
}
/*
 * Queue a message for the server. Currently unimplemented: both the
 * NO_SERVER and server branches simply return without touching Buffer.
 */
void client::message_queue::enqueue_message(void *Buffer, size_t Size) {
#if NO_SERVER
return;
#else
return;
#endif
}
================================================
FILE: module/client/message_queue.h
================================================
#ifndef REPORT_H
#define REPORT_H
#include
#include "../dispatcher/threadpool.h"
#include "../common.h"
#include "pipe.h"
#define REPORT_BUFFER_SIZE 8192
#define SEND_BUFFER_SIZE 8192
#define MAX_SIGNATURE_SIZE 256
#define MESSAGE_TYPE_CLIENT_REPORT 1
#define MESSAGE_TYPE_CLIENT_SEND 2
#define MESSAGE_TYPE_CLIENT_REQUEST 3
namespace client {
class message_queue {
struct MESSAGE_PACKET_HEADER {
int message_type;
int request_id;
unsigned __int64 steam64_id;
};
std::unique_ptr pipe_interface;
std::mutex lock;
byte report_buffer[REPORT_BUFFER_SIZE];
public:
message_queue(LPTSTR PipeName);
void enqueue_message(void *Buffer, size_t Size);
void dequeue_message(void *Buffer, size_t Size);
};
} // namespace client
#endif
================================================
FILE: module/client/pipe.cpp
================================================
#include "pipe.h"
#include "../common.h"
#include
/*
 * Connect to an existing named pipe for synchronous read/write. On failure
 * the handle is left as INVALID_HANDLE_VALUE and an error is logged.
 *
 * BUG FIX: the handle was opened with FILE_FLAG_OVERLAPPED, but read_pipe
 * and write_pipe pass a null OVERLAPPED — the ReadFile/WriteFile docs state
 * results are unreliable in that combination. Open synchronously instead.
 */
client::pipe::pipe(LPTSTR PipeName) {
  this->pipe_name = PipeName;
  this->pipe_handle =
      CreateFile(this->pipe_name, GENERIC_READ | GENERIC_WRITE, 0, NULL,
                 OPEN_EXISTING, 0, NULL);
  if (this->pipe_handle == INVALID_HANDLE_VALUE) {
    LOG_ERROR("CreateFile failed with status 0x%x", GetLastError());
    return;
  }
}
/*
 * Write Size bytes from Buffer to the pipe, logging on failure.
 *
 * BUG FIX: the original ignored WriteFile's BOOL return and only tested
 * bytes_written == 0, which both misses failures that report partial state
 * and flags legitimate zero-byte writes as errors.
 */
void client::pipe::write_pipe(PVOID Buffer, SIZE_T Size) {
  DWORD bytes_written = 0;
  if (!WriteFile(this->pipe_handle, Buffer, (DWORD)Size, &bytes_written,
                 NULL)) {
    LOG_ERROR("WriteFile failed with status code 0x%x", GetLastError());
    return;
  }
}
/*
 * Read up to Size bytes from the pipe into Buffer, logging on failure.
 *
 * FIX: `status == NULL` compared a BOOL against a pointer constant; test
 * the boolean result directly.
 */
void client::pipe::read_pipe(PVOID Buffer, SIZE_T Size) {
  DWORD bytes_read = 0;
  if (!ReadFile(this->pipe_handle, Buffer, (DWORD)Size, &bytes_read, NULL)) {
    LOG_ERROR("ReadFile failed with status code 0x%x", GetLastError());
    return;
  }
}
================================================
FILE: module/client/pipe.h
================================================
#pragma once
#include
#define MESSAGE_TYPE_CLIENT_REPORT 1
#define MESSAGE_TYPE_CLIENT_SEND 2
#define MESSAGE_TYPE_CLIENT_REQUEST 3
#define MOTHERBOARD_SERIAL_CODE_LENGTH 64
#define DEVICE_DRIVE_0_SERIAL_CODE_LENGTH 64
namespace client {
class pipe {
HANDLE pipe_handle;
LPTSTR pipe_name;
public:
pipe(LPTSTR PipeName);
void write_pipe(PVOID Buffer, SIZE_T Size);
void read_pipe(PVOID Buffer, SIZE_T Size);
};
namespace headers {
typedef enum _ENVIRONMENT_TYPE {
NativeWindows = 0,
Vmware,
VirtualBox
} ENVIRONMENT_TYPE;
typedef enum _PROCESSOR_TYPE {
Unknown = 0,
GenuineIntel,
AuthenticAmd
} PROCESSOR_TYPE;
#define VENDOR_STRING_MAX_LENGTH 256
struct SYSTEM_INFORMATION {
CHAR motherboard_serial[MOTHERBOARD_SERIAL_CODE_LENGTH];
CHAR drive_0_serial[DEVICE_DRIVE_0_SERIAL_CODE_LENGTH];
CHAR vendor[VENDOR_STRING_MAX_LENGTH];
BOOLEAN virtualised_environment;
ENVIRONMENT_TYPE environment;
PROCESSOR_TYPE processor;
RTL_OSVERSIONINFOW os_information;
};
} // namespace headers
} // namespace client
================================================
FILE: module/common.h
================================================
#pragma once
#include
#include
#include
#include
#include
/* Console logging helpers for the usermode module. */
#define LOG_INFO(fmt, ...) printf("[+] " fmt "\n", ##__VA_ARGS__)
#define LOG_ERROR(fmt, ...) printf("[-] " fmt "\n", ##__VA_ARGS__)
/* Wait-time helpers following the NT convention: absolute times positive,
 * relative times negative, in 100ns units. */
#define ABSOLUTE(wait) (wait)
#define RELATIVE(wait) (-(wait))
#define NANOSECONDS(nanos) (((signed __int64)(nanos)) / 100L)
#define MICROSECONDS(micros) (((signed __int64)(micros)) * NANOSECONDS(1000L))
#define MILLISECONDS(milli) (((signed __int64)(milli)) * MICROSECONDS(1000L))
#define SECONDS(seconds) (((signed __int64)(seconds)) * MILLISECONDS(1000L))
/* A second, textually identical LOG_ERROR definition was removed here;
 * behavior is unchanged. */
================================================
FILE: module/crypt/crypt.cpp
================================================
#include "crypt.h"
#include "../common.h"
#include
#include
#include
#include
#include
#pragma comment(lib, "bcrypt.lib")
BCRYPT_ALG_HANDLE alg_handle = NULL;
BCRYPT_KEY_HANDLE key_handle = NULL;
namespace crypt {
#define STATUS_UNSUCCESSFUL ((NTSTATUS)0xC0000001L)
namespace globals {
/* NOTE(review): 0x32 == 50 and 0x16 == 22, yet the key below is 32 bytes
 * and the IV 16 bytes — these were presumably meant as decimal 32/16.
 * Both macros appear unused in this file; confirm before relying on them. */
#define TEST_AES_KEY_LENGTH 0x32
#define TEST_AES_IV_LENGTH 0x16
/* Hard-coded development AES-256 key (32 bytes). */
const unsigned char TEST_KEY[] = {
0xAA, 0x50, 0xA7, 0x00, 0x79, 0xF1, 0x6C, 0x2D, 0x6B, 0xAD, 0xAC,
0x19, 0x18, 0x66, 0xFB, 0xEF, 0xCA, 0x9B, 0x6D, 0x3E, 0xA3, 0x7D,
0x2D, 0xF6, 0x10, 0x95, 0xB3, 0xB3, 0x8D, 0x34, 0x69, 0xF1};
/* Hard-coded development IV (16 bytes, one AES block). */
const unsigned char TEST_IV[] = {0x00, 0x01, 0x02, 0x03, 0x04, 0x05,
0x06, 0x07, 0x08, 0x09, 0x0A, 0x0B,
0x0C, 0x0D, 0x0E, 0x0F};
/* Key blob handed to BCryptImportKey; allocated in initialise_session_key. */
PBCRYPT_KEY_DATA_BLOB_HEADER blob = nullptr;
/* Backing storage for the CNG key object and its required length. */
static PUCHAR key_object = NULL;
static UINT32 key_object_length = 0;
} // namespace globals
/*
 * Build the BCRYPT_KEY_DATA_BLOB used to import the hard-coded test AES
 * key: a BCRYPT_KEY_DATA_BLOB_HEADER immediately followed by the raw key
 * bytes. Returns false on allocation failure.
 *
 * FIX: restored the cast's template argument (lost in extraction) and
 * replaced the manual UINT64 pointer arithmetic with `blob + 1`, which
 * addresses the byte immediately after the header.
 */
boolean initialise_session_key() {
  globals::blob = reinterpret_cast<PBCRYPT_KEY_DATA_BLOB_HEADER>(
      malloc(sizeof(BCRYPT_KEY_DATA_BLOB_HEADER) + sizeof(globals::TEST_KEY)));

  if (!globals::blob)
    return false;

  globals::blob->dwMagic = BCRYPT_KEY_DATA_BLOB_MAGIC;
  globals::blob->dwVersion = BCRYPT_KEY_DATA_BLOB_VERSION1;
  globals::blob->cbKeyData = sizeof(globals::TEST_KEY);

  /* key bytes live directly after the header */
  memcpy(globals::blob + 1, globals::TEST_KEY, sizeof(globals::TEST_KEY));
  return true;
}
/*
 * Open the CNG AES provider, allocate the key-object buffer and import the
 * session key. Returns false on any failure.
 *
 * BUG FIX: the original leaked globals::key_object (and the key blob) on
 * the failure paths after allocation; they are now released and reset so a
 * retry starts clean.
 */
boolean initialise_provider() {
  UINT32 data_copied = 0;
  NTSTATUS status =
      BCryptOpenAlgorithmProvider(&alg_handle, BCRYPT_AES_ALGORITHM, NULL, 0);
  if (!NT_SUCCESS(status)) {
    LOG_ERROR("BCryptOpenAlgorithmProvider: %x", status);
    return false;
  }
  status = BCryptGetProperty(alg_handle, BCRYPT_OBJECT_LENGTH,
                             (PUCHAR)&globals::key_object_length,
                             sizeof(UINT32), (PULONG)&data_copied, 0);
  if (!NT_SUCCESS(status)) {
    LOG_ERROR("BCryptGetProperty: %x", status);
    return false;
  }
  globals::key_object = (PUCHAR)malloc(globals::key_object_length);
  if (!globals::key_object)
    return false;
  if (!initialise_session_key()) {
    free(globals::key_object);
    globals::key_object = NULL;
    return false;
  }
  status = BCryptImportKey(
      alg_handle, NULL, BCRYPT_KEY_DATA_BLOB, &key_handle, globals::key_object,
      globals::key_object_length, (PUCHAR)globals::blob,
      sizeof(BCRYPT_KEY_DATA_BLOB_HEADER) + sizeof(globals::TEST_KEY), 0);
  if (!NT_SUCCESS(status)) {
    LOG_ERROR("BCryptImportKey: %x", status);
    free(globals::key_object);
    globals::key_object = NULL;
    free(globals::blob);
    globals::blob = nullptr;
    return false;
  }
  return true;
}
/*
 * Decrypt a packet in place. The first 16 bytes are the unencrypted header
 * (one AES block); the remainder is AES ciphertext decrypted with the
 * imported session key and a fresh copy of the IV.
 *
 * BUG FIX: packet_length < 16 previously underflowed the uint32 `length`
 * (wrapping to ~4GB); such packets are now rejected, and a header-only
 * packet (exactly 16 bytes) trivially succeeds.
 */
boolean decrypt_packet(void *packet, uint32_t packet_length) {
  ULONG data_copied = 0;
  unsigned char local_iv[sizeof(globals::TEST_IV)] = {0};

  if (packet_length < 16)
    return false;
  if (packet_length == 16)
    return true; /* no ciphertext after the header */

  /* BCryptDecrypt mutates the IV buffer, so work on a copy. */
  memcpy((void *)local_iv, (void *)globals::TEST_IV, sizeof(globals::TEST_IV));

  void *buffer = (void *)((UINT64)packet + 16);
  uint32_t length = packet_length - 16;

  NTSTATUS status = BCryptDecrypt(
      key_handle, (PUCHAR)buffer, length, NULL, (PUCHAR)local_iv,
      sizeof(globals::TEST_IV), (PUCHAR)buffer, length, &data_copied, 0);
  if (!NT_SUCCESS(status)) {
    LOG_ERROR("BCryptDecrypt: %x", status);
    return false;
  }
  return true;
}
/*
 * Round original_size up to the next multiple of the AES block size (16).
 * Sizes already on a block boundary — including 0 — are returned unchanged.
 */
uint32_t get_padded_packet_size(uint32_t original_size) {
  const uint32_t block = 16;
  const uint32_t tail = original_size % block;
  return tail ? original_size + (block - tail) : original_size;
}
/* Accessors exposing the hard-coded development key/IV to other modules. */
const unsigned char *get_test_key() { return globals::TEST_KEY; }
const unsigned char *get_test_iv() { return globals::TEST_IV; }
} // namespace crypt
================================================
FILE: module/crypt/crypt.h
================================================
#pragma once
#include
#include
namespace crypt {
const unsigned char *get_test_key();
const unsigned char *get_test_iv();
boolean initialise_provider();
boolean decrypt_packet(void *packet, uint32_t packet_length);
uint32_t get_padded_packet_size(uint32_t original_size);
} // namespace crypt
================================================
FILE: module/dispatcher/dispatcher.cpp
================================================
#include "dispatcher.h"
#include "../client/message_queue.h"
#include "../crypt/crypt.h"
#include "../helper.h"
#include
#include
/*
 * Construct the dispatcher: spin up the worker thread pool and open the
 * kernel interface for the named driver. module_info is retained for later
 * use by the dispatch routines.
 */
dispatcher::dispatcher::dispatcher(LPCWSTR driver_name,
                                   client::message_queue &message_queue,
                                   module::module_information *module_info)
    : thread_pool(DISPATCHER_THREAD_COUNT),
      k_interface(driver_name, message_queue, module_info) {
  this->module_info = module_info;
}
/*
 * Request (or, for NO_SERVER builds, locally generate) the session key
 * pair. Currently only logs intent — no key exchange is performed yet.
 */
void dispatcher::dispatcher::request_session_pk() {
#ifdef NO_SERVER
  LOG_INFO("NO_SERVER Build used. Generating local session key pair.");
#else
  LOG_INFO("Requesting session key pair.");
#endif
}
/*
 * Timer callback: pick a random shared-state operation and write it into
 * the kernel shared mapping. NOTE(review): the reinterpret_cast's template
 * argument was lost in extraction — presumably the kernel_interface
 * operation enum type; confirm against the repo.
 */
void dispatcher::dispatcher::write_shared_mapping_operation() {
  int operation =
      helper::generate_rand_int(kernel_interface::SHARED_STATE_OPERATION_COUNT);
  LOG_INFO("Shared mapping operation callback received. operation: %lx",
           operation);
  this->k_interface.write_shared_mapping_operation(
      *reinterpret_cast(
          &operation));
}
/*
 * Initialise the kernel shared mapping and register the periodic
 * shared-mapping write as a timer callback, then sleep so driver routines
 * are offset from startup. NOTE(review): `result` (the returned timer
 * handle, template argument lost in extraction) is never checked — an
 * insertion failure is silently ignored.
 */
void dispatcher::dispatcher::init_timer_callbacks() {
  /* we want to offset when our driver routines are called */
  this->k_interface.initiate_shared_mapping();
  std::optional result = this->timers.insert_callback(
      std::bind(&dispatcher::dispatcher::write_shared_mapping_operation, this),
      WRITE_SHARED_MAPPING_DUE_TIME, WRITE_SHARED_MAPPING_PERIOD);
  helper::sleep_thread(TIMER_CALLBACK_DELAY);
}
/* Hand the timer dispatch loop to a pool worker. */
void dispatcher::dispatcher::run_timer_thread() {
  thread_pool.queue_job([this]() { this->timers.run_timer_thread(); });
}
/* Hand the kernel-interface IO completion port loop to a pool worker. */
void dispatcher::dispatcher::run_io_port_thread() {
  thread_pool.queue_job([this]() { k_interface.run_completion_port(); });
}
/*
 * Main dispatch loop: initialise crypto and the RNG, start the timer and
 * completion-port workers, then issue a random kernel job forever at a
 * fixed cadence.
 */
void dispatcher::dispatcher::run() {
  // helper::generate_rand_seed();
  crypt::initialise_provider();
  std::srand(std::time(nullptr));
  this->init_timer_callbacks();
  this->run_timer_thread();
  this->run_io_port_thread();
  /* NOTE(review): run_io_port_thread() above already queues
   * run_completion_port; this queues it a second time, consuming two of
   * the four pool workers. Confirm whether two completion-port threads are
   * intentional. */
  thread_pool.queue_job([this]() { k_interface.run_completion_port(); });
  while (true) {
    LOG_INFO("issueing kernel job!");
    this->issue_kernel_job();
    helper::sleep_thread(DISPATCH_LOOP_SLEEP_TIME);
  }
}
/*
 * Pick one of the kernel detection routines at random and queue it on the
 * thread pool. NOTE(review): there are 13 cases (0..12) but
 * KERNEL_DISPATCH_FUNCTION_COUNT is 12 — if generate_rand_int returns
 * [0, count), case 12 (validate_win32k_dispatch_tables) is unreachable.
 */
void dispatcher::dispatcher::issue_kernel_job() {
  switch (helper::generate_rand_int(KERNEL_DISPATCH_FUNCTION_COUNT)) {
  case 0:
    thread_pool.queue_job([this]() { k_interface.enumerate_handle_tables(); });
    break;
  case 1:
    thread_pool.queue_job([this]() { k_interface.perform_integrity_check(); });
    break;
  case 2:
    thread_pool.queue_job(
        [this]() { k_interface.scan_for_unlinked_processes(); });
    break;
  case 3:
    thread_pool.queue_job(
        [this]() { k_interface.verify_process_module_executable_regions(); });
    break;
  case 4:
    thread_pool.queue_job(
        [this]() { k_interface.validate_system_driver_objects(); });
    break;
  case 5:
    thread_pool.queue_job([this]() { k_interface.run_nmi_callbacks(); });
    break;
  case 6:
    thread_pool.queue_job(
        [this]() { k_interface.scan_for_attached_threads(); });
    break;
  case 7:
    thread_pool.queue_job([this]() { k_interface.initiate_apc_stackwalk(); });
    break;
  case 8:
    thread_pool.queue_job([this]() { k_interface.scan_for_ept_hooks(); });
    break;
  case 9:
    thread_pool.queue_job([this]() { k_interface.perform_dpc_stackwalk(); });
    break;
  case 10:
    thread_pool.queue_job([this]() { k_interface.validate_system_modules(); });
    break;
  case 11:
    thread_pool.queue_job([this]() { k_interface.validate_pci_devices(); });
    break;
  case 12:
    thread_pool.queue_job(
        [this]() { k_interface.validate_win32k_dispatch_tables(); });
    break;
  }
}
================================================
FILE: module/dispatcher/dispatcher.h
================================================
#pragma once
#include "threadpool.h"
#include "timer.h"
#include "../kernel_interface/kernel_interface.h"
#include "../module.h"
namespace dispatcher {
/* Seconds slept between kernel-job dispatches. */
constexpr int DISPATCH_LOOP_SLEEP_TIME = 30;
/* BUG FIX: issue_kernel_job() dispatches on cases 0..12 — 13 routines.
 * With the previous value of 12, the last routine
 * (validate_win32k_dispatch_tables) could never be selected. */
constexpr int KERNEL_DISPATCH_FUNCTION_COUNT = 13;
constexpr int DISPATCHER_THREAD_COUNT = 4;
constexpr int TIMER_CALLBACK_DELAY = 15;
constexpr int WRITE_SHARED_MAPPING_PERIOD = 30;
constexpr int WRITE_SHARED_MAPPING_DUE_TIME = 30;
/*
 * Owns the worker thread pool, the waitable-timer callbacks and the
 * kernel interface, and drives the random kernel-job dispatch loop.
 */
class dispatcher {
  timer timers;
  thread_pool thread_pool;
  kernel_interface::kernel_interface k_interface;
  module::module_information *module_info;
  void issue_kernel_job();
  void write_shared_mapping_operation();
  void init_timer_callbacks();
  void run_timer_thread();
  void run_io_port_thread();
  void request_session_pk();

public:
  dispatcher(LPCWSTR driver_name, client::message_queue &queue,
             module::module_information *module_info);
  void run();
};
} // namespace dispatcher
================================================
FILE: module/dispatcher/threadpool.cpp
================================================
#include "threadpool.h"
/*
* This is the idle loop each thread will be running until a job is ready
* for execution
*/
/*
 * Worker idle loop: block on the condition variable until a job is queued
 * or termination is requested, pop one job under the queue lock, then run
 * it outside the lock so long jobs don't serialize the pool.
 */
void dispatcher::thread_pool::wait_for_task() {
  while (true) {
    std::function job;
    {
      std::unique_lock lock(this->queue_mutex);
      /*
       * wait() releases the lock while blocked and reacquires it before
       * re-evaluating the predicate, so the empty/terminate checks are
       * always made while holding queue_mutex. Equivalent to looping
       * `while (!pred) mutex_condition.wait(lock);`.
       */
      mutex_condition.wait(lock, [this] {
        return !this->jobs.empty() || this->should_terminate;
      });
      if (this->should_terminate)
        return;
      /* take the oldest queued job */
      job = jobs.front();
      jobs.pop();
    }
    /* execute with the lock released */
    job();
  }
}
/*
 * Start thread_count workers, each parked in wait_for_task() until jobs
 * arrive or terminate() is called.
 */
dispatcher::thread_pool::thread_pool(int thread_count) {
  this->thread_count = thread_count;
  this->should_terminate = false;
  for (int worker = 0; worker < thread_count; ++worker)
    this->threads.emplace_back(&thread_pool::wait_for_task, this);
}
void dispatcher::thread_pool::queue_job(const std::function &job) {
/* push a job into our job queue safely by holding our queue lock */
std::unique_lock lock(this->queue_mutex);
this->jobs.push(job);
lock.unlock();
mutex_condition.notify_one();
}
void dispatcher::thread_pool::terminate() {
/* safely set our termination flag to true */
std::unique_lock lock(this->queue_mutex);
should_terminate = true;
lock.unlock();
/* unlock all threads waiting on our condition */
mutex_condition.notify_all();
/* join the threads and clear our threads vector */
for (std::thread &thread : threads) {
thread.join();
}
threads.clear();
}
/*
 * Report whether jobs are still queued, letting callers poll for drain
 * before destroying the pool.
 *
 * BUG FIX: the original called queue_mutex.unlock() directly while the
 * std::unique_lock still owned the mutex, so the lock's destructor
 * unlocked it a second time — undefined behavior. Let RAII release it.
 */
bool dispatcher::thread_pool::busy_wait() {
  std::unique_lock lock(this->queue_mutex);
  return !jobs.empty();
}
================================================
FILE: module/dispatcher/threadpool.h
================================================
#pragma once
#include
#include
#include
#include
namespace dispatcher {
/*
* This ThreadPool class is a simple threadpool implementation that will allow
* us to delegate jobs to a set number of threads without the constant need to
* close and open new threads.
*/
class thread_pool {
int thread_count;
bool should_terminate;
std::mutex queue_mutex;
std::condition_variable mutex_condition;
std::vector threads;
std::queue> jobs;
void wait_for_task();
public:
thread_pool(int thread_count);
void queue_job(const std::function &job);
void terminate();
bool busy_wait();
};
} // namespace dispatcher
================================================
FILE: module/dispatcher/timer.cpp
================================================
#include "timer.h"
#include "../common.h"
#include "../helper.h"
/*
 * Initialise the timer table: no active callbacks, every handle slot set
 * to INVALID_HANDLE_VALUE (the "free slot" sentinel).
 */
dispatcher::timer::timer() {
  this->active_callbacks = 0;
  for (auto &entry : handles) {
    entry = INVALID_HANDLE_VALUE;
  }
}
/* NOTE(review): destructor does not close any still-open timer handles. */
dispatcher::timer::~timer() {}
/* Create an unnamed auto-reset waitable timer (NULL on failure). */
HANDLE dispatcher::timer::create_timer_object() {
  return CreateWaitableTimer(nullptr, false, nullptr);
}
/*
 * Arm a waitable timer: first fire at *due_time (100ns units, negative =
 * relative), then every `period` milliseconds. Returns true on success.
 *
 * FIX: replaced the redundant `> 0 ? true : false` with a direct boolean
 * conversion of the BOOL result (any nonzero BOOL means success).
 */
bool dispatcher::timer::set_timer_object(HANDLE handle, LARGE_INTEGER *due_time,
                                         unsigned long period) {
  return SetWaitableTimer(handle, due_time, period, nullptr, nullptr, false) !=
         FALSE;
}
/*
 * Package a callback routine with its schedule: due time converted to the
 * 100ns units SetWaitableTimer expects, period converted to milliseconds.
 */
dispatcher::timer::callback::callback(std::function routine,
                                      int due_time_seconds,
                                      int period_seconds) {
  this->callback_routine = routine;
  this->due_time.QuadPart = helper::seconds_to_nanoseconds(due_time_seconds);
  this->period = helper::seconds_to_milliseconds(period_seconds);
}
/*
 * Register a periodic callback: claim a free handle slot, create and arm a
 * waitable timer, and map the handle to its callback object. Returns the
 * timer handle, or empty on failure.
 * NOTE(review): if SetWaitableTimer fails the error is logged but the
 * callback is still inserted — confirm whether that is intended.
 * NOTE(review): the slot was already written via the pointer returned by
 * find_free_handle(), so the later insert_handle() call looks redundant.
 * (Template arguments on std::optional/std::function/std::pair were lost
 * in extraction.)
 */
std::optional
dispatcher::timer::insert_callback(std::function routine,
                                   int due_time_seconds, int period_seconds) {
  std::lock_guard lock(this->lock);
  std::optional handle = this->find_free_handle();
  if (!handle.has_value()) {
    LOG_ERROR("No free event handles available. Unable to create timer.");
    return {};
  }
  *handle.value() = create_timer_object();
  if (*handle.value() == NULL) {
    LOG_ERROR("CreateWaitableTimer failed with status %x", GetLastError());
    return {};
  }
  callback cb(routine, due_time_seconds, period_seconds);
  if (!set_timer_object(*handle.value(), &cb.due_time, cb.period)) {
    LOG_ERROR("SetWaitableTimer failed with status %x", GetLastError());
  }
  std::pair entry(*handle.value(), cb);
  this->callbacks.insert(entry);
  this->insert_handle(*handle.value());
  this->active_callbacks++;
  return *handle.value();
}
/* assumes lock is held by caller */
/* assumes lock is held by caller */
/* Return a pointer to the first free (INVALID_HANDLE_VALUE) slot in the
 * handle array, or empty if all MAXIMUM_WAIT_OBJECTS slots are in use. */
std::optional dispatcher::timer::find_free_handle() {
  for (int index = 0; index < MAXIMUM_WAIT_OBJECTS; index++) {
    if (handles[index] == INVALID_HANDLE_VALUE)
      return &handles[index];
  }
  return {};
}
/* assumes lock is held */
/*
 * Store `handle` in the first free slot of the handle array.
 *
 * BUG FIX: the original ranged over `HANDLE entry` BY VALUE, so the
 * assignment modified a copy and the array was never updated. Iterate by
 * reference instead.
 */
void dispatcher::timer::insert_handle(HANDLE handle) {
  for (HANDLE &entry : this->handles) {
    if (entry == INVALID_HANDLE_VALUE) {
      entry = handle;
      return;
    }
  }
}
/* assumes lock is held */
/*
 * Remove a timer: erase its callback mapping, close the handle, free its
 * slot, and re-pack the array so valid handles stay contiguous at the
 * front (INVALID_HANDLE_VALUE is (HANDLE)-1, so it sorts to the back —
 * required because run_timer_thread passes active_callbacks as the count
 * to WaitForMultipleObjects).
 */
void dispatcher::timer::close_handle_entry(HANDLE handle) {
  this->callbacks.erase(handle);
  for (int entry = 0; entry < MAXIMUM_WAIT_OBJECTS; entry++) {
    if (this->handles[entry] == handle) {
      CloseHandle(handle);
      this->handles[entry] = INVALID_HANDLE_VALUE;
      /* ordering doesnt matter, as long as the valid handles are at the front
       * of the array and are contiguous */
      std::sort(this->handles.begin(), this->handles.end());
      this->active_callbacks--;
      return;
    }
  }
}
/*
 * Run the callback registered for the handle at `index` in the handle
 * array; silently ignores indices with no registered callback.
 * NOTE(review): callers pass WaitForMultipleObjects' return value directly
 * — WAIT_FAILED/WAIT_TIMEOUT would index out of range here; the map lookup
 * only saves us if handles[index] happens to miss. (Template arguments on
 * the map type were lost in extraction.)
 */
void dispatcher::timer::dispatch_callback_for_index(unsigned long index) {
  std::unordered_map::const_iterator it =
      this->callbacks.find(handles[index]);
  if (it == this->callbacks.end())
    return;
  it->second.callback_routine();
}
/* assumes lock is held */
void dispatcher::timer::query_removal_queue() {
if (callbacks_to_remove.empty())
return;
while (!callbacks_to_remove.empty()) {
HANDLE entry = callbacks_to_remove.front();
this->close_handle_entry(entry);
this->callbacks_to_remove.pop();
this->active_callbacks--;
}
}
/* todo: maybe have an event object that we can wait on whilst no events are
 * queued, then when we do insert an event alert the event object? not urgent
 * for our use case. */
/*
 * Timer dispatch loop: wait on the first active_callbacks handles, run the
 * callback for whichever timer signalled, then process any deferred
 * removals (see remove_callback below). Exits immediately if no callbacks
 * are registered, or once the last one is removed.
 * NOTE(review): `index` is used unchecked — WAIT_FAILED would be passed to
 * dispatch_callback_for_index. Also requires valid handles to be packed at
 * the front of the array (close_handle_entry maintains this).
 */
void dispatcher::timer::run_timer_thread() {
  if (this->active_callbacks == 0)
    return;
  while (true) {
    unsigned long index = WaitForMultipleObjects(
        this->active_callbacks, reinterpret_cast(&handles), false,
        INFINITE);
    {
      std::lock_guard lock(this->lock);
      this->dispatch_callback_for_index(index);
      this->query_removal_queue();
      /* maybe we should have some default event ? */
      if (this->active_callbacks == 0)
        return;
    }
  }
}
/*
 * Removal is deferred rather than immediate: if we mutated the handle
 * array while the timer thread is blocked in WaitForMultipleObjects, the
 * count/array it was called with would be stale. Instead we queue the
 * handle here; the timer thread drains the queue (query_removal_queue)
 * after it next wakes, so the following wait sees a consistent array.
 */
void dispatcher::timer::remove_callback(HANDLE handle) {
  std::lock_guard lock(this->lock);
  this->callbacks_to_remove.push(handle);
}
================================================
FILE: module/dispatcher/timer.h
================================================
#pragma once
#include
#include
#include
#include
#include
#include
#include
#include
/*
 * Array of handles which we pass to WaitForMultipleObjects.
 *
 * We need to do this rather than use the dedicated APC routine pointer in
 * the set-timer call because a pointer to a non-static member function
 * cannot be passed as a plain callback.
 *
 * The map maps a handle to a callback object; this object contains various
 * pieces of information, most importantly the callback routine. When an
 * event is signaled, we get back a handle, and use that handle to index the
 * map and run the callback routine. This has to be done because the handles
 * need to be in a contiguous array, so we cannot simply use an array of
 * callback objects.
 */
namespace dispatcher {
constexpr int HANDLE_AVAILABLE = 0;
constexpr int HANDLE_NOT_AVAILABLE = 1;
/*
 * Multiplexes many periodic callbacks onto one waitable-timer wait loop.
 * Handles live in a contiguous array (required by WaitForMultipleObjects);
 * per-handle metadata lives in the `callbacks` map keyed by handle.
 */
class timer {
/* per-timer registration: routine to run plus its schedule */
struct callback {
bool in_use;
std::function callback_routine;
LARGE_INTEGER due_time;
unsigned long period;
callback(std::function routine, int due_time_seconds,
int period_seconds);
};
std::optional find_free_handle();
/* closes + compacts; assumes lock is held */
void close_handle_entry(HANDLE handle);
void dispatch_callback_for_index(unsigned long index);
HANDLE create_timer_object();
bool set_timer_object(HANDLE handle, LARGE_INTEGER *due_time,
unsigned long period);
/* drains callbacks_to_remove; assumes lock is held */
void query_removal_queue();
void insert_handle(HANDLE handle);
public:
std::mutex lock;
/* contiguous wait array; unused slots hold INVALID_HANDLE_VALUE */
std::array handles;
std::unordered_map callbacks;
/* removals are deferred to the timer thread (see remove_callback) */
std::queue callbacks_to_remove;
int active_callbacks;
timer();
~timer();
std::optional insert_callback(std::function routine,
int due_time_seconds,
int period_seconds);
void remove_callback(HANDLE handle);
void run_timer_thread();
};
} // namespace dispatcher
================================================
FILE: module/helper.cpp
================================================
#include "helper.h"
#include
#include
#include "crypt/crypt.h"
/* Seed the C runtime PRNG from the current wall-clock time. Call once at
 * startup before generate_rand_int(). */
void
helper::generate_rand_seed()
{
    srand(static_cast<unsigned int>(time(nullptr)));
}
/* Return a pseudo-random integer in [0, max). Requires a prior call to
 * generate_rand_seed() for varied sequences. */
int
helper::generate_rand_int(int max)
{
    int const sample = std::rand();
    return sample % max;
}
/* Block the calling thread for the given number of seconds. */
void
helper::sleep_thread(int seconds)
{
    std::chrono::seconds const interval(seconds);
    std::this_thread::sleep_for(interval);
}
int
helper::get_report_id_from_buffer(void* buffer)
{
kernel_interface::report_header* header =
reinterpret_cast(
(uint64_t)buffer + sizeof(kernel_interface::report_header));
return header->report_code;
}
/*
 * Map the report code embedded in `buffer` to its report_id enumerator.
 *
 * The original switch omitted five enumerators (invalid_process_module,
 * patched_system_module, self_driver_patched, blacklisted_pcie_device,
 * ept_hook) and fell off the end of the function for them — undefined
 * behaviour for a value-returning function. Every case simply echoed the
 * matched value, so validate the code against the known enumerators and
 * return it directly.
 */
kernel_interface::report_id
helper::get_kernel_report_type(void* buffer)
{
    int const code = helper::get_report_id_from_buffer(buffer);
    switch (code) {
    case kernel_interface::report_id::report_nmi_callback_failure:
    case kernel_interface::report_id::report_module_validation_failure:
    case kernel_interface::report_id::report_illegal_handle_operation:
    case kernel_interface::report_id::report_invalid_process_allocation:
    case kernel_interface::report_id::report_hidden_system_thread:
    case kernel_interface::report_id::report_illegal_attach_process:
    case kernel_interface::report_id::report_apc_stackwalk:
    case kernel_interface::report_id::report_dpc_stackwalk:
    case kernel_interface::report_id::report_data_table_routine:
    case kernel_interface::report_id::report_invalid_process_module:
    case kernel_interface::report_id::report_patched_system_module:
    case kernel_interface::report_id::report_self_driver_patched:
    case kernel_interface::report_id::report_blacklisted_pcie_device:
    case kernel_interface::report_id::report_ept_hook:
        return static_cast<kernel_interface::report_id>(code);
    default:
        /* Unknown code: return a defined value instead of invoking UB.
         * NOTE(review): pick a dedicated "unknown" enumerator if callers
         * need to distinguish this case. */
        return static_cast<kernel_interface::report_id>(code);
    }
}
/*
 * Pretty-print a single decrypted report packet to the console log.
 * Dispatches on report_header::report_code and casts the buffer to the
 * matching report struct from kernel_interface.h. The buffer must already
 * be decrypted (see helper::print_kernel_report).
 */
void
print_report_packet(void* buffer)
{
kernel_interface::report_header* report_header =
(kernel_interface::report_header*)buffer;
LOG_INFO("report code: %lx", report_header->report_code);
LOG_INFO("report sub code: %lx", report_header->report_sub_type);
switch (report_header->report_code) {
case kernel_interface::report_id::report_nmi_callback_failure: {
kernel_interface::nmi_callback_failure* r1 =
reinterpret_cast(buffer);
LOG_INFO("were_nmis_disabled: %lx", r1->were_nmis_disabled);
LOG_INFO("kthread_address: %llx", r1->kthread_address);
LOG_INFO("invalid_rip: %llx", r1->invalid_rip);
LOG_INFO("********************************");
break;
}
case kernel_interface::report_id::report_invalid_process_allocation: {
/* NOTE(review): r2 is cast but none of its fields are logged — the
 * process buffer is raw EPROCESS bytes, presumably not printable */
kernel_interface::invalid_process_allocation_report* r2 =
reinterpret_cast<
kernel_interface::invalid_process_allocation_report*>(buffer);
LOG_INFO("********************************");
break;
}
case kernel_interface::report_id::report_hidden_system_thread: {
kernel_interface::hidden_system_thread_report* r3 =
reinterpret_cast(
buffer);
LOG_INFO("found_in_kthreadlist: %lx", r3->found_in_kthreadlist);
LOG_INFO("found_in_pspcidtable: %lx", r3->found_in_pspcidtable);
LOG_INFO("thread_address: %llx", r3->thread_address);
LOG_INFO("thread_id: %lx", r3->thread_id);
LOG_INFO("********************************");
break;
}
case kernel_interface::report_id::report_illegal_attach_process: {
kernel_interface::attach_process_report* r4 =
reinterpret_cast(buffer);
LOG_INFO("report type: attach_process_report");
LOG_INFO("report code: %lx", r4->report_code);
LOG_INFO("thread_id: %lx", r4->thread_id);
LOG_INFO("thread_address: %llx", r4->thread_address);
LOG_INFO("********************************");
break;
}
case kernel_interface::report_id::report_illegal_handle_operation: {
kernel_interface::open_handle_failure_report* r5 =
reinterpret_cast(
buffer);
LOG_INFO("is_kernel_handle: %lx", r5->is_kernel_handle);
LOG_INFO("process_id: %lx", r5->process_id);
LOG_INFO("thread_id: %lx", r5->thread_id);
LOG_INFO("access: %lx", r5->access);
LOG_INFO("process_name: %s", r5->process_name);
LOG_INFO("********************************");
break;
}
case kernel_interface::report_id::report_invalid_process_module: {
kernel_interface::process_module_validation_report* r6 =
reinterpret_cast<
kernel_interface::process_module_validation_report*>(buffer);
LOG_INFO("image_base: %llx", r6->image_base);
LOG_INFO("image_size: %u", r6->image_size);
LOG_INFO("module_path: %ls", r6->module_path);
LOG_INFO("********************************");
break;
}
case kernel_interface::report_id::report_apc_stackwalk: {
kernel_interface::apc_stackwalk_report* r7 =
reinterpret_cast(buffer);
LOG_INFO("kthread_address: %llx", r7->kthread_address);
LOG_INFO("invalid_rip: %llx", r7->invalid_rip);
LOG_INFO("********************************");
break;
}
case kernel_interface::report_id::report_dpc_stackwalk: {
kernel_interface::dpc_stackwalk_report* r8 =
reinterpret_cast(buffer);
LOG_INFO("kthread_address: %llx", r8->kthread_address);
LOG_INFO("invalid_rip: %llx", r8->invalid_rip);
LOG_INFO("********************************");
break;
}
case kernel_interface::report_id::report_data_table_routine: {
kernel_interface::data_table_routine_report* r9 =
reinterpret_cast(
buffer);
LOG_INFO("id: %d", r9->id);
LOG_INFO("address: %llx", r9->address);
LOG_INFO("routine: %s", r9->routine);
LOG_INFO("********************************");
break;
}
case kernel_interface::report_id::report_module_validation_failure: {
kernel_interface::module_validation_failure* r10 =
reinterpret_cast(
buffer);
LOG_INFO("driver_base_address: %llx", r10->driver_base_address);
LOG_INFO("driver_size: %llx", r10->driver_size);
LOG_INFO("driver_name: %s", r10->driver_name);
LOG_INFO("********************************");
break;
}
case kernel_interface::report_id::report_patched_system_module: {
kernel_interface::system_module_integrity_check_report* r11 =
reinterpret_cast<
kernel_interface::system_module_integrity_check_report*>(
buffer);
LOG_INFO("image_base: %llx", r11->image_base);
LOG_INFO("image_size: %lx", r11->image_size);
LOG_INFO("path_name: %s", r11->path_name);
LOG_INFO("********************************");
break;
}
case kernel_interface::report_id::report_self_driver_patched: {
kernel_interface::driver_self_integrity_check_report* r12 =
reinterpret_cast<
kernel_interface::driver_self_integrity_check_report*>(buffer);
LOG_INFO("image_base: %llx", r12->image_base);
LOG_INFO("image_size: %lx", r12->image_size);
LOG_INFO("path_name: %s", r12->path_name);
LOG_INFO("********************************");
break;
}
case kernel_interface::report_id::report_blacklisted_pcie_device: {
kernel_interface::blacklisted_pcie_device_report* r13 =
reinterpret_cast(
buffer);
LOG_INFO("device_object: %llx", r13->device_object);
LOG_INFO("device_id: %x", r13->device_id);
LOG_INFO("vendor_id: %x", r13->vendor_id);
LOG_INFO("********************************");
break;
}
case kernel_interface::report_id::report_ept_hook: {
kernel_interface::ept_hook_failure* r14 =
reinterpret_cast(buffer);
LOG_INFO("control_average: %llx", r14->control_average);
LOG_INFO("read_average: %llx", r14->read_average);
LOG_INFO("function_name: %s", r14->function_name);
LOG_INFO("********************************");
break;
}
default: LOG_INFO("Invalid report type."); break;
}
}
/*
 * Pretty-print a decrypted heartbeat packet (driver liveness counters).
 */
void
print_heartbeat_packet(void* buffer)
{
kernel_interface::heartbeat_packet* hb =
reinterpret_cast(buffer);
LOG_INFO("Heartbeat Count: %lx", hb->heartbeat_count);
LOG_INFO("Total Reports Completed: %lx", hb->total_reports_completed);
LOG_INFO("Total IRPs Completed: %lx", hb->total_irps_completed);
LOG_INFO("Total Heartbeats Completed: %lx", hb->total_heartbeats_completed);
LOG_INFO("********************************");
}
/*
 * Decrypt an incoming kernel packet in place, then dispatch on its
 * packet_type (0 = report, 1 = heartbeat).
 *
 * NOTE(review): the decrypt length is derived from
 * sizeof(open_handle_failure_report) regardless of the packet's actual
 * type — presumably a fixed padded size shared by all packets; confirm
 * against crypt::get_padded_packet_size and the driver side.
 */
void
helper::print_kernel_report(void* buffer)
{
uint32_t size = crypt::get_padded_packet_size(
sizeof(kernel_interface::open_handle_failure_report));
crypt::decrypt_packet(buffer, size);
kernel_interface::packet_header* header =
reinterpret_cast(buffer);
LOG_INFO("packet type: %lx", header->packet_type);
switch (header->packet_type) {
case 0: print_report_packet(buffer); break;
case 1: print_heartbeat_packet(buffer); break;
}
}
/*
 * Convert seconds to the driver's timer representation.
 * NOTE(review): ABSOLUTE and SECONDS are macros defined outside this file
 * (common.h, presumably the usual KeSetTimer 100ns-interval helpers);
 * despite the name, the unit is likely 100ns ticks, not nanoseconds —
 * confirm against the macro definitions.
 */
unsigned __int64
helper::seconds_to_nanoseconds(int seconds)
{
return ABSOLUTE(SECONDS(seconds));
}
/* Convert a duration in seconds to milliseconds. */
unsigned __int32
helper::seconds_to_milliseconds(int seconds)
{
    constexpr int milliseconds_per_second = 1000;
    return seconds * milliseconds_per_second;
}
================================================
FILE: module/helper.h
================================================
#pragma once
#include "kernel_interface/kernel_interface.h"
/* Miscellaneous utilities shared across the user-mode module. */
namespace helper {
// Seed the CRT PRNG; call once before generate_rand_int().
void generate_rand_seed();
// Pseudo-random integer in [0, max).
int generate_rand_int(int max);
// Block the calling thread for `seconds` seconds.
void sleep_thread(int seconds);
// Map a raw report buffer to its report_id enumerator.
kernel_interface::report_id get_kernel_report_type(void *buffer);
// Raw report code read from a report buffer.
int get_report_id_from_buffer(void *buffer);
// Decrypt and log a kernel packet (report or heartbeat).
void print_kernel_report(void *buffer);
unsigned __int64 seconds_to_nanoseconds(int seconds);
unsigned __int32 seconds_to_milliseconds(int seconds);
} // namespace helper
================================================
FILE: module/imports.cpp
================================================
#include "imports.h"
/* Placeholder: dynamic import resolution is not implemented yet, so report
 * failure to the caller. */
bool imports::initialise_imports() {
    return false;
}
================================================
FILE: module/imports.h
================================================
#pragma once
namespace imports
{
// Resolve dynamically-loaded imports; currently a stub returning false.
bool initialise_imports();
}
================================================
FILE: module/kernel_interface/kernel_interface.cpp
================================================
#include "kernel_interface.h"
#include
#include "../common.h"
#include "../crypt/crypt.h"
#include "../helper.h"
#include
#include
/* Signature of ntdll!RtlDosPathNameToNtPathName_U, resolved at runtime via
 * GetProcAddress (used only by the commented-out
 * verify_process_module_executable_regions below). */
typedef BOOLEAN(NTAPI *RtlDosPathNameToNtPathName_U)(PCWSTR DosPathName,
PUNICODE_STRING NtPathName,
PCWSTR *NtFileNamePart,
PVOID DirectoryInfo);
/*
 * Claim the first event_dispatcher slot not currently in use, marking it
 * busy before returning it. Returns nullptr when every slot is taken.
 * Serialised against the other slot operations via this->lock.
 */
kernel_interface::event_dispatcher *
kernel_interface::kernel_interface::get_free_event_entry() {
    std::lock_guard<std::mutex> lock(this->lock);
    for (auto &entry : this->events) {
        if (!entry.in_use) {
            entry.in_use = true;
            return &entry;
        }
    }
    return nullptr;
}
void kernel_interface::kernel_interface::terminate_completion_port() {
std::lock_guard lock(this->lock);
for (std::vector::iterator it = events.begin();
it != events.end(); it++) {
free(it->buffer);
CloseHandle(it->overlapped.hEvent);
}
}
void kernel_interface::kernel_interface::run_completion_port() {
DWORD bytes = 0;
OVERLAPPED *io = nullptr;
ULONG_PTR key = 0;
while (true) {
GetQueuedCompletionStatus(this->port, &bytes, &key, &io, INFINITE);
if (io == nullptr)
continue;
void *buffer = get_buffer_from_event_object(io);
helper::print_kernel_report(buffer);
release_event_object(io);
send_pending_irp();
}
}
/*
 * Allocates one report buffer per event slot, binds the driver handle to a
 * fresh I/O completion port, and primes the driver with EVENT_COUNT pending
 * IRPs.
 *
 * The original did not check malloc's return, so on allocation failure a
 * null buffer would later be handed to memset/DeviceIoControl.
 */
void kernel_interface::kernel_interface::initiate_completion_port() {
    for (int index = 0; index < EVENT_COUNT; index++) {
        void *buffer = malloc(MAXIMUM_REPORT_BUFFER_SIZE);
        if (!buffer) {
            LOG_ERROR("Failed to allocate report buffer.");
            continue;
        }
        this->events.push_back(
            event_dispatcher(buffer, MAXIMUM_REPORT_BUFFER_SIZE));
    }
    this->port = CreateIoCompletionPort(this->driver_handle, nullptr, 0, 0);
    if (!this->port) {
        LOG_ERROR("CreateIoCompletePort failed with status %x", GetLastError());
        return;
    }
    /* queue an initial IRP for every slot so the driver can complete them */
    for (int index = 0; index < EVENT_COUNT; index++) {
        send_pending_irp();
    }
    LOG_INFO("Finished initialising completion port.");
}
/*
 * Return the event_dispatcher owning `event` to the free pool: zero its
 * report buffer, clear the in-use flag and reset the win32 event so the
 * slot can be reused for the next pending IRP.
 */
void kernel_interface::kernel_interface::release_event_object(
    OVERLAPPED *event) {
    std::lock_guard<std::mutex> lock(this->lock);
    for (auto &entry : this->events) {
        if (&entry.overlapped != event)
            continue;
        /* simply zero the buffer; no need to free and reallocate */
        memset(entry.buffer, 0, entry.buffer_size);
        entry.in_use = false;
        ResetEvent(entry.overlapped.hEvent);
    }
}
/*
 * Look up the report buffer belonging to the event_dispatcher whose
 * OVERLAPPED structure is `event`; nullptr when no slot matches.
 */
void *kernel_interface::kernel_interface::get_buffer_from_event_object(
    OVERLAPPED *event) {
    std::lock_guard<std::mutex> lock(this->lock);
    for (auto &entry : this->events) {
        if (&entry.overlapped == event)
            return entry.buffer;
    }
    return nullptr;
}
/*
 * Opens an overlapped handle to the driver's device, announces this process
 * to the driver (session keys + module info) and starts the completion
 * port. On CreateFileW failure the object is left with an invalid
 * driver_handle and no completion port.
 */
kernel_interface::kernel_interface::kernel_interface(
LPCWSTR driver_name, client::message_queue &queue,
module::module_information *module_info)
: message_queue(queue) {
this->driver_name = driver_name;
this->module_info = module_info;
this->port = INVALID_HANDLE_VALUE;
/* FILE_FLAG_OVERLAPPED: all report IRPs are completed asynchronously via
 * the completion port */
this->driver_handle = CreateFileW(
driver_name, GENERIC_WRITE | GENERIC_READ | GENERIC_EXECUTE, 0, 0,
OPEN_EXISTING, FILE_ATTRIBUTE_SYSTEM | FILE_FLAG_OVERLAPPED, 0);
if (this->driver_handle == INVALID_HANDLE_VALUE) {
LOG_ERROR("Failed to open handle to driver with status 0x%x",
GetLastError());
return;
}
this->notify_driver_on_process_launch();
this->initiate_completion_port();
}
/*
 * Frees per-event resources and tells the driver the session is over.
 * NOTE(review): driver_handle and port themselves are not closed here —
 * confirm whether that is intentional (process teardown) or a leak.
 */
kernel_interface::kernel_interface::~kernel_interface() {
this->terminate_completion_port();
this->notify_driver_on_process_termination();
}
/*
 * Issue an output-only DeviceIoControl to the driver. Returns the raw BOOL
 * result (non-zero on success) so callers can consult GetLastError().
 */
unsigned int kernel_interface::kernel_interface::generic_driver_call_output(
    ioctl_code ioctl, void *output_buffer, size_t buffer_size,
    unsigned long *bytes_returned) {
    BOOL const result =
        DeviceIoControl(this->driver_handle, ioctl, nullptr, 0, output_buffer,
                        buffer_size, bytes_returned, nullptr);
    return result;
}
/*
 * Issue an input-only DeviceIoControl to the driver, logging any failure.
 */
void kernel_interface::kernel_interface::generic_driver_call_input(
    ioctl_code ioctl, void *input_buffer, size_t buffer_size,
    unsigned long *bytes_returned) {
    BOOL const ok =
        DeviceIoControl(this->driver_handle, ioctl, input_buffer, buffer_size,
                        nullptr, 0, bytes_returned, nullptr);
    if (!ok)
        LOG_ERROR("DeviceIoControl failed with status %x", GetLastError());
}
/*
 * Kick off an APC-based driver operation (e.g. a stackwalk) by sending the
 * operation id through the APC-stackwalk IOCTL.
 */
void kernel_interface::kernel_interface::generic_driver_call_apc(
    apc_operation operation) {
    apc_operation_init init{};
    init.operation_id = operation;
    this->generic_driver_call_input(ioctl_code::InitiateApcStackwalkOperation,
                                    &init, sizeof(init), nullptr);
}
/*
 * Announce this process to the driver: our PID, session encryption
 * key/IV, a session cookie and our module's base/size/path.
 * NOTE(review): the cookie is hard-coded to 123 and the AES key/IV are
 * test values from crypt:: — placeholder material, not production keying.
 */
void kernel_interface::kernel_interface::notify_driver_on_process_launch() {
unsigned long bytes_returned = 0;
session_initiation_packet packet = {0};
packet.process_id = reinterpret_cast(GetCurrentProcessId());
packet.session_cookie = 123;
memcpy(packet.aes_key, crypt::get_test_key(), 32);
memcpy(packet.aes_iv, crypt::get_test_iv(), 16);
memcpy(&packet.module_info, (void*)this->module_info,
sizeof(module::module_information));
generic_driver_call_input(ioctl_code::NotifyDriverOnProcessLaunch, &packet,
sizeof(session_initiation_packet), &bytes_returned);
}
/*
 * Ask the driver to run its hypervisor checks (APERF timing and INVD
 * emulation) and log when either flag comes back set.
 */
void kernel_interface::kernel_interface::detect_system_virtualization() {
    hv_detection_packet packet = {0};
    unsigned long bytes_returned = 0;
    unsigned int const status =
        generic_driver_call_output(ioctl_code::PerformVirtualisationCheck,
                                   &packet, sizeof(packet), &bytes_returned);
    if (!status) {
        LOG_ERROR("Failed virtualization detection with status %x", GetLastError());
        return;
    }
    bool const hypervisor_present = packet.aperf_msr_timing_check == true ||
                                    packet.invd_emulation_check == true;
    if (hypervisor_present)
        LOG_INFO("HYPERVISOR DETECTED!!!");
}
/* Fire a parameterless IOCTL at the driver, logging any failure. */
void kernel_interface::kernel_interface::generic_driver_call(ioctl_code ioctl) {
    BOOL const ok = DeviceIoControl(this->driver_handle, ioctl, nullptr, 0,
                                    nullptr, 0, nullptr, nullptr);
    if (!ok)
        LOG_ERROR("DeviceIoControl failed with status %x", GetLastError());
}
/*
 * Thin wrappers: each forwards one parameterless IOCTL to the driver via
 * generic_driver_call(). "ScanFroAttachedThreads" reproduces the
 * (misspelled) enumerator name declared in kernel_interface.h.
 */
void kernel_interface::kernel_interface::run_nmi_callbacks() {
this->generic_driver_call(ioctl_code::RunNmiCallbacks);
}
void kernel_interface::kernel_interface::validate_pci_devices() {
this->generic_driver_call(ioctl_code::ValidatePciDevices);
}
void kernel_interface::kernel_interface::validate_system_driver_objects() {
this->generic_driver_call(ioctl_code::ValidateDriverObjects);
}
void kernel_interface::kernel_interface::enumerate_handle_tables() {
this->generic_driver_call(ioctl_code::EnumerateHandleTables);
}
void kernel_interface::kernel_interface::scan_for_unlinked_processes() {
this->generic_driver_call(ioctl_code::ScanForUnlinkedProcesses);
}
void kernel_interface::kernel_interface::perform_integrity_check() {
this->generic_driver_call(ioctl_code::PerformModuleIntegrityCheck);
}
void kernel_interface::kernel_interface::
notify_driver_on_process_termination() {
this->generic_driver_call(ioctl_code::NotifyDriverOnProcessTermination);
}
void kernel_interface::kernel_interface::scan_for_attached_threads() {
this->generic_driver_call(ioctl_code::ScanFroAttachedThreads);
}
void kernel_interface::kernel_interface::scan_for_ept_hooks() {
this->generic_driver_call(ioctl_code::ScanForEptHooks);
}
void kernel_interface::kernel_interface::perform_dpc_stackwalk() {
this->generic_driver_call(ioctl_code::InitiateDpcStackwalk);
}
void kernel_interface::kernel_interface::validate_system_modules() {
this->generic_driver_call(ioctl_code::ValidateSystemModules);
}
void kernel_interface::kernel_interface::validate_win32k_dispatch_tables() {
this->generic_driver_call(ioctl_code::ValidateWin32kDispatchTables);
}
/*
 * Currently a no-op: the entire implementation is commented out. The
 * intended logic (per the disabled code below) walks this process's loaded
 * modules with Toolhelp32 and submits each base/size/NT-path to the driver
 * for executable-region validation. TODO(review): re-enable or remove.
 */
void kernel_interface::kernel_interface::
verify_process_module_executable_regions() {
// HANDLE handle = INVALID_HANDLE_VALUE;
// MODULEENTRY32 module_entry = {0};
// BOOLEAN status = FALSE;
// process_module module = {0};
// unsigned long bytes_returned = 0;
// RtlDosPathNameToNtPathName_U pRtlDosPathNameToNtPathName_U = NULL;
// UNICODE_STRING nt_path_name = {0};
// pRtlDosPathNameToNtPathName_U =
// (RtlDosPathNameToNtPathName_U)GetProcAddress(
// GetModuleHandle("ntdll.dll"), "RtlDosPathNameToNtPathName_U");
// handle = CreateToolhelp32Snapshot(TH32CS_SNAPMODULE | TH32CS_SNAPMODULE32,
// GetCurrentProcessId());
// if (handle == INVALID_HANDLE_VALUE) {
// LOG_ERROR("CreateToolHelp32Snapshot with TH32CS_SNAPMODULE failed with "
// "status 0x%x",
// GetLastError());
// return;
// }
// module_entry.dwSize = sizeof(MODULEENTRY32);
// if (!Module32First(handle, &module_entry)) {
// LOG_ERROR("Module32First failed with status 0x%x", GetLastError());
// return;
// }
// do {
// module.module_base = module_entry.modBaseAddr;
// module.module_size = module_entry.modBaseSize;
// status = (*pRtlDosPathNameToNtPathName_U)(module_entry.szExePath,
// &nt_path_name, NULL, NULL);
// if (!status) {
// LOG_ERROR("RtlDosPathNameToNtPathName_U failed with no status.");
// continue;
// }
// memcpy(module.module_path, nt_path_name.Buffer, MAX_MODULE_PATH);
// this->generic_driver_call_input(ioctl_code::ValidateProcessLoadedModule,
// &module, sizeof(module),
// &bytes_returned);
// } while (Module32Next(handle, &module_entry));
// end:
// CloseHandle(handle);
}
/* Request the driver run its APC-based stackwalk operation. */
void kernel_interface::kernel_interface::initiate_apc_stackwalk() {
this->generic_driver_call_apc(apc_operation::operation_stackwalk);
}
void kernel_interface::kernel_interface::send_pending_irp() {
DWORD status = 0;
event_dispatcher *event = get_free_event_entry();
if (!event) {
LOG_ERROR("All event objects in use.");
return;
}
status = DeviceIoControl(
this->driver_handle, ioctl_code::InsertIrpIntoIrpQueue, NULL, NULL,
event->buffer, event->buffer_size, NULL, &event->overlapped);
/*
* im not sure why this returns a status of ERROR_INVALID_FUNCTION when we use
* the inserted irp to complete a deferred irp - even though that procedure
* should return STATUS_SUCCESS? Weird.. Anyhow it works.
*/
if (status == ERROR_IO_PENDING || status == ERROR_SUCCESS ||
status == ERROR_INVALID_FUNCTION)
return;
LOG_ERROR("failed to insert irp into irp queue %x", status);
}
/*
 * Atomically publish an operation id into the kernel-shared state buffer so
 * the driver picks it up on its next poll. The 16-bit interlocked exchange
 * matches shared_data::operation_id's unsigned __int16 width.
 */
void kernel_interface::kernel_interface::write_shared_mapping_operation(
shared_state_operation_id operation_id) {
InterlockedExchange16(
reinterpret_cast(&this->mapping.buffer->operation_id),
operation_id);
}
/*
 * Ask the driver to map its shared-state buffer into this process; the
 * driver fills this->mapping with the user-mode pointer and size.
 */
void kernel_interface::kernel_interface::initiate_shared_mapping() {
    LOG_INFO("Initialising shared memory buffer!");
    unsigned long bytes_returned = 0;
    unsigned long const result = this->generic_driver_call_output(
        ioctl_code::InitiateSharedMapping, &this->mapping,
        sizeof(kernel_interface::shared_mapping), &bytes_returned);
    if (result == 0)
        LOG_ERROR("DeviceIoControl failed with status %x", GetLastError());
}
================================================
FILE: module/kernel_interface/kernel_interface.h
================================================
#pragma once
#include
#include "../client/message_queue.h"
#include "../module.h"
namespace kernel_interface {
/* number of overlapped event slots / pending IRPs kept in flight */
static constexpr int EVENT_COUNT = 5;
static constexpr int MAX_MODULE_PATH = 256;
/* size of the buffer backing each pending report IRP */
static constexpr int MAXIMUM_REPORT_BUFFER_SIZE = 1000;
static constexpr int QUERY_DEFERRED_REPORT_COUNT = 10;
static constexpr int AES_128_KEY_SIZE = 16;
/* report codes emitted by the kernel driver; values must stay in sync with
 * the driver-side definitions */
enum report_id {
report_nmi_callback_failure = 50,
report_module_validation_failure = 60,
report_illegal_handle_operation = 70,
report_invalid_process_allocation = 80,
report_hidden_system_thread = 90,
report_illegal_attach_process = 100,
report_apc_stackwalk = 110,
report_dpc_stackwalk = 120,
report_data_table_routine = 130,
report_invalid_process_module = 140,
report_patched_system_module = 150,
report_self_driver_patched = 160,
report_blacklisted_pcie_device = 170,
report_ept_hook = 180
};
#define AES_256_BLOCK_SIZE 16
/* common prefix of every packet received from the driver */
struct packet_header {
uint32_t packet_type;
uint32_t magic_number;
};
/* padded so the header is exactly one AES block (see static_asserts) */
struct heartbeat_header {
packet_header header;
uint32_t unused[2];
};
struct report_header {
struct packet_header header;
uint32_t report_code;
uint32_t report_sub_type;
};
static_assert(sizeof(heartbeat_header) == AES_256_BLOCK_SIZE);
static_assert(sizeof(report_header) == AES_256_BLOCK_SIZE);
constexpr int APC_STACKWALK_BUFFER_SIZE = 500;
constexpr int DATA_TABLE_ROUTINE_BUF_SIZE = 256;
constexpr int REPORT_INVALID_PROCESS_BUFFER_SIZE = 500;
constexpr int HANDLE_REPORT_PROCESS_NAME_MAX_LENGTH = 64;
constexpr int MODULE_PATH_LEN = 256;
/* One struct per report_id; layouts mirror the driver and must not change
 * independently of it. */
struct apc_stackwalk_report {
report_header report_header;
uint64_t kthread_address;
uint64_t invalid_rip;
char driver[APC_STACKWALK_BUFFER_SIZE];
};
struct dpc_stackwalk_report {
report_header report_header;
uint64_t kthread_address;
uint64_t invalid_rip;
char driver[APC_STACKWALK_BUFFER_SIZE];
};
struct module_validation_failure {
report_header report_header;
uint64_t driver_base_address;
uint64_t driver_size;
char driver_name[128];
};
struct ept_hook_failure {
report_header report_header;
uint64_t control_average;
uint64_t read_average;
char function_name[128];
};
enum table_id { hal_dispatch = 0, hal_private_dispatch };
struct data_table_routine_report {
report_header report_header;
table_id id;
uint64_t address;
uint32_t index;
char routine[DATA_TABLE_ROUTINE_BUF_SIZE];
};
struct nmi_callback_failure {
report_header report_header;
uint8_t were_nmis_disabled;
uint64_t kthread_address;
uint64_t invalid_rip;
};
struct invalid_process_allocation_report {
report_header report_header;
char process[REPORT_INVALID_PROCESS_BUFFER_SIZE];
};
struct hidden_system_thread_report {
report_header report_header;
uint8_t found_in_kthreadlist;
uint8_t found_in_pspcidtable;
uint64_t thread_address;
uint32_t thread_id;
char thread[500];
};
/* NOTE(review): unlike its siblings this report has a bare report_code
 * rather than a report_header prefix — confirm against the driver side */
struct attach_process_report {
int report_code;
uint32_t thread_id;
uint64_t thread_address;
};
struct open_handle_failure_report {
report_header report_header;
uint32_t is_kernel_handle;
uint32_t process_id;
uint32_t thread_id;
uint32_t access;
char process_name[HANDLE_REPORT_PROCESS_NAME_MAX_LENGTH];
};
struct process_module_validation_report {
report_header report_header;
uint64_t image_base;
uint32_t image_size;
wchar_t module_path[MODULE_PATH_LEN];
};
struct system_module_integrity_check_report {
report_header header;
uint64_t image_base;
uint32_t image_size;
char path_name[0x100];
};
struct driver_self_integrity_check_report {
report_header header;
uint64_t image_base;
uint32_t image_size;
char path_name[0x100];
};
/* driver liveness counters; packet_type 1 */
struct heartbeat_packet {
heartbeat_header header;
uint32_t heartbeat_count;
uint32_t total_reports_completed;
uint32_t total_irps_completed;
uint32_t total_heartbeats_completed;
};
struct blacklisted_pcie_device_report {
report_header header;
uint64_t device_object;
uint16_t device_id;
uint16_t vendor_id;
};
enum apc_operation { operation_stackwalk = 0x1 };
/* IOCTL codes shared with the driver; function numbers must match the
 * driver's dispatch table exactly. */
// clang-format off
enum ioctl_code
{
RunNmiCallbacks = CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20001, METHOD_BUFFERED, FILE_ANY_ACCESS),
ValidateDriverObjects = CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20002, METHOD_BUFFERED, FILE_ANY_ACCESS),
NotifyDriverOnProcessLaunch = CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20004, METHOD_BUFFERED, FILE_ANY_ACCESS),
QueryForApcCompletion = CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20005, METHOD_BUFFERED, FILE_ANY_ACCESS),
PerformVirtualisationCheck = CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20006, METHOD_BUFFERED, FILE_ANY_ACCESS),
EnumerateHandleTables = CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20007, METHOD_BUFFERED, FILE_ANY_ACCESS),
NotifyDriverOnProcessTermination = CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20010, METHOD_BUFFERED, FILE_ANY_ACCESS),
ScanForUnlinkedProcesses = CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20011, METHOD_BUFFERED, FILE_ANY_ACCESS),
PerformModuleIntegrityCheck = CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20013, METHOD_BUFFERED, FILE_ANY_ACCESS),
ScanFroAttachedThreads = CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20014, METHOD_BUFFERED, FILE_ANY_ACCESS),
ValidateProcessLoadedModule = CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20015, METHOD_BUFFERED, FILE_ANY_ACCESS),
RequestHardwareInformation = CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20016, METHOD_BUFFERED, FILE_ANY_ACCESS),
InitiateApcStackwalkOperation = CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20017, METHOD_BUFFERED, FILE_ANY_ACCESS),
ScanForEptHooks = CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20018, METHOD_BUFFERED, FILE_ANY_ACCESS),
InitiateDpcStackwalk = CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20019, METHOD_BUFFERED, FILE_ANY_ACCESS),
ValidateSystemModules = CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20020, METHOD_BUFFERED, FILE_ANY_ACCESS),
InsertIrpIntoIrpQueue = CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20021, METHOD_BUFFERED, FILE_ANY_ACCESS),
QueryDeferredReports = CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20022, METHOD_BUFFERED, FILE_ANY_ACCESS),
InitiateSharedMapping = CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20023, METHOD_BUFFERED, FILE_ANY_ACCESS),
ValidatePciDevices = CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20024, METHOD_BUFFERED, FILE_ANY_ACCESS),
ValidateWin32kDispatchTables = CTL_CODE(FILE_DEVICE_UNKNOWN, 0x20025, METHOD_BUFFERED, FILE_ANY_ACCESS)
};
constexpr int SHARED_STATE_OPERATION_COUNT = 10;
/* operation ids written into the shared mapping for the driver to poll */
enum shared_state_operation_id
{
ssRunNmiCallbacks = 0,
ssValidateDriverObjects,
ssEnumerateHandleTables,
ssScanForUnlinkedProcesses,
ssPerformModuleIntegrityCheck,
ssScanForAttachedThreads,
ssScanForEptHooks,
ssInitiateDpcStackwalk,
ssValidateSystemModules,
ssValidateWin32kDispatchTables
};
// clang-format on
/*
 * One overlapped-I/O slot: a manual report buffer plus the OVERLAPPED/event
 * pair handed to DeviceIoControl. `buffer` ownership stays with the caller
 * (allocated in initiate_completion_port, freed in
 * terminate_completion_port).
 */
struct event_dispatcher {
bool in_use;
OVERLAPPED overlapped;
void *buffer;
unsigned long buffer_size;
event_dispatcher(void *buffer, unsigned long buffer_size) {
this->in_use = false;
/* auto-reset, initially non-signaled */
this->overlapped.hEvent = CreateEvent(nullptr, false, false, nullptr);
this->buffer = buffer;
this->buffer_size = buffer_size;
}
};
/*
 * User-mode facade over the kernel driver's device object: opens the device,
 * establishes the session (keys + module info), keeps a pool of pending
 * IRPs alive on an I/O completion port, and exposes one method per driver
 * IOCTL.
 */
class kernel_interface {
/* payload for NotifyDriverOnProcessLaunch */
struct session_initiation_packet {
unsigned __int32 session_cookie;
void *process_id;
unsigned char aes_key[32];
unsigned char aes_iv[16];
struct module::module_information module_info;
};
struct hv_detection_packet {
unsigned long aperf_msr_timing_check;
unsigned long invd_emulation_check;
};
/* payload for ValidateProcessLoadedModule */
struct process_module {
void *module_base;
size_t module_size;
wchar_t module_path[MAX_MODULE_PATH];
};
struct apc_operation_init {
int operation_id;
};
HANDLE driver_handle;
LPCWSTR driver_name;
client::message_queue &message_queue;
HANDLE port;
/* guards `events` (slot claim/release/lookup) */
std::mutex lock;
std::vector events;
module::module_information* module_info;
/* layout of the driver-shared polling buffer */
struct shared_data {
unsigned __int32 status;
unsigned __int16 operation_id;
};
struct shared_mapping {
shared_data *buffer;
size_t size;
};
shared_mapping mapping;
void initiate_completion_port();
void terminate_completion_port();
event_dispatcher *get_free_event_entry();
void release_event_object(OVERLAPPED *event);
void *get_buffer_from_event_object(OVERLAPPED *event);
void notify_driver_on_process_launch();
void notify_driver_on_process_termination();
void generic_driver_call(ioctl_code ioctl);
unsigned int generic_driver_call_output(ioctl_code ioctl, void *output_buffer,
size_t buffer_size,
unsigned long *bytes_returned);
void generic_driver_call_input(ioctl_code ioctl, void *input_buffer,
size_t buffer_size,
unsigned long *bytes_returned);
void generic_driver_call_apc(apc_operation operation);
public:
kernel_interface(LPCWSTR driver_name, client::message_queue &queue,
module::module_information *module_info);
~kernel_interface();
/* blocking; services report completions forever */
void run_completion_port();
void run_nmi_callbacks();
void validate_pci_devices();
void validate_system_driver_objects();
void detect_system_virtualization();
void enumerate_handle_tables();
void scan_for_unlinked_processes();
void perform_integrity_check();
void scan_for_attached_threads();
void scan_for_ept_hooks();
void perform_dpc_stackwalk();
void validate_system_modules();
void verify_process_module_executable_regions();
void initiate_apc_stackwalk();
void send_pending_irp();
void write_shared_mapping_operation(shared_state_operation_id operation_id);
void initiate_shared_mapping();
void validate_win32k_dispatch_tables();
};
} // namespace kernel_interface
================================================
FILE: module/main.cpp
================================================
#include
#include "module.h"
/* Thread entry created from DllMain: runs the module main loop and exits. */
DWORD WINAPI Init(HINSTANCE hinstDLL) {
module::run(hinstDLL);
return 0;
}
/*
 * DLL entry point. On process attach, spawn a worker thread to run
 * module::run — DllMain itself must do no blocking work under loader lock.
 *
 * Bug in the original: DLL_THREAD_ATTACH and DLL_THREAD_DETACH fell through
 * into the DLL_PROCESS_DETACH case and logged "process closing!" on every
 * thread notification. Thread notifications need no work (and are
 * suppressed anyway by DisableThreadLibraryCalls).
 */
BOOL APIENTRY DllMain(HMODULE hModule, DWORD ul_reason_for_call,
                      LPVOID lpReserved) {
    switch (ul_reason_for_call) {
    case DLL_PROCESS_ATTACH: {
        DisableThreadLibraryCalls(hModule);
        const auto thread = CreateThread(
            nullptr, 0, reinterpret_cast<LPTHREAD_START_ROUTINE>(Init),
            hModule, 0, nullptr);
        if (thread)
            CloseHandle(thread);
        break;
    }
    case DLL_THREAD_ATTACH:
    case DLL_THREAD_DETACH:
        break;
    case DLL_PROCESS_DETACH: {
        LOG_INFO("process closing!");
        break;
    }
    }
    return TRUE;
}
================================================
FILE: module/module.cpp
================================================
#include "module.h"
#include
#include "client/message_queue.h"
#include "dispatcher/dispatcher.h"
#include "crypt/crypt.h"
#include
/*
 * Fill `out` with this module's own base address, image size and file path.
 * Uses the address of this function to resolve the owning HMODULE without
 * bumping its refcount. Returns false on any API failure (caller can read
 * GetLastError for the reason).
 */
bool module::get_module_information(module_information *out) {
BOOL ret = FALSE;
HMODULE module = {0};
MODULEINFO info = {0};
ret = GetModuleHandleExA(GET_MODULE_HANDLE_EX_FLAG_FROM_ADDRESS |
GET_MODULE_HANDLE_EX_FLAG_UNCHANGED_REFCOUNT,
(LPCSTR)get_module_information, &module);
if (!ret)
return false;
ret = GetModuleInformation(GetCurrentProcess(), module, (LPMODULEINFO)&info,
sizeof(info));
if (!ret)
return false;
if (!GetModuleFileNameA(module, out->path, MAX_PATH))
return false;
out->base_address = info.lpBaseOfDll;
out->size = info.SizeOfImage;
LOG_INFO("base: %llx", out->base_address);
LOG_INFO("size: %lx", out->size);
LOG_INFO("path: %s", out->path);
return true;
}
/*
 * Worker-thread body: attaches a console, redirects stdio to it, gathers
 * information about our own image, then hands control to the dispatcher
 * until it returns. Finally tears the console down and exits the thread
 * via FreeLibraryAndExitThread (which never returns).
 *
 * Fix: FreeLibraryAndExitThread terminates the thread immediately, so in
 * the original code the destructors of `queue` and `dispatch` never ran.
 * They are now confined to an inner scope that closes before the thread
 * exits. The duplicated error-path cleanup and the unreachable `return`
 * are gone as a side effect.
 */
void module::run(HINSTANCE hinstDLL) {
  AllocConsole();
  FILE *file = NULL;
  freopen_s(&file, "CONOUT$", "w", stdout);
  freopen_s(&file, "CONIN$", "r", stdin);

  LPTSTR pipe_name = (LPTSTR)L"\\\\.\\pipe\\DonnaACPipe";
  LPCWSTR driver_name = L"\\\\.\\DonnaAC";

  module::module_information info = {0};
  if (module::get_module_information(&info)) {
    /* Inner scope guarantees queue and dispatch are destroyed before
     * FreeLibraryAndExitThread kills this thread. */
    {
      client::message_queue queue(pipe_name);
      dispatcher::dispatcher dispatch(driver_name, queue, &info);
      dispatch.run();
    }
  } else {
    LOG_ERROR("get_module_information: %x", GetLastError());
  }

  fclose(stdout);
  fclose(stdin);
  FreeConsole();
  FreeLibraryAndExitThread(hinstDLL, 0);
}
void module::terminate() {}
================================================
FILE: module/module.h
================================================
#pragma once
#include "common.h"
#include
namespace module {
// Worker-thread body: sets up a console, collects module_information for
// this DLL and drives the dispatcher; exits the calling thread via
// FreeLibraryAndExitThread rather than returning normally.
void run(HINSTANCE hinstDLL);

// Currently a no-op placeholder (see module.cpp).
void terminate();

// Basic facts about a loaded module image.
struct module_information {
void *base_address;  // image base (MODULEINFO::lpBaseOfDll)
uint32_t size;       // image size in bytes (MODULEINFO::SizeOfImage)
char path[MAX_PATH]; // full path of the module file on disk
};

// Fills *info with details about the module containing this code.
// Returns false on failure (caller can consult GetLastError()).
bool get_module_information(module_information *info);
} // namespace module
================================================
FILE: module/module.vcxproj
================================================
DebugWin32Release - No ServerWin32Debugx64Release - No Serverx64testWin32testx6417.0Win32Proj{3b18467a-4358-45ef-81b1-5c6f9b0b6728}module10.0DynamicLibrarytruev143UnicodeDynamicLibraryfalsev143trueUnicodeDynamicLibraryfalsev143trueUnicodeDynamicLibrarytruev143UnicodeDynamicLibraryfalsev143trueUnicodeDynamicLibraryfalsev143trueUnicodeLevel3trueWIN32;_DEBUG;MODULE_EXPORTS;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)trueUsepch.hWindowstruefalseLevel3truetruetrueWIN32;NDEBUG;MODULE_EXPORTS;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)trueUsepch.hWindowstruetruetruefalseLevel3truetruetrueWIN32;NDEBUG;MODULE_EXPORTS;_WINDOWS;_USRDLL;%(PreprocessorDefinitions)trueUsepch.hWindowstruetruetruefalseLevel3truetrueNotUsingpch.hstdcpp20WindowstruefalseLevel3truetruetrueNO_SERVERtrueNotUsingpch.hstdcpp20WindowstruetruetruefalseLevel3truetruetrueNO_SERVERtrueNotUsingpch.hstdcpp20Windowstruetruetruefalse
================================================
FILE: module/module.vcxproj.filters
================================================
================================================
FILE: server/main.go
================================================
package main
import "fmt"
// main prints a fixed greeting to standard output.
func main() {
	const greeting = "Hello, World!"
	fmt.Println(greeting)
}