Import of kernel-5.14.0-570.62.1.el9_6
parent
0e0250be6d
commit
1fd0357946
@@ -531,7 +531,9 @@ What: /sys/devices/system/cpu/vulnerabilities
 		/sys/devices/system/cpu/vulnerabilities/spectre_v1
 		/sys/devices/system/cpu/vulnerabilities/spectre_v2
 		/sys/devices/system/cpu/vulnerabilities/srbds
+		/sys/devices/system/cpu/vulnerabilities/tsa
 		/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
+		/sys/devices/system/cpu/vulnerabilities/vmscape
 Date:		January 2018
 Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
 Description:	Information about CPU vulnerabilities

@@ -203,7 +203,7 @@ Architecture (MCA)\ [#f3]_.
 mode).

 .. [#f3] For more details about the Machine Check Architecture (MCA),
-   please read Documentation/x86/x86_64/machinecheck.rst at the Kernel tree.
+   please read Documentation/arch/x86/x86_64/machinecheck.rst at the Kernel tree.

 EDAC - Error Detection And Correction
 *************************************

@@ -23,3 +23,4 @@ are configurable at compile, boot or run time.
    gather_data_sampling
    reg-file-data-sampling
    indirect-target-selection
+   vmscape

@@ -58,7 +58,7 @@ Because the buffers are potentially shared between Hyper-Threads cross
 Hyper-Thread attacks are possible.

 Deeper technical information is available in the MDS specific x86
-architecture section: :ref:`Documentation/x86/mds.rst <mds>`.
+architecture section: :ref:`Documentation/arch/x86/mds.rst <mds>`.


 Attack scenarios

@@ -157,9 +157,7 @@ This is achieved by using the otherwise unused and obsolete VERW instruction in
 combination with a microcode update. The microcode clears the affected CPU
 buffers when the VERW instruction is executed.

-Kernel reuses the MDS function to invoke the buffer clearing:
-
-	mds_clear_cpu_buffers()
+Kernel does the buffer clearing with x86_clear_cpu_buffers().

 On MDS affected CPUs, the kernel already invokes CPU buffer clear on
 kernel/userspace, hypervisor/guest and C-state (idle) transitions. No

@@ -104,7 +104,20 @@ The possible values in this file are:

   (spec_rstack_overflow=ibpb-vmexit)

+ * 'Mitigation: Reduced Speculation':
+
+   This mitigation gets automatically enabled when the above one "IBPB on
+   VMEXIT" has been selected and the CPU supports the BpSpecReduce bit.
+
+   It gets automatically enabled on machines which have the
+   SRSO_USER_KERNEL_NO=1 CPUID bit. In that case, the code logic is to switch
+   to the above =ibpb-vmexit mitigation because the user/kernel boundary is
+   not affected anymore and thus "safe RET" is not needed.
+
+   After enabling the IBPB on VMEXIT mitigation option, the BpSpecReduce bit
+   is detected (functionality present on all such machines) and that
+   practically overrides IBPB on VMEXIT as it has a lot less performance
+   impact and takes care of the guest->host attack vector too.
+
 In order to exploit vulnerability, an attacker needs to:

@@ -158,3 +171,72 @@ poisoned BTB entry and using that safe one for all function returns.
 In older Zen1 and Zen2, this is accomplished using a reinterpretation
 technique similar to Retbleed one: srso_untrain_ret() and
 srso_safe_ret().
+
+Checking the safe RET mitigation actually works
+-----------------------------------------------
+
+In case one wants to validate whether the SRSO safe RET mitigation works
+on a kernel, one could use two performance counters
+
+* PMC_0xc8 - Count of RET/RET lw retired
+* PMC_0xc9 - Count of RET/RET lw retired mispredicted
+
+and compare the number of RETs retired properly vs those retired
+mispredicted, in kernel mode. Another way of specifying those events
+is::
+
+  # perf list ex_ret_near_ret
+
+  List of pre-defined events (to be used in -e or -M):
+
+  core:
+    ex_ret_near_ret
+         [Retired Near Returns]
+    ex_ret_near_ret_mispred
+         [Retired Near Returns Mispredicted]
+
+Either the command using the event mnemonics::
+
+  # perf stat -e ex_ret_near_ret:k -e ex_ret_near_ret_mispred:k sleep 10s
+
+or using the raw PMC numbers::
+
+  # perf stat -e cpu/event=0xc8,umask=0/k -e cpu/event=0xc9,umask=0/k sleep 10s
+
+should give the same amount. I.e., every RET retired should be
+mispredicted::
+
+  [root@brent: ~/kernel/linux/tools/perf> ./perf stat -e cpu/event=0xc8,umask=0/k -e cpu/event=0xc9,umask=0/k sleep 10s
+
+   Performance counter stats for 'sleep 10s':
+
+           137,167      cpu/event=0xc8,umask=0/k
+           137,173      cpu/event=0xc9,umask=0/k
+
+      10.004110303 seconds time elapsed
+
+       0.000000000 seconds user
+       0.004462000 seconds sys
+
+vs the case when the mitigation is disabled (spec_rstack_overflow=off)
+or not functioning properly, showing usually a lot smaller number of
+mispredicted retired RETs vs the overall count of retired RETs during
+a workload::
+
+  [root@brent: ~/kernel/linux/tools/perf> ./perf stat -e cpu/event=0xc8,umask=0/k -e cpu/event=0xc9,umask=0/k sleep 10s
+
+   Performance counter stats for 'sleep 10s':
+
+           201,627      cpu/event=0xc8,umask=0/k
+             4,074      cpu/event=0xc9,umask=0/k
+
+      10.003267252 seconds time elapsed
+
+       0.002729000 seconds user
+       0.000000000 seconds sys
+
+Also, there is a selftest which performs the above, go to
+tools/testing/selftests/x86/ and do::
+
+	make srso
+	./srso
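
As a rough cross-check of the two counters above, the kernel-mode misprediction
ratio can be computed directly. This is an illustrative sketch only: the event
mnemonics are the ones listed above, while the CSV output mode (``-x,``) and the
awk post-processing are assumptions of this example, not part of the commit::

  # perf stat -x, -e ex_ret_near_ret:k -e ex_ret_near_ret_mispred:k sleep 10s 2>&1 \
        | awk -F, '{v[NR]=$1} END { printf "mispredict ratio: %.3f\n", v[2]/v[1] }'

With the safe RET mitigation working, the ratio should be close to 1.0; with
spec_rstack_overflow=off it should be much smaller.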
@@ -63,7 +63,7 @@ attacker needs to begin a TSX transaction and raise an asynchronous abort
 which in turn potentially leaks data stored in the buffers.

 More detailed technical information is available in the TAA specific x86
-architecture section: :ref:`Documentation/x86/tsx_async_abort.rst <tsx_async_abort>`.
+architecture section: :ref:`Documentation/arch/x86/tsx_async_abort.rst <tsx_async_abort>`.


 Attack scenarios

Documentation/admin-guide/hw-vuln/vmscape.rst (new file, 110 lines)

@@ -0,0 +1,110 @@
+.. SPDX-License-Identifier: GPL-2.0
+
+VMSCAPE
+=======
+
+VMSCAPE is a vulnerability that may allow a guest to influence the branch
+prediction in host userspace. It particularly affects hypervisors like QEMU.
+
+Even if a hypervisor may not have any sensitive data like disk encryption keys,
+guest-userspace may be able to attack the guest-kernel using the hypervisor as
+a confused deputy.
+
+Affected processors
+-------------------
+
+The following CPU families are affected by VMSCAPE:
+
+**Intel processors:**
+  - Skylake generation (parts without Enhanced-IBRS)
+  - Cascade Lake generation (parts affected by ITS guest/host separation)
+  - Alder Lake and newer (parts affected by BHI)
+
+Note that BHI-affected parts that use the BHB clearing software mitigation,
+e.g. Icelake, are not vulnerable to VMSCAPE.
+
+**AMD processors:**
+  - Zen series (families 0x17, 0x19, 0x1a)
+
+**Hygon processors:**
+  - Family 0x18
+
+Mitigation
+----------
+
+Conditional IBPB
+----------------
+
+Kernel tracks when a CPU has run a potentially malicious guest and issues an
+IBPB before the first exit to userspace after VM-exit. If userspace did not run
+between VM-exit and the next VM-entry, no IBPB is issued.
+
+Note that the existing userspace mitigation against Spectre-v2 is effective in
+protecting the userspace. It is, however, insufficient to protect the userspace
+VMMs from a malicious guest. This is because Spectre-v2 mitigations are applied
+at context switch time, while the userspace VMM can run after a VM-exit without
+a context switch.
+
+Vulnerability enumeration and mitigation is not applied inside a guest. This is
+because nested hypervisors should already be deploying IBPB to isolate
+themselves from nested guests.
+
+SMT considerations
+------------------
+
+When Simultaneous Multi-Threading (SMT) is enabled, hypervisors can be
+vulnerable to cross-thread attacks. For complete protection against VMSCAPE
+attacks in SMT environments, STIBP should be enabled.
+
+The kernel will issue a warning if SMT is enabled without adequate STIBP
+protection. The warning is not issued when:
+
+- SMT is disabled
+- STIBP is enabled system-wide
+- Intel eIBRS is enabled (which implies STIBP protection)
+
+System information and options
+------------------------------
+
+The sysfs file showing VMSCAPE mitigation status is:
+
+  /sys/devices/system/cpu/vulnerabilities/vmscape
+
+The possible values in this file are:
+
+ * 'Not affected':
+
+   The processor is not vulnerable to VMSCAPE attacks.
+
+ * 'Vulnerable':
+
+   The processor is vulnerable and no mitigation has been applied.
+
+ * 'Mitigation: IBPB before exit to userspace':
+
+   Conditional IBPB mitigation is enabled. The kernel tracks when a CPU has
+   run a potentially malicious guest and issues an IBPB before the first
+   exit to userspace after VM-exit.
+
+ * 'Mitigation: IBPB on VMEXIT':
+
+   IBPB is issued on every VM-exit. This occurs when other mitigations like
+   RETBLEED or SRSO are already issuing IBPB on VM-exit.
+
+Mitigation control on the kernel command line
+---------------------------------------------
+
+The mitigation can be controlled via the ``vmscape=`` command line parameter:
+
+ * ``vmscape=off``:
+
+   Disable the VMSCAPE mitigation.
+
+ * ``vmscape=ibpb``:
+
+   Enable conditional IBPB mitigation (default when CONFIG_MITIGATION_VMSCAPE=y).
+
+ * ``vmscape=force``:
+
+   Force vulnerability detection and mitigation even on processors that are
+   not known to be affected.
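
For orientation only, the conditional IBPB scheme described above can be pictured
as a per-CPU flag that is set on VM-exit and consumed on the next exit to
userspace. This is a sketch, not the kernel's actual implementation: the
``guest_ran`` variable and the two hook names are hypothetical, while
``indirect_branch_prediction_barrier()`` is the real kernel helper::

  static DEFINE_PER_CPU(bool, guest_ran);

  void on_vmexit(void)                    /* hypothetical VM-exit hook */
  {
          __this_cpu_write(guest_ran, true);
  }

  void on_exit_to_userspace(void)         /* hypothetical return-to-user hook */
  {
          /* IBPB only if a guest ran since the last exit to userspace */
          if (__this_cpu_read(guest_ran)) {
                  indirect_branch_prediction_barrier();
                  __this_cpu_write(guest_ran, false);
          }
  }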
@@ -181,7 +181,7 @@ parameter is applicable::
 	X86-32	X86-32, aka i386 architecture is enabled.
 	X86-64	X86-64 architecture is enabled.
 		More X86-64 boot options can be found in
-		Documentation/x86/x86_64/boot-options.rst.
+		Documentation/arch/x86/x86_64/boot-options.rst.
 	X86	Either 32-bit or 64-bit x86 (same as X86-32+X86-64)
 	X86_UV	SGI UV support is enabled.
 	XEN	Xen support is enabled

@@ -199,7 +199,7 @@ Do not modify the syntax of boot loader parameters without extreme
 need or coordination with <Documentation/arch/x86/boot.rst>.

 There are also arch-specific kernel-parameters not documented here.
-See for example <Documentation/x86/x86_64/boot-options.rst>.
+See for example <Documentation/arch/x86/x86_64/boot-options.rst>.

 Note that ALL kernel parameters listed below are CASE SENSITIVE, and that
 a trailing = on the name of any parameter states that that parameter will

@@ -3165,7 +3165,7 @@

 	mce		[X86-32] Machine Check Exception

-	mce=option	[X86-64] See Documentation/x86/x86_64/boot-options.rst
+	mce=option	[X86-64] See Documentation/arch/x86/x86_64/boot-options.rst

 	md=		[HW] RAID subsystems devices and level
 			See Documentation/admin-guide/md.rst.

@@ -3427,6 +3427,7 @@
 				srbds=off [X86,INTEL]
 				ssbd=force-off [ARM64]
 				tsx_async_abort=off [X86]
+				vmscape=off [X86]

 			Exceptions:
 				This does not have any effect on

@@ -4628,7 +4629,7 @@
 			See Documentation/admin-guide/blockdev/paride.rst.

 	pirq=		[SMP,APIC] Manual mp-table setup
-			See Documentation/x86/i386/IO-APIC.rst.
+			See Documentation/arch/x86/i386/IO-APIC.rst.

 	plip=		[PPT,NET] Parallel port network link
 			Format: { parport<nr> | timid | 0 }

@@ -5910,7 +5911,7 @@

 	serialnumber	[BUGS=X86-32]

-	sev=option[,option...] [X86-64] See Documentation/x86/x86_64/boot-options.rst
+	sev=option[,option...] [X86-64] See Documentation/arch/x86/x86_64/boot-options.rst

 	shapers=	[NET]
 			Maximal number of shapers.

@@ -6663,6 +6664,19 @@
 			first trust source as a backend which is initialized
 			successfully during iteration.

+	tsa=		[X86] Control mitigation for Transient Scheduler
+			Attacks on AMD CPUs. Search the following in your
+			favourite search engine for more details:
+
+			"Technical guidance for mitigating transient scheduler
+			attacks".
+
+			off  - disable the mitigation
+			on   - enable the mitigation (default)
+			user - mitigate only user/kernel transitions
+			vm   - mitigate only guest/host transitions
+
+
 	tsc=		Disable clocksource stability checks for TSC.
 			Format: <string>
 			[x86] reliable: mark tsc clocksource as reliable, this

@@ -7091,6 +7105,16 @@
 	vmpoff=		[KNL,S390] Perform z/VM CP command after power off.
 			Format: <command>

+	vmscape=	[X86] Controls mitigation for VMscape attacks.
+			VMscape attacks can leak information from a userspace
+			hypervisor to a guest via speculative side-channels.
+
+			off   - disable the mitigation
+			ibpb  - use Indirect Branch Prediction Barrier
+				(IBPB) mitigation (default)
+			force - force vulnerability detection even on
+				unaffected processors
+
 	vsyscall=	[X86-64]
 			Controls the behavior of vsyscalls (i.e. calls to
 			fixed addresses of 0xffffffffff600x00 from legacy

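
On RHEL-style systems, one plausible way to set the new parameter is via
grubby; this example is illustrative, and only the ``vmscape=`` values
themselves come from the documentation above::

  # grubby --update-kernel=ALL --args="vmscape=ibpb"
  # reboot
  ...
  # cat /sys/devices/system/cpu/vulnerabilities/vmscape
  Mitigation: IBPB before exit to userspace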
@@ -22,5 +22,5 @@ implementation.
    s390/index
    ../sh/index
    ../sparc/index
-   ../x86/index
+   x86/index
    ../xtensa/index

@@ -1344,7 +1344,7 @@ follow::
 In addition to read/modify/write the setup header of the struct
 boot_params as that of 16-bit boot protocol, the boot loader should
 also fill the additional fields of the struct boot_params as
-described in chapter Documentation/x86/zero-page.rst.
+described in chapter Documentation/arch/x86/zero-page.rst.

 After setting up the struct boot_params, the boot loader can load the
 32/64-bit kernel in the same way as that of 16-bit boot protocol.

@@ -1380,7 +1380,7 @@ can be calculated as follows::
 In addition to read/modify/write the setup header of the struct
 boot_params as that of 16-bit boot protocol, the boot loader should
 also fill the additional fields of the struct boot_params as described
-in chapter Documentation/x86/zero-page.rst.
+in chapter Documentation/arch/x86/zero-page.rst.

 After setting up the struct boot_params, the boot loader can load
 64-bit kernel in the same way as that of 16-bit boot protocol, but

@@ -93,7 +93,7 @@ enters a C-state.

 The kernel provides a function to invoke the buffer clearing:

-	mds_clear_cpu_buffers()
+	x86_clear_cpu_buffers()

 Also macro CLEAR_CPU_BUFFERS can be used in ASM late in exit-to-user path.
 Other than CFLAGS.ZF, this macro doesn't clobber any registers.
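
As an illustration of the ASM usage mentioned above, a hedged sketch of a
return-to-userspace tail; the surrounding instructions are placeholders, and
only CLEAR_CPU_BUFFERS and its ZF-only clobber are taken from the text::

	/* sketch: clear CPU buffers as late as possible before returning */
	CLEAR_CPU_BUFFERS	/* VERW-based; clobbers only CFLAGS.ZF */
	swapgs
	sysretq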
@@ -185,9 +185,9 @@ Mitigation points
    idle clearing would be a window dressing exercise and is therefore not
    activated.

-   The invocation is controlled by the static key mds_idle_clear which is
-   switched depending on the chosen mitigation mode and the SMT state of
-   the system.
+   The invocation is controlled by the static key cpu_buf_idle_clear which is
+   switched depending on the chosen mitigation mode and the SMT state of the
+   system.

    The buffer clear is only invoked before entering the C-State to prevent
    that stale data from the idling CPU from spilling to the Hyper-Thread

@@ -28,7 +28,7 @@ are aligned with platform MTRR setup. If MTRRs are only set up by the platform
 firmware code though and the OS does not make any specific MTRR mapping
 requests mtrr_type_lookup() should always return MTRR_TYPE_INVALID.

-For details refer to Documentation/x86/pat.rst.
+For details refer to Documentation/arch/x86/pat.rst.

 .. tip::
   On Intel P6 family processors (Pentium Pro, Pentium II and later)

@@ -20,7 +20,7 @@ physical address space. This "ought to be enough for anybody" ©.
 QEMU 2.9 and later support 5-level paging.

 Virtual memory layout for 5-level paging is described in
-Documentation/x86/x86_64/mm.rst
+Documentation/arch/x86/x86_64/mm.rst


 Enabling 5-level paging

@@ -9,7 +9,7 @@ only the AMD64 specific ones are listed here.

 Machine check
 =============
-Please see Documentation/x86/x86_64/machinecheck.rst for sysfs runtime tunables.
+Please see Documentation/arch/x86/x86_64/machinecheck.rst for sysfs runtime tunables.

   mce=off
 		Disable machine check

@@ -82,7 +82,7 @@ APICs
      Don't use the local APIC (alias for i386 compatibility)

   pirq=...
-     See Documentation/x86/i386/IO-APIC.rst
+     See Documentation/arch/x86/i386/IO-APIC.rst

   noapictimer
      Don't set up the APIC timer

@@ -18,7 +18,7 @@ For more information on the features of cpusets, see
 Documentation/admin-guide/cgroup-v1/cpusets.rst.
 There are a number of different configurations you can use for your needs. For
 more information on the numa=fake command line option and its various ways of
-configuring fake nodes, see Documentation/x86/x86_64/boot-options.rst.
+configuring fake nodes, see Documentation/arch/x86/x86_64/boot-options.rst.

 For the purposes of this introduction, we'll assume a very primitive NUMA
 emulation setup of "numa=fake=4*512,". This will split our system memory into

@@ -410,7 +410,7 @@ ioremap_uc()

 ioremap_uc() behaves like ioremap() except that on the x86 architecture without
 'PAT' mode, it marks memory as uncached even when the MTRR has designated
-it as cacheable, see Documentation/x86/pat.rst.
+it as cacheable, see Documentation/arch/x86/pat.rst.

 Portable drivers should avoid the use of ioremap_uc().

@@ -7658,7 +7658,7 @@ system fingerprint. To prevent userspace from circumventing such restrictions
 by running an enclave in a VM, KVM prevents access to privileged attributes by
 default.

-See Documentation/x86/sgx.rst for more details.
+See Documentation/arch/x86/sgx.rst for more details.

 7.26 KVM_CAP_PPC_RPT_INVALIDATE
 -------------------------------

MAINTAINERS (12 lines changed)
@@ -1013,7 +1013,7 @@ M: Naveen Krishna Chatradhi <naveenkrishna.chatradhi@amd.com>
 R:	Carlos Bilbao <carlos.bilbao@amd.com>
 L:	platform-driver-x86@vger.kernel.org
 S:	Maintained
-F:	Documentation/x86/amd_hsmp.rst
+F:	Documentation/arch/x86/amd_hsmp.rst
 F:	arch/x86/include/asm/amd_hsmp.h
 F:	arch/x86/include/uapi/asm/amd_hsmp.h
 F:	drivers/platform/x86/amd/hsmp.c

@@ -10029,7 +10029,7 @@ L: tboot-devel@lists.sourceforge.net
 S:	Supported
 W:	http://tboot.sourceforge.net
 T:	hg http://tboot.hg.sourceforge.net:8000/hgroot/tboot/tboot
-F:	Documentation/x86/intel_txt.rst
+F:	Documentation/arch/x86/intel_txt.rst
 F:	arch/x86/kernel/tboot.c
 F:	include/linux/tboot.h

@@ -10040,7 +10040,7 @@ L: linux-sgx@vger.kernel.org
 S:	Supported
 Q:	https://patchwork.kernel.org/project/intel-sgx/list/
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/sgx
-F:	Documentation/x86/sgx.rst
+F:	Documentation/arch/x86/sgx.rst
 F:	arch/x86/entry/vdso/vsgx.S
 F:	arch/x86/include/asm/sgx.h
 F:	arch/x86/include/uapi/asm/sgx.h

@@ -16428,7 +16428,7 @@ M: Fenghua Yu <fenghua.yu@intel.com>
 M:	Reinette Chatre <reinette.chatre@intel.com>
 L:	linux-kernel@vger.kernel.org
 S:	Supported
-F:	Documentation/x86/resctrl*
+F:	Documentation/arch/x86/resctrl*
 F:	arch/x86/include/asm/resctrl.h
 F:	arch/x86/kernel/cpu/resctrl/
 F:	tools/testing/selftests/resctrl/

@@ -21024,7 +21024,7 @@ L: linux-kernel@vger.kernel.org
 S:	Maintained
 T:	git git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git x86/core
 F:	Documentation/devicetree/bindings/x86/
-F:	Documentation/x86/
+F:	Documentation/arch/x86/
 F:	arch/x86/

 X86 ENTRY CODE

@@ -21040,7 +21040,7 @@ M: Borislav Petkov <bp@alien8.de>
 L:	linux-edac@vger.kernel.org
 S:	Maintained
 F:	Documentation/ABI/testing/sysfs-mce
-F:	Documentation/x86/x86_64/machinecheck.rst
+F:	Documentation/arch/x86/x86_64/machinecheck.rst
 F:	arch/x86/kernel/cpu/mce/*

 X86 MICROCODE UPDATE SUPPORT

@@ -12,7 +12,7 @@ RHEL_MINOR = 6
 #
 # Use this spot to avoid future merge conflicts.
 # Do not trim this comment.
-RHEL_RELEASE = 570.60.1
+RHEL_RELEASE = 570.62.1

 #
 # ZSTREAM

@@ -1133,7 +1133,7 @@ config SMP
	  uniprocessor machines. On a uniprocessor machine, the kernel
	  will run faster if you say N here.

-	  See also <file:Documentation/x86/i386/IO-APIC.rst>,
+	  See also <file:Documentation/arch/x86/i386/IO-APIC.rst>,
	  <file:Documentation/admin-guide/lockup-watchdogs.rst> and the SMP-HOWTO available at
	  <http://tldp.org/HOWTO/SMP-HOWTO.html>.

@@ -53,17 +53,15 @@ static void invoke_syscall(struct pt_regs *regs, unsigned int scno,
	syscall_set_return_value(current, regs, 0, ret);

	/*
-	 * Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
-	 * but not enough for arm64 stack utilization comfort. To keep
-	 * reasonable stack head room, reduce the maximum offset to 9 bits.
+	 * This value will get limited by KSTACK_OFFSET_MAX(), which is 10
+	 * bits. The actual entropy will be further reduced by the compiler
+	 * when applying stack alignment constraints: the AAPCS mandates a
+	 * 16-byte aligned SP at function boundaries, which will remove the
+	 * 4 low bits from any entropy chosen here.
	 *
-	 * The actual entropy will be further reduced by the compiler when
-	 * applying stack alignment constraints: the AAPCS mandates a
-	 * 16-byte (i.e. 4-bit) aligned SP at function boundaries.
-	 *
-	 * The resulting 5 bits of entropy is seen in SP[8:4].
+	 * The resulting 6 bits of entropy is seen in SP[9:4].
	 */
-	choose_random_kstack_offset(get_random_int() & 0x1FF);
+	choose_random_kstack_offset(get_random_u16());
 }

 static inline bool has_syscall_work(unsigned long flags)
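
The entropy arithmetic in the new comment can be checked with a small
standalone sketch; the mask values below are written out for illustration and
are assumptions of this example, not taken from the kernel headers::

  #include <stdint.h>
  #include <stdio.h>

  int main(void)
  {
          uint16_t rnd = 0xABCD;          /* stand-in for get_random_u16() */
          uint32_t off = rnd & 0x3FF;     /* KSTACK_OFFSET_MAX(): keep 10 bits */
          uint32_t sp  = off & ~0xFu;     /* 16-byte SP alignment drops 4 bits */

          /* the 6 surviving bits are SP[9:4] */
          printf("offset=0x%03x aligned=0x%03x\n", off, sp);
          return 0;
  }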
@@ -6,6 +6,7 @@
  * Author(s): Michael Holzheu <holzheu@linux.vnet.ibm.com>
  */

+#include <linux/security.h>
 #include <linux/slab.h>
 #include "hypfs.h"

@@ -64,24 +65,29 @@ static long dbfs_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
	long rc;

	mutex_lock(&df->lock);
	if (df->unlocked_ioctl)
		rc = df->unlocked_ioctl(file, cmd, arg);
	else
		rc = -ENOTTY;
	mutex_unlock(&df->lock);
	return rc;
 }

-static const struct file_operations dbfs_ops = {
+static const struct file_operations dbfs_ops_ioctl = {
	.read		= dbfs_read,
	.llseek		= no_llseek,
	.unlocked_ioctl	= dbfs_ioctl,
 };

+static const struct file_operations dbfs_ops = {
+	.read		= dbfs_read,
+	.llseek		= no_llseek,
+};
+
 void hypfs_dbfs_create_file(struct hypfs_dbfs_file *df)
 {
-	df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df,
-					 &dbfs_ops);
+	const struct file_operations *fops = &dbfs_ops;
+
+	if (df->unlocked_ioctl && !security_locked_down(LOCKDOWN_DEBUGFS))
+		fops = &dbfs_ops_ioctl;
+	df->dentry = debugfs_create_file(df->name, 0400, dbfs_dir, df, fops);
	mutex_init(&df->lock);
 }
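
The pattern in this hunk, picking a reduced file_operations when the lockdown
LSM forbids debugfs ioctls, generalizes to other debugfs users. A minimal
hedged sketch (the ``demo_*`` names are hypothetical; ``security_locked_down()``
and ``debugfs_create_file()`` are the real APIs)::

  #include <linux/debugfs.h>
  #include <linux/security.h>

  static ssize_t demo_read(struct file *f, char __user *buf, size_t n, loff_t *pos);
  static long demo_ioctl(struct file *f, unsigned int cmd, unsigned long arg);

  static const struct file_operations demo_fops_ro = {
          .read = demo_read,
  };

  static const struct file_operations demo_fops_ioctl = {
          .read           = demo_read,
          .unlocked_ioctl = demo_ioctl,
  };

  static void demo_create(struct dentry *dir, void *data)
  {
          const struct file_operations *fops = &demo_fops_ro;

          /* expose the ioctl only when debugfs is not locked down */
          if (!security_locked_down(LOCKDOWN_DEBUGFS))
                  fops = &demo_fops_ioctl;
          debugfs_create_file("demo", 0400, dir, data, fops);
  }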
@@ -55,7 +55,7 @@ static __always_inline void arch_exit_to_user_mode(void)
 static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
						  unsigned long ti_work)
 {
-	choose_random_kstack_offset(get_tod_clock_fast() & 0xff);
+	choose_random_kstack_offset(get_tod_clock_fast());
 }

 #define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare

arch/x86/Kconfig (154 lines changed)
@@ -441,7 +441,7 @@ config SMP
	  Y to "Enhanced Real Time Clock Support", below. The "Advanced Power
	  Management" code will be disabled if you say Y here.

-	  See also <file:Documentation/x86/i386/IO-APIC.rst>,
+	  See also <file:Documentation/arch/x86/i386/IO-APIC.rst>,
	  <file:Documentation/admin-guide/lockup-watchdogs.rst> and the SMP-HOWTO available at
	  <http://www.tldp.org/docs.html#howto>.

@@ -1318,6 +1318,7 @@ config X86_REBOOTFIXUPS
 config MICROCODE
	def_bool y
	depends on CPU_SUP_AMD || CPU_SUP_INTEL
+	select CRYPTO_LIB_SHA256 if CPU_SUP_AMD

 config MICROCODE_INITRD32
	def_bool y
@@ -1503,7 +1504,7 @@ config X86_5LEVEL
	  A kernel with the option enabled can be booted on machines that
	  support 4- or 5-level paging.

-	  See Documentation/x86/x86_64/5level-paging.rst for more
+	  See Documentation/arch/x86/x86_64/5level-paging.rst for more
	  information.

	  Say N if unsure.
@@ -1757,7 +1758,7 @@ config MTRR
	  You can safely say Y even if your machine doesn't have MTRRs, you'll
	  just add about 9 KB to your kernel.

-	  See <file:Documentation/x86/mtrr.rst> for more information.
+	  See <file:Documentation/arch/x86/mtrr.rst> for more information.

 config MTRR_SANITIZER
	def_bool y
@@ -2494,7 +2495,7 @@ config MITIGATION_PAGE_TABLE_ISOLATION
	  ensuring that the majority of kernel addresses are not mapped
	  into userspace.

-	  See Documentation/x86/pti.rst for more details.
+	  See Documentation/arch/x86/pti.rst for more details.

 config MITIGATION_RETPOLINE
	bool "Avoid speculative indirect branches in kernel"
@@ -2560,7 +2561,8 @@ config MITIGATION_IBPB_ENTRY
	depends on CPU_SUP_AMD && X86_64
	default y
	help
-	  Compile the kernel with support for the retbleed=ibpb mitigation.
+	  Compile the kernel with support for the retbleed=ibpb and
+	  spec_rstack_overflow={ibpb,ibpb-vmexit} mitigations.

 config MITIGATION_IBRS_ENTRY
	bool "Enable IBRS on kernel entry"
@@ -2588,24 +2590,15 @@ config MITIGATION_SLS
	  against straight line speculation. The kernel image might be slightly
	  larger.

-config MITIGATION_GDS_FORCE
-	bool "Force GDS Mitigation"
+config MITIGATION_GDS
+	bool "Mitigate Gather Data Sampling"
	depends on CPU_SUP_INTEL
-	default n
+	default y
	help
-	  Gather Data Sampling (GDS) is a hardware vulnerability which allows
-	  unprivileged speculative access to data which was previously stored in
-	  vector registers.
-
-	  This option is equivalent to setting gather_data_sampling=force on the
-	  command line. The microcode mitigation is used if present, otherwise
-	  AVX is disabled as a mitigation. On affected systems that are missing
-	  the microcode any userspace code that unconditionally uses AVX will
-	  break with this option set.
-
-	  Setting this option on systems not vulnerable to GDS has no effect.
-
-	  If in doubt, say N.
+	  Enable mitigation for Gather Data Sampling (GDS). GDS is a hardware
+	  vulnerability which allows unprivileged speculative access to data
+	  which was previously stored in vector registers. The attacker uses gather
+	  instructions to infer the stale vector register data.

 config MITIGATION_RFDS
	bool "RFDS Mitigation"
@@ -2639,6 +2632,125 @@ config MITIGATION_ITS
	  disabled, mitigation cannot be enabled via cmdline.
	  See <file:Documentation/admin-guide/hw-vuln/indirect-target-selection.rst>

+config MITIGATION_MDS
+	bool "Mitigate Microarchitectural Data Sampling (MDS) hardware bug"
+	depends on CPU_SUP_INTEL
+	default y
+	help
+	  Enable mitigation for Microarchitectural Data Sampling (MDS). MDS is
+	  a hardware vulnerability which allows unprivileged speculative access
+	  to data which is available in various CPU internal buffers.
+	  See also <file:Documentation/admin-guide/hw-vuln/mds.rst>
+
+config MITIGATION_TAA
+	bool "Mitigate TSX Asynchronous Abort (TAA) hardware bug"
+	depends on CPU_SUP_INTEL
+	default y
+	help
+	  Enable mitigation for TSX Asynchronous Abort (TAA). TAA is a hardware
+	  vulnerability that allows unprivileged speculative access to data
+	  which is available in various CPU internal buffers by using
+	  asynchronous aborts within an Intel TSX transactional region.
+	  See also <file:Documentation/admin-guide/hw-vuln/tsx_async_abort.rst>
+
+config MITIGATION_MMIO_STALE_DATA
+	bool "Mitigate MMIO Stale Data hardware bug"
+	depends on CPU_SUP_INTEL
+	default y
+	help
+	  Enable mitigation for MMIO Stale Data hardware bugs. Processor MMIO
+	  Stale Data Vulnerabilities are a class of memory-mapped I/O (MMIO)
+	  vulnerabilities that can expose data. The vulnerabilities require the
+	  attacker to have access to MMIO.
+	  See also
+	  <file:Documentation/admin-guide/hw-vuln/processor_mmio_stale_data.rst>
+
+config MITIGATION_L1TF
+	bool "Mitigate L1 Terminal Fault (L1TF) hardware bug"
+	depends on CPU_SUP_INTEL
+	default y
+	help
+	  Mitigate L1 Terminal Fault (L1TF) hardware bug. L1 Terminal Fault is a
+	  hardware vulnerability which allows unprivileged speculative access to data
+	  available in the Level 1 Data Cache.
+	  See <file:Documentation/admin-guide/hw-vuln/l1tf.rst>
+
+config MITIGATION_RETBLEED
+	bool "Mitigate RETBleed hardware bug"
+	depends on (CPU_SUP_INTEL && MITIGATION_SPECTRE_V2) || MITIGATION_UNRET_ENTRY || MITIGATION_IBPB_ENTRY
+	default y
+	help
+	  Enable mitigation for RETBleed (Arbitrary Speculative Code Execution
+	  with Return Instructions) vulnerability. RETBleed is a speculative
+	  execution attack which takes advantage of microarchitectural behavior
+	  in many modern microprocessors, similar to Spectre v2. An
+	  unprivileged attacker can use these flaws to bypass conventional
+	  memory security restrictions to gain read access to privileged memory
+	  that would otherwise be inaccessible.
+
+config MITIGATION_SPECTRE_V1
+	bool "Mitigate SPECTRE V1 hardware bug"
+	default y
+	help
+	  Enable mitigation for Spectre V1 (Bounds Check Bypass). Spectre V1 is a
+	  class of side channel attacks that takes advantage of speculative
+	  execution that bypasses conditional branch instructions used for
+	  memory access bounds check.
+	  See also <file:Documentation/admin-guide/hw-vuln/spectre.rst>
+
+config MITIGATION_SPECTRE_V2
+	bool "Mitigate SPECTRE V2 hardware bug"
+	default y
+	help
+	  Enable mitigation for Spectre V2 (Branch Target Injection). Spectre
+	  V2 is a class of side channel attacks that takes advantage of
+	  indirect branch predictors inside the processor. In Spectre variant 2
+	  attacks, the attacker can steer speculative indirect branches in the
+	  victim to gadget code by poisoning the branch target buffer of a CPU
+	  used for predicting indirect branch addresses.
+	  See also <file:Documentation/admin-guide/hw-vuln/spectre.rst>
+
+config MITIGATION_SRBDS
+	bool "Mitigate Special Register Buffer Data Sampling (SRBDS) hardware bug"
+	depends on CPU_SUP_INTEL
+	default y
+	help
+	  Enable mitigation for Special Register Buffer Data Sampling (SRBDS).
+	  SRBDS is a hardware vulnerability that allows Microarchitectural Data
+	  Sampling (MDS) techniques to infer values returned from special
+	  register accesses. An unprivileged user can extract values returned
+	  from RDRAND and RDSEED executed on another core or sibling thread
+	  using MDS techniques.
+	  See also
+	  <file:Documentation/admin-guide/hw-vuln/special-register-buffer-data-sampling.rst>
+
+config MITIGATION_SSB
+	bool "Mitigate Speculative Store Bypass (SSB) hardware bug"
+	default y
+	help
+	  Enable mitigation for Speculative Store Bypass (SSB). SSB is a
+	  hardware security vulnerability and its exploitation takes advantage
+	  of speculative execution in a similar way to the Meltdown and Spectre
+	  security vulnerabilities.
+
+config MITIGATION_TSA
+	bool "Mitigate Transient Scheduler Attacks"
+	depends on CPU_SUP_AMD
+	default y
+	help
+	  Enable mitigation for Transient Scheduler Attacks. TSA is a hardware
+	  security vulnerability on AMD CPUs which can lead to forwarding of
+	  invalid info to subsequent instructions and thus can affect their
+	  timing and thereby cause a leakage.
+
+config MITIGATION_VMSCAPE
+	bool "Mitigate VMSCAPE"
+	depends on KVM
+	default y
+	help
+	  Enable mitigation for VMSCAPE attacks. VMSCAPE is a hardware security
+	  vulnerability on Intel and AMD CPUs that may allow a guest to do
+	  Spectre v2 style attacks on userspace hypervisor.
 endif

 config ARCH_HAS_ADD_PAGES
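
To see which of these new per-vulnerability switches ended up enabled in a
given build, the packaged config can be grepped; an illustrative check::

  # grep -E 'CONFIG_MITIGATION_(MDS|TAA|L1TF|RETBLEED|SRBDS|SSB|TSA|VMSCAPE)=' \
        /boot/config-"$(uname -r)"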
@@ -97,7 +97,7 @@ config IOMMU_DEBUG
	  code. When you use it make sure you have a big enough
	  IOMMU/AGP aperture. Most of the options enabled by this can
	  be set more finegrained using the iommu= command line
-	  options. See Documentation/x86/x86_64/boot-options.rst for more
+	  options. See Documentation/arch/x86/x86_64/boot-options.rst for more
	  details.

 config IOMMU_LEAK

@@ -28,20 +28,20 @@ EXPORT_SYMBOL_GPL(entry_ibpb);

 /*
  * Define the VERW operand that is disguised as entry code so that
- * it can be referenced with KPTI enabled. This ensure VERW can be
+ * it can be referenced with KPTI enabled. This ensures VERW can be
  * used late in exit-to-user path after page tables are switched.
  */
 .pushsection .entry.text, "ax"

 .align L1_CACHE_BYTES, 0xcc
-SYM_CODE_START_NOALIGN(mds_verw_sel)
+SYM_CODE_START_NOALIGN(x86_verw_sel)
	UNWIND_HINT_UNDEFINED
	ANNOTATE_NOENDBR
	.word __KERNEL_DS
 .align L1_CACHE_BYTES, 0xcc
-SYM_CODE_END(mds_verw_sel);
+SYM_CODE_END(x86_verw_sel);
 /* For KVM */
-EXPORT_SYMBOL_GPL(mds_verw_sel);
+EXPORT_SYMBOL_GPL(x86_verw_sel);

 .popsection

@@ -8,7 +8,7 @@
 *
 * entry.S contains the system-call and fault low-level handling routines.
 *
- * Some of this is documented in Documentation/x86/entry_64.rst
+ * Some of this is documented in Documentation/arch/x86/entry_64.rst
 *
 * A note on terminology:
 * - iret frame: Architecture defined interrupt frame from SS to RIP

@@ -264,7 +264,16 @@ struct x86_cpu_desc {
	.x86_microcode_rev	= (revision),			\
 }

+#define AMD_CPU_DESC(fam, model, stepping, revision) {		\
+	.x86_family		= (fam),			\
+	.x86_vendor		= X86_VENDOR_AMD,		\
+	.x86_model		= (model),			\
+	.x86_stepping		= (stepping),			\
+	.x86_microcode_rev	= (revision),			\
+}
+
 extern const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match);
 extern bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table);
+extern bool x86_match_min_microcode_rev(const struct x86_cpu_id *table);

 #endif /* _ASM_X86_CPU_DEVICE_ID */
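
A hedged example of how the new AMD_CPU_DESC() macro might be used to build a
minimum-microcode table for x86_cpu_has_min_microcode_rev(); the
family/model/stepping/revision values and enable_feature() are made up for
illustration::

  static const struct x86_cpu_desc amd_min_ucode[] = {
          AMD_CPU_DESC(0x17, 0x31, 0x0, 0x0830107a),   /* hypothetical entry */
          AMD_CPU_DESC(0x19, 0x01, 0x1, 0x0a0011ce),   /* hypothetical entry */
          {},
  };

  /* true if the boot CPU's microcode is at least the table revision */
  if (x86_cpu_has_min_microcode_rev(amd_min_ucode))
          enable_feature();                            /* placeholder */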
@@ -20,170 +20,170 @@

 /*
  * Note: If the comment begins with a quoted string, that string is used
- * in /proc/cpuinfo instead of the macro name. If the string is "",
- * this feature bit is not displayed in /proc/cpuinfo at all.
+ * in /proc/cpuinfo instead of the macro name. Otherwise, this feature
+ * bit is not displayed in /proc/cpuinfo at all.
  *
  * When adding new features here that depend on other features,
  * please update the table in kernel/cpu/cpuid-deps.c as well.
  */

 /* Intel-defined CPU features, CPUID level 0x00000001 (EDX), word 0 */
-#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
-#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
-#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
-#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
-#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
-#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
-#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
-#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
-#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
-#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
-#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
-#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
-#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
-#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
-#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions (plus FCMOVcc, FCOMI with FPU) */
-#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
-#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
-#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
-#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
+#define X86_FEATURE_FPU ( 0*32+ 0) /* "fpu" Onboard FPU */
+#define X86_FEATURE_VME ( 0*32+ 1) /* "vme" Virtual Mode Extensions */
+#define X86_FEATURE_DE ( 0*32+ 2) /* "de" Debugging Extensions */
+#define X86_FEATURE_PSE ( 0*32+ 3) /* "pse" Page Size Extensions */
+#define X86_FEATURE_TSC ( 0*32+ 4) /* "tsc" Time Stamp Counter */
+#define X86_FEATURE_MSR ( 0*32+ 5) /* "msr" Model-Specific Registers */
+#define X86_FEATURE_PAE ( 0*32+ 6) /* "pae" Physical Address Extensions */
+#define X86_FEATURE_MCE ( 0*32+ 7) /* "mce" Machine Check Exception */
+#define X86_FEATURE_CX8 ( 0*32+ 8) /* "cx8" CMPXCHG8 instruction */
+#define X86_FEATURE_APIC ( 0*32+ 9) /* "apic" Onboard APIC */
+#define X86_FEATURE_SEP ( 0*32+11) /* "sep" SYSENTER/SYSEXIT */
+#define X86_FEATURE_MTRR ( 0*32+12) /* "mtrr" Memory Type Range Registers */
+#define X86_FEATURE_PGE ( 0*32+13) /* "pge" Page Global Enable */
+#define X86_FEATURE_MCA ( 0*32+14) /* "mca" Machine Check Architecture */
+#define X86_FEATURE_CMOV ( 0*32+15) /* "cmov" CMOV instructions (plus FCMOVcc, FCOMI with FPU) */
+#define X86_FEATURE_PAT ( 0*32+16) /* "pat" Page Attribute Table */
+#define X86_FEATURE_PSE36 ( 0*32+17) /* "pse36" 36-bit PSEs */
+#define X86_FEATURE_PN ( 0*32+18) /* "pn" Processor serial number */
+#define X86_FEATURE_CLFLUSH ( 0*32+19) /* "clflush" CLFLUSH instruction */
 #define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
-#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
-#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
-#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
+#define X86_FEATURE_ACPI ( 0*32+22) /* "acpi" ACPI via MSR */
+#define X86_FEATURE_MMX ( 0*32+23) /* "mmx" Multimedia Extensions */
+#define X86_FEATURE_FXSR ( 0*32+24) /* "fxsr" FXSAVE/FXRSTOR, CR4.OSFXSR */
 #define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
 #define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
 #define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
-#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
+#define X86_FEATURE_HT ( 0*32+28) /* "ht" Hyper-Threading */
 #define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
-#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
-#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
+#define X86_FEATURE_IA64 ( 0*32+30) /* "ia64" IA-64 processor */
+#define X86_FEATURE_PBE ( 0*32+31) /* "pbe" Pending Break Enable */

 /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
 /* Don't duplicate feature flags which are redundant with Intel! */
-#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
-#define X86_FEATURE_MP ( 1*32+19) /* MP Capable */
-#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
-#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
-#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
+#define X86_FEATURE_SYSCALL ( 1*32+11) /* "syscall" SYSCALL/SYSRET */
+#define X86_FEATURE_MP ( 1*32+19) /* "mp" MP Capable */
+#define X86_FEATURE_NX ( 1*32+20) /* "nx" Execute Disable */
+#define X86_FEATURE_MMXEXT ( 1*32+22) /* "mmxext" AMD MMX extensions */
+#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* "fxsr_opt" FXSAVE/FXRSTOR optimizations */
 #define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
-#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
-#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64, 64-bit support) */
-#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow extensions */
-#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow */
+#define X86_FEATURE_RDTSCP ( 1*32+27) /* "rdtscp" RDTSCP */
+#define X86_FEATURE_LM ( 1*32+29) /* "lm" Long Mode (x86-64, 64-bit support) */
+#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* "3dnowext" AMD 3DNow extensions */
+#define X86_FEATURE_3DNOW ( 1*32+31) /* "3dnow" 3DNow */

 /* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
-#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
-#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
-#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
+#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* "recovery" CPU in recovery mode */
+#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* "longrun" Longrun power control */
+#define X86_FEATURE_LRTI ( 2*32+ 3) /* "lrti" LongRun table interface */

 /* Other features, Linux-defined mapping, word 3 */
 /* This range is used for feature bits which conflict or are synthesized */
-#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
-#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
-#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
-#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
-#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
-#define X86_FEATURE_ZEN5 ( 3*32+ 5) /* "" CPU based on Zen5 microarchitecture */
-#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
-#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
-#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
-#define X86_FEATURE_UP ( 3*32+ 9) /* SMP kernel running on UP */
-#define X86_FEATURE_ART ( 3*32+10) /* Always running timer (ART) */
-#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
-#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
-#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
-#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */
-#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
-#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
-#define X86_FEATURE_AMD_LBR_V2 ( 3*32+17) /* AMD Last Branch Record Extension Version 2 */
-#define X86_FEATURE_CLEAR_CPU_BUF ( 3*32+18) /* "" Clear CPU buffers using VERW */
-#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
-#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
-#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
-#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* CPU topology enum extensions */
-#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
-#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
-#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */
-#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* Extended APICID (8 bits) */
-#define X86_FEATURE_AMD_DCM ( 3*32+27) /* AMD multi-node processor */
-#define X86_FEATURE_APERFMPERF ( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
-#define X86_FEATURE_RAPL ( 3*32+29) /* AMD/Hygon RAPL interface */
-#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
-#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
+#define X86_FEATURE_CXMMX ( 3*32+ 0) /* "cxmmx" Cyrix MMX extensions */
+#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* "k6_mtrr" AMD K6 nonstandard MTRRs */
+#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* "cyrix_arr" Cyrix ARRs (= MTRRs) */
+#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* "centaur_mcr" Centaur MCRs (= MTRRs) */
+#define X86_FEATURE_K8 ( 3*32+ 4) /* Opteron, Athlon64 */
+#define X86_FEATURE_ZEN5 ( 3*32+ 5) /* CPU based on Zen5 microarchitecture */
+#define X86_FEATURE_P3 ( 3*32+ 6) /* P3 */
+#define X86_FEATURE_P4 ( 3*32+ 7) /* P4 */
+#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* "constant_tsc" TSC ticks at a constant rate */
+#define X86_FEATURE_UP ( 3*32+ 9) /* "up" SMP kernel running on UP */
+#define X86_FEATURE_ART ( 3*32+10) /* "art" Always running timer (ART) */
+#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* "arch_perfmon" Intel Architectural PerfMon */
+#define X86_FEATURE_PEBS ( 3*32+12) /* "pebs" Precise-Event Based Sampling */
+#define X86_FEATURE_BTS ( 3*32+13) /* "bts" Branch Trace Store */
+#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* syscall in IA32 userspace */
+#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* sysenter in IA32 userspace */
+#define X86_FEATURE_REP_GOOD ( 3*32+16) /* "rep_good" REP microcode works well */
+#define X86_FEATURE_AMD_LBR_V2 ( 3*32+17) /* "amd_lbr_v2" AMD Last Branch Record Extension Version 2 */
+#define X86_FEATURE_CLEAR_CPU_BUF ( 3*32+18) /* Clear CPU buffers using VERW */
+#define X86_FEATURE_ACC_POWER ( 3*32+19) /* "acc_power" AMD Accumulated Power Mechanism */
+#define X86_FEATURE_NOPL ( 3*32+20) /* "nopl" The NOPL (0F 1F) instructions */
+#define X86_FEATURE_ALWAYS ( 3*32+21) /* Always-present feature */
+#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* "xtopology" CPU topology enum extensions */
+#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* "tsc_reliable" TSC is known to be reliable */
+#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* "nonstop_tsc" TSC does not stop in C states */
+#define X86_FEATURE_CPUID ( 3*32+25) /* "cpuid" CPU has CPUID instruction itself */
+#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* "extd_apicid" Extended APICID (8 bits) */
+#define X86_FEATURE_AMD_DCM ( 3*32+27) /* "amd_dcm" AMD multi-node processor */
+#define X86_FEATURE_APERFMPERF ( 3*32+28) /* "aperfmperf" P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
+#define X86_FEATURE_RAPL ( 3*32+29) /* "rapl" AMD/Hygon RAPL interface */
+#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* "nonstop_tsc_s3" TSC doesn't stop in S3 state */
+#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* "tsc_known_freq" TSC has known frequency */

 /* Intel-defined CPU features, CPUID level 0x00000001 (ECX), word 4 */
 #define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
-#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
-#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
+#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* "pclmulqdq" PCLMULQDQ instruction */
+#define X86_FEATURE_DTES64 ( 4*32+ 2) /* "dtes64" 64-bit Debug Store */
 #define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" MONITOR/MWAIT support */
 #define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL-qualified (filtered) Debug Store */
-#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
-#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer Mode eXtensions */
-#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
-#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
-#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
-#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
-#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
-#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
-#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B instruction */
-#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
-#define X86_FEATURE_PDCM ( 4*32+15) /* Perf/Debug Capabilities MSR */
-#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
-#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
+#define X86_FEATURE_VMX ( 4*32+ 5) /* "vmx" Hardware virtualization */
+#define X86_FEATURE_SMX ( 4*32+ 6) /* "smx" Safer Mode eXtensions */
+#define X86_FEATURE_EST ( 4*32+ 7) /* "est" Enhanced SpeedStep */
+#define X86_FEATURE_TM2 ( 4*32+ 8) /* "tm2" Thermal Monitor 2 */
+#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* "ssse3" Supplemental SSE-3 */
+#define X86_FEATURE_CID ( 4*32+10) /* "cid" Context ID */
+#define X86_FEATURE_SDBG ( 4*32+11) /* "sdbg" Silicon Debug */
+#define X86_FEATURE_FMA ( 4*32+12) /* "fma" Fused multiply-add */
+#define X86_FEATURE_CX16 ( 4*32+13) /* "cx16" CMPXCHG16B instruction */
+#define X86_FEATURE_XTPR ( 4*32+14) /* "xtpr" Send Task Priority Messages */
+#define X86_FEATURE_PDCM ( 4*32+15) /* "pdcm" Perf/Debug Capabilities MSR */
+#define X86_FEATURE_PCID ( 4*32+17) /* "pcid" Process Context Identifiers */
+#define X86_FEATURE_DCA ( 4*32+18) /* "dca" Direct Cache Access */
 #define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
 #define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
-#define X86_FEATURE_X2APIC ( 4*32+21) /* X2APIC */
-#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
-#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
-#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* TSC deadline timer */
-#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
-#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV instructions */
-#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE instruction enabled in the OS */
-#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
-#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit FP conversions */
-#define X86_FEATURE_RDRAND ( 4*32+30) /* RDRAND instruction */
-#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
+#define X86_FEATURE_X2APIC ( 4*32+21) /* "x2apic" X2APIC */
+#define X86_FEATURE_MOVBE ( 4*32+22) /* "movbe" MOVBE instruction */
+#define X86_FEATURE_POPCNT ( 4*32+23) /* "popcnt" POPCNT instruction */
+#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* "tsc_deadline_timer" TSC deadline timer */
+#define X86_FEATURE_AES ( 4*32+25) /* "aes" AES instructions */
+#define X86_FEATURE_XSAVE ( 4*32+26) /* "xsave" XSAVE/XRSTOR/XSETBV/XGETBV instructions */
+#define X86_FEATURE_OSXSAVE ( 4*32+27) /* XSAVE instruction enabled in the OS */
+#define X86_FEATURE_AVX ( 4*32+28) /* "avx" Advanced Vector Extensions */
+#define X86_FEATURE_F16C ( 4*32+29) /* "f16c" 16-bit FP conversions */
+#define X86_FEATURE_RDRAND ( 4*32+30) /* "rdrand" RDRAND instruction */
+#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* "hypervisor" Running on a hypervisor */

 /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
 #define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
 #define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
 #define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
 #define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
-#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
-#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
-#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
-#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
-#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
-#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
+#define X86_FEATURE_ACE2 ( 5*32+ 8) /* "ace2" Advanced Cryptography Engine v2 */
+#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* "ace2_en" ACE v2 enabled */
+#define X86_FEATURE_PHE ( 5*32+10) /* "phe" PadLock Hash Engine */
+#define X86_FEATURE_PHE_EN ( 5*32+11) /* "phe_en" PHE enabled */
+#define X86_FEATURE_PMM ( 5*32+12) /* "pmm" PadLock Montgomery Multiplier */
+#define X86_FEATURE_PMM_EN ( 5*32+13) /* "pmm_en" PMM enabled */

/* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */
|
||||
#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
|
||||
#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
|
||||
#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure Virtual Machine */
|
||||
#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
|
||||
#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
|
||||
#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
|
||||
#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
|
||||
#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
|
||||
#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
|
||||
#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
|
||||
#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
|
||||
#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
|
||||
#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
|
||||
#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
|
||||
#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
|
||||
#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
|
||||
#define X86_FEATURE_TCE ( 6*32+17) /* Translation Cache Extension */
|
||||
#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
|
||||
#define X86_FEATURE_TBM ( 6*32+21) /* Trailing Bit Manipulations */
|
||||
#define X86_FEATURE_TOPOEXT ( 6*32+22) /* Topology extensions CPUID leafs */
|
||||
#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* Core performance counter extensions */
|
||||
#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
|
||||
#define X86_FEATURE_BPEXT ( 6*32+26) /* Data breakpoint extension */
|
||||
#define X86_FEATURE_PTSC ( 6*32+27) /* Performance time-stamp counter */
|
||||
#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */
|
||||
#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX instructions) */
|
||||
#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* "lahf_lm" LAHF/SAHF in long mode */
|
||||
#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* "cmp_legacy" If yes HyperThreading not valid */
|
||||
#define X86_FEATURE_SVM ( 6*32+ 2) /* "svm" Secure Virtual Machine */
|
||||
#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* "extapic" Extended APIC space */
|
||||
#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* "cr8_legacy" CR8 in 32-bit mode */
|
||||
#define X86_FEATURE_ABM ( 6*32+ 5) /* "abm" Advanced bit manipulation */
|
||||
#define X86_FEATURE_SSE4A ( 6*32+ 6) /* "sse4a" SSE-4A */
|
||||
#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* "misalignsse" Misaligned SSE mode */
|
||||
#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* "3dnowprefetch" 3DNow prefetch instructions */
|
||||
#define X86_FEATURE_OSVW ( 6*32+ 9) /* "osvw" OS Visible Workaround */
|
||||
#define X86_FEATURE_IBS ( 6*32+10) /* "ibs" Instruction Based Sampling */
|
||||
#define X86_FEATURE_XOP ( 6*32+11) /* "xop" Extended AVX instructions */
|
||||
#define X86_FEATURE_SKINIT ( 6*32+12) /* "skinit" SKINIT/STGI instructions */
|
||||
#define X86_FEATURE_WDT ( 6*32+13) /* "wdt" Watchdog timer */
|
||||
#define X86_FEATURE_LWP ( 6*32+15) /* "lwp" Light Weight Profiling */
|
||||
#define X86_FEATURE_FMA4 ( 6*32+16) /* "fma4" 4 operands MAC instructions */
|
||||
#define X86_FEATURE_TCE ( 6*32+17) /* "tce" Translation Cache Extension */
|
||||
#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* "nodeid_msr" NodeId MSR */
|
||||
#define X86_FEATURE_TBM ( 6*32+21) /* "tbm" Trailing Bit Manipulations */
|
||||
#define X86_FEATURE_TOPOEXT ( 6*32+22) /* "topoext" Topology extensions CPUID leafs */
|
||||
#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* "perfctr_core" Core performance counter extensions */
|
||||
#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* "perfctr_nb" NB performance counter extensions */
|
||||
#define X86_FEATURE_BPEXT ( 6*32+26) /* "bpext" Data breakpoint extension */
|
||||
#define X86_FEATURE_PTSC ( 6*32+27) /* "ptsc" Performance time-stamp counter */
|
||||
#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* "perfctr_llc" Last Level Cache performance counter extensions */
|
||||
#define X86_FEATURE_MWAITX ( 6*32+29) /* "mwaitx" MWAIT extension (MONITORX/MWAITX instructions) */
|
||||
|
||||
/*
 * Auxiliary flags: Linux defined - For features scattered in various
@ -191,93 +191,93 @@
 *
 * Reuse free bits when adding new feature flags!
 */
#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT instructions */
#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */
#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */
#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
#define X86_FEATURE_TDX_HOST_PLATFORM ( 7*32+ 7) /* Platform supports being a TDX host */
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
#define X86_FEATURE_XCOMPACTED ( 7*32+10) /* "" Use compacted XSTATE (XSAVES or XSAVEC) */
#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
#define X86_FEATURE_KERNEL_IBRS ( 7*32+12) /* "" Set/clear IBRS on kernel entry/exit */
#define X86_FEATURE_RSB_VMEXIT ( 7*32+13) /* "" Fill RSB on VM-Exit */
#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
#define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */
#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
#define X86_FEATURE_PERFMON_V2 ( 7*32+20) /* AMD Performance Monitoring Version 2 */
#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation via LS_CFG MSR */
#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ZEN ( 7*32+28) /* "" Generic flag for all Zen and newer */
#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
#define X86_FEATURE_IBRS_ENHANCED ( 7*32+30) /* Enhanced IBRS */
#define X86_FEATURE_MSR_IA32_FEAT_CTL ( 7*32+31) /* "" MSR IA32_FEAT_CTL configured */
#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* "ring3mwait" Ring 3 MONITOR/MWAIT instructions */
#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* "cpuid_fault" Intel CPUID faulting */
#define X86_FEATURE_CPB ( 7*32+ 2) /* "cpb" AMD Core Performance Boost */
#define X86_FEATURE_EPB ( 7*32+ 3) /* "epb" IA32_ENERGY_PERF_BIAS support */
#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* "cat_l3" Cache Allocation Technology L3 */
#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* "cat_l2" Cache Allocation Technology L2 */
#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* "cdp_l3" Code and Data Prioritization L3 */
#define X86_FEATURE_TDX_HOST_PLATFORM ( 7*32+ 7) /* "tdx_host_platform" Platform supports being a TDX host */
#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* "hw_pstate" AMD HW-PState */
#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* "proc_feedback" AMD ProcFeedbackInterface */
#define X86_FEATURE_XCOMPACTED ( 7*32+10) /* Use compacted XSTATE (XSAVES or XSAVEC) */
#define X86_FEATURE_PTI ( 7*32+11) /* "pti" Kernel Page Table Isolation enabled */
#define X86_FEATURE_KERNEL_IBRS ( 7*32+12) /* Set/clear IBRS on kernel entry/exit */
#define X86_FEATURE_RSB_VMEXIT ( 7*32+13) /* Fill RSB on VM-Exit */
#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* "intel_ppin" Intel Processor Inventory Number */
#define X86_FEATURE_CDP_L2 ( 7*32+15) /* "cdp_l2" Code and Data Prioritization L2 */
#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* MSR SPEC_CTRL is implemented */
#define X86_FEATURE_SSBD ( 7*32+17) /* "ssbd" Speculative Store Bypass Disable */
#define X86_FEATURE_MBA ( 7*32+18) /* "mba" Memory Bandwidth Allocation */
#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */
#define X86_FEATURE_PERFMON_V2 ( 7*32+20) /* "perfmon_v2" AMD Performance Monitoring Version 2 */
#define X86_FEATURE_USE_IBPB ( 7*32+21) /* Indirect Branch Prediction Barrier enabled */
#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* Use IBRS during runtime firmware calls */
#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* Disable Speculative Store Bypass. */
#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* AMD SSBD implementation via LS_CFG MSR */
#define X86_FEATURE_IBRS ( 7*32+25) /* "ibrs" Indirect Branch Restricted Speculation */
#define X86_FEATURE_IBPB ( 7*32+26) /* "ibpb" Indirect Branch Prediction Barrier without a guaranteed RSB flush */
#define X86_FEATURE_STIBP ( 7*32+27) /* "stibp" Single Thread Indirect Branch Predictors */
#define X86_FEATURE_ZEN ( 7*32+28) /* Generic flag for all Zen and newer */
#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* L1TF workaround PTE inversion */
#define X86_FEATURE_IBRS_ENHANCED ( 7*32+30) /* "ibrs_enhanced" Enhanced IBRS */
#define X86_FEATURE_MSR_IA32_FEAT_CTL ( 7*32+31) /* MSR IA32_FEAT_CTL configured */
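Word 7 above is Linux-defined: the bits are set by kernel code (often mitigation selection) rather than copied from a CPUID leaf, but they are tested through the same accessors as hardware bits. A kernel-context sketch shown only to illustrate the accessor; the message text is made up:

    /* Illustrative: synthetic and CPUID-backed bits share one accessor. */
    static void report_kernel_ibrs_sketch(void)
    {
        if (boot_cpu_has(X86_FEATURE_KERNEL_IBRS))
            pr_info("IBRS is toggled on kernel entry/exit\n");
    }
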
/* Virtualization flags: Linux defined, word 8 */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 1) /* Intel FlexPriority */
#define X86_FEATURE_EPT ( 8*32+ 2) /* Intel Extended Page Table */
#define X86_FEATURE_VPID ( 8*32+ 3) /* Intel Virtual Processor ID */
#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* "tpr_shadow" Intel TPR Shadow */
#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 1) /* "flexpriority" Intel FlexPriority */
#define X86_FEATURE_EPT ( 8*32+ 2) /* "ept" Intel Extended Page Table */
#define X86_FEATURE_VPID ( 8*32+ 3) /* "vpid" Intel Virtual Processor ID */

#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer VMMCALL to VMCALL */
#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
#define X86_FEATURE_EPT_AD ( 8*32+17) /* Intel Extended Page Table access-dirty bit */
#define X86_FEATURE_VMCALL ( 8*32+18) /* "" Hypervisor supports the VMCALL instruction */
#define X86_FEATURE_VMW_VMMCALL ( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */
#define X86_FEATURE_PVUNLOCK ( 8*32+20) /* "" PV unlock function */
#define X86_FEATURE_VCPUPREEMPT ( 8*32+21) /* "" PV vcpu_is_preempted function */
#define X86_FEATURE_TDX_GUEST ( 8*32+22) /* Intel Trust Domain Extensions Guest */
#define X86_FEATURE_VMMCALL ( 8*32+15) /* "vmmcall" Prefer VMMCALL to VMCALL */
#define X86_FEATURE_XENPV ( 8*32+16) /* Xen paravirtual guest */
#define X86_FEATURE_EPT_AD ( 8*32+17) /* "ept_ad" Intel Extended Page Table access-dirty bit */
#define X86_FEATURE_VMCALL ( 8*32+18) /* Hypervisor supports the VMCALL instruction */
#define X86_FEATURE_VMW_VMMCALL ( 8*32+19) /* VMware prefers VMMCALL hypercall instruction */
#define X86_FEATURE_PVUNLOCK ( 8*32+20) /* PV unlock function */
#define X86_FEATURE_VCPUPREEMPT ( 8*32+21) /* PV vcpu_is_preempted function */
#define X86_FEATURE_TDX_GUEST ( 8*32+22) /* "tdx_guest" Intel Trust Domain Extensions Guest */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3B */
#define X86_FEATURE_SGX ( 9*32+ 2) /* Software Guard Extensions */
#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
#define X86_FEATURE_FDP_EXCPTN_ONLY ( 9*32+ 6) /* "" FPU data pointer updated only on x87 exceptions */
#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */
#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
#define X86_FEATURE_ZERO_FCS_FDS ( 9*32+13) /* "" Zero out FPU CS and FPU DS */
#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */
#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
#define X86_FEATURE_RDSEED ( 9*32+18) /* RDSEED instruction */
#define X86_FEATURE_ADX ( 9*32+19) /* ADCX and ADOX instructions */
#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
#define X86_FEATURE_INTEL_PT ( 9*32+25) /* Intel Processor Trace */
#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */
#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */
#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* "fsgsbase" RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* "tsc_adjust" TSC adjustment MSR 0x3B */
#define X86_FEATURE_SGX ( 9*32+ 2) /* "sgx" Software Guard Extensions */
#define X86_FEATURE_BMI1 ( 9*32+ 3) /* "bmi1" 1st group bit manipulation extensions */
#define X86_FEATURE_HLE ( 9*32+ 4) /* "hle" Hardware Lock Elision */
#define X86_FEATURE_AVX2 ( 9*32+ 5) /* "avx2" AVX2 instructions */
#define X86_FEATURE_FDP_EXCPTN_ONLY ( 9*32+ 6) /* FPU data pointer updated only on x87 exceptions */
#define X86_FEATURE_SMEP ( 9*32+ 7) /* "smep" Supervisor Mode Execution Protection */
#define X86_FEATURE_BMI2 ( 9*32+ 8) /* "bmi2" 2nd group bit manipulation extensions */
#define X86_FEATURE_ERMS ( 9*32+ 9) /* "erms" Enhanced REP MOVSB/STOSB instructions */
#define X86_FEATURE_INVPCID ( 9*32+10) /* "invpcid" Invalidate Processor Context ID */
#define X86_FEATURE_RTM ( 9*32+11) /* "rtm" Restricted Transactional Memory */
#define X86_FEATURE_CQM ( 9*32+12) /* "cqm" Cache QoS Monitoring */
#define X86_FEATURE_ZERO_FCS_FDS ( 9*32+13) /* Zero out FPU CS and FPU DS */
#define X86_FEATURE_MPX ( 9*32+14) /* "mpx" Memory Protection Extension */
#define X86_FEATURE_RDT_A ( 9*32+15) /* "rdt_a" Resource Director Technology Allocation */
#define X86_FEATURE_AVX512F ( 9*32+16) /* "avx512f" AVX-512 Foundation */
#define X86_FEATURE_AVX512DQ ( 9*32+17) /* "avx512dq" AVX-512 DQ (Double/Quad granular) Instructions */
#define X86_FEATURE_RDSEED ( 9*32+18) /* "rdseed" RDSEED instruction */
#define X86_FEATURE_ADX ( 9*32+19) /* "adx" ADCX and ADOX instructions */
#define X86_FEATURE_SMAP ( 9*32+20) /* "smap" Supervisor Mode Access Prevention */
#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* "avx512ifma" AVX-512 Integer Fused Multiply-Add instructions */
#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* "clflushopt" CLFLUSHOPT instruction */
#define X86_FEATURE_CLWB ( 9*32+24) /* "clwb" CLWB instruction */
#define X86_FEATURE_INTEL_PT ( 9*32+25) /* "intel_pt" Intel Processor Trace */
#define X86_FEATURE_AVX512PF ( 9*32+26) /* "avx512pf" AVX-512 Prefetch */
#define X86_FEATURE_AVX512ER ( 9*32+27) /* "avx512er" AVX-512 Exponential and Reciprocal */
#define X86_FEATURE_AVX512CD ( 9*32+28) /* "avx512cd" AVX-512 Conflict Detection */
#define X86_FEATURE_SHA_NI ( 9*32+29) /* "sha_ni" SHA1/SHA256 Instruction Extensions */
#define X86_FEATURE_AVX512BW ( 9*32+30) /* "avx512bw" AVX-512 BW (Byte/Word granular) Instructions */
#define X86_FEATURE_AVX512VL ( 9*32+31) /* "avx512vl" AVX-512 VL (128/256 Vector Length) Extensions */

/* Extended state features, CPUID level 0x0000000d:1 (EAX), word 10 */
#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT instruction */
#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC instruction */
#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */
#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */
#define X86_FEATURE_XFD (10*32+ 4) /* "" eXtended Feature Disabling */
#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* "xsaveopt" XSAVEOPT instruction */
#define X86_FEATURE_XSAVEC (10*32+ 1) /* "xsavec" XSAVEC instruction */
#define X86_FEATURE_XGETBV1 (10*32+ 2) /* "xgetbv1" XGETBV with ECX = 1 instruction */
#define X86_FEATURE_XSAVES (10*32+ 3) /* "xsaves" XSAVES/XRSTORS instructions */
#define X86_FEATURE_XFD (10*32+ 4) /* eXtended Feature Disabling */
/*
 * Extended auxiliary flags: Linux defined - for features scattered in various
@ -285,188 +285,192 @@
 *
 * Reuse free bits when adding new feature flags!
 */
#define X86_FEATURE_CQM_LLC (11*32+ 0) /* LLC QoS if 1 */
#define X86_FEATURE_CQM_OCCUP_LLC (11*32+ 1) /* LLC occupancy monitoring */
#define X86_FEATURE_CQM_MBM_TOTAL (11*32+ 2) /* LLC Total MBM monitoring */
#define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* LLC Local MBM monitoring */
#define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */
#define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
#define X86_FEATURE_SPLIT_LOCK_DETECT (11*32+ 6) /* #AC for split lock */
#define X86_FEATURE_PER_THREAD_MBA (11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */
#define X86_FEATURE_SGX1 (11*32+ 8) /* "" Basic SGX */
#define X86_FEATURE_SGX2 (11*32+ 9) /* "" SGX Enclave Dynamic Memory Management (EDMM) */
#define X86_FEATURE_ENTRY_IBPB (11*32+10) /* "" Issue an IBPB on kernel entry */
#define X86_FEATURE_RRSBA_CTRL (11*32+11) /* "" RET prediction control */
#define X86_FEATURE_RETPOLINE (11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
#define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* "" Use LFENCE for Spectre variant 2 */
#define X86_FEATURE_RETHUNK (11*32+14) /* "" Use REturn THUNK */
#define X86_FEATURE_UNRET (11*32+15) /* "" AMD BTB untrain return */
#define X86_FEATURE_USE_IBPB_FW (11*32+16) /* "" Use IBPB during runtime firmware calls */
#define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */
#define X86_FEATURE_SGX_EDECCSSA (11*32+18) /* "" SGX EDECCSSA user leaf function */
#define X86_FEATURE_CALL_DEPTH (11*32+19) /* "" Call depth tracking for RSB stuffing */
#define X86_FEATURE_SMBA (11*32+21) /* "" Slow Memory Bandwidth Allocation */
#define X86_FEATURE_BMEC (11*32+22) /* "" Bandwidth Monitoring Event Configuration */
#define X86_FEATURE_USER_SHSTK (11*32+23) /* Shadow stack support for user mode applications */


#define X86_FEATURE_MSR_TSX_CTRL (11*32+20) /* "" MSR IA32_TSX_CTRL (Intel) implemented */

#define X86_FEATURE_SRSO (11*32+24) /* "" AMD BTB untrain RETs */
#define X86_FEATURE_SRSO_ALIAS (11*32+25) /* "" AMD BTB untrain RETs through aliasing */
#define X86_FEATURE_IBPB_ON_VMEXIT (11*32+26) /* "" Issue an IBPB only on VMEXIT */
#define X86_FEATURE_APIC_MSRS_FENCE (11*32+27) /* "" IA32_TSC_DEADLINE and X2APIC MSRs need fencing */
#define X86_FEATURE_ZEN2 (11*32+28) /* "" CPU based on Zen2 microarchitecture */
#define X86_FEATURE_ZEN3 (11*32+29) /* "" CPU based on Zen3 microarchitecture */
#define X86_FEATURE_ZEN4 (11*32+30) /* "" CPU based on Zen4 microarchitecture */
#define X86_FEATURE_ZEN1 (11*32+31) /* "" CPU based on Zen1 microarchitecture */
#define X86_FEATURE_CQM_LLC (11*32+ 0) /* "cqm_llc" LLC QoS if 1 */
#define X86_FEATURE_CQM_OCCUP_LLC (11*32+ 1) /* "cqm_occup_llc" LLC occupancy monitoring */
#define X86_FEATURE_CQM_MBM_TOTAL (11*32+ 2) /* "cqm_mbm_total" LLC Total MBM monitoring */
#define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* "cqm_mbm_local" LLC Local MBM monitoring */
#define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* LFENCE in user entry SWAPGS path */
#define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* LFENCE in kernel entry SWAPGS path */
#define X86_FEATURE_SPLIT_LOCK_DETECT (11*32+ 6) /* "split_lock_detect" #AC for split lock */
#define X86_FEATURE_PER_THREAD_MBA (11*32+ 7) /* Per-thread Memory Bandwidth Allocation */
#define X86_FEATURE_SGX1 (11*32+ 8) /* Basic SGX */
#define X86_FEATURE_SGX2 (11*32+ 9) /* SGX Enclave Dynamic Memory Management (EDMM) */
#define X86_FEATURE_ENTRY_IBPB (11*32+10) /* Issue an IBPB on kernel entry */
#define X86_FEATURE_RRSBA_CTRL (11*32+11) /* RET prediction control */
#define X86_FEATURE_RETPOLINE (11*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */
#define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* Use LFENCE for Spectre variant 2 */
#define X86_FEATURE_RETHUNK (11*32+14) /* Use REturn THUNK */
#define X86_FEATURE_UNRET (11*32+15) /* AMD BTB untrain return */
#define X86_FEATURE_USE_IBPB_FW (11*32+16) /* Use IBPB during runtime firmware calls */
#define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* Fill RSB on VM exit when EIBRS is enabled */
#define X86_FEATURE_SGX_EDECCSSA (11*32+18) /* SGX EDECCSSA user leaf function */
#define X86_FEATURE_CALL_DEPTH (11*32+19) /* Call depth tracking for RSB stuffing */
#define X86_FEATURE_MSR_TSX_CTRL (11*32+20) /* MSR IA32_TSX_CTRL (Intel) implemented */
#define X86_FEATURE_SMBA (11*32+21) /* Slow Memory Bandwidth Allocation */
#define X86_FEATURE_BMEC (11*32+22) /* Bandwidth Monitoring Event Configuration */
#define X86_FEATURE_USER_SHSTK (11*32+23) /* "user_shstk" Shadow stack support for user mode applications */
#define X86_FEATURE_SRSO (11*32+24) /* AMD BTB untrain RETs */
#define X86_FEATURE_SRSO_ALIAS (11*32+25) /* AMD BTB untrain RETs through aliasing */
#define X86_FEATURE_IBPB_ON_VMEXIT (11*32+26) /* Issue an IBPB only on VMEXIT */
#define X86_FEATURE_APIC_MSRS_FENCE (11*32+27) /* IA32_TSC_DEADLINE and X2APIC MSRs need fencing */
#define X86_FEATURE_ZEN2 (11*32+28) /* CPU based on Zen2 microarchitecture */
#define X86_FEATURE_ZEN3 (11*32+29) /* CPU based on Zen3 microarchitecture */
#define X86_FEATURE_ZEN4 (11*32+30) /* CPU based on Zen4 microarchitecture */
#define X86_FEATURE_ZEN1 (11*32+31) /* CPU based on Zen1 microarchitecture */
/* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
#define X86_FEATURE_SHA512 (12*32+ 0) /* SHA512 instructions */
#define X86_FEATURE_SM3 (12*32+ 1) /* SM3 instructions */
#define X86_FEATURE_SM4 (12*32+ 2) /* SM4 instructions */
#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
#define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
#define X86_FEATURE_CMPCCXADD (12*32+ 7) /* "" CMPccXADD instructions */
#define X86_FEATURE_ARCH_PERFMON_EXT (12*32+ 8) /* "" Intel Architectural PerfMon Extension */
#define X86_FEATURE_FZRM (12*32+10) /* "" Fast zero-length REP MOVSB */
#define X86_FEATURE_FSRS (12*32+11) /* "" Fast short REP STOSB */
#define X86_FEATURE_FSRC (12*32+12) /* "" Fast short REP {CMPSB,SCASB} */
#define X86_FEATURE_LKGS (12*32+18) /* "" Load "kernel" (userspace) GS */
#define X86_FEATURE_WRMSRNS (12*32+19) /* "" Non-serializing WRMSR */
#define X86_FEATURE_AMX_FP16 (12*32+21) /* "" AMX fp16 Support */
#define X86_FEATURE_AVX_IFMA (12*32+23) /* "" Support for VPMADD52[H,L]UQ */
#define X86_FEATURE_LAM (12*32+26) /* Linear Address Masking */
#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* "avx_vnni" AVX VNNI instructions */
#define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* "avx512_bf16" AVX512 BFLOAT16 instructions */
#define X86_FEATURE_CMPCCXADD (12*32+ 7) /* CMPccXADD instructions */
#define X86_FEATURE_ARCH_PERFMON_EXT (12*32+ 8) /* Intel Architectural PerfMon Extension */
#define X86_FEATURE_FZRM (12*32+10) /* Fast zero-length REP MOVSB */
#define X86_FEATURE_FSRS (12*32+11) /* Fast short REP STOSB */
#define X86_FEATURE_FSRC (12*32+12) /* Fast short REP {CMPSB,SCASB} */
#define X86_FEATURE_LKGS (12*32+18) /* Load "kernel" (userspace) GS */
#define X86_FEATURE_WRMSRNS (12*32+19) /* Non-serializing WRMSR */
#define X86_FEATURE_AMX_FP16 (12*32+21) /* AMX fp16 Support */
#define X86_FEATURE_AVX_IFMA (12*32+23) /* Support for VPMADD52[H,L]UQ */
#define X86_FEATURE_LAM (12*32+26) /* "lam" Linear Address Masking */
/* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
#define X86_FEATURE_RDPRU (13*32+ 4) /* Read processor register at user level */
#define X86_FEATURE_WBNOINVD (13*32+ 9) /* WBNOINVD instruction */
#define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */
#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */
#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */
#define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* "" Single Thread Indirect Branch Predictors always-on preferred */
#define X86_FEATURE_AMD_PPIN (13*32+23) /* Protected Processor Inventory Number */
#define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */
#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
#define X86_FEATURE_CPPC (13*32+27) /* Collaborative Processor Performance Control */
#define X86_FEATURE_AMD_PSFD (13*32+28) /* "" Predictive Store Forwarding Disable */
#define X86_FEATURE_BTC_NO (13*32+29) /* "" Not vulnerable to Branch Type Confusion */
#define X86_FEATURE_BRS (13*32+31) /* Branch Sampling available */
#define X86_FEATURE_CLZERO (13*32+ 0) /* "clzero" CLZERO instruction */
#define X86_FEATURE_IRPERF (13*32+ 1) /* "irperf" Instructions Retired Count */
#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* "xsaveerptr" Always save/restore FP error pointers */
#define X86_FEATURE_RDPRU (13*32+ 4) /* "rdpru" Read processor register at user level */
#define X86_FEATURE_WBNOINVD (13*32+ 9) /* "wbnoinvd" WBNOINVD instruction */
#define X86_FEATURE_AMD_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
#define X86_FEATURE_AMD_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
#define X86_FEATURE_AMD_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
#define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* Single Thread Indirect Branch Predictors always-on preferred */
#define X86_FEATURE_AMD_PPIN (13*32+23) /* "amd_ppin" Protected Processor Inventory Number */
#define X86_FEATURE_AMD_SSBD (13*32+24) /* Speculative Store Bypass Disable */
#define X86_FEATURE_VIRT_SSBD (13*32+25) /* "virt_ssbd" Virtualized Speculative Store Bypass Disable */
#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* Speculative Store Bypass is fixed in hardware. */
#define X86_FEATURE_CPPC (13*32+27) /* "cppc" Collaborative Processor Performance Control */
#define X86_FEATURE_AMD_PSFD (13*32+28) /* Predictive Store Forwarding Disable */
#define X86_FEATURE_BTC_NO (13*32+29) /* Not vulnerable to Branch Type Confusion */
#define X86_FEATURE_AMD_IBPB_RET (13*32+30) /* IBPB clears return address predictor */
#define X86_FEATURE_BRS (13*32+31) /* "brs" Branch Sampling available */
/* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */
#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */
#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */
#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */
#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */
#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */
#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
#define X86_FEATURE_HWP_HIGHEST_PERF_CHANGE (14*32+15) /* "" HWP Highest perf change */
#define X86_FEATURE_HFI (14*32+19) /* Hardware Feedback Interface */
#define X86_FEATURE_DTHERM (14*32+ 0) /* "dtherm" Digital Thermal Sensor */
#define X86_FEATURE_IDA (14*32+ 1) /* "ida" Intel Dynamic Acceleration */
#define X86_FEATURE_ARAT (14*32+ 2) /* "arat" Always Running APIC Timer */
#define X86_FEATURE_PLN (14*32+ 4) /* "pln" Intel Power Limit Notification */
#define X86_FEATURE_PTS (14*32+ 6) /* "pts" Intel Package Thermal Status */
#define X86_FEATURE_HWP (14*32+ 7) /* "hwp" Intel Hardware P-states */
#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* "hwp_notify" HWP Notification */
#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* "hwp_act_window" HWP Activity Window */
#define X86_FEATURE_HWP_EPP (14*32+10) /* "hwp_epp" HWP Energy Perf. Preference */
#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* "hwp_pkg_req" HWP Package Level Request */
#define X86_FEATURE_HWP_HIGHEST_PERF_CHANGE (14*32+15) /* HWP Highest perf change */
#define X86_FEATURE_HFI (14*32+19) /* "hfi" Hardware Feedback Interface */
/* AMD SVM Feature Identification, CPUID level 0x8000000a (EDX), word 15 */
#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */
#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */
#define X86_FEATURE_NPT (15*32+ 0) /* "npt" Nested Page Table support */
#define X86_FEATURE_LBRV (15*32+ 1) /* "lbrv" LBR Virtualization support */
#define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */
#define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */
#define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */
#define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */
#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */
#define X86_FEATURE_X2AVIC (15*32+18) /* Virtual x2apic */
#define X86_FEATURE_V_SPEC_CTRL (15*32+20) /* Virtual SPEC_CTRL */
#define X86_FEATURE_VNMI (15*32+25) /* Virtual NMI */
#define X86_FEATURE_SVME_ADDR_CHK (15*32+28) /* "" SVME addr check */
#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* "flushbyasid" Flush-by-ASID support */
#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* "decodeassists" Decode Assists support */
#define X86_FEATURE_PAUSEFILTER (15*32+10) /* "pausefilter" Filtered pause intercept */
#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* "pfthreshold" Pause filter threshold */
#define X86_FEATURE_AVIC (15*32+13) /* "avic" Virtual Interrupt Controller */
#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* "v_vmsave_vmload" Virtual VMSAVE VMLOAD */
#define X86_FEATURE_VGIF (15*32+16) /* "vgif" Virtual GIF */
#define X86_FEATURE_X2AVIC (15*32+18) /* "x2avic" Virtual x2apic */
#define X86_FEATURE_V_SPEC_CTRL (15*32+20) /* "v_spec_ctrl" Virtual SPEC_CTRL */
#define X86_FEATURE_VNMI (15*32+25) /* "vnmi" Virtual NMI */
#define X86_FEATURE_SVME_ADDR_CHK (15*32+28) /* SVME addr check */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */
#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
#define X86_FEATURE_UMIP (16*32+ 2) /* User Mode Instruction Protection */
#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
#define X86_FEATURE_WAITPKG (16*32+ 5) /* UMONITOR/UMWAIT/TPAUSE Instructions */
#define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */
#define X86_FEATURE_SHSTK (16*32+ 7) /* "" Shadow stack */
#define X86_FEATURE_GFNI (16*32+ 8) /* Galois Field New Instructions */
#define X86_FEATURE_VAES (16*32+ 9) /* Vector AES */
#define X86_FEATURE_VPCLMULQDQ (16*32+10) /* Carry-Less Multiplication Double Quadword */
#define X86_FEATURE_AVX512_VNNI (16*32+11) /* Vector Neural Network Instructions */
#define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
#define X86_FEATURE_TME (16*32+13) /* Intel Total Memory Encryption */
#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
#define X86_FEATURE_BUS_LOCK_DETECT (16*32+24) /* Bus Lock detect */
#define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */
#define X86_FEATURE_MOVDIRI (16*32+27) /* MOVDIRI instruction */
#define X86_FEATURE_MOVDIR64B (16*32+28) /* MOVDIR64B instruction */
#define X86_FEATURE_ENQCMD (16*32+29) /* ENQCMD and ENQCMDS instructions */
#define X86_FEATURE_SGX_LC (16*32+30) /* Software Guard Extensions Launch Control */
#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* "avx512vbmi" AVX512 Vector Bit Manipulation instructions*/
#define X86_FEATURE_UMIP (16*32+ 2) /* "umip" User Mode Instruction Protection */
#define X86_FEATURE_PKU (16*32+ 3) /* "pku" Protection Keys for Userspace */
#define X86_FEATURE_OSPKE (16*32+ 4) /* "ospke" OS Protection Keys Enable */
#define X86_FEATURE_WAITPKG (16*32+ 5) /* "waitpkg" UMONITOR/UMWAIT/TPAUSE Instructions */
#define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* "avx512_vbmi2" Additional AVX512 Vector Bit Manipulation Instructions */
#define X86_FEATURE_SHSTK (16*32+ 7) /* Shadow stack */
#define X86_FEATURE_GFNI (16*32+ 8) /* "gfni" Galois Field New Instructions */
#define X86_FEATURE_VAES (16*32+ 9) /* "vaes" Vector AES */
#define X86_FEATURE_VPCLMULQDQ (16*32+10) /* "vpclmulqdq" Carry-Less Multiplication Double Quadword */
#define X86_FEATURE_AVX512_VNNI (16*32+11) /* "avx512_vnni" Vector Neural Network Instructions */
#define X86_FEATURE_AVX512_BITALG (16*32+12) /* "avx512_bitalg" Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
#define X86_FEATURE_TME (16*32+13) /* "tme" Intel Total Memory Encryption */
#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* "avx512_vpopcntdq" POPCNT for vectors of DW/QW */
#define X86_FEATURE_LA57 (16*32+16) /* "la57" 5-level page tables */
#define X86_FEATURE_RDPID (16*32+22) /* "rdpid" RDPID instruction */
#define X86_FEATURE_BUS_LOCK_DETECT (16*32+24) /* "bus_lock_detect" Bus Lock detect */
#define X86_FEATURE_CLDEMOTE (16*32+25) /* "cldemote" CLDEMOTE instruction */
#define X86_FEATURE_MOVDIRI (16*32+27) /* "movdiri" MOVDIRI instruction */
#define X86_FEATURE_MOVDIR64B (16*32+28) /* "movdir64b" MOVDIR64B instruction */
#define X86_FEATURE_ENQCMD (16*32+29) /* "enqcmd" ENQCMD and ENQCMDS instructions */
#define X86_FEATURE_SGX_LC (16*32+30) /* "sgx_lc" Software Guard Extensions Launch Control */
/* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */
#define X86_FEATURE_SUCCOR (17*32+ 1) /* Uncorrectable error containment and recovery */
#define X86_FEATURE_SMCA (17*32+ 3) /* Scalable MCA */
#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* "overflow_recov" MCA overflow recovery support */
#define X86_FEATURE_SUCCOR (17*32+ 1) /* "succor" Uncorrectable error containment and recovery */
#define X86_FEATURE_SMCA (17*32+ 3) /* "smca" Scalable MCA */
/* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
#define X86_FEATURE_FSRM (18*32+ 4) /* Fast Short Rep Mov */
#define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* AVX-512 Intersect for D/Q */
#define X86_FEATURE_SRBDS_CTRL (18*32+ 9) /* "" SRBDS mitigation MSR available */
#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */
#define X86_FEATURE_RTM_ALWAYS_ABORT (18*32+11) /* "" RTM transaction always aborts */
#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
#define X86_FEATURE_SERIALIZE (18*32+14) /* SERIALIZE instruction */
#define X86_FEATURE_HYBRID_CPU (18*32+15) /* "" This part has CPUs of more than one type */
#define X86_FEATURE_TSXLDTRK (18*32+16) /* TSX Suspend Load Address Tracking */
#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
#define X86_FEATURE_ARCH_LBR (18*32+19) /* Intel ARCH LBR */
#define X86_FEATURE_IBT (18*32+20) /* Indirect Branch Tracking */
#define X86_FEATURE_AMX_BF16 (18*32+22) /* AMX bf16 Support */
#define X86_FEATURE_AVX512_FP16 (18*32+23) /* AVX512 FP16 */
#define X86_FEATURE_AMX_TILE (18*32+24) /* AMX tile Support */
#define X86_FEATURE_AMX_INT8 (18*32+25) /* AMX int8 Support */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
#define X86_FEATURE_FLUSH_L1D (18*32+28) /* Flush L1D cache */
#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
#define X86_FEATURE_CORE_CAPABILITIES (18*32+30) /* "" IA32_CORE_CAPABILITIES MSR */
#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* "avx512_4vnniw" AVX-512 Neural Network Instructions */
#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* "avx512_4fmaps" AVX-512 Multiply Accumulation Single precision */
#define X86_FEATURE_FSRM (18*32+ 4) /* "fsrm" Fast Short Rep Mov */
#define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* "avx512_vp2intersect" AVX-512 Intersect for D/Q */
#define X86_FEATURE_SRBDS_CTRL (18*32+ 9) /* SRBDS mitigation MSR available */
#define X86_FEATURE_MD_CLEAR (18*32+10) /* "md_clear" VERW clears CPU buffers */
#define X86_FEATURE_RTM_ALWAYS_ABORT (18*32+11) /* RTM transaction always aborts */
#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* TSX_FORCE_ABORT */
#define X86_FEATURE_SERIALIZE (18*32+14) /* "serialize" SERIALIZE instruction */
#define X86_FEATURE_HYBRID_CPU (18*32+15) /* This part has CPUs of more than one type */
#define X86_FEATURE_TSXLDTRK (18*32+16) /* "tsxldtrk" TSX Suspend Load Address Tracking */
#define X86_FEATURE_PCONFIG (18*32+18) /* "pconfig" Intel PCONFIG */
#define X86_FEATURE_ARCH_LBR (18*32+19) /* "arch_lbr" Intel ARCH LBR */
#define X86_FEATURE_IBT (18*32+20) /* "ibt" Indirect Branch Tracking */
#define X86_FEATURE_AMX_BF16 (18*32+22) /* "amx_bf16" AMX bf16 Support */
#define X86_FEATURE_AVX512_FP16 (18*32+23) /* "avx512_fp16" AVX512 FP16 */
#define X86_FEATURE_AMX_TILE (18*32+24) /* "amx_tile" AMX tile Support */
#define X86_FEATURE_AMX_INT8 (18*32+25) /* "amx_int8" AMX int8 Support */
#define X86_FEATURE_SPEC_CTRL (18*32+26) /* Speculation Control (IBRS + IBPB) */
#define X86_FEATURE_INTEL_STIBP (18*32+27) /* Single Thread Indirect Branch Predictors */
#define X86_FEATURE_FLUSH_L1D (18*32+28) /* "flush_l1d" Flush L1D cache */
#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* "arch_capabilities" IA32_ARCH_CAPABILITIES MSR (Intel) */
#define X86_FEATURE_CORE_CAPABILITIES (18*32+30) /* IA32_CORE_CAPABILITIES MSR */
#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* Speculative Store Bypass Disable */
/* AMD-defined memory encryption features, CPUID level 0x8000001f (EAX), word 19 */
#define X86_FEATURE_SME (19*32+ 0) /* AMD Secure Memory Encryption */
#define X86_FEATURE_SEV (19*32+ 1) /* AMD Secure Encrypted Virtualization */
#define X86_FEATURE_VM_PAGE_FLUSH (19*32+ 2) /* "" VM Page Flush MSR is supported */
#define X86_FEATURE_SEV_ES (19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */
#define X86_FEATURE_SEV_SNP (19*32+ 4) /* AMD Secure Encrypted Virtualization - Secure Nested Paging */
#define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* "" Virtual TSC_AUX */
#define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */
#define X86_FEATURE_DEBUG_SWAP (19*32+14) /* AMD SEV-ES full debug state swap support */
#define X86_FEATURE_SVSM (19*32+28) /* SVSM present */
#define X86_FEATURE_SME (19*32+ 0) /* "sme" AMD Secure Memory Encryption */
#define X86_FEATURE_SEV (19*32+ 1) /* "sev" AMD Secure Encrypted Virtualization */
#define X86_FEATURE_VM_PAGE_FLUSH (19*32+ 2) /* VM Page Flush MSR is supported */
#define X86_FEATURE_SEV_ES (19*32+ 3) /* "sev_es" AMD Secure Encrypted Virtualization - Encrypted State */
#define X86_FEATURE_SEV_SNP (19*32+ 4) /* "sev_snp" AMD Secure Encrypted Virtualization - Secure Nested Paging */
#define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* Virtual TSC_AUX */
#define X86_FEATURE_SME_COHERENT (19*32+10) /* AMD hardware-enforced cache coherency */
#define X86_FEATURE_DEBUG_SWAP (19*32+14) /* "debug_swap" AMD SEV-ES full debug state swap support */
#define X86_FEATURE_SVSM (19*32+28) /* "svsm" SVSM present */
/* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */
#define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* "" No Nested Data Breakpoints */
#define X86_FEATURE_WRMSR_XX_BASE_NS (20*32+ 1) /* "" WRMSR to {FS,GS,KERNEL_GS}_BASE is non-serializing */
#define X86_FEATURE_LFENCE_RDTSC (20*32+ 2) /* "" LFENCE always serializing / synchronizes RDTSC */
#define X86_FEATURE_NULL_SEL_CLR_BASE (20*32+ 6) /* "" Null Selector Clears Base */
#define X86_FEATURE_AUTOIBRS (20*32+ 8) /* "" Automatic IBRS */
#define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* "" SMM_CTL MSR is not present */
#define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* No Nested Data Breakpoints */
#define X86_FEATURE_WRMSR_XX_BASE_NS (20*32+ 1) /* WRMSR to {FS,GS,KERNEL_GS}_BASE is non-serializing */
#define X86_FEATURE_LFENCE_RDTSC (20*32+ 2) /* LFENCE always serializing / synchronizes RDTSC */
#define X86_FEATURE_VERW_CLEAR (20*32+ 5) /* The memory form of VERW mitigates TSA */
#define X86_FEATURE_NULL_SEL_CLR_BASE (20*32+ 6) /* Null Selector Clears Base */
#define X86_FEATURE_AUTOIBRS (20*32+ 8) /* Automatic IBRS */
#define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* SMM_CTL MSR is not present */

#define X86_FEATURE_SBPB (20*32+27) /* "" Selective Branch Prediction Barrier */
#define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */
#define X86_FEATURE_SRSO_NO (20*32+29) /* "" CPU is not affected by SRSO */
#define X86_FEATURE_SBPB (20*32+27) /* Selective Branch Prediction Barrier */
#define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* MSR_PRED_CMD[IBPB] flushes all branch type predictions */
#define X86_FEATURE_SRSO_NO (20*32+29) /* CPU is not affected by SRSO */
#define X86_FEATURE_SRSO_USER_KERNEL_NO (20*32+30) /* CPU is not affected by SRSO across user/kernel boundaries */
#define X86_FEATURE_SRSO_BP_SPEC_REDUCE (20*32+31) /*
			* BP_CFG[BpSpecReduce] can be used to mitigate SRSO for VMs.
			* (SRSO_MSR_FIX in the official doc).
			*/
/*
 * Extended auxiliary flags: Linux defined - for features scattered in various
@ -474,64 +478,72 @@
 *
 * Reuse free bits when adding new feature flags!
 */
#define X86_FEATURE_AMD_LBR_PMC_FREEZE (21*32+ 0) /* AMD LBR and PMC Freeze */
#define X86_FEATURE_CLEAR_BHB_LOOP (21*32+ 1) /* "" Clear branch history at syscall entry using SW loop */
#define X86_FEATURE_BHI_CTRL (21*32+ 2) /* "" BHI_DIS_S HW control available */
#define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* "" BHI_DIS_S HW control enabled */
#define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* "" Clear branch history at vmexit using SW loop */
#define X86_FEATURE_AMD_FAST_CPPC (21*32 + 5) /* Fast CPPC */
#define X86_FEATURE_AMD_HETEROGENEOUS_CORES (21*32 + 6) /* Heterogeneous Core Topology */
#define X86_FEATURE_INDIRECT_THUNK_ITS (21*32 + 9) /* Use thunk for indirect branches in lower half of cacheline */
#define X86_FEATURE_AMD_LBR_PMC_FREEZE (21*32+ 0) /* "amd_lbr_pmc_freeze" AMD LBR and PMC Freeze */
#define X86_FEATURE_CLEAR_BHB_LOOP (21*32+ 1) /* Clear branch history at syscall entry using SW loop */
#define X86_FEATURE_BHI_CTRL (21*32+ 2) /* BHI_DIS_S HW control available */
#define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* BHI_DIS_S HW control enabled */
#define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* Clear branch history at vmexit using SW loop */
#define X86_FEATURE_AMD_FAST_CPPC (21*32+ 5) /* Fast CPPC */
#define X86_FEATURE_AMD_HETEROGENEOUS_CORES (21*32+ 6) /* Heterogeneous Core Topology */
#define X86_FEATURE_AMD_WORKLOAD_CLASS (21*32+ 7) /* Workload Classification */
#define X86_FEATURE_INDIRECT_THUNK_ITS (21*32+10) /* Use thunk for indirect branches in lower half of cacheline */
#define X86_FEATURE_TSA_SQ_NO (21*32+11) /* AMD CPU not vulnerable to TSA-SQ */
#define X86_FEATURE_TSA_L1_NO (21*32+12) /* AMD CPU not vulnerable to TSA-L1 */
#define X86_FEATURE_CLEAR_CPU_BUF_VM (21*32+13) /* Clear CPU buffers using VERW before VMRUN */
#define X86_FEATURE_IBPB_EXIT_TO_USER (21*32+14) /* Use IBPB on exit-to-userspace, see VMSCAPE bug */

/*
 * BUG word(s)
 */
#define X86_BUG(x) (NCAPINTS*32 + (x))
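Bug bits are stacked after the last capability word, so they live in the same per-CPU bitmap as feature bits without colliding with them. A standalone sketch of the arithmetic; the NCAPINTS value of 22 is an assumption based on the words listed above:

    #include <stdio.h>

    /* Assumption for illustration: 22 capability words (0..21). */
    #define NCAPINTS 22
    #define X86_BUG(x) (NCAPINTS * 32 + (x))

    int main(void)
    {
        /* X86_BUG_VMSCAPE below is X86_BUG(1*32 + 10) */
        printf("vmscape bit index: %d\n", X86_BUG(1 * 32 + 10));
        return 0;
    }
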
#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
#define X86_BUG_F00F X86_BUG(0) /* "f00f" Intel F00F */
#define X86_BUG_FDIV X86_BUG(1) /* "fdiv" FPU FDIV */
#define X86_BUG_COMA X86_BUG(2) /* "coma" Cyrix 6x86 coma */
#define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
#define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
#define X86_BUG_11AP X86_BUG(5) /* "11ap" Bad local APIC aka 11AP */
#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* "fxsave_leak" FXSAVE leaks FOP/FIP/FOP */
#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* "clflush_monitor" AAI65, CLFLUSH required before MONITOR */
#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* "sysret_ss_attrs" SYSRET doesn't fix up SS attrs */
#ifdef CONFIG_X86_32
/*
 * 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional
 * to avoid confusion.
 */
#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
#define X86_BUG_ESPFIX X86_BUG(9) /* IRET to 16-bit SS corrupts ESP/RSP high bits */
#endif
#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */
#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
#define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
#define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */
#define X86_BUG_SWAPGS X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
#define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */
#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
#define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */
#define X86_BUG_RETBLEED X86_BUG(27) /* CPU is affected by RETBleed */
#define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
#define X86_BUG_SMT_RSB X86_BUG(29) /* CPU is vulnerable to Cross-Thread Return Address Predictions */
#define X86_BUG_GDS X86_BUG(30) /* CPU is affected by Gather Data Sampling */
#define X86_BUG_TDX_PW_MCE X86_BUG(31) /* CPU may incur #MC if non-TD software does partial write to TDX private memory */
#define X86_BUG_NULL_SEG X86_BUG(10) /* "null_seg" Nulling a selector preserves the base */
#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* "swapgs_fence" SWAPGS without input dep on GS */
#define X86_BUG_MONITOR X86_BUG(12) /* "monitor" IPI required to wake up remote CPU */
#define X86_BUG_AMD_E400 X86_BUG(13) /* "amd_e400" CPU is among the affected by Erratum 400 */
#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* "cpu_meltdown" CPU is affected by meltdown attack and needs kernel page table isolation */
#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* "spectre_v1" CPU is affected by Spectre variant 1 attack with conditional branches */
#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* "spectre_v2" CPU is affected by Spectre variant 2 attack with indirect branches */
#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* "spec_store_bypass" CPU is affected by speculative store bypass attack */
#define X86_BUG_L1TF X86_BUG(18) /* "l1tf" CPU is affected by L1 Terminal Fault */
#define X86_BUG_MDS X86_BUG(19) /* "mds" CPU is affected by Microarchitectural data sampling */
#define X86_BUG_MSBDS_ONLY X86_BUG(20) /* "msbds_only" CPU is only affected by the MSDBS variant of BUG_MDS */
#define X86_BUG_SWAPGS X86_BUG(21) /* "swapgs" CPU is affected by speculation through SWAPGS */
#define X86_BUG_TAA X86_BUG(22) /* "taa" CPU is affected by TSX Async Abort(TAA) */
#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* "itlb_multihit" CPU may incur MCE during certain page attribute changes */
#define X86_BUG_SRBDS X86_BUG(24) /* "srbds" CPU may leak RNG bits if not mitigated */
#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* "mmio_stale_data" CPU is affected by Processor MMIO Stale Data vulnerabilities */
#define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* "mmio_unknown" CPU is too old and its MMIO Stale Data status is unknown */
#define X86_BUG_RETBLEED X86_BUG(27) /* "retbleed" CPU is affected by RETBleed */
#define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* "eibrs_pbrsb" EIBRS is vulnerable to Post Barrier RSB Predictions */
#define X86_BUG_SMT_RSB X86_BUG(29) /* "smt_rsb" CPU is vulnerable to Cross-Thread Return Address Predictions */
#define X86_BUG_GDS X86_BUG(30) /* "gds" CPU is affected by Gather Data Sampling */
#define X86_BUG_TDX_PW_MCE X86_BUG(31) /* "tdx_pw_mce" CPU may incur #MC if non-TD software does partial write to TDX private memory */
/* BUG word 2 */
#define X86_BUG_SRSO X86_BUG(1*32 + 0) /* AMD SRSO bug */
#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */
#define X86_BUG_RFDS X86_BUG(1*32 + 2) /* CPU is vulnerable to Register File Data Sampling */
#define X86_BUG_BHI X86_BUG(1*32 + 3) /* CPU is affected by Branch History Injection */
#define X86_BUG_ITS X86_BUG(1*32 + 6) /* "its" CPU is affected by Indirect Target Selection */
#define X86_BUG_ITS_NATIVE_ONLY X86_BUG(1*32 + 7) /* "its_native_only" CPU is affected by ITS, VMX is not affected */
#define X86_BUG_SRSO X86_BUG( 1*32+ 0) /* "srso" AMD SRSO bug */
#define X86_BUG_DIV0 X86_BUG( 1*32+ 1) /* "div0" AMD DIV0 speculation bug */
#define X86_BUG_RFDS X86_BUG( 1*32+ 2) /* "rfds" CPU is vulnerable to Register File Data Sampling */
#define X86_BUG_BHI X86_BUG( 1*32+ 3) /* "bhi" CPU is affected by Branch History Injection */
#define X86_BUG_IBPB_NO_RET X86_BUG( 1*32+ 4) /* "ibpb_no_ret" IBPB omits return target predictions */
#define X86_BUG_ITS X86_BUG( 1*32+ 7) /* "its" CPU is affected by Indirect Target Selection */
#define X86_BUG_ITS_NATIVE_ONLY X86_BUG( 1*32+ 8) /* "its_native_only" CPU is affected by ITS, VMX is not affected */
#define X86_BUG_TSA X86_BUG( 1*32+ 9) /* "tsa" CPU is affected by Transient Scheduler Attacks */
#define X86_BUG_VMSCAPE X86_BUG( 1*32+10) /* "vmscape" CPU is affected by VMSCAPE attacks from guests */
#endif /* _ASM_X86_CPUFEATURES_H */
@ -73,19 +73,23 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
#endif
/*
|
||||
* Ultimately, this value will get limited by KSTACK_OFFSET_MAX(),
|
||||
* but not enough for x86 stack utilization comfort. To keep
|
||||
* reasonable stack head room, reduce the maximum offset to 8 bits.
|
||||
*
|
||||
* The actual entropy will be further reduced by the compiler when
|
||||
* applying stack alignment constraints (see cc_stack_align4/8 in
|
||||
* This value will get limited by KSTACK_OFFSET_MAX(), which is 10
|
||||
* bits. The actual entropy will be further reduced by the compiler
|
||||
* when applying stack alignment constraints (see cc_stack_align4/8 in
|
||||
* arch/x86/Makefile), which will remove the 3 (x86_64) or 2 (ia32)
|
||||
* low bits from any entropy chosen here.
|
||||
*
|
||||
* Therefore, final stack offset entropy will be 5 (x86_64) or
|
||||
* 6 (ia32) bits.
|
||||
* Therefore, final stack offset entropy will be 7 (x86_64) or
|
||||
* 8 (ia32) bits.
|
||||
*/
|
||||
choose_random_kstack_offset(rdtsc() & 0xFF);
|
||||
choose_random_kstack_offset(rdtsc());
|
||||
|
||||
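The two choose_random_kstack_offset() calls above differ only in how many raw TSC bits they feed in; the 10-bit KSTACK_OFFSET_MAX() cap and the alignment loss then decide the usable entropy. Below is a standalone user-space sketch (not kernel code) that models the arithmetic; the cap and the 3-bit x86_64 alignment loss are taken from the comment above, and all names in the sketch are illustrative.

#include <stdio.h>
#include <stdint.h>
#include <x86intrin.h>

#define KSTACK_OFFSET_MASK 0x3FF /* the 10-bit cap applied by KSTACK_OFFSET_MAX() */
#define ALIGN_BITS_X86_64  3     /* low bits removed by stack alignment constraints */

int main(void)
{
	uint64_t tsc = __rdtsc();

	unsigned int old_offset = (unsigned int)(tsc & 0xFF);                /* old: 8 bits fed in */
	unsigned int new_offset = (unsigned int)(tsc & KSTACK_OFFSET_MASK);  /* new: 10 bits fed in */

	/* Alignment strips the low bits, so the effective entropy differs: */
	printf("old: %u usable bits\n", 8 - ALIGN_BITS_X86_64);   /* 5 bits */
	printf("new: %u usable bits\n", 10 - ALIGN_BITS_X86_64);  /* 7 bits */
	printf("sample offsets: old=%#x new=%#x\n",
	       old_offset & ~7u, new_offset & ~7u);
	return 0;
}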
/* Avoid unnecessary reads of 'x86_ibpb_exit_to_user' */
if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER) &&
this_cpu_read(x86_ibpb_exit_to_user)) {
indirect_branch_prediction_barrier();
this_cpu_write(x86_ibpb_exit_to_user, false);
}
}
#define arch_exit_to_user_mode_prepare arch_exit_to_user_mode_prepare

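The block above consumes a per-CPU flag that an earlier VM exit may have set, so the barrier is paid at most once per return to userspace. A minimal single-threaded model of that deferred-flag pattern follows; the names are stand-ins, and the real code additionally uses per-CPU storage and a CPU feature check.

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the per-CPU flag and the barrier; both hypothetical. */
static bool ibpb_pending;

static void indirect_branch_prediction_barrier_demo(void)
{
	puts("IBPB issued");
}

static void on_guest_vmexit(void)
{
	ibpb_pending = true;	/* guest may have poisoned the predictor */
}

static void exit_to_user_prepare(void)
{
	/* Consume the flag once; later returns to user pay nothing. */
	if (ibpb_pending) {
		indirect_branch_prediction_barrier_demo();
		ibpb_pending = false;
	}
}

int main(void)
{
	on_guest_vmexit();
	exit_to_user_prepare();	/* prints once */
	exit_to_user_prepare();	/* no-op */
	return 0;
}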
@ -44,13 +44,13 @@ static __always_inline void native_irq_enable(void)

static __always_inline void native_safe_halt(void)
{
mds_idle_clear_cpu_buffers();
x86_idle_clear_cpu_buffers();
asm volatile("sti; hlt": : :"memory");
}

static __always_inline void native_halt(void)
{
mds_idle_clear_cpu_buffers();
x86_idle_clear_cpu_buffers();
asm volatile("hlt": : :"memory");
}

@ -17,10 +17,12 @@ struct ucode_cpu_info {
void load_ucode_bsp(void);
void load_ucode_ap(void);
void microcode_bsp_resume(void);
bool __init microcode_loader_disabled(void);
#else
static inline void load_ucode_bsp(void) { }
static inline void load_ucode_ap(void) { }
static inline void microcode_bsp_resume(void) { }
static inline bool __init microcode_loader_disabled(void) { return false; }
#endif

extern unsigned long initrd_start_early;

@ -692,6 +692,7 @@

/* Zen4 */
#define MSR_ZEN4_BP_CFG 0xc001102e
#define MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT 4
#define MSR_ZEN4_BP_CFG_SHARED_BTB_FIX_BIT 5

/* Fam 19h MSRs */

@ -26,29 +26,29 @@
#define TPAUSE_C01_STATE 1
#define TPAUSE_C02_STATE 0

static __always_inline void __monitor(const void *eax, unsigned long ecx,
unsigned long edx)
static __always_inline void __monitor(const void *eax, u32 ecx, u32 edx)
{
/* "monitor %eax, %ecx, %edx;" */
asm volatile(".byte 0x0f, 0x01, 0xc8;"
/*
* Use the instruction mnemonic with implicit operands, as the LLVM
* assembler fails to assemble the mnemonic with explicit operands:
*/
asm volatile("monitor" :: "a" (eax), "c" (ecx), "d" (edx));
}

static __always_inline void __monitorx(const void *eax, u32 ecx, u32 edx)
{
/* "monitorx %eax, %ecx, %edx" */
asm volatile(".byte 0x0f, 0x01, 0xfa"
:: "a" (eax), "c" (ecx), "d"(edx));
}

static __always_inline void __monitorx(const void *eax, unsigned long ecx,
unsigned long edx)
static __always_inline void __mwait(u32 eax, u32 ecx)
{
/* "monitorx %eax, %ecx, %edx;" */
asm volatile(".byte 0x0f, 0x01, 0xfa;"
:: "a" (eax), "c" (ecx), "d"(edx));
}

static __always_inline void __mwait(unsigned long eax, unsigned long ecx)
{
mds_idle_clear_cpu_buffers();

/* "mwait %eax, %ecx;" */
asm volatile(".byte 0x0f, 0x01, 0xc9;"
:: "a" (eax), "c" (ecx));
/*
* Use the instruction mnemonic with implicit operands, as the LLVM
* assembler fails to assemble the mnemonic with explicit operands:
*/
asm volatile("mwait" :: "a" (eax), "c" (ecx));
}

/*
@ -77,22 +77,28 @@ static __always_inline void __mwait(unsigned long eax, unsigned long ecx)
* EAX (logical) address to monitor
* ECX #GP if not zero
*/
static __always_inline void __mwaitx(unsigned long eax, unsigned long ebx,
unsigned long ecx)
static __always_inline void __mwaitx(u32 eax, u32 ebx, u32 ecx)
{
/* No MDS buffer clear as this is AMD/HYGON only */
/* No need for TSA buffer clearing on AMD */

/* "mwaitx %eax, %ebx, %ecx;" */
asm volatile(".byte 0x0f, 0x01, 0xfb;"
/* "mwaitx %eax, %ebx, %ecx" */
asm volatile(".byte 0x0f, 0x01, 0xfb"
:: "a" (eax), "b" (ebx), "c" (ecx));
}

static __always_inline void __sti_mwait(unsigned long eax, unsigned long ecx)
/*
* Re-enable interrupts right upon calling mwait in such a way that
* no interrupt can fire _before_ the execution of mwait, ie: no
* instruction must be placed between "sti" and "mwait".
*
* This is necessary because if an interrupt queues a timer before
* executing mwait, it would otherwise go unnoticed and the next tick
* would not be reprogrammed accordingly before mwait ever wakes up.
*/
static __always_inline void __sti_mwait(u32 eax, u32 ecx)
{
mds_idle_clear_cpu_buffers();
/* "mwait %eax, %ecx;" */
asm volatile("sti; .byte 0x0f, 0x01, 0xc9;"
:: "a" (eax), "c" (ecx));

asm volatile("sti; mwait" :: "a" (eax), "c" (ecx));
}

/*
@ -107,16 +113,16 @@ static __always_inline void __sti_mwait(unsigned long eax, unsigned long ecx)
*/
static __always_inline void mwait_idle_with_hints(unsigned long eax, unsigned long ecx)
{
if (need_resched())
return;

x86_idle_clear_cpu_buffers();

if (static_cpu_has_bug(X86_BUG_MONITOR) || !current_set_polling_and_test()) {
const void *addr = &current_thread_info()->flags;
bool ibrs_disabled = false;
u64 spec_ctrl;

if (static_cpu_has_bug(X86_BUG_CLFLUSH_MONITOR)) {
mb();
clflush((void *)&current_thread_info()->flags);
mb();
}

if (irqs_disabled() && (ecx & 1) &&
cpu_feature_enabled(X86_FEATURE_KERNEL_IBRS)) {
/* NMI always enable IBRS on exception entry */
@ -126,16 +132,19 @@ static __always_inline void mwait_idle_with_hints(unsigned long eax, unsigned lo
native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
}

__monitor((void *)&current_thread_info()->flags, 0, 0);
alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr));
__monitor(addr, 0, 0);

if (need_resched())
goto out;

if (!need_resched()) {
if (ecx & 1) {
__mwait(eax, ecx);
} else {
__sti_mwait(eax, ecx);
raw_local_irq_disable();
}
}
out:
if (ibrs_disabled) {
native_wrmsrl(MSR_IA32_SPEC_CTRL, spec_ctrl);
__this_cpu_write(x86_spec_ctrl_current, spec_ctrl);
@ -152,13 +161,13 @@ static __always_inline void mwait_idle_with_hints(unsigned long eax, unsigned lo
*/
static inline void __tpause(u32 ecx, u32 edx, u32 eax)
{
/* "tpause %ecx, %edx, %eax;" */
/* "tpause %ecx, %edx, %eax" */
#ifdef CONFIG_AS_TPAUSE
asm volatile("tpause %%ecx\n"
asm volatile("tpause %%ecx"
:
: "c"(ecx), "d"(edx), "a"(eax));
#else
asm volatile(".byte 0x66, 0x0f, 0xae, 0xf1\t\n"
asm volatile(".byte 0x66, 0x0f, 0xae, 0xf1"
:
: "c"(ecx), "d"(edx), "a"(eax));
#endif

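The CONFIG_AS_TPAUSE fallback above illustrates a common kernel trick: when the assembler may not know a mnemonic, the raw opcode bytes are emitted instead. A small runnable sketch of the same technique, using the harmless pause instruction (0xF3 0x90) rather than tpause, which needs WAITPKG hardware:

#include <stdio.h>

int main(void)
{
	/*
	 * Same trick as the CONFIG_AS_TPAUSE fallback: "pause" encodes as
	 * 0xF3 0x90, so the two statements below are equivalent even if
	 * the assembler mnemonic were unavailable.
	 */
	asm volatile("pause");
	asm volatile(".byte 0xf3, 0x90");
	puts("both encodings executed");
	return 0;
}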
@ -325,16 +325,31 @@
.endm

/*
* Macro to execute VERW instruction that mitigate transient data sampling
* attacks such as MDS. On affected systems a microcode update overloaded VERW
* instruction to also clear the CPU buffers. VERW clobbers CFLAGS.ZF.
*
* Macro to execute VERW insns that mitigate transient data sampling
* attacks such as MDS or TSA. On affected systems a microcode update
* overloaded VERW insns to also clear the CPU buffers. VERW clobbers
* CFLAGS.ZF.
* Note: Only the memory operand variant of VERW clears the CPU buffers.
*/
.macro CLEAR_CPU_BUFFERS
ALTERNATIVE __stringify(verw _ASM_RIP(mds_verw_sel)), "", ALT_NOT(X86_FEATURE_CLEAR_CPU_BUF)
.macro __CLEAR_CPU_BUFFERS feature
#ifdef CONFIG_X86_64
ALTERNATIVE "", "verw x86_verw_sel(%rip)", \feature
#else
/*
* In 32bit mode, the memory operand must be a %cs reference. The data
* segments may not be usable (vm86 mode), and the stack segment may not
* be flat (ESPFIX32).
*/
ALTERNATIVE "", "verw %cs:x86_verw_sel", \feature
#endif
.endm

#define CLEAR_CPU_BUFFERS \
__CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF

#define VM_CLEAR_CPU_BUFFERS \
__CLEAR_CPU_BUFFERS X86_FEATURE_CLEAR_CPU_BUF_VM

#ifdef CONFIG_X86_64
.macro CLEAR_BRANCH_HISTORY
ALTERNATIVE "", "call clear_bhb_loop", X86_FEATURE_CLEAR_BHB_LOOP
@ -541,6 +556,8 @@ void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)

extern u64 x86_pred_cmd;

DECLARE_PER_CPU(bool, x86_ibpb_exit_to_user);

static inline void indirect_branch_prediction_barrier(void)
{
alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB);
@ -552,6 +569,13 @@ DECLARE_PER_CPU(u64, x86_spec_ctrl_current);
extern void update_spec_ctrl_cond(u64 val);
extern u64 spec_ctrl_current(void);

/*
* RHEL kABI:
* Since mds_idle_clear is a kABI protected symbol, we will have to map
* cpu_buf_idle_clear back to mds_idle_clear.
*/
#define cpu_buf_idle_clear mds_idle_clear

/*
* With retpoline, we must use IBRS to restrict branch prediction
* before calling into firmware.
@ -580,24 +604,24 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
DECLARE_STATIC_KEY_FALSE(cpu_buf_idle_clear);

DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

DECLARE_STATIC_KEY_FALSE(mmio_stale_data_clear);
DECLARE_STATIC_KEY_FALSE(cpu_buf_vm_clear);

extern u16 mds_verw_sel;
extern u16 x86_verw_sel;

#include <asm/segment.h>

/**
* mds_clear_cpu_buffers - Mitigation for MDS and TAA vulnerability
* x86_clear_cpu_buffers - Buffer clearing support for different x86 CPU vulns
*
* This uses the otherwise unused and obsolete VERW instruction in
* combination with microcode which triggers a CPU buffer flush when the
* instruction is executed.
*/
static __always_inline void mds_clear_cpu_buffers(void)
static __always_inline void x86_clear_cpu_buffers(void)
{
static const u16 ds = __KERNEL_DS;

@ -614,14 +638,15 @@ static __always_inline void mds_clear_cpu_buffers(void)
}

/**
* mds_idle_clear_cpu_buffers - Mitigation for MDS vulnerability
* x86_idle_clear_cpu_buffers - Buffer clearing support in idle for the MDS
* and TSA vulnerabilities.
*
* Clear CPU buffers if the corresponding static key is enabled
*/
static __always_inline void mds_idle_clear_cpu_buffers(void)
static __always_inline void x86_idle_clear_cpu_buffers(void)
{
if (static_branch_likely(&mds_idle_clear))
mds_clear_cpu_buffers();
if (static_branch_likely(&cpu_buf_idle_clear))
x86_clear_cpu_buffers();
}

#endif /* __ASSEMBLY__ */

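As the note above says, only the memory-operand form of VERW triggers the microcode-assisted buffer clear, which is why the macro references x86_verw_sel through memory. VERW itself is unprivileged, so the instruction form can be exercised from user space; a hedged sketch follows (the 0x2b selector is a typical Linux x86_64 user data segment value, an assumption rather than an API, and without the microcode assist the instruction only updates CFLAGS.ZF).

#include <stdio.h>
#include <stdint.h>

int main(void)
{
	/*
	 * The kernel points the memory operand at x86_verw_sel, which
	 * holds __KERNEL_DS; here any selector value stored in memory
	 * demonstrates the operand form.
	 */
	static const uint16_t sel = 0x2b;

	asm volatile("verw %[s]" : : [s] "m" (sel) : "cc");
	puts("verw executed with a memory operand");
	return 0;
}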
@ -49,7 +49,7 @@

#define __START_KERNEL_map _AC(0xffffffff80000000, UL)

/* See Documentation/x86/x86_64/mm.rst for a description of the memory map. */
/* See Documentation/arch/x86/x86_64/mm.rst for a description of the memory map. */

#define __PHYSICAL_MASK_SHIFT 52


@ -104,7 +104,7 @@ extern unsigned int ptrs_per_p4d;
#define PGDIR_MASK (~(PGDIR_SIZE - 1))

/*
* See Documentation/x86/x86_64/mm.rst for a description of the memory map.
* See Documentation/arch/x86/x86_64/mm.rst for a description of the memory map.
*
* Be very careful vs. KASLR when changing anything here. The KASLR address
* range must not overlap with anything except the KASAN shadow area, which

@ -34,4 +34,8 @@ static inline void __tlb_remove_table(void *table)
free_page_and_swap_cache(table);
}

static inline void invlpg(unsigned long addr)
{
asm volatile("invlpg (%0)" ::"r" (addr) : "memory");
}
#endif /* _ASM_X86_TLB_H */

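invlpg can only execute in the kernel, but the inline-asm shape above (one address operand plus a "memory" clobber so the compiler does not reorder accesses around it) can be demonstrated with the unprivileged clflush instruction. A small sketch:

#include <stdio.h>

int main(void)
{
	/*
	 * clflush has the same constraint pattern as invlpg above: the
	 * address goes in a register, and the "memory" clobber keeps the
	 * compiler from moving loads/stores across the asm statement.
	 */
	static char buf[64];

	buf[0] = 1;
	asm volatile("clflush (%0)" :: "r" (buf) : "memory");
	printf("flushed cache line at %p\n", (void *)buf);
	return 0;
}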
@ -9,85 +9,85 @@

/*
* Note: If the comment begins with a quoted string, that string is used
* in /proc/cpuinfo instead of the macro name. If the string is "",
* this feature bit is not displayed in /proc/cpuinfo at all.
* in /proc/cpuinfo instead of the macro name. Otherwise, this feature bit
* is not displayed in /proc/cpuinfo at all.
*/

/* Pin-Based VM-Execution Controls, EPT/VPID, APIC and VM-Functions, word 0 */
#define VMX_FEATURE_INTR_EXITING ( 0*32+ 0) /* "" VM-Exit on vectored interrupts */
#define VMX_FEATURE_NMI_EXITING ( 0*32+ 3) /* "" VM-Exit on NMIs */
#define VMX_FEATURE_INTR_EXITING ( 0*32+ 0) /* VM-Exit on vectored interrupts */
#define VMX_FEATURE_NMI_EXITING ( 0*32+ 3) /* VM-Exit on NMIs */
#define VMX_FEATURE_VIRTUAL_NMIS ( 0*32+ 5) /* "vnmi" NMI virtualization */
#define VMX_FEATURE_PREEMPTION_TIMER ( 0*32+ 6) /* VMX Preemption Timer */
#define VMX_FEATURE_POSTED_INTR ( 0*32+ 7) /* Posted Interrupts */
#define VMX_FEATURE_PREEMPTION_TIMER ( 0*32+ 6) /* "preemption_timer" VMX Preemption Timer */
#define VMX_FEATURE_POSTED_INTR ( 0*32+ 7) /* "posted_intr" Posted Interrupts */

/* EPT/VPID features, scattered to bits 16-23 */
#define VMX_FEATURE_INVVPID ( 0*32+ 16) /* INVVPID is supported */
#define VMX_FEATURE_INVVPID ( 0*32+ 16) /* "invvpid" INVVPID is supported */
#define VMX_FEATURE_EPT_EXECUTE_ONLY ( 0*32+ 17) /* "ept_x_only" EPT entries can be execute only */
#define VMX_FEATURE_EPT_AD ( 0*32+ 18) /* EPT Accessed/Dirty bits */
#define VMX_FEATURE_EPT_1GB ( 0*32+ 19) /* 1GB EPT pages */
#define VMX_FEATURE_EPT_5LEVEL ( 0*32+ 20) /* 5-level EPT paging */
#define VMX_FEATURE_EPT_AD ( 0*32+ 18) /* "ept_ad" EPT Accessed/Dirty bits */
#define VMX_FEATURE_EPT_1GB ( 0*32+ 19) /* "ept_1gb" 1GB EPT pages */
#define VMX_FEATURE_EPT_5LEVEL ( 0*32+ 20) /* "ept_5level" 5-level EPT paging */

/* Aggregated APIC features 24-27 */
#define VMX_FEATURE_FLEXPRIORITY ( 0*32+ 24) /* TPR shadow + virt APIC */
#define VMX_FEATURE_APICV ( 0*32+ 25) /* TPR shadow + APIC reg virt + virt intr delivery + posted interrupts */
#define VMX_FEATURE_FLEXPRIORITY ( 0*32+ 24) /* "flexpriority" TPR shadow + virt APIC */
#define VMX_FEATURE_APICV ( 0*32+ 25) /* "apicv" TPR shadow + APIC reg virt + virt intr delivery + posted interrupts */

/* VM-Functions, shifted to bits 28-31 */
#define VMX_FEATURE_EPTP_SWITCHING ( 0*32+ 28) /* EPTP switching (in guest) */
#define VMX_FEATURE_EPTP_SWITCHING ( 0*32+ 28) /* "eptp_switching" EPTP switching (in guest) */

/* Primary Processor-Based VM-Execution Controls, word 1 */
#define VMX_FEATURE_INTR_WINDOW_EXITING ( 1*32+ 2) /* "" VM-Exit if INTRs are unblocked in guest */
#define VMX_FEATURE_INTR_WINDOW_EXITING ( 1*32+ 2) /* VM-Exit if INTRs are unblocked in guest */
#define VMX_FEATURE_USE_TSC_OFFSETTING ( 1*32+ 3) /* "tsc_offset" Offset hardware TSC when read in guest */
#define VMX_FEATURE_HLT_EXITING ( 1*32+ 7) /* "" VM-Exit on HLT */
#define VMX_FEATURE_INVLPG_EXITING ( 1*32+ 9) /* "" VM-Exit on INVLPG */
#define VMX_FEATURE_MWAIT_EXITING ( 1*32+ 10) /* "" VM-Exit on MWAIT */
#define VMX_FEATURE_RDPMC_EXITING ( 1*32+ 11) /* "" VM-Exit on RDPMC */
#define VMX_FEATURE_RDTSC_EXITING ( 1*32+ 12) /* "" VM-Exit on RDTSC */
#define VMX_FEATURE_CR3_LOAD_EXITING ( 1*32+ 15) /* "" VM-Exit on writes to CR3 */
#define VMX_FEATURE_CR3_STORE_EXITING ( 1*32+ 16) /* "" VM-Exit on reads from CR3 */
#define VMX_FEATURE_TERTIARY_CONTROLS ( 1*32+ 17) /* "" Enable Tertiary VM-Execution Controls */
#define VMX_FEATURE_CR8_LOAD_EXITING ( 1*32+ 19) /* "" VM-Exit on writes to CR8 */
#define VMX_FEATURE_CR8_STORE_EXITING ( 1*32+ 20) /* "" VM-Exit on reads from CR8 */
#define VMX_FEATURE_HLT_EXITING ( 1*32+ 7) /* VM-Exit on HLT */
#define VMX_FEATURE_INVLPG_EXITING ( 1*32+ 9) /* VM-Exit on INVLPG */
#define VMX_FEATURE_MWAIT_EXITING ( 1*32+ 10) /* VM-Exit on MWAIT */
#define VMX_FEATURE_RDPMC_EXITING ( 1*32+ 11) /* VM-Exit on RDPMC */
#define VMX_FEATURE_RDTSC_EXITING ( 1*32+ 12) /* VM-Exit on RDTSC */
#define VMX_FEATURE_CR3_LOAD_EXITING ( 1*32+ 15) /* VM-Exit on writes to CR3 */
#define VMX_FEATURE_CR3_STORE_EXITING ( 1*32+ 16) /* VM-Exit on reads from CR3 */
#define VMX_FEATURE_TERTIARY_CONTROLS ( 1*32+ 17) /* Enable Tertiary VM-Execution Controls */
#define VMX_FEATURE_CR8_LOAD_EXITING ( 1*32+ 19) /* VM-Exit on writes to CR8 */
#define VMX_FEATURE_CR8_STORE_EXITING ( 1*32+ 20) /* VM-Exit on reads from CR8 */
#define VMX_FEATURE_VIRTUAL_TPR ( 1*32+ 21) /* "vtpr" TPR virtualization, a.k.a. TPR shadow */
#define VMX_FEATURE_NMI_WINDOW_EXITING ( 1*32+ 22) /* "" VM-Exit if NMIs are unblocked in guest */
#define VMX_FEATURE_MOV_DR_EXITING ( 1*32+ 23) /* "" VM-Exit on accesses to debug registers */
#define VMX_FEATURE_UNCOND_IO_EXITING ( 1*32+ 24) /* "" VM-Exit on *all* IN{S} and OUT{S}*/
#define VMX_FEATURE_USE_IO_BITMAPS ( 1*32+ 25) /* "" VM-Exit based on I/O port */
#define VMX_FEATURE_NMI_WINDOW_EXITING ( 1*32+ 22) /* VM-Exit if NMIs are unblocked in guest */
#define VMX_FEATURE_MOV_DR_EXITING ( 1*32+ 23) /* VM-Exit on accesses to debug registers */
#define VMX_FEATURE_UNCOND_IO_EXITING ( 1*32+ 24) /* VM-Exit on *all* IN{S} and OUT{S}*/
#define VMX_FEATURE_USE_IO_BITMAPS ( 1*32+ 25) /* VM-Exit based on I/O port */
#define VMX_FEATURE_MONITOR_TRAP_FLAG ( 1*32+ 27) /* "mtf" VMX single-step VM-Exits */
#define VMX_FEATURE_USE_MSR_BITMAPS ( 1*32+ 28) /* "" VM-Exit based on MSR index */
#define VMX_FEATURE_MONITOR_EXITING ( 1*32+ 29) /* "" VM-Exit on MONITOR (MWAIT's accomplice) */
#define VMX_FEATURE_PAUSE_EXITING ( 1*32+ 30) /* "" VM-Exit on PAUSE (unconditionally) */
#define VMX_FEATURE_SEC_CONTROLS ( 1*32+ 31) /* "" Enable Secondary VM-Execution Controls */
#define VMX_FEATURE_USE_MSR_BITMAPS ( 1*32+ 28) /* VM-Exit based on MSR index */
#define VMX_FEATURE_MONITOR_EXITING ( 1*32+ 29) /* VM-Exit on MONITOR (MWAIT's accomplice) */
#define VMX_FEATURE_PAUSE_EXITING ( 1*32+ 30) /* VM-Exit on PAUSE (unconditionally) */
#define VMX_FEATURE_SEC_CONTROLS ( 1*32+ 31) /* Enable Secondary VM-Execution Controls */

/* Secondary Processor-Based VM-Execution Controls, word 2 */
#define VMX_FEATURE_VIRT_APIC_ACCESSES ( 2*32+ 0) /* "vapic" Virtualize memory mapped APIC accesses */
#define VMX_FEATURE_EPT ( 2*32+ 1) /* Extended Page Tables, a.k.a. Two-Dimensional Paging */
#define VMX_FEATURE_DESC_EXITING ( 2*32+ 2) /* "" VM-Exit on {S,L}*DT instructions */
#define VMX_FEATURE_RDTSCP ( 2*32+ 3) /* "" Enable RDTSCP in guest */
#define VMX_FEATURE_VIRTUAL_X2APIC ( 2*32+ 4) /* "" Virtualize X2APIC for the guest */
#define VMX_FEATURE_VPID ( 2*32+ 5) /* Virtual Processor ID (TLB ASID modifier) */
#define VMX_FEATURE_WBINVD_EXITING ( 2*32+ 6) /* "" VM-Exit on WBINVD */
#define VMX_FEATURE_UNRESTRICTED_GUEST ( 2*32+ 7) /* Allow Big Real Mode and other "invalid" states */
#define VMX_FEATURE_EPT ( 2*32+ 1) /* "ept" Extended Page Tables, a.k.a. Two-Dimensional Paging */
#define VMX_FEATURE_DESC_EXITING ( 2*32+ 2) /* VM-Exit on {S,L}*DT instructions */
#define VMX_FEATURE_RDTSCP ( 2*32+ 3) /* Enable RDTSCP in guest */
#define VMX_FEATURE_VIRTUAL_X2APIC ( 2*32+ 4) /* Virtualize X2APIC for the guest */
#define VMX_FEATURE_VPID ( 2*32+ 5) /* "vpid" Virtual Processor ID (TLB ASID modifier) */
#define VMX_FEATURE_WBINVD_EXITING ( 2*32+ 6) /* VM-Exit on WBINVD */
#define VMX_FEATURE_UNRESTRICTED_GUEST ( 2*32+ 7) /* "unrestricted_guest" Allow Big Real Mode and other "invalid" states */
#define VMX_FEATURE_APIC_REGISTER_VIRT ( 2*32+ 8) /* "vapic_reg" Hardware emulation of reads to the virtual-APIC */
#define VMX_FEATURE_VIRT_INTR_DELIVERY ( 2*32+ 9) /* "vid" Evaluation and delivery of pending virtual interrupts */
#define VMX_FEATURE_PAUSE_LOOP_EXITING ( 2*32+ 10) /* "ple" Conditionally VM-Exit on PAUSE at CPL0 */
#define VMX_FEATURE_RDRAND_EXITING ( 2*32+ 11) /* "" VM-Exit on RDRAND*/
#define VMX_FEATURE_INVPCID ( 2*32+ 12) /* "" Enable INVPCID in guest */
#define VMX_FEATURE_VMFUNC ( 2*32+ 13) /* "" Enable VM-Functions (leaf dependent) */
#define VMX_FEATURE_SHADOW_VMCS ( 2*32+ 14) /* VMREAD/VMWRITE in guest can access shadow VMCS */
#define VMX_FEATURE_ENCLS_EXITING ( 2*32+ 15) /* "" VM-Exit on ENCLS (leaf dependent) */
#define VMX_FEATURE_RDSEED_EXITING ( 2*32+ 16) /* "" VM-Exit on RDSEED */
#define VMX_FEATURE_RDRAND_EXITING ( 2*32+ 11) /* VM-Exit on RDRAND*/
#define VMX_FEATURE_INVPCID ( 2*32+ 12) /* Enable INVPCID in guest */
#define VMX_FEATURE_VMFUNC ( 2*32+ 13) /* Enable VM-Functions (leaf dependent) */
#define VMX_FEATURE_SHADOW_VMCS ( 2*32+ 14) /* "shadow_vmcs" VMREAD/VMWRITE in guest can access shadow VMCS */
#define VMX_FEATURE_ENCLS_EXITING ( 2*32+ 15) /* VM-Exit on ENCLS (leaf dependent) */
#define VMX_FEATURE_RDSEED_EXITING ( 2*32+ 16) /* VM-Exit on RDSEED */
#define VMX_FEATURE_PAGE_MOD_LOGGING ( 2*32+ 17) /* "pml" Log dirty pages into buffer */
#define VMX_FEATURE_EPT_VIOLATION_VE ( 2*32+ 18) /* Conditionally reflect EPT violations as #VE exceptions */
#define VMX_FEATURE_PT_CONCEAL_VMX ( 2*32+ 19) /* "" Suppress VMX indicators in Processor Trace */
#define VMX_FEATURE_XSAVES ( 2*32+ 20) /* "" Enable XSAVES and XRSTORS in guest */
#define VMX_FEATURE_EPT_VIOLATION_VE ( 2*32+ 18) /* "ept_violation_ve" Conditionally reflect EPT violations as #VE exceptions */
#define VMX_FEATURE_PT_CONCEAL_VMX ( 2*32+ 19) /* Suppress VMX indicators in Processor Trace */
#define VMX_FEATURE_XSAVES ( 2*32+ 20) /* Enable XSAVES and XRSTORS in guest */
#define VMX_FEATURE_MODE_BASED_EPT_EXEC ( 2*32+ 22) /* "ept_mode_based_exec" Enable separate EPT EXEC bits for supervisor vs. user */
#define VMX_FEATURE_PT_USE_GPA ( 2*32+ 24) /* "" Processor Trace logs GPAs */
#define VMX_FEATURE_TSC_SCALING ( 2*32+ 25) /* Scale hardware TSC when read in guest */
#define VMX_FEATURE_USR_WAIT_PAUSE ( 2*32+ 26) /* Enable TPAUSE, UMONITOR, UMWAIT in guest */
#define VMX_FEATURE_ENCLV_EXITING ( 2*32+ 28) /* "" VM-Exit on ENCLV (leaf dependent) */
#define VMX_FEATURE_BUS_LOCK_DETECTION ( 2*32+ 30) /* "" VM-Exit when bus lock caused */
#define VMX_FEATURE_NOTIFY_VM_EXITING ( 2*32+ 31) /* VM-Exit when no event windows after notify window */
#define VMX_FEATURE_PT_USE_GPA ( 2*32+ 24) /* Processor Trace logs GPAs */
#define VMX_FEATURE_TSC_SCALING ( 2*32+ 25) /* "tsc_scaling" Scale hardware TSC when read in guest */
#define VMX_FEATURE_USR_WAIT_PAUSE ( 2*32+ 26) /* "usr_wait_pause" Enable TPAUSE, UMONITOR, UMWAIT in guest */
#define VMX_FEATURE_ENCLV_EXITING ( 2*32+ 28) /* VM-Exit on ENCLV (leaf dependent) */
#define VMX_FEATURE_BUS_LOCK_DETECTION ( 2*32+ 30) /* VM-Exit when bus lock caused */
#define VMX_FEATURE_NOTIFY_VM_EXITING ( 2*32+ 31) /* "notify_vm_exiting" VM-Exit when no event windows after notify window */

/* Tertiary Processor-Based VM-Execution Controls, word 3 */
#define VMX_FEATURE_IPI_VIRT ( 3*32+ 4) /* Enable IPI virtualization */
#define VMX_FEATURE_IPI_VIRT ( 3*32+ 4) /* "ipi_virt" Enable IPI virtualization */
#endif /* _ASM_X86_VMXFEATURES_H */

@ -13,6 +13,7 @@
#include <asm/apic.h>
#include <asm/cacheinfo.h>
#include <asm/cpu.h>
#include <asm/cpu_device_id.h>
#include <asm/spec-ctrl.h>
#include <asm/smp.h>
#include <asm/numa.h>
@ -367,6 +368,47 @@ static void bsp_determine_snp(struct cpuinfo_x86 *c)
#endif
}

#define ZEN_MODEL_STEP_UCODE(fam, model, step, ucode) \
X86_MATCH_VFM_STEPS(VFM_MAKE(X86_VENDOR_AMD, fam, model), \
step, step, ucode)

static const struct x86_cpu_id amd_tsa_microcode[] = {
ZEN_MODEL_STEP_UCODE(0x19, 0x01, 0x1, 0x0a0011d7),
ZEN_MODEL_STEP_UCODE(0x19, 0x01, 0x2, 0x0a00123b),
ZEN_MODEL_STEP_UCODE(0x19, 0x08, 0x2, 0x0a00820d),
ZEN_MODEL_STEP_UCODE(0x19, 0x11, 0x1, 0x0a10114c),
ZEN_MODEL_STEP_UCODE(0x19, 0x11, 0x2, 0x0a10124c),
ZEN_MODEL_STEP_UCODE(0x19, 0x18, 0x1, 0x0a108109),
ZEN_MODEL_STEP_UCODE(0x19, 0x21, 0x0, 0x0a20102e),
ZEN_MODEL_STEP_UCODE(0x19, 0x21, 0x2, 0x0a201211),
ZEN_MODEL_STEP_UCODE(0x19, 0x44, 0x1, 0x0a404108),
ZEN_MODEL_STEP_UCODE(0x19, 0x50, 0x0, 0x0a500012),
ZEN_MODEL_STEP_UCODE(0x19, 0x61, 0x2, 0x0a60120a),
ZEN_MODEL_STEP_UCODE(0x19, 0x74, 0x1, 0x0a704108),
ZEN_MODEL_STEP_UCODE(0x19, 0x75, 0x2, 0x0a705208),
ZEN_MODEL_STEP_UCODE(0x19, 0x78, 0x0, 0x0a708008),
ZEN_MODEL_STEP_UCODE(0x19, 0x7c, 0x0, 0x0a70c008),
ZEN_MODEL_STEP_UCODE(0x19, 0xa0, 0x2, 0x0aa00216),
{},
};

static void tsa_init(struct cpuinfo_x86 *c)
{
if (cpu_has(c, X86_FEATURE_HYPERVISOR))
return;

if (cpu_has(c, X86_FEATURE_ZEN3) ||
cpu_has(c, X86_FEATURE_ZEN4)) {
if (x86_match_min_microcode_rev(amd_tsa_microcode))
setup_force_cpu_cap(X86_FEATURE_VERW_CLEAR);
else
pr_debug("%s: current revision: 0x%x\n", __func__, c->microcode);
} else {
setup_force_cpu_cap(X86_FEATURE_TSA_SQ_NO);
setup_force_cpu_cap(X86_FEATURE_TSA_L1_NO);
}
}

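tsa_init() above asks whether the running microcode is at least the revision listed for the exact family/model/stepping. A standalone sketch of that table-lookup logic follows; it mirrors the shape of amd_tsa_microcode but is not the kernel's x86_match_min_microcode_rev() implementation, and the struct and function names are hypothetical.

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

struct min_ucode {	/* hypothetical mirror of one table row */
	uint8_t  family, model, stepping;
	uint32_t min_rev;
};

static const struct min_ucode table[] = {
	{ 0x19, 0x01, 0x1, 0x0a0011d7 },	/* values taken from amd_tsa_microcode above */
	{ 0x19, 0x21, 0x0, 0x0a20102e },
	{ 0 },					/* terminator, like the {} entry */
};

static bool has_min_microcode(uint8_t fam, uint8_t mod, uint8_t step, uint32_t rev)
{
	for (const struct min_ucode *e = table; e->family; e++) {
		if (e->family == fam && e->model == mod && e->stepping == step)
			return rev >= e->min_rev;
	}
	return false;	/* unknown part: assume the fix is absent */
}

int main(void)
{
	printf("%d\n", has_min_microcode(0x19, 0x01, 0x1, 0x0a0011d7));	/* 1 */
	printf("%d\n", has_min_microcode(0x19, 0x01, 0x1, 0x0a0011d6));	/* 0 */
	return 0;
}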
static void bsp_init_amd(struct cpuinfo_x86 *c)
{
if (cpu_has(c, X86_FEATURE_CONSTANT_TSC)) {
@ -474,6 +516,9 @@ static void bsp_init_amd(struct cpuinfo_x86 *c)
}

bsp_determine_snp(c);

tsa_init(c);

return;

warn:
@ -794,6 +839,12 @@ static void init_amd_bd(struct cpuinfo_x86 *c)
clear_rdrand_cpuid_bit(c);
}

static const struct x86_cpu_desc erratum_1386_microcode[] = {
AMD_CPU_DESC(0x17, 0x1, 0x2, 0x0800126e),
AMD_CPU_DESC(0x17, 0x31, 0x0, 0x08301052),
{},
};

static void fix_erratum_1386(struct cpuinfo_x86 *c)
{
/*
@ -803,7 +854,13 @@ static void fix_erratum_1386(struct cpuinfo_x86 *c)
*
* Affected parts all have no supervisor XSAVE states, meaning that
* the XSAVEC instruction (which works fine) is equivalent.
*
* Clear the feature flag only on microcode revisions which
* don't have the fix.
*/
if (x86_cpu_has_min_microcode_rev(erratum_1386_microcode))
return;

clear_cpu_cap(c, X86_FEATURE_XSAVES);
}

@ -27,7 +27,7 @@
#include <asm/msr.h>
#include <asm/vmx.h>
#include <asm/paravirt.h>
#include <asm/intel-family.h>
#include <asm/cpu_device_id.h>
#include <asm/e820/api.h>
#include <asm/hypervisor.h>
#include <asm/tlbflush.h>
@ -51,6 +51,11 @@ static void __init l1d_flush_select_mitigation(void);
static void __init srso_select_mitigation(void);
static void __init gds_select_mitigation(void);
static void __init its_select_mitigation(void);
static void __init tsa_select_mitigation(void);
static void __init tsa_apply_mitigation(void);
static void __init vmscape_select_mitigation(void);
static void __init vmscape_update_mitigation(void);
static void __init vmscape_apply_mitigation(void);

/* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base;
@ -58,7 +63,15 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);

/* The current value of the SPEC_CTRL MSR with task-specific bits set */
DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);
EXPORT_PER_CPU_SYMBOL_GPL(x86_spec_ctrl_current);

/*
* Set when the CPU has run a potentially malicious guest. An IBPB will
* be needed before running userspace. That IBPB will flush the branch
* predictor content.
*/
DEFINE_PER_CPU(bool, x86_ibpb_exit_to_user);
EXPORT_PER_CPU_SYMBOL_GPL(x86_ibpb_exit_to_user);

u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
EXPORT_SYMBOL_GPL(x86_pred_cmd);
@ -123,9 +136,9 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);

/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);
/* Control CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(cpu_buf_idle_clear);
EXPORT_SYMBOL_GPL(cpu_buf_idle_clear);

/*
* Controls whether l1d flush based mitigations are enabled,
@ -134,9 +147,13 @@ EXPORT_SYMBOL_GPL(mds_idle_clear);
*/
DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);

/* Controls CPU Fill buffer clear before KVM guest MMIO accesses */
DEFINE_STATIC_KEY_FALSE(mmio_stale_data_clear);
EXPORT_SYMBOL_GPL(mmio_stale_data_clear);
/*
* Controls CPU Fill buffer clear before VMenter. This is a subset of
* X86_FEATURE_CLEAR_CPU_BUF, and should only be enabled when KVM-only
* mitigation is required.
*/
DEFINE_STATIC_KEY_FALSE(cpu_buf_vm_clear);
EXPORT_SYMBOL_GPL(cpu_buf_vm_clear);

void __init cpu_select_mitigations(void)
{
@ -186,6 +203,11 @@ void __init cpu_select_mitigations(void)
srso_select_mitigation();
gds_select_mitigation();
its_select_mitigation();
tsa_select_mitigation();
tsa_apply_mitigation();
vmscape_select_mitigation();
vmscape_update_mitigation();
vmscape_apply_mitigation();
}

/*
@ -244,7 +266,8 @@ static void x86_amd_ssb_disable(void)
#define pr_fmt(fmt) "MDS: " fmt

/* Default mitigation for MDS-affected CPUs */
static enum mds_mitigations mds_mitigation __ro_after_init = MDS_MITIGATION_FULL;
static enum mds_mitigations mds_mitigation __ro_after_init =
IS_ENABLED(CONFIG_MITIGATION_MDS) ? MDS_MITIGATION_FULL : MDS_MITIGATION_OFF;
static bool mds_nosmt __ro_after_init = false;

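This hunk, and several below, move the boot default from a hard-coded value to a Kconfig-driven compile-time choice. A toy version of the pattern, where IS_ENABLED() is a simplified stand-in for the kernel macro and the config name is made up for the demo:

#include <stdio.h>

/* Build with -DCONFIG_MITIGATION_MDS_DEMO=1 to flip the default. */
#ifndef CONFIG_MITIGATION_MDS_DEMO
#define CONFIG_MITIGATION_MDS_DEMO 0
#endif
#define IS_ENABLED(x) (x)

enum mds_demo { DEMO_OFF, DEMO_FULL };

static enum mds_demo mds_default =
	IS_ENABLED(CONFIG_MITIGATION_MDS_DEMO) ? DEMO_FULL : DEMO_OFF;

int main(void)
{
	printf("default: %s\n", mds_default == DEMO_FULL ? "full" : "off");
	return 0;
}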
static const char * const mds_strings[] = {
@ -304,7 +327,8 @@ enum taa_mitigations {
};

/* Default mitigation for TAA-affected CPUs */
static enum taa_mitigations taa_mitigation __ro_after_init = TAA_MITIGATION_VERW;
static enum taa_mitigations taa_mitigation __ro_after_init =
IS_ENABLED(CONFIG_MITIGATION_TAA) ? TAA_MITIGATION_VERW : TAA_MITIGATION_OFF;
static bool taa_nosmt __ro_after_init;

static const char * const taa_strings[] = {
@ -402,7 +426,8 @@ enum mmio_mitigations {
};

/* Default mitigation for Processor MMIO Stale Data vulnerabilities */
static enum mmio_mitigations mmio_mitigation __ro_after_init = MMIO_MITIGATION_VERW;
static enum mmio_mitigations mmio_mitigation __ro_after_init =
IS_ENABLED(CONFIG_MITIGATION_MMIO_STALE_DATA) ? MMIO_MITIGATION_VERW : MMIO_MITIGATION_OFF;
static bool mmio_nosmt __ro_after_init = false;

static const char * const mmio_strings[] = {
@ -436,9 +461,9 @@ static void __init mmio_select_mitigation(void)
* mitigations, disable KVM-only mitigation in that case.
*/
if (boot_cpu_has(X86_FEATURE_CLEAR_CPU_BUF))
static_branch_disable(&mmio_stale_data_clear);
static_branch_disable(&cpu_buf_vm_clear);
else
static_branch_enable(&mmio_stale_data_clear);
static_branch_enable(&cpu_buf_vm_clear);

/*
* If Processor-MMIO-Stale-Data bug is present and Fill Buffer data can
@ -446,7 +471,7 @@ static void __init mmio_select_mitigation(void)
* is required irrespective of SMT state.
*/
if (!(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO))
static_branch_enable(&mds_idle_clear);
static_branch_enable(&cpu_buf_idle_clear);

/*
* Check if the system has the right microcode.
@ -565,7 +590,7 @@ static void __init md_clear_update_mitigation(void)
taa_select_mitigation();
}
/*
* MMIO_MITIGATION_OFF is not checked here so that mmio_stale_data_clear
* MMIO_MITIGATION_OFF is not checked here so that cpu_buf_vm_clear
* gets updated correctly as per X86_FEATURE_CLEAR_CPU_BUF state.
*/
if (boot_cpu_has_bug(X86_BUG_MMIO_STALE_DATA)) {
@ -616,7 +641,8 @@ enum srbds_mitigations {
SRBDS_MITIGATION_HYPERVISOR,
};

static enum srbds_mitigations srbds_mitigation __ro_after_init = SRBDS_MITIGATION_FULL;
static enum srbds_mitigations srbds_mitigation __ro_after_init =
IS_ENABLED(CONFIG_MITIGATION_SRBDS) ? SRBDS_MITIGATION_FULL : SRBDS_MITIGATION_OFF;

static const char * const srbds_strings[] = {
[SRBDS_MITIGATION_OFF] = "Vulnerable",
@ -742,11 +768,8 @@ enum gds_mitigations {
GDS_MITIGATION_HYPERVISOR,
};

#if IS_ENABLED(CONFIG_MITIGATION_GDS_FORCE)
static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FORCE;
#else
static enum gds_mitigations gds_mitigation __ro_after_init = GDS_MITIGATION_FULL;
#endif
static enum gds_mitigations gds_mitigation __ro_after_init =
IS_ENABLED(CONFIG_MITIGATION_GDS) ? GDS_MITIGATION_FULL : GDS_MITIGATION_OFF;

static const char * const gds_strings[] = {
[GDS_MITIGATION_OFF] = "Vulnerable",
@ -882,7 +905,8 @@ enum spectre_v1_mitigation {
};

static enum spectre_v1_mitigation spectre_v1_mitigation __ro_after_init =
SPECTRE_V1_MITIGATION_AUTO;
IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V1) ?
SPECTRE_V1_MITIGATION_AUTO : SPECTRE_V1_MITIGATION_NONE;

static const char * const spectre_v1_strings[] = {
[SPECTRE_V1_MITIGATION_NONE] = "Vulnerable: __user pointer sanitization and usercopy barriers only; no swapgs barriers",
@ -997,7 +1021,7 @@ static const char * const retbleed_strings[] = {
static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
RETBLEED_MITIGATION_NONE;
static enum retbleed_mitigation_cmd retbleed_cmd __ro_after_init =
RETBLEED_CMD_AUTO;
IS_ENABLED(CONFIG_MITIGATION_RETBLEED) ? RETBLEED_CMD_AUTO : RETBLEED_CMD_OFF;

static int __ro_after_init retbleed_nosmt = false;

@ -1132,6 +1156,22 @@ do_cmd_auto:
setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
mitigate_smt = true;

/*
* IBPB on entry already obviates the need for
* software-based untraining so clear those in case some
* other mitigation like SRSO has selected them.
*/
setup_clear_cpu_cap(X86_FEATURE_UNRET);
setup_clear_cpu_cap(X86_FEATURE_RETHUNK);

/*
* There is no need for RSB filling: entry_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);

break;

case RETBLEED_MITIGATION_STUFF:
@ -1311,6 +1351,94 @@ out:
pr_info("%s\n", its_strings[its_mitigation]);
}

#undef pr_fmt
#define pr_fmt(fmt) "Transient Scheduler Attacks: " fmt

enum tsa_mitigations {
TSA_MITIGATION_NONE,
TSA_MITIGATION_AUTO,
TSA_MITIGATION_UCODE_NEEDED,
TSA_MITIGATION_USER_KERNEL,
TSA_MITIGATION_VM,
TSA_MITIGATION_FULL,
};

static const char * const tsa_strings[] = {
[TSA_MITIGATION_NONE] = "Vulnerable",
[TSA_MITIGATION_UCODE_NEEDED] = "Vulnerable: No microcode",
[TSA_MITIGATION_USER_KERNEL] = "Mitigation: Clear CPU buffers: user/kernel boundary",
[TSA_MITIGATION_VM] = "Mitigation: Clear CPU buffers: VM",
[TSA_MITIGATION_FULL] = "Mitigation: Clear CPU buffers",
};

static enum tsa_mitigations tsa_mitigation __ro_after_init =
IS_ENABLED(CONFIG_MITIGATION_TSA) ? TSA_MITIGATION_AUTO : TSA_MITIGATION_NONE;

static int __init tsa_parse_cmdline(char *str)
{
if (!str)
return -EINVAL;

if (!strcmp(str, "off"))
tsa_mitigation = TSA_MITIGATION_NONE;
else if (!strcmp(str, "on"))
tsa_mitigation = TSA_MITIGATION_FULL;
else if (!strcmp(str, "user"))
tsa_mitigation = TSA_MITIGATION_USER_KERNEL;
else if (!strcmp(str, "vm"))
tsa_mitigation = TSA_MITIGATION_VM;
else
pr_err("Ignoring unknown tsa=%s option.\n", str);

return 0;
}
early_param("tsa", tsa_parse_cmdline);

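tsa_parse_cmdline() above is a straight string dispatch registered with early_param(); unknown values are warned about and ignored rather than treated as fatal. A user-space analogue of the same dispatch shape, with stand-in names:

#include <stdio.h>
#include <string.h>

enum tsa_demo { DEMO_NONE, DEMO_FULL, DEMO_USER_KERNEL, DEMO_VM };

static enum tsa_demo mode = DEMO_NONE;

/* Simplified analogue of tsa_parse_cmdline(). */
static int parse_tsa(const char *str)
{
	if (!str)
		return -1;
	if (!strcmp(str, "off"))
		mode = DEMO_NONE;
	else if (!strcmp(str, "on"))
		mode = DEMO_FULL;
	else if (!strcmp(str, "user"))
		mode = DEMO_USER_KERNEL;
	else if (!strcmp(str, "vm"))
		mode = DEMO_VM;
	else
		fprintf(stderr, "Ignoring unknown tsa=%s option.\n", str);
	return 0;
}

int main(void)
{
	parse_tsa("vm");
	printf("mode=%d\n", mode);	/* 3 */
	parse_tsa("bogus");		/* warns, keeps mode */
	printf("mode=%d\n", mode);	/* still 3 */
	return 0;
}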
static void __init tsa_select_mitigation(void)
{
if (cpu_mitigations_off() || !boot_cpu_has_bug(X86_BUG_TSA)) {
tsa_mitigation = TSA_MITIGATION_NONE;
return;
}

if (tsa_mitigation == TSA_MITIGATION_NONE)
return;

if (!boot_cpu_has(X86_FEATURE_VERW_CLEAR)) {
tsa_mitigation = TSA_MITIGATION_UCODE_NEEDED;
goto out;
}

if (tsa_mitigation == TSA_MITIGATION_AUTO)
tsa_mitigation = TSA_MITIGATION_FULL;

/*
* No need to set verw_clear_cpu_buf_mitigation_selected - it
* doesn't fit all cases here and it is not needed because this
* is the only VERW-based mitigation on AMD.
*/
out:
pr_info("%s\n", tsa_strings[tsa_mitigation]);
}

static void __init tsa_apply_mitigation(void)
{
switch (tsa_mitigation) {
case TSA_MITIGATION_USER_KERNEL:
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
break;
case TSA_MITIGATION_VM:
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
break;
case TSA_MITIGATION_FULL:
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF);
setup_force_cpu_cap(X86_FEATURE_CLEAR_CPU_BUF_VM);
break;
default:
break;
}
}

#undef pr_fmt
#define pr_fmt(fmt) "Spectre V2 : " fmt

@ -1603,17 +1731,18 @@ static void __init spec_v2_print_cond(const char *reason, bool secure)

static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
{
enum spectre_v2_mitigation_cmd cmd = SPECTRE_V2_CMD_AUTO;
enum spectre_v2_mitigation_cmd cmd;
char arg[20];
int ret, i;

cmd = IS_ENABLED(CONFIG_MITIGATION_SPECTRE_V2) ? SPECTRE_V2_CMD_AUTO : SPECTRE_V2_CMD_NONE;
if (cmdline_find_option_bool(boot_command_line, "nospectre_v2") ||
cpu_mitigations_off())
return SPECTRE_V2_CMD_NONE;

ret = cmdline_find_option(boot_command_line, "spectre_v2", arg, sizeof(arg));
if (ret < 0)
return SPECTRE_V2_CMD_AUTO;
return cmd;

for (i = 0; i < ARRAY_SIZE(mitigation_options); i++) {
if (!match_option(arg, ret, mitigation_options[i].option))
@ -1623,8 +1752,8 @@ static enum spectre_v2_mitigation_cmd __init spectre_v2_parse_cmdline(void)
}

if (i >= ARRAY_SIZE(mitigation_options)) {
pr_err("unknown option (%s). Switching to AUTO select\n", arg);
return SPECTRE_V2_CMD_AUTO;
pr_err("unknown option (%s). Switching to default mode\n", arg);
return cmd;
}

if ((cmd == SPECTRE_V2_CMD_RETPOLINE ||

@ -2076,73 +2205,13 @@ static void update_mds_branch_idle(void)
return;

if (sched_smt_active()) {
static_branch_enable(&mds_idle_clear);
static_branch_enable(&cpu_buf_idle_clear);
} else if (mmio_mitigation == MMIO_MITIGATION_OFF ||
(x86_arch_cap_msr & ARCH_CAP_FBSDP_NO)) {
static_branch_disable(&mds_idle_clear);
static_branch_disable(&cpu_buf_idle_clear);
}
}

#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"

void cpu_bugs_smt_update(void)
{
mutex_lock(&spec_ctrl_mutex);

if (sched_smt_active() && unprivileged_ebpf_enabled() &&
spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);

switch (spectre_v2_user_stibp) {
case SPECTRE_V2_USER_NONE:
break;
case SPECTRE_V2_USER_STRICT:
case SPECTRE_V2_USER_STRICT_PREFERRED:
update_stibp_strict();
break;
case SPECTRE_V2_USER_PRCTL:
case SPECTRE_V2_USER_SECCOMP:
update_indir_branch_cond();
break;
}

switch (mds_mitigation) {
case MDS_MITIGATION_FULL:
case MDS_MITIGATION_VMWERV:
if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
pr_warn_once(MDS_MSG_SMT);
update_mds_branch_idle();
break;
case MDS_MITIGATION_OFF:
break;
}

switch (taa_mitigation) {
case TAA_MITIGATION_VERW:
case TAA_MITIGATION_UCODE_NEEDED:
if (sched_smt_active())
pr_warn_once(TAA_MSG_SMT);
break;
case TAA_MITIGATION_TSX_DISABLED:
case TAA_MITIGATION_OFF:
break;
}

switch (mmio_mitigation) {
case MMIO_MITIGATION_VERW:
case MMIO_MITIGATION_UCODE_NEEDED:
if (sched_smt_active())
pr_warn_once(MMIO_MSG_SMT);
break;
case MMIO_MITIGATION_OFF:
break;
}

mutex_unlock(&spec_ctrl_mutex);
}

#ifdef CONFIG_DEBUG_FS
/*
* Provide a debugfs file to dump SPEC_CTRL MSRs of all the CPUs
@ -2255,10 +2324,12 @@ static const struct {

static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
{
enum ssb_mitigation_cmd cmd = SPEC_STORE_BYPASS_CMD_AUTO;
enum ssb_mitigation_cmd cmd;
char arg[20];
int ret, i;

cmd = IS_ENABLED(CONFIG_MITIGATION_SSB) ?
SPEC_STORE_BYPASS_CMD_AUTO : SPEC_STORE_BYPASS_CMD_NONE;
if (cmdline_find_option_bool(boot_command_line, "nospec_store_bypass_disable") ||
cpu_mitigations_off()) {
return SPEC_STORE_BYPASS_CMD_NONE;
@ -2266,7 +2337,7 @@ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
ret = cmdline_find_option(boot_command_line, "spec_store_bypass_disable",
arg, sizeof(arg));
if (ret < 0)
return SPEC_STORE_BYPASS_CMD_AUTO;
return cmd;

for (i = 0; i < ARRAY_SIZE(ssb_mitigation_options); i++) {
if (!match_option(arg, ret, ssb_mitigation_options[i].option))
@ -2277,8 +2348,8 @@ static enum ssb_mitigation_cmd __init ssb_parse_cmdline(void)
}

if (i >= ARRAY_SIZE(ssb_mitigation_options)) {
pr_err("unknown option (%s). Switching to AUTO select\n", arg);
return SPEC_STORE_BYPASS_CMD_AUTO;
pr_err("unknown option (%s). Switching to default mode\n", arg);
return cmd;
}
}

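Both spectre_v2_parse_cmdline() and ssb_parse_cmdline() now scan their option tables and fall back to the compile-time default instead of hard-coded AUTO. A compact model of that loop-plus-fallback shape, with made-up option names:

#include <stdio.h>
#include <string.h>

struct option { const char *name; int cmd; };

static const struct option options[] = {
	{ "off",       0 },
	{ "auto",      1 },
	{ "retpoline", 2 },
};

/* Scan a fixed table; fall back to the caller-supplied default. */
static int parse(const char *arg, int dflt)
{
	for (size_t i = 0; i < sizeof(options) / sizeof(options[0]); i++) {
		if (!strcmp(arg, options[i].name))
			return options[i].cmd;
	}
	fprintf(stderr, "unknown option (%s). Switching to default mode\n", arg);
	return dflt;
}

int main(void)
{
	printf("%d\n", parse("retpoline", 1));	/* 2 */
	printf("%d\n", parse("bogus", 1));	/* warns, 1 */
	return 0;
}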
@ -2605,7 +2676,8 @@ EXPORT_SYMBOL_GPL(itlb_multihit_kvm_mitigation);
#define pr_fmt(fmt) "L1TF: " fmt

/* Default mitigation for L1TF-affected CPUs */
enum l1tf_mitigations l1tf_mitigation __ro_after_init = L1TF_MITIGATION_FLUSH;
enum l1tf_mitigations l1tf_mitigation __ro_after_init =
IS_ENABLED(CONFIG_MITIGATION_L1TF) ? L1TF_MITIGATION_FLUSH : L1TF_MITIGATION_OFF;
#if IS_ENABLED(CONFIG_KVM_INTEL)
EXPORT_SYMBOL_GPL(l1tf_mitigation);
#endif
@ -2631,20 +2703,20 @@ static void override_cache_bits(struct cpuinfo_x86 *c)
if (c->x86 != 6)
return;

switch (c->x86_model) {
case INTEL_FAM6_NEHALEM:
case INTEL_FAM6_WESTMERE:
case INTEL_FAM6_SANDYBRIDGE:
case INTEL_FAM6_IVYBRIDGE:
case INTEL_FAM6_HASWELL:
case INTEL_FAM6_HASWELL_L:
case INTEL_FAM6_HASWELL_G:
case INTEL_FAM6_BROADWELL:
case INTEL_FAM6_BROADWELL_G:
case INTEL_FAM6_SKYLAKE_L:
case INTEL_FAM6_SKYLAKE:
case INTEL_FAM6_KABYLAKE_L:
case INTEL_FAM6_KABYLAKE:
switch (c->x86_vfm) {
case INTEL_NEHALEM:
case INTEL_WESTMERE:
case INTEL_SANDYBRIDGE:
case INTEL_IVYBRIDGE:
case INTEL_HASWELL:
case INTEL_HASWELL_L:
case INTEL_HASWELL_G:
case INTEL_BROADWELL:
case INTEL_BROADWELL_G:
case INTEL_SKYLAKE_L:
case INTEL_SKYLAKE:
case INTEL_KABYLAKE_L:
case INTEL_KABYLAKE:
if (c->x86_cache_bits < 44)
c->x86_cache_bits = 44;
break;
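The switch above moves from matching c->x86_model against INTEL_FAM6_* constants to matching c->x86_vfm, which packs vendor, family, and model into one integer so a single compare covers all three. The bit layout in the sketch below is illustrative only; the kernel defines the real one in its own headers.

#include <stdio.h>
#include <stdint.h>

/* Assumed layout for the demo: model bits 0-7, family 8-15, vendor 16-23. */
#define VFM_MAKE_DEMO(vendor, family, model) \
	(((uint32_t)(vendor) << 16) | ((uint32_t)(family) << 8) | (model))

#define VENDOR_INTEL_DEMO 0
#define DEMO_SKYLAKE VFM_MAKE_DEMO(VENDOR_INTEL_DEMO, 6, 0x5E)

int main(void)
{
	uint32_t vfm = VFM_MAKE_DEMO(VENDOR_INTEL_DEMO, 6, 0x5E);

	switch (vfm) {
	case DEMO_SKYLAKE:
		puts("one compare matches vendor+family+model");
		break;
	default:
		puts("no match");
	}
	return 0;
}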
@ -2734,6 +2806,7 @@ enum srso_mitigation {
SRSO_MITIGATION_SAFE_RET,
SRSO_MITIGATION_IBPB,
SRSO_MITIGATION_IBPB_ON_VMEXIT,
SRSO_MITIGATION_BP_SPEC_REDUCE,
};

enum srso_mitigation_cmd {
@ -2751,7 +2824,8 @@ static const char * const srso_strings[] = {
[SRSO_MITIGATION_MICROCODE] = "Vulnerable: Microcode, no safe RET",
[SRSO_MITIGATION_SAFE_RET] = "Mitigation: Safe RET",
[SRSO_MITIGATION_IBPB] = "Mitigation: IBPB",
[SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only"
[SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only",
[SRSO_MITIGATION_BP_SPEC_REDUCE] = "Mitigation: Reduced Speculation"
};

static enum srso_mitigation srso_mitigation __ro_after_init = SRSO_MITIGATION_NONE;
@ -2785,13 +2859,12 @@ static void __init srso_select_mitigation(void)
{
bool has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE);

if (cpu_mitigations_off())
return;

if (!boot_cpu_has_bug(X86_BUG_SRSO)) {
if (!boot_cpu_has_bug(X86_BUG_SRSO) ||
cpu_mitigations_off() ||
srso_cmd == SRSO_CMD_OFF) {
if (boot_cpu_has(X86_FEATURE_SBPB))
x86_pred_cmd = PRED_CMD_SBPB;
return;
goto out;
}

if (has_microcode) {
@ -2803,7 +2876,7 @@ static void __init srso_select_mitigation(void)
*/
if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
return;
goto out;
}

if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB) {
@ -2819,11 +2892,6 @@ static void __init srso_select_mitigation(void)
}

switch (srso_cmd) {
case SRSO_CMD_OFF:
if (boot_cpu_has(X86_FEATURE_SBPB))
x86_pred_cmd = PRED_CMD_SBPB;
return;

case SRSO_CMD_MICROCODE:
if (has_microcode) {
srso_mitigation = SRSO_MITIGATION_MICROCODE;
@ -2832,6 +2900,9 @@ static void __init srso_select_mitigation(void)
break;

case SRSO_CMD_SAFE_RET:
if (boot_cpu_has(X86_FEATURE_SRSO_USER_KERNEL_NO))
goto ibpb_on_vmexit;

if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
/*
* Enable the return thunk for generated code
@ -2860,32 +2931,245 @@ static void __init srso_select_mitigation(void)
if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
if (has_microcode) {
setup_force_cpu_cap(X86_FEATURE_ENTRY_IBPB);
setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
srso_mitigation = SRSO_MITIGATION_IBPB;

/*
* IBPB on entry already obviates the need for
* software-based untraining so clear those in case some
* other mitigation like Retbleed has selected them.
*/
setup_clear_cpu_cap(X86_FEATURE_UNRET);
setup_clear_cpu_cap(X86_FEATURE_RETHUNK);

/*
* There is no need for RSB filling: entry_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
}
} else {
pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
}
break;

ibpb_on_vmexit:
case SRSO_CMD_IBPB_ON_VMEXIT:
if (IS_ENABLED(CONFIG_MITIGATION_SRSO)) {
if (!boot_cpu_has(X86_FEATURE_ENTRY_IBPB) && has_microcode) {
if (boot_cpu_has(X86_FEATURE_SRSO_BP_SPEC_REDUCE)) {
pr_notice("Reducing speculation to address VM/HV SRSO attack vector.\n");
srso_mitigation = SRSO_MITIGATION_BP_SPEC_REDUCE;
break;
}

if (IS_ENABLED(CONFIG_MITIGATION_IBPB_ENTRY)) {
if (has_microcode) {
setup_force_cpu_cap(X86_FEATURE_IBPB_ON_VMEXIT);
srso_mitigation = SRSO_MITIGATION_IBPB_ON_VMEXIT;

/*
* There is no need for RSB filling: entry_ibpb() ensures
* all predictions, including the RSB, are invalidated,
* regardless of IBPB implementation.
*/
setup_clear_cpu_cap(X86_FEATURE_RSB_VMEXIT);
}
} else {
pr_err("WARNING: kernel not compiled with MITIGATION_SRSO.\n");
pr_err("WARNING: kernel not compiled with MITIGATION_IBPB_ENTRY.\n");
}
break;
default:
break;
}

out:
/*
* Clear the feature flag if this mitigation is not selected as that
* feature flag controls the BpSpecReduce MSR bit toggling in KVM.
*/
if (srso_mitigation != SRSO_MITIGATION_BP_SPEC_REDUCE)
setup_clear_cpu_cap(X86_FEATURE_SRSO_BP_SPEC_REDUCE);

if (srso_mitigation != SRSO_MITIGATION_NONE)
pr_info("%s\n", srso_strings[srso_mitigation]);
}

#undef pr_fmt
#define pr_fmt(fmt) "VMSCAPE: " fmt

enum vmscape_mitigations {
VMSCAPE_MITIGATION_NONE,
VMSCAPE_MITIGATION_AUTO,
VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER,
VMSCAPE_MITIGATION_IBPB_ON_VMEXIT,
};

static const char * const vmscape_strings[] = {
[VMSCAPE_MITIGATION_NONE] = "Vulnerable",
/* [VMSCAPE_MITIGATION_AUTO] */
[VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER] = "Mitigation: IBPB before exit to userspace",
[VMSCAPE_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT",
};

static enum vmscape_mitigations vmscape_mitigation __ro_after_init =
IS_ENABLED(CONFIG_MITIGATION_VMSCAPE) ? VMSCAPE_MITIGATION_AUTO : VMSCAPE_MITIGATION_NONE;

static int __init vmscape_parse_cmdline(char *str)
{
if (!str)
return -EINVAL;

if (!strcmp(str, "off")) {
vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
} else if (!strcmp(str, "ibpb")) {
vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;
} else if (!strcmp(str, "force")) {
setup_force_cpu_bug(X86_BUG_VMSCAPE);
vmscape_mitigation = VMSCAPE_MITIGATION_AUTO;
} else {
pr_err("Ignoring unknown vmscape=%s option.\n", str);
}

return 0;
}
early_param("vmscape", vmscape_parse_cmdline);

static void __init vmscape_select_mitigation(void)
{
if (cpu_mitigations_off() ||
!boot_cpu_has_bug(X86_BUG_VMSCAPE) ||
!boot_cpu_has(X86_FEATURE_IBPB)) {
vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
return;
}

if (vmscape_mitigation == VMSCAPE_MITIGATION_AUTO)
vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;
}

static void __init vmscape_update_mitigation(void)
{
if (!boot_cpu_has_bug(X86_BUG_VMSCAPE))
return;

if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB ||
srso_mitigation == SRSO_MITIGATION_IBPB_ON_VMEXIT)
vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_ON_VMEXIT;

pr_info("%s\n", vmscape_strings[vmscape_mitigation]);
}

static void __init vmscape_apply_mitigation(void)
{
if (vmscape_mitigation == VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER)
setup_force_cpu_cap(X86_FEATURE_IBPB_EXIT_TO_USER);
}

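VMSCAPE splits its handling into three phases: select picks a local default, update reconciles it with what retbleed/SRSO already provide (their IBPB on VMEXIT makes the exit-to-user IBPB redundant), and apply commits the feature bit. A toy model of the phase split, with stand-in state:

#include <stdbool.h>
#include <stdio.h>

enum vm_mode { VM_NONE, VM_AUTO, VM_IBPB_EXIT_TO_USER, VM_IBPB_ON_VMEXIT };

static enum vm_mode mode = VM_AUTO;
static bool other_mitigation_does_ibpb_on_vmexit = true; /* stand-in for SRSO/retbleed state */
static bool feature_ibpb_exit_to_user;

static void select_phase(void)   /* pick a local default */
{
	if (mode == VM_AUTO)
		mode = VM_IBPB_EXIT_TO_USER;
}

static void update_phase(void)   /* reconcile with other mitigations */
{
	if (other_mitigation_does_ibpb_on_vmexit)
		mode = VM_IBPB_ON_VMEXIT;	/* their IBPB already covers us */
}

static void apply_phase(void)    /* commit the final choice */
{
	feature_ibpb_exit_to_user = (mode == VM_IBPB_EXIT_TO_USER);
}

int main(void)
{
	select_phase();
	update_phase();
	apply_phase();
	printf("mode=%d, exit-to-user IBPB=%d\n", mode, feature_ibpb_exit_to_user);
	return 0;
}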
#undef pr_fmt
|
||||
#define pr_fmt(fmt) fmt
|
||||
|
||||
#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
|
||||
#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
|
||||
#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
|
||||
#define VMSCAPE_MSG_SMT "VMSCAPE: SMT on, STIBP is required for full protection. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/vmscape.html for more details.\n"
void cpu_bugs_smt_update(void)
{
	mutex_lock(&spec_ctrl_mutex);

	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
		pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);

	switch (spectre_v2_user_stibp) {
	case SPECTRE_V2_USER_NONE:
		break;
	case SPECTRE_V2_USER_STRICT:
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		update_stibp_strict();
		break;
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		update_indir_branch_cond();
		break;
	}

	switch (mds_mitigation) {
	case MDS_MITIGATION_FULL:
	case MDS_MITIGATION_VMWERV:
		if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
			pr_warn_once(MDS_MSG_SMT);
		update_mds_branch_idle();
		break;
	case MDS_MITIGATION_OFF:
		break;
	}

	switch (taa_mitigation) {
	case TAA_MITIGATION_VERW:
	case TAA_MITIGATION_UCODE_NEEDED:
		if (sched_smt_active())
			pr_warn_once(TAA_MSG_SMT);
		break;
	case TAA_MITIGATION_TSX_DISABLED:
	case TAA_MITIGATION_OFF:
		break;
	}

	switch (mmio_mitigation) {
	case MMIO_MITIGATION_VERW:
	case MMIO_MITIGATION_UCODE_NEEDED:
		if (sched_smt_active())
			pr_warn_once(MMIO_MSG_SMT);
		break;
	case MMIO_MITIGATION_OFF:
		break;
	}

	switch (tsa_mitigation) {
	case TSA_MITIGATION_USER_KERNEL:
	case TSA_MITIGATION_VM:
	case TSA_MITIGATION_AUTO:
	case TSA_MITIGATION_FULL:
		/*
		 * TSA-SQ can potentially lead to info leakage between
		 * SMT threads.
		 */
		if (sched_smt_active())
			static_branch_enable(&cpu_buf_idle_clear);
		else
			static_branch_disable(&cpu_buf_idle_clear);
		break;
	case TSA_MITIGATION_NONE:
	case TSA_MITIGATION_UCODE_NEEDED:
		break;
	}

	switch (vmscape_mitigation) {
	case VMSCAPE_MITIGATION_NONE:
	case VMSCAPE_MITIGATION_AUTO:
		break;
	case VMSCAPE_MITIGATION_IBPB_ON_VMEXIT:
	case VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER:
		/*
		 * Hypervisors can be attacked across-threads, warn for SMT when
		 * STIBP is not already enabled system-wide.
		 *
		 * Intel eIBRS (!AUTOIBRS) implies STIBP on.
		 */
		if (!sched_smt_active() ||
		    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
		    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
		    (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
		     !boot_cpu_has(X86_FEATURE_AUTOIBRS)))
			break;
		pr_warn_once(VMSCAPE_MSG_SMT);
		break;
	}

	mutex_unlock(&spec_ctrl_mutex);
}

#ifdef CONFIG_SYSFS

#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
@ -3124,6 +3408,16 @@ static ssize_t gds_show_state(char *buf)
	return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
}

static ssize_t tsa_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", tsa_strings[tsa_mitigation]);
}

static ssize_t vmscape_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", vmscape_strings[vmscape_mitigation]);
}

static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
@ -3185,6 +3479,12 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
	case X86_BUG_ITS:
		return its_show_state(buf);

	case X86_BUG_TSA:
		return tsa_show_state(buf);

	case X86_BUG_VMSCAPE:
		return vmscape_show_state(buf);

	default:
		break;
	}
@ -3269,6 +3569,16 @@ ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_att
{
	return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
}

ssize_t cpu_show_tsa(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_TSA);
}

ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_VMSCAPE);
}
#endif

void __warn_thunk(void)

@ -1209,8 +1209,8 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {

	VULNWL_INTEL(INTEL_CORE_YONAH,		NO_SSB),

	VULNWL_INTEL(INTEL_ATOM_AIRMONT_MID,	NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(INTEL_ATOM_AIRMONT_NP,	NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
	VULNWL_INTEL(INTEL_ATOM_AIRMONT_MID,	NO_SSB | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | MSBDS_ONLY),
	VULNWL_INTEL(INTEL_ATOM_AIRMONT_NP,	NO_SSB | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),

	VULNWL_INTEL(INTEL_ATOM_GOLDMONT,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
	VULNWL_INTEL(INTEL_ATOM_GOLDMONT_D,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
@ -1278,54 +1278,73 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
#define ITS		BIT(8)
/* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */
#define ITS_NATIVE_ONLY	BIT(9)
/* CPU is affected by Transient Scheduler Attacks */
#define TSA		BIT(10)
/* CPU is affected by VMSCAPE */
#define VMSCAPE		BIT(11)

static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
	VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE,	X86_STEP_MAX,	SRBDS),
	VULNBL_INTEL_STEPS(INTEL_HASWELL,	X86_STEP_MAX,	SRBDS),
	VULNBL_INTEL_STEPS(INTEL_HASWELL_L,	X86_STEP_MAX,	SRBDS),
	VULNBL_INTEL_STEPS(INTEL_HASWELL_G,	X86_STEP_MAX,	SRBDS),
	VULNBL_INTEL_STEPS(INTEL_HASWELL_X,	X86_STEP_MAX,	MMIO),
	VULNBL_INTEL_STEPS(INTEL_BROADWELL_D,	X86_STEP_MAX,	MMIO),
	VULNBL_INTEL_STEPS(INTEL_BROADWELL_G,	X86_STEP_MAX,	SRBDS),
	VULNBL_INTEL_STEPS(INTEL_BROADWELL_X,	X86_STEP_MAX,	MMIO),
	VULNBL_INTEL_STEPS(INTEL_BROADWELL,	X86_STEP_MAX,	SRBDS),
	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X,	0x5,		MMIO | RETBLEED | GDS),
	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | ITS),
	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_L,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS),
	VULNBL_INTEL_STEPS(INTEL_SKYLAKE,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS),
	VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L,	0xb,		MMIO | RETBLEED | GDS | SRBDS),
	VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS | ITS),
	VULNBL_INTEL_STEPS(INTEL_KABYLAKE,	0xc,		MMIO | RETBLEED | GDS | SRBDS),
	VULNBL_INTEL_STEPS(INTEL_KABYLAKE,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS | ITS),
	VULNBL_INTEL_STEPS(INTEL_CANNONLAKE_L,	X86_STEP_MAX,	RETBLEED),
	VULNBL_INTEL_STEPS(INTEL_SANDYBRIDGE_X,	X86_STEP_MAX,	VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_SANDYBRIDGE,	X86_STEP_MAX,	VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE_X,	X86_STEP_MAX,	VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE,	X86_STEP_MAX,	SRBDS | VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_HASWELL,	X86_STEP_MAX,	SRBDS | VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_HASWELL_L,	X86_STEP_MAX,	SRBDS | VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_HASWELL_G,	X86_STEP_MAX,	SRBDS | VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_HASWELL_X,	X86_STEP_MAX,	MMIO | VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_BROADWELL_D,	X86_STEP_MAX,	MMIO | VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_BROADWELL_X,	X86_STEP_MAX,	MMIO | VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_BROADWELL_G,	X86_STEP_MAX,	SRBDS | VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_BROADWELL,	X86_STEP_MAX,	SRBDS | VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X,	0x5,		MMIO | RETBLEED | GDS | VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | ITS | VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_L,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_SKYLAKE,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L,	0xb,		MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS | ITS | VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_KABYLAKE,	0xc,		MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_KABYLAKE,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS | ITS | VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_CANNONLAKE_L,	X86_STEP_MAX,	RETBLEED | VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_ICELAKE_L,	X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
	VULNBL_INTEL_STEPS(INTEL_ICELAKE_D,	X86_STEP_MAX,	MMIO | GDS | ITS | ITS_NATIVE_ONLY),
	VULNBL_INTEL_STEPS(INTEL_ICELAKE_X,	X86_STEP_MAX,	MMIO | GDS | ITS | ITS_NATIVE_ONLY),
	VULNBL_INTEL_STEPS(INTEL_COMETLAKE,	X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
	VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L,	0x0,		MMIO | RETBLEED | ITS),
	VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L,	X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
	VULNBL_INTEL_STEPS(INTEL_COMETLAKE,	X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L,	0x0,		MMIO | RETBLEED | ITS | VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L,	X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_TIGERLAKE_L,	X86_STEP_MAX,	GDS | ITS | ITS_NATIVE_ONLY),
	VULNBL_INTEL_STEPS(INTEL_TIGERLAKE,	X86_STEP_MAX,	GDS | ITS | ITS_NATIVE_ONLY),
	VULNBL_INTEL_STEPS(INTEL_LAKEFIELD,	X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED),
	VULNBL_INTEL_STEPS(INTEL_ROCKETLAKE,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
	VULNBL_INTEL_TYPE(INTEL_ALDERLAKE,	ATOM,		RFDS),
	VULNBL_INTEL_STEPS(INTEL_ALDERLAKE_L,	X86_STEP_MAX,	RFDS),
	VULNBL_INTEL_TYPE(INTEL_RAPTORLAKE,	ATOM,		RFDS),
	VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE_P,	X86_STEP_MAX,	RFDS),
	VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE_S,	X86_STEP_MAX,	RFDS),
	VULNBL_INTEL_STEPS(INTEL_ATOM_GRACEMONT, X86_STEP_MAX,	RFDS),
	VULNBL_INTEL_TYPE(INTEL_ALDERLAKE,	ATOM,		RFDS | VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_ALDERLAKE,	X86_STEP_MAX,	VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_ALDERLAKE_L,	X86_STEP_MAX,	RFDS | VMSCAPE),
	VULNBL_INTEL_TYPE(INTEL_RAPTORLAKE,	ATOM,		RFDS | VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE,	X86_STEP_MAX,	VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE_P,	X86_STEP_MAX,	RFDS | VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE_S,	X86_STEP_MAX,	RFDS | VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_METEORLAKE_L,	X86_STEP_MAX,	VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_ARROWLAKE_H,	X86_STEP_MAX,	VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_ARROWLAKE,	X86_STEP_MAX,	VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_ARROWLAKE_U,	X86_STEP_MAX,	VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_LUNARLAKE_M,	X86_STEP_MAX,	VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_SAPPHIRERAPIDS_X, X86_STEP_MAX, VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_GRANITERAPIDS_X, X86_STEP_MAX, VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_EMERALDRAPIDS_X, X86_STEP_MAX, VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_ATOM_GRACEMONT, X86_STEP_MAX,	RFDS | VMSCAPE),
	VULNBL_INTEL_STEPS(INTEL_ATOM_TREMONT,	X86_STEP_MAX,	MMIO | MMIO_SBDS | RFDS),
	VULNBL_INTEL_STEPS(INTEL_ATOM_TREMONT_D, X86_STEP_MAX,	MMIO | RFDS),
	VULNBL_INTEL_STEPS(INTEL_ATOM_TREMONT_L, X86_STEP_MAX,	MMIO | MMIO_SBDS | RFDS),
	VULNBL_INTEL_STEPS(INTEL_ATOM_GOLDMONT, X86_STEP_MAX,	RFDS),
	VULNBL_INTEL_STEPS(INTEL_ATOM_GOLDMONT_D, X86_STEP_MAX, RFDS),
	VULNBL_INTEL_STEPS(INTEL_ATOM_GOLDMONT_PLUS, X86_STEP_MAX, RFDS),
	VULNBL_INTEL_STEPS(INTEL_ATOM_CRESTMONT_X, X86_STEP_MAX, VMSCAPE),

	VULNBL_AMD(0x15, RETBLEED),
	VULNBL_AMD(0x16, RETBLEED),
	VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO),
	VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO),
	VULNBL_AMD(0x19, SRSO),
	VULNBL_AMD(0x17, RETBLEED | SMT_RSB | SRSO | VMSCAPE),
	VULNBL_HYGON(0x18, RETBLEED | SMT_RSB | SRSO | VMSCAPE),
	VULNBL_AMD(0x19, SRSO | TSA | VMSCAPE),
	VULNBL_AMD(0x1a, SRSO | VMSCAPE),
	{}
};

@ -1527,12 +1546,33 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
	    boot_cpu_has(X86_FEATURE_HYPERVISOR)))
		setup_force_cpu_bug(X86_BUG_BHI);

	if (cpu_has(c, X86_FEATURE_AMD_IBPB) && !cpu_has(c, X86_FEATURE_AMD_IBPB_RET))
		setup_force_cpu_bug(X86_BUG_IBPB_NO_RET);

	if (vulnerable_to_its(x86_arch_cap_msr)) {
		setup_force_cpu_bug(X86_BUG_ITS);
		if (cpu_matches(cpu_vuln_blacklist, ITS_NATIVE_ONLY))
			setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY);
	}

	if (c->x86_vendor == X86_VENDOR_AMD) {
		if (!cpu_has(c, X86_FEATURE_TSA_SQ_NO) ||
		    !cpu_has(c, X86_FEATURE_TSA_L1_NO)) {
			if (cpu_matches(cpu_vuln_blacklist, TSA) ||
			    /* Enable bug on Zen guests to allow for live migration. */
			    (cpu_has(c, X86_FEATURE_HYPERVISOR) && cpu_has(c, X86_FEATURE_ZEN)))
				setup_force_cpu_bug(X86_BUG_TSA);
		}
	}

	/*
	 * Set the bug only on bare-metal. A nested hypervisor should already be
	 * deploying IBPB to isolate itself from nested guests.
	 */
	if (cpu_matches(cpu_vuln_blacklist, VMSCAPE) &&
	    !boot_cpu_has(X86_FEATURE_HYPERVISOR))
		setup_force_cpu_bug(X86_BUG_VMSCAPE);

	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
		return;

@ -116,3 +116,14 @@ bool x86_cpu_has_min_microcode_rev(const struct x86_cpu_desc *table)
	return true;
}
EXPORT_SYMBOL_GPL(x86_cpu_has_min_microcode_rev);

bool x86_match_min_microcode_rev(const struct x86_cpu_id *table)
{
	const struct x86_cpu_id *res = x86_match_cpu(table);

	if (!res || res->driver_data > boot_cpu_data.microcode)
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(x86_match_min_microcode_rev);

@ -23,17 +23,22 @@

#include <linux/earlycpio.h>
#include <linux/firmware.h>
#include <linux/bsearch.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <linux/initrd.h>
#include <linux/kernel.h>
#include <linux/pci.h>

#include <crypto/sha2.h>

#include <asm/microcode.h>
#include <asm/processor.h>
#include <asm/cmdline.h>
#include <asm/setup.h>
#include <asm/cpu.h>
#include <asm/msr.h>
#include <asm/tlb.h>

#include "internal.h"

@ -89,6 +94,31 @@ static struct equiv_cpu_table {
	struct equiv_cpu_entry *entry;
} equiv_table;

union zen_patch_rev {
	struct {
		__u32 rev	 : 8,
		      stepping	 : 4,
		      model	 : 4,
		      __reserved : 4,
		      ext_model	 : 4,
		      ext_fam	 : 8;
	};
	__u32 ucode_rev;
};

union cpuid_1_eax {
	struct {
		__u32 stepping    : 4,
		      model	  : 4,
		      family	  : 4,
		      __reserved0 : 4,
		      ext_model   : 4,
		      ext_fam     : 8,
		      __reserved1 : 4;
	};
	__u32 full;
};

/*
 * This points to the current valid container of microcode patches which we will
 * save from the initrd/builtin before jettisoning its contents. @mc is the
@ -96,7 +126,6 @@ static struct equiv_cpu_table {
 */
struct cont_desc {
	struct microcode_amd *mc;
	u32		     cpuid_1_eax;
	u32		     psize;
	u8		     *data;
	size_t		     size;
@ -109,10 +138,149 @@ struct cont_desc {
static const char
ucode_path[] __maybe_unused = "kernel/x86/microcode/AuthenticAMD.bin";

/*
 * This is CPUID(1).EAX on the BSP. It is used in two ways:
 *
 * 1. To ignore the equivalence table on Zen1 and newer.
 *
 * 2. To match which patches to load because the patch revision ID
 *    already contains the f/m/s for which the microcode is destined.
 */
static u32 bsp_cpuid_1_eax __ro_after_init;

static bool sha_check = true;

struct patch_digest {
	u32 patch_id;
	u8 sha256[SHA256_DIGEST_SIZE];
};

#include "amd_shas.c"

static int cmp_id(const void *key, const void *elem)
{
	struct patch_digest *pd = (struct patch_digest *)elem;
	u32 patch_id = *(u32 *)key;

	if (patch_id == pd->patch_id)
		return 0;
	else if (patch_id < pd->patch_id)
		return -1;
	else
		return 1;
}
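
/*
 * The case labels below key off the patch series (patch ID >> 8) and
 * give the last revision of each series that still requires digest
 * verification; anything newer skips the check, while an unknown
 * series is reported and checked unconditionally.
 */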
static bool need_sha_check(u32 cur_rev)
{
	switch (cur_rev >> 8) {
	case 0x80012: return cur_rev <= 0x800126f; break;
	case 0x80082: return cur_rev <= 0x800820f; break;
	case 0x83010: return cur_rev <= 0x830107c; break;
	case 0x86001: return cur_rev <= 0x860010e; break;
	case 0x86081: return cur_rev <= 0x8608108; break;
	case 0x87010: return cur_rev <= 0x8701034; break;
	case 0x8a000: return cur_rev <= 0x8a0000a; break;
	case 0xa0010: return cur_rev <= 0xa00107a; break;
	case 0xa0011: return cur_rev <= 0xa0011da; break;
	case 0xa0012: return cur_rev <= 0xa001243; break;
	case 0xa0082: return cur_rev <= 0xa00820e; break;
	case 0xa1011: return cur_rev <= 0xa101153; break;
	case 0xa1012: return cur_rev <= 0xa10124e; break;
	case 0xa1081: return cur_rev <= 0xa108109; break;
	case 0xa2010: return cur_rev <= 0xa20102f; break;
	case 0xa2012: return cur_rev <= 0xa201212; break;
	case 0xa4041: return cur_rev <= 0xa404109; break;
	case 0xa5000: return cur_rev <= 0xa500013; break;
	case 0xa6012: return cur_rev <= 0xa60120a; break;
	case 0xa7041: return cur_rev <= 0xa704109; break;
	case 0xa7052: return cur_rev <= 0xa705208; break;
	case 0xa7080: return cur_rev <= 0xa708009; break;
	case 0xa70c0: return cur_rev <= 0xa70C009; break;
	case 0xaa001: return cur_rev <= 0xaa00116; break;
	case 0xaa002: return cur_rev <= 0xaa00218; break;
	default: break;
	}

	pr_info("You should not be seeing this. Please send the following couple of lines to x86-<at>-kernel.org\n");
	pr_info("CPUID(1).EAX: 0x%x, current revision: 0x%x\n", bsp_cpuid_1_eax, cur_rev);
	return true;
}
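
/*
 * Digests are only maintained for Zen parts (families 0x17-0x19);
 * everything else passes unconditionally, as does a boot with
 * microcode.amd_sha_check=off, which clears sha_check and taints the
 * kernel.
 */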
static bool verify_sha256_digest(u32 patch_id, u32 cur_rev, const u8 *data, unsigned int len)
{
	struct patch_digest *pd = NULL;
	u8 digest[SHA256_DIGEST_SIZE];
	struct sha256_state s;
	int i;

	if (x86_family(bsp_cpuid_1_eax) < 0x17 ||
	    x86_family(bsp_cpuid_1_eax) > 0x19)
		return true;

	if (!need_sha_check(cur_rev))
		return true;

	if (!sha_check)
		return true;

	pd = bsearch(&patch_id, phashes, ARRAY_SIZE(phashes), sizeof(struct patch_digest), cmp_id);
	if (!pd) {
		pr_err("No sha256 digest for patch ID: 0x%x found\n", patch_id);
		return false;
	}

	sha256_init(&s);
	sha256_update(&s, data, len);
	sha256_final(&s, digest);

	if (memcmp(digest, pd->sha256, sizeof(digest))) {
		pr_err("Patch 0x%x SHA256 digest mismatch!\n", patch_id);

		for (i = 0; i < SHA256_DIGEST_SIZE; i++)
			pr_cont("0x%x ", digest[i]);
		pr_info("\n");

		return false;
	}

	return true;
}

static u32 get_patch_level(void)
{
	u32 rev, dummy __always_unused;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	return rev;
}

static union cpuid_1_eax ucode_rev_to_cpuid(unsigned int val)
{
	union zen_patch_rev p;
	union cpuid_1_eax c;

	p.ucode_rev = val;
	c.full = 0;

	c.stepping  = p.stepping;
	c.model     = p.model;
	c.ext_model = p.ext_model;
	c.family    = 0xf;
	c.ext_fam   = p.ext_fam;

	return c;
}
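
/*
 * Worked example: patch ID 0x0a201210 carries rev=0x10, stepping=2,
 * model=1, ext_model=2, ext_fam=0x0a, which the helper above maps to
 * CPUID(1).EAX = 0x00a20f12, i.e. family 0x19 (0xf + 0x0a), model 0x21
 * -- the f/m/s the patch is destined for.
 */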

static u16 find_equiv_id(struct equiv_cpu_table *et, u32 sig)
{
	unsigned int i;

	/* Zen and newer do not need an equivalence table. */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
		return 0;

	if (!et || !et->num_entries)
		return 0;

@ -159,6 +327,10 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size)
	if (!verify_container(buf, buf_size))
		return false;

	/* Zen and newer do not need an equivalence table. */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
		return true;

	cont_type = hdr[1];
	if (cont_type != UCODE_EQUIV_CPU_TABLE_TYPE) {
		pr_debug("Wrong microcode container equivalence table type: %u.\n",
@ -185,8 +357,7 @@ static bool verify_equivalence_table(const u8 *buf, size_t buf_size)
 * On success, @sh_psize returns the patch size according to the section header,
 * to the caller.
 */
static bool
__verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
static bool __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
{
	u32 p_type, p_size;
	const u32 *hdr;
@ -222,8 +393,9 @@ __verify_patch_section(const u8 *buf, size_t buf_size, u32 *sh_psize)
 * exceed the per-family maximum). @sh_psize is the size read from the section
 * header.
 */
static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size)
static unsigned int __verify_patch_size(u32 sh_psize, size_t buf_size)
{
	u8 family = x86_family(bsp_cpuid_1_eax);
	u32 max_size;

	if (family >= 0x15)
@ -258,9 +430,9 @@ static unsigned int __verify_patch_size(u8 family, u32 sh_psize, size_t buf_size
 *  positive: patch is not for this family, skip it
 *  0: success
 */
static int
verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size)
static int verify_patch(const u8 *buf, size_t buf_size, u32 *patch_size)
{
	u8 family = x86_family(bsp_cpuid_1_eax);
	struct microcode_header_amd *mc_hdr;
	unsigned int ret;
	u32 sh_psize;
@ -286,7 +458,7 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size)
		return -1;
	}

	ret = __verify_patch_size(family, sh_psize, buf_size);
	ret = __verify_patch_size(sh_psize, buf_size);
	if (!ret) {
		pr_debug("Per-family patch size mismatch.\n");
		return -1;
@ -308,6 +480,15 @@ verify_patch(u8 family, const u8 *buf, size_t buf_size, u32 *patch_size)
	return 0;
}
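
/*
 * mc_patch_matches() below compares Zen and newer patches by the f/m/s
 * encoded in their patch ID against the boot CPU's CPUID(1).EAX, while
 * pre-Zen parts still match on the equivalence-table ID from the patch
 * header.
 */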

static bool mc_patch_matches(struct microcode_amd *mc, u16 eq_id)
{
	/* Zen and newer do not need an equivalence table. */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
		return ucode_rev_to_cpuid(mc->hdr.patch_id).full == bsp_cpuid_1_eax;
	else
		return eq_id == mc->hdr.processor_rev_id;
}

/*
 * This scans the ucode blob for the proper container as we can have multiple
 * containers glued together. Returns the equivalence ID from the equivalence
@ -336,7 +517,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
	 * doesn't contain a patch for the CPU, scan through the whole container
	 * so that it can be skipped in case there are other containers appended.
	 */
	eq_id = find_equiv_id(&table, desc->cpuid_1_eax);
	eq_id = find_equiv_id(&table, bsp_cpuid_1_eax);

	buf  += hdr[2] + CONTAINER_HDR_SZ;
	size -= hdr[2] + CONTAINER_HDR_SZ;
@ -350,7 +531,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
		u32 patch_size;
		int ret;

		ret = verify_patch(x86_family(desc->cpuid_1_eax), buf, size, &patch_size);
		ret = verify_patch(buf, size, &patch_size);
		if (ret < 0) {
			/*
			 * Patch verification failed, skip to the next container, if
@ -363,7 +544,7 @@ static size_t parse_container(u8 *ucode, size_t size, struct cont_desc *desc)
		}

		mc = (struct microcode_amd *)(buf + SECTION_HDR_SIZE);
		if (eq_id == mc->hdr.processor_rev_id) {
		if (mc_patch_matches(mc, eq_id)) {
			desc->psize = patch_size;
			desc->mc = mc;
		}
@ -413,59 +594,41 @@ static void scan_containers(u8 *ucode, size_t size, struct cont_desc *desc)
	}
}

static int __apply_microcode_amd(struct microcode_amd *mc)
static bool __apply_microcode_amd(struct microcode_amd *mc, u32 *cur_rev,
				  unsigned int psize)
{
	u32 rev, dummy;
	unsigned long p_addr = (unsigned long)&mc->hdr.data_code;

	native_wrmsrl(MSR_AMD64_PATCH_LOADER, (u64)(long)&mc->hdr.data_code);
	if (!verify_sha256_digest(mc->hdr.patch_id, *cur_rev, (const u8 *)p_addr, psize))
		return false;

	/* verify patch application was successful */
	native_rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);
	if (rev != mc->hdr.patch_id)
		return -1;
	native_wrmsrl(MSR_AMD64_PATCH_LOADER, p_addr);

	return 0;
}
	if (x86_family(bsp_cpuid_1_eax) == 0x17) {
		unsigned long p_addr_end = p_addr + psize - 1;

/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in initrd, traverse equivalent cpu table, look for a
 * matching microcode patch, and update, all in initrd memory in place.
 * When vmalloc() is available for use later -- on 64-bit during first AP load,
 * and on 32-bit during save_microcode_in_initrd_amd() -- we can call
 * load_microcode_amd() to save equivalent cpu table and microcode patches in
 * kernel heap memory.
 *
 * Returns true if container found (sets @desc), false otherwise.
 */
static bool early_apply_microcode(u32 cpuid_1_eax, u32 old_rev, void *ucode, size_t size)
{
	struct cont_desc desc = { 0 };
	struct microcode_amd *mc;
	bool ret = false;

	desc.cpuid_1_eax = cpuid_1_eax;

	scan_containers(ucode, size, &desc);

	mc = desc.mc;
	if (!mc)
		return ret;
		invlpg(p_addr);

	/*
	 * Allow application of the same revision to pick up SMT-specific
	 * changes even if the revision of the other SMT thread is already
	 * up-to-date.
		 * Flush next page too if patch image is crossing a page
		 * boundary.
	 */
	if (old_rev > mc->hdr.patch_id)
		return ret;
		if (p_addr >> PAGE_SHIFT != p_addr_end >> PAGE_SHIFT)
			invlpg(p_addr_end);
	}

	return !__apply_microcode_amd(mc);
	/* verify patch application was successful */
	*cur_rev = get_patch_level();
	if (*cur_rev != mc->hdr.patch_id)
		return false;

	return true;
}

static bool get_builtin_microcode(struct cpio_data *cp, u8 family)
static bool get_builtin_microcode(struct cpio_data *cp)
{
	char fw_name[36] = "amd-ucode/microcode_amd.bin";
	u8 family = x86_family(bsp_cpuid_1_eax);
	struct firmware fw;

	if (IS_ENABLED(CONFIG_X86_32))
@ -484,85 +647,144 @@ static bool get_builtin_microcode(struct cpio_data *cp, u8 family)
	return false;
}

static void __init find_blobs_in_containers(unsigned int cpuid_1_eax, struct cpio_data *ret)
static bool __init find_blobs_in_containers(struct cpio_data *ret)
{
	struct cpio_data cp;
	bool found;

	if (!get_builtin_microcode(&cp, x86_family(cpuid_1_eax)))
	if (!get_builtin_microcode(&cp))
		cp = find_microcode_in_initrd(ucode_path);

	found = cp.data && cp.size;
	if (found)
		*ret = cp;

	return found;
}

/*
 * Early load occurs before we can vmalloc(). So we look for the microcode
 * patch container file in initrd, traverse equivalent cpu table, look for a
 * matching microcode patch, and update, all in initrd memory in place.
 * When vmalloc() is available for use later -- on 64-bit during first AP load,
 * and on 32-bit during save_microcode_in_initrd() -- we can call
 * load_microcode_amd() to save equivalent cpu table and microcode patches in
 * kernel heap memory.
 */
void __init load_ucode_amd_bsp(struct early_load_data *ed, unsigned int cpuid_1_eax)
{
	struct cont_desc desc = { };
	struct microcode_amd *mc;
	struct cpio_data cp = { };
	u32 dummy;
	char buf[4];
	u32 rev;

	native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->old_rev, dummy);
	if (cmdline_find_option(boot_command_line, "microcode.amd_sha_check", buf, 4)) {
		if (!strncmp(buf, "off", 3)) {
			sha_check = false;
			pr_warn_once("It is a very very bad idea to disable the blobs SHA check!\n");
			add_taint(TAINT_CPU_OUT_OF_SPEC, LOCKDEP_STILL_OK);
		}
	}

	bsp_cpuid_1_eax = cpuid_1_eax;

	rev = get_patch_level();
	ed->old_rev = rev;

	/* Needed in load_microcode_amd() */
	ucode_cpu_info[0].cpu_sig.sig = cpuid_1_eax;

	find_blobs_in_containers(cpuid_1_eax, &cp);
	if (!(cp.data && cp.size))
	if (!find_blobs_in_containers(&cp))
		return;

	if (early_apply_microcode(cpuid_1_eax, ed->old_rev, cp.data, cp.size))
		native_rdmsr(MSR_AMD64_PATCH_LEVEL, ed->new_rev, dummy);
}

static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size);

static int __init save_microcode_in_initrd(void)
{
	unsigned int cpuid_1_eax = native_cpuid_eax(1);
	struct cpuinfo_x86 *c = &boot_cpu_data;
	struct cont_desc desc = { 0 };
	enum ucode_state ret;
	struct cpio_data cp;

	if (dis_ucode_ldr || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
		return 0;

	find_blobs_in_containers(cpuid_1_eax, &cp);
	if (!(cp.data && cp.size))
		return -EINVAL;

	desc.cpuid_1_eax = cpuid_1_eax;

	scan_containers(cp.data, cp.size, &desc);
	if (!desc.mc)
		return -EINVAL;

	ret = load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
	if (ret > UCODE_UPDATED)
		return -EINVAL;
	mc = desc.mc;
	if (!mc)
		return;

	return 0;
	/*
	 * Allow application of the same revision to pick up SMT-specific
	 * changes even if the revision of the other SMT thread is already
	 * up-to-date.
	 */
	if (ed->old_rev > mc->hdr.patch_id)
		return;

	if (__apply_microcode_amd(mc, &rev, desc.psize))
		ed->new_rev = rev;
}

static inline bool patch_cpus_equivalent(struct ucode_patch *p,
					 struct ucode_patch *n,
					 bool ignore_stepping)
{
	/* Zen and newer hardcode the f/m/s in the patch ID */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17) {
		union cpuid_1_eax p_cid = ucode_rev_to_cpuid(p->patch_id);
		union cpuid_1_eax n_cid = ucode_rev_to_cpuid(n->patch_id);

		if (ignore_stepping) {
			p_cid.stepping = 0;
			n_cid.stepping = 0;
		}

		return p_cid.full == n_cid.full;
	} else {
		return p->equiv_cpu == n->equiv_cpu;
	}
}
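
/*
 * Example: patch IDs 0x0a201210 and 0x0a201316 differ in stepping (2
 * vs. 3), but with @ignore_stepping=true both decode to the same
 * family/model and are therefore treated as equivalent.
 */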
early_initcall(save_microcode_in_initrd);

/*
 * a small, trivial cache of per-family ucode patches
 */
static struct ucode_patch *cache_find_patch(u16 equiv_cpu)
static struct ucode_patch *cache_find_patch(struct ucode_cpu_info *uci, u16 equiv_cpu)
{
	struct ucode_patch *p;
	struct ucode_patch n;

	n.equiv_cpu = equiv_cpu;
	n.patch_id  = uci->cpu_sig.rev;

	WARN_ON_ONCE(!n.patch_id);

	list_for_each_entry(p, &microcode_cache, plist)
		if (p->equiv_cpu == equiv_cpu)
		if (patch_cpus_equivalent(p, &n, false))
			return p;

	return NULL;
}

static inline int patch_newer(struct ucode_patch *p, struct ucode_patch *n)
{
	/* Zen and newer hardcode the f/m/s in the patch ID */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17) {
		union zen_patch_rev zp, zn;

		zp.ucode_rev = p->patch_id;
		zn.ucode_rev = n->patch_id;

		if (zn.stepping != zp.stepping)
			return -1;

		return zn.rev > zp.rev;
	} else {
		return n->patch_id > p->patch_id;
	}
}
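
/*
 * Example: comparing cached 0x0a201210 against new 0x0a201211, the
 * steppings match and 0x11 > 0x10, so the new patch wins; differing
 * steppings return -1, which makes update_cache() below keep both
 * entries side by side.
 */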

static void update_cache(struct ucode_patch *new_patch)
{
	struct ucode_patch *p;
	int ret;

	list_for_each_entry(p, &microcode_cache, plist) {
		if (p->equiv_cpu == new_patch->equiv_cpu) {
			if (p->patch_id >= new_patch->patch_id) {
		if (patch_cpus_equivalent(p, new_patch, true)) {
			ret = patch_newer(p, new_patch);
			if (ret < 0)
				continue;
			else if (!ret) {
				/* we already have the latest patch */
				kfree(new_patch->data);
				kfree(new_patch);
@ -593,13 +815,17 @@ static void free_cache(void)
static struct ucode_patch *find_patch(unsigned int cpu)
{
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	u16 equiv_id;
	u16 equiv_id = 0;

	uci->cpu_sig.rev = get_patch_level();

	if (x86_family(bsp_cpuid_1_eax) < 0x17) {
		equiv_id = find_equiv_id(&equiv_table, uci->cpu_sig.sig);
		if (!equiv_id)
			return NULL;
	}

	return cache_find_patch(equiv_id);
	return cache_find_patch(uci, equiv_id);
}

void reload_ucode_amd(unsigned int cpu)
@ -614,22 +840,20 @@ void reload_ucode_amd(unsigned int cpu)

	mc = p->data;

	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	rev = get_patch_level();
	if (rev < mc->hdr.patch_id) {
		if (!__apply_microcode_amd(mc))
			pr_info_once("reload revision: 0x%08x\n", mc->hdr.patch_id);
		if (__apply_microcode_amd(mc, &rev, p->size))
			pr_info_once("reload revision: 0x%08x\n", rev);
	}
}

static int collect_cpu_info_amd(int cpu, struct cpu_signature *csig)
{
	struct cpuinfo_x86 *c = &cpu_data(cpu);
	struct ucode_cpu_info *uci = ucode_cpu_info + cpu;
	struct ucode_patch *p;

	csig->sig = cpuid_eax(0x00000001);
	csig->rev = c->microcode;
	csig->rev = get_patch_level();

	/*
	 * a patch could have been loaded early, set uci->mc so that
@ -649,7 +873,7 @@ static enum ucode_state apply_microcode_amd(int cpu)
	struct ucode_cpu_info *uci;
	struct ucode_patch *p;
	enum ucode_state ret;
	u32 rev, dummy __always_unused;
	u32 rev;

	BUG_ON(raw_smp_processor_id() != cpu);

@ -659,18 +883,18 @@ static enum ucode_state apply_microcode_amd(int cpu)
	if (!p)
		return UCODE_NFOUND;

	rev = uci->cpu_sig.rev;

	mc_amd  = p->data;
	uci->mc = p->data;

	rdmsr(MSR_AMD64_PATCH_LEVEL, rev, dummy);

	/* need to apply patch? */
	if (rev > mc_amd->hdr.patch_id) {
		ret = UCODE_OK;
		goto out;
	}

	if (__apply_microcode_amd(mc_amd)) {
	if (!__apply_microcode_amd(mc_amd, &rev, p->size)) {
		pr_err("CPU%d: update failed for patch_level=0x%08x\n",
		       cpu, mc_amd->hdr.patch_id);
		return UCODE_ERROR;
@ -709,6 +933,10 @@ static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size)
	hdr = (const u32 *)buf;
	equiv_tbl_len = hdr[2];

	/* Zen and newer do not need an equivalence table. */
	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
		goto out;

	equiv_table.entry = vmalloc(equiv_tbl_len);
	if (!equiv_table.entry) {
		pr_err("failed to allocate equivalent CPU table\n");
@ -718,12 +946,16 @@ static size_t install_equiv_cpu_table(const u8 *buf, size_t buf_size)
	memcpy(equiv_table.entry, buf + CONTAINER_HDR_SZ, equiv_tbl_len);
	equiv_table.num_entries = equiv_tbl_len / sizeof(struct equiv_cpu_entry);

out:
	/* add header length */
	return equiv_tbl_len + CONTAINER_HDR_SZ;
}

static void free_equiv_cpu_table(void)
{
	if (x86_family(bsp_cpuid_1_eax) >= 0x17)
		return;

	vfree(equiv_table.entry);
	memset(&equiv_table, 0, sizeof(equiv_table));
}
@ -749,7 +981,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
	u16 proc_id;
	int ret;

	ret = verify_patch(family, fw, leftover, patch_size);
	ret = verify_patch(fw, leftover, patch_size);
	if (ret)
		return ret;

@ -774,7 +1006,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
	patch->patch_id  = mc_hdr->patch_id;
	patch->equiv_cpu = proc_id;

	pr_debug("%s: Added patch_id: 0x%08x, proc_id: 0x%04x\n",
	pr_debug("%s: Adding patch_id: 0x%08x, proc_id: 0x%04x\n",
		 __func__, patch->patch_id, proc_id);

	/* ... and add to cache. */
@ -784,8 +1016,7 @@ static int verify_and_add_patch(u8 family, u8 *fw, unsigned int leftover,
}

/* Scan the blob in @data and add microcode patches to the cache. */
static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
					     size_t size)
static enum ucode_state __load_microcode_amd(u8 family, const u8 *data, size_t size)
{
	u8 *fw = (u8 *)data;
	size_t offset;
@ -818,6 +1049,20 @@ static enum ucode_state __load_microcode_amd(u8 family, const u8 *data,
	return UCODE_OK;
}

static enum ucode_state _load_microcode_amd(u8 family, const u8 *data, size_t size)
{
	enum ucode_state ret;

	/* free old equiv table */
	free_equiv_cpu_table();

	ret = __load_microcode_amd(family, data, size);
	if (ret != UCODE_OK)
		cleanup();

	return ret;
}

static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t size)
{
	struct cpuinfo_x86 *c;
@ -825,14 +1070,9 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz
	struct ucode_patch *p;
	enum ucode_state ret;

	/* free old equiv table */
	free_equiv_cpu_table();

	ret = __load_microcode_amd(family, data, size);
	if (ret != UCODE_OK) {
		cleanup();
	ret = _load_microcode_amd(family, data, size);
	if (ret != UCODE_OK)
		return ret;
	}

	for_each_node_with_cpus(nid) {
		cpu = cpumask_first(cpumask_of_node(nid));
@ -851,6 +1091,34 @@ static enum ucode_state load_microcode_amd(u8 family, const u8 *data, size_t siz
	return ret;
}

static int __init save_microcode_in_initrd(void)
{
	struct cpuinfo_x86 *c = &boot_cpu_data;
	struct cont_desc desc = { 0 };
	unsigned int cpuid_1_eax;
	enum ucode_state ret;
	struct cpio_data cp;

	if (microcode_loader_disabled() || c->x86_vendor != X86_VENDOR_AMD || c->x86 < 0x10)
		return 0;

	cpuid_1_eax = native_cpuid_eax(1);

	if (!find_blobs_in_containers(&cp))
		return -EINVAL;

	scan_containers(cp.data, cp.size, &desc);
	if (!desc.mc)
		return -EINVAL;

	ret = _load_microcode_amd(x86_family(cpuid_1_eax), desc.data, desc.size);
	if (ret > UCODE_UPDATED)
		return -EINVAL;

	return 0;
}
early_initcall(save_microcode_in_initrd);
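
/*
 * As an early_initcall, this runs once vmalloc() is usable, so the
 * matching container can be copied into the kernel-heap patch cache
 * before the initrd memory is jettisoned.
 */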

/*
 * AMD microcode firmware naming convention, up to family 15h they are in
 * the legacy file:

arch/x86/kernel/cpu/microcode/amd_shas.c (new file, 556 lines)
@ -0,0 +1,556 @@
/* Keep 'em sorted. */
static const struct patch_digest phashes[] = {
	{ 0x8001227, {
		0x99,0xc0,0x9b,0x2b,0xcc,0x9f,0x52,0x1b,
		0x1a,0x5f,0x1d,0x83,0xa1,0x6c,0xc4,0x46,
		0xe2,0x6c,0xda,0x73,0xfb,0x2d,0x23,0xa8,
		0x77,0xdc,0x15,0x31,0x33,0x4a,0x46,0x18,
		}
	},
	{ 0x8001250, {
		0xc0,0x0b,0x6b,0x19,0xfd,0x5c,0x39,0x60,
		0xd5,0xc3,0x57,0x46,0x54,0xe4,0xd1,0xaa,
		0xa8,0xf7,0x1f,0xa8,0x6a,0x60,0x3e,0xe3,
		0x27,0x39,0x8e,0x53,0x30,0xf8,0x49,0x19,
		}
	},
	{ 0x800126e, {
		0xf3,0x8b,0x2b,0xb6,0x34,0xe3,0xc8,0x2c,
		0xef,0xec,0x63,0x6d,0xc8,0x76,0x77,0xb3,
		0x25,0x5a,0xb7,0x52,0x8c,0x83,0x26,0xe6,
		0x4c,0xbe,0xbf,0xe9,0x7d,0x22,0x6a,0x43,
		}
	},
	{ 0x800126f, {
		0x2b,0x5a,0xf2,0x9c,0xdd,0xd2,0x7f,0xec,
		0xec,0x96,0x09,0x57,0xb0,0x96,0x29,0x8b,
		0x2e,0x26,0x91,0xf0,0x49,0x33,0x42,0x18,
		0xdd,0x4b,0x65,0x5a,0xd4,0x15,0x3d,0x33,
		}
	},
	{ 0x800820d, {
		0x68,0x98,0x83,0xcd,0x22,0x0d,0xdd,0x59,
		0x73,0x2c,0x5b,0x37,0x1f,0x84,0x0e,0x67,
		0x96,0x43,0x83,0x0c,0x46,0x44,0xab,0x7c,
		0x7b,0x65,0x9e,0x57,0xb5,0x90,0x4b,0x0e,
		}
	},
	{ 0x8301025, {
		0xe4,0x7d,0xdb,0x1e,0x14,0xb4,0x5e,0x36,
		0x8f,0x3e,0x48,0x88,0x3c,0x6d,0x76,0xa1,
		0x59,0xc6,0xc0,0x72,0x42,0xdf,0x6c,0x30,
		0x6f,0x0b,0x28,0x16,0x61,0xfc,0x79,0x77,
		}
	},
	{ 0x8301055, {
		0x81,0x7b,0x99,0x1b,0xae,0x2d,0x4f,0x9a,
		0xef,0x13,0xce,0xb5,0x10,0xaf,0x6a,0xea,
		0xe5,0xb0,0x64,0x98,0x10,0x68,0x34,0x3b,
		0x9d,0x7a,0xd6,0x22,0x77,0x5f,0xb3,0x5b,
		}
	},
	{ 0x8301072, {
		0xcf,0x76,0xa7,0x1a,0x49,0xdf,0x2a,0x5e,
		0x9e,0x40,0x70,0xe5,0xdd,0x8a,0xa8,0x28,
		0x20,0xdc,0x91,0xd8,0x2c,0xa6,0xa0,0xb1,
		0x2d,0x22,0x26,0x94,0x4b,0x40,0x85,0x30,
		}
	},
	{ 0x830107a, {
		0x2a,0x65,0x8c,0x1a,0x5e,0x07,0x21,0x72,
		0xdf,0x90,0xa6,0x51,0x37,0xd3,0x4b,0x34,
		0xc4,0xda,0x03,0xe1,0x8a,0x6c,0xfb,0x20,
		0x04,0xb2,0x81,0x05,0xd4,0x87,0xf4,0x0a,
		}
	},
	{ 0x830107b, {
		0xb3,0x43,0x13,0x63,0x56,0xc1,0x39,0xad,
		0x10,0xa6,0x2b,0xcc,0x02,0xe6,0x76,0x2a,
		0x1e,0x39,0x58,0x3e,0x23,0x6e,0xa4,0x04,
		0x95,0xea,0xf9,0x6d,0xc2,0x8a,0x13,0x19,
		}
	},
	{ 0x830107c, {
		0x21,0x64,0xde,0xfb,0x9f,0x68,0x96,0x47,
		0x70,0x5c,0xe2,0x8f,0x18,0x52,0x6a,0xac,
		0xa4,0xd2,0x2e,0xe0,0xde,0x68,0x66,0xc3,
		0xeb,0x1e,0xd3,0x3f,0xbc,0x51,0x1d,0x38,
		}
	},
	{ 0x860010d, {
		0x86,0xb6,0x15,0x83,0xbc,0x3b,0x9c,0xe0,
		0xb3,0xef,0x1d,0x99,0x84,0x35,0x15,0xf7,
		0x7c,0x2a,0xc6,0x42,0xdb,0x73,0x07,0x5c,
		0x7d,0xc3,0x02,0xb5,0x43,0x06,0x5e,0xf8,
		}
	},
	{ 0x8608108, {
		0x14,0xfe,0x57,0x86,0x49,0xc8,0x68,0xe2,
		0x11,0xa3,0xcb,0x6e,0xff,0x6e,0xd5,0x38,
		0xfe,0x89,0x1a,0xe0,0x67,0xbf,0xc4,0xcc,
		0x1b,0x9f,0x84,0x77,0x2b,0x9f,0xaa,0xbd,
		}
	},
	{ 0x8701034, {
		0xc3,0x14,0x09,0xa8,0x9c,0x3f,0x8d,0x83,
		0x9b,0x4c,0xa5,0xb7,0x64,0x8b,0x91,0x5d,
		0x85,0x6a,0x39,0x26,0x1e,0x14,0x41,0xa8,
		0x75,0xea,0xa6,0xf9,0xc9,0xd1,0xea,0x2b,
		}
	},
	{ 0x8a00008, {
		0xd7,0x2a,0x93,0xdc,0x05,0x2f,0xa5,0x6e,
		0x0c,0x61,0x2c,0x07,0x9f,0x38,0xe9,0x8e,
		0xef,0x7d,0x2a,0x05,0x4d,0x56,0xaf,0x72,
		0xe7,0x56,0x47,0x6e,0x60,0x27,0xd5,0x8c,
		}
	},
	{ 0x8a0000a, {
		0x73,0x31,0x26,0x22,0xd4,0xf9,0xee,0x3c,
		0x07,0x06,0xe7,0xb9,0xad,0xd8,0x72,0x44,
		0x33,0x31,0xaa,0x7d,0xc3,0x67,0x0e,0xdb,
		0x47,0xb5,0xaa,0xbc,0xf5,0xbb,0xd9,0x20,
		}
	},
	{ 0xa00104c, {
		0x3c,0x8a,0xfe,0x04,0x62,0xd8,0x6d,0xbe,
		0xa7,0x14,0x28,0x64,0x75,0xc0,0xa3,0x76,
		0xb7,0x92,0x0b,0x97,0x0a,0x8e,0x9c,0x5b,
		0x1b,0xc8,0x9d,0x3a,0x1e,0x81,0x3d,0x3b,
		}
	},
	{ 0xa00104e, {
		0xc4,0x35,0x82,0x67,0xd2,0x86,0xe5,0xb2,
		0xfd,0x69,0x12,0x38,0xc8,0x77,0xba,0xe0,
		0x70,0xf9,0x77,0x89,0x10,0xa6,0x74,0x4e,
		0x56,0x58,0x13,0xf5,0x84,0x70,0x28,0x0b,
		}
	},
	{ 0xa001053, {
		0x92,0x0e,0xf4,0x69,0x10,0x3b,0xf9,0x9d,
		0x31,0x1b,0xa6,0x99,0x08,0x7d,0xd7,0x25,
		0x7e,0x1e,0x89,0xba,0x35,0x8d,0xac,0xcb,
		0x3a,0xb4,0xdf,0x58,0x12,0xcf,0xc0,0xc3,
		}
	},
	{ 0xa001058, {
		0x33,0x7d,0xa9,0xb5,0x4e,0x62,0x13,0x36,
		0xef,0x66,0xc9,0xbd,0x0a,0xa6,0x3b,0x19,
		0xcb,0xf5,0xc2,0xc3,0x55,0x47,0x20,0xec,
		0x1f,0x7b,0xa1,0x44,0x0e,0x8e,0xa4,0xb2,
		}
	},
	{ 0xa001075, {
		0x39,0x02,0x82,0xd0,0x7c,0x26,0x43,0xe9,
		0x26,0xa3,0xd9,0x96,0xf7,0x30,0x13,0x0a,
		0x8a,0x0e,0xac,0xe7,0x1d,0xdc,0xe2,0x0f,
		0xcb,0x9e,0x8d,0xbc,0xd2,0xa2,0x44,0xe0,
		}
	},
	{ 0xa001078, {
		0x2d,0x67,0xc7,0x35,0xca,0xef,0x2f,0x25,
		0x4c,0x45,0x93,0x3f,0x36,0x01,0x8c,0xce,
		0xa8,0x5b,0x07,0xd3,0xc1,0x35,0x3c,0x04,
		0x20,0xa2,0xfc,0xdc,0xe6,0xce,0x26,0x3e,
		}
	},
	{ 0xa001079, {
		0x43,0xe2,0x05,0x9c,0xfd,0xb7,0x5b,0xeb,
		0x5b,0xe9,0xeb,0x3b,0x96,0xf4,0xe4,0x93,
		0x73,0x45,0x3e,0xac,0x8d,0x3b,0xe4,0xdb,
		0x10,0x31,0xc1,0xe4,0xa2,0xd0,0x5a,0x8a,
		}
	},
	{ 0xa00107a, {
		0x5f,0x92,0xca,0xff,0xc3,0x59,0x22,0x5f,
		0x02,0xa0,0x91,0x3b,0x4a,0x45,0x10,0xfd,
		0x19,0xe1,0x8a,0x6d,0x9a,0x92,0xc1,0x3f,
		0x75,0x78,0xac,0x78,0x03,0x1d,0xdb,0x18,
		}
	},
	{ 0xa001143, {
		0x56,0xca,0xf7,0x43,0x8a,0x4c,0x46,0x80,
		0xec,0xde,0xe5,0x9c,0x50,0x84,0x9a,0x42,
		0x27,0xe5,0x51,0x84,0x8f,0x19,0xc0,0x8d,
		0x0c,0x25,0xb4,0xb0,0x8f,0x10,0xf3,0xf8,
		}
	},
	{ 0xa001144, {
		0x42,0xd5,0x9b,0xa7,0xd6,0x15,0x29,0x41,
		0x61,0xc4,0x72,0x3f,0xf3,0x06,0x78,0x4b,
		0x65,0xf3,0x0e,0xfa,0x9c,0x87,0xde,0x25,
		0xbd,0xb3,0x9a,0xf4,0x75,0x13,0x53,0xdc,
		}
	},
	{ 0xa00115d, {
		0xd4,0xc4,0x49,0x36,0x89,0x0b,0x47,0xdd,
		0xfb,0x2f,0x88,0x3b,0x5f,0xf2,0x8e,0x75,
		0xc6,0x6c,0x37,0x5a,0x90,0x25,0x94,0x3e,
		0x36,0x9c,0xae,0x02,0x38,0x6c,0xf5,0x05,
		}
	},
	{ 0xa001173, {
		0x28,0xbb,0x9b,0xd1,0xa0,0xa0,0x7e,0x3a,
		0x59,0x20,0xc0,0xa9,0xb2,0x5c,0xc3,0x35,
		0x53,0x89,0xe1,0x4c,0x93,0x2f,0x1d,0xc3,
		0xe5,0xf7,0xf3,0xc8,0x9b,0x61,0xaa,0x9e,
		}
	},
	{ 0xa0011a8, {
		0x97,0xc6,0x16,0x65,0x99,0xa4,0x85,0x3b,
		0xf6,0xce,0xaa,0x49,0x4a,0x3a,0xc5,0xb6,
		0x78,0x25,0xbc,0x53,0xaf,0x5d,0xcf,0xf4,
		0x23,0x12,0xbb,0xb1,0xbc,0x8a,0x02,0x2e,
		}
	},
	{ 0xa0011ce, {
		0xcf,0x1c,0x90,0xa3,0x85,0x0a,0xbf,0x71,
		0x94,0x0e,0x80,0x86,0x85,0x4f,0xd7,0x86,
		0xae,0x38,0x23,0x28,0x2b,0x35,0x9b,0x4e,
		0xfe,0xb8,0xcd,0x3d,0x3d,0x39,0xc9,0x6a,
		}
	},
	{ 0xa0011d1, {
		0xdf,0x0e,0xca,0xde,0xf6,0xce,0x5c,0x1e,
		0x4c,0xec,0xd7,0x71,0x83,0xcc,0xa8,0x09,
		0xc7,0xc5,0xfe,0xb2,0xf7,0x05,0xd2,0xc5,
		0x12,0xdd,0xe4,0xf3,0x92,0x1c,0x3d,0xb8,
		}
	},
	{ 0xa0011d3, {
		0x91,0xe6,0x10,0xd7,0x57,0xb0,0x95,0x0b,
		0x9a,0x24,0xee,0xf7,0xcf,0x56,0xc1,0xa6,
		0x4a,0x52,0x7d,0x5f,0x9f,0xdf,0xf6,0x00,
		0x65,0xf7,0xea,0xe8,0x2a,0x88,0xe2,0x26,
		}
	},
	{ 0xa0011d5, {
		0xed,0x69,0x89,0xf4,0xeb,0x64,0xc2,0x13,
		0xe0,0x51,0x1f,0x03,0x26,0x52,0x7d,0xb7,
		0x93,0x5d,0x65,0xca,0xb8,0x12,0x1d,0x62,
		0x0d,0x5b,0x65,0x34,0x69,0xb2,0x62,0x21,
		}
	},
	{ 0xa0011d7, {
		0x35,0x07,0xcd,0x40,0x94,0xbc,0x81,0x6b,
		0xfc,0x61,0x56,0x1a,0xe2,0xdb,0x96,0x12,
		0x1c,0x1c,0x31,0xb1,0x02,0x6f,0xe5,0xd2,
		0xfe,0x1b,0x04,0x03,0x2c,0x8f,0x4c,0x36,
		}
	},
	{ 0xa001223, {
		0xfb,0x32,0x5f,0xc6,0x83,0x4f,0x8c,0xb8,
		0xa4,0x05,0xf9,0x71,0x53,0x01,0x16,0xc4,
		0x83,0x75,0x94,0xdd,0xeb,0x7e,0xb7,0x15,
		0x8e,0x3b,0x50,0x29,0x8a,0x9c,0xcc,0x45,
		}
	},
	{ 0xa001224, {
		0x0e,0x0c,0xdf,0xb4,0x89,0xee,0x35,0x25,
		0xdd,0x9e,0xdb,0xc0,0x69,0x83,0x0a,0xad,
		0x26,0xa9,0xaa,0x9d,0xfc,0x3c,0xea,0xf9,
		0x6c,0xdc,0xd5,0x6d,0x8b,0x6e,0x85,0x4a,
		}
	},
	{ 0xa001227, {
		0xab,0xc6,0x00,0x69,0x4b,0x50,0x87,0xad,
		0x5f,0x0e,0x8b,0xea,0x57,0x38,0xce,0x1d,
		0x0f,0x75,0x26,0x02,0xf6,0xd6,0x96,0xe9,
		0x87,0xb9,0xd6,0x20,0x27,0x7c,0xd2,0xe0,
		}
	},
	{ 0xa001229, {
		0x7f,0x49,0x49,0x48,0x46,0xa5,0x50,0xa6,
		0x28,0x89,0x98,0xe2,0x9e,0xb4,0x7f,0x75,
		0x33,0xa7,0x04,0x02,0xe4,0x82,0xbf,0xb4,
		0xa5,0x3a,0xba,0x24,0x8d,0x31,0x10,0x1d,
		}
	},
	{ 0xa00122e, {
		0x56,0x94,0xa9,0x5d,0x06,0x68,0xfe,0xaf,
		0xdf,0x7a,0xff,0x2d,0xdf,0x74,0x0f,0x15,
		0x66,0xfb,0x00,0xb5,0x51,0x97,0x9b,0xfa,
		0xcb,0x79,0x85,0x46,0x25,0xb4,0xd2,0x10,
		}
	},
	{ 0xa001231, {
		0x0b,0x46,0xa5,0xfc,0x18,0x15,0xa0,0x9e,
		0xa6,0xdc,0xb7,0xff,0x17,0xf7,0x30,0x64,
		0xd4,0xda,0x9e,0x1b,0xc3,0xfc,0x02,0x3b,
		0xe2,0xc6,0x0e,0x41,0x54,0xb5,0x18,0xdd,
		}
	},
	{ 0xa001234, {
		0x88,0x8d,0xed,0xab,0xb5,0xbd,0x4e,0xf7,
		0x7f,0xd4,0x0e,0x95,0x34,0x91,0xff,0xcc,
		0xfb,0x2a,0xcd,0xf7,0xd5,0xdb,0x4c,0x9b,
		0xd6,0x2e,0x73,0x50,0x8f,0x83,0x79,0x1a,
		}
	},
	{ 0xa001236, {
		0x3d,0x30,0x00,0xb9,0x71,0xba,0x87,0x78,
		0xa8,0x43,0x55,0xc4,0x26,0x59,0xcf,0x9d,
		0x93,0xce,0x64,0x0e,0x8b,0x72,0x11,0x8b,
		0xa3,0x8f,0x51,0xe9,0xca,0x98,0xaa,0x25,
		}
	},
	{ 0xa001238, {
		0x72,0xf7,0x4b,0x0c,0x7d,0x58,0x65,0xcc,
		0x00,0xcc,0x57,0x16,0x68,0x16,0xf8,0x2a,
		0x1b,0xb3,0x8b,0xe1,0xb6,0x83,0x8c,0x7e,
		0xc0,0xcd,0x33,0xf2,0x8d,0xf9,0xef,0x59,
		}
	},
	{ 0xa00123b, {
		0xef,0xa1,0x1e,0x71,0xf1,0xc3,0x2c,0xe2,
		0xc3,0xef,0x69,0x41,0x7a,0x54,0xca,0xc3,
		0x8f,0x62,0x84,0xee,0xc2,0x39,0xd9,0x28,
		0x95,0xa7,0x12,0x49,0x1e,0x30,0x71,0x72,
		}
	},
	{ 0xa00820c, {
		0xa8,0x0c,0x81,0xc0,0xa6,0x00,0xe7,0xf3,
		0x5f,0x65,0xd3,0xb9,0x6f,0xea,0x93,0x63,
		0xf1,0x8c,0x88,0x45,0xd7,0x82,0x80,0xd1,
		0xe1,0x3b,0x8d,0xb2,0xf8,0x22,0x03,0xe2,
		}
	},
	{ 0xa00820d, {
		0xf9,0x2a,0xc0,0xf4,0x9e,0xa4,0x87,0xa4,
		0x7d,0x87,0x00,0xfd,0xab,0xda,0x19,0xca,
		0x26,0x51,0x32,0xc1,0x57,0x91,0xdf,0xc1,
		0x05,0xeb,0x01,0x7c,0x5a,0x95,0x21,0xb7,
		}
	},
	{ 0xa10113e, {
		0x05,0x3c,0x66,0xd7,0xa9,0x5a,0x33,0x10,
		0x1b,0xf8,0x9c,0x8f,0xed,0xfc,0xa7,0xa0,
		0x15,0xe3,0x3f,0x4b,0x1d,0x0d,0x0a,0xd5,
		0xfa,0x90,0xc4,0xed,0x9d,0x90,0xaf,0x53,
		}
	},
	{ 0xa101144, {
		0xb3,0x0b,0x26,0x9a,0xf8,0x7c,0x02,0x26,
		0x35,0x84,0x53,0xa4,0xd3,0x2c,0x7c,0x09,
		0x68,0x7b,0x96,0xb6,0x93,0xef,0xde,0xbc,
		0xfd,0x4b,0x15,0xd2,0x81,0xd3,0x51,0x47,
		}
	},
	{ 0xa101148, {
		0x20,0xd5,0x6f,0x40,0x4a,0xf6,0x48,0x90,
		0xc2,0x93,0x9a,0xc2,0xfd,0xac,0xef,0x4f,
		0xfa,0xc0,0x3d,0x92,0x3c,0x6d,0x01,0x08,
		0xf1,0x5e,0xb0,0xde,0xb4,0x98,0xae,0xc4,
		}
	},
	{ 0xa10114c, {
		0x9e,0xb6,0xa2,0xd9,0x87,0x38,0xc5,0x64,
		0xd8,0x88,0xfa,0x78,0x98,0xf9,0x6f,0x74,
		0x39,0x90,0x1b,0xa5,0xcf,0x5e,0xb4,0x2a,
		0x02,0xff,0xd4,0x8c,0x71,0x8b,0xe2,0xc0,
		}
	},
	{ 0xa10123e, {
		0x03,0xb9,0x2c,0x76,0x48,0x93,0xc9,0x18,
		0xfb,0x56,0xfd,0xf7,0xe2,0x1d,0xca,0x4d,
		0x1d,0x13,0x53,0x63,0xfe,0x42,0x6f,0xfc,
		0x19,0x0f,0xf1,0xfc,0xa7,0xdd,0x89,0x1b,
		}
	},
	{ 0xa101244, {
		0x71,0x56,0xb5,0x9f,0x21,0xbf,0xb3,0x3c,
		0x8c,0xd7,0x36,0xd0,0x34,0x52,0x1b,0xb1,
		0x46,0x2f,0x04,0xf0,0x37,0xd8,0x1e,0x72,
		0x24,0xa2,0x80,0x84,0x83,0x65,0x84,0xc0,
		}
	},
	{ 0xa101248, {
		0xed,0x3b,0x95,0xa6,0x68,0xa7,0x77,0x3e,
		0xfc,0x17,0x26,0xe2,0x7b,0xd5,0x56,0x22,
		0x2c,0x1d,0xef,0xeb,0x56,0xdd,0xba,0x6e,
		0x1b,0x7d,0x64,0x9d,0x4b,0x53,0x13,0x75,
		}
	},
	{ 0xa10124c, {
		0x29,0xea,0xf1,0x2c,0xb2,0xe4,0xef,0x90,
		0xa4,0xcd,0x1d,0x86,0x97,0x17,0x61,0x46,
		0xfc,0x22,0xcb,0x57,0x75,0x19,0xc8,0xcc,
		0x0c,0xf5,0xbc,0xac,0x81,0x9d,0x9a,0xd2,
		}
	},
	{ 0xa108108, {
		0xed,0xc2,0xec,0xa1,0x15,0xc6,0x65,0xe9,
		0xd0,0xef,0x39,0xaa,0x7f,0x55,0x06,0xc6,
		0xf5,0xd4,0x3f,0x7b,0x14,0xd5,0x60,0x2c,
		0x28,0x1e,0x9c,0x59,0x69,0x99,0x4d,0x16,
		}
	},
	{ 0xa108109, {
		0x85,0xb4,0xbd,0x7c,0x49,0xa7,0xbd,0xfa,
		0x49,0x36,0x80,0x81,0xc5,0xb7,0x39,0x1b,
		0x9a,0xaa,0x50,0xde,0x9b,0xe9,0x32,0x35,
		0x42,0x7e,0x51,0x4f,0x52,0x2c,0x28,0x59,
		}
	},
	{ 0xa20102d, {
		0xf9,0x6e,0xf2,0x32,0xd3,0x0f,0x5f,0x11,
		0x59,0xa1,0xfe,0xcc,0xcd,0x9b,0x42,0x89,
		0x8b,0x89,0x2f,0xb5,0xbb,0x82,0xef,0x23,
		0x8c,0xe9,0x19,0x3e,0xcc,0x3f,0x7b,0xb4,
		}
	},
	{ 0xa20102e, {
		0xbe,0x1f,0x32,0x04,0x0d,0x3c,0x9c,0xdd,
		0xe1,0xa4,0xbf,0x76,0x3a,0xec,0xc2,0xf6,
		0x11,0x00,0xa7,0xaf,0x0f,0xe5,0x02,0xc5,
		0x54,0x3a,0x1f,0x8c,0x16,0xb5,0xff,0xbe,
		}
	},
	{ 0xa201210, {
		0xe8,0x6d,0x51,0x6a,0x8e,0x72,0xf3,0xfe,
		0x6e,0x16,0xbc,0x62,0x59,0x40,0x17,0xe9,
		0x6d,0x3d,0x0e,0x6b,0xa7,0xac,0xe3,0x68,
		0xf7,0x55,0xf0,0x13,0xbb,0x22,0xf6,0x41,
		}
	},
	{ 0xa201211, {
		0x69,0xa1,0x17,0xec,0xd0,0xf6,0x6c,0x95,
		0xe2,0x1e,0xc5,0x59,0x1a,0x52,0x0a,0x27,
		0xc4,0xed,0xd5,0x59,0x1f,0xbf,0x00,0xff,
		0x08,0x88,0xb5,0xe1,0x12,0xb6,0xcc,0x27,
		}
	},
	{ 0xa404107, {
		0xbb,0x04,0x4e,0x47,0xdd,0x5e,0x26,0x45,
		0x1a,0xc9,0x56,0x24,0xa4,0x4c,0x82,0xb0,
		0x8b,0x0d,0x9f,0xf9,0x3a,0xdf,0xc6,0x81,
		0x13,0xbc,0xc5,0x25,0xe4,0xc5,0xc3,0x99,
		}
	},
	{ 0xa404108, {
		0x69,0x67,0x43,0x06,0xf8,0x0c,0x62,0xdc,
		0xa4,0x21,0x30,0x4f,0x0f,0x21,0x2c,0xcb,
		0xcc,0x37,0xf1,0x1c,0xc3,0xf8,0x2f,0x19,
		0xdf,0x53,0x53,0x46,0xb1,0x15,0xea,0x00,
		}
	},
	{ 0xa500011, {
		0x23,0x3d,0x70,0x7d,0x03,0xc3,0xc4,0xf4,
		0x2b,0x82,0xc6,0x05,0xda,0x80,0x0a,0xf1,
		0xd7,0x5b,0x65,0x3a,0x7d,0xab,0xdf,0xa2,
		0x11,0x5e,0x96,0x7e,0x71,0xe9,0xfc,0x74,
		}
	},
	{ 0xa500012, {
		0xeb,0x74,0x0d,0x47,0xa1,0x8e,0x09,0xe4,
		0x93,0x4c,0xad,0x03,0x32,0x4c,0x38,0x16,
		0x10,0x39,0xdd,0x06,0xaa,0xce,0xd6,0x0f,
		0x62,0x83,0x9d,0x8e,0x64,0x55,0xbe,0x63,
		}
	},
	{ 0xa601209, {
		0x66,0x48,0xd4,0x09,0x05,0xcb,0x29,0x32,
		0x66,0xb7,0x9a,0x76,0xcd,0x11,0xf3,0x30,
		0x15,0x86,0xcc,0x5d,0x97,0x0f,0xc0,0x46,
		0xe8,0x73,0xe2,0xd6,0xdb,0xd2,0x77,0x1d,
		}
	},
	{ 0xa60120a, {
		0x0c,0x8b,0x3d,0xfd,0x52,0x52,0x85,0x7d,
		0x20,0x3a,0xe1,0x7e,0xa4,0x21,0x3b,0x7b,
		0x17,0x86,0xae,0xac,0x13,0xb8,0x63,0x9d,
		0x06,0x01,0xd0,0xa0,0x51,0x9a,0x91,0x2c,
		}
	},
	{ 0xa704107, {
		0xf3,0xc6,0x58,0x26,0xee,0xac,0x3f,0xd6,
		0xce,0xa1,0x72,0x47,0x3b,0xba,0x2b,0x93,
		0x2a,0xad,0x8e,0x6b,0xea,0x9b,0xb7,0xc2,
		0x64,0x39,0x71,0x8c,0xce,0xe7,0x41,0x39,
		}
	},
	{ 0xa704108, {
		0xd7,0x55,0x15,0x2b,0xfe,0xc4,0xbc,0x93,
		0xec,0x91,0xa0,0xae,0x45,0xb7,0xc3,0x98,
		0x4e,0xff,0x61,0x77,0x88,0xc2,0x70,0x49,
		0xe0,0x3a,0x1d,0x84,0x38,0x52,0xbf,0x5a,
		}
	},
	{ 0xa705206, {
		0x8d,0xc0,0x76,0xbd,0x58,0x9f,0x8f,0xa4,
		0x12,0x9d,0x21,0xfb,0x48,0x21,0xbc,0xe7,
		0x67,0x6f,0x04,0x18,0xae,0x20,0x87,0x4b,
		0x03,0x35,0xe9,0xbe,0xfb,0x06,0xdf,0xfc,
		}
	},
	{ 0xa705208, {
		0x30,0x1d,0x55,0x24,0xbc,0x6b,0x5a,0x19,
		0x0c,0x7d,0x1d,0x74,0xaa,0xd1,0xeb,0xd2,
		0x16,0x62,0xf7,0x5b,0xe1,0x1f,0x18,0x11,
		0x5c,0xf0,0x94,0x90,0x26,0xec,0x69,0xff,
		}
	},
	{ 0xa708007, {
		0x6b,0x76,0xcc,0x78,0xc5,0x8a,0xa3,0xe3,
		0x32,0x2d,0x79,0xe4,0xc3,0x80,0xdb,0xb2,
		0x07,0xaa,0x3a,0xe0,0x57,0x13,0x72,0x80,
		0xdf,0x92,0x73,0x84,0x87,0x3c,0x73,0x93,
		}
	},
	{ 0xa708008, {
		0x08,0x6e,0xf0,0x22,0x4b,0x8e,0xc4,0x46,
		0x58,0x34,0xe6,0x47,0xa2,0x28,0xfd,0xab,
		0x22,0x3d,0xdd,0xd8,0x52,0x9e,0x1d,0x16,
		0xfa,0x01,0x68,0x14,0x79,0x3e,0xe8,0x6b,
		}
	},
	{ 0xa70c005, {
		0x88,0x5d,0xfb,0x79,0x64,0xd8,0x46,0x3b,
		0x4a,0x83,0x8e,0x77,0x7e,0xcf,0xb3,0x0f,
		0x1f,0x1f,0xf1,0x97,0xeb,0xfe,0x56,0x55,
		0xee,0x49,0xac,0xe1,0x8b,0x13,0xc5,0x13,
		}
	},
	{ 0xa70c008, {
		0x0f,0xdb,0x37,0xa1,0x10,0xaf,0xd4,0x21,
		0x94,0x0d,0xa4,0xa2,0xe9,0x86,0x6c,0x0e,
		0x85,0x7c,0x36,0x30,0xa3,0x3a,0x78,0x66,
		0x18,0x10,0x60,0x0d,0x78,0x3d,0x44,0xd0,
		}
	},
	{ 0xaa00116, {
		0xe8,0x4c,0x2c,0x88,0xa1,0xac,0x24,0x63,
		0x65,0xe5,0xaa,0x2d,0x16,0xa9,0xc3,0xf5,
		0xfe,0x1d,0x5e,0x65,0xc7,0xaa,0x92,0x4d,
		0x91,0xee,0x76,0xbb,0x4c,0x66,0x78,0xc9,
		}
	},
	{ 0xaa00212, {
		0xbd,0x57,0x5d,0x0a,0x0a,0x30,0xc1,0x75,
		0x95,0x58,0x5e,0x93,0x02,0x28,0x43,0x71,
		0xed,0x42,0x29,0xc8,0xec,0x34,0x2b,0xb2,
		0x1a,0x65,0x4b,0xfe,0x07,0x0f,0x34,0xa1,
		}
	},
	{ 0xaa00213, {
		0xed,0x58,0xb7,0x76,0x81,0x7f,0xd9,0x3a,
		0x1a,0xff,0x8b,0x34,0xb8,0x4a,0x99,0x0f,
		0x28,0x49,0x6c,0x56,0x2b,0xdc,0xb7,0xed,
		0x96,0xd5,0x9d,0xc1,0x7a,0xd4,0x51,0x9b,
		}
	},
	{ 0xaa00215, {
		0x55,0xd3,0x28,0xcb,0x87,0xa9,0x32,0xe9,
		0x4e,0x85,0x4b,0x7c,0x6b,0xd5,0x7c,0xd4,
		0x1b,0x51,0x71,0x3a,0x0e,0x0b,0xdc,0x9b,
		0x68,0x2f,0x46,0xee,0xfe,0xc6,0x6d,0xef,
		}
	},
	{ 0xaa00216, {
		0x79,0xfb,0x5b,0x9f,0xb6,0xe6,0xa8,0xf5,
		0x4e,0x7c,0x4f,0x8e,0x1d,0xad,0xd0,0x08,
		0xc2,0x43,0x7c,0x8b,0xe6,0xdb,0xd0,0xd2,
		0xe8,0x39,0x26,0xc1,0xe5,0x5a,0x48,0xf1,
		}
	},
};
|
||||
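
Editor's note: the table above pins each AMD microcode patch ID to the SHA-256 digest of the corresponding patch blob, so the early loader can refuse a tampered or truncated patch. A minimal sketch of that lookup-and-compare step follows; patch_digest_ok() and sha256_of_patch() are local names invented for this example, and the digest computation is stubbed out rather than the kernel's real sha256() helper:

#include <stddef.h>
#include <stdbool.h>
#include <stdint.h>
#include <string.h>
#include <stdio.h>

struct patch_digest {
	uint32_t patch_id;
	uint8_t sha256[32];
};

/* Two rows in the style of the table above; digests truncated for brevity,
 * the remaining bytes default to zero in this sketch. */
static const struct patch_digest phashes[] = {
	{ 0xa500011, { 0x23, 0x3d, 0x70, 0x7d } },
	{ 0xa500012, { 0xeb, 0x74, 0x0d, 0x47 } },
};

/* Placeholder: a real loader would compute SHA-256 of the patch image here. */
static void sha256_of_patch(const uint8_t *data, size_t len, uint8_t out[32])
{
	(void)data; (void)len;
	memset(out, 0, 32);
}

static bool patch_digest_ok(uint32_t patch_id, const uint8_t *data, size_t len)
{
	uint8_t digest[32];

	for (size_t i = 0; i < sizeof(phashes) / sizeof(phashes[0]); i++) {
		if (phashes[i].patch_id != patch_id)
			continue;
		sha256_of_patch(data, len, digest);
		/* Load only if the computed digest matches the pinned one. */
		return memcmp(digest, phashes[i].sha256, 32) == 0;
	}
	return false; /* unknown patch ID: refuse it */
}

int main(void)
{
	uint8_t blob[16] = { 0 };

	printf("ok: %d\n", patch_digest_ok(0xa500011, blob, sizeof(blob)));
	return 0;
}
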
@ -42,7 +42,7 @@
#include "internal.h"

static struct microcode_ops *microcode_ops;
bool dis_ucode_ldr = true;
static bool dis_ucode_ldr = false;

bool force_minrev = IS_ENABLED(CONFIG_MICROCODE_LATE_FORCE_MINREV);
module_param(force_minrev, bool, S_IRUSR | S_IWUSR);

@ -84,6 +84,9 @@ static bool amd_check_current_patch_level(void)
u32 lvl, dummy, i;
u32 *levels;

if (x86_cpuid_vendor() != X86_VENDOR_AMD)
return false;

native_rdmsr(MSR_AMD64_PATCH_LEVEL, lvl, dummy);

levels = final_levels;

@ -95,27 +98,29 @@ static bool amd_check_current_patch_level(void)
return false;
}

static bool __init check_loader_disabled_bsp(void)
bool __init microcode_loader_disabled(void)
{
static const char *__dis_opt_str = "dis_ucode_ldr";
const char *cmdline = boot_command_line;
const char *option = __dis_opt_str;
if (dis_ucode_ldr)
return true;

/*
* CPUID(1).ECX[31]: reserved for hypervisor use. This is still not
* completely accurate as xen pv guests don't see that CPUID bit set but
* that's good enough as they don't land on the BSP path anyway.
* Disable when:
*
* 1) The CPU does not support CPUID.
*
* 2) Bit 31 in CPUID[1]:ECX is clear
* The bit is reserved for hypervisor use. This is still not
* completely accurate as XEN PV guests don't see that CPUID bit
* set, but that's good enough as they don't land on the BSP
* path anyway.
*
* 3) Certain AMD patch levels are not allowed to be
* overwritten.
*/
if (native_cpuid_ecx(1) & BIT(31))
return true;

if (x86_cpuid_vendor() == X86_VENDOR_AMD) {
if (amd_check_current_patch_level())
return true;
}

if (cmdline_find_option_bool(cmdline, option) <= 0)
dis_ucode_ldr = false;
if (!have_cpuid_p() ||
native_cpuid_ecx(1) & BIT(31) ||
amd_check_current_patch_level())
dis_ucode_ldr = true;

return dis_ucode_ldr;
}
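
Editor's note: the rewrite above folds the old two-step logic (parse "dis_ucode_ldr" on the BSP, then consult the flag) into one sticky predicate. A compact user-space model of that behavior, with stand-ins for have_cpuid_p(), the CPUID.1:ECX[31] hypervisor bit and the AMD patch-level check (all names below are local to this sketch, not kernel API):

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the kernel-side probes; return values here are illustrative. */
static bool have_cpuid(void)       { return true;  }
static bool running_as_guest(void) { return false; } /* CPUID.1:ECX[31] */
static bool amd_level_locked(void) { return false; } /* sealed patch level */

static bool dis_ucode_ldr; /* sticky, like the (now static) kernel flag */

static bool microcode_loader_disabled_model(void)
{
	if (dis_ucode_ldr)
		return true;

	/* Disable when CPUID is missing, we run under a hypervisor,
	 * or the current AMD patch level must not be overwritten. */
	if (!have_cpuid() || running_as_guest() || amd_level_locked())
		dis_ucode_ldr = true;

	return dis_ucode_ldr;
}

int main(void)
{
	/* The cmdline option simply pre-sets the flag, as load_ucode_bsp()
	 * now does before the first call:
	 * dis_ucode_ldr = true;  -- effect of "dis_ucode_ldr" on the cmdline */
	printf("loader disabled: %d\n", microcode_loader_disabled_model());
	return 0;
}
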
@ -125,7 +130,10 @@ void __init load_ucode_bsp(void)
unsigned int cpuid_1_eax;
bool intel = true;

if (!have_cpuid_p())
if (cmdline_find_option_bool(boot_command_line, "dis_ucode_ldr") > 0)
dis_ucode_ldr = true;

if (microcode_loader_disabled())
return;

cpuid_1_eax = native_cpuid_eax(1);

@ -146,9 +154,6 @@ void __init load_ucode_bsp(void)
return;
}

if (check_loader_disabled_bsp())
return;

if (intel)
load_ucode_intel_bsp(&early_data);
else

@ -159,6 +164,11 @@ void load_ucode_ap(void)
{
unsigned int cpuid_1_eax;

/*
* Can't use microcode_loader_disabled() here - .init section
* hell. It doesn't have to either - the BSP variant must've
* parsed cmdline already anyway.
*/
if (dis_ucode_ldr)
return;

@ -810,7 +820,7 @@ static int __init microcode_init(void)
struct cpuinfo_x86 *c = &boot_cpu_data;
int error;

if (dis_ucode_ldr)
if (microcode_loader_disabled())
return -EINVAL;

if (c->x86_vendor == X86_VENDOR_INTEL)

@ -395,7 +395,7 @@ static int __init save_builtin_microcode(void)
if (xchg(&ucode_patch_va, NULL) != UCODE_BSP_LOADED)
return 0;

if (dis_ucode_ldr || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
if (microcode_loader_disabled() || boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
return 0;

uci.mc = get_microcode_blob(&uci, true);

@ -94,20 +94,17 @@ static inline unsigned int x86_cpuid_family(void)
return x86_family(eax);
}

extern bool dis_ucode_ldr;
extern bool force_minrev;

#ifdef CONFIG_CPU_SUP_AMD
void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family);
void load_ucode_amd_ap(unsigned int family);
int save_microcode_in_initrd_amd(unsigned int family);
void reload_ucode_amd(unsigned int cpu);
struct microcode_ops *init_amd_microcode(void);
void exit_amd_microcode(void);
#else /* CONFIG_CPU_SUP_AMD */
static inline void load_ucode_amd_bsp(struct early_load_data *ed, unsigned int family) { }
static inline void load_ucode_amd_ap(unsigned int family) { }
static inline int save_microcode_in_initrd_amd(unsigned int family) { return -EINVAL; }
static inline void reload_ucode_amd(unsigned int cpu) { }
static inline struct microcode_ops *init_amd_microcode(void) { return NULL; }
static inline void exit_amd_microcode(void) { }

@ -30,8 +30,7 @@ dump_array()

# If the /* comment */ starts with a quote string, grab that.
VALUE="$(echo "$i" | sed -n 's@.*/\* *\("[^"]*"\).*\*/@\1@p')"
[ -z "$VALUE" ] && VALUE="\"$NAME\""
[ "$VALUE" = '""' ] && continue
[ ! "$VALUE" ] && continue

# Name is uppercase, VALUE is all lowercase
VALUE="$(echo "$VALUE" | tr A-Z a-z)"

@ -49,6 +49,9 @@ static const struct cpuid_bit cpuid_bits[] = {
{ X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
{ X86_FEATURE_SMBA, CPUID_EBX, 2, 0x80000020, 0 },
{ X86_FEATURE_BMEC, CPUID_EBX, 3, 0x80000020, 0 },
{ X86_FEATURE_TSA_SQ_NO, CPUID_ECX, 1, 0x80000021, 0 },
{ X86_FEATURE_TSA_L1_NO, CPUID_ECX, 2, 0x80000021, 0 },
{ X86_FEATURE_AMD_WORKLOAD_CLASS, CPUID_EAX, 22, 0x80000021, 0 },
{ X86_FEATURE_PERFMON_V2, CPUID_EAX, 0, 0x80000022, 0 },
{ X86_FEATURE_AMD_LBR_V2, CPUID_EAX, 1, 0x80000022, 0 },
{ X86_FEATURE_AMD_LBR_PMC_FREEZE, CPUID_EAX, 2, 0x80000022, 0 },
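
Editor's note: the scattered-feature entries above tell the kernel which CPUID leaf/register/bit to mirror into its own feature words. Outside the kernel the same bits can be probed directly; a small x86-only sketch using GCC/Clang's cpuid.h that reads the two new TSA bits in leaf 0x80000021:

#include <cpuid.h> /* GCC/Clang x86 intrinsic header */
#include <stdio.h>

int main(void)
{
	unsigned int eax, ebx, ecx, edx;

	/* Leaf 0x80000021 carries TSA_SQ_NO (ECX[1]) and TSA_L1_NO (ECX[2]),
	 * the bits the table above scatters into the kernel's feature words. */
	if (!__get_cpuid(0x80000021, &eax, &ebx, &ecx, &edx)) {
		puts("leaf 0x80000021 not supported");
		return 0;
	}
	printf("TSA_SQ_NO: %u\n", (ecx >> 1) & 1);
	printf("TSA_L1_NO: %u\n", (ecx >> 2) & 1);
	return 0;
}
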
@ -15,7 +15,7 @@

#define EREMOVE_ERROR_MESSAGE \
"EREMOVE returned %d (0x%x) and an EPC page was leaked. SGX may become unusable. " \
"Refer to Documentation/x86/sgx.rst for more information."
"Refer to Documentation/arch/x86/sgx.rst for more information."

#define SGX_MAX_EPC_SECTIONS 8
#define SGX_EEXTEND_BLOCK_SIZE 256

@ -144,10 +144,6 @@ void __init __no_stack_protector mk_early_pgtbl_32(void)
*ptr = (unsigned long)ptep + PAGE_OFFSET;

#ifdef CONFIG_MICROCODE_INITRD32
/* Running on a hypervisor? */
if (native_cpuid_ecx(1) & BIT(31))
return;

params = (struct boot_params *)__pa_nodebug(&boot_params);
if (!params->hdr.ramdisk_size || !params->hdr.ramdisk_image)
return;

@ -109,7 +109,7 @@ void __init pci_iommu_alloc(void)
}

/*
* See <Documentation/x86/x86_64/boot-options.rst> for the iommu kernel
* See <Documentation/arch/x86/x86_64/boot-options.rst> for the iommu kernel
* parameter documentation.
*/
static __init int iommu_setup(char *p)

@ -897,19 +897,24 @@ static int prefer_mwait_c1_over_halt(const struct cpuinfo_x86 *c)
*/
static __cpuidle void mwait_idle(void)
{
if (!current_set_polling_and_test()) {
if (this_cpu_has(X86_BUG_CLFLUSH_MONITOR)) {
mb(); /* quirk */
clflush((void *)&current_thread_info()->flags);
mb(); /* quirk */
}
if (need_resched())
return;

x86_idle_clear_cpu_buffers();

if (!current_set_polling_and_test()) {
const void *addr = &current_thread_info()->flags;

alternative_input("", "clflush (%[addr])", X86_BUG_CLFLUSH_MONITOR, [addr] "a" (addr));
__monitor(addr, 0, 0);
if (need_resched())
goto out;

__monitor((void *)&current_thread_info()->flags, 0, 0);
if (!need_resched()) {
__sti_mwait(0, 0);
raw_local_irq_disable();
}
}

out:
__current_clr_polling();
}
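
Editor's note: as I read the new flow above, the rewrite hoists the need_resched() bail-out and the new x86_idle_clear_cpu_buffers() call ahead of arming MONITOR/MWAIT, and turns the CLFLUSH monitor quirk into an alternative_input() patch site. A stripped-down model of that ordering, with stubs in place of the privileged instructions (shape only, not a working idle loop):

#include <stdbool.h>
#include <stdio.h>

static unsigned long thread_flags; /* stands in for current_thread_info()->flags */

/* Stubs for the arch primitives; the real ones are privileged instructions. */
static bool need_resched(void)           { return false; }
static bool set_polling_and_test(void)   { return false; }
static void clear_polling(void)          { }
static void idle_clear_cpu_buffers(void) { puts("VERW, if the mitigation is on"); }
static void maybe_clflush(const void *a) { (void)a; puts("clflush (patched in on X86_BUG_CLFLUSH_MONITOR only)"); }
static void monitor(const void *a)       { (void)a; puts("monitor"); }
static void sti_mwait(void)              { puts("sti; mwait"); }
static void irq_disable(void)            { }

static void mwait_idle_model(void)
{
	if (need_resched())
		return;                /* new: bail out before arming anything */

	idle_clear_cpu_buffers();      /* new: clear CPU buffers before idling */

	if (!set_polling_and_test()) {
		const void *addr = &thread_flags;

		maybe_clflush(addr);   /* the alternative_input() site */
		monitor(addr);
		if (need_resched())
			goto out;

		sti_mwait();
		irq_disable();
	}
out:
	clear_polling();
}

int main(void) { mwait_idle_model(); return 0; }
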
@ -814,13 +814,16 @@ void kvm_set_cpu_caps(void)

kvm_cpu_cap_mask(CPUID_8000_0021_EAX,
F(NO_NESTED_DATA_BP) | F(LFENCE_RDTSC) | 0 /* SmmPgCfgLock */ |
0 /* 4:Resv */ | F(VERW_CLEAR) |
F(NULL_SEL_CLR_BASE) | F(AUTOIBRS) | 0 /* PrefetchCtlMsr */ |
F(WRMSR_XX_BASE_NS)
F(WRMSR_XX_BASE_NS) | F(SRSO_USER_KERNEL_NO)
);

kvm_cpu_cap_check_and_set(X86_FEATURE_SBPB);
kvm_cpu_cap_check_and_set(X86_FEATURE_IBPB_BRTYPE);
kvm_cpu_cap_check_and_set(X86_FEATURE_SRSO_NO);
kvm_cpu_cap_check_and_set(X86_FEATURE_TSA_SQ_NO);
kvm_cpu_cap_check_and_set(X86_FEATURE_TSA_L1_NO);

kvm_cpu_cap_init_kvm_defined(CPUID_8000_0022_EAX,
F(PERFMON_V2)

@ -1378,8 +1381,9 @@ static inline int __do_cpuid_func(struct kvm_cpuid_array *array, u32 function)
entry->eax = entry->ebx = entry->ecx = entry->edx = 0;
break;
case 0x80000021:
entry->ebx = entry->ecx = entry->edx = 0;
entry->ebx = entry->edx = 0;
cpuid_entry_override(entry, CPUID_8000_0021_EAX);
cpuid_entry_override(entry, CPUID_8000_0021_ECX);
break;
/* AMD Extended Performance Monitoring and Debug */
case 0x80000022: {

@ -18,6 +18,7 @@ enum kvm_only_cpuid_leafs {
CPUID_8000_0022_EAX,
CPUID_7_2_EDX,
CPUID_24_0_EBX,
CPUID_8000_0021_ECX,
NR_KVM_CPU_CAPS,

NKVMCAPINTS = NR_KVM_CPU_CAPS - NCAPINTS,

@ -69,6 +70,10 @@ enum kvm_only_cpuid_leafs {
/* CPUID level 0x80000022 (EAX) */
#define KVM_X86_FEATURE_PERFMON_V2 KVM_X86_FEATURE(CPUID_8000_0022_EAX, 0)

/* CPUID level 0x80000021 (ECX) */
#define KVM_X86_FEATURE_TSA_SQ_NO KVM_X86_FEATURE(CPUID_8000_0021_ECX, 1)
#define KVM_X86_FEATURE_TSA_L1_NO KVM_X86_FEATURE(CPUID_8000_0021_ECX, 2)

struct cpuid_reg {
u32 function;
u32 index;

@ -99,6 +104,7 @@ static const struct cpuid_reg reverse_cpuid[] = {
[CPUID_8000_0022_EAX] = {0x80000022, 0, CPUID_EAX},
[CPUID_7_2_EDX] = { 7, 2, CPUID_EDX},
[CPUID_24_0_EBX] = { 0x24, 0, CPUID_EBX},
[CPUID_8000_0021_ECX] = {0x80000021, 0, CPUID_ECX},
};

/*

@ -136,6 +142,8 @@ static __always_inline u32 __feature_translate(int x86_feature)
KVM_X86_TRANSLATE_FEATURE(PERFMON_V2);
KVM_X86_TRANSLATE_FEATURE(RRSBA_CTRL);
KVM_X86_TRANSLATE_FEATURE(BHI_CTRL);
KVM_X86_TRANSLATE_FEATURE(TSA_SQ_NO);
KVM_X86_TRANSLATE_FEATURE(TSA_L1_NO);
default:
return x86_feature;
}
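
Editor's note: the new CPUID_8000_0021_ECX word above extends KVM's reverse-CPUID scheme, in which a feature number is word * 32 + bit and reverse_cpuid[word] records which CPUID leaf/index/register that word shadows. A self-contained sketch of just the encode/decode arithmetic (all names below are local to this example; the word number 0 is arbitrary):

#include <stdint.h>
#include <stdio.h>

enum reg { EAX, EBX, ECX, EDX };

struct cpuid_reg_model {
	uint32_t function;
	uint32_t index;
	enum reg reg;
};

/* One row of the reverse map, mirroring the new reverse_cpuid[] entry. */
static const struct cpuid_reg_model cpuid_8000_0021_ecx = { 0x80000021, 0, ECX };

/* KVM_X86_FEATURE(word, bit) reduces to word * 32 + bit. */
static uint32_t make_feature(uint32_t word, uint32_t bit)
{
	return word * 32 + bit;
}

int main(void)
{
	/* Pretend the CPUID_8000_0021_ECX word is word 0 of this sketch. */
	uint32_t tsa_sq_no = make_feature(0, 1);

	/* Decode: the word picks the reverse_cpuid row, bit % 32 the register bit. */
	printf("leaf 0x%x, index %u, reg %d, bit %u\n",
	       (unsigned)cpuid_8000_0021_ecx.function,
	       (unsigned)cpuid_8000_0021_ecx.index,
	       (int)cpuid_8000_0021_ecx.reg,
	       (unsigned)(tsa_sq_no % 32));
	return 0;
}
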
@ -607,6 +607,9 @@ static void svm_disable_virtualization_cpu(void)
kvm_cpu_svm_disable();

amd_pmu_disable_virt();

if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
msr_clear_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);
}

static int svm_enable_virtualization_cpu(void)

@ -684,6 +687,9 @@ static int svm_enable_virtualization_cpu(void)
rdmsr(MSR_TSC_AUX, sev_es_host_save_area(sd)->tsc_aux, msr_hi);
}

if (cpu_feature_enabled(X86_FEATURE_SRSO_BP_SPEC_REDUCE))
msr_set_bit(MSR_ZEN4_BP_CFG, MSR_ZEN4_BP_CFG_BP_SPEC_REDUCE_BIT);

return 0;
}

@ -169,6 +169,9 @@ SYM_FUNC_START(__svm_vcpu_run)
#endif
mov VCPU_RDI(%_ASM_DI), %_ASM_DI

/* Clobbers EFLAGS.ZF */
VM_CLEAR_CPU_BUFFERS

/* Enter guest mode */
sti

@ -341,6 +344,9 @@ SYM_FUNC_START(__svm_sev_es_vcpu_run)
mov SVM_current_vmcb(%rdi), %rax
mov KVM_VMCB_pa(%rax), %rax

/* Clobbers EFLAGS.ZF */
VM_CLEAR_CPU_BUFFERS

/* Enter guest mode */
sti

@ -7292,12 +7292,16 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
* mitigation for MDS is done late in VMentry and is still
* executed in spite of L1D Flush. This is because an extra VERW
* should not matter much after the big hammer L1D Flush.
*
* cpu_buf_vm_clear is used when the system is not vulnerable to MDS/TAA,
* but is affected by MMIO Stale Data. In such cases mitigation is only
* needed against an MMIO capable guest.
*/
if (static_branch_unlikely(&vmx_l1d_should_flush))
vmx_l1d_flush(vcpu);
else if (static_branch_unlikely(&mmio_stale_data_clear) &&
else if (static_branch_unlikely(&cpu_buf_vm_clear) &&
kvm_arch_has_assigned_device(vcpu->kvm))
mds_clear_cpu_buffers();
x86_clear_cpu_buffers();

vmx_disable_fb_clear(vmx);
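
Editor's note: restating the comment above in code form: on VM entry an L1D flush (when enabled) subsumes the VERW clear, and the VERW path is taken only when the guest buffer-clear static key is on and the VM has an assigned, MMIO-capable device. A sketch of the decision chain with plain booleans standing in for static_branch_unlikely() keys and the device check:

#include <stdbool.h>
#include <stdio.h>

/* Stand-ins for the static keys and the assigned-device check. */
static bool l1d_should_flush    = false;
static bool cpu_buf_vm_clear    = true;
static bool has_assigned_device = true;

static void l1d_flush(void)         { puts("L1D flush (an extra VERW adds little)"); }
static void clear_cpu_buffers(void) { puts("VERW: x86_clear_cpu_buffers()"); }

static void pre_vmenter_mitigations(void)
{
	if (l1d_should_flush)
		l1d_flush();
	else if (cpu_buf_vm_clear && has_assigned_device)
		clear_cpu_buffers();
	/* else: nothing to do on this entry */
}

int main(void)
{
	pre_vmenter_mitigations();
	return 0;
}
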
@ -11129,6 +11129,15 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
if (vcpu->arch.guest_fpu.xfd_err)
wrmsrl(MSR_IA32_XFD_ERR, 0);

/*
* Mark this CPU as needing a branch predictor flush before running
* userspace. Must be done before enabling preemption to ensure it gets
* set for the CPU that actually ran the guest, and not the CPU that it
* may migrate to.
*/
if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER))
this_cpu_write(x86_ibpb_exit_to_user, true);

/*
* Consume any pending interrupts, including the possible source of
* VM-Exit on SVM and any ticks that occur between VM-Exit and now.
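
Editor's note: the hunk above only sets a per-CPU flag; the IBPB itself is issued later, on the exit-to-userspace path that reads the flag back. A minimal single-threaded model of that producer/consumer pairing (a plain bool stands in for the per-CPU x86_ibpb_exit_to_user variable):

#include <stdbool.h>
#include <stdio.h>

static bool ibpb_exit_to_user; /* models the per-CPU flag */

static void run_guest(void)
{
	/* ... VM entry/exit ... */
	/* Flag the CPU that actually ran the guest, before preemption
	 * could migrate the task to another CPU. */
	ibpb_exit_to_user = true;
}

static void return_to_user(void)
{
	if (ibpb_exit_to_user) {
		puts("IBPB"); /* the barrier flushes branch predictor state */
		ibpb_exit_to_user = false;
	}
}

int main(void)
{
	run_guest();
	return_to_user();
	return 0;
}
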
@ -103,6 +103,7 @@ int msr_set_bit(u32 msr, u8 bit)
{
return __flip_bit(msr, bit, true);
}
EXPORT_SYMBOL_GPL(msr_set_bit);

/**
* msr_clear_bit - Clear @bit in a MSR @msr.

@ -118,6 +119,7 @@ int msr_clear_bit(u32 msr, u8 bit)
{
return __flip_bit(msr, bit, false);
}
EXPORT_SYMBOL_GPL(msr_clear_bit);

#ifdef CONFIG_TRACEPOINTS
void do_trace_write_msr(unsigned int msr, u64 val, int failed)
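
Editor's note: the newly exported msr_set_bit()/msr_clear_bit() (used by the SRSO BpSpecReduce hunks earlier in this diff) are thin wrappers around a read-test-write helper: read the MSR, return early if the bit already has the wanted value, write it back otherwise. A sketch of that __flip_bit() pattern against an in-memory stand-in for the MSR; real code would use rdmsrl/wrmsrl, and the kernel helper can also return a negative errno on a faulting access, which this model omits:

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

static uint64_t fake_msr; /* stand-in for a model-specific register */

static uint64_t rd(void)       { return fake_msr; }
static void     wr(uint64_t v) { fake_msr = v; }

/* Returns 0 if the bit already had the wanted value, 1 if it was flipped. */
static int flip_bit(uint8_t bit, bool set)
{
	uint64_t v = rd(), m = 1ULL << bit;

	if (!!(v & m) == set)
		return 0; /* nothing to do, skip the write */

	wr(set ? (v | m) : (v & ~m));
	return 1;
}

int main(void)
{
	printf("set:   %d (msr=%llx)\n", flip_bit(4, true),
	       (unsigned long long)fake_msr);
	printf("again: %d (msr=%llx)\n", flip_bit(4, true),
	       (unsigned long long)fake_msr);
	return 0;
}
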
@ -234,7 +234,7 @@ within_inclusive(unsigned long addr, unsigned long start, unsigned long end)
* take full advantage of the limited (s32) immediate addressing range (2G)
* of x86_64.
*
* See Documentation/x86/x86_64/mm.rst for more detail.
* See Documentation/arch/x86/x86_64/mm.rst for more detail.
*/

static inline unsigned long highmap_start_pfn(void)

Some files were not shown because too many files have changed in this diff