Import of kernel-5.14.0-570.35.1.el9_6

parent e4e18e9661
commit 00afcb91bc
@@ -519,6 +519,7 @@ Description: information about CPUs heterogeneity.
 What:		/sys/devices/system/cpu/vulnerabilities
 		/sys/devices/system/cpu/vulnerabilities/gather_data_sampling
+		/sys/devices/system/cpu/vulnerabilities/indirect_target_selection
 		/sys/devices/system/cpu/vulnerabilities/itlb_multihit
 		/sys/devices/system/cpu/vulnerabilities/l1tf
 		/sys/devices/system/cpu/vulnerabilities/mds
@@ -22,3 +22,4 @@ are configurable at compile, boot or run time.
    srso
    gather_data_sampling
    reg-file-data-sampling
+   indirect-target-selection
Documentation/admin-guide/hw-vuln/indirect-target-selection.rst (new file, 168 lines)
@@ -0,0 +1,168 @@
.. SPDX-License-Identifier: GPL-2.0

Indirect Target Selection (ITS)
===============================

ITS is a vulnerability in some Intel CPUs that support Enhanced IBRS and were
released before Alder Lake. ITS may allow an attacker to control the prediction
of indirect branches and RETs located in the lower half of a cacheline.

ITS is assigned CVE-2024-28956 with a CVSS score of 4.7 (Medium).

Scope of Impact
---------------
- **eIBRS Guest/Host Isolation**: Indirect branches in KVM/kernel may still be
  predicted with an unintended target corresponding to a branch in the guest.

- **Intra-Mode BTI**: In-kernel training such as through cBPF or other native
  gadgets.

- **Indirect Branch Prediction Barrier (IBPB)**: After an IBPB, indirect
  branches may still be predicted with targets corresponding to direct branches
  executed prior to the IBPB. This is fixed by the IPU 2025.1 microcode, which
  should be available via distro updates. Alternatively, microcode can be
  obtained from Intel's github repository [#f1]_.

Affected CPUs
-------------
Below is the list of ITS-affected CPUs [#f2]_ [#f3]_:

======================== ============ ==================== ===============
Common name              Family_Model eIBRS                Intra-mode BTI
                                      Guest/Host Isolation
======================== ============ ==================== ===============
SKYLAKE_X (step >= 6)    06_55H       Affected             Affected
ICELAKE_X                06_6AH       Not affected         Affected
ICELAKE_D                06_6CH       Not affected         Affected
ICELAKE_L                06_7EH       Not affected         Affected
TIGERLAKE_L              06_8CH       Not affected         Affected
TIGERLAKE                06_8DH       Not affected         Affected
KABYLAKE_L (step >= 12)  06_8EH       Affected             Affected
KABYLAKE (step >= 13)    06_9EH       Affected             Affected
COMETLAKE                06_A5H       Affected             Affected
COMETLAKE_L              06_A6H       Affected             Affected
ROCKETLAKE               06_A7H       Not affected         Affected
======================== ============ ==================== ===============

- All affected CPUs enumerate the Enhanced IBRS feature.
- IBPB isolation is affected on all ITS-affected CPUs, and needs a microcode
  update for mitigation.
- None of the affected CPUs enumerate BHI_CTRL, which was introduced in Golden
  Cove (Alder Lake and Sapphire Rapids). This can help guests determine the
  host's affected status.
- Intel Atom CPUs are not affected by ITS.
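[Editorial sketch, not part of the patch: the BHI_CTRL hint above can be probed
from a guest. A minimal user-space illustration assuming GCC's cpuid.h and
assuming BHI_CTRL is enumerated at CPUID.(EAX=7,ECX=2):EDX bit 4:]

    #include <cpuid.h>
    #include <stdbool.h>

    /* A guest that sees BHI_CTRL knows the host core is Golden Cove or
     * newer, hence not in the ITS-affected table above. */
    static bool host_likely_unaffected_by_its(void)
    {
            unsigned int eax, ebx, ecx, edx;

            if (!__get_cpuid_count(7, 2, &eax, &ebx, &ecx, &edx))
                    return false;

            return edx & (1u << 4);  /* BHI_CTRL enumerated */
    }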

Mitigation
----------
As only the indirect branches and RETs that have their last byte of instruction
in the lower half of the cacheline are vulnerable to ITS, the basic idea behind
the mitigation is to not allow indirect branches in the lower half.

This is achieved by relying on the existing retpoline support in the kernel and
in compilers. ITS-vulnerable retpoline sites are runtime-patched to point to
newly added ITS-safe thunks. These safe thunks consist of an indirect branch in
the second half of the cacheline. Not all retpoline sites are patched to
thunks; if a retpoline site is evaluated to be ITS-safe, it is replaced with an
inline indirect branch.
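[Editorial sketch, not the kernel's actual thunk layout: the cacheline-alignment
idea can be illustrated in GNU C inline assembly. The 32-byte NOP padding is an
assumption chosen so the branch bytes land entirely in the upper half of a
64-byte cacheline; the real thunks live in __x86_indirect_its_thunk_array.]

    /* Hypothetical ITS-safe indirect-branch thunk (x86-64, AT&T syntax). */
    asm(
    "	.pushsection .text\n"
    "	.align 64\n"                    /* start at a cacheline boundary */
    "its_safe_thunk_rax:\n"
    "	.skip 32, 0x90\n"               /* NOP-pad past the lower half   */
    "	jmp *%rax\n"                    /* branch sits in the upper half */
    "	.popsection\n"
    );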

Dynamic thunks
~~~~~~~~~~~~~~
From a dynamically allocated pool of safe thunks, each vulnerable site is
replaced with a new thunk, such that they get a unique address. This could
improve the branch prediction accuracy. It is also a defense-in-depth measure
against aliasing.

Note, for simplicity, indirect branches in eBPF programs are always replaced
with a jump to a static thunk in __x86_indirect_its_thunk_array. If required,
this can be changed to use dynamic thunks in the future.

All vulnerable RETs are replaced with a static thunk; they do not use dynamic
thunks. This is because RETs mostly get their prediction from the RSB, which
does not depend on the source address. RETs that underflow the RSB may benefit
from dynamic thunks, but RETs significantly outnumber indirect branches, and
any benefit from a unique source address could be outweighed by the increased
icache footprint and iTLB pressure.

Retpoline
~~~~~~~~~
The retpoline sequence also mitigates ITS-unsafe indirect branches. For this
reason, when retpoline is enabled, the ITS mitigation only relocates the RETs
to safe thunks, unless the user requested the RSB-stuffing mitigation.

RSB Stuffing
~~~~~~~~~~~~
RSB-stuffing via Call Depth Tracking is a mitigation for Retbleed RSB-underflow
attacks. It also mitigates RETs that are vulnerable to ITS.

Mitigation in guests
^^^^^^^^^^^^^^^^^^^^
All guests deploy the ITS mitigation by default, irrespective of eIBRS
enumeration and Family/Model of the guest. This is because the eIBRS feature
could be hidden from a guest. One exception to this is when a guest enumerates
BHI_DIS_S, which indicates that the guest is running on an unaffected host.

To prevent guests from unnecessarily deploying the mitigation on unaffected
platforms, Intel has defined ITS_NO bit(62) in MSR IA32_ARCH_CAPABILITIES. When
a guest sees this bit set, it should not enumerate the ITS bug. Note, this bit
is not set by any hardware, but is **intended for VMMs to synthesize** it for
guests as per the host's affected status.
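[Editorial sketch of that synthesis. The helper below is hypothetical, not a
real KVM API; only the bit position (62) comes from the text above.]

    #include <stdbool.h>
    #include <stdint.h>

    #define ARCH_CAP_ITS_NO (1ULL << 62)  /* host not affected by ITS */

    /* Hypothetical VMM-side helper: derive the guest's view of
     * IA32_ARCH_CAPABILITIES from the host's value. */
    static uint64_t guest_arch_capabilities(uint64_t host_caps,
                                            bool host_has_its_bug)
    {
            uint64_t guest_caps = host_caps;

            if (!host_has_its_bug)
                    guest_caps |= ARCH_CAP_ITS_NO;  /* guest may skip mitigation */

            return guest_caps;
    }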

Mitigation options
^^^^^^^^^^^^^^^^^^
The ITS mitigation can be controlled using the "indirect_target_selection"
kernel parameter. The available options are:

   ======== ===================================================================
   on       (default) Deploy the "Aligned branch/return thunks" mitigation.
            If spectre_v2 mitigation enables retpoline, aligned-thunks are only
            deployed for the affected RET instructions. Retpoline mitigates
            indirect branches.

   off      Disable ITS mitigation.

   vmexit   Equivalent to "=on" if the CPU is affected by guest/host isolation
            part of ITS. Otherwise, mitigation is not deployed. This option is
            useful when host userspace is not in the threat model, and only
            attacks from guest to host are considered.

   stuff    Deploy RSB-fill mitigation when retpoline is also deployed.
            Otherwise, deploy the default mitigation. When retpoline mitigation
            is enabled, RSB-stuffing via Call-Depth-Tracking also mitigates
            ITS.

   force    Force the ITS bug and deploy the default mitigation.
   ======== ===================================================================

Sysfs reporting
---------------

The sysfs file showing ITS mitigation status is:

  /sys/devices/system/cpu/vulnerabilities/indirect_target_selection

Note, microcode mitigation status is not reported in this file.

The possible values in this file are:

.. list-table::

   * - Not affected
     - The processor is not vulnerable.
   * - Vulnerable
     - System is vulnerable and no mitigation has been applied.
   * - Vulnerable, KVM: Not affected
     - System is vulnerable to intra-mode BTI, but not affected by eIBRS
       guest/host isolation.
   * - Mitigation: Aligned branch/return thunks
     - The mitigation is enabled; affected indirect branches and RETs are
       relocated to safe thunks.
   * - Mitigation: Retpolines, Stuffing RSB
     - The mitigation is enabled using retpoline and RSB stuffing.
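[Editorial addition, not from the patch: a small user-space C program that
reads this sysfs file.]

    #include <stdio.h>

    /* Print the ITS mitigation status reported by the kernel. */
    int main(void)
    {
            char buf[128];
            FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/"
                            "indirect_target_selection", "r");

            if (!f) {
                    perror("indirect_target_selection");
                    return 1;  /* file absent on kernels without the fix */
            }
            if (fgets(buf, sizeof(buf), f))
                    fputs(buf, stdout);
            fclose(f);
            return 0;
    }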

References
----------
.. [#f1] Microcode repository - https://github.com/intel/Intel-Linux-Processor-Microcode-Data-Files

.. [#f2] Affected Processors list - https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html

.. [#f3] Affected Processors list (machine readable) - https://github.com/intel/Intel-affected-processor-list
@@ -29,14 +29,6 @@ Below is the list of affected Intel processors [#f1]_:
 RAPTORLAKE_S        06_BFH
 =================== ============
 
-As an exception to this table, Intel Xeon E family parts ALDERLAKE(06_97H) and
-RAPTORLAKE(06_B7H) codenamed Catlow are not affected. They are reported as
-vulnerable in Linux because they share the same family/model with an affected
-part. Unlike their affected counterparts, they do not enumerate RFDS_CLEAR or
-CPUID.HYBRID. This information could be used to distinguish between the
-affected and unaffected parts, but it is deemed not worth adding complexity as
-the reporting is fixed automatically when these parts enumerate RFDS_NO.
-
 Mitigation
 ==========
 Intel released a microcode update that enables software to clear sensitive
@@ -2060,6 +2060,23 @@
 			different crypto accelerators. This option can be used
 			to achieve best performance for particular HW.
 
+	indirect_target_selection= [X86,Intel] Mitigation control for Indirect
+			Target Selection(ITS) bug in Intel CPUs. Updated
+			microcode is also required for a fix in IBPB.
+
+			on:     Enable mitigation (default).
+			off:    Disable mitigation.
+			force:  Force the ITS bug and deploy default
+				mitigation.
+			vmexit: Only deploy mitigation if CPU is affected by
+				guest/host isolation part of ITS.
+			stuff:  Deploy RSB-fill mitigation when retpoline is
+				also deployed. Otherwise, deploy the default
+				mitigation.
+
+			For details see:
+			Documentation/admin-guide/hw-vuln/indirect-target-selection.rst
+
 	init=		[KNL]
 			Format: <full_path>
 			Run specified binary instead of /sbin/init as init
@@ -3389,6 +3406,7 @@
 			expose users to several CPU vulnerabilities.
 			Equivalent to: if nokaslr then kpti=0 [ARM64]
 				       gather_data_sampling=off [X86]
+				       indirect_target_selection=off [X86]
 				       kvm.nx_huge_pages=off [X86]
 				       l1tf=off [X86]
 				       mds=off [X86]
@@ -6015,9 +6033,15 @@
 			deployment of the HW BHI control and the SW BHB
 			clearing sequence.
 
-			on   - (default) Enable the HW or SW mitigation
-			       as needed.
-			off  - Disable the mitigation.
+			on     - (default) Enable the HW or SW mitigation as
+				 needed. This protects the kernel from
+				 both syscalls and VMs.
+			vmexit - On systems which don't have the HW mitigation
+				 available, enable the SW mitigation on vmexit
+				 ONLY. On such systems, the host kernel is
+				 protected from VM-originated BHI attacks, but
+				 may still be vulnerable to syscall attacks.
+			off    - Disable the mitigation.
 
 	spectre_v2=	[X86] Control mitigation of Spectre variant 2
 			(indirect branch speculation) vulnerability.
|
@ -12,7 +12,7 @@ RHEL_MINOR = 6
|
|||||||
#
|
#
|
||||||
# Use this spot to avoid future merge conflicts.
|
# Use this spot to avoid future merge conflicts.
|
||||||
# Do not trim this comment.
|
# Do not trim this comment.
|
||||||
RHEL_RELEASE = 570.33.2
|
RHEL_RELEASE = 570.35.1
|
||||||
|
|
||||||
#
|
#
|
||||||
# ZSTREAM
|
# ZSTREAM
|
||||||
|
@@ -80,6 +80,7 @@
 #define ARM_CPU_PART_CORTEX_A78AE	0xD42
 #define ARM_CPU_PART_CORTEX_X1		0xD44
 #define ARM_CPU_PART_CORTEX_A510	0xD46
+#define ARM_CPU_PART_CORTEX_X1C		0xD4C
 #define ARM_CPU_PART_CORTEX_A520	0xD80
 #define ARM_CPU_PART_CORTEX_A710	0xD47
 #define ARM_CPU_PART_CORTEX_A715	0xD4D
@@ -159,6 +160,7 @@
 #define MIDR_CORTEX_A78AE	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
 #define MIDR_CORTEX_X1	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
 #define MIDR_CORTEX_A510 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
+#define MIDR_CORTEX_X1C MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1C)
 #define MIDR_CORTEX_A520 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A520)
 #define MIDR_CORTEX_A710 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
 #define MIDR_CORTEX_A715 MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A715)
|
@ -686,6 +686,7 @@ u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
|
|||||||
}
|
}
|
||||||
#endif
|
#endif
|
||||||
u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type);
|
u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type);
|
||||||
|
u32 aarch64_insn_gen_dsb(enum aarch64_insn_mb_type type);
|
||||||
|
|
||||||
s32 aarch64_get_branch_offset(u32 insn);
|
s32 aarch64_get_branch_offset(u32 insn);
|
||||||
u32 aarch64_set_branch_offset(u32 insn, s32 offset);
|
u32 aarch64_set_branch_offset(u32 insn, s32 offset);
|
||||||
|
@ -97,7 +97,9 @@ enum mitigation_state arm64_get_meltdown_state(void);
|
|||||||
|
|
||||||
enum mitigation_state arm64_get_spectre_bhb_state(void);
|
enum mitigation_state arm64_get_spectre_bhb_state(void);
|
||||||
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
|
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
|
||||||
u8 spectre_bhb_loop_affected(int scope);
|
extern bool __nospectre_bhb;
|
||||||
|
u8 get_spectre_bhb_loop_value(void);
|
||||||
|
bool is_spectre_bhb_fw_mitigated(void);
|
||||||
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
|
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
|
||||||
bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr);
|
bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr);
|
||||||
|
|
||||||
|
@ -845,52 +845,71 @@ static unsigned long system_bhb_mitigations;
|
|||||||
* This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
|
* This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
|
||||||
* SCOPE_SYSTEM call will give the right answer.
|
* SCOPE_SYSTEM call will give the right answer.
|
||||||
*/
|
*/
|
||||||
u8 spectre_bhb_loop_affected(int scope)
|
static bool is_spectre_bhb_safe(int scope)
|
||||||
|
{
|
||||||
|
static const struct midr_range spectre_bhb_safe_list[] = {
|
||||||
|
MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
|
||||||
|
MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
|
||||||
|
MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
|
||||||
|
MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
|
||||||
|
MIDR_ALL_VERSIONS(MIDR_CORTEX_A520),
|
||||||
|
MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
|
||||||
|
{},
|
||||||
|
};
|
||||||
|
static bool all_safe = true;
|
||||||
|
|
||||||
|
if (scope != SCOPE_LOCAL_CPU)
|
||||||
|
return all_safe;
|
||||||
|
|
||||||
|
if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_safe_list))
|
||||||
|
return true;
|
||||||
|
|
||||||
|
all_safe = false;
|
||||||
|
|
||||||
|
return false;
|
||||||
|
}
|
||||||
|
|
||||||
|
static u8 spectre_bhb_loop_affected(void)
|
||||||
{
|
{
|
||||||
u8 k = 0;
|
u8 k = 0;
|
||||||
static u8 max_bhb_k;
|
|
||||||
|
|
||||||
if (scope == SCOPE_LOCAL_CPU) {
|
static const struct midr_range spectre_bhb_k32_list[] = {
|
||||||
static const struct midr_range spectre_bhb_k32_list[] = {
|
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
|
||||||
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
|
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
|
||||||
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
|
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
|
||||||
MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
|
MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
|
||||||
MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
|
MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
|
||||||
MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
|
MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
|
||||||
MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
|
MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
|
||||||
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
|
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
|
||||||
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
|
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
|
||||||
{},
|
{},
|
||||||
};
|
};
|
||||||
static const struct midr_range spectre_bhb_k24_list[] = {
|
static const struct midr_range spectre_bhb_k24_list[] = {
|
||||||
MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
|
MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
|
||||||
MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
|
MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
|
||||||
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
|
MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
|
||||||
{},
|
MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_GOLD),
|
||||||
};
|
{},
|
||||||
static const struct midr_range spectre_bhb_k11_list[] = {
|
};
|
||||||
MIDR_ALL_VERSIONS(MIDR_AMPERE1),
|
static const struct midr_range spectre_bhb_k11_list[] = {
|
||||||
{},
|
MIDR_ALL_VERSIONS(MIDR_AMPERE1),
|
||||||
};
|
{},
|
||||||
static const struct midr_range spectre_bhb_k8_list[] = {
|
};
|
||||||
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
|
static const struct midr_range spectre_bhb_k8_list[] = {
|
||||||
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
|
MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
|
||||||
{},
|
MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
|
||||||
};
|
{},
|
||||||
|
};
|
||||||
|
|
||||||
if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
|
if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
|
||||||
k = 32;
|
k = 32;
|
||||||
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
|
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
|
||||||
k = 24;
|
k = 24;
|
||||||
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
|
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
|
||||||
k = 11;
|
k = 11;
|
||||||
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
|
else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
|
||||||
k = 8;
|
k = 8;
|
||||||
|
|
||||||
max_bhb_k = max(max_bhb_k, k);
|
|
||||||
} else {
|
|
||||||
k = max_bhb_k;
|
|
||||||
}
|
|
||||||
|
|
||||||
return k;
|
return k;
|
||||||
}
|
}
|
||||||
@@ -916,29 +935,13 @@ static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
 	}
 }
 
-static bool is_spectre_bhb_fw_affected(int scope)
+static bool has_spectre_bhb_fw_mitigation(void)
 {
-	static bool system_affected;
 	enum mitigation_state fw_state;
 	bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
-	static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
-		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
-		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
-		{},
-	};
-	bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
-					spectre_bhb_firmware_mitigated_list);
-
-	if (scope != SCOPE_LOCAL_CPU)
-		return system_affected;
 
 	fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
-	if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
-		system_affected = true;
-		return true;
-	}
-
-	return false;
+	return has_smccc && fw_state == SPECTRE_MITIGATED;
 }
 
 static bool supports_ecbhb(int scope)
@@ -954,6 +957,8 @@ static bool supports_ecbhb(int scope)
 				    ID_AA64MMFR1_EL1_ECBHB_SHIFT);
 }
 
+static u8 max_bhb_k;
+
 bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
 			     int scope)
 {
@@ -962,16 +967,23 @@ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
 	if (supports_csv2p3(scope))
 		return false;
 
-	if (supports_clearbhb(scope))
-		return true;
-
-	if (spectre_bhb_loop_affected(scope))
-		return true;
+	if (is_spectre_bhb_safe(scope))
+		return false;
 
-	if (is_spectre_bhb_fw_affected(scope))
-		return true;
+	/*
+	 * At this point the core isn't known to be "safe" so we're going to
+	 * assume it's vulnerable. We still need to update `max_bhb_k` though,
+	 * but only if we aren't mitigating with clearbhb though.
+	 */
+	if (scope == SCOPE_LOCAL_CPU && !supports_clearbhb(SCOPE_LOCAL_CPU))
+		max_bhb_k = max(max_bhb_k, spectre_bhb_loop_affected());
 
-	return false;
+	return true;
+}
+
+u8 get_spectre_bhb_loop_value(void)
+{
+	return max_bhb_k;
 }
 
 static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
@@ -991,7 +1003,7 @@ static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
 	isb();
 }
 
-static bool __read_mostly __nospectre_bhb;
+bool __read_mostly __nospectre_bhb;
 static int __init parse_spectre_bhb_param(char *str)
 {
 	__nospectre_bhb = true;
@@ -1002,7 +1014,7 @@ early_param("nospectre_bhb", parse_spectre_bhb_param);
 void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
 {
 	bp_hardening_cb_t cpu_cb;
-	enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
+	enum mitigation_state state = SPECTRE_VULNERABLE;
 	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);
 
 	if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
@@ -1028,7 +1040,7 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
 		this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
 		state = SPECTRE_MITIGATED;
 		set_bit(BHB_INSN, &system_bhb_mitigations);
-	} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
+	} else if (spectre_bhb_loop_affected()) {
 		/*
 		 * Ensure KVM uses the indirect vector which will have the
 		 * branchy-loop added. A57/A72-r0 will already have selected
@@ -1041,37 +1053,39 @@
 		this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
 		state = SPECTRE_MITIGATED;
 		set_bit(BHB_LOOP, &system_bhb_mitigations);
-	} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
-		fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
-		if (fw_state == SPECTRE_MITIGATED) {
-			/*
-			 * Ensure KVM uses one of the spectre bp_hardening
-			 * vectors. The indirect vector doesn't include the EL3
-			 * call, so needs upgrading to
-			 * HYP_VECTOR_SPECTRE_INDIRECT.
-			 */
-			if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
-				data->slot += 1;
-
-			this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
-
-			/*
-			 * The WA3 call in the vectors supersedes the WA1 call
-			 * made during context-switch. Uninstall any firmware
-			 * bp_hardening callback.
-			 */
-			cpu_cb = spectre_v2_get_sw_mitigation_cb();
-			if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
-				__this_cpu_write(bp_hardening_data.fn, NULL);
-
-			state = SPECTRE_MITIGATED;
-			set_bit(BHB_FW, &system_bhb_mitigations);
-		}
+	} else if (has_spectre_bhb_fw_mitigation()) {
+		/*
+		 * Ensure KVM uses one of the spectre bp_hardening
+		 * vectors. The indirect vector doesn't include the EL3
+		 * call, so needs upgrading to
+		 * HYP_VECTOR_SPECTRE_INDIRECT.
+		 */
+		if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
+			data->slot += 1;
+
+		this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
+
+		/*
+		 * The WA3 call in the vectors supersedes the WA1 call
+		 * made during context-switch. Uninstall any firmware
+		 * bp_hardening callback.
+		 */
+		cpu_cb = spectre_v2_get_sw_mitigation_cb();
+		if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
+			__this_cpu_write(bp_hardening_data.fn, NULL);
+
+		state = SPECTRE_MITIGATED;
+		set_bit(BHB_FW, &system_bhb_mitigations);
 	}
 
 	update_mitigation_state(&spectre_bhb_state, state);
 }
 
+bool is_spectre_bhb_fw_mitigated(void)
+{
+	return test_bit(BHB_FW, &system_bhb_mitigations);
+}
+
 /* Patched to NOP when enabled */
 void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
 						      __le32 *origptr,
@@ -1100,7 +1114,6 @@ void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
 {
 	u8 rd;
 	u32 insn;
-	u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);
 
 	BUG_ON(nr_inst != 1); /* MOV -> MOV */
 
@@ -1109,7 +1122,7 @@ void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
 
 	insn = le32_to_cpu(*origptr);
 	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
-	insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
+	insn = aarch64_insn_gen_movewide(rd, max_bhb_k, 0,
 					 AARCH64_INSN_VARIANT_64BIT,
 					 AARCH64_INSN_MOVEWIDE_ZERO);
 	*updptr++ = cpu_to_le32(insn);
@@ -5,6 +5,7 @@
  *
  * Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
  */
+#include <linux/bitfield.h>
 #include <linux/bitops.h>
 #include <linux/bug.h>
 #include <linux/printk.h>
@@ -1471,43 +1472,41 @@ u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
 	return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
 }
 
+static u32 __get_barrier_crm_val(enum aarch64_insn_mb_type type)
+{
+	switch (type) {
+	case AARCH64_INSN_MB_SY:
+		return 0xf;
+	case AARCH64_INSN_MB_ST:
+		return 0xe;
+	case AARCH64_INSN_MB_LD:
+		return 0xd;
+	case AARCH64_INSN_MB_ISH:
+		return 0xb;
+	case AARCH64_INSN_MB_ISHST:
+		return 0xa;
+	case AARCH64_INSN_MB_ISHLD:
+		return 0x9;
+	case AARCH64_INSN_MB_NSH:
+		return 0x7;
+	case AARCH64_INSN_MB_NSHST:
+		return 0x6;
+	case AARCH64_INSN_MB_NSHLD:
+		return 0x5;
+	default:
+		pr_err("%s: unknown barrier type %d\n", __func__, type);
+		return AARCH64_BREAK_FAULT;
+	}
+}
+
 u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
 {
 	u32 opt;
 	u32 insn;
 
-	switch (type) {
-	case AARCH64_INSN_MB_SY:
-		opt = 0xf;
-		break;
-	case AARCH64_INSN_MB_ST:
-		opt = 0xe;
-		break;
-	case AARCH64_INSN_MB_LD:
-		opt = 0xd;
-		break;
-	case AARCH64_INSN_MB_ISH:
-		opt = 0xb;
-		break;
-	case AARCH64_INSN_MB_ISHST:
-		opt = 0xa;
-		break;
-	case AARCH64_INSN_MB_ISHLD:
-		opt = 0x9;
-		break;
-	case AARCH64_INSN_MB_NSH:
-		opt = 0x7;
-		break;
-	case AARCH64_INSN_MB_NSHST:
-		opt = 0x6;
-		break;
-	case AARCH64_INSN_MB_NSHLD:
-		opt = 0x5;
-		break;
-	default:
-		pr_err("%s: unknown dmb type %d\n", __func__, type);
+	opt = __get_barrier_crm_val(type);
+	if (opt == AARCH64_BREAK_FAULT)
 		return AARCH64_BREAK_FAULT;
-	}
 
 	insn = aarch64_insn_get_dmb_value();
 	insn &= ~GENMASK(11, 8);
@@ -1515,3 +1514,18 @@ u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
 
 	return insn;
 }
+
+u32 aarch64_insn_gen_dsb(enum aarch64_insn_mb_type type)
+{
+	u32 opt, insn;
+
+	opt = __get_barrier_crm_val(type);
+	if (opt == AARCH64_BREAK_FAULT)
+		return AARCH64_BREAK_FAULT;
+
+	insn = aarch64_insn_get_dsb_base_value();
+	insn &= ~GENMASK(11, 8);
+	insn |= (opt << 8);
+
+	return insn;
+}
@@ -7,6 +7,7 @@
 
 #define pr_fmt(fmt) "bpf_jit: " fmt
 
+#include <linux/arm-smccc.h>
 #include <linux/bitfield.h>
 #include <linux/bpf.h>
 #include <linux/filter.h>
@@ -17,6 +18,7 @@
 #include <asm/asm-extable.h>
 #include <asm/byteorder.h>
 #include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
 #include <asm/debug-monitors.h>
 #include <asm/insn.h>
 #include <asm/patching.h>
@@ -857,7 +859,51 @@ static void build_plt(struct jit_ctx *ctx)
 		plt->target = (u64)&dummy_tramp;
 }
 
-static void build_epilogue(struct jit_ctx *ctx)
+/* Clobbers BPF registers 1-4, aka x0-x3 */
+static void __maybe_unused build_bhb_mitigation(struct jit_ctx *ctx)
+{
+	const u8 r1 = bpf2a64[BPF_REG_1]; /* aka x0 */
+	u8 k = get_spectre_bhb_loop_value();
+
+	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
+	    cpu_mitigations_off() || __nospectre_bhb ||
+	    arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE)
+		return;
+
+	if (capable(CAP_SYS_ADMIN))
+		return;
+
+	if (supports_clearbhb(SCOPE_SYSTEM)) {
+		emit(aarch64_insn_gen_hint(AARCH64_INSN_HINT_CLEARBHB), ctx);
+		return;
+	}
+
+	if (k) {
+		emit_a64_mov_i64(r1, k, ctx);
+		emit(A64_B(1), ctx);
+		emit(A64_SUBS_I(true, r1, r1, 1), ctx);
+		emit(A64_B_(A64_COND_NE, -2), ctx);
+		emit(aarch64_insn_gen_dsb(AARCH64_INSN_MB_ISH), ctx);
+		emit(aarch64_insn_get_isb_value(), ctx);
+	}
+
+	if (is_spectre_bhb_fw_mitigated()) {
+		emit(A64_ORR_I(false, r1, AARCH64_INSN_REG_ZR,
+			       ARM_SMCCC_ARCH_WORKAROUND_3), ctx);
+		switch (arm_smccc_1_1_get_conduit()) {
+		case SMCCC_CONDUIT_HVC:
+			emit(aarch64_insn_get_hvc_value(), ctx);
+			break;
+		case SMCCC_CONDUIT_SMC:
+			emit(aarch64_insn_get_smc_value(), ctx);
+			break;
+		default:
+			pr_err_once("Firmware mitigation enabled with unknown conduit\n");
+		}
+	}
+}
+
+static void build_epilogue(struct jit_ctx *ctx, bool was_classic)
 {
 	const u8 r0 = bpf2a64[BPF_REG_0];
 	const u8 ptr = bpf2a64[TCCNT_PTR];
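[Editorial note, not from the patch: my reading of the instruction sequence
build_bhb_mitigation() emits for the loop-based case, written out as an
assembly sketch rather than literal disassembler output.]

    /*
     *	mov	x0, #k		// k = get_spectre_bhb_loop_value()
     * 1:	b	2f		// two taken branches per iteration
     * 2:	subs	x0, x0, #1
     *	b.ne	1b		// loop k times to overwrite branch history
     *	dsb	ish		// order the scrub against what follows
     *	isb
     */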
@@ -870,10 +916,13 @@ static void build_epilogue(struct jit_ctx *ctx, bool was_classic)
 
 	emit(A64_POP(A64_ZR, ptr, A64_SP), ctx);
 
+	if (was_classic)
+		build_bhb_mitigation(ctx);
+
 	/* Restore FP/LR registers */
 	emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);
 
-	/* Set return value */
+	/* Move the return value from bpf:r0 (aka x7) to x0 */
 	emit(A64_MOV(1, A64_R(0), r0), ctx);
 
 	/* Authenticate lr */
@@ -1786,7 +1835,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	}
 
 	ctx.epilogue_offset = ctx.idx;
-	build_epilogue(&ctx);
+	build_epilogue(&ctx, was_classic);
 	build_plt(&ctx);
 
 	extable_align = __alignof__(struct exception_table_entry);
@@ -1849,7 +1898,7 @@ skip_init_ctx:
 		goto out_free_hdr;
 	}
 
-	build_epilogue(&ctx);
+	build_epilogue(&ctx, was_classic);
 	build_plt(&ctx);
 
 	/* Extra pass to validate JITed code. */
@@ -15,10 +15,19 @@
 #define ARCH_FUNC_PREFIX "."
 #endif
 
+#ifdef CONFIG_KFENCE
+extern bool kfence_disabled;
+
+static inline void disable_kfence(void)
+{
+	kfence_disabled = true;
+}
+
 static inline bool arch_kfence_init_pool(void)
 {
-	return true;
+	return !kfence_disabled;
 }
+#endif
 
 #ifdef CONFIG_PPC64
 static inline bool kfence_protect_page(unsigned long addr, bool protect)
@@ -17,6 +17,7 @@
 #include <linux/hugetlb.h>
 #include <linux/string_helpers.h>
 #include <linux/memory.h>
+#include <linux/kfence.h>
 
 #include <asm/pgalloc.h>
 #include <asm/mmu_context.h>
@@ -31,6 +32,7 @@
 #include <asm/uaccess.h>
 #include <asm/ultravisor.h>
 #include <asm/set_memory.h>
+#include <asm/kfence.h>
 
 #include <trace/events/thp.h>
 
@@ -293,7 +295,8 @@ static unsigned long next_boundary(unsigned long addr, unsigned long end)
 
 static int __meminit create_physical_mapping(unsigned long start,
 					     unsigned long end,
-					     int nid, pgprot_t _prot)
+					     int nid, pgprot_t _prot,
+					     unsigned long mapping_sz_limit)
 {
 	unsigned long vaddr, addr, mapping_size = 0;
 	bool prev_exec, exec = false;
@@ -301,7 +304,10 @@ static int __meminit create_physical_mapping(unsigned long start,
 	int psize;
 	unsigned long max_mapping_size = memory_block_size;
 
-	if (debug_pagealloc_enabled_or_kfence())
+	if (mapping_sz_limit < max_mapping_size)
+		max_mapping_size = mapping_sz_limit;
+
+	if (debug_pagealloc_enabled())
 		max_mapping_size = PAGE_SIZE;
 
 	start = ALIGN(start, PAGE_SIZE);
@@ -356,8 +362,74 @@ static int __meminit create_physical_mapping(unsigned long start,
 	return 0;
 }
 
+#ifdef CONFIG_KFENCE
+static bool __ro_after_init kfence_early_init = !!CONFIG_KFENCE_SAMPLE_INTERVAL;
+
+static int __init parse_kfence_early_init(char *arg)
+{
+	int val;
+
+	if (get_option(&arg, &val))
+		kfence_early_init = !!val;
+	return 0;
+}
+early_param("kfence.sample_interval", parse_kfence_early_init);
+
+static inline phys_addr_t alloc_kfence_pool(void)
+{
+	phys_addr_t kfence_pool;
+
+	/*
+	 * TODO: Support to enable KFENCE after bootup depends on the ability to
+	 *       split page table mappings. As such support is not currently
+	 *       implemented for radix pagetables, support enabling KFENCE
+	 *       only at system startup for now.
+	 *
+	 *       After support for splitting mappings is available on radix,
+	 *       alloc_kfence_pool() & map_kfence_pool() can be dropped and
+	 *       mapping for __kfence_pool memory can be
+	 *       split during arch_kfence_init_pool().
+	 */
+	if (!kfence_early_init)
+		goto no_kfence;
+
+	kfence_pool = memblock_phys_alloc(KFENCE_POOL_SIZE, PAGE_SIZE);
+	if (!kfence_pool)
+		goto no_kfence;
+
+	memblock_mark_nomap(kfence_pool, KFENCE_POOL_SIZE);
+	return kfence_pool;
+
+no_kfence:
+	disable_kfence();
+	return 0;
+}
+
+static inline void map_kfence_pool(phys_addr_t kfence_pool)
+{
+	if (!kfence_pool)
+		return;
+
+	if (create_physical_mapping(kfence_pool, kfence_pool + KFENCE_POOL_SIZE,
+				    -1, PAGE_KERNEL, PAGE_SIZE))
+		goto err;
+
+	memblock_clear_nomap(kfence_pool, KFENCE_POOL_SIZE);
+	__kfence_pool = __va(kfence_pool);
+	return;
+
+err:
+	memblock_phys_free(kfence_pool, KFENCE_POOL_SIZE);
+	disable_kfence();
+}
+#else
+static inline phys_addr_t alloc_kfence_pool(void) { return 0; }
+static inline void map_kfence_pool(phys_addr_t kfence_pool) { }
+#endif
+
 static void __init radix_init_pgtable(void)
 {
+	phys_addr_t kfence_pool;
 	unsigned long rts_field;
 	phys_addr_t start, end;
 	u64 i;
@@ -365,6 +437,8 @@ static void __init radix_init_pgtable(void)
 	/* We don't support slb for radix */
 	slb_set_size(0);
 
+	kfence_pool = alloc_kfence_pool();
+
 	/*
 	 * Create the linear mapping
 	 */
@@ -381,9 +455,11 @@ static void __init radix_init_pgtable(void)
 		}
 
 		WARN_ON(create_physical_mapping(start, end,
-						-1, PAGE_KERNEL));
+						-1, PAGE_KERNEL, ~0UL));
 	}
 
+	map_kfence_pool(kfence_pool);
+
 	if (!cpu_has_feature(CPU_FTR_HVMODE) &&
 	    cpu_has_feature(CPU_FTR_P9_RADIX_PREFETCH_BUG)) {
 		/*
@@ -890,7 +966,7 @@ int __meminit radix__create_section_mapping(unsigned long start,
 	}
 
 	return create_physical_mapping(__pa(start), __pa(end),
-				       nid, prot);
+				       nid, prot, ~0UL);
 }
 
 int __meminit radix__remove_section_mapping(unsigned long start, unsigned long end)
|
@ -31,6 +31,9 @@ EXPORT_SYMBOL_GPL(kernstart_virt_addr);
|
|||||||
|
|
||||||
bool disable_kuep = !IS_ENABLED(CONFIG_PPC_KUEP);
|
bool disable_kuep = !IS_ENABLED(CONFIG_PPC_KUEP);
|
||||||
bool disable_kuap = !IS_ENABLED(CONFIG_PPC_KUAP);
|
bool disable_kuap = !IS_ENABLED(CONFIG_PPC_KUAP);
|
||||||
|
#ifdef CONFIG_KFENCE
|
||||||
|
bool __ro_after_init kfence_disabled;
|
||||||
|
#endif
|
||||||
|
|
||||||
static int __init parse_nosmep(char *p)
|
static int __init parse_nosmep(char *p)
|
||||||
{
|
{
|
||||||
|
@ -438,7 +438,7 @@ static void cpum_cf_make_setsize(enum cpumf_ctr_set ctrset)
|
|||||||
ctrset_size = 48;
|
ctrset_size = 48;
|
||||||
else if (cpumf_ctr_info.csvn >= 3 && cpumf_ctr_info.csvn <= 5)
|
else if (cpumf_ctr_info.csvn >= 3 && cpumf_ctr_info.csvn <= 5)
|
||||||
ctrset_size = 128;
|
ctrset_size = 128;
|
||||||
else if (cpumf_ctr_info.csvn == 6 || cpumf_ctr_info.csvn == 7)
|
else if (cpumf_ctr_info.csvn >= 6 && cpumf_ctr_info.csvn <= 8)
|
||||||
ctrset_size = 160;
|
ctrset_size = 160;
|
||||||
break;
|
break;
|
||||||
case CPUMF_CTR_SET_MT_DIAG:
|
case CPUMF_CTR_SET_MT_DIAG:
|
||||||
|
@ -237,7 +237,6 @@ CPUMF_EVENT_ATTR(cf_z14, TX_C_TABORT_NO_SPECIAL, 0x00f4);
|
|||||||
CPUMF_EVENT_ATTR(cf_z14, TX_C_TABORT_SPECIAL, 0x00f5);
|
CPUMF_EVENT_ATTR(cf_z14, TX_C_TABORT_SPECIAL, 0x00f5);
|
||||||
CPUMF_EVENT_ATTR(cf_z14, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
|
CPUMF_EVENT_ATTR(cf_z14, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
|
||||||
CPUMF_EVENT_ATTR(cf_z14, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
|
CPUMF_EVENT_ATTR(cf_z14, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
|
||||||
|
|
||||||
CPUMF_EVENT_ATTR(cf_z15, L1D_RO_EXCL_WRITES, 0x0080);
|
CPUMF_EVENT_ATTR(cf_z15, L1D_RO_EXCL_WRITES, 0x0080);
|
||||||
CPUMF_EVENT_ATTR(cf_z15, DTLB2_WRITES, 0x0081);
|
CPUMF_EVENT_ATTR(cf_z15, DTLB2_WRITES, 0x0081);
|
||||||
CPUMF_EVENT_ATTR(cf_z15, DTLB2_MISSES, 0x0082);
|
CPUMF_EVENT_ATTR(cf_z15, DTLB2_MISSES, 0x0082);
|
||||||
@@ -365,6 +364,83 @@ CPUMF_EVENT_ATTR(cf_z16, NNPA_WAIT_LOCK, 0x010d);
 CPUMF_EVENT_ATTR(cf_z16, NNPA_HOLD_LOCK, 0x010e);
 CPUMF_EVENT_ATTR(cf_z16, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
 CPUMF_EVENT_ATTR(cf_z16, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
+CPUMF_EVENT_ATTR(cf_z17, L1D_RO_EXCL_WRITES, 0x0080);
+CPUMF_EVENT_ATTR(cf_z17, DTLB2_WRITES, 0x0081);
+CPUMF_EVENT_ATTR(cf_z17, DTLB2_MISSES, 0x0082);
+CPUMF_EVENT_ATTR(cf_z17, CRSTE_1MB_WRITES, 0x0083);
+CPUMF_EVENT_ATTR(cf_z17, DTLB2_GPAGE_WRITES, 0x0084);
+CPUMF_EVENT_ATTR(cf_z17, ITLB2_WRITES, 0x0086);
+CPUMF_EVENT_ATTR(cf_z17, ITLB2_MISSES, 0x0087);
+CPUMF_EVENT_ATTR(cf_z17, TLB2_PTE_WRITES, 0x0089);
+CPUMF_EVENT_ATTR(cf_z17, TLB2_CRSTE_WRITES, 0x008a);
+CPUMF_EVENT_ATTR(cf_z17, TLB2_ENGINES_BUSY, 0x008b);
+CPUMF_EVENT_ATTR(cf_z17, TX_C_TEND, 0x008c);
+CPUMF_EVENT_ATTR(cf_z17, TX_NC_TEND, 0x008d);
+CPUMF_EVENT_ATTR(cf_z17, L1C_TLB2_MISSES, 0x008f);
+CPUMF_EVENT_ATTR(cf_z17, DCW_REQ, 0x0091);
+CPUMF_EVENT_ATTR(cf_z17, DCW_REQ_IV, 0x0092);
+CPUMF_EVENT_ATTR(cf_z17, DCW_REQ_CHIP_HIT, 0x0093);
+CPUMF_EVENT_ATTR(cf_z17, DCW_REQ_DRAWER_HIT, 0x0094);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_CHIP, 0x0095);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_CHIP_IV, 0x0096);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_CHIP_CHIP_HIT, 0x0097);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_CHIP_DRAWER_HIT, 0x0098);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_MODULE, 0x0099);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_DRAWER, 0x009a);
+CPUMF_EVENT_ATTR(cf_z17, DCW_OFF_DRAWER, 0x009b);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_CHIP_MEMORY, 0x009c);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_MODULE_MEMORY, 0x009d);
+CPUMF_EVENT_ATTR(cf_z17, DCW_ON_DRAWER_MEMORY, 0x009e);
+CPUMF_EVENT_ATTR(cf_z17, DCW_OFF_DRAWER_MEMORY, 0x009f);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_MODULE_IV, 0x00a0);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_MODULE_CHIP_HIT, 0x00a1);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_MODULE_DRAWER_HIT, 0x00a2);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_DRAWER_IV, 0x00a3);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_DRAWER_CHIP_HIT, 0x00a4);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_ON_DRAWER_DRAWER_HIT, 0x00a5);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_OFF_DRAWER_IV, 0x00a6);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_OFF_DRAWER_CHIP_HIT, 0x00a7);
+CPUMF_EVENT_ATTR(cf_z17, IDCW_OFF_DRAWER_DRAWER_HIT, 0x00a8);
+CPUMF_EVENT_ATTR(cf_z17, ICW_REQ, 0x00a9);
+CPUMF_EVENT_ATTR(cf_z17, ICW_REQ_IV, 0x00aa);
+CPUMF_EVENT_ATTR(cf_z17, ICW_REQ_CHIP_HIT, 0x00ab);
+CPUMF_EVENT_ATTR(cf_z17, ICW_REQ_DRAWER_HIT, 0x00ac);
+CPUMF_EVENT_ATTR(cf_z17, ICW_ON_CHIP, 0x00ad);
+CPUMF_EVENT_ATTR(cf_z17, ICW_ON_CHIP_IV, 0x00ae);
+CPUMF_EVENT_ATTR(cf_z17, ICW_ON_CHIP_CHIP_HIT, 0x00af);
+CPUMF_EVENT_ATTR(cf_z17, ICW_ON_CHIP_DRAWER_HIT, 0x00b0);
+CPUMF_EVENT_ATTR(cf_z17, ICW_ON_MODULE, 0x00b1);
+CPUMF_EVENT_ATTR(cf_z17, ICW_ON_DRAWER, 0x00b2);
+CPUMF_EVENT_ATTR(cf_z17, ICW_OFF_DRAWER, 0x00b3);
+CPUMF_EVENT_ATTR(cf_z17, CYCLES_SAMETHRD, 0x00ca);
+CPUMF_EVENT_ATTR(cf_z17, CYCLES_DIFFTHRD, 0x00cb);
+CPUMF_EVENT_ATTR(cf_z17, INST_SAMETHRD, 0x00cc);
+CPUMF_EVENT_ATTR(cf_z17, INST_DIFFTHRD, 0x00cd);
+CPUMF_EVENT_ATTR(cf_z17, WRONG_BRANCH_PREDICTION, 0x00ce);
+CPUMF_EVENT_ATTR(cf_z17, VX_BCD_EXECUTION_SLOTS, 0x00e1);
+CPUMF_EVENT_ATTR(cf_z17, DECIMAL_INSTRUCTIONS, 0x00e2);
+CPUMF_EVENT_ATTR(cf_z17, LAST_HOST_TRANSLATIONS, 0x00e8);
+CPUMF_EVENT_ATTR(cf_z17, TX_NC_TABORT, 0x00f4);
+CPUMF_EVENT_ATTR(cf_z17, TX_C_TABORT_NO_SPECIAL, 0x00f5);
+CPUMF_EVENT_ATTR(cf_z17, TX_C_TABORT_SPECIAL, 0x00f6);
+CPUMF_EVENT_ATTR(cf_z17, DFLT_ACCESS, 0x00f8);
+CPUMF_EVENT_ATTR(cf_z17, DFLT_CYCLES, 0x00fd);
+CPUMF_EVENT_ATTR(cf_z17, SORTL, 0x0100);
+CPUMF_EVENT_ATTR(cf_z17, DFLT_CC, 0x0109);
+CPUMF_EVENT_ATTR(cf_z17, DFLT_CCFINISH, 0x010a);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_INVOCATIONS, 0x010b);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_COMPLETIONS, 0x010c);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_WAIT_LOCK, 0x010d);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_HOLD_LOCK, 0x010e);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_INST_ONCHIP, 0x0110);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_INST_OFFCHIP, 0x0111);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_INST_DIFF, 0x0112);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_4K_PREFETCH, 0x0114);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_COMPL_LOCK, 0x0115);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_RETRY_LOCK, 0x0116);
+CPUMF_EVENT_ATTR(cf_z17, NNPA_RETRY_LOCK_WITH_PLO, 0x0117);
+CPUMF_EVENT_ATTR(cf_z17, MT_DIAG_CYCLES_ONE_THR_ACTIVE, 0x01c0);
+CPUMF_EVENT_ATTR(cf_z17, MT_DIAG_CYCLES_TWO_THR_ACTIVE, 0x01c1);
 
 static struct attribute *cpumcf_fvn1_pmu_event_attr[] __initdata = {
 	CPUMF_EVENT_PTR(cf_fvn1, CPU_CYCLES),
@@ -414,7 +490,7 @@ static struct attribute *cpumcf_svn_12345_pmu_event_attr[] __initdata = {
 	NULL,
 };
 
-static struct attribute *cpumcf_svn_67_pmu_event_attr[] __initdata = {
+static struct attribute *cpumcf_svn_678_pmu_event_attr[] __initdata = {
 	CPUMF_EVENT_PTR(cf_svn_12345, PRNG_FUNCTIONS),
 	CPUMF_EVENT_PTR(cf_svn_12345, PRNG_CYCLES),
 	CPUMF_EVENT_PTR(cf_svn_12345, PRNG_BLOCKED_FUNCTIONS),
@@ -779,6 +855,87 @@ static struct attribute *cpumcf_z16_pmu_event_attr[] __initdata = {
 	NULL,
 };
 
+static struct attribute *cpumcf_z17_pmu_event_attr[] __initdata = {
+	CPUMF_EVENT_PTR(cf_z17, L1D_RO_EXCL_WRITES),
+	CPUMF_EVENT_PTR(cf_z17, DTLB2_WRITES),
+	CPUMF_EVENT_PTR(cf_z17, DTLB2_MISSES),
+	CPUMF_EVENT_PTR(cf_z17, CRSTE_1MB_WRITES),
+	CPUMF_EVENT_PTR(cf_z17, DTLB2_GPAGE_WRITES),
+	CPUMF_EVENT_PTR(cf_z17, ITLB2_WRITES),
+	CPUMF_EVENT_PTR(cf_z17, ITLB2_MISSES),
+	CPUMF_EVENT_PTR(cf_z17, TLB2_PTE_WRITES),
+	CPUMF_EVENT_PTR(cf_z17, TLB2_CRSTE_WRITES),
+	CPUMF_EVENT_PTR(cf_z17, TLB2_ENGINES_BUSY),
+	CPUMF_EVENT_PTR(cf_z17, TX_C_TEND),
+	CPUMF_EVENT_PTR(cf_z17, TX_NC_TEND),
+	CPUMF_EVENT_PTR(cf_z17, L1C_TLB2_MISSES),
+	CPUMF_EVENT_PTR(cf_z17, DCW_REQ),
+	CPUMF_EVENT_PTR(cf_z17, DCW_REQ_IV),
+	CPUMF_EVENT_PTR(cf_z17, DCW_REQ_CHIP_HIT),
+	CPUMF_EVENT_PTR(cf_z17, DCW_REQ_DRAWER_HIT),
+	CPUMF_EVENT_PTR(cf_z17, DCW_ON_CHIP),
+	CPUMF_EVENT_PTR(cf_z17, DCW_ON_CHIP_IV),
+	CPUMF_EVENT_PTR(cf_z17, DCW_ON_CHIP_CHIP_HIT),
+	CPUMF_EVENT_PTR(cf_z17, DCW_ON_CHIP_DRAWER_HIT),
+	CPUMF_EVENT_PTR(cf_z17, DCW_ON_MODULE),
+	CPUMF_EVENT_PTR(cf_z17, DCW_ON_DRAWER),
+	CPUMF_EVENT_PTR(cf_z17, DCW_OFF_DRAWER),
+	CPUMF_EVENT_PTR(cf_z17, DCW_ON_CHIP_MEMORY),
+	CPUMF_EVENT_PTR(cf_z17, DCW_ON_MODULE_MEMORY),
+	CPUMF_EVENT_PTR(cf_z17, DCW_ON_DRAWER_MEMORY),
+	CPUMF_EVENT_PTR(cf_z17, DCW_OFF_DRAWER_MEMORY),
+	CPUMF_EVENT_PTR(cf_z17, IDCW_ON_MODULE_IV),
+	CPUMF_EVENT_PTR(cf_z17, IDCW_ON_MODULE_CHIP_HIT),
+	CPUMF_EVENT_PTR(cf_z17, IDCW_ON_MODULE_DRAWER_HIT),
+	CPUMF_EVENT_PTR(cf_z17, IDCW_ON_DRAWER_IV),
+	CPUMF_EVENT_PTR(cf_z17, IDCW_ON_DRAWER_CHIP_HIT),
+	CPUMF_EVENT_PTR(cf_z17, IDCW_ON_DRAWER_DRAWER_HIT),
+	CPUMF_EVENT_PTR(cf_z17, IDCW_OFF_DRAWER_IV),
+	CPUMF_EVENT_PTR(cf_z17, IDCW_OFF_DRAWER_CHIP_HIT),
+	CPUMF_EVENT_PTR(cf_z17, IDCW_OFF_DRAWER_DRAWER_HIT),
+	CPUMF_EVENT_PTR(cf_z17, ICW_REQ),
+	CPUMF_EVENT_PTR(cf_z17, ICW_REQ_IV),
+	CPUMF_EVENT_PTR(cf_z17, ICW_REQ_CHIP_HIT),
+	CPUMF_EVENT_PTR(cf_z17, ICW_REQ_DRAWER_HIT),
+	CPUMF_EVENT_PTR(cf_z17, ICW_ON_CHIP),
+	CPUMF_EVENT_PTR(cf_z17, ICW_ON_CHIP_IV),
+	CPUMF_EVENT_PTR(cf_z17, ICW_ON_CHIP_CHIP_HIT),
+	CPUMF_EVENT_PTR(cf_z17, ICW_ON_CHIP_DRAWER_HIT),
+	CPUMF_EVENT_PTR(cf_z17, ICW_ON_MODULE),
+	CPUMF_EVENT_PTR(cf_z17, ICW_ON_DRAWER),
+	CPUMF_EVENT_PTR(cf_z17, ICW_OFF_DRAWER),
+	CPUMF_EVENT_PTR(cf_z17, CYCLES_SAMETHRD),
+	CPUMF_EVENT_PTR(cf_z17, CYCLES_DIFFTHRD),
+	CPUMF_EVENT_PTR(cf_z17, INST_SAMETHRD),
+	CPUMF_EVENT_PTR(cf_z17, INST_DIFFTHRD),
+	CPUMF_EVENT_PTR(cf_z17, WRONG_BRANCH_PREDICTION),
+	CPUMF_EVENT_PTR(cf_z17, VX_BCD_EXECUTION_SLOTS),
+	CPUMF_EVENT_PTR(cf_z17, DECIMAL_INSTRUCTIONS),
+	CPUMF_EVENT_PTR(cf_z17, LAST_HOST_TRANSLATIONS),
+	CPUMF_EVENT_PTR(cf_z17, TX_NC_TABORT),
+	CPUMF_EVENT_PTR(cf_z17, TX_C_TABORT_NO_SPECIAL),
+	CPUMF_EVENT_PTR(cf_z17, TX_C_TABORT_SPECIAL),
+	CPUMF_EVENT_PTR(cf_z17, DFLT_ACCESS),
+	CPUMF_EVENT_PTR(cf_z17, DFLT_CYCLES),
+	CPUMF_EVENT_PTR(cf_z17, SORTL),
+	CPUMF_EVENT_PTR(cf_z17, DFLT_CC),
+	CPUMF_EVENT_PTR(cf_z17, DFLT_CCFINISH),
+	CPUMF_EVENT_PTR(cf_z17, NNPA_INVOCATIONS),
+	CPUMF_EVENT_PTR(cf_z17, NNPA_COMPLETIONS),
+	CPUMF_EVENT_PTR(cf_z17, NNPA_WAIT_LOCK),
+	CPUMF_EVENT_PTR(cf_z17, NNPA_HOLD_LOCK),
+	CPUMF_EVENT_PTR(cf_z17, NNPA_INST_ONCHIP),
+	CPUMF_EVENT_PTR(cf_z17, NNPA_INST_OFFCHIP),
+	CPUMF_EVENT_PTR(cf_z17, NNPA_INST_DIFF),
+	CPUMF_EVENT_PTR(cf_z17, NNPA_4K_PREFETCH),
+	CPUMF_EVENT_PTR(cf_z17, NNPA_COMPL_LOCK),
+	CPUMF_EVENT_PTR(cf_z17, NNPA_RETRY_LOCK),
+	CPUMF_EVENT_PTR(cf_z17, NNPA_RETRY_LOCK_WITH_PLO),
+	CPUMF_EVENT_PTR(cf_z17, MT_DIAG_CYCLES_ONE_THR_ACTIVE),
+	CPUMF_EVENT_PTR(cf_z17, MT_DIAG_CYCLES_TWO_THR_ACTIVE),
+	NULL,
+};
+
 /* END: CPUM_CF COUNTER DEFINITIONS ===================================== */
 
 static struct attribute_group cpumcf_pmu_events_group = {
@ -859,7 +1016,7 @@ __init const struct attribute_group **cpumf_cf_event_group(void)
|
|||||||
if (ci.csvn >= 1 && ci.csvn <= 5)
|
if (ci.csvn >= 1 && ci.csvn <= 5)
|
||||||
csvn = cpumcf_svn_12345_pmu_event_attr;
|
csvn = cpumcf_svn_12345_pmu_event_attr;
|
||||||
else if (ci.csvn >= 6)
|
else if (ci.csvn >= 6)
|
||||||
csvn = cpumcf_svn_67_pmu_event_attr;
|
csvn = cpumcf_svn_678_pmu_event_attr;
|
||||||
|
|
||||||
/* Determine model-specific counter set(s) */
|
/* Determine model-specific counter set(s) */
|
||||||
get_cpu_id(&cpu_id);
|
get_cpu_id(&cpu_id);
|
||||||
@ -892,6 +1049,10 @@ __init const struct attribute_group **cpumf_cf_event_group(void)
|
|||||||
case 0x3932:
|
case 0x3932:
|
||||||
model = cpumcf_z16_pmu_event_attr;
|
model = cpumcf_z16_pmu_event_attr;
|
||||||
break;
|
break;
|
||||||
|
case 0x9175:
|
||||||
|
case 0x9176:
|
||||||
|
model = cpumcf_z17_pmu_event_attr;
|
||||||
|
break;
|
||||||
default:
|
default:
|
||||||
model = none;
|
model = none;
|
||||||
break;
|
break;
|
||||||

arch/s390/kernel/perf_pai_crypto.c

@@ -634,6 +634,12 @@ static int __init attr_event_init_one(struct attribute **attrs, int num)
 {
 	struct perf_pmu_events_attr *pa;
 
+	/* Index larger than array_size, no counter name available */
+	if (num >= ARRAY_SIZE(paicrypt_ctrnames)) {
+		attrs[num] = NULL;
+		return 0;
+	}
+
 	pa = kzalloc(sizeof(*pa), GFP_KERNEL);
 	if (!pa)
 		return -ENOMEM;
@@ -654,14 +660,13 @@ static int __init attr_event_init(void)
 	struct attribute **attrs;
 	int ret, i;
 
-	attrs = kmalloc_array(ARRAY_SIZE(paicrypt_ctrnames) + 1, sizeof(*attrs),
-			      GFP_KERNEL);
+	attrs = kmalloc_array(paicrypt_cnt + 2, sizeof(*attrs), GFP_KERNEL);
 	if (!attrs)
 		return -ENOMEM;
-	for (i = 0; i < ARRAY_SIZE(paicrypt_ctrnames); i++) {
+	for (i = 0; i <= paicrypt_cnt; i++) {
 		ret = attr_event_init_one(attrs, i);
 		if (ret) {
-			attr_event_free(attrs, i - 1);
+			attr_event_free(attrs, i);
 			return ret;
 		}
 	}
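
The two pai_crypto hunks above work together: the attribute array is now sized from the hardware-reported counter count (indices 0 through paicrypt_cnt, plus one slot for the terminating NULL), while attr_event_init_one() bails out gracefully if the hardware reports more counters than the compiled-in name table knows about. A minimal user-space sketch of the same sizing pattern, with hypothetical names (ctrnames, hw_cnt) standing in for the driver's tables:

    #include <stdio.h>
    #include <stdlib.h>

    #define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

    /* stand-in for paicrypt_ctrnames: the compiled-in counter names */
    static const char *ctrnames[] = { "EVENT_0", "EVENT_1", "EVENT_2" };

    int main(void)
    {
            unsigned int hw_cnt = 5; /* stand-in for paicrypt_cnt from hardware */
            /* one slot per counter 0..hw_cnt plus a NULL sentinel: hw_cnt + 2 */
            const char **attrs = calloc(hw_cnt + 2, sizeof(*attrs));
            unsigned int i;

            if (!attrs)
                    return 1;
            for (i = 0; i <= hw_cnt; i++) {
                    if (i >= ARRAY_SIZE(ctrnames))
                            break; /* no compiled-in name; slot stays NULL */
                    attrs[i] = ctrnames[i];
            }
            for (i = 0; attrs[i]; i++)
                    printf("%s\n", attrs[i]);
            free(attrs);
            return 0;
    }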

arch/s390/kernel/perf_pai_ext.c

@@ -588,6 +588,12 @@ static int __init attr_event_init_one(struct attribute **attrs, int num)
 {
 	struct perf_pmu_events_attr *pa;
 
+	/* Index larger than array_size, no counter name available */
+	if (num >= ARRAY_SIZE(paiext_ctrnames)) {
+		attrs[num] = NULL;
+		return 0;
+	}
+
 	pa = kzalloc(sizeof(*pa), GFP_KERNEL);
 	if (!pa)
 		return -ENOMEM;
@@ -608,14 +614,13 @@ static int __init attr_event_init(void)
 	struct attribute **attrs;
 	int ret, i;
 
-	attrs = kmalloc_array(ARRAY_SIZE(paiext_ctrnames) + 1, sizeof(*attrs),
-			      GFP_KERNEL);
+	attrs = kmalloc_array(paiext_cnt + 2, sizeof(*attrs), GFP_KERNEL);
 	if (!attrs)
 		return -ENOMEM;
-	for (i = 0; i < ARRAY_SIZE(paiext_ctrnames); i++) {
+	for (i = 0; i <= paiext_cnt; i++) {
 		ret = attr_event_init_one(attrs, i);
 		if (ret) {
-			attr_event_free(attrs, i - 1);
+			attr_event_free(attrs, i);
 			return ret;
 		}
 	}

arch/s390/kernel/topology.c

@@ -561,6 +561,16 @@ static void __init alloc_masks(struct sysinfo_15_1_x *info,
 	}
 }
 
+static int __init detect_polarization(union topology_entry *tle)
+{
+	struct topology_core *tl_core;
+
+	while (tle->nl)
+		tle = next_tle(tle);
+	tl_core = (struct topology_core *)tle;
+	return tl_core->pp != POLARIZATION_HRZ;
+}
+
 void __init topology_init_early(void)
 {
 	struct sysinfo_15_1_x *info;
@@ -580,6 +590,7 @@ void __init topology_init_early(void)
 		__func__, PAGE_SIZE, PAGE_SIZE);
 	info = tl_info;
 	store_topology(info);
+	cpu_management = detect_polarization(info->tle);
 	pr_info("The CPU configuration topology of the machine is: %d %d %d %d %d %d / %d\n",
 		info->mag[0], info->mag[1], info->mag[2], info->mag[3],
 		info->mag[4], info->mag[5], info->mnest);

arch/x86/Kconfig

@@ -2628,6 +2628,17 @@ config MITIGATION_SPECTRE_BHI
 	  indirect branches.
 	  See <file:Documentation/admin-guide/hw-vuln/spectre.rst>
 
+config MITIGATION_ITS
+	bool "Enable Indirect Target Selection mitigation"
+	depends on CPU_SUP_INTEL && X86_64
+	depends on MITIGATION_RETPOLINE && MITIGATION_RETHUNK
+	default y
+	help
+	  Enable Indirect Target Selection (ITS) mitigation. ITS is a bug in
+	  BPU on some Intel CPUs that may allow Spectre V2 style attacks. If
+	  disabled, mitigation cannot be enabled via cmdline.
+	  See <file:Documentation/admin-guide/hw-vuln/indirect-target-selection.rst>
+
 endif
 
 config ARCH_HAS_ADD_PAGES

arch/x86/entry/entry_64.S

@@ -1530,7 +1530,9 @@ SYM_CODE_END(rewind_stack_and_make_dead)
  * ORC to unwind properly.
  *
  * The alignment is for performance and not for safety, and may be safely
- * refactored in the future if needed.
+ * refactored in the future if needed. The .skips are for safety, to ensure
+ * that all RETs are in the second half of a cacheline to mitigate Indirect
+ * Target Selection, rather than taking the slowpath via its_return_thunk.
  */
 SYM_FUNC_START(clear_bhb_loop)
 	push	%rbp
@@ -1540,10 +1542,22 @@ SYM_FUNC_START(clear_bhb_loop)
 	call	1f
 	jmp	5f
 	.align 64, 0xcc
+	/*
+	 * Shift instructions so that the RET is in the upper half of the
+	 * cacheline and don't take the slowpath to its_return_thunk.
+	 */
+	.skip 32 - (.Lret1 - 1f), 0xcc
 	ANNOTATE_INTRA_FUNCTION_CALL
 1:	call	2f
-	RET
+.Lret1:	RET
 	.align 64, 0xcc
+	/*
+	 * As above shift instructions for RET at .Lret2 as well.
+	 *
+	 * This should be ideally be: .skip 32 - (.Lret2 - 2f), 0xcc
+	 * but some Clang versions (e.g. 18) don't like this.
+	 */
+	.skip 32 - 18, 0xcc
 2:	movl	$5, %eax
 3:	jmp	4f
 	nop
@@ -1551,7 +1565,7 @@ SYM_FUNC_START(clear_bhb_loop)
 	jnz	3b
 	sub	$1, %ecx
 	jnz	1b
-	RET
+.Lret2:	RET
 5:	lfence
 	pop	%rbp
 	RET
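
The .skip directives above implement a simple placement rule: after a 64-byte alignment point, pad with 0xcc so that the RET begins at offset 32, i.e. in the upper half of its cacheline, where ITS cannot mispredict it and the fast inline RET can be kept. A hedged sketch of that arithmetic (the 5-byte intra-function CALL size is the only assumption):

    #include <assert.h>
    #include <stdio.h>

    /* Bit 5 of an address selects the half of a 64-byte cacheline:
     * offsets 0-31 are the lower (ITS-affected) half. */
    static int in_lower_half(unsigned long addr)
    {
            return !(addr & 0x20);
    }

    int main(void)
    {
            unsigned long base = 0x1000;   /* start of code after .align 64 */
            unsigned long body = 5;        /* .Lret1 - 1f: the 5-byte CALL */
            unsigned long pad = 32 - body; /* .skip 32 - (.Lret1 - 1f), 0xcc */
            unsigned long ret_addr = base + pad + body;

            assert(ret_addr % 64 == 32);   /* RET sits exactly at mid-line */
            assert(!in_lower_half(ret_addr));
            printf("RET at %#lx (offset %lu)\n", ret_addr, ret_addr & 63);
            return 0;
    }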

arch/x86/include/asm/alternative.h

@@ -135,6 +135,20 @@ static __always_inline int x86_call_depth_emit_accounting(u8 **pprog,
 }
 #endif
 
+#if defined(CONFIG_MITIGATION_RETHUNK) && defined(CONFIG_OBJTOOL)
+extern bool cpu_wants_rethunk(void);
+extern bool cpu_wants_rethunk_at(void *addr);
+#else
+static __always_inline bool cpu_wants_rethunk(void)
+{
+	return false;
+}
+static __always_inline bool cpu_wants_rethunk_at(void *addr)
+{
+	return false;
+}
+#endif
+
 #ifdef CONFIG_SMP
 extern void alternatives_smp_module_add(struct module *mod, char *name,
 					void *locks, void *locks_end,

arch/x86/include/asm/cpu_device_id.h

@@ -53,9 +53,11 @@
 #define X86_CENTAUR_FAM6_C7_D		0xd
 #define X86_CENTAUR_FAM6_NANO		0xf
 
-#define X86_STEPPINGS(mins, maxs)	GENMASK(maxs, mins)
+/* x86_cpu_id::flags */
+#define X86_CPU_ID_FLAG_ENTRY_VALID	BIT(0)
+
 /**
- * X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE - Base macro for CPU matching
+ * X86_MATCH_CPU - Base macro for CPU matching
  * @_vendor:	The vendor name, e.g. INTEL, AMD, HYGON, ..., ANY
  *		The name is expanded to X86_VENDOR_@_vendor
  * @_family:	The family number or X86_FAMILY_ANY
@@ -72,23 +74,14 @@
  * into another macro at the usage site for good reasons, then please
 * start this local macro with X86_MATCH to allow easy grepping.
 */
-#define X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(_vendor, _family, _model, \
-						_steppings, _feature, _data) { \
-	.vendor		= X86_VENDOR_##_vendor, \
-	.family		= _family, \
-	.model		= _model, \
-	.steppings	= _steppings, \
-	.feature	= _feature, \
-	.driver_data	= (unsigned long) _data \
-}
-
-#define X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE(_vendor, _family, _model, \
-						_steppings, _feature, _data) { \
+#define X86_MATCH_CPU(_vendor, _family, _model, _steppings, _feature, _type, _data) { \
 	.vendor		= _vendor, \
 	.family		= _family, \
 	.model		= _model, \
 	.steppings	= _steppings, \
 	.feature	= _feature, \
+	.flags		= X86_CPU_ID_FLAG_ENTRY_VALID, \
+	.type		= _type, \
 	.driver_data	= (unsigned long) _data \
 }
 
@@ -107,8 +100,7 @@
 * set to wildcards.
 */
 #define X86_MATCH_VENDOR_FAM_MODEL_FEATURE(vendor, family, model, feature, data) \
-	X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(vendor, family, model, \
-						X86_STEPPING_ANY, feature, data)
+	X86_MATCH_CPU(X86_VENDOR_##vendor, family, model, X86_STEPPING_ANY, feature, X86_CPU_TYPE_ANY, data)
 
 /**
  * X86_MATCH_VENDOR_FAM_FEATURE - Macro for matching vendor, family and CPU feature
@@ -119,13 +111,10 @@
 * @data:	Driver specific data or NULL. The internal storage
 *		format is unsigned long. The supplied value, pointer
 *		etc. is casted to unsigned long internally.
- *
- * All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
- * set to wildcards.
 */
 #define X86_MATCH_VENDOR_FAM_FEATURE(vendor, family, feature, data) \
-	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(vendor, family, \
-					X86_MODEL_ANY, feature, data)
+	X86_MATCH_CPU(X86_VENDOR_##vendor, family, X86_MODEL_ANY, \
+		      X86_STEPPING_ANY, feature, X86_CPU_TYPE_ANY, data)
 
 /**
  * X86_MATCH_VENDOR_FEATURE - Macro for matching vendor and CPU feature
@@ -135,12 +124,10 @@
 * @data:	Driver specific data or NULL. The internal storage
 *		format is unsigned long. The supplied value, pointer
 *		etc. is casted to unsigned long internally.
- *
- * All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
- * set to wildcards.
 */
 #define X86_MATCH_VENDOR_FEATURE(vendor, feature, data) \
-	X86_MATCH_VENDOR_FAM_FEATURE(vendor, X86_FAMILY_ANY, feature, data)
+	X86_MATCH_CPU(X86_VENDOR_##vendor, X86_FAMILY_ANY, X86_MODEL_ANY, \
+		      X86_STEPPING_ANY, feature, X86_CPU_TYPE_ANY, data)
 
 /**
  * X86_MATCH_FEATURE - Macro for matching a CPU feature
@@ -148,12 +135,10 @@
 * @data:	Driver specific data or NULL. The internal storage
 *		format is unsigned long. The supplied value, pointer
 *		etc. is casted to unsigned long internally.
- *
- * All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
- * set to wildcards.
 */
 #define X86_MATCH_FEATURE(feature, data) \
-	X86_MATCH_VENDOR_FEATURE(ANY, feature, data)
+	X86_MATCH_CPU(X86_VENDOR_ANY, X86_FAMILY_ANY, X86_MODEL_ANY, \
+		      X86_STEPPING_ANY, feature, X86_CPU_TYPE_ANY, data)
 
 /**
  * X86_MATCH_VENDOR_FAM_MODEL - Match vendor, family and model
@@ -164,13 +149,10 @@
 * @data:	Driver specific data or NULL. The internal storage
 *		format is unsigned long. The supplied value, pointer
 *		etc. is casted to unsigned long internally.
- *
- * All other missing arguments of X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
- * set to wildcards.
 */
 #define X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, data) \
-	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(vendor, family, model, \
-					X86_FEATURE_ANY, data)
+	X86_MATCH_CPU(X86_VENDOR_##vendor, family, model, X86_STEPPING_ANY, \
+		      X86_FEATURE_ANY, X86_CPU_TYPE_ANY, data)
 
 /**
  * X86_MATCH_VENDOR_FAM - Match vendor and family
@@ -180,12 +162,10 @@
 * @data:	Driver specific data or NULL. The internal storage
 *		format is unsigned long. The supplied value, pointer
 *		etc. is casted to unsigned long internally.
- *
- * All other missing arguments to X86_MATCH_VENDOR_FAM_MODEL_FEATURE() are
- * set of wildcards.
 */
 #define X86_MATCH_VENDOR_FAM(vendor, family, data) \
-	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, X86_MODEL_ANY, data)
+	X86_MATCH_CPU(X86_VENDOR_##vendor, family, X86_MODEL_ANY, \
+		      X86_STEPPING_ANY, X86_FEATURE_ANY, X86_CPU_TYPE_ANY, data)
 
 /**
  * X86_MATCH_INTEL_FAM6_MODEL - Match vendor INTEL, family 6 and model
@@ -204,8 +184,8 @@
 	X86_MATCH_VENDOR_FAM_MODEL(INTEL, 6, INTEL_FAM6_##model, data)
 
 #define X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(model, steppings, data) \
-	X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
-						steppings, X86_FEATURE_ANY, data)
+	X86_MATCH_CPU(X86_VENDOR_INTEL, 6, INTEL_FAM6_##model, \
+		      steppings, X86_FEATURE_ANY, X86_CPU_TYPE_ANY, data)
 
 /**
  * X86_MATCH_VFM - Match encoded vendor/family/model
@@ -213,32 +193,26 @@
 * @data:	Driver specific data or NULL. The internal storage
 *		format is unsigned long. The supplied value, pointer
 *		etc. is cast to unsigned long internally.
- *
- * Stepping and feature are set to wildcards
 */
 #define X86_MATCH_VFM(vfm, data) \
-	X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE( \
-		VFM_VENDOR(vfm), \
-		VFM_FAMILY(vfm), \
-		VFM_MODEL(vfm), \
-		X86_STEPPING_ANY, X86_FEATURE_ANY, data)
+	X86_MATCH_CPU(VFM_VENDOR(vfm), VFM_FAMILY(vfm), VFM_MODEL(vfm), \
+		      X86_STEPPING_ANY, X86_FEATURE_ANY, X86_CPU_TYPE_ANY, data)
 
+#define __X86_STEPPINGS(mins, maxs)	GENMASK(maxs, mins)
 /**
- * X86_MATCH_VFM_STEPPINGS - Match encoded vendor/family/model/stepping
+ * X86_MATCH_VFM_STEPS - Match encoded vendor/family/model and steppings
+ *			 range.
  * @vfm:	Encoded 8-bits each for vendor, family, model
- * @steppings:	Bitmask of steppings to match
+ * @min_step:	Lowest stepping number to match
+ * @max_step:	Highest stepping number to match
 * @data:	Driver specific data or NULL. The internal storage
 *		format is unsigned long. The supplied value, pointer
 *		etc. is cast to unsigned long internally.
- *
- * feature is set to wildcard
 */
-#define X86_MATCH_VFM_STEPPINGS(vfm, steppings, data) \
-	X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE( \
-		VFM_VENDOR(vfm), \
-		VFM_FAMILY(vfm), \
-		VFM_MODEL(vfm), \
-		steppings, X86_FEATURE_ANY, data)
+#define X86_MATCH_VFM_STEPS(vfm, min_step, max_step, data) \
+	X86_MATCH_CPU(VFM_VENDOR(vfm), VFM_FAMILY(vfm), VFM_MODEL(vfm), \
+		      __X86_STEPPINGS(min_step, max_step), X86_FEATURE_ANY, \
+		      X86_CPU_TYPE_ANY, data)
 
 /**
  * X86_MATCH_VFM_FEATURE - Match encoded vendor/family/model/feature
@@ -247,15 +221,22 @@
 * @data:	Driver specific data or NULL. The internal storage
 *		format is unsigned long. The supplied value, pointer
 *		etc. is cast to unsigned long internally.
- *
- * Steppings is set to wildcard
 */
 #define X86_MATCH_VFM_FEATURE(vfm, feature, data) \
-	X86_MATCH_VENDORID_FAM_MODEL_STEPPINGS_FEATURE( \
-		VFM_VENDOR(vfm), \
-		VFM_FAMILY(vfm), \
-		VFM_MODEL(vfm), \
-		X86_STEPPING_ANY, feature, data)
+	X86_MATCH_CPU(VFM_VENDOR(vfm), VFM_FAMILY(vfm), VFM_MODEL(vfm), \
+		      X86_STEPPING_ANY, feature, X86_CPU_TYPE_ANY, data)
+
+/**
+ * X86_MATCH_VFM_CPU_TYPE - Match encoded vendor/family/model/type
+ * @vfm:	Encoded 8-bits each for vendor, family, model
+ * @type:	CPU type e.g. P-core, E-core
+ * @data:	Driver specific data or NULL. The internal storage
+ *		format is unsigned long. The supplied value, pointer
+ *		etc. is cast to unsigned long internally.
+ */
+#define X86_MATCH_VFM_CPU_TYPE(vfm, type, data) \
+	X86_MATCH_CPU(VFM_VENDOR(vfm), VFM_FAMILY(vfm), VFM_MODEL(vfm), \
+		      X86_STEPPING_ANY, X86_FEATURE_ANY, type, data)
 
 /*
  * Match specific microcode revisions.
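
All of the rewritten match macros funnel into X86_MATCH_CPU and are consumed through x86_match_cpu(). As a hedged illustration (the table contents and driver_data values here are invented for the example, not taken from the commit), a driver-style table using the new stepping-range helper might look like:

    #include <linux/types.h>
    #include <asm/cpu_device_id.h>
    #include <asm/intel-family.h>

    /* Match Skylake-X steppings 0x3-0x5, or any Icelake-X */
    static const struct x86_cpu_id demo_ids[] = {
            X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X, 0x3, 0x5, 0x1234),
            X86_MATCH_VFM(INTEL_ICELAKE_X, 0),
            {}
    };

    static bool demo_cpu_supported(void)
    {
            const struct x86_cpu_id *m = x86_match_cpu(demo_ids);

            return m; /* on a match, m->driver_data holds the per-entry value */
    }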

arch/x86/include/asm/cpufeatures.h

@@ -479,7 +479,9 @@
 #define X86_FEATURE_BHI_CTRL		(21*32+ 2) /* "" BHI_DIS_S HW control available */
 #define X86_FEATURE_CLEAR_BHB_HW	(21*32+ 3) /* "" BHI_DIS_S HW control enabled */
 #define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* "" Clear branch history at vmexit using SW loop */
-#define X86_FEATURE_FAST_CPPC		(21*32 + 5) /* "" AMD Fast CPPC */
+#define X86_FEATURE_AMD_FAST_CPPC	(21*32 + 5) /* Fast CPPC */
+#define X86_FEATURE_AMD_HETEROGENEOUS_CORES (21*32 + 6) /* Heterogeneous Core Topology */
+#define X86_FEATURE_INDIRECT_THUNK_ITS	(21*32 + 9) /* Use thunk for indirect branches in lower half of cacheline */
 
 /*
  * BUG word(s)
@@ -530,4 +532,6 @@
 #define X86_BUG_DIV0			X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */
 #define X86_BUG_RFDS			X86_BUG(1*32 + 2) /* CPU is vulnerable to Register File Data Sampling */
 #define X86_BUG_BHI			X86_BUG(1*32 + 3) /* CPU is affected by Branch History Injection */
+#define X86_BUG_ITS			X86_BUG(1*32 + 6) /* "its" CPU is affected by Indirect Target Selection */
+#define X86_BUG_ITS_NATIVE_ONLY		X86_BUG(1*32 + 7) /* "its_native_only" CPU is affected by ITS, VMX is not affected */
 #endif /* _ASM_X86_CPUFEATURES_H */

arch/x86/include/asm/intel-family.h

@@ -264,4 +264,10 @@
 /* Family 19 */
 #define INTEL_PANTHERCOVE_X	IFM(19, 0x01) /* Diamond Rapids */
 
+/* CPU core types */
+enum intel_cpu_type {
+	INTEL_CPU_TYPE_ATOM = 0x20,
+	INTEL_CPU_TYPE_CORE = 0x40,
+};
+
 #endif /* _ASM_X86_INTEL_FAMILY_H */

arch/x86/include/asm/msr-index.h

@@ -159,6 +159,10 @@
 						 * CPU is not affected by Branch
 						 * History Injection.
 						 */
+#define ARCH_CAP_XAPIC_DISABLE		BIT(21)	/*
+						 * IA32_XAPIC_DISABLE_STATUS MSR
+						 * supported
+						 */
 #define ARCH_CAP_PBRSB_NO		BIT(24)	/*
 						 * Not susceptible to Post-Barrier
 						 * Return Stack Buffer Predictions.
@@ -180,11 +184,14 @@
 						 * VERW clears CPU Register
 						 * File.
 						 */
-
-#define ARCH_CAP_XAPIC_DISABLE		BIT(21)	/*
-						 * IA32_XAPIC_DISABLE_STATUS MSR
-						 * supported
-						 */
+#define ARCH_CAP_ITS_NO			BIT_ULL(62) /*
+						 * Not susceptible to
+						 * Indirect Target Selection.
+						 * This bit is not set by
+						 * HW, but is synthesized by
+						 * VMMs for guests to know
+						 * their affected status.
+						 */
 
 #define MSR_IA32_FLUSH_CMD		0x0000010b
 #define L1D_FLUSH			BIT(0)	/*

arch/x86/include/asm/nospec-branch.h

@@ -356,10 +356,14 @@
 	".long 999b\n\t" \
 	".popsection\n\t"
 
+#define ITS_THUNK_SIZE	64
+
 typedef u8 retpoline_thunk_t[RETPOLINE_THUNK_SIZE];
+typedef u8 its_thunk_t[ITS_THUNK_SIZE];
 extern retpoline_thunk_t __x86_indirect_thunk_array[];
 extern retpoline_thunk_t __x86_indirect_call_thunk_array[];
 extern retpoline_thunk_t __x86_indirect_jump_thunk_array[];
+extern its_thunk_t __x86_indirect_its_thunk_array[];
 
 #ifdef CONFIG_MITIGATION_RETHUNK
 extern void __x86_return_thunk(void);
@@ -383,6 +387,12 @@ static inline void srso_return_thunk(void) {}
 static inline void srso_alias_return_thunk(void) {}
 #endif
 
+#ifdef CONFIG_MITIGATION_ITS
+extern void its_return_thunk(void);
+#else
+static inline void its_return_thunk(void) {}
+#endif
+
 extern void retbleed_return_thunk(void);
 extern void srso_return_thunk(void);
 extern void srso_alias_return_thunk(void);

arch/x86/include/asm/processor.h

@@ -113,6 +113,26 @@ struct cpuinfo_topology {
 	u32			l2c_id;
 };
 
+struct cpuinfo_topology_rh {
+	// Hardware defined CPU-type
+	union {
+		u32		cpu_type;
+		struct {
+			// CPUID.1A.EAX[23-0]
+			u32	intel_native_model_id	:24;
+			// CPUID.1A.EAX[31-24]
+			u32	intel_type		:8;
+		};
+		struct {
+			// CPUID 0x80000026.EBX
+			u32	amd_num_processors	:16,
+				amd_power_eff_ranking	:8,
+				amd_native_model_id	:4,
+				amd_type		:4;
+		};
+	};
+};
+
 struct cpuinfo_x86 {
 	union {
 		/*
@@ -179,7 +199,8 @@ struct cpuinfo_x86 {
 	/* Address space bits used by the cache internally */
 	u8			x86_cache_bits;
 	unsigned		initialized : 1;
-	RH_KABI_RESERVE(1)
+	RH_KABI_USE(1, struct cpuinfo_topology_rh topo_rh)
 	RH_KABI_RESERVE(2)
 	RH_KABI_RESERVE(3)
 	RH_KABI_RESERVE(4)
|
|||||||
TOPO_MAX_DOMAIN,
|
TOPO_MAX_DOMAIN,
|
||||||
};
|
};
|
||||||
|
|
||||||
|
enum x86_topology_cpu_type {
|
||||||
|
TOPO_CPU_TYPE_PERFORMANCE,
|
||||||
|
TOPO_CPU_TYPE_EFFICIENCY,
|
||||||
|
TOPO_CPU_TYPE_UNKNOWN,
|
||||||
|
};
|
||||||
|
|
||||||
struct x86_topology_system {
|
struct x86_topology_system {
|
||||||
unsigned int dom_shifts[TOPO_MAX_DOMAIN];
|
unsigned int dom_shifts[TOPO_MAX_DOMAIN];
|
||||||
unsigned int dom_size[TOPO_MAX_DOMAIN];
|
unsigned int dom_size[TOPO_MAX_DOMAIN];
|
||||||
@ -144,6 +150,9 @@ extern unsigned int __max_threads_per_core;
|
|||||||
extern unsigned int __num_threads_per_package;
|
extern unsigned int __num_threads_per_package;
|
||||||
extern unsigned int __num_cores_per_package;
|
extern unsigned int __num_cores_per_package;
|
||||||
|
|
||||||
|
const char *get_topology_cpu_type_name(struct cpuinfo_x86 *c);
|
||||||
|
enum x86_topology_cpu_type get_topology_cpu_type(struct cpuinfo_x86 *c);
|
||||||
|
|
||||||
static inline unsigned int topology_max_packages(void)
|
static inline unsigned int topology_max_packages(void)
|
||||||
{
|
{
|
||||||
return __max_logical_packages;
|
return __max_logical_packages;
|
||||||
|

arch/x86/kernel/alternative.c

@@ -31,6 +31,7 @@
 #include <asm/paravirt.h>
 #include <asm/asm-prototypes.h>
 #include <asm/cfi.h>
+#include <asm/ibt.h>
 
 int __read_mostly alternatives_patched;
 
@@ -522,7 +523,8 @@ static int emit_indirect(int op, int reg, u8 *bytes)
 	return i;
 }
 
-static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes)
+static int __emit_trampoline(void *addr, struct insn *insn, u8 *bytes,
+			     void *call_dest, void *jmp_dest)
 {
 	u8 op = insn->opcode.bytes[0];
 	int i = 0;
@@ -543,7 +545,7 @@ switch (op) {
 	switch (op) {
 	case CALL_INSN_OPCODE:
 		__text_gen_insn(bytes+i, op, addr+i,
-				__x86_indirect_call_thunk_array[reg],
+				call_dest,
 				CALL_INSN_SIZE);
 		i += CALL_INSN_SIZE;
 		break;
@@ -551,7 +553,7 @@
 	case JMP32_INSN_OPCODE:
 clang_jcc:
 		__text_gen_insn(bytes+i, op, addr+i,
-				__x86_indirect_jump_thunk_array[reg],
+				jmp_dest,
 				JMP32_INSN_SIZE);
 		i += JMP32_INSN_SIZE;
 		break;
@@ -566,6 +568,35 @@ clang_jcc:
 	return i;
 }
 
+static int emit_call_track_retpoline(void *addr, struct insn *insn, int reg, u8 *bytes)
+{
+	return __emit_trampoline(addr, insn, bytes,
+				 __x86_indirect_call_thunk_array[reg],
+				 __x86_indirect_jump_thunk_array[reg]);
+}
+
+#ifdef CONFIG_MITIGATION_ITS
+static int emit_its_trampoline(void *addr, struct insn *insn, int reg, u8 *bytes)
+{
+	return __emit_trampoline(addr, insn, bytes,
+				 __x86_indirect_its_thunk_array[reg],
+				 __x86_indirect_its_thunk_array[reg]);
+}
+
+/* Check if an indirect branch is at ITS-unsafe address */
+static bool cpu_wants_indirect_its_thunk_at(unsigned long addr, int reg)
+{
+	if (!cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS))
+		return false;
+
+	/* Indirect branch opcode is 2 or 3 bytes depending on reg */
+	addr += 1 + reg / 8;
+
+	/* Lower-half of the cacheline? */
+	return !(addr & 0x20);
+}
+#endif
+
 /*
  * Rewrite the compiler generated retpoline thunk calls.
  *
@@ -640,6 +671,15 @@ static int patch_retpoline(void *addr, struct insn *insn, u8 *bytes)
 		bytes[i++] = 0xe8; /* LFENCE */
 	}
 
+#ifdef CONFIG_MITIGATION_ITS
+	/*
+	 * Check if the address of last byte of emitted-indirect is in
+	 * lower-half of the cacheline. Such branches need ITS mitigation.
+	 */
+	if (cpu_wants_indirect_its_thunk_at((unsigned long)addr + i, reg))
+		return emit_its_trampoline(addr, insn, reg, bytes);
+#endif
+
 	ret = emit_indirect(op, reg, bytes + i);
 	if (ret < 0)
 		return ret;
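
The helper added above encodes the whole ITS patching decision: take the address of the last byte of the would-be inline indirect branch and test bit 5; if that byte falls in the lower half of a 64-byte cacheline, the site is rewritten to jump through the ITS thunk instead. The same test in a standalone sketch:

    #include <stdbool.h>
    #include <stdio.h>

    /* An inline indirect CALL/JMP through a register is 2 bytes for rax-rdi
     * and 3 bytes (extra REX prefix) for r8-r15, so its last byte is at
     * addr + 1 + reg / 8, matching cpu_wants_indirect_its_thunk_at(). */
    static bool its_unsafe_address(unsigned long addr, int reg)
    {
            addr += 1 + reg / 8;
            return !(addr & 0x20); /* lower half of a 64-byte cacheline? */
    }

    int main(void)
    {
            /* last byte at 0x101f (offset 31): lower half, needs the ITS thunk */
            printf("%d\n", its_unsafe_address(0x101e, 0));
            /* last byte at 0x1022 (offset 34): upper half, plain emit is fine */
            printf("%d\n", its_unsafe_address(0x1020, 8));
            return 0;
    }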
@@ -711,6 +751,21 @@ void __init_or_module noinline apply_retpolines(s32 *start, s32 *end)
 
 #ifdef CONFIG_MITIGATION_RETHUNK
 
+bool cpu_wants_rethunk(void)
+{
+	return cpu_feature_enabled(X86_FEATURE_RETHUNK);
+}
+
+bool cpu_wants_rethunk_at(void *addr)
+{
+	if (!cpu_feature_enabled(X86_FEATURE_RETHUNK))
+		return false;
+	if (x86_return_thunk != its_return_thunk)
+		return true;
+
+	return !((unsigned long)addr & 0x20);
+}
+
 /*
  * Rewrite the compiler generated return thunk tail-calls.
  *
@@ -727,7 +782,7 @@ static int patch_return(void *addr, struct insn *insn, u8 *bytes)
 	int i = 0;
 
 	/* Patch the custom return thunks... */
-	if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
+	if (cpu_wants_rethunk_at(addr)) {
 		i = JMP32_INSN_SIZE;
 		__text_gen_insn(bytes, JMP32_INSN_OPCODE, addr, x86_return_thunk, i);
 	} else {
@@ -744,7 +799,7 @@ void __init_or_module noinline apply_returns(s32 *start, s32 *end)
 {
 	s32 *s;
 
-	if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
+	if (cpu_wants_rethunk())
 		static_call_force_reinit();
 
 	for (s = start; s < end; s++) {
@@ -1648,6 +1703,8 @@ static noinline void __init alt_reloc_selftest(void)
 
 void __init alternative_instructions(void)
 {
+	u64 ibt;
+
 	int3_selftest();
 
 	/*
@@ -1691,6 +1748,9 @@ void __init alternative_instructions(void)
 	 */
 	apply_paravirt(__parainstructions, __parainstructions_end);
 
+	/* Keep CET-IBT disabled until caller/callee are patched */
+	ibt = ibt_save(/*disable*/ true);
+
 	__apply_fineibt(__retpoline_sites, __retpoline_sites_end,
 			__cfi_sites, __cfi_sites_end, true);
 
@@ -1718,6 +1778,8 @@
 	 */
 	apply_seal_endbr(__ibt_endbr_seal, __ibt_endbr_seal_end);
 
+	ibt_restore(ibt);
+
 #ifdef CONFIG_SMP
 	/* Patch to UP if other cpus not imminent. */
 	if (!noreplace_smp && (num_present_cpus() == 1 || setup_max_cpus <= 1)) {

arch/x86/kernel/apic/apic.c

@@ -509,32 +509,32 @@ static struct clock_event_device lapic_clockevent = {
 static DEFINE_PER_CPU(struct clock_event_device, lapic_events);
 
 static const struct x86_cpu_id deadline_match[] __initconst = {
-	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(HASWELL_X, X86_STEPPINGS(0x2, 0x2), 0x3a), /* EP */
-	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(HASWELL_X, X86_STEPPINGS(0x4, 0x4), 0x0f), /* EX */
+	X86_MATCH_VFM_STEPS(INTEL_HASWELL_X, 0x2, 0x2, 0x3a),  /* EP */
+	X86_MATCH_VFM_STEPS(INTEL_HASWELL_X, 0x4, 0x4, 0x0f),  /* EX */
 
-	X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_X,	0x0b000020),
+	X86_MATCH_VFM(INTEL_BROADWELL_X,	0x0b000020),
 
-	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x2, 0x2), 0x00000011),
-	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x3, 0x3), 0x0700000e),
-	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x4, 0x4), 0x0f00000c),
-	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(BROADWELL_D, X86_STEPPINGS(0x5, 0x5), 0x0e000003),
+	X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D, 0x2, 0x2, 0x00000011),
+	X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D, 0x3, 0x3, 0x0700000e),
+	X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D, 0x4, 0x4, 0x0f00000c),
+	X86_MATCH_VFM_STEPS(INTEL_BROADWELL_D, 0x5, 0x5, 0x0e000003),
 
-	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x3, 0x3), 0x01000136),
-	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x4, 0x4), 0x02000014),
-	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x5, 0xf), 0),
+	X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X, 0x3, 0x3, 0x01000136),
+	X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X, 0x4, 0x4, 0x02000014),
+	X86_MATCH_VFM_STEPS(INTEL_SKYLAKE_X, 0x5, 0xf, 0),
 
-	X86_MATCH_INTEL_FAM6_MODEL( HASWELL,		0x22),
-	X86_MATCH_INTEL_FAM6_MODEL( HASWELL_L,		0x20),
-	X86_MATCH_INTEL_FAM6_MODEL( HASWELL_G,		0x17),
+	X86_MATCH_VFM(INTEL_HASWELL,		0x22),
+	X86_MATCH_VFM(INTEL_HASWELL_L,		0x20),
+	X86_MATCH_VFM(INTEL_HASWELL_G,		0x17),
 
-	X86_MATCH_INTEL_FAM6_MODEL( BROADWELL,		0x25),
-	X86_MATCH_INTEL_FAM6_MODEL( BROADWELL_G,	0x17),
+	X86_MATCH_VFM(INTEL_BROADWELL,		0x25),
+	X86_MATCH_VFM(INTEL_BROADWELL_G,	0x17),
 
-	X86_MATCH_INTEL_FAM6_MODEL( SKYLAKE_L,		0xb2),
-	X86_MATCH_INTEL_FAM6_MODEL( SKYLAKE,		0xb2),
+	X86_MATCH_VFM(INTEL_SKYLAKE_L,		0xb2),
+	X86_MATCH_VFM(INTEL_SKYLAKE,		0xb2),
 
-	X86_MATCH_INTEL_FAM6_MODEL( KABYLAKE_L,		0x52),
-	X86_MATCH_INTEL_FAM6_MODEL( KABYLAKE,		0x52),
+	X86_MATCH_VFM(INTEL_KABYLAKE_L,		0x52),
+	X86_MATCH_VFM(INTEL_KABYLAKE,		0x52),
 
 	{},
 };

arch/x86/kernel/cpu/aperfmperf.c

@@ -124,25 +124,24 @@ static bool __init slv_set_max_freq_ratio(u64 *base_freq, u64 *turbo_freq)
 	return true;
 }
 
-#define X86_MATCH(model)					\
-	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6,		\
-		INTEL_FAM6_##model, X86_FEATURE_APERFMPERF, NULL)
+#define X86_MATCH(vfm)						\
+	X86_MATCH_VFM_FEATURE(vfm, X86_FEATURE_APERFMPERF, NULL)
 
 static const struct x86_cpu_id has_knl_turbo_ratio_limits[] __initconst = {
-	X86_MATCH(XEON_PHI_KNL),
-	X86_MATCH(XEON_PHI_KNM),
+	X86_MATCH(INTEL_XEON_PHI_KNL),
+	X86_MATCH(INTEL_XEON_PHI_KNM),
 	{}
 };
 
 static const struct x86_cpu_id has_skx_turbo_ratio_limits[] __initconst = {
-	X86_MATCH(SKYLAKE_X),
+	X86_MATCH(INTEL_SKYLAKE_X),
 	{}
 };
 
 static const struct x86_cpu_id has_glm_turbo_ratio_limits[] __initconst = {
-	X86_MATCH(ATOM_GOLDMONT),
-	X86_MATCH(ATOM_GOLDMONT_D),
-	X86_MATCH(ATOM_GOLDMONT_PLUS),
+	X86_MATCH(INTEL_ATOM_GOLDMONT),
+	X86_MATCH(INTEL_ATOM_GOLDMONT_D),
+	X86_MATCH(INTEL_ATOM_GOLDMONT_PLUS),
 	{}
 };
 

arch/x86/kernel/cpu/bugs.c

@@ -50,6 +50,7 @@ static void __init srbds_select_mitigation(void);
 static void __init l1d_flush_select_mitigation(void);
 static void __init srso_select_mitigation(void);
 static void __init gds_select_mitigation(void);
+static void __init its_select_mitigation(void);
 
 /* The base value of the SPEC_CTRL MSR without task-specific bits set */
 u64 x86_spec_ctrl_base;
@@ -68,6 +69,14 @@ static DEFINE_MUTEX(spec_ctrl_mutex);
 
 void (*x86_return_thunk)(void) __ro_after_init = __x86_return_thunk;
 
+static void __init set_return_thunk(void *thunk)
+{
+	if (x86_return_thunk != __x86_return_thunk)
+		pr_warn("x86/bugs: return thunk changed\n");
+
+	x86_return_thunk = thunk;
+}
+
 /* Update SPEC_CTRL MSR and its cached copy unconditionally */
 static void update_spec_ctrl(u64 val)
 {
@@ -176,6 +185,7 @@ void __init cpu_select_mitigations(void)
 	 */
 	srso_select_mitigation();
 	gds_select_mitigation();
+	its_select_mitigation();
 }
 
 /*
@@ -1109,7 +1119,7 @@ do_cmd_auto:
 		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
 		setup_force_cpu_cap(X86_FEATURE_UNRET);
 
-		x86_return_thunk = retbleed_return_thunk;
+		set_return_thunk(retbleed_return_thunk);
 
 		if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
 		    boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)
@@ -1128,7 +1138,7 @@ do_cmd_auto:
 		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
 		setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
 
-		x86_return_thunk = call_depth_return_thunk;
+		set_return_thunk(call_depth_return_thunk);
 		break;
 
 	default:
@@ -1162,6 +1172,145 @@ do_cmd_auto:
 	pr_info("%s\n", retbleed_strings[retbleed_mitigation]);
 }
 
+#undef pr_fmt
+#define pr_fmt(fmt)	"ITS: " fmt
+
+enum its_mitigation_cmd {
+	ITS_CMD_OFF,
+	ITS_CMD_ON,
+	ITS_CMD_VMEXIT,
+	ITS_CMD_RSB_STUFF,
+};
+
+enum its_mitigation {
+	ITS_MITIGATION_OFF,
+	ITS_MITIGATION_VMEXIT_ONLY,
+	ITS_MITIGATION_ALIGNED_THUNKS,
+	ITS_MITIGATION_RETPOLINE_STUFF,
+};
+
+static const char * const its_strings[] = {
+	[ITS_MITIGATION_OFF]			= "Vulnerable",
+	[ITS_MITIGATION_VMEXIT_ONLY]		= "Mitigation: Vulnerable, KVM: Not affected",
+	[ITS_MITIGATION_ALIGNED_THUNKS]		= "Mitigation: Aligned branch/return thunks",
+	[ITS_MITIGATION_RETPOLINE_STUFF]	= "Mitigation: Retpolines, Stuffing RSB",
+};
+
+static enum its_mitigation its_mitigation __ro_after_init = ITS_MITIGATION_ALIGNED_THUNKS;
+
+static enum its_mitigation_cmd its_cmd __ro_after_init =
+	IS_ENABLED(CONFIG_MITIGATION_ITS) ? ITS_CMD_ON : ITS_CMD_OFF;
+
+static int __init its_parse_cmdline(char *str)
+{
+	if (!str)
+		return -EINVAL;
+
+	if (!IS_ENABLED(CONFIG_MITIGATION_ITS)) {
+		pr_err("Mitigation disabled at compile time, ignoring option (%s)", str);
+		return 0;
+	}
+
+	if (!strcmp(str, "off")) {
+		its_cmd = ITS_CMD_OFF;
+	} else if (!strcmp(str, "on")) {
+		its_cmd = ITS_CMD_ON;
+	} else if (!strcmp(str, "force")) {
+		its_cmd = ITS_CMD_ON;
+		setup_force_cpu_bug(X86_BUG_ITS);
+	} else if (!strcmp(str, "vmexit")) {
+		its_cmd = ITS_CMD_VMEXIT;
+	} else if (!strcmp(str, "stuff")) {
+		its_cmd = ITS_CMD_RSB_STUFF;
+	} else {
+		pr_err("Ignoring unknown indirect_target_selection option (%s).", str);
+	}
+
+	return 0;
+}
+early_param("indirect_target_selection", its_parse_cmdline);
+
+static void __init its_select_mitigation(void)
+{
+	enum its_mitigation_cmd cmd = its_cmd;
+
+	if (!boot_cpu_has_bug(X86_BUG_ITS) || cpu_mitigations_off()) {
+		its_mitigation = ITS_MITIGATION_OFF;
+		return;
+	}
+
+	/* Retpoline+CDT mitigates ITS, bail out */
+	if (boot_cpu_has(X86_FEATURE_RETPOLINE) &&
+	    boot_cpu_has(X86_FEATURE_CALL_DEPTH)) {
+		its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
+		goto out;
+	}
+
+	/* Exit early to avoid irrelevant warnings */
+	if (cmd == ITS_CMD_OFF) {
+		its_mitigation = ITS_MITIGATION_OFF;
+		goto out;
+	}
+	if (spectre_v2_enabled == SPECTRE_V2_NONE) {
+		pr_err("WARNING: Spectre-v2 mitigation is off, disabling ITS\n");
+		its_mitigation = ITS_MITIGATION_OFF;
+		goto out;
+	}
+	if (!IS_ENABLED(CONFIG_MITIGATION_RETPOLINE) ||
+	    !IS_ENABLED(CONFIG_MITIGATION_RETHUNK)) {
+		pr_err("WARNING: ITS mitigation depends on retpoline and rethunk support\n");
+		its_mitigation = ITS_MITIGATION_OFF;
+		goto out;
+	}
+	if (IS_ENABLED(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)) {
+		pr_err("WARNING: ITS mitigation is not compatible with CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B\n");
+		its_mitigation = ITS_MITIGATION_OFF;
+		goto out;
+	}
+	if (boot_cpu_has(X86_FEATURE_RETPOLINE_LFENCE)) {
+		pr_err("WARNING: ITS mitigation is not compatible with lfence mitigation\n");
+		its_mitigation = ITS_MITIGATION_OFF;
+		goto out;
+	}
+
+	if (cmd == ITS_CMD_RSB_STUFF &&
+	    (!boot_cpu_has(X86_FEATURE_RETPOLINE) || !IS_ENABLED(CONFIG_MITIGATION_CALL_DEPTH_TRACKING))) {
+		pr_err("RSB stuff mitigation not supported, using default\n");
+		cmd = ITS_CMD_ON;
+	}
+
+	switch (cmd) {
+	case ITS_CMD_OFF:
+		its_mitigation = ITS_MITIGATION_OFF;
+		break;
+	case ITS_CMD_VMEXIT:
+		if (boot_cpu_has_bug(X86_BUG_ITS_NATIVE_ONLY)) {
+			its_mitigation = ITS_MITIGATION_VMEXIT_ONLY;
+			goto out;
+		}
+		fallthrough;
+	case ITS_CMD_ON:
+		its_mitigation = ITS_MITIGATION_ALIGNED_THUNKS;
+		if (!boot_cpu_has(X86_FEATURE_RETPOLINE))
+			setup_force_cpu_cap(X86_FEATURE_INDIRECT_THUNK_ITS);
+		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+		set_return_thunk(its_return_thunk);
+		break;
+	case ITS_CMD_RSB_STUFF:
+		its_mitigation = ITS_MITIGATION_RETPOLINE_STUFF;
+		setup_force_cpu_cap(X86_FEATURE_RETHUNK);
+		setup_force_cpu_cap(X86_FEATURE_CALL_DEPTH);
+		set_return_thunk(call_depth_return_thunk);
+		if (retbleed_mitigation == RETBLEED_MITIGATION_NONE) {
+			retbleed_mitigation = RETBLEED_MITIGATION_STUFF;
+			pr_info("Retbleed mitigation updated to stuffing\n");
+		}
+		break;
+	}
+out:
+	pr_info("%s\n", its_strings[its_mitigation]);
+}
+
 #undef pr_fmt
 #define pr_fmt(fmt)	"Spectre V2 : " fmt
 
|
|||||||
enum bhi_mitigations {
|
enum bhi_mitigations {
|
||||||
BHI_MITIGATION_OFF,
|
BHI_MITIGATION_OFF,
|
||||||
BHI_MITIGATION_ON,
|
BHI_MITIGATION_ON,
|
||||||
|
BHI_MITIGATION_VMEXIT_ONLY,
|
||||||
};
|
};
|
||||||
|
|
||||||
static enum bhi_mitigations bhi_mitigation __ro_after_init =
|
static enum bhi_mitigations bhi_mitigation __ro_after_init =
|
||||||
@ -1646,6 +1796,8 @@ static int __init spectre_bhi_parse_cmdline(char *str)
|
|||||||
bhi_mitigation = BHI_MITIGATION_OFF;
|
bhi_mitigation = BHI_MITIGATION_OFF;
|
||||||
else if (!strcmp(str, "on"))
|
else if (!strcmp(str, "on"))
|
||||||
bhi_mitigation = BHI_MITIGATION_ON;
|
bhi_mitigation = BHI_MITIGATION_ON;
|
||||||
|
else if (!strcmp(str, "vmexit"))
|
||||||
|
bhi_mitigation = BHI_MITIGATION_VMEXIT_ONLY;
|
||||||
else
|
else
|
||||||
pr_err("Ignoring unknown spectre_bhi option (%s)", str);
|
pr_err("Ignoring unknown spectre_bhi option (%s)", str);
|
||||||
|
|
||||||
@ -1666,19 +1818,22 @@ static void __init bhi_select_mitigation(void)
|
|||||||
return;
|
return;
|
||||||
}
|
}
|
||||||
|
|
||||||
if (spec_ctrl_bhi_dis())
|
|
||||||
return;
|
|
||||||
|
|
||||||
if (!IS_ENABLED(CONFIG_X86_64))
|
if (!IS_ENABLED(CONFIG_X86_64))
|
||||||
return;
|
return;
|
||||||
|
|
||||||
/* Mitigate KVM by default */
|
/* Mitigate in hardware if supported */
|
||||||
setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT);
|
if (spec_ctrl_bhi_dis())
|
||||||
pr_info("Spectre BHI mitigation: SW BHB clearing on vm exit\n");
|
return;
|
||||||
|
|
||||||
/* Mitigate syscalls when the mitigation is forced =on */
|
if (bhi_mitigation == BHI_MITIGATION_VMEXIT_ONLY) {
|
||||||
|
pr_info("Spectre BHI mitigation: SW BHB clearing on VM exit only\n");
|
||||||
|
setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT);
|
||||||
|
return;
|
||||||
|
}
|
||||||
|
|
||||||
|
pr_info("Spectre BHI mitigation: SW BHB clearing on syscall and VM exit\n");
|
||||||
setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);
|
setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP);
|
||||||
pr_info("Spectre BHI mitigation: SW BHB clearing on syscall\n");
|
setup_force_cpu_cap(X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT);
|
||||||
}
|
}
|
||||||
|
|
||||||
static void __init spectre_v2_select_mitigation(void)
|
static void __init spectre_v2_select_mitigation(void)
|
||||||
@ -2687,10 +2842,10 @@ static void __init srso_select_mitigation(void)
|
|||||||
|
|
||||||
if (boot_cpu_data.x86 == 0x19) {
|
if (boot_cpu_data.x86 == 0x19) {
|
||||||
setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
|
setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
|
||||||
x86_return_thunk = srso_alias_return_thunk;
|
set_return_thunk(srso_alias_return_thunk);
|
||||||
} else {
|
} else {
|
||||||
setup_force_cpu_cap(X86_FEATURE_SRSO);
|
setup_force_cpu_cap(X86_FEATURE_SRSO);
|
||||||
x86_return_thunk = srso_return_thunk;
|
set_return_thunk(srso_return_thunk);
|
||||||
}
|
}
|
||||||
if (has_microcode)
|
if (has_microcode)
|
||||||
srso_mitigation = SRSO_MITIGATION_SAFE_RET;
|
srso_mitigation = SRSO_MITIGATION_SAFE_RET;
|
||||||
@ -2840,6 +2995,11 @@ static ssize_t rfds_show_state(char *buf)
|
|||||||
return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
|
return sysfs_emit(buf, "%s\n", rfds_strings[rfds_mitigation]);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
static ssize_t its_show_state(char *buf)
|
||||||
|
{
|
||||||
|
return sysfs_emit(buf, "%s\n", its_strings[its_mitigation]);
|
||||||
|
}
|
||||||
|
|
||||||
static char *stibp_state(void)
|
static char *stibp_state(void)
|
||||||
{
|
{
|
||||||
if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
|
if (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
|
||||||
@ -3022,6 +3182,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
|
|||||||
case X86_BUG_RFDS:
|
case X86_BUG_RFDS:
|
||||||
return rfds_show_state(buf);
|
return rfds_show_state(buf);
|
||||||
|
|
||||||
|
case X86_BUG_ITS:
|
||||||
|
return its_show_state(buf);
|
||||||
|
|
||||||
default:
|
default:
|
||||||
break;
|
break;
|
||||||
}
|
}
|
||||||
@ -3101,6 +3264,11 @@ ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attrib
|
|||||||
{
|
{
|
||||||
return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
|
return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
|
||||||
}
|
}
|
||||||
|
|
||||||
|
ssize_t cpu_show_indirect_target_selection(struct device *dev, struct device_attribute *attr, char *buf)
|
||||||
|
{
|
||||||
|
return cpu_show_common(dev, attr, buf, X86_BUG_ITS);
|
||||||
|
}
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
void __warn_thunk(void)
|
void __warn_thunk(void)
|
||||||
|
@@ -111,17 +111,17 @@ static const struct x86_cpu_id ppin_cpuids[] = {
 	X86_MATCH_FEATURE(X86_FEATURE_INTEL_PPIN, &ppin_info[X86_VENDOR_INTEL]),
 
 	/* Legacy models without CPUID enumeration */
-	X86_MATCH_INTEL_FAM6_MODEL(IVYBRIDGE_X, &ppin_info[X86_VENDOR_INTEL]),
-	X86_MATCH_INTEL_FAM6_MODEL(HASWELL_X, &ppin_info[X86_VENDOR_INTEL]),
-	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_D, &ppin_info[X86_VENDOR_INTEL]),
-	X86_MATCH_INTEL_FAM6_MODEL(BROADWELL_X, &ppin_info[X86_VENDOR_INTEL]),
-	X86_MATCH_INTEL_FAM6_MODEL(SKYLAKE_X, &ppin_info[X86_VENDOR_INTEL]),
-	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_X, &ppin_info[X86_VENDOR_INTEL]),
-	X86_MATCH_INTEL_FAM6_MODEL(ICELAKE_D, &ppin_info[X86_VENDOR_INTEL]),
-	X86_MATCH_INTEL_FAM6_MODEL(SAPPHIRERAPIDS_X, &ppin_info[X86_VENDOR_INTEL]),
-	X86_MATCH_INTEL_FAM6_MODEL(EMERALDRAPIDS_X, &ppin_info[X86_VENDOR_INTEL]),
-	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNL, &ppin_info[X86_VENDOR_INTEL]),
-	X86_MATCH_INTEL_FAM6_MODEL(XEON_PHI_KNM, &ppin_info[X86_VENDOR_INTEL]),
+	X86_MATCH_VFM(INTEL_IVYBRIDGE_X, &ppin_info[X86_VENDOR_INTEL]),
+	X86_MATCH_VFM(INTEL_HASWELL_X, &ppin_info[X86_VENDOR_INTEL]),
+	X86_MATCH_VFM(INTEL_BROADWELL_D, &ppin_info[X86_VENDOR_INTEL]),
+	X86_MATCH_VFM(INTEL_BROADWELL_X, &ppin_info[X86_VENDOR_INTEL]),
+	X86_MATCH_VFM(INTEL_SKYLAKE_X, &ppin_info[X86_VENDOR_INTEL]),
+	X86_MATCH_VFM(INTEL_ICELAKE_X, &ppin_info[X86_VENDOR_INTEL]),
+	X86_MATCH_VFM(INTEL_ICELAKE_D, &ppin_info[X86_VENDOR_INTEL]),
+	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X, &ppin_info[X86_VENDOR_INTEL]),
+	X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X, &ppin_info[X86_VENDOR_INTEL]),
+	X86_MATCH_VFM(INTEL_XEON_PHI_KNL, &ppin_info[X86_VENDOR_INTEL]),
+	X86_MATCH_VFM(INTEL_XEON_PHI_KNM, &ppin_info[X86_VENDOR_INTEL]),
 
 	{}
 };
@@ -1171,8 +1171,8 @@ static void identify_cpu_without_cpuid(struct cpuinfo_x86 *c)
 #define VULNWL(vendor, family, model, whitelist)	\
 	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, whitelist)
 
-#define VULNWL_INTEL(model, whitelist)		\
-	VULNWL(INTEL, 6, INTEL_FAM6_##model, whitelist)
+#define VULNWL_INTEL(vfm, whitelist)		\
+	X86_MATCH_VFM(vfm, whitelist)
 
 #define VULNWL_AMD(family, whitelist)		\
 	VULNWL(AMD, family, X86_MODEL_ANY, whitelist)
@@ -1189,32 +1189,32 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 	VULNWL(VORTEX,	6, X86_MODEL_ANY,	NO_SPECULATION),
 
 	/* Intel Family 6 */
-	VULNWL_INTEL(TIGERLAKE,			NO_MMIO),
-	VULNWL_INTEL(TIGERLAKE_L,		NO_MMIO),
-	VULNWL_INTEL(ALDERLAKE,			NO_MMIO),
-	VULNWL_INTEL(ALDERLAKE_L,		NO_MMIO),
+	VULNWL_INTEL(INTEL_TIGERLAKE,		NO_MMIO),
+	VULNWL_INTEL(INTEL_TIGERLAKE_L,		NO_MMIO),
+	VULNWL_INTEL(INTEL_ALDERLAKE,		NO_MMIO),
+	VULNWL_INTEL(INTEL_ALDERLAKE_L,		NO_MMIO),
 
-	VULNWL_INTEL(ATOM_SALTWELL,		NO_SPECULATION | NO_ITLB_MULTIHIT),
-	VULNWL_INTEL(ATOM_SALTWELL_TABLET,	NO_SPECULATION | NO_ITLB_MULTIHIT),
-	VULNWL_INTEL(ATOM_SALTWELL_MID,		NO_SPECULATION | NO_ITLB_MULTIHIT),
-	VULNWL_INTEL(ATOM_BONNELL,		NO_SPECULATION | NO_ITLB_MULTIHIT),
-	VULNWL_INTEL(ATOM_BONNELL_MID,		NO_SPECULATION | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(INTEL_ATOM_SALTWELL,	NO_SPECULATION | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(INTEL_ATOM_SALTWELL_TABLET, NO_SPECULATION | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(INTEL_ATOM_SALTWELL_MID,	NO_SPECULATION | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(INTEL_ATOM_BONNELL,	NO_SPECULATION | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(INTEL_ATOM_BONNELL_MID,	NO_SPECULATION | NO_ITLB_MULTIHIT),
 
-	VULNWL_INTEL(ATOM_SILVERMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
-	VULNWL_INTEL(ATOM_SILVERMONT_D,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
-	VULNWL_INTEL(ATOM_SILVERMONT_MID,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
-	VULNWL_INTEL(ATOM_AIRMONT,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
-	VULNWL_INTEL(XEON_PHI_KNL,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
-	VULNWL_INTEL(XEON_PHI_KNM,		NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(INTEL_ATOM_SILVERMONT,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(INTEL_ATOM_SILVERMONT_D,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(INTEL_ATOM_SILVERMONT_MID,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(INTEL_ATOM_AIRMONT,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(INTEL_XEON_PHI_KNL,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(INTEL_XEON_PHI_KNM,	NO_SSB | NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
 
-	VULNWL_INTEL(CORE_YONAH,		NO_SSB),
+	VULNWL_INTEL(INTEL_CORE_YONAH,		NO_SSB),
 
-	VULNWL_INTEL(ATOM_AIRMONT_MID,		NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
-	VULNWL_INTEL(ATOM_AIRMONT_NP,		NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(INTEL_ATOM_AIRMONT_MID,	NO_L1TF | MSBDS_ONLY | NO_SWAPGS | NO_ITLB_MULTIHIT),
+	VULNWL_INTEL(INTEL_ATOM_AIRMONT_NP,	NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT),
 
-	VULNWL_INTEL(ATOM_GOLDMONT,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
-	VULNWL_INTEL(ATOM_GOLDMONT_D,		NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
-	VULNWL_INTEL(ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
+	VULNWL_INTEL(INTEL_ATOM_GOLDMONT,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+	VULNWL_INTEL(INTEL_ATOM_GOLDMONT_D,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO),
+	VULNWL_INTEL(INTEL_ATOM_GOLDMONT_PLUS,	NO_MDS | NO_L1TF | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_EIBRS_PBRSB),
 
 	/*
 	 * Technically, swapgs isn't serializing on AMD (despite it previously
@@ -1224,9 +1224,9 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 	 * good enough for our purposes.
 	 */
 
-	VULNWL_INTEL(ATOM_TREMONT,		NO_EIBRS_PBRSB),
-	VULNWL_INTEL(ATOM_TREMONT_L,		NO_EIBRS_PBRSB),
-	VULNWL_INTEL(ATOM_TREMONT_D,		NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
+	VULNWL_INTEL(INTEL_ATOM_TREMONT,	NO_EIBRS_PBRSB),
+	VULNWL_INTEL(INTEL_ATOM_TREMONT_L,	NO_EIBRS_PBRSB),
+	VULNWL_INTEL(INTEL_ATOM_TREMONT_D,	NO_ITLB_MULTIHIT | NO_EIBRS_PBRSB),
 
 	/* AMD Family 0xf - 0x12 */
 	VULNWL_AMD(0x0f,	NO_MELTDOWN | NO_SSB | NO_L1TF | NO_MDS | NO_SWAPGS | NO_ITLB_MULTIHIT | NO_MMIO | NO_BHI),
@@ -1247,10 +1247,11 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 #define VULNBL(vendor, family, model, blacklist)	\
 	X86_MATCH_VENDOR_FAM_MODEL(vendor, family, model, blacklist)
 
-#define VULNBL_INTEL_STEPPINGS(model, steppings, issues)		   \
-	X86_MATCH_VENDOR_FAM_MODEL_STEPPINGS_FEATURE(INTEL, 6,		   \
-					    INTEL_FAM6_##model, steppings, \
-					    X86_FEATURE_ANY, issues)
+#define VULNBL_INTEL_STEPS(vfm, max_stepping, issues)		   \
+	X86_MATCH_VFM_STEPS(vfm, X86_STEP_MIN, max_stepping, issues)
+
+#define VULNBL_INTEL_TYPE(vfm, cpu_type, issues)		   \
+	X86_MATCH_VFM_CPU_TYPE(vfm, INTEL_CPU_TYPE_##cpu_type, issues)
 
 #define VULNBL_AMD(family, blacklist)		\
 	VULNBL(AMD, family, X86_MODEL_ANY, blacklist)
@@ -1273,45 +1274,52 @@ static const __initconst struct x86_cpu_id cpu_vuln_whitelist[] = {
 #define GDS		BIT(6)
 /* CPU is affected by Register File Data Sampling */
 #define RFDS		BIT(7)
+/* CPU is affected by Indirect Target Selection */
+#define ITS		BIT(8)
+/* CPU is affected by Indirect Target Selection, but guest-host isolation is not affected */
+#define ITS_NATIVE_ONLY	BIT(9)
 
 static const struct x86_cpu_id cpu_vuln_blacklist[] __initconst = {
-	VULNBL_INTEL_STEPPINGS(IVYBRIDGE,	X86_STEPPING_ANY,		SRBDS),
-	VULNBL_INTEL_STEPPINGS(HASWELL,		X86_STEPPING_ANY,		SRBDS),
-	VULNBL_INTEL_STEPPINGS(HASWELL_L,	X86_STEPPING_ANY,		SRBDS),
-	VULNBL_INTEL_STEPPINGS(HASWELL_G,	X86_STEPPING_ANY,		SRBDS),
-	VULNBL_INTEL_STEPPINGS(HASWELL_X,	X86_STEPPING_ANY,		MMIO),
-	VULNBL_INTEL_STEPPINGS(BROADWELL_D,	X86_STEPPING_ANY,		MMIO),
-	VULNBL_INTEL_STEPPINGS(BROADWELL_G,	X86_STEPPING_ANY,		SRBDS),
-	VULNBL_INTEL_STEPPINGS(BROADWELL_X,	X86_STEPPING_ANY,		MMIO),
-	VULNBL_INTEL_STEPPINGS(BROADWELL,	X86_STEPPING_ANY,		SRBDS),
-	VULNBL_INTEL_STEPPINGS(SKYLAKE_X,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS),
-	VULNBL_INTEL_STEPPINGS(SKYLAKE_L,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS),
-	VULNBL_INTEL_STEPPINGS(SKYLAKE,		X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS),
-	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS),
-	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS | SRBDS),
-	VULNBL_INTEL_STEPPINGS(CANNONLAKE_L,	X86_STEPPING_ANY,		RETBLEED),
-	VULNBL_INTEL_STEPPINGS(ICELAKE_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED | GDS),
-	VULNBL_INTEL_STEPPINGS(ICELAKE_D,	X86_STEPPING_ANY,		MMIO | GDS),
-	VULNBL_INTEL_STEPPINGS(ICELAKE_X,	X86_STEPPING_ANY,		MMIO | GDS),
-	VULNBL_INTEL_STEPPINGS(COMETLAKE,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED | GDS),
-	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPINGS(0x0, 0x0),	MMIO | RETBLEED),
-	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED | GDS),
-	VULNBL_INTEL_STEPPINGS(TIGERLAKE_L,	X86_STEPPING_ANY,		GDS),
-	VULNBL_INTEL_STEPPINGS(TIGERLAKE,	X86_STEPPING_ANY,		GDS),
-	VULNBL_INTEL_STEPPINGS(LAKEFIELD,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RETBLEED),
-	VULNBL_INTEL_STEPPINGS(ROCKETLAKE,	X86_STEPPING_ANY,		MMIO | RETBLEED | GDS),
-	VULNBL_INTEL_STEPPINGS(ALDERLAKE,	X86_STEPPING_ANY,		RFDS),
-	VULNBL_INTEL_STEPPINGS(ALDERLAKE_L,	X86_STEPPING_ANY,		RFDS),
-	VULNBL_INTEL_STEPPINGS(RAPTORLAKE,	X86_STEPPING_ANY,		RFDS),
-	VULNBL_INTEL_STEPPINGS(RAPTORLAKE_P,	X86_STEPPING_ANY,		RFDS),
-	VULNBL_INTEL_STEPPINGS(RAPTORLAKE_S,	X86_STEPPING_ANY,		RFDS),
-	VULNBL_INTEL_STEPPINGS(ATOM_GRACEMONT,	X86_STEPPING_ANY,		RFDS),
-	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RFDS),
-	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D,	X86_STEPPING_ANY,		MMIO | RFDS),
-	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L,	X86_STEPPING_ANY,		MMIO | MMIO_SBDS | RFDS),
-	VULNBL_INTEL_STEPPINGS(ATOM_GOLDMONT,	X86_STEPPING_ANY,		RFDS),
-	VULNBL_INTEL_STEPPINGS(ATOM_GOLDMONT_D,	X86_STEPPING_ANY,		RFDS),
-	VULNBL_INTEL_STEPPINGS(ATOM_GOLDMONT_PLUS, X86_STEPPING_ANY,		RFDS),
+	VULNBL_INTEL_STEPS(INTEL_IVYBRIDGE,	X86_STEP_MAX,	SRBDS),
+	VULNBL_INTEL_STEPS(INTEL_HASWELL,	X86_STEP_MAX,	SRBDS),
+	VULNBL_INTEL_STEPS(INTEL_HASWELL_L,	X86_STEP_MAX,	SRBDS),
+	VULNBL_INTEL_STEPS(INTEL_HASWELL_G,	X86_STEP_MAX,	SRBDS),
+	VULNBL_INTEL_STEPS(INTEL_HASWELL_X,	X86_STEP_MAX,	MMIO),
+	VULNBL_INTEL_STEPS(INTEL_BROADWELL_D,	X86_STEP_MAX,	MMIO),
+	VULNBL_INTEL_STEPS(INTEL_BROADWELL_G,	X86_STEP_MAX,	SRBDS),
+	VULNBL_INTEL_STEPS(INTEL_BROADWELL_X,	X86_STEP_MAX,	MMIO),
+	VULNBL_INTEL_STEPS(INTEL_BROADWELL,	X86_STEP_MAX,	SRBDS),
+	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X,	0x5,		MMIO | RETBLEED | GDS),
+	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_X,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | ITS),
+	VULNBL_INTEL_STEPS(INTEL_SKYLAKE_L,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS),
+	VULNBL_INTEL_STEPS(INTEL_SKYLAKE,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS),
+	VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L,	0xb,		MMIO | RETBLEED | GDS | SRBDS),
+	VULNBL_INTEL_STEPS(INTEL_KABYLAKE_L,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS | ITS),
+	VULNBL_INTEL_STEPS(INTEL_KABYLAKE,	0xc,		MMIO | RETBLEED | GDS | SRBDS),
+	VULNBL_INTEL_STEPS(INTEL_KABYLAKE,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | SRBDS | ITS),
+	VULNBL_INTEL_STEPS(INTEL_CANNONLAKE_L,	X86_STEP_MAX,	RETBLEED),
+	VULNBL_INTEL_STEPS(INTEL_ICELAKE_L,	X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
+	VULNBL_INTEL_STEPS(INTEL_ICELAKE_D,	X86_STEP_MAX,	MMIO | GDS | ITS | ITS_NATIVE_ONLY),
+	VULNBL_INTEL_STEPS(INTEL_ICELAKE_X,	X86_STEP_MAX,	MMIO | GDS | ITS | ITS_NATIVE_ONLY),
+	VULNBL_INTEL_STEPS(INTEL_COMETLAKE,	X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
+	VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L,	0x0,		MMIO | RETBLEED | ITS),
+	VULNBL_INTEL_STEPS(INTEL_COMETLAKE_L,	X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED | GDS | ITS),
+	VULNBL_INTEL_STEPS(INTEL_TIGERLAKE_L,	X86_STEP_MAX,	GDS | ITS | ITS_NATIVE_ONLY),
+	VULNBL_INTEL_STEPS(INTEL_TIGERLAKE,	X86_STEP_MAX,	GDS | ITS | ITS_NATIVE_ONLY),
+	VULNBL_INTEL_STEPS(INTEL_LAKEFIELD,	X86_STEP_MAX,	MMIO | MMIO_SBDS | RETBLEED),
+	VULNBL_INTEL_STEPS(INTEL_ROCKETLAKE,	X86_STEP_MAX,	MMIO | RETBLEED | GDS | ITS | ITS_NATIVE_ONLY),
+	VULNBL_INTEL_TYPE(INTEL_ALDERLAKE,	ATOM,		RFDS),
+	VULNBL_INTEL_STEPS(INTEL_ALDERLAKE_L,	X86_STEP_MAX,	RFDS),
+	VULNBL_INTEL_TYPE(INTEL_RAPTORLAKE,	ATOM,		RFDS),
+	VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE_P,	X86_STEP_MAX,	RFDS),
+	VULNBL_INTEL_STEPS(INTEL_RAPTORLAKE_S,	X86_STEP_MAX,	RFDS),
+	VULNBL_INTEL_STEPS(INTEL_ATOM_GRACEMONT, X86_STEP_MAX,	RFDS),
+	VULNBL_INTEL_STEPS(INTEL_ATOM_TREMONT,	X86_STEP_MAX,	MMIO | MMIO_SBDS | RFDS),
+	VULNBL_INTEL_STEPS(INTEL_ATOM_TREMONT_D, X86_STEP_MAX,	MMIO | RFDS),
+	VULNBL_INTEL_STEPS(INTEL_ATOM_TREMONT_L, X86_STEP_MAX,	MMIO | MMIO_SBDS | RFDS),
+	VULNBL_INTEL_STEPS(INTEL_ATOM_GOLDMONT,	X86_STEP_MAX,	RFDS),
+	VULNBL_INTEL_STEPS(INTEL_ATOM_GOLDMONT_D, X86_STEP_MAX, RFDS),
+	VULNBL_INTEL_STEPS(INTEL_ATOM_GOLDMONT_PLUS, X86_STEP_MAX, RFDS),
 
 	VULNBL_AMD(0x15, RETBLEED),
 	VULNBL_AMD(0x16, RETBLEED),
@@ -1363,6 +1371,32 @@ static bool __init vulnerable_to_rfds(u64 x86_arch_cap_msr)
 	return cpu_matches(cpu_vuln_blacklist, RFDS);
 }
 
+static bool __init vulnerable_to_its(u64 x86_arch_cap_msr)
+{
+	/* The "immunity" bit trumps everything else: */
+	if (x86_arch_cap_msr & ARCH_CAP_ITS_NO)
+		return false;
+	if (boot_cpu_data.x86_vendor != X86_VENDOR_INTEL)
+		return false;
+
+	/* None of the affected CPUs have BHI_CTRL */
+	if (boot_cpu_has(X86_FEATURE_BHI_CTRL))
+		return false;
+
+	/*
+	 * If a VMM did not expose ITS_NO, assume that a guest could
+	 * be running on a vulnerable hardware or may migrate to such
+	 * hardware.
+	 */
+	if (boot_cpu_has(X86_FEATURE_HYPERVISOR))
+		return true;
+
+	if (cpu_matches(cpu_vuln_blacklist, ITS))
+		return true;
+
+	return false;
+}
+
 static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 {
 	u64 x86_arch_cap_msr = x86_read_arch_cap_msr();
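The order of checks in vulnerable_to_its() above is the interesting part: the firmware "immunity" bit wins outright, non-Intel parts and parts with BHI_CTRL are excluded by construction, and only then does the hypervisor check force the pessimistic answer, since a VMM may hide ITS_NO from guests. A condensed, freestanding sketch of the same decision ladder (the boolean parameters stand in for the MSR and feature checks; this is an illustration, not a drop-in replacement):

	#include <stdbool.h>

	/* Each flag mirrors one check in vulnerable_to_its() above. */
	static bool its_affected(bool fw_says_immune, bool is_intel,
				 bool has_bhi_ctrl, bool is_guest, bool on_blacklist)
	{
		if (fw_says_immune)		/* ARCH_CAP_ITS_NO trumps everything */
			return false;
		if (!is_intel || has_bhi_ctrl)	/* out of scope by design */
			return false;
		if (is_guest)			/* VMM may hide ITS_NO; assume the worst */
			return true;
		return on_blacklist;		/* bare metal: trust cpu_vuln_blacklist */
	}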
@@ -1483,13 +1517,22 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
 	if (vulnerable_to_rfds(x86_arch_cap_msr))
 		setup_force_cpu_bug(X86_BUG_RFDS);
 
-	/* When virtualized, eIBRS could be hidden, assume vulnerable */
-	if (!(x86_arch_cap_msr & ARCH_CAP_BHI_NO) &&
-	    !cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
+	/*
+	 * Intel parts with eIBRS are vulnerable to BHI attacks. Parts with
+	 * BHI_NO still need to use the BHI mitigation to prevent Intra-mode
+	 * attacks. When virtualized, eIBRS could be hidden, assume vulnerable.
+	 */
+	if (!cpu_matches(cpu_vuln_whitelist, NO_BHI) &&
 	    (boot_cpu_has(X86_FEATURE_IBRS_ENHANCED) ||
 	     boot_cpu_has(X86_FEATURE_HYPERVISOR)))
 		setup_force_cpu_bug(X86_BUG_BHI);
 
+	if (vulnerable_to_its(x86_arch_cap_msr)) {
+		setup_force_cpu_bug(X86_BUG_ITS);
+		if (cpu_matches(cpu_vuln_blacklist, ITS_NATIVE_ONLY))
+			setup_force_cpu_bug(X86_BUG_ITS_NATIVE_ONLY);
+	}
+
 	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
 		return;
 
@@ -22,6 +22,7 @@ static int cpu_debug_show(struct seq_file *m, void *p)
 	seq_printf(m, "die_id: %u\n", c->topo.die_id);
 	seq_printf(m, "cu_id: %u\n", c->topo.cu_id);
 	seq_printf(m, "core_id: %u\n", c->topo.core_id);
+	seq_printf(m, "cpu_type: %s\n", get_topology_cpu_type_name(c));
 	seq_printf(m, "logical_pkg_id: %u\n", c->topo.logical_pkg_id);
 	seq_printf(m, "logical_die_id: %u\n", c->topo.logical_die_id);
 	seq_printf(m, "llc_id: %u\n", c->topo.llc_id);
@@ -6,7 +6,35 @@
 #include <linux/slab.h>
 
 /**
- * x86_match_cpu - match current CPU again an array of x86_cpu_ids
+ * x86_match_vendor_cpu_type - helper function to match the hardware defined
+ *                             cpu-type for a single entry in the x86_cpu_id
+ *                             table. Note, this function does not match the
+ *                             generic cpu-types TOPO_CPU_TYPE_EFFICIENCY and
+ *                             TOPO_CPU_TYPE_PERFORMANCE.
+ * @c: Pointer to the cpuinfo_x86 structure of the CPU to match.
+ * @m: Pointer to the x86_cpu_id entry to match against.
+ *
+ * Return: true if the cpu-type matches, false otherwise.
+ */
+static bool x86_match_vendor_cpu_type(struct cpuinfo_x86 *c, const struct x86_cpu_id *m)
+{
+	if (m->type == X86_CPU_TYPE_ANY)
+		return true;
+
+	/* Hybrid CPUs are special, they are assumed to match all cpu-types */
+	if (cpu_feature_enabled(X86_FEATURE_HYBRID_CPU))
+		return true;
+
+	if (c->x86_vendor == X86_VENDOR_INTEL)
+		return m->type == c->topo_rh.intel_type;
+	if (c->x86_vendor == X86_VENDOR_AMD)
+		return m->type == c->topo_rh.amd_type;
+
+	return false;
+}
+
+/**
+ * x86_match_cpu - match current CPU against an array of x86_cpu_ids
  * @match: Pointer to array of x86_cpu_ids. Last entry terminated with
  *         {}.
  *
@@ -38,9 +66,7 @@ const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match)
 	const struct x86_cpu_id *m;
 	struct cpuinfo_x86 *c = &boot_cpu_data;
 
-	for (m = match;
-	     m->vendor | m->family | m->model | m->steppings | m->feature;
-	     m++) {
+	for (m = match; m->flags & X86_CPU_ID_FLAG_ENTRY_VALID; m++) {
 		if (m->vendor != X86_VENDOR_ANY && c->x86_vendor != m->vendor)
 			continue;
 		if (m->family != X86_FAMILY_ANY && c->x86 != m->family)
@@ -52,6 +78,8 @@ const struct x86_cpu_id *x86_match_cpu(const struct x86_cpu_id *match)
 			continue;
 		if (m->feature != X86_FEATURE_ANY && !cpu_has(c, m->feature))
 			continue;
+		if (!x86_match_vendor_cpu_type(c, m))
+			continue;
 		return m;
 	}
 	return NULL;
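Note what the loop-condition change buys: a table entry built with the new VFM macros carries an explicit validity flag, so the empty {} terminator no longer depends on every field happening to be zero. A typical (illustrative) caller of x86_match_cpu() looks like this; the payload value and table contents are made up for the example:

	#include <linux/errno.h>
	#include <linux/init.h>
	#include <asm/cpu_device_id.h>
	#include <asm/intel-family.h>

	static const struct x86_cpu_id example_ids[] = {
		X86_MATCH_VFM(INTEL_SKYLAKE_X, (void *)1),	/* hypothetical driver_data */
		{}						/* terminator: flags bit clear */
	};

	static int __init example_init(void)
	{
		const struct x86_cpu_id *id = x86_match_cpu(example_ids);

		if (!id)
			return -ENODEV;	/* not running on a listed CPU */
		return 0;
	}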
@@ -45,13 +45,14 @@ static const struct cpuid_bit cpuid_bits[] = {
 	{ X86_FEATURE_HW_PSTATE,	CPUID_EDX,  7, 0x80000007, 0 },
 	{ X86_FEATURE_CPB,		CPUID_EDX,  9, 0x80000007, 0 },
 	{ X86_FEATURE_PROC_FEEDBACK,	CPUID_EDX, 11, 0x80000007, 0 },
-	{ X86_FEATURE_FAST_CPPC,	CPUID_EDX, 15, 0x80000007, 0 },
+	{ X86_FEATURE_AMD_FAST_CPPC,	CPUID_EDX, 15, 0x80000007, 0 },
 	{ X86_FEATURE_MBA,		CPUID_EBX,  6, 0x80000008, 0 },
 	{ X86_FEATURE_SMBA,		CPUID_EBX,  2, 0x80000020, 0 },
 	{ X86_FEATURE_BMEC,		CPUID_EBX,  3, 0x80000020, 0 },
 	{ X86_FEATURE_PERFMON_V2,	CPUID_EAX,  0, 0x80000022, 0 },
 	{ X86_FEATURE_AMD_LBR_V2,	CPUID_EAX,  1, 0x80000022, 0 },
 	{ X86_FEATURE_AMD_LBR_PMC_FREEZE, CPUID_EAX, 2, 0x80000022, 0 },
+	{ X86_FEATURE_AMD_HETEROGENEOUS_CORES, CPUID_EAX, 30, 0x80000026, 0 },
 	{ 0, 0, 0, 0, 0 }
 };
 
@@ -182,6 +182,9 @@ static void parse_topology_amd(struct topo_scan *tscan)
 	if (cpu_feature_enabled(X86_FEATURE_TOPOEXT))
 		has_topoext = cpu_parse_topology_ext(tscan);
 
+	if (cpu_feature_enabled(X86_FEATURE_AMD_HETEROGENEOUS_CORES))
+		tscan->c->topo_rh.cpu_type = cpuid_ebx(0x80000026);
+
 	if (!has_topoext && !parse_8000_0008(tscan))
 		return;
 
@@ -3,6 +3,7 @@
 
 #include <xen/xen.h>
 
+#include <asm/intel-family.h>
 #include <asm/apic.h>
 #include <asm/processor.h>
 #include <asm/smp.h>
@@ -27,6 +28,36 @@ void topology_set_dom(struct topo_scan *tscan, enum x86_topology_domains dom,
 	}
 }
 
+enum x86_topology_cpu_type get_topology_cpu_type(struct cpuinfo_x86 *c)
+{
+	if (c->x86_vendor == X86_VENDOR_INTEL) {
+		switch (c->topo_rh.intel_type) {
+		case INTEL_CPU_TYPE_ATOM: return TOPO_CPU_TYPE_EFFICIENCY;
+		case INTEL_CPU_TYPE_CORE: return TOPO_CPU_TYPE_PERFORMANCE;
+		}
+	}
+	if (c->x86_vendor == X86_VENDOR_AMD) {
+		switch (c->topo_rh.amd_type) {
+		case 0:	return TOPO_CPU_TYPE_PERFORMANCE;
+		case 1:	return TOPO_CPU_TYPE_EFFICIENCY;
+		}
+	}
+
+	return TOPO_CPU_TYPE_UNKNOWN;
+}
+
+const char *get_topology_cpu_type_name(struct cpuinfo_x86 *c)
+{
+	switch (get_topology_cpu_type(c)) {
+	case TOPO_CPU_TYPE_PERFORMANCE:
+		return "performance";
+	case TOPO_CPU_TYPE_EFFICIENCY:
+		return "efficiency";
+	default:
+		return "unknown";
+	}
+}
+
 static unsigned int __maybe_unused parse_num_cores_legacy(struct cpuinfo_x86 *c)
 {
 	struct {
|
|||||||
.llc_id = BAD_APICID,
|
.llc_id = BAD_APICID,
|
||||||
.l2c_id = BAD_APICID,
|
.l2c_id = BAD_APICID,
|
||||||
};
|
};
|
||||||
|
const struct cpuinfo_topology_rh topo_rh_defaults = {
|
||||||
|
.cpu_type = TOPO_CPU_TYPE_UNKNOWN,
|
||||||
|
};
|
||||||
struct cpuinfo_x86 *c = tscan->c;
|
struct cpuinfo_x86 *c = tscan->c;
|
||||||
struct {
|
struct {
|
||||||
u32 unused0 : 16,
|
u32 unused0 : 16,
|
||||||
@ -96,6 +130,7 @@ static void parse_topology(struct topo_scan *tscan, bool early)
|
|||||||
} ebx;
|
} ebx;
|
||||||
|
|
||||||
c->topo = topo_defaults;
|
c->topo = topo_defaults;
|
||||||
|
c->topo_rh = topo_rh_defaults;
|
||||||
|
|
||||||
if (fake_topology(tscan))
|
if (fake_topology(tscan))
|
||||||
return;
|
return;
|
||||||
@ -132,6 +167,8 @@ static void parse_topology(struct topo_scan *tscan, bool early)
|
|||||||
case X86_VENDOR_INTEL:
|
case X86_VENDOR_INTEL:
|
||||||
if (!IS_ENABLED(CONFIG_CPU_SUP_INTEL) || !cpu_parse_topology_ext(tscan))
|
if (!IS_ENABLED(CONFIG_CPU_SUP_INTEL) || !cpu_parse_topology_ext(tscan))
|
||||||
parse_legacy(tscan);
|
parse_legacy(tscan);
|
||||||
|
if (c->cpuid_level >= 0x1a)
|
||||||
|
c->topo_rh.cpu_type = cpuid_eax(0x1a);
|
||||||
break;
|
break;
|
||||||
case X86_VENDOR_HYGON:
|
case X86_VENDOR_HYGON:
|
||||||
if (IS_ENABLED(CONFIG_CPU_SUP_HYGON))
|
if (IS_ENABLED(CONFIG_CPU_SUP_HYGON))
|
||||||
|
@ -364,7 +364,7 @@ create_trampoline(struct ftrace_ops *ops, unsigned int *tramp_size)
|
|||||||
goto fail;
|
goto fail;
|
||||||
|
|
||||||
ip = trampoline + size;
|
ip = trampoline + size;
|
||||||
if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
|
if (cpu_wants_rethunk_at(ip))
|
||||||
__text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
|
__text_gen_insn(ip, JMP32_INSN_OPCODE, ip, x86_return_thunk, JMP32_INSN_SIZE);
|
||||||
else
|
else
|
||||||
memcpy(ip, retq, sizeof(retq));
|
memcpy(ip, retq, sizeof(retq));
|
||||||
|
@ -81,7 +81,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
|
|||||||
break;
|
break;
|
||||||
|
|
||||||
case RET:
|
case RET:
|
||||||
if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
|
if (cpu_wants_rethunk_at(insn))
|
||||||
code = text_gen_insn(JMP32_INSN_OPCODE, insn, x86_return_thunk);
|
code = text_gen_insn(JMP32_INSN_OPCODE, insn, x86_return_thunk);
|
||||||
else
|
else
|
||||||
code = &retinsn;
|
code = &retinsn;
|
||||||
@ -90,7 +90,7 @@ static void __ref __static_call_transform(void *insn, enum insn_type type,
|
|||||||
case JCC:
|
case JCC:
|
||||||
if (!func) {
|
if (!func) {
|
||||||
func = __static_call_return;
|
func = __static_call_return;
|
||||||
if (cpu_feature_enabled(X86_FEATURE_RETHUNK))
|
if (cpu_wants_rethunk())
|
||||||
func = x86_return_thunk;
|
func = x86_return_thunk;
|
||||||
}
|
}
|
||||||
|
|
||||||
|
@ -541,4 +541,14 @@ INIT_PER_CPU(irq_stack_backing_store);
|
|||||||
"SRSO function pair won't alias");
|
"SRSO function pair won't alias");
|
||||||
#endif
|
#endif
|
||||||
|
|
||||||
|
#if defined(CONFIG_MITIGATION_ITS) && !defined(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)
|
||||||
|
. = ASSERT(__x86_indirect_its_thunk_rax & 0x20, "__x86_indirect_thunk_rax not in second half of cacheline");
|
||||||
|
. = ASSERT(((__x86_indirect_its_thunk_rcx - __x86_indirect_its_thunk_rax) % 64) == 0, "Indirect thunks are not cacheline apart");
|
||||||
|
. = ASSERT(__x86_indirect_its_thunk_array == __x86_indirect_its_thunk_rax, "Gap in ITS thunk array");
|
||||||
|
#endif
|
||||||
|
|
||||||
|
#if defined(CONFIG_MITIGATION_ITS) && !defined(CONFIG_DEBUG_FORCE_FUNCTION_ALIGN_64B)
|
||||||
|
. = ASSERT(its_return_thunk & 0x20, "its_return_thunk not in second half of cacheline");
|
||||||
|
#endif
|
||||||
|
|
||||||
#endif /* CONFIG_X86_64 */
|
#endif /* CONFIG_X86_64 */
|
||||||
|
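The linker asserts encode the core ITS invariant: every mitigation thunk must begin in the upper half of its 64-byte cacheline, i.e. with bit 5 of the address set, because ITS only affects branch targets in the lower half. A tiny sketch of the same predicate, runnable anywhere:

	#include <stdbool.h>
	#include <stdint.h>

	/* True if addr falls in the upper 32 bytes of its 64-byte cacheline. */
	static bool in_upper_half_of_cacheline(uintptr_t addr)
	{
		return (addr & 0x20) != 0;	/* same test as the ASSERTs above */
	}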
@@ -1578,7 +1578,7 @@ static bool kvm_is_immutable_feature_msr(u32 msr)
 	 ARCH_CAP_PSCHANGE_MC_NO | ARCH_CAP_TSX_CTRL_MSR | ARCH_CAP_TAA_NO | \
 	 ARCH_CAP_SBDR_SSDP_NO | ARCH_CAP_FBSDP_NO | ARCH_CAP_PSDP_NO | \
 	 ARCH_CAP_FB_CLEAR | ARCH_CAP_RRSBA | ARCH_CAP_PBRSB_NO | ARCH_CAP_GDS_NO | \
-	 ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO)
+	 ARCH_CAP_RFDS_NO | ARCH_CAP_RFDS_CLEAR | ARCH_CAP_BHI_NO | ARCH_CAP_ITS_NO)
 
 static u64 kvm_get_arch_capabilities(void)
 {
@@ -1612,6 +1612,8 @@ static u64 kvm_get_arch_capabilities(void)
 		data |= ARCH_CAP_MDS_NO;
 	if (!boot_cpu_has_bug(X86_BUG_RFDS))
 		data |= ARCH_CAP_RFDS_NO;
+	if (!boot_cpu_has_bug(X86_BUG_ITS))
+		data |= ARCH_CAP_ITS_NO;
 
 	if (!boot_cpu_has(X86_FEATURE_RTM)) {
 		/*
@@ -366,6 +366,45 @@ SYM_FUNC_END(call_depth_return_thunk)
 
 #endif /* CONFIG_MITIGATION_CALL_DEPTH_TRACKING */
 
+#ifdef CONFIG_MITIGATION_ITS
+
+.macro ITS_THUNK reg
+
+SYM_INNER_LABEL(__x86_indirect_its_thunk_\reg, SYM_L_GLOBAL)
+	UNWIND_HINT_UNDEFINED
+	ANNOTATE_NOENDBR
+	ANNOTATE_RETPOLINE_SAFE
+	jmp *%\reg
+	int3
+	.align 32, 0xcc		/* fill to the end of the line */
+	.skip 32, 0xcc		/* skip to the next upper half */
+.endm
+
+/* ITS mitigation requires thunks be aligned to upper half of cacheline */
+.align 64, 0xcc
+.skip 32, 0xcc
+SYM_CODE_START(__x86_indirect_its_thunk_array)
+
+#define GEN(reg) ITS_THUNK reg
+#include <asm/GEN-for-each-reg.h>
+#undef GEN
+
+	.align 64, 0xcc
+SYM_CODE_END(__x86_indirect_its_thunk_array)
+
+.align 64, 0xcc
+.skip 32, 0xcc
+SYM_CODE_START(its_return_thunk)
+	UNWIND_HINT_FUNC
+	ANNOTATE_NOENDBR
+	ANNOTATE_UNRET_SAFE
+	ret
+	int3
+SYM_CODE_END(its_return_thunk)
+EXPORT_SYMBOL(its_return_thunk)
+
+#endif /* CONFIG_MITIGATION_ITS */
+
 /*
  * This function name is magical and is used by -mfunction-return=thunk-extern
  * for the compiler to generate JMPs to it.
@@ -41,6 +41,8 @@ static u8 *emit_code(u8 *ptr, u32 bytes, unsigned int len)
 #define EMIT2(b1, b2)		EMIT((b1) + ((b2) << 8), 2)
 #define EMIT3(b1, b2, b3)	EMIT((b1) + ((b2) << 8) + ((b3) << 16), 3)
 #define EMIT4(b1, b2, b3, b4)	EMIT((b1) + ((b2) << 8) + ((b3) << 16) + ((b4) << 24), 4)
+#define EMIT5(b1, b2, b3, b4, b5) \
+	do { EMIT1(b1); EMIT4(b2, b3, b4, b5); } while (0)
 
 #define EMIT1_off32(b1, off) \
 	do { EMIT1(b1); EMIT(off, 4); } while (0)
@@ -637,7 +639,10 @@ static void emit_indirect_jump(u8 **pprog, int reg, u8 *ip)
 {
 	u8 *prog = *pprog;
 
-	if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
+	if (cpu_feature_enabled(X86_FEATURE_INDIRECT_THUNK_ITS)) {
+		OPTIMIZER_HIDE_VAR(reg);
+		emit_jump(&prog, &__x86_indirect_its_thunk_array[reg], ip);
+	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE_LFENCE)) {
 		EMIT_LFENCE();
 		EMIT2(0xFF, 0xE0 + reg);
 	} else if (cpu_feature_enabled(X86_FEATURE_RETPOLINE)) {
|
|||||||
{
|
{
|
||||||
u8 *prog = *pprog;
|
u8 *prog = *pprog;
|
||||||
|
|
||||||
if (cpu_feature_enabled(X86_FEATURE_RETHUNK)) {
|
if (cpu_wants_rethunk()) {
|
||||||
emit_jump(&prog, x86_return_thunk, ip);
|
emit_jump(&prog, x86_return_thunk, ip);
|
||||||
} else {
|
} else {
|
||||||
EMIT1(0xC3); /* ret */
|
EMIT1(0xC3); /* ret */
|
||||||
@ -1412,6 +1417,48 @@ static void emit_shiftx(u8 **pprog, u32 dst_reg, u8 src_reg, bool is64, u8 op)
|
|||||||
#define LOAD_TAIL_CALL_CNT_PTR(stack) \
|
#define LOAD_TAIL_CALL_CNT_PTR(stack) \
|
||||||
__LOAD_TCC_PTR(BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack))
|
__LOAD_TCC_PTR(BPF_TAIL_CALL_CNT_PTR_STACK_OFF(stack))
|
||||||
|
|
||||||
|
static int emit_spectre_bhb_barrier(u8 **pprog, u8 *ip,
|
||||||
|
struct bpf_prog *bpf_prog)
|
||||||
|
{
|
||||||
|
u8 *prog = *pprog;
|
||||||
|
u8 *func;
|
||||||
|
|
||||||
|
if (cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP)) {
|
||||||
|
/* The clearing sequence clobbers eax and ecx. */
|
||||||
|
EMIT1(0x50); /* push rax */
|
||||||
|
EMIT1(0x51); /* push rcx */
|
||||||
|
ip += 2;
|
||||||
|
|
||||||
|
func = (u8 *)clear_bhb_loop;
|
||||||
|
ip += x86_call_depth_emit_accounting(&prog, func);
|
||||||
|
|
||||||
|
if (emit_call(&prog, func, ip))
|
||||||
|
return -EINVAL;
|
||||||
|
EMIT1(0x59); /* pop rcx */
|
||||||
|
EMIT1(0x58); /* pop rax */
|
||||||
|
}
|
||||||
|
/* Insert IBHF instruction */
|
||||||
|
if ((cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_LOOP) &&
|
||||||
|
cpu_feature_enabled(X86_FEATURE_HYPERVISOR)) ||
|
||||||
|
cpu_feature_enabled(X86_FEATURE_CLEAR_BHB_HW)) {
|
||||||
|
/*
|
||||||
|
* Add an Indirect Branch History Fence (IBHF). IBHF acts as a
|
||||||
|
* fence preventing branch history from before the fence from
|
||||||
|
* affecting indirect branches after the fence. This is
|
||||||
|
* specifically used in cBPF jitted code to prevent Intra-mode
|
||||||
|
* BHI attacks. The IBHF instruction is designed to be a NOP on
|
||||||
|
* hardware that doesn't need or support it. The REP and REX.W
|
||||||
|
* prefixes are required by the microcode, and they also ensure
|
||||||
|
* that the NOP is unlikely to be used in existing code.
|
||||||
|
*
|
||||||
|
* IBHF is not a valid instruction in 32-bit mode.
|
||||||
|
*/
|
||||||
|
EMIT5(0xF3, 0x48, 0x0F, 0x1E, 0xF8); /* ibhf */
|
||||||
|
}
|
||||||
|
*pprog = prog;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
|
static int do_jit(struct bpf_prog *bpf_prog, int *addrs, u8 *image, u8 *rw_image,
|
||||||
int oldproglen, struct jit_context *ctx, bool jmp_padding)
|
int oldproglen, struct jit_context *ctx, bool jmp_padding)
|
||||||
{
|
{
|
||||||
@ -2405,6 +2452,13 @@ emit_jmp:
|
|||||||
seen_exit = true;
|
seen_exit = true;
|
||||||
/* Update cleanup_addr */
|
/* Update cleanup_addr */
|
||||||
ctx->cleanup_addr = proglen;
|
ctx->cleanup_addr = proglen;
|
||||||
|
if (bpf_prog_was_classic(bpf_prog) &&
|
||||||
|
!capable(CAP_SYS_ADMIN)) {
|
||||||
|
u8 *ip = image + addrs[i - 1];
|
||||||
|
|
||||||
|
if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog))
|
||||||
|
return -EINVAL;
|
||||||
|
}
|
||||||
if (bpf_prog->aux->exception_boundary) {
|
if (bpf_prog->aux->exception_boundary) {
|
||||||
pop_callee_regs(&prog, all_callee_regs_used);
|
pop_callee_regs(&prog, all_callee_regs_used);
|
||||||
pop_r12(&prog);
|
pop_r12(&prog);
|
||||||
|
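The five IBHF bytes emitted above decompose exactly as the in-code comment describes: an F3 (REP) prefix, a 48 (REX.W) prefix, and the 0F 1E F8 opcode from the hint-NOP space, so CPUs without the microcode support simply execute a no-op. The barrier is only generated for classic-BPF programs loaded without CAP_SYS_ADMIN, i.e. the untrusted cBPF case. A sketch that just lays the bytes out for inspection (buffer handling here is illustrative, not the JIT's):

	#include <stdio.h>
	#include <stdint.h>

	int main(void)
	{
		/* rep; rex.w; 0f 1e f8 -- IBHF, a NOP where unsupported */
		const uint8_t ibhf[5] = { 0xF3, 0x48, 0x0F, 0x1E, 0xF8 };

		for (int i = 0; i < 5; i++)
			printf("%02X ", ibhf[i]);
		printf("\n");
		return 0;
	}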
@@ -117,14 +117,13 @@ static void punit_dbgfs_unregister(void)
 	debugfs_remove_recursive(punit_dbg_file);
 }
 
-#define X86_MATCH(model, data)						 \
-	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6, INTEL_FAM6_##model, \
-					   X86_FEATURE_MWAIT, data)
+#define X86_MATCH(vfm, data)						 \
+	X86_MATCH_VFM_FEATURE(vfm, X86_FEATURE_MWAIT, data)
 
 static const struct x86_cpu_id intel_punit_cpu_ids[] = {
-	X86_MATCH(ATOM_SILVERMONT,		&punit_device_byt),
-	X86_MATCH(ATOM_SILVERMONT_MID,		&punit_device_tng),
-	X86_MATCH(ATOM_AIRMONT,			&punit_device_cht),
+	X86_MATCH(INTEL_ATOM_SILVERMONT,	&punit_device_byt),
+	X86_MATCH(INTEL_ATOM_SILVERMONT_MID,	&punit_device_tng),
+	X86_MATCH(INTEL_ATOM_AIRMONT,		&punit_device_cht),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, intel_punit_cpu_ids);
@@ -554,6 +554,7 @@ CONFIG_MITIGATION_SLS=y
 # CONFIG_MITIGATION_GDS_FORCE is not set
 CONFIG_MITIGATION_RFDS=y
 CONFIG_MITIGATION_SPECTRE_BHI=y
+CONFIG_MITIGATION_ITS=y
 CONFIG_ARCH_HAS_ADD_PAGES=y
 
 #
@@ -555,6 +555,7 @@ CONFIG_MITIGATION_SLS=y
 # CONFIG_MITIGATION_GDS_FORCE is not set
 CONFIG_MITIGATION_RFDS=y
 CONFIG_MITIGATION_SPECTRE_BHI=y
+CONFIG_MITIGATION_ITS=y
 CONFIG_ARCH_HAS_ADD_PAGES=y
 
 #
@@ -555,6 +555,7 @@ CONFIG_MITIGATION_SLS=y
 # CONFIG_MITIGATION_GDS_FORCE is not set
 CONFIG_MITIGATION_RFDS=y
 CONFIG_MITIGATION_SPECTRE_BHI=y
+CONFIG_MITIGATION_ITS=y
 CONFIG_ARCH_HAS_ADD_PAGES=y
 
 #
@@ -551,6 +551,7 @@ CONFIG_MITIGATION_SLS=y
 # CONFIG_MITIGATION_GDS_FORCE is not set
 CONFIG_MITIGATION_RFDS=y
 CONFIG_MITIGATION_SPECTRE_BHI=y
+CONFIG_MITIGATION_ITS=y
 CONFIG_ARCH_HAS_ADD_PAGES=y
 
 #
@@ -579,6 +579,7 @@ CPU_SHOW_VULN_FALLBACK(retbleed);
 CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
 CPU_SHOW_VULN_FALLBACK(gds);
 CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
+CPU_SHOW_VULN_FALLBACK(indirect_target_selection);
 
 static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
 static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -594,6 +595,7 @@ static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
 static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
 static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
 static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
+static DEVICE_ATTR(indirect_target_selection, 0444, cpu_show_indirect_target_selection, NULL);
 
 static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 	&dev_attr_meltdown.attr,
@@ -610,6 +612,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
 	&dev_attr_spec_rstack_overflow.attr,
 	&dev_attr_gather_data_sampling.attr,
 	&dev_attr_reg_file_data_sampling.attr,
+	&dev_attr_indirect_target_selection.attr,
 	NULL
 };
 
@@ -924,7 +924,7 @@ static u32 amd_pstate_get_transition_delay_us(unsigned int cpu)
 
 	transition_delay_ns = cppc_get_transition_latency(cpu);
 	if (transition_delay_ns == CPUFREQ_ETERNAL) {
-		if (cpu_feature_enabled(X86_FEATURE_FAST_CPPC))
+		if (cpu_feature_enabled(X86_FEATURE_AMD_FAST_CPPC))
 			return AMD_PSTATE_FAST_CPPC_TRANSITION_DELAY;
 		else
 			return AMD_PSTATE_TRANSITION_DELAY;
@@ -520,10 +520,10 @@ static struct cpufreq_driver centrino_driver = {
  * or ASCII model IDs.
  */
 static const struct x86_cpu_id centrino_ids[] = {
-	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL,  6,  9, X86_FEATURE_EST, NULL),
-	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL,  6, 13, X86_FEATURE_EST, NULL),
-	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 15,  3, X86_FEATURE_EST, NULL),
-	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 15,  4, X86_FEATURE_EST, NULL),
+	X86_MATCH_VFM_FEATURE(IFM( 6,  9), X86_FEATURE_EST, NULL),
+	X86_MATCH_VFM_FEATURE(IFM( 6, 13), X86_FEATURE_EST, NULL),
+	X86_MATCH_VFM_FEATURE(IFM(15,  3), X86_FEATURE_EST, NULL),
+	X86_MATCH_VFM_FEATURE(IFM(15,  4), X86_FEATURE_EST, NULL),
 	{}
 };
 
@@ -853,15 +853,17 @@ static struct res_config gnr_cfg = {
 };
 
 static const struct x86_cpu_id i10nm_cpuids[] = {
-	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_TREMONT_D,	X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
-	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_TREMONT_D,	X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
-	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_X,		X86_STEPPINGS(0x0, 0x3), &i10nm_cfg0),
-	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_X,		X86_STEPPINGS(0x4, 0xf), &i10nm_cfg1),
-	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ICELAKE_D,		X86_STEPPINGS(0x0, 0xf), &i10nm_cfg1),
-	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SAPPHIRERAPIDS_X,	X86_STEPPINGS(0x0, 0xf), &spr_cfg),
-	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(EMERALDRAPIDS_X,	X86_STEPPINGS(0x0, 0xf), &spr_cfg),
-	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(GRANITERAPIDS_X,	X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
-	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(ATOM_CRESTMONT_X,	X86_STEPPINGS(0x0, 0xf), &gnr_cfg),
+	X86_MATCH_VFM_STEPS(INTEL_ATOM_TREMONT_D, X86_STEP_MIN, 0x3, &i10nm_cfg0),
+	X86_MATCH_VFM_STEPS(INTEL_ATOM_TREMONT_D, 0x4, X86_STEP_MAX, &i10nm_cfg1),
+	X86_MATCH_VFM_STEPS(INTEL_ICELAKE_X, X86_STEP_MIN, 0x3, &i10nm_cfg0),
+	X86_MATCH_VFM_STEPS(INTEL_ICELAKE_X, 0x4, X86_STEP_MAX, &i10nm_cfg1),
+	X86_MATCH_VFM(	    INTEL_ICELAKE_D,	 &i10nm_cfg1),
+
+	X86_MATCH_VFM(INTEL_SAPPHIRERAPIDS_X,	 &spr_cfg),
+	X86_MATCH_VFM(INTEL_EMERALDRAPIDS_X,	 &spr_cfg),
+	X86_MATCH_VFM(INTEL_GRANITERAPIDS_X,	 &gnr_cfg),
+	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT_X,	 &gnr_cfg),
+	X86_MATCH_VFM(INTEL_ATOM_CRESTMONT,	 &gnr_cfg),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, i10nm_cpuids);
@@ -164,7 +164,7 @@ static struct res_config skx_cfg = {
 };
 
 static const struct x86_cpu_id skx_cpuids[] = {
-	X86_MATCH_INTEL_FAM6_MODEL_STEPPINGS(SKYLAKE_X, X86_STEPPINGS(0x0, 0xf), &skx_cfg),
+	X86_MATCH_VFM(INTEL_SKYLAKE_X, &skx_cfg),
 	{ }
 };
 MODULE_DEVICE_TABLE(x86cpu, skx_cpuids);
@@ -298,6 +298,7 @@ static int amd_i2c_dw_xfer_quirk(struct i2c_adapter *adap, struct i2c_msg *msgs,
 
 	dev->msgs = msgs;
 	dev->msgs_num = num_msgs;
+	dev->msg_write_idx = 0;
 	i2c_dw_xfer_init(dev);
 	i2c_dw_disable_int(dev);
 
@@ -309,7 +309,7 @@ static void rtw_coex_tdma_timer_base(struct rtw_dev *rtwdev, u8 type)
 {
 	struct rtw_coex *coex = &rtwdev->coex;
 	struct rtw_coex_stat *coex_stat = &coex->stat;
-	u8 para[2] = {0};
+	u8 para[6] = {};
 	u8 times;
 	u16 tbtt_interval = coex_stat->wl_beacon_interval;
 
@@ -11,16 +11,15 @@
 
 #include "ifs.h"
 
-#define X86_MATCH(model, array_gen)				\
-	X86_MATCH_VENDOR_FAM_MODEL_FEATURE(INTEL, 6,		\
-		INTEL_FAM6_##model, X86_FEATURE_CORE_CAPABILITIES, array_gen)
+#define X86_MATCH(vfm, array_gen)				\
+	X86_MATCH_VFM_FEATURE(vfm, X86_FEATURE_CORE_CAPABILITIES, array_gen)
 
 static const struct x86_cpu_id ifs_cpu_ids[] __initconst = {
-	X86_MATCH(SAPPHIRERAPIDS_X, ARRAY_GEN0),
-	X86_MATCH(EMERALDRAPIDS_X, ARRAY_GEN0),
-	X86_MATCH(GRANITERAPIDS_X, ARRAY_GEN0),
-	X86_MATCH(GRANITERAPIDS_D, ARRAY_GEN0),
-	X86_MATCH(ATOM_CRESTMONT_X, ARRAY_GEN1),
+	X86_MATCH(INTEL_SAPPHIRERAPIDS_X, ARRAY_GEN0),
+	X86_MATCH(INTEL_EMERALDRAPIDS_X, ARRAY_GEN0),
+	X86_MATCH(INTEL_GRANITERAPIDS_X, ARRAY_GEN0),
+	X86_MATCH(INTEL_GRANITERAPIDS_D, ARRAY_GEN0),
+	X86_MATCH(INTEL_ATOM_CRESTMONT_X, ARRAY_GEN1),
 	{}
 };
 MODULE_DEVICE_TABLE(x86cpu, ifs_cpu_ids);
@@ -46,7 +46,6 @@ int dasd_gendisk_alloc(struct dasd_block *block)
 		 */
 		.max_segment_size = PAGE_SIZE,
 		.seg_boundary_mask = PAGE_SIZE - 1,
-		.dma_alignment = PAGE_SIZE - 1,
 		.max_segments = USHRT_MAX,
 	};
 	struct gendisk *gdp;
@@ -77,6 +77,8 @@ extern ssize_t cpu_show_gds(struct device *dev,
 			    struct device_attribute *attr, char *buf);
 extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev,
 					       struct device_attribute *attr, char *buf);
+extern ssize_t cpu_show_indirect_target_selection(struct device *dev,
+						  struct device_attribute *attr, char *buf);
 
 extern __printf(4, 5)
 struct device *cpu_device_create(struct device *parent, void *drvdata,
@@ -281,6 +281,8 @@ long hugetlb_change_protection(struct vm_area_struct *vma,
 
 bool is_hugetlb_entry_migration(pte_t pte);
 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma);
+void fixup_hugetlb_reservations(struct vm_area_struct *vma);
+void hugetlb_split(struct vm_area_struct *vma, unsigned long addr);
 
 #else /* !CONFIG_HUGETLB_PAGE */
 
@@ -490,6 +492,12 @@ static inline vm_fault_t hugetlb_fault(struct mm_struct *mm,
 
 static inline void hugetlb_unshare_all_pmds(struct vm_area_struct *vma) { }
 
+static inline void fixup_hugetlb_reservations(struct vm_area_struct *vma)
+{
+}
+
+static inline void hugetlb_split(struct vm_area_struct *vma, unsigned long addr) {}
+
 #endif /* !CONFIG_HUGETLB_PAGE */
 /*
  * hugepages at page global directory. If arch support
@@ -677,6 +677,9 @@ struct x86_cpu_id {
 	__u16 model;
 	__u16 steppings;
 	__u16 feature;	/* bit index */
+	/* Solely for kernel-internal use: DO NOT EXPORT to userspace! */
+	__u16 flags;
+	__u8  type;
 	kernel_ulong_t driver_data;
 };
 
@@ -685,7 +688,10 @@ struct x86_cpu_id {
 #define X86_FAMILY_ANY 0
 #define X86_MODEL_ANY  0
 #define X86_STEPPING_ANY 0
+#define X86_STEP_MIN 0
+#define X86_STEP_MAX 0xf
 #define X86_FEATURE_ANY 0	/* Same as FPU, you can't test for that */
+#define X86_CPU_TYPE_ANY 0
 
 /*
 * Generic table type for matching CPU features.
@@ -29,6 +29,7 @@
 #include <linux/idr.h>
 #include <linux/leds.h>
 #include <linux/rculist.h>
+#include <linux/srcu.h>
 
 #include <net/bluetooth/hci.h>
 #include <net/bluetooth/hci_sync.h>
@@ -338,6 +339,7 @@ struct adv_monitor {
 
 struct hci_dev {
 	struct list_head	list;
+	struct srcu_struct	srcu;
 	struct mutex		lock;
 
 	struct ida		unset_handle_ida;
83
mm/hugetlb.c
83
mm/hugetlb.c
@ -96,7 +96,7 @@ static void hugetlb_vma_lock_free(struct vm_area_struct *vma);
|
|||||||
static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
|
static void hugetlb_vma_lock_alloc(struct vm_area_struct *vma);
|
||||||
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
|
static void __hugetlb_vma_unlock_write_free(struct vm_area_struct *vma);
|
||||||
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
|
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
|
||||||
unsigned long start, unsigned long end);
|
unsigned long start, unsigned long end, bool take_locks);
|
||||||
static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
|
static struct resv_map *vma_resv_map(struct vm_area_struct *vma);
|
||||||
|
|
||||||
static inline bool subpool_is_free(struct hugepage_subpool *spool)
|
static inline bool subpool_is_free(struct hugepage_subpool *spool)
|
||||||
@ -1216,7 +1216,7 @@ void hugetlb_dup_vma_private(struct vm_area_struct *vma)
|
|||||||
/*
|
/*
|
||||||
* Reset and decrement one ref on hugepage private reservation.
|
* Reset and decrement one ref on hugepage private reservation.
|
||||||
* Called with mm->mmap_lock writer semaphore held.
|
* Called with mm->mmap_lock writer semaphore held.
|
||||||
* This function should be only used by move_vma() and operate on
|
* This function should be only used by mremap and operate on
|
||||||
* same sized vma. It should never come here with last ref on the
|
* same sized vma. It should never come here with last ref on the
|
||||||
* reservation.
|
* reservation.
|
||||||
*/
|
*/
|
||||||
@ -4941,26 +4941,40 @@ static int hugetlb_vm_op_split(struct vm_area_struct *vma, unsigned long addr)
|
|||||||
{
|
{
|
||||||
if (addr & ~(huge_page_mask(hstate_vma(vma))))
|
if (addr & ~(huge_page_mask(hstate_vma(vma))))
|
||||||
return -EINVAL;
|
return -EINVAL;
|
||||||
|
return 0;
|
||||||
|
}
|
||||||
|
|
||||||
|
void hugetlb_split(struct vm_area_struct *vma, unsigned long addr)
|
||||||
|
{
|
||||||
/*
|
/*
|
||||||
* PMD sharing is only possible for PUD_SIZE-aligned address ranges
|
* PMD sharing is only possible for PUD_SIZE-aligned address ranges
|
||||||
* in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this
|
* in HugeTLB VMAs. If we will lose PUD_SIZE alignment due to this
|
||||||
* split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
|
* split, unshare PMDs in the PUD_SIZE interval surrounding addr now.
|
||||||
|
* This function is called in the middle of a VMA split operation, with
|
||||||
|
* MM, VMA and rmap all write-locked to prevent concurrent page table
|
||||||
|
* walks (except hardware and gup_fast()).
|
||||||
*/
|
*/
|
||||||
|
vma_assert_write_locked(vma);
|
||||||
|
i_mmap_assert_write_locked(vma->vm_file->f_mapping);
|
||||||
|
|
||||||
if (addr & ~PUD_MASK) {
|
if (addr & ~PUD_MASK) {
|
||||||
/*
|
|
||||||
* hugetlb_vm_op_split is called right before we attempt to
|
|
||||||
* split the VMA. We will need to unshare PMDs in the old and
|
|
||||||
* new VMAs, so let's unshare before we split.
|
|
||||||
*/
|
|
||||||
unsigned long floor = addr & PUD_MASK;
|
unsigned long floor = addr & PUD_MASK;
|
||||||
unsigned long ceil = floor + PUD_SIZE;
|
unsigned long ceil = floor + PUD_SIZE;
|
||||||
|
|
||||||
if (floor >= vma->vm_start && ceil <= vma->vm_end)
|
if (floor >= vma->vm_start && ceil <= vma->vm_end) {
|
||||||
hugetlb_unshare_pmds(vma, floor, ceil);
|
/*
|
||||||
|
* Locking:
|
||||||
|
* Use take_locks=false here.
|
||||||
|
* The file rmap lock is already held.
|
||||||
|
* The hugetlb VMA lock can't be taken when we already
|
||||||
|
* hold the file rmap lock, and we don't need it because
|
||||||
|
* its purpose is to synchronize against concurrent page
|
||||||
|
* table walks, which are not possible thanks to the
|
||||||
|
* locks held by our caller.
|
||||||
|
*/
|
||||||
|
hugetlb_unshare_pmds(vma, floor, ceil, /* take_locks = */ false);
|
||||||
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
return 0;
|
|
||||||
}
|
}
|
||||||
|
|
||||||
static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
|
static unsigned long hugetlb_vm_op_pagesize(struct vm_area_struct *vma)
|
||||||
@ -7147,6 +7161,13 @@ int huge_pmd_unshare(struct mm_struct *mm, struct vm_area_struct *vma,
|
|||||||
return 0;
|
return 0;
|
||||||
|
|
||||||
pud_clear(pud);
|
pud_clear(pud);
|
||||||
|
/*
|
||||||
|
* Once our caller drops the rmap lock, some other process might be
|
||||||
|
* using this page table as a normal, non-hugetlb page table.
|
||||||
|
* Wait for pending gup_fast() in other threads to finish before letting
|
||||||
|
* that happen.
|
||||||
|
*/
|
||||||
|
tlb_remove_table_sync_one();
|
||||||
put_page(virt_to_page(ptep));
|
put_page(virt_to_page(ptep));
|
||||||
mm_dec_nr_pmds(mm);
|
mm_dec_nr_pmds(mm);
|
||||||
return 1;
|
return 1;
|
||||||
@ -7383,9 +7404,16 @@ void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int re
|
|||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
/*
|
||||||
|
* If @take_locks is false, the caller must ensure that no concurrent page table
|
||||||
|
* access can happen (except for gup_fast() and hardware page walks).
|
||||||
|
* If @take_locks is true, we take the hugetlb VMA lock (to lock out things like
|
||||||
|
* concurrent page fault handling) and the file rmap lock.
|
||||||
|
*/
|
||||||
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
|
static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
|
||||||
unsigned long start,
|
unsigned long start,
|
||||||
unsigned long end)
|
unsigned long end,
|
||||||
|
bool take_locks)
|
||||||
{
|
{
|
||||||
struct hstate *h = hstate_vma(vma);
|
struct hstate *h = hstate_vma(vma);
|
||||||
unsigned long sz = huge_page_size(h);
|
unsigned long sz = huge_page_size(h);
|
||||||
@ -7409,8 +7437,12 @@ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
 	mmu_notifier_range_init(&range, MMU_NOTIFY_CLEAR, 0, mm,
 				start, end);
 	mmu_notifier_invalidate_range_start(&range);
-	hugetlb_vma_lock_write(vma);
-	i_mmap_lock_write(vma->vm_file->f_mapping);
+	if (take_locks) {
+		hugetlb_vma_lock_write(vma);
+		i_mmap_lock_write(vma->vm_file->f_mapping);
+	} else {
+		i_mmap_assert_write_locked(vma->vm_file->f_mapping);
+	}
 	for (address = start; address < end; address += PUD_SIZE) {
 		ptep = hugetlb_walk(vma, address, sz);
 		if (!ptep)
@ -7420,8 +7452,10 @@ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
 		spin_unlock(ptl);
 	}
 	flush_hugetlb_tlb_range(vma, start, end);
-	i_mmap_unlock_write(vma->vm_file->f_mapping);
-	hugetlb_vma_unlock_write(vma);
+	if (take_locks) {
+		i_mmap_unlock_write(vma->vm_file->f_mapping);
+		hugetlb_vma_unlock_write(vma);
+	}
 	/*
 	 * No need to call mmu_notifier_arch_invalidate_secondary_tlbs(), see
 	 * Documentation/mm/mmu_notifier.rst.
@ -7436,7 +7470,22 @@ static void hugetlb_unshare_pmds(struct vm_area_struct *vma,
 void hugetlb_unshare_all_pmds(struct vm_area_struct *vma)
 {
 	hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
-			ALIGN_DOWN(vma->vm_end, PUD_SIZE));
+			ALIGN_DOWN(vma->vm_end, PUD_SIZE),
+			/* take_locks = */ true);
+}
+
+/*
+ * For hugetlb, mremap() is an odd edge case - while the VMA copying is
+ * performed, we permit both the old and new VMAs to reference the same
+ * reservation.
+ *
+ * We fix this up after the operation succeeds, or if a newly allocated VMA
+ * is closed as a result of a failure to allocate memory.
+ */
+void fixup_hugetlb_reservations(struct vm_area_struct *vma)
+{
+	if (is_vm_hugetlb_page(vma))
+		clear_vma_resv_huge_pages(vma);
 }
 
 #ifdef CONFIG_CMA
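The take_locks flag splits lock responsibility between the two callers: hugetlb_unshare_all_pmds() runs with no relevant locks held and passes true, while the VMA-split path already holds the file rmap lock and passes false. A condensed illustration of the resulting calling convention (names are from the patch, call sites abbreviated for clarity):

/* Caller holds no locks: let the callee take and release them. */
hugetlb_unshare_pmds(vma, ALIGN(vma->vm_start, PUD_SIZE),
		     ALIGN_DOWN(vma->vm_end, PUD_SIZE),
		     /* take_locks = */ true);

/* Caller (the VMA-split path) already holds the file rmap lock, so the
 * callee only asserts it instead of taking it.
 */
i_mmap_assert_write_locked(vma->vm_file->f_mapping);
hugetlb_unshare_pmds(vma, floor, ceil, /* take_locks = */ false);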
@ -2400,7 +2400,14 @@ static int __split_vma(struct vma_iterator *vmi, struct vm_area_struct *vma,
 	init_vma_prep(&vp, vma);
 	vp.insert = new;
 	vma_prepare(&vp);
+
+	/*
+	 * Get rid of huge pages and shared page tables straddling the split
+	 * boundary.
+	 */
 	vma_adjust_trans_huge(vma, vma->vm_start, addr, 0);
+	if (is_vm_hugetlb_page(vma))
+		hugetlb_split(vma, addr);
 
 	if (new_below) {
 		vma->vm_start = addr;
@ -3519,6 +3526,7 @@ struct vm_area_struct *copy_vma(struct vm_area_struct **vmap,
 	return new_vma;
 
 out_vma_link:
+	fixup_hugetlb_reservations(new_vma);
 	vma_close(new_vma);
 
 	if (new_vma->vm_file)
@ -676,9 +676,7 @@ static unsigned long move_vma(struct vm_area_struct *vma,
 		mremap_userfaultfd_prep(new_vma, uf);
 	}
 
-	if (is_vm_hugetlb_page(vma)) {
-		clear_vma_resv_huge_pages(vma);
-	}
+	fixup_hugetlb_reservations(vma);
 
 	/* Conceal VM_ACCOUNT so old reservation is not undone */
 	if (vm_flags & VM_ACCOUNT && !(flags & MREMAP_DONTUNMAP)) {
@ -63,7 +63,7 @@ static DEFINE_IDA(hci_index_ida);
 
 /* Get HCI device by index.
  * Device is held on return. */
-struct hci_dev *hci_dev_get(int index)
+static struct hci_dev *__hci_dev_get(int index, int *srcu_index)
 {
 	struct hci_dev *hdev = NULL, *d;
 
@ -76,6 +76,8 @@ struct hci_dev *hci_dev_get(int index)
 	list_for_each_entry(d, &hci_dev_list, list) {
 		if (d->id == index) {
 			hdev = hci_dev_hold(d);
+			if (srcu_index)
+				*srcu_index = srcu_read_lock(&d->srcu);
 			break;
 		}
 	}
@ -83,6 +85,22 @@ struct hci_dev *hci_dev_get(int index)
 	return hdev;
 }
 
+struct hci_dev *hci_dev_get(int index)
+{
+	return __hci_dev_get(index, NULL);
+}
+
+static struct hci_dev *hci_dev_get_srcu(int index, int *srcu_index)
+{
+	return __hci_dev_get(index, srcu_index);
+}
+
+static void hci_dev_put_srcu(struct hci_dev *hdev, int srcu_index)
+{
+	srcu_read_unlock(&hdev->srcu, srcu_index);
+	hci_dev_put(hdev);
+}
+
 /* ---- Inquiry support ---- */
 
 bool hci_discovery_active(struct hci_dev *hdev)
@ -567,9 +585,9 @@ static int hci_dev_do_reset(struct hci_dev *hdev)
 int hci_dev_reset(__u16 dev)
 {
 	struct hci_dev *hdev;
-	int err;
+	int err, srcu_index;
 
-	hdev = hci_dev_get(dev);
+	hdev = hci_dev_get_srcu(dev, &srcu_index);
 	if (!hdev)
 		return -ENODEV;
 
@ -591,7 +609,7 @@ int hci_dev_reset(__u16 dev)
 	err = hci_dev_do_reset(hdev);
 
 done:
-	hci_dev_put(hdev);
+	hci_dev_put_srcu(hdev, srcu_index);
 	return err;
 }
 
@ -2440,6 +2458,11 @@ struct hci_dev *hci_alloc_dev_priv(int sizeof_priv)
 	if (!hdev)
 		return NULL;
 
+	if (init_srcu_struct(&hdev->srcu)) {
+		kfree(hdev);
+		return NULL;
+	}
+
 	hdev->pkt_type = (HCI_DM1 | HCI_DH1 | HCI_HV1);
 	hdev->esco_type = (ESCO_HV1);
 	hdev->link_mode = (HCI_LM_ACCEPT);
@ -2684,6 +2707,9 @@ void hci_unregister_dev(struct hci_dev *hdev)
 	list_del(&hdev->list);
 	write_unlock(&hci_dev_list_lock);
 
+	synchronize_srcu(&hdev->srcu);
+	cleanup_srcu_struct(&hdev->srcu);
+
 	disable_work_sync(&hdev->rx_work);
 	disable_work_sync(&hdev->cmd_work);
 	disable_work_sync(&hdev->tx_work);
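The hci hunks above are an instance of the standard SRCU object-lifetime pattern: sleepable readers bracket their access with srcu_read_lock()/srcu_read_unlock() on a per-object srcu_struct, and teardown waits in synchronize_srcu() so the object cannot be freed while a long-running reader (here, hci_dev_reset()) is still inside. A condensed, generic sketch of that pattern, assuming only the core SRCU API used in the hunks (the obj_* names are hypothetical):

#include <linux/srcu.h>
#include <linux/slab.h>

struct obj {
	struct srcu_struct srcu;
	/* ... payload ... */
};

static struct obj *obj_create(void)
{
	struct obj *o = kzalloc(sizeof(*o), GFP_KERNEL);

	if (!o)
		return NULL;
	if (init_srcu_struct(&o->srcu)) {	/* same failure handling as the hunk */
		kfree(o);
		return NULL;
	}
	return o;
}

static void obj_use(struct obj *o)
{
	int idx = srcu_read_lock(&o->srcu);	/* read side, may sleep */
	/* ... dereference o safely; it cannot be freed underneath us ... */
	srcu_read_unlock(&o->srcu, idx);
}

static void obj_destroy(struct obj *o)
{
	/* after unpublishing o so no new readers can find it: */
	synchronize_srcu(&o->srcu);		/* wait out in-flight readers */
	cleanup_srcu_struct(&o->srcu);
	kfree(o);
}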
@ -647,12 +647,9 @@ static void *bpf_test_init(const union bpf_attr *kattr, u32 user_size,
 	void __user *data_in = u64_to_user_ptr(kattr->test.data_in);
 	void *data;
 
-	if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
+	if (user_size < ETH_HLEN || user_size > PAGE_SIZE - headroom - tailroom)
 		return ERR_PTR(-EINVAL);
 
-	if (user_size > size)
-		return ERR_PTR(-EMSGSIZE);
-
 	size = SKB_DATA_ALIGN(size);
 	data = kzalloc(size + headroom + tailroom, GFP_USER);
 	if (!data)
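The validation now applies to the length userspace actually supplied (user_size) rather than the derived buffer size, which makes the separate -EMSGSIZE comparison unnecessary. Condensed from the hunk above (no new API assumed):

/* Before: checked 'size', a derived value that may differ from what the
 * user actually provided.
 */
if (size < ETH_HLEN || size > PAGE_SIZE - headroom - tailroom)
	return ERR_PTR(-EINVAL);

/* After: reject based on the caller-supplied length itself. */
if (user_size < ETH_HLEN || user_size > PAGE_SIZE - headroom - tailroom)
	return ERR_PTR(-EINVAL);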
@ -273,6 +273,7 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
 	bool copy_dtor;
 	__sum16 check;
 	__be16 newlen;
+	int ret = 0;
 
 	mss = skb_shinfo(gso_skb)->gso_size;
 	if (gso_skb->len <= sizeof(*uh) + mss)
@ -295,6 +296,10 @@ struct sk_buff *__udp_gso_segment(struct sk_buff *gso_skb,
 	if (skb_pagelen(gso_skb) - sizeof(*uh) == skb_shinfo(gso_skb)->gso_size)
 		return __udp_gso_segment_list(gso_skb, features, is_ipv6);
 
+	ret = __skb_linearize(gso_skb);
+	if (ret)
+		return ERR_PTR(ret);
+
 	/* Setup csum, as fraglist skips this in udp4_gro_receive. */
 	gso_skb->csum_start = skb_transport_header(gso_skb) - gso_skb->head;
 	gso_skb->csum_offset = offsetof(struct udphdr, check);
@ -510,9 +510,8 @@ static int tls_strp_read_sock(struct tls_strparser *strp)
 	if (inq < strp->stm.full_len)
 		return tls_strp_read_copy(strp, true);
 
+	tls_strp_load_anchor_with_queue(strp, inq);
 	if (!strp->stm.full_len) {
-		tls_strp_load_anchor_with_queue(strp, inq);
-
 		sz = tls_rx_msg_size(strp, strp->anchor);
 		if (sz < 0) {
 			tls_strp_abort_strp(strp, sz);
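The reordering makes the anchor refresh unconditional: tls_strp_load_anchor_with_queue() now runs on every pass through tls_strp_read_sock(), not only when full_len is still unknown, so the parser never acts on an anchor skb that predates newer queue activity. The shape of the change, condensed (illustrative only):

/* Before: refresh only on the slow path, leaving a stale anchor otherwise. */
if (!strp->stm.full_len) {
	tls_strp_load_anchor_with_queue(strp, inq);
	/* ... */
}

/* After: always refresh before the anchor is consulted. */
tls_strp_load_anchor_with_queue(strp, inq);
if (!strp->stm.full_len) {
	/* ... */
}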
1 redhat/configs/common/generic/x86/CONFIG_MITIGATION_ITS (Normal file)
@ -0,0 +1 @@
+CONFIG_MITIGATION_ITS=y
@ -1,11 +1,77 @@
-* Thu Aug 07 2025 Patrick Talbert <ptalbert@redhat.com> [5.14.0-570.33.2.el9_6]
+* Sat Aug 09 2025 Patrick Talbert <ptalbert@redhat.com> [5.14.0-570.35.1.el9_6]
+- s390/dasd: Remove DMA alignment (CKI Backport Bot) [RHEL-91593]
+- s390/cpumf: Update CPU Measurement facility extended counter set support (CKI Backport Bot) [RHEL-103066]
+- s390/topology: Improve topology detection (CKI Backport Bot) [RHEL-92100]
+- s390/pai: export number of sysfs attribute files (CKI Backport Bot) [RHEL-87178]
+- s390/pai: fix attr_event_free upper limit for pai device drivers (CKI Backport Bot) [RHEL-87178]
+- powerpc/64s/radix/kfence: map __kfence_pool at page granularity (Mamatha Inamdar) [RHEL-92081]
+- wifi: rtw88: fix the 'para' buffer size to avoid reading out of bounds (CKI Backport Bot) [RHEL-103151] {CVE-2025-38159}
+- redhat: Mark kernel incompatible with xdp-tools<1.5.4 (Felix Maurer) [RHEL-101008]
+- bpf, test_run: Fix use-after-free issue in eth_skb_pkt_type() (CKI Backport Bot) [RHEL-101008] {CVE-2025-21867}
+- arm64: proton-pack: Add new CPUs 'k' values for branch mitigation (Waiman Long) [RHEL-100603]
+- arm64: bpf: Only mitigate cBPF programs loaded by unprivileged users (Waiman Long) [RHEL-100603] {CVE-2025-37963}
+- arm64: bpf: Add BHB mitigation to the epilogue for cBPF programs (Waiman Long) [RHEL-100603] {CVE-2025-37948}
+- arm64: proton-pack: Expose whether the branchy loop k value (Waiman Long) [RHEL-100603]
+- arm64: proton-pack: Expose whether the platform is mitigated by firmware (Waiman Long) [RHEL-100603]
+- arm64: insn: Add support for encoding DSB (Waiman Long) [RHEL-100603]
+- redhat/configs: Enable CONFIG_MITIGATION_ITS for x86 (Waiman Long) [RHEL-100603]
+- selftest/x86/bugs: Add selftests for ITS (Waiman Long) [RHEL-100603 RHEL-92182] {CVE-2024-28956}
+- x86/ibt: Keep IBT disabled during alternative patching (Waiman Long) [RHEL-100603 RHEL-92182] {CVE-2024-28956}
+- x86/its: Align RETs in BHB clear sequence to avoid thunking (Waiman Long) [RHEL-100603 RHEL-92182] {CVE-2024-28956}
+- x86/its: Add support for RSB stuffing mitigation (Waiman Long) [RHEL-100603 RHEL-92182] {CVE-2024-28956}
+- x86/its: Add "vmexit" option to skip mitigation on some CPUs (Waiman Long) [RHEL-100603 RHEL-92182] {CVE-2024-28956}
+- x86/its: Enable Indirect Target Selection mitigation (Waiman Long) [RHEL-100603 RHEL-92182] {CVE-2024-28956}
+- x86/its: Add support for ITS-safe return thunk (Waiman Long) [RHEL-100603 RHEL-92182] {CVE-2024-28956}
+- x86/its: Add support for ITS-safe indirect thunk (Waiman Long) [RHEL-100603 RHEL-92182] {CVE-2024-28956}
+- x86/its: Enumerate Indirect Target Selection (ITS) bug (Waiman Long) [RHEL-100603 RHEL-92182] {CVE-2024-28956}
+- Documentation: x86/bugs/its: Add ITS documentation (Waiman Long) [RHEL-100603 RHEL-92182] {CVE-2024-28956}
+- x86/bhi: Do not set BHI_DIS_S in 32-bit mode (Waiman Long) [RHEL-100603]
+- x86/bpf: Add IBHF call at end of classic BPF (Waiman Long) [RHEL-100603]
+- x86/bpf: Call branch history clearing sequence on exit (Waiman Long) [RHEL-100603]
+- arm64: errata: Assume that unknown CPUs _are_ vulnerable to Spectre BHB (Waiman Long) [RHEL-100603]
+- arm64: errata: Add QCOM_KRYO_4XX_GOLD to the spectre_bhb_k24_list (Waiman Long) [RHEL-100603]
+- x86/rfds: Exclude P-only parts from the RFDS affected list (Waiman Long) [RHEL-100603]
+- x86/cpu: Update x86_match_cpu() to also use cpu-type (Waiman Long) [RHEL-100603]
+- x86/cpu: Add cpu_type to struct x86_cpu_id (Waiman Long) [RHEL-100603]
+- x86/cpu: Shorten CPU matching macro (Waiman Long) [RHEL-100603]
+- x86/cpu: Fix the description of X86_MATCH_VFM_STEPS() (Waiman Long) [RHEL-100603]
+- selftests: Warn about skipped tests in result summary (Waiman Long) [RHEL-100603]
+- x86/cpu: Fix typo in x86_match_cpu()'s doc (Waiman Long) [RHEL-100603]
+- x86/cpu: Expose only stepping min/max interface (Waiman Long) [RHEL-100603]
+- x86/cpu: Add CPU type to struct cpuinfo_topology (Waiman Long) [RHEL-100603]
+- x86/cpufeatures: Add X86_FEATURE_AMD_HETEROGENEOUS_CORES (Waiman Long) [RHEL-100603]
+- x86/cpufeatures: Rename X86_FEATURE_FAST_CPPC to have AMD prefix (Waiman Long) [RHEL-100603]
+- tools/include: Sync x86 headers with the kernel sources (Waiman Long) [RHEL-100603]
+- selftests: ksft: Fix finished() helper exit code on skipped tests (Waiman Long) [RHEL-100603]
+- kselftest: Move ksft helper module to common directory (Waiman Long) [RHEL-100603]
+- platform/x86/intel/ifs: Switch to new Intel CPU model defines (Waiman Long) [RHEL-100603]
+- x86/platform/atom: Switch to new Intel CPU model defines (Waiman Long) [RHEL-100603]
+- cpufreq: Switch to new Intel CPU model defines (Waiman Long) [RHEL-100603]
+- x86/bugs: Add 'spectre_bhi=vmexit' cmdline option (Waiman Long) [RHEL-100603]
+- EDAC/skx: Switch to new Intel CPU model defines (Waiman Long) [RHEL-100603]
+- EDAC/i10nm: Switch to new Intel CPU model defines (Waiman Long) [RHEL-100603]
+- x86/cpu: Fix x86_match_cpu() to match just X86_VENDOR_INTEL (Waiman Long) [RHEL-100603]
+- x86/aperfmperf: Switch to new Intel CPU model defines (Waiman Long) [RHEL-100603]
+- x86/apic: Switch to new Intel CPU model defines (Waiman Long) [RHEL-100603]
+- x86/bugs: Switch to new Intel CPU model defines (Waiman Long) [RHEL-100603]
+- EDAC/i10nm: Add Intel Grand Ridge micro-server support (Waiman Long) [RHEL-100603]
 - Revert "sch_htb: make htb_qlen_notify() idempotent" (Patrick Talbert) [RHEL-108138]
 - Revert "sch_drr: make drr_qlen_notify() idempotent" (Patrick Talbert) [RHEL-108138]
 - Revert "sch_qfq: make qfq_qlen_notify() idempotent" (Patrick Talbert) [RHEL-108138]
 - Revert "codel: remove sch->q.qlen check before qdisc_tree_reduce_backlog()" (Patrick Talbert) [RHEL-108138]
 - Revert "sch_htb: make htb_deactivate() idempotent" (Patrick Talbert) [RHEL-108138]
 - Revert "net/sched: Always pass notifications when child class becomes empty" (Patrick Talbert) [RHEL-108138]
-Resolves: RHEL-108138
+Resolves: RHEL-100603, RHEL-101008, RHEL-103066, RHEL-103151, RHEL-108138, RHEL-87178, RHEL-91593, RHEL-92081, RHEL-92100, RHEL-92182
+
+* Wed Aug 06 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-570.34.1.el9_6]
+- i2c/designware: Fix an initialization issue (CKI Backport Bot) [RHEL-106625] {CVE-2025-38380}
+- tls: always refresh the queue when reading sock (CKI Backport Bot) [RHEL-106081] {CVE-2025-38471}
+- net: fix udp gso skb_segment after pull from frag_list (Guillaume Nault) [RHEL-103028] {CVE-2025-38124}
+- mm/hugetlb: fix huge_pmd_unshare() vs GUP-fast race (Rafael Aquini) [RHEL-101246] {CVE-2025-38085}
+- mm/hugetlb: unshare page tables during VMA split, not before (Rafael Aquini) [RHEL-101282] {CVE-2025-38084}
+- mm: fix copy_vma() error handling for hugetlb mappings (Rafael Aquini) [RHEL-101282]
+- Bluetooth: hci_core: Fix use-after-free in vhci_flush() (CKI Backport Bot) [RHEL-103256] {CVE-2025-38250}
+Resolves: RHEL-101246, RHEL-101282, RHEL-103028, RHEL-103256, RHEL-106081, RHEL-106625
+
 * Sat Aug 02 2025 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-570.33.1.el9_6]
 - net/sched: Always pass notifications when child class becomes empty (CKI Backport Bot) [RHEL-93387] {CVE-2025-38350}
@ -989,6 +989,7 @@ Recommends: linux-firmware\
 Requires(preun): systemd >= 200\
 Conflicts: xfsprogs < 4.3.0-1\
 Conflicts: xorg-x11-drv-vmmouse < 13.0.99\
+Conflicts: xdp-tools < 1.5.4\
 %{expand:%%{?kernel%{?1:_%{1}}_conflicts:Conflicts: %%{kernel%{?1:_%{1}}_conflicts}}}\
 %{expand:%%{?kernel%{?1:_%{1}}_obsoletes:Obsoletes: %%{kernel%{?1:_%{1}}_obsoletes}}}\
 %{expand:%%{?kernel%{?1:_%{1}}_provides:Provides: %%{kernel%{?1:_%{1}}_provides}}}\
@ -20,170 +20,170 @@
 
 /*
  * Note: If the comment begins with a quoted string, that string is used
- * in /proc/cpuinfo instead of the macro name. If the string is "",
- * this feature bit is not displayed in /proc/cpuinfo at all.
+ * in /proc/cpuinfo instead of the macro name. Otherwise, this feature
+ * bit is not displayed in /proc/cpuinfo at all.
  *
  * When adding new features here that depend on other features,
  * please update the table in kernel/cpu/cpuid-deps.c as well.
  */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (EDX), word 0 */
-#define X86_FEATURE_FPU ( 0*32+ 0) /* Onboard FPU */
+#define X86_FEATURE_FPU ( 0*32+ 0) /* "fpu" Onboard FPU */
-#define X86_FEATURE_VME ( 0*32+ 1) /* Virtual Mode Extensions */
+#define X86_FEATURE_VME ( 0*32+ 1) /* "vme" Virtual Mode Extensions */
-#define X86_FEATURE_DE ( 0*32+ 2) /* Debugging Extensions */
+#define X86_FEATURE_DE ( 0*32+ 2) /* "de" Debugging Extensions */
-#define X86_FEATURE_PSE ( 0*32+ 3) /* Page Size Extensions */
+#define X86_FEATURE_PSE ( 0*32+ 3) /* "pse" Page Size Extensions */
-#define X86_FEATURE_TSC ( 0*32+ 4) /* Time Stamp Counter */
+#define X86_FEATURE_TSC ( 0*32+ 4) /* "tsc" Time Stamp Counter */
-#define X86_FEATURE_MSR ( 0*32+ 5) /* Model-Specific Registers */
+#define X86_FEATURE_MSR ( 0*32+ 5) /* "msr" Model-Specific Registers */
-#define X86_FEATURE_PAE ( 0*32+ 6) /* Physical Address Extensions */
+#define X86_FEATURE_PAE ( 0*32+ 6) /* "pae" Physical Address Extensions */
-#define X86_FEATURE_MCE ( 0*32+ 7) /* Machine Check Exception */
+#define X86_FEATURE_MCE ( 0*32+ 7) /* "mce" Machine Check Exception */
-#define X86_FEATURE_CX8 ( 0*32+ 8) /* CMPXCHG8 instruction */
+#define X86_FEATURE_CX8 ( 0*32+ 8) /* "cx8" CMPXCHG8 instruction */
-#define X86_FEATURE_APIC ( 0*32+ 9) /* Onboard APIC */
+#define X86_FEATURE_APIC ( 0*32+ 9) /* "apic" Onboard APIC */
-#define X86_FEATURE_SEP ( 0*32+11) /* SYSENTER/SYSEXIT */
+#define X86_FEATURE_SEP ( 0*32+11) /* "sep" SYSENTER/SYSEXIT */
-#define X86_FEATURE_MTRR ( 0*32+12) /* Memory Type Range Registers */
+#define X86_FEATURE_MTRR ( 0*32+12) /* "mtrr" Memory Type Range Registers */
-#define X86_FEATURE_PGE ( 0*32+13) /* Page Global Enable */
+#define X86_FEATURE_PGE ( 0*32+13) /* "pge" Page Global Enable */
-#define X86_FEATURE_MCA ( 0*32+14) /* Machine Check Architecture */
+#define X86_FEATURE_MCA ( 0*32+14) /* "mca" Machine Check Architecture */
-#define X86_FEATURE_CMOV ( 0*32+15) /* CMOV instructions (plus FCMOVcc, FCOMI with FPU) */
+#define X86_FEATURE_CMOV ( 0*32+15) /* "cmov" CMOV instructions (plus FCMOVcc, FCOMI with FPU) */
-#define X86_FEATURE_PAT ( 0*32+16) /* Page Attribute Table */
+#define X86_FEATURE_PAT ( 0*32+16) /* "pat" Page Attribute Table */
-#define X86_FEATURE_PSE36 ( 0*32+17) /* 36-bit PSEs */
+#define X86_FEATURE_PSE36 ( 0*32+17) /* "pse36" 36-bit PSEs */
-#define X86_FEATURE_PN ( 0*32+18) /* Processor serial number */
+#define X86_FEATURE_PN ( 0*32+18) /* "pn" Processor serial number */
-#define X86_FEATURE_CLFLUSH ( 0*32+19) /* CLFLUSH instruction */
+#define X86_FEATURE_CLFLUSH ( 0*32+19) /* "clflush" CLFLUSH instruction */
 #define X86_FEATURE_DS ( 0*32+21) /* "dts" Debug Store */
-#define X86_FEATURE_ACPI ( 0*32+22) /* ACPI via MSR */
+#define X86_FEATURE_ACPI ( 0*32+22) /* "acpi" ACPI via MSR */
-#define X86_FEATURE_MMX ( 0*32+23) /* Multimedia Extensions */
+#define X86_FEATURE_MMX ( 0*32+23) /* "mmx" Multimedia Extensions */
-#define X86_FEATURE_FXSR ( 0*32+24) /* FXSAVE/FXRSTOR, CR4.OSFXSR */
+#define X86_FEATURE_FXSR ( 0*32+24) /* "fxsr" FXSAVE/FXRSTOR, CR4.OSFXSR */
 #define X86_FEATURE_XMM ( 0*32+25) /* "sse" */
 #define X86_FEATURE_XMM2 ( 0*32+26) /* "sse2" */
 #define X86_FEATURE_SELFSNOOP ( 0*32+27) /* "ss" CPU self snoop */
-#define X86_FEATURE_HT ( 0*32+28) /* Hyper-Threading */
+#define X86_FEATURE_HT ( 0*32+28) /* "ht" Hyper-Threading */
 #define X86_FEATURE_ACC ( 0*32+29) /* "tm" Automatic clock control */
-#define X86_FEATURE_IA64 ( 0*32+30) /* IA-64 processor */
+#define X86_FEATURE_IA64 ( 0*32+30) /* "ia64" IA-64 processor */
-#define X86_FEATURE_PBE ( 0*32+31) /* Pending Break Enable */
+#define X86_FEATURE_PBE ( 0*32+31) /* "pbe" Pending Break Enable */
 
 /* AMD-defined CPU features, CPUID level 0x80000001, word 1 */
 /* Don't duplicate feature flags which are redundant with Intel! */
-#define X86_FEATURE_SYSCALL ( 1*32+11) /* SYSCALL/SYSRET */
+#define X86_FEATURE_SYSCALL ( 1*32+11) /* "syscall" SYSCALL/SYSRET */
-#define X86_FEATURE_MP ( 1*32+19) /* MP Capable */
+#define X86_FEATURE_MP ( 1*32+19) /* "mp" MP Capable */
-#define X86_FEATURE_NX ( 1*32+20) /* Execute Disable */
+#define X86_FEATURE_NX ( 1*32+20) /* "nx" Execute Disable */
-#define X86_FEATURE_MMXEXT ( 1*32+22) /* AMD MMX extensions */
+#define X86_FEATURE_MMXEXT ( 1*32+22) /* "mmxext" AMD MMX extensions */
-#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* FXSAVE/FXRSTOR optimizations */
+#define X86_FEATURE_FXSR_OPT ( 1*32+25) /* "fxsr_opt" FXSAVE/FXRSTOR optimizations */
 #define X86_FEATURE_GBPAGES ( 1*32+26) /* "pdpe1gb" GB pages */
-#define X86_FEATURE_RDTSCP ( 1*32+27) /* RDTSCP */
+#define X86_FEATURE_RDTSCP ( 1*32+27) /* "rdtscp" RDTSCP */
-#define X86_FEATURE_LM ( 1*32+29) /* Long Mode (x86-64, 64-bit support) */
+#define X86_FEATURE_LM ( 1*32+29) /* "lm" Long Mode (x86-64, 64-bit support) */
-#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* AMD 3DNow extensions */
+#define X86_FEATURE_3DNOWEXT ( 1*32+30) /* "3dnowext" AMD 3DNow extensions */
-#define X86_FEATURE_3DNOW ( 1*32+31) /* 3DNow */
+#define X86_FEATURE_3DNOW ( 1*32+31) /* "3dnow" 3DNow */
 
 /* Transmeta-defined CPU features, CPUID level 0x80860001, word 2 */
-#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* CPU in recovery mode */
+#define X86_FEATURE_RECOVERY ( 2*32+ 0) /* "recovery" CPU in recovery mode */
-#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* Longrun power control */
+#define X86_FEATURE_LONGRUN ( 2*32+ 1) /* "longrun" Longrun power control */
-#define X86_FEATURE_LRTI ( 2*32+ 3) /* LongRun table interface */
+#define X86_FEATURE_LRTI ( 2*32+ 3) /* "lrti" LongRun table interface */
 
 /* Other features, Linux-defined mapping, word 3 */
 /* This range is used for feature bits which conflict or are synthesized */
-#define X86_FEATURE_CXMMX ( 3*32+ 0) /* Cyrix MMX extensions */
+#define X86_FEATURE_CXMMX ( 3*32+ 0) /* "cxmmx" Cyrix MMX extensions */
-#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* AMD K6 nonstandard MTRRs */
+#define X86_FEATURE_K6_MTRR ( 3*32+ 1) /* "k6_mtrr" AMD K6 nonstandard MTRRs */
-#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* Cyrix ARRs (= MTRRs) */
+#define X86_FEATURE_CYRIX_ARR ( 3*32+ 2) /* "cyrix_arr" Cyrix ARRs (= MTRRs) */
-#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* Centaur MCRs (= MTRRs) */
+#define X86_FEATURE_CENTAUR_MCR ( 3*32+ 3) /* "centaur_mcr" Centaur MCRs (= MTRRs) */
-#define X86_FEATURE_K8 ( 3*32+ 4) /* "" Opteron, Athlon64 */
+#define X86_FEATURE_K8 ( 3*32+ 4) /* Opteron, Athlon64 */
-#define X86_FEATURE_ZEN5 ( 3*32+ 5) /* "" CPU based on Zen5 microarchitecture */
+#define X86_FEATURE_ZEN5 ( 3*32+ 5) /* CPU based on Zen5 microarchitecture */
-#define X86_FEATURE_P3 ( 3*32+ 6) /* "" P3 */
+#define X86_FEATURE_P3 ( 3*32+ 6) /* P3 */
-#define X86_FEATURE_P4 ( 3*32+ 7) /* "" P4 */
+#define X86_FEATURE_P4 ( 3*32+ 7) /* P4 */
-#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* TSC ticks at a constant rate */
+#define X86_FEATURE_CONSTANT_TSC ( 3*32+ 8) /* "constant_tsc" TSC ticks at a constant rate */
-#define X86_FEATURE_UP ( 3*32+ 9) /* SMP kernel running on UP */
+#define X86_FEATURE_UP ( 3*32+ 9) /* "up" SMP kernel running on UP */
-#define X86_FEATURE_ART ( 3*32+10) /* Always running timer (ART) */
+#define X86_FEATURE_ART ( 3*32+10) /* "art" Always running timer (ART) */
-#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* Intel Architectural PerfMon */
+#define X86_FEATURE_ARCH_PERFMON ( 3*32+11) /* "arch_perfmon" Intel Architectural PerfMon */
-#define X86_FEATURE_PEBS ( 3*32+12) /* Precise-Event Based Sampling */
+#define X86_FEATURE_PEBS ( 3*32+12) /* "pebs" Precise-Event Based Sampling */
-#define X86_FEATURE_BTS ( 3*32+13) /* Branch Trace Store */
+#define X86_FEATURE_BTS ( 3*32+13) /* "bts" Branch Trace Store */
-#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* "" syscall in IA32 userspace */
+#define X86_FEATURE_SYSCALL32 ( 3*32+14) /* syscall in IA32 userspace */
-#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* "" sysenter in IA32 userspace */
+#define X86_FEATURE_SYSENTER32 ( 3*32+15) /* sysenter in IA32 userspace */
-#define X86_FEATURE_REP_GOOD ( 3*32+16) /* REP microcode works well */
+#define X86_FEATURE_REP_GOOD ( 3*32+16) /* "rep_good" REP microcode works well */
-#define X86_FEATURE_AMD_LBR_V2 ( 3*32+17) /* AMD Last Branch Record Extension Version 2 */
+#define X86_FEATURE_AMD_LBR_V2 ( 3*32+17) /* "amd_lbr_v2" AMD Last Branch Record Extension Version 2 */
-#define X86_FEATURE_CLEAR_CPU_BUF ( 3*32+18) /* "" Clear CPU buffers using VERW */
+#define X86_FEATURE_CLEAR_CPU_BUF ( 3*32+18) /* Clear CPU buffers using VERW */
-#define X86_FEATURE_ACC_POWER ( 3*32+19) /* AMD Accumulated Power Mechanism */
+#define X86_FEATURE_ACC_POWER ( 3*32+19) /* "acc_power" AMD Accumulated Power Mechanism */
-#define X86_FEATURE_NOPL ( 3*32+20) /* The NOPL (0F 1F) instructions */
+#define X86_FEATURE_NOPL ( 3*32+20) /* "nopl" The NOPL (0F 1F) instructions */
-#define X86_FEATURE_ALWAYS ( 3*32+21) /* "" Always-present feature */
+#define X86_FEATURE_ALWAYS ( 3*32+21) /* Always-present feature */
-#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* CPU topology enum extensions */
+#define X86_FEATURE_XTOPOLOGY ( 3*32+22) /* "xtopology" CPU topology enum extensions */
-#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* TSC is known to be reliable */
+#define X86_FEATURE_TSC_RELIABLE ( 3*32+23) /* "tsc_reliable" TSC is known to be reliable */
-#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* TSC does not stop in C states */
+#define X86_FEATURE_NONSTOP_TSC ( 3*32+24) /* "nonstop_tsc" TSC does not stop in C states */
-#define X86_FEATURE_CPUID ( 3*32+25) /* CPU has CPUID instruction itself */
+#define X86_FEATURE_CPUID ( 3*32+25) /* "cpuid" CPU has CPUID instruction itself */
-#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* Extended APICID (8 bits) */
+#define X86_FEATURE_EXTD_APICID ( 3*32+26) /* "extd_apicid" Extended APICID (8 bits) */
-#define X86_FEATURE_AMD_DCM ( 3*32+27) /* AMD multi-node processor */
+#define X86_FEATURE_AMD_DCM ( 3*32+27) /* "amd_dcm" AMD multi-node processor */
-#define X86_FEATURE_APERFMPERF ( 3*32+28) /* P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
+#define X86_FEATURE_APERFMPERF ( 3*32+28) /* "aperfmperf" P-State hardware coordination feedback capability (APERF/MPERF MSRs) */
-#define X86_FEATURE_RAPL ( 3*32+29) /* AMD/Hygon RAPL interface */
+#define X86_FEATURE_RAPL ( 3*32+29) /* "rapl" AMD/Hygon RAPL interface */
-#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* TSC doesn't stop in S3 state */
+#define X86_FEATURE_NONSTOP_TSC_S3 ( 3*32+30) /* "nonstop_tsc_s3" TSC doesn't stop in S3 state */
-#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* TSC has known frequency */
+#define X86_FEATURE_TSC_KNOWN_FREQ ( 3*32+31) /* "tsc_known_freq" TSC has known frequency */
 
 /* Intel-defined CPU features, CPUID level 0x00000001 (ECX), word 4 */
 #define X86_FEATURE_XMM3 ( 4*32+ 0) /* "pni" SSE-3 */
-#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* PCLMULQDQ instruction */
+#define X86_FEATURE_PCLMULQDQ ( 4*32+ 1) /* "pclmulqdq" PCLMULQDQ instruction */
-#define X86_FEATURE_DTES64 ( 4*32+ 2) /* 64-bit Debug Store */
+#define X86_FEATURE_DTES64 ( 4*32+ 2) /* "dtes64" 64-bit Debug Store */
 #define X86_FEATURE_MWAIT ( 4*32+ 3) /* "monitor" MONITOR/MWAIT support */
 #define X86_FEATURE_DSCPL ( 4*32+ 4) /* "ds_cpl" CPL-qualified (filtered) Debug Store */
-#define X86_FEATURE_VMX ( 4*32+ 5) /* Hardware virtualization */
+#define X86_FEATURE_VMX ( 4*32+ 5) /* "vmx" Hardware virtualization */
-#define X86_FEATURE_SMX ( 4*32+ 6) /* Safer Mode eXtensions */
+#define X86_FEATURE_SMX ( 4*32+ 6) /* "smx" Safer Mode eXtensions */
-#define X86_FEATURE_EST ( 4*32+ 7) /* Enhanced SpeedStep */
+#define X86_FEATURE_EST ( 4*32+ 7) /* "est" Enhanced SpeedStep */
-#define X86_FEATURE_TM2 ( 4*32+ 8) /* Thermal Monitor 2 */
+#define X86_FEATURE_TM2 ( 4*32+ 8) /* "tm2" Thermal Monitor 2 */
-#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* Supplemental SSE-3 */
+#define X86_FEATURE_SSSE3 ( 4*32+ 9) /* "ssse3" Supplemental SSE-3 */
-#define X86_FEATURE_CID ( 4*32+10) /* Context ID */
+#define X86_FEATURE_CID ( 4*32+10) /* "cid" Context ID */
-#define X86_FEATURE_SDBG ( 4*32+11) /* Silicon Debug */
+#define X86_FEATURE_SDBG ( 4*32+11) /* "sdbg" Silicon Debug */
-#define X86_FEATURE_FMA ( 4*32+12) /* Fused multiply-add */
+#define X86_FEATURE_FMA ( 4*32+12) /* "fma" Fused multiply-add */
-#define X86_FEATURE_CX16 ( 4*32+13) /* CMPXCHG16B instruction */
+#define X86_FEATURE_CX16 ( 4*32+13) /* "cx16" CMPXCHG16B instruction */
-#define X86_FEATURE_XTPR ( 4*32+14) /* Send Task Priority Messages */
+#define X86_FEATURE_XTPR ( 4*32+14) /* "xtpr" Send Task Priority Messages */
-#define X86_FEATURE_PDCM ( 4*32+15) /* Perf/Debug Capabilities MSR */
+#define X86_FEATURE_PDCM ( 4*32+15) /* "pdcm" Perf/Debug Capabilities MSR */
-#define X86_FEATURE_PCID ( 4*32+17) /* Process Context Identifiers */
+#define X86_FEATURE_PCID ( 4*32+17) /* "pcid" Process Context Identifiers */
-#define X86_FEATURE_DCA ( 4*32+18) /* Direct Cache Access */
+#define X86_FEATURE_DCA ( 4*32+18) /* "dca" Direct Cache Access */
 #define X86_FEATURE_XMM4_1 ( 4*32+19) /* "sse4_1" SSE-4.1 */
 #define X86_FEATURE_XMM4_2 ( 4*32+20) /* "sse4_2" SSE-4.2 */
-#define X86_FEATURE_X2APIC ( 4*32+21) /* X2APIC */
+#define X86_FEATURE_X2APIC ( 4*32+21) /* "x2apic" X2APIC */
-#define X86_FEATURE_MOVBE ( 4*32+22) /* MOVBE instruction */
+#define X86_FEATURE_MOVBE ( 4*32+22) /* "movbe" MOVBE instruction */
-#define X86_FEATURE_POPCNT ( 4*32+23) /* POPCNT instruction */
+#define X86_FEATURE_POPCNT ( 4*32+23) /* "popcnt" POPCNT instruction */
-#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* TSC deadline timer */
+#define X86_FEATURE_TSC_DEADLINE_TIMER ( 4*32+24) /* "tsc_deadline_timer" TSC deadline timer */
-#define X86_FEATURE_AES ( 4*32+25) /* AES instructions */
+#define X86_FEATURE_AES ( 4*32+25) /* "aes" AES instructions */
-#define X86_FEATURE_XSAVE ( 4*32+26) /* XSAVE/XRSTOR/XSETBV/XGETBV instructions */
+#define X86_FEATURE_XSAVE ( 4*32+26) /* "xsave" XSAVE/XRSTOR/XSETBV/XGETBV instructions */
-#define X86_FEATURE_OSXSAVE ( 4*32+27) /* "" XSAVE instruction enabled in the OS */
+#define X86_FEATURE_OSXSAVE ( 4*32+27) /* XSAVE instruction enabled in the OS */
-#define X86_FEATURE_AVX ( 4*32+28) /* Advanced Vector Extensions */
+#define X86_FEATURE_AVX ( 4*32+28) /* "avx" Advanced Vector Extensions */
-#define X86_FEATURE_F16C ( 4*32+29) /* 16-bit FP conversions */
+#define X86_FEATURE_F16C ( 4*32+29) /* "f16c" 16-bit FP conversions */
-#define X86_FEATURE_RDRAND ( 4*32+30) /* RDRAND instruction */
+#define X86_FEATURE_RDRAND ( 4*32+30) /* "rdrand" RDRAND instruction */
-#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* Running on a hypervisor */
+#define X86_FEATURE_HYPERVISOR ( 4*32+31) /* "hypervisor" Running on a hypervisor */
 
 /* VIA/Cyrix/Centaur-defined CPU features, CPUID level 0xC0000001, word 5 */
 #define X86_FEATURE_XSTORE ( 5*32+ 2) /* "rng" RNG present (xstore) */
 #define X86_FEATURE_XSTORE_EN ( 5*32+ 3) /* "rng_en" RNG enabled */
 #define X86_FEATURE_XCRYPT ( 5*32+ 6) /* "ace" on-CPU crypto (xcrypt) */
 #define X86_FEATURE_XCRYPT_EN ( 5*32+ 7) /* "ace_en" on-CPU crypto enabled */
-#define X86_FEATURE_ACE2 ( 5*32+ 8) /* Advanced Cryptography Engine v2 */
+#define X86_FEATURE_ACE2 ( 5*32+ 8) /* "ace2" Advanced Cryptography Engine v2 */
-#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* ACE v2 enabled */
+#define X86_FEATURE_ACE2_EN ( 5*32+ 9) /* "ace2_en" ACE v2 enabled */
-#define X86_FEATURE_PHE ( 5*32+10) /* PadLock Hash Engine */
+#define X86_FEATURE_PHE ( 5*32+10) /* "phe" PadLock Hash Engine */
-#define X86_FEATURE_PHE_EN ( 5*32+11) /* PHE enabled */
+#define X86_FEATURE_PHE_EN ( 5*32+11) /* "phe_en" PHE enabled */
-#define X86_FEATURE_PMM ( 5*32+12) /* PadLock Montgomery Multiplier */
+#define X86_FEATURE_PMM ( 5*32+12) /* "pmm" PadLock Montgomery Multiplier */
-#define X86_FEATURE_PMM_EN ( 5*32+13) /* PMM enabled */
+#define X86_FEATURE_PMM_EN ( 5*32+13) /* "pmm_en" PMM enabled */
 
 /* More extended AMD flags: CPUID level 0x80000001, ECX, word 6 */
-#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* LAHF/SAHF in long mode */
+#define X86_FEATURE_LAHF_LM ( 6*32+ 0) /* "lahf_lm" LAHF/SAHF in long mode */
-#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* If yes HyperThreading not valid */
+#define X86_FEATURE_CMP_LEGACY ( 6*32+ 1) /* "cmp_legacy" If yes HyperThreading not valid */
-#define X86_FEATURE_SVM ( 6*32+ 2) /* Secure Virtual Machine */
+#define X86_FEATURE_SVM ( 6*32+ 2) /* "svm" Secure Virtual Machine */
-#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* Extended APIC space */
+#define X86_FEATURE_EXTAPIC ( 6*32+ 3) /* "extapic" Extended APIC space */
-#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* CR8 in 32-bit mode */
+#define X86_FEATURE_CR8_LEGACY ( 6*32+ 4) /* "cr8_legacy" CR8 in 32-bit mode */
-#define X86_FEATURE_ABM ( 6*32+ 5) /* Advanced bit manipulation */
+#define X86_FEATURE_ABM ( 6*32+ 5) /* "abm" Advanced bit manipulation */
-#define X86_FEATURE_SSE4A ( 6*32+ 6) /* SSE-4A */
+#define X86_FEATURE_SSE4A ( 6*32+ 6) /* "sse4a" SSE-4A */
-#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* Misaligned SSE mode */
+#define X86_FEATURE_MISALIGNSSE ( 6*32+ 7) /* "misalignsse" Misaligned SSE mode */
-#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* 3DNow prefetch instructions */
+#define X86_FEATURE_3DNOWPREFETCH ( 6*32+ 8) /* "3dnowprefetch" 3DNow prefetch instructions */
-#define X86_FEATURE_OSVW ( 6*32+ 9) /* OS Visible Workaround */
+#define X86_FEATURE_OSVW ( 6*32+ 9) /* "osvw" OS Visible Workaround */
-#define X86_FEATURE_IBS ( 6*32+10) /* Instruction Based Sampling */
+#define X86_FEATURE_IBS ( 6*32+10) /* "ibs" Instruction Based Sampling */
-#define X86_FEATURE_XOP ( 6*32+11) /* extended AVX instructions */
+#define X86_FEATURE_XOP ( 6*32+11) /* "xop" Extended AVX instructions */
-#define X86_FEATURE_SKINIT ( 6*32+12) /* SKINIT/STGI instructions */
+#define X86_FEATURE_SKINIT ( 6*32+12) /* "skinit" SKINIT/STGI instructions */
-#define X86_FEATURE_WDT ( 6*32+13) /* Watchdog timer */
+#define X86_FEATURE_WDT ( 6*32+13) /* "wdt" Watchdog timer */
-#define X86_FEATURE_LWP ( 6*32+15) /* Light Weight Profiling */
+#define X86_FEATURE_LWP ( 6*32+15) /* "lwp" Light Weight Profiling */
-#define X86_FEATURE_FMA4 ( 6*32+16) /* 4 operands MAC instructions */
+#define X86_FEATURE_FMA4 ( 6*32+16) /* "fma4" 4 operands MAC instructions */
-#define X86_FEATURE_TCE ( 6*32+17) /* Translation Cache Extension */
+#define X86_FEATURE_TCE ( 6*32+17) /* "tce" Translation Cache Extension */
-#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* NodeId MSR */
+#define X86_FEATURE_NODEID_MSR ( 6*32+19) /* "nodeid_msr" NodeId MSR */
-#define X86_FEATURE_TBM ( 6*32+21) /* Trailing Bit Manipulations */
+#define X86_FEATURE_TBM ( 6*32+21) /* "tbm" Trailing Bit Manipulations */
-#define X86_FEATURE_TOPOEXT ( 6*32+22) /* Topology extensions CPUID leafs */
+#define X86_FEATURE_TOPOEXT ( 6*32+22) /* "topoext" Topology extensions CPUID leafs */
-#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* Core performance counter extensions */
+#define X86_FEATURE_PERFCTR_CORE ( 6*32+23) /* "perfctr_core" Core performance counter extensions */
-#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* NB performance counter extensions */
+#define X86_FEATURE_PERFCTR_NB ( 6*32+24) /* "perfctr_nb" NB performance counter extensions */
-#define X86_FEATURE_BPEXT ( 6*32+26) /* Data breakpoint extension */
+#define X86_FEATURE_BPEXT ( 6*32+26) /* "bpext" Data breakpoint extension */
-#define X86_FEATURE_PTSC ( 6*32+27) /* Performance time-stamp counter */
+#define X86_FEATURE_PTSC ( 6*32+27) /* "ptsc" Performance time-stamp counter */
-#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* Last Level Cache performance counter extensions */
+#define X86_FEATURE_PERFCTR_LLC ( 6*32+28) /* "perfctr_llc" Last Level Cache performance counter extensions */
-#define X86_FEATURE_MWAITX ( 6*32+29) /* MWAIT extension (MONITORX/MWAITX instructions) */
+#define X86_FEATURE_MWAITX ( 6*32+29) /* "mwaitx" MWAIT extension (MONITORX/MWAITX instructions) */
 
 /*
  * Auxiliary flags: Linux defined - For features scattered in various
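Concretely, the convention is inverted: an entry with a quoted string is now shown in /proc/cpuinfo under that name, and an entry without one is hidden, whereas the old scheme used "" to hide a bit and showed unannotated bits under the lowercase macro name. Two hypothetical entries for illustration only (the names and bit positions are placeholders, not real feature bits):

#define X86_FEATURE_EXAMPLE_SHOWN  (20*32+30) /* "example" Listed as "example" in /proc/cpuinfo */
#define X86_FEATURE_EXAMPLE_HIDDEN (20*32+31) /* No quoted string, so not listed in /proc/cpuinfo */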
@ -191,93 +191,93 @@
  *
  * Reuse free bits when adding new feature flags!
  */
-#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* Ring 3 MONITOR/MWAIT instructions */
+#define X86_FEATURE_RING3MWAIT ( 7*32+ 0) /* "ring3mwait" Ring 3 MONITOR/MWAIT instructions */
-#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* Intel CPUID faulting */
+#define X86_FEATURE_CPUID_FAULT ( 7*32+ 1) /* "cpuid_fault" Intel CPUID faulting */
-#define X86_FEATURE_CPB ( 7*32+ 2) /* AMD Core Performance Boost */
+#define X86_FEATURE_CPB ( 7*32+ 2) /* "cpb" AMD Core Performance Boost */
-#define X86_FEATURE_EPB ( 7*32+ 3) /* IA32_ENERGY_PERF_BIAS support */
+#define X86_FEATURE_EPB ( 7*32+ 3) /* "epb" IA32_ENERGY_PERF_BIAS support */
-#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* Cache Allocation Technology L3 */
+#define X86_FEATURE_CAT_L3 ( 7*32+ 4) /* "cat_l3" Cache Allocation Technology L3 */
-#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* Cache Allocation Technology L2 */
+#define X86_FEATURE_CAT_L2 ( 7*32+ 5) /* "cat_l2" Cache Allocation Technology L2 */
-#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* Code and Data Prioritization L3 */
+#define X86_FEATURE_CDP_L3 ( 7*32+ 6) /* "cdp_l3" Code and Data Prioritization L3 */
-#define X86_FEATURE_TDX_HOST_PLATFORM ( 7*32+ 7) /* Platform supports being a TDX host */
+#define X86_FEATURE_TDX_HOST_PLATFORM ( 7*32+ 7) /* "tdx_host_platform" Platform supports being a TDX host */
-#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* AMD HW-PState */
+#define X86_FEATURE_HW_PSTATE ( 7*32+ 8) /* "hw_pstate" AMD HW-PState */
-#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* AMD ProcFeedbackInterface */
+#define X86_FEATURE_PROC_FEEDBACK ( 7*32+ 9) /* "proc_feedback" AMD ProcFeedbackInterface */
-#define X86_FEATURE_XCOMPACTED ( 7*32+10) /* "" Use compacted XSTATE (XSAVES or XSAVEC) */
+#define X86_FEATURE_XCOMPACTED ( 7*32+10) /* Use compacted XSTATE (XSAVES or XSAVEC) */
-#define X86_FEATURE_PTI ( 7*32+11) /* Kernel Page Table Isolation enabled */
+#define X86_FEATURE_PTI ( 7*32+11) /* "pti" Kernel Page Table Isolation enabled */
-#define X86_FEATURE_KERNEL_IBRS ( 7*32+12) /* "" Set/clear IBRS on kernel entry/exit */
+#define X86_FEATURE_KERNEL_IBRS ( 7*32+12) /* Set/clear IBRS on kernel entry/exit */
-#define X86_FEATURE_RSB_VMEXIT ( 7*32+13) /* "" Fill RSB on VM-Exit */
+#define X86_FEATURE_RSB_VMEXIT ( 7*32+13) /* Fill RSB on VM-Exit */
-#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* Intel Processor Inventory Number */
+#define X86_FEATURE_INTEL_PPIN ( 7*32+14) /* "intel_ppin" Intel Processor Inventory Number */
-#define X86_FEATURE_CDP_L2 ( 7*32+15) /* Code and Data Prioritization L2 */
+#define X86_FEATURE_CDP_L2 ( 7*32+15) /* "cdp_l2" Code and Data Prioritization L2 */
-#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* "" MSR SPEC_CTRL is implemented */
+#define X86_FEATURE_MSR_SPEC_CTRL ( 7*32+16) /* MSR SPEC_CTRL is implemented */
-#define X86_FEATURE_SSBD ( 7*32+17) /* Speculative Store Bypass Disable */
+#define X86_FEATURE_SSBD ( 7*32+17) /* "ssbd" Speculative Store Bypass Disable */
-#define X86_FEATURE_MBA ( 7*32+18) /* Memory Bandwidth Allocation */
+#define X86_FEATURE_MBA ( 7*32+18) /* "mba" Memory Bandwidth Allocation */
-#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* "" Fill RSB on context switches */
+#define X86_FEATURE_RSB_CTXSW ( 7*32+19) /* Fill RSB on context switches */
-#define X86_FEATURE_PERFMON_V2 ( 7*32+20) /* AMD Performance Monitoring Version 2 */
+#define X86_FEATURE_PERFMON_V2 ( 7*32+20) /* "perfmon_v2" AMD Performance Monitoring Version 2 */
-#define X86_FEATURE_USE_IBPB ( 7*32+21) /* "" Indirect Branch Prediction Barrier enabled */
+#define X86_FEATURE_USE_IBPB ( 7*32+21) /* Indirect Branch Prediction Barrier enabled */
-#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* "" Use IBRS during runtime firmware calls */
+#define X86_FEATURE_USE_IBRS_FW ( 7*32+22) /* Use IBRS during runtime firmware calls */
-#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* "" Disable Speculative Store Bypass. */
+#define X86_FEATURE_SPEC_STORE_BYPASS_DISABLE ( 7*32+23) /* Disable Speculative Store Bypass. */
-#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* "" AMD SSBD implementation via LS_CFG MSR */
+#define X86_FEATURE_LS_CFG_SSBD ( 7*32+24) /* AMD SSBD implementation via LS_CFG MSR */
-#define X86_FEATURE_IBRS ( 7*32+25) /* Indirect Branch Restricted Speculation */
+#define X86_FEATURE_IBRS ( 7*32+25) /* "ibrs" Indirect Branch Restricted Speculation */
-#define X86_FEATURE_IBPB ( 7*32+26) /* Indirect Branch Prediction Barrier */
+#define X86_FEATURE_IBPB ( 7*32+26) /* "ibpb" Indirect Branch Prediction Barrier */
-#define X86_FEATURE_STIBP ( 7*32+27) /* Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_STIBP ( 7*32+27) /* "stibp" Single Thread Indirect Branch Predictors */
-#define X86_FEATURE_ZEN ( 7*32+28) /* "" Generic flag for all Zen and newer */
+#define X86_FEATURE_ZEN ( 7*32+28) /* Generic flag for all Zen and newer */
-#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* "" L1TF workaround PTE inversion */
+#define X86_FEATURE_L1TF_PTEINV ( 7*32+29) /* L1TF workaround PTE inversion */
-#define X86_FEATURE_IBRS_ENHANCED ( 7*32+30) /* Enhanced IBRS */
+#define X86_FEATURE_IBRS_ENHANCED ( 7*32+30) /* "ibrs_enhanced" Enhanced IBRS */
-#define X86_FEATURE_MSR_IA32_FEAT_CTL ( 7*32+31) /* "" MSR IA32_FEAT_CTL configured */
+#define X86_FEATURE_MSR_IA32_FEAT_CTL ( 7*32+31) /* MSR IA32_FEAT_CTL configured */
 
 /* Virtualization flags: Linux defined, word 8 */
-#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* Intel TPR Shadow */
+#define X86_FEATURE_TPR_SHADOW ( 8*32+ 0) /* "tpr_shadow" Intel TPR Shadow */
-#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 1) /* Intel FlexPriority */
+#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 1) /* "flexpriority" Intel FlexPriority */
-#define X86_FEATURE_EPT ( 8*32+ 2) /* Intel Extended Page Table */
+#define X86_FEATURE_EPT ( 8*32+ 2) /* "ept" Intel Extended Page Table */
-#define X86_FEATURE_VPID ( 8*32+ 3) /* Intel Virtual Processor ID */
+#define X86_FEATURE_VPID ( 8*32+ 3) /* "vpid" Intel Virtual Processor ID */
 
-#define X86_FEATURE_VMMCALL ( 8*32+15) /* Prefer VMMCALL to VMCALL */
+#define X86_FEATURE_VMMCALL ( 8*32+15) /* "vmmcall" Prefer VMMCALL to VMCALL */
-#define X86_FEATURE_XENPV ( 8*32+16) /* "" Xen paravirtual guest */
+#define X86_FEATURE_XENPV ( 8*32+16) /* Xen paravirtual guest */
-#define X86_FEATURE_EPT_AD ( 8*32+17) /* Intel Extended Page Table access-dirty bit */
+#define X86_FEATURE_EPT_AD ( 8*32+17) /* "ept_ad" Intel Extended Page Table access-dirty bit */
-#define X86_FEATURE_VMCALL ( 8*32+18) /* "" Hypervisor supports the VMCALL instruction */
+#define X86_FEATURE_VMCALL ( 8*32+18) /* Hypervisor supports the VMCALL instruction */
-#define X86_FEATURE_VMW_VMMCALL ( 8*32+19) /* "" VMware prefers VMMCALL hypercall instruction */
+#define X86_FEATURE_VMW_VMMCALL ( 8*32+19) /* VMware prefers VMMCALL hypercall instruction */
-#define X86_FEATURE_PVUNLOCK ( 8*32+20) /* "" PV unlock function */
+#define X86_FEATURE_PVUNLOCK ( 8*32+20) /* PV unlock function */
-#define X86_FEATURE_VCPUPREEMPT ( 8*32+21) /* "" PV vcpu_is_preempted function */
+#define X86_FEATURE_VCPUPREEMPT ( 8*32+21) /* PV vcpu_is_preempted function */
-#define X86_FEATURE_TDX_GUEST ( 8*32+22) /* Intel Trust Domain Extensions Guest */
+#define X86_FEATURE_TDX_GUEST ( 8*32+22) /* "tdx_guest" Intel Trust Domain Extensions Guest */
 
 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EBX), word 9 */
-#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
+#define X86_FEATURE_FSGSBASE ( 9*32+ 0) /* "fsgsbase" RDFSBASE, WRFSBASE, RDGSBASE, WRGSBASE instructions*/
-#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* TSC adjustment MSR 0x3B */
+#define X86_FEATURE_TSC_ADJUST ( 9*32+ 1) /* "tsc_adjust" TSC adjustment MSR 0x3B */
-#define X86_FEATURE_SGX ( 9*32+ 2) /* Software Guard Extensions */
+#define X86_FEATURE_SGX ( 9*32+ 2) /* "sgx" Software Guard Extensions */
-#define X86_FEATURE_BMI1 ( 9*32+ 3) /* 1st group bit manipulation extensions */
+#define X86_FEATURE_BMI1 ( 9*32+ 3) /* "bmi1" 1st group bit manipulation extensions */
-#define X86_FEATURE_HLE ( 9*32+ 4) /* Hardware Lock Elision */
+#define X86_FEATURE_HLE ( 9*32+ 4) /* "hle" Hardware Lock Elision */
-#define X86_FEATURE_AVX2 ( 9*32+ 5) /* AVX2 instructions */
+#define X86_FEATURE_AVX2 ( 9*32+ 5) /* "avx2" AVX2 instructions */
-#define X86_FEATURE_FDP_EXCPTN_ONLY ( 9*32+ 6) /* "" FPU data pointer updated only on x87 exceptions */
+#define X86_FEATURE_FDP_EXCPTN_ONLY ( 9*32+ 6) /* FPU data pointer updated only on x87 exceptions */
-#define X86_FEATURE_SMEP ( 9*32+ 7) /* Supervisor Mode Execution Protection */
+#define X86_FEATURE_SMEP ( 9*32+ 7) /* "smep" Supervisor Mode Execution Protection */
-#define X86_FEATURE_BMI2 ( 9*32+ 8) /* 2nd group bit manipulation extensions */
+#define X86_FEATURE_BMI2 ( 9*32+ 8) /* "bmi2" 2nd group bit manipulation extensions */
-#define X86_FEATURE_ERMS ( 9*32+ 9) /* Enhanced REP MOVSB/STOSB instructions */
+#define X86_FEATURE_ERMS ( 9*32+ 9) /* "erms" Enhanced REP MOVSB/STOSB instructions */
-#define X86_FEATURE_INVPCID ( 9*32+10) /* Invalidate Processor Context ID */
+#define X86_FEATURE_INVPCID ( 9*32+10) /* "invpcid" Invalidate Processor Context ID */
-#define X86_FEATURE_RTM ( 9*32+11) /* Restricted Transactional Memory */
+#define X86_FEATURE_RTM ( 9*32+11) /* "rtm" Restricted Transactional Memory */
-#define X86_FEATURE_CQM ( 9*32+12) /* Cache QoS Monitoring */
+#define X86_FEATURE_CQM ( 9*32+12) /* "cqm" Cache QoS Monitoring */
-#define X86_FEATURE_ZERO_FCS_FDS ( 9*32+13) /* "" Zero out FPU CS and FPU DS */
+#define X86_FEATURE_ZERO_FCS_FDS ( 9*32+13) /* Zero out FPU CS and FPU DS */
-#define X86_FEATURE_MPX ( 9*32+14) /* Memory Protection Extension */
+#define X86_FEATURE_MPX ( 9*32+14) /* "mpx" Memory Protection Extension */
-#define X86_FEATURE_RDT_A ( 9*32+15) /* Resource Director Technology Allocation */
+#define X86_FEATURE_RDT_A ( 9*32+15) /* "rdt_a" Resource Director Technology Allocation */
-#define X86_FEATURE_AVX512F ( 9*32+16) /* AVX-512 Foundation */
+#define X86_FEATURE_AVX512F ( 9*32+16) /* "avx512f" AVX-512 Foundation */
-#define X86_FEATURE_AVX512DQ ( 9*32+17) /* AVX-512 DQ (Double/Quad granular) Instructions */
+#define X86_FEATURE_AVX512DQ ( 9*32+17) /* "avx512dq" AVX-512 DQ (Double/Quad granular) Instructions */
-#define X86_FEATURE_RDSEED ( 9*32+18) /* RDSEED instruction */
+#define X86_FEATURE_RDSEED ( 9*32+18) /* "rdseed" RDSEED instruction */
-#define X86_FEATURE_ADX ( 9*32+19) /* ADCX and ADOX instructions */
+#define X86_FEATURE_ADX ( 9*32+19) /* "adx" ADCX and ADOX instructions */
-#define X86_FEATURE_SMAP ( 9*32+20) /* Supervisor Mode Access Prevention */
+#define X86_FEATURE_SMAP ( 9*32+20) /* "smap" Supervisor Mode Access Prevention */
-#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* AVX-512 Integer Fused Multiply-Add instructions */
+#define X86_FEATURE_AVX512IFMA ( 9*32+21) /* "avx512ifma" AVX-512 Integer Fused Multiply-Add instructions */
-#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* CLFLUSHOPT instruction */
+#define X86_FEATURE_CLFLUSHOPT ( 9*32+23) /* "clflushopt" CLFLUSHOPT instruction */
-#define X86_FEATURE_CLWB ( 9*32+24) /* CLWB instruction */
+#define X86_FEATURE_CLWB ( 9*32+24) /* "clwb" CLWB instruction */
-#define X86_FEATURE_INTEL_PT ( 9*32+25) /* Intel Processor Trace */
+#define X86_FEATURE_INTEL_PT ( 9*32+25) /* "intel_pt" Intel Processor Trace */
-#define X86_FEATURE_AVX512PF ( 9*32+26) /* AVX-512 Prefetch */
+#define X86_FEATURE_AVX512PF ( 9*32+26) /* "avx512pf" AVX-512 Prefetch */
-#define X86_FEATURE_AVX512ER ( 9*32+27) /* AVX-512 Exponential and Reciprocal */
+#define X86_FEATURE_AVX512ER ( 9*32+27) /* "avx512er" AVX-512 Exponential and Reciprocal */
-#define X86_FEATURE_AVX512CD ( 9*32+28) /* AVX-512 Conflict Detection */
+#define X86_FEATURE_AVX512CD ( 9*32+28) /* "avx512cd" AVX-512 Conflict Detection */
-#define X86_FEATURE_SHA_NI ( 9*32+29) /* SHA1/SHA256 Instruction Extensions */
+#define X86_FEATURE_SHA_NI ( 9*32+29) /* "sha_ni" SHA1/SHA256 Instruction Extensions */
-#define X86_FEATURE_AVX512BW ( 9*32+30) /* AVX-512 BW (Byte/Word granular) Instructions */
+#define X86_FEATURE_AVX512BW ( 9*32+30) /* "avx512bw" AVX-512 BW (Byte/Word granular) Instructions */
-#define X86_FEATURE_AVX512VL ( 9*32+31) /* AVX-512 VL (128/256 Vector Length) Extensions */
+#define X86_FEATURE_AVX512VL ( 9*32+31) /* "avx512vl" AVX-512 VL (128/256 Vector Length) Extensions */
 
 /* Extended state features, CPUID level 0x0000000d:1 (EAX), word 10 */
-#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* XSAVEOPT instruction */
+#define X86_FEATURE_XSAVEOPT (10*32+ 0) /* "xsaveopt" XSAVEOPT instruction */
-#define X86_FEATURE_XSAVEC (10*32+ 1) /* XSAVEC instruction */
+#define X86_FEATURE_XSAVEC (10*32+ 1) /* "xsavec" XSAVEC instruction */
-#define X86_FEATURE_XGETBV1 (10*32+ 2) /* XGETBV with ECX = 1 instruction */
+#define X86_FEATURE_XGETBV1 (10*32+ 2) /* "xgetbv1" XGETBV with ECX = 1 instruction */
-#define X86_FEATURE_XSAVES (10*32+ 3) /* XSAVES/XRSTORS instructions */
+#define X86_FEATURE_XSAVES (10*32+ 3) /* "xsaves" XSAVES/XRSTORS instructions */
-#define X86_FEATURE_XFD (10*32+ 4) /* "" eXtended Feature Disabling */
+#define X86_FEATURE_XFD (10*32+ 4) /* eXtended Feature Disabling */
 
 /*
  * Extended auxiliary flags: Linux defined - for features scattered in various
@@ -285,185 +285,182 @@
  *
  * Reuse free bits when adding new feature flags!
  */
-#define X86_FEATURE_CQM_LLC (11*32+ 0) /* LLC QoS if 1 */
+#define X86_FEATURE_CQM_LLC (11*32+ 0) /* "cqm_llc" LLC QoS if 1 */
-#define X86_FEATURE_CQM_OCCUP_LLC (11*32+ 1) /* LLC occupancy monitoring */
+#define X86_FEATURE_CQM_OCCUP_LLC (11*32+ 1) /* "cqm_occup_llc" LLC occupancy monitoring */
-#define X86_FEATURE_CQM_MBM_TOTAL (11*32+ 2) /* LLC Total MBM monitoring */
+#define X86_FEATURE_CQM_MBM_TOTAL (11*32+ 2) /* "cqm_mbm_total" LLC Total MBM monitoring */
-#define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* LLC Local MBM monitoring */
+#define X86_FEATURE_CQM_MBM_LOCAL (11*32+ 3) /* "cqm_mbm_local" LLC Local MBM monitoring */
-#define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* "" LFENCE in user entry SWAPGS path */
+#define X86_FEATURE_FENCE_SWAPGS_USER (11*32+ 4) /* LFENCE in user entry SWAPGS path */
-#define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* "" LFENCE in kernel entry SWAPGS path */
+#define X86_FEATURE_FENCE_SWAPGS_KERNEL (11*32+ 5) /* LFENCE in kernel entry SWAPGS path */
-#define X86_FEATURE_SPLIT_LOCK_DETECT (11*32+ 6) /* #AC for split lock */
+#define X86_FEATURE_SPLIT_LOCK_DETECT (11*32+ 6) /* "split_lock_detect" #AC for split lock */
-#define X86_FEATURE_PER_THREAD_MBA (11*32+ 7) /* "" Per-thread Memory Bandwidth Allocation */
+#define X86_FEATURE_PER_THREAD_MBA (11*32+ 7) /* Per-thread Memory Bandwidth Allocation */
-#define X86_FEATURE_SGX1 (11*32+ 8) /* "" Basic SGX */
+#define X86_FEATURE_SGX1 (11*32+ 8) /* Basic SGX */
-#define X86_FEATURE_SGX2 (11*32+ 9) /* "" SGX Enclave Dynamic Memory Management (EDMM) */
+#define X86_FEATURE_SGX2 (11*32+ 9) /* SGX Enclave Dynamic Memory Management (EDMM) */
-#define X86_FEATURE_ENTRY_IBPB (11*32+10) /* "" Issue an IBPB on kernel entry */
+#define X86_FEATURE_ENTRY_IBPB (11*32+10) /* Issue an IBPB on kernel entry */
-#define X86_FEATURE_RRSBA_CTRL (11*32+11) /* "" RET prediction control */
+#define X86_FEATURE_RRSBA_CTRL (11*32+11) /* RET prediction control */
-#define X86_FEATURE_RETPOLINE (11*32+12) /* "" Generic Retpoline mitigation for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE (11*32+12) /* Generic Retpoline mitigation for Spectre variant 2 */
-#define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* "" Use LFENCE for Spectre variant 2 */
+#define X86_FEATURE_RETPOLINE_LFENCE (11*32+13) /* Use LFENCE for Spectre variant 2 */
-#define X86_FEATURE_RETHUNK (11*32+14) /* "" Use REturn THUNK */
+#define X86_FEATURE_RETHUNK (11*32+14) /* Use REturn THUNK */
-#define X86_FEATURE_UNRET (11*32+15) /* "" AMD BTB untrain return */
+#define X86_FEATURE_UNRET (11*32+15) /* AMD BTB untrain return */
-#define X86_FEATURE_USE_IBPB_FW (11*32+16) /* "" Use IBPB during runtime firmware calls */
+#define X86_FEATURE_USE_IBPB_FW (11*32+16) /* Use IBPB during runtime firmware calls */
-#define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* "" Fill RSB on VM exit when EIBRS is enabled */
+#define X86_FEATURE_RSB_VMEXIT_LITE (11*32+17) /* Fill RSB on VM exit when EIBRS is enabled */
-#define X86_FEATURE_SGX_EDECCSSA (11*32+18) /* "" SGX EDECCSSA user leaf function */
+#define X86_FEATURE_SGX_EDECCSSA (11*32+18) /* SGX EDECCSSA user leaf function */
-#define X86_FEATURE_CALL_DEPTH (11*32+19) /* "" Call depth tracking for RSB stuffing */
+#define X86_FEATURE_CALL_DEPTH (11*32+19) /* Call depth tracking for RSB stuffing */
-#define X86_FEATURE_SMBA (11*32+21) /* "" Slow Memory Bandwidth Allocation */
-#define X86_FEATURE_BMEC (11*32+22) /* "" Bandwidth Monitoring Event Configuration */
-#define X86_FEATURE_USER_SHSTK (11*32+23) /* Shadow stack support for user mode applications */
-#define X86_FEATURE_MSR_TSX_CTRL (11*32+20) /* "" MSR IA32_TSX_CTRL (Intel) implemented */
-#define X86_FEATURE_SRSO (11*32+24) /* "" AMD BTB untrain RETs */
-#define X86_FEATURE_SRSO_ALIAS (11*32+25) /* "" AMD BTB untrain RETs through aliasing */
-#define X86_FEATURE_IBPB_ON_VMEXIT (11*32+26) /* "" Issue an IBPB only on VMEXIT */
-#define X86_FEATURE_APIC_MSRS_FENCE (11*32+27) /* "" IA32_TSC_DEADLINE and X2APIC MSRs need fencing */
-#define X86_FEATURE_ZEN2 (11*32+28) /* "" CPU based on Zen2 microarchitecture */
-#define X86_FEATURE_ZEN3 (11*32+29) /* "" CPU based on Zen3 microarchitecture */
-#define X86_FEATURE_ZEN4 (11*32+30) /* "" CPU based on Zen4 microarchitecture */
-#define X86_FEATURE_ZEN1 (11*32+31) /* "" CPU based on Zen1 microarchitecture */
+#define X86_FEATURE_MSR_TSX_CTRL (11*32+20) /* MSR IA32_TSX_CTRL (Intel) implemented */
+#define X86_FEATURE_SMBA (11*32+21) /* Slow Memory Bandwidth Allocation */
+#define X86_FEATURE_BMEC (11*32+22) /* Bandwidth Monitoring Event Configuration */
+#define X86_FEATURE_USER_SHSTK (11*32+23) /* "user_shstk" Shadow stack support for user mode applications */
+#define X86_FEATURE_SRSO (11*32+24) /* AMD BTB untrain RETs */
+#define X86_FEATURE_SRSO_ALIAS (11*32+25) /* AMD BTB untrain RETs through aliasing */
+#define X86_FEATURE_IBPB_ON_VMEXIT (11*32+26) /* Issue an IBPB only on VMEXIT */
+#define X86_FEATURE_APIC_MSRS_FENCE (11*32+27) /* IA32_TSC_DEADLINE and X2APIC MSRs need fencing */
+#define X86_FEATURE_ZEN2 (11*32+28) /* CPU based on Zen2 microarchitecture */
+#define X86_FEATURE_ZEN3 (11*32+29) /* CPU based on Zen3 microarchitecture */
+#define X86_FEATURE_ZEN4 (11*32+30) /* CPU based on Zen4 microarchitecture */
+#define X86_FEATURE_ZEN1 (11*32+31) /* CPU based on Zen1 microarchitecture */

 /* Intel-defined CPU features, CPUID level 0x00000007:1 (EAX), word 12 */
-#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* AVX VNNI instructions */
+#define X86_FEATURE_AVX_VNNI (12*32+ 4) /* "avx_vnni" AVX VNNI instructions */
-#define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* AVX512 BFLOAT16 instructions */
+#define X86_FEATURE_AVX512_BF16 (12*32+ 5) /* "avx512_bf16" AVX512 BFLOAT16 instructions */
-#define X86_FEATURE_CMPCCXADD (12*32+ 7) /* "" CMPccXADD instructions */
+#define X86_FEATURE_CMPCCXADD (12*32+ 7) /* CMPccXADD instructions */
-#define X86_FEATURE_ARCH_PERFMON_EXT (12*32+ 8) /* "" Intel Architectural PerfMon Extension */
+#define X86_FEATURE_ARCH_PERFMON_EXT (12*32+ 8) /* Intel Architectural PerfMon Extension */
-#define X86_FEATURE_FZRM (12*32+10) /* "" Fast zero-length REP MOVSB */
+#define X86_FEATURE_FZRM (12*32+10) /* Fast zero-length REP MOVSB */
-#define X86_FEATURE_FSRS (12*32+11) /* "" Fast short REP STOSB */
+#define X86_FEATURE_FSRS (12*32+11) /* Fast short REP STOSB */
-#define X86_FEATURE_FSRC (12*32+12) /* "" Fast short REP {CMPSB,SCASB} */
+#define X86_FEATURE_FSRC (12*32+12) /* Fast short REP {CMPSB,SCASB} */
-#define X86_FEATURE_LKGS (12*32+18) /* "" Load "kernel" (userspace) GS */
+#define X86_FEATURE_LKGS (12*32+18) /* Load "kernel" (userspace) GS */
-#define X86_FEATURE_WRMSRNS (12*32+19) /* "" Non-serializing WRMSR */
+#define X86_FEATURE_WRMSRNS (12*32+19) /* Non-serializing WRMSR */
-#define X86_FEATURE_AMX_FP16 (12*32+21) /* "" AMX fp16 Support */
+#define X86_FEATURE_AMX_FP16 (12*32+21) /* AMX fp16 Support */
-#define X86_FEATURE_AVX_IFMA (12*32+23) /* "" Support for VPMADD52[H,L]UQ */
+#define X86_FEATURE_AVX_IFMA (12*32+23) /* Support for VPMADD52[H,L]UQ */
-#define X86_FEATURE_LAM (12*32+26) /* Linear Address Masking */
+#define X86_FEATURE_LAM (12*32+26) /* "lam" Linear Address Masking */

 /* AMD-defined CPU features, CPUID level 0x80000008 (EBX), word 13 */
-#define X86_FEATURE_CLZERO (13*32+ 0) /* CLZERO instruction */
+#define X86_FEATURE_CLZERO (13*32+ 0) /* "clzero" CLZERO instruction */
-#define X86_FEATURE_IRPERF (13*32+ 1) /* Instructions Retired Count */
+#define X86_FEATURE_IRPERF (13*32+ 1) /* "irperf" Instructions Retired Count */
-#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* Always save/restore FP error pointers */
+#define X86_FEATURE_XSAVEERPTR (13*32+ 2) /* "xsaveerptr" Always save/restore FP error pointers */
-#define X86_FEATURE_RDPRU (13*32+ 4) /* Read processor register at user level */
+#define X86_FEATURE_RDPRU (13*32+ 4) /* "rdpru" Read processor register at user level */
-#define X86_FEATURE_WBNOINVD (13*32+ 9) /* WBNOINVD instruction */
+#define X86_FEATURE_WBNOINVD (13*32+ 9) /* "wbnoinvd" WBNOINVD instruction */
-#define X86_FEATURE_AMD_IBPB (13*32+12) /* "" Indirect Branch Prediction Barrier */
+#define X86_FEATURE_AMD_IBPB (13*32+12) /* Indirect Branch Prediction Barrier */
-#define X86_FEATURE_AMD_IBRS (13*32+14) /* "" Indirect Branch Restricted Speculation */
+#define X86_FEATURE_AMD_IBRS (13*32+14) /* Indirect Branch Restricted Speculation */
-#define X86_FEATURE_AMD_STIBP (13*32+15) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_AMD_STIBP (13*32+15) /* Single Thread Indirect Branch Predictors */
-#define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* "" Single Thread Indirect Branch Predictors always-on preferred */
+#define X86_FEATURE_AMD_STIBP_ALWAYS_ON (13*32+17) /* Single Thread Indirect Branch Predictors always-on preferred */
-#define X86_FEATURE_AMD_PPIN (13*32+23) /* Protected Processor Inventory Number */
+#define X86_FEATURE_AMD_PPIN (13*32+23) /* "amd_ppin" Protected Processor Inventory Number */
-#define X86_FEATURE_AMD_SSBD (13*32+24) /* "" Speculative Store Bypass Disable */
+#define X86_FEATURE_AMD_SSBD (13*32+24) /* Speculative Store Bypass Disable */
-#define X86_FEATURE_VIRT_SSBD (13*32+25) /* Virtualized Speculative Store Bypass Disable */
+#define X86_FEATURE_VIRT_SSBD (13*32+25) /* "virt_ssbd" Virtualized Speculative Store Bypass Disable */
-#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* "" Speculative Store Bypass is fixed in hardware. */
+#define X86_FEATURE_AMD_SSB_NO (13*32+26) /* Speculative Store Bypass is fixed in hardware. */
-#define X86_FEATURE_CPPC (13*32+27) /* Collaborative Processor Performance Control */
+#define X86_FEATURE_CPPC (13*32+27) /* "cppc" Collaborative Processor Performance Control */
-#define X86_FEATURE_AMD_PSFD (13*32+28) /* "" Predictive Store Forwarding Disable */
+#define X86_FEATURE_AMD_PSFD (13*32+28) /* Predictive Store Forwarding Disable */
-#define X86_FEATURE_BTC_NO (13*32+29) /* "" Not vulnerable to Branch Type Confusion */
+#define X86_FEATURE_BTC_NO (13*32+29) /* Not vulnerable to Branch Type Confusion */
-#define X86_FEATURE_BRS (13*32+31) /* Branch Sampling available */
+#define X86_FEATURE_BRS (13*32+31) /* "brs" Branch Sampling available */

 /* Thermal and Power Management Leaf, CPUID level 0x00000006 (EAX), word 14 */
-#define X86_FEATURE_DTHERM (14*32+ 0) /* Digital Thermal Sensor */
+#define X86_FEATURE_DTHERM (14*32+ 0) /* "dtherm" Digital Thermal Sensor */
-#define X86_FEATURE_IDA (14*32+ 1) /* Intel Dynamic Acceleration */
+#define X86_FEATURE_IDA (14*32+ 1) /* "ida" Intel Dynamic Acceleration */
-#define X86_FEATURE_ARAT (14*32+ 2) /* Always Running APIC Timer */
+#define X86_FEATURE_ARAT (14*32+ 2) /* "arat" Always Running APIC Timer */
-#define X86_FEATURE_PLN (14*32+ 4) /* Intel Power Limit Notification */
+#define X86_FEATURE_PLN (14*32+ 4) /* "pln" Intel Power Limit Notification */
-#define X86_FEATURE_PTS (14*32+ 6) /* Intel Package Thermal Status */
+#define X86_FEATURE_PTS (14*32+ 6) /* "pts" Intel Package Thermal Status */
-#define X86_FEATURE_HWP (14*32+ 7) /* Intel Hardware P-states */
+#define X86_FEATURE_HWP (14*32+ 7) /* "hwp" Intel Hardware P-states */
-#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* HWP Notification */
+#define X86_FEATURE_HWP_NOTIFY (14*32+ 8) /* "hwp_notify" HWP Notification */
-#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* HWP Activity Window */
+#define X86_FEATURE_HWP_ACT_WINDOW (14*32+ 9) /* "hwp_act_window" HWP Activity Window */
-#define X86_FEATURE_HWP_EPP (14*32+10) /* HWP Energy Perf. Preference */
+#define X86_FEATURE_HWP_EPP (14*32+10) /* "hwp_epp" HWP Energy Perf. Preference */
-#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* HWP Package Level Request */
+#define X86_FEATURE_HWP_PKG_REQ (14*32+11) /* "hwp_pkg_req" HWP Package Level Request */
-#define X86_FEATURE_HWP_HIGHEST_PERF_CHANGE (14*32+15) /* "" HWP Highest perf change */
+#define X86_FEATURE_HWP_HIGHEST_PERF_CHANGE (14*32+15) /* HWP Highest perf change */
-#define X86_FEATURE_HFI (14*32+19) /* Hardware Feedback Interface */
+#define X86_FEATURE_HFI (14*32+19) /* "hfi" Hardware Feedback Interface */

 /* AMD SVM Feature Identification, CPUID level 0x8000000a (EDX), word 15 */
-#define X86_FEATURE_NPT (15*32+ 0) /* Nested Page Table support */
+#define X86_FEATURE_NPT (15*32+ 0) /* "npt" Nested Page Table support */
-#define X86_FEATURE_LBRV (15*32+ 1) /* LBR Virtualization support */
+#define X86_FEATURE_LBRV (15*32+ 1) /* "lbrv" LBR Virtualization support */
 #define X86_FEATURE_SVML (15*32+ 2) /* "svm_lock" SVM locking MSR */
 #define X86_FEATURE_NRIPS (15*32+ 3) /* "nrip_save" SVM next_rip save */
 #define X86_FEATURE_TSCRATEMSR (15*32+ 4) /* "tsc_scale" TSC scaling support */
 #define X86_FEATURE_VMCBCLEAN (15*32+ 5) /* "vmcb_clean" VMCB clean bits support */
-#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* flush-by-ASID support */
+#define X86_FEATURE_FLUSHBYASID (15*32+ 6) /* "flushbyasid" Flush-by-ASID support */
-#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* Decode Assists support */
+#define X86_FEATURE_DECODEASSISTS (15*32+ 7) /* "decodeassists" Decode Assists support */
-#define X86_FEATURE_PAUSEFILTER (15*32+10) /* filtered pause intercept */
+#define X86_FEATURE_PAUSEFILTER (15*32+10) /* "pausefilter" Filtered pause intercept */
-#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* pause filter threshold */
+#define X86_FEATURE_PFTHRESHOLD (15*32+12) /* "pfthreshold" Pause filter threshold */
-#define X86_FEATURE_AVIC (15*32+13) /* Virtual Interrupt Controller */
+#define X86_FEATURE_AVIC (15*32+13) /* "avic" Virtual Interrupt Controller */
-#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* Virtual VMSAVE VMLOAD */
+#define X86_FEATURE_V_VMSAVE_VMLOAD (15*32+15) /* "v_vmsave_vmload" Virtual VMSAVE VMLOAD */
-#define X86_FEATURE_VGIF (15*32+16) /* Virtual GIF */
+#define X86_FEATURE_VGIF (15*32+16) /* "vgif" Virtual GIF */
-#define X86_FEATURE_X2AVIC (15*32+18) /* Virtual x2apic */
+#define X86_FEATURE_X2AVIC (15*32+18) /* "x2avic" Virtual x2apic */
-#define X86_FEATURE_V_SPEC_CTRL (15*32+20) /* Virtual SPEC_CTRL */
+#define X86_FEATURE_V_SPEC_CTRL (15*32+20) /* "v_spec_ctrl" Virtual SPEC_CTRL */
-#define X86_FEATURE_VNMI (15*32+25) /* Virtual NMI */
+#define X86_FEATURE_VNMI (15*32+25) /* "vnmi" Virtual NMI */
-#define X86_FEATURE_SVME_ADDR_CHK (15*32+28) /* "" SVME addr check */
+#define X86_FEATURE_SVME_ADDR_CHK (15*32+28) /* SVME addr check */

 /* Intel-defined CPU features, CPUID level 0x00000007:0 (ECX), word 16 */
-#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* AVX512 Vector Bit Manipulation instructions*/
+#define X86_FEATURE_AVX512VBMI (16*32+ 1) /* "avx512vbmi" AVX512 Vector Bit Manipulation instructions*/
-#define X86_FEATURE_UMIP (16*32+ 2) /* User Mode Instruction Protection */
+#define X86_FEATURE_UMIP (16*32+ 2) /* "umip" User Mode Instruction Protection */
-#define X86_FEATURE_PKU (16*32+ 3) /* Protection Keys for Userspace */
+#define X86_FEATURE_PKU (16*32+ 3) /* "pku" Protection Keys for Userspace */
-#define X86_FEATURE_OSPKE (16*32+ 4) /* OS Protection Keys Enable */
+#define X86_FEATURE_OSPKE (16*32+ 4) /* "ospke" OS Protection Keys Enable */
-#define X86_FEATURE_WAITPKG (16*32+ 5) /* UMONITOR/UMWAIT/TPAUSE Instructions */
+#define X86_FEATURE_WAITPKG (16*32+ 5) /* "waitpkg" UMONITOR/UMWAIT/TPAUSE Instructions */
-#define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* Additional AVX512 Vector Bit Manipulation Instructions */
+#define X86_FEATURE_AVX512_VBMI2 (16*32+ 6) /* "avx512_vbmi2" Additional AVX512 Vector Bit Manipulation Instructions */
-#define X86_FEATURE_SHSTK (16*32+ 7) /* "" Shadow stack */
+#define X86_FEATURE_SHSTK (16*32+ 7) /* Shadow stack */
-#define X86_FEATURE_GFNI (16*32+ 8) /* Galois Field New Instructions */
+#define X86_FEATURE_GFNI (16*32+ 8) /* "gfni" Galois Field New Instructions */
-#define X86_FEATURE_VAES (16*32+ 9) /* Vector AES */
+#define X86_FEATURE_VAES (16*32+ 9) /* "vaes" Vector AES */
-#define X86_FEATURE_VPCLMULQDQ (16*32+10) /* Carry-Less Multiplication Double Quadword */
+#define X86_FEATURE_VPCLMULQDQ (16*32+10) /* "vpclmulqdq" Carry-Less Multiplication Double Quadword */
-#define X86_FEATURE_AVX512_VNNI (16*32+11) /* Vector Neural Network Instructions */
+#define X86_FEATURE_AVX512_VNNI (16*32+11) /* "avx512_vnni" Vector Neural Network Instructions */
-#define X86_FEATURE_AVX512_BITALG (16*32+12) /* Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
+#define X86_FEATURE_AVX512_BITALG (16*32+12) /* "avx512_bitalg" Support for VPOPCNT[B,W] and VPSHUF-BITQMB instructions */
-#define X86_FEATURE_TME (16*32+13) /* Intel Total Memory Encryption */
+#define X86_FEATURE_TME (16*32+13) /* "tme" Intel Total Memory Encryption */
-#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* POPCNT for vectors of DW/QW */
+#define X86_FEATURE_AVX512_VPOPCNTDQ (16*32+14) /* "avx512_vpopcntdq" POPCNT for vectors of DW/QW */
-#define X86_FEATURE_LA57 (16*32+16) /* 5-level page tables */
+#define X86_FEATURE_LA57 (16*32+16) /* "la57" 5-level page tables */
-#define X86_FEATURE_RDPID (16*32+22) /* RDPID instruction */
+#define X86_FEATURE_RDPID (16*32+22) /* "rdpid" RDPID instruction */
-#define X86_FEATURE_BUS_LOCK_DETECT (16*32+24) /* Bus Lock detect */
+#define X86_FEATURE_BUS_LOCK_DETECT (16*32+24) /* "bus_lock_detect" Bus Lock detect */
-#define X86_FEATURE_CLDEMOTE (16*32+25) /* CLDEMOTE instruction */
+#define X86_FEATURE_CLDEMOTE (16*32+25) /* "cldemote" CLDEMOTE instruction */
-#define X86_FEATURE_MOVDIRI (16*32+27) /* MOVDIRI instruction */
+#define X86_FEATURE_MOVDIRI (16*32+27) /* "movdiri" MOVDIRI instruction */
-#define X86_FEATURE_MOVDIR64B (16*32+28) /* MOVDIR64B instruction */
+#define X86_FEATURE_MOVDIR64B (16*32+28) /* "movdir64b" MOVDIR64B instruction */
-#define X86_FEATURE_ENQCMD (16*32+29) /* ENQCMD and ENQCMDS instructions */
+#define X86_FEATURE_ENQCMD (16*32+29) /* "enqcmd" ENQCMD and ENQCMDS instructions */
-#define X86_FEATURE_SGX_LC (16*32+30) /* Software Guard Extensions Launch Control */
+#define X86_FEATURE_SGX_LC (16*32+30) /* "sgx_lc" Software Guard Extensions Launch Control */

 /* AMD-defined CPU features, CPUID level 0x80000007 (EBX), word 17 */
-#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* MCA overflow recovery support */
+#define X86_FEATURE_OVERFLOW_RECOV (17*32+ 0) /* "overflow_recov" MCA overflow recovery support */
-#define X86_FEATURE_SUCCOR (17*32+ 1) /* Uncorrectable error containment and recovery */
+#define X86_FEATURE_SUCCOR (17*32+ 1) /* "succor" Uncorrectable error containment and recovery */
-#define X86_FEATURE_SMCA (17*32+ 3) /* Scalable MCA */
+#define X86_FEATURE_SMCA (17*32+ 3) /* "smca" Scalable MCA */

 /* Intel-defined CPU features, CPUID level 0x00000007:0 (EDX), word 18 */
-#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* AVX-512 Neural Network Instructions */
+#define X86_FEATURE_AVX512_4VNNIW (18*32+ 2) /* "avx512_4vnniw" AVX-512 Neural Network Instructions */
-#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* AVX-512 Multiply Accumulation Single precision */
+#define X86_FEATURE_AVX512_4FMAPS (18*32+ 3) /* "avx512_4fmaps" AVX-512 Multiply Accumulation Single precision */
-#define X86_FEATURE_FSRM (18*32+ 4) /* Fast Short Rep Mov */
+#define X86_FEATURE_FSRM (18*32+ 4) /* "fsrm" Fast Short Rep Mov */
-#define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* AVX-512 Intersect for D/Q */
+#define X86_FEATURE_AVX512_VP2INTERSECT (18*32+ 8) /* "avx512_vp2intersect" AVX-512 Intersect for D/Q */
-#define X86_FEATURE_SRBDS_CTRL (18*32+ 9) /* "" SRBDS mitigation MSR available */
+#define X86_FEATURE_SRBDS_CTRL (18*32+ 9) /* SRBDS mitigation MSR available */
-#define X86_FEATURE_MD_CLEAR (18*32+10) /* VERW clears CPU buffers */
+#define X86_FEATURE_MD_CLEAR (18*32+10) /* "md_clear" VERW clears CPU buffers */
-#define X86_FEATURE_RTM_ALWAYS_ABORT (18*32+11) /* "" RTM transaction always aborts */
+#define X86_FEATURE_RTM_ALWAYS_ABORT (18*32+11) /* RTM transaction always aborts */
-#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* "" TSX_FORCE_ABORT */
+#define X86_FEATURE_TSX_FORCE_ABORT (18*32+13) /* TSX_FORCE_ABORT */
-#define X86_FEATURE_SERIALIZE (18*32+14) /* SERIALIZE instruction */
+#define X86_FEATURE_SERIALIZE (18*32+14) /* "serialize" SERIALIZE instruction */
-#define X86_FEATURE_HYBRID_CPU (18*32+15) /* "" This part has CPUs of more than one type */
+#define X86_FEATURE_HYBRID_CPU (18*32+15) /* This part has CPUs of more than one type */
-#define X86_FEATURE_TSXLDTRK (18*32+16) /* TSX Suspend Load Address Tracking */
+#define X86_FEATURE_TSXLDTRK (18*32+16) /* "tsxldtrk" TSX Suspend Load Address Tracking */
-#define X86_FEATURE_PCONFIG (18*32+18) /* Intel PCONFIG */
+#define X86_FEATURE_PCONFIG (18*32+18) /* "pconfig" Intel PCONFIG */
-#define X86_FEATURE_ARCH_LBR (18*32+19) /* Intel ARCH LBR */
+#define X86_FEATURE_ARCH_LBR (18*32+19) /* "arch_lbr" Intel ARCH LBR */
-#define X86_FEATURE_IBT (18*32+20) /* Indirect Branch Tracking */
+#define X86_FEATURE_IBT (18*32+20) /* "ibt" Indirect Branch Tracking */
-#define X86_FEATURE_AMX_BF16 (18*32+22) /* AMX bf16 Support */
+#define X86_FEATURE_AMX_BF16 (18*32+22) /* "amx_bf16" AMX bf16 Support */
-#define X86_FEATURE_AVX512_FP16 (18*32+23) /* AVX512 FP16 */
+#define X86_FEATURE_AVX512_FP16 (18*32+23) /* "avx512_fp16" AVX512 FP16 */
-#define X86_FEATURE_AMX_TILE (18*32+24) /* AMX tile Support */
+#define X86_FEATURE_AMX_TILE (18*32+24) /* "amx_tile" AMX tile Support */
-#define X86_FEATURE_AMX_INT8 (18*32+25) /* AMX int8 Support */
+#define X86_FEATURE_AMX_INT8 (18*32+25) /* "amx_int8" AMX int8 Support */
-#define X86_FEATURE_SPEC_CTRL (18*32+26) /* "" Speculation Control (IBRS + IBPB) */
+#define X86_FEATURE_SPEC_CTRL (18*32+26) /* Speculation Control (IBRS + IBPB) */
-#define X86_FEATURE_INTEL_STIBP (18*32+27) /* "" Single Thread Indirect Branch Predictors */
+#define X86_FEATURE_INTEL_STIBP (18*32+27) /* Single Thread Indirect Branch Predictors */
-#define X86_FEATURE_FLUSH_L1D (18*32+28) /* Flush L1D cache */
+#define X86_FEATURE_FLUSH_L1D (18*32+28) /* "flush_l1d" Flush L1D cache */
-#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* IA32_ARCH_CAPABILITIES MSR (Intel) */
+#define X86_FEATURE_ARCH_CAPABILITIES (18*32+29) /* "arch_capabilities" IA32_ARCH_CAPABILITIES MSR (Intel) */
-#define X86_FEATURE_CORE_CAPABILITIES (18*32+30) /* "" IA32_CORE_CAPABILITIES MSR */
+#define X86_FEATURE_CORE_CAPABILITIES (18*32+30) /* IA32_CORE_CAPABILITIES MSR */
-#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* "" Speculative Store Bypass Disable */
+#define X86_FEATURE_SPEC_CTRL_SSBD (18*32+31) /* Speculative Store Bypass Disable */

 /* AMD-defined memory encryption features, CPUID level 0x8000001f (EAX), word 19 */
-#define X86_FEATURE_SME (19*32+ 0) /* AMD Secure Memory Encryption */
+#define X86_FEATURE_SME (19*32+ 0) /* "sme" AMD Secure Memory Encryption */
-#define X86_FEATURE_SEV (19*32+ 1) /* AMD Secure Encrypted Virtualization */
+#define X86_FEATURE_SEV (19*32+ 1) /* "sev" AMD Secure Encrypted Virtualization */
-#define X86_FEATURE_VM_PAGE_FLUSH (19*32+ 2) /* "" VM Page Flush MSR is supported */
+#define X86_FEATURE_VM_PAGE_FLUSH (19*32+ 2) /* VM Page Flush MSR is supported */
-#define X86_FEATURE_SEV_ES (19*32+ 3) /* AMD Secure Encrypted Virtualization - Encrypted State */
+#define X86_FEATURE_SEV_ES (19*32+ 3) /* "sev_es" AMD Secure Encrypted Virtualization - Encrypted State */
-#define X86_FEATURE_SEV_SNP (19*32+ 4) /* AMD Secure Encrypted Virtualization - Secure Nested Paging */
+#define X86_FEATURE_SEV_SNP (19*32+ 4) /* "sev_snp" AMD Secure Encrypted Virtualization - Secure Nested Paging */
-#define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* "" Virtual TSC_AUX */
+#define X86_FEATURE_V_TSC_AUX (19*32+ 9) /* Virtual TSC_AUX */
-#define X86_FEATURE_SME_COHERENT (19*32+10) /* "" AMD hardware-enforced cache coherency */
+#define X86_FEATURE_SME_COHERENT (19*32+10) /* AMD hardware-enforced cache coherency */
-#define X86_FEATURE_DEBUG_SWAP (19*32+14) /* AMD SEV-ES full debug state swap support */
+#define X86_FEATURE_DEBUG_SWAP (19*32+14) /* "debug_swap" AMD SEV-ES full debug state swap support */
-#define X86_FEATURE_SVSM (19*32+28) /* SVSM present */
+#define X86_FEATURE_SVSM (19*32+28) /* "svsm" SVSM present */

 /* AMD-defined Extended Feature 2 EAX, CPUID level 0x80000021 (EAX), word 20 */
-#define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* "" No Nested Data Breakpoints */
+#define X86_FEATURE_NO_NESTED_DATA_BP (20*32+ 0) /* No Nested Data Breakpoints */
-#define X86_FEATURE_WRMSR_XX_BASE_NS (20*32+ 1) /* "" WRMSR to {FS,GS,KERNEL_GS}_BASE is non-serializing */
+#define X86_FEATURE_WRMSR_XX_BASE_NS (20*32+ 1) /* WRMSR to {FS,GS,KERNEL_GS}_BASE is non-serializing */
-#define X86_FEATURE_LFENCE_RDTSC (20*32+ 2) /* "" LFENCE always serializing / synchronizes RDTSC */
+#define X86_FEATURE_LFENCE_RDTSC (20*32+ 2) /* LFENCE always serializing / synchronizes RDTSC */
-#define X86_FEATURE_NULL_SEL_CLR_BASE (20*32+ 6) /* "" Null Selector Clears Base */
+#define X86_FEATURE_NULL_SEL_CLR_BASE (20*32+ 6) /* Null Selector Clears Base */
-#define X86_FEATURE_AUTOIBRS (20*32+ 8) /* "" Automatic IBRS */
+#define X86_FEATURE_AUTOIBRS (20*32+ 8) /* Automatic IBRS */
-#define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* "" SMM_CTL MSR is not present */
+#define X86_FEATURE_NO_SMM_CTL_MSR (20*32+ 9) /* SMM_CTL MSR is not present */

-#define X86_FEATURE_SBPB (20*32+27) /* "" Selective Branch Prediction Barrier */
+#define X86_FEATURE_SBPB (20*32+27) /* Selective Branch Prediction Barrier */
-#define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* "" MSR_PRED_CMD[IBPB] flushes all branch type predictions */
+#define X86_FEATURE_IBPB_BRTYPE (20*32+28) /* MSR_PRED_CMD[IBPB] flushes all branch type predictions */
-#define X86_FEATURE_SRSO_NO (20*32+29) /* "" CPU is not affected by SRSO */
+#define X86_FEATURE_SRSO_NO (20*32+29) /* CPU is not affected by SRSO */

 /*
  * Extended auxiliary flags: Linux defined - for features scattered in various
@@ -471,60 +468,60 @@
  *
  * Reuse free bits when adding new feature flags!
  */
-#define X86_FEATURE_AMD_LBR_PMC_FREEZE (21*32+ 0) /* AMD LBR and PMC Freeze */
+#define X86_FEATURE_AMD_LBR_PMC_FREEZE (21*32+ 0) /* "amd_lbr_pmc_freeze" AMD LBR and PMC Freeze */
-#define X86_FEATURE_CLEAR_BHB_LOOP (21*32+ 1) /* "" Clear branch history at syscall entry using SW loop */
+#define X86_FEATURE_CLEAR_BHB_LOOP (21*32+ 1) /* Clear branch history at syscall entry using SW loop */
-#define X86_FEATURE_BHI_CTRL (21*32+ 2) /* "" BHI_DIS_S HW control available */
+#define X86_FEATURE_BHI_CTRL (21*32+ 2) /* BHI_DIS_S HW control available */
-#define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* "" BHI_DIS_S HW control enabled */
+#define X86_FEATURE_CLEAR_BHB_HW (21*32+ 3) /* BHI_DIS_S HW control enabled */
-#define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* "" Clear branch history at vmexit using SW loop */
+#define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* Clear branch history at vmexit using SW loop */
-#define X86_FEATURE_FAST_CPPC (21*32 + 5) /* "" AMD Fast CPPC */
+#define X86_FEATURE_FAST_CPPC (21*32 + 5) /* AMD Fast CPPC */

 /*
  * BUG word(s)
  */
 #define X86_BUG(x) (NCAPINTS*32 + (x))

-#define X86_BUG_F00F X86_BUG(0) /* Intel F00F */
+#define X86_BUG_F00F X86_BUG(0) /* "f00f" Intel F00F */
-#define X86_BUG_FDIV X86_BUG(1) /* FPU FDIV */
+#define X86_BUG_FDIV X86_BUG(1) /* "fdiv" FPU FDIV */
-#define X86_BUG_COMA X86_BUG(2) /* Cyrix 6x86 coma */
+#define X86_BUG_COMA X86_BUG(2) /* "coma" Cyrix 6x86 coma */
 #define X86_BUG_AMD_TLB_MMATCH X86_BUG(3) /* "tlb_mmatch" AMD Erratum 383 */
 #define X86_BUG_AMD_APIC_C1E X86_BUG(4) /* "apic_c1e" AMD Erratum 400 */
-#define X86_BUG_11AP X86_BUG(5) /* Bad local APIC aka 11AP */
+#define X86_BUG_11AP X86_BUG(5) /* "11ap" Bad local APIC aka 11AP */
-#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* FXSAVE leaks FOP/FIP/FOP */
+#define X86_BUG_FXSAVE_LEAK X86_BUG(6) /* "fxsave_leak" FXSAVE leaks FOP/FIP/FOP */
-#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* AAI65, CLFLUSH required before MONITOR */
+#define X86_BUG_CLFLUSH_MONITOR X86_BUG(7) /* "clflush_monitor" AAI65, CLFLUSH required before MONITOR */
-#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* SYSRET doesn't fix up SS attrs */
+#define X86_BUG_SYSRET_SS_ATTRS X86_BUG(8) /* "sysret_ss_attrs" SYSRET doesn't fix up SS attrs */
 #ifdef CONFIG_X86_32
 /*
  * 64-bit kernels don't use X86_BUG_ESPFIX. Make the define conditional
  * to avoid confusion.
  */
-#define X86_BUG_ESPFIX X86_BUG(9) /* "" IRET to 16-bit SS corrupts ESP/RSP high bits */
+#define X86_BUG_ESPFIX X86_BUG(9) /* IRET to 16-bit SS corrupts ESP/RSP high bits */
 #endif
-#define X86_BUG_NULL_SEG X86_BUG(10) /* Nulling a selector preserves the base */
+#define X86_BUG_NULL_SEG X86_BUG(10) /* "null_seg" Nulling a selector preserves the base */
-#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* SWAPGS without input dep on GS */
+#define X86_BUG_SWAPGS_FENCE X86_BUG(11) /* "swapgs_fence" SWAPGS without input dep on GS */
-#define X86_BUG_MONITOR X86_BUG(12) /* IPI required to wake up remote CPU */
+#define X86_BUG_MONITOR X86_BUG(12) /* "monitor" IPI required to wake up remote CPU */
-#define X86_BUG_AMD_E400 X86_BUG(13) /* CPU is among the affected by Erratum 400 */
+#define X86_BUG_AMD_E400 X86_BUG(13) /* "amd_e400" CPU is among the affected by Erratum 400 */
-#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* CPU is affected by meltdown attack and needs kernel page table isolation */
+#define X86_BUG_CPU_MELTDOWN X86_BUG(14) /* "cpu_meltdown" CPU is affected by meltdown attack and needs kernel page table isolation */
-#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* CPU is affected by Spectre variant 1 attack with conditional branches */
+#define X86_BUG_SPECTRE_V1 X86_BUG(15) /* "spectre_v1" CPU is affected by Spectre variant 1 attack with conditional branches */
-#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* CPU is affected by Spectre variant 2 attack with indirect branches */
+#define X86_BUG_SPECTRE_V2 X86_BUG(16) /* "spectre_v2" CPU is affected by Spectre variant 2 attack with indirect branches */
-#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* CPU is affected by speculative store bypass attack */
+#define X86_BUG_SPEC_STORE_BYPASS X86_BUG(17) /* "spec_store_bypass" CPU is affected by speculative store bypass attack */
-#define X86_BUG_L1TF X86_BUG(18) /* CPU is affected by L1 Terminal Fault */
+#define X86_BUG_L1TF X86_BUG(18) /* "l1tf" CPU is affected by L1 Terminal Fault */
-#define X86_BUG_MDS X86_BUG(19) /* CPU is affected by Microarchitectural data sampling */
+#define X86_BUG_MDS X86_BUG(19) /* "mds" CPU is affected by Microarchitectural data sampling */
-#define X86_BUG_MSBDS_ONLY X86_BUG(20) /* CPU is only affected by the MSDBS variant of BUG_MDS */
+#define X86_BUG_MSBDS_ONLY X86_BUG(20) /* "msbds_only" CPU is only affected by the MSDBS variant of BUG_MDS */
-#define X86_BUG_SWAPGS X86_BUG(21) /* CPU is affected by speculation through SWAPGS */
+#define X86_BUG_SWAPGS X86_BUG(21) /* "swapgs" CPU is affected by speculation through SWAPGS */
-#define X86_BUG_TAA X86_BUG(22) /* CPU is affected by TSX Async Abort(TAA) */
+#define X86_BUG_TAA X86_BUG(22) /* "taa" CPU is affected by TSX Async Abort(TAA) */
-#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* CPU may incur MCE during certain page attribute changes */
+#define X86_BUG_ITLB_MULTIHIT X86_BUG(23) /* "itlb_multihit" CPU may incur MCE during certain page attribute changes */
-#define X86_BUG_SRBDS X86_BUG(24) /* CPU may leak RNG bits if not mitigated */
+#define X86_BUG_SRBDS X86_BUG(24) /* "srbds" CPU may leak RNG bits if not mitigated */
-#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* CPU is affected by Processor MMIO Stale Data vulnerabilities */
+#define X86_BUG_MMIO_STALE_DATA X86_BUG(25) /* "mmio_stale_data" CPU is affected by Processor MMIO Stale Data vulnerabilities */
-#define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* CPU is too old and its MMIO Stale Data status is unknown */
+#define X86_BUG_MMIO_UNKNOWN X86_BUG(26) /* "mmio_unknown" CPU is too old and its MMIO Stale Data status is unknown */
-#define X86_BUG_RETBLEED X86_BUG(27) /* CPU is affected by RETBleed */
+#define X86_BUG_RETBLEED X86_BUG(27) /* "retbleed" CPU is affected by RETBleed */
-#define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* EIBRS is vulnerable to Post Barrier RSB Predictions */
+#define X86_BUG_EIBRS_PBRSB X86_BUG(28) /* "eibrs_pbrsb" EIBRS is vulnerable to Post Barrier RSB Predictions */
-#define X86_BUG_SMT_RSB X86_BUG(29) /* CPU is vulnerable to Cross-Thread Return Address Predictions */
+#define X86_BUG_SMT_RSB X86_BUG(29) /* "smt_rsb" CPU is vulnerable to Cross-Thread Return Address Predictions */
-#define X86_BUG_GDS X86_BUG(30) /* CPU is affected by Gather Data Sampling */
+#define X86_BUG_GDS X86_BUG(30) /* "gds" CPU is affected by Gather Data Sampling */
-#define X86_BUG_TDX_PW_MCE X86_BUG(31) /* CPU may incur #MC if non-TD software does partial write to TDX private memory */
+#define X86_BUG_TDX_PW_MCE X86_BUG(31) /* "tdx_pw_mce" CPU may incur #MC if non-TD software does partial write to TDX private memory */

 /* BUG word 2 */
-#define X86_BUG_SRSO X86_BUG(1*32 + 0) /* AMD SRSO bug */
+#define X86_BUG_SRSO X86_BUG(1*32 + 0) /* "srso" AMD SRSO bug */
-#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */
+#define X86_BUG_DIV0 X86_BUG(1*32 + 1) /* "div0" AMD DIV0 speculation bug */
-#define X86_BUG_RFDS X86_BUG(1*32 + 2) /* CPU is vulnerable to Register File Data Sampling */
+#define X86_BUG_RFDS X86_BUG(1*32 + 2) /* "rfds" CPU is vulnerable to Register File Data Sampling */
-#define X86_BUG_BHI X86_BUG(1*32 + 3) /* CPU is affected by Branch History Injection */
+#define X86_BUG_BHI X86_BUG(1*32 + 3) /* "bhi" CPU is affected by Branch History Injection */
 #endif /* _ASM_X86_CPUFEATURES_H */
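Note on the hunks above: the direction of the comment convention is flipped here. A feature flag's /proc/cpuinfo name now comes from the quoted string in its comment (e.g. "smep"), while flags whose comments carry no quoted name are not exported; previously a bare "" was needed to hide a flag. A minimal sketch of checking for one of these exported names from user space (the helper below is illustrative and not part of this commit):

    #!/usr/bin/python3
    # Illustrative only: look up a flag name that the quoted comment
    # strings above export via /proc/cpuinfo on x86.
    def cpu_has_flag(flag):
        with open('/proc/cpuinfo') as f:
            for line in f:
                if line.startswith('flags'):
                    return flag in line.split(':', 1)[1].split()
        return False

    print(cpu_has_flag('smep'))   # True on CPUs exporting "smep"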
tools/testing/selftests/Makefile
@@ -86,6 +86,7 @@ TARGETS += user
 TARGETS += vDSO
 TARGETS += mm
 TARGETS += x86
+TARGETS += x86/bugs
 TARGETS += zram
 #Please keep the TARGETS list alphabetically sorted
 # Run "make quicktest=1 run_tests" or
@@ -227,6 +228,7 @@ ifdef INSTALL_PATH
 	install -m 744 kselftest/runner.sh $(INSTALL_PATH)/kselftest/
 	install -m 744 kselftest/prefix.pl $(INSTALL_PATH)/kselftest/
 	install -m 744 kselftest/ktap_helpers.sh $(INSTALL_PATH)/kselftest/
+	install -m 744 kselftest/ksft.py $(INSTALL_PATH)/kselftest/
 	install -m 744 run_kselftest.sh $(INSTALL_PATH)/
 	install -m 744 prepare_system.sh $(INSTALL_PATH)/
 	rm -f $(TEST_LIST)
tools/testing/selftests/kselftest/kselftest.h
@@ -137,6 +137,11 @@ static inline void ksft_set_plan(unsigned int plan)

 static inline void ksft_print_cnts(void)
 {
+	if (ksft_cnt.ksft_xskip > 0)
+		printf(
+			"# %u skipped test(s) detected. Consider enabling relevant config options to improve coverage.\n",
+			ksft_cnt.ksft_xskip
+		);
 	if (ksft_plan != ksft_test_num())
 		printf("# Planned tests != run tests (%u != %u)\n",
 			ksft_plan, ksft_test_num());
tools/testing/selftests/kselftest/ksft.py (new file, 93 lines)
@@ -0,0 +1,93 @@
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2023 Collabora Ltd
#
# Kselftest helpers for outputting in KTAP format. Based on kselftest.h.
#

import sys

ksft_cnt = {"pass": 0, "fail": 0, "skip": 0}
ksft_num_tests = 0
ksft_test_number = 1

KSFT_PASS = 0
KSFT_FAIL = 1
KSFT_SKIP = 4


def print_header():
    print("TAP version 13")


def set_plan(num_tests):
    global ksft_num_tests
    ksft_num_tests = num_tests
    print("1..{}".format(num_tests))


def print_cnts():
    if ksft_cnt['skip'] > 0:
        print(f"# {ksft_cnt['skip']} skipped test(s) detected. Consider enabling relevant config options to improve coverage.")

    print(
        f"# Totals: pass:{ksft_cnt['pass']} fail:{ksft_cnt['fail']} xfail:0 xpass:0 skip:{ksft_cnt['skip']} error:0"
    )


def print_msg(msg):
    print(f"# {msg}")


def _test_print(result, description, directive=None):
    if directive:
        directive_str = f"# {directive}"
    else:
        directive_str = ""

    global ksft_test_number
    print(f"{result} {ksft_test_number} {description} {directive_str}")
    ksft_test_number += 1


def test_result_pass(description):
    _test_print("ok", description)
    ksft_cnt["pass"] += 1


def test_result_fail(description):
    _test_print("not ok", description)
    ksft_cnt["fail"] += 1


def test_result_skip(description):
    _test_print("ok", description, "SKIP")
    ksft_cnt["skip"] += 1


def test_result(condition, description=""):
    if condition:
        test_result_pass(description)
    else:
        test_result_fail(description)


def finished():
    if ksft_cnt["pass"] + ksft_cnt["skip"] == ksft_num_tests:
        exit_code = KSFT_PASS
    else:
        exit_code = KSFT_FAIL

    print_cnts()

    sys.exit(exit_code)


def exit_fail():
    print_cnts()
    sys.exit(KSFT_FAIL)


def exit_pass():
    print_cnts()
    sys.exit(KSFT_PASS)
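A selftest drives this module in the usual KTAP sequence: header, plan, per-test results, totals. A minimal usage sketch (the checks themselves are made up for illustration):

    import ksft

    ksft.print_header()
    ksft.set_plan(2)
    ksft.test_result(1 + 1 == 2, "arithmetic sanity")   # counted as pass
    ksft.test_result_skip("needs root")                 # counted as skip
    ksft.finished()  # pass + skip == plan, so this exits with KSFT_PASS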
tools/testing/selftests/kselftest/ktap_helpers.sh
@@ -107,5 +107,9 @@ ktap_finished() {
 }

 ktap_print_totals() {
+	if [ "$KTAP_CNT_SKIP" -gt 0 ]; then
+		echo "# $KTAP_CNT_SKIP skipped test(s) detected. " \
+			"Consider enabling relevant config options to improve coverage."
+	fi
 	echo "# Totals: pass:$KTAP_CNT_PASS fail:$KTAP_CNT_FAIL xfail:0 xpass:0 skip:$KTAP_CNT_SKIP error:0"
 }
tools/testing/selftests/x86/bugs/Makefile (new file, 3 lines)
@@ -0,0 +1,3 @@
TEST_PROGS := its_sysfs.py its_permutations.py its_indirect_alignment.py its_ret_alignment.py
TEST_FILES := common.py
include ../../lib.mk
tools/testing/selftests/x86/bugs/common.py (new executable file, 164 lines)
@@ -0,0 +1,164 @@
#! /usr/bin/python3
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2025 Intel Corporation
#
# This contains kselftest framework adapted common functions for testing
# mitigation for x86 bugs.

import os, sys, re, shutil

sys.path.insert(0, '../../kselftest')
import ksft

def read_file(path):
    if not os.path.exists(path):
        return None
    with open(path, 'r') as file:
        return file.read().strip()

def cpuinfo_has(arg):
    cpuinfo = read_file('/proc/cpuinfo')
    if arg in cpuinfo:
        return True
    return False

def cmdline_has(arg):
    cmdline = read_file('/proc/cmdline')
    if arg in cmdline:
        return True
    return False

def cmdline_has_either(args):
    cmdline = read_file('/proc/cmdline')
    for arg in args:
        if arg in cmdline:
            return True
    return False

def cmdline_has_none(args):
    return not cmdline_has_either(args)

def cmdline_has_all(args):
    cmdline = read_file('/proc/cmdline')
    for arg in args:
        if arg not in cmdline:
            return False
    return True

def get_sysfs(bug):
    return read_file("/sys/devices/system/cpu/vulnerabilities/" + bug)

def sysfs_has(bug, mitigation):
    status = get_sysfs(bug)
    if mitigation in status:
        return True
    return False

def sysfs_has_either(bugs, mitigations):
    for bug in bugs:
        for mitigation in mitigations:
            if sysfs_has(bug, mitigation):
                return True
    return False

def sysfs_has_none(bugs, mitigations):
    return not sysfs_has_either(bugs, mitigations)

def sysfs_has_all(bugs, mitigations):
    for bug in bugs:
        for mitigation in mitigations:
            if not sysfs_has(bug, mitigation):
                return False
    return True

def bug_check_pass(bug, found):
    ksft.print_msg(f"\nFound: {found}")
    # ksft.print_msg(f"\ncmdline: {read_file('/proc/cmdline')}")
    ksft.test_result_pass(f'{bug}: {found}')

def bug_check_fail(bug, found, expected):
    ksft.print_msg(f'\nFound:\t {found}')
    ksft.print_msg(f'Expected:\t {expected}')
    ksft.print_msg(f"\ncmdline: {read_file('/proc/cmdline')}")
    ksft.test_result_fail(f'{bug}: {found}')

def bug_status_unknown(bug, found):
    ksft.print_msg(f'\nUnknown status: {found}')
    ksft.print_msg(f"\ncmdline: {read_file('/proc/cmdline')}")
    ksft.test_result_fail(f'{bug}: {found}')

def basic_checks_sufficient(bug, mitigation):
    if not mitigation:
        bug_status_unknown(bug, "None")
        return True
    elif mitigation == "Not affected":
        ksft.test_result_pass(bug)
        return True
    elif mitigation == "Vulnerable":
        if cmdline_has_either([f'{bug}=off', 'mitigations=off']):
            bug_check_pass(bug, mitigation)
            return True
    return False

def get_section_info(vmlinux, section_name):
    from elftools.elf.elffile import ELFFile
    with open(vmlinux, 'rb') as f:
        elffile = ELFFile(f)
        section = elffile.get_section_by_name(section_name)
        if section is None:
            ksft.print_msg("Available sections in vmlinux:")
            for sec in elffile.iter_sections():
                ksft.print_msg(sec.name)
            raise ValueError(f"Section {section_name} not found in {vmlinux}")
        return section['sh_addr'], section['sh_offset'], section['sh_size']

def get_patch_sites(vmlinux, offset, size):
    import struct
    output = []
    with open(vmlinux, 'rb') as f:
        f.seek(offset)
        i = 0
        while i < size:
            data = f.read(4)  # s32
            if not data:
                break
            sym_offset = struct.unpack('<i', data)[0] + i
            i += 4
            output.append(sym_offset)
    return output

def get_instruction_from_vmlinux(elffile, section, virtual_address, target_address):
    from capstone import Cs, CS_ARCH_X86, CS_MODE_64
    section_start = section['sh_addr']
    section_end = section_start + section['sh_size']

    if not (section_start <= target_address < section_end):
        return None

    offset = target_address - section_start
    code = section.data()[offset:offset + 16]

    cap = init_capstone()
    for instruction in cap.disasm(code, target_address):
        if instruction.address == target_address:
            return instruction
    return None

def init_capstone():
    from capstone import Cs, CS_ARCH_X86, CS_MODE_64, CS_OPT_SYNTAX_ATT
    cap = Cs(CS_ARCH_X86, CS_MODE_64)
    cap.syntax = CS_OPT_SYNTAX_ATT
    return cap

def get_runtime_kernel():
    import drgn
    return drgn.program_from_kernel()

def check_dependencies_or_skip(modules, script_name="unknown test"):
    for mod in modules:
        try:
            __import__(mod)
        except ImportError:
            ksft.test_result_skip(f"Skipping {script_name}: missing module '{mod}'")
            ksft.finished()
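Taken together, a test script typically reads the sysfs mitigation string, lets basic_checks_sufficient() settle the trivial outcomes (missing file, "Not affected", or "Vulnerable" with mitigations turned off), and only then applies bug-specific logic. A hypothetical sketch, with an illustrative expected string:

    import sys
    sys.path.insert(0, '../../kselftest')
    import ksft
    import common as c

    bug = "indirect_target_selection"
    mitigation = c.get_sysfs(bug)
    if not c.basic_checks_sufficient(bug, mitigation):
        # Bug-specific check: any "Mitigation:" prefix counts as mitigated here.
        if c.sysfs_has(bug, "Mitigation:"):
            c.bug_check_pass(bug, mitigation)
        else:
            c.bug_check_fail(bug, mitigation, "Mitigation: <expected>")
    ksft.finished()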
150
tools/testing/selftests/x86/bugs/its_indirect_alignment.py
Executable file
150
tools/testing/selftests/x86/bugs/its_indirect_alignment.py
Executable file
@ -0,0 +1,150 @@
|
|||||||
|
#! /usr/bin/python3
|
||||||
|
# SPDX-License-Identifier: GPL-2.0
|
||||||
|
#
|
||||||
|
# Copyright (c) 2025 Intel Corporation
|
||||||
|
#
|
||||||
|
# Test for indirect target selection (ITS) mitigation.
|
||||||
|
#
|
||||||
|
# Test if indirect CALL/JMP are correctly patched by evaluating
|
||||||
|
# the vmlinux .retpoline_sites in /proc/kcore.
|
||||||
|
|
||||||
|
# Install dependencies
|
||||||
|
# add-apt-repository ppa:michel-slm/kernel-utils
|
||||||
|
# apt update
|
||||||
|
# apt install -y python3-drgn python3-pyelftools python3-capstone
|
||||||
|
#
|
||||||
|
# Best to copy the vmlinux at a standard location:
|
||||||
|
# mkdir -p /usr/lib/debug/lib/modules/$(uname -r)
|
||||||
|
# cp $VMLINUX /usr/lib/debug/lib/modules/$(uname -r)/vmlinux
|
||||||
|
#
|
||||||
|
# Usage: ./its_indirect_alignment.py [vmlinux]
|
||||||
|
|
||||||
|
import os, sys, argparse
|
||||||
|
from pathlib import Path
|
||||||
|
|
||||||
|
this_dir = os.path.dirname(os.path.realpath(__file__))
|
||||||
|
sys.path.insert(0, this_dir + '/../../kselftest')
|
||||||
|
import ksft
|
||||||
|
import common as c
|
||||||
|
|
||||||
|
bug = "indirect_target_selection"
|
||||||
|
|
||||||
|
mitigation = c.get_sysfs(bug)
|
||||||
|
if not mitigation or "Aligned branch/return thunks" not in mitigation:
|
||||||
|
ksft.test_result_skip("Skipping its_indirect_alignment.py: Aligned branch/return thunks not enabled")
|
||||||
|
ksft.finished()
|
||||||
|
|
||||||
|
if c.sysfs_has("spectre_v2", "Retpolines"):
|
||||||
|
ksft.test_result_skip("Skipping its_indirect_alignment.py: Retpolines deployed")
|
||||||
|
ksft.finished()
|
||||||
|
|
||||||
|
c.check_dependencies_or_skip(['drgn', 'elftools', 'capstone'], script_name="its_indirect_alignment.py")
|
||||||
|
|
||||||
|
from elftools.elf.elffile import ELFFile
|
||||||
|
from drgn.helpers.common.memory import identify_address
|
||||||
|
|
||||||
|
cap = c.init_capstone()
|
||||||
|
|
||||||
|
if len(os.sys.argv) > 1:
|
||||||
|
arg_vmlinux = os.sys.argv[1]
|
||||||
|
if not os.path.exists(arg_vmlinux):
|
||||||
|
ksft.test_result_fail(f"its_indirect_alignment.py: vmlinux not found at argument path: {arg_vmlinux}")
|
||||||
|
ksft.exit_fail()
|
||||||
|
os.makedirs(f"/usr/lib/debug/lib/modules/{os.uname().release}", exist_ok=True)
|
||||||
|
os.system(f'cp {arg_vmlinux} /usr/lib/debug/lib/modules/$(uname -r)/vmlinux')
|
||||||
|
|
||||||
|
vmlinux = f"/usr/lib/debug/lib/modules/{os.uname().release}/vmlinux"
|
||||||
|
if not os.path.exists(vmlinux):
|
||||||
|
ksft.test_result_fail(f"its_indirect_alignment.py: vmlinux not found at {vmlinux}")
|
||||||
|
ksft.exit_fail()
|
||||||
|
|
||||||
|
ksft.print_msg(f"Using vmlinux: {vmlinux}")
|
||||||
|
|
||||||
|
retpolines_start_vmlinux, retpolines_sec_offset, size = c.get_section_info(vmlinux, '.retpoline_sites')
|
||||||
|
ksft.print_msg(f"vmlinux: Section .retpoline_sites (0x{retpolines_start_vmlinux:x}) found at 0x{retpolines_sec_offset:x} with size 0x{size:x}")
|
||||||
|
|
||||||
|
sites_offset = c.get_patch_sites(vmlinux, retpolines_sec_offset, size)
|
||||||
|
total_retpoline_tests = len(sites_offset)
|
||||||
|
ksft.print_msg(f"Found {total_retpoline_tests} retpoline sites")
|
||||||
|
|
||||||
|
prog = c.get_runtime_kernel()
|
||||||
|
retpolines_start_kcore = prog.symbol('__retpoline_sites').address
|
||||||
|
ksft.print_msg(f'kcore: __retpoline_sites: 0x{retpolines_start_kcore:x}')
|
||||||
|
|
||||||
|
x86_indirect_its_thunk_r15 = prog.symbol('__x86_indirect_its_thunk_r15').address
|
||||||
|
ksft.print_msg(f'kcore: __x86_indirect_its_thunk_r15: 0x{x86_indirect_its_thunk_r15:x}')
|
||||||
|
|
||||||
|
tests_passed = 0
|
||||||
|
tests_failed = 0
|
||||||
|
tests_unknown = 0
|
||||||
|
|
||||||
|
with open(vmlinux, 'rb') as f:
|
||||||
|
elffile = ELFFile(f)
|
||||||
|
text_section = elffile.get_section_by_name('.text')
|
||||||
|
|
||||||
|
for i in range(0, len(sites_offset)):
|
||||||
|
site = retpolines_start_kcore + sites_offset[i]
|
||||||
|
vmlinux_site = retpolines_start_vmlinux + sites_offset[i]
|
||||||
|
passed = unknown = failed = False
|
||||||
|
try:
|
||||||
|
vmlinux_insn = c.get_instruction_from_vmlinux(elffile, text_section, text_section['sh_addr'], vmlinux_site)
|
||||||
|
kcore_insn = list(cap.disasm(prog.read(site, 16), site))[0]
|
||||||
|
operand = kcore_insn.op_str
|
||||||
|
insn_end = site + kcore_insn.size - 1 # TODO handle Jcc.32 __x86_indirect_thunk_\reg
|
||||||
|
safe_site = insn_end & 0x20
|
||||||
|
site_status = "" if safe_site else "(unsafe)"
|
||||||
|
|
||||||
|
ksft.print_msg(f"\nSite {i}: {identify_address(prog, site)} <0x{site:x}> {site_status}")
|
||||||
|
ksft.print_msg(f"\tvmlinux: 0x{vmlinux_insn.address:x}:\t{vmlinux_insn.mnemonic}\t{vmlinux_insn.op_str}")
|
||||||
|
ksft.print_msg(f"\tkcore: 0x{kcore_insn.address:x}:\t{kcore_insn.mnemonic}\t{kcore_insn.op_str}")
|
||||||
|
|
||||||
|
if (site & 0x20) ^ (insn_end & 0x20):
|
||||||
|
ksft.print_msg(f"\tSite at safe/unsafe boundary: {str(kcore_insn.bytes)} {kcore_insn.mnemonic} {operand}")
            if safe_site:
                tests_passed += 1
                passed = True
                ksft.print_msg(f"\tPASSED: At safe address")
                continue

            if operand.startswith('0xffffffff'):
                thunk = int(operand, 16)
                if thunk > x86_indirect_its_thunk_r15:
                    insn_at_thunk = list(cap.disasm(prog.read(thunk, 16), thunk))[0]
                    operand += ' -> ' + insn_at_thunk.mnemonic + ' ' + insn_at_thunk.op_str + ' <dynamic-thunk?>'
                    if 'jmp' in insn_at_thunk.mnemonic and thunk & 0x20:
                        ksft.print_msg(f"\tPASSED: Found {operand} at safe address")
                        passed = True
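                # targets that are not dynamic thunks are resolved through the
                # symbol table next: static ITS thunks pass, and so do plain
                # direct branches, which need no ITS thunk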
                if not passed:
                    if kcore_insn.operands[0].type == capstone.CS_OP_IMM:
                        # prog.symbol() returns a drgn Symbol; use .name for string concatenation
                        operand += ' <' + prog.symbol(int(operand, 16)).name + '>'
                        if '__x86_indirect_its_thunk_' in operand:
                            ksft.print_msg(f"\tPASSED: Found {operand}")
                        else:
                            ksft.print_msg(f"\tPASSED: Found direct branch: {kcore_insn}, ITS thunk not required.")
                        passed = True
                    else:
                        unknown = True
            if passed:
                tests_passed += 1
            elif unknown:
                ksft.print_msg(f"UNKNOWN: unexpected operand: {kcore_insn}")
                tests_unknown += 1
            else:
                ksft.print_msg(f'\t************* FAILED *************')
                ksft.print_msg(f"\tFound {kcore_insn.bytes} {kcore_insn.mnemonic} {operand}")
                ksft.print_msg(f'\t**********************************')
                tests_failed += 1
        except Exception as e:
            ksft.print_msg(f"UNKNOWN: An unexpected error occurred: {e}")
            tests_unknown += 1

ksft.print_msg(f"\n\nSummary:")
ksft.print_msg(f"PASS: \t{tests_passed} \t/ {total_retpoline_tests}")
ksft.print_msg(f"FAIL: \t{tests_failed} \t/ {total_retpoline_tests}")
ksft.print_msg(f"UNKNOWN: \t{tests_unknown} \t/ {total_retpoline_tests}")

if tests_failed == 0:
    ksft.test_result_pass("All ITS indirect branch sites passed")
else:
    ksft.test_result_fail(f"{tests_failed} ITS indirect branch sites failed")
ksft.finished()
109
tools/testing/selftests/x86/bugs/its_permutations.py
Executable file
109
tools/testing/selftests/x86/bugs/its_permutations.py
Executable file
@ -0,0 +1,109 @@
#! /usr/bin/python3
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2025 Intel Corporation
#
# Test for indirect target selection (ITS) cmdline permutations with other bugs
# like spectre_v2 and retbleed.

import os, sys, subprocess, itertools, re, shutil

test_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, test_dir + '/../../kselftest')
import ksft
import common as c

bug = "indirect_target_selection"
mitigation = c.get_sysfs(bug)

if not mitigation or "Not affected" in mitigation:
    ksft.test_result_skip("Skipping its_permutations.py: not applicable")
    ksft.finished()

if shutil.which('vng') is None:
    ksft.test_result_skip("Skipping its_permutations.py: virtme-ng ('vng') not found in PATH.")
    ksft.finished()
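# note: ksft.finished() exits the process, so a skip above ends the script here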

TEST = f"{test_dir}/its_sysfs.py"
default_kparam = ['clearcpuid=hypervisor', 'panic=5', 'panic_on_warn=1', 'oops=panic', 'nmi_watchdog=1', 'hung_task_panic=1']
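# the extra parameters make guest failures loud: any warning, oops or hang
# panics the VM, and clearcpuid=hypervisor makes the guest choose mitigations
# as if it were running on bare metal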

DEBUG = " -v "

# Install dependencies
# https://github.com/arighi/virtme-ng
# apt install virtme-ng
BOOT_CMD = f"vng --run {test_dir}/../../../../../arch/x86/boot/bzImage "
#BOOT_CMD += DEBUG

input_options = {
    'indirect_target_selection' : ['off', 'on', 'stuff', 'vmexit'],
    'retbleed' : ['off', 'stuff', 'auto'],
    'spectre_v2' : ['off', 'on', 'eibrs', 'retpoline', 'ibrs', 'eibrs,retpoline'],
}
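# itertools.product() below expands this to the full cross product:
# 4 x 3 x 6 = 72 boot/test combinations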

def pretty_print(output):
    OKBLUE = '\033[94m'
    OKGREEN = '\033[92m'
    WARNING = '\033[93m'
    FAIL = '\033[91m'
    ENDC = '\033[0m'
    BOLD = '\033[1m'

    # Define patterns and their corresponding colors
    patterns = {
        r"^ok \d+": OKGREEN,
        r"^not ok \d+": FAIL,
        r"^# Testing .*": OKBLUE,
        r"^# Found: .*": WARNING,
        r"^# Totals: .*": BOLD,
        r"pass:([1-9]\d*)": OKGREEN,
        r"fail:([1-9]\d*)": FAIL,
        r"skip:([1-9]\d*)": WARNING,
    }

    # Apply colors based on patterns
    for pattern, color in patterns.items():
        output = re.sub(pattern, lambda match: f"{color}{match.group(0)}{ENDC}", output, flags=re.MULTILINE)

    print(output)

combinations = list(itertools.product(*input_options.values()))
ksft.print_header()
ksft.set_plan(len(combinations))

logs = ""

for combination in combinations:
    append = ""
    log = ""
    for p in default_kparam:
        append += f' --append={p}'
    command = BOOT_CMD + append
    test_params = ""
    for i, key in enumerate(input_options.keys()):
        param = f'{key}={combination[i]}'
        test_params += f' {param}'
        command += f" --append={param}"
    command += f" -- {TEST}"
    test_name = f"{bug} {test_params}"
    pretty_print(f'# Testing {test_name}')
    t = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    # communicate() waits for the child while draining the pipe; a bare
    # wait() before it could deadlock once the pipe buffer fills up
    output, _ = t.communicate()
    if t.returncode == 0:
        ksft.test_result_pass(test_name)
    else:
        ksft.test_result_fail(test_name)
    output = output.decode()
    log += f" {output}"
    pretty_print(log)
    logs += output + "\n"

# Optionally use tappy to parse the output
# apt install python3-tappy
with open("logs.txt", "w") as f:
    f.write(logs)

ksft.finished()
139
tools/testing/selftests/x86/bugs/its_ret_alignment.py
Executable file
139
tools/testing/selftests/x86/bugs/its_ret_alignment.py
Executable file
@ -0,0 +1,139 @@
#! /usr/bin/python3
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2025 Intel Corporation
#
# Test for indirect target selection (ITS) mitigation.
#
# Tests if the RETs are correctly patched by evaluating the
# vmlinux .return_sites in /proc/kcore.
#
# Install dependencies
# add-apt-repository ppa:michel-slm/kernel-utils
# apt update
# apt install -y python3-drgn python3-pyelftools python3-capstone
#
# Run on target machine
# mkdir -p /usr/lib/debug/lib/modules/$(uname -r)
# cp $VMLINUX /usr/lib/debug/lib/modules/$(uname -r)/vmlinux
#
# Usage: ./its_ret_alignment.py

import os, sys

this_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, this_dir + '/../../kselftest')
import ksft
import common as c

bug = "indirect_target_selection"
mitigation = c.get_sysfs(bug)
if not mitigation or "Aligned branch/return thunks" not in mitigation:
    ksft.test_result_skip("Skipping its_ret_alignment.py: Aligned branch/return thunks not enabled")
    ksft.finished()

c.check_dependencies_or_skip(['drgn', 'elftools', 'capstone'], script_name="its_ret_alignment.py")
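# the heavyweight imports are deferred until after the dependency check so
# that a missing drgn/pyelftools/capstone results in a skip, not a traceback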

from elftools.elf.elffile import ELFFile
from drgn.helpers.common.memory import identify_address

cap = c.init_capstone()

if len(sys.argv) > 1:
    arg_vmlinux = sys.argv[1]
    if not os.path.exists(arg_vmlinux):
        ksft.test_result_fail(f"its_ret_alignment.py: vmlinux not found at user-supplied path: {arg_vmlinux}")
        ksft.exit_fail()
    os.makedirs(f"/usr/lib/debug/lib/modules/{os.uname().release}", exist_ok=True)
    os.system(f'cp {arg_vmlinux} /usr/lib/debug/lib/modules/$(uname -r)/vmlinux')

vmlinux = f"/usr/lib/debug/lib/modules/{os.uname().release}/vmlinux"
if not os.path.exists(vmlinux):
    ksft.test_result_fail(f"its_ret_alignment.py: vmlinux not found at {vmlinux}")
    ksft.exit_fail()

ksft.print_msg(f"Using vmlinux: {vmlinux}")

rethunks_start_vmlinux, rethunks_sec_offset, size = c.get_section_info(vmlinux, '.return_sites')
ksft.print_msg(f"vmlinux: Section .return_sites (0x{rethunks_start_vmlinux:x}) found at 0x{rethunks_sec_offset:x} with size 0x{size:x}")

sites_offset = c.get_patch_sites(vmlinux, rethunks_sec_offset, size)
total_rethunk_tests = len(sites_offset)
ksft.print_msg(f"Found {total_rethunk_tests} rethunk sites")

prog = c.get_runtime_kernel()
rethunks_start_kcore = prog.symbol('__return_sites').address
ksft.print_msg(f'kcore: __return_sites: 0x{rethunks_start_kcore:x}')

its_return_thunk = prog.symbol('its_return_thunk').address
ksft.print_msg(f'kcore: its_return_thunk: 0x{its_return_thunk:x}')

tests_passed = 0
tests_failed = 0
tests_unknown = 0
tests_skipped = 0

with open(vmlinux, 'rb') as f:
    elffile = ELFFile(f)
    text_section = elffile.get_section_by_name('.text')

    for i in range(len(sites_offset)):
        site = rethunks_start_kcore + sites_offset[i]
        vmlinux_site = rethunks_start_vmlinux + sites_offset[i]
        try:
            passed = unknown = failed = skipped = False

            symbol = identify_address(prog, site)
            vmlinux_insn = c.get_instruction_from_vmlinux(elffile, text_section, text_section['sh_addr'], vmlinux_site)
            kcore_insn = list(cap.disasm(prog.read(site, 16), site))[0]

            insn_end = site + kcore_insn.size - 1

            safe_site = insn_end & 0x20
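            # same cacheline rule as for indirect branches: bit 0x20 set in
            # the ret's last byte means the upper, unaffected half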
            site_status = "" if safe_site else "(unsafe)"

            ksft.print_msg(f"\nSite {i}: {symbol} <0x{site:x}> {site_status}")
            ksft.print_msg(f"\tvmlinux: 0x{vmlinux_insn.address:x}:\t{vmlinux_insn.mnemonic}\t{vmlinux_insn.op_str}")
            ksft.print_msg(f"\tkcore: 0x{kcore_insn.address:x}:\t{kcore_insn.mnemonic}\t{kcore_insn.op_str}")

            if safe_site:
                tests_passed += 1
                passed = True
                ksft.print_msg(f"\tPASSED: At safe address")
                continue

            if "jmp" in kcore_insn.mnemonic:
                passed = True
            elif "ret" not in kcore_insn.mnemonic:
                skipped = True
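            # an unsafe ret should have been patched into a jmp to
            # its_return_thunk; other mnemonics suggest the site was rewritten
            # by a different alternative, so it is skipped rather than failed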

            if passed:
                ksft.print_msg(f"\tPASSED: Found {kcore_insn.mnemonic} {kcore_insn.op_str}")
                tests_passed += 1
            elif skipped:
                ksft.print_msg(f"\tSKIPPED: Found '{kcore_insn.mnemonic}'")
                tests_skipped += 1
            elif unknown:
                ksft.print_msg(f"UNKNOWN: An unknown instruction: {kcore_insn}")
                tests_unknown += 1
            else:
                ksft.print_msg(f'\t************* FAILED *************')
                ksft.print_msg(f"\tFound {kcore_insn.mnemonic} {kcore_insn.op_str}")
                ksft.print_msg(f'\t**********************************')
                tests_failed += 1
        except Exception as e:
            ksft.print_msg(f"UNKNOWN: An unexpected error occurred: {e}")
            tests_unknown += 1

ksft.print_msg(f"\n\nSummary:")
ksft.print_msg(f"PASSED: \t{tests_passed} \t/ {total_rethunk_tests}")
ksft.print_msg(f"FAILED: \t{tests_failed} \t/ {total_rethunk_tests}")
ksft.print_msg(f"SKIPPED: \t{tests_skipped} \t/ {total_rethunk_tests}")
ksft.print_msg(f"UNKNOWN: \t{tests_unknown} \t/ {total_rethunk_tests}")

if tests_failed == 0:
    ksft.test_result_pass("All ITS return thunk sites passed.")
else:
    ksft.test_result_fail(f"{tests_failed} failed sites need ITS return thunks.")
ksft.finished()
65
tools/testing/selftests/x86/bugs/its_sysfs.py
Executable file
65
tools/testing/selftests/x86/bugs/its_sysfs.py
Executable file
@ -0,0 +1,65 @@
#! /usr/bin/python3
# SPDX-License-Identifier: GPL-2.0
#
# Copyright (c) 2025 Intel Corporation
#
# Test for Indirect Target Selection (ITS) mitigation sysfs status.

import sys, os

this_dir = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, this_dir + '/../../kselftest')
import ksft

from common import *

bug = "indirect_target_selection"
mitigation = get_sysfs(bug)
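# get_sysfs() reads the bug's vulnerabilities file, here
# /sys/devices/system/cpu/vulnerabilities/indirect_target_selection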

ITS_MITIGATION_ALIGNED_THUNKS = "Mitigation: Aligned branch/return thunks"
ITS_MITIGATION_RETPOLINE_STUFF = "Mitigation: Retpolines, Stuffing RSB"
ITS_MITIGATION_VMEXIT_ONLY = "Mitigation: Vulnerable, KVM: Not affected"
ITS_MITIGATION_VULNERABLE = "Vulnerable"
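# the expected strings for the indirect_target_selection= cmdline modes
# checked below: aligned thunks (=on, the default), retpoline + RSB stuffing
# (=stuff), vmexit-only (=vmexit), and Vulnerable (=off)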

def check_mitigation():
    if mitigation == ITS_MITIGATION_ALIGNED_THUNKS:
        if cmdline_has(f'{bug}=stuff') and sysfs_has("spectre_v2", "Retpolines"):
            bug_check_fail(bug, ITS_MITIGATION_ALIGNED_THUNKS, ITS_MITIGATION_RETPOLINE_STUFF)
            return
        if cmdline_has(f'{bug}=vmexit') and cpuinfo_has('its_native_only'):
            bug_check_fail(bug, ITS_MITIGATION_ALIGNED_THUNKS, ITS_MITIGATION_VMEXIT_ONLY)
            return
        bug_check_pass(bug, ITS_MITIGATION_ALIGNED_THUNKS)
        return

    if mitigation == ITS_MITIGATION_RETPOLINE_STUFF:
        if cmdline_has(f'{bug}=stuff') and sysfs_has("spectre_v2", "Retpolines"):
            bug_check_pass(bug, ITS_MITIGATION_RETPOLINE_STUFF)
            return
        if sysfs_has('retbleed', 'Stuffing'):
            bug_check_pass(bug, ITS_MITIGATION_RETPOLINE_STUFF)
            return
        bug_check_fail(bug, ITS_MITIGATION_RETPOLINE_STUFF, ITS_MITIGATION_ALIGNED_THUNKS)
        return

    if mitigation == ITS_MITIGATION_VMEXIT_ONLY:
        if cmdline_has(f'{bug}=vmexit') and cpuinfo_has('its_native_only'):
            bug_check_pass(bug, ITS_MITIGATION_VMEXIT_ONLY)
            return
        bug_check_fail(bug, ITS_MITIGATION_VMEXIT_ONLY, ITS_MITIGATION_ALIGNED_THUNKS)
        return

    if mitigation == ITS_MITIGATION_VULNERABLE:
        # ITS mitigation builds on the spectre_v2 thunks, so "Vulnerable" is
        # only the expected status when spectre_v2 itself is unmitigated
        if sysfs_has("spectre_v2", "Vulnerable"):
            bug_check_pass(bug, ITS_MITIGATION_VULNERABLE)
        else:
            bug_check_fail(bug, "Mitigation", ITS_MITIGATION_VULNERABLE)
        return

    bug_status_unknown(bug, mitigation)
    return

ksft.print_header()
ksft.set_plan(1)
ksft.print_msg(f'{bug}: {mitigation} ...')

if not basic_checks_sufficient(bug, mitigation):
    check_mitigation()

ksft.finished()