Import of kernel-6.12.0-55.43.1.el10_0
parent 7b5eeb45cb
commit 9c95bdb733
@@ -511,6 +511,7 @@ Description: information about CPUs heterogeneity.

What:		/sys/devices/system/cpu/vulnerabilities
		/sys/devices/system/cpu/vulnerabilities/gather_data_sampling
		/sys/devices/system/cpu/vulnerabilities/indirect_target_selection
		/sys/devices/system/cpu/vulnerabilities/itlb_multihit
		/sys/devices/system/cpu/vulnerabilities/l1tf
		/sys/devices/system/cpu/vulnerabilities/mds
@@ -22,3 +22,5 @@ are configurable at compile, boot or run time.

   srso
   gather_data_sampling
   reg-file-data-sampling
   rsb
   indirect-target-selection
168	Documentation/admin-guide/hw-vuln/indirect-target-selection.rst	Normal file
@@ -0,0 +1,168 @@
.. SPDX-License-Identifier: GPL-2.0

Indirect Target Selection (ITS)
===============================

ITS is a vulnerability in some Intel CPUs that support Enhanced IBRS and were
released before Alder Lake. ITS may allow an attacker to control the prediction
of indirect branches and RETs located in the lower half of a cacheline.

ITS is assigned CVE-2024-28956 with a CVSS score of 4.7 (Medium).

Scope of Impact
---------------
- **eIBRS Guest/Host Isolation**: Indirect branches in the KVM/kernel may
  still be predicted with an unintended target corresponding to a branch in
  the guest.

- **Intra-Mode BTI**: In-kernel training, such as through cBPF or other native
  gadgets.

- **Indirect Branch Prediction Barrier (IBPB)**: After an IBPB, indirect
  branches may still be predicted with targets corresponding to direct branches
  executed prior to the IBPB. This is fixed by the IPU 2025.1 microcode, which
  should be available via distro updates. Alternatively, microcode can be
  obtained from Intel's github repository [#f1]_.

Affected CPUs
-------------
Below is the list of ITS-affected CPUs [#f2]_ [#f3]_:

 ======================== ============ ==================== ===============
 Common name              Family_Model eIBRS                Intra-mode BTI
                                       Guest/Host Isolation
 ======================== ============ ==================== ===============
 SKYLAKE_X (step >= 6)    06_55H       Affected             Affected
 ICELAKE_X                06_6AH       Not affected         Affected
 ICELAKE_D                06_6CH       Not affected         Affected
 ICELAKE_L                06_7EH       Not affected         Affected
 TIGERLAKE_L              06_8CH       Not affected         Affected
 TIGERLAKE                06_8DH       Not affected         Affected
 KABYLAKE_L (step >= 12)  06_8EH       Affected             Affected
 KABYLAKE (step >= 13)    06_9EH       Affected             Affected
 COMETLAKE                06_A5H       Affected             Affected
 COMETLAKE_L              06_A6H       Affected             Affected
 ROCKETLAKE               06_A7H       Not affected         Affected
 ======================== ============ ==================== ===============

- All affected CPUs enumerate the Enhanced IBRS feature.
- IBPB isolation is affected on all ITS-affected CPUs and needs a microcode
  update for mitigation.
- None of the affected CPUs enumerate BHI_CTRL, which was introduced in Golden
  Cove (Alder Lake and Sapphire Rapids). This can help guests to determine the
  host's affected status.
- Intel Atom CPUs are not affected by ITS.

Mitigation
----------
As only the indirect branches and RETs whose last instruction byte lies in the
lower half of the cacheline are vulnerable to ITS, the basic idea behind the
mitigation is to not allow indirect branches in the lower half.
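
A minimal sketch of the address check behind this rule (illustrative only;
the kernel's actual patching logic may differ in detail)::

	/*
	 * With 64-byte cachelines, an address falls in the (ITS-unsafe)
	 * lower half when bit 5 of the address is clear.
	 */
	static inline bool its_in_lower_half(unsigned long addr)
	{
		return !(addr & 0x20);	/* byte offsets 0-31 of the line */
	}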

This is achieved by relying on the existing retpoline support in the kernel
and in compilers. ITS-vulnerable retpoline sites are runtime patched to point
to newly added ITS-safe thunks. These safe thunks consist of an indirect
branch in the second half of the cacheline. Not all retpoline sites are
patched to thunks; if a retpoline site is evaluated to be ITS-safe, it is
replaced with an inline indirect branch.

Dynamic thunks
~~~~~~~~~~~~~~
From a dynamically allocated pool of safe thunks, each vulnerable site is
replaced with a new thunk, such that they get a unique address. This could
improve the branch prediction accuracy. Also, it is a defense-in-depth measure
against aliasing.

Note, for simplicity, indirect branches in eBPF programs are always replaced
with a jump to a static thunk in __x86_indirect_its_thunk_array. If required,
this can be changed to use dynamic thunks in the future.

All vulnerable RETs are replaced with a static thunk; they do not use dynamic
thunks. This is because RETs mostly get their prediction from the RSB, which
does not depend on the source address. RETs that underflow the RSB may benefit
from dynamic thunks, but RETs significantly outnumber indirect branches, and
any benefit from a unique source address could be outweighed by the increased
icache footprint and iTLB pressure.

Retpoline
~~~~~~~~~
The retpoline sequence also mitigates ITS-unsafe indirect branches. For this
reason, when retpoline is enabled, the ITS mitigation only relocates the RETs
to safe thunks, unless the user requested the RSB-stuffing mitigation.

RSB Stuffing
~~~~~~~~~~~~
RSB stuffing via Call Depth Tracking is a mitigation for Retbleed RSB-underflow
attacks, and it also mitigates RETs that are vulnerable to ITS.

Mitigation in guests
^^^^^^^^^^^^^^^^^^^^
All guests deploy the ITS mitigation by default, irrespective of eIBRS
enumeration and the Family/Model of the guest. This is because the eIBRS
feature could be hidden from a guest. One exception to this is when a guest
enumerates BHI_DIS_S, which indicates that the guest is running on an
unaffected host.

To prevent guests from unnecessarily deploying the mitigation on unaffected
platforms, Intel has defined ITS_NO bit(62) in MSR IA32_ARCH_CAPABILITIES. When
a guest sees this bit set, it should not enumerate the ITS bug. Note, this bit
is not set by any hardware, but is **intended for VMMs to synthesize** it for
guests as per the host's affected status.
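
A hedged VMM-side sketch of that synthesis (``host_is_its_affected()`` is a
hypothetical helper; only the bit position comes from the text above)::

	#define ARCH_CAP_ITS_NO	BIT_ULL(62)	/* synthetic, set by the VMM */

	static u64 vmm_guest_arch_capabilities(u64 host_caps)
	{
		/* Advertise ITS_NO only when the host itself is unaffected. */
		if (!host_is_its_affected())
			host_caps |= ARCH_CAP_ITS_NO;
		return host_caps;
	}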

Mitigation options
^^^^^^^^^^^^^^^^^^
The ITS mitigation can be controlled using the "indirect_target_selection"
kernel parameter. The available options are:

   ======== ===================================================================
   on       (default) Deploy the "Aligned branch/return thunks" mitigation.
            If the spectre_v2 mitigation enables retpoline, aligned-thunks are
            only deployed for the affected RET instructions. Retpoline
            mitigates the indirect branches.

   off      Disable ITS mitigation.

   vmexit   Equivalent to "=on" if the CPU is affected by the guest/host
            isolation part of ITS. Otherwise, the mitigation is not deployed.
            This option is useful when host userspace is not in the threat
            model, and only attacks from guest to host are considered.

   stuff    Deploy the RSB-fill mitigation when retpoline is also deployed.
            Otherwise, deploy the default mitigation. When the retpoline
            mitigation is enabled, RSB stuffing via Call Depth Tracking also
            mitigates ITS.

   force    Force the ITS bug and deploy the default mitigation.
   ======== ===================================================================

Sysfs reporting
---------------

The sysfs file showing ITS mitigation status is:

  /sys/devices/system/cpu/vulnerabilities/indirect_target_selection

Note, microcode mitigation status is not reported in this file.

The possible values in this file are:

.. list-table::

   * - Not affected
     - The processor is not vulnerable.
   * - Vulnerable
     - System is vulnerable and no mitigation has been applied.
   * - Vulnerable, KVM: Not affected
     - System is vulnerable to intra-mode BTI, but not affected by eIBRS
       guest/host isolation.
   * - Mitigation: Aligned branch/return thunks
     - The mitigation is enabled; affected indirect branches and RETs are
       relocated to safe thunks.
   * - Mitigation: Retpolines, Stuffing RSB
     - The mitigation is enabled using retpoline and RSB stuffing.

References
----------
.. [#f1] Microcode repository - https://github.com/intel/Intel-Linux-Processor-Microcode-Data-Files

.. [#f2] Affected Processors list - https://www.intel.com/content/www/us/en/developer/topic-technology/software-security-guidance/processors-affected-consolidated-product-cpu-model.html

.. [#f3] Affected Processors list (machine readable) - https://github.com/intel/Intel-affected-processor-list

@@ -29,14 +29,6 @@ Below is the list of affected Intel processors [#f1]_:

   RAPTORLAKE_S        06_BFH
   =================== ============

As an exception to this table, Intel Xeon E family parts ALDERLAKE (06_97H) and
RAPTORLAKE (06_B7H), codenamed Catlow, are not affected. They are reported as
vulnerable in Linux because they share the same family/model with an affected
part. Unlike their affected counterparts, they do not enumerate RFDS_CLEAR or
CPUID.HYBRID. This information could be used to distinguish between the
affected and unaffected parts, but it is deemed not worth adding complexity as
the reporting is fixed automatically when these parts enumerate RFDS_NO.

Mitigation
==========
Intel released a microcode update that enables software to clear sensitive

268	Documentation/admin-guide/hw-vuln/rsb.rst	Normal file
@@ -0,0 +1,268 @@
.. SPDX-License-Identifier: GPL-2.0

=======================
RSB-related mitigations
=======================

.. warning::
   Please keep this document up-to-date, otherwise you will be
   volunteered to update it and convert it to a very long comment in
   bugs.c!

Since 2018 there have been many Spectre CVEs related to the Return Stack
Buffer (RSB) (sometimes referred to as the Return Address Stack (RAS) or
Return Address Predictor (RAP) on AMD).

Information about these CVEs and how to mitigate them is scattered
amongst a myriad of microarchitecture-specific documents.

This document attempts to consolidate all the relevant information in
one place and clarify the reasoning behind the current RSB-related
mitigations.  It's meant to be as concise as possible, focused only on
the current kernel mitigations: what are the RSB-related attack vectors
and how are they currently being mitigated?

It's *not* meant to describe how the RSB mechanism operates or how the
exploits work.  More details about those can be found in the references
below.

Rather, this is basically a glorified comment, but too long to actually
be one.  So when the next CVE comes along, a kernel developer can
quickly refer to this as a refresher to see what we're actually doing
and why.

At a high level, there are two classes of RSB attacks: RSB poisoning
(Intel and AMD) and RSB underflow (Intel only).  They must each be
considered individually for each attack vector (and microarchitecture
where applicable).

----

RSB poisoning (Intel and AMD)
=============================

SpectreRSB
~~~~~~~~~~

RSB poisoning is a technique used by SpectreRSB [#spectre-rsb]_ where
an attacker poisons an RSB entry to cause a victim's return instruction
to speculate to an attacker-controlled address.  This can happen when
there are unbalanced CALLs/RETs after a context switch or VMEXIT.

* All attack vectors can potentially be mitigated by flushing out any
  poisoned RSB entries using an RSB filling sequence
  [#intel-rsb-filling]_ [#amd-rsb-filling]_ when transitioning between
  untrusted and trusted domains (a simplified sketch of such a sequence
  appears at the end of this section).  But this has a performance
  impact and should be avoided whenever possible.

  .. DANGER::
     **FIXME**: Currently we're flushing 32 entries.  However, some CPU
     models have more than 32 entries.  The loop count needs to be
     increased for those.  More detailed information is needed about RSB
     sizes.

* On context switch, the user->user mitigation requires ensuring the
  RSB gets filled or cleared whenever IBPB gets written [#cond-ibpb]_
  during a context switch:

  * AMD:
	On Zen 4+, IBPB (or SBPB [#amd-sbpb]_ if used) clears the RSB.
	This is indicated by IBPB_RET in CPUID [#amd-ibpb-rsb]_.

	On Zen < 4, the RSB filling sequence [#amd-rsb-filling]_ must
	always be done in addition to IBPB [#amd-ibpb-no-rsb]_.  This is
	indicated by X86_BUG_IBPB_NO_RET.

  * Intel:
	IBPB always clears the RSB:

	"Software that executed before the IBPB command cannot control
	the predicted targets of indirect branches executed after the
	command on the same logical processor.  The term indirect branch
	in this context includes near return instructions, so these
	predicted targets may come from the RSB." [#intel-ibpb-rsb]_

* On context switch, user->kernel attacks are prevented by SMEP.  User
  space can only insert user space addresses into the RSB.  Even
  non-canonical addresses can't be inserted due to the page gap at the
  end of the user canonical address space reserved by TASK_SIZE_MAX.
  A SMEP #PF at instruction fetch prevents the kernel from speculatively
  executing user space.

  * AMD:
	"Finally, branches that are predicted as 'ret' instructions get
	their predicted targets from the Return Address Predictor (RAP).
	AMD recommends software use a RAP stuffing sequence (mitigation
	V2-3 in [2]) and/or Supervisor Mode Execution Protection (SMEP)
	to ensure that the addresses in the RAP are safe for
	speculation.  Collectively, we refer to these mitigations as "RAP
	Protection"." [#amd-smep-rsb]_

  * Intel:
	"On processors with enhanced IBRS, an RSB overwrite sequence may
	not suffice to prevent the predicted target of a near return
	from using an RSB entry created in a less privileged predictor
	mode.  Software can prevent this by enabling SMEP (for
	transitions from user mode to supervisor mode) and by having
	IA32_SPEC_CTRL.IBRS set during VM exits." [#intel-smep-rsb]_

* On VMEXIT, guest->host attacks are mitigated by eIBRS (and PBRSB
  mitigation if needed):

  * AMD:
	"When Automatic IBRS is enabled, the internal return address
	stack used for return address predictions is cleared on VMEXIT."
	[#amd-eibrs-vmexit]_

  * Intel:
	"On processors with enhanced IBRS, an RSB overwrite sequence may
	not suffice to prevent the predicted target of a near return
	from using an RSB entry created in a less privileged predictor
	mode.  Software can prevent this by enabling SMEP (for
	transitions from user mode to supervisor mode) and by having
	IA32_SPEC_CTRL.IBRS set during VM exits.  Processors with
	enhanced IBRS still support the usage model where IBRS is set
	only in the OS/VMM for OSes that enable SMEP.  To do this, such
	processors will ensure that guest behavior cannot control the
	RSB after a VM exit once IBRS is set, even if IBRS was not set
	at the time of the VM exit." [#intel-eibrs-vmexit]_

  Note that some Intel CPUs are susceptible to Post-barrier Return
  Stack Buffer Predictions (PBRSB) [#intel-pbrsb]_, where the last
  CALL from the guest can be used to predict the first unbalanced RET.
  In this case the PBRSB mitigation is needed in addition to eIBRS.
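
The RSB filling sequence referenced above boils down to a run of benign
CALLs whose pushed return addresses overwrite any poisoned entries.  A
simplified, hedged sketch in inline asm (the kernel's real
__FILL_RETURN_BUFFER macro differs in detail; the count of 32 matches the
FIXME above)::

	static __always_inline void rsb_fill_sketch(void)
	{
		unsigned long sp;

		asm volatile(
			"mov %%rsp, %0\n\t"
			".rept 32\n\t"
			"call 1f\n\t"	/* push a benign entry onto the RSB */
			"int3\n\t"	/* speculation trap if it is consumed */
			"1:\n\t"
			".endr\n\t"
			"mov %0, %%rsp\n\t"	/* drop the 32 bogus frames */
			"lfence"
			: "=r" (sp) : : "memory");
	}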

AMD RETBleed / SRSO / Branch Type Confusion
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

On AMD, poisoned RSB entries can also be created by the AMD RETBleed
variant [#retbleed-paper]_ [#amd-btc]_ or by Speculative Return Stack
Overflow [#amd-srso]_ (Inception [#inception-paper]_).  The kernel
protects itself by replacing every RET in the kernel with a branch to a
single safe RET.

----

RSB underflow (Intel only)
==========================

RSB Alternate (RSBA) ("Intel Retbleed")
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Some Intel Skylake-generation CPUs are susceptible to the Intel variant
of RETBleed [#retbleed-paper]_ (Return Stack Buffer Underflow
[#intel-rsbu]_).  If a RET is executed when the RSB buffer is empty due
to mismatched CALLs/RETs or returning from a deep call stack, the branch
predictor can fall back to using the Branch Target Buffer (BTB).  If a
user forces a BTB collision then the RET can speculatively branch to a
user-controlled address.

* Note that RSB filling doesn't fully mitigate this issue.  If there
  are enough unbalanced RETs, the RSB may still underflow and fall back
  to using a poisoned BTB entry.

* On context switch, user->user underflow attacks are mitigated by the
  conditional IBPB [#cond-ibpb]_ on context switch which effectively
  clears the BTB:

  * "The indirect branch predictor barrier (IBPB) is an indirect branch
    control mechanism that establishes a barrier, preventing software
    that executed before the barrier from controlling the predicted
    targets of indirect branches executed after the barrier on the same
    logical processor." [#intel-ibpb-btb]_

* On context switch and VMEXIT, user->kernel and guest->host RSB
  underflows are mitigated by IBRS or eIBRS:

  * "Enabling IBRS (including enhanced IBRS) will mitigate the "RSBU"
    attack demonstrated by the researchers.  As previously documented,
    Intel recommends the use of enhanced IBRS, where supported.  This
    includes any processor that enumerates RRSBA but not RRSBA_DIS_S."
    [#intel-rsbu]_

  However, note that eIBRS and IBRS do not mitigate intra-mode attacks.
  Like RRSBA below, this is mitigated by clearing the BHB on kernel
  entry.

  As an alternative to classic IBRS, call depth tracking (combined with
  retpolines) can be used to track kernel returns and fill the RSB when
  it gets close to being empty.

Restricted RSB Alternate (RRSBA)
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

Some newer Intel CPUs have Restricted RSB Alternate (RRSBA) behavior,
which, similar to RSBA described above, also falls back to using the BTB
on RSB underflow.  The only difference is that the predicted targets are
restricted to the current domain when eIBRS is enabled:

* "Restricted RSB Alternate (RRSBA) behavior allows alternate branch
  predictors to be used by near RET instructions when the RSB is
  empty.  When eIBRS is enabled, the predicted targets of these
  alternate predictors are restricted to those belonging to the
  indirect branch predictor entries of the current prediction domain."
  [#intel-eibrs-rrsba]_

When a CPU with RRSBA is vulnerable to Branch History Injection
[#bhi-paper]_ [#intel-bhi]_, an RSB underflow could be used for an
intra-mode BTI attack.  This is mitigated by clearing the BHB on
kernel entry.

However, if the kernel uses retpolines instead of eIBRS, it needs to
disable RRSBA:

* "Where software is using retpoline as a mitigation for BHI or
  intra-mode BTI, and the processor both enumerates RRSBA and
  enumerates RRSBA_DIS controls, it should disable this behavior."
  [#intel-retpoline-rrsba]_

----

References
==========

.. [#spectre-rsb] `Spectre Returns! Speculation Attacks using the Return Stack Buffer <https://arxiv.org/pdf/1807.07940.pdf>`_

.. [#intel-rsb-filling] "Empty RSB Mitigation on Skylake-generation" in `Retpoline: A Branch Target Injection Mitigation <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/retpoline-branch-target-injection-mitigation.html#inpage-nav-5-1>`_

.. [#amd-rsb-filling] "Mitigation V2-3" in `Software Techniques for Managing Speculation <https://www.amd.com/content/dam/amd/en/documents/processor-tech-docs/programmer-references/software-techniques-for-managing-speculation.pdf>`_

.. [#cond-ibpb] Whether IBPB is written depends on whether the prev and/or next task is protected from Spectre attacks.  It typically requires opting in per task or system-wide.  For more details see the documentation for the ``spectre_v2_user`` cmdline option in Documentation/admin-guide/kernel-parameters.txt.

.. [#amd-sbpb] IBPB without flushing of branch type predictions.  Only exists for AMD.

.. [#amd-ibpb-rsb] "Function 8000_0008h -- Processor Capacity Parameters and Extended Feature Identification" in `AMD64 Architecture Programmer's Manual Volume 3: General-Purpose and System Instructions <https://www.amd.com/content/dam/amd/en/documents/processor-tech-docs/programmer-references/24594.pdf>`_.  SBPB behaves the same way according to `this email <https://lore.kernel.org/5175b163a3736ca5fd01cedf406735636c99a>`_.

.. [#amd-ibpb-no-rsb] `Spectre Attacks: Exploiting Speculative Execution <https://comsec.ethz.ch/wp-content/files/ibpb_sp25.pdf>`_

.. [#intel-ibpb-rsb] "Introduction" in `Post-barrier Return Stack Buffer Predictions / CVE-2022-26373 / INTEL-SA-00706 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/post-barrier-return-stack-buffer-predictions.html>`_

.. [#amd-smep-rsb] "Existing Mitigations" in `Technical Guidance for Mitigating Branch Type Confusion <https://www.amd.com/content/dam/amd/en/documents/resources/technical-guidance-for-mitigating-branch-type-confusion.pdf>`_

.. [#intel-smep-rsb] "Enhanced IBRS" in `Indirect Branch Restricted Speculation <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/indirect-branch-restricted-speculation.html>`_

.. [#amd-eibrs-vmexit] "Extended Feature Enable Register (EFER)" in `AMD64 Architecture Programmer's Manual Volume 2: System Programming <https://www.amd.com/content/dam/amd/en/documents/processor-tech-docs/programmer-references/24593.pdf>`_

.. [#intel-eibrs-vmexit] "Enhanced IBRS" in `Indirect Branch Restricted Speculation <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/indirect-branch-restricted-speculation.html>`_

.. [#intel-pbrsb] `Post-barrier Return Stack Buffer Predictions / CVE-2022-26373 / INTEL-SA-00706 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/post-barrier-return-stack-buffer-predictions.html>`_

.. [#retbleed-paper] `RETBleed: Arbitrary Speculative Code Execution with Return Instructions <https://comsec.ethz.ch/wp-content/files/retbleed_sec22.pdf>`_

.. [#amd-btc] `Technical Guidance for Mitigating Branch Type Confusion <https://www.amd.com/content/dam/amd/en/documents/resources/technical-guidance-for-mitigating-branch-type-confusion.pdf>`_

.. [#amd-srso] `Technical Update Regarding Speculative Return Stack Overflow <https://www.amd.com/content/dam/amd/en/documents/corporate/cr/speculative-return-stack-overflow-whitepaper.pdf>`_

.. [#inception-paper] `Inception: Exposing New Attack Surfaces with Training in Transient Execution <https://comsec.ethz.ch/wp-content/files/inception_sec23.pdf>`_

.. [#intel-rsbu] `Return Stack Buffer Underflow / CVE-2022-29901, CVE-2022-28693 / INTEL-SA-00702 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/return-stack-buffer-underflow.html>`_

.. [#intel-ibpb-btb] `Indirect Branch Predictor Barrier <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/indirect-branch-predictor-barrier.html>`_

.. [#intel-eibrs-rrsba] "Guidance for RSBU" in `Return Stack Buffer Underflow / CVE-2022-29901, CVE-2022-28693 / INTEL-SA-00702 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/advisory-guidance/return-stack-buffer-underflow.html>`_

.. [#bhi-paper] `Branch History Injection: On the Effectiveness of Hardware Mitigations Against Cross-Privilege Spectre-v2 Attacks <http://download.vusec.net/papers/bhi-spectre-bhb_sec22.pdf>`_

.. [#intel-bhi] `Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/branch-history-injection.html>`_

.. [#intel-retpoline-rrsba] "Retpoline" in `Branch History Injection and Intra-mode Branch Target Injection / CVE-2022-0001, CVE-2022-0002 / INTEL-SA-00598 <https://www.intel.com/content/www/us/en/developer/articles/technical/software-security-guidance/technical-documentation/branch-history-injection.html>`_
@@ -104,7 +104,20 @@ The possible values in this file are:

   (spec_rstack_overflow=ibpb-vmexit)

 * 'Mitigation: Reduced Speculation':

   This mitigation gets automatically enabled when the above one "IBPB on
   VMEXIT" has been selected and the CPU supports the BpSpecReduce bit.

   It gets automatically enabled on machines which have the
   SRSO_USER_KERNEL_NO=1 CPUID bit. In that case, the code logic is to switch
   to the above =ibpb-vmexit mitigation because the user/kernel boundary is
   not affected anymore and thus "safe RET" is not needed.

   After enabling the IBPB on VMEXIT mitigation option, the BpSpecReduce bit
   is detected (functionality present on all such machines) and that
   practically overrides IBPB on VMEXIT as it has a lot less performance
   impact and takes care of the guest->host attack vector too.

In order to exploit the vulnerability, an attacker needs to:

@@ -2149,6 +2149,23 @@
			different crypto accelerators. This option can be used
			to achieve best performance for particular HW.

	indirect_target_selection= [X86,Intel] Mitigation control for Indirect
			Target Selection (ITS) bug in Intel CPUs. Updated
			microcode is also required for a fix in IBPB.

			on:     Enable mitigation (default).
			off:    Disable mitigation.
			force:  Force the ITS bug and deploy default
				mitigation.
			vmexit: Only deploy mitigation if CPU is affected by
				guest/host isolation part of ITS.
			stuff:  Deploy RSB-fill mitigation when retpoline is
				also deployed. Otherwise, deploy the default
				mitigation.

			For details see:
			Documentation/admin-guide/hw-vuln/indirect-target-selection.rst

	init=		[KNL]
			Format: <full_path>
			Run specified binary instead of /sbin/init as init
@@ -3508,6 +3525,7 @@
			expose users to several CPU vulnerabilities.
			Equivalent to: if nokaslr then kpti=0 [ARM64]
				       gather_data_sampling=off [X86]
				       indirect_target_selection=off [X86]
				       kvm.nx_huge_pages=off [X86]
				       l1tf=off [X86]
				       mds=off [X86]
@@ -6252,6 +6270,8 @@

			Selecting 'on' will also enable the mitigation
			against user space to user space task attacks.
			Selecting a specific mitigation does not force enable
			user mitigations.

			Selecting 'off' will disable both the kernel and
			the user space protections.

@@ -212,6 +212,17 @@ pid>/``).

This value defaults to 0.


core_sort_vma
=============

The default coredump writes VMAs in address order.  By setting
``core_sort_vma`` to 1, VMAs will be written from smallest size
to largest size.  This is known to break at least elfutils, but
can be handy when dealing with very large (and truncated)
coredumps where the more useful debugging details are included
in the smaller VMAs.
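
A hedged userspace sketch of flipping this knob via procfs (the path is
derived from the sysctl name above; error handling is trimmed)::

	#include <fcntl.h>
	#include <unistd.h>

	/* Enable size-ordered VMA dumps; returns 0 on success. */
	static int enable_core_sort_vma(void)
	{
		int fd = open("/proc/sys/kernel/core_sort_vma", O_WRONLY);
		ssize_t n;

		if (fd < 0)
			return -1;
		n = write(fd, "1", 1);
		close(fd);
		return n == 1 ? 0 : -1;
	}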

core_uses_pid
=============

@@ -151,16 +151,195 @@ the more significant 4-byte word.
We always think of our offsets as if there were no quirk, and we translate
them afterwards, before accessing the memory region.

Note on buffer lengths not multiple of 4
----------------------------------------

To deal with memory layout quirks where groups of 4 bytes are laid out "little
endian" relative to each other, but "big endian" within the group itself, the
concept of groups of 4 bytes is intrinsic to the packing API (not to be
confused with the memory access, which is performed byte by byte, though).

With buffer lengths not multiple of 4, this means one group will be incomplete.
Depending on the quirks, this may lead to discontinuities in the bit fields
accessible through the buffer. The packing API assumes discontinuities were not
the intention of the memory layout, so it avoids them by effectively logically
shortening the most significant group of 4 octets to the number of octets
actually available.
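
A small sketch of that shortening rule, under the stated assumption that
groups are 4 octets and only the most significant group may be partial::

	/* Number of 4-octet groups covering a len-byte buffer. */
	static size_t group_count(size_t len)
	{
		return (len + 3) / 4;
	}

	/* Logical length of the most significant group, e.g. 3 for len = 31. */
	static size_t top_group_octets(size_t len)
	{
		return (len % 4) ? (len % 4) : 4;
	}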

An example with a 31-byte buffer is given below. Physical buffer offsets are
implicit, and increase from left to right within a group, and from top to
bottom within a column.

No quirks:

::

	30 29 28    | Group 7 (most significant)
	27 26 25 24 | Group 6
	23 22 21 20 | Group 5
	19 18 17 16 | Group 4
	15 14 13 12 | Group 3
	11 10  9  8 | Group 2
	 7  6  5  4 | Group 1
	 3  2  1  0 | Group 0 (least significant)

QUIRK_LSW32_IS_FIRST:

::

	 3  2  1  0 | Group 0 (least significant)
	 7  6  5  4 | Group 1
	11 10  9  8 | Group 2
	15 14 13 12 | Group 3
	19 18 17 16 | Group 4
	23 22 21 20 | Group 5
	27 26 25 24 | Group 6
	30 29 28    | Group 7 (most significant)

QUIRK_LITTLE_ENDIAN:

::

	28 29 30    | Group 7 (most significant)
	24 25 26 27 | Group 6
	20 21 22 23 | Group 5
	16 17 18 19 | Group 4
	12 13 14 15 | Group 3
	 8  9 10 11 | Group 2
	 4  5  6  7 | Group 1
	 0  1  2  3 | Group 0 (least significant)

QUIRK_LITTLE_ENDIAN | QUIRK_LSW32_IS_FIRST:

::

	 0  1  2  3 | Group 0 (least significant)
	 4  5  6  7 | Group 1
	 8  9 10 11 | Group 2
	12 13 14 15 | Group 3
	16 17 18 19 | Group 4
	20 21 22 23 | Group 5
	24 25 26 27 | Group 6
	28 29 30    | Group 7 (most significant)

Intended use
------------

Drivers that opt to use this API first need to identify which of the above 3
quirk combinations (for a total of 8) match what the hardware documentation
describes. Then they should wrap the packing() function, creating a new
xxx_packing() that calls it using the proper QUIRK_* one-hot bits set.
describes.

There are 3 supported usage patterns, detailed below.

packing()
^^^^^^^^^

This API function is deprecated.

The packing() function returns an int-encoded error code, which protects the
programmer against incorrect API use. The errors are not expected to occur
during runtime, therefore it is reasonable for xxx_packing() to return void
and simply swallow those errors. Optionally it can dump stack or print the
error description.
during runtime, therefore it is reasonable to wrap packing() into a custom
function which returns void and swallows those errors. Optionally it can
dump stack or print the error description.

.. code-block:: c

	void my_packing(void *buf, u64 *val, int startbit, int endbit,
			size_t len, enum packing_op op)
	{
		int err;

		/* Adjust quirks accordingly */
		err = packing(buf, val, startbit, endbit, len, op, QUIRK_LSW32_IS_FIRST);
		if (likely(!err))
			return;

		if (err == -EINVAL) {
			pr_err("Start bit (%d) expected to be larger than end (%d)\n",
			       startbit, endbit);
		} else if (err == -ERANGE) {
			if ((startbit - endbit + 1) > 64)
				pr_err("Field %d-%d too large for 64 bits!\n",
				       startbit, endbit);
			else
				pr_err("Cannot store %llx inside bits %d-%d (would truncate)\n",
				       *val, startbit, endbit);
		}
		dump_stack();
	}

pack() and unpack()
^^^^^^^^^^^^^^^^^^^

These are const-correct variants of packing(), and eliminate the last "enum
packing_op op" argument.

Calling pack(...) is equivalent, and preferred, to calling packing(..., PACK).

Calling unpack(...) is equivalent, and preferred, to calling packing(..., UNPACK).
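
A hedged usage sketch (the signatures are assumed to follow
include/linux/packing.h in this kernel; if they differ, treat this as
pseudocode): pack a 12-bit value into bits 51:40 of an 8-byte buffer, then
read it back.

.. code-block:: c

	#include <linux/errno.h>
	#include <linux/packing.h>

	static int packing_roundtrip_example(void)
	{
		u8 buf[8] = {};
		u64 in = 0xabc, out = 0;
		int err;

		/* Write bits 51:40 of the 8-byte buffer, no quirks. */
		err = pack(buf, in, 51, 40, sizeof(buf), 0);
		if (err)
			return err;

		/* Read the same field back; out should equal 0xabc. */
		err = unpack(buf, &out, 51, 40, sizeof(buf), 0);
		if (err)
			return err;

		return out == in ? 0 : -EINVAL;
	}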

pack_fields() and unpack_fields()
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

The library exposes optimized functions for the scenario where there are many
fields represented in a buffer, and it encourages consumer drivers to avoid
repetitive calls to pack() and unpack() for each field, but instead use
pack_fields() and unpack_fields(), which reduces the code footprint.

These APIs use field definitions in arrays of ``struct packed_field_u8`` or
``struct packed_field_u16``, allowing consumer drivers to minimize the size
of these arrays according to their custom requirements.

The pack_fields() and unpack_fields() API functions are actually macros which
automatically select the appropriate function at compile time, based on the
type of the fields array passed in.

An additional benefit over pack() and unpack() is that sanity checks on the
field definitions are handled at compile time with ``BUILD_BUG_ON`` rather
than only when the offending code is executed. These functions return void and
wrapping them to handle unexpected errors is not necessary.

It is recommended, but not required, that you wrap your packed buffer into a
structured type with a fixed size. This generally makes it easier for the
compiler to enforce that the correct size buffer is used.

Here is an example of how to use the fields APIs:

.. code-block:: c

	/* Ordering inside the unpacked structure is flexible and can be different
	 * from the packed buffer. Here, it is optimized to reduce padding.
	 */
	struct data {
		u64 field3;
		u32 field4;
		u16 field1;
		u8 field2;
	};

	#define SIZE 13

	typedef struct __packed { u8 buf[SIZE]; } packed_buf_t;

	static const struct packed_field_u8 fields[] = {
		PACKED_FIELD(100, 90, struct data, field1),
		PACKED_FIELD(89, 87, struct data, field2),
		PACKED_FIELD(86, 30, struct data, field3),
		PACKED_FIELD(29, 0, struct data, field4),
	};

	void unpack_your_data(const packed_buf_t *buf, struct data *unpacked)
	{
		BUILD_BUG_ON(sizeof(*buf) != SIZE);

		unpack_fields(buf, sizeof(*buf), unpacked, fields,
			      QUIRK_LITTLE_ENDIAN);
	}

	void pack_your_data(const struct data *unpacked, packed_buf_t *buf)
	{
		BUILD_BUG_ON(sizeof(*buf) != SIZE);

		pack_fields(buf, sizeof(*buf), unpacked, fields,
			    QUIRK_LITTLE_ENDIAN);
	}

@@ -101,6 +101,37 @@ example, if Rx packets are 10 and Netdev (software statistics) displays
rx_bytes as "X", then ethtool (hardware statistics) will display rx_bytes as
"X+40" (4 bytes CRC x 10 packets).

ethtool reset
-------------
The driver supports 3 types of resets:

- PF reset - resets only components associated with the given PF, does not
  impact other PFs

- CORE reset - the whole adapter is affected, resets all PFs

- GLOBAL reset - same as CORE, but MAC and PHY components are also reinitialized

These are mapped to ethtool reset flags as follows:

- PF reset:

  # ethtool --reset <ethX> irq dma filter offload

- CORE reset:

  # ethtool --reset <ethX> irq-shared dma-shared filter-shared offload-shared \
    ram-shared

- GLOBAL reset:

  # ethtool --reset <ethX> irq-shared dma-shared filter-shared offload-shared \
    mac-shared phy-shared ram-shared

In switchdev mode you can reset a VF using its port representor:

  # ethtool --reset <repr> irq dma filter offload


Viewing Link Messages
---------------------

@@ -17427,6 +17427,8 @@ S:	Supported
F:	Documentation/core-api/packing.rst
F:	include/linux/packing.h
F:	lib/packing.c
F:	lib/packing_test.c
F:	scripts/gen_packed_field_checks.c

PADATA PARALLEL EXECUTION MECHANISM
M:	Steffen Klassert <steffen.klassert@secunet.com>

4	Makefile
@@ -1336,6 +1336,10 @@ PHONY += scripts_unifdef
scripts_unifdef: scripts_basic
	$(Q)$(MAKE) $(build)=scripts scripts/unifdef

PHONY += scripts_gen_packed_field_checks
scripts_gen_packed_field_checks: scripts_basic
	$(Q)$(MAKE) $(build)=scripts scripts/gen_packed_field_checks

# ---------------------------------------------------------------------------
# Install

@@ -12,7 +12,7 @@ RHEL_MINOR = 0
#
# Use this spot to avoid future merge conflicts.
# Do not trim this comment.
RHEL_RELEASE = 55.42.1
RHEL_RELEASE = 55.43.1

#
# RHEL_REBASE_NUM

@@ -1024,6 +1024,14 @@ config ARCH_WANTS_EXECMEM_LATE
	  enough entropy for module space randomization, for instance
	  arm64.

config ARCH_HAS_EXECMEM_ROX
	bool
	depends on MMU && !HIGHMEM
	help
	  For architectures that support allocations of executable memory
	  with read-only execute permissions. The architecture must implement
	  the execmem_fill_trapping_insns() callback to enable this.
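
A hedged sketch of that callback (the exact prototype is an assumption based
on the help text above; 0xcc is x86's INT3 and is used purely as an example):

	/* Fill a ROX allocation with trapping instructions so stale or
	 * freed executable memory cannot be silently jumped into.
	 */
	void execmem_fill_trapping_insns(void *ptr, size_t size, bool writable)
	{
		memset(ptr, 0xcc, size);	/* INT3 on x86; arch-specific */
	}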

config HAVE_IRQ_EXIT_ON_IRQ_STACK
	bool
	help
@@ -5,3 +5,4 @@ generic-y += agp.h
generic-y += asm-offsets.h
generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += text-patching.h
@@ -6,3 +6,4 @@ generic-y += kvm_para.h
generic-y += mcs_spinlock.h
generic-y += parport.h
generic-y += user.h
generic-y += text-patching.h
@@ -23,7 +23,7 @@
#include <asm/insn.h>
#include <asm/set_memory.h>
#include <asm/stacktrace.h>
#include <asm/patch.h>
#include <asm/text-patching.h>

/*
 * The compiler emitted profiling hook consists of
@@ -1,7 +1,7 @@
// SPDX-License-Identifier: GPL-2.0
#include <linux/kernel.h>
#include <linux/jump_label.h>
#include <asm/patch.h>
#include <asm/text-patching.h>
#include <asm/insn.h>

static void __arch_jump_label_transform(struct jump_entry *entry,
@@ -15,7 +15,7 @@
#include <linux/kgdb.h>
#include <linux/uaccess.h>

#include <asm/patch.h>
#include <asm/text-patching.h>
#include <asm/traps.h>

struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] =
@@ -9,7 +9,7 @@
#include <asm/fixmap.h>
#include <asm/smp_plat.h>
#include <asm/opcodes.h>
#include <asm/patch.h>
#include <asm/text-patching.h>

struct patch {
	void *addr;
@@ -25,7 +25,7 @@
#include <asm/cacheflush.h>
#include <linux/percpu.h>
#include <linux/bug.h>
#include <asm/patch.h>
#include <asm/text-patching.h>
#include <asm/sections.h>

#include "../decode-arm.h"
@@ -14,7 +14,7 @@
/* for arm_gen_branch */
#include <asm/insn.h>
/* for patch_text */
#include <asm/patch.h>
#include <asm/text-patching.h>

#include "core.h"

@@ -80,6 +80,7 @@
#define ARM_CPU_PART_CORTEX_A78AE	0xD42
#define ARM_CPU_PART_CORTEX_X1		0xD44
#define ARM_CPU_PART_CORTEX_A510	0xD46
#define ARM_CPU_PART_CORTEX_X1C		0xD4C
#define ARM_CPU_PART_CORTEX_A520	0xD80
#define ARM_CPU_PART_CORTEX_A710	0xD47
#define ARM_CPU_PART_CORTEX_A715	0xD4D
@@ -163,6 +164,7 @@
#define MIDR_CORTEX_A78AE	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A78AE)
#define MIDR_CORTEX_X1		MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1)
#define MIDR_CORTEX_A510	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A510)
#define MIDR_CORTEX_X1C		MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_X1C)
#define MIDR_CORTEX_A520	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A520)
#define MIDR_CORTEX_A710	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A710)
#define MIDR_CORTEX_A715	MIDR_CPU_MODEL(ARM_CPU_IMP_ARM, ARM_CPU_PART_CORTEX_A715)

@@ -692,6 +692,7 @@ u32 aarch64_insn_gen_cas(enum aarch64_insn_register result,
}
#endif
u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type);
u32 aarch64_insn_gen_dsb(enum aarch64_insn_mb_type type);
u32 aarch64_insn_gen_mrs(enum aarch64_insn_register result,
			 enum aarch64_insn_system_register sysreg);

@@ -13,6 +13,7 @@ int set_memory_valid(unsigned long addr, int numpages, int enable);

int set_direct_map_invalid_noflush(struct page *page);
int set_direct_map_default_noflush(struct page *page);
int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid);
bool kernel_page_present(struct page *page);

#endif /* _ASM_ARM64_SET_MEMORY_H */

@@ -97,7 +97,9 @@ enum mitigation_state arm64_get_meltdown_state(void);

enum mitigation_state arm64_get_spectre_bhb_state(void);
bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry, int scope);
u8 spectre_bhb_loop_affected(int scope);
extern bool __nospectre_bhb;
u8 get_spectre_bhb_loop_value(void);
bool is_spectre_bhb_fw_mitigated(void);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *__unused);
bool try_emulate_el1_ssbs(struct pt_regs *regs, u32 instr);

@@ -15,7 +15,7 @@
#include <asm/debug-monitors.h>
#include <asm/ftrace.h>
#include <asm/insn.h>
#include <asm/patching.h>
#include <asm/text-patching.h>

#ifdef CONFIG_DYNAMIC_FTRACE_WITH_ARGS
struct fregs_offset {
@@ -9,7 +9,7 @@
#include <linux/jump_label.h>
#include <linux/smp.h>
#include <asm/insn.h>
#include <asm/patching.h>
#include <asm/text-patching.h>

bool arch_jump_label_transform_queue(struct jump_entry *entry,
				     enum jump_label_type type)
@@ -17,7 +17,7 @@

#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/patching.h>
#include <asm/text-patching.h>
#include <asm/traps.h>

struct dbg_reg_def_t dbg_reg_def[DBG_MAX_REG_NUM] = {
@@ -10,7 +10,7 @@
#include <asm/fixmap.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
#include <asm/patching.h>
#include <asm/text-patching.h>
#include <asm/sections.h>

static DEFINE_RAW_SPINLOCK(patch_lock);
@@ -27,7 +27,7 @@
#include <asm/debug-monitors.h>
#include <asm/insn.h>
#include <asm/irq.h>
#include <asm/patching.h>
#include <asm/text-patching.h>
#include <asm/ptrace.h>
#include <asm/sections.h>
#include <asm/system_misc.h>
@@ -845,52 +845,71 @@ static unsigned long system_bhb_mitigations;
 * This must be called with SCOPE_LOCAL_CPU for each type of CPU, before any
 * SCOPE_SYSTEM call will give the right answer.
 */
u8 spectre_bhb_loop_affected(int scope)
static bool is_spectre_bhb_safe(int scope)
{
	static const struct midr_range spectre_bhb_safe_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A35),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A53),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A55),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A510),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A520),
		MIDR_ALL_VERSIONS(MIDR_BRAHMA_B53),
		{},
	};
	static bool all_safe = true;

	if (scope != SCOPE_LOCAL_CPU)
		return all_safe;

	if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_safe_list))
		return true;

	all_safe = false;

	return false;
}

static u8 spectre_bhb_loop_affected(void)
{
	u8 k = 0;
	static u8 max_bhb_k;

	if (scope == SCOPE_LOCAL_CPU) {
		static const struct midr_range spectre_bhb_k32_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
			{},
		};
		static const struct midr_range spectre_bhb_k24_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
			MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
			{},
		};
		static const struct midr_range spectre_bhb_k11_list[] = {
			MIDR_ALL_VERSIONS(MIDR_AMPERE1),
			{},
		};
		static const struct midr_range spectre_bhb_k8_list[] = {
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
			MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
			{},
		};
	static const struct midr_range spectre_bhb_k32_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A78),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A78AE),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A78C),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_X1),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_X1C),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A710),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_X2),
		MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N2),
		MIDR_ALL_VERSIONS(MIDR_NEOVERSE_V1),
		{},
	};
	static const struct midr_range spectre_bhb_k24_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A76),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A77),
		MIDR_ALL_VERSIONS(MIDR_NEOVERSE_N1),
		MIDR_ALL_VERSIONS(MIDR_QCOM_KRYO_4XX_GOLD),
		{},
	};
	static const struct midr_range spectre_bhb_k11_list[] = {
		MIDR_ALL_VERSIONS(MIDR_AMPERE1),
		{},
	};
	static const struct midr_range spectre_bhb_k8_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A72),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A57),
		{},
	};

		if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
			k = 32;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
			k = 24;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
			k = 11;
		else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
			k = 8;

		max_bhb_k = max(max_bhb_k, k);
	} else {
		k = max_bhb_k;
	}
	if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k32_list))
		k = 32;
	else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k24_list))
		k = 24;
	else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k11_list))
		k = 11;
	else if (is_midr_in_range_list(read_cpuid_id(), spectre_bhb_k8_list))
		k = 8;

	return k;
}
@@ -916,29 +935,13 @@ static enum mitigation_state spectre_bhb_get_cpu_fw_mitigation_state(void)
	}
}

static bool is_spectre_bhb_fw_affected(int scope)
static bool has_spectre_bhb_fw_mitigation(void)
{
	static bool system_affected;
	enum mitigation_state fw_state;
	bool has_smccc = arm_smccc_1_1_get_conduit() != SMCCC_CONDUIT_NONE;
	static const struct midr_range spectre_bhb_firmware_mitigated_list[] = {
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A73),
		MIDR_ALL_VERSIONS(MIDR_CORTEX_A75),
		{},
	};
	bool cpu_in_list = is_midr_in_range_list(read_cpuid_id(),
					spectre_bhb_firmware_mitigated_list);

	if (scope != SCOPE_LOCAL_CPU)
		return system_affected;

	fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
	if (cpu_in_list || (has_smccc && fw_state == SPECTRE_MITIGATED)) {
		system_affected = true;
		return true;
	}

	return false;
	return has_smccc && fw_state == SPECTRE_MITIGATED;
}
static bool supports_ecbhb(int scope)
@@ -954,6 +957,8 @@ static bool supports_ecbhb(int scope)
				    ID_AA64MMFR1_EL1_ECBHB_SHIFT);
}

static u8 max_bhb_k;

bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
			     int scope)
{
@@ -962,16 +967,23 @@ bool is_spectre_bhb_affected(const struct arm64_cpu_capabilities *entry,
	if (supports_csv2p3(scope))
		return false;

	if (supports_clearbhb(scope))
		return true;
	if (is_spectre_bhb_safe(scope))
		return false;

	if (spectre_bhb_loop_affected(scope))
		return true;
	/*
	 * At this point the core isn't known to be "safe" so we're going to
	 * assume it's vulnerable. We still need to update `max_bhb_k` though,
	 * but only if we aren't mitigating with clearbhb.
	 */
	if (scope == SCOPE_LOCAL_CPU && !supports_clearbhb(SCOPE_LOCAL_CPU))
		max_bhb_k = max(max_bhb_k, spectre_bhb_loop_affected());

	if (is_spectre_bhb_fw_affected(scope))
		return true;
	return true;
}

	return false;
u8 get_spectre_bhb_loop_value(void)
{
	return max_bhb_k;
}
static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
@@ -991,7 +1003,7 @@ static void this_cpu_set_vectors(enum arm64_bp_harden_el1_vectors slot)
	isb();
}

static bool __read_mostly __nospectre_bhb;
bool __read_mostly __nospectre_bhb;
static int __init parse_spectre_bhb_param(char *str)
{
	__nospectre_bhb = true;
@@ -1002,7 +1014,7 @@ early_param("nospectre_bhb", parse_spectre_bhb_param);
void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
{
	bp_hardening_cb_t cpu_cb;
	enum mitigation_state fw_state, state = SPECTRE_VULNERABLE;
	enum mitigation_state state = SPECTRE_VULNERABLE;
	struct bp_hardening_data *data = this_cpu_ptr(&bp_hardening_data);

	if (!is_spectre_bhb_affected(entry, SCOPE_LOCAL_CPU))
@@ -1028,7 +1040,7 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
		this_cpu_set_vectors(EL1_VECTOR_BHB_CLEAR_INSN);
		state = SPECTRE_MITIGATED;
		set_bit(BHB_INSN, &system_bhb_mitigations);
	} else if (spectre_bhb_loop_affected(SCOPE_LOCAL_CPU)) {
	} else if (spectre_bhb_loop_affected()) {
		/*
		 * Ensure KVM uses the indirect vector which will have the
		 * branchy-loop added. A57/A72-r0 will already have selected
@@ -1041,37 +1053,39 @@ void spectre_bhb_enable_mitigation(const struct arm64_cpu_capabilities *entry)
		this_cpu_set_vectors(EL1_VECTOR_BHB_LOOP);
		state = SPECTRE_MITIGATED;
		set_bit(BHB_LOOP, &system_bhb_mitigations);
	} else if (is_spectre_bhb_fw_affected(SCOPE_LOCAL_CPU)) {
		fw_state = spectre_bhb_get_cpu_fw_mitigation_state();
		if (fw_state == SPECTRE_MITIGATED) {
			/*
			 * Ensure KVM uses one of the spectre bp_hardening
			 * vectors. The indirect vector doesn't include the EL3
			 * call, so needs upgrading to
			 * HYP_VECTOR_SPECTRE_INDIRECT.
			 */
			if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
				data->slot += 1;
	} else if (has_spectre_bhb_fw_mitigation()) {
		/*
		 * Ensure KVM uses one of the spectre bp_hardening
		 * vectors. The indirect vector doesn't include the EL3
		 * call, so needs upgrading to
		 * HYP_VECTOR_SPECTRE_INDIRECT.
		 */
		if (!data->slot || data->slot == HYP_VECTOR_INDIRECT)
			data->slot += 1;

			this_cpu_set_vectors(EL1_VECTOR_BHB_FW);
		this_cpu_set_vectors(EL1_VECTOR_BHB_FW);

			/*
			 * The WA3 call in the vectors supersedes the WA1 call
			 * made during context-switch. Uninstall any firmware
			 * bp_hardening callback.
			 */
			cpu_cb = spectre_v2_get_sw_mitigation_cb();
			if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
				__this_cpu_write(bp_hardening_data.fn, NULL);
		/*
		 * The WA3 call in the vectors supersedes the WA1 call
		 * made during context-switch. Uninstall any firmware
		 * bp_hardening callback.
		 */
		cpu_cb = spectre_v2_get_sw_mitigation_cb();
		if (__this_cpu_read(bp_hardening_data.fn) != cpu_cb)
			__this_cpu_write(bp_hardening_data.fn, NULL);

			state = SPECTRE_MITIGATED;
			set_bit(BHB_FW, &system_bhb_mitigations);
		}
		state = SPECTRE_MITIGATED;
		set_bit(BHB_FW, &system_bhb_mitigations);
	}

	update_mitigation_state(&spectre_bhb_state, state);
}

bool is_spectre_bhb_fw_mitigated(void)
{
	return test_bit(BHB_FW, &system_bhb_mitigations);
}
/* Patched to NOP when enabled */
void noinstr spectre_bhb_patch_loop_mitigation_enable(struct alt_instr *alt,
						      __le32 *origptr,
@@ -1100,7 +1114,6 @@ void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,
{
	u8 rd;
	u32 insn;
	u16 loop_count = spectre_bhb_loop_affected(SCOPE_SYSTEM);

	BUG_ON(nr_inst != 1); /* MOV -> MOV */
@@ -1109,7 +1122,7 @@ void noinstr spectre_bhb_patch_loop_iter(struct alt_instr *alt,

	insn = le32_to_cpu(*origptr);
	rd = aarch64_insn_decode_register(AARCH64_INSN_REGTYPE_RD, insn);
	insn = aarch64_insn_gen_movewide(rd, loop_count, 0,
	insn = aarch64_insn_gen_movewide(rd, max_bhb_k, 0,
					 AARCH64_INSN_VARIANT_64BIT,
					 AARCH64_INSN_MOVEWIDE_ZERO);
	*updptr++ = cpu_to_le32(insn);
@@ -41,7 +41,7 @@
#include <asm/extable.h>
#include <asm/insn.h>
#include <asm/kprobes.h>
#include <asm/patching.h>
#include <asm/text-patching.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/stack_pointer.h>
@ -5,6 +5,7 @@
|
||||
*
|
||||
* Copyright (C) 2014-2016 Zi Shen Lim <zlim.lnx@gmail.com>
|
||||
*/
|
||||
#include <linux/bitfield.h>
|
||||
#include <linux/bitops.h>
|
||||
#include <linux/bug.h>
|
||||
#include <linux/printk.h>
|
||||
@ -1471,43 +1472,41 @@ u32 aarch64_insn_gen_extr(enum aarch64_insn_variant variant,
|
||||
return aarch64_insn_encode_register(AARCH64_INSN_REGTYPE_RM, insn, Rm);
|
||||
}
|
||||
|
||||
static u32 __get_barrier_crm_val(enum aarch64_insn_mb_type type)
|
||||
{
|
||||
switch (type) {
|
||||
case AARCH64_INSN_MB_SY:
|
||||
return 0xf;
|
||||
case AARCH64_INSN_MB_ST:
|
||||
return 0xe;
|
||||
case AARCH64_INSN_MB_LD:
|
||||
return 0xd;
|
||||
case AARCH64_INSN_MB_ISH:
|
||||
return 0xb;
|
||||
case AARCH64_INSN_MB_ISHST:
|
||||
return 0xa;
|
||||
case AARCH64_INSN_MB_ISHLD:
|
||||
return 0x9;
|
||||
case AARCH64_INSN_MB_NSH:
|
||||
return 0x7;
|
||||
case AARCH64_INSN_MB_NSHST:
|
||||
return 0x6;
|
||||
case AARCH64_INSN_MB_NSHLD:
|
||||
return 0x5;
|
||||
default:
|
||||
pr_err("%s: unknown barrier type %d\n", __func__, type);
|
||||
return AARCH64_BREAK_FAULT;
|
||||
}
|
||||
}
|
||||
|
||||
u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
|
||||
{
|
||||
u32 opt;
|
||||
u32 insn;
|
||||
|
||||
switch (type) {
|
||||
case AARCH64_INSN_MB_SY:
|
||||
opt = 0xf;
|
||||
break;
|
||||
case AARCH64_INSN_MB_ST:
|
||||
opt = 0xe;
|
||||
break;
|
||||
case AARCH64_INSN_MB_LD:
|
||||
opt = 0xd;
|
||||
break;
|
||||
case AARCH64_INSN_MB_ISH:
|
||||
opt = 0xb;
|
||||
break;
|
||||
case AARCH64_INSN_MB_ISHST:
|
||||
opt = 0xa;
|
||||
break;
|
||||
case AARCH64_INSN_MB_ISHLD:
|
||||
opt = 0x9;
|
||||
break;
|
||||
case AARCH64_INSN_MB_NSH:
|
||||
opt = 0x7;
|
||||
break;
|
||||
case AARCH64_INSN_MB_NSHST:
|
||||
opt = 0x6;
|
||||
break;
|
||||
case AARCH64_INSN_MB_NSHLD:
|
||||
opt = 0x5;
|
||||
break;
|
||||
default:
|
||||
pr_err("%s: unknown dmb type %d\n", __func__, type);
|
||||
opt = __get_barrier_crm_val(type);
|
||||
if (opt == AARCH64_BREAK_FAULT)
|
||||
return AARCH64_BREAK_FAULT;
|
||||
}
|
||||
|
||||
insn = aarch64_insn_get_dmb_value();
|
||||
insn &= ~GENMASK(11, 8);
|
||||
@ -1516,6 +1515,21 @@ u32 aarch64_insn_gen_dmb(enum aarch64_insn_mb_type type)
|
||||
return insn;
|
||||
}
|
||||
|
||||
u32 aarch64_insn_gen_dsb(enum aarch64_insn_mb_type type)
|
||||
{
|
||||
u32 opt, insn;
|
||||
|
||||
opt = __get_barrier_crm_val(type);
|
||||
if (opt == AARCH64_BREAK_FAULT)
|
||||
return AARCH64_BREAK_FAULT;
|
||||
|
||||
insn = aarch64_insn_get_dsb_base_value();
|
||||
insn &= ~GENMASK(11, 8);
|
||||
insn |= (opt << 8);
|
||||
|
||||
return insn;
|
||||
}
|
||||
|
||||
u32 aarch64_insn_gen_mrs(enum aarch64_insn_register result,
|
||||
enum aarch64_insn_system_register sysreg)
|
||||
{
|
||||
|
||||
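Both barrier generators place the barrier's CRm value in bits [11:8] of a base opcode. A worked example (opcode value assumed from the architecture reference, not spelled out in the patch): for DSB ISH, __get_barrier_crm_val() returns 0xb, so starting from the DSB base encoding 0xd503309f:

	/* Sketch: what aarch64_insn_gen_dsb(AARCH64_INSN_MB_ISH) computes */
	u32 insn = 0xd503309f;		/* assumed DSB base opcode */
	insn &= ~GENMASK(11, 8);	/* clear CRm */
	insn |= 0xb << 8;		/* CRm = inner-shareable (ISH) */
	/* insn == 0xd5033b9f, i.e. "dsb ish" */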
@@ -192,6 +192,16 @@ int set_direct_map_default_noflush(struct page *page)
 				   PAGE_SIZE, change_page_range, &data);
 }

+int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
+{
+	unsigned long addr = (unsigned long)page_address(page);
+
+	if (!can_set_direct_map())
+		return 0;
+
+	return set_memory_valid(addr, nr, valid);
+}
+
 #ifdef CONFIG_DEBUG_PAGEALLOC
 void __kernel_map_pages(struct page *page, int numpages, int enable)
 {

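set_direct_map_valid_noflush() flips the validity of nr consecutive linear-map pages without doing any TLB maintenance; the "_noflush" suffix leaves that to the caller. A minimal sketch of a hypothetical caller (page and npages assumed, not part of this patch):

	/* Sketch: drop npages from the linear map, then flush explicitly */
	unsigned long addr = (unsigned long)page_address(page);

	set_direct_map_valid_noflush(page, npages, false);
	flush_tlb_kernel_range(addr, addr + npages * PAGE_SIZE);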
@@ -7,6 +7,7 @@

 #define pr_fmt(fmt) "bpf_jit: " fmt

+#include <linux/arm-smccc.h>
 #include <linux/bitfield.h>
 #include <linux/bpf.h>
 #include <linux/filter.h>
@@ -17,9 +18,10 @@
 #include <asm/asm-extable.h>
 #include <asm/byteorder.h>
 #include <asm/cacheflush.h>
+#include <asm/cpufeature.h>
 #include <asm/debug-monitors.h>
 #include <asm/insn.h>
-#include <asm/patching.h>
+#include <asm/text-patching.h>
 #include <asm/set_memory.h>

 #include "bpf_jit.h"
@@ -857,7 +859,51 @@ static void build_plt(struct jit_ctx *ctx)
 	plt->target = (u64)&dummy_tramp;
 }

-static void build_epilogue(struct jit_ctx *ctx)
+/* Clobbers BPF registers 1-4, aka x0-x3 */
+static void __maybe_unused build_bhb_mitigation(struct jit_ctx *ctx)
+{
+	const u8 r1 = bpf2a64[BPF_REG_1]; /* aka x0 */
+	u8 k = get_spectre_bhb_loop_value();
+
+	if (!IS_ENABLED(CONFIG_MITIGATE_SPECTRE_BRANCH_HISTORY) ||
+	    cpu_mitigations_off() || __nospectre_bhb ||
+	    arm64_get_spectre_v2_state() == SPECTRE_VULNERABLE)
+		return;
+
+	if (capable(CAP_SYS_ADMIN))
+		return;
+
+	if (supports_clearbhb(SCOPE_SYSTEM)) {
+		emit(aarch64_insn_gen_hint(AARCH64_INSN_HINT_CLEARBHB), ctx);
+		return;
+	}
+
+	if (k) {
+		emit_a64_mov_i64(r1, k, ctx);
+		emit(A64_B(1), ctx);
+		emit(A64_SUBS_I(true, r1, r1, 1), ctx);
+		emit(A64_B_(A64_COND_NE, -2), ctx);
+		emit(aarch64_insn_gen_dsb(AARCH64_INSN_MB_ISH), ctx);
+		emit(aarch64_insn_get_isb_value(), ctx);
+	}
+
+	if (is_spectre_bhb_fw_mitigated()) {
+		emit(A64_ORR_I(false, r1, AARCH64_INSN_REG_ZR,
+			       ARM_SMCCC_ARCH_WORKAROUND_3), ctx);
+		switch (arm_smccc_1_1_get_conduit()) {
+		case SMCCC_CONDUIT_HVC:
+			emit(aarch64_insn_get_hvc_value(), ctx);
+			break;
+		case SMCCC_CONDUIT_SMC:
+			emit(aarch64_insn_get_smc_value(), ctx);
+			break;
+		default:
+			pr_err_once("Firmware mitigation enabled with unknown conduit\n");
+		}
+	}
+}
+
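Read as assembly, the k != 0 path above emits a tight counted loop of taken branches followed by barriers. A rough rendering for illustration (register name assumed; the JIT uses bpf2a64[BPF_REG_1], i.e. x0):

	/*
	 *	mov	x0, #k		// loop counter
	 * 1:	b	2f		// a taken branch, to overwrite
	 * 2:	subs	x0, x0, #1	// the branch history
	 *	b.ne	1b		// k iterations in total
	 *	dsb	ish
	 *	isb
	 */

The capable(CAP_SYS_ADMIN) early return means programs loaded by a sufficiently privileged task skip the mitigation entirely.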
+static void build_epilogue(struct jit_ctx *ctx, bool was_classic)
 {
 	const u8 r0 = bpf2a64[BPF_REG_0];
 	const u8 ptr = bpf2a64[TCCNT_PTR];
@@ -870,10 +916,13 @@ static void build_epilogue(struct jit_ctx *ctx)

 	emit(A64_POP(A64_ZR, ptr, A64_SP), ctx);

+	if (was_classic)
+		build_bhb_mitigation(ctx);
+
 	/* Restore FP/LR registers */
 	emit(A64_POP(A64_FP, A64_LR, A64_SP), ctx);

-	/* Set return value */
+	/* Move the return value from bpf:r0 (aka x7) to x0 */
 	emit(A64_MOV(1, A64_R(0), r0), ctx);

 	/* Authenticate lr */
@@ -1817,7 +1866,7 @@ struct bpf_prog *bpf_int_jit_compile(struct bpf_prog *prog)
 	}

 	ctx.epilogue_offset = ctx.idx;
-	build_epilogue(&ctx);
+	build_epilogue(&ctx, was_classic);
 	build_plt(&ctx);

 	extable_align = __alignof__(struct exception_table_entry);
@@ -1880,7 +1929,7 @@ skip_init_ctx:
 		goto out_free_hdr;
 	}

-	build_epilogue(&ctx);
+	build_epilogue(&ctx, was_classic);
 	build_plt(&ctx);

 	/* Extra pass to validate JITed code. */

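build_epilogue() now takes was_classic so that only classic-BPF (cBPF) programs receive the epilogue mitigation. A sketch of how the flag is presumably derived earlier in bpf_int_jit_compile() (that hunk is not shown in this excerpt):

	bool was_classic = bpf_prog_was_classic(prog);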
@@ -11,3 +11,4 @@ generic-y += qspinlock.h
 generic-y += parport.h
 generic-y += user.h
 generic-y += vmlinux.lds.h
+generic-y += text-patching.h

@@ -5,3 +5,4 @@ generic-y += extable.h
 generic-y += iomap.h
 generic-y += kvm_para.h
 generic-y += mcs_spinlock.h
+generic-y += text-patching.h

@@ -11,3 +11,4 @@ generic-y += ioctl.h
 generic-y += mmzone.h
 generic-y += statfs.h
 generic-y += param.h
+generic-y += text-patching.h

@@ -17,5 +17,6 @@ int set_memory_rw(unsigned long addr, int numpages);
 bool kernel_page_present(struct page *page);
 int set_direct_map_default_noflush(struct page *page);
 int set_direct_map_invalid_noflush(struct page *page);
+int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid);

 #endif /* _ASM_LOONGARCH_SET_MEMORY_H */

@@ -216,3 +216,22 @@ int set_direct_map_invalid_noflush(struct page *page)

 	return __set_memory(addr, 1, __pgprot(0), __pgprot(_PAGE_PRESENT | _PAGE_VALID));
 }
+
+int set_direct_map_valid_noflush(struct page *page, unsigned nr, bool valid)
+{
+	unsigned long addr = (unsigned long)page_address(page);
+	pgprot_t set, clear;
+
+	if (addr < vm_map_base)
+		return 0;
+
+	if (valid) {
+		set = PAGE_KERNEL;
+		clear = __pgprot(0);
+	} else {
+		set = __pgprot(0);
+		clear = __pgprot(_PAGE_PRESENT | _PAGE_VALID);
+	}
+
+	return __set_memory(addr, 1, set, clear);
+}

@@ -4,3 +4,4 @@ generic-y += extable.h
 generic-y += kvm_para.h
 generic-y += mcs_spinlock.h
 generic-y += spinlock.h
+generic-y += text-patching.h

@@ -8,3 +8,4 @@ generic-y += parport.h
 generic-y += syscalls.h
 generic-y += tlb.h
 generic-y += user.h
+generic-y += text-patching.h

@@ -13,3 +13,4 @@ generic-y += parport.h
 generic-y += qrwlock.h
 generic-y += qspinlock.h
 generic-y += user.h
+generic-y += text-patching.h

@@ -7,3 +7,4 @@ generic-y += kvm_para.h
 generic-y += mcs_spinlock.h
 generic-y += spinlock.h
 generic-y += user.h
+generic-y += text-patching.h

@@ -9,3 +9,4 @@ generic-y += spinlock.h
 generic-y += qrwlock_types.h
 generic-y += qrwlock.h
 generic-y += user.h
+generic-y += text-patching.h

@@ -20,7 +20,7 @@
 #include <asm/assembly.h>
 #include <asm/sections.h>
 #include <asm/ftrace.h>
-#include <asm/patch.h>
+#include <asm/text-patching.h>

 #define __hot __section(".text.hot")

@@ -8,7 +8,7 @@
 #include <linux/jump_label.h>
 #include <linux/bug.h>
 #include <asm/alternative.h>
-#include <asm/patch.h>
+#include <asm/text-patching.h>

 static inline int reassemble_17(int as17)
 {
@@ -16,7 +16,7 @@
 #include <asm/ptrace.h>
 #include <asm/traps.h>
 #include <asm/processor.h>
-#include <asm/patch.h>
+#include <asm/text-patching.h>
 #include <asm/cacheflush.h>

 const struct kgdb_arch arch_kgdb_ops = {
@@ -12,7 +12,7 @@
 #include <linux/kprobes.h>
 #include <linux/slab.h>
 #include <asm/cacheflush.h>
-#include <asm/patch.h>
+#include <asm/text-patching.h>

 DEFINE_PER_CPU(struct kprobe *, current_kprobe) = NULL;
 DEFINE_PER_CPU(struct kprobe_ctlblk, kprobe_ctlblk);
@@ -13,7 +13,7 @@

 #include <asm/cacheflush.h>
 #include <asm/fixmap.h>
-#include <asm/patch.h>
+#include <asm/text-patching.h>

 struct patch {
 	void *addr;
@@ -21,7 +21,7 @@
 #include <linux/percpu.h>
 #include <linux/module.h>
 #include <asm/probes.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>

 #ifdef CONFIG_KPROBES
 #define __ARCH_WANT_KPROBES_INSN_SLOT
@@ -29,6 +29,7 @@ extern cpumask_var_t node_to_cpumask_map[];
 #ifdef CONFIG_MEMORY_HOTPLUG
 extern unsigned long max_pfn;
 u64 memory_hotplug_max(void);
+u64 hot_add_drconf_memory_max(void);
 #else
 #define memory_hotplug_max() memblock_end_of_DRAM()
 #endif
@@ -13,7 +13,7 @@
 #include <linux/io.h>
 #include <linux/memblock.h>
 #include <linux/of.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/kdump.h>
 #include <asm/firmware.h>
 #include <linux/uio.h>
@@ -9,7 +9,7 @@
 #include <linux/of_fdt.h>
 #include <asm/epapr_hcalls.h>
 #include <asm/cacheflush.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/machdep.h>
 #include <asm/inst.h>

@@ -5,7 +5,7 @@

 #include <linux/kernel.h>
 #include <linux/jump_label.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/inst.h>

 void arch_jump_label_transform(struct jump_entry *entry,
@@ -21,7 +21,7 @@
 #include <asm/processor.h>
 #include <asm/machdep.h>
 #include <asm/debug.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <linux/slab.h>
 #include <asm/inst.h>

@@ -21,7 +21,7 @@
 #include <linux/slab.h>
 #include <linux/set_memory.h>
 #include <linux/execmem.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/cacheflush.h>
 #include <asm/sstep.h>
 #include <asm/sections.h>
@@ -18,7 +18,7 @@
 #include <linux/bug.h>
 #include <linux/sort.h>
 #include <asm/setup.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>

 /* Count how many different relocations (different symbol, different
    addend) */
@@ -17,7 +17,7 @@
 #include <linux/kernel.h>
 #include <asm/module.h>
 #include <asm/firmware.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <linux/sort.h>
 #include <asm/setup.h>
 #include <asm/sections.h>
@@ -13,7 +13,7 @@
 #include <asm/kprobes.h>
 #include <asm/ptrace.h>
 #include <asm/cacheflush.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/sstep.h>
 #include <asm/ppc-opcode.h>
 #include <asm/inst.h>
@@ -54,7 +54,7 @@
 #include <asm/firmware.h>
 #include <asm/hw_irq.h>
 #endif
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/exec.h>
 #include <asm/livepatch.h>
 #include <asm/cpu_has_feature.h>
@@ -14,7 +14,7 @@
 #include <linux/debugfs.h>

 #include <asm/asm-prototypes.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/security_features.h>
 #include <asm/sections.h>
 #include <asm/setup.h>
@@ -40,7 +40,7 @@
 #include <asm/time.h>
 #include <asm/serial.h>
 #include <asm/udbg.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/cpu_has_feature.h>
 #include <asm/asm-prototypes.h>
 #include <asm/kdump.h>
@@ -60,7 +60,7 @@
 #include <asm/xmon.h>
 #include <asm/udbg.h>
 #include <asm/kexec.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/ftrace.h>
 #include <asm/opal.h>
 #include <asm/cputhreads.h>
@@ -2,7 +2,7 @@
 #include <linux/memory.h>
 #include <linux/static_call.h>

-#include <asm/code-patching.h>
+#include <asm/text-patching.h>

 void arch_static_call_transform(void *site, void *tramp, void *func, bool tail)
 {
@@ -23,7 +23,7 @@
 #include <linux/list.h>

 #include <asm/cacheflush.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/ftrace.h>
 #include <asm/syscall.h>
 #include <asm/inst.h>
@@ -23,7 +23,7 @@
 #include <linux/list.h>

 #include <asm/cacheflush.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/ftrace.h>
 #include <asm/syscall.h>
 #include <asm/inst.h>
@@ -17,7 +17,7 @@
 #include <asm/tlb.h>
 #include <asm/tlbflush.h>
 #include <asm/page.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/inst.h>

 static int __patch_mem(void *exec_addr, unsigned long val, void *patch_addr, bool is_dword)
@@ -16,7 +16,7 @@
 #include <linux/sched/mm.h>
 #include <linux/stop_machine.h>
 #include <asm/cputable.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/interrupt.h>
 #include <asm/page.h>
 #include <asm/sections.h>
@@ -6,7 +6,7 @@
 #include <linux/vmalloc.h>
 #include <linux/init.h>

-#include <asm/code-patching.h>
+#include <asm/text-patching.h>

 static int __init instr_is_branch_to_addr(const u32 *instr, unsigned long addr)
 {
@@ -11,7 +11,7 @@
 #include <asm/cpu_has_feature.h>
 #include <asm/sstep.h>
 #include <asm/ppc-opcode.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/inst.h>

 #define MAX_SUBTESTS	16
@@ -25,7 +25,7 @@

 #include <asm/mmu.h>
 #include <asm/machdep.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/sections.h>

 #include <mm/mmu_decl.h>
@@ -57,7 +57,7 @@
 #include <asm/sections.h>
 #include <asm/copro.h>
 #include <asm/udbg.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/fadump.h>
 #include <asm/firmware.h>
 #include <asm/tm.h>
@@ -24,7 +24,7 @@
 #include <linux/pgtable.h>

 #include <asm/udbg.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>

 #include "internal.h"

@@ -7,7 +7,7 @@
 #include <linux/memblock.h>
 #include <linux/sched/task.h>
 #include <asm/pgalloc.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <mm/mmu_decl.h>

 static pgprot_t __init kasan_prot_ro(void)
@@ -26,7 +26,7 @@
 #include <asm/svm.h>
 #include <asm/mmzone.h>
 #include <asm/ftrace.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/setup.h>
 #include <asm/fixmap.h>

@@ -24,7 +24,7 @@
 #include <asm/mmu.h>
 #include <asm/page.h>
 #include <asm/cacheflush.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/smp.h>

 #include <mm/mmu_decl.h>
@@ -10,7 +10,7 @@
 #include <asm/pgalloc.h>
 #include <asm/tlb.h>
 #include <asm/dma.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>

 #include <mm/mmu_decl.h>

@@ -37,7 +37,7 @@
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/cputhreads.h>
 #include <asm/hugetlb.h>
 #include <asm/paca.h>
@@ -24,7 +24,7 @@
 #include <asm/pgalloc.h>
 #include <asm/tlbflush.h>
 #include <asm/tlb.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/cputhreads.h>

 #include <mm/mmu_decl.h>
@@ -1336,7 +1336,7 @@ int hot_add_scn_to_nid(unsigned long scn_addr)
 	return nid;
 }

-static u64 hot_add_drconf_memory_max(void)
+u64 hot_add_drconf_memory_max(void)
 {
 	struct device_node *memory = NULL;
 	struct device_node *dn = NULL;

@@ -18,7 +18,7 @@
 #include <linux/bpf.h>

 #include <asm/kprobes.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>

 #include "bpf_jit.h"

@@ -14,7 +14,7 @@
 #include <asm/machdep.h>
 #include <asm/firmware.h>
 #include <asm/ptrace.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/inst.h>

 #define PERF_8xx_ID_CPU_CYCLES	1
@@ -16,7 +16,7 @@
 #include <asm/machdep.h>
 #include <asm/firmware.h>
 #include <asm/ptrace.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/hw_irq.h>
 #include <asm/interrupt.h>

@@ -23,7 +23,7 @@
 #include <asm/mpic.h>
 #include <asm/cacheflush.h>
 #include <asm/dbell.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/cputhreads.h>
 #include <asm/fsl_pm.h>

@@ -12,7 +12,7 @@
 #include <linux/delay.h>
 #include <linux/pgtable.h>

-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/page.h>
 #include <asm/pci-bridge.h>
 #include <asm/mpic.h>
@@ -35,7 +35,7 @@
 #include <asm/firmware.h>
 #include <asm/rtas.h>
 #include <asm/cputhreads.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>

 #include "interrupt.h"
 #include <asm/udbg.h>
@@ -35,7 +35,7 @@

 #include <asm/ptrace.h>
 #include <linux/atomic.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/irq.h>
 #include <asm/page.h>
 #include <asm/sections.h>
@@ -18,7 +18,7 @@
 #include <asm/opal.h>
 #include <asm/cputhreads.h>
 #include <asm/cpuidle.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/smp.h>
 #include <asm/runlatch.h>
 #include <asm/dbell.h>
@@ -28,7 +28,7 @@
 #include <asm/xive.h>
 #include <asm/opal.h>
 #include <asm/runlatch.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/dbell.h>
 #include <asm/kvm_ppc.h>
 #include <asm/ppc-opcode.h>

@@ -1293,17 +1293,13 @@ static LIST_HEAD(failed_ddw_pdn_list);

 static phys_addr_t ddw_memory_hotplug_max(void)
 {
-	resource_size_t max_addr = memory_hotplug_max();
-	struct device_node *memory;
+	resource_size_t max_addr;

-	for_each_node_by_type(memory, "memory") {
-		struct resource res;
-
-		if (of_address_to_resource(memory, 0, &res))
-			continue;
-
-		max_addr = max_t(resource_size_t, max_addr, res.end + 1);
-	}
+#if defined(CONFIG_NUMA) && defined(CONFIG_MEMORY_HOTPLUG)
+	max_addr = hot_add_drconf_memory_max();
+#else
+	max_addr = memblock_end_of_DRAM();
+#endif

 	return max_addr;
 }
@@ -1609,7 +1605,7 @@ static bool enable_ddw(struct pci_dev *dev, struct device_node *pdn)

 	if (direct_mapping) {
 		/* DDW maps the whole partition, so enable direct DMA mapping */
-		ret = walk_system_ram_range(0, memblock_end_of_DRAM() >> PAGE_SHIFT,
+		ret = walk_system_ram_range(0, ddw_memory_hotplug_max() >> PAGE_SHIFT,
 					    win64->value, tce_setrange_multi_pSeriesLP_walk);
 		if (ret) {
 			dev_info(&dev->dev, "failed to map DMA window for %pOF: %d\n",
@@ -2355,11 +2351,17 @@ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
 	struct memory_notify *arg = data;
 	int ret = 0;

+	/* This notifier can get called when onlining persistent memory as well.
+	 * TCEs are not pre-mapped for persistent memory. Persistent memory will
+	 * always be above ddw_memory_hotplug_max()
+	 */
+
 	switch (action) {
 	case MEM_GOING_ONLINE:
 		spin_lock(&dma_win_list_lock);
 		list_for_each_entry(window, &dma_win_list, list) {
-			if (window->direct) {
+			if (window->direct && (arg->start_pfn << PAGE_SHIFT) <
+					ddw_memory_hotplug_max()) {
 				ret |= tce_setrange_multi_pSeriesLP(arg->start_pfn,
 						arg->nr_pages, window->prop);
 			}
@@ -2371,7 +2373,8 @@ static int iommu_mem_notifier(struct notifier_block *nb, unsigned long action,
 	case MEM_OFFLINE:
 		spin_lock(&dma_win_list_lock);
 		list_for_each_entry(window, &dma_win_list, list) {
-			if (window->direct) {
+			if (window->direct && (arg->start_pfn << PAGE_SHIFT) <
+					ddw_memory_hotplug_max()) {
 				ret |= tce_clearrange_multi_pSeriesLP(arg->start_pfn,
 						arg->nr_pages, window->prop);
 			}

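Both notifier cases now skip memory that onlines above ddw_memory_hotplug_max(), since (as the new comment notes) TCEs are not pre-mapped for persistent memory. The added condition, factored into a helper purely for illustration (not part of the patch):

	/* Sketch: does this hotplugged range start inside the DDW window? */
	static bool start_pfn_in_ddw(unsigned long start_pfn)
	{
		return (start_pfn << PAGE_SHIFT) < ddw_memory_hotplug_max();
	}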
@@ -39,7 +39,7 @@
 #include <asm/xive.h>
 #include <asm/dbell.h>
 #include <asm/plpar_wrappers.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/svm.h>
 #include <asm/kvm_guest.h>

@@ -50,7 +50,7 @@
 #include <asm/xive.h>
 #include <asm/opal.h>
 #include <asm/firmware.h>
-#include <asm/code-patching.h>
+#include <asm/text-patching.h>
 #include <asm/sections.h>
 #include <asm/inst.h>
 #include <asm/interrupt.h>

@@ -13,7 +13,7 @@
 #include <asm/alternative.h>
 #include <asm/cacheflush.h>
 #include <asm/errata_list.h>
-#include <asm/patch.h>
+#include <asm/text-patching.h>
 #include <asm/processor.h>
 #include <asm/sbi.h>
 #include <asm/vendorid_list.h>