Import of kernel-4.18.0-553.83.1.el8_10
parent baad5c6bff
commit 7c661d18ee
@@ -499,6 +499,7 @@ What: /sys/devices/system/cpu/vulnerabilities
		/sys/devices/system/cpu/vulnerabilities/spectre_v2
		/sys/devices/system/cpu/vulnerabilities/srbds
		/sys/devices/system/cpu/vulnerabilities/tsx_async_abort
		/sys/devices/system/cpu/vulnerabilities/vmscape
Date:		January 2018
Contact:	Linux kernel mailing list <linux-kernel@vger.kernel.org>
Description:	Information about CPU vulnerabilities
@@ -19,3 +19,4 @@ are configurable at compile, boot or run time.
   srso
   gather_data_sampling
   reg-file-data-sampling
   vmscape
Documentation/admin-guide/hw-vuln/vmscape.rst (new file, 110 lines)
@@ -0,0 +1,110 @@
.. SPDX-License-Identifier: GPL-2.0

VMSCAPE
=======

VMSCAPE is a vulnerability that may allow a guest to influence the branch
prediction in host userspace. It particularly affects hypervisors like QEMU.

Even if a hypervisor may not have any sensitive data like disk encryption keys,
guest-userspace may be able to attack the guest-kernel using the hypervisor as
a confused deputy.

Affected processors
-------------------

The following CPU families are affected by VMSCAPE:

**Intel processors:**
  - Skylake generation (parts without Enhanced-IBRS)
  - Cascade Lake generation (parts affected by ITS guest/host separation)
  - Alder Lake and newer (parts affected by BHI)

Note that BHI-affected parts that use the BHB clearing software mitigation,
e.g. Ice Lake, are not vulnerable to VMSCAPE.

**AMD processors:**
  - Zen series (families 0x17, 0x19, 0x1a)

**Hygon processors:**
  - Family 0x18
Mitigation
----------

Conditional IBPB
----------------

The kernel tracks when a CPU has run a potentially malicious guest and issues
an IBPB before the first exit to userspace after VM-exit. If userspace did not
run between VM-exit and the next VM-entry, no IBPB is issued.

Note that the existing userspace mitigations against Spectre-v2 are effective
in protecting userspace. They are insufficient, however, to protect the
userspace VMMs from a malicious guest. This is because Spectre-v2 mitigations
are applied at context switch time, while the userspace VMM can run after a
VM-exit without a context switch.

Vulnerability enumeration and mitigation is not applied inside a guest. This is
because nested hypervisors should already be deploying IBPB to isolate
themselves from nested guests.
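The bookkeeping behind the conditional IBPB is small; a minimal sketch,
simplified from the arch/x86 changes later in this import (feature checks and
surrounding context elided)::

   /* Per-CPU flag: set after running a guest, cleared once an IBPB is done */
   DEFINE_PER_CPU(bool, x86_ibpb_exit_to_user);

   /* KVM, after VM-exit: remember that this CPU ran a guest */
   if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER))
           this_cpu_write(x86_ibpb_exit_to_user, true);

   /* On the next exit to userspace: flush the branch predictor once */
   if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER) &&
       this_cpu_read(x86_ibpb_exit_to_user)) {
           indirect_branch_prediction_barrier();
           this_cpu_write(x86_ibpb_exit_to_user, false);
   }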
SMT considerations
------------------

When Simultaneous Multi-Threading (SMT) is enabled, hypervisors can be
vulnerable to cross-thread attacks. For complete protection against VMSCAPE
attacks in SMT environments, STIBP should be enabled.

The kernel will issue a warning if SMT is enabled without adequate STIBP
protection. The warning is not issued when:

- SMT is disabled
- STIBP is enabled system-wide
- Intel eIBRS is enabled (which implies STIBP protection)
System information and options
------------------------------

The sysfs file showing VMSCAPE mitigation status is:

	/sys/devices/system/cpu/vulnerabilities/vmscape

The possible values in this file are:

 * 'Not affected':

   The processor is not vulnerable to VMSCAPE attacks.

 * 'Vulnerable':

   The processor is vulnerable and no mitigation has been applied.

 * 'Mitigation: IBPB before exit to userspace':

   Conditional IBPB mitigation is enabled. The kernel tracks when a CPU has
   run a potentially malicious guest and issues an IBPB before the first
   exit to userspace after VM-exit.

 * 'Mitigation: IBPB on VMEXIT':

   IBPB is issued on every VM-exit. This occurs when other mitigations like
   RETBLEED or SRSO are already issuing IBPB on VM-exit.
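The status can be read like any other sysfs attribute; a minimal userspace
sketch in C (file name as documented above, error handling trimmed)::

   #include <stdio.h>

   int main(void)
   {
           char buf[128];
           FILE *f = fopen("/sys/devices/system/cpu/vulnerabilities/vmscape", "r");

           if (!f)
                   return 1;
           if (fgets(buf, sizeof(buf), f))
                   printf("vmscape: %s", buf);  /* e.g. "Mitigation: IBPB before exit to userspace" */
           fclose(f);
           return 0;
   }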
Mitigation control on the kernel command line
----------------------------------------------

The mitigation can be controlled via the ``vmscape=`` command line parameter:

 * ``vmscape=off``:

   Disable the VMSCAPE mitigation.

 * ``vmscape=ibpb``:

   Enable conditional IBPB mitigation (default when CONFIG_MITIGATION_VMSCAPE=y).

 * ``vmscape=force``:

   Force vulnerability detection and mitigation even on processors that are
   not known to be affected.
@@ -2874,6 +2874,7 @@
			       srbds=off [X86,INTEL]
			       ssbd=force-off [ARM64]
			       tsx_async_abort=off [X86]
			       vmscape=off [X86]

			Exceptions:
			       This does not have any effect on
@@ -6019,6 +6020,16 @@
	vmpoff=		[KNL,S390] Perform z/VM CP command after power off.
			Format: <command>

	vmscape=	[X86] Controls mitigation for VMscape attacks.
			VMscape attacks can leak information from a userspace
			hypervisor to a guest via speculative side-channels.

			off	- disable the mitigation
			ibpb	- use Indirect Branch Prediction Barrier
				  (IBPB) mitigation (default)
			force	- force vulnerability detection even on
				  unaffected processors

	vsyscall=	[X86-64]
			Controls the behavior of vsyscalls (i.e. calls to
			fixed addresses of 0xffffffffff600x00 from legacy
@@ -12,7 +12,7 @@ RHEL_MINOR = 10
#
# Use this spot to avoid future merge conflicts.
# Do not trim this comment.
RHEL_RELEASE = 553.82.1
RHEL_RELEASE = 553.83.1

#
# ZSTREAM
@@ -225,7 +225,7 @@ static inline int __pcilg_mio_inuser(
		[ioaddr_len] "+&d" (ioaddr_len.pair),
		[cc] "+d" (cc), [val] "=d" (val),
		[dst] "+a" (dst), [cnt] "+d" (cnt), [tmp] "=d" (tmp),
		[shift] "+d" (shift)
		[shift] "+a" (shift)
		:: "cc", "memory");

	/* did we write everything to the user space buffer? */
@@ -2626,6 +2626,14 @@ config MITIGATION_SPECTRE_BHI
	  indirect branches.
	  See <file:Documentation/admin-guide/hw-vuln/spectre.rst>

config MITIGATION_VMSCAPE
	bool "Mitigate VMSCAPE"
	depends on KVM
	default y
	help
	  Enable mitigation for VMSCAPE attacks. VMSCAPE is a hardware security
	  vulnerability on Intel and AMD CPUs that may allow a guest to do
	  Spectre v2 style attacks on the userspace hypervisor.
endif

config ARCH_HAS_ADD_PAGES
@@ -222,6 +222,13 @@ __visible inline void prepare_exit_to_usermode(struct pt_regs *regs)
	ti->status &= ~(TS_COMPAT|TS_I386_REGS_POKED);
#endif

	/* Avoid unnecessary reads of 'x86_ibpb_exit_to_user' */
	if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER) &&
	    this_cpu_read(x86_ibpb_exit_to_user)) {
		indirect_branch_prediction_barrier();
		this_cpu_write(x86_ibpb_exit_to_user, false);
	}

	user_enter_irqoff();

	amd_clear_divider();
@@ -445,6 +445,7 @@
#define X86_FEATURE_BHI_CTRL		(21*32+ 2) /* "" BHI_DIS_S HW control available */
#define X86_FEATURE_CLEAR_BHB_HW	(21*32+ 3) /* "" BHI_DIS_S HW control enabled */
#define X86_FEATURE_CLEAR_BHB_LOOP_ON_VMEXIT (21*32+ 4) /* "" Clear branch history at vmexit using SW loop */
#define X86_FEATURE_IBPB_EXIT_TO_USER	(21*32+14) /* "" Use IBPB on exit-to-userspace, see VMSCAPE bug */

/* RHEL specific auxiliary flags, word 22 */
#define X86_FEATURE_IBRS_EXIT_SET	(22*32+ 0) /* "" Set IBRS on kernel exit */
@@ -497,4 +498,5 @@
#define X86_BUG_DIV0			X86_BUG(1*32 + 1) /* AMD DIV0 speculation bug */
#define X86_BUG_RFDS			X86_BUG(1*32 + 2) /* CPU is vulnerable to Register File Data Sampling */
#define X86_BUG_BHI			X86_BUG(1*32 + 3) /* CPU is affected by Branch History Injection */
#define X86_BUG_VMSCAPE			X86_BUG(1*32 +10) /* "vmscape" CPU is affected by VMSCAPE attacks from guests */
#endif /* _ASM_X86_CPUFEATURES_H */
@@ -371,6 +371,8 @@ void alternative_msr_write(unsigned int msr, u64 val, unsigned int feature)

extern u64 x86_pred_cmd;

DECLARE_PER_CPU(bool, x86_ibpb_exit_to_user);

static inline void indirect_branch_prediction_barrier(void)
{
	alternative_msr_write(MSR_IA32_PRED_CMD, x86_pred_cmd, X86_FEATURE_USE_IBPB);
@@ -51,6 +51,9 @@ static void __init mmio_select_mitigation(void);
static void __init srbds_select_mitigation(void);
static void __init srso_select_mitigation(void);
static void __init gds_select_mitigation(void);
static void __init vmscape_select_mitigation(void);
static void __init vmscape_update_mitigation(void);
static void __init vmscape_apply_mitigation(void);

/* The base value of the SPEC_CTRL MSR without task-specific bits set */
u64 x86_spec_ctrl_base;
@@ -60,6 +63,14 @@ EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
DEFINE_PER_CPU(u64, x86_spec_ctrl_current);
EXPORT_SYMBOL_GPL(x86_spec_ctrl_current);

/*
 * Set when the CPU has run a potentially malicious guest. An IBPB will
 * be needed before running userspace. That IBPB will flush the branch
 * predictor content.
 */
DEFINE_PER_CPU(bool, x86_ibpb_exit_to_user);
EXPORT_PER_CPU_SYMBOL_GPL(x86_ibpb_exit_to_user);

u64 x86_pred_cmd __ro_after_init = PRED_CMD_IBPB;
EXPORT_SYMBOL_GPL(x86_pred_cmd);

@@ -185,6 +196,10 @@ void __init check_bugs(void)
	srso_select_mitigation();
	gds_select_mitigation();

	vmscape_select_mitigation();
	vmscape_update_mitigation();
	vmscape_apply_mitigation();

	arch_smt_update();

#ifdef CONFIG_X86_32
@@ -1939,66 +1954,6 @@ static void update_mds_branch_idle(void)
	}
}

#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"

void cpu_bugs_smt_update(void)
{
	mutex_lock(&spec_ctrl_mutex);

	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
		pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);

	switch (spectre_v2_user_stibp) {
	case SPECTRE_V2_USER_NONE:
		break;
	case SPECTRE_V2_USER_STRICT:
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		update_stibp_strict();
		break;
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		update_indir_branch_cond();
		break;
	}

	switch (mds_mitigation) {
	case MDS_MITIGATION_FULL:
	case MDS_MITIGATION_VMWERV:
		if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
			pr_warn_once(MDS_MSG_SMT);
		update_mds_branch_idle();
		break;
	case MDS_MITIGATION_OFF:
		break;
	}

	switch (taa_mitigation) {
	case TAA_MITIGATION_VERW:
	case TAA_MITIGATION_UCODE_NEEDED:
		if (sched_smt_active())
			pr_warn_once(TAA_MSG_SMT);
		break;
	case TAA_MITIGATION_TSX_DISABLED:
	case TAA_MITIGATION_OFF:
		break;
	}

	switch (mmio_mitigation) {
	case MMIO_MITIGATION_VERW:
	case MMIO_MITIGATION_UCODE_NEEDED:
		if (sched_smt_active())
			pr_warn_once(MMIO_MSG_SMT);
		break;
	case MMIO_MITIGATION_OFF:
		break;
	}

	mutex_unlock(&spec_ctrl_mutex);
}

#ifdef CONFIG_DEBUG_FS
/*
 * Provide a debugfs file to dump SPEC_CTRL MSRs of all the CPUs
@@ -2704,9 +2659,163 @@ out:
	pr_info("%s\n", srso_strings[srso_mitigation]);
}

#undef pr_fmt
#define pr_fmt(fmt)	"VMSCAPE: " fmt

enum vmscape_mitigations {
	VMSCAPE_MITIGATION_NONE,
	VMSCAPE_MITIGATION_AUTO,
	VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER,
	VMSCAPE_MITIGATION_IBPB_ON_VMEXIT,
};

static const char * const vmscape_strings[] = {
	[VMSCAPE_MITIGATION_NONE]		= "Vulnerable",
	/* [VMSCAPE_MITIGATION_AUTO] */
	[VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER]	= "Mitigation: IBPB before exit to userspace",
	[VMSCAPE_MITIGATION_IBPB_ON_VMEXIT]	= "Mitigation: IBPB on VMEXIT",
};

static enum vmscape_mitigations vmscape_mitigation __ro_after_init =
	IS_ENABLED(CONFIG_MITIGATION_VMSCAPE) ? VMSCAPE_MITIGATION_AUTO : VMSCAPE_MITIGATION_NONE;

static int __init vmscape_parse_cmdline(char *str)
{
	if (!str)
		return -EINVAL;

	if (!strcmp(str, "off")) {
		vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
	} else if (!strcmp(str, "ibpb")) {
		vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;
	} else if (!strcmp(str, "force")) {
		setup_force_cpu_bug(X86_BUG_VMSCAPE);
		vmscape_mitigation = VMSCAPE_MITIGATION_AUTO;
	} else {
		pr_err("Ignoring unknown vmscape=%s option.\n", str);
	}

	return 0;
}
early_param("vmscape", vmscape_parse_cmdline);

static void __init vmscape_select_mitigation(void)
{
	if (cpu_mitigations_off() ||
	    !boot_cpu_has_bug(X86_BUG_VMSCAPE) ||
	    !boot_cpu_has(X86_FEATURE_IBPB)) {
		vmscape_mitigation = VMSCAPE_MITIGATION_NONE;
		return;
	}

	if (vmscape_mitigation == VMSCAPE_MITIGATION_AUTO)
		vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER;
}

static void __init vmscape_update_mitigation(void)
{
	if (!boot_cpu_has_bug(X86_BUG_VMSCAPE))
		return;

	if (retbleed_mitigation == RETBLEED_MITIGATION_IBPB ||
	    srso_mitigation == SRSO_MITIGATION_IBPB_ON_VMEXIT)
		vmscape_mitigation = VMSCAPE_MITIGATION_IBPB_ON_VMEXIT;

	pr_info("%s\n", vmscape_strings[vmscape_mitigation]);
}

static void __init vmscape_apply_mitigation(void)
{
	if (vmscape_mitigation == VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER)
		setup_force_cpu_cap(X86_FEATURE_IBPB_EXIT_TO_USER);
}

#undef pr_fmt
#define pr_fmt(fmt)	fmt

#define MDS_MSG_SMT "MDS CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/mds.html for more details.\n"
#define TAA_MSG_SMT "TAA CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/tsx_async_abort.html for more details.\n"
#define MMIO_MSG_SMT "MMIO Stale Data CPU bug present and SMT on, data leak possible. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/processor_mmio_stale_data.html for more details.\n"
#define VMSCAPE_MSG_SMT "VMSCAPE: SMT on, STIBP is required for full protection. See https://www.kernel.org/doc/html/latest/admin-guide/hw-vuln/vmscape.html for more details.\n"
void cpu_bugs_smt_update(void)
{
	mutex_lock(&spec_ctrl_mutex);

	if (sched_smt_active() && unprivileged_ebpf_enabled() &&
	    spectre_v2_enabled == SPECTRE_V2_EIBRS_LFENCE)
		pr_warn_once(SPECTRE_V2_EIBRS_LFENCE_EBPF_SMT_MSG);

	switch (spectre_v2_user_stibp) {
	case SPECTRE_V2_USER_NONE:
		break;
	case SPECTRE_V2_USER_STRICT:
	case SPECTRE_V2_USER_STRICT_PREFERRED:
		update_stibp_strict();
		break;
	case SPECTRE_V2_USER_PRCTL:
	case SPECTRE_V2_USER_SECCOMP:
		update_indir_branch_cond();
		break;
	}

	switch (mds_mitigation) {
	case MDS_MITIGATION_FULL:
	case MDS_MITIGATION_VMWERV:
		if (sched_smt_active() && !boot_cpu_has(X86_BUG_MSBDS_ONLY))
			pr_warn_once(MDS_MSG_SMT);
		update_mds_branch_idle();
		break;
	case MDS_MITIGATION_OFF:
		break;
	}

	switch (taa_mitigation) {
	case TAA_MITIGATION_VERW:
	case TAA_MITIGATION_UCODE_NEEDED:
		if (sched_smt_active())
			pr_warn_once(TAA_MSG_SMT);
		break;
	case TAA_MITIGATION_TSX_DISABLED:
	case TAA_MITIGATION_OFF:
		break;
	}

	switch (mmio_mitigation) {
	case MMIO_MITIGATION_VERW:
	case MMIO_MITIGATION_UCODE_NEEDED:
		if (sched_smt_active())
			pr_warn_once(MMIO_MSG_SMT);
		break;
	case MMIO_MITIGATION_OFF:
		break;
	}

	switch (vmscape_mitigation) {
	case VMSCAPE_MITIGATION_NONE:
	case VMSCAPE_MITIGATION_AUTO:
		break;
	case VMSCAPE_MITIGATION_IBPB_ON_VMEXIT:
	case VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER:
		/*
		 * Hypervisors can be attacked across-threads, warn for SMT when
		 * STIBP is not already enabled system-wide.
		 *
		 * Intel eIBRS (!AUTOIBRS) implies STIBP on.
		 */
		if (!sched_smt_active() ||
		    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT ||
		    spectre_v2_user_stibp == SPECTRE_V2_USER_STRICT_PREFERRED ||
		    (spectre_v2_in_eibrs_mode(spectre_v2_enabled) &&
		     !boot_cpu_has(X86_FEATURE_AUTOIBRS)))
			break;
		pr_warn_once(VMSCAPE_MSG_SMT);
		break;
	}

	mutex_unlock(&spec_ctrl_mutex);
}

#ifdef CONFIG_SYSFS

#define L1TF_DEFAULT_MSG "Mitigation: PTE Inversion"
@@ -2940,6 +3049,11 @@ static ssize_t gds_show_state(char *buf)
	return sysfs_emit(buf, "%s\n", gds_strings[gds_mitigation]);
}

static ssize_t vmscape_show_state(char *buf)
{
	return sysfs_emit(buf, "%s\n", vmscape_strings[vmscape_mitigation]);
}

static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
@@ -2998,6 +3112,9 @@ static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr
	case X86_BUG_RFDS:
		return rfds_show_state(buf);

	case X86_BUG_VMSCAPE:
		return vmscape_show_state(buf);

	default:
		break;
	}
@@ -3077,4 +3194,9 @@ ssize_t cpu_show_reg_file_data_sampling(struct device *dev, struct device_attrib
{
	return cpu_show_common(dev, attr, buf, X86_BUG_RFDS);
}

ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_VMSCAPE);
}
#endif
@@ -1164,39 +1164,48 @@ static const __initconst struct x86_cpu_id_v2 cpu_vuln_whitelist[] = {
#define GDS	BIT(6)
/* CPU is affected by Register File Data Sampling */
#define RFDS	BIT(7)
/* CPU is affected by VMSCAPE */
#define VMSCAPE	BIT(11)

static const struct x86_cpu_id_v2 cpu_vuln_blacklist[] __initconst = {
	VULNBL_INTEL_STEPPINGS(IVYBRIDGE,	X86_STEPPING_ANY,	SRBDS),
	VULNBL_INTEL_STEPPINGS(HASWELL,	X86_STEPPING_ANY,	SRBDS),
	VULNBL_INTEL_STEPPINGS(HASWELL_L,	X86_STEPPING_ANY,	SRBDS),
	VULNBL_INTEL_STEPPINGS(HASWELL_G,	X86_STEPPING_ANY,	SRBDS),
	VULNBL_INTEL_STEPPINGS(HASWELL_X,	X86_STEPPING_ANY,	MMIO),
	VULNBL_INTEL_STEPPINGS(BROADWELL_D,	X86_STEPPING_ANY,	MMIO),
	VULNBL_INTEL_STEPPINGS(BROADWELL_G,	X86_STEPPING_ANY,	SRBDS),
	VULNBL_INTEL_STEPPINGS(BROADWELL_X,	X86_STEPPING_ANY,	MMIO),
	VULNBL_INTEL_STEPPINGS(BROADWELL,	X86_STEPPING_ANY,	SRBDS),
	VULNBL_INTEL_STEPPINGS(SKYLAKE_X,	X86_STEPPING_ANY,	MMIO | RETBLEED | GDS),
	VULNBL_INTEL_STEPPINGS(SKYLAKE_L,	X86_STEPPING_ANY,	MMIO | RETBLEED | GDS | SRBDS),
	VULNBL_INTEL_STEPPINGS(SKYLAKE,	X86_STEPPING_ANY,	MMIO | RETBLEED | GDS | SRBDS),
	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPING_ANY,	MMIO | RETBLEED | GDS | SRBDS),
	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPING_ANY,	MMIO | RETBLEED | GDS | SRBDS),
	VULNBL_INTEL_STEPPINGS(CANNONLAKE_L,	X86_STEPPING_ANY,	RETBLEED),
	VULNBL_INTEL_STEPPINGS(SANDYBRIDGE_X,	X86_STEPPING_ANY,	VMSCAPE),
	VULNBL_INTEL_STEPPINGS(SANDYBRIDGE,	X86_STEPPING_ANY,	VMSCAPE),
	VULNBL_INTEL_STEPPINGS(IVYBRIDGE_X,	X86_STEPPING_ANY,	VMSCAPE),
	VULNBL_INTEL_STEPPINGS(IVYBRIDGE,	X86_STEPPING_ANY,	SRBDS | VMSCAPE),
	VULNBL_INTEL_STEPPINGS(HASWELL,	X86_STEPPING_ANY,	SRBDS | VMSCAPE),
	VULNBL_INTEL_STEPPINGS(HASWELL_L,	X86_STEPPING_ANY,	SRBDS | VMSCAPE),
	VULNBL_INTEL_STEPPINGS(HASWELL_G,	X86_STEPPING_ANY,	SRBDS | VMSCAPE),
	VULNBL_INTEL_STEPPINGS(HASWELL_X,	X86_STEPPING_ANY,	MMIO | VMSCAPE),
	VULNBL_INTEL_STEPPINGS(BROADWELL_D,	X86_STEPPING_ANY,	MMIO | VMSCAPE),
	VULNBL_INTEL_STEPPINGS(BROADWELL_G,	X86_STEPPING_ANY,	SRBDS | VMSCAPE),
	VULNBL_INTEL_STEPPINGS(BROADWELL_X,	X86_STEPPING_ANY,	MMIO | VMSCAPE),
	VULNBL_INTEL_STEPPINGS(BROADWELL,	X86_STEPPING_ANY,	SRBDS | VMSCAPE),
	VULNBL_INTEL_STEPPINGS(SKYLAKE_X,	X86_STEPPING_ANY,	MMIO | RETBLEED | GDS | VMSCAPE),
	VULNBL_INTEL_STEPPINGS(SKYLAKE_L,	X86_STEPPING_ANY,	MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
	VULNBL_INTEL_STEPPINGS(SKYLAKE,	X86_STEPPING_ANY,	MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
	VULNBL_INTEL_STEPPINGS(KABYLAKE_L,	X86_STEPPING_ANY,	MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
	VULNBL_INTEL_STEPPINGS(KABYLAKE,	X86_STEPPING_ANY,	MMIO | RETBLEED | GDS | SRBDS | VMSCAPE),
	VULNBL_INTEL_STEPPINGS(CANNONLAKE_L,	X86_STEPPING_ANY,	RETBLEED | VMSCAPE),
	VULNBL_INTEL_STEPPINGS(ICELAKE_L,	X86_STEPPING_ANY,	MMIO | MMIO_SBDS | RETBLEED | GDS),
	VULNBL_INTEL_STEPPINGS(ICELAKE_D,	X86_STEPPING_ANY,	MMIO | GDS),
	VULNBL_INTEL_STEPPINGS(ICELAKE_X,	X86_STEPPING_ANY,	MMIO | GDS),
	VULNBL_INTEL_STEPPINGS(COMETLAKE,	X86_STEPPING_ANY,	MMIO | MMIO_SBDS | RETBLEED | GDS),
	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPINGS(0x0, 0x0),	MMIO | RETBLEED),
	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPING_ANY,	MMIO | MMIO_SBDS | RETBLEED | GDS),
	VULNBL_INTEL_STEPPINGS(COMETLAKE,	X86_STEPPING_ANY,	MMIO | MMIO_SBDS | RETBLEED | GDS | VMSCAPE),
	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPINGS(0x0, 0x0),	MMIO | RETBLEED | VMSCAPE),
	VULNBL_INTEL_STEPPINGS(COMETLAKE_L,	X86_STEPPING_ANY,	MMIO | MMIO_SBDS | RETBLEED | GDS | VMSCAPE),
	VULNBL_INTEL_STEPPINGS(TIGERLAKE_L,	X86_STEPPING_ANY,	GDS),
	VULNBL_INTEL_STEPPINGS(TIGERLAKE,	X86_STEPPING_ANY,	GDS),
	VULNBL_INTEL_STEPPINGS(LAKEFIELD,	X86_STEPPING_ANY,	MMIO | MMIO_SBDS | RETBLEED),
	VULNBL_INTEL_STEPPINGS(ROCKETLAKE,	X86_STEPPING_ANY,	MMIO | RETBLEED | GDS),
	VULNBL_INTEL_STEPPINGS(ALDERLAKE,	X86_STEPPING_ANY,	RFDS),
	VULNBL_INTEL_STEPPINGS(ALDERLAKE_L,	X86_STEPPING_ANY,	RFDS),
	VULNBL_INTEL_STEPPINGS(RAPTORLAKE,	X86_STEPPING_ANY,	RFDS),
	VULNBL_INTEL_STEPPINGS(RAPTORLAKE_P,	X86_STEPPING_ANY,	RFDS),
	VULNBL_INTEL_STEPPINGS(RAPTORLAKE_S,	X86_STEPPING_ANY,	RFDS),
	VULNBL_INTEL_STEPPINGS(ATOM_GRACEMONT,	X86_STEPPING_ANY,	RFDS),
	VULNBL_INTEL_STEPPINGS(ALDERLAKE,	X86_STEPPING_ANY,	RFDS | VMSCAPE),
	VULNBL_INTEL_STEPPINGS(ALDERLAKE_L,	X86_STEPPING_ANY,	RFDS | VMSCAPE),
	VULNBL_INTEL_STEPPINGS(RAPTORLAKE,	X86_STEPPING_ANY,	RFDS | VMSCAPE),
	VULNBL_INTEL_STEPPINGS(RAPTORLAKE_P,	X86_STEPPING_ANY,	RFDS | VMSCAPE),
	VULNBL_INTEL_STEPPINGS(RAPTORLAKE_S,	X86_STEPPING_ANY,	RFDS | VMSCAPE),
	VULNBL_INTEL_STEPPINGS(METEORLAKE_L,	X86_STEPPING_ANY,	VMSCAPE),
	VULNBL_INTEL_STEPPINGS(SAPPHIRERAPIDS_X,X86_STEPPING_ANY,	VMSCAPE),
	VULNBL_INTEL_STEPPINGS(GRANITERAPIDS_X,	X86_STEPPING_ANY,	VMSCAPE),
	VULNBL_INTEL_STEPPINGS(EMERALDRAPIDS_X,	X86_STEPPING_ANY,	VMSCAPE),
	VULNBL_INTEL_STEPPINGS(ATOM_GRACEMONT,	X86_STEPPING_ANY,	RFDS | VMSCAPE),
	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT,	X86_STEPPING_ANY,	MMIO | MMIO_SBDS | RFDS),
	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_D,	X86_STEPPING_ANY,	MMIO | RFDS),
	VULNBL_INTEL_STEPPINGS(ATOM_TREMONT_L,	X86_STEPPING_ANY,	MMIO | MMIO_SBDS | RFDS),
@@ -1206,9 +1215,9 @@ static const struct x86_cpu_id_v2 cpu_vuln_blacklist[] __initconst = {

	VULNBL_AMD(0x15, RETBLEED),
	VULNBL_AMD(0x16, RETBLEED),
	VULNBL_AMD(0x17, RETBLEED | SRSO),
	VULNBL_HYGON(0x18, RETBLEED),
	VULNBL_AMD(0x19, SRSO),
	VULNBL_AMD(0x17, RETBLEED | SRSO | VMSCAPE),
	VULNBL_HYGON(0x18, RETBLEED | VMSCAPE),
	VULNBL_AMD(0x19, SRSO | VMSCAPE),
	{}
};
@@ -1371,6 +1380,14 @@ static void __init cpu_set_bug_bits(struct cpuinfo_x86 *c)
	     boot_cpu_has(X86_FEATURE_HYPERVISOR)))
		setup_force_cpu_bug(X86_BUG_BHI);

	/*
	 * Set the bug only on bare-metal. A nested hypervisor should already be
	 * deploying IBPB to isolate itself from nested guests.
	 */
	if (cpu_matches(cpu_vuln_blacklist, VMSCAPE) &&
	    !boot_cpu_has(X86_FEATURE_HYPERVISOR))
		setup_force_cpu_bug(X86_BUG_VMSCAPE);

	if (cpu_matches(cpu_vuln_whitelist, NO_MELTDOWN))
		return;
@@ -10037,6 +10037,15 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
	if (vcpu->arch.guest_fpu.xfd_err)
		wrmsrl(MSR_IA32_XFD_ERR, 0);

	/*
	 * Mark this CPU as needing a branch predictor flush before running
	 * userspace. Must be done before enabling preemption to ensure it gets
	 * set for the CPU that actually ran the guest, and not the CPU that it
	 * may migrate to.
	 */
	if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER))
		this_cpu_write(x86_ibpb_exit_to_user, true);

	/*
	 * Consume any pending interrupts, including the possible source of
	 * VM-Exit on SVM and any ticks that occur between VM-Exit and now.
@@ -756,6 +756,7 @@ CONFIG_SLS=y
# CONFIG_GDS_FORCE_MITIGATION is not set
CONFIG_MITIGATION_RFDS=y
CONFIG_MITIGATION_SPECTRE_BHI=y
CONFIG_MITIGATION_VMSCAPE=y
CONFIG_ARCH_HAS_ADD_PAGES=y
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
@@ -757,6 +757,7 @@ CONFIG_SLS=y
# CONFIG_GDS_FORCE_MITIGATION is not set
CONFIG_MITIGATION_RFDS=y
CONFIG_MITIGATION_SPECTRE_BHI=y
CONFIG_MITIGATION_VMSCAPE=y
CONFIG_ARCH_HAS_ADD_PAGES=y
CONFIG_ARCH_ENABLE_MEMORY_HOTPLUG=y
CONFIG_ARCH_ENABLE_MEMORY_HOTREMOVE=y
@@ -551,6 +551,7 @@ CPU_SHOW_VULN_FALLBACK(retbleed);
CPU_SHOW_VULN_FALLBACK(spec_rstack_overflow);
CPU_SHOW_VULN_FALLBACK(gds);
CPU_SHOW_VULN_FALLBACK(reg_file_data_sampling);
CPU_SHOW_VULN_FALLBACK(vmscape);

static DEVICE_ATTR(meltdown, 0444, cpu_show_meltdown, NULL);
static DEVICE_ATTR(spectre_v1, 0444, cpu_show_spectre_v1, NULL);
@@ -566,6 +567,7 @@ static DEVICE_ATTR(retbleed, 0444, cpu_show_retbleed, NULL);
static DEVICE_ATTR(spec_rstack_overflow, 0444, cpu_show_spec_rstack_overflow, NULL);
static DEVICE_ATTR(gather_data_sampling, 0444, cpu_show_gds, NULL);
static DEVICE_ATTR(reg_file_data_sampling, 0444, cpu_show_reg_file_data_sampling, NULL);
static DEVICE_ATTR(vmscape, 0444, cpu_show_vmscape, NULL);

static struct attribute *cpu_root_vulnerabilities_attrs[] = {
	&dev_attr_meltdown.attr,
@@ -582,6 +584,7 @@ static struct attribute *cpu_root_vulnerabilities_attrs[] = {
	&dev_attr_spec_rstack_overflow.attr,
	&dev_attr_gather_data_sampling.attr,
	&dev_attr_reg_file_data_sampling.attr,
	&dev_attr_vmscape.attr,
	NULL
};
@@ -1709,6 +1709,7 @@ I40E_CHECK_CMD_LENGTH(i40e_aq_set_phy_config);
struct i40e_aq_set_mac_config {
	__le16	max_frame_size;
	u8	params;
#define I40E_AQ_SET_MAC_CONFIG_CRC_EN	BIT(2)
	u8	tx_timer_priority; /* bitmap */
	__le16	tx_timer_value;
	__le16	fc_refresh_threshold;
@@ -1503,6 +1503,40 @@ int i40e_set_fc(struct i40e_hw *hw, u8 *aq_failures,
	return status;
}

/**
 * i40e_aq_set_mac_config - Configure MAC settings
 * @hw: pointer to the hw struct
 * @max_frame_size: Maximum Frame Size to be supported by the port
 * @cmd_details: pointer to command details structure or NULL
 *
 * Set MAC configuration (0x0603). Note that max_frame_size must be greater
 * than zero.
 *
 * Return: 0 on success, or a negative error code on failure.
 */
int i40e_aq_set_mac_config(struct i40e_hw *hw, u16 max_frame_size,
			   struct i40e_asq_cmd_details *cmd_details)
{
	struct i40e_aq_set_mac_config *cmd;
	struct i40e_aq_desc desc;

	cmd = (struct i40e_aq_set_mac_config *)&desc.params.raw;

	if (max_frame_size == 0)
		return -EINVAL;

	i40e_fill_default_direct_cmd_desc(&desc, i40e_aqc_opc_set_mac_config);

	cmd->max_frame_size = cpu_to_le16(max_frame_size);
	cmd->params = I40E_AQ_SET_MAC_CONFIG_CRC_EN;

#define I40E_AQ_SET_MAC_CONFIG_FC_DEFAULT_THRESHOLD	0x7FFF
	cmd->fc_refresh_threshold =
		cpu_to_le16(I40E_AQ_SET_MAC_CONFIG_FC_DEFAULT_THRESHOLD);

	return i40e_asq_send_command(hw, &desc, NULL, 0, cmd_details);
}

/**
 * i40e_aq_clear_pxe_mode
 * @hw: pointer to the hw struct
@@ -16194,13 +16194,17 @@ static int i40e_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
			 ERR_PTR(err),
			 i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* make sure the MFS hasn't been set lower than the default */
#define MAX_FRAME_SIZE_DEFAULT 0x2600
	val = (rd32(&pf->hw, I40E_PRTGL_SAH) &
	       I40E_PRTGL_SAH_MFS_MASK) >> I40E_PRTGL_SAH_MFS_SHIFT;
	if (val < MAX_FRAME_SIZE_DEFAULT)
		dev_warn(&pdev->dev, "MFS for port %x has been set below the default: %x\n",
			 i, val);

	err = i40e_aq_set_mac_config(hw, MAX_FRAME_SIZE_DEFAULT, NULL);
	if (err)
		dev_warn(&pdev->dev, "set mac config ret = %pe last_status = %s\n",
			 ERR_PTR(err), i40e_aq_str(&pf->hw, pf->hw.aq.asq_last_status));

	/* Make sure the MFS is set to the expected value */
	val = rd32(hw, I40E_PRTGL_SAH);
	FIELD_MODIFY(I40E_PRTGL_SAH_MFS_MASK, &val, MAX_FRAME_SIZE_DEFAULT);
	wr32(hw, I40E_PRTGL_SAH, val);

	/* Add a filter to drop all Flow control frames from any VSI from being
	 * transmitted. By doing so we stop a malicious VF from sending out
@@ -109,6 +109,8 @@ int i40e_aq_set_mac_loopback(struct i40e_hw *hw,
			     struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_set_phy_int_mask(struct i40e_hw *hw, u16 mask,
			     struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_set_mac_config(struct i40e_hw *hw, u16 max_frame_size,
			   struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_clear_pxe_mode(struct i40e_hw *hw,
			   struct i40e_asq_cmd_details *cmd_details);
int i40e_aq_set_link_restart_an(struct i40e_hw *hw,
@@ -1903,7 +1903,7 @@ void qed_get_vport_stats(struct qed_dev *cdev, struct qed_eth_stats *stats)
{
	u32 i;

	if (!cdev) {
	if (!cdev || cdev->recov_in_prog) {
		memset(stats, 0, sizeof(*stats));
		return;
	}
@@ -280,6 +280,10 @@ struct qede_dev {
#define QEDE_ERR_WARN			3

	struct qede_dump_info		dump_info;
	struct delayed_work		periodic_task;
	unsigned long			stats_coal_ticks;
	u32				stats_coal_usecs;
	spinlock_t			stats_lock; /* lock for vport stats access */
};

enum QEDE_STATE {
@@ -430,6 +430,8 @@ static void qede_get_ethtool_stats(struct net_device *dev,
		}
	}

	spin_lock(&edev->stats_lock);

	for (i = 0; i < QEDE_NUM_STATS; i++) {
		if (qede_is_irrelevant_stat(edev, i))
			continue;
@@ -439,6 +441,8 @@ static void qede_get_ethtool_stats(struct net_device *dev,
		buf++;
	}

	spin_unlock(&edev->stats_lock);

	__qede_unlock(edev);
}
@@ -830,6 +834,7 @@ out:

	coal->rx_coalesce_usecs = rx_coal;
	coal->tx_coalesce_usecs = tx_coal;
	coal->stats_block_coalesce_usecs = edev->stats_coal_usecs;

	return rc;
}
@@ -844,6 +849,19 @@ int qede_set_coalesce(struct net_device *dev,
	int i, rc = 0;
	u16 rxc, txc;

	if (edev->stats_coal_usecs != coal->stats_block_coalesce_usecs) {
		edev->stats_coal_usecs = coal->stats_block_coalesce_usecs;
		if (edev->stats_coal_usecs) {
			edev->stats_coal_ticks = usecs_to_jiffies(edev->stats_coal_usecs);
			schedule_delayed_work(&edev->periodic_task, 0);

			DP_INFO(edev, "Configured stats coal ticks=%lu jiffies\n",
				edev->stats_coal_ticks);
		} else {
			cancel_delayed_work_sync(&edev->periodic_task);
		}
	}

	if (!netif_running(dev)) {
		DP_INFO(edev, "Interface is down\n");
		return -EINVAL;
@@ -2254,7 +2272,8 @@ out:
}

static const struct ethtool_ops qede_ethtool_ops = {
	.supported_coalesce_params	= ETHTOOL_COALESCE_USECS,
	.supported_coalesce_params	= ETHTOOL_COALESCE_USECS |
					  ETHTOOL_COALESCE_STATS_BLOCK_USECS,
	.get_link_ksettings		= qede_get_link_ksettings,
	.set_link_ksettings		= qede_set_link_ksettings,
	.get_drvinfo			= qede_get_drvinfo,
@@ -2305,7 +2324,8 @@ static const struct ethtool_ops qede_ethtool_ops = {
};

static const struct ethtool_ops qede_vf_ethtool_ops = {
	.supported_coalesce_params	= ETHTOOL_COALESCE_USECS,
	.supported_coalesce_params	= ETHTOOL_COALESCE_USECS |
					  ETHTOOL_COALESCE_STATS_BLOCK_USECS,
	.get_link_ksettings		= qede_get_link_ksettings,
	.get_drvinfo			= qede_get_drvinfo,
	.get_msglevel			= qede_get_msglevel,
@@ -312,6 +312,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)

	edev->ops->get_vport_stats(edev->cdev, &stats);

	spin_lock(&edev->stats_lock);

	p_common->no_buff_discards = stats.common.no_buff_discards;
	p_common->packet_too_big_discard = stats.common.packet_too_big_discard;
	p_common->ttl0_discard = stats.common.ttl0_discard;
@@ -409,6 +411,8 @@ void qede_fill_by_demand_stats(struct qede_dev *edev)
		p_ah->tx_1519_to_max_byte_packets =
			stats.ah.tx_1519_to_max_byte_packets;
	}

	spin_unlock(&edev->stats_lock);
}

static void qede_get_stats64(struct net_device *dev,
@@ -417,9 +421,10 @@ static void qede_get_stats64(struct net_device *dev,
	struct qede_dev *edev = netdev_priv(dev);
	struct qede_stats_common *p_common;

	qede_fill_by_demand_stats(edev);
	p_common = &edev->stats.common;

	spin_lock(&edev->stats_lock);

	stats->rx_packets = p_common->rx_ucast_pkts + p_common->rx_mcast_pkts +
			    p_common->rx_bcast_pkts;
	stats->tx_packets = p_common->tx_ucast_pkts + p_common->tx_mcast_pkts +
@@ -439,6 +444,8 @@ static void qede_get_stats64(struct net_device *dev,
	stats->collisions = edev->stats.bb.tx_total_collisions;
	stats->rx_crc_errors = p_common->rx_crc_errors;
	stats->rx_frame_errors = p_common->rx_align_errors;

	spin_unlock(&edev->stats_lock);
}

#ifdef CONFIG_QED_SRIOV
@@ -1071,6 +1078,23 @@ static void qede_unlock(struct qede_dev *edev)
	rtnl_unlock();
}

static void qede_periodic_task(struct work_struct *work)
{
	struct qede_dev *edev = container_of(work, struct qede_dev,
					     periodic_task.work);

	qede_fill_by_demand_stats(edev);
	schedule_delayed_work(&edev->periodic_task, edev->stats_coal_ticks);
}

static void qede_init_periodic_task(struct qede_dev *edev)
{
	INIT_DELAYED_WORK(&edev->periodic_task, qede_periodic_task);
	spin_lock_init(&edev->stats_lock);
	edev->stats_coal_usecs = USEC_PER_SEC;
	edev->stats_coal_ticks = usecs_to_jiffies(USEC_PER_SEC);
}

static void qede_sp_task(struct work_struct *work)
{
	struct qede_dev *edev = container_of(work, struct qede_dev,
@@ -1090,6 +1114,7 @@ static void qede_sp_task(struct work_struct *work)
	 */

	if (test_and_clear_bit(QEDE_SP_RECOVERY, &edev->sp_flags)) {
		cancel_delayed_work_sync(&edev->periodic_task);
#ifdef CONFIG_QED_SRIOV
		/* SRIOV must be disabled outside the lock to avoid a deadlock.
		 * The recovery of the active VFs is currently not supported.
@@ -1284,6 +1309,7 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
	 */
	INIT_DELAYED_WORK(&edev->sp_task, qede_sp_task);
	mutex_init(&edev->qede_lock);
	qede_init_periodic_task(edev);

	rc = register_netdev(edev->ndev);
	if (rc) {
@@ -1308,6 +1334,11 @@ static int __qede_probe(struct pci_dev *pdev, u32 dp_module, u8 dp_level,
	edev->rx_copybreak = QEDE_RX_HDR_SIZE;

	qede_log_probe(edev);

	/* retain user config (for example - after recovery) */
	if (edev->stats_coal_usecs)
		schedule_delayed_work(&edev->periodic_task, 0);

	return 0;

err4:
@@ -1376,6 +1407,7 @@ static void __qede_remove(struct pci_dev *pdev, enum qede_remove_mode mode)
	unregister_netdev(ndev);

	cancel_delayed_work_sync(&edev->sp_task);
	cancel_delayed_work_sync(&edev->periodic_task);

	edev->ops->common->set_power_state(cdev, PCI_D0);
@@ -168,8 +168,6 @@ int inode_init_always(struct super_block *sb, struct inode *inode)

	inode->rh_reserved2 = 0;

	if (security_inode_alloc(inode))
		goto out;
	spin_lock_init(&inode->i_lock);
	lockdep_set_class(&inode->i_lock, &sb->s_type->i_lock_key);

@@ -200,11 +198,12 @@ int inode_init_always(struct super_block *sb, struct inode *inode)
	inode->i_fsnotify_mask = 0;
#endif
	inode->i_flctx = NULL;

	if (unlikely(security_inode_alloc(inode)))
		return -ENOMEM;
	this_cpu_inc(nr_inodes);

	return 0;
out:
	return -ENOMEM;
}
EXPORT_SYMBOL(inode_init_always);
@@ -41,7 +41,9 @@ struct fanotify_perm_event {
	struct fanotify_event fae;
	u32 response;			/* userspace answer to the event */
	unsigned short state;		/* state of the event */
	unsigned short watchdog_cnt;	/* already scanned by watchdog? */
	int fd;				/* fd we passed to userspace for this event */
	pid_t recv_pid;			/* pid of task receiving the event */
	union {
		struct fanotify_response_info_header hdr;
		struct fanotify_response_info_audit_rule audit_rule;
@@ -28,6 +28,117 @@
#define FANOTIFY_DEFAULT_MAX_MARKS	8192
#define FANOTIFY_DEFAULT_MAX_LISTENERS	128

static int perm_group_timeout __read_mostly;

#ifdef CONFIG_SYSCTL

#include <linux/sysctl.h>

static struct ctl_table fanotify_table[] = {
	{
		.procname	= "watchdog_timeout",
		.data		= &perm_group_timeout,
		.maxlen		= sizeof(int),
		.mode		= 0644,
		.proc_handler	= proc_dointvec_minmax,
		.extra1		= SYSCTL_ZERO,
	},
	{ }
};

static void __init fanotify_sysctls_init(void)
{
	register_sysctl("fs/fanotify", fanotify_table);
}
#else
#define fanotify_sysctls_init() do { } while (0)
#endif /* CONFIG_SYSCTL */

static LIST_HEAD(perm_group_list);
static DEFINE_SPINLOCK(perm_group_lock);
static void perm_group_watchdog(struct work_struct *work);
static DECLARE_DELAYED_WORK(perm_group_work, perm_group_watchdog);

static void perm_group_watchdog_schedule(void)
{
	schedule_delayed_work(&perm_group_work, secs_to_jiffies(perm_group_timeout));
}

static void perm_group_watchdog(struct work_struct *work)
{
	struct fsnotify_group *group;
	struct fanotify_perm_event *event;
	struct task_struct *task;
	pid_t failed_pid = 0;

	guard(spinlock)(&perm_group_lock);
	if (list_empty(&perm_group_list))
		return;

	list_for_each_entry(group, &perm_group_list,
			    fanotify_data.perm_grp_list) {
		/*
		 * Ok to test without lock, racing with an addition is
		 * fine, will deal with it next round
		 */
		if (list_empty(&group->fanotify_data.access_list))
			continue;

		spin_lock(&group->notification_lock);
		list_for_each_entry(event, &group->fanotify_data.access_list,
				    fae.fse.list) {
			if (likely(event->watchdog_cnt == 0)) {
				event->watchdog_cnt = 1;
			} else if (event->watchdog_cnt == 1) {
				/* Report on event only once */
				event->watchdog_cnt = 2;

				/* Do not report same pid repeatedly */
				if (event->recv_pid == failed_pid)
					continue;

				failed_pid = event->recv_pid;
				rcu_read_lock();
				task = find_task_by_pid_ns(event->recv_pid,
							   &init_pid_ns);
				pr_warn_ratelimited(
					"PID %u (%s) failed to respond to fanotify queue for more than %d seconds\n",
					event->recv_pid,
					task ? task->comm : NULL,
					perm_group_timeout);
				rcu_read_unlock();
			}
		}
		spin_unlock(&group->notification_lock);
	}
	perm_group_watchdog_schedule();
}
static void fanotify_perm_watchdog_group_remove(struct fsnotify_group *group)
{
	if (!list_empty(&group->fanotify_data.perm_grp_list)) {
		/* Perm event watchdog can no longer scan this group. */
		spin_lock(&perm_group_lock);
		list_del_init(&group->fanotify_data.perm_grp_list);
		spin_unlock(&perm_group_lock);
	}
}

static void fanotify_perm_watchdog_group_add(struct fsnotify_group *group)
{
	if (!perm_group_timeout)
		return;

	spin_lock(&perm_group_lock);
	if (list_empty(&group->fanotify_data.perm_grp_list)) {
		/* Add to perm_group_list for monitoring by watchdog. */
		if (list_empty(&perm_group_list))
			perm_group_watchdog_schedule();
		list_add_tail(&group->fanotify_data.perm_grp_list, &perm_group_list);
	}
	spin_unlock(&perm_group_lock);
}

/*
 * All flags that may be specified in parameter event_f_flags of fanotify_init.
 *
@@ -375,6 +486,7 @@ static ssize_t fanotify_read(struct file *file, char __user *buf,
			spin_lock(&group->notification_lock);
			list_add_tail(&kevent->list,
				      &group->fanotify_data.access_list);
			FANOTIFY_PE(kevent)->recv_pid = current->pid;
			spin_unlock(&group->notification_lock);
		}
	}
@@ -435,6 +547,8 @@ static int fanotify_release(struct inode *ignored, struct file *file)
	 */
	fsnotify_group_stop_queueing(group);

	fanotify_perm_watchdog_group_remove(group);

	/*
	 * Process all permission events on access_list and notification queue
	 * and simulate reply from userspace.
@@ -698,6 +812,10 @@ static int fanotify_add_mark(struct fsnotify_group *group,
	mutex_unlock(&group->mark_mutex);

	fsnotify_put_mark(fsn_mark);

	if (mask & FANOTIFY_PERM_EVENTS)
		fanotify_perm_watchdog_group_add(group);

	return 0;
}

@@ -806,6 +924,7 @@ SYSCALL_DEFINE2(fanotify_init, unsigned int, flags, unsigned int, event_f_flags)
	group->fanotify_data.f_flags = event_f_flags;
	init_waitqueue_head(&group->fanotify_data.access_waitq);
	INIT_LIST_HEAD(&group->fanotify_data.access_list);
	INIT_LIST_HEAD(&group->fanotify_data.perm_grp_list);
	switch (flags & FANOTIFY_CLASS_BITS) {
	case FAN_CLASS_NOTIF:
		group->priority = FS_PRIO_0;
@@ -1015,6 +1134,7 @@ static int __init fanotify_user_setup(void)
		fanotify_perm_event_cachep =
			KMEM_CACHE(fanotify_perm_event, SLAB_PANIC);
	}
	fanotify_sysctls_init();

	return 0;
}
@@ -16,6 +16,7 @@
#define _LINUX_BITFIELD_H

#include <linux/build_bug.h>
#include <linux/typecheck.h>
#include <asm/byteorder.h>

/*
@@ -43,8 +44,7 @@
 *	  FIELD_PREP(REG_FIELD_D, 0x40);
 *
 * Modify:
 *	reg &= ~REG_FIELD_C;
 *	reg |= FIELD_PREP(REG_FIELD_C, c);
 *	FIELD_MODIFY(REG_FIELD_C, &reg, c);
 */

#define __bf_shf(x) (__builtin_ffsll(x) - 1)
@@ -117,6 +117,23 @@
		(typeof(_mask))(((_reg) & (_mask)) >> __bf_shf(_mask));	\
	})

/**
 * FIELD_MODIFY() - modify a bitfield element
 * @_mask: shifted mask defining the field's length and position
 * @_reg_p: pointer to the memory that should be updated
 * @_val: value to store in the bitfield
 *
 * FIELD_MODIFY() modifies the set of bits in @_reg_p specified by @_mask,
 * by replacing them with the bitfield value passed in as @_val.
 */
#define FIELD_MODIFY(_mask, _reg_p, _val)				\
	({								\
		typecheck_pointer(_reg_p);				\
		__BF_FIELD_CHECK(_mask, *(_reg_p), _val, "FIELD_MODIFY: "); \
		*(_reg_p) &= ~(_mask);					\
		*(_reg_p) |= (((typeof(_mask))(_val) << __bf_shf(_mask)) & (_mask)); \
	})

extern void __compiletime_error("value doesn't fit into mask")
__field_overflow(void);
extern void __compiletime_error("bad bitfield mask")
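The new FIELD_MODIFY() helper replaces the open-coded clear-and-OR sequence
shown in the comment above. A small usage sketch (the register layout here is
hypothetical; only FIELD_MODIFY()/FIELD_PREP() come from the kernel):

	#define EXAMPLE_MFS_MASK	GENMASK(31, 16)	/* assumed field position */

	static void example_set_mfs(u32 *reg, u16 mfs)
	{
		/* Old style: clear the field, then OR in the new value */
		*reg &= ~EXAMPLE_MFS_MASK;
		*reg |= FIELD_PREP(EXAMPLE_MFS_MASK, mfs);

		/* New style: one call updates the field in place */
		FIELD_MODIFY(EXAMPLE_MFS_MASK, reg, mfs);
	}

This mirrors the i40e change earlier in this import, which calls
FIELD_MODIFY(I40E_PRTGL_SAH_MFS_MASK, &val, MAX_FRAME_SIZE_DEFAULT).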
@@ -4,6 +4,7 @@

#include <asm/types.h>
#include <linux/bits.h>
#include <linux/typecheck.h>

#include <uapi/linux/kernel.h>

@@ -255,6 +256,55 @@ static __always_inline void __assign_bit(long nr, volatile unsigned long *addr,
		__clear_bit(nr, addr);
}

/**
 * __ptr_set_bit - Set bit in a pointer's value
 * @nr: the bit to set
 * @addr: the address of the pointer variable
 *
 * Example:
 *	void *p = foo();
 *	__ptr_set_bit(bit, &p);
 */
#define __ptr_set_bit(nr, addr)					\
	({							\
		typecheck_pointer(*(addr));			\
		__set_bit(nr, (unsigned long *)(addr));		\
	})

/**
 * __ptr_clear_bit - Clear bit in a pointer's value
 * @nr: the bit to clear
 * @addr: the address of the pointer variable
 *
 * Example:
 *	void *p = foo();
 *	__ptr_clear_bit(bit, &p);
 */
#define __ptr_clear_bit(nr, addr)				\
	({							\
		typecheck_pointer(*(addr));			\
		__clear_bit(nr, (unsigned long *)(addr));	\
	})

/**
 * __ptr_test_bit - Test bit in a pointer's value
 * @nr: the bit to test
 * @addr: the address of the pointer variable
 *
 * Example:
 *	void *p = foo();
 *	if (__ptr_test_bit(bit, &p)) {
 *		...
 *	} else {
 *		...
 *	}
 */
#define __ptr_test_bit(nr, addr)				\
	({							\
		typecheck_pointer(*(addr));			\
		test_bit(nr, (unsigned long *)(addr));		\
	})

#ifdef __KERNEL__

#ifndef set_mask_bits
@@ -76,6 +76,7 @@ extern ssize_t cpu_show_gds(struct device *dev,
				struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_reg_file_data_sampling(struct device *dev,
				struct device_attribute *attr, char *buf);
extern ssize_t cpu_show_vmscape(struct device *dev, struct device_attribute *attr, char *buf);

extern __printf(4, 5)
struct device *cpu_device_create(struct device *parent, void *drvdata,
@@ -203,6 +203,8 @@ struct fsnotify_group {
			int f_flags; /* event_f_flags from fanotify_init() */
			unsigned int max_marks;
			struct user_struct *user;
			/* chained on perm_group_list */
			struct list_head perm_grp_list;
		} fanotify_data;
#endif /* CONFIG_FANOTIFY */
};
@@ -371,6 +371,19 @@ static __always_inline unsigned long msecs_to_jiffies(const unsigned int m)
	}
}

/**
 * secs_to_jiffies: - convert seconds to jiffies
 * @_secs: time in seconds
 *
 * Conversion is done by simple multiplication with HZ
 *
 * secs_to_jiffies() is defined as a macro rather than a static inline
 * function so it can be used in static initializers.
 *
 * Return: jiffies value
 */
#define secs_to_jiffies(_secs) (unsigned long)((_secs) * HZ)

extern unsigned long __usecs_to_jiffies(const unsigned int u);
#if !(USEC_PER_SEC % HZ)
static inline unsigned long _usecs_to_jiffies(const unsigned int u)
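Because secs_to_jiffies() expands to a constant multiplication, it can also
appear in a static initializer, which an inline helper like msecs_to_jiffies()
cannot; a minimal sketch (the variable name is illustrative only):

	/* usable at file scope, evaluated at compile time */
	static unsigned long example_timeout_jiffies = secs_to_jiffies(30);

	/* runtime use, as in the fanotify watchdog added earlier in this import */
	schedule_delayed_work(&perm_group_work, secs_to_jiffies(perm_group_timeout));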
@@ -22,4 +22,13 @@
	(void)__tmp;			\
})

/*
 * Check at compile time that something is a pointer type.
 */
#define typecheck_pointer(x) \
({	typeof(x) __dummy; \
	(void)sizeof(*__dummy); \
	1; \
})

#endif /* TYPECHECK_H_INCLUDED */
mm/zswap.c (18 changed lines)
@@ -936,6 +936,24 @@ static int zswap_writeback_entry(struct zpool *pool, unsigned long handle)
		goto fail;

	case ZSWAP_SWAPCACHE_NEW: /* page is locked */
		/*
		 * Having a local reference to the zswap entry doesn't exclude
		 * swapping from invalidating and recycling the swap slot. Once
		 * the swapcache is secured against concurrent swapping to and
		 * from the slot, recheck that the entry is still current before
		 * writing.
		 */
		spin_lock(&tree->lock);
		if (zswap_rb_search(&tree->rbroot, entry->offset) != entry) {
			spin_unlock(&tree->lock);
			delete_from_swap_cache(page);
			unlock_page(page);
			put_page(page);
			ret = -ENOMEM;
			goto fail;
		}
		spin_unlock(&tree->lock);

		/* decompress */
		dlen = PAGE_SIZE;
		src = (u8 *)zpool_map_handle(entry->pool->zpool, entry->handle,
@@ -43,8 +43,6 @@
#define ZERO_KEY "\x00\x00\x00\x00\x00\x00\x00\x00" \
		 "\x00\x00\x00\x00\x00\x00\x00\x00"

#define secs_to_jiffies(_secs) msecs_to_jiffies((_secs) * 1000)

/* Handle HCI Event packets */

static void *hci_ev_skb_pull(struct hci_dev *hdev, struct sk_buff *skb,