Import of kernel-5.14.0-611.34.1.el9_7
parent 56e2fefa0b
commit e86d4965b2

@@ -12,7 +12,7 @@ RHEL_MINOR = 7
#
# Use this spot to avoid future merge conflicts.
# Do not trim this comment.
RHEL_RELEASE = 611.30.1
RHEL_RELEASE = 611.34.1

#
# ZSTREAM

@@ -115,5 +115,18 @@ void get_cpuflags(void)
			cpuid(0x80000001, &ignored, &ignored, &cpu.flags[6],
			      &cpu.flags[1]);
		}

		if (max_amd_level >= 0x8000001f) {
			u32 ebx;

			/*
			 * The X86_FEATURE_COHERENCY_SFW_NO feature bit is in
			 * the virtualization flags entry (word 8) and set by
			 * scattered.c, so the bit needs to be explicitly set.
			 */
			cpuid(0x8000001f, &ignored, &ebx, &ignored, &ignored);
			if (ebx & BIT(31))
				set_bit(X86_FEATURE_COHERENCY_SFW_NO, cpu.flags);
		}
	}
}

@@ -1293,6 +1293,13 @@ static void pvalidate_4k_page(unsigned long vaddr, unsigned long paddr, bool val
		if (ret)
			__pval_terminate(PHYS_PFN(paddr), validate, RMP_PG_SIZE_4K, ret, 0);
	}

	/*
	 * If validating memory (making it private) and affected by the
	 * cache-coherency vulnerability, perform the cache eviction mitigation.
	 */
	if (validate && !has_cpuflag(X86_FEATURE_COHERENCY_SFW_NO))
		sev_evict_cache((void *)vaddr, 1);
}

static void pval_pages(struct snp_psc_desc *desc)

@@ -1477,10 +1484,31 @@ static void svsm_pval_pages(struct snp_psc_desc *desc)

static void pvalidate_pages(struct snp_psc_desc *desc)
{
	struct psc_entry *e;
	unsigned int i;

	if (snp_vmpl)
		svsm_pval_pages(desc);
	else
		pval_pages(desc);

	/*
	 * If not affected by the cache-coherency vulnerability there is no need
	 * to perform the cache eviction mitigation.
	 */
	if (has_cpuflag(X86_FEATURE_COHERENCY_SFW_NO))
		return;

	for (i = 0; i <= desc->hdr.end_entry; i++) {
		e = &desc->entries[i];

		/*
		 * If validating memory (making it private) perform the cache
		 * eviction mitigation.
		 */
		if (e->operation == SNP_PAGE_STATE_PRIVATE)
			sev_evict_cache(pfn_to_kaddr(e->gfn), e->pagesize ? 512 : 1);
	}
}

static int vmgexit_psc(struct ghcb *ghcb, struct snp_psc_desc *desc)

@@ -228,6 +228,7 @@
#define X86_FEATURE_FLEXPRIORITY ( 8*32+ 1) /* "flexpriority" Intel FlexPriority */
#define X86_FEATURE_EPT ( 8*32+ 2) /* "ept" Intel Extended Page Table */
#define X86_FEATURE_VPID ( 8*32+ 3) /* "vpid" Intel Virtual Processor ID */
#define X86_FEATURE_COHERENCY_SFW_NO ( 8*32+ 4) /* SNP cache coherency software work around not needed */

#define X86_FEATURE_VMMCALL ( 8*32+15) /* "vmmcall" Prefer VMMCALL to VMCALL */
#define X86_FEATURE_XENPV ( 8*32+16) /* Xen paravirtual guest */

@@ -424,6 +424,24 @@ void snp_kexec_finish(void);
void snp_kexec_begin(void);
int snp_svsm_vtpm_send_command(u8 *buffer);

static inline void sev_evict_cache(void *va, int npages)
{
	volatile u8 val __always_unused;
	u8 *bytes = va;
	int page_idx;

	/*
	 * For SEV guests, a read from the first/last cache-lines of a 4K page
	 * using the guest key is sufficient to cause a flush of all cache-lines
	 * associated with that 4K page without incurring all the overhead of a
	 * full CLFLUSH sequence.
	 */
	for (page_idx = 0; page_idx < npages; page_idx++) {
		val = bytes[page_idx * PAGE_SIZE];
		val = bytes[page_idx * PAGE_SIZE + PAGE_SIZE - 1];
	}
}

#else /* !CONFIG_AMD_MEM_ENCRYPT */

#define snp_vmpl 0

@@ -464,6 +482,7 @@ static inline void set_pte_enc_mask(pte_t *kpte, unsigned long pfn, pgprot_t new
static inline void snp_kexec_finish(void) { }
static inline void snp_kexec_begin(void) { }
static inline int snp_svsm_vtpm_send_command(u8 *buffer) { return -ENODEV; }
static inline void sev_evict_cache(void *va, int npages) {}

#endif /* CONFIG_AMD_MEM_ENCRYPT */

@@ -47,6 +47,7 @@ static const struct cpuid_bit cpuid_bits[] = {
	{ X86_FEATURE_PROC_FEEDBACK, CPUID_EDX, 11, 0x80000007, 0 },
	{ X86_FEATURE_AMD_FAST_CPPC, CPUID_EDX, 15, 0x80000007, 0 },
	{ X86_FEATURE_MBA, CPUID_EBX, 6, 0x80000008, 0 },
	{ X86_FEATURE_COHERENCY_SFW_NO, CPUID_EBX, 31, 0x8000001f, 0 },
	{ X86_FEATURE_SMBA, CPUID_EBX, 2, 0x80000020, 0 },
	{ X86_FEATURE_BMEC, CPUID_EBX, 3, 0x80000020, 0 },
	{ X86_FEATURE_TSA_SQ_NO, CPUID_ECX, 1, 0x80000021, 0 },

@@ -2450,7 +2450,7 @@ emit_jmp:
		/* Update cleanup_addr */
		ctx->cleanup_addr = proglen;
		if (bpf_prog_was_classic(bpf_prog) &&
		    !capable(CAP_SYS_ADMIN)) {
		    !ns_capable_noaudit(&init_user_ns, CAP_SYS_ADMIN)) {
			u8 *ip = image + addrs[i - 1];

			if (emit_spectre_bhb_barrier(&prog, ip, bpf_prog))

@@ -896,7 +896,7 @@ out:
static void rebalance_wq_table(void)
{
	const struct cpumask *node_cpus;
	int node, cpu, iaa = -1;
	int node_cpu, node, cpu, iaa = 0;

	if (nr_iaa == 0)
		return;

@@ -907,36 +907,29 @@ static void rebalance_wq_table(void)
	clear_wq_table();

	if (nr_iaa == 1) {
		for (cpu = 0; cpu < nr_cpus; cpu++) {
			if (WARN_ON(wq_table_add_wqs(0, cpu))) {
				pr_debug("could not add any wqs for iaa 0 to cpu %d!\n", cpu);
				return;
			}
		for_each_possible_cpu(cpu) {
			if (WARN_ON(wq_table_add_wqs(0, cpu)))
				goto err;
		}

		return;
	}

	for_each_node_with_cpus(node) {
		cpu = 0;
		node_cpus = cpumask_of_node(node);

		for (cpu = 0; cpu < cpumask_weight(node_cpus); cpu++) {
			int node_cpu = cpumask_nth(cpu, node_cpus);

			if (WARN_ON(node_cpu >= nr_cpu_ids)) {
				pr_debug("node_cpu %d doesn't exist!\n", node_cpu);
				return;
			}

			if ((cpu % cpus_per_iaa) == 0)
				iaa++;

			if (WARN_ON(wq_table_add_wqs(iaa, node_cpu))) {
				pr_debug("could not add any wqs for iaa %d to cpu %d!\n", iaa, cpu);
				return;
			}
		for_each_cpu(node_cpu, node_cpus) {
			iaa = cpu / cpus_per_iaa;
			if (WARN_ON(wq_table_add_wqs(iaa, node_cpu)))
				goto err;
			cpu++;
		}
	}

	return;
err:
	pr_debug("could not add any wqs for iaa %d to cpu %d!\n", iaa, cpu);
}

static inline int check_completion(struct device *dev,

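As an illustration, not part of the patch above: the reworked loop maps each CPU of a node to an IAA device by integer division, iaa = cpu / cpus_per_iaa. A minimal user-space sketch of that mapping, assuming 8 CPUs in one node served by 2 IAA devices (so cpus_per_iaa = 4):

#include <stdio.h>

int main(void)
{
	int nr_cpus = 8, nr_iaa = 2;
	int cpus_per_iaa = nr_cpus / nr_iaa;	/* 4 in this example */

	/* CPUs 0-3 map to IAA 0, CPUs 4-7 map to IAA 1. */
	for (int cpu = 0; cpu < nr_cpus; cpu++)
		printf("cpu %d -> iaa %d\n", cpu, cpu / cpus_per_iaa);
	return 0;
}
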
@@ -4801,6 +4801,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
	unsigned long q_map;
	struct i40e_vf *vf;
	int abs_vf_id;
	int old_link;
	int ret = 0;
	int tmp;

@@ -4819,6 +4820,17 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
	vf = &pf->vf[vf_id];
	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;

	/* skip VF link state change if requested state is already set */
	if (!vf->link_forced)
		old_link = IFLA_VF_LINK_STATE_AUTO;
	else if (vf->link_up)
		old_link = IFLA_VF_LINK_STATE_ENABLE;
	else
		old_link = IFLA_VF_LINK_STATE_DISABLE;

	if (link == old_link)
		goto error_out;

	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
	pfe.severity = PF_EVENT_SEVERITY_INFO;

@@ -288,7 +288,7 @@ static int switchtec_ntb_mw_set_trans(struct ntb_dev *ntb, int pidx, int widx,
	if (size != 0 && xlate_pos < 12)
		return -EINVAL;

	if (!IS_ALIGNED(addr, BIT_ULL(xlate_pos))) {
	if (xlate_pos >= 0 && !IS_ALIGNED(addr, BIT_ULL(xlate_pos))) {
		/*
		 * In certain circumstances we can get a buffer that is
		 * not aligned to its size. (Most of the time

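As an illustration, not part of the patch above: the added xlate_pos >= 0 guard avoids shifting by a negative count when the requested size is zero. A standalone sketch of the guarded check, assuming kernel-style BIT_ULL/IS_ALIGNED semantics:

#include <stdbool.h>
#include <stdint.h>

#define BIT_ULL(n)        (1ULL << (n))
#define IS_ALIGNED(x, a)  (((x) & ((a) - 1)) == 0)

/* Only test alignment when xlate_pos is a valid (non-negative) shift count;
 * evaluating BIT_ULL() with a negative argument is undefined behaviour. */
static bool addr_misaligned(uint64_t addr, int xlate_pos)
{
	return xlate_pos >= 0 && !IS_ALIGNED(addr, BIT_ULL(xlate_pos));
}
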
@@ -1006,6 +1006,18 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
		pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
		goto err_proto;
	}
	/*
	 * Ensure command data structures are initialized. We must check both
	 * cmd->req.sg and cmd->iov because they can have different NULL states:
	 * - Uninitialized commands: both NULL
	 * - READ commands: cmd->req.sg allocated, cmd->iov NULL
	 * - WRITE commands: both allocated
	 */
	if (unlikely(!cmd->req.sg || !cmd->iov)) {
		pr_err("queue %d: H2CData PDU received for invalid command state (ttag %u)\n",
		       queue->idx, data->ttag);
		goto err_proto;
	}
	cmd->pdu_recv = 0;
	nvmet_tcp_build_pdu_iovec(cmd);
	queue->cmd = cmd;

@@ -1144,7 +1144,7 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device,
	 * The current SCSI handling on the host side does
	 * not correctly handle:
	 * INQUIRY command with page code parameter set to 0x80
	 * MODE_SENSE command with cmd[2] == 0x1c
	 * MODE_SENSE and MODE_SENSE_10 command with cmd[2] == 0x1c
	 * MAINTENANCE_IN is not supported by HyperV FC passthrough
	 *
	 * Setup srb and scsi status so this won't be fatal.

@@ -1154,6 +1154,7 @@ static void storvsc_on_io_completion(struct storvsc_device *stor_device,

	if ((stor_pkt->vm_srb.cdb[0] == INQUIRY) ||
	    (stor_pkt->vm_srb.cdb[0] == MODE_SENSE) ||
	    (stor_pkt->vm_srb.cdb[0] == MODE_SENSE_10) ||
	    (stor_pkt->vm_srb.cdb[0] == MAINTENANCE_IN &&
	     hv_dev_is_fc(device))) {
		vstor_packet->vm_srb.scsi_status = 0;

@@ -80,12 +80,16 @@ static inline void bit_putcs_aligned(struct vc_data *vc, struct fb_info *info,
				     struct fb_image *image, u8 *buf, u8 *dst)
{
	u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
	unsigned int charcnt = vc->vc_font.charcount;
	u32 idx = vc->vc_font.width >> 3;
	u8 *src;

	while (cnt--) {
		src = vc->vc_font.data + (scr_readw(s++)&
					  charmask)*cellsize;
		u16 ch = scr_readw(s++) & charmask;

		if (ch >= charcnt)
			ch = 0;
		src = vc->vc_font.data + (unsigned int)ch * cellsize;

		if (attr) {
			update_attr(buf, src, attr, vc);

@@ -113,14 +117,18 @@ static inline void bit_putcs_unaligned(struct vc_data *vc,
				       u8 *dst)
{
	u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
	unsigned int charcnt = vc->vc_font.charcount;
	u32 shift_low = 0, mod = vc->vc_font.width % 8;
	u32 shift_high = 8;
	u32 idx = vc->vc_font.width >> 3;
	u8 *src;

	while (cnt--) {
		src = vc->vc_font.data + (scr_readw(s++)&
					  charmask)*cellsize;
		u16 ch = scr_readw(s++) & charmask;

		if (ch >= charcnt)
			ch = 0;
		src = vc->vc_font.data + (unsigned int)ch * cellsize;

		if (attr) {
			update_attr(buf, src, attr, vc);

@@ -161,6 +169,11 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info,
	image.height = vc->vc_font.height;
	image.depth = 1;

	if (image.dy >= info->var.yres)
		return;

	image.height = min(image.height, info->var.yres - image.dy);

	if (attribute) {
		buf = kmalloc(cellsize, GFP_ATOMIC);
		if (!buf)

@@ -174,6 +187,18 @@ static void bit_putcs(struct vc_data *vc, struct fb_info *info,
			cnt = count;

		image.width = vc->vc_font.width * cnt;

		if (image.dx >= info->var.xres)
			break;

		if (image.dx + image.width > info->var.xres) {
			image.width = info->var.xres - image.dx;
			cnt = image.width / vc->vc_font.width;
			if (cnt == 0)
				break;
			image.width = cnt * vc->vc_font.width;
		}

		pitch = DIV_ROUND_UP(image.width, 8) + scan_align;
		pitch &= ~scan_align;
		size = pitch * image.height + buf_align;

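As an illustration, not part of the patch above: the added clamp keeps the blit inside the visible horizontal resolution and rounds the width back down to a whole number of glyph cells. A small worked example of that arithmetic, assuming an 8-pixel-wide font, xres = 1024 and a string starting at dx = 1000:

#include <stdio.h>

int main(void)
{
	unsigned int xres = 1024, dx = 1000, font_width = 8, cnt = 10;
	unsigned int width = font_width * cnt;		/* 80: would overrun */

	if (dx + width > xres) {
		width = xres - dx;			/* 24 pixels left */
		cnt = width / font_width;		/* 3 whole glyphs */
		width = cnt * font_width;		/* 24, cell-aligned */
	}
	printf("draw %u glyphs, %u pixels wide\n", cnt, width);
	return 0;
}
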
@@ -466,6 +466,7 @@ pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
	struct pnfs_layout_segment *lseg, *next;

	set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
	clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(lo->plh_inode)->flags);
	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
		pnfs_clear_lseg_state(lseg, lseg_list);
	pnfs_clear_layoutreturn_info(lo);

@@ -178,6 +178,7 @@ cifs_get_spnego_key(struct cifs_ses *sesInfo,
	saved_cred = override_creds(spnego_cred);
	spnego_key = request_key(&cifs_spnego_key_type, description, "");
	revert_creds(saved_cred);
	trace_smb3_kerberos_auth(server, sesInfo, PTR_ERR_OR_ZERO(spnego_key));

#ifdef CONFIG_CIFS_DEBUG2
	if (cifsFYI && !IS_ERR(spnego_key)) {

@@ -786,6 +786,7 @@ struct TCP_Server_Info {
	struct session_key session_key;
	unsigned long lstrp; /* when we got last response from this server */
	unsigned long neg_start; /* when negotiate started (jiffies) */
	unsigned long reconn_delay; /* when resched session and tcon reconnect */
	struct cifs_secmech secmech; /* crypto sec mech functs, descriptors */
#define CIFS_NEGFLAVOR_UNENCAP 1 /* wct == 17, but no ext_sec */
#define CIFS_NEGFLAVOR_EXTENDED 2 /* wct == 17, ext_sec bit set */

@@ -2420,4 +2421,24 @@ static inline bool cifs_netbios_name(const char *name, size_t namelen)
	 (le32_to_cpu((tcon)->fsAttrInfo.Attributes) & \
	  FILE_SUPPORTS_REPARSE_POINTS))

#define CIFS_RECONN_DELAY_SECS 30
#define CIFS_MAX_RECONN_DELAY (4 * CIFS_RECONN_DELAY_SECS)

static inline void cifs_queue_server_reconn(struct TCP_Server_Info *server)
{
	if (!delayed_work_pending(&server->reconnect)) {
		WRITE_ONCE(server->reconn_delay, 0);
		mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
	}
}

static inline void cifs_requeue_server_reconn(struct TCP_Server_Info *server)
{
	unsigned long delay = READ_ONCE(server->reconn_delay);

	delay = umin(delay + CIFS_RECONN_DELAY_SECS, CIFS_MAX_RECONN_DELAY);
	WRITE_ONCE(server->reconn_delay, delay);
	queue_delayed_work(cifsiod_wq, &server->reconnect, delay * HZ);
}

#endif /* _CIFS_GLOB_H */

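As an illustration, not part of the patch above: cifs_requeue_server_reconn() grows the delay by CIFS_RECONN_DELAY_SECS on each requeue and clamps it at CIFS_MAX_RECONN_DELAY, so retries land at 30, 60, 90 and then a steady 120 seconds. A minimal sketch of that progression using the constants above:

#include <stdio.h>

#define CIFS_RECONN_DELAY_SECS	30
#define CIFS_MAX_RECONN_DELAY	(4 * CIFS_RECONN_DELAY_SECS)

static unsigned long umin(unsigned long a, unsigned long b)
{
	return a < b ? a : b;
}

int main(void)
{
	unsigned long delay = 0;	/* reset to 0 by cifs_queue_server_reconn() */

	for (int attempt = 1; attempt <= 6; attempt++) {
		delay = umin(delay + CIFS_RECONN_DELAY_SECS, CIFS_MAX_RECONN_DELAY);
		printf("retry %d scheduled in %lu s\n", attempt, delay);	/* 30, 60, 90, 120, 120, ... */
	}
	return 0;
}
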
@@ -9,6 +9,7 @@
#define _CIFSPROTO_H
#include <linux/nls.h>
#include <linux/ctype.h>
#include "cifsglob.h"
#include "trace.h"
#ifdef CONFIG_CIFS_DFS_UPCALL
#include "dfs_cache.h"

@@ -429,7 +429,7 @@ static int __cifs_reconnect(struct TCP_Server_Info *server,
			spin_unlock(&server->srv_lock);
			cifs_swn_reset_server_dstaddr(server);
			cifs_server_unlock(server);
			mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
			cifs_queue_server_reconn(server);
		}
	} while (server->tcpStatus == CifsNeedReconnect);

@@ -568,7 +568,7 @@ static int reconnect_dfs_server(struct TCP_Server_Info *server)
		spin_unlock(&server->srv_lock);
		cifs_swn_reset_server_dstaddr(server);
		cifs_server_unlock(server);
		mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
		cifs_queue_server_reconn(server);
	} while (server->tcpStatus == CifsNeedReconnect);

	dfs_cache_noreq_update_tgthint(ref_path, target_hint);

@@ -4246,8 +4246,10 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
	struct sockaddr_in6 *addr6 = (struct sockaddr_in6 *)&pserver->dstaddr;
	struct sockaddr_in *addr = (struct sockaddr_in *)&pserver->dstaddr;
	bool is_binding = false;
	bool new_ses;

	spin_lock(&ses->ses_lock);
	new_ses = ses->ses_status == SES_NEW;
	cifs_dbg(FYI, "%s: channel connect bitmap: 0x%lx\n",
		 __func__, ses->chans_need_reconnect);

@@ -4333,7 +4335,10 @@ cifs_setup_session(const unsigned int xid, struct cifs_ses *ses,
	}

	if (rc) {
		cifs_server_dbg(VFS, "Send error in SessSetup = %d\n", rc);
		if (new_ses) {
			cifs_server_dbg(VFS, "failed to create a new SMB session with %s: %d\n",
					get_security_type_str(ses->sectype), rc);
		}
		spin_lock(&ses->ses_lock);
		if (ses->ses_status == SES_IN_SETUP)
			ses->ses_status = SES_NEED_RECON;

@@ -460,7 +460,7 @@ skip_add_channels:
	spin_unlock(&ses->ses_lock);

	if (smb2_command != SMB2_INTERNAL_CMD)
		mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
		cifs_queue_server_reconn(server);

	atomic_inc(&tconInfoReconnectCount);
out:

@@ -1625,8 +1625,6 @@ SMB2_auth_kerberos(struct SMB2_sess_data *sess_data)
	spnego_key = cifs_get_spnego_key(ses, server);
	if (IS_ERR(spnego_key)) {
		rc = PTR_ERR(spnego_key);
		if (rc == -ENOKEY)
			cifs_dbg(VFS, "Verify user has a krb5 ticket and keyutils is installed\n");
		spnego_key = NULL;
		goto out;
	}

@@ -4243,7 +4241,7 @@ void smb2_reconnect_server(struct work_struct *work)
done:
	cifs_dbg(FYI, "Reconnecting tcons and channels finished\n");
	if (resched)
		queue_delayed_work(cifsiod_wq, &server->reconnect, 2 * HZ);
		cifs_requeue_server_reconn(server);
	mutex_unlock(&pserver->reconnect_mutex);

	/* now we can safely release srv struct */

@@ -4267,7 +4265,7 @@ SMB2_echo(struct TCP_Server_Info *server)
	    server->ops->need_neg(server)) {
		spin_unlock(&server->srv_lock);
		/* No need to send echo on newly established connections */
		mod_delayed_work(cifsiod_wq, &server->reconnect, 0);
		cifs_queue_server_reconn(server);
		return rc;
	}
	spin_unlock(&server->srv_lock);

@@ -4,5 +4,7 @@
 *
 *   Author(s): Steve French <stfrench@microsoft.com>
 */
#include "cifsglob.h"
#include "cifs_spnego.h"
#define CREATE_TRACE_POINTS
#include "trace.h"

@@ -1435,6 +1435,49 @@ DEFINE_SMB3_CREDIT_EVENT(waitff_credits);
DEFINE_SMB3_CREDIT_EVENT(overflow_credits);
DEFINE_SMB3_CREDIT_EVENT(set_credits);

TRACE_EVENT(smb3_kerberos_auth,
	TP_PROTO(struct TCP_Server_Info *server,
		 struct cifs_ses *ses,
		 int rc),
	TP_ARGS(server, ses, rc),
	TP_STRUCT__entry(
		__field(pid_t, pid)
		__field(uid_t, uid)
		__field(uid_t, cruid)
		__string(host, server->hostname)
		__string(user, ses->user_name)
		__array(__u8, addr, sizeof(struct sockaddr_storage))
		__array(char, sec, sizeof("ntlmsspi"))
		__array(char, upcall_target, sizeof("mount"))
		__field(int, rc)
	),
	TP_fast_assign(
		__entry->pid = current->pid;
		__entry->uid = from_kuid_munged(&init_user_ns, ses->linux_uid);
		__entry->cruid = from_kuid_munged(&init_user_ns, ses->cred_uid);
		__assign_str(host, server->hostname);
		__assign_str(user, ses->user_name);
		memcpy(__entry->addr, &server->dstaddr, sizeof(__entry->addr));

		if (server->sec_kerberos)
			memcpy(__entry->sec, "krb5", sizeof("krb5"));
		else if (server->sec_mskerberos)
			memcpy(__entry->sec, "mskrb5", sizeof("mskrb5"));
		else if (server->sec_iakerb)
			memcpy(__entry->sec, "iakerb", sizeof("iakerb"));
		else
			memcpy(__entry->sec, "krb5", sizeof("krb5"));

		if (ses->upcall_target == UPTARGET_MOUNT)
			memcpy(__entry->upcall_target, "mount", sizeof("mount"));
		else
			memcpy(__entry->upcall_target, "app", sizeof("app"));
		__entry->rc = rc;
	),
	TP_printk("vers=%d host=%s ip=%pISpsfc sec=%s uid=%d cruid=%d user=%s pid=%d upcall_target=%s err=%d",
		CIFS_SPNEGO_UPCALL_VERSION, __get_str(host), __entry->addr,
		__entry->sec, __entry->uid, __entry->cruid, __get_str(user),
		__entry->pid, __entry->upcall_target, __entry->rc))

TRACE_EVENT(smb3_tcon_ref,
	TP_PROTO(unsigned int tcon_debug_id, int ref,

@@ -1129,37 +1129,38 @@ static void smc_pnet_find_ism_by_pnetid(struct net_device *ndev,
 */
void smc_pnet_find_roce_resource(struct sock *sk, struct smc_init_info *ini)
{
	struct dst_entry *dst = sk_dst_get(sk);
	struct net_device *dev;
	struct dst_entry *dst;

	if (!dst)
		goto out;
	if (!dst->dev)
		goto out_rel;
	rcu_read_lock();
	dst = __sk_dst_get(sk);
	dev = dst ? dst_dev_rcu(dst) : NULL;
	dev_hold(dev);
	rcu_read_unlock();

	smc_pnet_find_roce_by_pnetid(dst->dev, ini);

out_rel:
	dst_release(dst);
out:
	return;
	if (dev) {
		smc_pnet_find_roce_by_pnetid(dev, ini);
		dev_put(dev);
	}
}

void smc_pnet_find_ism_resource(struct sock *sk, struct smc_init_info *ini)
{
	struct dst_entry *dst = sk_dst_get(sk);
	struct net_device *dev;
	struct dst_entry *dst;

	ini->ism_dev[0] = NULL;
	if (!dst)
		goto out;
	if (!dst->dev)
		goto out_rel;

	smc_pnet_find_ism_by_pnetid(dst->dev, ini);
	rcu_read_lock();
	dst = __sk_dst_get(sk);
	dev = dst ? dst_dev_rcu(dst) : NULL;
	dev_hold(dev);
	rcu_read_unlock();

out_rel:
	dst_release(dst);
out:
	return;
	if (dev) {
		smc_pnet_find_ism_by_pnetid(dev, ini);
		dev_put(dev);
	}
}

/* Lookup and apply a pnet table entry to the given ib device.

@@ -848,7 +848,7 @@ static int svc_rdma_copy_inline_range(struct svc_rqst *rqstp,
		head->rc_page_count++;

	dst = page_address(rqstp->rq_pages[head->rc_curpage]);
	memcpy(dst + head->rc_curpage, src + offset, page_len);
	memcpy((unsigned char *)dst + head->rc_pageoff, src + offset, page_len);

	head->rc_readbytes += page_len;
	head->rc_pageoff += page_len;

@@ -1,3 +1,32 @@
* Sat Feb 07 2026 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-611.34.1.el9_7]
- scsi: storvsc: Process unsupported MODE_SENSE_10 (Li Tian) [RHEL-145183]
- smb: client: Add tracepoint for krb5 auth (Paulo Alcantara) [RHEL-127498]
- smb: client: improve error message when creating SMB session (Paulo Alcantara) [RHEL-127498]
- smb: client: relax session and tcon reconnect attempts (Paulo Alcantara) [RHEL-127498]
- cifs: #include cifsglob.h before trace.h to allow structs in tracepoints (Paulo Alcantara) [RHEL-127498]
- smc: Fix use-after-free in __pnet_find_base_ndev(). (Mete Durlu) [RHEL-126890] {CVE-2025-40064}
Resolves: RHEL-126890, RHEL-127498, RHEL-145183

* Thu Feb 05 2026 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-611.33.1.el9_7]
- i40e: avoid redundant VF link state updates (CKI Backport Bot) [RHEL-141877]
- x86/sev: Guard sev_evict_cache() with CONFIG_AMD_MEM_ENCRYPT (Paolo Bonzini) [RHEL-128030]
- x86/sev: Evict cache lines during SNP memory validation (Paolo Bonzini) [RHEL-128030]
- ntb_hw_switchtec: Fix shift-out-of-bounds in switchtec_ntb_mw_set_trans (Myron Stowe) [RHEL-132891] {CVE-2023-53034}
- fbdev: Add bounds checking in bit_putcs to fix vmalloc-out-of-bounds (CKI Backport Bot) [RHEL-137683] {CVE-2025-40304}
- crypto: iaa - Optimize rebalance_wq_table() (Jay Shin) [RHEL-137272]
- fbdev: bitblit: bound-check glyph index in bit_putcs* (CKI Backport Bot) [RHEL-136942] {CVE-2025-40322}
- bpf: Do not audit capability check in do_jit() (Jay Shin) [RHEL-135137]
Resolves: RHEL-128030, RHEL-132891, RHEL-135137, RHEL-136942, RHEL-137272, RHEL-137683, RHEL-141877

* Tue Feb 03 2026 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-611.32.1.el9_7]
- svcrdma: use rc_pageoff for memcpy byte offset (CKI Backport Bot) [RHEL-142790] {CVE-2025-68811}
- NFSv4/pNFS: Clear NFS_INO_LAYOUTCOMMIT in pnfs_mark_layout_stateid_invalid (CKI Backport Bot) [RHEL-140260] {CVE-2025-68349}
Resolves: RHEL-140260, RHEL-142790

* Sat Jan 31 2026 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-611.31.1.el9_7]
- nvme-tcp: fix NULL pointer dereferences in nvmet_tcp_build_pdu_iovec (CKI Backport Bot) [RHEL-144332] {CVE-2026-22998}
Resolves: RHEL-144332

* Thu Jan 29 2026 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-611.30.1.el9_7]
- io_uring/net: commit partial buffers on retry (Jeff Moyer) [RHEL-137329] {CVE-2025-38730}
- io_uring/kbuf: add io_kbuf_commit() helper (Jeff Moyer) [RHEL-137329]