Import of kernel-6.12.0-124.29.1.el10_1

parent 356537e51c
commit 2e8a266009
@@ -212,6 +212,17 @@ pid>/``).
This value defaults to 0.


core_sort_vma
=============

The default coredump writes VMAs in address order. By setting
``core_sort_vma`` to 1, VMAs will be written from smallest size
to largest size. This is known to break at least elfutils, but
can be handy when dealing with very large (and truncated)
coredumps where the more useful debugging details are included
in the smaller VMAs.


core_uses_pid
=============

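As a usage aside (not part of the patch): a minimal user-space sketch of enabling the new behaviour, assuming the knob is exposed as /proc/sys/kernel/core_sort_vma alongside the other coredump sysctls registered in fs/coredump.c:

/* Sketch only: turn on size-ordered VMA dumps; the procfs path is an assumption. */
#include <stdio.h>

int main(void)
{
	FILE *f = fopen("/proc/sys/kernel/core_sort_vma", "w");

	if (!f)
		return 1;	/* needs privilege, and a kernel carrying this patch */
	fputs("1\n", f);
	return fclose(f) ? 1 : 0;
}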
@@ -12,7 +12,7 @@ RHEL_MINOR = 1
#
# Use this spot to avoid future merge conflicts.
# Do not trim this comment.
RHEL_RELEASE = 124.28.1
RHEL_RELEASE = 124.29.1

#
# RHEL_REBASE_NUM

@@ -63,6 +63,7 @@ static void free_vma_snapshot(struct coredump_params *cprm);

static int core_uses_pid;
static unsigned int core_pipe_limit;
static unsigned int core_sort_vma;
static char core_pattern[CORENAME_MAX_SIZE] = "core";
static int core_name_size = CORENAME_MAX_SIZE;
unsigned int core_file_note_size_limit = CORE_FILE_NOTE_SIZE_DEFAULT;
@@ -1025,6 +1026,15 @@ static struct ctl_table coredump_sysctls[] = {
.extra1 = (unsigned int *)&core_file_note_size_min,
.extra2 = (unsigned int *)&core_file_note_size_max,
},
{
.procname = "core_sort_vma",
.data = &core_sort_vma,
.maxlen = sizeof(int),
.mode = 0644,
.proc_handler = proc_douintvec_minmax,
.extra1 = SYSCTL_ZERO,
.extra2 = SYSCTL_ONE,
},
};

static int __init init_fs_coredump_sysctls(void)
@@ -1255,8 +1265,9 @@ static bool dump_vma_snapshot(struct coredump_params *cprm)
cprm->vma_data_size += m->dump_size;
}

sort(cprm->vma_meta, cprm->vma_count, sizeof(*cprm->vma_meta),
cmp_vma_size, NULL);
if (core_sort_vma)
sort(cprm->vma_meta, cprm->vma_count, sizeof(*cprm->vma_meta),
cmp_vma_size, NULL);

return true;
}

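The comparator is not part of this hunk; for orientation, a comparator of the shape sort() expects here, ordering the snapshot smallest-first, could look like the sketch below (struct and field names come from the surrounding code, the body is an assumption rather than the actual fs/coredump.c implementation):

/* Illustrative only: the real cmp_vma_size may differ in detail. */
static int cmp_vma_size_sketch(const void *a, const void *b)
{
	const struct core_vma_metadata *va = a;
	const struct core_vma_metadata *vb = b;

	if (va->dump_size < vb->dump_size)
		return -1;
	if (va->dump_size > vb->dump_size)
		return 1;
	return 0;
}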
@ -36,9 +36,8 @@ static struct cached_fid *find_or_create_cached_dir(struct cached_fids *cfids,
|
||||
* fully cached or it may be in the process of
|
||||
* being deleted due to a lease break.
|
||||
*/
|
||||
if (!cfid->time || !cfid->has_lease) {
|
||||
if (!is_valid_cached_dir(cfid))
|
||||
return NULL;
|
||||
}
|
||||
kref_get(&cfid->refcount);
|
||||
return cfid;
|
||||
}
|
||||
@ -193,7 +192,7 @@ replay_again:
|
||||
* Otherwise, it is either a new entry or laundromat worker removed it
|
||||
* from @cfids->entries. Caller will put last reference if the latter.
|
||||
*/
|
||||
if (cfid->has_lease && cfid->time) {
|
||||
if (is_valid_cached_dir(cfid)) {
|
||||
cfid->last_access_time = jiffies;
|
||||
spin_unlock(&cfids->cfid_list_lock);
|
||||
*ret_cfid = cfid;
|
||||
@ -232,7 +231,7 @@ replay_again:
|
||||
list_for_each_entry(parent_cfid, &cfids->entries, entry) {
|
||||
if (parent_cfid->dentry == dentry->d_parent) {
|
||||
cifs_dbg(FYI, "found a parent cached file handle\n");
|
||||
if (parent_cfid->has_lease && parent_cfid->time) {
|
||||
if (is_valid_cached_dir(parent_cfid)) {
|
||||
lease_flags
|
||||
|= SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE;
|
||||
memcpy(pfid->parent_lease_key,
|
||||
@ -388,11 +387,11 @@ out:
|
||||
* lease. Release one here, and the second below.
|
||||
*/
|
||||
cfid->has_lease = false;
|
||||
kref_put(&cfid->refcount, smb2_close_cached_fid);
|
||||
close_cached_dir(cfid);
|
||||
}
|
||||
spin_unlock(&cfids->cfid_list_lock);
|
||||
|
||||
kref_put(&cfid->refcount, smb2_close_cached_fid);
|
||||
close_cached_dir(cfid);
|
||||
} else {
|
||||
*ret_cfid = cfid;
|
||||
atomic_inc(&tcon->num_remote_opens);
|
||||
@ -416,12 +415,18 @@ int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
|
||||
if (cfids == NULL)
|
||||
return -EOPNOTSUPP;
|
||||
|
||||
if (!dentry)
|
||||
return -ENOENT;
|
||||
|
||||
spin_lock(&cfids->cfid_list_lock);
|
||||
list_for_each_entry(cfid, &cfids->entries, entry) {
|
||||
if (dentry && cfid->dentry == dentry) {
|
||||
if (cfid->dentry == dentry) {
|
||||
if (!is_valid_cached_dir(cfid))
|
||||
break;
|
||||
cifs_dbg(FYI, "found a cached file handle by dentry\n");
|
||||
kref_get(&cfid->refcount);
|
||||
*ret_cfid = cfid;
|
||||
cfid->last_access_time = jiffies;
|
||||
spin_unlock(&cfids->cfid_list_lock);
|
||||
return 0;
|
||||
}
|
||||
@ -432,12 +437,14 @@ int open_cached_dir_by_dentry(struct cifs_tcon *tcon,
|
||||
|
||||
static void
|
||||
smb2_close_cached_fid(struct kref *ref)
|
||||
__releases(&cfid->cfids->cfid_list_lock)
|
||||
{
|
||||
struct cached_fid *cfid = container_of(ref, struct cached_fid,
|
||||
refcount);
|
||||
int rc;
|
||||
|
||||
spin_lock(&cfid->cfids->cfid_list_lock);
|
||||
lockdep_assert_held(&cfid->cfids->cfid_list_lock);
|
||||
|
||||
if (cfid->on_list) {
|
||||
list_del(&cfid->entry);
|
||||
cfid->on_list = false;
|
||||
@ -472,7 +479,7 @@ void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
|
||||
spin_lock(&cfid->cfids->cfid_list_lock);
|
||||
if (cfid->has_lease) {
|
||||
cfid->has_lease = false;
|
||||
kref_put(&cfid->refcount, smb2_close_cached_fid);
|
||||
close_cached_dir(cfid);
|
||||
}
|
||||
spin_unlock(&cfid->cfids->cfid_list_lock);
|
||||
close_cached_dir(cfid);
|
||||
@ -481,7 +488,7 @@ void drop_cached_dir_by_name(const unsigned int xid, struct cifs_tcon *tcon,
|
||||
|
||||
void close_cached_dir(struct cached_fid *cfid)
|
||||
{
|
||||
kref_put(&cfid->refcount, smb2_close_cached_fid);
|
||||
kref_put_lock(&cfid->refcount, smb2_close_cached_fid, &cfid->cfids->cfid_list_lock);
|
||||
}
|
||||
|
||||
/*
|
||||
@ -521,10 +528,9 @@ void close_all_cached_dirs(struct cifs_sb_info *cifs_sb)
|
||||
spin_unlock(&cifs_sb->tlink_tree_lock);
|
||||
goto done;
|
||||
}
|
||||
spin_lock(&cfid->fid_lock);
|
||||
|
||||
tmp_list->dentry = cfid->dentry;
|
||||
cfid->dentry = NULL;
|
||||
spin_unlock(&cfid->fid_lock);
|
||||
|
||||
list_add_tail(&tmp_list->entry, &entry);
|
||||
}
|
||||
@ -557,8 +563,8 @@ void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
|
||||
|
||||
/*
|
||||
* Mark all the cfids as closed, and move them to the cfids->dying list.
|
||||
* They'll be cleaned up later by cfids_invalidation_worker. Take
|
||||
* a reference to each cfid during this process.
|
||||
* They'll be cleaned up by laundromat. Take a reference to each cfid
|
||||
* during this process.
|
||||
*/
|
||||
spin_lock(&cfids->cfid_list_lock);
|
||||
list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
|
||||
@ -575,12 +581,11 @@ void invalidate_all_cached_dirs(struct cifs_tcon *tcon)
|
||||
} else
|
||||
kref_get(&cfid->refcount);
|
||||
}
|
||||
/*
|
||||
* Queue dropping of the dentries once locks have been dropped
|
||||
*/
|
||||
if (!list_empty(&cfids->dying))
|
||||
queue_work(cfid_put_wq, &cfids->invalidation_work);
|
||||
spin_unlock(&cfids->cfid_list_lock);
|
||||
|
||||
/* run laundromat unconditionally now as there might have been previously queued work */
|
||||
mod_delayed_work(cfid_put_wq, &cfids->laundromat_work, 0);
|
||||
flush_delayed_work(&cfids->laundromat_work);
|
||||
}
|
||||
|
||||
static void
|
||||
@ -592,7 +597,7 @@ cached_dir_offload_close(struct work_struct *work)
|
||||
|
||||
WARN_ON(cfid->on_list);
|
||||
|
||||
kref_put(&cfid->refcount, smb2_close_cached_fid);
|
||||
close_cached_dir(cfid);
|
||||
cifs_put_tcon(tcon, netfs_trace_tcon_ref_put_cached_close);
|
||||
}
|
||||
|
||||
@ -607,14 +612,9 @@ static void cached_dir_put_work(struct work_struct *work)
|
||||
{
|
||||
struct cached_fid *cfid = container_of(work, struct cached_fid,
|
||||
put_work);
|
||||
struct dentry *dentry;
|
||||
|
||||
spin_lock(&cfid->fid_lock);
|
||||
dentry = cfid->dentry;
|
||||
dput(cfid->dentry);
|
||||
cfid->dentry = NULL;
|
||||
spin_unlock(&cfid->fid_lock);
|
||||
|
||||
dput(dentry);
|
||||
queue_work(serverclose_wq, &cfid->close_work);
|
||||
}
|
||||
|
||||
@ -672,7 +672,6 @@ static struct cached_fid *init_cached_dir(const char *path)
|
||||
INIT_LIST_HEAD(&cfid->entry);
|
||||
INIT_LIST_HEAD(&cfid->dirents.entries);
|
||||
mutex_init(&cfid->dirents.de_mutex);
|
||||
spin_lock_init(&cfid->fid_lock);
|
||||
kref_init(&cfid->refcount);
|
||||
return cfid;
|
||||
}
|
||||
@ -696,40 +695,38 @@ static void free_cached_dir(struct cached_fid *cfid)
|
||||
kfree(dirent);
|
||||
}
|
||||
|
||||
/* adjust tcon-level counters and reset per-dir accounting */
|
||||
if (cfid->cfids) {
|
||||
if (cfid->dirents.entries_count)
|
||||
atomic_long_sub((long)cfid->dirents.entries_count,
|
||||
&cfid->cfids->total_dirents_entries);
|
||||
if (cfid->dirents.bytes_used) {
|
||||
atomic64_sub((long long)cfid->dirents.bytes_used,
|
||||
&cfid->cfids->total_dirents_bytes);
|
||||
atomic64_sub((long long)cfid->dirents.bytes_used,
|
||||
&cifs_dircache_bytes_used);
|
||||
}
|
||||
}
|
||||
cfid->dirents.entries_count = 0;
|
||||
cfid->dirents.bytes_used = 0;
|
||||
|
||||
kfree(cfid->path);
|
||||
cfid->path = NULL;
|
||||
kfree(cfid);
|
||||
}
|
||||
|
||||
static void cfids_invalidation_worker(struct work_struct *work)
|
||||
{
|
||||
struct cached_fids *cfids = container_of(work, struct cached_fids,
|
||||
invalidation_work);
|
||||
struct cached_fid *cfid, *q;
|
||||
LIST_HEAD(entry);
|
||||
|
||||
spin_lock(&cfids->cfid_list_lock);
|
||||
/* move cfids->dying to the local list */
|
||||
list_cut_before(&entry, &cfids->dying, &cfids->dying);
|
||||
spin_unlock(&cfids->cfid_list_lock);
|
||||
|
||||
list_for_each_entry_safe(cfid, q, &entry, entry) {
|
||||
list_del(&cfid->entry);
|
||||
/* Drop the ref-count acquired in invalidate_all_cached_dirs */
|
||||
kref_put(&cfid->refcount, smb2_close_cached_fid);
|
||||
}
|
||||
}
|
||||
|
||||
static void cfids_laundromat_worker(struct work_struct *work)
|
||||
{
|
||||
struct cached_fids *cfids;
|
||||
struct cached_fid *cfid, *q;
|
||||
struct dentry *dentry;
|
||||
LIST_HEAD(entry);
|
||||
|
||||
cfids = container_of(work, struct cached_fids, laundromat_work.work);
|
||||
|
||||
spin_lock(&cfids->cfid_list_lock);
|
||||
/* move cfids->dying to the local list */
|
||||
list_cut_before(&entry, &cfids->dying, &cfids->dying);
|
||||
|
||||
list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
|
||||
if (cfid->last_access_time &&
|
||||
time_after(jiffies, cfid->last_access_time + HZ * dir_cache_timeout)) {
|
||||
@ -751,12 +748,9 @@ static void cfids_laundromat_worker(struct work_struct *work)
|
||||
list_for_each_entry_safe(cfid, q, &entry, entry) {
|
||||
list_del(&cfid->entry);
|
||||
|
||||
spin_lock(&cfid->fid_lock);
|
||||
dentry = cfid->dentry;
|
||||
dput(cfid->dentry);
|
||||
cfid->dentry = NULL;
|
||||
spin_unlock(&cfid->fid_lock);
|
||||
|
||||
dput(dentry);
|
||||
if (cfid->is_open) {
|
||||
spin_lock(&cifs_tcp_ses_lock);
|
||||
++cfid->tcon->tc_count;
|
||||
@ -769,7 +763,7 @@ static void cfids_laundromat_worker(struct work_struct *work)
|
||||
* Drop the ref-count from above, either the lease-ref (if there
|
||||
* was one) or the extra one acquired.
|
||||
*/
|
||||
kref_put(&cfid->refcount, smb2_close_cached_fid);
|
||||
close_cached_dir(cfid);
|
||||
}
|
||||
queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
|
||||
dir_cache_timeout * HZ);
|
||||
@ -786,11 +780,13 @@ struct cached_fids *init_cached_dirs(void)
|
||||
INIT_LIST_HEAD(&cfids->entries);
|
||||
INIT_LIST_HEAD(&cfids->dying);
|
||||
|
||||
INIT_WORK(&cfids->invalidation_work, cfids_invalidation_worker);
|
||||
INIT_DELAYED_WORK(&cfids->laundromat_work, cfids_laundromat_worker);
|
||||
queue_delayed_work(cfid_put_wq, &cfids->laundromat_work,
|
||||
dir_cache_timeout * HZ);
|
||||
|
||||
atomic_long_set(&cfids->total_dirents_entries, 0);
|
||||
atomic64_set(&cfids->total_dirents_bytes, 0);
|
||||
|
||||
return cfids;
|
||||
}
|
||||
|
||||
@ -807,7 +803,6 @@ void free_cached_dirs(struct cached_fids *cfids)
|
||||
return;
|
||||
|
||||
cancel_delayed_work_sync(&cfids->laundromat_work);
|
||||
cancel_work_sync(&cfids->invalidation_work);
|
||||
|
||||
spin_lock(&cfids->cfid_list_lock);
|
||||
list_for_each_entry_safe(cfid, q, &cfids->entries, entry) {
|
||||
|
||||
@@ -27,6 +27,9 @@ struct cached_dirents {
struct mutex de_mutex;
loff_t pos; /* Expected ctx->pos */
struct list_head entries;
/* accounting for cached entries in this directory */
unsigned long entries_count;
unsigned long bytes_used;
};

struct cached_fid {
@@ -41,7 +44,6 @@ struct cached_fid {
unsigned long last_access_time; /* jiffies of when last accessed */
struct kref refcount;
struct cifs_fid fid;
spinlock_t fid_lock;
struct cifs_tcon *tcon;
struct dentry *dentry;
struct work_struct put_work;
@@ -60,10 +62,21 @@ struct cached_fids {
int num_entries;
struct list_head entries;
struct list_head dying;
struct work_struct invalidation_work;
struct delayed_work laundromat_work;
/* aggregate accounting for all cached dirents under this tcon */
atomic_long_t total_dirents_entries;
atomic64_t total_dirents_bytes;
};

/* Module-wide directory cache accounting (defined in cifsfs.c) */
extern atomic64_t cifs_dircache_bytes_used; /* bytes across all mounts */

static inline bool
is_valid_cached_dir(struct cached_fid *cfid)
{
return cfid->time && cfid->has_lease;
}

extern struct cached_fids *init_cached_dirs(void);
extern void free_cached_dirs(struct cached_fids *cfids);
extern int open_cached_dir(unsigned int xid, struct cifs_tcon *tcon,

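For orientation, the intent behind these counters (per directory, per tcon, and module-wide) is that they are charged and drained together; a hypothetical helper condensing the charging side, based on the readdir and free paths elsewhere in this commit, would look roughly like this:

/*
 * Hypothetical helper, not in the patch: one cached dirent costs the
 * struct plus its NUL-terminated name, charged at all three levels.
 */
static void charge_cached_dirent(struct cached_fid *cfid, int namelen)
{
	long long delta = sizeof(struct cached_dirent) + namelen + 1;

	cfid->dirents.entries_count++;
	cfid->dirents.bytes_used += delta;
	atomic_long_inc(&cfid->cfids->total_dirents_entries);
	atomic64_add(delta, &cfid->cfids->total_dirents_bytes);
	atomic64_add(delta, &cifs_dircache_bytes_used);
}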
@ -239,14 +239,18 @@ static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
|
||||
struct cifs_ses *ses;
|
||||
struct cifs_tcon *tcon;
|
||||
struct cifsFileInfo *cfile;
|
||||
struct inode *inode;
|
||||
struct cifsInodeInfo *cinode;
|
||||
char lease[4];
|
||||
int n;
|
||||
|
||||
seq_puts(m, "# Version:1\n");
|
||||
seq_puts(m, "# Format:\n");
|
||||
seq_puts(m, "# <tree id> <ses id> <persistent fid> <flags> <count> <pid> <uid>");
|
||||
#ifdef CONFIG_CIFS_DEBUG2
|
||||
seq_printf(m, " <filename> <mid>\n");
|
||||
seq_puts(m, " <filename> <lease> <mid>\n");
|
||||
#else
|
||||
seq_printf(m, " <filename>\n");
|
||||
seq_puts(m, " <filename> <lease>\n");
|
||||
#endif /* CIFS_DEBUG2 */
|
||||
spin_lock(&cifs_tcp_ses_lock);
|
||||
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
|
||||
@ -266,11 +270,30 @@ static int cifs_debug_files_proc_show(struct seq_file *m, void *v)
|
||||
cfile->pid,
|
||||
from_kuid(&init_user_ns, cfile->uid),
|
||||
cfile->dentry);
|
||||
|
||||
/* Append lease/oplock caching state as RHW letters */
|
||||
inode = d_inode(cfile->dentry);
|
||||
n = 0;
|
||||
if (inode) {
|
||||
cinode = CIFS_I(inode);
|
||||
if (CIFS_CACHE_READ(cinode))
|
||||
lease[n++] = 'R';
|
||||
if (CIFS_CACHE_HANDLE(cinode))
|
||||
lease[n++] = 'H';
|
||||
if (CIFS_CACHE_WRITE(cinode))
|
||||
lease[n++] = 'W';
|
||||
}
|
||||
lease[n] = '\0';
|
||||
seq_puts(m, " ");
|
||||
if (n)
|
||||
seq_printf(m, "%s", lease);
|
||||
else
|
||||
seq_puts(m, "NONE");
|
||||
|
||||
#ifdef CONFIG_CIFS_DEBUG2
|
||||
seq_printf(m, " %llu\n", cfile->fid.mid);
|
||||
#else
|
||||
seq_printf(m, " %llu", cfile->fid.mid);
|
||||
#endif /* CONFIG_CIFS_DEBUG2 */
|
||||
seq_printf(m, "\n");
|
||||
#endif /* CIFS_DEBUG2 */
|
||||
}
|
||||
spin_unlock(&tcon->open_file_lock);
|
||||
}
|
||||
@ -304,8 +327,13 @@ static int cifs_debug_dirs_proc_show(struct seq_file *m, void *v)
|
||||
list_for_each(tmp1, &ses->tcon_list) {
|
||||
tcon = list_entry(tmp1, struct cifs_tcon, tcon_list);
|
||||
cfids = tcon->cfids;
|
||||
if (!cfids)
|
||||
continue;
|
||||
spin_lock(&cfids->cfid_list_lock); /* check lock ordering */
|
||||
seq_printf(m, "Num entries: %d\n", cfids->num_entries);
|
||||
seq_printf(m, "Num entries: %d, cached_dirents: %lu entries, %llu bytes\n",
|
||||
cfids->num_entries,
|
||||
(unsigned long)atomic_long_read(&cfids->total_dirents_entries),
|
||||
(unsigned long long)atomic64_read(&cfids->total_dirents_bytes));
|
||||
list_for_each_entry(cfid, &cfids->entries, entry) {
|
||||
seq_printf(m, "0x%x 0x%llx 0x%llx %s",
|
||||
tcon->tid,
|
||||
@ -316,11 +344,12 @@ static int cifs_debug_dirs_proc_show(struct seq_file *m, void *v)
|
||||
seq_printf(m, "\tvalid file info");
|
||||
if (cfid->dirents.is_valid)
|
||||
seq_printf(m, ", valid dirents");
|
||||
if (!list_empty(&cfid->dirents.entries))
|
||||
seq_printf(m, ", dirents: %lu entries, %lu bytes",
|
||||
cfid->dirents.entries_count, cfid->dirents.bytes_used);
|
||||
seq_printf(m, "\n");
|
||||
}
|
||||
spin_unlock(&cfids->cfid_list_lock);
|
||||
|
||||
|
||||
}
|
||||
}
|
||||
}
|
||||
@ -347,6 +376,22 @@ static __always_inline const char *compression_alg_str(__le16 alg)
|
||||
}
|
||||
}
|
||||
|
||||
static __always_inline const char *cipher_alg_str(__le16 cipher)
|
||||
{
|
||||
switch (cipher) {
|
||||
case SMB2_ENCRYPTION_AES128_CCM:
|
||||
return "AES128-CCM";
|
||||
case SMB2_ENCRYPTION_AES128_GCM:
|
||||
return "AES128-GCM";
|
||||
case SMB2_ENCRYPTION_AES256_CCM:
|
||||
return "AES256-CCM";
|
||||
case SMB2_ENCRYPTION_AES256_GCM:
|
||||
return "AES256-GCM";
|
||||
default:
|
||||
return "UNKNOWN";
|
||||
}
|
||||
}
|
||||
|
||||
static int cifs_debug_data_proc_show(struct seq_file *m, void *v)
|
||||
{
|
||||
struct mid_q_entry *mid_entry;
|
||||
@ -541,6 +586,11 @@ skip_rdma:
|
||||
else
|
||||
seq_puts(m, "disabled (not supported by this server)");
|
||||
|
||||
/* Show negotiated encryption cipher, even if not required */
|
||||
seq_puts(m, "\nEncryption: ");
|
||||
if (server->cipher_type)
|
||||
seq_printf(m, "Negotiated cipher (%s)", cipher_alg_str(server->cipher_type));
|
||||
|
||||
seq_printf(m, "\n\n\tSessions: ");
|
||||
i = 0;
|
||||
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
|
||||
@ -578,12 +628,8 @@ skip_rdma:
|
||||
|
||||
/* dump session id helpful for use with network trace */
|
||||
seq_printf(m, " SessionId: 0x%llx", ses->Suid);
|
||||
if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA) {
|
||||
if (ses->session_flags & SMB2_SESSION_FLAG_ENCRYPT_DATA)
|
||||
seq_puts(m, " encrypted");
|
||||
/* can help in debugging to show encryption type */
|
||||
if (server->cipher_type == SMB2_ENCRYPTION_AES256_GCM)
|
||||
seq_puts(m, "(gcm256)");
|
||||
}
|
||||
if (ses->sign)
|
||||
seq_puts(m, " signed");
|
||||
|
||||
|
||||
@@ -121,6 +121,46 @@ unsigned int dir_cache_timeout = 30;
module_param(dir_cache_timeout, uint, 0644);
MODULE_PARM_DESC(dir_cache_timeout, "Number of seconds to cache directory contents for which we have a lease. Default: 30 "
"Range: 1 to 65000 seconds, 0 to disable caching dir contents");
/* Module-wide total cached dirents (in bytes) across all tcons */
atomic64_t cifs_dircache_bytes_used = ATOMIC64_INIT(0);

/*
 * Write-only module parameter to drop all cached directory entries across
 * all CIFS mounts. Echo a non-zero value to trigger.
 */
static void cifs_drop_all_dir_caches(void)
{
struct TCP_Server_Info *server;
struct cifs_ses *ses;
struct cifs_tcon *tcon;

spin_lock(&cifs_tcp_ses_lock);
list_for_each_entry(server, &cifs_tcp_ses_list, tcp_ses_list) {
list_for_each_entry(ses, &server->smb_ses_list, smb_ses_list) {
if (cifs_ses_exiting(ses))
continue;
list_for_each_entry(tcon, &ses->tcon_list, tcon_list)
invalidate_all_cached_dirs(tcon);
}
}
spin_unlock(&cifs_tcp_ses_lock);
}

static int cifs_param_set_drop_dir_cache(const char *val, const struct kernel_param *kp)
{
bool bv;
int rc = kstrtobool(val, &bv);

if (rc)
return rc;
if (bv)
cifs_drop_all_dir_caches();
return 0;
}

module_param_call(drop_dir_cache, cifs_param_set_drop_dir_cache, NULL, NULL, 0200);
MODULE_PARM_DESC(drop_dir_cache, "Write 1 to drop all cached directory entries across all CIFS mounts");

#ifdef CONFIG_CIFS_STATS2
unsigned int slow_rsp_threshold = 1;
module_param(slow_rsp_threshold, uint, 0644);

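In practice this parameter should surface as a write-only file under the module's parameter directory, so something like `echo 1 > /sys/module/cifs/parameters/drop_dir_cache` (path inferred from the 0200 mode and parameter name above, not stated in the patch) would invoke cifs_drop_all_dir_caches().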
@ -322,13 +322,14 @@ retry_open:
|
||||
list_for_each_entry(parent_cfid, &tcon->cfids->entries, entry) {
|
||||
if (parent_cfid->dentry == direntry->d_parent) {
|
||||
cifs_dbg(FYI, "found a parent cached file handle\n");
|
||||
if (parent_cfid->has_lease && parent_cfid->time) {
|
||||
if (is_valid_cached_dir(parent_cfid)) {
|
||||
lease_flags
|
||||
|= SMB2_LEASE_FLAG_PARENT_LEASE_KEY_SET_LE;
|
||||
memcpy(fid->parent_lease_key,
|
||||
parent_cfid->fid.lease_key,
|
||||
SMB2_LEASE_KEY_SIZE);
|
||||
parent_cfid->dirents.is_valid = false;
|
||||
parent_cfid->dirents.is_failed = true;
|
||||
}
|
||||
break;
|
||||
}
|
||||
@ -683,6 +684,7 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
|
||||
const char *full_path;
|
||||
void *page;
|
||||
int retry_count = 0;
|
||||
struct cached_fid *cfid = NULL;
|
||||
|
||||
xid = get_xid();
|
||||
|
||||
@ -722,6 +724,28 @@ cifs_lookup(struct inode *parent_dir_inode, struct dentry *direntry,
|
||||
cifs_dbg(FYI, "non-NULL inode in lookup\n");
|
||||
} else {
|
||||
cifs_dbg(FYI, "NULL inode in lookup\n");
|
||||
|
||||
/*
|
||||
* We can only rely on negative dentries having the same
|
||||
* spelling as the cached dirent if case insensitivity is
|
||||
* forced on mount.
|
||||
*
|
||||
* XXX: if servers correctly announce Case Sensitivity Search
|
||||
* on GetInfo of FileFSAttributeInformation, then we can take
|
||||
* correct action even if case insensitive is not forced on
|
||||
* mount.
|
||||
*/
|
||||
if (pTcon->nocase && !open_cached_dir_by_dentry(pTcon, direntry->d_parent, &cfid)) {
|
||||
/*
|
||||
* dentry is negative and parent is fully cached:
|
||||
* we can assume file does not exist
|
||||
*/
|
||||
if (cfid->dirents.is_valid) {
|
||||
close_cached_dir(cfid);
|
||||
goto out;
|
||||
}
|
||||
close_cached_dir(cfid);
|
||||
}
|
||||
}
|
||||
cifs_dbg(FYI, "Full path: %s inode = 0x%p\n",
|
||||
full_path, d_inode(direntry));
|
||||
@ -755,6 +779,8 @@ again:
|
||||
}
|
||||
newInode = ERR_PTR(rc);
|
||||
}
|
||||
|
||||
out:
|
||||
free_dentry_path(page);
|
||||
cifs_put_tlink(tlink);
|
||||
free_xid(xid);
|
||||
@ -764,7 +790,8 @@ again:
|
||||
static int
|
||||
cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
|
||||
{
|
||||
struct inode *inode;
|
||||
struct inode *inode = NULL;
|
||||
struct cached_fid *cfid;
|
||||
int rc;
|
||||
|
||||
if (flags & LOOKUP_RCU)
|
||||
@ -811,6 +838,21 @@ cifs_d_revalidate(struct dentry *direntry, unsigned int flags)
|
||||
|
||||
return 1;
|
||||
}
|
||||
} else {
|
||||
struct cifs_sb_info *cifs_sb = CIFS_SB(d_inode(direntry->d_parent)->i_sb);
|
||||
struct cifs_tcon *tcon = cifs_sb_master_tcon(cifs_sb);
|
||||
|
||||
if (!open_cached_dir_by_dentry(tcon, direntry->d_parent, &cfid)) {
|
||||
/*
|
||||
* dentry is negative and parent is fully cached:
|
||||
* we can assume file does not exist
|
||||
*/
|
||||
if (cfid->dirents.is_valid) {
|
||||
close_cached_dir(cfid);
|
||||
return 1;
|
||||
}
|
||||
close_cached_dir(cfid);
|
||||
}
|
||||
}
|
||||
|
||||
/*
|
||||
|
||||
@ -2431,8 +2431,10 @@ cifs_do_rename(const unsigned int xid, struct dentry *from_dentry,
|
||||
tcon = tlink_tcon(tlink);
|
||||
server = tcon->ses->server;
|
||||
|
||||
if (!server->ops->rename)
|
||||
return -ENOSYS;
|
||||
if (!server->ops->rename) {
|
||||
rc = -ENOSYS;
|
||||
goto do_rename_exit;
|
||||
}
|
||||
|
||||
/* try path-based rename first */
|
||||
rc = server->ops->rename(xid, tcon, from_dentry,
|
||||
@ -2701,7 +2703,7 @@ cifs_dentry_needs_reval(struct dentry *dentry)
|
||||
return true;
|
||||
|
||||
if (!open_cached_dir_by_dentry(tcon, dentry->d_parent, &cfid)) {
|
||||
if (cfid->time && cifs_i->time > cfid->time) {
|
||||
if (cifs_i->time > cfid->time) {
|
||||
close_cached_dir(cfid);
|
||||
return false;
|
||||
}
|
||||
|
||||
@@ -916,6 +916,14 @@ parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
char *data_end;
struct dfs_referral_level_3 *ref;

if (rsp_size < sizeof(*rsp)) {
cifs_dbg(VFS | ONCE,
"%s: header is malformed (size is %u, must be %zu)\n",
__func__, rsp_size, sizeof(*rsp));
rc = -EINVAL;
goto parse_DFS_referrals_exit;
}

*num_of_nodes = le16_to_cpu(rsp->NumberOfReferrals);

if (*num_of_nodes < 1) {
@@ -925,6 +933,15 @@ parse_dfs_referrals(struct get_dfs_referral_rsp *rsp, u32 rsp_size,
goto parse_DFS_referrals_exit;
}

if (sizeof(*rsp) + *num_of_nodes * sizeof(REFERRAL3) > rsp_size) {
cifs_dbg(VFS | ONCE,
"%s: malformed buffer (size is %u, must be at least %zu)\n",
__func__, rsp_size,
sizeof(*rsp) + *num_of_nodes * sizeof(REFERRAL3));
rc = -EINVAL;
goto parse_DFS_referrals_exit;
}

ref = (struct dfs_referral_level_3 *) &(rsp->referrals);
if (ref->VersionNumber != cpu_to_le16(3)) {
cifs_dbg(VFS, "Referrals of V%d version are not supported, should be V3\n",

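To make the arithmetic of the two new checks concrete, here is a small stand-alone model (names and the free-standing shape are illustrative; the kernel code above works directly on the real response structs):

#include <stdbool.h>
#include <stddef.h>

/* Model of the bounds checks: the fixed header must fit, and the
 * advertised referral array must fit in what the server actually sent. */
static bool dfs_rsp_sane(size_t rsp_size, size_t hdr_size,
			 size_t referral_size, unsigned int num_referrals)
{
	if (rsp_size < hdr_size)
		return false;
	if (num_referrals < 1)
		return false;
	return hdr_size + (size_t)num_referrals * referral_size <= rsp_size;
}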
@ -873,39 +873,42 @@ static void finished_cached_dirents_count(struct cached_dirents *cde,
|
||||
cde->is_valid = 1;
|
||||
}
|
||||
|
||||
static void add_cached_dirent(struct cached_dirents *cde,
|
||||
struct dir_context *ctx,
|
||||
const char *name, int namelen,
|
||||
struct cifs_fattr *fattr,
|
||||
struct file *file)
|
||||
static bool add_cached_dirent(struct cached_dirents *cde,
|
||||
struct dir_context *ctx, const char *name,
|
||||
int namelen, struct cifs_fattr *fattr,
|
||||
struct file *file)
|
||||
{
|
||||
struct cached_dirent *de;
|
||||
|
||||
if (cde->file != file)
|
||||
return;
|
||||
return false;
|
||||
if (cde->is_valid || cde->is_failed)
|
||||
return;
|
||||
return false;
|
||||
if (ctx->pos != cde->pos) {
|
||||
cde->is_failed = 1;
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
de = kzalloc(sizeof(*de), GFP_ATOMIC);
|
||||
if (de == NULL) {
|
||||
cde->is_failed = 1;
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
de->namelen = namelen;
|
||||
de->name = kstrndup(name, namelen, GFP_ATOMIC);
|
||||
if (de->name == NULL) {
|
||||
kfree(de);
|
||||
cde->is_failed = 1;
|
||||
return;
|
||||
return false;
|
||||
}
|
||||
de->pos = ctx->pos;
|
||||
|
||||
memcpy(&de->fattr, fattr, sizeof(struct cifs_fattr));
|
||||
|
||||
list_add_tail(&de->entry, &cde->entries);
|
||||
/* update accounting */
|
||||
cde->entries_count++;
|
||||
cde->bytes_used += sizeof(*de) + (size_t)namelen + 1;
|
||||
return true;
|
||||
}
|
||||
|
||||
static bool cifs_dir_emit(struct dir_context *ctx,
|
||||
@ -914,7 +917,8 @@ static bool cifs_dir_emit(struct dir_context *ctx,
|
||||
struct cached_fid *cfid,
|
||||
struct file *file)
|
||||
{
|
||||
bool rc;
|
||||
size_t delta_bytes = 0;
|
||||
bool rc, added = false;
|
||||
ino_t ino = cifs_uniqueid_to_ino_t(fattr->cf_uniqueid);
|
||||
|
||||
rc = dir_emit(ctx, name, namelen, ino, fattr->cf_dtype);
|
||||
@ -922,10 +926,20 @@ static bool cifs_dir_emit(struct dir_context *ctx,
|
||||
return rc;
|
||||
|
||||
if (cfid) {
|
||||
/* Cost of this entry */
|
||||
delta_bytes = sizeof(struct cached_dirent) + (size_t)namelen + 1;
|
||||
|
||||
mutex_lock(&cfid->dirents.de_mutex);
|
||||
add_cached_dirent(&cfid->dirents, ctx, name, namelen,
|
||||
fattr, file);
|
||||
added = add_cached_dirent(&cfid->dirents, ctx, name, namelen,
|
||||
fattr, file);
|
||||
mutex_unlock(&cfid->dirents.de_mutex);
|
||||
|
||||
if (added) {
|
||||
/* per-tcon then global for consistency with free path */
|
||||
atomic64_add((long long)delta_bytes, &cfid->cfids->total_dirents_bytes);
|
||||
atomic_long_inc(&cfid->cfids->total_dirents_entries);
|
||||
atomic64_add((long long)delta_bytes, &cifs_dircache_bytes_used);
|
||||
}
|
||||
}
|
||||
|
||||
return rc;
|
||||
|
||||
@ -1294,6 +1294,8 @@ static int smb2_set_path_attr(const unsigned int xid, struct cifs_tcon *tcon,
|
||||
smb2_to_name = cifs_convert_path_to_utf16(to_name, cifs_sb);
|
||||
if (smb2_to_name == NULL) {
|
||||
rc = -ENOMEM;
|
||||
if (cfile)
|
||||
cifsFileInfo_put(cfile);
|
||||
goto smb2_rename_path;
|
||||
}
|
||||
in_iov.iov_base = smb2_to_name;
|
||||
|
||||
@ -614,6 +614,15 @@ smb2_is_valid_lease_break(char *buffer, struct TCP_Server_Info *server)
|
||||
struct cifs_tcon *tcon;
|
||||
struct cifs_pending_open *open;
|
||||
|
||||
/* Trace receipt of lease break request from server */
|
||||
trace_smb3_lease_break_enter(le32_to_cpu(rsp->CurrentLeaseState),
|
||||
le32_to_cpu(rsp->Flags),
|
||||
le16_to_cpu(rsp->Epoch),
|
||||
le32_to_cpu(rsp->hdr.Id.SyncId.TreeId),
|
||||
le64_to_cpu(rsp->hdr.SessionId),
|
||||
*((u64 *)rsp->LeaseKey),
|
||||
*((u64 *)&rsp->LeaseKey[8]));
|
||||
|
||||
cifs_dbg(FYI, "Checking for lease break\n");
|
||||
|
||||
/* If server is a channel, select the primary channel */
|
||||
@ -660,10 +669,12 @@ smb2_is_valid_lease_break(char *buffer, struct TCP_Server_Info *server)
|
||||
spin_unlock(&cifs_tcp_ses_lock);
|
||||
cifs_dbg(FYI, "Can not process lease break - no lease matched\n");
|
||||
trace_smb3_lease_not_found(le32_to_cpu(rsp->CurrentLeaseState),
|
||||
le32_to_cpu(rsp->hdr.Id.SyncId.TreeId),
|
||||
le64_to_cpu(rsp->hdr.SessionId),
|
||||
*((u64 *)rsp->LeaseKey),
|
||||
*((u64 *)&rsp->LeaseKey[8]));
|
||||
le32_to_cpu(rsp->Flags),
|
||||
le16_to_cpu(rsp->Epoch),
|
||||
le32_to_cpu(rsp->hdr.Id.SyncId.TreeId),
|
||||
le64_to_cpu(rsp->hdr.SessionId),
|
||||
*((u64 *)rsp->LeaseKey),
|
||||
*((u64 *)&rsp->LeaseKey[8]));
|
||||
|
||||
return false;
|
||||
}
|
||||
|
||||
@ -945,11 +945,8 @@ smb2_is_path_accessible(const unsigned int xid, struct cifs_tcon *tcon,
|
||||
|
||||
rc = open_cached_dir(xid, tcon, full_path, cifs_sb, true, &cfid);
|
||||
if (!rc) {
|
||||
if (cfid->has_lease) {
|
||||
close_cached_dir(cfid);
|
||||
return 0;
|
||||
}
|
||||
close_cached_dir(cfid);
|
||||
return 0;
|
||||
}
|
||||
|
||||
utf16_path = cifs_convert_path_to_utf16(full_path, cifs_sb);
|
||||
@ -2707,11 +2704,12 @@ smb2_query_info_compound(const unsigned int xid, struct cifs_tcon *tcon,
|
||||
struct cifs_fid fid;
|
||||
int rc;
|
||||
__le16 *utf16_path;
|
||||
struct cached_fid *cfid = NULL;
|
||||
struct cached_fid *cfid;
|
||||
int retries = 0, cur_sleep = 1;
|
||||
|
||||
replay_again:
|
||||
/* reinitialize for possible replay */
|
||||
cfid = NULL;
|
||||
flags = CIFS_CP_CREATE_CLOSE_OP;
|
||||
oplock = SMB2_OPLOCK_LEVEL_NONE;
|
||||
server = cifs_pick_channel(ses);
|
||||
@ -3120,8 +3118,7 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
|
||||
utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
|
||||
if (!utf16_path) {
|
||||
rc = -ENOMEM;
|
||||
free_xid(xid);
|
||||
return ERR_PTR(rc);
|
||||
goto put_tlink;
|
||||
}
|
||||
|
||||
oparms = (struct cifs_open_parms) {
|
||||
@ -3153,6 +3150,7 @@ get_smb2_acl_by_path(struct cifs_sb_info *cifs_sb,
|
||||
SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
|
||||
}
|
||||
|
||||
put_tlink:
|
||||
cifs_put_tlink(tlink);
|
||||
free_xid(xid);
|
||||
|
||||
@ -3193,8 +3191,7 @@ set_smb2_acl(struct smb_ntsd *pnntsd, __u32 acllen,
|
||||
utf16_path = cifs_convert_path_to_utf16(path, cifs_sb);
|
||||
if (!utf16_path) {
|
||||
rc = -ENOMEM;
|
||||
free_xid(xid);
|
||||
return rc;
|
||||
goto put_tlink;
|
||||
}
|
||||
|
||||
oparms = (struct cifs_open_parms) {
|
||||
@ -3215,6 +3212,7 @@ set_smb2_acl(struct smb_ntsd *pnntsd, __u32 acllen,
|
||||
SMB2_close(xid, tcon, fid.persistent_fid, fid.volatile_fid);
|
||||
}
|
||||
|
||||
put_tlink:
|
||||
cifs_put_tlink(tlink);
|
||||
free_xid(xid);
|
||||
return rc;
|
||||
|
||||
@ -6192,11 +6192,11 @@ SMB2_lease_break(const unsigned int xid, struct cifs_tcon *tcon,
|
||||
please_key_high = (__u64 *)(lease_key+8);
|
||||
if (rc) {
|
||||
cifs_stats_fail_inc(tcon, SMB2_OPLOCK_BREAK_HE);
|
||||
trace_smb3_lease_err(le32_to_cpu(lease_state), tcon->tid,
|
||||
trace_smb3_lease_ack_err(le32_to_cpu(lease_state), tcon->tid,
|
||||
ses->Suid, *please_key_low, *please_key_high, rc);
|
||||
cifs_dbg(FYI, "Send error in Lease Break = %d\n", rc);
|
||||
} else
|
||||
trace_smb3_lease_done(le32_to_cpu(lease_state), tcon->tid,
|
||||
trace_smb3_lease_ack_done(le32_to_cpu(lease_state), tcon->tid,
|
||||
ses->Suid, *please_key_low, *please_key_high);
|
||||
|
||||
return rc;
|
||||
|
||||
@ -1168,8 +1168,54 @@ DEFINE_EVENT(smb3_lease_done_class, smb3_##name, \
|
||||
__u64 lease_key_high), \
|
||||
TP_ARGS(lease_state, tid, sesid, lease_key_low, lease_key_high))
|
||||
|
||||
DEFINE_SMB3_LEASE_DONE_EVENT(lease_done);
|
||||
DEFINE_SMB3_LEASE_DONE_EVENT(lease_not_found);
|
||||
DEFINE_SMB3_LEASE_DONE_EVENT(lease_ack_done);
|
||||
/* Tracepoint when a lease break request is received/entered (includes epoch and flags) */
|
||||
DECLARE_EVENT_CLASS(smb3_lease_enter_class,
|
||||
TP_PROTO(__u32 lease_state,
|
||||
__u32 flags,
|
||||
__u16 epoch,
|
||||
__u32 tid,
|
||||
__u64 sesid,
|
||||
__u64 lease_key_low,
|
||||
__u64 lease_key_high),
|
||||
TP_ARGS(lease_state, flags, epoch, tid, sesid, lease_key_low, lease_key_high),
|
||||
TP_STRUCT__entry(
|
||||
__field(__u32, lease_state)
|
||||
__field(__u32, flags)
|
||||
__field(__u16, epoch)
|
||||
__field(__u32, tid)
|
||||
__field(__u64, sesid)
|
||||
__field(__u64, lease_key_low)
|
||||
__field(__u64, lease_key_high)
|
||||
),
|
||||
TP_fast_assign(
|
||||
__entry->lease_state = lease_state;
|
||||
__entry->flags = flags;
|
||||
__entry->epoch = epoch;
|
||||
__entry->tid = tid;
|
||||
__entry->sesid = sesid;
|
||||
__entry->lease_key_low = lease_key_low;
|
||||
__entry->lease_key_high = lease_key_high;
|
||||
),
|
||||
TP_printk("sid=0x%llx tid=0x%x lease_key=0x%llx%llx lease_state=0x%x flags=0x%x epoch=%u",
|
||||
__entry->sesid, __entry->tid, __entry->lease_key_high,
|
||||
__entry->lease_key_low, __entry->lease_state, __entry->flags, __entry->epoch)
|
||||
)
|
||||
|
||||
#define DEFINE_SMB3_LEASE_ENTER_EVENT(name) \
|
||||
DEFINE_EVENT(smb3_lease_enter_class, smb3_##name, \
|
||||
TP_PROTO(__u32 lease_state, \
|
||||
__u32 flags, \
|
||||
__u16 epoch, \
|
||||
__u32 tid, \
|
||||
__u64 sesid, \
|
||||
__u64 lease_key_low, \
|
||||
__u64 lease_key_high), \
|
||||
TP_ARGS(lease_state, flags, epoch, tid, sesid, lease_key_low, lease_key_high))
|
||||
|
||||
DEFINE_SMB3_LEASE_ENTER_EVENT(lease_break_enter);
|
||||
/* Lease not found: reuse lease_enter payload (includes epoch and flags) */
|
||||
DEFINE_SMB3_LEASE_ENTER_EVENT(lease_not_found);
|
||||
|
||||
DECLARE_EVENT_CLASS(smb3_lease_err_class,
|
||||
TP_PROTO(__u32 lease_state,
|
||||
@ -1210,7 +1256,7 @@ DEFINE_EVENT(smb3_lease_err_class, smb3_##name, \
|
||||
int rc), \
|
||||
TP_ARGS(lease_state, tid, sesid, lease_key_low, lease_key_high, rc))
|
||||
|
||||
DEFINE_SMB3_LEASE_ERR_EVENT(lease_err);
|
||||
DEFINE_SMB3_LEASE_ERR_EVENT(lease_ack_err);
|
||||
|
||||
DECLARE_EVENT_CLASS(smb3_connect_class,
|
||||
TP_PROTO(char *hostname,
|
||||
|
||||
mm/vmalloc.c (125 changed lines)
@@ -900,6 +900,11 @@ static struct vmap_node *vmap_nodes = &single;
static __read_mostly unsigned int nr_vmap_nodes = 1;
static __read_mostly unsigned int vmap_zone_size = 1;

/* A simple iterator over all vmap-nodes. */
#define for_each_vmap_node(vn)	\
	for ((vn) = &vmap_nodes[0];	\
	     (vn) < &vmap_nodes[nr_vmap_nodes]; (vn)++)

static inline unsigned int
addr_to_node_id(unsigned long addr)
{
@@ -918,6 +923,19 @@ id_to_node(unsigned int id)
return &vmap_nodes[id % nr_vmap_nodes];
}

static inline unsigned int
node_to_id(struct vmap_node *node)
{
/* Pointer arithmetic. */
unsigned int id = node - vmap_nodes;

if (likely(id < nr_vmap_nodes))
return id;

WARN_ONCE(1, "An address 0x%p is out-of-bounds.\n", node);
return 0;
}

/*
 * We use the value 0 to represent "no node", that is why
 * an encoded value will be the node-id incremented by 1.
@ -1056,12 +1074,11 @@ find_vmap_area_exceed_addr_lock(unsigned long addr, struct vmap_area **va)
|
||||
{
|
||||
unsigned long va_start_lowest;
|
||||
struct vmap_node *vn;
|
||||
int i;
|
||||
|
||||
repeat:
|
||||
for (i = 0, va_start_lowest = 0; i < nr_vmap_nodes; i++) {
|
||||
vn = &vmap_nodes[i];
|
||||
va_start_lowest = 0;
|
||||
|
||||
for_each_vmap_node(vn) {
|
||||
spin_lock(&vn->busy.lock);
|
||||
*va = __find_vmap_area_exceed_addr(addr, &vn->busy.root);
|
||||
|
||||
@ -2255,9 +2272,7 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end,
|
||||
*/
|
||||
purge_nodes = CPU_MASK_NONE;
|
||||
|
||||
for (i = 0; i < nr_vmap_nodes; i++) {
|
||||
vn = &vmap_nodes[i];
|
||||
|
||||
for_each_vmap_node(vn) {
|
||||
INIT_LIST_HEAD(&vn->purge_list);
|
||||
vn->skip_populate = full_pool_decay;
|
||||
decay_va_pool_node(vn, full_pool_decay);
|
||||
@ -2276,7 +2291,7 @@ static bool __purge_vmap_area_lazy(unsigned long start, unsigned long end,
|
||||
end = max(end, list_last_entry(&vn->purge_list,
|
||||
struct vmap_area, list)->va_end);
|
||||
|
||||
cpumask_set_cpu(i, &purge_nodes);
|
||||
cpumask_set_cpu(node_to_id(vn), &purge_nodes);
|
||||
}
|
||||
|
||||
nr_purge_nodes = cpumask_weight(&purge_nodes);
|
||||
@ -3100,7 +3115,7 @@ static void clear_vm_uninitialized_flag(struct vm_struct *vm)
|
||||
/*
|
||||
* Before removing VM_UNINITIALIZED,
|
||||
* we should make sure that vm has proper values.
|
||||
* Pair with smp_rmb() in show_numa_info().
|
||||
* Pair with smp_rmb() in vread_iter() and vmalloc_info_show().
|
||||
*/
|
||||
smp_wmb();
|
||||
vm->flags &= ~VM_UNINITIALIZED;
|
||||
@ -4918,39 +4933,37 @@ bool vmalloc_dump_obj(void *object)
|
||||
#endif
|
||||
|
||||
#ifdef CONFIG_PROC_FS
|
||||
static void show_numa_info(struct seq_file *m, struct vm_struct *v)
|
||||
|
||||
/*
|
||||
* Print number of pages allocated on each memory node.
|
||||
*
|
||||
* This function can only be called if CONFIG_NUMA is enabled
|
||||
* and VM_UNINITIALIZED bit in v->flags is disabled.
|
||||
*/
|
||||
static void show_numa_info(struct seq_file *m, struct vm_struct *v,
|
||||
unsigned int *counters)
|
||||
{
|
||||
if (IS_ENABLED(CONFIG_NUMA)) {
|
||||
unsigned int nr, *counters = m->private;
|
||||
unsigned int step = 1U << vm_area_page_order(v);
|
||||
unsigned int nr;
|
||||
unsigned int step = 1U << vm_area_page_order(v);
|
||||
|
||||
if (!counters)
|
||||
return;
|
||||
if (!counters)
|
||||
return;
|
||||
|
||||
if (v->flags & VM_UNINITIALIZED)
|
||||
return;
|
||||
/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
|
||||
smp_rmb();
|
||||
memset(counters, 0, nr_node_ids * sizeof(unsigned int));
|
||||
|
||||
memset(counters, 0, nr_node_ids * sizeof(unsigned int));
|
||||
|
||||
for (nr = 0; nr < v->nr_pages; nr += step)
|
||||
counters[page_to_nid(v->pages[nr])] += step;
|
||||
for_each_node_state(nr, N_HIGH_MEMORY)
|
||||
if (counters[nr])
|
||||
seq_printf(m, " N%u=%u", nr, counters[nr]);
|
||||
}
|
||||
for (nr = 0; nr < v->nr_pages; nr += step)
|
||||
counters[page_to_nid(v->pages[nr])] += step;
|
||||
for_each_node_state(nr, N_HIGH_MEMORY)
|
||||
if (counters[nr])
|
||||
seq_printf(m, " N%u=%u", nr, counters[nr]);
|
||||
}
|
||||
|
||||
static void show_purge_info(struct seq_file *m)
|
||||
{
|
||||
struct vmap_node *vn;
|
||||
struct vmap_area *va;
|
||||
int i;
|
||||
|
||||
for (i = 0; i < nr_vmap_nodes; i++) {
|
||||
vn = &vmap_nodes[i];
|
||||
|
||||
for_each_vmap_node(vn) {
|
||||
spin_lock(&vn->lazy.lock);
|
||||
list_for_each_entry(va, &vn->lazy.head, list) {
|
||||
seq_printf(m, "0x%pK-0x%pK %7ld unpurged vm_area\n",
|
||||
@ -4966,11 +4979,12 @@ static int vmalloc_info_show(struct seq_file *m, void *p)
|
||||
struct vmap_node *vn;
|
||||
struct vmap_area *va;
|
||||
struct vm_struct *v;
|
||||
int i;
|
||||
unsigned int *counters;
|
||||
|
||||
for (i = 0; i < nr_vmap_nodes; i++) {
|
||||
vn = &vmap_nodes[i];
|
||||
if (IS_ENABLED(CONFIG_NUMA))
|
||||
counters = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
|
||||
|
||||
for_each_vmap_node(vn) {
|
||||
spin_lock(&vn->busy.lock);
|
||||
list_for_each_entry(va, &vn->busy.head, list) {
|
||||
if (!va->vm) {
|
||||
@ -4983,6 +4997,11 @@ static int vmalloc_info_show(struct seq_file *m, void *p)
|
||||
}
|
||||
|
||||
v = va->vm;
|
||||
if (v->flags & VM_UNINITIALIZED)
|
||||
continue;
|
||||
|
||||
/* Pair with smp_wmb() in clear_vm_uninitialized_flag() */
|
||||
smp_rmb();
|
||||
|
||||
seq_printf(m, "0x%pK-0x%pK %7ld",
|
||||
v->addr, v->addr + v->size, v->size);
|
||||
@ -5017,7 +5036,9 @@ static int vmalloc_info_show(struct seq_file *m, void *p)
|
||||
if (is_vmalloc_addr(v->pages))
|
||||
seq_puts(m, " vpages");
|
||||
|
||||
show_numa_info(m, v);
|
||||
if (IS_ENABLED(CONFIG_NUMA))
|
||||
show_numa_info(m, v, counters);
|
||||
|
||||
seq_putc(m, '\n');
|
||||
}
|
||||
spin_unlock(&vn->busy.lock);
|
||||
@ -5027,19 +5048,14 @@ static int vmalloc_info_show(struct seq_file *m, void *p)
|
||||
* As a final step, dump "unpurged" areas.
|
||||
*/
|
||||
show_purge_info(m);
|
||||
if (IS_ENABLED(CONFIG_NUMA))
|
||||
kfree(counters);
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int __init proc_vmalloc_init(void)
|
||||
{
|
||||
void *priv_data = NULL;
|
||||
|
||||
if (IS_ENABLED(CONFIG_NUMA))
|
||||
priv_data = kmalloc(nr_node_ids * sizeof(unsigned int), GFP_KERNEL);
|
||||
|
||||
proc_create_single_data("vmallocinfo",
|
||||
0400, NULL, vmalloc_info_show, priv_data);
|
||||
|
||||
proc_create_single("vmallocinfo", 0400, NULL, vmalloc_info_show);
|
||||
return 0;
|
||||
}
|
||||
module_init(proc_vmalloc_init);
|
||||
@ -5091,7 +5107,7 @@ static void __init vmap_init_free_space(void)
|
||||
static void vmap_init_nodes(void)
|
||||
{
|
||||
struct vmap_node *vn;
|
||||
int i, n;
|
||||
int i;
|
||||
|
||||
#if BITS_PER_LONG == 64
|
||||
/*
|
||||
@ -5108,7 +5124,7 @@ static void vmap_init_nodes(void)
|
||||
* set of cores. Therefore a per-domain purging is supposed to
|
||||
* be added as well as a per-domain balancing.
|
||||
*/
|
||||
n = clamp_t(unsigned int, num_possible_cpus(), 1, 128);
|
||||
int n = clamp_t(unsigned int, num_possible_cpus(), 1, 128);
|
||||
|
||||
if (n > 1) {
|
||||
vn = kmalloc_array(n, sizeof(*vn), GFP_NOWAIT | __GFP_NOWARN);
|
||||
@ -5123,8 +5139,7 @@ static void vmap_init_nodes(void)
|
||||
}
|
||||
#endif
|
||||
|
||||
for (n = 0; n < nr_vmap_nodes; n++) {
|
||||
vn = &vmap_nodes[n];
|
||||
for_each_vmap_node(vn) {
|
||||
vn->busy.root = RB_ROOT;
|
||||
INIT_LIST_HEAD(&vn->busy.head);
|
||||
spin_lock_init(&vn->busy.lock);
|
||||
@ -5145,15 +5160,13 @@ static void vmap_init_nodes(void)
|
||||
static unsigned long
|
||||
vmap_node_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
|
||||
{
|
||||
unsigned long count;
|
||||
unsigned long count = 0;
|
||||
struct vmap_node *vn;
|
||||
int i, j;
|
||||
int i;
|
||||
|
||||
for (count = 0, i = 0; i < nr_vmap_nodes; i++) {
|
||||
vn = &vmap_nodes[i];
|
||||
|
||||
for (j = 0; j < MAX_VA_SIZE_PAGES; j++)
|
||||
count += READ_ONCE(vn->pool[j].len);
|
||||
for_each_vmap_node(vn) {
|
||||
for (i = 0; i < MAX_VA_SIZE_PAGES; i++)
|
||||
count += READ_ONCE(vn->pool[i].len);
|
||||
}
|
||||
|
||||
return count ? count : SHRINK_EMPTY;
|
||||
@ -5162,10 +5175,10 @@ vmap_node_shrink_count(struct shrinker *shrink, struct shrink_control *sc)
|
||||
static unsigned long
|
||||
vmap_node_shrink_scan(struct shrinker *shrink, struct shrink_control *sc)
|
||||
{
|
||||
int i;
|
||||
struct vmap_node *vn;
|
||||
|
||||
for (i = 0; i < nr_vmap_nodes; i++)
|
||||
decay_va_pool_node(&vmap_nodes[i], true);
|
||||
for_each_vmap_node(vn)
|
||||
decay_va_pool_node(vn, true);
|
||||
|
||||
return SHRINK_STOP;
|
||||
}
|
||||
|
||||
@@ -1,3 +1,31 @@
* Sat Jan 10 2026 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [6.12.0-124.29.1.el10_1]
- gitlab-ci: use rhel10.1 builder image (Michael Hofmann)
- mm/vmalloc: fix data race in show_numa_info() (Waiman Long) [RHEL-137997] {CVE-2025-38383}
- vmalloc: use for_each_vmap_node() in purge-vmap-area (Waiman Long) [RHEL-137997]
- vmalloc: switch to for_each_vmap_node() helper (Waiman Long) [RHEL-137997]
- vmalloc: add for_each_vmap_node() helper (Waiman Long) [RHEL-137997]
- smb: client: fix refcount leak in smb2_set_path_attr (Paulo Alcantara) [RHEL-128581]
- smb: client: fix potential UAF in smb2_close_cached_fid() (Paulo Alcantara) [RHEL-128581]
- smb: client: fix potential cfid UAF in smb2_query_info_compound (Paulo Alcantara) [RHEL-128581]
- smb: client: Fix refcount leak for cifs_sb_tlink (Paulo Alcantara) [RHEL-128581]
- cifs: parse_dfs_referrals: prevent oob on malformed input (Paulo Alcantara) [RHEL-128581]
- smb: client: remove cfids_invalidation_worker (Paulo Alcantara) [RHEL-128581]
- smb client: fix bug with newly created file in cached dir (Paulo Alcantara) [RHEL-128581]
- smb: client: short-circuit negative lookups when parent dir is fully cached (Paulo Alcantara) [RHEL-128581]
- smb: client: short-circuit in open_cached_dir_by_dentry() if !dentry (Paulo Alcantara) [RHEL-128581]
- smb: client: remove pointless cfid->has_lease check (Paulo Alcantara) [RHEL-128581]
- smb: client: remove unused fid_lock (Paulo Alcantara) [RHEL-128581]
- smb: client: update cfid->last_access_time in open_cached_dir_by_dentry() (Paulo Alcantara) [RHEL-128581]
- smb: client: ensure open_cached_dir_by_dentry() only returns valid cfid (Paulo Alcantara) [RHEL-128581]
- smb: client: account smb directory cache usage and per-tcon totals (Paulo Alcantara) [RHEL-128581]
- smb: client: add drop_dir_cache module parameter to invalidate cached dirents (Paulo Alcantara) [RHEL-128581]
- smb: client: show lease state as R/H/W (or NONE) in open_files (Paulo Alcantara) [RHEL-128581]
- smb: client: show negotiated cipher in DebugData (Paulo Alcantara) [RHEL-128581]
- smb: client: add new tracepoint to trace lease break notification (Paulo Alcantara) [RHEL-128581]
- smb: client: Fix NULL pointer dereference in cifs_debug_dirs_proc_show() (Paulo Alcantara) [RHEL-128581]
- coredump: Only sort VMAs when core_sort_vma sysctl is set (Herton R. Krzesinski) [RHEL-113364]
Resolves: RHEL-113364, RHEL-128581, RHEL-137997

* Wed Jan 07 2026 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [6.12.0-124.28.1.el10_1]
- libceph: fix potential use-after-free in have_mon_and_osd_map() (CKI Backport Bot) [RHEL-137403] {CVE-2025-68285}
Resolves: RHEL-137403

@@ -1,3 +1,3 @@
sbat,1,SBAT Version,sbat,1,https://github.com/rhboot/shim/blob/main/SBAT.md
kernel-uki-virt-addons.centos,1,Red Hat,kernel-uki-virt-addons,6.12.0-124.28.1.el10.x86_64,mailto:secalert@redhat.com
kernel-uki-virt-addons.almalinux,1,AlmaLinux,kernel-uki-virt-addons,6.12.0-124.28.1.el10.x86_64,mailto:security@almalinux.org
kernel-uki-virt-addons.centos,1,Red Hat,kernel-uki-virt-addons,6.12.0-124.29.1.el10.x86_64,mailto:secalert@redhat.com
kernel-uki-virt-addons.almalinux,1,AlmaLinux,kernel-uki-virt-addons,6.12.0-124.29.1.el10.x86_64,mailto:security@almalinux.org

uki.sbat (4 changed lines)
@@ -1,3 +1,3 @@
sbat,1,SBAT Version,sbat,1,https://github.com/rhboot/shim/blob/main/SBAT.md
kernel-uki-virt.centos,1,Red Hat,kernel-uki-virt,6.12.0-124.28.1.el10.x86_64,mailto:secalert@redhat.com
kernel-uki-virt.almalinux,1,AlmaLinux,kernel-uki-virt,6.12.0-124.28.1.el10.x86_64,mailto:security@almalinux.org
kernel-uki-virt.centos,1,Red Hat,kernel-uki-virt,6.12.0-124.29.1.el10.x86_64,mailto:secalert@redhat.com
kernel-uki-virt.almalinux,1,AlmaLinux,kernel-uki-virt,6.12.0-124.29.1.el10.x86_64,mailto:security@almalinux.org