Import of kernel-4.18.0-553.71.1.el8_10

eabdullin 2025-09-05 13:33:03 +00:00
parent eabbd11c5f
commit d437976681
13 changed files with 78 additions and 71 deletions

View File

@@ -12,7 +12,7 @@ RHEL_MINOR = 10
 #
 # Use this spot to avoid future merge conflicts.
 # Do not trim this comment.
-RHEL_RELEASE = 553.70.1
+RHEL_RELEASE = 553.71.1
 #
 # ZSTREAM

View File

@@ -1063,10 +1063,11 @@ int i40e_pf_reset(struct i40e_hw *hw)
 void i40e_clear_hw(struct i40e_hw *hw)
 {
 	u32 num_queues, base_queue;
-	u32 num_pf_int;
-	u32 num_vf_int;
+	s32 num_pf_int;
+	s32 num_vf_int;
 	u32 num_vfs;
-	u32 i, j;
+	s32 i;
+	u32 j;
 	u32 val;
 	u32 eol = 0x7ff;
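A note on the i40e hunk above — my reading, since the rationale is not stated in this diff: i40e_clear_hw() derives the interrupt counts from device registers and then iterates with bounds of the form num_pf_int - 2. With u32 counters, a reported value below 2 wraps that bound around to a huge number and the loop walks far past the intended register range; with s32 counters the bound goes negative and the loop body never runs. A minimal userspace sketch of the difference (the "- 2" bound and all values are illustrative assumptions, not taken from this commit):

    #include <stdint.h>
    #include <stdio.h>

    /* Models the loop bound in i40e_clear_hw(): with an unsigned counter,
     * "num_pf_int - 2" wraps to ~4 billion when num_pf_int < 2; with a
     * signed counter the comparison stays signed and the loop is skipped.
     * Hypothetical values, for illustration only. */
    int main(void)
    {
        uint32_t num_pf_int_u = 1;   /* u32, as in the old code */
        int32_t num_pf_int_s = 1;    /* s32, as in the new code */
        unsigned long runs_u = 0, runs_s = 0;
        uint32_t i;
        int32_t j;

        for (i = 0; i < num_pf_int_u - 2; i++) {
            if (++runs_u > 10)       /* cap the runaway loop for the demo */
                break;
        }
        for (j = 0; j < num_pf_int_s - 2; j++)
            runs_s++;

        printf("u32 bound: %lu iterations (capped), s32 bound: %lu iterations\n",
               runs_u, runs_s);
        return 0;
    }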

View File

@@ -1169,7 +1169,6 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 		   const struct gfs2_glock_operations *glops, int create,
 		   struct gfs2_glock **glp)
 {
-	struct super_block *s = sdp->sd_vfs;
 	struct lm_lockname name = { .ln_number = number,
 				    .ln_type = glops->go_type,
 				    .ln_sbd = sdp };
@@ -1232,7 +1231,7 @@ int gfs2_glock_get(struct gfs2_sbd *sdp, u64 number,
 	mapping = gfs2_glock2aspace(gl);
 	if (mapping) {
 		mapping->a_ops = &gfs2_meta_aops;
-		mapping->host = s->s_bdev->bd_inode;
+		mapping->host = sdp->sd_inode;
 		mapping->flags = 0;
 		mapping_set_gfp_mask(mapping, GFP_NOFS);
 		mapping->private_data = NULL;

View File

@@ -167,7 +167,7 @@ void gfs2_ail_flush(struct gfs2_glock *gl, bool fsync)
 static int gfs2_rgrp_metasync(struct gfs2_glock *gl)
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
-	struct address_space *metamapping = &sdp->sd_aspace;
+	struct address_space *metamapping = gfs2_aspace(sdp);
 	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
 	const unsigned bsize = sdp->sd_sb.sb_bsize;
 	loff_t start = (rgd->rd_addr * bsize) & PAGE_MASK;
@@ -224,7 +224,7 @@ static int rgrp_go_sync(struct gfs2_glock *gl)
 static void rgrp_go_inval(struct gfs2_glock *gl, int flags)
 {
 	struct gfs2_sbd *sdp = gl->gl_name.ln_sbd;
-	struct address_space *mapping = &sdp->sd_aspace;
+	struct address_space *mapping = gfs2_aspace(sdp);
 	struct gfs2_rgrpd *rgd = gfs2_glock2rgrp(gl);
 	const unsigned bsize = sdp->sd_sb.sb_bsize;
 	loff_t start, end;

View File

@@ -800,7 +800,7 @@ struct gfs2_sbd {

 	/* Log stuff */

-	struct address_space sd_aspace;
+	struct inode *sd_inode;

 	spinlock_t sd_log_lock;
@@ -856,6 +856,13 @@ struct gfs2_sbd {
 	unsigned long sd_glock_dqs_held;
 };

+#define GFS2_BAD_INO 1
+
+static inline struct address_space *gfs2_aspace(struct gfs2_sbd *sdp)
+{
+	return sdp->sd_inode->i_mapping;
+}
+
 static inline void gfs2_glstats_inc(struct gfs2_glock *gl, int which)
 {
 	gl->gl_stats.stats[which]++;

View File

@@ -121,7 +121,7 @@ struct buffer_head *gfs2_getbuf(struct gfs2_glock *gl, u64 blkno, int create)
 	unsigned int bufnum;

 	if (mapping == NULL)
-		mapping = &sdp->sd_aspace;
+		mapping = gfs2_aspace(sdp);

 	shift = PAGE_SHIFT - sdp->sd_sb.sb_bsize_shift;
 	index = blkno >> shift;	/* convert block to page */

View File

@@ -47,9 +47,7 @@ static inline struct gfs2_sbd *gfs2_mapping2sbd(struct address_space *mapping)
 		struct gfs2_glock_aspace *gla =
 			container_of(mapping, struct gfs2_glock_aspace, mapping);
 		return gla->glock.gl_name.ln_sbd;
-	} else if (mapping->a_ops == &gfs2_rgrp_aops)
-		return container_of(mapping, struct gfs2_sbd, sd_aspace);
-	else
+	} else
 		return inode->i_sb->s_fs_info;
 }

View File

@@ -66,15 +66,16 @@ static void gfs2_tune_init(struct gfs2_tune *gt)

 void free_sbd(struct gfs2_sbd *sdp)
 {
-	if (sdp->sd_lkstats)
-		free_percpu(sdp->sd_lkstats);
+	struct super_block *sb = sdp->sd_vfs;
+
+	free_percpu(sdp->sd_lkstats);
+	sb->s_fs_info = NULL;
 	kfree(sdp);
 }

 static struct gfs2_sbd *init_sbd(struct super_block *sb)
 {
 	struct gfs2_sbd *sdp;
-	struct address_space *mapping;

 	sdp = kzalloc(sizeof(struct gfs2_sbd), GFP_KERNEL);
 	if (!sdp)
@@ -111,16 +112,6 @@ static struct gfs2_sbd *init_sbd(struct super_block *sb)
 	INIT_LIST_HEAD(&sdp->sd_sc_inodes_list);

-	mapping = &sdp->sd_aspace;
-	address_space_init_once(mapping);
-	mapping->a_ops = &gfs2_rgrp_aops;
-	mapping->host = sb->s_bdev->bd_inode;
-	mapping->flags = 0;
-	mapping_set_gfp_mask(mapping, GFP_NOFS);
-	mapping->private_data = NULL;
-	mapping->writeback_index = 0;
-
 	spin_lock_init(&sdp->sd_log_lock);
 	atomic_set(&sdp->sd_log_pinned, 0);
 	INIT_LIST_HEAD(&sdp->sd_log_revokes);
@@ -1117,6 +1108,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
 	int silent = fc->sb_flags & SB_SILENT;
 	struct gfs2_sbd *sdp;
 	struct gfs2_holder mount_gh;
+	struct address_space *mapping;
 	int error;

 	sdp = init_sbd(sb);
@@ -1138,6 +1130,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
 	sb->s_flags |= SB_NOSEC;
 	sb->s_magic = GFS2_MAGIC;
 	sb->s_op = &gfs2_super_ops;
 	sb->s_d_op = &gfs2_dops;
 	sb->s_export_op = &gfs2_export_ops;
 	sb->s_xattr = gfs2_xattr_handlers;
@@ -1165,9 +1158,21 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
 		sdp->sd_tune.gt_statfs_quantum = 30;
 	}

+	/* Set up an address space for metadata writes */
+	sdp->sd_inode = new_inode(sb);
+	error = -ENOMEM;
+	if (!sdp->sd_inode)
+		goto fail_free;
+	sdp->sd_inode->i_ino = GFS2_BAD_INO;
+	sdp->sd_inode->i_size = OFFSET_MAX;
+
+	mapping = gfs2_aspace(sdp);
+	mapping->a_ops = &gfs2_rgrp_aops;
+	mapping_set_gfp_mask(mapping, GFP_NOFS);
+
 	error = init_names(sdp, silent);
 	if (error)
-		goto fail_free;
+		goto fail_iput;

 	snprintf(sdp->sd_fsname, sizeof(sdp->sd_fsname), "%s", sdp->sd_table_name);
@@ -1176,7 +1181,7 @@ static int gfs2_fill_super(struct super_block *sb, struct fs_context *fc)
 				  WQ_MEM_RECLAIM | WQ_HIGHPRI | WQ_FREEZABLE, 0,
 				  sdp->sd_fsname);
 	if (!sdp->sd_glock_wq)
-		goto fail_free;
+		goto fail_iput;

 	sdp->sd_delete_wq = alloc_workqueue("gfs2-delete/%s",
 			WQ_MEM_RECLAIM | WQ_FREEZABLE, 0, sdp->sd_fsname);
@@ -1296,9 +1301,10 @@ fail_delete_wq:
 fail_glock_wq:
 	if (sdp->sd_glock_wq)
 		destroy_workqueue(sdp->sd_glock_wq);
+fail_iput:
+	iput(sdp->sd_inode);
 fail_free:
 	free_sbd(sdp);
-	sb->s_fs_info = NULL;
 	return error;
 }

View File

@@ -670,7 +670,7 @@ restart:
 	gfs2_jindex_free(sdp);
 	/* Take apart glock structures and buffer lists */
 	gfs2_gl_hash_clear(sdp);
-	truncate_inode_pages_final(&sdp->sd_aspace);
+	iput(sdp->sd_inode);
 	gfs2_delete_debugfs_file(sdp);
 	gfs2_sys_fs_del(sdp);

View File

@@ -760,7 +760,6 @@ fail_reg:
 	fs_err(sdp, "error %d adding sysfs files\n", error);
 	kobject_put(&sdp->sd_kobj);
 	wait_for_completion(&sdp->sd_kobj_unregister);
-	sb->s_fs_info = NULL;
 	return error;
 }

View File

@@ -224,31 +224,23 @@ static void signal_our_withdraw(struct gfs2_sbd *sdp)
 	 */
 	ret = gfs2_glock_nq(&sdp->sd_live_gh);

-	gfs2_glock_put(live_gl); /* drop extra reference we acquired */
-	clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);
-
 	/*
 	 * If we actually got the "live" lock in EX mode, there are no other
-	 * nodes available to replay our journal. So we try to replay it
-	 * ourselves. We hold the "live" glock to prevent other mounters
-	 * during recovery, then just dequeue it and reacquire it in our
-	 * normal SH mode. Just in case the problem that caused us to
-	 * withdraw prevents us from recovering our journal (e.g. io errors
-	 * and such) we still check if the journal is clean before proceeding
-	 * but we may wait forever until another mounter does the recovery.
+	 * nodes available to replay our journal.
 	 */
 	if (ret == 0) {
-		fs_warn(sdp, "No other mounters found. Trying to recover our "
-			"own journal jid %d.\n", sdp->sd_lockstruct.ls_jid);
-		if (gfs2_recover_journal(sdp->sd_jdesc, 1))
-			fs_warn(sdp, "Unable to recover our journal jid %d.\n",
-				sdp->sd_lockstruct.ls_jid);
-		gfs2_glock_dq_wait(&sdp->sd_live_gh);
-		gfs2_holder_reinit(LM_ST_SHARED, LM_FLAG_NOEXP | GL_EXACT,
-				   &sdp->sd_live_gh);
-		gfs2_glock_nq(&sdp->sd_live_gh);
+		fs_warn(sdp, "No other mounters found.\n");
+		/*
+		 * We are about to release the lockspace.  By keeping live_gl
+		 * locked here, we ensure that the next mounter coming along
+		 * will be a "first" mounter which will perform recovery.
+		 */
+		goto skip_recovery;
 	}
+	gfs2_glock_put_async(live_gl); /* drop extra reference we acquired */
+	clear_bit(SDF_WITHDRAW_RECOVERY, &sdp->sd_flags);

 	/*
 	 * At this point our journal is evicted, so we need to get a new inode
 	 * for it. Once done, we need to call gfs2_find_jhead which
@@ -314,19 +306,25 @@ int gfs2_withdraw(struct gfs2_sbd *sdp)
 	struct lm_lockstruct *ls = &sdp->sd_lockstruct;
 	const struct lm_lockops *lm = ls->ls_ops;

-	if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW &&
-	    test_and_set_bit(SDF_WITHDRAWN, &sdp->sd_flags)) {
-		if (!test_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags))
-			return -1;
+	if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW) {
+		unsigned long old = READ_ONCE(sdp->sd_flags), new;

-		wait_on_bit(&sdp->sd_flags, SDF_WITHDRAW_IN_PROG,
-			    TASK_UNINTERRUPTIBLE);
-		return -1;
-	}
-	set_bit(SDF_WITHDRAW_IN_PROG, &sdp->sd_flags);
-	if (sdp->sd_args.ar_errors == GFS2_ERRORS_WITHDRAW) {
+		for (;;) {
+			unsigned long ret;
+
+			if (old & BIT(SDF_WITHDRAWN)) {
+				wait_on_bit(&sdp->sd_flags,
+					    SDF_WITHDRAW_IN_PROG,
+					    TASK_UNINTERRUPTIBLE);
+				return -1;
+			}
+			new = old | BIT(SDF_WITHDRAWN) | BIT(SDF_WITHDRAW_IN_PROG);
+			ret = cmpxchg(&sdp->sd_flags, old, new);
+			if (ret == old)
+				break;
+			old = ret;
+		}
+
 		fs_err(sdp, "about to withdraw this file system\n");
 		BUG_ON(sdp->sd_args.ar_debug);
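A note on the gfs2_withdraw() hunk above: the old code set SDF_WITHDRAWN with test_and_set_bit() and only later set SDF_WITHDRAW_IN_PROG with a separate set_bit(), so a concurrent withdrawer could observe the first bit without the second and return -1 without waiting for the withdraw to finish. The new cmpxchg() loop publishes both bits in a single atomic update. A minimal userspace model of that pattern, using a GCC/Clang builtin in place of the kernel's cmpxchg() (the names and flow here are illustrative, not the kernel API):

    #include <stdio.h>

    #define SDF_WITHDRAWN        0
    #define SDF_WITHDRAW_IN_PROG 1
    #define BIT(n) (1UL << (n))

    static unsigned long sd_flags;

    /* Returns 1 if this caller won the right to perform the withdraw,
     * 0 if someone else already set SDF_WITHDRAWN first. */
    static int claim_withdraw(void)
    {
        unsigned long old = sd_flags, new;

        for (;;) {
            unsigned long ret;

            if (old & BIT(SDF_WITHDRAWN))
                return 0;        /* the kernel code waits on IN_PROG here */
            new = old | BIT(SDF_WITHDRAWN) | BIT(SDF_WITHDRAW_IN_PROG);
            /* __sync_val_compare_and_swap plays the role of cmpxchg() */
            ret = __sync_val_compare_and_swap(&sd_flags, old, new);
            if (ret == old)
                return 1;        /* both bits became visible atomically */
            old = ret;           /* lost a race: re-read and retry */
        }
    }

    int main(void)
    {
        printf("first caller wins: %d\n", claim_withdraw());
        printf("second caller wins: %d\n", claim_withdraw());
        return 0;
    }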

View File

@@ -286,6 +286,7 @@ static inline struct bdi_writeback *inode_to_wb(const struct inode *inode)
 {
 #ifdef CONFIG_LOCKDEP
 	WARN_ON_ONCE(debug_locks &&
+		     (inode->i_sb->s_iflags & SB_I_CGROUPWB) &&
 		     (!lockdep_is_held(&inode->i_lock) &&
 		      !lockdep_is_held(&inode->i_mapping->i_pages.xa_lock) &&
 		      !lockdep_is_held(&inode->i_wb->list_lock)));

View File

@@ -1431,12 +1431,12 @@ static bool udp_skb_has_head_state(struct sk_buff *skb)
 }

 /* fully reclaim rmem/fwd memory allocated for skb */
-static void udp_rmem_release(struct sock *sk, int size, int partial,
-			     bool rx_queue_lock_held)
+static void udp_rmem_release(struct sock *sk, unsigned int size,
+			     int partial, bool rx_queue_lock_held)
 {
 	struct udp_sock *up = udp_sk(sk);
 	struct sk_buff_head *sk_queue;
-	int amt;
+	unsigned int amt;

 	if (likely(partial)) {
 		up->forward_deficit += size;
@@ -1456,10 +1456,8 @@ static void udp_rmem_release(struct sock *sk, unsigned int size,
 	if (!rx_queue_lock_held)
 		spin_lock(&sk_queue->lock);

-	sk->sk_forward_alloc += size;
-	amt = (sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1);
-	sk->sk_forward_alloc -= amt;
-
+	amt = (size + sk->sk_forward_alloc - partial) & ~(SK_MEM_QUANTUM - 1);
+	sk->sk_forward_alloc += (int)(size - amt);
 	if (amt)
 		__sk_mem_reduce_allocated(sk, amt >> SK_MEM_QUANTUM_SHIFT);
@@ -1643,7 +1641,7 @@ EXPORT_SYMBOL_GPL(skb_consume_udp);

 static struct sk_buff *__first_packet_length(struct sock *sk,
 					     struct sk_buff_head *rcvq,
-					     int *total)
+					     unsigned int *total)
 {
 	struct sk_buff *skb;
@@ -1676,8 +1674,8 @@ static int first_packet_length(struct sock *sk)
 {
 	struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue;
 	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
+	unsigned int total = 0;
 	struct sk_buff *skb;
-	int total = 0;
 	int res;

 	spin_lock_bh(&rcvq->lock);
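A note on the udp_rmem_release() hunks above — my reading of the change: the released byte count, the reclaimed amount, and the per-queue total now travel as unsigned int, and the quantum-aligned amount is computed up front from size + sk_forward_alloc - partial, so sk_forward_alloc is only ever adjusted by the small remainder size - amt rather than first being inflated by the full size and then reduced. In the normal case both sequences end with the same forward_alloc value, as this sketch shows; SK_MEM_QUANTUM and all numbers below are illustrative stand-ins:

    #include <stdio.h>

    #define SK_MEM_QUANTUM 4096  /* stand-in for the kernel's memory quantum */

    int main(void)
    {
        unsigned int size = 1000000; /* bytes being released (illustrative) */
        int partial = 1;
        int fwd = 3000;              /* sk->sk_forward_alloc before release */

        /* Old sequence: inflate sk_forward_alloc by the full size first. */
        int old_fwd = fwd;
        int old_amt;
        old_fwd += (int)size;        /* large intermediate value */
        old_amt = (old_fwd - partial) & ~(SK_MEM_QUANTUM - 1);
        old_fwd -= old_amt;

        /* New sequence: compute the quantum-aligned amount up front and
         * add only the remainder to sk_forward_alloc. */
        unsigned int new_amt = (size + fwd - partial) & ~(SK_MEM_QUANTUM - 1);
        int new_fwd = fwd + (int)(size - new_amt);

        printf("old: amt=%d fwd=%d   new: amt=%u fwd=%d\n",
               old_amt, old_fwd, new_amt, new_fwd);
        return 0;
    }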