Import of kernel-4.18.0-553.64.1.el8_10

parent ab554a9950
commit 98b6a4f588
@@ -12,7 +12,7 @@ RHEL_MINOR = 10
 #
 # Use this spot to avoid future merge conflicts.
 # Do not trim this comment.
-RHEL_RELEASE = 553.63.1
+RHEL_RELEASE = 553.64.1
 
 #
 # ZSTREAM
@@ -1139,7 +1139,7 @@ static int iwl_parse_tlv_firmware(struct iwl_drv *drv,
 
                        if (tlv_len != sizeof(*fseq_ver))
                                goto invalid_tlv_len;
-                       IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %s\n",
+                       IWL_INFO(drv, "TLV_FW_FSEQ_VERSION: %.32s\n",
                                 fseq_ver->version);
                        }
                        break;
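
Annotation: the %s → %.32s change matters because the TLV version field is a fixed-width character array supplied by firmware with no guaranteed NUL terminator; the precision caps how far the format string may read. A minimal standalone sketch of the same idea (plain C, illustrative names, assuming a 32-byte field as the %.32s width suggests; this is not the driver code):

    #include <stdio.h>
    #include <string.h>

    int main(void)
    {
            char version[32];                       /* mirrors a fixed-width TLV field */

            memset(version, 'A', sizeof(version));  /* fully used: no room for '\0' */

            /* "%s" would keep reading past the array hunting for a terminator;
             * "%.32s" stops after at most 32 bytes even when none is present. */
            printf("TLV_FW_FSEQ_VERSION: %.32s\n", version);
            return 0;
    }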
@@ -283,7 +283,7 @@ static const struct sysrq_key_op sysrq_showregs_op = {
 static void sysrq_handle_showstate(int key)
 {
         show_state();
-        show_workqueue_state();
+        show_all_workqueues();
 }
 static const struct sysrq_key_op sysrq_showstate_op = {
         .handler        = sysrq_handle_showstate,
@@ -1919,9 +1919,11 @@ static void nfs_layoutget_begin(struct pnfs_layout_hdr *lo)
 static void nfs_layoutget_end(struct pnfs_layout_hdr *lo)
 {
         if (atomic_dec_and_test(&lo->plh_outstanding) &&
-            test_and_clear_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags))
+            test_and_clear_bit(NFS_LAYOUT_DRAIN, &lo->plh_flags)) {
+                smp_mb__after_atomic();
                 wake_up_bit(&lo->plh_flags, NFS_LAYOUT_DRAIN);
+        }
 }
 
 static bool pnfs_is_first_layoutget(struct pnfs_layout_hdr *lo)
 {
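
Annotation: the added smp_mb__after_atomic() follows the documented contract of wake_up_bit(): the waker must order its flag update before the waitqueue check that wake_up_bit() performs internally, or a concurrent waiter can miss the wakeup. A kernel-style sketch of the generic pairing (SOME_FLAG_BIT is an illustrative name; not a standalone program — see the comments on wake_up_bit() in the kernel's wait-bit headers):

    /* Waker side: make the flag update visible before checking for waiters. */
    clear_bit(SOME_FLAG_BIT, &flags);
    smp_mb__after_atomic();              /* order the clear before the wakeup check */
    wake_up_bit(&flags, SOME_FLAG_BIT);

    /* Waiter side: sleeps until the bit is cleared and the wakeup arrives. */
    wait_on_bit(&flags, SOME_FLAG_BIT, TASK_UNINTERRUPTIBLE);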
@@ -486,7 +486,8 @@ extern bool workqueue_congested(int cpu, struct workqueue_struct *wq);
 extern unsigned int work_busy(struct work_struct *work);
 extern __printf(1, 2) void set_worker_desc(const char *fmt, ...);
 extern void print_worker_info(const char *log_lvl, struct task_struct *task);
-extern void show_workqueue_state(void);
+extern void show_all_workqueues(void);
+extern void show_one_workqueue(struct workqueue_struct *wq);
 extern void wq_worker_comm(char *buf, size_t size, struct task_struct *task);
 
 /**
@@ -96,7 +96,7 @@ static int try_to_freeze_tasks(bool user_only)
                        todo - wq_busy, wq_busy);
 
                 if (wq_busy)
-                        show_workqueue_state();
+                        show_all_workqueues();
 
                 if (!wakeup) {
                         read_lock(&tasklist_lock);
@@ -3345,15 +3345,17 @@ static inline bool child_cfs_rq_on_list(struct cfs_rq *cfs_rq)
 {
         struct cfs_rq *prev_cfs_rq;
         struct list_head *prev;
+        struct rq *rq = rq_of(cfs_rq);
 
         if (cfs_rq->on_list) {
                 prev = cfs_rq->leaf_cfs_rq_list.prev;
         } else {
-                struct rq *rq = rq_of(cfs_rq);
-
                 prev = rq->tmp_alone_branch;
         }
 
+        if (prev == &rq->leaf_cfs_rq_list)
+                return false;
+
         prev_cfs_rq = container_of(prev, struct cfs_rq, leaf_cfs_rq_list);
 
         return (prev_cfs_rq->tg->parent == cfs_rq->tg);
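
Annotation: hoisting rq to function scope enables the new guard for the case where prev points at the list head itself (&rq->leaf_cfs_rq_list). The head is a bare sentinel, not embedded in a struct cfs_rq, so container_of() on it would fabricate a pointer into unrelated rq memory. A standalone sketch of that bug class (illustrative names, not the scheduler code):

    #include <stddef.h>
    #include <stdio.h>

    struct list_head { struct list_head *next, *prev; };

    #define container_of(ptr, type, member) \
            ((type *)((char *)(ptr) - offsetof(type, member)))

    struct item { int tag; struct list_head node; };

    int main(void)
    {
            struct list_head head = { &head, &head };  /* empty list: prev == &head */
            struct list_head *prev = head.prev;

            /* Without this guard, container_of(prev, ...) would "find" an item
             * at a bogus address computed from the sentinel itself. */
            if (prev == &head) {
                    puts("prev is the list head; no predecessor item exists");
                    return 0;
            }

            printf("tag=%d\n", container_of(prev, struct item, node)->tag);
            return 0;
    }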
@@ -2833,6 +2833,16 @@ int ftrace_startup(struct ftrace_ops *ops, int command)
 
         ftrace_startup_enable(command);
 
+        /*
+         * If ftrace is in an undefined state, we just remove ops from list
+         * to prevent the NULL pointer, instead of totally rolling it back and
+         * free trampoline, because those actions could cause further damage.
+         */
+        if (unlikely(ftrace_disabled)) {
+                __unregister_ftrace_function(ops);
+                return -ENODEV;
+        }
+
         ops->flags &= ~FTRACE_OPS_FL_ADDING;
 
         return 0;
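
Annotation: the added block re-checks the global ftrace_disabled kill switch after the ops has already been published, and deliberately performs only the cheapest safe unwind (unlinking the ops) rather than a full rollback that would free the trampoline. A minimal standalone sketch of this "partial unwind when the subsystem died mid-registration" pattern (illustrative names, not the ftrace code):

    #include <errno.h>
    #include <stdbool.h>
    #include <stdio.h>

    static bool subsystem_disabled;          /* stands in for ftrace_disabled */

    static void unlink_ops(const char *ops) { printf("unlinked %s\n", ops); }

    static int finish_registration(const char *ops)
    {
            /* ... earlier steps have already made 'ops' reachable ... */
            if (subsystem_disabled) {
                    /* Minimal unwind: just unlink; skip teardown that would
                     * touch state the dead subsystem no longer maintains. */
                    unlink_ops(ops);
                    return -ENODEV;
            }
            return 0;
    }

    int main(void)
    {
            subsystem_disabled = true;
            return finish_registration("my_ops") ? 1 : 0;
    }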
@@ -359,6 +359,7 @@ EXPORT_SYMBOL_GPL(system_freezable_power_efficient_wq);
 static int worker_thread(void *__worker);
 static void workqueue_sysfs_unregister(struct workqueue_struct *wq);
 static void show_pwq(struct pool_workqueue *pwq);
+static void show_one_worker_pool(struct worker_pool *pool);
 
 #define CREATE_TRACE_POINTS
 #include <trace/events/workqueue.h>
@@ -4553,7 +4554,7 @@ void destroy_workqueue(struct workqueue_struct *wq)
                         raw_spin_unlock_irq(&pwq->pool->lock);
                         mutex_unlock(&wq->mutex);
                         mutex_unlock(&wq_pool_mutex);
-                        show_workqueue_state();
+                        show_one_workqueue(wq);
                         return;
                 }
                 raw_spin_unlock_irq(&pwq->pool->lock);
@@ -4816,22 +4817,53 @@ static void pr_cont_pool_info(struct worker_pool *pool)
         pr_cont(" flags=0x%x nice=%d", pool->flags, pool->attrs->nice);
 }
 
-static void pr_cont_work(bool comma, struct work_struct *work)
+struct pr_cont_work_struct {
+        bool comma;
+        work_func_t func;
+        long ctr;
+};
+
+static void pr_cont_work_flush(bool comma, work_func_t func, struct pr_cont_work_struct *pcwsp)
+{
+        if (!pcwsp->ctr)
+                goto out_record;
+        if (func == pcwsp->func) {
+                pcwsp->ctr++;
+                return;
+        }
+        if (pcwsp->ctr == 1)
+                pr_cont("%s %ps", pcwsp->comma ? "," : "", pcwsp->func);
+        else
+                pr_cont("%s %ld*%ps", pcwsp->comma ? "," : "", pcwsp->ctr, pcwsp->func);
+        pcwsp->ctr = 0;
+out_record:
+        if ((long)func == -1L)
+                return;
+        pcwsp->comma = comma;
+        pcwsp->func = func;
+        pcwsp->ctr = 1;
+}
+
+static void pr_cont_work(bool comma, struct work_struct *work, struct pr_cont_work_struct *pcwsp)
 {
         if (work->func == wq_barrier_func) {
                 struct wq_barrier *barr;
 
                 barr = container_of(work, struct wq_barrier, work);
 
+                pr_cont_work_flush(comma, (work_func_t)-1, pcwsp);
                 pr_cont("%s BAR(%d)", comma ? "," : "",
                         task_pid_nr(barr->task));
         } else {
-                pr_cont("%s %ps", comma ? "," : "", work->func);
+                if (!comma)
+                        pr_cont_work_flush(comma, (work_func_t)-1, pcwsp);
+                pr_cont_work_flush(comma, work->func, pcwsp);
         }
 }
 
 static void show_pwq(struct pool_workqueue *pwq)
 {
+        struct pr_cont_work_struct pcws = { .ctr = 0, };
         struct worker_pool *pool = pwq->pool;
         struct work_struct *work;
         struct worker *worker;
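
Annotation: the new pr_cont_work_struct / pr_cont_work_flush() pair run-length encodes consecutive works that share a function, so a backlog prints once as "N*func" instead of repeating the symbol N times; the (work_func_t)-1 sentinel forces a final flush without recording anything. A standalone sketch of the same state machine using plain printf (function addresses stand in for the kernel's %ps symbol names; the integer/function-pointer casts mirror the kernel's sentinel trick):

    #include <stdio.h>

    typedef void (*work_func_t)(void);

    struct pr_cont_work_struct {
            int comma;           /* bool in the kernel version */
            work_func_t func;
            long ctr;
    };

    static void pr_cont_work_flush(int comma, work_func_t func,
                                   struct pr_cont_work_struct *pcwsp)
    {
            if (!pcwsp->ctr)
                    goto out_record;
            if (func == pcwsp->func) {      /* same function: extend the run */
                    pcwsp->ctr++;
                    return;
            }
            if (pcwsp->ctr == 1)
                    printf("%s %p", pcwsp->comma ? "," : "", (void *)pcwsp->func);
            else
                    printf("%s %ld*%p", pcwsp->comma ? "," : "",
                           pcwsp->ctr, (void *)pcwsp->func);
            pcwsp->ctr = 0;
    out_record:
            if ((long)func == -1L)          /* sentinel: flush only, record nothing */
                    return;
            pcwsp->comma = comma;
            pcwsp->func = func;
            pcwsp->ctr = 1;
    }

    static void fa(void) {}
    static void fb(void) {}

    int main(void)
    {
            struct pr_cont_work_struct pcws = { .ctr = 0 };
            work_func_t queue[] = { fa, fa, fa, fb };

            for (unsigned i = 0; i < sizeof(queue) / sizeof(queue[0]); i++)
                    pr_cont_work_flush(1, queue[i], &pcws);
            pr_cont_work_flush(1, (work_func_t)-1L, &pcws);  /* final flush */
            printf("\n");                   /* prints e.g. ", 3*0x... , 0x..." */
            return 0;
    }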
@@ -4864,7 +4896,8 @@ static void show_pwq(struct pool_workqueue *pwq)
                          worker->rescue_wq ? "(RESCUER)" : "",
                          worker->current_func);
                         list_for_each_entry(work, &worker->scheduled, entry)
-                                pr_cont_work(false, work);
+                                pr_cont_work(false, work, &pcws);
+                        pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
                         comma = true;
                 }
                 pr_cont("\n");
@@ -4884,9 +4917,10 @@ static void show_pwq(struct pool_workqueue *pwq)
                         if (get_work_pwq(work) != pwq)
                                 continue;
 
-                        pr_cont_work(comma, work);
+                        pr_cont_work(comma, work, &pcws);
                         comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
                 }
+                pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
                 pr_cont("\n");
         }
 
@@ -4895,33 +4929,23 @@ static void show_pwq(struct pool_workqueue *pwq)
 
                 pr_info("    inactive:");
                 list_for_each_entry(work, &pwq->inactive_works, entry) {
-                        pr_cont_work(comma, work);
+                        pr_cont_work(comma, work, &pcws);
                         comma = !(*work_data_bits(work) & WORK_STRUCT_LINKED);
                 }
+                pr_cont_work_flush(comma, (work_func_t)-1L, &pcws);
                 pr_cont("\n");
         }
 }
 
 /**
- * show_workqueue_state - dump workqueue state
- *
- * Called from a sysrq handler or try_to_freeze_tasks() and prints out
- * all busy workqueues and pools.
+ * show_one_workqueue - dump state of specified workqueue
+ * @wq: workqueue whose state will be printed
  */
-void show_workqueue_state(void)
+void show_one_workqueue(struct workqueue_struct *wq)
 {
-        struct workqueue_struct *wq;
-        struct worker_pool *pool;
-        unsigned long flags;
-        int pi;
-
-        rcu_read_lock();
-
-        pr_info("Showing busy workqueues and worker pools:\n");
-
-        list_for_each_entry_rcu(wq, &workqueues, list) {
         struct pool_workqueue *pwq;
         bool idle = true;
+        unsigned long flags;
 
         for_each_pwq(pwq, wq) {
                 if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
@@ -4929,33 +4953,59 @@ void show_workqueue_state(void)
                         idle = false;
                         break;
                 }
         }
-        if (idle)
-                continue;
+        if (idle) /* Nothing to print for idle workqueue */
+                return;
 
         pr_info("workqueue %s: flags=0x%x\n", wq->name, wq->flags);
 
         for_each_pwq(pwq, wq) {
                 raw_spin_lock_irqsave(&pwq->pool->lock, flags);
-                if (pwq->nr_active || !list_empty(&pwq->inactive_works))
+                if (pwq->nr_active || !list_empty(&pwq->inactive_works)) {
+#ifndef CONFIG_PREEMPT_RT
+                        /*
+                         * Defer printing to avoid deadlocks in console
+                         * drivers that queue work while holding locks
+                         * also taken in their write paths.
+                         */
+                        printk_deferred_enter();
                         show_pwq(pwq);
+                        printk_deferred_exit();
+#else
+                        show_pwq(pwq);
+#endif
+                }
                 raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);
                 /*
                  * We could be printing a lot from atomic context, e.g.
-                 * sysrq-t -> show_workqueue_state(). Avoid triggering
+                 * sysrq-t -> show_all_workqueues(). Avoid triggering
                  * hard lockup.
                  */
                 touch_nmi_watchdog();
         }
+
 }
 
-        for_each_pool(pool, pi) {
+/**
+ * show_one_worker_pool - dump state of specified worker pool
+ * @pool: worker pool whose state will be printed
+ */
+static void show_one_worker_pool(struct worker_pool *pool)
+{
         struct worker *worker;
         bool first = true;
+        unsigned long flags;
 
         raw_spin_lock_irqsave(&pool->lock, flags);
         if (pool->nr_workers == pool->nr_idle)
                 goto next_pool;
-
+#ifndef CONFIG_PREEMPT_RT
+        /*
+         * Defer printing to avoid deadlocks in console drivers that
+         * queue work while holding locks also taken in their write
+         * paths.
+         */
+        printk_deferred_enter();
+#endif
         pr_info("pool %d:", pool->id);
         pr_cont_pool_info(pool);
         pr_cont(" hung=%us workers=%d",
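
Annotation: the printk_deferred_enter()/printk_deferred_exit() bracket exists because show_pwq() prints while pool->lock is held; if the console driver's write path itself queues work (and so takes a pool lock), synchronous console output could self-deadlock. Deferring makes printk() only buffer the message and flush it later from a safe context. Kernel-style excerpt of the shape used above (not standalone code):

    raw_spin_lock_irqsave(&pwq->pool->lock, flags);
    printk_deferred_enter();     /* console output buffered from here ...  */
    show_pwq(pwq);               /* may printk() heavily under pool->lock  */
    printk_deferred_exit();      /* ... to here; flushed outside the lock  */
    raw_spin_unlock_irqrestore(&pwq->pool->lock, flags);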
@@ -4970,16 +5020,42 @@
                 first = false;
         }
         pr_cont("\n");
+#ifndef CONFIG_PREEMPT_RT
+        printk_deferred_exit();
+#endif
 next_pool:
         raw_spin_unlock_irqrestore(&pool->lock, flags);
         /*
          * We could be printing a lot from atomic context, e.g.
-         * sysrq-t -> show_workqueue_state(). Avoid triggering
+         * sysrq-t -> show_all_workqueues(). Avoid triggering
          * hard lockup.
          */
         touch_nmi_watchdog();
+
 }
 
+/**
+ * show_all_workqueues - dump workqueue state
+ *
+ * Called from a sysrq handler or try_to_freeze_tasks() and prints out
+ * all busy workqueues and pools.
+ */
+void show_all_workqueues(void)
+{
+        struct workqueue_struct *wq;
+        struct worker_pool *pool;
+        int pi;
+
+        rcu_read_lock();
+
+        pr_info("Showing busy workqueues and worker pools:\n");
+
+        list_for_each_entry_rcu(wq, &workqueues, list)
+                show_one_workqueue(wq);
+
+        for_each_pool(pool, pi)
+                show_one_worker_pool(pool);
+
         rcu_read_unlock();
 }
 
@@ -5935,7 +6011,7 @@ static void wq_watchdog_timer_fn(struct timer_list *unused)
         rcu_read_unlock();
 
         if (lockup_detected)
-                show_workqueue_state();
+                show_all_workqueues();
 
         wq_watchdog_reset_touched();
         mod_timer(&wq_watchdog_timer, jiffies + thresh);