Import of kernel-6.12.0-55.25.1.el10_0

commit 051e45fafa
parent b13887cf82
@@ -12,7 +12,7 @@ RHEL_MINOR = 0
 #
 # Use this spot to avoid future merge conflicts.
 # Do not trim this comment.
-RHEL_RELEASE = 55.24.1
+RHEL_RELEASE = 55.25.1
 
 #
 # RHEL_REBASE_NUM
@@ -833,9 +833,9 @@ static void hid_ishtp_cl_remove(struct ishtp_cl_device *cl_device)
                         hid_ishtp_cl);
 
         dev_dbg(ishtp_device(cl_device), "%s\n", __func__);
-        hid_ishtp_cl_deinit(hid_ishtp_cl);
         ishtp_put_device(cl_device);
         ishtp_hid_remove(client_data);
+        hid_ishtp_cl_deinit(hid_ishtp_cl);
 
         hid_ishtp_cl = NULL;
 
@@ -261,12 +261,14 @@ err_hid_data:
  */
 void ishtp_hid_remove(struct ishtp_cl_data *client_data)
 {
+        void *data;
         int i;
 
         for (i = 0; i < client_data->num_hid_devices; ++i) {
                 if (client_data->hid_sensor_hubs[i]) {
-                        kfree(client_data->hid_sensor_hubs[i]->driver_data);
+                        data = client_data->hid_sensor_hubs[i]->driver_data;
                         hid_destroy_device(client_data->hid_sensor_hubs[i]);
+                        kfree(data);
                         client_data->hid_sensor_hubs[i] = NULL;
                 }
         }
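The two intel-ish-hid hunks above fix a use-after-free by reordering teardown: memory that a device object may still dereference is released only after that object is destroyed. A minimal userspace sketch of the save/destroy/free ordering, with hypothetical names rather than the driver's API:

#include <stdio.h>
#include <stdlib.h>

struct device { void *driver_data; };

/* Teardown may still read dev->driver_data, so it must run first. */
static void destroy_device(struct device *dev)
{
        printf("flushing %p\n", dev->driver_data);
        free(dev);
}

int main(void)
{
        struct device *dev = malloc(sizeof(*dev));
        void *data = malloc(16);

        dev->driver_data = data;

        /* Buggy order would be: free(data); destroy_device(dev); */
        destroy_device(dev);    /* last user of driver_data */
        free(data);             /* safe only after the user is gone */
        return 0;
}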
@@ -1357,9 +1357,14 @@ static void ib_device_notify_register(struct ib_device *device)
         u32 port;
         int ret;
 
+        down_read(&devices_rwsem);
+
+        /* Mark for userspace that device is ready */
+        kobject_uevent(&device->dev.kobj, KOBJ_ADD);
+
         ret = rdma_nl_notify_event(device, 0, RDMA_REGISTER_EVENT);
         if (ret)
-                return;
+                goto out;
 
         rdma_for_each_port(device, port) {
                 netdev = ib_device_get_netdev(device, port);
@@ -1370,8 +1375,11 @@ static void ib_device_notify_register(struct ib_device *device)
                                          RDMA_NETDEV_ATTACH_EVENT);
                 dev_put(netdev);
                 if (ret)
-                        return;
+                        goto out;
         }
+
+out:
+        up_read(&devices_rwsem);
 }
 
 /**
@@ -1470,10 +1478,9 @@ int ib_register_device(struct ib_device *device, const char *name,
                 return ret;
         }
         dev_set_uevent_suppress(&device->dev, false);
-        /* Mark for userspace that device is ready */
-        kobject_uevent(&device->dev.kobj, KOBJ_ADD);
+
         ib_device_notify_register(device);
 
         ib_device_put(device);
 
         return 0;
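The three RDMA hunks above move the "device is ready" uevent and the netlink notifications under one read-side hold of devices_rwsem, and turn early returns into a goto so the lock is always released. A compilable sketch of that single-exit locking pattern using a pthreads rwlock (names hypothetical):

#include <pthread.h>
#include <stdio.h>

static pthread_rwlock_t devices_lock = PTHREAD_RWLOCK_INITIALIZER;

static int notify_one(int port)
{
        printf("notify port %d\n", port);
        return port == 3 ? -1 : 0;      /* simulate a failure */
}

static void notify_register(void)
{
        int port;

        pthread_rwlock_rdlock(&devices_lock);

        for (port = 1; port <= 4; port++)
                if (notify_one(port))
                        goto out;       /* early exit still unlocks */
out:
        pthread_rwlock_unlock(&devices_lock);
}

int main(void)
{
        notify_register();
        return 0;
}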
@@ -813,6 +813,7 @@ static void rtsx_usb_ms_drv_remove(struct platform_device *pdev)
 
         host->eject = true;
         cancel_work_sync(&host->handle_req);
+        cancel_delayed_work_sync(&host->poll_card);
 
         mutex_lock(&host->host_mutex);
         if (host->req) {
@@ -178,6 +178,7 @@ static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc)
 {
         struct usbnet *dev = netdev_priv(netdev);
         unsigned char buff[2];
+        int ret;
 
         netdev_dbg(netdev, "%s phy_id:%02x loc:%02x\n",
                    __func__, phy_id, loc);
@@ -185,8 +186,10 @@ static int ch9200_mdio_read(struct net_device *netdev, int phy_id, int loc)
         if (phy_id != 0)
                 return -ENODEV;
 
-        control_read(dev, REQUEST_READ, 0, loc * 2, buff, 0x02,
+        ret = control_read(dev, REQUEST_READ, 0, loc * 2, buff, 0x02,
                      CONTROL_TIMEOUT_MS);
+        if (ret < 0)
+                return ret;
 
         return (buff[0] | buff[1] << 8);
 }
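The ch9200 hunks above make the MDIO read propagate a failed control transfer instead of decoding a buffer that was never filled. A sketch of the check-before-use pattern (the transfer function here is a stand-in, not the driver's helper):

#include <stdio.h>
#include <string.h>
#include <errno.h>

/* Stand-in for a USB control read; fills buf only on success. */
static int control_read(unsigned char *buf, size_t len)
{
        if (len != 2)
                return -EINVAL;
        memcpy(buf, "\x34\x12", 2);
        return 0;
}

static int mdio_read(void)
{
        unsigned char buff[2];
        int ret;

        ret = control_read(buff, sizeof(buff));
        if (ret < 0)
                return ret;     /* don't decode an unfilled buffer */

        return buff[0] | buff[1] << 8;
}

int main(void)
{
        printf("reg = 0x%04x\n", mdio_read());
        return 0;
}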
@@ -450,7 +450,8 @@ nvme_tcp_fetch_request(struct nvme_tcp_queue *queue)
                         return NULL;
         }
 
-        list_del(&req->entry);
+        list_del_init(&req->entry);
+        init_llist_node(&req->lentry);
         return req;
 }
 
@@ -558,6 +559,8 @@ static int nvme_tcp_init_request(struct blk_mq_tag_set *set,
         req->queue = queue;
         nvme_req(rq)->ctrl = &ctrl->ctrl;
         nvme_req(rq)->cmd = &pdu->cmd;
+        init_llist_node(&req->lentry);
+        INIT_LIST_HEAD(&req->entry);
 
         return 0;
 }
@@ -762,6 +765,14 @@ static int nvme_tcp_handle_r2t(struct nvme_tcp_queue *queue,
                 return -EPROTO;
         }
 
+        if (llist_on_list(&req->lentry) ||
+            !list_empty(&req->entry)) {
+                dev_err(queue->ctrl->ctrl.device,
+                        "req %d unexpected r2t while processing request\n",
+                        rq->tag);
+                return -EPROTO;
+        }
+
         req->pdu_len = 0;
         req->h2cdata_left = r2t_length;
         req->h2cdata_offset = r2t_offset;
@@ -2481,6 +2492,8 @@ static void nvme_tcp_submit_async_event(struct nvme_ctrl *arg)
         ctrl->async_req.offset = 0;
         ctrl->async_req.curr_bio = NULL;
         ctrl->async_req.data_len = 0;
+        init_llist_node(&ctrl->async_req.lentry);
+        INIT_LIST_HEAD(&ctrl->async_req.entry);
 
         nvme_tcp_queue_request(&ctrl->async_req, true, true);
 }
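The four nvme-tcp hunks above initialize the list/llist nodes and switch to list_del_init() so that a request's list membership can be tested later; the new duplicate-R2T check relies on list_empty()/llist_on_list() giving a meaningful answer. A self-contained sketch of why list_del_init() matters for such membership checks (a minimal intrusive list, not the kernel headers):

#include <assert.h>
#include <stdio.h>

struct list_head { struct list_head *next, *prev; };

static void INIT_LIST_HEAD(struct list_head *h) { h->next = h->prev = h; }

static void list_add(struct list_head *n, struct list_head *h)
{
        n->next = h->next;
        n->prev = h;
        h->next->prev = n;
        h->next = n;
}

static void list_del_init(struct list_head *n)
{
        n->prev->next = n->next;
        n->next->prev = n->prev;
        INIT_LIST_HEAD(n);      /* node now reads as "not on any list" */
}

static int list_empty(const struct list_head *h) { return h->next == h; }

int main(void)
{
        struct list_head queue, req;

        INIT_LIST_HEAD(&queue);
        INIT_LIST_HEAD(&req);

        list_add(&req, &queue);
        assert(!list_empty(&req));      /* linked: adding again would be a bug */

        list_del_init(&req);
        assert(list_empty(&req));       /* plain list_del() leaves stale links */
        printf("membership test stays valid after removal\n");
        return 0;
}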
@@ -67,6 +67,12 @@
  */
 #define USB_SHORT_SET_ADDRESS_REQ_TIMEOUT 500 /* ms */
 
+/*
+ * Give SS hubs 200ms time after wake to train downstream links before
+ * assuming no port activity and allowing hub to runtime suspend back.
+ */
+#define USB_SS_PORT_U0_WAKE_TIME 200 /* ms */
+
 /* Protect struct usb_device->state and ->children members
  * Note: Both are also protected by ->dev.sem, except that ->state can
  * change to USB_STATE_NOTATTACHED even when the semaphore isn't held. */
@@ -1094,6 +1100,7 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
                         goto init2;
                 goto init3;
         }
+
         hub_get(hub);
 
         /* The superspeed hub except for root hub has to use Hub Depth
@@ -1342,6 +1349,17 @@ static void hub_activate(struct usb_hub *hub, enum hub_activation_type type)
                         device_unlock(&hdev->dev);
         }
 
+        if (type == HUB_RESUME && hub_is_superspeed(hub->hdev)) {
+                /* give usb3 downstream links training time after hub resume */
+                usb_autopm_get_interface_no_resume(
+                                to_usb_interface(hub->intfdev));
+
+                queue_delayed_work(system_power_efficient_wq,
+                                   &hub->post_resume_work,
+                                   msecs_to_jiffies(USB_SS_PORT_U0_WAKE_TIME));
+                return;
+        }
+
         hub_put(hub);
 }
 
@@ -1360,6 +1378,14 @@ static void hub_init_func3(struct work_struct *ws)
         hub_activate(hub, HUB_INIT3);
 }
 
+static void hub_post_resume(struct work_struct *ws)
+{
+        struct usb_hub *hub = container_of(ws, struct usb_hub, post_resume_work.work);
+
+        usb_autopm_put_interface_async(to_usb_interface(hub->intfdev));
+        hub_put(hub);
+}
+
 enum hub_quiescing_type {
         HUB_DISCONNECT, HUB_PRE_RESET, HUB_SUSPEND
 };
@@ -1385,6 +1411,7 @@ static void hub_quiesce(struct usb_hub *hub, enum hub_quiescing_type type)
 
         /* Stop hub_wq and related activity */
         del_timer_sync(&hub->irq_urb_retry);
+        flush_delayed_work(&hub->post_resume_work);
         usb_kill_urb(hub->urb);
         if (hub->has_indicators)
                 cancel_delayed_work_sync(&hub->leds);
@@ -1932,6 +1959,7 @@ static int hub_probe(struct usb_interface *intf, const struct usb_device_id *id)
         hub->hdev = hdev;
         INIT_DELAYED_WORK(&hub->leds, led_work);
         INIT_DELAYED_WORK(&hub->init_work, NULL);
+        INIT_DELAYED_WORK(&hub->post_resume_work, hub_post_resume);
         INIT_WORK(&hub->events, hub_event);
         INIT_LIST_HEAD(&hub->onboard_devs);
         spin_lock_init(&hub->irq_urb_lock);
@@ -70,6 +70,7 @@ struct usb_hub {
         u8 indicator[USB_MAXCHILDREN];
         struct delayed_work leds;
         struct delayed_work init_work;
+        struct delayed_work post_resume_work;
         struct work_struct events;
         spinlock_t irq_urb_lock;
         struct timer_list irq_urb_retry;
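The USB hub hunks above queue a delayed post-resume work item that pins the hub (hub_get() plus a runtime-PM count) and flush it in hub_quiesce() so it can never run against a torn-down hub. A compilable pthreads sketch of the queue-with-reference, flush-before-free discipline (all names hypothetical, a thread standing in for a workqueue):

#include <pthread.h>
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>

struct hub {
        int refs;               /* toy refcount; not thread-safe, demo only */
        pthread_t worker;
        int queued;
};

static void *post_resume(void *arg)
{
        struct hub *hub = arg;

        usleep(200 * 1000);     /* the 200ms link-training delay */
        printf("post-resume work ran\n");
        hub->refs--;            /* drop the ref taken when queueing */
        return NULL;
}

static void queue_post_resume(struct hub *hub)
{
        hub->refs++;            /* pending work owns a reference */
        hub->queued = 1;
        pthread_create(&hub->worker, NULL, post_resume, hub);
}

static void hub_quiesce(struct hub *hub)
{
        if (hub->queued)        /* flush: wait out pending work first */
                pthread_join(hub->worker, NULL);
}

int main(void)
{
        struct hub *hub = calloc(1, sizeof(*hub));

        hub->refs = 1;
        queue_post_resume(hub);
        hub_quiesce(hub);       /* without this, free() below could race */
        free(hub);
        return 0;
}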
@@ -1820,7 +1820,8 @@ static inline int ext4_valid_inum(struct super_block *sb, unsigned long ino)
  */
 enum {
         EXT4_MF_MNTDIR_SAMPLED,
-        EXT4_MF_FC_INELIGIBLE /* Fast commit ineligible */
+        EXT4_MF_FC_INELIGIBLE, /* Fast commit ineligible */
+        EXT4_MF_JOURNAL_DESTROY /* Journal is in process of destroying */
 };
 
 static inline void ext4_set_mount_flag(struct super_block *sb, int bit)
@@ -513,4 +513,33 @@ static inline int ext4_should_dioread_nolock(struct inode *inode)
         return 1;
 }
 
+/*
+ * Pass journal explicitly as it may not be cached in the sbi->s_journal in some
+ * cases
+ */
+static inline int ext4_journal_destroy(struct ext4_sb_info *sbi, journal_t *journal)
+{
+        int err = 0;
+
+        /*
+         * At this point only two things can be operating on the journal.
+         * JBD2 thread performing transaction commit and s_sb_upd_work
+         * issuing sb update through the journal. Once we set
+         * EXT4_JOURNAL_DESTROY, new ext4_handle_error() calls will not
+         * queue s_sb_upd_work and ext4_force_commit() makes sure any
+         * ext4_handle_error() calls from the running transaction commit are
+         * finished. Hence no new s_sb_upd_work can be queued after we
+         * flush it here.
+         */
+        ext4_set_mount_flag(sbi->s_sb, EXT4_MF_JOURNAL_DESTROY);
+
+        ext4_force_commit(sbi->s_sb);
+        flush_work(&sbi->s_sb_upd_work);
+
+        err = jbd2_journal_destroy(journal);
+        sbi->s_journal = NULL;
+
+        return err;
+}
+
 #endif	/* _EXT4_JBD2_H */
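The new ext4_journal_destroy() helper encodes a strict shutdown order: publish the "destroying" flag first, force out the running transaction, flush the deferred superblock work, and only then destroy the journal. A reduced sketch of that flag-then-flush ordering with C11 atomics (hypothetical names, single-threaded for clarity):

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool journal_destroying;
static atomic_bool work_queued;

/* Error paths call this; it must not queue work during shutdown. */
static void handle_error(void)
{
        if (!atomic_load(&journal_destroying))
                atomic_store(&work_queued, true);       /* defer sb update */
        else
                printf("update sb directly\n");
}

static void flush_work(void)
{
        if (atomic_exchange(&work_queued, false))
                printf("deferred sb update ran\n");
}

static void journal_destroy(void)
{
        atomic_store(&journal_destroying, true);        /* 1: no new work */
        handle_error();                                 /* late error: direct path */
        flush_work();                                   /* 2: drain what exists */
        printf("journal destroyed\n");                  /* 3: now safe */
}

int main(void)
{
        handle_error();         /* early error: gets queued */
        journal_destroy();
        return 0;
}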
@@ -712,9 +712,13 @@ static void ext4_handle_error(struct super_block *sb, bool force_ro, int error,
                  * In case the fs should keep running, we need to writeout
                  * superblock through the journal. Due to lock ordering
                  * constraints, it may not be safe to do it right here so we
-                 * defer superblock flushing to a workqueue.
+                 * defer superblock flushing to a workqueue. We just need to be
+                 * careful when the journal is already shutting down. If we get
+                 * here in that case, just update the sb directly as the last
+                 * transaction won't commit anyway.
                  */
-                if (continue_fs && journal)
+                if (continue_fs && journal &&
+                    !ext4_test_mount_flag(sb, EXT4_MF_JOURNAL_DESTROY))
                         schedule_work(&EXT4_SB(sb)->s_sb_upd_work);
                 else
                         ext4_commit_super(sb);
@@ -1299,18 +1303,17 @@ static void ext4_put_super(struct super_block *sb)
         ext4_unregister_li_request(sb);
         ext4_quotas_off(sb, EXT4_MAXQUOTAS);
 
-        flush_work(&sbi->s_sb_upd_work);
         destroy_workqueue(sbi->rsv_conversion_wq);
         ext4_release_orphan_info(sb);
 
         if (sbi->s_journal) {
                 aborted = is_journal_aborted(sbi->s_journal);
-                err = jbd2_journal_destroy(sbi->s_journal);
-                sbi->s_journal = NULL;
+                err = ext4_journal_destroy(sbi, sbi->s_journal);
                 if ((err < 0) && !aborted) {
                         ext4_abort(sb, -err, "Couldn't clean up the journal");
                 }
-        }
+        } else
+                flush_work(&sbi->s_sb_upd_work);
 
         ext4_es_unregister_shrinker(sbi);
         timer_shutdown_sync(&sbi->s_err_report);
@@ -4938,10 +4941,7 @@ static int ext4_load_and_init_journal(struct super_block *sb,
         return 0;
 
 out:
-        /* flush s_sb_upd_work before destroying the journal. */
-        flush_work(&sbi->s_sb_upd_work);
-        jbd2_journal_destroy(sbi->s_journal);
-        sbi->s_journal = NULL;
+        ext4_journal_destroy(sbi, sbi->s_journal);
         return -EINVAL;
 }
 
@@ -5637,10 +5637,7 @@ failed_mount_wq:
         sbi->s_ea_block_cache = NULL;
 
         if (sbi->s_journal) {
-                /* flush s_sb_upd_work before journal destroy. */
-                flush_work(&sbi->s_sb_upd_work);
-                jbd2_journal_destroy(sbi->s_journal);
-                sbi->s_journal = NULL;
+                ext4_journal_destroy(sbi, sbi->s_journal);
         }
 failed_mount3a:
         ext4_es_unregister_shrinker(sbi);
@@ -5948,7 +5945,7 @@ static journal_t *ext4_open_dev_journal(struct super_block *sb,
         return journal;
 
 out_journal:
-        jbd2_journal_destroy(journal);
+        ext4_journal_destroy(EXT4_SB(sb), journal);
 out_bdev:
         bdev_fput(bdev_file);
         return ERR_PTR(errno);
@@ -6065,8 +6062,7 @@ static int ext4_load_journal(struct super_block *sb,
         EXT4_SB(sb)->s_journal = journal;
         err = ext4_clear_journal_err(sb, es);
         if (err) {
-                EXT4_SB(sb)->s_journal = NULL;
-                jbd2_journal_destroy(journal);
+                ext4_journal_destroy(EXT4_SB(sb), journal);
                 return err;
         }
 
@@ -6084,7 +6080,7 @@ static int ext4_load_journal(struct super_block *sb,
         return 0;
 
 err_out:
-        jbd2_journal_destroy(journal);
+        ext4_journal_destroy(EXT4_SB(sb), journal);
         return err;
 }
 
@@ -47,6 +47,22 @@ struct padata_mt_job_state {
 static void padata_free_pd(struct parallel_data *pd);
 static void __init padata_mt_helper(struct work_struct *work);
 
+static inline void padata_get_pd(struct parallel_data *pd)
+{
+        refcount_inc(&pd->refcnt);
+}
+
+static inline void padata_put_pd_cnt(struct parallel_data *pd, int cnt)
+{
+        if (refcount_sub_and_test(cnt, &pd->refcnt))
+                padata_free_pd(pd);
+}
+
+static inline void padata_put_pd(struct parallel_data *pd)
+{
+        padata_put_pd_cnt(pd, 1);
+}
+
 static int padata_index_to_cpu(struct parallel_data *pd, int cpu_index)
 {
         int cpu, target_cpu;
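The padata hunks introduce get/put wrappers so every path takes and drops parallel_data references through one place and the structure is freed exactly once. A userspace sketch of the same wrappers over C11 atomics (the names mirror the patch, the implementation is illustrative, not kernel refcount_t):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct parallel_data { atomic_int refcnt; };

static void padata_free_pd(struct parallel_data *pd)
{
        printf("freeing pd\n");
        free(pd);
}

static void padata_get_pd(struct parallel_data *pd)
{
        atomic_fetch_add(&pd->refcnt, 1);
}

static void padata_put_pd_cnt(struct parallel_data *pd, int cnt)
{
        /* fetch_sub returns the old value; equal to cnt means now zero */
        if (atomic_fetch_sub(&pd->refcnt, cnt) == cnt)
                padata_free_pd(pd);
}

static void padata_put_pd(struct parallel_data *pd)
{
        padata_put_pd_cnt(pd, 1);
}

int main(void)
{
        struct parallel_data *pd = malloc(sizeof(*pd));

        atomic_init(&pd->refcnt, 1);
        padata_get_pd(pd);              /* e.g. a queued reorder_work holds a ref */
        padata_put_pd(pd);              /* work finished */
        padata_put_pd_cnt(pd, 1);       /* final owner drops: frees */
        return 0;
}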
@@ -206,7 +222,7 @@ int padata_do_parallel(struct padata_shell *ps,
         if ((pinst->flags & PADATA_RESET))
                 goto out;
 
-        refcount_inc(&pd->refcnt);
+        padata_get_pd(pd);
         padata->pd = pd;
         padata->cb_cpu = *cb_cpu;
 
@@ -336,8 +352,14 @@ static void padata_reorder(struct parallel_data *pd)
         smp_mb();
 
         reorder = per_cpu_ptr(pd->reorder_list, pd->cpu);
-        if (!list_empty(&reorder->list) && padata_find_next(pd, false))
+        if (!list_empty(&reorder->list) && padata_find_next(pd, false)) {
+                /*
+                 * Other context(eg. the padata_serial_worker) can finish the request.
+                 * To avoid UAF issue, add pd ref here, and put pd ref after reorder_work finish.
+                 */
+                padata_get_pd(pd);
                 queue_work(pinst->serial_wq, &pd->reorder_work);
+        }
 }
 
 static void invoke_padata_reorder(struct work_struct *work)
@@ -348,6 +370,8 @@ static void invoke_padata_reorder(struct work_struct *work)
         pd = container_of(work, struct parallel_data, reorder_work);
         padata_reorder(pd);
         local_bh_enable();
+        /* Pairs with putting the reorder_work in the serial_wq */
+        padata_put_pd(pd);
 }
 
 static void padata_serial_worker(struct work_struct *serial_work)
@@ -380,8 +404,7 @@ static void padata_serial_worker(struct work_struct *serial_work)
         }
         local_bh_enable();
 
-        if (refcount_sub_and_test(cnt, &pd->refcnt))
-                padata_free_pd(pd);
+        padata_put_pd_cnt(pd, cnt);
 }
 
 /**
@@ -521,13 +544,6 @@ void __init padata_do_multithreaded(struct padata_mt_job *job)
         ps.chunk_size = max(ps.chunk_size, 1ul);
         ps.chunk_size = roundup(ps.chunk_size, job->align);
 
-        /*
-         * chunk_size can be 0 if the caller sets min_chunk to 0. So force it
-         * to at least 1 to prevent divide-by-0 panic in padata_mt_helper().`
-         */
-        if (!ps.chunk_size)
-                ps.chunk_size = 1U;
-
         list_for_each_entry(pw, &works, pw_list)
                 if (job->numa_aware) {
                         int old_node = atomic_read(&last_used_nid);
@@ -688,8 +704,7 @@ static int padata_replace(struct padata_instance *pinst)
         synchronize_rcu();
 
         list_for_each_entry_continue_reverse(ps, &pinst->pslist, list)
-                if (refcount_dec_and_test(&ps->opd->refcnt))
-                        padata_free_pd(ps->opd);
+                padata_put_pd(ps->opd);
 
         pinst->flags &= ~PADATA_RESET;
 
@@ -977,7 +992,7 @@ static ssize_t padata_sysfs_store(struct kobject *kobj, struct attribute *attr,
 
         pinst = kobj2pinst(kobj);
         pentry = attr2pentry(attr);
-        if (pentry->show)
+        if (pentry->store)
                 ret = pentry->store(pinst, attr, buf, count);
 
         return ret;
@@ -1128,11 +1143,16 @@ void padata_free_shell(struct padata_shell *ps)
         if (!ps)
                 return;
 
+        /*
+         * Wait for all _do_serial calls to finish to avoid touching
+         * freed pd's and ps's.
+         */
+        synchronize_rcu();
+
         mutex_lock(&ps->pinst->lock);
         list_del(&ps->list);
         pd = rcu_dereference_protected(ps->pd, 1);
-        if (refcount_dec_and_test(&pd->refcnt))
-                padata_free_pd(pd);
+        padata_put_pd(pd);
         mutex_unlock(&ps->pinst->lock);
 
         kfree(ps);
@@ -243,9 +243,15 @@ static void tcp_measure_rcv_mss(struct sock *sk, const struct sk_buff *skb)
                 do_div(val, skb->truesize);
                 tcp_sk(sk)->scaling_ratio = val ? val : 1;
 
-                if (old_ratio != tcp_sk(sk)->scaling_ratio)
-                        WRITE_ONCE(tcp_sk(sk)->window_clamp,
-                                   tcp_win_from_space(sk, sk->sk_rcvbuf));
+                if (old_ratio != tcp_sk(sk)->scaling_ratio) {
+                        struct tcp_sock *tp = tcp_sk(sk);
+
+                        val = tcp_win_from_space(sk, sk->sk_rcvbuf);
+                        tcp_set_window_clamp(sk, val);
+
+                        if (tp->window_clamp < tp->rcvq_space.space)
+                                tp->rcvq_space.space = tp->window_clamp;
+                }
         }
         icsk->icsk_ack.rcv_mss = min_t(unsigned int, len,
                                        tcp_sk(sk)->advmss);
@@ -175,6 +175,11 @@ struct hfsc_sched {
 
 #define HT_INFINITY 0xffffffffffffffffULL /* infinite time value */
 
+static bool cl_in_el_or_vttree(struct hfsc_class *cl)
+{
+        return ((cl->cl_flags & HFSC_FSC) && cl->cl_nactive) ||
+                ((cl->cl_flags & HFSC_RSC) && !RB_EMPTY_NODE(&cl->el_node));
+}
 
 /*
  * eligible tree holds backlogged classes being sorted by their eligible times.
@@ -203,7 +208,10 @@ eltree_insert(struct hfsc_class *cl)
 static inline void
 eltree_remove(struct hfsc_class *cl)
 {
-        rb_erase(&cl->el_node, &cl->sched->eligible);
+        if (!RB_EMPTY_NODE(&cl->el_node)) {
+                rb_erase(&cl->el_node, &cl->sched->eligible);
+                RB_CLEAR_NODE(&cl->el_node);
+        }
 }
 
 static inline void
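The hunk above makes eltree_remove() idempotent: it erases only a node that is actually linked and re-marks it unlinked afterwards, so a reentrant enqueue cannot erase the same node twice. A standalone sketch of the sentinel-guard pattern (not the kernel rbtree API):

#include <stdio.h>

#define UNLINKED ((struct node *)0)

struct node { struct node *parent; };  /* parent doubles as the link mark */

static void tree_erase(struct node *n) { printf("erased %p\n", (void *)n); }

static void eltree_remove(struct node *n)
{
        if (n->parent != UNLINKED) {    /* RB_EMPTY_NODE analogue */
                tree_erase(n);
                n->parent = UNLINKED;   /* RB_CLEAR_NODE analogue */
        }
}

int main(void)
{
        struct node a = { .parent = &a };       /* pretend it is linked */

        eltree_remove(&a);      /* erases once */
        eltree_remove(&a);      /* second call is now a safe no-op */
        return 0;
}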
@@ -1037,6 +1045,8 @@ hfsc_change_class(struct Qdisc *sch, u32 classid, u32 parentid,
         if (cl == NULL)
                 return -ENOBUFS;
 
+        RB_CLEAR_NODE(&cl->el_node);
+
         err = tcf_block_get(&cl->block, &cl->filter_list, sch, extack);
         if (err) {
                 kfree(cl);
@@ -1225,7 +1235,8 @@ hfsc_qlen_notify(struct Qdisc *sch, unsigned long arg)
         /* vttree is now handled in update_vf() so that update_vf(cl, 0, 0)
          * needs to be called explicitly to remove a class from vttree.
          */
-        update_vf(cl, 0, 0);
+        if (cl->cl_nactive)
+                update_vf(cl, 0, 0);
         if (cl->cl_flags & HFSC_RSC)
                 eltree_remove(cl);
 }
@@ -1565,7 +1576,10 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
                 return err;
         }
 
-        if (first) {
+        sch->qstats.backlog += len;
+        sch->q.qlen++;
+
+        if (first && !cl_in_el_or_vttree(cl)) {
                 if (cl->cl_flags & HFSC_RSC)
                         init_ed(cl, len);
                 if (cl->cl_flags & HFSC_FSC)
@@ -1580,9 +1594,6 @@ hfsc_enqueue(struct sk_buff *skb, struct Qdisc *sch, struct sk_buff **to_free)
 
         }
 
-        sch->qstats.backlog += len;
-        sch->q.qlen++;
-
         return NET_XMIT_SUCCESS;
 }
 
@@ -1328,13 +1328,15 @@ static int taprio_dev_notifier(struct notifier_block *nb, unsigned long event,
 
                 stab = rtnl_dereference(q->root->stab);
 
-                oper = rtnl_dereference(q->oper_sched);
+                rcu_read_lock();
+                oper = rcu_dereference(q->oper_sched);
                 if (oper)
                         taprio_update_queue_max_sdu(q, oper, stab);
 
-                admin = rtnl_dereference(q->admin_sched);
+                admin = rcu_dereference(q->admin_sched);
                 if (admin)
                         taprio_update_queue_max_sdu(q, admin, stab);
+                rcu_read_unlock();
 
                 break;
         }
@@ -817,12 +817,20 @@ static int tipc_aead_encrypt(struct tipc_aead *aead, struct sk_buff *skb,
                 goto exit;
         }
 
+        /* Get net to avoid freed tipc_crypto when delete namespace */
+        if (!maybe_get_net(aead->crypto->net)) {
+                tipc_bearer_put(b);
+                rc = -ENODEV;
+                goto exit;
+        }
+
         /* Now, do encrypt */
         rc = crypto_aead_encrypt(req);
         if (rc == -EINPROGRESS || rc == -EBUSY)
                 return rc;
 
         tipc_bearer_put(b);
+        put_net(aead->crypto->net);
 
 exit:
         kfree(ctx);
@@ -860,6 +868,7 @@ static void tipc_aead_encrypt_done(void *data, int err)
         kfree(tx_ctx);
         tipc_bearer_put(b);
         tipc_aead_put(aead);
+        put_net(net);
 }
 
 /**