Import of kernel-4.18.0-553.100.1.el8_10

commit fa54b41fca
parent 8390f5c227

@@ -12,7 +12,7 @@ RHEL_MINOR = 10
 #
 # Use this spot to avoid future merge conflicts.
 # Do not trim this comment.
-RHEL_RELEASE = 553.97.1
+RHEL_RELEASE = 553.100.1
 
 #
 # ZSTREAM

drivers/infiniband/core/device.c
@@ -1441,8 +1441,10 @@ int ib_register_device(struct ib_device *device, const char *name,
 			return ret;
 	}
 	dev_set_uevent_suppress(&device->dev, false);
+	down_read(&devices_rwsem);
 	/* Mark for userspace that device is ready */
 	kobject_uevent(&device->dev.kobj, KOBJ_ADD);
+	up_read(&devices_rwsem);
 	ib_device_put(device);
 
 	return 0;
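
Note: the two added lines take devices_rwsem for read around the KOBJ_ADD uevent, which appears intended to keep the device from being unregistered while the "device is ready" event is being emitted.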

drivers/infiniband/hw/hfi1/sdma.c
@@ -3160,7 +3160,7 @@ int _pad_sdma_tx_descs(struct hfi1_devdata *dd, struct sdma_txreq *tx)
 {
 	int rval = 0;
 
-	if ((unlikely(tx->num_desc + 1 == tx->desc_limit))) {
+	if ((unlikely(tx->num_desc == tx->desc_limit))) {
 		rval = _extend_sdma_tx_descs(dd, tx);
 		if (rval) {
 			__sdma_txclean(dd, tx);
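
Note: as this hunk reads, the padding path used to extend the descriptor array while one slot was still free (num_desc + 1 == desc_limit); the corrected check extends it only when the array is actually full, i.e. an off-by-one fix in the limit check.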

drivers/infiniband/sw/rxe/rxe_cq.c
@@ -71,11 +71,8 @@ int rxe_cq_from_init(struct rxe_dev *rxe, struct rxe_cq *cq, int cqe,
 
 	err = do_mmap_info(rxe, uresp ? &uresp->mi : NULL, udata,
 			   cq->queue->buf, cq->queue->buf_size, &cq->queue->ip);
-	if (err) {
-		vfree(cq->queue->buf);
-		kfree(cq->queue);
+	if (err)
 		return err;
-	}
 
 	cq->is_user = uresp;
 
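
Note: on do_mmap_info() failure the completion queue buffer is no longer freed here, only the error is returned; that is consistent with the memory being cleaned up elsewhere (avoiding a double free), though the motivation is not visible in this hunk.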

drivers/usb/core/config.c
@@ -81,8 +81,14 @@ static void usb_parse_ss_endpoint_companion(struct device *ddev, int cfgno,
 	 */
 	desc = (struct usb_ss_ep_comp_descriptor *) buffer;
 
-	if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP ||
-			size < USB_DT_SS_EP_COMP_SIZE) {
+	if (size < USB_DT_SS_EP_COMP_SIZE) {
+		dev_notice(ddev,
+			   "invalid SuperSpeed endpoint companion descriptor "
+			   "of length %d, skipping\n", size);
+		return;
+	}
+
+	if (desc->bDescriptorType != USB_DT_SS_ENDPOINT_COMP) {
 		dev_notice(ddev, "No SuperSpeed endpoint companion for config %d "
 				" interface %d altsetting %d ep %d: "
 				"using minimum values\n",
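
Note: the single combined check is split so that a companion descriptor too short to hold its fields is skipped outright, while a descriptor of the wrong type still falls back to the minimum values as before. A minimal userspace sketch of the validation order this enforces (names and the 0x30 type value are stand-ins for the kernel's definitions):

#include <stdint.h>
#include <stdio.h>
#include <string.h>

struct ss_ep_comp {			/* stand-in for usb_ss_ep_comp_descriptor */
	uint8_t bLength;
	uint8_t bDescriptorType;
	uint8_t bMaxBurst;
	uint8_t bmAttributes;
};

static int parse_comp(const uint8_t *buf, size_t size)
{
	struct ss_ep_comp d;

	if (size < sizeof(d)) {		/* too short: never read the fields */
		fprintf(stderr, "descriptor of length %zu, skipping\n", size);
		return -1;
	}
	memcpy(&d, buf, sizeof(d));
	if (d.bDescriptorType != 0x30)	/* wrong type: use minimum values */
		return 0;
	return 1;			/* valid companion descriptor */
}

int main(void)
{
	const uint8_t raw[4] = { 6, 0x30, 0, 0 };
	printf("%d\n", parse_comp(raw, sizeof(raw)));	/* prints 1 */
	return 0;
}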

drivers/video/fbdev/core/bitblit.c
@@ -80,12 +80,16 @@ static inline void bit_putcs_aligned(struct vc_data *vc, struct fb_info *info,
 				     struct fb_image *image, u8 *buf, u8 *dst)
 {
 	u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
+	unsigned int charcnt = vc->vc_font.charcount;
 	u32 idx = vc->vc_font.width >> 3;
 	u8 *src;
 
 	while (cnt--) {
-		src = vc->vc_font.data + (scr_readw(s++)&
-			charmask)*cellsize;
+		u16 ch = scr_readw(s++) & charmask;
+
+		if (ch >= charcnt)
+			ch = 0;
+		src = vc->vc_font.data + (unsigned int)ch * cellsize;
 
 		if (attr) {
 			update_attr(buf, src, attr, vc);
@@ -113,14 +117,18 @@ static inline void bit_putcs_unaligned(struct vc_data *vc,
 				       u8 *dst)
 {
 	u16 charmask = vc->vc_hi_font_mask ? 0x1ff : 0xff;
+	unsigned int charcnt = vc->vc_font.charcount;
 	u32 shift_low = 0, mod = vc->vc_font.width % 8;
 	u32 shift_high = 8;
 	u32 idx = vc->vc_font.width >> 3;
 	u8 *src;
 
 	while (cnt--) {
-		src = vc->vc_font.data + (scr_readw(s++)&
-			charmask)*cellsize;
+		u16 ch = scr_readw(s++) & charmask;
+
+		if (ch >= charcnt)
+			ch = 0;
+		src = vc->vc_font.data + (unsigned int)ch * cellsize;
 
 		if (attr) {
 			update_attr(buf, src, attr, vc);
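
Note: in both the aligned and unaligned paths, the glyph index read from screen memory is now bounded by the font's character count before it is used to index the glyph bitmaps, so an index past vc_font.charcount selects glyph 0 instead of reading out of bounds. A minimal userspace sketch of the clamping pattern (the 256-glyph font is an assumption for illustration):

#include <stdint.h>
#include <stdio.h>

#define CHARCNT 256			/* stand-in for vc->vc_font.charcount */
static uint8_t font[CHARCNT][16];	/* 8x16 glyph bitmaps */

static const uint8_t *glyph(uint16_t ch, uint16_t charmask)
{
	ch &= charmask;			/* as scr_readw(s) & charmask */
	if (ch >= CHARCNT)		/* the bound check this patch adds */
		ch = 0;
	return font[ch];
}

int main(void)
{
	/* 0x1ff survives a 0x1ff mask but would overrun a 256-glyph font
	 * without the check above. */
	printf("%p\n", (void *)glyph(0x1ff, 0x1ff));
	return 0;
}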

fs/namei.c
@@ -1142,7 +1142,7 @@ const char *get_link(struct nameidata *nd)
 			return ERR_PTR(error);
 
 	nd->last_type = LAST_BIND;
-	res = inode->i_link;
+	res = READ_ONCE(inode->i_link);
 	if (!res) {
 		const char * (*get)(struct dentry *, struct inode *,
 				    struct delayed_call *);
@@ -4936,29 +4936,6 @@ out:
 	return len;
 }
 
-/*
- * A helper for ->readlink(). This should be used *ONLY* for symlinks that
- * have ->get_link() not calling nd_jump_link(). Using (or not using) it
- * for any given inode is up to filesystem.
- */
-static int generic_readlink(struct dentry *dentry, char __user *buffer,
-			    int buflen)
-{
-	DEFINE_DELAYED_CALL(done);
-	struct inode *inode = d_inode(dentry);
-	const char *link = inode->i_link;
-	int res;
-
-	if (!link) {
-		link = inode->i_op->get_link(dentry, inode, &done);
-		if (IS_ERR(link))
-			return PTR_ERR(link);
-	}
-	res = readlink_copy(buffer, buflen, link);
-	do_delayed_call(&done);
-	return res;
-}
-
 /**
  * vfs_readlink - copy symlink body into userspace buffer
  * @dentry: dentry on which to get symbolic link
@@ -4972,6 +4949,9 @@ static int generic_readlink(struct dentry *dentry, char __user *buffer,
 int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen)
 {
 	struct inode *inode = d_inode(dentry);
+	DEFINE_DELAYED_CALL(done);
+	const char *link;
+	int res;
 
 	if (unlikely(!(inode->i_opflags & IOP_DEFAULT_READLINK))) {
 		if (unlikely(inode->i_op->readlink))
@@ -4985,7 +4965,15 @@ int vfs_readlink(struct dentry *dentry, char __user *buffer, int buflen)
 		spin_unlock(&inode->i_lock);
 	}
 
-	return generic_readlink(dentry, buffer, buflen);
+	link = READ_ONCE(inode->i_link);
+	if (!link) {
+		link = inode->i_op->get_link(dentry, inode, &done);
+		if (IS_ERR(link))
+			return PTR_ERR(link);
+	}
+	res = readlink_copy(buffer, buflen, link);
+	do_delayed_call(&done);
+	return res;
 }
 EXPORT_SYMBOL(vfs_readlink);
 
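
Note: generic_readlink() is folded into its only remaining caller, vfs_readlink(), and ->i_link is now read with READ_ONCE() both here and in get_link() above; the annotation suggests the pointer may be published concurrently, so the marked read avoids a data race on ->i_link.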

fs/proc/generic.c
@@ -648,6 +648,12 @@ void pde_put(struct proc_dir_entry *pde)
 	}
 }
 
+static void pde_erase(struct proc_dir_entry *pde, struct proc_dir_entry *parent)
+{
+	rb_erase(&pde->subdir_node, &parent->subdir);
+	RB_CLEAR_NODE(&pde->subdir_node);
+}
+
 /*
  * Remove a /proc entry and free it if it's not currently in use.
  */
@@ -666,7 +672,7 @@ void remove_proc_entry(const char *name, struct proc_dir_entry *parent)
 
 	de = pde_subdir_find(parent, fn, len);
 	if (de)
-		rb_erase(&de->subdir_node, &parent->subdir);
+		pde_erase(de, parent);
 	write_unlock(&proc_subdir_lock);
 	if (!de) {
 		WARN(1, "name '%s'\n", name);
@@ -703,13 +709,13 @@ int remove_proc_subtree(const char *name, struct proc_dir_entry *parent)
 		write_unlock(&proc_subdir_lock);
 		return -ENOENT;
 	}
-	rb_erase(&root->subdir_node, &parent->subdir);
+	pde_erase(root, parent);
 
 	de = root;
 	while (1) {
 		next = pde_subdir_first(de);
 		if (next) {
-			rb_erase(&next->subdir_node, &de->subdir);
+			pde_erase(next, de);
 			de = next;
 			continue;
 		}
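
Note: the new pde_erase() helper pairs rb_erase() with RB_CLEAR_NODE(), so a removed entry's subdir_node is explicitly marked as not being in a tree rather than keeping stale pointers into it, and both removal paths now share the helper.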

fs/squashfs/super.c
@@ -106,10 +106,15 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
 	unsigned short flags;
 	unsigned int fragments;
 	u64 lookup_table_start, xattr_id_table_start, next_table;
-	int err;
+	int err, devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);
 
 	TRACE("Entered squashfs_fill_superblock\n");
 
+	if (!devblksize) {
+		errorf(fc, "squashfs: unable to set blocksize\n");
+		return -EINVAL;
+	}
+
 	/*
 	 * squashfs provides 'backing_dev_info' in order to disable read-ahead. For
 	 * squashfs, I/O is not deferred, it is done immediately in readpage,
@@ -131,7 +136,7 @@ static int squashfs_fill_super(struct super_block *sb, struct fs_context *fc)
 	}
 	msblk = sb->s_fs_info;
 
-	msblk->devblksize = sb_min_blocksize(sb, SQUASHFS_DEVBLK_SIZE);
+	msblk->devblksize = devblksize;
 	msblk->devblksize_log2 = ffz(~msblk->devblksize);
 
 	mutex_init(&msblk->meta_index_mutex);
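
Note: sb_min_blocksize() can return 0 when the block size cannot be set on the device; the result is now captured once, validated before any use, and reused below rather than calling sb_min_blocksize() a second time.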

net/atm/clip.c
@@ -417,6 +417,8 @@ static int clip_mkip(struct atm_vcc *vcc, int timeout)
 
 	if (!vcc->push)
 		return -EBADFD;
+	if (vcc->user_back)
+		return -EINVAL;
 	clip_vcc = kmalloc(sizeof(struct clip_vcc), GFP_KERNEL);
 	if (!clip_vcc)
 		return -ENOMEM;
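
Note: the added guard rejects a VCC whose user_back is already set, i.e. one that already carries CLIP state, before any new clip_vcc is allocated and attached.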

net/ipv4/tcp_ipv4.c
@@ -1657,8 +1657,7 @@ int tcp_v4_early_demux(struct sk_buff *skb)
 bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
 		     enum skb_drop_reason *reason)
 {
-	u32 limit = sk->sk_rcvbuf + sk->sk_sndbuf;
-	u32 tail_gso_size, tail_gso_segs;
+	u32 limit, tail_gso_size, tail_gso_segs;
 	struct skb_shared_info *shinfo;
 	const struct tcphdr *th;
 	struct tcphdr *thtail;
@@ -1762,11 +1761,13 @@ bool tcp_add_backlog(struct sock *sk, struct sk_buff *skb,
 	__skb_push(skb, hdrlen);
 
 no_coalesce:
+	limit = (u32)READ_ONCE(sk->sk_rcvbuf) + (u32)(READ_ONCE(sk->sk_sndbuf) >> 1);
+
 	/* Only socket owner can try to collapse/prune rx queues
 	 * to reduce memory overhead, so add a little headroom here.
 	 * Few sockets backlog are possibly concurrently non empty.
 	 */
-	limit += 64*1024;
+	limit += 64 * 1024;
 
 	if (unlikely(sk_add_backlog(sk, skb, limit))) {
 		bh_unlock_sock(sk);
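
Note: the backlog limit is now computed after the coalescing attempt, at the no_coalesce label, from READ_ONCE() reads of the live buffer sizes, and only half of sk_sndbuf is counted toward it; the '64 * 1024' spacing change is purely cosmetic.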

net/mptcp/options.c
@@ -1056,9 +1056,8 @@ bool mptcp_incoming_options(struct sock *sk, struct sk_buff *skb)
 	 */
 	if (TCP_SKB_CB(skb)->seq == TCP_SKB_CB(skb)->end_seq) {
 		if (mp_opt.data_fin && mp_opt.data_len == 1 &&
-		    mptcp_update_rcv_data_fin(msk, mp_opt.data_seq, mp_opt.dsn64) &&
-		    schedule_work(&msk->work))
-			sock_hold(subflow->conn);
+		    mptcp_update_rcv_data_fin(msk, mp_opt.data_seq, mp_opt.dsn64))
+			mptcp_schedule_work((struct sock *)msk);
 
 		return true;
 	}

net/mptcp/protocol.c
@@ -807,14 +807,19 @@ static void mptcp_reset_timer(struct sock *sk)
 
 bool mptcp_schedule_work(struct sock *sk)
 {
-	if (inet_sk_state_load(sk) != TCP_CLOSE &&
-	    schedule_work(&mptcp_sk(sk)->work)) {
-		/* each subflow already holds a reference to the sk, and the
-		 * workqueue is invoked by a subflow, so sk can't go away here.
-		 */
-		sock_hold(sk);
+	if (inet_sk_state_load(sk) == TCP_CLOSE)
+		return false;
+
+	/* Get a reference on this socket, mptcp_worker() will release it.
+	 * As mptcp_worker() might complete before us, we can not avoid
+	 * a sock_hold()/sock_put() if schedule_work() returns false.
+	 */
+	sock_hold(sk);
+
+	if (schedule_work(&mptcp_sk(sk)->work))
 		return true;
-	}
+
+	sock_put(sk);
 	return false;
 }
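
Note: the reworked mptcp_schedule_work() takes its reference before calling schedule_work() and drops it again when the work was already queued, accepting (per the new comment) a possibly redundant sock_hold()/sock_put() pair because mptcp_worker() might complete before the caller. A hedged userspace sketch of this hold-then-schedule pattern (not kernel code):

#include <stdatomic.h>
#include <stdbool.h>

struct obj {
	atomic_int refs;
	atomic_bool queued;		/* stand-in for the work's pending bit */
};

static void hold(struct obj *o) { atomic_fetch_add(&o->refs, 1); }
static void put(struct obj *o)  { atomic_fetch_sub(&o->refs, 1); }

/* Returns true if this call queued the work; the reference is then
 * donated to the worker, which releases it when it finishes. */
static bool schedule_obj_work(struct obj *o)
{
	hold(o);			/* take the ref first, unconditionally */
	if (!atomic_exchange(&o->queued, true))
		return true;		/* we queued it; worker owns the ref */
	put(o);				/* already queued: undo our hold */
	return false;
}

int main(void)
{
	struct obj o = { 1, false };
	schedule_obj_work(&o);		/* queues; refs goes 1 -> 2 */
	schedule_obj_work(&o);		/* already queued; refs stays 2 */
	return 0;
}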

net/mptcp/subflow.c
@@ -365,9 +365,8 @@ void mptcp_subflow_reset(struct sock *ssk)
 	tcp_set_state(ssk, TCP_CLOSE);
 	tcp_send_active_reset(ssk, GFP_ATOMIC);
 	tcp_done(ssk);
-	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags) &&
-	    schedule_work(&mptcp_sk(sk)->work))
-		return; /* worker will put sk for us */
+	if (!test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &mptcp_sk(sk)->flags))
+		mptcp_schedule_work(sk);
 
 	sock_put(sk);
 }
@@ -895,8 +894,8 @@ static enum mapping_status get_mapping_status(struct sock *ssk,
 				skb_ext_del(skb, SKB_EXT_MPTCP);
 				return MAPPING_OK;
 			} else {
-				if (updated && schedule_work(&msk->work))
-					sock_hold((struct sock *)msk);
+				if (updated)
+					mptcp_schedule_work((struct sock *)msk);
 
 				return MAPPING_DATA_FIN;
 			}
@@ -984,17 +983,12 @@ static void mptcp_subflow_discard_data(struct sock *ssk, struct sk_buff *skb,
 /* sched mptcp worker to remove the subflow if no more data is pending */
 static void subflow_sched_work_if_closed(struct mptcp_sock *msk, struct sock *ssk)
 {
-	struct sock *sk = (struct sock *)msk;
-
 	if (likely(ssk->sk_state != TCP_CLOSE))
 		return;
 
 	if (skb_queue_empty(&ssk->sk_receive_queue) &&
-	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags)) {
-		sock_hold(sk);
-		if (!schedule_work(&msk->work))
-			sock_put(sk);
-	}
+	    !test_and_set_bit(MPTCP_WORK_CLOSE_SUBFLOW, &msk->flags))
+		mptcp_schedule_work((struct sock *)msk);
 }
 
 static bool subflow_check_data_avail(struct sock *ssk)
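
Note: with the options.c and subflow.c hunks above, every open-coded "schedule_work() plus conditional sock_hold()/sock_put()" call site now goes through mptcp_schedule_work(), centralising the socket reference handling in one helper.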