Import of kernel-4.18.0-553.104.1.el8_10
parent fa54b41fca
commit 0936fb7ecc
@@ -12,7 +12,7 @@ RHEL_MINOR = 10
 #
 # Use this spot to avoid future merge conflicts.
 # Do not trim this comment.
-RHEL_RELEASE = 553.100.1
+RHEL_RELEASE = 553.104.1
 
 #
 # ZSTREAM
@@ -4763,6 +4763,7 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
 	struct i40e_hw *hw = &pf->hw;
 	struct i40e_vf *vf;
 	int abs_vf_id;
+	int old_link;
 	int ret = 0;
 
 	if (test_and_set_bit(__I40E_VIRTCHNL_OP_PENDING, pf->state)) {

@@ -4780,6 +4781,17 @@ int i40e_ndo_set_vf_link_state(struct net_device *netdev, int vf_id, int link)
 	vf = &pf->vf[vf_id];
 	abs_vf_id = vf->vf_id + hw->func_caps.vf_base_id;
 
+	/* skip VF link state change if requested state is already set */
+	if (!vf->link_forced)
+		old_link = IFLA_VF_LINK_STATE_AUTO;
+	else if (vf->link_up)
+		old_link = IFLA_VF_LINK_STATE_ENABLE;
+	else
+		old_link = IFLA_VF_LINK_STATE_DISABLE;
+
+	if (link == old_link)
+		goto error_out;
+
 	pfe.event = VIRTCHNL_EVENT_LINK_CHANGE;
 	pfe.severity = PF_EVENT_SEVERITY_INFO;
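
The hunk above derives the VF's current link state from two driver flags and skips the request when it is already in effect. A minimal userspace sketch of that mapping, assuming mirror values for the IFLA_VF_LINK_STATE_* constants (0/1/2, as in the uapi header) and hypothetical stand-ins for the driver flags:

#include <stdbool.h>
#include <stdio.h>

/* Assumed mirrors of the uapi IFLA_VF_LINK_STATE_* values. */
enum { LINK_STATE_AUTO = 0, LINK_STATE_ENABLE = 1, LINK_STATE_DISABLE = 2 };

/* Hypothetical stand-in for the two i40e_vf flags the hunk consults. */
struct vf_state {
	bool link_forced;	/* admin has overridden link state */
	bool link_up;		/* only meaningful when link_forced is set */
};

static int current_link_state(const struct vf_state *vf)
{
	if (!vf->link_forced)
		return LINK_STATE_AUTO;
	return vf->link_up ? LINK_STATE_ENABLE : LINK_STATE_DISABLE;
}

int main(void)
{
	struct vf_state vf = { .link_forced = true, .link_up = true };
	int requested = LINK_STATE_ENABLE;

	/* Same early-exit the patch adds: a no-op request is skipped. */
	if (requested == current_link_state(&vf))
		printf("requested state already set, skipping\n");
	return 0;
}
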
@@ -965,6 +965,18 @@ static int nvmet_tcp_handle_h2c_data_pdu(struct nvmet_tcp_queue *queue)
 		pr_err("H2CData PDU len %u is invalid\n", cmd->pdu_len);
 		goto err_proto;
 	}
+	/*
+	 * Ensure command data structures are initialized. We must check both
+	 * cmd->req.sg and cmd->iov because they can have different NULL states:
+	 * - Uninitialized commands: both NULL
+	 * - READ commands: cmd->req.sg allocated, cmd->iov NULL
+	 * - WRITE commands: both allocated
+	 */
+	if (unlikely(!cmd->req.sg || !cmd->iov)) {
+		pr_err("queue %d: H2CData PDU received for invalid command state (ttag %u)\n",
+		       queue->idx, data->ttag);
+		goto err_proto;
+	}
 	cmd->pdu_recv = 0;
 	nvmet_tcp_map_pdu_iovec(cmd);
 	queue->cmd = cmd;
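
The comment in the hunk enumerates the three legal (sg, iov) combinations. A small self-contained sketch of that validation table, using hypothetical stand-in types rather than the real nvmet structures:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

/* Hypothetical stand-in for the two pointers the patch checks. */
struct cmd_state {
	void *sg;	/* data scatterlist, set once the command is initialized */
	void *iov;	/* kernel iovec, set only for WRITE-direction commands */
};

/* H2CData carries host-to-controller (WRITE) payload, so both must be set. */
static bool can_accept_h2c_data(const struct cmd_state *c)
{
	return c->sg != NULL && c->iov != NULL;
}

int main(void)
{
	struct cmd_state uninit = { NULL, NULL };	/* never initialized */
	struct cmd_state rd = { (void *)1, NULL };	/* READ: sg only */
	struct cmd_state wr = { (void *)1, (void *)1 };	/* WRITE: both */

	printf("uninit=%d read=%d write=%d\n",
	       can_accept_h2c_data(&uninit),
	       can_accept_h2c_data(&rd),
	       can_accept_h2c_data(&wr));	/* prints: 0 0 1 */
	return 0;
}
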
@@ -4549,7 +4549,7 @@ int ext4_truncate(struct inode *inode)
 	trace_ext4_truncate_enter(inode);
 
 	if (!ext4_can_truncate(inode))
-		return 0;
+		goto out_trace;
 
 	ext4_clear_inode_flag(inode, EXT4_INODE_EOFBLOCKS);
 

@@ -4560,16 +4560,15 @@ int ext4_truncate(struct inode *inode)
 		int has_inline = 1;
 
 		err = ext4_inline_data_truncate(inode, &has_inline);
-		if (err)
-			return err;
-		if (has_inline)
-			return 0;
+		if (err || has_inline)
+			goto out_trace;
 	}
 
 	/* If we zero-out tail of the page, we have to create jinode for jbd2 */
 	if (inode->i_size & (inode->i_sb->s_blocksize - 1)) {
-		if (ext4_inode_attach_jinode(inode) < 0)
-			return 0;
+		err = ext4_inode_attach_jinode(inode);
+		if (err)
+			goto out_trace;
 	}
 
 	if (ext4_test_inode_flag(inode, EXT4_INODE_EXTENTS))

@@ -4578,8 +4577,10 @@ int ext4_truncate(struct inode *inode)
 		credits = ext4_blocks_for_truncate(inode);
 
 	handle = ext4_journal_start(inode, EXT4_HT_TRUNCATE, credits);
-	if (IS_ERR(handle))
-		return PTR_ERR(handle);
+	if (IS_ERR(handle)) {
+		err = PTR_ERR(handle);
+		goto out_trace;
+	}
 
 	if (inode->i_size & (inode->i_sb->s_blocksize - 1))
 		ext4_block_truncate_page(handle, mapping, inode->i_size);

@@ -4628,6 +4629,7 @@ out_stop:
 	ext4_mark_inode_dirty(handle, inode);
 	ext4_journal_stop(handle);
 
+out_trace:
 	trace_ext4_truncate_exit(inode);
 	return err;
 }
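
The hunks above convert ext4_truncate() from scattered return statements to a single out_trace exit, so trace_ext4_truncate_exit() now fires on every path, including errors. A minimal sketch of the same centralized-exit idiom, with hypothetical helpers standing in for the tracepoints:

#include <stdio.h>

/* Hypothetical stand-ins for the entry/exit tracepoints. */
static void trace_enter(void) { puts("enter"); }
static void trace_exit(void)  { puts("exit"); }

static int step(int fail) { return fail ? -1 : 0; }

static int do_truncate(int fail_early, int fail_late)
{
	int err;

	trace_enter();

	err = step(fail_early);
	if (err)
		goto out_trace;	/* was: return err; the exit trace was skipped */

	err = step(fail_late);
	if (err)
		goto out_trace;

out_trace:
	trace_exit();		/* now runs on every path */
	return err;
}

int main(void)
{
	printf("err = %d\n", do_truncate(0, 1));
	return 0;
}
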
@@ -1698,8 +1698,17 @@ static void nfs_set_open_stateid_locked(struct nfs4_state *state,
 		if (nfs_stateid_is_sequential(state, stateid))
 			break;
 
-		if (status)
-			break;
+		if (status) {
+			if (nfs4_stateid_match_other(stateid, &state->open_stateid) &&
+			    !nfs4_stateid_is_newer(stateid, &state->open_stateid)) {
+				trace_nfs4_open_stateid_update_skip(state->inode,
+						stateid, status);
+				return;
+			} else {
+				break;
+			}
+		}
+
 		/* Rely on seqids for serialisation with NFSv4.0 */
 		if (!nfs4_has_session(NFS_SERVER(state->inode)->nfs_client))
			break;
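
The new branch skips the update when the incoming stateid belongs to the same open (matching "other" field) but does not carry a newer seqid. Seqid comparison has to tolerate wraparound; a sketch of the usual serial-number test, assuming a 32-bit seqid as NFSv4 uses (the in-kernel helper may differ in detail):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Wraparound-safe "is a newer than b" for a 32-bit sequence number:
 * interpret the difference as signed, so 1 counts as newer than
 * 0xffffffff even though it is numerically smaller. */
static bool seqid_is_newer(uint32_t a, uint32_t b)
{
	return (int32_t)(a - b) > 0;
}

int main(void)
{
	printf("%d\n", seqid_is_newer(5, 4));		/* 1 */
	printf("%d\n", seqid_is_newer(4, 5));		/* 0 */
	printf("%d\n", seqid_is_newer(1, UINT32_MAX));	/* 1: wrapped */
	return 0;
}
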
@@ -1247,6 +1247,7 @@ DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_setattr);
 DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_delegreturn);
 DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update);
 DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update_wait);
+DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_open_stateid_update_skip);
 DEFINE_NFS4_INODE_STATEID_EVENT(nfs4_close_stateid_update_wait);
 
 DECLARE_EVENT_CLASS(nfs4_getattr_event,
@@ -465,6 +465,7 @@ pnfs_mark_layout_stateid_invalid(struct pnfs_layout_hdr *lo,
 	struct pnfs_layout_segment *lseg, *next;
 
 	set_bit(NFS_LAYOUT_INVALID_STID, &lo->plh_flags);
+	clear_bit(NFS_INO_LAYOUTCOMMIT, &NFS_I(lo->plh_inode)->flags);
 	list_for_each_entry_safe(lseg, next, &lo->plh_segs, pls_list)
 		pnfs_clear_lseg_state(lseg, lseg_list);
 	pnfs_clear_layoutreturn_info(lo);
@@ -37,8 +37,6 @@ struct lockref {
 extern void lockref_get(struct lockref *);
 extern int lockref_put_return(struct lockref *);
 extern int lockref_get_not_zero(struct lockref *);
-extern int lockref_put_not_zero(struct lockref *);
-extern int lockref_get_or_lock(struct lockref *);
 extern int lockref_put_or_lock(struct lockref *);
 
 extern void lockref_mark_dead(struct lockref *);
@@ -35,7 +35,10 @@
 struct sk_buff;
 
 struct dst_entry {
-	struct net_device *dev;
+	RH_KABI_REPLACE(struct net_device *dev, union {
+		struct net_device *dev;
+		struct net_device __rcu *dev_rcu;
+	})
 	struct dst_ops *ops;
 	unsigned long _metrics;
 	unsigned long expires;

@@ -559,4 +562,39 @@ static inline void skb_dst_update_pmtu_no_confirm(struct sk_buff *skb, u32 mtu)
 		dst->ops->update_pmtu(dst, NULL, skb, mtu, false);
 }
 
+static inline struct net_device *dst_dev(const struct dst_entry *dst)
+{
+	return READ_ONCE(dst->dev);
+}
+
+static inline struct net_device *dst_dev_rcu(const struct dst_entry *dst)
+{
+	return rcu_dereference(dst->dev_rcu);
+}
+
+static inline struct net *dst_dev_net_rcu(const struct dst_entry *dst)
+{
+	return dev_net_rcu(dst_dev_rcu(dst));
+}
+
+static inline struct net_device *skb_dst_dev(const struct sk_buff *skb)
+{
+	return dst_dev(skb_dst(skb));
+}
+
+static inline struct net_device *skb_dst_dev_rcu(const struct sk_buff *skb)
+{
+	return dst_dev_rcu(skb_dst(skb));
+}
+
+static inline struct net *skb_dst_dev_net(const struct sk_buff *skb)
+{
+	return dev_net(skb_dst_dev(skb));
+}
+
+static inline struct net *skb_dst_dev_net_rcu(const struct sk_buff *skb)
+{
+	return dev_net_rcu(skb_dst_dev_rcu(skb));
+}
+
 #endif /* _NET_DST_H */
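
dst_dev() uses READ_ONCE() while dst_dev_rcu() goes through rcu_dereference() on the new __rcu alias of the same field. A rough userspace analogy of that publish/read discipline using C11 atomics; RCU itself is a kernel facility, so this only models the pointer publication, and the acquire load is a stand-in for rcu_dereference()'s weaker dependency ordering:

#include <stdatomic.h>
#include <stdio.h>

struct net_device { const char *name; };

/* The union in the patch lets old code keep using ->dev while new
 * RCU-aware code reads the same bits through an __rcu-annotated alias.
 * Here a single atomic pointer plays both roles. */
static _Atomic(struct net_device *) dev_ptr;

static void publish(struct net_device *dev)
{
	/* like rcu_assign_pointer(): release ensures init is visible first */
	atomic_store_explicit(&dev_ptr, dev, memory_order_release);
}

static struct net_device *read_rcu(void)
{
	/* like rcu_dereference(): pairs with the release store above */
	return atomic_load_explicit(&dev_ptr, memory_order_acquire);
}

int main(void)
{
	static struct net_device eth0 = { "eth0" };

	publish(&eth0);
	printf("%s\n", read_rcu()->name);
	return 0;
}
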
@@ -394,14 +394,24 @@ static inline bool ip_sk_ignore_df(const struct sock *sk)
 static inline unsigned int ip_dst_mtu_maybe_forward(const struct dst_entry *dst,
 						    bool forwarding)
 {
-	struct net *net = dev_net(dst->dev);
+	const struct net_device *dev;
+	unsigned int res;
+	struct net *net;
+
+	rcu_read_lock();
 
+	dev = dst_dev_rcu(dst);
+	net = dev_net_rcu(dev);
 	if (net->ipv4.sysctl_ip_fwd_use_pmtu ||
 	    ip_mtu_locked(dst) ||
 	    !forwarding)
-		return dst_mtu(dst);
+		res = dst_mtu(dst);
+	else
+		res = min(READ_ONCE(dev->mtu), IP_MAX_MTU);
+
+	rcu_read_unlock();
 
-	return min(READ_ONCE(dst->dev->mtu), IP_MAX_MTU);
+	return res;
 }
 
 static inline unsigned int ip_skb_dst_mtu(struct sock *sk,
@@ -294,7 +294,7 @@ static inline unsigned int ip6_dst_mtu_forward(const struct dst_entry *dst)
 
 	mtu = IPV6_MIN_MTU;
 	rcu_read_lock();
-	idev = __in6_dev_get(dst->dev);
+	idev = __in6_dev_get(dst_dev_rcu(dst));
 	if (idev)
 		mtu = idev->cnf.mtu6;
 	rcu_read_unlock();
@@ -353,10 +353,15 @@ static inline int inet_iif(const struct sk_buff *skb)
 static inline int ip4_dst_hoplimit(const struct dst_entry *dst)
 {
 	int hoplimit = dst_metric_raw(dst, RTAX_HOPLIMIT);
-	struct net *net = dev_net(dst->dev);
 
-	if (hoplimit == 0)
+	if (hoplimit == 0) {
+		const struct net *net;
+
+		rcu_read_lock();
+		net = dst_dev_net_rcu(dst);
 		hoplimit = net->ipv4.sysctl_ip_default_ttl;
+		rcu_read_unlock();
+	}
 	return hoplimit;
 }
 
@@ -1339,9 +1339,11 @@ static int migration_cpu_stop(void *data)
 		 * it.
 		 */
 		WARN_ON_ONCE(!pending->stop_pending);
+		preempt_disable();
 		task_rq_unlock(rq, p, &rf);
 		stop_one_cpu_nowait(task_cpu(p), migration_cpu_stop,
 				    &pending->arg, &pending->stop_work);
+		preempt_enable();
 		return 0;
 	}
 out:

@@ -1565,12 +1567,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
 			complete = true;
 		}
 
+		preempt_disable();
 		task_rq_unlock(rq, p, rf);
-
 		if (push_task) {
 			stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
 					    p, &rq->push_work);
 		}
+		preempt_enable();
 
 		if (complete)
 			complete_all(&pending->done);

@@ -1636,12 +1639,13 @@ static int affine_move_task(struct rq *rq, struct task_struct *p, struct rq_flag
 	if (flags & SCA_MIGRATE_ENABLE)
 		p->migration_flags &= ~MDF_PUSH;
 
+	preempt_disable();
 	task_rq_unlock(rq, p, rf);
-
 	if (!stop_pending) {
 		stop_one_cpu_nowait(cpu_of(rq), migration_cpu_stop,
 				    &pending->arg, &pending->stop_work);
 	}
+	preempt_enable();
 
 	if (flags & SCA_MIGRATE_ENABLE)
 		return 0;

@@ -6899,9 +6903,11 @@ static void balance_push(struct rq *rq)
 	 * Temporarily drop rq->lock such that we can wake-up the stop task.
 	 * Both preemption and IRQs are still disabled.
 	 */
+	preempt_disable();
 	raw_spin_unlock(&rq->lock);
 	stop_one_cpu_nowait(rq->cpu, __balance_push_cpu_stop, push_task,
 			    this_cpu_ptr(&push_work));
+	preempt_enable();
 	/*
 	 * At this point need_resched() is true and we'll take the loop in
 	 * schedule(). The next pick is obviously going to be the stop task

@@ -2290,9 +2290,11 @@ skip:
 		double_unlock_balance(this_rq, src_rq);
 
 		if (push_task) {
+			preempt_disable();
 			raw_spin_unlock(&this_rq->lock);
 			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
 					    push_task, &src_rq->push_work);
+			preempt_enable();
 			raw_spin_lock(&this_rq->lock);
 		}
 	}

@@ -10138,13 +10138,15 @@ more_balance:
 				busiest->push_cpu = this_cpu;
 				active_balance = 1;
 			}
-			raw_spin_unlock_irqrestore(&busiest->lock, flags);
 
+			preempt_disable();
+			raw_spin_unlock_irqrestore(&busiest->lock, flags);
 			if (active_balance) {
 				stop_one_cpu_nowait(cpu_of(busiest),
 					active_load_balance_cpu_stop, busiest,
 					&busiest->active_balance_work);
 			}
+			preempt_enable();
 		}
 	} else {
 		sd->nr_balance_failed = 0;

@@ -1862,9 +1862,11 @@ retry:
 	 */
 	push_task = get_push_task(rq);
 	if (push_task) {
+		preempt_disable();
 		raw_spin_unlock(&rq->lock);
 		stop_one_cpu_nowait(rq->cpu, push_cpu_stop,
 				    push_task, &rq->push_work);
+		preempt_enable();
 		raw_spin_lock(&rq->lock);
 	}
 

@@ -2201,9 +2203,11 @@ skip:
 		double_unlock_balance(this_rq, src_rq);
 
 		if (push_task) {
+			preempt_disable();
 			raw_spin_unlock(&this_rq->lock);
 			stop_one_cpu_nowait(src_rq->cpu, push_cpu_stop,
 					    push_task, &src_rq->push_work);
+			preempt_enable();
 			raw_spin_lock(&this_rq->lock);
 		}
 	}
@@ -9,6 +9,7 @@
  * failure case.
  */
 #define CMPXCHG_LOOP(CODE, SUCCESS) do { \
+	int retry = 100; \
 	struct lockref old; \
 	BUILD_BUG_ON(sizeof(old) != 8); \
 	old.lock_count = READ_ONCE(lockref->lock_count); \

@@ -21,7 +22,8 @@
 		if (likely(old.lock_count == prev.lock_count)) { \
 			SUCCESS; \
 		} \
-		cpu_relax(); \
+		if (!--retry) \
+			break; \
 	} \
 } while (0)
 
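
The macro now bounds the optimistic cmpxchg loop at 100 rounds before the callers fall back to taking the spinlock. A compilable userspace sketch of a bounded compare-and-swap increment with a mutex fallback; real lockref packs the lock and count into a single 8-byte word so the cmpxchg also observes the lock state, which this sketch omits for brevity:

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static atomic_int count = 1;
static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

/* Try a lockless increment for up to 100 rounds, then take the lock. */
static void ref_get(void)
{
	int retry = 100;
	int old = atomic_load(&count);

	while (retry--) {
		/* on failure, 'old' is reloaded with the current value */
		if (atomic_compare_exchange_weak(&count, &old, old + 1))
			return;
	}

	pthread_mutex_lock(&lock);
	count++;	/* slow path, serialized by the lock */
	pthread_mutex_unlock(&lock);
}

int main(void)
{
	ref_get();
	printf("count = %d\n", atomic_load(&count));	/* 2 */
	return 0;
}
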
@@ -80,59 +82,6 @@ int lockref_get_not_zero(struct lockref *lockref)
 }
 EXPORT_SYMBOL(lockref_get_not_zero);
 
-/**
- * lockref_put_not_zero - Decrements count unless count <= 1 before decrement
- * @lockref: pointer to lockref structure
- * Return: 1 if count updated successfully or 0 if count would become zero
- */
-int lockref_put_not_zero(struct lockref *lockref)
-{
-	int retval;
-
-	CMPXCHG_LOOP(
-		new.count--;
-		if (old.count <= 1)
-			return 0;
-	,
-		return 1;
-	);
-
-	spin_lock(&lockref->lock);
-	retval = 0;
-	if (lockref->count > 1) {
-		lockref->count--;
-		retval = 1;
-	}
-	spin_unlock(&lockref->lock);
-	return retval;
-}
-EXPORT_SYMBOL(lockref_put_not_zero);
-
-/**
- * lockref_get_or_lock - Increments count unless the count is 0 or dead
- * @lockref: pointer to lockref structure
- * Return: 1 if count updated successfully or 0 if count was zero
- * and we got the lock instead.
- */
-int lockref_get_or_lock(struct lockref *lockref)
-{
-	CMPXCHG_LOOP(
-		new.count++;
-		if (old.count <= 0)
-			break;
-	,
-		return 1;
-	);
-
-	spin_lock(&lockref->lock);
-	if (lockref->count <= 0)
-		return 0;
-	lockref->count++;
-	spin_unlock(&lockref->lock);
-	return 1;
-}
-EXPORT_SYMBOL(lockref_get_or_lock);
-
 /**
  * lockref_put_return - Decrement reference count if possible
  * @lockref: pointer to lockref structure
@@ -170,7 +170,7 @@ void dst_dev_put(struct dst_entry *dst)
 		dst->ops->ifdown(dst, dev, true);
 	dst->input = dst_discard;
 	dst->output = dst_discard_out;
-	dst->dev = blackhole_netdev;
+	rcu_assign_pointer(dst->dev_rcu, blackhole_netdev);
 	dev_hold(dst->dev);
 	dev_put(dev);
 }
@@ -2033,9 +2033,12 @@ EXPORT_SYMBOL_GPL(sk_free_unlock_clone);
 
 void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
 {
+	const struct net_device *dev;
 	u32 max_segs = 1;
 
-	sk->sk_route_caps = dst->dev->features | sk->sk_route_forced_caps;
+	rcu_read_lock();
+	dev = dst_dev_rcu(dst);
+	sk->sk_route_caps = dev->features | sk->sk_route_forced_caps;
 	if (sk->sk_route_caps & NETIF_F_GSO)
 		sk->sk_route_caps |= NETIF_F_GSO_SOFTWARE;
 	sk->sk_route_caps &= ~sk->sk_route_nocaps;

@@ -2045,13 +2048,14 @@ void sk_setup_caps(struct sock *sk, struct dst_entry *dst)
 		} else {
 			sk->sk_route_caps |= NETIF_F_SG | NETIF_F_HW_CSUM;
 			/* pairs with the WRITE_ONCE() in netif_set_gso_max_size() */
-			sk->sk_gso_max_size = READ_ONCE(dst->dev->gso_max_size);
+			sk->sk_gso_max_size = READ_ONCE(dev->gso_max_size);
 			/* pairs with the WRITE_ONCE() in netif_set_gso_max_segs() */
-			max_segs = max_t(u32, READ_ONCE(dst->dev->gso_max_segs), 1);
+			max_segs = max_t(u32, READ_ONCE(dev->gso_max_segs), 1);
 		}
 	}
 	sk->sk_gso_max_segs = max_segs;
 	sk_dst_set(sk, dst);
+	rcu_read_unlock();
 }
 EXPORT_SYMBOL_GPL(sk_setup_caps);
@@ -415,17 +415,22 @@ int ip_mc_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 
 int ip_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;
+	struct net_device *dev, *indev = skb->dev;
+	int ret_val;
 
 	IP_UPD_PO_STATS(net, IPSTATS_MIB_OUT, skb->len);
 
+	rcu_read_lock();
+	dev = skb_dst_dev_rcu(skb);
 	skb->dev = dev;
 	skb->protocol = htons(ETH_P_IP);
 
-	return NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
-			    net, sk, skb, indev, dev,
-			    ip_finish_output,
-			    !(IPCB(skb)->flags & IPSKB_REROUTED));
+	ret_val = NF_HOOK_COND(NFPROTO_IPV4, NF_INET_POST_ROUTING,
+			       net, sk, skb, indev, dev,
+			       ip_finish_output,
+			       !(IPCB(skb)->flags & IPSKB_REROUTED));
+	rcu_read_unlock();
+	return ret_val;
 }
 
 /*
@@ -1035,17 +1035,20 @@ out:	kfree_skb_reason(skb, reason);
 static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
 {
 	struct dst_entry *dst = &rt->dst;
-	struct net *net = dev_net(dst->dev);
-	u32 old_mtu = ipv4_mtu(dst);
 	struct fib_result res;
 	bool lock = false;
+	struct net *net;
+	u32 old_mtu;
 
 	if (ip_mtu_locked(dst))
 		return;
 
+	old_mtu = ipv4_mtu(dst);
 	if (old_mtu < mtu)
 		return;
 
+	rcu_read_lock();
+	net = dst_dev_net_rcu(dst);
 	if (mtu < ip_rt_min_pmtu) {
 		lock = true;
 		mtu = min(old_mtu, ip_rt_min_pmtu);

@@ -1053,9 +1056,8 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
 
 	if (rt->rt_pmtu == mtu && !lock &&
 	    time_before(jiffies, dst->expires - ip_rt_mtu_expires / 2))
-		return;
+		goto out;
 
-	rcu_read_lock();
 	if (fib_lookup(net, fl4, &res, 0) == 0) {
 		struct fib_nh *nh;
 

@@ -1064,6 +1066,7 @@ static void __ip_rt_update_pmtu(struct rtable *rt, struct flowi4 *fl4, u32 mtu)
 		update_or_create_fnhe(nh, fl4->daddr, 0, mtu, lock,
 				      jiffies + ip_rt_mtu_expires);
 	}
+out:
 	rcu_read_unlock();
 }
@@ -63,7 +63,7 @@
 static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
 	struct dst_entry *dst = skb_dst(skb);
-	struct net_device *dev = dst->dev;
+	struct net_device *dev = dst_dev_rcu(dst);
 	const struct in6_addr *nexthop;
 	struct neighbour *neigh;
 	int ret;

@@ -113,9 +113,9 @@ static int ip6_finish_output2(struct net *net, struct sock *sk, struct sk_buff *
 
 	rcu_read_lock_bh();
 	nexthop = rt6_nexthop((struct rt6_info *)dst, &ipv6_hdr(skb)->daddr);
-	neigh = __ipv6_neigh_lookup_noref(dst->dev, nexthop);
+	neigh = __ipv6_neigh_lookup_noref(dev, nexthop);
 	if (unlikely(!neigh))
-		neigh = __neigh_create(&nd_tbl, nexthop, dst->dev, false);
+		neigh = __neigh_create(&nd_tbl, nexthop, dev, false);
 	if (!IS_ERR(neigh)) {
 		sock_confirm_neigh(skb, neigh);
 		ret = neigh_output(neigh, skb);

@@ -210,22 +210,30 @@ static int ip6_finish_output(struct net *net, struct sock *sk, struct sk_buff *s
 
 int ip6_output(struct net *net, struct sock *sk, struct sk_buff *skb)
 {
-	struct net_device *dev = skb_dst(skb)->dev, *indev = skb->dev;
-	struct inet6_dev *idev = ip6_dst_idev(skb_dst(skb));
+	struct dst_entry *dst = skb_dst(skb);
+	struct net_device *dev, *indev = skb->dev;
+	struct inet6_dev *idev;
+	int ret;
 
 	skb->protocol = htons(ETH_P_IPV6);
+	rcu_read_lock();
+	dev = dst_dev_rcu(dst);
+	idev = ip6_dst_idev(dst);
 	skb->dev = dev;
 
 	if (unlikely(!idev || idev->cnf.disable_ipv6)) {
 		IP6_INC_STATS(net, idev, IPSTATS_MIB_OUTDISCARDS);
+		rcu_read_unlock();
 		kfree_skb_reason(skb, SKB_DROP_REASON_IPV6DISABLED);
 		return 0;
 	}
 
-	return NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
-			    net, sk, skb, indev, dev,
-			    ip6_finish_output,
-			    !(IP6CB(skb)->flags & IP6SKB_REROUTED));
+	ret = NF_HOOK_COND(NFPROTO_IPV6, NF_INET_POST_ROUTING,
+			   net, sk, skb, indev, dev,
+			   ip6_finish_output,
+			   !(IP6CB(skb)->flags & IP6SKB_REROUTED));
+	rcu_read_unlock();
+	return ret;
 }
 
 bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)

@@ -245,28 +253,33 @@ bool ip6_autoflowlabel(struct net *net, const struct ipv6_pinfo *np)
 int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 	     __u32 mark, struct ipv6_txoptions *opt, int tclass)
 {
-	struct net *net = sock_net(sk);
 	const struct ipv6_pinfo *np = inet6_sk(sk);
 	struct in6_addr *first_hop = &fl6->daddr;
 	struct dst_entry *dst = skb_dst(skb);
+	struct net *net = sock_net(sk);
 	unsigned int head_room;
+	struct net_device *dev;
 	struct ipv6hdr *hdr;
 	u8 proto = fl6->flowi6_proto;
 	int seg_len = skb->len;
-	int hlimit = -1;
+	int ret, hlimit = -1;
 	u32 mtu;
 
-	head_room = sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dst->dev);
+	rcu_read_lock();
+
+	dev = dst_dev_rcu(dst);
+	head_room = sizeof(struct ipv6hdr) + LL_RESERVED_SPACE(dev);
 	if (opt)
 		head_room += opt->opt_nflen + opt->opt_flen;
 
 	if (unlikely(skb_headroom(skb) < head_room)) {
 		struct sk_buff *skb2 = skb_realloc_headroom(skb, head_room);
 		if (!skb2) {
-			IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)),
+			IP6_INC_STATS(net, ip6_dst_idev(dst),
 				      IPSTATS_MIB_OUTDISCARDS);
 			kfree_skb(skb);
-			return -ENOBUFS;
+			ret = -ENOBUFS;
+			goto unlock;
 		}
 		if (skb->sk)
 			skb_set_owner_w(skb2, skb->sk);

@@ -320,18 +333,22 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 		 * skb to its handler for processing
 		 */
 		skb = l3mdev_ip6_out((struct sock *)sk, skb);
-		if (unlikely(!skb))
-			return 0;
+		if (unlikely(!skb)) {
+			ret = 0;
+			goto unlock;
+		}
 
 		/* hooks should never assume socket lock is held.
 		 * we promote our socket to non const
 		 */
-		return NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
-			       net, (struct sock *)sk, skb, NULL, dst->dev,
-			       dst_output);
+		ret = NF_HOOK(NFPROTO_IPV6, NF_INET_LOCAL_OUT,
+			      net, (struct sock *)sk, skb, NULL, dev,
+			      dst_output);
+		goto unlock;
 	}
 
-	skb->dev = dst->dev;
+	ret = -EMSGSIZE;
+	skb->dev = dev;
 	/* ipv6_local_error() does not require socket lock,
 	 * we promote our socket to non const
 	 */

@@ -339,7 +356,9 @@ int ip6_xmit(const struct sock *sk, struct sk_buff *skb, struct flowi6 *fl6,
 
 	IP6_INC_STATS(net, ip6_dst_idev(skb_dst(skb)), IPSTATS_MIB_FRAGFAILS);
 	kfree_skb(skb);
-	return -EMSGSIZE;
+unlock:
+	rcu_read_unlock();
+	return ret;
 }
 EXPORT_SYMBOL(ip6_xmit);
@@ -119,6 +119,8 @@ vmci_transport_packet_init(struct vmci_transport_packet *pkt,
 			   u16 proto,
 			   struct vmci_handle handle)
 {
+	memset(pkt, 0, sizeof(*pkt));
+
 	/* We register the stream control handler as an any cid handle so we
 	 * must always send from a source address of VMADDR_CID_ANY
 	 */

@@ -131,8 +133,6 @@ vmci_transport_packet_init(struct vmci_transport_packet *pkt,
 	pkt->type = type;
 	pkt->src_port = src->svm_port;
 	pkt->dst_port = dst->svm_port;
-	memset(&pkt->proto, 0, sizeof(pkt->proto));
-	memset(&pkt->_reserved2, 0, sizeof(pkt->_reserved2));
 
 	switch (pkt->type) {
 	case VMCI_TRANSPORT_PACKET_TYPE_INVALID:
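
The two hunks replace per-field memsets with one memset of the whole packet before any field is written, which also clears the padding bytes the field-wise version leaves uninitialized. A standalone demonstration of why padding matters when a struct is copied out wholesale, using a hypothetical layout rather than the real vmci packet:

#include <stdio.h>
#include <string.h>

/* Hypothetical wire struct: on most ABIs, 3 padding bytes follow 'type'. */
struct pkt {
	char type;
	int port;
};

int main(void)
{
	struct pkt a;			/* stack garbage, incl. padding */
	struct pkt b;

	a.type = 1;			/* field-wise init: padding untouched */
	a.port = 80;
	(void)a;			/* copying 'a' byte-for-byte would
					 * leak those padding bytes */

	memset(&b, 0, sizeof(b));	/* whole-struct memset first ... */
	b.type = 1;			/* ... then assign fields: every byte
					 * of 'b' is now defined */
	b.port = 80;

	printf("sizeof(struct pkt) = %zu\n", sizeof(struct pkt));
	return 0;
}
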
@@ -1382,6 +1382,11 @@ int snd_usb_endpoint_set_params(struct snd_usb_audio *chip,
 	ep->sample_rem = ep->cur_rate % ep->pps;
 	ep->packsize[0] = ep->cur_rate / ep->pps;
 	ep->packsize[1] = (ep->cur_rate + (ep->pps - 1)) / ep->pps;
+	if (ep->packsize[1] > ep->maxpacksize) {
+		usb_audio_dbg(chip, "Too small maxpacksize %u for rate %u / pps %u\n",
+			      ep->maxpacksize, ep->cur_rate, ep->pps);
+		return -EINVAL;
+	}
 
 	/* calculate the frequency in 16.16 format */
 	ep->freqm = ep->freqn;
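
packsize[1] is the ceiling of cur_rate / pps, and the new check rejects configurations where even the largest scheduled packet cannot fit in maxpacksize. A quick standalone check of that arithmetic, with illustrative values:

#include <stdio.h>

/* Ceiling division, as in ep->packsize[1] = (rate + pps - 1) / pps. */
static unsigned int div_round_up(unsigned int a, unsigned int b)
{
	return (a + b - 1) / b;
}

int main(void)
{
	unsigned int cur_rate = 44100, pps = 1000, maxpacksize = 45;
	unsigned int packsize0 = cur_rate / pps;		/* 44 */
	unsigned int packsize1 = div_round_up(cur_rate, pps);	/* 45 */

	if (packsize1 > maxpacksize) {
		fprintf(stderr, "too small maxpacksize %u for rate %u / pps %u\n",
			maxpacksize, cur_rate, pps);
		return 1;	/* the patch returns -EINVAL here */
	}
	printf("packsize = [%u, %u]\n", packsize0, packsize1);
	return 0;
}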