Import of kernel-6.12.0-55.33.1.el10_0
parent d59d7bced9
commit 40d161dbf5
@@ -12,7 +12,7 @@ RHEL_MINOR = 0
 #
 # Use this spot to avoid future merge conflicts.
 # Do not trim this comment.
-RHEL_RELEASE = 55.32.1
+RHEL_RELEASE = 55.33.1
 
 #
 # RHEL_REBASE_NUM
@@ -1407,18 +1407,28 @@ static void ublk_abort_queue(struct ublk_device *ub, struct ublk_queue *ubq)
     }
 }
 
-static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
+/* Must be called when queue is frozen */
+static bool ublk_mark_queue_canceling(struct ublk_queue *ubq)
 {
-    struct gendisk *disk;
+    bool canceled;
 
     spin_lock(&ubq->cancel_lock);
-    if (ubq->canceling) {
-        spin_unlock(&ubq->cancel_lock);
-        return false;
-    }
-    ubq->canceling = true;
+    canceled = ubq->canceling;
+    if (!canceled)
+        ubq->canceling = true;
     spin_unlock(&ubq->cancel_lock);
 
+    return canceled;
+}
+
+static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
+{
+    bool was_canceled = ubq->canceling;
+    struct gendisk *disk;
+
+    if (was_canceled)
+        return false;
+
     spin_lock(&ub->lock);
     disk = ub->ub_disk;
     if (disk)
@@ -1429,14 +1439,23 @@ static bool ublk_abort_requests(struct ublk_device *ub, struct ublk_queue *ubq)
     if (!disk)
         return false;
 
-    /* Now we are serialized with ublk_queue_rq() */
+    /*
+     * Now we are serialized with ublk_queue_rq()
+     *
+     * Make sure that ubq->canceling is set when queue is frozen,
+     * because ublk_queue_rq() has to rely on this flag for avoiding to
+     * touch completed uring_cmd
+     */
     blk_mq_quiesce_queue(disk->queue);
-    /* abort queue is for making forward progress */
-    ublk_abort_queue(ub, ubq);
+    was_canceled = ublk_mark_queue_canceling(ubq);
+    if (!was_canceled) {
+        /* abort queue is for making forward progress */
+        ublk_abort_queue(ub, ubq);
+    }
     blk_mq_unquiesce_queue(disk->queue);
     put_device(disk_to_dev(disk));
 
-    return true;
+    return !was_canceled;
 }
 
 static void ublk_cancel_cmd(struct ublk_queue *ubq, struct ublk_io *io,
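The two hunks above split the cancel-marking out of ublk_abort_requests() and perform it only after the queue has been quiesced. The sketch below is not part of the patch; it only illustrates the general shape of a test-and-set helper that reports the previous state under a spinlock, using hypothetical names.

/* Illustrative sketch only; struct and function names are hypothetical. */
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_queue {
    spinlock_t cancel_lock;
    bool canceling;
};

/* Set the flag if it is not already set; report whether it was set before. */
static bool demo_mark_canceling(struct demo_queue *q)
{
    bool was_set;

    spin_lock(&q->cancel_lock);
    was_set = q->canceling;
    if (!was_set)
        q->canceling = true;
    spin_unlock(&q->cancel_lock);

    return was_set;
}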
@@ -758,7 +758,7 @@ tx_free:
     dev_kfree_skb_any(skb);
 tx_kick_pending:
     if (BNXT_TX_PTP_IS_SET(lflags)) {
-        txr->tx_buf_ring[txr->tx_prod].is_ts_pkt = 0;
+        txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].is_ts_pkt = 0;
         atomic64_inc(&bp->ptp_cfg->stats.ts_err);
         if (!(bp->fw_cap & BNXT_FW_CAP_TX_TS_CMP))
             /* set SKB to err so PTP worker will clean up */
@@ -766,7 +766,7 @@ tx_kick_pending:
     }
     if (txr->kick_pending)
         bnxt_txr_db_kick(bp, txr, txr->tx_prod);
-    txr->tx_buf_ring[txr->tx_prod].skb = NULL;
+    txr->tx_buf_ring[RING_TX(bp, txr->tx_prod)].skb = NULL;
     dev_core_stats_tx_dropped_inc(dev);
     return NETDEV_TX_OK;
 }
@@ -5003,7 +5003,7 @@ int i40e_get_vf_stats(struct net_device *netdev, int vf_id,
     vf_stats->broadcast = stats->rx_broadcast;
     vf_stats->multicast = stats->rx_multicast;
     vf_stats->rx_dropped = stats->rx_discards + stats->rx_discards_other;
-    vf_stats->tx_dropped = stats->tx_discards;
+    vf_stats->tx_dropped = stats->tx_errors;
 
     return 0;
 }
@@ -96,7 +96,7 @@ static void idpf_ctlq_init_rxq_bufs(struct idpf_ctlq_info *cq)
  */
 static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
 {
-    mutex_lock(&cq->cq_lock);
+    spin_lock(&cq->cq_lock);
 
     /* free ring buffers and the ring itself */
     idpf_ctlq_dealloc_ring_res(hw, cq);
@@ -104,8 +104,7 @@ static void idpf_ctlq_shutdown(struct idpf_hw *hw, struct idpf_ctlq_info *cq)
     /* Set ring_size to 0 to indicate uninitialized queue */
     cq->ring_size = 0;
 
-    mutex_unlock(&cq->cq_lock);
-    mutex_destroy(&cq->cq_lock);
+    spin_unlock(&cq->cq_lock);
 }
 
 /**
@@ -173,7 +172,7 @@ int idpf_ctlq_add(struct idpf_hw *hw,
 
     idpf_ctlq_init_regs(hw, cq, is_rxq);
 
-    mutex_init(&cq->cq_lock);
+    spin_lock_init(&cq->cq_lock);
 
     list_add(&cq->cq_list, &hw->cq_list_head);
 
@@ -272,7 +271,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
     int err = 0;
     int i;
 
-    mutex_lock(&cq->cq_lock);
+    spin_lock(&cq->cq_lock);
 
     /* Ensure there are enough descriptors to send all messages */
     num_desc_avail = IDPF_CTLQ_DESC_UNUSED(cq);
@@ -332,7 +331,7 @@ int idpf_ctlq_send(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
     wr32(hw, cq->reg.tail, cq->next_to_use);
 
 err_unlock:
-    mutex_unlock(&cq->cq_lock);
+    spin_unlock(&cq->cq_lock);
 
     return err;
 }
@@ -364,7 +363,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
     if (*clean_count > cq->ring_size)
         return -EBADR;
 
-    mutex_lock(&cq->cq_lock);
+    spin_lock(&cq->cq_lock);
 
     ntc = cq->next_to_clean;
 
@@ -394,7 +393,7 @@ int idpf_ctlq_clean_sq(struct idpf_ctlq_info *cq, u16 *clean_count,
 
     cq->next_to_clean = ntc;
 
-    mutex_unlock(&cq->cq_lock);
+    spin_unlock(&cq->cq_lock);
 
     /* Return number of descriptors actually cleaned */
     *clean_count = i;
@@ -432,7 +431,7 @@ int idpf_ctlq_post_rx_buffs(struct idpf_hw *hw, struct idpf_ctlq_info *cq,
     if (*buff_count > 0)
         buffs_avail = true;
 
-    mutex_lock(&cq->cq_lock);
+    spin_lock(&cq->cq_lock);
 
     if (tbp >= cq->ring_size)
         tbp = 0;
@@ -521,7 +520,7 @@ post_buffs_out:
         wr32(hw, cq->reg.tail, cq->next_to_post);
     }
 
-    mutex_unlock(&cq->cq_lock);
+    spin_unlock(&cq->cq_lock);
 
     /* return the number of buffers that were not posted */
     *buff_count = *buff_count - i;
@@ -549,7 +548,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
     u16 i;
 
     /* take the lock before we start messing with the ring */
-    mutex_lock(&cq->cq_lock);
+    spin_lock(&cq->cq_lock);
 
     ntc = cq->next_to_clean;
 
@@ -608,7 +607,7 @@ int idpf_ctlq_recv(struct idpf_ctlq_info *cq, u16 *num_q_msg,
 
     cq->next_to_clean = ntc;
 
-    mutex_unlock(&cq->cq_lock);
+    spin_unlock(&cq->cq_lock);
 
     *num_q_msg = i;
     if (*num_q_msg == 0)
@@ -99,7 +99,7 @@ struct idpf_ctlq_info {
 
     enum idpf_ctlq_type cq_type;
     int q_id;
-    struct mutex cq_lock; /* control queue lock */
+    spinlock_t cq_lock; /* control queue lock */
     /* used for interrupt processing */
     u16 next_to_use;
     u16 next_to_clean;
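The idpf hunks above replace the control queue mutex with a spinlock, both at the lock/unlock call sites and in the struct idpf_ctlq_info member. A minimal sketch of that pattern follows, with hypothetical names; the point of the conversion is that a spinlock-protected section must not sleep, which is what allows it to be entered from atomic context.

/* Minimal sketch with hypothetical names; not the idpf code itself. */
#include <linux/spinlock.h>
#include <linux/types.h>

struct demo_ctlq {
    spinlock_t lock;    /* was: struct mutex */
    u16 next_to_use;
};

static void demo_ctlq_init(struct demo_ctlq *cq)
{
    spin_lock_init(&cq->lock);
}

static void demo_ctlq_advance(struct demo_ctlq *cq, u16 ring_size)
{
    spin_lock(&cq->lock);           /* critical section must not sleep */
    cq->next_to_use = (cq->next_to_use + 1) % ring_size;
    spin_unlock(&cq->lock);
}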
@@ -2321,8 +2321,12 @@ void *idpf_alloc_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem, u64 size)
     struct idpf_adapter *adapter = hw->back;
     size_t sz = ALIGN(size, 4096);
 
-    mem->va = dma_alloc_coherent(&adapter->pdev->dev, sz,
-                                 &mem->pa, GFP_KERNEL);
+    /* The control queue resources are freed under a spinlock, contiguous
+     * pages will avoid IOMMU remapping and the use vmap (and vunmap in
+     * dma_free_*() path.
+     */
+    mem->va = dma_alloc_attrs(&adapter->pdev->dev, sz, &mem->pa,
+                              GFP_KERNEL, DMA_ATTR_FORCE_CONTIGUOUS);
     mem->size = sz;
 
     return mem->va;
@@ -2337,8 +2341,8 @@ void idpf_free_dma_mem(struct idpf_hw *hw, struct idpf_dma_mem *mem)
 {
     struct idpf_adapter *adapter = hw->back;
 
-    dma_free_coherent(&adapter->pdev->dev, mem->size,
-                      mem->va, mem->pa);
+    dma_free_attrs(&adapter->pdev->dev, mem->size,
+                   mem->va, mem->pa, DMA_ATTR_FORCE_CONTIGUOUS);
     mem->size = 0;
     mem->va = NULL;
     mem->pa = 0;
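The allocation hunks switch from dma_alloc_coherent()/dma_free_coherent() to dma_alloc_attrs()/dma_free_attrs() with DMA_ATTR_FORCE_CONTIGUOUS, matching the in-diff comment about the buffers being freed under a spinlock. A minimal wrapper sketch with hypothetical names is shown below; the attribute passed at allocation time must also be passed when freeing.

/* Hedged sketch; the wrapper names are hypothetical. */
#include <linux/device.h>
#include <linux/dma-mapping.h>

static void *demo_alloc_contig(struct device *dev, size_t size, dma_addr_t *dma)
{
    /* Ask for physically contiguous coherent memory. */
    return dma_alloc_attrs(dev, size, dma, GFP_KERNEL,
                           DMA_ATTR_FORCE_CONTIGUOUS);
}

static void demo_free_contig(struct device *dev, size_t size, void *va,
                             dma_addr_t dma)
{
    /* Free with the same attributes used at allocation time. */
    dma_free_attrs(dev, size, va, dma, DMA_ATTR_FORCE_CONTIGUOUS);
}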
@@ -6004,9 +6004,9 @@ lpfc_sli4_get_ctl_attr(struct lpfc_hba *phba)
     phba->sli4_hba.flash_id = bf_get(lpfc_cntl_attr_flash_id, cntl_attr);
     phba->sli4_hba.asic_rev = bf_get(lpfc_cntl_attr_asic_rev, cntl_attr);
 
-    memset(phba->BIOSVersion, 0, sizeof(phba->BIOSVersion));
-    strlcat(phba->BIOSVersion, (char *)cntl_attr->bios_ver_str,
+    memcpy(phba->BIOSVersion, cntl_attr->bios_ver_str,
            sizeof(phba->BIOSVersion));
+    phba->BIOSVersion[sizeof(phba->BIOSVersion) - 1] = '\0';
 
     lpfc_printf_log(phba, KERN_INFO, LOG_SLI,
                     "3086 lnk_type:%d, lnk_numb:%d, bios_ver:%s, "
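The lpfc hunk copies the fixed-size bios_ver_str field with memcpy() and then terminates the destination explicitly, instead of treating the source as a NUL-terminated string. The small standalone program below demonstrates that pattern with made-up data.

/* Standalone demonstration with made-up data; not the lpfc code. */
#include <stdio.h>
#include <string.h>

int main(void)
{
    char src[8] = { 'B', 'I', 'O', 'S', '1', '2', '3', '4' }; /* no NUL */
    char dst[8];

    memcpy(dst, src, sizeof(dst));
    dst[sizeof(dst) - 1] = '\0';    /* force termination */
    printf("%s\n", dst);            /* prints "BIOS123" */
    return 0;
}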
@@ -2673,6 +2673,19 @@ static int graft_tree(struct mount *mnt, struct mount *p, struct mountpoint *mp)
     return attach_recursive_mnt(mnt, p, mp, 0);
 }
 
+static int may_change_propagation(const struct mount *m)
+{
+    struct mnt_namespace *ns = m->mnt_ns;
+
+    // it must be mounted in some namespace
+    if (IS_ERR_OR_NULL(ns)) // is_mounted()
+        return -EINVAL;
+    // and the caller must be admin in userns of that namespace
+    if (!ns_capable(ns->user_ns, CAP_SYS_ADMIN))
+        return -EPERM;
+    return 0;
+}
+
 /*
  * Sanity check the flags to change_mnt_propagation.
  */
@@ -2709,6 +2722,10 @@ static int do_change_type(struct path *path, int ms_flags)
         return -EINVAL;
 
     namespace_lock();
+    err = may_change_propagation(mnt);
+    if (err)
+        goto out_unlock;
+
     if (type == MS_SHARED) {
         err = invent_group_ids(mnt, recurse);
         if (err)
@@ -3102,18 +3119,11 @@ static int do_set_group(struct path *from_path, struct path *to_path)
 
     namespace_lock();
 
-    err = -EINVAL;
-    /* To and From must be mounted */
-    if (!is_mounted(&from->mnt))
-        goto out;
-    if (!is_mounted(&to->mnt))
-        goto out;
-
-    err = -EPERM;
-    /* We should be allowed to modify mount namespaces of both mounts */
-    if (!ns_capable(from->mnt_ns->user_ns, CAP_SYS_ADMIN))
+    err = may_change_propagation(from);
+    if (err)
         goto out;
-    if (!ns_capable(to->mnt_ns->user_ns, CAP_SYS_ADMIN))
+    err = may_change_propagation(to);
+    if (err)
         goto out;
 
     err = -EINVAL;
@@ -4035,7 +4035,17 @@ static __always_inline int ____dev_forward_skb(struct net_device *dev,
     return 0;
 }
 
-bool dev_nit_active(struct net_device *dev);
+bool dev_nit_active_rcu(const struct net_device *dev);
+static inline bool dev_nit_active(const struct net_device *dev)
+{
+    bool ret;
+
+    rcu_read_lock();
+    ret = dev_nit_active_rcu(dev);
+    rcu_read_unlock();
+    return ret;
+}
+
 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev);
 
 static inline void __dev_put(struct net_device *dev)
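The netdevice.h hunk turns dev_nit_active() into an inline wrapper around a new dev_nit_active_rcu() that expects the caller to already be inside an RCU read-side section. The sketch below shows the same wrapper shape with hypothetical names.

/* Wrapper-shape sketch with hypothetical names. */
#include <linux/rcupdate.h>
#include <linux/types.h>

/* Assumed to require rcu_read_lock() to be held by the caller. */
bool demo_active_rcu(const void *obj);

static inline bool demo_active(const void *obj)
{
    bool ret;

    rcu_read_lock();
    ret = demo_active_rcu(obj);
    rcu_read_unlock();
    return ret;
}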
@@ -23,7 +23,6 @@ struct net_hotdata {
     struct net_offload udpv6_offload;
 #endif
     struct list_head offload_base;
-    struct list_head ptype_all;
     struct kmem_cache *skbuff_cache;
     struct kmem_cache *skbuff_fclone_cache;
     struct kmem_cache *skb_small_head_cache;
@@ -83,6 +83,9 @@ struct net {
     struct llist_node defer_free_list;
     struct llist_node cleanup_list; /* namespaces on death row */
 
+    RH_KABI_EXTEND(struct list_head ptype_all)
+    RH_KABI_EXTEND(struct list_head ptype_specific)
+
 #ifdef CONFIG_KEYS
     struct key_tag *key_domain; /* Key domain of operation tag */
 #endif
@@ -570,10 +570,18 @@ static inline void netdev_set_addr_lockdep_class(struct net_device *dev)
 
 static inline struct list_head *ptype_head(const struct packet_type *pt)
 {
-    if (pt->type == htons(ETH_P_ALL))
-        return pt->dev ? &pt->dev->ptype_all : &net_hotdata.ptype_all;
-    else
-        return pt->dev ? &pt->dev->ptype_specific :
+    if (pt->type == htons(ETH_P_ALL)) {
+        if (!pt->af_packet_net && !pt->dev)
+            return NULL;
+
+        return pt->dev ? &pt->dev->ptype_all :
+                         &pt->af_packet_net->ptype_all;
+    }
+
+    if (pt->dev)
+        return &pt->dev->ptype_specific;
+
+    return pt->af_packet_net ? &pt->af_packet_net->ptype_specific :
                                &ptype_base[ntohs(pt->type) & PTYPE_HASH_MASK];
 }
 
@@ -594,6 +602,9 @@ void dev_add_pack(struct packet_type *pt)
 {
     struct list_head *head = ptype_head(pt);
 
+    if (WARN_ON_ONCE(!head))
+        return;
+
     spin_lock(&ptype_lock);
     list_add_rcu(&pt->list, head);
     spin_unlock(&ptype_lock);
@@ -618,6 +629,9 @@ void __dev_remove_pack(struct packet_type *pt)
     struct list_head *head = ptype_head(pt);
     struct packet_type *pt1;
 
+    if (!head)
+        return;
+
     spin_lock(&ptype_lock);
 
     list_for_each_entry(pt1, head, list) {
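After the three hunks above, ptype_head() can return NULL when a packet_type has neither a device nor an owning namespace, and dev_add_pack()/__dev_remove_pack() bail out in that case. The sketch below mirrors only that selection rule with hypothetical types; the caller is expected to check for NULL.

/* Hypothetical types; only the head-selection rule is illustrated. */
#include <linux/list.h>

struct demo_owner {
    struct list_head ptype_all;
};

struct demo_packet_type {
    struct demo_owner *dev;            /* stand-in for pt->dev */
    struct demo_owner *af_packet_net;  /* stand-in for pt->af_packet_net */
};

static struct list_head *demo_ptype_head(const struct demo_packet_type *pt)
{
    if (pt->dev)
        return &pt->dev->ptype_all;
    if (pt->af_packet_net)
        return &pt->af_packet_net->ptype_all;
    return NULL;    /* caller must check and refuse the registration */
}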
@@ -2271,16 +2285,21 @@ static inline bool skb_loop_sk(struct packet_type *ptype, struct sk_buff *skb)
 }
 
 /**
- * dev_nit_active - return true if any network interface taps are in use
+ * dev_nit_active_rcu - return true if any network interface taps are in use
+ *
+ * The caller must hold the RCU lock
  *
  * @dev: network device to check for the presence of taps
  */
-bool dev_nit_active(struct net_device *dev)
+bool dev_nit_active_rcu(const struct net_device *dev)
 {
-    return !list_empty(&net_hotdata.ptype_all) ||
+    /* Callers may hold either RCU or RCU BH lock */
+    WARN_ON_ONCE(!rcu_read_lock_held() && !rcu_read_lock_bh_held());
+
+    return !list_empty(&dev_net(dev)->ptype_all) ||
            !list_empty(&dev->ptype_all);
 }
-EXPORT_SYMBOL_GPL(dev_nit_active);
+EXPORT_SYMBOL_GPL(dev_nit_active_rcu);
 
 /*
  * Support routine. Sends outgoing frames to any network
@@ -2289,11 +2308,12 @@ EXPORT_SYMBOL_GPL(dev_nit_active);
 
 void dev_queue_xmit_nit(struct sk_buff *skb, struct net_device *dev)
 {
-    struct list_head *ptype_list = &net_hotdata.ptype_all;
     struct packet_type *ptype, *pt_prev = NULL;
+    struct list_head *ptype_list;
     struct sk_buff *skb2 = NULL;
 
     rcu_read_lock();
+    ptype_list = &dev_net_rcu(dev)->ptype_all;
 again:
     list_for_each_entry_rcu(ptype, ptype_list, list) {
         if (READ_ONCE(ptype->ignore_outgoing))
@@ -2337,7 +2357,7 @@ again:
         pt_prev = ptype;
     }
 
-    if (ptype_list == &net_hotdata.ptype_all) {
+    if (ptype_list != &dev->ptype_all) {
         ptype_list = &dev->ptype_all;
         goto again;
     }
@@ -3540,6 +3560,18 @@ static netdev_features_t gso_features_check(const struct sk_buff *skb,
             features &= ~NETIF_F_TSO_MANGLEID;
     }
 
+    /* NETIF_F_IPV6_CSUM does not support IPv6 extension headers,
+     * so neither does TSO that depends on it.
+     */
+    if (features & NETIF_F_IPV6_CSUM &&
+        (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV6 ||
+         (skb_shinfo(skb)->gso_type & SKB_GSO_UDP_L4 &&
+          vlan_get_protocol(skb) == htons(ETH_P_IPV6))) &&
+        skb_transport_header_was_set(skb) &&
+        skb_network_header_len(skb) != sizeof(struct ipv6hdr) &&
+        !ipv6_has_hopopt_jumbo(skb))
+        features &= ~(NETIF_F_IPV6_CSUM | NETIF_F_TSO6 | NETIF_F_GSO_UDP_L4);
+
     return features;
 }
 
@@ -3580,7 +3612,7 @@ static int xmit_one(struct sk_buff *skb, struct net_device *dev,
     unsigned int len;
     int rc;
 
-    if (dev_nit_active(dev))
+    if (dev_nit_active_rcu(dev))
         dev_queue_xmit_nit(skb, dev);
 
     len = skb->len;
@@ -5514,7 +5546,8 @@ another_round:
     if (pfmemalloc)
         goto skip_taps;
 
-    list_for_each_entry_rcu(ptype, &net_hotdata.ptype_all, list) {
+    list_for_each_entry_rcu(ptype, &dev_net_rcu(skb->dev)->ptype_all,
+                            list) {
         if (pt_prev)
             ret = deliver_skb(skb, pt_prev, orig_dev);
         pt_prev = ptype;
@@ -5626,6 +5659,14 @@ check_vlan_id:
         deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
                                &ptype_base[ntohs(type) &
                                            PTYPE_HASH_MASK]);
+
+        /* orig_dev and skb->dev could belong to different netns;
+         * Even in such case we need to traverse only the list
+         * coming from skb->dev, as the ptype owner (packet socket)
+         * will use dev_net(skb->dev) to do namespace filtering.
+         */
+        deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
+                               &dev_net_rcu(skb->dev)->ptype_specific);
     }
 
     deliver_ptype_list_skb(skb, &pt_prev, orig_dev, type,
@@ -7,7 +7,6 @@
 
 struct net_hotdata net_hotdata __cacheline_aligned = {
     .offload_base = LIST_HEAD_INIT(net_hotdata.offload_base),
-    .ptype_all = LIST_HEAD_INIT(net_hotdata.ptype_all),
     .gro_normal_batch = 8,
 
     .netdev_budget = 300,
@@ -185,7 +185,13 @@ static void *ptype_get_idx(struct seq_file *seq, loff_t pos)
         }
     }
 
-    list_for_each_entry_rcu(pt, &net_hotdata.ptype_all, list) {
+    list_for_each_entry_rcu(pt, &seq_file_net(seq)->ptype_all, list) {
+        if (i == pos)
+            return pt;
+        ++i;
+    }
+
+    list_for_each_entry_rcu(pt, &seq_file_net(seq)->ptype_specific, list) {
         if (i == pos)
             return pt;
         ++i;
@@ -210,6 +216,7 @@ static void *ptype_seq_start(struct seq_file *seq, loff_t *pos)
 
 static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
 {
+    struct net *net = seq_file_net(seq);
     struct net_device *dev;
     struct packet_type *pt;
     struct list_head *nxt;
@@ -232,15 +239,22 @@ static void *ptype_seq_next(struct seq_file *seq, void *v, loff_t *pos)
                 goto found;
             }
         }
 
-        nxt = net_hotdata.ptype_all.next;
-        goto ptype_all;
+        nxt = net->ptype_all.next;
+        goto net_ptype_all;
     }
 
-    if (pt->type == htons(ETH_P_ALL)) {
-ptype_all:
-        if (nxt != &net_hotdata.ptype_all)
+    if (pt->af_packet_net) {
+net_ptype_all:
+        if (nxt != &net->ptype_all && nxt != &net->ptype_specific)
             goto found;
+
+        if (nxt == &net->ptype_all) {
+            /* continue with ->ptype_specific if it's not empty */
+            nxt = net->ptype_specific.next;
+            if (nxt != &net->ptype_specific)
+                goto found;
+        }
         hash = 0;
         nxt = ptype_base[0].next;
     } else
@@ -334,6 +334,9 @@ static __net_init void preinit_net(struct net *net, struct user_namespace *user_ns)
     idr_init(&net->netns_ids);
     spin_lock_init(&net->nsid_lock);
     mutex_init(&net->ipv4.ra_mutex);
+
+    INIT_LIST_HEAD(&net->ptype_all);
+    INIT_LIST_HEAD(&net->ptype_specific);
     preinit_net_sysctl(net);
 }
 
@@ -1145,7 +1145,7 @@ restart:
         goto do_error;
 
     while (msg_data_left(msg)) {
-        ssize_t copy = 0;
+        int copy = 0;
 
         skb = tcp_write_queue_tail(sk);
         if (skb)
@@ -804,8 +804,8 @@ static void mld_del_delrec(struct inet6_dev *idev, struct ifmcaddr6 *im)
         } else {
             im->mca_crcount = idev->mc_qrv;
         }
-        in6_dev_put(pmc->idev);
         ip6_mc_clear_src(pmc);
+        in6_dev_put(pmc->idev);
         kfree_rcu(pmc, rcu);
     }
 }
@@ -874,7 +874,7 @@ static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
         return -EINVAL;
     }
 
-    if (p.collect_md) {
+    if (p.collect_md || xi->p.collect_md) {
         NL_SET_ERR_MSG(extack, "collect_md can't be changed");
         return -EINVAL;
     }
@@ -885,11 +885,6 @@ static int xfrmi_changelink(struct net_device *dev, struct nlattr *tb[],
     } else {
         if (xi->dev != dev)
             return -EEXIST;
-        if (xi->p.collect_md) {
-            NL_SET_ERR_MSG(extack,
-                           "device can't be changed to collect_md");
-            return -EINVAL;
-        }
     }
 
     return xfrmi_update(xi, &p);
@@ -3261,7 +3261,7 @@ export CFLAGS="%{build_cflags}"
 # 'make install' for bpf is broken and upstream refuses to fix it.
 # Install the needed files manually.
 %{log_msg "install selftests"}
-for dir in bpf bpf/no_alu32 bpf/progs; do
+for dir in bpf bpf/no_alu32 bpf/cpuv4 bpf/progs; do
     # In ARK, the rpm build continues even if some of the selftests
     # cannot be built. It's not always possible to build selftests,
     # as upstream sometimes dependens on too new llvm version or has
@@ -3277,14 +3277,17 @@ done
 
 %buildroot_save_unstripped "usr/libexec/kselftests/bpf/test_progs"
 %buildroot_save_unstripped "usr/libexec/kselftests/bpf/test_progs-no_alu32"
+%buildroot_save_unstripped "usr/libexec/kselftests/bpf/test_progs-cpuv4"
 
 # The urandom_read binary doesn't pass the check-rpaths check and upstream
 # refuses to fix it. So, we save it to buildroot_unstripped and delete it so it
 # will be hidden from check-rpaths and will automatically get restored later.
 %buildroot_save_unstripped "usr/libexec/kselftests/bpf/urandom_read"
 %buildroot_save_unstripped "usr/libexec/kselftests/bpf/no_alu32/urandom_read"
+%buildroot_save_unstripped "usr/libexec/kselftests/bpf/cpuv4/urandom_read"
 rm -f %{buildroot}/usr/libexec/kselftests/bpf/urandom_read
 rm -f %{buildroot}/usr/libexec/kselftests/bpf/no_alu32/urandom_read
+rm -f %{buildroot}/usr/libexec/kselftests/bpf/cpuv4/urandom_read
 
 popd
 %{log_msg "end build selftests"}