Import of kernel-6.12.0-55.30.1.el10_0
parent eee6502c4c
commit e9ab258a78
@@ -12,7 +12,7 @@ RHEL_MINOR = 0
 #
 # Use this spot to avoid future merge conflicts.
 # Do not trim this comment.
-RHEL_RELEASE = 55.29.1
+RHEL_RELEASE = 55.30.1
 
 #
 # RHEL_REBASE_NUM
@@ -171,7 +171,6 @@ void zpci_bus_scan_busses(void)
 static bool zpci_bus_is_multifunction_root(struct zpci_dev *zdev)
 {
 	return !s390_pci_no_rid && zdev->rid_available &&
-	       zpci_is_device_configured(zdev) &&
 	       !zdev->vfn;
 }
 
@@ -332,6 +331,20 @@ error:
 	return rc;
 }
 
+static bool zpci_bus_is_isolated_vf(struct zpci_bus *zbus, struct zpci_dev *zdev)
+{
+	struct pci_dev *pdev;
+
+	if (!zdev->vfn)
+		return false;
+
+	pdev = zpci_iov_find_parent_pf(zbus, zdev);
+	if (!pdev)
+		return true;
+	pci_dev_put(pdev);
+	return false;
+}
+
 int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops)
 {
 	bool topo_is_tid = zdev->tid_avail;
@@ -346,6 +359,15 @@ int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops)
 
 	topo = topo_is_tid ? zdev->tid : zdev->pchid;
 	zbus = zpci_bus_get(topo, topo_is_tid);
+	/*
+	 * An isolated VF gets its own domain/bus even if there exists
+	 * a matching domain/bus already
+	 */
+	if (zbus && zpci_bus_is_isolated_vf(zbus, zdev)) {
+		zpci_bus_put(zbus);
+		zbus = NULL;
+	}
+
 	if (!zbus) {
 		zbus = zpci_bus_alloc(topo, topo_is_tid);
 		if (!zbus)
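The two hunks above implement one rule: a VF whose parent PF cannot be found on the bus it would join is "isolated" and gets its own domain/bus instead of sharing an existing one. A minimal userspace sketch of that decision follows; the types are hypothetical stand-ins, not the kernel's struct zpci_bus/struct zpci_dev, and the refcounting of the found PF is elided:

#include <stdbool.h>
#include <stddef.h>
#include <stdio.h>

#define FUNCS_PER_BUS 8			/* arbitrary bound for the sketch */

struct pf { int devfn; };

struct bus {
	struct pf *pfs[FUNCS_PER_BUS];	/* configured PFs on this bus */
};

/* stand-in for zpci_iov_find_parent_pf(): any configured PF counts here */
static struct pf *find_parent_pf(const struct bus *b)
{
	for (int i = 0; i < FUNCS_PER_BUS; i++)
		if (b->pfs[i])
			return b->pfs[i];
	return NULL;
}

/* mirrors zpci_bus_is_isolated_vf(): non-VFs (vfn == 0) are never isolated */
static bool is_isolated_vf(const struct bus *b, int vfn)
{
	if (!vfn)
		return false;
	return find_parent_pf(b) == NULL;
}

int main(void)
{
	struct pf pf0 = { .devfn = 0 };
	struct bus with_pf = { .pfs = { &pf0 } };
	struct bus no_pf = { 0 };

	/* a VF (vfn == 1) with a visible parent shares the bus ... */
	printf("with PF: isolated=%d\n", is_isolated_vf(&with_pf, 1));
	/* ... without one it gets a fresh domain/bus */
	printf("without PF: isolated=%d\n", is_isolated_vf(&no_pf, 1));
	return 0;
}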
@@ -60,18 +60,35 @@ static int zpci_iov_link_virtfn(struct pci_dev *pdev, struct pci_dev *virtfn, in
 	return 0;
 }
 
-int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn)
+/**
+ * zpci_iov_find_parent_pf - Find the parent PF, if any, of the given function
+ * @zbus: The bus that the PCI function is on, or would be added on
+ * @zdev: The PCI function
+ *
+ * Finds the parent PF, if it exists and is configured, of the given PCI function
+ * and increments its refcount. The PF is searched for on the provided bus so the
+ * caller has to ensure that this is the correct bus to search. This function may
+ * be used before adding the PCI function to a zbus.
+ *
+ * Return: Pointer to the struct pci_dev of the parent PF or NULL if it is not
+ * found. If the function is not a VF or has no RequesterID information,
+ * NULL is returned as well.
+ */
+struct pci_dev *zpci_iov_find_parent_pf(struct zpci_bus *zbus, struct zpci_dev *zdev)
 {
-	int i, cand_devfn;
-	struct zpci_dev *zdev;
+	int i, vfid, devfn, cand_devfn;
 	struct pci_dev *pdev;
-	int vfid = vfn - 1; /* Linux' vfid's start at 0 vfn at 1*/
-	int rc = 0;
 
 	if (!zbus->multifunction)
-		return 0;
-	/* If the parent PF for the given VF is also configured in the
+		return NULL;
+	/* Non-VFs and VFs without RID available don't have a parent */
+	if (!zdev->vfn || !zdev->rid_available)
+		return NULL;
+	/* Linux vfid starts at 0 vfn at 1 */
+	vfid = zdev->vfn - 1;
+	devfn = zdev->rid & ZPCI_RID_MASK_DEVFN;
+	/*
+	 * If the parent PF for the given VF is also configured in the
 	 * instance, it must be on the same zbus.
 	 * We can then identify the parent PF by checking what
 	 * devfn the VF would have if it belonged to that PF using the PF's
@@ -85,15 +102,26 @@ int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn
 			if (!pdev)
 				continue;
 			cand_devfn = pci_iov_virtfn_devfn(pdev, vfid);
-			if (cand_devfn == virtfn->devfn) {
-				rc = zpci_iov_link_virtfn(pdev, virtfn, vfid);
-				/* balance pci_get_slot() */
-				pci_dev_put(pdev);
-				break;
-			}
+			if (cand_devfn == devfn)
+				return pdev;
 			/* balance pci_get_slot() */
 			pci_dev_put(pdev);
 		}
 	}
+	return NULL;
+}
+
+int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn)
+{
+	struct zpci_dev *zdev = to_zpci(virtfn);
+	struct pci_dev *pdev_pf;
+	int rc = 0;
+
+	pdev_pf = zpci_iov_find_parent_pf(zbus, zdev);
+	if (pdev_pf) {
+		/* Linux' vfids start at 0 while zdev->vfn starts at 1 */
+		rc = zpci_iov_link_virtfn(pdev_pf, virtfn, zdev->vfn - 1);
+		pci_dev_put(pdev_pf);
+	}
 	return rc;
 }
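The parent match itself is devfn arithmetic: the VF's own devfn is extracted from its RID, and each candidate PF is asked (via pci_iov_virtfn_devfn()) what devfn its vfid-th child would have. A sketch of that comparison; the mask value and the linear child layout are assumptions for illustration only (real SR-IOV derives child devfns from the capability's VF offset and stride):

#include <stdio.h>

#define RID_MASK_DEVFN 0x00ff	/* assumption: devfn is the RID's low byte */

/* simplified stand-in for pci_iov_virtfn_devfn(): children laid out
 * linearly right after the PF */
static int virtfn_devfn(int pf_devfn, int vfid)
{
	return pf_devfn + 1 + vfid;
}

int main(void)
{
	int rid = 0x2a03;	/* RID of the VF being registered */
	int vfn = 3;		/* VF number as reported, 1-based */
	int vfid = vfn - 1;	/* Linux vfid, 0-based */
	int devfn = rid & RID_MASK_DEVFN;
	int pf_devfn = 0x00;	/* candidate PF from the bus's function list */

	if (virtfn_devfn(pf_devfn, vfid) == devfn)
		printf("PF at devfn 0x%02x is the parent\n", pf_devfn);
	else
		printf("PF at devfn 0x%02x is not the parent\n", pf_devfn);
	return 0;
}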
@@ -17,6 +17,8 @@ void zpci_iov_map_resources(struct pci_dev *pdev);
 
 int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn);
 
+struct pci_dev *zpci_iov_find_parent_pf(struct zpci_bus *zbus, struct zpci_dev *zdev);
+
 #else /* CONFIG_PCI_IOV */
 static inline void zpci_iov_remove_virtfn(struct pci_dev *pdev, int vfn) {}
 
@@ -26,5 +28,10 @@ static inline int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *v
 {
 	return 0;
 }
+
+static inline struct pci_dev *zpci_iov_find_parent_pf(struct zpci_bus *zbus, struct zpci_dev *zdev)
+{
+	return NULL;
+}
 #endif /* CONFIG_PCI_IOV */
 #endif /* __S390_PCI_IOV_h */
@@ -366,12 +366,9 @@ EXPORT_SYMBOL(iw_cm_disconnect);
 /*
  * CM_ID <-- DESTROYING
  *
- * Clean up all resources associated with the connection and release
- * the initial reference taken by iw_create_cm_id.
- *
- * Returns true if and only if the last cm_id_priv reference has been dropped.
+ * Clean up all resources associated with the connection.
  */
-static bool destroy_cm_id(struct iw_cm_id *cm_id)
+static void destroy_cm_id(struct iw_cm_id *cm_id)
 {
 	struct iwcm_id_private *cm_id_priv;
 	struct ib_qp *qp;
@@ -440,20 +437,22 @@ static bool destroy_cm_id(struct iw_cm_id *cm_id)
 		iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
 		iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
 	}
 
-	return iwcm_deref_id(cm_id_priv);
 }
 
 /*
- * This function is only called by the application thread and cannot
- * be called by the event thread. The function will wait for all
- * references to be released on the cm_id and then kfree the cm_id
- * object.
+ * Destroy cm_id. If the cm_id still has other references, wait for all
+ * references to be released on the cm_id and then release the initial
+ * reference taken by iw_create_cm_id.
  */
 void iw_destroy_cm_id(struct iw_cm_id *cm_id)
 {
-	if (!destroy_cm_id(cm_id))
+	struct iwcm_id_private *cm_id_priv;
+
+	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+	destroy_cm_id(cm_id);
+	if (refcount_read(&cm_id_priv->refcount) > 1)
 		flush_workqueue(iwcm_wq);
+	iwcm_deref_id(cm_id_priv);
 }
 EXPORT_SYMBOL(iw_destroy_cm_id);
 
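The reworked teardown decouples cleanup from reference dropping: destroy_cm_id() no longer drops the initial reference, and iw_destroy_cm_id() only flushes the workqueue when other references (queued work items) still exist, then drops the creation reference itself. A userspace model of that ordering with C11 atomics; all names are stand-ins and flush() stands in for flush_workqueue():

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct cm_id_priv {
	atomic_int refcount;	/* starts at 1: the iw_create_cm_id() reference */
};

static void deref(struct cm_id_priv *p)
{
	if (atomic_fetch_sub(&p->refcount, 1) == 1) {
		printf("last reference dropped, freeing\n");
		free(p);
	}
}

static void flush(void) { printf("waiting for pending work\n"); }

static void destroy(struct cm_id_priv *p)
{
	(void)p;	/* tear down connection state; drops no references */
}

int main(void)
{
	struct cm_id_priv *p = malloc(sizeof(*p));
	atomic_init(&p->refcount, 2);	/* initial ref + one queued work item */

	destroy(p);
	if (atomic_load(&p->refcount) > 1)
		flush();	/* in the kernel, the work item derefs here */
	deref(p);		/* simulate the work item finishing */
	deref(p);		/* drop the initial reference: frees */
	return 0;
}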
@@ -1033,8 +1032,10 @@ static void cm_work_handler(struct work_struct *_work)
 
 		if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
 			ret = process_event(cm_id_priv, &levent);
-			if (ret)
-				WARN_ON_ONCE(destroy_cm_id(&cm_id_priv->id));
+			if (ret) {
+				destroy_cm_id(&cm_id_priv->id);
+				WARN_ON_ONCE(iwcm_deref_id(cm_id_priv));
+			}
 		} else
 			pr_debug("dropping event %d\n", levent.event);
 		if (iwcm_deref_id(cm_id_priv))
@@ -998,7 +998,12 @@ int ext4_walk_page_buffers(handle_t *handle, struct inode *inode,
  */
 static int ext4_dirty_journalled_data(handle_t *handle, struct buffer_head *bh)
 {
-	folio_mark_dirty(bh->b_folio);
+	struct folio *folio = bh->b_folio;
+	struct inode *inode = folio->mapping->host;
+
+	/* only regular files have a_ops */
+	if (S_ISREG(inode->i_mode))
+		folio_mark_dirty(folio);
 	return ext4_handle_dirty_metadata(handle, NULL, bh);
 }
 
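The guard keys off the inode mode: only regular files take the folio_mark_dirty() path here, so buffers belonging to directories or symlinks skip it. S_ISREG() is the plain mode-bits test, usable from userspace as well:

#include <stdio.h>
#include <sys/stat.h>

int main(void)
{
	mode_t reg = S_IFREG | 0644;	/* regular file */
	mode_t dir = S_IFDIR | 0755;	/* directory */

	printf("regular file: S_ISREG=%d\n", S_ISREG(reg) != 0);
	printf("directory:    S_ISREG=%d\n", S_ISREG(dir) != 0);
	return 0;
}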
@@ -308,8 +308,19 @@ static inline bool nf_ct_is_expired(const struct nf_conn *ct)
 /* use after obtaining a reference count */
 static inline bool nf_ct_should_gc(const struct nf_conn *ct)
 {
-	return nf_ct_is_expired(ct) && nf_ct_is_confirmed(ct) &&
-	       !nf_ct_is_dying(ct);
+	if (!nf_ct_is_confirmed(ct))
+		return false;
+
+	/* load ct->timeout after is_confirmed() test.
+	 * Pairs with __nf_conntrack_confirm() which:
+	 * 1. Increases ct->timeout value
+	 * 2. Inserts ct into rcu hlist
+	 * 3. Sets the confirmed bit
+	 * 4. Unlocks the hlist lock
+	 */
+	smp_acquire__after_ctrl_dep();
+
+	return nf_ct_is_expired(ct) && !nf_ct_is_dying(ct);
 }
 
 #define NF_CT_DAY	(86400 * HZ)
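The comment spells out a release/acquire pairing: the confirm side publishes ct->timeout before setting the confirmed bit, so the gc side may only read the timeout after observing that bit. A userspace model of the same contract, with C11 atomics standing in for the kernel barriers:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static atomic_bool confirmed;
static atomic_uint timeout;

static void confirm_side(void)
{
	atomic_store_explicit(&timeout, 12345, memory_order_relaxed);
	/* release: timeout is visible once 'confirmed' reads true */
	atomic_store_explicit(&confirmed, true, memory_order_release);
}

static bool should_gc(unsigned int now)
{
	if (!atomic_load_explicit(&confirmed, memory_order_relaxed))
		return false;
	/* models smp_acquire__after_ctrl_dep(): orders the timeout load
	 * after the confirmed test */
	atomic_thread_fence(memory_order_acquire);
	return atomic_load_explicit(&timeout, memory_order_relaxed) < now;
}

int main(void)
{
	confirm_side();
	printf("expired: %d\n", should_gc(99999));
	return 0;
}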
@@ -1414,12 +1414,12 @@ static bool udp_skb_has_head_state(struct sk_buff *skb)
 }
 
 /* fully reclaim rmem/fwd memory allocated for skb */
-static void udp_rmem_release(struct sock *sk, int size, int partial,
-			     bool rx_queue_lock_held)
+static void udp_rmem_release(struct sock *sk, unsigned int size,
+			     int partial, bool rx_queue_lock_held)
 {
 	struct udp_sock *up = udp_sk(sk);
 	struct sk_buff_head *sk_queue;
-	int amt;
+	unsigned int amt;
 
 	if (likely(partial)) {
 		up->forward_deficit += size;
@@ -1439,10 +1439,8 @@ static void udp_rmem_release(struct sock *sk, int size, int partial,
 	if (!rx_queue_lock_held)
 		spin_lock(&sk_queue->lock);
 
-	sk_forward_alloc_add(sk, size);
-	amt = (sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1);
-	sk_forward_alloc_add(sk, -amt);
+	amt = (size + sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1);
+	sk_forward_alloc_add(sk, size - amt);
 
 	if (amt)
 		__sk_mem_reduce_allocated(sk, amt >> PAGE_SHIFT);
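The reclaim arithmetic rounds down to whole pages: everything page-aligned is returned to the global memory accounting and only the sub-page remainder stays in sk_forward_alloc. Worked numbers for the expression above, assuming a 4 KiB page:

#include <stdio.h>

#define PAGE_SIZE 4096u
#define PAGE_SHIFT 12

int main(void)
{
	unsigned int size = 9000;	/* bytes released back to the socket */
	int forward_alloc = 1000;	/* current per-socket slack */
	int partial = 0;		/* keep nothing back */

	/* (9000 + 1000 - 0) & ~4095 == 8192: two whole pages */
	unsigned int amt = (size + forward_alloc - partial) & ~(PAGE_SIZE - 1);
	forward_alloc += size - amt;	/* 1000 + 9000 - 8192 == 1808 */

	printf("reclaimed %u bytes (%u pages), %d left in forward_alloc\n",
	       amt, amt >> PAGE_SHIFT, forward_alloc);
	return 0;
}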
@@ -1514,17 +1512,25 @@ static int udp_rmem_schedule(struct sock *sk, int size)
 int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 {
 	struct sk_buff_head *list = &sk->sk_receive_queue;
-	int rmem, err = -ENOMEM;
+	unsigned int rmem, rcvbuf;
 	spinlock_t *busy = NULL;
-	int size, rcvbuf;
+	int size, err = -ENOMEM;
 
-	/* Immediately drop when the receive queue is full.
-	 * Always allow at least one packet.
-	 */
 	rmem = atomic_read(&sk->sk_rmem_alloc);
 	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
+	size = skb->truesize;
+
+	/* Immediately drop when the receive queue is full.
+	 * Cast to unsigned int performs the boundary check for INT_MAX.
+	 */
+	if (rmem + size > rcvbuf) {
+		if (rcvbuf > INT_MAX >> 1)
+			goto drop;
+
+		/* Always allow at least one packet for small buffer. */
 		if (rmem > rcvbuf)
 			goto drop;
+	}
 
 	/* Under mem pressure, it might be helpful to help udp_recvmsg()
 	 * having linear skbs :
|
|||||||
*/
|
*/
|
||||||
if (rmem > (rcvbuf >> 1)) {
|
if (rmem > (rcvbuf >> 1)) {
|
||||||
skb_condense(skb);
|
skb_condense(skb);
|
||||||
|
size = skb->truesize;
|
||||||
busy = busylock_acquire(sk);
|
busy = busylock_acquire(sk);
|
||||||
}
|
}
|
||||||
size = skb->truesize;
|
|
||||||
udp_set_dev_scratch(skb);
|
udp_set_dev_scratch(skb);
|
||||||
|
|
||||||
atomic_add(size, &sk->sk_rmem_alloc);
|
atomic_add(size, &sk->sk_rmem_alloc);
|
||||||
@@ -1624,7 +1630,7 @@ EXPORT_SYMBOL_GPL(skb_consume_udp);
 
 static struct sk_buff *__first_packet_length(struct sock *sk,
 					     struct sk_buff_head *rcvq,
-					     int *total)
+					     unsigned int *total)
 {
 	struct sk_buff *skb;
 
@@ -1657,8 +1663,8 @@ static int first_packet_length(struct sock *sk)
 {
 	struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue;
 	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
+	unsigned int total = 0;
 	struct sk_buff *skb;
-	int total = 0;
 	int res;
 
 	spin_lock_bh(&rcvq->lock);
@@ -1121,6 +1121,12 @@ static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
 
 	hlist_nulls_add_head_rcu(&loser_ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
 				 &nf_conntrack_hash[repl_idx]);
+	/* confirmed bit must be set after hlist add, not before:
+	 * loser_ct can still be visible to other cpu due to
+	 * SLAB_TYPESAFE_BY_RCU.
+	 */
+	smp_mb__before_atomic();
+	set_bit(IPS_CONFIRMED_BIT, &loser_ct->status);
 
 	NF_CT_STAT_INC(net, clash_resolve);
 	return NF_ACCEPT;
@@ -1257,8 +1263,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	 * user context, else we insert an already 'dead' hash, blocking
 	 * further use of that particular connection -JM.
 	 */
-	ct->status |= IPS_CONFIRMED;
-
 	if (unlikely(nf_ct_is_dying(ct))) {
 		NF_CT_STAT_INC(net, insert_failed);
 		goto dying;
@@ -1290,7 +1294,7 @@ chaintoolong:
 		}
 	}
 
-	/* Timer relative to confirmation time, not original
+	/* Timeout is relative to confirmation time, not original
 	   setting time, otherwise we'd get timer wrap in
 	   weird delay cases. */
 	ct->timeout += nfct_time_stamp;
@@ -1298,11 +1302,21 @@ chaintoolong:
 	__nf_conntrack_insert_prepare(ct);
 
 	/* Since the lookup is lockless, hash insertion must be done after
-	 * starting the timer and setting the CONFIRMED bit. The RCU barriers
-	 * guarantee that no other CPU can find the conntrack before the above
-	 * stores are visible.
+	 * setting ct->timeout. The RCU barriers guarantee that no other CPU
+	 * can find the conntrack before the above stores are visible.
 	 */
 	__nf_conntrack_hash_insert(ct, hash, reply_hash);
 
+	/* IPS_CONFIRMED unset means 'ct not (yet) in hash', conntrack lookups
+	 * skip entries that lack this bit. This happens when a CPU is looking
+	 * at a stale entry that is being recycled due to SLAB_TYPESAFE_BY_RCU
+	 * or when another CPU encounters this entry right after the insertion
+	 * but before the set-confirm-bit below. This bit must not be set until
+	 * after __nf_conntrack_hash_insert().
+	 */
+	smp_mb__before_atomic();
+	set_bit(IPS_CONFIRMED_BIT, &ct->status);
+
 	nf_conntrack_double_unlock(hash, reply_hash);
 	local_bh_enable();
 
@@ -1648,10 +1648,16 @@ hfsc_dequeue(struct Qdisc *sch)
 	if (cl->qdisc->q.qlen != 0) {
 		/* update ed */
 		next_len = qdisc_peek_len(cl->qdisc);
+		/* Check queue length again since some qdisc implementations
+		 * (e.g., netem/codel) might empty the queue during the peek
+		 * operation.
+		 */
+		if (cl->qdisc->q.qlen != 0) {
 			if (realtime)
 				update_ed(cl, next_len);
 			else
 				update_d(cl, next_len);
+		}
 	} else {
 		/* the class becomes passive */
 		eltree_remove(cl);
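The re-check exists because qdisc_peek_len() on a child qdisc is not read-only: netem or codel may drop expired packets while peeking, leaving the queue empty between the two length tests. A toy model of that hazard and the guarded schedule:

#include <stdio.h>

struct queue {
	int len;
	int head_expired;
};

/* a peek that behaves like a timeout-dropping child qdisc */
static int peek_len(struct queue *q)
{
	while (q->len && q->head_expired) {
		q->len--;		/* drop the expired head */
		q->head_expired = 0;
	}
	return q->len ? 100 : 0;	/* pretend remaining packets are 100 bytes */
}

int main(void)
{
	struct queue q = { .len = 1, .head_expired = 1 };

	if (q.len != 0) {
		int next_len = peek_len(&q);
		/* re-check, mirroring the fix: the peek may have drained us */
		if (q.len != 0)
			printf("schedule next packet of %d bytes\n", next_len);
		else
			printf("queue drained during peek, nothing to schedule\n");
	}
	return 0;
}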
@@ -704,8 +704,10 @@ static void tipc_topsrv_stop(struct net *net)
 	for (id = 0; srv->idr_in_use; id++) {
 		con = idr_find(&srv->conn_idr, id);
 		if (con) {
+			conn_get(con);
 			spin_unlock_bh(&srv->idr_lock);
 			tipc_conn_close(con);
+			conn_put(con);
 			spin_lock_bh(&srv->idr_lock);
 		}
 	}
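The fix pins the connection with conn_get() before idr_lock is dropped, so nothing racing with tipc_conn_close() can free the object while it runs unlocked; the pin is released afterwards. A minimal refcount model of that discipline (names are stand-ins):

#include <stdatomic.h>
#include <stdio.h>
#include <stdlib.h>

struct conn {
	atomic_int ref;
};

static struct conn *conn_get(struct conn *c)
{
	atomic_fetch_add(&c->ref, 1);
	return c;
}

static void conn_put(struct conn *c)
{
	if (atomic_fetch_sub(&c->ref, 1) == 1) {
		printf("freeing conn\n");
		free(c);
	}
}

int main(void)
{
	struct conn *c = malloc(sizeof(*c));
	atomic_init(&c->ref, 1);	/* the idr's reference */

	conn_get(c);	/* pin before unlocking: ref == 2 */
	conn_put(c);	/* other side drops the idr ref: no free yet */
	conn_put(c);	/* our pin released: now it is freed */
	return 0;
}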
@@ -407,6 +407,8 @@ EXPORT_SYMBOL_GPL(vsock_enqueue_accept);
 
 static bool vsock_use_local_transport(unsigned int remote_cid)
 {
+	lockdep_assert_held(&vsock_register_mutex);
+
 	if (!transport_local)
 		return false;
 
@@ -464,6 +466,8 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
 
 	remote_flags = vsk->remote_addr.svm_flags;
 
+	mutex_lock(&vsock_register_mutex);
+
 	switch (sk->sk_type) {
 	case SOCK_DGRAM:
 		new_transport = transport_dgram;
@@ -479,12 +483,15 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
 		new_transport = transport_h2g;
 		break;
 	default:
-		return -ESOCKTNOSUPPORT;
+		ret = -ESOCKTNOSUPPORT;
+		goto err;
 	}
 
 	if (vsk->transport) {
-		if (vsk->transport == new_transport)
-			return 0;
+		if (vsk->transport == new_transport) {
+			ret = 0;
+			goto err;
+		}
 
 		/* transport->release() must be called with sock lock acquired.
 		 * This path can only be taken during vsock_connect(), where we
@@ -499,8 +506,16 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
 	/* We increase the module refcnt to prevent the transport unloading
 	 * while there are open sockets assigned to it.
 	 */
-	if (!new_transport || !try_module_get(new_transport->module))
-		return -ENODEV;
+	if (!new_transport || !try_module_get(new_transport->module)) {
+		ret = -ENODEV;
+		goto err;
+	}
+
+	/* It's safe to release the mutex after a successful try_module_get().
+	 * Whichever transport `new_transport` points at, it won't go away until
+	 * the last module_put() below or in vsock_deassign_transport().
+	 */
+	mutex_unlock(&vsock_register_mutex);
 
 	if (sk->sk_type == SOCK_SEQPACKET) {
 		if (!new_transport->seqpacket_allow ||
@@ -519,6 +534,9 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
 	vsk->transport = new_transport;
 
 	return 0;
+err:
+	mutex_unlock(&vsock_register_mutex);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(vsock_assign_transport);
 
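The pattern added across these hunks holds vsock_register_mutex from transport selection through try_module_get(), and only drops it once the module is pinned; every early exit funnels through err: so the mutex is always released. A compressed userspace sketch of that locking shape, with all names hypothetical and a try_module_get() stand-in that never fails:

#include <pthread.h>
#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>

static pthread_mutex_t register_mutex = PTHREAD_MUTEX_INITIALIZER;

struct transport {
	atomic_int module_refs;
};

static struct transport h2g;
static struct transport *transport_h2g = &h2g;	/* cleared by unregister */

static bool try_module_get(struct transport *t)
{
	atomic_fetch_add(&t->module_refs, 1);
	return true;	/* the real try_module_get() can fail during unload */
}

static int assign_transport(void)
{
	struct transport *new_transport;
	int ret;

	pthread_mutex_lock(&register_mutex);
	new_transport = transport_h2g;	/* snapshot under the lock */
	if (!new_transport || !try_module_get(new_transport)) {
		ret = -1;
		goto err;
	}
	/* pinned: safe to drop the lock before any slow work */
	pthread_mutex_unlock(&register_mutex);
	return 0;
err:
	pthread_mutex_unlock(&register_mutex);
	return ret;
}

int main(void)
{
	printf("assign_transport: %d\n", assign_transport());
	return 0;
}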