Import of kernel-6.12.0-55.30.1.el10_0
parent eee6502c4c
commit e9ab258a78
@@ -12,7 +12,7 @@ RHEL_MINOR = 0
 #
 # Use this spot to avoid future merge conflicts.
 # Do not trim this comment.
-RHEL_RELEASE = 55.29.1
+RHEL_RELEASE = 55.30.1
 
 #
 # RHEL_REBASE_NUM
@@ -171,7 +171,6 @@ void zpci_bus_scan_busses(void)
 static bool zpci_bus_is_multifunction_root(struct zpci_dev *zdev)
 {
 	return !s390_pci_no_rid && zdev->rid_available &&
-	       zpci_is_device_configured(zdev) &&
 	       !zdev->vfn;
 }
 
@@ -332,6 +331,20 @@ error:
 	return rc;
 }
 
+static bool zpci_bus_is_isolated_vf(struct zpci_bus *zbus, struct zpci_dev *zdev)
+{
+	struct pci_dev *pdev;
+
+	if (!zdev->vfn)
+		return false;
+
+	pdev = zpci_iov_find_parent_pf(zbus, zdev);
+	if (!pdev)
+		return true;
+	pci_dev_put(pdev);
+	return false;
+}
+
 int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops)
 {
 	bool topo_is_tid = zdev->tid_avail;
@@ -346,6 +359,15 @@ int zpci_bus_device_register(struct zpci_dev *zdev, struct pci_ops *ops)
 
 	topo = topo_is_tid ? zdev->tid : zdev->pchid;
 	zbus = zpci_bus_get(topo, topo_is_tid);
+	/*
+	 * An isolated VF gets its own domain/bus even if there exists
+	 * a matching domain/bus already
+	 */
+	if (zbus && zpci_bus_is_isolated_vf(zbus, zdev)) {
+		zpci_bus_put(zbus);
+		zbus = NULL;
+	}
+
 	if (!zbus) {
 		zbus = zpci_bus_alloc(topo, topo_is_tid);
 		if (!zbus)
@@ -60,18 +60,35 @@ static int zpci_iov_link_virtfn(struct pci_dev *pdev, struct pci_dev *virtfn, in
 	return 0;
 }
 
-int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn)
+/**
+ * zpci_iov_find_parent_pf - Find the parent PF, if any, of the given function
+ * @zbus: The bus that the PCI function is on, or would be added on
+ * @zdev: The PCI function
+ *
+ * Finds the parent PF, if it exists and is configured, of the given PCI function
+ * and increments its refcount. Th PF is searched for on the provided bus so the
+ * caller has to ensure that this is the correct bus to search. This function may
+ * be used before adding the PCI function to a zbus.
+ *
+ * Return: Pointer to the struct pci_dev of the parent PF or NULL if it not
+ * found. If the function is not a VF or has no RequesterID information,
+ * NULL is returned as well.
+ */
+struct pci_dev *zpci_iov_find_parent_pf(struct zpci_bus *zbus, struct zpci_dev *zdev)
 {
-	int i, cand_devfn;
-	struct zpci_dev *zdev;
+	int i, vfid, devfn, cand_devfn;
 	struct pci_dev *pdev;
-	int vfid = vfn - 1; /* Linux' vfid's start at 0 vfn at 1*/
-	int rc = 0;
 
 	if (!zbus->multifunction)
-		return 0;
-
-	/* If the parent PF for the given VF is also configured in the
+		return NULL;
+	/* Non-VFs and VFs without RID available don't have a parent */
+	if (!zdev->vfn || !zdev->rid_available)
+		return NULL;
+	/* Linux vfid starts at 0 vfn at 1 */
+	vfid = zdev->vfn - 1;
+	devfn = zdev->rid & ZPCI_RID_MASK_DEVFN;
+	/*
+	 * If the parent PF for the given VF is also configured in the
 	 * instance, it must be on the same zbus.
 	 * We can then identify the parent PF by checking what
 	 * devfn the VF would have if it belonged to that PF using the PF's
@@ -85,15 +102,26 @@ int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn
 			if (!pdev)
 				continue;
 			cand_devfn = pci_iov_virtfn_devfn(pdev, vfid);
-			if (cand_devfn == virtfn->devfn) {
-				rc = zpci_iov_link_virtfn(pdev, virtfn, vfid);
-				/* balance pci_get_slot() */
-				pci_dev_put(pdev);
-				break;
-			}
+			if (cand_devfn == devfn)
+				return pdev;
 			/* balance pci_get_slot() */
 			pci_dev_put(pdev);
 		}
 	}
+	return NULL;
+}
+
+int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn)
+{
+	struct zpci_dev *zdev = to_zpci(virtfn);
+	struct pci_dev *pdev_pf;
+	int rc = 0;
+
+	pdev_pf = zpci_iov_find_parent_pf(zbus, zdev);
+	if (pdev_pf) {
+		/* Linux' vfids start at 0 while zdev->vfn starts at 1 */
+		rc = zpci_iov_link_virtfn(pdev_pf, virtfn, zdev->vfn - 1);
+		pci_dev_put(pdev_pf);
+	}
 	return rc;
 }
@@ -17,6 +17,8 @@ void zpci_iov_map_resources(struct pci_dev *pdev);
 
 int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *virtfn, int vfn);
 
+struct pci_dev *zpci_iov_find_parent_pf(struct zpci_bus *zbus, struct zpci_dev *zdev);
+
 #else /* CONFIG_PCI_IOV */
 static inline void zpci_iov_remove_virtfn(struct pci_dev *pdev, int vfn) {}
 
@@ -26,5 +28,10 @@ static inline int zpci_iov_setup_virtfn(struct zpci_bus *zbus, struct pci_dev *v
 {
 	return 0;
 }
+
+static inline struct pci_dev *zpci_iov_find_parent_pf(struct zpci_bus *zbus, struct zpci_dev *zdev)
+{
+	return NULL;
+}
 #endif /* CONFIG_PCI_IOV */
 #endif /* __S390_PCI_IOV_h */
@@ -366,12 +366,9 @@ EXPORT_SYMBOL(iw_cm_disconnect);
 /*
  * CM_ID <-- DESTROYING
  *
- * Clean up all resources associated with the connection and release
- * the initial reference taken by iw_create_cm_id.
- *
- * Returns true if and only if the last cm_id_priv reference has been dropped.
+ * Clean up all resources associated with the connection.
  */
-static bool destroy_cm_id(struct iw_cm_id *cm_id)
+static void destroy_cm_id(struct iw_cm_id *cm_id)
 {
 	struct iwcm_id_private *cm_id_priv;
 	struct ib_qp *qp;
@@ -440,20 +437,22 @@ static bool destroy_cm_id(struct iw_cm_id *cm_id)
 		iwpm_remove_mapinfo(&cm_id->local_addr, &cm_id->m_local_addr);
 		iwpm_remove_mapping(&cm_id->local_addr, RDMA_NL_IWCM);
 	}
-
-	return iwcm_deref_id(cm_id_priv);
 }
 
 /*
- * This function is only called by the application thread and cannot
- * be called by the event thread. The function will wait for all
- * references to be released on the cm_id and then kfree the cm_id
- * object.
+ * Destroy cm_id. If the cm_id still has other references, wait for all
+ * references to be released on the cm_id and then release the initial
+ * reference taken by iw_create_cm_id.
 */
 void iw_destroy_cm_id(struct iw_cm_id *cm_id)
 {
-	if (!destroy_cm_id(cm_id))
+	struct iwcm_id_private *cm_id_priv;
+
+	cm_id_priv = container_of(cm_id, struct iwcm_id_private, id);
+	destroy_cm_id(cm_id);
+	if (refcount_read(&cm_id_priv->refcount) > 1)
 		flush_workqueue(iwcm_wq);
+	iwcm_deref_id(cm_id_priv);
 }
 EXPORT_SYMBOL(iw_destroy_cm_id);
 
@@ -1033,8 +1032,10 @@ static void cm_work_handler(struct work_struct *_work)
 
 		if (!test_bit(IWCM_F_DROP_EVENTS, &cm_id_priv->flags)) {
 			ret = process_event(cm_id_priv, &levent);
-			if (ret)
-				WARN_ON_ONCE(destroy_cm_id(&cm_id_priv->id));
+			if (ret) {
+				destroy_cm_id(&cm_id_priv->id);
+				WARN_ON_ONCE(iwcm_deref_id(cm_id_priv));
+			}
 		} else
 			pr_debug("dropping event %d\n", levent.event);
 		if (iwcm_deref_id(cm_id_priv))
@@ -998,7 +998,12 @@ int ext4_walk_page_buffers(handle_t *handle, struct inode *inode,
  */
 static int ext4_dirty_journalled_data(handle_t *handle, struct buffer_head *bh)
 {
-	folio_mark_dirty(bh->b_folio);
+	struct folio *folio = bh->b_folio;
+	struct inode *inode = folio->mapping->host;
+
+	/* only regular files have a_ops */
+	if (S_ISREG(inode->i_mode))
+		folio_mark_dirty(folio);
 	return ext4_handle_dirty_metadata(handle, NULL, bh);
 }
 
@@ -308,8 +308,19 @@ static inline bool nf_ct_is_expired(const struct nf_conn *ct)
 /* use after obtaining a reference count */
 static inline bool nf_ct_should_gc(const struct nf_conn *ct)
 {
-	return nf_ct_is_expired(ct) && nf_ct_is_confirmed(ct) &&
-	       !nf_ct_is_dying(ct);
+	if (!nf_ct_is_confirmed(ct))
+		return false;
+
+	/* load ct->timeout after is_confirmed() test.
+	 * Pairs with __nf_conntrack_confirm() which:
+	 * 1. Increases ct->timeout value
+	 * 2. Inserts ct into rcu hlist
+	 * 3. Sets the confirmed bit
+	 * 4. Unlocks the hlist lock
+	 */
+	smp_acquire__after_ctrl_dep();
+
+	return nf_ct_is_expired(ct) && !nf_ct_is_dying(ct);
 }
 
 #define NF_CT_DAY (86400 * HZ)
@@ -1414,12 +1414,12 @@ static bool udp_skb_has_head_state(struct sk_buff *skb)
 }
 
 /* fully reclaim rmem/fwd memory allocated for skb */
-static void udp_rmem_release(struct sock *sk, int size, int partial,
-			     bool rx_queue_lock_held)
+static void udp_rmem_release(struct sock *sk, unsigned int size,
+			     int partial, bool rx_queue_lock_held)
 {
 	struct udp_sock *up = udp_sk(sk);
 	struct sk_buff_head *sk_queue;
-	int amt;
+	unsigned int amt;
 
 	if (likely(partial)) {
 		up->forward_deficit += size;
@@ -1439,10 +1439,8 @@ static void udp_rmem_release(struct sock *sk, int size, int partial,
 	if (!rx_queue_lock_held)
 		spin_lock(&sk_queue->lock);
 
-
-	sk_forward_alloc_add(sk, size);
-	amt = (sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1);
-	sk_forward_alloc_add(sk, -amt);
+	amt = (size + sk->sk_forward_alloc - partial) & ~(PAGE_SIZE - 1);
+	sk_forward_alloc_add(sk, size - amt);
 
 	if (amt)
 		__sk_mem_reduce_allocated(sk, amt >> PAGE_SHIFT);
@@ -1514,17 +1512,25 @@ static int udp_rmem_schedule(struct sock *sk, int size)
 int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 {
 	struct sk_buff_head *list = &sk->sk_receive_queue;
-	int rmem, err = -ENOMEM;
+	unsigned int rmem, rcvbuf;
 	spinlock_t *busy = NULL;
-	int size, rcvbuf;
+	int size, err = -ENOMEM;
 
-	/* Immediately drop when the receive queue is full.
-	 * Always allow at least one packet.
-	 */
 	rmem = atomic_read(&sk->sk_rmem_alloc);
 	rcvbuf = READ_ONCE(sk->sk_rcvbuf);
 	size = skb->truesize;
 
+	/* Immediately drop when the receive queue is full.
+	 * Cast to unsigned int performs the boundary check for INT_MAX.
+	 */
+	if (rmem + size > rcvbuf) {
+		if (rcvbuf > INT_MAX >> 1)
+			goto drop;
+
+		/* Always allow at least one packet for small buffer. */
+		if (rmem > rcvbuf)
+			goto drop;
+	}
 
 	/* Under mem pressure, it might be helpful to help udp_recvmsg()
 	 * having linear skbs :
@@ -1534,10 +1540,10 @@ int __udp_enqueue_schedule_skb(struct sock *sk, struct sk_buff *skb)
 	 */
 	if (rmem > (rcvbuf >> 1)) {
 		skb_condense(skb);
-
-		size = skb->truesize;
 		busy = busylock_acquire(sk);
 	}
+	size = skb->truesize;
+
 	udp_set_dev_scratch(skb);
 
 	atomic_add(size, &sk->sk_rmem_alloc);
@@ -1624,7 +1630,7 @@ EXPORT_SYMBOL_GPL(skb_consume_udp);
 
 static struct sk_buff *__first_packet_length(struct sock *sk,
 					     struct sk_buff_head *rcvq,
-					     int *total)
+					     unsigned int *total)
 {
 	struct sk_buff *skb;
 
@@ -1657,8 +1663,8 @@ static int first_packet_length(struct sock *sk)
 {
 	struct sk_buff_head *rcvq = &udp_sk(sk)->reader_queue;
 	struct sk_buff_head *sk_queue = &sk->sk_receive_queue;
+	unsigned int total = 0;
 	struct sk_buff *skb;
-	int total = 0;
 	int res;
 
 	spin_lock_bh(&rcvq->lock);
@@ -1121,6 +1121,12 @@ static int nf_ct_resolve_clash_harder(struct sk_buff *skb, u32 repl_idx)
 
 	hlist_nulls_add_head_rcu(&loser_ct->tuplehash[IP_CT_DIR_REPLY].hnnode,
 				 &nf_conntrack_hash[repl_idx]);
+	/* confirmed bit must be set after hlist add, not before:
+	 * loser_ct can still be visible to other cpu due to
+	 * SLAB_TYPESAFE_BY_RCU.
+	 */
+	smp_mb__before_atomic();
+	set_bit(IPS_CONFIRMED_BIT, &loser_ct->status);
 
 	NF_CT_STAT_INC(net, clash_resolve);
 	return NF_ACCEPT;
@@ -1257,8 +1263,6 @@ __nf_conntrack_confirm(struct sk_buff *skb)
 	 * user context, else we insert an already 'dead' hash, blocking
 	 * further use of that particular connection -JM.
 	 */
-	ct->status |= IPS_CONFIRMED;
-
 	if (unlikely(nf_ct_is_dying(ct))) {
 		NF_CT_STAT_INC(net, insert_failed);
 		goto dying;
@@ -1290,7 +1294,7 @@ chaintoolong:
 		}
 	}
 
-	/* Timer relative to confirmation time, not original
+	/* Timeout is relative to confirmation time, not original
 	   setting time, otherwise we'd get timer wrap in
 	   weird delay cases. */
 	ct->timeout += nfct_time_stamp;
@@ -1298,11 +1302,21 @@ chaintoolong:
 	__nf_conntrack_insert_prepare(ct);
 
 	/* Since the lookup is lockless, hash insertion must be done after
-	 * starting the timer and setting the CONFIRMED bit. The RCU barriers
-	 * guarantee that no other CPU can find the conntrack before the above
-	 * stores are visible.
+	 * setting ct->timeout. The RCU barriers guarantee that no other CPU
+	 * can find the conntrack before the above stores are visible.
 	 */
 	__nf_conntrack_hash_insert(ct, hash, reply_hash);
 
+	/* IPS_CONFIRMED unset means 'ct not (yet) in hash', conntrack lookups
+	 * skip entries that lack this bit. This happens when a CPU is looking
+	 * at a stale entry that is being recycled due to SLAB_TYPESAFE_BY_RCU
+	 * or when another CPU encounters this entry right after the insertion
+	 * but before the set-confirm-bit below. This bit must not be set until
+	 * after __nf_conntrack_hash_insert().
+	 */
+	smp_mb__before_atomic();
+	set_bit(IPS_CONFIRMED_BIT, &ct->status);
+
 	nf_conntrack_double_unlock(hash, reply_hash);
 	local_bh_enable();
@@ -1648,10 +1648,16 @@ hfsc_dequeue(struct Qdisc *sch)
 		if (cl->qdisc->q.qlen != 0) {
 			/* update ed */
 			next_len = qdisc_peek_len(cl->qdisc);
+			/* Check queue length again since some qdisc implementations
+			 * (e.g., netem/codel) might empty the queue during the peek
+			 * operation.
+			 */
+			if (cl->qdisc->q.qlen != 0) {
 			if (realtime)
 				update_ed(cl, next_len);
 			else
 				update_d(cl, next_len);
+			}
 		} else {
 			/* the class becomes passive */
 			eltree_remove(cl);
@@ -704,8 +704,10 @@ static void tipc_topsrv_stop(struct net *net)
 	for (id = 0; srv->idr_in_use; id++) {
 		con = idr_find(&srv->conn_idr, id);
 		if (con) {
+			conn_get(con);
 			spin_unlock_bh(&srv->idr_lock);
 			tipc_conn_close(con);
+			conn_put(con);
 			spin_lock_bh(&srv->idr_lock);
 		}
 	}
@@ -407,6 +407,8 @@ EXPORT_SYMBOL_GPL(vsock_enqueue_accept);
 
 static bool vsock_use_local_transport(unsigned int remote_cid)
 {
+	lockdep_assert_held(&vsock_register_mutex);
+
 	if (!transport_local)
 		return false;
 
@@ -464,6 +466,8 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
 
 	remote_flags = vsk->remote_addr.svm_flags;
 
+	mutex_lock(&vsock_register_mutex);
+
 	switch (sk->sk_type) {
 	case SOCK_DGRAM:
 		new_transport = transport_dgram;
@@ -479,12 +483,15 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
 		new_transport = transport_h2g;
 		break;
 	default:
-		return -ESOCKTNOSUPPORT;
+		ret = -ESOCKTNOSUPPORT;
+		goto err;
 	}
 
 	if (vsk->transport) {
-		if (vsk->transport == new_transport)
-			return 0;
+		if (vsk->transport == new_transport) {
+			ret = 0;
+			goto err;
+		}
 
 		/* transport->release() must be called with sock lock acquired.
 		 * This path can only be taken during vsock_connect(), where we
@@ -499,8 +506,16 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
 	/* We increase the module refcnt to prevent the transport unloading
 	 * while there are open sockets assigned to it.
 	 */
-	if (!new_transport || !try_module_get(new_transport->module))
-		return -ENODEV;
+	if (!new_transport || !try_module_get(new_transport->module)) {
+		ret = -ENODEV;
+		goto err;
+	}
 
+	/* It's safe to release the mutex after a successful try_module_get().
+	 * Whichever transport `new_transport` points at, it won't go away until
+	 * the last module_put() below or in vsock_deassign_transport().
+	 */
+	mutex_unlock(&vsock_register_mutex);
+
 	if (sk->sk_type == SOCK_SEQPACKET) {
 		if (!new_transport->seqpacket_allow ||
@@ -519,6 +534,9 @@ int vsock_assign_transport(struct vsock_sock *vsk, struct vsock_sock *psk)
 	vsk->transport = new_transport;
 
 	return 0;
+err:
+	mutex_unlock(&vsock_register_mutex);
+	return ret;
 }
 EXPORT_SYMBOL_GPL(vsock_assign_transport);
 