Import of kernel-5.14.0-611.54.1.el9_7
This commit is contained in:
parent
5326dccb31
commit
77428886fd
@ -143,3 +143,7 @@ own name.
|
||||
* - ``clock_id``
|
||||
- u64
|
||||
- Clock ID used by the device for registering DPLL devices and pins.
|
||||
* - ``max_mac_per_vf``
|
||||
- u32
|
||||
- Controls the maximum number of MAC address filters that can be assigned
|
||||
to a Virtual Function (VF).
|
||||
|
||||
@ -7,6 +7,40 @@ i40e devlink support
|
||||
This document describes the devlink features implemented by the ``i40e``
|
||||
device driver.
|
||||
|
||||
Parameters
|
||||
==========
|
||||
|
||||
.. list-table:: Generic parameters implemented
|
||||
:widths: 5 5 90
|
||||
|
||||
* - Name
|
||||
- Mode
|
||||
- Notes
|
||||
* - ``max_mac_per_vf``
|
||||
- runtime
|
||||
- Controls the maximum number of MAC addresses a VF can use
|
||||
on i40e devices.
|
||||
|
||||
By default (``0``), the driver enforces its internally calculated per-VF
|
||||
MAC filter limit, which is based on the number of allocated VFs.
|
||||
|
||||
If set to a non-zero value, this parameter acts as a strict cap:
|
||||
the driver will use the user-provided value instead of its internal
|
||||
calculation.
|
||||
|
||||
**Important notes:**
|
||||
|
||||
- This value **must be set before enabling SR-IOV**.
|
||||
Attempting to change it while SR-IOV is enabled will return an error.
|
||||
- MAC filters are a **shared hardware resource** across all VFs.
|
||||
Setting a high value may cause other VFs to be starved of filters.
|
||||
- This value is an **administrative policy**. The hardware may return
|
||||
errors when its absolute limit is reached, regardless of the value
|
||||
set here.
|
||||
|
||||
The default value is ``0`` (internal calculation is used).
|
||||
|
||||
|
||||
Info versions
|
||||
=============
|
||||
|
||||
|
||||
@ -12,7 +12,7 @@ RHEL_MINOR = 7
|
||||
#
|
||||
# Use this spot to avoid future merge conflicts.
|
||||
# Do not trim this comment.
|
||||
RHEL_RELEASE = 611.49.1
|
||||
RHEL_RELEASE = 611.54.1
|
||||
|
||||
#
|
||||
# ZSTREAM
|
||||
|
||||
@ -221,6 +221,7 @@ config CRYPTO_AUTHENC
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_MANAGER
|
||||
select CRYPTO_HASH
|
||||
select CRYPTO_NULL
|
||||
help
|
||||
Authenc: Combined mode wrapper for IPsec.
|
||||
This is required for IPSec.
|
||||
@ -2119,6 +2120,7 @@ config CRYPTO_USER_API_AEAD
|
||||
depends on NET
|
||||
select CRYPTO_AEAD
|
||||
select CRYPTO_SKCIPHER
|
||||
select CRYPTO_NULL
|
||||
select CRYPTO_USER_API
|
||||
help
|
||||
This option enables the user-spaces interface for AEAD
|
||||
|
||||
@ -513,8 +513,10 @@ static int af_alg_alloc_tsgl(struct sock *sk)
|
||||
sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
|
||||
sgl->cur = 0;
|
||||
|
||||
if (sg)
|
||||
if (sg) {
|
||||
sg_unmark_end(sg + MAX_SGL_ENTS - 1);
|
||||
sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
|
||||
}
|
||||
|
||||
list_add_tail(&sgl->list, &ctx->tsgl_list);
|
||||
}
|
||||
@ -1133,6 +1135,8 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
|
||||
|
||||
seglen = min_t(size_t, (maxsize - len),
|
||||
msg_data_left(msg));
|
||||
/* Never pin more pages than the remaining RX accounting budget. */
|
||||
seglen = min_t(size_t, seglen, af_alg_rcvbuf(sk));
|
||||
|
||||
if (list_empty(&areq->rsgl_list)) {
|
||||
rsgl = &areq->first_rsgl;
|
||||
|
||||
@ -26,6 +26,8 @@
|
||||
#include <crypto/internal/aead.h>
|
||||
#include <crypto/scatterwalk.h>
|
||||
#include <crypto/if_alg.h>
|
||||
#include <crypto/skcipher.h>
|
||||
#include <crypto/null.h>
|
||||
#include <linux/init.h>
|
||||
#include <linux/list.h>
|
||||
#include <linux/kernel.h>
|
||||
@ -34,13 +36,19 @@
|
||||
#include <linux/net.h>
|
||||
#include <net/sock.h>
|
||||
|
||||
struct aead_tfm {
|
||||
struct crypto_aead *aead;
|
||||
struct crypto_sync_skcipher *null_tfm;
|
||||
};
|
||||
|
||||
static inline bool aead_sufficient_data(struct sock *sk)
|
||||
{
|
||||
struct alg_sock *ask = alg_sk(sk);
|
||||
struct sock *psk = ask->parent;
|
||||
struct alg_sock *pask = alg_sk(psk);
|
||||
struct af_alg_ctx *ctx = ask->private;
|
||||
struct crypto_aead *tfm = pask->private;
|
||||
struct aead_tfm *aeadc = pask->private;
|
||||
struct crypto_aead *tfm = aeadc->aead;
|
||||
unsigned int as = crypto_aead_authsize(tfm);
|
||||
|
||||
/*
|
||||
@ -56,12 +64,27 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
|
||||
struct alg_sock *ask = alg_sk(sk);
|
||||
struct sock *psk = ask->parent;
|
||||
struct alg_sock *pask = alg_sk(psk);
|
||||
struct crypto_aead *tfm = pask->private;
|
||||
struct aead_tfm *aeadc = pask->private;
|
||||
struct crypto_aead *tfm = aeadc->aead;
|
||||
unsigned int ivsize = crypto_aead_ivsize(tfm);
|
||||
|
||||
return af_alg_sendmsg(sock, msg, size, ivsize);
|
||||
}
|
||||
|
||||
static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm,
|
||||
struct scatterlist *src,
|
||||
struct scatterlist *dst, unsigned int len)
|
||||
{
|
||||
SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
|
||||
|
||||
skcipher_request_set_sync_tfm(skreq, null_tfm);
|
||||
skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP,
|
||||
NULL, NULL);
|
||||
skcipher_request_set_crypt(skreq, src, dst, len, NULL);
|
||||
|
||||
return crypto_skcipher_encrypt(skreq);
|
||||
}
|
||||
|
||||
static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
|
||||
size_t ignored, int flags)
|
||||
{
|
||||
@ -70,7 +93,9 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
|
||||
struct sock *psk = ask->parent;
|
||||
struct alg_sock *pask = alg_sk(psk);
|
||||
struct af_alg_ctx *ctx = ask->private;
|
||||
struct crypto_aead *tfm = pask->private;
|
||||
struct aead_tfm *aeadc = pask->private;
|
||||
struct crypto_aead *tfm = aeadc->aead;
|
||||
struct crypto_sync_skcipher *null_tfm = aeadc->null_tfm;
|
||||
unsigned int as = crypto_aead_authsize(tfm);
|
||||
unsigned int ivsize = crypto_aead_ivsize(tfm);
|
||||
struct af_alg_async_req *areq;
|
||||
@ -189,7 +214,10 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
|
||||
/* Use the RX SGL as source (and destination) for crypto op. */
|
||||
rsgl_src = areq->first_rsgl.sgl.sg;
|
||||
|
||||
memcpy_sglist(rsgl_src, tsgl_src, ctx->aead_assoclen);
|
||||
err = crypto_aead_copy_sgl(null_tfm, tsgl_src, rsgl_src,
|
||||
ctx->aead_assoclen);
|
||||
if (err)
|
||||
goto free;
|
||||
|
||||
/* Initialize the crypto operation */
|
||||
aead_request_set_crypt(&areq->cra_u.aead_req, tsgl_src,
|
||||
@ -293,7 +321,7 @@ static int aead_check_key(struct socket *sock)
|
||||
int err = 0;
|
||||
struct sock *psk;
|
||||
struct alg_sock *pask;
|
||||
struct crypto_aead *tfm;
|
||||
struct aead_tfm *tfm;
|
||||
struct sock *sk = sock->sk;
|
||||
struct alg_sock *ask = alg_sk(sk);
|
||||
|
||||
@ -307,7 +335,7 @@ static int aead_check_key(struct socket *sock)
|
||||
|
||||
err = -ENOKEY;
|
||||
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
|
||||
if (crypto_aead_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
|
||||
if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
|
||||
goto unlock;
|
||||
|
||||
atomic_dec(&pask->nokey_refcnt);
|
||||
@ -381,22 +409,54 @@ static struct proto_ops algif_aead_ops_nokey = {
|
||||
|
||||
static void *aead_bind(const char *name, u32 type, u32 mask)
|
||||
{
|
||||
return crypto_alloc_aead(name, type, mask);
|
||||
struct aead_tfm *tfm;
|
||||
struct crypto_aead *aead;
|
||||
struct crypto_sync_skcipher *null_tfm;
|
||||
|
||||
tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
|
||||
if (!tfm)
|
||||
return ERR_PTR(-ENOMEM);
|
||||
|
||||
aead = crypto_alloc_aead(name, type, mask);
|
||||
if (IS_ERR(aead)) {
|
||||
kfree(tfm);
|
||||
return ERR_CAST(aead);
|
||||
}
|
||||
|
||||
null_tfm = crypto_get_default_null_skcipher();
|
||||
if (IS_ERR(null_tfm)) {
|
||||
crypto_free_aead(aead);
|
||||
kfree(tfm);
|
||||
return ERR_CAST(null_tfm);
|
||||
}
|
||||
|
||||
tfm->aead = aead;
|
||||
tfm->null_tfm = null_tfm;
|
||||
|
||||
return tfm;
|
||||
}
|
||||
|
||||
static void aead_release(void *private)
|
||||
{
|
||||
crypto_free_aead(private);
|
||||
struct aead_tfm *tfm = private;
|
||||
|
||||
crypto_free_aead(tfm->aead);
|
||||
crypto_put_default_null_skcipher();
|
||||
kfree(tfm);
|
||||
}
|
||||
|
||||
static int aead_setauthsize(void *private, unsigned int authsize)
|
||||
{
|
||||
return crypto_aead_setauthsize(private, authsize);
|
||||
struct aead_tfm *tfm = private;
|
||||
|
||||
return crypto_aead_setauthsize(tfm->aead, authsize);
|
||||
}
|
||||
|
||||
static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
|
||||
{
|
||||
return crypto_aead_setkey(private, key, keylen);
|
||||
struct aead_tfm *tfm = private;
|
||||
|
||||
return crypto_aead_setkey(tfm->aead, key, keylen);
|
||||
}
|
||||
|
||||
static void aead_sock_destruct(struct sock *sk)
|
||||
@ -405,7 +465,8 @@ static void aead_sock_destruct(struct sock *sk)
|
||||
struct af_alg_ctx *ctx = ask->private;
|
||||
struct sock *psk = ask->parent;
|
||||
struct alg_sock *pask = alg_sk(psk);
|
||||
struct crypto_aead *tfm = pask->private;
|
||||
struct aead_tfm *aeadc = pask->private;
|
||||
struct crypto_aead *tfm = aeadc->aead;
|
||||
unsigned int ivlen = crypto_aead_ivsize(tfm);
|
||||
|
||||
af_alg_pull_tsgl(sk, ctx->used, NULL);
|
||||
@ -418,9 +479,10 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
|
||||
{
|
||||
struct af_alg_ctx *ctx;
|
||||
struct alg_sock *ask = alg_sk(sk);
|
||||
struct crypto_aead *tfm = private;
|
||||
struct aead_tfm *tfm = private;
|
||||
struct crypto_aead *aead = tfm->aead;
|
||||
unsigned int len = sizeof(*ctx);
|
||||
unsigned int ivlen = crypto_aead_ivsize(tfm);
|
||||
unsigned int ivlen = crypto_aead_ivsize(aead);
|
||||
|
||||
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
|
||||
if (!ctx)
|
||||
@ -447,9 +509,9 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
|
||||
|
||||
static int aead_accept_parent(void *private, struct sock *sk)
|
||||
{
|
||||
struct crypto_aead *tfm = private;
|
||||
struct aead_tfm *tfm = private;
|
||||
|
||||
if (crypto_aead_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
|
||||
if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
|
||||
return -ENOKEY;
|
||||
|
||||
return aead_accept_parent_nokey(private, sk);
|
||||
|
||||
@ -82,8 +82,14 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
|
||||
* If more buffers are to be expected to be processed, process only
|
||||
* full block size buffers.
|
||||
*/
|
||||
if (ctx->more || len < ctx->used)
|
||||
if (ctx->more || len < ctx->used) {
|
||||
if (len < bs) {
|
||||
err = -EINVAL;
|
||||
goto free;
|
||||
}
|
||||
|
||||
len -= len % bs;
|
||||
}
|
||||
|
||||
/*
|
||||
* Create a per request TX SGL for this request which tracks the
|
||||
|
||||
@ -9,6 +9,7 @@
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <crypto/authenc.h>
|
||||
#include <crypto/null.h>
|
||||
#include <crypto/scatterwalk.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/init.h>
|
||||
@ -27,6 +28,7 @@ struct authenc_instance_ctx {
|
||||
struct crypto_authenc_ctx {
|
||||
struct crypto_ahash *auth;
|
||||
struct crypto_skcipher *enc;
|
||||
struct crypto_sync_skcipher *null;
|
||||
};
|
||||
|
||||
struct authenc_request_ctx {
|
||||
@ -172,6 +174,21 @@ out:
|
||||
authenc_request_complete(areq, err);
|
||||
}
|
||||
|
||||
static int crypto_authenc_copy_assoc(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
|
||||
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
|
||||
SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
|
||||
|
||||
skcipher_request_set_sync_tfm(skreq, ctx->null);
|
||||
skcipher_request_set_callback(skreq, aead_request_flags(req),
|
||||
NULL, NULL);
|
||||
skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
|
||||
NULL);
|
||||
|
||||
return crypto_skcipher_encrypt(skreq);
|
||||
}
|
||||
|
||||
static int crypto_authenc_encrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
|
||||
@ -190,7 +207,10 @@ static int crypto_authenc_encrypt(struct aead_request *req)
|
||||
dst = src;
|
||||
|
||||
if (req->src != req->dst) {
|
||||
memcpy_sglist(req->dst, req->src, req->assoclen);
|
||||
err = crypto_authenc_copy_assoc(req);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
|
||||
}
|
||||
|
||||
@ -291,6 +311,7 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
|
||||
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
struct crypto_ahash *auth;
|
||||
struct crypto_skcipher *enc;
|
||||
struct crypto_sync_skcipher *null;
|
||||
int err;
|
||||
|
||||
auth = crypto_spawn_ahash(&ictx->auth);
|
||||
@ -302,8 +323,14 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
|
||||
if (IS_ERR(enc))
|
||||
goto err_free_ahash;
|
||||
|
||||
null = crypto_get_default_null_skcipher();
|
||||
err = PTR_ERR(null);
|
||||
if (IS_ERR(null))
|
||||
goto err_free_skcipher;
|
||||
|
||||
ctx->auth = auth;
|
||||
ctx->enc = enc;
|
||||
ctx->null = null;
|
||||
|
||||
crypto_aead_set_reqsize(
|
||||
tfm,
|
||||
@ -317,6 +344,8 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
|
||||
|
||||
return 0;
|
||||
|
||||
err_free_skcipher:
|
||||
crypto_free_skcipher(enc);
|
||||
err_free_ahash:
|
||||
crypto_free_ahash(auth);
|
||||
return err;
|
||||
@ -328,6 +357,7 @@ static void crypto_authenc_exit_tfm(struct crypto_aead *tfm)
|
||||
|
||||
crypto_free_ahash(ctx->auth);
|
||||
crypto_free_skcipher(ctx->enc);
|
||||
crypto_put_default_null_skcipher();
|
||||
}
|
||||
|
||||
static void crypto_authenc_free(struct aead_instance *inst)
|
||||
|
||||
@ -12,6 +12,7 @@
|
||||
#include <crypto/internal/hash.h>
|
||||
#include <crypto/internal/skcipher.h>
|
||||
#include <crypto/authenc.h>
|
||||
#include <crypto/null.h>
|
||||
#include <crypto/scatterwalk.h>
|
||||
#include <linux/err.h>
|
||||
#include <linux/init.h>
|
||||
@ -30,6 +31,7 @@ struct crypto_authenc_esn_ctx {
|
||||
unsigned int reqoff;
|
||||
struct crypto_ahash *auth;
|
||||
struct crypto_skcipher *enc;
|
||||
struct crypto_sync_skcipher *null;
|
||||
};
|
||||
|
||||
struct authenc_esn_request_ctx {
|
||||
@ -162,6 +164,28 @@ static void crypto_authenc_esn_encrypt_done(struct crypto_async_request *req,
|
||||
authenc_esn_request_complete(areq, err);
|
||||
}
|
||||
|
||||
static int crypto_authenc_esn_copy_sg(struct aead_request *req,
|
||||
struct scatterlist *src,
|
||||
struct scatterlist *dst,
|
||||
unsigned int len)
|
||||
{
|
||||
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
|
||||
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
|
||||
SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
|
||||
|
||||
skcipher_request_set_sync_tfm(skreq, ctx->null);
|
||||
skcipher_request_set_callback(skreq, aead_request_flags(req),
|
||||
NULL, NULL);
|
||||
skcipher_request_set_crypt(skreq, src, dst, len, NULL);
|
||||
|
||||
return crypto_skcipher_encrypt(skreq);
|
||||
}
|
||||
|
||||
static int crypto_authenc_esn_copy(struct aead_request *req, unsigned int len)
|
||||
{
|
||||
return crypto_authenc_esn_copy_sg(req, req->src, req->dst, len);
|
||||
}
|
||||
|
||||
static int crypto_authenc_esn_encrypt(struct aead_request *req)
|
||||
{
|
||||
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
|
||||
@ -183,7 +207,10 @@ static int crypto_authenc_esn_encrypt(struct aead_request *req)
|
||||
dst = src;
|
||||
|
||||
if (req->src != req->dst) {
|
||||
memcpy_sglist(req->dst, req->src, assoclen);
|
||||
err = crypto_authenc_esn_copy(req, assoclen);
|
||||
if (err)
|
||||
return err;
|
||||
|
||||
sg_init_table(areq_ctx->dst, 2);
|
||||
dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, assoclen);
|
||||
}
|
||||
@ -218,6 +245,7 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
|
||||
struct scatterlist *dst = req->dst;
|
||||
u8 *ihash = ohash + crypto_ahash_digestsize(auth);
|
||||
u32 tmp[2];
|
||||
int err;
|
||||
|
||||
if (!authsize)
|
||||
goto decrypt;
|
||||
@ -227,8 +255,11 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
|
||||
scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
|
||||
scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
|
||||
scatterwalk_map_and_copy(tmp, dst, 0, 8, 1);
|
||||
} else
|
||||
memcpy_sglist(dst, src, assoclen);
|
||||
} else {
|
||||
err = crypto_authenc_esn_copy(req, assoclen);
|
||||
if (err)
|
||||
return err;
|
||||
}
|
||||
|
||||
if (crypto_memneq(ihash, ohash, authsize))
|
||||
return -EBADMSG;
|
||||
@ -298,7 +329,10 @@ static int crypto_authenc_esn_decrypt(struct aead_request *req)
|
||||
|
||||
src = scatterwalk_ffwd(areq_ctx->src, src, 8);
|
||||
dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
|
||||
memcpy_sglist(dst, src, assoclen + cryptlen - 8);
|
||||
err = crypto_authenc_esn_copy_sg(req, src, dst,
|
||||
assoclen + cryptlen - 8);
|
||||
if (err)
|
||||
return err;
|
||||
dst = req->dst;
|
||||
}
|
||||
|
||||
@ -322,6 +356,7 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
|
||||
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
|
||||
struct crypto_ahash *auth;
|
||||
struct crypto_skcipher *enc;
|
||||
struct crypto_sync_skcipher *null;
|
||||
int err;
|
||||
|
||||
auth = crypto_spawn_ahash(&ictx->auth);
|
||||
@ -333,8 +368,14 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
|
||||
if (IS_ERR(enc))
|
||||
goto err_free_ahash;
|
||||
|
||||
null = crypto_get_default_null_skcipher();
|
||||
err = PTR_ERR(null);
|
||||
if (IS_ERR(null))
|
||||
goto err_free_skcipher;
|
||||
|
||||
ctx->auth = auth;
|
||||
ctx->enc = enc;
|
||||
ctx->null = null;
|
||||
|
||||
ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth),
|
||||
crypto_ahash_alignmask(auth) + 1);
|
||||
@ -351,6 +392,8 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
|
||||
|
||||
return 0;
|
||||
|
||||
err_free_skcipher:
|
||||
crypto_free_skcipher(enc);
|
||||
err_free_ahash:
|
||||
crypto_free_ahash(auth);
|
||||
return err;
|
||||
@ -362,6 +405,7 @@ static void crypto_authenc_esn_exit_tfm(struct crypto_aead *tfm)
|
||||
|
||||
crypto_free_ahash(ctx->auth);
|
||||
crypto_free_skcipher(ctx->enc);
|
||||
crypto_put_default_null_skcipher();
|
||||
}
|
||||
|
||||
static void crypto_authenc_esn_free(struct aead_instance *inst)
|
||||
@ -400,6 +444,11 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
|
||||
auth = crypto_spawn_ahash_alg(&ctx->auth);
|
||||
auth_base = &auth->base;
|
||||
|
||||
if (auth->digestsize > 0 && auth->digestsize < 4) {
|
||||
err = -EINVAL;
|
||||
goto err_free_inst;
|
||||
}
|
||||
|
||||
err = crypto_grab_skcipher(&ctx->enc, aead_crypto_instance(inst),
|
||||
crypto_attr_alg_name(tb[2]), 0, mask);
|
||||
if (err)
|
||||
|
||||
@ -69,100 +69,6 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(scatterwalk_map_and_copy);
|
||||
|
||||
/**
|
||||
* memcpy_sglist() - Copy data from one scatterlist to another
|
||||
* @dst: The destination scatterlist. Can be NULL if @nbytes == 0.
|
||||
* @src: The source scatterlist. Can be NULL if @nbytes == 0.
|
||||
* @nbytes: Number of bytes to copy
|
||||
*
|
||||
* The scatterlists can describe exactly the same memory, in which case this
|
||||
* function is a no-op. No other overlaps are supported.
|
||||
*
|
||||
* Context: Any context
|
||||
*/
|
||||
void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
unsigned int src_offset, dst_offset;
|
||||
|
||||
if (unlikely(nbytes == 0)) /* in case src and/or dst is NULL */
|
||||
return;
|
||||
|
||||
src_offset = src->offset;
|
||||
dst_offset = dst->offset;
|
||||
for (;;) {
|
||||
/* Compute the length to copy this step. */
|
||||
unsigned int len = min3(src->offset + src->length - src_offset,
|
||||
dst->offset + dst->length - dst_offset,
|
||||
nbytes);
|
||||
struct page *src_page = sg_page(src);
|
||||
struct page *dst_page = sg_page(dst);
|
||||
const void *src_virt;
|
||||
void *dst_virt;
|
||||
|
||||
if (IS_ENABLED(CONFIG_HIGHMEM)) {
|
||||
/* HIGHMEM: we may have to actually map the pages. */
|
||||
const unsigned int src_oip = offset_in_page(src_offset);
|
||||
const unsigned int dst_oip = offset_in_page(dst_offset);
|
||||
const unsigned int limit = PAGE_SIZE;
|
||||
|
||||
/* Further limit len to not cross a page boundary. */
|
||||
len = min3(len, limit - src_oip, limit - dst_oip);
|
||||
|
||||
/* Compute the source and destination pages. */
|
||||
src_page += src_offset / PAGE_SIZE;
|
||||
dst_page += dst_offset / PAGE_SIZE;
|
||||
|
||||
if (src_page != dst_page) {
|
||||
/* Copy between different pages. */
|
||||
memcpy_page(dst_page, dst_oip,
|
||||
src_page, src_oip, len);
|
||||
flush_dcache_page(dst_page);
|
||||
} else if (src_oip != dst_oip) {
|
||||
/* Copy between different parts of same page. */
|
||||
dst_virt = kmap_local_page(dst_page);
|
||||
memcpy(dst_virt + dst_oip, dst_virt + src_oip,
|
||||
len);
|
||||
kunmap_local(dst_virt);
|
||||
flush_dcache_page(dst_page);
|
||||
} /* Else, it's the same memory. No action needed. */
|
||||
} else {
|
||||
/*
|
||||
* !HIGHMEM: no mapping needed. Just work in the linear
|
||||
* buffer of each sg entry. Note that we can cross page
|
||||
* boundaries, as they are not significant in this case.
|
||||
*/
|
||||
src_virt = page_address(src_page) + src_offset;
|
||||
dst_virt = page_address(dst_page) + dst_offset;
|
||||
if (src_virt != dst_virt) {
|
||||
memcpy(dst_virt, src_virt, len);
|
||||
if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE)
|
||||
__scatterwalk_flush_dcache_pages(
|
||||
dst_page, dst_offset, len);
|
||||
} /* Else, it's the same memory. No action needed. */
|
||||
}
|
||||
nbytes -= len;
|
||||
if (nbytes == 0) /* No more to copy? */
|
||||
break;
|
||||
|
||||
/*
|
||||
* There's more to copy. Advance the offsets by the length
|
||||
* copied this step, and advance the sg entries as needed.
|
||||
*/
|
||||
src_offset += len;
|
||||
if (src_offset >= src->offset + src->length) {
|
||||
src = sg_next(src);
|
||||
src_offset = src->offset;
|
||||
}
|
||||
dst_offset += len;
|
||||
if (dst_offset >= dst->offset + dst->length) {
|
||||
dst = sg_next(dst);
|
||||
dst_offset = dst->offset;
|
||||
}
|
||||
}
|
||||
}
|
||||
EXPORT_SYMBOL_GPL(memcpy_sglist);
|
||||
|
||||
struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
|
||||
struct scatterlist *src,
|
||||
unsigned int len)
|
||||
|
||||
@ -303,9 +303,10 @@ struct i801_priv {
|
||||
|
||||
/*
|
||||
* If set to true the host controller registers are reserved for
|
||||
* ACPI AML use.
|
||||
* ACPI AML use. Needs extra protection by acpi_lock.
|
||||
*/
|
||||
bool acpi_reserved;
|
||||
struct mutex acpi_lock;
|
||||
};
|
||||
|
||||
#define FEATURE_SMBUS_PEC BIT(0)
|
||||
@ -893,8 +894,11 @@ static s32 i801_access(struct i2c_adapter *adap, u16 addr,
|
||||
int hwpec, ret;
|
||||
struct i801_priv *priv = i2c_get_adapdata(adap);
|
||||
|
||||
if (priv->acpi_reserved)
|
||||
mutex_lock(&priv->acpi_lock);
|
||||
if (priv->acpi_reserved) {
|
||||
mutex_unlock(&priv->acpi_lock);
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
pm_runtime_get_sync(&priv->pci_dev->dev);
|
||||
|
||||
@ -935,6 +939,7 @@ out:
|
||||
|
||||
pm_runtime_mark_last_busy(&priv->pci_dev->dev);
|
||||
pm_runtime_put_autosuspend(&priv->pci_dev->dev);
|
||||
mutex_unlock(&priv->acpi_lock);
|
||||
return ret;
|
||||
}
|
||||
|
||||
@ -1462,7 +1467,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
|
||||
* further access from the driver itself. This device is now owned
|
||||
* by the system firmware.
|
||||
*/
|
||||
i2c_lock_bus(&priv->adapter, I2C_LOCK_SEGMENT);
|
||||
mutex_lock(&priv->acpi_lock);
|
||||
|
||||
if (!priv->acpi_reserved && i801_acpi_is_smbus_ioport(priv, address)) {
|
||||
priv->acpi_reserved = true;
|
||||
@ -1482,7 +1487,7 @@ i801_acpi_io_handler(u32 function, acpi_physical_address address, u32 bits,
|
||||
else
|
||||
status = acpi_os_write_port(address, (u32)*value, bits);
|
||||
|
||||
i2c_unlock_bus(&priv->adapter, I2C_LOCK_SEGMENT);
|
||||
mutex_unlock(&priv->acpi_lock);
|
||||
|
||||
return status;
|
||||
}
|
||||
@ -1542,6 +1547,7 @@ static int i801_probe(struct pci_dev *dev, const struct pci_device_id *id)
|
||||
priv->adapter.dev.parent = &dev->dev;
|
||||
acpi_use_parent_companion(&priv->adapter.dev);
|
||||
priv->adapter.retries = 3;
|
||||
mutex_init(&priv->acpi_lock);
|
||||
|
||||
priv->pci_dev = dev;
|
||||
priv->features = id->driver_data;
|
||||
|
||||
@ -1225,7 +1225,7 @@ static void alloc_behind_master_bio(struct r1bio *r1_bio,
|
||||
int i = 0;
|
||||
struct bio *behind_bio = NULL;
|
||||
|
||||
behind_bio = bio_alloc_bioset(NULL, vcnt, 0, GFP_NOIO,
|
||||
behind_bio = bio_alloc_bioset(NULL, vcnt, bio->bi_opf, GFP_NOIO,
|
||||
&r1_bio->mddev->bio_set);
|
||||
|
||||
/* discard op, we don't support writezero/writesame yet */
|
||||
|
||||
@ -573,6 +573,10 @@ struct i40e_pf {
|
||||
struct i40e_vf *vf;
|
||||
int num_alloc_vfs; /* actual number of VFs allocated */
|
||||
u32 vf_aq_requests;
|
||||
/* If set to non-zero, the device uses this value
|
||||
* as maximum number of MAC filters per VF.
|
||||
*/
|
||||
u32 max_mac_per_vf;
|
||||
u32 arq_overflows; /* Not fatal, possibly indicative of problems */
|
||||
struct ratelimit_state mdd_message_rate_limit;
|
||||
/* DCBx/DCBNL capability for PF that indicates
|
||||
@ -1277,7 +1281,8 @@ struct i40e_mac_filter *i40e_add_mac_filter(struct i40e_vsi *vsi,
|
||||
const u8 *macaddr);
|
||||
int i40e_del_mac_filter(struct i40e_vsi *vsi, const u8 *macaddr);
|
||||
bool i40e_is_vsi_in_vlan(struct i40e_vsi *vsi);
|
||||
int i40e_count_filters(struct i40e_vsi *vsi);
|
||||
int i40e_count_all_filters(struct i40e_vsi *vsi);
|
||||
int i40e_count_active_filters(struct i40e_vsi *vsi);
|
||||
struct i40e_mac_filter *i40e_find_mac(struct i40e_vsi *vsi, const u8 *macaddr);
|
||||
void i40e_vlan_stripping_enable(struct i40e_vsi *vsi);
|
||||
static inline bool i40e_is_sw_dcb(struct i40e_pf *pf)
|
||||
|
||||
@ -9,6 +9,41 @@
|
||||
static int rh_phys_port_name;
|
||||
module_param(rh_phys_port_name, int, 0644);
|
||||
|
||||
static int i40e_max_mac_per_vf_set(struct devlink *devlink,
|
||||
u32 id,
|
||||
struct devlink_param_gset_ctx *ctx,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
struct i40e_pf *pf = devlink_priv(devlink);
|
||||
|
||||
if (pf->num_alloc_vfs > 0) {
|
||||
NL_SET_ERR_MSG_MOD(extack,
|
||||
"Cannot change max_mac_per_vf while SR-IOV is enabled");
|
||||
return -EBUSY;
|
||||
}
|
||||
|
||||
pf->max_mac_per_vf = ctx->val.vu32;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static int i40e_max_mac_per_vf_get(struct devlink *devlink,
|
||||
u32 id,
|
||||
struct devlink_param_gset_ctx *ctx)
|
||||
{
|
||||
struct i40e_pf *pf = devlink_priv(devlink);
|
||||
|
||||
ctx->val.vu32 = pf->max_mac_per_vf;
|
||||
return 0;
|
||||
}
|
||||
|
||||
static const struct devlink_param i40e_dl_params[] = {
|
||||
DEVLINK_PARAM_GENERIC(MAX_MAC_PER_VF,
|
||||
BIT(DEVLINK_PARAM_CMODE_RUNTIME),
|
||||
i40e_max_mac_per_vf_get,
|
||||
i40e_max_mac_per_vf_set,
|
||||
NULL),
|
||||
};
|
||||
|
||||
static void i40e_info_get_dsn(struct i40e_pf *pf, char *buf, size_t len)
|
||||
{
|
||||
u8 dsn[8];
|
||||
@ -169,7 +204,18 @@ void i40e_free_pf(struct i40e_pf *pf)
|
||||
**/
|
||||
void i40e_devlink_register(struct i40e_pf *pf)
|
||||
{
|
||||
devlink_register(priv_to_devlink(pf));
|
||||
struct devlink *dl = priv_to_devlink(pf);
|
||||
struct device *dev = &pf->pdev->dev;
|
||||
int err;
|
||||
|
||||
err = devlink_params_register(dl, i40e_dl_params,
|
||||
ARRAY_SIZE(i40e_dl_params));
|
||||
if (err)
|
||||
dev_err(dev,
|
||||
"devlink params register failed with error %d", err);
|
||||
|
||||
devlink_register(dl);
|
||||
|
||||
}
|
||||
|
||||
/**
|
||||
@ -180,7 +226,11 @@ void i40e_devlink_register(struct i40e_pf *pf)
|
||||
**/
|
||||
void i40e_devlink_unregister(struct i40e_pf *pf)
|
||||
{
|
||||
devlink_unregister(priv_to_devlink(pf));
|
||||
struct devlink *dl = priv_to_devlink(pf);
|
||||
|
||||
devlink_unregister(dl);
|
||||
devlink_params_unregister(dl, i40e_dl_params,
|
||||
ARRAY_SIZE(i40e_dl_params));
|
||||
}
|
||||
|
||||
/**
|
||||
|
||||
@ -1241,12 +1241,30 @@ void i40e_update_stats(struct i40e_vsi *vsi)
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_count_filters - counts VSI mac filters
|
||||
* i40e_count_all_filters - counts VSI MAC filters
|
||||
* @vsi: the VSI to be searched
|
||||
*
|
||||
* Returns count of mac filters
|
||||
**/
|
||||
int i40e_count_filters(struct i40e_vsi *vsi)
|
||||
* Return: count of MAC filters in any state.
|
||||
*/
|
||||
int i40e_count_all_filters(struct i40e_vsi *vsi)
|
||||
{
|
||||
struct i40e_mac_filter *f;
|
||||
struct hlist_node *h;
|
||||
int bkt, cnt = 0;
|
||||
|
||||
hash_for_each_safe(vsi->mac_filter_hash, bkt, h, f, hlist)
|
||||
cnt++;
|
||||
|
||||
return cnt;
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_count_active_filters - counts VSI MAC filters
|
||||
* @vsi: the VSI to be searched
|
||||
*
|
||||
* Return: count of active MAC filters.
|
||||
*/
|
||||
int i40e_count_active_filters(struct i40e_vsi *vsi)
|
||||
{
|
||||
struct i40e_mac_filter *f;
|
||||
struct hlist_node *h;
|
||||
|
||||
@ -2865,24 +2865,6 @@ error_param:
|
||||
(u8 *)&stats, sizeof(stats));
|
||||
}
|
||||
|
||||
/**
|
||||
* i40e_can_vf_change_mac
|
||||
* @vf: pointer to the VF info
|
||||
*
|
||||
* Return true if the VF is allowed to change its MAC filters, false otherwise
|
||||
*/
|
||||
static bool i40e_can_vf_change_mac(struct i40e_vf *vf)
|
||||
{
|
||||
/* If the VF MAC address has been set administratively (via the
|
||||
* ndo_set_vf_mac command), then deny permission to the VF to
|
||||
* add/delete unicast MAC addresses, unless the VF is trusted
|
||||
*/
|
||||
if (vf->pf_set_mac && !vf->trusted)
|
||||
return false;
|
||||
|
||||
return true;
|
||||
}
|
||||
|
||||
#define I40E_MAX_MACVLAN_PER_HW 3072
|
||||
#define I40E_MAX_MACVLAN_PER_PF(num_ports) (I40E_MAX_MACVLAN_PER_HW / \
|
||||
(num_ports))
|
||||
@ -2921,8 +2903,10 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
|
||||
struct i40e_pf *pf = vf->pf;
|
||||
struct i40e_vsi *vsi = pf->vsi[vf->lan_vsi_idx];
|
||||
struct i40e_hw *hw = &pf->hw;
|
||||
int mac2add_cnt = 0;
|
||||
int i;
|
||||
int i, mac_add_max, mac_add_cnt = 0;
|
||||
bool vf_trusted;
|
||||
|
||||
vf_trusted = test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps);
|
||||
|
||||
for (i = 0; i < al->num_elements; i++) {
|
||||
struct i40e_mac_filter *f;
|
||||
@ -2942,9 +2926,8 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
|
||||
* The VF may request to set the MAC address filter already
|
||||
* assigned to it so do not return an error in that case.
|
||||
*/
|
||||
if (!i40e_can_vf_change_mac(vf) &&
|
||||
!is_multicast_ether_addr(addr) &&
|
||||
!ether_addr_equal(addr, vf->default_lan_addr.addr)) {
|
||||
if (!vf_trusted && !is_multicast_ether_addr(addr) &&
|
||||
vf->pf_set_mac && !ether_addr_equal(addr, vf->default_lan_addr.addr)) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
"VF attempting to override administratively set MAC address, bring down and up the VF interface to resume normal operation\n");
|
||||
return -EPERM;
|
||||
@ -2953,31 +2936,50 @@ static inline int i40e_check_vf_permission(struct i40e_vf *vf,
|
||||
/*count filters that really will be added*/
|
||||
f = i40e_find_mac(vsi, addr);
|
||||
if (!f)
|
||||
++mac2add_cnt;
|
||||
++mac_add_cnt;
|
||||
}
|
||||
|
||||
/* If this VF is not privileged, then we can't add more than a limited
|
||||
* number of addresses. Check to make sure that the additions do not
|
||||
* push us over the limit.
|
||||
/* Determine the maximum number of MAC addresses this VF may use.
|
||||
*
|
||||
* - For untrusted VFs: use a fixed small limit.
|
||||
*
|
||||
* - For trusted VFs: limit is calculated by dividing total MAC
|
||||
* filter pool across all VFs/ports.
|
||||
*
|
||||
* - User can override this by devlink param "max_mac_per_vf".
|
||||
* If set its value is used as a strict cap for both trusted and
|
||||
* untrusted VFs.
|
||||
* Note:
|
||||
* even when overridden, this is a theoretical maximum; hardware
|
||||
* may reject additional MACs if the absolute HW limit is reached.
|
||||
*/
|
||||
if (!test_bit(I40E_VIRTCHNL_VF_CAP_PRIVILEGE, &vf->vf_caps)) {
|
||||
if ((i40e_count_filters(vsi) + mac2add_cnt) >
|
||||
I40E_VC_MAX_MAC_ADDR_PER_VF) {
|
||||
if (!vf_trusted)
|
||||
mac_add_max = I40E_VC_MAX_MAC_ADDR_PER_VF;
|
||||
else
|
||||
mac_add_max = I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs, hw->num_ports);
|
||||
|
||||
if (pf->max_mac_per_vf > 0)
|
||||
mac_add_max = pf->max_mac_per_vf;
|
||||
|
||||
/* VF can replace all its filters in one step, in this case mac_add_max
|
||||
* will be added as active and another mac_add_max will be in
|
||||
* a to-be-removed state. Account for that.
|
||||
*/
|
||||
if ((i40e_count_active_filters(vsi) + mac_add_cnt) > mac_add_max ||
|
||||
(i40e_count_all_filters(vsi) + mac_add_cnt) > 2 * mac_add_max) {
|
||||
if (pf->max_mac_per_vf == mac_add_max && mac_add_max > 0) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
"Cannot add more MAC addresses: VF reached its maximum allowed limit (%d)\n",
|
||||
mac_add_max);
|
||||
return -EPERM;
|
||||
}
|
||||
if (!vf_trusted) {
|
||||
dev_err(&pf->pdev->dev,
|
||||
"Cannot add more MAC addresses, VF is not trusted, switch the VF to trusted to add more functionality\n");
|
||||
return -EPERM;
|
||||
}
|
||||
/* If this VF is trusted, it can use more resources than untrusted.
|
||||
* However to ensure that every trusted VF has appropriate number of
|
||||
* resources, divide whole pool of resources per port and then across
|
||||
* all VFs.
|
||||
*/
|
||||
} else {
|
||||
if ((i40e_count_filters(vsi) + mac2add_cnt) >
|
||||
I40E_VC_MAX_MACVLAN_PER_TRUSTED_VF(pf->num_alloc_vfs,
|
||||
hw->num_ports)) {
|
||||
} else {
|
||||
dev_err(&pf->pdev->dev,
|
||||
"Cannot add more MAC addresses, trusted VF exhausted it's resources\n");
|
||||
"Cannot add more MAC addresses: trusted VF reached its maximum allowed limit (%d)\n",
|
||||
mac_add_max);
|
||||
return -EPERM;
|
||||
}
|
||||
}
|
||||
|
||||
@ -3437,7 +3437,7 @@ void tb_sw_set_unplugged(struct tb_switch *sw)
|
||||
}
|
||||
}
|
||||
|
||||
static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
|
||||
static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime)
|
||||
{
|
||||
if (flags)
|
||||
tb_sw_dbg(sw, "enabling wakeup: %#x\n", flags);
|
||||
@ -3445,7 +3445,7 @@ static int tb_switch_set_wake(struct tb_switch *sw, unsigned int flags)
|
||||
tb_sw_dbg(sw, "disabling wakeup\n");
|
||||
|
||||
if (tb_switch_is_usb4(sw))
|
||||
return usb4_switch_set_wake(sw, flags);
|
||||
return usb4_switch_set_wake(sw, flags, runtime);
|
||||
return tb_lc_set_wake(sw, flags);
|
||||
}
|
||||
|
||||
@ -3521,7 +3521,7 @@ int tb_switch_resume(struct tb_switch *sw, bool runtime)
|
||||
tb_switch_check_wakes(sw);
|
||||
|
||||
/* Disable wakes */
|
||||
tb_switch_set_wake(sw, 0);
|
||||
tb_switch_set_wake(sw, 0, true);
|
||||
|
||||
err = tb_switch_tmu_init(sw);
|
||||
if (err)
|
||||
@ -3599,10 +3599,11 @@ void tb_switch_suspend(struct tb_switch *sw, bool runtime)
|
||||
flags |= TB_WAKE_ON_USB4;
|
||||
flags |= TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE | TB_WAKE_ON_DP;
|
||||
} else if (device_may_wakeup(&sw->dev)) {
|
||||
flags |= TB_WAKE_ON_CONNECT | TB_WAKE_ON_DISCONNECT;
|
||||
flags |= TB_WAKE_ON_USB4 | TB_WAKE_ON_USB3 | TB_WAKE_ON_PCIE;
|
||||
}
|
||||
|
||||
tb_switch_set_wake(sw, flags);
|
||||
tb_switch_set_wake(sw, flags, runtime);
|
||||
|
||||
if (tb_switch_is_usb4(sw))
|
||||
usb4_switch_set_sleep(sw);
|
||||
|
||||
@ -1304,7 +1304,7 @@ int usb4_switch_read_uid(struct tb_switch *sw, u64 *uid);
|
||||
int usb4_switch_drom_read(struct tb_switch *sw, unsigned int address, void *buf,
|
||||
size_t size);
|
||||
bool usb4_switch_lane_bonding_possible(struct tb_switch *sw);
|
||||
int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags);
|
||||
int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime);
|
||||
int usb4_switch_set_sleep(struct tb_switch *sw);
|
||||
int usb4_switch_nvm_sector_size(struct tb_switch *sw);
|
||||
int usb4_switch_nvm_read(struct tb_switch *sw, unsigned int address, void *buf,
|
||||
|
||||
@ -403,12 +403,12 @@ bool usb4_switch_lane_bonding_possible(struct tb_switch *sw)
|
||||
* usb4_switch_set_wake() - Enabled/disable wake
|
||||
* @sw: USB4 router
|
||||
* @flags: Wakeup flags (%0 to disable)
|
||||
* @runtime: Wake is being programmed during system runtime
|
||||
*
|
||||
* Enables/disables router to wake up from sleep.
|
||||
*/
|
||||
int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
|
||||
int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags, bool runtime)
|
||||
{
|
||||
struct usb4_port *usb4;
|
||||
struct tb_port *port;
|
||||
u64 route = tb_route(sw);
|
||||
u32 val;
|
||||
@ -438,13 +438,11 @@ int usb4_switch_set_wake(struct tb_switch *sw, unsigned int flags)
|
||||
val |= PORT_CS_19_WOU4;
|
||||
} else {
|
||||
bool configured = val & PORT_CS_19_PC;
|
||||
usb4 = port->usb4;
|
||||
bool wakeup = runtime || device_may_wakeup(&port->usb4->dev);
|
||||
|
||||
if (((flags & TB_WAKE_ON_CONNECT) |
|
||||
device_may_wakeup(&usb4->dev)) && !configured)
|
||||
if ((flags & TB_WAKE_ON_CONNECT) && wakeup && !configured)
|
||||
val |= PORT_CS_19_WOC;
|
||||
if (((flags & TB_WAKE_ON_DISCONNECT) |
|
||||
device_may_wakeup(&usb4->dev)) && configured)
|
||||
if ((flags & TB_WAKE_ON_DISCONNECT) && wakeup && configured)
|
||||
val |= PORT_CS_19_WOD;
|
||||
if ((flags & TB_WAKE_ON_USB4) && configured)
|
||||
val |= PORT_CS_19_WOU4;
|
||||
|
||||
@ -5785,9 +5785,14 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
|
||||
int len = xdr->buf->len - post_err_offset;
|
||||
|
||||
so->so_replay.rp_status = op->status;
|
||||
so->so_replay.rp_buflen = len;
|
||||
read_bytes_from_xdr_buf(xdr->buf, post_err_offset,
|
||||
if (len <= NFSD4_REPLAY_ISIZE) {
|
||||
so->so_replay.rp_buflen = len;
|
||||
read_bytes_from_xdr_buf(xdr->buf,
|
||||
post_err_offset,
|
||||
so->so_replay.rp_buf, len);
|
||||
} else {
|
||||
so->so_replay.rp_buflen = 0;
|
||||
}
|
||||
}
|
||||
status:
|
||||
*p = op->status;
|
||||
|
||||
@ -472,11 +472,18 @@ struct nfs4_client_reclaim {
|
||||
struct xdr_netobj cr_princhash;
|
||||
};
|
||||
|
||||
/* A reasonable value for REPLAY_ISIZE was estimated as follows:
|
||||
* The OPEN response, typically the largest, requires
|
||||
* 4(status) + 8(stateid) + 20(changeinfo) + 4(rflags) + 8(verifier) +
|
||||
* 4(deleg. type) + 8(deleg. stateid) + 4(deleg. recall flag) +
|
||||
* 20(deleg. space limit) + ~32(deleg. ace) = 112 bytes
|
||||
/*
|
||||
* REPLAY_ISIZE is sized for an OPEN response with delegation:
|
||||
* 4(status) + 8(stateid) + 20(changeinfo) + 4(rflags) +
|
||||
* 8(verifier) + 4(deleg. type) + 8(deleg. stateid) +
|
||||
* 4(deleg. recall flag) + 20(deleg. space limit) +
|
||||
* ~32(deleg. ace) = 112 bytes
|
||||
*
|
||||
* Some responses can exceed this. A LOCK denial includes the conflicting
|
||||
* lock owner, which can be up to 1024 bytes (NFS4_OPAQUE_LIMIT). Responses
|
||||
* larger than REPLAY_ISIZE are not cached in rp_ibuf; only rp_status is
|
||||
* saved. Enlarging this constant increases the size of every
|
||||
* nfs4_stateowner.
|
||||
*/
|
||||
|
||||
#define NFSD4_REPLAY_ISIZE 112
|
||||
|
||||
@ -1960,6 +1960,10 @@ static int match_session(struct cifs_ses *ses,
|
||||
case Kerberos:
|
||||
if (!uid_eq(ctx->cred_uid, ses->cred_uid))
|
||||
return 0;
|
||||
if (strncmp(ses->user_name ?: "",
|
||||
ctx->username ?: "",
|
||||
CIFS_MAX_USERNAME_LEN))
|
||||
return 0;
|
||||
break;
|
||||
case NTLMv2:
|
||||
case RawNTLMSSP:
|
||||
|
||||
@ -89,35 +89,6 @@ static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out,
|
||||
scatterwalk_start(walk, sg_next(walk->sg));
|
||||
}
|
||||
|
||||
/*
|
||||
* Flush the dcache of any pages that overlap the region
|
||||
* [offset, offset + nbytes) relative to base_page.
|
||||
*
|
||||
* This should be called only when ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE, to ensure
|
||||
* that all relevant code (including the call to sg_page() in the caller, if
|
||||
* applicable) gets fully optimized out when !ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE.
|
||||
*/
|
||||
static inline void __scatterwalk_flush_dcache_pages(struct page *base_page,
|
||||
unsigned int offset,
|
||||
unsigned int nbytes)
|
||||
{
|
||||
unsigned int num_pages;
|
||||
unsigned int i;
|
||||
|
||||
base_page += offset / PAGE_SIZE;
|
||||
offset %= PAGE_SIZE;
|
||||
|
||||
/*
|
||||
* This is an overflow-safe version of
|
||||
* num_pages = DIV_ROUND_UP(offset + nbytes, PAGE_SIZE).
|
||||
*/
|
||||
num_pages = nbytes / PAGE_SIZE;
|
||||
num_pages += DIV_ROUND_UP(offset + (nbytes % PAGE_SIZE), PAGE_SIZE);
|
||||
|
||||
for (i = 0; i < num_pages; i++)
|
||||
flush_dcache_page(base_page + i);
|
||||
}
|
||||
|
||||
static inline void scatterwalk_done(struct scatter_walk *walk, int out,
|
||||
int more)
|
||||
{
|
||||
@ -130,9 +101,6 @@ void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
|
||||
size_t nbytes, int out);
|
||||
void *scatterwalk_map(struct scatter_walk *walk);
|
||||
|
||||
void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
|
||||
unsigned int nbytes);
|
||||
|
||||
void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
|
||||
unsigned int start, unsigned int nbytes, int out);
|
||||
|
||||
|
||||
@ -68,6 +68,7 @@ struct tc_action {
|
||||
#define TCA_ACT_FLAGS_REPLACE (1U << (TCA_ACT_FLAGS_USER_BITS + 2))
|
||||
#define TCA_ACT_FLAGS_NO_RTNL (1U << (TCA_ACT_FLAGS_USER_BITS + 3))
|
||||
#define TCA_ACT_FLAGS_AT_INGRESS (1U << (TCA_ACT_FLAGS_USER_BITS + 4))
|
||||
#define TCA_ACT_FLAGS_AT_INGRESS_OR_CLSACT (1U << (TCA_ACT_FLAGS_USER_BITS + 5))
|
||||
|
||||
/* Update lastuse only if needed, to avoid dirtying a cache line.
|
||||
* We use a temp variable to avoid fetching jiffies twice.
|
||||
|
||||
@ -556,6 +556,7 @@ enum devlink_param_generic_id {
|
||||
DEVLINK_PARAM_GENERIC_ID_EVENT_EQ_SIZE,
|
||||
DEVLINK_PARAM_GENERIC_ID_ENABLE_PHC,
|
||||
DEVLINK_PARAM_GENERIC_ID_CLOCK_ID,
|
||||
DEVLINK_PARAM_GENERIC_ID_MAX_MAC_PER_VF,
|
||||
|
||||
/* add new param generic ids above here*/
|
||||
__DEVLINK_PARAM_GENERIC_ID_MAX,
|
||||
@ -620,6 +621,9 @@ enum devlink_param_generic_id {
|
||||
#define DEVLINK_PARAM_GENERIC_CLOCK_ID_NAME "clock_id"
|
||||
#define DEVLINK_PARAM_GENERIC_CLOCK_ID_TYPE DEVLINK_PARAM_TYPE_U64
|
||||
|
||||
#define DEVLINK_PARAM_GENERIC_MAX_MAC_PER_VF_NAME "max_mac_per_vf"
|
||||
#define DEVLINK_PARAM_GENERIC_MAX_MAC_PER_VF_TYPE DEVLINK_PARAM_TYPE_U32
|
||||
|
||||
#define DEVLINK_PARAM_GENERIC(_id, _cmodes, _get, _set, _validate) \
|
||||
{ \
|
||||
.id = DEVLINK_PARAM_GENERIC_ID_##_id, \
|
||||
|
||||
@ -2706,8 +2706,11 @@ void trace_buffered_event_enable(void)
|
||||
for_each_tracing_cpu(cpu) {
|
||||
page = alloc_pages_node(cpu_to_node(cpu),
|
||||
GFP_KERNEL | __GFP_NORETRY, 0);
|
||||
if (!page)
|
||||
goto failed;
|
||||
/* This is just an optimization and can handle failures */
|
||||
if (!page) {
|
||||
pr_err("Failed to allocate event buffer\n");
|
||||
break;
|
||||
}
|
||||
|
||||
event = page_address(page);
|
||||
memset(event, 0, sizeof(*event));
|
||||
@ -2721,10 +2724,6 @@ void trace_buffered_event_enable(void)
|
||||
WARN_ON_ONCE(1);
|
||||
preempt_enable();
|
||||
}
|
||||
|
||||
return;
|
||||
failed:
|
||||
trace_buffered_event_disable();
|
||||
}
|
||||
|
||||
static void enable_trace_buffered_event(void *data)
|
||||
@ -2759,11 +2758,9 @@ void trace_buffered_event_disable(void)
|
||||
if (--trace_buffered_event_ref)
|
||||
return;
|
||||
|
||||
preempt_disable();
|
||||
/* For each CPU, set the buffer as used. */
|
||||
smp_call_function_many(tracing_buffer_mask,
|
||||
disable_trace_buffered_event, NULL, 1);
|
||||
preempt_enable();
|
||||
on_each_cpu_mask(tracing_buffer_mask, disable_trace_buffered_event,
|
||||
NULL, true);
|
||||
|
||||
/* Wait for all current users to finish */
|
||||
synchronize_rcu();
|
||||
@ -2772,17 +2769,19 @@ void trace_buffered_event_disable(void)
|
||||
free_page((unsigned long)per_cpu(trace_buffered_event, cpu));
|
||||
per_cpu(trace_buffered_event, cpu) = NULL;
|
||||
}
|
||||
/*
|
||||
* Make sure trace_buffered_event is NULL before clearing
|
||||
* trace_buffered_event_cnt.
|
||||
*/
|
||||
smp_wmb();
|
||||
|
||||
preempt_disable();
|
||||
/* Do the work on each cpu */
|
||||
smp_call_function_many(tracing_buffer_mask,
|
||||
enable_trace_buffered_event, NULL, 1);
|
||||
preempt_enable();
|
||||
/*
|
||||
* Wait for all CPUs that potentially started checking if they can use
|
||||
* their event buffer only after the previous synchronize_rcu() call and
|
||||
* they still read a valid pointer from trace_buffered_event. It must be
|
||||
* ensured they don't see cleared trace_buffered_event_cnt else they
|
||||
* could wrongly decide to use the pointed-to buffer which is now freed.
|
||||
*/
|
||||
synchronize_rcu();
|
||||
|
||||
/* For each CPU, relinquish the buffer */
|
||||
on_each_cpu_mask(tracing_buffer_mask, enable_trace_buffered_event, NULL,
|
||||
true);
|
||||
}
|
||||
|
||||
static struct trace_buffer *temp_buffer;
|
||||
|
||||
@ -4304,6 +4304,9 @@ static void osd_fault(struct ceph_connection *con)
|
||||
goto out_unlock;
|
||||
}
|
||||
|
||||
osd->o_sparse_op_idx = -1;
|
||||
ceph_init_sparse_read(&osd->o_sparse_read);
|
||||
|
||||
if (!reopen_osd(osd))
|
||||
kick_osd_requests(osd);
|
||||
maybe_request_map(osdc);
|
||||
|
||||
@ -102,6 +102,11 @@ static const struct devlink_param devlink_param_generic[] = {
|
||||
.name = DEVLINK_PARAM_GENERIC_CLOCK_ID_NAME,
|
||||
.type = DEVLINK_PARAM_GENERIC_CLOCK_ID_TYPE,
|
||||
},
|
||||
{
|
||||
.id = DEVLINK_PARAM_GENERIC_ID_MAX_MAC_PER_VF,
|
||||
.name = DEVLINK_PARAM_GENERIC_MAX_MAC_PER_VF_NAME,
|
||||
.type = DEVLINK_PARAM_GENERIC_MAX_MAC_PER_VF_TYPE,
|
||||
},
|
||||
};
|
||||
|
||||
static int devlink_param_generic_verify(const struct devlink_param *param)
|
||||
|
||||
@ -1358,6 +1358,12 @@ static int tcf_ct_init(struct net *net, struct nlattr *nla,
|
||||
return -EINVAL;
|
||||
}
|
||||
|
||||
if (bind && !(flags & TCA_ACT_FLAGS_AT_INGRESS_OR_CLSACT)) {
|
||||
NL_SET_ERR_MSG_MOD(extack,
|
||||
"Attaching ct to a non ingress/clsact qdisc is unsupported");
|
||||
return -EOPNOTSUPP;
|
||||
}
|
||||
|
||||
err = nla_parse_nested(tb, TCA_CT_MAX, nla, ct_policy, extack);
|
||||
if (err < 0)
|
||||
return err;
|
||||
|
||||
@ -2228,6 +2228,11 @@ static bool is_qdisc_ingress(__u32 classid)
|
||||
return (TC_H_MIN(classid) == TC_H_MIN(TC_H_MIN_INGRESS));
|
||||
}
|
||||
|
||||
static bool is_ingress_or_clsact(struct tcf_block *block, struct Qdisc *q)
|
||||
{
|
||||
return tcf_block_shared(block) || (q && !!(q->flags & TCQ_F_INGRESS));
|
||||
}
|
||||
|
||||
static int tc_new_tfilter(struct sk_buff *skb, struct nlmsghdr *n,
|
||||
struct netlink_ext_ack *extack)
|
||||
{
|
||||
@ -2420,6 +2425,8 @@ replay:
|
||||
flags |= TCA_ACT_FLAGS_NO_RTNL;
|
||||
if (is_qdisc_ingress(parent))
|
||||
flags |= TCA_ACT_FLAGS_AT_INGRESS;
|
||||
if (is_ingress_or_clsact(block, q))
|
||||
flags |= TCA_ACT_FLAGS_AT_INGRESS_OR_CLSACT;
|
||||
err = tp->ops->change(net, skb, tp, cl, t->tcm_handle, tca, &fh,
|
||||
flags, extack);
|
||||
if (err == 0) {
|
||||
|
||||
@ -1,3 +1,43 @@
|
||||
* Sat May 02 2026 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-611.54.1.el9_7]
|
||||
- crypto: algif_aead - snapshot IV for async AEAD requests (Vladislav Dronov) [RHEL-172201]
|
||||
- crypto: algif_aead - Fix minimum RX size check for decryption (Vladislav Dronov) [RHEL-172201]
|
||||
- crypto: authencesn - reject short ahash digests during instance creation (Vladislav Dronov) [RHEL-172201]
|
||||
- crypto: authencesn - Fix src offset when decrypting in-place (Vladislav Dronov) [RHEL-172201]
|
||||
- crypto: authencesn - Do not place hiseq at end of dst for out-of-place decryption (Vladislav Dronov) [RHEL-172201] {CVE-2026-31431}
|
||||
- crypto: authencesn - reject too-short AAD (assoclen<8) to match ESP/ESN spec (Vladislav Dronov) [RHEL-172201] {CVE-2026-23060}
|
||||
- crypto: af_alg - Fix page reassignment overflow in af_alg_pull_tsgl (Vladislav Dronov) [RHEL-172201]
|
||||
- crypto: af_alg - limit RX SG extraction by receive buffer budget (Vladislav Dronov) [RHEL-172201] {CVE-2026-31677}
|
||||
- crypto: algif_aead - Revert to operating out-of-place (Vladislav Dronov) [RHEL-172201] {CVE-2026-31431}
|
||||
- crypto: af-alg - fix NULL pointer dereference in scatterwalk (Vladislav Dronov) [RHEL-172201]
|
||||
Resolves: RHEL-172201
|
||||
|
||||
* Thu Apr 23 2026 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-611.53.1.el9_7]
|
||||
- tracing: Fix a warning when allocating buffered events fails (CKI KWF BOT) [RHEL-169366]
|
||||
- tracing: Fix a possible race when disabling buffered events (CKI KWF BOT) [RHEL-169366]
|
||||
- tracing: Fix incomplete locking when disabling buffered events (CKI KWF BOT) [RHEL-169366]
|
||||
- thunderbolt: Fix wake on connect at runtime (Desnes Nunes) [RHEL-104807]
|
||||
- thunderbolt: Fix a logic error in wake on connect (Desnes Nunes) [RHEL-104807]
|
||||
- thunderbolt: Use wake on connect and disconnect over suspend (Desnes Nunes) [RHEL-104807]
|
||||
- i2c: i801: Revert "i2c: i801: replace acpi_lock with I2C bus lock" (David Arcari) [RHEL-155311]
|
||||
- net/sched: Only allow act_ct to bind to clsact/ingress qdiscs and shared blocks (CKI Backport Bot) [RHEL-157327] {CVE-2026-23270}
|
||||
Resolves: RHEL-104807, RHEL-155311, RHEL-157327, RHEL-169366
|
||||
|
||||
* Tue Apr 21 2026 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-611.52.1.el9_7]
|
||||
- libceph: reset sparse-read state in osd_fault() (CKI Backport Bot) [RHEL-150464] {CVE-2026-23136}
|
||||
Resolves: RHEL-150464
|
||||
|
||||
* Thu Apr 16 2026 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-611.51.1.el9_7]
|
||||
- nfsd: fix heap overflow in NFSv4.0 LOCK replay cache (Scott Mayhew) [RHEL-167016] {CVE-2026-31402}
|
||||
- i40e: support generic devlink param "max_mac_per_vf" (Mohammad Heib) [RHEL-121643]
|
||||
- devlink: Add new "max_mac_per_vf" generic device param (Mohammad Heib) [RHEL-121643]
|
||||
- i40e: improve VF MAC filters accounting (Mohammad Heib) [RHEL-121643]
|
||||
Resolves: RHEL-121643, RHEL-167016
|
||||
|
||||
* Fri Apr 10 2026 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-611.50.1.el9_7]
|
||||
- smb: client: fix krb5 mount with username option (Paulo Alcantara) [RHEL-158987]
|
||||
- md/raid1: fix data lost for writemostly rdev (Nigel Croxon) [RHEL-143624]
|
||||
Resolves: RHEL-143624, RHEL-158987
|
||||
|
||||
* Tue Apr 07 2026 CKI KWF Bot <cki-ci-bot+kwf-gitlab-com@redhat.com> [5.14.0-611.49.1.el9_7]
|
||||
- rtnetlink: Allocate vfinfo size for VF GUIDs when supported (Kamal Heib) [RHEL-149469] {CVE-2025-22075}
|
||||
Resolves: RHEL-149469
|
||||
|
||||
Loading…
Reference in New Issue
Block a user