Import of kernel-4.18.0-553.123.1.el8_10

This commit is contained in:
almalinux-bot-kernel 2026-05-06 05:11:57 +00:00
parent 085f477512
commit fd24567151
16 changed files with 212 additions and 169 deletions

View File

@@ -12,7 +12,7 @@ RHEL_MINOR = 10
#
# Use this spot to avoid future merge conflicts.
# Do not trim this comment.
RHEL_RELEASE = 553.121.1
RHEL_RELEASE = 553.123.1
#
# ZSTREAM

View File

@@ -2643,11 +2643,6 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
pgprintk("%s: spte %llx write_fault %d gfn %llx\n", __func__,
*sptep, write_fault, gfn);
if (unlikely(is_noslot_pfn(pfn))) {
mark_mmio_spte(vcpu, sptep, gfn, pte_access);
return RET_PF_EMULATE;
}
if (is_shadow_present_pte(*sptep)) {
/*
* If we overwrite a PTE page pointer with a 2MB PMD, unlink
@@ -2669,6 +2664,14 @@ static int mmu_set_spte(struct kvm_vcpu *vcpu, struct kvm_memory_slot *slot,
was_rmapped = 1;
}
if (unlikely(is_noslot_pfn(pfn))) {
mark_mmio_spte(vcpu, sptep, gfn, pte_access);
if (flush)
kvm_flush_remote_tlbs_with_address(vcpu->kvm, gfn,
KVM_PAGES_PER_HPAGE(level));
return RET_PF_EMULATE;
}
wrprot = make_spte(vcpu, sp, slot, pte_access, gfn, pfn, *sptep, prefetch,
true, host_writable, &spte);

View File

@@ -240,6 +240,7 @@ config CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
select CRYPTO_MANAGER
select CRYPTO_HASH
select CRYPTO_NULL
help
Authenc: Combined mode wrapper for IPsec.
This is required for IPSec.
@@ -1862,6 +1863,7 @@ config CRYPTO_USER_API_AEAD
depends on NET
select CRYPTO_AEAD
select CRYPTO_BLKCIPHER
select CRYPTO_NULL
select CRYPTO_USER_API
help
This option enables the user-spaces interface for AEAD

View File

@@ -511,8 +511,10 @@ int af_alg_alloc_tsgl(struct sock *sk)
sg_init_table(sgl->sg, MAX_SGL_ENTS + 1);
sgl->cur = 0;
if (sg)
if (sg) {
sg_unmark_end(sg + MAX_SGL_ENTS - 1);
sg_chain(sg, MAX_SGL_ENTS + 1, sgl->sg);
}
list_add_tail(&sgl->list, &ctx->tsgl_list);
}
@@ -1114,6 +1116,8 @@ int af_alg_get_rsgl(struct sock *sk, struct msghdr *msg, int flags,
seglen = min_t(size_t, (maxsize - len),
msg_data_left(msg));
/* Never pin more pages than the remaining RX accounting budget. */
seglen = min_t(size_t, seglen, af_alg_rcvbuf(sk));
if (list_empty(&areq->rsgl_list)) {
rsgl = &areq->first_rsgl;

View File

@@ -30,6 +30,8 @@
#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <crypto/skcipher.h>
#include <crypto/null.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
@@ -38,13 +40,19 @@
#include <linux/net.h>
#include <net/sock.h>
struct aead_tfm {
struct crypto_aead *aead;
struct crypto_skcipher *null_tfm;
};
static inline bool aead_sufficient_data(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct af_alg_ctx *ctx = ask->private;
struct crypto_aead *tfm = pask->private;
struct aead_tfm *aeadc = pask->private;
struct crypto_aead *tfm = aeadc->aead;
unsigned int as = crypto_aead_authsize(tfm);
/*
@@ -60,12 +68,27 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct crypto_aead *tfm = pask->private;
struct aead_tfm *aeadc = pask->private;
struct crypto_aead *tfm = aeadc->aead;
unsigned int ivsize = crypto_aead_ivsize(tfm);
return af_alg_sendmsg(sock, msg, size, ivsize);
}
static int crypto_aead_copy_sgl(struct crypto_skcipher *null_tfm,
struct scatterlist *src,
struct scatterlist *dst, unsigned int len)
{
SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
skcipher_request_set_tfm(skreq, null_tfm);
skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
NULL, NULL);
skcipher_request_set_crypt(skreq, src, dst, len, NULL);
return crypto_skcipher_encrypt(skreq);
}
static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
@@ -74,7 +97,9 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct af_alg_ctx *ctx = ask->private;
struct crypto_aead *tfm = pask->private;
struct aead_tfm *aeadc = pask->private;
struct crypto_aead *tfm = aeadc->aead;
struct crypto_skcipher *null_tfm = aeadc->null_tfm;
unsigned int as = crypto_aead_authsize(tfm);
unsigned int ivsize = crypto_aead_ivsize(tfm);
struct af_alg_async_req *areq;
@@ -193,7 +218,10 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
/* Use the RX SGL as source (and destination) for crypto op. */
rsgl_src = areq->first_rsgl.sgl.sg;
memcpy_sglist(rsgl_src, tsgl_src, ctx->aead_assoclen);
err = crypto_aead_copy_sgl(null_tfm, tsgl_src, rsgl_src,
ctx->aead_assoclen);
if (err)
goto free;
/* Initialize the crypto operation */
aead_request_set_crypt(&areq->cra_u.aead_req, tsgl_src,
@@ -298,7 +326,7 @@ static int aead_check_key(struct socket *sock)
int err = 0;
struct sock *psk;
struct alg_sock *pask;
struct crypto_aead *tfm;
struct aead_tfm *tfm;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
@@ -312,7 +340,7 @@ static int aead_check_key(struct socket *sock)
err = -ENOKEY;
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
if (crypto_aead_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
goto unlock;
if (!pask->refcnt++)
@@ -391,22 +419,54 @@ static struct proto_ops algif_aead_ops_nokey = {
static void *aead_bind(const char *name, u32 type, u32 mask)
{
return crypto_alloc_aead(name, type, mask);
struct aead_tfm *tfm;
struct crypto_aead *aead;
struct crypto_skcipher *null_tfm;
tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
if (!tfm)
return ERR_PTR(-ENOMEM);
aead = crypto_alloc_aead(name, type, mask);
if (IS_ERR(aead)) {
kfree(tfm);
return ERR_CAST(aead);
}
null_tfm = crypto_get_default_null_skcipher();
if (IS_ERR(null_tfm)) {
crypto_free_aead(aead);
kfree(tfm);
return ERR_CAST(null_tfm);
}
tfm->aead = aead;
tfm->null_tfm = null_tfm;
return tfm;
}
static void aead_release(void *private)
{
crypto_free_aead(private);
struct aead_tfm *tfm = private;
crypto_free_aead(tfm->aead);
crypto_put_default_null_skcipher();
kfree(tfm);
}
static int aead_setauthsize(void *private, unsigned int authsize)
{
return crypto_aead_setauthsize(private, authsize);
struct aead_tfm *tfm = private;
return crypto_aead_setauthsize(tfm->aead, authsize);
}
static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
return crypto_aead_setkey(private, key, keylen);
struct aead_tfm *tfm = private;
return crypto_aead_setkey(tfm->aead, key, keylen);
}
static void aead_sock_destruct(struct sock *sk)
@@ -415,7 +475,8 @@ static void aead_sock_destruct(struct sock *sk)
struct af_alg_ctx *ctx = ask->private;
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct crypto_aead *tfm = pask->private;
struct aead_tfm *aeadc = pask->private;
struct crypto_aead *tfm = aeadc->aead;
unsigned int ivlen = crypto_aead_ivsize(tfm);
af_alg_pull_tsgl(sk, ctx->used, NULL);
@@ -428,9 +489,10 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
{
struct af_alg_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
struct crypto_aead *tfm = private;
struct aead_tfm *tfm = private;
struct crypto_aead *aead = tfm->aead;
unsigned int len = sizeof(*ctx);
unsigned int ivlen = crypto_aead_ivsize(tfm);
unsigned int ivlen = crypto_aead_ivsize(aead);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
@@ -463,9 +525,9 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
static int aead_accept_parent(void *private, struct sock *sk)
{
struct crypto_aead *tfm = private;
struct aead_tfm *tfm = private;
if (crypto_aead_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
return aead_accept_parent_nokey(private, sk);

View File

@@ -90,8 +90,14 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
* If more buffers are to be expected to be processed, process only
* full block size buffers.
*/
if (ctx->more || len < ctx->used)
if (ctx->more || len < ctx->used) {
if (len < bs) {
err = -EINVAL;
goto free;
}
len -= len % bs;
}
/*
* Create a per request TX SGL for this request which tracks the

View File

@@ -15,6 +15,7 @@
#include <crypto/public_key.h>
#include <linux/seq_file.h>
#include <linux/module.h>
#include <linux/overflow.h>
#include <linux/slab.h>
#include <linux/ctype.h>
#include <keys/system_keyring.h>
@@ -141,12 +142,17 @@ struct asymmetric_key_id *asymmetric_key_generate_id(const void *val_1,
size_t len_2)
{
struct asymmetric_key_id *kid;
size_t kid_sz;
size_t len;
kid = kmalloc(sizeof(struct asymmetric_key_id) + len_1 + len_2,
GFP_KERNEL);
if (check_add_overflow(len_1, len_2, &len))
return ERR_PTR(-EOVERFLOW);
if (check_add_overflow(sizeof(struct asymmetric_key_id), len, &kid_sz))
return ERR_PTR(-EOVERFLOW);
kid = kmalloc(kid_sz, GFP_KERNEL);
if (!kid)
return ERR_PTR(-ENOMEM);
kid->len = len_1 + len_2;
kid->len = len;
memcpy(kid->data, val_1, len_1);
memcpy(kid->data + len_1, val_2, len_2);
return kid;

View File

@@ -14,6 +14,7 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
#include <crypto/null.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -32,6 +33,7 @@ struct authenc_instance_ctx {
struct crypto_authenc_ctx {
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
struct crypto_skcipher *null;
};
struct authenc_request_ctx {
@@ -187,6 +189,21 @@ out:
authenc_request_complete(areq, err);
}
static int crypto_authenc_copy_assoc(struct aead_request *req)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
skcipher_request_set_tfm(skreq, ctx->null);
skcipher_request_set_callback(skreq, aead_request_flags(req),
NULL, NULL);
skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
NULL);
return crypto_skcipher_encrypt(skreq);
}
static int crypto_authenc_encrypt(struct aead_request *req)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
@@ -205,7 +222,10 @@ static int crypto_authenc_encrypt(struct aead_request *req)
dst = src;
if (req->src != req->dst) {
memcpy_sglist(req->dst, req->src, req->assoclen);
err = crypto_authenc_copy_assoc(req);
if (err)
return err;
dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
}
@@ -306,6 +326,7 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
struct crypto_skcipher *null;
int err;
auth = crypto_spawn_ahash(&ictx->auth);
@@ -317,8 +338,14 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(enc))
goto err_free_ahash;
null = crypto_get_default_null_skcipher();
err = PTR_ERR(null);
if (IS_ERR(null))
goto err_free_skcipher;
ctx->auth = auth;
ctx->enc = enc;
ctx->null = null;
crypto_aead_set_reqsize(
tfm,
@@ -332,6 +359,8 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
return 0;
err_free_skcipher:
crypto_free_skcipher(enc);
err_free_ahash:
crypto_free_ahash(auth);
return err;
@@ -343,6 +372,7 @@ static void crypto_authenc_exit_tfm(struct crypto_aead *tfm)
crypto_free_ahash(ctx->auth);
crypto_free_skcipher(ctx->enc);
crypto_put_default_null_skcipher();
}
static void crypto_authenc_free(struct aead_instance *inst)

View File

@@ -17,6 +17,7 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
#include <crypto/null.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -35,6 +36,7 @@ struct crypto_authenc_esn_ctx {
unsigned int reqoff;
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
struct crypto_skcipher *null;
};
struct authenc_esn_request_ctx {
@@ -177,6 +179,28 @@ static void crypto_authenc_esn_encrypt_done(struct crypto_async_request *req,
authenc_esn_request_complete(areq, err);
}
static int crypto_authenc_esn_copy_sg(struct aead_request *req,
struct scatterlist *src,
struct scatterlist *dst,
unsigned int len)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
skcipher_request_set_tfm(skreq, ctx->null);
skcipher_request_set_callback(skreq, aead_request_flags(req),
NULL, NULL);
skcipher_request_set_crypt(skreq, src, dst, len, NULL);
return crypto_skcipher_encrypt(skreq);
}
static int crypto_authenc_esn_copy(struct aead_request *req, unsigned int len)
{
return crypto_authenc_esn_copy_sg(req, req->src, req->dst, len);
}
static int crypto_authenc_esn_encrypt(struct aead_request *req)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
@@ -198,7 +222,10 @@ static int crypto_authenc_esn_encrypt(struct aead_request *req)
dst = src;
if (req->src != req->dst) {
memcpy_sglist(req->dst, req->src, assoclen);
err = crypto_authenc_esn_copy(req, assoclen);
if (err)
return err;
sg_init_table(areq_ctx->dst, 2);
dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, assoclen);
}
@@ -233,6 +260,7 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
struct scatterlist *dst = req->dst;
u8 *ihash = ohash + crypto_ahash_digestsize(auth);
u32 tmp[2];
int err;
if (!authsize)
goto decrypt;
@@ -242,8 +270,11 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
scatterwalk_map_and_copy(tmp, dst, 0, 8, 1);
} else
memcpy_sglist(dst, src, assoclen);
} else {
err = crypto_authenc_esn_copy(req, assoclen);
if (err)
return err;
}
if (crypto_memneq(ihash, ohash, authsize))
return -EBADMSG;
@@ -313,7 +344,10 @@ static int crypto_authenc_esn_decrypt(struct aead_request *req)
src = scatterwalk_ffwd(areq_ctx->src, src, 8);
dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
memcpy_sglist(dst, src, assoclen + cryptlen - 8);
err = crypto_authenc_esn_copy_sg(req, src, dst,
assoclen + cryptlen - 8);
if (err)
return err;
dst = req->dst;
}
@@ -337,6 +371,7 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
struct crypto_skcipher *null;
int err;
auth = crypto_spawn_ahash(&ictx->auth);
@@ -348,8 +383,14 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(enc))
goto err_free_ahash;
null = crypto_get_default_null_skcipher();
err = PTR_ERR(null);
if (IS_ERR(null))
goto err_free_skcipher;
ctx->auth = auth;
ctx->enc = enc;
ctx->null = null;
ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth),
crypto_ahash_alignmask(auth) + 1);
@@ -366,6 +407,8 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
return 0;
err_free_skcipher:
crypto_free_skcipher(enc);
err_free_ahash:
crypto_free_ahash(auth);
return err;
@@ -377,6 +420,7 @@ static void crypto_authenc_esn_exit_tfm(struct crypto_aead *tfm)
crypto_free_ahash(ctx->auth);
crypto_free_skcipher(ctx->enc);
crypto_put_default_null_skcipher();
}
static void crypto_authenc_esn_free(struct aead_instance *inst)
@@ -415,6 +459,11 @@ static int crypto_authenc_esn_create(struct crypto_template *tmpl,
auth_base = &auth->base;
if (auth->digestsize > 0 && auth->digestsize < 4) {
err = -EINVAL;
goto out_put_auth;
}
enc_name = crypto_attr_alg_name(tb[2]);
err = PTR_ERR(enc_name);
if (IS_ERR(enc_name))

View File

@@ -74,104 +74,6 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
}
EXPORT_SYMBOL_GPL(scatterwalk_map_and_copy);
/**
* memcpy_sglist() - Copy data from one scatterlist to another
* @dst: The destination scatterlist. Can be NULL if @nbytes == 0.
* @src: The source scatterlist. Can be NULL if @nbytes == 0.
* @nbytes: Number of bytes to copy
*
* The scatterlists can describe exactly the same memory, in which case this
* function is a no-op. No other overlaps are supported.
*
* Context: Any context
*/
void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
unsigned int src_offset, dst_offset;
if (unlikely(nbytes == 0)) /* in case src and/or dst is NULL */
return;
src_offset = src->offset;
dst_offset = dst->offset;
for (;;) {
/* Compute the length to copy this step. */
unsigned int len = min3(src->offset + src->length - src_offset,
dst->offset + dst->length - dst_offset,
nbytes);
struct page *src_page = sg_page(src);
struct page *dst_page = sg_page(dst);
const void *src_virt;
void *dst_virt;
if (IS_ENABLED(CONFIG_HIGHMEM)) {
/* HIGHMEM: we may have to actually map the pages. */
const unsigned int src_oip = offset_in_page(src_offset);
const unsigned int dst_oip = offset_in_page(dst_offset);
const unsigned int limit = PAGE_SIZE;
/* Further limit len to not cross a page boundary. */
len = min3(len, limit - src_oip, limit - dst_oip);
/* Compute the source and destination pages. */
src_page += src_offset / PAGE_SIZE;
dst_page += dst_offset / PAGE_SIZE;
if (src_page != dst_page) {
/* Copy between different pages. */
dst_virt = kmap_atomic(dst_page);
src_virt = kmap_atomic(src_page);
memcpy(dst_virt + dst_oip, src_virt + src_oip,
len);
kunmap_atomic((void *)src_virt);
kunmap_atomic(dst_virt);
flush_dcache_page(dst_page);
} else if (src_oip != dst_oip) {
/* Copy between different parts of same page. */
dst_virt = kmap_atomic(dst_page);
memcpy(dst_virt + dst_oip, dst_virt + src_oip,
len);
kunmap_atomic(dst_virt);
flush_dcache_page(dst_page);
} /* Else, it's the same memory. No action needed. */
} else {
/*
* !HIGHMEM: no mapping needed. Just work in the linear
* buffer of each sg entry. Note that we can cross page
* boundaries, as they are not significant in this case.
*/
src_virt = page_address(src_page) + src_offset;
dst_virt = page_address(dst_page) + dst_offset;
if (src_virt != dst_virt) {
memcpy(dst_virt, src_virt, len);
if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE)
__scatterwalk_flush_dcache_pages(
dst_page, dst_offset, len);
} /* Else, it's the same memory. No action needed. */
}
nbytes -= len;
if (nbytes == 0) /* No more to copy? */
break;
/*
* There's more to copy. Advance the offsets by the length
* copied this step, and advance the sg entries as needed.
*/
src_offset += len;
if (src_offset >= src->offset + src->length) {
src = sg_next(src);
src_offset = src->offset;
}
dst_offset += len;
if (dst_offset >= dst->offset + dst->length) {
dst = sg_next(dst);
dst_offset = dst->offset;
}
}
}
EXPORT_SYMBOL_GPL(memcpy_sglist);
struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
struct scatterlist *src,
unsigned int len)

View File

@@ -4,6 +4,7 @@
* Copyright (c) 2019-2020 Marvell International Ltd.
*/
#include <linux/array_size.h>
#include <linux/netdevice.h>
#include <linux/etherdevice.h>
#include <linux/skbuff.h>
@@ -959,7 +960,7 @@ static inline void qede_tpa_cont(struct qede_dev *edev,
{
int i;
for (i = 0; cqe->len_list[i]; i++)
for (i = 0; cqe->len_list[i] && i < ARRAY_SIZE(cqe->len_list); i++)
qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
le16_to_cpu(cqe->len_list[i]));
@@ -984,7 +985,7 @@ static int qede_tpa_end(struct qede_dev *edev,
dma_unmap_page(rxq->dev, tpa_info->buffer.mapping,
PAGE_SIZE, rxq->data_direction);
for (i = 0; cqe->len_list[i]; i++)
for (i = 0; cqe->len_list[i] && i < ARRAY_SIZE(cqe->len_list); i++)
qede_fill_frag_skb(edev, rxq, cqe->tpa_agg_index,
le16_to_cpu(cqe->len_list[i]));
if (unlikely(i > 1))

View File

@@ -1046,6 +1046,7 @@ void nvme_cleanup_cmd(struct request *req)
clear_bit_unlock(0, &ctrl->discard_page_busy);
else
kfree(page_address(page) + req->special_vec.bv_offset);
req->rq_flags &= ~RQF_SPECIAL_PAYLOAD;
}
}
EXPORT_SYMBOL_GPL(nvme_cleanup_cmd);

View File

@@ -111,35 +111,6 @@ static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out,
scatterwalk_start(walk, sg_next(walk->sg));
}
/*
* Flush the dcache of any pages that overlap the region
* [offset, offset + nbytes) relative to base_page.
*
* This should be called only when ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE, to ensure
* that all relevant code (including the call to sg_page() in the caller, if
* applicable) gets fully optimized out when !ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE.
*/
static inline void __scatterwalk_flush_dcache_pages(struct page *base_page,
unsigned int offset,
unsigned int nbytes)
{
unsigned int num_pages;
unsigned int i;
base_page += offset / PAGE_SIZE;
offset %= PAGE_SIZE;
/*
* This is an overflow-safe version of
* num_pages = DIV_ROUND_UP(offset + nbytes, PAGE_SIZE).
*/
num_pages = nbytes / PAGE_SIZE;
num_pages += DIV_ROUND_UP(offset + (nbytes % PAGE_SIZE), PAGE_SIZE);
for (i = 0; i < num_pages; i++)
flush_dcache_page(base_page + i);
}
static inline void scatterwalk_done(struct scatter_walk *walk, int out,
int more)
{
@@ -152,9 +123,6 @@ void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
size_t nbytes, int out);
void *scatterwalk_map(struct scatter_walk *walk);
void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes);
void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
unsigned int start, unsigned int nbytes, int out);

View File

@@ -0,0 +1,13 @@
/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _LINUX_ARRAY_SIZE_H
#define _LINUX_ARRAY_SIZE_H
#include <linux/compiler.h>
/**
* ARRAY_SIZE - get the number of elements in array @arr
* @arr: array to be sized
*/
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
#endif /* _LINUX_ARRAY_SIZE_H */

View File

@@ -3,6 +3,7 @@
#define _LINUX_KERNEL_H
#include <stdarg.h>
#include <linux/array_size.h>
#include <linux/limits.h>
#include <linux/linkage.h>
#include <linux/stddef.h>
@@ -43,12 +44,6 @@
#define READ 0
#define WRITE 1
/**
* ARRAY_SIZE - get the number of elements in array @arr
* @arr: array to be sized
*/
#define ARRAY_SIZE(arr) (sizeof(arr) / sizeof((arr)[0]) + __must_be_array(arr))
#define PTR_IF(cond, ptr) ((cond) ? (ptr) : NULL)
#define u64_to_user_ptr(x) ( \

View File

@@ -2,6 +2,7 @@
#ifndef _LINUX_STRING_H_
#define _LINUX_STRING_H_
#include <linux/array_size.h>
#include <linux/compiler.h> /* for inline */
#include <linux/types.h> /* for size_t */
#include <linux/stddef.h> /* for NULL */