CVE-2026-31431 (Copy Fail): backport crypto AEAD/algif fixes

Andrew Lukoshko 2026-04-30 14:15:02 +02:00
parent 438b4edcf9
commit c6a6a6451d
3 changed files with 609 additions and 310 deletions


@@ -38,7 +38,7 @@ actions:
name: "0008-Bring-back-deprecated-pci-ids-to-be2iscsi-driver.patch"
number: 2008
- type: "patch"
name: "1100-crypto-algif_aead-Revert-to-operating-out-of-place.patch"
name: "1100-CVE-2026-31431-crypto-Copy-Fail-fixes.patch"
number: 1100
- replace:
@@ -175,7 +175,7 @@ actions:
- name: "Andrew Lukoshko"
email: "alukoshko@almalinux.org"
line:
- "crypto: algif_aead - Revert to operating out-of-place"
- "CVE-2026-31431 (Copy Fail): backport crypto AEAD/algif fixes from stable-5.10.y"
- "hpsa: bring back deprecated PCI ids #CFHack #CFHack2024"
- "mptsas: bring back deprecated PCI ids #CFHack #CFHack2024"
- "megaraid_sas: bring back deprecated PCI ids #CFHack #CFHack2024"

1100-CVE-2026-31431-crypto-Copy-Fail-fixes.patch

@@ -0,0 +1,607 @@
From: AlmaLinux Backport <packager@almalinux.org>
Subject: [PATCH] CVE-2026-31431 ("Copy Fail"): crypto AEAD/algif fixes for EL8

Backport addressing CVE-2026-31431 ("Copy Fail"), reported by Taeyang Lee
<0wn@theori.io>. The EL8 kernel is based on 4.18.0; the closest stable
branch carrying these fixes is linux-5.10.y. The upstream commits were
applied where they fit, with manual adjustments where 4.18 diverges
from 5.10.

Stable-5.10.y commits included:
df22c9a65e9a crypto: authencesn - reject too-short AAD (assoclen<8) [prereq, committed 2026-01-30]
534b7f208c60 crypto: scatterwalk - Backport memcpy_sglist()
893d22e0135f crypto: algif_aead - Revert to operating out-of-place [adapted: EL8 uses crypto_aead_copy_sgl(null_tfm, ...) instead of memcpy_sglist]
08ea39a556ec crypto: algif_aead - snapshot IV for async AEAD requests
8c62f6185765 crypto: authencesn - Do not place hiseq at end of dst for out-of-place decryption [last hunk adapted: EL8 uses crypto_authenc_esn_copy()]
88881da57e60 crypto: authencesn - Fix src offset when decrypting in-place
fa48d3ea9cdb crypto: af_alg - Fix page reassignment overflow in af_alg_pull_tsgl
74a66fdb5282 crypto: algif_aead - Fix minimum RX size check for decryption

Refactor-only commits skipped (not security-relevant; EL8 keeps the
existing null_tfm copy path, sketched below):
488f9c3ab90e crypto: algif_aead - use memcpy_sglist() instead of null skcipher
274857bb1fbe crypto: authenc - use memcpy_sglist() instead of null skcipher
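
For context, the null_tfm copy path kept by this backport is the
4.18-era helper in crypto/algif_aead.c, which moves bytes between SGLs
by "encrypting" with the null skcipher. Approximately (a sketch for
reference, quoted from memory of the 4.18 tree, not part of this diff):

/* Sketch only; exact flags/layout may differ slightly in the EL8 tree. */
static inline int crypto_aead_copy_sgl(struct crypto_skcipher *null_tfm,
                                       struct scatterlist *src,
                                       struct scatterlist *dst,
                                       unsigned int len)
{
        SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);

        skcipher_request_set_tfm(skreq, null_tfm);
        skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
                                      NULL, NULL);
        skcipher_request_set_crypt(skreq, src, dst, len, NULL);

        /* Encrypting with the null cipher is a plain copy of src to dst. */
        return crypto_skcipher_encrypt(skreq);
}
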
Signed-off-by: Andrew Lukoshko <alukoshko@almalinux.org>
---
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -524,15 +524,13 @@
/**
* aead_count_tsgl - Count number of TX SG entries
*
- * The counting starts from the beginning of the SGL to @bytes. If
- * an offset is provided, the counting of the SG entries starts at the offset.
+ * The counting starts from the beginning of the SGL to @bytes.
*
* @sk socket of connection to user space
* @bytes Count the number of SG entries holding given number of bytes.
- * @offset Start the counting of SG entries from the given offset.
* @return Number of TX SG entries found given the constraints
*/
-unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
+unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes)
{
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
@@ -547,25 +545,11 @@
struct scatterlist *sg = sgl->sg;
for (i = 0; i < sgl->cur; i++) {
- size_t bytes_count;
-
- /* Skip offset */
- if (offset >= sg[i].length) {
- offset -= sg[i].length;
- bytes -= sg[i].length;
- continue;
- }
-
- bytes_count = sg[i].length - offset;
-
- offset = 0;
sgl_count++;
-
- /* If we have seen requested number of bytes, stop */
- if (bytes_count >= bytes)
+ if (sg[i].length >= bytes)
return sgl_count;
- bytes -= bytes_count;
+ bytes -= sg[i].length;
}
}
@@ -577,19 +561,14 @@
* aead_pull_tsgl - Release the specified buffers from TX SGL
*
* If @dst is non-null, reassign the pages to dst. The caller must release
- * the pages. If @dst_offset is given only reassign the pages to @dst starting
- * at the @dst_offset (byte). The caller must ensure that @dst is large
- * enough (e.g. by using af_alg_count_tsgl with the same offset).
+ * the pages.
*
* @sk socket of connection to user space
* @used Number of bytes to pull from TX SGL
* @dst If non-NULL, buffer is reassigned to dst SGL instead of releasing. The
* caller must release the buffers in dst.
- * @dst_offset Reassign the TX SGL from given offset. All buffers before
- * reaching the offset is released.
*/
-void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
- size_t dst_offset)
+void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst)
{
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
@@ -613,19 +592,11 @@
* Assumption: caller created af_alg_count_tsgl(len)
* SG entries in dst.
*/
- if (dst) {
- if (dst_offset >= plen) {
- /* discard page before offset */
- dst_offset -= plen;
- } else {
- /* reassign page to dst after offset */
- get_page(page);
- sg_set_page(dst + j, page,
- plen - dst_offset,
- sg[i].offset + dst_offset);
- dst_offset = 0;
- j++;
- }
+ if (dst && plen) {
+ /* reassign page to dst */
+ get_page(page);
+ sg_set_page(dst + j, page, plen, sg[i].offset);
+ j++;
}
sg[i].length -= plen;
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -100,10 +100,11 @@
struct aead_tfm *aeadc = pask->private;
struct crypto_aead *tfm = aeadc->aead;
struct crypto_skcipher *null_tfm = aeadc->null_tfm;
- unsigned int i, as = crypto_aead_authsize(tfm);
+ unsigned int as = crypto_aead_authsize(tfm);
+ unsigned int ivsize = crypto_aead_ivsize(tfm);
struct af_alg_async_req *areq;
- struct af_alg_tsgl *tsgl, *tmp;
struct scatterlist *rsgl_src, *tsgl_src = NULL;
+ void *iv;
int err = 0;
size_t used = 0; /* [in] TX bufs to be en/decrypted */
size_t outlen = 0; /* [out] RX bufs produced by kernel */
@@ -155,10 +156,14 @@
/* Allocate cipher request for current operation. */
areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
- crypto_aead_reqsize(tfm));
+ crypto_aead_reqsize(tfm) + ivsize);
if (IS_ERR(areq))
return PTR_ERR(areq);
+ iv = (u8 *)aead_request_ctx(&areq->cra_u.aead_req) +
+ crypto_aead_reqsize(tfm);
+ memcpy(iv, ctx->iv, ivsize);
+
/* convert iovecs of output buffers into RX SGL */
err = af_alg_get_rsgl(sk, msg, flags, areq, outlen, &usedpages);
if (err)
@@ -174,7 +179,7 @@
if (usedpages < outlen) {
size_t less = outlen - usedpages;
- if (used < less) {
+ if (used < less + (ctx->enc ? 0 : as)) {
err = -EINVAL;
goto free;
}
@@ -182,23 +187,24 @@
outlen -= less;
}
+ /*
+ * Create a per request TX SGL for this request which tracks the
+ * SG entries from the global TX SGL.
+ */
processed = used + ctx->aead_assoclen;
- list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) {
- for (i = 0; i < tsgl->cur; i++) {
- struct scatterlist *process_sg = tsgl->sg + i;
-
- if (!(process_sg->length) || !sg_page(process_sg))
- continue;
- tsgl_src = process_sg;
- break;
- }
- if (tsgl_src)
- break;
- }
- if (processed && !tsgl_src) {
- err = -EFAULT;
+ areq->tsgl_entries = af_alg_count_tsgl(sk, processed);
+ if (!areq->tsgl_entries)
+ areq->tsgl_entries = 1;
+ areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
+ areq->tsgl_entries),
+ GFP_KERNEL);
+ if (!areq->tsgl) {
+ err = -ENOMEM;
goto free;
}
+ sg_init_table(areq->tsgl, areq->tsgl_entries);
+ af_alg_pull_tsgl(sk, processed, areq->tsgl);
+ tsgl_src = areq->tsgl;
/*
* Copy of AAD from source to destination
@@ -207,82 +213,19 @@
* when user space uses an in-place cipher operation, the kernel
* will copy the data as it does not see whether such in-place operation
* is initiated.
- *
- * To ensure efficiency, the following implementation ensure that the
- * ciphers are invoked to perform a crypto operation in-place. This
- * is achieved by memory management specified as follows.
*/
- /* Use the RX SGL as source (and destination) for crypto op. */
+ /* Use the RX SGL as destination for crypto op. */
rsgl_src = areq->first_rsgl.sgl.sg;
- if (ctx->enc) {
- /*
- * Encryption operation - The in-place cipher operation is
- * achieved by the following operation:
- *
- * TX SGL: AAD || PT
- * | |
- * | copy |
- * v v
- * RX SGL: AAD || PT || Tag
- */
- err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
- areq->first_rsgl.sgl.sg, processed);
- if (err)
- goto free;
- af_alg_pull_tsgl(sk, processed, NULL, 0);
- } else {
- /*
- * Decryption operation - To achieve an in-place cipher
- * operation, the following SGL structure is used:
- *
- * TX SGL: AAD || CT || Tag
- * | | ^
- * | copy | | Create SGL link.
- * v v |
- * RX SGL: AAD || CT ----+
- */
-
- /* Copy AAD || CT to RX SGL buffer for in-place operation. */
- err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
- areq->first_rsgl.sgl.sg, outlen);
- if (err)
- goto free;
-
- /* Create TX SGL for tag and chain it to RX SGL. */
- areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
- processed - as);
- if (!areq->tsgl_entries)
- areq->tsgl_entries = 1;
- areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
- areq->tsgl_entries),
- GFP_KERNEL);
- if (!areq->tsgl) {
- err = -ENOMEM;
- goto free;
- }
- sg_init_table(areq->tsgl, areq->tsgl_entries);
-
- /* Release TX SGL, except for tag data and reassign tag data. */
- af_alg_pull_tsgl(sk, processed, areq->tsgl, processed - as);
-
- /* chain the areq TX SGL holding the tag with RX SGL */
- if (usedpages) {
- /* RX SGL present */
- struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;
-
- sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
- sg_chain(sgl_prev->sg, sgl_prev->npages + 1,
- areq->tsgl);
- } else
- /* no RX SGL present (e.g. authentication only) */
- rsgl_src = areq->tsgl;
- }
+ err = crypto_aead_copy_sgl(null_tfm, tsgl_src, rsgl_src,
+ ctx->aead_assoclen);
+ if (err)
+ goto free;
/* Initialize the crypto operation */
- aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
- areq->first_rsgl.sgl.sg, used, ctx->iv);
+ aead_request_set_crypt(&areq->cra_u.aead_req, tsgl_src,
+ areq->first_rsgl.sgl.sg, used, iv);
aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
@@ -536,7 +479,7 @@
struct crypto_aead *tfm = aeadc->aead;
unsigned int ivlen = crypto_aead_ivsize(tfm);
- af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
+ af_alg_pull_tsgl(sk, ctx->used, NULL);
sock_kzfree_s(sk, ctx->iv, ivlen);
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -97,7 +97,7 @@
* Create a per request TX SGL for this request which tracks the
* SG entries from the global TX SGL.
*/
- areq->tsgl_entries = af_alg_count_tsgl(sk, len, 0);
+ areq->tsgl_entries = af_alg_count_tsgl(sk, len);
if (!areq->tsgl_entries)
areq->tsgl_entries = 1;
areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
@@ -108,7 +108,7 @@
goto free;
}
sg_init_table(areq->tsgl, areq->tsgl_entries);
- af_alg_pull_tsgl(sk, len, areq->tsgl, 0);
+ af_alg_pull_tsgl(sk, len, areq->tsgl);
/* Initialize the crypto operation */
skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm);
@@ -328,7 +328,7 @@
struct alg_sock *pask = alg_sk(psk);
struct crypto_skcipher *tfm = pask->private;
- af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
+ af_alg_pull_tsgl(sk, ctx->used, NULL);
sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
--- a/crypto/authencesn.c
+++ b/crypto/authencesn.c
@@ -206,6 +206,9 @@
struct scatterlist *src, *dst;
int err;
+ if (assoclen < 8)
+ return -EINVAL;
+
sg_init_table(areq_ctx->src, 2);
src = scatterwalk_ffwd(areq_ctx->src, req->src, assoclen);
dst = src;
@@ -245,6 +248,7 @@
crypto_ahash_alignmask(auth) + 1);
unsigned int cryptlen = req->cryptlen - authsize;
unsigned int assoclen = req->assoclen;
+ struct scatterlist *src = req->src;
struct scatterlist *dst = req->dst;
u8 *ihash = ohash + crypto_ahash_digestsize(auth);
u32 tmp[2];
@@ -252,23 +256,29 @@
if (!authsize)
goto decrypt;
- /* Move high-order bits of sequence number back. */
- scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
- scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
- scatterwalk_map_and_copy(tmp, dst, 0, 8, 1);
+ if (src == dst) {
+ /* Move high-order bits of sequence number back. */
+ scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
+ scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
+ scatterwalk_map_and_copy(tmp, dst, 0, 8, 1);
+ } else
+ memcpy_sglist(dst, src, assoclen);
if (crypto_memneq(ihash, ohash, authsize))
return -EBADMSG;
decrypt:
- sg_init_table(areq_ctx->dst, 2);
dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen);
+ if (req->src == req->dst)
+ src = dst;
+ else
+ src = scatterwalk_ffwd(areq_ctx->src, src, assoclen);
skcipher_request_set_tfm(skreq, ctx->enc);
skcipher_request_set_callback(skreq, flags,
req->base.complete, req->base.data);
- skcipher_request_set_crypt(skreq, dst, dst, cryptlen, req->iv);
+ skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
return crypto_skcipher_decrypt(skreq);
}
@@ -295,31 +305,36 @@
unsigned int assoclen = req->assoclen;
unsigned int cryptlen = req->cryptlen;
u8 *ihash = ohash + crypto_ahash_digestsize(auth);
+ struct scatterlist *src = req->src;
struct scatterlist *dst = req->dst;
u32 tmp[2];
int err;
- cryptlen -= authsize;
+ if (assoclen < 8)
+ return -EINVAL;
- if (req->src != dst) {
- err = crypto_authenc_esn_copy(req, assoclen + cryptlen);
- if (err)
- return err;
- }
+ if (!authsize)
+ goto tail;
+ cryptlen -= authsize;
scatterwalk_map_and_copy(ihash, req->src, assoclen + cryptlen,
authsize, 0);
- if (!authsize)
- goto tail;
-
/* Move high-order bits of sequence number to the end. */
- scatterwalk_map_and_copy(tmp, dst, 0, 8, 0);
- scatterwalk_map_and_copy(tmp, dst, 4, 4, 1);
- scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 1);
-
- sg_init_table(areq_ctx->dst, 2);
- dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
+ scatterwalk_map_and_copy(tmp, src, 0, 8, 0);
+ if (src == dst) {
+ scatterwalk_map_and_copy(tmp, dst, 4, 4, 1);
+ scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 1);
+ dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
+ } else {
+ scatterwalk_map_and_copy(tmp, dst, 0, 4, 1);
+ scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen - 4, 4, 1);
+
+ src = scatterwalk_ffwd(areq_ctx->src, src, 8);
+ dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
+ memcpy_sglist(dst, src, assoclen + cryptlen - 8);
+ dst = req->dst;
+ }
ahash_request_set_tfm(ahreq, auth);
ahash_request_set_crypt(ahreq, dst, ohash, assoclen + cryptlen);
--- a/crypto/scatterwalk.c
+++ b/crypto/scatterwalk.c
@@ -74,6 +74,104 @@
}
EXPORT_SYMBOL_GPL(scatterwalk_map_and_copy);
+/**
+ * memcpy_sglist() - Copy data from one scatterlist to another
+ * @dst: The destination scatterlist. Can be NULL if @nbytes == 0.
+ * @src: The source scatterlist. Can be NULL if @nbytes == 0.
+ * @nbytes: Number of bytes to copy
+ *
+ * The scatterlists can describe exactly the same memory, in which case this
+ * function is a no-op. No other overlaps are supported.
+ *
+ * Context: Any context
+ */
+void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes)
+{
+ unsigned int src_offset, dst_offset;
+
+ if (unlikely(nbytes == 0)) /* in case src and/or dst is NULL */
+ return;
+
+ src_offset = src->offset;
+ dst_offset = dst->offset;
+ for (;;) {
+ /* Compute the length to copy this step. */
+ unsigned int len = min3(src->offset + src->length - src_offset,
+ dst->offset + dst->length - dst_offset,
+ nbytes);
+ struct page *src_page = sg_page(src);
+ struct page *dst_page = sg_page(dst);
+ const void *src_virt;
+ void *dst_virt;
+
+ if (IS_ENABLED(CONFIG_HIGHMEM)) {
+ /* HIGHMEM: we may have to actually map the pages. */
+ const unsigned int src_oip = offset_in_page(src_offset);
+ const unsigned int dst_oip = offset_in_page(dst_offset);
+ const unsigned int limit = PAGE_SIZE;
+
+ /* Further limit len to not cross a page boundary. */
+ len = min3(len, limit - src_oip, limit - dst_oip);
+
+ /* Compute the source and destination pages. */
+ src_page += src_offset / PAGE_SIZE;
+ dst_page += dst_offset / PAGE_SIZE;
+
+ if (src_page != dst_page) {
+ /* Copy between different pages. */
+ dst_virt = kmap_atomic(dst_page);
+ src_virt = kmap_atomic(src_page);
+ memcpy(dst_virt + dst_oip, src_virt + src_oip,
+ len);
+ kunmap_atomic((void *)src_virt);
+ kunmap_atomic(dst_virt);
+ flush_dcache_page(dst_page);
+ } else if (src_oip != dst_oip) {
+ /* Copy between different parts of same page. */
+ dst_virt = kmap_atomic(dst_page);
+ memcpy(dst_virt + dst_oip, dst_virt + src_oip,
+ len);
+ kunmap_atomic(dst_virt);
+ flush_dcache_page(dst_page);
+ } /* Else, it's the same memory. No action needed. */
+ } else {
+ /*
+ * !HIGHMEM: no mapping needed. Just work in the linear
+ * buffer of each sg entry. Note that we can cross page
+ * boundaries, as they are not significant in this case.
+ */
+ src_virt = page_address(src_page) + src_offset;
+ dst_virt = page_address(dst_page) + dst_offset;
+ if (src_virt != dst_virt) {
+ memcpy(dst_virt, src_virt, len);
+ if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE)
+ __scatterwalk_flush_dcache_pages(
+ dst_page, dst_offset, len);
+ } /* Else, it's the same memory. No action needed. */
+ }
+ nbytes -= len;
+ if (nbytes == 0) /* No more to copy? */
+ break;
+
+ /*
+ * There's more to copy. Advance the offsets by the length
+ * copied this step, and advance the sg entries as needed.
+ */
+ src_offset += len;
+ if (src_offset >= src->offset + src->length) {
+ src = sg_next(src);
+ src_offset = src->offset;
+ }
+ dst_offset += len;
+ if (dst_offset >= dst->offset + dst->length) {
+ dst = sg_next(dst);
+ dst_offset = dst->offset;
+ }
+ }
+}
+EXPORT_SYMBOL_GPL(memcpy_sglist);
+
struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
struct scatterlist *src,
unsigned int len)
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -231,9 +231,8 @@
}
int af_alg_alloc_tsgl(struct sock *sk);
-unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset);
-void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
- size_t dst_offset);
+unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes);
+void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst);
void af_alg_free_areq_sgls(struct af_alg_async_req *areq);
int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags);
void af_alg_wmem_wakeup(struct sock *sk);
--- a/include/crypto/scatterwalk.h
+++ b/include/crypto/scatterwalk.h
@@ -111,6 +111,35 @@
scatterwalk_start(walk, sg_next(walk->sg));
}
+/*
+ * Flush the dcache of any pages that overlap the region
+ * [offset, offset + nbytes) relative to base_page.
+ *
+ * This should be called only when ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE, to ensure
+ * that all relevant code (including the call to sg_page() in the caller, if
+ * applicable) gets fully optimized out when !ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE.
+ */
+static inline void __scatterwalk_flush_dcache_pages(struct page *base_page,
+ unsigned int offset,
+ unsigned int nbytes)
+{
+ unsigned int num_pages;
+ unsigned int i;
+
+ base_page += offset / PAGE_SIZE;
+ offset %= PAGE_SIZE;
+
+ /*
+ * This is an overflow-safe version of
+ * num_pages = DIV_ROUND_UP(offset + nbytes, PAGE_SIZE).
+ */
+ num_pages = nbytes / PAGE_SIZE;
+ num_pages += DIV_ROUND_UP(offset + (nbytes % PAGE_SIZE), PAGE_SIZE);
+
+ for (i = 0; i < num_pages; i++)
+ flush_dcache_page(base_page + i);
+}
+
static inline void scatterwalk_done(struct scatter_walk *walk, int out,
int more)
{
@@ -123,6 +152,9 @@
size_t nbytes, int out);
void *scatterwalk_map(struct scatter_walk *walk);
+void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
+ unsigned int nbytes);
+
void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
unsigned int start, unsigned int nbytes, int out);
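
A minimal usage sketch of the backported memcpy_sglist(), illustrative
only (buffer names and sizes are hypothetical, not taken from this
patch):

        struct scatterlist src_sg, dst_sg;
        u8 src_buf[32], dst_buf[32];

        sg_init_one(&src_sg, src_buf, sizeof(src_buf));
        sg_init_one(&dst_sg, dst_buf, sizeof(dst_buf));

        /*
         * Unlike the null-skcipher copy path, memcpy_sglist() cannot
         * fail and needs no tfm. Identical src and dst SGLs are a
         * no-op; partial overlap is unsupported.
         */
        memcpy_sglist(&dst_sg, &src_sg, sizeof(src_buf));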

1100-crypto-algif_aead-Revert-to-operating-out-of-place.patch

@@ -1,308 +0,0 @@
From a664bf3d603dc3bdcf9ae47cc21e0daec706d7a5 Mon Sep 17 00:00:00 2001
From: Herbert Xu <herbert@gondor.apana.org.au>
Date: Thu, 26 Mar 2026 15:30:20 +0900
Subject: [PATCH] crypto: algif_aead - Revert to operating out-of-place

This mostly reverts commit 72548b093ee3 except for the copying of
the associated data.
There is no benefit in operating in-place in algif_aead since the
source and destination come from different mappings. Get rid of
all the complexity added for in-place operation and just copy the
AD directly.

Backported to kernel-4.18.0-553.120.1.el8_10: this tree pre-dates upstream's
memcpy_sglist() helper, so the AAD copy keeps using
crypto_aead_copy_sgl(null_tfm, ...). The function signatures of
af_alg_count_tsgl() and af_alg_pull_tsgl() are reverted to drop the
offset parameters as in upstream.

Fixes: 72548b093ee3 ("crypto: algif_aead - copy AAD from src to dst")
Reported-by: Taeyang Lee <0wn@theori.io>
Signed-off-by: Herbert Xu <herbert@gondor.apana.org.au>
---
--- a/crypto/af_alg.c
+++ b/crypto/af_alg.c
@@ -524,15 +524,13 @@
/**
* aead_count_tsgl - Count number of TX SG entries
*
- * The counting starts from the beginning of the SGL to @bytes. If
- * an offset is provided, the counting of the SG entries starts at the offset.
+ * The counting starts from the beginning of the SGL to @bytes.
*
* @sk socket of connection to user space
* @bytes Count the number of SG entries holding given number of bytes.
- * @offset Start the counting of SG entries from the given offset.
* @return Number of TX SG entries found given the constraints
*/
-unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
+unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes)
{
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
@@ -547,25 +545,11 @@
struct scatterlist *sg = sgl->sg;
for (i = 0; i < sgl->cur; i++) {
- size_t bytes_count;
-
- /* Skip offset */
- if (offset >= sg[i].length) {
- offset -= sg[i].length;
- bytes -= sg[i].length;
- continue;
- }
-
- bytes_count = sg[i].length - offset;
-
- offset = 0;
sgl_count++;
-
- /* If we have seen requested number of bytes, stop */
- if (bytes_count >= bytes)
+ if (sg[i].length >= bytes)
return sgl_count;
- bytes -= bytes_count;
+ bytes -= sg[i].length;
}
}
@@ -577,19 +561,14 @@
* aead_pull_tsgl - Release the specified buffers from TX SGL
*
* If @dst is non-null, reassign the pages to dst. The caller must release
- * the pages. If @dst_offset is given only reassign the pages to @dst starting
- * at the @dst_offset (byte). The caller must ensure that @dst is large
- * enough (e.g. by using af_alg_count_tsgl with the same offset).
+ * the pages.
*
* @sk socket of connection to user space
* @used Number of bytes to pull from TX SGL
* @dst If non-NULL, buffer is reassigned to dst SGL instead of releasing. The
* caller must release the buffers in dst.
- * @dst_offset Reassign the TX SGL from given offset. All buffers before
- * reaching the offset is released.
*/
-void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
- size_t dst_offset)
+void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst)
{
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
@@ -614,18 +593,10 @@
* SG entries in dst.
*/
if (dst) {
- if (dst_offset >= plen) {
- /* discard page before offset */
- dst_offset -= plen;
- } else {
- /* reassign page to dst after offset */
- get_page(page);
- sg_set_page(dst + j, page,
- plen - dst_offset,
- sg[i].offset + dst_offset);
- dst_offset = 0;
- j++;
- }
+ /* reassign page to dst after offset */
+ get_page(page);
+ sg_set_page(dst + j, page, plen, sg[i].offset);
+ j++;
}
sg[i].length -= plen;
--- a/crypto/algif_aead.c
+++ b/crypto/algif_aead.c
@@ -100,9 +100,8 @@
struct aead_tfm *aeadc = pask->private;
struct crypto_aead *tfm = aeadc->aead;
struct crypto_skcipher *null_tfm = aeadc->null_tfm;
- unsigned int i, as = crypto_aead_authsize(tfm);
+ unsigned int as = crypto_aead_authsize(tfm);
struct af_alg_async_req *areq;
- struct af_alg_tsgl *tsgl, *tmp;
struct scatterlist *rsgl_src, *tsgl_src = NULL;
int err = 0;
size_t used = 0; /* [in] TX bufs to be en/decrypted */
@@ -182,23 +181,24 @@
outlen -= less;
}
+ /*
+ * Create a per request TX SGL for this request which tracks the
+ * SG entries from the global TX SGL.
+ */
processed = used + ctx->aead_assoclen;
- list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) {
- for (i = 0; i < tsgl->cur; i++) {
- struct scatterlist *process_sg = tsgl->sg + i;
-
- if (!(process_sg->length) || !sg_page(process_sg))
- continue;
- tsgl_src = process_sg;
- break;
- }
- if (tsgl_src)
- break;
- }
- if (processed && !tsgl_src) {
- err = -EFAULT;
+ areq->tsgl_entries = af_alg_count_tsgl(sk, processed);
+ if (!areq->tsgl_entries)
+ areq->tsgl_entries = 1;
+ areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
+ areq->tsgl_entries),
+ GFP_KERNEL);
+ if (!areq->tsgl) {
+ err = -ENOMEM;
goto free;
}
+ sg_init_table(areq->tsgl, areq->tsgl_entries);
+ af_alg_pull_tsgl(sk, processed, areq->tsgl);
+ tsgl_src = areq->tsgl;
/*
* Copy of AAD from source to destination
@@ -207,81 +207,18 @@
* when user space uses an in-place cipher operation, the kernel
* will copy the data as it does not see whether such in-place operation
* is initiated.
- *
- * To ensure efficiency, the following implementation ensure that the
- * ciphers are invoked to perform a crypto operation in-place. This
- * is achieved by memory management specified as follows.
*/
- /* Use the RX SGL as source (and destination) for crypto op. */
+ /* Use the RX SGL as destination for crypto op. */
rsgl_src = areq->first_rsgl.sgl.sg;
- if (ctx->enc) {
- /*
- * Encryption operation - The in-place cipher operation is
- * achieved by the following operation:
- *
- * TX SGL: AAD || PT
- * | |
- * | copy |
- * v v
- * RX SGL: AAD || PT || Tag
- */
- err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
- areq->first_rsgl.sgl.sg, processed);
- if (err)
- goto free;
- af_alg_pull_tsgl(sk, processed, NULL, 0);
- } else {
- /*
- * Decryption operation - To achieve an in-place cipher
- * operation, the following SGL structure is used:
- *
- * TX SGL: AAD || CT || Tag
- * | | ^
- * | copy | | Create SGL link.
- * v v |
- * RX SGL: AAD || CT ----+
- */
-
- /* Copy AAD || CT to RX SGL buffer for in-place operation. */
- err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
- areq->first_rsgl.sgl.sg, outlen);
- if (err)
- goto free;
-
- /* Create TX SGL for tag and chain it to RX SGL. */
- areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
- processed - as);
- if (!areq->tsgl_entries)
- areq->tsgl_entries = 1;
- areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
- areq->tsgl_entries),
- GFP_KERNEL);
- if (!areq->tsgl) {
- err = -ENOMEM;
- goto free;
- }
- sg_init_table(areq->tsgl, areq->tsgl_entries);
-
- /* Release TX SGL, except for tag data and reassign tag data. */
- af_alg_pull_tsgl(sk, processed, areq->tsgl, processed - as);
-
- /* chain the areq TX SGL holding the tag with RX SGL */
- if (usedpages) {
- /* RX SGL present */
- struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;
-
- sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
- sg_chain(sgl_prev->sg, sgl_prev->npages + 1,
- areq->tsgl);
- } else
- /* no RX SGL present (e.g. authentication only) */
- rsgl_src = areq->tsgl;
- }
+ err = crypto_aead_copy_sgl(null_tfm, tsgl_src, rsgl_src,
+ ctx->aead_assoclen);
+ if (err)
+ goto free;
/* Initialize the crypto operation */
- aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
+ aead_request_set_crypt(&areq->cra_u.aead_req, tsgl_src,
areq->first_rsgl.sgl.sg, used, ctx->iv);
aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
@@ -536,7 +473,7 @@
struct crypto_aead *tfm = aeadc->aead;
unsigned int ivlen = crypto_aead_ivsize(tfm);
- af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
+ af_alg_pull_tsgl(sk, ctx->used, NULL);
sock_kzfree_s(sk, ctx->iv, ivlen);
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
--- a/crypto/algif_skcipher.c
+++ b/crypto/algif_skcipher.c
@@ -97,7 +97,7 @@
* Create a per request TX SGL for this request which tracks the
* SG entries from the global TX SGL.
*/
- areq->tsgl_entries = af_alg_count_tsgl(sk, len, 0);
+ areq->tsgl_entries = af_alg_count_tsgl(sk, len);
if (!areq->tsgl_entries)
areq->tsgl_entries = 1;
areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
@@ -108,7 +108,7 @@
goto free;
}
sg_init_table(areq->tsgl, areq->tsgl_entries);
- af_alg_pull_tsgl(sk, len, areq->tsgl, 0);
+ af_alg_pull_tsgl(sk, len, areq->tsgl);
/* Initialize the crypto operation */
skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm);
@@ -328,7 +328,7 @@
struct alg_sock *pask = alg_sk(psk);
struct crypto_skcipher *tfm = pask->private;
- af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
+ af_alg_pull_tsgl(sk, ctx->used, NULL);
sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
--- a/include/crypto/if_alg.h
+++ b/include/crypto/if_alg.h
@@ -231,9 +231,8 @@
}
int af_alg_alloc_tsgl(struct sock *sk);
-unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset);
-void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
- size_t dst_offset);
+unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes);
+void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst);
void af_alg_free_areq_sgls(struct af_alg_async_req *areq);
int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags);
void af_alg_wmem_wakeup(struct sock *sk);