From f9abe6d6e4edf7155cb367d8dee976ecfe0945b3 Mon Sep 17 00:00:00 2001
From: Koichiro Iwao
Date: Thu, 30 Apr 2026 13:35:25 +0000
Subject: [PATCH] Update CVE-2026-31431 patch to include more upstream commits
---
...VE-2026-31431-crypto-Copy-Fail-fixes.patch | 952 ++++++++++++++++++
...ead-Revert-to-operating-out-of-place.patch | 310 ------
SPECS/raspberrypi2.spec | 7 +-
3 files changed, 957 insertions(+), 312 deletions(-)
create mode 100644 SOURCES/1100-CVE-2026-31431-crypto-Copy-Fail-fixes.patch
delete mode 100644 SOURCES/1100-crypto-algif_aead-Revert-to-operating-out-of-place.patch
diff --git a/SOURCES/1100-CVE-2026-31431-crypto-Copy-Fail-fixes.patch b/SOURCES/1100-CVE-2026-31431-crypto-Copy-Fail-fixes.patch
new file mode 100644
index 0000000..656decd
--- /dev/null
+++ b/SOURCES/1100-CVE-2026-31431-crypto-Copy-Fail-fixes.patch
@@ -0,0 +1,952 @@
+From: AlmaLinux Backport
+Subject: [PATCH] CVE-2026-31431 ("Copy Fail"): crypto AEAD/algif fixes from linux-6.12.y
+
+Combined backport addressing CVE-2026-31431 ("Copy Fail"), reported by
+Taeyang Lee <0wn@theori.io>. Pulls one prerequisite (committed 2026-01-30
+to linux-6.12.y) plus eight 2026-04-30 stable fixes:
+
+ 161bdc90fce2 crypto: authencesn - reject too-short AAD (assoclen<8) to match ESP/ESN spec
+ 41c3aa511e6e crypto: scatterwalk - Backport memcpy_sglist()
+ 183137264401 crypto: algif_aead - use memcpy_sglist() instead of null skcipher
+ 8b88d99341f1 crypto: algif_aead - Revert to operating out-of-place
+ 46fdb39e8322 crypto: algif_aead - snapshot IV for async AEAD requests
+ 7bc058a9b82b crypto: authenc - use memcpy_sglist() instead of null skcipher
+ 89fe118b6470 crypto: authencesn - Do not place hiseq at end of dst for out-of-place decryption
+ 129f12934401 crypto: authencesn - Fix src offset when decrypting in-place
+ c8369a6d62f5 crypto: af_alg - Fix page reassignment overflow in af_alg_pull_tsgl
+
+161bdc90fce2 ("crypto: authencesn - reject too-short AAD") is the
+prerequisite required for 89fe118b6470 to apply cleanly.
+
+Signed-off-by: Andrew Lukoshko
+---
+--- a/crypto/af_alg.c
++++ b/crypto/af_alg.c
+@@ -635,15 +635,13 @@
+ /**
+ * af_alg_count_tsgl - Count number of TX SG entries
+ *
+- * The counting starts from the beginning of the SGL to @bytes. If
+- * an @offset is provided, the counting of the SG entries starts at the @offset.
++ * The counting starts from the beginning of the SGL to @bytes.
+ *
+ * @sk: socket of connection to user space
+ * @bytes: Count the number of SG entries holding given number of bytes.
+- * @offset: Start the counting of SG entries from the given offset.
+ * Return: Number of TX SG entries found given the constraints
+ */
+-unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
++unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes)
+ {
+ const struct alg_sock *ask = alg_sk(sk);
+ const struct af_alg_ctx *ctx = ask->private;
+@@ -658,25 +656,11 @@
+ const struct scatterlist *sg = sgl->sg;
+
+ for (i = 0; i < sgl->cur; i++) {
+- size_t bytes_count;
+-
+- /* Skip offset */
+- if (offset >= sg[i].length) {
+- offset -= sg[i].length;
+- bytes -= sg[i].length;
+- continue;
+- }
+-
+- bytes_count = sg[i].length - offset;
+-
+- offset = 0;
+ sgl_count++;
+-
+- /* If we have seen requested number of bytes, stop */
+- if (bytes_count >= bytes)
++ if (sg[i].length >= bytes)
+ return sgl_count;
+
+- bytes -= bytes_count;
++ bytes -= sg[i].length;
+ }
+ }
+
+@@ -688,19 +672,14 @@
+ * af_alg_pull_tsgl - Release the specified buffers from TX SGL
+ *
+ * If @dst is non-null, reassign the pages to @dst. The caller must release
+- * the pages. If @dst_offset is given only reassign the pages to @dst starting
+- * at the @dst_offset (byte). The caller must ensure that @dst is large
+- * enough (e.g. by using af_alg_count_tsgl with the same offset).
++ * the pages.
+ *
+ * @sk: socket of connection to user space
+ * @used: Number of bytes to pull from TX SGL
+ * @dst: If non-NULL, buffer is reassigned to dst SGL instead of releasing. The
+ * caller must release the buffers in dst.
+- * @dst_offset: Reassign the TX SGL from given offset. All buffers before
+- * reaching the offset is released.
+ */
+-void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
+- size_t dst_offset)
++void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst)
+ {
+ struct alg_sock *ask = alg_sk(sk);
+ struct af_alg_ctx *ctx = ask->private;
+@@ -724,19 +703,11 @@
+ * Assumption: caller created af_alg_count_tsgl(len)
+ * SG entries in dst.
+ */
+- if (dst) {
+- if (dst_offset >= plen) {
+- /* discard page before offset */
+- dst_offset -= plen;
+- } else {
+- /* reassign page to dst after offset */
+- get_page(page);
+- sg_set_page(dst + j, page,
+- plen - dst_offset,
+- sg[i].offset + dst_offset);
+- dst_offset = 0;
+- j++;
+- }
++ if (dst && plen) {
++ /* reassign page to dst */
++ get_page(page);
++ sg_set_page(dst + j, page, plen, sg[i].offset);
++ j++;
+ }
+
+ sg[i].length -= plen;
+--- a/crypto/algif_aead.c
++++ b/crypto/algif_aead.c
+@@ -26,8 +26,6 @@
+ #include <crypto/internal/aead.h>
+ #include <crypto/scatterwalk.h>
+ #include <crypto/if_alg.h>
+-#include <crypto/skcipher.h>
+-#include <crypto/null.h>
+ #include <linux/init.h>
+ #include <linux/list.h>
+ #include <linux/kernel.h>
+@@ -36,19 +34,13 @@
+ #include <linux/net.h>
+ #include <net/sock.h>
+
+-struct aead_tfm {
+- struct crypto_aead *aead;
+- struct crypto_sync_skcipher *null_tfm;
+-};
+-
+ static inline bool aead_sufficient_data(struct sock *sk)
+ {
+ struct alg_sock *ask = alg_sk(sk);
+ struct sock *psk = ask->parent;
+ struct alg_sock *pask = alg_sk(psk);
+ struct af_alg_ctx *ctx = ask->private;
+- struct aead_tfm *aeadc = pask->private;
+- struct crypto_aead *tfm = aeadc->aead;
++ struct crypto_aead *tfm = pask->private;
+ unsigned int as = crypto_aead_authsize(tfm);
+
+ /*
+@@ -64,27 +56,12 @@
+ struct alg_sock *ask = alg_sk(sk);
+ struct sock *psk = ask->parent;
+ struct alg_sock *pask = alg_sk(psk);
+- struct aead_tfm *aeadc = pask->private;
+- struct crypto_aead *tfm = aeadc->aead;
++ struct crypto_aead *tfm = pask->private;
+ unsigned int ivsize = crypto_aead_ivsize(tfm);
+
+ return af_alg_sendmsg(sock, msg, size, ivsize);
+ }
+
+-static int crypto_aead_copy_sgl(struct crypto_sync_skcipher *null_tfm,
+- struct scatterlist *src,
+- struct scatterlist *dst, unsigned int len)
+-{
+- SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
+-
+- skcipher_request_set_sync_tfm(skreq, null_tfm);
+- skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_SLEEP,
+- NULL, NULL);
+- skcipher_request_set_crypt(skreq, src, dst, len, NULL);
+-
+- return crypto_skcipher_encrypt(skreq);
+-}
+-
+ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
+ size_t ignored, int flags)
+ {
+@@ -93,13 +70,12 @@
+ struct sock *psk = ask->parent;
+ struct alg_sock *pask = alg_sk(psk);
+ struct af_alg_ctx *ctx = ask->private;
+- struct aead_tfm *aeadc = pask->private;
+- struct crypto_aead *tfm = aeadc->aead;
+- struct crypto_sync_skcipher *null_tfm = aeadc->null_tfm;
+- unsigned int i, as = crypto_aead_authsize(tfm);
++ struct crypto_aead *tfm = pask->private;
++ unsigned int as = crypto_aead_authsize(tfm);
++ unsigned int ivsize = crypto_aead_ivsize(tfm);
+ struct af_alg_async_req *areq;
+- struct af_alg_tsgl *tsgl, *tmp;
+ struct scatterlist *rsgl_src, *tsgl_src = NULL;
++ void *iv;
+ int err = 0;
+ size_t used = 0; /* [in] TX bufs to be en/decrypted */
+ size_t outlen = 0; /* [out] RX bufs produced by kernel */
+@@ -151,10 +127,14 @@
+
+ /* Allocate cipher request for current operation. */
+ areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
+- crypto_aead_reqsize(tfm));
++ crypto_aead_reqsize(tfm) + ivsize);
+ if (IS_ERR(areq))
+ return PTR_ERR(areq);
+
++ iv = (u8 *)aead_request_ctx(&areq->cra_u.aead_req) +
++ crypto_aead_reqsize(tfm);
++ memcpy(iv, ctx->iv, ivsize);
++
+ /* convert iovecs of output buffers into RX SGL */
+ err = af_alg_get_rsgl(sk, msg, flags, areq, outlen, &usedpages);
+ if (err)
+@@ -178,23 +158,24 @@
+ outlen -= less;
+ }
+
++ /*
++ * Create a per request TX SGL for this request which tracks the
++ * SG entries from the global TX SGL.
++ */
+ processed = used + ctx->aead_assoclen;
+- list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) {
+- for (i = 0; i < tsgl->cur; i++) {
+- struct scatterlist *process_sg = tsgl->sg + i;
+-
+- if (!(process_sg->length) || !sg_page(process_sg))
+- continue;
+- tsgl_src = process_sg;
+- break;
+- }
+- if (tsgl_src)
+- break;
+- }
+- if (processed && !tsgl_src) {
+- err = -EFAULT;
++ areq->tsgl_entries = af_alg_count_tsgl(sk, processed);
++ if (!areq->tsgl_entries)
++ areq->tsgl_entries = 1;
++ areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
++ areq->tsgl_entries),
++ GFP_KERNEL);
++ if (!areq->tsgl) {
++ err = -ENOMEM;
+ goto free;
+ }
++ sg_init_table(areq->tsgl, areq->tsgl_entries);
++ af_alg_pull_tsgl(sk, processed, areq->tsgl);
++ tsgl_src = areq->tsgl;
+
+ /*
+ * Copy of AAD from source to destination
+@@ -203,84 +184,16 @@
+ * when user space uses an in-place cipher operation, the kernel
+ * will copy the data as it does not see whether such in-place operation
+ * is initiated.
+- *
+- * To ensure efficiency, the following implementation ensure that the
+- * ciphers are invoked to perform a crypto operation in-place. This
+- * is achieved by memory management specified as follows.
+ */
+
+ /* Use the RX SGL as source (and destination) for crypto op. */
+ rsgl_src = areq->first_rsgl.sgl.sgt.sgl;
+
+- if (ctx->enc) {
+- /*
+- * Encryption operation - The in-place cipher operation is
+- * achieved by the following operation:
+- *
+- * TX SGL: AAD || PT
+- * | |
+- * | copy |
+- * v v
+- * RX SGL: AAD || PT || Tag
+- */
+- err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
+- areq->first_rsgl.sgl.sgt.sgl,
+- processed);
+- if (err)
+- goto free;
+- af_alg_pull_tsgl(sk, processed, NULL, 0);
+- } else {
+- /*
+- * Decryption operation - To achieve an in-place cipher
+- * operation, the following SGL structure is used:
+- *
+- * TX SGL: AAD || CT || Tag
+- * | | ^
+- * | copy | | Create SGL link.
+- * v v |
+- * RX SGL: AAD || CT ----+
+- */
+-
+- /* Copy AAD || CT to RX SGL buffer for in-place operation. */
+- err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
+- areq->first_rsgl.sgl.sgt.sgl,
+- outlen);
+- if (err)
+- goto free;
+-
+- /* Create TX SGL for tag and chain it to RX SGL. */
+- areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
+- processed - as);
+- if (!areq->tsgl_entries)
+- areq->tsgl_entries = 1;
+- areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
+- areq->tsgl_entries),
+- GFP_KERNEL);
+- if (!areq->tsgl) {
+- err = -ENOMEM;
+- goto free;
+- }
+- sg_init_table(areq->tsgl, areq->tsgl_entries);
+-
+- /* Release TX SGL, except for tag data and reassign tag data. */
+- af_alg_pull_tsgl(sk, processed, areq->tsgl, processed - as);
+-
+- /* chain the areq TX SGL holding the tag with RX SGL */
+- if (usedpages) {
+- /* RX SGL present */
+- struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;
+- struct scatterlist *sg = sgl_prev->sgt.sgl;
+-
+- sg_unmark_end(sg + sgl_prev->sgt.nents - 1);
+- sg_chain(sg, sgl_prev->sgt.nents + 1, areq->tsgl);
+- } else
+- /* no RX SGL present (e.g. authentication only) */
+- rsgl_src = areq->tsgl;
+- }
++ memcpy_sglist(rsgl_src, tsgl_src, ctx->aead_assoclen);
+
+ /* Initialize the crypto operation */
+- aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
+- areq->first_rsgl.sgl.sgt.sgl, used, ctx->iv);
++ aead_request_set_crypt(&areq->cra_u.aead_req, tsgl_src,
++ areq->first_rsgl.sgl.sgt.sgl, used, iv);
+ aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
+ aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
+
+@@ -379,7 +292,7 @@
+ int err = 0;
+ struct sock *psk;
+ struct alg_sock *pask;
+- struct aead_tfm *tfm;
++ struct crypto_aead *tfm;
+ struct sock *sk = sock->sk;
+ struct alg_sock *ask = alg_sk(sk);
+
+@@ -393,7 +306,7 @@
+
+ err = -ENOKEY;
+ lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
+- if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
++ if (crypto_aead_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ goto unlock;
+
+ atomic_dec(&pask->nokey_refcnt);
+@@ -454,54 +367,22 @@
+
+ static void *aead_bind(const char *name, u32 type, u32 mask)
+ {
+- struct aead_tfm *tfm;
+- struct crypto_aead *aead;
+- struct crypto_sync_skcipher *null_tfm;
+-
+- tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
+- if (!tfm)
+- return ERR_PTR(-ENOMEM);
+-
+- aead = crypto_alloc_aead(name, type, mask);
+- if (IS_ERR(aead)) {
+- kfree(tfm);
+- return ERR_CAST(aead);
+- }
+-
+- null_tfm = crypto_get_default_null_skcipher();
+- if (IS_ERR(null_tfm)) {
+- crypto_free_aead(aead);
+- kfree(tfm);
+- return ERR_CAST(null_tfm);
+- }
+-
+- tfm->aead = aead;
+- tfm->null_tfm = null_tfm;
+-
+- return tfm;
++ return crypto_alloc_aead(name, type, mask);
+ }
+
+ static void aead_release(void *private)
+ {
+- struct aead_tfm *tfm = private;
+-
+- crypto_free_aead(tfm->aead);
+- crypto_put_default_null_skcipher();
+- kfree(tfm);
++ crypto_free_aead(private);
+ }
+
+ static int aead_setauthsize(void *private, unsigned int authsize)
+ {
+- struct aead_tfm *tfm = private;
+-
+- return crypto_aead_setauthsize(tfm->aead, authsize);
++ return crypto_aead_setauthsize(private, authsize);
+ }
+
+ static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
+ {
+- struct aead_tfm *tfm = private;
+-
+- return crypto_aead_setkey(tfm->aead, key, keylen);
++ return crypto_aead_setkey(private, key, keylen);
+ }
+
+ static void aead_sock_destruct(struct sock *sk)
+@@ -510,11 +391,10 @@
+ struct af_alg_ctx *ctx = ask->private;
+ struct sock *psk = ask->parent;
+ struct alg_sock *pask = alg_sk(psk);
+- struct aead_tfm *aeadc = pask->private;
+- struct crypto_aead *tfm = aeadc->aead;
++ struct crypto_aead *tfm = pask->private;
+ unsigned int ivlen = crypto_aead_ivsize(tfm);
+
+- af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
++ af_alg_pull_tsgl(sk, ctx->used, NULL);
+ sock_kzfree_s(sk, ctx->iv, ivlen);
+ sock_kfree_s(sk, ctx, ctx->len);
+ af_alg_release_parent(sk);
+@@ -524,10 +404,9 @@
+ {
+ struct af_alg_ctx *ctx;
+ struct alg_sock *ask = alg_sk(sk);
+- struct aead_tfm *tfm = private;
+- struct crypto_aead *aead = tfm->aead;
++ struct crypto_aead *tfm = private;
+ unsigned int len = sizeof(*ctx);
+- unsigned int ivlen = crypto_aead_ivsize(aead);
++ unsigned int ivlen = crypto_aead_ivsize(tfm);
+
+ ctx = sock_kmalloc(sk, len, GFP_KERNEL);
+ if (!ctx)
+@@ -554,9 +433,9 @@
+
+ static int aead_accept_parent(void *private, struct sock *sk)
+ {
+- struct aead_tfm *tfm = private;
++ struct crypto_aead *tfm = private;
+
+- if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
++ if (crypto_aead_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
+ return -ENOKEY;
+
+ return aead_accept_parent_nokey(private, sk);
+--- a/crypto/algif_skcipher.c
++++ b/crypto/algif_skcipher.c
+@@ -138,7 +138,7 @@
+ * Create a per request TX SGL for this request which tracks the
+ * SG entries from the global TX SGL.
+ */
+- areq->tsgl_entries = af_alg_count_tsgl(sk, len, 0);
++ areq->tsgl_entries = af_alg_count_tsgl(sk, len);
+ if (!areq->tsgl_entries)
+ areq->tsgl_entries = 1;
+ areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
+@@ -149,7 +149,7 @@
+ goto free;
+ }
+ sg_init_table(areq->tsgl, areq->tsgl_entries);
+- af_alg_pull_tsgl(sk, len, areq->tsgl, 0);
++ af_alg_pull_tsgl(sk, len, areq->tsgl);
+
+ /* Initialize the crypto operation */
+ skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm);
+@@ -363,7 +363,7 @@
+ struct alg_sock *pask = alg_sk(psk);
+ struct crypto_skcipher *tfm = pask->private;
+
+- af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
++ af_alg_pull_tsgl(sk, ctx->used, NULL);
+ sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
+ if (ctx->state)
+ sock_kzfree_s(sk, ctx->state, crypto_skcipher_statesize(tfm));
+--- a/crypto/authenc.c
++++ b/crypto/authenc.c
+@@ -9,7 +9,6 @@
+ #include <crypto/internal/hash.h>
+ #include <crypto/internal/skcipher.h>
+ #include <crypto/authenc.h>
+-#include <crypto/null.h>
+ #include <crypto/scatterwalk.h>
+ #include <linux/err.h>
+ #include <linux/init.h>
+@@ -28,7 +27,6 @@
+ struct crypto_authenc_ctx {
+ struct crypto_ahash *auth;
+ struct crypto_skcipher *enc;
+- struct crypto_sync_skcipher *null;
+ };
+
+ struct authenc_request_ctx {
+@@ -170,21 +168,6 @@
+ authenc_request_complete(areq, err);
+ }
+
+-static int crypto_authenc_copy_assoc(struct aead_request *req)
+-{
+- struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+- struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
+- SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
+-
+- skcipher_request_set_sync_tfm(skreq, ctx->null);
+- skcipher_request_set_callback(skreq, aead_request_flags(req),
+- NULL, NULL);
+- skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
+- NULL);
+-
+- return crypto_skcipher_encrypt(skreq);
+-}
+-
+ static int crypto_authenc_encrypt(struct aead_request *req)
+ {
+ struct crypto_aead *authenc = crypto_aead_reqtfm(req);
+@@ -203,10 +186,7 @@
+ dst = src;
+
+ if (req->src != req->dst) {
+- err = crypto_authenc_copy_assoc(req);
+- if (err)
+- return err;
+-
++ memcpy_sglist(req->dst, req->src, req->assoclen);
+ dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
+ }
+
+@@ -303,7 +283,6 @@
+ struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
+ struct crypto_ahash *auth;
+ struct crypto_skcipher *enc;
+- struct crypto_sync_skcipher *null;
+ int err;
+
+ auth = crypto_spawn_ahash(&ictx->auth);
+@@ -315,14 +294,8 @@
+ if (IS_ERR(enc))
+ goto err_free_ahash;
+
+- null = crypto_get_default_null_skcipher();
+- err = PTR_ERR(null);
+- if (IS_ERR(null))
+- goto err_free_skcipher;
+-
+ ctx->auth = auth;
+ ctx->enc = enc;
+- ctx->null = null;
+
+ crypto_aead_set_reqsize(
+ tfm,
+@@ -336,8 +309,6 @@
+
+ return 0;
+
+-err_free_skcipher:
+- crypto_free_skcipher(enc);
+ err_free_ahash:
+ crypto_free_ahash(auth);
+ return err;
+@@ -349,7 +320,6 @@
+
+ crypto_free_ahash(ctx->auth);
+ crypto_free_skcipher(ctx->enc);
+- crypto_put_default_null_skcipher();
+ }
+
+ static void crypto_authenc_free(struct aead_instance *inst)
+--- a/crypto/authencesn.c
++++ b/crypto/authencesn.c
+@@ -12,7 +12,6 @@
+ #include <crypto/internal/hash.h>
+ #include <crypto/internal/skcipher.h>
+ #include <crypto/authenc.h>
+-#include <crypto/null.h>
+ #include <crypto/scatterwalk.h>
+ #include <linux/err.h>
+ #include <linux/init.h>
+@@ -31,7 +30,6 @@
+ unsigned int reqoff;
+ struct crypto_ahash *auth;
+ struct crypto_skcipher *enc;
+- struct crypto_sync_skcipher *null;
+ };
+
+ struct authenc_esn_request_ctx {
+@@ -158,20 +156,6 @@
+ authenc_esn_request_complete(areq, err);
+ }
+
+-static int crypto_authenc_esn_copy(struct aead_request *req, unsigned int len)
+-{
+- struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+- struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
+- SYNC_SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
+-
+- skcipher_request_set_sync_tfm(skreq, ctx->null);
+- skcipher_request_set_callback(skreq, aead_request_flags(req),
+- NULL, NULL);
+- skcipher_request_set_crypt(skreq, req->src, req->dst, len, NULL);
+-
+- return crypto_skcipher_encrypt(skreq);
+-}
+-
+ static int crypto_authenc_esn_encrypt(struct aead_request *req)
+ {
+ struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
+@@ -185,15 +169,15 @@
+ struct scatterlist *src, *dst;
+ int err;
+
++ if (assoclen < 8)
++ return -EINVAL;
++
+ sg_init_table(areq_ctx->src, 2);
+ src = scatterwalk_ffwd(areq_ctx->src, req->src, assoclen);
+ dst = src;
+
+ if (req->src != req->dst) {
+- err = crypto_authenc_esn_copy(req, assoclen);
+- if (err)
+- return err;
+-
++ memcpy_sglist(req->dst, req->src, assoclen);
+ sg_init_table(areq_ctx->dst, 2);
+ dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, assoclen);
+ }
+@@ -223,6 +207,7 @@
+ u8 *ohash = areq_ctx->tail;
+ unsigned int cryptlen = req->cryptlen - authsize;
+ unsigned int assoclen = req->assoclen;
++ struct scatterlist *src = req->src;
+ struct scatterlist *dst = req->dst;
+ u8 *ihash = ohash + crypto_ahash_digestsize(auth);
+ u32 tmp[2];
+@@ -230,23 +215,29 @@
+ if (!authsize)
+ goto decrypt;
+
+- /* Move high-order bits of sequence number back. */
+- scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
+- scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
+- scatterwalk_map_and_copy(tmp, dst, 0, 8, 1);
++ if (src == dst) {
++ /* Move high-order bits of sequence number back. */
++ scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
++ scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
++ scatterwalk_map_and_copy(tmp, dst, 0, 8, 1);
++ } else
++ memcpy_sglist(dst, src, assoclen);
+
+ if (crypto_memneq(ihash, ohash, authsize))
+ return -EBADMSG;
+
+ decrypt:
+
+- sg_init_table(areq_ctx->dst, 2);
+ dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen);
++ if (req->src == req->dst)
++ src = dst;
++ else
++ src = scatterwalk_ffwd(areq_ctx->src, src, assoclen);
+
+ skcipher_request_set_tfm(skreq, ctx->enc);
+ skcipher_request_set_callback(skreq, flags,
+ req->base.complete, req->base.data);
+- skcipher_request_set_crypt(skreq, dst, dst, cryptlen, req->iv);
++ skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
+
+ return crypto_skcipher_decrypt(skreq);
+ }
+@@ -271,31 +262,36 @@
+ unsigned int assoclen = req->assoclen;
+ unsigned int cryptlen = req->cryptlen;
+ u8 *ihash = ohash + crypto_ahash_digestsize(auth);
++ struct scatterlist *src = req->src;
+ struct scatterlist *dst = req->dst;
+ u32 tmp[2];
+ int err;
+
+- cryptlen -= authsize;
++ if (assoclen < 8)
++ return -EINVAL;
+
+- if (req->src != dst) {
+- err = crypto_authenc_esn_copy(req, assoclen + cryptlen);
+- if (err)
+- return err;
+- }
++ if (!authsize)
++ goto tail;
+
++ cryptlen -= authsize;
+ scatterwalk_map_and_copy(ihash, req->src, assoclen + cryptlen,
+ authsize, 0);
+
+- if (!authsize)
+- goto tail;
+-
+ /* Move high-order bits of sequence number to the end. */
+- scatterwalk_map_and_copy(tmp, dst, 0, 8, 0);
+- scatterwalk_map_and_copy(tmp, dst, 4, 4, 1);
+- scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 1);
+-
+- sg_init_table(areq_ctx->dst, 2);
+- dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
++ scatterwalk_map_and_copy(tmp, src, 0, 8, 0);
++ if (src == dst) {
++ scatterwalk_map_and_copy(tmp, dst, 4, 4, 1);
++ scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 1);
++ dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
++ } else {
++ scatterwalk_map_and_copy(tmp, dst, 0, 4, 1);
++ scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen - 4, 4, 1);
++
++ src = scatterwalk_ffwd(areq_ctx->src, src, 8);
++ dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
++ memcpy_sglist(dst, src, assoclen + cryptlen - 8);
++ dst = req->dst;
++ }
+
+ ahash_request_set_tfm(ahreq, auth);
+ ahash_request_set_crypt(ahreq, dst, ohash, assoclen + cryptlen);
+@@ -317,7 +313,6 @@
+ struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
+ struct crypto_ahash *auth;
+ struct crypto_skcipher *enc;
+- struct crypto_sync_skcipher *null;
+ int err;
+
+ auth = crypto_spawn_ahash(&ictx->auth);
+@@ -329,14 +324,8 @@
+ if (IS_ERR(enc))
+ goto err_free_ahash;
+
+- null = crypto_get_default_null_skcipher();
+- err = PTR_ERR(null);
+- if (IS_ERR(null))
+- goto err_free_skcipher;
+-
+ ctx->auth = auth;
+ ctx->enc = enc;
+- ctx->null = null;
+
+ ctx->reqoff = 2 * crypto_ahash_digestsize(auth);
+
+@@ -352,8 +341,6 @@
+
+ return 0;
+
+-err_free_skcipher:
+- crypto_free_skcipher(enc);
+ err_free_ahash:
+ crypto_free_ahash(auth);
+ return err;
+@@ -365,7 +352,6 @@
+
+ crypto_free_ahash(ctx->auth);
+ crypto_free_skcipher(ctx->enc);
+- crypto_put_default_null_skcipher();
+ }
+
+ static void crypto_authenc_esn_free(struct aead_instance *inst)
+--- a/crypto/scatterwalk.c
++++ b/crypto/scatterwalk.c
+@@ -69,6 +69,100 @@
+ }
+ EXPORT_SYMBOL_GPL(scatterwalk_map_and_copy);
+
++/**
++ * memcpy_sglist() - Copy data from one scatterlist to another
++ * @dst: The destination scatterlist. Can be NULL if @nbytes == 0.
++ * @src: The source scatterlist. Can be NULL if @nbytes == 0.
++ * @nbytes: Number of bytes to copy
++ *
++ * The scatterlists can describe exactly the same memory, in which case this
++ * function is a no-op. No other overlaps are supported.
++ *
++ * Context: Any context
++ */
++void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
++ unsigned int nbytes)
++{
++ unsigned int src_offset, dst_offset;
++
++ if (unlikely(nbytes == 0)) /* in case src and/or dst is NULL */
++ return;
++
++ src_offset = src->offset;
++ dst_offset = dst->offset;
++ for (;;) {
++ /* Compute the length to copy this step. */
++ unsigned int len = min3(src->offset + src->length - src_offset,
++ dst->offset + dst->length - dst_offset,
++ nbytes);
++ struct page *src_page = sg_page(src);
++ struct page *dst_page = sg_page(dst);
++ const void *src_virt;
++ void *dst_virt;
++
++ if (IS_ENABLED(CONFIG_HIGHMEM)) {
++ /* HIGHMEM: we may have to actually map the pages. */
++ const unsigned int src_oip = offset_in_page(src_offset);
++ const unsigned int dst_oip = offset_in_page(dst_offset);
++ const unsigned int limit = PAGE_SIZE;
++
++ /* Further limit len to not cross a page boundary. */
++ len = min3(len, limit - src_oip, limit - dst_oip);
++
++ /* Compute the source and destination pages. */
++ src_page += src_offset / PAGE_SIZE;
++ dst_page += dst_offset / PAGE_SIZE;
++
++ if (src_page != dst_page) {
++ /* Copy between different pages. */
++ memcpy_page(dst_page, dst_oip,
++ src_page, src_oip, len);
++ flush_dcache_page(dst_page);
++ } else if (src_oip != dst_oip) {
++ /* Copy between different parts of same page. */
++ dst_virt = kmap_local_page(dst_page);
++ memcpy(dst_virt + dst_oip, dst_virt + src_oip,
++ len);
++ kunmap_local(dst_virt);
++ flush_dcache_page(dst_page);
++ } /* Else, it's the same memory. No action needed. */
++ } else {
++ /*
++ * !HIGHMEM: no mapping needed. Just work in the linear
++ * buffer of each sg entry. Note that we can cross page
++ * boundaries, as they are not significant in this case.
++ */
++ src_virt = page_address(src_page) + src_offset;
++ dst_virt = page_address(dst_page) + dst_offset;
++ if (src_virt != dst_virt) {
++ memcpy(dst_virt, src_virt, len);
++ if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE)
++ __scatterwalk_flush_dcache_pages(
++ dst_page, dst_offset, len);
++ } /* Else, it's the same memory. No action needed. */
++ }
++ nbytes -= len;
++ if (nbytes == 0) /* No more to copy? */
++ break;
++
++ /*
++ * There's more to copy. Advance the offsets by the length
++ * copied this step, and advance the sg entries as needed.
++ */
++ src_offset += len;
++ if (src_offset >= src->offset + src->length) {
++ src = sg_next(src);
++ src_offset = src->offset;
++ }
++ dst_offset += len;
++ if (dst_offset >= dst->offset + dst->length) {
++ dst = sg_next(dst);
++ dst_offset = dst->offset;
++ }
++ }
++}
++EXPORT_SYMBOL_GPL(memcpy_sglist);
++
+ struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
+ struct scatterlist *src,
+ unsigned int len)
+--- a/crypto/Kconfig
++++ b/crypto/Kconfig
+@@ -222,7 +222,6 @@
+ select CRYPTO_SKCIPHER
+ select CRYPTO_MANAGER
+ select CRYPTO_HASH
+- select CRYPTO_NULL
+ help
+ Authenc: Combined mode wrapper for IPsec.
+
+@@ -1421,7 +1420,6 @@
+ depends on NET
+ select CRYPTO_AEAD
+ select CRYPTO_SKCIPHER
+- select CRYPTO_NULL
+ select CRYPTO_USER_API
+ help
+ Enable the userspace interface for AEAD cipher algorithms.
+--- a/include/crypto/if_alg.h
++++ b/include/crypto/if_alg.h
+@@ -228,9 +228,8 @@
+ return PAGE_SIZE <= af_alg_rcvbuf(sk);
+ }
+
+-unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset);
+-void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
+- size_t dst_offset);
++unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes);
++void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst);
+ void af_alg_wmem_wakeup(struct sock *sk);
+ int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min);
+ int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
+--- a/include/crypto/scatterwalk.h
++++ b/include/crypto/scatterwalk.h
+@@ -83,6 +83,34 @@
+ scatterwalk_start(walk, sg_next(walk->sg));
+ }
+
++/*
++ * Flush the dcache of any pages that overlap the region
++ * [offset, offset + nbytes) relative to base_page.
++ *
++ * This should be called only when ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE, to ensure
++ * that all relevant code (including the call to sg_page() in the caller, if
++ * applicable) gets fully optimized out when !ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE.
++ */
++static inline void __scatterwalk_flush_dcache_pages(struct page *base_page,
++ unsigned int offset,
++ unsigned int nbytes)
++{
++ unsigned int num_pages;
++
++ base_page += offset / PAGE_SIZE;
++ offset %= PAGE_SIZE;
++
++ /*
++ * This is an overflow-safe version of
++ * num_pages = DIV_ROUND_UP(offset + nbytes, PAGE_SIZE).
++ */
++ num_pages = nbytes / PAGE_SIZE;
++ num_pages += DIV_ROUND_UP(offset + (nbytes % PAGE_SIZE), PAGE_SIZE);
++
++ for (unsigned int i = 0; i < num_pages; i++)
++ flush_dcache_page(base_page + i);
++}
++
+ static inline void scatterwalk_done(struct scatter_walk *walk, int out,
+ int more)
+ {
+@@ -94,6 +122,9 @@
+ void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
+ size_t nbytes, int out);
+
++void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
++ unsigned int nbytes);
++
+ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
+ unsigned int start, unsigned int nbytes, int out);
+
diff --git a/SOURCES/1100-crypto-algif_aead-Revert-to-operating-out-of-place.patch b/SOURCES/1100-crypto-algif_aead-Revert-to-operating-out-of-place.patch
deleted file mode 100644
index b50d4be..0000000
--- a/SOURCES/1100-crypto-algif_aead-Revert-to-operating-out-of-place.patch
+++ /dev/null
@@ -1,310 +0,0 @@
-From a664bf3d603dc3bdcf9ae47cc21e0daec706d7a5 Mon Sep 17 00:00:00 2001
-From: Herbert Xu
-Date: Thu, 26 Mar 2026 15:30:20 +0900
-Subject: [PATCH] crypto: algif_aead - Revert to operating out-of-place
-
-This mostly reverts commit 72548b093ee3 except for the copying of
-the associated data.
-
-There is no benefit in operating in-place in algif_aead since the
-source and destination come from different mappings. Get rid of
-all the complexity added for in-place operation and just copy the
-AD directly.
-
-Backported to kernel-6.12.0-124.52.1.el10_1: this tree pre-dates upstream's
-memcpy_sglist() helper, so the AAD copy keeps using
-crypto_aead_copy_sgl(null_tfm, ...). The function signatures of
-af_alg_count_tsgl() and af_alg_pull_tsgl() are reverted to drop the
-offset parameters as in upstream.
-
-Fixes: 72548b093ee3 ("crypto: algif_aead - copy AAD from src to dst")
-Reported-by: Taeyang Lee <0wn@theori.io>
-Signed-off-by: Herbert Xu
----
---- a/crypto/af_alg.c
-+++ b/crypto/af_alg.c
-@@ -635,15 +635,13 @@
- /**
- * af_alg_count_tsgl - Count number of TX SG entries
- *
-- * The counting starts from the beginning of the SGL to @bytes. If
-- * an @offset is provided, the counting of the SG entries starts at the @offset.
-+ * The counting starts from the beginning of the SGL to @bytes.
- *
- * @sk: socket of connection to user space
- * @bytes: Count the number of SG entries holding given number of bytes.
-- * @offset: Start the counting of SG entries from the given offset.
- * Return: Number of TX SG entries found given the constraints
- */
--unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
-+unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes)
- {
- const struct alg_sock *ask = alg_sk(sk);
- const struct af_alg_ctx *ctx = ask->private;
-@@ -658,25 +656,11 @@
- const struct scatterlist *sg = sgl->sg;
-
- for (i = 0; i < sgl->cur; i++) {
-- size_t bytes_count;
--
-- /* Skip offset */
-- if (offset >= sg[i].length) {
-- offset -= sg[i].length;
-- bytes -= sg[i].length;
-- continue;
-- }
--
-- bytes_count = sg[i].length - offset;
--
-- offset = 0;
- sgl_count++;
--
-- /* If we have seen requested number of bytes, stop */
-- if (bytes_count >= bytes)
-+ if (sg[i].length >= bytes)
- return sgl_count;
-
-- bytes -= bytes_count;
-+ bytes -= sg[i].length;
- }
- }
-
-@@ -688,19 +672,14 @@
- * af_alg_pull_tsgl - Release the specified buffers from TX SGL
- *
- * If @dst is non-null, reassign the pages to @dst. The caller must release
-- * the pages. If @dst_offset is given only reassign the pages to @dst starting
-- * at the @dst_offset (byte). The caller must ensure that @dst is large
-- * enough (e.g. by using af_alg_count_tsgl with the same offset).
-+ * the pages.
- *
- * @sk: socket of connection to user space
- * @used: Number of bytes to pull from TX SGL
- * @dst: If non-NULL, buffer is reassigned to dst SGL instead of releasing. The
- * caller must release the buffers in dst.
-- * @dst_offset: Reassign the TX SGL from given offset. All buffers before
-- * reaching the offset is released.
- */
--void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
-- size_t dst_offset)
-+void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst)
- {
- struct alg_sock *ask = alg_sk(sk);
- struct af_alg_ctx *ctx = ask->private;
-@@ -725,18 +704,10 @@
- * SG entries in dst.
- */
- if (dst) {
-- if (dst_offset >= plen) {
-- /* discard page before offset */
-- dst_offset -= plen;
-- } else {
-- /* reassign page to dst after offset */
-- get_page(page);
-- sg_set_page(dst + j, page,
-- plen - dst_offset,
-- sg[i].offset + dst_offset);
-- dst_offset = 0;
-- j++;
-- }
-+ /* reassign page to dst after offset */
-+ get_page(page);
-+ sg_set_page(dst + j, page, plen, sg[i].offset);
-+ j++;
- }
-
- sg[i].length -= plen;
---- a/crypto/algif_aead.c
-+++ b/crypto/algif_aead.c
-@@ -96,9 +96,8 @@
- struct aead_tfm *aeadc = pask->private;
- struct crypto_aead *tfm = aeadc->aead;
- struct crypto_sync_skcipher *null_tfm = aeadc->null_tfm;
-- unsigned int i, as = crypto_aead_authsize(tfm);
-+ unsigned int as = crypto_aead_authsize(tfm);
- struct af_alg_async_req *areq;
-- struct af_alg_tsgl *tsgl, *tmp;
- struct scatterlist *rsgl_src, *tsgl_src = NULL;
- int err = 0;
- size_t used = 0; /* [in] TX bufs to be en/decrypted */
-@@ -178,23 +177,24 @@
- outlen -= less;
- }
-
-+ /*
-+ * Create a per request TX SGL for this request which tracks the
-+ * SG entries from the global TX SGL.
-+ */
- processed = used + ctx->aead_assoclen;
-- list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) {
-- for (i = 0; i < tsgl->cur; i++) {
-- struct scatterlist *process_sg = tsgl->sg + i;
--
-- if (!(process_sg->length) || !sg_page(process_sg))
-- continue;
-- tsgl_src = process_sg;
-- break;
-- }
-- if (tsgl_src)
-- break;
-- }
-- if (processed && !tsgl_src) {
-- err = -EFAULT;
-+ areq->tsgl_entries = af_alg_count_tsgl(sk, processed);
-+ if (!areq->tsgl_entries)
-+ areq->tsgl_entries = 1;
-+ areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
-+ areq->tsgl_entries),
-+ GFP_KERNEL);
-+ if (!areq->tsgl) {
-+ err = -ENOMEM;
- goto free;
- }
-+ sg_init_table(areq->tsgl, areq->tsgl_entries);
-+ af_alg_pull_tsgl(sk, processed, areq->tsgl);
-+ tsgl_src = areq->tsgl;
-
- /*
- * Copy of AAD from source to destination
-@@ -203,83 +203,18 @@
- * when user space uses an in-place cipher operation, the kernel
- * will copy the data as it does not see whether such in-place operation
- * is initiated.
-- *
-- * To ensure efficiency, the following implementation ensure that the
-- * ciphers are invoked to perform a crypto operation in-place. This
-- * is achieved by memory management specified as follows.
- */
-
-- /* Use the RX SGL as source (and destination) for crypto op. */
-+ /* Use the RX SGL as destination for crypto op. */
- rsgl_src = areq->first_rsgl.sgl.sgt.sgl;
-
-- if (ctx->enc) {
-- /*
-- * Encryption operation - The in-place cipher operation is
-- * achieved by the following operation:
-- *
-- * TX SGL: AAD || PT
-- * | |
-- * | copy |
-- * v v
-- * RX SGL: AAD || PT || Tag
-- */
-- err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
-- areq->first_rsgl.sgl.sgt.sgl,
-- processed);
-- if (err)
-- goto free;
-- af_alg_pull_tsgl(sk, processed, NULL, 0);
-- } else {
-- /*
-- * Decryption operation - To achieve an in-place cipher
-- * operation, the following SGL structure is used:
-- *
-- * TX SGL: AAD || CT || Tag
-- * | | ^
-- * | copy | | Create SGL link.
-- * v v |
-- * RX SGL: AAD || CT ----+
-- */
--
-- /* Copy AAD || CT to RX SGL buffer for in-place operation. */
-- err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
-- areq->first_rsgl.sgl.sgt.sgl,
-- outlen);
-- if (err)
-- goto free;
--
-- /* Create TX SGL for tag and chain it to RX SGL. */
-- areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
-- processed - as);
-- if (!areq->tsgl_entries)
-- areq->tsgl_entries = 1;
-- areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
-- areq->tsgl_entries),
-- GFP_KERNEL);
-- if (!areq->tsgl) {
-- err = -ENOMEM;
-- goto free;
-- }
-- sg_init_table(areq->tsgl, areq->tsgl_entries);
--
-- /* Release TX SGL, except for tag data and reassign tag data. */
-- af_alg_pull_tsgl(sk, processed, areq->tsgl, processed - as);
--
-- /* chain the areq TX SGL holding the tag with RX SGL */
-- if (usedpages) {
-- /* RX SGL present */
-- struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;
-- struct scatterlist *sg = sgl_prev->sgt.sgl;
--
-- sg_unmark_end(sg + sgl_prev->sgt.nents - 1);
-- sg_chain(sg, sgl_prev->sgt.nents + 1, areq->tsgl);
-- } else
-- /* no RX SGL present (e.g. authentication only) */
-- rsgl_src = areq->tsgl;
-- }
-+ err = crypto_aead_copy_sgl(null_tfm, tsgl_src, rsgl_src,
-+ ctx->aead_assoclen);
-+ if (err)
-+ goto free;
-
- /* Initialize the crypto operation */
-- aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
-+ aead_request_set_crypt(&areq->cra_u.aead_req, tsgl_src,
- areq->first_rsgl.sgl.sgt.sgl, used, ctx->iv);
- aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
- aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
-@@ -514,7 +449,7 @@
- struct crypto_aead *tfm = aeadc->aead;
- unsigned int ivlen = crypto_aead_ivsize(tfm);
-
-- af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
-+ af_alg_pull_tsgl(sk, ctx->used, NULL);
- sock_kzfree_s(sk, ctx->iv, ivlen);
- sock_kfree_s(sk, ctx, ctx->len);
- af_alg_release_parent(sk);
---- a/crypto/algif_skcipher.c
-+++ b/crypto/algif_skcipher.c
-@@ -138,7 +138,7 @@
- * Create a per request TX SGL for this request which tracks the
- * SG entries from the global TX SGL.
- */
-- areq->tsgl_entries = af_alg_count_tsgl(sk, len, 0);
-+ areq->tsgl_entries = af_alg_count_tsgl(sk, len);
- if (!areq->tsgl_entries)
- areq->tsgl_entries = 1;
- areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
-@@ -149,7 +149,7 @@
- goto free;
- }
- sg_init_table(areq->tsgl, areq->tsgl_entries);
-- af_alg_pull_tsgl(sk, len, areq->tsgl, 0);
-+ af_alg_pull_tsgl(sk, len, areq->tsgl);
-
- /* Initialize the crypto operation */
- skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm);
-@@ -363,7 +363,7 @@
- struct alg_sock *pask = alg_sk(psk);
- struct crypto_skcipher *tfm = pask->private;
-
-- af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
-+ af_alg_pull_tsgl(sk, ctx->used, NULL);
- sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
- if (ctx->state)
- sock_kzfree_s(sk, ctx->state, crypto_skcipher_statesize(tfm));
---- a/include/crypto/if_alg.h
-+++ b/include/crypto/if_alg.h
-@@ -228,9 +228,8 @@
- return PAGE_SIZE <= af_alg_rcvbuf(sk);
- }
-
--unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset);
--void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
-- size_t dst_offset);
-+unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes);
-+void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst);
- void af_alg_wmem_wakeup(struct sock *sk);
- int af_alg_wait_for_data(struct sock *sk, unsigned flags, unsigned min);
- int af_alg_sendmsg(struct socket *sock, struct msghdr *msg, size_t size,
diff --git a/SPECS/raspberrypi2.spec b/SPECS/raspberrypi2.spec
index bd30ce6..2acf450 100644
--- a/SPECS/raspberrypi2.spec
+++ b/SPECS/raspberrypi2.spec
@@ -11,7 +11,7 @@ ExclusiveArch: aarch64
%define local_version v8
%define bcmmodel 2711
-%define extra_version 3
+%define extra_version 4
# This originally implies Kernel 4.x for RPi 2 and is not appropriate now.
# Be careful to change this not to disturb the seamless package update.
@@ -53,7 +53,7 @@ Source2001: cpupower.config
Source2002: kvm_stat.logrotate
# AlmaLinux patches
## CVE-2026-31431
-Patch1100: 1100-crypto-algif_aead-Revert-to-operating-out-of-place.patch
+Patch1100: 1100-CVE-2026-31431-crypto-Copy-Fail-fixes.patch
BuildRequires: kmod, patch, bash, coreutils, tar
BuildRequires: bzip2, xz, findutils, gzip, m4, perl, perl-Carp, make, diffutils, gawk
@@ -538,6 +538,9 @@ cp $(ls -1 /boot/config-kernel-*-*|sort -V|tail -1) /boot/config-kernel.inc
%endif
%changelog
+* Thu Apr 30 2026 Koichiro Iwao - 6.12.47-20250916.v8.4
+- Update CVE-2026-31431 patch to include more upstream commits
+
* Thu Apr 30 2026 Koichiro Iwao - 6.12.47-20250916.v8.3
- Apply fix for CVE-2026-31431 Copy Fail