Import of kernel-4.18.0-553.121.1.el8_10

almalinux-bot-kernel 2026-05-02 05:08:23 +00:00
parent 7242ccff16
commit 085f477512
12 changed files with 247 additions and 300 deletions

Makefile

@@ -12,7 +12,7 @@ RHEL_MINOR = 10
#
# Use this spot to avoid future merge conflicts.
# Do not trim this comment.
RHEL_RELEASE = 553.120.1
RHEL_RELEASE = 553.121.1
#
# ZSTREAM

crypto/Kconfig

@@ -240,7 +240,6 @@ config CRYPTO_AUTHENC
select CRYPTO_BLKCIPHER
select CRYPTO_MANAGER
select CRYPTO_HASH
select CRYPTO_NULL
help
Authenc: Combined mode wrapper for IPsec.
This is required for IPsec.
@@ -1863,7 +1862,6 @@ config CRYPTO_USER_API_AEAD
depends on NET
select CRYPTO_AEAD
select CRYPTO_BLKCIPHER
select CRYPTO_NULL
select CRYPTO_USER_API
help
This option enables the user-space interface for AEAD

crypto/af_alg.c

@@ -524,15 +524,13 @@ EXPORT_SYMBOL_GPL(af_alg_alloc_tsgl);
/**
* af_alg_count_tsgl - Count number of TX SG entries
*
* The counting starts from the beginning of the SGL to @bytes. If
* an offset is provided, the counting of the SG entries starts at the offset.
* The counting starts from the beginning of the SGL to @bytes.
*
* @sk socket of connection to user space
* @bytes Count the number of SG entries holding given number of bytes.
* @offset Start the counting of SG entries from the given offset.
* @return Number of TX SG entries found given the constraints
*/
unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes)
{
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
@@ -547,25 +545,11 @@ unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset)
struct scatterlist *sg = sgl->sg;
for (i = 0; i < sgl->cur; i++) {
size_t bytes_count;
/* Skip offset */
if (offset >= sg[i].length) {
offset -= sg[i].length;
bytes -= sg[i].length;
continue;
}
bytes_count = sg[i].length - offset;
offset = 0;
sgl_count++;
/* If we have seen requested number of bytes, stop */
if (bytes_count >= bytes)
if (sg[i].length >= bytes)
return sgl_count;
bytes -= bytes_count;
bytes -= sg[i].length;
}
}
@@ -577,19 +561,14 @@ EXPORT_SYMBOL_GPL(af_alg_count_tsgl);
* af_alg_pull_tsgl - Release the specified buffers from TX SGL
*
* If @dst is non-null, reassign the pages to dst. The caller must release
* the pages. If @dst_offset is given, only reassign the pages to @dst starting
* at the @dst_offset (byte). The caller must ensure that @dst is large
* enough (e.g. by using af_alg_count_tsgl with the same offset).
* the pages.
*
* @sk socket of connection to user space
* @used Number of bytes to pull from TX SGL
* @dst If non-NULL, buffer is reassigned to dst SGL instead of releasing. The
* caller must release the buffers in dst.
* @dst_offset Reassign the TX SGL from given offset. All buffers before
* reaching the offset are released.
*/
void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
size_t dst_offset)
void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst)
{
struct alg_sock *ask = alg_sk(sk);
struct af_alg_ctx *ctx = ask->private;
@@ -613,19 +592,11 @@ void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
* Assumption: caller created af_alg_count_tsgl(len)
* SG entries in dst.
*/
if (dst) {
if (dst_offset >= plen) {
/* discard page before offset */
dst_offset -= plen;
} else {
/* reassign page to dst after offset */
get_page(page);
sg_set_page(dst + j, page,
plen - dst_offset,
sg[i].offset + dst_offset);
dst_offset = 0;
j++;
}
if (dst && plen) {
/* reassign page to dst */
get_page(page);
sg_set_page(dst + j, page, plen, sg[i].offset);
j++;
}
sg[i].length -= plen;
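
The simplified af_alg_count_tsgl() above walks the TX SGL from the head and stops once @bytes are covered. As a minimal standalone sketch of that counting logic (count_entries and lens are illustrative names, not kernel API; a plain array of lengths stands in for the scatterlist entries):

static unsigned int count_entries(const size_t *lens, unsigned int n,
				  size_t bytes)
{
	unsigned int count = 0;
	unsigned int i;

	/* Mirror the loop above: count each entry in order and stop
	 * once the requested number of bytes is covered. */
	for (i = 0; i < n && bytes; i++) {
		count++;
		if (lens[i] >= bytes)
			break;
		bytes -= lens[i];
	}
	return count;
}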

crypto/algif_aead.c

@@ -30,8 +30,6 @@
#include <crypto/internal/aead.h>
#include <crypto/scatterwalk.h>
#include <crypto/if_alg.h>
#include <crypto/skcipher.h>
#include <crypto/null.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/kernel.h>
@@ -40,19 +38,13 @@
#include <linux/net.h>
#include <net/sock.h>
struct aead_tfm {
struct crypto_aead *aead;
struct crypto_skcipher *null_tfm;
};
static inline bool aead_sufficient_data(struct sock *sk)
{
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct af_alg_ctx *ctx = ask->private;
struct aead_tfm *aeadc = pask->private;
struct crypto_aead *tfm = aeadc->aead;
struct crypto_aead *tfm = pask->private;
unsigned int as = crypto_aead_authsize(tfm);
/*
@@ -68,27 +60,12 @@ static int aead_sendmsg(struct socket *sock, struct msghdr *msg, size_t size)
struct alg_sock *ask = alg_sk(sk);
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct aead_tfm *aeadc = pask->private;
struct crypto_aead *tfm = aeadc->aead;
struct crypto_aead *tfm = pask->private;
unsigned int ivsize = crypto_aead_ivsize(tfm);
return af_alg_sendmsg(sock, msg, size, ivsize);
}
static int crypto_aead_copy_sgl(struct crypto_skcipher *null_tfm,
struct scatterlist *src,
struct scatterlist *dst, unsigned int len)
{
SKCIPHER_REQUEST_ON_STACK(skreq, null_tfm);
skcipher_request_set_tfm(skreq, null_tfm);
skcipher_request_set_callback(skreq, CRYPTO_TFM_REQ_MAY_BACKLOG,
NULL, NULL);
skcipher_request_set_crypt(skreq, src, dst, len, NULL);
return crypto_skcipher_encrypt(skreq);
}
static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
size_t ignored, int flags)
{
@@ -97,13 +74,12 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct af_alg_ctx *ctx = ask->private;
struct aead_tfm *aeadc = pask->private;
struct crypto_aead *tfm = aeadc->aead;
struct crypto_skcipher *null_tfm = aeadc->null_tfm;
unsigned int i, as = crypto_aead_authsize(tfm);
struct crypto_aead *tfm = pask->private;
unsigned int as = crypto_aead_authsize(tfm);
unsigned int ivsize = crypto_aead_ivsize(tfm);
struct af_alg_async_req *areq;
struct af_alg_tsgl *tsgl, *tmp;
struct scatterlist *rsgl_src, *tsgl_src = NULL;
void *iv;
int err = 0;
size_t used = 0; /* [in] TX bufs to be en/decrypted */
size_t outlen = 0; /* [out] RX bufs produced by kernel */
@@ -155,10 +131,14 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
/* Allocate cipher request for current operation. */
areq = af_alg_alloc_areq(sk, sizeof(struct af_alg_async_req) +
crypto_aead_reqsize(tfm));
crypto_aead_reqsize(tfm) + ivsize);
if (IS_ERR(areq))
return PTR_ERR(areq);
iv = (u8 *)aead_request_ctx(&areq->cra_u.aead_req) +
crypto_aead_reqsize(tfm);
memcpy(iv, ctx->iv, ivsize);
/* convert iovecs of output buffers into RX SGL */
err = af_alg_get_rsgl(sk, msg, flags, areq, outlen, &usedpages);
if (err)
@@ -174,7 +154,7 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
if (usedpages < outlen) {
size_t less = outlen - usedpages;
if (used < less) {
if (used < less + (ctx->enc ? 0 : as)) {
err = -EINVAL;
goto free;
}
@@ -182,23 +162,24 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
outlen -= less;
}
/*
* Create a per request TX SGL for this request which tracks the
* SG entries from the global TX SGL.
*/
processed = used + ctx->aead_assoclen;
list_for_each_entry_safe(tsgl, tmp, &ctx->tsgl_list, list) {
for (i = 0; i < tsgl->cur; i++) {
struct scatterlist *process_sg = tsgl->sg + i;
if (!(process_sg->length) || !sg_page(process_sg))
continue;
tsgl_src = process_sg;
break;
}
if (tsgl_src)
break;
}
if (processed && !tsgl_src) {
err = -EFAULT;
areq->tsgl_entries = af_alg_count_tsgl(sk, processed);
if (!areq->tsgl_entries)
areq->tsgl_entries = 1;
areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
areq->tsgl_entries),
GFP_KERNEL);
if (!areq->tsgl) {
err = -ENOMEM;
goto free;
}
sg_init_table(areq->tsgl, areq->tsgl_entries);
af_alg_pull_tsgl(sk, processed, areq->tsgl);
tsgl_src = areq->tsgl;
/*
* Copy of AAD from source to destination
@@ -207,82 +188,16 @@ static int _aead_recvmsg(struct socket *sock, struct msghdr *msg,
* when user space uses an in-place cipher operation, the kernel
* will copy the data as it does not see whether such in-place operation
* is initiated.
*
* To ensure efficiency, the following implementation ensures that the
* ciphers are invoked to perform a crypto operation in-place. This
* is achieved by memory management specified as follows.
*/
/* Use the RX SGL as source (and destination) for crypto op. */
rsgl_src = areq->first_rsgl.sgl.sg;
if (ctx->enc) {
/*
* Encryption operation - The in-place cipher operation is
* achieved by the following operation:
*
* TX SGL: AAD || PT
* | |
* | copy |
* v v
* RX SGL: AAD || PT || Tag
*/
err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
areq->first_rsgl.sgl.sg, processed);
if (err)
goto free;
af_alg_pull_tsgl(sk, processed, NULL, 0);
} else {
/*
* Decryption operation - To achieve an in-place cipher
* operation, the following SGL structure is used:
*
* TX SGL: AAD || CT || Tag
* | | ^
* | copy | | Create SGL link.
* v v |
* RX SGL: AAD || CT ----+
*/
/* Copy AAD || CT to RX SGL buffer for in-place operation. */
err = crypto_aead_copy_sgl(null_tfm, tsgl_src,
areq->first_rsgl.sgl.sg, outlen);
if (err)
goto free;
/* Create TX SGL for tag and chain it to RX SGL. */
areq->tsgl_entries = af_alg_count_tsgl(sk, processed,
processed - as);
if (!areq->tsgl_entries)
areq->tsgl_entries = 1;
areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
areq->tsgl_entries),
GFP_KERNEL);
if (!areq->tsgl) {
err = -ENOMEM;
goto free;
}
sg_init_table(areq->tsgl, areq->tsgl_entries);
/* Release TX SGL, except for tag data and reassign tag data. */
af_alg_pull_tsgl(sk, processed, areq->tsgl, processed - as);
/* chain the areq TX SGL holding the tag with RX SGL */
if (usedpages) {
/* RX SGL present */
struct af_alg_sgl *sgl_prev = &areq->last_rsgl->sgl;
sg_unmark_end(sgl_prev->sg + sgl_prev->npages - 1);
sg_chain(sgl_prev->sg, sgl_prev->npages + 1,
areq->tsgl);
} else
/* no RX SGL present (e.g. authentication only) */
rsgl_src = areq->tsgl;
}
memcpy_sglist(rsgl_src, tsgl_src, ctx->aead_assoclen);
/* Initialize the crypto operation */
aead_request_set_crypt(&areq->cra_u.aead_req, rsgl_src,
areq->first_rsgl.sgl.sg, used, ctx->iv);
aead_request_set_crypt(&areq->cra_u.aead_req, tsgl_src,
areq->first_rsgl.sgl.sg, used, iv);
aead_request_set_ad(&areq->cra_u.aead_req, ctx->aead_assoclen);
aead_request_set_tfm(&areq->cra_u.aead_req, tfm);
@@ -383,7 +298,7 @@ static int aead_check_key(struct socket *sock)
int err = 0;
struct sock *psk;
struct alg_sock *pask;
struct aead_tfm *tfm;
struct crypto_aead *tfm;
struct sock *sk = sock->sk;
struct alg_sock *ask = alg_sk(sk);
@@ -397,7 +312,7 @@ static int aead_check_key(struct socket *sock)
err = -ENOKEY;
lock_sock_nested(psk, SINGLE_DEPTH_NESTING);
if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
if (crypto_aead_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
goto unlock;
if (!pask->refcnt++)
@@ -476,54 +391,22 @@ static struct proto_ops algif_aead_ops_nokey = {
static void *aead_bind(const char *name, u32 type, u32 mask)
{
struct aead_tfm *tfm;
struct crypto_aead *aead;
struct crypto_skcipher *null_tfm;
tfm = kzalloc(sizeof(*tfm), GFP_KERNEL);
if (!tfm)
return ERR_PTR(-ENOMEM);
aead = crypto_alloc_aead(name, type, mask);
if (IS_ERR(aead)) {
kfree(tfm);
return ERR_CAST(aead);
}
null_tfm = crypto_get_default_null_skcipher();
if (IS_ERR(null_tfm)) {
crypto_free_aead(aead);
kfree(tfm);
return ERR_CAST(null_tfm);
}
tfm->aead = aead;
tfm->null_tfm = null_tfm;
return tfm;
return crypto_alloc_aead(name, type, mask);
}
static void aead_release(void *private)
{
struct aead_tfm *tfm = private;
crypto_free_aead(tfm->aead);
crypto_put_default_null_skcipher();
kfree(tfm);
crypto_free_aead(private);
}
static int aead_setauthsize(void *private, unsigned int authsize)
{
struct aead_tfm *tfm = private;
return crypto_aead_setauthsize(tfm->aead, authsize);
return crypto_aead_setauthsize(private, authsize);
}
static int aead_setkey(void *private, const u8 *key, unsigned int keylen)
{
struct aead_tfm *tfm = private;
return crypto_aead_setkey(tfm->aead, key, keylen);
return crypto_aead_setkey(private, key, keylen);
}
static void aead_sock_destruct(struct sock *sk)
@@ -532,11 +415,10 @@ static void aead_sock_destruct(struct sock *sk)
struct af_alg_ctx *ctx = ask->private;
struct sock *psk = ask->parent;
struct alg_sock *pask = alg_sk(psk);
struct aead_tfm *aeadc = pask->private;
struct crypto_aead *tfm = aeadc->aead;
struct crypto_aead *tfm = pask->private;
unsigned int ivlen = crypto_aead_ivsize(tfm);
af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
af_alg_pull_tsgl(sk, ctx->used, NULL);
sock_kzfree_s(sk, ctx->iv, ivlen);
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);
@@ -546,10 +428,9 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
{
struct af_alg_ctx *ctx;
struct alg_sock *ask = alg_sk(sk);
struct aead_tfm *tfm = private;
struct crypto_aead *aead = tfm->aead;
struct crypto_aead *tfm = private;
unsigned int len = sizeof(*ctx);
unsigned int ivlen = crypto_aead_ivsize(aead);
unsigned int ivlen = crypto_aead_ivsize(tfm);
ctx = sock_kmalloc(sk, len, GFP_KERNEL);
if (!ctx)
@@ -582,9 +463,9 @@ static int aead_accept_parent_nokey(void *private, struct sock *sk)
static int aead_accept_parent(void *private, struct sock *sk)
{
struct aead_tfm *tfm = private;
struct crypto_aead *tfm = private;
if (crypto_aead_get_flags(tfm->aead) & CRYPTO_TFM_NEED_KEY)
if (crypto_aead_get_flags(tfm) & CRYPTO_TFM_NEED_KEY)
return -ENOKEY;
return aead_accept_parent_nokey(private, sk);
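
For orientation, a hedged userspace sketch of the AF_ALG "aead" interface this file implements: one AES-128-GCM encryption with 16 bytes of AAD and a 32-byte plaintext. The socket API (sockaddr_alg, SOL_ALG, the ALG_SET_* controls) is the documented kernel crypto user interface; the key, nonce, and sizes are illustrative and all error handling is omitted:

#include <string.h>
#include <unistd.h>
#include <sys/socket.h>
#include <linux/if_alg.h>

int main(void)
{
	struct sockaddr_alg sa = {
		.salg_family = AF_ALG,
		.salg_type = "aead",
		.salg_name = "gcm(aes)",
	};
	__u8 key[16] = { 0 };			/* demo key            */
	__u8 msg[48] = { 0 };			/* AAD(16) || PT(32)   */
	__u8 out[64];				/* AAD || CT || tag    */
	char cbuf[CMSG_SPACE(4) + CMSG_SPACE(4) +
		  CMSG_SPACE(sizeof(struct af_alg_iv) + 12)] = { 0 };
	struct iovec iov = { .iov_base = msg, .iov_len = sizeof(msg) };
	struct msghdr mh = { 0 };
	struct af_alg_iv *ivp;
	struct cmsghdr *cm;
	int tfmfd, opfd;

	tfmfd = socket(AF_ALG, SOCK_SEQPACKET, 0);
	bind(tfmfd, (struct sockaddr *)&sa, sizeof(sa));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_KEY, key, sizeof(key));
	setsockopt(tfmfd, SOL_ALG, ALG_SET_AEAD_AUTHSIZE, NULL, 16);
	opfd = accept(tfmfd, NULL, NULL);

	mh.msg_iov = &iov;
	mh.msg_iovlen = 1;
	mh.msg_control = cbuf;
	mh.msg_controllen = sizeof(cbuf);

	cm = CMSG_FIRSTHDR(&mh);		/* operation: encrypt */
	cm->cmsg_level = SOL_ALG;
	cm->cmsg_type = ALG_SET_OP;
	cm->cmsg_len = CMSG_LEN(4);
	*(__u32 *)CMSG_DATA(cm) = ALG_OP_ENCRYPT;

	cm = CMSG_NXTHDR(&mh, cm);		/* first 16 bytes are AAD */
	cm->cmsg_level = SOL_ALG;
	cm->cmsg_type = ALG_SET_AEAD_ASSOCLEN;
	cm->cmsg_len = CMSG_LEN(4);
	*(__u32 *)CMSG_DATA(cm) = 16;

	cm = CMSG_NXTHDR(&mh, cm);		/* 96-bit GCM nonce */
	cm->cmsg_level = SOL_ALG;
	cm->cmsg_type = ALG_SET_IV;
	cm->cmsg_len = CMSG_LEN(sizeof(*ivp) + 12);
	ivp = (struct af_alg_iv *)CMSG_DATA(cm);
	ivp->ivlen = 12;
	memset(ivp->iv, 0, 12);

	sendmsg(opfd, &mh, 0);			/* feed AAD || PT       */
	read(opfd, out, sizeof(out));		/* get AAD || CT || tag */

	close(opfd);
	close(tfmfd);
	return 0;
}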

crypto/algif_skcipher.c

@@ -97,7 +97,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
* Create a per request TX SGL for this request which tracks the
* SG entries from the global TX SGL.
*/
areq->tsgl_entries = af_alg_count_tsgl(sk, len, 0);
areq->tsgl_entries = af_alg_count_tsgl(sk, len);
if (!areq->tsgl_entries)
areq->tsgl_entries = 1;
areq->tsgl = sock_kmalloc(sk, array_size(sizeof(*areq->tsgl),
@@ -108,7 +108,7 @@ static int _skcipher_recvmsg(struct socket *sock, struct msghdr *msg,
goto free;
}
sg_init_table(areq->tsgl, areq->tsgl_entries);
af_alg_pull_tsgl(sk, len, areq->tsgl, 0);
af_alg_pull_tsgl(sk, len, areq->tsgl);
/* Initialize the crypto operation */
skcipher_request_set_tfm(&areq->cra_u.skcipher_req, tfm);
@@ -328,7 +328,7 @@ static void skcipher_sock_destruct(struct sock *sk)
struct alg_sock *pask = alg_sk(psk);
struct crypto_skcipher *tfm = pask->private;
af_alg_pull_tsgl(sk, ctx->used, NULL, 0);
af_alg_pull_tsgl(sk, ctx->used, NULL);
sock_kzfree_s(sk, ctx->iv, crypto_skcipher_ivsize(tfm));
sock_kfree_s(sk, ctx, ctx->len);
af_alg_release_parent(sk);

crypto/authenc.c

@@ -14,7 +14,6 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
#include <crypto/null.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -33,7 +32,6 @@ struct authenc_instance_ctx {
struct crypto_authenc_ctx {
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
struct crypto_skcipher *null;
};
struct authenc_request_ctx {
@@ -189,21 +187,6 @@ out:
authenc_request_complete(areq, err);
}
static int crypto_authenc_copy_assoc(struct aead_request *req)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(authenc);
SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
skcipher_request_set_tfm(skreq, ctx->null);
skcipher_request_set_callback(skreq, aead_request_flags(req),
NULL, NULL);
skcipher_request_set_crypt(skreq, req->src, req->dst, req->assoclen,
NULL);
return crypto_skcipher_encrypt(skreq);
}
static int crypto_authenc_encrypt(struct aead_request *req)
{
struct crypto_aead *authenc = crypto_aead_reqtfm(req);
@@ -222,10 +205,7 @@ static int crypto_authenc_encrypt(struct aead_request *req)
dst = src;
if (req->src != req->dst) {
err = crypto_authenc_copy_assoc(req);
if (err)
return err;
memcpy_sglist(req->dst, req->src, req->assoclen);
dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, req->assoclen);
}
@@ -326,7 +306,6 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
struct crypto_authenc_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
struct crypto_skcipher *null;
int err;
auth = crypto_spawn_ahash(&ictx->auth);
@@ -338,14 +317,8 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(enc))
goto err_free_ahash;
null = crypto_get_default_null_skcipher();
err = PTR_ERR(null);
if (IS_ERR(null))
goto err_free_skcipher;
ctx->auth = auth;
ctx->enc = enc;
ctx->null = null;
crypto_aead_set_reqsize(
tfm,
@@ -359,8 +332,6 @@ static int crypto_authenc_init_tfm(struct crypto_aead *tfm)
return 0;
err_free_skcipher:
crypto_free_skcipher(enc);
err_free_ahash:
crypto_free_ahash(auth);
return err;
@@ -372,7 +343,6 @@ static void crypto_authenc_exit_tfm(struct crypto_aead *tfm)
crypto_free_ahash(ctx->auth);
crypto_free_skcipher(ctx->enc);
crypto_put_default_null_skcipher();
}
static void crypto_authenc_free(struct aead_instance *inst)
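
The authenc template still takes its key in the combined RTA-encoded format from include/crypto/authenc.h; this change only affects how the AAD is copied. As a hedged reminder of how a caller (IPsec, for instance) keys an instance, with authenc_setkey_demo being an illustrative helper rather than kernel API:

#include <linux/kernel.h>
#include <linux/rtnetlink.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <crypto/aead.h>
#include <crypto/authenc.h>

static int authenc_setkey_demo(struct crypto_aead *tfm,
			       const u8 *authkey, unsigned int authkeylen,
			       const u8 *enckey, unsigned int enckeylen)
{
	struct crypto_authenc_key_param *param;
	struct rtattr *rta;
	unsigned int len;
	u8 *key;
	int err;

	/* Layout: rtattr(CRYPTO_AUTHENC_KEYA_PARAM){enckeylen} ||
	 *         authentication key || encryption key */
	len = RTA_SPACE(sizeof(*param)) + authkeylen + enckeylen;
	key = kmalloc(len, GFP_KERNEL);
	if (!key)
		return -ENOMEM;

	rta = (struct rtattr *)key;
	rta->rta_type = CRYPTO_AUTHENC_KEYA_PARAM;
	rta->rta_len = RTA_LENGTH(sizeof(*param));
	param = RTA_DATA(rta);
	param->enckeylen = cpu_to_be32(enckeylen);

	memcpy(key + RTA_SPACE(sizeof(*param)), authkey, authkeylen);
	memcpy(key + RTA_SPACE(sizeof(*param)) + authkeylen,
	       enckey, enckeylen);

	err = crypto_aead_setkey(tfm, key, len);
	memzero_explicit(key, len);
	kfree(key);
	return err;
}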

crypto/authencesn.c

@@ -17,7 +17,6 @@
#include <crypto/internal/hash.h>
#include <crypto/internal/skcipher.h>
#include <crypto/authenc.h>
#include <crypto/null.h>
#include <crypto/scatterwalk.h>
#include <linux/err.h>
#include <linux/init.h>
@@ -36,7 +35,6 @@ struct crypto_authenc_esn_ctx {
unsigned int reqoff;
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
struct crypto_skcipher *null;
};
struct authenc_esn_request_ctx {
@@ -179,20 +177,6 @@ static void crypto_authenc_esn_encrypt_done(struct crypto_async_request *req,
authenc_esn_request_complete(areq, err);
}
static int crypto_authenc_esn_copy(struct aead_request *req, unsigned int len)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(authenc_esn);
SKCIPHER_REQUEST_ON_STACK(skreq, ctx->null);
skcipher_request_set_tfm(skreq, ctx->null);
skcipher_request_set_callback(skreq, aead_request_flags(req),
NULL, NULL);
skcipher_request_set_crypt(skreq, req->src, req->dst, len, NULL);
return crypto_skcipher_encrypt(skreq);
}
static int crypto_authenc_esn_encrypt(struct aead_request *req)
{
struct crypto_aead *authenc_esn = crypto_aead_reqtfm(req);
@@ -206,15 +190,15 @@ static int crypto_authenc_esn_encrypt(struct aead_request *req)
struct scatterlist *src, *dst;
int err;
if (assoclen < 8)
return -EINVAL;
sg_init_table(areq_ctx->src, 2);
src = scatterwalk_ffwd(areq_ctx->src, req->src, assoclen);
dst = src;
if (req->src != req->dst) {
err = crypto_authenc_esn_copy(req, assoclen);
if (err)
return err;
memcpy_sglist(req->dst, req->src, assoclen);
sg_init_table(areq_ctx->dst, 2);
dst = scatterwalk_ffwd(areq_ctx->dst, req->dst, assoclen);
}
@@ -245,6 +229,7 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
crypto_ahash_alignmask(auth) + 1);
unsigned int cryptlen = req->cryptlen - authsize;
unsigned int assoclen = req->assoclen;
struct scatterlist *src = req->src;
struct scatterlist *dst = req->dst;
u8 *ihash = ohash + crypto_ahash_digestsize(auth);
u32 tmp[2];
@@ -252,23 +237,29 @@ static int crypto_authenc_esn_decrypt_tail(struct aead_request *req,
if (!authsize)
goto decrypt;
/* Move high-order bits of sequence number back. */
scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
scatterwalk_map_and_copy(tmp, dst, 0, 8, 1);
if (src == dst) {
/* Move high-order bits of sequence number back. */
scatterwalk_map_and_copy(tmp, dst, 4, 4, 0);
scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 0);
scatterwalk_map_and_copy(tmp, dst, 0, 8, 1);
} else
memcpy_sglist(dst, src, assoclen);
if (crypto_memneq(ihash, ohash, authsize))
return -EBADMSG;
decrypt:
sg_init_table(areq_ctx->dst, 2);
dst = scatterwalk_ffwd(areq_ctx->dst, dst, assoclen);
if (req->src == req->dst)
src = dst;
else
src = scatterwalk_ffwd(areq_ctx->src, src, assoclen);
skcipher_request_set_tfm(skreq, ctx->enc);
skcipher_request_set_callback(skreq, flags,
req->base.complete, req->base.data);
skcipher_request_set_crypt(skreq, dst, dst, cryptlen, req->iv);
skcipher_request_set_crypt(skreq, src, dst, cryptlen, req->iv);
return crypto_skcipher_decrypt(skreq);
}
@@ -295,31 +286,36 @@ static int crypto_authenc_esn_decrypt(struct aead_request *req)
unsigned int assoclen = req->assoclen;
unsigned int cryptlen = req->cryptlen;
u8 *ihash = ohash + crypto_ahash_digestsize(auth);
struct scatterlist *src = req->src;
struct scatterlist *dst = req->dst;
u32 tmp[2];
int err;
cryptlen -= authsize;
if (req->src != dst) {
err = crypto_authenc_esn_copy(req, assoclen + cryptlen);
if (err)
return err;
}
scatterwalk_map_and_copy(ihash, req->src, assoclen + cryptlen,
authsize, 0);
if (assoclen < 8)
return -EINVAL;
if (!authsize)
goto tail;
/* Move high-order bits of sequence number to the end. */
scatterwalk_map_and_copy(tmp, dst, 0, 8, 0);
scatterwalk_map_and_copy(tmp, dst, 4, 4, 1);
scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 1);
cryptlen -= authsize;
scatterwalk_map_and_copy(ihash, req->src, assoclen + cryptlen,
authsize, 0);
sg_init_table(areq_ctx->dst, 2);
dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
/* Move high-order bits of sequence number to the end. */
scatterwalk_map_and_copy(tmp, src, 0, 8, 0);
if (src == dst) {
scatterwalk_map_and_copy(tmp, dst, 4, 4, 1);
scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen, 4, 1);
dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
} else {
scatterwalk_map_and_copy(tmp, dst, 0, 4, 1);
scatterwalk_map_and_copy(tmp + 1, dst, assoclen + cryptlen - 4, 4, 1);
src = scatterwalk_ffwd(areq_ctx->src, src, 8);
dst = scatterwalk_ffwd(areq_ctx->dst, dst, 4);
memcpy_sglist(dst, src, assoclen + cryptlen - 8);
dst = req->dst;
}
ahash_request_set_tfm(ahreq, auth);
ahash_request_set_crypt(ahreq, dst, ohash, assoclen + cryptlen);
@@ -341,7 +337,6 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
struct crypto_authenc_esn_ctx *ctx = crypto_aead_ctx(tfm);
struct crypto_ahash *auth;
struct crypto_skcipher *enc;
struct crypto_skcipher *null;
int err;
auth = crypto_spawn_ahash(&ictx->auth);
@@ -353,14 +348,8 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
if (IS_ERR(enc))
goto err_free_ahash;
null = crypto_get_default_null_skcipher();
err = PTR_ERR(null);
if (IS_ERR(null))
goto err_free_skcipher;
ctx->auth = auth;
ctx->enc = enc;
ctx->null = null;
ctx->reqoff = ALIGN(2 * crypto_ahash_digestsize(auth),
crypto_ahash_alignmask(auth) + 1);
@@ -377,8 +366,6 @@ static int crypto_authenc_esn_init_tfm(struct crypto_aead *tfm)
return 0;
err_free_skcipher:
crypto_free_skcipher(enc);
err_free_ahash:
crypto_free_ahash(auth);
return err;
@@ -390,7 +377,6 @@ static void crypto_authenc_esn_exit_tfm(struct crypto_aead *tfm)
crypto_free_ahash(ctx->auth);
crypto_free_skcipher(ctx->enc);
crypto_put_default_null_skcipher();
}
static void crypto_authenc_esn_free(struct aead_instance *inst)

crypto/scatterwalk.c

@@ -74,6 +74,104 @@ void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
}
EXPORT_SYMBOL_GPL(scatterwalk_map_and_copy);
/**
* memcpy_sglist() - Copy data from one scatterlist to another
* @dst: The destination scatterlist. Can be NULL if @nbytes == 0.
* @src: The source scatterlist. Can be NULL if @nbytes == 0.
* @nbytes: Number of bytes to copy
*
* The scatterlists can describe exactly the same memory, in which case this
* function is a no-op. No other overlaps are supported.
*
* Context: Any context
*/
void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes)
{
unsigned int src_offset, dst_offset;
if (unlikely(nbytes == 0)) /* in case src and/or dst is NULL */
return;
src_offset = src->offset;
dst_offset = dst->offset;
for (;;) {
/* Compute the length to copy this step. */
unsigned int len = min3(src->offset + src->length - src_offset,
dst->offset + dst->length - dst_offset,
nbytes);
struct page *src_page = sg_page(src);
struct page *dst_page = sg_page(dst);
const void *src_virt;
void *dst_virt;
if (IS_ENABLED(CONFIG_HIGHMEM)) {
/* HIGHMEM: we may have to actually map the pages. */
const unsigned int src_oip = offset_in_page(src_offset);
const unsigned int dst_oip = offset_in_page(dst_offset);
const unsigned int limit = PAGE_SIZE;
/* Further limit len to not cross a page boundary. */
len = min3(len, limit - src_oip, limit - dst_oip);
/* Compute the source and destination pages. */
src_page += src_offset / PAGE_SIZE;
dst_page += dst_offset / PAGE_SIZE;
if (src_page != dst_page) {
/* Copy between different pages. */
dst_virt = kmap_atomic(dst_page);
src_virt = kmap_atomic(src_page);
memcpy(dst_virt + dst_oip, src_virt + src_oip,
len);
kunmap_atomic((void *)src_virt);
kunmap_atomic(dst_virt);
flush_dcache_page(dst_page);
} else if (src_oip != dst_oip) {
/* Copy between different parts of same page. */
dst_virt = kmap_atomic(dst_page);
memcpy(dst_virt + dst_oip, dst_virt + src_oip,
len);
kunmap_atomic(dst_virt);
flush_dcache_page(dst_page);
} /* Else, it's the same memory. No action needed. */
} else {
/*
* !HIGHMEM: no mapping needed. Just work in the linear
* buffer of each sg entry. Note that we can cross page
* boundaries, as they are not significant in this case.
*/
src_virt = page_address(src_page) + src_offset;
dst_virt = page_address(dst_page) + dst_offset;
if (src_virt != dst_virt) {
memcpy(dst_virt, src_virt, len);
if (ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE)
__scatterwalk_flush_dcache_pages(
dst_page, dst_offset, len);
} /* Else, it's the same memory. No action needed. */
}
nbytes -= len;
if (nbytes == 0) /* No more to copy? */
break;
/*
* There's more to copy. Advance the offsets by the length
* copied this step, and advance the sg entries as needed.
*/
src_offset += len;
if (src_offset >= src->offset + src->length) {
src = sg_next(src);
src_offset = src->offset;
}
dst_offset += len;
if (dst_offset >= dst->offset + dst->length) {
dst = sg_next(dst);
dst_offset = dst->offset;
}
}
}
EXPORT_SYMBOL_GPL(memcpy_sglist);
struct scatterlist *scatterwalk_ffwd(struct scatterlist dst[2],
struct scatterlist *src,
unsigned int len)
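
A small, hedged sketch of memcpy_sglist() in use, showing that the source and destination entry layouts need not match because the walk advances each offset independently (memcpy_sglist_demo and the static buffers are illustrative only; static storage keeps sg_set_buf() pointed at lowmem):

#include <linux/errno.h>
#include <linux/scatterlist.h>
#include <linux/string.h>
#include <crypto/scatterwalk.h>

static int memcpy_sglist_demo(void)
{
	static u8 a[128], b[128];
	struct scatterlist src[2], dst[1];

	sg_init_table(src, 2);
	sg_set_buf(&src[0], a, 64);	/* source split into 64 + 64 ... */
	sg_set_buf(&src[1], a + 64, 64);
	sg_init_one(dst, b, 128);	/* ... destination is contiguous */

	memcpy_sglist(dst, src, 128);	/* entry boundaries handled internally */
	return memcmp(a, b, 128) ? -EINVAL : 0;
}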

fs/nfsd/nfs4xdr.c

@@ -5121,9 +5121,14 @@ nfsd4_encode_operation(struct nfsd4_compoundres *resp, struct nfsd4_op *op)
int len = xdr->buf->len - post_err_offset;
so->so_replay.rp_status = op->status;
so->so_replay.rp_buflen = len;
read_bytes_from_xdr_buf(xdr->buf, post_err_offset,
if (len <= NFSD4_REPLAY_ISIZE) {
so->so_replay.rp_buflen = len;
read_bytes_from_xdr_buf(xdr->buf,
post_err_offset,
so->so_replay.rp_buf, len);
} else {
so->so_replay.rp_buflen = 0;
}
}
status:
/* Note that op->status is already in network byte order: */

fs/nfsd/state.h

@@ -379,11 +379,18 @@ struct nfs4_client_reclaim {
struct xdr_netobj cr_princhash;
};
/* A reasonable value for REPLAY_ISIZE was estimated as follows:
* The OPEN response, typically the largest, requires
* 4(status) + 8(stateid) + 20(changeinfo) + 4(rflags) + 8(verifier) +
* 4(deleg. type) + 8(deleg. stateid) + 4(deleg. recall flag) +
* 20(deleg. space limit) + ~32(deleg. ace) = 112 bytes
/*
* REPLAY_ISIZE is sized for an OPEN response with delegation:
* 4(status) + 8(stateid) + 20(changeinfo) + 4(rflags) +
* 8(verifier) + 4(deleg. type) + 8(deleg. stateid) +
* 4(deleg. recall flag) + 20(deleg. space limit) +
* ~32(deleg. ace) = 112 bytes
*
* Some responses can exceed this. A LOCK denial includes the conflicting
* lock owner, which can be up to 1024 bytes (NFS4_OPAQUE_LIMIT). Responses
* larger than REPLAY_ISIZE are not cached in rp_ibuf; only rp_status is
* saved. Enlarging this constant increases the size of every
* nfs4_stateowner.
*/
#define NFSD4_REPLAY_ISIZE 112
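
For reference, the itemized sizes in the comment sum exactly to the constant: 4 + 8 + 20 + 4 + 8 + 4 + 8 + 4 + 20 + 32 = 112.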

include/crypto/if_alg.h

@@ -231,9 +231,8 @@ static inline bool af_alg_readable(struct sock *sk)
}
int af_alg_alloc_tsgl(struct sock *sk);
unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes, size_t offset);
void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst,
size_t dst_offset);
unsigned int af_alg_count_tsgl(struct sock *sk, size_t bytes);
void af_alg_pull_tsgl(struct sock *sk, size_t used, struct scatterlist *dst);
void af_alg_free_areq_sgls(struct af_alg_async_req *areq);
int af_alg_wait_for_wmem(struct sock *sk, unsigned int flags);
void af_alg_wmem_wakeup(struct sock *sk);

include/crypto/scatterwalk.h

@@ -111,6 +111,35 @@ static inline void scatterwalk_pagedone(struct scatter_walk *walk, int out,
scatterwalk_start(walk, sg_next(walk->sg));
}
/*
* Flush the dcache of any pages that overlap the region
* [offset, offset + nbytes) relative to base_page.
*
* This should be called only when ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE, to ensure
* that all relevant code (including the call to sg_page() in the caller, if
* applicable) gets fully optimized out when !ARCH_IMPLEMENTS_FLUSH_DCACHE_PAGE.
*/
static inline void __scatterwalk_flush_dcache_pages(struct page *base_page,
unsigned int offset,
unsigned int nbytes)
{
unsigned int num_pages;
unsigned int i;
base_page += offset / PAGE_SIZE;
offset %= PAGE_SIZE;
/*
* This is an overflow-safe version of
* num_pages = DIV_ROUND_UP(offset + nbytes, PAGE_SIZE).
*/
num_pages = nbytes / PAGE_SIZE;
num_pages += DIV_ROUND_UP(offset + (nbytes % PAGE_SIZE), PAGE_SIZE);
for (i = 0; i < num_pages; i++)
flush_dcache_page(base_page + i);
}
static inline void scatterwalk_done(struct scatter_walk *walk, int out,
int more)
{
@@ -123,6 +152,9 @@ void scatterwalk_copychunks(void *buf, struct scatter_walk *walk,
size_t nbytes, int out);
void *scatterwalk_map(struct scatter_walk *walk);
void memcpy_sglist(struct scatterlist *dst, struct scatterlist *src,
unsigned int nbytes);
void scatterwalk_map_and_copy(void *buf, struct scatterlist *sg,
unsigned int start, unsigned int nbytes, int out);
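
To see why __scatterwalk_flush_dcache_pages() splits the page count instead of using the naive DIV_ROUND_UP, a worked example with 4 KiB pages (the concrete values are illustrative):

/*
 * After "offset %= PAGE_SIZE", offset < 4096, but nbytes can approach
 * UINT_MAX.  Take offset = 300 and nbytes = 0xFFFFFF00 (4294967040):
 *
 *   naive: DIV_ROUND_UP(offset + nbytes, PAGE_SIZE)
 *          300 + 4294967040 = 4294967340, which wraps to 44
 *          -> 1 page flushed
 *
 *   split: nbytes / PAGE_SIZE                  = 1048575
 *          DIV_ROUND_UP(300 + 3840, PAGE_SIZE) = 2
 *          -> 1048577 pages flushed (correct)
 */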