From 526db249480573dfcca46bd22d548e30c996c5b2 Mon Sep 17 00:00:00 2001
From: Daiki Ueno
Date: Mon, 25 Jul 2022 12:42:36 +0900
Subject: [PATCH] Limit input size for AES-GCM according to SP800-38D

Resolves: #2095251

Signed-off-by: Daiki Ueno
---
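Note on the bound enforced below: SP800-38D section 5.2.1.1 limits the
plaintext of a single AES-GCM invocation to 2^39 - 256 bits, that is
(2^39 - 256) / 8 = 2^36 - 32 bytes, which is exactly the
AES_GCM_ENCRYPT_MAX_BYTES constant used throughout the patch. Each
backend accumulates the processed plaintext in a rekey_counter that is
reset in setkey/setiv, since a fresh key or nonce starts a new GCM
invocation. The standalone sketch below mirrors the helper the patch
adds; it assumes nothing beyond the C standard library and open-codes
the overflow check that the real helper performs with INT_ADD_OK from
gnulib's intprops.h, returning -1 where gnutls returns
GNUTLS_E_INVALID_REQUEST:

    #include <stdint.h>
    #include <stdio.h>

    /* 2^39 - 256 bits expressed in bytes: 2^36 - 32. */
    #define AES_GCM_ENCRYPT_MAX_BYTES ((1ULL << 36) - 32)

    /* Add size to the running per-key plaintext counter, refusing the
     * update once the SP800-38D bound would be exceeded. */
    static int record_size(uint64_t *counter, uint64_t size)
    {
        if (size > AES_GCM_ENCRYPT_MAX_BYTES - *counter)
            return -1; /* would exceed the bound */
        *counter += size;
        return 0;
    }

    int main(void)
    {
        uint64_t counter = 0;

        printf("%d\n", record_size(&counter, 128)); /* 0: accepted */
        printf("%d\n", record_size(&counter, AES_GCM_ENCRYPT_MAX_BYTES)); /* -1 */
        return 0;
    }

Writing the test as "size > MAX - *counter" rather than
"*counter + size > MAX" keeps the comparison overflow-free without a
wider type, relying on the invariant that *counter never exceeds the
maximum.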
 gnutls-3.7.6-aes-gcm-pt-limit.patch | 720 ++++++++++++++++++++++++++++
 gnutls.spec                         |   6 +-
 2 files changed, 725 insertions(+), 1 deletion(-)
 create mode 100644 gnutls-3.7.6-aes-gcm-pt-limit.patch

diff --git a/gnutls-3.7.6-aes-gcm-pt-limit.patch b/gnutls-3.7.6-aes-gcm-pt-limit.patch
new file mode 100644
index 0000000..dd32866
--- /dev/null
+++ b/gnutls-3.7.6-aes-gcm-pt-limit.patch
@@ -0,0 +1,720 @@
+From 2f61f102169e4d6652c9b82246353cd276366809 Mon Sep 17 00:00:00 2001
+From: Daiki Ueno
+Date: Mon, 27 Jun 2022 11:14:50 +0900
+Subject: [PATCH] cipher: limit plaintext length supplied to AES-GCM
+
+According to SP800-38D 5.2.1.1, the input data length of the AES-GCM
+encryption function must be less than or equal to 2^39-256 bits.
+
+Signed-off-by: Daiki Ueno
+---
+ NEWS                                         |  3 +
+ lib/accelerated/aarch64/aes-aarch64.h        | 15 ++++
+ lib/accelerated/aarch64/aes-gcm-aarch64.c    |  9 +++
+ lib/accelerated/x86/aes-gcm-padlock.c        | 29 ++++---
+ lib/accelerated/x86/aes-gcm-x86-aesni.c      | 30 +++++---
+ lib/accelerated/x86/aes-gcm-x86-pclmul-avx.c |  9 +++
+ lib/accelerated/x86/aes-gcm-x86-pclmul.c     |  9 +++
+ lib/accelerated/x86/aes-gcm-x86-ssse3.c      | 30 +++++---
+ lib/accelerated/x86/aes-x86.h                | 15 ++++
+ lib/nettle/cipher.c                          | 41 ++++++++++
+ tests/slow/cipher-api-test.c                 | 79 ++++++++++++++++++++
+ 11 files changed, 240 insertions(+), 29 deletions(-)
+
+diff --git a/lib/accelerated/aarch64/aes-aarch64.h b/lib/accelerated/aarch64/aes-aarch64.h
+index 692d8620d7..0e64f4ed8d 100644
+--- a/lib/accelerated/aarch64/aes-aarch64.h
++++ b/lib/accelerated/aarch64/aes-aarch64.h
+@@ -20,6 +20,21 @@ typedef struct {
+ 	if (s != 16 && s != 24 && s != 32) \
+ 		return GNUTLS_E_INVALID_REQUEST
+ 
++#include "intprops.h"
++#define AES_GCM_ENCRYPT_MAX_BYTES ((1ULL << 36) - 32)
++static inline int
++record_aes_gcm_encrypt_size(size_t *counter, size_t size) {
++	size_t sum;
++
++	if (!INT_ADD_OK(*counter, size, &sum) ||
++	    sum > AES_GCM_ENCRYPT_MAX_BYTES) {
++		return gnutls_assert_val(GNUTLS_E_INVALID_REQUEST);
++	}
++	*counter = sum;
++
++	return 0;
++}
++
+ int aes_v8_set_encrypt_key(const unsigned char *userKey, int bits, AES_KEY *key);
+ int aes_v8_set_decrypt_key(const unsigned char *userKey, int bits, AES_KEY *key);
+ void aes_v8_cbc_encrypt(const unsigned char *in, unsigned char *out,
+diff --git a/lib/accelerated/aarch64/aes-gcm-aarch64.c b/lib/accelerated/aarch64/aes-gcm-aarch64.c
+index 901bd9f60f..be1e69c784 100644
+--- a/lib/accelerated/aarch64/aes-gcm-aarch64.c
++++ b/lib/accelerated/aarch64/aes-gcm-aarch64.c
+@@ -62,6 +62,7 @@ struct aes_gcm_ctx {
+ 	struct gcm128_context gcm;
+ 	unsigned finished;
+ 	unsigned auth_finished;
++	size_t rekey_counter;
+ };
+ 
+ void gcm_init_v8(u128 Htable[16], const uint64_t Xi[2]);
+@@ -116,6 +117,7 @@ aes_gcm_cipher_setkey(void *_ctx, const void *userkey, size_t keysize)
+ 	ctx->gcm.H.u[1] = bswap_64(ctx->gcm.H.u[1]);
+ 
+ 	gcm_init_v8(ctx->gcm.Htable, ctx->gcm.H.u);
++	ctx->rekey_counter = 0;
+ 
+ 	return 0;
+ }
+@@ -141,6 +143,7 @@ static int aes_gcm_setiv(void *_ctx, const void *iv, size_t iv_size)
+ 	ctx->gcm.Yi.c[GCM_BLOCK_SIZE - 1] = 2;
+ 	ctx->finished = 0;
+ 	ctx->auth_finished = 0;
++	ctx->rekey_counter = 0;
+ 	return 0;
+ }
+ 
+@@ -229,6 +232,7 @@ aes_gcm_encrypt(void *_ctx, const void *src, size_t src_size,
+ 	int exp_blocks = blocks * GCM_BLOCK_SIZE;
+ 	int rest = src_size - (exp_blocks);
+ 	uint32_t counter;
++	int ret;
+ 
+ 	if (unlikely(ctx->finished))
+ 		return gnutls_assert_val(GNUTLS_E_INVALID_REQUEST);
+@@ -236,6 +240,11 @@ aes_gcm_encrypt(void *_ctx, const void *src, size_t src_size,
+ 	if (unlikely(length < src_size))
+ 		return gnutls_assert_val(GNUTLS_E_SHORT_MEMORY_BUFFER);
+ 
++	ret = record_aes_gcm_encrypt_size(&ctx->rekey_counter, src_size);
++	if (ret < 0) {
++		return gnutls_assert_val(ret);
++	}
++
+ 	if (blocks > 0) {
+ 		ctr32_encrypt_blocks(src, dst,
+ 				     blocks,
+diff --git a/lib/accelerated/x86/aes-gcm-padlock.c b/lib/accelerated/x86/aes-gcm-padlock.c
+index a9c7441d65..739883ab1b 100644
+--- a/lib/accelerated/x86/aes-gcm-padlock.c
++++ b/lib/accelerated/x86/aes-gcm-padlock.c
+@@ -43,7 +43,10 @@
+  * Actually padlock doesn't include GCM mode. We just use
+  * the ECB part of padlock and nettle for everything else.
+  */
+-struct gcm_padlock_aes_ctx GCM_CTX(struct padlock_ctx);
++struct gcm_padlock_aes_ctx {
++	struct GCM_CTX(struct padlock_ctx) inner;
++	size_t rekey_counter;
++};
+ 
+ static void padlock_aes_encrypt(const void *_ctx,
+ 				size_t length, uint8_t * dst,
+@@ -78,7 +81,7 @@ static void padlock_aes256_set_encrypt_key(struct padlock_ctx *_ctx,
+ 
+ static void aes_gcm_deinit(void *_ctx)
+ {
+-	struct padlock_ctx *ctx = _ctx;
++	struct gcm_padlock_aes_ctx *ctx = _ctx;
+ 
+ 	zeroize_temp_key(ctx, sizeof(*ctx));
+ 	gnutls_free(ctx);
+@@ -108,14 +111,15 @@ aes_gcm_cipher_setkey(void *_ctx, const void *key, size_t keysize)
+ 	struct gcm_padlock_aes_ctx *ctx = _ctx;
+ 
+ 	if (keysize == 16) {
+-		GCM_SET_KEY(ctx, padlock_aes128_set_encrypt_key, padlock_aes_encrypt,
++		GCM_SET_KEY(&ctx->inner, padlock_aes128_set_encrypt_key, padlock_aes_encrypt,
+ 			    key);
+ 	} else if (keysize == 32) {
+-		GCM_SET_KEY(ctx, padlock_aes256_set_encrypt_key, padlock_aes_encrypt,
++		GCM_SET_KEY(&ctx->inner, padlock_aes256_set_encrypt_key, padlock_aes_encrypt,
+ 			    key);
+ 	} else
+ 		return GNUTLS_E_INVALID_REQUEST;
+ 
++	ctx->rekey_counter = 0;
+ 	return 0;
+ }
+ 
+@@ -126,8 +130,9 @@ static int aes_gcm_setiv(void *_ctx, const void *iv, size_t iv_size)
+ 	if (iv_size != GCM_BLOCK_SIZE - 4)
+ 		return gnutls_assert_val(GNUTLS_E_INVALID_REQUEST);
+ 
+-	GCM_SET_IV(ctx, iv_size, iv);
++	GCM_SET_IV(&ctx->inner, iv_size, iv);
+ 
++	ctx->rekey_counter = 0;
+ 	return 0;
+ }
+ 
+@@ -136,11 +141,17 @@ aes_gcm_encrypt(void *_ctx, const void *src, size_t src_size,
+ 		void *dst, size_t length)
+ {
+ 	struct gcm_padlock_aes_ctx *ctx = _ctx;
++	int ret;
+ 
+ 	if (unlikely(length < src_size))
+ 		return gnutls_assert_val(GNUTLS_E_SHORT_MEMORY_BUFFER);
+ 
+-	GCM_ENCRYPT(ctx, padlock_aes_encrypt, src_size, dst, src);
++	ret = record_aes_gcm_encrypt_size(&ctx->rekey_counter, src_size);
++	if (ret < 0) {
++		return gnutls_assert_val(ret);
++	}
++
++	GCM_ENCRYPT(&ctx->inner, padlock_aes_encrypt, src_size, dst, src);
+ 
+ 	return 0;
+ }
+@@ -154,7 +165,7 @@ aes_gcm_decrypt(void *_ctx, const void *src, size_t src_size,
+ 	if (unlikely(dst_size < src_size))
+ 		return gnutls_assert_val(GNUTLS_E_SHORT_MEMORY_BUFFER);
+ 
+-	GCM_DECRYPT(ctx, padlock_aes_encrypt, src_size, dst, src);
++	GCM_DECRYPT(&ctx->inner, padlock_aes_encrypt, src_size, dst, src);
+ 	return 0;
+ }
+ 
+@@ -162,7 +173,7 @@ static int aes_gcm_auth(void *_ctx, const void *src, size_t src_size)
+ {
+ 	struct gcm_padlock_aes_ctx *ctx = _ctx;
+ 
+-	GCM_UPDATE(ctx, src_size, src);
++	GCM_UPDATE(&ctx->inner, src_size, src);
+ 
+ 	return 0;
+ }
+@@ -171,7 +182,7 @@ static void aes_gcm_tag(void *_ctx, void *tag, size_t tagsize)
+ {
+ 	struct gcm_padlock_aes_ctx *ctx = _ctx;
+ 
+-	GCM_DIGEST(ctx, padlock_aes_encrypt, tagsize, tag);
++	GCM_DIGEST(&ctx->inner, padlock_aes_encrypt, tagsize, tag);
+ }
+ 
+ #include "aes-gcm-aead.h"
+diff --git a/lib/accelerated/x86/aes-gcm-x86-aesni.c b/lib/accelerated/x86/aes-gcm-x86-aesni.c
+index b0edaebfba..3be63ddd97 100644
+--- a/lib/accelerated/x86/aes-gcm-x86-aesni.c
++++ b/lib/accelerated/x86/aes-gcm-x86-aesni.c
+@@ -36,12 +36,14 @@
+ #include 
+ #include 
+ #include 
+-#include 
+ 
+ /* GCM mode
+  * It is used when the CPU doesn't include the PCLMUL instructions.
+  */
+-struct gcm_x86_aes_ctx GCM_CTX(AES_KEY);
++struct gcm_x86_aes_ctx {
++	struct GCM_CTX(AES_KEY) inner;
++	size_t rekey_counter;
++};
+ 
+ static void x86_aes_encrypt(const void *_ctx,
+ 			    size_t length, uint8_t * dst,
+@@ -101,17 +103,18 @@ aes_gcm_cipher_setkey(void *_ctx, const void *key, size_t length)
+ 	struct gcm_x86_aes_ctx *ctx = _ctx;
+ 
+ 	if (length == 16) {
+-		GCM_SET_KEY(ctx, x86_aes128_set_encrypt_key, x86_aes_encrypt,
++		GCM_SET_KEY(&ctx->inner, x86_aes128_set_encrypt_key, x86_aes_encrypt,
+ 			    key);
+ 	} else if (length == 24) {
+-		GCM_SET_KEY(ctx, x86_aes192_set_encrypt_key, x86_aes_encrypt,
++		GCM_SET_KEY(&ctx->inner, x86_aes192_set_encrypt_key, x86_aes_encrypt,
+ 			    key);
+ 	} else if (length == 32) {
+-		GCM_SET_KEY(ctx, x86_aes256_set_encrypt_key, x86_aes_encrypt,
++		GCM_SET_KEY(&ctx->inner, x86_aes256_set_encrypt_key, x86_aes_encrypt,
+ 			    key);
+ 	} else
+ 		return GNUTLS_E_INVALID_REQUEST;
+ 
++	ctx->rekey_counter = 0;
+ 	return 0;
+ }
+ 
+@@ -122,8 +125,9 @@ static int aes_gcm_setiv(void *_ctx, const void *iv, size_t iv_size)
+ 	if (iv_size != GCM_BLOCK_SIZE - 4)
+ 		return gnutls_assert_val(GNUTLS_E_INVALID_REQUEST);
+ 
+-	GCM_SET_IV(ctx, iv_size, iv);
++	GCM_SET_IV(&ctx->inner, iv_size, iv);
+ 
++	ctx->rekey_counter = 0;
+ 	return 0;
+ }
+ 
+@@ -132,11 +136,17 @@ aes_gcm_encrypt(void *_ctx, const void *src, size_t src_size,
+ 		void *dst, size_t length)
+ {
+ 	struct gcm_x86_aes_ctx *ctx = _ctx;
++	int ret;
+ 
+ 	if (unlikely(length < src_size))
+ 		return gnutls_assert_val(GNUTLS_E_SHORT_MEMORY_BUFFER);
+ 
+-	GCM_ENCRYPT(ctx, x86_aes_encrypt, src_size, dst, src);
++	ret = record_aes_gcm_encrypt_size(&ctx->rekey_counter, src_size);
++	if (ret < 0) {
++		return gnutls_assert_val(ret);
++	}
++
++	GCM_ENCRYPT(&ctx->inner, x86_aes_encrypt, src_size, dst, src);
+ 
+ 	return 0;
+ }
+@@ -150,7 +160,7 @@ aes_gcm_decrypt(void *_ctx, const void *src, size_t src_size,
+ 	if (unlikely(dst_size < src_size))
+ 		return gnutls_assert_val(GNUTLS_E_SHORT_MEMORY_BUFFER);
+ 
+-	GCM_DECRYPT(ctx, x86_aes_encrypt, src_size, dst, src);
++	GCM_DECRYPT(&ctx->inner, x86_aes_encrypt, src_size, dst, src);
+ 	return 0;
+ }
+ 
+@@ -158,7 +168,7 @@ static int aes_gcm_auth(void *_ctx, const void *src, size_t src_size)
+ {
+ 	struct gcm_x86_aes_ctx *ctx = _ctx;
+ 
+-	GCM_UPDATE(ctx, src_size, src);
++	GCM_UPDATE(&ctx->inner, src_size, src);
+ 
+ 	return 0;
+ }
+@@ -167,7 +177,7 @@ static void aes_gcm_tag(void *_ctx, void *tag, size_t tagsize)
+ {
+ 	struct gcm_x86_aes_ctx *ctx = _ctx;
+ 
+-	GCM_DIGEST(ctx, x86_aes_encrypt, tagsize, tag);
++	GCM_DIGEST(&ctx->inner, x86_aes_encrypt, tagsize, tag);
+ }
+ 
+ static void aes_gcm_deinit(void *_ctx)
+diff --git a/lib/accelerated/x86/aes-gcm-x86-pclmul-avx.c b/lib/accelerated/x86/aes-gcm-x86-pclmul-avx.c
+index 21aef94440..fbefe432f4 100644
+--- a/lib/accelerated/x86/aes-gcm-x86-pclmul-avx.c
++++ b/lib/accelerated/x86/aes-gcm-x86-pclmul-avx.c
+@@ -61,6 +61,7 @@ struct aes_gcm_ctx {
+ 	struct gcm128_context gcm;
+ 	unsigned finished;
+ 	unsigned auth_finished;
++	size_t rekey_counter;
+ };
+ 
+ void gcm_init_avx(u128 Htable[16], const uint64_t Xi[2]);
+@@ -116,6 +117,7 @@ aes_gcm_cipher_setkey(void *_ctx, const void *userkey, size_t keysize)
+ 
+ 	gcm_init_avx(ctx->gcm.Htable, ctx->gcm.H.u);
+ 
++	ctx->rekey_counter = 0;
+ 	return 0;
+ }
+ 
+@@ -140,6 +142,7 @@ static int aes_gcm_setiv(void *_ctx, const void *iv, size_t iv_size)
+ 	ctx->gcm.Yi.c[GCM_BLOCK_SIZE - 1] = 2;
+ 	ctx->finished = 0;
+ 	ctx->auth_finished = 0;
++	ctx->rekey_counter = 0;
+ 	return 0;
+ }
+ 
+@@ -184,6 +187,7 @@ aes_gcm_encrypt(void *_ctx, const void *src, size_t src_size,
+ 	int exp_blocks = blocks * GCM_BLOCK_SIZE;
+ 	int rest = src_size - (exp_blocks);
+ 	uint32_t counter;
++	int ret;
+ 
+ 	if (unlikely(ctx->finished))
+ 		return gnutls_assert_val(GNUTLS_E_INVALID_REQUEST);
+@@ -191,6 +195,11 @@ aes_gcm_encrypt(void *_ctx, const void *src, size_t src_size,
+ 	if (unlikely(length < src_size))
+ 		return gnutls_assert_val(GNUTLS_E_SHORT_MEMORY_BUFFER);
+ 
++	ret = record_aes_gcm_encrypt_size(&ctx->rekey_counter, src_size);
++	if (ret < 0) {
++		return gnutls_assert_val(ret);
++	}
++
+ 	if (blocks > 0) {
+ 		aesni_ctr32_encrypt_blocks(src, dst,
+ 					   blocks,
+diff --git a/lib/accelerated/x86/aes-gcm-x86-pclmul.c b/lib/accelerated/x86/aes-gcm-x86-pclmul.c
+index e6b4990cbf..5385acbb6b 100644
+--- a/lib/accelerated/x86/aes-gcm-x86-pclmul.c
++++ b/lib/accelerated/x86/aes-gcm-x86-pclmul.c
+@@ -60,6 +60,7 @@ struct aes_gcm_ctx {
+ 	struct gcm128_context gcm;
+ 	unsigned finished;
+ 	unsigned auth_finished;
++	size_t rekey_counter;
+ };
+ 
+ void gcm_init_clmul(u128 Htable[16], const uint64_t Xi[2]);
+@@ -116,6 +117,7 @@ aes_gcm_cipher_setkey(void *_ctx, const void *userkey, size_t keysize)
+ 
+ 	gcm_init_clmul(ctx->gcm.Htable, ctx->gcm.H.u);
+ 
++	ctx->rekey_counter = 0;
+ 	return 0;
+ }
+ 
+@@ -140,6 +142,7 @@ static int aes_gcm_setiv(void *_ctx, const void *iv, size_t iv_size)
+ 	ctx->gcm.Yi.c[GCM_BLOCK_SIZE - 1] = 2;
+ 	ctx->finished = 0;
+ 	ctx->auth_finished = 0;
++	ctx->rekey_counter = 0;
+ 	return 0;
+ }
+ 
+@@ -184,6 +187,7 @@ aes_gcm_encrypt(void *_ctx, const void *src, size_t src_size,
+ 	int exp_blocks = blocks * GCM_BLOCK_SIZE;
+ 	int rest = src_size - (exp_blocks);
+ 	uint32_t counter;
++	int ret;
+ 
+ 	if (unlikely(ctx->finished))
+ 		return gnutls_assert_val(GNUTLS_E_INVALID_REQUEST);
+@@ -191,6 +195,11 @@ aes_gcm_encrypt(void *_ctx, const void *src, size_t src_size,
+ 	if (unlikely(length < src_size))
+ 		return gnutls_assert_val(GNUTLS_E_SHORT_MEMORY_BUFFER);
+ 
++	ret = record_aes_gcm_encrypt_size(&ctx->rekey_counter, src_size);
++	if (ret < 0) {
++		return gnutls_assert_val(ret);
++	}
++
+ 	if (blocks > 0) {
+ 		aesni_ctr32_encrypt_blocks(src, dst,
+ 					   blocks,
+diff --git a/lib/accelerated/x86/aes-gcm-x86-ssse3.c b/lib/accelerated/x86/aes-gcm-x86-ssse3.c
+index 7a2ac50869..f074cb1096 100644
+--- a/lib/accelerated/x86/aes-gcm-x86-ssse3.c
++++ b/lib/accelerated/x86/aes-gcm-x86-ssse3.c
+@@ -36,13 +36,15 @@
+ #include 
+ #include 
+ #include 
+-#include 
+ #include 
+ 
+ /* GCM mode
+  * It is used when the CPU doesn't include the PCLMUL instructions.
+  */
+-struct gcm_x86_aes_ctx GCM_CTX(AES_KEY);
++struct gcm_x86_aes_ctx {
++	struct GCM_CTX(AES_KEY) inner;
++	size_t rekey_counter;
++};
+ 
+ static void x86_aes_encrypt(const void *_ctx,
+ 			    size_t length, uint8_t * dst,
+@@ -110,17 +112,18 @@ aes_gcm_cipher_setkey(void *_ctx, const void *key, size_t keysize)
+ 	struct gcm_x86_aes_ctx *ctx = _ctx;
+ 
+ 	if (keysize == 16) {
+-		GCM_SET_KEY(ctx, x86_aes_128_set_encrypt_key, x86_aes_encrypt,
++		GCM_SET_KEY(&ctx->inner, x86_aes_128_set_encrypt_key, x86_aes_encrypt,
+ 			    key);
+ 	} else if (keysize == 24) {
+-		GCM_SET_KEY(ctx, x86_aes_192_set_encrypt_key, x86_aes_encrypt,
++		GCM_SET_KEY(&ctx->inner, x86_aes_192_set_encrypt_key, x86_aes_encrypt,
+ 			    key);
+ 	} else if (keysize == 32) {
+-		GCM_SET_KEY(ctx, x86_aes_256_set_encrypt_key, x86_aes_encrypt,
++		GCM_SET_KEY(&ctx->inner, x86_aes_256_set_encrypt_key, x86_aes_encrypt,
+ 			    key);
+ 	} else
+ 		return GNUTLS_E_INVALID_REQUEST;
+ 
++	ctx->rekey_counter = 0;
+ 	return 0;
+ }
+ 
+@@ -131,8 +134,9 @@ static int aes_gcm_setiv(void *_ctx, const void *iv, size_t iv_size)
+ 	if (iv_size != GCM_BLOCK_SIZE - 4)
+ 		return gnutls_assert_val(GNUTLS_E_INVALID_REQUEST);
+ 
+-	GCM_SET_IV(ctx, iv_size, iv);
++	GCM_SET_IV(&ctx->inner, iv_size, iv);
+ 
++	ctx->rekey_counter = 0;
+ 	return 0;
+ }
+ 
+@@ -141,11 +145,17 @@ aes_gcm_encrypt(void *_ctx, const void *src, size_t src_size,
+ 		void *dst, size_t length)
+ {
+ 	struct gcm_x86_aes_ctx *ctx = _ctx;
++	int ret;
+ 
+ 	if (unlikely(length < src_size))
+ 		return gnutls_assert_val(GNUTLS_E_SHORT_MEMORY_BUFFER);
+ 
+-	GCM_ENCRYPT(ctx, x86_aes_encrypt, src_size, dst, src);
++	ret = record_aes_gcm_encrypt_size(&ctx->rekey_counter, src_size);
++	if (ret < 0) {
++		return gnutls_assert_val(ret);
++	}
++
++	GCM_ENCRYPT(&ctx->inner, x86_aes_encrypt, src_size, dst, src);
+ 
+ 	return 0;
+ }
+@@ -159,7 +169,7 @@ aes_gcm_decrypt(void *_ctx, const void *src, size_t src_size,
+ 	if (unlikely(dst_size < src_size))
+ 		return gnutls_assert_val(GNUTLS_E_SHORT_MEMORY_BUFFER);
+ 
+-	GCM_DECRYPT(ctx, x86_aes_encrypt, src_size, dst, src);
++	GCM_DECRYPT(&ctx->inner, x86_aes_encrypt, src_size, dst, src);
+ 	return 0;
+ }
+ 
+@@ -167,7 +177,7 @@ static int aes_gcm_auth(void *_ctx, const void *src, size_t src_size)
+ {
+ 	struct gcm_x86_aes_ctx *ctx = _ctx;
+ 
+-	GCM_UPDATE(ctx, src_size, src);
++	GCM_UPDATE(&ctx->inner, src_size, src);
+ 
+ 	return 0;
+ }
+@@ -176,7 +186,7 @@ static void aes_gcm_tag(void *_ctx, void *tag, size_t tagsize)
+ {
+ 	struct gcm_x86_aes_ctx *ctx = _ctx;
+ 
+-	GCM_DIGEST(ctx, x86_aes_encrypt, tagsize, tag);
++	GCM_DIGEST(&ctx->inner, x86_aes_encrypt, tagsize, tag);
+ }
+ 
+ static void aes_gcm_deinit(void *_ctx)
+diff --git a/lib/accelerated/x86/aes-x86.h b/lib/accelerated/x86/aes-x86.h
+index 023b5f7be6..349d3d5d9c 100644
+--- a/lib/accelerated/x86/aes-x86.h
++++ b/lib/accelerated/x86/aes-x86.h
+@@ -22,6 +22,21 @@ typedef struct {
+ 	if (s != 16 && s != 24 && s != 32) \
+ 		return GNUTLS_E_INVALID_REQUEST
+ 
++#include "intprops.h"
++#define AES_GCM_ENCRYPT_MAX_BYTES ((1ULL << 36) - 32)
++static inline int
++record_aes_gcm_encrypt_size(size_t *counter, size_t size) {
++	size_t sum;
++
++	if (!INT_ADD_OK(*counter, size, &sum) ||
++	    sum > AES_GCM_ENCRYPT_MAX_BYTES) {
++		return gnutls_assert_val(GNUTLS_E_INVALID_REQUEST);
++	}
++	*counter = sum;
++
++	return 0;
++}
++
+ void aesni_ecb_encrypt(const unsigned char *in, unsigned char *out,
+ 		       size_t len, const AES_KEY * key, int enc);
+ 
+diff --git a/lib/nettle/cipher.c b/lib/nettle/cipher.c
+index ab4c46d2d0..b41862d1ea 100644
+--- a/lib/nettle/cipher.c
++++ b/lib/nettle/cipher.c
+@@ -63,6 +63,7 @@
+ #include 
+ #include 
+ #include 
++#include "intprops.h"
+ 
+ struct nettle_cipher_ctx;
+ 
+@@ -120,8 +121,23 @@ struct nettle_cipher_ctx {
+ 	unsigned iv_size;
+ 
+ 	bool enc;
++	size_t rekey_counter;
+ };
+ 
++#define AES_GCM_ENCRYPT_MAX_BYTES ((1ULL << 36) - 32)
++static inline int
++record_aes_gcm_encrypt_size(size_t *counter, size_t size) {
++	size_t sum;
++
++	if (!INT_ADD_OK(*counter, size, &sum) ||
++	    sum > AES_GCM_ENCRYPT_MAX_BYTES) {
++		return gnutls_assert_val(GNUTLS_E_INVALID_REQUEST);
++	}
++	*counter = sum;
++
++	return 0;
++}
++
+ static void
+ _stream_encrypt(struct nettle_cipher_ctx *ctx, size_t length, uint8_t * dst,
+ 		const uint8_t * src)
+@@ -1133,6 +1149,16 @@ wrap_nettle_cipher_setkey(void *_ctx, const void *key, size_t keysize)
+ 	else
+ 		ctx->cipher->set_decrypt_key(ctx->ctx_ptr, key);
+ 
++	switch (ctx->cipher->algo) {
++	case GNUTLS_CIPHER_AES_128_GCM:
++	case GNUTLS_CIPHER_AES_192_GCM:
++	case GNUTLS_CIPHER_AES_256_GCM:
++		ctx->rekey_counter = 0;
++		break;
++	default:
++		break;
++	}
++
+ 	return 0;
+ }
+ 
+@@ -1147,6 +1173,7 @@ wrap_nettle_cipher_setiv(void *_ctx, const void *iv, size_t iv_size)
+ 	case GNUTLS_CIPHER_AES_192_GCM:
+ 	case GNUTLS_CIPHER_AES_256_GCM:
+ 		FIPS_RULE(iv_size < GCM_IV_SIZE, GNUTLS_E_INVALID_REQUEST, "access to short GCM nonce size\n");
++		ctx->rekey_counter = 0;
+ 		break;
+ 	case GNUTLS_CIPHER_SALSA20_256:
+ 	case GNUTLS_CIPHER_ESTREAM_SALSA20_256:
+@@ -1207,10 +1234,24 @@ wrap_nettle_cipher_encrypt(void *_ctx, const void *plain, size_t plain_size,
+ 			   void *encr, size_t encr_size)
+ {
+ 	struct nettle_cipher_ctx *ctx = _ctx;
++	int ret;
+ 
+ 	if (unlikely(ctx->cipher->encrypt == NULL))
+ 		return gnutls_assert_val(GNUTLS_E_INVALID_REQUEST);
+ 
++	switch (ctx->cipher->algo) {
++	case GNUTLS_CIPHER_AES_128_GCM:
++	case GNUTLS_CIPHER_AES_192_GCM:
++	case GNUTLS_CIPHER_AES_256_GCM:
++		ret = record_aes_gcm_encrypt_size(&ctx->rekey_counter, plain_size);
++		if (ret < 0) {
++			return gnutls_assert_val(GNUTLS_E_INVALID_REQUEST);
++		}
++		break;
++	default:
++		break;
++	}
++
+ 	ctx->cipher->encrypt(ctx, plain_size, encr, plain);
+ 
+ 	return 0;
+diff --git a/tests/slow/cipher-api-test.c b/tests/slow/cipher-api-test.c
+index fc880bcc9f..1d267ce312 100644
+--- a/tests/slow/cipher-api-test.c
++++ b/tests/slow/cipher-api-test.c
+@@ -21,6 +21,7 @@
+  */
+ 
+ #include 
++#include <stdint.h>
+ #include 
+ #include 
+ #include 
+@@ -48,6 +49,11 @@ int main(int argc, char **argv)
+ #include 
+ #include 
+ 
++#define AES_GCM_ENCRYPT_PLAINTEXT_MAX ((1ULL << 36) - 32)
++#if SIZE_MAX >= AES_GCM_ENCRYPT_PLAINTEXT_MAX
++#define TEST_AES_GCM_ENCRYPT_PLAINTEXT_SIZE 1
++#endif
++
+ static void tls_log_func(int level, const char *str)
+ {
+ 	fprintf(stderr, "<%d>| %s", level, str);
+@@ -401,6 +407,74 @@ static void test_aead_invalid_short_decrypt(int algo)
+ 	return;
+ }
+ 
++#ifdef TEST_AES_GCM_ENCRYPT_PLAINTEXT_SIZE
++/* Test whether an invalid call to gnutls_cipher_encrypt() with a too
++ * long message is caught */
++static void test_aead_invalid_too_long_encrypt(int algo)
++{
++	int ret;
++	gnutls_cipher_hd_t ch;
++	uint8_t key16[64];
++	uint8_t iv16[32];
++	uint8_t data[128];
++	gnutls_datum_t key, iv;
++
++	if (algo != GNUTLS_CIPHER_AES_128_GCM &&
++	    algo != GNUTLS_CIPHER_AES_192_GCM &&
++	    algo != GNUTLS_CIPHER_AES_256_GCM) {
++		return;
++	}
++
++	key.data = key16;
++	key.size = gnutls_cipher_get_key_size(algo);
++	assert(key.size <= sizeof(key16));
++
++	iv.data = iv16;
++	iv.size = gnutls_cipher_get_iv_size(algo);
++	assert(iv.size <= sizeof(iv16));
++
++	memset(iv.data, 0xff, iv.size);
++	memset(key.data, 0xfe, key.size);
++	memset(data, 0xfa, sizeof(data));
++
++	gnutls_global_set_log_function(tls_log_func);
++	if (debug)
++		gnutls_global_set_log_level(4711);
++
++	ret = global_init();
++	if (ret < 0) {
++		fail("Cannot initialize library\n"); /*errcode 1 */
++	}
++
++	ret = gnutls_cipher_init(&ch, algo, &key, &iv);
++	if (ret < 0)
++		fail("gnutls_cipher_init failed\n"); /*errcode 1 */
++
++	/* Test exceeding AES-GCM plaintext limit */
++	ret = gnutls_cipher_encrypt(ch, data, sizeof(data));
++	if (ret < 0)
++		fail("could not encrypt data\n");
++
++	/* A few blocks larger than AES_GCM_ENCRYPT_PLAINTEXT_MAX combined with
++	 * the previous call. Use NULL for PLAINTEXT so the access to the first
++	 * block always results in a page fault (in case the limit is not
++	 * enforced).
++	 */
++	ret = gnutls_cipher_encrypt(ch, NULL, AES_GCM_ENCRYPT_PLAINTEXT_MAX);
++	if (ret >= 0)
++		fail("succeeded in encrypting too long data\n");
++	if (ret != GNUTLS_E_INVALID_REQUEST)
++		fail("wrong kind of error on encrypting too long data, "
++		     "%s instead of GNUTLS_E_INVALID_REQUEST\n",
++		     gnutls_strerror_name(ret));
++
++	gnutls_cipher_deinit(ch);
++
++	gnutls_global_deinit();
++	return;
++}
++#endif
++
+ static void check_status(int status)
+ {
+ 	if (WEXITSTATUS(status) != 0 ||
+@@ -464,6 +538,11 @@ void start(const char *name, int algo, unsigned aead)
+ 
+ 		success("trying %s: test_aead_invalid_short_decrypt\n", name);
+ 		fork_subtest(test_aead_invalid_short_decrypt, algo);
++
++#if TEST_AES_GCM_ENCRYPT_PLAINTEXT_SIZE
++		success("trying %s: test_aead_invalid_too_long_encrypt\n", name);
++		fork_subtest(test_aead_invalid_too_long_encrypt, algo);
++#endif
+ 	}
+ }
+ 
+-- 
+2.37.1
+
diff --git a/gnutls.spec b/gnutls.spec
index 5e3c82a..aaaa962 100644
--- a/gnutls.spec
+++ b/gnutls.spec
@@ -13,7 +13,7 @@ print(string.sub(hash, 0, 16))
 }
 
 Version: 3.7.6
-Release: 2%{?dist}
+Release: 3%{?dist}
 # not upstreamed
 Patch: gnutls-3.6.7-no-now-guile.patch
 Patch: gnutls-3.2.7-rpath.patch
@@ -24,6 +24,7 @@ Patch: gnutls-3.7.2-no-explicit-init.patch
 Patch: gnutls-3.7.6-fips-run-selftests.patch
 Patch: gnutls-3.7.6-ktls-disable-by-default.patch
 Patch: gnutls-3.7.6-ktls-fixes.patch
+Patch: gnutls-3.7.6-aes-gcm-pt-limit.patch
 # not upstreamed
 Patch: gnutls-3.7.3-disable-config-reload.patch
 
@@ -359,6 +360,9 @@ make check %{?_smp_mflags} GNUTLS_SYSTEM_PRIORITY_FILE=/dev/null
 %endif
 
 %changelog
+* Mon Jul 25 2022 Daiki Ueno - 3.7.6-3
+- Limit input size for AES-GCM according to SP800-38D (#2095251)
+
 * Tue Jul 19 2022 Daiki Ueno - 3.7.6-2
 - Allow enabling KTLS with config file (#2042009)
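
With the patch applied, a single cipher handle fails closed: once the
cumulative plaintext passed to gnutls_cipher_encrypt() under one key
and nonce would exceed 2^36 - 32 bytes, the call returns
GNUTLS_E_INVALID_REQUEST before touching the input (which is why the
new test can pass NULL). The sketch below shows that calling pattern
using only public GnuTLS API; the buffer sizes are illustrative, it
assumes a 64-bit size_t, and error handling is reduced to asserts:

    #include <assert.h>
    #include <gnutls/crypto.h>

    int main(void)
    {
        unsigned char keybuf[32] = { 0 }, ivbuf[12] = { 0 };
        unsigned char chunk[4096] = { 0 };
        gnutls_datum_t key = { keybuf, sizeof(keybuf) };
        gnutls_datum_t iv = { ivbuf, sizeof(ivbuf) };
        gnutls_cipher_hd_t ch;
        int ret;

        assert(gnutls_cipher_init(&ch, GNUTLS_CIPHER_AES_256_GCM,
                                  &key, &iv) >= 0);

        /* Well under the 2^36 - 32 byte budget: accepted. */
        ret = gnutls_cipher_encrypt(ch, chunk, sizeof(chunk));
        assert(ret >= 0);

        /* Requesting more than the remaining budget in one call is
         * rejected up front instead of violating SP800-38D. */
        ret = gnutls_cipher_encrypt(ch, NULL, (1ULL << 36) - 32);
        assert(ret == GNUTLS_E_INVALID_REQUEST);

        gnutls_cipher_deinit(ch);
        return 0;
    }

Re-keying, or installing a fresh nonce with gnutls_cipher_set_iv(),
resets the internal counter, mirroring the rekey_counter resets in the
backends' setkey/setiv paths above.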