From 1598e33ffdd6c2b806388375faa05da0852e4b04 Mon Sep 17 00:00:00 2001 From: eabdullin Date: Wed, 14 May 2025 15:15:27 +0000 Subject: [PATCH] import UBI nettle-3.10.1-1.el10 --- .gitignore | 4 +- .nettle.metadata | 1 - SOURCES/nettle-3.3-remove-ecc-testsuite.patch | 181 - SOURCES/nettle-3.4-annocheck.patch | 12 - SOURCES/nettle-3.4.1-ecdsa-verify.patch | 109 - SOURCES/nettle-3.4.1-enable-intel-cet.patch | 210 - SOURCES/nettle-3.4.1-powerpc64-aes-asm.patch | 1142 ------ .../nettle-3.4.1-powerpc64-ghash-asm.patch | 1519 ------- SOURCES/nettle-3.4.1-rsa-decrypt.patch | 609 --- SPECS/nettle.spec | 281 -- gmp-6.2.1-intel-cet.patch | 3515 +++++++++++++++++ gmp-6.2.1-zeroize-allocator.patch | 53 + nettle-3.10-hobble-to-configure.patch | 749 ++++ nettle-3.8-zeroize-stack.patch | 334 ++ nettle-release-keyring.gpg | 486 +++ nettle.spec | 480 +++ sources | 3 + 17 files changed, 5623 insertions(+), 4065 deletions(-) delete mode 100644 .nettle.metadata delete mode 100644 SOURCES/nettle-3.3-remove-ecc-testsuite.patch delete mode 100644 SOURCES/nettle-3.4-annocheck.patch delete mode 100644 SOURCES/nettle-3.4.1-ecdsa-verify.patch delete mode 100644 SOURCES/nettle-3.4.1-enable-intel-cet.patch delete mode 100644 SOURCES/nettle-3.4.1-powerpc64-aes-asm.patch delete mode 100644 SOURCES/nettle-3.4.1-powerpc64-ghash-asm.patch delete mode 100644 SOURCES/nettle-3.4.1-rsa-decrypt.patch delete mode 100644 SPECS/nettle.spec create mode 100644 gmp-6.2.1-intel-cet.patch create mode 100644 gmp-6.2.1-zeroize-allocator.patch create mode 100644 nettle-3.10-hobble-to-configure.patch create mode 100644 nettle-3.8-zeroize-stack.patch create mode 100644 nettle-release-keyring.gpg create mode 100644 nettle.spec create mode 100644 sources diff --git a/.gitignore b/.gitignore index d56bb3f..b7ae9e5 100644 --- a/.gitignore +++ b/.gitignore @@ -1 +1,3 @@ -SOURCES/nettle-3.4.1-hobbled.tar.xz +gmp-6.2.1.tar.xz +nettle-3.10.1.tar.gz +nettle-3.10.1.tar.gz.sig diff --git a/.nettle.metadata b/.nettle.metadata deleted file mode 100644 index d215f72..0000000 --- a/.nettle.metadata +++ /dev/null @@ -1 +0,0 @@ -be5faff0092a6d78186484374c0ea20465eb218c SOURCES/nettle-3.4.1-hobbled.tar.xz diff --git a/SOURCES/nettle-3.3-remove-ecc-testsuite.patch b/SOURCES/nettle-3.3-remove-ecc-testsuite.patch deleted file mode 100644 index 1b519f9..0000000 --- a/SOURCES/nettle-3.3-remove-ecc-testsuite.patch +++ /dev/null @@ -1,181 +0,0 @@ -diff --git a/examples/ecc-benchmark.c b/examples/ecc-benchmark.c -index 8e5e095..720d483 100644 ---- a/examples/ecc-benchmark.c -+++ b/examples/ecc-benchmark.c -@@ -330,8 +330,6 @@ bench_curve (const struct ecc_curve *ecc) - } - - const struct ecc_curve * const curves[] = { -- &nettle_secp_192r1, -- &nettle_secp_224r1, - &_nettle_curve25519, - &nettle_secp_256r1, - &nettle_secp_384r1, -diff --git a/examples/hogweed-benchmark.c b/examples/hogweed-benchmark.c -index 3fabe20..0223fe7 100644 ---- a/examples/hogweed-benchmark.c -+++ b/examples/hogweed-benchmark.c -@@ -393,24 +393,6 @@ bench_ecdsa_init (unsigned size) - - switch (size) - { -- case 192: -- ecc = &nettle_secp_192r1; -- xs = "8e8e07360350fb6b7ad8370cfd32fa8c6bba785e6e200599"; -- ys = "7f82ddb58a43d59ff8dc66053002b918b99bd01bd68d6736"; -- zs = "f2e620e086d658b4b507996988480917640e4dc107808bdd"; -- ctx->digest = hash_string (&nettle_sha1, "abc"); -- ctx->digest_size = 20; -- break; -- case 224: -- ecc = &nettle_secp_224r1; -- xs = "993bf363f4f2bc0f255f22563980449164e9c894d9efd088d7b77334"; -- ys = "b75fff9849997d02d135140e4d0030944589586e22df1fc4b629082a"; -- zs = 
"cdfd01838247f5de3cc70b688418046f10a2bfaca6de9ec836d48c27"; -- ctx->digest = hash_string (&nettle_sha224, "abc"); -- ctx->digest_size = 28; -- break; -- -- /* From RFC 4754 */ - case 256: - ecc = &nettle_secp_256r1; - xs = "2442A5CC 0ECD015F A3CA31DC 8E2BBC70 BF42D60C BCA20085 E0822CB0 4235E970"; -@@ -581,16 +563,6 @@ bench_openssl_ecdsa_init (unsigned size) - - switch (size) - { -- case 192: -- ctx->key = EC_KEY_new_by_curve_name (NID_X9_62_prime192v1); -- ctx->digest_length = 24; /* truncated */ -- ctx->digest = hash_string (&nettle_sha224, "abc"); -- break; -- case 224: -- ctx->key = EC_KEY_new_by_curve_name (NID_secp224r1); -- ctx->digest_length = SHA224_DIGEST_SIZE; -- ctx->digest = hash_string (&nettle_sha224, "abc"); -- break; - case 256: - ctx->key = EC_KEY_new_by_curve_name (NID_X9_62_prime256v1); - ctx->digest_length = SHA256_DIGEST_SIZE; -@@ -701,14 +673,10 @@ struct alg alg_list[] = { - #if 0 - { "dsa",2048, bench_dsa_init, bench_dsa_sign, bench_dsa_verify, bench_dsa_clear }, - #endif -- { "ecdsa", 192, bench_ecdsa_init, bench_ecdsa_sign, bench_ecdsa_verify, bench_ecdsa_clear }, -- { "ecdsa", 224, bench_ecdsa_init, bench_ecdsa_sign, bench_ecdsa_verify, bench_ecdsa_clear }, - { "ecdsa", 256, bench_ecdsa_init, bench_ecdsa_sign, bench_ecdsa_verify, bench_ecdsa_clear }, - { "ecdsa", 384, bench_ecdsa_init, bench_ecdsa_sign, bench_ecdsa_verify, bench_ecdsa_clear }, - { "ecdsa", 521, bench_ecdsa_init, bench_ecdsa_sign, bench_ecdsa_verify, bench_ecdsa_clear }, - #if WITH_OPENSSL -- { "ecdsa (openssl)", 192, bench_openssl_ecdsa_init, bench_openssl_ecdsa_sign, bench_openssl_ecdsa_verify, bench_openssl_ecdsa_clear }, -- { "ecdsa (openssl)", 224, bench_openssl_ecdsa_init, bench_openssl_ecdsa_sign, bench_openssl_ecdsa_verify, bench_openssl_ecdsa_clear }, - { "ecdsa (openssl)", 256, bench_openssl_ecdsa_init, bench_openssl_ecdsa_sign, bench_openssl_ecdsa_verify, bench_openssl_ecdsa_clear }, - { "ecdsa (openssl)", 384, bench_openssl_ecdsa_init, bench_openssl_ecdsa_sign, bench_openssl_ecdsa_verify, bench_openssl_ecdsa_clear }, - { "ecdsa (openssl)", 521, bench_openssl_ecdsa_init, bench_openssl_ecdsa_sign, bench_openssl_ecdsa_verify, bench_openssl_ecdsa_clear }, -diff --git a/testsuite/ecdh-test.c b/testsuite/ecdh-test.c -index 5a2b39d..08870b1 100644 ---- a/testsuite/ecdh-test.c -+++ b/testsuite/ecdh-test.c -@@ -138,26 +138,6 @@ test_dh (const char *name, const struct ecc_curve *ecc, - void - test_main(void) - { -- test_dh ("secp-192r1", &nettle_secp_192r1, -- "3406157206141798348095184987208239421004566462391397236532", -- "1050363442265225480786760666329560655512990381040021438562", -- "5298249600854377235107392014200406283816103564916230704184", -- "738368960171459956677260317271477822683777845013274506165", -- "2585840779771604687467445319428618542927556223024046979917", -- "293088185788565313717816218507714888251468410990708684573", -- "149293809021051532782730990145509724807636529827149481690", -- "2891131861147398318714693938158856874319184314120776776192"); -- -- test_dh ("secp-224r1", &nettle_secp_224r1, -- "1321072106881784386340709783538698930880431939595776773514895067682", -- "6768311794185371282972144247871764855860666277647541840973645586477", -- "2880077809069104378181313860274147139049600284805670362929579614547", -- "13934723037778859565852601874354272638301919827851286722006496784914", -- "373124771833407982305885866158843810218322878380632071540538232035", -- "24223309755162432227459925493224336241652868856405241018762887667883", -- 
"8330362698029245839097779050425944245826040430538860338085968752913", -- "24167244512472228715617822000878192535267113543393576038737592837010"); -- - test_dh ("secp-256r1", &nettle_secp_256r1, - "94731533361265297353914491124013058635674217345912524033267198103710636378786", - "22441589863306126152768848344973918725077248391248404659242620344938484650846", -diff --git a/testsuite/ecdsa-sign-test.c b/testsuite/ecdsa-sign-test.c -index 559de8e..1ca36c2 100644 ---- a/testsuite/ecdsa-sign-test.c -+++ b/testsuite/ecdsa-sign-test.c -@@ -60,37 +60,6 @@ test_main (void) - { - /* Test cases for the smaller groups, verified with a - proof-of-concept implementation done for Yubico AB. */ -- test_ecdsa (&nettle_secp_192r1, -- "DC51D3866A15BACDE33D96F992FCA99D" -- "A7E6EF0934E70975", /* z */ -- -- "9E56F509196784D963D1C0A401510EE7" -- "ADA3DCC5DEE04B15", /* k */ -- -- SHEX("BA7816BF8F01CFEA414140DE5DAE2223" -- "B00361A396177A9C"), /* h */ -- -- "8c478db6a5c131540cebc739f9c0a9a8" -- "c720c2abdd14a891", /* r */ -- -- "a91fb738f9f175d72f9c98527e881c36" -- "8de68cb55ffe589"); /* s */ -- -- test_ecdsa (&nettle_secp_224r1, -- "446df0a771ed58403ca9cb316e617f6b" -- "158420465d00a69601e22858", /* z */ -- -- "4c13f1905ad7eb201178bc08e0c9267b" -- "4751c15d5e1831ca214c33f4", /* z */ -- -- SHEX("1b28a611fe62ab3649350525d06703ba" -- "4b979a1e543566fd5caa85c6"), /* h */ -- -- "2cc280778f3d067df6d3adbe3a6aad63" -- "bc75f08f5c5f915411902a99", /* r */ -- -- "d0f069fd0f108eb07b7bbc54c8d6c88d" -- "f2715c38a95c31a2b486995f"); /* s */ - - /* From RFC 4754 */ - test_ecdsa (&nettle_secp_256r1, -diff --git a/testsuite/testutils.c b/testsuite/testutils.c -index 6f89761..901f62b 100644 ---- a/testsuite/testutils.c -+++ b/testsuite/testutils.c -@@ -1212,8 +1212,6 @@ test_dsa_key(const struct dsa_params *params, - } - - const struct ecc_curve * const ecc_curves[] = { -- &nettle_secp_192r1, -- &nettle_secp_224r1, - &nettle_secp_256r1, - &nettle_secp_384r1, - &nettle_secp_521r1, -@@ -1270,20 +1268,6 @@ test_ecc_mul_a (unsigned curve, unsigned n, const mp_limb_t *p) - { - /* For each curve, the points 2 g, 3 g and 4 g */ - static const struct ecc_ref_point ref[6][3] = { -- { { "dafebf5828783f2ad35534631588a3f629a70fb16982a888", -- "dd6bda0d993da0fa46b27bbc141b868f59331afa5c7e93ab" }, -- { "76e32a2557599e6edcd283201fb2b9aadfd0d359cbb263da", -- "782c37e372ba4520aa62e0fed121d49ef3b543660cfd05fd" }, -- { "35433907297cc378b0015703374729d7a4fe46647084e4ba", -- "a2649984f2135c301ea3acb0776cd4f125389b311db3be32" } -- }, -- { { "706a46dc76dcb76798e60e6d89474788d16dc18032d268fd1a704fa6", -- "1c2b76a7bc25e7702a704fa986892849fca629487acf3709d2e4e8bb" }, -- { "df1b1d66a551d0d31eff822558b9d2cc75c2180279fe0d08fd896d04", -- "a3f7f03cadd0be444c0aa56830130ddf77d317344e1af3591981a925" }, -- { "ae99feebb5d26945b54892092a8aee02912930fa41cd114e40447301", -- "482580a0ec5bc47e88bc8c378632cd196cb3fa058a7114eb03054c9" }, -- }, - { { "7cf27b188d034f7e8a52380304b51ac3c08969e277f21b35a60b48fc47669978", - "7775510db8ed040293d9ac69f7430dbba7dade63ce982299e04b79d227873d1" }, - { "5ecbe4d1a6330a44c8f7ef951d4bf165e6c6b721efada985fb41661bc6e7fd6c", diff --git a/SOURCES/nettle-3.4-annocheck.patch b/SOURCES/nettle-3.4-annocheck.patch deleted file mode 100644 index 825f34e..0000000 --- a/SOURCES/nettle-3.4-annocheck.patch +++ /dev/null @@ -1,12 +0,0 @@ -diff -up nettle-3.4/Makefile.in.annocheck nettle-3.4/Makefile.in ---- nettle-3.4/Makefile.in.annocheck 2018-10-16 15:06:53.340138708 +0200 -+++ nettle-3.4/Makefile.in 2018-10-16 15:12:25.150276379 +0200 -@@ -377,7 
+377,7 @@ ecc-25519.$(OBJEXT): ecc-25519.h - - .asm.$(OBJEXT): $(srcdir)/asm.m4 machine.m4 config.m4 - $(M4) $(srcdir)/asm.m4 machine.m4 config.m4 $< >$*.s -- $(COMPILE) -c $*.s -+ $(COMPILE) -c -Wa,--generate-missing-build-notes=yes $*.s - @echo "$@ : $< $(srcdir)/asm.m4 machine.m4 config.m4" >$@.d - - # Texinfo rules diff --git a/SOURCES/nettle-3.4.1-ecdsa-verify.patch b/SOURCES/nettle-3.4.1-ecdsa-verify.patch deleted file mode 100644 index c46f185..0000000 --- a/SOURCES/nettle-3.4.1-ecdsa-verify.patch +++ /dev/null @@ -1,109 +0,0 @@ -From 932ea29845da1ae350d9c056cb2cb0379a66d642 Mon Sep 17 00:00:00 2001 -From: Daiki Ueno -Date: Tue, 30 Mar 2021 09:22:47 +0200 -Subject: [PATCH] Port upstream hardening of EC scaler multiplication - -Some internal functions used in point multiplications are known to -misbehave if the scaler is out-of-range. This performs canonical -reduction on scalers, before point multiplication. - -Signed-off-by: Daiki Ueno ---- - ecc-ecdsa-sign.c | 7 +++++-- - ecc-ecdsa-verify.c | 14 ++++++++++++-- - eddsa-hash.c | 9 +++++++-- - 3 files changed, 24 insertions(+), 6 deletions(-) - -diff --git a/ecc-ecdsa-sign.c b/ecc-ecdsa-sign.c -index 3b9e9cc1..45062528 100644 ---- a/ecc-ecdsa-sign.c -+++ b/ecc-ecdsa-sign.c -@@ -62,6 +62,8 @@ ecc_ecdsa_sign (const struct ecc_curve *ecc, - mp_limb_t *rp, mp_limb_t *sp, - mp_limb_t *scratch) - { -+ mp_limb_t cy; -+ - #define P scratch - #define kinv scratch /* Needs 5*ecc->p.size for computation */ - #define hp (scratch + ecc->p.size) /* NOTE: ecc->p.size + 1 limbs! */ -@@ -91,8 +93,9 @@ ecc_ecdsa_sign (const struct ecc_curve *ecc, - ecc_modq_mul (ecc, tp, zp, rp); - ecc_modq_add (ecc, hp, hp, tp); - ecc_modq_mul (ecc, tp, hp, kinv); -- -- mpn_copyi (sp, tp, ecc->p.size); -+ /* Ensure canonical reduction. */ -+ cy = mpn_sub_n (sp, tp, ecc->q.m, ecc->q.size); -+ cnd_copy (cy, sp, tp, ecc->q.size); - #undef P - #undef hp - #undef kinv -diff --git a/ecc-ecdsa-verify.c b/ecc-ecdsa-verify.c -index d7f5b684..6b8acb07 100644 ---- a/ecc-ecdsa-verify.c -+++ b/ecc-ecdsa-verify.c -@@ -75,6 +75,8 @@ ecc_ecdsa_verify (const struct ecc_curve *ecc, - const mp_limb_t *rp, const mp_limb_t *sp, - mp_limb_t *scratch) - { -+ mp_limb_t cy; -+ - /* Procedure, according to RFC 6090, "KT-I". q denotes the group - order. - -@@ -98,6 +100,7 @@ ecc_ecdsa_verify (const struct ecc_curve *ecc, - #define P1 (scratch + 4*ecc->p.size) - #define sinv (scratch) - #define hp (scratch + ecc->p.size) -+#define tp (scratch + 4*ecc->p.size) - - if (! (ecdsa_in_range (ecc, rp) - && ecdsa_in_range (ecc, sp))) -@@ -112,10 +115,16 @@ ecc_ecdsa_verify (const struct ecc_curve *ecc, - - /* u1 = h / s, P1 = u1 * G */ - ecc_hash (&ecc->q, hp, length, digest); -- ecc_modq_mul (ecc, u1, hp, sinv); -+ ecc_modq_mul (ecc, tp, hp, sinv); -+ /* Ensure canonical reduction. */ -+ cy = mpn_sub_n (u1, tp, ecc->q.m, ecc->q.size); -+ cnd_copy (cy, u1, tp, ecc->q.size); - - /* u2 = r / s, P2 = u2 * Y */ -- ecc_modq_mul (ecc, u2, rp, sinv); -+ ecc_modq_mul (ecc, hp, rp, sinv); -+ /* Ensure canonical reduction. 
*/ -+ cy = mpn_sub_n (u2, hp, ecc->q.m, ecc->q.size); -+ cnd_copy (cy, u2, hp, ecc->q.size); - - /* Total storage: 5*ecc->p.size + ecc->mul_itch */ - ecc->mul (ecc, P2, u2, pp, u2 + ecc->p.size); -@@ -154,4 +163,5 @@ ecc_ecdsa_verify (const struct ecc_curve *ecc, - #undef u2 - #undef hp - #undef u1 -+#undef tp - } -diff --git a/eddsa-hash.c b/eddsa-hash.c -index 4fb79f1b..53c6fc49 100644 ---- a/eddsa-hash.c -+++ b/eddsa-hash.c -@@ -45,7 +45,12 @@ void - _eddsa_hash (const struct ecc_modulo *m, - mp_limb_t *rp, const uint8_t *digest) - { -+ mp_limb_t cy; -+ - size_t nbytes = 1 + m->bit_size / 8; -- mpn_set_base256_le (rp, 2*m->size, digest, 2*nbytes); -- m->mod (m, rp); -+ mpn_set_base256_le (rp + m->size, 2*m->size, digest, 2*nbytes); -+ m->mod (m, rp + m->size); -+ /* Ensure canonical reduction. */ -+ cy = mpn_sub_n (rp, rp + m->size, m->m, m->size); -+ cnd_copy (cy, rp, rp + m->size, m->size); - } --- -2.30.2 - diff --git a/SOURCES/nettle-3.4.1-enable-intel-cet.patch b/SOURCES/nettle-3.4.1-enable-intel-cet.patch deleted file mode 100644 index 1224558..0000000 --- a/SOURCES/nettle-3.4.1-enable-intel-cet.patch +++ /dev/null @@ -1,210 +0,0 @@ -diff --git a/asm.m4 b/asm.m4 -index 8da47201..59d64098 100644 ---- a/asm.m4 -+++ b/asm.m4 -@@ -32,7 +32,7 @@ define(,<>)dnl - define(, - <.globl C_NAME($1) - DECLARE_FUNC(C_NAME($1)) --C_NAME($1):>) -+C_NAME($1): ASM_X86_ENDBR>) - - define(, - , <@ASM_ALIGN_LOG@>)dnl - define(, <@W64_ABI@>)dnl - define(, <@ASM_RODATA@>)dnl -+define(,<@ASM_X86_ENDBR@>)dnl -+define(,<@ASM_X86_MARK_CET_ALIGN@>)dnl - divert(1) -+@ASM_X86_MARK_CET@ - @ASM_MARK_NOEXEC_STACK@ - divert ---- a/configure.ac 2018-12-04 21:56:06.000000000 +0100 -+++ b/configure.ac 2020-05-15 11:46:39.152373137 +0200 -@@ -787,6 +787,68 @@ - ASM_ALIGN_LOG="$nettle_cv_asm_align_log" - fi - -+dnl Define -+dnl 1. ASM_X86_ENDBR for endbr32/endbr64. -+dnl 2. ASM_X86_MARK_CET to add a .note.gnu.property section to mark -+dnl Intel CET support if needed. -+dnl 3. ASM_X86_MARK_CET_ALIGN to align ASM_X86_MARK_CET. 
-+AC_CACHE_CHECK([if Intel CET is enabled], -+ [nettle_cv_asm_x86_intel_cet], -+ [AC_TRY_COMPILE([ -+#ifndef __CET__ -+#error Intel CET is not enabled -+#endif -+ ], [], -+ [nettle_cv_asm_x86_intel_cet=yes], -+ [nettle_cv_asm_x86_intel_cet=no])]) -+if test "$nettle_cv_asm_x86_intel_cet" = yes; then -+ case $ABI in -+ 32|standard) -+ ASM_X86_ENDBR=endbr32 -+ ASM_X86_MARK_CET_ALIGN=2 -+ ;; -+ 64) -+ ASM_X86_ENDBR=endbr64 -+ ASM_X86_MARK_CET_ALIGN=3 -+ ;; -+ x32) -+ ASM_X86_ENDBR=endbr64 -+ ASM_X86_MARK_CET_ALIGN=2 -+ ;; -+ esac -+ AC_CACHE_CHECK([if .note.gnu.property section is needed], -+ [nettle_cv_asm_x86_gnu_property], -+ [AC_TRY_COMPILE([ -+#if !defined __ELF__ || !defined __CET__ -+#error GNU property is not needed -+#endif -+ ], [], -+ [nettle_cv_asm_x86_gnu_property=yes], -+ [nettle_cv_asm_x86_gnu_property=no])]) -+else -+ nettle_cv_asm_x86_gnu_property=no -+fi -+if test "$nettle_cv_asm_x86_gnu_property" = yes; then -+ ASM_X86_MARK_CET=' -+ .pushsection ".note.gnu.property", "a" -+ .p2align ASM_X86_MARK_CET_ALIGN -+ .long 1f - 0f -+ .long 4f - 1f -+ .long 5 -+0: -+ .asciz "GNU" -+1: -+ .p2align ASM_X86_MARK_CET_ALIGN -+ .long 0xc0000002 -+ .long 3f - 2f -+2: -+ .long 3 -+3: -+ .p2align ASM_X86_MARK_CET_ALIGN -+4: -+ .popsection' -+fi -+ - AC_SUBST(ASM_SYMBOL_PREFIX) - AC_SUBST(ASM_ELF_STYLE) - AC_SUBST(ASM_COFF_STYLE) -@@ -796,6 +858,9 @@ - AC_SUBST(ASM_ALIGN_LOG) - AC_SUBST(W64_ABI) - AC_SUBST(EMULATOR) -+AC_SUBST(ASM_X86_ENDBR) -+AC_SUBST(ASM_X86_MARK_CET) -+AC_SUBST(ASM_X86_MARK_CET_ALIGN) - - AC_SUBST(LIBNETTLE_MAJOR) - AC_SUBST(LIBNETTLE_MINOR) -diff --git a/testsuite/.test-rules.make b/testsuite/.test-rules.make -index 922a2c7f..9de8f412 100644 ---- a/testsuite/.test-rules.make -+++ b/testsuite/.test-rules.make -@@ -178,6 +178,9 @@ xts-test$(EXEEXT): xts-test.$(OBJEXT) - pbkdf2-test$(EXEEXT): pbkdf2-test.$(OBJEXT) - $(LINK) pbkdf2-test.$(OBJEXT) $(TEST_OBJS) -o pbkdf2-test$(EXEEXT) - -+x86-ibt-test$(EXEEXT): x86-ibt-test.$(OBJEXT) -+ $(LINK) x86-ibt-test.$(OBJEXT) $(TEST_OBJS) -o x86-ibt-test$(EXEEXT) -+ - sexp-test$(EXEEXT): sexp-test.$(OBJEXT) - $(LINK) sexp-test.$(OBJEXT) $(TEST_OBJS) -o sexp-test$(EXEEXT) - ---- a/testsuite/Makefile.in 2018-12-04 21:56:06.000000000 +0100 -+++ b/testsuite/Makefile.in 2020-05-15 11:21:15.673321598 +0200 -@@ -31,7 +31,8 @@ - hmac-test.c umac-test.c \ - meta-hash-test.c meta-cipher-test.c\ - meta-aead-test.c meta-armor-test.c \ -- buffer-test.c yarrow-test.c pbkdf2-test.c -+ buffer-test.c yarrow-test.c pbkdf2-test.c \ -+ x86-ibt-test.c - - TS_HOGWEED_SOURCES = sexp-test.c sexp-format-test.c \ - rsa2sexp-test.c sexp2rsa-test.c \ -diff --git a/testsuite/x86-ibt-test.c b/testsuite/x86-ibt-test.c -new file mode 100644 -index 00000000..1f3d1d67 ---- /dev/null -+++ b/testsuite/x86-ibt-test.c -@@ -0,0 +1,69 @@ -+#include "testutils.h" -+#if defined(__GNUC__) && (defined(__i386__) || defined(__x86_64__)) \ -+ && defined(__CET__) && defined(__linux__) -+#include -+ -+static void -+segfault_handler(int signo) -+{ -+ exit(0); -+} -+ -+static void -+ibt_violation(void) -+{ -+#ifdef __i386__ -+ unsigned int reg; -+ asm volatile("lea 1f, %0\n\t" -+ "jmp *%0\n" -+ "1:" : "=r" (reg)); -+#else -+ unsigned long long reg; -+ asm volatile("lea 1f(%%rip), %0\n\t" -+ "jmp *%0\n" -+ "1:" : "=r" (reg)); -+#endif -+} -+ -+#ifdef __i386__ -+static unsigned int -+_get_ssp(void) -+{ -+ unsigned int ssp; -+ asm volatile("xor %0, %0\n\trdsspd %0" : "=r" (ssp)); -+ return ssp; -+} -+#else -+static unsigned long long -+_get_ssp(void) -+{ -+ unsigned long long ssp; -+ asm 
volatile("xor %0, %0\n\trdsspq %0" : "=r" (ssp)); -+ return ssp; -+} -+#endif -+ -+void -+test_main(void) -+{ -+ /* NB: This test should trigger SIGSEGV on CET platforms. _get_ssp -+ returns the address of shadow stack pointer. If the address of -+ shadow stack pointer is 0, SHSTK is disabled and we assume that -+ IBT is also disabled. */ -+ if (_get_ssp() == 0) -+ { -+ ibt_violation(); -+ SKIP(); -+ } -+ -+ signal(SIGSEGV, segfault_handler); -+ ibt_violation(); -+ FAIL(); -+} -+#else -+void -+test_main(void) -+{ -+ SKIP(); -+} -+#endif --- -2.25.4 - diff --git a/SOURCES/nettle-3.4.1-powerpc64-aes-asm.patch b/SOURCES/nettle-3.4.1-powerpc64-aes-asm.patch deleted file mode 100644 index 8bcdbe7..0000000 --- a/SOURCES/nettle-3.4.1-powerpc64-aes-asm.patch +++ /dev/null @@ -1,1142 +0,0 @@ -diff --git a/Makefile.in b/Makefile.in -index b43e494f..ec46a9df 100644 ---- a/Makefile.in -+++ b/Makefile.in -@@ -189,7 +189,7 @@ hogweed_SOURCES = sexp.c sexp-format.c \ - ed25519-sha512-pubkey.c \ - ed25519-sha512-sign.c ed25519-sha512-verify.c - --OPT_SOURCES = fat-x86_64.c fat-arm.c mini-gmp.c -+OPT_SOURCES = fat-arm.c fat-ppc.c fat-x86_64.c mini-gmp.c - - HEADERS = aes.h arcfour.h arctwo.h asn1.h blowfish.h \ - base16.h base64.h bignum.h buffer.h camellia.h cast128.h \ -@@ -573,7 +573,8 @@ distdir: $(DISTFILES) - done - set -e; for d in sparc32 sparc64 x86 \ - x86_64 x86_64/aesni x86_64/fat \ -- arm arm/neon arm/v6 arm/fat ; do \ -+ arm arm/neon arm/v6 arm/fat \ -+ powerpc64 powerpc64/p8 powerpc64/fat ; do \ - mkdir "$(distdir)/$$d" ; \ - find "$(srcdir)/$$d" -maxdepth 1 '(' -name '*.asm' -o -name '*.m4' ')' \ - -exec cp '{}' "$(distdir)/$$d" ';' ; \ -diff --git a/aes-decrypt-internal.c b/aes-decrypt-internal.c -index 709c52f9..9e8cf34a 100644 ---- a/aes-decrypt-internal.c -+++ b/aes-decrypt-internal.c -@@ -40,6 +40,16 @@ - #include "aes-internal.h" - #include "macros.h" - -+/* For fat builds */ -+#if HAVE_NATIVE_aes_decrypt -+void -+_nettle_aes_decrypt_c(unsigned rounds, const uint32_t *keys, -+ const struct aes_table *T, -+ size_t length, uint8_t *dst, -+ const uint8_t *src); -+#define _nettle_aes_decrypt _nettle_aes_decrypt_c -+#endif -+ - void - _nettle_aes_decrypt(unsigned rounds, const uint32_t *keys, - const struct aes_table *T, -diff --git a/aes-encrypt-internal.c b/aes-encrypt-internal.c -index 9f61386d..ad17e6c1 100644 ---- a/aes-encrypt-internal.c -+++ b/aes-encrypt-internal.c -@@ -40,6 +40,16 @@ - #include "aes-internal.h" - #include "macros.h" - -+/* For fat builds */ -+#if HAVE_NATIVE_aes_encrypt -+void -+_nettle_aes_encrypt_c(unsigned rounds, const uint32_t *keys, -+ const struct aes_table *T, -+ size_t length, uint8_t *dst, -+ const uint8_t *src); -+#define _nettle_aes_encrypt _nettle_aes_encrypt_c -+#endif -+ - void - _nettle_aes_encrypt(unsigned rounds, const uint32_t *keys, - const struct aes_table *T, -diff --git a/asm.m4 b/asm.m4 -index ee377a78..59d64098 100644 ---- a/asm.m4 -+++ b/asm.m4 -@@ -51,6 +51,14 @@ define(, - <.align ifelse(ALIGN_LOG,yes,,$1) - >) - -+define(, , -+WORDS_BIGENDIAN,no,<$2>, -+,WORDS_BIGENDIAN,< -+>) -+ m4exit(1)>)>) -+define(, , <$1>)>) -+ - dnl Struct defining macros - - dnl STRUCTURE(prefix) -diff --git a/config.m4.in b/config.m4.in -index 666e34b8..e480334d 100644 ---- a/config.m4.in -+++ b/config.m4.in -@@ -9,6 +9,7 @@ define(, <@W64_ABI@>)dnl - define(, <@ASM_RODATA@>)dnl - define(,<@ASM_X86_ENDBR@>)dnl - define(,<@ASM_X86_MARK_CET_ALIGN@>)dnl -+define(, <@ASM_WORDS_BIGENDIAN@>)dnl - divert(1) - @ASM_X86_MARK_CET@ - @ASM_MARK_NOEXEC_STACK@ -diff --git 
a/configure.ac b/configure.ac -index 090e43a4..788e6842 100644 ---- a/configure.ac -+++ b/configure.ac -@@ -85,6 +85,10 @@ AC_ARG_ENABLE(x86-aesni, - AC_HELP_STRING([--enable-x86-aesni], [Enable x86_64 aes instructions. (default=no)]),, - [enable_x86_aesni=no]) - -+AC_ARG_ENABLE(power-crypto-ext, -+ AC_HELP_STRING([--enable-power-crypto-ext], [Enable POWER crypto extensions. (default=no)]),, -+ [enable_power_crypto_ext=no]) -+ - AC_ARG_ENABLE(mini-gmp, - AC_HELP_STRING([--enable-mini-gmp], [Enable mini-gmp, used instead of libgmp.]),, - [enable_mini_gmp=no]) -@@ -201,7 +205,11 @@ LSH_FUNC_STRERROR - # getenv_secure is used for fat overrides, - # getline is used in the testsuite - AC_CHECK_FUNCS(secure_getenv getline) --AC_C_BIGENDIAN -+ -+ASM_WORDS_BIGENDIAN=unknown -+AC_C_BIGENDIAN([AC_DEFINE([WORDS_BIGENDIAN], 1) -+ ASM_WORDS_BIGENDIAN=yes], -+ [ASM_WORDS_BIGENDIAN=no]) - - LSH_GCC_ATTRIBUTES - -@@ -310,6 +318,17 @@ case "$host_cpu" in - AC_TRY_COMPILE([ - #if defined(__sgi) && defined(__LP64__) - #error 64-bit mips -+#endif -+ ], [], [ -+ ABI=32 -+ ], [ -+ ABI=64 -+ ]) -+ ;; -+ *powerpc64*) -+ AC_TRY_COMPILE([ -+#if defined(__PPC64__) -+#error 64-bit powerpc - #endif - ], [], [ - ABI=32 -@@ -422,6 +441,18 @@ if test "x$enable_assembler" = xyes ; then - esac - fi - ;; -+ *powerpc64*) -+ if test "$ABI" = 64 ; then -+ asm_path="powerpc64" -+ if test "x$enable_fat" = xyes ; then -+ asm_path="powerpc64/fat $asm_path" -+ OPT_NETTLE_SOURCES="fat-ppc.c $OPT_NETTLE_SOURCES" -+ elif test "x$enable_power_crypto_ext" = xyes ; then -+ asm_path="powerpc64/p8 $asm_path" -+ fi -+ fi -+ ;; -+ - *) - enable_assembler=no - ;; -@@ -544,6 +575,8 @@ AC_SUBST([IF_ASM]) - AH_VERBATIM([HAVE_NATIVE], - [/* Define to 1 each of the following for which a native (ie. CPU specific) - implementation of the corresponding routine exists. */ -+#undef HAVE_NATIVE_aes_decrypt -+#undef HAVE_NATIVE_aes_encrypt - #undef HAVE_NATIVE_ecc_192_modp - #undef HAVE_NATIVE_ecc_192_redc - #undef HAVE_NATIVE_ecc_224_modp -@@ -857,6 +890,7 @@ AC_SUBST(ASM_TYPE_PROGBITS) - AC_SUBST(ASM_MARK_NOEXEC_STACK) - AC_SUBST(ASM_ALIGN_LOG) - AC_SUBST(W64_ABI) -+AC_SUBST(ASM_WORDS_BIGENDIAN) - AC_SUBST(EMULATOR) - AC_SUBST(ASM_X86_ENDBR) - AC_SUBST(ASM_X86_MARK_CET) -diff --git a/fat-ppc.c b/fat-ppc.c -new file mode 100644 -index 00000000..7198e2dd ---- /dev/null -+++ b/fat-ppc.c -@@ -0,0 +1,129 @@ -+/* fat-ppc.c -+ -+ Copyright (C) 2020 Mamone Tarsha -+ -+ This file is part of GNU Nettle. -+ -+ GNU Nettle is free software: you can redistribute it and/or -+ modify it under the terms of either: -+ -+ * the GNU Lesser General Public License as published by the Free -+ Software Foundation; either version 3 of the License, or (at your -+ option) any later version. -+ -+ or -+ -+ * the GNU General Public License as published by the Free -+ Software Foundation; either version 2 of the License, or (at your -+ option) any later version. -+ -+ or both in parallel, as here. -+ -+ GNU Nettle is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ General Public License for more details. -+ -+ You should have received copies of the GNU General Public License and -+ the GNU Lesser General Public License along with this program. If -+ not, see http://www.gnu.org/licenses/. 
-+*/ -+ -+#define _GNU_SOURCE -+ -+#if HAVE_CONFIG_H -+# include "config.h" -+#endif -+ -+#include -+#include -+#include -+#include -+#if defined(__FreeBSD__) && __FreeBSD__ < 12 -+#include -+#else -+#include -+#endif -+ -+#include "nettle-types.h" -+ -+#include "aes-internal.h" -+#include "gcm.h" -+#include "fat-setup.h" -+ -+/* Define from arch/powerpc/include/uapi/asm/cputable.h in Linux kernel */ -+#ifndef PPC_FEATURE2_VEC_CRYPTO -+#define PPC_FEATURE2_VEC_CRYPTO 0x02000000 -+#endif -+ -+struct ppc_features -+{ -+ int have_crypto_ext; -+}; -+ -+static void -+get_ppc_features (struct ppc_features *features) -+{ -+ unsigned long hwcap2 = 0; -+#if defined(__FreeBSD__) -+#if __FreeBSD__ < 12 -+ size_t len = sizeof(hwcap2); -+ sysctlbyname("hw.cpu_features2", &hwcap2, &len, NULL, 0); -+#else -+ elf_aux_info(AT_HWCAP2, &hwcap2, sizeof(hwcap2)); -+#endif -+#else -+ hwcap2 = getauxval(AT_HWCAP2); -+#endif -+ features->have_crypto_ext = -+ (hwcap2 & PPC_FEATURE2_VEC_CRYPTO) == PPC_FEATURE2_VEC_CRYPTO ? 1 : 0; -+} -+ -+DECLARE_FAT_FUNC(_nettle_aes_encrypt, aes_crypt_internal_func) -+DECLARE_FAT_FUNC_VAR(aes_encrypt, aes_crypt_internal_func, c) -+DECLARE_FAT_FUNC_VAR(aes_encrypt, aes_crypt_internal_func, ppc64) -+ -+DECLARE_FAT_FUNC(_nettle_aes_decrypt, aes_crypt_internal_func) -+DECLARE_FAT_FUNC_VAR(aes_decrypt, aes_crypt_internal_func, c) -+DECLARE_FAT_FUNC_VAR(aes_decrypt, aes_crypt_internal_func, ppc64) -+ -+static void CONSTRUCTOR -+fat_init (void) -+{ -+ struct ppc_features features; -+ int verbose; -+ -+ get_ppc_features (&features); -+ -+ verbose = getenv (ENV_VERBOSE) != NULL; -+ if (verbose) -+ fprintf (stderr, "libnettle: cpu features: %s\n", -+ features.have_crypto_ext ? "crypto extensions" : ""); -+ -+ if (features.have_crypto_ext) -+ { -+ if (verbose) -+ fprintf (stderr, "libnettle: enabling arch 2.07 code.\n"); -+ _nettle_aes_encrypt_vec = _nettle_aes_encrypt_ppc64; -+ _nettle_aes_decrypt_vec = _nettle_aes_decrypt_ppc64; -+ } -+ else -+ { -+ _nettle_aes_encrypt_vec = _nettle_aes_encrypt_c; -+ _nettle_aes_decrypt_vec = _nettle_aes_decrypt_c; -+ } -+} -+ -+DEFINE_FAT_FUNC(_nettle_aes_encrypt, void, -+ (unsigned rounds, const uint32_t *keys, -+ const struct aes_table *T, -+ size_t length, uint8_t *dst, -+ const uint8_t *src), -+ (rounds, keys, T, length, dst, src)) -+ -+DEFINE_FAT_FUNC(_nettle_aes_decrypt, void, -+ (unsigned rounds, const uint32_t *keys, -+ const struct aes_table *T, -+ size_t length, uint8_t *dst, -+ const uint8_t *src), -+ (rounds, keys, T, length, dst, src)) -diff --git a/powerpc64/fat/aes-decrypt-internal-2.asm b/powerpc64/fat/aes-decrypt-internal-2.asm -new file mode 100644 -index 00000000..3a4e08c2 ---- /dev/null -+++ b/powerpc64/fat/aes-decrypt-internal-2.asm -@@ -0,0 +1,37 @@ -+C powerpc64/fat/aes-decrypt-internal-2.asm -+ -+ -+ifelse(< -+ Copyright (C) 2020 Mamone Tarsha -+ -+ This file is part of GNU Nettle. -+ -+ GNU Nettle is free software: you can redistribute it and/or -+ modify it under the terms of either: -+ -+ * the GNU Lesser General Public License as published by the Free -+ Software Foundation; either version 3 of the License, or (at your -+ option) any later version. -+ -+ or -+ -+ * the GNU General Public License as published by the Free -+ Software Foundation; either version 2 of the License, or (at your -+ option) any later version. -+ -+ or both in parallel, as here. 
-+ -+ GNU Nettle is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ General Public License for more details. -+ -+ You should have received copies of the GNU General Public License and -+ the GNU Lesser General Public License along with this program. If -+ not, see http://www.gnu.org/licenses/. -+>) -+ -+dnl PROLOGUE(_nettle_aes_decrypt) picked up by configure -+ -+define(, <$1_ppc64>) -+include_src() -diff --git a/powerpc64/fat/aes-encrypt-internal-2.asm b/powerpc64/fat/aes-encrypt-internal-2.asm -new file mode 100644 -index 00000000..42126e4f ---- /dev/null -+++ b/powerpc64/fat/aes-encrypt-internal-2.asm -@@ -0,0 +1,37 @@ -+C powerpc64/fat/aes-encrypt-internal-2.asm -+ -+ -+ifelse(< -+ Copyright (C) 2020 Mamone Tarsha -+ -+ This file is part of GNU Nettle. -+ -+ GNU Nettle is free software: you can redistribute it and/or -+ modify it under the terms of either: -+ -+ * the GNU Lesser General Public License as published by the Free -+ Software Foundation; either version 3 of the License, or (at your -+ option) any later version. -+ -+ or -+ -+ * the GNU General Public License as published by the Free -+ Software Foundation; either version 2 of the License, or (at your -+ option) any later version. -+ -+ or both in parallel, as here. -+ -+ GNU Nettle is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ General Public License for more details. -+ -+ You should have received copies of the GNU General Public License and -+ the GNU Lesser General Public License along with this program. If -+ not, see http://www.gnu.org/licenses/. -+>) -+ -+dnl PROLOGUE(_nettle_aes_encrypt) picked up by configure -+ -+define(, <$1_ppc64>) -+include_src() -diff --git a/powerpc64/machine.m4 b/powerpc64/machine.m4 -new file mode 100644 -index 00000000..b76bb8b1 ---- /dev/null -+++ b/powerpc64/machine.m4 -@@ -0,0 +1,36 @@ -+define(, -+<.globl C_NAME($1) -+DECLARE_FUNC(C_NAME($1)) -+ifelse(WORDS_BIGENDIAN,no, -+,<.align FUNC_ALIGN>) -+C_NAME($1): -+addis 2,12,(.TOC.-C_NAME($1))@ha -+addi 2,2,(.TOC.-C_NAME($1))@l -+.localentry C_NAME($1), .-C_NAME($1)>, -+<.section ".opd","aw" -+.align 3 -+C_NAME($1): -+.quad .C_NAME($1),.TOC.@tocbase,0 -+.previous -+ifdef(,<.align FUNC_ALIGN>) -+.C_NAME($1):>) -+undefine()>) -+ -+define(, -+, -+<.size .C_NAME($1), . - .C_NAME($1) -+.size C_NAME($1), . - .C_NAME($1)>)>) -+ -+C Get vector-scalar register from vector register -+C VSR(VR) -+define(,<32+$1>) -+ -+C Load the quadword in DATA_SRC storage into -+C VEC_DST. GPR is general-purpose register -+C used to obtain the effective address of -+C DATA_SRC storage. -+C DATA_LOAD_VEC(VEC_DST, DATA_SRC, GPR) -+define(, -+) -diff --git a/powerpc64/p8/aes-decrypt-internal.asm b/powerpc64/p8/aes-decrypt-internal.asm -new file mode 100644 -index 00000000..bfedb32b ---- /dev/null -+++ b/powerpc64/p8/aes-decrypt-internal.asm -@@ -0,0 +1,356 @@ -+C powerpc64/p8/aes-decrypt-internal.asm -+ -+ifelse(< -+ Copyright (C) 2020 Mamone Tarsha -+ This file is part of GNU Nettle. -+ -+ GNU Nettle is free software: you can redistribute it and/or -+ modify it under the terms of either: -+ -+ * the GNU Lesser General Public License as published by the Free -+ Software Foundation; either version 3 of the License, or (at your -+ option) any later version. 
-+ -+ or -+ -+ * the GNU General Public License as published by the Free -+ Software Foundation; either version 2 of the License, or (at your -+ option) any later version. -+ -+ or both in parallel, as here. -+ -+ GNU Nettle is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ General Public License for more details. -+ -+ You should have received copies of the GNU General Public License and -+ the GNU Lesser General Public License along with this program. If -+ not, see http://www.gnu.org/licenses/. -+>) -+ -+C Register usage: -+ -+define(, <1>) -+define(, <2>) -+ -+define(, <3>) -+define(, <4>) -+define(, <6>) -+define(, <7>) -+define(, <8>) -+ -+define(, <0>) -+ -+define(, <1>) -+define(, <2>) -+define(, <3>) -+define(, <4>) -+define(, <5>) -+define(, <6>) -+define(, <7>) -+define(, <8>) -+define(, <9>) -+ -+C ZERO vector register is used in place of RoundKey -+C for vncipher instruction because the order of InvMixColumns -+C and Xor processes are flipped in that instruction. -+C The Xor process with RoundKey is executed afterward. -+define(, <10>) -+ -+.file "aes-decrypt-internal.asm" -+ -+.text -+ -+ C _aes_decrypt(unsigned rounds, const uint32_t *keys, -+ C const struct aes_table *T, -+ C size_t length, uint8_t *dst, -+ C uint8_t *src) -+ -+define(, <5>) -+PROLOGUE(_nettle_aes_decrypt) -+ vxor ZERO,ZERO,ZERO -+ -+ DATA_LOAD_VEC(swap_mask,.swap_mask,5) -+ -+ subi ROUNDS,ROUNDS,1 -+ srdi LENGTH,LENGTH,4 -+ -+ srdi 5,LENGTH,3 #8x loop count -+ cmpldi 5,0 -+ beq L4x -+ -+ std 25,-56(SP); -+ std 26,-48(SP); -+ std 27,-40(SP); -+ std 28,-32(SP); -+ std 29,-24(SP); -+ std 30,-16(SP); -+ std 31,-8(SP); -+ -+ li 25,0x10 -+ li 26,0x20 -+ li 27,0x30 -+ li 28,0x40 -+ li 29,0x50 -+ li 30,0x60 -+ li 31,0x70 -+ -+.align 5 -+Lx8_loop: -+ lxvd2x VSR(K),0,KEYS -+ vperm K,K,K,swap_mask -+ -+ lxvd2x VSR(S0),0,SRC -+ lxvd2x VSR(S1),25,SRC -+ lxvd2x VSR(S2),26,SRC -+ lxvd2x VSR(S3),27,SRC -+ lxvd2x VSR(S4),28,SRC -+ lxvd2x VSR(S5),29,SRC -+ lxvd2x VSR(S6),30,SRC -+ lxvd2x VSR(S7),31,SRC -+ -+IF_LE() -+ -+ vxor S0,S0,K -+ vxor S1,S1,K -+ vxor S2,S2,K -+ vxor S3,S3,K -+ vxor S4,S4,K -+ vxor S5,S5,K -+ vxor S6,S6,K -+ vxor S7,S7,K -+ -+ mtctr ROUNDS -+ li 10,0x10 -+.align 5 -+L8x_round_loop: -+ lxvd2x VSR(K),10,KEYS -+ vperm K,K,K,swap_mask -+ vncipher S0,S0,ZERO -+ vncipher S1,S1,ZERO -+ vncipher S2,S2,ZERO -+ vncipher S3,S3,ZERO -+ vncipher S4,S4,ZERO -+ vncipher S5,S5,ZERO -+ vncipher S6,S6,ZERO -+ vncipher S7,S7,ZERO -+ vxor S0,S0,K -+ vxor S1,S1,K -+ vxor S2,S2,K -+ vxor S3,S3,K -+ vxor S4,S4,K -+ vxor S5,S5,K -+ vxor S6,S6,K -+ vxor S7,S7,K -+ addi 10,10,0x10 -+ bdnz L8x_round_loop -+ -+ lxvd2x VSR(K),10,KEYS -+ vperm K,K,K,swap_mask -+ vncipherlast S0,S0,K -+ vncipherlast S1,S1,K -+ vncipherlast S2,S2,K -+ vncipherlast S3,S3,K -+ vncipherlast S4,S4,K -+ vncipherlast S5,S5,K -+ vncipherlast S6,S6,K -+ vncipherlast S7,S7,K -+ -+IF_LE() -+ -+ stxvd2x VSR(S0),0,DST -+ stxvd2x VSR(S1),25,DST -+ stxvd2x VSR(S2),26,DST -+ stxvd2x VSR(S3),27,DST -+ stxvd2x VSR(S4),28,DST -+ stxvd2x VSR(S5),29,DST -+ stxvd2x VSR(S6),30,DST -+ stxvd2x VSR(S7),31,DST -+ -+ addi SRC,SRC,0x80 -+ addi DST,DST,0x80 -+ subic. 
5,5,1 -+ bne Lx8_loop -+ -+ ld 25,-56(SP); -+ ld 26,-48(SP); -+ ld 27,-40(SP); -+ ld 28,-32(SP); -+ ld 29,-24(SP); -+ ld 30,-16(SP); -+ ld 31,-8(SP); -+ -+ clrldi LENGTH,LENGTH,61 -+ -+L4x: -+ srdi 5,LENGTH,2 -+ cmpldi 5,0 -+ beq L2x -+ -+ lxvd2x VSR(K),0,KEYS -+ vperm K,K,K,swap_mask -+ -+ lxvd2x VSR(S0),0,SRC -+ li 9,0x10 -+ lxvd2x VSR(S1),9,SRC -+ addi 9,9,0x10 -+ lxvd2x VSR(S2),9,SRC -+ addi 9,9,0x10 -+ lxvd2x VSR(S3),9,SRC -+ -+IF_LE() -+ -+ vxor S0,S0,K -+ vxor S1,S1,K -+ vxor S2,S2,K -+ vxor S3,S3,K -+ -+ mtctr ROUNDS -+ li 10,0x10 -+.align 5 -+L4x_round_loop: -+ lxvd2x VSR(K),10,KEYS -+ vperm K,K,K,swap_mask -+ vncipher S0,S0,ZERO -+ vncipher S1,S1,ZERO -+ vncipher S2,S2,ZERO -+ vncipher S3,S3,ZERO -+ vxor S0,S0,K -+ vxor S1,S1,K -+ vxor S2,S2,K -+ vxor S3,S3,K -+ addi 10,10,0x10 -+ bdnz L4x_round_loop -+ -+ lxvd2x VSR(K),10,KEYS -+ vperm K,K,K,swap_mask -+ vncipherlast S0,S0,K -+ vncipherlast S1,S1,K -+ vncipherlast S2,S2,K -+ vncipherlast S3,S3,K -+ -+IF_LE() -+ -+ stxvd2x VSR(S0),0,DST -+ li 9,0x10 -+ stxvd2x VSR(S1),9,DST -+ addi 9,9,0x10 -+ stxvd2x VSR(S2),9,DST -+ addi 9,9,0x10 -+ stxvd2x VSR(S3),9,DST -+ -+ addi SRC,SRC,0x40 -+ addi DST,DST,0x40 -+ -+ clrldi LENGTH,LENGTH,62 -+ -+L2x: -+ srdi 5,LENGTH,1 -+ cmpldi 5,0 -+ beq L1x -+ -+ lxvd2x VSR(K),0,KEYS -+ vperm K,K,K,swap_mask -+ -+ lxvd2x VSR(S0),0,SRC -+ li 9,0x10 -+ lxvd2x VSR(S1),9,SRC -+ -+IF_LE() -+ -+ vxor S0,S0,K -+ vxor S1,S1,K -+ -+ mtctr ROUNDS -+ li 10,0x10 -+.align 5 -+L2x_round_loop: -+ lxvd2x VSR(K),10,KEYS -+ vperm K,K,K,swap_mask -+ vncipher S0,S0,ZERO -+ vncipher S1,S1,ZERO -+ vxor S0,S0,K -+ vxor S1,S1,K -+ addi 10,10,0x10 -+ bdnz L2x_round_loop -+ -+ lxvd2x VSR(K),10,KEYS -+ vperm K,K,K,swap_mask -+ vncipherlast S0,S0,K -+ vncipherlast S1,S1,K -+ -+IF_LE() -+ -+ stxvd2x VSR(S0),0,DST -+ li 9,0x10 -+ stxvd2x VSR(S1),9,DST -+ -+ addi SRC,SRC,0x20 -+ addi DST,DST,0x20 -+ -+ clrldi LENGTH,LENGTH,63 -+ -+L1x: -+ cmpldi LENGTH,0 -+ beq Ldone -+ -+ lxvd2x VSR(K),0,KEYS -+ vperm K,K,K,swap_mask -+ -+ lxvd2x VSR(S0),0,SRC -+ -+IF_LE() -+ -+ vxor S0,S0,K -+ -+ mtctr ROUNDS -+ li 10,0x10 -+.align 5 -+L1x_round_loop: -+ lxvd2x VSR(K),10,KEYS -+ vperm K,K,K,swap_mask -+ vncipher S0,S0,ZERO -+ vxor S0,S0,K -+ addi 10,10,0x10 -+ bdnz L1x_round_loop -+ -+ lxvd2x VSR(K),10,KEYS -+ vperm K,K,K,swap_mask -+ vncipherlast S0,S0,K -+ -+IF_LE() -+ -+ stxvd2x VSR(S0),0,DST -+ -+Ldone: -+ blr -+EPILOGUE(_nettle_aes_decrypt) -+ -+ .data -+ .align 4 -+.swap_mask: -+IF_LE(<.byte 8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7>) -+IF_BE(<.byte 3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12>) -diff --git a/powerpc64/p8/aes-encrypt-internal.asm b/powerpc64/p8/aes-encrypt-internal.asm -new file mode 100644 -index 00000000..67c7e597 ---- /dev/null -+++ b/powerpc64/p8/aes-encrypt-internal.asm -@@ -0,0 +1,333 @@ -+C powerpc64/p8/aes-encrypt-internal.asm -+ -+ifelse(< -+ Copyright (C) 2020 Mamone Tarsha -+ This file is part of GNU Nettle. -+ -+ GNU Nettle is free software: you can redistribute it and/or -+ modify it under the terms of either: -+ -+ * the GNU Lesser General Public License as published by the Free -+ Software Foundation; either version 3 of the License, or (at your -+ option) any later version. -+ -+ or -+ -+ * the GNU General Public License as published by the Free -+ Software Foundation; either version 2 of the License, or (at your -+ option) any later version. -+ -+ or both in parallel, as here. 
-+ -+ GNU Nettle is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ General Public License for more details. -+ -+ You should have received copies of the GNU General Public License and -+ the GNU Lesser General Public License along with this program. If -+ not, see http://www.gnu.org/licenses/. -+>) -+ -+C Register usage: -+ -+define(, <1>) -+define(, <2>) -+ -+define(, <3>) -+define(, <4>) -+define(, <6>) -+define(, <7>) -+define(, <8>) -+ -+define(, <0>) -+ -+define(, <1>) -+define(, <2>) -+define(, <3>) -+define(, <4>) -+define(, <5>) -+define(, <6>) -+define(, <7>) -+define(, <8>) -+define(, <9>) -+ -+.file "aes-encrypt-internal.asm" -+ -+.text -+ -+ C _aes_encrypt(unsigned rounds, const uint32_t *keys, -+ C const struct aes_table *T, -+ C size_t length, uint8_t *dst, -+ C uint8_t *src) -+ -+define(, <5>) -+PROLOGUE(_nettle_aes_encrypt) -+ DATA_LOAD_VEC(swap_mask,.swap_mask,5) -+ -+ subi ROUNDS,ROUNDS,1 -+ srdi LENGTH,LENGTH,4 -+ -+ srdi 5,LENGTH,3 #8x loop count -+ cmpldi 5,0 -+ beq L4x -+ -+ std 25,-56(SP); -+ std 26,-48(SP); -+ std 27,-40(SP); -+ std 28,-32(SP); -+ std 29,-24(SP); -+ std 30,-16(SP); -+ std 31,-8(SP); -+ -+ li 25,0x10 -+ li 26,0x20 -+ li 27,0x30 -+ li 28,0x40 -+ li 29,0x50 -+ li 30,0x60 -+ li 31,0x70 -+ -+.align 5 -+Lx8_loop: -+ lxvd2x VSR(K),0,KEYS -+ vperm K,K,K,swap_mask -+ -+ lxvd2x VSR(S0),0,SRC -+ lxvd2x VSR(S1),25,SRC -+ lxvd2x VSR(S2),26,SRC -+ lxvd2x VSR(S3),27,SRC -+ lxvd2x VSR(S4),28,SRC -+ lxvd2x VSR(S5),29,SRC -+ lxvd2x VSR(S6),30,SRC -+ lxvd2x VSR(S7),31,SRC -+ -+IF_LE() -+ -+ vxor S0,S0,K -+ vxor S1,S1,K -+ vxor S2,S2,K -+ vxor S3,S3,K -+ vxor S4,S4,K -+ vxor S5,S5,K -+ vxor S6,S6,K -+ vxor S7,S7,K -+ -+ mtctr ROUNDS -+ li 10,0x10 -+.align 5 -+L8x_round_loop: -+ lxvd2x VSR(K),10,KEYS -+ vperm K,K,K,swap_mask -+ vcipher S0,S0,K -+ vcipher S1,S1,K -+ vcipher S2,S2,K -+ vcipher S3,S3,K -+ vcipher S4,S4,K -+ vcipher S5,S5,K -+ vcipher S6,S6,K -+ vcipher S7,S7,K -+ addi 10,10,0x10 -+ bdnz L8x_round_loop -+ -+ lxvd2x VSR(K),10,KEYS -+ vperm K,K,K,swap_mask -+ vcipherlast S0,S0,K -+ vcipherlast S1,S1,K -+ vcipherlast S2,S2,K -+ vcipherlast S3,S3,K -+ vcipherlast S4,S4,K -+ vcipherlast S5,S5,K -+ vcipherlast S6,S6,K -+ vcipherlast S7,S7,K -+ -+IF_LE() -+ -+ stxvd2x VSR(S0),0,DST -+ stxvd2x VSR(S1),25,DST -+ stxvd2x VSR(S2),26,DST -+ stxvd2x VSR(S3),27,DST -+ stxvd2x VSR(S4),28,DST -+ stxvd2x VSR(S5),29,DST -+ stxvd2x VSR(S6),30,DST -+ stxvd2x VSR(S7),31,DST -+ -+ addi SRC,SRC,0x80 -+ addi DST,DST,0x80 -+ subic. 
5,5,1 -+ bne Lx8_loop -+ -+ ld 25,-56(SP); -+ ld 26,-48(SP); -+ ld 27,-40(SP); -+ ld 28,-32(SP); -+ ld 29,-24(SP); -+ ld 30,-16(SP); -+ ld 31,-8(SP); -+ -+ clrldi LENGTH,LENGTH,61 -+ -+L4x: -+ srdi 5,LENGTH,2 -+ cmpldi 5,0 -+ beq L2x -+ -+ lxvd2x VSR(K),0,KEYS -+ vperm K,K,K,swap_mask -+ -+ lxvd2x VSR(S0),0,SRC -+ li 9,0x10 -+ lxvd2x VSR(S1),9,SRC -+ addi 9,9,0x10 -+ lxvd2x VSR(S2),9,SRC -+ addi 9,9,0x10 -+ lxvd2x VSR(S3),9,SRC -+ -+IF_LE() -+ -+ vxor S0,S0,K -+ vxor S1,S1,K -+ vxor S2,S2,K -+ vxor S3,S3,K -+ -+ mtctr ROUNDS -+ li 10,0x10 -+.align 5 -+L4x_round_loop: -+ lxvd2x VSR(K),10,KEYS -+ vperm K,K,K,swap_mask -+ vcipher S0,S0,K -+ vcipher S1,S1,K -+ vcipher S2,S2,K -+ vcipher S3,S3,K -+ addi 10,10,0x10 -+ bdnz L4x_round_loop -+ -+ lxvd2x VSR(K),10,KEYS -+ vperm K,K,K,swap_mask -+ vcipherlast S0,S0,K -+ vcipherlast S1,S1,K -+ vcipherlast S2,S2,K -+ vcipherlast S3,S3,K -+ -+IF_LE() -+ -+ stxvd2x VSR(S0),0,DST -+ li 9,0x10 -+ stxvd2x VSR(S1),9,DST -+ addi 9,9,0x10 -+ stxvd2x VSR(S2),9,DST -+ addi 9,9,0x10 -+ stxvd2x VSR(S3),9,DST -+ -+ addi SRC,SRC,0x40 -+ addi DST,DST,0x40 -+ -+ clrldi LENGTH,LENGTH,62 -+ -+L2x: -+ srdi 5,LENGTH,1 -+ cmpldi 5,0 -+ beq L1x -+ -+ lxvd2x VSR(K),0,KEYS -+ vperm K,K,K,swap_mask -+ -+ lxvd2x VSR(S0),0,SRC -+ li 9,0x10 -+ lxvd2x VSR(S1),9,SRC -+ -+IF_LE() -+ -+ vxor S0,S0,K -+ vxor S1,S1,K -+ -+ mtctr ROUNDS -+ li 10,0x10 -+.align 5 -+L2x_round_loop: -+ lxvd2x VSR(K),10,KEYS -+ vperm K,K,K,swap_mask -+ vcipher S0,S0,K -+ vcipher S1,S1,K -+ addi 10,10,0x10 -+ bdnz L2x_round_loop -+ -+ lxvd2x VSR(K),10,KEYS -+ vperm K,K,K,swap_mask -+ vcipherlast S0,S0,K -+ vcipherlast S1,S1,K -+ -+IF_LE() -+ -+ stxvd2x VSR(S0),0,DST -+ li 9,0x10 -+ stxvd2x VSR(S1),9,DST -+ -+ addi SRC,SRC,0x20 -+ addi DST,DST,0x20 -+ -+ clrldi LENGTH,LENGTH,63 -+ -+L1x: -+ cmpldi LENGTH,0 -+ beq Ldone -+ -+ lxvd2x VSR(K),0,KEYS -+ vperm K,K,K,swap_mask -+ -+ lxvd2x VSR(S0),0,SRC -+ -+IF_LE() -+ -+ vxor S0,S0,K -+ -+ mtctr ROUNDS -+ li 10,0x10 -+.align 5 -+L1x_round_loop: -+ lxvd2x VSR(K),10,KEYS -+ vperm K,K,K,swap_mask -+ vcipher S0,S0,K -+ addi 10,10,0x10 -+ bdnz L1x_round_loop -+ -+ lxvd2x VSR(K),10,KEYS -+ vperm K,K,K,swap_mask -+ vcipherlast S0,S0,K -+ -+IF_LE() -+ -+ stxvd2x VSR(S0),0,DST -+ -+Ldone: -+ blr -+EPILOGUE(_nettle_aes_encrypt) -+ -+ .data -+ .align 4 -+.swap_mask: -+IF_LE(<.byte 8,9,10,11,12,13,14,15,0,1,2,3,4,5,6,7>) -+IF_BE(<.byte 3,2,1,0,7,6,5,4,11,10,9,8,15,14,13,12>) diff --git a/SOURCES/nettle-3.4.1-powerpc64-ghash-asm.patch b/SOURCES/nettle-3.4.1-powerpc64-ghash-asm.patch deleted file mode 100644 index 255adbd..0000000 --- a/SOURCES/nettle-3.4.1-powerpc64-ghash-asm.patch +++ /dev/null @@ -1,1519 +0,0 @@ -diff -up ./configure.ac.ghash ./configure.ac ---- ./configure.ac.ghash 2021-07-14 14:11:58.126891572 +0200 -+++ ./configure.ac 2021-07-14 14:11:58.130891552 +0200 -@@ -211,6 +211,22 @@ AC_C_BIGENDIAN([AC_DEFINE([WORDS_BIGENDI - ASM_WORDS_BIGENDIAN=yes], - [ASM_WORDS_BIGENDIAN=no]) - -+AC_CACHE_CHECK([for __builtin_bswap64], -+ nettle_cv_c_builtin_bswap64, -+[AC_TRY_LINK([ -+#include -+],[ -+uint64_t x = 17; -+uint64_t y = __builtin_bswap64(x); -+], -+nettle_cv_c_builtin_bswap64=yes, -+nettle_cv_c_builtin_bswap64=no)]) -+ -+AH_TEMPLATE([HAVE_BUILTIN_BSWAP64], [Define if __builtin_bswap64 is available]) -+if test "x$nettle_cv_c_builtin_bswap64" = "xyes" ; then -+ AC_DEFINE(HAVE_BUILTIN_BSWAP64) -+fi -+ - LSH_GCC_ATTRIBUTES - - # According to Simon Josefsson, looking for uint32_t and friends in -@@ -472,7 +488,7 @@ asm_replace_list="aes-encrypt-internal.a - 
sha3-permute.asm umac-nh.asm umac-nh-n.asm machine.m4" - - # Assembler files which generate additional object files if they are used. --asm_nettle_optional_list="gcm-hash8.asm cpuid.asm \ -+asm_nettle_optional_list="gcm-hash.asm gcm-hash8.asm cpuid.asm \ - aes-encrypt-internal-2.asm aes-decrypt-internal-2.asm memxor-2.asm \ - salsa20-core-internal-2.asm sha1-compress-2.asm sha256-compress-2.asm \ - sha3-permute-2.asm sha512-compress-2.asm \ -@@ -588,6 +604,10 @@ AH_VERBATIM([HAVE_NATIVE], - #undef HAVE_NATIVE_ecc_384_redc - #undef HAVE_NATIVE_ecc_521_modp - #undef HAVE_NATIVE_ecc_521_redc -+#undef HAVE_NATIVE_gcm_init_key -+#undef HAVE_NATIVE_fat_gcm_init_key -+#undef HAVE_NATIVE_gcm_hash -+#undef HAVE_NATIVE_fat_gcm_hash - #undef HAVE_NATIVE_gcm_hash8 - #undef HAVE_NATIVE_salsa20_core - #undef HAVE_NATIVE_sha1_compress -diff -up ./ctr16.c.ghash ./ctr16.c ---- ./ctr16.c.ghash 2021-07-14 14:11:58.130891552 +0200 -+++ ./ctr16.c 2021-07-14 14:11:58.130891552 +0200 -@@ -0,0 +1,106 @@ -+/* ctr16.c -+ -+ Cipher counter mode, optimized for 16-byte blocks. -+ -+ Copyright (C) 2005-2018 Niels Möller -+ Copyright (C) 2018 Red Hat, Inc. -+ -+ This file is part of GNU Nettle. -+ -+ GNU Nettle is free software: you can redistribute it and/or -+ modify it under the terms of either: -+ -+ * the GNU Lesser General Public License as published by the Free -+ Software Foundation; either version 3 of the License, or (at your -+ option) any later version. -+ -+ or -+ -+ * the GNU General Public License as published by the Free -+ Software Foundation; either version 2 of the License, or (at your -+ option) any later version. -+ -+ or both in parallel, as here. -+ -+ GNU Nettle is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ General Public License for more details. -+ -+ You should have received copies of the GNU General Public License and -+ the GNU Lesser General Public License along with this program. If -+ not, see http://www.gnu.org/licenses/. -+*/ -+ -+#if HAVE_CONFIG_H -+# include "config.h" -+#endif -+ -+#include -+ -+#include "ctr.h" -+ -+#include "ctr-internal.h" -+#include "memxor.h" -+#include "nettle-internal.h" -+ -+#define MIN(a,b) (((a) < (b)) ? (a) : (b)) -+ -+void -+_ctr_crypt16(const void *ctx, nettle_cipher_func *f, -+ nettle_fill16_func *fill, uint8_t *ctr, -+ size_t length, uint8_t *dst, -+ const uint8_t *src) -+{ -+ if (dst != src && !((uintptr_t) dst % sizeof(uint64_t))) -+ { -+ size_t blocks = length / 16u; -+ size_t done; -+ fill (ctr, blocks, (union nettle_block16 *) dst); -+ -+ done = blocks * 16; -+ f(ctx, done, dst, dst); -+ memxor (dst, src, done); -+ -+ length -= done; -+ if (length > 0) -+ { /* Left-over partial block */ -+ union nettle_block16 block; -+ dst += done; -+ src += done; -+ assert (length < 16); -+ /* Use fill, to update ctr value in the same way in all cases. */ -+ fill (ctr, 1, &block); -+ f (ctx, 16, block.b, block.b); -+ memxor3 (dst, src, block.b, length); -+ } -+ } -+ else -+ { -+ /* Construct an aligned buffer of consecutive counter values, of -+ size at most CTR_BUFFER_LIMIT. 
*/ -+ TMP_DECL(buffer, union nettle_block16, CTR_BUFFER_LIMIT / 16); -+ size_t blocks = (length + 15) / 16u; -+ size_t i; -+ TMP_ALLOC(buffer, MIN(blocks, CTR_BUFFER_LIMIT / 16)); -+ -+ for (i = 0; blocks >= CTR_BUFFER_LIMIT / 16; -+ i += CTR_BUFFER_LIMIT, blocks -= CTR_BUFFER_LIMIT / 16) -+ { -+ fill (ctr, CTR_BUFFER_LIMIT / 16, buffer); -+ f(ctx, CTR_BUFFER_LIMIT, buffer->b, buffer->b); -+ if (length - i < CTR_BUFFER_LIMIT) -+ goto done; -+ memxor3 (dst + i, src + i, buffer->b, CTR_BUFFER_LIMIT); -+ } -+ -+ if (blocks > 0) -+ { -+ assert (length - i < CTR_BUFFER_LIMIT); -+ fill (ctr, blocks, buffer); -+ f(ctx, blocks * 16, buffer->b, buffer->b); -+ done: -+ memxor3 (dst + i, src + i, buffer->b, length - i); -+ } -+ } -+} -diff -up ./ctr.c.ghash ./ctr.c ---- ./ctr.c.ghash 2018-12-04 21:56:05.000000000 +0100 -+++ ./ctr.c 2021-07-14 14:13:07.714539484 +0200 -@@ -41,11 +41,83 @@ - - #include "ctr.h" - -+#include "ctr-internal.h" - #include "macros.h" - #include "memxor.h" - #include "nettle-internal.h" - --#define NBLOCKS 4 -+#define MIN(a,b) (((a) < (b)) ? (a) : (b)) -+ -+/* The 'u64' member has been added in the public header -+ (nettle-types.h). Check that the alignment is not affected with -+ it using _Static_assert. */ -+union nettle_block16_ -+{ -+ uint8_t b[16]; -+ unsigned long w[16 / sizeof(unsigned long)]; -+}; -+_Static_assert(__alignof(union nettle_block16_) == __alignof(union nettle_block16), -+ "nettle_block16 alignment should be preserved"); -+ -+static size_t -+ctr_fill (size_t block_size, uint8_t *ctr, size_t length, uint8_t *buffer) -+{ -+ size_t i; -+ for (i = 0; i + block_size <= length; i += block_size) -+ { -+ memcpy (buffer + i, ctr, block_size); -+ INCREMENT(block_size, ctr); -+ } -+ return i; -+} -+ -+#if WORDS_BIGENDIAN -+# define USE_CTR_CRYPT16 1 -+static nettle_fill16_func ctr_fill16; -+static void -+ctr_fill16(uint8_t *ctr, size_t blocks, union nettle_block16 *buffer) -+{ -+ uint64_t hi, lo; -+ size_t i; -+ hi = READ_UINT64(ctr); -+ lo = READ_UINT64(ctr + 8); -+ -+ for (i = 0; i < blocks; i++) -+ { -+ buffer[i].u64[0] = hi; -+ buffer[i].u64[1] = lo; -+ hi += !(++lo); -+ } -+ WRITE_UINT64(ctr, hi); -+ WRITE_UINT64(ctr + 8, lo); -+} -+#else /* !WORDS_BIGENDIAN */ -+# if HAVE_BUILTIN_BSWAP64 -+# define USE_CTR_CRYPT16 1 -+static nettle_fill16_func ctr_fill16; -+static void -+ctr_fill16(uint8_t *ctr, size_t blocks, union nettle_block16 *buffer) -+{ -+ uint64_t hi, lo; -+ size_t i; -+ /* Read hi in native endianness */ -+ hi = LE_READ_UINT64(ctr); -+ lo = READ_UINT64(ctr + 8); -+ -+ for (i = 0; i < blocks; i++) -+ { -+ buffer[i].u64[0] = hi; -+ buffer[i].u64[1] = __builtin_bswap64(lo); -+ if (!++lo) -+ hi = __builtin_bswap64(__builtin_bswap64(hi) + 1); -+ } -+ LE_WRITE_UINT64(ctr, hi); -+ WRITE_UINT64(ctr + 8, lo); -+} -+# else /* ! 
HAVE_BUILTIN_BSWAP64 */ -+# define USE_CTR_CRYPT16 0 -+# endif -+#endif /* !WORDS_BIGENDIAN */ - - void - ctr_crypt(const void *ctx, nettle_cipher_func *f, -@@ -53,84 +125,64 @@ ctr_crypt(const void *ctx, nettle_cipher - size_t length, uint8_t *dst, - const uint8_t *src) - { -- if (src != dst) -+#if USE_CTR_CRYPT16 -+ if (block_size == 16) - { -- if (length == block_size) -- { -- f(ctx, block_size, dst, ctr); -- INCREMENT(block_size, ctr); -- memxor(dst, src, block_size); -- } -- else -+ _ctr_crypt16(ctx, f, ctr_fill16, ctr, length, dst, src); -+ return; -+ } -+#endif -+ -+ if(src != dst) -+ { -+ size_t filled = ctr_fill (block_size, ctr, length, dst); -+ -+ f(ctx, filled, dst, dst); -+ memxor(dst, src, filled); -+ -+ if (filled < length) - { -- size_t left; -- uint8_t *p; -+ TMP_DECL(block, uint8_t, NETTLE_MAX_CIPHER_BLOCK_SIZE); -+ TMP_ALLOC(block, block_size); - -- for (p = dst, left = length; -- left >= block_size; -- left -= block_size, p += block_size) -- { -- memcpy (p, ctr, block_size); -- INCREMENT(block_size, ctr); -- } -- -- f(ctx, length - left, dst, dst); -- memxor(dst, src, length - left); -- -- if (left) -- { -- TMP_DECL(buffer, uint8_t, NETTLE_MAX_CIPHER_BLOCK_SIZE); -- TMP_ALLOC(buffer, block_size); -- -- f(ctx, block_size, buffer, ctr); -- INCREMENT(block_size, ctr); -- memxor3(dst + length - left, src + length - left, buffer, left); -- } -+ f(ctx, block_size, block, ctr); -+ INCREMENT(block_size, ctr); -+ memxor3(dst + filled, src + filled, block, length - filled); - } - } - else - { -- if (length > block_size) -- { -- TMP_DECL(buffer, uint8_t, NBLOCKS * NETTLE_MAX_CIPHER_BLOCK_SIZE); -- size_t chunk = NBLOCKS * block_size; -+ /* For in-place CTR, construct a buffer of consecutive counter -+ values, of size at most CTR_BUFFER_LIMIT. */ -+ TMP_DECL(buffer, uint8_t, CTR_BUFFER_LIMIT); -+ -+ size_t buffer_size; -+ if (length < block_size) -+ buffer_size = block_size; -+ else if (length <= CTR_BUFFER_LIMIT) -+ buffer_size = length; -+ else -+ buffer_size = CTR_BUFFER_LIMIT; - -- TMP_ALLOC(buffer, chunk); -+ TMP_ALLOC(buffer, buffer_size); - -- for (; length >= chunk; -- length -= chunk, src += chunk, dst += chunk) -- { -- unsigned n; -- uint8_t *p; -- for (n = 0, p = buffer; n < NBLOCKS; n++, p += block_size) -- { -- memcpy (p, ctr, block_size); -- INCREMENT(block_size, ctr); -- } -- f(ctx, chunk, buffer, buffer); -- memxor(dst, buffer, chunk); -- } -- -- if (length > 0) -- { -- /* Final, possibly partial, blocks */ -- for (chunk = 0; chunk < length; chunk += block_size) -- { -- memcpy (buffer + chunk, ctr, block_size); -- INCREMENT(block_size, ctr); -- } -- f(ctx, chunk, buffer, buffer); -- memxor3(dst, src, buffer, length); -- } -+ while (length >= block_size) -+ { -+ size_t filled -+ = ctr_fill (block_size, ctr, MIN(buffer_size, length), buffer); -+ assert (filled > 0); -+ f(ctx, filled, buffer, buffer); -+ memxor(dst, buffer, filled); -+ length -= filled; -+ dst += filled; - } -- else if (length > 0) -- { -- TMP_DECL(buffer, uint8_t, NETTLE_MAX_CIPHER_BLOCK_SIZE); -- TMP_ALLOC(buffer, block_size); - -+ /* Final, possibly partial, block. 
*/ -+ if (length > 0) -+ { - f(ctx, block_size, buffer, ctr); - INCREMENT(block_size, ctr); -- memxor3(dst, src, buffer, length); -+ memxor(dst, buffer, length); - } - } - } -diff -up ./ctr-internal.h.ghash ./ctr-internal.h ---- ./ctr-internal.h.ghash 2021-07-14 14:11:58.130891552 +0200 -+++ ./ctr-internal.h 2021-07-14 14:11:58.130891552 +0200 -@@ -0,0 +1,56 @@ -+/* ctr-internal.h -+ -+ Copyright (C) 2018 Niels Möller -+ -+ This file is part of GNU Nettle. -+ -+ GNU Nettle is free software: you can redistribute it and/or -+ modify it under the terms of either: -+ -+ * the GNU Lesser General Public License as published by the Free -+ Software Foundation; either version 3 of the License, or (at your -+ option) any later version. -+ -+ or -+ -+ * the GNU General Public License as published by the Free -+ Software Foundation; either version 2 of the License, or (at your -+ option) any later version. -+ -+ or both in parallel, as here. -+ -+ GNU Nettle is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ General Public License for more details. -+ -+ You should have received copies of the GNU General Public License and -+ the GNU Lesser General Public License along with this program. If -+ not, see http://www.gnu.org/licenses/. -+*/ -+ -+#ifndef NETTLE_CTR_INTERNAL_H_INCLUDED -+#define NETTLE_CTR_INTERNAL_H_INCLUDED -+ -+#include "nettle-types.h" -+ -+/* Name mangling */ -+#define _ctr_crypt16 _nettle_ctr_crypt16 -+ -+/* Size limit for temporary stack buffers. */ -+#define CTR_BUFFER_LIMIT 512 -+ -+/* Fill BUFFER (n blocks) with incrementing CTR values. It would be -+ nice if CTR was always 64-bit aligned, but it isn't when called -+ from ctr_crypt. */ -+typedef void -+nettle_fill16_func(uint8_t *ctr, size_t n, union nettle_block16 *buffer); -+ -+void -+_ctr_crypt16(const void *ctx, nettle_cipher_func *f, -+ nettle_fill16_func *fill, uint8_t *ctr, -+ size_t length, uint8_t *dst, -+ const uint8_t *src); -+ -+ -+#endif /* NETTLE_CTR_INTERNAL_H_INCLUDED */ -diff -up ./fat-ppc.c.ghash ./fat-ppc.c ---- ./fat-ppc.c.ghash 2021-07-14 14:11:58.126891572 +0200 -+++ ./fat-ppc.c 2021-07-14 14:11:58.130891552 +0200 -@@ -49,6 +49,7 @@ - - #include "aes-internal.h" - #include "gcm.h" -+#include "gcm-internal.h" - #include "fat-setup.h" - - /* Define from arch/powerpc/include/uapi/asm/cputable.h in Linux kernel */ -@@ -87,6 +88,16 @@ DECLARE_FAT_FUNC(_nettle_aes_decrypt, ae - DECLARE_FAT_FUNC_VAR(aes_decrypt, aes_crypt_internal_func, c) - DECLARE_FAT_FUNC_VAR(aes_decrypt, aes_crypt_internal_func, ppc64) - -+#if GCM_TABLE_BITS == 8 -+DECLARE_FAT_FUNC(_nettle_gcm_init_key, gcm_init_key_func) -+DECLARE_FAT_FUNC_VAR(gcm_init_key, gcm_init_key_func, c) -+DECLARE_FAT_FUNC_VAR(gcm_init_key, gcm_init_key_func, ppc64) -+ -+DECLARE_FAT_FUNC(_nettle_gcm_hash, gcm_hash_func) -+DECLARE_FAT_FUNC_VAR(gcm_hash, gcm_hash_func, c) -+DECLARE_FAT_FUNC_VAR(gcm_hash, gcm_hash_func, ppc64) -+#endif /* GCM_TABLE_BITS == 8 */ -+ - static void CONSTRUCTOR - fat_init (void) - { -@@ -101,17 +112,29 @@ fat_init (void) - features.have_crypto_ext ? 
"crypto extensions" : ""); - - if (features.have_crypto_ext) -- { -- if (verbose) -- fprintf (stderr, "libnettle: enabling arch 2.07 code.\n"); -- _nettle_aes_encrypt_vec = _nettle_aes_encrypt_ppc64; -- _nettle_aes_decrypt_vec = _nettle_aes_decrypt_ppc64; -- } -+ { -+ if (verbose) -+ fprintf (stderr, "libnettle: enabling arch 2.07 code.\n"); -+ _nettle_aes_encrypt_vec = _nettle_aes_encrypt_ppc64; -+ _nettle_aes_decrypt_vec = _nettle_aes_decrypt_ppc64; -+#if GCM_TABLE_BITS == 8 -+ /* Make sure _nettle_gcm_init_key_vec function is compatible -+ with _nettle_gcm_hash_vec function e.g. _nettle_gcm_init_key_c() -+ fills gcm_key table with values that are incompatible with -+ _nettle_gcm_hash_ppc64() */ -+ _nettle_gcm_init_key_vec = _nettle_gcm_init_key_ppc64; -+ _nettle_gcm_hash_vec = _nettle_gcm_hash_ppc64; -+#endif /* GCM_TABLE_BITS == 8 */ -+ } - else -- { -- _nettle_aes_encrypt_vec = _nettle_aes_encrypt_c; -- _nettle_aes_decrypt_vec = _nettle_aes_decrypt_c; -- } -+ { -+ _nettle_aes_encrypt_vec = _nettle_aes_encrypt_c; -+ _nettle_aes_decrypt_vec = _nettle_aes_decrypt_c; -+#if GCM_TABLE_BITS == 8 -+ _nettle_gcm_init_key_vec = _nettle_gcm_init_key_c; -+ _nettle_gcm_hash_vec = _nettle_gcm_hash_c; -+#endif /* GCM_TABLE_BITS == 8 */ -+ } - } - - DEFINE_FAT_FUNC(_nettle_aes_encrypt, void, -@@ -127,3 +150,14 @@ DEFINE_FAT_FUNC(_nettle_aes_decrypt, voi - size_t length, uint8_t *dst, - const uint8_t *src), - (rounds, keys, T, length, dst, src)) -+ -+#if GCM_TABLE_BITS == 8 -+DEFINE_FAT_FUNC(_nettle_gcm_init_key, void, -+ (union nettle_block16 *table), -+ (table)) -+ -+DEFINE_FAT_FUNC(_nettle_gcm_hash, void, -+ (const struct gcm_key *key, union nettle_block16 *x, -+ size_t length, const uint8_t *data), -+ (key, x, length, data)) -+#endif /* GCM_TABLE_BITS == 8 */ -diff -up ./fat-setup.h.ghash ./fat-setup.h ---- ./fat-setup.h.ghash 2018-12-04 21:56:06.000000000 +0100 -+++ ./fat-setup.h 2021-07-14 14:11:58.130891552 +0200 -@@ -159,6 +159,11 @@ typedef void aes_crypt_internal_func (un - size_t length, uint8_t *dst, - const uint8_t *src); - -+typedef void gcm_init_key_func (union nettle_block16 *table); -+ -+typedef void gcm_hash_func (const struct gcm_key *key, union nettle_block16 *x, -+ size_t length, const uint8_t *data); -+ - typedef void *(memxor_func)(void *dst, const void *src, size_t n); - - typedef void salsa20_core_func (uint32_t *dst, const uint32_t *src, unsigned rounds); -diff -up ./gcm.c.ghash ./gcm.c ---- ./gcm.c.ghash 2018-12-04 21:56:05.000000000 +0100 -+++ ./gcm.c 2021-07-14 14:11:58.131891547 +0200 -@@ -6,8 +6,9 @@ - See also the gcm paper at - http://www.cryptobarn.com/papers/gcm-spec.pdf. - -- Copyright (C) 2011, 2013 Niels Möller - Copyright (C) 2011 Katholieke Universiteit Leuven -+ Copyright (C) 2011, 2013, 2018 Niels Möller -+ Copyright (C) 2018 Red Hat, Inc. - - Contributed by Nikos Mavrogiannopoulos - -@@ -48,9 +49,11 @@ - - #include "gcm.h" - -+#include "gcm-internal.h" - #include "memxor.h" - #include "nettle-internal.h" - #include "macros.h" -+#include "ctr-internal.h" - - #define GHASH_POLYNOMIAL 0xE1UL - -@@ -112,7 +115,17 @@ gcm_gf_shift (union nettle_block16 *r, c - #endif /* ! 
WORDS_BIGENDIAN */ - } - --#if GCM_TABLE_BITS == 0 -+#if GCM_TABLE_BITS != 8 -+/* The native implementations (currently ppc64 only) depend on the -+ GCM_TABLE_BITS == 8 layout */ -+#undef HAVE_NATIVE_gcm_hash -+#undef HAVE_NATIVE_gcm_init_key -+#undef HAVE_NATIVE_fat_gcm_hash -+#undef HAVE_NATIVE_fat_gcm_init_key -+#endif -+ -+#if !HAVE_NATIVE_gcm_hash -+# if GCM_TABLE_BITS == 0 - /* Sets x <- x * y mod r, using the plain bitwise algorithm from the - specification. y may be shorter than a full block, missing bytes - are assumed zero. */ -@@ -140,15 +153,15 @@ gcm_gf_mul (union nettle_block16 *x, con - } - memcpy (x->b, Z.b, sizeof(Z)); - } --#else /* GCM_TABLE_BITS != 0 */ -+# else /* GCM_TABLE_BITS != 0 */ - --# if WORDS_BIGENDIAN --# define W(left,right) (0x##left##right) --# else --# define W(left,right) (0x##right##left) --# endif -+# if WORDS_BIGENDIAN -+# define W(left,right) (0x##left##right) -+# else -+# define W(left,right) (0x##right##left) -+# endif - --# if GCM_TABLE_BITS == 4 -+# if GCM_TABLE_BITS == 4 - static const uint16_t - shift_table[0x10] = { - W(00,00),W(1c,20),W(38,40),W(24,60),W(70,80),W(6c,a0),W(48,c0),W(54,e0), -@@ -177,26 +190,13 @@ gcm_gf_shift_4(union nettle_block16 *x) - # error Unsupported word size. */ - #endif - #else /* ! WORDS_BIGENDIAN */ --# if SIZEOF_LONG == 4 --#define RSHIFT_WORD(x) \ -- ((((x) & 0xf0f0f0f0UL) >> 4) \ -- | (((x) & 0x000f0f0f) << 12)) -- reduce = shift_table[(w[3] >> 24) & 0xf]; -- w[3] = RSHIFT_WORD(w[3]) | ((w[2] >> 20) & 0xf0); -- w[2] = RSHIFT_WORD(w[2]) | ((w[1] >> 20) & 0xf0); -- w[1] = RSHIFT_WORD(w[1]) | ((w[0] >> 20) & 0xf0); -- w[0] = RSHIFT_WORD(w[0]) ^ reduce; --# elif SIZEOF_LONG == 8 --#define RSHIFT_WORD(x) \ -- ((((x) & 0xf0f0f0f0f0f0f0f0UL) >> 4) \ -- | (((x) & 0x000f0f0f0f0f0f0fUL) << 12)) -- reduce = shift_table[(w[1] >> 56) & 0xf]; -- w[1] = RSHIFT_WORD(w[1]) | ((w[0] >> 52) & 0xf0); -- w[0] = RSHIFT_WORD(w[0]) ^ reduce; --# else --# error Unsupported word size. */ --# endif --# undef RSHIFT_WORD -+# define RSHIFT_WORD_4(x) \ -+ ((((x) & UINT64_C(0xf0f0f0f0f0f0f0f0)) >> 4) \ -+ | (((x) & UINT64_C(0x000f0f0f0f0f0f0f)) << 12)) -+ reduce = shift_table[(u64[1] >> 56) & 0xf]; -+ u64[1] = RSHIFT_WORD_4(u64[1]) | ((u64[0] >> 52) & 0xf0); -+ u64[0] = RSHIFT_WORD_4(u64[0]) ^ reduce; -+# undef RSHIFT_WORD_4 - #endif /* ! WORDS_BIGENDIAN */ - } - -@@ -219,10 +219,10 @@ gcm_gf_mul (union nettle_block16 *x, con - } - memcpy (x->b, Z.b, sizeof(Z)); - } --# elif GCM_TABLE_BITS == 8 --# if HAVE_NATIVE_gcm_hash8 -+# elif GCM_TABLE_BITS == 8 -+# if HAVE_NATIVE_gcm_hash8 - --#define gcm_hash _nettle_gcm_hash8 -+#define _nettle_gcm_hash _nettle_gcm_hash8 - void - _nettle_gcm_hash8 (const struct gcm_key *key, union nettle_block16 *x, - size_t length, const uint8_t *data); -@@ -317,18 +317,46 @@ gcm_gf_mul (union nettle_block16 *x, con - gcm_gf_shift_8(&Z); - gcm_gf_add(x, &Z, &table[x->b[0]]); - } --# endif /* ! HAVE_NATIVE_gcm_hash8 */ --# else /* GCM_TABLE_BITS != 8 */ --# error Unsupported table size. --# endif /* GCM_TABLE_BITS != 8 */ -+# endif /* ! HAVE_NATIVE_gcm_hash8 */ -+# else /* GCM_TABLE_BITS != 8 */ -+# error Unsupported table size. -+# endif /* GCM_TABLE_BITS != 8 */ -+ -+# undef W -+# endif /* GCM_TABLE_BITS != 0 */ -+#endif /* !HAVE_NATIVE_gcm_hash */ - --#undef W -- --#endif /* GCM_TABLE_BITS */ - - /* Increment the rightmost 32 bits. 
*/ - #define INC32(block) INCREMENT(4, (block.b) + GCM_BLOCK_SIZE - 4) - -+#if !HAVE_NATIVE_gcm_init_key -+# if !HAVE_NATIVE_fat_gcm_hash -+# define _nettle_gcm_init_key _nettle_gcm_init_key_c -+static -+# endif -+void -+_nettle_gcm_init_key_c(union nettle_block16 *table) -+{ -+#if GCM_TABLE_BITS -+ /* Middle element if GCM_TABLE_BITS > 0, otherwise the first -+ element */ -+ unsigned i = (1<h[0].b, 0, GCM_BLOCK_SIZE); - f (cipher, GCM_BLOCK_SIZE, key->h[i].b, key->h[0].b); -- --#if GCM_TABLE_BITS -- /* Algorithm 3 from the gcm paper. First do powers of two, then do -- the rest by adding. */ -- while (i /= 2) -- gcm_gf_shift(&key->h[i], &key->h[2*i]); -- for (i = 2; i < 1<h[i+j], &key->h[i],&key->h[j]); -- } --#endif -+ -+ _nettle_gcm_init_key(key->h); - } - --#ifndef gcm_hash --static void --gcm_hash(const struct gcm_key *key, union nettle_block16 *x, -- size_t length, const uint8_t *data) -+#if !(HAVE_NATIVE_gcm_hash || HAVE_NATIVE_gcm_hash8) -+# if !HAVE_NATIVE_fat_gcm_hash -+# define _nettle_gcm_hash _nettle_gcm_hash_c -+static -+# endif -+void -+_nettle_gcm_hash_c(const struct gcm_key *key, union nettle_block16 *x, -+ size_t length, const uint8_t *data) - { - for (; length >= GCM_BLOCK_SIZE; - length -= GCM_BLOCK_SIZE, data += GCM_BLOCK_SIZE) -@@ -377,7 +398,7 @@ gcm_hash(const struct gcm_key *key, unio - gcm_gf_mul (x, key->h); - } - } --#endif /* !gcm_hash */ -+#endif /* !(HAVE_NATIVE_gcm_hash || HAVE_NATIVE_gcm_hash8) */ - - static void - gcm_hash_sizes(const struct gcm_key *key, union nettle_block16 *x, -@@ -391,7 +412,7 @@ gcm_hash_sizes(const struct gcm_key *key - WRITE_UINT64 (buffer, auth_size); - WRITE_UINT64 (buffer + 8, data_size); - -- gcm_hash(key, x, GCM_BLOCK_SIZE, buffer); -+ _nettle_gcm_hash(key, x, GCM_BLOCK_SIZE, buffer); - } - - /* NOTE: The key is needed only if length != GCM_IV_SIZE */ -@@ -410,7 +431,7 @@ gcm_set_iv(struct gcm_ctx *ctx, const st - else - { - memset(ctx->iv.b, 0, GCM_BLOCK_SIZE); -- gcm_hash(key, &ctx->iv, length, iv); -+ _nettle_gcm_hash(key, &ctx->iv, length, iv); - gcm_hash_sizes(key, &ctx->iv, 0, length); - } - -@@ -429,47 +450,68 @@ gcm_update(struct gcm_ctx *ctx, const st - assert(ctx->auth_size % GCM_BLOCK_SIZE == 0); - assert(ctx->data_size == 0); - -- gcm_hash(key, &ctx->x, length, data); -+ _nettle_gcm_hash(key, &ctx->x, length, data); - - ctx->auth_size += length; - } - -+static nettle_fill16_func gcm_fill; -+#if WORDS_BIGENDIAN - static void --gcm_crypt(struct gcm_ctx *ctx, const void *cipher, nettle_cipher_func *f, -- size_t length, uint8_t *dst, const uint8_t *src) -+gcm_fill(uint8_t *ctr, size_t blocks, union nettle_block16 *buffer) - { -- uint8_t buffer[GCM_BLOCK_SIZE]; -+ uint64_t hi, mid; -+ uint32_t lo; -+ size_t i; -+ hi = READ_UINT64(ctr); -+ mid = (uint64_t) READ_UINT32(ctr + 8) << 32; -+ lo = READ_UINT32(ctr + 12); - -- if (src != dst) -+ for (i = 0; i < blocks; i++) - { -- for (; length >= GCM_BLOCK_SIZE; -- (length -= GCM_BLOCK_SIZE, -- src += GCM_BLOCK_SIZE, dst += GCM_BLOCK_SIZE)) -- { -- f (cipher, GCM_BLOCK_SIZE, dst, ctx->ctr.b); -- memxor (dst, src, GCM_BLOCK_SIZE); -- INC32 (ctx->ctr); -- } -+ buffer[i].u64[0] = hi; -+ buffer[i].u64[1] = mid + lo++; - } -- else -+ WRITE_UINT32(ctr + 12, lo); -+ -+} -+#elif HAVE_BUILTIN_BSWAP64 -+/* Assume __builtin_bswap32 is also available */ -+static void -+gcm_fill(uint8_t *ctr, size_t blocks, union nettle_block16 *buffer) -+{ -+ uint64_t hi, mid; -+ uint32_t lo; -+ size_t i; -+ hi = LE_READ_UINT64(ctr); -+ mid = LE_READ_UINT32(ctr + 8); -+ lo = READ_UINT32(ctr + 12); -+ -+ for 
(i = 0; i < blocks; i++) - { -- for (; length >= GCM_BLOCK_SIZE; -- (length -= GCM_BLOCK_SIZE, -- src += GCM_BLOCK_SIZE, dst += GCM_BLOCK_SIZE)) -- { -- f (cipher, GCM_BLOCK_SIZE, buffer, ctx->ctr.b); -- memxor3 (dst, src, buffer, GCM_BLOCK_SIZE); -- INC32 (ctx->ctr); -- } -+ buffer[i].u64[0] = hi; -+ buffer[i].u64[1] = mid + ((uint64_t)__builtin_bswap32(lo) << 32); -+ lo++; - } -- if (length > 0) -+ WRITE_UINT32(ctr + 12, lo); -+} -+#else -+static void -+gcm_fill(uint8_t *ctr, size_t blocks, union nettle_block16 *buffer) -+{ -+ uint32_t c; -+ -+ c = READ_UINT32(ctr + GCM_BLOCK_SIZE - 4); -+ -+ for (; blocks-- > 0; buffer++, c++) - { -- /* A final partial block */ -- f (cipher, GCM_BLOCK_SIZE, buffer, ctx->ctr.b); -- memxor3 (dst, src, buffer, length); -- INC32 (ctx->ctr); -+ memcpy(buffer->b, ctr, GCM_BLOCK_SIZE - 4); -+ WRITE_UINT32(buffer->b + GCM_BLOCK_SIZE - 4, c); - } -+ -+ WRITE_UINT32(ctr + GCM_BLOCK_SIZE - 4, c); - } -+#endif - - void - gcm_encrypt (struct gcm_ctx *ctx, const struct gcm_key *key, -@@ -478,8 +520,8 @@ gcm_encrypt (struct gcm_ctx *ctx, const - { - assert(ctx->data_size % GCM_BLOCK_SIZE == 0); - -- gcm_crypt(ctx, cipher, f, length, dst, src); -- gcm_hash(key, &ctx->x, length, dst); -+ _ctr_crypt16(cipher, f, gcm_fill, ctx->ctr.b, length, dst, src); -+ _nettle_gcm_hash(key, &ctx->x, length, dst); - - ctx->data_size += length; - } -@@ -491,8 +533,8 @@ gcm_decrypt(struct gcm_ctx *ctx, const s - { - assert(ctx->data_size % GCM_BLOCK_SIZE == 0); - -- gcm_hash(key, &ctx->x, length, src); -- gcm_crypt(ctx, cipher, f, length, dst, src); -+ _nettle_gcm_hash(key, &ctx->x, length, src); -+ _ctr_crypt16(cipher, f, gcm_fill, ctx->ctr.b, length, dst, src); - - ctx->data_size += length; - } -diff -up ./gcm-internal.h.ghash ./gcm-internal.h ---- ./gcm-internal.h.ghash 2021-07-14 14:11:58.131891547 +0200 -+++ ./gcm-internal.h 2021-07-14 14:11:58.131891547 +0200 -@@ -0,0 +1,54 @@ -+/* gcm-internal.h -+ -+ Copyright (C) 2020 Niels Möller -+ -+ This file is part of GNU Nettle. -+ -+ GNU Nettle is free software: you can redistribute it and/or -+ modify it under the terms of either: -+ -+ * the GNU Lesser General Public License as published by the Free -+ Software Foundation; either version 3 of the License, or (at your -+ option) any later version. -+ -+ or -+ -+ * the GNU General Public License as published by the Free -+ Software Foundation; either version 2 of the License, or (at your -+ option) any later version. -+ -+ or both in parallel, as here. -+ -+ GNU Nettle is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ General Public License for more details. -+ -+ You should have received copies of the GNU General Public License and -+ the GNU Lesser General Public License along with this program. If -+ not, see http://www.gnu.org/licenses/. 
-+*/ -+ -+#ifndef NETTLE_GCM_INTERNAL_H_INCLUDED -+#define NETTLE_GCM_INTERNAL_H_INCLUDED -+ -+/* Functions available only in some configurations */ -+void -+_nettle_gcm_init_key (union nettle_block16 *table); -+ -+void -+_nettle_gcm_hash(const struct gcm_key *key, union nettle_block16 *x, -+ size_t length, const uint8_t *data); -+ -+#if HAVE_NATIVE_fat_gcm_init_key -+void -+_nettle_gcm_init_key_c (union nettle_block16 *table); -+#endif -+ -+#if HAVE_NATIVE_fat_gcm_hash -+void -+_nettle_gcm_hash_c (const struct gcm_key *key, union nettle_block16 *x, -+ size_t length, const uint8_t *data); -+#endif -+ -+#endif /* NETTLE_GCM_INTERNAL_H_INCLUDED */ -diff -up ./Makefile.in.ghash ./Makefile.in ---- ./Makefile.in.ghash 2021-07-14 14:11:58.124891582 +0200 -+++ ./Makefile.in 2021-07-14 14:11:58.131891547 +0200 -@@ -96,7 +96,7 @@ nettle_SOURCES = aes-decrypt-internal.c - chacha-crypt.c chacha-core-internal.c \ - chacha-poly1305.c chacha-poly1305-meta.c \ - chacha-set-key.c chacha-set-nonce.c \ -- ctr.c des.c des3.c des-compat.c \ -+ ctr.c ctr16.c des.c des3.c des-compat.c \ - eax.c eax-aes128.c eax-aes128-meta.c \ - gcm.c gcm-aes.c \ - gcm-aes128.c gcm-aes128-meta.c \ -@@ -233,6 +233,8 @@ DISTFILES = $(SOURCES) $(HEADERS) getopt - cast128_sboxes.h desinfo.h desCode.h \ - memxor-internal.h nettle-internal.h nettle-write.h \ - rsa-internal.h \ -+ ctr-internal.h \ -+ gcm-internal.h \ - gmp-glue.h ecc-internal.h fat-setup.h \ - mini-gmp.h asm.m4 \ - nettle.texinfo nettle.info nettle.html nettle.pdf sha-example.c -diff -up ./nettle-types.h.ghash ./nettle-types.h ---- ./nettle-types.h.ghash 2018-12-04 21:56:06.000000000 +0100 -+++ ./nettle-types.h 2021-07-14 14:11:58.131891547 +0200 -@@ -48,6 +48,7 @@ union nettle_block16 - { - uint8_t b[16]; - unsigned long w[16 / sizeof(unsigned long)]; -+ uint64_t u64[2]; - }; - - /* Randomness. Used by key generation and dsa signature creation. */ -diff -up ./powerpc64/fat/gcm-hash.asm.ghash ./powerpc64/fat/gcm-hash.asm ---- ./powerpc64/fat/gcm-hash.asm.ghash 2021-07-14 14:11:58.131891547 +0200 -+++ ./powerpc64/fat/gcm-hash.asm 2021-07-14 14:11:58.131891547 +0200 -@@ -0,0 +1,39 @@ -+C powerpc64/fat/gcm-hash.asm -+ -+ -+ifelse(< -+ Copyright (C) 2020 Mamone Tarsha -+ -+ This file is part of GNU Nettle. -+ -+ GNU Nettle is free software: you can redistribute it and/or -+ modify it under the terms of either: -+ -+ * the GNU Lesser General Public License as published by the Free -+ Software Foundation; either version 3 of the License, or (at your -+ option) any later version. -+ -+ or -+ -+ * the GNU General Public License as published by the Free -+ Software Foundation; either version 2 of the License, or (at your -+ option) any later version. -+ -+ or both in parallel, as here. -+ -+ GNU Nettle is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ General Public License for more details. -+ -+ You should have received copies of the GNU General Public License and -+ the GNU Lesser General Public License along with this program. If -+ not, see http://www.gnu.org/licenses/. 
-+>) -+ -+dnl picked up by configure -+dnl PROLOGUE(_nettle_fat_gcm_init_key) -+dnl PROLOGUE(_nettle_fat_gcm_hash) -+ -+define(, <$1_ppc64>) -+include_src() -diff -up ./powerpc64/p8/gcm-hash.asm.ghash ./powerpc64/p8/gcm-hash.asm ---- ./powerpc64/p8/gcm-hash.asm.ghash 2021-07-14 14:11:58.131891547 +0200 -+++ ./powerpc64/p8/gcm-hash.asm 2021-07-14 14:11:58.131891547 +0200 -@@ -0,0 +1,499 @@ -+C powerpc64/p8/gcm-hash.asm -+ -+ifelse(< -+ Copyright (C) 2020 Niels Möller and Mamone Tarsha -+ This file is part of GNU Nettle. -+ -+ GNU Nettle is free software: you can redistribute it and/or -+ modify it under the terms of either: -+ -+ * the GNU Lesser General Public License as published by the Free -+ Software Foundation; either version 3 of the License, or (at your -+ option) any later version. -+ -+ or -+ -+ * the GNU General Public License as published by the Free -+ Software Foundation; either version 2 of the License, or (at your -+ option) any later version. -+ -+ or both in parallel, as here. -+ -+ GNU Nettle is distributed in the hope that it will be useful, -+ but WITHOUT ANY WARRANTY; without even the implied warranty of -+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU -+ General Public License for more details. -+ -+ You should have received copies of the GNU General Public License and -+ the GNU Lesser General Public License along with this program. If -+ not, see http://www.gnu.org/licenses/. -+>) -+ -+C gcm_set_key() assigns H value in the middle element of the table -+define(, <128>) -+ -+C Register usage: -+ -+define(, <1>) -+define(, <2>) -+ -+define(, <3>) -+ -+define(, <0>) -+define(, <1>) -+define(, <16>) -+define(, <17>) -+define(, <1>) -+ -+define(, <2>) -+define(
, <3>) -+define(, <4>) -+define(
, <5>) -+define(, <6>) -+define(, <7>) -+define(, <8>) -+define(, <9>) -+define(, <10>) -+define(, <11>) -+define(, <12>) -+define(, <13>) -+define(, <14>) -+define(, <15>) -+define(, <13>) -+define(, <14>) -+define(, <15>) -+define(, <16>) -+define(, <17>) -+define(, <18>) -+ -+define(, <18>) -+define(, <19>) -+ -+.file "gcm-hash.asm" -+ -+.text -+ -+ C void gcm_init_key (union gcm_block *table) -+ -+C This function populates the gcm table as the following layout -+C ******************************************************************************* -+C | H1M = (H1 div x⁶⁴)||((H1 mod x⁶⁴) × (x⁶⁴+x⁶³+x⁶²+x⁵⁷)) div x⁶⁴ | -+C | H1L = (H1 mod x⁶⁴)||(((H1 mod x⁶⁴) × (x⁶³+x⁶²+x⁵⁷)) mod x⁶⁴) + (H1 div x⁶⁴) | -+C | | -+C | H2M = (H2 div x⁶⁴)||((H2 mod x⁶⁴) × (x⁶⁴+x⁶³+x⁶²+x⁵⁷)) div x⁶⁴ | -+C | H2L = (H2 mod x⁶⁴)||(((H2 mod x⁶⁴) × (x⁶³+x⁶²+x⁵⁷)) mod x⁶⁴) + (H2 div x⁶⁴) | -+C | | -+C | H3M = (H3 div x⁶⁴)||((H3 mod x⁶⁴) × (x⁶⁴+x⁶³+x⁶²+x⁵⁷)) div x⁶⁴ | -+C | H3L = (H3 mod x⁶⁴)||(((H3 mod x⁶⁴) × (x⁶³+x⁶²+x⁵⁷)) mod x⁶⁴) + (H3 div x⁶⁴) | -+C | | -+C | H4M = (H3 div x⁶⁴)||((H4 mod x⁶⁴) × (x⁶⁴+x⁶³+x⁶²+x⁵⁷)) div x⁶⁴ | -+C | H4L = (H3 mod x⁶⁴)||(((H4 mod x⁶⁴) × (x⁶³+x⁶²+x⁵⁷)) mod x⁶⁴) + (H4 div x⁶⁴) | -+C ******************************************************************************* -+ -+define(, <5>) -+PROLOGUE(_nettle_gcm_init_key) -+ DATA_LOAD_VEC(POLY,.polynomial,7) C 0xC2000000000000000000000000000001 -+IF_LE(< -+ li 8,0 -+ lvsl LE_MASK,0,8 C 0x000102030405060708090A0B0C0D0E0F -+ vspltisb LE_TEMP,0x07 C 0x07070707070707070707070707070707 -+ vxor LE_MASK,LE_MASK,LE_TEMP C 0x07060504030201000F0E0D0C0B0A0908 -+>) -+ -+ C 'H' is assigned by gcm_set_key() to the middle element of the table -+ li 10,H_Idx*16 -+ lxvd2x VSR(H),10,TABLE C load 'H' -+ C byte-reverse of each doubleword permuting on little-endian mode -+IF_LE(< -+ vperm H,H,H,LE_MASK -+>) -+ -+ C --- calculate H = H << 1 mod P(X), P(X) = (x¹²⁸+x¹²⁷+x¹²⁶+x¹²¹+1) --- -+ -+ vupkhsb EMSB,H C extend most significant bit to first byte -+ vspltisb B1,1 C 0x01010101010101010101010101010101 -+ vspltb EMSB,EMSB,0 C first byte quadword-extend -+ vsl H,H,B1 C H = H << 1 -+ vand EMSB,EMSB,POLY C EMSB &= 0xC2000000000000000000000000000001 -+ vxor ZERO,ZERO,ZERO C 0x00000000000000000000000000000000 -+ vxor H,H,EMSB C H ^= EMSB -+ -+ C --- calculate H^2 = H*H --- -+ -+ xxmrghd VSR(POLY_L),VSR(ZERO),VSR(POLY) C 0x0000000000000000C200000000000000 -+ -+ C --- Hp = (H mod x⁶⁴) / x⁶⁴ mod P(X) --- -+ C --- Hp = (H mod x⁶⁴) × (x⁶⁴+x⁶³+x⁶²+x⁵⁷) mod P(X), deg(Hp) ≤ 127 --- -+ C --- Hp = (H mod x⁶⁴) × (x⁶⁴+x⁶³+x⁶²+x⁵⁷) --- -+ vpmsumd Hp,H,POLY_L C Hp = (H mod x⁶⁴) × (x⁶³+x⁶²+x⁵⁷) -+ xxswapd VSR(Hm),VSR(H) -+ xxmrgld VSR(Hl),VSR(H),VSR(ZERO) C Hl = (H mod x⁶⁴) × x⁶⁴ -+ vxor Hm,Hm,Hp C Hm = Hm + Hp -+ vxor Hl,Hl,Hp C Hl = Hl + Hp -+ xxmrgld VSR(H1L),VSR(H),VSR(Hm) C H1L = (H mod x⁶⁴)||(Hl mod x⁶⁴) -+ xxmrghd VSR(H1M),VSR(H),VSR(Hl) C H1M = (H div x⁶⁴)||(Hl div x⁶⁴) -+ -+ vpmsumd F,H1L,H C F = (H1Lh × Hh) + (H1Ll × Hl) -+ vpmsumd R,H1M,H C R = (H1Mh × Hh) + (H1Ml × Hl) -+ -+ C --- rduction --- -+ vpmsumd T,F,POLY_L C T = (F mod x⁶⁴) × (x⁶³+x⁶²+x⁵⁷) -+ xxswapd VSR(H2),VSR(F) -+ vxor R,R,T C R = R + T -+ vxor H2,R,H2 -+ -+ xxmrgld VSR(Hl),VSR(H2),VSR(ZERO) -+ xxswapd VSR(Hm),VSR(H2) -+ vpmsumd Hp,H2,POLY_L -+ vxor Hl,Hl,Hp -+ vxor Hm,Hm,Hp -+ xxmrghd VSR(H2M),VSR(H2),VSR(Hl) -+ xxmrgld VSR(H2L),VSR(H2),VSR(Hm) -+ -+ C store H1M, H1L, H2M, H2L -+ li 8,1*16 -+ li 9,2*16 -+ li 10,3*16 -+ stxvd2x VSR(H1M),0,TABLE -+ stxvd2x VSR(H1L),8,TABLE -+ stxvd2x VSR(H2M),9,TABLE -+ stxvd2x 
VSR(H2L),10,TABLE -+ -+ C --- calculate H^3 = H^1*H^2, H^4 = H^2*H^2 --- -+ -+ vpmsumd F,H1L,H2 -+ vpmsumd F2,H2L,H2 -+ vpmsumd R,H1M,H2 -+ vpmsumd R2,H2M,H2 -+ -+ vpmsumd T,F,POLY_L -+ vpmsumd T2,F2,POLY_L -+ xxswapd VSR(H3),VSR(F) -+ xxswapd VSR(H4),VSR(F2) -+ vxor R,R,T -+ vxor R2,R2,T2 -+ vxor H3,R,H3 -+ vxor H4,R2,H4 -+ -+ xxmrgld VSR(Hl),VSR(H3),VSR(ZERO) -+ xxmrgld VSR(Hl2),VSR(H4),VSR(ZERO) -+ xxswapd VSR(Hm),VSR(H3) -+ xxswapd VSR(Hm2),VSR(H4) -+ vpmsumd Hp,H3,POLY_L -+ vpmsumd Hp2,H4,POLY_L -+ vxor Hl,Hl,Hp -+ vxor Hl2,Hl2,Hp2 -+ vxor Hm,Hm,Hp -+ vxor Hm2,Hm2,Hp2 -+ xxmrghd VSR(H1M),VSR(H3),VSR(Hl) -+ xxmrghd VSR(H2M),VSR(H4),VSR(Hl2) -+ xxmrgld VSR(H1L),VSR(H3),VSR(Hm) -+ xxmrgld VSR(H2L),VSR(H4),VSR(Hm2) -+ -+ C store H3M, H3L, H4M, H4L -+ li 7,4*16 -+ li 8,5*16 -+ li 9,6*16 -+ li 10,7*16 -+ stxvd2x VSR(H1M),7,TABLE -+ stxvd2x VSR(H1L),8,TABLE -+ stxvd2x VSR(H2M),9,TABLE -+ stxvd2x VSR(H2L),10,TABLE -+ -+ blr -+EPILOGUE(_nettle_gcm_init_key) -+ -+define(
, <3>) -+define(, <4>) -+define(, <5>) -+define(, <6>) -+ -+define(, <16>) -+define(, <17>) -+define(, <0>) -+ -+define(, <1>) -+define(, <2>) -+define(, <3>) -+define(, <4>) -+define(, <5>) -+define(, <6>) -+define(, <7>) -+define(, <8>) -+define(, <9>) -+define(, <10>) -+define(, <11>) -+define(, <12>) -+define(, <13>) -+define(, <14>) -+define(, <15>) -+define(, <16>) -+define(, <17>) -+define(, <18>) -+define(, <20>) -+define(, <21>) -+define(, <22>) -+define(, <23>) -+ -+define(, <18>) -+define(, <19>) -+ -+ C void gcm_hash (const struct gcm_key *key, union gcm_block *x, -+ C size_t length, const uint8_t *data) -+ -+define(, <5>) -+PROLOGUE(_nettle_gcm_hash) -+ vxor ZERO,ZERO,ZERO -+ DATA_LOAD_VEC(POLY,.polynomial,7) -+IF_LE(< -+ li 8,0 -+ lvsl LE_MASK,0,8 -+ vspltisb LE_TEMP,0x07 -+ vxor LE_MASK,LE_MASK,LE_TEMP -+>) -+ xxmrghd VSR(POLY_L),VSR(ZERO),VSR(POLY) -+ -+ lxvd2x VSR(D),0,X C load 'X' pointer -+ C byte-reverse of each doubleword permuting on little-endian mode -+IF_LE(< -+ vperm D,D,D,LE_MASK -+>) -+ -+ C --- process 4 blocks '128-bit each' per one loop --- -+ -+ srdi. 7,LENGTH,6 C 4-blocks loop count 'LENGTH / (4 * 16)' -+ beq L2x -+ -+ mtctr 7 C assign counter register to loop count -+ -+ C store non-volatile vector registers -+ addi 8,SP,-64 -+ stvx 20,0,8 -+ addi 8,8,16 -+ stvx 21,0,8 -+ addi 8,8,16 -+ stvx 22,0,8 -+ addi 8,8,16 -+ stvx 23,0,8 -+ -+ C load table elements -+ li 8,1*16 -+ li 9,2*16 -+ li 10,3*16 -+ lxvd2x VSR(H1M),0,TABLE -+ lxvd2x VSR(H1L),8,TABLE -+ lxvd2x VSR(H2M),9,TABLE -+ lxvd2x VSR(H2L),10,TABLE -+ li 7,4*16 -+ li 8,5*16 -+ li 9,6*16 -+ li 10,7*16 -+ lxvd2x VSR(H3M),7,TABLE -+ lxvd2x VSR(H3L),8,TABLE -+ lxvd2x VSR(H4M),9,TABLE -+ lxvd2x VSR(H4L),10,TABLE -+ -+ li 8,0x10 -+ li 9,0x20 -+ li 10,0x30 -+.align 5 -+L4x_loop: -+ C input loading -+ lxvd2x VSR(C0),0,DATA C load C0 -+ lxvd2x VSR(C1),8,DATA C load C1 -+ lxvd2x VSR(C2),9,DATA C load C2 -+ lxvd2x VSR(C3),10,DATA C load C3 -+ -+IF_LE(< -+ vperm C0,C0,C0,LE_MASK -+ vperm C1,C1,C1,LE_MASK -+ vperm C2,C2,C2,LE_MASK -+ vperm C3,C3,C3,LE_MASK -+>) -+ -+ C previous digest combining -+ vxor C0,C0,D -+ -+ C polynomial multiplication -+ vpmsumd F2,H3L,C1 -+ vpmsumd R2,H3M,C1 -+ vpmsumd F3,H2L,C2 -+ vpmsumd R3,H2M,C2 -+ vpmsumd F4,H1L,C3 -+ vpmsumd R4,H1M,C3 -+ vpmsumd F,H4L,C0 -+ vpmsumd R,H4M,C0 -+ -+ C deferred recombination of partial products -+ vxor F3,F3,F4 -+ vxor R3,R3,R4 -+ vxor F,F,F2 -+ vxor R,R,R2 -+ vxor F,F,F3 -+ vxor R,R,R3 -+ -+ C reduction -+ vpmsumd T,F,POLY_L -+ xxswapd VSR(D),VSR(F) -+ vxor R,R,T -+ vxor D,R,D -+ -+ addi DATA,DATA,0x40 -+ bdnz L4x_loop -+ -+ C restore non-volatile vector registers -+ addi 8,SP,-64 -+ lvx 20,0,8 -+ addi 8,8,16 -+ lvx 21,0,8 -+ addi 8,8,16 -+ lvx 22,0,8 -+ addi 8,8,16 -+ lvx 23,0,8 -+ -+ clrldi LENGTH,LENGTH,58 C 'set the high-order 58 bits to zeros' -+L2x: -+ C --- process 2 blocks --- -+ -+ srdi. 
7,LENGTH,5 C 'LENGTH / (2 * 16)' -+ beq L1x -+ -+ C load table elements -+ li 8,1*16 -+ li 9,2*16 -+ li 10,3*16 -+ lxvd2x VSR(H1M),0,TABLE -+ lxvd2x VSR(H1L),8,TABLE -+ lxvd2x VSR(H2M),9,TABLE -+ lxvd2x VSR(H2L),10,TABLE -+ -+ C input loading -+ li 10,0x10 -+ lxvd2x VSR(C0),0,DATA C load C0 -+ lxvd2x VSR(C1),10,DATA C load C1 -+ -+IF_LE(< -+ vperm C0,C0,C0,LE_MASK -+ vperm C1,C1,C1,LE_MASK -+>) -+ -+ C previous digest combining -+ vxor C0,C0,D -+ -+ C polynomial multiplication -+ vpmsumd F2,H1L,C1 -+ vpmsumd R2,H1M,C1 -+ vpmsumd F,H2L,C0 -+ vpmsumd R,H2M,C0 -+ -+ C deferred recombination of partial products -+ vxor F,F,F2 -+ vxor R,R,R2 -+ -+ C reduction -+ vpmsumd T,F,POLY_L -+ xxswapd VSR(D),VSR(F) -+ vxor R,R,T -+ vxor D,R,D -+ -+ addi DATA,DATA,0x20 -+ clrldi LENGTH,LENGTH,59 C 'set the high-order 59 bits to zeros' -+L1x: -+ C --- process 1 block --- -+ -+ srdi. 7,LENGTH,4 C 'LENGTH / (1 * 16)' -+ beq Lmod -+ -+ C load table elements -+ li 8,1*16 -+ lxvd2x VSR(H1M),0,TABLE -+ lxvd2x VSR(H1L),8,TABLE -+ -+ C input loading -+ lxvd2x VSR(C0),0,DATA C load C0 -+ -+IF_LE(< -+ vperm C0,C0,C0,LE_MASK -+>) -+ -+ C previous digest combining -+ vxor C0,C0,D -+ -+ C polynomial multiplication -+ vpmsumd F,H1L,C0 -+ vpmsumd R,H1M,C0 -+ -+ C reduction -+ vpmsumd T,F,POLY_L -+ xxswapd VSR(D),VSR(F) -+ vxor R,R,T -+ vxor D,R,D -+ -+ addi DATA,DATA,0x10 -+ clrldi LENGTH,LENGTH,60 C 'set the high-order 60 bits to zeros' -+Lmod: -+ C --- process the modulo bytes, padding the low-order bytes with zeros --- -+ -+ cmpldi LENGTH,0 -+ beq Ldone -+ -+ C load table elements -+ li 8,1*16 -+ lxvd2x VSR(H1M),0,TABLE -+ lxvd2x VSR(H1L),8,TABLE -+ -+ C push every modulo byte to the stack and load them with padding into vector register -+ vxor ZERO,ZERO,ZERO -+ addi 8,SP,-16 -+ stvx ZERO,0,8 -+Lstb_loop: -+ subic. LENGTH,LENGTH,1 -+ lbzx 7,LENGTH,DATA -+ stbx 7,LENGTH,8 -+ bne Lstb_loop -+ lxvd2x VSR(C0),0,8 -+ -+IF_LE(< -+ vperm C0,C0,C0,LE_MASK -+>) -+ -+ C previous digest combining -+ vxor C0,C0,D -+ -+ C polynomial multiplication -+ vpmsumd F,H1L,C0 -+ vpmsumd R,H1M,C0 -+ -+ C reduction -+ vpmsumd T,F,POLY_L -+ xxswapd VSR(D),VSR(F) -+ vxor R,R,T -+ vxor D,R,D -+ -+Ldone: -+ C byte-reverse of each doubleword permuting on little-endian mode -+IF_LE(< -+ vperm D,D,D,LE_MASK -+>) -+ stxvd2x VSR(D),0,X C store digest 'D' -+ -+ blr -+EPILOGUE(_nettle_gcm_hash) -+ -+.data -+ C 0xC2000000000000000000000000000001 -+.polynomial: -+.align 4 -+IF_BE(< -+.byte 0xC2 -+.rept 14 -+.byte 0x00 -+.endr -+.byte 0x01 -+>,< -+.byte 0x01 -+.rept 14 -+.byte 0x00 -+.endr -+.byte 0xC2 -+>) diff --git a/SOURCES/nettle-3.4.1-rsa-decrypt.patch b/SOURCES/nettle-3.4.1-rsa-decrypt.patch deleted file mode 100644 index ecfba91..0000000 --- a/SOURCES/nettle-3.4.1-rsa-decrypt.patch +++ /dev/null @@ -1,609 +0,0 @@ -From 5646ca77ee92de0ae33e7d2e0a3383c61a4091ed Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Niels=20M=C3=B6ller?= -Date: Thu, 6 May 2021 21:30:23 +0200 -Subject: [PATCH 1/4] Add check that message length to _pkcs1_sec_decrypt is - valid. - -* pkcs1-sec-decrypt.c (_pkcs1_sec_decrypt): Check that message -length is valid, for given key size. -* testsuite/rsa-sec-decrypt-test.c (test_main): Add test cases for -calls to rsa_sec_decrypt specifying a too large message length. 
- -(cherry picked from commit 7616541e6eff73353bf682c62e3a68e4fe696707) ---- - ChangeLog | 8 ++++++++ - pkcs1-sec-decrypt.c | 4 +++- - testsuite/rsa-sec-decrypt-test.c | 17 ++++++++++++++++- - 3 files changed, 27 insertions(+), 2 deletions(-) - -diff --git a/ChangeLog b/ChangeLog -index 4c7338a1..7cd0455e 100644 ---- a/ChangeLog -+++ b/ChangeLog -@@ -1,3 +1,11 @@ -+2021-05-06 Niels Möller -+ -+ Bug fixes merged from from 3.7.3 release (starting from 2021-05-06). -+ * pkcs1-sec-decrypt.c (_pkcs1_sec_decrypt): Check that message -+ length is valid, for given key size. -+ * testsuite/rsa-sec-decrypt-test.c (test_main): Add test cases for -+ calls to rsa_sec_decrypt specifying a too large message length. -+ - 2018-12-04 Niels Möller - - * Released nettle-3.4.1. -diff --git a/pkcs1-sec-decrypt.c b/pkcs1-sec-decrypt.c -index 722044b0..02fd07e1 100644 ---- a/pkcs1-sec-decrypt.c -+++ b/pkcs1-sec-decrypt.c -@@ -64,7 +64,9 @@ _pkcs1_sec_decrypt (size_t length, uint8_t *message, - volatile int ok; - size_t i, t; - -- assert (padded_message_length >= length); -+ /* Message independent branch */ -+ if (length + 11 > padded_message_length) -+ return 0; - - t = padded_message_length - length - 1; - -diff --git a/testsuite/rsa-sec-decrypt-test.c b/testsuite/rsa-sec-decrypt-test.c -index 64f0b13c..4a9f301b 100644 ---- a/testsuite/rsa-sec-decrypt-test.c -+++ b/testsuite/rsa-sec-decrypt-test.c -@@ -55,6 +55,7 @@ rsa_decrypt_for_test(const struct rsa_public_key *pub, - #endif - - #define PAYLOAD_SIZE 50 -+#define DECRYPTED_SIZE 256 - void - test_main(void) - { -@@ -63,7 +64,7 @@ test_main(void) - struct knuth_lfib_ctx random_ctx; - - uint8_t plaintext[PAYLOAD_SIZE]; -- uint8_t decrypted[PAYLOAD_SIZE]; -+ uint8_t decrypted[DECRYPTED_SIZE]; - uint8_t verifybad[PAYLOAD_SIZE]; - unsigned n_size = 1024; - mpz_t gibberish; -@@ -98,6 +99,20 @@ test_main(void) - PAYLOAD_SIZE, decrypted, gibberish) == 1); - ASSERT (MEMEQ (PAYLOAD_SIZE, plaintext, decrypted)); - -+ ASSERT (pub.size > 10); -+ ASSERT (pub.size <= DECRYPTED_SIZE); -+ -+ /* Check that too large message length is rejected, largest -+ valid size is pub.size - 11. */ -+ ASSERT (!rsa_decrypt_for_test (&pub, &key, &random_ctx, -+ (nettle_random_func *) knuth_lfib_random, -+ pub.size - 10, decrypted, gibberish)); -+ -+ /* This case used to result in arithmetic underflow and a crash. */ -+ ASSERT (!rsa_decrypt_for_test (&pub, &key, &random_ctx, -+ (nettle_random_func *) knuth_lfib_random, -+ pub.size, decrypted, gibberish)); -+ - /* bad one */ - memcpy(decrypted, verifybad, PAYLOAD_SIZE); - nettle_mpz_random_size(garbage, &random_ctx, --- -2.31.1 - - -From 743cdf38353f6dd5d3d91eadc769106cfc116301 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Niels=20M=C3=B6ller?= -Date: Tue, 8 Jun 2021 21:30:48 +0200 -Subject: [PATCH 2/4] Fix comment typos. - -(cherry picked from commit 0a714543136de97c7fd34f1c6ac1592dc5036879) ---- - pkcs1-sec-decrypt.c | 4 ++-- - 1 file changed, 2 insertions(+), 2 deletions(-) - -diff --git a/pkcs1-sec-decrypt.c b/pkcs1-sec-decrypt.c -index 02fd07e1..a7f85c2e 100644 ---- a/pkcs1-sec-decrypt.c -+++ b/pkcs1-sec-decrypt.c -@@ -102,8 +102,8 @@ _pkcs1_sec_decrypt_variable(size_t *length, uint8_t *message, - - /* length is discovered in a side-channel silent way. - * not_found goes to 0 when the terminator is found. 
-- * offset strts at 3 as it includes the terminator and -- * the fomat bytes already */ -+ * offset starts at 3 as it includes the terminator and -+ * the format bytes already */ - offset = 3; - for (i = 2; i < padded_message_length; i++) - { --- -2.31.1 - - -From dfce46c4540d2abf040073070cff15f9d1708050 Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Niels=20M=C3=B6ller?= -Date: Tue, 8 Jun 2021 21:31:39 +0200 -Subject: [PATCH 3/4] Change _rsa_sec_compute_root_tr to take a fix input size. - -Improves consistency with _rsa_sec_compute_root, and fixes zero-input bug. - -(cherry picked from commit 485b5e2820a057e873b1ba812fdb39cae4adf98c) ---- - ChangeLog | 17 +++++++++- - rsa-decrypt-tr.c | 7 ++--- - rsa-internal.h | 4 +-- - rsa-sec-decrypt.c | 9 ++++-- - rsa-sign-tr.c | 61 +++++++++++++++++------------------- - testsuite/rsa-encrypt-test.c | 14 ++++++++- - 6 files changed, 69 insertions(+), 43 deletions(-) - -diff --git a/ChangeLog b/ChangeLog -index 7cd0455e..ae660fc0 100644 ---- a/ChangeLog -+++ b/ChangeLog -@@ -1,6 +1,21 @@ --2021-05-06 Niels Möller -+2021-05-14 Niels Möller - - Bug fixes merged from from 3.7.3 release (starting from 2021-05-06). -+ * rsa-sign-tr.c (rsa_sec_blind): Delete mn argument. -+ (_rsa_sec_compute_root_tr): Delete mn argument, instead require -+ that input size matches key size. Rearrange use of temporary -+ storage, to support in-place operation, x == m. Update all -+ callers. -+ -+ * rsa-decrypt-tr.c (rsa_decrypt_tr): Make zero-padded copy of -+ input, for calling _rsa_sec_compute_root_tr. -+ * rsa-sec-decrypt.c (rsa_sec_decrypt): Likewise. -+ -+ * testsuite/rsa-encrypt-test.c (test_main): Test calling all of -+ rsa_decrypt, rsa_decrypt_tr, and rsa_sec_decrypt with zero input. -+ -+2021-05-06 Niels Möller -+ - * pkcs1-sec-decrypt.c (_pkcs1_sec_decrypt): Check that message - length is valid, for given key size. - * testsuite/rsa-sec-decrypt-test.c (test_main): Add test cases for -diff --git a/rsa-decrypt-tr.c b/rsa-decrypt-tr.c -index 5dfb91b1..c118e852 100644 ---- a/rsa-decrypt-tr.c -+++ b/rsa-decrypt-tr.c -@@ -52,14 +52,13 @@ rsa_decrypt_tr(const struct rsa_public_key *pub, - mp_size_t key_limb_size; - int res; - -- key_limb_size = NETTLE_OCTET_SIZE_TO_LIMB_SIZE(key->size); -+ key_limb_size = mpz_size(pub->n); - - TMP_GMP_ALLOC (m, key_limb_size); - TMP_GMP_ALLOC (em, key->size); -+ mpz_limbs_copy(m, gibberish, key_limb_size); - -- res = _rsa_sec_compute_root_tr (pub, key, random_ctx, random, m, -- mpz_limbs_read(gibberish), -- mpz_size(gibberish)); -+ res = _rsa_sec_compute_root_tr (pub, key, random_ctx, random, m, m); - - mpn_get_base256 (em, key->size, m, key_limb_size); - -diff --git a/rsa-internal.h b/rsa-internal.h -index bd667bc2..64a7edf6 100644 ---- a/rsa-internal.h -+++ b/rsa-internal.h -@@ -53,12 +53,12 @@ _rsa_sec_compute_root(const struct rsa_private_key *key, - mp_limb_t *scratch); - - /* Safe side-channel silent variant, using RSA blinding, and checking the -- * result after CRT. */ -+ * result after CRT. In-place calls, with x == m, is allowed. */ - int - _rsa_sec_compute_root_tr(const struct rsa_public_key *pub, - const struct rsa_private_key *key, - void *random_ctx, nettle_random_func *random, -- mp_limb_t *x, const mp_limb_t *m, size_t mn); -+ mp_limb_t *x, const mp_limb_t *m); - - /* additional resistance to memory access side-channel attacks. 
- * Note: message buffer is returned unchanged on error */ -diff --git a/rsa-sec-decrypt.c b/rsa-sec-decrypt.c -index e6a4b267..633a6852 100644 ---- a/rsa-sec-decrypt.c -+++ b/rsa-sec-decrypt.c -@@ -57,9 +57,12 @@ rsa_sec_decrypt(const struct rsa_public_key *pub, - TMP_GMP_ALLOC (m, mpz_size(pub->n)); - TMP_GMP_ALLOC (em, key->size); - -- res = _rsa_sec_compute_root_tr (pub, key, random_ctx, random, m, -- mpz_limbs_read(gibberish), -- mpz_size(gibberish)); -+ /* We need a copy because m can be shorter than key_size, -+ * but _rsa_sec_compute_root_tr expect all inputs to be -+ * normalized to a key_size long buffer length */ -+ mpz_limbs_copy(m, gibberish, mpz_size(pub->n)); -+ -+ res = _rsa_sec_compute_root_tr (pub, key, random_ctx, random, m, m); - - mpn_get_base256 (em, key->size, m, mpz_size(pub->n)); - -diff --git a/rsa-sign-tr.c b/rsa-sign-tr.c -index 59c9bd07..141a52c7 100644 ---- a/rsa-sign-tr.c -+++ b/rsa-sign-tr.c -@@ -131,35 +131,34 @@ int - _rsa_sec_compute_root_tr(const struct rsa_public_key *pub, - const struct rsa_private_key *key, - void *random_ctx, nettle_random_func *random, -- mp_limb_t *x, const mp_limb_t *m, size_t mn) -+ mp_limb_t *x, const mp_limb_t *m) - { -+ mp_size_t nn; - mpz_t mz; - mpz_t xz; - int res; - -- mpz_init(mz); - mpz_init(xz); - -- mpn_copyi(mpz_limbs_write(mz, mn), m, mn); -- mpz_limbs_finish(mz, mn); -+ nn = mpz_size (pub->n); - -- res = rsa_compute_root_tr(pub, key, random_ctx, random, xz, mz); -+ res = rsa_compute_root_tr(pub, key, random_ctx, random, xz, -+ mpz_roinit_n(mz, m, nn)); - - if (res) -- mpz_limbs_copy(x, xz, mpz_size(pub->n)); -+ mpz_limbs_copy(x, xz, nn); - -- mpz_clear(mz); - mpz_clear(xz); - return res; - } - #else - /* Blinds m, by computing c = m r^e (mod n), for a random r. Also -- returns the inverse (ri), for use by rsa_unblind. */ -+ returns the inverse (ri), for use by rsa_unblind. Must have c != m, -+ no in-place operation.*/ - static void - rsa_sec_blind (const struct rsa_public_key *pub, - void *random_ctx, nettle_random_func *random, -- mp_limb_t *c, mp_limb_t *ri, const mp_limb_t *m, -- mp_size_t mn) -+ mp_limb_t *c, mp_limb_t *ri, const mp_limb_t *m) - { - const mp_limb_t *ep = mpz_limbs_read (pub->e); - const mp_limb_t *np = mpz_limbs_read (pub->n); -@@ -177,15 +176,15 @@ rsa_sec_blind (const struct rsa_public_key *pub, - - /* c = m*(r^e) mod n */ - itch = mpn_sec_powm_itch(nn, ebn, nn); -- i2 = mpn_sec_mul_itch(nn, mn); -+ i2 = mpn_sec_mul_itch(nn, nn); - itch = MAX(itch, i2); -- i2 = mpn_sec_div_r_itch(nn + mn, nn); -+ i2 = mpn_sec_div_r_itch(2*nn, nn); - itch = MAX(itch, i2); - i2 = mpn_sec_invert_itch(nn); - itch = MAX(itch, i2); - -- TMP_GMP_ALLOC (tp, nn + mn + itch); -- scratch = tp + nn + mn; -+ TMP_GMP_ALLOC (tp, 2*nn + itch); -+ scratch = tp + 2*nn; - - /* ri = r^(-1) */ - do -@@ -198,9 +197,8 @@ rsa_sec_blind (const struct rsa_public_key *pub, - while (!mpn_sec_invert (ri, tp, np, nn, 2 * nn * GMP_NUMB_BITS, scratch)); - - mpn_sec_powm (c, rp, nn, ep, ebn, np, nn, scratch); -- /* normally mn == nn, but m can be smaller in some cases */ -- mpn_sec_mul (tp, c, nn, m, mn, scratch); -- mpn_sec_div_r (tp, nn + mn, np, nn, scratch); -+ mpn_sec_mul (tp, c, nn, m, nn, scratch); -+ mpn_sec_div_r (tp, 2*nn, np, nn, scratch); - mpn_copyi(c, tp, nn); - - TMP_GMP_FREE (r); -@@ -208,7 +206,7 @@ rsa_sec_blind (const struct rsa_public_key *pub, - TMP_GMP_FREE (tp); - } - --/* m = c ri mod n */ -+/* m = c ri mod n. Allows x == c. 
*/ - static void - rsa_sec_unblind (const struct rsa_public_key *pub, - mp_limb_t *x, mp_limb_t *ri, const mp_limb_t *c) -@@ -298,7 +296,7 @@ int - _rsa_sec_compute_root_tr(const struct rsa_public_key *pub, - const struct rsa_private_key *key, - void *random_ctx, nettle_random_func *random, -- mp_limb_t *x, const mp_limb_t *m, size_t mn) -+ mp_limb_t *x, const mp_limb_t *m) - { - TMP_GMP_DECL (c, mp_limb_t); - TMP_GMP_DECL (ri, mp_limb_t); -@@ -306,7 +304,7 @@ _rsa_sec_compute_root_tr(const struct rsa_public_key *pub, - size_t key_limb_size; - int ret; - -- key_limb_size = NETTLE_OCTET_SIZE_TO_LIMB_SIZE(key->size); -+ key_limb_size = mpz_size(pub->n); - - /* mpz_powm_sec handles only odd moduli. If p, q or n is even, the - key is invalid and rejected by rsa_private_key_prepare. However, -@@ -320,19 +318,18 @@ _rsa_sec_compute_root_tr(const struct rsa_public_key *pub, - } - - assert(mpz_size(pub->n) == key_limb_size); -- assert(mn <= key_limb_size); - - TMP_GMP_ALLOC (c, key_limb_size); - TMP_GMP_ALLOC (ri, key_limb_size); - TMP_GMP_ALLOC (scratch, _rsa_sec_compute_root_itch(key)); - -- rsa_sec_blind (pub, random_ctx, random, x, ri, m, mn); -+ rsa_sec_blind (pub, random_ctx, random, c, ri, m); - -- _rsa_sec_compute_root(key, c, x, scratch); -+ _rsa_sec_compute_root(key, x, c, scratch); - -- ret = rsa_sec_check_root(pub, c, x); -+ ret = rsa_sec_check_root(pub, x, c); - -- rsa_sec_unblind(pub, x, ri, c); -+ rsa_sec_unblind(pub, x, ri, x); - - cnd_mpn_zero(1 - ret, x, key_limb_size); - -@@ -356,17 +353,17 @@ rsa_compute_root_tr(const struct rsa_public_key *pub, - mpz_t x, const mpz_t m) - { - TMP_GMP_DECL (l, mp_limb_t); -+ mp_size_t nn = mpz_size(pub->n); - int res; - -- mp_size_t l_size = NETTLE_OCTET_SIZE_TO_LIMB_SIZE(key->size); -- TMP_GMP_ALLOC (l, l_size); -+ TMP_GMP_ALLOC (l, nn); -+ mpz_limbs_copy(l, m, nn); - -- res = _rsa_sec_compute_root_tr (pub, key, random_ctx, random, l, -- mpz_limbs_read(m), mpz_size(m)); -+ res = _rsa_sec_compute_root_tr (pub, key, random_ctx, random, l, l); - if (res) { -- mp_limb_t *xp = mpz_limbs_write (x, l_size); -- mpn_copyi (xp, l, l_size); -- mpz_limbs_finish (x, l_size); -+ mp_limb_t *xp = mpz_limbs_write (x, nn); -+ mpn_copyi (xp, l, nn); -+ mpz_limbs_finish (x, nn); - } - - TMP_GMP_FREE (l); -diff --git a/testsuite/rsa-encrypt-test.c b/testsuite/rsa-encrypt-test.c -index 87525f78..d3bc374b 100644 ---- a/testsuite/rsa-encrypt-test.c -+++ b/testsuite/rsa-encrypt-test.c -@@ -19,6 +19,7 @@ test_main(void) - uint8_t after; - - mpz_t gibberish; -+ mpz_t zero; - - rsa_private_key_init(&key); - rsa_public_key_init(&pub); -@@ -101,6 +102,17 @@ test_main(void) - ASSERT(decrypted[decrypted_length] == after); - ASSERT(decrypted[0] == 'A'); - -+ /* Test zero input. */ -+ mpz_init_set_ui (zero, 0); -+ decrypted_length = msg_length; -+ ASSERT(!rsa_decrypt(&key, &decrypted_length, decrypted, zero)); -+ ASSERT(!rsa_decrypt_tr(&pub, &key, -+ &lfib, (nettle_random_func *) knuth_lfib_random, -+ &decrypted_length, decrypted, zero)); -+ ASSERT(!rsa_sec_decrypt(&pub, &key, -+ &lfib, (nettle_random_func *) knuth_lfib_random, -+ decrypted_length, decrypted, zero)); -+ ASSERT(decrypted_length == msg_length); - - /* Test invalid key. 
*/ - mpz_add_ui (key.q, key.q, 2); -@@ -112,6 +124,6 @@ test_main(void) - rsa_private_key_clear(&key); - rsa_public_key_clear(&pub); - mpz_clear(gibberish); -+ mpz_clear(zero); - free(decrypted); - } -- --- -2.31.1 - - -From f601611b3c315aba373c0ab2ddf24772e88c1b3e Mon Sep 17 00:00:00 2001 -From: =?UTF-8?q?Niels=20M=C3=B6ller?= -Date: Tue, 8 Jun 2021 21:32:38 +0200 -Subject: [PATCH 4/4] Add input check to rsa_decrypt family of functions. - -(cherry picked from commit 0ad0b5df315665250dfdaa4a1e087f4799edaefe) ---- - ChangeLog | 10 +++++++++- - rsa-decrypt-tr.c | 4 ++++ - rsa-decrypt.c | 10 ++++++++++ - rsa-sec-decrypt.c | 4 ++++ - rsa.h | 5 +++-- - testsuite/rsa-encrypt-test.c | 38 ++++++++++++++++++++++++++++++------ - 6 files changed, 62 insertions(+), 9 deletions(-) - -diff --git a/ChangeLog b/ChangeLog -index ae660fc0..27f022db 100644 ---- a/ChangeLog -+++ b/ChangeLog -@@ -1,6 +1,14 @@ --2021-05-14 Niels Möller -+2021-05-17 Niels Möller - - Bug fixes merged from from 3.7.3 release (starting from 2021-05-06). -+ * rsa-decrypt-tr.c (rsa_decrypt_tr): Check up-front that input is -+ in range. -+ * rsa-sec-decrypt.c (rsa_sec_decrypt): Likewise. -+ * rsa-decrypt.c (rsa_decrypt): Likewise. -+ * testsuite/rsa-encrypt-test.c (test_main): Add tests with input > n. -+ -+2021-05-14 Niels Möller -+ - * rsa-sign-tr.c (rsa_sec_blind): Delete mn argument. - (_rsa_sec_compute_root_tr): Delete mn argument, instead require - that input size matches key size. Rearrange use of temporary -diff --git a/rsa-decrypt-tr.c b/rsa-decrypt-tr.c -index c118e852..1ba3d286 100644 ---- a/rsa-decrypt-tr.c -+++ b/rsa-decrypt-tr.c -@@ -52,6 +52,10 @@ rsa_decrypt_tr(const struct rsa_public_key *pub, - mp_size_t key_limb_size; - int res; - -+ /* First check that input is in range. */ -+ if (mpz_sgn (gibberish) < 0 || mpz_cmp (gibberish, pub->n) >= 0) -+ return 0; -+ - key_limb_size = mpz_size(pub->n); - - TMP_GMP_ALLOC (m, key_limb_size); -diff --git a/rsa-decrypt.c b/rsa-decrypt.c -index 7681439d..540d8baa 100644 ---- a/rsa-decrypt.c -+++ b/rsa-decrypt.c -@@ -48,6 +48,16 @@ rsa_decrypt(const struct rsa_private_key *key, - int res; - - mpz_init(m); -+ -+ /* First check that input is in range. Since we don't have the -+ public key available here, we need to reconstruct n. */ -+ mpz_mul (m, key->p, key->q); -+ if (mpz_sgn (gibberish) < 0 || mpz_cmp (gibberish, m) >= 0) -+ { -+ mpz_clear (m); -+ return 0; -+ } -+ - rsa_compute_root(key, m, gibberish); - - res = pkcs1_decrypt (key->size, m, length, message); -diff --git a/rsa-sec-decrypt.c b/rsa-sec-decrypt.c -index 633a6852..53113c69 100644 ---- a/rsa-sec-decrypt.c -+++ b/rsa-sec-decrypt.c -@@ -54,6 +54,10 @@ rsa_sec_decrypt(const struct rsa_public_key *pub, - TMP_GMP_DECL (em, uint8_t); - int res; - -+ /* First check that input is in range. */ -+ if (mpz_sgn (gibberish) < 0 || mpz_cmp (gibberish, pub->n) >= 0) -+ return 0; -+ - TMP_GMP_ALLOC (m, mpz_size(pub->n)); - TMP_GMP_ALLOC (em, key->size); - -diff --git a/rsa.h b/rsa.h -index 0aac6a26..54c35688 100644 ---- a/rsa.h -+++ b/rsa.h -@@ -433,13 +433,14 @@ rsa_sec_decrypt(const struct rsa_public_key *pub, - size_t length, uint8_t *message, - const mpz_t gibberish); - --/* Compute x, the e:th root of m. Calling it with x == m is allowed. */ -+/* Compute x, the e:th root of m. Calling it with x == m is allowed. -+ It is required that 0 <= m < n. */ - void - rsa_compute_root(const struct rsa_private_key *key, - mpz_t x, const mpz_t m); - - /* Safer variant, using RSA blinding, and checking the result after -- CRT. */ -+ CRT. 
It is required that 0 <= m < n. */ - int - rsa_compute_root_tr(const struct rsa_public_key *pub, - const struct rsa_private_key *key, -diff --git a/testsuite/rsa-encrypt-test.c b/testsuite/rsa-encrypt-test.c -index d3bc374b..d1a440f6 100644 ---- a/testsuite/rsa-encrypt-test.c -+++ b/testsuite/rsa-encrypt-test.c -@@ -19,11 +19,12 @@ test_main(void) - uint8_t after; - - mpz_t gibberish; -- mpz_t zero; -+ mpz_t bad_input; - - rsa_private_key_init(&key); - rsa_public_key_init(&pub); - mpz_init(gibberish); -+ mpz_init(bad_input); - - knuth_lfib_init(&lfib, 17); - -@@ -103,15 +104,40 @@ test_main(void) - ASSERT(decrypted[0] == 'A'); - - /* Test zero input. */ -- mpz_init_set_ui (zero, 0); -+ mpz_set_ui (bad_input, 0); - decrypted_length = msg_length; -- ASSERT(!rsa_decrypt(&key, &decrypted_length, decrypted, zero)); -+ ASSERT(!rsa_decrypt(&key, &decrypted_length, decrypted, bad_input)); - ASSERT(!rsa_decrypt_tr(&pub, &key, - &lfib, (nettle_random_func *) knuth_lfib_random, -- &decrypted_length, decrypted, zero)); -+ &decrypted_length, decrypted, bad_input)); - ASSERT(!rsa_sec_decrypt(&pub, &key, - &lfib, (nettle_random_func *) knuth_lfib_random, -- decrypted_length, decrypted, zero)); -+ decrypted_length, decrypted, bad_input)); -+ ASSERT(decrypted_length == msg_length); -+ -+ /* Test input that is slightly larger than n */ -+ mpz_add(bad_input, gibberish, pub.n); -+ decrypted_length = msg_length; -+ ASSERT(!rsa_decrypt(&key, &decrypted_length, decrypted, bad_input)); -+ ASSERT(!rsa_decrypt_tr(&pub, &key, -+ &lfib, (nettle_random_func *) knuth_lfib_random, -+ &decrypted_length, decrypted, bad_input)); -+ ASSERT(!rsa_sec_decrypt(&pub, &key, -+ &lfib, (nettle_random_func *) knuth_lfib_random, -+ decrypted_length, decrypted, bad_input)); -+ ASSERT(decrypted_length == msg_length); -+ -+ /* Test input that is considerably larger than n */ -+ mpz_mul_2exp (bad_input, pub.n, 100); -+ mpz_add (bad_input, bad_input, gibberish); -+ decrypted_length = msg_length; -+ ASSERT(!rsa_decrypt(&key, &decrypted_length, decrypted, bad_input)); -+ ASSERT(!rsa_decrypt_tr(&pub, &key, -+ &lfib, (nettle_random_func *) knuth_lfib_random, -+ &decrypted_length, decrypted, bad_input)); -+ ASSERT(!rsa_sec_decrypt(&pub, &key, -+ &lfib, (nettle_random_func *) knuth_lfib_random, -+ decrypted_length, decrypted, bad_input)); - ASSERT(decrypted_length == msg_length); - - /* Test invalid key. 
*/ -@@ -124,6 +150,6 @@ test_main(void) - rsa_private_key_clear(&key); - rsa_public_key_clear(&pub); - mpz_clear(gibberish); -- mpz_clear(zero); -+ mpz_clear(bad_input); - free(decrypted); - } --- -2.31.1 - diff --git a/SPECS/nettle.spec b/SPECS/nettle.spec deleted file mode 100644 index 5bde11d..0000000 --- a/SPECS/nettle.spec +++ /dev/null @@ -1,281 +0,0 @@ -%bcond_without fips - -Name: nettle -Version: 3.4.1 -Release: 7%{?dist} -Summary: A low-level cryptographic library - -Group: Development/Libraries -License: LGPLv3+ or GPLv2+ -URL: http://www.lysator.liu.se/~nisse/nettle/ -Source0: %{name}-%{version}-hobbled.tar.xz -#Source0: http://www.lysator.liu.se/~nisse/archive/%{name}-%{version}.tar.gz -Patch0: nettle-3.3-remove-ecc-testsuite.patch -Patch1: nettle-3.4-annocheck.patch -Patch2: nettle-3.4.1-enable-intel-cet.patch -# https://lists.lysator.liu.se/pipermail/nettle-bugs/2021/009458.html -Patch3: nettle-3.4.1-ecdsa-verify.patch -Patch4: nettle-3.4.1-powerpc64-aes-asm.patch -Patch5: nettle-3.4.1-powerpc64-ghash-asm.patch -Patch6: nettle-3.4.1-rsa-decrypt.patch - -BuildRequires: gcc -BuildRequires: gmp-devel, m4 -BuildRequires: libtool, automake, autoconf, gettext-devel -%if %{with fips} -BuildRequires: fipscheck -%endif - -Requires(post): info -Requires(preun): info - - -%package devel -Summary: Development headers for a low-level cryptographic library -Group: Development/Libraries -Requires: %{name} = %{version}-%{release} -Requires: gmp-devel%{?_isa} - -%description -Nettle is a cryptographic library that is designed to fit easily in more -or less any context: In crypto toolkits for object-oriented languages -(C++, Python, Pike, ...), in applications like LSH or GNUPG, or even in -kernel space. - -%description devel -Nettle is a cryptographic library that is designed to fit easily in more -or less any context: In crypto toolkits for object-oriented languages -(C++, Python, Pike, ...), in applications like LSH or GNUPG, or even in -kernel space. This package contains the files needed for developing -applications with nettle. 
- - -%prep -%setup -q -# Disable -ggdb3 which makes debugedit unhappy -sed s/ggdb3/g/ -i configure -sed 's/ecc-192.c//g' -i Makefile.in -sed 's/ecc-224.c//g' -i Makefile.in -%patch0 -p1 -%patch1 -p1 -%patch2 -p1 -%patch3 -p1 -%patch4 -p1 -%patch5 -p1 -%patch6 -p1 - -%build -autoreconf -ifv -%configure --enable-shared --enable-fat -make %{?_smp_mflags} - -%if %{with fips} -%define __spec_install_post \ - %{?__debug_package:%{__debug_install_post}} \ - %{__arch_install_post} \ - %{__os_install_post} \ - fipshmac -d $RPM_BUILD_ROOT%{_libdir} $RPM_BUILD_ROOT%{_libdir}/libnettle.so.6.* \ - fipshmac -d $RPM_BUILD_ROOT%{_libdir} $RPM_BUILD_ROOT%{_libdir}/libhogweed.so.4.* \ - file=`basename $RPM_BUILD_ROOT%{_libdir}/libnettle.so.6.*.hmac` && mv $RPM_BUILD_ROOT%{_libdir}/$file $RPM_BUILD_ROOT%{_libdir}/.$file && ln -s .$file $RPM_BUILD_ROOT%{_libdir}/.libnettle.so.6.hmac \ - file=`basename $RPM_BUILD_ROOT%{_libdir}/libhogweed.so.4.*.hmac` && mv $RPM_BUILD_ROOT%{_libdir}/$file $RPM_BUILD_ROOT%{_libdir}/.$file && ln -s .$file $RPM_BUILD_ROOT%{_libdir}/.libhogweed.so.4.hmac \ -%{nil} -%endif - -%install -make install DESTDIR=$RPM_BUILD_ROOT INSTALL="install -p" -make install-shared DESTDIR=$RPM_BUILD_ROOT INSTALL="install -p" -mkdir -p $RPM_BUILD_ROOT%{_infodir} -install -p -m 644 nettle.info $RPM_BUILD_ROOT%{_infodir}/ -rm -f $RPM_BUILD_ROOT%{_libdir}/*.a -rm -f $RPM_BUILD_ROOT%{_infodir}/dir -rm -f $RPM_BUILD_ROOT%{_bindir}/nettle-lfib-stream -rm -f $RPM_BUILD_ROOT%{_bindir}/pkcs1-conv -rm -f $RPM_BUILD_ROOT%{_bindir}/sexp-conv -rm -f $RPM_BUILD_ROOT%{_bindir}/nettle-hash -rm -f $RPM_BUILD_ROOT%{_bindir}/nettle-pbkdf2 - -chmod 0755 $RPM_BUILD_ROOT%{_libdir}/libnettle.so.6.* -chmod 0755 $RPM_BUILD_ROOT%{_libdir}/libhogweed.so.4.* - -%check -make check - -%files -%doc AUTHORS NEWS README TODO -%license COPYINGv2 COPYING.LESSERv3 -%{_infodir}/nettle.info.gz -%{_libdir}/libnettle.so.6 -%{_libdir}/libnettle.so.6.* -%{_libdir}/libhogweed.so.4 -%{_libdir}/libhogweed.so.4.* -%if %{with fips} -%{_libdir}/.libhogweed.so.*.hmac -%{_libdir}/.libnettle.so.*.hmac -%endif - -%files devel -%doc descore.README nettle.html nettle.pdf -%{_includedir}/nettle -%{_libdir}/libnettle.so -%{_libdir}/libhogweed.so -%{_libdir}/pkgconfig/hogweed.pc -%{_libdir}/pkgconfig/nettle.pc - -%post -/sbin/install-info %{_infodir}/%{name}.info %{_infodir}/dir || : -/sbin/ldconfig - -%preun -if [ $1 = 0 ]; then - /sbin/install-info --delete %{_infodir}/%{name}.info %{_infodir}/dir || : -fi - -%postun -p /sbin/ldconfig - - - -%changelog -* Wed Jul 14 2021 Daiki Ueno - 3.4.1-7 -- Backport CVE-2021-3580 from upstream 3.7.3 release (#1967990) - -* Wed Jul 14 2021 Daiki Ueno - 3.4.1-6 -- Enable CTR mode optimization when the block size is 16 - -* Wed Jun 30 2021 Daiki Ueno - 3.4.1-5 -- Backport powerpc64 optimization patches from upstream (#1855228) - Patch from Christopher M. Riedl. 
- -* Wed Apr 7 2021 Daiki Ueno - 3.4.1-4 -- Fix patch application - -* Tue Mar 30 2021 Daiki Ueno - 3.4.1-3 -- Port fixes for potential miscalculation in ecdsa_verify (#1942925) - -* Fri May 15 2020 Anderson Sasaki - 3.4.1-2 -- Enable Intel CET support (#1737542) - -* Tue Dec 11 2018 Daiki Ueno - 3.4.1-1 -- New upstream release - -* Tue Oct 16 2018 Tomáš Mráz - 3.4-4 -- Cover the gaps in annotation coverage for assembler sources -- Add .hmac checksums for FIPS mode integrity checking - -* Thu Feb 08 2018 Fedora Release Engineering - 3.4-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_28_Mass_Rebuild - -* Mon Nov 20 2017 Nikos Mavrogiannopoulos - 3.4-1 -- New upstream release - -* Wed Aug 09 2017 Nikos Mavrogiannopoulos - 3.3-5 -- Removed executables from the library to allow parallel installation - of x86-64 and x86 packages. The executables had testing purpose, and - may be re-introduced in a separate package if needed. - -* Thu Aug 03 2017 Fedora Release Engineering - 3.3-4 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Binutils_Mass_Rebuild - -* Wed Jul 26 2017 Fedora Release Engineering - 3.3-3 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild - -* Fri Feb 10 2017 Fedora Release Engineering - 3.3-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_26_Mass_Rebuild - -* Tue Jul 19 2016 Nikos Mavrogiannopoulos - 3.3-1 -- New upstream release -- Allow arm neon instructions (they are enabled via fat builds) - -* Tue Jul 19 2016 Nikos Mavrogiannopoulos - 3.2-3 -- Backported a fix for more cache silence on RSA and DSA. - -* Thu Feb 18 2016 Nikos Mavrogiannopoulos - 3.2-2 -- Enabled fat builds by default - -* Wed Feb 3 2016 Nikos Mavrogiannopoulos - 3.2-1 -- updated to 3.2 (#1301310) -- Fixed CVE-2015-8803 secp256r1 calculation bug (#1304305) - -* Wed Dec 9 2015 Nikos Mavrogiannopoulos - 3.1.1-6 -- Made version.h architecture independent (#1289938) - -* Wed Dec 2 2015 Nikos Mavrogiannopoulos - 3.1.1-5 -- Disabled arm-neon unconditionally (#1287298) - -* Thu Oct 22 2015 Nikos Mavrogiannopoulos - 3.1.1-4 -- Fixed SHA3 implementation to conform to published version (#1252935) - -* Sun Aug 2 2015 Peter Robinson 3.1.1-3 -- No need to ship license in devel too -- Drop ChangeLog as details are in NEWS - -* Wed Jun 17 2015 Fedora Release Engineering - 3.1.1-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_23_Mass_Rebuild - -* Thu Apr 30 2015 Nikos Mavrogiannopoulos - 3.1.1-1 -- Updated to nettle 3.1.1 - -* Sat Feb 21 2015 Till Maas - 2.7.1-6 -- Rebuilt for Fedora 23 Change - https://fedoraproject.org/wiki/Changes/Harden_all_packages_with_position-independent_code - -* Sun Aug 17 2014 Fedora Release Engineering - 2.7.1-5 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_22_Mass_Rebuild - -* Sat Jun 07 2014 Fedora Release Engineering - 2.7.1-4 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_Mass_Rebuild - -* Fri Jan 10 2014 Nikos Mavrogiannopoulos - 2.7.1-3 -- Corrected bug number in previous comment. 
- -* Fri Dec 13 2013 Nikos Mavrogiannopoulos - 2.7.1-2 -- Added patch nettle-tmpalloc.patch to solve #1051455 - -* Mon Nov 25 2013 Nikos Mavrogiannopoulos - 2.7.1-1 -- Updated to nettle 2.7.1 - -* Sat Aug 03 2013 Fedora Release Engineering - 2.6-3 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_20_Mass_Rebuild - -* Wed Feb 6 2013 Tomáš Mráz - 2.6-2 -- nettle includes use gmp.h - -* Tue Feb 5 2013 Tomáš Mráz - 2.6-1 -- New upstream release - -* Fri Jul 20 2012 Fedora Release Engineering - 2.4-4 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild - -* Thu Jul 05 2012 David Woodhouse - 2.4-3 -- Remove explicit buildroot handling and defattr. - -* Wed Jul 04 2012 David Woodhouse - 2.4-2 -- Review feedback - -* Mon Jun 18 2012 David Woodhouse - 2.4-1 -- Revive package (GnuTLS needs it), disable static, update to current release 2.4 - -* Sat Jul 25 2009 Fedora Release Engineering - 1.15-7 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_12_Mass_Rebuild - -* Wed Feb 25 2009 Fedora Release Engineering - 1.15-6 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_11_Mass_Rebuild - -* Thu Apr 10 2008 Ian Weller 1.15-5 -- Moved static lib to -static - -* Mon Mar 24 2008 Ian Weller 1.15-4 -- Added libraries and ldconfig - -* Mon Feb 18 2008 Ian Weller 1.15-3 -- Added provides -static to -devel - -* Sun Feb 17 2008 Ian Weller 1.15-2 -- Removed redundant requires -- Removed redundant documentation between packages -- Fixed license tag -- Fixed -devel description -- Added the static library back to -devel -- Added make clean - -* Fri Feb 08 2008 Ian Weller 1.15-1 -- First package build. diff --git a/gmp-6.2.1-intel-cet.patch b/gmp-6.2.1-intel-cet.patch new file mode 100644 index 0000000..137b06c --- /dev/null +++ b/gmp-6.2.1-intel-cet.patch @@ -0,0 +1,3515 @@ +From 4faa667ce4e1a318db2c55ce83084cbe4924a892 Mon Sep 17 00:00:00 2001 +From: Daiki Ueno +Date: Thu, 18 Aug 2022 15:55:31 +0900 +Subject: [PATCH] gmp-intel-cet.patch + +--- + acinclude.m4 | 100 +++++++++++++++++++++++++ + configure.ac | 1 + + mpn/x86/aors_n.asm | 5 +- + mpn/x86/aorsmul_1.asm | 1 + + mpn/x86/atom/sse2/aorsmul_1.asm | 1 + + mpn/x86/atom/sse2/mul_basecase.asm | 1 + + mpn/x86/atom/sse2/sqr_basecase.asm | 1 + + mpn/x86/bdiv_dbm1c.asm | 1 + + mpn/x86/copyd.asm | 1 + + mpn/x86/copyi.asm | 1 + + mpn/x86/divrem_1.asm | 1 + + mpn/x86/divrem_2.asm | 1 + + mpn/x86/k6/aors_n.asm | 1 + + mpn/x86/k6/aorsmul_1.asm | 1 + + mpn/x86/k6/divrem_1.asm | 1 + + mpn/x86/k6/k62mmx/copyd.asm | 1 + + mpn/x86/k6/k62mmx/lshift.asm | 1 + + mpn/x86/k6/k62mmx/rshift.asm | 1 + + mpn/x86/k6/mmx/com.asm | 1 + + mpn/x86/k6/mmx/logops_n.asm | 1 + + mpn/x86/k6/mmx/lshift.asm | 1 + + mpn/x86/k6/mmx/popham.asm | 1 + + mpn/x86/k6/mmx/rshift.asm | 1 + + mpn/x86/k6/mod_34lsub1.asm | 1 + + mpn/x86/k6/mul_1.asm | 1 + + mpn/x86/k6/mul_basecase.asm | 1 + + mpn/x86/k6/pre_mod_1.asm | 1 + + mpn/x86/k6/sqr_basecase.asm | 1 + + mpn/x86/k7/aors_n.asm | 1 + + mpn/x86/k7/mmx/com.asm | 1 + + mpn/x86/k7/mmx/copyd.asm | 1 + + mpn/x86/k7/mmx/copyi.asm | 1 + + mpn/x86/k7/mmx/divrem_1.asm | 1 + + mpn/x86/k7/mmx/lshift.asm | 1 + + mpn/x86/k7/mmx/popham.asm | 1 + + mpn/x86/k7/mmx/rshift.asm | 1 + + mpn/x86/k7/mod_1_1.asm | 1 + + mpn/x86/k7/mod_1_4.asm | 1 + + mpn/x86/k7/mod_34lsub1.asm | 1 + + mpn/x86/k7/mul_basecase.asm | 1 + + mpn/x86/k7/sqr_basecase.asm | 1 + + mpn/x86/lshift.asm | 1 + + mpn/x86/mmx/sec_tabselect.asm | 1 + + mpn/x86/mod_34lsub1.asm | 1 + + mpn/x86/mul_1.asm | 1 + + mpn/x86/mul_basecase.asm | 1 + + mpn/x86/p6/aors_n.asm | 3 +- + 
mpn/x86/p6/aorsmul_1.asm | 3 +- + mpn/x86/p6/copyd.asm | 1 + + mpn/x86/p6/gcd_11.asm | 1 + + mpn/x86/p6/lshsub_n.asm | 3 +- + mpn/x86/p6/mmx/divrem_1.asm | 1 + + mpn/x86/p6/mod_34lsub1.asm | 1 + + mpn/x86/p6/mul_basecase.asm | 3 +- + mpn/x86/p6/sqr_basecase.asm | 3 +- + mpn/x86/pentium/aors_n.asm | 1 + + mpn/x86/pentium/aorsmul_1.asm | 1 + + mpn/x86/pentium/com.asm | 1 + + mpn/x86/pentium/copyd.asm | 1 + + mpn/x86/pentium/copyi.asm | 1 + + mpn/x86/pentium/logops_n.asm | 1 + + mpn/x86/pentium/lshift.asm | 1 + + mpn/x86/pentium/mmx/lshift.asm | 1 + + mpn/x86/pentium/mmx/mul_1.asm | 1 + + mpn/x86/pentium/mmx/rshift.asm | 1 + + mpn/x86/pentium/mod_34lsub1.asm | 1 + + mpn/x86/pentium/mul_1.asm | 1 + + mpn/x86/pentium/mul_2.asm | 1 + + mpn/x86/pentium/mul_basecase.asm | 1 + + mpn/x86/pentium/rshift.asm | 1 + + mpn/x86/pentium/sqr_basecase.asm | 1 + + mpn/x86/pentium4/copyd.asm | 1 + + mpn/x86/pentium4/copyi.asm | 1 + + mpn/x86/pentium4/mmx/popham.asm | 1 + + mpn/x86/pentium4/sse2/add_n.asm | 1 + + mpn/x86/pentium4/sse2/addlsh1_n.asm | 1 + + mpn/x86/pentium4/sse2/addmul_1.asm | 1 + + mpn/x86/pentium4/sse2/cnd_add_n.asm | 1 + + mpn/x86/pentium4/sse2/cnd_sub_n.asm | 1 + + mpn/x86/pentium4/sse2/divrem_1.asm | 1 + + mpn/x86/pentium4/sse2/mod_1_1.asm | 1 + + mpn/x86/pentium4/sse2/mod_1_4.asm | 1 + + mpn/x86/pentium4/sse2/mod_34lsub1.asm | 1 + + mpn/x86/pentium4/sse2/mul_1.asm | 1 + + mpn/x86/pentium4/sse2/mul_basecase.asm | 1 + + mpn/x86/pentium4/sse2/rsh1add_n.asm | 1 + + mpn/x86/pentium4/sse2/sqr_basecase.asm | 1 + + mpn/x86/pentium4/sse2/sub_n.asm | 1 + + mpn/x86/pentium4/sse2/submul_1.asm | 1 + + mpn/x86/rshift.asm | 1 + + mpn/x86/sec_tabselect.asm | 1 + + mpn/x86/sqr_basecase.asm | 1 + + mpn/x86/udiv.asm | 1 + + mpn/x86/umul.asm | 1 + + mpn/x86/x86-defs.m4 | 7 +- + mpn/x86_64/addaddmul_1msb0.asm | 1 + + mpn/x86_64/aorrlsh1_n.asm | 1 + + mpn/x86_64/aorrlshC_n.asm | 1 + + mpn/x86_64/aorrlsh_n.asm | 1 + + mpn/x86_64/aors_err1_n.asm | 1 + + mpn/x86_64/aors_err2_n.asm | 1 + + mpn/x86_64/aors_err3_n.asm | 1 + + mpn/x86_64/aors_n.asm | 1 + + mpn/x86_64/aorsmul_1.asm | 1 + + mpn/x86_64/atom/addmul_2.asm | 1 + + mpn/x86_64/atom/aorrlsh1_n.asm | 1 + + mpn/x86_64/atom/aorrlsh2_n.asm | 1 + + mpn/x86_64/atom/lshift.asm | 1 + + mpn/x86_64/atom/lshiftc.asm | 1 + + mpn/x86_64/atom/mul_2.asm | 1 + + mpn/x86_64/atom/rsh1aors_n.asm | 1 + + mpn/x86_64/atom/rshift.asm | 1 + + mpn/x86_64/atom/sublsh1_n.asm | 1 + + mpn/x86_64/bd1/addmul_2.asm | 1 + + mpn/x86_64/bd1/hamdist.asm | 1 + + mpn/x86_64/bd1/mul_2.asm | 1 + + mpn/x86_64/bd1/mul_basecase.asm | 1 + + mpn/x86_64/bd1/popcount.asm | 1 + + mpn/x86_64/bd2/gcd_11.asm | 1 + + mpn/x86_64/bd2/gcd_22.asm | 1 + + mpn/x86_64/bd4/gcd_11.asm | 1 + + mpn/x86_64/bdiv_dbm1c.asm | 1 + + mpn/x86_64/bdiv_q_1.asm | 1 + + mpn/x86_64/bt1/aors_n.asm | 1 + + mpn/x86_64/bt1/aorsmul_1.asm | 1 + + mpn/x86_64/bt1/copyd.asm | 1 + + mpn/x86_64/bt1/copyi.asm | 1 + + mpn/x86_64/bt1/gcd_11.asm | 1 + + mpn/x86_64/bt1/mul_1.asm | 1 + + mpn/x86_64/bt1/mul_basecase.asm | 1 + + mpn/x86_64/bt1/sqr_basecase.asm | 1 + + mpn/x86_64/cnd_aors_n.asm | 1 + + mpn/x86_64/com.asm | 1 + + mpn/x86_64/copyd.asm | 1 + + mpn/x86_64/copyi.asm | 1 + + mpn/x86_64/core2/aors_err1_n.asm | 1 + + mpn/x86_64/core2/aors_n.asm | 1 + + mpn/x86_64/core2/aorsmul_1.asm | 1 + + mpn/x86_64/core2/divrem_1.asm | 1 + + mpn/x86_64/core2/gcd_11.asm | 1 + + mpn/x86_64/core2/gcd_22.asm | 1 + + mpn/x86_64/core2/hamdist.asm | 1 + + mpn/x86_64/core2/logops_n.asm | 1 + + mpn/x86_64/core2/lshift.asm | 1 + + mpn/x86_64/core2/lshiftc.asm | 1 + + 
mpn/x86_64/core2/mul_basecase.asm | 5 ++ + mpn/x86_64/core2/mullo_basecase.asm | 1 + + mpn/x86_64/core2/popcount.asm | 1 + + mpn/x86_64/core2/rsh1aors_n.asm | 1 + + mpn/x86_64/core2/rshift.asm | 1 + + mpn/x86_64/core2/sqr_basecase.asm | 1 + + mpn/x86_64/core2/sublshC_n.asm | 1 + + mpn/x86_64/coreibwl/addmul_1.asm | 24 ++++-- + mpn/x86_64/coreibwl/mul_1.asm | 24 ++++-- + mpn/x86_64/coreibwl/mul_basecase.asm | 47 ++++++++---- + mpn/x86_64/coreibwl/mullo_basecase.asm | 1 + + mpn/x86_64/coreibwl/sqr_basecase.asm | 49 ++++++++---- + mpn/x86_64/coreihwl/addmul_2.asm | 1 + + mpn/x86_64/coreihwl/aors_n.asm | 1 + + mpn/x86_64/coreihwl/aorsmul_1.asm | 1 + + mpn/x86_64/coreihwl/gcd_22.asm | 1 + + mpn/x86_64/coreihwl/mul_2.asm | 1 + + mpn/x86_64/coreihwl/mul_basecase.asm | 1 + + mpn/x86_64/coreihwl/mullo_basecase.asm | 1 + + mpn/x86_64/coreihwl/redc_1.asm | 1 + + mpn/x86_64/coreihwl/sqr_basecase.asm | 1 + + mpn/x86_64/coreinhm/aorrlsh_n.asm | 1 + + mpn/x86_64/coreinhm/hamdist.asm | 1 + + mpn/x86_64/coreinhm/popcount.asm | 1 + + mpn/x86_64/coreisbr/addmul_2.asm | 1 + + mpn/x86_64/coreisbr/aorrlshC_n.asm | 1 + + mpn/x86_64/coreisbr/aorrlsh_n.asm | 1 + + mpn/x86_64/coreisbr/aors_n.asm | 1 + + mpn/x86_64/coreisbr/cnd_add_n.asm | 1 + + mpn/x86_64/coreisbr/cnd_sub_n.asm | 1 + + mpn/x86_64/coreisbr/mul_1.asm | 1 + + mpn/x86_64/coreisbr/mul_2.asm | 1 + + mpn/x86_64/coreisbr/mul_basecase.asm | 1 + + mpn/x86_64/coreisbr/mullo_basecase.asm | 1 + + mpn/x86_64/coreisbr/rsh1aors_n.asm | 1 + + mpn/x86_64/coreisbr/sqr_basecase.asm | 1 + + mpn/x86_64/div_qr_1n_pi1.asm | 1 + + mpn/x86_64/div_qr_2n_pi1.asm | 1 + + mpn/x86_64/div_qr_2u_pi1.asm | 1 + + mpn/x86_64/dive_1.asm | 1 + + mpn/x86_64/divrem_1.asm | 1 + + mpn/x86_64/divrem_2.asm | 1 + + mpn/x86_64/fastavx/copyd.asm | 1 + + mpn/x86_64/fastavx/copyi.asm | 1 + + mpn/x86_64/fastsse/com-palignr.asm | 1 + + mpn/x86_64/fastsse/com.asm | 1 + + mpn/x86_64/fastsse/copyd-palignr.asm | 1 + + mpn/x86_64/fastsse/copyd.asm | 1 + + mpn/x86_64/fastsse/copyi-palignr.asm | 1 + + mpn/x86_64/fastsse/copyi.asm | 1 + + mpn/x86_64/fastsse/lshift-movdqu2.asm | 1 + + mpn/x86_64/fastsse/lshift.asm | 1 + + mpn/x86_64/fastsse/lshiftc-movdqu2.asm | 1 + + mpn/x86_64/fastsse/lshiftc.asm | 1 + + mpn/x86_64/fastsse/rshift-movdqu2.asm | 1 + + mpn/x86_64/fastsse/sec_tabselect.asm | 1 + + mpn/x86_64/fat/fat_entry.asm | 1 + + mpn/x86_64/gcd_11.asm | 1 + + mpn/x86_64/gcd_22.asm | 1 + + mpn/x86_64/k10/gcd_22.asm | 1 + + mpn/x86_64/k10/hamdist.asm | 1 + + mpn/x86_64/k10/popcount.asm | 5 +- + mpn/x86_64/k8/addmul_2.asm | 1 + + mpn/x86_64/k8/aorrlsh_n.asm | 1 + + mpn/x86_64/k8/bdiv_q_1.asm | 1 + + mpn/x86_64/k8/div_qr_1n_pi1.asm | 1 + + mpn/x86_64/k8/mul_basecase.asm | 8 ++ + mpn/x86_64/k8/mullo_basecase.asm | 12 ++- + mpn/x86_64/k8/mulmid_basecase.asm | 9 +++ + mpn/x86_64/k8/redc_1.asm | 18 +++-- + mpn/x86_64/k8/sqr_basecase.asm | 18 +++-- + mpn/x86_64/logops_n.asm | 1 + + mpn/x86_64/lshift.asm | 1 + + mpn/x86_64/lshiftc.asm | 1 + + mpn/x86_64/lshsub_n.asm | 1 + + mpn/x86_64/missing.asm | 1 + + mpn/x86_64/mod_1_2.asm | 1 + + mpn/x86_64/mod_1_4.asm | 1 + + mpn/x86_64/mod_34lsub1.asm | 28 ++++--- + mpn/x86_64/mode1o.asm | 1 + + mpn/x86_64/mul_1.asm | 1 + + mpn/x86_64/mul_2.asm | 1 + + mpn/x86_64/nano/dive_1.asm | 1 + + mpn/x86_64/pentium4/aors_n.asm | 1 + + mpn/x86_64/pentium4/mod_34lsub1.asm | 1 + + mpn/x86_64/pentium4/rsh1aors_n.asm | 1 + + mpn/x86_64/pentium4/rshift.asm | 1 + + mpn/x86_64/popham.asm | 1 + + mpn/x86_64/rsh1aors_n.asm | 1 + + mpn/x86_64/rshift.asm | 1 + + mpn/x86_64/sec_tabselect.asm | 1 + 
+ mpn/x86_64/sqr_diag_addlsh1.asm | 1 +
+ mpn/x86_64/sublsh1_n.asm | 1 +
+ mpn/x86_64/x86_64-defs.m4 | 5 ++
+ mpn/x86_64/zen/aorrlsh_n.asm | 25 +++++--
+ mpn/x86_64/zen/mul_basecase.asm | 1 +
+ mpn/x86_64/zen/mullo_basecase.asm | 1 +
+ mpn/x86_64/zen/sbpi1_bdiv_r.asm | 1 +
+ mpn/x86_64/zen/sqr_basecase.asm | 1 +
+ 244 files changed, 537 insertions(+), 89 deletions(-)
+
+diff --git a/acinclude.m4 b/acinclude.m4
+index 86175ce..84e880b 100644
+--- a/acinclude.m4
++++ b/acinclude.m4
+@@ -3135,6 +3135,106 @@ __sparc_get_pc_thunk.l7:
+ GMP_DEFINE_RAW(["define(,<$gmp_cv_asm_sparc_shared_thunks>)"])
+ ])
+
++dnl GMP_ASM_X86_CET_MACROS(ABI)
++dnl ------------
++dnl Define
++dnl 1. X86_ENDBR for endbr32/endbr64.
++dnl 2. X86_NOTRACK for notrack prefix.
++dnl 3. X86_GNU_PROPERTY to add a .note.gnu.property section to mark
++dnl Intel CET support if needed.
++dnl .section ".note.gnu.property", "a"
++dnl .p2align POINTER-ALIGN
++dnl .long 1f - 0f
++dnl .long 4f - 1f
++dnl .long 5
++dnl 0:
++dnl .asciz "GNU"
++dnl 1:
++dnl .p2align POINTER-ALIGN
++dnl .long 0xc0000002
++dnl .long 3f - 2f
++dnl 2:
++dnl .long 3
++dnl 3:
++dnl .p2align POINTER-ALIGN
++dnl 4:
++AC_DEFUN([GMP_ASM_X86_CET_MACROS],[
++dnl AC_REQUIRE([AC_PROG_CC]) GMP uses something else
++AC_CACHE_CHECK([if Intel CET is enabled],
++ gmp_cv_asm_x86_intel_cet, [dnl
++ cat > conftest.c </dev/null])
++ then
++ gmp_cv_asm_x86_intel_cet=yes
++ else
++ gmp_cv_asm_x86_intel_cet=no
++ fi
++ rm -f conftest*])
++ if test "$gmp_cv_asm_x86_intel_cet" = yes; then
++ case $1 in
++ 32)
++ endbr=endbr32
++ p2align=2
++ ;;
++ 64)
++ endbr=endbr64
++ p2align=3
++ ;;
++ x32)
++ endbr=endbr64
++ p2align=2
++ ;;
++ esac
++ AC_CACHE_CHECK([if .note.gnu.property section is needed],
++ gmp_cv_asm_x86_gnu_property, [dnl
++ cat > conftest.c </dev/null])
++ then
++ gmp_cv_asm_x86_gnu_property=yes
++ else
++ gmp_cv_asm_x86_gnu_property=no
++ fi
++ rm -f conftest*])
++ echo ["define(<X86_ENDBR>,<$endbr>)"] >> $gmp_tmpconfigm4
++ echo ["define(<X86_NOTRACK>,<notrack>)"] >> $gmp_tmpconfigm4
++ else
++ gmp_cv_asm_x86_gnu_property=no
++ echo ["define(<X86_ENDBR>,<>)"] >> $gmp_tmpconfigm4
++ echo ["define(<X86_NOTRACK>,<>)"] >> $gmp_tmpconfigm4
++ fi
++ if test "$gmp_cv_asm_x86_gnu_property" = yes; then
++ echo ["define(<X86_GNU_PROPERTY>, <
++ .section \".note.gnu.property\", \"a\"
++ .p2align $p2align
++ .long 1f - 0f
++ .long 4f - 1f
++ .long 5
++0:
++ .asciz \"GNU\"
++1:
++ .p2align $p2align
++ .long 0xc0000002
++ .long 3f - 2f
++2:
++ .long 3
++3:
++ .p2align $p2align
++4:>)"] >> $gmp_tmpconfigm4
++ else
++ echo ["define(<X86_GNU_PROPERTY>,<>)"] >> $gmp_tmpconfigm4
++ fi
++])
++
+
+ dnl GMP_C_ATTRIBUTE_CONST
+ dnl ---------------------
+diff --git a/configure.ac b/configure.ac
+index cafdb3c..0fb8b21 100644
+--- a/configure.ac
++++ b/configure.ac
+@@ -3813,6 +3813,7 @@ yes
+ esac
+ ;;
+ esac
++ GMP_ASM_X86_CET_MACROS($ABI)
+ ;;
+ esac
+ fi
+diff --git a/mpn/x86/aors_n.asm b/mpn/x86/aors_n.asm
+index 5d359f5..7ea7814 100644
+--- a/mpn/x86/aors_n.asm
++++ b/mpn/x86/aors_n.asm
+@@ -112,7 +112,7 @@ L(0a): leal (%eax,%eax,8),%eax
+ shrl %ebp C shift bit 0 into carry
+ popl %ebp FRAME_popl()
+
+- jmp *%eax C jump into loop
++ X86_NOTRACK jmp *%eax C jump into loop
+
+ EPILOGUE()
+
+@@ -153,7 +153,7 @@ L(0b): leal (%eax,%eax,8),%eax
+ C Calculate start address in loop for non-PIC.
+ leal L(oop)-3(%eax,%eax,8),%eax + ') +- jmp *%eax C jump into loop ++ X86_NOTRACK jmp *%eax C jump into loop + + L(oopgo): + pushl %ebp FRAME_pushl() +@@ -200,3 +200,4 @@ L(oop): movl (%esi),%eax + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/aorsmul_1.asm b/mpn/x86/aorsmul_1.asm +index 54a8905..0ab1e01 100644 +--- a/mpn/x86/aorsmul_1.asm ++++ b/mpn/x86/aorsmul_1.asm +@@ -154,3 +154,4 @@ L(end): movl %ebx,%eax + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/atom/sse2/aorsmul_1.asm b/mpn/x86/atom/sse2/aorsmul_1.asm +index 969a14a..20658e1 100644 +--- a/mpn/x86/atom/sse2/aorsmul_1.asm ++++ b/mpn/x86/atom/sse2/aorsmul_1.asm +@@ -172,3 +172,4 @@ PROLOGUE(func_1c) + mov 20(%esp), %edx C carry + jmp L(ent) + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/atom/sse2/mul_basecase.asm b/mpn/x86/atom/sse2/mul_basecase.asm +index 97d3aeb..74171aa 100644 +--- a/mpn/x86/atom/sse2/mul_basecase.asm ++++ b/mpn/x86/atom/sse2/mul_basecase.asm +@@ -499,3 +499,4 @@ L(done): + pop %edi + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/atom/sse2/sqr_basecase.asm b/mpn/x86/atom/sse2/sqr_basecase.asm +index af19ed8..0031812 100644 +--- a/mpn/x86/atom/sse2/sqr_basecase.asm ++++ b/mpn/x86/atom/sse2/sqr_basecase.asm +@@ -632,3 +632,4 @@ L(one): pmuludq %mm7, %mm7 + pop %edi + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/bdiv_dbm1c.asm b/mpn/x86/bdiv_dbm1c.asm +index 0288c47..7a3b1a6 100644 +--- a/mpn/x86/bdiv_dbm1c.asm ++++ b/mpn/x86/bdiv_dbm1c.asm +@@ -127,3 +127,4 @@ L(b1): add $-4, %ebp + pop %esi + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/copyd.asm b/mpn/x86/copyd.asm +index 51fa195..0e588d9 100644 +--- a/mpn/x86/copyd.asm ++++ b/mpn/x86/copyd.asm +@@ -89,3 +89,4 @@ PROLOGUE(mpn_copyd) + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/copyi.asm b/mpn/x86/copyi.asm +index f6b0354..6efbb90 100644 +--- a/mpn/x86/copyi.asm ++++ b/mpn/x86/copyi.asm +@@ -97,3 +97,4 @@ PROLOGUE(mpn_copyi) + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/divrem_1.asm b/mpn/x86/divrem_1.asm +index 255d493..b1af920 100644 +--- a/mpn/x86/divrem_1.asm ++++ b/mpn/x86/divrem_1.asm +@@ -231,3 +231,4 @@ deflit(`FRAME',8) + popl %edi + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/divrem_2.asm b/mpn/x86/divrem_2.asm +index 4c38ad0..c2920c2 100644 +--- a/mpn/x86/divrem_2.asm ++++ b/mpn/x86/divrem_2.asm +@@ -197,3 +197,4 @@ L(35): sub 20(%esp), %ebp + movl $1, 32(%esp) + jmp L(8) + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k6/aors_n.asm b/mpn/x86/k6/aors_n.asm +index 168f9b4..257ba59 100644 +--- a/mpn/x86/k6/aors_n.asm ++++ b/mpn/x86/k6/aors_n.asm +@@ -335,3 +335,4 @@ L(inplace_done): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k6/aorsmul_1.asm b/mpn/x86/k6/aorsmul_1.asm +index eaa92eb..78be9d2 100644 +--- a/mpn/x86/k6/aorsmul_1.asm ++++ b/mpn/x86/k6/aorsmul_1.asm +@@ -389,3 +389,4 @@ Zdisp( M4_inst,%ecx, disp0,(%edi)) + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k6/divrem_1.asm b/mpn/x86/k6/divrem_1.asm +index b4cea4f..ca41a3f 100644 +--- a/mpn/x86/k6/divrem_1.asm ++++ b/mpn/x86/k6/divrem_1.asm +@@ -201,3 +201,4 @@ deflit(`FRAME',8) + popl %edi + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k6/k62mmx/copyd.asm b/mpn/x86/k6/k62mmx/copyd.asm +index f80a5a1..fc329f5 100644 +--- a/mpn/x86/k6/k62mmx/copyd.asm ++++ b/mpn/x86/k6/k62mmx/copyd.asm +@@ -116,3 +116,4 @@ L(zero): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k6/k62mmx/lshift.asm b/mpn/x86/k6/k62mmx/lshift.asm +index c86575f..728fb5b 100644 +--- a/mpn/x86/k6/k62mmx/lshift.asm ++++ 
b/mpn/x86/k6/k62mmx/lshift.asm +@@ -292,3 +292,4 @@ deflit(`FRAME',4) + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k6/k62mmx/rshift.asm b/mpn/x86/k6/k62mmx/rshift.asm +index f604a7b..bd673f3 100644 +--- a/mpn/x86/k6/k62mmx/rshift.asm ++++ b/mpn/x86/k6/k62mmx/rshift.asm +@@ -291,3 +291,4 @@ L(finish_even): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k6/mmx/com.asm b/mpn/x86/k6/mmx/com.asm +index b747454..646d16b 100644 +--- a/mpn/x86/k6/mmx/com.asm ++++ b/mpn/x86/k6/mmx/com.asm +@@ -101,3 +101,4 @@ L(no_extra): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k6/mmx/logops_n.asm b/mpn/x86/k6/mmx/logops_n.asm +index e17930b..acfd7df 100644 +--- a/mpn/x86/k6/mmx/logops_n.asm ++++ b/mpn/x86/k6/mmx/logops_n.asm +@@ -224,3 +224,4 @@ L(no_extra): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k6/mmx/lshift.asm b/mpn/x86/k6/mmx/lshift.asm +index 45be582..eee1eb8 100644 +--- a/mpn/x86/k6/mmx/lshift.asm ++++ b/mpn/x86/k6/mmx/lshift.asm +@@ -128,3 +128,4 @@ L(top): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k6/mmx/popham.asm b/mpn/x86/k6/mmx/popham.asm +index 2b19d0b..efeb1b4 100644 +--- a/mpn/x86/k6/mmx/popham.asm ++++ b/mpn/x86/k6/mmx/popham.asm +@@ -234,3 +234,4 @@ HAM(` nop C code alignment') + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k6/mmx/rshift.asm b/mpn/x86/k6/mmx/rshift.asm +index cd0382f..ae53711 100644 +--- a/mpn/x86/k6/mmx/rshift.asm ++++ b/mpn/x86/k6/mmx/rshift.asm +@@ -128,3 +128,4 @@ Zdisp( movd, %mm0, 0,(%ecx,%eax,4)) + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k6/mod_34lsub1.asm b/mpn/x86/k6/mod_34lsub1.asm +index 7e30503..05f8979 100644 +--- a/mpn/x86/k6/mod_34lsub1.asm ++++ b/mpn/x86/k6/mod_34lsub1.asm +@@ -188,3 +188,4 @@ L(combine): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k6/mul_1.asm b/mpn/x86/k6/mul_1.asm +index 3ef7ec2..2139f36 100644 +--- a/mpn/x86/k6/mul_1.asm ++++ b/mpn/x86/k6/mul_1.asm +@@ -290,3 +290,4 @@ L(finish_not_one): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k6/mul_basecase.asm b/mpn/x86/k6/mul_basecase.asm +index 7030001..ab202a2 100644 +--- a/mpn/x86/k6/mul_basecase.asm ++++ b/mpn/x86/k6/mul_basecase.asm +@@ -610,3 +610,4 @@ Zdisp( addl, %ecx, disp0,(%edi)) + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k6/pre_mod_1.asm b/mpn/x86/k6/pre_mod_1.asm +index 34db20d..1e4cb17 100644 +--- a/mpn/x86/k6/pre_mod_1.asm ++++ b/mpn/x86/k6/pre_mod_1.asm +@@ -144,3 +144,4 @@ L(q1_ff): + + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k6/sqr_basecase.asm b/mpn/x86/k6/sqr_basecase.asm +index b7ecb5c..f3a101a 100644 +--- a/mpn/x86/k6/sqr_basecase.asm ++++ b/mpn/x86/k6/sqr_basecase.asm +@@ -678,3 +678,4 @@ L(pic_calc): + + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k7/aors_n.asm b/mpn/x86/k7/aors_n.asm +index 1a08072..bfdf3d4 100644 +--- a/mpn/x86/k7/aors_n.asm ++++ b/mpn/x86/k7/aors_n.asm +@@ -256,3 +256,4 @@ L(even): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k7/mmx/com.asm b/mpn/x86/k7/mmx/com.asm +index a258c22..cf48fac 100644 +--- a/mpn/x86/k7/mmx/com.asm ++++ b/mpn/x86/k7/mmx/com.asm +@@ -123,3 +123,4 @@ L(done): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k7/mmx/copyd.asm b/mpn/x86/k7/mmx/copyd.asm +index 59ece40..3bc9ff8 100644 +--- a/mpn/x86/k7/mmx/copyd.asm ++++ b/mpn/x86/k7/mmx/copyd.asm +@@ -142,3 +142,4 @@ L(done): + + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k7/mmx/copyi.asm b/mpn/x86/k7/mmx/copyi.asm +index 9a28f92..f0648fa 100644 +--- a/mpn/x86/k7/mmx/copyi.asm ++++ b/mpn/x86/k7/mmx/copyi.asm +@@ -155,3 +155,4 @@ L(done): + ret 
+ + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k7/mmx/divrem_1.asm b/mpn/x86/k7/mmx/divrem_1.asm +index cf34328..370bfbb 100644 +--- a/mpn/x86/k7/mmx/divrem_1.asm ++++ b/mpn/x86/k7/mmx/divrem_1.asm +@@ -830,3 +830,4 @@ L(fraction_entry): + jmp L(fraction_done) + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k7/mmx/lshift.asm b/mpn/x86/k7/mmx/lshift.asm +index b3383cf..4140e82 100644 +--- a/mpn/x86/k7/mmx/lshift.asm ++++ b/mpn/x86/k7/mmx/lshift.asm +@@ -479,3 +479,4 @@ L(end_even_unaligned): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k7/mmx/popham.asm b/mpn/x86/k7/mmx/popham.asm +index 95965b7..f29540a 100644 +--- a/mpn/x86/k7/mmx/popham.asm ++++ b/mpn/x86/k7/mmx/popham.asm +@@ -211,3 +211,4 @@ L(loaded): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k7/mmx/rshift.asm b/mpn/x86/k7/mmx/rshift.asm +index 345d23a..0da1f93 100644 +--- a/mpn/x86/k7/mmx/rshift.asm ++++ b/mpn/x86/k7/mmx/rshift.asm +@@ -478,3 +478,4 @@ L(end_even_unaligned): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k7/mod_1_1.asm b/mpn/x86/k7/mod_1_1.asm +index 1bbe6f9..8da9519 100644 +--- a/mpn/x86/k7/mod_1_1.asm ++++ b/mpn/x86/k7/mod_1_1.asm +@@ -219,3 +219,4 @@ PROLOGUE(mpn_mod_1_1p_cps) + pop %ebp + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k7/mod_1_4.asm b/mpn/x86/k7/mod_1_4.asm +index bb7597e..fe1da5b 100644 +--- a/mpn/x86/k7/mod_1_4.asm ++++ b/mpn/x86/k7/mod_1_4.asm +@@ -258,3 +258,4 @@ C CAUTION: This is the same code as in pentium4/sse2/mod_1_4.asm + pop %ebp + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k7/mod_34lsub1.asm b/mpn/x86/k7/mod_34lsub1.asm +index ee3ad04..0c1b8c8 100644 +--- a/mpn/x86/k7/mod_34lsub1.asm ++++ b/mpn/x86/k7/mod_34lsub1.asm +@@ -186,3 +186,4 @@ L(combine): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k7/mul_basecase.asm b/mpn/x86/k7/mul_basecase.asm +index 4dfb500..b96fda7 100644 +--- a/mpn/x86/k7/mul_basecase.asm ++++ b/mpn/x86/k7/mul_basecase.asm +@@ -600,3 +600,4 @@ deflit(`disp1', eval(disp0-0 + 4)) + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/k7/sqr_basecase.asm b/mpn/x86/k7/sqr_basecase.asm +index 7b6a97e..df47ee4 100644 +--- a/mpn/x86/k7/sqr_basecase.asm ++++ b/mpn/x86/k7/sqr_basecase.asm +@@ -633,3 +633,4 @@ L(diag): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/lshift.asm b/mpn/x86/lshift.asm +index 6ee6153..95f5321 100644 +--- a/mpn/x86/lshift.asm ++++ b/mpn/x86/lshift.asm +@@ -104,3 +104,4 @@ L(end): shll %cl,%ebx C compute least significant limb + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/mmx/sec_tabselect.asm b/mpn/x86/mmx/sec_tabselect.asm +index aae158a..543dec1 100644 +--- a/mpn/x86/mmx/sec_tabselect.asm ++++ b/mpn/x86/mmx/sec_tabselect.asm +@@ -161,3 +161,4 @@ L(b00): pop %ebp + emms + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/mod_34lsub1.asm b/mpn/x86/mod_34lsub1.asm +index e09e702..df52d37 100644 +--- a/mpn/x86/mod_34lsub1.asm ++++ b/mpn/x86/mod_34lsub1.asm +@@ -181,3 +181,4 @@ L(combine): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/mul_1.asm b/mpn/x86/mul_1.asm +index 421de62..dbbc0e3 100644 +--- a/mpn/x86/mul_1.asm ++++ b/mpn/x86/mul_1.asm +@@ -138,3 +138,4 @@ L(end): movl %ebx,%eax + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/mul_basecase.asm b/mpn/x86/mul_basecase.asm +index 8339732..c32fd7e 100644 +--- a/mpn/x86/mul_basecase.asm ++++ b/mpn/x86/mul_basecase.asm +@@ -221,3 +221,4 @@ L(done): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/p6/aors_n.asm b/mpn/x86/p6/aors_n.asm +index df51c2e..ab172df 100644 +--- a/mpn/x86/p6/aors_n.asm ++++ 
b/mpn/x86/p6/aors_n.asm +@@ -90,7 +90,7 @@ L(here): + ') + + shr %edx C set cy flag +- jmp *%eax ++ X86_NOTRACK jmp *%eax + + ifdef(`PIC',` + L(pic_calc): +@@ -154,3 +154,4 @@ PROLOGUE(func_nc) + movl 20(%esp), %edx + jmp L(start) + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/p6/aorsmul_1.asm b/mpn/x86/p6/aorsmul_1.asm +index bc8c49c..2a3b122 100644 +--- a/mpn/x86/p6/aorsmul_1.asm ++++ b/mpn/x86/p6/aorsmul_1.asm +@@ -240,7 +240,7 @@ L(here): + cmovnz( %ebx, %ecx) C high,low carry other way around + cmovnz( %eax, %ebx) + +- jmp *%edx ++ X86_NOTRACK jmp *%edx + + + ifdef(`PIC',` +@@ -318,3 +318,4 @@ deflit(`disp0', eval(UNROLL_BYTES ifelse(UNROLL_BYTES,256,-128))) + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/p6/copyd.asm b/mpn/x86/p6/copyd.asm +index 1be7636..bd42da1 100644 +--- a/mpn/x86/p6/copyd.asm ++++ b/mpn/x86/p6/copyd.asm +@@ -176,3 +176,4 @@ L(zero): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/p6/gcd_11.asm b/mpn/x86/p6/gcd_11.asm +index 80e055e..a7fc6a8 100644 +--- a/mpn/x86/p6/gcd_11.asm ++++ b/mpn/x86/p6/gcd_11.asm +@@ -81,3 +81,4 @@ L(end): mov %edx, %eax + pop %edi + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/p6/lshsub_n.asm b/mpn/x86/p6/lshsub_n.asm +index 7ada213..17db5d5 100644 +--- a/mpn/x86/p6/lshsub_n.asm ++++ b/mpn/x86/p6/lshsub_n.asm +@@ -82,7 +82,7 @@ L(here): + pxor %mm1, %mm1 + pxor %mm0, %mm0 + +- jmp *%eax ++ X86_NOTRACK jmp *%eax + + ifdef(`PIC',` + L(pic_calc): +@@ -167,3 +167,4 @@ L(ent): mov 0(up,n,4), %eax + jmp L(top) + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/p6/mmx/divrem_1.asm b/mpn/x86/p6/mmx/divrem_1.asm +index 5300616..b6057dd 100644 +--- a/mpn/x86/p6/mmx/divrem_1.asm ++++ b/mpn/x86/p6/mmx/divrem_1.asm +@@ -765,3 +765,4 @@ L(fraction_top): + jmp L(fraction_done) + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/p6/mod_34lsub1.asm b/mpn/x86/p6/mod_34lsub1.asm +index b88ab5d..46b3806 100644 +--- a/mpn/x86/p6/mod_34lsub1.asm ++++ b/mpn/x86/p6/mod_34lsub1.asm +@@ -188,3 +188,4 @@ L(done_0): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/p6/mul_basecase.asm b/mpn/x86/p6/mul_basecase.asm +index d87bc12..521b31e 100644 +--- a/mpn/x86/p6/mul_basecase.asm ++++ b/mpn/x86/p6/mul_basecase.asm +@@ -524,7 +524,7 @@ L(unroll_outer_entry): + xorl %eax, %ebx C carries other way for odd index + xorl %eax, %ecx + +- jmp *%edx ++ X86_NOTRACK jmp *%edx + + + C ----------------------------------------------------------------------------- +@@ -605,3 +605,4 @@ deflit(`disp1', eval(disp0 + 4)) + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/p6/sqr_basecase.asm b/mpn/x86/p6/sqr_basecase.asm +index 8fc7fdf..f71304f 100644 +--- a/mpn/x86/p6/sqr_basecase.asm ++++ b/mpn/x86/p6/sqr_basecase.asm +@@ -447,7 +447,7 @@ define(cmovX,`ifelse(eval(UNROLL_COUNT%2),1,`cmovz($@)',`cmovnz($@)')') + cmovX( %ebx, %ecx) C high carry reverse + cmovX( %eax, %ebx) C low carry reverse + movl %edx, VAR_JMP +- jmp *%edx ++ X86_NOTRACK jmp *%edx + + + C Must be on an even address here so the low bit of the jump address +@@ -647,3 +647,4 @@ L(pic_calc): + + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium/aors_n.asm b/mpn/x86/pentium/aors_n.asm +index 01ebfb9..ca124a5 100644 +--- a/mpn/x86/pentium/aors_n.asm ++++ b/mpn/x86/pentium/aors_n.asm +@@ -201,3 +201,4 @@ L(end2): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium/aorsmul_1.asm b/mpn/x86/pentium/aorsmul_1.asm +index d83cc45..5cec8b3 100644 +--- a/mpn/x86/pentium/aorsmul_1.asm ++++ b/mpn/x86/pentium/aorsmul_1.asm +@@ -142,3 +142,4 @@ L(top): + ret + + EPILOGUE() ++ASM_END() +diff 
--git a/mpn/x86/pentium/com.asm b/mpn/x86/pentium/com.asm +index b080545..00064ff 100644 +--- a/mpn/x86/pentium/com.asm ++++ b/mpn/x86/pentium/com.asm +@@ -179,3 +179,4 @@ L(done): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium/copyd.asm b/mpn/x86/pentium/copyd.asm +index 72a543b..c7f74b5 100644 +--- a/mpn/x86/pentium/copyd.asm ++++ b/mpn/x86/pentium/copyd.asm +@@ -144,3 +144,4 @@ L(done): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium/copyi.asm b/mpn/x86/pentium/copyi.asm +index d983d6b..bc7744e 100644 +--- a/mpn/x86/pentium/copyi.asm ++++ b/mpn/x86/pentium/copyi.asm +@@ -162,3 +162,4 @@ L(done): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium/logops_n.asm b/mpn/x86/pentium/logops_n.asm +index 1877317..41a9477 100644 +--- a/mpn/x86/pentium/logops_n.asm ++++ b/mpn/x86/pentium/logops_n.asm +@@ -174,3 +174,4 @@ L(done): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium/lshift.asm b/mpn/x86/pentium/lshift.asm +index 2a31f36..68cba52 100644 +--- a/mpn/x86/pentium/lshift.asm ++++ b/mpn/x86/pentium/lshift.asm +@@ -241,3 +241,4 @@ L(L1): movl %edx,(%edi) C store last limb + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium/mmx/lshift.asm b/mpn/x86/pentium/mmx/lshift.asm +index 04b0ddc..9e18c86 100644 +--- a/mpn/x86/pentium/mmx/lshift.asm ++++ b/mpn/x86/pentium/mmx/lshift.asm +@@ -461,3 +461,4 @@ L(finish_zero_unaligned): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium/mmx/mul_1.asm b/mpn/x86/pentium/mmx/mul_1.asm +index 4ced577..b04a718 100644 +--- a/mpn/x86/pentium/mmx/mul_1.asm ++++ b/mpn/x86/pentium/mmx/mul_1.asm +@@ -369,3 +369,4 @@ L(small_done): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium/mmx/rshift.asm b/mpn/x86/pentium/mmx/rshift.asm +index e3b274b..5493d20 100644 +--- a/mpn/x86/pentium/mmx/rshift.asm ++++ b/mpn/x86/pentium/mmx/rshift.asm +@@ -466,3 +466,4 @@ L(finish_zero_unaligned): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium/mod_34lsub1.asm b/mpn/x86/pentium/mod_34lsub1.asm +index 2d88223..0945de8 100644 +--- a/mpn/x86/pentium/mod_34lsub1.asm ++++ b/mpn/x86/pentium/mod_34lsub1.asm +@@ -190,3 +190,4 @@ L(combine): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium/mul_1.asm b/mpn/x86/pentium/mul_1.asm +index a0858af..2c49130 100644 +--- a/mpn/x86/pentium/mul_1.asm ++++ b/mpn/x86/pentium/mul_1.asm +@@ -175,3 +175,4 @@ L(top): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium/mul_2.asm b/mpn/x86/pentium/mul_2.asm +index 4c7beb5..e94e071 100644 +--- a/mpn/x86/pentium/mul_2.asm ++++ b/mpn/x86/pentium/mul_2.asm +@@ -148,3 +148,4 @@ L(done): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium/mul_basecase.asm b/mpn/x86/pentium/mul_basecase.asm +index e1d0f05..ff269bb 100644 +--- a/mpn/x86/pentium/mul_basecase.asm ++++ b/mpn/x86/pentium/mul_basecase.asm +@@ -140,3 +140,4 @@ L(done): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium/rshift.asm b/mpn/x86/pentium/rshift.asm +index 2105c4c..d98080d 100644 +--- a/mpn/x86/pentium/rshift.asm ++++ b/mpn/x86/pentium/rshift.asm +@@ -241,3 +241,4 @@ L(L1): movl %edx,(%edi) C store last limb + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium/sqr_basecase.asm b/mpn/x86/pentium/sqr_basecase.asm +index b11d767..ee64eb3 100644 +--- a/mpn/x86/pentium/sqr_basecase.asm ++++ b/mpn/x86/pentium/sqr_basecase.asm +@@ -526,3 +526,4 @@ L(diag): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium4/copyd.asm b/mpn/x86/pentium4/copyd.asm +index 82af81c..bf06a05 100644 +--- 
a/mpn/x86/pentium4/copyd.asm ++++ b/mpn/x86/pentium4/copyd.asm +@@ -69,3 +69,4 @@ L(end): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium4/copyi.asm b/mpn/x86/pentium4/copyi.asm +index b614887..acbb3f4 100644 +--- a/mpn/x86/pentium4/copyi.asm ++++ b/mpn/x86/pentium4/copyi.asm +@@ -91,3 +91,4 @@ L(replmovs): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium4/mmx/popham.asm b/mpn/x86/pentium4/mmx/popham.asm +index 9563cb5..f7a6124 100644 +--- a/mpn/x86/pentium4/mmx/popham.asm ++++ b/mpn/x86/pentium4/mmx/popham.asm +@@ -201,3 +201,4 @@ L(loaded): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium4/sse2/add_n.asm b/mpn/x86/pentium4/sse2/add_n.asm +index 8e2380e..e329635 100644 +--- a/mpn/x86/pentium4/sse2/add_n.asm ++++ b/mpn/x86/pentium4/sse2/add_n.asm +@@ -99,3 +99,4 @@ L(top): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium4/sse2/addlsh1_n.asm b/mpn/x86/pentium4/sse2/addlsh1_n.asm +index 93b63b2..e801f7b 100644 +--- a/mpn/x86/pentium4/sse2/addlsh1_n.asm ++++ b/mpn/x86/pentium4/sse2/addlsh1_n.asm +@@ -106,3 +106,4 @@ L(top): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium4/sse2/addmul_1.asm b/mpn/x86/pentium4/sse2/addmul_1.asm +index 7810207..62a7675 100644 +--- a/mpn/x86/pentium4/sse2/addmul_1.asm ++++ b/mpn/x86/pentium4/sse2/addmul_1.asm +@@ -187,3 +187,4 @@ PROLOGUE(mpn_addmul_1c) + movd 20(%esp), %mm6 + jmp L(ent) + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium4/sse2/cnd_add_n.asm b/mpn/x86/pentium4/sse2/cnd_add_n.asm +index b3f3474..7183b94 100644 +--- a/mpn/x86/pentium4/sse2/cnd_add_n.asm ++++ b/mpn/x86/pentium4/sse2/cnd_add_n.asm +@@ -93,3 +93,4 @@ L(top): movd (%ebx,%ecx,4), %mm2 + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium4/sse2/cnd_sub_n.asm b/mpn/x86/pentium4/sse2/cnd_sub_n.asm +index 339a23e..ba0fc47 100644 +--- a/mpn/x86/pentium4/sse2/cnd_sub_n.asm ++++ b/mpn/x86/pentium4/sse2/cnd_sub_n.asm +@@ -112,3 +112,4 @@ L(done_mm1): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium4/sse2/divrem_1.asm b/mpn/x86/pentium4/sse2/divrem_1.asm +index 0146fab..d8619e0 100644 +--- a/mpn/x86/pentium4/sse2/divrem_1.asm ++++ b/mpn/x86/pentium4/sse2/divrem_1.asm +@@ -643,3 +643,4 @@ L(fraction_top): + jmp L(fraction_done) + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium4/sse2/mod_1_1.asm b/mpn/x86/pentium4/sse2/mod_1_1.asm +index ee88bab..2e5a514 100644 +--- a/mpn/x86/pentium4/sse2/mod_1_1.asm ++++ b/mpn/x86/pentium4/sse2/mod_1_1.asm +@@ -164,3 +164,4 @@ C CAUTION: This is the same code as in k7/mod_1_1.asm + pop %ebp + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium4/sse2/mod_1_4.asm b/mpn/x86/pentium4/sse2/mod_1_4.asm +index eb2edb6..5ef3c4a 100644 +--- a/mpn/x86/pentium4/sse2/mod_1_4.asm ++++ b/mpn/x86/pentium4/sse2/mod_1_4.asm +@@ -267,3 +267,4 @@ C CAUTION: This is the same code as in k7/mod_1_4.asm + pop %ebp + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium4/sse2/mod_34lsub1.asm b/mpn/x86/pentium4/sse2/mod_34lsub1.asm +index 31e25b7..5b6b9a7 100644 +--- a/mpn/x86/pentium4/sse2/mod_34lsub1.asm ++++ b/mpn/x86/pentium4/sse2/mod_34lsub1.asm +@@ -173,3 +173,4 @@ L(combine): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium4/sse2/mul_1.asm b/mpn/x86/pentium4/sse2/mul_1.asm +index 6347b8b..9e4f3fc 100644 +--- a/mpn/x86/pentium4/sse2/mul_1.asm ++++ b/mpn/x86/pentium4/sse2/mul_1.asm +@@ -162,3 +162,4 @@ PROLOGUE(mpn_mul_1c) + movd 20(%esp), %mm6 + jmp L(ent) + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium4/sse2/mul_basecase.asm 
b/mpn/x86/pentium4/sse2/mul_basecase.asm +index 6e3775a..0bad756 100644 +--- a/mpn/x86/pentium4/sse2/mul_basecase.asm ++++ b/mpn/x86/pentium4/sse2/mul_basecase.asm +@@ -660,3 +660,4 @@ L(oel3): + pop %esi C 3 + ret C 3 + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium4/sse2/rsh1add_n.asm b/mpn/x86/pentium4/sse2/rsh1add_n.asm +index f421d13..543a637 100644 +--- a/mpn/x86/pentium4/sse2/rsh1add_n.asm ++++ b/mpn/x86/pentium4/sse2/rsh1add_n.asm +@@ -124,3 +124,4 @@ L(done): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium4/sse2/sqr_basecase.asm b/mpn/x86/pentium4/sse2/sqr_basecase.asm +index 2dd57d2..9695d42 100644 +--- a/mpn/x86/pentium4/sse2/sqr_basecase.asm ++++ b/mpn/x86/pentium4/sse2/sqr_basecase.asm +@@ -703,3 +703,4 @@ L(diag): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium4/sse2/sub_n.asm b/mpn/x86/pentium4/sse2/sub_n.asm +index 5ba1c01..2cd5b22 100644 +--- a/mpn/x86/pentium4/sse2/sub_n.asm ++++ b/mpn/x86/pentium4/sse2/sub_n.asm +@@ -117,3 +117,4 @@ L(done_mm1): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/pentium4/sse2/submul_1.asm b/mpn/x86/pentium4/sse2/submul_1.asm +index 020675b..1172f0a 100644 +--- a/mpn/x86/pentium4/sse2/submul_1.asm ++++ b/mpn/x86/pentium4/sse2/submul_1.asm +@@ -180,3 +180,4 @@ L(eod): paddq %mm6, %mm4 C add 0xFFFFFFFE00000001 + movd %mm0, 8(%edx) C result + jmp L(rt) + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/rshift.asm b/mpn/x86/rshift.asm +index a60dcaa..1cedc0d 100644 +--- a/mpn/x86/rshift.asm ++++ b/mpn/x86/rshift.asm +@@ -106,3 +106,4 @@ L(end): shrl %cl,%ebx C compute most significant limb + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/sec_tabselect.asm b/mpn/x86/sec_tabselect.asm +index c7c2e05..3a8fa17 100644 +--- a/mpn/x86/sec_tabselect.asm ++++ b/mpn/x86/sec_tabselect.asm +@@ -113,3 +113,4 @@ L(outer_end): + pop %edi + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/sqr_basecase.asm b/mpn/x86/sqr_basecase.asm +index 39f8a89..3414b05 100644 +--- a/mpn/x86/sqr_basecase.asm ++++ b/mpn/x86/sqr_basecase.asm +@@ -357,3 +357,4 @@ L(diag): + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/udiv.asm b/mpn/x86/udiv.asm +index a3ee088..2531ef7 100644 +--- a/mpn/x86/udiv.asm ++++ b/mpn/x86/udiv.asm +@@ -50,3 +50,4 @@ deflit(`FRAME',0) + movl %edx, (%ecx) + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/umul.asm b/mpn/x86/umul.asm +index 34fe434..5c1da35 100644 +--- a/mpn/x86/umul.asm ++++ b/mpn/x86/umul.asm +@@ -49,3 +49,4 @@ deflit(`FRAME',0) + movl %edx, %eax + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86/x86-defs.m4 b/mpn/x86/x86-defs.m4 +index 81309b2..b3520d2 100644 +--- a/mpn/x86/x86-defs.m4 ++++ b/mpn/x86/x86-defs.m4 +@@ -123,6 +123,7 @@ m4_assert_defined(`WANT_PROFILING') + TYPE($1,`function') + COFF_TYPE($1) + $1: ++ X86_ENDBR + ifelse(WANT_PROFILING,`prof', ` call_mcount') + ifelse(WANT_PROFILING,`gprof', ` call_mcount') + ifelse(WANT_PROFILING,`instrument',` call_instrument(enter)') +@@ -992,7 +993,11 @@ L(movl_eip_`'substr($2,1)): + + dnl ASM_END + +-define(`ASM_END',`load_eip') ++define(`ASM_END', ++`load_eip ++X86_GNU_PROPERTY ++') ++ + + define(`load_eip', `') dnl updated in LEA/LEAL + +diff --git a/mpn/x86_64/addaddmul_1msb0.asm b/mpn/x86_64/addaddmul_1msb0.asm +index 87c21b4..2d03ddb 100644 +--- a/mpn/x86_64/addaddmul_1msb0.asm ++++ b/mpn/x86_64/addaddmul_1msb0.asm +@@ -168,3 +168,4 @@ L(end): cmp $1, R32(n) + pop %r12 + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/aorrlsh1_n.asm b/mpn/x86_64/aorrlsh1_n.asm +index 6ee0872..1441a6c 100644 +--- 
a/mpn/x86_64/aorrlsh1_n.asm ++++ b/mpn/x86_64/aorrlsh1_n.asm +@@ -168,3 +168,4 @@ ifdef(`OPERATION_rsblsh1_n',` + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/aorrlshC_n.asm b/mpn/x86_64/aorrlshC_n.asm +index de00154..691abde 100644 +--- a/mpn/x86_64/aorrlshC_n.asm ++++ b/mpn/x86_64/aorrlshC_n.asm +@@ -170,3 +170,4 @@ ifelse(ADDSUB,add,` + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/aorrlsh_n.asm b/mpn/x86_64/aorrlsh_n.asm +index 5ca128f..57f0e77 100644 +--- a/mpn/x86_64/aorrlsh_n.asm ++++ b/mpn/x86_64/aorrlsh_n.asm +@@ -174,3 +174,4 @@ L(end): add R32(%rbx), R32(%rbx) + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/aors_err1_n.asm b/mpn/x86_64/aors_err1_n.asm +index 54d0b3f..8c42ea1 100644 +--- a/mpn/x86_64/aors_err1_n.asm ++++ b/mpn/x86_64/aors_err1_n.asm +@@ -223,3 +223,4 @@ L(end): + pop %rbx + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/aors_err2_n.asm b/mpn/x86_64/aors_err2_n.asm +index ce5c2a4..0227e5d 100644 +--- a/mpn/x86_64/aors_err2_n.asm ++++ b/mpn/x86_64/aors_err2_n.asm +@@ -170,3 +170,4 @@ L(end): + pop %rbx + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/aors_err3_n.asm b/mpn/x86_64/aors_err3_n.asm +index bb6d0c5..37047db 100644 +--- a/mpn/x86_64/aors_err3_n.asm ++++ b/mpn/x86_64/aors_err3_n.asm +@@ -154,3 +154,4 @@ L(end): + pop %rbx + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/aors_n.asm b/mpn/x86_64/aors_n.asm +index d5a314a..b516c4d 100644 +--- a/mpn/x86_64/aors_n.asm ++++ b/mpn/x86_64/aors_n.asm +@@ -176,3 +176,4 @@ L(end): lea 32(up), up + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/aorsmul_1.asm b/mpn/x86_64/aorsmul_1.asm +index dfe4dc4..e3bb2f9 100644 +--- a/mpn/x86_64/aorsmul_1.asm ++++ b/mpn/x86_64/aorsmul_1.asm +@@ -188,3 +188,4 @@ IFDOS(``pop %rdi '') + IFDOS(``pop %rsi '') + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/atom/addmul_2.asm b/mpn/x86_64/atom/addmul_2.asm +index c1dcdc4..c1d9451 100644 +--- a/mpn/x86_64/atom/addmul_2.asm ++++ b/mpn/x86_64/atom/addmul_2.asm +@@ -184,3 +184,4 @@ L(end): mul v1 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/atom/aorrlsh1_n.asm b/mpn/x86_64/atom/aorrlsh1_n.asm +index f44de19..693a302 100644 +--- a/mpn/x86_64/atom/aorrlsh1_n.asm ++++ b/mpn/x86_64/atom/aorrlsh1_n.asm +@@ -236,3 +236,4 @@ IFDOS(` mov 56(%rsp), %r8 ') + sbb R32(%rbp), R32(%rbp) C save acy + jmp L(ent) + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/atom/aorrlsh2_n.asm b/mpn/x86_64/atom/aorrlsh2_n.asm +index 02fb29d..c6ded74 100644 +--- a/mpn/x86_64/atom/aorrlsh2_n.asm ++++ b/mpn/x86_64/atom/aorrlsh2_n.asm +@@ -189,3 +189,4 @@ ifdef(`OPERATION_rsblsh2_n',` + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/atom/lshift.asm b/mpn/x86_64/atom/lshift.asm +index 1b37d5d..894b912 100644 +--- a/mpn/x86_64/atom/lshift.asm ++++ b/mpn/x86_64/atom/lshift.asm +@@ -121,3 +121,4 @@ L(end): shl R8(%rcx), %r10 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/atom/lshiftc.asm b/mpn/x86_64/atom/lshiftc.asm +index 7385f8f..40d8fff 100644 +--- a/mpn/x86_64/atom/lshiftc.asm ++++ b/mpn/x86_64/atom/lshiftc.asm +@@ -125,3 +125,4 @@ L(end): shl R8(%rcx), %r10 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/atom/mul_2.asm b/mpn/x86_64/atom/mul_2.asm +index 4bc22cd..87414d9 100644 +--- a/mpn/x86_64/atom/mul_2.asm ++++ b/mpn/x86_64/atom/mul_2.asm +@@ -188,3 +188,4 @@ L(end): mul v1 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/atom/rsh1aors_n.asm 
b/mpn/x86_64/atom/rsh1aors_n.asm +index 6f5f638..f3952c0 100644 +--- a/mpn/x86_64/atom/rsh1aors_n.asm ++++ b/mpn/x86_64/atom/rsh1aors_n.asm +@@ -285,3 +285,4 @@ L(cj1): pop %r15 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/atom/rshift.asm b/mpn/x86_64/atom/rshift.asm +index 29c027d..f4c59e1 100644 +--- a/mpn/x86_64/atom/rshift.asm ++++ b/mpn/x86_64/atom/rshift.asm +@@ -119,3 +119,4 @@ L(end): shr R8(cnt), %r10 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/atom/sublsh1_n.asm b/mpn/x86_64/atom/sublsh1_n.asm +index 1306acd..762e1ee 100644 +--- a/mpn/x86_64/atom/sublsh1_n.asm ++++ b/mpn/x86_64/atom/sublsh1_n.asm +@@ -240,3 +240,4 @@ IFDOS(` mov 56(%rsp), %r8 ') + sbb R32(%rbp), R32(%rbp) C save acy + jmp L(ent) + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/bd1/addmul_2.asm b/mpn/x86_64/bd1/addmul_2.asm +index b54e91a..b1c149b 100644 +--- a/mpn/x86_64/bd1/addmul_2.asm ++++ b/mpn/x86_64/bd1/addmul_2.asm +@@ -233,3 +233,4 @@ L(end): mul v0 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/bd1/hamdist.asm b/mpn/x86_64/bd1/hamdist.asm +index 29e78a3..f93ce4d 100644 +--- a/mpn/x86_64/bd1/hamdist.asm ++++ b/mpn/x86_64/bd1/hamdist.asm +@@ -204,3 +204,4 @@ DEF_OBJECT(L(cnsts),16,`JUMPTABSECT') + .byte 0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f + END_OBJECT(L(cnsts)) + ') ++ASM_END() +diff --git a/mpn/x86_64/bd1/mul_2.asm b/mpn/x86_64/bd1/mul_2.asm +index 85fa7aa..e910cee 100644 +--- a/mpn/x86_64/bd1/mul_2.asm ++++ b/mpn/x86_64/bd1/mul_2.asm +@@ -193,3 +193,4 @@ L(end): mov -8(up), %rax + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/bd1/mul_basecase.asm b/mpn/x86_64/bd1/mul_basecase.asm +index e47ba58..ebae74d 100644 +--- a/mpn/x86_64/bd1/mul_basecase.asm ++++ b/mpn/x86_64/bd1/mul_basecase.asm +@@ -414,3 +414,4 @@ L(ret2):pop %rbp + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/bd1/popcount.asm b/mpn/x86_64/bd1/popcount.asm +index 28ce461..063c2cc 100644 +--- a/mpn/x86_64/bd1/popcount.asm ++++ b/mpn/x86_64/bd1/popcount.asm +@@ -189,3 +189,4 @@ DEF_OBJECT(L(cnsts),16,`JUMPTABSECT') + .byte 0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f + END_OBJECT(L(cnsts)) + ') ++ASM_END() +diff --git a/mpn/x86_64/bd2/gcd_11.asm b/mpn/x86_64/bd2/gcd_11.asm +index b167077..3d1c788 100644 +--- a/mpn/x86_64/bd2/gcd_11.asm ++++ b/mpn/x86_64/bd2/gcd_11.asm +@@ -94,3 +94,4 @@ L(end): mov v0, %rax + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/bd2/gcd_22.asm b/mpn/x86_64/bd2/gcd_22.asm +index 070cb3e..491f0d9 100644 +--- a/mpn/x86_64/bd2/gcd_22.asm ++++ b/mpn/x86_64/bd2/gcd_22.asm +@@ -140,3 +140,4 @@ L(end): C mov v0, %rax + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/bd4/gcd_11.asm b/mpn/x86_64/bd4/gcd_11.asm +index 4176b85..d172e32 100644 +--- a/mpn/x86_64/bd4/gcd_11.asm ++++ b/mpn/x86_64/bd4/gcd_11.asm +@@ -94,3 +94,4 @@ L(end): C rax = result + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/bdiv_dbm1c.asm b/mpn/x86_64/bdiv_dbm1c.asm +index a53bd52..c383ee3 100644 +--- a/mpn/x86_64/bdiv_dbm1c.asm ++++ b/mpn/x86_64/bdiv_dbm1c.asm +@@ -104,3 +104,4 @@ L(lo1): sub %rax, %r8 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/bdiv_q_1.asm b/mpn/x86_64/bdiv_q_1.asm +index 85538c9..c983c7f 100644 +--- a/mpn/x86_64/bdiv_q_1.asm ++++ b/mpn/x86_64/bdiv_q_1.asm +@@ -193,3 +193,4 @@ L(one): shr R8(%rcx), %rax + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/bt1/aors_n.asm b/mpn/x86_64/bt1/aors_n.asm +index 
9b6b5c7..04d81dd 100644 +--- a/mpn/x86_64/bt1/aors_n.asm ++++ b/mpn/x86_64/bt1/aors_n.asm +@@ -157,3 +157,4 @@ PROLOGUE(func_nc) + IFDOS(` mov 56(%rsp), %r8 ') + jmp L(ent) + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/bt1/aorsmul_1.asm b/mpn/x86_64/bt1/aorsmul_1.asm +index 41e1d8a..d309321 100644 +--- a/mpn/x86_64/bt1/aorsmul_1.asm ++++ b/mpn/x86_64/bt1/aorsmul_1.asm +@@ -189,3 +189,4 @@ IFDOS(` pop %rdi ') + IFDOS(` pop %rsi ') + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/bt1/copyd.asm b/mpn/x86_64/bt1/copyd.asm +index 877714e..23fb80b 100644 +--- a/mpn/x86_64/bt1/copyd.asm ++++ b/mpn/x86_64/bt1/copyd.asm +@@ -89,3 +89,4 @@ L(end): cmp $-4, R32(n) + L(ret): FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/bt1/copyi.asm b/mpn/x86_64/bt1/copyi.asm +index ee0f578..25718e6 100644 +--- a/mpn/x86_64/bt1/copyi.asm ++++ b/mpn/x86_64/bt1/copyi.asm +@@ -92,3 +92,4 @@ L(end): cmp $4, R32(n) + L(ret): FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/bt1/gcd_11.asm b/mpn/x86_64/bt1/gcd_11.asm +index ef53392..03bc06d 100644 +--- a/mpn/x86_64/bt1/gcd_11.asm ++++ b/mpn/x86_64/bt1/gcd_11.asm +@@ -117,3 +117,4 @@ L(count_better): + bsf u0, cnt + jmp L(shr) + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/bt1/mul_1.asm b/mpn/x86_64/bt1/mul_1.asm +index 4394d6e..634cb35 100644 +--- a/mpn/x86_64/bt1/mul_1.asm ++++ b/mpn/x86_64/bt1/mul_1.asm +@@ -239,3 +239,4 @@ IFDOS(` pop %rdi ') + IFDOS(` pop %rsi ') + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/bt1/mul_basecase.asm b/mpn/x86_64/bt1/mul_basecase.asm +index e7d46bf..1726190 100644 +--- a/mpn/x86_64/bt1/mul_basecase.asm ++++ b/mpn/x86_64/bt1/mul_basecase.asm +@@ -484,3 +484,4 @@ L(ret): pop %r13 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/bt1/sqr_basecase.asm b/mpn/x86_64/bt1/sqr_basecase.asm +index 0e417a1..8f665d1 100644 +--- a/mpn/x86_64/bt1/sqr_basecase.asm ++++ b/mpn/x86_64/bt1/sqr_basecase.asm +@@ -563,3 +563,4 @@ L(esd): add %rbx, w0 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/cnd_aors_n.asm b/mpn/x86_64/cnd_aors_n.asm +index 13a2ab3..b720ecb 100644 +--- a/mpn/x86_64/cnd_aors_n.asm ++++ b/mpn/x86_64/cnd_aors_n.asm +@@ -181,3 +181,4 @@ L(end): neg R32(%rax) + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/com.asm b/mpn/x86_64/com.asm +index 006acaf..ec72e19 100644 +--- a/mpn/x86_64/com.asm ++++ b/mpn/x86_64/com.asm +@@ -93,3 +93,4 @@ L(e10): movq 24(up,n,8), %r9 + L(ret): FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/copyd.asm b/mpn/x86_64/copyd.asm +index a5e6e59..02ab53f 100644 +--- a/mpn/x86_64/copyd.asm ++++ b/mpn/x86_64/copyd.asm +@@ -91,3 +91,4 @@ L(end): shr R32(n) + mov %r9, -16(rp) + 1: ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/copyi.asm b/mpn/x86_64/copyi.asm +index bafce7a..8c6dbdc 100644 +--- a/mpn/x86_64/copyi.asm ++++ b/mpn/x86_64/copyi.asm +@@ -90,3 +90,4 @@ L(end): shr R32(n) + mov %r9, 16(rp) + 1: ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/core2/aors_err1_n.asm b/mpn/x86_64/core2/aors_err1_n.asm +index 3f875ae..c9c6c36 100644 +--- a/mpn/x86_64/core2/aors_err1_n.asm ++++ b/mpn/x86_64/core2/aors_err1_n.asm +@@ -223,3 +223,4 @@ L(end): + pop %rbx + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/core2/aors_n.asm b/mpn/x86_64/core2/aors_n.asm +index f9e0039..7981b7f 100644 +--- a/mpn/x86_64/core2/aors_n.asm ++++ b/mpn/x86_64/core2/aors_n.asm +@@ -148,3 +148,4 @@ PROLOGUE(func_nc) + IFDOS(` mov 56(%rsp), %r8 ') + jmp L(start) + EPILOGUE() ++ASM_END() +diff 
--git a/mpn/x86_64/core2/aorsmul_1.asm b/mpn/x86_64/core2/aorsmul_1.asm +index a7a5d6e..b2b067a 100644 +--- a/mpn/x86_64/core2/aorsmul_1.asm ++++ b/mpn/x86_64/core2/aorsmul_1.asm +@@ -186,3 +186,4 @@ L(n1): mov 8(rp), %r10 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/core2/divrem_1.asm b/mpn/x86_64/core2/divrem_1.asm +index 1b3f139..d41c494 100644 +--- a/mpn/x86_64/core2/divrem_1.asm ++++ b/mpn/x86_64/core2/divrem_1.asm +@@ -241,3 +241,4 @@ L(ret): pop %rbx + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/core2/gcd_11.asm b/mpn/x86_64/core2/gcd_11.asm +index b00451f..b730a55 100644 +--- a/mpn/x86_64/core2/gcd_11.asm ++++ b/mpn/x86_64/core2/gcd_11.asm +@@ -91,3 +91,4 @@ L(end): C rax = result + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/core2/gcd_22.asm b/mpn/x86_64/core2/gcd_22.asm +index b5aa73b..0ccde8a 100644 +--- a/mpn/x86_64/core2/gcd_22.asm ++++ b/mpn/x86_64/core2/gcd_22.asm +@@ -135,3 +135,4 @@ L(end): C mov v0, %rax + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/core2/hamdist.asm b/mpn/x86_64/core2/hamdist.asm +index a78753d..be451d7 100644 +--- a/mpn/x86_64/core2/hamdist.asm ++++ b/mpn/x86_64/core2/hamdist.asm +@@ -208,3 +208,4 @@ DEF_OBJECT(L(cnsts),16,`JUMPTABSECT') + .byte 0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f + .byte 0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f + END_OBJECT(L(cnsts)) ++ASM_END() +diff --git a/mpn/x86_64/core2/logops_n.asm b/mpn/x86_64/core2/logops_n.asm +index 5ff174c..451d556 100644 +--- a/mpn/x86_64/core2/logops_n.asm ++++ b/mpn/x86_64/core2/logops_n.asm +@@ -283,3 +283,4 @@ L(ret): FUNC_EXIT() + ret + EPILOGUE() + ') ++ASM_END() +diff --git a/mpn/x86_64/core2/lshift.asm b/mpn/x86_64/core2/lshift.asm +index 9016a71..62053c2 100644 +--- a/mpn/x86_64/core2/lshift.asm ++++ b/mpn/x86_64/core2/lshift.asm +@@ -143,3 +143,4 @@ L(1): shl R8(cnt), %r9 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/core2/lshiftc.asm b/mpn/x86_64/core2/lshiftc.asm +index c428f13..cdd4e11 100644 +--- a/mpn/x86_64/core2/lshiftc.asm ++++ b/mpn/x86_64/core2/lshiftc.asm +@@ -157,3 +157,4 @@ L(1): shl R8(cnt), %r9 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/core2/mul_basecase.asm b/mpn/x86_64/core2/mul_basecase.asm +index d16be85..0dcf0f8 100644 +--- a/mpn/x86_64/core2/mul_basecase.asm ++++ b/mpn/x86_64/core2/mul_basecase.asm +@@ -347,6 +347,7 @@ L(m2e0):mul v1 + jz L(ret2) + + L(do_am0): ++ X86_ENDBR + push %r15 + push vn_param + +@@ -520,6 +521,7 @@ L(m2e1):mul v1 + jz L(ret2) + + L(do_am1): ++ X86_ENDBR + push %r15 + push vn_param + +@@ -693,6 +695,7 @@ L(m2e2):mul v1 + jz L(ret2) + + L(do_am2): ++ X86_ENDBR + push %r15 + push vn_param + +@@ -866,6 +869,7 @@ L(m2e3):mul v1 + jz L(ret2) + + L(do_am3): ++ X86_ENDBR + push %r15 + push vn_param + +@@ -973,3 +977,4 @@ L(lo3): mul v0 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/core2/mullo_basecase.asm b/mpn/x86_64/core2/mullo_basecase.asm +index 0f03d86..11814d5 100644 +--- a/mpn/x86_64/core2/mullo_basecase.asm ++++ b/mpn/x86_64/core2/mullo_basecase.asm +@@ -425,3 +425,4 @@ L(n3): mov (vp_param), %r9 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/core2/popcount.asm b/mpn/x86_64/core2/popcount.asm +index 39d8c5d..5e03ef3 100644 +--- a/mpn/x86_64/core2/popcount.asm ++++ b/mpn/x86_64/core2/popcount.asm +@@ -183,3 +183,4 @@ DEF_OBJECT(L(cnsts),16,`JUMPTABSECT') + .byte 0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f + .byte 0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f,0x0f + 
END_OBJECT(L(cnsts)) ++ASM_END() +diff --git a/mpn/x86_64/core2/rsh1aors_n.asm b/mpn/x86_64/core2/rsh1aors_n.asm +index 27eed37..5b4fe7e 100644 +--- a/mpn/x86_64/core2/rsh1aors_n.asm ++++ b/mpn/x86_64/core2/rsh1aors_n.asm +@@ -167,3 +167,4 @@ L(end): shrd $1, %rbx, %rbp + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/core2/rshift.asm b/mpn/x86_64/core2/rshift.asm +index 7578a53..86cc804 100644 +--- a/mpn/x86_64/core2/rshift.asm ++++ b/mpn/x86_64/core2/rshift.asm +@@ -141,3 +141,4 @@ L(1): shr R8(cnt), %r9 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/core2/sqr_basecase.asm b/mpn/x86_64/core2/sqr_basecase.asm +index a112c1b..65286b0 100644 +--- a/mpn/x86_64/core2/sqr_basecase.asm ++++ b/mpn/x86_64/core2/sqr_basecase.asm +@@ -982,3 +982,4 @@ L(n3): mov %rax, %r10 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/core2/sublshC_n.asm b/mpn/x86_64/core2/sublshC_n.asm +index 272700d..e30562b 100644 +--- a/mpn/x86_64/core2/sublshC_n.asm ++++ b/mpn/x86_64/core2/sublshC_n.asm +@@ -156,3 +156,4 @@ L(end): shr $RSH, %r11 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/coreibwl/addmul_1.asm b/mpn/x86_64/coreibwl/addmul_1.asm +index ee7e4ee..4ea5580 100644 +--- a/mpn/x86_64/coreibwl/addmul_1.asm ++++ b/mpn/x86_64/coreibwl/addmul_1.asm +@@ -110,33 +110,39 @@ L(tab): JMPENT( L(f0), L(tab)) + JMPENT( L(f7), L(tab)) + TEXT + +-L(f0): mulx( (up), %r10, %r8) ++L(f0): X86_ENDBR ++ mulx( (up), %r10, %r8) + lea -8(up), up + lea -8(rp), rp + lea -1(n), n + jmp L(b0) + +-L(f3): mulx( (up), %r9, %rax) ++L(f3): X86_ENDBR ++ mulx( (up), %r9, %rax) + lea 16(up), up + lea -48(rp), rp + jmp L(b3) + +-L(f4): mulx( (up), %r10, %r8) ++L(f4): X86_ENDBR ++ mulx( (up), %r10, %r8) + lea 24(up), up + lea -40(rp), rp + jmp L(b4) + +-L(f5): mulx( (up), %r9, %rax) ++L(f5): X86_ENDBR ++ mulx( (up), %r9, %rax) + lea 32(up), up + lea -32(rp), rp + jmp L(b5) + +-L(f6): mulx( (up), %r10, %r8) ++L(f6): X86_ENDBR ++ mulx( (up), %r10, %r8) + lea 40(up), up + lea -24(rp), rp + jmp L(b6) + +-L(f1): mulx( (up), %r9, %rax) ++L(f1): X86_ENDBR ++ mulx( (up), %r9, %rax) + jrcxz L(1) + jmp L(b1) + L(1): add (rp), %r9 +@@ -156,7 +162,8 @@ ifdef(`PIC', + ` nop;nop;nop;nop', + ` nop;nop;nop;nop;nop;nop;nop;nop;nop;nop;nop') + +-L(f2): mulx( (up), %r10, %r8) ++L(f2): X86_ENDBR ++ mulx( (up), %r10, %r8) + lea 8(up), up + lea 8(rp), rp + mulx( (up), %r9, %rax) +@@ -200,7 +207,8 @@ L(b3): adox( 48,(rp), %r9) + mulx( (up), %r9, %rax) + jmp L(top) + +-L(f7): mulx( (up), %r9, %rax) ++L(f7): X86_ENDBR ++ mulx( (up), %r9, %rax) + lea -16(up), up + lea -16(rp), rp + jmp L(b7) +diff --git a/mpn/x86_64/coreibwl/mul_1.asm b/mpn/x86_64/coreibwl/mul_1.asm +index b7fae2f..77121a5 100644 +--- a/mpn/x86_64/coreibwl/mul_1.asm ++++ b/mpn/x86_64/coreibwl/mul_1.asm +@@ -108,48 +108,56 @@ L(tab): JMPENT( L(f0), L(tab)) + JMPENT( L(f7), L(tab)) + TEXT + +-L(f0): mulx( (up), %r10, %r8) ++L(f0): X86_ENDBR ++ mulx( (up), %r10, %r8) + lea 56(up), up + lea -8(rp), rp + jmp L(b0) + +-L(f3): mulx( (up), %r9, %rax) ++L(f3): X86_ENDBR ++ mulx( (up), %r9, %rax) + lea 16(up), up + lea 16(rp), rp + inc n + jmp L(b3) + +-L(f4): mulx( (up), %r10, %r8) ++L(f4): X86_ENDBR ++ mulx( (up), %r10, %r8) + lea 24(up), up + lea 24(rp), rp + inc n + jmp L(b4) + +-L(f5): mulx( (up), %r9, %rax) ++L(f5): X86_ENDBR ++ mulx( (up), %r9, %rax) + lea 32(up), up + lea 32(rp), rp + inc n + jmp L(b5) + +-L(f6): mulx( (up), %r10, %r8) ++L(f6): X86_ENDBR ++ mulx( (up), %r10, %r8) + lea 40(up), up + lea 40(rp), rp + inc n + 
jmp L(b6) + +-L(f7): mulx( (up), %r9, %rax) ++L(f7): X86_ENDBR ++ mulx( (up), %r9, %rax) + lea 48(up), up + lea 48(rp), rp + inc n + jmp L(b7) + +-L(f1): mulx( (up), %r9, %rax) ++L(f1): X86_ENDBR ++ mulx( (up), %r9, %rax) + test n, n + jnz L(b1) + L(1): mov %r9, (rp) + ret + +-L(f2): mulx( (up), %r10, %r8) ++L(f2): X86_ENDBR ++ mulx( (up), %r10, %r8) + lea 8(up), up + lea 8(rp), rp + mulx( (up), %r9, %rax) +diff --git a/mpn/x86_64/coreibwl/mul_basecase.asm b/mpn/x86_64/coreibwl/mul_basecase.asm +index 42ca976..c5e60e7 100644 +--- a/mpn/x86_64/coreibwl/mul_basecase.asm ++++ b/mpn/x86_64/coreibwl/mul_basecase.asm +@@ -157,45 +157,53 @@ ifdef(`PIC', + jmp *(%r10,%rax,8) + ') + +-L(mf0): mulx( (up), w2, w3) ++L(mf0): X86_ENDBR ++ mulx( (up), w2, w3) + lea 56(up), up + lea -8(rp), rp + jmp L(mb0) + +-L(mf3): mulx( (up), w0, w1) ++L(mf3): X86_ENDBR ++ mulx( (up), w0, w1) + lea 16(up), up + lea 16(rp), rp + inc n + jmp L(mb3) + +-L(mf4): mulx( (up), w2, w3) ++L(mf4): X86_ENDBR ++ mulx( (up), w2, w3) + lea 24(up), up + lea 24(rp), rp + inc n + jmp L(mb4) + +-L(mf5): mulx( (up), w0, w1) ++L(mf5): X86_ENDBR ++ mulx( (up), w0, w1) + lea 32(up), up + lea 32(rp), rp + inc n + jmp L(mb5) + +-L(mf6): mulx( (up), w2, w3) ++L(mf6): X86_ENDBR ++ mulx( (up), w2, w3) + lea 40(up), up + lea 40(rp), rp + inc n + jmp L(mb6) + +-L(mf7): mulx( (up), w0, w1) ++L(mf7): X86_ENDBR ++ mulx( (up), w0, w1) + lea 48(up), up + lea 48(rp), rp + inc n + jmp L(mb7) + +-L(mf1): mulx( (up), w0, w1) ++L(mf1): X86_ENDBR ++ mulx( (up), w0, w1) + jmp L(mb1) + +-L(mf2): mulx( (up), w2, w3) ++L(mf2): X86_ENDBR ++ mulx( (up), w2, w3) + lea 8(up), up + lea 8(rp), rp + mulx( (up), w0, w1) +@@ -256,32 +264,39 @@ L(outer): + lea 8(vp), vp + jmp *jaddr + +-L(f0): mulx( 8,(up), w2, w3) ++L(f0): X86_ENDBR ++ mulx( 8,(up), w2, w3) + lea 8(rp,unneg,8), rp + lea -1(n), n + jmp L(b0) + +-L(f3): mulx( -16,(up), w0, w1) ++L(f3): X86_ENDBR ++ mulx( -16,(up), w0, w1) + lea -56(rp,unneg,8), rp + jmp L(b3) + +-L(f4): mulx( -24,(up), w2, w3) ++L(f4): X86_ENDBR ++ mulx( -24,(up), w2, w3) + lea -56(rp,unneg,8), rp + jmp L(b4) + +-L(f5): mulx( -32,(up), w0, w1) ++L(f5): X86_ENDBR ++ mulx( -32,(up), w0, w1) + lea -56(rp,unneg,8), rp + jmp L(b5) + +-L(f6): mulx( -40,(up), w2, w3) ++L(f6): X86_ENDBR ++ mulx( -40,(up), w2, w3) + lea -56(rp,unneg,8), rp + jmp L(b6) + +-L(f7): mulx( 16,(up), w0, w1) ++L(f7): X86_ENDBR ++ mulx( 16,(up), w0, w1) + lea 8(rp,unneg,8), rp + jmp L(b7) + +-L(f1): mulx( (up), w0, w1) ++L(f1): X86_ENDBR ++ mulx( (up), w0, w1) + lea 8(rp,unneg,8), rp + jmp L(b1) + +@@ -303,6 +318,7 @@ L(done): + ret + + L(f2): ++ X86_ENDBR + mulx( -8,(up), w2, w3) + lea 8(rp,unneg,8), rp + mulx( (up), w0, w1) +@@ -367,3 +383,4 @@ L(atab):JMPENT( L(f0), L(atab)) + JMPENT( L(f7), L(atab)) + TEXT + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/coreibwl/mullo_basecase.asm b/mpn/x86_64/coreibwl/mullo_basecase.asm +index 5cdb209..b3e435b 100644 +--- a/mpn/x86_64/coreibwl/mullo_basecase.asm ++++ b/mpn/x86_64/coreibwl/mullo_basecase.asm +@@ -393,3 +393,4 @@ L(mtab):JMPENT( L(mf7), L(mtab)) + JMPENT( L(mf4), L(mtab)) + JMPENT( L(mf5), L(mtab)) + JMPENT( L(mf6), L(mtab)) ++ASM_END() +diff --git a/mpn/x86_64/coreibwl/sqr_basecase.asm b/mpn/x86_64/coreibwl/sqr_basecase.asm +index e81b01b..cd523cf 100644 +--- a/mpn/x86_64/coreibwl/sqr_basecase.asm ++++ b/mpn/x86_64/coreibwl/sqr_basecase.asm +@@ -181,14 +181,16 @@ ifdef(`PIC', + jmp *(%r10,%rax,8) + ') + +-L(mf0): mulx( u0, w0, w1) C up[0]^2 ++L(mf0): X86_ENDBR ++ mulx( u0, w0, w1) C up[0]^2 + add u0, u0 + mulx( 
8,(up), w2, w3) + lea 64(up), up + add w1, w2 + jmp L(mb0) + +-L(mf3): mulx( u0, w2, w3) C up[0]^2 ++L(mf3): X86_ENDBR ++ mulx( u0, w2, w3) C up[0]^2 + add u0, u0 + mov w2, (rp) + mulx( 8,(up), w0, w1) +@@ -197,7 +199,8 @@ L(mf3): mulx( u0, w2, w3) C up[0]^2 + add w3, w0 + jmp L(mb3) + +-L(mf4): mulx( u0, w0, w1) C up[0]^2 ++L(mf4): X86_ENDBR ++ mulx( u0, w0, w1) C up[0]^2 + add u0, u0 + mulx( 8,(up), w2, w3) + mov w0, (rp) +@@ -206,7 +209,8 @@ L(mf4): mulx( u0, w0, w1) C up[0]^2 + add w1, w2 + jmp L(mb4) + +-L(mf5): mulx( u0, w2, w3) C up[0]^2 ++L(mf5): X86_ENDBR ++ mulx( u0, w2, w3) C up[0]^2 + add u0, u0 + mulx( 8,(up), w0, w1) + mov w2, (rp) +@@ -215,7 +219,8 @@ L(mf5): mulx( u0, w2, w3) C up[0]^2 + add w3, w0 + jmp L(mb5) + +-L(mf6): mulx( u0, w0, w1) C up[0]^2 ++L(mf6): X86_ENDBR ++ mulx( u0, w0, w1) C up[0]^2 + add u0, u0 + mulx( 8,(up), w2, w3) + mov w0, (rp) +@@ -224,7 +229,8 @@ L(mf6): mulx( u0, w0, w1) C up[0]^2 + add w1, w2 + jmp L(mb6) + +-L(mf7): mulx( u0, w2, w3) C up[0]^2 ++L(mf7): X86_ENDBR ++ mulx( u0, w2, w3) C up[0]^2 + add u0, u0 + mulx( 8,(up), w0, w1) + mov w2, (rp) +@@ -233,7 +239,8 @@ L(mf7): mulx( u0, w2, w3) C up[0]^2 + add w3, w0 + jmp L(mb7) + +-L(mf1): mulx( u0, w2, w3) C up[0]^2 ++L(mf1): X86_ENDBR ++ mulx( u0, w2, w3) C up[0]^2 + add u0, u0 + mulx( 8,(up), w0, w1) + mov w2, (rp) +@@ -242,7 +249,8 @@ L(mf1): mulx( u0, w2, w3) C up[0]^2 + add w3, w0 + jmp L(mb1) + +-L(mf2): mulx( u0, w0, w1) C up[0]^2 ++L(mf2): X86_ENDBR ++ mulx( u0, w0, w1) C up[0]^2 + add u0, u0 + mulx( 8,(up), w2, w3) + mov w0, (rp) +@@ -300,7 +308,8 @@ ifdef(`PIC', + + L(ed0): adox( (rp), w0) + adox( %rcx, w1) C relies on rcx = 0 +-L(f7): mov w0, (rp) ++L(f7): X86_ENDBR ++ mov w0, (rp) + adc %rcx, w1 C relies on rcx = 0 + mov w1, 8(rp) + lea -64(up,un_save,8), up +@@ -356,7 +365,8 @@ L(b0): mov w0, (rp) + + L(ed1): adox( (rp), w0) + adox( %rcx, w1) C relies on rcx = 0 +-L(f0): mov w0, (rp) ++L(f0): X86_ENDBR ++ mov w0, (rp) + adc %rcx, w1 C relies on rcx = 0 + mov w1, 8(rp) + lea -64(up,un_save,8), up +@@ -415,7 +425,8 @@ L(b1): mulx( 8,(up), w2, w3) + + L(ed2): adox( (rp), w0) + adox( %rcx, w1) C relies on rcx = 0 +-L(f1): mov w0, (rp) ++L(f1): X86_ENDBR ++ mov w0, (rp) + adc %rcx, w1 C relies on rcx = 0 + mov w1, 8(rp) + lea (up,un_save,8), up +@@ -477,7 +488,8 @@ L(b2): adox( 48,(rp), w0) + + L(ed3): adox( (rp), w0) + adox( %rcx, w1) C relies on rcx = 0 +-L(f2): mov w0, (rp) ++L(f2): X86_ENDBR ++ mov w0, (rp) + adc %rcx, w1 C relies on rcx = 0 + mov w1, 8(rp) + lea (up,un_save,8), up +@@ -535,7 +547,8 @@ L(b3): mulx( -16,(up), w0, w1) + + L(ed4): adox( (rp), w0) + adox( %rcx, w1) C relies on rcx = 0 +-L(f3): mov w0, (rp) ++L(f3): X86_ENDBR ++ mov w0, (rp) + adc %rcx, w1 C relies on rcx = 0 + mov w1, 8(rp) + lea (up,un_save,8), up +@@ -592,7 +605,8 @@ L(b4): mulx( -24,(up), w2, w3) + + L(ed5): adox( (rp), w0) + adox( %rcx, w1) C relies on rcx = 0 +-L(f4): mov w0, (rp) ++L(f4): X86_ENDBR ++ mov w0, (rp) + adc %rcx, w1 C relies on rcx = 0 + mov w1, 8(rp) + lea (up,un_save,8), up +@@ -649,7 +663,8 @@ L(b5): mulx( -32,(up), w0, w1) + + L(ed6): adox( (rp), w0) + adox( %rcx, w1) C relies on rcx = 0 +-L(f5): mov w0, (rp) ++L(f5): X86_ENDBR ++ mov w0, (rp) + adc %rcx, w1 C relies on rcx = 0 + mov w1, 8(rp) + lea (up,un_save,8), up +@@ -706,7 +721,8 @@ L(b6): adcx( w1, w2) + + L(ed7): adox( (rp), w0) + adox( %rcx, w1) C relies on rcx = 0 +-L(f6): mov w0, (rp) ++L(f6): X86_ENDBR ++ mov w0, (rp) + adc %rcx, w1 C relies on rcx = 0 + mov w1, 8(rp) + lea (up,un_save,8), up +@@ -837,3 +853,4 @@ 
L(atab):JMPENT( L(f6), L(atab)) + JMPENT( L(f5), L(atab)) + TEXT + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/coreihwl/addmul_2.asm b/mpn/x86_64/coreihwl/addmul_2.asm +index 9d1c405..322037e 100644 +--- a/mpn/x86_64/coreihwl/addmul_2.asm ++++ b/mpn/x86_64/coreihwl/addmul_2.asm +@@ -239,3 +239,4 @@ L(end): mulx( v0, %rax, w3) + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/coreihwl/aors_n.asm b/mpn/x86_64/coreihwl/aors_n.asm +index fc99627..f9d89f7 100644 +--- a/mpn/x86_64/coreihwl/aors_n.asm ++++ b/mpn/x86_64/coreihwl/aors_n.asm +@@ -259,3 +259,4 @@ L(tab): JMPENT( L(0), L(tab)) + JMPENT( L(5), L(tab)) + JMPENT( L(6), L(tab)) + JMPENT( L(7), L(tab)) ++ASM_END() +diff --git a/mpn/x86_64/coreihwl/aorsmul_1.asm b/mpn/x86_64/coreihwl/aorsmul_1.asm +index 3f43afa..d01c941 100644 +--- a/mpn/x86_64/coreihwl/aorsmul_1.asm ++++ b/mpn/x86_64/coreihwl/aorsmul_1.asm +@@ -199,3 +199,4 @@ L(ret): pop %r13 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/coreihwl/gcd_22.asm b/mpn/x86_64/coreihwl/gcd_22.asm +index b5863b6..e41731e 100644 +--- a/mpn/x86_64/coreihwl/gcd_22.asm ++++ b/mpn/x86_64/coreihwl/gcd_22.asm +@@ -136,3 +136,4 @@ L(end): mov v0, %rax + L(ret): FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/coreihwl/mul_2.asm b/mpn/x86_64/coreihwl/mul_2.asm +index f1f044f..f48e5d8 100644 +--- a/mpn/x86_64/coreihwl/mul_2.asm ++++ b/mpn/x86_64/coreihwl/mul_2.asm +@@ -174,3 +174,4 @@ L(end): mulx( v1, %rdx, %rax) + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/coreihwl/mul_basecase.asm b/mpn/x86_64/coreihwl/mul_basecase.asm +index b2656c8..14826e8 100644 +--- a/mpn/x86_64/coreihwl/mul_basecase.asm ++++ b/mpn/x86_64/coreihwl/mul_basecase.asm +@@ -439,3 +439,4 @@ L(ret2):pop %rbp + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/coreihwl/mullo_basecase.asm b/mpn/x86_64/coreihwl/mullo_basecase.asm +index e65559b..b29352c 100644 +--- a/mpn/x86_64/coreihwl/mullo_basecase.asm ++++ b/mpn/x86_64/coreihwl/mullo_basecase.asm +@@ -420,3 +420,4 @@ L(n3): mov (vp), %r9 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/coreihwl/redc_1.asm b/mpn/x86_64/coreihwl/redc_1.asm +index b1d6c0a..3b09a73 100644 +--- a/mpn/x86_64/coreihwl/redc_1.asm ++++ b/mpn/x86_64/coreihwl/redc_1.asm +@@ -435,3 +435,4 @@ L(ret): pop %r15 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/coreihwl/sqr_basecase.asm b/mpn/x86_64/coreihwl/sqr_basecase.asm +index 641cdf3..b6ea890 100644 +--- a/mpn/x86_64/coreihwl/sqr_basecase.asm ++++ b/mpn/x86_64/coreihwl/sqr_basecase.asm +@@ -504,3 +504,4 @@ L(dend):adc %rbx, %rdx + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/coreinhm/aorrlsh_n.asm b/mpn/x86_64/coreinhm/aorrlsh_n.asm +index eed64e7..3f25eea 100644 +--- a/mpn/x86_64/coreinhm/aorrlsh_n.asm ++++ b/mpn/x86_64/coreinhm/aorrlsh_n.asm +@@ -198,3 +198,4 @@ IFDOS(` mov 64(%rsp), %r9 ') C cy + sbb R32(%rbx), R32(%rbx) C initialise CF save register + jmp L(ent) + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/coreinhm/hamdist.asm b/mpn/x86_64/coreinhm/hamdist.asm +index a5a63e4..a84bcbc 100644 +--- a/mpn/x86_64/coreinhm/hamdist.asm ++++ b/mpn/x86_64/coreinhm/hamdist.asm +@@ -194,3 +194,4 @@ L(tab): JMPENT( L(0), L(tab)) + JMPENT( L(1), L(tab)) + JMPENT( L(2), L(tab)) + JMPENT( L(3), L(tab)) ++ASM_END() +diff --git a/mpn/x86_64/coreinhm/popcount.asm b/mpn/x86_64/coreinhm/popcount.asm +index 0a3c867..24c4ebc 100644 +--- a/mpn/x86_64/coreinhm/popcount.asm ++++ 
b/mpn/x86_64/coreinhm/popcount.asm +@@ -180,3 +180,4 @@ L(tab): JMPENT( L(0), L(tab)) + JMPENT( L(5), L(tab)) + JMPENT( L(6), L(tab)) + JMPENT( L(7), L(tab)) ++ASM_END() +diff --git a/mpn/x86_64/coreisbr/addmul_2.asm b/mpn/x86_64/coreisbr/addmul_2.asm +index 21f0bf4..45c7b15 100644 +--- a/mpn/x86_64/coreisbr/addmul_2.asm ++++ b/mpn/x86_64/coreisbr/addmul_2.asm +@@ -222,3 +222,4 @@ L(end): mul v1 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/coreisbr/aorrlshC_n.asm b/mpn/x86_64/coreisbr/aorrlshC_n.asm +index 23ace41..6af7da8 100644 +--- a/mpn/x86_64/coreisbr/aorrlshC_n.asm ++++ b/mpn/x86_64/coreisbr/aorrlshC_n.asm +@@ -171,3 +171,4 @@ L(end): shr $RSH, %rbp + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/coreisbr/aorrlsh_n.asm b/mpn/x86_64/coreisbr/aorrlsh_n.asm +index db8ee68..56ca497 100644 +--- a/mpn/x86_64/coreisbr/aorrlsh_n.asm ++++ b/mpn/x86_64/coreisbr/aorrlsh_n.asm +@@ -213,3 +213,4 @@ IFDOS(` mov 64(%rsp), %r9 ') C cy + sbb R32(%rbx), R32(%rbx) C initialise CF save register + jmp L(ent) + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/coreisbr/aors_n.asm b/mpn/x86_64/coreisbr/aors_n.asm +index 61fee3e..d466248 100644 +--- a/mpn/x86_64/coreisbr/aors_n.asm ++++ b/mpn/x86_64/coreisbr/aors_n.asm +@@ -201,3 +201,4 @@ PROLOGUE(func_nc) + IFDOS(` mov 56(%rsp), %r8 ') + jmp L(ent) + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/coreisbr/cnd_add_n.asm b/mpn/x86_64/coreisbr/cnd_add_n.asm +index 43abcc8..3d72bf8 100644 +--- a/mpn/x86_64/coreisbr/cnd_add_n.asm ++++ b/mpn/x86_64/coreisbr/cnd_add_n.asm +@@ -172,3 +172,4 @@ L(end): neg R32(%rax) + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/coreisbr/cnd_sub_n.asm b/mpn/x86_64/coreisbr/cnd_sub_n.asm +index f55492b..3371269 100644 +--- a/mpn/x86_64/coreisbr/cnd_sub_n.asm ++++ b/mpn/x86_64/coreisbr/cnd_sub_n.asm +@@ -198,3 +198,4 @@ L(end): neg R32(%rax) + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/coreisbr/mul_1.asm b/mpn/x86_64/coreisbr/mul_1.asm +index a43a117..1f17293 100644 +--- a/mpn/x86_64/coreisbr/mul_1.asm ++++ b/mpn/x86_64/coreisbr/mul_1.asm +@@ -197,3 +197,4 @@ L(00c): add cin, %r10 + mov 8(up,n,8), %rax + jmp L(L0c) + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/coreisbr/mul_2.asm b/mpn/x86_64/coreisbr/mul_2.asm +index 781534d..10f1769 100644 +--- a/mpn/x86_64/coreisbr/mul_2.asm ++++ b/mpn/x86_64/coreisbr/mul_2.asm +@@ -165,3 +165,4 @@ L(end): mul v0 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/coreisbr/mul_basecase.asm b/mpn/x86_64/coreisbr/mul_basecase.asm +index 35fd1cc..d5c7e5b 100644 +--- a/mpn/x86_64/coreisbr/mul_basecase.asm ++++ b/mpn/x86_64/coreisbr/mul_basecase.asm +@@ -405,3 +405,4 @@ L(ret2):pop %rbp + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/coreisbr/mullo_basecase.asm b/mpn/x86_64/coreisbr/mullo_basecase.asm +index a41a8ac..acf7776 100644 +--- a/mpn/x86_64/coreisbr/mullo_basecase.asm ++++ b/mpn/x86_64/coreisbr/mullo_basecase.asm +@@ -382,3 +382,4 @@ L(n3): mov (vp_param), %r9 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/coreisbr/rsh1aors_n.asm b/mpn/x86_64/coreisbr/rsh1aors_n.asm +index fd2eaea..eefad99 100644 +--- a/mpn/x86_64/coreisbr/rsh1aors_n.asm ++++ b/mpn/x86_64/coreisbr/rsh1aors_n.asm +@@ -191,3 +191,4 @@ L(end): shrd $1, %rbx, %rbp + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/coreisbr/sqr_basecase.asm b/mpn/x86_64/coreisbr/sqr_basecase.asm +index 46a3612..1600e25 100644 +--- a/mpn/x86_64/coreisbr/sqr_basecase.asm 
++++ b/mpn/x86_64/coreisbr/sqr_basecase.asm +@@ -482,3 +482,4 @@ L(dend):add %r8, %r10 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/div_qr_1n_pi1.asm b/mpn/x86_64/div_qr_1n_pi1.asm +index b3d45e2..9fd2633 100644 +--- a/mpn/x86_64/div_qr_1n_pi1.asm ++++ b/mpn/x86_64/div_qr_1n_pi1.asm +@@ -245,3 +245,4 @@ L(q_incr_loop): + lea 8(U1), U1 + jmp L(q_incr_loop) + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/div_qr_2n_pi1.asm b/mpn/x86_64/div_qr_2n_pi1.asm +index 5e59a0a..c189c33 100644 +--- a/mpn/x86_64/div_qr_2n_pi1.asm ++++ b/mpn/x86_64/div_qr_2n_pi1.asm +@@ -156,3 +156,4 @@ L(fix): C Unlikely update. u2 >= d1 + sbb d1, u2 + jmp L(bck) + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/div_qr_2u_pi1.asm b/mpn/x86_64/div_qr_2u_pi1.asm +index 85af96f..f2ac526 100644 +--- a/mpn/x86_64/div_qr_2u_pi1.asm ++++ b/mpn/x86_64/div_qr_2u_pi1.asm +@@ -198,3 +198,4 @@ L(fix_qh): C Unlikely update. u2 >= d1 + sbb d1, u2 + jmp L(bck_qh) + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/dive_1.asm b/mpn/x86_64/dive_1.asm +index 988bdab..1929091 100644 +--- a/mpn/x86_64/dive_1.asm ++++ b/mpn/x86_64/dive_1.asm +@@ -156,3 +156,4 @@ L(one): shr R8(%rcx), %rax + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/divrem_1.asm b/mpn/x86_64/divrem_1.asm +index d4d61ad..edfd893 100644 +--- a/mpn/x86_64/divrem_1.asm ++++ b/mpn/x86_64/divrem_1.asm +@@ -312,3 +312,4 @@ L(ret): pop %rbx + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/divrem_2.asm b/mpn/x86_64/divrem_2.asm +index 20811cc..e10f328 100644 +--- a/mpn/x86_64/divrem_2.asm ++++ b/mpn/x86_64/divrem_2.asm +@@ -190,3 +190,4 @@ L(fix): seta %dl + sbb %r11, %rbx + jmp L(bck) + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/fastavx/copyd.asm b/mpn/x86_64/fastavx/copyd.asm +index 56d472f..a69a624 100644 +--- a/mpn/x86_64/fastavx/copyd.asm ++++ b/mpn/x86_64/fastavx/copyd.asm +@@ -170,3 +170,4 @@ L(bc): test $4, R8(n) + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/fastavx/copyi.asm b/mpn/x86_64/fastavx/copyi.asm +index 7607747..f50aa47 100644 +--- a/mpn/x86_64/fastavx/copyi.asm ++++ b/mpn/x86_64/fastavx/copyi.asm +@@ -167,3 +167,4 @@ L(bc): test $4, R8(n) + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/fastsse/com-palignr.asm b/mpn/x86_64/fastsse/com-palignr.asm +index 69027bc..50cd40f 100644 +--- a/mpn/x86_64/fastsse/com-palignr.asm ++++ b/mpn/x86_64/fastsse/com-palignr.asm +@@ -309,3 +309,4 @@ L(end): test $1, R8(n) + 1: FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/fastsse/com.asm b/mpn/x86_64/fastsse/com.asm +index c867222..aec7d25 100644 +--- a/mpn/x86_64/fastsse/com.asm ++++ b/mpn/x86_64/fastsse/com.asm +@@ -173,3 +173,4 @@ IFDOS(` add $56, %rsp ') + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/fastsse/copyd-palignr.asm b/mpn/x86_64/fastsse/copyd-palignr.asm +index fac6f8a..fa1e4a4 100644 +--- a/mpn/x86_64/fastsse/copyd-palignr.asm ++++ b/mpn/x86_64/fastsse/copyd-palignr.asm +@@ -252,3 +252,4 @@ L(end): test $1, R8(n) + 1: FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/fastsse/copyd.asm b/mpn/x86_64/fastsse/copyd.asm +index b3c4706..ce820c5 100644 +--- a/mpn/x86_64/fastsse/copyd.asm ++++ b/mpn/x86_64/fastsse/copyd.asm +@@ -164,3 +164,4 @@ L(sma): test $8, R8(n) + L(don): FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/fastsse/copyi-palignr.asm b/mpn/x86_64/fastsse/copyi-palignr.asm +index 9876a47..fb4655f 100644 +--- a/mpn/x86_64/fastsse/copyi-palignr.asm ++++ 
b/mpn/x86_64/fastsse/copyi-palignr.asm +@@ -298,3 +298,4 @@ L(end): test $1, R8(n) + 1: FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/fastsse/copyi.asm b/mpn/x86_64/fastsse/copyi.asm +index 97f7865..826caad 100644 +--- a/mpn/x86_64/fastsse/copyi.asm ++++ b/mpn/x86_64/fastsse/copyi.asm +@@ -183,3 +183,4 @@ dnl jnc 1b + L(ret): FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/fastsse/lshift-movdqu2.asm b/mpn/x86_64/fastsse/lshift-movdqu2.asm +index a05e850..217f2cd 100644 +--- a/mpn/x86_64/fastsse/lshift-movdqu2.asm ++++ b/mpn/x86_64/fastsse/lshift-movdqu2.asm +@@ -180,3 +180,4 @@ L(end8):movq (ap), %xmm0 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/fastsse/lshift.asm b/mpn/x86_64/fastsse/lshift.asm +index 6a17b93..79a5554 100644 +--- a/mpn/x86_64/fastsse/lshift.asm ++++ b/mpn/x86_64/fastsse/lshift.asm +@@ -171,3 +171,4 @@ L(end8):movq (ap), %xmm0 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/fastsse/lshiftc-movdqu2.asm b/mpn/x86_64/fastsse/lshiftc-movdqu2.asm +index 8250910..9f14435 100644 +--- a/mpn/x86_64/fastsse/lshiftc-movdqu2.asm ++++ b/mpn/x86_64/fastsse/lshiftc-movdqu2.asm +@@ -191,3 +191,4 @@ L(end8):movq (ap), %xmm0 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/fastsse/lshiftc.asm b/mpn/x86_64/fastsse/lshiftc.asm +index a616075..a6630cb 100644 +--- a/mpn/x86_64/fastsse/lshiftc.asm ++++ b/mpn/x86_64/fastsse/lshiftc.asm +@@ -181,3 +181,4 @@ L(end8):movq (ap), %xmm0 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/fastsse/rshift-movdqu2.asm b/mpn/x86_64/fastsse/rshift-movdqu2.asm +index 1e270b1..15bcc02 100644 +--- a/mpn/x86_64/fastsse/rshift-movdqu2.asm ++++ b/mpn/x86_64/fastsse/rshift-movdqu2.asm +@@ -199,3 +199,4 @@ L(bc): dec R32(n) + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/fastsse/sec_tabselect.asm b/mpn/x86_64/fastsse/sec_tabselect.asm +index e7b7feb..f3b76eb 100644 +--- a/mpn/x86_64/fastsse/sec_tabselect.asm ++++ b/mpn/x86_64/fastsse/sec_tabselect.asm +@@ -202,3 +202,4 @@ IFDOS(` add $88, %rsp ') + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/fat/fat_entry.asm b/mpn/x86_64/fat/fat_entry.asm +index 5f244ac..2322be8 100644 +--- a/mpn/x86_64/fat/fat_entry.asm ++++ b/mpn/x86_64/fat/fat_entry.asm +@@ -207,3 +207,4 @@ PROLOGUE(__gmpn_cpuid) + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/gcd_11.asm b/mpn/x86_64/gcd_11.asm +index f9b3bcc..1e5ac68 100644 +--- a/mpn/x86_64/gcd_11.asm ++++ b/mpn/x86_64/gcd_11.asm +@@ -112,3 +112,4 @@ L(shift_alot): + mov u0, %rdx + jmp L(mid) + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/gcd_22.asm b/mpn/x86_64/gcd_22.asm +index 78f985f..c3b0b89 100644 +--- a/mpn/x86_64/gcd_22.asm ++++ b/mpn/x86_64/gcd_22.asm +@@ -161,3 +161,4 @@ L(end): C mov v0, %rax + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/k10/gcd_22.asm b/mpn/x86_64/k10/gcd_22.asm +index f58b4cc..c7fe668 100644 +--- a/mpn/x86_64/k10/gcd_22.asm ++++ b/mpn/x86_64/k10/gcd_22.asm +@@ -140,3 +140,4 @@ L(end): C mov v0, %rax + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/k10/hamdist.asm b/mpn/x86_64/k10/hamdist.asm +index f70494a..d885e2d 100644 +--- a/mpn/x86_64/k10/hamdist.asm ++++ b/mpn/x86_64/k10/hamdist.asm +@@ -107,3 +107,4 @@ L(top): mov (ap,n,8), %r8 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/k10/popcount.asm b/mpn/x86_64/k10/popcount.asm +index 3814aea..45bcba5 100644 +--- a/mpn/x86_64/k10/popcount.asm ++++ 
b/mpn/x86_64/k10/popcount.asm +@@ -79,7 +79,7 @@ C neg R32(%rcx) + + lea L(top)(%rip), %rdx + lea (%rdx,%rcx,2), %rdx +- jmp *%rdx ++ X86_NOTRACK jmp *%rdx + ',` + lea (up,n,8), up + +@@ -101,7 +101,7 @@ C lea (%rcx,%rcx,4), %rcx C 10x + + lea L(top)(%rip), %rdx + add %rcx, %rdx +- jmp *%rdx ++ X86_NOTRACK jmp *%rdx + ') + + ALIGN(32) +@@ -136,3 +136,4 @@ C 1 = n mod 8 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/k8/addmul_2.asm b/mpn/x86_64/k8/addmul_2.asm +index 78bcba1..38caa4d 100644 +--- a/mpn/x86_64/k8/addmul_2.asm ++++ b/mpn/x86_64/k8/addmul_2.asm +@@ -193,3 +193,4 @@ L(end): xor R32(w1), R32(w1) + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/k8/aorrlsh_n.asm b/mpn/x86_64/k8/aorrlsh_n.asm +index ff3a184..3ab7050 100644 +--- a/mpn/x86_64/k8/aorrlsh_n.asm ++++ b/mpn/x86_64/k8/aorrlsh_n.asm +@@ -215,3 +215,4 @@ L(cj1): mov %r9, 8(rp,n,8) + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/k8/bdiv_q_1.asm b/mpn/x86_64/k8/bdiv_q_1.asm +index 1172b0d..606d54f 100644 +--- a/mpn/x86_64/k8/bdiv_q_1.asm ++++ b/mpn/x86_64/k8/bdiv_q_1.asm +@@ -177,3 +177,4 @@ L(one): shr R8(%rcx), %rax + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/k8/div_qr_1n_pi1.asm b/mpn/x86_64/k8/div_qr_1n_pi1.asm +index 86de08c..e91b809 100644 +--- a/mpn/x86_64/k8/div_qr_1n_pi1.asm ++++ b/mpn/x86_64/k8/div_qr_1n_pi1.asm +@@ -247,3 +247,4 @@ L(q_incr_loop): + lea 8(U1), U1 + jmp L(q_incr_loop) + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/k8/mul_basecase.asm b/mpn/x86_64/k8/mul_basecase.asm +index ca2efb9..9126c2b 100644 +--- a/mpn/x86_64/k8/mul_basecase.asm ++++ b/mpn/x86_64/k8/mul_basecase.asm +@@ -335,8 +335,10 @@ C addmul_2 for remaining vp's + C adjusted value of n that is reloaded on each iteration + + L(addmul_outer_0): ++ X86_ENDBR + add $3, un + lea 0(%rip), outer_addr ++ X86_ENDBR + + mov un, n + mov -24(up,un,8), %rax +@@ -348,6 +350,7 @@ L(addmul_outer_0): + jmp L(addmul_entry_0) + + L(addmul_outer_1): ++ X86_ENDBR + mov un, n + mov (up,un,8), %rax + mul v0 +@@ -358,8 +361,10 @@ L(addmul_outer_1): + jmp L(addmul_entry_1) + + L(addmul_outer_2): ++ X86_ENDBR + add $1, un + lea 0(%rip), outer_addr ++ X86_ENDBR + + mov un, n + mov -8(up,un,8), %rax +@@ -372,8 +377,10 @@ L(addmul_outer_2): + jmp L(addmul_entry_2) + + L(addmul_outer_3): ++ X86_ENDBR + add $2, un + lea 0(%rip), outer_addr ++ X86_ENDBR + + mov un, n + mov -16(up,un,8), %rax +@@ -467,3 +474,4 @@ L(ret): pop %r15 + ret + + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/k8/mullo_basecase.asm b/mpn/x86_64/k8/mullo_basecase.asm +index fa00f42..4a931a5 100644 +--- a/mpn/x86_64/k8/mullo_basecase.asm ++++ b/mpn/x86_64/k8/mullo_basecase.asm +@@ -99,12 +99,14 @@ dnl JMPENT( L(2m4), L(tab)) C 10 + dnl JMPENT( L(3m4), L(tab)) C 11 + TEXT + +-L(1): imul %r8, %rax ++L(1): X86_ENDBR ++ imul %r8, %rax + mov %rax, (rp) + FUNC_EXIT() + ret + +-L(2): mov 8(vp_param), %r11 ++L(2): X86_ENDBR ++ mov 8(vp_param), %r11 + imul %rax, %r11 C u0 x v1 + mul %r8 C u0 x v0 + mov %rax, (rp) +@@ -115,7 +117,8 @@ L(2): mov 8(vp_param), %r11 + FUNC_EXIT() + ret + +-L(3): mov 8(vp_param), %r9 C v1 ++L(3): X86_ENDBR ++ mov 8(vp_param), %r9 C v1 + mov 16(vp_param), %r11 + mul %r8 C u0 x v0 -> + mov %rax, (rp) C r0 +@@ -335,6 +338,7 @@ L(mul_2_entry_1): + + + L(addmul_outer_1): ++ X86_ENDBR + lea -2(n), j + mov -16(up,n,8), %rax + mul v0 +@@ -346,6 +350,7 @@ L(addmul_outer_1): + jmp L(addmul_entry_1) + + L(addmul_outer_3): ++ X86_ENDBR + lea 0(n), j + mov -16(up,n,8), %rax + xor R32(w3), R32(w3) 
+@@ -434,3 +439,4 @@ L(ret): pop %r15 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/k8/mulmid_basecase.asm b/mpn/x86_64/k8/mulmid_basecase.asm +index 86f1414..7d5f158 100644 +--- a/mpn/x86_64/k8/mulmid_basecase.asm ++++ b/mpn/x86_64/k8/mulmid_basecase.asm +@@ -329,6 +329,7 @@ C addmul_2 for remaining vp's + + ALIGN(16) + L(addmul_prologue_0): ++ X86_ENDBR + mov -8(up,n,8), %rax + mul v1 + mov %rax, w1 +@@ -338,6 +339,7 @@ L(addmul_prologue_0): + + ALIGN(16) + L(addmul_prologue_1): ++ X86_ENDBR + mov 16(up,n,8), %rax + mul v1 + mov %rax, w0 +@@ -348,6 +350,7 @@ L(addmul_prologue_1): + + ALIGN(16) + L(addmul_prologue_2): ++ X86_ENDBR + mov 8(up,n,8), %rax + mul v1 + mov %rax, w3 +@@ -357,6 +360,7 @@ L(addmul_prologue_2): + + ALIGN(16) + L(addmul_prologue_3): ++ X86_ENDBR + mov (up,n,8), %rax + mul v1 + mov %rax, w2 +@@ -471,6 +475,7 @@ L(diag_prologue_0): + mov vp, vp_inner + mov vn, n + lea 0(%rip), outer_addr ++ X86_ENDBR + mov -8(up,n,8), %rax + jmp L(diag_entry_0) + +@@ -480,6 +485,7 @@ L(diag_prologue_1): + add $3, vn + mov vn, n + lea 0(%rip), outer_addr ++ X86_ENDBR + mov -8(vp_inner), %rax + jmp L(diag_entry_1) + +@@ -489,6 +495,7 @@ L(diag_prologue_2): + add $2, vn + mov vn, n + lea 0(%rip), outer_addr ++ X86_ENDBR + mov 16(vp_inner), %rax + jmp L(diag_entry_2) + +@@ -507,6 +514,7 @@ L(diag_entry_0): + adc %rdx, w1 + adc $0, w2 + L(diag_entry_3): ++ X86_ENDBR + mov -16(up,n,8), %rax + mulq 8(vp_inner) + add %rax, w0 +@@ -557,3 +565,4 @@ L(ret): pop %r15 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/k8/redc_1.asm b/mpn/x86_64/k8/redc_1.asm +index 9327b21..3e241af 100644 +--- a/mpn/x86_64/k8/redc_1.asm ++++ b/mpn/x86_64/k8/redc_1.asm +@@ -125,7 +125,8 @@ L(tab): JMPENT( L(0), L(tab)) + TEXT + + ALIGN(16) +-L(1): mov (mp_param), %rax ++L(1): X86_ENDBR ++ mov (mp_param), %rax + mul q0 + add 8(up), %rax + adc 16(up), %rdx +@@ -136,7 +137,8 @@ L(1): mov (mp_param), %rax + + + ALIGN(16) +-L(2): mov (mp_param), %rax ++L(2): X86_ENDBR ++ mov (mp_param), %rax + mul q0 + xor R32(%r14), R32(%r14) + mov %rax, %r10 +@@ -171,7 +173,8 @@ L(2): mov (mp_param), %rax + jmp L(ret) + + +-L(3): mov (mp_param), %rax ++L(3): X86_ENDBR ++ mov (mp_param), %rax + mul q0 + mov %rax, %rbx + mov %rdx, %r10 +@@ -248,7 +251,7 @@ L(3): mov (mp_param), %rax + + + ALIGN(16) +-L(2m4): ++L(2m4): X86_ENDBR + L(lo2): mov (mp,nneg,8), %rax + mul q0 + xor R32(%r14), R32(%r14) +@@ -324,7 +327,7 @@ L(le2): add %r10, (up) + + + ALIGN(16) +-L(1m4): ++L(1m4): X86_ENDBR + L(lo1): mov (mp,nneg,8), %rax + xor %r9, %r9 + xor R32(%rbx), R32(%rbx) +@@ -398,7 +401,7 @@ L(le1): add %r10, (up) + + ALIGN(16) + L(0): +-L(0m4): ++L(0m4): X86_ENDBR + L(lo0): mov (mp,nneg,8), %rax + mov nneg, i + mul q0 +@@ -463,7 +466,7 @@ L(le0): add %r10, (up) + + + ALIGN(16) +-L(3m4): ++L(3m4): X86_ENDBR + L(lo3): mov (mp,nneg,8), %rax + mul q0 + mov %rax, %rbx +@@ -589,3 +592,4 @@ L(ret): pop %r15 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/k8/sqr_basecase.asm b/mpn/x86_64/k8/sqr_basecase.asm +index 60cf945..37858b4 100644 +--- a/mpn/x86_64/k8/sqr_basecase.asm ++++ b/mpn/x86_64/k8/sqr_basecase.asm +@@ -131,7 +131,8 @@ L(tab): JMPENT( L(4), L(tab)) + JMPENT( L(3m4), L(tab)) + TEXT + +-L(1): mov (up), %rax ++L(1): X86_ENDBR ++ mov (up), %rax + mul %rax + add $40, %rsp + mov %rax, (rp) +@@ -139,7 +140,8 @@ L(1): mov (up), %rax + FUNC_EXIT() + ret + +-L(2): mov (up), %rax ++L(2): X86_ENDBR ++ mov (up), %rax + mov %rax, %r8 + mul %rax + mov 8(up), %r11 +@@ -165,7 +167,8 @@ L(2): mov 
(up), %rax + FUNC_EXIT() + ret + +-L(3): mov (up), %rax ++L(3): X86_ENDBR ++ mov (up), %rax + mov %rax, %r10 + mul %rax + mov 8(up), %r11 +@@ -210,7 +213,8 @@ L(3): mov (up), %rax + FUNC_EXIT() + ret + +-L(4): mov (up), %rax ++L(4): X86_ENDBR ++ mov (up), %rax + mov %rax, %r11 + mul %rax + mov 8(up), %rbx +@@ -282,6 +286,7 @@ L(4): mov (up), %rax + + + L(0m4): ++ X86_ENDBR + lea -16(rp,n,8), tp C point tp in middle of result operand + mov (up), v0 + mov 8(up), %rax +@@ -340,6 +345,7 @@ L(L3): xor R32(w1), R32(w1) + + + L(1m4): ++ X86_ENDBR + lea 8(rp,n,8), tp C point tp in middle of result operand + mov (up), v0 C u0 + mov 8(up), %rax C u1 +@@ -418,6 +424,7 @@ L(m2x): mov (up,j,8), %rax + + + L(2m4): ++ X86_ENDBR + lea -16(rp,n,8), tp C point tp in middle of result operand + mov (up), v0 + mov 8(up), %rax +@@ -474,7 +481,7 @@ L(L1): xor R32(w0), R32(w0) + jmp L(dowhile_mid) + + +-L(3m4): ++L(3m4): X86_ENDBR + lea 8(rp,n,8), tp C point tp in middle of result operand + mov (up), v0 C u0 + mov 8(up), %rax C u1 +@@ -805,3 +812,4 @@ L(d1): mov %r11, 24(rp,j,8) + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/logops_n.asm b/mpn/x86_64/logops_n.asm +index e25854d..b3969ba 100644 +--- a/mpn/x86_64/logops_n.asm ++++ b/mpn/x86_64/logops_n.asm +@@ -258,3 +258,4 @@ L(ret): FUNC_EXIT() + ret + EPILOGUE() + ') ++ASM_END() +diff --git a/mpn/x86_64/lshift.asm b/mpn/x86_64/lshift.asm +index fff3152..4187bdc 100644 +--- a/mpn/x86_64/lshift.asm ++++ b/mpn/x86_64/lshift.asm +@@ -170,3 +170,4 @@ L(ast): mov (up), %r10 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/lshiftc.asm b/mpn/x86_64/lshiftc.asm +index c4ba04a..f6fe4c9 100644 +--- a/mpn/x86_64/lshiftc.asm ++++ b/mpn/x86_64/lshiftc.asm +@@ -180,3 +180,4 @@ L(ast): mov (up), %r10 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/lshsub_n.asm b/mpn/x86_64/lshsub_n.asm +index 4d428c0..62877d7 100644 +--- a/mpn/x86_64/lshsub_n.asm ++++ b/mpn/x86_64/lshsub_n.asm +@@ -170,3 +170,4 @@ L(end): + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/missing.asm b/mpn/x86_64/missing.asm +index 9b65c89..22dac17 100644 +--- a/mpn/x86_64/missing.asm ++++ b/mpn/x86_64/missing.asm +@@ -128,3 +128,4 @@ PROLOGUE(__gmp_adcx) + ret + EPILOGUE() + PROTECT(__gmp_adcx) ++ASM_END() +diff --git a/mpn/x86_64/mod_1_2.asm b/mpn/x86_64/mod_1_2.asm +index 40fcaeb..fbaae3b 100644 +--- a/mpn/x86_64/mod_1_2.asm ++++ b/mpn/x86_64/mod_1_2.asm +@@ -239,3 +239,4 @@ ifdef(`SHLD_SLOW',` + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/mod_1_4.asm b/mpn/x86_64/mod_1_4.asm +index 6cf304c..8969e42 100644 +--- a/mpn/x86_64/mod_1_4.asm ++++ b/mpn/x86_64/mod_1_4.asm +@@ -270,3 +270,4 @@ ifdef(`SHLD_SLOW',` + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/mod_34lsub1.asm b/mpn/x86_64/mod_34lsub1.asm +index 75421a6..70282b6 100644 +--- a/mpn/x86_64/mod_34lsub1.asm ++++ b/mpn/x86_64/mod_34lsub1.asm +@@ -145,46 +145,55 @@ L(tab): JMPENT( L(0), L(tab)) + JMPENT( L(8), L(tab)) + TEXT + +-L(6): add (ap), %rax ++L(6): X86_ENDBR ++ add (ap), %rax + adc 8(ap), %rcx + adc 16(ap), %rdx + adc $0, %r9 + add $24, ap +-L(3): add (ap), %rax ++L(3): X86_ENDBR ++ add (ap), %rax + adc 8(ap), %rcx + adc 16(ap), %rdx + jmp L(cj1) + +-L(7): add (ap), %rax ++L(7): X86_ENDBR ++ add (ap), %rax + adc 8(ap), %rcx + adc 16(ap), %rdx + adc $0, %r9 + add $24, ap +-L(4): add (ap), %rax ++L(4): X86_ENDBR ++ add (ap), %rax + adc 8(ap), %rcx + adc 16(ap), %rdx + adc $0, %r9 + add $24, ap +-L(1): add (ap), %rax 
++L(1): X86_ENDBR ++ add (ap), %rax + adc $0, %rcx + jmp L(cj2) + +-L(8): add (ap), %rax ++L(8): X86_ENDBR ++ add (ap), %rax + adc 8(ap), %rcx + adc 16(ap), %rdx + adc $0, %r9 + add $24, ap +-L(5): add (ap), %rax ++L(5): X86_ENDBR ++ add (ap), %rax + adc 8(ap), %rcx + adc 16(ap), %rdx + adc $0, %r9 + add $24, ap +-L(2): add (ap), %rax ++L(2): X86_ENDBR ++ add (ap), %rax + adc 8(ap), %rcx + + L(cj2): adc $0, %rdx + L(cj1): adc $0, %r9 +-L(0): add %r9, %rax ++L(0): X86_ENDBR ++ add %r9, %rax + adc $0, %rcx + adc $0, %rdx + adc $0, %rax +@@ -213,3 +222,4 @@ L(0): add %r9, %rax + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/mode1o.asm b/mpn/x86_64/mode1o.asm +index 2cd2b08..3377435 100644 +--- a/mpn/x86_64/mode1o.asm ++++ b/mpn/x86_64/mode1o.asm +@@ -169,3 +169,4 @@ L(one): + + EPILOGUE(mpn_modexact_1c_odd) + EPILOGUE(mpn_modexact_1_odd) ++ASM_END() +diff --git a/mpn/x86_64/mul_1.asm b/mpn/x86_64/mul_1.asm +index e1ba89b..44764dd 100644 +--- a/mpn/x86_64/mul_1.asm ++++ b/mpn/x86_64/mul_1.asm +@@ -190,3 +190,4 @@ IFDOS(``pop %rdi '') + IFDOS(``pop %rsi '') + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/mul_2.asm b/mpn/x86_64/mul_2.asm +index d64313b..b6c6bf1 100644 +--- a/mpn/x86_64/mul_2.asm ++++ b/mpn/x86_64/mul_2.asm +@@ -202,3 +202,4 @@ L(m22): mul v1 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/nano/dive_1.asm b/mpn/x86_64/nano/dive_1.asm +index e9a0763..aead4d5 100644 +--- a/mpn/x86_64/nano/dive_1.asm ++++ b/mpn/x86_64/nano/dive_1.asm +@@ -164,3 +164,4 @@ L(one): shr R8(%rcx), %rax + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/pentium4/aors_n.asm b/mpn/x86_64/pentium4/aors_n.asm +index 8e6ee1b..3751e38 100644 +--- a/mpn/x86_64/pentium4/aors_n.asm ++++ b/mpn/x86_64/pentium4/aors_n.asm +@@ -194,3 +194,4 @@ L(ret): mov R32(%rbx), R32(%rax) + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/pentium4/mod_34lsub1.asm b/mpn/x86_64/pentium4/mod_34lsub1.asm +index f34b3f0..bf83f62 100644 +--- a/mpn/x86_64/pentium4/mod_34lsub1.asm ++++ b/mpn/x86_64/pentium4/mod_34lsub1.asm +@@ -165,3 +165,4 @@ L(combine): + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/pentium4/rsh1aors_n.asm b/mpn/x86_64/pentium4/rsh1aors_n.asm +index 5528ce4..219a809 100644 +--- a/mpn/x86_64/pentium4/rsh1aors_n.asm ++++ b/mpn/x86_64/pentium4/rsh1aors_n.asm +@@ -332,3 +332,4 @@ L(cj1): or %r14, %rbx + L(c3): mov $1, R8(%rax) + jmp L(rc3) + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/pentium4/rshift.asm b/mpn/x86_64/pentium4/rshift.asm +index b7c1ee2..848045f 100644 +--- a/mpn/x86_64/pentium4/rshift.asm ++++ b/mpn/x86_64/pentium4/rshift.asm +@@ -167,3 +167,4 @@ L(ast): movq (up), %mm2 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/popham.asm b/mpn/x86_64/popham.asm +index 3a29b2e..b7ceb17 100644 +--- a/mpn/x86_64/popham.asm ++++ b/mpn/x86_64/popham.asm +@@ -161,3 +161,4 @@ L(end): + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/rsh1aors_n.asm b/mpn/x86_64/rsh1aors_n.asm +index a3e9cc5..797e250 100644 +--- a/mpn/x86_64/rsh1aors_n.asm ++++ b/mpn/x86_64/rsh1aors_n.asm +@@ -187,3 +187,4 @@ L(end): mov %rbx, (rp) + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/rshift.asm b/mpn/x86_64/rshift.asm +index 3f344f1..0fc5877 100644 +--- a/mpn/x86_64/rshift.asm ++++ b/mpn/x86_64/rshift.asm +@@ -174,3 +174,4 @@ L(ast): mov (up), %r10 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/sec_tabselect.asm b/mpn/x86_64/sec_tabselect.asm 
+index e8aed26..5dce3c1 100644 +--- a/mpn/x86_64/sec_tabselect.asm ++++ b/mpn/x86_64/sec_tabselect.asm +@@ -174,3 +174,4 @@ L(b00): pop %r15 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/sqr_diag_addlsh1.asm b/mpn/x86_64/sqr_diag_addlsh1.asm +index f486125..a1d8767 100644 +--- a/mpn/x86_64/sqr_diag_addlsh1.asm ++++ b/mpn/x86_64/sqr_diag_addlsh1.asm +@@ -114,3 +114,4 @@ L(end): add %r10, %r8 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/sublsh1_n.asm b/mpn/x86_64/sublsh1_n.asm +index c6d829f..c18f32a 100644 +--- a/mpn/x86_64/sublsh1_n.asm ++++ b/mpn/x86_64/sublsh1_n.asm +@@ -158,3 +158,4 @@ L(end): add R32(%rbp), R32(%rax) + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/x86_64-defs.m4 b/mpn/x86_64/x86_64-defs.m4 +index 4e08f2a..9fe328e 100644 +--- a/mpn/x86_64/x86_64-defs.m4 ++++ b/mpn/x86_64/x86_64-defs.m4 +@@ -95,6 +95,7 @@ m4_assert_numargs(1) + TYPE($1,`function') + COFF_TYPE($1) + $1: ++ X86_ENDBR + ') + + +@@ -167,6 +168,10 @@ ifdef(`PIC', + `lea $1(%rip), $2') + ') + ++dnl ASM_END ++ ++define(`ASM_END', `X86_GNU_PROPERTY') ++ + + define(`DEF_OBJECT', + m4_assert_numargs_range(2,3) +diff --git a/mpn/x86_64/zen/aorrlsh_n.asm b/mpn/x86_64/zen/aorrlsh_n.asm +index e049b2f..6e6783f 100644 +--- a/mpn/x86_64/zen/aorrlsh_n.asm ++++ b/mpn/x86_64/zen/aorrlsh_n.asm +@@ -102,26 +102,30 @@ ifdef(`PIC',` + jmp *(%r11,%rax,8) + ') + +-L(0): lea 32(up), up ++L(0): X86_ENDBR ++ lea 32(up), up + lea 32(vp), vp + lea 32(rp), rp + xor R32(%r11), R32(%r11) + jmp L(e0) + +-L(7): mov %r10, %r11 ++L(7): X86_ENDBR ++ mov %r10, %r11 + lea 24(up), up + lea 24(vp), vp + lea 24(rp), rp + xor R32(%r10), R32(%r10) + jmp L(e7) + +-L(6): lea 16(up), up ++L(6): X86_ENDBR ++ lea 16(up), up + lea 16(vp), vp + lea 16(rp), rp + xor R32(%r11), R32(%r11) + jmp L(e6) + +-L(5): mov %r10, %r11 ++L(5): X86_ENDBR ++ mov %r10, %r11 + lea 8(up), up + lea 8(vp), vp + lea 8(rp), rp +@@ -191,23 +195,27 @@ L(e1): shlx( cnt, %r11, %rax) + lea (%r10,%rax), %rax + jmp L(top) + +-L(4): xor R32(%r11), R32(%r11) ++L(4): X86_ENDBR ++ xor R32(%r11), R32(%r11) + jmp L(e4) + +-L(3): mov %r10, %r11 ++L(3): X86_ENDBR ++ mov %r10, %r11 + lea -8(up), up + lea -8(vp), vp + lea -8(rp), rp + xor R32(%r10), R32(%r10) + jmp L(e3) + +-L(2): lea -16(up), up ++L(2): X86_ENDBR ++ lea -16(up), up + lea -16(vp), vp + lea -16(rp), rp + xor R32(%r11), R32(%r11) + jmp L(e2) + +-L(1): mov %r10, %r11 ++L(1): X86_ENDBR ++ mov %r10, %r11 + lea -24(up), up + lea 40(vp), vp + lea 40(rp), rp +@@ -224,3 +232,4 @@ L(tab): JMPENT( L(0), L(tab)) + JMPENT( L(5), L(tab)) + JMPENT( L(6), L(tab)) + JMPENT( L(7), L(tab)) ++ASM_END() +diff --git a/mpn/x86_64/zen/mul_basecase.asm b/mpn/x86_64/zen/mul_basecase.asm +index affa3b6..c70d548 100644 +--- a/mpn/x86_64/zen/mul_basecase.asm ++++ b/mpn/x86_64/zen/mul_basecase.asm +@@ -453,3 +453,4 @@ L(wd3): adc %r11, 8(rp) + jne L(3) + jmp L(end) + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/zen/mullo_basecase.asm b/mpn/x86_64/zen/mullo_basecase.asm +index 2ae729a..c081698 100644 +--- a/mpn/x86_64/zen/mullo_basecase.asm ++++ b/mpn/x86_64/zen/mullo_basecase.asm +@@ -297,3 +297,4 @@ L(lo0): .byte 0xc4,0xe2,0xe3,0xf6,0x44,0xce,0x18 C mulx 24(up,n,8), %rbx, %rax + inc %r14 + jmp L(outer) + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/zen/sbpi1_bdiv_r.asm b/mpn/x86_64/zen/sbpi1_bdiv_r.asm +index f6e8f9c..277b3c3 100644 +--- a/mpn/x86_64/zen/sbpi1_bdiv_r.asm ++++ b/mpn/x86_64/zen/sbpi1_bdiv_r.asm +@@ -505,3 +505,4 @@ L(ret): mov %rbp, %rax + pop %r15
+ ret + EPILOGUE() ++ASM_END() +diff --git a/mpn/x86_64/zen/sqr_basecase.asm b/mpn/x86_64/zen/sqr_basecase.asm +index a7c6127..d185deb 100644 +--- a/mpn/x86_64/zen/sqr_basecase.asm ++++ b/mpn/x86_64/zen/sqr_basecase.asm +@@ -480,3 +480,4 @@ C pop %r14 + FUNC_EXIT() + ret + EPILOGUE() ++ASM_END() +-- +2.37.1 + diff --git a/gmp-6.2.1-zeroize-allocator.patch b/gmp-6.2.1-zeroize-allocator.patch new file mode 100644 index 0000000..591821e --- /dev/null +++ b/gmp-6.2.1-zeroize-allocator.patch @@ -0,0 +1,53 @@ +diff -r e3123b88d012 memory.c +--- a/memory.c Tue Aug 16 22:02:45 2022 +0200 ++++ b/memory.c Fri Aug 19 06:25:37 2022 +0900 +@@ -29,7 +29,8 @@ + see https://www.gnu.org/licenses/. */ + + #include <stdio.h> +-#include <stdlib.h> /* for malloc, realloc, free */ ++#include <stdlib.h> /* for malloc, free */ ++#include <string.h> /* for memcpy, explicit_bzero */ + + #include "gmp-impl.h" + +@@ -98,11 +99,28 @@ + new_size += 2 * GMP_LIMB_BYTES; + #endif + +- ret = realloc (oldptr, new_size); +- if (ret == 0) ++ if (new_size == 0) ++ { ++ explicit_bzero (oldptr, old_size); ++ free (oldptr); ++ return NULL; ++ } ++ else if (old_size == new_size) ++ return oldptr; ++ else + { +- fprintf (stderr, "GNU MP: Cannot reallocate memory (old_size=%lu new_size=%lu)\n", (long) old_size, (long) new_size); +- abort (); ++ /* We can't simply call realloc, as it may allocate memory from ++ a different arena. */ ++ ret = malloc (new_size); ++ if (ret == NULL) ++ { ++ fprintf (stderr, "GNU MP: Cannot reallocate memory (old_size=%lu new_size=%lu)\n", (long) old_size, (long) new_size); ++ explicit_bzero(oldptr, old_size); ++ abort(); ++ } ++ memcpy (ret, oldptr, MIN(old_size, new_size)); ++ explicit_bzero (oldptr, old_size); ++ free (oldptr); + } + + #ifdef DEBUG +@@ -141,5 +159,6 @@ + blk_ptr = p - 1; + } + #endif ++ explicit_bzero (blk_ptr, blk_size); + free (blk_ptr); + } diff --git a/nettle-3.10-hobble-to-configure.patch b/nettle-3.10-hobble-to-configure.patch new file mode 100644 index 0000000..6ab752a --- /dev/null +++ b/nettle-3.10-hobble-to-configure.patch @@ -0,0 +1,749 @@ +From 499fab03ff0b46c2328992595b057ae8db63d544 Mon Sep 17 00:00:00 2001 +From: Daiki Ueno +Date: Wed, 24 Jul 2024 15:19:01 +0900 +Subject: [PATCH 1/3] Add --disable-sm3 configure option + +Signed-off-by: Daiki Ueno +--- + Makefile.in | 8 +++++--- + configure.ac | 14 ++++++++++++++ + examples/nettle-benchmark.c | 8 +++++++- + nettle-meta-hashes.c | 2 ++ + nettle-meta-macs.c | 2 ++ + testsuite/Makefile.in | 4 +++- + testsuite/hmac-test.c | 2 ++ + testsuite/meta-hash-test.c | 2 ++ + testsuite/meta-mac-test.c | 2 ++ + 9 files changed, 39 insertions(+), 5 deletions(-) + +diff --git a/Makefile.in b/Makefile.in +index 71ad761e..cb7b3d99 100644 +--- a/Makefile.in ++++ b/Makefile.in +@@ -123,11 +123,11 @@ nettle_SOURCES = aes-decrypt-internal.c aes-decrypt.c aes-decrypt-table.c \ + gost28147.c gosthash94.c gosthash94-meta.c \ + hmac.c hmac-gosthash94.c hmac-md5.c hmac-ripemd160.c \ + hmac-sha1.c hmac-sha224.c hmac-sha256.c hmac-sha384.c \ +- hmac-sha512.c hmac-streebog.c hmac-sm3.c \ ++ hmac-sha512.c hmac-streebog.c \ + hmac-md5-meta.c hmac-ripemd160-meta.c hmac-sha1-meta.c \ + hmac-sha224-meta.c hmac-sha256-meta.c hmac-sha384-meta.c \ + hmac-sha512-meta.c hmac-gosthash94-meta.c \ +- hmac-streebog-meta.c hmac-sm3-meta.c \ ++ hmac-streebog-meta.c \ + knuth-lfib.c hkdf.c \ + md2.c md2-meta.c md4.c md4-meta.c \ + md5.c md5-compat.c md5-meta.c \ +@@ -153,7 +153,6 @@ nettle_SOURCES = aes-decrypt-internal.c aes-decrypt.c aes-decrypt-table.c \ + sha3-224.c sha3-224-meta.c sha3-256.c 
sha3-256-meta.c \ + sha3-384.c sha3-384-meta.c sha3-512.c sha3-512-meta.c \ + sha3-shake.c shake128.c shake256.c \ +- sm3.c sm3-meta.c \ + serpent-set-key.c serpent-encrypt.c serpent-decrypt.c \ + serpent-meta.c \ + streebog.c streebog-meta.c \ +@@ -228,6 +227,9 @@ hogweed_SOURCES = sexp.c sexp-format.c \ + ed448-shake256.c ed448-shake256-pubkey.c \ + ed448-shake256-sign.c ed448-shake256-verify.c + ++ ++nettle_SOURCES += @IF_SM3@ hmac-sm3.c hmac-sm3-meta.c sm3.c sm3-meta.c ++ + OPT_SOURCES = fat-arm.c fat-arm64.c fat-ppc.c fat-s390x.c fat-x86_64.c mini-gmp.c + + HEADERS = aes.h arcfour.h arctwo.h asn1.h blowfish.h balloon.h \ +diff --git a/configure.ac b/configure.ac +index 7c003bb7..fe174919 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -124,6 +124,10 @@ AC_ARG_ENABLE(mini-gmp, + AS_HELP_STRING([--enable-mini-gmp], [Enable mini-gmp, used instead of libgmp.]),, + [enable_mini_gmp=no]) + ++AC_ARG_ENABLE(sm3, ++ AS_HELP_STRING([--disable-sm3], [Disable SM3 hash algorithm]),, ++ [enable_sm3=yes]) ++ + AC_ARG_VAR(ASM_FLAGS, [Extra flags for processing assembly source files]) + + if test "x$enable_mini_gmp" = xyes ; then +@@ -1157,6 +1161,15 @@ else + IF_MINI_GMP='#' + fi + ++AH_TEMPLATE([WITH_SM3], [Defined if SM3 hash algorithm is enabled]) ++ ++if test "x$enable_sm3" = xyes ; then ++ AC_DEFINE(WITH_SM3) ++ IF_SM3='' ++else ++ IF_SM3='#' ++fi ++ + AC_SUBST(IF_HOGWEED) + AC_SUBST(IF_STATIC) + AC_SUBST(IF_SHARED) +@@ -1165,6 +1178,7 @@ AC_SUBST(IF_DLOPEN_TEST) + AC_SUBST(IF_DOCUMENTATION) + AC_SUBST(IF_DLL) + AC_SUBST(IF_MINI_GMP) ++AC_SUBST(IF_SM3) + + OPENSSL_LIBFLAGS='' + +diff --git a/examples/nettle-benchmark.c b/examples/nettle-benchmark.c +index 2a11a694..36835854 100644 +--- a/examples/nettle-benchmark.c ++++ b/examples/nettle-benchmark.c +@@ -901,6 +901,12 @@ bench_ghash_update(void) + # define OPENSSL(x) + #endif + ++#if WITH_SM3 ++# define SM3(x) x, ++#else ++# define SM3(x) ++#endif ++ + int + main(int argc, char **argv) + { +@@ -920,7 +926,7 @@ main(int argc, char **argv) + &nettle_sha3_384, &nettle_sha3_512, + &nettle_ripemd160, &nettle_gosthash94, + &nettle_gosthash94cp, &nettle_streebog256, +- &nettle_streebog512, &nettle_sm3, ++ &nettle_streebog512, SM3(&nettle_sm3) + NULL + }; + +diff --git a/nettle-meta-hashes.c b/nettle-meta-hashes.c +index 2245dfb7..6d4563d9 100644 +--- a/nettle-meta-hashes.c ++++ b/nettle-meta-hashes.c +@@ -57,7 +57,9 @@ const struct nettle_hash * const _nettle_hashes[] = { + &nettle_sha3_512, + &nettle_streebog256, + &nettle_streebog512, ++#if WITH_SM3 + &nettle_sm3, ++#endif + NULL + }; + +diff --git a/nettle-meta-macs.c b/nettle-meta-macs.c +index 48b2176e..866f0766 100644 +--- a/nettle-meta-macs.c ++++ b/nettle-meta-macs.c +@@ -52,7 +52,9 @@ const struct nettle_mac * const _nettle_macs[] = { + &nettle_hmac_sha512, + &nettle_hmac_streebog256, + &nettle_hmac_streebog512, ++#if WITH_SM3 + &nettle_hmac_sm3, ++#endif + NULL + }; + +diff --git a/testsuite/Makefile.in b/testsuite/Makefile.in +index 0699fa0d..a45ddf77 100644 +--- a/testsuite/Makefile.in ++++ b/testsuite/Makefile.in +@@ -25,7 +25,7 @@ TS_NETTLE_SOURCES = aes-test.c aes-keywrap-test.c arcfour-test.c arctwo-test.c \ + sha3-permute-test.c sha3-224-test.c sha3-256-test.c \ + sha3-384-test.c sha3-512-test.c \ + shake128-test.c shake256-test.c \ +- streebog-test.c sm3-test.c sm4-test.c \ ++ streebog-test.c sm4-test.c \ + serpent-test.c twofish-test.c version-test.c \ + knuth-lfib-test.c \ + cbc-test.c cfb-test.c ctr-test.c gcm-test.c eax-test.c ccm-test.c \ +@@ -60,6 +60,8 @@ TS_HOGWEED_SOURCES 
= sexp-test.c sexp-format-test.c \ + gostdsa-sign-test.c gostdsa-verify-test.c \ + gostdsa-keygen-test.c gostdsa-vko-test.c + ++TS_NETTLE_SOURCES += @IF_SM3@ sm3-test.c ++ + TS_SOURCES = $(TS_NETTLE_SOURCES) $(TS_HOGWEED_SOURCES) + CXX_SOURCES = cxx-test.cxx + +diff --git a/testsuite/hmac-test.c b/testsuite/hmac-test.c +index d7af2475..d34127bf 100644 +--- a/testsuite/hmac-test.c ++++ b/testsuite/hmac-test.c +@@ -949,9 +949,11 @@ test_main (void) + SHEX("a1aa5f7de402d7b3d323f2991c8d4534" + "013137010a83754fd0af6d7cd4922ed9")); + ++#if WITH_SM3 + test_mac (&nettle_hmac_sm3, (nettle_hash_update_func*) hmac_sm3_set_key, + SDATA("monkey monkey monkey monkey"), + SDATA("abc"), + SHEX("7a9388e2ca5343b5d76e7c2c3d84f239" + "f306c0b60d5e0dc4d2771e42860a6a2b")); ++#endif + } +diff --git a/testsuite/meta-hash-test.c b/testsuite/meta-hash-test.c +index ec4e0d1e..8427e0a1 100644 +--- a/testsuite/meta-hash-test.c ++++ b/testsuite/meta-hash-test.c +@@ -24,7 +24,9 @@ const char* hashes[] = { + "sha3_512", + "streebog256", + "streebog512", ++#if WITH_SM3 + "sm3", ++#endif + }; + + void +diff --git a/testsuite/meta-mac-test.c b/testsuite/meta-mac-test.c +index 6c848a88..c00efd3c 100644 +--- a/testsuite/meta-mac-test.c ++++ b/testsuite/meta-mac-test.c +@@ -16,7 +16,9 @@ const char* macs[] = { + "hmac_sha512", + "hmac_streebog256", + "hmac_streebog512", ++#if WITH_SM3 + "hmac_sm3", ++#endif + }; + + void +-- +2.48.1 + + +From 04ef86ac0ad034f44b325cd6b0ff7880d64f762f Mon Sep 17 00:00:00 2001 +From: Daiki Ueno +Date: Wed, 24 Jul 2024 15:28:13 +0900 +Subject: [PATCH 2/3] Add --disable-sm4 configure option + +Signed-off-by: Daiki Ueno +--- + Makefile.in | 3 +-- + configure.ac | 14 ++++++++++++++ + examples/nettle-benchmark.c | 8 +++++++- + nettle-meta-aeads.c | 2 ++ + nettle-meta-ciphers.c | 2 ++ + testsuite/Makefile.in | 3 ++- + testsuite/gcm-test.c | 2 ++ + testsuite/meta-aead-test.c | 2 ++ + testsuite/meta-cipher-test.c | 4 +++- + 9 files changed, 35 insertions(+), 5 deletions(-) + +diff --git a/Makefile.in b/Makefile.in +index cb7b3d99..9c8b8b59 100644 +--- a/Makefile.in ++++ b/Makefile.in +@@ -117,7 +117,6 @@ nettle_SOURCES = aes-decrypt-internal.c aes-decrypt.c aes-decrypt-table.c \ + gcm-aes256.c gcm-aes256-meta.c \ + gcm-camellia128.c gcm-camellia128-meta.c \ + gcm-camellia256.c gcm-camellia256-meta.c \ +- gcm-sm4.c gcm-sm4-meta.c \ + cmac.c cmac64.c cmac-aes128.c cmac-aes256.c cmac-des3.c \ + cmac-aes128-meta.c cmac-aes256-meta.c cmac-des3-meta.c \ + gost28147.c gosthash94.c gosthash94-meta.c \ +@@ -157,7 +156,6 @@ nettle_SOURCES = aes-decrypt-internal.c aes-decrypt.c aes-decrypt-table.c \ + serpent-meta.c \ + streebog.c streebog-meta.c \ + twofish.c twofish-meta.c \ +- sm4.c sm4-meta.c \ + umac-nh.c umac-nh-n.c umac-l2.c umac-l3.c \ + umac-poly64.c umac-poly128.c umac-set-key.c \ + umac32.c umac64.c umac96.c umac128.c \ +@@ -229,6 +227,7 @@ hogweed_SOURCES = sexp.c sexp-format.c \ + + + nettle_SOURCES += @IF_SM3@ hmac-sm3.c hmac-sm3-meta.c sm3.c sm3-meta.c ++nettle_SOURCES += @IF_SM4@ gcm-sm4.c gcm-sm4-meta.c sm4.c sm4-meta.c + + OPT_SOURCES = fat-arm.c fat-arm64.c fat-ppc.c fat-s390x.c fat-x86_64.c mini-gmp.c + +diff --git a/configure.ac b/configure.ac +index fe174919..494c7d2c 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -128,6 +128,10 @@ AC_ARG_ENABLE(sm3, + AS_HELP_STRING([--disable-sm3], [Disable SM3 hash algorithm]),, + [enable_sm3=yes]) + ++AC_ARG_ENABLE(sm4, ++ AS_HELP_STRING([--disable-sm4], [Disable SM4 symmetric cipher algorithm]),, ++ [enable_sm4=yes]) ++ + AC_ARG_VAR(ASM_FLAGS, [Extra 
flags for processing assembly source files]) + + if test "x$enable_mini_gmp" = xyes ; then +@@ -1170,6 +1174,15 @@ else + IF_SM3='#' + fi + ++AH_TEMPLATE([WITH_SM4], [Defined if SM4 symmetric cipher is enabled]) ++ ++if test "x$enable_sm4" = xyes ; then ++ AC_DEFINE(WITH_SM4) ++ IF_SM4='' ++else ++ IF_SM4='#' ++fi ++ + AC_SUBST(IF_HOGWEED) + AC_SUBST(IF_STATIC) + AC_SUBST(IF_SHARED) +@@ -1179,6 +1192,7 @@ AC_SUBST(IF_DOCUMENTATION) + AC_SUBST(IF_DLL) + AC_SUBST(IF_MINI_GMP) + AC_SUBST(IF_SM3) ++AC_SUBST(IF_SM4) + + OPENSSL_LIBFLAGS='' + +diff --git a/examples/nettle-benchmark.c b/examples/nettle-benchmark.c +index 36835854..66f92f6e 100644 +--- a/examples/nettle-benchmark.c ++++ b/examples/nettle-benchmark.c +@@ -907,6 +907,12 @@ bench_ghash_update(void) + # define SM3(x) + #endif + ++#if WITH_SM4 ++# define SM4(x) x, ++#else ++# define SM4(x) ++#endif ++ + int + main(int argc, char **argv) + { +@@ -943,7 +949,7 @@ main(int argc, char **argv) + &nettle_des3, + &nettle_serpent256, + &nettle_twofish128, &nettle_twofish192, &nettle_twofish256, +- &nettle_sm4, ++ SM4(&nettle_sm4) + NULL + }; + +diff --git a/nettle-meta-aeads.c b/nettle-meta-aeads.c +index 78f38a3c..c94fecd5 100644 +--- a/nettle-meta-aeads.c ++++ b/nettle-meta-aeads.c +@@ -43,7 +43,9 @@ const struct nettle_aead * const _nettle_aeads[] = { + &nettle_gcm_aes256, + &nettle_gcm_camellia128, + &nettle_gcm_camellia256, ++#if WITH_SM4 + &nettle_gcm_sm4, ++#endif + &nettle_eax_aes128, + &nettle_chacha_poly1305, + NULL +diff --git a/nettle-meta-ciphers.c b/nettle-meta-ciphers.c +index f8d691cf..6a84a43a 100644 +--- a/nettle-meta-ciphers.c ++++ b/nettle-meta-ciphers.c +@@ -54,7 +54,9 @@ const struct nettle_cipher * const _nettle_ciphers[] = { + &nettle_arctwo64, + &nettle_arctwo128, + &nettle_arctwo_gutmann128, ++#if WITH_SM4 + &nettle_sm4, ++#endif + NULL + }; + +diff --git a/testsuite/Makefile.in b/testsuite/Makefile.in +index a45ddf77..3483f409 100644 +--- a/testsuite/Makefile.in ++++ b/testsuite/Makefile.in +@@ -25,7 +25,7 @@ TS_NETTLE_SOURCES = aes-test.c aes-keywrap-test.c arcfour-test.c arctwo-test.c \ + sha3-permute-test.c sha3-224-test.c sha3-256-test.c \ + sha3-384-test.c sha3-512-test.c \ + shake128-test.c shake256-test.c \ +- streebog-test.c sm4-test.c \ ++ streebog-test.c \ + serpent-test.c twofish-test.c version-test.c \ + knuth-lfib-test.c \ + cbc-test.c cfb-test.c ctr-test.c gcm-test.c eax-test.c ccm-test.c \ +@@ -61,6 +61,7 @@ TS_HOGWEED_SOURCES = sexp-test.c sexp-format-test.c \ + gostdsa-keygen-test.c gostdsa-vko-test.c + + TS_NETTLE_SOURCES += @IF_SM3@ sm3-test.c ++TS_NETTLE_SOURCES += @IF_SM4@ sm4-test.c + + TS_SOURCES = $(TS_NETTLE_SOURCES) $(TS_HOGWEED_SOURCES) + CXX_SOURCES = cxx-test.cxx +diff --git a/testsuite/gcm-test.c b/testsuite/gcm-test.c +index e8228ed7..fad9184a 100644 +--- a/testsuite/gcm-test.c ++++ b/testsuite/gcm-test.c +@@ -825,6 +825,7 @@ test_main(void) + "16aedbf5a0de6a57 a637b39b"), /* iv */ + SHEX("5791883f822013f8bd136fc36fb9946b")); /* tag */ + ++#if WITH_SM4 + /* + * GCM-SM4 Test Vectors from + * https://datatracker.ietf.org/doc/html/rfc8998 +@@ -842,6 +843,7 @@ test_main(void) + "A56834CBCF98C397B4024A2691233B8D"), + SHEX("00001234567800000000ABCD"), + SHEX("83DE3541E4C2B58177E065A9BF7B62EC")); ++#endif + + /* Test gcm_hash, with varying message size, keys and iv all zero. + Not compared to any other implementation. 
*/ +diff --git a/testsuite/meta-aead-test.c b/testsuite/meta-aead-test.c +index ceeca227..d1a3193f 100644 +--- a/testsuite/meta-aead-test.c ++++ b/testsuite/meta-aead-test.c +@@ -8,7 +8,9 @@ const char* aeads[] = { + "gcm_aes256", + "gcm_camellia128", + "gcm_camellia256", ++#if WITH_SM4 + "gcm_sm4", ++#endif + "eax_aes128", + "chacha_poly1305", + }; +diff --git a/testsuite/meta-cipher-test.c b/testsuite/meta-cipher-test.c +index 912fac5a..b57fcbe1 100644 +--- a/testsuite/meta-cipher-test.c ++++ b/testsuite/meta-cipher-test.c +@@ -20,7 +20,9 @@ const char* ciphers[] = { + "twofish128", + "twofish192", + "twofish256", +- "sm4" ++#if WITH_SM4 ++ "sm4", ++#endif + }; + + void +-- +2.48.1 + + +From cef5228a90257430d4151163c259bc83fd2f7900 Mon Sep 17 00:00:00 2001 +From: Daiki Ueno +Date: Thu, 22 Aug 2024 10:49:46 +0900 +Subject: [PATCH 3/3] Add --disable-ecc-{secp192r1,secp224r1} configure option + +Signed-off-by: Daiki Ueno +--- + Makefile.in | 5 ++++- + configure.ac | 36 ++++++++++++++++++++++++++++++++++- + examples/ecc-benchmark.c | 4 ++++ + examples/hogweed-benchmark.c | 6 ++++++ + testsuite/ecdh-test.c | 4 ++++ + testsuite/ecdsa-sign-test.c | 6 ++++++ + testsuite/ecdsa-verify-test.c | 4 ++++ + testsuite/testutils.c | 10 +++++++++- + 8 files changed, 72 insertions(+), 3 deletions(-) + +diff --git a/Makefile.in b/Makefile.in +index 9c8b8b59..1e9ed61c 100644 +--- a/Makefile.in ++++ b/Makefile.in +@@ -202,7 +202,7 @@ hogweed_SOURCES = sexp.c sexp-format.c \ + ecc-mod-arith.c ecc-pp1-redc.c ecc-pm1-redc.c \ + ecc-curve25519.c ecc-curve448.c \ + ecc-gost-gc256b.c ecc-gost-gc512a.c \ +- ecc-secp192r1.c ecc-secp224r1.c ecc-secp256r1.c \ ++ ecc-secp256r1.c \ + ecc-secp384r1.c ecc-secp521r1.c \ + ecc-size.c ecc-j-to-a.c ecc-a-to-j.c \ + ecc-dup-jj.c ecc-add-jja.c ecc-add-jjj.c ecc-nonsec-add-jjj.c \ +@@ -229,6 +229,9 @@ hogweed_SOURCES = sexp.c sexp-format.c \ + nettle_SOURCES += @IF_SM3@ hmac-sm3.c hmac-sm3-meta.c sm3.c sm3-meta.c + nettle_SOURCES += @IF_SM4@ gcm-sm4.c gcm-sm4-meta.c sm4.c sm4-meta.c + ++hogweed_SOURCES += @IF_ECC_SECP192R1@ ecc-secp192r1.c ++hogweed_SOURCES += @IF_ECC_SECP224R1@ ecc-secp224r1.c ++ + OPT_SOURCES = fat-arm.c fat-arm64.c fat-ppc.c fat-s390x.c fat-x86_64.c mini-gmp.c + + HEADERS = aes.h arcfour.h arctwo.h asn1.h blowfish.h balloon.h \ +diff --git a/configure.ac b/configure.ac +index 494c7d2c..105640e1 100644 +--- a/configure.ac ++++ b/configure.ac +@@ -132,6 +132,14 @@ AC_ARG_ENABLE(sm4, + AS_HELP_STRING([--disable-sm4], [Disable SM4 symmetric cipher algorithm]),, + [enable_sm4=yes]) + ++AC_ARG_ENABLE(ecc-secp192r1, ++ AS_HELP_STRING([--disable-ecc-secp192r1], [Disable NIST secp192r1 curve]),, ++ [enable_ecc_secp192r1=yes]) ++ ++AC_ARG_ENABLE(ecc-secp224r1, ++ AS_HELP_STRING([--disable-ecc-secp224r1], [Disable NIST secp224r1 curve]),, ++ [enable_ecc_secp224r1=yes]) ++ + AC_ARG_VAR(ASM_FLAGS, [Extra flags for processing assembly source files]) + + if test "x$enable_mini_gmp" = xyes ; then +@@ -624,9 +632,15 @@ asm_nettle_optional_list="cpuid.asm cpu-facility.asm \ + + asm_hogweed_optional_list="" + if test "x$enable_public_key" = "xyes" ; then +- asm_hogweed_optional_list="ecc-secp192r1-modp.asm ecc-secp224r1-modp.asm \ ++ asm_hogweed_optional_list="\ + ecc-secp256r1-redc.asm ecc-secp384r1-modp.asm ecc-secp521r1-modp.asm \ + ecc-curve25519-modp.asm ecc-curve448-modp.asm" ++ if test "x$enable_ecc_secp192r1" = "xyes" ; then ++ asm_hogweed_optional_list="ecc-secp192r1-modp.asm $asm_hogweed_optional_list" ++ fi ++ if test "x$enable_ecc_secp224r1" = "xyes" ; then ++ 
asm_hogweed_optional_list="ecc-secp224r1-modp.asm $asm_hogweed_optional_list" ++ fi + fi + + OPT_NETTLE_OBJS="" +@@ -1183,6 +1197,24 @@ else + IF_SM4='#' + fi + ++AH_TEMPLATE([WITH_ECC_SECP192R1], [Defined if NIST secp192r1 curve is enabled]) ++ ++if test "x$enable_ecc_secp192r1" = xyes ; then ++ AC_DEFINE(WITH_ECC_SECP192R1) ++ IF_ECC_SECP192R1='' ++else ++ IF_ECC_SECP192R1='#' ++fi ++ ++AH_TEMPLATE([WITH_ECC_SECP224R1], [Defined if NIST secp224r1 curve is enabled]) ++ ++if test "x$enable_ecc_secp224r1" = xyes ; then ++ AC_DEFINE(WITH_ECC_SECP224R1) ++ IF_ECC_SECP224R1='' ++else ++ IF_ECC_SECP224R1='#' ++fi ++ + AC_SUBST(IF_HOGWEED) + AC_SUBST(IF_STATIC) + AC_SUBST(IF_SHARED) +@@ -1193,6 +1225,8 @@ AC_SUBST(IF_DLL) + AC_SUBST(IF_MINI_GMP) + AC_SUBST(IF_SM3) + AC_SUBST(IF_SM4) ++AC_SUBST(IF_ECC_SECP192R1) ++AC_SUBST(IF_ECC_SECP224R1) + + OPENSSL_LIBFLAGS='' + +diff --git a/examples/ecc-benchmark.c b/examples/ecc-benchmark.c +index 7e857f80..ebcced65 100644 +--- a/examples/ecc-benchmark.c ++++ b/examples/ecc-benchmark.c +@@ -314,8 +314,12 @@ bench_curve (const struct ecc_curve *ecc) + } + + const struct ecc_curve * const curves[] = { ++#if WITH_ECC_SECP192R1 + &_nettle_secp_192r1, ++#endif ++#if WITH_ECC_SECP224R1 + &_nettle_secp_224r1, ++#endif + &_nettle_curve25519, + &_nettle_secp_256r1, + &_nettle_secp_384r1, +diff --git a/examples/hogweed-benchmark.c b/examples/hogweed-benchmark.c +index 3f858833..df608ffa 100644 +--- a/examples/hogweed-benchmark.c ++++ b/examples/hogweed-benchmark.c +@@ -410,6 +410,7 @@ bench_ecdsa_init (unsigned size) + + switch (size) + { ++#if WITH_ECC_SECP192R1 + case 192: + ecc = &_nettle_secp_192r1; + xs = "8e8e07360350fb6b7ad8370cfd32fa8c6bba785e6e200599"; +@@ -418,6 +419,8 @@ bench_ecdsa_init (unsigned size) + ctx->digest = hash_string (&nettle_sha1, "abc"); + ctx->digest_size = 20; + break; ++#endif ++#if WITH_ECC_SECP224R1 + case 224: + ecc = &_nettle_secp_224r1; + xs = "993bf363f4f2bc0f255f22563980449164e9c894d9efd088d7b77334"; +@@ -426,6 +429,7 @@ bench_ecdsa_init (unsigned size) + ctx->digest = hash_string (&nettle_sha224, "abc"); + ctx->digest_size = 28; + break; ++#endif + + /* From RFC 4754 */ + case 256: +@@ -864,7 +868,9 @@ struct alg alg_list[] = { + #if 0 + { "dsa",2048, bench_dsa_init, bench_dsa_sign, bench_dsa_verify, bench_dsa_clear }, + #endif ++#if WITH_ECC_SECP192R1 + { "ecdsa", 192, bench_ecdsa_init, bench_ecdsa_sign, bench_ecdsa_verify, bench_ecdsa_clear }, ++#endif + { "ecdsa", 224, bench_ecdsa_init, bench_ecdsa_sign, bench_ecdsa_verify, bench_ecdsa_clear }, + { "ecdsa", 256, bench_ecdsa_init, bench_ecdsa_sign, bench_ecdsa_verify, bench_ecdsa_clear }, + { "ecdsa", 384, bench_ecdsa_init, bench_ecdsa_sign, bench_ecdsa_verify, bench_ecdsa_clear }, +diff --git a/testsuite/ecdh-test.c b/testsuite/ecdh-test.c +index ff4f7233..f852d813 100644 +--- a/testsuite/ecdh-test.c ++++ b/testsuite/ecdh-test.c +@@ -159,6 +159,7 @@ test_public_key (const char *label, const struct ecc_curve *ecc, + void + test_main(void) + { ++#if WITH_ECC_SECP192R1 + test_public_key ("(0,0) with secp-192r1", &_nettle_secp_192r1, "0", "0", 0); + test_public_key ( + "(P,0) with secp-192r1", &_nettle_secp_192r1, +@@ -188,7 +189,9 @@ test_main(void) + "293088185788565313717816218507714888251468410990708684573", + "149293809021051532782730990145509724807636529827149481690", + "2891131861147398318714693938158856874319184314120776776192"); ++#endif + ++#if WITH_ECC_SECP224R1 + test_dh ("secp-224r1", &_nettle_secp_224r1, + 
"1321072106881784386340709783538698930880431939595776773514895067682", + "6768311794185371282972144247871764855860666277647541840973645586477", +@@ -198,6 +201,7 @@ test_main(void) + "24223309755162432227459925493224336241652868856405241018762887667883", + "8330362698029245839097779050425944245826040430538860338085968752913", + "24167244512472228715617822000878192535267113543393576038737592837010"); ++#endif + + test_dh ("secp-256r1", &_nettle_secp_256r1, + "94731533361265297353914491124013058635674217345912524033267198103710636378786", +diff --git a/testsuite/ecdsa-sign-test.c b/testsuite/ecdsa-sign-test.c +index 46fc2738..aa44adb5 100644 +--- a/testsuite/ecdsa-sign-test.c ++++ b/testsuite/ecdsa-sign-test.c +@@ -74,6 +74,7 @@ test_main (void) + if (test_side_channel) + SKIP(); + #endif ++#if WITH_ECC_SECP224R1 + /* Producing the signature for corresponding test in + ecdsa-verify-test.c, with special u1 and u2. */ + test_ecdsa (&_nettle_secp_224r1, +@@ -86,6 +87,7 @@ test_main (void) + "d16dc18032d268fd1a704fa6", /* r */ + "3a41e1423b1853e8aa89747b1f987364" + "44705d6d6d8371ea1f578f2e"); /* s */ ++#endif + + /* Produce a signature where verify operation results in a point duplication. */ + test_ecdsa (&_nettle_secp_256r1, +@@ -99,6 +101,7 @@ test_main (void) + "53f097727a0e0dc284a0daa0da0ab77d" + "5792ae67ed075d1f8d5bda0f853fa093"); /* s */ + ++#if WITH_ECC_SECP192R1 + /* Test cases for the smaller groups, verified with a + proof-of-concept implementation done for Yubico AB. */ + test_ecdsa (&_nettle_secp_192r1, +@@ -116,7 +119,9 @@ test_main (void) + + "a91fb738f9f175d72f9c98527e881c36" + "8de68cb55ffe589"); /* s */ ++#endif + ++#if WITH_ECC_SECP224R1 + test_ecdsa (&_nettle_secp_224r1, + "446df0a771ed58403ca9cb316e617f6b" + "158420465d00a69601e22858", /* z */ +@@ -132,6 +137,7 @@ test_main (void) + + "d0f069fd0f108eb07b7bbc54c8d6c88d" + "f2715c38a95c31a2b486995f"); /* s */ ++#endif + + /* From RFC 4754 */ + test_ecdsa (&_nettle_secp_256r1, +diff --git a/testsuite/ecdsa-verify-test.c b/testsuite/ecdsa-verify-test.c +index 8d527000..1fa69f09 100644 +--- a/testsuite/ecdsa-verify-test.c ++++ b/testsuite/ecdsa-verify-test.c +@@ -81,6 +81,7 @@ test_ecdsa (const struct ecc_curve *ecc, + void + test_main (void) + { ++#if WITH_ECC_SECP224R1 + /* Corresponds to nonce k = 2 and private key z = + 0x99b5b787484def12894ca507058b3bf543d72d82fa7721d2e805e5e6. z and + hash are chosen so that intermediate scalars in the verify +@@ -100,7 +101,9 @@ test_main (void) + "d16dc18032d268fd1a704fa6", /* r */ + "3a41e1423b1853e8aa89747b1f987364" + "44705d6d6d8371ea1f578f2e"); /* s */ ++#endif + ++#if WITH_ECC_SECP192R1 + /* Test case provided by Guido Vranken, from oss-fuzz */ + test_ecdsa (&_nettle_secp_192r1, + "14683086 f1734c6d e68743a6 48181b54 a74d4c5b 383eb6a8", /* x */ +@@ -108,6 +111,7 @@ test_main (void) + SHEX("00"), /* h == 0 corner case*/ + "952800792ed19341fdeeec047f2514f3b0f150d6066151fb", /* r */ + "ec5971222014878b50d7a19d8954bc871e7e65b00b860ffb"); /* s */ ++#endif + + /* Test case provided by Guido Vranken, from oss-fuzz. 
Triggers + point duplication in the verify operation by using private key = +diff --git a/testsuite/testutils.c b/testsuite/testutils.c +index 76aa5563..5b7c7deb 100644 +--- a/testsuite/testutils.c ++++ b/testsuite/testutils.c +@@ -2230,8 +2230,12 @@ test_dsa_key(const struct dsa_params *params, + } + + const struct ecc_curve * const ecc_curves[] = { ++#if WITH_ECC_SECP192R1 + &_nettle_secp_192r1, ++#endif ++#if WITH_ECC_SECP224R1 + &_nettle_secp_224r1, ++#endif + &_nettle_secp_256r1, + &_nettle_secp_384r1, + &_nettle_secp_521r1, +@@ -2355,7 +2359,8 @@ test_ecc_point (const struct ecc_curve *ecc, + } + + /* For each curve, the points g, 2 g, 3 g and 4 g */ +-static const struct ecc_ref_point ecc_ref[9][4] = { ++static const struct ecc_ref_point ecc_ref[][4] = { ++#if WITH_ECC_SECP192R1 + { { "188da80eb03090f67cbf20eb43a18800f4ff0afd82ff1012", + "07192b95ffc8da78631011ed6b24cdd573f977a11e794811" }, + { "dafebf5828783f2ad35534631588a3f629a70fb16982a888", +@@ -2365,6 +2370,8 @@ static const struct ecc_ref_point ecc_ref[9][4] = { + { "35433907297cc378b0015703374729d7a4fe46647084e4ba", + "a2649984f2135c301ea3acb0776cd4f125389b311db3be32" } + }, ++#endif ++#if WITH_ECC_SECP224R1 + { { "b70e0cbd6bb4bf7f321390b94a03c1d356c21122343280d6115c1d21", + "bd376388b5f723fb4c22dfe6cd4375a05a07476444d5819985007e34" }, + { "706a46dc76dcb76798e60e6d89474788d16dc18032d268fd1a704fa6", +@@ -2374,6 +2381,7 @@ static const struct ecc_ref_point ecc_ref[9][4] = { + { "ae99feebb5d26945b54892092a8aee02912930fa41cd114e40447301", + "482580a0ec5bc47e88bc8c378632cd196cb3fa058a7114eb03054c9" }, + }, ++#endif + { { "6b17d1f2e12c4247f8bce6e563a440f277037d812deb33a0f4a13945d898c296", + "4fe342e2fe1a7f9b8ee7eb4a7c0f9e162bce33576b315ececbb6406837bf51f5" }, + { "7cf27b188d034f7e8a52380304b51ac3c08969e277f21b35a60b48fc47669978", +-- +2.48.1 + diff --git a/nettle-3.8-zeroize-stack.patch b/nettle-3.8-zeroize-stack.patch new file mode 100644 index 0000000..f93a248 --- /dev/null +++ b/nettle-3.8-zeroize-stack.patch @@ -0,0 +1,334 @@ +From 24a4cb910a51f35dff89842e8cce27f88e8e78c3 Mon Sep 17 00:00:00 2001 +From: Daiki Ueno +Date: Wed, 24 Aug 2022 17:19:57 +0900 +Subject: [PATCH] Clear any intermediate data allocate on stack + +Signed-off-by: Daiki Ueno +--- + cbc.c | 3 +++ + cfb.c | 13 +++++++++++++ + ctr.c | 4 ++++ + ctr16.c | 2 ++ + ecc-random.c | 3 +++ + ecdsa-keygen.c | 2 ++ + ecdsa-sign.c | 2 ++ + ed25519-sha512-sign.c | 2 ++ + ed448-shake256-sign.c | 2 ++ + gostdsa-sign.c | 2 ++ + hmac.c | 10 +++++++--- + nettle-internal.h | 5 +++++ + pbkdf2.c | 5 ++++- + pss-mgf1.c | 5 ++++- + pss.c | 4 ++++ + 15 files changed, 59 insertions(+), 5 deletions(-) + +diff --git a/cbc.c b/cbc.c +index 76b6492d..b9da3aa0 100644 +--- a/cbc.c ++++ b/cbc.c +@@ -128,6 +128,9 @@ cbc_decrypt(const void *ctx, nettle_cipher_func *f, + length - block_size); + /* Writes first block. */ + memxor3(dst, buffer, initial_iv, block_size); ++ ++ TMP_CLEAR(buffer, buffer_size); ++ TMP_CLEAR(initial_iv, block_size); + } + } + +diff --git a/cfb.c b/cfb.c +index b9da3159..b1b01b9e 100644 +--- a/cfb.c ++++ b/cfb.c +@@ -83,6 +83,8 @@ cfb_encrypt(const void *ctx, nettle_cipher_func *f, + /* We do not care about updating IV here. 
This is the last call in + * message sequence and one has to set IV afterwards anyway */ + } ++ ++ TMP_CLEAR(buffer, block_size); + } + + /* Don't allocate any more space than this on the stack */ +@@ -115,6 +117,8 @@ cfb_decrypt(const void *ctx, nettle_cipher_func *f, + + f(ctx, block_size, buffer, iv); + memxor3(dst + length, src + length, buffer, left); ++ ++ TMP_CLEAR(buffer, block_size); + } + } + else +@@ -160,6 +164,9 @@ cfb_decrypt(const void *ctx, nettle_cipher_func *f, + f(ctx, block_size, buffer, iv); + memxor(dst, buffer, left); + } ++ ++ TMP_CLEAR(buffer, buffer_size); ++ TMP_CLEAR(initial_iv, block_size); + } + } + +@@ -196,6 +203,9 @@ cfb8_encrypt(const void *ctx, nettle_cipher_func *f, + pos ++; + } + memcpy(iv, buffer + pos, block_size); ++ ++ TMP_CLEAR(buffer, block_size * 2); ++ TMP_CLEAR(outbuf, block_size); + } + + void +@@ -235,4 +245,7 @@ cfb8_decrypt(const void *ctx, nettle_cipher_func *f, + } + + memcpy(iv, buffer + i, block_size); ++ ++ TMP_CLEAR(buffer, block_size * 2); ++ TMP_CLEAR(outbuf, block_size * 2); + } +diff --git a/ctr.c b/ctr.c +index 8c6b4626..217d1abb 100644 +--- a/ctr.c ++++ b/ctr.c +@@ -137,6 +137,8 @@ ctr_crypt(const void *ctx, nettle_cipher_func *f, + f(ctx, block_size, block, ctr); + INCREMENT(block_size, ctr); + memxor3(dst + filled, src + filled, block, length - filled); ++ ++ TMP_CLEAR(block, block_size); + } + } + else +@@ -173,5 +175,7 @@ ctr_crypt(const void *ctx, nettle_cipher_func *f, + INCREMENT(block_size, ctr); + memxor(dst, buffer, length); + } ++ ++ TMP_CLEAR(buffer, buffer_size); + } + } +diff --git a/ctr16.c b/ctr16.c +index d744d2a9..ec0abd72 100644 +--- a/ctr16.c ++++ b/ctr16.c +@@ -102,5 +102,7 @@ _nettle_ctr_crypt16(const void *ctx, nettle_cipher_func *f, + done: + memxor3 (dst + i, src + i, buffer->b, length - i); + } ++ ++ TMP_CLEAR(buffer, MIN(blocks, CTR_BUFFER_LIMIT / 16)); + } + } +diff --git a/ecc-random.c b/ecc-random.c +index a7b48d6a..676f5933 100644 +--- a/ecc-random.c ++++ b/ecc-random.c +@@ -36,6 +36,7 @@ + #endif + + #include ++#include + + #include "ecc.h" + #include "ecc-internal.h" +@@ -79,4 +80,6 @@ ecc_scalar_random (struct ecc_scalar *x, + TMP_ALLOC (scratch, ECC_MOD_RANDOM_ITCH (x->ecc->q.size)); + + ecc_mod_random (&x->ecc->q, x->p, random_ctx, random, scratch); ++ ++ TMP_CLEAR (scratch, ECC_MOD_RANDOM_ITCH (x->ecc->q.size)); + } +diff --git a/ecdsa-keygen.c b/ecdsa-keygen.c +index 870282b0..05dd827a 100644 +--- a/ecdsa-keygen.c ++++ b/ecdsa-keygen.c +@@ -59,4 +59,6 @@ ecdsa_generate_keypair (struct ecc_point *pub, + ecc_mod_random (&ecc->q, key->p, random_ctx, random, p); + ecc->mul_g (ecc, p, key->p, p + 3*ecc->p.size); + ecc->h_to_a (ecc, 0, pub->p, p, p + 3*ecc->p.size); ++ ++ TMP_CLEAR (p, itch); + } +diff --git a/ecdsa-sign.c b/ecdsa-sign.c +index e6fb3287..e6b960bf 100644 +--- a/ecdsa-sign.c ++++ b/ecdsa-sign.c +@@ -68,4 +68,6 @@ ecdsa_sign (const struct ecc_scalar *key, + mpz_limbs_finish (signature->s, size); + } + while (mpz_sgn (signature->r) == 0 || mpz_sgn (signature->s) == 0); ++ ++ TMP_CLEAR (k, size + ECC_ECDSA_SIGN_ITCH (size)); + } +diff --git a/ed25519-sha512-sign.c b/ed25519-sha512-sign.c +index 389a157e..52a46ea5 100644 +--- a/ed25519-sha512-sign.c ++++ b/ed25519-sha512-sign.c +@@ -38,6 +38,7 @@ + + #include "ecc-internal.h" + #include "sha2.h" ++#include + + void + ed25519_sha512_sign (const uint8_t *pub, +@@ -61,6 +62,7 @@ ed25519_sha512_sign (const uint8_t *pub, + length, msg, signature, scratch_out); + + gmp_free_limbs (scratch, itch); ++ explicit_bzero (digest, sizeof(digest)); + 
#undef k1 + #undef k2 + #undef scratch_out +diff --git a/ed448-shake256-sign.c b/ed448-shake256-sign.c +index c524593d..01abf457 100644 +--- a/ed448-shake256-sign.c ++++ b/ed448-shake256-sign.c +@@ -39,6 +39,7 @@ + #include "ecc-internal.h" + #include "eddsa-internal.h" + #include "sha3.h" ++#include + + void + ed448_shake256_sign (const uint8_t *pub, +@@ -63,6 +64,7 @@ ed448_shake256_sign (const uint8_t *pub, + length, msg, signature, scratch_out); + + gmp_free_limbs (scratch, itch); ++ explicit_bzero (digest, sizeof(digest)); + #undef k1 + #undef k2 + #undef scratch_out +diff --git a/gostdsa-sign.c b/gostdsa-sign.c +index 892c0742..a7e0c21d 100644 +--- a/gostdsa-sign.c ++++ b/gostdsa-sign.c +@@ -71,4 +71,6 @@ gostdsa_sign (const struct ecc_scalar *key, + mpz_limbs_finish (signature->s, size); + } + while (mpz_sgn (signature->r) == 0 || mpz_sgn (signature->s) == 0); ++ ++ TMP_CLEAR (k, size + ECC_GOSTDSA_SIGN_ITCH (size)); + } +diff --git a/hmac.c b/hmac.c +index ea356970..6a55551b 100644 +--- a/hmac.c ++++ b/hmac.c +@@ -53,6 +53,8 @@ hmac_set_key(void *outer, void *inner, void *state, + { + TMP_DECL(pad, uint8_t, NETTLE_MAX_HASH_BLOCK_SIZE); + TMP_ALLOC(pad, hash->block_size); ++ TMP_DECL(digest, uint8_t, NETTLE_MAX_HASH_DIGEST_SIZE); ++ TMP_ALLOC(digest, hash->digest_size); + + hash->init(outer); + hash->init(inner); +@@ -62,9 +64,6 @@ hmac_set_key(void *outer, void *inner, void *state, + /* Reduce key to the algorithm's hash size. Use the area pointed + * to by state for the temporary state. */ + +- TMP_DECL(digest, uint8_t, NETTLE_MAX_HASH_DIGEST_SIZE); +- TMP_ALLOC(digest, hash->digest_size); +- + hash->init(state); + hash->update(state, key_length, key); + hash->digest(state, hash->digest_size, digest); +@@ -86,6 +85,9 @@ hmac_set_key(void *outer, void *inner, void *state, + hash->update(inner, hash->block_size, pad); + + memcpy(state, inner, hash->context_size); ++ ++ TMP_CLEAR(pad, hash->block_size); ++ TMP_CLEAR(digest, hash->digest_size); + } + + void +@@ -112,4 +114,6 @@ hmac_digest(const void *outer, const void *inner, void *state, + hash->digest(state, length, dst); + + memcpy(state, inner, hash->context_size); ++ ++ TMP_CLEAR(digest, hash->digest_size); + } +diff --git a/nettle-internal.h b/nettle-internal.h +index c41f3ee0..62b89e11 100644 +--- a/nettle-internal.h ++++ b/nettle-internal.h +@@ -76,6 +76,11 @@ + do { assert((size_t)(size) <= (sizeof(name))); } while (0) + #endif + ++#include /* explicit_bzero */ ++ ++#define TMP_CLEAR(name, size) (explicit_bzero (name, sizeof (*name) * (size))) ++#define TMP_CLEAR_ALIGN(name, size) (explicit_bzero (name, size)) ++ + /* Limits that apply to systems that don't have alloca */ + #define NETTLE_MAX_HASH_BLOCK_SIZE 144 /* For sha3_224*/ + #define NETTLE_MAX_HASH_DIGEST_SIZE 64 +diff --git a/pbkdf2.c b/pbkdf2.c +index 291d138a..a8ecba5b 100644 +--- a/pbkdf2.c ++++ b/pbkdf2.c +@@ -92,8 +92,11 @@ pbkdf2 (void *mac_ctx, + if (length <= digest_size) + { + memcpy (dst, T, length); +- return; ++ break; + } + memcpy (dst, T, digest_size); + } ++ ++ TMP_CLEAR (U, digest_size); ++ TMP_CLEAR (T, digest_size); + } +diff --git a/pss-mgf1.c b/pss-mgf1.c +index 3f5e204b..3644c642 100644 +--- a/pss-mgf1.c ++++ b/pss-mgf1.c +@@ -66,8 +66,11 @@ pss_mgf1(const void *seed, const struct nettle_hash *hash, + if (length <= hash->digest_size) + { + hash->digest(state, length, mask); +- return; ++ break; + } + hash->digest(state, hash->digest_size, mask); + } ++ ++ TMP_CLEAR(h, hash->digest_size); ++ TMP_CLEAR_ALIGN(state, hash->context_size); + } +diff 
--git a/pss.c b/pss.c +index d28e7b13..8106ebf2 100644 +--- a/pss.c ++++ b/pss.c +@@ -77,6 +77,7 @@ pss_encode_mgf1(mpz_t m, size_t bits, + if (key_size < hash->digest_size + salt_length + 2) + { + TMP_GMP_FREE(em); ++ TMP_CLEAR_ALIGN(state, hash->context_size); + return 0; + } + +@@ -111,6 +112,7 @@ pss_encode_mgf1(mpz_t m, size_t bits, + + nettle_mpz_set_str_256_u(m, key_size, em); + TMP_GMP_FREE(em); ++ TMP_CLEAR_ALIGN(state, hash->context_size); + return 1; + } + +@@ -194,5 +196,7 @@ pss_verify_mgf1(const mpz_t m, size_t bits, + ret = 1; + cleanup: + TMP_GMP_FREE(em); ++ TMP_CLEAR(h2, hash->digest_size); ++ TMP_CLEAR_ALIGN(state, hash->context_size); + return ret; + } +-- +2.41.0 + diff --git a/nettle-release-keyring.gpg b/nettle-release-keyring.gpg new file mode 100644 index 0000000..964fe5f --- /dev/null +++ b/nettle-release-keyring.gpg @@ -0,0 +1,486 @@ +-----BEGIN PGP PUBLIC KEY BLOCK----- + +mQFNBFDrIWMBCgCyyYoTAD/aL6Yl90eSJ1xuFpODTcwyRZsNSUZKSmKwnqXo9LgS +2B00yVZ2nO2OrSmWPiYikTciitv04bAqFaggSstx6hlni6n3h2PL0jXpf9EI6qOO +oKwi2IVtbBnJAhWpfRcAce6WEqvnav6KjuBM3lr8/5GzDV8tm6+X/G/paTnBqTB9 +pBxrH7smB+iRjDt/6ykWkbYLd6uBKzIkAp4HqAZb/aZMvxI28PeWGjZJQYq2nVPf +LroM6Ub/sNlXpv/bmHJusFQjUL368njhZD1+aVLCUfBCCDzvZc3EYt3wBkbmuCiA +xOb9ramHgiVkNENtzXR+sbQHtKRQv/jllY1qxROM2/rWmL+HohdxL5E0VPple2bg +U/zqX0Hg2byb8FbpzPJO5PnBD+1PME3Uirsly4N7XT80OvhXlYe4t+9X0QARAQAB +tCROaWVscyBNw7ZsbGVyIDxuaXNzZUBseXNhdG9yLmxpdS5zZT6IRgQQEQIABgUC +UO+4OwAKCRCRWsxFqPTC/VJ2AJ476S2rmYBjmVkDeUD8jqEAoiTazgCfQvldeNG9 +0cjFjkdJU2ZGfKIi32eIRgQQEQgABgUCUu+lQgAKCRCBP+g6dJdIJHrvAJ9X3GhH +fPtxjAJNuKmEbELLIF9yGACg0B7IAguP2w4FzvEBB+nwyg0YfR+IRgQTEQgABgUC +UvTduAAKCRCl5JmPpsyF+2FPAJ94GdVamtip6qD4VGP5gmsvMOpTbQCghDnaG7dI +00yYHiArAVuHqs9W9umIXgQQEQgABgUCUu66wAAKCRAyCarWkNOa0bTTAQChJitT +Ico+A99v3AqjSGnQavQT7+6XBZWcRJIf3zY3VgD/aDsmZwtWmVTZIwNzwol37ERj +RkXbxAyBXwru9v/ivzCIXgQQEQgABgUCUu9apAAKCRACWrAQaxfqHr0QAP4jFQbV +jzSY1+3b7ujNkA9p+4fXQn+BiwLy10rFU7BtCQD/UwWRwl/uw49eX7+aKWYJXt5u +Z0hhFMP2KMWiOz/KXtKIXgQQEQgABgUCUvQNRAAKCRDJ8LaXneN+1AXXAP9adCRf +DSWgN8p62AGtCOjTHNtuxZayYHD4OcrOoenN4AD/f3fnuR/uULcYvdkDuHXDP7ob +5dDuqMgTNqBaa5nJUe+IZQQTEQoAJQUCUwDn+x4aaHR0cDovL3d3dy5nb3RoZ29v +c2UubmV0L3BncC8ACgkQtHXiB7q1gikVmgCglC90w/dZfmB//263FOW/I18/t0MA +oN6Hx7doF3NvvCDpeCAqQPRFPitUiGUEExEKACUFAlMA6AYeGmh0dHA6Ly93d3cu +Z290aGdvb3NlLm5ldC9wZ3AvAAoJEJSP1qDhD1Auyx4AoP5JnZHPOCnHAO6jGMMl +4xfOoABUAJ0eYzrHNG/Ei0yzyyGdKdgiTW/GK4kBHAQQAQIABgUCUu693QAKCRAx +fqbGmBxOSD+LCACSqW/pB1BEaEPkxmJhkZgex06SqOAJza2Q5exmN/Nk1Wh/ZbTL +DAgjq3zntQVqttrSxDCItOWB20vsnBS5xaQb7TwxV4oLrhxmqUktDXyWTrBE3qpL +rq20tx5L0l9HbClvnYkxeBlojGQt5m2KwdKUnTWE5GrxvkDTRLonKnL3nXDvlTOJ +QV3Dez6wlEqBYYj/fntw6eH8k1Gz7zYW2hrNZ7uzhlI4FYEThQv8mXrxp1TW8E0q +l9ub7Cv+X0ljbhC7cXIcOcvtGesXduYACwGgHcUeiR85QdyFVqoMrMSlUqaxEbP4 +1Tt0p4KDy5slR0OIoSJ0ayzFCJjWxZ/sYRj9iQEcBBABAgAGBQJS8m+gAAoJEEH6 +0dUhuUq5UZoH/3e+Xtg1CTT6P5XndYxT/E5WKUSBLZt/7YRzPe5WY7gov32hXk7e +7ThGGamQcvUpfcFaGqW86x+Pi+7e7e1A2VUaGRyvoCRZslOs5/PsGu6/8QXiQjJW ++EUtEiiq+xh/ci++bHDIgo1/QzVAlpODwYlEgEvbV+N97Tcol0Ikp+4HWjNdXyn6 +0JYPcHmLE6cEAOuuKgOr0euWa6/1SINEiTtAhqim0zsWMzb/ry2rtZzkE0tisDq/ +fDDxAmqRuTrs51yRpmHobnZBMT1cDDuNplXqh9Q5YWRncesDSQEYyiy1Ffaq4TvA +6webey/0Ns56YkX45evtMJrTWLaKLzTIzgKJARwEEAECAAYFAlfvfZ8ACgkQcdjx +/zaMZne2TggAnYDb1+f5WiTit1fJIyuT0nCdFEjKc25a7qGpkXMfCW/6k0Jxp7Zp +0ayDgOikLixF6tnfkET9chXOCqKK2av81QQAsWCCJUTcmeuUlqJXuOOITmjbSAda +pbDWAF6Bsi25ofBSLvJPJ5R3DtoVdq0sp0KaL0ruBGJlbjhctAIvhO49BobfYOV7 +7MFWqo4ZJGBIaNmy/rQO/aZ0tPA5Nw+2sI7N2dR2K8Wf9lrl8UijCcZJdZpP8mIC +yvc2xCECWtBrB7kbgBQ9wHi4luPi1Efm/4ug+P6YN2XrpmGb5gfZ87z1Pzwq60lo 
+hDLRn3lfRICiCqvvvjR/Je5zSmzFtB4maYkBHAQSAQoABgUCUvfMxwAKCRDf0n/3 +9qDzEhUqB/9JfDebTUKfH+20MjOovr8qVlTirtOlWneR3tBfH6kODfWhFXZInCL+ +GEhVZJ2vNNT4BTEHdvjfavag5hFTEgDJWUIeDv+Cvp67BSfo2s55ONQN2rRy3RJ0 +l3XD1w52MOlrHqAzkfCqC37HlwBBgWk5Xc5/qTdLuyhyu+UfqEhnmxiJL4C9hoxg +o8h95UQO5qWD8zPSLZqELsmckezIAgy0DMqL31U+LXeQ8w/Ua2XoJFkDMOWybYPI +7CwbBvA/J9UiNInG98ds3jN5wWO+sYoMV6LoYIEivWGoCqn/tXs+PM6zYVLrk4yK +Gdpz4n/X/MZ3RTkaZNxuHfZbnP3Br0V4iQEgBBABAgAKBQJXXJ+ZAwUCeAAKCRDw +S2N2CHpewYvXB/4ywSL4+7Fakso++EzrYAbi1KP0f1GiTBA4SjdCR5IC01/+fNg3 +aqMWcebY5Oyeh1gUAnfF0tyPVFid4ZKbUbIMDCJK9hd4TYyp5qJ6t2T6hNT69BkN +cU+BHU9uA8VYODUMNnvzrrltPvQm8av0qcuBcB70+zJdZsgELDS2AODV2PLkzZgX +dH2ROpNLKMdtfM1k3JKewvEQOPK0OlhAYNcsu95mHeeisv/HHhNZiUlgqNUdpp4N +sGYosMRDnXTyWaUS3Is6a5xIRUWllBV50imMd+AoEoIbwNloDeh9cmv0O7+rYCb0 ++jEKkzd2a4WEJDLy49iQf8pfiwKZVceTuuSGiQF+BBMBAgAoBQJQ6yFjAhsDBQkS +zAMABgsJCAcDAgYVCAIJCgsEFgIDAQIeAQIXgAAKCRDzWZ/4KMZymKptCfwOG+kx +Oaj1olo9JK4lAUpqcrN6lM/d4Eehbe8Nn+O/Cibs20mnb8p9Lj3Jz9ugBkzkKu+U +YbYAiYHWG70aHE45TIfYzl+fOB8fIYv0Eq6QkmDJ4+nUCm7tGECBY2cfID3mL8xv +Ld+BQjRM2dPUUGasv7pEcjffS5MBLj+BVquPOm+3aYL+xgvkh5WXDHKOlVvBSzk+ +yjGpR9OnJnYRT6xUmACBw7twrGIQvJJyEFffV856PynkpBMJKrWAznc9MeB1nTsB +65dktl+hMdat0EV3KvOBGEMB89lhBmj0e1Xe+j+SdcIhzeq8DuHsX4LEck73OjQ0 +D0xapqAs8Lj74mJOiTZx6/ROU5C0+5QJxEhITC8siBv+1MKYLuQu9SDGk6TqX/Ax +FPTVP87sYB6DGigWWnmCrNEturNc8BjpUF9E/YkCHAQQAQIABgUCUu+hmgAKCRB4 +kQzEU7yhDhRpD/sGaJwM5uV6ngy53C4emTE5NbkWF8e+UhG2GhdXrEhIgR1HkPY9 +X+tZwTZH4xrbzVax1ejeNzg3QM/2nCvxQoijzqaklCu4YKYut/cJmRUzMywRrSdx +42ty4rKctLSYlwM4CsQzFbqpJWP4szW/Iipnj876zT5GMRvDGyPLrdAG1YYiZBFn +1gzQCVbH2+gFokhWvyRTubt7c2X9Nw0TX/o+1yB5VjRBEwGWgoE56VrLosomGZ+I +EdRdWr5Ax6iProEfrD0nY/zRB+AQREPQIaoYVJA1MJOgyZLgSCJR6Eb5mERr7aIy +0jQ6QKbDPvaOJq/UxIPgDh2bZPmpMu8MpsEuZgh4yo/GSsWBiJLz8632j8ReGYCa +Art44jGkGfv9qIAnEYmnxsJ6UZ592PLdhEYh2rgDJ7fMwcZ8A4AowwChy2ezRx+v +Q/dUlkNq6EF1t/qEzeY2IJoeNzV/9Wpou1M98cnIAh2kuameFIpYarAmbpsPs0V3 +IBhwqLthaRXQkRas/yO+21TMGIXKTCUcZwxpdceoGu+IZvaGdXhem/GBdU/ls2Oj +JSVt5CwioyjxrZsRS3cSK6xdhHCWiCOFH8+LSLc8663pySqIOEH1lELRVhAzbZV/ +XvAmmRcQbzxwemYxuejlonFnYLFhNRyoxJzkCOTEf8Wtj2GtjYVWxBXy44kCHAQQ +AQIABgUCUvCdhgAKCRDbPna3qIpeL4ZAEAC4R6NIoHNwnhQPbUB6Wsk6bmguSoLj +MSjNgJJPqSrYc3E9g4F4oJtLkZDX6drp8DuYyitWUn7MeAXHyXjnHViCW/DOq++0 +Oo21k1jUjvH5OYo4KMC735sVDKYtMzECMiVasnVYEONWW1Gt/P7ozYdErNyu6drE +/ocUPklIJ+RKTS/EZBgqiqX4jWES71TGPH8oT8UtVzris5KG+/0XPby51uV2GOKO +p0xeCAld0+cnOVqZDb8ptiyo7jnj3Z72548It2G/1/jrJ6ksvkiZojtYvtteOouj +6VgPuOSg4cKQj4ayrupIA+/t4/pK2lfJg5PLDtNd7Z52PxLb3J2URPyNk89UMlUE +cjMgvZujBh3zI7cl3UU4fHD9QrQvnegFysavck+bUTMbU6NWKAWxTqFWU1xHIfD4 +wnoF8bDAUGCsLCwfyOTs+sZZD5CJVp6XgTMrcg4gnhTFnWeBLnFiQBxrPwdOYrRe +ROv5peODkzjp3iPMdiOTHQfAcbs/jZbpuzTyr4gw5U1oWfpyAEC/rui91J0UB18i +571S4Ai9SuF4t8byqpo6fxhaO/661HnDcomD2HRrlG+jfhKmwwF2NOxz38jDDblh +UnNmBVU/EYXdP9dfKRn3w3iLUinsNIbgGIEj6T+rVi4gEauNMxqdaiqvV7uc6trJ +B1eBrOwG9Bo6zIkCHAQQAQIABgUCUvPnPgAKCRD2iB6QiKkfo7i5EAC2KwD94lhY +GyfgXSb1SOx/I7jYFPNHIZbQB8GefAtmjCJF4aYpD7bGw2Ferc+zpseiblEQEmU9 +Ut3PIIOgqdq+/dXRbmQ5zAkKExvvles2uZ53ISSRz+S3qK0PRydFkrAQ7yYwAKBo +jGLrEImpVl8BZNO2SiY2xArYVvUI1sIG8zvamuvr764Kh36IklS8CfQvzOHA3mYI +mwAKX1i0p9VhVoVSMk1mv/NO+p5H79ZmI53YsBJwyWQ0xMu8rGjoKs8YsQtK5SbK +w/U6+MTW1DBHcvG4C7pCuGtRghCpFQQGkPdXw3BsrF9daFCOcv6z0klsRSZbjlvA +3qPizhXtWY/RR0UoYzdxfisW+RcUF5C1uijrTqHWP4z6iyKY0f86rZmvxMeiH643 +sDqiogunMw/DlY1pK3hDMgg5Ce1u0CNxAZcBJkbN0blkOCOlnnyW+hNyNBRxc1Tr +RsjhAQHpRSE4j3hhesKv8+JnmnMkHmqKnoE7csFuued4WcejbjYE3ynEglDLC3jd +8K+Cg1yUc1TzJaPiYDmNOK1jCJwy5M1osQ2c0oUVdSlmXwSa9jU6NcVuMoj9FlCB 
+7/zIZemkZ5wlTRPuFLRtnijmyjW+V8hKl9ulfnZoh/wVzkxtUnGEizy9sK9wj58j +VHBW0RyeKIN4Xd7Q/QD0zZEs5Z//ZPAXJokCHAQQAQIABgUCUvUCSQAKCRCrocsw +6Zejr4XxD/9azzHC1w9X5PkYyyXL43+87YIkElhri4g8le+q9WknXU4JWid4n4gN +luU9bsXK31f/+eI2BVyKxBXNl/CTb73Sq8Tqt0Ynp8C0xcuRIib8dH/x2Mb92PeY +n8XHSgdnMsjFZc/VNXQGE12+Qd103Zy5tjqsiKSPXoDNS28Zere/sRdT1YHnHMoF +JwPM+SuTDy+CXN7RVozT8gnLBHegPEisZMq7bDx/wT4UerBGWGPGKMAkFoRM3W3b +U5/xFsWD8A7Uv1IdiB6E2kqfzdlStrTXAi3GwbVCZhZYsqB/UmCQBxwaX9/9yRek +2M/25qy79aZtrpGpMEJc+wiBvEdrQzWq9C2efDrVeJM6Q3xPKJWTnPvbuKbXjzXU +phbocZ5+M+ih589Kji/eUtlW+U2jOdRFyNJjPVijS8FU3+6UNxmT8F7gHIiVpNt9 +YvqD9+PsVQc0f5QsROGYZ31dGTyee7UG8XdKf2eBYV5i5Nz/z9X0peascBP3zJy1 +Tg6rfD3hMggIL54pL8ShDsJOOWXdoqwzFdsRjqAq/TNeYQBZFAJMR6/1vlRsJ6b3 +jacBDBA8EQ0NYOZL5CMFtxU4HD81/Tt2389P25klOSl8Iswl2tXw885hvG/pDWGH +LMTeLX7n+PoxkemViyMtg+Cn7NrrVGVfVpibjhWJtVI+g8a8vPvpM4kCHAQQAQIA +BgUCUv+BFwAKCRDmbmdsCUlMFErBEACWzWArLyyNdbvzWJFUDD6Dys4fUaqJ7tPC +90CQ/nli8Q4SC8wJjnjiCWbjwdHpRRlFyyP061VfHb40OZnAYLi1WwocXj5XxQ1X +tQlSLGSSYMKzWw6gKJGdtc2qhkTfoyINtekKWo1+ODx/SPfhsQIzXEZO9ymNy6Ss +UcEdcO4ADigHovE7+aBj1It8yw1uNdtTNEVu1nIrUp4aiUsQrx8LDs+DTy9/LtBa +gJDBcWrjuIuQNVX9g8Ul9l+3HvavAJceyzDycARFu9cR3jO1oDtJm8dnq97OM8Z4 +TKGTnmsl+zGBY17oIob773qVSWJNB5yMGHoMUOiItpRlaFlZKVd2AwT8ZhsX0yjE +z5sAgSMZmh/Jhueoz3hwub5OnB0YdfdKgeGqpEmbyqL8Oqgq9aZ4lKNmk4uMJiaf +tJzMTQkISoQ0b0Wdxav8ZiHMehr45mUJQSD/rLTzE/WrGjvJ3xDje2oyCkoWups3 +WTnCPDdYkae5V1AM5+4vz+V4Mc/0Owxvxr5LMhBj9QaZ7Q4mrZ6p7Sh3GDnXEB26 +N15s9rHNMgkkMSoTke4/WphJIlguua5i4WOUw+wOV6sD52R0XUMEdXa6NBvBwAhW +kBxgtsVdkjK8EGRR9wuJJIbLc3OjeJs/Rp6od+L0gEx17F9r8tuDUs9IV9uI7Lx7 +fJG9uYziTokCHAQQAQIABgUCUv/N6QAKCRDJbos5REp+x/SXEAC6uhJVKk1qd/L1 +A9p/xH8kp28b5bokWHO5Im1Bfu7IQeJcD/SOs0hyUvAB1Of1boHJBehtcOikFQLP +Y9d6airkAQ3I4/BX+ezEeZJdzbfVJDckESXHYkF633TtGE+n+udVr03JO+ZJic4U +uIYOJ4HRdiUTnN5dkIL61uioPHW5GknayRSAfk7NhOyo5Vot9YauSe3woAdhzKyd +7oWNKhuipCyObHxE3ICpYWrOdDy0mQh9livf3ebp3ltpXKASdd2+F3VEIUPdO11v +K5J/rFWoD7Ume4V7Hz3KUJ8wQuSXXS1prcsW6uFC8A3BcFIyq2c+vx4pj4KuicBW +rNIuZiYf9YKRYSEdc/Kwr0PE3qkxBogvsXcWCYMC9mWMKVhpFSyzEgaDoXg178Ab +oVhQQdLN5rtw/pJ4nLHEw8u3+akqc09sk6mqf9GPSlaXURSGzl8Af+yrXlYJ2z26 +Is8pdny5VDoBOlAaDNcbDtaPUUJxJSjXbPU0guhTa6EvLNvANhLUJ1hW2y1C9Kyq +eeeFkI8ttPbHGD+/d8T3fMNBBE6vMxBFEw/6qZv4/3zLV5gjCGeakXpwmOP7makK +ofqtXJhM8MVL57AAuDVlkh5jKWq1tdC63bVnSo8+2fcpVaMB8k86mMNq9UhFYGkC +SAZsjjwFHiy/XzHtTR9/Uhe/PMt0o4kCHAQQAQIABgUCUwp7CgAKCRAJllHuZTow +Chh4EAC38nWflaXvyRp4KjW0uv1LZ3g06EX9GLj10JQ4Ip47q6//XzPNYcw414+v +Z4vyh1Sq63dQeL7Zn3Y/rf31jk4pk15sLAkWYKZYhM9MS+1RZiLiHFznDaviSVJF +LOAnxTaces5lfiqqmSw1i7KuyKSgwpP/NZN+BZhRRCSlV7xKUnCI6LDJ+JpREtFn +UQ/9BfV0GTetTsz/y/eTFDBmX5LCeseD2qWf3ejDlawmhaG5fNgPix0mg0xEInlo +2aJTX/t1Tjqr6zOg1AUVTDLrR3fOhIHIQLTSxOg3K+lm1l9oSHDQfUK3rXLXkqqh +fcgD2WttwCNbPNeTN+FambwMtG01Yp0gxbee50WNmvsKeljdPh+ShcvUtkSx3NTJ +YstonOhOI7XL4LBUPUJapDc8HjnINxgqzIueHS/8xNBowHMK/R9Dm8FLizkl8s58 +5nqTdU8nuY+P0PdeOZETkkhCZHHWIMibTDwb9AtapmIXepMXPq7W0vt3zBcPQVML +SnQelEDKkqPno4Q2RmPKj7z/6Uckt0nGWVsJupd0CclJxs7GcaGoANfyPeVwSA89 +DJgl63zOn+4RgH5TPFMVd8Aa7sUuPSdUcKUKxvCQx3dyaypOnBdryF0OwlngljTV +WZEuFVxWztxy7y3b6ZcQDo54OCMO+5ALCEnjxxxDpeS/ftNd8IkCHAQQAQIABgUC +UwuSNwAKCRCC1SR7gb/ccA/ND/4xt40bAFSKIA5Bc8MxXj2JCeELVoisITndIhj4 +E+7FU52+9yQVzPZnxKYND1sHL/2uMZ153KKGJGAW6fkf4ldQStgvIGlPJMnCt5XU +gLE6aEq4HsH+ud9YH7e2UpkQKt2P0pYYQtf9bhGHIw708JtJ39vUV2stdya2o7Em +3SWensfyEtDIPAbvx1A/bIFn2byyCe+4V3UKe76Xa7YiO3alxn2PzAVlm61woctZ +VOkd/kxo2KG5/jw3kclgz+z64wUQD3ahhDt2P6MhbuAMZye4Oc5LCy6YWUgGB3CY +kHv4zFvfJdobCKV9uSDBBJRHANAuxhTvREXnzF7IvQFjXUffb8Kn4oQ5h+Iy2Q6i 
+7dGyVDBPxcB7euH9kHtsCIbAXzG/1/pxp9+EvHL/k1vU/fye2YJb5h4jBqlbuGqQ +iecb3rSAMGDo0W/Uo2jkFRERoMPxUNHdZ2MjEYYTN0ksnB+GStludBOSouNXzWqu +T4nzhtP4zAICHC6j90Po9iL79l54AVDvzbKeNMvYupZz9irjrFuyYCBKWP+Z2vC1 +Q5uQCb+0MGLSnkLGyLb+G1evRcA9GKCeReGeLKGf642iQxDvxMW+Cs0cBNNvjMdw +U5kxTHEXyj/appSDwW/VlWXdkFYE2zw4r1kNfIksq20b9WIzQGCDnctH8TGOJAML +UfTd34kCHAQQAQIABgUCU2ePJgAKCRCLWICvrAIYxyaeD/9fevRzlZBfU9PZ0qfk +SvAmQ2NbBRXKllwiwE9u1YXd58g1S0duaRDNNzwzqSuvFP33nAjFpnZ4oWpBzX7t +xunx4ROOMAFYoqK5cavYcD9WbuOO1p+FJXb7GfOdnkOwakE+EfJSMjuOu+E7jaia +aCy0SkGjSI8xEcqqeh5nKO9+K4IRDWhQHIDu7I8nQ6GwwBCZyw5X/D4fHY5JMbKR +IpaPMLuQZblzXE5qZq/Av7iWGao8Audvczh5kpAQGVctbNV57NWXLwO/4eicOrzC +OxaJJrIjeeYoKY5KNQlAxE9PEisZXbU4Z5ccYuoJI7abQ5+eyWUimrCAzehM3om1 +DzFvbATj828fsvrU3oQB16jraPZ/9USfTsvVVzHxDj7+XK9wwE0rgf9/Qy0eVnV8 +fOSI3I+spu6im2K3QMbCNqHMwl0mDg5BOb04DtiS2pHZ5xzEeaR1sHcZwYaTWDaD +SDkq+l6JGJmXh+JR/+KwTnDAlWWqBtrfmmCE9ZdcZCtFEUBa/lO/iC1HRt+NT/g9 +BH+w42GGWbZiXeS7KdNaWLbA2215hIWnrrFkAL8VJiCSKK/Ix7Es7d/Wa/VHWvWU +RZrYzCcJqwq77OE57sRx6FMhJsyvnhXt4SRf/XTTbP64TfEKhutn4tfTbrHoZ0GC +Gi7dir4+jt/vVpniaP2nk2ULuokCHAQQAQIABgUCU2ePNQAKCRBIBLS8P1Xu+zjI +EACy1yVkGdJNFe0csPfW+SqW+D1GcyqQPs7vvnoMeKcaxepHTGtaF8leRme96FiF +tazbmInmKJE1bPqHUPR5Bkar9slKcU1Gmx54/4LPBxCBE0hOC/uS05Wk3W5NpEcY +2DbVgvA0HQX3BL5YAt5tHRXzafS/qhYYCpoNUCYTNKTOCAEJNVFybHFKPG3hxepv +4tBX3O+oFiGxIg8xkgCs+OLPiffbWkGKlDDe5G3sloEfLXBn1JhxruDK0RrY3rN4 +PBM8WNBuhzfR+UW/HEK+tPUAQYn61G3AkFZHwGzICKfhtMtyXP1T3W8NIrJpGWvQ +MP3V39czra69NvbKrrTrfFisKZoqeBsngdhUo1KV76/067viTKc69xfQ/uF4ccZ9 +b6dlxY6xASDKtIyFzb7bj32ISggYDHNo94ZZKFhIEKLSQBdTv7fShQsJfNJdVVod +JQEt6s0jipEEoxSjymCviw/PHysY3CNsg3piCQgKiLovGzss+Oc6+t4uAXKhByMq +5OrWJmiphldymURPkiRX1CHo4D4BaLRJGJjFZu3U2XHcY5oCWQIJQZTSOf2SLx/D +buCmw/Hf+jA8btK7p3ph6exIXxG1Dj42YFKYgZuGIaPbWQuYH7JLuSj5V614pBdI +e6jU3Ho+E+fLbrcoPATHxvio1AKlT9iDpSGTspYlO8fdkYkCHAQQAQIABgUCU4oC +GQAKCRBDAE0JZl/Vc3kZEACOe2xr/qvzA3B1F6RjTS6TSgVAm+dxWjA6EHBFAor0 +Dq6U0/Ke31m+XOFrknA3AOQKecq3sTUTWrLn2b/s78/1xmp3nRLqWt2mnTiPtYVl +pvj8GVtGcOwgodOub8VKmMTVikztSv3fsa7q7j9aM509n1SNjNZnZG4uQ+7FbGhy +VZFUbPxb2PYDTbP7AFjj9NWqcnVZzQ1TGrhiIV5G947jLkOSngPry8WR1zZO8Tfp +OQsM7C8JDasvh0gh92Ces51YB0JnHeGmorRPGQbUvUU4ebZwQB4WNH5wOaXAkSol +kHzKOF32tPGNu9g6QLzpu7UR8q7atvrYSieVo/JuqAZsy6e4hw1HktSyajtSw2cQ +cuRDFO2VTa8oO+ulMy1fjuUki7jn/gjyYPwAQJyH1w1UBlHWKvDtQWsWkq92xWJ8 +Yn1lOS3xiIXBAYPHxnvXZOBulh7xhc2h4nlvavPkOuDRxgINBoS3ja4MIKXII698 +0w9t8K/8j/QNJxtsZ/q2Hha1gm6pq/wOOFxKLUD1DZvLMs2Eld8TQBV/Gy730PWm +DAk/PGNbxWQI9nPbdwXxp+WM7W0EOzSxaXjAV5WApc1otBr3ZK+kfGCgKa1dCZWG +ZVMnPX4sF1z62UDA/fLWyMhNKm49sYi00q7Q8t2fmlP2WpfRgAsMHPU+/eHX7uY8 +f4kCHAQQAQgABgUCUu7WsgAKCRCL6HmwKHMeHM/tD/9KyMI9QbgAjzqenBjyCYFa +lHJTYTRYYRserhEp1xBkpwkMoTxm4iuNkVZgRuLSj0N1GSfEr8OGdwN6b9hRuKRN +1rfTVbrZg19M4n6hr369fNuMWOnhGY9wZm0gML1JNefn8p2sY+q1QDhBhbC/hZF4 +b1HiGMKhczZq9Bvo0XRzUjdHvW55rtxb0grZxUHfo8M4+vbvxwqxpsgdZNjh8TZ4 +H3ZqH1ClYBsdP7IBTFhFuUtLqyUdi3mvVhSzR6ioIqjXtJBZnzchoXY4/KsJdBtO +Wn3YAbeZoNws0mTUmgIheLUUDqj/PcLpLnZxbdJ2u8VhJjbX4TQ7uhUN+MfU9VrY +pO08waghSbdsrCkONjDd0E/FG9GWBH52AIUlt2tMw6fHZOlQdGBoxzS4RFB8SCFu +nVW25DJyK/6R4UP4kDE4qv6Grt7ck0lRYj38KtnplrjWfYRTfJssf0acfzuw/wFJ +gd/PNrAXewkROpJwzuFCKpbamj0sEmLfplnCxHilSFhVWKr4rY1auic5iF2GjXe/ +fU2Vxvd6l5y9NbTD6msjc6kWHMS3XWtGgE1PivUUrVmxH1DXMjFYT2HOy3dzj8th +s/ima2NQcYTTYvBuJlHCprDEVgLSWFJD2kERNBeG8WPayA+QKYe6jT2TeA+hQ9I2 +g8KMul/RAFazXEDhqHa9UokCHAQQAQgABgUCUu+nPQAKCRA4dwaiaZjlxKGgD/9J +cFkHJAAuP3MyVvzz5afT5IUHTGmeDo+KqoLjBz9iAUZeWQGZG43EHnd4HqwdnN57 +uFuoPkFjODAjqECHwuU8o5am7GWKZs1zAQgt9nXM/hGRJ4zLRHZv3FbBnhFuP7TT 
+CbvS0zoxL5OhUhT7wRnmtNql76F3RqC7jA0UxeFVW8BF46Cwo3X1baHMEX8xQ/bh +4g6VW2Y8ErOON+BpNmSPGs7OYrsopS9uO6QanOQk3VWhTVZryxZLtYKoWBzZ2Rzz ++aQdct0jOouN+ZL7V3Xdrp0TjoaAqYx9n98gXgq94gR4j1OtmE7oPLrGtvs1mOZc +otW1GVedaMlwVGXiJbyovJgsnAW7xhsvp0++SOIouviqBjEccuVKmVk+CWNW2Het +CTG9Xg+Npqi959hHaDRMiePPWlkIOr1xclydDFr+yo802NA47V4v4Oc1pcd9tr19 +TBMgECrFChx7nMllu47/DipCTnBnysjOf2jWoTXdUNTavzPQtryEmiOM2jt/Yw/R +SzylnnI6ZAS/LrcUEpFWZzfz5+9bft2hv1qs+PTjneTj2KEM++gmtn21l+k090On +iYuhbWGdm70GuU5q83/znKr16gjYBNcnt5Td/j9nz2i1HClTCdUPdYkjZe6P80+i +7fvsAPkrCnJDS3mkFD+ZiSLSz6ogdrOFw+OF1e+rvIkCHAQQAQgABgUCUvAzqgAK +CRB9Ks2vRlPPKDhrEADArFcIXLsYbp3bmllxtyAmqXntuSt+LAwBJZ8HrLuxmtkD +HWD7ByWf9rsAzV35ZsQSmzIXow5dZi66fEZ9MOHTRYAnEaNb4pkJcQbGfI8iKMCv +haffp1rPcj4A/kDcufMSWzGpopqcOF6i/o/JPnpBpExoUKFaBwS4tFr2WUtM0kjT +aOf8AS9o4xCK9+n7NiQqkHcZKMlmSTrU7e/YK4osaJOaBTOCVpLSuherUY0yXMa6 +vZM60JBLVu+cApnJRKGX0m9UvFDh+9d06sPywCki2Z3G6pIZo+TpbshdTgYeda+o +EQXMJS6acd6O6cZMrqKTnyNWRIOPi0aM3e3Wwa/bcwITLsgTfmSE9BDmCj6jByqZ +JPKZcXINfMYT4g3pqPHDTae0jLrZTOk15UvTqzKh2tmQmE3Jw2nHksuTCzgd4CZC +Qwtg1ZSGV+up856L8fR74Q8Q2MhwH9BNJe+ET/vyyREdWLKln4rL8+nve5ucSU9P +tCkx3pFbMkwterqV5uHFB4VvtbIJr+0Zvzh3JFlyBD/oYAPXx2oEGriL6Sfh7O4B +iybQLODZeaIWJ6AcOPwuH1jlX19TXLM4BfU8R4WDbQkGxmXiiZfEjrh7vighzxty +3J389ud+visUc4UuNbVNSV1FYr9AYAgM2bKNa0UN806zEu6grzxQ9vG35rHy/YkC +HAQQAQgABgUCUvIskwAKCRBqbNW3ZWMtOj+9D/sFICkyrr33v0QlbFKFY7543Ikd +nAMJo/XHVMOfeVzezgDR5/mpjrb8wbfLXEpYDqgV6UmLQzBwJlOGtHOyU+uzUmmZ +WWsBQh7EJDREW5OC21XIHbGkIwF4i5iYXJFF0KL403eYGOkphvGg4HPS5D70IIZl +ZAhYe7/bU72A3Nl41U/OZ14w4uDw6EXo+CraNkno6AzmRg/jKPMbrvr6C0k9rwpH +VDoLpppYEY7iD+gcI1FakSFbsUxrKmlYJgDSHsCDj/IYoizBx7W7acykxHUPwzGm +MxwPsNYHnMHIyc357erpbDo4zweKK489gJ94M7pvA9Te0x1PEi+AtgRc23kqIiXx +THycRzJJ5GbpeVoTzbIuQfVGP4O1/iOIhJ8jOSCbeZIK8RU1n8lDt2TSRZRL1n3a +kczsy7BAJAICGBM9R5o1yO5Qi78lOr6D9FWGCCHHyZ0508wypy5luZUTps5ZNy39 +6Rk3y4/9Vqx/8YQ3pTQTC2dqDXbP9IOPPAs3zA+fPGqZEKCyUcjD7cxfinH3vQAu +xOdo6ZBppF5QeTTL+UyotRetdeq5p8QfJzZ2dvbxVOzOj6g2tA6iiyw2LtaH6ByE +Go8MLZhDLiAaHLuJF3S/TOg3B70q44EjVqbWLZVRbNdeLjCP+M5hsRSLHXrAfww2 +qadD0049UHCDXZ86Q4kCHAQQAQoABgUCUu56pAAKCRB0gmVeMH40UhT8D/438F8+ +5u/7HhWs1f95hKMeoOr5Nu0UTh+hTlTEw/752sHdk5XObKsM9T/1nsvdQAra3hYd +qN6MC7oXMM9rD1/9zSpP1BjPLUmULsZEB8oZ/HbUiNxsKp8PWA3MmTIbeaxX/aDl +xR+uo8d1hcRLcle2AYQPgEKG1KRhdYpH62TOjb/QFsP7rxVhOQY99EPG2BzRcMUB +YmiUdoOoWvUF/fdiJ2Brif0/fqq8G7DRtq0FgICxIzahFmtnIv7dOEgtWrAxAqmn +ow5HeXiBv8a+Kjyp0seNfNUQpyb00EHP/f2GpFeHiwg2GZRsrYUAkdrpPYAOC6Se +XgfoBSZn/9eIE/FQh9bSU+8kXmHoMehcfUqUk3SuwFqWknk2IZkPWj4Z11siIFxt +/tNbsoL10V3Hs+ZxeNoXgB90PLH7VmUVqJHBsvr3ha6bwUDXGf5NYA23NQ83k/ls +TenOT0+ZKWFQmyUdJLZ0HtQcjMe11iIk9D+lik2M7n2bvGN0+m41DUu19R4YiQg5 +53AeLgclmA/lfW/2isYGnRYgBpK4eCw0SV1xUwzVrbNpNOzJobTHhjLX4ZGdx5iU +wYn0FKloDRtCaDyeeG1pWJuy+wp9DKnpkTa9Rbf0bwX8PHWH+OO4TLKTr+xcOxXP +I593/hj6yh26+AeiDqrbZ5hUKCOLt7EwOr2i3IkCHAQQAQoABgUCUvATOAAKCRAG +BPSQykBVvB9CEACLITF17WyU8rXc8jzmDCiWWWgBOr5r8jJiF7CY6QwWrvTqNh2M +BI/N0Ej015oc0ijRv6fIvmvtLtcjDe3iemAZ7olTuHWvENGMX4QnVJ03cG1MDNiu +qZxQbDckHi5uAyUWHqhFDlVeCr3anA0qYE7R04iozYi0Way9yw7yhiNd1kQ8gDFs +PtQ/J3s2qtL3uklIGvr7XBBK5mATEKU7QQuLuog6pek7uC8lyOmFSN57Q9jpFBt3 +yGJEtjZDIJaUpCv1X+Yo9IXsV3i1+u8ZNemsTld1EZfNU4txLasJXL4Xsa7eYwdR +GFn1zjglJOS3lMYBhrlW/kdsMX4HvH1fJJInyc4X0ZQ92PIpzjbaTh261uuXuxbK +VEdcbvAsLhzoLdlt9lqbYrZ8CKDVOIxv8Ia3YvJSieoLydTj2enhq5pG8c4hZSIz +DqoX3WJ2Cw/9paOyBujLHtSYanjySlcI/URyta4IbEjMlBHsgf7KN3AquUPRRw1q +IvbsEMT5pgIShw40Mj3hsIqpSVA50MjbuViY6ilj0TTHwqCUA7UMFv53EJ9728yv +iJcYrs9o7fr3gvl8YCrNrmeUPJEPz5i21jthgufVdGKoA8uT0mzEuJRYein4KOh8 
+Q2ImFb5B9clTI2R4sLHe/pBm5YHDK+53poMN1hvBy5tM32mIAZV0cwgRfIkCHAQQ +AQoABgUCUx6IhQAKCRA5J42oEJ5iRPQ1D/9nMB6+OLRV2mOg74R1B40e+k2P029e +39yLy4r9a0IhAPJRRjv/8HhjH61m6QD+Yjj96ZkLgbyVdvv3sqeMO2DoKjmOMgis +PLghprJQUfpoZPNTTCq4ULIxS82f7IxoyFC+GdqzpIIvpoRuZWqSlI4IxaGgjpHL +9O5+QS2wuMzMY42EJ8UEfcQItcy9g1Rzv7SB6eSRfFABudBYNgG1OmaANEnV72YO +PMtHu2yDNu/AMEkZQdBjfjUG5pp6Eg3iGzsUrX29s1K9xu5vIsDO54Iuhk+aeeN6 +jxu6lfeZFPFo46hSVLRQAJzL3ky8tK2KEr8bt22hiZlbcaQByMnLscwfW5yw9cY7 +G8yKxQ8EboIo0pghxic50/li1iJh5iW5Y3i/giTrpNVCGioqshOHR03Lggg30PI+ +yJ+cKOzsN+7wR5Eh2LiLlgzlcIbLHy3YCu2GzBQ4VzP++5PAiuU+VFLQ9iW5XVad +RjFPFCg/kU6GvvN6vdldgbZDhhS4lUSSAD1deZKL3DlwJYKuih0oltMD9kYu8Sp/ +p6BlZ4qIA42KamH+C6nWekQO7hTvk56tPwK6WxaRnVNgzjb1avXZBO1k/7c9VoiB +BruTsZ25SAG+FqY9EepemOAcRtvB5pLC0B4zKMntml12DYr0Pb5tf+JRRerbPh45 +Vj6nfJkH8luVWIkCHAQQAQoABgUCU3xTCgAKCRARRZNK4HrFtaruEACyykSlFW47 +Hrkrguzq62A6Wep1WtvCJDUDQLrQTtNGCj1oMcI1G53YaNCX36VFgURCL4rYJqgP +F2H/zD6yQrhEeLYJdcyYN031TI9tRPbag1Q3FFl1PP1hFBniEJ0iPkQlE9U/NdLH +3EOfbfO1AYbs+iAeHO70Co1QNpTQyql+3B5U9DdWt8uz3AFAYxwzzBuIK6Inpj6x +1HvKWTVGSsxeaIoSuJT/TSaqbnI3Ude4E0J0LtIgIxjU8s8UkU3RFNG31CGczENp +R47YN+toEE++mnobKbTa5GjSnkhDp5a/+vZjPCdjyBoUTe+2GDNPDyDF44kcaTv6 +oDdnNmAPQY8agGoIKp+zIC/wLszCd3zH9TKCLBu7N0G4Lb+PP/rfdI/TRPw1ut5B +Ylo7T4CNgiT+WftrTodR6XkyAx8L/oJ/soZrLyCMkgDzU4GSgXT1GXZSHLk6VjCV +K7puGqPlmMpWfwh3iaNMI5TizREE4FCxjzMM3N42TBx0xWBwKkisGy2PiqDEwAPs +OB0eZ7GkgkrAR68h/AWvEEQoNIgoxduEdWADXim9en/U404LBvQPmp04m9uclQes +w8AHK6cV6r3Exrc7CSRhVyPUyP7VC1Gsrj+468/E8SkYH8gQKAq1cZNnLTjGWlPb +9Jk1J+SfpEAO9dW+3q6pIwQKf1WCRReV2IkCHAQRAQIABgUCU5OwIgAKCRBgvLtP +XNf573NgD/9DNocnRe3VdfD2fn6csyYVhXQZBkILzFL1i3Nc+8K9qztMYtpjtDnW +J9vWYw4WMT+dpKkiLT64aHrTCUxOsqzoQniReNFfLDDZkk0Oa+z2qr/tDt3ZGClj +XpIaRSj2V6rf3BtjlATxMg1MSGOozr5qsfe/CxCil1JHKQSXvWoKnfeCp/H1sZM7 +VFYyiAOFXI0a5YYyHExY6NRBFVQo8HugcBFKElL4JlspdSvm6yC4CAMS7mE9X/Qo +IE0FZ0d9H+7xEwTEnZZGJ8He7iudIHIw2cTJvRGVrlHxJY+UiUJaM/A8R9aQmS4k +mnMwGVt5BVp37gJ7qHjhFRpbqd3KsY3NgMOCjFWlWWb56sGPkDdNE3KTOrv7wNy/ +TnN3NDCFoRXLEusIFKSn/jUAbJCIMtP0Z9jR9grtuA73kP8bDf9IJn/gigdYIs+u +0k677rgFQfdykduutc+tUi1H5M5OxApj9CpFF86KzJfC+VcBMV6jpj/JrV+WutBy +gIOMeUgz4iHBoo42pquEz4i/QRXbsoNRrpnQvAIINAN6gZCn028K6yHux+6YoSgd +eHw1xUn5gUFt2lWkO3XcMQT11eEey7ojzeUhnZeBPlnbVv6EJEO/5vHpjWn8C+1U +l4S3Q9VJThQKn1I02PKf21HX0cubshjXjAnjA7JOt3QsxILtGWXWxIkCHAQSAQIA +BgUCUveVrQAKCRBbnqFhZpDPlHxaEADK/1Yqdvt4fPh0bSuUoGLeBP78CAN05glt +bLUpr5X4wx50OI6JnejC98xhoLzg2z1pmjHJY4LeY8eR8xm7+mPiUOLe38CHG9Jl +NOjFBZ5fadSZGJL1B+h7QC4oG6qsepsoGyOvhSna9xM6vuPWjDD3EBxYflt1ac8u +Wt7xPTgK7FJqHobUBdUoViyaaOLQ634c3S4P4nfPnQeZS0gVe8iJYZRsL6O6STX9 +N+w2xzMPCgkuYILQOjWeGU35Er6lGa1NaZb2gWhnbo4uw72TEuodL4EY49se51C8 +TuKDdvVHRjN4Dg0PttWzZDHqtGU1Zq2vYfV7T2M0+dMVxHRiI/vLOXRk3GfXPLl3 +XWRMxMLH0aBe1dY+Dltr0SYr6d/nZKpGUKVG6XLsJSZjsHVo1BpRYoTJ2PolbTd7 +ncBI/dHlRf02zcMylvrAGGsaqK/dmXnO/bqZP6i/9hfqBnyXF3W7ODU8iOe8TVjx +zLhlbpE35VLQfyppk57A1DWGUfZ3dYo4V9DT8Gp2ZMqdjA3p8JhggMbfQcOFuan/ +wC5cOw5GeLTYjMLYelMmGVeP7TUiwWDUQdb/WzUqF7Zoo7vNb7wJeXbxSoWK9Jwq +IPr5W9y4ei7/IGkSnymQ0/561RUW27dNMa1p+8NWgOF9+qLTNyDIQB+wkRrRRcLe +X2fUOcgZhIkCHAQSAQIABgUCUyGvBgAKCRDQsPXjGtuBmowCD/0bZNpCWiHnzTrb +xrmuPjyyDRfAAWQSwTHuvNKWKUodlqMIRnNCuRHLxoYQDhrZXDP8U5RHDF8Oq8OF +5AwMJWpiXTa2kwew3yXwp/Ioro1MMmHpGf8GfPJ+2zFP40B8v4T1VNYIhsIdsuov +HJv6Oe5cA2KdQUeQ00FTaSGbppy9dF3pn32cTOixU2YJLnDtd9Iq2NGiilKVTDuq +kWillGUHtpU9RS0C/5pN1kqRm3mIPWeVyEmrF4hDsGr2phqwQZzBdHGjIyxViTTb +UlwJnmZUH18N47RGv5F6T80+OtZ7vDahidobR2k/sa1H7cfBb3tiNg4ZIPdyTjSz +dOOZCQxhoXwGIQvp1yPr/G+42lZdeO+3bmRx1LsjVTEny/ht3XGjSn0bTZczQDfI 
+kQDhPRL3UNWPQr6xeD131HTncYUOOlTg9nn8E2FcX/zR9bIRnxOhzD1SV1WDuBXX +Qqt/06g8idOotGj1/AGYp8/NtnpKMs6DqJrwHSL04Jukq4DZsKS4NcmJE7Z3Q/Rp +0TCLEqAT4zbEyBT+moA4FcHezb3hpWJICs2elXa90dWbzv3tBZElcF9xF98vMqpG +WaFem/tnw9GPxvM39vMBzQ3y1tYzDH6XDknuclZCkxY/ZTFA+kXXTBPLGNv4tRek +UAd3fKnL9Uxq5oqsoErtNZaeaii9YIkCHAQSAQgABgUCUyXdYgAKCRCRgV1nApgq +SqwFEACVYopiSrIwLVzA2OLiQ/BDaIXb4nzh/M/QPv+nl1FQDpFPEV5G/E3qaKvl +JkefbwS1/1pmvwxlTrtI/F/MuP1WtotbVBqU1qEyeAHVXVsoxVzDJXd/07HvYEFo +m1KtXo7Wkky3/IZfrRCuURj2Rc3Df2Nw41KKZUqE++LfQoZjeKLBnY6jIO0MyxGS +Edq1qvUYuYA9kuwmLcwjDfwWV4Yrcgw5Bzk9uB5fOuVoQ6d41vxYAwrZbBb6JImY +rMMSMOSwJ9ynC2bdEUCSCnAUEjCUGJYA2yzK4ECT+zf9FyKxMHDROoWX0HUFOjDU +WKie8Se7ArHhijSShZMPMJTREJYMuctn6/N1FHybi1FLlhJuqGhLWyafIfPq2u/9 +WXl2lg0i9SZpnHgXXMir684L+00t7go5sA2LSJ71pFE6hjoM7OuvYvCGTBlZA4XF +FZMDq7q+RDNFkw1UvocAgh7KdarjcZEhkDaIjmv+E33rGDBn/QbXwJFGO8HR2BzM +YoJdfvQTrupoB0Nav/MGl0yRo1oQu9PHyA0Q+NJGewgYVnSsXKborcUtvYfWd4uh +FooYqjbt2X1kRgeDDlXeBXtQKAOPDoFg4hs2dRqZN8LHrIYJ4uncjNkvQwG6qAdC +ce/BJ+J2xA0pC4/+JhtBWju+pwGwxB6FwBtA/NVU0CzRX7BGIIkCHAQSAQoABgUC +UvOycgAKCRBIJckKRXWKIcxSEACDe0fs9tUbgdigsk0tASRBCVV5x3suHOV6AjaB +BLAlWAWe/VFhZ03dpyPpg9J2ZLa3eJhLJcnnpL69SijVjcc2MfG9CHkVHSsZlSUR +BaSKKOuYaj0r+Oz4kBDpFo1XBs39bdXJMeewCy36Y+vHwsfl0usbj5SpzR1y93lJ +gJdn1MRZxewTnOj1YQBu6kevRrhSTqz5YOF2ebIzqALS/iXu1Q7IXzO66sMuAUBM +rehhtqxXgQBvu9dYiKn6dS6HKeQtN+yLxklJ8Y8z2b+kAhtdLIPELyTv7jsPbMQb +A0oExNXNZy7rQzOLWYRw3PCkEnD5pfhLV7v4JsLl0nb5EBYTy9O/qdCvSSLMmscl +Dmien+9ZaydFN9KxJiAM9PzN2naZY9Madh7WHazhbgjJmZOo2MoxHMUJVq+7Uktv +FgZHKMjkwbzEqwC9e9DIa6z4Yh8apDS8FCAkUhpbfQo3Cgtn9r+RYrBOLFkJaXLW +EHifW4CdielGY64dlfAuxdTDdwYj0T7W3GPohdAoZRV8eumURQgMn4pS8qq6DkXK +ISVKcsdUd3JKycykrF9uhaMQ530vERuxeBwrN4dSowA46jK7vCmw4cZIZoVYQJg/ +ELFIxYWoq4up3mAwaIojZc8vrLRsjL0nfTwrjMXT5g+FFlZaALP7kpzInFHKlgLE +aD1TU4kCHAQTAQoABgUCUu/Q6wAKCRCpyGyN066NOiDKD/wPtMoGz3VJQABzU//3 +8eR566SAJiZ215Ahq7OUODM7ovHdbYWS5J82jcQmEmm35slgMroZeicClyyPm5BD +3JNLXVuZsOfj/wl1Ip3qyO7AdX4wWf+IuYBOTbveUK3aodQwWIJbn6jsfnjHNGo+ +Y4nxU5OYAefqOHyj+lvKRrpPTFOY+9bvKq8PLe9rAocsQnmirN4WSUP76XOMNmy0 +AxKqo91D0+aX9N2g+xyH7NYEfFJ7G3R+4biyErnev2cEMzrnYFxj1J84Pf2TRWGs +KrGeeEnBttQZGic/3/82uhvmHJb9lj3roHEo7PWh7B1JkE/XcJmTYGd7WAD3W2bW +9KW2vBkOsYKsOtZNG9VcCupXU/iI8uXrusd5t17ojXdFVAfFIzaLQUGkDJIjG65k +zqFiWCS3cIqW6M71xq1izAUgojtA7SvOuDdLDcRkyYZDgxcGPAqAcYi5NOVMwZG8 +u4BiJJiQ39p3d7F15z8bniXhFXyU3kFmi+n1LNYt3bf8kuqMlopOxl42egzX/S7J +OdY9GAl17/z0dKNEFgHF0cMEfR96oNtx8ohjMMTP7exGYf59gdOCXjFJtT1FRvZ7 +VhVn+jtKD938wUoPjZBzT9s1ZXefX76JS77H+rPn/xGcDfFcfz+dHlYnxo9rjnI9 +ppHYQkWKrYVcHGW6XxlEzci5NokCOwQTAQoAJQUCUwDoCR4aaHR0cDovL3d3dy5n +b3RoZ29vc2UubmV0L3BncC8ACgkQegaFQh6JkEJeHA/8DKGQ1LKDa6vCsiY5L5kD +jOjxJiDV0y4wu0/X5kGCIbDmj2ptm+PPLiTMH96wiGuAKPVyjh9IxG20v1pxe9El +3ROSlh8Ogv4kx+GX1CBkRO/ty6FM/1CpIJy7COTJXmLbdN2WgNicrcqitMl34Cvu +uLNHMNr9oxP5TwpAzhwTJVG8dOamEJ3u2f9soObvs1tppt6M2YjKq+B5CO9/f4// +otJXST7MPPUIy5BgsDSQtzc8hUHJo7xmFFmo94l5qhxmtTJZ2yIlvr8Jhi1HutFy +LFAay4ppoUkQ+M79uuRl+HZzUJ8dKd82SiJNG5Q36tbPr2w6XpNvwYU5+y2GKS8y +sH/bYTm5vgZPiOeZSjKVCC9ODo9AZarj5usfKYTLiQUARkGDrOHjk6rd0J3EQTAF +R8yK/iQ8DZwejrAQNRrDimHLujLROZCWgrMX3/O7BGu5s0rER5O0VSxBNXh3Oo6e +Qpolg6BgACZkQWJzbk8OEJyBk2OQYRYee/60lR02Ym4vj4c0OglZIQNjQna/K1sf +4dI7FQGupUaEsvT0xU/r9HntzE6fSxNZKV7Ut4qHm8X197R+jBtr3Gw2wY/21a+D +MrgYK9gieAAekJLOsBlicHnM1MkAMcXFvMcZXwEBA3XAAmXxirNqSUD3FDiB7S8w +vvWM7U967O1vqtyl4Elu4XSJAs4EEgEIALgFAlL2RqYFgwHhM4ByFIAAAAAAHgBL +b2NjYXNpb25AbmllbHMuZGVzdC11bnJlYWNoLmJlIlppbW1lcm1hbm7igJNTYXNz +YW1hbiBiYXNlZCBrZXkgc2lnbmluZyBwYXJ0eSBhdCBGT1NERU0yMDE0IG9uIDIw 
+MTQtMDItMDIiOBpodHRwOi8vbmllbHMuZGVzdC11bnJlYWNoLmJlL3BncC1rZXkt +c2lnbmluZy1wb2xpY3kudHh0AAoJECW/SE8Iq0hJ3YMP/2tUD1dHE96JkIRT9dv8 +MC3aFZRGgv6HqvdmMWsurUvf8XEbf5MZvbBRl2rkQsDkEoqE6j1Q7Mu2odrOX3FQ +S5u9L1V2BcGSVLksTEody2FfbXIZWDrSF/HnQ39UORtp1sWB7SxdfPMK2Dz0ZFd+ +4U5iw+4uzPynPI2n05Stn5kCJOUXEPhwO0sUliWb4BzOm4fP00BebKSPFsS97cM+ +1mtoZp+qSZo3F08xOtrOXmj/idild0GhZhGlmpddxf4P7eleWNW0PaaO6GvRnu4c +Dd6/3vSLRGNkJYI1WHfNObwpnKjpJkoIHF4h7ylK83iiSSxh5ZNr1XAtbOSEzuJb +VIwcYoVegHjPRAlULnseuxIPCVdEaPHhaxSouihGOVysBtU/0TjEiuBa2u/RLrfD +T2vIeLvjCOkoRP+fiqmO1qJGhkGcJeponKqLlKXryzNZKc6ZWDC3zlyiTJiKphIi +6zTORqByAdW6xjHzKy9hYSZQIHUDBTB1dST7org8byhzMtKbV2SRGItebvfRpm/5 +eg6nGBSdwzoyYCxO9nMeMsom8z6hyNWx5Oo5NZ9bWrEqTd/e7JjbnYWcsUpw6RFa +3HmzV+DS6HFd1E0QT806yAJoJUOlhfDbFaDtcT66jQbj71xv/cGgllr0nLx97med +oU4xeZ1dEVg2qhHLp5avpFN4iQOcBBABCAAGBQJS76FxAAoJEFmPlar2yZxfgrMc +ALuDba+PUFXbAP0dp5+W/H7ZAPf5TzoBmgo45s05dom05fbrKUYI83mZ19yGWcVV +ajrAtIp87TTFeGPmkrCEXqONKYa4vGJHGZA6S19EaLaseL0T3D+cT+1ycDi2/y9R +jqBHOPw3sB1IlGTXzRwLZ6QyaAl7du16zhZqlwVEJnoAK6+0YMTQJ6a2DJdURIl9 +aLeC7Qwbofjzyzi9+JOg4tX0hrXzBnHYlyXJVZVcSzUuQZvD8J9oZZb1GNNwOl2S +7X4UZoEqshLpXXoxaG4ROcr8/pRmEYUR0QdWap5ZTeW31NyTYArRsCvrxyZu/4Zd +iumKElqThRWcXILozpfeqDhx0AZnn1wXTAELm1YGDNbhHtBUUQ9jcLOvhbPvgF4O +bpFvsTec7dI0opEh7A0AcRoLtMXSsHH+L8NAOAfxXi8FDXyA1M2tmOu0ePQC8xDz +8/bcDMw6NUBn2ls/P3m3irVznZoK83SjpkfvwvtEEjHieuLHp5TwxaiwO8P0S2Jy +AYz3gQYWqnzhaXMcvXr0aMuRMJ3wx84R3tKdRZrxi2/OsUkxsztRTUi0eEZBoqXT +/zcFs/g9UsS2t0qMxn8RyZygP4DN8623HMLpqzLQ7f3Jr+0ggUTzU4vOHdldvWBa +jC44yqTVgB1UO8SzMoKJIIwbbtkUzQG+IFxukIrkeX2kngx1R4ENo13eqM6Ma/hI +hCZdchSGcBw9T4QKVtbMClNr1JwWNBDLAGZHVKgtBeS7b51/1doz7yPmEJ+jghMK +rpt30/dgZDWRuIzPJ4k/Do2NrhyjDp2x3axuqdXEpQjNREhi0AZWLkc0ySZuXfUp +OsKjKFY1jLKwyhNaji+VaT3xy9JmngLM15EkWfeCHe0Q00CXxBwPztbzhfCvuC0V +/cjxdwnVlhEKQjlEIqPCtnGBmm7j1jAwCWnl6uaVr9mI+rX5rL/YrMWpqRUGG395 +sKbCf8mn9rwadLjCXFHEnViJsD/vzbKbJy6CEvBzh7BFWVnzG21COc4hQIAqk6yb +JgyPhnnV7pvSctCiId5FcrGPQhSacXBDpj2flx6zqHRldtv4ogvsX40dmm5NloA6 +iGVwoq3vUOu3Dxk4Qy/TGQPWh3y6RO5VqYduswM8B6GyIVm9sM84AlV2XrSM+P7q +HsbfiRo+wBKpqcw9g5hQmjRh/IEOZgDdLDSHpfv1tpY8iQOcBBIBCAAGBQJS8Ct+ +AAoJEFmPlar2yZxfS64cAMjdwU9Q7eGyBs/wDGU6Z+cTRdNdf2WxXQO27CsU+yEr +OdN2Di1Cq0SPoMvcmK8s/bRM40r7hBkPqPZDC8zZb13mh4f8k3mdZdyGOgnMhhyG +Ns7eeiebSVfEZpaJzmJozeeac5V8NPn6K5nEutQLsyIIhVEXbr9VTApw3l4eLjMY +UrOvhmOnqSjDnBrcS4jaBdIbj16pGmJTwyN9FRYH4lF5VPHIEL2c7PYv17i4H4WQ +Fy80nyNCNlv+2LQNfMGTq49XviB2RmZ+VoYvoJC8O4iaewGFCbtWM4t4jLBKD5I7 +xGxOQnvVBjXj3si/ASsXR4tDM4mAZUpi0YPZeHM+DPIji67MDXIgwDVv4JRO9Cuu +gARL1d2fRarNE8vTuA8mKoCllcCgoli9CG3RcCf/ZrnibT3hLT939UGqt7siBtpc +iEKoYC+VtYTKEFf7fr6BYGA6DfJK/npYKjkkNiO1ptUO5d8Hzo/mdtIL74jorleX +KCLOXu2ryCMmnbwhjRvv4+KBRUHHWotlL5KokBvMNyYvuAxFDQG0FapuZDna0GI6 +cGrGdlVRbpbjD/DvFzEXaEzKksI9VStcPeFPexGDCD8JbtWBWfwK7cPw/Y7mAdf6 +TCock5xmotLdDu07xwlE4amGGfVy2YO+f4tOcNXRjh+LpvO9AC9BngzqnSCBMYO/ +AIRdS0GUuXUqCGCdN0XXkWQJK5quEIALg4Qn4mivweHuw9QeBKqgKA9ku3AA1dJR +cACbVer3njqhFxUrqusn6KnbURYigdzOtEk3QaglAL8ALC13MwE//dRnbyT0J4gU +aMDs6yF5wc0Yyi8ur1VZj5Rk71SOetqaAHgm80myN4Y4pZWC35qPQdGcD+O12ECS +PMY0LsbRT0QNVoVcuv+dqroWLPC3ldrtj6wDC2/XdlblJyo3p67TFAlX59E6GE3N +n6k+AuodyA+ha18gXH0KS9Nbtvroxelp+DVn3lHEpkUbqE6CQs+nbj83TFmNyUp6 ++sObs6ivaco8SbJBlsyhdg7irP08CS/w2YBCYsYAHc2Gthpa5dxCKkEVVM+P/vP9 +MAu/VFsZs5b8gNRTynk7STW7DUHDoK1STQ0WOWonr3BUuBqaqab92LuJ7bHwM03r +gsTIVVMPrN9BvgphLfV4zBBfSdGPnF3KDqoTSpl+QPQq3sg3avBQAjzqhwu3JQRN +iQO8BDABCAAmBQJS8CtrHx0AU2NyZXdlZCB1cCBlbWFpbCB2ZXJpZmljYXRpb24A +CgkQWY+VqvbJnF9GxBv/QuxL5zlNM8jhzk5fuPpN9i++W88qfveAaBtxLTWe2UJl 
+1/10xHfjw1a7FjJ3osYnEv21yW/w+nfhnMUk2DugC40/zb/+EaPrAIPsvyXDRjJN +f89qYLqi4cagsPI5rzTy230iYk1kXrN3pyIY7feNd0dWpkCsDI5ug3hbezCGYLcg +0B+4msMkuAGOU1Ctus/L2sIpdY277QZqFarK0xgYSGTZt/ohGXo3NbUakbELxeTg +QSdLmeH6SchzES7Kv4Wanv3LRNAZHhV7wJSrAiuHpgDn6d/YWqFwW2PoSTWN1CgC +yZNOgn5cZTCTNnwo90USwyHid/x9oZlmMNALnWk93AOB1TByOBoSbjfiK0xQAmrQ +lik+eOSYEeIxGg+B/CKy7wLqy5DEbf3IeNAiI1UVDQZb4woXJ+Kx+gvcIOstK5AG +zLXowaXdWrqsrfRQEXi6qxewv7P5hFC/e1/7645lLHyivtTj4F5peuQYgopvxKbj +j/9ky6YgPsp77vdYCZRhU4xHyWH9zXZwVCdMe5GNKYcxMyJ9ATECmKxfGmXAiqFx +tzRHroblCVH4ad5h+/dShPH+LUwVJorzp0gBx9E+ZycukMPmEVzji6CT8na7ptNo +exCIt1TH2vwwFJwLPxlRGVEcj6aA7iq54Kwp/hpjYMbg11QvQpqw4Ug4tXs331IP +keYbKbaPudvfeBy3FucO5YSBFnKFIq77Q3ID500L7e9ttnvtn/s3ZIZqY5/87jv3 +0x39o0SoAInsco3hViAYRGZrLKk4jAyM/q88YCa6r7kcPSLpYmfI+7OxFYuS/NvN +46UL2ozm3NQzmXJKiAeCQiAVys89eI5V+QOotMz/hMRb96OEM/AAtrZLGXXxlRC0 +aJJYXXPYELWC0qMxvrR6Z/0LYyKssICH5Qgv7VbrFRLlVw64VV/MZjI/grBXP5yH +arUPBruCOHe35bl0r7Sc+WAyqFgmiP8EXq8vwTQvBouEG3Tl7sScHgKNQ6+2Bhy3 +F/deh4afN+21xZ35I4HS8MZNaFSnQXktdKqSMxnpUhp8oFoshQZ1mz5Muq+Rdgk8 +4LK1B+HxXqMaXz4pcF7SuvU8ZoeMlkbLBdLCAzgHUiL1PZ9uYa8BxOcKErg46Sak +kkwbX90jEo2W2yv7zRuuKySPZaJ8VTHKQtzwuB+S5OmfE5WDzY/SHfQVGPZoKGOJ +BBwEEgECAAYFAlMuylIACgkQ/UqR/oGOsfEG7SAAiSHuPmBO/FE20YKiYK58kKJh +kfBe7g0W7rwjlXUbBDs2Nz0Fn2gyYJIJ7+BKD45bHxyL3zgCiltZ2wmVYLmTm7Hi +LoBJlcusy+0aq8xF2t/am0PFIZQZsDMCs1KpTuRAvMIhQaLZ5/24/qE9RCOWvOkj +OI/jDoxp+yMl78y6vk1te/4yl+TXp2W/y0QobJNDwFTDqFVaxEjJs8LJK7JbqtxF +6plHOtk874AabUAhdvTtX3xNMNoZANoOWMcQKpVJyMuVCIsMohEuc4aGsGE4yKNv +AnGwIG09QIRPva/HiGVl+BgJQgmfJ99iswjM83to9c+KBXlmBeIzrwB4YiNIepJi +3uH5EtQ4v7txtyX8J7XsFZV1+s0UkYP/r8ioCYv2ym3O4Ki/6KcIdSiKWdkuuqLx +DDrVMDBmlMaZyk2dkR107dJUg3CFkXcqi6oDk89y+FkVNf8wI4OUJYGzDUBYRfUT +d95zJ/6IBrTC97xaaFVC0pBjem5XNog6TaCCj6fmiUOcVqLxpB0eaFBXj70t5j3C +9Z0gVXCt2KJFXPLrwIpT41RNWalqRfT0ZHHOel17lxEWwvuBTLU1tx70DwQkV1U9 +qIwBm9uanjnkIfM1s451b662lee8kHXgx+qapYwK5fwTW0SodtuyrYg69bgK7Yls +xUJ7okkdyrkPsgT3aFaW9Ep9A3UXCkw8K8BLH2n4cMwqhR634U30MT37rP6nyFES +vZKzvo1d3CXoOPqiY8JxDCnSpTldt3t3Bdrc771cRzdx64S271TIwJYdXngsV+4i +aSjJj5XkJFiwlGQPiZ6uCrGHvq61GL6+JAgkIvdlPWpWMFOEHpwzHKNBXuKYX/Yn +8NLJHxUNySwOiUxBVLdOrQnFlu6psZUBuqpS6h+YHZzM3zb2Eo6eoZYl96i1ea/M +rvhBGWR2j3CuNwoueZy94ovEhTFWlplC7C93CyjOwXAz4ExhbjekQ4dT0ooFx2aO +OVOMElqbW0E5exwocZcAb5OPZOzYzt0YtM2NhNFIubiU5I9Pf7r1kf1yDlWLkFZ/ +twN51AvAFpNqmi3czFMBU/uEp9g0xmZ76w/g7h9qhja1hgnxEE8VCBCb2ZF5vXYL +YLDx3OMHe4hBAgrcMnWS22TfHUJtM4DmsHMztmVj1e5/nf4XdBebf5wpzTOHNo2a +nFf4XDbGr6dMf6/s/fFMbgmLypkVndrcypgTvSiBYr7nj/phiO/+5jqtC3ikhHZd ++z4L24rj8HqdL16Oz4LprE3iB/8kwgN0Gie+PvzLIe38JO1HmS2l46RTsSLtkT5X +ucbLvowrLax3yhaz5/zgRQFRymve0kpB83e0Wt1wdpLBbeUSrK3qen4PvCQWC4kB +lQQTAQgAPwIbAwYLCQgHAwIGFQgCCQoLBBYCAwECHgECF4AWIQQ0PC/w++5ewu2+ +85nzWZ/4KMZymAUCXiHrCwUJFpzLKAAKCRDzWZ/4KMZymOhLCgCaihPZmNPfMD+S +b2In2kMRmi+A7JWVNJl2iEpannG0jwLn25m7G4dX2ImKDI1acgFUGvwWMvpnHmlo +1DX1KWUTVtjEvgnSPDmPQE/Sh1PmM5GQizKVwoeZH55KomJIy8cax3zbVRNqBhwm +7p6Zq34pzCcMeQo+NsShGaLjqflTMgQb64e/SwZlSNZmyFMJBlUGcmPiwM0mX6QJ +thOlSzR/YAQ+MofqkGTnq8goE/9s1azZDaz5V0Dv+DkDU3RKRo+3f/6o2yW7zipF +0gVypzniy2ZH/9BrcLUQHGutPY/cTPOmQdbE7vaNoaXC43q61f7ti417kLLmWz8F +KO6AoyJGoEQBi8WadspXZ4lLVupNJH6pPMuf5VLzzK926YAc004+wnIyYo/EkPTy +ZowKyo47KzdMyXkn5jPltI8lABnua7kBTQRQ6yFjAQoAypTjSPuQanLuvfu9n5i2 +T4s8xFLCEQNmNMNkRO/rieRLPf2E6eT51gDaT0+xkvCSZ3OjSSXoehpDJBhsKuTq +ewg7I1vaUGdLwFvdF458p4g2vVtFvpufl/yzzs4fp4rPWXH9ez/dbaaywi4vkzjm +9+cbd/eqWfkvj8LldHS4iGHVWFd3uizQldC44RKqed00cE6y0zT0FZ7dAz2E4fyp +9ceneczktDCuEMmyjdViVOp2CEEV8lipIsY27Pvo3IFf47nJPelXtG5b5+g36n6R 
+vGM25qkQrny6OvNy7ZQVGdHPs56w9Z6Lql9hG/wspRvEPGVB14pgifoOkXx/ZDpr +VR/9tPLv5w6H1S/0NHbwF31OTF+PXO12Muwdq05ZPuJ7jXmhzNGlNmNePkkMvrEe +s8nbu3hi3ao81dAl/3pIK40AEQEAAYkBZQQYAQIADwUCUOshYwIbDAUJEswDAAAK +CRDzWZ/4KMZymBlHCf4/i9P7s38AkdWkwWehQU/TnTcmrVY5s9tGobJBdwckppE9 +UdOitPGDhif/yz2o49HiqlPFioMTHO6VouUhvsjBZ9nW+R5yQxokpkowl1vwc7Er +de/Oi7TtJtHoWrY/ZJeia8UDV2szpESdsMNdz+v/dUQWlekdPAZhU61HKNj+sPnY +wheDZZ5Di2u3zOkLTWTPsgfhM2j40gRY2RudgVVmZHFxFTdHBIz7s57kDpsfA58u +5UIpKePTbpq22pmsWRIpcdgYvYEm6LzWigbp3CovV26IhIjWwGqxsT+NEL5ZNgLl +1r6vn4gZ1NXtZ1NqFe/+5wFh+5I/1a/ycXAXjqbDQxpFu2K3d5pCQq4RvozLNVdq +jW/VIa+WSzgONLCM9jx3w60foHPEx5tqeTrwlhAHrpGCdtl01CLel6DztBEWA4kB +fAQYAQgAJgIbDBYhBDQ8L/D77l7C7b7zmfNZn/goxnKYBQJeIe0PBQkWnM0sAAoJ +EPNZn/goxnKYUl4KAIgdXi/Q+ExT3ZtE5Z5lFQ+wwgmRKLnc0yrvO2d4MJNt+Sy/ +EZK3dbzIRFWPZukYRg26Y4ySilv8g3Fq5bai2afv+Wo3322hL1RavbpItIYbDQ47 +gj9AlWyY8IN4ux3AOI6m4Di29itG5h9G8nY/vzf2S1MxnngKPVcm12zgtSoQ9Vbx +4UZfwhyqs/VqIsPNiC59Asuaj45PGEX2JBUHNGBzc8xXfJWv/Jeh3iZAkZZTS+3U +3S19/p29ChDBRCzAyjlQPPkFLBtJCyn8nt8pGZrJBJbyDTk3GW6FcOiI+Hqvn1eA +JGC+/y5zRTcG+7w+o3Z1nGIDBBo0QvhYSp9mrOjOOUpqiwLa9utimVoLYgc8BFyY +ObGKgo8l0WMMp0M6itctN6Od1Ww8rY1bqwH5uHdqQ23eUhdoyIu8B82gU2da +=hpmd +-----END PGP PUBLIC KEY BLOCK----- diff --git a/nettle.spec b/nettle.spec new file mode 100644 index 0000000..bde07b2 --- /dev/null +++ b/nettle.spec @@ -0,0 +1,480 @@ +## START: Set by rpmautospec +## (rpmautospec version 0.6.5) +## RPMAUTOSPEC: autorelease, autochangelog +%define autorelease(e:s:pb:n) %{?-p:0.}%{lua: + release_number = 1; + base_release_number = tonumber(rpm.expand("%{?-b*}%{!?-b:1}")); + print(release_number + base_release_number - 1); +}%{?-e:.%{-e*}}%{?-s:.%{-s*}}%{!?-n:%{?dist}} +## END: Set by rpmautospec + +# Recent so-version, so we do not bump accidentally. +%global nettle_so_ver 8 +%global hogweed_so_ver 6 + +# Set to 1 when building a bootstrap for a bumped so-name. +%global bootstrap 0 + +%if 0%{?bootstrap} +%global version_old 3.5.1 +%global nettle_so_ver_old 7 +%global hogweed_so_ver_old 5 +%endif + +# * In RHEL nettle is included in the gnutls FIPS module boundary, +# and HMAC is calculated there with its own tool. +# * In RHEL gmp is statically linked to ensure zeroization of CSP. 
+%if %{defined rhel} +%bcond_with fipshmac +%bcond_without bundle_gmp +%else +%bcond_without fipshmac +%bcond_with bundle_gmp +%endif + +Name: nettle +Version: 3.10.1 +Release: %{?autorelease}%{!?autorelease:1%{?dist}} +Summary: A low-level cryptographic library + +License: LGPL-3.0-or-later OR GPL-2.0-or-later +URL: http://www.lysator.liu.se/~nisse/nettle/ +Source0: http://www.lysator.liu.se/~nisse/archive/%{name}-%{version}.tar.gz +Source1: http://www.lysator.liu.se/~nisse/archive/%{name}-%{version}.tar.gz.sig +Source2: nettle-release-keyring.gpg +%if 0%{?bootstrap} +Source100: %{name}-%{version_old}-hobbled.tar.xz +Source101: nettle-3.5-remove-ecc-testsuite.patch +%endif +Patch: nettle-3.8-zeroize-stack.patch +Patch: nettle-3.10-hobble-to-configure.patch + +%if %{with bundle_gmp} +Source200: gmp-6.2.1.tar.xz +# Taken from the main gmp package +Source201: gmp-6.2.1-intel-cet.patch +Source202: gmp-6.2.1-zeroize-allocator.patch +%endif + +BuildRequires: make +BuildRequires: gcc +%if !%{with bundle_gmp} +BuildRequires: gmp-devel +%endif +BuildRequires: m4 +BuildRequires: libtool, automake, autoconf, gettext-devel +%if %{with fipshmac} +BuildRequires: fipscheck +%endif +BuildRequires: gnupg2 + +%package devel +Summary: Development headers for a low-level cryptographic library +Requires: %{name} = %{version}-%{release} +Requires: gmp-devel%{?_isa} + +%description +Nettle is a cryptographic library that is designed to fit easily in more +or less any context: In crypto toolkits for object-oriented languages +(C++, Python, Pike, ...), in applications like LSH or GNUPG, or even in +kernel space. + +%description devel +Nettle is a cryptographic library that is designed to fit easily in more +or less any context: In crypto toolkits for object-oriented languages +(C++, Python, Pike, ...), in applications like LSH or GNUPG, or even in +kernel space. This package contains the files needed for developing +applications with nettle. 
+ + +%prep +%autosetup -Tb 0 -p1 + +%{gpgverify} --keyring='%{SOURCE2}' --signature='%{SOURCE1}' --data='%{SOURCE0}' + +%if %{with bundle_gmp} +mkdir -p bundled_gmp +pushd bundled_gmp +tar --strip-components=1 -xf %{SOURCE200} +patch -p1 < %{SOURCE201} +patch -p1 < %{SOURCE202} +popd + +# Prevent -lgmp appearing in the compiler command line in dependent components +sed -i '/^Libs.private:/d' hogweed.pc.in +%endif + +%if 0%{?bootstrap} +mkdir -p bootstrap_ver +pushd bootstrap_ver +tar --strip-components=1 -xf %{SOURCE100} +patch -p1 < %{SOURCE101} + +# Disable -ggdb3 which makes debugedit unhappy +sed s/ggdb3/g/ -i configure +sed 's/ecc-192.c//g' -i Makefile.in +sed 's/ecc-224.c//g' -i Makefile.in +popd +%endif + +# Disable -ggdb3 which makes debugedit unhappy +sed s/ggdb3/g/ -i configure + +%build +%if %{with bundle_gmp} +pushd bundled_gmp +autoreconf -ifv +%configure --disable-cxx --disable-shared --enable-fat --with-pic +%make_build +popd +%endif + +autoreconf -ifv +# For annocheck +export ASM_FLAGS="-Wa,--generate-missing-build-notes=yes" +%configure --enable-shared --enable-fat \ +--disable-sm3 --disable-sm4 --disable-ecc-secp192r1 --disable-ecc-secp224r1 \ +%if %{with bundle_gmp} +--with-include-path=$PWD/bundled_gmp --with-lib-path=$PWD/bundled_gmp/.libs \ +%endif +%{nil} +%make_build + +%if 0%{?bootstrap} +pushd bootstrap_ver +autoconf +%configure --with-tests +%make_build +popd +%endif + +%if %{with fipshmac} +%define fipshmac() \ + fipshmac -d $RPM_BUILD_ROOT%{_libdir} $RPM_BUILD_ROOT%{_libdir}/%1.* \ + file=`basename $RPM_BUILD_ROOT%{_libdir}/%1.*.hmac` && \ + mv $RPM_BUILD_ROOT%{_libdir}/$file $RPM_BUILD_ROOT%{_libdir}/.$file && \ + ln -s .$file $RPM_BUILD_ROOT%{_libdir}/.%1.hmac + +%if 0%{?bootstrap} +%define bootstrap_fips 1 +%endif + +%define __spec_install_post \ + %{?__debug_package:%{__debug_install_post}} \ + %{__arch_install_post} \ + %{__os_install_post} \ + %fipshmac libnettle.so.%{nettle_so_ver} \ + %fipshmac libhogweed.so.%{hogweed_so_ver} \ + %{?bootstrap_fips:%fipshmac libnettle.so.%{nettle_so_ver_old}} \ + %{?bootstrap_fips:%fipshmac libhogweed.so.%{hogweed_so_ver_old}} \ +%{nil} +%endif + + +%install +%if 0%{?bootstrap} +make -C bootstrap_ver install-shared-nettle DESTDIR=$RPM_BUILD_ROOT INSTALL="install -p" +make -C bootstrap_ver install-shared-hogweed DESTDIR=$RPM_BUILD_ROOT INSTALL="install -p" + +chmod 0755 $RPM_BUILD_ROOT%{_libdir}/libnettle.so.%{nettle_so_ver_old}.* +chmod 0755 $RPM_BUILD_ROOT%{_libdir}/libhogweed.so.%{hogweed_so_ver_old}.* +%endif + +%make_install +make install-shared DESTDIR=$RPM_BUILD_ROOT INSTALL="install -p" +mkdir -p $RPM_BUILD_ROOT%{_infodir} +install -p -m 644 nettle.info $RPM_BUILD_ROOT%{_infodir}/ +rm -f $RPM_BUILD_ROOT%{_libdir}/*.a +rm -f $RPM_BUILD_ROOT%{_infodir}/dir +rm -f $RPM_BUILD_ROOT%{_bindir}/nettle-lfib-stream +rm -f $RPM_BUILD_ROOT%{_bindir}/pkcs1-conv +rm -f $RPM_BUILD_ROOT%{_bindir}/sexp-conv +rm -f $RPM_BUILD_ROOT%{_bindir}/nettle-hash +rm -f $RPM_BUILD_ROOT%{_bindir}/nettle-pbkdf2 + +chmod 0755 $RPM_BUILD_ROOT%{_libdir}/libnettle.so.%{nettle_so_ver}.* +chmod 0755 $RPM_BUILD_ROOT%{_libdir}/libhogweed.so.%{hogweed_so_ver}.* + +%check +make check + +%files +%doc AUTHORS NEWS README +%license COPYINGv2 COPYING.LESSERv3 +%{_infodir}/nettle.info.* +%{_libdir}/libnettle.so.%{nettle_so_ver} +%{_libdir}/libnettle.so.%{nettle_so_ver}.* +%{_libdir}/libhogweed.so.%{hogweed_so_ver} +%{_libdir}/libhogweed.so.%{hogweed_so_ver}.* +%if 0%{?bootstrap} +%{_libdir}/libnettle.so.%{nettle_so_ver_old} 
+%{_libdir}/libnettle.so.%{nettle_so_ver_old}.* +%{_libdir}/libhogweed.so.%{hogweed_so_ver_old} +%{_libdir}/libhogweed.so.%{hogweed_so_ver_old}.* +%endif +%if %{with fipshmac} +%{_libdir}/.libhogweed.so.*.hmac +%{_libdir}/.libnettle.so.*.hmac +%endif + +%files devel +%doc descore.README nettle.html nettle.pdf +%{_includedir}/nettle +%{_libdir}/libnettle.so +%{_libdir}/libhogweed.so +%{_libdir}/pkgconfig/hogweed.pc +%{_libdir}/pkgconfig/nettle.pc + +%ldconfig_scriptlets + + +%changelog +## START: Generated by rpmautospec +* Mon Feb 17 2025 Daiki Ueno - 3.10.1-1 +- Update to nettle 3.10.1 + +* Tue Oct 29 2024 Troy Dawson - 3.10-4 +- Bump release for October 2024 mass rebuild: + +* Mon Oct 07 2024 Alexander Sosedkin - 3.10-3 +- Initial CI and gating setup for RHEL-10 + +* Thu Aug 22 2024 Daiki Ueno - 3.10-2 +- Switch from hobbling to patching to disable algorithms + +* Fri Jul 26 2024 Daiki Ueno - 3.10-1 +- Update to nettle 3.10 + +* Mon Jun 24 2024 Troy Dawson - 3.9.1-11 +- Bump release for June 2024 mass rebuild + +* Thu Jun 20 2024 Daiki Ueno - 3.9.1-10 +- Split "fips" bcond into "fipshmac" and "bundle_gmp" + +* Tue Jun 18 2024 Daiki Ueno - 3.9.1-9 +- Update hobble-nettle to disable SM4 again + +* Fri Jun 07 2024 Daiki Ueno - 3.9.1-8 +- Bundle GMP to privatize memory functions + +* Thu Feb 15 2024 Yaakov Selkowitz - 3.9.1-7 +- Disable HMAC in RHEL 9+ + +* Thu Jan 25 2024 Fedora Release Engineering - 3.9.1-6 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_40_Mass_Rebuild + +* Sun Jan 21 2024 Fedora Release Engineering - 3.9.1-5 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_40_Mass_Rebuild + +* Thu Aug 24 2023 Daiki Ueno - 3.9.1-4 +- Migrate License field to SPDX license identifier + +* Thu Jul 20 2023 Fedora Release Engineering - 3.9.1-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_39_Mass_Rebuild + +* Mon Jun 5 2023 Daiki Ueno - 3.9.1-1 +- Update to nettle 3.9.1 + +* Thu Jan 19 2023 Fedora Release Engineering - 3.8-3 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_38_Mass_Rebuild + +* Fri Jul 22 2022 Fedora Release Engineering - 3.8-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_37_Mass_Rebuild + +* Fri Jun 3 2022 Daiki Ueno - 3.8-1 +- Update to nettle 3.8 + +* Thu Jan 20 2022 Fedora Release Engineering - 3.7.3-3 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_36_Mass_Rebuild + +* Thu Jul 22 2021 Fedora Release Engineering - 3.7.3-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_35_Mass_Rebuild + +* Mon Jun 7 2021 Daiki Ueno - 3.7.3-1 +- Update to nettle 3.7.3 + +* Sun Mar 21 2021 Daiki Ueno - 3.7.2-1 +- Update to nettle 3.7.2 +- Merge nettle-3.6-remove-ecc-testsuite.patch to hobble-nettle script + +* Tue Mar 9 2021 Daiki Ueno - 3.7.1-1 +- Update to nettle 3.7.1 + +* Wed Feb 10 2021 Daiki Ueno - 3.7-3 +- Port a fix for chacha counter issue on ppc64le + +* Tue Jan 26 2021 Fedora Release Engineering - 3.7-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_34_Mass_Rebuild + +* Mon Jan 11 2021 Daiki Ueno - 3.7-1 +- Update to nettle 3.7 + +* Tue Jul 28 2020 Fedora Release Engineering - 3.6-3 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_33_Mass_Rebuild + +* Tue Jul 14 2020 Tom Stellard - 3.6-2 +- Use make macros +- https://fedoraproject.org/wiki/Changes/UseMakeBuildInstallMacro + +* Mon May 4 2020 Daiki Ueno - 3.6-1 +- Update to nettle 3.6 + +* Wed Jan 29 2020 Fedora Release Engineering - 3.5.1-5 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_32_Mass_Rebuild + +* Thu Oct 31 2019 Nikos Mavrogiannopoulos - 3.5.1-4 +- New upstream 
release + +* Thu Jul 25 2019 Fedora Release Engineering - 3.5.1-3 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_31_Mass_Rebuild + +* Tue Jul 16 2019 Daiki Ueno - 3.5.1-2 +- Rebuild with bootstrap enabled + +* Mon Jul 15 2019 Nikos Mavrogiannopoulos - 3.5.1-1 +- New upstream release + +* Wed Apr 24 2019 Björn Esser - 3.4.1rc1-3 +- Remove hardcoded gzip suffix from GNU info pages + +* Fri Feb 01 2019 Fedora Release Engineering - 3.4.1rc1-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_30_Mass_Rebuild + +* Fri Nov 30 2018 Nikos Mavrogiannopoulos - 3.4.1rc1-1 +- New upstream release; provides API for constant memory access RSA operations + +* Tue Oct 16 2018 Tomáš Mráz - 3.4-7 +- Generate the .hmac checksums unless --without fips is used + +* Tue Oct 16 2018 Tomáš Mráz - 3.4-6 +- Cover the gaps in annotation coverage for assembler sources + +* Fri Aug 31 2018 Leigh Scott - 3.4-5 +- update libary versions used for fips + +* Sat Jul 28 2018 Igor Gnatenko - 3.4-4 +- Replace obsolete scriptlets + +* Fri Jul 13 2018 Fedora Release Engineering - 3.4-3 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_29_Mass_Rebuild + +* Thu Feb 08 2018 Fedora Release Engineering - 3.4-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_28_Mass_Rebuild + +* Mon Nov 20 2017 Nikos Mavrogiannopoulos - 3.4-1 +- New upstream release + +* Wed Aug 09 2017 Nikos Mavrogiannopoulos - 3.3-5 +- Removed executables from the library to allow parallel installation + of x86-64 and x86 packages. The executables had testing purpose, and + may be re-introduced in a separate package if needed. + +* Thu Aug 03 2017 Fedora Release Engineering - 3.3-4 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Binutils_Mass_Rebuild + +* Wed Jul 26 2017 Fedora Release Engineering - 3.3-3 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild + +* Fri Feb 10 2017 Fedora Release Engineering - 3.3-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_26_Mass_Rebuild + +* Tue Jul 19 2016 Nikos Mavrogiannopoulos - 3.3-1 +- New upstream release +- Allow arm neon instructions (they are enabled via fat builds) + +* Tue Jul 19 2016 Nikos Mavrogiannopoulos - 3.2-3 +- Backported a fix for more cache silence on RSA and DSA. 
+ +* Thu Feb 18 2016 Nikos Mavrogiannopoulos - 3.2-2 +- Enabled fat builds by default + +* Wed Feb 3 2016 Nikos Mavrogiannopoulos - 3.2-1 +- updated to 3.2 (#1301310) +- Fixed CVE-2015-8803 secp256r1 calculation bug (#1304305) + +* Wed Dec 9 2015 Nikos Mavrogiannopoulos - 3.1.1-6 +- Made version.h architecture independent (#1289938) + +* Wed Dec 2 2015 Nikos Mavrogiannopoulos - 3.1.1-5 +- Disabled arm-neon unconditionally (#1287298) + +* Thu Oct 22 2015 Nikos Mavrogiannopoulos - 3.1.1-4 +- Fixed SHA3 implementation to conform to published version (#1252935) + +* Sun Aug 2 2015 Peter Robinson 3.1.1-3 +- No need to ship license in devel too +- Drop ChangeLog as details are in NEWS + +* Wed Jun 17 2015 Fedora Release Engineering - 3.1.1-2 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_23_Mass_Rebuild + +* Thu Apr 30 2015 Nikos Mavrogiannopoulos - 3.1.1-1 +- Updated to nettle 3.1.1 + +* Sat Feb 21 2015 Till Maas - 2.7.1-6 +- Rebuilt for Fedora 23 Change + https://fedoraproject.org/wiki/Changes/Harden_all_packages_with_position-independent_code + +* Sun Aug 17 2014 Fedora Release Engineering - 2.7.1-5 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_22_Mass_Rebuild + +* Sat Jun 07 2014 Fedora Release Engineering - 2.7.1-4 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_Mass_Rebuild + +* Fri Jan 10 2014 Nikos Mavrogiannopoulos - 2.7.1-3 +- Corrected bug number in previous comment. + +* Fri Dec 13 2013 Nikos Mavrogiannopoulos - 2.7.1-2 +- Added patch nettle-tmpalloc.patch to solve #1051455 + +* Mon Nov 25 2013 Nikos Mavrogiannopoulos - 2.7.1-1 +- Updated to nettle 2.7.1 + +* Sat Aug 03 2013 Fedora Release Engineering - 2.6-3 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_20_Mass_Rebuild + +* Wed Feb 6 2013 Tomáš Mráz - 2.6-2 +- nettle includes use gmp.h + +* Tue Feb 5 2013 Tomáš Mráz - 2.6-1 +- New upstream release + +* Fri Jul 20 2012 Fedora Release Engineering - 2.4-4 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_18_Mass_Rebuild + +* Thu Jul 05 2012 David Woodhouse - 2.4-3 +- Remove explicit buildroot handling and defattr. + +* Wed Jul 04 2012 David Woodhouse - 2.4-2 +- Review feedback + +* Mon Jun 18 2012 David Woodhouse - 2.4-1 +- Revive package (GnuTLS needs it), disable static, update to current release 2.4 + +* Sat Jul 25 2009 Fedora Release Engineering - 1.15-7 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_12_Mass_Rebuild + +* Wed Feb 25 2009 Fedora Release Engineering - 1.15-6 +- Rebuilt for https://fedoraproject.org/wiki/Fedora_11_Mass_Rebuild + +* Thu Apr 10 2008 Ian Weller 1.15-5 +- Moved static lib to -static + +* Mon Mar 24 2008 Ian Weller 1.15-4 +- Added libraries and ldconfig + +* Mon Feb 18 2008 Ian Weller 1.15-3 +- Added provides -static to -devel + +* Sun Feb 17 2008 Ian Weller 1.15-2 +- Removed redundant requires +- Removed redundant documentation between packages +- Fixed license tag +- Fixed -devel description +- Added the static library back to -devel +- Added make clean + +* Fri Feb 08 2008 Ian Weller 1.15-1 +- First package build. 
+ +## END: Generated by rpmautospec diff --git a/sources b/sources new file mode 100644 index 0000000..2663a3d --- /dev/null +++ b/sources @@ -0,0 +1,3 @@ +SHA512 (gmp-6.2.1.tar.xz) = c99be0950a1d05a0297d65641dd35b75b74466f7bf03c9e8a99895a3b2f9a0856cd17887738fa51cf7499781b65c049769271cbcb77d057d2e9f1ec52e07dd84 +SHA512 (nettle-3.10.1.tar.gz) = e8673bbcde9cde859ccae75ed6c9c30591e68a995a7c6d724106cfd67a5a5bd45b3468d742443b6565628849d0fd29505a28ca5ee4e89dd13197cdb51429f96c +SHA512 (nettle-3.10.1.tar.gz.sig) = d074a921df31070a6e6562a9f7e213e67b8e6ce331e2683e8180f387aca92058a5fe8610800817a0aa5098b47176dfcb42b52d617648c84cc6262a09ef557eb8
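
The core idea of nettle-3.8-zeroize-stack.patch above is that every intermediate buffer placed on the stack is wiped before the function returns, via the TMP_CLEAR/TMP_CLEAR_ALIGN macros it adds to nettle-internal.h, which expand to explicit_bzero(). Below is a minimal standalone sketch of that idiom, assuming glibc's explicit_bzero(); the function and buffer names are illustrative and are not nettle code. The point of explicit_bzero() over a plain memset() is that the final memset() would be a dead store the compiler may legally drop, while explicit_bzero() is intended to survive optimization.

/* Minimal sketch of the stack-zeroization idiom used by
 * nettle-3.8-zeroize-stack.patch (illustrative names, not nettle code). */
#define _DEFAULT_SOURCE          /* for explicit_bzero() on glibc */
#include <string.h>
#include <stdint.h>
#include <stdio.h>

static void
derive_example(const uint8_t *secret, size_t len)
{
  uint8_t scratch[64];           /* intermediate data lives on the stack */
  size_t n = len < sizeof(scratch) ? len : sizeof(scratch);

  for (size_t i = 0; i < n; i++) /* stand-in for real key processing */
    scratch[i] = secret[i] ^ 0x5c;

  printf("derived[0] = %02x\n", scratch[0]);

  /* Same role as TMP_CLEAR() in the patch: wipe before returning. */
  explicit_bzero(scratch, sizeof(scratch));
}

int
main(void)
{
  static const uint8_t key[16] = "0123456789abcde";
  derive_example(key, sizeof(key));
  return 0;
}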
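
The hobble-to-configure patch and the spec's --disable-ecc-secp192r1/--disable-ecc-secp224r1 flags rely on the same gating pattern throughout: curve table entries and test cases are wrapped in #if WITH_ECC_SECP192R1 / WITH_ECC_SECP224R1 guards, so a curve disabled at configure time simply never reaches the compiled tables or the testsuite. The sketch below shows that general pattern with made-up names and a hard-coded stand-in for the configure-generated config.h; it is not nettle's actual table layout.

/* Sketch of configure-time gating of table entries (illustrative only). */
#include <stdio.h>

#ifndef WITH_CURVE_A             /* stand-ins for WITH_ECC_SECP192R1 etc. */
# define WITH_CURVE_A 0          /* "disabled" curve */
#endif
#ifndef WITH_CURVE_B
# define WITH_CURVE_B 1          /* "enabled" curve */
#endif

struct curve_info { const char *name; unsigned bits; };

static const struct curve_info enabled_curves[] = {
#if WITH_CURVE_A
  { "curve-a", 192 },            /* dropped when the guard is 0/undefined */
#endif
#if WITH_CURVE_B
  { "curve-b", 256 },
#endif
};

int
main(void)
{
  size_t n = sizeof(enabled_curves) / sizeof(enabled_curves[0]);
  for (size_t i = 0; i < n; i++)
    printf("%s (%u bits)\n", enabled_curves[i].name, enabled_curves[i].bits);
  printf("%zu curve(s) compiled in\n", n);
  return 0;
}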