libgcrypt/libgcrypt-1.6.3-aliasing.patch


diff -up libgcrypt-1.6.3/cipher/bufhelp.h.aliasing libgcrypt-1.6.3/cipher/bufhelp.h
--- libgcrypt-1.6.3/cipher/bufhelp.h.aliasing 2015-02-27 10:54:03.000000000 +0100
+++ libgcrypt-1.6.3/cipher/bufhelp.h 2015-03-13 15:03:43.301749751 +0100
@@ -80,7 +80,7 @@ do_bytes:
for (; len; len--)
*dst++ = *src++;
#endif /*__GNUC__ >= 4 && (__x86_64__ || __i386__)*/
-}
+} __attribute__ ((optimize("no-strict-aliasing")))
/* Optimized function for buffer xoring */
@@ -117,7 +117,7 @@ do_bytes:
/* Handle tail. */
for (; len; len--)
*dst++ = *src1++ ^ *src2++;
-}
+} __attribute__ ((optimize("no-strict-aliasing")))
/* Optimized function for buffer xoring with two destination buffers. Used
@@ -155,7 +155,7 @@ do_bytes:
/* Handle tail. */
for (; len; len--)
*dst1++ = (*dst2++ ^= *src++);
-}
+} __attribute__ ((optimize("no-strict-aliasing")))
/* Optimized function for combined buffer xoring and copying. Used by mainly
@@ -208,7 +208,7 @@ do_bytes:
*dst_xor++ = *srcdst_cpy ^ *src_xor++;
*srcdst_cpy++ = temp;
}
-}
+} __attribute__ ((optimize("no-strict-aliasing")))
/* Optimized function for combined buffer xoring and copying. Used by mainly
@@ -234,7 +234,7 @@ buf_eq_const(const void *_a, const void
diff -= !!(a[i] - b[i]);
return !diff;
-}
+} __attribute__ ((optimize("no-strict-aliasing")))
#ifndef BUFHELP_FAST_UNALIGNED_ACCESS
@@ -246,14 +246,14 @@ static inline u32 buf_get_be32(const voi
const byte *in = _buf;
return ((u32)in[0] << 24) | ((u32)in[1] << 16) | \
((u32)in[2] << 8) | (u32)in[3];
-}
+} __attribute__ ((optimize("no-strict-aliasing")))
static inline u32 buf_get_le32(const void *_buf)
{
const byte *in = _buf;
return ((u32)in[3] << 24) | ((u32)in[2] << 16) | \
((u32)in[1] << 8) | (u32)in[0];
-}
+} __attribute__ ((optimize("no-strict-aliasing")))
static inline void buf_put_be32(void *_buf, u32 val)
{
@@ -262,7 +262,7 @@ static inline void buf_put_be32(void *_b
out[1] = val >> 16;
out[2] = val >> 8;
out[3] = val;
-}
+} __attribute__ ((optimize("no-strict-aliasing")))
static inline void buf_put_le32(void *_buf, u32 val)
{
@@ -271,7 +271,7 @@ static inline void buf_put_le32(void *_b
out[2] = val >> 16;
out[1] = val >> 8;
out[0] = val;
-}
+} __attribute__ ((optimize("no-strict-aliasing")))
#ifdef HAVE_U64_TYPEDEF
/* Functions for loading and storing unaligned u64 values of different
@@ -283,7 +283,7 @@ static inline u64 buf_get_be64(const voi
((u64)in[2] << 40) | ((u64)in[3] << 32) | \
((u64)in[4] << 24) | ((u64)in[5] << 16) | \
((u64)in[6] << 8) | (u64)in[7];
-}
+} __attribute__ ((optimize("no-strict-aliasing")))
static inline u64 buf_get_le64(const void *_buf)
{
@@ -292,7 +292,7 @@ static inline u64 buf_get_le64(const voi
((u64)in[5] << 40) | ((u64)in[4] << 32) | \
((u64)in[3] << 24) | ((u64)in[2] << 16) | \
((u64)in[1] << 8) | (u64)in[0];
-}
+} __attribute__ ((optimize("no-strict-aliasing")))
static inline void buf_put_be64(void *_buf, u64 val)
{
@@ -305,7 +305,7 @@ static inline void buf_put_be64(void *_b
out[5] = val >> 16;
out[6] = val >> 8;
out[7] = val;
-}
+} __attribute__ ((optimize("no-strict-aliasing")))
static inline void buf_put_le64(void *_buf, u64 val)
{
@@ -318,7 +318,7 @@ static inline void buf_put_le64(void *_b
out[2] = val >> 16;
out[1] = val >> 8;
out[0] = val;
-}
+} __attribute__ ((optimize("no-strict-aliasing")))
#endif /*HAVE_U64_TYPEDEF*/
#else /*BUFHELP_FAST_UNALIGNED_ACCESS*/
@@ -328,24 +328,24 @@ static inline void buf_put_le64(void *_b
static inline u32 buf_get_be32(const void *_buf)
{
return be_bswap32(*(const u32 *)_buf);
-}
+} __attribute__ ((optimize("no-strict-aliasing")))
static inline u32 buf_get_le32(const void *_buf)
{
return le_bswap32(*(const u32 *)_buf);
-}
+} __attribute__ ((optimize("no-strict-aliasing")))
static inline void buf_put_be32(void *_buf, u32 val)
{
u32 *out = _buf;
*out = be_bswap32(val);
-}
+} __attribute__ ((optimize("no-strict-aliasing")))
static inline void buf_put_le32(void *_buf, u32 val)
{
u32 *out = _buf;
*out = le_bswap32(val);
-}
+} __attribute__ ((optimize("no-strict-aliasing")))
#ifdef HAVE_U64_TYPEDEF
/* Functions for loading and storing unaligned u64 values of different
@@ -353,24 +353,24 @@ static inline void buf_put_le32(void *_b
static inline u64 buf_get_be64(const void *_buf)
{
return be_bswap64(*(const u64 *)_buf);
-}
+} __attribute__ ((optimize("no-strict-aliasing")))
static inline u64 buf_get_le64(const void *_buf)
{
return le_bswap64(*(const u64 *)_buf);
-}
+} __attribute__ ((optimize("no-strict-aliasing")))
static inline void buf_put_be64(void *_buf, u64 val)
{
u64 *out = _buf;
*out = be_bswap64(val);
-}
+} __attribute__ ((optimize("no-strict-aliasing")))
static inline void buf_put_le64(void *_buf, u64 val)
{
u64 *out = _buf;
*out = le_bswap64(val);
-}
+} __attribute__ ((optimize("no-strict-aliasing")))
#endif /*HAVE_U64_TYPEDEF*/
#endif /*BUFHELP_FAST_UNALIGNED_ACCESS*/
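
The helpers patched above are the buf_cpy/buf_xor* and buf_get_*/buf_put_* functions in cipher/bufhelp.h. Several of them access the caller's byte buffers through wider integer pointers (for example the *(const u32 *)_buf casts visible in the hunks above), which runs afoul of C strict-aliasing rules, so the patch tags each helper with GCC's optimize("no-strict-aliasing") attribute to have just those functions compiled as if -fno-strict-aliasing were in effect. As a rough standalone sketch only (not taken from the patch, with a made-up function name, and with the attribute written in the declaration as GCC documents it), a similar helper would look like this:

  /* Minimal sketch: a 32-bit load that type-puns the input buffer,
   * compiled per-function as if with -fno-strict-aliasing. */
  #include <stdint.h>

  static inline uint32_t
  __attribute__ ((optimize("no-strict-aliasing")))
  get_u32 (const void *_buf)
  {
    /* Reading through a uint32_t pointer aliases whatever object the
     * caller actually passed in; the attribute stops GCC from applying
     * strict-aliasing assumptions when optimizing this one function. */
    return *(const uint32_t *) _buf;
  }

The optimize attribute affects only the annotated function; the rest of the translation unit keeps whatever aliasing options were given on the command line.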