zlib/zlib-1.2.12-s390x-vectorize-crc32.patch

From 957bc67cfb4e01403c01fe6243850383183a7c19 Mon Sep 17 00:00:00 2001
From: Ilya Leoshkevich <iii@linux.ibm.com>
Date: Thu, 19 Mar 2020 11:52:03 +0100
Subject: [PATCH] s390x: vectorize crc32

Use vector extensions when compiling for s390x, provided binutils knows
about them. At runtime, check whether the kernel supports vector
extensions (both the CPU and the kernel have to support them) and choose
between the regular and the vectorized implementations.
---
Makefile.in | 9 ++
configure | 28 +++++
contrib/gcc/zifunc.h | 21 +++-
contrib/s390/crc32-vx.c | 195 ++++++++++++++++++++++++++++++++
contrib/s390/crc32_z_resolver.c | 41 +++++++
crc32.c | 11 +-
6 files changed, 301 insertions(+), 4 deletions(-)
create mode 100644 contrib/s390/crc32-vx.c
create mode 100644 contrib/s390/crc32_z_resolver.c
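
The patch builds on GNU indirect functions (ifuncs). For orientation, a
minimal standalone sketch of the mechanism — hypothetical names, assuming
glibc on s390x, where the dynamic loader passes AT_HWCAP to ifunc resolvers:

    #include <stddef.h>
    #include <sys/auxv.h>   /* HWCAP_S390_VX (s390x glibc) */

    static unsigned long my_crc32_generic(unsigned long crc,
                                          const unsigned char *buf, size_t len)
    { return crc; /* stub: table-driven code would go here */ }

    static unsigned long my_crc32_vx(unsigned long crc,
                                     const unsigned char *buf, size_t len)
    { return crc; /* stub: vectorized code would go here */ }

    /* Runs once, during relocation; the chosen address is patched into
     * the GOT, so later calls to my_crc32 have zero dispatch overhead. */
    static typeof(my_crc32_generic) *my_crc32_resolver(unsigned long hwcap)
    {
        return (hwcap & HWCAP_S390_VX) ? my_crc32_vx : my_crc32_generic;
    }

    unsigned long my_crc32(unsigned long crc, const unsigned char *buf,
                           size_t len)
        __attribute__((ifunc("my_crc32_resolver")));
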
diff --git a/Makefile.in b/Makefile.in
index 2e78f38..04c2f5d 100644
--- a/Makefile.in
+++ b/Makefile.in
@@ -29,6 +29,7 @@ LDFLAGS=
TEST_LDFLAGS=-L. libz.a
LDSHARED=$(CC)
CPP=$(CC) -E
+VGFMAFLAG=
STATICLIB=libz.a
SHAREDLIB=libz.so
@@ -179,6 +180,9 @@ crc32.o: $(SRCDIR)crc32.c
crc32_z_power8.o: $(SRCDIR)contrib/power/crc32_z_power8.c
$(CC) $(CFLAGS) -mcpu=power8 $(ZINC) -c -o $@ $(SRCDIR)contrib/power/crc32_z_power8.c
+crc32-vx.o: $(SRCDIR)contrib/s390/crc32-vx.c
+ $(CC) $(CFLAGS) $(VGFMAFLAG) $(ZINC) -c -o $@ $(SRCDIR)contrib/s390/crc32-vx.c
+
deflate.o: $(SRCDIR)deflate.c
$(CC) $(CFLAGS) $(ZINC) -c -o $@ $(SRCDIR)deflate.c
@@ -229,6 +233,11 @@ crc32.lo: $(SRCDIR)crc32.c
$(CC) $(SFLAGS) $(ZINC) -DPIC -c -o objs/crc32.o $(SRCDIR)crc32.c
-@mv objs/crc32.o $@
+crc32-vx.lo: $(SRCDIR)contrib/s390/crc32-vx.c
+ -@mkdir objs 2>/dev/null || test -d objs
+ $(CC) $(SFLAGS) $(VGFMAFLAG) $(ZINC) -DPIC -c -o objs/crc32-vx.o $(SRCDIR)contrib/s390/crc32-vx.c
+ -@mv objs/crc32-vx.o $@
+
crc32_z_power8.lo: $(SRCDIR)contrib/power/crc32_z_power8.c
-@mkdir objs 2>/dev/null || test -d objs
$(CC) $(SFLAGS) -mcpu=power8 $(ZINC) -DPIC -c -o objs/crc32_z_power8.o $(SRCDIR)contrib/power/crc32_z_power8.c
diff --git a/configure b/configure
index dd01b5c..acf94a5 100755
--- a/configure
+++ b/configure
@@ -927,6 +927,32 @@ else
echo "Checking for sys/sdt.h ... No." | tee -a configure.log
fi
+# check whether we are compiling for s390 and binutils supports vector extensions
+VGFMAFLAG=-march=z13
+cat > $test.c <<EOF
+#ifndef __s390__
+#error
+#endif
+EOF
+if try $CC -c $CFLAGS $VGFMAFLAG $test.c; then
+ CFLAGS="$CFLAGS -DHAVE_S390X_VX"
+ SFLAGS="$SFLAGS -DHAVE_S390X_VX"
+ OBJC="$OBJC crc32-vx.o"
+ PIC_OBJC="$PIC_OBJC crc32-vx.lo"
+ echo "Checking for s390 vector extensions... Yes." | tee -a configure.log
+
+ for flag in -mzarch -fzvector; do
+ if try $CC -c $CFLAGS $VGFMAFLAG $flag $test.c; then
+ VGFMAFLAG="$VGFMAFLAG $flag"
+ echo "Checking for $flag... Yes." | tee -a configure.log
+ else
+ echo "Checking for $flag... No." | tee -a configure.log
+ fi
+ done
+else
+ echo "Checking for s390 vector extensions... No." | tee -a configure.log
+fi
+
# show the results in the log
echo >> configure.log
echo ALL = $ALL >> configure.log
@@ -960,6 +986,7 @@ echo mandir = $mandir >> configure.log
echo prefix = $prefix >> configure.log
echo sharedlibdir = $sharedlibdir >> configure.log
echo uname = $uname >> configure.log
+echo VGFMAFLAG = $VGFMAFLAG >> configure.log
# udpate Makefile with the configure results
sed < ${SRCDIR}Makefile.in "
@@ -969,6 +996,7 @@ sed < ${SRCDIR}Makefile.in "
/^LDFLAGS *=/s#=.*#=$LDFLAGS#
/^LDSHARED *=/s#=.*#=$LDSHARED#
/^CPP *=/s#=.*#=$CPP#
+/^VGFMAFLAG *=/s#=.*#=$VGFMAFLAG#
/^STATICLIB *=/s#=.*#=$STATICLIB#
/^SHAREDLIB *=/s#=.*#=$SHAREDLIB#
/^SHAREDLIBV *=/s#=.*#=$SHAREDLIBV#
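
About the probed flags: -march=z13 makes the vector facility available,
-mzarch selects the 64-bit z/Architecture mode, and -fzvector enables the
IBM Z vector language extension where the compiler accepts it. A toy
translation unit using the same GNU vector types that crc32-vx.c declares
below (an illustration, not part of the patch):

    /* Assumed build: cc -march=z13 -mzarch -fzvector -c vx-toy.c */
    typedef unsigned long long uv2di __attribute__((vector_size(16)));

    int main(void)
    {
        uv2di a = {1, 2}, b = {3, 4};
        uv2di c = a ^ b;               /* lane-wise XOR in one instruction */
        return (int)(c[0] ^ c[1]) - 4; /* (1^3) ^ (2^4) = 2 ^ 6 = 4; exits 0 */
    }
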
diff --git a/contrib/gcc/zifunc.h b/contrib/gcc/zifunc.h
index daf4fe4..b62379e 100644
--- a/contrib/gcc/zifunc.h
+++ b/contrib/gcc/zifunc.h
@@ -8,9 +8,28 @@
/* Helpers for arch optimizations */
+#if defined(__clang__)
+#if __has_feature(coverage_sanitizer)
+#define Z_IFUNC_NO_SANCOV __attribute__((no_sanitize("coverage")))
+#else /* __has_feature(coverage_sanitizer) */
+#define Z_IFUNC_NO_SANCOV
+#endif /* __has_feature(coverage_sanitizer) */
+#else /* __clang__ */
+#define Z_IFUNC_NO_SANCOV
+#endif /* __clang__ */
+
+#ifdef __s390__
+#define Z_IFUNC_PARAMS unsigned long hwcap
+#define Z_IFUNC_ATTRS Z_IFUNC_NO_SANCOV
+#else /* __s390__ */
+#define Z_IFUNC_PARAMS void
+#define Z_IFUNC_ATTRS
+#endif /* __s390__ */
+
#define Z_IFUNC(fname) \
typeof(fname) fname __attribute__ ((ifunc (#fname "_resolver"))); \
- local typeof(fname) *fname##_resolver(void)
+ Z_IFUNC_ATTRS \
+ local typeof(fname) *fname##_resolver(Z_IFUNC_PARAMS)
/* This is a helper macro to declare a resolver for an indirect function
* (ifunc). Let's say you have function
*
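
To make the changed macro concrete: on s390x, under a clang build with
coverage instrumentation, Z_IFUNC(crc32_z) now expands to roughly the
following (my expansion for illustration; `local` is zlib's macro for
`static`). The no_sanitize attribute matters because resolvers run during
relocation, before the sanitizer runtime is initialized:

    typeof(crc32_z) crc32_z __attribute__ ((ifunc ("crc32_z_resolver")));
    __attribute__((no_sanitize("coverage")))
    local typeof(crc32_z) *crc32_z_resolver(unsigned long hwcap)
    /* ... resolver body supplied at the macro's use site ... */
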
diff --git a/contrib/s390/crc32-vx.c b/contrib/s390/crc32-vx.c
new file mode 100644
index 0000000..fa5387c
--- /dev/null
+++ b/contrib/s390/crc32-vx.c
@@ -0,0 +1,195 @@
+/*
+ * Hardware-accelerated CRC-32 variants for Linux on z Systems
+ *
+ * Use the z/Architecture Vector Extension Facility to accelerate the
+ * computing of bitreflected CRC-32 checksums.
+ *
+ * This CRC-32 implementation algorithm is bitreflected and processes
+ * the least-significant bit first (Little-Endian).
+ *
+ * This code was originally written by Hendrik Brueckner
+ * <brueckner@linux.vnet.ibm.com> for use in the Linux kernel and has been
+ * relicensed under the zlib license.
+ */
+
+#include "../../zutil.h"
+
+#include <stdint.h>
+#include <vecintrin.h>
+
+typedef unsigned char uv16qi __attribute__((vector_size(16)));
+typedef unsigned int uv4si __attribute__((vector_size(16)));
+typedef unsigned long long uv2di __attribute__((vector_size(16)));
+
+uint32_t crc32_le_vgfm_16(uint32_t crc, const unsigned char *buf, size_t len) {
+ /*
+ * The CRC-32 constant block contains reduction constants to fold and
+ * process particular chunks of the input data stream in parallel.
+ *
+ * For the CRC-32 variants, the constants are precomputed according to
+ * these definitions:
+ *
+ * R1 = [(x^(4*128+32) mod P'(x) << 32)]' << 1
+ * R2 = [(x^(4*128-32) mod P'(x) << 32)]' << 1
+ * R3 = [(x^(128+32) mod P'(x) << 32)]' << 1
+ * R4 = [(x^(128-32) mod P'(x) << 32)]' << 1
+ * R5 = [(x^64 mod P'(x) << 32)]' << 1
+ * R6 = [(x^32 mod P'(x) << 32)]' << 1
+ *
+ * The bitreflected Barrett reduction constant, u', is defined as
+ * the bit reversal of floor(x^64 / P(x)),
+ *
+ * where P(x) is the polynomial in the normal domain and P'(x) is the
+ * polynomial in the reversed (bitreflected) domain.
+ *
+ * CRC-32 (IEEE 802.3 Ethernet, ...) polynomials:
+ *
+ * P(x) = 0x04C11DB7
+ * P'(x) = 0xEDB88320
+ */
+ const uv16qi perm_le2be = {15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0}; /* BE->LE mask */
+ const uv2di r2r1 = {0x1C6E41596, 0x154442BD4}; /* R2, R1 */
+ const uv2di r4r3 = {0x0CCAA009E, 0x1751997D0}; /* R4, R3 */
+ const uv2di r5 = {0, 0x163CD6124}; /* R5 */
+ const uv2di ru_poly = {0, 0x1F7011641}; /* u' */
+ const uv2di crc_poly = {0, 0x1DB710641}; /* P'(x) << 1 */
+
+ /*
+ * Load the initial CRC value.
+ *
+ * The CRC value is loaded into the rightmost word of the
+ * vector register and is later XORed with the LSB portion
+ * of the loaded input data.
+ */
+ uv2di v0 = {0, 0};
+ v0 = (uv2di)vec_insert(crc, (uv4si)v0, 3);
+
+ /* Load a 64-byte data chunk and XOR with CRC */
+ uv2di v1 = vec_perm(((uv2di *)buf)[0], ((uv2di *)buf)[0], perm_le2be);
+ uv2di v2 = vec_perm(((uv2di *)buf)[1], ((uv2di *)buf)[1], perm_le2be);
+ uv2di v3 = vec_perm(((uv2di *)buf)[2], ((uv2di *)buf)[2], perm_le2be);
+ uv2di v4 = vec_perm(((uv2di *)buf)[3], ((uv2di *)buf)[3], perm_le2be);
+
+ v1 ^= v0;
+ buf += 64;
+ len -= 64;
+
+ while (len >= 64) {
+ /* Load the next 64-byte data chunk */
+ uv16qi part1 = vec_perm(((uv16qi *)buf)[0], ((uv16qi *)buf)[0], perm_le2be);
+ uv16qi part2 = vec_perm(((uv16qi *)buf)[1], ((uv16qi *)buf)[1], perm_le2be);
+ uv16qi part3 = vec_perm(((uv16qi *)buf)[2], ((uv16qi *)buf)[2], perm_le2be);
+ uv16qi part4 = vec_perm(((uv16qi *)buf)[3], ((uv16qi *)buf)[3], perm_le2be);
+
+ /*
+ * Perform a GF(2) multiplication of the doublewords in V1 with
+ * the R1 and R2 reduction constants in V0. The intermediate result
+ * is then folded (accumulated) with the next data chunk in PART1 and
+ * stored in V1. Repeat this step for the register contents
+ * in V2, V3, and V4 respectively.
+ */
+ v1 = (uv2di)vec_gfmsum_accum_128(r2r1, v1, part1);
+ v2 = (uv2di)vec_gfmsum_accum_128(r2r1, v2, part2);
+ v3 = (uv2di)vec_gfmsum_accum_128(r2r1, v3, part3);
+ v4 = (uv2di)vec_gfmsum_accum_128(r2r1, v4, part4);
+
+ buf += 64;
+ len -= 64;
+ }
+
+ /*
+ * Fold V1 to V4 into a single 128-bit value in V1, multiplying V1 with R3
+ * and R4 and accumulating the next 128-bit chunk until a single 128-bit
+ * value remains.
+ */
+ v1 = (uv2di)vec_gfmsum_accum_128(r4r3, v1, (uv16qi)v2);
+ v1 = (uv2di)vec_gfmsum_accum_128(r4r3, v1, (uv16qi)v3);
+ v1 = (uv2di)vec_gfmsum_accum_128(r4r3, v1, (uv16qi)v4);
+
+ while (len >= 16) {
+ /* Load next data chunk */
+ v2 = vec_perm(*(uv2di *)buf, *(uv2di *)buf, perm_le2be);
+
+ /* Fold next data chunk */
+ v1 = (uv2di)vec_gfmsum_accum_128(r4r3, v1, (uv16qi)v2);
+
+ buf += 16;
+ len -= 16;
+ }
+
+ /*
+ * Set up a vector register for byte shifts. The shift value must
+ * be loaded in bits 1-4 in byte element 7 of a vector register.
+ * Shift by 8 bytes: 0x40
+ * Shift by 4 bytes: 0x20
+ */
+ uv16qi v9 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
+ v9 = vec_insert((unsigned char)0x40, v9, 7);
+
+ /*
+ * Prepare V0 for the next GF(2) multiplication: shift V0 by 8 bytes
+ * to move R4 into the rightmost doubleword and set the leftmost
+ * doubleword to 0x1.
+ */
+ v0 = vec_srb(r4r3, (uv2di)v9);
+ v0[0] = 1;
+
+ /*
+ * Compute the GF(2) product of V1 and V0. The rightmost doubleword
+ * of V1 is multiplied with R4. The leftmost doubleword of V1 is
+ * multiplied by 0x1 and is then XORed with the rightmost product.
+ * Implicitly, the intermediate leftmost product is padded with zeroes.
+ */
+ v1 = (uv2di)vec_gfmsum_128(v0, v1);
+
+ /*
+ * Now do the final 32-bit fold by multiplying the rightmost word
+ * in V1 with R5 and XOR the result with the remaining bits in V1.
+ *
+ * To achieve this by a single VGFMAG, right shift V1 by a word
+ * and store the result in V2 which is then accumulated. Use the
+ * vector unpack instruction to load the rightmost half of the
+ * doubleword into the rightmost doubleword element of V1; the other
+ * half is loaded in the leftmost doubleword.
+ * The vector register with the R5 constant (r5 above) holds R5 in the
+ * rightmost doubleword, and its leftmost doubleword is zero to ignore
+ * the leftmost product of V1.
+ */
+ v9 = vec_insert((unsigned char)0x20, v9, 7);
+ v2 = vec_srb(v1, (uv2di)v9);
+ v1 = vec_unpackl((uv4si)v1); /* Split rightmost doubleword */
+ v1 = (uv2di)vec_gfmsum_accum_128(r5, v1, (uv16qi)v2);
+
+ /*
+ * Apply a Barrett reduction to compute the final 32-bit CRC value.
+ *
+ * The input values to the Barrett reduction are the degree-63 polynomial
+ * in V1 (R(x)), the degree-32 generator polynomial, and the reduction
+ * constant u. The Barrett reduction result is the CRC value of R(x) mod
+ * P(x).
+ *
+ * The Barrett reduction algorithm is defined as:
+ *
+ * 1. T1(x) = floor( R(x) / x^32 ) GF2MUL u
+ * 2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x)
+ * 3. C(x) = R(x) XOR T2(x) mod x^32
+ *
+ * Note: The leftmost doubleword of the vector register containing the
+ * u' constant (ru_poly above) is zero and, thus, the intermediate GF(2) product
+ * is zero and does not contribute to the final result.
+ */
+
+ /* T1(x) = floor( R(x) / x^32 ) GF2MUL u */
+ v2 = vec_unpackl((uv4si)v1);
+ v2 = (uv2di)vec_gfmsum_128(ru_poly, v2);
+
+ /*
+ * Compute the GF(2) product of the CRC polynomial with T1(x) in
+ * V2 and XOR the intermediate result, T2(x), with the value in V1.
+ * The final result is stored in word element 2 of V2.
+ */
+ v2 = vec_unpackl((uv4si)v2);
+ v2 = (uv2di)vec_gfmsum_accum_128(crc_poly, v2, (uv16qi)v1);
+
+ return ((uv4si)v2)[2];
+}
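
The workhorse above is vec_gfmsum_accum_128 (the VGFMAG instruction): each
doubleword lane is multiplied carry-lessly over GF(2), the two 128-bit lane
products are XORed together, and the 128-bit accumulator operand is XORed
in. A scalar model of one lane's carry-less multiply, for intuition only
(the patch never runs this):

    #include <stdint.h>

    /* Carry-less (GF(2)) 64x64 -> 128-bit multiply: the product is the XOR
     * of (a << i) across every set bit i of b; columns never carry. */
    static void clmul64(uint64_t a, uint64_t b, uint64_t *lo, uint64_t *hi)
    {
        uint64_t l = 0, h = 0;
        int i;

        for (i = 0; i < 64; i++)
            if ((b >> i) & 1) {
                l ^= a << i;
                if (i > 0)
                    h ^= a >> (64 - i);
            }
        *lo = l;
        *hi = h;
    }

Folding works because CRC is linear over GF(2): multiplying a chunk by the
precomputed x^k mod P'(x) constants shifts it to its final position in the
message, and XOR accumulates the partial remainders.
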
diff --git a/contrib/s390/crc32_z_resolver.c b/contrib/s390/crc32_z_resolver.c
new file mode 100644
index 0000000..9749cab
--- /dev/null
+++ b/contrib/s390/crc32_z_resolver.c
@@ -0,0 +1,41 @@
+#include <sys/auxv.h>
+#include "../gcc/zifunc.h"
+
+#define VX_MIN_LEN 64
+#define VX_ALIGNMENT 16L
+#define VX_ALIGN_MASK (VX_ALIGNMENT - 1)
+
+unsigned int crc32_le_vgfm_16(unsigned int crc, const unsigned char FAR *buf, z_size_t len);
+
+local unsigned long s390_crc32_vx(unsigned long crc, const unsigned char FAR *buf, z_size_t len)
+{
+ uintptr_t prealign, aligned, remaining;
+
+ if (buf == Z_NULL) return 0UL;
+
+ if (len < VX_MIN_LEN + VX_ALIGN_MASK)
+ return crc32_z_default(crc, buf, len);
+
+ if ((uintptr_t)buf & VX_ALIGN_MASK) {
+ prealign = VX_ALIGNMENT - ((uintptr_t)buf & VX_ALIGN_MASK);
+ len -= prealign;
+ crc = crc32_z_default(crc, buf, prealign);
+ buf += prealign;
+ }
+ aligned = len & ~VX_ALIGN_MASK;
+ remaining = len & VX_ALIGN_MASK;
+
+ crc = crc32_le_vgfm_16(crc ^ 0xffffffff, buf, (size_t)aligned) ^ 0xffffffff;
+
+ if (remaining)
+ crc = crc32_z_default(crc, buf + aligned, remaining);
+
+ return crc;
+}
+
+Z_IFUNC(crc32_z)
+{
+ if (hwcap & HWCAP_S390_VX)
+ return s390_crc32_vx;
+ return crc32_z_default;
+}
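
To inspect what the resolver will see on a given machine, a standalone
probe (my example, not part of the patch; HWCAP_S390_VX is only defined by
glibc's headers on s390x, hence the guard):

    #include <stdio.h>
    #include <sys/auxv.h>

    int main(void)
    {
        unsigned long hwcap = getauxval(AT_HWCAP);

    #ifdef HWCAP_S390_VX
        printf("vector facility: %s\n",
               (hwcap & HWCAP_S390_VX) ? "available" : "not available");
    #else
        printf("HWCAP_S390_VX undefined; not an s390x build (hwcap=%#lx)\n",
               hwcap);
    #endif
        return 0;
    }

Note also the shape of s390_crc32_vx: buffers shorter than
VX_MIN_LEN + VX_ALIGN_MASK stay on the table-driven path, and misaligned
heads and sub-16-byte tails are likewise delegated, so the vector core only
ever sees 16-byte-aligned spans whose length is a multiple of 16.
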
diff --git a/crc32.c b/crc32.c
index ae7b7e7..c212261 100644
--- a/crc32.c
+++ b/crc32.c
@@ -736,12 +736,12 @@ local z_word_t crc_word_big(data)
#endif
/* ========================================================================= */
-#ifdef Z_POWER_OPT
+#if defined(Z_POWER_OPT) || defined(HAVE_S390X_VX)
/* Rename function so resolver can use its symbol. The default version will be
* returned by the resolver if the host has no support for an optimized version.
*/
#define crc32_z crc32_z_default
-#endif /* Z_POWER_OPT */
+#endif /* defined(Z_POWER_OPT) || defined(HAVE_S390X_VX) */
unsigned long ZEXPORT crc32_z(crc, buf, len)
unsigned long crc;
@@ -1064,10 +1064,15 @@ unsigned long ZEXPORT crc32_z(crc, buf, len)
return crc ^ 0xffffffff;
}
-#ifdef Z_POWER_OPT
+#if defined(Z_POWER_OPT) || defined(HAVE_S390X_VX)
#undef crc32_z
+#ifdef Z_POWER_OPT
#include "contrib/power/crc32_z_resolver.c"
#endif /* Z_POWER_OPT */
+#ifdef HAVE_S390X_VX
+#include "contrib/s390/crc32_z_resolver.c"
+#endif /* HAVE_S390X_VX */
+#endif /* defined(Z_POWER_OPT) || defined(HAVE_S390X_VX) */
#endif
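
The #define dance above, reduced to a minimal model (hypothetical names,
GNU C on an ELF target): compiling the default implementation under an
alternate symbol frees the public name for the ifunc, and the resolver can
return either address.

    #define work work_default
    int work(int x) { return 2 * x; }  /* emitted as work_default */
    #undef work

    static int work_fast(int x) { return x << 1; }

    static typeof(work_default) *work_resolver(void)
    {
        return work_fast;  /* a real resolver would test hwcap here */
    }

    int work(int x) __attribute__((ifunc("work_resolver")));
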
--
2.35.1