diff -Nru zlib-1.2.11.dfsg/debian/changelog zlib-1.2.11.dfsg/debian/changelog
--- zlib-1.2.11.dfsg/debian/changelog	2021-03-28 09:10:07.000000000 +0200
+++ zlib-1.2.11.dfsg/debian/changelog	2021-08-06 15:15:44.000000000 +0200
@@ -1,3 +1,13 @@
+zlib (1:1.2.11.dfsg-2ubuntu7) impish; urgency=medium
+
+  * d/rules: use configure options for dfltcc instead of hardcoding
+    the CFLAGS
+  * d/p/lp1932010-ibm-z-add-vectorized-crc32-implementation.patch
+    ported from zlib-ng #912, adding a vectorized implementation
+    of CRC32 on s390x architectures based on kernel code. LP: #1932010
+
+ -- Simon Chopin <simon.chopin@canonical.com>  Fri, 06 Aug 2021 15:15:44 +0200
+
 zlib (1:1.2.11.dfsg-2ubuntu6) hirsute; urgency=medium
 
   * No-change rebuild to build with lto.
diff -Nru zlib-1.2.11.dfsg/debian/patches/lp1932010-ibm-z-add-vectorized-crc32-implementation.patch zlib-1.2.11.dfsg/debian/patches/lp1932010-ibm-z-add-vectorized-crc32-implementation.patch
--- zlib-1.2.11.dfsg/debian/patches/lp1932010-ibm-z-add-vectorized-crc32-implementation.patch	1970-01-01 01:00:00.000000000 +0100
+++ zlib-1.2.11.dfsg/debian/patches/lp1932010-ibm-z-add-vectorized-crc32-implementation.patch	2021-08-06 15:15:44.000000000 +0200
@@ -0,0 +1,460 @@
+From f8801cc4f765c500867f1ff950708071b6c3c2e2 Mon Sep 17 00:00:00 2001
+From: Ilya Leoshkevich <iii@linux.ibm.com>
+Date: Tue, 6 Apr 2021 13:51:16 +0200
+Subject: [PATCH] IBM Z: Add vectorized CRC32 implementation
+
+While DFLTCC takes care of accelerating compression on level 1, other
+levels can be sped up too by computing CRC32 using various vector
+instructions.
+
+Take the Linux kernel assembly code that does that - its original
+author (Hendrik Brueckner) works for IBM at the time of writing and has
+allowed reusing the code under the zlib license. Rewrite it in C for
+better maintainability, but keep the original structure, variable names
+and comments.
+
+Ubuntu zlib backport modifications:
+
+Added a NULL-check in the code, not necessary in -ng
+Retrofitted the configuration knobs
+Removed the CI part of the patch
+---
+ Makefile.in             |   8 ++
+ configure               |  52 +++++++++
+ contrib/s390/crc32-vx.c | 226 ++++++++++++++++++++++++++++++++++++++++
+ crc32.c                 |  19 +++-
+ crc32_p.h               |  20 ++++
+ 5 files changed, 321 insertions(+), 4 deletions(-)
+ create mode 100644 contrib/s390/crc32-vx.c
+ create mode 100644 crc32_p.h
+
+diff --git a/Makefile.in b/Makefile.in
+index 1e00528..8aa0579 100644
+--- a/Makefile.in
++++ b/Makefile.in
+@@ -30,6 +30,8 @@ TEST_LDFLAGS=-L. libz.a
+ LDSHARED=$(CC)
+ CPP=$(CC) -E
+ 
++VGFMAFLAG=
++
+ STATICLIB=libz.a
+ SHAREDLIB=libz.so
+ SHAREDLIBV=libz.so.1.2.11
+
+@@ -151,6 +153,12 @@ dfltcc.lo: $(SRCDIR)contrib/s390/dfltcc.c $(SRCDIR)zlib.h zconf.h
+ 	$(CC) $(SFLAGS) $(ZINC) -DPIC -c -o objs/dfltcc.o $(SRCDIR)contrib/s390/dfltcc.c
+ 	-@mv objs/dfltcc.o $@
+ 
++crc32-vx.o: $(SRCDIR)contrib/s390/crc32-vx.c $(SRCDIR)zlib.h zconf.h
++	$(CC) $(CFLAGS) $(VGFMAFLAG) -c -o $@ $(SRCDIR)contrib/s390/crc32-vx.c
++
++crc32-vx.lo: $(SRCDIR)contrib/s390/crc32-vx.c $(SRCDIR)zlib.h zconf.h
++	$(CC) $(SFLAGS) $(VGFMAFLAG) -c -o $@ $(SRCDIR)contrib/s390/crc32-vx.c
++
+ crc32_test.o: $(SRCDIR)test/crc32_test.c $(SRCDIR)zlib.h zconf.h
+ 	$(CC) $(CFLAGS) $(ZINCOUT) -c -o $@ $(SRCDIR)test/crc32_test.c
+ 
+diff --git a/configure b/configure
+index b63f9ea..295f6c9 100755
+--- a/configure
++++ b/configure
+@@ -89,6 +89,8 @@ warn=0
+ debug=0
+ old_cc="$CC"
+ old_cflags="$CFLAGS"
++vgfmaflag="-march=z13"
++crc32_vx=0
+ OBJC='$(OBJZ) $(OBJG)'
+ PIC_OBJC='$(PIC_OBJZ) $(PIC_OBJG)'
+ 
+@@ -144,6 +146,10 @@ case "$1" in
+             PIC_OBJC="$PIC_OBJC dfltcc.lo"
+             shift
+             ;;
++        --crc32-vx)
++            crc32_vx=1
++            shift
++            ;;
+         *)
+             echo "unknown option: $1" | tee -a configure.log
+             echo "$0 --help for help" | tee -a configure.log
+@@ -927,6 +933,51 @@ EOF
+   fi
+ fi
+ 
++# Check whether "VECTOR GALOIS FIELD MULTIPLY SUM AND ACCUMULATE" intrinsic is available
++assert_crc32_vgfma() {
++    echo -n "Checking for -mzarch... " | tee -a configure.log
++    if try $CC -x c -c /dev/null -o /dev/null -mzarch; then
++        echo Yes. | tee -a configure.log
++        vgfmaflag="${vgfmaflag} -mzarch"
++    else
++        echo No. | tee -a configure.log
++    fi
++    echo -n "Checking for -fzvector... " | tee -a configure.log
++    if try $CC -x c -c /dev/null -o /dev/null -fzvector; then
++        echo Yes. | tee -a configure.log
++        vgfmaflag="${vgfmaflag} -fzvector"
++    else
++        echo No. | tee -a configure.log
++    fi
++    cat > $test.c << EOF
++#include <vecintrin.h>
++int main(void) {
++    unsigned long long a __attribute__((vector_size(16))) = { 0 };
++    unsigned long long b __attribute__((vector_size(16))) = { 0 };
++    unsigned char c __attribute__((vector_size(16))) = { 0 };
++    c = vec_gfmsum_accum_128(a, b, c);
++    return c[0];
++}
++EOF
++
++    echo -n "Checking for VGFMA support... " | tee -a configure.log
++    if try $CC -c $CFLAGS $vgfmaflag $test.c; then
++        echo "Yes." | tee -a configure.log
++    else
++        echo "No." | tee -a configure.log
++        echo "VGFMA support mandatory for crc32-vx feature."
++        leave 1
++    fi
++}
++
++
++if test "$crc32_vx" -eq 1; then
++    CFLAGS="$CFLAGS -DS390_CRC32_VX"
++    OBJC="$OBJC crc32-vx.o"
++    PIC_OBJC="$PIC_OBJC crc32-vx.lo"
++    assert_crc32_vgfma
++fi
++
+ # show the results in the log
+ echo >> configure.log
+ echo ALL = $ALL >> configure.log
+@@ -968,6 +1019,7 @@ sed < ${SRCDIR}Makefile.in "
+ /^LDFLAGS *=/s#=.*#=$LDFLAGS#
+ /^LDSHARED *=/s#=.*#=$LDSHARED#
+ /^CPP *=/s#=.*#=$CPP#
++/^VGFMAFLAG *=/s#=.*#=$vgfmaflag#
+ /^STATICLIB *=/s#=.*#=$STATICLIB#
+ /^SHAREDLIB *=/s#=.*#=$SHAREDLIB#
+ /^SHAREDLIBV *=/s#=.*#=$SHAREDLIBV#
+diff --git a/contrib/s390/crc32-vx.c b/contrib/s390/crc32-vx.c
+new file mode 100644
+index 0000000..b86691e
+--- /dev/null
++++ b/contrib/s390/crc32-vx.c
+@@ -0,0 +1,226 @@
++/*
++ * Hardware-accelerated CRC-32 variants for Linux on z Systems
++ *
++ * Use the z/Architecture Vector Extension Facility to accelerate the
++ * computing of bitreflected CRC-32 checksums.
++ *
++ * This CRC-32 implementation algorithm is bitreflected and processes
++ * the least-significant bit first (Little-Endian).
++ *
++ * This code was originally written by Hendrik Brueckner
++ * <brueckner@linux.vnet.ibm.com> for use in the Linux kernel and has been
++ * relicensed under the zlib license.
++ */
++
++#include "../../zutil.h"
++#include "../../crc32_p.h"
++
++#include <vecintrin.h>
++
++typedef unsigned char uv16qi __attribute__((vector_size(16)));
++typedef unsigned int uv4si __attribute__((vector_size(16)));
++typedef unsigned long long uv2di __attribute__((vector_size(16)));
++
++static uint32_t crc32_le_vgfm_16(uint32_t crc, const unsigned char *buf, size_t len) {
++    /*
++     * The CRC-32 constant block contains reduction constants to fold and
++     * process particular chunks of the input data stream in parallel.
++     *
++     * For the CRC-32 variants, the constants are precomputed according to
++     * these definitions:
++     *
++     *      R1 = [(x4*128+32 mod P'(x) << 32)]' << 1
++     *      R2 = [(x4*128-32 mod P'(x) << 32)]' << 1
++     *      R3 = [(x128+32 mod P'(x) << 32)]' << 1
++     *      R4 = [(x128-32 mod P'(x) << 32)]' << 1
++     *      R5 = [(x64 mod P'(x) << 32)]' << 1
++     *      R6 = [(x32 mod P'(x) << 32)]' << 1
++     *
++     *      The bitreflected Barret reduction constant, u', is defined as
++     *      the bit reversal of floor(x**64 / P(x)).
++     *
++     *      where P(x) is the polynomial in the normal domain and the P'(x) is the
++     *      polynomial in the reversed (bitreflected) domain.
++     *
++     * CRC-32 (IEEE 802.3 Ethernet, ...) polynomials:
++     *
++     *      P(x)  = 0x04C11DB7
++     *      P'(x) = 0xEDB88320
++     */
++    const uv16qi perm_le2be = {15, 14, 13, 12, 11, 10, 9, 8, 7, 6, 5, 4, 3, 2, 1, 0};  /* BE->LE mask */
++    const uv2di r2r1 = {0x1C6E41596, 0x154442BD4};  /* R2, R1 */
++    const uv2di r4r3 = {0x0CCAA009E, 0x1751997D0};  /* R4, R3 */
++    const uv2di r5 = {0, 0x163CD6124};              /* R5 */
++    const uv2di ru_poly = {0, 0x1F7011641};         /* u' */
++    const uv2di crc_poly = {0, 0x1DB710641};        /* P'(x) << 1 */
++
++    /*
++     * Load the initial CRC value.
++     *
++     * The CRC value is loaded into the rightmost word of the
++     * vector register and is later XORed with the LSB portion
++     * of the loaded input data.
++     */
++    uv2di v0 = {0, 0};
++    v0 = (uv2di)vec_insert(crc, (uv4si)v0, 3);
++
++    /* Load a 64-byte data chunk and XOR with CRC */
++    uv2di v1 = vec_perm(((uv2di *)buf)[0], ((uv2di *)buf)[0], perm_le2be);
++    uv2di v2 = vec_perm(((uv2di *)buf)[1], ((uv2di *)buf)[1], perm_le2be);
++    uv2di v3 = vec_perm(((uv2di *)buf)[2], ((uv2di *)buf)[2], perm_le2be);
++    uv2di v4 = vec_perm(((uv2di *)buf)[3], ((uv2di *)buf)[3], perm_le2be);
++
++    v1 ^= v0;
++    buf += 64;
++    len -= 64;
++
++    while (len >= 64) {
++        /* Load the next 64-byte data chunk */
++        uv16qi part1 = vec_perm(((uv16qi *)buf)[0], ((uv16qi *)buf)[0], perm_le2be);
++        uv16qi part2 = vec_perm(((uv16qi *)buf)[1], ((uv16qi *)buf)[1], perm_le2be);
++        uv16qi part3 = vec_perm(((uv16qi *)buf)[2], ((uv16qi *)buf)[2], perm_le2be);
++        uv16qi part4 = vec_perm(((uv16qi *)buf)[3], ((uv16qi *)buf)[3], perm_le2be);
++
++        /*
++         * Perform a GF(2) multiplication of the doublewords in V1 with
++         * the R1 and R2 reduction constants in V0. The intermediate result
++         * is then folded (accumulated) with the next data chunk in PART1 and
++         * stored in V1. Repeat this step for the register contents
++         * in V2, V3, and V4 respectively.
++         */
++        v1 = (uv2di)vec_gfmsum_accum_128(r2r1, v1, part1);
++        v2 = (uv2di)vec_gfmsum_accum_128(r2r1, v2, part2);
++        v3 = (uv2di)vec_gfmsum_accum_128(r2r1, v3, part3);
++        v4 = (uv2di)vec_gfmsum_accum_128(r2r1, v4, part4);
++
++        buf += 64;
++        len -= 64;
++    }
++
++    /*
++     * Fold V1 to V4 into a single 128-bit value in V1.  Multiply V1 with R3
++     * and R4 and accumulating the next 128-bit chunk until a single 128-bit
++     * value remains.
++     */
++    v1 = (uv2di)vec_gfmsum_accum_128(r4r3, v1, (uv16qi)v2);
++    v1 = (uv2di)vec_gfmsum_accum_128(r4r3, v1, (uv16qi)v3);
++    v1 = (uv2di)vec_gfmsum_accum_128(r4r3, v1, (uv16qi)v4);
++
++    while (len >= 16) {
++        /* Load next data chunk */
++        v2 = vec_perm(*(uv2di *)buf, *(uv2di *)buf, perm_le2be);
++
++        /* Fold next data chunk */
++        v1 = (uv2di)vec_gfmsum_accum_128(r4r3, v1, (uv16qi)v2);
++
++        buf += 16;
++        len -= 16;
++    }
++
++    /*
++     * Set up a vector register for byte shifts.  The shift value must
++     * be loaded in bits 1-4 in byte element 7 of a vector register.
++     * Shift by 8 bytes: 0x40
++     * Shift by 4 bytes: 0x20
++     */
++    uv16qi v9 = {0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0};
++    v9 = vec_insert((unsigned char)0x40, v9, 7);
++
++    /*
++     * Prepare V0 for the next GF(2) multiplication: shift V0 by 8 bytes
++     * to move R4 into the rightmost doubleword and set the leftmost
++     * doubleword to 0x1.
++     */
++    v0 = vec_srb(r4r3, (uv2di)v9);
++    v0[0] = 1;
++
++    /*
++     * Compute GF(2) product of V1 and V0.  The rightmost doubleword
++     * of V1 is multiplied with R4.  The leftmost doubleword of V1 is
++     * multiplied by 0x1 and is then XORed with rightmost product.
++     * Implicitly, the intermediate leftmost product becomes padded
++     */
++    v1 = (uv2di)vec_gfmsum_128(v0, v1);
++
++    /*
++     * Now do the final 32-bit fold by multiplying the rightmost word
++     * in V1 with R5 and XOR the result with the remaining bits in V1.
++     *
++     * To achieve this by a single VGFMAG, right shift V1 by a word
++     * and store the result in V2 which is then accumulated.  Use the
++     * vector unpack instruction to load the rightmost half of the
++     * doubleword into the rightmost doubleword element of V1; the other
++     * half is loaded in the leftmost doubleword.
++     * The vector register with CONST_R5 contains the R5 constant in the
++     * rightmost doubleword and the leftmost doubleword is zero to ignore
++     * the leftmost product of V1.
++     */
++    v9 = vec_insert((unsigned char)0x20, v9, 7);
++    v2 = vec_srb(v1, (uv2di)v9);
++    v1 = vec_unpackl((uv4si)v1);  /* Split rightmost doubleword */
++    v1 = (uv2di)vec_gfmsum_accum_128(r5, v1, (uv16qi)v2);
++
++    /*
++     * Apply a Barret reduction to compute the final 32-bit CRC value.
++     *
++     * The input values to the Barret reduction are the degree-63 polynomial
++     * in V1 (R(x)), degree-32 generator polynomial, and the reduction
++     * constant u.  The Barret reduction result is the CRC value of R(x) mod
++     * P(x).
++     *
++     * The Barret reduction algorithm is defined as:
++     *
++     *    1. T1(x) = floor( R(x) / x^32 ) GF2MUL u
++     *    2. T2(x) = floor( T1(x) / x^32 ) GF2MUL P(x)
++     *    3. C(x)  = R(x) XOR T2(x) mod x^32
++     *
++     * Note: The leftmost doubleword of vector register containing
++     * CONST_RU_POLY is zero and, thus, the intermediate GF(2) product
++     * is zero and does not contribute to the final result.
++     */
++
++    /* T1(x) = floor( R(x) / x^32 ) GF2MUL u */
++    v2 = vec_unpackl((uv4si)v1);
++    v2 = (uv2di)vec_gfmsum_128(ru_poly, v2);
++
++    /*
++     * Compute the GF(2) product of the CRC polynomial with T1(x) in
++     * V2 and XOR the intermediate result, T2(x), with the value in V1.
++     * The final result is stored in word element 2 of V2.
++     */
++    v2 = vec_unpackl((uv4si)v2);
++    v2 = (uv2di)vec_gfmsum_accum_128(crc_poly, v2, (uv16qi)v1);
++
++    return ((uv4si)v2)[2];
++}
++
++#define VX_MIN_LEN 64
++#define VX_ALIGNMENT 16L
++#define VX_ALIGN_MASK (VX_ALIGNMENT - 1)
++
++#include <stdint.h>
++ZLIB_INTERNAL uint32_t s390_crc32_vx(uint32_t crc, const unsigned char *buf, uint64_t len) {
++    uint64_t prealign, aligned, remaining;
++
++    if (buf == Z_NULL)
++        return 0;
++
++    if (len < VX_MIN_LEN + VX_ALIGN_MASK)
++        return crc32_big(crc, buf, len);
++
++    if ((uintptr_t)buf & VX_ALIGN_MASK) {
++        prealign = VX_ALIGNMENT - ((uintptr_t)buf & VX_ALIGN_MASK);
++        len -= prealign;
++        crc = crc32_big(crc, buf, prealign);
++        buf += prealign;
++    }
++    aligned = len & ~VX_ALIGN_MASK;
++    remaining = len & VX_ALIGN_MASK;
++
++    crc = crc32_le_vgfm_16(crc ^ 0xffffffff, buf, (size_t)aligned) ^ 0xffffffff;
++
++    if (remaining)
++        crc = crc32_big(crc, buf + aligned, remaining);
++
++    return crc;
++}
+diff --git a/crc32.c b/crc32.c
+index 12daa5e..000724e 100644
+--- a/crc32.c
++++ b/crc32.c
+@@ -35,9 +35,9 @@
+ #  define BYFOUR
+ #endif
+ #ifdef BYFOUR
+-   local unsigned long crc32_little OF((unsigned long,
++   ZLIB_INTERNAL unsigned long crc32_little OF((unsigned long,
+                         const unsigned char FAR *, z_size_t));
+-   local unsigned long crc32_big OF((unsigned long,
++   ZLIB_INTERNAL unsigned long crc32_big OF((unsigned long,
+                         const unsigned char FAR *, z_size_t));
+ #  define TBLS 8
+ #else
+@@ -252,6 +252,12 @@ unsigned long crc32_vpmsum(unsigned long, const unsigned char FAR *, z_size_t);
+ #endif
+ #endif
+ 
++#ifdef S390_CRC32_VX
++unsigned long s390_crc32_vx(unsigned long, const unsigned char *, z_size_t);
++#include <sys/auxv.h>
++#include <asm/hwcap.h>
++#endif
++
+ /* due to a quirk of gnu_indirect_function - "local" (aka static) is applied to
+  * crc32_z which is not desired. crc32_z_ifunc is implictly "local" */
+ #ifndef Z_IFUNC_ASM
+@@ -269,6 +275,11 @@ unsigned long (*(crc32_z_ifunc(void)))(unsigned long, const unsigned char FAR *,
+ #endif
+ #endif /* _ARCH_PWR8 */
+ 
++#ifdef S390_CRC32_VX
++    if (getauxval(AT_HWCAP) & HWCAP_S390_VX)
++        return s390_crc32_vx;
++#endif
++
+     /* return a function pointer for optimized arches here */
+ 
+ #ifdef DYNAMIC_CRC_TABLE
+@@ -337,7 +348,7 @@ unsigned long ZEXPORT crc32(crc, buf, len)
+ #define DOLIT32 DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4; DOLIT4
+ 
+ /* ========================================================================= */
+-local unsigned long crc32_little(crc, buf, len)
++ZLIB_INTERNAL unsigned long crc32_little(crc, buf, len)
+     unsigned long crc;
+     const unsigned char FAR *buf;
+     z_size_t len;
+@@ -378,7 +389,7 @@
+ #define DOBIG32 DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4; DOBIG4
+ 
+ /* ========================================================================= */
+-local unsigned long crc32_big(crc, buf, len)
++ZLIB_INTERNAL unsigned long crc32_big(crc, buf, len)
+     unsigned long crc;
+     const unsigned char FAR *buf;
+     z_size_t len;
+diff --git a/crc32_p.h b/crc32_p.h
+new file mode 100644
+index 0000000..0773222
+--- /dev/null
++++ b/crc32_p.h
+@@ -0,0 +1,20 @@
++#ifndef CRC32_P_H_
++#define CRC32_P_H_
++
++#include <stdint.h>
++
++#include "zutil.h"
++
++#if defined(__APPLE__) || defined(__arm__) || defined(__aarch64__)
++# include <machine/endian.h>
++#else
++# include <endian.h>
++#endif
++
++#if BYTE_ORDER == LITTLE_ENDIAN
++extern uint32_t crc32_little(uint32_t, const unsigned char *, uint64_t);
++#elif BYTE_ORDER == BIG_ENDIAN
++extern uint32_t crc32_big(uint32_t, const unsigned char *, uint64_t);
++#endif
++
++#endif /* CRC32_P_H_ */
+-- 
+2.25.1
+
diff -Nru zlib-1.2.11.dfsg/debian/patches/series zlib-1.2.11.dfsg/debian/patches/series
--- zlib-1.2.11.dfsg/debian/patches/series	2020-10-15 12:01:24.000000000 +0200
+++ zlib-1.2.11.dfsg/debian/patches/series	2021-08-06 15:15:44.000000000 +0200
@@ -4,3 +4,4 @@
 410.patch
 410-lp1899621.patch
 335.diff
+lp1932010-ibm-z-add-vectorized-crc32-implementation.patch
diff -Nru zlib-1.2.11.dfsg/debian/rules zlib-1.2.11.dfsg/debian/rules
--- zlib-1.2.11.dfsg/debian/rules	2020-09-23 22:44:14.000000000 +0200
+++ zlib-1.2.11.dfsg/debian/rules	2021-08-06 15:15:44.000000000 +0200
@@ -21,6 +21,11 @@
 LDFLAGS = `dpkg-buildflags --get LDFLAGS`
 EXTRA_MAKE =
 
+CONFIGURE_COMMON=--shared --prefix=/usr
+CONFIGURE_HOST=--libdir=\$${prefix}/lib/$(DEB_HOST_MULTIARCH)
+CONFIGURE_64=--libdir=\$${prefix}/usr/lib64
+CONFIGURE_32=--libdir=\$${prefix}/usr/lib32
+
 # binutils doesn't supply the prefixed version normally like GCC does so
 # we can't just unconditionally use DEB_HOST_GNU_TYPE-ar
 ifeq ($(DEB_HOST_GNU_TYPE),$(DEB_BUILD_GNU_TYPE))
@@ -46,8 +51,9 @@
 # s390x fails at compatibility.
 ifneq (,$(findstring $(DEB_HOST_ARCH), s390x))
 m32=-m31
-CFLAGS += -DDFLTCC -DDFLTCC_LEVEL_MASK=0x7e
-EXTRA_MAKE += OBJA=dfltcc.o PIC_OBJA=dfltcc.lo
+CFLAGS += -DDFLTCC_LEVEL_MASK=0x7e
+CONFIGURE_COMMON += --dfltcc
+CONFIGURE_HOST += --crc32-vx
 else
 m32=-m32
 endif
@@ -90,12 +96,17 @@
 
 COPYLIST=*.h *.c *.in zlib.map configure zlib.3 test contrib
 
+CONFIGURE_COMMON=--shared --prefix=/usr
+CONFIGURE_HOST=--libdir=\$${prefix}/lib/$(DEB_HOST_MULTIARCH)
+CONFIGURE_64=--libdir=\$${prefix}/usr/lib64
+CONFIGURE_32=--libdir=\$${prefix}/usr/lib32
+
 configure-stamp: configure
 	dh_testdir
 	if [ ! -f Makefile.stash ]; then cp Makefile Makefile.stash ; fi
 
-	AR=$(AR) CC="$(DEB_HOST_GNU_TYPE)-gcc" CFLAGS="$(CFLAGS)" LDFLAGS="$(LDFLAGS)" uname=GNU ./configure --shared --prefix=/usr --libdir=\$${prefix}/lib/$(DEB_HOST_MULTIARCH)
+	AR=$(AR) CC="$(DEB_HOST_GNU_TYPE)-gcc" CFLAGS="$(CFLAGS)" LDFLAGS="$(LDFLAGS)" uname=GNU ./configure $(CONFIGURE_COMMON) $(CONFIGURE_HOST)
+
 	touch $@
@@ -106,7 +117,7 @@
 	cp -r $(COPYLIST) debian/64
 	cd debian/64 && AR=$(AR) CC="$(DEB_HOST_GNU_TYPE)-gcc $(m64)" \
 	CFLAGS="$(CFLAGS)" LDFLAGS="$(LDFLAGS)" \
-	uname=GNU ./configure --shared --prefix=/usr --libdir=\$${prefix}/usr/lib64
+	uname=GNU ./configure $(CONFIGURE_COMMON) --libdir=\$${prefix}/usr/lib64
 	touch $@
 
 configure32-stamp: configure
@@ -116,7 +127,7 @@
 	cp -r $(COPYLIST) debian/32
 	cd debian/32 && AR=$(AR) CC="$(DEB_HOST_GNU_TYPE)-gcc $(m32)" \
 	CFLAGS="$(CFLAGS)" LDFLAGS="$(LDFLAGS)" \
-	uname=GNU ./configure --shared --prefix=/usr --libdir=\$${prefix}/usr/lib32
+	uname=GNU ./configure $(CONFIGURE_COMMON) --libdir=\$${prefix}/usr/lib32
	touch $@
 
 configuren32-stamp: configure
@@ -126,7 +137,7 @@
 	cp -r $(COPYLIST) debian/n32
 	cd debian/n32 && AR=$(AR) CC="$(DEB_HOST_GNU_TYPE)-gcc $(mn32)" \
 	CFLAGS="$(CFLAGS)" LDFLAGS="$(LDFLAGS)" \
-	uname=GNU ./configure --shared --prefix=/usr --libdir=\$${prefix}/usr/lib32
+	uname=GNU ./configure $(CONFIGURE_COMMON) --libdir=\$${prefix}/usr/lib32
 	touch $@
 
 configurex32-stamp: configure
@@ -136,7 +147,7 @@
 	cp -r $(COPYLIST) debian/x32
 	cd debian/x32 && AR=$(AR) CC="$(DEB_HOST_GNU_TYPE)-gcc $(mx32)" \
 	CFLAGS="$(CFLAGS)" LDFLAGS="$(LDFLAGS)" \
-	uname=GNU ./configure --shared --prefix=/usr --libdir=\$${prefix}/usr/libx32
+	uname=GNU ./configure $(CONFIGURE_COMMON) --libdir=\$${prefix}/usr/libx32
 	touch $@
 
 build: build-stamp $(EXTRA_BUILD)
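
A few standalone notes follow for reviewers; none of them are part of the debdiff above. First, the runtime dispatch: the crc32_z_ifunc() hunk only returns s390_crc32_vx when the kernel advertises the vector facility in the ELF auxiliary vector. The sketch below reproduces that probe as a plain C program; it assumes glibc's <sys/auxv.h> exposes HWCAP_S390_VX (as the patch itself relies on), and main() and the messages are illustrative only.

/* Probe the ELF auxiliary vector the same way the patched crc32_z_ifunc()
 * does.  Builds anywhere; the s390x branch is only compiled in where
 * HWCAP_S390_VX is defined. */
#include <stdio.h>
#ifdef __linux__
#include <sys/auxv.h>
#endif

int main(void) {
#if defined(__linux__) && defined(HWCAP_S390_VX)
    if (getauxval(AT_HWCAP) & HWCAP_S390_VX) {
        puts("vector facility present: crc32 would dispatch to s390_crc32_vx");
        return 0;
    }
#endif
    puts("no vector facility: the generic crc32 paths remain in use");
    return 0;
}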
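
Second, the alignment handling in s390_crc32_vx(): the vector core crc32_le_vgfm_16() is only ever handed a 16-byte-aligned buffer whose length is a multiple of 16; the ragged head and tail go through the scalar crc32_big(), and inputs shorter than VX_MIN_LEN + VX_ALIGN_MASK skip the vector path entirely. A minimal sketch of just that arithmetic, reusing the patch's VX_* constants; the buffer and length are assumed values for illustration.

/* Reproduce the prealign/aligned/remaining split from s390_crc32_vx().
 * data, buf and len are hypothetical inputs, not taken from the patch. */
#include <stdint.h>
#include <stdio.h>

#define VX_ALIGNMENT  16UL
#define VX_ALIGN_MASK (VX_ALIGNMENT - 1)

int main(void) {
    unsigned char data[256];
    const unsigned char *buf = data + 5;   /* deliberately misaligned */
    uint64_t len = 200;
    uint64_t prealign = 0;

    /* Scalar prologue: bytes up to the next 16-byte boundary. */
    if ((uintptr_t)buf & VX_ALIGN_MASK)
        prealign = VX_ALIGNMENT - ((uintptr_t)buf & VX_ALIGN_MASK);

    /* Vector middle (a multiple of 16 bytes) and scalar tail. */
    uint64_t aligned = (len - prealign) & ~VX_ALIGN_MASK;
    uint64_t remaining = (len - prealign) & VX_ALIGN_MASK;

    printf("prealign=%llu aligned=%llu remaining=%llu\n",
           (unsigned long long)prealign,
           (unsigned long long)aligned,
           (unsigned long long)remaining);
    return 0;
}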
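
Finally, a correctness cross-check: whichever path runs, the result must equal the plain bit-reflected CRC-32 over P'(x) = 0xEDB88320 described in the crc32-vx.c comment block. The bitwise reference below (not part of the patch) prints the standard CRC-32/IEEE check value 0xCBF43926 for the input "123456789"; comparing its output against zlib's crc32() in a build configured with --crc32-vx exercises the vectorized code on s390x.

/* Bit-at-a-time reference CRC-32 (reflected, P'(x) = 0xEDB88320).  This is
 * the value the folding and Barret reduction in the patch must reproduce. */
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t crc32_bitwise(uint32_t crc, const unsigned char *buf, size_t len) {
    crc = ~crc;
    while (len--) {
        crc ^= *buf++;
        for (int k = 0; k < 8; k++)
            crc = (crc >> 1) ^ (crc & 1 ? 0xEDB88320u : 0);
    }
    return ~crc;
}

int main(void) {
    const char *s = "123456789";
    printf("%08X\n", crc32_bitwise(0, (const unsigned char *)s, strlen(s)));
    /* Expected output: CBF43926 */
    return 0;
}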