From 9c946e22fcad10c2a44c0380c0909da6732097ce Mon Sep 17 00:00:00 2001 From: Matt Sarett <msarett@google.com> Date: Mon, 15 Feb 2016 14:41:27 -0500 Subject: [PATCH 1/8] Add SSSE3 and SSE2 optimized png filter functions --- intel/filter_sse2_intrinsics.c | 261 +++++++++++++++++++++++++++++++++ intel/intel_init.c | 38 +++++ pngpriv.h | 27 ++++ 3 files changed, 326 insertions(+) create mode 100644 intel/filter_sse2_intrinsics.c create mode 100644 intel/intel_init.c diff --git a/intel/filter_sse2_intrinsics.c b/intel/filter_sse2_intrinsics.c new file mode 100644 index 000000000..17b385efb --- /dev/null +++ b/intel/filter_sse2_intrinsics.c @@ -0,0 +1,261 @@ + +/* filter_sse2_intrinsics.c - SSE2 optimized filter functions + * + * Copyright (c) 2016 Google, Inc. + * + * This code is released under the libpng license. + * For conditions of distribution and use, see the disclaimer + * and license in png.h + */ + +#include "../pngpriv.h" + +#ifdef PNG_READ_SUPPORTED + +#if PNG_INTEL_SSE2_OPT > 0 + +#if PNG_INTEL_SSE2_OPT == 1 +#include <emmintrin.h> +#elif PNG_INTEL_SSE2_OPT == 2 +#include <tmmintrin.h> +#endif + +// Functions in this file look at most 3 pixels (a,b,c) to predict the 4th (d). +// They're positioned like this: +// prev: c b +// row: a d +// The Sub filter predicts d=a, Avg d=(a+b)/2, and Paeth predicts d to be +// whichever of a, b, or c is closest to p=a+b-c. +// Up also exists, predicting d=b. But there is not need to optimize Up +// because the compiler will vectorize it for us. +void png_read_filter_row_sub3_sse2(png_row_infop row_info, png_bytep row, + png_const_bytep prev) +{ + // The Sub filter predicts each pixel as the previous pixel, a. + // There is no pixel to the left of the first pixel. It's encoded directly. + // That works with our main loop if we just say that left pixel was zero.
+ __m128i a, d = _mm_setzero_si128(); + + int rb = row_info->rowbytes; + while (rb > 0) { + a = d; memcpy(&d, row, 3); + d = _mm_add_epi8(d, a); + memcpy(row, &d, 3); + + row += 3; + rb -= 3; + } +} + +void png_read_filter_row_sub4_sse2(png_row_infop row_info, png_bytep row, + png_const_bytep prev) +{ + // The Sub filter predicts each pixel as the previous pixel, a. + // There is no pixel to the left of the first pixel. It's encoded directly. + // That works with our main loop if we just say that left pixel was zero. + __m128i a, d = _mm_setzero_si128(); + + int rb = row_info->rowbytes; + while (rb > 0) { + a = d; memcpy(&d, row, 4); + d = _mm_add_epi8(d, a); + memcpy(row, &d, 4); + + row += 4; + rb -= 4; + } +} + +void png_read_filter_row_avg3_sse2(png_row_infop row_info, png_bytep row, + png_const_bytep prev) +{ + // The Avg filter predicts each pixel as the (truncated) average of a and b. + // There's no pixel to the left of the first pixel. Luckily, it's + // predicted to be half of the pixel above it. So again, this works + // perfectly with our loop if we make sure a starts at zero. + const __m128i zero = _mm_setzero_si128(); + __m128i b; + __m128i a, d = zero; + + int rb = row_info->rowbytes; + while (rb > 0) { + memcpy(&b, prev, 3); + a = d; memcpy(&d, row, 3); + + // PNG requires a truncating average here, so sadly we can't just use + // _mm_avg_epu8... + __m128i avg = _mm_avg_epu8(a,b); + // ...but we can fix it up by subtracting off 1 if it rounded up. + avg = _mm_sub_epi8(avg, _mm_and_si128(_mm_xor_si128(a,b), + _mm_set1_epi8(1))); + + d = _mm_add_epi8(d, avg); + memcpy(row, &d, 3); + + prev += 3; + row += 3; + rb -= 3; + } +} +void png_read_filter_row_avg4_sse2(png_row_infop row_info, png_bytep row, + png_const_bytep prev) +{ + // The Avg filter predicts each pixel as the (truncated) average of a and b. + // There's no pixel to the left of the first pixel. Luckily, it's + // predicted to be half of the pixel above it. 
So again, this works + // perfectly with our loop if we make sure a starts at zero. + const __m128i zero = _mm_setzero_si128(); + __m128i b; + __m128i a, d = zero; + + int rb = row_info->rowbytes; + while (rb > 0) { + memcpy(&b, prev, 4); + a = d; memcpy(&d, row, 4); + + // PNG requires a truncating average here, so sadly we can't just use + // _mm_avg_epu8... + __m128i avg = _mm_avg_epu8(a,b); + // ...but we can fix it up by subtracting off 1 if it rounded up. + avg = _mm_sub_epi8(avg, _mm_and_si128(_mm_xor_si128(a,b), + _mm_set1_epi8(1))); + + d = _mm_add_epi8(d, avg); + memcpy(row, &d, 4); + + prev += 4; + row += 4; + rb -= 4; + } +} + +// Returns |x| for 16-bit lanes. +static __m128i abs_i16(__m128i x) { +#if PNG_INTEL_SSE2_OPT >= 2 + return _mm_abs_epi16(x); +#else + // Read this all as, return x<0 ? -x : x. + // To negate two's complement, you flip all the bits then add 1. + __m128i is_negative = _mm_cmplt_epi16(x, _mm_setzero_si128()); + // Flip negative lanes. + x = _mm_xor_si128(x, is_negative); + // +1 to negative lanes, else +0. + x = _mm_add_epi16(x, _mm_srli_epi16(is_negative, 15)); + return x; +#endif +} + +// Bytewise c ? t : e. +static __m128i if_then_else(__m128i c, __m128i t, __m128i e) { + return _mm_or_si128(_mm_and_si128(c, t), _mm_andnot_si128(c, e)); +} + +void png_read_filter_row_paeth3_sse2(png_row_infop row_info, png_bytep row, + png_const_bytep prev) +{ + // Paeth tries to predict pixel d using the pixel to the left of it, a, + // and two pixels from the previous row, b and c: + // prev: c b + // row: a d + // The Paeth function predicts d to be whichever of a, b, or c is nearest to + // p=a+b-c. The first pixel has no left context, and so uses an Up filter, + // p = b. This works naturally with our main loop's p = a+b-c if we force a + // and c to zero. Here we zero b and d, which become c and a respectively + // at the start of the loop. 
+ const __m128i zero = _mm_setzero_si128(); + __m128i c, b = zero, + a, d = zero; + + int rb = row_info->rowbytes; + while (rb > 0) { + // It's easiest to do this math (particularly, deal with pc) with 16-bit + // intermediates. + memcpy(&b, prev, 3); + memcpy(&d, row, 3); + c = b; b = _mm_unpacklo_epi8(b, zero); + a = d; d = _mm_unpacklo_epi8(d, zero); + __m128i pa = _mm_sub_epi16(b,c), + // (p-a) == (a+b-c - a) == (b-c) + pb = _mm_sub_epi16(a,c), + // (p-b) == (a+b-c - b) == (a-c) + pc = _mm_add_epi16(pa,pb); + // (p-c) == (a+b-c - c) == (a+b-c-c) == (b-c)+(a-c) + + pa = abs_i16(pa); // |p-a| + pb = abs_i16(pb); // |p-b| + pc = abs_i16(pc); // |p-c| + + __m128i smallest = _mm_min_epi16(pc, _mm_min_epi16(pa, pb)); + + // Paeth breaks ties favoring a over b over c. + __m128i nearest = if_then_else(_mm_cmpeq_epi16(smallest, pa), a, + if_then_else(_mm_cmpeq_epi16(smallest, pb), b, + c)); + + + // Note `_epi8`: we need addition to wrap modulo 255. + d = _mm_add_epi8(d, nearest); + __m128i r = _mm_packus_epi16(d,d); + memcpy(row, &r, 3); + prev += 3; + row += 3; + rb -= 3; + } +} + +void png_read_filter_row_paeth4_sse2(png_row_infop row_info, png_bytep row, + png_const_bytep prev) +{ + // Paeth tries to predict pixel d using the pixel to the left of it, a, + // and two pixels from the previous row, b and c: + // prev: c b + // row: a d + // The Paeth function predicts d to be whichever of a, b, or c is nearest to + // p=a+b-c. The first pixel has no left context, and so uses an Up filter, + // p = b. This works naturally with our main loop's p = a+b-c if we force a + // and c to zero. Here we zero b and d, which become c and a respectively + // at the start of the loop. + const __m128i zero = _mm_setzero_si128(); + __m128i c, b = zero, + a, d = zero; + + int rb = row_info->rowbytes; + while (rb > 0) { + // It's easiest to do this math (particularly, deal with pc) with 16-bit + // intermediates. 
+ memcpy(&b, prev, 4); + memcpy(&d, row, 4); + c = b; b = _mm_unpacklo_epi8(b, zero); + a = d; d = _mm_unpacklo_epi8(d, zero); + __m128i pa = _mm_sub_epi16(b,c), + // (p-a) == (a+b-c - a) == (b-c) + pb = _mm_sub_epi16(a,c), + // (p-b) == (a+b-c - b) == (a-c) + pc = _mm_add_epi16(pa,pb); + // (p-c) == (a+b-c - c) == (a+b-c-c) == (b-c)+(a-c) + + pa = abs_i16(pa); // |p-a| + pb = abs_i16(pb); // |p-b| + pc = abs_i16(pc); // |p-c| + + __m128i smallest = _mm_min_epi16(pc, _mm_min_epi16(pa, pb)); + + // Paeth breaks ties favoring a over b over c. + __m128i nearest = if_then_else(_mm_cmpeq_epi16(smallest, pa), a, + if_then_else(_mm_cmpeq_epi16(smallest, pb), b, + c)); + + + // Note `_epi8`: we need addition to wrap modulo 255. + d = _mm_add_epi8(d, nearest); + __m128i r = _mm_packus_epi16(d,d); + memcpy(row, &r, 4); + prev += 4; + row += 4; + rb -= 4; + } +} + +#endif /* PNG_INTEL_SSE2_OPT > 0 */ +#endif /* READ */ diff --git a/intel/intel_init.c b/intel/intel_init.c new file mode 100644 index 000000000..6b1d859c6 --- /dev/null +++ b/intel/intel_init.c @@ -0,0 +1,38 @@ + +/* intel_init.c - SSE2 optimized filter functions + * + * Copyright (c) 2016 Google, Inc. + * + * This code is released under the libpng license. 
+ * For conditions of distribution and use, see the disclaimer + * and license in png.h + */ + +#include "../pngpriv.h" + +#ifdef PNG_READ_SUPPORTED +#if PNG_INTEL_SSE2_OPT > 0 + +void +png_init_filter_functions_sse2(png_structp pp, unsigned int bpp) +{ + if (bpp == 3) + { + pp->read_filter[PNG_FILTER_VALUE_SUB-1] = png_read_filter_row_sub3_sse2; + pp->read_filter[PNG_FILTER_VALUE_AVG-1] = png_read_filter_row_avg3_sse2; + pp->read_filter[PNG_FILTER_VALUE_PAETH-1] = + png_read_filter_row_paeth3_sse2; + } + else if (bpp == 4) + { + pp->read_filter[PNG_FILTER_VALUE_SUB-1] = png_read_filter_row_sub4_sse2; + pp->read_filter[PNG_FILTER_VALUE_AVG-1] = png_read_filter_row_avg4_sse2; + pp->read_filter[PNG_FILTER_VALUE_PAETH-1] = + png_read_filter_row_paeth4_sse2; + } + + // No need optimize PNG_FILTER_VALUE_UP. The compiler should autovectorize. +} + +#endif /* PNG_INTEL_SSE2_OPT > 0 */ +#endif /* PNG_READ_SUPPORTED */ diff --git a/pngpriv.h b/pngpriv.h index e210a01c5..9f4d6302d 100644 --- a/pngpriv.h +++ b/pngpriv.h @@ -182,6 +182,18 @@ # endif #endif /* PNG_ARM_NEON_OPT > 0 */ +#ifndef PNG_INTEL_SSE2_OPT +# if defined(__SSE3__) || defined(__SSSE3__) +# define PNG_INTEL_SSE2_OPT 2 +# elif defined(__SSE2__) +# define PNG_INTEL_SSE2_OPT 1 +# endif +#endif + +#if PNG_INTEL_SSE2_OPT > 0 +# define PNG_FILTER_OPTIMIZATIONS png_init_filter_functions_sse2 +#endif + /* Is this a build of a DLL where compilation of the object modules requires * different preprocessor settings to those required for a simple library? If * so PNG_BUILD_DLL must be set. 
@@ -1189,6 +1201,19 @@ PNG_INTERNAL_FUNCTION(void,png_read_filter_row_paeth3_neon,(png_row_infop PNG_INTERNAL_FUNCTION(void,png_read_filter_row_paeth4_neon,(png_row_infop row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY); +PNG_INTERNAL_FUNCTION(void,png_read_filter_row_sub3_sse2,(png_row_infop + row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY); +PNG_INTERNAL_FUNCTION(void,png_read_filter_row_sub4_sse2,(png_row_infop + row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY); +PNG_INTERNAL_FUNCTION(void,png_read_filter_row_avg3_sse2,(png_row_infop + row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY); +PNG_INTERNAL_FUNCTION(void,png_read_filter_row_avg4_sse2,(png_row_infop + row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY); +PNG_INTERNAL_FUNCTION(void,png_read_filter_row_paeth3_sse2,(png_row_infop + row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY); +PNG_INTERNAL_FUNCTION(void,png_read_filter_row_paeth4_sse2,(png_row_infop + row_info, png_bytep row, png_const_bytep prev_row),PNG_EMPTY); + /* Choose the best filter to use and filter the row data */ PNG_INTERNAL_FUNCTION(void,png_write_find_filter,(png_structrp png_ptr, png_row_infop row_info),PNG_EMPTY); @@ -1915,6 +1940,8 @@ PNG_INTERNAL_FUNCTION(void, PNG_FILTER_OPTIMIZATIONS, (png_structp png_ptr, */ PNG_INTERNAL_FUNCTION(void, png_init_filter_functions_neon, (png_structp png_ptr, unsigned int bpp), PNG_EMPTY); +PNG_INTERNAL_FUNCTION(void, png_init_filter_functions_sse2, + (png_structp png_ptr, unsigned int bpp), PNG_EMPTY); #endif PNG_INTERNAL_FUNCTION(png_uint_32, png_check_keyword, (png_structrp png_ptr, From f84f95788121dcf5946f721ff88d14ee64247a40 Mon Sep 17 00:00:00 2001 From: Matt Sarett Date: Mon, 15 Feb 2016 14:45:14 -0500 Subject: [PATCH 2/8] Use PNG_INTEL_SSE_OPT instead of PNG_INTEL_SSE2_OPT --- intel/filter_sse2_intrinsics.c | 10 +++++----- intel/intel_init.c | 4 ++-- pngpriv.h | 8 ++++---- 3 files changed, 11 insertions(+), 11 
deletions(-) diff --git a/intel/filter_sse2_intrinsics.c b/intel/filter_sse2_intrinsics.c index 17b385efb..51837a64c 100644 --- a/intel/filter_sse2_intrinsics.c +++ b/intel/filter_sse2_intrinsics.c @@ -12,11 +12,11 @@ #ifdef PNG_READ_SUPPORTED -#if PNG_INTEL_SSE2_OPT > 0 +#if PNG_INTEL_SSE_OPT > 0 -#if PNG_INTEL_SSE2_OPT == 1 +#if PNG_INTEL_SSE_OPT == 1 #include <emmintrin.h> -#elif PNG_INTEL_SSE2_OPT == 2 +#elif PNG_INTEL_SSE_OPT == 2 #include <tmmintrin.h> #endif @@ -132,7 +132,7 @@ void png_read_filter_row_avg4_sse2(png_row_infop row_info, png_bytep row, // Returns |x| for 16-bit lanes. static __m128i abs_i16(__m128i x) { -#if PNG_INTEL_SSE2_OPT >= 2 +#if PNG_INTEL_SSE_OPT >= 2 return _mm_abs_epi16(x); #else // Read this all as, return x<0 ? -x : x. @@ -257,5 +257,5 @@ void png_read_filter_row_paeth4_sse2(png_row_infop row_info, png_bytep row, } } -#endif /* PNG_INTEL_SSE2_OPT > 0 */ +#endif /* PNG_INTEL_SSE_OPT > 0 */ #endif /* READ */ diff --git a/intel/intel_init.c b/intel/intel_init.c index 6b1d859c6..7e7af2944 100644 --- a/intel/intel_init.c +++ b/intel/intel_init.c @@ -11,7 +11,7 @@ #include "../pngpriv.h" #ifdef PNG_READ_SUPPORTED -#if PNG_INTEL_SSE2_OPT > 0 +#if PNG_INTEL_SSE_OPT > 0 void png_init_filter_functions_sse2(png_structp pp, unsigned int bpp) @@ -34,5 +34,5 @@ png_init_filter_functions_sse2(png_structp pp, unsigned int bpp) // No need optimize PNG_FILTER_VALUE_UP. The compiler should autovectorize.
} -#endif /* PNG_INTEL_SSE2_OPT > 0 */ +#endif /* PNG_INTEL_SSE_OPT > 0 */ #endif /* PNG_READ_SUPPORTED */ diff --git a/pngpriv.h b/pngpriv.h index 9f4d6302d..18ff7f379 100644 --- a/pngpriv.h +++ b/pngpriv.h @@ -182,15 +182,15 @@ # endif #endif /* PNG_ARM_NEON_OPT > 0 */ -#ifndef PNG_INTEL_SSE2_OPT +#ifndef PNG_INTEL_SSE_OPT # if defined(__SSE3__) || defined(__SSSE3__) -# define PNG_INTEL_SSE2_OPT 2 +# define PNG_INTEL_SSE_OPT 2 # elif defined(__SSE2__) -# define PNG_INTEL_SSE2_OPT 1 +# define PNG_INTEL_SSE_OPT 1 # endif #endif -#if PNG_INTEL_SSE2_OPT > 0 +#if PNG_INTEL_SSE_OPT > 0 # define PNG_FILTER_OPTIMIZATIONS png_init_filter_functions_sse2 #endif From 577c1f03055e3b8c6ca9b7b8883eab1875f0a307 Mon Sep 17 00:00:00 2001 From: Matt Sarett <msarett@google.com> Date: Tue, 16 Feb 2016 10:10:11 -0500 Subject: [PATCH 3/8] Reformat a bit, add MSVS checks, add SSE4 --- intel/filter_sse2_intrinsics.c | 176 +++++++++++++++++++-------------- intel/intel_init.c | 7 ++ pngpriv.h | 7 +- 3 files changed, 114 insertions(+), 76 deletions(-) diff --git a/intel/filter_sse2_intrinsics.c b/intel/filter_sse2_intrinsics.c index 51837a64c..01fea3cc1 100644 --- a/intel/filter_sse2_intrinsics.c +++ b/intel/filter_sse2_intrinsics.c @@ -14,10 +14,10 @@ #if PNG_INTEL_SSE_OPT > 0 -#if PNG_INTEL_SSE_OPT == 1 -#include <emmintrin.h> -#elif PNG_INTEL_SSE_OPT == 2 -#include <tmmintrin.h> +#if defined(_MSC_VER) && defined(_WIN64) +#include <intrin.h> +#else +#include <x86intrin.h> #endif @@ -26,8 +26,25 @@ // row: a d // The Sub filter predicts d=a, Avg d=(a+b)/2, and Paeth predicts d to be // whichever of a, b, or c is closest to p=a+b-c. -// Up also exists, predicting d=b. But there is not need to optimize Up -// because the compiler will vectorize it for us.
+ +static __m128i load3(const void* p) { + png_uint_32 packed; + memcpy(&packed, p, 3); + return _mm_cvtsi32_si128(packed); +} + +static __m128i load4(const void* p) { + return _mm_cvtsi32_si128(*(const int*)p); +} + +static void store3(void* p, __m128i v) { + png_uint_32 packed = _mm_cvtsi128_si32(v); + memcpy(p, &packed, 3); +} + +static void store4(void* p, __m128i v) { + *(int*)p = _mm_cvtsi128_si32(v); +} void png_read_filter_row_sub3_sse2(png_row_infop row_info, png_bytep row, png_const_bytep prev) @@ -39,13 +56,13 @@ void png_read_filter_row_sub3_sse2(png_row_infop row_info, png_bytep row, int rb = row_info->rowbytes; while (rb > 0) { - a = d; memcpy(&d, row, 3); + a = d; d = load3(row); d = _mm_add_epi8(d, a); - memcpy(row, &d, 3); + store3(row, d); row += 3; rb -= 3; - } + } } void png_read_filter_row_sub4_sse2(png_row_infop row_info, png_bytep row, @@ -58,13 +75,13 @@ void png_read_filter_row_sub4_sse2(png_row_infop row_info, png_bytep row, int rb = row_info->rowbytes; while (rb > 0) { - a = d; memcpy(&d, row, 4); + a = d; d = load4(row); d = _mm_add_epi8(d, a); - memcpy(row, &d, 4); + store4(row, d); row += 4; rb -= 4; - } + } } void png_read_filter_row_avg3_sse2(png_row_infop row_info, png_bytep row, @@ -80,24 +97,24 @@ void png_read_filter_row_avg3_sse2(png_row_infop row_info, png_bytep row, int rb = row_info->rowbytes; while (rb > 0) { - memcpy(&b, prev, 3); - a = d; memcpy(&d, row, 3); + b = load3(prev); + a = d; d = load3(row ); - // PNG requires a truncating average here, so sadly we can't just use - // _mm_avg_epu8... - __m128i avg = _mm_avg_epu8(a,b); - // ...but we can fix it up by subtracting off 1 if it rounded up. - avg = _mm_sub_epi8(avg, _mm_and_si128(_mm_xor_si128(a,b), - _mm_set1_epi8(1))); + // PNG requires a truncating average, so we can't just use _mm_avg_epu8... + __m128i avg = _mm_avg_epu8(a,b); + // ...but we can fix it up by subtracting off 1 if it rounded up. 
+ avg = _mm_sub_epi8(avg, _mm_and_si128(_mm_xor_si128(a,b), + _mm_set1_epi8(1))); - d = _mm_add_epi8(d, avg); - memcpy(row, &d, 3); + d = _mm_add_epi8(d, avg); + store3(row, d); - prev += 3; - row += 3; - rb -= 3; - } + prev += 3; + row += 3; + rb -= 3; + } } + void png_read_filter_row_avg4_sse2(png_row_infop row_info, png_bytep row, png_const_bytep prev) { @@ -111,23 +128,22 @@ void png_read_filter_row_avg4_sse2(png_row_infop row_info, png_bytep row, int rb = row_info->rowbytes; while (rb > 0) { - memcpy(&b, prev, 4); - a = d; memcpy(&d, row, 4); + b = load4(prev); + a = d; d = load4(row ); - // PNG requires a truncating average here, so sadly we can't just use - // _mm_avg_epu8... - __m128i avg = _mm_avg_epu8(a,b); - // ...but we can fix it up by subtracting off 1 if it rounded up. - avg = _mm_sub_epi8(avg, _mm_and_si128(_mm_xor_si128(a,b), - _mm_set1_epi8(1))); + // PNG requires a truncating average, so we can't just use _mm_avg_epu8... + __m128i avg = _mm_avg_epu8(a,b); + // ...but we can fix it up by subtracting off 1 if it rounded up. + avg = _mm_sub_epi8(avg, _mm_and_si128(_mm_xor_si128(a,b), + _mm_set1_epi8(1))); - d = _mm_add_epi8(d, avg); - memcpy(row, &d, 4); + d = _mm_add_epi8(d, avg); + store4(row, d); - prev += 4; - row += 4; - rb -= 4; - } + prev += 4; + row += 4; + rb -= 4; + } } // Returns |x| for 16-bit lanes. @@ -138,8 +154,10 @@ static __m128i abs_i16(__m128i x) { // Read this all as, return x<0 ? -x : x. // To negate two's complement, you flip all the bits then add 1. __m128i is_negative = _mm_cmplt_epi16(x, _mm_setzero_si128()); + // Flip negative lanes. x = _mm_xor_si128(x, is_negative); + // +1 to negative lanes, else +0. x = _mm_add_epi16(x, _mm_srli_epi16(is_negative, 15)); return x; @@ -148,7 +166,11 @@ static __m128i abs_i16(__m128i x) { // Bytewise c ? t : e. 
static __m128i if_then_else(__m128i c, __m128i t, __m128i e) { +#if PNG_INTEL_SSE_OPT >= 3 + return _mm_blendv_epi8(e,t,c); +#else return _mm_or_si128(_mm_and_si128(c, t), _mm_andnot_si128(c, e)); +#endif } void png_read_filter_row_paeth3_sse2(png_row_infop row_info, png_bytep row, @@ -159,10 +181,13 @@ void png_read_filter_row_paeth3_sse2(png_row_infop row_info, png_bytep row, // prev: c b // row: a d // The Paeth function predicts d to be whichever of a, b, or c is nearest to - // p=a+b-c. The first pixel has no left context, and so uses an Up filter, - // p = b. This works naturally with our main loop's p = a+b-c if we force a - // and c to zero. Here we zero b and d, which become c and a respectively - // at the start of the loop. + // p=a+b-c. + + // The first pixel has no left context, and so uses an Up filter, p = b. + // This works naturally with our main loop's p = a+b-c if we force a and c + // to zero. + // Here we zero b and d, which become c and a respectively at the start of + // the loop. const __m128i zero = _mm_setzero_si128(); __m128i c, b = zero, a, d = zero; @@ -171,16 +196,17 @@ void png_read_filter_row_paeth3_sse2(png_row_infop row_info, png_bytep row, while (rb > 0) { // It's easiest to do this math (particularly, deal with pc) with 16-bit // intermediates. 
- memcpy(&b, prev, 3); - memcpy(&d, row, 3); - c = b; b = _mm_unpacklo_epi8(b, zero); - a = d; d = _mm_unpacklo_epi8(d, zero); - __m128i pa = _mm_sub_epi16(b,c), - // (p-a) == (a+b-c - a) == (b-c) - pb = _mm_sub_epi16(a,c), - // (p-b) == (a+b-c - b) == (a-c) - pc = _mm_add_epi16(pa,pb); - // (p-c) == (a+b-c - c) == (a+b-c-c) == (b-c)+(a-c) + c = b; b = _mm_unpacklo_epi8(load3(prev), zero); + a = d; d = _mm_unpacklo_epi8(load3(row ), zero); + + // (p-a) == (a+b-c - a) == (b-c) + __m128i pa = _mm_sub_epi16(b,c); + + // (p-b) == (a+b-c - b) == (a-c) + __m128i pb = _mm_sub_epi16(a,c); + + // (p-c) == (a+b-c - c) == (a+b-c-c) == (b-c)+(a-c) + __m128i pc = _mm_add_epi16(pa,pb); pa = abs_i16(pa); // |p-a| pb = abs_i16(pb); // |p-b| @@ -193,11 +219,10 @@ void png_read_filter_row_paeth3_sse2(png_row_infop row_info, png_bytep row, if_then_else(_mm_cmpeq_epi16(smallest, pb), b, c)); - // Note `_epi8`: we need addition to wrap modulo 255. d = _mm_add_epi8(d, nearest); - __m128i r = _mm_packus_epi16(d,d); - memcpy(row, &r, 3); + store3(row, _mm_packus_epi16(d,d)); + prev += 3; row += 3; rb -= 3; @@ -212,10 +237,13 @@ void png_read_filter_row_paeth4_sse2(png_row_infop row_info, png_bytep row, // prev: c b // row: a d // The Paeth function predicts d to be whichever of a, b, or c is nearest to - // p=a+b-c. The first pixel has no left context, and so uses an Up filter, - // p = b. This works naturally with our main loop's p = a+b-c if we force a - // and c to zero. Here we zero b and d, which become c and a respectively - // at the start of the loop. + // p=a+b-c. + + // The first pixel has no left context, and so uses an Up filter, p = b. + // This works naturally with our main loop's p = a+b-c if we force a and c + // to zero. + // Here we zero b and d, which become c and a respectively at the start of + // the loop. 
const __m128i zero = _mm_setzero_si128(); __m128i c, b = zero, a, d = zero; @@ -224,16 +252,17 @@ void png_read_filter_row_paeth4_sse2(png_row_infop row_info, png_bytep row, while (rb > 0) { // It's easiest to do this math (particularly, deal with pc) with 16-bit // intermediates. - memcpy(&b, prev, 4); - memcpy(&d, row, 4); - c = b; b = _mm_unpacklo_epi8(b, zero); - a = d; d = _mm_unpacklo_epi8(d, zero); - __m128i pa = _mm_sub_epi16(b,c), - // (p-a) == (a+b-c - a) == (b-c) - pb = _mm_sub_epi16(a,c), - // (p-b) == (a+b-c - b) == (a-c) - pc = _mm_add_epi16(pa,pb); - // (p-c) == (a+b-c - c) == (a+b-c-c) == (b-c)+(a-c) + c = b; b = _mm_unpacklo_epi8(load4(prev), zero); + a = d; d = _mm_unpacklo_epi8(load4(row ), zero); + + // (p-a) == (a+b-c - a) == (b-c) + __m128i pa = _mm_sub_epi16(b,c); + + // (p-b) == (a+b-c - b) == (a-c) + __m128i pb = _mm_sub_epi16(a,c); + + // (p-c) == (a+b-c - c) == (a+b-c-c) == (b-c)+(a-c) + __m128i pc = _mm_add_epi16(pa,pb); pa = abs_i16(pa); // |p-a| pb = abs_i16(pb); // |p-b| @@ -246,11 +275,10 @@ void png_read_filter_row_paeth4_sse2(png_row_infop row_info, png_bytep row, if_then_else(_mm_cmpeq_epi16(smallest, pb), b, c)); - // Note `_epi8`: we need addition to wrap modulo 255. d = _mm_add_epi8(d, nearest); - __m128i r = _mm_packus_epi16(d,d); - memcpy(row, &r, 4); + store4(row, _mm_packus_epi16(d,d)); + prev += 4; row += 4; rb -= 4; diff --git a/intel/intel_init.c b/intel/intel_init.c index 7e7af2944..8616b53f1 100644 --- a/intel/intel_init.c +++ b/intel/intel_init.c @@ -16,6 +16,13 @@ void png_init_filter_functions_sse2(png_structp pp, unsigned int bpp) { + // The techniques used to implement each of these filters in SSE operate on + // one pixel at a time. + // So they generally speed up 3bpp images about 3x, 4bpp images about 4x. + // They can scale up to 6 and 8 bpp images and down to 2 bpp images, + // but they'd not likely have any benefit for 1bpp images. 
+ // Most of these can be implemented using only MMX and 64-bit registers, + // but they end up a bit slower than using the equally-ubiquitous SSE2. if (bpp == 3) { pp->read_filter[PNG_FILTER_VALUE_SUB-1] = png_read_filter_row_sub3_sse2; diff --git a/pngpriv.h b/pngpriv.h index 18ff7f379..cf7b29974 100644 --- a/pngpriv.h +++ b/pngpriv.h @@ -183,9 +183,12 @@ #endif /* PNG_ARM_NEON_OPT > 0 */ #ifndef PNG_INTEL_SSE_OPT -# if defined(__SSE3__) || defined(__SSSE3__) +# if defined(__SSE4_1__) +# define PNG_INTEL_SSE_OPT 3 +# elif defined(__SSE3__) || defined(__SSSE3__) # define PNG_INTEL_SSE_OPT 2 -# elif defined(__SSE2__) +# elif defined(__SSE2__) || defined(_M_X64) || defined(_M_AMD64) || \ (defined(_M_IX86_FP) && _M_IX86_FP >= 2) # define PNG_INTEL_SSE_OPT 1 # endif #endif From 5bc58a0ebccc3cea72ddb4c6cbde6fd1925bc7ca Mon Sep 17 00:00:00 2001 From: Matt Sarett <msarett@google.com> Date: Tue, 16 Feb 2016 10:53:36 -0500 Subject: [PATCH 4/8] Use immintrin, tweak checks for SSE4, SSSE3 --- intel/filter_sse2_intrinsics.c | 6 +----- pngpriv.h | 7 +++++-- 2 files changed, 6 insertions(+), 7 deletions(-) diff --git a/intel/filter_sse2_intrinsics.c b/intel/filter_sse2_intrinsics.c index 01fea3cc1..9e7484923 100644 --- a/intel/filter_sse2_intrinsics.c +++ b/intel/filter_sse2_intrinsics.c @@ -14,11 +14,7 @@ #if PNG_INTEL_SSE_OPT > 0 -#if defined(_MSC_VER) && defined(_WIN64) -#include <intrin.h> -#else -#include <x86intrin.h> -#endif +#include <immintrin.h> // Functions in this file look at most 3 pixels (a,b,c) to predict the 4th (d). diff --git a/pngpriv.h b/pngpriv.h index cf7b29974..f5c02de42 100644 --- a/pngpriv.h +++ b/pngpriv.h @@ -183,9 +183,12 @@ #endif /* PNG_ARM_NEON_OPT > 0 */ #ifndef PNG_INTEL_SSE_OPT -# if defined(__SSE4_1__) +# if defined(__SSE4_1__) || defined(__AVX__) + /* We are not actually using AVX, but checking for AVX is the best way we can detect SSE4.1 and SSSE3 on MSVC.
+ */ # define PNG_INTEL_SSE_OPT 3 -# elif defined(__SSE3__) || defined(__SSSE3__) +# elif defined(__SSSE3__) # define PNG_INTEL_SSE_OPT 2 # elif defined(__SSE2__) || defined(_M_X64) || defined(_M_AMD64) || \ (defined(_M_IX86_FP) && _M_IX86_FP >= 2) From 9a308a3344e395e79858716a65296cfb47a81ca1 Mon Sep 17 00:00:00 2001 From: Matt Sarett Date: Wed, 17 Feb 2016 11:43:34 -0500 Subject: [PATCH 5/8] Add intel opts to Makefile and configure.ac --- Makefile.am | 5 +++++ configure.ac | 35 ++++++++++++++++++++++++++++++++++ intel/filter_sse2_intrinsics.c | 8 ++++---- intel/intel_init.c | 4 ++-- pngpriv.h | 30 ++++++++++++++++++++--------- 5 files changed, 67 insertions(+), 15 deletions(-) diff --git a/Makefile.am b/Makefile.am index 73097dbcc..646bb236a 100644 --- a/Makefile.am +++ b/Makefile.am @@ -83,6 +83,11 @@ libpng@PNGLIB_MAJOR@@PNGLIB_MINOR@_la_SOURCES += arm/arm_init.c\ arm/filter_neon.S arm/filter_neon_intrinsics.c endif +if PNG_INTEL_SSE +libpng@PNGLIB_MAJOR@@PNGLIB_MINOR@_la_SOURCES += intel/intel_init.c\ + intel/filter_sse2_intrinsics.c +endif + nodist_libpng@PNGLIB_MAJOR@@PNGLIB_MINOR@_la_SOURCES = pnglibconf.h libpng@PNGLIB_MAJOR@@PNGLIB_MINOR@_la_LDFLAGS = -no-undefined -export-dynamic \ diff --git a/configure.ac b/configure.ac index 10538fcc4..66f51261d 100644 --- a/configure.ac +++ b/configure.ac @@ -314,6 +314,41 @@ AM_CONDITIONAL([PNG_ARM_NEON], *) test "$enable_arm_neon" != '';; esac]) +# INTEL +# === +# +# INTEL SSE (SIMD) support. + +AC_ARG_ENABLE([intel-sse], + AS_HELP_STRING([[[--enable-intel-sse]]], + [Enable Intel SSE optimizations: =no/off, yes/on:] + [no/off: disable the optimizations;] + [yes/on: enable the optimizations.] 
+ [If not specified: determined by the compiler.]), + [case "$enableval" in + no|off) + # disable the default enabling: + AC_DEFINE([PNG_INTEL_SSE_OPT], [0], + [Disable Intel SSE optimizations]) + # Prevent inclusion of the assembler files below: + enable_intel_sse=no;; + yes|on) + AC_DEFINE([PNG_INTEL_SSE_OPT], [1], + [Enable Intel SSE optimizations]);; + *) + AC_MSG_ERROR([--enable-intel-sse=${enable_intel_sse}: invalid value]) + esac]) + +# Add Intel specific files to all builds where the host_cpu is Intel ('x86*') +# or where Intel optimizations were explicitly requested (this allows a +# fallback if a future host CPU does not match 'x86*') +AM_CONDITIONAL([PNG_INTEL_SSE], + [test "$enable_intel_sse" != 'no' && + case "$host_cpu" in + x86*) :;; + *) test "$enable_intel_sse" != '';; + esac]) + AC_MSG_NOTICE([[Extra options for compiler: $PNG_COPTS]]) # Config files, substituting as above diff --git a/intel/filter_sse2_intrinsics.c b/intel/filter_sse2_intrinsics.c index 9e7484923..3bfdf5dd5 100644 --- a/intel/filter_sse2_intrinsics.c +++ b/intel/filter_sse2_intrinsics.c @@ -12,7 +12,7 @@ #ifdef PNG_READ_SUPPORTED -#if PNG_INTEL_SSE_OPT > 0 +#if PNG_INTEL_SSE_IMPLEMENTATION > 0 #include @@ -144,7 +144,7 @@ void png_read_filter_row_avg4_sse2(png_row_infop row_info, png_bytep row, // Returns |x| for 16-bit lanes. static __m128i abs_i16(__m128i x) { -#if PNG_INTEL_SSE_OPT >= 2 +#if PNG_INTEL_SSE_IMPLEMENTATION >= 2 return _mm_abs_epi16(x); #else // Read this all as, return x<0 ? -x : x. @@ -162,7 +162,7 @@ static __m128i abs_i16(__m128i x) { // Bytewise c ? t : e. 
static __m128i if_then_else(__m128i c, __m128i t, __m128i e) { -#if PNG_INTEL_SSE_OPT >= 3 +#if PNG_INTEL_SSE_IMPLEMENTATION >= 3 return _mm_blendv_epi8(e,t,c); #else return _mm_or_si128(_mm_and_si128(c, t), _mm_andnot_si128(c, e)); @@ -281,5 +281,5 @@ void png_read_filter_row_paeth4_sse2(png_row_infop row_info, png_bytep row, } } -#endif /* PNG_INTEL_SSE_OPT > 0 */ +#endif /* PNG_INTEL_SSE_IMPLEMENTATION > 0 */ #endif /* READ */ diff --git a/intel/intel_init.c b/intel/intel_init.c index 8616b53f1..394984e06 100644 --- a/intel/intel_init.c +++ b/intel/intel_init.c @@ -11,7 +11,7 @@ #include "../pngpriv.h" #ifdef PNG_READ_SUPPORTED -#if PNG_INTEL_SSE_OPT > 0 +#if PNG_INTEL_SSE_IMPLEMENTATION > 0 void png_init_filter_functions_sse2(png_structp pp, unsigned int bpp) @@ -41,5 +41,5 @@ png_init_filter_functions_sse2(png_structp pp, unsigned int bpp) // No need optimize PNG_FILTER_VALUE_UP. The compiler should autovectorize. } -#endif /* PNG_INTEL_SSE_OPT > 0 */ +#endif /* PNG_INTEL_SSE_IMPLEMENTATION > 0 */ #endif /* PNG_READ_SUPPORTED */ diff --git a/pngpriv.h b/pngpriv.h index f5c02de42..a402704f8 100644 --- a/pngpriv.h +++ b/pngpriv.h @@ -183,21 +183,33 @@ #endif /* PNG_ARM_NEON_OPT > 0 */ #ifndef PNG_INTEL_SSE_OPT -# if defined(__SSE4_1__) || defined(__AVX__) - /* We are not actually using AVX, but checking for AVX is the best - way we can detect SSE4.1 and SSSE3 on MSVC. 
- */ -# define PNG_INTEL_SSE_OPT 3 -# elif defined(__SSSE3__) -# define PNG_INTEL_SSE_OPT 2 -# elif defined(__SSE2__) || defined(_M_X64) || defined(_M_AMD64) || \ +# if defined(__SSE4_1__) || defined(__AVX__) || defined(__SSSE3__) || \ + defined(__SSE2__) || defined(_M_X64) || defined(_M_AMD64) || \ (defined(_M_IX86_FP) && _M_IX86_FP >= 2) # define PNG_INTEL_SSE_OPT 1 # endif #endif #if PNG_INTEL_SSE_OPT > 0 -# define PNG_FILTER_OPTIMIZATIONS png_init_filter_functions_sse2 +# ifndef PNG_INTEL_SSE_IMPLEMENTATION +# if defined(__SSE4_1__) || defined(__AVX__) + /* We are not actually using AVX, but checking for AVX is the best + way we can detect SSE4.1 and SSSE3 on MSVC. + */ +# define PNG_INTEL_SSE_IMPLEMENTATION 3 +# elif defined(__SSSE3__) +# define PNG_INTEL_SSE_IMPLEMENTATION 2 +# elif defined(__SSE2__) || defined(_M_X64) || defined(_M_AMD64) || \ + (defined(_M_IX86_FP) && _M_IX86_FP >= 2) +# define PNG_INTEL_SSE_IMPLEMENTATION 1 +# else +# define PNG_INTEL_SSE_IMPLEMENTATION 0 +# endif +# endif + +# if PNG_INTEL_SSE_IMPLEMENTATION > 0 +# define PNG_FILTER_OPTIMIZATIONS png_init_filter_functions_sse2 +# endif #endif /* Is this a build of a DLL where compilation of the object modules requires From fb375429b5f8e2f1e53eecf3155d5172c59b02ef Mon Sep 17 00:00:00 2001 From: Matt Sarett Date: Wed, 17 Feb 2016 11:52:09 -0500 Subject: [PATCH 6/8] Fix host_cpu check --- configure.ac | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/configure.ac b/configure.ac index 66f51261d..c768e9ba9 100644 --- a/configure.ac +++ b/configure.ac @@ -345,7 +345,7 @@ AC_ARG_ENABLE([intel-sse], AM_CONDITIONAL([PNG_INTEL_SSE], [test "$enable_intel_sse" != 'no' && case "$host_cpu" in - x86*) :;; + i?86|x86_64) :;; *) test "$enable_intel_sse" != '';; esac]) From 342c4eab2a0565de456f1f3efcc41b635544160e Mon Sep 17 00:00:00 2001 From: Matt Sarett Date: Thu, 18 Feb 2016 12:43:50 -0500 Subject: [PATCH 7/8] Move sse opts into contrib/intel --- Makefile.am | 5 -- configure.ac | 35 
-------------- contrib/intel/Makefile.am.patch | 17 +++++++ contrib/intel/configure.ac.patch | 46 +++++++++++++++++++ .../intel}/filter_sse2_intrinsics.c | 0 {intel => contrib/intel}/intel_init.c | 0 pngpriv.h | 14 ++++-- 7 files changed, 73 insertions(+), 44 deletions(-) create mode 100644 contrib/intel/Makefile.am.patch create mode 100644 contrib/intel/configure.ac.patch rename {intel => contrib/intel}/filter_sse2_intrinsics.c (100%) rename {intel => contrib/intel}/intel_init.c (100%) diff --git a/Makefile.am b/Makefile.am index 646bb236a..73097dbcc 100644 --- a/Makefile.am +++ b/Makefile.am @@ -83,11 +83,6 @@ libpng@PNGLIB_MAJOR@@PNGLIB_MINOR@_la_SOURCES += arm/arm_init.c\ arm/filter_neon.S arm/filter_neon_intrinsics.c endif -if PNG_INTEL_SSE -libpng@PNGLIB_MAJOR@@PNGLIB_MINOR@_la_SOURCES += intel/intel_init.c\ - intel/filter_sse2_intrinsics.c -endif - nodist_libpng@PNGLIB_MAJOR@@PNGLIB_MINOR@_la_SOURCES = pnglibconf.h libpng@PNGLIB_MAJOR@@PNGLIB_MINOR@_la_LDFLAGS = -no-undefined -export-dynamic \ diff --git a/configure.ac b/configure.ac index c768e9ba9..10538fcc4 100644 --- a/configure.ac +++ b/configure.ac @@ -314,41 +314,6 @@ AM_CONDITIONAL([PNG_ARM_NEON], *) test "$enable_arm_neon" != '';; esac]) -# INTEL -# === -# -# INTEL SSE (SIMD) support. - -AC_ARG_ENABLE([intel-sse], - AS_HELP_STRING([[[--enable-intel-sse]]], - [Enable Intel SSE optimizations: =no/off, yes/on:] - [no/off: disable the optimizations;] - [yes/on: enable the optimizations.] 
- [If not specified: determined by the compiler.]), - [case "$enableval" in - no|off) - # disable the default enabling: - AC_DEFINE([PNG_INTEL_SSE_OPT], [0], - [Disable Intel SSE optimizations]) - # Prevent inclusion of the assembler files below: - enable_intel_sse=no;; - yes|on) - AC_DEFINE([PNG_INTEL_SSE_OPT], [1], - [Enable Intel SSE optimizations]);; - *) - AC_MSG_ERROR([--enable-intel-sse=${enable_intel_sse}: invalid value]) - esac]) - -# Add Intel specific files to all builds where the host_cpu is Intel ('x86*') -# or where Intel optimizations were explicitly requested (this allows a -# fallback if a future host CPU does not match 'x86*') -AM_CONDITIONAL([PNG_INTEL_SSE], - [test "$enable_intel_sse" != 'no' && - case "$host_cpu" in - i?86|x86_64) :;; - *) test "$enable_intel_sse" != '';; - esac]) - AC_MSG_NOTICE([[Extra options for compiler: $PNG_COPTS]]) # Config files, substituting as above diff --git a/contrib/intel/Makefile.am.patch b/contrib/intel/Makefile.am.patch new file mode 100644 index 000000000..2f119cd6a --- /dev/null +++ b/contrib/intel/Makefile.am.patch @@ -0,0 +1,17 @@ + +# +# Copyright (c) 2016 Google, Inc. +# +# This code is released under the libpng license. +# For conditions of distribution and use, see the disclaimer +# and license in png.h +# + +# In order to compile Intel SSE optimizations for libpng, please add +# the following code to Makefile.am directly beneath the +# "if PNG_ARM_NEON ... endif" statement. + +if PNG_INTEL_SSE +libpng@PNGLIB_MAJOR@@PNGLIB_MINOR@_la_SOURCES += intel/intel_init.c\ + intel/filter_sse2_intrinsics.c +endif diff --git a/contrib/intel/configure.ac.patch b/contrib/intel/configure.ac.patch new file mode 100644 index 000000000..fec654953 --- /dev/null +++ b/contrib/intel/configure.ac.patch @@ -0,0 +1,46 @@ +# +# Copyright (c) 2016 Google, Inc. +# +# This code is released under the libpng license. 
+# For conditions of distribution and use, see the disclaimer +# and license in png.h +# + +# In order to compile Intel SSE optimizations for libpng, please add +# the following code to configure.ac under HOST SPECIFIC OPTIONS +# directly beneath the section for ARM. + +# INTEL +# === +# +# INTEL SSE (SIMD) support. + +AC_ARG_ENABLE([intel-sse], + AS_HELP_STRING([[[--enable-intel-sse]]], + [Enable Intel SSE optimizations: =no/off, yes/on:] + [no/off: disable the optimizations;] + [yes/on: enable the optimizations.] + [If not specified: determined by the compiler.]), + [case "$enableval" in + no|off) + # disable the default enabling: + AC_DEFINE([PNG_INTEL_SSE_OPT], [0], + [Disable Intel SSE optimizations]) + # Prevent inclusion of the assembler files below: + enable_intel_sse=no;; + yes|on) + AC_DEFINE([PNG_INTEL_SSE_OPT], [1], + [Enable Intel SSE optimizations]);; + *) + AC_MSG_ERROR([--enable-intel-sse=${enable_intel_sse}: invalid value]) + esac]) + +# Add Intel specific files to all builds where the host_cpu is Intel +# ('i?86' or 'x86_64') or where Intel optimizations were explicitly +# requested (this allows a fallback if a future host CPU does not match) +AM_CONDITIONAL([PNG_INTEL_SSE], + [test "$enable_intel_sse" != 'no' && + case "$host_cpu" in + i?86|x86_64) :;; + *) test "$enable_intel_sse" != '';; + esac]) diff --git a/intel/filter_sse2_intrinsics.c b/contrib/intel/filter_sse2_intrinsics.c similarity index 100% rename from intel/filter_sse2_intrinsics.c rename to contrib/intel/filter_sse2_intrinsics.c diff --git a/intel/intel_init.c b/contrib/intel/intel_init.c similarity index 100% rename from intel/intel_init.c rename to contrib/intel/intel_init.c diff --git a/pngpriv.h b/pngpriv.h index a402704f8..0ac16fadd 100644 --- a/pngpriv.h +++ b/pngpriv.h @@ -183,10 +183,16 @@ #endif /* PNG_ARM_NEON_OPT > 0 */ #ifndef PNG_INTEL_SSE_OPT -# if defined(__SSE4_1__) || defined(__AVX__) || defined(__SSSE3__) || \ - defined(__SSE2__) || defined(_M_X64) || defined(_M_AMD64) 
|| \ - (defined(_M_IX86_FP) && _M_IX86_FP >= 2) -# define PNG_INTEL_SSE_OPT 1 +# ifdef PNG_INTEL_SSE + /* Only check for SSE if the build configuration has been modified to + * enable SSE optimizations. This means that these optimizations will + * be off by default. See contrib/intel for more details. + */ +# if defined(__SSE4_1__) || defined(__AVX__) || defined(__SSSE3__) || \ + defined(__SSE2__) || defined(_M_X64) || defined(_M_AMD64) || \ + (defined(_M_IX86_FP) && _M_IX86_FP >= 2) +# define PNG_INTEL_SSE_OPT 1 +# endif # endif #endif From c3a45944e2a5c2fdc7d651876bb65c982c67bd34 Mon Sep 17 00:00:00 2001 From: Matt Sarett Date: Thu, 18 Feb 2016 12:45:13 -0500 Subject: [PATCH 8/8] Fix paths in Makefile.am.patch --- contrib/intel/Makefile.am.patch | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/contrib/intel/Makefile.am.patch b/contrib/intel/Makefile.am.patch index 2f119cd6a..3921f274d 100644 --- a/contrib/intel/Makefile.am.patch +++ b/contrib/intel/Makefile.am.patch @@ -12,6 +12,6 @@ # "if PNG_ARM_NEON ... endif" statement. if PNG_INTEL_SSE -libpng@PNGLIB_MAJOR@@PNGLIB_MINOR@_la_SOURCES += intel/intel_init.c\ - intel/filter_sse2_intrinsics.c +libpng@PNGLIB_MAJOR@@PNGLIB_MINOR@_la_SOURCES += contrib/intel/intel_init.c\ + contrib/intel/filter_sse2_intrinsics.c endif