Reformat a bit, add MSVS checks, add SSE4

Matt Sarett 2016-02-16 10:10:11 -05:00
parent f84f957881
commit 577c1f0305
3 changed files with 114 additions and 76 deletions


@@ -14,10 +14,10 @@
 #if PNG_INTEL_SSE_OPT > 0

-#if PNG_INTEL_SSE_OPT == 1
-#include <emmintrin.h>
-#elif PNG_INTEL_SSE_OPT == 2
-#include <tmmintrin.h>
+#if defined(_MSC_VER) && defined(_WIN64)
+#include <intrin.h>
+#else
+#include <x86intrin.h>
 #endif

 // Functions in this file look at most 3 pixels (a,b,c) to predict the 4th (d).
@@ -26,8 +26,25 @@
 // row:  a d
 // The Sub filter predicts d=a, Avg d=(a+b)/2, and Paeth predicts d to be
 // whichever of a, b, or c is closest to p=a+b-c.
-// Up also exists, predicting d=b. But there is not need to optimize Up
-// because the compiler will vectorize it for us.
+
+static __m128i load3(const void* p) {
+   png_uint_32 packed;
+   memcpy(&packed, p, 3);
+   return _mm_cvtsi32_si128(packed);
+}
+
+static __m128i load4(const void* p) {
+   return _mm_cvtsi32_si128(*(const int*)p);
+}
+
+static void store3(void* p, __m128i v) {
+   png_uint_32 packed = _mm_cvtsi128_si32(v);
+   memcpy(p, &packed, 3);
+}
+
+static void store4(void* p, __m128i v) {
+   *(int*)p = _mm_cvtsi128_si32(v);
+}

 void png_read_filter_row_sub3_sse2(png_row_infop row_info, png_bytep row,
                                    png_const_bytep prev)
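The new helpers centralize the awkward part: a 3bpp pixel is narrower than any scalar type, so it travels through a 32-bit integer via memcpy, and only 3 of the 4 bytes are ever read or written at the row's edges. A minimal standalone sketch of the same round trip (a hypothetical test harness, not part of the commit; the scalar here is zero-initialized, which the committed load3 does not do):

#include <emmintrin.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

/* Same trick as the diff's load3/store3: move 3 bytes through a 32-bit
 * scalar so the 4th byte of memory is never read or written. */
static __m128i load3_demo(const void* p) {
   uint32_t packed = 0;                       /* high byte stays zero */
   memcpy(&packed, p, 3);
   return _mm_cvtsi32_si128((int)packed);
}

static void store3_demo(void* p, __m128i v) {
   uint32_t packed = (uint32_t)_mm_cvtsi128_si32(v);
   memcpy(p, &packed, 3);                     /* only 3 bytes touched */
}

int main(void) {
   uint8_t px[4] = { 0x11, 0x22, 0x33, 0xAA };   /* 0xAA is a canary */
   store3_demo(px, load3_demo(px));
   printf("%02x %02x %02x canary=%02x\n", px[0], px[1], px[2], px[3]);
   return 0;                                  /* canary must print as aa */
}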
@@ -39,9 +56,9 @@ void png_read_filter_row_sub3_sse2(png_row_infop row_info, png_bytep row,
    int rb = row_info->rowbytes;
    while (rb > 0) {
-      a = d; memcpy(&d, row, 3);
+      a = d; d = load3(row);
       d = _mm_add_epi8(d, a);
-      memcpy(row, &d, 3);
+      store3(row, d);
       row += 3;
       rb -= 3;
@@ -58,9 +75,9 @@ void png_read_filter_row_sub4_sse2(png_row_infop row_info, png_bytep row,
    int rb = row_info->rowbytes;
    while (rb > 0) {
-      a = d; memcpy(&d, row, 4);
+      a = d; d = load4(row);
       d = _mm_add_epi8(d, a);
-      memcpy(row, &d, 4);
+      store4(row, d);
       row += 4;
       rb -= 4;
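Both Sub loops carry a serial dependency (a = d): each reconstructed pixel feeds the next, so the vector code wins only by handling all of a pixel's bytes at once. For reference, a scalar sketch of the PNG Sub reconstruction these loops implement (a hypothetical helper, not from the commit):

/* Scalar PNG Sub reconstruction: every byte adds the byte one whole
 * pixel (bpp bytes) to its left, wrapping modulo 256.  Bytes before the
 * first pixel are treated as zero, so the loop starts at index bpp. */
static void sub_reconstruct(unsigned char* row, int rowbytes, int bpp) {
   for (int i = bpp; i < rowbytes; i++)
      row[i] = (unsigned char)(row[i] + row[i - bpp]);
}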
@@ -80,24 +97,24 @@ void png_read_filter_row_avg3_sse2(png_row_infop row_info, png_bytep row,
    int rb = row_info->rowbytes;
    while (rb > 0) {
-      memcpy(&b, prev, 3);
-      a = d; memcpy(&d, row, 3);
-      // PNG requires a truncating average here, so sadly we can't just use
-      // _mm_avg_epu8...
+      b = load3(prev);
+      a = d; d = load3(row );
+      // PNG requires a truncating average, so we can't just use _mm_avg_epu8...
       __m128i avg = _mm_avg_epu8(a,b);
       // ...but we can fix it up by subtracting off 1 if it rounded up.
       avg = _mm_sub_epi8(avg, _mm_and_si128(_mm_xor_si128(a,b),
                                             _mm_set1_epi8(1)));

       d = _mm_add_epi8(d, avg);
-      memcpy(row, &d, 3);
+      store3(row, d);

       prev += 3;
       row += 3;
       rb -= 3;
    }
 }

 void png_read_filter_row_avg4_sse2(png_row_infop row_info, png_bytep row,
                                    png_const_bytep prev)
 {
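The fixup works because _mm_avg_epu8 computes the rounded-up average (a+b+1)>>1, while PNG's Average filter wants the truncated (a+b)>>1, and the two differ by exactly (a^b)&1, the bit that only an odd sum sets. A quick exhaustive scalar check of that identity (standalone, mirrors the vector fixup lane by lane):

#include <stdio.h>

int main(void) {
   /* Verify: ((a+b+1)>>1) - ((a^b)&1) == (a+b)>>1 for all byte pairs. */
   for (int a = 0; a < 256; a++)
      for (int b = 0; b < 256; b++) {
         int rounded = (a + b + 1) >> 1;         /* what _mm_avg_epu8 does */
         int fixed   = rounded - ((a ^ b) & 1);  /* the diff's fixup */
         if (fixed != ((a + b) >> 1)) {
            printf("mismatch: a=%d b=%d\n", a, b);
            return 1;
         }
      }
   printf("identity holds for all 65536 pairs\n");
   return 0;
}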
@@ -111,18 +128,17 @@ void png_read_filter_row_avg4_sse2(png_row_infop row_info, png_bytep row,
    int rb = row_info->rowbytes;
    while (rb > 0) {
-      memcpy(&b, prev, 4);
-      a = d; memcpy(&d, row, 4);
-      // PNG requires a truncating average here, so sadly we can't just use
-      // _mm_avg_epu8...
+      b = load4(prev);
+      a = d; d = load4(row );
+      // PNG requires a truncating average, so we can't just use _mm_avg_epu8...
       __m128i avg = _mm_avg_epu8(a,b);
       // ...but we can fix it up by subtracting off 1 if it rounded up.
       avg = _mm_sub_epi8(avg, _mm_and_si128(_mm_xor_si128(a,b),
                                             _mm_set1_epi8(1)));
       d = _mm_add_epi8(d, avg);
-      memcpy(row, &d, 4);
+      store4(row, d);
       prev += 4;
       row += 4;
@@ -138,8 +154,10 @@ static __m128i abs_i16(__m128i x) {
    // Read this all as, return x<0 ? -x : x.
    // To negate two's complement, you flip all the bits then add 1.
    __m128i is_negative = _mm_cmplt_epi16(x, _mm_setzero_si128());
+
    // Flip negative lanes.
    x = _mm_xor_si128(x, is_negative);
+
    // +1 to negative lanes, else +0.
    x = _mm_add_epi16(x, _mm_srli_epi16(is_negative, 15));
    return x;
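The three steps separated above are the classic branch-free absolute value: build an all-ones mask for negative lanes, xor to flip them, then add the mask's top bit (shifted down to 1) to finish the two's-complement negation. The same idea in scalar form (a hypothetical helper for illustration; like the vector version, -32768 wraps to itself):

#include <stdint.h>

/* Branch-free |x|, mirroring abs_i16: xor with an all-ones mask flips
 * negative values, and adding mask>>15 (i.e. 1) completes the negation. */
static int16_t abs_i16_scalar(int16_t x) {
   int16_t mask = (int16_t)(x < 0 ? -1 : 0);   /* all ones iff negative */
   return (int16_t)((x ^ mask) + ((uint16_t)mask >> 15));
}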
@@ -148,7 +166,11 @@ static __m128i abs_i16(__m128i x) {
 // Bytewise c ? t : e.
 static __m128i if_then_else(__m128i c, __m128i t, __m128i e) {
+#if PNG_INTEL_SSE_OPT >= 3
+   return _mm_blendv_epi8(e,t,c);
+#else
    return _mm_or_si128(_mm_and_si128(c, t), _mm_andnot_si128(c, e));
+#endif
 }

 void png_read_filter_row_paeth3_sse2(png_row_infop row_info, png_bytep row,
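The SSE4.1 branch is a safe substitute because _mm_blendv_epi8 keys off the top bit of each byte, and c always arrives here from a compare, so every lane is all-zeros or all-ones; under that condition the and/andnot/or form computes the same bytewise select. In scalar terms (a hypothetical one-liner, not from the commit):

#include <stdint.h>

/* Bytewise c ? t : e, where c is 0x00 or 0xFF per lane exactly as SSE
 * compares produce: (c & t) keeps t-lanes, (~c & e) keeps e-lanes. */
static uint8_t select_byte(uint8_t c, uint8_t t, uint8_t e) {
   return (uint8_t)((c & t) | (uint8_t)(~c & e));
}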
@@ -159,10 +181,13 @@ void png_read_filter_row_paeth3_sse2(png_row_infop row_info, png_bytep row,
    // prev: c b
    // row:  a d
    // The Paeth function predicts d to be whichever of a, b, or c is nearest to
-   // p=a+b-c. The first pixel has no left context, and so uses an Up filter,
-   // p = b. This works naturally with our main loop's p = a+b-c if we force a
-   // and c to zero. Here we zero b and d, which become c and a respectively
-   // at the start of the loop.
+   // p=a+b-c.
+
+   // The first pixel has no left context, and so uses an Up filter, p = b.
+   // This works naturally with our main loop's p = a+b-c if we force a and c
+   // to zero.
+   // Here we zero b and d, which become c and a respectively at the start of
+   // the loop.
    const __m128i zero = _mm_setzero_si128();
    __m128i c, b = zero,
            a, d = zero;
@@ -171,16 +196,17 @@ void png_read_filter_row_paeth3_sse2(png_row_infop row_info, png_bytep row,
    while (rb > 0) {
       // It's easiest to do this math (particularly, deal with pc) with 16-bit
       // intermediates.
-      memcpy(&b, prev, 3);
-      memcpy(&d, row, 3);
-      c = b; b = _mm_unpacklo_epi8(b, zero);
-      a = d; d = _mm_unpacklo_epi8(d, zero);
-      __m128i pa = _mm_sub_epi16(b,c),
+      c = b; b = _mm_unpacklo_epi8(load3(prev), zero);
+      a = d; d = _mm_unpacklo_epi8(load3(row ), zero);
+
       // (p-a) == (a+b-c - a) == (b-c)
-              pb = _mm_sub_epi16(a,c),
+      __m128i pa = _mm_sub_epi16(b,c);
       // (p-b) == (a+b-c - b) == (a-c)
-              pc = _mm_add_epi16(pa,pb);
+      __m128i pb = _mm_sub_epi16(a,c);
       // (p-c) == (a+b-c - c) == (a+b-c-c) == (b-c)+(a-c)
+      __m128i pc = _mm_add_epi16(pa,pb);

       pa = abs_i16(pa);  // |p-a|
       pb = abs_i16(pb);  // |p-b|
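The reshuffled declarations make the algebra easier to follow: the vector code never materializes p = a+b-c, only the three differences, using (p-a) == (b-c), (p-b) == (a-c), and (p-c) == (b-c)+(a-c). For comparison, the scalar Paeth predictor as the PNG spec states it, annotated with those identities:

#include <stdlib.h>

/* PNG Paeth predictor: return whichever of a (left), b (above), and
 * c (upper-left) is nearest p = a+b-c, breaking ties in favor of a,
 * then b, then c. */
static int paeth_predictor(int a, int b, int c) {
   int p  = a + b - c;
   int pa = abs(p - a);   /* == |b - c| */
   int pb = abs(p - b);   /* == |a - c| */
   int pc = abs(p - c);   /* == |(b-c) + (a-c)| */
   if (pa <= pb && pa <= pc) return a;
   if (pb <= pc)             return b;
   return c;
}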
@@ -193,11 +219,10 @@ void png_read_filter_row_paeth3_sse2(png_row_infop row_info, png_bytep row,
                              if_then_else(_mm_cmpeq_epi16(smallest, pb), b,
                                                                          c));

       // Note `_epi8`: we need addition to wrap modulo 255.
       d = _mm_add_epi8(d, nearest);
-      __m128i r = _mm_packus_epi16(d,d);
-      memcpy(row, &r, 3);
+      store3(row, _mm_packus_epi16(d,d));

       prev += 3;
       row += 3;
       rb -= 3;
@@ -212,10 +237,13 @@ void png_read_filter_row_paeth4_sse2(png_row_infop row_info, png_bytep row,
    // prev: c b
    // row:  a d
    // The Paeth function predicts d to be whichever of a, b, or c is nearest to
-   // p=a+b-c. The first pixel has no left context, and so uses an Up filter,
-   // p = b. This works naturally with our main loop's p = a+b-c if we force a
-   // and c to zero. Here we zero b and d, which become c and a respectively
-   // at the start of the loop.
+   // p=a+b-c.
+
+   // The first pixel has no left context, and so uses an Up filter, p = b.
+   // This works naturally with our main loop's p = a+b-c if we force a and c
+   // to zero.
+   // Here we zero b and d, which become c and a respectively at the start of
+   // the loop.
    const __m128i zero = _mm_setzero_si128();
    __m128i c, b = zero,
            a, d = zero;
@@ -224,16 +252,17 @@ void png_read_filter_row_paeth4_sse2(png_row_infop row_info, png_bytep row,
    while (rb > 0) {
       // It's easiest to do this math (particularly, deal with pc) with 16-bit
       // intermediates.
-      memcpy(&b, prev, 4);
-      memcpy(&d, row, 4);
-      c = b; b = _mm_unpacklo_epi8(b, zero);
-      a = d; d = _mm_unpacklo_epi8(d, zero);
-      __m128i pa = _mm_sub_epi16(b,c),
+      c = b; b = _mm_unpacklo_epi8(load4(prev), zero);
+      a = d; d = _mm_unpacklo_epi8(load4(row ), zero);
+
       // (p-a) == (a+b-c - a) == (b-c)
-              pb = _mm_sub_epi16(a,c),
+      __m128i pa = _mm_sub_epi16(b,c);
       // (p-b) == (a+b-c - b) == (a-c)
-              pc = _mm_add_epi16(pa,pb);
+      __m128i pb = _mm_sub_epi16(a,c);
       // (p-c) == (a+b-c - c) == (a+b-c-c) == (b-c)+(a-c)
+      __m128i pc = _mm_add_epi16(pa,pb);

       pa = abs_i16(pa);  // |p-a|
       pb = abs_i16(pb);  // |p-b|
@@ -246,11 +275,10 @@ void png_read_filter_row_paeth4_sse2(png_row_infop row_info, png_bytep row,
                              if_then_else(_mm_cmpeq_epi16(smallest, pb), b,
                                                                          c));

       // Note `_epi8`: we need addition to wrap modulo 255.
       d = _mm_add_epi8(d, nearest);
-      __m128i r = _mm_packus_epi16(d,d);
-      memcpy(row, &r, 4);
+      store4(row, _mm_packus_epi16(d,d));

       prev += 4;
       row += 4;
       rb -= 4;


@@ -16,6 +16,13 @@
 void
 png_init_filter_functions_sse2(png_structp pp, unsigned int bpp)
 {
+   // The techniques used to implement each of these filters in SSE operate on
+   // one pixel at a time.
+   // So they generally speed up 3bpp images about 3x, 4bpp images about 4x.
+   // They can scale up to 6 and 8 bpp images and down to 2 bpp images,
+   // but they'd not likely have any benefit for 1bpp images.
+   // Most of these can be implemented using only MMX and 64-bit registers,
+   // but they end up a bit slower than using the equally-ubiquitous SSE2.
    if (bpp == 3)
    {
       pp->read_filter[PNG_FILTER_VALUE_SUB-1] = png_read_filter_row_sub3_sse2;


@@ -183,9 +183,12 @@
 #endif /* PNG_ARM_NEON_OPT > 0 */

 #ifndef PNG_INTEL_SSE_OPT
-# if defined(__SSE3__) || defined(__SSSE3__)
+# if defined(__SSE4_1__)
+#   define PNG_INTEL_SSE_OPT 3
+# elif defined(__SSE3__) || defined(__SSSE3__)
 #   define PNG_INTEL_SSE_OPT 2
-# elif defined(__SSE2__)
+# elif defined(__SSE2__) || defined(_M_X64) || defined(_M_AMD64) || \
+      (defined(_M_IX86_FP) && _M_IX86_FP >= 2)
 #   define PNG_INTEL_SSE_OPT 1
 # endif
 #endif
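The extra macros are what make this work under MSVC, which never defines __SSE2__: 64-bit targets define _M_X64/_M_AMD64 and always have SSE2, and 32-bit builds compiled with /arch:SSE2 or higher set _M_IX86_FP to 2. A standalone sketch of the same cascade, which prints the level the header logic would pick on the current compiler:

#include <stdio.h>

int main(void) {
   /* Mirrors the pngpriv.h cascade above: highest detected level wins. */
#if defined(__SSE4_1__)
   puts("PNG_INTEL_SSE_OPT would be 3 (SSE4.1)");
#elif defined(__SSE3__) || defined(__SSSE3__)
   puts("PNG_INTEL_SSE_OPT would be 2 (SSE3/SSSE3)");
#elif defined(__SSE2__) || defined(_M_X64) || defined(_M_AMD64) || \
      (defined(_M_IX86_FP) && _M_IX86_FP >= 2)
   puts("PNG_INTEL_SSE_OPT would be 1 (SSE2)");
#else
   puts("no Intel SSE path enabled");
#endif
   return 0;
}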