add SSE optimized routine for linear interpolated slow motion

This commit is contained in:
veejay
2023-10-08 02:45:18 +02:00
parent ed02a50146
commit 69cfb8c3db

View File

@@ -154,6 +154,9 @@
#include <immintrin.h> #include <immintrin.h>
#endif #endif
#ifdef HAVE_ASM_SSE2
/* emmintrin.h declares the SSE2 intrinsics; smmintrin.h is SSE4.1 and
 * would require -msse4.1 even though this path is gated on SSE2 only. */
#include <emmintrin.h>
#endif
#define BUFSIZE 1024 #define BUFSIZE 1024
@@ -2531,6 +2534,25 @@ static inline void vj_frame_slow1( uint8_t *dst, uint8_t *a, uint8_t *b, const i
for( i = 0; i < len; i ++ ) { for( i = 0; i < len; i ++ ) {
dst[i] = a[i] + ( frac * ( b[i] - a[i] ) ); dst[i] = a[i] + ( frac * ( b[i] - a[i] ) );
} }
#else
#ifdef HAVE_ASM_SSE2
	/* Linear blend dst = a + frac*(b - a) in 8.8 fixed point:
	 * dst = (a*(256-w) + b*w) >> 8 with w = frac*256.
	 * Bytes must be widened to 16-bit lanes first: 8-bit lanes cannot hold
	 * the intermediate product, and 16-bit intrinsics applied directly to
	 * packed bytes would mix adjacent pixels. All terms are non-negative
	 * and a*(256-w) + b*w <= 255*256, so unsigned 16-bit never overflows. */
	int i = 0;
	const int ialpha = (int)( frac * 256.0f );       /* weight of b, 0..256 */
	const __m128i w_b  = _mm_set1_epi16( (short) ialpha );
	const __m128i w_a  = _mm_set1_epi16( (short) (256 - ialpha) );
	const __m128i zero = _mm_setzero_si128();
	for( ; i <= len - 16; i += 16 ) {
		__m128i va = _mm_loadu_si128( (const __m128i*)( a + i ) );
		__m128i vb = _mm_loadu_si128( (const __m128i*)( b + i ) );
		/* widen 16 bytes to 2x8 unsigned 16-bit lanes */
		__m128i a_lo = _mm_unpacklo_epi8( va, zero );
		__m128i a_hi = _mm_unpackhi_epi8( va, zero );
		__m128i b_lo = _mm_unpacklo_epi8( vb, zero );
		__m128i b_hi = _mm_unpackhi_epi8( vb, zero );
		/* weighted sum, then >>8 back to 0..255 */
		__m128i lo = _mm_srli_epi16(
			_mm_add_epi16( _mm_mullo_epi16( a_lo, w_a ),
			               _mm_mullo_epi16( b_lo, w_b ) ), 8 );
		__m128i hi = _mm_srli_epi16(
			_mm_add_epi16( _mm_mullo_epi16( a_hi, w_a ),
			               _mm_mullo_epi16( b_hi, w_b ) ), 8 );
		/* saturating pack back to 16 bytes (values already fit in 0..255) */
		_mm_storeu_si128( (__m128i*)( dst + i ), _mm_packus_epi16( lo, hi ) );
	}
	/* scalar tail, computed with the identical fixed-point formula so the
	 * vector and tail pixels round the same way */
	for( ; i < len; i ++ ) {
		dst[i] = (uint8_t)( ( a[i] * (256 - ialpha) + b[i] * ialpha ) >> 8 );
	}
#else #else
uint32_t ialpha = (256 * frac); uint32_t ialpha = (256 * frac);
unsigned int i; unsigned int i;
@@ -2563,6 +2585,7 @@ static inline void vj_frame_slow1( uint8_t *dst, uint8_t *a, uint8_t *b, const i
, [alpha] "m" (ialpha)); , [alpha] "m" (ialpha));
} }
#endif #endif
#endif
} }
static void vj_frame_slow_job( void *arg ) static void vj_frame_slow_job( void *arg )