changeset 9557:f75011092766

asm: avx2 code for pixel_add_ps[16x16] - 55x

add_ps[16x16]    55.10x    786.02    43312.65    (speedup, avx2 cycles, C cycles)
author Sumalatha Polureddy <sumalatha@multicorewareinc.com>
date Fri, 20 Feb 2015 13:39:51 +0530
parents 800470abb9f7
children d77824ea76c9
files source/common/x86/asm-primitives.cpp source/common/x86/pixel.h source/common/x86/pixeladd8.asm
diffstat 3 files changed, 50 insertions(+), 0 deletions(-)
--- a/source/common/x86/asm-primitives.cpp	Fri Feb 20 12:39:44 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp	Fri Feb 20 13:39:51 2015 +0530
@@ -1499,6 +1499,8 @@ void setupAssemblyPrimitives(EncoderPrim
     }
     if (cpuMask & X265_CPU_AVX2)
     {
+        p.cu[BLOCK_16x16].add_ps = x265_pixel_add_ps_16x16_avx2;
+
         p.pu[LUMA_16x4].pixelavg_pp = x265_pixel_avg_16x4_avx2;
         p.pu[LUMA_16x8].pixelavg_pp = x265_pixel_avg_16x8_avx2;
         p.pu[LUMA_16x12].pixelavg_pp = x265_pixel_avg_16x12_avx2;
--- a/source/common/x86/pixel.h	Fri Feb 20 12:39:44 2015 +0530
+++ b/source/common/x86/pixel.h	Fri Feb 20 13:39:51 2015 +0530
@@ -251,6 +251,8 @@ void x265_pixel_avg_64x48_avx2(pixel* ds
 void x265_pixel_avg_64x32_avx2(pixel* dst, intptr_t dstride, const pixel* src0, intptr_t sstride0, const pixel* src1, intptr_t sstride1, int);
 void x265_pixel_avg_64x16_avx2(pixel* dst, intptr_t dstride, const pixel* src0, intptr_t sstride0, const pixel* src1, intptr_t sstride1, int);
 
+void x265_pixel_add_ps_16x16_avx2(pixel* a, intptr_t dstride, const pixel* b0, const int16_t* b1, intptr_t sstride0, intptr_t sstride1);
+
 #undef DECL_PIXELS
 #undef DECL_HEVC_SSD
 #undef DECL_X1
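
The prototype follows the common add_ps signature: a is the destination pixel block, b0 the 8-bit prediction, b1 the 16-bit residual, each with its own stride. Semantically the primitive is a widening add followed by saturation back to pixel range. A minimal scalar reference, assuming an 8-bit build (pixel == uint8_t) and an illustrative function name:

    #include <algorithm>
    #include <cstdint>

    typedef uint8_t pixel;   // 8-bit build assumed; HIGH_BIT_DEPTH builds widen this

    // Scalar reference for pixel_add_ps_16x16: dst = clip(pred + residual).
    static void add_ps_16x16_ref(pixel* dst, intptr_t dstride,
                                 const pixel* src0, const int16_t* src1,
                                 intptr_t sstride0, intptr_t sstride1)
    {
        for (int y = 0; y < 16; y++)
        {
            for (int x = 0; x < 16; x++)
            {
                int sum = src0[x] + src1[x];                      // widen, then add
                dst[x] = (pixel)std::min(std::max(sum, 0), 255);  // saturate to [0, 255]
            }
            dst  += dstride;
            src0 += sstride0;
            src1 += sstride1;
        }
    }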
--- a/source/common/x86/pixeladd8.asm	Fri Feb 20 12:39:44 2015 +0530
+++ b/source/common/x86/pixeladd8.asm	Fri Feb 20 13:39:51 2015 +0530
@@ -398,6 +398,52 @@ cglobal pixel_add_ps_16x%2, 6, 7, 8, des
 
     jnz         .loop
     RET
+
+INIT_YMM avx2
+cglobal pixel_add_ps_16x%2, 6, 7, 8, dest, destride, src0, src1, srcStride0, srcStride1
+    mov         r6d,        %2/4        ; 4 rows per iteration
+    add         r5,         r5          ; int16_t residual stride, elements -> bytes
+.loop:
+
+    pmovzxbw    m0,         [r2]        ; row 0 of src0
+    pmovzxbw    m1,         [r2 + r4]   ; row 1 of src0
+    movu        m2,         [r3]        ; row 0 of src1
+    movu        m3,         [r3 + r5]   ; row 1 of src1
+    paddw       m0,         m2
+    paddw       m1,         m3
+    packuswb    m0,         m1          ; clip to [0, 255], pack rows 0-1 to bytes
+
+    lea         r2,         [r2 + r4 * 2]
+    lea         r3,         [r3 + r5 * 2]
+
+    pmovzxbw    m2,         [r2]        ; row 2 of src0
+    pmovzxbw    m3,         [r2 + r4]   ; row 3 of src0
+    movu        m4,         [r3]        ; row 2 of src1
+    movu        m5,         [r3 + r5]   ; row 3 of src1
+    paddw       m2,         m4
+    paddw       m3,         m5
+    packuswb    m2,         m3          ; clip to [0, 255], pack rows 2-3 to bytes
+
+    lea         r2,         [r2 + r4 * 2]
+    lea         r3,         [r3 + r5 * 2]
+
+    vpermq      m0,         m0,         11011000b   ; packuswb interleaves lanes; restore row order
+    movu        [r0],       xm0         ; row 0 of dst
+    vextracti128 xm3,       m0,         1
+    movu        [r0 + r1],  xm3         ; row 1 of dst
+
+    lea         r0,         [r0 + r1 * 2]
+    vpermq      m2,         m2,         11011000b   ; restore row order for rows 2-3
+    movu        [r0],       xm2         ; row 2 of dst
+    vextracti128 xm3,       m2,         1
+    movu        [r0 + r1],  xm3         ; row 3 of dst
+
+    lea         r0,         [r0 + r1 * 2]
+
+    dec         r6d
+    jnz         .loop
+
+    RET
 %endif
 %endmacro
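
The loop handles four rows per iteration: pmovzxbw widens 16 prediction bytes to words, paddw adds the 16-bit residual, and packuswb saturates two rows back to bytes. Because packuswb operates within each 128-bit lane, vpermq with 11011000b (0xD8) swaps the middle qwords so each xmm half holds one contiguous output row. A rough intrinsics rendering of one row pair, assuming 8-bit pixels (the names below are illustrative, not from the x265 source):

    #include <immintrin.h>
    #include <cstdint>

    // One row pair of the loop above: dst = saturate_u8(pred + residual).
    static void add_ps_two_rows(uint8_t* dst, intptr_t dstride,
                                const uint8_t* src0, intptr_t sstride0,
                                const int16_t* src1, intptr_t sstride1)
    {
        // pmovzxbw: zero-extend 16 prediction bytes per row to 16-bit words
        __m256i p0 = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*)src0));
        __m256i p1 = _mm256_cvtepu8_epi16(_mm_loadu_si128((const __m128i*)(src0 + sstride0)));

        // movu: load 16 residual words per row (sstride1 counted in int16_t here,
        // whereas the assembly doubles r5 up front to make it a byte stride)
        __m256i r0 = _mm256_loadu_si256((const __m256i*)src1);
        __m256i r1 = _mm256_loadu_si256((const __m256i*)(src1 + sstride1));

        // paddw + packuswb: add, then pack both rows with unsigned saturation
        __m256i packed = _mm256_packus_epi16(_mm256_add_epi16(p0, r0),
                                             _mm256_add_epi16(p1, r1));

        // vpermq 0xD8: undo packuswb's per-lane interleave so each xmm half
        // is one contiguous 16-byte row
        packed = _mm256_permute4x64_epi64(packed, 0xD8);

        _mm_storeu_si128((__m128i*)dst, _mm256_castsi256_si128(packed));
        _mm_storeu_si128((__m128i*)(dst + dstride), _mm256_extracti128_si256(packed, 1));
    }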