changeset 9584:14422b2e59a4

asm-avx2: intra_pred_ang8_29, improved 428.68c -> 311.40c over sse4 asm code

intra_ang_8x8[29]    11.29x    311.40    3515.58
author Praveen Tiwari <praveen@multicorewareinc.com>
date Thu, 26 Feb 2015 17:13:08 +0530
parents 6c3b6a58d2ef
children fd00ee151e21
files source/common/x86/asm-primitives.cpp source/common/x86/intrapred.h source/common/x86/intrapred8.asm
diffstat 3 files changed, 42 insertions(+), 0 deletions(-)
--- a/source/common/x86/asm-primitives.cpp	Thu Feb 26 16:24:18 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp	Thu Feb 26 17:13:08 2015 +0530
@@ -1847,6 +1847,7 @@ void setupAssemblyPrimitives(EncoderPrim
         p.cu[BLOCK_8x8].intra_pred[31] = x265_intra_pred_ang8_31_avx2;
         p.cu[BLOCK_8x8].intra_pred[30] = x265_intra_pred_ang8_30_avx2;
         p.cu[BLOCK_8x8].intra_pred[6] = x265_intra_pred_ang8_6_avx2;
+        p.cu[BLOCK_8x8].intra_pred[29] = x265_intra_pred_ang8_29_avx2;
     }
 }
 #endif // if HIGH_BIT_DEPTH
--- a/source/common/x86/intrapred.h	Thu Feb 26 16:24:18 2015 +0530
+++ b/source/common/x86/intrapred.h	Thu Feb 26 17:13:08 2015 +0530
@@ -166,6 +166,7 @@ void x265_intra_pred_ang8_5_avx2(pixel* 
 void x265_intra_pred_ang8_31_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_intra_pred_ang8_6_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_intra_pred_ang8_30_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
+void x265_intra_pred_ang8_29_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_all_angs_pred_4x4_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
 void x265_all_angs_pred_8x8_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
 void x265_all_angs_pred_16x16_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
--- a/source/common/x86/intrapred8.asm	Thu Feb 26 16:24:18 2015 +0530
+++ b/source/common/x86/intrapred8.asm	Thu Feb 26 17:13:08 2015 +0530
@@ -85,6 +85,11 @@ c_ang8_src1_9_1_9:    db 0, 1, 1, 2, 2, 
 c_ang8_src2_10_2_10:  db 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9
 c_ang8_src3_11_3_11:  db 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10, 2, 3, 3, 4, 4, 5, 5, 6, 6, 7, 7, 8, 8, 9, 9, 10
 
+c_ang8_31_8:          db 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 1, 31, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8
+c_ang8_13_22:         db 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 19, 13, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22
+c_ang8_27_4:          db 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4
+c_ang8_9_18:          db 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18
+
 ;; (blkSize - 1 - x)
 pw_planar4_0:         dw 3,  2,  1,  0,  3,  2,  1,  0
 pw_planar4_1:         dw 3,  3,  3,  3,  3,  3,  3,  3
@@ -32460,3 +32465,38 @@ cglobal intra_pred_ang8_30, 3, 4, 5
     movhps            [r0 + 2 * r1], xm4
     movhps            [r0 + r3], xm2
     RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang8_29, 3, 4, 5
+    mova              m3, [pw_1024]
+    vbroadcasti128    m0, [r2 + 1]
+
+    pshufb            m1, m0, [c_ang8_src1_9_1_9]
+    pshufb            m2, m0, [c_ang8_src1_9_2_10]
+    pshufb            m4, m0, [c_ang8_src2_10_2_10]
+    pshufb            m0,     [c_ang8_src2_10_3_11]
+
+    pmaddubsw         m1, [c_ang8_9_18]
+    pmulhrsw          m1, m3
+    pmaddubsw         m2, [c_ang8_27_4]
+    pmulhrsw          m2, m3
+    pmaddubsw         m4, [c_ang8_13_22]
+    pmulhrsw          m4, m3
+    pmaddubsw         m0, [c_ang8_31_8]
+    pmulhrsw          m0, m3
+    packuswb          m1, m2
+    packuswb          m4, m0
+
+    lea               r3, [3 * r1]
+    movq              [r0], xm1
+    vextracti128      xm2, m1, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm1
+    movhps            [r0 + r3], xm2
+    lea               r0, [r0 + 4 * r1]
+    movq              [r0], xm4
+    vextracti128      xm2, m4, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm4
+    movhps            [r0 + r3], xm2
+    RET
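
For reference, below is a minimal scalar C++ sketch (an assumed reference, not the x265 C primitive itself) of what intra_pred_ang8_29 computes: HEVC angular prediction for an 8x8 block in mode 29, intraPredAngle = 9, 8-bit pixels, no edge filtering. Per row y, deltaPos = 9 * (y + 1) yields (deltaInt, fract) pairs (0,9), (0,18), (0,27), (1,4), (1,13), (1,22), (1,31), (2,8), which is where the (32 - fract, fract) byte pairs in the new c_ang8_9_18 / c_ang8_27_4 / c_ang8_13_22 / c_ang8_31_8 tables and the src1_9 / src2_10 / src3_11 shuffle masks come from. The function name predAng8x8Mode29 is illustrative only.

// Scalar sketch (assumption: plain HEVC angular prediction, positive angle,
// no bFilter path) of the prediction this AVX2 kernel vectorizes.
#include <cstdint>

static void predAng8x8Mode29(uint8_t* dst, intptr_t dstStride, const uint8_t* srcPix)
{
    const int angle = 9;               // intraPredAngle for mode 29
    const uint8_t* ref = srcPix + 1;   // above reference row, as loaded by [r2 + 1]

    for (int y = 0; y < 8; y++)
    {
        int deltaPos = angle * (y + 1);
        int deltaInt = deltaPos >> 5;  // integer source offset: 0,0,0,1,1,1,1,2
        int fract    = deltaPos & 31;  // fractional weight:     9,18,27,4,13,22,31,8

        for (int x = 0; x < 8; x++)
        {
            // (32 - fract, fract) are the byte pairs in the c_ang8_* tables;
            // pmaddubsw forms this two-tap dot product in 16-bit, and
            // pmulhrsw with pw_1024 applies the same (+16) >> 5 rounding.
            dst[y * dstStride + x] = (uint8_t)(((32 - fract) * ref[deltaInt + x] +
                                                fract * ref[deltaInt + x + 1] + 16) >> 5);
        }
    }
}

Two rows share each 256-bit register (one row per 128-bit lane), which is why the weight tables pair fracts 9 & 18, 27 & 4, 13 & 22 and 31 & 8, and why the stores interleave movq/movhps between xm1/xm4 and the lanes extracted with vextracti128.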