changeset 9587:aef6e6318778

asm-avx2: intra_pred_ang8_28
author Praveen Tiwari <praveen@multicorewareinc.com>
date Thu, 26 Feb 2015 18:55:45 +0530
parents 4d8bf8ee9164
children befd957e8fd1
files source/common/x86/asm-primitives.cpp source/common/x86/intrapred.h source/common/x86/intrapred8.asm
diffstat 3 files changed, 43 insertions(+), 0 deletions(-) [+]
line wrap: on
line diff
--- a/source/common/x86/asm-primitives.cpp	Thu Feb 26 17:55:31 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp	Thu Feb 26 18:55:45 2015 +0530
@@ -1849,6 +1849,7 @@ void setupAssemblyPrimitives(EncoderPrim
         p.cu[BLOCK_8x8].intra_pred[6] = x265_intra_pred_ang8_6_avx2;
         p.cu[BLOCK_8x8].intra_pred[7] = x265_intra_pred_ang8_7_avx2;
         p.cu[BLOCK_8x8].intra_pred[29] = x265_intra_pred_ang8_29_avx2;
+        p.cu[BLOCK_8x8].intra_pred[28] = x265_intra_pred_ang8_28_avx2;
     }
 }
 #endif // if HIGH_BIT_DEPTH
--- a/source/common/x86/intrapred.h	Thu Feb 26 17:55:31 2015 +0530
+++ b/source/common/x86/intrapred.h	Thu Feb 26 18:55:45 2015 +0530
@@ -168,6 +168,7 @@ void x265_intra_pred_ang8_6_avx2(pixel* 
 void x265_intra_pred_ang8_30_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_intra_pred_ang8_7_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_intra_pred_ang8_29_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
+void x265_intra_pred_ang8_28_avx2(pixel* dst, intptr_t dstStride, const pixel* srcPix, int dirMode, int bFilter);
 void x265_all_angs_pred_4x4_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
 void x265_all_angs_pred_8x8_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
 void x265_all_angs_pred_16x16_sse4(pixel *dest, pixel *refPix, pixel *filtPix, int bLuma);
--- a/source/common/x86/intrapred8.asm	Thu Feb 26 17:55:31 2015 +0530
+++ b/source/common/x86/intrapred8.asm	Thu Feb 26 18:55:45 2015 +0530
@@ -90,6 +90,11 @@ c_ang8_13_22:         db 19, 13, 19, 13,
 c_ang8_27_4:          db 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4, 28, 4
 c_ang8_9_18:          db 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 23, 9, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18, 14, 18
 
+c_ang8_5_10:          db 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 27, 5, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10, 22, 10
+c_ang8_15_20:         db 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 17, 15, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20, 12, 20
+c_ang8_25_30:         db 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 7, 25, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30, 2, 30
+c_ang8_3_8:           db 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 29, 3, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8, 24, 8
+
 ;; (blkSize - 1 - x)
 pw_planar4_0:         dw 3,  2,  1,  0,  3,  2,  1,  0
 pw_planar4_1:         dw 3,  3,  3,  3,  3,  3,  3,  3
@@ -32546,3 +32551,39 @@ cglobal intra_pred_ang8_29, 3, 4, 5
     movhps            [r0 + 2 * r1], xm4
     movhps            [r0 + r3], xm2
     RET
+
+INIT_YMM avx2
+cglobal intra_pred_ang8_28, 3, 4, 6
+    mova              m3, [pw_1024]
+    vbroadcasti128    m0, [r2 + 1]
+    movu              m5, [c_ang8_src1_9_1_9]
+
+    pshufb            m1, m0, m5
+    pshufb            m2, m0, m5
+    pshufb            m4, m0, m5
+    pshufb            m0,     [c_ang8_src2_10_2_10]
+
+    pmaddubsw         m1, [c_ang8_5_10]
+    pmulhrsw          m1, m3
+    pmaddubsw         m2, [c_ang8_15_20]
+    pmulhrsw          m2, m3
+    pmaddubsw         m4, [c_ang8_25_30]
+    pmulhrsw          m4, m3
+    pmaddubsw         m0, [c_ang8_3_8]
+    pmulhrsw          m0, m3
+    packuswb          m1, m2
+    packuswb          m4, m0
+
+    lea               r3, [3 * r1]
+    movq              [r0], xm1
+    vextracti128      xm2, m1, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm1
+    movhps            [r0 + r3], xm2
+    lea               r0, [r0 + 4 * r1]
+    movq              [r0], xm4
+    vextracti128      xm2, m4, 1
+    movq              [r0 + r1], xm2
+    movhps            [r0 + 2 * r1], xm4
+    movhps            [r0 + r3], xm2
+    RET