x265: changeset 9528:66f0864f5b87 (draft)
asm-avx2: luma_vsp[8x4], luma_vss[8x4]: improve 837c->500c, 790c->476c
author   | Divya Manivannan <divya@multicorewareinc.com>
date     | Tue, 17 Feb 2015 12:24:04 +0530
parents  | 4f36bb90fbb4
children | 4eaf41c1b2fb
files    | source/common/x86/asm-primitives.cpp source/common/x86/ipfilter8.asm
diffstat | 2 files changed, 146 insertions(+), 0 deletions(-)
--- a/source/common/x86/asm-primitives.cpp	Tue Feb 17 14:11:09 2015 +0530
+++ b/source/common/x86/asm-primitives.cpp	Tue Feb 17 12:24:04 2015 +0530
@@ -1640,10 +1640,12 @@ void setupAssemblyPrimitives(EncoderPrim
         p.pu[LUMA_4x4].luma_vsp = x265_interp_8tap_vert_sp_4x4_avx2;
         p.pu[LUMA_4x8].luma_vsp = x265_interp_8tap_vert_sp_4x8_avx2;
         p.pu[LUMA_4x16].luma_vsp = x265_interp_8tap_vert_sp_4x16_avx2;
+        p.pu[LUMA_8x4].luma_vsp = x265_interp_8tap_vert_sp_8x4_avx2;
 
         p.pu[LUMA_4x4].luma_vss = x265_interp_8tap_vert_ss_4x4_avx2;
         p.pu[LUMA_4x8].luma_vss = x265_interp_8tap_vert_ss_4x8_avx2;
         p.pu[LUMA_4x16].luma_vss = x265_interp_8tap_vert_ss_4x16_avx2;
+        p.pu[LUMA_8x4].luma_vss = x265_interp_8tap_vert_ss_8x4_avx2;
 
         // color space i420
         p.chroma[X265_CSP_I420].pu[CHROMA_420_4x4].filter_vpp = x265_interp_4tap_vert_pp_4x4_avx2;
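This hunk registers the two new AVX2 kernels in x265's primitive table so they replace the C reference routines at runtime. A minimal sketch of that dispatch pattern follows; the names here (PrimitiveTable, FilterSp8x4, setupPrimitives) are hypothetical stand-ins, not x265's real types, and the stub bodies only mark where the C and AVX2 implementations would live.

```cpp
#include <cstdint>

// Hypothetical function-pointer type; x265 uses a similar signature for its
// sp (16-bit intermediate in, 8-bit pixel out) vertical filter primitives.
typedef void (*FilterSp8x4)(const int16_t* src, intptr_t srcStride,
                            uint8_t* dst, intptr_t dstStride, int coeffIdx);

struct PrimitiveTable
{
    FilterSp8x4 luma_vsp_8x4;   // the real table has one slot per PU size and filter kind
};

// Stub bodies; in x265 these are the C primitive and the new assembly routine.
static void luma_vsp_8x4_c(const int16_t*, intptr_t, uint8_t*, intptr_t, int) {}
static void luma_vsp_8x4_avx2(const int16_t*, intptr_t, uint8_t*, intptr_t, int) {}

static void setupPrimitives(PrimitiveTable& p, bool hasAVX2)
{
    p.luma_vsp_8x4 = luma_vsp_8x4_c;            // portable default installed first
    if (hasAVX2)
        p.luma_vsp_8x4 = luma_vsp_8x4_avx2;     // override, as this changeset does for 8x4
}
```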
--- a/source/common/x86/ipfilter8.asm	Tue Feb 17 14:11:09 2015 +0530
+++ b/source/common/x86/ipfilter8.asm	Tue Feb 17 12:24:04 2015 +0530
@@ -11576,3 +11576,147 @@ FILTER_VER_LUMA_S_AVX2_8xN sp, 16
 FILTER_VER_LUMA_S_AVX2_8xN sp, 32
 FILTER_VER_LUMA_S_AVX2_8xN ss, 16
 FILTER_VER_LUMA_S_AVX2_8xN ss, 32
+
+%macro PROCESS_LUMA_S_AVX2_W8_4R 1
+    movu            xm0, [r0]                       ; m0 = row 0
+    movu            xm1, [r0 + r1]                  ; m1 = row 1
+    punpckhwd       xm2, xm0, xm1
+    punpcklwd       xm0, xm1
+    vinserti128     m0, m0, xm2, 1
+    pmaddwd         m0, [r5]
+    movu            xm2, [r0 + r1 * 2]              ; m2 = row 2
+    punpckhwd       xm3, xm1, xm2
+    punpcklwd       xm1, xm2
+    vinserti128     m1, m1, xm3, 1
+    pmaddwd         m1, [r5]
+    movu            xm3, [r0 + r4]                  ; m3 = row 3
+    punpckhwd       xm4, xm2, xm3
+    punpcklwd       xm2, xm3
+    vinserti128     m2, m2, xm4, 1
+    pmaddwd         m4, m2, [r5 + 1 * mmsize]
+    paddd           m0, m4
+    pmaddwd         m2, [r5]
+    lea             r0, [r0 + r1 * 4]
+    movu            xm4, [r0]                       ; m4 = row 4
+    punpckhwd       xm5, xm3, xm4
+    punpcklwd       xm3, xm4
+    vinserti128     m3, m3, xm5, 1
+    pmaddwd         m5, m3, [r5 + 1 * mmsize]
+    paddd           m1, m5
+    pmaddwd         m3, [r5]
+    movu            xm5, [r0 + r1]                  ; m5 = row 5
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 2 * mmsize]
+    paddd           m0, m6
+    pmaddwd         m4, [r5 + 1 * mmsize]
+    paddd           m2, m4
+    movu            xm6, [r0 + r1 * 2]              ; m6 = row 6
+    punpckhwd       xm4, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm4, 1
+    pmaddwd         m4, m5, [r5 + 2 * mmsize]
+    paddd           m1, m4
+    pmaddwd         m5, [r5 + 1 * mmsize]
+    paddd           m3, m5
+    movu            xm4, [r0 + r4]                  ; m4 = row 7
+    punpckhwd       xm5, xm6, xm4
+    punpcklwd       xm6, xm4
+    vinserti128     m6, m6, xm5, 1
+    pmaddwd         m5, m6, [r5 + 3 * mmsize]
+    paddd           m0, m5
+    pmaddwd         m6, [r5 + 2 * mmsize]
+    paddd           m2, m6
+    lea             r0, [r0 + r1 * 4]
+    movu            xm5, [r0]                       ; m5 = row 8
+    punpckhwd       xm6, xm4, xm5
+    punpcklwd       xm4, xm5
+    vinserti128     m4, m4, xm6, 1
+    pmaddwd         m6, m4, [r5 + 3 * mmsize]
+    paddd           m1, m6
+    pmaddwd         m4, [r5 + 2 * mmsize]
+    paddd           m3, m4
+    movu            xm6, [r0 + r1]                  ; m6 = row 9
+    punpckhwd       xm4, xm5, xm6
+    punpcklwd       xm5, xm6
+    vinserti128     m5, m5, xm4, 1
+    pmaddwd         m5, [r5 + 3 * mmsize]
+    paddd           m2, m5
+    movu            xm4, [r0 + r1 * 2]              ; m4 = row 10
+    punpckhwd       xm5, xm6, xm4
+    punpcklwd       xm6, xm4
+    vinserti128     m6, m6, xm5, 1
+    pmaddwd         m6, [r5 + 3 * mmsize]
+    paddd           m3, m6
+
+%ifidn %1,sp
+    paddd           m0, m7
+    paddd           m1, m7
+    paddd           m2, m7
+    paddd           m3, m7
+    psrad           m0, 12
+    psrad           m1, 12
+    psrad           m2, 12
+    psrad           m3, 12
+%else
+    psrad           m0, 6
+    psrad           m1, 6
+    psrad           m2, 6
+    psrad           m3, 6
+%endif
+    packssdw        m0, m1
+    packssdw        m2, m3
+%ifidn %1,sp
+    packuswb        m0, m2
+    mova            m4, [interp8_hps_shuf]
+    vpermd          m0, m4, m0
+    vextracti128    xm2, m0, 1
+%else
+    vpermq          m0, m0, 11011000b
+    vpermq          m2, m2, 11011000b
+    vextracti128    xm1, m0, 1
+    vextracti128    xm3, m2, 1
+%endif
+%endmacro
+
+%macro FILTER_VER_LUMA_S_AVX2_8x4 1
+INIT_YMM avx2
+cglobal interp_8tap_vert_%1_8x4, 4, 6, 8
+    mov             r4d, r4m
+    shl             r4d, 7
+    add             r1d, r1d
+
+%ifdef PIC
+    lea             r5, [pw_LumaCoeffVer]
+    add             r5, r4
+%else
+    lea             r5, [pw_LumaCoeffVer + r4]
+%endif
+
+    lea             r4, [r1 * 3]
+    sub             r0, r4
+%ifidn %1,sp
+    mova            m7, [pd_526336]
+%else
+    add             r3d, r3d
+%endif
+
+    PROCESS_LUMA_S_AVX2_W8_4R %1
+    lea             r4, [r3 * 3]
+%ifidn %1,sp
+    movq            [r2], xm0
+    movhps          [r2 + r3], xm0
+    movq            [r2 + r3 * 2], xm2
+    movhps          [r2 + r4], xm2
+%else
+    movu            [r2], xm0
+    movu            [r2 + r3], xm1
+    movu            [r2 + r3 * 2], xm2
+    movu            [r2 + r4], xm3
+%endif
+    RET
+%endmacro
+
+FILTER_VER_LUMA_S_AVX2_8x4 sp
+FILTER_VER_LUMA_S_AVX2_8x4 ss
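For reference, the arithmetic the new 8x4 kernels perform is the standard HEVC 8-tap vertical luma filter over 16-bit intermediate rows: the ss variant shifts the 32-bit sum right by 6, while the sp variant adds 526336 (the pd_526336 constant loaded above, i.e. the rounding term plus the internal offset scaled by 64), shifts right by 12 and clamps to 8-bit. A hedged scalar sketch of that computation, with hypothetical names (lumaFilter, vert_sp_8x4_c, vert_ss_8x4_c) and assuming 8-bit internal pixel depth:

```cpp
#include <algorithm>
#include <cstdint>

// HEVC 8-tap luma interpolation coefficients, indexed by fractional position.
static const int16_t lumaFilter[4][8] =
{
    {  0, 0,   0, 64,  0,   0, 0,  0 },
    { -1, 4, -10, 58, 17,  -5, 1,  0 },
    { -1, 4, -11, 40, 40, -11, 4, -1 },
    {  0, 1,  -5, 17, 58, -10, 4, -1 }
};

// "sp": 16-bit intermediate input -> 8-bit pixel output.
// (sum + 526336) >> 12 mirrors the paddd pd_526336 / psrad 12 / packuswb path above.
static void vert_sp_8x4_c(const int16_t* src, intptr_t srcStride,
                          uint8_t* dst, intptr_t dstStride, int coeffIdx)
{
    const int16_t* c = lumaFilter[coeffIdx];
    for (int y = 0; y < 4; y++)
    {
        for (int x = 0; x < 8; x++)
        {
            int sum = 0;
            for (int t = 0; t < 8; t++)               // taps span rows y-3 .. y+4
                sum += c[t] * src[x + (t - 3) * srcStride];
            int val = (sum + 526336) >> 12;           // 526336 = 2048 + (8192 << 6)
            dst[x] = (uint8_t)std::min(std::max(val, 0), 255);
        }
        src += srcStride;
        dst += dstStride;
    }
}

// "ss": 16-bit intermediate input -> 16-bit intermediate output, plain >> 6
// (matching the psrad 6 / packssdw path of the macro).
static void vert_ss_8x4_c(const int16_t* src, intptr_t srcStride,
                          int16_t* dst, intptr_t dstStride, int coeffIdx)
{
    const int16_t* c = lumaFilter[coeffIdx];
    for (int y = 0; y < 4; y++)
    {
        for (int x = 0; x < 8; x++)
        {
            int sum = 0;
            for (int t = 0; t < 8; t++)
                sum += c[t] * src[x + (t - 3) * srcStride];
            dst[x] = (int16_t)(sum >> 6);
        }
        src += srcStride;
        dst += dstStride;
    }
}
```

The AVX2 macro computes the same sums by interleaving adjacent rows (punpcklwd/punpckhwd) and using pmaddwd against coefficient pairs stored at r5, r5 + mmsize, r5 + 2*mmsize and r5 + 3*mmsize, which is why each source row contributes to up to four accumulators (m0..m3, one per output row).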