/* Shared prologue for the 16x16 plane intra predictors (LoongArch LASX).
 * Expanded inside each pred16x16_plane variant; expects `src` (uint8_t *,
 * top-left pixel of the 16x16 block) and `stride` (ptrdiff_t) to be in
 * scope at the expansion site.  Produces the two gradient accumulators:
 *   res0 - horizontal gradient, computed with vector ops from the row
 *          above the block;
 *   res1 - vertical gradient, computed scalar-wise from the column to
 *          the left of the block.
 * NOTE(review): this chunk is missing some original lines; `stride_1`
 * and `src0` are read below but initialised on lines not visible here
 * (presumably stride_1 = -stride and src0 = src - 1 -- confirm against
 * the full file). */
25 #define PRED16X16_PLANE \
26 ptrdiff_t stride_1, stride_2, stride_3, stride_4, stride_5, stride_6; /* small multiples of stride (stride_n == n*stride) */ \
27 ptrdiff_t stride_8, stride_15; \
28 int32_t res0, res1, res2, res3, cnt; \
29 uint8_t *src0, *src1; \
30 __m256i reg0, reg1, reg2, reg3, reg4; \
31 __m256i tmp0, tmp1, tmp2, tmp3; \
32 __m256i shuff = {0x0B040A0509060807, 0x0F000E010D020C03, 0, 0}; /* byte-shuffle control: pairs mirrored top-row samples for the difference step */ \
33 __m256i mult = {0x0004000300020001, 0x0008000700060005, 0, 0}; /* 16-bit gradient weights 1..8 */ \
34 __m256i int_mult1 = {0x0000000100000000, 0x0000000300000002, /* 32-bit lane indices 0..7; consumed by PRED16X16_PLANE_END */ \
35 0x0000000500000004, 0x0000000700000006}; \
38 stride_2 = stride << 1; \
39 stride_3 = stride_2 + stride; \
40 stride_4 = stride_2 << 1; \
41 stride_5 = stride_4 + stride; \
42 stride_6 = stride_3 << 1; \
43 stride_8 = stride_4 << 1; \
44 stride_15 = (stride_8 << 1) - stride; \
46 src1 = src0 + stride_8; /* src1: lower half of the left column (src0 set on a line missing from this chunk) */ \
48 reg0 = __lasx_xvldx(src0, -stride); /* left 8 bytes of the row above the block */ \
49 reg1 = __lasx_xvldx(src, (8 - stride)); /* right 8 bytes of the row above */ \
50 reg0 = __lasx_xvilvl_d(reg1, reg0); /* merge both halves into one vector */ \
51 reg0 = __lasx_xvshuf_b(reg0, reg0, shuff); /* order bytes so each mirrored pair sits adjacent */ \
52 reg0 = __lasx_xvhsubw_hu_bu(reg0, reg0); /* per-pair differences, widened to 16 bits */ \
53 reg0 = __lasx_xvmul_h(reg0, mult); /* weight differences by 1..8 */ \
54 res1 = (src1[0] - src0[stride_6]) + /* res1: weighted top/bottom differences down the left edge */ \
55 2 * (src1[stride] - src0[stride_5]) + \
56 3 * (src1[stride_2] - src0[stride_4]) + \
57 4 * (src1[stride_3] - src0[stride_3]) + \
58 5 * (src1[stride_4] - src0[stride_2]) + \
59 6 * (src1[stride_5] - src0[stride]) + \
60 7 * (src1[stride_6] - src0[0]) + \
61 8 * (src0[stride_15] - src0[stride_1]); \
62 reg0 = __lasx_xvhaddw_w_h(reg0, reg0); /* three pairwise-widening adds: reduce the eight */ \
63 reg0 = __lasx_xvhaddw_d_w(reg0, reg0); /* 16-bit weighted terms to a single sum */ \
64 reg0 = __lasx_xvhaddw_q_d(reg0, reg0); \
65 res0 = __lasx_xvpickve2gr_w(reg0, 0); /* extract the horizontal-gradient sum */ \
/* Shared epilogue for the 16x16 plane intra predictors.  Expanded after
 * each variant has rescaled res0/res1 (the gradients produced by
 * PRED16X16_PLANE).  Builds the per-pixel plane value from a base term
 * plus the horizontal ramp (res0 * x) and the vertical step (res1 per
 * row), then narrows with saturation and stores 16-byte rows into `src`.
 * NOTE(review): this chunk is missing some original lines -- res3 is
 * computed but its use (presumably res2 -= res3), the per-row advance of
 * `src` inside the loop, and the loop's closing brace are not visible
 * here; confirm against the full file. */
67 #define PRED16X16_PLANE_END \
68 res2 = (src0[stride_15] + src[15 - stride] + 1) << 4; /* 16 * (bottom-left + top-right + 1): base/DC term of the plane */ \
69 res3 = 7 * (res0 + res1); /* consumed on a line missing from this chunk */ \
71 reg0 = __lasx_xvreplgr2vr_w(res0); /* broadcast horizontal gradient */ \
72 reg1 = __lasx_xvreplgr2vr_w(res1); /* broadcast vertical gradient (per-row step) */ \
73 reg2 = __lasx_xvreplgr2vr_w(res2); /* running row base, advanced by reg1 each row */ \
74 reg3 = __lasx_xvmul_w(reg0, int_mult1); /* res0 * {0..7}: ramp for the left 8 pixels */ \
75 reg4 = __lasx_xvslli_w(reg0, 3); \
76 reg4 = __lasx_xvadd_w(reg4, reg3); /* res0 * {8..15}: ramp for the right 8 pixels */ \
77 for (cnt = 8; cnt--;) { /* two rows of 16 pixels per iteration */ \
78 tmp0 = __lasx_xvadd_w(reg2, reg3); /* row A, left half (32-bit) */ \
79 tmp1 = __lasx_xvadd_w(reg2, reg4); /* row A, right half */ \
80 tmp0 = __lasx_xvssrani_hu_w(tmp1, tmp0, 5); /* >>5 and narrow 32->16 with unsigned saturation */ \
81 tmp0 = __lasx_xvpermi_d(tmp0, 0xD8); /* fix 128-bit-lane interleave after the narrowing */ \
82 reg2 = __lasx_xvadd_w(reg2, reg1); /* step base down one row */ \
83 tmp2 = __lasx_xvadd_w(reg2, reg3); /* row B, left half */ \
84 tmp3 = __lasx_xvadd_w(reg2, reg4); /* row B, right half */ \
85 tmp1 = __lasx_xvssrani_hu_w(tmp3, tmp2, 5); \
86 tmp1 = __lasx_xvpermi_d(tmp1, 0xD8); \
87 tmp0 = __lasx_xvssrani_bu_h(tmp1, tmp0, 0); /* pack both rows to bytes, clamped to 0..255 */ \
88 reg2 = __lasx_xvadd_w(reg2, reg1); /* step base for the next iteration */ \
89 __lasx_xvstelm_d(tmp0, src, 0, 0); /* store row A as two 8-byte element stores */ \
90 __lasx_xvstelm_d(tmp0, src, 8, 2); \
92 __lasx_xvstelm_d(tmp0, src, 0, 1); /* store row B -- NOTE(review): the src advance between rows (original line 91) is missing from this chunk */ \
93 __lasx_xvstelm_d(tmp0, src, 8, 3); \
101 res0 = (5 * res0 + 32) >> 6;
102 res1 = (5 * res1 + 32) >> 6;
109 res0 = (res0 + (res0 >> 2)) >> 4;
110 res1 = (res1 + (res1 >> 2)) >> 4;
117 cnt = (5 * (res0/4)) / 16;
118 res0 = (5 * (res1/4)) / 16;