#define VEC_1D_DCT(vb0,vb1,vb2,vb3,va0,va1,va2,va3)  \
    vz0 = vec_add(vb0,vb2);                          \
    vz1 = vec_sub(vb0,vb2);                          \
    vz2 = vec_sra(vb1,vec_splat_u16(1));             \
    vz2 = vec_sub(vz2,vb3);                          \
    vz3 = vec_sra(vb3,vec_splat_u16(1));             \
    vz3 = vec_add(vb1,vz3);                          \
    va0 = vec_add(vz0,vz3);                          \
    va1 = vec_add(vz1,vz2);                          \
    va2 = vec_sub(vz1,vz2);                          \
    va3 = vec_sub(vz0,vz3)
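
/* VEC_1D_DCT is one pass of the H.264 4-point inverse transform; in scalar
 * form:
 *     z0 = b0 + b2              z1 = b0 - b2
 *     z2 = (b1 >> 1) - b3       z3 = b1 + (b3 >> 1)
 *     a0 = z0 + z3              a1 = z1 + z2
 *     a2 = z1 - z2              a3 = z0 - z3
 * The vz0..vz3 temporaries must be declared by the code expanding the macro. */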
#define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \
    b0 = vec_mergeh( a0, a0 ); \
    b1 = vec_mergeh( a1, a0 ); \
    b2 = vec_mergeh( a2, a0 ); \
    b3 = vec_mergeh( a3, a0 ); \
    a0 = vec_mergeh( b0, b2 ); \
    a1 = vec_mergel( b0, b2 ); \
    a2 = vec_mergeh( b1, b3 ); \
    a3 = vec_mergel( b1, b3 ); \
    b0 = vec_mergeh( a0, a2 ); \
    b1 = vec_mergel( a0, a2 ); \
    b2 = vec_mergeh( a1, a3 ); \
    b3 = vec_mergel( a1, a3 )
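
/* VEC_TRANSPOSE_4 transposes the 4x4 matrix of 16-bit coefficients held in
 * the first four lanes of a0..a3 using three rounds of mergeh/mergel; the
 * remaining lanes of the outputs are don't-care values. */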
#define VEC_LOAD_U8_ADD_S16_STORE_U8(va)                      \
    vdst_orig = vec_ld(0, dst);                               \
    vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask);          \
    vdst_ss = (vec_s16) vec_mergeh(zero_u8v, vdst);           \
    va = vec_add(va, vdst_ss);                                \
    va_u8 = vec_packsu(va, zero_s16v);                        \
    va_u32 = vec_splat((vec_u32)va_u8, 0);                    \
    vec_ste(va_u32, element, (uint32_t*)dst);
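
/* VEC_LOAD_U8_ADD_S16_STORE_U8 is a 4-pixel read-modify-write on one row of
 * dst: load 16 bytes, move the 4 pixels of interest into place with vec_perm,
 * widen them to 16 bits, add the residual, saturate back to u8 and store a
 * single 32-bit word with vec_ste; 'element' selects which word of the
 * quadword dst falls into. */
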
static void h264_idct_add_altivec(uint8_t *dst, int16_t *block, int stride)
{
    vec_s16 va0, va1, va2, va3;
    vec_s16 vz0, vz1, vz2, vz3;
    vec_s16 vtmp0, vtmp1, vtmp2, vtmp3;
    vec_u8 va_u8;
    vec_u32 va_u32;
    vec_s16 vdst_ss;
    const vec_u16 v6us = vec_splat_u16(6);
    vec_u8 vdst, vdst_orig;
    vec_u8 vdst_mask = vec_lvsl(0, dst);
    int element = ((unsigned long)dst & 0xf) >> 2;
    LOAD_ZERO;

    block[0] += 32;  /* add 32 as a DC level for rounding of the final >>6 */

    vtmp0 = vec_ld(0,block);
    vtmp1 = vec_sld(vtmp0, vtmp0, 8);
    vtmp2 = vec_ld(16,block);
    vtmp3 = vec_sld(vtmp2, vtmp2, 8);
    memset(block, 0, 16 * sizeof(int16_t));
 
    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);
    VEC_TRANSPOSE_4(va0,va1,va2,va3,vtmp0,vtmp1,vtmp2,vtmp3);
    VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);
 
    va0 = vec_sra(va0,v6us);
    va1 = vec_sra(va1,v6us);
    va2 = vec_sra(va2,v6us);
    va3 = vec_sra(va3,v6us);
 
    VEC_LOAD_U8_ADD_S16_STORE_U8(va0);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va1);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va2);
    dst += stride;
    VEC_LOAD_U8_ADD_S16_STORE_U8(va3);
}
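
/* IDCT8_1D_ALTIVEC is one 1-D pass of the H.264 8x8 inverse transform, split
 * into an even half (s0,s2,s4,s6 -> b0v,b2v,b4v,b6v) and an odd half
 * (s1,s3,s5,s7 -> b1v,b3v,b5v,b7v), combined by a final butterfly; onev and
 * twov (the >>1 and >>2 shift counts) come from the expanding scope. */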
 
#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7) {\
    /* even half */                                           \
    vec_s16 a0v = vec_add(s0, s4);                            \
    vec_s16 a2v = vec_sub(s0, s4);                            \
    vec_s16 a4v = vec_sub(vec_sra(s2, onev), s6);             \
    vec_s16 a6v = vec_add(vec_sra(s6, onev), s2);             \
    vec_s16 b0v = vec_add(a0v, a6v);                          \
    vec_s16 b2v = vec_add(a2v, a4v);                          \
    vec_s16 b4v = vec_sub(a2v, a4v);                          \
    vec_s16 b6v = vec_sub(a0v, a6v);                          \
    /* odd half */                                            \
    vec_s16 a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \
    vec_s16 a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) ); \
    vec_s16 a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) ); \
    vec_s16 a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) ); \
    vec_s16 b1v = vec_add( vec_sra(a7v, twov), a1v);          \
    vec_s16 b3v = vec_add( a3v, vec_sra(a5v, twov));          \
    vec_s16 b5v = vec_sub( vec_sra(a3v, twov), a5v);          \
    vec_s16 b7v = vec_sub( a7v, vec_sra(a1v, twov));          \
    /* combine the halves */                                  \
    d0 = vec_add(b0v, b7v);                                   \
    d1 = vec_add(b2v, b5v);                                   \
    d2 = vec_add(b4v, b3v);                                   \
    d3 = vec_add(b6v, b1v);                                   \
    d4 = vec_sub(b6v, b1v);                                   \
    d5 = vec_sub(b4v, b3v);                                   \
    d6 = vec_sub(b2v, b5v);                                   \
    d7 = vec_sub(b0v, b7v);                                   \
}
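
/* ALTIVEC_STORE_SUM_CLIP adds one row of the 8x8 IDCT result (scaled down by
 * >>6) to 8 pixels at a possibly unaligned dest, with unsigned saturation.
 * The vec_ld/vec_st pairs plus the sel mask implement the usual AltiVec
 * unaligned read-modify-write across the two quadwords the row may span. */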
#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) { \
    /* unaligned load */                                     \
    vec_u8 hv = vec_ld( 0, dest );                           \
    vec_u8 lv = vec_ld( 7, dest );                           \
    vec_u8 dstv = vec_perm( hv, lv, (vec_u8)perm_ldv );      \
    vec_s16 idct_sh6 = vec_sra(idctv, sixv);                 \
    vec_u16 dst16 = (vec_u16)vec_mergeh(zero_u8v, dstv);     \
    vec_s16 idstsum = vec_adds(idct_sh6, (vec_s16)dst16);    \
    vec_u8 idstsum8 = vec_packsu(zero_s16v, idstsum);        \
    vec_u8 edgehv;                                           \
    /* unaligned store */                                    \
    vec_u8 bodyv  = vec_perm( idstsum8, idstsum8, perm_stv );\
    vec_u8 edgelv = vec_perm( sel, zero_u8v, perm_stv );     \
    lv = vec_sel( lv, bodyv, edgelv );                       \
    vec_st( lv, 7, dest );                                   \
    hv = vec_ld( 0, dest );                                  \
    edgehv = vec_perm( zero_u8v, sel, perm_stv );            \
    hv = vec_sel( hv, bodyv, edgehv );                       \
    vec_st( hv, 0, dest );                                   \
}

static void h264_idct8_add_altivec(uint8_t *dst, int16_t *dct, int stride)
{
    vec_s16 s0, s1, s2, s3, s4, s5, s6, s7;
    vec_s16 d0, d1, d2, d3, d4, d5, d6, d7;
    vec_s16 idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;

    vec_u8 perm_ldv = vec_lvsl(0, dst);
    vec_u8 perm_stv = vec_lvsr(8, dst);

    const vec_u16 onev = vec_splat_u16(1);
    const vec_u16 twov = vec_splat_u16(2);
    const vec_u16 sixv = vec_splat_u16(6);

    const vec_u8 sel = (vec_u8) {0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1};
    LOAD_ZERO;

    dct[0] += 32;  /* rounding for the >>6 at the end */
 
    s0 = vec_ld(0x00, (int16_t*)dct);
    s1 = vec_ld(0x10, (int16_t*)dct);
    s2 = vec_ld(0x20, (int16_t*)dct);
    s3 = vec_ld(0x30, (int16_t*)dct);
    s4 = vec_ld(0x40, (int16_t*)dct);
    s5 = vec_ld(0x50, (int16_t*)dct);
    s6 = vec_ld(0x60, (int16_t*)dct);
    s7 = vec_ld(0x70, (int16_t*)dct);
    memset(dct, 0, 64 * sizeof(int16_t));
 
    IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,
                     d0, d1, d2, d3, d4, d5, d6, d7);

    TRANSPOSE8(d0, d1, d2, d3, d4, d5, d6, d7);

    IDCT8_1D_ALTIVEC(d0, d1, d2, d3, d4, d5, d6, d7,
                     idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);
 
    ALTIVEC_STORE_SUM_CLIP(&dst[0*stride], idct0, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[1*stride], idct1, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[2*stride], idct2, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[3*stride], idct3, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[4*stride], idct4, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[5*stride], idct5, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[6*stride], idct6, perm_ldv, perm_stv, sel);
    ALTIVEC_STORE_SUM_CLIP(&dst[7*stride], idct7, perm_ldv, perm_stv, sel);
}
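
/* DC-only blocks: every output pixel becomes pix + ((dc + 32) >> 6). The
 * rounded DC is splatted across a vector, and adding dcplus while subtracting
 * dcminus (its negated, packed counterpart) emulates a signed add with
 * unsigned saturation, so results are clipped to [0,255] without unpacking. */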
 
static void h264_idct_dc_add_internal(uint8_t *dst, int16_t *block, int stride, int size)
{
    vec_s16 dc16;
    vec_u8 dcplus, dcminus, v0, v1, v2, v3, aligner;
    LOAD_ZERO;
    DECLARE_ALIGNED(16, int, dc);
    int i;

    dc = (block[0] + 32) >> 6;
    block[0] = 0;
    dc16 = vec_splat((vec_s16) vec_lde(0, &dc), 1);

    if (size == 4)
        dc16 = vec_sld(dc16, zero_s16v, 8);
    dcplus = vec_packsu(dc16, zero_s16v);
    dcminus = vec_packsu(vec_sub(zero_s16v, dc16), zero_s16v);

    aligner = vec_lvsr(0, dst);
    dcplus = vec_perm(dcplus, dcplus, aligner);
    dcminus = vec_perm(dcminus, dcminus, aligner);
 
    for (i = 0; i < size; i += 4) {
        v0 = vec_ld(0, dst+0*stride);
        v1 = vec_ld(0, dst+1*stride);
        v2 = vec_ld(0, dst+2*stride);
        v3 = vec_ld(0, dst+3*stride);

        v0 = vec_adds(v0, dcplus);
        v1 = vec_adds(v1, dcplus);
        v2 = vec_adds(v2, dcplus);
        v3 = vec_adds(v3, dcplus);

        v0 = vec_subs(v0, dcminus);
        v1 = vec_subs(v1, dcminus);
        v2 = vec_subs(v2, dcminus);
        v3 = vec_subs(v3, dcminus);

        vec_st(v0, 0, dst+0*stride);
        vec_st(v1, 0, dst+1*stride);
        vec_st(v2, 0, dst+2*stride);
        vec_st(v3, 0, dst+3*stride);

        dst += 4*stride;
    }
}
 
static void h264_idct_dc_add_altivec(uint8_t *dst, int16_t *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 4);
}

static void h264_idct8_dc_add_altivec(uint8_t *dst, int16_t *block, int stride)
{
    h264_idct_dc_add_internal(dst, block, stride, 8);
}
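
/* Dispatchers: like the generic C versions, a block whose non-zero count is 1
 * with only a DC coefficient present takes the cheap DC path; anything else
 * with a non-zero count gets the full IDCT. */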
 
static void h264_idct_add16_altivec(uint8_t *dst, const int *block_offset,
                                    int16_t *block, int stride,
                                    const uint8_t nnzc[15*8])
{
    int i;
    for(i=0; i<16; i++){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        }
    }
}
 
static void h264_idct_add16intra_altivec(uint8_t *dst, const int *block_offset,
                                         int16_t *block, int stride,
                                         const uint8_t nnzc[15*8])
{
    int i;
    for(i=0; i<16; i++){
        if(nnzc[ scan8[i] ]) h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
        else if(block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
    }
}
 
static void h264_idct8_add4_altivec(uint8_t *dst, const int *block_offset,
                                    int16_t *block, int stride,
                                    const uint8_t nnzc[15*8])
{
    int i;
    for(i=0; i<16; i+=4){
        int nnz = nnzc[ scan8[i] ];
        if(nnz){
            if(nnz==1 && block[i*16]) h264_idct8_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
            else                      h264_idct8_add_altivec(dst + block_offset[i], block + i*16, stride);
        }
    }
}
 
static void h264_idct_add8_altivec(uint8_t **dest, const int *block_offset,
                                   int16_t *block, int stride,
                                   const uint8_t nnzc[15*8])
{
    int i, j;
    for (j = 1; j < 3; j++) {
        for(i = j * 16; i < j * 16 + 4; i++){
            if(nnzc[ scan8[i] ])
                h264_idct_add_altivec(dest[j-1] + block_offset[i], block + i*16, stride);
            else if(block[i*16])
                h264_idct_dc_add_altivec(dest[j-1] + block_offset[i], block + i*16, stride);
        }
    }
}
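
/* Deblocking. The horizontal luma filter reuses the same filter core as the
 * vertical one by transposing the 16x6 pixel strip around the edge into
 * registers, filtering, then transposing the four modified lines back and
 * storing them 4 bytes at a time. */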
 
#define transpose4x16(r0, r1, r2, r3) {    \
    register vec_u8 r4;                    \
    register vec_u8 r5;                    \
    register vec_u8 r6;                    \
    register vec_u8 r7;                    \
                                           \
    r4 = vec_mergeh(r0, r2);               \
    r5 = vec_mergel(r0, r2);               \
    r6 = vec_mergeh(r1, r3);               \
    r7 = vec_mergel(r1, r3);               \
                                           \
    r0 = vec_mergeh(r4, r6);               \
    r1 = vec_mergel(r4, r6);               \
    r2 = vec_mergeh(r5, r7);               \
    r3 = vec_mergel(r5, r7);               \
}

static inline void write16x4(uint8_t *dst, int dst_stride,
                             register vec_u8 r0, register vec_u8 r1,
                             register vec_u8 r2, register vec_u8 r3) {
    DECLARE_ALIGNED(16, unsigned char, result)[64];
    uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
    int int_dst_stride = dst_stride/4;

    vec_st(r0,  0, result);
    vec_st(r1, 16, result);
    vec_st(r2, 32, result);
    vec_st(r3, 48, result);
    /* write back 4 bytes (one transposed row segment) per destination line */
    *dst_int = *src_int;
    *(dst_int+   int_dst_stride) = *(src_int + 1);
    *(dst_int+ 2*int_dst_stride) = *(src_int + 2);
    *(dst_int+ 3*int_dst_stride) = *(src_int + 3);
    *(dst_int+ 4*int_dst_stride) = *(src_int + 4);
    *(dst_int+ 5*int_dst_stride) = *(src_int + 5);
    *(dst_int+ 6*int_dst_stride) = *(src_int + 6);
    *(dst_int+ 7*int_dst_stride) = *(src_int + 7);
    *(dst_int+ 8*int_dst_stride) = *(src_int + 8);
    *(dst_int+ 9*int_dst_stride) = *(src_int + 9);
    *(dst_int+10*int_dst_stride) = *(src_int + 10);
    *(dst_int+11*int_dst_stride) = *(src_int + 11);
    *(dst_int+12*int_dst_stride) = *(src_int + 12);
    *(dst_int+13*int_dst_stride) = *(src_int + 13);
    *(dst_int+14*int_dst_stride) = *(src_int + 14);
    *(dst_int+15*int_dst_stride) = *(src_int + 15);
}
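
/* readAndTranspose16x6 loads 16 unaligned rows of 16 pixels and transposes
 * them, computing only the 6 output rows (the first six columns, i.e.
 * p2,p1,p0,q0,q1,q2 when called on pix-3) that the luma filter needs.
 * unaligned_load is assumed to be the usual lvsl/vec_perm helper from the
 * surrounding PPC utility headers. */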
 
#define readAndTranspose16x6(src, src_stride, r8, r9, r10, r11, r12, r13) {\
    register vec_u8 r0  = unaligned_load(0,             src);            \
    register vec_u8 r1  = unaligned_load(   src_stride, src);            \
    register vec_u8 r2  = unaligned_load(2* src_stride, src);            \
    register vec_u8 r3  = unaligned_load(3* src_stride, src);            \
    register vec_u8 r4  = unaligned_load(4* src_stride, src);            \
    register vec_u8 r5  = unaligned_load(5* src_stride, src);            \
    register vec_u8 r6  = unaligned_load(6* src_stride, src);            \
    register vec_u8 r7  = unaligned_load(7* src_stride, src);            \
    register vec_u8 r14 = unaligned_load(14*src_stride, src);            \
    register vec_u8 r15 = unaligned_load(15*src_stride, src);            \
                                                                         \
    r8  = unaligned_load( 8*src_stride, src);                            \
    r9  = unaligned_load( 9*src_stride, src);                            \
    r10 = unaligned_load(10*src_stride, src);                            \
    r11 = unaligned_load(11*src_stride, src);                            \
    r12 = unaligned_load(12*src_stride, src);                            \
    r13 = unaligned_load(13*src_stride, src);                            \
                                                                         \
    /* first merge pass */                                               \
    r0 = vec_mergeh(r0, r8);                                             \
    r1 = vec_mergeh(r1, r9);                                             \
    r2 = vec_mergeh(r2, r10);                                            \
    r3 = vec_mergeh(r3, r11);                                            \
    r4 = vec_mergeh(r4, r12);                                            \
    r5 = vec_mergeh(r5, r13);                                            \
    r6 = vec_mergeh(r6, r14);                                            \
    r7 = vec_mergeh(r7, r15);                                            \
                                                                         \
    /* second merge pass */                                              \
    r8  = vec_mergeh(r0, r4);                                            \
    r9  = vec_mergel(r0, r4);                                            \
    r10 = vec_mergeh(r1, r5);                                            \
    r11 = vec_mergel(r1, r5);                                            \
    r12 = vec_mergeh(r2, r6);                                            \
    r13 = vec_mergel(r2, r6);                                            \
    r14 = vec_mergeh(r3, r7);                                            \
    r15 = vec_mergel(r3, r7);                                            \
                                                                         \
    /* third merge pass; rows 3 and 7 are never needed */                \
    r0 = vec_mergeh(r8,  r12);                                           \
    r1 = vec_mergel(r8,  r12);                                           \
    r2 = vec_mergeh(r9,  r13);                                           \
    r4 = vec_mergeh(r10, r14);                                           \
    r5 = vec_mergel(r10, r14);                                           \
    r6 = vec_mergeh(r11, r15);                                           \
                                                                         \
    /* final merge pass: only the six rows the filter uses */            \
    r8  = vec_mergeh(r0, r4);                                            \
    r9  = vec_mergel(r0, r4);                                            \
    r10 = vec_mergeh(r1, r5);                                            \
    r11 = vec_mergel(r1, r5);                                            \
    r12 = vec_mergeh(r2, r6);                                            \
    r13 = vec_mergel(r2, r6);                                            \
}

static inline vec_u8 diff_lt_altivec ( register vec_u8 x,
                                       register vec_u8 y,
                                       register vec_u8 a) {
    register vec_u8 diff = vec_subs(x, y);
    register vec_u8 diffneg = vec_subs(y, x);
    register vec_u8 o = vec_or(diff, diffneg); /* |x - y| */
    o = (vec_u8)vec_cmplt(o, a);
    return o;
}
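
/* diff_lt_altivec computes |x - y| < a per byte without widening: vec_subs
 * saturates at zero, so one of subs(x,y)/subs(y,x) is the absolute difference
 * and the other is 0, and OR-ing them yields |x - y|. h264_deblock_mask below
 * ANDs the three standard H.264 edge conditions:
 * |p0-q0| < alpha, |p1-p0| < beta and |q1-q0| < beta. */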
 
static inline vec_u8 h264_deblock_mask ( register vec_u8 p0,
                                         register vec_u8 p1,
                                         register vec_u8 q0,
                                         register vec_u8 q1,
                                         register vec_u8 alpha,
                                         register vec_u8 beta) {

    register vec_u8 mask;
    register vec_u8 tempmask;

    mask = diff_lt_altivec(p0, q0, alpha);
    tempmask = diff_lt_altivec(p1, p0, beta);
    mask = vec_and(mask, tempmask);
    tempmask = diff_lt_altivec(q1, q0, beta);
    mask = vec_and(mask, tempmask);

    return mask;
}
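
/* h264_deblock_q1 uses the identity (a + b) >> 1 == vec_avg(a, b) - ((a ^ b) & 1):
 * vec_avg rounds up, so the XOR-and-1 correction turns avg(p2, avg(p0, q0))
 * into the truncating (p2 + ((p0 + q0 + 1) >> 1)) >> 1 required by the spec,
 * before the result is clipped to [p1 - tc0, p1 + tc0]. */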
 
// out: newp1 = clip((p2 + ((p0 + q0 + 1) >> 1)) >> 1, p1-tc0, p1+tc0)
static inline vec_u8 h264_deblock_q1(register vec_u8 p0,
                                     register vec_u8 p1,
                                     register vec_u8 p2,
                                     register vec_u8 q0,
                                     register vec_u8 tc0) {

    register vec_u8 average = vec_avg(p0, q0);
    register vec_u8 temp;
    register vec_u8 unclipped;
    register vec_u8 ones;
    register vec_u8 max;
    register vec_u8 min;
    register vec_u8 newp1;

    temp = vec_xor(average, p2);
    average = vec_avg(average, p2);      /* avg(p2, avg(p0, q0)) */
    ones = vec_splat_u8(1);
    temp = vec_and(temp, ones);          /* (p2 ^ avg(p0, q0)) & 1 */
    unclipped = vec_subs(average, temp); /* (p2 + ((p0 + q0 + 1) >> 1)) >> 1 */
    max = vec_adds(p1, tc0);
    min = vec_subs(p1, tc0);
    newp1 = vec_max(min, unclipped);
    newp1 = vec_min(max, newp1);
    return newp1;
}
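
/* h264_deblock_p0_q0 evaluates the spec's
 *     delta = clip(((q0 - p0) * 4 + (p1 - q1) + 4) >> 3, -tc0, tc0)
 *     p0' = p0 + delta,  q0' = q0 - delta
 * entirely in unsigned bytes: the intermediate sum is biased by 160 (A0v =
 * 10 << 4), so delta and -delta fall out of two saturating subtractions
 * against 160, and the clip against tc0 is a vec_min with the masked tc0. */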
 
#define h264_deblock_p0_q0(p0, p1, q0, q1, tc0masked) {                         \
    const vec_u8 A0v = vec_sl(vec_splat_u8(10), vec_splat_u8(4));               \
                                                                                \
    register vec_u8 pq0bit = vec_xor(p0,q0);                                    \
    register vec_u8 q1minus;                                                    \
    register vec_u8 p0minus;                                                    \
    register vec_u8 stage1;                                                     \
    register vec_u8 stage2;                                                     \
    register vec_u8 vec160;                                                     \
    register vec_u8 delta;                                                      \
    register vec_u8 deltaneg;                                                   \
                                                                                \
    q1minus = vec_nor(q1, q1);                 /* 255 - q1 */                   \
    stage1 = vec_avg(p1, q1minus);             /* (p1 - q1 + 256) >> 1 */       \
    stage2 = vec_sr(stage1, vec_splat_u8(1));  /* (p1 - q1 + 256) >> 2 */       \
    p0minus = vec_nor(p0, p0);                 /* 255 - p0 */                   \
    stage1 = vec_avg(q0, p0minus);             /* (q0 - p0 + 256) >> 1 */       \
    pq0bit = vec_and(pq0bit, vec_splat_u8(1));                                  \
    stage2 = vec_avg(stage2, pq0bit);                                           \
    stage2 = vec_adds(stage2, stage1);                                          \
    vec160 = vec_ld(0, &A0v);                                                   \
    deltaneg = vec_subs(vec160, stage2);       /* -delta */                     \
    delta = vec_subs(stage2, vec160);          /* delta */                      \
    deltaneg = vec_min(tc0masked, deltaneg);                                    \
    delta = vec_min(tc0masked, delta);                                          \
    p0 = vec_subs(p0, deltaneg);                                                \
    q0 = vec_subs(q0, delta);                                                   \
    p0 = vec_adds(p0, delta);                                                   \
    q0 = vec_adds(q0, deltaneg);                                                \
}
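
/* h264_loop_filter_luma_altivec ties the pieces together for one 16-pixel
 * edge: alpha/beta are splatted from a scratch buffer, the four int8 tc0
 * values are each replicated to cover their 4-pixel group (two mergeh
 * rounds), tc is bumped by 1 where p2/q2 qualify (by subtracting the
 * all-ones mask), and p1/q1 are only replaced after p0/q0 have been filtered
 * with the final tc. */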
#define h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0) {            \
    DECLARE_ALIGNED(16, unsigned char, temp)[16];                                            \
    register vec_u8 alphavec;                                                                \
    register vec_u8 betavec;                                                                 \
    register vec_u8 mask;                                                                    \
    register vec_u8 p1mask;                                                                  \
    register vec_u8 q1mask;                                                                  \
    register vector signed char tc0vec;                                                      \
    register vec_u8 finaltc0;                                                                \
    register vec_u8 tc0masked;                                                               \
    register vec_u8 newp1;                                                                   \
    register vec_u8 newq1;                                                                   \
                                                                                             \
    temp[0] = alpha;                                                                         \
    temp[1] = beta;                                                                          \
    alphavec = vec_ld(0, temp);                                                              \
    betavec = vec_splat(alphavec, 0x1);                                                      \
    alphavec = vec_splat(alphavec, 0x0);                                                     \
    mask = h264_deblock_mask(p0, p1, q0, q1, alphavec, betavec); /* if in block */           \
                                                                                             \
    AV_COPY32(temp, tc0);                                                                    \
    tc0vec = vec_ld(0, (signed char*)temp);                                                  \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                                     \
    tc0vec = vec_mergeh(tc0vec, tc0vec);                                                     \
    mask = vec_and(mask, vec_cmpgt(tc0vec, vec_splat_s8(-1)));   /* if tc0[i] >= 0 */        \
    finaltc0 = vec_and((vec_u8)tc0vec, mask);                    /* tc = tc0 */              \
                                                                                             \
    p1mask = diff_lt_altivec(p2, p0, betavec);                                               \
    p1mask = vec_and(p1mask, mask);                              /* if |p2 - p0| < beta */   \
    tc0masked = vec_and(p1mask, (vec_u8)tc0vec);                                             \
    finaltc0 = vec_sub(finaltc0, p1mask);                        /* tc++ */                  \
    newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked);                                      \
                                                                                             \
    q1mask = diff_lt_altivec(q2, q0, betavec);                                               \
    q1mask = vec_and(q1mask, mask);                              /* if |q2 - q0| < beta */   \
    tc0masked = vec_and(q1mask, (vec_u8)tc0vec);                                             \
    finaltc0 = vec_sub(finaltc0, q1mask);                        /* tc++ */                  \
    newq1 = h264_deblock_q1(p0, q1, q2, q0, tc0masked);                                      \
                                                                                             \
    h264_deblock_p0_q0(p0, p1, q0, q1, finaltc0);                                            \
    p1 = newp1;                                                                              \
    q1 = newq1;                                                                              \
}
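
/* Vertical edge filter: (tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0 only when all
 * four tc0 values are negative (the sign bit survives the AND), i.e. the
 * whole edge is skipped. Rows are loaded with plain vec_ld, so pix and stride
 * are assumed to keep each row 16-byte aligned here. */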
static void h264_v_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {
    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0) {
        register vec_u8 p2 = vec_ld(-3*stride, pix);
        register vec_u8 p1 = vec_ld(-2*stride, pix);
        register vec_u8 p0 = vec_ld(-1*stride, pix);
        register vec_u8 q0 = vec_ld(0, pix);
        register vec_u8 q1 = vec_ld(stride, pix);
        register vec_u8 q2 = vec_ld(2*stride, pix);
        h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0);
        vec_st(p1, -2*stride, pix);
        vec_st(p0, -1*stride, pix);
        vec_st(q0, 0, pix);
        vec_st(q1, stride, pix);
    }
}
 
static void h264_h_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {
    register vec_u8 line0, line1, line2, line3, line4, line5;
    if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0)
        return;
    readAndTranspose16x6(pix-3, stride, line0, line1, line2, line3, line4, line5);
    h264_loop_filter_luma_altivec(line0, line1, line2, line3, line4, line5, alpha, beta, tc0);
    transpose4x16(line1, line2, line3, line4);
    write16x4(pix-2, stride, line1, line2, line3, line4);
}
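
/* Weighted prediction. Per the spec, out = ((pix * weight + 2^(ld-1)) >> ld) + offset;
 * the code folds the offset into the rounding constant up front
 * (offset <<= log2_denom, plus the rounding bit), so the per-pixel work is
 * one multiply-add, one saturating add and one shift in 16-bit lanes. */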
 
static void weight_h264_W_altivec(uint8_t *block, int stride, int height,
                                  int log2_denom, int weight, int offset, int w)
{
    int y, aligned;
    vec_u8 vblock;
    vec_s16 vtemp, vweight, voffset, v0, v1;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED(16, int32_t, temp)[4];
    LOAD_ZERO;

    offset <<= log2_denom;
    if(log2_denom) offset += 1<<(log2_denom-1);
    temp[0] = log2_denom;
    temp[1] = weight;
    temp[2] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweight = vec_splat(vtemp, 3);
    voffset = vec_splat(vtemp, 5);
    aligned = !((unsigned long)block & 0xf);

    for (y = 0; y < height; y++) {
        vblock = vec_ld(0, block);

        v0 = (vec_s16)vec_mergeh(zero_u8v, vblock);
        v1 = (vec_s16)vec_mergel(zero_u8v, vblock);

        if (w == 16 || aligned) {
            v0 = vec_mladd(v0, vweight, zero_s16v);
            v0 = vec_adds(v0, voffset);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !aligned) {
            v1 = vec_mladd(v1, vweight, zero_s16v);
            v1 = vec_adds(v1, voffset);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vblock = vec_packsu(v0, v1);
        vec_st(vblock, 0, block);

        block += stride;
    }
}
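
/* Bidirectional weighting: out = (dst*weightd + src*weights + offset') >> (log2_denom+1),
 * where offset' = ((offset + 1) | 1) << log2_denom supplies the spec's
 * combined offset-plus-rounding term. The aligned/unaligned handling mirrors
 * the unidirectional case above, applied to both inputs. */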
 
static void biweight_h264_W_altivec(uint8_t *dst, uint8_t *src, int stride, int height,
                                    int log2_denom, int weightd, int weights, int offset, int w)
{
    int y, dst_aligned, src_aligned;
    vec_u8 vsrc, vdst;
    vec_s16 vtemp, vweights, vweightd, voffset, v0, v1, v2, v3;
    vec_u16 vlog2_denom;
    DECLARE_ALIGNED(16, int32_t, temp)[4];
    LOAD_ZERO;

    offset = ((offset + 1) | 1) << log2_denom;
    temp[0] = log2_denom+1;
    temp[1] = weights;
    temp[2] = weightd;
    temp[3] = offset;

    vtemp = (vec_s16)vec_ld(0, temp);
    vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
    vweights = vec_splat(vtemp, 3);
    vweightd = vec_splat(vtemp, 5);
    voffset = vec_splat(vtemp, 7);
    dst_aligned = !((unsigned long)dst & 0xf);
    src_aligned = !((unsigned long)src & 0xf);

    for (y = 0; y < height; y++) {
        vdst = vec_ld(0, dst);
        vsrc = vec_ld(0, src);

        v0 = (vec_s16)vec_mergeh(zero_u8v, vdst);
        v1 = (vec_s16)vec_mergel(zero_u8v, vdst);
        v2 = (vec_s16)vec_mergeh(zero_u8v, vsrc);
        v3 = (vec_s16)vec_mergel(zero_u8v, vsrc);

        if (!dst_aligned)
            v0 = v1;
        if (!src_aligned)
            v2 = v3;

        if (w == 16 || dst_aligned) {
            v0 = vec_mladd(v0, vweightd, zero_s16v);
            v2 = vec_mladd(v2, vweights, zero_s16v);

            v0 = vec_adds(v0, voffset);
            v0 = vec_adds(v0, v2);
            v0 = vec_sra(v0, vlog2_denom);
        }
        if (w == 16 || !dst_aligned) {
            v1 = vec_mladd(v1, vweightd, zero_s16v);
            v3 = vec_mladd(v3, vweights, zero_s16v);

            v1 = vec_adds(v1, voffset);
            v1 = vec_adds(v1, v3);
            v1 = vec_sra(v1, vlog2_denom);
        }
        vdst = vec_packsu(v0, v1);
        vec_st(vdst, 0, dst);

        dst += stride;
        src += stride;
    }
}
 
#define H264_WEIGHT(W) \
static void weight_h264_pixels ## W ## _altivec(uint8_t *block, int stride, int height, \
                                                int log2_denom, int weight, int offset) \
{ \
    weight_h264_W_altivec(block, stride, height, log2_denom, weight, offset, W); \
} \
static void biweight_h264_pixels ## W ## _altivec(uint8_t *dst, uint8_t *src, int stride, int height, \
                                                  int log2_denom, int weightd, int weights, int offset) \
{ \
    biweight_h264_W_altivec(dst, src, stride, height, log2_denom, weightd, weights, offset, W); \
}

H264_WEIGHT(16)
H264_WEIGHT( 8)

av_cold void ff_h264dsp_init_ppc(H264DSPContext *c, const int bit_depth,
                                 const int chroma_format_idc)
{
    if (!PPC_ALTIVEC(av_get_cpu_flags()))
        return;

    if (bit_depth == 8) {
        c->h264_idct_add = h264_idct_add_altivec;
        if (chroma_format_idc <= 1)
            c->h264_idct_add8 = h264_idct_add8_altivec;
        c->h264_idct_add16      = h264_idct_add16_altivec;
        c->h264_idct_add16intra = h264_idct_add16intra_altivec;
        c->h264_idct_dc_add     = h264_idct_dc_add_altivec;
        c->h264_idct8_dc_add    = h264_idct8_dc_add_altivec;
        c->h264_idct8_add       = h264_idct8_add_altivec;
        c->h264_idct8_add4      = h264_idct8_add4_altivec;
        c->h264_v_loop_filter_luma = h264_v_loop_filter_luma_altivec;
        c->h264_h_loop_filter_luma = h264_h_loop_filter_luma_altivec;

        c->weight_h264_pixels_tab[0]   = weight_h264_pixels16_altivec;
        c->weight_h264_pixels_tab[1]   = weight_h264_pixels8_altivec;
        c->biweight_h264_pixels_tab[0] = biweight_h264_pixels16_altivec;
        c->biweight_h264_pixels_tab[1] = biweight_h264_pixels8_altivec;
    }
}