libavcodec/ppc/vc1dsp_altivec.c

/*
 * VC-1 and WMV3 decoder - DSP functions AltiVec-optimized
 * Copyright (c) 2006 Konstantin Shishkov
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or
 * modify it under the terms of the GNU Lesser General Public
 * License as published by the Free Software Foundation; either
 * version 2.1 of the License, or (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * Lesser General Public License for more details.
 *
 * You should have received a copy of the GNU Lesser General Public
 * License along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavcodec/dsputil.h"
#include "libavcodec/vc1dsp.h"

#include "util_altivec.h"
#include "dsputil_altivec.h"

// main steps of 8x8 transform
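// STEP8 performs one pass of the 8-point VC-1 inverse transform on s0..s7,
// realizing the multiplies by the transform constants (12, 16, 6 for the even
// part; 16, 15, 9, 4 for the odd part) as shifts and adds. vec_rnd supplies
// the rounding bias; t0..t7 are scratch vectors declared by the caller.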
#define STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_rnd) \
do { \
    t0 = vec_sl(vec_add(s0, s4), vec_2); \
    t0 = vec_add(vec_sl(t0, vec_1), t0); \
    t0 = vec_add(t0, vec_rnd); \
    t1 = vec_sl(vec_sub(s0, s4), vec_2); \
    t1 = vec_add(vec_sl(t1, vec_1), t1); \
    t1 = vec_add(t1, vec_rnd); \
    t2 = vec_add(vec_sl(s6, vec_2), vec_sl(s6, vec_1)); \
    t2 = vec_add(t2, vec_sl(s2, vec_4)); \
    t3 = vec_add(vec_sl(s2, vec_2), vec_sl(s2, vec_1)); \
    t3 = vec_sub(t3, vec_sl(s6, vec_4)); \
    t4 = vec_add(t0, t2); \
    t5 = vec_add(t1, t3); \
    t6 = vec_sub(t1, t3); \
    t7 = vec_sub(t0, t2); \
\
    t0 = vec_sl(vec_add(s1, s3), vec_4); \
    t0 = vec_add(t0, vec_sl(s5, vec_3)); \
    t0 = vec_add(t0, vec_sl(s7, vec_2)); \
    t0 = vec_add(t0, vec_sub(s5, s3)); \
\
    t1 = vec_sl(vec_sub(s1, s5), vec_4); \
    t1 = vec_sub(t1, vec_sl(s7, vec_3)); \
    t1 = vec_sub(t1, vec_sl(s3, vec_2)); \
    t1 = vec_sub(t1, vec_add(s1, s7)); \
\
    t2 = vec_sl(vec_sub(s7, s3), vec_4); \
    t2 = vec_add(t2, vec_sl(s1, vec_3)); \
    t2 = vec_add(t2, vec_sl(s5, vec_2)); \
    t2 = vec_add(t2, vec_sub(s1, s7)); \
\
    t3 = vec_sl(vec_sub(s5, s7), vec_4); \
    t3 = vec_sub(t3, vec_sl(s3, vec_3)); \
    t3 = vec_add(t3, vec_sl(s1, vec_2)); \
    t3 = vec_sub(t3, vec_add(s3, s5)); \
\
    s0 = vec_add(t4, t0); \
    s1 = vec_add(t5, t1); \
    s2 = vec_add(t6, t2); \
    s3 = vec_add(t7, t3); \
    s4 = vec_sub(t7, t3); \
    s5 = vec_sub(t6, t2); \
    s6 = vec_sub(t5, t1); \
    s7 = vec_sub(t4, t0); \
}while(0)

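// SHIFT_HOR8 applies the first-pass scaling (arithmetic shift right by 3);
// SHIFT_VERT8 applies the second-pass scaling (shift right by 7) and adds 1
// to s4..s7 first, the extra rounding the second pass gives the last four
// outputs.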
#define SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7) \
do { \
    s0 = vec_sra(s0, vec_3); \
    s1 = vec_sra(s1, vec_3); \
    s2 = vec_sra(s2, vec_3); \
    s3 = vec_sra(s3, vec_3); \
    s4 = vec_sra(s4, vec_3); \
    s5 = vec_sra(s5, vec_3); \
    s6 = vec_sra(s6, vec_3); \
    s7 = vec_sra(s7, vec_3); \
}while(0)

#define SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7) \
do { \
    s0 = vec_sra(s0, vec_7); \
    s1 = vec_sra(s1, vec_7); \
    s2 = vec_sra(s2, vec_7); \
    s3 = vec_sra(s3, vec_7); \
    s4 = vec_sra(vec_add(s4, vec_1s), vec_7); \
    s5 = vec_sra(vec_add(s5, vec_1s), vec_7); \
    s6 = vec_sra(vec_add(s6, vec_1s), vec_7); \
    s7 = vec_sra(vec_add(s7, vec_1s), vec_7); \
}while(0)

/* main steps of 4x4 transform */
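/* STEP4 builds the multiplies by the 4-point transform constants (17 for the
 * even part, 22 and 10 for the odd part) from shifts and adds, again with
 * vec_rnd as the rounding bias. */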
#define STEP4(s0, s1, s2, s3, vec_rnd) \
do { \
    t1 = vec_add(vec_sl(s0, vec_4), s0); \
    t1 = vec_add(t1, vec_rnd); \
    t2 = vec_add(vec_sl(s2, vec_4), s2); \
    t0 = vec_add(t1, t2); \
    t1 = vec_sub(t1, t2); \
    t3 = vec_sl(vec_sub(s3, s1), vec_1); \
    t3 = vec_add(t3, vec_sl(t3, vec_2)); \
    t2 = vec_add(t3, vec_sl(s1, vec_5)); \
    t3 = vec_add(t3, vec_sl(s3, vec_3)); \
    t3 = vec_add(t3, vec_sl(s3, vec_2)); \
    s0 = vec_add(t0, t2); \
    s1 = vec_sub(t1, t3); \
    s2 = vec_add(t1, t3); \
    s3 = vec_sub(t0, t2); \
}while (0)

#define SHIFT_HOR4(s0, s1, s2, s3) \
    s0 = vec_sra(s0, vec_3); \
    s1 = vec_sra(s1, vec_3); \
    s2 = vec_sra(s2, vec_3); \
    s3 = vec_sra(s3, vec_3);

#define SHIFT_VERT4(s0, s1, s2, s3) \
    s0 = vec_sra(s0, vec_7); \
    s1 = vec_sra(s1, vec_7); \
    s2 = vec_sra(s2, vec_7); \
    s3 = vec_sra(s3, vec_7);

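/** Do inverse transform on 8x8 block */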
static void vc1_inv_trans_8x8_altivec(DCTELEM block[64])
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector  signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector  signed int vec_1s = vec_splat_s32(1);
    const vector unsigned int vec_1 = vec_splat_u32(1);

    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);

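    /* first pass: widen each 16-byte row to 32 bits (two 4-element halves,
     * s0..s7 and s8..sF), apply STEP8 with the +4 rounding constant, then
     * SHIFT_HOR8 (>>3) */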
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

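    /* second pass on the transposed data: STEP8 with the +64 rounding
     * constant (vec_64), then SHIFT_VERT8 (>>7) */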
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_64);
    SHIFT_VERT8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_64);
    SHIFT_VERT8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);

    vec_st(src0,  0, block);
    vec_st(src1, 16, block);
    vec_st(src2, 32, block);
    vec_st(src3, 48, block);
    vec_st(src4, 64, block);
    vec_st(src5, 80, block);
    vec_st(src6, 96, block);
    vec_st(src7,112, block);
}

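/** Do inverse transform on 8x4 part of block and add the result to dest */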
static void vc1_inv_trans_8x4_altivec(uint8_t *dest, int stride, DCTELEM *block)
{
    vector signed short src0, src1, src2, src3, src4, src5, src6, src7;
    vector signed int s0, s1, s2, s3, s4, s5, s6, s7;
    vector signed int s8, s9, sA, sB, sC, sD, sE, sF;
    vector signed int t0, t1, t2, t3, t4, t5, t6, t7;
    const vector signed int vec_64 = vec_sl(vec_splat_s32(4), vec_splat_u32(4));
    const vector unsigned int vec_7 = vec_splat_u32(7);
    const vector unsigned int vec_5 = vec_splat_u32(5);
    const vector unsigned int vec_4 = vec_splat_u32(4);
    const vector  signed int vec_4s = vec_splat_s32(4);
    const vector unsigned int vec_3 = vec_splat_u32(3);
    const vector unsigned int vec_2 = vec_splat_u32(2);
    const vector unsigned int vec_1 = vec_splat_u32(1);
    vector unsigned char tmp;
    vector signed short tmp2, tmp3;
    vector unsigned char perm0, perm1, p0, p1, p;

    src0 = vec_ld(  0, block);
    src1 = vec_ld( 16, block);
    src2 = vec_ld( 32, block);
    src3 = vec_ld( 48, block);
    src4 = vec_ld( 64, block);
    src5 = vec_ld( 80, block);
    src6 = vec_ld( 96, block);
    src7 = vec_ld(112, block);

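    /* horizontal pass: transpose, widen to 32 bits, STEP8 with the +4
     * rounding constant and SHIFT_HOR8 (>>3), then pack and transpose back */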
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);
    s0 = vec_unpackl(src0);
    s1 = vec_unpackl(src1);
    s2 = vec_unpackl(src2);
    s3 = vec_unpackl(src3);
    s4 = vec_unpackl(src4);
    s5 = vec_unpackl(src5);
    s6 = vec_unpackl(src6);
    s7 = vec_unpackl(src7);
    s8 = vec_unpackh(src0);
    s9 = vec_unpackh(src1);
    sA = vec_unpackh(src2);
    sB = vec_unpackh(src3);
    sC = vec_unpackh(src4);
    sD = vec_unpackh(src5);
    sE = vec_unpackh(src6);
    sF = vec_unpackh(src7);
    STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_4s);
    SHIFT_HOR8(s0, s1, s2, s3, s4, s5, s6, s7);
    STEP8(s8, s9, sA, sB, sC, sD, sE, sF, vec_4s);
    SHIFT_HOR8(s8, s9, sA, sB, sC, sD, sE, sF);
    src0 = vec_pack(s8, s0);
    src1 = vec_pack(s9, s1);
    src2 = vec_pack(sA, s2);
    src3 = vec_pack(sB, s3);
    src4 = vec_pack(sC, s4);
    src5 = vec_pack(sD, s5);
    src6 = vec_pack(sE, s6);
    src7 = vec_pack(sF, s7);
    TRANSPOSE8(src0, src1, src2, src3, src4, src5, src6, src7);

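    /* vertical pass on the four rows: 4-point STEP4 with the +64 rounding
     * constant, SHIFT_VERT4 (>>7), then pack back to 16 bits */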
    s0 = vec_unpackh(src0);
    s1 = vec_unpackh(src1);
    s2 = vec_unpackh(src2);
    s3 = vec_unpackh(src3);
    s8 = vec_unpackl(src0);
    s9 = vec_unpackl(src1);
    sA = vec_unpackl(src2);
    sB = vec_unpackl(src3);
    STEP4(s0, s1, s2, s3, vec_64);
    SHIFT_VERT4(s0, s1, s2, s3);
    STEP4(s8, s9, sA, sB, vec_64);
    SHIFT_VERT4(s8, s9, sA, sB);
    src0 = vec_pack(s0, s8);
    src1 = vec_pack(s1, s9);
    src2 = vec_pack(s2, sA);
    src3 = vec_pack(s3, sB);

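    /* perm0/perm1 interleave a zero byte (the 0xFF selector bytes pick from
     * the zero vector) with each destination pixel, widening eight pixels to
     * 16-bit values so the residual can be added with saturation; ADD then
     * re-packs and stores the eight result bytes via two 4-byte vec_ste */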
    p0 = vec_lvsl (0, dest);
    p1 = vec_lvsl (stride, dest);
    p = vec_splat_u8 (-1);
    perm0 = vec_mergeh (p, p0);
    perm1 = vec_mergeh (p, p1);

#define ADD(dest,src,perm)                                              \
    /* *(uint64_t *)&tmp = *(uint64_t *)dest; */                        \
    tmp = vec_ld (0, dest);                                             \
    tmp2 = (vector signed short)vec_perm (tmp, vec_splat_u8(0), perm);  \
    tmp3 = vec_adds (tmp2, src);                                        \
    tmp = vec_packsu (tmp3, tmp3);                                      \
    vec_ste ((vector unsigned int)tmp, 0, (unsigned int *)dest);        \
    vec_ste ((vector unsigned int)tmp, 4, (unsigned int *)dest);

    ADD (dest, src0, perm0)      dest += stride;
    ADD (dest, src1, perm1)      dest += stride;
    ADD (dest, src2, perm0)      dest += stride;
    ADD (dest, src3, perm1)
}

#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)

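/* Instantiate the shared H.264 chroma MC template twice, once with the plain
 * store op and once with the averaging op, to get the put and avg
 * no-rounding VC-1 chroma mc8 functions registered below. */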
#define OP_U8_ALTIVEC                          PUT_OP_U8_ALTIVEC
#define PREFIX_no_rnd_vc1_chroma_mc8_altivec   put_no_rnd_vc1_chroma_mc8_altivec
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_no_rnd_vc1_chroma_mc8_altivec

#define OP_U8_ALTIVEC                          AVG_OP_U8_ALTIVEC
#define PREFIX_no_rnd_vc1_chroma_mc8_altivec   avg_no_rnd_vc1_chroma_mc8_altivec
#include "h264_template_altivec.c"
#undef OP_U8_ALTIVEC
#undef PREFIX_no_rnd_vc1_chroma_mc8_altivec

void ff_vc1dsp_init_altivec(VC1DSPContext* dsp)
{
    if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
        return;

    dsp->vc1_inv_trans_8x8 = vc1_inv_trans_8x8_altivec;
    dsp->vc1_inv_trans_8x4 = vc1_inv_trans_8x4_altivec;
    dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = put_no_rnd_vc1_chroma_mc8_altivec;
    dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = avg_no_rnd_vc1_chroma_mc8_altivec;
}
