libpostproc/postprocess_altivec_template.c
/*
 * AltiVec optimizations (C) 2004 Romain Dolbeau <romain@dolbeau.org>
 *
 * based on code by Copyright (C) 2001-2003 Michael Niedermayer (michaelni@gmx.at)
 *
 * This file is part of FFmpeg.
 *
 * FFmpeg is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation; either version 2 of the License, or
 * (at your option) any later version.
 *
 * FFmpeg is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with FFmpeg; if not, write to the Free Software
 * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
 */

#include "libavutil/avutil.h"

#define ALTIVEC_TRANSPOSE_8x8_SHORT(src_a,src_b,src_c,src_d,src_e,src_f,src_g,src_h) \
    do {                                                          \
        __typeof__(src_a) tempA1, tempB1, tempC1, tempD1;         \
        __typeof__(src_a) tempE1, tempF1, tempG1, tempH1;         \
        __typeof__(src_a) tempA2, tempB2, tempC2, tempD2;         \
        __typeof__(src_a) tempE2, tempF2, tempG2, tempH2;         \
        tempA1 = vec_mergeh (src_a, src_e);                       \
        tempB1 = vec_mergel (src_a, src_e);                       \
        tempC1 = vec_mergeh (src_b, src_f);                       \
        tempD1 = vec_mergel (src_b, src_f);                       \
        tempE1 = vec_mergeh (src_c, src_g);                       \
        tempF1 = vec_mergel (src_c, src_g);                       \
        tempG1 = vec_mergeh (src_d, src_h);                       \
        tempH1 = vec_mergel (src_d, src_h);                       \
        tempA2 = vec_mergeh (tempA1, tempE1);                     \
        tempB2 = vec_mergel (tempA1, tempE1);                     \
        tempC2 = vec_mergeh (tempB1, tempF1);                     \
        tempD2 = vec_mergel (tempB1, tempF1);                     \
        tempE2 = vec_mergeh (tempC1, tempG1);                     \
        tempF2 = vec_mergel (tempC1, tempG1);                     \
        tempG2 = vec_mergeh (tempD1, tempH1);                     \
        tempH2 = vec_mergel (tempD1, tempH1);                     \
        src_a = vec_mergeh (tempA2, tempE2);                      \
        src_b = vec_mergel (tempA2, tempE2);                      \
        src_c = vec_mergeh (tempB2, tempF2);                      \
        src_d = vec_mergel (tempB2, tempF2);                      \
        src_e = vec_mergeh (tempC2, tempG2);                      \
        src_f = vec_mergel (tempC2, tempG2);                      \
        src_g = vec_mergeh (tempD2, tempH2);                      \
        src_h = vec_mergel (tempD2, tempH2);                      \
    } while (0)
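
/* The macro above is an in-register 8x8 transpose of 16-bit elements:
 * each vec_mergeh/vec_mergel pair interleaves two rows, and three such
 * rounds (pairing rows four apart each time) form a merge network that
 * leaves row i holding what was column i. */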


static inline int vertClassify_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
    this code makes no assumption on src or stride.
    One could remove the recomputation of the perm
    vector by assuming (stride % 16) == 0, unfortunately
    this is not always true.
    */
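    /* (The perm vector comes from vec_lvsl(); LOAD_LINE below stitches two
     * aligned vec_ld loads together with vec_perm, the standard AltiVec
     * idiom for unaligned reads.) */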
    short data_0 = ((c->nonBQP*c->ppMode.baseDcDiff)>>8) + 1;
    DECLARE_ALIGNED(16, short, data)[8] =
                    {
                        data_0,
                        data_0 * 2 + 1,
                        c->QP * 2,
                        c->QP * 4
                    };
    int numEq;
    uint8_t *src2 = src;
    vector signed short v_dcOffset;
    vector signed short v2QP;
    vector unsigned short v4QP;
    vector unsigned short v_dcThreshold;
    const int properStride = (stride % 16);
    const int srcAlign = ((unsigned long)src2 % 16);
    const int two_vectors = ((srcAlign > 8) || properStride) ? 1 : 0;
    const vector signed int zero = vec_splat_s32(0);
    const vector signed short mask = vec_splat_s16(1);
    vector signed int v_numEq = vec_splat_s32(0);
    vector signed short v_data = vec_ld(0, data);
    vector signed short v_srcAss0, v_srcAss1, v_srcAss2, v_srcAss3,
                        v_srcAss4, v_srcAss5, v_srcAss6, v_srcAss7;
//FIXME avoid this mess if possible
    register int j0 = 0,
                 j1 = stride,
                 j2 = 2 * stride,
                 j3 = 3 * stride,
                 j4 = 4 * stride,
                 j5 = 5 * stride,
                 j6 = 6 * stride,
                 j7 = 7 * stride;
    vector unsigned char v_srcA0, v_srcA1, v_srcA2, v_srcA3,
                         v_srcA4, v_srcA5, v_srcA6, v_srcA7;

    v_dcOffset = vec_splat(v_data, 0);
    v_dcThreshold = (vector unsigned short)vec_splat(v_data, 1);
    v2QP = vec_splat(v_data, 2);
    v4QP = (vector unsigned short)vec_splat(v_data, 3);

    src2 += stride * 4;

#define LOAD_LINE(i)                                                    \
    {                                                                   \
    vector unsigned char perm##i = vec_lvsl(j##i, src2);                \
    vector unsigned char v_srcA2##i;                                    \
    vector unsigned char v_srcA1##i = vec_ld(j##i, src2);               \
    if (two_vectors)                                                    \
        v_srcA2##i = vec_ld(j##i + 16, src2);                           \
    v_srcA##i =                                                         \
        vec_perm(v_srcA1##i, v_srcA2##i, perm##i);                      \
    v_srcAss##i =                                                       \
        (vector signed short)vec_mergeh((vector signed char)zero,       \
                                        (vector signed char)v_srcA##i); }

#define LOAD_LINE_ALIGNED(i)                                            \
    v_srcA##i = vec_ld(j##i, src2);                                     \
    v_srcAss##i =                                                       \
        (vector signed short)vec_mergeh((vector signed char)zero,       \
                                        (vector signed char)v_srcA##i)

    /* Special-casing the aligned case is worthwhile, as all calls from
     * the (transposed) horizontal deblocks will be aligned, in addition
     * to the naturally aligned vertical deblocks. */
    if (properStride && srcAlign) {
        LOAD_LINE_ALIGNED(0);
        LOAD_LINE_ALIGNED(1);
        LOAD_LINE_ALIGNED(2);
        LOAD_LINE_ALIGNED(3);
        LOAD_LINE_ALIGNED(4);
        LOAD_LINE_ALIGNED(5);
        LOAD_LINE_ALIGNED(6);
        LOAD_LINE_ALIGNED(7);
    } else {
        LOAD_LINE(0);
        LOAD_LINE(1);
        LOAD_LINE(2);
        LOAD_LINE(3);
        LOAD_LINE(4);
        LOAD_LINE(5);
        LOAD_LINE(6);
        LOAD_LINE(7);
    }
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED

#define ITER(i, j)                                                      \
    const vector signed short v_diff##i =                               \
        vec_sub(v_srcAss##i, v_srcAss##j);                              \
    const vector signed short v_sum##i =                                \
        vec_add(v_diff##i, v_dcOffset);                                 \
    const vector signed short v_comp##i =                               \
        (vector signed short)vec_cmplt((vector unsigned short)v_sum##i, \
                                       v_dcThreshold);                  \
    const vector signed short v_part##i = vec_and(mask, v_comp##i);

    {
        ITER(0, 1)
        ITER(1, 2)
        ITER(2, 3)
        ITER(3, 4)
        ITER(4, 5)
        ITER(5, 6)
        ITER(6, 7)

        v_numEq = vec_sum4s(v_part0, v_numEq);
        v_numEq = vec_sum4s(v_part1, v_numEq);
        v_numEq = vec_sum4s(v_part2, v_numEq);
        v_numEq = vec_sum4s(v_part3, v_numEq);
        v_numEq = vec_sum4s(v_part4, v_numEq);
        v_numEq = vec_sum4s(v_part5, v_numEq);
        v_numEq = vec_sum4s(v_part6, v_numEq);
    }

#undef ITER

    v_numEq = vec_sums(v_numEq, zero);

    v_numEq = vec_splat(v_numEq, 3);
    vec_ste(v_numEq, 0, &numEq);

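    /* numEq now holds the number of neighbouring-line pixel pairs whose
     * difference stays within the DC threshold.  A block with enough such
     * pairs is treated as flat: return 1 if the pixel differences sampled
     * below also stay within +/-2*QP (use the low-pass filter), 0 otherwise;
     * non-flat blocks return 2 (use the default filter). */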
    if (numEq > c->ppMode.flatnessThreshold) {
        const vector unsigned char mmoP1 = (const vector unsigned char)
            {0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f,
             0x00, 0x01, 0x12, 0x13, 0x08, 0x09, 0x1A, 0x1B};
        const vector unsigned char mmoP2 = (const vector unsigned char)
            {0x04, 0x05, 0x16, 0x17, 0x0C, 0x0D, 0x1E, 0x1F,
             0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f, 0x1f};
        const vector unsigned char mmoP = (const vector unsigned char)
            vec_lvsl(8, (unsigned char*)0);

        vector signed short mmoL1 = vec_perm(v_srcAss0, v_srcAss2, mmoP1);
        vector signed short mmoL2 = vec_perm(v_srcAss4, v_srcAss6, mmoP2);
        vector signed short mmoL = vec_perm(mmoL1, mmoL2, mmoP);
        vector signed short mmoR1 = vec_perm(v_srcAss5, v_srcAss7, mmoP1);
        vector signed short mmoR2 = vec_perm(v_srcAss1, v_srcAss3, mmoP2);
        vector signed short mmoR = vec_perm(mmoR1, mmoR2, mmoP);
        vector signed short mmoDiff = vec_sub(mmoL, mmoR);
        vector unsigned short mmoSum = (vector unsigned short)vec_add(mmoDiff, v2QP);

        if (vec_any_gt(mmoSum, v4QP))
            return 0;
        else
            return 1;
    }
    else return 2;
}

static inline void doVertLowPass_altivec(uint8_t *src, int stride, PPContext *c) {
    /*
    this code makes no assumption on src or stride.
    One could remove the recomputation of the perm
    vector by assuming (stride % 16) == 0, unfortunately
    this is not always true. Quite a lot of load/stores
    can be removed by assuming proper alignment of
    src & stride :-(
    */
    uint8_t *src2 = src;
    const vector signed int zero = vec_splat_s32(0);
    const int properStride = (stride % 16);
    const int srcAlign = ((unsigned long)src2 % 16);
    DECLARE_ALIGNED(16, short, qp)[8] = {c->QP};
    vector signed short vqp = vec_ld(0, qp);
    vector signed short vb0, vb1, vb2, vb3, vb4, vb5, vb6, vb7, vb8, vb9;
    vector unsigned char vbA0, av_uninit(vbA1), av_uninit(vbA2), av_uninit(vbA3), av_uninit(vbA4), av_uninit(vbA5), av_uninit(vbA6), av_uninit(vbA7), av_uninit(vbA8), vbA9;
    vector unsigned char vbB0, av_uninit(vbB1), av_uninit(vbB2), av_uninit(vbB3), av_uninit(vbB4), av_uninit(vbB5), av_uninit(vbB6), av_uninit(vbB7), av_uninit(vbB8), vbB9;
    vector unsigned char vbT0, vbT1, vbT2, vbT3, vbT4, vbT5, vbT6, vbT7, vbT8, vbT9;
    vector unsigned char perml0, perml1, perml2, perml3, perml4,
                         perml5, perml6, perml7, perml8, perml9;
    register int j0 = 0,
                 j1 = stride,
                 j2 = 2 * stride,
                 j3 = 3 * stride,
                 j4 = 4 * stride,
                 j5 = 5 * stride,
                 j6 = 6 * stride,
                 j7 = 7 * stride,
                 j8 = 8 * stride,
                 j9 = 9 * stride;

    vqp = vec_splat(vqp, 0);

    src2 += stride*3;

#define LOAD_LINE(i)                                                    \
    perml##i = vec_lvsl(i * stride, src2);                              \
    vbA##i = vec_ld(i * stride, src2);                                  \
    vbB##i = vec_ld(i * stride + 16, src2);                             \
    vbT##i = vec_perm(vbA##i, vbB##i, perml##i);                        \
    vb##i =                                                             \
        (vector signed short)vec_mergeh((vector unsigned char)zero,     \
                                        (vector unsigned char)vbT##i)

#define LOAD_LINE_ALIGNED(i)                                            \
    vbT##i = vec_ld(j##i, src2);                                        \
    vb##i =                                                             \
        (vector signed short)vec_mergeh((vector signed char)zero,       \
                                        (vector signed char)vbT##i)

    /* Special-casing the aligned case is worthwhile, as all calls from
     * the (transposed) horizontal deblocks will be aligned, in addition
     * to the naturally aligned vertical deblocks. */
    if (properStride && srcAlign) {
        LOAD_LINE_ALIGNED(0);
        LOAD_LINE_ALIGNED(1);
        LOAD_LINE_ALIGNED(2);
        LOAD_LINE_ALIGNED(3);
        LOAD_LINE_ALIGNED(4);
        LOAD_LINE_ALIGNED(5);
        LOAD_LINE_ALIGNED(6);
        LOAD_LINE_ALIGNED(7);
        LOAD_LINE_ALIGNED(8);
        LOAD_LINE_ALIGNED(9);
    } else {
        LOAD_LINE(0);
        LOAD_LINE(1);
        LOAD_LINE(2);
        LOAD_LINE(3);
        LOAD_LINE(4);
        LOAD_LINE(5);
        LOAD_LINE(6);
        LOAD_LINE(7);
        LOAD_LINE(8);
        LOAD_LINE(9);
    }
#undef LOAD_LINE
#undef LOAD_LINE_ALIGNED
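    /* Vector version of the scalar vertical low-pass: v_first/v_last fall
     * back to the second/next-to-last line where the edge step (|vb0-vb1|,
     * |vb8-vb9|) reaches QP, v_sumsB0..v_sumsB9 maintain a sliding window
     * sum (with rounding constant 4), and each output line is
     * vr_j = (v_sumsB_{j-1} + v_sumsB_{j+1} + 2*vb_j) >> 4. */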
    {
        const vector unsigned short v_2 = vec_splat_u16(2);
        const vector unsigned short v_4 = vec_splat_u16(4);

        const vector signed short v_diff01 = vec_sub(vb0, vb1);
        const vector unsigned short v_cmp01 =
            (const vector unsigned short) vec_cmplt(vec_abs(v_diff01), vqp);
        const vector signed short v_first = vec_sel(vb1, vb0, v_cmp01);
        const vector signed short v_diff89 = vec_sub(vb8, vb9);
        const vector unsigned short v_cmp89 =
            (const vector unsigned short) vec_cmplt(vec_abs(v_diff89), vqp);
        const vector signed short v_last = vec_sel(vb8, vb9, v_cmp89);

        const vector signed short temp01 = vec_mladd(v_first, (vector signed short)v_4, vb1);
        const vector signed short temp02 = vec_add(vb2, vb3);
        const vector signed short temp03 = vec_add(temp01, (vector signed short)v_4);
        const vector signed short v_sumsB0 = vec_add(temp02, temp03);

        const vector signed short temp11 = vec_sub(v_sumsB0, v_first);
        const vector signed short v_sumsB1 = vec_add(temp11, vb4);

        const vector signed short temp21 = vec_sub(v_sumsB1, v_first);
        const vector signed short v_sumsB2 = vec_add(temp21, vb5);

        const vector signed short temp31 = vec_sub(v_sumsB2, v_first);
        const vector signed short v_sumsB3 = vec_add(temp31, vb6);

        const vector signed short temp41 = vec_sub(v_sumsB3, v_first);
        const vector signed short v_sumsB4 = vec_add(temp41, vb7);

        const vector signed short temp51 = vec_sub(v_sumsB4, vb1);
        const vector signed short v_sumsB5 = vec_add(temp51, vb8);

        const vector signed short temp61 = vec_sub(v_sumsB5, vb2);
        const vector signed short v_sumsB6 = vec_add(temp61, v_last);

        const vector signed short temp71 = vec_sub(v_sumsB6, vb3);
        const vector signed short v_sumsB7 = vec_add(temp71, v_last);

        const vector signed short temp81 = vec_sub(v_sumsB7, vb4);
        const vector signed short v_sumsB8 = vec_add(temp81, v_last);

        const vector signed short temp91 = vec_sub(v_sumsB8, vb5);
        const vector signed short v_sumsB9 = vec_add(temp91, v_last);

    #define COMPUTE_VR(i, j, k)                                             \
        const vector signed short temps1##i =                               \
            vec_add(v_sumsB##i, v_sumsB##k);                                \
        const vector signed short temps2##i =                               \
            vec_mladd(vb##j, (vector signed short)v_2, temps1##i);          \
        const vector signed short vr##j = vec_sra(temps2##i, v_4)

        COMPUTE_VR(0, 1, 2);
        COMPUTE_VR(1, 2, 3);
        COMPUTE_VR(2, 3, 4);
        COMPUTE_VR(3, 4, 5);
        COMPUTE_VR(4, 5, 6);
        COMPUTE_VR(5, 6, 7);
        COMPUTE_VR(6, 7, 8);
        COMPUTE_VR(7, 8, 9);

        const vector signed char neg1 = vec_splat_s8(-1);
        const vector unsigned char permHH = (const vector unsigned char){0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                         0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F};

#define PACK_AND_STORE(i)                                       \
{   const vector unsigned char perms##i =                       \
        vec_lvsr(i * stride, src2);                             \
    const vector unsigned char vf##i =                          \
        vec_packsu(vr##i, (vector signed short)zero);           \
    const vector unsigned char vg##i =                          \
        vec_perm(vf##i, vbT##i, permHH);                        \
    const vector unsigned char mask##i =                        \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
    const vector unsigned char vg2##i =                         \
        vec_perm(vg##i, vg##i, perms##i);                       \
    const vector unsigned char svA##i =                         \
        vec_sel(vbA##i, vg2##i, mask##i);                       \
    const vector unsigned char svB##i =                         \
        vec_sel(vg2##i, vbB##i, mask##i);                       \
    vec_st(svA##i, i * stride, src2);                           \
    vec_st(svB##i, i * stride + 16, src2);}

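/* PACK_AND_STORE implements an unaligned read-modify-write store: vec_lvsr
 * yields the inverse permute of the load, the 0x00/0xFF byte mask built from
 * it selects which bytes of the two destination vectors (vbA/vbB, loaded
 * earlier) get replaced, and vec_sel merges new and old bytes before the
 * two aligned vec_st writes. */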
#define PACK_AND_STORE_ALIGNED(i)                               \
{   const vector unsigned char vf##i =                          \
        vec_packsu(vr##i, (vector signed short)zero);           \
    const vector unsigned char vg##i =                          \
        vec_perm(vf##i, vbT##i, permHH);                        \
    vec_st(vg##i, i * stride, src2);}

        /* Special-casing the aligned case is worthwhile, as all calls from
         * the (transposed) horizontal deblocks will be aligned, in addition
         * to the naturally aligned vertical deblocks. */
        if (properStride && srcAlign) {
            PACK_AND_STORE_ALIGNED(1)
            PACK_AND_STORE_ALIGNED(2)
            PACK_AND_STORE_ALIGNED(3)
            PACK_AND_STORE_ALIGNED(4)
            PACK_AND_STORE_ALIGNED(5)
            PACK_AND_STORE_ALIGNED(6)
            PACK_AND_STORE_ALIGNED(7)
            PACK_AND_STORE_ALIGNED(8)
        } else {
            PACK_AND_STORE(1)
            PACK_AND_STORE(2)
            PACK_AND_STORE(3)
            PACK_AND_STORE(4)
            PACK_AND_STORE(5)
            PACK_AND_STORE(6)
            PACK_AND_STORE(7)
            PACK_AND_STORE(8)
        }
    #undef PACK_AND_STORE
    #undef PACK_AND_STORE_ALIGNED
    }
}


static inline void doVertDefFilter_altivec(uint8_t src[], int stride, PPContext *c) {
    /*
    this code makes no assumption on src or stride.
    One could remove the recomputation of the perm
    vector by assuming (stride % 16) == 0, unfortunately
    this is not always true. Quite a lot of load/stores
    can be removed by assuming proper alignment of
    src & stride :-(
    */
    uint8_t *src2 = src + stride*3;
    const vector signed int zero = vec_splat_s32(0);
    DECLARE_ALIGNED(16, short, qp)[8] = {8*c->QP};
    vector signed short vqp = vec_splat(
                                (vector signed short)vec_ld(0, qp), 0);

#define LOAD_LINE(i)                                                    \
    const vector unsigned char perm##i =                                \
        vec_lvsl(i * stride, src2);                                     \
    const vector unsigned char vbA##i =                                 \
        vec_ld(i * stride, src2);                                       \
    const vector unsigned char vbB##i =                                 \
        vec_ld(i * stride + 16, src2);                                  \
    const vector unsigned char vbT##i =                                 \
        vec_perm(vbA##i, vbB##i, perm##i);                              \
    const vector signed short vb##i =                                   \
        (vector signed short)vec_mergeh((vector unsigned char)zero,     \
                                        (vector unsigned char)vbT##i)

    LOAD_LINE(1);
    LOAD_LINE(2);
    LOAD_LINE(3);
    LOAD_LINE(4);
    LOAD_LINE(5);
    LOAD_LINE(6);
    LOAD_LINE(7);
    LOAD_LINE(8);
#undef LOAD_LINE

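    /* Vector translation of the scalar default deblock filter:
     *   middleEnergy = 5*(l5-l4) + 2*(l3-l6)
     * and likewise for the left/right energies; the correction
     *   d = max(|mE| - min(|lE|, |rE|), 0);  d = (5*d + 32) >> 6
     * takes the sign of -mE, is clipped against q = (l4-l5)/2, and is
     * applied to l4/l5 only where |mE| < 8*QP (vqp above). */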
    const vector signed short v_1 = vec_splat_s16(1);
    const vector signed short v_2 = vec_splat_s16(2);
    const vector signed short v_5 = vec_splat_s16(5);
    const vector signed short v_32 = vec_sl(v_1,
                                            (vector unsigned short)v_5);
    /* middle energy */
    const vector signed short l3minusl6 = vec_sub(vb3, vb6);
    const vector signed short l5minusl4 = vec_sub(vb5, vb4);
    const vector signed short twotimes_l3minusl6 = vec_mladd(v_2, l3minusl6, (vector signed short)zero);
    const vector signed short mE = vec_mladd(v_5, l5minusl4, twotimes_l3minusl6);
    const vector signed short absmE = vec_abs(mE);
    /* left & right energy */
    const vector signed short l1minusl4 = vec_sub(vb1, vb4);
    const vector signed short l3minusl2 = vec_sub(vb3, vb2);
    const vector signed short l5minusl8 = vec_sub(vb5, vb8);
    const vector signed short l7minusl6 = vec_sub(vb7, vb6);
    const vector signed short twotimes_l1minusl4 = vec_mladd(v_2, l1minusl4, (vector signed short)zero);
    const vector signed short twotimes_l5minusl8 = vec_mladd(v_2, l5minusl8, (vector signed short)zero);
    const vector signed short lE = vec_mladd(v_5, l3minusl2, twotimes_l1minusl4);
    const vector signed short rE = vec_mladd(v_5, l7minusl6, twotimes_l5minusl8);
    /* d */
    const vector signed short ddiff = vec_sub(absmE,
                                              vec_min(vec_abs(lE),
                                                      vec_abs(rE)));
    const vector signed short ddiffclamp = vec_max(ddiff, (vector signed short)zero);
    const vector signed short dtimes64 = vec_mladd(v_5, ddiffclamp, v_32);
    const vector signed short d = vec_sra(dtimes64, vec_splat_u16(6));
    const vector signed short minusd = vec_sub((vector signed short)zero, d);
    const vector signed short finald = vec_sel(minusd,
                                               d,
                                               vec_cmpgt(vec_sub((vector signed short)zero, mE),
                                                         (vector signed short)zero));
    /* q */
    const vector signed short qtimes2 = vec_sub(vb4, vb5);
    /* for a shift right to behave like /2, we need to add one
       to all negative integers */
    const vector signed short rounddown = vec_sel((vector signed short)zero,
                                                  v_1,
                                                  vec_cmplt(qtimes2, (vector signed short)zero));
    const vector signed short q = vec_sra(vec_add(qtimes2, rounddown), vec_splat_u16(1));
    /* clamp */
    const vector signed short dclamp_P1 = vec_max((vector signed short)zero, finald);
    const vector signed short dclamp_P = vec_min(dclamp_P1, q);
    const vector signed short dclamp_N1 = vec_min((vector signed short)zero, finald);
    const vector signed short dclamp_N = vec_max(dclamp_N1, q);

    const vector signed short dclampedfinal = vec_sel(dclamp_N,
                                                      dclamp_P,
                                                      vec_cmpgt(q, (vector signed short)zero));
    const vector signed short dornotd = vec_sel((vector signed short)zero,
                                                dclampedfinal,
                                                vec_cmplt(absmE, vqp));
    /* add/subtract to l4 and l5 */
    const vector signed short vb4minusd = vec_sub(vb4, dornotd);
    const vector signed short vb5plusd  = vec_add(vb5, dornotd);
    /* finally, stores */
    const vector unsigned char st4 = vec_packsu(vb4minusd, (vector signed short)zero);
    const vector unsigned char st5 = vec_packsu(vb5plusd,  (vector signed short)zero);

    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char){0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                     0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F};

#define STORE(i)                                                \
{    const vector unsigned char perms##i =                      \
         vec_lvsr(i * stride, src2);                            \
     const vector unsigned char vg##i =                         \
         vec_perm(st##i, vbT##i, permHH);                       \
     const vector unsigned char mask##i =                       \
         vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms##i); \
     const vector unsigned char vg2##i =                        \
         vec_perm(vg##i, vg##i, perms##i);                      \
     const vector unsigned char svA##i =                        \
         vec_sel(vbA##i, vg2##i, mask##i);                      \
     const vector unsigned char svB##i =                        \
         vec_sel(vg2##i, vbB##i, mask##i);                      \
     vec_st(svA##i, i * stride, src2);                          \
     vec_st(svB##i, i * stride + 16, src2);}

    STORE(4)
    STORE(5)
}

static inline void dering_altivec(uint8_t src[], int stride, PPContext *c) {
    const vector signed int vsint32_8 = vec_splat_s32(8);
    const vector unsigned int vuint32_4 = vec_splat_u32(4);
    const vector signed char neg1 = vec_splat_s8(-1);

    const vector unsigned char permA1 = (vector unsigned char)
        {0x00, 0x01, 0x02, 0x10, 0x11, 0x12, 0x1F, 0x1F,
         0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F};
    const vector unsigned char permA2 = (vector unsigned char)
        {0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x10, 0x11,
         0x12, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F, 0x1F};
    const vector unsigned char permA1inc = (vector unsigned char)
        {0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x00, 0x00,
         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    const vector unsigned char permA2inc = (vector unsigned char)
        {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x01, 0x01,
         0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    const vector unsigned char magic = (vector unsigned char)
        {0x01, 0x02, 0x01, 0x02, 0x04, 0x02, 0x01, 0x02,
         0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    const vector unsigned char extractPerm = (vector unsigned char)
        {0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01,
         0x10, 0x10, 0x10, 0x01, 0x10, 0x10, 0x10, 0x01};
    const vector unsigned char extractPermInc = (vector unsigned char)
        {0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01,
         0x00, 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x01};
    const vector unsigned char identity = vec_lvsl(0,(unsigned char *)0);
    const vector unsigned char tenRight = (vector unsigned char)
        {0x00, 0x10, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00};
    const vector unsigned char eightLeft = (vector unsigned char)
        {0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
         0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x08};

    /*
    this code makes no assumption on src or stride.
    One could remove the recomputation of the perm
    vector by assuming (stride % 16) == 0, unfortunately
    this is not always true. Quite a lot of load/stores
    can be removed by assuming proper alignment of
    src & stride :-(
    */
    uint8_t *srcCopy = src;
    DECLARE_ALIGNED(16, uint8_t, dt)[16] = { deringThreshold };
    const vector signed int zero = vec_splat_s32(0);
    vector unsigned char v_dt = vec_splat(vec_ld(0, dt), 0);

#define LOAD_LINE(i)                                                  \
    const vector unsigned char perm##i =                              \
        vec_lvsl(i * stride, srcCopy);                                \
    vector unsigned char sA##i = vec_ld(i * stride, srcCopy);         \
    vector unsigned char sB##i = vec_ld(i * stride + 16, srcCopy);    \
    vector unsigned char src##i = vec_perm(sA##i, sB##i, perm##i)

    LOAD_LINE(0);
    LOAD_LINE(1);
    LOAD_LINE(2);
    LOAD_LINE(3);
    LOAD_LINE(4);
    LOAD_LINE(5);
    LOAD_LINE(6);
    LOAD_LINE(7);
    LOAD_LINE(8);
    LOAD_LINE(9);
#undef LOAD_LINE

    vector unsigned char v_avg;
    DECLARE_ALIGNED(16, signed int, S)[8];
    DECLARE_ALIGNED(16, int, tQP2)[4] = { c->QP/2 + 1 };
    vector signed int vQP2 = vec_ld(0, tQP2);
    vQP2 = vec_splat(vQP2, 0);

    {
    const vector unsigned char trunc_perm = (vector unsigned char)
        {0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07, 0x08,
         0x11, 0x12, 0x13, 0x14, 0x15, 0x16, 0x17, 0x18};
    const vector unsigned char trunc_src12 = vec_perm(src1, src2, trunc_perm);
    const vector unsigned char trunc_src34 = vec_perm(src3, src4, trunc_perm);
    const vector unsigned char trunc_src56 = vec_perm(src5, src6, trunc_perm);
    const vector unsigned char trunc_src78 = vec_perm(src7, src8, trunc_perm);

#define EXTRACT(op) do {                                                \
    const vector unsigned char s_1   = vec_##op(trunc_src12, trunc_src34); \
    const vector unsigned char s_2   = vec_##op(trunc_src56, trunc_src78); \
    const vector unsigned char s_6   = vec_##op(s_1, s_2);     \
    const vector unsigned char s_8h  = vec_mergeh(s_6, s_6);   \
    const vector unsigned char s_8l  = vec_mergel(s_6, s_6);   \
    const vector unsigned char s_9   = vec_##op(s_8h, s_8l);   \
    const vector unsigned char s_9h  = vec_mergeh(s_9, s_9);   \
    const vector unsigned char s_9l  = vec_mergel(s_9, s_9);   \
    const vector unsigned char s_10  = vec_##op(s_9h, s_9l);   \
    const vector unsigned char s_10h = vec_mergeh(s_10, s_10); \
    const vector unsigned char s_10l = vec_mergel(s_10, s_10); \
    const vector unsigned char s_11  = vec_##op(s_10h, s_10l); \
    const vector unsigned char s_11h = vec_mergeh(s_11, s_11); \
    const vector unsigned char s_11l = vec_mergel(s_11, s_11); \
    v_##op = vec_##op(s_11h, s_11l);                           \
} while (0)

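    /* EXTRACT(min)/EXTRACT(max) reduce the 8x8 interior of the block
     * (bytes 1..8 of lines 1..8, gathered by trunc_perm) to a single value
     * replicated across all lanes, via a log2 tree of vec_min/vec_max and
     * merges.  If max-min stays below the dering threshold, the block has
     * too little contrast to be worth filtering. */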
    vector unsigned char v_min;
    vector unsigned char v_max;
    EXTRACT(min);
    EXTRACT(max);
#undef EXTRACT

    if (vec_all_lt(vec_sub(v_max, v_min), v_dt))
        return;

    v_avg = vec_avg(v_min, v_max);
    }

    {
    const vector unsigned short mask1 = (vector unsigned short)
                                        {0x0001, 0x0002, 0x0004, 0x0008,
                                         0x0010, 0x0020, 0x0040, 0x0080};
    const vector unsigned short mask2 = (vector unsigned short)
                                        {0x0100, 0x0200, 0x0000, 0x0000,
                                         0x0000, 0x0000, 0x0000, 0x0000};

    const vector unsigned int vuint32_16 = vec_sl(vec_splat_u32(1), vec_splat_u32(4));
    const vector unsigned int vuint32_1 = vec_splat_u32(1);

    vector signed int sumA2;
    vector signed int sumB2;
    vector signed int sum0, sum1, sum2, sum3, sum4;
    vector signed int sum5, sum6, sum7, sum8, sum9;

#define COMPARE(i)                                                      \
    do {                                                                \
        const vector unsigned char cmp =                                \
            (vector unsigned char)vec_cmpgt(src##i, v_avg);             \
        const vector unsigned short cmpHi =                             \
            (vector unsigned short)vec_mergeh(cmp, cmp);                \
        const vector unsigned short cmpLi =                             \
            (vector unsigned short)vec_mergel(cmp, cmp);                \
        const vector signed short cmpHf =                               \
            (vector signed short)vec_and(cmpHi, mask1);                 \
        const vector signed short cmpLf =                               \
            (vector signed short)vec_and(cmpLi, mask2);                 \
        const vector signed int sump = vec_sum4s(cmpHf, zero);          \
        const vector signed int sumq = vec_sum4s(cmpLf, sump);          \
        sum##i  = vec_sums(sumq, zero);                                 \
    } while (0)

    COMPARE(0);
    COMPARE(1);
    COMPARE(2);
    COMPARE(3);
    COMPARE(4);
    COMPARE(5);
    COMPARE(6);
    COMPARE(7);
    COMPARE(8);
    COMPARE(9);
#undef COMPARE
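    /* Each sum##i now packs into one 32-bit lane a bitmask whose bit x is
     * set when pixel x of line i lies above v_avg (mask1/mask2 supply the
     * bit weights).  The bit-twiddling below, roughly the vector counterpart
     * of the scalar dering neighbourhood test, keeps only pixels whose
     * horizontal neighbours (the shift-left/shift-right pair) and the two
     * lines below them sit on the same side of the average; the verdicts
     * land in S[] and drive the F2 filter decisions further down. */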

    {
    const vector signed int sump02 = vec_mergel(sum0, sum2);
    const vector signed int sump13 = vec_mergel(sum1, sum3);
    const vector signed int sumA = vec_mergel(sump02, sump13);

    const vector signed int sump46 = vec_mergel(sum4, sum6);
    const vector signed int sump57 = vec_mergel(sum5, sum7);
    const vector signed int sumB = vec_mergel(sump46, sump57);

    const vector signed int sump8A = vec_mergel(sum8, zero);
    const vector signed int sump9B = vec_mergel(sum9, zero);
    const vector signed int sumC = vec_mergel(sump8A, sump9B);

    const vector signed int tA = vec_sl(vec_nor(zero, sumA), vuint32_16);
    const vector signed int tB = vec_sl(vec_nor(zero, sumB), vuint32_16);
    const vector signed int tC = vec_sl(vec_nor(zero, sumC), vuint32_16);
    const vector signed int t2A = vec_or(sumA, tA);
    const vector signed int t2B = vec_or(sumB, tB);
    const vector signed int t2C = vec_or(sumC, tC);
    const vector signed int t3A = vec_and(vec_sra(t2A, vuint32_1),
                                          vec_sl(t2A, vuint32_1));
    const vector signed int t3B = vec_and(vec_sra(t2B, vuint32_1),
                                          vec_sl(t2B, vuint32_1));
    const vector signed int t3C = vec_and(vec_sra(t2C, vuint32_1),
                                          vec_sl(t2C, vuint32_1));
    const vector signed int yA = vec_and(t2A, t3A);
    const vector signed int yB = vec_and(t2B, t3B);
    const vector signed int yC = vec_and(t2C, t3C);

    const vector unsigned char strangeperm1 = vec_lvsl(4, (unsigned char*)0);
    const vector unsigned char strangeperm2 = vec_lvsl(8, (unsigned char*)0);
    const vector signed int sumAd4 = vec_perm(yA, yB, strangeperm1);
    const vector signed int sumAd8 = vec_perm(yA, yB, strangeperm2);
    const vector signed int sumBd4 = vec_perm(yB, yC, strangeperm1);
    const vector signed int sumBd8 = vec_perm(yB, yC, strangeperm2);
    const vector signed int sumAp = vec_and(yA,
                                            vec_and(sumAd4, sumAd8));
    const vector signed int sumBp = vec_and(yB,
                                            vec_and(sumBd4, sumBd8));
    sumA2 = vec_or(sumAp,
                   vec_sra(sumAp,
                           vuint32_16));
    sumB2 = vec_or(sumBp,
                   vec_sra(sumBp,
                           vuint32_16));
    }
    vec_st(sumA2, 0, S);
    vec_st(sumB2, 16, S);
    }

    /* I'm not sure the following is actually faster
       than straight, unvectorized C code :-( */

#define F_INIT()                                       \
    vector unsigned char tenRightM = tenRight;         \
    vector unsigned char permA1M = permA1;             \
    vector unsigned char permA2M = permA2;             \
    vector unsigned char extractPermM = extractPerm

#define F2(i, j, k, l)                                                  \
    if (S[i] & (1 << (l+1))) {                                          \
        const vector unsigned char a_A = vec_perm(src##i, src##j, permA1M); \
        const vector unsigned char a_B = vec_perm(a_A, src##k, permA2M); \
        const vector signed int a_sump =                                \
            (vector signed int)vec_msum(a_B, magic, (vector unsigned int)zero);\
        vector signed int F = vec_sr(vec_sums(a_sump, vsint32_8), vuint32_4); \
        const vector signed int p =                                     \
            (vector signed int)vec_perm(src##j, (vector unsigned char)zero, \
                                        extractPermM);                  \
        const vector signed int sum  = vec_add(p, vQP2);                \
        const vector signed int diff = vec_sub(p, vQP2);                \
        vector signed int newpm;                                        \
        vector unsigned char newpm2, mask;                              \
        F = vec_splat(F, 3);                                            \
        if (vec_all_lt(sum, F))                                         \
            newpm = sum;                                                \
        else if (vec_all_gt(diff, F))                                   \
            newpm = diff;                                               \
        else newpm = F;                                                 \
        newpm2 = vec_splat((vector unsigned char)newpm, 15);            \
        mask = vec_add(identity, tenRightM);                            \
        src##j = vec_perm(src##j, newpm2, mask);                        \
    }                                                                   \
    permA1M = vec_add(permA1M, permA1inc);                              \
    permA2M = vec_add(permA2M, permA2inc);                              \
    tenRightM = vec_sro(tenRightM, eightLeft);                          \
    extractPermM = vec_add(extractPermM, extractPermInc)

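    /* For every pixel flagged in S[], F2 computes a 3x3 weighted average
     * (weights 1-2-1 / 2-4-2 / 1-2-1 via vec_msum with 'magic',
     * F = (sum + 8) >> 4) and replaces the pixel with F clamped to
     * p +/- (QP/2 + 1); the permute vectors are stepped one pixel to the
     * right after each of the eight F2 invocations inside ITER. */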
#define ITER(i, j, k) do {                      \
    F_INIT();                                   \
    F2(i, j, k, 0);                             \
    F2(i, j, k, 1);                             \
    F2(i, j, k, 2);                             \
    F2(i, j, k, 3);                             \
    F2(i, j, k, 4);                             \
    F2(i, j, k, 5);                             \
    F2(i, j, k, 6);                             \
    F2(i, j, k, 7);                             \
} while (0)

    ITER(0, 1, 2);
    ITER(1, 2, 3);
    ITER(2, 3, 4);
    ITER(3, 4, 5);
    ITER(4, 5, 6);
    ITER(5, 6, 7);
    ITER(6, 7, 8);
    ITER(7, 8, 9);

#define STORE_LINE(i) do {                              \
    const vector unsigned char permST =                 \
        vec_lvsr(i * stride, srcCopy);                  \
    const vector unsigned char maskST =                 \
        vec_perm((vector unsigned char)zero,            \
                 (vector unsigned char)neg1, permST);   \
    src##i = vec_perm(src##i, src##i, permST);          \
    sA##i = vec_sel(sA##i, src##i, maskST);             \
    sB##i = vec_sel(src##i, sB##i, maskST);             \
    vec_st(sA##i, i * stride, srcCopy);                 \
    vec_st(sB##i, i * stride + 16, srcCopy);            \
} while (0)

    STORE_LINE(1);
    STORE_LINE(2);
    STORE_LINE(3);
    STORE_LINE(4);
    STORE_LINE(5);
    STORE_LINE(6);
    STORE_LINE(7);
    STORE_LINE(8);

#undef STORE_LINE
#undef ITER
#undef F2
}

#define doHorizLowPass_altivec(a...) doHorizLowPass_C(a)
#define doHorizDefFilter_altivec(a...) doHorizDefFilter_C(a)
#define do_a_deblock_altivec(a...) do_a_deblock_C(a)

static inline void RENAME(tempNoiseReducer)(uint8_t *src, int stride,
                                            uint8_t *tempBlurred, uint32_t *tempBlurredPast, int *maxNoise)
{
    const vector signed char neg1 = vec_splat_s8(-1);
    const vector unsigned char permHH = (const vector unsigned char){0x00, 0x01, 0x02, 0x03, 0x04, 0x05, 0x06, 0x07,
                                                                     0x18, 0x19, 0x1A, 0x1B, 0x1C, 0x1D, 0x1E, 0x1F};

    const vector signed int zero = vec_splat_s32(0);
    const vector signed short vsint16_1 = vec_splat_s16(1);
    vector signed int v_dp = zero;
    vector signed int v_sysdp = zero;
    int d, sysd, i;

#define LOAD_LINE(src, i)                                               \
    register int j##src##i = i * stride;                                \
    vector unsigned char perm##src##i = vec_lvsl(j##src##i, src);       \
    const vector unsigned char v_##src##A1##i = vec_ld(j##src##i, src); \
    const vector unsigned char v_##src##A2##i = vec_ld(j##src##i + 16, src); \
    const vector unsigned char v_##src##A##i =                          \
        vec_perm(v_##src##A1##i, v_##src##A2##i, perm##src##i);         \
    vector signed short v_##src##Ass##i =                               \
        (vector signed short)vec_mergeh((vector signed char)zero,       \
                                        (vector signed char)v_##src##A##i)

    LOAD_LINE(src, 0);
    LOAD_LINE(src, 1);
    LOAD_LINE(src, 2);
    LOAD_LINE(src, 3);
    LOAD_LINE(src, 4);
    LOAD_LINE(src, 5);
    LOAD_LINE(src, 6);
    LOAD_LINE(src, 7);

    LOAD_LINE(tempBlurred, 0);
    LOAD_LINE(tempBlurred, 1);
    LOAD_LINE(tempBlurred, 2);
    LOAD_LINE(tempBlurred, 3);
    LOAD_LINE(tempBlurred, 4);
    LOAD_LINE(tempBlurred, 5);
    LOAD_LINE(tempBlurred, 6);
    LOAD_LINE(tempBlurred, 7);
#undef LOAD_LINE

#define ACCUMULATE_DIFFS(i) do {                                \
        vector signed short v_d = vec_sub(v_tempBlurredAss##i,  \
                                          v_srcAss##i);         \
        v_dp = vec_msums(v_d, v_d, v_dp);                       \
        v_sysdp = vec_msums(v_d, vsint16_1, v_sysdp);           \
    } while (0)

    ACCUMULATE_DIFFS(0);
    ACCUMULATE_DIFFS(1);
    ACCUMULATE_DIFFS(2);
    ACCUMULATE_DIFFS(3);
    ACCUMULATE_DIFFS(4);
    ACCUMULATE_DIFFS(5);
    ACCUMULATE_DIFFS(6);
    ACCUMULATE_DIFFS(7);
#undef ACCUMULATE_DIFFS
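    /* v_dp accumulates the sum of squared differences between the block and
     * its blurred history (vec_msums(v_d, v_d, ...)), v_sysdp the plain
     * signed sum.  The resulting d is smoothed with the stored values of the
     * four neighbouring blocks and compared against the maxNoise thresholds
     * to choose between copying, averaging and the two IIR blends below. */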

    tempBlurredPast[127] = maxNoise[0];
    tempBlurredPast[128] = maxNoise[1];
    tempBlurredPast[129] = maxNoise[2];

    v_dp = vec_sums(v_dp, zero);
    v_sysdp = vec_sums(v_sysdp, zero);

    v_dp = vec_splat(v_dp, 3);
    v_sysdp = vec_splat(v_sysdp, 3);

    vec_ste(v_dp, 0, &d);
    vec_ste(v_sysdp, 0, &sysd);

    i = d;
    d = (4*d
         +(*(tempBlurredPast-256))
         +(*(tempBlurredPast-1)) + (*(tempBlurredPast+1))
         +(*(tempBlurredPast+256))
         +4)>>3;

    *tempBlurredPast = i;

    if (d > maxNoise[1]) {
        if (d < maxNoise[2]) {
#define OP(i) v_tempBlurredAss##i = vec_avg(v_tempBlurredAss##i, v_srcAss##i);

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        } else {
#define OP(i) v_tempBlurredAss##i = v_srcAss##i;

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        }
    } else {
        if (d < maxNoise[0]) {
            const vector signed short vsint16_7 = vec_splat_s16(7);
            const vector signed short vsint16_4 = vec_splat_s16(4);
            const vector unsigned short vuint16_3 = vec_splat_u16(3);

#define OP(i) do {                                                      \
            const vector signed short v_temp =                          \
                vec_mladd(v_tempBlurredAss##i, vsint16_7, v_srcAss##i); \
            const vector signed short v_temp2 = vec_add(v_temp, vsint16_4); \
            v_tempBlurredAss##i = vec_sr(v_temp2, vuint16_3);           \
        } while (0)

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        } else {
            const vector signed short vsint16_3 = vec_splat_s16(3);
            const vector signed short vsint16_2 = vec_splat_s16(2);

#define OP(i) do {                                              \
            const vector signed short v_temp =                  \
                vec_mladd(v_tempBlurredAss##i, vsint16_3, v_srcAss##i); \
            const vector signed short v_temp2 = vec_add(v_temp, vsint16_2); \
            v_tempBlurredAss##i =                                       \
                vec_sr(v_temp2, (vector unsigned short)vsint16_2);      \
        } while (0)

            OP(0);
            OP(1);
            OP(2);
            OP(3);
            OP(4);
            OP(5);
            OP(6);
            OP(7);
#undef OP
        }
    }

#define PACK_AND_STORE(src, i) do {                                      \
    const vector unsigned char perms = vec_lvsr(i * stride, src);        \
    const vector unsigned char vf =                                      \
        vec_packsu(v_tempBlurredAss##i, (vector signed short)zero);     \
    const vector unsigned char vg = vec_perm(vf, v_##src##A##i, permHH); \
    const vector unsigned char mask =                                    \
        vec_perm((vector unsigned char)zero, (vector unsigned char)neg1, perms); \
    const vector unsigned char vg2 = vec_perm(vg, vg, perms);            \
    const vector unsigned char svA = vec_sel(v_##src##A1##i, vg2, mask); \
    const vector unsigned char svB = vec_sel(vg2, v_##src##A2##i, mask); \
    vec_st(svA, i * stride, src);                                        \
    vec_st(svB, i * stride + 16, src);                                   \
} while (0)

    PACK_AND_STORE(src, 0);
    PACK_AND_STORE(src, 1);
    PACK_AND_STORE(src, 2);
    PACK_AND_STORE(src, 3);
    PACK_AND_STORE(src, 4);
    PACK_AND_STORE(src, 5);
    PACK_AND_STORE(src, 6);
    PACK_AND_STORE(src, 7);
    PACK_AND_STORE(tempBlurred, 0);
    PACK_AND_STORE(tempBlurred, 1);
    PACK_AND_STORE(tempBlurred, 2);
    PACK_AND_STORE(tempBlurred, 3);
    PACK_AND_STORE(tempBlurred, 4);
    PACK_AND_STORE(tempBlurred, 5);
    PACK_AND_STORE(tempBlurred, 6);
    PACK_AND_STORE(tempBlurred, 7);
#undef PACK_AND_STORE
}

static inline void transpose_16x8_char_toPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
    const vector unsigned char zero = vec_splat_u8(0);

#define LOAD_DOUBLE_LINE(i, j)                                          \
    vector unsigned char perm1##i = vec_lvsl(i * stride, src);          \
    vector unsigned char perm2##i = vec_lvsl(j * stride, src);          \
    vector unsigned char srcA##i = vec_ld(i * stride, src);             \
    vector unsigned char srcB##i = vec_ld(i * stride + 16, src);        \
    vector unsigned char srcC##i = vec_ld(j * stride, src);             \
    vector unsigned char srcD##i = vec_ld(j * stride + 16, src);        \
    vector unsigned char src##i = vec_perm(srcA##i, srcB##i, perm1##i); \
    vector unsigned char src##j = vec_perm(srcC##i, srcD##i, perm2##i)

    LOAD_DOUBLE_LINE(0, 1);
    LOAD_DOUBLE_LINE(2, 3);
    LOAD_DOUBLE_LINE(4, 5);
    LOAD_DOUBLE_LINE(6, 7);
#undef LOAD_DOUBLE_LINE
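    /* Interleaving each byte line with zero (below) widens it into 16-bit
     * slots; three further rounds of merges complete the 16x8 transpose, and
     * the sixteen result vectors are stored back to back (stride 16) so that,
     * as the function name suggests, the transposed data sits in a packed,
     * aligned buffer. */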

    vector unsigned char tempA = vec_mergeh(src0, zero);
    vector unsigned char tempB = vec_mergel(src0, zero);
    vector unsigned char tempC = vec_mergeh(src1, zero);
    vector unsigned char tempD = vec_mergel(src1, zero);
    vector unsigned char tempE = vec_mergeh(src2, zero);
    vector unsigned char tempF = vec_mergel(src2, zero);
    vector unsigned char tempG = vec_mergeh(src3, zero);
    vector unsigned char tempH = vec_mergel(src3, zero);
    vector unsigned char tempI = vec_mergeh(src4, zero);
    vector unsigned char tempJ = vec_mergel(src4, zero);
    vector unsigned char tempK = vec_mergeh(src5, zero);
    vector unsigned char tempL = vec_mergel(src5, zero);
    vector unsigned char tempM = vec_mergeh(src6, zero);
    vector unsigned char tempN = vec_mergel(src6, zero);
    vector unsigned char tempO = vec_mergeh(src7, zero);
    vector unsigned char tempP = vec_mergel(src7, zero);

    vector unsigned char temp0  = vec_mergeh(tempA, tempI);
    vector unsigned char temp1  = vec_mergel(tempA, tempI);
    vector unsigned char temp2  = vec_mergeh(tempB, tempJ);
    vector unsigned char temp3  = vec_mergel(tempB, tempJ);
    vector unsigned char temp4  = vec_mergeh(tempC, tempK);
    vector unsigned char temp5  = vec_mergel(tempC, tempK);
    vector unsigned char temp6  = vec_mergeh(tempD, tempL);
    vector unsigned char temp7  = vec_mergel(tempD, tempL);
    vector unsigned char temp8  = vec_mergeh(tempE, tempM);
    vector unsigned char temp9  = vec_mergel(tempE, tempM);
    vector unsigned char temp10 = vec_mergeh(tempF, tempN);
    vector unsigned char temp11 = vec_mergel(tempF, tempN);
    vector unsigned char temp12 = vec_mergeh(tempG, tempO);
    vector unsigned char temp13 = vec_mergel(tempG, tempO);
    vector unsigned char temp14 = vec_mergeh(tempH, tempP);
    vector unsigned char temp15 = vec_mergel(tempH, tempP);

    tempA = vec_mergeh(temp0, temp8);
    tempB = vec_mergel(temp0, temp8);
    tempC = vec_mergeh(temp1, temp9);
    tempD = vec_mergel(temp1, temp9);
    tempE = vec_mergeh(temp2, temp10);
    tempF = vec_mergel(temp2, temp10);
    tempG = vec_mergeh(temp3, temp11);
    tempH = vec_mergel(temp3, temp11);
    tempI = vec_mergeh(temp4, temp12);
    tempJ = vec_mergel(temp4, temp12);
    tempK = vec_mergeh(temp5, temp13);
    tempL = vec_mergel(temp5, temp13);
    tempM = vec_mergeh(temp6, temp14);
    tempN = vec_mergel(temp6, temp14);
    tempO = vec_mergeh(temp7, temp15);
    tempP = vec_mergel(temp7, temp15);

    temp0  = vec_mergeh(tempA, tempI);
    temp1  = vec_mergel(tempA, tempI);
    temp2  = vec_mergeh(tempB, tempJ);
    temp3  = vec_mergel(tempB, tempJ);
    temp4  = vec_mergeh(tempC, tempK);
    temp5  = vec_mergel(tempC, tempK);
    temp6  = vec_mergeh(tempD, tempL);
    temp7  = vec_mergel(tempD, tempL);
    temp8  = vec_mergeh(tempE, tempM);
    temp9  = vec_mergel(tempE, tempM);
    temp10 = vec_mergeh(tempF, tempN);
    temp11 = vec_mergel(tempF, tempN);
    temp12 = vec_mergeh(tempG, tempO);
    temp13 = vec_mergel(tempG, tempO);
    temp14 = vec_mergeh(tempH, tempP);
    temp15 = vec_mergel(tempH, tempP);

    vec_st(temp0,    0, dst);
    vec_st(temp1,   16, dst);
    vec_st(temp2,   32, dst);
    vec_st(temp3,   48, dst);
    vec_st(temp4,   64, dst);
    vec_st(temp5,   80, dst);
    vec_st(temp6,   96, dst);
    vec_st(temp7,  112, dst);
    vec_st(temp8,  128, dst);
    vec_st(temp9,  144, dst);
    vec_st(temp10, 160, dst);
    vec_st(temp11, 176, dst);
    vec_st(temp12, 192, dst);
    vec_st(temp13, 208, dst);
    vec_st(temp14, 224, dst);
    vec_st(temp15, 240, dst);
}

static inline void transpose_8x16_char_fromPackedAlign_altivec(unsigned char* dst, unsigned char* src, int stride) {
    const vector unsigned char zero = vec_splat_u8(0);
    const vector signed   char neg1 = vec_splat_s8(-1);

#define LOAD_DOUBLE_LINE(i, j)                                  \
    vector unsigned char src##i = vec_ld(i * 16, src);          \
    vector unsigned char src##j = vec_ld(j * 16, src)

    LOAD_DOUBLE_LINE(0, 1);
    LOAD_DOUBLE_LINE(2, 3);
    LOAD_DOUBLE_LINE(4, 5);
    LOAD_DOUBLE_LINE(6, 7);
    LOAD_DOUBLE_LINE(8, 9);
    LOAD_DOUBLE_LINE(10, 11);
    LOAD_DOUBLE_LINE(12, 13);
    LOAD_DOUBLE_LINE(14, 15);
#undef LOAD_DOUBLE_LINE

    vector unsigned char tempA = vec_mergeh(src0, src8);
    vector unsigned char tempB;
    vector unsigned char tempC = vec_mergeh(src1, src9);
    vector unsigned char tempD;
    vector unsigned char tempE = vec_mergeh(src2, src10);
    vector unsigned char tempG = vec_mergeh(src3, src11);
    vector unsigned char tempI = vec_mergeh(src4, src12);
    vector unsigned char tempJ;
    vector unsigned char tempK = vec_mergeh(src5, src13);
    vector unsigned char tempL;
    vector unsigned char tempM = vec_mergeh(src6, src14);
    vector unsigned char tempO = vec_mergeh(src7, src15);

    vector unsigned char temp0 = vec_mergeh(tempA, tempI);
    vector unsigned char temp1 = vec_mergel(tempA, tempI);
    vector unsigned char temp2;
    vector unsigned char temp3;
    vector unsigned char temp4 = vec_mergeh(tempC, tempK);
    vector unsigned char temp5 = vec_mergel(tempC, tempK);
    vector unsigned char temp6;
    vector unsigned char temp7;
    vector unsigned char temp8 = vec_mergeh(tempE, tempM);
    vector unsigned char temp9 = vec_mergel(tempE, tempM);
    vector unsigned char temp12 = vec_mergeh(tempG, tempO);
    vector unsigned char temp13 = vec_mergel(tempG, tempO);

    tempA = vec_mergeh(temp0, temp8);
    tempB = vec_mergel(temp0, temp8);
    tempC = vec_mergeh(temp1, temp9);
    tempD = vec_mergel(temp1, temp9);
    tempI = vec_mergeh(temp4, temp12);
    tempJ = vec_mergel(temp4, temp12);
    tempK = vec_mergeh(temp5, temp13);
    tempL = vec_mergel(temp5, temp13);

    temp0 = vec_mergeh(tempA, tempI);
    temp1 = vec_mergel(tempA, tempI);
    temp2 = vec_mergeh(tempB, tempJ);
    temp3 = vec_mergel(tempB, tempJ);
    temp4 = vec_mergeh(tempC, tempK);
    temp5 = vec_mergel(tempC, tempK);
    temp6 = vec_mergeh(tempD, tempL);
    temp7 = vec_mergel(tempD, tempL);

#define STORE_DOUBLE_LINE(i, j) do {                                    \
    vector unsigned char dstAi = vec_ld(i * stride, dst);               \
    vector unsigned char dstBi = vec_ld(i * stride + 16, dst);          \
    vector unsigned char dstAj = vec_ld(j * stride, dst);               \
    vector unsigned char dstBj = vec_ld(j * stride + 16, dst);          \
    vector unsigned char aligni = vec_lvsr(i * stride, dst);            \
    vector unsigned char alignj = vec_lvsr(j * stride, dst);            \
    vector unsigned char maski =                                        \
        vec_perm(zero, (vector unsigned char)neg1, aligni);             \
    vector unsigned char maskj =                                        \
        vec_perm(zero, (vector unsigned char)neg1, alignj);             \
    vector unsigned char dstRi = vec_perm(temp##i, temp##i, aligni);    \
    vector unsigned char dstRj = vec_perm(temp##j, temp##j, alignj);    \
    vector unsigned char dstAFi = vec_sel(dstAi, dstRi, maski);         \
    vector unsigned char dstBFi = vec_sel(dstRi, dstBi, maski);         \
    vector unsigned char dstAFj = vec_sel(dstAj, dstRj, maskj);         \
    vector unsigned char dstBFj = vec_sel(dstRj, dstBj, maskj);         \
    vec_st(dstAFi, i * stride, dst);                                    \
    vec_st(dstBFi, i * stride + 16, dst);                               \
    vec_st(dstAFj, j * stride, dst);                                    \
    vec_st(dstBFj, j * stride + 16, dst);                               \
} while (0)
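/* STORE_DOUBLE_LINE above uses the same unaligned read-modify-write store
 * idiom as the deblock stores earlier: vec_lvsr rotates the data into place
 * and the 0x00/0xFF byte mask selects between the new bytes and the
 * destination contents loaded beforehand. */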

    STORE_DOUBLE_LINE(0, 1);
    STORE_DOUBLE_LINE(2, 3);
    STORE_DOUBLE_LINE(4, 5);
    STORE_DOUBLE_LINE(6, 7);
}