Diffstat (limited to 'ffmpeg/libavcodec/ppc')
-rw-r--r--  ffmpeg/libavcodec/ppc/Makefile                 |   22
-rw-r--r--  ffmpeg/libavcodec/ppc/dsputil_ppc.c            |    5
-rw-r--r--  ffmpeg/libavcodec/ppc/fdct_altivec.c           |    2
-rw-r--r--  ffmpeg/libavcodec/ppc/fft_altivec.c            |   25
-rw-r--r--  ffmpeg/libavcodec/ppc/fmtconvert_altivec.c     |   21
-rw-r--r--  ffmpeg/libavcodec/ppc/gmc_altivec.c            |    4
-rw-r--r--  ffmpeg/libavcodec/ppc/h264_altivec.c           |  748
-rw-r--r--  ffmpeg/libavcodec/ppc/h264_qpel.c              |  317
-rw-r--r--  ffmpeg/libavcodec/ppc/h264_qpel_template.c     |  507
-rw-r--r--  ffmpeg/libavcodec/ppc/h264chroma_init.c        |   10
-rw-r--r--  ffmpeg/libavcodec/ppc/hpeldsp_altivec.c        |   37
-rw-r--r--  ffmpeg/libavcodec/ppc/int_altivec.c            |    8
-rw-r--r--  ffmpeg/libavcodec/ppc/mpegaudiodec_altivec.c   |  130
-rw-r--r--  ffmpeg/libavcodec/ppc/mpegvideo_altivec.c      |   14
-rw-r--r--  ffmpeg/libavcodec/ppc/vc1dsp_altivec.c         |    9
-rw-r--r--  ffmpeg/libavcodec/ppc/vorbisdsp_altivec.c      |    7
-rw-r--r--  ffmpeg/libavcodec/ppc/vp3dsp_altivec.c         |   16
-rw-r--r--  ffmpeg/libavcodec/ppc/vp8dsp_altivec.c         |   57
18 files changed, 153 insertions, 1786 deletions
diff --git a/ffmpeg/libavcodec/ppc/Makefile b/ffmpeg/libavcodec/ppc/Makefile
index febbb0a..71b23da 100644
--- a/ffmpeg/libavcodec/ppc/Makefile
+++ b/ffmpeg/libavcodec/ppc/Makefile
@@ -1,24 +1,24 @@
 OBJS                                   += ppc/dsputil_ppc.o          \
+                                          ppc/fmtconvert_altivec.o   \
                                           ppc/videodsp_ppc.o         \
+OBJS-$(CONFIG_FFT)                     += ppc/fft_altivec.o
 OBJS-$(CONFIG_H264CHROMA)              += ppc/h264chroma_init.o
-OBJS-$(CONFIG_H264QPEL)                += ppc/h264_qpel.o
+OBJS-$(CONFIG_H264DSP)                 += ppc/h264dsp.o
+OBJS-$(CONFIG_H264QPEL)                += ppc/h264qpel.o
 OBJS-$(CONFIG_HPELDSP)                 += ppc/hpeldsp_altivec.o
+OBJS-$(CONFIG_MPEGAUDIODSP)            += ppc/mpegaudiodsp_altivec.o
+OBJS-$(CONFIG_MPEGVIDEO)               += ppc/mpegvideo_altivec.o
+OBJS-$(CONFIG_VC1_DECODER)             += ppc/vc1dsp_altivec.o
 OBJS-$(CONFIG_VORBIS_DECODER)          += ppc/vorbisdsp_altivec.o
 OBJS-$(CONFIG_VP3DSP)                  += ppc/vp3dsp_altivec.o
-
-FFT-OBJS-$(HAVE_GNU_AS)                += ppc/fft_altivec_s.o
-ALTIVEC-OBJS-$(CONFIG_FFT)             += ppc/fft_altivec.o \
-                                          $(FFT-OBJS-yes)
-ALTIVEC-OBJS-$(CONFIG_H264DSP)         += ppc/h264_altivec.o
-ALTIVEC-OBJS-$(CONFIG_MPEGAUDIODSP)    += ppc/mpegaudiodec_altivec.o
-ALTIVEC-OBJS-$(CONFIG_MPEGVIDEO)       += ppc/mpegvideo_altivec.o
-ALTIVEC-OBJS-$(CONFIG_VC1_DECODER)     += ppc/vc1dsp_altivec.o
-ALTIVEC-OBJS-$(CONFIG_VP8_DECODER)     += ppc/vp8dsp_altivec.o
+OBJS-$(CONFIG_VP8_DECODER)             += ppc/vp8dsp_altivec.o
 ALTIVEC-OBJS                           += ppc/dsputil_altivec.o      \
                                           ppc/fdct_altivec.o         \
-                                          ppc/fmtconvert_altivec.o   \
                                           ppc/gmc_altivec.o          \
                                           ppc/idct_altivec.o         \
                                           ppc/int_altivec.o          \
+
+FFT-OBJS-$(HAVE_GNU_AS)                += ppc/fft_altivec_s.o
+ALTIVEC-OBJS-$(CONFIG_FFT)             += $(FFT-OBJS-yes)
diff --git a/ffmpeg/libavcodec/ppc/dsputil_ppc.c b/ffmpeg/libavcodec/ppc/dsputil_ppc.c
index 6112b0c..7454ea0 100644
--- a/ffmpeg/libavcodec/ppc/dsputil_ppc.c
+++ b/ffmpeg/libavcodec/ppc/dsputil_ppc.c
@@ -25,6 +25,7 @@
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/mem.h"
+#include "libavutil/ppc/cpu.h"
#include "dsputil_altivec.h"
/* ***** WARNING ***** WARNING ***** WARNING ***** */
@@ -156,8 +157,7 @@ av_cold void ff_dsputil_init_ppc(DSPContext *c, AVCodecContext *avctx)
}
}
-#if HAVE_ALTIVEC
- if (mm_flags & AV_CPU_FLAG_ALTIVEC) {
+ if (PPC_ALTIVEC(mm_flags)) {
ff_dsputil_init_altivec(c, avctx);
ff_int_init_altivec(c, avctx);
c->gmc1 = ff_gmc1_altivec;
@@ -180,5 +180,4 @@ av_cold void ff_dsputil_init_ppc(DSPContext *c, AVCodecContext *avctx)
}
}
-#endif /* HAVE_ALTIVEC */
}
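Note on the dsputil change above: the compile-time #if HAVE_ALTIVEC bracket around the init body is replaced by the PPC_ALTIVEC() helper from libavutil/ppc/cpu.h, which folds the build-time and runtime checks into one expression. A minimal sketch of the pattern, assuming the macro expands roughly as in libavutil/ppc/cpu.h:

    #include "config.h"        /* defines HAVE_ALTIVEC as 0 or 1 */
    #include "libavutil/cpu.h" /* av_get_cpu_flags(), AV_CPU_FLAG_ALTIVEC */

    /* Assumed shape of the helper: when HAVE_ALTIVEC is 0 the whole test
     * constant-folds to 0 and the branch below is compiled out, so no
     * #if/#endif bracket is needed around it. */
    #define PPC_ALTIVEC(flags) (HAVE_ALTIVEC && ((flags) & AV_CPU_FLAG_ALTIVEC))

    static void init_example(int mm_flags)
    {
        if (PPC_ALTIVEC(mm_flags)) {
            /* install AltiVec function pointers here */
        }
    }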
diff --git a/ffmpeg/libavcodec/ppc/fdct_altivec.c b/ffmpeg/libavcodec/ppc/fdct_altivec.c
index acab127..ff816e2 100644
--- a/ffmpeg/libavcodec/ppc/fdct_altivec.c
+++ b/ffmpeg/libavcodec/ppc/fdct_altivec.c
@@ -458,5 +458,3 @@ void ff_fdct_altivec(int16_t *block)
#undef CTS
/* }}} */
}
-
-/* vim:set foldmethod=marker foldlevel=0: */
diff --git a/ffmpeg/libavcodec/ppc/fft_altivec.c b/ffmpeg/libavcodec/ppc/fft_altivec.c
index 651ee26..2357198 100644
--- a/ffmpeg/libavcodec/ppc/fft_altivec.c
+++ b/ffmpeg/libavcodec/ppc/fft_altivec.c
@@ -20,6 +20,8 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "config.h"
+#include "libavutil/cpu.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"
#include "libavcodec/fft.h"
@@ -36,8 +38,8 @@
void ff_fft_calc_altivec(FFTContext *s, FFTComplex *z);
void ff_fft_calc_interleave_altivec(FFTContext *s, FFTComplex *z);
-#if HAVE_GNU_AS
-static void ff_imdct_half_altivec(FFTContext *s, FFTSample *output, const FFTSample *input)
+#if HAVE_GNU_AS && HAVE_ALTIVEC
+static void imdct_half_altivec(FFTContext *s, FFTSample *output, const FFTSample *input)
{
int j, k;
int n = 1 << s->mdct_bits;
@@ -117,7 +119,7 @@ static void ff_imdct_half_altivec(FFTContext *s, FFTSample *output, const FFTSam
} while(k >= 0);
}
-static void ff_imdct_calc_altivec(FFTContext *s, FFTSample *output, const FFTSample *input)
+static void imdct_calc_altivec(FFTContext *s, FFTSample *output, const FFTSample *input)
{
int k;
int n = 1 << s->mdct_bits;
@@ -127,7 +129,7 @@ static void ff_imdct_calc_altivec(FFTContext *s, FFTSample *output, const FFTSam
vec_u32 *p0 = (vec_u32*)(output+n4);
vec_u32 *p1 = (vec_u32*)(output+n4*3);
- ff_imdct_half_altivec(s, output+n4, input);
+ imdct_half_altivec(s, output + n4, input);
for (k = 0; k < n16; k++) {
vec_u32 a = p0[k] ^ sign;
@@ -136,15 +138,18 @@ static void ff_imdct_calc_altivec(FFTContext *s, FFTSample *output, const FFTSam
p1[k] = vec_perm(b, b, vcprm(3,2,1,0));
}
}
-#endif /* HAVE_GNU_AS */
+#endif /* HAVE_GNU_AS && HAVE_ALTIVEC */
-av_cold void ff_fft_init_altivec(FFTContext *s)
+av_cold void ff_fft_init_ppc(FFTContext *s)
{
-#if HAVE_GNU_AS
+#if HAVE_GNU_AS && HAVE_ALTIVEC
+ if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
+ return;
+
s->fft_calc = ff_fft_calc_interleave_altivec;
if (s->mdct_bits >= 5) {
- s->imdct_calc = ff_imdct_calc_altivec;
- s->imdct_half = ff_imdct_half_altivec;
+ s->imdct_calc = imdct_calc_altivec;
+ s->imdct_half = imdct_half_altivec;
}
-#endif
+#endif /* HAVE_GNU_AS && HAVE_ALTIVEC */
}
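The rewritten FFT init above also introduces the runtime early-return guard that the rest of the patch repeats (fmtconvert_altivec.c below uses the same shape): the AltiVec body stays inside #if HAVE_GNU_AS && HAVE_ALTIVEC, and on top of that the function bails out when the running CPU lacks AltiVec. A self-contained sketch of the shape, with hypothetical names (ExampleContext, calc_altivec) standing in for the real ones:

    #include "config.h"
    #include "libavutil/attributes.h"
    #include "libavutil/cpu.h"

    typedef struct ExampleContext {   /* hypothetical stand-in for FFTContext */
        void (*calc)(void);
    } ExampleContext;

    #if HAVE_GNU_AS && HAVE_ALTIVEC
    static void calc_altivec(void) { /* AltiVec body elided */ }
    #endif

    av_cold void example_init_ppc(ExampleContext *s)
    {
    #if HAVE_GNU_AS && HAVE_ALTIVEC
        /* Runtime guard: never install AltiVec pointers on a CPU
         * that cannot execute them. */
        if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
            return;

        s->calc = calc_altivec;
    #endif
    }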
diff --git a/ffmpeg/libavcodec/ppc/fmtconvert_altivec.c b/ffmpeg/libavcodec/ppc/fmtconvert_altivec.c
index b29c7d4..cd32e39 100644
--- a/ffmpeg/libavcodec/ppc/fmtconvert_altivec.c
+++ b/ffmpeg/libavcodec/ppc/fmtconvert_altivec.c
@@ -18,14 +18,17 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
-#include "libavcodec/fmtconvert.h"
-
-#include "libavutil/ppc/util_altivec.h"
+#include "config.h"
#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
#include "libavutil/mem.h"
+#include "libavutil/ppc/util_altivec.h"
+#include "libavcodec/fmtconvert.h"
#include "dsputil_altivec.h"
-static void int32_to_float_fmul_scalar_altivec(float *dst, const int *src,
+#if HAVE_ALTIVEC
+
+static void int32_to_float_fmul_scalar_altivec(float *dst, const int32_t *src,
float mul, int len)
{
union {
@@ -156,11 +159,19 @@ static void float_to_int16_interleave_altivec(int16_t *dst, const float **src,
}
}
-av_cold void ff_fmt_convert_init_altivec(FmtConvertContext *c, AVCodecContext *avctx)
+#endif /* HAVE_ALTIVEC */
+
+av_cold void ff_fmt_convert_init_ppc(FmtConvertContext *c,
+ AVCodecContext *avctx)
{
+#if HAVE_ALTIVEC
+ if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
+ return;
+
c->int32_to_float_fmul_scalar = int32_to_float_fmul_scalar_altivec;
if (!(avctx->flags & CODEC_FLAG_BITEXACT)) {
c->float_to_int16 = float_to_int16_altivec;
c->float_to_int16_interleave = float_to_int16_interleave_altivec;
}
+#endif /* HAVE_ALTIVEC */
}
diff --git a/ffmpeg/libavcodec/ppc/gmc_altivec.c b/ffmpeg/libavcodec/ppc/gmc_altivec.c
index 4db761d..45d850a 100644
--- a/ffmpeg/libavcodec/ppc/gmc_altivec.c
+++ b/ffmpeg/libavcodec/ppc/gmc_altivec.c
@@ -66,7 +66,7 @@ void ff_gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */, int
srcvA = vec_perm(src_0, src_1, vec_lvsl(0, src));
if (src_really_odd != 0x0000000F) {
- // if src & 0xF == 0xF, then (src+1) is properly aligned
+ // if (src & 0xF) == 0xF, then (src+1) is properly aligned
// on the second vector.
srcvB = vec_perm(src_0, src_1, vec_lvsl(1, src));
} else {
@@ -90,7 +90,7 @@ void ff_gmc1_altivec(uint8_t *dst /* align 8 */, uint8_t *src /* align1 */, int
srcvC = vec_perm(src_0, src_1, vec_lvsl(stride + 0, src));
if (src_really_odd != 0x0000000F) {
- // if src & 0xF == 0xF, then (src+1) is properly aligned
+ // if (src & 0xF) == 0xF, then (src+1) is properly aligned
// on the second vector.
srcvD = vec_perm(src_0, src_1, vec_lvsl(stride + 1, src));
} else {
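The two comment fixes above are about C operator precedence: without parentheses, src & 0xF == 0xF parses as src & (0xF == 0xF), i.e. src & 1, which is not what the comment means. The intended test is easy to check in scalar code (a throwaway example, not code from this tree):

    #include <stdint.h>
    #include <stdio.h>

    int main(void)
    {
        uintptr_t src = 0x100F;  /* example address whose low nibble is 0xF */

        /* When the low four address bits are all ones, the next byte starts
         * a fresh 16-byte-aligned vector, so (src+1) loads without a permute. */
        if ((src & 0xF) == 0xF)
            printf("src+1 = %#lx is 16-byte aligned\n", (unsigned long)(src + 1));
        return 0;
    }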
diff --git a/ffmpeg/libavcodec/ppc/h264_altivec.c b/ffmpeg/libavcodec/ppc/h264_altivec.c
deleted file mode 100644
index 3c2bb4d..0000000
--- a/ffmpeg/libavcodec/ppc/h264_altivec.c
+++ /dev/null
@@ -1,748 +0,0 @@
-/*
- * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "libavutil/attributes.h"
-#include "libavutil/cpu.h"
-#include "libavutil/intreadwrite.h"
-#include "libavutil/ppc/types_altivec.h"
-#include "libavutil/ppc/util_altivec.h"
-#include "libavcodec/h264data.h"
-#include "libavcodec/h264dsp.h"
-
-/****************************************************************************
- * IDCT transform:
- ****************************************************************************/
-
-#define VEC_1D_DCT(vb0,vb1,vb2,vb3,va0,va1,va2,va3) \
- /* 1st stage */ \
- vz0 = vec_add(vb0,vb2); /* temp[0] = Y[0] + Y[2] */ \
- vz1 = vec_sub(vb0,vb2); /* temp[1] = Y[0] - Y[2] */ \
- vz2 = vec_sra(vb1,vec_splat_u16(1)); \
- vz2 = vec_sub(vz2,vb3); /* temp[2] = Y[1].1/2 - Y[3] */ \
- vz3 = vec_sra(vb3,vec_splat_u16(1)); \
- vz3 = vec_add(vb1,vz3); /* temp[3] = Y[1] + Y[3].1/2 */ \
- /* 2nd stage: output */ \
- va0 = vec_add(vz0,vz3); /* x[0] = temp[0] + temp[3] */ \
- va1 = vec_add(vz1,vz2); /* x[1] = temp[1] + temp[2] */ \
- va2 = vec_sub(vz1,vz2); /* x[2] = temp[1] - temp[2] */ \
- va3 = vec_sub(vz0,vz3) /* x[3] = temp[0] - temp[3] */
-
-#define VEC_TRANSPOSE_4(a0,a1,a2,a3,b0,b1,b2,b3) \
- b0 = vec_mergeh( a0, a0 ); \
- b1 = vec_mergeh( a1, a0 ); \
- b2 = vec_mergeh( a2, a0 ); \
- b3 = vec_mergeh( a3, a0 ); \
- a0 = vec_mergeh( b0, b2 ); \
- a1 = vec_mergel( b0, b2 ); \
- a2 = vec_mergeh( b1, b3 ); \
- a3 = vec_mergel( b1, b3 ); \
- b0 = vec_mergeh( a0, a2 ); \
- b1 = vec_mergel( a0, a2 ); \
- b2 = vec_mergeh( a1, a3 ); \
- b3 = vec_mergel( a1, a3 )
-
-#define VEC_LOAD_U8_ADD_S16_STORE_U8(va) \
- vdst_orig = vec_ld(0, dst); \
- vdst = vec_perm(vdst_orig, zero_u8v, vdst_mask); \
- vdst_ss = (vec_s16) vec_mergeh(zero_u8v, vdst); \
- va = vec_add(va, vdst_ss); \
- va_u8 = vec_packsu(va, zero_s16v); \
- va_u32 = vec_splat((vec_u32)va_u8, 0); \
- vec_ste(va_u32, element, (uint32_t*)dst);
-
-static void ff_h264_idct_add_altivec(uint8_t *dst, int16_t *block, int stride)
-{
- vec_s16 va0, va1, va2, va3;
- vec_s16 vz0, vz1, vz2, vz3;
- vec_s16 vtmp0, vtmp1, vtmp2, vtmp3;
- vec_u8 va_u8;
- vec_u32 va_u32;
- vec_s16 vdst_ss;
- const vec_u16 v6us = vec_splat_u16(6);
- vec_u8 vdst, vdst_orig;
- vec_u8 vdst_mask = vec_lvsl(0, dst);
- int element = ((unsigned long)dst & 0xf) >> 2;
- LOAD_ZERO;
-
- block[0] += 32; /* add 32 as a DC-level for rounding */
-
- vtmp0 = vec_ld(0,block);
- vtmp1 = vec_sld(vtmp0, vtmp0, 8);
- vtmp2 = vec_ld(16,block);
- vtmp3 = vec_sld(vtmp2, vtmp2, 8);
- memset(block, 0, 16 * sizeof(int16_t));
-
- VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);
- VEC_TRANSPOSE_4(va0,va1,va2,va3,vtmp0,vtmp1,vtmp2,vtmp3);
- VEC_1D_DCT(vtmp0,vtmp1,vtmp2,vtmp3,va0,va1,va2,va3);
-
- va0 = vec_sra(va0,v6us);
- va1 = vec_sra(va1,v6us);
- va2 = vec_sra(va2,v6us);
- va3 = vec_sra(va3,v6us);
-
- VEC_LOAD_U8_ADD_S16_STORE_U8(va0);
- dst += stride;
- VEC_LOAD_U8_ADD_S16_STORE_U8(va1);
- dst += stride;
- VEC_LOAD_U8_ADD_S16_STORE_U8(va2);
- dst += stride;
- VEC_LOAD_U8_ADD_S16_STORE_U8(va3);
-}
-
-#define IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7, d0, d1, d2, d3, d4, d5, d6, d7) {\
- /* a0 = SRC(0) + SRC(4); */ \
- vec_s16 a0v = vec_add(s0, s4); \
- /* a2 = SRC(0) - SRC(4); */ \
- vec_s16 a2v = vec_sub(s0, s4); \
- /* a4 = (SRC(2)>>1) - SRC(6); */ \
- vec_s16 a4v = vec_sub(vec_sra(s2, onev), s6); \
- /* a6 = (SRC(6)>>1) + SRC(2); */ \
- vec_s16 a6v = vec_add(vec_sra(s6, onev), s2); \
- /* b0 = a0 + a6; */ \
- vec_s16 b0v = vec_add(a0v, a6v); \
- /* b2 = a2 + a4; */ \
- vec_s16 b2v = vec_add(a2v, a4v); \
- /* b4 = a2 - a4; */ \
- vec_s16 b4v = vec_sub(a2v, a4v); \
- /* b6 = a0 - a6; */ \
- vec_s16 b6v = vec_sub(a0v, a6v); \
- /* a1 = SRC(5) - SRC(3) - SRC(7) - (SRC(7)>>1); */ \
- /* a1 = (SRC(5)-SRC(3)) - (SRC(7) + (SRC(7)>>1)); */ \
- vec_s16 a1v = vec_sub( vec_sub(s5, s3), vec_add(s7, vec_sra(s7, onev)) ); \
- /* a3 = SRC(7) + SRC(1) - SRC(3) - (SRC(3)>>1); */ \
- /* a3 = (SRC(7)+SRC(1)) - (SRC(3) + (SRC(3)>>1)); */ \
- vec_s16 a3v = vec_sub( vec_add(s7, s1), vec_add(s3, vec_sra(s3, onev)) );\
- /* a5 = SRC(7) - SRC(1) + SRC(5) + (SRC(5)>>1); */ \
- /* a5 = (SRC(7)-SRC(1)) + SRC(5) + (SRC(5)>>1); */ \
- vec_s16 a5v = vec_add( vec_sub(s7, s1), vec_add(s5, vec_sra(s5, onev)) );\
- /* a7 = SRC(5)+SRC(3) + SRC(1) + (SRC(1)>>1); */ \
- vec_s16 a7v = vec_add( vec_add(s5, s3), vec_add(s1, vec_sra(s1, onev)) );\
- /* b1 = (a7>>2) + a1; */ \
- vec_s16 b1v = vec_add( vec_sra(a7v, twov), a1v); \
- /* b3 = a3 + (a5>>2); */ \
- vec_s16 b3v = vec_add(a3v, vec_sra(a5v, twov)); \
- /* b5 = (a3>>2) - a5; */ \
- vec_s16 b5v = vec_sub( vec_sra(a3v, twov), a5v); \
- /* b7 = a7 - (a1>>2); */ \
- vec_s16 b7v = vec_sub( a7v, vec_sra(a1v, twov)); \
- /* DST(0, b0 + b7); */ \
- d0 = vec_add(b0v, b7v); \
- /* DST(1, b2 + b5); */ \
- d1 = vec_add(b2v, b5v); \
- /* DST(2, b4 + b3); */ \
- d2 = vec_add(b4v, b3v); \
- /* DST(3, b6 + b1); */ \
- d3 = vec_add(b6v, b1v); \
- /* DST(4, b6 - b1); */ \
- d4 = vec_sub(b6v, b1v); \
- /* DST(5, b4 - b3); */ \
- d5 = vec_sub(b4v, b3v); \
- /* DST(6, b2 - b5); */ \
- d6 = vec_sub(b2v, b5v); \
- /* DST(7, b0 - b7); */ \
- d7 = vec_sub(b0v, b7v); \
-}
-
-#define ALTIVEC_STORE_SUM_CLIP(dest, idctv, perm_ldv, perm_stv, sel) { \
- /* unaligned load */ \
- vec_u8 hv = vec_ld( 0, dest ); \
- vec_u8 lv = vec_ld( 7, dest ); \
- vec_u8 dstv = vec_perm( hv, lv, (vec_u8)perm_ldv ); \
- vec_s16 idct_sh6 = vec_sra(idctv, sixv); \
- vec_u16 dst16 = (vec_u16)vec_mergeh(zero_u8v, dstv); \
- vec_s16 idstsum = vec_adds(idct_sh6, (vec_s16)dst16); \
- vec_u8 idstsum8 = vec_packsu(zero_s16v, idstsum); \
- vec_u8 edgehv; \
- /* unaligned store */ \
- vec_u8 bodyv = vec_perm( idstsum8, idstsum8, perm_stv );\
- vec_u8 edgelv = vec_perm( sel, zero_u8v, perm_stv ); \
- lv = vec_sel( lv, bodyv, edgelv ); \
- vec_st( lv, 7, dest ); \
- hv = vec_ld( 0, dest ); \
- edgehv = vec_perm( zero_u8v, sel, perm_stv ); \
- hv = vec_sel( hv, bodyv, edgehv ); \
- vec_st( hv, 0, dest ); \
- }
-
-static void ff_h264_idct8_add_altivec( uint8_t *dst, int16_t *dct, int stride ) {
- vec_s16 s0, s1, s2, s3, s4, s5, s6, s7;
- vec_s16 d0, d1, d2, d3, d4, d5, d6, d7;
- vec_s16 idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7;
-
- vec_u8 perm_ldv = vec_lvsl(0, dst);
- vec_u8 perm_stv = vec_lvsr(8, dst);
-
- const vec_u16 onev = vec_splat_u16(1);
- const vec_u16 twov = vec_splat_u16(2);
- const vec_u16 sixv = vec_splat_u16(6);
-
- const vec_u8 sel = (vec_u8) {0,0,0,0,0,0,0,0,-1,-1,-1,-1,-1,-1,-1,-1};
- LOAD_ZERO;
-
- dct[0] += 32; // rounding for the >>6 at the end
-
- s0 = vec_ld(0x00, (int16_t*)dct);
- s1 = vec_ld(0x10, (int16_t*)dct);
- s2 = vec_ld(0x20, (int16_t*)dct);
- s3 = vec_ld(0x30, (int16_t*)dct);
- s4 = vec_ld(0x40, (int16_t*)dct);
- s5 = vec_ld(0x50, (int16_t*)dct);
- s6 = vec_ld(0x60, (int16_t*)dct);
- s7 = vec_ld(0x70, (int16_t*)dct);
- memset(dct, 0, 64 * sizeof(int16_t));
-
- IDCT8_1D_ALTIVEC(s0, s1, s2, s3, s4, s5, s6, s7,
- d0, d1, d2, d3, d4, d5, d6, d7);
-
- TRANSPOSE8( d0, d1, d2, d3, d4, d5, d6, d7 );
-
- IDCT8_1D_ALTIVEC(d0, d1, d2, d3, d4, d5, d6, d7,
- idct0, idct1, idct2, idct3, idct4, idct5, idct6, idct7);
-
- ALTIVEC_STORE_SUM_CLIP(&dst[0*stride], idct0, perm_ldv, perm_stv, sel);
- ALTIVEC_STORE_SUM_CLIP(&dst[1*stride], idct1, perm_ldv, perm_stv, sel);
- ALTIVEC_STORE_SUM_CLIP(&dst[2*stride], idct2, perm_ldv, perm_stv, sel);
- ALTIVEC_STORE_SUM_CLIP(&dst[3*stride], idct3, perm_ldv, perm_stv, sel);
- ALTIVEC_STORE_SUM_CLIP(&dst[4*stride], idct4, perm_ldv, perm_stv, sel);
- ALTIVEC_STORE_SUM_CLIP(&dst[5*stride], idct5, perm_ldv, perm_stv, sel);
- ALTIVEC_STORE_SUM_CLIP(&dst[6*stride], idct6, perm_ldv, perm_stv, sel);
- ALTIVEC_STORE_SUM_CLIP(&dst[7*stride], idct7, perm_ldv, perm_stv, sel);
-}
-
-static av_always_inline void h264_idct_dc_add_internal(uint8_t *dst, int16_t *block, int stride, int size)
-{
- vec_s16 dc16;
- vec_u8 dcplus, dcminus, v0, v1, v2, v3, aligner;
- LOAD_ZERO;
- DECLARE_ALIGNED(16, int, dc);
- int i;
-
- dc = (block[0] + 32) >> 6;
- block[0] = 0;
- dc16 = vec_splat((vec_s16) vec_lde(0, &dc), 1);
-
- if (size == 4)
- dc16 = vec_sld(dc16, zero_s16v, 8);
- dcplus = vec_packsu(dc16, zero_s16v);
- dcminus = vec_packsu(vec_sub(zero_s16v, dc16), zero_s16v);
-
- aligner = vec_lvsr(0, dst);
- dcplus = vec_perm(dcplus, dcplus, aligner);
- dcminus = vec_perm(dcminus, dcminus, aligner);
-
- for (i = 0; i < size; i += 4) {
- v0 = vec_ld(0, dst+0*stride);
- v1 = vec_ld(0, dst+1*stride);
- v2 = vec_ld(0, dst+2*stride);
- v3 = vec_ld(0, dst+3*stride);
-
- v0 = vec_adds(v0, dcplus);
- v1 = vec_adds(v1, dcplus);
- v2 = vec_adds(v2, dcplus);
- v3 = vec_adds(v3, dcplus);
-
- v0 = vec_subs(v0, dcminus);
- v1 = vec_subs(v1, dcminus);
- v2 = vec_subs(v2, dcminus);
- v3 = vec_subs(v3, dcminus);
-
- vec_st(v0, 0, dst+0*stride);
- vec_st(v1, 0, dst+1*stride);
- vec_st(v2, 0, dst+2*stride);
- vec_st(v3, 0, dst+3*stride);
-
- dst += 4*stride;
- }
-}
-
-static void h264_idct_dc_add_altivec(uint8_t *dst, int16_t *block, int stride)
-{
- h264_idct_dc_add_internal(dst, block, stride, 4);
-}
-
-static void ff_h264_idct8_dc_add_altivec(uint8_t *dst, int16_t *block, int stride)
-{
- h264_idct_dc_add_internal(dst, block, stride, 8);
-}
-
-static void ff_h264_idct_add16_altivec(uint8_t *dst, const int *block_offset, int16_t *block, int stride, const uint8_t nnzc[15*8]){
- int i;
- for(i=0; i<16; i++){
- int nnz = nnzc[ scan8[i] ];
- if(nnz){
- if(nnz==1 && block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
- else ff_h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
- }
- }
-}
-
-static void ff_h264_idct_add16intra_altivec(uint8_t *dst, const int *block_offset, int16_t *block, int stride, const uint8_t nnzc[15*8]){
- int i;
- for(i=0; i<16; i++){
- if(nnzc[ scan8[i] ]) ff_h264_idct_add_altivec(dst + block_offset[i], block + i*16, stride);
- else if(block[i*16]) h264_idct_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
- }
-}
-
-static void ff_h264_idct8_add4_altivec(uint8_t *dst, const int *block_offset, int16_t *block, int stride, const uint8_t nnzc[15*8]){
- int i;
- for(i=0; i<16; i+=4){
- int nnz = nnzc[ scan8[i] ];
- if(nnz){
- if(nnz==1 && block[i*16]) ff_h264_idct8_dc_add_altivec(dst + block_offset[i], block + i*16, stride);
- else ff_h264_idct8_add_altivec (dst + block_offset[i], block + i*16, stride);
- }
- }
-}
-
-static void ff_h264_idct_add8_altivec(uint8_t **dest, const int *block_offset, int16_t *block, int stride, const uint8_t nnzc[15*8]){
- int i, j;
- for (j = 1; j < 3; j++) {
- for(i = j * 16; i < j * 16 + 4; i++){
- if(nnzc[ scan8[i] ])
- ff_h264_idct_add_altivec(dest[j-1] + block_offset[i], block + i*16, stride);
- else if(block[i*16])
- h264_idct_dc_add_altivec(dest[j-1] + block_offset[i], block + i*16, stride);
- }
- }
-}
-
-#define transpose4x16(r0, r1, r2, r3) { \
- register vec_u8 r4; \
- register vec_u8 r5; \
- register vec_u8 r6; \
- register vec_u8 r7; \
- \
- r4 = vec_mergeh(r0, r2); /*0, 2 set 0*/ \
- r5 = vec_mergel(r0, r2); /*0, 2 set 1*/ \
- r6 = vec_mergeh(r1, r3); /*1, 3 set 0*/ \
- r7 = vec_mergel(r1, r3); /*1, 3 set 1*/ \
- \
- r0 = vec_mergeh(r4, r6); /*all set 0*/ \
- r1 = vec_mergel(r4, r6); /*all set 1*/ \
- r2 = vec_mergeh(r5, r7); /*all set 2*/ \
- r3 = vec_mergel(r5, r7); /*all set 3*/ \
-}
-
-static inline void write16x4(uint8_t *dst, int dst_stride,
- register vec_u8 r0, register vec_u8 r1,
- register vec_u8 r2, register vec_u8 r3) {
- DECLARE_ALIGNED(16, unsigned char, result)[64];
- uint32_t *src_int = (uint32_t *)result, *dst_int = (uint32_t *)dst;
- int int_dst_stride = dst_stride/4;
-
- vec_st(r0, 0, result);
- vec_st(r1, 16, result);
- vec_st(r2, 32, result);
- vec_st(r3, 48, result);
- /* FIXME: there has to be a better way!!!! */
- *dst_int = *src_int;
- *(dst_int+ int_dst_stride) = *(src_int + 1);
- *(dst_int+ 2*int_dst_stride) = *(src_int + 2);
- *(dst_int+ 3*int_dst_stride) = *(src_int + 3);
- *(dst_int+ 4*int_dst_stride) = *(src_int + 4);
- *(dst_int+ 5*int_dst_stride) = *(src_int + 5);
- *(dst_int+ 6*int_dst_stride) = *(src_int + 6);
- *(dst_int+ 7*int_dst_stride) = *(src_int + 7);
- *(dst_int+ 8*int_dst_stride) = *(src_int + 8);
- *(dst_int+ 9*int_dst_stride) = *(src_int + 9);
- *(dst_int+10*int_dst_stride) = *(src_int + 10);
- *(dst_int+11*int_dst_stride) = *(src_int + 11);
- *(dst_int+12*int_dst_stride) = *(src_int + 12);
- *(dst_int+13*int_dst_stride) = *(src_int + 13);
- *(dst_int+14*int_dst_stride) = *(src_int + 14);
- *(dst_int+15*int_dst_stride) = *(src_int + 15);
-}
-
-/** @brief performs a 6x16 transpose of data in src, and stores it to dst
- @todo FIXME: see if we can't spare some vec_lvsl() by them factorizing
- out of unaligned_load() */
-#define readAndTranspose16x6(src, src_stride, r8, r9, r10, r11, r12, r13) {\
- register vec_u8 r0 = unaligned_load(0, src); \
- register vec_u8 r1 = unaligned_load( src_stride, src); \
- register vec_u8 r2 = unaligned_load(2* src_stride, src); \
- register vec_u8 r3 = unaligned_load(3* src_stride, src); \
- register vec_u8 r4 = unaligned_load(4* src_stride, src); \
- register vec_u8 r5 = unaligned_load(5* src_stride, src); \
- register vec_u8 r6 = unaligned_load(6* src_stride, src); \
- register vec_u8 r7 = unaligned_load(7* src_stride, src); \
- register vec_u8 r14 = unaligned_load(14*src_stride, src); \
- register vec_u8 r15 = unaligned_load(15*src_stride, src); \
- \
- r8 = unaligned_load( 8*src_stride, src); \
- r9 = unaligned_load( 9*src_stride, src); \
- r10 = unaligned_load(10*src_stride, src); \
- r11 = unaligned_load(11*src_stride, src); \
- r12 = unaligned_load(12*src_stride, src); \
- r13 = unaligned_load(13*src_stride, src); \
- \
- /*Merge first pairs*/ \
- r0 = vec_mergeh(r0, r8); /*0, 8*/ \
- r1 = vec_mergeh(r1, r9); /*1, 9*/ \
- r2 = vec_mergeh(r2, r10); /*2,10*/ \
- r3 = vec_mergeh(r3, r11); /*3,11*/ \
- r4 = vec_mergeh(r4, r12); /*4,12*/ \
- r5 = vec_mergeh(r5, r13); /*5,13*/ \
- r6 = vec_mergeh(r6, r14); /*6,14*/ \
- r7 = vec_mergeh(r7, r15); /*7,15*/ \
- \
- /*Merge second pairs*/ \
- r8 = vec_mergeh(r0, r4); /*0,4, 8,12 set 0*/ \
- r9 = vec_mergel(r0, r4); /*0,4, 8,12 set 1*/ \
- r10 = vec_mergeh(r1, r5); /*1,5, 9,13 set 0*/ \
- r11 = vec_mergel(r1, r5); /*1,5, 9,13 set 1*/ \
- r12 = vec_mergeh(r2, r6); /*2,6,10,14 set 0*/ \
- r13 = vec_mergel(r2, r6); /*2,6,10,14 set 1*/ \
- r14 = vec_mergeh(r3, r7); /*3,7,11,15 set 0*/ \
- r15 = vec_mergel(r3, r7); /*3,7,11,15 set 1*/ \
- \
- /*Third merge*/ \
- r0 = vec_mergeh(r8, r12); /*0,2,4,6,8,10,12,14 set 0*/ \
- r1 = vec_mergel(r8, r12); /*0,2,4,6,8,10,12,14 set 1*/ \
- r2 = vec_mergeh(r9, r13); /*0,2,4,6,8,10,12,14 set 2*/ \
- r4 = vec_mergeh(r10, r14); /*1,3,5,7,9,11,13,15 set 0*/ \
- r5 = vec_mergel(r10, r14); /*1,3,5,7,9,11,13,15 set 1*/ \
- r6 = vec_mergeh(r11, r15); /*1,3,5,7,9,11,13,15 set 2*/ \
- /* Don't need to compute 3 and 7*/ \
- \
- /*Final merge*/ \
- r8 = vec_mergeh(r0, r4); /*all set 0*/ \
- r9 = vec_mergel(r0, r4); /*all set 1*/ \
- r10 = vec_mergeh(r1, r5); /*all set 2*/ \
- r11 = vec_mergel(r1, r5); /*all set 3*/ \
- r12 = vec_mergeh(r2, r6); /*all set 4*/ \
- r13 = vec_mergel(r2, r6); /*all set 5*/ \
- /* Don't need to compute 14 and 15*/ \
- \
-}
-
-// out: o = |x-y| < a
-static inline vec_u8 diff_lt_altivec ( register vec_u8 x,
- register vec_u8 y,
- register vec_u8 a) {
-
- register vec_u8 diff = vec_subs(x, y);
- register vec_u8 diffneg = vec_subs(y, x);
- register vec_u8 o = vec_or(diff, diffneg); /* |x-y| */
- o = (vec_u8)vec_cmplt(o, a);
- return o;
-}
-
-static inline vec_u8 h264_deblock_mask ( register vec_u8 p0,
- register vec_u8 p1,
- register vec_u8 q0,
- register vec_u8 q1,
- register vec_u8 alpha,
- register vec_u8 beta) {
-
- register vec_u8 mask;
- register vec_u8 tempmask;
-
- mask = diff_lt_altivec(p0, q0, alpha);
- tempmask = diff_lt_altivec(p1, p0, beta);
- mask = vec_and(mask, tempmask);
- tempmask = diff_lt_altivec(q1, q0, beta);
- mask = vec_and(mask, tempmask);
-
- return mask;
-}
-
-// out: newp1 = clip((p2 + ((p0 + q0 + 1) >> 1)) >> 1, p1-tc0, p1+tc0)
-static inline vec_u8 h264_deblock_q1(register vec_u8 p0,
- register vec_u8 p1,
- register vec_u8 p2,
- register vec_u8 q0,
- register vec_u8 tc0) {
-
- register vec_u8 average = vec_avg(p0, q0);
- register vec_u8 temp;
- register vec_u8 uncliped;
- register vec_u8 ones;
- register vec_u8 max;
- register vec_u8 min;
- register vec_u8 newp1;
-
- temp = vec_xor(average, p2);
- average = vec_avg(average, p2); /*avg(p2, avg(p0, q0)) */
- ones = vec_splat_u8(1);
- temp = vec_and(temp, ones); /*(p2^avg(p0, q0)) & 1 */
- uncliped = vec_subs(average, temp); /*(p2+((p0+q0+1)>>1))>>1 */
- max = vec_adds(p1, tc0);
- min = vec_subs(p1, tc0);
- newp1 = vec_max(min, uncliped);
- newp1 = vec_min(max, newp1);
- return newp1;
-}
-
-#define h264_deblock_p0_q0(p0, p1, q0, q1, tc0masked) { \
- \
- const vec_u8 A0v = vec_sl(vec_splat_u8(10), vec_splat_u8(4)); \
- \
- register vec_u8 pq0bit = vec_xor(p0,q0); \
- register vec_u8 q1minus; \
- register vec_u8 p0minus; \
- register vec_u8 stage1; \
- register vec_u8 stage2; \
- register vec_u8 vec160; \
- register vec_u8 delta; \
- register vec_u8 deltaneg; \
- \
- q1minus = vec_nor(q1, q1); /* 255 - q1 */ \
- stage1 = vec_avg(p1, q1minus); /* (p1 - q1 + 256)>>1 */ \
- stage2 = vec_sr(stage1, vec_splat_u8(1)); /* (p1 - q1 + 256)>>2 = 64 + (p1 - q1) >> 2 */ \
- p0minus = vec_nor(p0, p0); /* 255 - p0 */ \
- stage1 = vec_avg(q0, p0minus); /* (q0 - p0 + 256)>>1 */ \
- pq0bit = vec_and(pq0bit, vec_splat_u8(1)); \
- stage2 = vec_avg(stage2, pq0bit); /* 32 + ((q0 - p0)&1 + (p1 - q1) >> 2 + 1) >> 1 */ \
- stage2 = vec_adds(stage2, stage1); /* 160 + ((p0 - q0) + (p1 - q1) >> 2 + 1) >> 1 */ \
- vec160 = vec_ld(0, &A0v); \
- deltaneg = vec_subs(vec160, stage2); /* -d */ \
- delta = vec_subs(stage2, vec160); /* d */ \
- deltaneg = vec_min(tc0masked, deltaneg); \
- delta = vec_min(tc0masked, delta); \
- p0 = vec_subs(p0, deltaneg); \
- q0 = vec_subs(q0, delta); \
- p0 = vec_adds(p0, delta); \
- q0 = vec_adds(q0, deltaneg); \
-}
-
-#define h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0) { \
- DECLARE_ALIGNED(16, unsigned char, temp)[16]; \
- register vec_u8 alphavec; \
- register vec_u8 betavec; \
- register vec_u8 mask; \
- register vec_u8 p1mask; \
- register vec_u8 q1mask; \
- register vector signed char tc0vec; \
- register vec_u8 finaltc0; \
- register vec_u8 tc0masked; \
- register vec_u8 newp1; \
- register vec_u8 newq1; \
- \
- temp[0] = alpha; \
- temp[1] = beta; \
- alphavec = vec_ld(0, temp); \
- betavec = vec_splat(alphavec, 0x1); \
- alphavec = vec_splat(alphavec, 0x0); \
- mask = h264_deblock_mask(p0, p1, q0, q1, alphavec, betavec); /*if in block */ \
- \
- AV_COPY32(temp, tc0); \
- tc0vec = vec_ld(0, (signed char*)temp); \
- tc0vec = vec_mergeh(tc0vec, tc0vec); \
- tc0vec = vec_mergeh(tc0vec, tc0vec); \
- mask = vec_and(mask, vec_cmpgt(tc0vec, vec_splat_s8(-1))); /* if tc0[i] >= 0 */ \
- finaltc0 = vec_and((vec_u8)tc0vec, mask); /* tc = tc0 */ \
- \
- p1mask = diff_lt_altivec(p2, p0, betavec); \
- p1mask = vec_and(p1mask, mask); /* if ( |p2 - p0| < beta) */ \
- tc0masked = vec_and(p1mask, (vec_u8)tc0vec); \
- finaltc0 = vec_sub(finaltc0, p1mask); /* tc++ */ \
- newp1 = h264_deblock_q1(p0, p1, p2, q0, tc0masked); \
- /*end if*/ \
- \
- q1mask = diff_lt_altivec(q2, q0, betavec); \
- q1mask = vec_and(q1mask, mask); /* if ( |q2 - q0| < beta ) */\
- tc0masked = vec_and(q1mask, (vec_u8)tc0vec); \
- finaltc0 = vec_sub(finaltc0, q1mask); /* tc++ */ \
- newq1 = h264_deblock_q1(p0, q1, q2, q0, tc0masked); \
- /*end if*/ \
- \
- h264_deblock_p0_q0(p0, p1, q0, q1, finaltc0); \
- p1 = newp1; \
- q1 = newq1; \
-}
-
-static void h264_v_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {
-
- if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) >= 0) {
- register vec_u8 p2 = vec_ld(-3*stride, pix);
- register vec_u8 p1 = vec_ld(-2*stride, pix);
- register vec_u8 p0 = vec_ld(-1*stride, pix);
- register vec_u8 q0 = vec_ld(0, pix);
- register vec_u8 q1 = vec_ld(stride, pix);
- register vec_u8 q2 = vec_ld(2*stride, pix);
- h264_loop_filter_luma_altivec(p2, p1, p0, q0, q1, q2, alpha, beta, tc0);
- vec_st(p1, -2*stride, pix);
- vec_st(p0, -1*stride, pix);
- vec_st(q0, 0, pix);
- vec_st(q1, stride, pix);
- }
-}
-
-static void h264_h_loop_filter_luma_altivec(uint8_t *pix, int stride, int alpha, int beta, int8_t *tc0) {
-
- register vec_u8 line0, line1, line2, line3, line4, line5;
- if ((tc0[0] & tc0[1] & tc0[2] & tc0[3]) < 0)
- return;
- readAndTranspose16x6(pix-3, stride, line0, line1, line2, line3, line4, line5);
- h264_loop_filter_luma_altivec(line0, line1, line2, line3, line4, line5, alpha, beta, tc0);
- transpose4x16(line1, line2, line3, line4);
- write16x4(pix-2, stride, line1, line2, line3, line4);
-}
-
-static av_always_inline
-void weight_h264_W_altivec(uint8_t *block, int stride, int height,
- int log2_denom, int weight, int offset, int w)
-{
- int y, aligned;
- vec_u8 vblock;
- vec_s16 vtemp, vweight, voffset, v0, v1;
- vec_u16 vlog2_denom;
- DECLARE_ALIGNED(16, int32_t, temp)[4];
- LOAD_ZERO;
-
- offset <<= log2_denom;
- if(log2_denom) offset += 1<<(log2_denom-1);
- temp[0] = log2_denom;
- temp[1] = weight;
- temp[2] = offset;
-
- vtemp = (vec_s16)vec_ld(0, temp);
- vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
- vweight = vec_splat(vtemp, 3);
- voffset = vec_splat(vtemp, 5);
- aligned = !((unsigned long)block & 0xf);
-
- for (y = 0; y < height; y++) {
- vblock = vec_ld(0, block);
-
- v0 = (vec_s16)vec_mergeh(zero_u8v, vblock);
- v1 = (vec_s16)vec_mergel(zero_u8v, vblock);
-
- if (w == 16 || aligned) {
- v0 = vec_mladd(v0, vweight, zero_s16v);
- v0 = vec_adds(v0, voffset);
- v0 = vec_sra(v0, vlog2_denom);
- }
- if (w == 16 || !aligned) {
- v1 = vec_mladd(v1, vweight, zero_s16v);
- v1 = vec_adds(v1, voffset);
- v1 = vec_sra(v1, vlog2_denom);
- }
- vblock = vec_packsu(v0, v1);
- vec_st(vblock, 0, block);
-
- block += stride;
- }
-}
-
-static av_always_inline
-void biweight_h264_W_altivec(uint8_t *dst, uint8_t *src, int stride, int height,
- int log2_denom, int weightd, int weights, int offset, int w)
-{
- int y, dst_aligned, src_aligned;
- vec_u8 vsrc, vdst;
- vec_s16 vtemp, vweights, vweightd, voffset, v0, v1, v2, v3;
- vec_u16 vlog2_denom;
- DECLARE_ALIGNED(16, int32_t, temp)[4];
- LOAD_ZERO;
-
- offset = ((offset + 1) | 1) << log2_denom;
- temp[0] = log2_denom+1;
- temp[1] = weights;
- temp[2] = weightd;
- temp[3] = offset;
-
- vtemp = (vec_s16)vec_ld(0, temp);
- vlog2_denom = (vec_u16)vec_splat(vtemp, 1);
- vweights = vec_splat(vtemp, 3);
- vweightd = vec_splat(vtemp, 5);
- voffset = vec_splat(vtemp, 7);
- dst_aligned = !((unsigned long)dst & 0xf);
- src_aligned = !((unsigned long)src & 0xf);
-
- for (y = 0; y < height; y++) {
- vdst = vec_ld(0, dst);
- vsrc = vec_ld(0, src);
-
- v0 = (vec_s16)vec_mergeh(zero_u8v, vdst);
- v1 = (vec_s16)vec_mergel(zero_u8v, vdst);
- v2 = (vec_s16)vec_mergeh(zero_u8v, vsrc);
- v3 = (vec_s16)vec_mergel(zero_u8v, vsrc);
-
- if (w == 8) {
- if (src_aligned)
- v3 = v2;
- else
- v2 = v3;
- }
-
- if (w == 16 || dst_aligned) {
- v0 = vec_mladd(v0, vweightd, zero_s16v);
- v2 = vec_mladd(v2, vweights, zero_s16v);
-
- v0 = vec_adds(v0, voffset);
- v0 = vec_adds(v0, v2);
- v0 = vec_sra(v0, vlog2_denom);
- }
- if (w == 16 || !dst_aligned) {
- v1 = vec_mladd(v1, vweightd, zero_s16v);
- v3 = vec_mladd(v3, vweights, zero_s16v);
-
- v1 = vec_adds(v1, voffset);
- v1 = vec_adds(v1, v3);
- v1 = vec_sra(v1, vlog2_denom);
- }
- vdst = vec_packsu(v0, v1);
- vec_st(vdst, 0, dst);
-
- dst += stride;
- src += stride;
- }
-}
-
-#define H264_WEIGHT(W) \
-static void ff_weight_h264_pixels ## W ## _altivec(uint8_t *block, int stride, int height, \
- int log2_denom, int weight, int offset){ \
- weight_h264_W_altivec(block, stride, height, log2_denom, weight, offset, W); \
-}\
-static void ff_biweight_h264_pixels ## W ## _altivec(uint8_t *dst, uint8_t *src, int stride, int height, \
- int log2_denom, int weightd, int weights, int offset){ \
- biweight_h264_W_altivec(dst, src, stride, height, log2_denom, weightd, weights, offset, W); \
-}
-
-H264_WEIGHT(16)
-H264_WEIGHT( 8)
-
-av_cold void ff_h264dsp_init_ppc(H264DSPContext *c, const int bit_depth,
- const int chroma_format_idc)
-{
- if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
- if (bit_depth == 8) {
- c->h264_idct_add = ff_h264_idct_add_altivec;
- if (chroma_format_idc == 1)
- c->h264_idct_add8 = ff_h264_idct_add8_altivec;
- c->h264_idct_add16 = ff_h264_idct_add16_altivec;
- c->h264_idct_add16intra = ff_h264_idct_add16intra_altivec;
- c->h264_idct_dc_add= h264_idct_dc_add_altivec;
- c->h264_idct8_dc_add = ff_h264_idct8_dc_add_altivec;
- c->h264_idct8_add = ff_h264_idct8_add_altivec;
- c->h264_idct8_add4 = ff_h264_idct8_add4_altivec;
- c->h264_v_loop_filter_luma= h264_v_loop_filter_luma_altivec;
- c->h264_h_loop_filter_luma= h264_h_loop_filter_luma_altivec;
-
- c->weight_h264_pixels_tab[0] = ff_weight_h264_pixels16_altivec;
- c->weight_h264_pixels_tab[1] = ff_weight_h264_pixels8_altivec;
- c->biweight_h264_pixels_tab[0] = ff_biweight_h264_pixels16_altivec;
- c->biweight_h264_pixels_tab[1] = ff_biweight_h264_pixels8_altivec;
- }
- }
-}
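For reference while reading the deleted IDCT code above: the VEC_1D_DCT macro vectorizes the standard H.264 4x4 inverse-transform butterfly, run once over rows and once over columns. In scalar form (a reconstruction that simply follows the macro's own comments):

    /* One 1-D pass of the H.264 4x4 inverse transform. */
    static void idct4_1d(const int y[4], int x[4])
    {
        int t0 = y[0] + y[2];
        int t1 = y[0] - y[2];
        int t2 = (y[1] >> 1) - y[3];
        int t3 = y[1] + (y[3] >> 1);

        x[0] = t0 + t3;
        x[1] = t1 + t2;
        x[2] = t1 - t2;
        x[3] = t0 - t3;
    }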
diff --git a/ffmpeg/libavcodec/ppc/h264_qpel.c b/ffmpeg/libavcodec/ppc/h264_qpel.c
deleted file mode 100644
index 429ae42..0000000
--- a/ffmpeg/libavcodec/ppc/h264_qpel.c
+++ /dev/null
@@ -1,317 +0,0 @@
-/*
- * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "config.h"
-#include "libavutil/attributes.h"
-#include "libavcodec/h264qpel.h"
-
-#if HAVE_ALTIVEC
-#include "libavutil/cpu.h"
-#include "libavutil/intreadwrite.h"
-#include "libavutil/ppc/types_altivec.h"
-#include "libavutil/ppc/util_altivec.h"
-#include "dsputil_altivec.h"
-
-#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
-#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)
-
-#define OP_U8_ALTIVEC PUT_OP_U8_ALTIVEC
-#define PREFIX_h264_qpel16_h_lowpass_altivec put_h264_qpel16_h_lowpass_altivec
-#define PREFIX_h264_qpel16_h_lowpass_num altivec_put_h264_qpel16_h_lowpass_num
-#define PREFIX_h264_qpel16_v_lowpass_altivec put_h264_qpel16_v_lowpass_altivec
-#define PREFIX_h264_qpel16_v_lowpass_num altivec_put_h264_qpel16_v_lowpass_num
-#define PREFIX_h264_qpel16_hv_lowpass_altivec put_h264_qpel16_hv_lowpass_altivec
-#define PREFIX_h264_qpel16_hv_lowpass_num altivec_put_h264_qpel16_hv_lowpass_num
-#include "h264_qpel_template.c"
-#undef OP_U8_ALTIVEC
-#undef PREFIX_h264_qpel16_h_lowpass_altivec
-#undef PREFIX_h264_qpel16_h_lowpass_num
-#undef PREFIX_h264_qpel16_v_lowpass_altivec
-#undef PREFIX_h264_qpel16_v_lowpass_num
-#undef PREFIX_h264_qpel16_hv_lowpass_altivec
-#undef PREFIX_h264_qpel16_hv_lowpass_num
-
-#define OP_U8_ALTIVEC AVG_OP_U8_ALTIVEC
-#define PREFIX_h264_qpel16_h_lowpass_altivec avg_h264_qpel16_h_lowpass_altivec
-#define PREFIX_h264_qpel16_h_lowpass_num altivec_avg_h264_qpel16_h_lowpass_num
-#define PREFIX_h264_qpel16_v_lowpass_altivec avg_h264_qpel16_v_lowpass_altivec
-#define PREFIX_h264_qpel16_v_lowpass_num altivec_avg_h264_qpel16_v_lowpass_num
-#define PREFIX_h264_qpel16_hv_lowpass_altivec avg_h264_qpel16_hv_lowpass_altivec
-#define PREFIX_h264_qpel16_hv_lowpass_num altivec_avg_h264_qpel16_hv_lowpass_num
-#include "h264_qpel_template.c"
-#undef OP_U8_ALTIVEC
-#undef PREFIX_h264_qpel16_h_lowpass_altivec
-#undef PREFIX_h264_qpel16_h_lowpass_num
-#undef PREFIX_h264_qpel16_v_lowpass_altivec
-#undef PREFIX_h264_qpel16_v_lowpass_num
-#undef PREFIX_h264_qpel16_hv_lowpass_altivec
-#undef PREFIX_h264_qpel16_hv_lowpass_num
-
-#define H264_MC(OPNAME, SIZE, CODETYPE) \
-static void OPNAME ## h264_qpel ## SIZE ## _mc00_ ## CODETYPE (uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
-{\
- ff_ ## OPNAME ## pixels ## SIZE ## _ ## CODETYPE(dst, src, stride, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc10_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
-{ \
- DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
- put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
- OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc20_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
-{\
- OPNAME ## h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(dst, src, stride, stride);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc30_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
-{\
- DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
- put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
- OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+1, half, stride, stride, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc01_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
-{\
- DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
- put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
- OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src, half, stride, stride, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc02_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
-{\
- OPNAME ## h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(dst, src, stride, stride);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc03_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
-{\
- DECLARE_ALIGNED(16, uint8_t, half)[SIZE*SIZE];\
- put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(half, src, SIZE, stride);\
- OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, src+stride, half, stride, stride, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc11_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
-{\
- DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
- DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
- put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
- put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
- OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc31_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
-{\
- DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
- DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
- put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
- put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
- OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc13_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
-{\
- DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
- DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
- put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
- put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
- OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc33_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
-{\
- DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
- DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
- put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
- put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
- OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfV, stride, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc22_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
-{\
- DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
- OPNAME ## h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(dst, tmp, src, stride, SIZE, stride);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc21_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
-{\
- DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
- DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
- DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
- put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src, SIZE, stride);\
- put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
- OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc23_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
-{\
- DECLARE_ALIGNED(16, uint8_t, halfH)[SIZE*SIZE];\
- DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
- DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
- put_h264_qpel ## SIZE ## _h_lowpass_ ## CODETYPE(halfH, src + stride, SIZE, stride);\
- put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
- OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfH, halfHV, stride, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc12_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
-{\
- DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
- DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
- DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
- put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src, SIZE, stride);\
- put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
- OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
-}\
-\
-static void OPNAME ## h264_qpel ## SIZE ## _mc32_ ## CODETYPE(uint8_t *dst, uint8_t *src, ptrdiff_t stride)\
-{\
- DECLARE_ALIGNED(16, uint8_t, halfV)[SIZE*SIZE];\
- DECLARE_ALIGNED(16, uint8_t, halfHV)[SIZE*SIZE];\
- DECLARE_ALIGNED(16, int16_t, tmp)[SIZE*(SIZE+8)];\
- put_h264_qpel ## SIZE ## _v_lowpass_ ## CODETYPE(halfV, src+1, SIZE, stride);\
- put_h264_qpel ## SIZE ## _hv_lowpass_ ## CODETYPE(halfHV, tmp, src, SIZE, SIZE, stride);\
- OPNAME ## pixels ## SIZE ## _l2_ ## CODETYPE(dst, halfV, halfHV, stride, SIZE, SIZE);\
-}\
-
-static inline void put_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
- const uint8_t * src2, int dst_stride,
- int src_stride1, int h)
-{
- int i;
- vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;
-
- mask_ = vec_lvsl(0, src2);
-
- for (i = 0; i < h; i++) {
-
- tmp1 = vec_ld(i * src_stride1, src1);
- mask = vec_lvsl(i * src_stride1, src1);
- tmp2 = vec_ld(i * src_stride1 + 15, src1);
-
- a = vec_perm(tmp1, tmp2, mask);
-
- tmp1 = vec_ld(i * 16, src2);
- tmp2 = vec_ld(i * 16 + 15, src2);
-
- b = vec_perm(tmp1, tmp2, mask_);
-
- tmp1 = vec_ld(0, dst);
- mask = vec_lvsl(0, dst);
- tmp2 = vec_ld(15, dst);
-
- d = vec_avg(a, b);
-
- edges = vec_perm(tmp2, tmp1, mask);
-
- align = vec_lvsr(0, dst);
-
- tmp2 = vec_perm(d, edges, align);
- tmp1 = vec_perm(edges, d, align);
-
- vec_st(tmp2, 15, dst);
- vec_st(tmp1, 0 , dst);
-
- dst += dst_stride;
- }
-}
-
-static inline void avg_pixels16_l2_altivec( uint8_t * dst, const uint8_t * src1,
- const uint8_t * src2, int dst_stride,
- int src_stride1, int h)
-{
- int i;
- vec_u8 a, b, d, tmp1, tmp2, mask, mask_, edges, align;
-
- mask_ = vec_lvsl(0, src2);
-
- for (i = 0; i < h; i++) {
-
- tmp1 = vec_ld(i * src_stride1, src1);
- mask = vec_lvsl(i * src_stride1, src1);
- tmp2 = vec_ld(i * src_stride1 + 15, src1);
-
- a = vec_perm(tmp1, tmp2, mask);
-
- tmp1 = vec_ld(i * 16, src2);
- tmp2 = vec_ld(i * 16 + 15, src2);
-
- b = vec_perm(tmp1, tmp2, mask_);
-
- tmp1 = vec_ld(0, dst);
- mask = vec_lvsl(0, dst);
- tmp2 = vec_ld(15, dst);
-
- d = vec_avg(vec_perm(tmp1, tmp2, mask), vec_avg(a, b));
-
- edges = vec_perm(tmp2, tmp1, mask);
-
- align = vec_lvsr(0, dst);
-
- tmp2 = vec_perm(d, edges, align);
- tmp1 = vec_perm(edges, d, align);
-
- vec_st(tmp2, 15, dst);
- vec_st(tmp1, 0 , dst);
-
- dst += dst_stride;
- }
-}
-
-/* Implemented but could be faster
-#define put_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) put_pixels16_l2(d,s1,s2,ds,s1s,16,h)
-#define avg_pixels16_l2_altivec(d,s1,s2,ds,s1s,h) avg_pixels16_l2(d,s1,s2,ds,s1s,16,h)
- */
-
-H264_MC(put_, 16, altivec)
-H264_MC(avg_, 16, altivec)
-#endif /* HAVE_ALTIVEC */
-
-av_cold void ff_h264qpel_init_ppc(H264QpelContext *c, int bit_depth)
-{
-#if HAVE_ALTIVEC
- const int high_bit_depth = bit_depth > 8;
-
- if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
- if (!high_bit_depth) {
-#define dspfunc(PFX, IDX, NUM) \
- c->PFX ## _pixels_tab[IDX][ 0] = PFX ## NUM ## _mc00_altivec; \
- c->PFX ## _pixels_tab[IDX][ 1] = PFX ## NUM ## _mc10_altivec; \
- c->PFX ## _pixels_tab[IDX][ 2] = PFX ## NUM ## _mc20_altivec; \
- c->PFX ## _pixels_tab[IDX][ 3] = PFX ## NUM ## _mc30_altivec; \
- c->PFX ## _pixels_tab[IDX][ 4] = PFX ## NUM ## _mc01_altivec; \
- c->PFX ## _pixels_tab[IDX][ 5] = PFX ## NUM ## _mc11_altivec; \
- c->PFX ## _pixels_tab[IDX][ 6] = PFX ## NUM ## _mc21_altivec; \
- c->PFX ## _pixels_tab[IDX][ 7] = PFX ## NUM ## _mc31_altivec; \
- c->PFX ## _pixels_tab[IDX][ 8] = PFX ## NUM ## _mc02_altivec; \
- c->PFX ## _pixels_tab[IDX][ 9] = PFX ## NUM ## _mc12_altivec; \
- c->PFX ## _pixels_tab[IDX][10] = PFX ## NUM ## _mc22_altivec; \
- c->PFX ## _pixels_tab[IDX][11] = PFX ## NUM ## _mc32_altivec; \
- c->PFX ## _pixels_tab[IDX][12] = PFX ## NUM ## _mc03_altivec; \
- c->PFX ## _pixels_tab[IDX][13] = PFX ## NUM ## _mc13_altivec; \
- c->PFX ## _pixels_tab[IDX][14] = PFX ## NUM ## _mc23_altivec; \
- c->PFX ## _pixels_tab[IDX][15] = PFX ## NUM ## _mc33_altivec
-
- dspfunc(put_h264_qpel, 0, 16);
- dspfunc(avg_h264_qpel, 0, 16);
-#undef dspfunc
- }
- }
-#endif /* HAVE_ALTIVEC */
-}
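The h/v/hv lowpass routines registered above (their bodies follow in h264_qpel_template.c) all evaluate the H.264 six-tap half-pel filter (1, -5, 20, 20, -5, 1). What each SIMD lane computes, reconstructed from the vector arithmetic as a scalar sketch:

    #include <stdint.h>

    /* Six-tap half-pel interpolation for one pixel: m2..p3 are the six
     * neighbouring samples; +16 and >>5 perform the rounding, and the
     * final clamp mirrors the vec_packsu() saturation. */
    static uint8_t sixtap(int m2, int m1, int p0, int p1, int p2, int p3)
    {
        int v = 20 * (p0 + p1) - 5 * (m1 + p2) + (m2 + p3) + 16;
        v >>= 5;
        return v < 0 ? 0 : v > 255 ? 255 : (uint8_t)v;
    }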
diff --git a/ffmpeg/libavcodec/ppc/h264_qpel_template.c b/ffmpeg/libavcodec/ppc/h264_qpel_template.c
deleted file mode 100644
index cfc4560..0000000
--- a/ffmpeg/libavcodec/ppc/h264_qpel_template.c
+++ /dev/null
@@ -1,507 +0,0 @@
-/*
- * Copyright (c) 2004 Romain Dolbeau <romain@dolbeau.org>
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "libavutil/mem.h"
-
-#ifdef DEBUG
-#define ASSERT_ALIGNED(ptr) assert(((unsigned long)ptr&0x0000000F));
-#else
-#define ASSERT_ALIGNED(ptr) ;
-#endif
-
-/* this code assume stride % 16 == 0 */
-#ifdef PREFIX_h264_qpel16_h_lowpass_altivec
-static void PREFIX_h264_qpel16_h_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {
- register int i;
-
- LOAD_ZERO;
- const vec_u8 permM2 = vec_lvsl(-2, src);
- const vec_u8 permM1 = vec_lvsl(-1, src);
- const vec_u8 permP0 = vec_lvsl(+0, src);
- const vec_u8 permP1 = vec_lvsl(+1, src);
- const vec_u8 permP2 = vec_lvsl(+2, src);
- const vec_u8 permP3 = vec_lvsl(+3, src);
- const vec_s16 v5ss = vec_splat_s16(5);
- const vec_u16 v5us = vec_splat_u16(5);
- const vec_s16 v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
- const vec_s16 v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4));
-
- vec_u8 srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
-
- register int align = ((((unsigned long)src) - 2) % 16);
-
- vec_s16 srcP0A, srcP0B, srcP1A, srcP1B,
- srcP2A, srcP2B, srcP3A, srcP3B,
- srcM1A, srcM1B, srcM2A, srcM2B,
- sum1A, sum1B, sum2A, sum2B, sum3A, sum3B,
- pp1A, pp1B, pp2A, pp2B, pp3A, pp3B,
- psumA, psumB, sumA, sumB;
-
- vec_u8 sum, fsum;
-
- for (i = 0 ; i < 16 ; i ++) {
- vec_u8 srcR1 = vec_ld(-2, src);
- vec_u8 srcR2 = vec_ld(14, src);
-
- switch (align) {
- default: {
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = vec_perm(srcR1, srcR2, permP0);
- srcP1 = vec_perm(srcR1, srcR2, permP1);
- srcP2 = vec_perm(srcR1, srcR2, permP2);
- srcP3 = vec_perm(srcR1, srcR2, permP3);
- } break;
- case 11: {
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = vec_perm(srcR1, srcR2, permP0);
- srcP1 = vec_perm(srcR1, srcR2, permP1);
- srcP2 = vec_perm(srcR1, srcR2, permP2);
- srcP3 = srcR2;
- } break;
- case 12: {
- vec_u8 srcR3 = vec_ld(30, src);
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = vec_perm(srcR1, srcR2, permP0);
- srcP1 = vec_perm(srcR1, srcR2, permP1);
- srcP2 = srcR2;
- srcP3 = vec_perm(srcR2, srcR3, permP3);
- } break;
- case 13: {
- vec_u8 srcR3 = vec_ld(30, src);
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = vec_perm(srcR1, srcR2, permP0);
- srcP1 = srcR2;
- srcP2 = vec_perm(srcR2, srcR3, permP2);
- srcP3 = vec_perm(srcR2, srcR3, permP3);
- } break;
- case 14: {
- vec_u8 srcR3 = vec_ld(30, src);
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = srcR2;
- srcP1 = vec_perm(srcR2, srcR3, permP1);
- srcP2 = vec_perm(srcR2, srcR3, permP2);
- srcP3 = vec_perm(srcR2, srcR3, permP3);
- } break;
- case 15: {
- vec_u8 srcR3 = vec_ld(30, src);
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = srcR2;
- srcP0 = vec_perm(srcR2, srcR3, permP0);
- srcP1 = vec_perm(srcR2, srcR3, permP1);
- srcP2 = vec_perm(srcR2, srcR3, permP2);
- srcP3 = vec_perm(srcR2, srcR3, permP3);
- } break;
- }
-
- srcP0A = (vec_s16) vec_mergeh(zero_u8v, srcP0);
- srcP0B = (vec_s16) vec_mergel(zero_u8v, srcP0);
- srcP1A = (vec_s16) vec_mergeh(zero_u8v, srcP1);
- srcP1B = (vec_s16) vec_mergel(zero_u8v, srcP1);
-
- srcP2A = (vec_s16) vec_mergeh(zero_u8v, srcP2);
- srcP2B = (vec_s16) vec_mergel(zero_u8v, srcP2);
- srcP3A = (vec_s16) vec_mergeh(zero_u8v, srcP3);
- srcP3B = (vec_s16) vec_mergel(zero_u8v, srcP3);
-
- srcM1A = (vec_s16) vec_mergeh(zero_u8v, srcM1);
- srcM1B = (vec_s16) vec_mergel(zero_u8v, srcM1);
- srcM2A = (vec_s16) vec_mergeh(zero_u8v, srcM2);
- srcM2B = (vec_s16) vec_mergel(zero_u8v, srcM2);
-
- sum1A = vec_adds(srcP0A, srcP1A);
- sum1B = vec_adds(srcP0B, srcP1B);
- sum2A = vec_adds(srcM1A, srcP2A);
- sum2B = vec_adds(srcM1B, srcP2B);
- sum3A = vec_adds(srcM2A, srcP3A);
- sum3B = vec_adds(srcM2B, srcP3B);
-
- pp1A = vec_mladd(sum1A, v20ss, v16ss);
- pp1B = vec_mladd(sum1B, v20ss, v16ss);
-
- pp2A = vec_mladd(sum2A, v5ss, zero_s16v);
- pp2B = vec_mladd(sum2B, v5ss, zero_s16v);
-
- pp3A = vec_add(sum3A, pp1A);
- pp3B = vec_add(sum3B, pp1B);
-
- psumA = vec_sub(pp3A, pp2A);
- psumB = vec_sub(pp3B, pp2B);
-
- sumA = vec_sra(psumA, v5us);
- sumB = vec_sra(psumB, v5us);
-
- sum = vec_packsu(sumA, sumB);
-
- ASSERT_ALIGNED(dst);
-
- OP_U8_ALTIVEC(fsum, sum, vec_ld(0, dst));
-
- vec_st(fsum, 0, dst);
-
- src += srcStride;
- dst += dstStride;
- }
-}
-#endif
-
-/* this code assume stride % 16 == 0 */
-#ifdef PREFIX_h264_qpel16_v_lowpass_altivec
-static void PREFIX_h264_qpel16_v_lowpass_altivec(uint8_t * dst, uint8_t * src, int dstStride, int srcStride) {
- register int i;
-
- LOAD_ZERO;
- const vec_u8 perm = vec_lvsl(0, src);
- const vec_s16 v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
- const vec_u16 v5us = vec_splat_u16(5);
- const vec_s16 v5ss = vec_splat_s16(5);
- const vec_s16 v16ss = vec_sl(vec_splat_s16(1),vec_splat_u16(4));
-
- uint8_t *srcbis = src - (srcStride * 2);
-
- const vec_u8 srcM2a = vec_ld(0, srcbis);
- const vec_u8 srcM2b = vec_ld(16, srcbis);
- const vec_u8 srcM2 = vec_perm(srcM2a, srcM2b, perm);
- //srcbis += srcStride;
- const vec_u8 srcM1a = vec_ld(0, srcbis += srcStride);
- const vec_u8 srcM1b = vec_ld(16, srcbis);
- const vec_u8 srcM1 = vec_perm(srcM1a, srcM1b, perm);
- //srcbis += srcStride;
- const vec_u8 srcP0a = vec_ld(0, srcbis += srcStride);
- const vec_u8 srcP0b = vec_ld(16, srcbis);
- const vec_u8 srcP0 = vec_perm(srcP0a, srcP0b, perm);
- //srcbis += srcStride;
- const vec_u8 srcP1a = vec_ld(0, srcbis += srcStride);
- const vec_u8 srcP1b = vec_ld(16, srcbis);
- const vec_u8 srcP1 = vec_perm(srcP1a, srcP1b, perm);
- //srcbis += srcStride;
- const vec_u8 srcP2a = vec_ld(0, srcbis += srcStride);
- const vec_u8 srcP2b = vec_ld(16, srcbis);
- const vec_u8 srcP2 = vec_perm(srcP2a, srcP2b, perm);
- //srcbis += srcStride;
-
- vec_s16 srcM2ssA = (vec_s16) vec_mergeh(zero_u8v, srcM2);
- vec_s16 srcM2ssB = (vec_s16) vec_mergel(zero_u8v, srcM2);
- vec_s16 srcM1ssA = (vec_s16) vec_mergeh(zero_u8v, srcM1);
- vec_s16 srcM1ssB = (vec_s16) vec_mergel(zero_u8v, srcM1);
- vec_s16 srcP0ssA = (vec_s16) vec_mergeh(zero_u8v, srcP0);
- vec_s16 srcP0ssB = (vec_s16) vec_mergel(zero_u8v, srcP0);
- vec_s16 srcP1ssA = (vec_s16) vec_mergeh(zero_u8v, srcP1);
- vec_s16 srcP1ssB = (vec_s16) vec_mergel(zero_u8v, srcP1);
- vec_s16 srcP2ssA = (vec_s16) vec_mergeh(zero_u8v, srcP2);
- vec_s16 srcP2ssB = (vec_s16) vec_mergel(zero_u8v, srcP2);
-
- vec_s16 pp1A, pp1B, pp2A, pp2B, pp3A, pp3B,
- psumA, psumB, sumA, sumB,
- srcP3ssA, srcP3ssB,
- sum1A, sum1B, sum2A, sum2B, sum3A, sum3B;
-
- vec_u8 sum, fsum, srcP3a, srcP3b, srcP3;
-
- for (i = 0 ; i < 16 ; i++) {
- srcP3a = vec_ld(0, srcbis += srcStride);
- srcP3b = vec_ld(16, srcbis);
- srcP3 = vec_perm(srcP3a, srcP3b, perm);
- srcP3ssA = (vec_s16) vec_mergeh(zero_u8v, srcP3);
- srcP3ssB = (vec_s16) vec_mergel(zero_u8v, srcP3);
- //srcbis += srcStride;
-
- sum1A = vec_adds(srcP0ssA, srcP1ssA);
- sum1B = vec_adds(srcP0ssB, srcP1ssB);
- sum2A = vec_adds(srcM1ssA, srcP2ssA);
- sum2B = vec_adds(srcM1ssB, srcP2ssB);
- sum3A = vec_adds(srcM2ssA, srcP3ssA);
- sum3B = vec_adds(srcM2ssB, srcP3ssB);
-
- srcM2ssA = srcM1ssA;
- srcM2ssB = srcM1ssB;
- srcM1ssA = srcP0ssA;
- srcM1ssB = srcP0ssB;
- srcP0ssA = srcP1ssA;
- srcP0ssB = srcP1ssB;
- srcP1ssA = srcP2ssA;
- srcP1ssB = srcP2ssB;
- srcP2ssA = srcP3ssA;
- srcP2ssB = srcP3ssB;
-
- pp1A = vec_mladd(sum1A, v20ss, v16ss);
- pp1B = vec_mladd(sum1B, v20ss, v16ss);
-
- pp2A = vec_mladd(sum2A, v5ss, zero_s16v);
- pp2B = vec_mladd(sum2B, v5ss, zero_s16v);
-
- pp3A = vec_add(sum3A, pp1A);
- pp3B = vec_add(sum3B, pp1B);
-
- psumA = vec_sub(pp3A, pp2A);
- psumB = vec_sub(pp3B, pp2B);
-
- sumA = vec_sra(psumA, v5us);
- sumB = vec_sra(psumB, v5us);
-
- sum = vec_packsu(sumA, sumB);
-
- ASSERT_ALIGNED(dst);
-
- OP_U8_ALTIVEC(fsum, sum, vec_ld(0, dst));
-
- vec_st(fsum, 0, dst);
-
- dst += dstStride;
- }
-}
-#endif
-
-/* this code assume stride % 16 == 0 *and* tmp is properly aligned */
-#ifdef PREFIX_h264_qpel16_hv_lowpass_altivec
-static void PREFIX_h264_qpel16_hv_lowpass_altivec(uint8_t * dst, int16_t * tmp, uint8_t * src, int dstStride, int tmpStride, int srcStride) {
- register int i;
- LOAD_ZERO;
- const vec_u8 permM2 = vec_lvsl(-2, src);
- const vec_u8 permM1 = vec_lvsl(-1, src);
- const vec_u8 permP0 = vec_lvsl(+0, src);
- const vec_u8 permP1 = vec_lvsl(+1, src);
- const vec_u8 permP2 = vec_lvsl(+2, src);
- const vec_u8 permP3 = vec_lvsl(+3, src);
- const vec_s16 v20ss = vec_sl(vec_splat_s16(5),vec_splat_u16(2));
- const vec_u32 v10ui = vec_splat_u32(10);
- const vec_s16 v5ss = vec_splat_s16(5);
- const vec_s16 v1ss = vec_splat_s16(1);
- const vec_s32 v512si = vec_sl(vec_splat_s32(1),vec_splat_u32(9));
- const vec_u32 v16ui = vec_sl(vec_splat_u32(1),vec_splat_u32(4));
-
- register int align = ((((unsigned long)src) - 2) % 16);
-
- vec_s16 srcP0A, srcP0B, srcP1A, srcP1B,
- srcP2A, srcP2B, srcP3A, srcP3B,
- srcM1A, srcM1B, srcM2A, srcM2B,
- sum1A, sum1B, sum2A, sum2B, sum3A, sum3B,
- pp1A, pp1B, pp2A, pp2B, psumA, psumB;
-
- const vec_u8 mperm = (const vec_u8)
- {0x00, 0x08, 0x01, 0x09, 0x02, 0x0A, 0x03, 0x0B,
- 0x04, 0x0C, 0x05, 0x0D, 0x06, 0x0E, 0x07, 0x0F};
- int16_t *tmpbis = tmp;
-
- vec_s16 tmpM1ssA, tmpM1ssB, tmpM2ssA, tmpM2ssB,
- tmpP0ssA, tmpP0ssB, tmpP1ssA, tmpP1ssB,
- tmpP2ssA, tmpP2ssB;
-
- vec_s32 pp1Ae, pp1Ao, pp1Be, pp1Bo, pp2Ae, pp2Ao, pp2Be, pp2Bo,
- pp3Ae, pp3Ao, pp3Be, pp3Bo, pp1cAe, pp1cAo, pp1cBe, pp1cBo,
- pp32Ae, pp32Ao, pp32Be, pp32Bo, sumAe, sumAo, sumBe, sumBo,
- ssumAe, ssumAo, ssumBe, ssumBo;
- vec_u8 fsum, sumv, sum;
- vec_s16 ssume, ssumo;
-
- src -= (2 * srcStride);
- for (i = 0 ; i < 21 ; i ++) {
- vec_u8 srcM2, srcM1, srcP0, srcP1, srcP2, srcP3;
- vec_u8 srcR1 = vec_ld(-2, src);
- vec_u8 srcR2 = vec_ld(14, src);
-
- switch (align) {
- default: {
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = vec_perm(srcR1, srcR2, permP0);
- srcP1 = vec_perm(srcR1, srcR2, permP1);
- srcP2 = vec_perm(srcR1, srcR2, permP2);
- srcP3 = vec_perm(srcR1, srcR2, permP3);
- } break;
- case 11: {
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = vec_perm(srcR1, srcR2, permP0);
- srcP1 = vec_perm(srcR1, srcR2, permP1);
- srcP2 = vec_perm(srcR1, srcR2, permP2);
- srcP3 = srcR2;
- } break;
- case 12: {
- vec_u8 srcR3 = vec_ld(30, src);
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = vec_perm(srcR1, srcR2, permP0);
- srcP1 = vec_perm(srcR1, srcR2, permP1);
- srcP2 = srcR2;
- srcP3 = vec_perm(srcR2, srcR3, permP3);
- } break;
- case 13: {
- vec_u8 srcR3 = vec_ld(30, src);
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = vec_perm(srcR1, srcR2, permP0);
- srcP1 = srcR2;
- srcP2 = vec_perm(srcR2, srcR3, permP2);
- srcP3 = vec_perm(srcR2, srcR3, permP3);
- } break;
- case 14: {
- vec_u8 srcR3 = vec_ld(30, src);
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = vec_perm(srcR1, srcR2, permM1);
- srcP0 = srcR2;
- srcP1 = vec_perm(srcR2, srcR3, permP1);
- srcP2 = vec_perm(srcR2, srcR3, permP2);
- srcP3 = vec_perm(srcR2, srcR3, permP3);
- } break;
- case 15: {
- vec_u8 srcR3 = vec_ld(30, src);
- srcM2 = vec_perm(srcR1, srcR2, permM2);
- srcM1 = srcR2;
- srcP0 = vec_perm(srcR2, srcR3, permP0);
- srcP1 = vec_perm(srcR2, srcR3, permP1);
- srcP2 = vec_perm(srcR2, srcR3, permP2);
- srcP3 = vec_perm(srcR2, srcR3, permP3);
- } break;
- }
-
- srcP0A = (vec_s16) vec_mergeh(zero_u8v, srcP0);
- srcP0B = (vec_s16) vec_mergel(zero_u8v, srcP0);
- srcP1A = (vec_s16) vec_mergeh(zero_u8v, srcP1);
- srcP1B = (vec_s16) vec_mergel(zero_u8v, srcP1);
-
- srcP2A = (vec_s16) vec_mergeh(zero_u8v, srcP2);
- srcP2B = (vec_s16) vec_mergel(zero_u8v, srcP2);
- srcP3A = (vec_s16) vec_mergeh(zero_u8v, srcP3);
- srcP3B = (vec_s16) vec_mergel(zero_u8v, srcP3);
-
- srcM1A = (vec_s16) vec_mergeh(zero_u8v, srcM1);
- srcM1B = (vec_s16) vec_mergel(zero_u8v, srcM1);
- srcM2A = (vec_s16) vec_mergeh(zero_u8v, srcM2);
- srcM2B = (vec_s16) vec_mergel(zero_u8v, srcM2);
-
- sum1A = vec_adds(srcP0A, srcP1A);
- sum1B = vec_adds(srcP0B, srcP1B);
- sum2A = vec_adds(srcM1A, srcP2A);
- sum2B = vec_adds(srcM1B, srcP2B);
- sum3A = vec_adds(srcM2A, srcP3A);
- sum3B = vec_adds(srcM2B, srcP3B);
-
- pp1A = vec_mladd(sum1A, v20ss, sum3A);
- pp1B = vec_mladd(sum1B, v20ss, sum3B);
-
- pp2A = vec_mladd(sum2A, v5ss, zero_s16v);
- pp2B = vec_mladd(sum2B, v5ss, zero_s16v);
-
- psumA = vec_sub(pp1A, pp2A);
- psumB = vec_sub(pp1B, pp2B);
-
- vec_st(psumA, 0, tmp);
- vec_st(psumB, 16, tmp);
-
- src += srcStride;
-        tmp += tmpStride; /* tmp is int16_t * and tmpStride is 16, so this advances one 32-byte row */
- }
-
- tmpM2ssA = vec_ld(0, tmpbis);
- tmpM2ssB = vec_ld(16, tmpbis);
- tmpbis += tmpStride;
- tmpM1ssA = vec_ld(0, tmpbis);
- tmpM1ssB = vec_ld(16, tmpbis);
- tmpbis += tmpStride;
- tmpP0ssA = vec_ld(0, tmpbis);
- tmpP0ssB = vec_ld(16, tmpbis);
- tmpbis += tmpStride;
- tmpP1ssA = vec_ld(0, tmpbis);
- tmpP1ssB = vec_ld(16, tmpbis);
- tmpbis += tmpStride;
- tmpP2ssA = vec_ld(0, tmpbis);
- tmpP2ssB = vec_ld(16, tmpbis);
- tmpbis += tmpStride;
-
- for (i = 0 ; i < 16 ; i++) {
- const vec_s16 tmpP3ssA = vec_ld(0, tmpbis);
- const vec_s16 tmpP3ssB = vec_ld(16, tmpbis);
-
- const vec_s16 sum1A = vec_adds(tmpP0ssA, tmpP1ssA);
- const vec_s16 sum1B = vec_adds(tmpP0ssB, tmpP1ssB);
- const vec_s16 sum2A = vec_adds(tmpM1ssA, tmpP2ssA);
- const vec_s16 sum2B = vec_adds(tmpM1ssB, tmpP2ssB);
- const vec_s16 sum3A = vec_adds(tmpM2ssA, tmpP3ssA);
- const vec_s16 sum3B = vec_adds(tmpM2ssB, tmpP3ssB);
-
- tmpbis += tmpStride;
-
- tmpM2ssA = tmpM1ssA;
- tmpM2ssB = tmpM1ssB;
- tmpM1ssA = tmpP0ssA;
- tmpM1ssB = tmpP0ssB;
- tmpP0ssA = tmpP1ssA;
- tmpP0ssB = tmpP1ssB;
- tmpP1ssA = tmpP2ssA;
- tmpP1ssB = tmpP2ssB;
- tmpP2ssA = tmpP3ssA;
- tmpP2ssB = tmpP3ssB;
-
- pp1Ae = vec_mule(sum1A, v20ss);
- pp1Ao = vec_mulo(sum1A, v20ss);
- pp1Be = vec_mule(sum1B, v20ss);
- pp1Bo = vec_mulo(sum1B, v20ss);
-
- pp2Ae = vec_mule(sum2A, v5ss);
- pp2Ao = vec_mulo(sum2A, v5ss);
- pp2Be = vec_mule(sum2B, v5ss);
- pp2Bo = vec_mulo(sum2B, v5ss);
-
- pp3Ae = vec_sra((vec_s32)sum3A, v16ui);
- pp3Ao = vec_mulo(sum3A, v1ss);
- pp3Be = vec_sra((vec_s32)sum3B, v16ui);
- pp3Bo = vec_mulo(sum3B, v1ss);
-
- pp1cAe = vec_add(pp1Ae, v512si);
- pp1cAo = vec_add(pp1Ao, v512si);
- pp1cBe = vec_add(pp1Be, v512si);
- pp1cBo = vec_add(pp1Bo, v512si);
-
- pp32Ae = vec_sub(pp3Ae, pp2Ae);
- pp32Ao = vec_sub(pp3Ao, pp2Ao);
- pp32Be = vec_sub(pp3Be, pp2Be);
- pp32Bo = vec_sub(pp3Bo, pp2Bo);
-
- sumAe = vec_add(pp1cAe, pp32Ae);
- sumAo = vec_add(pp1cAo, pp32Ao);
- sumBe = vec_add(pp1cBe, pp32Be);
- sumBo = vec_add(pp1cBo, pp32Bo);
-
- ssumAe = vec_sra(sumAe, v10ui);
- ssumAo = vec_sra(sumAo, v10ui);
- ssumBe = vec_sra(sumBe, v10ui);
- ssumBo = vec_sra(sumBo, v10ui);
-
- ssume = vec_packs(ssumAe, ssumBe);
- ssumo = vec_packs(ssumAo, ssumBo);
-
- sumv = vec_packsu(ssume, ssumo);
- sum = vec_perm(sumv, sumv, mperm);
-
- ASSERT_ALIGNED(dst);
-
- OP_U8_ALTIVEC(fsum, sum, vec_ld(0, dst));
-
- vec_st(fsum, 0, dst);
-
- dst += dstStride;
- }
-}
-#endif
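The hv (2D) variant deleted above runs the same six-tap kernel twice: a horizontal pass stores unrounded 16-bit sums into tmp, then a vertical pass widens to 32 bits via vec_mule/vec_mulo, adds 512 (v512si) and shifts right by 10 (v10ui). In scalar terms, per output pixel (with T = tmpStride and av_clip_uint8 assumed):

    /* pass 1: horizontal 6-tap at full 16-bit precision, no rounding */
    tmp[x] = 20 * (src[0]  + src[1])
           -  5 * (src[-1] + src[2])
           +      (src[-2] + src[3]);
    /* pass 2: vertical 6-tap over tmp rows, 32-bit accumulation */
    sum    = 20 * (tmp[0]      + tmp[T])
           -  5 * (tmp[-T]     + tmp[2 * T])
           +      (tmp[-2 * T] + tmp[3 * T]);
    dst[x] = av_clip_uint8((sum + 512) >> 10);

The even/odd mule/mulo split exists because these second-pass products no longer fit in 16 bits.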
diff --git a/ffmpeg/libavcodec/ppc/h264chroma_init.c b/ffmpeg/libavcodec/ppc/h264chroma_init.c
index f9e2a76..921f2de 100644
--- a/ffmpeg/libavcodec/ppc/h264chroma_init.c
+++ b/ffmpeg/libavcodec/ppc/h264chroma_init.c
@@ -20,15 +20,14 @@
#include "config.h"
#include "libavutil/attributes.h"
-#include "libavcodec/h264chroma.h"
-
-#if HAVE_ALTIVEC
#include "libavutil/cpu.h"
#include "libavutil/intreadwrite.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"
+#include "libavcodec/h264chroma.h"
#include "dsputil_altivec.h"
+#if HAVE_ALTIVEC
#define PUT_OP_U8_ALTIVEC(d, s, dst) d = s
#define AVG_OP_U8_ALTIVEC(d, s, dst) d = vec_avg(dst, s)
@@ -54,11 +53,12 @@ av_cold void ff_h264chroma_init_ppc(H264ChromaContext *c, int bit_depth)
#if HAVE_ALTIVEC
const int high_bit_depth = bit_depth > 8;
- if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
+ if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
+ return;
+
if (!high_bit_depth) {
c->put_h264_chroma_pixels_tab[0] = put_h264_chroma_mc8_altivec;
c->avg_h264_chroma_pixels_tab[0] = avg_h264_chroma_mc8_altivec;
}
- }
#endif /* HAVE_ALTIVEC */
}
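The init refactor here, repeated in the files below, is purely mechanical: the nested CPU-flag test becomes an early return, so the function-pointer assignments are no longer indented inside an if block. The resulting shape, sketched with hypothetical names:

    av_cold void ff_somedsp_init_ppc(SomeDSPContext *c) /* hypothetical */
    {
    #if HAVE_ALTIVEC
        if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
            return;

        c->some_fn = some_fn_altivec; /* hypothetical pointer */
    #endif /* HAVE_ALTIVEC */
    }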
diff --git a/ffmpeg/libavcodec/ppc/hpeldsp_altivec.c b/ffmpeg/libavcodec/ppc/hpeldsp_altivec.c
index 4309d39..345ec39 100644
--- a/ffmpeg/libavcodec/ppc/hpeldsp_altivec.c
+++ b/ffmpeg/libavcodec/ppc/hpeldsp_altivec.c
@@ -21,17 +21,19 @@
*/
#include "config.h"
-#include "libavutil/cpu.h"
-#include "libavcodec/hpeldsp.h"
-#if HAVE_ALTIVEC
#if HAVE_ALTIVEC_H
#include <altivec.h>
#endif
+
+#include "libavutil/attributes.h"
+#include "libavutil/cpu.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"
+#include "libavcodec/hpeldsp.h"
#include "dsputil_altivec.h"
+#if HAVE_ALTIVEC
/* next one assumes that ((line_size % 16) == 0) */
void ff_put_pixels16_altivec(uint8_t *block, const uint8_t *pixels, ptrdiff_t line_size, int h)
{
@@ -444,21 +446,22 @@ static void avg_pixels8_xy2_altivec(uint8_t *block, const uint8_t *pixels, ptrdi
}
#endif /* HAVE_ALTIVEC */
-void ff_hpeldsp_init_ppc(HpelDSPContext* c, int flags)
+av_cold void ff_hpeldsp_init_ppc(HpelDSPContext *c, int flags)
{
#if HAVE_ALTIVEC
- int mm_flags = av_get_cpu_flags();
-
- if (mm_flags & AV_CPU_FLAG_ALTIVEC) {
- c->put_pixels_tab[0][0] = ff_put_pixels16_altivec;
- c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_altivec;
- c->avg_pixels_tab[0][0] = ff_avg_pixels16_altivec;
- c->avg_pixels_tab[1][0] = avg_pixels8_altivec;
- c->avg_pixels_tab[1][3] = avg_pixels8_xy2_altivec;
- c->put_pixels_tab[1][3] = put_pixels8_xy2_altivec;
- c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_altivec;
- c->put_pixels_tab[0][3] = put_pixels16_xy2_altivec;
- c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_altivec;
- }
+ if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
+ return;
+
+ c->avg_pixels_tab[0][0] = ff_avg_pixels16_altivec;
+ c->avg_pixels_tab[1][0] = avg_pixels8_altivec;
+ c->avg_pixels_tab[1][3] = avg_pixels8_xy2_altivec;
+
+ c->put_pixels_tab[0][0] = ff_put_pixels16_altivec;
+ c->put_pixels_tab[1][3] = put_pixels8_xy2_altivec;
+ c->put_pixels_tab[0][3] = put_pixels16_xy2_altivec;
+
+ c->put_no_rnd_pixels_tab[0][0] = ff_put_pixels16_altivec;
+ c->put_no_rnd_pixels_tab[1][3] = put_no_rnd_pixels8_xy2_altivec;
+ c->put_no_rnd_pixels_tab[0][3] = put_no_rnd_pixels16_xy2_altivec;
#endif /* HAVE_ALTIVEC */
}
diff --git a/ffmpeg/libavcodec/ppc/int_altivec.c b/ffmpeg/libavcodec/ppc/int_altivec.c
index 4386b13..d4e0c85 100644
--- a/ffmpeg/libavcodec/ppc/int_altivec.c
+++ b/ffmpeg/libavcodec/ppc/int_altivec.c
@@ -84,14 +84,12 @@ static int32_t scalarproduct_int16_altivec(const int16_t *v1, const int16_t *v2,
{
int i;
LOAD_ZERO;
- const vec_s16 *pv;
register vec_s16 vec1;
register vec_s32 res = vec_splat_s32(0), t;
int32_t ires;
for(i = 0; i < order; i += 8){
- pv = (const vec_s16*)v1;
- vec1 = vec_perm(pv[0], pv[1], vec_lvsl(0, v1));
+ vec1 = vec_unaligned_load(v1);
t = vec_msum(vec1, vec_ld(0, v2), zero_s32v);
res = vec_sums(t, res);
v1 += 8;
@@ -129,8 +127,8 @@ static int32_t scalarproduct_and_madd_int16_altivec(int16_t *v1, const int16_t *
pv1[0] = vec_mladd(t0, muls, i0);
pv1[1] = vec_mladd(t1, muls, i1);
pv1 += 2;
- v2 += 8;
- v3 += 8;
+ v2 += 16;
+ v3 += 16;
} while(--order);
res = vec_splat(vec_sums(res, zero_s32v), 3);
vec_ste(res, 0, &ires);
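Two distinct fixes land in this hunk: the hand-written lvsl/perm pair is replaced by the vec_unaligned_load() helper (presumably the same idiom, centralized in util_altivec.h), and the pointer increments in scalarproduct_and_madd_int16_altivec change from 8 to 16, since each iteration consumes two vec_s16, i.e. 16 int16_t elements. For reference, the scalar behaviour the routine must match is roughly this (a sketch of FFmpeg's C fallback, from memory):

    static int32_t scalarproduct_and_madd_int16_c(int16_t *v1, const int16_t *v2,
                                                  const int16_t *v3,
                                                  int order, int mul)
    {
        int res = 0;
        while (order--) {
            res   += *v1 * *v2++;   /* dot-product term */
            *v1++ += mul * *v3++;   /* in-place multiply-add into v1 */
        }
        return res;
    }

With 16 elements consumed per vector iteration, advancing v2/v3 by only 8 made the AltiVec version reread half of each block.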
diff --git a/ffmpeg/libavcodec/ppc/mpegaudiodec_altivec.c b/ffmpeg/libavcodec/ppc/mpegaudiodec_altivec.c
deleted file mode 100644
index 1152fd7..0000000
--- a/ffmpeg/libavcodec/ppc/mpegaudiodec_altivec.c
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * Altivec optimized MP3 decoding functions
- * Copyright (c) 2010 Vitor Sessak
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#include "dsputil_altivec.h"
-#include "libavutil/attributes.h"
-#include "libavutil/internal.h"
-#include "libavutil/ppc/util_altivec.h"
-#include "libavcodec/mpegaudiodsp.h"
-
-#define MACS(rt, ra, rb) rt+=(ra)*(rb)
-#define MLSS(rt, ra, rb) rt-=(ra)*(rb)
-
-#define SUM8(op, sum, w, p) \
-{ \
- op(sum, (w)[0 * 64], (p)[0 * 64]); \
- op(sum, (w)[1 * 64], (p)[1 * 64]); \
- op(sum, (w)[2 * 64], (p)[2 * 64]); \
- op(sum, (w)[3 * 64], (p)[3 * 64]); \
- op(sum, (w)[4 * 64], (p)[4 * 64]); \
- op(sum, (w)[5 * 64], (p)[5 * 64]); \
- op(sum, (w)[6 * 64], (p)[6 * 64]); \
- op(sum, (w)[7 * 64], (p)[7 * 64]); \
-}
-
-static void apply_window(const float *buf, const float *win1,
- const float *win2, float *sum1, float *sum2, int len)
-{
- const vector float *win1a = (const vector float *) win1;
- const vector float *win2a = (const vector float *) win2;
- const vector float *bufa = (const vector float *) buf;
- vector float *sum1a = (vector float *) sum1;
- vector float *sum2a = (vector float *) sum2;
- vector float av_uninit(v0), av_uninit(v4);
- vector float v1, v2, v3;
-
- len = len >> 2;
-
-#define MULT(a, b) \
- { \
- v1 = vec_ld(a, win1a); \
- v2 = vec_ld(b, win2a); \
- v3 = vec_ld(a, bufa); \
- v0 = vec_madd(v3, v1, v0); \
- v4 = vec_madd(v2, v3, v4); \
- }
-
- while (len--) {
- v0 = vec_xor(v0, v0);
- v4 = vec_xor(v4, v4);
-
- MULT( 0, 0);
- MULT( 256, 64);
- MULT( 512, 128);
- MULT( 768, 192);
- MULT(1024, 256);
- MULT(1280, 320);
- MULT(1536, 384);
- MULT(1792, 448);
-
- vec_st(v0, 0, sum1a);
- vec_st(v4, 0, sum2a);
- sum1a++;
- sum2a++;
- win1a++;
- win2a++;
- bufa++;
- }
-}
-
-static void apply_window_mp3(float *in, float *win, int *unused, float *out,
- int incr)
-{
- LOCAL_ALIGNED_16(float, suma, [17]);
- LOCAL_ALIGNED_16(float, sumb, [17]);
- LOCAL_ALIGNED_16(float, sumc, [17]);
- LOCAL_ALIGNED_16(float, sumd, [17]);
-
- float sum;
- int j;
- float *out2 = out + 32 * incr;
-
- /* copy to avoid wrap */
- memcpy(in + 512, in, 32 * sizeof(*in));
-
- apply_window(in + 16, win , win + 512, suma, sumc, 16);
- apply_window(in + 32, win + 48, win + 640, sumb, sumd, 16);
-
- SUM8(MLSS, suma[0], win + 32, in + 48);
-
- sumc[ 0] = 0;
- sumb[16] = 0;
- sumd[16] = 0;
-
- out[0 ] = suma[ 0];
- out += incr;
- out2 -= incr;
- for(j=1;j<16;j++) {
- *out = suma[ j] - sumd[16-j];
- *out2 = -sumb[16-j] - sumc[ j];
- out += incr;
- out2 -= incr;
- }
-
- sum = 0;
- SUM8(MLSS, sum, win + 16 + 32, in + 32);
- *out = sum;
-}
-
-av_cold void ff_mpadsp_init_altivec(MPADSPContext *s)
-{
- s->apply_window_float = apply_window_mp3;
-}
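For reference, the SUM8/MACS machinery in the file deleted above is just an 8-tap multiply-accumulate with taps spaced 64 floats apart; the MULT macro vectorized it four lanes at a time. Scalar expansion of SUM8(MACS, sum, w, p):

    float sum = 0.0f;
    int   k;
    for (k = 0; k < 8; k++)
        sum += w[64 * k] * p[64 * k];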
diff --git a/ffmpeg/libavcodec/ppc/mpegvideo_altivec.c b/ffmpeg/libavcodec/ppc/mpegvideo_altivec.c
index bf490b0..cedc1c8 100644
--- a/ffmpeg/libavcodec/ppc/mpegvideo_altivec.c
+++ b/ffmpeg/libavcodec/ppc/mpegvideo_altivec.c
@@ -24,14 +24,16 @@
#include <stdlib.h>
#include <stdio.h>
+#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"
#include "libavcodec/mpegvideo.h"
-
#include "dsputil_altivec.h"
+#if HAVE_ALTIVEC
+
/* AltiVec version of dct_unquantize_h263
this code assumes `block' is 16-byte aligned */
static void dct_unquantize_h263_altivec(MpegEncContext *s,
@@ -111,14 +113,18 @@ static void dct_unquantize_h263_altivec(MpegEncContext *s,
}
}
+#endif /* HAVE_ALTIVEC */
-av_cold void ff_MPV_common_init_altivec(MpegEncContext *s)
+av_cold void ff_MPV_common_init_ppc(MpegEncContext *s)
{
- if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC)) return;
+#if HAVE_ALTIVEC
+ if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
+ return;
if ((s->avctx->dct_algo == FF_DCT_AUTO) ||
- (s->avctx->dct_algo == FF_DCT_ALTIVEC)) {
+ (s->avctx->dct_algo == FF_DCT_ALTIVEC)) {
s->dct_unquantize_h263_intra = dct_unquantize_h263_altivec;
s->dct_unquantize_h263_inter = dct_unquantize_h263_altivec;
}
+#endif /* HAVE_ALTIVEC */
}
diff --git a/ffmpeg/libavcodec/ppc/vc1dsp_altivec.c b/ffmpeg/libavcodec/ppc/vc1dsp_altivec.c
index 9c2ad70..1b73dd0 100644
--- a/ffmpeg/libavcodec/ppc/vc1dsp_altivec.c
+++ b/ffmpeg/libavcodec/ppc/vc1dsp_altivec.c
@@ -19,11 +19,14 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"
#include "libavcodec/vc1dsp.h"
+#if HAVE_ALTIVEC
+
// main steps of 8x8 transform
#define STEP8(s0, s1, s2, s3, s4, s5, s6, s7, vec_rnd) \
do { \
@@ -335,8 +338,11 @@ static void vc1_inv_trans_8x4_altivec(uint8_t *dest, int stride, int16_t *block)
#undef OP_U8_ALTIVEC
#undef PREFIX_no_rnd_vc1_chroma_mc8_altivec
-av_cold void ff_vc1dsp_init_altivec(VC1DSPContext *dsp)
+#endif /* HAVE_ALTIVEC */
+
+av_cold void ff_vc1dsp_init_ppc(VC1DSPContext *dsp)
{
+#if HAVE_ALTIVEC
if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
return;
@@ -344,4 +350,5 @@ av_cold void ff_vc1dsp_init_altivec(VC1DSPContext *dsp)
dsp->vc1_inv_trans_8x4 = vc1_inv_trans_8x4_altivec;
dsp->put_no_rnd_vc1_chroma_pixels_tab[0] = put_no_rnd_vc1_chroma_mc8_altivec;
dsp->avg_no_rnd_vc1_chroma_pixels_tab[0] = avg_no_rnd_vc1_chroma_mc8_altivec;
+#endif /* HAVE_ALTIVEC */
}
diff --git a/ffmpeg/libavcodec/ppc/vorbisdsp_altivec.c b/ffmpeg/libavcodec/ppc/vorbisdsp_altivec.c
index 08a2b26..d243bf6 100644
--- a/ffmpeg/libavcodec/ppc/vorbisdsp_altivec.c
+++ b/ffmpeg/libavcodec/ppc/vorbisdsp_altivec.c
@@ -54,8 +54,9 @@ static void vorbis_inverse_coupling_altivec(float *mag, float *ang,
av_cold void ff_vorbisdsp_init_ppc(VorbisDSPContext *c)
{
#if HAVE_ALTIVEC
- if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
- c->vorbis_inverse_coupling = vorbis_inverse_coupling_altivec;
- }
+ if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
+ return;
+
+ c->vorbis_inverse_coupling = vorbis_inverse_coupling_altivec;
#endif /* HAVE_ALTIVEC */
}
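vorbis_inverse_coupling_altivec vectorizes the Vorbis magnitude/angle decoupling step. The scalar logic it accelerates branches on the signs of both values (reproduced from memory of FFmpeg's C version, so treat as a sketch):

    static void vorbis_inverse_coupling_c(float *mag, float *ang, intptr_t blocksize)
    {
        intptr_t i;
        for (i = 0; i < blocksize; i++) {
            if (mag[i] > 0.0f) {
                if (ang[i] > 0.0f) {
                    ang[i]  = mag[i] - ang[i];
                } else {
                    float temp = ang[i];
                    ang[i]  = mag[i];
                    mag[i] += temp;
                }
            } else {
                if (ang[i] > 0.0f) {
                    ang[i] += mag[i];
                } else {
                    float temp = ang[i];
                    ang[i]  = mag[i];
                    mag[i] -= temp;
                }
            }
        }
    }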
diff --git a/ffmpeg/libavcodec/ppc/vp3dsp_altivec.c b/ffmpeg/libavcodec/ppc/vp3dsp_altivec.c
index cc587b0..56c2d0b 100644
--- a/ffmpeg/libavcodec/ppc/vp3dsp_altivec.c
+++ b/ffmpeg/libavcodec/ppc/vp3dsp_altivec.c
@@ -23,14 +23,13 @@
#include "config.h"
#include "libavutil/attributes.h"
#include "libavutil/cpu.h"
-#include "libavcodec/vp3dsp.h"
-
-#if HAVE_ALTIVEC
-
#include "libavutil/ppc/types_altivec.h"
#include "libavutil/ppc/util_altivec.h"
+#include "libavcodec/vp3dsp.h"
#include "dsputil_altivec.h"
+#if HAVE_ALTIVEC
+
static const vec_s16 constants =
{0, 64277, 60547, 54491, 46341, 36410, 25080, 12785};
static const vec_u8 interleave_high =
@@ -181,9 +180,10 @@ static void vp3_idct_add_altivec(uint8_t *dst, int stride, int16_t block[64])
av_cold void ff_vp3dsp_init_ppc(VP3DSPContext *c, int flags)
{
#if HAVE_ALTIVEC
- if (av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC) {
- c->idct_put = vp3_idct_put_altivec;
- c->idct_add = vp3_idct_add_altivec;
- }
+ if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
+ return;
+
+ c->idct_put = vp3_idct_put_altivec;
+ c->idct_add = vp3_idct_add_altivec;
#endif
}
diff --git a/ffmpeg/libavcodec/ppc/vp8dsp_altivec.c b/ffmpeg/libavcodec/ppc/vp8dsp_altivec.c
index 14d8784..c858d8a 100644
--- a/ffmpeg/libavcodec/ppc/vp8dsp_altivec.c
+++ b/ffmpeg/libavcodec/ppc/vp8dsp_altivec.c
@@ -20,6 +20,7 @@
* Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
*/
+#include "config.h"
#include "libavutil/cpu.h"
#include "libavutil/mem.h"
#include "libavutil/ppc/types_altivec.h"
@@ -27,6 +28,7 @@
#include "libavcodec/vp8dsp.h"
#include "dsputil_altivec.h"
+#if HAVE_ALTIVEC
#define REPT4(...) { __VA_ARGS__, __VA_ARGS__, __VA_ARGS__, __VA_ARGS__ }
// h subpel filter uses msum to multiply+add 4 pixel taps at once
@@ -239,15 +241,15 @@ void put_vp8_epel ## WIDTH ## _v ## TAPS ## _altivec(uint8_t *dst, ptrdiff_t dst
}
#define EPEL_HV(WIDTH, HTAPS, VTAPS) \
-static void put_vp8_epel ## WIDTH ## _h ## HTAPS ## v ## VTAPS ## _altivec(uint8_t *dst, ptrdiff_t stride, uint8_t *src, ptrdiff_t s, int h, int mx, int my) \
+static void put_vp8_epel ## WIDTH ## _h ## HTAPS ## v ## VTAPS ## _altivec(uint8_t *dst, ptrdiff_t dstride, uint8_t *src, ptrdiff_t sstride, int h, int mx, int my) \
{ \
DECLARE_ALIGNED(16, uint8_t, tmp)[(2*WIDTH+5)*16]; \
if (VTAPS == 6) { \
- put_vp8_epel ## WIDTH ## _h ## HTAPS ## _altivec(tmp, 16, src-2*stride, stride, h+5, mx, my); \
- put_vp8_epel ## WIDTH ## _v ## VTAPS ## _altivec(dst, stride, tmp+2*16, 16, h, mx, my); \
+ put_vp8_epel ## WIDTH ## _h ## HTAPS ## _altivec(tmp, 16, src-2*sstride, sstride, h+5, mx, my); \
+ put_vp8_epel ## WIDTH ## _v ## VTAPS ## _altivec(dst, dstride, tmp+2*16, 16, h, mx, my); \
} else { \
- put_vp8_epel ## WIDTH ## _h ## HTAPS ## _altivec(tmp, 16, src-stride, stride, h+4, mx, my); \
- put_vp8_epel ## WIDTH ## _v ## VTAPS ## _altivec(dst, stride, tmp+16, 16, h, mx, my); \
+ put_vp8_epel ## WIDTH ## _h ## HTAPS ## _altivec(tmp, 16, src-sstride, sstride, h+4, mx, my); \
+ put_vp8_epel ## WIDTH ## _v ## VTAPS ## _altivec(dst, dstride, tmp+16, 16, h, mx, my); \
} \
}
@@ -267,13 +269,51 @@ EPEL_HV(4, 4,6)
EPEL_HV(4, 6,4)
EPEL_HV(4, 4,4)
-static void put_vp8_pixels16_altivec(uint8_t *dst, ptrdiff_t stride, uint8_t *src, ptrdiff_t s, int h, int mx, int my)
+static void put_vp8_pixels16_altivec(uint8_t *dst, ptrdiff_t dstride, uint8_t *src, ptrdiff_t sstride, int h, int mx, int my)
{
- ff_put_pixels16_altivec(dst, src, stride, h);
+ register vector unsigned char pixelsv1, pixelsv2;
+ register vector unsigned char pixelsv1B, pixelsv2B;
+ register vector unsigned char pixelsv1C, pixelsv2C;
+ register vector unsigned char pixelsv1D, pixelsv2D;
+
+ register vector unsigned char perm = vec_lvsl(0, src);
+ int i;
+ register ptrdiff_t dstride2 = dstride << 1, sstride2 = sstride << 1;
+ register ptrdiff_t dstride3 = dstride2 + dstride, sstride3 = sstride + sstride2;
+ register ptrdiff_t dstride4 = dstride << 2, sstride4 = sstride << 2;
+
+// hand-unrolling the loop by 4 gains about 15%:
+// minimum execution time goes from 74 to 60 cycles.
+// it's faster than -funroll-loops, but combining
+// -funroll-loops with this is bad - 74 cycles again.
+// all these timings are on a 7450, tuning for the 7450.
+ for (i = 0; i < h; i += 4) {
+ pixelsv1 = vec_ld( 0, src);
+ pixelsv2 = vec_ld(15, src);
+ pixelsv1B = vec_ld(sstride, src);
+ pixelsv2B = vec_ld(15 + sstride, src);
+ pixelsv1C = vec_ld(sstride2, src);
+ pixelsv2C = vec_ld(15 + sstride2, src);
+ pixelsv1D = vec_ld(sstride3, src);
+ pixelsv2D = vec_ld(15 + sstride3, src);
+ vec_st(vec_perm(pixelsv1, pixelsv2, perm),
+ 0, (unsigned char*)dst);
+ vec_st(vec_perm(pixelsv1B, pixelsv2B, perm),
+ dstride, (unsigned char*)dst);
+ vec_st(vec_perm(pixelsv1C, pixelsv2C, perm),
+ dstride2, (unsigned char*)dst);
+ vec_st(vec_perm(pixelsv1D, pixelsv2D, perm),
+ dstride3, (unsigned char*)dst);
+ src += sstride4;
+ dst += dstride4;
+ }
}
-av_cold void ff_vp8dsp_init_altivec(VP8DSPContext *c)
+#endif /* HAVE_ALTIVEC */
+
+av_cold void ff_vp8dsp_init_ppc(VP8DSPContext *c)
{
+#if HAVE_ALTIVEC
if (!(av_get_cpu_flags() & AV_CPU_FLAG_ALTIVEC))
return;
@@ -301,4 +341,5 @@ av_cold void ff_vp8dsp_init_altivec(VP8DSPContext *c)
c->put_vp8_epel_pixels_tab[2][1][1] = put_vp8_epel4_h4v4_altivec;
c->put_vp8_epel_pixels_tab[2][1][2] = put_vp8_epel4_h6v4_altivec;
c->put_vp8_epel_pixels_tab[2][2][1] = put_vp8_epel4_h4v6_altivec;
+#endif /* HAVE_ALTIVEC */
}
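The put_vp8_pixels16_altivec rewrite above fixes a genuine bug: the old one-line forward to ff_put_pixels16_altivec() passed a single stride for both buffers, while VP8's MC prototype carries distinct dst and src strides. Functionally, the new 4x-unrolled copy reduces to this scalar loop (mx/my are unused, kept only for the prototype; needs <string.h>):

    static void put_vp8_pixels16_c(uint8_t *dst, ptrdiff_t dstride,
                                   uint8_t *src, ptrdiff_t sstride,
                                   int h, int mx, int my)
    {
        int y;
        for (y = 0; y < h; y++) {
            memcpy(dst, src, 16);
            dst += dstride;
            src += sstride;
        }
    }

The AltiVec version additionally relies on dst being 16-byte aligned (vec_st truncates addresses to a 16-byte boundary) and handles unaligned src through the lvsl permute.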