path: root/ffmpeg1/libavcodec/x86/mathops.h
author    Tim Redfern <tim@eclectronics.org>    2013-09-05 17:55:35 +0100
committer Tim Redfern <tim@eclectronics.org>    2013-09-05 17:55:35 +0100
commit    741fb4b9e135cfb161a749db88713229038577bb (patch)
tree      08bc9925659cbcac45162bacf31dc6336d4f60b4 /ffmpeg1/libavcodec/x86/mathops.h
parent    a2e1bf3495b7bfefdaedb8fc737e969ab06df079 (diff)
making act segmenter
Diffstat (limited to 'ffmpeg1/libavcodec/x86/mathops.h')
-rw-r--r--  ffmpeg1/libavcodec/x86/mathops.h  |  130
1 file changed, 0 insertions(+), 130 deletions(-)
diff --git a/ffmpeg1/libavcodec/x86/mathops.h b/ffmpeg1/libavcodec/x86/mathops.h
deleted file mode 100644
index 79e29e6..0000000
--- a/ffmpeg1/libavcodec/x86/mathops.h
+++ /dev/null
@@ -1,130 +0,0 @@
-/*
- * simple math operations
- * Copyright (c) 2006 Michael Niedermayer <michaelni@gmx.at> et al
- *
- * This file is part of FFmpeg.
- *
- * FFmpeg is free software; you can redistribute it and/or
- * modify it under the terms of the GNU Lesser General Public
- * License as published by the Free Software Foundation; either
- * version 2.1 of the License, or (at your option) any later version.
- *
- * FFmpeg is distributed in the hope that it will be useful,
- * but WITHOUT ANY WARRANTY; without even the implied warranty of
- * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
- * Lesser General Public License for more details.
- *
- * You should have received a copy of the GNU Lesser General Public
- * License along with FFmpeg; if not, write to the Free Software
- * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
- */
-
-#ifndef AVCODEC_X86_MATHOPS_H
-#define AVCODEC_X86_MATHOPS_H
-
-#include "config.h"
-#include "libavutil/common.h"
-
-#if HAVE_INLINE_ASM
-
-#if ARCH_X86_32
-
-#define MULL MULL
-static av_always_inline av_const int MULL(int a, int b, unsigned shift)
-{
- int rt, dummy;
- __asm__ (
- "imull %3 \n\t"
- "shrdl %4, %%edx, %%eax \n\t"
- :"=a"(rt), "=d"(dummy)
- :"a"(a), "rm"(b), "ci"((uint8_t)shift)
- );
- return rt;
-}
-
-#define MULH MULH
-static av_always_inline av_const int MULH(int a, int b)
-{
- int rt, dummy;
- __asm__ (
- "imull %3"
- :"=d"(rt), "=a"(dummy)
- :"a"(a), "rm"(b)
- );
- return rt;
-}
-
-#define MUL64 MUL64
-static av_always_inline av_const int64_t MUL64(int a, int b)
-{
- int64_t rt;
- __asm__ (
- "imull %2"
- :"=A"(rt)
- :"a"(a), "rm"(b)
- );
- return rt;
-}
-
-#endif /* ARCH_X86_32 */
-
-#if HAVE_CMOV
-/* median of 3 */
-#define mid_pred mid_pred
-static inline av_const int mid_pred(int a, int b, int c)
-{
- int i=b;
- __asm__ volatile(
- "cmp %2, %1 \n\t"
- "cmovg %1, %0 \n\t"
- "cmovg %2, %1 \n\t"
- "cmp %3, %1 \n\t"
- "cmovl %3, %1 \n\t"
- "cmp %1, %0 \n\t"
- "cmovg %1, %0 \n\t"
- :"+&r"(i), "+&r"(a)
- :"r"(b), "r"(c)
- );
- return i;
-}
-#endif
-
-#if HAVE_CMOV
-#define COPY3_IF_LT(x, y, a, b, c, d)\
-__asm__ volatile(\
- "cmpl %0, %3 \n\t"\
- "cmovl %3, %0 \n\t"\
- "cmovl %4, %1 \n\t"\
- "cmovl %5, %2 \n\t"\
- : "+&r" (x), "+&r" (a), "+r" (c)\
- : "r" (y), "r" (b), "r" (d)\
-);
-#endif
-
-#define MASK_ABS(mask, level) \
- __asm__ ("cltd \n\t" \
- "xorl %1, %0 \n\t" \
- "subl %1, %0 \n\t" \
- : "+a"(level), "=&d"(mask))
-
-// avoid +32 for shift optimization (gcc should do that ...)
-#define NEG_SSR32 NEG_SSR32
-static inline int32_t NEG_SSR32( int32_t a, int8_t s){
- __asm__ ("sarl %1, %0\n\t"
- : "+r" (a)
- : "ic" ((uint8_t)(-s))
- );
- return a;
-}
-
-#define NEG_USR32 NEG_USR32
-static inline uint32_t NEG_USR32(uint32_t a, int8_t s){
- __asm__ ("shrl %1, %0\n\t"
- : "+r" (a)
- : "ic" ((uint8_t)(-s))
- );
- return a;
-}
-
-#endif /* HAVE_INLINE_ASM */
-#endif /* AVCODEC_X86_MATHOPS_H */
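
For reference, the routines deleted here are x86 inline-asm fast paths; when HAVE_INLINE_ASM is unset, FFmpeg falls back to portable C definitions in the generic libavcodec/mathops.h. A minimal standalone sketch of the same semantics (FFmpeg's av_always_inline/av_const attribute macros dropped, names kept; this is an illustration, not the exact upstream fallback code):

#include <stdint.h>

/* MULL: full 32x32 -> 64-bit product, arithmetic-shifted right.
 * The asm computes this with imull + shrdl on edx:eax. */
static inline int MULL(int a, int b, unsigned shift)
{
    return (int)(((int64_t)a * b) >> shift);
}

/* MULH: high 32 bits of the product (the %edx half after imull). */
static inline int MULH(int a, int b)
{
    return (int)(((int64_t)a * b) >> 32);
}

/* MUL64: the whole 64-bit product ("=A" binds the edx:eax pair). */
static inline int64_t MUL64(int a, int b)
{
    return (int64_t)a * b;
}

/* mid_pred: median of three; the cmov sequence above is a branch-free
 * version of max(min(a, b), min(max(a, b), c)). */
static inline int mid_pred(int a, int b, int c)
{
    if (a > b) { int t = a; a = b; b = t; }  /* ensure a <= b */
    if (b > c) b = c;                        /* b = min(b, c)  */
    return a > b ? a : b;                    /* max(a, min(b, c)) */
}

/* COPY3_IF_LT: conditionally copy three values when y < x. */
#define COPY3_IF_LT(x, y, a, b, c, d) \
    if ((y) < (x)) { (x) = (y); (a) = (b); (c) = (d); }

/* MASK_ABS: mask = sign mask of level (what cltd produces in %edx),
 * then level = (level ^ mask) - mask = |level|. */
#define MASK_ABS(mask, level) \
    do { (mask) = (level) >> 31; (level) = ((level) ^ (mask)) - (mask); } while (0)

/* NEG_SSR32 / NEG_USR32: shift right by (32 - s). The asm passes -s and
 * relies on x86 taking shift counts mod 32, so -s acts as 32 - s. */
static inline int32_t  NEG_SSR32(int32_t a, int8_t s)  { return a >> (32 - s); }
static inline uint32_t NEG_USR32(uint32_t a, int8_t s) { return a >> (32 - s); }

For example, mid_pred(3, 9, 5) returns 5, and MULH recovers the high word of a fixed-point multiply without a separate 64-bit shift, which is why the asm versions were worth hand-writing on 32-bit x86.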