path: silk/silk_SigProc_FIX.h
author     Jean-Marc Valin <jmvalin@jmvalin.ca>  2011-09-16 00:58:26 -0700
committer  Jean-Marc Valin <jmvalin@jmvalin.ca>  2011-09-16 00:58:26 -0700
commit     fb3a437c9dabb4aafe4a3927158161590ed745ab (patch)
tree       8677d28fbacc7132286a6779d7e8657440a15b18 /silk/silk_SigProc_FIX.h
parent     c0f050e7439ef93e256d35377ef20954a0d13b8f (diff)
download   opus-fb3a437c9dabb4aafe4a3927158161590ed745ab.tar.gz
Renaming the SKP_ prefix to silk_
Diffstat (limited to 'silk/silk_SigProc_FIX.h')
-rw-r--r--  silk/silk_SigProc_FIX.h | 226
1 file changed, 113 insertions(+), 113 deletions(-)
diff --git a/silk/silk_SigProc_FIX.h b/silk/silk_SigProc_FIX.h
index e015775d..7514eb76 100644
--- a/silk/silk_SigProc_FIX.h
+++ b/silk/silk_SigProc_FIX.h
@@ -33,7 +33,7 @@ extern "C"
{
#endif
-/*#define SKP_MACRO_COUNT */ /* Used to enable WMOPS counting */
+/*#define silk_MACRO_COUNT */ /* Used to enable WMOPS counting */
#define SILK_MAX_ORDER_LPC 16 /* max order of the LPC analysis in schur() and k2a() */
@@ -408,206 +408,206 @@ static inline opus_int32 silk_ROR32( opus_int32 a32, opus_int rot )
/* Allocate opus_int16 aligned to a 4-byte memory address */
#if EMBEDDED_ARM
-#define SKP_DWORD_ALIGN __attribute__((aligned(4)))
+#define silk_DWORD_ALIGN __attribute__((aligned(4)))
#else
-#define SKP_DWORD_ALIGN
+#define silk_DWORD_ALIGN
#endif
/* Useful Macros that can be adjusted to other platforms */
-#define SKP_memcpy(a, b, c) memcpy((a), (b), (c)) /* Dest, Src, ByteCount */
-#define SKP_memset(a, b, c) memset((a), (b), (c)) /* Dest, value, ByteCount */
-#define SKP_memmove(a, b, c) memmove((a), (b), (c)) /* Dest, Src, ByteCount */
+#define silk_memcpy(a, b, c) memcpy((a), (b), (c)) /* Dest, Src, ByteCount */
+#define silk_memset(a, b, c) memset((a), (b), (c)) /* Dest, value, ByteCount */
+#define silk_memmove(a, b, c) memmove((a), (b), (c)) /* Dest, Src, ByteCount */
/* fixed point macros */
/* (a32 * b32) output must fit in a 32-bit int */
-#define SKP_MUL(a32, b32) ((a32) * (b32))
+#define silk_MUL(a32, b32) ((a32) * (b32))
/* (a32 * b32) output must fit in a 32-bit uint */
-#define SKP_MUL_uint(a32, b32) SKP_MUL(a32, b32)
+#define silk_MUL_uint(a32, b32) silk_MUL(a32, b32)
/* a32 + (b32 * c32) output must fit in a 32-bit int */
-#define SKP_MLA(a32, b32, c32) SKP_ADD32((a32),((b32) * (c32)))
+#define silk_MLA(a32, b32, c32) silk_ADD32((a32),((b32) * (c32)))
/* a32 + (b32 * c32) output must fit in a 32-bit uint */
-#define SKP_MLA_uint(a32, b32, c32) SKP_MLA(a32, b32, c32)
+#define silk_MLA_uint(a32, b32, c32) silk_MLA(a32, b32, c32)
/* ((a32 >> 16) * (b32 >> 16)) output must fit in a 32-bit int */
-#define SKP_SMULTT(a32, b32) (((a32) >> 16) * ((b32) >> 16))
+#define silk_SMULTT(a32, b32) (((a32) >> 16) * ((b32) >> 16))
/* a32 + ((b32 >> 16) * (c32 >> 16)) output must fit in a 32-bit int */
-#define SKP_SMLATT(a32, b32, c32) SKP_ADD32((a32),((b32) >> 16) * ((c32) >> 16))
+#define silk_SMLATT(a32, b32, c32) silk_ADD32((a32),((b32) >> 16) * ((c32) >> 16))
-#define SKP_SMLALBB(a64, b16, c16) SKP_ADD64((a64),(opus_int64)((opus_int32)(b16) * (opus_int32)(c16)))
+#define silk_SMLALBB(a64, b16, c16) silk_ADD64((a64),(opus_int64)((opus_int32)(b16) * (opus_int32)(c16)))
/* (a32 * b32) */
-#define SKP_SMULL(a32, b32) ((opus_int64)(a32) * /*(opus_int64)*/(b32))
+#define silk_SMULL(a32, b32) ((opus_int64)(a32) * /*(opus_int64)*/(b32))
/* Adds two signed 32-bit values in a way that can overflow, while not relying on undefined behaviour
(just standard two's complement implementation-specific behaviour) */
-#define SKP_ADD32_ovflw(a, b) ((opus_int32)((opus_uint32)(a) + (opus_uint32)(b)))
+#define silk_ADD32_ovflw(a, b) ((opus_int32)((opus_uint32)(a) + (opus_uint32)(b)))
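/* Illustrative note (not part of this header): the unsigned round-trip in
   silk_ADD32_ovflw matters because signed overflow is undefined behaviour in C,
   while unsigned arithmetic wraps modulo 2^32. For example,
   silk_ADD32_ovflw( 0x7FFFFFFF, 1 ) wraps to (opus_int32)0x80000000
   instead of invoking undefined behaviour. */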
/* multiply-accumulate macros that allow overflow in the addition (i.e., no asserts in debug mode) */
-#define SKP_MLA_ovflw(a32, b32, c32) SKP_ADD32_ovflw((a32),(opus_uint32)(b32) * (opus_uint32)(c32))
-#ifndef SKP_SMLABB_ovflw
-# define SKP_SMLABB_ovflw(a32, b32, c32) SKP_ADD32_ovflw((a32), (opus_int32)((opus_int16)(b32)) * (opus_int32)((opus_int16)(c32)))
+#define silk_MLA_ovflw(a32, b32, c32) silk_ADD32_ovflw((a32),(opus_uint32)(b32) * (opus_uint32)(c32))
+#ifndef silk_SMLABB_ovflw
+# define silk_SMLABB_ovflw(a32, b32, c32) silk_ADD32_ovflw((a32), (opus_int32)((opus_int16)(b32)) * (opus_int32)((opus_int16)(c32)))
#endif
-#define SKP_DIV32_16(a32, b16) ((opus_int32)((a32) / (b16)))
-#define SKP_DIV32(a32, b32) ((opus_int32)((a32) / (b32)))
+#define silk_DIV32_16(a32, b16) ((opus_int32)((a32) / (b16)))
+#define silk_DIV32(a32, b32) ((opus_int32)((a32) / (b32)))
/* These macros enable checking for overflow in silk_API_Debug.h */
-#define SKP_ADD16(a, b) ((a) + (b))
-#define SKP_ADD32(a, b) ((a) + (b))
-#define SKP_ADD64(a, b) ((a) + (b))
-
-#define SKP_SUB16(a, b) ((a) - (b))
-#define SKP_SUB32(a, b) ((a) - (b))
-#define SKP_SUB64(a, b) ((a) - (b))
-
-#define SKP_SAT8(a) ((a) > SKP_int8_MAX ? SKP_int8_MAX : \
- ((a) < SKP_int8_MIN ? SKP_int8_MIN : (a)))
-#define SKP_SAT16(a) ((a) > SKP_int16_MAX ? SKP_int16_MAX : \
- ((a) < SKP_int16_MIN ? SKP_int16_MIN : (a)))
-#define SKP_SAT32(a) ((a) > SKP_int32_MAX ? SKP_int32_MAX : \
- ((a) < SKP_int32_MIN ? SKP_int32_MIN : (a)))
-
-#define SKP_CHECK_FIT8(a) (a)
-#define SKP_CHECK_FIT16(a) (a)
-#define SKP_CHECK_FIT32(a) (a)
-
-#define SKP_ADD_SAT16(a, b) (opus_int16)SKP_SAT16( SKP_ADD32( (opus_int32)(a), (b) ) )
-#define SKP_ADD_SAT64(a, b) ((((a) + (b)) & 0x8000000000000000LL) == 0 ? \
- ((((a) & (b)) & 0x8000000000000000LL) != 0 ? SKP_int64_MIN : (a)+(b)) : \
- ((((a) | (b)) & 0x8000000000000000LL) == 0 ? SKP_int64_MAX : (a)+(b)) )
-
-#define SKP_SUB_SAT16(a, b) (opus_int16)SKP_SAT16( SKP_SUB32( (opus_int32)(a), (b) ) )
-#define SKP_SUB_SAT64(a, b) ((((a)-(b)) & 0x8000000000000000LL) == 0 ? \
- (( (a) & ((b)^0x8000000000000000LL) & 0x8000000000000000LL) ? SKP_int64_MIN : (a)-(b)) : \
- ((((a)^0x8000000000000000LL) & (b) & 0x8000000000000000LL) ? SKP_int64_MAX : (a)-(b)) )
+#define silk_ADD16(a, b) ((a) + (b))
+#define silk_ADD32(a, b) ((a) + (b))
+#define silk_ADD64(a, b) ((a) + (b))
+
+#define silk_SUB16(a, b) ((a) - (b))
+#define silk_SUB32(a, b) ((a) - (b))
+#define silk_SUB64(a, b) ((a) - (b))
+
+#define silk_SAT8(a) ((a) > silk_int8_MAX ? silk_int8_MAX : \
+ ((a) < silk_int8_MIN ? silk_int8_MIN : (a)))
+#define silk_SAT16(a) ((a) > silk_int16_MAX ? silk_int16_MAX : \
+ ((a) < silk_int16_MIN ? silk_int16_MIN : (a)))
+#define silk_SAT32(a) ((a) > silk_int32_MAX ? silk_int32_MAX : \
+ ((a) < silk_int32_MIN ? silk_int32_MIN : (a)))
+
+#define silk_CHECK_FIT8(a) (a)
+#define silk_CHECK_FIT16(a) (a)
+#define silk_CHECK_FIT32(a) (a)
+
+#define silk_ADD_SAT16(a, b) (opus_int16)silk_SAT16( silk_ADD32( (opus_int32)(a), (b) ) )
+#define silk_ADD_SAT64(a, b) ((((a) + (b)) & 0x8000000000000000LL) == 0 ? \
+ ((((a) & (b)) & 0x8000000000000000LL) != 0 ? silk_int64_MIN : (a)+(b)) : \
+ ((((a) | (b)) & 0x8000000000000000LL) == 0 ? silk_int64_MAX : (a)+(b)) )
+
+#define silk_SUB_SAT16(a, b) (opus_int16)silk_SAT16( silk_SUB32( (opus_int32)(a), (b) ) )
+#define silk_SUB_SAT64(a, b) ((((a)-(b)) & 0x8000000000000000LL) == 0 ? \
+ (( (a) & ((b)^0x8000000000000000LL) & 0x8000000000000000LL) ? silk_int64_MIN : (a)-(b)) : \
+ ((((a)^0x8000000000000000LL) & (b) & 0x8000000000000000LL) ? silk_int64_MAX : (a)-(b)) )
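/* How the sign-bit tests above work (an illustrative note, not part of this
   header): signed addition can only overflow when both operands have the same
   sign, in which case the wrapped sum takes the opposite sign, so checking
   bit 63 of a, b and a+b is enough. For example:
   silk_ADD_SAT64( silk_int64_MAX, 1 )  clamps to silk_int64_MAX
   silk_ADD_SAT64( silk_int64_MIN, -1 ) clamps to silk_int64_MIN */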
/* Saturation for positive input values */
-#define SKP_POS_SAT32(a) ((a) > SKP_int32_MAX ? SKP_int32_MAX : (a))
+#define silk_POS_SAT32(a) ((a) > silk_int32_MAX ? silk_int32_MAX : (a))
/* Add with saturation for positive input values */
-#define SKP_ADD_POS_SAT8(a, b) ((((a)+(b)) & 0x80) ? SKP_int8_MAX : ((a)+(b)))
-#define SKP_ADD_POS_SAT16(a, b) ((((a)+(b)) & 0x8000) ? SKP_int16_MAX : ((a)+(b)))
-#define SKP_ADD_POS_SAT32(a, b) ((((a)+(b)) & 0x80000000) ? SKP_int32_MAX : ((a)+(b)))
-#define SKP_ADD_POS_SAT64(a, b) ((((a)+(b)) & 0x8000000000000000LL) ? SKP_int64_MAX : ((a)+(b)))
-
-#define SKP_LSHIFT8(a, shift) ((a)<<(shift)) /* shift >= 0, shift < 8 */
-#define SKP_LSHIFT16(a, shift) ((a)<<(shift)) /* shift >= 0, shift < 16 */
-#define SKP_LSHIFT32(a, shift) ((a)<<(shift)) /* shift >= 0, shift < 32 */
-#define SKP_LSHIFT64(a, shift) ((a)<<(shift)) /* shift >= 0, shift < 64 */
-#define SKP_LSHIFT(a, shift) SKP_LSHIFT32(a, shift) /* shift >= 0, shift < 32 */
-
-#define SKP_RSHIFT8(a, shift) ((a)>>(shift)) /* shift >= 0, shift < 8 */
-#define SKP_RSHIFT16(a, shift) ((a)>>(shift)) /* shift >= 0, shift < 16 */
-#define SKP_RSHIFT32(a, shift) ((a)>>(shift)) /* shift >= 0, shift < 32 */
-#define SKP_RSHIFT64(a, shift) ((a)>>(shift)) /* shift >= 0, shift < 64 */
-#define SKP_RSHIFT(a, shift) SKP_RSHIFT32(a, shift) /* shift >= 0, shift < 32 */
+#define silk_ADD_POS_SAT8(a, b) ((((a)+(b)) & 0x80) ? silk_int8_MAX : ((a)+(b)))
+#define silk_ADD_POS_SAT16(a, b) ((((a)+(b)) & 0x8000) ? silk_int16_MAX : ((a)+(b)))
+#define silk_ADD_POS_SAT32(a, b) ((((a)+(b)) & 0x80000000) ? silk_int32_MAX : ((a)+(b)))
+#define silk_ADD_POS_SAT64(a, b) ((((a)+(b)) & 0x8000000000000000LL) ? silk_int64_MAX : ((a)+(b)))
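/* The masks above only work for non-negative inputs: if two such values sum
   past the type's maximum, the sign bit of the wrapped sum becomes set, which
   is exactly what each test detects (illustrative):
   silk_ADD_POS_SAT32( silk_int32_MAX, 1 ) -> silk_int32_MAX */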
+
+#define silk_LSHIFT8(a, shift) ((a)<<(shift)) /* shift >= 0, shift < 8 */
+#define silk_LSHIFT16(a, shift) ((a)<<(shift)) /* shift >= 0, shift < 16 */
+#define silk_LSHIFT32(a, shift) ((a)<<(shift)) /* shift >= 0, shift < 32 */
+#define silk_LSHIFT64(a, shift) ((a)<<(shift)) /* shift >= 0, shift < 64 */
+#define silk_LSHIFT(a, shift) silk_LSHIFT32(a, shift) /* shift >= 0, shift < 32 */
+
+#define silk_RSHIFT8(a, shift) ((a)>>(shift)) /* shift >= 0, shift < 8 */
+#define silk_RSHIFT16(a, shift) ((a)>>(shift)) /* shift >= 0, shift < 16 */
+#define silk_RSHIFT32(a, shift) ((a)>>(shift)) /* shift >= 0, shift < 32 */
+#define silk_RSHIFT64(a, shift) ((a)>>(shift)) /* shift >= 0, shift < 64 */
+#define silk_RSHIFT(a, shift) silk_RSHIFT32(a, shift) /* shift >= 0, shift < 32 */
/* saturates before shifting */
-#define SKP_LSHIFT_SAT16(a, shift) (SKP_LSHIFT16( SKP_LIMIT( (a), SKP_RSHIFT16( SKP_int16_MIN, (shift) ), \
- SKP_RSHIFT16( SKP_int16_MAX, (shift) ) ), (shift) ))
-#define SKP_LSHIFT_SAT32(a, shift) (SKP_LSHIFT32( SKP_LIMIT( (a), SKP_RSHIFT32( SKP_int32_MIN, (shift) ), \
- SKP_RSHIFT32( SKP_int32_MAX, (shift) ) ), (shift) ))
-
-#define SKP_LSHIFT_ovflw(a, shift) ((a)<<(shift)) /* shift >= 0, allowed to overflow */
-#define SKP_LSHIFT_uint(a, shift) ((a)<<(shift)) /* shift >= 0 */
-#define SKP_RSHIFT_uint(a, shift) ((a)>>(shift)) /* shift >= 0 */
-
-#define SKP_ADD_LSHIFT(a, b, shift) ((a) + SKP_LSHIFT((b), (shift))) /* shift >= 0 */
-#define SKP_ADD_LSHIFT32(a, b, shift) SKP_ADD32((a), SKP_LSHIFT32((b), (shift))) /* shift >= 0 */
-#define SKP_ADD_LSHIFT_uint(a, b, shift) ((a) + SKP_LSHIFT_uint((b), (shift))) /* shift >= 0 */
-#define SKP_ADD_RSHIFT(a, b, shift) ((a) + SKP_RSHIFT((b), (shift))) /* shift >= 0 */
-#define SKP_ADD_RSHIFT32(a, b, shift) SKP_ADD32((a), SKP_RSHIFT32((b), (shift))) /* shift >= 0 */
-#define SKP_ADD_RSHIFT_uint(a, b, shift) ((a) + SKP_RSHIFT_uint((b), (shift))) /* shift >= 0 */
-#define SKP_SUB_LSHIFT32(a, b, shift) SKP_SUB32((a), SKP_LSHIFT32((b), (shift))) /* shift >= 0 */
-#define SKP_SUB_RSHIFT32(a, b, shift) SKP_SUB32((a), SKP_RSHIFT32((b), (shift))) /* shift >= 0 */
+#define silk_LSHIFT_SAT16(a, shift) (silk_LSHIFT16( silk_LIMIT( (a), silk_RSHIFT16( silk_int16_MIN, (shift) ), \
+ silk_RSHIFT16( silk_int16_MAX, (shift) ) ), (shift) ))
+#define silk_LSHIFT_SAT32(a, shift) (silk_LSHIFT32( silk_LIMIT( (a), silk_RSHIFT32( silk_int32_MIN, (shift) ), \
+ silk_RSHIFT32( silk_int32_MAX, (shift) ) ), (shift) ))
+
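/* Worked example of the saturating shifts above (illustrative): for
   silk_LSHIFT_SAT16( 20000, 2 ), the input is first clamped to the range
   [ silk_int16_MIN >> 2, silk_int16_MAX >> 2 ] = [ -8192, 8191 ], so
   20000 -> 8191, and 8191 << 2 == 32764 then fits without overflow. */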
+#define silk_LSHIFT_ovflw(a, shift) ((a)<<(shift)) /* shift >= 0, allowed to overflow */
+#define silk_LSHIFT_uint(a, shift) ((a)<<(shift)) /* shift >= 0 */
+#define silk_RSHIFT_uint(a, shift) ((a)>>(shift)) /* shift >= 0 */
+
+#define silk_ADD_LSHIFT(a, b, shift) ((a) + silk_LSHIFT((b), (shift))) /* shift >= 0 */
+#define silk_ADD_LSHIFT32(a, b, shift) silk_ADD32((a), silk_LSHIFT32((b), (shift))) /* shift >= 0 */
+#define silk_ADD_LSHIFT_uint(a, b, shift) ((a) + silk_LSHIFT_uint((b), (shift))) /* shift >= 0 */
+#define silk_ADD_RSHIFT(a, b, shift) ((a) + silk_RSHIFT((b), (shift))) /* shift >= 0 */
+#define silk_ADD_RSHIFT32(a, b, shift) silk_ADD32((a), silk_RSHIFT32((b), (shift))) /* shift >= 0 */
+#define silk_ADD_RSHIFT_uint(a, b, shift) ((a) + silk_RSHIFT_uint((b), (shift))) /* shift >= 0 */
+#define silk_SUB_LSHIFT32(a, b, shift) silk_SUB32((a), silk_LSHIFT32((b), (shift))) /* shift >= 0 */
+#define silk_SUB_RSHIFT32(a, b, shift) silk_SUB32((a), silk_RSHIFT32((b), (shift))) /* shift >= 0 */
/* Requires that shift > 0 */
-#define SKP_RSHIFT_ROUND(a, shift) ((shift) == 1 ? ((a) >> 1) + ((a) & 1) : (((a) >> ((shift) - 1)) + 1) >> 1)
-#define SKP_RSHIFT_ROUND64(a, shift) ((shift) == 1 ? ((a) >> 1) + ((a) & 1) : (((a) >> ((shift) - 1)) + 1) >> 1)
+#define silk_RSHIFT_ROUND(a, shift) ((shift) == 1 ? ((a) >> 1) + ((a) & 1) : (((a) >> ((shift) - 1)) + 1) >> 1)
+#define silk_RSHIFT_ROUND64(a, shift) ((shift) == 1 ? ((a) >> 1) + ((a) & 1) : (((a) >> ((shift) - 1)) + 1) >> 1)
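/* Rounding behaviour of the macros above, worked through (illustrative):
   silk_RSHIFT_ROUND( 5, 1 )  == (5 >> 1) + (5 & 1)    == 3   (2.5 rounds up)
   silk_RSHIFT_ROUND( 10, 2 ) == ((10 >> 1) + 1) >> 1  == 3   (2.5 rounds up) */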
/* Number of right-shifts required to fit the multiplication */
-#define SKP_NSHIFT_MUL_32_32(a, b) ( -(31- (32-silk_CLZ32(SKP_abs(a)) + (32-silk_CLZ32(SKP_abs(b))))) )
-#define SKP_NSHIFT_MUL_16_16(a, b) ( -(15- (16-silk_CLZ16(SKP_abs(a)) + (16-silk_CLZ16(SKP_abs(b))))) )
+#define silk_NSHIFT_MUL_32_32(a, b) ( -(31- (32-silk_CLZ32(silk_abs(a)) + (32-silk_CLZ32(silk_abs(b))))) )
+#define silk_NSHIFT_MUL_16_16(a, b) ( -(15- (16-silk_CLZ16(silk_abs(a)) + (16-silk_CLZ16(silk_abs(b))))) )
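/* Reading silk_NSHIFT_MUL_32_32 (illustrative): 32 - silk_CLZ32(silk_abs(a))
   is the number of significant bits in a, so the macro evaluates to
   bits(a) + bits(b) - 31, the number of right-shifts needed before the
   product fits in 32 bits (negative when there is headroom to spare).
   E.g. for two 17-bit operands it returns 17 + 17 - 31 == 3. */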
-#define SKP_min(a, b) (((a) < (b)) ? (a) : (b))
-#define SKP_max(a, b) (((a) > (b)) ? (a) : (b))
+#define silk_min(a, b) (((a) < (b)) ? (a) : (b))
+#define silk_max(a, b) (((a) > (b)) ? (a) : (b))
/* Macro to convert floating-point constants to fixed-point */
#define SILK_FIX_CONST( C, Q ) ((opus_int32)((C) * ((opus_int64)1 << (Q)) + 0.5))
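/* For example (illustrative), converting constants to Q16:
   SILK_FIX_CONST( 0.5, 16 ) == (opus_int32)( 0.5 * 65536 + 0.5 ) == 32768
   SILK_FIX_CONST( 0.1, 16 ) == (opus_int32)( 0.1 * 65536 + 0.5 ) == 6554 */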
-/* SKP_min() versions with typecast in the function call */
-static inline opus_int SKP_min_int(opus_int a, opus_int b)
+/* silk_min() versions with typecast in the function call */
+static inline opus_int silk_min_int(opus_int a, opus_int b)
{
return (((a) < (b)) ? (a) : (b));
}
-static inline opus_int16 SKP_min_16(opus_int16 a, opus_int16 b)
+static inline opus_int16 silk_min_16(opus_int16 a, opus_int16 b)
{
return (((a) < (b)) ? (a) : (b));
}
-static inline opus_int32 SKP_min_32(opus_int32 a, opus_int32 b)
+static inline opus_int32 silk_min_32(opus_int32 a, opus_int32 b)
{
return (((a) < (b)) ? (a) : (b));
}
-static inline opus_int64 SKP_min_64(opus_int64 a, opus_int64 b)
+static inline opus_int64 silk_min_64(opus_int64 a, opus_int64 b)
{
return (((a) < (b)) ? (a) : (b));
}
-/* SKP_max() versions with typecast in the function call */
-static inline opus_int SKP_max_int(opus_int a, opus_int b)
+/* silk_max() versions with typecast in the function call */
+static inline opus_int silk_max_int(opus_int a, opus_int b)
{
return (((a) > (b)) ? (a) : (b));
}
-static inline opus_int16 SKP_max_16(opus_int16 a, opus_int16 b)
+static inline opus_int16 silk_max_16(opus_int16 a, opus_int16 b)
{
return (((a) > (b)) ? (a) : (b));
}
-static inline opus_int32 SKP_max_32(opus_int32 a, opus_int32 b)
+static inline opus_int32 silk_max_32(opus_int32 a, opus_int32 b)
{
return (((a) > (b)) ? (a) : (b));
}
-static inline opus_int64 SKP_max_64(opus_int64 a, opus_int64 b)
+static inline opus_int64 silk_max_64(opus_int64 a, opus_int64 b)
{
return (((a) > (b)) ? (a) : (b));
}
-#define SKP_LIMIT( a, limit1, limit2) ((limit1) > (limit2) ? ((a) > (limit1) ? (limit1) : ((a) < (limit2) ? (limit2) : (a))) \
+#define silk_LIMIT( a, limit1, limit2) ((limit1) > (limit2) ? ((a) > (limit1) ? (limit1) : ((a) < (limit2) ? (limit2) : (a))) \
: ((a) > (limit2) ? (limit2) : ((a) < (limit1) ? (limit1) : (a))))
-#define SKP_LIMIT_int SKP_LIMIT
-#define SKP_LIMIT_16 SKP_LIMIT
-#define SKP_LIMIT_32 SKP_LIMIT
+#define silk_LIMIT_int silk_LIMIT
+#define silk_LIMIT_16 silk_LIMIT
+#define silk_LIMIT_32 silk_LIMIT
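/* silk_LIMIT accepts its two bounds in either order (illustrative):
   silk_LIMIT(  7, 0, 5 ) == 5
   silk_LIMIT(  7, 5, 0 ) == 5
   silk_LIMIT( -3, 0, 5 ) == 0 */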
-/*#define SKP_non_neg(a) ((a) & ((-(a)) >> (8 * sizeof(a) - 1)))*/ /* doesn't seem faster than SKP_max(0, a);*/
+/*#define silk_non_neg(a) ((a) & ((-(a)) >> (8 * sizeof(a) - 1)))*/ /* doesn't seem faster than silk_max(0, a);*/
-#define SKP_abs(a) (((a) > 0) ? (a) : -(a)) /* Be careful: SKP_abs returns the wrong result when the input equals SKP_intXX_MIN */
-#define SKP_abs_int(a) (((a) ^ ((a) >> (8 * sizeof(a) - 1))) - ((a) >> (8 * sizeof(a) - 1)))
-#define SKP_abs_int32(a) (((a) ^ ((a) >> 31)) - ((a) >> 31))
-#define SKP_abs_int64(a) (((a) > 0) ? (a) : -(a))
+#define silk_abs(a) (((a) > 0) ? (a) : -(a)) /* Be careful: silk_abs returns the wrong result when the input equals silk_intXX_MIN */
+#define silk_abs_int(a) (((a) ^ ((a) >> (8 * sizeof(a) - 1))) - ((a) >> (8 * sizeof(a) - 1)))
+#define silk_abs_int32(a) (((a) ^ ((a) >> 31)) - ((a) >> 31))
+#define silk_abs_int64(a) (((a) > 0) ? (a) : -(a))
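/* Note on the branch-free variants above (illustrative): for negative a,
   (a) >> 31 is all ones, so ((a) ^ -1) - (-1) == ~a + 1 == -a; for
   non-negative a the mask is zero and a passes through unchanged. As with
   the plain version, silk_abs_int32( silk_int32_MIN ) still yields
   silk_int32_MIN, since +2^31 is not representable. */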
-#define SKP_sign(a) ((a) > 0 ? 1 : ( (a) < 0 ? -1 : 0 ))
+#define silk_sign(a) ((a) > 0 ? 1 : ( (a) < 0 ? -1 : 0 ))
-#define SKP_sqrt(a) (sqrt(a))
+#define silk_sqrt(a) (sqrt(a))
/* PSEUDO-RANDOM GENERATOR */
/* Make sure to store the result as the seed for the next call (also in between */
/* frames), otherwise the result won't be random at all. When only using some of the */
/* bits, take the most significant bits by right-shifting. */
-#define SKP_RAND(seed) (SKP_MLA_ovflw(907633515, (seed), 196314165))
+#define silk_RAND(seed) (silk_MLA_ovflw(907633515, (seed), 196314165))
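/* A minimal usage sketch for silk_RAND (illustrative; silk_RAND_example is a
   hypothetical helper, not part of this header): */
static inline opus_int32 silk_RAND_example( opus_int32 *seed )
{
    *seed = silk_RAND( *seed );      /* store the result as the next seed */
    return silk_RSHIFT( *seed, 17 ); /* keep only the most significant bits */
}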
/* Add some multiplication functions that can be easily mapped to ARM. */
-/* SKP_SMMUL: Signed top word multiply.
+/* silk_SMMUL: Signed top word multiply.
ARMv6 2 instruction cycles.
ARMv3M+ 3 instruction cycles. Use SMULL and ignore LSB registers (except xM). */
-/*#define SKP_SMMUL(a32, b32) (opus_int32)SKP_RSHIFT(SKP_SMLAL(SKP_SMULWB((a32), (b32)), (a32), SKP_RSHIFT_ROUND((b32), 16)), 16)*/
+/*#define silk_SMMUL(a32, b32) (opus_int32)silk_RSHIFT(silk_SMLAL(silk_SMULWB((a32), (b32)), (a32), silk_RSHIFT_ROUND((b32), 16)), 16)*/
/* the following seems faster on x86 */
-#define SKP_SMMUL(a32, b32) (opus_int32)SKP_RSHIFT64(SKP_SMULL((a32), (b32)), 32)
+#define silk_SMMUL(a32, b32) (opus_int32)silk_RSHIFT64(silk_SMULL((a32), (b32)), 32)
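/* What silk_SMMUL returns (illustrative): the top 32 bits of the full 64-bit
   product, i.e. (opus_int32)(((opus_int64)a32 * b32) >> 32), e.g.
   silk_SMMUL( 1 << 30, 1 << 30 ) == 1 << 28. */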
#include "silk_Inlines.h"
#include "silk_MacroCount.h"