ref: 7f1c6fa67dedca50363a15e45f57c97ec16cca76
parent: 81a1942707da6eba1b2a8e62a72a0e4e1e22a3c1
author: Jonathan Lennox <[email protected]>
date: Tue Dec 22 14:21:46 EST 2015
Add Neon intrinsics for Silk noise shape feedback loop.

Signed-off-by: Timothy B. Terriberry <[email protected]>
--- a/silk/NSQ.c
+++ b/silk/NSQ.c
@@ -205,7 +205,7 @@
int arch /* I Architecture */
)
{
- opus_int i, j;
+ opus_int i;
opus_int32 LTP_pred_Q13, LPC_pred_Q10, n_AR_Q12, n_LTP_Q13;
opus_int32 n_LF_Q12, r_Q10, rr_Q10, q1_Q0, q1_Q10, q2_Q10, rd1_Q20, rd2_Q20;
opus_int32 exc_Q14, LPC_exc_Q14, xq_Q14, Gain_Q10;
@@ -250,23 +250,8 @@
/* Noise shape feedback */
silk_assert( ( shapingLPCOrder & 1 ) == 0 ); /* check that order is even */
- tmp2 = psLPC_Q14[ 0 ];
- tmp1 = NSQ->sAR2_Q14[ 0 ];
- NSQ->sAR2_Q14[ 0 ] = tmp2;
- n_AR_Q12 = silk_RSHIFT( shapingLPCOrder, 1 );
- n_AR_Q12 = silk_SMLAWB( n_AR_Q12, tmp2, AR_shp_Q13[ 0 ] );
- for( j = 2; j < shapingLPCOrder; j += 2 ) {
- tmp2 = NSQ->sAR2_Q14[ j - 1 ];
- NSQ->sAR2_Q14[ j - 1 ] = tmp1;
- n_AR_Q12 = silk_SMLAWB( n_AR_Q12, tmp1, AR_shp_Q13[ j - 1 ] );
- tmp1 = NSQ->sAR2_Q14[ j + 0 ];
- NSQ->sAR2_Q14[ j + 0 ] = tmp2;
- n_AR_Q12 = silk_SMLAWB( n_AR_Q12, tmp2, AR_shp_Q13[ j ] );
- }
- NSQ->sAR2_Q14[ shapingLPCOrder - 1 ] = tmp1;
- n_AR_Q12 = silk_SMLAWB( n_AR_Q12, tmp1, AR_shp_Q13[ shapingLPCOrder - 1 ] );
+ n_AR_Q12 = silk_NSQ_noise_shape_feedback_loop(psLPC_Q14, NSQ->sAR2_Q14, AR_shp_Q13, shapingLPCOrder, arch);
- n_AR_Q12 = silk_LSHIFT32( n_AR_Q12, 1 ); /* Q11 -> Q12 */
n_AR_Q12 = silk_SMLAWB( n_AR_Q12, NSQ->sLF_AR_shp_Q14, Tilt_Q14 );
n_LF_Q12 = silk_SMULWB( NSQ->sLTP_shp_Q14[ NSQ->sLTP_shp_buf_idx - 1 ], LF_shp_Q14 );
--- a/silk/NSQ.h
+++ b/silk/NSQ.h
@@ -64,6 +64,35 @@
#define silk_noise_shape_quantizer_short_prediction(in, coef, coefRev, order, arch) ((void)arch,silk_noise_shape_quantizer_short_prediction_c(in, coef, order))
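+/* Scalar reference: shift the new sample data0[0] into the shaping filter
+   state data1 while accumulating the noise shape feedback term; the result
+   is returned in Q12. */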
+static OPUS_INLINE opus_int32 silk_NSQ_noise_shape_feedback_loop_c(const opus_int32 *data0, opus_int32 *data1, const opus_int16 *coef, opus_int order)
+{
+ opus_int32 out;
+ opus_int32 tmp1, tmp2;
+ opus_int j;
+
+ tmp2 = data0[0];
+ tmp1 = data1[0];
+ data1[0] = tmp2;
+
+ out = silk_RSHIFT(order, 1);
+ out = silk_SMLAWB(out, tmp2, coef[0]);
+
+ for (j = 2; j < order; j += 2) {
+ tmp2 = data1[j - 1];
+ data1[j - 1] = tmp1;
+ out = silk_SMLAWB(out, tmp1, coef[j - 1]);
+ tmp1 = data1[j + 0];
+ data1[j + 0] = tmp2;
+ out = silk_SMLAWB(out, tmp2, coef[j]);
+ }
+ data1[order - 1] = tmp1;
+ out = silk_SMLAWB(out, tmp1, coef[order - 1]);
+ /* Q11 -> Q12 */
+ out = silk_LSHIFT32( out, 1 );
+ return out;
+}
+
+#define silk_NSQ_noise_shape_feedback_loop(data0, data1, coef, order, arch) ((void)arch,silk_NSQ_noise_shape_feedback_loop_c(data0, data1, coef, order))
#if defined(OPUS_ARM_MAY_HAVE_NEON_INTR)
#include "arm/NSQ_neon.h"
--- a/silk/arm/NSQ_neon.c
+++ b/silk/arm/NSQ_neon.c
@@ -67,3 +67,46 @@
return out;
}
+
+
+opus_int32 silk_NSQ_noise_shape_feedback_loop_neon(const opus_int32 *data0, opus_int32 *data1, const opus_int16 *coef, opus_int order)
+{
+ opus_int32 out;
+ if (order == 8)
+ {
+ int32x4_t a00 = vdupq_n_s32(data0[0]);
+ int32x4_t a01 = vld1q_s32(data1); /* data1[0] ... [3] */
+
+ int32x4_t a0 = vextq_s32(a00, a01, 3); /* data0[0] data1[0] ...[2] */
+ int32x4_t a1 = vld1q_s32(data1 + 3); /* data1[3] ... [6] */
+
+ /*TODO: Convert these once in advance instead of once per sample, like
+ silk_noise_shape_quantizer_short_prediction_neon() does.*/
+ int16x8_t coef16 = vld1q_s16(coef);
+ int32x4_t coef0 = vmovl_s16(vget_low_s16(coef16));
+ int32x4_t coef1 = vmovl_s16(vget_high_s16(coef16));
+
+ /*This is not bit-exact with the C version, since we do not drop the
+ lower 16 bits of each multiply, but wait until the end to truncate
+ precision. This is an encoder-specific calculation (and unlike
+ silk_noise_shape_quantizer_short_prediction_neon(), is not meant to
+ simulate what the decoder will do). We still could use vqdmulhq_s32()
+ like silk_noise_shape_quantizer_short_prediction_neon() and save
+ half the multiplies, but the speed difference is not large, since we
+ then need two extra adds.*/
+ int64x2_t b0 = vmull_s32(vget_low_s32(a0), vget_low_s32(coef0));
+ int64x2_t b1 = vmlal_s32(b0, vget_high_s32(a0), vget_high_s32(coef0));
+ int64x2_t b2 = vmlal_s32(b1, vget_low_s32(a1), vget_low_s32(coef1));
+ int64x2_t b3 = vmlal_s32(b2, vget_high_s32(a1), vget_high_s32(coef1));
+
+ int64x1_t c = vadd_s64(vget_low_s64(b3), vget_high_s64(b3));
+ int64x1_t cS = vrshr_n_s64(c, 15);
+ int32x2_t d = vreinterpret_s32_s64(cS);
+
+ out = vget_lane_s32(d, 0);
+ vst1q_s32(data1, a0);
+ vst1q_s32(data1 + 4, a1);
+ return out;
+ }
+ return silk_NSQ_noise_shape_feedback_loop_c(data0, data1, coef, order);
+}
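The precision note above can be made concrete. For order 8 the NEON path keeps all eight products at 64-bit width and applies a single rounding shift by 15, while the scalar helper truncates after every silk_SMLAWB and pre-compensates with the order/2 offset, so the two differ in the last few bits. A purely illustrative scalar model of what the NEON path computes, not part of the patch:

    /* Scalar model of the order-8 NEON path: full 64-bit products, one
       rounding shift at the end (mirrors vrshr_n_s64( c, 15 )). */
    static opus_int32 feedback_loop_order8_model(const opus_int32 *data0,
        opus_int32 *data1, const opus_int16 *coef)
    {
        opus_int64 acc = 0;
        opus_int32 state[ 8 ];
        int i;

        /* New state: data0[0] followed by the old data1[0..6], exactly what
           the two vst1q_s32() calls above write back. */
        state[ 0 ] = data0[ 0 ];
        for( i = 1; i < 8; i++ ) {
            state[ i ] = data1[ i - 1 ];
        }
        for( i = 0; i < 8; i++ ) {
            acc += (opus_int64)state[ i ] * coef[ i ];
            data1[ i ] = state[ i ];
        }
        return (opus_int32)( ( acc + ( (opus_int64)1 << 14 ) ) >> 15 );
    }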
--- a/silk/arm/NSQ_neon.h
+++ b/silk/arm/NSQ_neon.h
@@ -27,6 +27,8 @@
#ifndef SILK_NSQ_NEON_H
#define SILK_NSQ_NEON_H
+#include "cpu_support.h"
+
#undef silk_short_prediction_create_arch_coef
/* For vectorized calc, reverse a_Q12 coefs, convert to 32-bit, and shift for vqdmulhq_s32. */
static OPUS_INLINE void silk_short_prediction_create_arch_coef_neon(opus_int32 *out, const opus_int16 *in, opus_int order)
@@ -76,11 +78,16 @@
opus_int32 silk_noise_shape_quantizer_short_prediction_neon(const opus_int32 *buf32, const opus_int32 *coef32, opus_int order);
+opus_int32 silk_NSQ_noise_shape_feedback_loop_neon(const opus_int32 *data0, opus_int32 *data1, const opus_int16 *coef, opus_int order);
+
#if defined(OPUS_ARM_PRESUME_NEON_INTR)
#undef silk_noise_shape_quantizer_short_prediction
#define silk_noise_shape_quantizer_short_prediction(in, coef, coefRev, order, arch) \
((void)arch,silk_noise_shape_quantizer_short_prediction_neon(in, coefRev, order))
+#undef silk_NSQ_noise_shape_feedback_loop
+#define silk_NSQ_noise_shape_feedback_loop(data0, data1, coef, order, arch) ((void)arch,silk_NSQ_noise_shape_feedback_loop_neon(data0, data1, coef, order))
+
#elif defined(OPUS_HAVE_RTCD) && defined(OPUS_ARM_MAY_HAVE_NEON_INTR)
/* silk_noise_shape_quantizer_short_prediction implementations take different parameters based on arch
@@ -91,6 +98,15 @@
silk_noise_shape_quantizer_short_prediction_neon(in, coefRev, order) : \
silk_noise_shape_quantizer_short_prediction_c(in, coef, order))
+extern opus_int32
+ (*const SILK_NSQ_NOISE_SHAPE_FEEDBACK_LOOP_IMPL[OPUS_ARCHMASK+1])(
+ const opus_int32 *data0, opus_int32 *data1, const opus_int16 *coef,
+ opus_int order);
+
+#undef silk_NSQ_noise_shape_feedback_loop
+#define silk_NSQ_noise_shape_feedback_loop(data0, data1, coef, order, arch) \
+ (SILK_NSQ_NOISE_SHAPE_FEEDBACK_LOOP_IMPL[(arch)&OPUS_ARCHMASK](data0, data1, \
+ coef, order))
#endif
--- /dev/null
+++ b/silk/arm/arm_silk_map.c
@@ -1,0 +1,55 @@
+/***********************************************************************
+Copyright (C) 2014 Vidyo
+Redistribution and use in source and binary forms, with or without
+modification, are permitted provided that the following conditions
+are met:
+- Redistributions of source code must retain the above copyright notice,
+this list of conditions and the following disclaimer.
+- Redistributions in binary form must reproduce the above copyright
+notice, this list of conditions and the following disclaimer in the
+documentation and/or other materials provided with the distribution.
+- Neither the name of Internet Society, IETF or IETF Trust, nor the
+names of specific contributors, may be used to endorse or promote
+products derived from this software without specific prior written
+permission.
+THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
+AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
+ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
+LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
+CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
+SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
+INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
+CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
+ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGE.
+***********************************************************************/
+#ifdef HAVE_CONFIG_H
+# include "config.h"
+#endif
+
+#include "NSQ.h"
+
+#if defined(OPUS_HAVE_RTCD)
+
+# if (defined(OPUS_ARM_MAY_HAVE_NEON_INTR) && \
+ !defined(OPUS_ARM_PRESUME_NEON_INTR))
+
+/*There is no table for silk_noise_shape_quantizer_short_prediction because the
+ NEON version takes different parameters than the C version.
+ Instead RTCD is done via if statements at the call sites.
+ See NSQ_neon.h for details.*/
+
+opus_int32
+ (*const SILK_NSQ_NOISE_SHAPE_FEEDBACK_LOOP_IMPL[OPUS_ARCHMASK+1])(
+ const opus_int32 *data0, opus_int32 *data1, const opus_int16 *coef,
+ opus_int order) = {
+ silk_NSQ_noise_shape_feedback_loop_c, /* ARMv4 */
+ silk_NSQ_noise_shape_feedback_loop_c, /* EDSP */
+ silk_NSQ_noise_shape_feedback_loop_c, /* Media */
+ silk_NSQ_noise_shape_feedback_loop_neon, /* NEON */
+};
+
+# endif
+
+#endif /* OPUS_HAVE_RTCD */
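The table entries follow the ARMv4/EDSP/Media/NEON ordering used by the rest of the Opus ARM RTCD code, so the arch index produced at runtime selects the NEON entry only when the CPU actually supports NEON. An illustrative caller-side sketch, not part of the patch, assuming arch comes from the existing opus_select_arch() helper in cpu_support.h (NSQ.c itself receives arch as a parameter, as shown in the first hunk above):

    #include "NSQ.h"
    #include "cpu_support.h"

    /* Hypothetical wrapper: on a non-RTCD build opus_select_arch() returns 0
       and the C implementation is used; on an ARM RTCD build a NEON-capable
       CPU picks silk_NSQ_noise_shape_feedback_loop_neon from the table. */
    static opus_int32 feedback_loop_rtcd_demo( const opus_int32 *psLPC_Q14,
        opus_int32 *sAR2_Q14, const opus_int16 *AR_shp_Q13, opus_int order )
    {
        int arch = opus_select_arch();
        return silk_NSQ_noise_shape_feedback_loop( psLPC_Q14, sAR2_Q14,
                                                   AR_shp_Q13, order, arch );
    }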
--- a/silk_sources.mk
+++ b/silk_sources.mk
@@ -82,7 +82,9 @@
silk/x86/VAD_sse.c \
silk/x86/VQ_WMat_EC_sse.c
-SILK_SOURCES_ARM_NEON_INTR = silk/arm/NSQ_neon.c
+SILK_SOURCES_ARM_NEON_INTR = \
+silk/arm/arm_silk_map.c \
+silk/arm/NSQ_neon.c
SILK_SOURCES_FIXED = \
silk/fixed/LTP_analysis_filter_FIX.c \