author    Martin Braun <martin.braun@ettus.com>    2019-07-18 16:23:12 -0700
committer Martin Braun <martin.braun@ettus.com>    2019-11-26 11:49:11 -0800
commit    75ad0c55164e8e2f08b4bfc9d5361f2445a80f52 (patch)
tree      b81f683d926d059ffbb69bb1d727b89be19ab087 /host
parent    9df26a9d89ef8fb50a667428066f3ef1732245c9 (diff)
convert: Add chdr converters for sc16 -> fc32 and vice versa
These differ from the item32 converters in that they do not IQ-swap, and they do not come in separate BE/LE variants.
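
As an aside, here is a minimal sketch (not part of this patch) contrasting the two packings. The item32 layout follows the existing xx_to_item32_sc16_x1 helper in convert_common.hpp; chdr sc16 is simply the native std::complex<int16_t> memory layout.

// Sketch only: contrasts the item32 and chdr sc16 wire layouts.
#include <complex>
#include <cstdint>

using sc16_t   = std::complex<int16_t>;
using item32_t = uint32_t;

// item32: I in bits [31:16], Q in bits [15:0]; the BE/LE converters then
// byte-swap this 32-bit word as needed.
inline item32_t pack_item32_sc16(const sc16_t& num)
{
    return (item32_t(uint16_t(num.real())) << 16) | item32_t(uint16_t(num.imag()));
}

// chdr: the sc16 pair is already in wire order (real, then imag), so no
// repacking and no byte swap is required.
inline sc16_t pack_chdr_sc16(const sc16_t& num)
{
    return num;
}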
Diffstat (limited to 'host')
-rw-r--r--  host/lib/convert/convert_common.hpp     39
-rw-r--r--  host/lib/convert/sse2_fc32_to_sc16.cpp  53
-rw-r--r--  host/lib/convert/sse2_fc64_to_sc16.cpp  45
-rw-r--r--  host/lib/convert/sse2_sc16_to_fc32.cpp  53
-rw-r--r--  host/lib/convert/sse2_sc16_to_fc64.cpp  45
5 files changed, 235 insertions, 0 deletions
diff --git a/host/lib/convert/convert_common.hpp b/host/lib/convert/convert_common.hpp
index 7b627061e..0de344b75 100644
--- a/host/lib/convert/convert_common.hpp
+++ b/host/lib/convert/convert_common.hpp
@@ -114,6 +114,26 @@ UHD_INLINE void xx_to_item32_sc16(
     }
 }
+template <typename T>
+UHD_FORCE_INLINE sc16_t xx_to_sc16_x1(
+    const std::complex<T>& num, const double scale_factor)
+{
+    uint16_t real = int16_t(num.real() * T(scale_factor));
+    uint16_t imag = int16_t(num.imag() * T(scale_factor));
+    return sc16_t(real, imag);
+}
+
+template <typename T>
+UHD_FORCE_INLINE void xx_to_chdr_sc16(const std::complex<T>* input,
+    sc16_t* output,
+    const size_t nsamps,
+    const double scale_factor)
+{
+    for (size_t i = 0; i < nsamps; i++) {
+        output[i] = xx_to_sc16_x1(input[i], scale_factor);
+    }
+}
+
 /***********************************************************************
  * Convert items32 sc16 buffer to xx
  **********************************************************************/
@@ -147,6 +167,25 @@ UHD_INLINE void item32_sc16_to_xx(
     }
 }
+template <typename T>
+UHD_FORCE_INLINE std::complex<T> chdr_sc16_x1_to_xx(
+    const sc16_t item, const double scale_factor)
+{
+    return std::complex<T>(
+        T(item.real()) * T(scale_factor), T(item.imag()) * T(scale_factor));
+}
+
+template <typename T>
+UHD_FORCE_INLINE void chdr_sc16_to_xx(const sc16_t* input,
+    std::complex<T>* output,
+    const size_t nsamps,
+    const double scale_factor)
+{
+    for (size_t i = 0; i < nsamps; i++) {
+        output[i] = chdr_sc16_x1_to_xx<T>(input[i], scale_factor);
+    }
+}
+
 /***********************************************************************
  * Convert xx to items32 sc8 buffer
  **********************************************************************/
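
A hypothetical round-trip check for the two scalar helpers added above (assumes sc16_t and the helpers from convert_common.hpp are in scope; values and tolerance are illustrative):

#include <cassert>
#include <cmath>
#include <complex>
#include <cstddef>
#include <vector>

int main()
{
    const double scale = 32767.0;
    std::vector<std::complex<float>> in = {{0.25f, -0.25f}, {0.5f, -1.0f}};
    std::vector<sc16_t> wire(in.size());
    std::vector<std::complex<float>> out(in.size());

    // float -> chdr sc16 -> float, using the scalar helpers added above
    xx_to_chdr_sc16(in.data(), wire.data(), in.size(), scale);
    chdr_sc16_to_xx(wire.data(), out.data(), out.size(), 1.0 / scale);

    for (size_t n = 0; n < in.size(); n++) {
        assert(std::abs(in[n].real() - out[n].real()) < 1e-3f);
        assert(std::abs(in[n].imag() - out[n].imag()) < 1e-3f);
    }
    return 0;
}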
diff --git a/host/lib/convert/sse2_fc32_to_sc16.cpp b/host/lib/convert/sse2_fc32_to_sc16.cpp
index 2d1f853b9..d02868d80 100644
--- a/host/lib/convert/sse2_fc32_to_sc16.cpp
+++ b/host/lib/convert/sse2_fc32_to_sc16.cpp
@@ -120,3 +120,56 @@ DECLARE_CONVERTER(fc32, 1, sc16_item32_be, 1, PRIORITY_SIMD)
     // convert any remaining samples
     xx_to_item32_sc16<uhd::htonx>(input + i, output + i, nsamps - i, scale_factor);
 }
+
+DECLARE_CONVERTER(fc32, 1, sc16_chdr, 1, PRIORITY_SIMD)
+{
+    const fc32_t* input = reinterpret_cast<const fc32_t*>(inputs[0]);
+    sc16_t* output      = reinterpret_cast<sc16_t*>(outputs[0]);
+
+    const __m128 scalar = _mm_set_ps1(float(scale_factor));
+
+// this macro converts values faster by using SSE intrinsics to convert 4 values at a time
+#define convert_fc32_1_to_chdr_1_guts(_al_)                                          \
+    for (; i + 3 < nsamps; i += 4) {                                                 \
+        /* load from input */                                                        \
+        __m128 tmplo =                                                               \
+            _mm_load##_al_##ps(reinterpret_cast<const float*>(input + i + 0));       \
+        __m128 tmphi =                                                               \
+            _mm_load##_al_##ps(reinterpret_cast<const float*>(input + i + 2));       \
+                                                                                     \
+        /* convert and scale */                                                      \
+        __m128i tmpilo = _mm_cvtps_epi32(_mm_mul_ps(tmplo, scalar));                 \
+        __m128i tmpihi = _mm_cvtps_epi32(_mm_mul_ps(tmphi, scalar));                 \
+                                                                                     \
+        /* pack from 32 bit integers to 16 bit */                                    \
+        __m128i tmpi = _mm_packs_epi32(tmpilo, tmpihi);                              \
+                                                                                     \
+        /* store to output */                                                        \
+        _mm_storeu_si128(reinterpret_cast<__m128i*>(output + i), tmpi);              \
+    }
+
+    size_t i = 0;
+
+    // need to dispatch according to alignment for fastest conversion
+    switch (size_t(input) & 0xf) {
+        case 0x0:
+            // the data is 16-byte aligned, so do the fast processing of the bulk of the
+            // samples
+            convert_fc32_1_to_chdr_1_guts(_) break;
+        case 0x8:
+            // the first sample is 8-byte aligned - process it to align the remainder of
+            // the samples to 16-bytes
+            xx_to_chdr_sc16(input, output, 1, scale_factor);
+            i++;
+            // do faster processing of the bulk of the samples now that we are 16-byte
+            // aligned
+            convert_fc32_1_to_chdr_1_guts(_) break;
+        default:
+            // we are not 8 or 16-byte aligned, so do fast processing with the unaligned
+            // load
+            convert_fc32_1_to_chdr_1_guts(u_)
+    }
+
+    // convert any remaining samples
+    xx_to_chdr_sc16(input + i, output + i, nsamps - i, scale_factor);
+}
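
The switch above works because fc32 is 8 bytes wide: an input that is 8-byte but not 16-byte aligned becomes 16-byte aligned after exactly one scalar-converted sample, while any other misalignment can never be fixed by whole samples, so the unaligned-load variant runs instead. A generic sketch of that peeling computation (names are illustrative, not UHD API):

#include <cstddef>
#include <cstdint>

// Returns how many leading samples to convert with the scalar fallback so
// the remainder is 16-byte aligned, or nsamps if alignment is unreachable
// (in which case the caller should use unaligned loads throughout).
inline size_t scalar_peel_count(const void* ptr, size_t elem_size, size_t nsamps)
{
    const size_t misalign = reinterpret_cast<uintptr_t>(ptr) & 0xf;
    if (misalign == 0)
        return 0; // already 16-byte aligned
    if (misalign % elem_size != 0)
        return nsamps; // whole samples can never reach alignment
    const size_t peel = (16 - misalign) / elem_size;
    return peel < nsamps ? peel : nsamps;
}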
diff --git a/host/lib/convert/sse2_fc64_to_sc16.cpp b/host/lib/convert/sse2_fc64_to_sc16.cpp
index 7c2ce1f8e..c85e441b4 100644
--- a/host/lib/convert/sse2_fc64_to_sc16.cpp
+++ b/host/lib/convert/sse2_fc64_to_sc16.cpp
@@ -107,3 +107,48 @@ DECLARE_CONVERTER(fc64, 1, sc16_item32_be, 1, PRIORITY_SIMD)
     // convert remainder
     xx_to_item32_sc16<uhd::htonx>(input + i, output + i, nsamps - i, scale_factor);
 }
+
+DECLARE_CONVERTER(fc64, 1, sc16_chdr, 1, PRIORITY_SIMD)
+{
+    const fc64_t* input = reinterpret_cast<const fc64_t*>(inputs[0]);
+    sc16_t* output      = reinterpret_cast<sc16_t*>(outputs[0]);
+
+    const __m128d scalar = _mm_set1_pd(scale_factor);
+
+#define convert_fc64_1_to_chdr_1_guts(_al_)                                           \
+    for (; i + 3 < nsamps; i += 4) {                                                  \
+        /* load from input */                                                         \
+        __m128d tmp0 =                                                                \
+            _mm_load##_al_##pd(reinterpret_cast<const double*>(input + i + 0));       \
+        __m128d tmp1 =                                                                \
+            _mm_load##_al_##pd(reinterpret_cast<const double*>(input + i + 1));       \
+        __m128d tmp2 =                                                                \
+            _mm_load##_al_##pd(reinterpret_cast<const double*>(input + i + 2));       \
+        __m128d tmp3 =                                                                \
+            _mm_load##_al_##pd(reinterpret_cast<const double*>(input + i + 3));       \
+                                                                                      \
+        /* convert and scale */                                                       \
+        __m128i tmpi0  = _mm_cvttpd_epi32(_mm_mul_pd(tmp0, scalar));                  \
+        __m128i tmpi1  = _mm_cvttpd_epi32(_mm_mul_pd(tmp1, scalar));                  \
+        __m128i tmpilo = _mm_unpacklo_epi64(tmpi0, tmpi1);                            \
+        __m128i tmpi2  = _mm_cvttpd_epi32(_mm_mul_pd(tmp2, scalar));                  \
+        __m128i tmpi3  = _mm_cvttpd_epi32(_mm_mul_pd(tmp3, scalar));                  \
+        __m128i tmpihi = _mm_unpacklo_epi64(tmpi2, tmpi3);                            \
+                                                                                      \
+        /* store to output */                                                         \
+        _mm_storeu_si128(                                                             \
+            reinterpret_cast<__m128i*>(output + i), _mm_packs_epi32(tmpilo, tmpihi)); \
+    }
+
+    size_t i = 0;
+
+    // dispatch according to alignment
+    if ((size_t(input) & 0xf) == 0) {
+        convert_fc64_1_to_chdr_1_guts(_)
+    } else {
+        convert_fc64_1_to_chdr_1_guts(u_)
+    }
+
+    // convert remainder
+    xx_to_chdr_sc16(input + i, output + i, nsamps - i, scale_factor);
+}
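
One detail worth noting: this converter uses _mm_cvttpd_epi32, which truncates toward zero, while the fc32 converter above uses _mm_cvtps_epi32, which rounds according to the current rounding mode (round-to-nearest-even by default). Scalar equivalents, for illustration only:

#include <cmath>
#include <cstdint>

inline int32_t like_cvttpd_epi32(double x)
{
    return int32_t(std::trunc(x)); // truncate toward zero, e.g. 2.7 -> 2
}

inline int32_t like_cvtps_epi32(float x)
{
    return int32_t(std::lrintf(x)); // current rounding mode, e.g. 2.7f -> 3
}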
diff --git a/host/lib/convert/sse2_sc16_to_fc32.cpp b/host/lib/convert/sse2_sc16_to_fc32.cpp
index a16ef30d4..fdaf8a5cb 100644
--- a/host/lib/convert/sse2_sc16_to_fc32.cpp
+++ b/host/lib/convert/sse2_sc16_to_fc32.cpp
@@ -119,3 +119,56 @@ DECLARE_CONVERTER(sc16_item32_be, 1, fc32, 1, PRIORITY_SIMD)
     // convert any remaining samples
     item32_sc16_to_xx<uhd::htonx>(input + i, output + i, nsamps - i, scale_factor);
 }
+
+DECLARE_CONVERTER(sc16_chdr, 1, fc32, 1, PRIORITY_SIMD)
+{
+    const sc16_t* input = reinterpret_cast<const sc16_t*>(inputs[0]);
+    fc32_t* output      = reinterpret_cast<fc32_t*>(outputs[0]);
+
+    const __m128 scalar = _mm_set_ps1(float(scale_factor) / (1 << 16));
+    const __m128i zeroi = _mm_setzero_si128();
+
+// this macro converts values faster by using SSE intrinsics to convert 4 values at a time
+#define convert_chdr_1_to_fc32_1_guts(_al_)                                            \
+    for (; i + 3 < nsamps; i += 4) {                                                   \
+        /* load from input */                                                          \
+        __m128i tmpi = _mm_loadu_si128(reinterpret_cast<const __m128i*>(input + i));   \
+                                                                                       \
+        /* unpack 16-bit pairs */                                                      \
+        __m128i tmpilo = _mm_unpacklo_epi16(zeroi, tmpi); /* value in upper 16 bits */ \
+        __m128i tmpihi = _mm_unpackhi_epi16(zeroi, tmpi);                              \
+                                                                                       \
+        /* convert and scale */                                                        \
+        __m128 tmplo = _mm_mul_ps(_mm_cvtepi32_ps(tmpilo), scalar);                    \
+        __m128 tmphi = _mm_mul_ps(_mm_cvtepi32_ps(tmpihi), scalar);                    \
+                                                                                       \
+        /* store to output */                                                          \
+        _mm_store##_al_##ps(reinterpret_cast<float*>(output + i + 0), tmplo);          \
+        _mm_store##_al_##ps(reinterpret_cast<float*>(output + i + 2), tmphi);          \
+    }
+
+    size_t i = 0;
+
+    // need to dispatch according to alignment for fastest conversion
+    switch (size_t(output) & 0xf) {
+        case 0x0:
+            // the data is 16-byte aligned, so do the fast processing of the bulk of the
+            // samples
+            convert_chdr_1_to_fc32_1_guts(_) break;
+        case 0x8:
+            // the first sample is 8-byte aligned - process it to align the remainder of
+            // the samples to 16-bytes
+            chdr_sc16_to_xx(input, output, 1, scale_factor);
+            i++;
+            // do faster processing of the bulk of the samples now that we are 16-byte
+            // aligned
+            convert_chdr_1_to_fc32_1_guts(_) break;
+        default:
+            // we are not 8 or 16-byte aligned, so do fast processing with the unaligned
+            // load and store
+            convert_chdr_1_to_fc32_1_guts(u_)
+    }
+
+    // convert any remaining samples
+    chdr_sc16_to_xx(input + i, output + i, nsamps - i, scale_factor);
+}
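
The scale factor above is divided by (1 << 16) because _mm_unpacklo_epi16(zeroi, tmpi) interleaves zeros below each 16-bit sample, placing it in the upper half of a 32-bit lane. That yields sign extension for free (the 16-bit sign bit becomes the 32-bit sign bit) at the cost of scaling each value by 2^16, which the adjusted scalar cancels. A scalar sketch of one lane:

#include <cstdint>

// Illustrative scalar equivalent of one lane of the SIMD loop above.
inline float chdr_sc16_lane_to_float(int16_t v, float scale_factor)
{
    // Place the sample in the upper 16 bits of a 32-bit integer, exactly
    // like _mm_unpacklo_epi16(zeroi, tmpi); the sign bit carries over.
    const int32_t widened = int32_t(uint32_t(uint16_t(v)) << 16);
    // Folding the 1/2^16 correction into the scale factor means no extra
    // shift instruction is needed in the hot loop.
    return float(widened) * (scale_factor / 65536.0f);
}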
diff --git a/host/lib/convert/sse2_sc16_to_fc64.cpp b/host/lib/convert/sse2_sc16_to_fc64.cpp
index 45821ac9f..4be6bd084 100644
--- a/host/lib/convert/sse2_sc16_to_fc64.cpp
+++ b/host/lib/convert/sse2_sc16_to_fc64.cpp
@@ -103,3 +103,48 @@ DECLARE_CONVERTER(sc16_item32_be, 1, fc64, 1, PRIORITY_SIMD)
     // convert remainder
     item32_sc16_to_xx<uhd::htonx>(input + i, output + i, nsamps - i, scale_factor);
 }
+
+DECLARE_CONVERTER(sc16_chdr, 1, fc64, 1, PRIORITY_SIMD)
+{
+    const sc16_t* input = reinterpret_cast<const sc16_t*>(inputs[0]);
+    fc64_t* output      = reinterpret_cast<fc64_t*>(outputs[0]);
+
+    const __m128d scalar = _mm_set1_pd(scale_factor / (1 << 16));
+    const __m128i zeroi  = _mm_setzero_si128();
+
+#define convert_chdr_1_to_fc64_1_guts(_al_)                                            \
+    for (; i + 3 < nsamps; i += 4) {                                                   \
+        /* load from input */                                                          \
+        __m128i tmpi = _mm_loadu_si128(reinterpret_cast<const __m128i*>(input + i));   \
+                                                                                       \
+        /* unpack 16-bit pairs */                                                      \
+        __m128i tmpilo = _mm_unpacklo_epi16(zeroi, tmpi); /* value in upper 16 bits */ \
+        __m128i tmpihi = _mm_unpackhi_epi16(zeroi, tmpi);                              \
+                                                                                       \
+        /* convert and scale */                                                        \
+        __m128d tmp0 = _mm_mul_pd(_mm_cvtepi32_pd(tmpilo), scalar);                    \
+        tmpilo       = _mm_unpackhi_epi64(tmpilo, zeroi);                              \
+        __m128d tmp1 = _mm_mul_pd(_mm_cvtepi32_pd(tmpilo), scalar);                    \
+        __m128d tmp2 = _mm_mul_pd(_mm_cvtepi32_pd(tmpihi), scalar);                    \
+        tmpihi       = _mm_unpackhi_epi64(tmpihi, zeroi);                              \
+        __m128d tmp3 = _mm_mul_pd(_mm_cvtepi32_pd(tmpihi), scalar);                    \
+                                                                                       \
+        /* store to output */                                                          \
+        _mm_store##_al_##pd(reinterpret_cast<double*>(output + i + 0), tmp0);          \
+        _mm_store##_al_##pd(reinterpret_cast<double*>(output + i + 1), tmp1);          \
+        _mm_store##_al_##pd(reinterpret_cast<double*>(output + i + 2), tmp2);          \
+        _mm_store##_al_##pd(reinterpret_cast<double*>(output + i + 3), tmp3);          \
+    }
+
+    size_t i = 0;
+
+    // dispatch according to alignment
+    if ((size_t(output) & 0xf) == 0) {
+        convert_chdr_1_to_fc64_1_guts(_)
+    } else {
+        convert_chdr_1_to_fc64_1_guts(u_)
+    }
+
+    // convert remainder
+    chdr_sc16_to_xx(input + i, output + i, nsamps - i, scale_factor);
+}
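
A runnable aside on the lane handling in this converter: _mm_cvtepi32_pd widens only the low two 32-bit lanes, which is why the macro shifts the upper pair down with _mm_unpackhi_epi64 before the second and fourth conversions. A hypothetical demo:

#include <emmintrin.h>
#include <cstdio>

int main()
{
    __m128i v = _mm_set_epi32(4, 3, 2, 1); // lanes: [1, 2, 3, 4]
    double lo[2], hi[2];
    // converts lanes 0..1 only
    _mm_storeu_pd(lo, _mm_cvtepi32_pd(v));
    // unpackhi_epi64 moves lanes 2..3 down so they can be converted too
    _mm_storeu_pd(hi, _mm_cvtepi32_pd(_mm_unpackhi_epi64(v, _mm_setzero_si128())));
    std::printf("%g %g %g %g\n", lo[0], lo[1], hi[0], hi[1]); // prints: 1 2 3 4
    return 0;
}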